diff --git a/.github/workflows/build_test_cbsinit.yml b/.github/workflows/build_test_cbsinit.yml
index 4926957a..b85f3285 100644
--- a/.github/workflows/build_test_cbsinit.yml
+++ b/.github/workflows/build_test_cbsinit.yml
@@ -11,7 +11,7 @@ jobs:
         os: ['windows-2022']
         cbsinit_repo: ['https://github.com/cloudbase/cloudbase-init']
         cbsinit_branch: ['master']
-        python_version: ['3.13_13']
+        python_version: ['3.14_4']
         platform: ['x64']
         cloud: [openstack]
diff --git a/BuildAutomation/BuildCloudbaseInitSetup.ps1 b/BuildAutomation/BuildCloudbaseInitSetup.ps1
index f7fe4b9c..ca31d518 100644
--- a/BuildAutomation/BuildCloudbaseInitSetup.ps1
+++ b/BuildAutomation/BuildCloudbaseInitSetup.ps1
@@ -1,6 +1,6 @@
 Param(
     [string]$platform = "x64",
-    [string]$pythonversion = "3.13_13",
+    [string]$pythonversion = "3.14_4",
     [string]$SignX509Thumbprint = $null,
     [string]$release = $null,
     # Cloudbase-Init repo details
diff --git a/BuildAutomation/BuildUtils.ps1 b/BuildAutomation/BuildUtils.ps1
index 5e0bde1a..604f52e2 100644
--- a/BuildAutomation/BuildUtils.ps1
+++ b/BuildAutomation/BuildUtils.ps1
@@ -393,8 +393,69 @@ function ImportCertificateUser($pfxPath, $pfxPassword) {
 }
 
 function ChechFileHash($path, $hash, $algorithm="SHA1") {
-    $h = Get-Filehash -Algorithm $algorithm $path
-    if ($h.Hash.ToUpper() -ne $hash.ToUpper()) {
-        throw "Hash comparison failed for file: $path"
+    $actualHash = (Get-FileHash -Algorithm $algorithm $path).Hash.ToUpper()
+    if ($actualHash -ne $hash.ToUpper()) {
+        throw "Hash comparison failed for file: $path. Expected hash: ${hash}. Actual hash: ${actualHash}"
     }
 }
+
+
+# Downloads the python.org installer (an .exe, despite the "Msi" in the name),
+# installs Python into a temporary directory and copies the result to
+# $python_template_dir. The installer and the temporary install are removed
+# afterwards.
+function DownloadInstall-PythonMsi($platform, $python_template_dir, $pythonVersion, $PythonMsiChecksum, $algorithm="SHA1") {
+    $platformSuffix = ""
+    if ($platform -eq "x64") {
+        $platformSuffix = "-amd64"
+    }
+
+    if ($python_template_dir -and (Test-Path $python_template_dir)) {
+        throw "Python template directory ${python_template_dir} already exists"
+    }
+
+    $pythonInstallerPath = Join-Path (Resolve-Path "${python_template_dir}/..").Path "/python-${pythonVersion}${platformSuffix}.exe"
+    $pythonVersionEscaped = $pythonVersion.replace("_", ".")
+    $PythonMsiUrl = "https://www.python.org/ftp/python/${pythonVersionEscaped}/python-${pythonVersionEscaped}${platformSuffix}.exe"
+
+    $tmp_python_template_dir = "${python_template_dir}_tmp"
+    if ($tmp_python_template_dir -and (Test-Path $tmp_python_template_dir)) {
+        throw "Python temp template directory ${tmp_python_template_dir} already exists"
+    }
+
+    try {
+        # Download the installer and verify its checksum before running it.
+        ExecRetry { DownloadFile $PythonMsiUrl $pythonInstallerPath }
+        ChechFileHash $pythonInstallerPath $PythonMsiChecksum $algorithm
+
+        Write-Host "Trying to uninstall Python using $pythonInstallerPath"
+        Start-Process -FilePath "${pythonInstallerPath}" -NoNewWindow -Wait `
+            -ArgumentList @("/quiet", "/uninstall")
+
+        $package = Get-Package -Name "Python ${pythonVersionEscaped}*" -ErrorAction SilentlyContinue
+        if ($package) {
+            throw "Python ${pythonVersionEscaped} is still installed after the uninstall attempt"
+        }
+
+        Write-Host "Installing Python using $pythonInstallerPath"
+        Start-Process -FilePath "${pythonInstallerPath}" -NoNewWindow -Wait `
+            -ArgumentList @("/quiet", "TargetDir=${tmp_python_template_dir}", "Include_test=0", "Include_tcltk=0", "Include_launcher=0", "Include_doc=0")
+
+        Copy-Item -Recurse $tmp_python_template_dir $python_template_dir
+    } finally {
+        # Uninstall and clean up; guard on the installer path in case the
+        # download itself failed.
+        if (Test-Path $pythonInstallerPath) {
+            Start-Process -FilePath "${pythonInstallerPath}" -NoNewWindow -Wait `
+                -ArgumentList @("/quiet", "/uninstall")
+            Remove-Item $pythonInstallerPath
+        }
+
+        if (Test-Path $tmp_python_template_dir) {
+            Remove-Item $tmp_python_template_dir -Recurse -Force
+        }
+    }
+
+    if (!(Test-Path $python_template_dir)) {
+        throw "$python_template_dir has not been created"
+    }
+}
diff --git a/Python313_13_x64_Template/DLLs/_asyncio.pyd b/Python313_13_x64_Template/DLLs/_asyncio.pyd deleted file mode 100644 index c5cfdb02..00000000 Binary files a/Python313_13_x64_Template/DLLs/_asyncio.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_bz2.pyd b/Python313_13_x64_Template/DLLs/_bz2.pyd deleted file mode 100644 index ee443b90..00000000 Binary files a/Python313_13_x64_Template/DLLs/_bz2.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_ctypes.pyd b/Python313_13_x64_Template/DLLs/_ctypes.pyd deleted file mode 100644 index 66334bd5..00000000 Binary files a/Python313_13_x64_Template/DLLs/_ctypes.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_decimal.pyd b/Python313_13_x64_Template/DLLs/_decimal.pyd deleted file mode 100644 index a9dd5cbf..00000000 Binary files a/Python313_13_x64_Template/DLLs/_decimal.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_elementtree.pyd b/Python313_13_x64_Template/DLLs/_elementtree.pyd deleted file mode 100644 index 58b290f1..00000000 Binary files a/Python313_13_x64_Template/DLLs/_elementtree.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_hashlib.pyd b/Python313_13_x64_Template/DLLs/_hashlib.pyd deleted file mode 100644 index 52926dee..00000000 Binary files a/Python313_13_x64_Template/DLLs/_hashlib.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_lzma.pyd b/Python313_13_x64_Template/DLLs/_lzma.pyd deleted file mode 100644 index 398d39ad..00000000 Binary files a/Python313_13_x64_Template/DLLs/_lzma.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_multiprocessing.pyd b/Python313_13_x64_Template/DLLs/_multiprocessing.pyd deleted file mode 100644 index caf4504f..00000000 Binary files a/Python313_13_x64_Template/DLLs/_multiprocessing.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_overlapped.pyd b/Python313_13_x64_Template/DLLs/_overlapped.pyd deleted file mode 100644 index 6b05f02e..00000000 Binary files a/Python313_13_x64_Template/DLLs/_overlapped.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_queue.pyd b/Python313_13_x64_Template/DLLs/_queue.pyd deleted file mode 100644 index e29b6660..00000000 Binary files a/Python313_13_x64_Template/DLLs/_queue.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_socket.pyd b/Python313_13_x64_Template/DLLs/_socket.pyd deleted file mode 100644 index 207328b6..00000000 Binary files a/Python313_13_x64_Template/DLLs/_socket.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_sqlite3.pyd b/Python313_13_x64_Template/DLLs/_sqlite3.pyd deleted file mode 100644 index f284e2fd..00000000 Binary files a/Python313_13_x64_Template/DLLs/_sqlite3.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_ssl.pyd b/Python313_13_x64_Template/DLLs/_ssl.pyd deleted file mode 100644 index f37f4105..00000000 Binary files a/Python313_13_x64_Template/DLLs/_ssl.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_uuid.pyd b/Python313_13_x64_Template/DLLs/_uuid.pyd deleted file mode 100644 index 9743c8f9..00000000 Binary files a/Python313_13_x64_Template/DLLs/_uuid.pyd and /dev/null differ diff --git
a/Python313_13_x64_Template/DLLs/_wmi.pyd b/Python313_13_x64_Template/DLLs/_wmi.pyd deleted file mode 100644 index 21c95230..00000000 Binary files a/Python313_13_x64_Template/DLLs/_wmi.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/_zoneinfo.pyd b/Python313_13_x64_Template/DLLs/_zoneinfo.pyd deleted file mode 100644 index 155a8b47..00000000 Binary files a/Python313_13_x64_Template/DLLs/_zoneinfo.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/pyexpat.pyd b/Python313_13_x64_Template/DLLs/pyexpat.pyd deleted file mode 100644 index e4a867f4..00000000 Binary files a/Python313_13_x64_Template/DLLs/pyexpat.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/python_lib.cat b/Python313_13_x64_Template/DLLs/python_lib.cat deleted file mode 100644 index 2b1aa45f..00000000 Binary files a/Python313_13_x64_Template/DLLs/python_lib.cat and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/select.pyd b/Python313_13_x64_Template/DLLs/select.pyd deleted file mode 100644 index e8e283c7..00000000 Binary files a/Python313_13_x64_Template/DLLs/select.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/sqlite3.dll b/Python313_13_x64_Template/DLLs/sqlite3.dll deleted file mode 100644 index 6be79c47..00000000 Binary files a/Python313_13_x64_Template/DLLs/sqlite3.dll and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/unicodedata.pyd b/Python313_13_x64_Template/DLLs/unicodedata.pyd deleted file mode 100644 index 5c2f3b29..00000000 Binary files a/Python313_13_x64_Template/DLLs/unicodedata.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/DLLs/winsound.pyd b/Python313_13_x64_Template/DLLs/winsound.pyd deleted file mode 100644 index 8c1b9a4e..00000000 Binary files a/Python313_13_x64_Template/DLLs/winsound.pyd and /dev/null differ diff --git a/Python313_13_x64_Template/LICENSE.txt b/Python313_13_x64_Template/LICENSE.txt deleted file mode 100644 index b97536f4..00000000 --- a/Python313_13_x64_Template/LICENSE.txt +++ /dev/null @@ -1,645 +0,0 @@ -A. HISTORY OF THE SOFTWARE -========================== - -Python was created in the early 1990s by Guido van Rossum at Stichting -Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands -as a successor of a language called ABC. Guido remains Python's -principal author, although it includes many contributions from others. - -In 1995, Guido continued his work on Python at the Corporation for -National Research Initiatives (CNRI, see https://www.cnri.reston.va.us) -in Reston, Virginia where he released several versions of the -software. - -In May 2000, Guido and the Python core development team moved to -BeOpen.com to form the BeOpen PythonLabs team. In October of the same -year, the PythonLabs team moved to Digital Creations, which became -Zope Corporation. In 2001, the Python Software Foundation (PSF, see -https://www.python.org/psf/) was formed, a non-profit organization -created specifically to own Python-related Intellectual Property. -Zope Corporation was a sponsoring member of the PSF. - -All Python releases are Open Source (see https://opensource.org for -the Open Source Definition). Historically, most, but not all, Python -releases have also been GPL-compatible; the table below summarizes -the various releases. - - Release Derived Year Owner GPL- - from compatible? 
(1) - - 0.9.0 thru 1.2 1991-1995 CWI yes - 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes - 1.6 1.5.2 2000 CNRI no - 2.0 1.6 2000 BeOpen.com no - 1.6.1 1.6 2001 CNRI yes (2) - 2.1 2.0+1.6.1 2001 PSF no - 2.0.1 2.0+1.6.1 2001 PSF yes - 2.1.1 2.1+2.0.1 2001 PSF yes - 2.1.2 2.1.1 2002 PSF yes - 2.1.3 2.1.2 2002 PSF yes - 2.2 and above 2.1.1 2001-now PSF yes - -Footnotes: - -(1) GPL-compatible doesn't mean that we're distributing Python under - the GPL. All Python licenses, unlike the GPL, let you distribute - a modified version without making your changes open source. The - GPL-compatible licenses make it possible to combine Python with - other software that is released under the GPL; the others don't. - -(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, - because its license has a choice of law clause. According to - CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 - is "not incompatible" with the GPL. - -Thanks to the many outside volunteers who have worked under Guido's -direction to make these releases possible. - - -B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON -=============================================================== - -Python software and documentation are licensed under the -Python Software Foundation License Version 2. - -Starting with Python 3.8.6, examples, recipes, and other code in -the documentation are dual licensed under the PSF License Version 2 -and the Zero-Clause BSD license. - -Some software incorporated into Python is under different licenses. -The licenses are listed with code falling under that license. - - -PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 --------------------------------------------- - -1. This LICENSE AGREEMENT is between the Python Software Foundation -("PSF"), and the Individual or Organization ("Licensee") accessing and -otherwise using this software ("Python") in source or binary form and -its associated documentation. - -2. Subject to the terms and conditions of this License Agreement, PSF hereby -grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, -analyze, test, perform and/or display publicly, prepare derivative works, -distribute, and otherwise use Python alone or in any derivative version, -provided, however, that PSF's License Agreement and PSF's notice of copyright, -i.e., "Copyright (c) 2001-2024 Python Software Foundation; All Rights Reserved" -are retained in Python alone or in any derivative version prepared by Licensee. - -3. In the event Licensee prepares a derivative work that is based on -or incorporates Python or any part thereof, and wants to make -the derivative work available to others as provided herein, then -Licensee hereby agrees to include in any such work a brief summary of -the changes made to Python. - -4. PSF is making Python available to Licensee on an "AS IS" -basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, -OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -7. 
Nothing in this License Agreement shall be deemed to create any -relationship of agency, partnership, or joint venture between PSF and -Licensee. This License Agreement does not grant permission to use PSF -trademarks or trade name in a trademark sense to endorse or promote -products or services of Licensee, or any third party. - -8. By copying, installing or otherwise using Python, Licensee -agrees to be bound by the terms and conditions of this License -Agreement. - - -BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 -------------------------------------------- - -BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 - -1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an -office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the -Individual or Organization ("Licensee") accessing and otherwise using -this software in source or binary form and its associated -documentation ("the Software"). - -2. Subject to the terms and conditions of this BeOpen Python License -Agreement, BeOpen hereby grants Licensee a non-exclusive, -royalty-free, world-wide license to reproduce, analyze, test, perform -and/or display publicly, prepare derivative works, distribute, and -otherwise use the Software alone or in any derivative version, -provided, however, that the BeOpen Python License is retained in the -Software, alone or in any derivative version prepared by Licensee. - -3. BeOpen is making the Software available to Licensee on an "AS IS" -basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE -SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS -AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY -DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -5. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -6. This License Agreement shall be governed by and interpreted in all -respects by the law of the State of California, excluding conflict of -law provisions. Nothing in this License Agreement shall be deemed to -create any relationship of agency, partnership, or joint venture -between BeOpen and Licensee. This License Agreement does not grant -permission to use BeOpen trademarks or trade names in a trademark -sense to endorse or promote products or services of Licensee, or any -third party. As an exception, the "BeOpen Python" logos available at -http://www.pythonlabs.com/logos.html may be used according to the -permissions granted on that web page. - -7. By copying, installing or otherwise using the software, Licensee -agrees to be bound by the terms and conditions of this License -Agreement. - - -CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 ---------------------------------------- - -1. This LICENSE AGREEMENT is between the Corporation for National -Research Initiatives, having an office at 1895 Preston White Drive, -Reston, VA 20191 ("CNRI"), and the Individual or Organization -("Licensee") accessing and otherwise using Python 1.6.1 software in -source or binary form and its associated documentation. - -2. 
Subject to the terms and conditions of this License Agreement, CNRI -hereby grants Licensee a nonexclusive, royalty-free, world-wide -license to reproduce, analyze, test, perform and/or display publicly, -prepare derivative works, distribute, and otherwise use Python 1.6.1 -alone or in any derivative version, provided, however, that CNRI's -License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) -1995-2001 Corporation for National Research Initiatives; All Rights -Reserved" are retained in Python 1.6.1 alone or in any derivative -version prepared by Licensee. Alternately, in lieu of CNRI's License -Agreement, Licensee may substitute the following text (omitting the -quotes): "Python 1.6.1 is made available subject to the terms and -conditions in CNRI's License Agreement. This Agreement together with -Python 1.6.1 may be located on the internet using the following -unique, persistent identifier (known as a handle): 1895.22/1013. This -Agreement may also be obtained from a proxy server on the internet -using the following URL: http://hdl.handle.net/1895.22/1013". - -3. In the event Licensee prepares a derivative work that is based on -or incorporates Python 1.6.1 or any part thereof, and wants to make -the derivative work available to others as provided herein, then -Licensee hereby agrees to include in any such work a brief summary of -the changes made to Python 1.6.1. - -4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" -basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, -OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -7. This License Agreement shall be governed by the federal -intellectual property law of the United States, including without -limitation the federal copyright law, and, to the extent such -U.S. federal law does not apply, by the law of the Commonwealth of -Virginia, excluding Virginia's conflict of law provisions. -Notwithstanding the foregoing, with regard to derivative works based -on Python 1.6.1 that incorporate non-separable material that was -previously distributed under the GNU General Public License (GPL), the -law of the Commonwealth of Virginia shall govern this License -Agreement only as to issues arising under or with respect to -Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this -License Agreement shall be deemed to create any relationship of -agency, partnership, or joint venture between CNRI and Licensee. This -License Agreement does not grant permission to use CNRI trademarks or -trade name in a trademark sense to endorse or promote products or -services of Licensee, or any third party. - -8. By clicking on the "ACCEPT" button where indicated, or by copying, -installing or otherwise using Python 1.6.1, Licensee agrees to be -bound by the terms and conditions of this License Agreement. 
- - ACCEPT - - -CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 --------------------------------------------------- - -Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, -The Netherlands. All rights reserved. - -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose and without fee is hereby granted, -provided that the above copyright notice appear in all copies and that -both that copyright notice and this permission notice appear in -supporting documentation, and that the name of Stichting Mathematisch -Centrum or CWI not be used in advertising or publicity pertaining to -distribution of the software without specific, written prior -permission. - -STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO -THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE -FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION ----------------------------------------------------------------------- - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR -OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. - - - -Additional Conditions for this Windows binary build ---------------------------------------------------- - -This program is linked with and uses Microsoft Distributable Code, -copyrighted by Microsoft Corporation. The Microsoft Distributable Code -is embedded in each .exe, .dll and .pyd file as a result of running -the code through a linker. - -If you further distribute programs that include the Microsoft -Distributable Code, you must comply with the restrictions on -distribution specified by Microsoft. In particular, you must require -distributors and external end users to agree to terms that protect the -Microsoft Distributable Code at least as much as Microsoft's own -requirements for the Distributable Code. See Microsoft's documentation -(included in its developer tools and on its website at microsoft.com) -for specific details. - -Redistribution of the Windows binary build of the Python interpreter -complies with this agreement, provided that you do not: - -- alter any copyright, trademark or patent notice in Microsoft's -Distributable Code; - -- use Microsoft's trademarks in your programs' names or in a way that -suggests your programs come from or are endorsed by Microsoft; - -- distribute Microsoft's Distributable Code to run on a platform other -than Microsoft operating systems, run-time technologies or application -platforms; or - -- include Microsoft Distributable Code in malicious, deceptive or -unlawful programs. 
- -These restrictions apply only to the Microsoft Distributable Code as -defined above, not to Python itself or any programs running on the -Python interpreter. The redistribution of the Python interpreter and -libraries is governed by the Python Software License included with this -file, or by other licenses as marked. - - - --------------------------------------------------------------------------- - -This program, "bzip2", the associated library "libbzip2", and all -documentation, are copyright (C) 1996-2019 Julian R Seward. All -rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. The origin of this software must not be misrepresented; you must - not claim that you wrote the original software. If you use this - software in a product, an acknowledgment in the product - documentation would be appreciated but is not required. - -3. Altered source versions must be plainly marked as such, and must - not be misrepresented as being the original software. - -4. The name of the author may not be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Julian Seward, jseward@acm.org -bzip2/libbzip2 version 1.0.8 of 13 July 2019 - --------------------------------------------------------------------------- - -libffi - Copyright (c) 1996-2022 Anthony Green, Red Hat, Inc and others. -See source files for details. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -``Software''), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. 
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - -This software is copyrighted by the Regents of the University of -California, Sun Microsystems, Inc., Scriptics Corporation, ActiveState -Corporation and other parties. The following terms apply to all files -associated with the software unless explicitly disclaimed in -individual files. - -The authors hereby grant permission to use, copy, modify, distribute, -and license this software and its documentation for any purpose, provided -that existing copyright notices are retained in all copies and that this -notice is included verbatim in any distributions. No written agreement, -license, or royalty fee is required for any of the authorized uses. -Modifications to this software may be copyrighted by their authors -and need not follow the licensing terms described here, provided that -the new terms are clearly indicated on the first page of each file where -they apply. - -IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY -FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES -ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY -DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
- -THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE -IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE -NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR -MODIFICATIONS. - -GOVERNMENT USE: If you are acquiring this software on behalf of the -U.S. government, the Government shall have only "Restricted Rights" -in the software and related documentation as defined in the Federal -Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you -are acquiring the software on behalf of the Department of Defense, the -software shall be classified as "Commercial Computer Software" and the -Government shall have only "Restricted Rights" as defined in Clause -252.227-7014 (b) (3) of DFARs. Notwithstanding the foregoing, the -authors grant the U.S. Government and others acting in its behalf -permission to use and distribute the software in accordance with the -terms specified in this license. - -This software is copyrighted by the Regents of the University of -California, Sun Microsystems, Inc., Scriptics Corporation, ActiveState -Corporation, Apple Inc. and other parties. The following terms apply to -all files associated with the software unless explicitly disclaimed in -individual files. - -The authors hereby grant permission to use, copy, modify, distribute, -and license this software and its documentation for any purpose, provided -that existing copyright notices are retained in all copies and that this -notice is included verbatim in any distributions. No written agreement, -license, or royalty fee is required for any of the authorized uses. -Modifications to this software may be copyrighted by their authors -and need not follow the licensing terms described here, provided that -the new terms are clearly indicated on the first page of each file where -they apply. - -IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY -FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES -ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY -DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE -IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE -NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR -MODIFICATIONS. - -GOVERNMENT USE: If you are acquiring this software on behalf of the -U.S. government, the Government shall have only "Restricted Rights" -in the software and related documentation as defined in the Federal -Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you -are acquiring the software on behalf of the Department of Defense, the -software shall be classified as "Commercial Computer Software" and the -Government shall have only "Restricted Rights" as defined in Clause -252.227-7013 (b) (3) of DFARs. Notwithstanding the foregoing, the -authors grant the U.S. Government and others acting in its behalf -permission to use and distribute the software in accordance with the -terms specified in this license. 
- diff --git a/Python313_13_x64_Template/Lib/__pycache__/__future__.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/__future__.cpython-313.pyc deleted file mode 100644 index 216e1b3b..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/__future__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/_colorize.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/_colorize.cpython-313.pyc deleted file mode 100644 index 1b54a450..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/_colorize.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/_compat_pickle.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/_compat_pickle.cpython-313.pyc deleted file mode 100644 index fdb9ffc1..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/_compat_pickle.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/_compression.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/_compression.cpython-313.pyc deleted file mode 100644 index 9ddc34ed..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/_compression.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/_markupbase.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/_markupbase.cpython-313.pyc deleted file mode 100644 index 3f396109..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/_markupbase.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/_opcode_metadata.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/_opcode_metadata.cpython-313.pyc deleted file mode 100644 index f8bea618..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/_opcode_metadata.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/_weakrefset.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/_weakrefset.cpython-313.pyc deleted file mode 100644 index fd566ac8..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/_weakrefset.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/argparse.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/argparse.cpython-313.pyc deleted file mode 100644 index dfe03f5b..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/argparse.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/ast.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/ast.cpython-313.pyc deleted file mode 100644 index b78bb174..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/ast.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/base64.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/base64.cpython-313.pyc deleted file mode 100644 index 6b38b9d2..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/base64.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/bisect.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/bisect.cpython-313.pyc deleted file mode 100644 index 19c0d207..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/bisect.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/bz2.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/bz2.cpython-313.pyc deleted file mode 100644 index 
57c61171..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/bz2.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/calendar.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/calendar.cpython-313.pyc deleted file mode 100644 index 17193923..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/calendar.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/colorsys.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/colorsys.cpython-313.pyc deleted file mode 100644 index 2755c5a8..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/colorsys.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/compileall.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/compileall.cpython-313.pyc deleted file mode 100644 index 54b0ff14..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/compileall.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/configparser.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/configparser.cpython-313.pyc deleted file mode 100644 index 3b038c9a..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/configparser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/contextlib.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/contextlib.cpython-313.pyc deleted file mode 100644 index d16c1ecf..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/contextlib.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/copy.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/copy.cpython-313.pyc deleted file mode 100644 index 210b2d32..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/copy.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/copyreg.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/copyreg.cpython-313.pyc deleted file mode 100644 index ff4559fb..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/copyreg.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/csv.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/csv.cpython-313.pyc deleted file mode 100644 index 4b183b91..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/csv.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/dataclasses.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/dataclasses.cpython-313.pyc deleted file mode 100644 index f022b0a7..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/dataclasses.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/datetime.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/datetime.cpython-313.pyc deleted file mode 100644 index c9a9563b..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/datetime.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/decimal.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/decimal.cpython-313.pyc deleted file mode 100644 index f2ff2547..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/decimal.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/dis.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/__pycache__/dis.cpython-313.pyc deleted file mode 100644 index 33b62d9d..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/dis.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/enum.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/enum.cpython-313.pyc deleted file mode 100644 index b277453a..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/enum.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/filecmp.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/filecmp.cpython-313.pyc deleted file mode 100644 index d9bf9124..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/filecmp.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/fnmatch.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/fnmatch.cpython-313.pyc deleted file mode 100644 index 5ca91bf9..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/fnmatch.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/fractions.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/fractions.cpython-313.pyc deleted file mode 100644 index b839ee2a..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/fractions.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/functools.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/functools.cpython-313.pyc deleted file mode 100644 index 9ef97688..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/functools.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/getpass.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/getpass.cpython-313.pyc deleted file mode 100644 index 39108695..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/getpass.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/gettext.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/gettext.cpython-313.pyc deleted file mode 100644 index fa2417d2..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/gettext.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/glob.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/glob.cpython-313.pyc deleted file mode 100644 index c49fc97e..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/glob.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/gzip.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/gzip.cpython-313.pyc deleted file mode 100644 index 84dc7fcc..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/gzip.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/hashlib.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/hashlib.cpython-313.pyc deleted file mode 100644 index 8699fde6..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/hashlib.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/heapq.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/heapq.cpython-313.pyc deleted file mode 100644 index 15826aed..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/heapq.cpython-313.pyc and /dev/null differ diff --git 
a/Python313_13_x64_Template/Lib/__pycache__/hmac.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/hmac.cpython-313.pyc deleted file mode 100644 index 5cf74e4a..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/hmac.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/inspect.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/inspect.cpython-313.pyc deleted file mode 100644 index 088c021c..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/inspect.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/ipaddress.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/ipaddress.cpython-313.pyc deleted file mode 100644 index 7cd3e3cf..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/ipaddress.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/keyword.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/keyword.cpython-313.pyc deleted file mode 100644 index ac05e23b..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/keyword.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/linecache.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/linecache.cpython-313.pyc deleted file mode 100644 index b99abcc7..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/linecache.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/locale.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/locale.cpython-313.pyc deleted file mode 100644 index 3f3da0e2..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/locale.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/lzma.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/lzma.cpython-313.pyc deleted file mode 100644 index 572a5921..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/lzma.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/mimetypes.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/mimetypes.cpython-313.pyc deleted file mode 100644 index cea8be80..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/mimetypes.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/nturl2path.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/nturl2path.cpython-313.pyc deleted file mode 100644 index ce03ddd5..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/nturl2path.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/numbers.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/numbers.cpython-313.pyc deleted file mode 100644 index 8ac9fffb..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/numbers.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/opcode.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/opcode.cpython-313.pyc deleted file mode 100644 index 039cf118..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/opcode.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/operator.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/operator.cpython-313.pyc deleted file mode 100644 index f168d6b1..00000000 Binary files 
a/Python313_13_x64_Template/Lib/__pycache__/operator.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/optparse.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/optparse.cpython-313.pyc
deleted file mode 100644
index 97ac3bf6..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/optparse.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/pickle.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/pickle.cpython-313.pyc
deleted file mode 100644
index 93eec13a..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/pickle.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/pkgutil.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/pkgutil.cpython-313.pyc
deleted file mode 100644
index 5a595c81..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/pkgutil.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/platform.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/platform.cpython-313.pyc
deleted file mode 100644
index 828c6dcd..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/platform.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/py_compile.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/py_compile.cpython-313.pyc
deleted file mode 100644
index b3942679..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/py_compile.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/queue.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/queue.cpython-313.pyc
deleted file mode 100644
index 89662c13..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/queue.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/quopri.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/quopri.cpython-313.pyc
deleted file mode 100644
index c749e21a..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/quopri.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/random.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/random.cpython-313.pyc
deleted file mode 100644
index 8d9c2fa6..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/random.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/reprlib.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/reprlib.cpython-313.pyc
deleted file mode 100644
index 621bd777..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/reprlib.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/selectors.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/selectors.cpython-313.pyc
deleted file mode 100644
index 3e70956c..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/selectors.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/shlex.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/shlex.cpython-313.pyc
deleted file mode 100644
index e6a8c033..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/shlex.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/shutil.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/shutil.cpython-313.pyc
deleted file mode 100644
index 11eb838e..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/shutil.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/signal.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/signal.cpython-313.pyc
deleted file mode 100644
index 86b175fe..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/signal.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/socket.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/socket.cpython-313.pyc
deleted file mode 100644
index a2aab8a9..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/socket.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/socketserver.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/socketserver.cpython-313.pyc
deleted file mode 100644
index 69a1bdff..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/socketserver.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/ssl.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/ssl.cpython-313.pyc
deleted file mode 100644
index 1118a1e0..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/ssl.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/string.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/string.cpython-313.pyc
deleted file mode 100644
index 75e997de..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/string.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/stringprep.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/stringprep.cpython-313.pyc
deleted file mode 100644
index b20b9237..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/stringprep.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/struct.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/struct.cpython-313.pyc
deleted file mode 100644
index 55431e83..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/struct.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/subprocess.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/subprocess.cpython-313.pyc
deleted file mode 100644
index 34163bc7..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/subprocess.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/tarfile.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/tarfile.cpython-313.pyc
deleted file mode 100644
index ed1f3e65..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/tarfile.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/tempfile.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/tempfile.cpython-313.pyc
deleted file mode 100644
index 8cfac50d..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/tempfile.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/textwrap.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/textwrap.cpython-313.pyc
deleted file mode 100644
index 7410c037..00000000
Binary files a/Python313_13_x64_Template/Lib/__pycache__/textwrap.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/__pycache__/threading.cpython-313.pyc
b/Python313_13_x64_Template/Lib/__pycache__/threading.cpython-313.pyc deleted file mode 100644 index 2d9ac2f9..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/threading.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/token.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/token.cpython-313.pyc deleted file mode 100644 index cf057f9e..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/token.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/tokenize.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/tokenize.cpython-313.pyc deleted file mode 100644 index 5b007147..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/tokenize.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/traceback.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/traceback.cpython-313.pyc deleted file mode 100644 index e958db0d..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/traceback.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/types.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/types.cpython-313.pyc deleted file mode 100644 index 70039821..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/types.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/typing.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/typing.cpython-313.pyc deleted file mode 100644 index b166f158..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/typing.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/uuid.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/uuid.cpython-313.pyc deleted file mode 100644 index 33e26b88..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/uuid.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/warnings.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/warnings.cpython-313.pyc deleted file mode 100644 index 762ddfc0..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/warnings.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/__pycache__/weakref.cpython-313.pyc b/Python313_13_x64_Template/Lib/__pycache__/weakref.cpython-313.pyc deleted file mode 100644 index e4a2c0d9..00000000 Binary files a/Python313_13_x64_Template/Lib/__pycache__/weakref.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/_collections_abc.py b/Python313_13_x64_Template/Lib/_collections_abc.py deleted file mode 100644 index 6e224d36..00000000 --- a/Python313_13_x64_Template/Lib/_collections_abc.py +++ /dev/null @@ -1,1182 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. - -Unit tests are in test_collections. -""" - -############ Maintenance notes ######################################### -# -# ABCs are different from other standard library modules in that they -# specify compliance tests. In general, once an ABC has been published, -# new methods (either abstract or concrete) cannot be added. 
-# -# Though classes that inherit from an ABC would automatically receive a -# new mixin method, registered classes would become non-compliant and -# violate the contract promised by ``isinstance(someobj, SomeABC)``. -# -# Though irritating, the correct procedure for adding new abstract or -# mixin methods is to create a new ABC as a subclass of the previous -# ABC. For example, union(), intersection(), and difference() cannot -# be added to Set but could go into a new ABC that extends Set. -# -# Because they are so hard to change, new ABCs should have their APIs -# carefully thought through prior to publication. -# -# Since ABCMeta only checks for the presence of methods, it is possible -# to alter the signature of a method by adding optional arguments -# or changing parameters names. This is still a bit dubious but at -# least it won't cause isinstance() to return an incorrect result. -# -# -####################################################################### - -from abc import ABCMeta, abstractmethod -import sys - -GenericAlias = type(list[int]) -EllipsisType = type(...) -def _f(): pass -FunctionType = type(_f) -del _f - -__all__ = ["Awaitable", "Coroutine", - "AsyncIterable", "AsyncIterator", "AsyncGenerator", - "Hashable", "Iterable", "Iterator", "Generator", "Reversible", - "Sized", "Container", "Callable", "Collection", - "Set", "MutableSet", - "Mapping", "MutableMapping", - "MappingView", "KeysView", "ItemsView", "ValuesView", - "Sequence", "MutableSequence", - "ByteString", "Buffer", - ] - -# This module has been renamed from collections.abc to _collections_abc to -# speed up interpreter startup. Some of the types such as MutableMapping are -# required early but collections module imports a lot of other modules. -# See issue #19218 -__name__ = "collections.abc" - -# Private list of types that we want to register with the various ABCs -# so that they will pass tests like: -# it = iter(somebytearray) -# assert isinstance(it, Iterable) -# Note: in other implementations, these types might not be distinct -# and they may have their own implementation specific types that -# are not included on this list. -bytes_iterator = type(iter(b'')) -bytearray_iterator = type(iter(bytearray())) -#callable_iterator = ??? 
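The registration intent spelled out in the comment above is observable through the public collections.abc module; a minimal sanity check (variable names illustrative):

    from collections.abc import Iterable, Iterator

    it = iter(bytearray(b"abc"))      # a bytearray_iterator, registered below
    assert isinstance(it, Iterable)   # satisfied via Iterator.register(...)
    assert isinstance(it, Iterator)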
-dict_keyiterator = type(iter({}.keys())) -dict_valueiterator = type(iter({}.values())) -dict_itemiterator = type(iter({}.items())) -list_iterator = type(iter([])) -list_reverseiterator = type(iter(reversed([]))) -range_iterator = type(iter(range(0))) -longrange_iterator = type(iter(range(1 << 1000))) -set_iterator = type(iter(set())) -str_iterator = type(iter("")) -tuple_iterator = type(iter(())) -zip_iterator = type(iter(zip())) -## views ## -dict_keys = type({}.keys()) -dict_values = type({}.values()) -dict_items = type({}.items()) -## misc ## -mappingproxy = type(type.__dict__) -def _get_framelocalsproxy(): - return type(sys._getframe().f_locals) -framelocalsproxy = _get_framelocalsproxy() -del _get_framelocalsproxy -generator = type((lambda: (yield))()) -## coroutine ## -async def _coro(): pass -_coro = _coro() -coroutine = type(_coro) -_coro.close() # Prevent ResourceWarning -del _coro -## asynchronous generator ## -async def _ag(): yield -_ag = _ag() -async_generator = type(_ag) -del _ag - - -### ONE-TRICK PONIES ### - -def _check_methods(C, *methods): - mro = C.__mro__ - for method in methods: - for B in mro: - if method in B.__dict__: - if B.__dict__[method] is None: - return NotImplemented - break - else: - return NotImplemented - return True - -class Hashable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __hash__(self): - return 0 - - @classmethod - def __subclasshook__(cls, C): - if cls is Hashable: - return _check_methods(C, "__hash__") - return NotImplemented - - -class Awaitable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __await__(self): - yield - - @classmethod - def __subclasshook__(cls, C): - if cls is Awaitable: - return _check_methods(C, "__await__") - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -class Coroutine(Awaitable): - - __slots__ = () - - @abstractmethod - def send(self, value): - """Send a value into the coroutine. - Return next yielded value or raise StopIteration. - """ - raise StopIteration - - @abstractmethod - def throw(self, typ, val=None, tb=None): - """Raise an exception in the coroutine. - Return next yielded value or raise StopIteration. - """ - if val is None: - if tb is None: - raise typ - val = typ() - if tb is not None: - val = val.with_traceback(tb) - raise val - - def close(self): - """Raise GeneratorExit inside coroutine. 
- """ - try: - self.throw(GeneratorExit) - except (GeneratorExit, StopIteration): - pass - else: - raise RuntimeError("coroutine ignored GeneratorExit") - - @classmethod - def __subclasshook__(cls, C): - if cls is Coroutine: - return _check_methods(C, '__await__', 'send', 'throw', 'close') - return NotImplemented - - -Coroutine.register(coroutine) - - -class AsyncIterable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __aiter__(self): - return AsyncIterator() - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncIterable: - return _check_methods(C, "__aiter__") - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -class AsyncIterator(AsyncIterable): - - __slots__ = () - - @abstractmethod - async def __anext__(self): - """Return the next item or raise StopAsyncIteration when exhausted.""" - raise StopAsyncIteration - - def __aiter__(self): - return self - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncIterator: - return _check_methods(C, "__anext__", "__aiter__") - return NotImplemented - - -class AsyncGenerator(AsyncIterator): - - __slots__ = () - - async def __anext__(self): - """Return the next item from the asynchronous generator. - When exhausted, raise StopAsyncIteration. - """ - return await self.asend(None) - - @abstractmethod - async def asend(self, value): - """Send a value into the asynchronous generator. - Return next yielded value or raise StopAsyncIteration. - """ - raise StopAsyncIteration - - @abstractmethod - async def athrow(self, typ, val=None, tb=None): - """Raise an exception in the asynchronous generator. - Return next yielded value or raise StopAsyncIteration. - """ - if val is None: - if tb is None: - raise typ - val = typ() - if tb is not None: - val = val.with_traceback(tb) - raise val - - async def aclose(self): - """Raise GeneratorExit inside coroutine. - """ - try: - await self.athrow(GeneratorExit) - except (GeneratorExit, StopAsyncIteration): - pass - else: - raise RuntimeError("asynchronous generator ignored GeneratorExit") - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncGenerator: - return _check_methods(C, '__aiter__', '__anext__', - 'asend', 'athrow', 'aclose') - return NotImplemented - - -AsyncGenerator.register(async_generator) - - -class Iterable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __iter__(self): - while False: - yield None - - @classmethod - def __subclasshook__(cls, C): - if cls is Iterable: - return _check_methods(C, "__iter__") - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -class Iterator(Iterable): - - __slots__ = () - - @abstractmethod - def __next__(self): - 'Return the next item from the iterator. 
When exhausted, raise StopIteration' - raise StopIteration - - def __iter__(self): - return self - - @classmethod - def __subclasshook__(cls, C): - if cls is Iterator: - return _check_methods(C, '__iter__', '__next__') - return NotImplemented - - -Iterator.register(bytes_iterator) -Iterator.register(bytearray_iterator) -#Iterator.register(callable_iterator) -Iterator.register(dict_keyiterator) -Iterator.register(dict_valueiterator) -Iterator.register(dict_itemiterator) -Iterator.register(list_iterator) -Iterator.register(list_reverseiterator) -Iterator.register(range_iterator) -Iterator.register(longrange_iterator) -Iterator.register(set_iterator) -Iterator.register(str_iterator) -Iterator.register(tuple_iterator) -Iterator.register(zip_iterator) - - -class Reversible(Iterable): - - __slots__ = () - - @abstractmethod - def __reversed__(self): - while False: - yield None - - @classmethod - def __subclasshook__(cls, C): - if cls is Reversible: - return _check_methods(C, "__reversed__", "__iter__") - return NotImplemented - - -class Generator(Iterator): - - __slots__ = () - - def __next__(self): - """Return the next item from the generator. - When exhausted, raise StopIteration. - """ - return self.send(None) - - @abstractmethod - def send(self, value): - """Send a value into the generator. - Return next yielded value or raise StopIteration. - """ - raise StopIteration - - @abstractmethod - def throw(self, typ, val=None, tb=None): - """Raise an exception in the generator. - Return next yielded value or raise StopIteration. - """ - if val is None: - if tb is None: - raise typ - val = typ() - if tb is not None: - val = val.with_traceback(tb) - raise val - - def close(self): - """Raise GeneratorExit inside generator. - """ - try: - self.throw(GeneratorExit) - except (GeneratorExit, StopIteration): - pass - else: - raise RuntimeError("generator ignored GeneratorExit") - - @classmethod - def __subclasshook__(cls, C): - if cls is Generator: - return _check_methods(C, '__iter__', '__next__', - 'send', 'throw', 'close') - return NotImplemented - - -Generator.register(generator) - - -class Sized(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __len__(self): - return 0 - - @classmethod - def __subclasshook__(cls, C): - if cls is Sized: - return _check_methods(C, "__len__") - return NotImplemented - - -class Container(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __contains__(self, x): - return False - - @classmethod - def __subclasshook__(cls, C): - if cls is Container: - return _check_methods(C, "__contains__") - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -class Collection(Sized, Iterable, Container): - - __slots__ = () - - @classmethod - def __subclasshook__(cls, C): - if cls is Collection: - return _check_methods(C, "__len__", "__iter__", "__contains__") - return NotImplemented - - -class Buffer(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __buffer__(self, flags: int, /) -> memoryview: - raise NotImplementedError - - @classmethod - def __subclasshook__(cls, C): - if cls is Buffer: - return _check_methods(C, "__buffer__") - return NotImplemented - - -class _CallableGenericAlias(GenericAlias): - """ Represent `Callable[argtypes, resulttype]`. - - This sets ``__args__`` to a tuple containing the flattened ``argtypes`` - followed by ``resulttype``. - - Example: ``Callable[[int, str], float]`` sets ``__args__`` to - ``(int, str, float)``. 
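The flattening just described can be confirmed against the public alias; a small sketch:

    from collections.abc import Callable

    alias = Callable[[int, str], float]
    assert alias.__args__ == (int, str, float)  # argtypes flattened, result last
    print(alias)  # collections.abc.Callable[[int, str], float]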
- """ - - __slots__ = () - - def __new__(cls, origin, args): - if not (isinstance(args, tuple) and len(args) == 2): - raise TypeError( - "Callable must be used as Callable[[arg, ...], result].") - t_args, t_result = args - if isinstance(t_args, (tuple, list)): - args = (*t_args, t_result) - elif not _is_param_expr(t_args): - raise TypeError(f"Expected a list of types, an ellipsis, " - f"ParamSpec, or Concatenate. Got {t_args}") - return super().__new__(cls, origin, args) - - def __repr__(self): - if len(self.__args__) == 2 and _is_param_expr(self.__args__[0]): - return super().__repr__() - return (f'collections.abc.Callable' - f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], ' - f'{_type_repr(self.__args__[-1])}]') - - def __reduce__(self): - args = self.__args__ - if not (len(args) == 2 and _is_param_expr(args[0])): - args = list(args[:-1]), args[-1] - return _CallableGenericAlias, (Callable, args) - - def __getitem__(self, item): - # Called during TypeVar substitution, returns the custom subclass - # rather than the default types.GenericAlias object. Most of the - # code is copied from typing's _GenericAlias and the builtin - # types.GenericAlias. - if not isinstance(item, tuple): - item = (item,) - - new_args = super().__getitem__(item).__args__ - - # args[0] occurs due to things like Z[[int, str, bool]] from PEP 612 - if not isinstance(new_args[0], (tuple, list)): - t_result = new_args[-1] - t_args = new_args[:-1] - new_args = (t_args, t_result) - return _CallableGenericAlias(Callable, tuple(new_args)) - -def _is_param_expr(obj): - """Checks if obj matches either a list of types, ``...``, ``ParamSpec`` or - ``_ConcatenateGenericAlias`` from typing.py - """ - if obj is Ellipsis: - return True - if isinstance(obj, list): - return True - obj = type(obj) - names = ('ParamSpec', '_ConcatenateGenericAlias') - return obj.__module__ == 'typing' and any(obj.__name__ == name for name in names) - -def _type_repr(obj): - """Return the repr() of an object, special-casing types (internal helper). - - Copied from :mod:`typing` since collections.abc - shouldn't depend on that module. - (Keep this roughly in sync with the typing version.) - """ - if isinstance(obj, type): - if obj.__module__ == 'builtins': - return obj.__qualname__ - return f'{obj.__module__}.{obj.__qualname__}' - if obj is Ellipsis: - return '...' - if isinstance(obj, FunctionType): - return obj.__name__ - return repr(obj) - - -class Callable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __call__(self, *args, **kwds): - return False - - @classmethod - def __subclasshook__(cls, C): - if cls is Callable: - return _check_methods(C, "__call__") - return NotImplemented - - __class_getitem__ = classmethod(_CallableGenericAlias) - - -### SETS ### - - -class Set(Collection): - """A set is a finite, iterable container. - - This class provides concrete generic implementations of all - methods except for __contains__, __iter__ and __len__. - - To override the comparisons (presumably for speed, as the - semantics are fixed), redefine __le__ and __ge__, - then the other operations will automatically follow suit. 
- """ - - __slots__ = () - - def __le__(self, other): - if not isinstance(other, Set): - return NotImplemented - if len(self) > len(other): - return False - for elem in self: - if elem not in other: - return False - return True - - def __lt__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) < len(other) and self.__le__(other) - - def __gt__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) > len(other) and self.__ge__(other) - - def __ge__(self, other): - if not isinstance(other, Set): - return NotImplemented - if len(self) < len(other): - return False - for elem in other: - if elem not in self: - return False - return True - - def __eq__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) == len(other) and self.__le__(other) - - @classmethod - def _from_iterable(cls, it): - '''Construct an instance of the class from any iterable input. - - Must override this method if the class constructor signature - does not accept an iterable for an input. - ''' - return cls(it) - - def __and__(self, other): - if not isinstance(other, Iterable): - return NotImplemented - return self._from_iterable(value for value in other if value in self) - - __rand__ = __and__ - - def isdisjoint(self, other): - 'Return True if two sets have a null intersection.' - for value in other: - if value in self: - return False - return True - - def __or__(self, other): - if not isinstance(other, Iterable): - return NotImplemented - chain = (e for s in (self, other) for e in s) - return self._from_iterable(chain) - - __ror__ = __or__ - - def __sub__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return self._from_iterable(value for value in self - if value not in other) - - def __rsub__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return self._from_iterable(value for value in other - if value not in self) - - def __xor__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return (self - other) | (other - self) - - __rxor__ = __xor__ - - def _hash(self): - """Compute the hash value of a set. - - Note that we don't define __hash__: not all sets are hashable. - But if you define a hashable set type, its __hash__ should - call this function. - - This must be compatible __eq__. - - All sets ought to compare equal if they contain the same - elements, regardless of how they are implemented, and - regardless of the order of the elements; so there's not much - freedom for __eq__ or __hash__. We match the algorithm used - by the built-in frozenset type. - """ - MAX = sys.maxsize - MASK = 2 * MAX + 1 - n = len(self) - h = 1927868237 * (n + 1) - h &= MASK - for x in self: - hx = hash(x) - h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 - h &= MASK - h ^= (h >> 11) ^ (h >> 25) - h = h * 69069 + 907133923 - h &= MASK - if h > MAX: - h -= MASK + 1 - if h == -1: - h = 590923713 - return h - - -Set.register(frozenset) - - -class MutableSet(Set): - """A mutable set is a finite, iterable container. - - This class provides concrete generic implementations of all - methods except for __contains__, __iter__, __len__, - add(), and discard(). 
- - To override the comparisons (presumably for speed, as the - semantics are fixed), all you have to do is redefine __le__ and - then the other operations will automatically follow suit. - """ - - __slots__ = () - - @abstractmethod - def add(self, value): - """Add an element.""" - raise NotImplementedError - - @abstractmethod - def discard(self, value): - """Remove an element. Do not raise an exception if absent.""" - raise NotImplementedError - - def remove(self, value): - """Remove an element. If not a member, raise a KeyError.""" - if value not in self: - raise KeyError(value) - self.discard(value) - - def pop(self): - """Return the popped value. Raise KeyError if empty.""" - it = iter(self) - try: - value = next(it) - except StopIteration: - raise KeyError from None - self.discard(value) - return value - - def clear(self): - """This is slow (creates N new iterators!) but effective.""" - try: - while True: - self.pop() - except KeyError: - pass - - def __ior__(self, it): - for value in it: - self.add(value) - return self - - def __iand__(self, it): - for value in (self - it): - self.discard(value) - return self - - def __ixor__(self, it): - if it is self: - self.clear() - else: - if not isinstance(it, Set): - it = self._from_iterable(it) - for value in it: - if value in self: - self.discard(value) - else: - self.add(value) - return self - - def __isub__(self, it): - if it is self: - self.clear() - else: - for value in it: - self.discard(value) - return self - - -MutableSet.register(set) - - -### MAPPINGS ### - -class Mapping(Collection): - """A Mapping is a generic container for associating key/value - pairs. - - This class provides concrete generic implementations of all - methods except for __getitem__, __iter__, and __len__. - """ - - __slots__ = () - - # Tell ABCMeta.__new__ that this class should have TPFLAGS_MAPPING set. - __abc_tpflags__ = 1 << 6 # Py_TPFLAGS_MAPPING - - @abstractmethod - def __getitem__(self, key): - raise KeyError - - def get(self, key, default=None): - 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.' 
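Given only __getitem__, __iter__ and __len__, the rest of the Mapping API, including the get() whose body continues below, is derived. A minimal read-only sketch (FrozenDict is an illustrative name):

    from collections.abc import Mapping

    class FrozenDict(Mapping):
        def __init__(self, data):
            self._data = dict(data)
        def __getitem__(self, key):
            return self._data[key]
        def __iter__(self):
            return iter(self._data)
        def __len__(self):
            return len(self._data)

    fd = FrozenDict({"a": 1})
    assert fd.get("b", 0) == 0   # get() catches the KeyError, as defined here
    assert "a" in fd             # __contains__ mixin
    assert fd == {"a": 1}        # __eq__ compares dict(self.items())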
- try: - return self[key] - except KeyError: - return default - - def __contains__(self, key): - try: - self[key] - except KeyError: - return False - else: - return True - - def keys(self): - "D.keys() -> a set-like object providing a view on D's keys" - return KeysView(self) - - def items(self): - "D.items() -> a set-like object providing a view on D's items" - return ItemsView(self) - - def values(self): - "D.values() -> an object providing a view on D's values" - return ValuesView(self) - - def __eq__(self, other): - if not isinstance(other, Mapping): - return NotImplemented - return dict(self.items()) == dict(other.items()) - - __reversed__ = None - -Mapping.register(mappingproxy) -Mapping.register(framelocalsproxy) - - -class MappingView(Sized): - - __slots__ = '_mapping', - - def __init__(self, mapping): - self._mapping = mapping - - def __len__(self): - return len(self._mapping) - - def __repr__(self): - return '{0.__class__.__name__}({0._mapping!r})'.format(self) - - __class_getitem__ = classmethod(GenericAlias) - - -class KeysView(MappingView, Set): - - __slots__ = () - - @classmethod - def _from_iterable(cls, it): - return set(it) - - def __contains__(self, key): - return key in self._mapping - - def __iter__(self): - yield from self._mapping - - -KeysView.register(dict_keys) - - -class ItemsView(MappingView, Set): - - __slots__ = () - - @classmethod - def _from_iterable(cls, it): - return set(it) - - def __contains__(self, item): - key, value = item - try: - v = self._mapping[key] - except KeyError: - return False - else: - return v is value or v == value - - def __iter__(self): - for key in self._mapping: - yield (key, self._mapping[key]) - - -ItemsView.register(dict_items) - - -class ValuesView(MappingView, Collection): - - __slots__ = () - - def __contains__(self, value): - for key in self._mapping: - v = self._mapping[key] - if v is value or v == value: - return True - return False - - def __iter__(self): - for key in self._mapping: - yield self._mapping[key] - - -ValuesView.register(dict_values) - - -class MutableMapping(Mapping): - """A MutableMapping is a generic container for associating - key/value pairs. - - This class provides concrete generic implementations of all - methods except for __getitem__, __setitem__, __delitem__, - __iter__, and __len__. - """ - - __slots__ = () - - @abstractmethod - def __setitem__(self, key, value): - raise KeyError - - @abstractmethod - def __delitem__(self, key): - raise KeyError - - __marker = object() - - def pop(self, key, default=__marker): - '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. - If key is not found, d is returned if given, otherwise KeyError is raised. - ''' - try: - value = self[key] - except KeyError: - if default is self.__marker: - raise - return default - else: - del self[key] - return value - - def popitem(self): - '''D.popitem() -> (k, v), remove and return some (key, value) pair - as a 2-tuple; but raise KeyError if D is empty. - ''' - try: - key = next(iter(self)) - except StopIteration: - raise KeyError from None - value = self[key] - del self[key] - return key, value - - def clear(self): - 'D.clear() -> None. Remove all items from D.' - try: - while True: - self.popitem() - except KeyError: - pass - - def update(self, other=(), /, **kwds): - ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. 
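The five abstract methods of MutableMapping buy the whole dict-like API, including the update() being defined here. A sketch (LowerDict is an illustrative name):

    from collections.abc import MutableMapping

    class LowerDict(MutableMapping):
        # Stores every string key lower-cased.
        def __init__(self):
            self._data = {}
        def __getitem__(self, key):
            return self._data[key.lower()]
        def __setitem__(self, key, value):
            self._data[key.lower()] = value
        def __delitem__(self, key):
            del self._data[key.lower()]
        def __iter__(self):
            return iter(self._data)
        def __len__(self):
            return len(self._data)

    d = LowerDict()
    d.update({"Key": 1}, Other=2)        # update() mixin
    assert d.pop("KEY") == 1             # pop() mixin
    assert d.setdefault("other", 9) == 2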
- If E present and has a .keys() method, does: for k in E.keys(): D[k] = E[k] - If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v - In either case, this is followed by: for k, v in F.items(): D[k] = v - ''' - if isinstance(other, Mapping): - for key in other: - self[key] = other[key] - elif hasattr(other, "keys"): - for key in other.keys(): - self[key] = other[key] - else: - for key, value in other: - self[key] = value - for key, value in kwds.items(): - self[key] = value - - def setdefault(self, key, default=None): - 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D' - try: - return self[key] - except KeyError: - self[key] = default - return default - - -MutableMapping.register(dict) - - -### SEQUENCES ### - -class Sequence(Reversible, Collection): - """All the operations on a read-only sequence. - - Concrete subclasses must override __new__ or __init__, - __getitem__, and __len__. - """ - - __slots__ = () - - # Tell ABCMeta.__new__ that this class should have TPFLAGS_SEQUENCE set. - __abc_tpflags__ = 1 << 5 # Py_TPFLAGS_SEQUENCE - - @abstractmethod - def __getitem__(self, index): - raise IndexError - - def __iter__(self): - i = 0 - try: - while True: - v = self[i] - yield v - i += 1 - except IndexError: - return - - def __contains__(self, value): - for v in self: - if v is value or v == value: - return True - return False - - def __reversed__(self): - for i in reversed(range(len(self))): - yield self[i] - - def index(self, value, start=0, stop=None): - '''S.index(value, [start, [stop]]) -> integer -- return first index of value. - Raises ValueError if the value is not present. - - Supporting start and stop arguments is optional, but - recommended. - ''' - if start is not None and start < 0: - start = max(len(self) + start, 0) - if stop is not None and stop < 0: - stop += len(self) - - i = start - while stop is None or i < stop: - try: - v = self[i] - except IndexError: - break - if v is value or v == value: - return i - i += 1 - raise ValueError - - def count(self, value): - 'S.count(value) -> integer -- return number of occurrences of value' - return sum(1 for v in self if v is value or v == value) - -Sequence.register(tuple) -Sequence.register(str) -Sequence.register(range) -Sequence.register(memoryview) - -class _DeprecateByteStringMeta(ABCMeta): - def __new__(cls, name, bases, namespace, **kwargs): - if name != "ByteString": - import warnings - - warnings._deprecated( - "collections.abc.ByteString", - remove=(3, 17), - ) - return super().__new__(cls, name, bases, namespace, **kwargs) - - def __instancecheck__(cls, instance): - import warnings - - warnings._deprecated( - "collections.abc.ByteString", - remove=(3, 17), - ) - return super().__instancecheck__(instance) - -class ByteString(Sequence, metaclass=_DeprecateByteStringMeta): - """Deprecated ABC serving as a common supertype of ``bytes`` and ``bytearray``. - - This ABC is scheduled for removal in Python 3.17. - Use ``isinstance(obj, collections.abc.Buffer)`` to test if ``obj`` - implements the buffer protocol at runtime. For use in type annotations, - either use ``Buffer`` or a union that explicitly specifies the types your - code supports (e.g., ``bytes | bytearray | memoryview``). - """ - - __slots__ = () - -ByteString.register(bytes) -ByteString.register(bytearray) - - -class MutableSequence(Sequence): - """All the operations on a read-write sequence. - - Concrete subclasses must provide __new__ or __init__, - __getitem__, __setitem__, __delitem__, __len__, and insert(). 
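Likewise for sequences: __getitem__ plus __len__ yields iteration, membership, index() and count(). A sketch (Squares is an illustrative name):

    from collections.abc import Sequence

    class Squares(Sequence):
        def __init__(self, n):
            self._n = n
        def __getitem__(self, index):
            if not 0 <= index < self._n:
                raise IndexError(index)
            return index * index
        def __len__(self):
            return self._n

    sq = Squares(5)            # 0, 1, 4, 9, 16
    assert 9 in sq             # __contains__ mixin walks __getitem__
    assert sq.index(16) == 4   # index() mixin
    assert list(reversed(sq)) == [16, 9, 4, 1, 0]  # __reversed__ mixin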
- """ - - __slots__ = () - - @abstractmethod - def __setitem__(self, index, value): - raise IndexError - - @abstractmethod - def __delitem__(self, index): - raise IndexError - - @abstractmethod - def insert(self, index, value): - 'S.insert(index, value) -- insert value before index' - raise IndexError - - def append(self, value): - 'S.append(value) -- append value to the end of the sequence' - self.insert(len(self), value) - - def clear(self): - 'S.clear() -> None -- remove all items from S' - try: - while True: - self.pop() - except IndexError: - pass - - def reverse(self): - 'S.reverse() -- reverse *IN PLACE*' - n = len(self) - for i in range(n//2): - self[i], self[n-i-1] = self[n-i-1], self[i] - - def extend(self, values): - 'S.extend(iterable) -- extend sequence by appending elements from the iterable' - if values is self: - values = list(values) - for v in values: - self.append(v) - - def pop(self, index=-1): - '''S.pop([index]) -> item -- remove and return item at index (default last). - Raise IndexError if list is empty or index is out of range. - ''' - v = self[index] - del self[index] - return v - - def remove(self, value): - '''S.remove(value) -- remove first occurrence of value. - Raise ValueError if the value is not present. - ''' - del self[self.index(value)] - - def __iadd__(self, values): - self.extend(values) - return self - - -MutableSequence.register(list) -MutableSequence.register(bytearray) # Multiply inheriting, see ByteString diff --git a/Python313_13_x64_Template/Lib/_colorize.py b/Python313_13_x64_Template/Lib/_colorize.py deleted file mode 100644 index 8263d2df..00000000 --- a/Python313_13_x64_Template/Lib/_colorize.py +++ /dev/null @@ -1,119 +0,0 @@ -from __future__ import annotations -import os -import sys - -COLORIZE = True - -# types -if False: - from typing import IO - - -class ANSIColors: - RESET = "\x1b[0m" - - BLACK = "\x1b[30m" - BLUE = "\x1b[34m" - CYAN = "\x1b[36m" - GREEN = "\x1b[32m" - MAGENTA = "\x1b[35m" - RED = "\x1b[31m" - WHITE = "\x1b[37m" # more like LIGHT GRAY - YELLOW = "\x1b[33m" - - BOLD_BLACK = "\x1b[1;30m" # DARK GRAY - BOLD_BLUE = "\x1b[1;34m" - BOLD_CYAN = "\x1b[1;36m" - BOLD_GREEN = "\x1b[1;32m" - BOLD_MAGENTA = "\x1b[1;35m" - BOLD_RED = "\x1b[1;31m" - BOLD_WHITE = "\x1b[1;37m" # actual WHITE - BOLD_YELLOW = "\x1b[1;33m" - - # intense = like bold but without being bold - INTENSE_BLACK = "\x1b[90m" - INTENSE_BLUE = "\x1b[94m" - INTENSE_CYAN = "\x1b[96m" - INTENSE_GREEN = "\x1b[92m" - INTENSE_MAGENTA = "\x1b[95m" - INTENSE_RED = "\x1b[91m" - INTENSE_WHITE = "\x1b[97m" - INTENSE_YELLOW = "\x1b[93m" - - BACKGROUND_BLACK = "\x1b[40m" - BACKGROUND_BLUE = "\x1b[44m" - BACKGROUND_CYAN = "\x1b[46m" - BACKGROUND_GREEN = "\x1b[42m" - BACKGROUND_MAGENTA = "\x1b[45m" - BACKGROUND_RED = "\x1b[41m" - BACKGROUND_WHITE = "\x1b[47m" - BACKGROUND_YELLOW = "\x1b[43m" - - INTENSE_BACKGROUND_BLACK = "\x1b[100m" - INTENSE_BACKGROUND_BLUE = "\x1b[104m" - INTENSE_BACKGROUND_CYAN = "\x1b[106m" - INTENSE_BACKGROUND_GREEN = "\x1b[102m" - INTENSE_BACKGROUND_MAGENTA = "\x1b[105m" - INTENSE_BACKGROUND_RED = "\x1b[101m" - INTENSE_BACKGROUND_WHITE = "\x1b[107m" - INTENSE_BACKGROUND_YELLOW = "\x1b[103m" - - -NoColors = ANSIColors() - -for attr in dir(NoColors): - if not attr.startswith("__"): - setattr(NoColors, attr, "") - - -def get_colors( - colorize: bool = False, *, file: IO[str] | IO[bytes] | None = None -) -> ANSIColors: - if colorize or can_colorize(file=file): - return ANSIColors() - else: - return NoColors - - -def can_colorize(*, file: IO[str] | IO[bytes] | 
None = None) -> bool: - - def _safe_getenv(k: str, fallback: str | None = None) -> str | None: - """Exception-safe environment retrieval. See gh-128636.""" - try: - return os.environ.get(k, fallback) - except Exception: - return fallback - - if file is None: - file = sys.stdout - - if not sys.flags.ignore_environment: - if _safe_getenv("PYTHON_COLORS") == "0": - return False - if _safe_getenv("PYTHON_COLORS") == "1": - return True - if _safe_getenv("NO_COLOR"): - return False - if not COLORIZE: - return False - if _safe_getenv("FORCE_COLOR"): - return True - if _safe_getenv("TERM") == "dumb": - return False - - if not hasattr(file, "fileno"): - return False - - if sys.platform == "win32": - try: - import nt - - if not nt._supports_virtual_terminal(): - return False - except (ImportError, AttributeError): - return False - - try: - return os.isatty(file.fileno()) - except OSError: - return hasattr(file, "isatty") and file.isatty() diff --git a/Python313_13_x64_Template/Lib/_compression.py b/Python313_13_x64_Template/Lib/_compression.py deleted file mode 100644 index e8b70aa0..00000000 --- a/Python313_13_x64_Template/Lib/_compression.py +++ /dev/null @@ -1,162 +0,0 @@ -"""Internal classes used by the gzip, lzma and bz2 modules""" - -import io -import sys - -BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE # Compressed data read chunk size - - -class BaseStream(io.BufferedIOBase): - """Mode-checking helper functions.""" - - def _check_not_closed(self): - if self.closed: - raise ValueError("I/O operation on closed file") - - def _check_can_read(self): - if not self.readable(): - raise io.UnsupportedOperation("File not open for reading") - - def _check_can_write(self): - if not self.writable(): - raise io.UnsupportedOperation("File not open for writing") - - def _check_can_seek(self): - if not self.readable(): - raise io.UnsupportedOperation("Seeking is only supported " - "on files open for reading") - if not self.seekable(): - raise io.UnsupportedOperation("The underlying file object " - "does not support seeking") - - -class DecompressReader(io.RawIOBase): - """Adapts the decompressor API to a RawIOBase reader API""" - - def readable(self): - return True - - def __init__(self, fp, decomp_factory, trailing_error=(), **decomp_args): - self._fp = fp - self._eof = False - self._pos = 0 # Current offset in decompressed stream - - # Set to size of decompressed stream once it is known, for SEEK_END - self._size = -1 - - # Save the decompressor factory and arguments. - # If the file contains multiple compressed streams, each - # stream will need a separate decompressor object. A new decompressor - # object is also needed when implementing a backwards seek(). - self._decomp_factory = decomp_factory - self._decomp_args = decomp_args - self._decompressor = self._decomp_factory(**self._decomp_args) - - # Exception class to catch from decompressor signifying invalid - # trailing data to ignore - self._trailing_error = trailing_error - - def close(self): - self._decompressor = None - return super().close() - - def seekable(self): - return self._fp.seekable() - - def readinto(self, b): - with memoryview(b) as view, view.cast("B") as byte_view: - data = self.read(len(byte_view)) - byte_view[:len(data)] = data - return len(data) - - def read(self, size=-1): - if size < 0: - return self.readall() - - if not size or self._eof: - return b"" - data = None # Default if EOF is encountered - # Depending on the input data, our call to the decompressor may not - # return any data. 
In this case, try again after reading another block. - while True: - if self._decompressor.eof: - rawblock = (self._decompressor.unused_data or - self._fp.read(BUFFER_SIZE)) - if not rawblock: - break - # Continue to next stream. - self._decompressor = self._decomp_factory( - **self._decomp_args) - try: - data = self._decompressor.decompress(rawblock, size) - except self._trailing_error: - # Trailing data isn't a valid compressed stream; ignore it. - break - else: - if self._decompressor.needs_input: - rawblock = self._fp.read(BUFFER_SIZE) - if not rawblock: - raise EOFError("Compressed file ended before the " - "end-of-stream marker was reached") - else: - rawblock = b"" - data = self._decompressor.decompress(rawblock, size) - if data: - break - if not data: - self._eof = True - self._size = self._pos - return b"" - self._pos += len(data) - return data - - def readall(self): - chunks = [] - # sys.maxsize means the max length of output buffer is unlimited, - # so that the whole input buffer can be decompressed within one - # .decompress() call. - while data := self.read(sys.maxsize): - chunks.append(data) - - return b"".join(chunks) - - # Rewind the file to the beginning of the data stream. - def _rewind(self): - self._fp.seek(0) - self._eof = False - self._pos = 0 - self._decompressor = self._decomp_factory(**self._decomp_args) - - def seek(self, offset, whence=io.SEEK_SET): - # Recalculate offset as an absolute file position. - if whence == io.SEEK_SET: - pass - elif whence == io.SEEK_CUR: - offset = self._pos + offset - elif whence == io.SEEK_END: - # Seeking relative to EOF - we need to know the file's size. - if self._size < 0: - while self.read(io.DEFAULT_BUFFER_SIZE): - pass - offset = self._size + offset - else: - raise ValueError("Invalid value for whence: {}".format(whence)) - - # Make it so that offset is the number of bytes to skip forward. - if offset < self._pos: - self._rewind() - else: - offset -= self._pos - - # Read and discard data until we reach the desired position. - while offset > 0: - data = self.read(min(io.DEFAULT_BUFFER_SIZE, offset)) - if not data: - break - offset -= len(data) - - return self._pos - - def tell(self): - """Return the current file position.""" - return self._pos diff --git a/Python313_13_x64_Template/Lib/_markupbase.py b/Python313_13_x64_Template/Lib/_markupbase.py deleted file mode 100644 index 3ad7e279..00000000 --- a/Python313_13_x64_Template/Lib/_markupbase.py +++ /dev/null @@ -1,396 +0,0 @@ -"""Shared support for scanning document type declarations in HTML and XHTML. - -This module is used as a foundation for the html.parser module. It has no -documented public API and should not be used directly. 
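The DecompressReader.seek() shown above can only rewind and re-read, which is why backward seeks on gzip/bz2/lzma file objects are expensive. A small sketch of the resulting behavior:

    import gzip
    import io

    raw = io.BytesIO()
    with gzip.GzipFile(fileobj=raw, mode="wb") as f:
        f.write(b"hello world")

    raw.seek(0)
    with gzip.GzipFile(fileobj=raw, mode="rb") as f:
        f.seek(6)                 # forward: read and discard 6 bytes
        assert f.read() == b"world"
        f.seek(0)                 # backward: _rewind() and decompress again
        assert f.read(5) == b"hello"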
- -""" - -import re - -_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match -_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match -_commentclose = re.compile(r'--\s*>') -_markedsectionclose = re.compile(r']\s*]\s*>') - -# An analysis of the MS-Word extensions is available at -# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf - -_msmarkedsectionclose = re.compile(r']\s*>') - -del re - - -class ParserBase: - """Parser base class which provides some common support methods used - by the SGML/HTML and XHTML parsers.""" - - def __init__(self): - if self.__class__ is ParserBase: - raise RuntimeError( - "_markupbase.ParserBase must be subclassed") - - def reset(self): - self.lineno = 1 - self.offset = 0 - - def getpos(self): - """Return current line number and offset.""" - return self.lineno, self.offset - - # Internal -- update line number and offset. This should be - # called for each piece of data exactly once, in order -- in other - # words the concatenation of all the input strings to this - # function should be exactly the entire input. - def updatepos(self, i, j): - if i >= j: - return j - rawdata = self.rawdata - nlines = rawdata.count("\n", i, j) - if nlines: - self.lineno = self.lineno + nlines - pos = rawdata.rindex("\n", i, j) # Should not fail - self.offset = j-(pos+1) - else: - self.offset = self.offset + j-i - return j - - _decl_otherchars = '' - - # Internal -- parse declaration (for use by subclasses). - def parse_declaration(self, i): - # This is some sort of declaration; in "HTML as - # deployed," this should only be the document type - # declaration (""). - # ISO 8879:1986, however, has more complex - # declaration syntax for elements in , including: - # --comment-- - # [marked section] - # name in the following list: ENTITY, DOCTYPE, ELEMENT, - # ATTLIST, NOTATION, SHORTREF, USEMAP, - # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM - rawdata = self.rawdata - j = i + 2 - assert rawdata[i:j] == "": - # the empty comment - return j + 1 - if rawdata[j:j+1] in ("-", ""): - # Start of comment followed by buffer boundary, - # or just a buffer boundary. - return -1 - # A simple, practical version could look like: ((name|stringlit) S*) + '>' - n = len(rawdata) - if rawdata[j:j+2] == '--': #comment - # Locate --.*-- as the body of the comment - return self.parse_comment(i) - elif rawdata[j] == '[': #marked section - # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section - # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA - # Note that this is extended by Microsoft Office "Save as Web" function - # to include [if...] and [endif]. - return self.parse_marked_section(i) - else: #all other declaration elements - decltype, j = self._scan_name(j, i) - if j < 0: - return j - if decltype == "doctype": - self._decl_otherchars = '' - while j < n: - c = rawdata[j] - if c == ">": - # end of declaration syntax - data = rawdata[i+2:j] - if decltype == "doctype": - self.handle_decl(data) - else: - # According to the HTML5 specs sections "8.2.4.44 Bogus - # comment state" and "8.2.4.45 Markup declaration open - # state", a comment token should be emitted. - # Calling unknown_decl provides more flexibility though. 
- self.unknown_decl(data) - return j + 1 - if c in "\"'": - m = _declstringlit_match(rawdata, j) - if not m: - return -1 # incomplete - j = m.end() - elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": - name, j = self._scan_name(j, i) - elif c in self._decl_otherchars: - j = j + 1 - elif c == "[": - # this could be handled in a separate doctype parser - if decltype == "doctype": - j = self._parse_doctype_subset(j + 1, i) - elif decltype in {"attlist", "linktype", "link", "element"}: - # must tolerate []'d groups in a content model in an element declaration - # also in data attribute specifications of attlist declaration - # also link type declaration subsets in linktype declarations - # also link attribute specification lists in link declarations - raise AssertionError("unsupported '[' char in %s declaration" % decltype) - else: - raise AssertionError("unexpected '[' char in declaration") - else: - raise AssertionError("unexpected %r char in declaration" % rawdata[j]) - if j < 0: - return j - return -1 # incomplete - - # Internal -- parse a marked section - # Override this to handle MS-word extension syntax content - def parse_marked_section(self, i, report=1): - rawdata= self.rawdata - assert rawdata[i:i+3] == ' ending - match= _markedsectionclose.search(rawdata, i+3) - elif sectName in {"if", "else", "endif"}: - # look for MS Office ]> ending - match= _msmarkedsectionclose.search(rawdata, i+3) - else: - raise AssertionError( - 'unknown status keyword %r in marked section' % rawdata[i+3:j] - ) - if not match: - return -1 - if report: - j = match.start(0) - self.unknown_decl(rawdata[i+3: j]) - return match.end(0) - - # Internal -- parse comment, return length or -1 if not terminated - def parse_comment(self, i, report=1): - rawdata = self.rawdata - if rawdata[i:i+4] != '') - else: - fields.append(' ') - # Column: Opcode name - fields.append(instr.opname.ljust(_OPNAME_WIDTH)) - # Column: Opcode argument - if instr.arg is not None: - arg = repr(instr.arg) - # If opname is longer than _OPNAME_WIDTH, we allow it to overflow into - # the space reserved for oparg. This results in fewer misaligned opargs - # in the disassembly output. 
- opname_excess = max(0, len(instr.opname) - _OPNAME_WIDTH) - fields.append(repr(instr.arg).rjust(_OPARG_WIDTH - opname_excess)) - # Column: Opcode argument details - if instr.argrepr: - fields.append('(' + instr.argrepr + ')') - print(' '.join(fields).rstrip(), file=self.file) - - def print_exception_table(self, exception_entries): - file = self.file - if exception_entries: - print("ExceptionTable:", file=file) - for entry in exception_entries: - lasti = " lasti" if entry.lasti else "" - start = entry.start_label - end = entry.end_label - target = entry.target_label - print(f" L{start} to L{end} -> L{target} [{entry.depth}]{lasti}", file=file) - - -class ArgResolver: - def __init__(self, co_consts=None, names=None, varname_from_oparg=None, labels_map=None): - self.co_consts = co_consts - self.names = names - self.varname_from_oparg = varname_from_oparg - self.labels_map = labels_map or {} - - def offset_from_jump_arg(self, op, arg, offset): - deop = _deoptop(op) - if deop in hasjabs: - return arg * 2 - elif deop in hasjrel: - signed_arg = -arg if _is_backward_jump(deop) else arg - argval = offset + 2 + signed_arg*2 - caches = _get_cache_size(_all_opname[deop]) - argval += 2 * caches - return argval - return None - - def get_label_for_offset(self, offset): - return self.labels_map.get(offset, None) - - def get_argval_argrepr(self, op, arg, offset): - get_name = None if self.names is None else self.names.__getitem__ - argval = None - argrepr = '' - deop = _deoptop(op) - if arg is not None: - # Set argval to the dereferenced value of the argument when - # available, and argrepr to the string representation of argval. - # _disassemble_bytes needs the string repr of the - # raw name index for LOAD_GLOBAL, LOAD_CONST, etc. - argval = arg - if deop in hasconst: - argval, argrepr = _get_const_info(deop, arg, self.co_consts) - elif deop in hasname: - if deop == LOAD_GLOBAL: - argval, argrepr = _get_name_info(arg//2, get_name) - if (arg & 1) and argrepr: - argrepr = f"{argrepr} + NULL" - elif deop == LOAD_ATTR: - argval, argrepr = _get_name_info(arg//2, get_name) - if (arg & 1) and argrepr: - argrepr = f"{argrepr} + NULL|self" - elif deop == LOAD_SUPER_ATTR: - argval, argrepr = _get_name_info(arg//4, get_name) - if (arg & 1) and argrepr: - argrepr = f"{argrepr} + NULL|self" - else: - argval, argrepr = _get_name_info(arg, get_name) - elif deop in hasjump or deop in hasexc: - argval = self.offset_from_jump_arg(op, arg, offset) - lbl = self.get_label_for_offset(argval) - assert lbl is not None - argrepr = f"to L{lbl}" - elif deop in (LOAD_FAST_LOAD_FAST, STORE_FAST_LOAD_FAST, STORE_FAST_STORE_FAST): - arg1 = arg >> 4 - arg2 = arg & 15 - val1, argrepr1 = _get_name_info(arg1, self.varname_from_oparg) - val2, argrepr2 = _get_name_info(arg2, self.varname_from_oparg) - argrepr = argrepr1 + ", " + argrepr2 - argval = val1, val2 - elif deop in haslocal or deop in hasfree: - argval, argrepr = _get_name_info(arg, self.varname_from_oparg) - elif deop in hascompare: - argval = cmp_op[arg >> 5] - argrepr = argval - if arg & 16: - argrepr = f"bool({argrepr})" - elif deop == CONVERT_VALUE: - argval = (None, str, repr, ascii)[arg] - argrepr = ('', 'str', 'repr', 'ascii')[arg] - elif deop == SET_FUNCTION_ATTRIBUTE: - argrepr = ', '.join(s for i, s in enumerate(FUNCTION_ATTR_FLAGS) - if arg & (1<> 1 - lasti = bool(dl&1) - entries.append(_ExceptionTableEntry(start, end, target, depth, lasti)) - except StopIteration: - return entries - -def _is_backward_jump(op): - return opname[op] in ('JUMP_BACKWARD', - 
'JUMP_BACKWARD_NO_INTERRUPT') - -def _get_instructions_bytes(code, linestarts=None, line_offset=0, co_positions=None, - original_code=None, arg_resolver=None): - """Iterate over the instructions in a bytecode string. - - Generates a sequence of Instruction namedtuples giving the details of each - opcode. - - """ - # Use the basic, unadaptive code for finding labels and actually walking the - # bytecode, since replacements like ENTER_EXECUTOR and INSTRUMENTED_* can - # mess that logic up pretty badly: - original_code = original_code or code - co_positions = co_positions or iter(()) - - starts_line = False - local_line_number = None - line_number = None - for offset, start_offset, op, arg in _unpack_opargs(original_code): - if linestarts is not None: - starts_line = offset in linestarts - if starts_line: - local_line_number = linestarts[offset] - if local_line_number is not None: - line_number = local_line_number + line_offset - else: - line_number = None - positions = Positions(*next(co_positions, ())) - deop = _deoptop(op) - op = code[offset] - - if arg_resolver: - argval, argrepr = arg_resolver.get_argval_argrepr(op, arg, offset) - else: - argval, argrepr = arg, repr(arg) - - caches = _get_cache_size(_all_opname[deop]) - # Advance the co_positions iterator: - for _ in range(caches): - next(co_positions, ()) - - if caches: - cache_info = [] - for name, size in _cache_format[opname[deop]].items(): - data = code[offset + 2: offset + 2 + 2 * size] - cache_info.append((name, size, data)) - else: - cache_info = None - - label = arg_resolver.get_label_for_offset(offset) if arg_resolver else None - yield Instruction(_all_opname[op], op, arg, argval, argrepr, - offset, start_offset, starts_line, line_number, - label, positions, cache_info) - - -def disassemble(co, lasti=-1, *, file=None, show_caches=False, adaptive=False, - show_offsets=False): - """Disassemble a code object.""" - linestarts = dict(findlinestarts(co)) - exception_entries = _parse_exception_table(co) - labels_map = _make_labels_map(co.co_code, exception_entries=exception_entries) - label_width = 4 + len(str(len(labels_map))) - formatter = Formatter(file=file, - lineno_width=_get_lineno_width(linestarts), - offset_width=len(str(max(len(co.co_code) - 2, 9999))) if show_offsets else 0, - label_width=label_width, - show_caches=show_caches) - arg_resolver = ArgResolver(co_consts=co.co_consts, - names=co.co_names, - varname_from_oparg=co._varname_from_oparg, - labels_map=labels_map) - _disassemble_bytes(_get_code_array(co, adaptive), lasti, linestarts, - exception_entries=exception_entries, co_positions=co.co_positions(), - original_code=co.co_code, arg_resolver=arg_resolver, formatter=formatter) - -def _disassemble_recursive(co, *, file=None, depth=None, show_caches=False, adaptive=False, show_offsets=False): - disassemble(co, file=file, show_caches=show_caches, adaptive=adaptive, show_offsets=show_offsets) - if depth is None or depth > 0: - if depth is not None: - depth = depth - 1 - for x in co.co_consts: - if hasattr(x, 'co_code'): - print(file=file) - print("Disassembly of %r:" % (x,), file=file) - _disassemble_recursive( - x, file=file, depth=depth, show_caches=show_caches, - adaptive=adaptive, show_offsets=show_offsets - ) - - -def _make_labels_map(original_code, exception_entries=()): - jump_targets = set(findlabels(original_code)) - labels = set(jump_targets) - for start, end, target, _, _ in exception_entries: - labels.add(start) - labels.add(end) - labels.add(target) - labels = sorted(labels) - labels_map = {offset: i+1 for 
(i, offset) in enumerate(sorted(labels))} - for e in exception_entries: - e.start_label = labels_map[e.start] - e.end_label = labels_map[e.end] - e.target_label = labels_map[e.target] - return labels_map - -_NO_LINENO = ' --' - -def _get_lineno_width(linestarts): - if linestarts is None: - return 0 - maxlineno = max(filter(None, linestarts.values()), default=-1) - if maxlineno == -1: - # Omit the line number column entirely if we have no line number info - return 0 - lineno_width = max(3, len(str(maxlineno))) - if lineno_width < len(_NO_LINENO) and None in linestarts.values(): - lineno_width = len(_NO_LINENO) - return lineno_width - - -def _disassemble_bytes(code, lasti=-1, linestarts=None, - *, line_offset=0, exception_entries=(), - co_positions=None, original_code=None, - arg_resolver=None, formatter=None): - - assert formatter is not None - assert arg_resolver is not None - - instrs = _get_instructions_bytes(code, linestarts=linestarts, - line_offset=line_offset, - co_positions=co_positions, - original_code=original_code, - arg_resolver=arg_resolver) - - print_instructions(instrs, exception_entries, formatter, lasti=lasti) - - -def print_instructions(instrs, exception_entries, formatter, lasti=-1): - for instr in instrs: - # Each CACHE takes 2 bytes - is_current_instr = instr.offset <= lasti \ - <= instr.offset + 2 * _get_cache_size(_all_opname[_deoptop(instr.opcode)]) - formatter.print_instruction(instr, is_current_instr) - - formatter.print_exception_table(exception_entries) - -def _disassemble_str(source, **kwargs): - """Compile the source string, then disassemble the code object.""" - _disassemble_recursive(_try_compile(source, ''), **kwargs) - -disco = disassemble # XXX For backwards compatibility - - -# Rely on C `int` being 32 bits for oparg -_INT_BITS = 32 -# Value for c int when it overflows -_INT_OVERFLOW = 2 ** (_INT_BITS - 1) - -def _unpack_opargs(code): - extended_arg = 0 - extended_args_offset = 0 # Number of EXTENDED_ARG instructions preceding the current instruction - caches = 0 - for i in range(0, len(code), 2): - # Skip inline CACHE entries: - if caches: - caches -= 1 - continue - op = code[i] - deop = _deoptop(op) - caches = _get_cache_size(_all_opname[deop]) - if deop in hasarg: - arg = code[i+1] | extended_arg - extended_arg = (arg << 8) if deop == EXTENDED_ARG else 0 - # The oparg is stored as a signed integer - # If the value exceeds its upper limit, it will overflow and wrap - # to a negative integer - if extended_arg >= _INT_OVERFLOW: - extended_arg -= 2 * _INT_OVERFLOW - else: - arg = None - extended_arg = 0 - if deop == EXTENDED_ARG: - extended_args_offset += 1 - yield (i, i, op, arg) - else: - start_offset = i - extended_args_offset*2 - yield (i, start_offset, op, arg) - extended_args_offset = 0 - -def findlabels(code): - """Detect all offsets in a byte code which are jump targets. - - Return the list of offsets. - - """ - labels = [] - for offset, _, op, arg in _unpack_opargs(code): - if arg is not None: - label = _get_jump_target(op, arg, offset) - if label is None: - continue - if label not in labels: - labels.append(label) - return labels - -def findlinestarts(code): - """Find the offsets in a byte code which are start of lines in the source. - - Generate pairs (offset, lineno) - lineno will be an integer or None the offset does not have a source line. 
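Both helpers here are importable from dis; a sketch of their output (loop is an illustrative function):

    import dis

    def loop(n):
        total = 0
        for i in range(n):
            total += i
        return total

    print(list(dis.findlinestarts(loop.__code__)))  # [(offset, lineno), ...]
    print(dis.findlabels(loop.__code__.co_code))    # offsets that are jump targets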
- """ - - lastline = False # None is a valid line number - for start, end, line in code.co_lines(): - if line is not lastline: - lastline = line - yield start, line - return - -def _find_imports(co): - """Find import statements in the code - - Generate triplets (name, level, fromlist) where - name is the imported module and level, fromlist are - the corresponding args to __import__. - """ - IMPORT_NAME = opmap['IMPORT_NAME'] - - consts = co.co_consts - names = co.co_names - opargs = [(op, arg) for _, _, op, arg in _unpack_opargs(co.co_code) - if op != EXTENDED_ARG] - for i, (op, oparg) in enumerate(opargs): - if op == IMPORT_NAME and i >= 2: - from_op = opargs[i-1] - level_op = opargs[i-2] - if (from_op[0] in hasconst and level_op[0] in hasconst): - level = _get_const_value(level_op[0], level_op[1], consts) - fromlist = _get_const_value(from_op[0], from_op[1], consts) - yield (names[oparg], level, fromlist) - -def _find_store_names(co): - """Find names of variables which are written in the code - - Generate sequence of strings - """ - STORE_OPS = { - opmap['STORE_NAME'], - opmap['STORE_GLOBAL'] - } - - names = co.co_names - for _, _, op, arg in _unpack_opargs(co.co_code): - if op in STORE_OPS: - yield names[arg] - - -class Bytecode: - """The bytecode operations of a piece of code - - Instantiate this with a function, method, other compiled object, string of - code, or a code object (as returned by compile()). - - Iterating over this yields the bytecode operations as Instruction instances. - """ - def __init__(self, x, *, first_line=None, current_offset=None, show_caches=False, adaptive=False, show_offsets=False): - self.codeobj = co = _get_code_object(x) - if first_line is None: - self.first_line = co.co_firstlineno - self._line_offset = 0 - else: - self.first_line = first_line - self._line_offset = first_line - co.co_firstlineno - self._linestarts = dict(findlinestarts(co)) - self._original_object = x - self.current_offset = current_offset - self.exception_entries = _parse_exception_table(co) - self.show_caches = show_caches - self.adaptive = adaptive - self.show_offsets = show_offsets - - def __iter__(self): - co = self.codeobj - original_code = co.co_code - labels_map = _make_labels_map(original_code, self.exception_entries) - arg_resolver = ArgResolver(co_consts=co.co_consts, - names=co.co_names, - varname_from_oparg=co._varname_from_oparg, - labels_map=labels_map) - return _get_instructions_bytes(_get_code_array(co, self.adaptive), - linestarts=self._linestarts, - line_offset=self._line_offset, - co_positions=co.co_positions(), - original_code=original_code, - arg_resolver=arg_resolver) - - def __repr__(self): - return "{}({!r})".format(self.__class__.__name__, - self._original_object) - - @classmethod - def from_traceback(cls, tb, *, show_caches=False, adaptive=False): - """ Construct a Bytecode from the given traceback """ - while tb.tb_next: - tb = tb.tb_next - return cls( - tb.tb_frame.f_code, current_offset=tb.tb_lasti, show_caches=show_caches, adaptive=adaptive - ) - - def info(self): - """Return formatted information about the code object.""" - return _format_code_info(self.codeobj) - - def dis(self): - """Return a formatted view of the bytecode operations.""" - co = self.codeobj - if self.current_offset is not None: - offset = self.current_offset - else: - offset = -1 - with io.StringIO() as output: - code = _get_code_array(co, self.adaptive) - offset_width = len(str(max(len(code) - 2, 9999))) if self.show_offsets else 0 - - - labels_map = _make_labels_map(co.co_code, 
self.exception_entries) - label_width = 4 + len(str(len(labels_map))) - formatter = Formatter(file=output, - lineno_width=_get_lineno_width(self._linestarts), - offset_width=offset_width, - label_width=label_width, - line_offset=self._line_offset, - show_caches=self.show_caches) - - arg_resolver = ArgResolver(co_consts=co.co_consts, - names=co.co_names, - varname_from_oparg=co._varname_from_oparg, - labels_map=labels_map) - _disassemble_bytes(code, - linestarts=self._linestarts, - line_offset=self._line_offset, - lasti=offset, - exception_entries=self.exception_entries, - co_positions=co.co_positions(), - original_code=co.co_code, - arg_resolver=arg_resolver, - formatter=formatter) - return output.getvalue() - - -def main(args=None): - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument('-C', '--show-caches', action='store_true', - help='show inline caches') - parser.add_argument('-O', '--show-offsets', action='store_true', - help='show instruction offsets') - parser.add_argument('infile', nargs='?', default='-') - args = parser.parse_args(args=args) - if args.infile == '-': - name = '<stdin>' - source = sys.stdin.buffer.read() - else: - name = args.infile - with open(args.infile, 'rb') as infile: - source = infile.read() - code = compile(source, name, "exec") - dis(code, show_caches=args.show_caches, show_offsets=args.show_offsets) - -if __name__ == "__main__": - main() diff --git a/Python313_13_x64_Template/Lib/doctest.py b/Python313_13_x64_Template/Lib/doctest.py deleted file mode 100644 index ecac54ad..00000000 --- a/Python313_13_x64_Template/Lib/doctest.py +++ /dev/null @@ -1,2919 +0,0 @@ -# Module doctest. -# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org). -# Major enhancements and refactoring by: -# Jim Fulton -# Edward Loper - -# Provided as-is; use at your own risk; no warranty; no promises; enjoy! - -r"""Module doctest -- a framework for running examples in docstrings. - -In simplest use, end each module M to be tested with: - -def _test(): - import doctest - doctest.testmod() - -if __name__ == "__main__": - _test() - -Then running the module as a script will cause the examples in the -docstrings to get executed and verified: - -python M.py - -This won't display anything unless an example fails, in which case the -failing example(s) and the cause(s) of the failure(s) are printed to stdout -(why not stderr? because stderr is a lame hack <0.2 wink>), and the final -line of output is "Test failed.". - -Run it with the -v switch instead: - -python M.py -v - -and a detailed report of all examples tried is printed to stdout, along -with assorted summaries at the end. - -You can force verbose mode by passing "verbose=True" to testmod, or prohibit -it by passing "verbose=False". In either of those cases, sys.argv is not -examined by testmod. - -There are a variety of other ways to run doctests, including integration -with the unittest framework, and support for running non-Python text -files containing doctests. There are also many ways to override parts -of doctest's default behaviors. See the Library Reference Manual for -details. -""" - -__docformat__ = 'reStructuredText en' - -__all__ = [ - # 0, Option Flags - 'register_optionflag', - 'DONT_ACCEPT_TRUE_FOR_1', - 'DONT_ACCEPT_BLANKLINE', - 'NORMALIZE_WHITESPACE', - 'ELLIPSIS', - 'SKIP', - 'IGNORE_EXCEPTION_DETAIL', - 'COMPARISON_FLAGS', - 'REPORT_UDIFF', - 'REPORT_CDIFF', - 'REPORT_NDIFF', - 'REPORT_ONLY_FIRST_FAILURE', - 'REPORTING_FLAGS', - 'FAIL_FAST', - # 1. Utility Functions - # 2.
Example & DocTest - 'Example', - 'DocTest', - # 3. Doctest Parser - 'DocTestParser', - # 4. Doctest Finder - 'DocTestFinder', - # 5. Doctest Runner - 'DocTestRunner', - 'OutputChecker', - 'DocTestFailure', - 'UnexpectedException', - 'DebugRunner', - # 6. Test Functions - 'testmod', - 'testfile', - 'run_docstring_examples', - # 7. Unittest Support - 'DocTestSuite', - 'DocFileSuite', - 'set_unittest_reportflags', - # 8. Debugging Support - 'script_from_examples', - 'testsource', - 'debug_src', - 'debug', -] - -import __future__ -import difflib -import functools -import inspect -import linecache -import os -import pdb -import re -import sys -import traceback -import unittest -from io import StringIO, IncrementalNewlineDecoder -from collections import namedtuple -import _colorize # Used in doctests -from _colorize import ANSIColors, can_colorize - - -class TestResults(namedtuple('TestResults', 'failed attempted')): - def __new__(cls, failed, attempted, *, skipped=0): - results = super().__new__(cls, failed, attempted) - results.skipped = skipped - return results - - def __repr__(self): - if self.skipped: - return (f'TestResults(failed={self.failed}, ' - f'attempted={self.attempted}, ' - f'skipped={self.skipped})') - else: - # Leave the repr() unchanged for backward compatibility - # if skipped is zero - return super().__repr__() - - -# There are 4 basic classes: -# - Example: a pair, plus an intra-docstring line number. -# - DocTest: a collection of examples, parsed from a docstring, plus -# info about where the docstring came from (name, filename, lineno). -# - DocTestFinder: extracts DocTests from a given object's docstring and -# its contained objects' docstrings. -# - DocTestRunner: runs DocTest cases, and accumulates statistics. -# -# So the basic picture is: -# -# list of: -# +------+ +---------+ +-------+ -# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results| -# +------+ +---------+ +-------+ -# | Example | -# | ... | -# | Example | -# +---------+ - -# Option constants. - -OPTIONFLAGS_BY_NAME = {} -def register_optionflag(name): - # Create a new flag unless `name` is already known. - return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME)) - -DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1') -DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE') -NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE') -ELLIPSIS = register_optionflag('ELLIPSIS') -SKIP = register_optionflag('SKIP') -IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL') - -COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 | - DONT_ACCEPT_BLANKLINE | - NORMALIZE_WHITESPACE | - ELLIPSIS | - SKIP | - IGNORE_EXCEPTION_DETAIL) - -REPORT_UDIFF = register_optionflag('REPORT_UDIFF') -REPORT_CDIFF = register_optionflag('REPORT_CDIFF') -REPORT_NDIFF = register_optionflag('REPORT_NDIFF') -REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE') -FAIL_FAST = register_optionflag('FAIL_FAST') - -REPORTING_FLAGS = (REPORT_UDIFF | - REPORT_CDIFF | - REPORT_NDIFF | - REPORT_ONLY_FIRST_FAILURE | - FAIL_FAST) - -# Special string markers for use in `want` strings: -BLANKLINE_MARKER = '' -ELLIPSIS_MARKER = '...' - -###################################################################### -## Table of Contents -###################################################################### -# 1. Utility Functions -# 2. Example & DocTest -- store test cases -# 3. DocTest Parser -- extracts examples from strings -# 4. 
DocTest Finder -- extracts test cases from objects -# 5. DocTest Runner -- runs test cases -# 6. Test Functions -- convenient wrappers for testing -# 7. Unittest Support -# 8. Debugging Support -# 9. Example Usage - -###################################################################### -## 1. Utility Functions -###################################################################### - -def _extract_future_flags(globs): - """ - Return the compiler-flags associated with the future features that - have been imported into the given namespace (globs). - """ - flags = 0 - for fname in __future__.all_feature_names: - feature = globs.get(fname, None) - if feature is getattr(__future__, fname): - flags |= feature.compiler_flag - return flags - -def _normalize_module(module, depth=2): - """ - Return the module specified by `module`. In particular: - - If `module` is a module, then return module. - - If `module` is a string, then import and return the - module with that name. - - If `module` is None, then return the calling module. - The calling module is assumed to be the module of - the stack frame at the given depth in the call stack. - """ - if inspect.ismodule(module): - return module - elif isinstance(module, str): - return __import__(module, globals(), locals(), ["*"]) - elif module is None: - try: - try: - return sys.modules[sys._getframemodulename(depth)] - except AttributeError: - return sys.modules[sys._getframe(depth).f_globals['__name__']] - except KeyError: - pass - else: - raise TypeError("Expected a module, string, or None") - -def _newline_convert(data): - # The IO module provides a handy decoder for universal newline conversion - return IncrementalNewlineDecoder(None, True).decode(data, True) - -def _load_testfile(filename, package, module_relative, encoding): - if module_relative: - package = _normalize_module(package, 3) - filename = _module_relative_path(package, filename) - if (loader := getattr(package, '__loader__', None)) is None: - try: - loader = package.__spec__.loader - except AttributeError: - pass - if hasattr(loader, 'get_data'): - file_contents = loader.get_data(filename) - file_contents = file_contents.decode(encoding) - # get_data() opens files as 'rb', so one must do the equivalent - # conversion as universal newlines would do. - return _newline_convert(file_contents), filename - with open(filename, encoding=encoding) as f: - return f.read(), filename - -def _indent(s, indent=4): - """ - Add the given number of space characters to the beginning of - every non-blank line in `s`, and return the result. - """ - # This regexp matches the start of non-blank lines: - return re.sub('(?m)^(?!$)', indent*' ', s) - -def _exception_traceback(exc_info): - """ - Return a string containing a traceback message for the given - exc_info tuple (as returned by sys.exc_info()). - """ - # Get a traceback message. - excout = StringIO() - exc_type, exc_val, exc_tb = exc_info - traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) - return excout.getvalue() - -# Override some StringIO methods. -class _SpoofOut(StringIO): - def getvalue(self): - result = StringIO.getvalue(self) - # If anything at all was written, make sure there's a trailing - # newline. There's no way for the expected output to indicate - # that a trailing newline is missing. - if result and not result.endswith("\n"): - result += "\n" - return result - - def truncate(self, size=None): - self.seek(size) - StringIO.truncate(self) - -# Worst-case linear-time ellipsis matching. 
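To make `_extract_future_flags` concrete: if a module ran `from __future__ import annotations`, its namespace binds the feature object, and the matching compiler flag gets OR-ed in so doctest examples compile the same way the module does. A minimal equivalent using only the public `__future__` module:

    import __future__

    globs = {"annotations": __future__.annotations}  # as left behind by the import
    flags = 0
    for fname in __future__.all_feature_names:
        if globs.get(fname) is getattr(__future__, fname):
            flags |= getattr(__future__, fname).compiler_flag
    assert flags == __future__.annotations.compiler_flag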
-def _ellipsis_match(want, got): - """ - Essentially the only subtle case: - >>> _ellipsis_match('aa...aa', 'aaa') - False - """ - if ELLIPSIS_MARKER not in want: - return want == got - - # Find "the real" strings. - ws = want.split(ELLIPSIS_MARKER) - assert len(ws) >= 2 - - # Deal with exact matches possibly needed at one or both ends. - startpos, endpos = 0, len(got) - w = ws[0] - if w: # starts with exact match - if got.startswith(w): - startpos = len(w) - del ws[0] - else: - return False - w = ws[-1] - if w: # ends with exact match - if got.endswith(w): - endpos -= len(w) - del ws[-1] - else: - return False - - if startpos > endpos: - # Exact end matches required more characters than we have, as in - # _ellipsis_match('aa...aa', 'aaa') - return False - - # For the rest, we only need to find the leftmost non-overlapping - # match for each piece. If there's no overall match that way alone, - # there's no overall match period. - for w in ws: - # w may be '' at times, if there are consecutive ellipses, or - # due to an ellipsis at the start or end of `want`. That's OK. - # Search for an empty string succeeds, and doesn't change startpos. - startpos = got.find(w, startpos, endpos) - if startpos < 0: - return False - startpos += len(w) - - return True - -def _comment_line(line): - "Return a commented form of the given line" - line = line.rstrip() - if line: - return '# '+line - else: - return '#' - -def _strip_exception_details(msg): - # Support for IGNORE_EXCEPTION_DETAIL. - # Get rid of everything except the exception name; in particular, drop - # the possibly dotted module path (if any) and the exception message (if - # any). We assume that a colon is never part of a dotted name, or of an - # exception name. - # E.g., given - # "foo.bar.MyError: la di da" - # return "MyError" - # Or for "abc.def" or "abc.def:\n" return "def". - - start, end = 0, len(msg) - # The exception name must appear on the first line. - i = msg.find("\n") - if i >= 0: - end = i - # retain up to the first colon (if any) - i = msg.find(':', 0, end) - if i >= 0: - end = i - # retain just the exception name - i = msg.rfind('.', 0, end) - if i >= 0: - start = i+1 - return msg[start: end] - -class _OutputRedirectingPdb(pdb.Pdb): - """ - A specialized version of the python debugger that redirects stdout - to a given stream when interacting with the user. Stdout is *not* - redirected when traced code is executed. - """ - def __init__(self, out): - self.__out = out - self.__debugger_used = False - # do not play signal games in the pdb - pdb.Pdb.__init__(self, stdout=out, nosigint=True) - # still use input() to get user input - self.use_rawinput = 1 - - def set_trace(self, frame=None): - self.__debugger_used = True - if frame is None: - frame = sys._getframe().f_back - pdb.Pdb.set_trace(self, frame) - - def set_continue(self): - # Calling set_continue unconditionally would break unit test - # coverage reporting, as Bdb.set_continue calls sys.settrace(None). - if self.__debugger_used: - pdb.Pdb.set_continue(self) - - def trace_dispatch(self, *args): - # Redirect stdout to the given stream. - save_stdout = sys.stdout - sys.stdout = self.__out - # Call Pdb's trace dispatch method. - try: - return pdb.Pdb.trace_dispatch(self, *args) - finally: - sys.stdout = save_stdout - -# [XX] Normalize with respect to os.path.pardir? 
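The `_ellipsis_match` algorithm above is what the public ELLIPSIS flag uses; its behavior, including the overlapping-ends case from the docstring, can be observed through `doctest.OutputChecker`:

    import doctest

    checker = doctest.OutputChecker()
    print(checker.check_output("a...z\n", "abcxyz\n", doctest.ELLIPSIS))  # True
    # The exact prefix and suffix would have to overlap in the output:
    print(checker.check_output("aa...aa\n", "aaa\n", doctest.ELLIPSIS))   # False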
-def _module_relative_path(module, test_path): - if not inspect.ismodule(module): - raise TypeError('Expected a module: %r' % module) - if test_path.startswith('/'): - raise ValueError('Module-relative files may not have absolute paths') - - # Normalize the path. On Windows, replace "/" with "\". - test_path = os.path.join(*(test_path.split('/'))) - - # Find the base directory for the path. - if hasattr(module, '__file__'): - # A normal module/package - basedir = os.path.split(module.__file__)[0] - elif module.__name__ == '__main__': - # An interactive session. - if len(sys.argv)>0 and sys.argv[0] != '': - basedir = os.path.split(sys.argv[0])[0] - else: - basedir = os.curdir - else: - if hasattr(module, '__path__'): - for directory in module.__path__: - fullpath = os.path.join(directory, test_path) - if os.path.exists(fullpath): - return fullpath - - # A module w/o __file__ (this includes builtins) - raise ValueError("Can't resolve paths relative to the module " - "%r (it has no __file__)" - % module.__name__) - - # Combine the base directory and the test path. - return os.path.join(basedir, test_path) - -###################################################################### -## 2. Example & DocTest -###################################################################### -## - An "example" is a <source, want> pair, where "source" is a -## fragment of source code, and "want" is the expected output for -## "source." The Example class also includes information about -## where the example was extracted from. -## -## - A "doctest" is a collection of examples, typically extracted from -## a string (such as an object's docstring). The DocTest class also -## includes information about where the string was extracted from. - -class Example: - """ - A single doctest example, consisting of source code and expected - output. `Example` defines the following attributes: - - - source: A single Python statement, always ending with a newline. - The constructor adds a newline if needed. - - - want: The expected output from running the source code (either - from stdout, or a traceback in case of exception). `want` ends - with a newline unless it's empty, in which case it's an empty - string. The constructor adds a newline if needed. - - - exc_msg: The exception message generated by the example, if - the example is expected to generate an exception; or `None` if - it is not expected to generate an exception. This exception - message is compared against the return value of - `traceback.format_exception_only()`. `exc_msg` ends with a - newline unless it's `None`. The constructor adds a newline - if needed. - - - lineno: The line number within the DocTest string containing - this Example where the Example begins. This line number is - zero-based, with respect to the beginning of the DocTest. - - - indent: The example's indentation in the DocTest string. - I.e., the number of space characters that precede the - example's first prompt. - - - options: A dictionary mapping from option flags to True or - False, which is used to override default options for this - example. Any option flags not contained in this dictionary - are left at their default value (as specified by the - DocTestRunner's optionflags). By default, no options are set. - """ - def __init__(self, source, want, exc_msg=None, lineno=0, indent=0, - options=None): - # Normalize inputs.
- if not source.endswith('\n'): - source += '\n' - if want and not want.endswith('\n'): - want += '\n' - if exc_msg is not None and not exc_msg.endswith('\n'): - exc_msg += '\n' - # Store properties. - self.source = source - self.want = want - self.lineno = lineno - self.indent = indent - if options is None: options = {} - self.options = options - self.exc_msg = exc_msg - - def __eq__(self, other): - if type(self) is not type(other): - return NotImplemented - - return self.source == other.source and \ - self.want == other.want and \ - self.lineno == other.lineno and \ - self.indent == other.indent and \ - self.options == other.options and \ - self.exc_msg == other.exc_msg - - def __hash__(self): - return hash((self.source, self.want, self.lineno, self.indent, - self.exc_msg)) - -class DocTest: - """ - A collection of doctest examples that should be run in a single - namespace. Each `DocTest` defines the following attributes: - - - examples: the list of examples. - - - globs: The namespace (aka globals) that the examples should - be run in. - - - name: A name identifying the DocTest (typically, the name of - the object whose docstring this DocTest was extracted from). - - - filename: The name of the file that this DocTest was extracted - from, or `None` if the filename is unknown. - - - lineno: The line number within filename where this DocTest - begins, or `None` if the line number is unavailable. This - line number is zero-based, with respect to the beginning of - the file. - - - docstring: The string that the examples were extracted from, - or `None` if the string is unavailable. - """ - def __init__(self, examples, globs, name, filename, lineno, docstring): - """ - Create a new DocTest containing the given examples. The - DocTest's globals are initialized with a copy of `globs`. - """ - assert not isinstance(examples, str), \ - "DocTest no longer accepts str; use DocTestParser instead" - self.examples = examples - self.docstring = docstring - self.globs = globs.copy() - self.name = name - self.filename = filename - self.lineno = lineno - - def __repr__(self): - if len(self.examples) == 0: - examples = 'no examples' - elif len(self.examples) == 1: - examples = '1 example' - else: - examples = '%d examples' % len(self.examples) - return ('<%s %s from %s:%s (%s)>' % - (self.__class__.__name__, - self.name, self.filename, self.lineno, examples)) - - def __eq__(self, other): - if type(self) is not type(other): - return NotImplemented - - return self.examples == other.examples and \ - self.docstring == other.docstring and \ - self.globs == other.globs and \ - self.name == other.name and \ - self.filename == other.filename and \ - self.lineno == other.lineno - - def __hash__(self): - return hash((self.docstring, self.name, self.filename, self.lineno)) - - # This lets us sort tests by name: - def __lt__(self, other): - if not isinstance(other, DocTest): - return NotImplemented - self_lno = self.lineno if self.lineno is not None else -1 - other_lno = other.lineno if other.lineno is not None else -1 - return ((self.name, self.filename, self_lno, id(self)) - < - (other.name, other.filename, other_lno, id(other))) - -###################################################################### -## 3. DocTestParser -###################################################################### - -class DocTestParser: - """ - A class used to parse strings containing doctest examples. - """ - # This regular expression is used to find doctest examples in a - # string. 
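The normalization performed by the constructor above (trailing newlines appended, defaults filled in) is easy to observe by building an `Example` directly; note also that `__hash__` deliberately omits `options`, since dicts are unhashable:

    from doctest import Example

    ex = Example("print(2 + 2)", "4")
    print(repr(ex.source))  # 'print(2 + 2)\n' -- newline added
    print(repr(ex.want))    # '4\n'            -- newline added
    print(ex == Example("print(2 + 2)\n", "4\n"))  # True: same normalized fields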
It defines three groups: `source` is the source code - # (including leading indentation and prompts); `indent` is the - # indentation of the first (PS1) line of the source code; and - # `want` is the expected output (including leading indentation). - _EXAMPLE_RE = re.compile(r''' - # Source consists of a PS1 line followed by zero or more PS2 lines. - (?P<source> - (?:^(?P<indent> [ ]*) >>> .*) # PS1 line - (?:\n [ ]* \.\.\. .*)*) # PS2 lines - \n? - # Want consists of any non-blank lines that do not start with PS1. - (?P<want> (?:(?![ ]*$) # Not a blank line - (?![ ]*>>>) # Not a line starting with PS1 - .+$\n? # But any other line - )*) - ''', re.MULTILINE | re.VERBOSE) - - # A regular expression for handling `want` strings that contain - # expected exceptions. It divides `want` into three pieces: - # - the traceback header line (`hdr`) - # - the traceback stack (`stack`) - # - the exception message (`msg`), as generated by - # traceback.format_exception_only() - # `msg` may have multiple lines. We assume/require that the - # exception message is the first non-indented line starting with a word - # character following the traceback header line. - _EXCEPTION_RE = re.compile(r""" - # Grab the traceback header. Different versions of Python have - # said different things on the first traceback line. - ^(?P<hdr> Traceback\ \( - (?: most\ recent\ call\ last - | innermost\ last - ) \) : - ) - \s* $ # toss trailing whitespace on the header. - (?P<stack> .*?) # don't blink: absorb stuff until... - ^ (?P<msg> \w+ .*) # a line *starts* with alphanum. - """, re.VERBOSE | re.MULTILINE | re.DOTALL) - - # A callable returning a true value iff its argument is a blank line - # or contains a single comment. - _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match - - def parse(self, string, name='<string>'): - """ - Divide the given string into examples and intervening text, - and return them as a list of alternating Examples and strings. - Line numbers for the Examples are 0-based. The optional - argument `name` is a name identifying this string, and is only - used for error messages. - """ - string = string.expandtabs() - # If all lines begin with the same indentation, then strip it. - min_indent = self._min_indent(string) - if min_indent > 0: - string = '\n'.join([l[min_indent:] for l in string.split('\n')]) - - output = [] - charno, lineno = 0, 0 - # Find all doctest examples in the string: - for m in self._EXAMPLE_RE.finditer(string): - # Add the pre-example text to `output`. - output.append(string[charno:m.start()]) - # Update lineno (lines before this example) - lineno += string.count('\n', charno, m.start()) - # Extract info from the regexp match. - (source, options, want, exc_msg) = \ - self._parse_example(m, name, lineno) - # Create an Example, and add it to the list. - if not self._IS_BLANK_OR_COMMENT(source): - output.append( Example(source, want, exc_msg, - lineno=lineno, - indent=min_indent+len(m.group('indent')), - options=options) ) - # Update lineno (lines inside this example) - lineno += string.count('\n', m.start(), m.end()) - # Update charno. - charno = m.end() - # Add any remaining post-example text to `output`. - output.append(string[charno:]) - return output - - def get_doctest(self, string, globs, name, filename, lineno): - """ - Extract all doctest examples from the given string, and - collect them into a `DocTest` object. - - `globs`, `name`, `filename`, and `lineno` are attributes for - the new `DocTest` object. See the documentation for `DocTest` - for more information.
- """ - return DocTest(self.get_examples(string, name), globs, - name, filename, lineno, string) - - def get_examples(self, string, name=''): - """ - Extract all doctest examples from the given string, and return - them as a list of `Example` objects. Line numbers are - 0-based, because it's most common in doctests that nothing - interesting appears on the same line as opening triple-quote, - and so the first interesting line is called \"line 1\" then. - - The optional argument `name` is a name identifying this - string, and is only used for error messages. - """ - return [x for x in self.parse(string, name) - if isinstance(x, Example)] - - def _parse_example(self, m, name, lineno): - """ - Given a regular expression match from `_EXAMPLE_RE` (`m`), - return a pair `(source, want)`, where `source` is the matched - example's source code (with prompts and indentation stripped); - and `want` is the example's expected output (with indentation - stripped). - - `name` is the string's name, and `lineno` is the line number - where the example starts; both are used for error messages. - """ - # Get the example's indentation level. - indent = len(m.group('indent')) - - # Divide source into lines; check that they're properly - # indented; and then strip their indentation & prompts. - source_lines = m.group('source').split('\n') - self._check_prompt_blank(source_lines, indent, name, lineno) - self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno) - source = '\n'.join([sl[indent+4:] for sl in source_lines]) - - # Divide want into lines; check that it's properly indented; and - # then strip the indentation. Spaces before the last newline should - # be preserved, so plain rstrip() isn't good enough. - want = m.group('want') - want_lines = want.split('\n') - if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]): - del want_lines[-1] # forget final newline & spaces after it - self._check_prefix(want_lines, ' '*indent, name, - lineno + len(source_lines)) - want = '\n'.join([wl[indent:] for wl in want_lines]) - - # If `want` contains a traceback message, then extract it. - m = self._EXCEPTION_RE.match(want) - if m: - exc_msg = m.group('msg') - else: - exc_msg = None - - # Extract options from the source. - options = self._find_options(source, name, lineno) - - return source, options, want, exc_msg - - # This regular expression looks for option directives in the - # source code of an example. Option directives are comments - # starting with "doctest:". Warning: this may give false - # positives for string-literals that contain the string - # "#doctest:". Eliminating these false positives would require - # actually parsing the string; but we limit them by ignoring any - # line containing "#doctest:" that is *followed* by a quote mark. - _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$', - re.MULTILINE) - - def _find_options(self, source, name, lineno): - """ - Return a dictionary containing option overrides extracted from - option directives in the given source string. - - `name` is the string's name, and `lineno` is the line number - where the example starts; both are used for error messages. 
- """ - options = {} - # (note: with the current regexp, this will match at most once:) - for m in self._OPTION_DIRECTIVE_RE.finditer(source): - option_strings = m.group(1).replace(',', ' ').split() - for option in option_strings: - if (option[0] not in '+-' or - option[1:] not in OPTIONFLAGS_BY_NAME): - raise ValueError('line %r of the doctest for %s ' - 'has an invalid option: %r' % - (lineno+1, name, option)) - flag = OPTIONFLAGS_BY_NAME[option[1:]] - options[flag] = (option[0] == '+') - if options and self._IS_BLANK_OR_COMMENT(source): - raise ValueError('line %r of the doctest for %s has an option ' - 'directive on a line with no example: %r' % - (lineno, name, source)) - return options - - # This regular expression finds the indentation of every non-blank - # line in a string. - _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE) - - def _min_indent(self, s): - "Return the minimum indentation of any non-blank line in `s`" - indents = [len(indent) for indent in self._INDENT_RE.findall(s)] - if len(indents) > 0: - return min(indents) - else: - return 0 - - def _check_prompt_blank(self, lines, indent, name, lineno): - """ - Given the lines of a source string (including prompts and - leading indentation), check to make sure that every prompt is - followed by a space character. If any line is not followed by - a space character, then raise ValueError. - """ - for i, line in enumerate(lines): - if len(line) >= indent+4 and line[indent+3] != ' ': - raise ValueError('line %r of the docstring for %s ' - 'lacks blank after %s: %r' % - (lineno+i+1, name, - line[indent:indent+3], line)) - - def _check_prefix(self, lines, prefix, name, lineno): - """ - Check that every line in the given list starts with the given - prefix; if any line does not, then raise a ValueError. - """ - for i, line in enumerate(lines): - if line and not line.startswith(prefix): - raise ValueError('line %r of the docstring for %s has ' - 'inconsistent leading whitespace: %r' % - (lineno+i+1, name, line)) - - -###################################################################### -## 4. DocTest Finder -###################################################################### - -class DocTestFinder: - """ - A class used to extract the DocTests that are relevant to a given - object, from its docstring and the docstrings of its contained - objects. Doctests can currently be extracted from the following - object types: modules, functions, classes, methods, staticmethods, - classmethods, and properties. - """ - - def __init__(self, verbose=False, parser=DocTestParser(), - recurse=True, exclude_empty=True): - """ - Create a new doctest finder. - - The optional argument `parser` specifies a class or - function that should be used to create new DocTest objects (or - objects that implement the same interface as DocTest). The - signature for this factory function should match the signature - of the DocTest constructor. - - If the optional argument `recurse` is false, then `find` will - only examine the given object, and not any contained objects. - - If the optional argument `exclude_empty` is false, then `find` - will include tests for objects with empty docstrings. - """ - self._parser = parser - self._verbose = verbose - self._recurse = recurse - self._exclude_empty = exclude_empty - - def find(self, obj, name=None, module=None, globs=None, extraglobs=None): - """ - Return a list of the DocTests that are defined by the given - object's docstring, or by any of its contained objects' - docstrings. 
- - The optional parameter `module` is the module that contains - the given object. If the module is not specified or is None, then - the test finder will attempt to automatically determine the - correct module. The object's module is used: - - - As a default namespace, if `globs` is not specified. - - To prevent the DocTestFinder from extracting DocTests - from objects that are imported from other modules. - - To find the name of the file containing the object. - - To help find the line number of the object within its - file. - - Contained objects whose module does not match `module` are ignored. - - If `module` is False, no attempt to find the module will be made. - This is obscure, of use mostly in tests: if `module` is False, or - is None but cannot be found automatically, then all objects are - considered to belong to the (non-existent) module, so all contained - objects will (recursively) be searched for doctests. - - The globals for each DocTest is formed by combining `globs` - and `extraglobs` (bindings in `extraglobs` override bindings - in `globs`). A new copy of the globals dictionary is created - for each DocTest. If `globs` is not specified, then it - defaults to the module's `__dict__`, if specified, or {} - otherwise. If `extraglobs` is not specified, then it defaults - to {}. - - """ - # If name was not specified, then extract it from the object. - if name is None: - name = getattr(obj, '__name__', None) - if name is None: - raise ValueError("DocTestFinder.find: name must be given " - "when obj.__name__ doesn't exist: %r" % - (type(obj),)) - - # Find the module that contains the given object (if obj is - # a module, then module=obj.). Note: this may fail, in which - # case module will be None. - if module is False: - module = None - elif module is None: - module = inspect.getmodule(obj) - - # Read the module's source code. This is used by - # DocTestFinder._find_lineno to find the line number for a - # given object's docstring. - try: - file = inspect.getsourcefile(obj) - except TypeError: - source_lines = None - else: - if not file: - # Check to see if it's one of our special internal "files" - # (see __patched_linecache_getlines). - file = inspect.getfile(obj) - if not file[0]+file[-2:] == '<]>': file = None - if file is None: - source_lines = None - else: - if module is not None: - # Supply the module globals in case the module was - # originally loaded via a PEP 302 loader and - # file is not a valid filesystem path - source_lines = linecache.getlines(file, module.__dict__) - else: - # No access to a loader, so assume it's a normal - # filesystem path - source_lines = linecache.getlines(file) - if not source_lines: - source_lines = None - - # Initialize globals, and merge in extraglobs. - if globs is None: - if module is None: - globs = {} - else: - globs = module.__dict__.copy() - else: - globs = globs.copy() - if extraglobs is not None: - globs.update(extraglobs) - if '__name__' not in globs: - globs['__name__'] = '__main__' # provide a default module name - - # Recursively explore `obj`, extracting DocTests. - tests = [] - self._find(tests, obj, name, module, source_lines, globs, {}) - # Sort the tests by alpha order of names, for consistency in - # verbose-mode output. This was a feature of doctest in Pythons - # <= 2.3 that got lost by accident in 2.4. It was repaired in - # 2.4.4 and 2.5. - tests.sort() - return tests - - def _from_module(self, module, object): - """ - Return true if the given object is defined in the given - module. 
- """ - if module is None: - return True - elif inspect.getmodule(object) is not None: - return module is inspect.getmodule(object) - elif inspect.isfunction(object): - return module.__dict__ is object.__globals__ - elif (inspect.ismethoddescriptor(object) or - inspect.ismethodwrapper(object)): - if hasattr(object, '__objclass__'): - obj_mod = object.__objclass__.__module__ - elif hasattr(object, '__module__'): - obj_mod = object.__module__ - else: - return True # [XX] no easy way to tell otherwise - return module.__name__ == obj_mod - elif inspect.isclass(object): - return module.__name__ == object.__module__ - elif hasattr(object, '__module__'): - return module.__name__ == object.__module__ - elif isinstance(object, property): - return True # [XX] no way not be sure. - else: - raise ValueError("object must be a class or function") - - def _is_routine(self, obj): - """ - Safely unwrap objects and determine if they are functions. - """ - maybe_routine = obj - try: - maybe_routine = inspect.unwrap(maybe_routine) - except ValueError: - pass - return inspect.isroutine(maybe_routine) - - def _find(self, tests, obj, name, module, source_lines, globs, seen): - """ - Find tests for the given object and any contained objects, and - add them to `tests`. - """ - if self._verbose: - print('Finding tests in %s' % name) - - # If we've already processed this object, then ignore it. - if id(obj) in seen: - return - seen[id(obj)] = 1 - - # Find a test for this object, and add it to the list of tests. - test = self._get_test(obj, name, module, globs, source_lines) - if test is not None: - tests.append(test) - - # Look for tests in a module's contained objects. - if inspect.ismodule(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - valname = '%s.%s' % (name, valname) - - # Recurse to functions & classes. - if ((self._is_routine(val) or inspect.isclass(val)) and - self._from_module(module, val)): - self._find(tests, val, valname, module, source_lines, - globs, seen) - - # Look for tests in a module's __test__ dictionary. - if inspect.ismodule(obj) and self._recurse: - for valname, val in getattr(obj, '__test__', {}).items(): - if not isinstance(valname, str): - raise ValueError("DocTestFinder.find: __test__ keys " - "must be strings: %r" % - (type(valname),)) - if not (inspect.isroutine(val) or inspect.isclass(val) or - inspect.ismodule(val) or isinstance(val, str)): - raise ValueError("DocTestFinder.find: __test__ values " - "must be strings, functions, methods, " - "classes, or modules: %r" % - (type(val),)) - valname = '%s.__test__.%s' % (name, valname) - self._find(tests, val, valname, module, source_lines, - globs, seen) - - # Look for tests in a class's contained objects. - if inspect.isclass(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - # Special handling for staticmethod/classmethod. - if isinstance(val, (staticmethod, classmethod)): - val = val.__func__ - - # Recurse to methods, properties, and nested classes. - if ((inspect.isroutine(val) or inspect.isclass(val) or - isinstance(val, property)) and - self._from_module(module, val)): - valname = '%s.%s' % (name, valname) - self._find(tests, val, valname, module, source_lines, - globs, seen) - - def _get_test(self, obj, name, module, globs, source_lines): - """ - Return a DocTest for the given object, if it defines a docstring; - otherwise, return None. - """ - # Extract the object's docstring. If it doesn't have one, - # then return None (no test for this object). 
- if isinstance(obj, str): - docstring = obj - else: - try: - if obj.__doc__ is None: - docstring = '' - else: - docstring = obj.__doc__ - if not isinstance(docstring, str): - docstring = str(docstring) - except (TypeError, AttributeError): - docstring = '' - - # Find the docstring's location in the file. - lineno = self._find_lineno(obj, source_lines) - - # Don't bother if the docstring is empty. - if self._exclude_empty and not docstring: - return None - - # Return a DocTest for this object. - if module is None: - filename = None - else: - # __file__ can be None for namespace packages. - filename = getattr(module, '__file__', None) or module.__name__ - if filename[-4:] == ".pyc": - filename = filename[:-1] - return self._parser.get_doctest(docstring, globs, name, - filename, lineno) - - def _find_lineno(self, obj, source_lines): - """ - Return a line number of the given object's docstring. - - Returns `None` if the given object does not have a docstring. - """ - lineno = None - docstring = getattr(obj, '__doc__', None) - - # Find the line number for modules. - if inspect.ismodule(obj) and docstring is not None: - lineno = 0 - - # Find the line number for classes. - # Note: this could be fooled if a class is defined multiple - # times in a single file. - if inspect.isclass(obj) and docstring is not None: - if source_lines is None: - return None - pat = re.compile(r'^\s*class\s*%s\b' % - re.escape(getattr(obj, '__name__', '-'))) - for i, line in enumerate(source_lines): - if pat.match(line): - lineno = i - break - - # Find the line number for functions & methods. - if inspect.ismethod(obj): obj = obj.__func__ - if isinstance(obj, property): - obj = obj.fget - if isinstance(obj, functools.cached_property): - obj = obj.func - if inspect.isroutine(obj) and getattr(obj, '__doc__', None): - # We don't use `docstring` var here, because `obj` can be changed. - obj = inspect.unwrap(obj) - try: - obj = obj.__code__ - except AttributeError: - # Functions implemented in C don't necessarily - # have a __code__ attribute. - # If there's no code, there's no lineno - return None - if inspect.istraceback(obj): obj = obj.tb_frame - if inspect.isframe(obj): obj = obj.f_code - if inspect.iscode(obj): - lineno = obj.co_firstlineno - 1 - - # Find the line number where the docstring starts. Assume - # that it's the first line that begins with a quote mark. - # Note: this could be fooled by a multiline function - # signature, where a continuation line begins with a quote - # mark. - if lineno is not None: - if source_lines is None: - return lineno+1 - pat = re.compile(r'(^|.*:)\s*\w*("|\')') - for lineno in range(lineno, len(source_lines)): - if pat.match(source_lines[lineno]): - return lineno - - # We couldn't find the line number. - return None - -###################################################################### -## 5. DocTest Runner -###################################################################### - -class DocTestRunner: - """ - A class used to run DocTest test cases, and accumulate statistics. - The `run` method is used to process a single DocTest case. It - returns a TestResults instance. - - >>> save_colorize = _colorize.COLORIZE - >>> _colorize.COLORIZE = False - - >>> tests = DocTestFinder().find(_TestClass) - >>> runner = DocTestRunner(verbose=False) - >>> tests.sort(key = lambda test: test.name) - >>> for test in tests: - ... 
print(test.name, '->', runner.run(test)) - _TestClass -> TestResults(failed=0, attempted=2) - _TestClass.__init__ -> TestResults(failed=0, attempted=2) - _TestClass.get -> TestResults(failed=0, attempted=2) - _TestClass.square -> TestResults(failed=0, attempted=1) - - The `summarize` method prints a summary of all the test cases that - have been run by the runner, and returns an aggregated TestResults - instance: - - >>> runner.summarize(verbose=1) - 4 items passed all tests: - 2 tests in _TestClass - 2 tests in _TestClass.__init__ - 2 tests in _TestClass.get - 1 test in _TestClass.square - 7 tests in 4 items. - 7 passed. - Test passed. - TestResults(failed=0, attempted=7) - - The aggregated number of tried examples and failed examples is also - available via the `tries`, `failures` and `skips` attributes: - - >>> runner.tries - 7 - >>> runner.failures - 0 - >>> runner.skips - 0 - - The comparison between expected outputs and actual outputs is done - by an `OutputChecker`. This comparison may be customized with a - number of option flags; see the documentation for `testmod` for - more information. If the option flags are insufficient, then the - comparison may also be customized by passing a subclass of - `OutputChecker` to the constructor. - - The test runner's display output can be controlled in two ways. - First, an output function (`out) can be passed to - `TestRunner.run`; this function will be called with strings that - should be displayed. It defaults to `sys.stdout.write`. If - capturing the output is not sufficient, then the display output - can be also customized by subclassing DocTestRunner, and - overriding the methods `report_start`, `report_success`, - `report_unexpected_exception`, and `report_failure`. - - >>> _colorize.COLORIZE = save_colorize - """ - # This divider string is used to separate failure messages, and to - # separate sections of the summary. - DIVIDER = "*" * 70 - - def __init__(self, checker=None, verbose=None, optionflags=0): - """ - Create a new test runner. - - Optional keyword arg `checker` is the `OutputChecker` that - should be used to compare the expected outputs and actual - outputs of doctest examples. - - Optional keyword arg 'verbose' prints lots of stuff if true, - only failures if false; by default, it's true iff '-v' is in - sys.argv. - - Optional argument `optionflags` can be used to control how the - test runner compares expected output to actual output, and how - it displays failures. See the documentation for `testmod` for - more information. - """ - self._checker = checker or OutputChecker() - if verbose is None: - verbose = '-v' in sys.argv - self._verbose = verbose - self.optionflags = optionflags - self.original_optionflags = optionflags - - # Keep track of the examples we've run. - self.tries = 0 - self.failures = 0 - self.skips = 0 - self._stats = {} - - # Create a fake output target for capturing doctest output. - self._fakeout = _SpoofOut() - - #///////////////////////////////////////////////////////////////// - # Reporting methods - #///////////////////////////////////////////////////////////////// - - def report_start(self, out, test, example): - """ - Report that the test runner is about to process the given - example. 
(Only displays a message if verbose=True) - """ - if self._verbose: - if example.want: - out('Trying:\n' + _indent(example.source) + - 'Expecting:\n' + _indent(example.want)) - else: - out('Trying:\n' + _indent(example.source) + - 'Expecting nothing\n') - - def report_success(self, out, test, example, got): - """ - Report that the given example ran successfully. (Only - displays a message if verbose=True) - """ - if self._verbose: - out("ok\n") - - def report_failure(self, out, test, example, got): - """ - Report that the given example failed. - """ - out(self._failure_header(test, example) + - self._checker.output_difference(example, got, self.optionflags)) - - def report_unexpected_exception(self, out, test, example, exc_info): - """ - Report that the given example raised an unexpected exception. - """ - out(self._failure_header(test, example) + - 'Exception raised:\n' + _indent(_exception_traceback(exc_info))) - - def _failure_header(self, test, example): - red, reset = ( - (ANSIColors.RED, ANSIColors.RESET) if can_colorize() else ("", "") - ) - out = [f"{red}{self.DIVIDER}{reset}"] - if test.filename: - if test.lineno is not None and example.lineno is not None: - lineno = test.lineno + example.lineno + 1 - else: - lineno = '?' - out.append('File "%s", line %s, in %s' % - (test.filename, lineno, test.name)) - else: - out.append('Line %s, in %s' % (example.lineno+1, test.name)) - out.append('Failed example:') - source = example.source - out.append(_indent(source)) - return '\n'.join(out) - - #///////////////////////////////////////////////////////////////// - # DocTest Running - #///////////////////////////////////////////////////////////////// - - def __run(self, test, compileflags, out): - """ - Run the examples in `test`. Write the outcome of each example - with one of the `DocTestRunner.report_*` methods, using the - writer function `out`. `compileflags` is the set of compiler - flags that should be used to execute examples. Return a TestResults - instance. The examples are run in the namespace `test.globs`. - """ - # Keep track of the number of failed, attempted, skipped examples. - failures = attempted = skips = 0 - - # Save the option flags (since option directives can be used - # to modify them). - original_optionflags = self.optionflags - - SUCCESS, FAILURE, BOOM = range(3) # `outcome` state - - check = self._checker.check_output - - # Process each example. - for examplenum, example in enumerate(test.examples): - attempted += 1 - - # If REPORT_ONLY_FIRST_FAILURE is set, then suppress - # reporting after the first failure. - quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and - failures > 0) - - # Merge in the example's options. - self.optionflags = original_optionflags - if example.options: - for (optionflag, val) in example.options.items(): - if val: - self.optionflags |= optionflag - else: - self.optionflags &= ~optionflag - - # If 'SKIP' is set, then skip this example. - if self.optionflags & SKIP: - skips += 1 - continue - - # Record that we started this example. - if not quiet: - self.report_start(out, test, example) - - # Use a special filename for compile(), so we can retrieve - # the source code during interactive debugging (see - # __patched_linecache_getlines). - filename = '<doctest %s[%d]>' % (test.name, examplenum) - - # Run the example in the given context (globs), and record - # any exception that gets raised. (But don't intercept - # keyboard interrupts.) - try: - # Don't blink! This is where the user's code gets run.
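Putting the parser and runner together shows the flow that `__run` implements; a minimal sketch (the test name "demo" and filename are arbitrary):

    import doctest

    parser = doctest.DocTestParser()
    test = parser.get_doctest(">>> 2 + 2\n4\n", {}, "demo", "demo.py", 0)
    runner = doctest.DocTestRunner(verbose=False)
    print(runner.run(test))  # TestResults(failed=0, attempted=1)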
- exec(compile(example.source, filename, "single", - compileflags, True), test.globs) - self.debugger.set_continue() # ==== Example Finished ==== - exception = None - except KeyboardInterrupt: - raise - except: - exception = sys.exc_info() - self.debugger.set_continue() # ==== Example Finished ==== - - got = self._fakeout.getvalue() # the actual output - self._fakeout.truncate(0) - outcome = FAILURE # guilty until proved innocent or insane - - # If the example executed without raising any exceptions, - # verify its output. - if exception is None: - if check(example.want, got, self.optionflags): - outcome = SUCCESS - - # The example raised an exception: check if it was expected. - else: - formatted_ex = traceback.format_exception_only(*exception[:2]) - if issubclass(exception[0], SyntaxError): - # SyntaxError / IndentationError is special: - # we don't care about the carets / suggestions / etc - # We only care about the error message and notes. - # They start with `SyntaxError:` (or any other class name) - exception_line_prefixes = ( - f"{exception[0].__qualname__}:", - f"{exception[0].__module__}.{exception[0].__qualname__}:", - ) - exc_msg_index = next( - index - for index, line in enumerate(formatted_ex) - if line.startswith(exception_line_prefixes) - ) - formatted_ex = formatted_ex[exc_msg_index:] - - exc_msg = "".join(formatted_ex) - if not quiet: - got += _exception_traceback(exception) - - # If `example.exc_msg` is None, then we weren't expecting - # an exception. - if example.exc_msg is None: - outcome = BOOM - - # We expected an exception: see whether it matches. - elif check(example.exc_msg, exc_msg, self.optionflags): - outcome = SUCCESS - - # Another chance if they didn't care about the detail. - elif self.optionflags & IGNORE_EXCEPTION_DETAIL: - if check(_strip_exception_details(example.exc_msg), - _strip_exception_details(exc_msg), - self.optionflags): - outcome = SUCCESS - - # Report the outcome. - if outcome is SUCCESS: - if not quiet: - self.report_success(out, test, example, got) - elif outcome is FAILURE: - if not quiet: - self.report_failure(out, test, example, got) - failures += 1 - elif outcome is BOOM: - if not quiet: - self.report_unexpected_exception(out, test, example, - exception) - failures += 1 - else: - assert False, ("unknown outcome", outcome) - - if failures and self.optionflags & FAIL_FAST: - break - - # Restore the option flags (in case they were modified) - self.optionflags = original_optionflags - - # Record and return the number of failures and attempted. - self.__record_outcome(test, failures, attempted, skips) - return TestResults(failures, attempted, skipped=skips) - - def __record_outcome(self, test, failures, tries, skips): - """ - Record the fact that the given DocTest (`test`) generated `failures` - failures out of `tries` tried examples. 
- failures2, tries2, skips2 = self._stats.get(test.name, (0, 0, 0)) - self._stats[test.name] = (failures + failures2, - tries + tries2, - skips + skips2) - self.failures += failures - self.tries += tries - self.skips += skips - - __LINECACHE_FILENAME_RE = re.compile(r'<doctest ' - r'(?P<name>.+)' - r'\[(?P<examplenum>\d+)\]>$') - def __patched_linecache_getlines(self, filename, module_globals=None): - m = self.__LINECACHE_FILENAME_RE.match(filename) - if m and m.group('name') == self.test.name: - example = self.test.examples[int(m.group('examplenum'))] - return example.source.splitlines(keepends=True) - else: - return self.save_linecache_getlines(filename, module_globals) - - def run(self, test, compileflags=None, out=None, clear_globs=True): - """ - Run the examples in `test`, and display the results using the - writer function `out`. - - The examples are run in the namespace `test.globs`. If - `clear_globs` is true (the default), then this namespace will - be cleared after the test runs, to help with garbage - collection. If you would like to examine the namespace after - the test completes, then use `clear_globs=False`. - - `compileflags` gives the set of flags that should be used by - the Python compiler when running the examples. If not - specified, then it will default to the set of future-import - flags that apply to `globs`. - - The output of each example is checked using - `DocTestRunner.check_output`, and the results are formatted by - the `DocTestRunner.report_*` methods. - """ - self.test = test - - if compileflags is None: - compileflags = _extract_future_flags(test.globs) - - save_stdout = sys.stdout - if out is None: - encoding = save_stdout.encoding - if encoding is None or encoding.lower() == 'utf-8': - out = save_stdout.write - else: - # Use backslashreplace error handling on write - def out(s): - s = str(s.encode(encoding, 'backslashreplace'), encoding) - save_stdout.write(s) - sys.stdout = self._fakeout - - # Patch pdb.set_trace to restore sys.stdout during interactive - # debugging (so it's not still redirected to self._fakeout). - # Note that the interactive output will go to *our* - # save_stdout, even if that's not the real sys.stdout; this - # allows us to write test cases for the set_trace behavior. - save_trace = sys.gettrace() - save_set_trace = pdb.set_trace - self.debugger = _OutputRedirectingPdb(save_stdout) - self.debugger.reset() - pdb.set_trace = self.debugger.set_trace - - # Patch linecache.getlines, so we can see the example's source - # when we're inside the debugger.
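Because everything is reported through the `out` callable, failure output can be captured instead of printed; for example:

    import doctest

    test = doctest.DocTestParser().get_doctest(">>> 1 + 1\n3\n", {}, "fail_demo", None, 0)
    chunks = []
    doctest.DocTestRunner(verbose=False).run(test, out=chunks.append)
    print("Failed example:" in "".join(chunks))  # True: report went to our list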
- self.save_linecache_getlines = linecache.getlines - linecache.getlines = self.__patched_linecache_getlines - - # Make sure sys.displayhook just prints the value to stdout - save_displayhook = sys.displayhook - sys.displayhook = sys.__displayhook__ - saved_can_colorize = _colorize.can_colorize - _colorize.can_colorize = lambda *args, **kwargs: False - color_variables = {"PYTHON_COLORS": None, "FORCE_COLOR": None} - for key in color_variables: - color_variables[key] = os.environ.pop(key, None) - try: - return self.__run(test, compileflags, out) - finally: - sys.stdout = save_stdout - pdb.set_trace = save_set_trace - sys.settrace(save_trace) - linecache.getlines = self.save_linecache_getlines - sys.displayhook = save_displayhook - _colorize.can_colorize = saved_can_colorize - for key, value in color_variables.items(): - if value is not None: - os.environ[key] = value - if clear_globs: - test.globs.clear() - import builtins - builtins._ = None - - #///////////////////////////////////////////////////////////////// - # Summarization - #///////////////////////////////////////////////////////////////// - def summarize(self, verbose=None): - """ - Print a summary of all the test cases that have been run by - this DocTestRunner, and return a TestResults instance. - - The optional `verbose` argument controls how detailed the - summary is. If the verbosity is not specified, then the - DocTestRunner's verbosity is used. - """ - if verbose is None: - verbose = self._verbose - - notests, passed, failed = [], [], [] - total_tries = total_failures = total_skips = 0 - - for name, (failures, tries, skips) in self._stats.items(): - assert failures <= tries - total_tries += tries - total_failures += failures - total_skips += skips - - if tries == 0: - notests.append(name) - elif failures == 0: - passed.append((name, tries)) - else: - failed.append((name, (failures, tries, skips))) - - ansi = _colorize.get_colors() - bold_green = ansi.BOLD_GREEN - bold_red = ansi.BOLD_RED - green = ansi.GREEN - red = ansi.RED - reset = ansi.RESET - yellow = ansi.YELLOW - - if verbose: - if notests: - print(f"{_n_items(notests)} had no tests:") - notests.sort() - for name in notests: - print(f" {name}") - - if passed: - print(f"{green}{_n_items(passed)} passed all tests:{reset}") - for name, count in sorted(passed): - s = "" if count == 1 else "s" - print(f" {green}{count:3d} test{s} in {name}{reset}") - - if failed: - print(f"{red}{self.DIVIDER}{reset}") - print(f"{_n_items(failed)} had failures:") - for name, (failures, tries, skips) in sorted(failed): - print(f" {failures:3d} of {tries:3d} in {name}") - - if verbose: - s = "" if total_tries == 1 else "s" - print(f"{total_tries} test{s} in {_n_items(self._stats)}.") - - and_f = ( - f" and {red}{total_failures} failed{reset}" - if total_failures else "" - ) - print(f"{green}{total_tries - total_failures} passed{reset}{and_f}.") - - if total_failures: - s = "" if total_failures == 1 else "s" - msg = f"{bold_red}***Test Failed*** {total_failures} failure{s}{reset}" - if total_skips: - s = "" if total_skips == 1 else "s" - msg = f"{msg} and {yellow}{total_skips} skipped test{s}{reset}" - print(f"{msg}.") - elif verbose: - print(f"{bold_green}Test passed.{reset}") - - return TestResults(total_failures, total_tries, skipped=total_skips) - - #///////////////////////////////////////////////////////////////// - # Backward compatibility cruft to maintain doctest.master. 
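(Editor's note, not part of the diff: the run()/summarize() pair above is the core of the runner API being removed here. A minimal, illustrative sketch of driving it by hand, assuming only the public doctest module:)

import doctest

def run_module_doctests(module):
    """Illustrative only: find and run a module's doctests by hand."""
    finder = doctest.DocTestFinder()
    runner = doctest.DocTestRunner(verbose=False)
    for test in finder.find(module):
        runner.run(test)
    # summarize() aggregates the per-test stats recorded by each run()
    # and returns a TestResults named tuple.
    return runner.summarize(verbose=True)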
- #///////////////////////////////////////////////////////////////// - def merge(self, other): - d = self._stats - for name, (failures, tries, skips) in other._stats.items(): - if name in d: - failures2, tries2, skips2 = d[name] - failures = failures + failures2 - tries = tries + tries2 - skips = skips + skips2 - d[name] = (failures, tries, skips) - - -def _n_items(items: list | dict) -> str: - """ - Helper to pluralise the number of items in a list. - """ - n = len(items) - s = "" if n == 1 else "s" - return f"{n} item{s}" - - -class OutputChecker: - """ - A class used to check whether the actual output from a doctest - example matches the expected output. `OutputChecker` defines two - methods: `check_output`, which compares a given pair of outputs, - and returns true if they match; and `output_difference`, which - returns a string describing the differences between two outputs. - """ - def _toAscii(self, s): - """ - Convert string to hex-escaped ASCII string. - """ - return str(s.encode('ASCII', 'backslashreplace'), "ASCII") - - def check_output(self, want, got, optionflags): - """ - Return True iff the actual output from an example (`got`) - matches the expected output (`want`). These strings are - always considered to match if they are identical; but - depending on what option flags the test runner is using, - several non-exact match types are also possible. See the - documentation for `TestRunner` for more information about - option flags. - """ - - # If `want` contains hex-escaped character such as "\u1234", - # then `want` is a string of six characters(e.g. [\,u,1,2,3,4]). - # On the other hand, `got` could be another sequence of - # characters such as [\u1234], so `want` and `got` should - # be folded to hex-escaped ASCII string to compare. - got = self._toAscii(got) - want = self._toAscii(want) - - # Handle the common case first, for efficiency: - # if they're string-identical, always return true. - if got == want: - return True - - # The values True and False replaced 1 and 0 as the return - # value for boolean comparisons in Python 2.3. - if not (optionflags & DONT_ACCEPT_TRUE_FOR_1): - if (got,want) == ("True\n", "1\n"): - return True - if (got,want) == ("False\n", "0\n"): - return True - - # <BLANKLINE> can be used as a special sequence to signify a - # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. - if not (optionflags & DONT_ACCEPT_BLANKLINE): - # Replace <BLANKLINE> in want with a blank line. - want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), - '', want) - # If a line in got contains only spaces, then remove the - # spaces. - got = re.sub(r'(?m)^[^\S\n]+$', '', got) - if got == want: - return True - - # This flag causes doctest to ignore any differences in the - # contents of whitespace strings. Note that this can be used - # in conjunction with the ELLIPSIS flag. - if optionflags & NORMALIZE_WHITESPACE: - got = ' '.join(got.split()) - want = ' '.join(want.split()) - if got == want: - return True - - # The ELLIPSIS flag says to let the sequence "..." in `want` - # match any substring in `got`. - if optionflags & ELLIPSIS: - if _ellipsis_match(want, got): - return True - - # We didn't find any match; return false. - return False - - # Should we do a fancy diff? - def _do_a_fancy_diff(self, want, got, optionflags): - # Not unless they asked for a fancy diff. - if not optionflags & (REPORT_UDIFF | - REPORT_CDIFF | - REPORT_NDIFF): - return False - - # If expected output uses ellipsis, a meaningful fancy diff is - # too hard ... or maybe not.
In two real-life failures Tim saw, - # a diff was a major help anyway, so this is commented out. - # [todo] _ellipsis_match() knows which pieces do and don't match, - # and could be the basis for a kick-ass diff in this case. - ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want: - ## return False - - # ndiff does intraline difference marking, so can be useful even - # for 1-line differences. - if optionflags & REPORT_NDIFF: - return True - - # The other diff types need at least a few lines to be helpful. - return want.count('\n') > 2 and got.count('\n') > 2 - - def output_difference(self, example, got, optionflags): - """ - Return a string describing the differences between the - expected output for a given example (`example`) and the actual - output (`got`). `optionflags` is the set of option flags used - to compare `want` and `got`. - """ - want = example.want - # If <BLANKLINE>s are being used, then replace blank lines - # with <BLANKLINE> in the actual output string. - if not (optionflags & DONT_ACCEPT_BLANKLINE): - got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got) - - # Check if we should use diff. - if self._do_a_fancy_diff(want, got, optionflags): - # Split want & got into lines. - want_lines = want.splitlines(keepends=True) - got_lines = got.splitlines(keepends=True) - # Use difflib to find their differences. - if optionflags & REPORT_UDIFF: - diff = difflib.unified_diff(want_lines, got_lines, n=2) - diff = list(diff)[2:] # strip the diff header - kind = 'unified diff with -expected +actual' - elif optionflags & REPORT_CDIFF: - diff = difflib.context_diff(want_lines, got_lines, n=2) - diff = list(diff)[2:] # strip the diff header - kind = 'context diff with expected followed by actual' - elif optionflags & REPORT_NDIFF: - engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK) - diff = list(engine.compare(want_lines, got_lines)) - kind = 'ndiff with -expected +actual' - else: - assert 0, 'Bad diff option' - return 'Differences (%s):\n' % kind + _indent(''.join(diff)) - - # If we're not using diff, then simply list the expected - # output followed by the actual output. - if want and got: - return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got)) - elif want: - return 'Expected:\n%sGot nothing\n' % _indent(want) - elif got: - return 'Expected nothing\nGot:\n%s' % _indent(got) - else: - return 'Expected nothing\nGot nothing\n' - -class DocTestFailure(Exception): - """A DocTest example has failed in debugging mode. - - The exception instance has variables: - - - test: the DocTest object being run - - - example: the Example object that failed - - - got: the actual output - """ - def __init__(self, test, example, got): - self.test = test - self.example = example - self.got = got - - def __str__(self): - return str(self.test) - -class UnexpectedException(Exception): - """A DocTest example has encountered an unexpected exception - - The exception instance has variables: - - - test: the DocTest object being run - - - example: the Example object that failed - - - exc_info: the exception info - """ - def __init__(self, test, example, exc_info): - self.test = test - self.example = example - self.exc_info = exc_info - - def __str__(self): - return str(self.test) - -class DebugRunner(DocTestRunner): - r"""Run doc tests but raise an exception as soon as there is a failure. - - If an unexpected exception occurs, an UnexpectedException is raised.
- It contains the test, the example, and the original exception: - - >>> runner = DebugRunner(verbose=False) - >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', - ... {}, 'foo', 'foo.py', 0) - >>> try: - ... runner.run(test) - ... except UnexpectedException as f: - ... failure = f - - >>> failure.test is test - True - - >>> failure.example.want - '42\n' - - >>> exc_info = failure.exc_info - >>> raise exc_info[1] # Already has the traceback - Traceback (most recent call last): - ... - KeyError - - We wrap the original exception to give the calling application - access to the test and example information. - - If the output doesn't match, then a DocTestFailure is raised: - - >>> test = DocTestParser().get_doctest(''' - ... >>> x = 1 - ... >>> x - ... 2 - ... ''', {}, 'foo', 'foo.py', 0) - - >>> try: - ... runner.run(test) - ... except DocTestFailure as f: - ... failure = f - - DocTestFailure objects provide access to the test: - - >>> failure.test is test - True - - As well as to the example: - - >>> failure.example.want - '2\n' - - and the actual output: - - >>> failure.got - '1\n' - - If a failure or error occurs, the globals are left intact: - - >>> del test.globs['__builtins__'] - >>> test.globs - {'x': 1} - - >>> test = DocTestParser().get_doctest(''' - ... >>> x = 2 - ... >>> raise KeyError - ... ''', {}, 'foo', 'foo.py', 0) - - >>> runner.run(test) - Traceback (most recent call last): - ... - doctest.UnexpectedException: <DocTest foo from foo.py:0 (2 examples)> - - >>> del test.globs['__builtins__'] - >>> test.globs - {'x': 2} - - But the globals are cleared if there is no error: - - >>> test = DocTestParser().get_doctest(''' - ... >>> x = 2 - ... ''', {}, 'foo', 'foo.py', 0) - - >>> runner.run(test) - TestResults(failed=0, attempted=1) - - >>> test.globs - {} - - """ - - def run(self, test, compileflags=None, out=None, clear_globs=True): - r = DocTestRunner.run(self, test, compileflags, out, False) - if clear_globs: - test.globs.clear() - return r - - def report_unexpected_exception(self, out, test, example, exc_info): - raise UnexpectedException(test, example, exc_info) - - def report_failure(self, out, test, example, got): - raise DocTestFailure(test, example, got) - -###################################################################### -## 6. Test Functions -###################################################################### -# These should be backwards compatible. - -# For backward compatibility, a global instance of a DocTestRunner -# class, updated by testmod. -master = None - -def testmod(m=None, name=None, globs=None, verbose=None, - report=True, optionflags=0, extraglobs=None, - raise_on_error=False, exclude_empty=False): - """m=None, name=None, globs=None, verbose=None, report=True, - optionflags=0, extraglobs=None, raise_on_error=False, - exclude_empty=False - - Test examples in docstrings in functions and classes reachable - from module m (or the current module if m is not supplied), starting - with m.__doc__. - - Also test examples reachable from dict m.__test__ if it exists. - m.__test__ maps names to functions, classes and strings; - function and class docstrings are tested even if the name is private; - strings are tested directly, as if they were docstrings. - - Return (#failures, #tests). - - See help(doctest) for an overview. - - Optional keyword arg "name" gives the name of the module; by default - use m.__name__. - - Optional keyword arg "globs" gives a dict to be used as the globals - when executing examples; by default, use m.__dict__.
A copy of this - dict is actually used for each docstring, so that each docstring's - examples start with a clean slate. - - Optional keyword arg "extraglobs" gives a dictionary that should be - merged into the globals that are used to execute examples. By - default, no extra globals are used. This is new in 2.4. - - Optional keyword arg "verbose" prints lots of stuff if true, prints - only failures if false; by default, it's true iff "-v" is in sys.argv. - - Optional keyword arg "report" prints a summary at the end when true, - else prints nothing at the end. In verbose mode, the summary is - detailed, else very brief (in fact, empty if all tests passed). - - Optional keyword arg "optionflags" or's together module constants, - and defaults to 0. This is new in 2.3. Possible values (see the - docs for details): - - DONT_ACCEPT_TRUE_FOR_1 - DONT_ACCEPT_BLANKLINE - NORMALIZE_WHITESPACE - ELLIPSIS - SKIP - IGNORE_EXCEPTION_DETAIL - REPORT_UDIFF - REPORT_CDIFF - REPORT_NDIFF - REPORT_ONLY_FIRST_FAILURE - - Optional keyword arg "raise_on_error" raises an exception on the - first unexpected exception or failure. This allows failures to be - post-mortem debugged. - - Advanced tomfoolery: testmod runs methods of a local instance of - class doctest.Tester, then merges the results into (or creates) - global Tester instance doctest.master. Methods of doctest.master - can be called directly too, if you want to do something unusual. - Passing report=0 to testmod is especially useful then, to delay - displaying a summary. Invoke doctest.master.summarize(verbose) - when you're done fiddling. - """ - global master - - # If no module was given, then use __main__. - if m is None: - # DWA - m will still be None if this wasn't invoked from the command - # line, in which case the following TypeError is about as good an error - # as we should expect - m = sys.modules.get('__main__') - - # Check that we were actually given a module. - if not inspect.ismodule(m): - raise TypeError("testmod: module required; %r" % (m,)) - - # If no name was given, then use the module's name. - if name is None: - name = m.__name__ - - # Find, parse, and run all tests in the given module. - finder = DocTestFinder(exclude_empty=exclude_empty) - - if raise_on_error: - runner = DebugRunner(verbose=verbose, optionflags=optionflags) - else: - runner = DocTestRunner(verbose=verbose, optionflags=optionflags) - - for test in finder.find(m, name, globs=globs, extraglobs=extraglobs): - runner.run(test) - - if report: - runner.summarize() - - if master is None: - master = runner - else: - master.merge(runner) - - return TestResults(runner.failures, runner.tries, skipped=runner.skips) - - -def testfile(filename, module_relative=True, name=None, package=None, - globs=None, verbose=None, report=True, optionflags=0, - extraglobs=None, raise_on_error=False, parser=DocTestParser(), - encoding=None): - """ - Test examples in the given file. Return (#failures, #tests). - - Optional keyword arg "module_relative" specifies how filenames - should be interpreted: - - - If "module_relative" is True (the default), then "filename" - specifies a module-relative path. By default, this path is - relative to the calling module's directory; but if the - "package" argument is specified, then it is relative to that - package. To ensure os-independence, "filename" should use - "/" characters to separate path segments, and should not - be an absolute path (i.e., it may not begin with "/"). 
- - - If "module_relative" is False, then "filename" specifies an - os-specific path. The path may be absolute or relative (to - the current working directory). - - Optional keyword arg "name" gives the name of the test; by default - use the file's basename. - - Optional keyword argument "package" is a Python package or the - name of a Python package whose directory should be used as the - base directory for a module relative filename. If no package is - specified, then the calling module's directory is used as the base - directory for module relative filenames. It is an error to - specify "package" if "module_relative" is False. - - Optional keyword arg "globs" gives a dict to be used as the globals - when executing examples; by default, use {}. A copy of this dict - is actually used for each docstring, so that each docstring's - examples start with a clean slate. - - Optional keyword arg "extraglobs" gives a dictionary that should be - merged into the globals that are used to execute examples. By - default, no extra globals are used. - - Optional keyword arg "verbose" prints lots of stuff if true, prints - only failures if false; by default, it's true iff "-v" is in sys.argv. - - Optional keyword arg "report" prints a summary at the end when true, - else prints nothing at the end. In verbose mode, the summary is - detailed, else very brief (in fact, empty if all tests passed). - - Optional keyword arg "optionflags" or's together module constants, - and defaults to 0. Possible values (see the docs for details): - - DONT_ACCEPT_TRUE_FOR_1 - DONT_ACCEPT_BLANKLINE - NORMALIZE_WHITESPACE - ELLIPSIS - SKIP - IGNORE_EXCEPTION_DETAIL - REPORT_UDIFF - REPORT_CDIFF - REPORT_NDIFF - REPORT_ONLY_FIRST_FAILURE - - Optional keyword arg "raise_on_error" raises an exception on the - first unexpected exception or failure. This allows failures to be - post-mortem debugged. - - Optional keyword arg "parser" specifies a DocTestParser (or - subclass) that should be used to extract tests from the files. - - Optional keyword arg "encoding" specifies an encoding that should - be used to convert the file to unicode. - - Advanced tomfoolery: testmod runs methods of a local instance of - class doctest.Tester, then merges the results into (or creates) - global Tester instance doctest.master. Methods of doctest.master - can be called directly too, if you want to do something unusual. - Passing report=0 to testmod is especially useful then, to delay - displaying a summary. Invoke doctest.master.summarize(verbose) - when you're done fiddling. - """ - global master - - if package and not module_relative: - raise ValueError("Package may only be specified for module-" - "relative paths.") - - # Relativize the path - text, filename = _load_testfile(filename, package, module_relative, - encoding or "utf-8") - - # If no name was given, then use the file's name. - if name is None: - name = os.path.basename(filename) - - # Assemble the globals. - if globs is None: - globs = {} - else: - globs = globs.copy() - if extraglobs is not None: - globs.update(extraglobs) - if '__name__' not in globs: - globs['__name__'] = '__main__' - - if raise_on_error: - runner = DebugRunner(verbose=verbose, optionflags=optionflags) - else: - runner = DocTestRunner(verbose=verbose, optionflags=optionflags) - - # Read the file, convert it to a test, and run it. 
- test = parser.get_doctest(text, globs, name, filename, 0) - runner.run(test) - - if report: - runner.summarize() - - if master is None: - master = runner - else: - master.merge(runner) - - return TestResults(runner.failures, runner.tries, skipped=runner.skips) - - -def run_docstring_examples(f, globs, verbose=False, name="NoName", - compileflags=None, optionflags=0): - """ - Test examples in the given object's docstring (`f`), using `globs` - as globals. Optional argument `name` is used in failure messages. - If the optional argument `verbose` is true, then generate output - even if there are no failures. - - `compileflags` gives the set of flags that should be used by the - Python compiler when running the examples. If not specified, then - it will default to the set of future-import flags that apply to - `globs`. - - Optional keyword arg `optionflags` specifies options for the - testing and output. See the documentation for `testmod` for more - information. - """ - # Find, parse, and run all tests in the given module. - finder = DocTestFinder(verbose=verbose, recurse=False) - runner = DocTestRunner(verbose=verbose, optionflags=optionflags) - for test in finder.find(f, name, globs=globs): - runner.run(test, compileflags=compileflags) - -###################################################################### -## 7. Unittest Support -###################################################################### - -_unittest_reportflags = 0 - -def set_unittest_reportflags(flags): - """Sets the unittest option flags. - - The old flag is returned so that a runner could restore the old - value if it wished to: - - >>> import doctest - >>> old = doctest._unittest_reportflags - >>> doctest.set_unittest_reportflags(REPORT_NDIFF | - ... REPORT_ONLY_FIRST_FAILURE) == old - True - - >>> doctest._unittest_reportflags == (REPORT_NDIFF | - ... REPORT_ONLY_FIRST_FAILURE) - True - - Only reporting flags can be set: - - >>> doctest.set_unittest_reportflags(ELLIPSIS) - Traceback (most recent call last): - ... - ValueError: ('Only reporting flags allowed', 8) - - >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF | - ... 
REPORT_ONLY_FIRST_FAILURE) - True - """ - global _unittest_reportflags - - if (flags & REPORTING_FLAGS) != flags: - raise ValueError("Only reporting flags allowed", flags) - old = _unittest_reportflags - _unittest_reportflags = flags - return old - - -class DocTestCase(unittest.TestCase): - - def __init__(self, test, optionflags=0, setUp=None, tearDown=None, - checker=None): - - unittest.TestCase.__init__(self) - self._dt_optionflags = optionflags - self._dt_checker = checker - self._dt_test = test - self._dt_setUp = setUp - self._dt_tearDown = tearDown - - def setUp(self): - test = self._dt_test - self._dt_globs = test.globs.copy() - - if self._dt_setUp is not None: - self._dt_setUp(test) - - def tearDown(self): - test = self._dt_test - - if self._dt_tearDown is not None: - self._dt_tearDown(test) - - # restore the original globs - test.globs.clear() - test.globs.update(self._dt_globs) - - def runTest(self): - test = self._dt_test - old = sys.stdout - new = StringIO() - optionflags = self._dt_optionflags - - if not (optionflags & REPORTING_FLAGS): - # The option flags don't include any reporting flags, - # so add the default reporting flags - optionflags |= _unittest_reportflags - - runner = DocTestRunner(optionflags=optionflags, - checker=self._dt_checker, verbose=False) - - try: - runner.DIVIDER = "-"*70 - results = runner.run(test, out=new.write, clear_globs=False) - if results.skipped == results.attempted: - raise unittest.SkipTest("all examples were skipped") - finally: - sys.stdout = old - - if results.failed: - raise self.failureException(self.format_failure(new.getvalue())) - - def format_failure(self, err): - test = self._dt_test - if test.lineno is None: - lineno = 'unknown line number' - else: - lineno = '%s' % test.lineno - lname = '.'.join(test.name.split('.')[-1:]) - return ('Failed doctest test for %s\n' - ' File "%s", line %s, in %s\n\n%s' - % (test.name, test.filename, lineno, lname, err) - ) - - def debug(self): - r"""Run the test case without results and without catching exceptions - - The unit test framework includes a debug method on test cases - and test suites to support post-mortem debugging. The test code - is run in such a way that errors are not caught. This way a - caller can catch the errors and initiate post-mortem debugging. - - The DocTestCase provides a debug method that raises - UnexpectedException errors if there is an unexpected - exception: - - >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', - ... {}, 'foo', 'foo.py', 0) - >>> case = DocTestCase(test) - >>> try: - ... case.debug() - ... except UnexpectedException as f: - ... failure = f - - The UnexpectedException contains the test, the example, and - the original exception: - - >>> failure.test is test - True - - >>> failure.example.want - '42\n' - - >>> exc_info = failure.exc_info - >>> raise exc_info[1] # Already has the traceback - Traceback (most recent call last): - ... - KeyError - - If the output doesn't match, then a DocTestFailure is raised: - - >>> test = DocTestParser().get_doctest(''' - ... >>> x = 1 - ... >>> x - ... 2 - ... ''', {}, 'foo', 'foo.py', 0) - >>> case = DocTestCase(test) - - >>> try: - ... case.debug() - ... except DocTestFailure as f: - ... 
failure = f - - DocTestFailure objects provide access to the test: - - >>> failure.test is test - True - - As well as to the example: - - >>> failure.example.want - '2\n' - - and the actual output: - - >>> failure.got - '1\n' - - """ - - self.setUp() - runner = DebugRunner(optionflags=self._dt_optionflags, - checker=self._dt_checker, verbose=False) - runner.run(self._dt_test, clear_globs=False) - self.tearDown() - - def id(self): - return self._dt_test.name - - def __eq__(self, other): - if type(self) is not type(other): - return NotImplemented - - return self._dt_test == other._dt_test and \ - self._dt_optionflags == other._dt_optionflags and \ - self._dt_setUp == other._dt_setUp and \ - self._dt_tearDown == other._dt_tearDown and \ - self._dt_checker == other._dt_checker - - def __hash__(self): - return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown, - self._dt_checker)) - - def __repr__(self): - name = self._dt_test.name.split('.') - return "%s (%s)" % (name[-1], '.'.join(name[:-1])) - - __str__ = object.__str__ - - def shortDescription(self): - return "Doctest: " + self._dt_test.name - -class SkipDocTestCase(DocTestCase): - def __init__(self, module): - self.module = module - DocTestCase.__init__(self, None) - - def setUp(self): - self.skipTest("DocTestSuite will not work with -O2 and above") - - def test_skip(self): - pass - - def shortDescription(self): - return "Skipping tests from %s" % self.module.__name__ - - __str__ = shortDescription - - -class _DocTestSuite(unittest.TestSuite): - - def _removeTestAtIndex(self, index): - pass - - -def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, - **options): - """ - Convert doctest tests for a module to a unittest test suite. - - This converts each documentation string in a module that - contains doctest tests to a unittest test case. If any of the - tests in a doc string fail, then the test case fails. An exception - is raised showing the name of the file containing the test and a - (sometimes approximate) line number. - - The `module` argument provides the module to be tested. The argument - can be either a module or a module name. - - If no argument is given, the calling module is used. - - A number of options may be provided as keyword arguments: - - setUp - A set-up function. This is called before running the - tests in each file. The setUp function will be passed a DocTest - object. The setUp function can access the test globals as the - globs attribute of the test passed. - - tearDown - A tear-down function. This is called after running the - tests in each file. The tearDown function will be passed a DocTest - object. The tearDown function can access the test globals as the - globs attribute of the test passed. - - globs - A dictionary containing initial global variables for the tests. - - optionflags - A set of doctest option flags expressed as an integer. 
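(Editor's note, not part of the diff: as the DocTestSuite docstring above describes, the usual integration point is unittest's load_tests protocol. A minimal sketch, where mymodule is a hypothetical module whose docstrings contain doctests:)

import doctest

import mymodule  # hypothetical module whose docstrings contain doctests

def load_tests(loader, tests, ignore):
    # Each docstring that contains examples becomes a unittest test case.
    tests.addTests(doctest.DocTestSuite(mymodule,
                                        optionflags=doctest.ELLIPSIS))
    return tests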
- """ - - if test_finder is None: - test_finder = DocTestFinder() - - module = _normalize_module(module) - tests = test_finder.find(module, globs=globs, extraglobs=extraglobs) - - if not tests and sys.flags.optimize >=2: - # Skip doctests when running with -O2 - suite = _DocTestSuite() - suite.addTest(SkipDocTestCase(module)) - return suite - - tests.sort() - suite = _DocTestSuite() - - for test in tests: - if len(test.examples) == 0: - continue - if not test.filename: - filename = module.__file__ - if filename[-4:] == ".pyc": - filename = filename[:-1] - test.filename = filename - suite.addTest(DocTestCase(test, **options)) - - return suite - -class DocFileCase(DocTestCase): - - def id(self): - return '_'.join(self._dt_test.name.split('.')) - - def __repr__(self): - return self._dt_test.filename - - def format_failure(self, err): - return ('Failed doctest test for %s\n File "%s", line 0\n\n%s' - % (self._dt_test.name, self._dt_test.filename, err) - ) - -def DocFileTest(path, module_relative=True, package=None, - globs=None, parser=DocTestParser(), - encoding=None, **options): - if globs is None: - globs = {} - else: - globs = globs.copy() - - if package and not module_relative: - raise ValueError("Package may only be specified for module-" - "relative paths.") - - # Relativize the path. - doc, path = _load_testfile(path, package, module_relative, - encoding or "utf-8") - - if "__file__" not in globs: - globs["__file__"] = path - - # Find the file and read it. - name = os.path.basename(path) - - # Convert it to a test, and wrap it in a DocFileCase. - test = parser.get_doctest(doc, globs, name, path, 0) - return DocFileCase(test, **options) - -def DocFileSuite(*paths, **kw): - """A unittest suite for one or more doctest files. - - The path to each doctest file is given as a string; the - interpretation of that string depends on the keyword argument - "module_relative". - - A number of options may be provided as keyword arguments: - - module_relative - If "module_relative" is True, then the given file paths are - interpreted as os-independent module-relative paths. By - default, these paths are relative to the calling module's - directory; but if the "package" argument is specified, then - they are relative to that package. To ensure os-independence, - "filename" should use "/" characters to separate path - segments, and may not be an absolute path (i.e., it may not - begin with "/"). - - If "module_relative" is False, then the given file paths are - interpreted as os-specific paths. These paths may be absolute - or relative (to the current working directory). - - package - A Python package or the name of a Python package whose directory - should be used as the base directory for module relative paths. - If "package" is not specified, then the calling module's - directory is used as the base directory for module relative - filenames. It is an error to specify "package" if - "module_relative" is False. - - setUp - A set-up function. This is called before running the - tests in each file. The setUp function will be passed a DocTest - object. The setUp function can access the test globals as the - globs attribute of the test passed. - - tearDown - A tear-down function. This is called after running the - tests in each file. The tearDown function will be passed a DocTest - object. The tearDown function can access the test globals as the - globs attribute of the test passed. - - globs - A dictionary containing initial global variables for the tests. 
- - optionflags - A set of doctest option flags expressed as an integer. - - parser - A DocTestParser (or subclass) that should be used to extract - tests from the files. - - encoding - An encoding that will be used to convert the files to unicode. - """ - suite = _DocTestSuite() - - # We do this here so that _normalize_module is called at the right - # level. If it were called in DocFileTest, then this function - # would be the caller and we might guess the package incorrectly. - if kw.get('module_relative', True): - kw['package'] = _normalize_module(kw.get('package')) - - for path in paths: - suite.addTest(DocFileTest(path, **kw)) - - return suite - -###################################################################### -## 8. Debugging Support -###################################################################### - -def script_from_examples(s): - r"""Extract script from text with examples. - - Converts text with examples to a Python script. Example input is - converted to regular code. Example output and all other words - are converted to comments: - - >>> text = ''' - ... Here are examples of simple math. - ... - ... Python has super accurate integer addition - ... - ... >>> 2 + 2 - ... 5 - ... - ... And very friendly error messages: - ... - ... >>> 1/0 - ... To Infinity - ... And - ... Beyond - ... - ... You can use logic if you want: - ... - ... >>> if 0: - ... ... blah - ... ... blah - ... ... - ... - ... Ho hum - ... ''' - - >>> print(script_from_examples(text)) - # Here are examples of simple math. - # - # Python has super accurate integer addition - # - 2 + 2 - # Expected: - ## 5 - # - # And very friendly error messages: - # - 1/0 - # Expected: - ## To Infinity - ## And - ## Beyond - # - # You can use logic if you want: - # - if 0: - blah - blah - # - # Ho hum - - """ - output = [] - for piece in DocTestParser().parse(s): - if isinstance(piece, Example): - # Add the example's source code (strip trailing NL) - output.append(piece.source[:-1]) - # Add the expected output: - want = piece.want - if want: - output.append('# Expected:') - output += ['## '+l for l in want.split('\n')[:-1]] - else: - # Add non-example text. - output += [_comment_line(l) - for l in piece.split('\n')[:-1]] - - # Trim junk on both ends. - while output and output[-1] == '#': - output.pop() - while output and output[0] == '#': - output.pop(0) - # Combine the output, and return it. - # Add a courtesy newline to prevent exec from choking (see bug #1172785) - return '\n'.join(output) + '\n' - -def testsource(module, name): - """Extract the test sources from a doctest docstring as a script. - - Provide the module (or dotted name of the module) containing the - test to be debugged and the name (within the module) of the object - with the doc string with tests to be debugged. - """ - module = _normalize_module(module) - tests = DocTestFinder().find(module) - test = [t for t in tests if t.name == name] - if not test: - raise ValueError(name, "not found in tests") - test = test[0] - testsrc = script_from_examples(test.docstring) - return testsrc - -def debug_src(src, pm=False, globs=None): - """Debug a single doctest docstring, in argument `src`'""" - testsrc = script_from_examples(src) - debug_script(testsrc, pm, globs) - -def debug_script(src, pm=False, globs=None): - "Debug a test script. `src` is the script, as a string." 
- import pdb - - if globs: - globs = globs.copy() - else: - globs = {} - - if pm: - try: - exec(src, globs, globs) - except: - print(sys.exc_info()[1]) - p = pdb.Pdb(nosigint=True) - p.reset() - p.interaction(None, sys.exc_info()[2]) - else: - pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs) - -def debug(module, name, pm=False): - """Debug a single doctest docstring. - - Provide the module (or dotted name of the module) containing the - test to be debugged and the name (within the module) of the object - with the docstring with tests to be debugged. - """ - module = _normalize_module(module) - testsrc = testsource(module, name) - debug_script(testsrc, pm, module.__dict__) - -###################################################################### -## 9. Example Usage -###################################################################### -class _TestClass: - """ - A pointless class, for sanity-checking of docstring testing. - - Methods: - square() - get() - - >>> _TestClass(13).get() + _TestClass(-12).get() - 1 - >>> hex(_TestClass(13).square().get()) - '0xa9' - """ - - def __init__(self, val): - """val -> _TestClass object with associated value val. - - >>> t = _TestClass(123) - >>> print(t.get()) - 123 - """ - - self.val = val - - def square(self): - """square() -> square TestClass's associated value - - >>> _TestClass(13).square().get() - 169 - """ - - self.val = self.val ** 2 - return self - - def get(self): - """get() -> return TestClass's associated value. - - >>> x = _TestClass(-42) - >>> print(x.get()) - -42 - """ - - return self.val - -__test__ = {"_TestClass": _TestClass, - "string": r""" - Example of a string object, searched as-is. - >>> x = 1; y = 2 - >>> x + y, x * y - (3, 2) - """, - - "bool-int equivalence": r""" - In 2.2, boolean expressions displayed - 0 or 1. By default, we still accept - them. This can be disabled by passing - DONT_ACCEPT_TRUE_FOR_1 to the new - optionflags argument. - >>> 4 == 4 - 1 - >>> 4 == 4 - True - >>> 4 > 4 - 0 - >>> 4 > 4 - False - """, - - "blank lines": r""" - Blank lines can be marked with <BLANKLINE>: - >>> print('foo\n\nbar\n') - foo - <BLANKLINE> - bar - <BLANKLINE> - """, - - "ellipsis": r""" - If the ellipsis flag is used, then '...' can be used to - elide substrings in the desired output: - >>> print(list(range(1000))) #doctest: +ELLIPSIS - [0, 1, 2, ..., 999] - """, - - "whitespace normalization": r""" - If the whitespace normalization flag is used, then - differences in whitespace are ignored.
- >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29] - """, - } - - -def _test(): - import argparse - - parser = argparse.ArgumentParser(description="doctest runner") - parser.add_argument('-v', '--verbose', action='store_true', default=False, - help='print very verbose output for all tests') - parser.add_argument('-o', '--option', action='append', - choices=OPTIONFLAGS_BY_NAME.keys(), default=[], - help=('specify a doctest option flag to apply' - ' to the test run; may be specified more' - ' than once to apply multiple options')) - parser.add_argument('-f', '--fail-fast', action='store_true', - help=('stop running tests after first failure (this' - ' is a shorthand for -o FAIL_FAST, and is' - ' in addition to any other -o options)')) - parser.add_argument('file', nargs='+', - help='file containing the tests to run') - args = parser.parse_args() - testfiles = args.file - # Verbose used to be handled by the "inspect argv" magic in DocTestRunner, - # but since we are using argparse we are passing it manually now. - verbose = args.verbose - options = 0 - for option in args.option: - options |= OPTIONFLAGS_BY_NAME[option] - if args.fail_fast: - options |= FAIL_FAST - for filename in testfiles: - if filename.endswith(".py"): - # It is a module -- insert its dir into sys.path and try to - # import it. If it is part of a package, that possibly - # won't work because of package imports. - dirname, filename = os.path.split(filename) - sys.path.insert(0, dirname) - m = __import__(filename[:-3]) - del sys.path[0] - failures, _ = testmod(m, verbose=verbose, optionflags=options) - else: - failures, _ = testfile(filename, module_relative=False, - verbose=verbose, optionflags=options) - if failures: - return 1 - return 0 - - -if __name__ == "__main__": - sys.exit(_test()) diff --git a/Python313_13_x64_Template/Lib/email/__init__.py b/Python313_13_x64_Template/Lib/email/__init__.py deleted file mode 100644 index 9fa47783..00000000 --- a/Python313_13_x64_Template/Lib/email/__init__.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2001-2007 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""A package for parsing, handling, and generating email messages.""" - -__all__ = [ - 'base64mime', - 'charset', - 'encoders', - 'errors', - 'feedparser', - 'generator', - 'header', - 'iterators', - 'message', - 'message_from_file', - 'message_from_binary_file', - 'message_from_string', - 'message_from_bytes', - 'mime', - 'parser', - 'quoprimime', - 'utils', - ] - - -# Some convenience routines. Don't import Parser and Message as side-effects -# of importing email since those cascadingly import most of the rest of the -# email package. -def message_from_string(s, *args, **kws): - """Parse a string into a Message object model. - - Optional _class and strict are passed to the Parser constructor. - """ - from email.parser import Parser - return Parser(*args, **kws).parsestr(s) - -def message_from_bytes(s, *args, **kws): - """Parse a bytes string into a Message object model. - - Optional _class and strict are passed to the Parser constructor. - """ - from email.parser import BytesParser - return BytesParser(*args, **kws).parsebytes(s) - -def message_from_file(fp, *args, **kws): - """Read a file and parse its contents into a Message object model. - - Optional _class and strict are passed to the Parser constructor. 
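(Editor's note, not part of the diff: the convenience functions in this deleted email/__init__.py are thin wrappers around the parsers. An illustrative round trip, using only documented stdlib calls:)

import email

raw = "From: alice@example.com\nSubject: hello\n\nHi!\n"
msg = email.message_from_string(raw)  # delegates to email.parser.Parser
print(msg["Subject"])                 # -> hello
print(msg.get_payload())              # -> Hi!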
- """ - from email.parser import Parser - return Parser(*args, **kws).parse(fp) - -def message_from_binary_file(fp, *args, **kws): - """Read a binary file and parse its contents into a Message object model. - - Optional _class and strict are passed to the Parser constructor. - """ - from email.parser import BytesParser - return BytesParser(*args, **kws).parse(fp) diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 5aa471ca..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/_encoded_words.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/_encoded_words.cpython-313.pyc deleted file mode 100644 index 022e5ec3..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/_encoded_words.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/_parseaddr.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/_parseaddr.cpython-313.pyc deleted file mode 100644 index 1dc96179..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/_parseaddr.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/_policybase.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/_policybase.cpython-313.pyc deleted file mode 100644 index 566d97a6..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/_policybase.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/base64mime.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/base64mime.cpython-313.pyc deleted file mode 100644 index 10ac8fe6..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/base64mime.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/charset.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/charset.cpython-313.pyc deleted file mode 100644 index c2a23f87..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/charset.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/encoders.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/encoders.cpython-313.pyc deleted file mode 100644 index 0b26190d..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/encoders.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/errors.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/errors.cpython-313.pyc deleted file mode 100644 index d9dac107..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/errors.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/feedparser.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/feedparser.cpython-313.pyc deleted file mode 100644 index 82882787..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/feedparser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/header.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/header.cpython-313.pyc deleted file mode 100644 index 6ca012eb..00000000 Binary files 
a/Python313_13_x64_Template/Lib/email/__pycache__/header.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/iterators.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/iterators.cpython-313.pyc deleted file mode 100644 index 340d795a..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/iterators.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/message.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/message.cpython-313.pyc deleted file mode 100644 index 1217a864..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/message.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/parser.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/parser.cpython-313.pyc deleted file mode 100644 index 043e27b7..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/parser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/quoprimime.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/quoprimime.cpython-313.pyc deleted file mode 100644 index 118eae1f..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/quoprimime.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/__pycache__/utils.cpython-313.pyc b/Python313_13_x64_Template/Lib/email/__pycache__/utils.cpython-313.pyc deleted file mode 100644 index f05ca152..00000000 Binary files a/Python313_13_x64_Template/Lib/email/__pycache__/utils.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/email/_parseaddr.py b/Python313_13_x64_Template/Lib/email/_parseaddr.py deleted file mode 100644 index 565af0cf..00000000 --- a/Python313_13_x64_Template/Lib/email/_parseaddr.py +++ /dev/null @@ -1,563 +0,0 @@ -# Copyright (C) 2002-2007 Python Software Foundation -# Contact: email-sig@python.org - -"""Email address parsing code. - -Lifted directly from rfc822.py. This should eventually be rewritten. -""" - -__all__ = [ - 'mktime_tz', - 'parsedate', - 'parsedate_tz', - 'quote', - ] - -import time - -SPACE = ' ' -EMPTYSTRING = '' -COMMASPACE = ', ' - -# Parse a date field -_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', - 'aug', 'sep', 'oct', 'nov', 'dec', - 'january', 'february', 'march', 'april', 'may', 'june', 'july', - 'august', 'september', 'october', 'november', 'december'] - -_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] - -# The timezone table does not include the military time zones defined -# in RFC822, other than Z. According to RFC1123, the description in -# RFC822 gets the signs wrong, so we can't rely on any such time -# zones. RFC1123 recommends that numeric timezone indicators be used -# instead of timezone names. - -_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0, - 'AST': -400, 'ADT': -300, # Atlantic (used in Canada) - 'EST': -500, 'EDT': -400, # Eastern - 'CST': -600, 'CDT': -500, # Central - 'MST': -700, 'MDT': -600, # Mountain - 'PST': -800, 'PDT': -700 # Pacific - } - - -def parsedate_tz(data): - """Convert a date string to a time tuple. - - Accounts for military timezones. - """ - res = _parsedate_tz(data) - if not res: - return - if res[9] is None: - res[9] = 0 - return tuple(res) - -def _parsedate_tz(data): - """Convert date to extended time tuple. 
- - The last (additional) element is the time zone offset in seconds, except if - the timezone was specified as -0000. In that case the last element is - None. This indicates a UTC timestamp that explicitly declaims knowledge of - the source timezone, as opposed to a +0000 timestamp that indicates the - source timezone really was UTC. - - """ - if not data: - return None - data = data.split() - if not data: # This happens for whitespace-only input. - return None - # The FWS after the comma after the day-of-week is optional, so search and - # adjust for this. - if data[0].endswith(',') or data[0].lower() in _daynames: - # There's a dayname here. Skip it - del data[0] - else: - i = data[0].rfind(',') - if i >= 0: - data[0] = data[0][i+1:] - if len(data) == 3: # RFC 850 date, deprecated - stuff = data[0].split('-') - if len(stuff) == 3: - data = stuff + data[1:] - if len(data) == 4: - s = data[3] - i = s.find('+') - if i == -1: - i = s.find('-') - if i > 0: - data[3:] = [s[:i], s[i:]] - else: - data.append('') # Dummy tz - if len(data) < 5: - return None - data = data[:5] - [dd, mm, yy, tm, tz] = data - if not (dd and mm and yy): - return None - mm = mm.lower() - if mm not in _monthnames: - dd, mm = mm, dd.lower() - if mm not in _monthnames: - return None - mm = _monthnames.index(mm) + 1 - if mm > 12: - mm -= 12 - if dd[-1] == ',': - dd = dd[:-1] - i = yy.find(':') - if i > 0: - yy, tm = tm, yy - if yy[-1] == ',': - yy = yy[:-1] - if not yy: - return None - if not yy[0].isdigit(): - yy, tz = tz, yy - if tm[-1] == ',': - tm = tm[:-1] - tm = tm.split(':') - if len(tm) == 2: - [thh, tmm] = tm - tss = '0' - elif len(tm) == 3: - [thh, tmm, tss] = tm - elif len(tm) == 1 and '.' in tm[0]: - # Some non-compliant MUAs use '.' to separate time elements. - tm = tm[0].split('.') - if len(tm) == 2: - [thh, tmm] = tm - tss = 0 - elif len(tm) == 3: - [thh, tmm, tss] = tm - else: - return None - else: - return None - try: - yy = int(yy) - dd = int(dd) - thh = int(thh) - tmm = int(tmm) - tss = int(tss) - except ValueError: - return None - # Check for a yy specified in two-digit format, then convert it to the - # appropriate four-digit format, according to the POSIX standard. RFC 822 - # calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822) already - # mandated a 4-digit yy, and RFC 5322 (which obsoletes RFC 2822) continues - # this requirement. For more information, see the documentation for - # the time module. - if yy < 100: - # The year is between 1969 and 1999 (inclusive). - if yy > 68: - yy += 1900 - # The year is between 2000 and 2068 (inclusive). - else: - yy += 2000 - tzoffset = None - tz = tz.upper() - if tz in _timezones: - tzoffset = _timezones[tz] - else: - try: - tzoffset = int(tz) - except ValueError: - pass - if tzoffset==0 and tz.startswith('-'): - tzoffset = None - # Convert a timezone offset into seconds ; -0500 -> -18000 - if tzoffset: - if tzoffset < 0: - tzsign = -1 - tzoffset = -tzoffset - else: - tzsign = 1 - tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60) - # Daylight Saving Time flag is set to -1, since DST is unknown. 
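(Editor's note, not part of the diff: a worked example of the offset conversion just above, illustrative only. For tz == "-0500", int(tz) is -500, so the sign is split off and the HHMM digits become seconds:)

tz = "-0500"                       # RFC 2822 numeric zone indicator
tzoffset = int(tz)                 # -500
tzsign = -1 if tzoffset < 0 else 1
tzoffset = abs(tzoffset)
seconds = tzsign * ((tzoffset // 100) * 3600 + (tzoffset % 100) * 60)
assert seconds == -18000           # -0500 -> -18000, as the comment says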
- return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset] - - -def parsedate(data): - """Convert a time string to a time tuple.""" - t = parsedate_tz(data) - if isinstance(t, tuple): - return t[:9] - else: - return t - - -def mktime_tz(data): - """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp.""" - if data[9] is None: - # No zone info, so localtime is better assumption than GMT - return time.mktime(data[:8] + (-1,)) - else: - # Delay the import, since mktime_tz is rarely used - import calendar - - t = calendar.timegm(data) - return t - data[9] - - -def quote(str): - """Prepare string to be used in a quoted string. - - Turns backslash and double quote characters into quoted pairs. These - are the only characters that need to be quoted inside a quoted string. - Does not add the surrounding double quotes. - """ - return str.replace('\\', '\\\\').replace('"', '\\"') - - -class AddrlistClass: - """Address parser class by Ben Escoto. - - To understand what this class does, it helps to have a copy of RFC 2822 in - front of you. - - Note: this class interface is deprecated and may be removed in the future. - Use email.utils.AddressList instead. - """ - - def __init__(self, field): - """Initialize a new instance. - - `field' is an unparsed address header field, containing - one or more addresses. - """ - self.specials = '()<>@,:;.\"[]' - self.pos = 0 - self.LWS = ' \t' - self.CR = '\r\n' - self.FWS = self.LWS + self.CR - self.atomends = self.specials + self.LWS + self.CR - # Note that RFC 2822 section 4.1 introduced '.' as obs-phrase to handle - # existing practice (periods in display names), even though it was not - # allowed in RFC 822. RFC 5322 section 4.1 (which obsoletes RFC 2822) - # continues this requirement. We must recognize obsolete syntax, so - # allow dots in phrases. - self.phraseends = self.atomends.replace('.', '') - self.field = field - self.commentlist = [] - - def gotonext(self): - """Skip white space and extract comments.""" - wslist = [] - while self.pos < len(self.field): - if self.field[self.pos] in self.LWS + '\n\r': - if self.field[self.pos] not in '\n\r': - wslist.append(self.field[self.pos]) - self.pos += 1 - elif self.field[self.pos] == '(': - self.commentlist.append(self.getcomment()) - else: - break - return EMPTYSTRING.join(wslist) - - def getaddrlist(self): - """Parse all addresses. - - Returns a list containing all of the addresses. - """ - result = [] - while self.pos < len(self.field): - ad = self.getaddress() - if ad: - result += ad - else: - result.append(('', '')) - return result - - def getaddress(self): - """Parse the next address.""" - self.commentlist = [] - self.gotonext() - - oldpos = self.pos - oldcl = self.commentlist - plist = self.getphraselist() - - self.gotonext() - returnlist = [] - - if self.pos >= len(self.field): - # Bad email address technically, no domain. 
- if plist: - returnlist = [(SPACE.join(self.commentlist), plist[0])] - - elif self.field[self.pos] in '.@': - # email address is just an addrspec - # this isn't very efficient since we start over - self.pos = oldpos - self.commentlist = oldcl - addrspec = self.getaddrspec() - returnlist = [(SPACE.join(self.commentlist), addrspec)] - - elif self.field[self.pos] == ':': - # address is a group - returnlist = [] - - fieldlen = len(self.field) - self.pos += 1 - while self.pos < len(self.field): - self.gotonext() - if self.pos < fieldlen and self.field[self.pos] == ';': - self.pos += 1 - break - returnlist = returnlist + self.getaddress() - - elif self.field[self.pos] == '<': - # Address is a phrase then a route addr - routeaddr = self.getrouteaddr() - - if self.commentlist: - returnlist = [(SPACE.join(plist) + ' (' + - ' '.join(self.commentlist) + ')', routeaddr)] - else: - returnlist = [(SPACE.join(plist), routeaddr)] - - else: - if plist: - returnlist = [(SPACE.join(self.commentlist), plist[0])] - elif self.field[self.pos] in self.specials: - self.pos += 1 - - self.gotonext() - if self.pos < len(self.field) and self.field[self.pos] == ',': - self.pos += 1 - return returnlist - - def getrouteaddr(self): - """Parse a route address (Return-path value). - - This method just skips all the route stuff and returns the addrspec. - """ - if self.field[self.pos] != '<': - return - - expectroute = False - self.pos += 1 - self.gotonext() - adlist = '' - while self.pos < len(self.field): - if expectroute: - self.getdomain() - expectroute = False - elif self.field[self.pos] == '>': - self.pos += 1 - break - elif self.field[self.pos] == '@': - self.pos += 1 - expectroute = True - elif self.field[self.pos] == ':': - self.pos += 1 - else: - adlist = self.getaddrspec() - self.pos += 1 - break - self.gotonext() - - return adlist - - def getaddrspec(self): - """Parse an RFC 2822 addr-spec.""" - aslist = [] - - self.gotonext() - while self.pos < len(self.field): - preserve_ws = True - if self.field[self.pos] == '.': - if aslist and not aslist[-1].strip(): - aslist.pop() - aslist.append('.') - self.pos += 1 - preserve_ws = False - elif self.field[self.pos] == '"': - aslist.append('"%s"' % quote(self.getquote())) - elif self.field[self.pos] in self.atomends: - if aslist and not aslist[-1].strip(): - aslist.pop() - break - else: - aslist.append(self.getatom()) - ws = self.gotonext() - if preserve_ws and ws: - aslist.append(ws) - - if self.pos >= len(self.field) or self.field[self.pos] != '@': - return EMPTYSTRING.join(aslist) - - aslist.append('@') - self.pos += 1 - self.gotonext() - domain = self.getdomain() - if not domain: - # Invalid domain, return an empty address instead of returning a - # local part to denote failed parsing. - return EMPTYSTRING - return EMPTYSTRING.join(aslist) + domain - - def getdomain(self): - """Get the complete domain name from an address.""" - sdlist = [] - while self.pos < len(self.field): - if self.field[self.pos] in self.LWS: - self.pos += 1 - elif self.field[self.pos] == '(': - self.commentlist.append(self.getcomment()) - elif self.field[self.pos] == '[': - sdlist.append(self.getdomainliteral()) - elif self.field[self.pos] == '.': - self.pos += 1 - sdlist.append('.') - elif self.field[self.pos] == '@': - # bpo-34155: Don't parse domains with two `@` like - # `a@malicious.org@important.com`. 
- return EMPTYSTRING - elif self.field[self.pos] in self.atomends: - break - else: - sdlist.append(self.getatom()) - return EMPTYSTRING.join(sdlist) - - def getdelimited(self, beginchar, endchars, allowcomments=True): - """Parse a header fragment delimited by special characters. - - `beginchar' is the start character for the fragment. - If self is not looking at an instance of `beginchar' then - getdelimited returns the empty string. - - `endchars' is a sequence of allowable end-delimiting characters. - Parsing stops when one of these is encountered. - - If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed - within the parsed fragment. - """ - if self.field[self.pos] != beginchar: - return '' - - slist = [''] - quote = False - self.pos += 1 - while self.pos < len(self.field): - if quote: - slist.append(self.field[self.pos]) - quote = False - elif self.field[self.pos] in endchars: - self.pos += 1 - break - elif allowcomments and self.field[self.pos] == '(': - slist.append(self.getcomment()) - continue # have already advanced pos from getcomment - elif self.field[self.pos] == '\\': - quote = True - else: - slist.append(self.field[self.pos]) - self.pos += 1 - - return EMPTYSTRING.join(slist) - - def getquote(self): - """Get a quote-delimited fragment from self's field.""" - return self.getdelimited('"', '"\r', False) - - def getcomment(self): - """Get a parenthesis-delimited fragment from self's field.""" - return self.getdelimited('(', ')\r', True) - - def getdomainliteral(self): - """Parse an RFC 2822 domain-literal.""" - return '[%s]' % self.getdelimited('[', ']\r', False) - - def getatom(self, atomends=None): - """Parse an RFC 2822 atom. - - Optional atomends specifies a different set of end token delimiters - (the default is to use self.atomends). This is used e.g. in - getphraselist() since phrase endings must not include the `.' (which - is legal in phrases).""" - atomlist = [''] - if atomends is None: - atomends = self.atomends - - while self.pos < len(self.field): - if self.field[self.pos] in atomends: - break - else: - atomlist.append(self.field[self.pos]) - self.pos += 1 - - return EMPTYSTRING.join(atomlist) - - def getphraselist(self): - """Parse a sequence of RFC 2822 phrases. - - A phrase is a sequence of words, which are in turn either RFC 2822 - atoms or quoted-strings. Phrases are canonicalized by squeezing all - runs of continuous whitespace into one space. 
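A short sketch of how these delimited-fragment helpers behave on an internal AddrlistClass instance (the input is illustrative):

from email._parseaddr import AddrlistClass

p = AddrlistClass('"Jane Q." (note) jane@example.com')
p.getquote()    # -> 'Jane Q.'  (getdelimited('"', '"\r', False))
p.gotonext()    # skips whitespace and collects the comment
p.commentlist   # -> ['note']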
- """ - plist = [] - - while self.pos < len(self.field): - if self.field[self.pos] in self.FWS: - self.pos += 1 - elif self.field[self.pos] == '"': - plist.append(self.getquote()) - elif self.field[self.pos] == '(': - self.commentlist.append(self.getcomment()) - elif self.field[self.pos] in self.phraseends: - break - else: - plist.append(self.getatom(self.phraseends)) - - return plist - -class AddressList(AddrlistClass): - """An AddressList encapsulates a list of parsed RFC 2822 addresses.""" - def __init__(self, field): - AddrlistClass.__init__(self, field) - if field: - self.addresslist = self.getaddrlist() - else: - self.addresslist = [] - - def __len__(self): - return len(self.addresslist) - - def __add__(self, other): - # Set union - newaddr = AddressList(None) - newaddr.addresslist = self.addresslist[:] - for x in other.addresslist: - if not x in self.addresslist: - newaddr.addresslist.append(x) - return newaddr - - def __iadd__(self, other): - # Set union, in-place - for x in other.addresslist: - if not x in self.addresslist: - self.addresslist.append(x) - return self - - def __sub__(self, other): - # Set difference - newaddr = AddressList(None) - for x in self.addresslist: - if not x in other.addresslist: - newaddr.addresslist.append(x) - return newaddr - - def __isub__(self, other): - # Set difference, in-place - for x in other.addresslist: - if x in self.addresslist: - self.addresslist.remove(x) - return self - - def __getitem__(self, index): - # Make indexing, slices, and 'in' work - return self.addresslist[index] diff --git a/Python313_13_x64_Template/Lib/email/_policybase.py b/Python313_13_x64_Template/Lib/email/_policybase.py deleted file mode 100644 index 0d486c90..00000000 --- a/Python313_13_x64_Template/Lib/email/_policybase.py +++ /dev/null @@ -1,382 +0,0 @@ -"""Policy framework for the email package. - -Allows fine grained feature control of how the package parses and emits data. -""" - -import abc -from email import header -from email import charset as _charset -from email.utils import _has_surrogates - -__all__ = [ - 'Policy', - 'Compat32', - 'compat32', - ] - - -class _PolicyBase: - - """Policy Object basic framework. - - This class is useless unless subclassed. A subclass should define - class attributes with defaults for any values that are to be - managed by the Policy object. The constructor will then allow - non-default values to be set for these attributes at instance - creation time. The instance will be callable, taking these same - attributes keyword arguments, and returning a new instance - identical to the called instance except for those values changed - by the keyword arguments. Instances may be added, yielding new - instances with any non-default values from the right hand - operand overriding those in the left hand operand. That is, - - A + B == A() - - The repr of an instance can be used to reconstruct the object - if and only if the repr of the values can be used to reconstruct - those values. - - """ - - def __init__(self, **kw): - """Create new Policy, possibly overriding some defaults. - - See class docstring for a list of overridable attributes. 
- - """ - for name, value in kw.items(): - if hasattr(self, name): - super(_PolicyBase,self).__setattr__(name, value) - else: - raise TypeError( - "{!r} is an invalid keyword argument for {}".format( - name, self.__class__.__name__)) - - def __repr__(self): - args = [ "{}={!r}".format(name, value) - for name, value in self.__dict__.items() ] - return "{}({})".format(self.__class__.__name__, ', '.join(args)) - - def clone(self, **kw): - """Return a new instance with specified attributes changed. - - The new instance has the same attribute values as the current object, - except for the changes passed in as keyword arguments. - - """ - newpolicy = self.__class__.__new__(self.__class__) - for attr, value in self.__dict__.items(): - object.__setattr__(newpolicy, attr, value) - for attr, value in kw.items(): - if not hasattr(self, attr): - raise TypeError( - "{!r} is an invalid keyword argument for {}".format( - attr, self.__class__.__name__)) - object.__setattr__(newpolicy, attr, value) - return newpolicy - - def __setattr__(self, name, value): - if hasattr(self, name): - msg = "{!r} object attribute {!r} is read-only" - else: - msg = "{!r} object has no attribute {!r}" - raise AttributeError(msg.format(self.__class__.__name__, name)) - - def __add__(self, other): - """Non-default values from right operand override those from left. - - The object returned is a new instance of the subclass. - - """ - return self.clone(**other.__dict__) - - -def _append_doc(doc, added_doc): - doc = doc.rsplit('\n', 1)[0] - added_doc = added_doc.split('\n', 1)[1] - return doc + '\n' + added_doc - -def _extend_docstrings(cls): - if cls.__doc__ and cls.__doc__.startswith('+'): - cls.__doc__ = _append_doc(cls.__bases__[0].__doc__, cls.__doc__) - for name, attr in cls.__dict__.items(): - if attr.__doc__ and attr.__doc__.startswith('+'): - for c in (c for base in cls.__bases__ for c in base.mro()): - doc = getattr(getattr(c, name), '__doc__') - if doc: - attr.__doc__ = _append_doc(doc, attr.__doc__) - break - return cls - - -class Policy(_PolicyBase, metaclass=abc.ABCMeta): - - r"""Controls for how messages are interpreted and formatted. - - Most of the classes and many of the methods in the email package accept - Policy objects as parameters. A Policy object contains a set of values and - functions that control how input is interpreted and how output is rendered. - For example, the parameter 'raise_on_defect' controls whether or not an RFC - violation results in an error being raised or not, while 'max_line_length' - controls the maximum length of output lines when a Message is serialized. - - Any valid attribute may be overridden when a Policy is created by passing - it as a keyword argument to the constructor. Policy objects are immutable, - but a new Policy object can be created with only certain values changed by - calling the Policy instance with keyword arguments. Policy objects can - also be added, producing a new Policy object in which the non-default - attributes set in the right hand operand overwrite those specified in the - left operand. - - Settable attributes: - - raise_on_defect -- If true, then defects should be raised as errors. - Default: False. - - linesep -- string containing the value to use as separation - between output lines. Default '\n'. - - cte_type -- Type of allowed content transfer encodings - - 7bit -- ASCII only - 8bit -- Content-Transfer-Encoding: 8bit is allowed - - Default: 8bit. 
Also controls the disposition of - (RFC invalid) binary data in headers; see the - documentation of the binary_fold method. - - max_line_length -- maximum length of lines, excluding 'linesep', - during serialization. None or 0 means no line - wrapping is done. Default is 78. - - mangle_from_ -- a flag that, when True, escapes From_ lines in the - body of the message by putting a `>' in front of - them. This is used when the message is being - serialized by a generator. Default: False. - - message_factory -- the class to use to create new message objects. - If the value is None, the default is Message. - - verify_generated_headers - -- if true, the generator verifies that each header - is properly folded, so that a parser won't - treat it as multiple headers, start-of-body, or - part of another header. - This is a check against custom Header & fold() - implementations. - """ - - raise_on_defect = False - linesep = '\n' - cte_type = '8bit' - max_line_length = 78 - mangle_from_ = False - message_factory = None - verify_generated_headers = True - - def handle_defect(self, obj, defect): - """Based on policy, either raise defect or call register_defect. - - handle_defect(obj, defect) - - defect should be a Defect subclass, but in any case must be an - Exception subclass. obj is the object on which the defect should be - registered if it is not raised. If the raise_on_defect is True, the - defect is raised as an error, otherwise the object and the defect are - passed to register_defect. - - This method is intended to be called by parsers that discover defects. - The email package parsers always call it with Defect instances. - - """ - if self.raise_on_defect: - raise defect - self.register_defect(obj, defect) - - def register_defect(self, obj, defect): - """Record 'defect' on 'obj'. - - Called by handle_defect if raise_on_defect is False. This method is - part of the Policy API so that Policy subclasses can implement custom - defect handling. The default implementation calls the append method of - the defects attribute of obj. The objects used by the email package by - default that get passed to this method will always have a defects - attribute with an append method. - - """ - obj.defects.append(defect) - - def header_max_count(self, name): - """Return the maximum allowed number of headers named 'name'. - - Called when a header is added to a Message object. If the returned - value is not 0 or None, and there are already a number of headers with - the name 'name' equal to the value returned, a ValueError is raised. - - Because the default behavior of Message's __setitem__ is to append the - value to the list of headers, it is easy to create duplicate headers - without realizing it. This method allows certain headers to be limited - in the number of instances of that header that may be added to a - Message programmatically. (The limit is not observed by the parser, - which will faithfully produce as many headers as exist in the message - being parsed.) - - The default implementation returns None for all header names. - """ - return None - - @abc.abstractmethod - def header_source_parse(self, sourcelines): - """Given a list of linesep terminated strings constituting the lines of - a single header, return the (name, value) tuple that should be stored - in the model. The input lines should retain their terminating linesep - characters. The lines passed in by the email package may contain - surrogateescaped binary data.
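Policy instances being immutable, all customization goes through clone() and '+'; a minimal sketch using the public email.policy wrappers built on this framework:

from email import policy

strict = policy.default.clone(raise_on_defect=True)
# '+' copies the right operand's non-default values onto the left:
smtp_strict = strict + policy.SMTP
assert smtp_strict.raise_on_defect and smtp_strict.linesep == '\r\n'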
- """ - raise NotImplementedError - - @abc.abstractmethod - def header_store_parse(self, name, value): - """Given the header name and the value provided by the application - program, return the (name, value) that should be stored in the model. - """ - raise NotImplementedError - - @abc.abstractmethod - def header_fetch_parse(self, name, value): - """Given the header name and the value from the model, return the value - to be returned to the application program that is requesting that - header. The value passed in by the email package may contain - surrogateescaped binary data if the lines were parsed by a BytesParser. - The returned value should not contain any surrogateescaped data. - - """ - raise NotImplementedError - - @abc.abstractmethod - def fold(self, name, value): - """Given the header name and the value from the model, return a string - containing linesep characters that implement the folding of the header - according to the policy controls. The value passed in by the email - package may contain surrogateescaped binary data if the lines were - parsed by a BytesParser. The returned value should not contain any - surrogateescaped data. - - """ - raise NotImplementedError - - @abc.abstractmethod - def fold_binary(self, name, value): - """Given the header name and the value from the model, return binary - data containing linesep characters that implement the folding of the - header according to the policy controls. The value passed in by the - email package may contain surrogateescaped binary data. - - """ - raise NotImplementedError - - -@_extend_docstrings -class Compat32(Policy): - - """+ - This particular policy is the backward compatibility Policy. It - replicates the behavior of the email package version 5.1. - """ - - mangle_from_ = True - - def _sanitize_header(self, name, value): - # If the header value contains surrogates, return a Header using - # the unknown-8bit charset to encode the bytes as encoded words. - if not isinstance(value, str): - # Assume it is already a header object - return value - if _has_surrogates(value): - return header.Header(value, charset=_charset.UNKNOWN8BIT, - header_name=name) - else: - return value - - def header_source_parse(self, sourcelines): - """+ - The name is parsed as everything up to the ':' and returned unmodified. - The value is determined by stripping leading whitespace off the - remainder of the first line joined with all subsequent lines, and - stripping any trailing carriage return or linefeed characters. - - """ - name, value = sourcelines[0].split(':', 1) - value = ''.join((value, *sourcelines[1:])).lstrip(' \t\r\n') - return (name, value.rstrip('\r\n')) - - def header_store_parse(self, name, value): - """+ - The name and value are returned unmodified. - """ - return (name, value) - - def header_fetch_parse(self, name, value): - """+ - If the value contains binary data, it is converted into a Header object - using the unknown-8bit charset. Otherwise it is returned unmodified. - """ - return self._sanitize_header(name, value) - - def fold(self, name, value): - """+ - Headers are folded using the Header folding algorithm, which preserves - existing line breaks in the value, and wraps each resulting line to the - max_line_length. Non-ASCII binary data are CTE encoded using the - unknown-8bit charset. 
- - """ - return self._fold(name, value, sanitize=True) - - def fold_binary(self, name, value): - """+ - Headers are folded using the Header folding algorithm, which preserves - existing line breaks in the value, and wraps each resulting line to the - max_line_length. If cte_type is 7bit, non-ascii binary data is CTE - encoded using the unknown-8bit charset. Otherwise the original source - header is used, with its existing line breaks and/or binary data. - - """ - folded = self._fold(name, value, sanitize=self.cte_type=='7bit') - return folded.encode('ascii', 'surrogateescape') - - def _fold(self, name, value, sanitize): - parts = [] - parts.append('%s: ' % name) - if isinstance(value, str): - if _has_surrogates(value): - if sanitize: - h = header.Header(value, - charset=_charset.UNKNOWN8BIT, - header_name=name) - else: - # If we have raw 8bit data in a byte string, we have no idea - # what the encoding is. There is no safe way to split this - # string. If it's ascii-subset, then we could do a normal - # ascii split, but if it's multibyte then we could break the - # string. There's no way to know so the least harm seems to - # be to not split the string and risk it being too long. - parts.append(value) - h = None - else: - h = header.Header(value, header_name=name) - else: - # Assume it is a Header-like object. - h = value - if h is not None: - # The Header class interprets a value of None for maxlinelen as the - # default value of 78, as recommended by RFC 5322 section 2.1.1. - maxlinelen = 0 - if self.max_line_length is not None: - maxlinelen = self.max_line_length - parts.append(h.encode(linesep=self.linesep, maxlinelen=maxlinelen)) - parts.append(self.linesep) - return ''.join(parts) - - -compat32 = Compat32() diff --git a/Python313_13_x64_Template/Lib/email/base64mime.py b/Python313_13_x64_Template/Lib/email/base64mime.py deleted file mode 100644 index 4cdf2266..00000000 --- a/Python313_13_x64_Template/Lib/email/base64mime.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (C) 2002-2007 Python Software Foundation -# Author: Ben Gertzfield -# Contact: email-sig@python.org - -"""Base64 content transfer encoding per RFCs 2045-2047. - -This module handles the content transfer encoding method defined in RFC 2045 -to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit -characters encoding known as Base64. - -It is used in the MIME standards for email to attach images, audio, and text -using some 8-bit character sets to messages. - -This module provides an interface to encode and decode both headers and bodies -with Base64 encoding. - -RFC 2045 defines a method for including character set information in an -`encoded-word' in a header. This method is commonly used for 8-bit real names -in To:, From:, Cc:, etc. fields, as well as Subject: lines. - -This module does not do the line wrapping or end-of-line character conversion -necessary for proper internationalized headers; it only does dumb encoding and -decoding. To deal with the various line wrapping issues, use the email.header -module. 
-""" - -__all__ = [ - 'body_decode', - 'body_encode', - 'decode', - 'decodestring', - 'header_encode', - 'header_length', - ] - - -from base64 import b64encode -from binascii import b2a_base64, a2b_base64 - -CRLF = '\r\n' -NL = '\n' -EMPTYSTRING = '' - -# See also Charset.py -MISC_LEN = 7 - - -# Helpers -def header_length(bytearray): - """Return the length of s when it is encoded with base64.""" - groups_of_3, leftover = divmod(len(bytearray), 3) - # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in. - n = groups_of_3 * 4 - if leftover: - n += 4 - return n - - -def header_encode(header_bytes, charset='iso-8859-1'): - """Encode a single header line with Base64 encoding in a given charset. - - charset names the character set to use to encode the header. It defaults - to iso-8859-1. Base64 encoding is defined in RFC 2045. - """ - if not header_bytes: - return "" - if isinstance(header_bytes, str): - header_bytes = header_bytes.encode(charset) - encoded = b64encode(header_bytes).decode("ascii") - return '=?%s?b?%s?=' % (charset, encoded) - - -def body_encode(s, maxlinelen=76, eol=NL): - r"""Encode a string with base64. - - Each line will be wrapped at, at most, maxlinelen characters (defaults to - 76 characters). - - Each line of encoded text will end with eol, which defaults to "\n". Set - this to "\r\n" if you will be using the result of this function directly - in an email. - """ - if not s: - return "" - - encvec = [] - max_unencoded = maxlinelen * 3 // 4 - for i in range(0, len(s), max_unencoded): - # BAW: should encode() inherit b2a_base64()'s dubious behavior in - # adding a newline to the encoded string? - enc = b2a_base64(s[i:i + max_unencoded]).decode("ascii") - if enc.endswith(NL) and eol != NL: - enc = enc[:-1] + eol - encvec.append(enc) - return EMPTYSTRING.join(encvec) - - -def decode(string): - """Decode a raw base64 string, returning a bytes object. - - This function does not parse a full MIME header value encoded with - base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high - level email.header class for that functionality. 
- """ - if not string: - return bytes() - elif isinstance(string, str): - return a2b_base64(string.encode('raw-unicode-escape')) - else: - return a2b_base64(string) - - -# For convenience and backwards compatibility w/ standard base64 module -body_decode = decode -decodestring = decode diff --git a/Python313_13_x64_Template/Lib/email/charset.py b/Python313_13_x64_Template/Lib/email/charset.py deleted file mode 100644 index 04380110..00000000 --- a/Python313_13_x64_Template/Lib/email/charset.py +++ /dev/null @@ -1,398 +0,0 @@ -# Copyright (C) 2001-2007 Python Software Foundation -# Author: Ben Gertzfield, Barry Warsaw -# Contact: email-sig@python.org - -__all__ = [ - 'Charset', - 'add_alias', - 'add_charset', - 'add_codec', - ] - -from functools import partial - -import email.base64mime -import email.quoprimime - -from email import errors -from email.encoders import encode_7or8bit - - -# Flags for types of header encodings -QP = 1 # Quoted-Printable -BASE64 = 2 # Base64 -SHORTEST = 3 # the shorter of QP and base64, but only for headers - -# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7 -RFC2047_CHROME_LEN = 7 - -DEFAULT_CHARSET = 'us-ascii' -UNKNOWN8BIT = 'unknown-8bit' -EMPTYSTRING = '' - - -# Defaults -CHARSETS = { - # input header enc body enc output conv - 'iso-8859-1': (QP, QP, None), - 'iso-8859-2': (QP, QP, None), - 'iso-8859-3': (QP, QP, None), - 'iso-8859-4': (QP, QP, None), - # iso-8859-5 is Cyrillic, and not especially used - # iso-8859-6 is Arabic, also not particularly used - # iso-8859-7 is Greek, QP will not make it readable - # iso-8859-8 is Hebrew, QP will not make it readable - 'iso-8859-9': (QP, QP, None), - 'iso-8859-10': (QP, QP, None), - # iso-8859-11 is Thai, QP will not make it readable - 'iso-8859-13': (QP, QP, None), - 'iso-8859-14': (QP, QP, None), - 'iso-8859-15': (QP, QP, None), - 'iso-8859-16': (QP, QP, None), - 'windows-1252':(QP, QP, None), - 'viscii': (QP, QP, None), - 'us-ascii': (None, None, None), - 'big5': (BASE64, BASE64, None), - 'gb2312': (BASE64, BASE64, None), - 'euc-jp': (BASE64, None, 'iso-2022-jp'), - 'shift_jis': (BASE64, None, 'iso-2022-jp'), - 'iso-2022-jp': (BASE64, None, None), - 'koi8-r': (BASE64, BASE64, None), - 'utf-8': (SHORTEST, BASE64, 'utf-8'), - } - -# Aliases for other commonly-used names for character sets. Map -# them to the real ones used in email. -ALIASES = { - 'latin_1': 'iso-8859-1', - 'latin-1': 'iso-8859-1', - 'latin_2': 'iso-8859-2', - 'latin-2': 'iso-8859-2', - 'latin_3': 'iso-8859-3', - 'latin-3': 'iso-8859-3', - 'latin_4': 'iso-8859-4', - 'latin-4': 'iso-8859-4', - 'latin_5': 'iso-8859-9', - 'latin-5': 'iso-8859-9', - 'latin_6': 'iso-8859-10', - 'latin-6': 'iso-8859-10', - 'latin_7': 'iso-8859-13', - 'latin-7': 'iso-8859-13', - 'latin_8': 'iso-8859-14', - 'latin-8': 'iso-8859-14', - 'latin_9': 'iso-8859-15', - 'latin-9': 'iso-8859-15', - 'latin_10':'iso-8859-16', - 'latin-10':'iso-8859-16', - 'cp949': 'ks_c_5601-1987', - 'euc_jp': 'euc-jp', - 'euc_kr': 'euc-kr', - 'ascii': 'us-ascii', - } - - -# Map charsets to their Unicode codec strings. -CODEC_MAP = { - 'gb2312': 'eucgb2312_cn', - 'big5': 'big5_tw', - # Hack: We don't want *any* conversion for stuff marked us-ascii, as all - # sorts of garbage might be sent to us in the guise of 7-bit us-ascii. - # Let that stuff pass through without conversion to/from Unicode. 
- 'us-ascii': None, - } - - -# Convenience functions for extending the above mappings -def add_charset(charset, header_enc=None, body_enc=None, output_charset=None): - """Add character set properties to the global registry. - - charset is the input character set, and must be the canonical name of a - character set. - - Optional header_enc and body_enc is either charset.QP for - quoted-printable, charset.BASE64 for base64 encoding, charset.SHORTEST for - the shortest of qp or base64 encoding, or None for no encoding. SHORTEST - is only valid for header_enc. It describes how message headers and - message bodies in the input charset are to be encoded. Default is no - encoding. - - Optional output_charset is the character set that the output should be - in. Conversions will proceed from input charset, to Unicode, to the - output charset when the method Charset.convert() is called. The default - is to output in the same character set as the input. - - Both input_charset and output_charset must have Unicode codec entries in - the module's charset-to-codec mapping; use add_codec(charset, codecname) - to add codecs the module does not know about. See the codecs module's - documentation for more information. - """ - if body_enc == SHORTEST: - raise ValueError('SHORTEST not allowed for body_enc') - CHARSETS[charset] = (header_enc, body_enc, output_charset) - - -def add_alias(alias, canonical): - """Add a character set alias. - - alias is the alias name, e.g. latin-1 - canonical is the character set's canonical name, e.g. iso-8859-1 - """ - ALIASES[alias] = canonical - - -def add_codec(charset, codecname): - """Add a codec that map characters in the given charset to/from Unicode. - - charset is the canonical name of a character set. codecname is the name - of a Python codec, as appropriate for the second argument to the unicode() - built-in, or to the encode() method of a Unicode string. - """ - CODEC_MAP[charset] = codecname - - -# Convenience function for encoding strings, taking into account -# that they might be unknown-8bit (ie: have surrogate-escaped bytes) -def _encode(string, codec): - if codec == UNKNOWN8BIT: - return string.encode('ascii', 'surrogateescape') - else: - return string.encode(codec) - - -class Charset: - """Map character sets to their email properties. - - This class provides information about the requirements imposed on email - for a specific character set. It also provides convenience routines for - converting between character sets, given the availability of the - applicable codecs. Given a character set, it will do its best to provide - information on how to use that character set in an email in an - RFC-compliant way. - - Certain character sets must be encoded with quoted-printable or base64 - when used in email headers or bodies. Certain character sets must be - converted outright, and are not allowed in email. Instances of this - module expose the following information about a character set: - - input_charset: The initial character set specified. Common aliases - are converted to their `official' email names (e.g. latin_1 - is converted to iso-8859-1). Defaults to 7-bit us-ascii. - - header_encoding: If the character set must be encoded before it can be - used in an email header, this attribute will be set to - charset.QP (for quoted-printable), charset.BASE64 (for - base64 encoding), or charset.SHORTEST for the shortest of - QP or BASE64 encoding. Otherwise, it will be None. 
- - body_encoding: Same as header_encoding, but describes the encoding for the - mail message's body, which indeed may be different than the - header encoding. charset.SHORTEST is not allowed for - body_encoding. - - output_charset: Some character sets must be converted before they can be - used in email headers or bodies. If the input_charset is - one of them, this attribute will contain the name of the - charset output will be converted to. Otherwise, it will - be None. - - input_codec: The name of the Python codec used to convert the - input_charset to Unicode. If no conversion codec is - necessary, this attribute will be None. - - output_codec: The name of the Python codec used to convert Unicode - to the output_charset. If no conversion codec is necessary, - this attribute will have the same value as the input_codec. - """ - def __init__(self, input_charset=DEFAULT_CHARSET): - # RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to - # unicode because its .lower() is locale insensitive. If the argument - # is already a unicode, we leave it at that, but ensure that the - # charset is ASCII, as the standard (RFC XXX) requires. - try: - if isinstance(input_charset, str): - input_charset.encode('ascii') - else: - input_charset = str(input_charset, 'ascii') - except UnicodeError: - raise errors.CharsetError(input_charset) - input_charset = input_charset.lower() - # Set the input charset after filtering through the aliases - self.input_charset = ALIASES.get(input_charset, input_charset) - # We can try to guess which encoding and conversion to use by the - # charset_map dictionary. Try that first, but let the user override - # it. - henc, benc, conv = CHARSETS.get(self.input_charset, - (SHORTEST, BASE64, None)) - if not conv: - conv = self.input_charset - # Set the attributes, allowing the arguments to override the default. - self.header_encoding = henc - self.body_encoding = benc - self.output_charset = ALIASES.get(conv, conv) - # Now set the codecs. If one isn't defined for input_charset, - # guess and try a Unicode codec with the same name as input_codec. - self.input_codec = CODEC_MAP.get(self.input_charset, - self.input_charset) - self.output_codec = CODEC_MAP.get(self.output_charset, - self.output_charset) - - def __repr__(self): - return self.input_charset.lower() - - def __eq__(self, other): - return str(self) == str(other).lower() - - def get_body_encoding(self): - """Return the content-transfer-encoding used for body encoding. - - This is either the string `quoted-printable' or `base64' depending on - the encoding used, or it is a function in which case you should call - the function with a single argument, the Message object being - encoded. The function should then set the Content-Transfer-Encoding - header itself to whatever is appropriate. - - Returns "quoted-printable" if self.body_encoding is QP. - Returns "base64" if self.body_encoding is BASE64. - Returns conversion function otherwise. - """ - assert self.body_encoding != SHORTEST - if self.body_encoding == QP: - return 'quoted-printable' - elif self.body_encoding == BASE64: - return 'base64' - else: - return encode_7or8bit - - def get_output_charset(self): - """Return the output character set. - - This is self.output_charset if that is not None, otherwise it is - self.input_charset. - """ - return self.output_charset or self.input_charset - - def header_encode(self, string): - """Header-encode a string by converting it first to bytes. 
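How the registry lookups above resolve in practice, as a small sketch:

from email.charset import Charset

c = Charset('latin_1')      # ALIASES maps latin_1 -> iso-8859-1
str(c)                      # -> 'iso-8859-1'
c.get_body_encoding()       # -> 'quoted-printable' (the QP entry in CHARSETS)
Charset('utf-8').header_encode('héllo')
# -> '=?utf-8?b?aMOpbGxv?=' (SHORTEST chose base64 over QP here)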
- - The type of encoding (base64 or quoted-printable) will be based on - this charset's `header_encoding`. - - :param string: A unicode string for the header. It must be possible - to encode this string to bytes using the character set's - output codec. - :return: The encoded string, with RFC 2047 chrome. - """ - codec = self.output_codec or 'us-ascii' - header_bytes = _encode(string, codec) - # 7bit/8bit encodings return the string unchanged (modulo conversions) - encoder_module = self._get_encoder(header_bytes) - if encoder_module is None: - return string - return encoder_module.header_encode(header_bytes, codec) - - def header_encode_lines(self, string, maxlengths): - """Header-encode a string by converting it first to bytes. - - This is similar to `header_encode()` except that the string is fit - into maximum line lengths as given by the argument. - - :param string: A unicode string for the header. It must be possible - to encode this string to bytes using the character set's - output codec. - :param maxlengths: Maximum line length iterator. Each element - returned from this iterator will provide the next maximum line - length. This parameter is used as an argument to built-in next() - and should never be exhausted. The maximum line lengths should - not count the RFC 2047 chrome. These line lengths are only a - hint; the splitter does the best it can. - :return: Lines of encoded strings, each with RFC 2047 chrome. - """ - # See which encoding we should use. - codec = self.output_codec or 'us-ascii' - header_bytes = _encode(string, codec) - encoder_module = self._get_encoder(header_bytes) - encoder = partial(encoder_module.header_encode, charset=codec) - # Calculate the number of characters that the RFC 2047 chrome will - # contribute to each line. - charset = self.get_output_charset() - extra = len(charset) + RFC2047_CHROME_LEN - # Now comes the hard part. We must encode bytes but we can't split on - # bytes because some character sets are variable length and each - # encoded word must stand on its own. So the problem is you have to - # encode to bytes to figure out this word's length, but you must split - # on characters. This causes two problems: first, we don't know how - # many octets a specific substring of unicode characters will get - # encoded to, and second, we don't know how many ASCII characters - # those octets will get encoded to. Unless we try it. Which seems - # inefficient. In the interest of being correct rather than fast (and - # in the hope that there will be few encoded headers in any such - # message), brute force it. :( - lines = [] - current_line = [] - maxlen = next(maxlengths) - extra - for character in string: - current_line.append(character) - this_line = EMPTYSTRING.join(current_line) - length = encoder_module.header_length(_encode(this_line, charset)) - if length > maxlen: - # This last character doesn't fit so pop it off. - current_line.pop() - # Does nothing fit on the first line? 
- if not lines and not current_line: - lines.append(None) - else: - joined_line = EMPTYSTRING.join(current_line) - header_bytes = _encode(joined_line, codec) - lines.append(encoder(header_bytes)) - current_line = [character] - maxlen = next(maxlengths) - extra - joined_line = EMPTYSTRING.join(current_line) - header_bytes = _encode(joined_line, codec) - lines.append(encoder(header_bytes)) - return lines - - def _get_encoder(self, header_bytes): - if self.header_encoding == BASE64: - return email.base64mime - elif self.header_encoding == QP: - return email.quoprimime - elif self.header_encoding == SHORTEST: - len64 = email.base64mime.header_length(header_bytes) - lenqp = email.quoprimime.header_length(header_bytes) - if len64 < lenqp: - return email.base64mime - else: - return email.quoprimime - else: - return None - - def body_encode(self, string): - """Body-encode a string by converting it first to bytes. - - The type of encoding (base64 or quoted-printable) will be based on - self.body_encoding. If body_encoding is None, we assume the - output charset is a 7bit encoding, so re-encoding the decoded - string using the ascii codec produces the correct string version - of the content. - """ - if not string: - return string - if self.body_encoding is BASE64: - if isinstance(string, str): - string = string.encode(self.output_charset) - return email.base64mime.body_encode(string) - elif self.body_encoding is QP: - # quopromime.body_encode takes a string, but operates on it as if - # it were a list of byte codes. For a (minimal) history on why - # this is so, see changeset 0cf700464177. To correctly encode a - # character set, then, we must turn it into pseudo bytes via the - # latin1 charset, which will encode any byte as a single code point - # between 0 and 255, which is what body_encode is expecting. - if isinstance(string, str): - string = string.encode(self.output_charset) - string = string.decode('latin1') - return email.quoprimime.body_encode(string) - else: - if isinstance(string, str): - string = string.encode(self.output_charset).decode('ascii') - return string diff --git a/Python313_13_x64_Template/Lib/email/encoders.py b/Python313_13_x64_Template/Lib/email/encoders.py deleted file mode 100644 index 17bd1ab7..00000000 --- a/Python313_13_x64_Template/Lib/email/encoders.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Encodings and related functions.""" - -__all__ = [ - 'encode_7or8bit', - 'encode_base64', - 'encode_noop', - 'encode_quopri', - ] - - -from base64 import encodebytes as _bencode -from quopri import encodestring as _encodestring - - -def _qencode(s): - enc = _encodestring(s, quotetabs=True) - # Must encode spaces, which quopri.encodestring() doesn't do - return enc.replace(b' ', b'=20') - - -def encode_base64(msg): - """Encode the message's payload in Base64. - - Also, add an appropriate Content-Transfer-Encoding header. - """ - orig = msg.get_payload(decode=True) - encdata = str(_bencode(orig), 'ascii') - msg.set_payload(encdata) - msg['Content-Transfer-Encoding'] = 'base64' - - -def encode_quopri(msg): - """Encode the message's payload in quoted-printable. - - Also, add an appropriate Content-Transfer-Encoding header. 
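These encoder hooks are usually reached indirectly through the MIME class constructors; a minimal sketch:

from email.mime.text import MIMEText

msg = MIMEText('h\u00e9llo', _charset='utf-8')
# utf-8's body encoding is BASE64, so encode_base64 was applied:
assert msg['Content-Transfer-Encoding'] == 'base64'
assert MIMEText('hello')['Content-Transfer-Encoding'] == '7bit'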
- """ - orig = msg.get_payload(decode=True) - encdata = _qencode(orig) - msg.set_payload(encdata) - msg['Content-Transfer-Encoding'] = 'quoted-printable' - - -def encode_7or8bit(msg): - """Set the Content-Transfer-Encoding header to 7bit or 8bit.""" - orig = msg.get_payload(decode=True) - if orig is None: - # There's no payload. For backwards compatibility we use 7bit - msg['Content-Transfer-Encoding'] = '7bit' - return - # We play a trick to make this go fast. If decoding from ASCII succeeds, - # we know the data must be 7bit, otherwise treat it as 8bit. - try: - orig.decode('ascii') - except UnicodeError: - msg['Content-Transfer-Encoding'] = '8bit' - else: - msg['Content-Transfer-Encoding'] = '7bit' - - -def encode_noop(msg): - """Do nothing.""" diff --git a/Python313_13_x64_Template/Lib/email/errors.py b/Python313_13_x64_Template/Lib/email/errors.py deleted file mode 100644 index 02aa5ece..00000000 --- a/Python313_13_x64_Template/Lib/email/errors.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""email package exception classes.""" - - -class MessageError(Exception): - """Base class for errors in the email package.""" - - -class MessageParseError(MessageError): - """Base class for message parsing errors.""" - - -class HeaderParseError(MessageParseError): - """Error while parsing headers.""" - - -class BoundaryError(MessageParseError): - """Couldn't find terminating boundary.""" - - -class MultipartConversionError(MessageError, TypeError): - """Conversion to a multipart is prohibited.""" - - -class CharsetError(MessageError): - """An illegal charset was given.""" - - -class HeaderWriteError(MessageError): - """Error while writing headers.""" - - -# These are parsing defects which the parser was able to work around. -class MessageDefect(ValueError): - """Base class for a message defect.""" - - def __init__(self, line=None): - if line is not None: - super().__init__(line) - self.line = line - -class NoBoundaryInMultipartDefect(MessageDefect): - """A message claimed to be a multipart but had no boundary parameter.""" - -class StartBoundaryNotFoundDefect(MessageDefect): - """The claimed start boundary was never found.""" - -class CloseBoundaryNotFoundDefect(MessageDefect): - """A start boundary was found, but not the corresponding close boundary.""" - -class FirstHeaderLineIsContinuationDefect(MessageDefect): - """A message had a continuation line as its first header line.""" - -class MisplacedEnvelopeHeaderDefect(MessageDefect): - """A 'Unix-from' header was found in the middle of a header block.""" - -class MissingHeaderBodySeparatorDefect(MessageDefect): - """Found line with no leading whitespace and no colon before blank line.""" -# XXX: backward compatibility, just in case (it was never emitted). 
-MalformedHeaderDefect = MissingHeaderBodySeparatorDefect - -class MultipartInvariantViolationDefect(MessageDefect): - """A message claimed to be a multipart but no subparts were found.""" - -class InvalidMultipartContentTransferEncodingDefect(MessageDefect): - """An invalid content transfer encoding was set on the multipart itself.""" - -class UndecodableBytesDefect(MessageDefect): - """Header contained bytes that could not be decoded""" - -class InvalidBase64PaddingDefect(MessageDefect): - """base64 encoded sequence had an incorrect length""" - -class InvalidBase64CharactersDefect(MessageDefect): - """base64 encoded sequence had characters not in base64 alphabet""" - -class InvalidBase64LengthDefect(MessageDefect): - """base64 encoded sequence had invalid length (1 mod 4)""" - -# These errors are specific to header parsing. - -class HeaderDefect(MessageDefect): - """Base class for a header defect.""" - - def __init__(self, *args, **kw): - super().__init__(*args, **kw) - -class InvalidHeaderDefect(HeaderDefect): - """Header is not valid, message gives details.""" - -class HeaderMissingRequiredValue(HeaderDefect): - """A header that must have a value had none""" - -class NonPrintableDefect(HeaderDefect): - """ASCII characters outside the ascii-printable range found""" - - def __init__(self, non_printables): - super().__init__(non_printables) - self.non_printables = non_printables - - def __str__(self): - return ("the following ASCII non-printables found in header: " - "{}".format(self.non_printables)) - -class ObsoleteHeaderDefect(HeaderDefect): - """Header uses syntax declared obsolete by RFC 5322""" - -class NonASCIILocalPartDefect(HeaderDefect): - """local_part contains non-ASCII characters""" - # This defect only occurs during unicode parsing, not when - # parsing messages decoded from binary. - -class InvalidDateDefect(HeaderDefect): - """Header has unparsable or invalid date""" diff --git a/Python313_13_x64_Template/Lib/email/feedparser.py b/Python313_13_x64_Template/Lib/email/feedparser.py deleted file mode 100644 index 8e60f1d1..00000000 --- a/Python313_13_x64_Template/Lib/email/feedparser.py +++ /dev/null @@ -1,536 +0,0 @@ -# Copyright (C) 2004-2006 Python Software Foundation -# Authors: Baxter, Wouters and Warsaw -# Contact: email-sig@python.org - -"""FeedParser - An email feed parser. - -The feed parser implements an interface for incrementally parsing an email -message, line by line. This has advantages for certain applications, such as -those reading email messages off a socket. - -FeedParser.feed() is the primary interface for pushing new data into the -parser. It returns when there's nothing more it can do with the available -data. When you have no more data to push into the parser, call .close(). -This completes the parsing and returns the root message object. - -The other advantage of this parser is that it will never raise a parsing -exception. Instead, when it finds something unexpected, it adds a 'defect' to -the current message. Defects are just instances that live on the message -object's .defects attribute. -""" - -__all__ = ['FeedParser', 'BytesFeedParser'] - -import re - -from email import errors -from email._policybase import compat32 -from collections import deque -from io import StringIO - -NLCRE = re.compile(r'\r\n|\r|\n') -NLCRE_bol = re.compile(r'(\r\n|\r|\n)') -NLCRE_eol = re.compile(r'(\r\n|\r|\n)\Z') -NLCRE_crack = re.compile(r'(\r\n|\r|\n)') -# RFC 5322 section 3.6.8 Optional fields. 
ftext is %d33-57 / %d59-126, Any character -# except controls, SP, and ":". -headerRE = re.compile(r'^(From |[\041-\071\073-\176]*:|[\t ])') -EMPTYSTRING = '' -NL = '\n' -boundaryendRE = re.compile( - r'(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$') - -NeedMoreData = object() - - -class BufferedSubFile(object): - """A file-ish object that can have new data loaded into it. - - You can also push and pop line-matching predicates onto a stack. When the - current predicate matches the current line, a false EOF response - (i.e. empty string) is returned instead. This lets the parser adhere to a - simple abstraction -- it parses until EOF closes the current message. - """ - def __init__(self): - # Text stream of the last partial line pushed into this object. - # See issue 22233 for why this is a text stream and not a list. - self._partial = StringIO(newline='') - # A deque of full, pushed lines - self._lines = deque() - # The stack of false-EOF checking predicates. - self._eofstack = [] - # A flag indicating whether the file has been closed or not. - self._closed = False - - def push_eof_matcher(self, pred): - self._eofstack.append(pred) - - def pop_eof_matcher(self): - return self._eofstack.pop() - - def close(self): - # Don't forget any trailing partial line. - self._partial.seek(0) - self.pushlines(self._partial.readlines()) - self._partial.seek(0) - self._partial.truncate() - self._closed = True - - def readline(self): - if not self._lines: - if self._closed: - return '' - return NeedMoreData - # Pop the line off the stack and see if it matches the current - # false-EOF predicate. - line = self._lines.popleft() - # RFC 2046, section 5.1.2 requires us to recognize outer level - # boundaries at any level of inner nesting. Do this, but be sure it's - # in the order of most to least nested. - for ateof in reversed(self._eofstack): - if ateof(line): - # We're at the false EOF. But push the last line back first. - self._lines.appendleft(line) - return '' - return line - - def unreadline(self, line): - # Let the consumer push a line back into the buffer. - assert line is not NeedMoreData - self._lines.appendleft(line) - - def push(self, data): - """Push some new data into this object.""" - self._partial.write(data) - if '\n' not in data and '\r' not in data: - # No new complete lines, wait for more. - return - - # Crack into lines, preserving the linesep characters. - self._partial.seek(0) - parts = self._partial.readlines() - self._partial.seek(0) - self._partial.truncate() - - # If the last element of the list does not end in a newline, then treat - # it as a partial line. We only check for '\n' here because a line - # ending with '\r' might be a line that was split in the middle of a - # '\r\n' sequence (see bugs 1555570 and 1721862). - if not parts[-1].endswith('\n'): - self._partial.write(parts.pop()) - self.pushlines(parts) - - def pushlines(self, lines): - self._lines.extend(lines) - - def __iter__(self): - return self - - def __next__(self): - line = self.readline() - if line == '': - raise StopIteration - return line - - -class FeedParser: - """A feed-style parser of email.""" - - def __init__(self, _factory=None, *, policy=compat32): - """_factory is called with no arguments to create a new message obj - - The policy keyword specifies a policy object that controls a number of - aspects of the parser's operation. The default policy maintains - backward compatibility.
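For reference, what boundaryendRE (defined above) matches once a line has been advanced past the '--boundary' separator; _parsegen() below reads the same named groups via mo.group('end') and mo.group('linesep'). The boundary string is illustrative:

import re

boundaryendRE = re.compile(
    r'(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
separator = '--frontier'
mo = boundaryendRE.match('--frontier--\n', len(separator))
assert mo.group('end') == '--' and mo.group('linesep') == '\n'
mo = boundaryendRE.match('--frontier\n', len(separator))
assert mo.group('end') is None   # inter-part boundary, not the closing one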
- - """ - self.policy = policy - self._old_style_factory = False - if _factory is None: - if policy.message_factory is None: - from email.message import Message - self._factory = Message - else: - self._factory = policy.message_factory - else: - self._factory = _factory - try: - _factory(policy=self.policy) - except TypeError: - # Assume this is an old-style factory - self._old_style_factory = True - self._input = BufferedSubFile() - self._msgstack = [] - self._parse = self._parsegen().__next__ - self._cur = None - self._last = None - self._headersonly = False - - # Non-public interface for supporting Parser's headersonly flag - def _set_headersonly(self): - self._headersonly = True - - def feed(self, data): - """Push more data into the parser.""" - self._input.push(data) - self._call_parse() - - def _call_parse(self): - try: - self._parse() - except StopIteration: - pass - - def close(self): - """Parse all remaining data and return the root message object.""" - self._input.close() - self._call_parse() - root = self._pop_message() - assert not self._msgstack - # Look for final set of defects - if root.get_content_maintype() == 'multipart' \ - and not root.is_multipart() and not self._headersonly: - defect = errors.MultipartInvariantViolationDefect() - self.policy.handle_defect(root, defect) - return root - - def _new_message(self): - if self._old_style_factory: - msg = self._factory() - else: - msg = self._factory(policy=self.policy) - if self._cur and self._cur.get_content_type() == 'multipart/digest': - msg.set_default_type('message/rfc822') - if self._msgstack: - self._msgstack[-1].attach(msg) - self._msgstack.append(msg) - self._cur = msg - self._last = msg - - def _pop_message(self): - retval = self._msgstack.pop() - if self._msgstack: - self._cur = self._msgstack[-1] - else: - self._cur = None - return retval - - def _parsegen(self): - # Create a new message and start by parsing headers. - self._new_message() - headers = [] - # Collect the headers, searching for a line that doesn't match the RFC - # 2822 header or continuation pattern (including an empty line). - for line in self._input: - if line is NeedMoreData: - yield NeedMoreData - continue - if not headerRE.match(line): - # If we saw the RFC defined header/body separator - # (i.e. newline), just throw it away. Otherwise the line is - # part of the body so push it back. - if not NLCRE.match(line): - defect = errors.MissingHeaderBodySeparatorDefect() - self.policy.handle_defect(self._cur, defect) - self._input.unreadline(line) - break - headers.append(line) - # Done with the headers, so parse them and figure out what we're - # supposed to see in the body of the message. - self._parse_headers(headers) - # Headers-only parsing is a backwards compatibility hack, which was - # necessary in the older parser, which could raise errors. All - # remaining lines in the input are thrown into the message body. - if self._headersonly: - lines = [] - while True: - line = self._input.readline() - if line is NeedMoreData: - yield NeedMoreData - continue - if line == '': - break - lines.append(line) - self._cur.set_payload(EMPTYSTRING.join(lines)) - return - if self._cur.get_content_type() == 'message/delivery-status': - # message/delivery-status contains blocks of headers separated by - # a blank line. We'll represent each header block as a separate - # nested message object, but the processing is a bit different - # than standard message/* types because there is no body for the - # nested messages. A blank line separates the subparts. 
- while True: - self._input.push_eof_matcher(NLCRE.match) - for retval in self._parsegen(): - if retval is NeedMoreData: - yield NeedMoreData - continue - break - self._pop_message() - # We need to pop the EOF matcher in order to tell if we're at - # the end of the current file, not the end of the last block - # of message headers. - self._input.pop_eof_matcher() - # The input stream must be sitting at the newline or at the - # EOF. We want to see if we're at the end of this subpart, so - # first consume the blank line, then test the next line to see - # if we're at this subpart's EOF. - while True: - line = self._input.readline() - if line is NeedMoreData: - yield NeedMoreData - continue - break - while True: - line = self._input.readline() - if line is NeedMoreData: - yield NeedMoreData - continue - break - if line == '': - break - # Not at EOF so this is a line we're going to need. - self._input.unreadline(line) - return - if self._cur.get_content_maintype() == 'message': - # The message claims to be a message/* type, then what follows is - # another RFC 5322 message. - for retval in self._parsegen(): - if retval is NeedMoreData: - yield NeedMoreData - continue - break - self._pop_message() - return - if self._cur.get_content_maintype() == 'multipart': - boundary = self._cur.get_boundary() - if boundary is None: - # The message /claims/ to be a multipart but it has not - # defined a boundary. That's a problem which we'll handle by - # reading everything until the EOF and marking the message as - # defective. - defect = errors.NoBoundaryInMultipartDefect() - self.policy.handle_defect(self._cur, defect) - lines = [] - for line in self._input: - if line is NeedMoreData: - yield NeedMoreData - continue - lines.append(line) - self._cur.set_payload(EMPTYSTRING.join(lines)) - return - # Make sure a valid content type was specified per RFC 2045:6.4. - if (str(self._cur.get('content-transfer-encoding', '8bit')).lower() - not in ('7bit', '8bit', 'binary')): - defect = errors.InvalidMultipartContentTransferEncodingDefect() - self.policy.handle_defect(self._cur, defect) - # Create a line match predicate which matches the inter-part - # boundary as well as the end-of-multipart boundary. Don't push - # this onto the input stream until we've scanned past the - # preamble. - separator = '--' + boundary - def boundarymatch(line): - if not line.startswith(separator): - return None - return boundaryendRE.match(line, len(separator)) - capturing_preamble = True - preamble = [] - linesep = False - close_boundary_seen = False - while True: - line = self._input.readline() - if line is NeedMoreData: - yield NeedMoreData - continue - if line == '': - break - mo = boundarymatch(line) - if mo: - # If we're looking at the end boundary, we're done with - # this multipart. If there was a newline at the end of - # the closing boundary, then we need to initialize the - # epilogue with the empty string (see below). - if mo.group('end'): - close_boundary_seen = True - linesep = mo.group('linesep') - break - # We saw an inter-part boundary. Were we in the preamble? - if capturing_preamble: - if preamble: - # According to RFC 2046, the last newline belongs - # to the boundary. - lastline = preamble[-1] - eolmo = NLCRE_eol.search(lastline) - if eolmo: - preamble[-1] = lastline[:-len(eolmo.group(0))] - self._cur.preamble = EMPTYSTRING.join(preamble) - capturing_preamble = False - self._input.unreadline(line) - continue - # We saw a boundary separating two parts. Consume any - # multiple boundary lines that may be following. 
Our - # interpretation of RFC 2046 BNF grammar does not produce - # body parts within such double boundaries. - while True: - line = self._input.readline() - if line is NeedMoreData: - yield NeedMoreData - continue - mo = boundarymatch(line) - if not mo: - self._input.unreadline(line) - break - # Recurse to parse this subpart; the input stream points - # at the subpart's first line. - self._input.push_eof_matcher(boundarymatch) - for retval in self._parsegen(): - if retval is NeedMoreData: - yield NeedMoreData - continue - break - # Because of RFC 2046, the newline preceding the boundary - # separator actually belongs to the boundary, not the - # previous subpart's payload (or epilogue if the previous - # part is a multipart). - if self._last.get_content_maintype() == 'multipart': - epilogue = self._last.epilogue - if epilogue == '': - self._last.epilogue = None - elif epilogue is not None: - mo = NLCRE_eol.search(epilogue) - if mo: - end = len(mo.group(0)) - self._last.epilogue = epilogue[:-end] - else: - payload = self._last._payload - if isinstance(payload, str): - mo = NLCRE_eol.search(payload) - if mo: - payload = payload[:-len(mo.group(0))] - self._last._payload = payload - self._input.pop_eof_matcher() - self._pop_message() - # Set the multipart up for newline cleansing, which will - # happen if we're in a nested multipart. - self._last = self._cur - else: - # I think we must be in the preamble - assert capturing_preamble - preamble.append(line) - # We've seen either the EOF or the end boundary. If we're still - # capturing the preamble, we never saw the start boundary. Note - # that as a defect and store the captured text as the payload. - if capturing_preamble: - defect = errors.StartBoundaryNotFoundDefect() - self.policy.handle_defect(self._cur, defect) - self._cur.set_payload(EMPTYSTRING.join(preamble)) - epilogue = [] - for line in self._input: - if line is NeedMoreData: - yield NeedMoreData - continue - self._cur.epilogue = EMPTYSTRING.join(epilogue) - return - # If we're not processing the preamble, then we might have seen - # EOF without seeing that end boundary...that is also a defect. - if not close_boundary_seen: - defect = errors.CloseBoundaryNotFoundDefect() - self.policy.handle_defect(self._cur, defect) - return - # Everything from here to the EOF is epilogue. If the end boundary - # ended in a newline, we'll need to make sure the epilogue isn't - # None - if linesep: - epilogue = [''] - else: - epilogue = [] - for line in self._input: - if line is NeedMoreData: - yield NeedMoreData - continue - epilogue.append(line) - # Any CRLF at the front of the epilogue is not technically part of - # the epilogue. Also, watch out for an empty string epilogue, - # which means a single newline. - if epilogue: - firstline = epilogue[0] - bolmo = NLCRE_bol.match(firstline) - if bolmo: - epilogue[0] = firstline[len(bolmo.group(0)):] - self._cur.epilogue = EMPTYSTRING.join(epilogue) - return - # Otherwise, it's some non-multipart type, so the entire rest of the - # file contents becomes the payload. - lines = [] - for line in self._input: - if line is NeedMoreData: - yield NeedMoreData - continue - lines.append(line) - self._cur.set_payload(EMPTYSTRING.join(lines)) - - def _parse_headers(self, lines): - # Passed a list of lines that make up the headers for the current msg - lastheader = '' - lastvalue = [] - for lineno, line in enumerate(lines): - # Check for continuation - if line[0] in ' \t': - if not lastheader: - # The first line of the headers was a continuation. 
This - # is illegal, so let's note the defect, store the illegal - # line, and ignore it for purposes of headers. - defect = errors.FirstHeaderLineIsContinuationDefect(line) - self.policy.handle_defect(self._cur, defect) - continue - lastvalue.append(line) - continue - if lastheader: - self._cur.set_raw(*self.policy.header_source_parse(lastvalue)) - lastheader, lastvalue = '', [] - # Check for envelope header, i.e. unix-from - if line.startswith('From '): - if lineno == 0: - # Strip off the trailing newline - mo = NLCRE_eol.search(line) - if mo: - line = line[:-len(mo.group(0))] - self._cur.set_unixfrom(line) - continue - elif lineno == len(lines) - 1: - # Something looking like a unix-from at the end - it's - # probably the first line of the body, so push back the - # line and stop. - self._input.unreadline(line) - return - else: - # Weirdly placed unix-from line. - defect = errors.MisplacedEnvelopeHeaderDefect(line) - self.policy.handle_defect(self._cur, defect) - continue - # Split the line on the colon separating field name from value. - # There will always be a colon, because if there wasn't the part of - # the parser that calls us would have started parsing the body. - i = line.find(':') - - # If the colon is on the start of the line the header is clearly - # malformed, but we might be able to salvage the rest of the - # message. Track the error but keep going. - if i == 0: - defect = errors.InvalidHeaderDefect("Missing header name.") - self.policy.handle_defect(self._cur, defect) - continue - - assert i>0, "_parse_headers fed line with no : and no leading WS" - lastheader = line[:i] - lastvalue = [line] - # Done with all the lines, so handle the last header. - if lastheader: - self._cur.set_raw(*self.policy.header_source_parse(lastvalue)) - - -class BytesFeedParser(FeedParser): - """Like FeedParser, but feed accepts bytes.""" - - def feed(self, data): - super().feed(data.decode('ascii', 'surrogateescape')) diff --git a/Python313_13_x64_Template/Lib/email/generator.py b/Python313_13_x64_Template/Lib/email/generator.py deleted file mode 100644 index a03eb1fb..00000000 --- a/Python313_13_x64_Template/Lib/email/generator.py +++ /dev/null @@ -1,530 +0,0 @@ -# Copyright (C) 2001-2010 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Classes to generate plain text from a message object tree.""" - -__all__ = ['Generator', 'DecodedGenerator', 'BytesGenerator'] - -import re -import sys -import time -import random - -from copy import deepcopy -from io import StringIO, BytesIO -from email.utils import _has_surrogates -from email.errors import HeaderWriteError - -UNDERSCORE = '_' -NL = '\n' # XXX: no longer used by the code below. - -NLCRE = re.compile(r'\r\n|\r|\n') -fcre = re.compile(r'^From ', re.MULTILINE) -NEWLINE_WITHOUT_FWSP = re.compile(r'\r\n[^ \t]|\r[^ \n\t]|\n[^ \t]') -NEWLINE_WITHOUT_FWSP_BYTES = re.compile(br'\r\n[^ \t]|\r[^ \n\t]|\n[^ \t]') - - -class Generator: - """Generates output from a Message object tree. - - This basic generator writes the message to the given file object as plain - text. - """ - # - # Public interface - # - - def __init__(self, outfp, mangle_from_=None, maxheaderlen=None, *, - policy=None): - """Create the generator for message flattening. - - outfp is the output file-like object for writing the message to. It - must have a write() method. - - Optional mangle_from_ is a flag that, when True (the default if policy - is not set), escapes From_ lines in the body of the message by putting - a `>' in front of them. 
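# _parse_headers treats any line beginning with a space or tab as a
# continuation of the previous header, which is how folded headers are
# reassembled; a small sketch:

import email

raw = (
    'Subject: a header value\r\n'
    ' folded onto a second line\r\n'   # leading space marks a continuation
    '\r\n'
    'body\r\n'
)
msg = email.message_from_string(raw)
print(repr(msg['Subject']))   # compat32 keeps the fold inside the value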
- - Optional maxheaderlen specifies the longest length for a non-continued - header. When a header line is longer (in characters, with tabs - expanded to 8 spaces) than maxheaderlen, the header will split as - defined in the Header class. Set maxheaderlen to zero to disable - header wrapping. The default is 78, as recommended (but not required) - by RFC 5322 section 2.1.1. - - The policy keyword specifies a policy object that controls a number of - aspects of the generator's operation. If no policy is specified, - the policy associated with the Message object passed to the - flatten method is used. - - """ - - if mangle_from_ is None: - mangle_from_ = True if policy is None else policy.mangle_from_ - self._fp = outfp - self._mangle_from_ = mangle_from_ - self.maxheaderlen = maxheaderlen - self.policy = policy - - def write(self, s): - # Just delegate to the file object - self._fp.write(s) - - def flatten(self, msg, unixfrom=False, linesep=None): - r"""Print the message object tree rooted at msg to the output file - specified when the Generator instance was created. - - unixfrom is a flag that forces the printing of a Unix From_ delimiter - before the first object in the message tree. If the original message - has no From_ delimiter, a `standard' one is crafted. By default, this - is False to inhibit the printing of any From_ delimiter. - - Note that for subobjects, no From_ line is printed. - - linesep specifies the characters used to indicate a new line in - the output. The default value is determined by the policy specified - when the Generator instance was created or, if none was specified, - from the policy associated with the msg. - - """ - # We use the _XXX constants for operating on data that comes directly - # from the msg, and _encoded_XXX constants for operating on data that - # has already been converted (to bytes in the BytesGenerator) and - # inserted into a temporary buffer. - policy = msg.policy if self.policy is None else self.policy - if linesep is not None: - policy = policy.clone(linesep=linesep) - if self.maxheaderlen is not None: - policy = policy.clone(max_line_length=self.maxheaderlen) - self._NL = policy.linesep - self._encoded_NL = self._encode(self._NL) - self._EMPTY = '' - self._encoded_EMPTY = self._encode(self._EMPTY) - # Because we use clone (below) when we recursively process message - # subparts, and because clone uses the computed policy (not None), - # submessages will automatically get set to the computed policy when - # they are processed by this code. - old_gen_policy = self.policy - old_msg_policy = msg.policy - try: - self.policy = policy - msg.policy = policy - if unixfrom: - ufrom = msg.get_unixfrom() - if not ufrom: - ufrom = 'From nobody ' + time.ctime(time.time()) - self.write(ufrom + self._NL) - self._write(msg) - finally: - self.policy = old_gen_policy - msg.policy = old_msg_policy - - def clone(self, fp): - """Clone this generator with the exact same options.""" - return self.__class__(fp, - self._mangle_from_, - None, # Use policy setting, which we've adjusted - policy=self.policy) - - # - # Protected interface - undocumented ;/ - # - - # Note that we use 'self.write' when what we are writing is coming from - # the source, and self._fp.write when what we are writing is coming from a - # buffer (because the Bytes subclass has already had a chance to transform - # the data in its write method in that case). 
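# A minimal sketch of the flatten() options documented above
# (unixfrom and mangle_from_):

from email.generator import Generator
from email.message import Message
from io import StringIO

msg = Message()
msg['Subject'] = 'demo'
msg.set_payload('From here on, this body line would be ambiguous\n')

fp = StringIO()
g = Generator(fp, mangle_from_=True)
g.flatten(msg, unixfrom=True)   # crafts a 'From nobody <date>' envelope line
print(fp.getvalue())            # the body line comes out as '>From here on, ...'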
This is an entirely - # pragmatic split determined by experiment; we could be more general by - # always using write and having the Bytes subclass write method detect when - # it has already transformed the input; but, since this whole thing is a - # hack anyway this seems good enough. - - def _new_buffer(self): - # BytesGenerator overrides this to return BytesIO. - return StringIO() - - def _encode(self, s): - # BytesGenerator overrides this to encode strings to bytes. - return s - - def _write_lines(self, lines): - # We have to transform the line endings. - if not lines: - return - lines = NLCRE.split(lines) - for line in lines[:-1]: - self.write(line) - self.write(self._NL) - if lines[-1]: - self.write(lines[-1]) - # XXX logic tells me this else should be needed, but the tests fail - # with it and pass without it. (NLCRE.split ends with a blank element - # if and only if there was a trailing newline.) - #else: - # self.write(self._NL) - - def _write(self, msg): - # We can't write the headers yet because of the following scenario: - # say a multipart message includes the boundary string somewhere in - # its body. We'd have to calculate the new boundary /before/ we write - # the headers so that we can write the correct Content-Type: - # parameter. - # - # The way we do this, so as to make the _handle_*() methods simpler, - # is to cache any subpart writes into a buffer. Then we write the - # headers and the buffer contents. That way, subpart handlers can - # Do The Right Thing, and can still modify the Content-Type: header if - # necessary. - oldfp = self._fp - try: - self._munge_cte = None - self._fp = sfp = self._new_buffer() - self._dispatch(msg) - finally: - self._fp = oldfp - munge_cte = self._munge_cte - del self._munge_cte - # If we munged the cte, copy the message again and re-fix the CTE. - if munge_cte: - msg = deepcopy(msg) - # Preserve the header order if the CTE header already exists. - if msg.get('content-transfer-encoding') is None: - msg['Content-Transfer-Encoding'] = munge_cte[0] - else: - msg.replace_header('content-transfer-encoding', munge_cte[0]) - msg.replace_header('content-type', munge_cte[1]) - # Write the headers. First we see if the message object wants to - # handle that itself. If not, we'll do it generically. - meth = getattr(msg, '_write_headers', None) - if meth is None: - self._write_headers(msg) - else: - meth(self) - self._fp.write(sfp.getvalue()) - - def _dispatch(self, msg): - # Get the Content-Type: for the message, then try to dispatch to - # self._handle__(). If there's no handler for the - # full MIME type, then dispatch to self._handle_(). If - # that's missing too, then dispatch to self._writeBody(). 
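# _write_lines is what rewrites whatever mix of \r, \n and \r\n a payload
# uses into the policy linesep; observable with a deliberately mixed body
# (a sketch):

from email.generator import Generator
from email.message import Message
from io import StringIO

msg = Message()
msg.set_payload('line1\rline2\r\nline3\n')   # mixed line endings
out = StringIO()
Generator(out).flatten(msg, linesep='\n')    # clones the policy with LF
print(repr(out.getvalue()))                  # -> '\nline1\nline2\nline3\n'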
- main = msg.get_content_maintype() - sub = msg.get_content_subtype() - specific = UNDERSCORE.join((main, sub)).replace('-', '_') - meth = getattr(self, '_handle_' + specific, None) - if meth is None: - generic = main.replace('-', '_') - meth = getattr(self, '_handle_' + generic, None) - if meth is None: - meth = self._writeBody - meth(msg) - - # - # Default handlers - # - - def _write_headers(self, msg): - for h, v in msg.raw_items(): - folded = self.policy.fold(h, v) - if self.policy.verify_generated_headers: - linesep = self.policy.linesep - if not folded.endswith(self.policy.linesep): - raise HeaderWriteError( - f'folded header does not end with {linesep!r}: {folded!r}') - if NEWLINE_WITHOUT_FWSP.search(folded.removesuffix(linesep)): - raise HeaderWriteError( - f'folded header contains newline: {folded!r}') - self.write(folded) - # A blank line always separates headers from body - self.write(self._NL) - - # - # Handlers for writing types and subtypes - # - - def _handle_text(self, msg): - payload = msg.get_payload() - if payload is None: - return - if not isinstance(payload, str): - raise TypeError('string payload expected: %s' % type(payload)) - if _has_surrogates(msg._payload): - charset = msg.get_param('charset') - if charset is not None: - # XXX: This copy stuff is an ugly hack to avoid modifying the - # existing message. - msg = deepcopy(msg) - del msg['content-transfer-encoding'] - msg.set_payload(msg._payload, charset) - payload = msg.get_payload() - self._munge_cte = (msg['content-transfer-encoding'], - msg['content-type']) - if self._mangle_from_: - payload = fcre.sub('>From ', payload) - self._write_lines(payload) - - # Default body handler - _writeBody = _handle_text - - def _handle_multipart(self, msg): - # The trick here is to write out each part separately, merge them all - # together, and then make sure that the boundary we've chosen isn't - # present in the payload. - msgtexts = [] - subparts = msg.get_payload() - if subparts is None: - subparts = [] - elif isinstance(subparts, str): - # e.g. a non-strict parse of a message with no starting boundary. - self.write(subparts) - return - elif not isinstance(subparts, list): - # Scalar payload - subparts = [subparts] - for part in subparts: - s = self._new_buffer() - g = self.clone(s) - g.flatten(part, unixfrom=False, linesep=self._NL) - msgtexts.append(s.getvalue()) - # BAW: What about boundaries that are wrapped in double-quotes? - boundary = msg.get_boundary() - if not boundary: - # Create a boundary that doesn't appear in any of the - # message texts. 
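# Since _dispatch derives the handler name from the content type, a
# Generator subclass can intercept a specific type just by defining
# _handle_<maintype>_<subtype>. A sketch; RedactingGenerator and its
# behavior are illustrative, not part of the deleted module:

from email.generator import Generator
from email.mime.text import MIMEText
from io import StringIO

class RedactingGenerator(Generator):
    def _handle_text_plain(self, msg):   # chosen by _dispatch for text/plain
        self._write_lines('[text/plain body redacted]\n')

fp = StringIO()
RedactingGenerator(fp).flatten(MIMEText('secret'))
print(fp.getvalue())   # headers unchanged, body replaced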
- alltext = self._encoded_NL.join(msgtexts) - boundary = self._make_boundary(alltext) - msg.set_boundary(boundary) - # If there's a preamble, write it out, with a trailing CRLF - if msg.preamble is not None: - if self._mangle_from_: - preamble = fcre.sub('>From ', msg.preamble) - else: - preamble = msg.preamble - self._write_lines(preamble) - self.write(self._NL) - # dash-boundary transport-padding CRLF - self.write('--' + boundary + self._NL) - # body-part - if msgtexts: - self._fp.write(msgtexts.pop(0)) - # *encapsulation - # --> delimiter transport-padding - # --> CRLF body-part - for body_part in msgtexts: - # delimiter transport-padding CRLF - self.write(self._NL + '--' + boundary + self._NL) - # body-part - self._fp.write(body_part) - # close-delimiter transport-padding - self.write(self._NL + '--' + boundary + '--' + self._NL) - if msg.epilogue is not None: - if self._mangle_from_: - epilogue = fcre.sub('>From ', msg.epilogue) - else: - epilogue = msg.epilogue - self._write_lines(epilogue) - - def _handle_multipart_signed(self, msg): - # The contents of signed parts has to stay unmodified in order to keep - # the signature intact per RFC1847 2.1, so we disable header wrapping. - # RDM: This isn't enough to completely preserve the part, but it helps. - p = self.policy - self.policy = p.clone(max_line_length=0) - try: - self._handle_multipart(msg) - finally: - self.policy = p - - def _handle_message_delivery_status(self, msg): - # We can't just write the headers directly to self's file object - # because this will leave an extra newline between the last header - # block and the boundary. Sigh. - blocks = [] - for part in msg.get_payload(): - s = self._new_buffer() - g = self.clone(s) - g.flatten(part, unixfrom=False, linesep=self._NL) - text = s.getvalue() - lines = text.split(self._encoded_NL) - # Strip off the unnecessary trailing empty line - if lines and lines[-1] == self._encoded_EMPTY: - blocks.append(self._encoded_NL.join(lines[:-1])) - else: - blocks.append(text) - # Now join all the blocks with an empty line. This has the lovely - # effect of separating each block with an empty line, but not adding - # an extra one after the last one. - self._fp.write(self._encoded_NL.join(blocks)) - - def _handle_message(self, msg): - s = self._new_buffer() - g = self.clone(s) - # The payload of a message/rfc822 part should be a multipart sequence - # of length 1. The zeroth element of the list should be the Message - # object for the subpart. Extract that object, stringify it, and - # write it out. - # Except, it turns out, when it's a string instead, which happens when - # and only when HeaderParser is used on a message of mime type - # message/rfc822. Such messages are generated by, for example, - # Groupwise when forwarding unadorned messages. (Issue 7970.) So - # in that case we just emit the string body. - payload = msg._payload - if isinstance(payload, list): - g.flatten(msg.get_payload(0), unixfrom=False, linesep=self._NL) - payload = s.getvalue() - else: - payload = self._encode(payload) - self._fp.write(payload) - - # This used to be a module level function; we use a classmethod for this - # and _compile_re so we can continue to provide the module level function - # for backward compatibility by doing - # _make_boundary = Generator._make_boundary - # at the end of the module. It *is* internal, so we could drop that... - @classmethod - def _make_boundary(cls, text=None): - # Craft a random boundary. If text is given, ensure that the chosen - # boundary doesn't appear in the text. 
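# When no boundary has been set, _handle_multipart invents one that cannot
# collide with any subpart and stores it back on the message; visible from
# the public API:

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

outer = MIMEMultipart()
outer.attach(MIMEText('part one'))
outer.attach(MIMEText('part two'))
print(outer.get_boundary())   # None: nothing chosen yet
text = outer.as_string()      # _handle_multipart picks and records one
print(outer.get_boundary())   # e.g. '===============1234567890=='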
- token = random.randrange(sys.maxsize) - boundary = ('=' * 15) + (_fmt % token) + '==' - if text is None: - return boundary - b = boundary - counter = 0 - while True: - cre = cls._compile_re('^--' + re.escape(b) + '(--)?$', re.MULTILINE) - if not cre.search(text): - break - b = boundary + '.' + str(counter) - counter += 1 - return b - - @classmethod - def _compile_re(cls, s, flags): - return re.compile(s, flags) - - -class BytesGenerator(Generator): - """Generates a bytes version of a Message object tree. - - Functionally identical to the base Generator except that the output is - bytes and not string. When surrogates were used in the input to encode - bytes, these are decoded back to bytes for output. If the policy has - cte_type set to 7bit, then the message is transformed such that the - non-ASCII bytes are properly content transfer encoded, using the charset - unknown-8bit. - - The outfp object must accept bytes in its write method. - """ - - def write(self, s): - self._fp.write(s.encode('ascii', 'surrogateescape')) - - def _new_buffer(self): - return BytesIO() - - def _encode(self, s): - return s.encode('ascii') - - def _write_headers(self, msg): - # This is almost the same as the string version, except for handling - # strings with 8bit bytes. - for h, v in msg.raw_items(): - folded = self.policy.fold_binary(h, v) - if self.policy.verify_generated_headers: - linesep = self.policy.linesep.encode() - if not folded.endswith(linesep): - raise HeaderWriteError( - f'folded header does not end with {linesep!r}: {folded!r}') - if NEWLINE_WITHOUT_FWSP_BYTES.search(folded.removesuffix(linesep)): - raise HeaderWriteError( - f'folded header contains newline: {folded!r}') - self._fp.write(folded) - # A blank line always separates headers from body - self.write(self._NL) - - def _handle_text(self, msg): - # If the string has surrogates the original source was bytes, so - # just write it back out. - if msg._payload is None: - return - if _has_surrogates(msg._payload) and not self.policy.cte_type=='7bit': - if self._mangle_from_: - msg._payload = fcre.sub(">From ", msg._payload) - self._write_lines(msg._payload) - else: - super(BytesGenerator,self)._handle_text(msg) - - # Default body handler - _writeBody = _handle_text - - @classmethod - def _compile_re(cls, s, flags): - return re.compile(s.encode('ascii'), flags) - - -_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]' - -class DecodedGenerator(Generator): - """Generates a text representation of a message. - - Like the Generator base class, except that non-text parts are substituted - with a format string representing the part. - """ - def __init__(self, outfp, mangle_from_=None, maxheaderlen=None, fmt=None, *, - policy=None): - """Like Generator.__init__() except that an additional optional - argument is allowed. - - Walks through all subparts of a message. If the subpart is of main - type `text', then it prints the decoded payload of the subpart. - - Otherwise, fmt is a format string that is used instead of the message - payload. 
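# BytesGenerator mirrors the text Generator but writes bytes, re-encoding
# surrogate-escaped data back to the original octets; a minimal sketch:

from email.generator import BytesGenerator
from email.message import Message
from io import BytesIO

msg = Message()
msg['Subject'] = 'bytes demo'
msg.set_payload('body\n')

buf = BytesIO()
BytesGenerator(buf).flatten(msg)
print(buf.getvalue())   # b'Subject: bytes demo\n\nbody\n'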
fmt is expanded with the following keywords (in - %(keyword)s format): - - type : Full MIME type of the non-text part - maintype : Main MIME type of the non-text part - subtype : Sub-MIME type of the non-text part - filename : Filename of the non-text part - description: Description associated with the non-text part - encoding : Content transfer encoding of the non-text part - - The default value for fmt is None, meaning - - [Non-text (%(type)s) part of message omitted, filename %(filename)s] - """ - Generator.__init__(self, outfp, mangle_from_, maxheaderlen, - policy=policy) - if fmt is None: - self._fmt = _FMT - else: - self._fmt = fmt - - def _dispatch(self, msg): - for part in msg.walk(): - maintype = part.get_content_maintype() - if maintype == 'text': - print(part.get_payload(decode=False), file=self) - elif maintype == 'multipart': - # Just skip this - pass - else: - print(self._fmt % { - 'type' : part.get_content_type(), - 'maintype' : part.get_content_maintype(), - 'subtype' : part.get_content_subtype(), - 'filename' : part.get_filename('[no filename]'), - 'description': part.get('Content-Description', - '[no description]'), - 'encoding' : part.get('Content-Transfer-Encoding', - '[no encoding]'), - }, file=self) - - -# Helper used by Generator._make_boundary -_width = len(repr(sys.maxsize-1)) -_fmt = '%%0%dd' % _width - -# Backward compatibility -_make_boundary = Generator._make_boundary diff --git a/Python313_13_x64_Template/Lib/email/header.py b/Python313_13_x64_Template/Lib/email/header.py deleted file mode 100644 index a0aadb97..00000000 --- a/Python313_13_x64_Template/Lib/email/header.py +++ /dev/null @@ -1,582 +0,0 @@ -# Copyright (C) 2002-2007 Python Software Foundation -# Author: Ben Gertzfield, Barry Warsaw -# Contact: email-sig@python.org - -"""Header encoding and decoding functionality.""" - -__all__ = [ - 'Header', - 'decode_header', - 'make_header', - ] - -import re -import binascii - -import email.quoprimime -import email.base64mime - -from email.errors import HeaderParseError -from email import charset as _charset -Charset = _charset.Charset - -NL = '\n' -SPACE = ' ' -BSPACE = b' ' -SPACE8 = ' ' * 8 -EMPTYSTRING = '' -MAXLINELEN = 78 -FWS = ' \t' - -USASCII = Charset('us-ascii') -UTF8 = Charset('utf-8') - -# Match encoded-word strings in the form =?charset?q?Hello_World?= -ecre = re.compile(r''' - =\? # literal =? - (?P[^?]*?) # non-greedy up to the next ? is the charset - \? # literal ? - (?P[qQbB]) # either a "q" or a "b", case insensitive - \? # literal ? - (?P.*?) # non-greedy up to the next ?= is the encoded string - \?= # literal ?= - ''', re.VERBOSE | re.MULTILINE) - -# Field name regexp, including trailing colon, but not separating whitespace, -# according to RFC 2822. Character range is from tilde to exclamation mark. -# For use with .match() -fcre = re.compile(r'[\041-\176]+:$') - -# Find a header embedded in a putative header value. Used to check for -# header injection attack. -_embedded_header = re.compile(r'\n[^ \t]+:') - - -# Helpers -_max_append = email.quoprimime._max_append - - -def decode_header(header): - """Decode a message header value without converting charset. - - For historical reasons, this function may return either: - - 1. A list of length 1 containing a pair (str, None). - 2. A list of (bytes, charset) pairs containing each of the decoded - parts of the header. Charset is None for non-encoded parts of the header, - otherwise a lower-case string containing the name of the character set - specified in the encoded string. 
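# A short sketch of DecodedGenerator with a custom fmt using the keywords
# listed above (filenames and payloads are illustrative):

import sys
from email.generator import DecodedGenerator
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()
msg.attach(MIMEText('hello'))
msg.attach(MIMEApplication(b'\x00\x01', Name='blob.bin'))

# Text parts are printed decoded; the application part is replaced by fmt.
DecodedGenerator(sys.stdout, fmt='[skipped %(type)s: %(filename)s]').flatten(msg)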
- - header may be a string that may or may not contain RFC2047 encoded words, - or it may be a Header object. - - An email.errors.HeaderParseError may be raised when certain decoding error - occurs (e.g. a base64 decoding exception). - - This function exists for backwards compatibility only. For new code, we - recommend using email.headerregistry.HeaderRegistry instead. - """ - # If it is a Header object, we can just return the encoded chunks. - if hasattr(header, '_chunks'): - return [(_charset._encode(string, str(charset)), str(charset)) - for string, charset in header._chunks] - # If no encoding, just return the header with no charset. - if not ecre.search(header): - return [(header, None)] - # First step is to parse all the encoded parts into triplets of the form - # (encoded_string, encoding, charset). For unencoded strings, the last - # two parts will be None. - words = [] - for line in header.splitlines(): - parts = ecre.split(line) - first = True - while parts: - unencoded = parts.pop(0) - if first: - unencoded = unencoded.lstrip() - first = False - if unencoded: - words.append((unencoded, None, None)) - if parts: - charset = parts.pop(0).lower() - encoding = parts.pop(0).lower() - encoded = parts.pop(0) - words.append((encoded, encoding, charset)) - # Now loop over words and remove words that consist of whitespace - # between two encoded strings. - droplist = [] - for n, w in enumerate(words): - if n>1 and w[1] and words[n-2][1] and words[n-1][0].isspace(): - droplist.append(n-1) - for d in reversed(droplist): - del words[d] - - # The next step is to decode each encoded word by applying the reverse - # base64 or quopri transformation. decoded_words is now a list of the - # form (decoded_word, charset). - decoded_words = [] - for encoded_string, encoding, charset in words: - if encoding is None: - # This is an unencoded word. - decoded_words.append((encoded_string, charset)) - elif encoding == 'q': - word = email.quoprimime.header_decode(encoded_string) - decoded_words.append((word, charset)) - elif encoding == 'b': - paderr = len(encoded_string) % 4 # Postel's law: add missing padding - if paderr: - encoded_string += '==='[:4 - paderr] - try: - word = email.base64mime.decode(encoded_string) - except binascii.Error: - raise HeaderParseError('Base64 decoding error') - else: - decoded_words.append((word, charset)) - else: - raise AssertionError('Unexpected encoding: ' + encoding) - # Now convert all words to bytes and collapse consecutive runs of - # similarly encoded words. - collapsed = [] - last_word = last_charset = None - for word, charset in decoded_words: - if isinstance(word, str): - word = bytes(word, 'raw-unicode-escape') - if last_word is None: - last_word = word - last_charset = charset - elif charset != last_charset: - collapsed.append((last_word, last_charset)) - last_word = word - last_charset = charset - elif last_charset is None: - last_word += BSPACE + word - else: - last_word += word - collapsed.append((last_word, last_charset)) - return collapsed - - -def make_header(decoded_seq, maxlinelen=None, header_name=None, - continuation_ws=' '): - """Create a Header from a sequence of pairs as returned by decode_header() - - decode_header() takes a header value string and returns a sequence of - pairs of the format (decoded_string, charset) where charset is the string - name of the character set. - - This function takes one of those sequence of pairs and returns a Header - instance. Optional maxlinelen, header_name, and continuation_ws are as in - the Header constructor. 
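# The two return shapes described above, exercised on a mixed RFC 2047
# value (a minimal sketch):

from email.header import decode_header, make_header

raw = '=?utf-8?q?Caf=C3=A9?= menu'
parts = decode_header(raw)
print(parts)                    # [(b'Caf\xc3\xa9', 'utf-8'), (b' menu', None)]
print(str(make_header(parts)))  # -> 'Café menu'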
- - This function exists for backwards compatibility only, and is not - recommended for use in new code. - """ - h = Header(maxlinelen=maxlinelen, header_name=header_name, - continuation_ws=continuation_ws) - for s, charset in decoded_seq: - # None means us-ascii but we can simply pass it on to h.append() - if charset is not None and not isinstance(charset, Charset): - charset = Charset(charset) - h.append(s, charset) - return h - - -class Header: - def __init__(self, s=None, charset=None, - maxlinelen=None, header_name=None, - continuation_ws=' ', errors='strict'): - """Create a MIME-compliant header that can contain many character sets. - - Optional s is the initial header value. If None, the initial header - value is not set. You can later append to the header with .append() - method calls. s may be a byte string or a Unicode string, but see the - .append() documentation for semantics. - - Optional charset serves two purposes: it has the same meaning as the - charset argument to the .append() method. It also sets the default - character set for all subsequent .append() calls that omit the charset - argument. If charset is not provided in the constructor, the us-ascii - charset is used both as s's initial charset and as the default for - subsequent .append() calls. - - The maximum line length can be specified explicitly via maxlinelen. For - splitting the first line to a shorter value (to account for the field - header which isn't included in s, e.g. `Subject') pass in the name of - the field in header_name. The default maxlinelen is 78 as recommended - by RFC 2822. - - continuation_ws must be RFC 2822 compliant folding whitespace (usually - either a space or a hard tab) which will be prepended to continuation - lines. - - errors is passed through to the .append() call. - """ - if charset is None: - charset = USASCII - elif not isinstance(charset, Charset): - charset = Charset(charset) - self._charset = charset - self._continuation_ws = continuation_ws - self._chunks = [] - if s is not None: - self.append(s, charset, errors) - if maxlinelen is None: - maxlinelen = MAXLINELEN - self._maxlinelen = maxlinelen - if header_name is None: - self._headerlen = 0 - else: - # Take the separating colon and space into account. - self._headerlen = len(header_name) + 2 - - def __str__(self): - """Return the string value of the header.""" - self._normalize() - uchunks = [] - lastcs = None - lastspace = None - for string, charset in self._chunks: - # We must preserve spaces between encoded and non-encoded word - # boundaries, which means for us we need to add a space when we go - # from a charset to None/us-ascii, or from None/us-ascii to a - # charset. Only do this for the second and subsequent chunks. - # Don't add a space if the None/us-ascii string already has - # a space (trailing or leading depending on transition) - nextcs = charset - if nextcs == _charset.UNKNOWN8BIT: - original_bytes = string.encode('ascii', 'surrogateescape') - string = original_bytes.decode('ascii', 'replace') - if uchunks: - hasspace = string and self._nonctext(string[0]) - if lastcs not in (None, 'us-ascii'): - if nextcs in (None, 'us-ascii') and not hasspace: - uchunks.append(SPACE) - nextcs = None - elif nextcs not in (None, 'us-ascii') and not lastspace: - uchunks.append(SPACE) - lastspace = string and self._nonctext(string[-1]) - lastcs = nextcs - uchunks.append(string) - return EMPTYSTRING.join(uchunks) - - # Rich comparison operators for equality only. 
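# A sketch of the constructor semantics described above, mixing charsets
# in one header:

from email.header import Header

h = Header('Hello ', charset='us-ascii', header_name='Subject')
h.append('Wörld', charset='iso-8859-1')   # per-chunk charset
print(h.encode())   # -> 'Hello =?iso-8859-1?q?W=F6rld?='
print(str(h))       # decoded view: 'Hello Wörld'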
BAW: does it make sense to - # have or explicitly disable <, <=, >, >= operators? - def __eq__(self, other): - # other may be a Header or a string. Both are fine so coerce - # ourselves to a unicode (of the unencoded header value), swap the - # args and do another comparison. - return other == str(self) - - def append(self, s, charset=None, errors='strict'): - """Append a string to the MIME header. - - Optional charset, if given, should be a Charset instance or the name - of a character set (which will be converted to a Charset instance). A - value of None (the default) means that the charset given in the - constructor is used. - - s may be a byte string or a Unicode string. If it is a byte string - (i.e. isinstance(s, str) is false), then charset is the encoding of - that byte string, and a UnicodeError will be raised if the string - cannot be decoded with that charset. If s is a Unicode string, then - charset is a hint specifying the character set of the characters in - the string. In either case, when producing an RFC 2822 compliant - header using RFC 2047 rules, the string will be encoded using the - output codec of the charset. If the string cannot be encoded to the - output codec, a UnicodeError will be raised. - - Optional `errors' is passed as the errors argument to the decode - call if s is a byte string. - """ - if charset is None: - charset = self._charset - elif not isinstance(charset, Charset): - charset = Charset(charset) - if not isinstance(s, str): - input_charset = charset.input_codec or 'us-ascii' - if input_charset == _charset.UNKNOWN8BIT: - s = s.decode('us-ascii', 'surrogateescape') - else: - s = s.decode(input_charset, errors) - # Ensure that the bytes we're storing can be decoded to the output - # character set, otherwise an early error is raised. - output_charset = charset.output_codec or 'us-ascii' - if output_charset != _charset.UNKNOWN8BIT: - try: - s.encode(output_charset, errors) - except UnicodeEncodeError: - if output_charset!='us-ascii': - raise - charset = UTF8 - self._chunks.append((s, charset)) - - def _nonctext(self, s): - """True if string s is not a ctext character of RFC822. - """ - return s.isspace() or s in ('(', ')', '\\') - - def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'): - r"""Encode a message header into an RFC-compliant format. - - There are many issues involved in converting a given string for use in - an email header. Only certain character sets are readable in most - email clients, and as header strings can only contain a subset of - 7-bit ASCII, care must be taken to properly convert and encode (with - Base64 or quoted-printable) header strings. In addition, there is a - 75-character length limit on any given encoded header field, so - line-wrapping must be performed, even with double-byte character sets. - - Optional maxlinelen specifies the maximum length of each generated - line, exclusive of the linesep string. Individual lines may be longer - than maxlinelen if a folding point cannot be found. The first line - will be shorter by the length of the header name plus ": " if a header - name was specified at Header construction time. The default value for - maxlinelen is determined at header construction time. - - Optional splitchars is a string containing characters which should be - given extra weight by the splitting algorithm during normal header - wrapping. 
This is in very rough support of RFC 2822's `higher level - syntactic breaks': split points preceded by a splitchar are preferred - during line splitting, with the characters preferred in the order in - which they appear in the string. Space and tab may be included in the - string to indicate whether preference should be given to one over the - other as a split point when other split chars do not appear in the line - being split. Splitchars does not affect RFC 2047 encoded lines. - - Optional linesep is a string to be used to separate the lines of - the value. The default value is the most useful for typical - Python applications, but it can be set to \r\n to produce RFC-compliant - line separators when needed. - """ - self._normalize() - if maxlinelen is None: - maxlinelen = self._maxlinelen - # A maxlinelen of 0 means don't wrap. For all practical purposes, - # choosing a huge number here accomplishes that and makes the - # _ValueFormatter algorithm much simpler. - if maxlinelen == 0: - maxlinelen = 1000000 - formatter = _ValueFormatter(self._headerlen, maxlinelen, - self._continuation_ws, splitchars) - lastcs = None - hasspace = lastspace = None - for string, charset in self._chunks: - if hasspace is not None: - hasspace = string and self._nonctext(string[0]) - if lastcs not in (None, 'us-ascii'): - if not hasspace or charset not in (None, 'us-ascii'): - formatter.add_transition() - elif charset not in (None, 'us-ascii') and not lastspace: - formatter.add_transition() - lastspace = string and self._nonctext(string[-1]) - lastcs = charset - hasspace = False - lines = string.splitlines() - if lines: - formatter.feed('', lines[0], charset) - else: - formatter.feed('', '', charset) - for line in lines[1:]: - formatter.newline() - if charset.header_encoding is not None: - formatter.feed(self._continuation_ws, ' ' + line.lstrip(), - charset) - else: - sline = line.lstrip() - fws = line[:len(line)-len(sline)] - formatter.feed(fws, sline, charset) - if len(lines) > 1: - formatter.newline() - if self._chunks: - formatter.add_transition() - value = formatter._str(linesep) - if _embedded_header.search(value): - raise HeaderParseError("header value appears to contain " - "an embedded header: {!r}".format(value)) - return value - - def _normalize(self): - # Step 1: Normalize the chunks so that all runs of identical charsets - # get collapsed into a single unicode string. 
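# A sketch of encode() with the splitchars preference and explicit CRLF
# separators described above (the exact fold points depend on the
# splitting algorithm):

from email.header import Header

h = Header('first clause; second clause; third clause, then a long tail',
           header_name='X-Demo')
folded = h.encode(splitchars=';, \t', maxlinelen=40, linesep='\r\n')
print(repr(folded))   # breaks preferentially after ';', then ',', then spaces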
- chunks = [] - last_charset = None - last_chunk = [] - for string, charset in self._chunks: - if charset == last_charset: - last_chunk.append(string) - else: - if last_charset is not None: - chunks.append((SPACE.join(last_chunk), last_charset)) - last_chunk = [string] - last_charset = charset - if last_chunk: - chunks.append((SPACE.join(last_chunk), last_charset)) - self._chunks = chunks - - -class _ValueFormatter: - def __init__(self, headerlen, maxlen, continuation_ws, splitchars): - self._maxlen = maxlen - self._continuation_ws = continuation_ws - self._continuation_ws_len = len(continuation_ws) - self._splitchars = splitchars - self._lines = [] - self._current_line = _Accumulator(headerlen) - - def _str(self, linesep): - self.newline() - return linesep.join(self._lines) - - def __str__(self): - return self._str(NL) - - def newline(self): - end_of_line = self._current_line.pop() - if end_of_line != (' ', ''): - self._current_line.push(*end_of_line) - if len(self._current_line) > 0: - if self._current_line.is_onlyws() and self._lines: - self._lines[-1] += str(self._current_line) - else: - self._lines.append(str(self._current_line)) - self._current_line.reset() - - def add_transition(self): - self._current_line.push(' ', '') - - def feed(self, fws, string, charset): - # If the charset has no header encoding (i.e. it is an ASCII encoding) - # then we must split the header at the "highest level syntactic break" - # possible. Note that we don't have a lot of smarts about field - # syntax; we just try to break on semi-colons, then commas, then - # whitespace. Eventually, this should be pluggable. - if charset.header_encoding is None: - self._ascii_split(fws, string, self._splitchars) - return - # Otherwise, we're doing either a Base64 or a quoted-printable - # encoding which means we don't need to split the line on syntactic - # breaks. We can basically just find enough characters to fit on the - # current line, minus the RFC 2047 chrome. What makes this trickier - # though is that we have to split at octet boundaries, not character - # boundaries but it's only safe to split at character boundaries so at - # best we can only get close. - encoded_lines = charset.header_encode_lines(string, self._maxlengths()) - # The first element extends the current line, but if it's None then - # nothing more fit on the current line so start a new line. - try: - first_line = encoded_lines.pop(0) - except IndexError: - # There are no encoded lines, so we're done. - return - if first_line is not None: - self._append_chunk(fws, first_line) - try: - last_line = encoded_lines.pop() - except IndexError: - # There was only one line. - return - self.newline() - self._current_line.push(self._continuation_ws, last_line) - # Everything else are full lines in themselves. - for line in encoded_lines: - self._lines.append(self._continuation_ws + line) - - def _maxlengths(self): - # The first line's length. - yield self._maxlen - len(self._current_line) - while True: - yield self._maxlen - self._continuation_ws_len - - def _ascii_split(self, fws, string, splitchars): - # The RFC 2822 header folding algorithm is simple in principle but - # complex in practice. Lines may be folded any place where "folding - # white space" appears by inserting a linesep character in front of the - # FWS. The complication is that not all spaces or tabs qualify as FWS, - # and we are also supposed to prefer to break at "higher level - # syntactic breaks". 
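# _normalize() collapses adjacent chunks that share a charset before
# folding; _chunks is internal state, so the sketch below peeks at it
# purely for illustration:

from email.header import Header

h = Header('one', charset='us-ascii')
h.append('two')    # same default charset as the first chunk
h.encode()         # encode() calls _normalize() first
print(h._chunks)   # collapsed to a single ('one two', us-ascii) chunk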
We can't do either of these without intimate - # knowledge of the structure of structured headers, which we don't have - # here. So the best we can do here is prefer to break at the specified - # splitchars, and hope that we don't choose any spaces or tabs that - # aren't legal FWS. (This is at least better than the old algorithm, - # where we would sometimes *introduce* FWS after a splitchar, or the - # algorithm before that, where we would turn all white space runs into - # single spaces or tabs.) - parts = re.split("(["+FWS+"]+)", fws+string) - if parts[0]: - parts[:0] = [''] - else: - parts.pop(0) - for fws, part in zip(*[iter(parts)]*2): - self._append_chunk(fws, part) - - def _append_chunk(self, fws, string): - self._current_line.push(fws, string) - if len(self._current_line) > self._maxlen: - # Find the best split point, working backward from the end. - # There might be none, on a long first line. - for ch in self._splitchars: - for i in range(self._current_line.part_count()-1, 0, -1): - if ch.isspace(): - fws = self._current_line[i][0] - if fws and fws[0]==ch: - break - prevpart = self._current_line[i-1][1] - if prevpart and prevpart[-1]==ch: - break - else: - continue - break - else: - fws, part = self._current_line.pop() - if self._current_line._initial_size > 0: - # There will be a header, so leave it on a line by itself. - self.newline() - if not fws: - # We don't use continuation_ws here because the whitespace - # after a header should always be a space. - fws = ' ' - self._current_line.push(fws, part) - return - remainder = self._current_line.pop_from(i) - self._lines.append(str(self._current_line)) - self._current_line.reset(remainder) - - -class _Accumulator(list): - - def __init__(self, initial_size=0): - self._initial_size = initial_size - super().__init__() - - def push(self, fws, string): - self.append((fws, string)) - - def pop_from(self, i=0): - popped = self[i:] - self[i:] = [] - return popped - - def pop(self): - if self.part_count()==0: - return ('', '') - return super().pop() - - def __len__(self): - return sum((len(fws)+len(part) for fws, part in self), - self._initial_size) - - def __str__(self): - return EMPTYSTRING.join((EMPTYSTRING.join((fws, part)) - for fws, part in self)) - - def reset(self, startval=None): - if startval is None: - startval = [] - self[:] = startval - self._initial_size = 0 - - def is_onlyws(self): - return self._initial_size==0 and (not self or str(self).isspace()) - - def part_count(self): - return super().__len__() diff --git a/Python313_13_x64_Template/Lib/email/iterators.py b/Python313_13_x64_Template/Lib/email/iterators.py deleted file mode 100644 index 3410935e..00000000 --- a/Python313_13_x64_Template/Lib/email/iterators.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Various types of useful iterators and generators.""" - -__all__ = [ - 'body_line_iterator', - 'typed_subpart_iterator', - 'walk', - # Do not include _structure() since it's part of the debugging API. - ] - -import sys -from io import StringIO - - -# This function will become a method of the Message class -def walk(self): - """Walk over the message tree, yielding each subpart. - - The walk is performed in depth-first order. This method is a - generator. - """ - yield self - if self.is_multipart(): - for subpart in self.get_payload(): - yield from subpart.walk() - - -# These two functions are imported into the Iterators.py interface module. 
-def body_line_iterator(msg, decode=False): - """Iterate over the parts, returning string payloads line-by-line. - - Optional decode (default False) is passed through to .get_payload(). - """ - for subpart in msg.walk(): - payload = subpart.get_payload(decode=decode) - if isinstance(payload, str): - yield from StringIO(payload) - - -def typed_subpart_iterator(msg, maintype='text', subtype=None): - """Iterate over the subparts with a given MIME type. - - Use `maintype' as the main MIME type to match against; this defaults to - "text". Optional `subtype' is the MIME subtype to match against; if - omitted, only the main type is matched. - """ - for subpart in msg.walk(): - if subpart.get_content_maintype() == maintype: - if subtype is None or subpart.get_content_subtype() == subtype: - yield subpart - - -def _structure(msg, fp=None, level=0, include_default=False): - """A handy debugging aid""" - if fp is None: - fp = sys.stdout - tab = ' ' * (level * 4) - print(tab + msg.get_content_type(), end='', file=fp) - if include_default: - print(' [%s]' % msg.get_default_type(), file=fp) - else: - print(file=fp) - if msg.is_multipart(): - for subpart in msg.get_payload(): - _structure(subpart, fp, level+1, include_default) diff --git a/Python313_13_x64_Template/Lib/email/message.py b/Python313_13_x64_Template/Lib/email/message.py deleted file mode 100644 index 80f01d66..00000000 --- a/Python313_13_x64_Template/Lib/email/message.py +++ /dev/null @@ -1,1217 +0,0 @@ -# Copyright (C) 2001-2007 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Basic message object for the email package object model.""" - -__all__ = ['Message', 'EmailMessage'] - -import binascii -import re -import quopri -from io import BytesIO, StringIO - -# Intrapackage imports -from email import utils -from email import errors -from email._policybase import compat32 -from email import charset as _charset -from email._encoded_words import decode_b -Charset = _charset.Charset - -SEMISPACE = '; ' - -# Regular expression that matches `special' characters in parameters, the -# existence of which force quoting of the parameter value. -tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]') - - -def _splitparam(param): - # Split header parameters. BAW: this may be too simple. It isn't - # strictly RFC 2045 (section 5.1) compliant, but it catches most headers - # found in the wild. We may eventually need a full fledged parser. - # RDM: we might have a Header here; for now just stringify it. - a, sep, b = str(param).partition(';') - if not sep: - return a.strip(), None - return a.strip(), b.strip() - -def _formatparam(param, value=None, quote=True): - """Convenience function to format and return a key=value pair. - - This will quote the value if needed or if quote is true. If value is a - three tuple (charset, language, value), it will be encoded according - to RFC2231 rules. If it contains non-ascii characters it will likewise - be encoded according to RFC2231 rules, using the utf-8 charset and - a null language. - """ - if value is not None and len(value) > 0: - # A tuple is used for RFC 2231 encoded parameter values where items - # are (charset, language, value). charset is a string, not a Charset - # instance. RFC 2231 encoded values are never quoted, per RFC. 
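# A quick sketch of the iterators module's public helpers on a two-part
# message:

from email import message_from_string
from email.iterators import _structure, typed_subpart_iterator

raw = (
    'Content-Type: multipart/mixed; boundary="B"\n\n'
    '--B\nContent-Type: text/plain\n\nplain part\n'
    '--B\nContent-Type: text/html\n\n<p>html part</p>\n'
    '--B--\n'
)
msg = message_from_string(raw)
for part in typed_subpart_iterator(msg, 'text', 'html'):
    print(part.get_payload())   # only the text/html subpart
_structure(msg)                 # debugging aid: prints the MIME tree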
- if isinstance(value, tuple): - # Encode as per RFC 2231 - param += '*' - value = utils.encode_rfc2231(value[2], value[0], value[1]) - return '%s=%s' % (param, value) - else: - try: - value.encode('ascii') - except UnicodeEncodeError: - param += '*' - value = utils.encode_rfc2231(value, 'utf-8', '') - return '%s=%s' % (param, value) - # BAW: Please check this. I think that if quote is set it should - # force quoting even if not necessary. - if quote or tspecials.search(value): - return '%s="%s"' % (param, utils.quote(value)) - else: - return '%s=%s' % (param, value) - else: - return param - -def _parseparam(s): - # RDM This might be a Header, so for now stringify it. - s = ';' + str(s) - plist = [] - start = 0 - while s.find(';', start) == start: - start += 1 - end = s.find(';', start) - ind, diff = start, 0 - while end > 0: - diff += s.count('"', ind, end) - s.count('\\"', ind, end) - if diff % 2 == 0: - break - end, ind = ind, s.find(';', end + 1) - if end < 0: - end = len(s) - i = s.find('=', start, end) - if i == -1: - f = s[start:end] - else: - f = s[start:i].rstrip().lower() + '=' + s[i+1:end].lstrip() - plist.append(f.strip()) - start = end - return plist - - -def _unquotevalue(value): - # This is different than utils.collapse_rfc2231_value() because it doesn't - # try to convert the value to a unicode. Message.get_param() and - # Message.get_params() are both currently defined to return the tuple in - # the face of RFC 2231 parameters. - if isinstance(value, tuple): - return value[0], value[1], utils.unquote(value[2]) - else: - return utils.unquote(value) - - -def _decode_uu(encoded): - """Decode uuencoded data.""" - decoded_lines = [] - encoded_lines_iter = iter(encoded.splitlines()) - for line in encoded_lines_iter: - if line.startswith(b"begin "): - mode, _, path = line.removeprefix(b"begin ").partition(b" ") - try: - int(mode, base=8) - except ValueError: - continue - else: - break - else: - raise ValueError("`begin` line not found") - for line in encoded_lines_iter: - if not line: - raise ValueError("Truncated input") - elif line.strip(b' \t\r\n\f') == b'end': - break - try: - decoded_line = binascii.a2b_uu(line) - except binascii.Error: - # Workaround for broken uuencoders by /Fredrik Lundh - nbytes = (((line[0]-32) & 63) * 4 + 5) // 3 - decoded_line = binascii.a2b_uu(line[:nbytes]) - decoded_lines.append(decoded_line) - - return b''.join(decoded_lines) - - -class Message: - """Basic message object. - - A message object is defined as something that has a bunch of RFC 5322 - headers and a payload. It may optionally have an envelope header - (a.k.a. Unix-From or From_ header). If the message is a container (i.e. a - multipart or a message/rfc822), then the payload is a list of Message - objects, otherwise it is a string. - - Message objects implement part of the `mapping' interface, which assumes - there is exactly one occurrence of the header per message. Some headers - do in fact appear multiple times (e.g. Received) and for those headers, - you must use the explicit API to set or get all the headers. Not all of - the mapping methods are implemented. - """ - def __init__(self, policy=compat32): - self.policy = policy - self._headers = [] - self._unixfrom = None - self._payload = None - self._charset = None - # Defaults for multipart messages - self.preamble = self.epilogue = None - self.defects = [] - # Default content type - self._default_type = 'text/plain' - - def __str__(self): - """Return the entire formatted message as a string. 
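# _formatparam's RFC 2231 path is reachable through the public add_header
# API; a round-trip sketch:

from email.message import Message

msg = Message()
# A (charset, language, value) triple forces RFC 2231 encoding; a plain
# non-ASCII string would be encoded the same way with utf-8 and no language.
msg.add_header('Content-Disposition', 'attachment',
               filename=('utf-8', '', 'Fußballer.ppt'))
print(msg['Content-Disposition'])
# -> attachment; filename*=utf-8''Fu%C3%9Fballer.ppt
print(msg.get_filename())   # decoded back: 'Fußballer.ppt'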
- """ - return self.as_string() - - def as_string(self, unixfrom=False, maxheaderlen=0, policy=None): - """Return the entire formatted message as a string. - - Optional 'unixfrom', when true, means include the Unix From_ envelope - header. For backward compatibility reasons, if maxheaderlen is - not specified it defaults to 0, so you must override it explicitly - if you want a different maxheaderlen. 'policy' is passed to the - Generator instance used to serialize the message; if it is not - specified the policy associated with the message instance is used. - - If the message object contains binary data that is not encoded - according to RFC standards, the non-compliant data will be replaced by - unicode "unknown character" code points. - """ - from email.generator import Generator - policy = self.policy if policy is None else policy - fp = StringIO() - g = Generator(fp, - mangle_from_=False, - maxheaderlen=maxheaderlen, - policy=policy) - g.flatten(self, unixfrom=unixfrom) - return fp.getvalue() - - def __bytes__(self): - """Return the entire formatted message as a bytes object. - """ - return self.as_bytes() - - def as_bytes(self, unixfrom=False, policy=None): - """Return the entire formatted message as a bytes object. - - Optional 'unixfrom', when true, means include the Unix From_ envelope - header. 'policy' is passed to the BytesGenerator instance used to - serialize the message; if not specified the policy associated with - the message instance is used. - """ - from email.generator import BytesGenerator - policy = self.policy if policy is None else policy - fp = BytesIO() - g = BytesGenerator(fp, mangle_from_=False, policy=policy) - g.flatten(self, unixfrom=unixfrom) - return fp.getvalue() - - def is_multipart(self): - """Return True if the message consists of multiple parts.""" - return isinstance(self._payload, list) - - # - # Unix From_ line - # - def set_unixfrom(self, unixfrom): - self._unixfrom = unixfrom - - def get_unixfrom(self): - return self._unixfrom - - # - # Payload manipulation. - # - def attach(self, payload): - """Add the given payload to the current payload. - - The current payload will always be a list of objects after this method - is called. If you want to set the payload to a scalar object, use - set_payload() instead. - """ - if self._payload is None: - self._payload = [payload] - else: - try: - self._payload.append(payload) - except AttributeError: - raise TypeError("Attach is not valid on a message with a" - " non-multipart payload") - - def get_payload(self, i=None, decode=False): - """Return a reference to the payload. - - The payload will either be a list object or a string. If you mutate - the list object, you modify the message's payload in place. Optional - i returns that index into the payload. - - Optional decode is a flag indicating whether the payload should be - decoded or not, according to the Content-Transfer-Encoding header - (default is False). - - When True and the message is not a multipart, the payload will be - decoded if this header's value is `quoted-printable' or `base64'. If - some other encoding is used, or the header is missing, or if the - payload has bogus data (i.e. bogus base64 or uuencoded data), the - payload is returned as-is. - - If the message is a multipart and the decode flag is True, then None - is returned. 
- """ - # Here is the logic table for this code, based on the email5.0.0 code: - # i decode is_multipart result - # ------ ------ ------------ ------------------------------ - # None True True None - # i True True None - # None False True _payload (a list) - # i False True _payload element i (a Message) - # i False False error (not a list) - # i True False error (not a list) - # None False False _payload - # None True False _payload decoded (bytes) - # Note that Barry planned to factor out the 'decode' case, but that - # isn't so easy now that we handle the 8 bit data, which needs to be - # converted in both the decode and non-decode path. - if self.is_multipart(): - if decode: - return None - if i is None: - return self._payload - else: - return self._payload[i] - # For backward compatibility, Use isinstance and this error message - # instead of the more logical is_multipart test. - if i is not None and not isinstance(self._payload, list): - raise TypeError('Expected list, got %s' % type(self._payload)) - payload = self._payload - cte = self.get('content-transfer-encoding', '') - if hasattr(cte, 'cte'): - cte = cte.cte - else: - # cte might be a Header, so for now stringify it. - cte = str(cte).strip().lower() - # payload may be bytes here. - if not decode: - if isinstance(payload, str) and utils._has_surrogates(payload): - try: - bpayload = payload.encode('ascii', 'surrogateescape') - try: - payload = bpayload.decode(self.get_content_charset('ascii'), 'replace') - except LookupError: - payload = bpayload.decode('ascii', 'replace') - except UnicodeEncodeError: - pass - return payload - if isinstance(payload, str): - try: - bpayload = payload.encode('ascii', 'surrogateescape') - except UnicodeEncodeError: - # This won't happen for RFC compliant messages (messages - # containing only ASCII code points in the unicode input). - # If it does happen, turn the string into bytes in a way - # guaranteed not to fail. - bpayload = payload.encode('raw-unicode-escape') - else: - bpayload = payload - if cte == 'quoted-printable': - return quopri.decodestring(bpayload) - elif cte == 'base64': - # XXX: this is a bit of a hack; decode_b should probably be factored - # out somewhere, but I haven't figured out where yet. - value, defects = decode_b(b''.join(bpayload.splitlines())) - for defect in defects: - self.policy.handle_defect(self, defect) - return value - elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'): - try: - return _decode_uu(bpayload) - except ValueError: - # Some decoding problem. - return bpayload - if isinstance(payload, str): - return bpayload - return payload - - def set_payload(self, payload, charset=None): - """Set the payload to the given value. - - Optional charset sets the message's default character set. See - set_charset() for details. - """ - if hasattr(payload, 'encode'): - if charset is None: - self._payload = payload - return - if not isinstance(charset, Charset): - charset = Charset(charset) - payload = payload.encode(charset.output_charset, 'surrogateescape') - if hasattr(payload, 'decode'): - self._payload = payload.decode('ascii', 'surrogateescape') - else: - self._payload = payload - if charset is not None: - self.set_charset(charset) - - def set_charset(self, charset): - """Set the charset of the payload to a given character set. - - charset can be a Charset instance, a string naming a character set, or - None. If it is a string it will be converted to a Charset instance. - If charset is None, the charset parameter will be removed from the - Content-Type field. 
Anything else will generate a TypeError. - - The message will be assumed to be of type text/* encoded with - charset.input_charset. It will be converted to charset.output_charset - and encoded properly, if needed, when generating the plain text - representation of the message. MIME headers (MIME-Version, - Content-Type, Content-Transfer-Encoding) will be added as needed. - """ - if charset is None: - self.del_param('charset') - self._charset = None - return - if not isinstance(charset, Charset): - charset = Charset(charset) - self._charset = charset - if 'MIME-Version' not in self: - self.add_header('MIME-Version', '1.0') - if 'Content-Type' not in self: - self.add_header('Content-Type', 'text/plain', - charset=charset.get_output_charset()) - else: - self.set_param('charset', charset.get_output_charset()) - if charset != charset.get_output_charset(): - self._payload = charset.body_encode(self._payload) - if 'Content-Transfer-Encoding' not in self: - cte = charset.get_body_encoding() - try: - cte(self) - except TypeError: - # This 'if' is for backward compatibility, it allows unicode - # through even though that won't work correctly if the - # message is serialized. - payload = self._payload - if payload: - try: - payload = payload.encode('ascii', 'surrogateescape') - except UnicodeError: - payload = payload.encode(charset.output_charset) - self._payload = charset.body_encode(payload) - self.add_header('Content-Transfer-Encoding', cte) - - def get_charset(self): - """Return the Charset instance associated with the message's payload. - """ - return self._charset - - # - # MAPPING INTERFACE (partial) - # - def __len__(self): - """Return the total number of headers, including duplicates.""" - return len(self._headers) - - def __getitem__(self, name): - """Get a header value. - - Return None if the header is missing instead of raising an exception. - - Note that if the header appeared multiple times, exactly which - occurrence gets returned is undefined. Use get_all() to get all - the values matching a header field name. - """ - return self.get(name) - - def __setitem__(self, name, val): - """Set the value of a header. - - Note: this does not overwrite an existing header with the same field - name. Use __delitem__() first to delete any existing headers. - """ - max_count = self.policy.header_max_count(name) - if max_count: - lname = name.lower() - found = 0 - for k, v in self._headers: - if k.lower() == lname: - found += 1 - if found >= max_count: - raise ValueError("There may be at most {} {} headers " - "in a message".format(max_count, name)) - self._headers.append(self.policy.header_store_parse(name, val)) - - def __delitem__(self, name): - """Delete all occurrences of a header, if present. - - Does not raise an exception if the header is missing. - """ - name = name.lower() - newheaders = [] - for k, v in self._headers: - if k.lower() != name: - newheaders.append((k, v)) - self._headers = newheaders - - def __contains__(self, name): - name_lower = name.lower() - for k, v in self._headers: - if name_lower == k.lower(): - return True - return False - - def __iter__(self): - for field, value in self._headers: - yield field - - def keys(self): - """Return a list of all the message's header field names. - - These will be sorted in the order they appeared in the original - message, or were added to the message, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. 
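# The append-don't-overwrite semantics of __setitem__ and the delete-all
# semantics of __delitem__, in a short sketch:

from email.message import Message

msg = Message()
msg['Received'] = 'from relay1'
msg['Received'] = 'from relay2'   # appended, not overwritten
print(len(msg))                   # 2: duplicates are counted
del msg['Received']               # removes every occurrence
print('Received' in msg)          # False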
- """ - return [k for k, v in self._headers] - - def values(self): - """Return a list of all the message's header values. - - These will be sorted in the order they appeared in the original - message, or were added to the message, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. - """ - return [self.policy.header_fetch_parse(k, v) - for k, v in self._headers] - - def items(self): - """Get all the message's header fields and values. - - These will be sorted in the order they appeared in the original - message, or were added to the message, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. - """ - return [(k, self.policy.header_fetch_parse(k, v)) - for k, v in self._headers] - - def get(self, name, failobj=None): - """Get a header value. - - Like __getitem__() but return failobj instead of None when the field - is missing. - """ - name = name.lower() - for k, v in self._headers: - if k.lower() == name: - return self.policy.header_fetch_parse(k, v) - return failobj - - # - # "Internal" methods (public API, but only intended for use by a parser - # or generator, not normal application code. - # - - def set_raw(self, name, value): - """Store name and value in the model without modification. - - This is an "internal" API, intended only for use by a parser. - """ - self._headers.append((name, value)) - - def raw_items(self): - """Return the (name, value) header pairs without modification. - - This is an "internal" API, intended only for use by a generator. - """ - return iter(self._headers.copy()) - - # - # Additional useful stuff - # - - def get_all(self, name, failobj=None): - """Return a list of all the values for the named field. - - These will be sorted in the order they appeared in the original - message, and may contain duplicates. Any fields deleted and - re-inserted are always appended to the header list. - - If no such fields exist, failobj is returned (defaults to None). - """ - values = [] - name = name.lower() - for k, v in self._headers: - if k.lower() == name: - values.append(self.policy.header_fetch_parse(k, v)) - if not values: - return failobj - return values - - def add_header(self, _name, _value, **_params): - """Extended header setting. - - name is the header field to add. keyword arguments can be used to set - additional parameters for the header field, with underscores converted - to dashes. Normally the parameter will be added as key="value" unless - value is None, in which case only the key will be added. If a - parameter value contains non-ASCII characters it can be specified as a - three-tuple of (charset, language, value), in which case it will be - encoded according to RFC2231 rules. Otherwise it will be encoded using - the utf-8 charset and a language of ''. - - Examples: - - msg.add_header('content-disposition', 'attachment', filename='bud.gif') - msg.add_header('content-disposition', 'attachment', - filename=('utf-8', '', 'Fußballer.ppt')) - msg.add_header('content-disposition', 'attachment', - filename='Fußballer.ppt')) - """ - parts = [] - for k, v in _params.items(): - if v is None: - parts.append(k.replace('_', '-')) - else: - parts.append(_formatparam(k.replace('_', '-'), v)) - if _value is not None: - parts.insert(0, _value) - self[_name] = SEMISPACE.join(parts) - - def replace_header(self, _name, _value): - """Replace a header. - - Replace the first matching header found in the message, retaining - header order and case. 
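A quick sketch of add_header()'s keyword handling described above, with an invented filename; note that underscores in keyword names become dashes and a None value emits a bare key:

```python
from email.message import Message

msg = Message()
msg.add_header('Content-Disposition', 'attachment',
               filename='report.pdf', creation_date=None)
print(msg['Content-Disposition'])
# attachment; filename="report.pdf"; creation-date
```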
If no matching header was found, a KeyError is - raised. - """ - _name = _name.lower() - for i, (k, v) in zip(range(len(self._headers)), self._headers): - if k.lower() == _name: - self._headers[i] = self.policy.header_store_parse(k, _value) - break - else: - raise KeyError(_name) - - # - # Use these three methods instead of the three above. - # - - def get_content_type(self): - """Return the message's content type. - - The returned string is coerced to lower case of the form - `maintype/subtype'. If there was no Content-Type header in the - message, the default type as given by get_default_type() will be - returned. Since according to RFC 2045, messages always have a default - type this will always return a value. - - RFC 2045 defines a message's default type to be text/plain unless it - appears inside a multipart/digest container, in which case it would be - message/rfc822. - """ - missing = object() - value = self.get('content-type', missing) - if value is missing: - # This should have no parameters - return self.get_default_type() - ctype = _splitparam(value)[0].lower() - # RFC 2045, section 5.2 says if its invalid, use text/plain - if ctype.count('/') != 1: - return 'text/plain' - return ctype - - def get_content_maintype(self): - """Return the message's main content type. - - This is the `maintype' part of the string returned by - get_content_type(). - """ - ctype = self.get_content_type() - return ctype.split('/')[0] - - def get_content_subtype(self): - """Returns the message's sub-content type. - - This is the `subtype' part of the string returned by - get_content_type(). - """ - ctype = self.get_content_type() - return ctype.split('/')[1] - - def get_default_type(self): - """Return the `default' content type. - - Most messages have a default content type of text/plain, except for - messages that are subparts of multipart/digest containers. Such - subparts have a default content type of message/rfc822. - """ - return self._default_type - - def set_default_type(self, ctype): - """Set the `default' content type. - - ctype should be either "text/plain" or "message/rfc822", although this - is not enforced. The default content type is not stored in the - Content-Type header. - """ - self._default_type = ctype - - def _get_params_preserve(self, failobj, header): - # Like get_params() but preserves the quoting of values. BAW: - # should this be part of the public interface? - missing = object() - value = self.get(header, missing) - if value is missing: - return failobj - params = [] - for p in _parseparam(value): - try: - name, val = p.split('=', 1) - name = name.strip() - val = val.strip() - except ValueError: - # Must have been a bare attribute - name = p.strip() - val = '' - params.append((name, val)) - params = utils.decode_params(params) - return params - - def get_params(self, failobj=None, header='content-type', unquote=True): - """Return the message's Content-Type parameters, as a list. - - The elements of the returned list are 2-tuples of key/value pairs, as - split on the `=' sign. The left hand side of the `=' is the key, - while the right hand side is the value. If there is no `=' sign in - the parameter the value is the empty string. The value is as - described in the get_param() method. - - Optional failobj is the object to return if there is no Content-Type - header. Optional header is the header to search instead of - Content-Type. If unquote is True, the value is unquoted. 
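The content-type accessors and the RFC 2045 default behavior described above, sketched:

```python
from email.message import Message

msg = Message()
print(msg.get_content_type())      # text/plain -- the default, no header needed
msg['Content-Type'] = 'Multipart/MIXED; boundary="x"'
print(msg.get_content_type())      # multipart/mixed -- coerced to lower case
print(msg.get_content_maintype())  # multipart
print(msg.get_content_subtype())   # mixed
```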
- """ - missing = object() - params = self._get_params_preserve(missing, header) - if params is missing: - return failobj - if unquote: - return [(k, _unquotevalue(v)) for k, v in params] - else: - return params - - def get_param(self, param, failobj=None, header='content-type', - unquote=True): - """Return the parameter value if found in the Content-Type header. - - Optional failobj is the object to return if there is no Content-Type - header, or the Content-Type header has no such parameter. Optional - header is the header to search instead of Content-Type. - - Parameter keys are always compared case insensitively. The return - value can either be a string, or a 3-tuple if the parameter was RFC - 2231 encoded. When it's a 3-tuple, the elements of the value are of - the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and - LANGUAGE can be None, in which case you should consider VALUE to be - encoded in the us-ascii charset. You can usually ignore LANGUAGE. - The parameter value (either the returned string, or the VALUE item in - the 3-tuple) is always unquoted, unless unquote is set to False. - - If your application doesn't care whether the parameter was RFC 2231 - encoded, it can turn the return value into a string as follows: - - rawparam = msg.get_param('foo') - param = email.utils.collapse_rfc2231_value(rawparam) - - """ - if header not in self: - return failobj - for k, v in self._get_params_preserve(failobj, header): - if k.lower() == param.lower(): - if unquote: - return _unquotevalue(v) - else: - return v - return failobj - - def set_param(self, param, value, header='Content-Type', requote=True, - charset=None, language='', replace=False): - """Set a parameter in the Content-Type header. - - If the parameter already exists in the header, its value will be - replaced with the new value. - - If header is Content-Type and has not yet been defined for this - message, it will be set to "text/plain" and the new parameter and - value will be appended as per RFC 2045. - - An alternate header can be specified in the header argument, and all - parameters will be quoted as necessary unless requote is False. - - If charset is specified, the parameter will be encoded according to RFC - 2231. Optional language specifies the RFC 2231 language, defaulting - to the empty string. Both charset and language should be strings. - """ - if not isinstance(value, tuple) and charset: - value = (charset, language, value) - - if header not in self and header.lower() == 'content-type': - ctype = 'text/plain' - else: - ctype = self.get(header) - if not self.get_param(param, header=header): - if not ctype: - ctype = _formatparam(param, value, requote) - else: - ctype = SEMISPACE.join( - [ctype, _formatparam(param, value, requote)]) - else: - ctype = '' - for old_param, old_value in self.get_params(header=header, - unquote=requote): - append_param = '' - if old_param.lower() == param.lower(): - append_param = _formatparam(param, value, requote) - else: - append_param = _formatparam(old_param, old_value, requote) - if not ctype: - ctype = append_param - else: - ctype = SEMISPACE.join([ctype, append_param]) - if ctype != self.get(header): - if replace: - self.replace_header(header, ctype) - else: - del self[header] - self[header] = ctype - - def del_param(self, param, header='content-type', requote=True): - """Remove the given parameter completely from the Content-Type header. - - The header will be re-written in place without the parameter or its - value. 
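A minimal sketch of the parameter accessors above; the charset and format values are arbitrary examples:

```python
from email.message import Message

msg = Message()
msg['Content-Type'] = 'text/plain; charset="utf-8"'
print(msg.get_param('charset'))   # utf-8 -- unquoted by default
msg.set_param('format', 'flowed')
print(msg['Content-Type'])        # text/plain; charset="utf-8"; format="flowed"
```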
All values will be quoted as necessary unless requote is - False. Optional header specifies an alternative to the Content-Type - header. - """ - if header not in self: - return - new_ctype = '' - for p, v in self.get_params(header=header, unquote=requote): - if p.lower() != param.lower(): - if not new_ctype: - new_ctype = _formatparam(p, v, requote) - else: - new_ctype = SEMISPACE.join([new_ctype, - _formatparam(p, v, requote)]) - if new_ctype != self.get(header): - del self[header] - self[header] = new_ctype - - def set_type(self, type, header='Content-Type', requote=True): - """Set the main type and subtype for the Content-Type header. - - type must be a string in the form "maintype/subtype", otherwise a - ValueError is raised. - - This method replaces the Content-Type header, keeping all the - parameters in place. If requote is False, this leaves the existing - header's quoting as is. Otherwise, the parameters will be quoted (the - default). - - An alternative header can be specified in the header argument. When - the Content-Type header is set, we'll always also add a MIME-Version - header. - """ - # BAW: should we be strict? - if not type.count('/') == 1: - raise ValueError - # Set the Content-Type, you get a MIME-Version - if header.lower() == 'content-type': - del self['mime-version'] - self['MIME-Version'] = '1.0' - if header not in self: - self[header] = type - return - params = self.get_params(header=header, unquote=requote) - del self[header] - self[header] = type - # Skip the first param; it's the old type. - for p, v in params[1:]: - self.set_param(p, v, header, requote) - - def get_filename(self, failobj=None): - """Return the filename associated with the payload if present. - - The filename is extracted from the Content-Disposition header's - `filename' parameter, and it is unquoted. If that header is missing - the `filename' parameter, this method falls back to looking for the - `name' parameter. - """ - missing = object() - filename = self.get_param('filename', missing, 'content-disposition') - if filename is missing: - filename = self.get_param('name', missing, 'content-type') - if filename is missing: - return failobj - return utils.collapse_rfc2231_value(filename).strip() - - def get_boundary(self, failobj=None): - """Return the boundary associated with the payload if present. - - The boundary is extracted from the Content-Type header's `boundary' - parameter, and it is unquoted. - """ - missing = object() - boundary = self.get_param('boundary', missing) - if boundary is missing: - return failobj - # RFC 2046 says that boundaries may begin but not end in w/s - return utils.collapse_rfc2231_value(boundary).rstrip() - - def set_boundary(self, boundary): - """Set the boundary parameter in Content-Type to 'boundary'. - - This is subtly different than deleting the Content-Type header and - adding a new one with a new boundary parameter via add_header(). The - main difference is that using the set_boundary() method preserves the - order of the Content-Type header in the original message. - - HeaderParseError is raised if the message has no Content-Type header. - """ - missing = object() - params = self._get_params_preserve(missing, 'content-type') - if params is missing: - # There was no Content-Type header, and we don't know what type - # to set it to, so raise an exception. 
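set_type()'s parameter-preserving replacement, sketched under the documented behavior (the charset is an arbitrary example):

```python
from email.message import Message

msg = Message()
msg['Content-Type'] = 'text/plain; charset="us-ascii"'
msg.set_type('text/html')    # replaces the type but keeps the parameters
print(msg['Content-Type'])   # text/html; charset="us-ascii"
print(msg['MIME-Version'])   # 1.0 -- added as a side effect
```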
- raise errors.HeaderParseError('No Content-Type header found') - newparams = [] - foundp = False - for pk, pv in params: - if pk.lower() == 'boundary': - newparams.append(('boundary', '"%s"' % boundary)) - foundp = True - else: - newparams.append((pk, pv)) - if not foundp: - # The original Content-Type header had no boundary attribute. - # Tack one on the end. BAW: should we raise an exception - # instead??? - newparams.append(('boundary', '"%s"' % boundary)) - # Replace the existing Content-Type header with the new value - newheaders = [] - for h, v in self._headers: - if h.lower() == 'content-type': - parts = [] - for k, v in newparams: - if v == '': - parts.append(k) - else: - parts.append('%s=%s' % (k, v)) - val = SEMISPACE.join(parts) - newheaders.append(self.policy.header_store_parse(h, val)) - - else: - newheaders.append((h, v)) - self._headers = newheaders - - def get_content_charset(self, failobj=None): - """Return the charset parameter of the Content-Type header. - - The returned string is always coerced to lower case. If there is no - Content-Type header, or if that header has no charset parameter, - failobj is returned. - """ - missing = object() - charset = self.get_param('charset', missing) - if charset is missing: - return failobj - if isinstance(charset, tuple): - # RFC 2231 encoded, so decode it, and it better end up as ascii. - pcharset = charset[0] or 'us-ascii' - try: - # LookupError will be raised if the charset isn't known to - # Python. UnicodeError will be raised if the encoded text - # contains a character not in the charset. - as_bytes = charset[2].encode('raw-unicode-escape') - charset = str(as_bytes, pcharset) - except (LookupError, UnicodeError): - charset = charset[2] - # charset characters must be in us-ascii range - try: - charset.encode('us-ascii') - except UnicodeError: - return failobj - # RFC 2046, $4.1.2 says charsets are not case sensitive - return charset.lower() - - def get_charsets(self, failobj=None): - """Return a list containing the charset(s) used in this message. - - The returned list of items describes the Content-Type headers' - charset parameter for this message and all the subparts in its - payload. - - Each item will either be a string (the value of the charset parameter - in the Content-Type header of that part) or the value of the - 'failobj' parameter (defaults to None), if the part does not have a - main MIME type of "text", or the charset is not defined. - - The list will contain one string for each part of the message, plus - one for the container message (i.e. self), so that a non-multipart - message will still return a list of length 1. - """ - return [part.get_content_charset(failobj) for part in self.walk()] - - def get_content_disposition(self): - """Return the message's content-disposition if it exists, or None. - - The return values can be either 'inline', 'attachment' or None - according to the rfc2183. - """ - value = self.get('content-disposition') - if value is None: - return None - c_d = _splitparam(value)[0].lower() - return c_d - - # I.e. def walk(self): ... - from email.iterators import walk - - -class MIMEPart(Message): - - def __init__(self, policy=None): - if policy is None: - from email.policy import default - policy = default - super().__init__(policy) - - - def as_string(self, unixfrom=False, maxheaderlen=None, policy=None): - """Return the entire formatted message as a string. - - Optional 'unixfrom', when true, means include the Unix From_ envelope - header. 
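A sketch of set_boundary() rewriting the parameter in place, as described above; the multipart message is invented for illustration:

```python
from email.parser import Parser

raw = (
    'Content-Type: multipart/mixed; boundary="OLD"\n'
    '\n'
    '--OLD\n'
    'Content-Type: text/plain\n'
    '\n'
    'hi\n'
    '--OLD--\n'
)
msg = Parser().parsestr(raw)
msg.set_boundary('NEW')      # preserves the header's position and order
print(msg['Content-Type'])   # multipart/mixed; boundary="NEW"
```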
maxheaderlen is retained for backward compatibility with the - base Message class, but defaults to None, meaning that the policy value - for max_line_length controls the header maximum length. 'policy' is - passed to the Generator instance used to serialize the message; if it - is not specified the policy associated with the message instance is - used. - """ - policy = self.policy if policy is None else policy - if maxheaderlen is None: - maxheaderlen = policy.max_line_length - return super().as_string(unixfrom, maxheaderlen, policy) - - def __str__(self): - return self.as_string(policy=self.policy.clone(utf8=True)) - - def is_attachment(self): - c_d = self.get('content-disposition') - return False if c_d is None else c_d.content_disposition == 'attachment' - - def _find_body(self, part, preferencelist): - if part.is_attachment(): - return - maintype, subtype = part.get_content_type().split('/') - if maintype == 'text': - if subtype in preferencelist: - yield (preferencelist.index(subtype), part) - return - if maintype != 'multipart' or not self.is_multipart(): - return - if subtype != 'related': - for subpart in part.iter_parts(): - yield from self._find_body(subpart, preferencelist) - return - if 'related' in preferencelist: - yield (preferencelist.index('related'), part) - candidate = None - start = part.get_param('start') - if start: - for subpart in part.iter_parts(): - if subpart['content-id'] == start: - candidate = subpart - break - if candidate is None: - subparts = part.get_payload() - candidate = subparts[0] if subparts else None - if candidate is not None: - yield from self._find_body(candidate, preferencelist) - - def get_body(self, preferencelist=('related', 'html', 'plain')): - """Return best candidate mime part for display as 'body' of message. - - Do a depth first search, starting with self, looking for the first part - matching each of the items in preferencelist, and return the part - corresponding to the first item that has a match, or None if no items - have a match. If 'related' is not included in preferencelist, consider - the root part of any multipart/related encountered as a candidate - match. Ignore parts with 'Content-Disposition: attachment'. - """ - best_prio = len(preferencelist) - body = None - for prio, part in self._find_body(self, preferencelist): - if prio < best_prio: - best_prio = prio - body = part - if prio == 0: - break - return body - - _body_types = {('text', 'plain'), - ('text', 'html'), - ('multipart', 'related'), - ('multipart', 'alternative')} - def iter_attachments(self): - """Return an iterator over the non-main parts of a multipart. - - Skip the first of each occurrence of text/plain, text/html, - multipart/related, or multipart/alternative in the multipart (unless - they have a 'Content-Disposition: attachment' header) and include all - remaining subparts in the returned iterator. When applied to a - multipart/related, return all parts except the root part. Return an - empty iterator when applied to a multipart/alternative or a - non-multipart. - """ - maintype, subtype = self.get_content_type().split('/') - if maintype != 'multipart' or subtype == 'alternative': - return - payload = self.get_payload() - # Certain malformed messages can have content type set to `multipart/*` - # but still have single part body, in which case payload.copy() can - # fail with AttributeError. - try: - parts = payload.copy() - except AttributeError: - # payload is not a list, it is most probably a string. 
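The get_body() preference search described above, sketched with invented content:

```python
from email.message import EmailMessage

msg = EmailMessage()
msg.set_content('plain body')
msg.add_alternative('<p>html body</p>', subtype='html')

print(msg.get_body().get_content_type())            # text/html (preferred)
print(msg.get_body(('plain',)).get_content_type())  # text/plain
```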
- return - - if maintype == 'multipart' and subtype == 'related': - # For related, we treat everything but the root as an attachment. - # The root may be indicated by 'start'; if there's no start or we - # can't find the named start, treat the first subpart as the root. - start = self.get_param('start') - if start: - found = False - attachments = [] - for part in parts: - if part.get('content-id') == start: - found = True - else: - attachments.append(part) - if found: - yield from attachments - return - parts.pop(0) - yield from parts - return - # Otherwise we more or less invert the remaining logic in get_body. - # This only really works in edge cases (ex: non-text related or - # alternatives) if the sending agent sets content-disposition. - seen = [] # Only skip the first example of each candidate type. - for part in parts: - maintype, subtype = part.get_content_type().split('/') - if ((maintype, subtype) in self._body_types and - not part.is_attachment() and subtype not in seen): - seen.append(subtype) - continue - yield part - - def iter_parts(self): - """Return an iterator over all immediate subparts of a multipart. - - Return an empty iterator for a non-multipart. - """ - if self.is_multipart(): - yield from self.get_payload() - - def get_content(self, *args, content_manager=None, **kw): - if content_manager is None: - content_manager = self.policy.content_manager - return content_manager.get_content(self, *args, **kw) - - def set_content(self, *args, content_manager=None, **kw): - if content_manager is None: - content_manager = self.policy.content_manager - content_manager.set_content(self, *args, **kw) - - def _make_multipart(self, subtype, disallowed_subtypes, boundary): - if self.get_content_maintype() == 'multipart': - existing_subtype = self.get_content_subtype() - disallowed_subtypes = disallowed_subtypes + (subtype,) - if existing_subtype in disallowed_subtypes: - raise ValueError("Cannot convert {} to {}".format( - existing_subtype, subtype)) - keep_headers = [] - part_headers = [] - for name, value in self._headers: - if name.lower().startswith('content-'): - part_headers.append((name, value)) - else: - keep_headers.append((name, value)) - if part_headers: - # There is existing content, move it to the first subpart. 
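A sketch of iter_attachments() skipping the body part of a multipart/mixed message; the attachment bytes and filename are invented:

```python
from email.message import EmailMessage

msg = EmailMessage()
msg.set_content('see attachment')
msg.add_attachment(b'%PDF-1.4 fake bytes', maintype='application',
                   subtype='pdf', filename='doc.pdf')

for part in msg.iter_attachments():   # the text/plain body part is skipped
    print(part.get_filename(), part.get_content_type())
# doc.pdf application/pdf
```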
- part = type(self)(policy=self.policy) - part._headers = part_headers - part._payload = self._payload - self._payload = [part] - else: - self._payload = [] - self._headers = keep_headers - self['Content-Type'] = 'multipart/' + subtype - if boundary is not None: - self.set_param('boundary', boundary) - - def make_related(self, boundary=None): - self._make_multipart('related', ('alternative', 'mixed'), boundary) - - def make_alternative(self, boundary=None): - self._make_multipart('alternative', ('mixed',), boundary) - - def make_mixed(self, boundary=None): - self._make_multipart('mixed', (), boundary) - - def _add_multipart(self, _subtype, *args, _disp=None, **kw): - if (self.get_content_maintype() != 'multipart' or - self.get_content_subtype() != _subtype): - getattr(self, 'make_' + _subtype)() - part = type(self)(policy=self.policy) - part.set_content(*args, **kw) - if _disp and 'content-disposition' not in part: - part['Content-Disposition'] = _disp - self.attach(part) - - def add_related(self, *args, **kw): - self._add_multipart('related', *args, _disp='inline', **kw) - - def add_alternative(self, *args, **kw): - self._add_multipart('alternative', *args, **kw) - - def add_attachment(self, *args, **kw): - self._add_multipart('mixed', *args, _disp='attachment', **kw) - - def clear(self): - self._headers = [] - self._payload = None - - def clear_content(self): - self._headers = [(n, v) for n, v in self._headers - if not n.lower().startswith('content-')] - self._payload = None - - -class EmailMessage(MIMEPart): - - def set_content(self, *args, **kw): - super().set_content(*args, **kw) - if 'MIME-Version' not in self: - self['MIME-Version'] = '1.0' diff --git a/Python313_13_x64_Template/Lib/email/mime/application.py b/Python313_13_x64_Template/Lib/email/mime/application.py deleted file mode 100644 index f67cbad3..00000000 --- a/Python313_13_x64_Template/Lib/email/mime/application.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Keith Dart -# Contact: email-sig@python.org - -"""Class representing application/* type MIME documents.""" - -__all__ = ["MIMEApplication"] - -from email import encoders -from email.mime.nonmultipart import MIMENonMultipart - - -class MIMEApplication(MIMENonMultipart): - """Class for generating application/* MIME documents.""" - - def __init__(self, _data, _subtype='octet-stream', - _encoder=encoders.encode_base64, *, policy=None, **_params): - """Create an application/* type MIME document. - - _data contains the bytes for the raw application data. - - _subtype is the MIME content type subtype, defaulting to - 'octet-stream'. - - _encoder is a function which will perform the actual encoding for - transport of the application data, defaulting to base64 encoding. - - Any additional keyword arguments are passed to the base class - constructor, which turns them into parameters on the Content-Type - header. 
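Minimal usage of the MIMEApplication class deleted below, with invented payload bytes; base64 is the default encoder:

```python
from email.mime.application import MIMEApplication

part = MIMEApplication(b'%PDF-1.4 fake bytes', 'pdf')
print(part['Content-Type'])               # application/pdf
print(part['Content-Transfer-Encoding'])  # base64 -- the default encoder
```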
- """ - if _subtype is None: - raise TypeError('Invalid application MIME subtype') - MIMENonMultipart.__init__(self, 'application', _subtype, policy=policy, - **_params) - self.set_payload(_data) - _encoder(self) diff --git a/Python313_13_x64_Template/Lib/email/mime/audio.py b/Python313_13_x64_Template/Lib/email/mime/audio.py deleted file mode 100644 index aa0c4905..00000000 --- a/Python313_13_x64_Template/Lib/email/mime/audio.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (C) 2001-2007 Python Software Foundation -# Author: Anthony Baxter -# Contact: email-sig@python.org - -"""Class representing audio/* type MIME documents.""" - -__all__ = ['MIMEAudio'] - -from email import encoders -from email.mime.nonmultipart import MIMENonMultipart - - -class MIMEAudio(MIMENonMultipart): - """Class for generating audio/* MIME documents.""" - - def __init__(self, _audiodata, _subtype=None, - _encoder=encoders.encode_base64, *, policy=None, **_params): - """Create an audio/* type MIME document. - - _audiodata contains the bytes for the raw audio data. If this data - can be decoded as au, wav, aiff, or aifc, then the - subtype will be automatically included in the Content-Type header. - Otherwise, you can specify the specific audio subtype via the - _subtype parameter. If _subtype is not given, and no subtype can be - guessed, a TypeError is raised. - - _encoder is a function which will perform the actual encoding for - transport of the image data. It takes one argument, which is this - Image instance. It should use get_payload() and set_payload() to - change the payload to the encoded form. It should also add any - Content-Transfer-Encoding or other headers to the message as - necessary. The default encoding is Base64. - - Any additional keyword arguments are passed to the base class - constructor, which turns them into parameters on the Content-Type - header. - """ - if _subtype is None: - _subtype = _what(_audiodata) - if _subtype is None: - raise TypeError('Could not find audio MIME subtype') - MIMENonMultipart.__init__(self, 'audio', _subtype, policy=policy, - **_params) - self.set_payload(_audiodata) - _encoder(self) - - -_rules = [] - - -# Originally from the sndhdr module. -# -# There are others in sndhdr that don't have MIME types. :( -# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma?? -def _what(data): - # Try to identify a sound file type. - # - # sndhdr.what() had a pretty cruddy interface, unfortunately. This is why - # we re-do it here. It would be easier to reverse engineer the Unix 'file' - # command and use the standard 'magic' file, as shipped with a modern Unix. 
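A sketch of MIMEAudio's subtype detection; the payload is just a synthetic Sun .au magic header ('.snd'), not real audio, so the _au rule below should match:

```python
from email.mime.audio import MIMEAudio

audio = MIMEAudio(b'.snd' + b'\x00' * 20)
print(audio['Content-Type'])   # audio/basic -- subtype guessed from the magic
```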
- for testfn in _rules: - if res := testfn(data): - return res - else: - return None - - -def rule(rulefunc): - _rules.append(rulefunc) - return rulefunc - - -@rule -def _aiff(h): - if not h.startswith(b'FORM'): - return None - if h[8:12] in {b'AIFC', b'AIFF'}: - return 'x-aiff' - else: - return None - - -@rule -def _au(h): - if h.startswith(b'.snd'): - return 'basic' - else: - return None - - -@rule -def _wav(h): - # 'RIFF' 'WAVE' 'fmt ' - if not h.startswith(b'RIFF') or h[8:12] != b'WAVE' or h[12:16] != b'fmt ': - return None - else: - return "x-wav" diff --git a/Python313_13_x64_Template/Lib/email/mime/base.py b/Python313_13_x64_Template/Lib/email/mime/base.py deleted file mode 100644 index f601f621..00000000 --- a/Python313_13_x64_Template/Lib/email/mime/base.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Base class for MIME specializations.""" - -__all__ = ['MIMEBase'] - -import email.policy - -from email import message - - -class MIMEBase(message.Message): - """Base class for MIME specializations.""" - - def __init__(self, _maintype, _subtype, *, policy=None, **_params): - """This constructor adds a Content-Type: and a MIME-Version: header. - - The Content-Type: header is taken from the _maintype and _subtype - arguments. Additional parameters for this header are taken from the - keyword arguments. - """ - if policy is None: - policy = email.policy.compat32 - message.Message.__init__(self, policy=policy) - ctype = '%s/%s' % (_maintype, _subtype) - self.add_header('Content-Type', ctype, **_params) - self['MIME-Version'] = '1.0' diff --git a/Python313_13_x64_Template/Lib/email/mime/image.py b/Python313_13_x64_Template/Lib/email/mime/image.py deleted file mode 100644 index 4b7f2f9c..00000000 --- a/Python313_13_x64_Template/Lib/email/mime/image.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Class representing image/* type MIME documents.""" - -__all__ = ['MIMEImage'] - -from email import encoders -from email.mime.nonmultipart import MIMENonMultipart - - -class MIMEImage(MIMENonMultipart): - """Class for generating image/* type MIME documents.""" - - def __init__(self, _imagedata, _subtype=None, - _encoder=encoders.encode_base64, *, policy=None, **_params): - """Create an image/* type MIME document. - - _imagedata contains the bytes for the raw image data. If the data - type can be detected (jpeg, png, gif, tiff, rgb, pbm, pgm, ppm, - rast, xbm, bmp, webp, and exr attempted), then the subtype will be - automatically included in the Content-Type header. Otherwise, you can - specify the specific image subtype via the _subtype parameter. - - _encoder is a function which will perform the actual encoding for - transport of the image data. It takes one argument, which is this - Image instance. It should use get_payload() and set_payload() to - change the payload to the encoded form. It should also add any - Content-Transfer-Encoding or other headers to the message as - necessary. The default encoding is Base64. - - Any additional keyword arguments are passed to the base class - constructor, which turns them into parameters on the Content-Type - header. 
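Minimal usage of the MIMEBase class deleted below, paired with the stdlib base64 encoder; the payload bytes are invented:

```python
from email import encoders
from email.mime.base import MIMEBase

part = MIMEBase('application', 'octet-stream')
part.set_payload(b'\x00\x01\x02')
encoders.encode_base64(part)   # base64-encodes the payload, adds the CTE header
print(part['Content-Type'])    # application/octet-stream
print(part['MIME-Version'])    # 1.0
```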
- """ - _subtype = _what(_imagedata) if _subtype is None else _subtype - if _subtype is None: - raise TypeError('Could not guess image MIME subtype') - MIMENonMultipart.__init__(self, 'image', _subtype, policy=policy, - **_params) - self.set_payload(_imagedata) - _encoder(self) - - -_rules = [] - - -# Originally from the imghdr module. -def _what(data): - for rule in _rules: - if res := rule(data): - return res - else: - return None - - -def rule(rulefunc): - _rules.append(rulefunc) - return rulefunc - - -@rule -def _jpeg(h): - """JPEG data with JFIF or Exif markers; and raw JPEG""" - if h[6:10] in (b'JFIF', b'Exif'): - return 'jpeg' - elif h[:4] == b'\xff\xd8\xff\xdb': - return 'jpeg' - - -@rule -def _png(h): - if h.startswith(b'\211PNG\r\n\032\n'): - return 'png' - - -@rule -def _gif(h): - """GIF ('87 and '89 variants)""" - if h[:6] in (b'GIF87a', b'GIF89a'): - return 'gif' - - -@rule -def _tiff(h): - """TIFF (can be in Motorola or Intel byte order)""" - if h[:2] in (b'MM', b'II'): - return 'tiff' - - -@rule -def _rgb(h): - """SGI image library""" - if h.startswith(b'\001\332'): - return 'rgb' - - -@rule -def _pbm(h): - """PBM (portable bitmap)""" - if len(h) >= 3 and \ - h[0] == ord(b'P') and h[1] in b'14' and h[2] in b' \t\n\r': - return 'pbm' - - -@rule -def _pgm(h): - """PGM (portable graymap)""" - if len(h) >= 3 and \ - h[0] == ord(b'P') and h[1] in b'25' and h[2] in b' \t\n\r': - return 'pgm' - - -@rule -def _ppm(h): - """PPM (portable pixmap)""" - if len(h) >= 3 and \ - h[0] == ord(b'P') and h[1] in b'36' and h[2] in b' \t\n\r': - return 'ppm' - - -@rule -def _rast(h): - """Sun raster file""" - if h.startswith(b'\x59\xA6\x6A\x95'): - return 'rast' - - -@rule -def _xbm(h): - """X bitmap (X10 or X11)""" - if h.startswith(b'#define '): - return 'xbm' - - -@rule -def _bmp(h): - if h.startswith(b'BM'): - return 'bmp' - - -@rule -def _webp(h): - if h.startswith(b'RIFF') and h[8:12] == b'WEBP': - return 'webp' - - -@rule -def _exr(h): - if h.startswith(b'\x76\x2f\x31\x01'): - return 'exr' diff --git a/Python313_13_x64_Template/Lib/email/mime/message.py b/Python313_13_x64_Template/Lib/email/mime/message.py deleted file mode 100644 index 61836b5a..00000000 --- a/Python313_13_x64_Template/Lib/email/mime/message.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Class representing message/* MIME documents.""" - -__all__ = ['MIMEMessage'] - -from email import message -from email.mime.nonmultipart import MIMENonMultipart - - -class MIMEMessage(MIMENonMultipart): - """Class representing message/* MIME documents.""" - - def __init__(self, _msg, _subtype='rfc822', *, policy=None): - """Create a message/* type MIME document. - - _msg is a message object and must be an instance of Message, or a - derived class of Message, otherwise a TypeError is raised. - - Optional _subtype defines the subtype of the contained message. The - default is "rfc822" (this is defined by the MIME standard, even though - the term "rfc822" is technically outdated by RFC 2822). - """ - MIMENonMultipart.__init__(self, 'message', _subtype, policy=policy) - if not isinstance(_msg, message.Message): - raise TypeError('Argument is not an instance of Message') - # It's convenient to use this base class method. 
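A sketch of MIMEImage's subtype detection; the payload is only the 8-byte PNG signature plus padding, not a real image, which is enough for the _png rule below:

```python
from email.mime.image import MIMEImage

img = MIMEImage(b'\x89PNG\r\n\x1a\n' + b'\x00' * 8)
print(img['Content-Type'])   # image/png -- subtype guessed from the signature
```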
We need to do it - # this way or we'll get an exception - message.Message.attach(self, _msg) - # And be sure our default type is set correctly - self.set_default_type('message/rfc822') diff --git a/Python313_13_x64_Template/Lib/email/mime/multipart.py b/Python313_13_x64_Template/Lib/email/mime/multipart.py deleted file mode 100644 index 94d81c77..00000000 --- a/Python313_13_x64_Template/Lib/email/mime/multipart.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2002-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Base class for MIME multipart/* type messages.""" - -__all__ = ['MIMEMultipart'] - -from email.mime.base import MIMEBase - - -class MIMEMultipart(MIMEBase): - """Base class for MIME multipart/* type messages.""" - - def __init__(self, _subtype='mixed', boundary=None, _subparts=None, - *, policy=None, - **_params): - """Creates a multipart/* type message. - - By default, creates a multipart/mixed message, with proper - Content-Type and MIME-Version headers. - - _subtype is the subtype of the multipart content type, defaulting to - `mixed'. - - boundary is the multipart boundary string. By default it is - calculated as needed. - - _subparts is a sequence of initial subparts for the payload. It - must be an iterable object, such as a list. You can always - attach new subparts to the message by using the attach() method. - - Additional parameters for the Content-Type header are taken from the - keyword arguments (or passed into the _params argument). - """ - MIMEBase.__init__(self, 'multipart', _subtype, policy=policy, **_params) - - # Initialise _payload to an empty list as the Message superclass's - # implementation of is_multipart assumes that _payload is a list for - # multipart messages. - self._payload = [] - - if _subparts: - for p in _subparts: - self.attach(p) - if boundary: - self.set_boundary(boundary) diff --git a/Python313_13_x64_Template/Lib/email/mime/nonmultipart.py b/Python313_13_x64_Template/Lib/email/mime/nonmultipart.py deleted file mode 100644 index a41386eb..00000000 --- a/Python313_13_x64_Template/Lib/email/mime/nonmultipart.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2002-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Base class for MIME type messages that are not multipart.""" - -__all__ = ['MIMENonMultipart'] - -from email import errors -from email.mime.base import MIMEBase - - -class MIMENonMultipart(MIMEBase): - """Base class for MIME non-multipart type messages.""" - - def attach(self, payload): - # The public API prohibits attaching multiple subparts to MIMEBase - # derived subtypes since none of them are, by definition, of content - # type multipart/* - raise errors.MultipartConversionError( - 'Cannot attach additional subparts to non-multipart/*') diff --git a/Python313_13_x64_Template/Lib/email/mime/text.py b/Python313_13_x64_Template/Lib/email/mime/text.py deleted file mode 100644 index 7672b789..00000000 --- a/Python313_13_x64_Template/Lib/email/mime/text.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Class representing text/* type MIME documents.""" - -__all__ = ['MIMEText'] - -from email.mime.nonmultipart import MIMENonMultipart - - -class MIMEText(MIMENonMultipart): - """Class for generating text/* type MIME documents.""" - - def __init__(self, _text, _subtype='plain', _charset=None, *, policy=None): - """Create a text/* type MIME document. 
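Minimal usage of the MIMEMultipart class deleted below, with invented text parts:

```python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

outer = MIMEMultipart()                      # multipart/mixed by default
outer.attach(MIMEText('hello'))
outer.attach(MIMEText('<b>hello</b>', 'html'))
print(outer.get_content_type())              # multipart/mixed
print(len(outer.get_payload()))              # 2
```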
- - _text is the string for this message object. - - _subtype is the MIME sub content type, defaulting to "plain". - - _charset is the character set parameter added to the Content-Type - header. This defaults to "us-ascii". Note that as a side-effect, the - Content-Transfer-Encoding header will also be set. - """ - - # If no _charset was specified, check to see if there are non-ascii - # characters present. If not, use 'us-ascii', otherwise use utf-8. - # XXX: This can be removed once #7304 is fixed. - if _charset is None: - try: - _text.encode('us-ascii') - _charset = 'us-ascii' - except UnicodeEncodeError: - _charset = 'utf-8' - - MIMENonMultipart.__init__(self, 'text', _subtype, policy=policy, - charset=str(_charset)) - - self.set_payload(_text, _charset) diff --git a/Python313_13_x64_Template/Lib/email/parser.py b/Python313_13_x64_Template/Lib/email/parser.py deleted file mode 100644 index e3003118..00000000 --- a/Python313_13_x64_Template/Lib/email/parser.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (C) 2001-2007 Python Software Foundation -# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter -# Contact: email-sig@python.org - -"""A parser of RFC 5322 and MIME email messages.""" - -__all__ = ['Parser', 'HeaderParser', 'BytesParser', 'BytesHeaderParser', - 'FeedParser', 'BytesFeedParser'] - -from io import StringIO, TextIOWrapper - -from email.feedparser import FeedParser, BytesFeedParser -from email._policybase import compat32 - - -class Parser: - def __init__(self, _class=None, *, policy=compat32): - """Parser of RFC 5322 and MIME email messages. - - Creates an in-memory object tree representing the email message, which - can then be manipulated and turned over to a Generator to return the - textual representation of the message. - - The string must be formatted as a block of RFC 5322 headers and header - continuation lines, optionally preceded by a 'Unix-from' header. The - header block is terminated either by the end of the string or by a - blank line. - - _class is the class to instantiate for new message objects when they - must be created. This class must have a constructor that can take - zero arguments. Default is Message.Message. - - The policy keyword specifies a policy object that controls a number of - aspects of the parser's operation. The default policy maintains - backward compatibility. - - """ - self._class = _class - self.policy = policy - - def parse(self, fp, headersonly=False): - """Create a message structure from the data in a file. - - Reads all the data from the file and returns the root of the message - structure. Optional headersonly is a flag specifying whether to stop - parsing after reading the headers or not. The default is False, - meaning it parses the entire contents of the file. - """ - feedparser = FeedParser(self._class, policy=self.policy) - if headersonly: - feedparser._set_headersonly() - while data := fp.read(8192): - feedparser.feed(data) - return feedparser.close() - - def parsestr(self, text, headersonly=False): - """Create a message structure from a string. - - Returns the root of the message structure. Optional headersonly is a - flag specifying whether to stop parsing after reading the headers or - not. The default is False, meaning it parses the entire contents of - the file. 
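A sketch of the charset auto-selection described in the docstring above (us-ascii when the text encodes as ASCII, utf-8 otherwise):

```python
from email.mime.text import MIMEText

print(MIMEText('plain ascii').get_content_charset())  # us-ascii
print(MIMEText('héllo').get_content_charset())        # utf-8
```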
- """ - return self.parse(StringIO(text), headersonly=headersonly) - - -class HeaderParser(Parser): - def parse(self, fp, headersonly=True): - return Parser.parse(self, fp, True) - - def parsestr(self, text, headersonly=True): - return Parser.parsestr(self, text, True) - - -class BytesParser: - - def __init__(self, *args, **kw): - """Parser of binary RFC 5322 and MIME email messages. - - Creates an in-memory object tree representing the email message, which - can then be manipulated and turned over to a Generator to return the - textual representation of the message. - - The input must be formatted as a block of RFC 5322 headers and header - continuation lines, optionally preceded by a 'Unix-from' header. The - header block is terminated either by the end of the input or by a - blank line. - - _class is the class to instantiate for new message objects when they - must be created. This class must have a constructor that can take - zero arguments. Default is Message.Message. - """ - self.parser = Parser(*args, **kw) - - def parse(self, fp, headersonly=False): - """Create a message structure from the data in a binary file. - - Reads all the data from the file and returns the root of the message - structure. Optional headersonly is a flag specifying whether to stop - parsing after reading the headers or not. The default is False, - meaning it parses the entire contents of the file. - """ - fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape') - try: - return self.parser.parse(fp, headersonly) - finally: - fp.detach() - - - def parsebytes(self, text, headersonly=False): - """Create a message structure from a byte string. - - Returns the root of the message structure. Optional headersonly is a - flag specifying whether to stop parsing after reading the headers or - not. The default is False, meaning it parses the entire contents of - the file. - """ - text = text.decode('ASCII', errors='surrogateescape') - return self.parser.parsestr(text, headersonly) - - -class BytesHeaderParser(BytesParser): - def parse(self, fp, headersonly=True): - return BytesParser.parse(self, fp, headersonly=True) - - def parsebytes(self, text, headersonly=True): - return BytesParser.parsebytes(self, text, headersonly=True) diff --git a/Python313_13_x64_Template/Lib/email/policy.py b/Python313_13_x64_Template/Lib/email/policy.py deleted file mode 100644 index 6e109b65..00000000 --- a/Python313_13_x64_Template/Lib/email/policy.py +++ /dev/null @@ -1,232 +0,0 @@ -"""This will be the home for the policy that hooks in the new -code that adds all the email6 features. -""" - -import re -import sys -from email._policybase import Policy, Compat32, compat32, _extend_docstrings -from email.utils import _has_surrogates -from email.headerregistry import HeaderRegistry as HeaderRegistry -from email.contentmanager import raw_data_manager -from email.message import EmailMessage - -__all__ = [ - 'Compat32', - 'compat32', - 'Policy', - 'EmailPolicy', - 'default', - 'strict', - 'SMTP', - 'HTTP', - ] - -linesep_splitter = re.compile(r'\n|\r\n?') - -@_extend_docstrings -class EmailPolicy(Policy): - - """+ - PROVISIONAL - - The API extensions enabled by this policy are currently provisional. - Refer to the documentation for details. - - This policy adds new header parsing and folding algorithms. Instead of - simple strings, headers are custom objects with custom attributes - depending on the type of the field. The folding algorithm fully - implements RFCs 2047 and 5322. 
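Minimal usage of the BytesParser class deleted below, with an invented raw message; passing the default policy yields EmailMessage objects with structured headers:

```python
from email.parser import BytesParser
from email.policy import default

raw = b'Subject: Hi\r\nTo: alice@example.com\r\n\r\nbody\r\n'
msg = BytesParser(policy=default).parsebytes(raw)
print(msg['Subject'])       # Hi -- a structured header object
print(type(msg).__name__)   # EmailMessage, via the default policy
```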
- - In addition to the settable attributes listed above that apply to - all Policies, this policy adds the following additional attributes: - - utf8 -- if False (the default) message headers will be - serialized as ASCII, using encoded words to encode - any non-ASCII characters in the source strings. If - True, the message headers will be serialized using - utf8 and will not contain encoded words (see RFC - 6532 for more on this serialization format). - - refold_source -- if the value for a header in the Message object - came from the parsing of some source, this attribute - indicates whether or not a generator should refold - that value when transforming the message back into - stream form. The possible values are: - - none -- all source values use original folding - long -- source values that have any line that is - longer than max_line_length will be - refolded - all -- all values are refolded. - - The default is 'long'. - - header_factory -- a callable that takes two arguments, 'name' and - 'value', where 'name' is a header field name and - 'value' is an unfolded header field value, and - returns a string-like object that represents that - header. A default header_factory is provided that - understands some of the RFC5322 header field types. - (Currently address fields and date fields have - special treatment, while all other fields are - treated as unstructured. This list will be - completed before the extension is marked stable.) - - content_manager -- an object with at least two methods: get_content - and set_content. When the get_content or - set_content method of a Message object is called, - it calls the corresponding method of this object, - passing it the message object as its first argument, - and any arguments or keywords that were passed to - it as additional arguments. The default - content_manager is - :data:`~email.contentmanager.raw_data_manager`. - - """ - - message_factory = EmailMessage - utf8 = False - refold_source = 'long' - header_factory = HeaderRegistry() - content_manager = raw_data_manager - - def __init__(self, **kw): - # Ensure that each new instance gets a unique header factory - # (as opposed to clones, which share the factory). - if 'header_factory' not in kw: - object.__setattr__(self, 'header_factory', HeaderRegistry()) - super().__init__(**kw) - - def header_max_count(self, name): - """+ - The implementation for this class returns the max_count attribute from - the specialized header class that would be used to construct a header - of type 'name'. - """ - return self.header_factory[name].max_count - - # The logic of the next three methods is chosen such that it is possible to - # switch a Message object between a Compat32 policy and a policy derived - # from this class and have the results stay consistent. This allows a - # Message object constructed with this policy to be passed to a library - # that only handles Compat32 objects, or to receive such an object and - # convert it to use the newer style by just changing its policy. It is - # also chosen because it postpones the relatively expensive full rfc5322 - # parse until as late as possible when parsing from source, since in many - # applications only a few headers will actually be inspected. - - def header_source_parse(self, sourcelines): - """+ - The name is parsed as everything up to the ':' and returned unmodified. 
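A sketch of deriving a policy instance via clone(), which is how the stock SMTP/HTTP variants below are built:

```python
from email.policy import SMTP, default

strict_smtp = default.clone(linesep='\r\n', raise_on_defect=True)
print(strict_smtp.linesep == SMTP.linesep)   # True
print(strict_smtp.raise_on_defect)           # True
```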
- The value is determined by stripping leading whitespace off the - remainder of the first line joined with all subsequent lines, and - stripping any trailing carriage return or linefeed characters. (This - is the same as Compat32). - - """ - name, value = sourcelines[0].split(':', 1) - value = ''.join((value, *sourcelines[1:])).lstrip(' \t\r\n') - return (name, value.rstrip('\r\n')) - - def header_store_parse(self, name, value): - """+ - The name is returned unchanged. If the input value has a 'name' - attribute and it matches the name ignoring case, the value is returned - unchanged. Otherwise the name and value are passed to header_factory - method, and the resulting custom header object is returned as the - value. In this case a ValueError is raised if the input value contains - CR or LF characters. - - """ - if hasattr(value, 'name') and value.name.lower() == name.lower(): - return (name, value) - if isinstance(value, str) and len(value.splitlines())>1: - # XXX this error message isn't quite right when we use splitlines - # (see issue 22233), but I'm not sure what should happen here. - raise ValueError("Header values may not contain linefeed " - "or carriage return characters") - return (name, self.header_factory(name, value)) - - def header_fetch_parse(self, name, value): - """+ - If the value has a 'name' attribute, it is returned to unmodified. - Otherwise the name and the value with any linesep characters removed - are passed to the header_factory method, and the resulting custom - header object is returned. Any surrogateescaped bytes get turned - into the unicode unknown-character glyph. - - """ - if hasattr(value, 'name'): - return value - # We can't use splitlines here because it splits on more than \r and \n. - value = ''.join(linesep_splitter.split(value)) - return self.header_factory(name, value) - - def fold(self, name, value): - """+ - Header folding is controlled by the refold_source policy setting. A - value is considered to be a 'source value' if and only if it does not - have a 'name' attribute (having a 'name' attribute means it is a header - object of some sort). If a source value needs to be refolded according - to the policy, it is converted into a custom header object by passing - the name and the value with any linesep characters removed to the - header_factory method. Folding of a custom header object is done by - calling its fold method with the current policy. - - Source values are split into lines using splitlines. If the value is - not to be refolded, the lines are rejoined using the linesep from the - policy and returned. The exception is lines containing non-ascii - binary data. In that case the value is refolded regardless of the - refold_source setting, which causes the binary data to be CTE encoded - using the unknown-8bit charset. - - """ - return self._fold(name, value, refold_binary=True) - - def fold_binary(self, name, value): - """+ - The same as fold if cte_type is 7bit, except that the returned value is - bytes. - - If cte_type is 8bit, non-ASCII binary data is converted back into - bytes. Headers with binary data are not refolded, regardless of the - refold_header setting, since there is no way to know whether the binary - data consists of single byte characters or multibyte characters. - - If utf8 is true, headers are encoded to utf8, otherwise to ascii with - non-ASCII unicode rendered as encoded words. 
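A sketch of the fold() behavior described above: a long source value is refolded to the policy's max_line_length (78 by default), assuming the default refold_source of 'long':

```python
from email.policy import default

folded = default.fold('Subject', 'a deliberately long header value ' * 4)
print(folded)   # 'Subject: ...' wrapped across continuation lines
```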
- - """ - folded = self._fold(name, value, refold_binary=self.cte_type=='7bit') - charset = 'utf8' if self.utf8 else 'ascii' - return folded.encode(charset, 'surrogateescape') - - def _fold(self, name, value, refold_binary=False): - if hasattr(value, 'name'): - return value.fold(policy=self) - maxlen = self.max_line_length if self.max_line_length else sys.maxsize - # We can't use splitlines here because it splits on more than \r and \n. - lines = linesep_splitter.split(value) - refold = (self.refold_source == 'all' or - self.refold_source == 'long' and - (lines and len(lines[0])+len(name)+2 > maxlen or - any(len(x) > maxlen for x in lines[1:]))) - - if not refold: - if not self.utf8: - refold = not value.isascii() - elif refold_binary: - refold = _has_surrogates(value) - if refold: - return self.header_factory(name, ''.join(lines)).fold(policy=self) - - return name + ': ' + self.linesep.join(lines) + self.linesep - - -default = EmailPolicy() -# Make the default policy use the class default header_factory -del default.header_factory -strict = default.clone(raise_on_defect=True) -SMTP = default.clone(linesep='\r\n') -HTTP = default.clone(linesep='\r\n', max_line_length=None) -SMTPUTF8 = SMTP.clone(utf8=True) diff --git a/Python313_13_x64_Template/Lib/email/quoprimime.py b/Python313_13_x64_Template/Lib/email/quoprimime.py deleted file mode 100644 index 27fcbb5a..00000000 --- a/Python313_13_x64_Template/Lib/email/quoprimime.py +++ /dev/null @@ -1,300 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Ben Gertzfield -# Contact: email-sig@python.org - -"""Quoted-printable content transfer encoding per RFCs 2045-2047. - -This module handles the content transfer encoding method defined in RFC 2045 -to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to -safely encode text that is in a character set similar to the 7-bit US ASCII -character set, but that includes some 8-bit characters that are normally not -allowed in email bodies or headers. - -Quoted-printable is very space-inefficient for encoding binary files; use the -email.base64mime module for that instead. - -This module provides an interface to encode and decode both headers and bodies -with quoted-printable encoding. - -RFC 2045 defines a method for including character set information in an -`encoded-word' in a header. This method is commonly used for 8-bit real names -in To:/From:/Cc: etc. fields, as well as Subject: lines. - -This module does not do the line wrapping or end-of-line character -conversion necessary for proper internationalized headers; it only -does dumb encoding and decoding. To deal with the various line -wrapping issues, use the email.header module. -""" - -__all__ = [ - 'body_decode', - 'body_encode', - 'body_length', - 'decode', - 'decodestring', - 'header_decode', - 'header_encode', - 'header_length', - 'quote', - 'unquote', - ] - -import re - -from string import ascii_letters, digits, hexdigits - -CRLF = '\r\n' -NL = '\n' -EMPTYSTRING = '' - -# Build a mapping of octets to the expansion of that octet. Since we're only -# going to have 256 of these things, this isn't terribly inefficient -# space-wise. Remember that headers and bodies have different sets of safe -# characters. Initialize both maps with the full expansion, and then override -# the safe bytes with the more compact form. -_QUOPRI_MAP = ['=%02X' % c for c in range(256)] -_QUOPRI_HEADER_MAP = _QUOPRI_MAP[:] -_QUOPRI_BODY_MAP = _QUOPRI_MAP[:] - -# Safe header bytes which need no encoding. 
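A sketch of the header 'Q' encoding these maps implement; the input bytes are invented, and note the space-to-underscore rule:

```python
from email import quoprimime

print(quoprimime.header_encode(b'caf\xe9'))   # =?iso-8859-1?q?caf=E9?=
print(quoprimime.header_encode(b'a b'))       # =?iso-8859-1?q?a_b?=
```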
-for c in b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii'): - _QUOPRI_HEADER_MAP[c] = chr(c) -# Headers have one other special encoding; spaces become underscores. -_QUOPRI_HEADER_MAP[ord(' ')] = '_' - -# Safe body bytes which need no encoding. -for c in (b' !"#$%&\'()*+,-./0123456789:;<>' - b'?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`' - b'abcdefghijklmnopqrstuvwxyz{|}~\t'): - _QUOPRI_BODY_MAP[c] = chr(c) - - - -# Helpers -def header_check(octet): - """Return True if the octet should be escaped with header quopri.""" - return chr(octet) != _QUOPRI_HEADER_MAP[octet] - - -def body_check(octet): - """Return True if the octet should be escaped with body quopri.""" - return chr(octet) != _QUOPRI_BODY_MAP[octet] - - -def header_length(bytearray): - """Return a header quoted-printable encoding length. - - Note that this does not include any RFC 2047 chrome added by - `header_encode()`. - - :param bytearray: An array of bytes (a.k.a. octets). - :return: The length in bytes of the byte array when it is encoded with - quoted-printable for headers. - """ - return sum(len(_QUOPRI_HEADER_MAP[octet]) for octet in bytearray) - - -def body_length(bytearray): - """Return a body quoted-printable encoding length. - - :param bytearray: An array of bytes (a.k.a. octets). - :return: The length in bytes of the byte array when it is encoded with - quoted-printable for bodies. - """ - return sum(len(_QUOPRI_BODY_MAP[octet]) for octet in bytearray) - - -def _max_append(L, s, maxlen, extra=''): - if not isinstance(s, str): - s = chr(s) - if not L: - L.append(s.lstrip()) - elif len(L[-1]) + len(s) <= maxlen: - L[-1] += extra + s - else: - L.append(s.lstrip()) - - -def unquote(s): - """Turn a string in the form =AB to the ASCII character with value 0xab""" - return chr(int(s[1:3], 16)) - - -def quote(c): - return _QUOPRI_MAP[ord(c)] - - -def header_encode(header_bytes, charset='iso-8859-1'): - """Encode a single header line with quoted-printable (like) encoding. - - Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but - used specifically for email header fields to allow charsets with mostly 7 - bit characters (and some 8 bit) to remain more or less readable in non-RFC - 2045 aware mail clients. - - charset names the character set to use in the RFC 2046 header. It - defaults to iso-8859-1. - """ - # Return empty headers as an empty string. - if not header_bytes: - return '' - # Iterate over every byte, encoding if necessary. - encoded = header_bytes.decode('latin1').translate(_QUOPRI_HEADER_MAP) - # Now add the RFC chrome to each encoded chunk and glue the chunks - # together. - return '=?%s?q?%s?=' % (charset, encoded) - - -_QUOPRI_BODY_ENCODE_MAP = _QUOPRI_BODY_MAP[:] -for c in b'\r\n': - _QUOPRI_BODY_ENCODE_MAP[c] = chr(c) -del c - -def body_encode(body, maxlinelen=76, eol=NL): - """Encode with quoted-printable, wrapping at maxlinelen characters. - - Each line of encoded text will end with eol, which defaults to "\\n". Set - this to "\\r\\n" if you will be using the result of this function directly - in an email. - - Each line will be wrapped at, at most, maxlinelen characters before the - eol string (maxlinelen defaults to 76 characters, the maximum value - permitted by RFC 2045). Long lines will have the 'soft line break' - quoted-printable character "=" appended to them, so the decoded text will - be identical to the original text. - - The minimum maxlinelen is 4 to have room for a quoted character ("=XX") - followed by a soft line break. 
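A sketch of body_encode()'s soft line breaks and their reversal by decode(), assuming the wrapping behavior documented above; the input is an arbitrary 100-character string:

```python
from email import quoprimime

encoded = quoprimime.body_encode('x' * 100, maxlinelen=20)
print(encoded.splitlines()[0])                   # 19 chars plus the '=' soft break
print(quoprimime.decode(encoded) == 'x' * 100)   # True -- the encoding round-trips
```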
Smaller values will generate a - ValueError. - - """ - - if maxlinelen < 4: - raise ValueError("maxlinelen must be at least 4") - if not body: - return body - - # quote special characters - body = body.translate(_QUOPRI_BODY_ENCODE_MAP) - - soft_break = '=' + eol - # leave space for the '=' at the end of a line - maxlinelen1 = maxlinelen - 1 - - encoded_body = [] - append = encoded_body.append - - for line in body.splitlines(): - # break up the line into pieces no longer than maxlinelen - 1 - start = 0 - laststart = len(line) - 1 - maxlinelen - while start <= laststart: - stop = start + maxlinelen1 - # make sure we don't break up an escape sequence - if line[stop - 2] == '=': - append(line[start:stop - 1]) - start = stop - 2 - elif line[stop - 1] == '=': - append(line[start:stop]) - start = stop - 1 - else: - append(line[start:stop] + '=') - start = stop - - # handle rest of line, special case if line ends in whitespace - if line and line[-1] in ' \t': - room = start - laststart - if room >= 3: - # It's a whitespace character at end-of-line, and we have room - # for the three-character quoted encoding. - q = quote(line[-1]) - elif room == 2: - # There's room for the whitespace character and a soft break. - q = line[-1] + soft_break - else: - # There's room only for a soft break. The quoted whitespace - # will be the only content on the subsequent line. - q = soft_break + quote(line[-1]) - append(line[start:-1] + q) - else: - append(line[start:]) - - # add back final newline if present - if body[-1] in CRLF: - append('') - - return eol.join(encoded_body) - - - -# BAW: I'm not sure if the intent was for the signature of this function to be -# the same as base64MIME.decode() or not... -def decode(encoded, eol=NL): - """Decode a quoted-printable string. - - Lines are separated with eol, which defaults to \\n. - """ - if not encoded: - return encoded - # BAW: see comment in encode() above. Again, we're building up the - # decoded string with string concatenation, which could be done much more - # efficiently. - decoded = '' - - for line in encoded.splitlines(): - line = line.rstrip() - if not line: - decoded += eol - continue - - i = 0 - n = len(line) - while i < n: - c = line[i] - if c != '=': - decoded += c - i += 1 - # Otherwise, c == "=". Are we at the end of the line? If so, add - # a soft line break. - elif i+1 == n: - i += 1 - continue - # Decode if in form =AB - elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits: - decoded += unquote(line[i:i+3]) - i += 3 - # Otherwise, not in form =AB, pass literally - else: - decoded += c - i += 1 - - if i == n: - decoded += eol - # Special case if original string did not end with eol - if encoded[-1] not in '\r\n' and decoded.endswith(eol): - decoded = decoded[:-1] - return decoded - - -# For convenience and backwards compatibility w/ standard base64 module -body_decode = decode -decodestring = decode - - - -def _unquote_match(match): - """Turn a match in the form =AB to the ASCII character with value 0xab""" - s = match.group(0) - return unquote(s) - - -# Header decoding is done a bit differently -def header_decode(s): - """Decode a string encoded with RFC 2045 MIME header `Q' encoding. - - This function does not parse a full MIME header value encoded with - quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use - the high level email.header class for that functionality. 
- """ - s = s.replace('_', ' ') - return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s, flags=re.ASCII) diff --git a/Python313_13_x64_Template/Lib/email/utils.py b/Python313_13_x64_Template/Lib/email/utils.py deleted file mode 100644 index e4d35f06..00000000 --- a/Python313_13_x64_Template/Lib/email/utils.py +++ /dev/null @@ -1,494 +0,0 @@ -# Copyright (C) 2001-2010 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Miscellaneous utilities.""" - -__all__ = [ - 'collapse_rfc2231_value', - 'decode_params', - 'decode_rfc2231', - 'encode_rfc2231', - 'formataddr', - 'formatdate', - 'format_datetime', - 'getaddresses', - 'make_msgid', - 'mktime_tz', - 'parseaddr', - 'parsedate', - 'parsedate_tz', - 'parsedate_to_datetime', - 'unquote', - ] - -import os -import re -import time -import datetime -import urllib.parse - -from email._parseaddr import quote -from email._parseaddr import AddressList as _AddressList -from email._parseaddr import mktime_tz - -from email._parseaddr import parsedate, parsedate_tz, _parsedate_tz - -COMMASPACE = ', ' -EMPTYSTRING = '' -UEMPTYSTRING = '' -CRLF = '\r\n' -TICK = "'" - -specialsre = re.compile(r'[][\\()<>@,:;".]') -escapesre = re.compile(r'[\\"]') - - -def _has_surrogates(s): - """Return True if s may contain surrogate-escaped binary data.""" - # This check is based on the fact that unless there are surrogates, utf8 - # (Python's default encoding) can encode any string. This is the fastest - # way to check for surrogates, see bpo-11454 (moved to gh-55663) for timings. - try: - s.encode() - return False - except UnicodeEncodeError: - return True - -# How to deal with a string containing bytes before handing it to the -# application through the 'normal' interface. -def _sanitize(string): - # Turn any escaped bytes into unicode 'unknown' char. If the escaped - # bytes happen to be utf-8 they will instead get decoded, even if they - # were invalid in the charset the source was supposed to be in. This - # seems like it is not a bad thing; a defect was still registered. - original_bytes = string.encode('utf-8', 'surrogateescape') - return original_bytes.decode('utf-8', 'replace') - - - -# Helpers - -def formataddr(pair, charset='utf-8'): - """The inverse of parseaddr(), this takes a 2-tuple of the form - (realname, email_address) and returns the string value suitable - for an RFC 2822 From, To or Cc header. - - If the first element of pair is false, then the second element is - returned unmodified. - - The optional charset is the character set that is used to encode - realname in case realname is not ASCII safe. Can be an instance of str or - a Charset-like object which has a header_encode method. Default is - 'utf-8'. - """ - name, address = pair - # The address MUST (per RFC) be ascii, so raise a UnicodeError if it isn't. 
- address.encode('ascii') - if name: - try: - name.encode('ascii') - except UnicodeEncodeError: - if isinstance(charset, str): - # lazy import to improve module import time - from email.charset import Charset - charset = Charset(charset) - encoded_name = charset.header_encode(name) - return "%s <%s>" % (encoded_name, address) - else: - quotes = '' - if specialsre.search(name): - quotes = '"' - name = escapesre.sub(r'\\\g<0>', name) - return '%s%s%s <%s>' % (quotes, name, quotes, address) - return address - - -def _iter_escaped_chars(addr): - pos = 0 - escape = False - for pos, ch in enumerate(addr): - if escape: - yield (pos, '\\' + ch) - escape = False - elif ch == '\\': - escape = True - else: - yield (pos, ch) - if escape: - yield (pos, '\\') - - -def _strip_quoted_realnames(addr): - """Strip real names between quotes.""" - if '"' not in addr: - # Fast path - return addr - - start = 0 - open_pos = None - result = [] - for pos, ch in _iter_escaped_chars(addr): - if ch == '"': - if open_pos is None: - open_pos = pos - else: - if start != open_pos: - result.append(addr[start:open_pos]) - start = pos + 1 - open_pos = None - - if start < len(addr): - result.append(addr[start:]) - - return ''.join(result) - - -supports_strict_parsing = True - -def getaddresses(fieldvalues, *, strict=True): - """Return a list of (REALNAME, EMAIL) or ('','') for each fieldvalue. - - When parsing fails for a fieldvalue, a 2-tuple of ('', '') is returned in - its place. - - If strict is true, use a strict parser which rejects malformed inputs. - """ - - # If strict is true, if the resulting list of parsed addresses is greater - # than the number of fieldvalues in the input list, a parsing error has - # occurred and consequently a list containing a single empty 2-tuple [('', - # '')] is returned in its place. This is done to avoid invalid output. - # - # Malformed input: getaddresses(['alice@example.com <bob@example.com>']) - # Invalid output: [('', 'alice@example.com'), ('', 'bob@example.com')] - # Safe output: [('', '')] - - if not strict: - all = COMMASPACE.join(str(v) for v in fieldvalues) - a = _AddressList(all) - return a.addresslist - - fieldvalues = [str(v) for v in fieldvalues] - fieldvalues = _pre_parse_validation(fieldvalues) - addr = COMMASPACE.join(fieldvalues) - a = _AddressList(addr) - result = _post_parse_validation(a.addresslist) - - # Treat output as invalid if the number of addresses is not equal to the - # expected number of addresses. - n = 0 - for v in fieldvalues: - # When a comma is used in the Real Name part it is not a delimiter. - # So strip those out before counting the commas. - v = _strip_quoted_realnames(v) - # Expected number of addresses: 1 + number of commas - n += 1 + v.count(',') - if len(result) != n: - return [('', '')] - - return result - - -def _check_parenthesis(addr): - # Ignore parenthesis in quoted real names.
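# For example (hypothetical input), the helper above turns
#   '"Doe, John" <jdoe@example.com>, bob@example.com'
# into
#   ' <jdoe@example.com>, bob@example.com'
# so a comma or parenthesis inside a quoted real name is neither miscounted
# by getaddresses() above nor counted by the balancing loop below.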
- addr = _strip_quoted_realnames(addr) - - opens = 0 - for pos, ch in _iter_escaped_chars(addr): - if ch == '(': - opens += 1 - elif ch == ')': - opens -= 1 - if opens < 0: - return False - return (opens == 0) - - -def _pre_parse_validation(email_header_fields): - accepted_values = [] - for v in email_header_fields: - if not _check_parenthesis(v): - v = "('', '')" - accepted_values.append(v) - - return accepted_values - - -def _post_parse_validation(parsed_email_header_tuples): - accepted_values = [] - # The parser would have parsed a correctly formatted domain-literal - # The existence of an [ after parsing indicates a parsing failure - for v in parsed_email_header_tuples: - if '[' in v[1]: - v = ('', '') - accepted_values.append(v) - - return accepted_values - - -def _format_timetuple_and_zone(timetuple, zone): - return '%s, %02d %s %04d %02d:%02d:%02d %s' % ( - ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]], - timetuple[2], - ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', - 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1], - timetuple[0], timetuple[3], timetuple[4], timetuple[5], - zone) - -def formatdate(timeval=None, localtime=False, usegmt=False): - """Returns a date string as specified by RFC 2822, e.g.: - - Fri, 09 Nov 2001 01:08:47 -0000 - - Optional timeval if given is a floating-point time value as accepted by - gmtime() and localtime(), otherwise the current time is used. - - Optional localtime is a flag that when True, interprets timeval, and - returns a date relative to the local timezone instead of UTC, properly - taking daylight savings time into account. - - Optional argument usegmt means that the timezone is written out as - an ascii string, not numeric one (so "GMT" instead of "+0000"). This - is needed for HTTP, and is only used when localtime==False. - """ - # Note: we cannot use strftime() because that honors the locale and RFC - # 2822 requires that day and month names be the English abbreviations. - if timeval is None: - timeval = time.time() - dt = datetime.datetime.fromtimestamp(timeval, datetime.timezone.utc) - - if localtime: - dt = dt.astimezone() - usegmt = False - elif not usegmt: - dt = dt.replace(tzinfo=None) - return format_datetime(dt, usegmt) - -def format_datetime(dt, usegmt=False): - """Turn a datetime into a date string as specified in RFC 2822. - - If usegmt is True, dt must be an aware datetime with an offset of zero. In - this case 'GMT' will be rendered instead of the normal +0000 required by - RFC2822. This is to support HTTP headers involving date stamps. - """ - now = dt.timetuple() - if usegmt: - if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc: - raise ValueError("usegmt option requires a UTC datetime") - zone = 'GMT' - elif dt.tzinfo is None: - zone = '-0000' - else: - zone = dt.strftime("%z") - return _format_timetuple_and_zone(now, zone) - - -def make_msgid(idstring=None, domain=None): - """Returns a string suitable for RFC 2822 compliant Message-ID, e.g: - - <142480216486.20800.16526388040877946887@nightshade.la.mastaler.com> - - Optional idstring if given is a string used to strengthen the - uniqueness of the message id. Optional domain if given provides the - portion of the message id after the '@'. It defaults to the locally - defined hostname. 
- """ - # Lazy imports to speedup module import time - # (no other functions in email.utils need these modules) - import random - import socket - - timeval = int(time.time()*100) - pid = os.getpid() - randint = random.getrandbits(64) - if idstring is None: - idstring = '' - else: - idstring = '.' + idstring - if domain is None: - domain = socket.getfqdn() - msgid = '<%d.%d.%d%s@%s>' % (timeval, pid, randint, idstring, domain) - return msgid - - -def parsedate_to_datetime(data): - parsed_date_tz = _parsedate_tz(data) - if parsed_date_tz is None: - raise ValueError('Invalid date value or format "%s"' % str(data)) - *dtuple, tz = parsed_date_tz - if tz is None: - return datetime.datetime(*dtuple[:6]) - return datetime.datetime(*dtuple[:6], - tzinfo=datetime.timezone(datetime.timedelta(seconds=tz))) - - -def parseaddr(addr, *, strict=True): - """ - Parse addr into its constituent realname and email address parts. - - Return a tuple of realname and email address, unless the parse fails, in - which case return a 2-tuple of ('', ''). - - If strict is True, use a strict parser which rejects malformed inputs. - """ - if not strict: - addrs = _AddressList(addr).addresslist - if not addrs: - return ('', '') - return addrs[0] - - if isinstance(addr, list): - addr = addr[0] - - if not isinstance(addr, str): - return ('', '') - - addr = _pre_parse_validation([addr])[0] - addrs = _post_parse_validation(_AddressList(addr).addresslist) - - if not addrs or len(addrs) > 1: - return ('', '') - - return addrs[0] - - -# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3. -def unquote(str): - """Remove quotes from a string.""" - if len(str) > 1: - if str.startswith('"') and str.endswith('"'): - return str[1:-1].replace('\\\\', '\\').replace('\\"', '"') - if str.startswith('<') and str.endswith('>'): - return str[1:-1] - return str - - - -# RFC2231-related functions - parameter encoding and decoding -def decode_rfc2231(s): - """Decode string according to RFC 2231""" - parts = s.split(TICK, 2) - if len(parts) <= 2: - return None, None, s - return parts - - -def encode_rfc2231(s, charset=None, language=None): - """Encode string according to RFC 2231. - - If neither charset nor language is given, then s is returned as-is. If - charset is given but not language, the string is encoded using the empty - string for language. - """ - s = urllib.parse.quote(s, safe='', encoding=charset or 'ascii') - if charset is None and language is None: - return s - if language is None: - language = '' - return "%s'%s'%s" % (charset, language, s) - - -rfc2231_continuation = re.compile(r'^(?P\w+)\*((?P[0-9]+)\*?)?$', - re.ASCII) - -def decode_params(params): - """Decode parameters list according to RFC 2231. - - params is a sequence of 2-tuples containing (param name, string value). - """ - new_params = [params[0]] - # Map parameter's name to a list of continuations. The values are a - # 3-tuple of the continuation number, the string value, and a flag - # specifying whether a particular segment is %-encoded. 
- rfc2231_params = {} - for name, value in params[1:]: - encoded = name.endswith('*') - value = unquote(value) - mo = rfc2231_continuation.match(name) - if mo: - name, num = mo.group('name', 'num') - if num is not None: - num = int(num) - rfc2231_params.setdefault(name, []).append((num, value, encoded)) - else: - new_params.append((name, '"%s"' % quote(value))) - if rfc2231_params: - for name, continuations in rfc2231_params.items(): - value = [] - extended = False - # Sort by number, treating None as 0 if there is no 0, - # and ignore it if there is already a 0. - has_zero = any(x[0] == 0 for x in continuations) - if has_zero: - continuations = [x for x in continuations if x[0] is not None] - else: - continuations = [(x[0] or 0, x[1], x[2]) for x in continuations] - continuations.sort(key=lambda x: x[0]) - # And now append all values in numerical order, converting - # %-encodings for the encoded segments. If any of the - # continuation names ends in a *, then the entire string, after - # decoding segments and concatenating, must have the charset and - # language specifiers at the beginning of the string. - for num, s, encoded in continuations: - if encoded: - # Decode as "latin-1", so the characters in s directly - # represent the percent-encoded octet values. - # collapse_rfc2231_value treats this as an octet sequence. - s = urllib.parse.unquote(s, encoding="latin-1") - extended = True - value.append(s) - value = quote(EMPTYSTRING.join(value)) - if extended: - charset, language, value = decode_rfc2231(value) - new_params.append((name, (charset, language, '"%s"' % value))) - else: - new_params.append((name, '"%s"' % value)) - return new_params - -def collapse_rfc2231_value(value, errors='replace', - fallback_charset='us-ascii'): - if not isinstance(value, tuple) or len(value) != 3: - return unquote(value) - # While value comes to us as a unicode string, we need it to be a bytes - # object. We do not want bytes() normal utf-8 decoder, we want a straight - # interpretation of the string as character bytes. - charset, language, text = value - if charset is None: - # Issue 17369: if charset/lang is None, decode_rfc2231 couldn't parse - # the value, so use the fallback_charset. - charset = fallback_charset - rawbytes = bytes(text, 'raw-unicode-escape') - try: - return str(rawbytes, charset, errors) - except LookupError: - # charset is not a known codec. - return unquote(text) - - -# -# datetime doesn't provide a localtime function yet, so provide one. Code -# adapted from the patch in issue 9527. This may not be perfect, but it is -# better than not having it. -# - -def localtime(dt=None, isdst=None): - """Return local time as an aware datetime object. - - If called without arguments, return current time. Otherwise *dt* - argument should be a datetime instance, and it is converted to the - local time zone according to the system time zone database. If *dt* is - naive (that is, dt.tzinfo is None), it is assumed to be in local time. - The isdst parameter is ignored. 
- - """ - if isdst is not None: - import warnings - warnings._deprecated( - "The 'isdst' parameter to 'localtime'", - message='{name} is deprecated and slated for removal in Python {remove}', - remove=(3, 14), - ) - if dt is None: - dt = datetime.datetime.now() - return dt.astimezone() diff --git a/Python313_13_x64_Template/Lib/encodings/__init__.py b/Python313_13_x64_Template/Lib/encodings/__init__.py deleted file mode 100644 index 1c420ba1..00000000 --- a/Python313_13_x64_Template/Lib/encodings/__init__.py +++ /dev/null @@ -1,179 +0,0 @@ -""" Standard "encodings" Package - - Standard Python encoding modules are stored in this package - directory. - - Codec modules must have names corresponding to normalized encoding - names as defined in the normalize_encoding() function below, e.g. - 'utf-8' must be implemented by the module 'utf_8.py'. - - Each codec module must export the following interface: - - * getregentry() -> codecs.CodecInfo object - The getregentry() API must return a CodecInfo object with encoder, decoder, - incrementalencoder, incrementaldecoder, streamwriter and streamreader - attributes which adhere to the Python Codec Interface Standard. - - In addition, a module may optionally also define the following - APIs which are then used by the package's codec search function: - - * getaliases() -> sequence of encoding name strings to use as aliases - - Alias names returned by getaliases() must be normalized encoding - names as defined by normalize_encoding(). - -Written by Marc-Andre Lemburg (mal@lemburg.com). - -(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. - -"""#" - -import codecs -import sys -from . import aliases - -_cache = {} -_MAXCACHE = 500 -_unknown = '--unknown--' -_import_tail = ['*'] -_aliases = aliases.aliases - -class CodecRegistryError(LookupError, SystemError): - pass - -def normalize_encoding(encoding): - - """ Normalize an encoding name. - - Normalization works as follows: all non-alphanumeric - characters except the dot used for Python package names are - collapsed and replaced with a single underscore, e.g. ' -;#' - becomes '_'. Leading and trailing underscores are removed. - - Note that encoding names should be ASCII only. - - """ - if isinstance(encoding, bytes): - encoding = str(encoding, "ascii") - - chars = [] - punct = False - for c in encoding: - if c.isalnum() or c == '.': - if punct and chars: - chars.append('_') - if c.isascii(): - chars.append(c) - punct = False - else: - punct = True - return ''.join(chars) - -def search_function(encoding): - - # Cache lookup - entry = _cache.get(encoding, _unknown) - if entry is not _unknown: - return entry - - # Import the module: - # - # First try to find an alias for the normalized encoding - # name and lookup the module using the aliased name, then try to - # lookup the module using the standard import scheme, i.e. first - # try in the encodings package, then at top-level. - # - norm_encoding = normalize_encoding(encoding) - aliased_encoding = _aliases.get(norm_encoding) or \ - _aliases.get(norm_encoding.replace('.', '_')) - if aliased_encoding is not None: - modnames = [aliased_encoding, - norm_encoding] - else: - modnames = [norm_encoding] - for modname in modnames: - if not modname or '.' in modname: - continue - try: - # Import is absolute to prevent the possibly malicious import of a - # module with side-effects that is not in the 'encodings' package. - mod = __import__('encodings.' 
+ modname, fromlist=_import_tail, - level=0) - except ImportError: - # ImportError may occur because 'encodings.(modname)' does not exist, - # or because it imports a name that does not exist (see mbcs and oem) - pass - else: - break - else: - mod = None - - try: - getregentry = mod.getregentry - except AttributeError: - # Not a codec module - mod = None - - if mod is None: - # Cache misses - if len(_cache) >= _MAXCACHE: - _cache.clear() - _cache[encoding] = None - return None - - # Now ask the module for the registry entry - entry = getregentry() - if not isinstance(entry, codecs.CodecInfo): - if not 4 <= len(entry) <= 7: - raise CodecRegistryError('module "%s" (%s) failed to register' - % (mod.__name__, mod.__file__)) - if not callable(entry[0]) or not callable(entry[1]) or \ - (entry[2] is not None and not callable(entry[2])) or \ - (entry[3] is not None and not callable(entry[3])) or \ - (len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \ - (len(entry) > 5 and entry[5] is not None and not callable(entry[5])): - raise CodecRegistryError('incompatible codecs in module "%s" (%s)' - % (mod.__name__, mod.__file__)) - if len(entry)<7 or entry[6] is None: - entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],) - entry = codecs.CodecInfo(*entry) - - # Cache the codec registry entry - if len(_cache) >= _MAXCACHE: - _cache.clear() - _cache[encoding] = entry - - # Register its aliases (without overwriting previously registered - # aliases) - try: - codecaliases = mod.getaliases() - except AttributeError: - pass - else: - for alias in codecaliases: - if alias not in _aliases: - _aliases[alias] = modname - - # Return the registry entry - return entry - -# Register the search_function in the Python codec registry -codecs.register(search_function) - -if sys.platform == 'win32': - # bpo-671666, bpo-46668: If Python does not implement a codec for current - # Windows ANSI code page, use the "mbcs" codec instead: - # WideCharToMultiByte() and MultiByteToWideChar() functions with CP_ACP. - # Python does not support custom code pages. 
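# For example, on a host whose ANSI code page is 1252, the helper below
# answers only for the exact name "cp1252"; every other name returns None
# and is left to the other registered search functions.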
- def _alias_mbcs(encoding): - try: - import _winapi - ansi_code_page = "cp%s" % _winapi.GetACP() - if encoding == ansi_code_page: - import encodings.mbcs - return encodings.mbcs.getregentry() - except ImportError: - # Imports may fail while we are shutting down - pass - - codecs.register(_alias_mbcs) diff --git a/Python313_13_x64_Template/Lib/encodings/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/encodings/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 976f86a0..00000000 Binary files a/Python313_13_x64_Template/Lib/encodings/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/encodings/__pycache__/aliases.cpython-313.pyc b/Python313_13_x64_Template/Lib/encodings/__pycache__/aliases.cpython-313.pyc deleted file mode 100644 index 1e1328bb..00000000 Binary files a/Python313_13_x64_Template/Lib/encodings/__pycache__/aliases.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/encodings/__pycache__/cp1252.cpython-313.pyc b/Python313_13_x64_Template/Lib/encodings/__pycache__/cp1252.cpython-313.pyc deleted file mode 100644 index 511b11b8..00000000 Binary files a/Python313_13_x64_Template/Lib/encodings/__pycache__/cp1252.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/encodings/__pycache__/cp437.cpython-313.pyc b/Python313_13_x64_Template/Lib/encodings/__pycache__/cp437.cpython-313.pyc deleted file mode 100644 index fceaefab..00000000 Binary files a/Python313_13_x64_Template/Lib/encodings/__pycache__/cp437.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/encodings/__pycache__/idna.cpython-313.pyc b/Python313_13_x64_Template/Lib/encodings/__pycache__/idna.cpython-313.pyc deleted file mode 100644 index 63455476..00000000 Binary files a/Python313_13_x64_Template/Lib/encodings/__pycache__/idna.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/encodings/__pycache__/utf_8.cpython-313.pyc b/Python313_13_x64_Template/Lib/encodings/__pycache__/utf_8.cpython-313.pyc deleted file mode 100644 index 6f321c29..00000000 Binary files a/Python313_13_x64_Template/Lib/encodings/__pycache__/utf_8.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/encodings/aliases.py b/Python313_13_x64_Template/Lib/encodings/aliases.py deleted file mode 100644 index 6a5ca046..00000000 --- a/Python313_13_x64_Template/Lib/encodings/aliases.py +++ /dev/null @@ -1,552 +0,0 @@ -""" Encoding Aliases Support - - This module is used by the encodings package search function to - map encodings names to module names. - - Note that the search function normalizes the encoding names before - doing the lookup, so the mapping will have to map normalized - encoding names to module names. - - Contents: - - The following aliases dictionary contains mappings of all IANA - character set names for which the Python core library provides - codecs. In addition to these, a few Python specific codec - aliases have also been added. - -""" -aliases = { - - # Please keep this list sorted alphabetically by value ! 
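    # For example, codecs.lookup('ISO_8859-1:1987') is lowercased by the
    # codec registry, normalized to 'iso_8859_1_1987' by normalize_encoding(),
    # and then mapped through this table to the implementing module 'latin_1'.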
- - # ascii codec - '646' : 'ascii', - 'ansi_x3.4_1968' : 'ascii', - 'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name - 'ansi_x3.4_1986' : 'ascii', - 'cp367' : 'ascii', - 'csascii' : 'ascii', - 'ibm367' : 'ascii', - 'iso646_us' : 'ascii', - 'iso_646.irv_1991' : 'ascii', - 'iso_ir_6' : 'ascii', - 'us' : 'ascii', - 'us_ascii' : 'ascii', - - # base64_codec codec - 'base64' : 'base64_codec', - 'base_64' : 'base64_codec', - - # big5 codec - 'big5_tw' : 'big5', - 'csbig5' : 'big5', - - # big5hkscs codec - 'big5_hkscs' : 'big5hkscs', - 'hkscs' : 'big5hkscs', - - # bz2_codec codec - 'bz2' : 'bz2_codec', - - # cp037 codec - '037' : 'cp037', - 'csibm037' : 'cp037', - 'ebcdic_cp_ca' : 'cp037', - 'ebcdic_cp_nl' : 'cp037', - 'ebcdic_cp_us' : 'cp037', - 'ebcdic_cp_wt' : 'cp037', - 'ibm037' : 'cp037', - 'ibm039' : 'cp037', - - # cp1026 codec - '1026' : 'cp1026', - 'csibm1026' : 'cp1026', - 'ibm1026' : 'cp1026', - - # cp1125 codec - '1125' : 'cp1125', - 'ibm1125' : 'cp1125', - 'cp866u' : 'cp1125', - 'ruscii' : 'cp1125', - - # cp1140 codec - '1140' : 'cp1140', - 'ibm1140' : 'cp1140', - - # cp1250 codec - '1250' : 'cp1250', - 'windows_1250' : 'cp1250', - - # cp1251 codec - '1251' : 'cp1251', - 'windows_1251' : 'cp1251', - - # cp1252 codec - '1252' : 'cp1252', - 'windows_1252' : 'cp1252', - - # cp1253 codec - '1253' : 'cp1253', - 'windows_1253' : 'cp1253', - - # cp1254 codec - '1254' : 'cp1254', - 'windows_1254' : 'cp1254', - - # cp1255 codec - '1255' : 'cp1255', - 'windows_1255' : 'cp1255', - - # cp1256 codec - '1256' : 'cp1256', - 'windows_1256' : 'cp1256', - - # cp1257 codec - '1257' : 'cp1257', - 'windows_1257' : 'cp1257', - - # cp1258 codec - '1258' : 'cp1258', - 'windows_1258' : 'cp1258', - - # cp273 codec - '273' : 'cp273', - 'ibm273' : 'cp273', - 'csibm273' : 'cp273', - - # cp424 codec - '424' : 'cp424', - 'csibm424' : 'cp424', - 'ebcdic_cp_he' : 'cp424', - 'ibm424' : 'cp424', - - # cp437 codec - '437' : 'cp437', - 'cspc8codepage437' : 'cp437', - 'ibm437' : 'cp437', - - # cp500 codec - '500' : 'cp500', - 'csibm500' : 'cp500', - 'ebcdic_cp_be' : 'cp500', - 'ebcdic_cp_ch' : 'cp500', - 'ibm500' : 'cp500', - - # cp775 codec - '775' : 'cp775', - 'cspc775baltic' : 'cp775', - 'ibm775' : 'cp775', - - # cp850 codec - '850' : 'cp850', - 'cspc850multilingual' : 'cp850', - 'ibm850' : 'cp850', - - # cp852 codec - '852' : 'cp852', - 'cspcp852' : 'cp852', - 'ibm852' : 'cp852', - - # cp855 codec - '855' : 'cp855', - 'csibm855' : 'cp855', - 'ibm855' : 'cp855', - - # cp857 codec - '857' : 'cp857', - 'csibm857' : 'cp857', - 'ibm857' : 'cp857', - - # cp858 codec - '858' : 'cp858', - 'csibm858' : 'cp858', - 'ibm858' : 'cp858', - - # cp860 codec - '860' : 'cp860', - 'csibm860' : 'cp860', - 'ibm860' : 'cp860', - - # cp861 codec - '861' : 'cp861', - 'cp_is' : 'cp861', - 'csibm861' : 'cp861', - 'ibm861' : 'cp861', - - # cp862 codec - '862' : 'cp862', - 'cspc862latinhebrew' : 'cp862', - 'ibm862' : 'cp862', - - # cp863 codec - '863' : 'cp863', - 'csibm863' : 'cp863', - 'ibm863' : 'cp863', - - # cp864 codec - '864' : 'cp864', - 'csibm864' : 'cp864', - 'ibm864' : 'cp864', - - # cp865 codec - '865' : 'cp865', - 'csibm865' : 'cp865', - 'ibm865' : 'cp865', - - # cp866 codec - '866' : 'cp866', - 'csibm866' : 'cp866', - 'ibm866' : 'cp866', - - # cp869 codec - '869' : 'cp869', - 'cp_gr' : 'cp869', - 'csibm869' : 'cp869', - 'ibm869' : 'cp869', - - # cp932 codec - '932' : 'cp932', - 'ms932' : 'cp932', - 'mskanji' : 'cp932', - 'ms_kanji' : 'cp932', - 'windows_31j' : 'cp932', - - # cp949 codec - '949' : 'cp949', 
- 'ms949' : 'cp949', - 'uhc' : 'cp949', - - # cp950 codec - '950' : 'cp950', - 'ms950' : 'cp950', - - # euc_jis_2004 codec - 'jisx0213' : 'euc_jis_2004', - 'eucjis2004' : 'euc_jis_2004', - 'euc_jis2004' : 'euc_jis_2004', - - # euc_jisx0213 codec - 'eucjisx0213' : 'euc_jisx0213', - - # euc_jp codec - 'eucjp' : 'euc_jp', - 'ujis' : 'euc_jp', - 'u_jis' : 'euc_jp', - - # euc_kr codec - 'euckr' : 'euc_kr', - 'korean' : 'euc_kr', - 'ksc5601' : 'euc_kr', - 'ks_c_5601' : 'euc_kr', - 'ks_c_5601_1987' : 'euc_kr', - 'ksx1001' : 'euc_kr', - 'ks_x_1001' : 'euc_kr', - - # gb18030 codec - 'gb18030_2000' : 'gb18030', - - # gb2312 codec - 'chinese' : 'gb2312', - 'csiso58gb231280' : 'gb2312', - 'euc_cn' : 'gb2312', - 'euccn' : 'gb2312', - 'eucgb2312_cn' : 'gb2312', - 'gb2312_1980' : 'gb2312', - 'gb2312_80' : 'gb2312', - 'iso_ir_58' : 'gb2312', - - # gbk codec - '936' : 'gbk', - 'cp936' : 'gbk', - 'ms936' : 'gbk', - - # hex_codec codec - 'hex' : 'hex_codec', - - # hp_roman8 codec - 'roman8' : 'hp_roman8', - 'r8' : 'hp_roman8', - 'csHPRoman8' : 'hp_roman8', - 'cp1051' : 'hp_roman8', - 'ibm1051' : 'hp_roman8', - - # hz codec - 'hzgb' : 'hz', - 'hz_gb' : 'hz', - 'hz_gb_2312' : 'hz', - - # iso2022_jp codec - 'csiso2022jp' : 'iso2022_jp', - 'iso2022jp' : 'iso2022_jp', - 'iso_2022_jp' : 'iso2022_jp', - - # iso2022_jp_1 codec - 'iso2022jp_1' : 'iso2022_jp_1', - 'iso_2022_jp_1' : 'iso2022_jp_1', - - # iso2022_jp_2 codec - 'iso2022jp_2' : 'iso2022_jp_2', - 'iso_2022_jp_2' : 'iso2022_jp_2', - - # iso2022_jp_2004 codec - 'iso_2022_jp_2004' : 'iso2022_jp_2004', - 'iso2022jp_2004' : 'iso2022_jp_2004', - - # iso2022_jp_3 codec - 'iso2022jp_3' : 'iso2022_jp_3', - 'iso_2022_jp_3' : 'iso2022_jp_3', - - # iso2022_jp_ext codec - 'iso2022jp_ext' : 'iso2022_jp_ext', - 'iso_2022_jp_ext' : 'iso2022_jp_ext', - - # iso2022_kr codec - 'csiso2022kr' : 'iso2022_kr', - 'iso2022kr' : 'iso2022_kr', - 'iso_2022_kr' : 'iso2022_kr', - - # iso8859_10 codec - 'csisolatin6' : 'iso8859_10', - 'iso_8859_10' : 'iso8859_10', - 'iso_8859_10_1992' : 'iso8859_10', - 'iso_ir_157' : 'iso8859_10', - 'l6' : 'iso8859_10', - 'latin6' : 'iso8859_10', - - # iso8859_11 codec - 'thai' : 'iso8859_11', - 'iso_8859_11' : 'iso8859_11', - 'iso_8859_11_2001' : 'iso8859_11', - - # iso8859_13 codec - 'iso_8859_13' : 'iso8859_13', - 'l7' : 'iso8859_13', - 'latin7' : 'iso8859_13', - - # iso8859_14 codec - 'iso_8859_14' : 'iso8859_14', - 'iso_8859_14_1998' : 'iso8859_14', - 'iso_celtic' : 'iso8859_14', - 'iso_ir_199' : 'iso8859_14', - 'l8' : 'iso8859_14', - 'latin8' : 'iso8859_14', - - # iso8859_15 codec - 'iso_8859_15' : 'iso8859_15', - 'l9' : 'iso8859_15', - 'latin9' : 'iso8859_15', - - # iso8859_16 codec - 'iso_8859_16' : 'iso8859_16', - 'iso_8859_16_2001' : 'iso8859_16', - 'iso_ir_226' : 'iso8859_16', - 'l10' : 'iso8859_16', - 'latin10' : 'iso8859_16', - - # iso8859_2 codec - 'csisolatin2' : 'iso8859_2', - 'iso_8859_2' : 'iso8859_2', - 'iso_8859_2_1987' : 'iso8859_2', - 'iso_ir_101' : 'iso8859_2', - 'l2' : 'iso8859_2', - 'latin2' : 'iso8859_2', - - # iso8859_3 codec - 'csisolatin3' : 'iso8859_3', - 'iso_8859_3' : 'iso8859_3', - 'iso_8859_3_1988' : 'iso8859_3', - 'iso_ir_109' : 'iso8859_3', - 'l3' : 'iso8859_3', - 'latin3' : 'iso8859_3', - - # iso8859_4 codec - 'csisolatin4' : 'iso8859_4', - 'iso_8859_4' : 'iso8859_4', - 'iso_8859_4_1988' : 'iso8859_4', - 'iso_ir_110' : 'iso8859_4', - 'l4' : 'iso8859_4', - 'latin4' : 'iso8859_4', - - # iso8859_5 codec - 'csisolatincyrillic' : 'iso8859_5', - 'cyrillic' : 'iso8859_5', - 'iso_8859_5' : 'iso8859_5', - 'iso_8859_5_1988' : 
'iso8859_5', - 'iso_ir_144' : 'iso8859_5', - - # iso8859_6 codec - 'arabic' : 'iso8859_6', - 'asmo_708' : 'iso8859_6', - 'csisolatinarabic' : 'iso8859_6', - 'ecma_114' : 'iso8859_6', - 'iso_8859_6' : 'iso8859_6', - 'iso_8859_6_1987' : 'iso8859_6', - 'iso_ir_127' : 'iso8859_6', - - # iso8859_7 codec - 'csisolatingreek' : 'iso8859_7', - 'ecma_118' : 'iso8859_7', - 'elot_928' : 'iso8859_7', - 'greek' : 'iso8859_7', - 'greek8' : 'iso8859_7', - 'iso_8859_7' : 'iso8859_7', - 'iso_8859_7_1987' : 'iso8859_7', - 'iso_ir_126' : 'iso8859_7', - - # iso8859_8 codec - 'csisolatinhebrew' : 'iso8859_8', - 'hebrew' : 'iso8859_8', - 'iso_8859_8' : 'iso8859_8', - 'iso_8859_8_1988' : 'iso8859_8', - 'iso_ir_138' : 'iso8859_8', - - # iso8859_9 codec - 'csisolatin5' : 'iso8859_9', - 'iso_8859_9' : 'iso8859_9', - 'iso_8859_9_1989' : 'iso8859_9', - 'iso_ir_148' : 'iso8859_9', - 'l5' : 'iso8859_9', - 'latin5' : 'iso8859_9', - - # johab codec - 'cp1361' : 'johab', - 'ms1361' : 'johab', - - # koi8_r codec - 'cskoi8r' : 'koi8_r', - - # kz1048 codec - 'kz_1048' : 'kz1048', - 'rk1048' : 'kz1048', - 'strk1048_2002' : 'kz1048', - - # latin_1 codec - # - # Note that the latin_1 codec is implemented internally in C and a - # lot faster than the charmap codec iso8859_1 which uses the same - # encoding. This is why we discourage the use of the iso8859_1 - # codec and alias it to latin_1 instead. - # - '8859' : 'latin_1', - 'cp819' : 'latin_1', - 'csisolatin1' : 'latin_1', - 'ibm819' : 'latin_1', - 'iso8859' : 'latin_1', - 'iso8859_1' : 'latin_1', - 'iso_8859_1' : 'latin_1', - 'iso_8859_1_1987' : 'latin_1', - 'iso_ir_100' : 'latin_1', - 'l1' : 'latin_1', - 'latin' : 'latin_1', - 'latin1' : 'latin_1', - - # mac_cyrillic codec - 'maccyrillic' : 'mac_cyrillic', - - # mac_greek codec - 'macgreek' : 'mac_greek', - - # mac_iceland codec - 'maciceland' : 'mac_iceland', - - # mac_latin2 codec - 'maccentraleurope' : 'mac_latin2', - 'mac_centeuro' : 'mac_latin2', - 'maclatin2' : 'mac_latin2', - - # mac_roman codec - 'macintosh' : 'mac_roman', - 'macroman' : 'mac_roman', - - # mac_turkish codec - 'macturkish' : 'mac_turkish', - - # mbcs codec - 'ansi' : 'mbcs', - 'dbcs' : 'mbcs', - - # ptcp154 codec - 'csptcp154' : 'ptcp154', - 'pt154' : 'ptcp154', - 'cp154' : 'ptcp154', - 'cyrillic_asian' : 'ptcp154', - - # quopri_codec codec - 'quopri' : 'quopri_codec', - 'quoted_printable' : 'quopri_codec', - 'quotedprintable' : 'quopri_codec', - - # rot_13 codec - 'rot13' : 'rot_13', - - # shift_jis codec - 'csshiftjis' : 'shift_jis', - 'shiftjis' : 'shift_jis', - 'sjis' : 'shift_jis', - 's_jis' : 'shift_jis', - - # shift_jis_2004 codec - 'shiftjis2004' : 'shift_jis_2004', - 'sjis_2004' : 'shift_jis_2004', - 's_jis_2004' : 'shift_jis_2004', - - # shift_jisx0213 codec - 'shiftjisx0213' : 'shift_jisx0213', - 'sjisx0213' : 'shift_jisx0213', - 's_jisx0213' : 'shift_jisx0213', - - # tis_620 codec - 'tis620' : 'tis_620', - 'tis_620_0' : 'tis_620', - 'tis_620_2529_0' : 'tis_620', - 'tis_620_2529_1' : 'tis_620', - 'iso_ir_166' : 'tis_620', - - # utf_16 codec - 'u16' : 'utf_16', - 'utf16' : 'utf_16', - - # utf_16_be codec - 'unicodebigunmarked' : 'utf_16_be', - 'utf_16be' : 'utf_16_be', - - # utf_16_le codec - 'unicodelittleunmarked' : 'utf_16_le', - 'utf_16le' : 'utf_16_le', - - # utf_32 codec - 'u32' : 'utf_32', - 'utf32' : 'utf_32', - - # utf_32_be codec - 'utf_32be' : 'utf_32_be', - - # utf_32_le codec - 'utf_32le' : 'utf_32_le', - - # utf_7 codec - 'u7' : 'utf_7', - 'utf7' : 'utf_7', - 'unicode_1_1_utf_7' : 'utf_7', - - # utf_8 codec - 'u8' : 'utf_8', - 
'utf' : 'utf_8', - 'utf8' : 'utf_8', - 'utf8_ucs2' : 'utf_8', - 'utf8_ucs4' : 'utf_8', - 'cp65001' : 'utf_8', - - # uu_codec codec - 'uu' : 'uu_codec', - - # zlib_codec codec - 'zip' : 'zlib_codec', - 'zlib' : 'zlib_codec', - - # temporary mac CJK aliases, will be replaced by proper codecs in 3.1 - 'x_mac_japanese' : 'shift_jis', - 'x_mac_korean' : 'euc_kr', - 'x_mac_simp_chinese' : 'gb2312', - 'x_mac_trad_chinese' : 'big5', -} diff --git a/Python313_13_x64_Template/Lib/ensurepip/__init__.py b/Python313_13_x64_Template/Lib/ensurepip/__init__.py deleted file mode 100644 index 9f4e64bc..00000000 --- a/Python313_13_x64_Template/Lib/ensurepip/__init__.py +++ /dev/null @@ -1,265 +0,0 @@ -import os -import subprocess -import sys -import sysconfig -import tempfile -from contextlib import nullcontext -from importlib import resources -from pathlib import Path -from shutil import copy2 - - -__all__ = ["version", "bootstrap"] -_PIP_VERSION = "26.0.1" - -# Directory of system wheel packages. Some Linux distribution packaging -# policies recommend against bundling dependencies. For example, Fedora -# installs wheel packages in the /usr/share/python-wheels/ directory and don't -# install the ensurepip._bundled package. -_pkg_dir = sysconfig.get_config_var('WHEEL_PKG_DIR') -if _pkg_dir: - _WHEEL_PKG_DIR = Path(_pkg_dir).resolve() -else: - _WHEEL_PKG_DIR = None - - -def _find_wheel_pkg_dir_pip(): - if _WHEEL_PKG_DIR is None: - # NOTE: The compile-time `WHEEL_PKG_DIR` is unset so there is no place - # NOTE: for looking up the wheels. - return None - - dist_matching_wheels = _WHEEL_PKG_DIR.glob('pip-*.whl') - try: - last_matching_dist_wheel = sorted(dist_matching_wheels)[-1] - except IndexError: - # NOTE: `WHEEL_PKG_DIR` does not contain any wheel files for `pip`. - return None - - return nullcontext(last_matching_dist_wheel) - - -def _get_pip_whl_path_ctx(): - # Prefer pip from the wheel package directory, if present. - if (alternative_pip_wheel_path := _find_wheel_pkg_dir_pip()) is not None: - return alternative_pip_wheel_path - - return resources.as_file( - resources.files('ensurepip') - / '_bundled' - / f'pip-{_PIP_VERSION}-py3-none-any.whl' - ) - - -def _get_pip_version(): - with _get_pip_whl_path_ctx() as bundled_wheel_path: - wheel_name = bundled_wheel_path.name - return ( - # Extract '21.2.4' from 'pip-21.2.4-py3-none-any.whl' - wheel_name. - removeprefix('pip-'). - partition('-')[0] - ) - - -def _run_pip(args, additional_paths=None): - # Run the bootstrapping in a subprocess to avoid leaking any state that happens - # after pip has executed. Particularly, this avoids the case when pip holds onto - # the files in *additional_paths*, preventing us to remove them at the end of the - # invocation. - code = f""" -import runpy -import sys -sys.path = {additional_paths or []} + sys.path -sys.argv[1:] = {args} -runpy.run_module("pip", run_name="__main__", alter_sys=True) -""" - - cmd = [ - sys.executable, - '-W', - 'ignore::DeprecationWarning', - '-c', - code, - ] - if sys.flags.isolated: - # run code in isolated mode if currently running isolated - cmd.insert(1, '-I') - return subprocess.run(cmd, check=True).returncode - - -def version(): - """ - Returns a string specifying the bundled version of pip. 
- """ - return _get_pip_version() - - -def _disable_pip_configuration_settings(): - # We deliberately ignore all pip environment variables - # when invoking pip - # See http://bugs.python.org/issue19734 for details - keys_to_remove = [k for k in os.environ if k.startswith("PIP_")] - for k in keys_to_remove: - del os.environ[k] - # We also ignore the settings in the default pip configuration file - # See http://bugs.python.org/issue20053 for details - os.environ['PIP_CONFIG_FILE'] = os.devnull - - -def bootstrap(*, root=None, upgrade=False, user=False, - altinstall=False, default_pip=False, - verbosity=0): - """ - Bootstrap pip into the current Python installation (or the given root - directory). - - Note that calling this function will alter both sys.path and os.environ. - """ - # Discard the return value - _bootstrap(root=root, upgrade=upgrade, user=user, - altinstall=altinstall, default_pip=default_pip, - verbosity=verbosity) - - -def _bootstrap(*, root=None, upgrade=False, user=False, - altinstall=False, default_pip=False, - verbosity=0): - """ - Bootstrap pip into the current Python installation (or the given root - directory). Returns pip command status code. - - Note that calling this function will alter both sys.path and os.environ. - """ - if altinstall and default_pip: - raise ValueError("Cannot use altinstall and default_pip together") - - sys.audit("ensurepip.bootstrap", root) - - _disable_pip_configuration_settings() - - # By default, installing pip installs all of the - # following scripts (X.Y == running Python version): - # - # pip, pipX, pipX.Y - # - # pip 1.5+ allows ensurepip to request that some of those be left out - if altinstall: - # omit pip, pipX - os.environ["ENSUREPIP_OPTIONS"] = "altinstall" - elif not default_pip: - # omit pip - os.environ["ENSUREPIP_OPTIONS"] = "install" - - with tempfile.TemporaryDirectory() as tmpdir: - # Put our bundled wheels into a temporary directory and construct the - # additional paths that need added to sys.path - tmpdir_path = Path(tmpdir) - with _get_pip_whl_path_ctx() as bundled_wheel_path: - tmp_wheel_path = tmpdir_path / bundled_wheel_path.name - copy2(bundled_wheel_path, tmp_wheel_path) - - # Construct the arguments to be passed to the pip command - args = ["install", "--no-cache-dir", "--no-index", "--find-links", tmpdir] - if root: - args += ["--root", root] - if upgrade: - args += ["--upgrade"] - if user: - args += ["--user"] - if verbosity: - args += ["-" + "v" * verbosity] - - return _run_pip([*args, "pip"], [os.fsdecode(tmp_wheel_path)]) - - -def _uninstall_helper(*, verbosity=0): - """Helper to support a clean default uninstall process on Windows - - Note that calling this function may alter os.environ. 
- """ - # Nothing to do if pip was never installed, or has been removed - try: - import pip - except ImportError: - return - - # If the installed pip version doesn't match the available one, - # leave it alone - available_version = version() - if pip.__version__ != available_version: - print(f"ensurepip will only uninstall a matching version " - f"({pip.__version__!r} installed, " - f"{available_version!r} available)", - file=sys.stderr) - return - - _disable_pip_configuration_settings() - - # Construct the arguments to be passed to the pip command - args = ["uninstall", "-y", "--disable-pip-version-check"] - if verbosity: - args += ["-" + "v" * verbosity] - - return _run_pip([*args, "pip"]) - - -def _main(argv=None): - import argparse - parser = argparse.ArgumentParser(prog="python -m ensurepip") - parser.add_argument( - "--version", - action="version", - version="pip {}".format(version()), - help="Show the version of pip that is bundled with this Python.", - ) - parser.add_argument( - "-v", "--verbose", - action="count", - default=0, - dest="verbosity", - help=("Give more output. Option is additive, and can be used up to 3 " - "times."), - ) - parser.add_argument( - "-U", "--upgrade", - action="store_true", - default=False, - help="Upgrade pip and dependencies, even if already installed.", - ) - parser.add_argument( - "--user", - action="store_true", - default=False, - help="Install using the user scheme.", - ) - parser.add_argument( - "--root", - default=None, - help="Install everything relative to this alternate root directory.", - ) - parser.add_argument( - "--altinstall", - action="store_true", - default=False, - help=("Make an alternate install, installing only the X.Y versioned " - "scripts (Default: pipX, pipX.Y)."), - ) - parser.add_argument( - "--default-pip", - action="store_true", - default=False, - help=("Make a default pip install, installing the unqualified pip " - "in addition to the versioned scripts."), - ) - - args = parser.parse_args(argv) - - return _bootstrap( - root=args.root, - upgrade=args.upgrade, - user=args.user, - verbosity=args.verbosity, - altinstall=args.altinstall, - default_pip=args.default_pip, - ) diff --git a/Python313_13_x64_Template/Lib/ensurepip/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/ensurepip/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 5cdf6b7c..00000000 Binary files a/Python313_13_x64_Template/Lib/ensurepip/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/ensurepip/__pycache__/__main__.cpython-313.pyc b/Python313_13_x64_Template/Lib/ensurepip/__pycache__/__main__.cpython-313.pyc deleted file mode 100644 index 106177c6..00000000 Binary files a/Python313_13_x64_Template/Lib/ensurepip/__pycache__/__main__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/ensurepip/_uninstall.py b/Python313_13_x64_Template/Lib/ensurepip/_uninstall.py deleted file mode 100644 index b2579043..00000000 --- a/Python313_13_x64_Template/Lib/ensurepip/_uninstall.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Basic pip uninstallation support, helper for the Windows uninstaller""" - -import argparse -import ensurepip -import sys - - -def _main(argv=None): - parser = argparse.ArgumentParser(prog="python -m ensurepip._uninstall") - parser.add_argument( - "--version", - action="version", - version="pip {}".format(ensurepip.version()), - help="Show the version of pip this will attempt to uninstall.", - ) - parser.add_argument( - "-v", "--verbose", - 
action="count", - default=0, - dest="verbosity", - help=("Give more output. Option is additive, and can be used up to 3 " - "times."), - ) - - args = parser.parse_args(argv) - - return ensurepip._uninstall_helper(verbosity=args.verbosity) - - -if __name__ == "__main__": - sys.exit(_main()) diff --git a/Python313_13_x64_Template/Lib/enum.py b/Python313_13_x64_Template/Lib/enum.py deleted file mode 100644 index 5e7b0065..00000000 --- a/Python313_13_x64_Template/Lib/enum.py +++ /dev/null @@ -1,2182 +0,0 @@ -import sys -import builtins as bltns -from functools import partial -from types import MappingProxyType, DynamicClassAttribute - - -__all__ = [ - 'EnumType', 'EnumMeta', 'EnumDict', - 'Enum', 'IntEnum', 'StrEnum', 'Flag', 'IntFlag', 'ReprEnum', - 'auto', 'unique', 'property', 'verify', 'member', 'nonmember', - 'FlagBoundary', 'STRICT', 'CONFORM', 'EJECT', 'KEEP', - 'global_flag_repr', 'global_enum_repr', 'global_str', 'global_enum', - 'EnumCheck', 'CONTINUOUS', 'NAMED_FLAGS', 'UNIQUE', - 'pickle_by_global_name', 'pickle_by_enum_name', - ] - - -# Dummy value for Enum and Flag as there are explicit checks for them -# before they have been created. -# This is also why there are checks in EnumType like `if Enum is not None` -Enum = Flag = EJECT = _stdlib_enums = ReprEnum = None - -class nonmember(object): - """ - Protects item from becoming an Enum member during class creation. - """ - def __init__(self, value): - self.value = value - -class member(object): - """ - Forces item to become an Enum member during class creation. - """ - def __init__(self, value): - self.value = value - -def _is_descriptor(obj): - """ - Returns True if obj is a descriptor, False otherwise. - """ - return not isinstance(obj, partial) and ( - hasattr(obj, '__get__') or - hasattr(obj, '__set__') or - hasattr(obj, '__delete__') - ) - -def _is_dunder(name): - """ - Returns True if a __dunder__ name, False otherwise. - """ - return ( - len(name) > 4 and - name[:2] == name[-2:] == '__' and - name[2] != '_' and - name[-3] != '_' - ) - -def _is_sunder(name): - """ - Returns True if a _sunder_ name, False otherwise. - """ - return ( - len(name) > 2 and - name[0] == name[-1] == '_' and - name[1] != '_' and - name[-2] != '_' - ) - -def _is_internal_class(cls_name, obj): - # do not use `re` as `re` imports `enum` - if not isinstance(obj, type): - return False - qualname = getattr(obj, '__qualname__', '') - s_pattern = cls_name + '.' + getattr(obj, '__name__', '') - e_pattern = '.' + s_pattern - return qualname == s_pattern or qualname.endswith(e_pattern) - -def _is_private(cls_name, name): - # do not use `re` as `re` imports `enum` - pattern = '_%s__' % (cls_name, ) - pat_len = len(pattern) - if ( - len(name) > pat_len - and name.startswith(pattern) - and (name[-1] != '_' or name[-2] != '_') - ): - return True - else: - return False - -def _is_single_bit(num): - """ - True if only one bit set in num (should be an int) - """ - if num == 0: - return False - num &= num - 1 - return num == 0 - -def _make_class_unpicklable(obj): - """ - Make the given obj un-picklable. 
- - obj should be either a dictionary, or an Enum - """ - def _break_on_call_reduce(self, proto): - raise TypeError('%r cannot be pickled' % self) - if isinstance(obj, dict): - obj['__reduce_ex__'] = _break_on_call_reduce - obj['__module__'] = '' - else: - setattr(obj, '__reduce_ex__', _break_on_call_reduce) - setattr(obj, '__module__', '') - -def _iter_bits_lsb(num): - # num must be a positive integer - original = num - if isinstance(num, Enum): - num = num.value - if num < 0: - raise ValueError('%r is not a positive integer' % original) - while num: - b = num & (~num + 1) - yield b - num ^= b - -def show_flag_values(value): - return list(_iter_bits_lsb(value)) - -def bin(num, max_bits=None): - """ - Like built-in bin(), except negative values are represented in - twos-complement, and the leading bit always indicates sign - (0=positive, 1=negative). - - >>> bin(10) - '0b0 1010' - >>> bin(~10) # ~10 is -11 - '0b1 0101' - """ - - num = num.__index__() - ceiling = 2 ** (num).bit_length() - if num >= 0: - s = bltns.bin(num + ceiling).replace('1', '0', 1) - else: - s = bltns.bin(~num ^ (ceiling - 1) + ceiling) - sign = s[:3] - digits = s[3:] - if max_bits is not None: - if len(digits) < max_bits: - digits = (sign[-1] * max_bits + digits)[-max_bits:] - return "%s %s" % (sign, digits) - -def _dedent(text): - """ - Like textwrap.dedent. Rewritten because we cannot import textwrap. - """ - lines = text.split('\n') - for i, ch in enumerate(lines[0]): - if ch != ' ': - break - for j, l in enumerate(lines): - lines[j] = l[i:] - return '\n'.join(lines) - -class _not_given: - def __repr__(self): - return('<not given>') -_not_given = _not_given() - -class _auto_null: - def __repr__(self): - return '_auto_null' -_auto_null = _auto_null() - -class auto: - """ - Instances are replaced with an appropriate value in Enum class suites. - """ - def __init__(self, value=_auto_null): - self.value = value - - def __repr__(self): - return "auto(%r)" % self.value - -class property(DynamicClassAttribute): - """ - This is a descriptor, used to define attributes that act differently - when accessed through an enum member and through an enum class. - Instance access is the same as property(), but access to an attribute - through the enum class will instead look in the class' _member_map_ for - a corresponding enum member. - """ - - member = None - _attr_type = None - _cls_type = None - - def __get__(self, instance, ownerclass=None): - if instance is None: - if self.member is not None: - return self.member - else: - raise AttributeError( - '%r has no attribute %r' % (ownerclass, self.name) - ) - if self.fget is not None: - # use previous enum.property - return self.fget(instance) - elif self._attr_type == 'attr': - # look up previous attribute - return getattr(self._cls_type, self.name) - elif self._attr_type == 'desc': - # use previous descriptor - return getattr(instance._value_, self.name) - # look for a member by this name.
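        # (Fallback for instance access: no fget, no captured attribute or
        # descriptor, so the name is assumed to be another enum member; a
        # KeyError in the lookup below is converted to an AttributeError.)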
- try: - return ownerclass._member_map_[self.name] - except KeyError: - raise AttributeError( - '%r has no attribute %r' % (ownerclass, self.name) - ) from None - - def __set__(self, instance, value): - if self.fset is not None: - return self.fset(instance, value) - raise AttributeError( - "<enum %r> cannot set attribute %r" % (self.clsname, self.name) - ) - - def __delete__(self, instance): - if self.fdel is not None: - return self.fdel(instance) - raise AttributeError( - "<enum %r> cannot delete attribute %r" % (self.clsname, self.name) - ) - - def __set_name__(self, ownerclass, name): - self.name = name - self.clsname = ownerclass.__name__ - - -class _proto_member: - """ - intermediate step for enum members between class execution and final creation - """ - - def __init__(self, value): - self.value = value - - def __set_name__(self, enum_class, member_name): - """ - convert each quasi-member into an instance of the new enum class - """ - # first step: remove ourself from enum_class - delattr(enum_class, member_name) - # second step: create member based on enum_class - value = self.value - if not isinstance(value, tuple): - args = (value, ) - else: - args = value - if enum_class._member_type_ is tuple: # special case for tuple enums - args = (args, ) # wrap it one more time - if not enum_class._use_args_: - enum_member = enum_class._new_member_(enum_class) - else: - enum_member = enum_class._new_member_(enum_class, *args) - if not hasattr(enum_member, '_value_'): - if enum_class._member_type_ is object: - enum_member._value_ = value - else: - try: - enum_member._value_ = enum_class._member_type_(*args) - except Exception as exc: - new_exc = TypeError( - '_value_ not set in __new__, unable to create it' - ) - new_exc.__cause__ = exc - raise new_exc - value = enum_member._value_ - enum_member._name_ = member_name - enum_member.__objclass__ = enum_class - enum_member.__init__(*args) - enum_member._sort_order_ = len(enum_class._member_names_) - - if Flag is not None and issubclass(enum_class, Flag): - if isinstance(value, int): - enum_class._flag_mask_ |= value - if _is_single_bit(value): - enum_class._singles_mask_ |= value - enum_class._all_bits_ = 2 ** ((enum_class._flag_mask_).bit_length()) - 1 - - # If another member with the same value was already defined, the - # new member becomes an alias to the existing one. - try: - try: - # try to do a fast lookup to avoid the quadratic loop - enum_member = enum_class._value2member_map_[value] - except TypeError: - for name, canonical_member in enum_class._member_map_.items(): - if canonical_member._value_ == value: - enum_member = canonical_member - break - else: - raise KeyError - except KeyError: - # this could still be an alias if the value is multi-bit and the - # class is a flag class - if ( - Flag is None - or not issubclass(enum_class, Flag) - ): - # no other instances found, record this member in _member_names_ - enum_class._member_names_.append(member_name) - elif ( - Flag is not None - and issubclass(enum_class, Flag) - and isinstance(value, int) - and _is_single_bit(value) - ): - # no other instances found, record this member in _member_names_ - enum_class._member_names_.append(member_name) - - enum_class._add_member_(member_name, enum_member) - try: - # This may fail if value is not hashable. We can't add the value - # to the map, and by-value lookups for this value will be - # linear.
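            # For example, a member whose value is a plain list such as [1, 2]
            # raises TypeError in setdefault() below and is tracked through the
            # _unhashable_values_ bookkeeping instead, while a frozenset value
            # stays hashable and keeps the fast by-value lookup.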
- enum_class._value2member_map_.setdefault(value, enum_member) - if value not in enum_class._hashable_values_: - enum_class._hashable_values_.append(value) - except TypeError: - # keep track of the value in a list so containment checks are quick - enum_class._unhashable_values_.append(value) - enum_class._unhashable_values_map_.setdefault(member_name, []).append(value) - - -class EnumDict(dict): - """ - Track enum member order and ensure member names are not reused. - - EnumType will use the names found in self._member_names as the - enumeration member names. - """ - def __init__(self, cls_name=None): - super().__init__() - self._member_names = {} # use a dict -- faster look-up than a list, and keeps insertion order since 3.7 - self._last_values = [] - self._ignore = [] - self._auto_called = False - self._cls_name = cls_name - - def __setitem__(self, key, value): - """ - Changes anything not dundered or not a descriptor. - - If an enum member name is used twice, an error is raised; duplicate - values are not checked for. - - Single underscore (sunder) names are reserved. - """ - if self._cls_name is not None and _is_private(self._cls_name, key): - # do nothing, name will be a normal attribute - pass - elif _is_sunder(key): - if key not in ( - '_order_', - '_generate_next_value_', '_numeric_repr_', '_missing_', '_ignore_', - '_iter_member_', '_iter_member_by_value_', '_iter_member_by_def_', - '_add_alias_', '_add_value_alias_', - # While not in use internally, those are common for pretty - # printing and thus excluded from Enum's reservation of - # _sunder_ names - ) and not key.startswith('_repr_'): - raise ValueError( - '_sunder_ names, such as %r, are reserved for future Enum use' - % (key, ) - ) - if key == '_generate_next_value_': - # check if members already defined as auto() - if self._auto_called: - raise TypeError("_generate_next_value_ must be defined before members") - _gnv = value.__func__ if isinstance(value, staticmethod) else value - setattr(self, '_generate_next_value', _gnv) - elif key == '_ignore_': - if isinstance(value, str): - value = value.replace(',',' ').split() - else: - value = list(value) - self._ignore = value - already = set(value) & set(self._member_names) - if already: - raise ValueError( - '_ignore_ cannot specify already set names: %r' - % (already, ) - ) - elif _is_dunder(key): - if key == '__order__': - key = '_order_' - elif key in self._member_names: - # descriptor overwriting an enum? - raise TypeError('%r already defined as %r' % (key, self[key])) - elif key in self._ignore: - pass - elif isinstance(value, nonmember): - # unwrap value here; it won't be processed by the below `else` - value = value.value - elif isinstance(value, partial): - import warnings - warnings.warn('functools.partial will be a method descriptor ' - 'in future Python versions; wrap it in ' - 'enum.member() if you want to preserve the ' - 'old behavior', FutureWarning, stacklevel=2) - elif _is_descriptor(value): - pass - elif self._cls_name is not None and _is_internal_class(self._cls_name, value): - # do nothing, name will be a normal attribute - pass - else: - if key in self: - # enum overwriting a descriptor? 
- raise TypeError('%r already defined as %r' % (key, self[key])) - elif isinstance(value, member): - # unwrap value here -- it will become a member - value = value.value - non_auto_store = True - single = False - if isinstance(value, auto): - single = True - value = (value, ) - if isinstance(value, tuple) and any(isinstance(v, auto) for v in value): - # insist on an actual tuple, no subclasses, in keeping with only supporting - # top-level auto() usage (not contained in any other data structure) - auto_valued = [] - t = type(value) - for v in value: - if isinstance(v, auto): - non_auto_store = False - if v.value == _auto_null: - v.value = self._generate_next_value( - key, 1, len(self._member_names), self._last_values[:], - ) - self._auto_called = True - v = v.value - self._last_values.append(v) - auto_valued.append(v) - if single: - value = auto_valued[0] - else: - try: - # accepts iterable as multiple arguments? - value = t(auto_valued) - except TypeError: - # then pass them in singly - value = t(*auto_valued) - self._member_names[key] = None - if non_auto_store: - self._last_values.append(value) - super().__setitem__(key, value) - - @property - def member_names(self): - return list(self._member_names) - - def update(self, members, **more_members): - try: - for name in members.keys(): - self[name] = members[name] - except AttributeError: - for name, value in members: - self[name] = value - for name, value in more_members.items(): - self[name] = value - -_EnumDict = EnumDict # keep private name for backwards compatibility - - -class EnumType(type): - """ - Metaclass for Enum - """ - - @classmethod - def __prepare__(metacls, cls, bases, **kwds): - # check that previous enum members do not exist - metacls._check_for_existing_members_(cls, bases) - # create the namespace dict - enum_dict = EnumDict(cls) - # inherit previous flags and _generate_next_value_ function - member_type, first_enum = metacls._get_mixins_(cls, bases) - if first_enum is not None: - enum_dict['_generate_next_value_'] = getattr( - first_enum, '_generate_next_value_', None, - ) - return enum_dict - - def __new__(metacls, cls, bases, classdict, *, boundary=None, _simple=False, **kwds): - # an Enum class is final once enumeration items have been defined; it - # cannot be mixed with other types (int, float, etc.) if it has an - # inherited __new__ unless a new __new__ is defined (or the resulting - # class will fail). - # - if _simple: - return super().__new__(metacls, cls, bases, classdict, **kwds) - # - # remove any keys listed in _ignore_ - classdict.setdefault('_ignore_', []).append('_ignore_') - ignore = classdict['_ignore_'] - for key in ignore: - classdict.pop(key, None) - # - # grab member names - member_names = classdict._member_names - # - # check for illegal enum names (any others?) 
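A small sketch of the EnumDict behavior implemented above, assuming the stdlib enum module: auto() values are resolved through _generate_next_value_, and reusing a member name is rejected while the class body executes.

    from enum import Enum, auto

    class Direction(Enum):
        NORTH = auto()        # resolved to 1 via _generate_next_value_
        SOUTH = auto()        # resolved to 2

    assert Direction.SOUTH.value == 2

    try:
        class Bad(Enum):
            X = 1
            X = 2             # EnumDict.__setitem__ raises here
    except TypeError as exc:
        assert 'already defined' in str(exc)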
- invalid_names = set(member_names) & {'mro', ''} - if invalid_names: - raise ValueError('invalid enum member name(s) %s' % ( - ','.join(repr(n) for n in invalid_names) - )) - # - # adjust the sunders - _order_ = classdict.pop('_order_', None) - _gnv = classdict.get('_generate_next_value_') - if _gnv is not None and type(_gnv) is not staticmethod: - _gnv = staticmethod(_gnv) - # convert to normal dict - classdict = dict(classdict.items()) - if _gnv is not None: - classdict['_generate_next_value_'] = _gnv - # - # data type of member and the controlling Enum class - member_type, first_enum = metacls._get_mixins_(cls, bases) - __new__, save_new, use_args = metacls._find_new_( - classdict, member_type, first_enum, - ) - classdict['_new_member_'] = __new__ - classdict['_use_args_'] = use_args - # - # convert future enum members into temporary _proto_members - for name in member_names: - value = classdict[name] - classdict[name] = _proto_member(value) - # - # house-keeping structures - classdict['_member_names_'] = [] - classdict['_member_map_'] = {} - classdict['_value2member_map_'] = {} - classdict['_hashable_values_'] = [] # for comparing with non-hashable types - classdict['_unhashable_values_'] = [] # e.g. frozenset() with set() - classdict['_unhashable_values_map_'] = {} - classdict['_member_type_'] = member_type - # now set the __repr__ for the value - classdict['_value_repr_'] = metacls._find_data_repr_(cls, bases) - # - # Flag structures (will be removed if final class is not a Flag - classdict['_boundary_'] = ( - boundary - or getattr(first_enum, '_boundary_', None) - ) - classdict['_flag_mask_'] = 0 - classdict['_singles_mask_'] = 0 - classdict['_all_bits_'] = 0 - classdict['_inverted_'] = None - try: - classdict['_%s__in_progress' % cls] = True - enum_class = super().__new__(metacls, cls, bases, classdict, **kwds) - classdict['_%s__in_progress' % cls] = False - delattr(enum_class, '_%s__in_progress' % cls) - except Exception as e: - # since 3.12 the note "Error calling __set_name__ on '_proto_member' instance ..." - # is tacked on to the error instead of raising a RuntimeError, so discard it - if hasattr(e, '__notes__'): - del e.__notes__ - raise - # update classdict with any changes made by __init_subclass__ - classdict.update(enum_class.__dict__) - # - # double check that repr and friends are not the mixin's or various - # things break (such as pickle) - # however, if the method is defined in the Enum itself, don't replace - # it - # - # Also, special handling for ReprEnum - if ReprEnum is not None and ReprEnum in bases: - if member_type is object: - raise TypeError( - 'ReprEnum subclasses must be mixed with a data type (i.e.' 
- ' int, str, float, etc.)' - ) - if '__format__' not in classdict: - enum_class.__format__ = member_type.__format__ - classdict['__format__'] = enum_class.__format__ - if '__str__' not in classdict: - method = member_type.__str__ - if method is object.__str__: - # if member_type does not define __str__, object.__str__ will use - # its __repr__ instead, so we'll also use its __repr__ - method = member_type.__repr__ - enum_class.__str__ = method - classdict['__str__'] = enum_class.__str__ - for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'): - if name not in classdict: - # check for mixin overrides before replacing - enum_method = getattr(first_enum, name) - found_method = getattr(enum_class, name) - object_method = getattr(object, name) - data_type_method = getattr(member_type, name) - if found_method in (data_type_method, object_method): - setattr(enum_class, name, enum_method) - # - # for Flag, add __or__, __and__, __xor__, and __invert__ - if Flag is not None and issubclass(enum_class, Flag): - for name in ( - '__or__', '__and__', '__xor__', - '__ror__', '__rand__', '__rxor__', - '__invert__' - ): - if name not in classdict: - enum_method = getattr(Flag, name) - setattr(enum_class, name, enum_method) - classdict[name] = enum_method - # - # replace any other __new__ with our own (as long as Enum is not None, - # anyway) -- again, this is to support pickle - if Enum is not None: - # if the user defined their own __new__, save it before it gets - # clobbered in case they subclass later - if save_new: - enum_class.__new_member__ = __new__ - enum_class.__new__ = Enum.__new__ - # - # py3 support for definition order (helps keep py2/py3 code in sync) - # - # _order_ checking is spread out into three/four steps - # - if enum_class is a Flag: - # - remove any non-single-bit flags from _order_ - # - remove any aliases from _order_ - # - check that _order_ and _member_names_ match - # - # step 1: ensure we have a list - if _order_ is not None: - if isinstance(_order_, str): - _order_ = _order_.replace(',', ' ').split() - # - # remove Flag structures if final class is not a Flag - if ( - Flag is None and cls != 'Flag' - or Flag is not None and not issubclass(enum_class, Flag) - ): - delattr(enum_class, '_boundary_') - delattr(enum_class, '_flag_mask_') - delattr(enum_class, '_singles_mask_') - delattr(enum_class, '_all_bits_') - delattr(enum_class, '_inverted_') - elif Flag is not None and issubclass(enum_class, Flag): - # set correct __iter__ - member_list = [m._value_ for m in enum_class] - if member_list != sorted(member_list): - enum_class._iter_member_ = enum_class._iter_member_by_def_ - if _order_: - # _order_ step 2: remove any items from _order_ that are not single-bit - _order_ = [ - o - for o in _order_ - if o not in enum_class._member_map_ or _is_single_bit(enum_class[o]._value_) - ] - # - if _order_: - # _order_ step 3: remove aliases from _order_ - _order_ = [ - o - for o in _order_ - if ( - o not in enum_class._member_map_ - or - (o in enum_class._member_map_ and o in enum_class._member_names_) - )] - # _order_ step 4: verify that _order_ and _member_names_ match - if _order_ != enum_class._member_names_: - raise TypeError( - 'member order does not match _order_:\n %r\n %r' - % (enum_class._member_names_, _order_) - ) - # - return enum_class - - def __bool__(cls): - """ - classes/types should always be True. 
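The _order_ checking above can be exercised directly; a minimal sketch, assuming the stdlib enum module:

    from enum import Enum

    class Color(Enum):
        _order_ = 'RED GREEN BLUE'   # verified against definition order at class creation
        RED = 1
        GREEN = 2
        BLUE = 3

    # A mismatch, e.g. _order_ = 'BLUE RED GREEN', raises
    # TypeError: member order does not match _order_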
- """ - return True - - def __call__(cls, value, names=_not_given, *values, module=None, qualname=None, type=None, start=1, boundary=None): - """ - Either returns an existing member, or creates a new enum class. - - This method is used both when an enum class is given a value to match - to an enumeration member (i.e. Color(3)) and for the functional API - (i.e. Color = Enum('Color', names='RED GREEN BLUE')). - - The value lookup branch is chosen if the enum is final. - - When used for the functional API: - - `value` will be the name of the new class. - - `names` should be either a string of white-space/comma delimited names - (values will start at `start`), or an iterator/mapping of name, value pairs. - - `module` should be set to the module this class is being created in; - if it is not set, an attempt to find that module will be made, but if - it fails the class will not be picklable. - - `qualname` should be set to the actual location this class can be found - at in its module; by default it is set to the global scope. If this is - not correct, unpickling will fail in some circumstances. - - `type`, if set, will be mixed in as the first base class. - """ - if cls._member_map_: - # simple value lookup if members exist - if names is not _not_given: - value = (value, names) + values - return cls.__new__(cls, value) - # otherwise, functional API: we're creating a new Enum type - if names is _not_given and type is None: - # no body? no data-type? possibly wrong usage - raise TypeError( - f"{cls} has no members; specify `names=()` if you meant to create a new, empty, enum" - ) - return cls._create_( - class_name=value, - names=None if names is _not_given else names, - module=module, - qualname=qualname, - type=type, - start=start, - boundary=boundary, - ) - - def __contains__(cls, value): - """Return True if `value` is in `cls`. - - `value` is in `cls` if: - 1) `value` is a member of `cls`, or - 2) `value` is the value of one of the `cls`'s members. - 3) `value` is a pseudo-member (flags) - """ - if isinstance(value, cls): - return True - if issubclass(cls, Flag): - try: - result = cls._missing_(value) - return isinstance(result, cls) - except ValueError: - pass - return ( - value in cls._unhashable_values_ # both structures are lists - or value in cls._hashable_values_ - ) - - def __delattr__(cls, attr): - # nicer error message when someone tries to delete an attribute - # (see issue19025). - if attr in cls._member_map_: - raise AttributeError("%r cannot delete member %r." % (cls.__name__, attr)) - super().__delattr__(attr) - - def __dir__(cls): - interesting = set([ - '__class__', '__contains__', '__doc__', '__getitem__', - '__iter__', '__len__', '__members__', '__module__', - '__name__', '__qualname__', - ] - + cls._member_names_ - ) - if cls._new_member_ is not object.__new__: - interesting.add('__new__') - if cls.__init_subclass__ is not object.__init_subclass__: - interesting.add('__init_subclass__') - if cls._member_type_ is object: - return sorted(interesting) - else: - # return whatever mixed-in data type has - return sorted(set(dir(cls._member_type_)) | interesting) - - def __getitem__(cls, name): - """ - Return the member matching `name`. - """ - return cls._member_map_[name] - - def __iter__(cls): - """ - Return members in definition order. 
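Both branches of __call__ show up in a short sketch (stdlib enum assumed): the functional API runs when the class has no members yet, plain value lookup once it does.

    from enum import Enum

    Color = Enum('Color', 'RED GREEN BLUE')      # functional API; values start at 1
    assert Color.RED.value == 1
    assert Color(2) is Color.GREEN               # value lookup via __call__
    assert Color['BLUE'] is Color.BLUE           # name lookup via __getitem__
    assert 3 in Color                            # __contains__ also accepts raw values

    Status = Enum('Status', [('OK', 200), ('NOT_FOUND', 404)])   # (name, value) pairs
    assert Status.NOT_FOUND.value == 404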
-        """
-        return (cls._member_map_[name] for name in cls._member_names_)
-
-    def __len__(cls):
-        """
-        Return the number of members (no aliases)
-        """
-        return len(cls._member_names_)
-
-    @bltns.property
-    def __members__(cls):
-        """
-        Returns a mapping of member name->value.
-
-        This mapping lists all enum members, including aliases. Note that this
-        is a read-only view of the internal mapping.
-        """
-        return MappingProxyType(cls._member_map_)
-
-    def __repr__(cls):
-        if Flag is not None and issubclass(cls, Flag):
-            return "<flag %r>" % cls.__name__
-        else:
-            return "<enum %r>" % cls.__name__
-
-    def __reversed__(cls):
-        """
-        Return members in reverse definition order.
-        """
-        return (cls._member_map_[name] for name in reversed(cls._member_names_))
-
-    def __setattr__(cls, name, value):
-        """
-        Block attempts to reassign Enum members.
-
-        A simple assignment to the class namespace only changes one of the
-        several possible ways to get an Enum member from the Enum class,
-        resulting in an inconsistent Enumeration.
-        """
-        member_map = cls.__dict__.get('_member_map_', {})
-        if name in member_map:
-            raise AttributeError('cannot reassign member %r' % (name, ))
-        super().__setattr__(name, value)
-
-    def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, start=1, boundary=None):
-        """
-        Convenience method to create a new Enum class.
-
-        `names` can be:
-
-        * A string containing member names, separated either with spaces or
-          commas. Values are incremented by 1 from `start`.
-        * An iterable of member names. Values are incremented by 1 from `start`.
-        * An iterable of (member name, value) pairs.
-        * A mapping of member name -> value pairs.
-        """
-        metacls = cls.__class__
-        bases = (cls, ) if type is None else (type, cls)
-        _, first_enum = cls._get_mixins_(class_name, bases)
-        classdict = metacls.__prepare__(class_name, bases)
-
-        # special processing needed for names?
-        if isinstance(names, str):
-            names = names.replace(',', ' ').split()
-        if isinstance(names, (tuple, list)) and names and isinstance(names[0], str):
-            original_names, names = names, []
-            last_values = []
-            for count, name in enumerate(original_names):
-                value = first_enum._generate_next_value_(name, start, count, last_values[:])
-                last_values.append(value)
-                names.append((name, value))
-        if names is None:
-            names = ()
-
-        # Here, names is either an iterable of (name, value) or a mapping.
-        for item in names:
-            if isinstance(item, str):
-                member_name, member_value = item, names[item]
-            else:
-                member_name, member_value = item
-            classdict[member_name] = member_value
-
-        if module is None:
-            try:
-                module = sys._getframemodulename(2)
-            except AttributeError:
-                # Fall back on _getframe if _getframemodulename is missing
-                try:
-                    module = sys._getframe(2).f_globals['__name__']
-                except (AttributeError, ValueError, KeyError):
-                    pass
-        if module is None:
-            _make_class_unpicklable(classdict)
-        else:
-            classdict['__module__'] = module
-        if qualname is not None:
-            classdict['__qualname__'] = qualname
-
-        return metacls.__new__(metacls, class_name, bases, classdict, boundary=boundary)
-
-    def _convert_(cls, name, module, filter, source=None, *, boundary=None, as_global=False):
-        """
-        Create a new Enum subclass that replaces a collection of global constants
-        """
-        # convert all constants from source (or module) that pass filter() to
-        # a new Enum called name, and export the enum and its members back to
-        # module;
-        # also, replace the __reduce_ex__ method so unpickling works in
-        # previous Python versions
-        module_globals = sys.modules[module].__dict__
-        if source:
-            source = source.__dict__
-        else:
-            source = module_globals
-        # _value2member_map_ is populated in the same order every time
-        # for a consistent reverse mapping of number to name when there
-        # are multiple names for the same number.
-        members = [
-                (name, value)
-                for name, value in source.items()
-                if filter(name)]
-        try:
-            # sort by value
-            members.sort(key=lambda t: (t[1], t[0]))
-        except TypeError:
-            # unless some values aren't comparable, in which case sort by name
-            members.sort(key=lambda t: t[0])
-        body = {t[0]: t[1] for t in members}
-        body['__module__'] = module
-        tmp_cls = type(name, (object, ), body)
-        cls = _simple_enum(etype=cls, boundary=boundary or KEEP)(tmp_cls)
-        if as_global:
-            global_enum(cls)
-        else:
-            sys.modules[cls.__module__].__dict__.update(cls.__members__)
-        module_globals[name] = cls
-        return cls
-
-    @classmethod
-    def _check_for_existing_members_(mcls, class_name, bases):
-        for chain in bases:
-            for base in chain.__mro__:
-                if isinstance(base, EnumType) and base._member_names_:
-                    raise TypeError(
-                            "<enum %r> cannot extend %r"
-                            % (class_name, base)
-                            )
-
-    @classmethod
-    def _get_mixins_(mcls, class_name, bases):
-        """
-        Returns the type for creating enum members, and the first inherited
-        enum class.
-
-        bases: the tuple of bases that was given to __new__
-        """
-        if not bases:
-            return object, Enum
-        # ensure final parent class is an Enum derivative, find any concrete
-        # data type, and check that Enum has no members
-        first_enum = bases[-1]
-        if not isinstance(first_enum, EnumType):
-            raise TypeError("new enumerations should be created as "
-                    "`EnumName([mixin_type, ...]
[data_type,] enum_type)`") - member_type = mcls._find_data_type_(class_name, bases) or object - return member_type, first_enum - - @classmethod - def _find_data_repr_(mcls, class_name, bases): - for chain in bases: - for base in chain.__mro__: - if base is object: - continue - elif isinstance(base, EnumType): - # if we hit an Enum, use it's _value_repr_ - return base._value_repr_ - elif '__repr__' in base.__dict__: - # this is our data repr - # double-check if a dataclass with a default __repr__ - if ( - '__dataclass_fields__' in base.__dict__ - and '__dataclass_params__' in base.__dict__ - and base.__dict__['__dataclass_params__'].repr - ): - return _dataclass_repr - else: - return base.__dict__['__repr__'] - return None - - @classmethod - def _find_data_type_(mcls, class_name, bases): - # a datatype has a __new__ method, or a __dataclass_fields__ attribute - data_types = set() - base_chain = set() - for chain in bases: - candidate = None - for base in chain.__mro__: - base_chain.add(base) - if base is object: - continue - elif isinstance(base, EnumType): - if base._member_type_ is not object: - data_types.add(base._member_type_) - break - elif '__new__' in base.__dict__ or '__dataclass_fields__' in base.__dict__: - data_types.add(candidate or base) - break - else: - candidate = candidate or base - if len(data_types) > 1: - raise TypeError('too many data types for %r: %r' % (class_name, data_types)) - elif data_types: - return data_types.pop() - else: - return None - - @classmethod - def _find_new_(mcls, classdict, member_type, first_enum): - """ - Returns the __new__ to be used for creating the enum members. - - classdict: the class dictionary given to __new__ - member_type: the data type whose __new__ will be used by default - first_enum: enumeration to check for an overriding __new__ - """ - # now find the correct __new__, checking to see of one was defined - # by the user; also check earlier enum classes in case a __new__ was - # saved as __new_member__ - __new__ = classdict.get('__new__', None) - - # should __new__ be saved as __new_member__ later? 
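What _get_mixins_ and _find_data_type_ compute is visible from any mixed-in enum; a minimal sketch, assuming the stdlib enum module:

    from enum import Enum

    class Size(int, Enum):        # int becomes _member_type_, Enum is first_enum
        S = 1
        M = 2

    assert isinstance(Size.M, int) and Size.M + 1 == 3

    # _check_for_existing_members_ forbids subclassing an enum with members:
    # class Bigger(Size): L = 3   -> TypeError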
-        save_new = first_enum is not None and __new__ is not None
-
-        if __new__ is None:
-            # check all possibles for __new_member__ before falling back to
-            # __new__
-            for method in ('__new_member__', '__new__'):
-                for possible in (member_type, first_enum):
-                    target = getattr(possible, method, None)
-                    if target not in {
-                            None,
-                            None.__new__,
-                            object.__new__,
-                            Enum.__new__,
-                            }:
-                        __new__ = target
-                        break
-                if __new__ is not None:
-                    break
-            else:
-                __new__ = object.__new__
-
-        # if a non-object.__new__ is used then whatever value/tuple was
-        # assigned to the enum member name will be passed to __new__ and to the
-        # new enum member's __init__
-        if first_enum is None or __new__ in (Enum.__new__, object.__new__):
-            use_args = False
-        else:
-            use_args = True
-        return __new__, save_new, use_args
-
-    def _add_member_(cls, name, member):
-        # _value_ structures are not updated
-        if name in cls._member_map_:
-            if cls._member_map_[name] is not member:
-                raise NameError('%r is already bound: %r' % (name, cls._member_map_[name]))
-            return
-        #
-        # if necessary, get redirect in place and then add it to _member_map_
-        found_descriptor = None
-        descriptor_type = None
-        class_type = None
-        for base in cls.__mro__[1:]:
-            attr = base.__dict__.get(name)
-            if attr is not None:
-                if isinstance(attr, (property, DynamicClassAttribute)):
-                    found_descriptor = attr
-                    class_type = base
-                    descriptor_type = 'enum'
-                    break
-                elif _is_descriptor(attr):
-                    found_descriptor = attr
-                    descriptor_type = descriptor_type or 'desc'
-                    class_type = class_type or base
-                    continue
-                else:
-                    descriptor_type = 'attr'
-                    class_type = base
-        if found_descriptor:
-            redirect = property()
-            redirect.member = member
-            redirect.__set_name__(cls, name)
-            if descriptor_type in ('enum', 'desc'):
-                # earlier descriptor found; copy fget, fset, fdel to this one.
-                redirect.fget = getattr(found_descriptor, 'fget', None)
-                redirect._get = getattr(found_descriptor, '__get__', None)
-                redirect.fset = getattr(found_descriptor, 'fset', None)
-                redirect._set = getattr(found_descriptor, '__set__', None)
-                redirect.fdel = getattr(found_descriptor, 'fdel', None)
-                redirect._del = getattr(found_descriptor, '__delete__', None)
-            redirect._attr_type = descriptor_type
-            redirect._cls_type = class_type
-            setattr(cls, name, redirect)
-        else:
-            setattr(cls, name, member)
-        # now add to _member_map_ (even aliases)
-        cls._member_map_[name] = member
-
-EnumMeta = EnumType         # keep EnumMeta name for backwards compatibility
-
-
-class Enum(metaclass=EnumType):
-    """
-    Create a collection of name/value pairs.
-
-    Example enumeration:
-
-    >>> class Color(Enum):
-    ...     RED = 1
-    ...     BLUE = 2
-    ...     GREEN = 3
-
-    Access them by:
-
-    - attribute access:
-
-      >>> Color.RED
-      <Color.RED: 1>
-
-    - value lookup:
-
-      >>> Color(1)
-      <Color.RED: 1>
-
-    - name lookup:
-
-      >>> Color['RED']
-      <Color.RED: 1>
-
-    Enumerations can be iterated over, and know how many members they have:
-
-    >>> len(Color)
-    3
-
-    >>> list(Color)
-    [<Color.RED: 1>, <Color.BLUE: 2>, <Color.GREEN: 3>]
-
-    Methods can be added to enumerations, and members can have their own
-    attributes -- see the documentation for details.
-    """
-
-    @classmethod
-    def __signature__(cls):
-        if cls._member_names_:
-            return '(*values)'
-        else:
-            return '(new_class_name, /, names, *, module=None, qualname=None, type=None, start=1, boundary=None)'
-
-    def __new__(cls, value):
-        # all enum instances are actually created during class construction
-        # without calling this method; this method is called by the metaclass'
-        # __call__ (i.e.
Color(3) ), and by pickle - if type(value) is cls: - # For lookups like Color(Color.RED) - return value - # by-value search for a matching enum member - # see if it's in the reverse mapping (for hashable values) - try: - return cls._value2member_map_[value] - except KeyError: - # Not found, no need to do long O(n) search - pass - except TypeError: - # not there, now do long search -- O(n) behavior - for name, unhashable_values in cls._unhashable_values_map_.items(): - if value in unhashable_values: - return cls[name] - for name, member in cls._member_map_.items(): - if value == member._value_: - return cls[name] - # still not found -- verify that members exist, in-case somebody got here mistakenly - # (such as via super when trying to override __new__) - if not cls._member_map_: - if getattr(cls, '_%s__in_progress' % cls.__name__, False): - raise TypeError('do not use `super().__new__; call the appropriate __new__ directly') from None - raise TypeError("%r has no members defined" % cls) - # - # still not found -- try _missing_ hook - try: - exc = None - result = cls._missing_(value) - except Exception as e: - exc = e - result = None - try: - if isinstance(result, cls): - return result - elif ( - Flag is not None and issubclass(cls, Flag) - and cls._boundary_ is EJECT and isinstance(result, int) - ): - return result - else: - ve_exc = ValueError("%r is not a valid %s" % (value, cls.__qualname__)) - if result is None and exc is None: - raise ve_exc - elif exc is None: - exc = TypeError( - 'error in %s._missing_: returned %r instead of None or a valid member' - % (cls.__name__, result) - ) - if not isinstance(exc, ValueError): - exc.__context__ = ve_exc - raise exc - finally: - # ensure all variables that could hold an exception are destroyed - exc = None - ve_exc = None - - def __init__(self, *args, **kwds): - pass - - def _add_alias_(self, name): - self.__class__._add_member_(name, self) - - def _add_value_alias_(self, value): - cls = self.__class__ - try: - if value in cls._value2member_map_: - if cls._value2member_map_[value] is not self: - raise ValueError('%r is already bound: %r' % (value, cls._value2member_map_[value])) - return - except TypeError: - # unhashable value, do long search - for m in cls._member_map_.values(): - if m._value_ == value: - if m is not self: - raise ValueError('%r is already bound: %r' % (value, cls._value2member_map_[value])) - return - try: - # This may fail if value is not hashable. We can't add the value - # to the map, and by-value lookups for this value will be - # linear. - cls._value2member_map_.setdefault(value, self) - cls._hashable_values_.append(value) - except TypeError: - # keep track of the value in a list so containment checks are quick - cls._unhashable_values_.append(value) - cls._unhashable_values_map_.setdefault(self.name, []).append(value) - - @staticmethod - def _generate_next_value_(name, start, count, last_values): - """ - Generate the next value when not given. 
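A sketch of the _missing_ hook that __new__ falls back to above, assuming the stdlib enum module (the case-insensitive lookup is purely illustrative):

    from enum import Enum

    class Color(Enum):
        RED = 1
        GREEN = 2

        @classmethod
        def _missing_(cls, value):
            # illustrative fallback: accept member names, case-insensitively
            if isinstance(value, str) and value.upper() in cls.__members__:
                return cls[value.upper()]
            return None               # None -> __new__ raises the usual ValueError

    assert Color('red') is Color.RED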
- - name: the name of the member - start: the initial start value or None - count: the number of existing members - last_values: the list of values assigned - """ - if not last_values: - return start - try: - last_value = sorted(last_values).pop() - except TypeError: - raise TypeError('unable to sort non-numeric values') from None - try: - return last_value + 1 - except TypeError: - raise TypeError('unable to increment %r' % (last_value, )) from None - - @classmethod - def _missing_(cls, value): - return None - - def __repr__(self): - v_repr = self.__class__._value_repr_ or repr - return "<%s.%s: %s>" % (self.__class__.__name__, self._name_, v_repr(self._value_)) - - def __str__(self): - return "%s.%s" % (self.__class__.__name__, self._name_, ) - - def __dir__(self): - """ - Returns public methods and other interesting attributes. - """ - interesting = set() - if self.__class__._member_type_ is not object: - interesting = set(object.__dir__(self)) - for name in getattr(self, '__dict__', []): - if name[0] != '_' and name not in self._member_map_: - interesting.add(name) - for cls in self.__class__.mro(): - for name, obj in cls.__dict__.items(): - if name[0] == '_': - continue - if isinstance(obj, property): - # that's an enum.property - if obj.fget is not None or name not in self._member_map_: - interesting.add(name) - else: - # in case it was added by `dir(self)` - interesting.discard(name) - elif name not in self._member_map_: - interesting.add(name) - names = sorted( - set(['__class__', '__doc__', '__eq__', '__hash__', '__module__']) - | interesting - ) - return names - - def __format__(self, format_spec): - return str.__format__(str(self), format_spec) - - def __hash__(self): - return hash(self._name_) - - def __reduce_ex__(self, proto): - return self.__class__, (self._value_, ) - - def __deepcopy__(self,memo): - return self - - def __copy__(self): - return self - - # enum.property is used to provide access to the `name` and - # `value` attributes of enum members while keeping some measure of - # protection from modification, while still allowing for an enumeration - # to have members named `name` and `value`. This works because each - # instance of enum.property saves its companion member, which it returns - # on class lookup; on instance lookup it either executes a provided function - # or raises an AttributeError. - - @property - def name(self): - """The name of the Enum member.""" - return self._name_ - - @property - def value(self): - """The value of the Enum member.""" - return self._value_ - - -class ReprEnum(Enum): - """ - Only changes the repr(), leaving str() and format() to the mixed-in type. 
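The division of labor between Enum and ReprEnum shows up in str() and repr(); a minimal sketch, assuming the stdlib enum module:

    from enum import Enum, IntEnum

    class Color(Enum):
        RED = 1

    assert str(Color.RED) == 'Color.RED'          # Enum.__str__
    assert repr(Color.RED) == '<Color.RED: 1>'    # Enum.__repr__

    class Answer(IntEnum):                        # IntEnum derives from ReprEnum:
        YES = 1                                   # str()/format() come from int

    assert str(Answer.YES) == '1' and f'{Answer.YES}' == '1'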
- """ - - -class IntEnum(int, ReprEnum): - """ - Enum where members are also (and must be) ints - """ - - -class StrEnum(str, ReprEnum): - """ - Enum where members are also (and must be) strings - """ - - def __new__(cls, *values): - "values must already be of type `str`" - if len(values) > 3: - raise TypeError('too many arguments for str(): %r' % (values, )) - if len(values) == 1: - # it must be a string - if not isinstance(values[0], str): - raise TypeError('%r is not a string' % (values[0], )) - if len(values) >= 2: - # check that encoding argument is a string - if not isinstance(values[1], str): - raise TypeError('encoding must be a string, not %r' % (values[1], )) - if len(values) == 3: - # check that errors argument is a string - if not isinstance(values[2], str): - raise TypeError('errors must be a string, not %r' % (values[2])) - value = str(*values) - member = str.__new__(cls, value) - member._value_ = value - return member - - @staticmethod - def _generate_next_value_(name, start, count, last_values): - """ - Return the lower-cased version of the member name. - """ - return name.lower() - - -def pickle_by_global_name(self, proto): - # should not be used with Flag-type enums - return self.name -_reduce_ex_by_global_name = pickle_by_global_name - -def pickle_by_enum_name(self, proto): - # should not be used with Flag-type enums - return getattr, (self.__class__, self._name_) - -class FlagBoundary(StrEnum): - """ - control how out of range values are handled - "strict" -> error is raised [default for Flag] - "conform" -> extra bits are discarded - "eject" -> lose flag status - "keep" -> keep flag status and all bits [default for IntFlag] - """ - STRICT = auto() - CONFORM = auto() - EJECT = auto() - KEEP = auto() -STRICT, CONFORM, EJECT, KEEP = FlagBoundary - - -class Flag(Enum, boundary=STRICT): - """ - Support for flags - """ - - _numeric_repr_ = repr - - @staticmethod - def _generate_next_value_(name, start, count, last_values): - """ - Generate the next value when not given. - - name: the name of the member - start: the initial start value or None - count: the number of existing members - last_values: the last value assigned or None - """ - if not count: - return start if start is not None else 1 - last_value = max(last_values) - try: - high_bit = _high_bit(last_value) - except Exception: - raise TypeError('invalid flag value %r' % last_value) from None - return 2 ** (high_bit+1) - - @classmethod - def _iter_member_by_value_(cls, value): - """ - Extract all members from the value in definition (i.e. increasing value) order. - """ - for val in _iter_bits_lsb(value & cls._flag_mask_): - yield cls._value2member_map_.get(val) - - _iter_member_ = _iter_member_by_value_ - - @classmethod - def _iter_member_by_def_(cls, value): - """ - Extract all members from the value in definition order. - """ - yield from sorted( - cls._iter_member_by_value_(value), - key=lambda m: m._sort_order_, - ) - - @classmethod - def _missing_(cls, value): - """ - Create a composite member containing all canonical members present in `value`. - - If non-member values are present, result depends on `_boundary_` setting. - """ - if not isinstance(value, int): - raise ValueError( - "%r is not a valid %s" % (value, cls.__qualname__) - ) - # check boundaries - # - value must be in range (e.g. -16 <-> +15, i.e. ~15 <-> 15) - # - value must not include any skipped flags (e.g. 
if bit 2 is not - # defined, then 0d10 is invalid) - flag_mask = cls._flag_mask_ - singles_mask = cls._singles_mask_ - all_bits = cls._all_bits_ - neg_value = None - if ( - not ~all_bits <= value <= all_bits - or value & (all_bits ^ flag_mask) - ): - if cls._boundary_ is STRICT: - max_bits = max(value.bit_length(), flag_mask.bit_length()) - raise ValueError( - "%r invalid value %r\n given %s\n allowed %s" % ( - cls, value, bin(value, max_bits), bin(flag_mask, max_bits), - )) - elif cls._boundary_ is CONFORM: - value = value & flag_mask - elif cls._boundary_ is EJECT: - return value - elif cls._boundary_ is KEEP: - if value < 0: - value = ( - max(all_bits+1, 2**(value.bit_length())) - + value - ) - else: - raise ValueError( - '%r unknown flag boundary %r' % (cls, cls._boundary_, ) - ) - if value < 0: - neg_value = value - value = all_bits + 1 + value - # get members and unknown - unknown = value & ~flag_mask - aliases = value & ~singles_mask - member_value = value & singles_mask - if unknown and cls._boundary_ is not KEEP: - raise ValueError( - '%s(%r) --> unknown values %r [%s]' - % (cls.__name__, value, unknown, bin(unknown)) - ) - # normal Flag? - if cls._member_type_ is object: - # construct a singleton enum pseudo-member - pseudo_member = object.__new__(cls) - else: - pseudo_member = cls._member_type_.__new__(cls, value) - if not hasattr(pseudo_member, '_value_'): - pseudo_member._value_ = value - if member_value or aliases: - members = [] - combined_value = 0 - for m in cls._iter_member_(member_value): - members.append(m) - combined_value |= m._value_ - if aliases: - value = member_value | aliases - for n, pm in cls._member_map_.items(): - if pm not in members and pm._value_ and pm._value_ & value == pm._value_: - members.append(pm) - combined_value |= pm._value_ - unknown = value ^ combined_value - pseudo_member._name_ = '|'.join([m._name_ for m in members]) - if not combined_value: - pseudo_member._name_ = None - elif unknown and cls._boundary_ is STRICT: - raise ValueError('%r: no members with value %r' % (cls, unknown)) - elif unknown: - pseudo_member._name_ += '|%s' % cls._numeric_repr_(unknown) - else: - pseudo_member._name_ = None - # use setdefault in case another thread already created a composite - # with this value - # note: zero is a special case -- always add it - pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) - if neg_value is not None: - cls._value2member_map_[neg_value] = pseudo_member - return pseudo_member - - def __contains__(self, other): - """ - Returns True if self has at least the same flags set as other. - """ - if not isinstance(other, self.__class__): - raise TypeError( - "unsupported operand type(s) for 'in': %r and %r" % ( - type(other).__qualname__, self.__class__.__qualname__)) - return other._value_ & self._value_ == other._value_ - - def __iter__(self): - """ - Returns flags in definition order. 
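How _missing_ interacts with the boundary settings, in a short sketch (stdlib enum assumed):

    from enum import Flag, CONFORM, auto

    class Perm(Flag, boundary=CONFORM):   # CONFORM discards out-of-range bits
        R = auto()                        # 1
        W = auto()                        # 2
        X = auto()                        # 4

    assert Perm(7) == Perm.R | Perm.W | Perm.X   # composite built by _missing_
    assert Perm(15) == Perm(7)                   # bit 8 dropped under CONFORM
    # Under the default STRICT boundary, Perm(15) raises ValueError instead.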
- """ - yield from self._iter_member_(self._value_) - - def __len__(self): - return self._value_.bit_count() - - def __repr__(self): - cls_name = self.__class__.__name__ - v_repr = self.__class__._value_repr_ or repr - if self._name_ is None: - return "<%s: %s>" % (cls_name, v_repr(self._value_)) - else: - return "<%s.%s: %s>" % (cls_name, self._name_, v_repr(self._value_)) - - def __str__(self): - cls_name = self.__class__.__name__ - if self._name_ is None: - return '%s(%r)' % (cls_name, self._value_) - else: - return "%s.%s" % (cls_name, self._name_) - - def __bool__(self): - return bool(self._value_) - - def _get_value(self, flag): - if isinstance(flag, self.__class__): - return flag._value_ - elif self._member_type_ is not object and isinstance(flag, self._member_type_): - return flag - return NotImplemented - - def __or__(self, other): - other_value = self._get_value(other) - if other_value is NotImplemented: - return NotImplemented - - for flag in self, other: - if self._get_value(flag) is None: - raise TypeError(f"'{flag}' cannot be combined with other flags with |") - value = self._value_ - return self.__class__(value | other_value) - - def __and__(self, other): - other_value = self._get_value(other) - if other_value is NotImplemented: - return NotImplemented - - for flag in self, other: - if self._get_value(flag) is None: - raise TypeError(f"'{flag}' cannot be combined with other flags with &") - value = self._value_ - return self.__class__(value & other_value) - - def __xor__(self, other): - other_value = self._get_value(other) - if other_value is NotImplemented: - return NotImplemented - - for flag in self, other: - if self._get_value(flag) is None: - raise TypeError(f"'{flag}' cannot be combined with other flags with ^") - value = self._value_ - return self.__class__(value ^ other_value) - - def __invert__(self): - if self._get_value(self) is None: - raise TypeError(f"'{self}' cannot be inverted") - - if self._inverted_ is None: - if self._boundary_ in (EJECT, KEEP): - self._inverted_ = self.__class__(~self._value_) - else: - self._inverted_ = self.__class__(self._singles_mask_ & ~self._value_) - return self._inverted_ - - __rand__ = __and__ - __ror__ = __or__ - __rxor__ = __xor__ - - -class IntFlag(int, ReprEnum, Flag, boundary=KEEP): - """ - Support for integer-based Flags - """ - - -def _high_bit(value): - """ - returns index of highest bit, or -1 if value is zero or negative - """ - return value.bit_length() - 1 - -def unique(enumeration): - """ - Class decorator for enumerations ensuring unique member values. 
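The operator protocol defined above, exercised in one short sketch (stdlib enum assumed):

    from enum import Flag, auto

    class Perm(Flag):
        R = auto()
        W = auto()
        X = auto()

    rw = Perm.R | Perm.W                  # __or__ builds a composite pseudo-member
    assert Perm.R in rw                   # __contains__ checks the bit subset
    assert len(rw) == 2                   # __len__ counts set bits
    assert list(rw) == [Perm.R, Perm.W]   # __iter__ yields single-bit members
    assert ~rw == Perm.X                  # __invert__ under the default STRICT boundary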
- """ - duplicates = [] - for name, member in enumeration.__members__.items(): - if name != member.name: - duplicates.append((name, member.name)) - if duplicates: - alias_details = ', '.join( - ["%s -> %s" % (alias, name) for (alias, name) in duplicates]) - raise ValueError('duplicate values found in %r: %s' % - (enumeration, alias_details)) - return enumeration - -def _dataclass_repr(self): - dcf = self.__dataclass_fields__ - return ', '.join( - '%s=%r' % (k, getattr(self, k)) - for k in dcf.keys() - if dcf[k].repr - ) - -def global_enum_repr(self): - """ - use module.enum_name instead of class.enum_name - - the module is the last module in case of a multi-module name - """ - module = self.__class__.__module__.split('.')[-1] - return '%s.%s' % (module, self._name_) - -def global_flag_repr(self): - """ - use module.flag_name instead of class.flag_name - - the module is the last module in case of a multi-module name - """ - module = self.__class__.__module__.split('.')[-1] - cls_name = self.__class__.__name__ - if self._name_ is None: - return "%s.%s(%r)" % (module, cls_name, self._value_) - if _is_single_bit(self._value_): - return '%s.%s' % (module, self._name_) - if self._boundary_ is not FlagBoundary.KEEP: - return '|'.join(['%s.%s' % (module, name) for name in self.name.split('|')]) - else: - name = [] - for n in self._name_.split('|'): - if n[0].isdigit(): - name.append(n) - else: - name.append('%s.%s' % (module, n)) - return '|'.join(name) - -def global_str(self): - """ - use enum_name instead of class.enum_name - """ - if self._name_ is None: - cls_name = self.__class__.__name__ - return "%s(%r)" % (cls_name, self._value_) - else: - return self._name_ - -def global_enum(cls, update_str=False): - """ - decorator that makes the repr() of an enum member reference its module - instead of its class; also exports all members to the enum's module's - global namespace - """ - if issubclass(cls, Flag): - cls.__repr__ = global_flag_repr - else: - cls.__repr__ = global_enum_repr - if not issubclass(cls, ReprEnum) or update_str: - cls.__str__ = global_str - sys.modules[cls.__module__].__dict__.update(cls.__members__) - return cls - -def _simple_enum(etype=Enum, *, boundary=None, use_args=None): - """ - Class decorator that converts a normal class into an :class:`Enum`. No - safety checks are done, and some advanced behavior (such as - :func:`__init_subclass__`) is not available. Enum creation can be faster - using :func:`_simple_enum`. - - >>> from enum import Enum, _simple_enum - >>> @_simple_enum(Enum) - ... class Color: - ... RED = auto() - ... GREEN = auto() - ... 
BLUE = auto()
-    >>> Color
-    <enum 'Color'>
-    """
-    def convert_class(cls):
-        nonlocal use_args
-        cls_name = cls.__name__
-        if use_args is None:
-            use_args = etype._use_args_
-        __new__ = cls.__dict__.get('__new__')
-        if __new__ is not None:
-            new_member = __new__.__func__
-        else:
-            new_member = etype._member_type_.__new__
-        attrs = {}
-        body = {}
-        if __new__ is not None:
-            body['__new_member__'] = new_member
-        body['_new_member_'] = new_member
-        body['_use_args_'] = use_args
-        body['_generate_next_value_'] = gnv = etype._generate_next_value_
-        body['_member_names_'] = member_names = []
-        body['_member_map_'] = member_map = {}
-        body['_value2member_map_'] = value2member_map = {}
-        body['_hashable_values_'] = hashable_values = []
-        body['_unhashable_values_'] = unhashable_values = []
-        body['_unhashable_values_map_'] = {}
-        body['_member_type_'] = member_type = etype._member_type_
-        body['_value_repr_'] = etype._value_repr_
-        if issubclass(etype, Flag):
-            body['_boundary_'] = boundary or etype._boundary_
-            body['_flag_mask_'] = None
-            body['_all_bits_'] = None
-            body['_singles_mask_'] = None
-            body['_inverted_'] = None
-            body['__or__'] = Flag.__or__
-            body['__xor__'] = Flag.__xor__
-            body['__and__'] = Flag.__and__
-            body['__ror__'] = Flag.__ror__
-            body['__rxor__'] = Flag.__rxor__
-            body['__rand__'] = Flag.__rand__
-            body['__invert__'] = Flag.__invert__
-        for name, obj in cls.__dict__.items():
-            if name in ('__dict__', '__weakref__'):
-                continue
-            if _is_dunder(name) or _is_private(cls_name, name) or _is_sunder(name) or _is_descriptor(obj):
-                body[name] = obj
-            else:
-                attrs[name] = obj
-        if cls.__dict__.get('__doc__') is None:
-            body['__doc__'] = 'An enumeration.'
-        #
-        # double check that repr and friends are not the mixin's or various
-        # things break (such as pickle)
-        # however, if the method is defined in the Enum itself, don't replace
-        # it
-        enum_class = type(cls_name, (etype, ), body, boundary=boundary, _simple=True)
-        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
-            if name not in body:
-                # check for mixin overrides before replacing
-                enum_method = getattr(etype, name)
-                found_method = getattr(enum_class, name)
-                object_method = getattr(object, name)
-                data_type_method = getattr(member_type, name)
-                if found_method in (data_type_method, object_method):
-                    setattr(enum_class, name, enum_method)
-        gnv_last_values = []
-        if issubclass(enum_class, Flag):
-            # Flag / IntFlag
-            single_bits = multi_bits = 0
-            for name, value in attrs.items():
-                if isinstance(value, auto) and auto.value is _auto_null:
-                    value = gnv(name, 1, len(member_names), gnv_last_values)
-                # create basic member (possibly isolate value for alias check)
-                if use_args:
-                    if not isinstance(value, tuple):
-                        value = (value, )
-                    member = new_member(enum_class, *value)
-                    value = value[0]
-                else:
-                    member = new_member(enum_class)
-                if __new__ is None:
-                    member._value_ = value
-                # now check if alias
-                try:
-                    contained = value2member_map.get(member._value_)
-                except TypeError:
-                    contained = None
-                    if member._value_ in unhashable_values or member.value in hashable_values:
-                        for m in enum_class:
-                            if m._value_ == member._value_:
-                                contained = m
-                                break
-                if contained is not None:
-                    # an alias to an existing member
-                    contained._add_alias_(name)
-                else:
-                    # finish creating member
-                    member._name_ = name
-                    member.__objclass__ = enum_class
-                    member.__init__(value)
-                    member._sort_order_ = len(member_names)
-                    if name not in ('name', 'value'):
-                        setattr(enum_class, name, member)
-                        member_map[name] = member
-                    else:
enum_class._add_member_(name, member) - value2member_map[value] = member - hashable_values.append(value) - if _is_single_bit(value): - # not a multi-bit alias, record in _member_names_ and _flag_mask_ - member_names.append(name) - single_bits |= value - else: - multi_bits |= value - gnv_last_values.append(value) - enum_class._flag_mask_ = single_bits | multi_bits - enum_class._singles_mask_ = single_bits - enum_class._all_bits_ = 2 ** ((single_bits|multi_bits).bit_length()) - 1 - # set correct __iter__ - member_list = [m._value_ for m in enum_class] - if member_list != sorted(member_list): - enum_class._iter_member_ = enum_class._iter_member_by_def_ - else: - # Enum / IntEnum / StrEnum - for name, value in attrs.items(): - if isinstance(value, auto): - if value.value is _auto_null: - value.value = gnv(name, 1, len(member_names), gnv_last_values) - value = value.value - # create basic member (possibly isolate value for alias check) - if use_args: - if not isinstance(value, tuple): - value = (value, ) - member = new_member(enum_class, *value) - value = value[0] - else: - member = new_member(enum_class) - if __new__ is None: - member._value_ = value - # now check if alias - try: - contained = value2member_map.get(member._value_) - except TypeError: - contained = None - if member._value_ in unhashable_values or member._value_ in hashable_values: - for m in enum_class: - if m._value_ == member._value_: - contained = m - break - if contained is not None: - # an alias to an existing member - contained._add_alias_(name) - else: - # finish creating member - member._name_ = name - member.__objclass__ = enum_class - member.__init__(value) - member._sort_order_ = len(member_names) - if name not in ('name', 'value'): - setattr(enum_class, name, member) - member_map[name] = member - else: - enum_class._add_member_(name, member) - member_names.append(name) - gnv_last_values.append(value) - try: - # This may fail if value is not hashable. We can't add the value - # to the map, and by-value lookups for this value will be - # linear. - enum_class._value2member_map_.setdefault(value, member) - if value not in hashable_values: - hashable_values.append(value) - except TypeError: - # keep track of the value in a list so containment checks are quick - enum_class._unhashable_values_.append(value) - enum_class._unhashable_values_map_.setdefault(name, []).append(value) - if '__new__' in body: - enum_class.__new_member__ = enum_class.__new__ - enum_class.__new__ = Enum.__new__ - return enum_class - return convert_class - -@_simple_enum(StrEnum) -class EnumCheck: - """ - various conditions to check an enumeration for - """ - CONTINUOUS = "no skipped integer values" - NAMED_FLAGS = "multi-flag aliases may not contain unnamed flags" - UNIQUE = "one name per value" -CONTINUOUS, NAMED_FLAGS, UNIQUE = EnumCheck - - -class verify: - """ - Check an enumeration for various constraints. 
(see EnumCheck) - """ - def __init__(self, *checks): - self.checks = checks - def __call__(self, enumeration): - checks = self.checks - cls_name = enumeration.__name__ - if Flag is not None and issubclass(enumeration, Flag): - enum_type = 'flag' - elif issubclass(enumeration, Enum): - enum_type = 'enum' - else: - raise TypeError("the 'verify' decorator only works with Enum and Flag") - for check in checks: - if check is UNIQUE: - # check for duplicate names - duplicates = [] - for name, member in enumeration.__members__.items(): - if name != member.name: - duplicates.append((name, member.name)) - if duplicates: - alias_details = ', '.join( - ["%s -> %s" % (alias, name) for (alias, name) in duplicates]) - raise ValueError('aliases found in %r: %s' % - (enumeration, alias_details)) - elif check is CONTINUOUS: - values = set(e.value for e in enumeration) - if len(values) < 2: - continue - low, high = min(values), max(values) - missing = [] - if enum_type == 'flag': - # check for powers of two - for i in range(_high_bit(low)+1, _high_bit(high)): - if 2**i not in values: - missing.append(2**i) - elif enum_type == 'enum': - # check for missing consecutive integers - for i in range(low+1, high): - if i not in values: - missing.append(i) - else: - raise Exception('verify: unknown type %r' % enum_type) - if missing: - raise ValueError(('invalid %s %r: missing values %s' % ( - enum_type, cls_name, ', '.join((str(m) for m in missing))) - )[:256]) - # limit max length to protect against DOS attacks - elif check is NAMED_FLAGS: - # examine each alias and check for unnamed flags - member_names = enumeration._member_names_ - member_values = [m.value for m in enumeration] - missing_names = [] - missing_value = 0 - for name, alias in enumeration._member_map_.items(): - if name in member_names: - # not an alias - continue - if alias.value < 0: - # negative numbers are not checked - continue - values = list(_iter_bits_lsb(alias.value)) - missed = [v for v in values if v not in member_values] - if missed: - missing_names.append(name) - for val in missed: - missing_value |= val - if missing_names: - if len(missing_names) == 1: - alias = 'alias %s is missing' % missing_names[0] - else: - alias = 'aliases %s and %s are missing' % ( - ', '.join(missing_names[:-1]), missing_names[-1] - ) - if _is_single_bit(missing_value): - value = 'value 0x%x' % missing_value - else: - value = 'combined values of 0x%x' % missing_value - raise ValueError( - 'invalid Flag %r: %s %s [use enum.show_flag_values(value) for details]' - % (cls_name, alias, value) - ) - return enumeration - -def _test_simple_enum(checked_enum, simple_enum): - """ - A function that can be used to test an enum created with :func:`_simple_enum` - against the version created by subclassing :class:`Enum`:: - - >>> from enum import Enum, _simple_enum, _test_simple_enum - >>> @_simple_enum(Enum) - ... class Color: - ... RED = auto() - ... GREEN = auto() - ... BLUE = auto() - >>> class CheckedColor(Enum): - ... RED = auto() - ... GREEN = auto() - ... BLUE = auto() - >>> _test_simple_enum(CheckedColor, Color) - - If differences are found, a :exc:`TypeError` is raised. 
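A short sketch of the verify decorator implemented above, assuming the stdlib enum module:

    from enum import Enum, verify, UNIQUE, CONTINUOUS

    @verify(UNIQUE, CONTINUOUS)
    class Level(Enum):
        LOW = 1
        MEDIUM = 2
        HIGH = 3

    # ALIAS = 1 would fail UNIQUE ("aliases found in ...");
    # HIGH = 4 (skipping 3) would fail CONTINUOUS ("missing values 3").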
- """ - failed = [] - if checked_enum.__dict__ != simple_enum.__dict__: - checked_dict = checked_enum.__dict__ - checked_keys = list(checked_dict.keys()) - simple_dict = simple_enum.__dict__ - simple_keys = list(simple_dict.keys()) - member_names = set( - list(checked_enum._member_map_.keys()) - + list(simple_enum._member_map_.keys()) - ) - for key in set(checked_keys + simple_keys): - if key in ('__module__', '_member_map_', '_value2member_map_', '__doc__', - '__static_attributes__', '__firstlineno__'): - # keys known to be different, or very long - continue - elif key in member_names: - # members are checked below - continue - elif key not in simple_keys: - failed.append("missing key: %r" % (key, )) - elif key not in checked_keys: - failed.append("extra key: %r" % (key, )) - else: - checked_value = checked_dict[key] - simple_value = simple_dict[key] - if callable(checked_value) or isinstance(checked_value, bltns.property): - continue - if key == '__doc__': - # remove all spaces/tabs - compressed_checked_value = checked_value.replace(' ','').replace('\t','') - compressed_simple_value = simple_value.replace(' ','').replace('\t','') - if compressed_checked_value != compressed_simple_value: - failed.append("%r:\n %s\n %s" % ( - key, - "checked -> %r" % (checked_value, ), - "simple -> %r" % (simple_value, ), - )) - elif checked_value != simple_value: - failed.append("%r:\n %s\n %s" % ( - key, - "checked -> %r" % (checked_value, ), - "simple -> %r" % (simple_value, ), - )) - failed.sort() - for name in member_names: - failed_member = [] - if name not in simple_keys: - failed.append('missing member from simple enum: %r' % name) - elif name not in checked_keys: - failed.append('extra member in simple enum: %r' % name) - else: - checked_member_dict = checked_enum[name].__dict__ - checked_member_keys = list(checked_member_dict.keys()) - simple_member_dict = simple_enum[name].__dict__ - simple_member_keys = list(simple_member_dict.keys()) - for key in set(checked_member_keys + simple_member_keys): - if key in ('__module__', '__objclass__', '_inverted_'): - # keys known to be different or absent - continue - elif key not in simple_member_keys: - failed_member.append("missing key %r not in the simple enum member %r" % (key, name)) - elif key not in checked_member_keys: - failed_member.append("extra key %r in simple enum member %r" % (key, name)) - else: - checked_value = checked_member_dict[key] - simple_value = simple_member_dict[key] - if checked_value != simple_value: - failed_member.append("%r:\n %s\n %s" % ( - key, - "checked member -> %r" % (checked_value, ), - "simple member -> %r" % (simple_value, ), - )) - if failed_member: - failed.append('%r member mismatch:\n %s' % ( - name, '\n '.join(failed_member), - )) - for method in ( - '__str__', '__repr__', '__reduce_ex__', '__format__', - '__getnewargs_ex__', '__getnewargs__', '__reduce_ex__', '__reduce__' - ): - if method in simple_keys and method in checked_keys: - # cannot compare functions, and it exists in both, so we're good - continue - elif method not in simple_keys and method not in checked_keys: - # method is inherited -- check it out - checked_method = getattr(checked_enum, method, None) - simple_method = getattr(simple_enum, method, None) - if hasattr(checked_method, '__func__'): - checked_method = checked_method.__func__ - simple_method = simple_method.__func__ - if checked_method != simple_method: - failed.append("%r: %-30s %s" % ( - method, - "checked -> %r" % (checked_method, ), - "simple -> %r" % (simple_method, ), - )) - else: - 
# if the method existed in only one of the enums, it will have been caught - # in the first checks above - pass - if failed: - raise TypeError('enum mismatch:\n %s' % '\n '.join(failed)) - -def _old_convert_(etype, name, module, filter, source=None, *, boundary=None): - """ - Create a new Enum subclass that replaces a collection of global constants - """ - # convert all constants from source (or module) that pass filter() to - # a new Enum called name, and export the enum and its members back to - # module; - # also, replace the __reduce_ex__ method so unpickling works in - # previous Python versions - module_globals = sys.modules[module].__dict__ - if source: - source = source.__dict__ - else: - source = module_globals - # _value2member_map_ is populated in the same order every time - # for a consistent reverse mapping of number to name when there - # are multiple names for the same number. - members = [ - (name, value) - for name, value in source.items() - if filter(name)] - try: - # sort by value - members.sort(key=lambda t: (t[1], t[0])) - except TypeError: - # unless some values aren't comparable, in which case sort by name - members.sort(key=lambda t: t[0]) - cls = etype(name, members, module=module, boundary=boundary or KEEP) - return cls - -_stdlib_enums = IntEnum, StrEnum, IntFlag diff --git a/Python313_13_x64_Template/Lib/fnmatch.py b/Python313_13_x64_Template/Lib/fnmatch.py deleted file mode 100644 index 73acb1fe..00000000 --- a/Python313_13_x64_Template/Lib/fnmatch.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Filename matching with shell patterns. - -fnmatch(FILENAME, PATTERN) matches according to the local convention. -fnmatchcase(FILENAME, PATTERN) always takes case in account. - -The functions operate by translating the pattern into a regular -expression. They cache the compiled regular expressions for speed. - -The function translate(PATTERN) returns a regular expression -corresponding to PATTERN. (It does not compile it.) -""" -import os -import posixpath -import re -import functools - -__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"] - -def fnmatch(name, pat): - """Test whether FILENAME matches PATTERN. - - Patterns are Unix shell style: - - * matches everything - ? matches any single character - [seq] matches any character in seq - [!seq] matches any char not in seq - - An initial period in FILENAME is not special. - Both FILENAME and PATTERN are first case-normalized - if the operating system requires it. - If you don't want this, use fnmatchcase(FILENAME, PATTERN). - """ - name = os.path.normcase(name) - pat = os.path.normcase(pat) - return fnmatchcase(name, pat) - -@functools.lru_cache(maxsize=32768, typed=True) -def _compile_pattern(pat): - if isinstance(pat, bytes): - pat_str = str(pat, 'ISO-8859-1') - res_str = translate(pat_str) - res = bytes(res_str, 'ISO-8859-1') - else: - res = translate(pat) - return re.compile(res).match - -def filter(names, pat): - """Construct a list from those elements of the iterable NAMES that match PAT.""" - result = [] - pat = os.path.normcase(pat) - match = _compile_pattern(pat) - if os.path is posixpath: - # normcase on posix is NOP. Optimize it away from the loop. - for name in names: - if match(name): - result.append(name) - else: - for name in names: - if match(os.path.normcase(name)): - result.append(name) - return result - -def fnmatchcase(name, pat): - """Test whether FILENAME matches PATTERN, including case. - - This is a version of fnmatch() which doesn't case-normalize - its arguments. 
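The public fnmatch API built on these helpers, in a minimal sketch:

    import fnmatch
    import re

    assert fnmatch.fnmatchcase('data_01.csv', 'data_??.csv')   # ? matches one character
    assert fnmatch.filter(['a.py', 'b.txt', 'c.py'], '*.py') == ['a.py', 'c.py']
    assert fnmatch.fnmatchcase('.hidden', '*')                 # a leading dot is not special here

    # translate() produces an anchored regular expression:
    rx = re.compile(fnmatch.translate('*.py'))
    assert rx.match('setup.py') and not rx.match('setup.pyc')  # \Z anchors the match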
- """ - match = _compile_pattern(pat) - return match(name) is not None - - -def translate(pat): - """Translate a shell PATTERN to a regular expression. - - There is no way to quote meta-characters. - """ - - STAR = object() - parts = _translate(pat, STAR, '.') - return _join_translated_parts(parts, STAR) - - -def _translate(pat, STAR, QUESTION_MARK): - res = [] - add = res.append - i, n = 0, len(pat) - while i < n: - c = pat[i] - i = i+1 - if c == '*': - # compress consecutive `*` into one - if (not res) or res[-1] is not STAR: - add(STAR) - elif c == '?': - add(QUESTION_MARK) - elif c == '[': - j = i - if j < n and pat[j] == '!': - j = j+1 - if j < n and pat[j] == ']': - j = j+1 - while j < n and pat[j] != ']': - j = j+1 - if j >= n: - add('\\[') - else: - stuff = pat[i:j] - if '-' not in stuff: - stuff = stuff.replace('\\', r'\\') - else: - chunks = [] - k = i+2 if pat[i] == '!' else i+1 - while True: - k = pat.find('-', k, j) - if k < 0: - break - chunks.append(pat[i:k]) - i = k+1 - k = k+3 - chunk = pat[i:j] - if chunk: - chunks.append(chunk) - else: - chunks[-1] += '-' - # Remove empty ranges -- invalid in RE. - for k in range(len(chunks)-1, 0, -1): - if chunks[k-1][-1] > chunks[k][0]: - chunks[k-1] = chunks[k-1][:-1] + chunks[k][1:] - del chunks[k] - # Escape backslashes and hyphens for set difference (--). - # Hyphens that create ranges shouldn't be escaped. - stuff = '-'.join(s.replace('\\', r'\\').replace('-', r'\-') - for s in chunks) - # Escape set operations (&&, ~~ and ||). - stuff = re.sub(r'([&~|])', r'\\\1', stuff) - i = j+1 - if not stuff: - # Empty range: never match. - add('(?!)') - elif stuff == '!': - # Negated empty range: match any character. - add('.') - else: - if stuff[0] == '!': - stuff = '^' + stuff[1:] - elif stuff[0] in ('^', '['): - stuff = '\\' + stuff - add(f'[{stuff}]') - else: - add(re.escape(c)) - assert i == n - return res - - -def _join_translated_parts(inp, STAR): - # Deal with STARs. - res = [] - add = res.append - i, n = 0, len(inp) - # Fixed pieces at the start? - while i < n and inp[i] is not STAR: - add(inp[i]) - i += 1 - # Now deal with STAR fixed STAR fixed ... - # For an interior `STAR fixed` pairing, we want to do a minimal - # .*? match followed by `fixed`, with no possibility of backtracking. - # Atomic groups ("(?>...)") allow us to spell that directly. - # Note: people rely on the undocumented ability to join multiple - # translate() results together via "|" to build large regexps matching - # "one of many" shell patterns. - while i < n: - assert inp[i] is STAR - i += 1 - if i == n: - add(".*") - break - assert inp[i] is not STAR - fixed = [] - while i < n and inp[i] is not STAR: - fixed.append(inp[i]) - i += 1 - fixed = "".join(fixed) - if i == n: - add(".*") - add(fixed) - else: - add(f"(?>.*?{fixed})") - assert i == n - res = "".join(res) - return fr'(?s:{res})\Z' diff --git a/Python313_13_x64_Template/Lib/fractions.py b/Python313_13_x64_Template/Lib/fractions.py deleted file mode 100644 index 9d42e809..00000000 --- a/Python313_13_x64_Template/Lib/fractions.py +++ /dev/null @@ -1,1043 +0,0 @@ -# Originally contributed by Sjoerd Mullender. -# Significantly modified by Jeffrey Yasskin . - -"""Fraction, infinite-precision, rational numbers.""" - -from decimal import Decimal -import functools -import math -import numbers -import operator -import re -import sys - -__all__ = ['Fraction'] - - -# Constants related to the hash implementation; hash(x) is based -# on the reduction of x modulo the prime _PyHASH_MODULUS. 
-_PyHASH_MODULUS = sys.hash_info.modulus -# Value to be used for rationals that reduce to infinity modulo -# _PyHASH_MODULUS. -_PyHASH_INF = sys.hash_info.inf - -@functools.lru_cache(maxsize = 1 << 14) -def _hash_algorithm(numerator, denominator): - - # To make sure that the hash of a Fraction agrees with the hash - # of a numerically equal integer, float or Decimal instance, we - # follow the rules for numeric hashes outlined in the - # documentation. (See library docs, 'Built-in Types'). - - try: - dinv = pow(denominator, -1, _PyHASH_MODULUS) - except ValueError: - # ValueError means there is no modular inverse. - hash_ = _PyHASH_INF - else: - # The general algorithm now specifies that the absolute value of - # the hash is - # (|N| * dinv) % P - # where N is self._numerator and P is _PyHASH_MODULUS. That's - # optimized here in two ways: first, for a non-negative int i, - # hash(i) == i % P, but the int hash implementation doesn't need - # to divide, and is faster than doing % P explicitly. So we do - # hash(|N| * dinv) - # instead. Second, N is unbounded, so its product with dinv may - # be arbitrarily expensive to compute. The final answer is the - # same if we use the bounded |N| % P instead, which can again - # be done with an int hash() call. If 0 <= i < P, hash(i) == i, - # so this nested hash() call wastes a bit of time making a - # redundant copy when |N| < P, but can save an arbitrarily large - # amount of computation for large |N|. - hash_ = hash(hash(abs(numerator)) * dinv) - result = hash_ if numerator >= 0 else -hash_ - return -2 if result == -1 else result - -_RATIONAL_FORMAT = re.compile(r""" - \A\s* # optional whitespace at the start, - (?P[-+]?) # an optional sign, then - (?=\d|\.\d) # lookahead for digit or .digit - (?P\d*|\d+(_\d+)*) # numerator (possibly empty) - (?: # followed by - (?:\s*/\s*(?P\d+(_\d+)*))? # an optional denominator - | # or - (?:\.(?P\d*|\d+(_\d+)*))? # an optional fractional part - (?:E(?P[-+]?\d+(_\d+)*))? # and optional exponent - ) - \s*\Z # and optional whitespace to finish -""", re.VERBOSE | re.IGNORECASE) - - -# Helpers for formatting - -def _round_to_exponent(n, d, exponent, no_neg_zero=False): - """Round a rational number to the nearest multiple of a given power of 10. - - Rounds the rational number n/d to the nearest integer multiple of - 10**exponent, rounding to the nearest even integer multiple in the case of - a tie. Returns a pair (sign: bool, significand: int) representing the - rounded value (-1)**sign * significand * 10**exponent. - - If no_neg_zero is true, then the returned sign will always be False when - the significand is zero. Otherwise, the sign reflects the sign of the - input. - - d must be positive, but n and d need not be relatively prime. - """ - if exponent >= 0: - d *= 10**exponent - else: - n *= 10**-exponent - - # The divmod quotient is correct for round-ties-towards-positive-infinity; - # In the case of a tie, we zero out the least significant bit of q. - q, r = divmod(n + (d >> 1), d) - if r == 0 and d & 1 == 0: - q &= -2 - - sign = q < 0 if no_neg_zero else n < 0 - return sign, abs(q) - - -def _round_to_figures(n, d, figures): - """Round a rational number to a given number of significant figures. - - Rounds the rational number n/d to the given number of significant figures - using the round-ties-to-even rule, and returns a triple - (sign: bool, significand: int, exponent: int) representing the rounded - value (-1)**sign * significand * 10**exponent. 
- - In the special case where n = 0, returns a significand of zero and - an exponent of 1 - figures, for compatibility with formatting. - Otherwise, the returned significand satisfies - 10**(figures - 1) <= significand < 10**figures. - - d must be positive, but n and d need not be relatively prime. - figures must be positive. - """ - # Special case for n == 0. - if n == 0: - return False, 0, 1 - figures - - # Find integer m satisfying 10**(m - 1) <= abs(n)/d <= 10**m. (If abs(n)/d - # is a power of 10, either of the two possible values for m is fine.) - str_n, str_d = str(abs(n)), str(d) - m = len(str_n) - len(str_d) + (str_d <= str_n) - - # Round to a multiple of 10**(m - figures). The significand we get - # satisfies 10**(figures - 1) <= significand <= 10**figures. - exponent = m - figures - sign, significand = _round_to_exponent(n, d, exponent) - - # Adjust in the case where significand == 10**figures, to ensure that - # 10**(figures - 1) <= significand < 10**figures. - if len(str(significand)) == figures + 1: - significand //= 10 - exponent += 1 - - return sign, significand, exponent - - -# Pattern for matching non-float-style format specifications. -_GENERAL_FORMAT_SPECIFICATION_MATCHER = re.compile(r""" - (?: - (?P.)? - (?P[<>=^]) - )? - (?P[-+ ]?) - # Alt flag forces a slash and denominator in the output, even for - # integer-valued Fraction objects. - (?P\#)? - # We don't implement the zeropad flag since there's no single obvious way - # to interpret it. - (?P0|[1-9][0-9]*)? - (?P[,_])? -""", re.DOTALL | re.VERBOSE).fullmatch - - -# Pattern for matching float-style format specifications; -# supports 'e', 'E', 'f', 'F', 'g', 'G' and '%' presentation types. -_FLOAT_FORMAT_SPECIFICATION_MATCHER = re.compile(r""" - (?: - (?P.)? - (?P[<>=^]) - )? - (?P[-+ ]?) - (?Pz)? - (?P\#)? - # A '0' that's *not* followed by another digit is parsed as a minimum width - # rather than a zeropad flag. - (?P0(?=[0-9]))? - (?P0|[1-9][0-9]*)? - (?P[,_])? - (?:\.(?P0|[1-9][0-9]*))? - (?P[eEfFgG%]) -""", re.DOTALL | re.VERBOSE).fullmatch - - -class Fraction(numbers.Rational): - """This class implements rational numbers. - - In the two-argument form of the constructor, Fraction(8, 6) will - produce a rational number equivalent to 4/3. Both arguments must - be Rational. The numerator defaults to 0 and the denominator - defaults to 1 so that Fraction(3) == 3 and Fraction() == 0. - - Fractions can also be constructed from: - - - numeric strings similar to those accepted by the - float constructor (for example, '-2.3' or '1e10') - - - strings of the form '123/456' - - - float and Decimal instances - - - other Rational instances (including integers) - - """ - - __slots__ = ('_numerator', '_denominator') - - # We're immutable, so use __new__ not __init__ - def __new__(cls, numerator=0, denominator=None): - """Constructs a Rational. - - Takes a string like '3/2' or '1.5', another Rational instance, a - numerator/denominator pair, or a float. 
- - Examples - -------- - - >>> Fraction(10, -8) - Fraction(-5, 4) - >>> Fraction(Fraction(1, 7), 5) - Fraction(1, 35) - >>> Fraction(Fraction(1, 7), Fraction(2, 3)) - Fraction(3, 14) - >>> Fraction('314') - Fraction(314, 1) - >>> Fraction('-35/4') - Fraction(-35, 4) - >>> Fraction('3.1415') # conversion from numeric string - Fraction(6283, 2000) - >>> Fraction('-47e-2') # string may include a decimal exponent - Fraction(-47, 100) - >>> Fraction(1.47) # direct construction from float (exact conversion) - Fraction(6620291452234629, 4503599627370496) - >>> Fraction(2.25) - Fraction(9, 4) - >>> Fraction(Decimal('1.47')) - Fraction(147, 100) - - """ - self = super(Fraction, cls).__new__(cls) - - if denominator is None: - if type(numerator) is int: - self._numerator = numerator - self._denominator = 1 - return self - - elif isinstance(numerator, numbers.Rational): - self._numerator = numerator.numerator - self._denominator = numerator.denominator - return self - - elif isinstance(numerator, (float, Decimal)): - # Exact conversion - self._numerator, self._denominator = numerator.as_integer_ratio() - return self - - elif isinstance(numerator, str): - # Handle construction from strings. - m = _RATIONAL_FORMAT.match(numerator) - if m is None: - raise ValueError('Invalid literal for Fraction: %r' % - numerator) - numerator = int(m.group('num') or '0') - denom = m.group('denom') - if denom: - denominator = int(denom) - else: - denominator = 1 - decimal = m.group('decimal') - if decimal: - decimal = decimal.replace('_', '') - scale = 10**len(decimal) - numerator = numerator * scale + int(decimal) - denominator *= scale - exp = m.group('exp') - if exp: - exp = int(exp) - if exp >= 0: - numerator *= 10**exp - else: - denominator *= 10**-exp - if m.group('sign') == '-': - numerator = -numerator - - else: - raise TypeError("argument should be a string " - "or a Rational instance") - - elif type(numerator) is int is type(denominator): - pass # *very* normal case - - elif (isinstance(numerator, numbers.Rational) and - isinstance(denominator, numbers.Rational)): - numerator, denominator = ( - numerator.numerator * denominator.denominator, - denominator.numerator * numerator.denominator - ) - else: - raise TypeError("both arguments should be " - "Rational instances") - - if denominator == 0: - raise ZeroDivisionError('Fraction(%s, 0)' % numerator) - g = math.gcd(numerator, denominator) - if denominator < 0: - g = -g - numerator //= g - denominator //= g - self._numerator = numerator - self._denominator = denominator - return self - - @classmethod - def from_float(cls, f): - """Converts a finite float to a rational number, exactly. - - Beware that Fraction.from_float(0.3) != Fraction(3, 10). - - """ - if isinstance(f, numbers.Integral): - return cls(f) - elif not isinstance(f, float): - raise TypeError("%s.from_float() only takes floats, not %r (%s)" % - (cls.__name__, f, type(f).__name__)) - return cls._from_coprime_ints(*f.as_integer_ratio()) - - @classmethod - def from_decimal(cls, dec): - """Converts a finite Decimal instance to a rational number, exactly.""" - from decimal import Decimal - if isinstance(dec, numbers.Integral): - dec = Decimal(int(dec)) - elif not isinstance(dec, Decimal): - raise TypeError( - "%s.from_decimal() only takes Decimals, not %r (%s)" % - (cls.__name__, dec, type(dec).__name__)) - return cls._from_coprime_ints(*dec.as_integer_ratio()) - - @classmethod - def _from_coprime_ints(cls, numerator, denominator, /): - """Convert a pair of ints to a rational number, for internal use. 
- - The ratio of integers should be in lowest terms and the denominator - should be positive. - """ - obj = super(Fraction, cls).__new__(cls) - obj._numerator = numerator - obj._denominator = denominator - return obj - - def is_integer(self): - """Return True if the Fraction is an integer.""" - return self._denominator == 1 - - def as_integer_ratio(self): - """Return a pair of integers, whose ratio is equal to the original Fraction. - - The ratio is in lowest terms and has a positive denominator. - """ - return (self._numerator, self._denominator) - - def limit_denominator(self, max_denominator=1000000): - """Closest Fraction to self with denominator at most max_denominator. - - >>> Fraction('3.141592653589793').limit_denominator(10) - Fraction(22, 7) - >>> Fraction('3.141592653589793').limit_denominator(100) - Fraction(311, 99) - >>> Fraction(4321, 8765).limit_denominator(10000) - Fraction(4321, 8765) - - """ - # Algorithm notes: For any real number x, define a *best upper - # approximation* to x to be a rational number p/q such that: - # - # (1) p/q >= x, and - # (2) if p/q > r/s >= x then s > q, for any rational r/s. - # - # Define *best lower approximation* similarly. Then it can be - # proved that a rational number is a best upper or lower - # approximation to x if, and only if, it is a convergent or - # semiconvergent of the (unique shortest) continued fraction - # associated to x. - # - # To find a best rational approximation with denominator <= M, - # we find the best upper and lower approximations with - # denominator <= M and take whichever of these is closer to x. - # In the event of a tie, the bound with smaller denominator is - # chosen. If both denominators are equal (which can happen - # only when max_denominator == 1 and self is midway between - # two integers) the lower bound---i.e., the floor of self, is - # taken. - - if max_denominator < 1: - raise ValueError("max_denominator should be at least 1") - if self._denominator <= max_denominator: - return Fraction(self) - - p0, q0, p1, q1 = 0, 1, 1, 0 - n, d = self._numerator, self._denominator - while True: - a = n//d - q2 = q0+a*q1 - if q2 > max_denominator: - break - p0, q0, p1, q1 = p1, q1, p0+a*p1, q2 - n, d = d, n-a*d - k = (max_denominator-q0)//q1 - - # Determine which of the candidates (p0+k*p1)/(q0+k*q1) and p1/q1 is - # closer to self. The distance between them is 1/(q1*(q0+k*q1)), while - # the distance from p1/q1 to self is d/(q1*self._denominator). So we - # need to compare 2*(q0+k*q1) with self._denominator/d. - if 2*d*(q0+k*q1) <= self._denominator: - return Fraction._from_coprime_ints(p1, q1) - else: - return Fraction._from_coprime_ints(p0+k*p1, q0+k*q1) - - @property - def numerator(a): - return a._numerator - - @property - def denominator(a): - return a._denominator - - def __repr__(self): - """repr(self)""" - return '%s(%s, %s)' % (self.__class__.__name__, - self._numerator, self._denominator) - - def __str__(self): - """str(self)""" - if self._denominator == 1: - return str(self._numerator) - else: - return '%s/%s' % (self._numerator, self._denominator) - - def _format_general(self, match): - """Helper method for __format__. - - Handles fill, alignment, signs, and thousands separators in the - case of no presentation type. - """ - # Validate and parse the format specifier. 
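A quick check of the best-approximation behaviour documented in limit_denominator() above, assuming the stdlib fractions module:

    import math
    from fractions import Fraction

    pi = Fraction(math.pi)               # exact ratio for the float value of pi
    print(pi.limit_denominator(10))      # Fraction(22, 7)
    print(pi.limit_denominator(1000))    # Fraction(355, 113)

    # No fraction with denominator <= 1000 lies closer than the one returned:
    best = pi.limit_denominator(1000)
    assert abs(best - pi) <= abs(Fraction(3141, 1000) - pi)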
- fill = match["fill"] or " " - align = match["align"] or ">" - pos_sign = "" if match["sign"] == "-" else match["sign"] - alternate_form = bool(match["alt"]) - minimumwidth = int(match["minimumwidth"] or "0") - thousands_sep = match["thousands_sep"] or '' - - # Determine the body and sign representation. - n, d = self._numerator, self._denominator - if d > 1 or alternate_form: - body = f"{abs(n):{thousands_sep}}/{d:{thousands_sep}}" - else: - body = f"{abs(n):{thousands_sep}}" - sign = '-' if n < 0 else pos_sign - - # Pad with fill character if necessary and return. - padding = fill * (minimumwidth - len(sign) - len(body)) - if align == ">": - return padding + sign + body - elif align == "<": - return sign + body + padding - elif align == "^": - half = len(padding) // 2 - return padding[:half] + sign + body + padding[half:] - else: # align == "=" - return sign + padding + body - - def _format_float_style(self, match): - """Helper method for __format__; handles float presentation types.""" - fill = match["fill"] or " " - align = match["align"] or ">" - pos_sign = "" if match["sign"] == "-" else match["sign"] - no_neg_zero = bool(match["no_neg_zero"]) - alternate_form = bool(match["alt"]) - zeropad = bool(match["zeropad"]) - minimumwidth = int(match["minimumwidth"] or "0") - thousands_sep = match["thousands_sep"] - precision = int(match["precision"] or "6") - presentation_type = match["presentation_type"] - trim_zeros = presentation_type in "gG" and not alternate_form - trim_point = not alternate_form - exponent_indicator = "E" if presentation_type in "EFG" else "e" - - if align == '=' and fill == '0': - zeropad = True - - # Round to get the digits we need, figure out where to place the point, - # and decide whether to use scientific notation. 'point_pos' is the - # relative to the _end_ of the digit string: that is, it's the number - # of digits that should follow the point. - if presentation_type in "fF%": - exponent = -precision - if presentation_type == "%": - exponent -= 2 - negative, significand = _round_to_exponent( - self._numerator, self._denominator, exponent, no_neg_zero) - scientific = False - point_pos = precision - else: # presentation_type in "eEgG" - figures = ( - max(precision, 1) - if presentation_type in "gG" - else precision + 1 - ) - negative, significand, exponent = _round_to_figures( - self._numerator, self._denominator, figures) - scientific = ( - presentation_type in "eE" - or exponent > 0 - or exponent + figures <= -4 - ) - point_pos = figures - 1 if scientific else -exponent - - # Get the suffix - the part following the digits, if any. - if presentation_type == "%": - suffix = "%" - elif scientific: - suffix = f"{exponent_indicator}{exponent + point_pos:+03d}" - else: - suffix = "" - - # String of output digits, padded sufficiently with zeros on the left - # so that we'll have at least one digit before the decimal point. - digits = f"{significand:0{point_pos + 1}d}" - - # Before padding, the output has the form f"{sign}{leading}{trailing}", - # where `leading` includes thousands separators if necessary and - # `trailing` includes the decimal separator where appropriate. - sign = "-" if negative else pos_sign - leading = digits[: len(digits) - point_pos] - frac_part = digits[len(digits) - point_pos :] - if trim_zeros: - frac_part = frac_part.rstrip("0") - separator = "" if trim_point and not frac_part else "." - trailing = separator + frac_part + suffix - - # Do zero padding if required. 
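The rounding and layout machinery above is what backs float-style formatting of fractions (available in the stdlib from Python 3.12); a small illustration:

    from fractions import Fraction

    print(format(Fraction(355, 113), '.6f'))   # 3.141593
    print(format(Fraction(1, 3), '.2e'))       # 3.33e-01
    print(format(Fraction(3, 2), '10.3f'))     # '     1.500' (right-aligned in width 10)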
- if zeropad: - min_leading = minimumwidth - len(sign) - len(trailing) - # When adding thousands separators, they'll be added to the - # zero-padded portion too, so we need to compensate. - leading = leading.zfill( - 3 * min_leading // 4 + 1 if thousands_sep else min_leading - ) - - # Insert thousands separators if required. - if thousands_sep: - first_pos = 1 + (len(leading) - 1) % 3 - leading = leading[:first_pos] + "".join( - thousands_sep + leading[pos : pos + 3] - for pos in range(first_pos, len(leading), 3) - ) - - # We now have a sign and a body. Pad with fill character if necessary - # and return. - body = leading + trailing - padding = fill * (minimumwidth - len(sign) - len(body)) - if align == ">": - return padding + sign + body - elif align == "<": - return sign + body + padding - elif align == "^": - half = len(padding) // 2 - return padding[:half] + sign + body + padding[half:] - else: # align == "=" - return sign + padding + body - - def __format__(self, format_spec, /): - """Format this fraction according to the given format specification.""" - - if match := _GENERAL_FORMAT_SPECIFICATION_MATCHER(format_spec): - return self._format_general(match) - - if match := _FLOAT_FORMAT_SPECIFICATION_MATCHER(format_spec): - # Refuse the temptation to guess if both alignment _and_ - # zero padding are specified. - if match["align"] is None or match["zeropad"] is None: - return self._format_float_style(match) - - raise ValueError( - f"Invalid format specifier {format_spec!r} " - f"for object of type {type(self).__name__!r}" - ) - - def _operator_fallbacks(monomorphic_operator, fallback_operator, - handle_complex=True): - """Generates forward and reverse operators given a purely-rational - operator and a function from the operator module. - - Use this like: - __op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op) - - In general, we want to implement the arithmetic operations so - that mixed-mode operations either call an implementation whose - author knew about the types of both arguments, or convert both - to the nearest built in type and do the operation there. In - Fraction, that means that we define __add__ and __radd__ as: - - def __add__(self, other): - # Both types have numerators/denominator attributes, - # so do the operation directly - if isinstance(other, (int, Fraction)): - return Fraction(self.numerator * other.denominator + - other.numerator * self.denominator, - self.denominator * other.denominator) - # float and complex don't have those operations, but we - # know about those types, so special case them. - elif isinstance(other, float): - return float(self) + other - elif isinstance(other, complex): - return complex(self) + other - # Let the other type take over. - return NotImplemented - - def __radd__(self, other): - # radd handles more types than add because there's - # nothing left to fall back to. - if isinstance(other, numbers.Rational): - return Fraction(self.numerator * other.denominator + - other.numerator * self.denominator, - self.denominator * other.denominator) - elif isinstance(other, Real): - return float(other) + float(self) - elif isinstance(other, Complex): - return complex(other) + complex(self) - return NotImplemented - - - There are 5 different cases for a mixed-type addition on - Fraction. I'll refer to all of the above code that doesn't - refer to Fraction, float, or complex as "boilerplate". 'r' - will be an instance of Fraction, which is a subtype of - Rational (r : Fraction <: Rational), and b : B <: - Complex. 
The first three involve 'r + b': - - 1. If B <: Fraction, int, float, or complex, we handle - that specially, and all is well. - 2. If Fraction falls back to the boilerplate code, and it - were to return a value from __add__, we'd miss the - possibility that B defines a more intelligent __radd__, - so the boilerplate should return NotImplemented from - __add__. In particular, we don't handle Rational - here, even though we could get an exact answer, in case - the other type wants to do something special. - 3. If B <: Fraction, Python tries B.__radd__ before - Fraction.__add__. This is ok, because it was - implemented with knowledge of Fraction, so it can - handle those instances before delegating to Real or - Complex. - - The next two situations describe 'b + r'. We assume that b - didn't know about Fraction in its implementation, and that it - uses similar boilerplate code: - - 4. If B <: Rational, then __radd_ converts both to the - builtin rational type (hey look, that's us) and - proceeds. - 5. Otherwise, __radd__ tries to find the nearest common - base ABC, and fall back to its builtin type. Since this - class doesn't subclass a concrete type, there's no - implementation to fall back to, so we need to try as - hard as possible to return an actual value, or the user - will get a TypeError. - - """ - def forward(a, b): - if isinstance(b, Fraction): - return monomorphic_operator(a, b) - elif isinstance(b, int): - return monomorphic_operator(a, Fraction(b)) - elif isinstance(b, float): - return fallback_operator(float(a), b) - elif handle_complex and isinstance(b, complex): - return fallback_operator(complex(a), b) - else: - return NotImplemented - forward.__name__ = '__' + fallback_operator.__name__ + '__' - forward.__doc__ = monomorphic_operator.__doc__ - - def reverse(b, a): - if isinstance(a, numbers.Rational): - # Includes ints. - return monomorphic_operator(Fraction(a), b) - elif isinstance(a, numbers.Real): - return fallback_operator(float(a), float(b)) - elif handle_complex and isinstance(a, numbers.Complex): - return fallback_operator(complex(a), complex(b)) - else: - return NotImplemented - reverse.__name__ = '__r' + fallback_operator.__name__ + '__' - reverse.__doc__ = monomorphic_operator.__doc__ - - return forward, reverse - - # Rational arithmetic algorithms: Knuth, TAOCP, Volume 2, 4.5.1. - # - # Assume input fractions a and b are normalized. - # - # 1) Consider addition/subtraction. - # - # Let g = gcd(da, db). Then - # - # na nb na*db ± nb*da - # a ± b == -- ± -- == ------------- == - # da db da*db - # - # na*(db//g) ± nb*(da//g) t - # == ----------------------- == - - # (da*db)//g d - # - # Now, if g > 1, we're working with smaller integers. - # - # Note, that t, (da//g) and (db//g) are pairwise coprime. - # - # Indeed, (da//g) and (db//g) share no common factors (they were - # removed) and da is coprime with na (since input fractions are - # normalized), hence (da//g) and na are coprime. By symmetry, - # (db//g) and nb are coprime too. Then, - # - # gcd(t, da//g) == gcd(na*(db//g), da//g) == 1 - # gcd(t, db//g) == gcd(nb*(da//g), db//g) == 1 - # - # Above allows us optimize reduction of the result to lowest - # terms. Indeed, - # - # g2 = gcd(t, d) == gcd(t, (da//g)*(db//g)*g) == gcd(t, g) - # - # t//g2 t//g2 - # a ± b == ----------------------- == ---------------- - # (da//g)*(db//g)*(g//g2) (da//g)*(db//g2) - # - # is a normalized fraction. This is useful because the unnormalized - # denominator d could be much larger than g. 
- # - # We should special-case g == 1 (and g2 == 1), since 60.8% of - # randomly-chosen integers are coprime: - # https://en.wikipedia.org/wiki/Coprime_integers#Probability_of_coprimality - # Note, that g2 == 1 always for fractions, obtained from floats: here - # g is a power of 2 and the unnormalized numerator t is an odd integer. - # - # 2) Consider multiplication - # - # Let g1 = gcd(na, db) and g2 = gcd(nb, da), then - # - # na*nb na*nb (na//g1)*(nb//g2) - # a*b == ----- == ----- == ----------------- - # da*db db*da (db//g1)*(da//g2) - # - # Note, that after divisions we're multiplying smaller integers. - # - # Also, the resulting fraction is normalized, because each of - # two factors in the numerator is coprime to each of the two factors - # in the denominator. - # - # Indeed, pick (na//g1). It's coprime with (da//g2), because input - # fractions are normalized. It's also coprime with (db//g1), because - # common factors are removed by g1 == gcd(na, db). - # - # As for addition/subtraction, we should special-case g1 == 1 - # and g2 == 1 for same reason. That happens also for multiplying - # rationals, obtained from floats. - - def _add(a, b): - """a + b""" - na, da = a._numerator, a._denominator - nb, db = b._numerator, b._denominator - g = math.gcd(da, db) - if g == 1: - return Fraction._from_coprime_ints(na * db + da * nb, da * db) - s = da // g - t = na * (db // g) + nb * s - g2 = math.gcd(t, g) - if g2 == 1: - return Fraction._from_coprime_ints(t, s * db) - return Fraction._from_coprime_ints(t // g2, s * (db // g2)) - - __add__, __radd__ = _operator_fallbacks(_add, operator.add) - - def _sub(a, b): - """a - b""" - na, da = a._numerator, a._denominator - nb, db = b._numerator, b._denominator - g = math.gcd(da, db) - if g == 1: - return Fraction._from_coprime_ints(na * db - da * nb, da * db) - s = da // g - t = na * (db // g) - nb * s - g2 = math.gcd(t, g) - if g2 == 1: - return Fraction._from_coprime_ints(t, s * db) - return Fraction._from_coprime_ints(t // g2, s * (db // g2)) - - __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub) - - def _mul(a, b): - """a * b""" - na, da = a._numerator, a._denominator - nb, db = b._numerator, b._denominator - g1 = math.gcd(na, db) - if g1 > 1: - na //= g1 - db //= g1 - g2 = math.gcd(nb, da) - if g2 > 1: - nb //= g2 - da //= g2 - return Fraction._from_coprime_ints(na * nb, db * da) - - __mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul) - - def _div(a, b): - """a / b""" - # Same as _mul(), with inversed b. 
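Worked instances of the two identities from the comment block above; an independent sketch using the public API, not the deleted implementation itself:

    import math
    from fractions import Fraction

    # Addition: 1/6 + 1/10, with g = gcd(6, 10) = 2.
    na, da, nb, db = 1, 6, 1, 10
    g = math.gcd(da, db)
    t = na * (db // g) + nb * (da // g)              # 5 + 3 = 8
    g2 = math.gcd(t, g)                              # second gcd stays small: gcd(8, 2) = 2
    assert Fraction(t // g2, (da // g) * (db // g2)) == Fraction(1, 6) + Fraction(1, 10)

    # Multiplication: 4/9 * 3/8, with g1 = gcd(4, 8) = 4 and g2 = gcd(3, 9) = 3.
    na, da, nb, db = 4, 9, 3, 8
    g1, g2 = math.gcd(na, db), math.gcd(nb, da)
    result = Fraction((na // g1) * (nb // g2), (db // g1) * (da // g2))
    assert result == Fraction(4, 9) * Fraction(3, 8)  # 1/6, already in lowest terms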
- nb, db = b._numerator, b._denominator - if nb == 0: - raise ZeroDivisionError('Fraction(%s, 0)' % db) - na, da = a._numerator, a._denominator - g1 = math.gcd(na, nb) - if g1 > 1: - na //= g1 - nb //= g1 - g2 = math.gcd(db, da) - if g2 > 1: - da //= g2 - db //= g2 - n, d = na * db, nb * da - if d < 0: - n, d = -n, -d - return Fraction._from_coprime_ints(n, d) - - __truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv) - - def _floordiv(a, b): - """a // b""" - return (a.numerator * b.denominator) // (a.denominator * b.numerator) - - __floordiv__, __rfloordiv__ = _operator_fallbacks(_floordiv, operator.floordiv, False) - - def _divmod(a, b): - """(a // b, a % b)""" - da, db = a.denominator, b.denominator - div, n_mod = divmod(a.numerator * db, da * b.numerator) - return div, Fraction(n_mod, da * db) - - __divmod__, __rdivmod__ = _operator_fallbacks(_divmod, divmod, False) - - def _mod(a, b): - """a % b""" - da, db = a.denominator, b.denominator - return Fraction((a.numerator * db) % (b.numerator * da), da * db) - - __mod__, __rmod__ = _operator_fallbacks(_mod, operator.mod, False) - - def __pow__(a, b): - """a ** b - - If b is not an integer, the result will be a float or complex - since roots are generally irrational. If b is an integer, the - result will be rational. - - """ - if isinstance(b, numbers.Rational): - if b.denominator == 1: - power = b.numerator - if power >= 0: - return Fraction._from_coprime_ints(a._numerator ** power, - a._denominator ** power) - elif a._numerator > 0: - return Fraction._from_coprime_ints(a._denominator ** -power, - a._numerator ** -power) - elif a._numerator == 0: - raise ZeroDivisionError('Fraction(%s, 0)' % - a._denominator ** -power) - else: - return Fraction._from_coprime_ints((-a._denominator) ** -power, - (-a._numerator) ** -power) - else: - # A fractional power will generally produce an - # irrational number. - return float(a) ** float(b) - elif isinstance(b, (float, complex)): - return float(a) ** b - else: - return NotImplemented - - def __rpow__(b, a): - """a ** b""" - if b._denominator == 1 and b._numerator >= 0: - # If a is an int, keep it that way if possible. - return a ** b._numerator - - if isinstance(a, numbers.Rational): - return Fraction(a.numerator, a.denominator) ** b - - if b._denominator == 1: - return a ** b._numerator - - return a ** float(b) - - def __pos__(a): - """+a: Coerces a subclass instance to Fraction""" - return Fraction._from_coprime_ints(a._numerator, a._denominator) - - def __neg__(a): - """-a""" - return Fraction._from_coprime_ints(-a._numerator, a._denominator) - - def __abs__(a): - """abs(a)""" - return Fraction._from_coprime_ints(abs(a._numerator), a._denominator) - - def __int__(a, _index=operator.index): - """int(a)""" - if a._numerator < 0: - return _index(-(-a._numerator // a._denominator)) - else: - return _index(a._numerator // a._denominator) - - def __trunc__(a): - """math.trunc(a)""" - if a._numerator < 0: - return -(-a._numerator // a._denominator) - else: - return a._numerator // a._denominator - - def __floor__(a): - """math.floor(a)""" - return a._numerator // a._denominator - - def __ceil__(a): - """math.ceil(a)""" - # The negations cleverly convince floordiv to return the ceiling. - return -(-a._numerator // a._denominator) - - def __round__(self, ndigits=None): - """round(self, ndigits) - - Rounds half toward even. 
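In brief, the exponentiation and rounding rules above (stdlib module assumed):

    from fractions import Fraction

    print(Fraction(2, 3) ** 2)               # Fraction(4, 9): integer powers stay exact
    print(Fraction(4, 9) ** Fraction(1, 2))  # 0.666...: fractional powers fall back to float
    print(round(Fraction(5, 2)))             # 2: a tie rounds to the even integer
    print(round(Fraction(7, 2)))             # 4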
- """ - if ndigits is None: - d = self._denominator - floor, remainder = divmod(self._numerator, d) - if remainder * 2 < d: - return floor - elif remainder * 2 > d: - return floor + 1 - # Deal with the half case: - elif floor % 2 == 0: - return floor - else: - return floor + 1 - shift = 10**abs(ndigits) - # See _operator_fallbacks.forward to check that the results of - # these operations will always be Fraction and therefore have - # round(). - if ndigits > 0: - return Fraction(round(self * shift), shift) - else: - return Fraction(round(self / shift) * shift) - - def __hash__(self): - """hash(self)""" - return _hash_algorithm(self._numerator, self._denominator) - - def __eq__(a, b): - """a == b""" - if type(b) is int: - return a._numerator == b and a._denominator == 1 - if isinstance(b, numbers.Rational): - return (a._numerator == b.numerator and - a._denominator == b.denominator) - if isinstance(b, numbers.Complex) and b.imag == 0: - b = b.real - if isinstance(b, float): - if math.isnan(b) or math.isinf(b): - # comparisons with an infinity or nan should behave in - # the same way for any finite a, so treat a as zero. - return 0.0 == b - else: - return a == a.from_float(b) - else: - # Since a doesn't know how to compare with b, let's give b - # a chance to compare itself with a. - return NotImplemented - - def _richcmp(self, other, op): - """Helper for comparison operators, for internal use only. - - Implement comparison between a Rational instance `self`, and - either another Rational instance or a float `other`. If - `other` is not a Rational instance or a float, return - NotImplemented. `op` should be one of the six standard - comparison operators. - - """ - # convert other to a Rational instance where reasonable. - if isinstance(other, numbers.Rational): - return op(self._numerator * other.denominator, - self._denominator * other.numerator) - if isinstance(other, float): - if math.isnan(other) or math.isinf(other): - return op(0.0, other) - else: - return op(self, self.from_float(other)) - else: - return NotImplemented - - def __lt__(a, b): - """a < b""" - return a._richcmp(b, operator.lt) - - def __gt__(a, b): - """a > b""" - return a._richcmp(b, operator.gt) - - def __le__(a, b): - """a <= b""" - return a._richcmp(b, operator.le) - - def __ge__(a, b): - """a >= b""" - return a._richcmp(b, operator.ge) - - def __bool__(a): - """a != 0""" - # bpo-39274: Use bool() because (a._numerator != 0) can return an - # object which is not a bool. - return bool(a._numerator) - - # support for pickling, copy, and deepcopy - - def __reduce__(self): - return (self.__class__, (self._numerator, self._denominator)) - - def __copy__(self): - if type(self) == Fraction: - return self # I'm immutable; therefore I am my own clone - return self.__class__(self._numerator, self._denominator) - - def __deepcopy__(self, memo): - if type(self) == Fraction: - return self # My components are also immutable - return self.__class__(self._numerator, self._denominator) diff --git a/Python313_13_x64_Template/Lib/ftplib.py b/Python313_13_x64_Template/Lib/ftplib.py deleted file mode 100644 index 10c5d1ea..00000000 --- a/Python313_13_x64_Template/Lib/ftplib.py +++ /dev/null @@ -1,966 +0,0 @@ -"""An FTP client class and some helper functions. - -Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. 
Reynolds - -Example: - ->>> from ftplib import FTP ->>> ftp = FTP('ftp.python.org') # connect to host, default port ->>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@ -'230 Guest login ok, access restrictions apply.' ->>> ftp.retrlines('LIST') # list directory contents -total 9 -drwxr-xr-x 8 root wheel 1024 Jan 3 1994 . -drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .. -drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin -drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc -d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming -drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib -drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub -drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr --rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg -'226 Transfer complete.' ->>> ftp.quit() -'221 Goodbye.' ->>> - -A nice test that reveals some of the network dialogue would be: -python ftplib.py -d localhost -l -p -l -""" - -# -# Changes and improvements suggested by Steve Majewski. -# Modified by Jack to work on the mac. -# Modified by Siebren to support docstrings and PASV. -# Modified by Phil Schwartz to add storbinary and storlines callbacks. -# Modified by Giampaolo Rodola' to add TLS support. -# - -import sys -import socket -from socket import _GLOBAL_DEFAULT_TIMEOUT - -__all__ = ["FTP", "error_reply", "error_temp", "error_perm", "error_proto", - "all_errors"] - -# Magic number from -MSG_OOB = 0x1 # Process data out of band - - -# The standard FTP server control port -FTP_PORT = 21 -# The sizehint parameter passed to readline() calls -MAXLINE = 8192 - - -# Exception raised when an error or invalid response is received -class Error(Exception): pass -class error_reply(Error): pass # unexpected [123]xx reply -class error_temp(Error): pass # 4xx errors -class error_perm(Error): pass # 5xx errors -class error_proto(Error): pass # response does not begin with [1-5] - - -# All exceptions (hopefully) that may be raised here and that aren't -# (always) programming errors on our side -all_errors = (Error, OSError, EOFError) - - -# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF) -CRLF = '\r\n' -B_CRLF = b'\r\n' - -# The class itself -class FTP: - '''An FTP client class. - - To create a connection, call the class using these arguments: - host, user, passwd, acct, timeout, source_address, encoding - - The first four arguments are all strings, and have default value ''. - The parameter ´timeout´ must be numeric and defaults to None if not - passed, meaning that no timeout will be set on any ftp socket(s). - If a timeout is passed, then this is now the default timeout for all ftp - socket operations for this instance. - The last parameter is the encoding of filenames, which defaults to utf-8. - - Then use self.connect() with optional host and port argument. - - To download a file, use ftp.retrlines('RETR ' + filename), - or ftp.retrbinary() with slightly different arguments. - To upload a file, use ftp.storlines() or ftp.storbinary(), - which have an open file as argument (see their definitions - below for details). - The download/upload functions first issue appropriate TYPE - and PORT or PASV commands. - ''' - - debugging = 0 - host = '' - port = FTP_PORT - maxline = MAXLINE - sock = None - file = None - welcome = None - passiveserver = True - # Disables https://bugs.python.org/issue43285 security if set to True. 
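A minimal usage sketch for the class documented above; ftp.example.com is a placeholder host, not something taken from this diff:

    from ftplib import FTP

    # Anonymous session, closed automatically by the context manager.
    with FTP('ftp.example.com', timeout=30) as ftp:
        ftp.login()              # defaults to user 'anonymous'
        ftp.retrlines('LIST')    # print a long-form directory listing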
- trust_server_pasv_ipv4_address = False - - def __init__(self, host='', user='', passwd='', acct='', - timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, - encoding='utf-8'): - """Initialization method (called by class instantiation). - Initialize host to localhost, port to standard ftp port. - Optional arguments are host (for connect()), - and user, passwd, acct (for login()). - """ - self.encoding = encoding - self.source_address = source_address - self.timeout = timeout - if host: - self.connect(host) - if user: - self.login(user, passwd, acct) - - def __enter__(self): - return self - - # Context management protocol: try to quit() if active - def __exit__(self, *args): - if self.sock is not None: - try: - self.quit() - except (OSError, EOFError): - pass - finally: - if self.sock is not None: - self.close() - - def connect(self, host='', port=0, timeout=-999, source_address=None): - '''Connect to host. Arguments are: - - host: hostname to connect to (string, default previous host) - - port: port to connect to (integer, default previous port) - - timeout: the timeout to set against the ftp socket(s) - - source_address: a 2-tuple (host, port) for the socket to bind - to as its source address before connecting. - ''' - if host != '': - self.host = host - if port > 0: - self.port = port - if timeout != -999: - self.timeout = timeout - if self.timeout is not None and not self.timeout: - raise ValueError('Non-blocking socket (timeout=0) is not supported') - if source_address is not None: - self.source_address = source_address - sys.audit("ftplib.connect", self, self.host, self.port) - self.sock = socket.create_connection((self.host, self.port), self.timeout, - source_address=self.source_address) - self.af = self.sock.family - self.file = self.sock.makefile('r', encoding=self.encoding) - self.welcome = self.getresp() - return self.welcome - - def getwelcome(self): - '''Get the welcome message from the server. - (this is read and squirreled away by connect())''' - if self.debugging: - print('*welcome*', self.sanitize(self.welcome)) - return self.welcome - - def set_debuglevel(self, level): - '''Set the debugging level. - The required argument level means: - 0: no debugging output (default) - 1: print commands and responses but not body text etc. - 2: also print raw lines read and sent before stripping CR/LF''' - self.debugging = level - debug = set_debuglevel - - def set_pasv(self, val): - '''Use passive or active mode for data transfers. - With a false argument, use the normal PORT mode, - With a true argument, use the PASV command.''' - self.passiveserver = val - - # Internal: "sanitize" a string for printing - def sanitize(self, s): - if s[:5] in {'pass ', 'PASS '}: - i = len(s.rstrip('\r\n')) - s = s[:5] + '*'*(i-5) + s[i:] - return repr(s) - - # Internal: send one line to the server, appending CRLF - def putline(self, line): - if '\r' in line or '\n' in line: - raise ValueError('an illegal newline character should not be contained') - sys.audit("ftplib.sendcmd", self, line) - line = line + CRLF - if self.debugging > 1: - print('*put*', self.sanitize(line)) - self.sock.sendall(line.encode(self.encoding)) - - # Internal: send one command to the server (through putline()) - def putcmd(self, line): - if self.debugging: print('*cmd*', self.sanitize(line)) - self.putline(line) - - # Internal: return one line from the server, stripping CRLF. 
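The newline check in putline() above is a CRLF-injection guard. It fires before anything touches the socket, so it can be demonstrated without a connection (putline is an internal helper, called here purely for illustration):

    from ftplib import FTP

    ftp = FTP()                          # not connected; the check runs first
    try:
        ftp.putline('USER a\r\nDELE b')  # a smuggled second command
    except ValueError as exc:
        print(exc)                       # rejected before reaching the wire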
- # Raise EOFError if the connection is closed - def getline(self): - line = self.file.readline(self.maxline + 1) - if len(line) > self.maxline: - raise Error("got more than %d bytes" % self.maxline) - if self.debugging > 1: - print('*get*', self.sanitize(line)) - if not line: - raise EOFError - if line[-2:] == CRLF: - line = line[:-2] - elif line[-1:] in CRLF: - line = line[:-1] - return line - - # Internal: get a response from the server, which may possibly - # consist of multiple lines. Return a single string with no - # trailing CRLF. If the response consists of multiple lines, - # these are separated by '\n' characters in the string - def getmultiline(self): - line = self.getline() - if line[3:4] == '-': - code = line[:3] - while 1: - nextline = self.getline() - line = line + ('\n' + nextline) - if nextline[:3] == code and \ - nextline[3:4] != '-': - break - return line - - # Internal: get a response from the server. - # Raise various errors if the response indicates an error - def getresp(self): - resp = self.getmultiline() - if self.debugging: - print('*resp*', self.sanitize(resp)) - self.lastresp = resp[:3] - c = resp[:1] - if c in {'1', '2', '3'}: - return resp - if c == '4': - raise error_temp(resp) - if c == '5': - raise error_perm(resp) - raise error_proto(resp) - - def voidresp(self): - """Expect a response beginning with '2'.""" - resp = self.getresp() - if resp[:1] != '2': - raise error_reply(resp) - return resp - - def abort(self): - '''Abort a file transfer. Uses out-of-band data. - This does not follow the procedure from the RFC to send Telnet - IP and Synch; that doesn't seem to work with the servers I've - tried. Instead, just send the ABOR command as OOB data.''' - line = b'ABOR' + B_CRLF - if self.debugging > 1: - print('*put urgent*', self.sanitize(line)) - self.sock.sendall(line, MSG_OOB) - resp = self.getmultiline() - if resp[:3] not in {'426', '225', '226'}: - raise error_proto(resp) - return resp - - def sendcmd(self, cmd): - '''Send a command and return the response.''' - self.putcmd(cmd) - return self.getresp() - - def voidcmd(self, cmd): - """Send a command and expect a response beginning with '2'.""" - self.putcmd(cmd) - return self.voidresp() - - def sendport(self, host, port): - '''Send a PORT command with the current host and the given - port number. 
- ''' - hbytes = host.split('.') - pbytes = [repr(port//256), repr(port%256)] - bytes = hbytes + pbytes - cmd = 'PORT ' + ','.join(bytes) - return self.voidcmd(cmd) - - def sendeprt(self, host, port): - '''Send an EPRT command with the current host and the given port number.''' - af = 0 - if self.af == socket.AF_INET: - af = 1 - if self.af == socket.AF_INET6: - af = 2 - if af == 0: - raise error_proto('unsupported address family') - fields = ['', repr(af), host, repr(port), ''] - cmd = 'EPRT ' + '|'.join(fields) - return self.voidcmd(cmd) - - def makeport(self): - '''Create a new socket and send a PORT command for it.''' - sock = socket.create_server(("", 0), family=self.af, backlog=1) - port = sock.getsockname()[1] # Get proper port - host = self.sock.getsockname()[0] # Get proper host - if self.af == socket.AF_INET: - resp = self.sendport(host, port) - else: - resp = self.sendeprt(host, port) - if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT: - sock.settimeout(self.timeout) - return sock - - def makepasv(self): - """Internal: Does the PASV or EPSV handshake -> (address, port)""" - if self.af == socket.AF_INET: - untrusted_host, port = parse227(self.sendcmd('PASV')) - if self.trust_server_pasv_ipv4_address: - host = untrusted_host - else: - host = self.sock.getpeername()[0] - else: - host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername()) - return host, port - - def ntransfercmd(self, cmd, rest=None): - """Initiate a transfer over the data connection. - - If the transfer is active, send a port command and the - transfer command, and accept the connection. If the server is - passive, send a pasv command, connect to it, and start the - transfer command. Either way, return the socket for the - connection and the expected size of the transfer. The - expected size may be None if it could not be determined. - - Optional `rest' argument can be a string that is sent as the - argument to a REST command. This is essentially a server - marker used to tell the server to skip over any data up to the - given marker. - """ - size = None - if self.passiveserver: - host, port = self.makepasv() - conn = socket.create_connection((host, port), self.timeout, - source_address=self.source_address) - try: - if rest is not None: - self.sendcmd("REST %s" % rest) - resp = self.sendcmd(cmd) - # Some servers apparently send a 200 reply to - # a LIST or STOR command, before the 150 reply - # (and way before the 226 reply). This seems to - # be in violation of the protocol (which only allows - # 1xx or error messages for LIST), so we just discard - # this response. - if resp[0] == '2': - resp = self.getresp() - if resp[0] != '1': - raise error_reply(resp) - except: - conn.close() - raise - else: - with self.makeport() as sock: - if rest is not None: - self.sendcmd("REST %s" % rest) - resp = self.sendcmd(cmd) - # See above. 
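How sendport() above serializes an IPv4 address and port into the PORT argument, re-derived standalone:

    host, port = '192.168.0.5', 50001
    hbytes = host.split('.')
    pbytes = [repr(port // 256), repr(port % 256)]   # high byte, low byte
    print('PORT ' + ','.join(hbytes + pbytes))       # PORT 192,168,0,5,195,81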
- if resp[0] == '2': - resp = self.getresp() - if resp[0] != '1': - raise error_reply(resp) - conn, sockaddr = sock.accept() - if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT: - conn.settimeout(self.timeout) - if resp[:3] == '150': - # this is conditional in case we received a 125 - size = parse150(resp) - return conn, size - - def transfercmd(self, cmd, rest=None): - """Like ntransfercmd() but returns only the socket.""" - return self.ntransfercmd(cmd, rest)[0] - - def login(self, user = '', passwd = '', acct = ''): - '''Login, default anonymous.''' - if not user: - user = 'anonymous' - if not passwd: - passwd = '' - if not acct: - acct = '' - if user == 'anonymous' and passwd in {'', '-'}: - # If there is no anonymous ftp password specified - # then we'll just use anonymous@ - # We don't send any other thing because: - # - We want to remain anonymous - # - We want to stop SPAM - # - We don't want to let ftp sites to discriminate by the user, - # host or country. - passwd = passwd + 'anonymous@' - resp = self.sendcmd('USER ' + user) - if resp[0] == '3': - resp = self.sendcmd('PASS ' + passwd) - if resp[0] == '3': - resp = self.sendcmd('ACCT ' + acct) - if resp[0] != '2': - raise error_reply(resp) - return resp - - def retrbinary(self, cmd, callback, blocksize=8192, rest=None): - """Retrieve data in binary mode. A new port is created for you. - - Args: - cmd: A RETR command. - callback: A single parameter callable to be called on each - block of data read. - blocksize: The maximum number of bytes to read from the - socket at one time. [default: 8192] - rest: Passed to transfercmd(). [default: None] - - Returns: - The response code. - """ - self.voidcmd('TYPE I') - with self.transfercmd(cmd, rest) as conn: - while data := conn.recv(blocksize): - callback(data) - # shutdown ssl layer - if _SSLSocket is not None and isinstance(conn, _SSLSocket): - conn.unwrap() - return self.voidresp() - - def retrlines(self, cmd, callback = None): - """Retrieve data in line mode. A new port is created for you. - - Args: - cmd: A RETR, LIST, or NLST command. - callback: An optional single parameter callable that is called - for each line with the trailing CRLF stripped. - [default: print_line()] - - Returns: - The response code. - """ - if callback is None: - callback = print_line - resp = self.sendcmd('TYPE A') - with self.transfercmd(cmd) as conn, \ - conn.makefile('r', encoding=self.encoding) as fp: - while 1: - line = fp.readline(self.maxline + 1) - if len(line) > self.maxline: - raise Error("got more than %d bytes" % self.maxline) - if self.debugging > 2: - print('*retr*', repr(line)) - if not line: - break - if line[-2:] == CRLF: - line = line[:-2] - elif line[-1:] == '\n': - line = line[:-1] - callback(line) - # shutdown ssl layer - if _SSLSocket is not None and isinstance(conn, _SSLSocket): - conn.unwrap() - return self.voidresp() - - def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None): - """Store a file in binary mode. A new port is created for you. - - Args: - cmd: A STOR command. - fp: A file-like object with a read(num_bytes) method. - blocksize: The maximum data size to read from fp and send over - the connection at once. [default: 8192] - callback: An optional single parameter callable that is called on - each block of data after it is sent. [default: None] - rest: Passed to transfercmd(). [default: None] - - Returns: - The response code. 
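The transfer helpers above in use; the host and file names are placeholders:

    from ftplib import FTP

    with FTP('ftp.example.com') as ftp:
        ftp.login()
        with open('remote.bin', 'wb') as fh:
            ftp.retrbinary('RETR remote.bin', fh.write)   # download, 8 KiB blocks by default
        with open('local.bin', 'rb') as fh:
            ftp.storbinary('STOR upload.bin', fh)         # upload from a file object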
- """ - self.voidcmd('TYPE I') - with self.transfercmd(cmd, rest) as conn: - while buf := fp.read(blocksize): - conn.sendall(buf) - if callback: - callback(buf) - # shutdown ssl layer - if _SSLSocket is not None and isinstance(conn, _SSLSocket): - conn.unwrap() - return self.voidresp() - - def storlines(self, cmd, fp, callback=None): - """Store a file in line mode. A new port is created for you. - - Args: - cmd: A STOR command. - fp: A file-like object with a readline() method. - callback: An optional single parameter callable that is called on - each line after it is sent. [default: None] - - Returns: - The response code. - """ - self.voidcmd('TYPE A') - with self.transfercmd(cmd) as conn: - while 1: - buf = fp.readline(self.maxline + 1) - if len(buf) > self.maxline: - raise Error("got more than %d bytes" % self.maxline) - if not buf: - break - if buf[-2:] != B_CRLF: - if buf[-1] in B_CRLF: buf = buf[:-1] - buf = buf + B_CRLF - conn.sendall(buf) - if callback: - callback(buf) - # shutdown ssl layer - if _SSLSocket is not None and isinstance(conn, _SSLSocket): - conn.unwrap() - return self.voidresp() - - def acct(self, password): - '''Send new account name.''' - cmd = 'ACCT ' + password - return self.voidcmd(cmd) - - def nlst(self, *args): - '''Return a list of files in a given directory (default the current).''' - cmd = 'NLST' - for arg in args: - cmd = cmd + (' ' + arg) - files = [] - self.retrlines(cmd, files.append) - return files - - def dir(self, *args): - '''List a directory in long form. - By default list current directory to stdout. - Optional last argument is callback function; all - non-empty arguments before it are concatenated to the - LIST command. (This *should* only be used for a pathname.)''' - cmd = 'LIST' - func = None - if args[-1:] and not isinstance(args[-1], str): - args, func = args[:-1], args[-1] - for arg in args: - if arg: - cmd = cmd + (' ' + arg) - self.retrlines(cmd, func) - - def mlsd(self, path="", facts=[]): - '''List a directory in a standardized format by using MLSD - command (RFC-3659). If path is omitted the current directory - is assumed. "facts" is a list of strings representing the type - of information desired (e.g. ["type", "size", "perm"]). - - Return a generator object yielding a tuple of two elements - for every file found in path. - First element is the file name, the second one is a dictionary - including a variable number of "facts" depending on the server - and whether "facts" argument has been provided. - ''' - if facts: - self.sendcmd("OPTS MLST " + ";".join(facts) + ";") - if path: - cmd = "MLSD %s" % path - else: - cmd = "MLSD" - lines = [] - self.retrlines(cmd, lines.append) - for line in lines: - facts_found, _, name = line.rstrip(CRLF).partition(' ') - entry = {} - for fact in facts_found[:-1].split(";"): - key, _, value = fact.partition("=") - entry[key.lower()] = value - yield (name, entry) - - def rename(self, fromname, toname): - '''Rename a file.''' - resp = self.sendcmd('RNFR ' + fromname) - if resp[0] != '3': - raise error_reply(resp) - return self.voidcmd('RNTO ' + toname) - - def delete(self, filename): - '''Delete a file.''' - resp = self.sendcmd('DELE ' + filename) - if resp[:3] in {'250', '200'}: - return resp - else: - raise error_reply(resp) - - def cwd(self, dirname): - '''Change to a directory.''' - if dirname == '..': - try: - return self.voidcmd('CDUP') - except error_perm as msg: - if msg.args[0][:3] != '500': - raise - elif dirname == '': - dirname = '.' 
# does nothing, but could return error - cmd = 'CWD ' + dirname - return self.voidcmd(cmd) - - def size(self, filename): - '''Retrieve the size of a file.''' - # The SIZE command is defined in RFC-3659 - resp = self.sendcmd('SIZE ' + filename) - if resp[:3] == '213': - s = resp[3:].strip() - return int(s) - - def mkd(self, dirname): - '''Make a directory, return its full pathname.''' - resp = self.voidcmd('MKD ' + dirname) - # fix around non-compliant implementations such as IIS shipped - # with Windows server 2003 - if not resp.startswith('257'): - return '' - return parse257(resp) - - def rmd(self, dirname): - '''Remove a directory.''' - return self.voidcmd('RMD ' + dirname) - - def pwd(self): - '''Return current working directory.''' - resp = self.voidcmd('PWD') - # fix around non-compliant implementations such as IIS shipped - # with Windows server 2003 - if not resp.startswith('257'): - return '' - return parse257(resp) - - def quit(self): - '''Quit, and close the connection.''' - resp = self.voidcmd('QUIT') - self.close() - return resp - - def close(self): - '''Close the connection without assuming anything about it.''' - try: - file = self.file - self.file = None - if file is not None: - file.close() - finally: - sock = self.sock - self.sock = None - if sock is not None: - sock.close() - -try: - import ssl -except ImportError: - _SSLSocket = None -else: - _SSLSocket = ssl.SSLSocket - - class FTP_TLS(FTP): - '''A FTP subclass which adds TLS support to FTP as described - in RFC-4217. - - Connect as usual to port 21 implicitly securing the FTP control - connection before authenticating. - - Securing the data connection requires user to explicitly ask - for it by calling prot_p() method. - - Usage example: - >>> from ftplib import FTP_TLS - >>> ftps = FTP_TLS('ftp.python.org') - >>> ftps.login() # login anonymously previously securing control channel - '230 Guest login ok, access restrictions apply.' - >>> ftps.prot_p() # switch to secure data connection - '200 Protection level set to P' - >>> ftps.retrlines('LIST') # list directory content securely - total 9 - drwxr-xr-x 8 root wheel 1024 Jan 3 1994 . - drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .. - drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin - drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc - d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming - drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib - drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub - drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr - -rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg - '226 Transfer complete.' - >>> ftps.quit() - '221 Goodbye.' 
- >>> - ''' - - def __init__(self, host='', user='', passwd='', acct='', - *, context=None, timeout=_GLOBAL_DEFAULT_TIMEOUT, - source_address=None, encoding='utf-8'): - if context is None: - context = ssl._create_stdlib_context() - self.context = context - self._prot_p = False - super().__init__(host, user, passwd, acct, - timeout, source_address, encoding=encoding) - - def login(self, user='', passwd='', acct='', secure=True): - if secure and not isinstance(self.sock, ssl.SSLSocket): - self.auth() - return super().login(user, passwd, acct) - - def auth(self): - '''Set up secure control connection by using TLS/SSL.''' - if isinstance(self.sock, ssl.SSLSocket): - raise ValueError("Already using TLS") - if self.context.protocol >= ssl.PROTOCOL_TLS: - resp = self.voidcmd('AUTH TLS') - else: - resp = self.voidcmd('AUTH SSL') - self.sock = self.context.wrap_socket(self.sock, server_hostname=self.host) - self.file = self.sock.makefile(mode='r', encoding=self.encoding) - return resp - - def ccc(self): - '''Switch back to a clear-text control connection.''' - if not isinstance(self.sock, ssl.SSLSocket): - raise ValueError("not using TLS") - resp = self.voidcmd('CCC') - self.sock = self.sock.unwrap() - return resp - - def prot_p(self): - '''Set up secure data connection.''' - # PROT defines whether or not the data channel is to be protected. - # Though RFC-2228 defines four possible protection levels, - # RFC-4217 only recommends two, Clear and Private. - # Clear (PROT C) means that no security is to be used on the - # data-channel, Private (PROT P) means that the data-channel - # should be protected by TLS. - # PBSZ command MUST still be issued, but must have a parameter of - # '0' to indicate that no buffering is taking place and the data - # connection should not be encapsulated. - self.voidcmd('PBSZ 0') - resp = self.voidcmd('PROT P') - self._prot_p = True - return resp - - def prot_c(self): - '''Set up clear text data connection.''' - resp = self.voidcmd('PROT C') - self._prot_p = False - return resp - - # --- Overridden FTP methods - - def ntransfercmd(self, cmd, rest=None): - conn, size = super().ntransfercmd(cmd, rest) - if self._prot_p: - conn = self.context.wrap_socket(conn, - server_hostname=self.host) - return conn, size - - def abort(self): - # overridden as we can't pass MSG_OOB flag to sendall() - line = b'ABOR' + B_CRLF - self.sock.sendall(line) - resp = self.getmultiline() - if resp[:3] not in {'426', '225', '226'}: - raise error_proto(resp) - return resp - - __all__.append('FTP_TLS') - all_errors = (Error, OSError, EOFError, ssl.SSLError) - - -_150_re = None - -def parse150(resp): - '''Parse the '150' response for a RETR request. - Returns the expected transfer size or None; size is not guaranteed to - be present in the 150 message. - ''' - if resp[:3] != '150': - raise error_reply(resp) - global _150_re - if _150_re is None: - import re - _150_re = re.compile( - r"150 .* \((\d+) bytes\)", re.IGNORECASE | re.ASCII) - m = _150_re.match(resp) - if not m: - return None - return int(m.group(1)) - - -_227_re = None - -def parse227(resp): - '''Parse the '227' response for a PASV request. 
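The lazily compiled pattern in parse150() above, exercised on a typical reply:

    import re

    _150_re = re.compile(r"150 .* \((\d+) bytes\)", re.IGNORECASE | re.ASCII)
    m = _150_re.match('150 Opening BINARY mode data connection (4096 bytes).')
    print(int(m.group(1)))   # 4096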
- Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)' - Return ('host.addr.as.numbers', port#) tuple.''' - if resp[:3] != '227': - raise error_reply(resp) - global _227_re - if _227_re is None: - import re - _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)', re.ASCII) - m = _227_re.search(resp) - if not m: - raise error_proto(resp) - numbers = m.groups() - host = '.'.join(numbers[:4]) - port = (int(numbers[4]) << 8) + int(numbers[5]) - return host, port - - -def parse229(resp, peer): - '''Parse the '229' response for an EPSV request. - Raises error_proto if it does not contain '(|||port|)' - Return ('host.addr.as.numbers', port#) tuple.''' - if resp[:3] != '229': - raise error_reply(resp) - left = resp.find('(') - if left < 0: raise error_proto(resp) - right = resp.find(')', left + 1) - if right < 0: - raise error_proto(resp) # should contain '(|||port|)' - if resp[left + 1] != resp[right - 1]: - raise error_proto(resp) - parts = resp[left + 1:right].split(resp[left+1]) - if len(parts) != 5: - raise error_proto(resp) - host = peer[0] - port = int(parts[3]) - return host, port - - -def parse257(resp): - '''Parse the '257' response for a MKD or PWD request. - This is a response to a MKD or PWD request: a directory name. - Returns the directoryname in the 257 reply.''' - if resp[:3] != '257': - raise error_reply(resp) - if resp[3:5] != ' "': - return '' # Not compliant to RFC 959, but UNIX ftpd does this - dirname = '' - i = 5 - n = len(resp) - while i < n: - c = resp[i] - i = i+1 - if c == '"': - if i >= n or resp[i] != '"': - break - i = i+1 - dirname = dirname + c - return dirname - - -def print_line(line): - '''Default retrlines callback to print a line.''' - print(line) - - -def ftpcp(source, sourcename, target, targetname = '', type = 'I'): - '''Copy file from one FTP-instance to another.''' - if not targetname: - targetname = sourcename - type = 'TYPE ' + type - source.voidcmd(type) - target.voidcmd(type) - sourcehost, sourceport = parse227(source.sendcmd('PASV')) - target.sendport(sourcehost, sourceport) - # RFC 959: the user must "listen" [...] BEFORE sending the - # transfer request. - # So: STOR before RETR, because here the target is a "user". - treply = target.sendcmd('STOR ' + targetname) - if treply[:3] not in {'125', '150'}: - raise error_proto # RFC 959 - sreply = source.sendcmd('RETR ' + sourcename) - if sreply[:3] not in {'125', '150'}: - raise error_proto # RFC 959 - source.voidresp() - target.voidresp() - - -def test(): - '''Test program. - Usage: ftplib [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ... 
- - Options: - -d increase debugging level - -r[file] set alternate ~/.netrc file - - Commands: - -l[dir] list directory - -d[dir] change the current directory - -p toggle passive and active mode - file retrieve the file and write it to stdout - ''' - - if len(sys.argv) < 2: - print(test.__doc__) - sys.exit(0) - - import netrc - - debugging = 0 - rcfile = None - while sys.argv[1] == '-d': - debugging = debugging+1 - del sys.argv[1] - if sys.argv[1][:2] == '-r': - # get name of alternate ~/.netrc file: - rcfile = sys.argv[1][2:] - del sys.argv[1] - host = sys.argv[1] - ftp = FTP(host) - ftp.set_debuglevel(debugging) - userid = passwd = acct = '' - try: - netrcobj = netrc.netrc(rcfile) - except OSError: - if rcfile is not None: - print("Could not open account file -- using anonymous login.", - file=sys.stderr) - else: - try: - userid, acct, passwd = netrcobj.authenticators(host) - except (KeyError, TypeError): - # no account for host - print("No account -- using anonymous login.", file=sys.stderr) - ftp.login(userid, passwd, acct) - for file in sys.argv[2:]: - if file[:2] == '-l': - ftp.dir(file[2:]) - elif file[:2] == '-d': - cmd = 'CWD' - if file[2:]: cmd = cmd + ' ' + file[2:] - resp = ftp.sendcmd(cmd) - elif file == '-p': - ftp.set_pasv(not ftp.passiveserver) - else: - ftp.retrbinary('RETR ' + file, \ - sys.stdout.buffer.write, 1024) - sys.stdout.buffer.flush() - sys.stdout.flush() - ftp.quit() - - -if __name__ == '__main__': - test() diff --git a/Python313_13_x64_Template/Lib/functools.py b/Python313_13_x64_Template/Lib/functools.py deleted file mode 100644 index 0dee17e5..00000000 --- a/Python313_13_x64_Template/Lib/functools.py +++ /dev/null @@ -1,1036 +0,0 @@ -"""functools.py - Tools for working with functions and callable objects -""" -# Python module wrapper for _functools C module -# to allow utilities written in Python to be added -# to the functools module. -# Written by Nick Coghlan , -# Raymond Hettinger , -# and Łukasz Langa . -# Copyright (C) 2006-2013 Python Software Foundation. 
-# See C source code for _functools credits/copyright - -__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES', - 'total_ordering', 'cache', 'cmp_to_key', 'lru_cache', 'reduce', - 'partial', 'partialmethod', 'singledispatch', 'singledispatchmethod', - 'cached_property'] - -from abc import get_cache_token -from collections import namedtuple -# import types, weakref # Deferred to single_dispatch() -from reprlib import recursive_repr -from _thread import RLock - -# Avoid importing types, so we can speedup import time -GenericAlias = type(list[int]) - -################################################################################ -### update_wrapper() and wraps() decorator -################################################################################ - -# update_wrapper() and wraps() are tools to help write -# wrapper functions that can handle naive introspection - -WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__', - '__annotations__', '__type_params__') -WRAPPER_UPDATES = ('__dict__',) -def update_wrapper(wrapper, - wrapped, - assigned = WRAPPER_ASSIGNMENTS, - updated = WRAPPER_UPDATES): - """Update a wrapper function to look like the wrapped function - - wrapper is the function to be updated - wrapped is the original function - assigned is a tuple naming the attributes assigned directly - from the wrapped function to the wrapper function (defaults to - functools.WRAPPER_ASSIGNMENTS) - updated is a tuple naming the attributes of the wrapper that - are updated with the corresponding attribute from the wrapped - function (defaults to functools.WRAPPER_UPDATES) - """ - for attr in assigned: - try: - value = getattr(wrapped, attr) - except AttributeError: - pass - else: - setattr(wrapper, attr, value) - for attr in updated: - getattr(wrapper, attr).update(getattr(wrapped, attr, {})) - # Issue #17482: set __wrapped__ last so we don't inadvertently copy it - # from the wrapped function when updating __dict__ - wrapper.__wrapped__ = wrapped - # Return the wrapper so this can be used as a decorator via partial() - return wrapper - -def wraps(wrapped, - assigned = WRAPPER_ASSIGNMENTS, - updated = WRAPPER_UPDATES): - """Decorator factory to apply update_wrapper() to a wrapper function - - Returns a decorator that invokes update_wrapper() with the decorated - function as the wrapper argument and the arguments to wraps() as the - remaining arguments. Default arguments are as for update_wrapper(). - This is a convenience function to simplify applying partial() to - update_wrapper(). - """ - return partial(update_wrapper, wrapped=wrapped, - assigned=assigned, updated=updated) - - -################################################################################ -### total_ordering class decorator -################################################################################ - -# The total ordering functions all invoke the root magic method directly -# rather than using the corresponding operator. This avoids possible -# infinite recursion that could occur when the operator dispatch logic -# detects a NotImplemented result and then calls a reflected method. - -def _gt_from_lt(self, other): - 'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).' - op_result = type(self).__lt__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result and self != other - -def _le_from_lt(self, other): - 'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).' 
- op_result = type(self).__lt__(self, other) - if op_result is NotImplemented: - return op_result - return op_result or self == other - -def _ge_from_lt(self, other): - 'Return a >= b. Computed by @total_ordering from (not a < b).' - op_result = type(self).__lt__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result - -def _ge_from_le(self, other): - 'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).' - op_result = type(self).__le__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result or self == other - -def _lt_from_le(self, other): - 'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).' - op_result = type(self).__le__(self, other) - if op_result is NotImplemented: - return op_result - return op_result and self != other - -def _gt_from_le(self, other): - 'Return a > b. Computed by @total_ordering from (not a <= b).' - op_result = type(self).__le__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result - -def _lt_from_gt(self, other): - 'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).' - op_result = type(self).__gt__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result and self != other - -def _ge_from_gt(self, other): - 'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).' - op_result = type(self).__gt__(self, other) - if op_result is NotImplemented: - return op_result - return op_result or self == other - -def _le_from_gt(self, other): - 'Return a <= b. Computed by @total_ordering from (not a > b).' - op_result = type(self).__gt__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result - -def _le_from_ge(self, other): - 'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).' - op_result = type(self).__ge__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result or self == other - -def _gt_from_ge(self, other): - 'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).' - op_result = type(self).__ge__(self, other) - if op_result is NotImplemented: - return op_result - return op_result and self != other - -def _lt_from_ge(self, other): - 'Return a < b. Computed by @total_ordering from (not a >= b).' - op_result = type(self).__ge__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result - -_convert = { - '__lt__': [('__gt__', _gt_from_lt), - ('__le__', _le_from_lt), - ('__ge__', _ge_from_lt)], - '__le__': [('__ge__', _ge_from_le), - ('__lt__', _lt_from_le), - ('__gt__', _gt_from_le)], - '__gt__': [('__lt__', _lt_from_gt), - ('__ge__', _ge_from_gt), - ('__le__', _le_from_gt)], - '__ge__': [('__le__', _le_from_ge), - ('__gt__', _gt_from_ge), - ('__lt__', _lt_from_ge)] -} - -def total_ordering(cls): - """Class decorator that fills in missing ordering methods""" - # Find user-defined comparisons (not those inherited from object). 
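
The _convert table above maps each root comparison to the helper functions that derive the missing operators. A minimal sketch of the observable effect (the Version class is a hypothetical example, not part of this file): only __eq__ and __lt__ are written out, and total_ordering fills in the rest.

    from functools import total_ordering

    @total_ordering
    class Version:
        # Hypothetical example class; total_ordering derives
        # __le__, __gt__ and __ge__ from __lt__ and __eq__.
        def __init__(self, major, minor):
            self.major, self.minor = major, minor
        def __eq__(self, other):
            return (self.major, self.minor) == (other.major, other.minor)
        def __lt__(self, other):
            return (self.major, self.minor) < (other.major, other.minor)

    assert Version(1, 0) <= Version(1, 1)   # derived via _le_from_lt
    assert Version(2, 0) >= Version(1, 9)   # derived via _ge_from_lt
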
- roots = {op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)} - if not roots: - raise ValueError('must define at least one ordering operation: < > <= >=') - root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ - for opname, opfunc in _convert[root]: - if opname not in roots: - opfunc.__name__ = opname - setattr(cls, opname, opfunc) - return cls - - -################################################################################ -### cmp_to_key() function converter -################################################################################ - -def cmp_to_key(mycmp): - """Convert a cmp= function into a key= function""" - class K(object): - __slots__ = ['obj'] - def __init__(self, obj): - self.obj = obj - def __lt__(self, other): - return mycmp(self.obj, other.obj) < 0 - def __gt__(self, other): - return mycmp(self.obj, other.obj) > 0 - def __eq__(self, other): - return mycmp(self.obj, other.obj) == 0 - def __le__(self, other): - return mycmp(self.obj, other.obj) <= 0 - def __ge__(self, other): - return mycmp(self.obj, other.obj) >= 0 - __hash__ = None - return K - -try: - from _functools import cmp_to_key -except ImportError: - pass - - -################################################################################ -### reduce() sequence to a single item -################################################################################ - -_initial_missing = object() - -def reduce(function, sequence, initial=_initial_missing): - """ - reduce(function, iterable[, initial], /) -> value - - Apply a function of two arguments cumulatively to the items of an iterable, from left to right. - - This effectively reduces the iterable to a single value. If initial is present, - it is placed before the items of the iterable in the calculation, and serves as - a default when the iterable is empty. - - For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) - calculates ((((1 + 2) + 3) + 4) + 5). - """ - - it = iter(sequence) - - if initial is _initial_missing: - try: - value = next(it) - except StopIteration: - raise TypeError( - "reduce() of empty iterable with no initial value") from None - else: - value = initial - - for element in it: - value = function(value, element) - - return value - -try: - from _functools import reduce -except ImportError: - pass - - -################################################################################ -### partial() argument application -################################################################################ - -# Purely functional, no descriptor behaviour -class partial: - """New function with partial application of the given arguments - and keywords. 
- """ - - __slots__ = "func", "args", "keywords", "__dict__", "__weakref__" - - def __new__(cls, func, /, *args, **keywords): - if not callable(func): - raise TypeError("the first argument must be callable") - - if isinstance(func, partial): - args = func.args + args - keywords = {**func.keywords, **keywords} - func = func.func - - self = super(partial, cls).__new__(cls) - - self.func = func - self.args = args - self.keywords = keywords - return self - - def __call__(self, /, *args, **keywords): - keywords = {**self.keywords, **keywords} - return self.func(*self.args, *args, **keywords) - - @recursive_repr() - def __repr__(self): - cls = type(self) - qualname = cls.__qualname__ - module = cls.__module__ - args = [repr(self.func)] - args.extend(repr(x) for x in self.args) - args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items()) - return f"{module}.{qualname}({', '.join(args)})" - - def __get__(self, obj, objtype=None): - if obj is None: - return self - import warnings - warnings.warn('functools.partial will be a method descriptor in ' - 'future Python versions; wrap it in staticmethod() ' - 'if you want to preserve the old behavior', - FutureWarning, 2) - return self - - def __reduce__(self): - return type(self), (self.func,), (self.func, self.args, - self.keywords or None, self.__dict__ or None) - - def __setstate__(self, state): - if not isinstance(state, tuple): - raise TypeError("argument to __setstate__ must be a tuple") - if len(state) != 4: - raise TypeError(f"expected 4 items in state, got {len(state)}") - func, args, kwds, namespace = state - if (not callable(func) or not isinstance(args, tuple) or - (kwds is not None and not isinstance(kwds, dict)) or - (namespace is not None and not isinstance(namespace, dict))): - raise TypeError("invalid partial state") - - args = tuple(args) # just in case it's a subclass - if kwds is None: - kwds = {} - elif type(kwds) is not dict: # XXX does it need to be *exactly* dict? - kwds = dict(kwds) - if namespace is None: - namespace = {} - - self.__dict__ = namespace - self.func = func - self.args = args - self.keywords = kwds - - __class_getitem__ = classmethod(GenericAlias) - - -try: - from _functools import partial -except ImportError: - pass - -# Descriptor version -class partialmethod(object): - """Method descriptor with partial application of the given arguments - and keywords. - - Supports wrapping existing descriptors and handles non-descriptor - callables as instance methods. 
- """ - - def __init__(self, func, /, *args, **keywords): - if not callable(func) and not hasattr(func, "__get__"): - raise TypeError("{!r} is not callable or a descriptor" - .format(func)) - - # func could be a descriptor like classmethod which isn't callable, - # so we can't inherit from partial (it verifies func is callable) - if isinstance(func, partialmethod): - # flattening is mandatory in order to place cls/self before all - # other arguments - # it's also more efficient since only one function will be called - self.func = func.func - self.args = func.args + args - self.keywords = {**func.keywords, **keywords} - else: - self.func = func - self.args = args - self.keywords = keywords - - def __repr__(self): - cls = type(self) - module = cls.__module__ - qualname = cls.__qualname__ - args = [repr(self.func)] - args.extend(map(repr, self.args)) - args.extend(f"{k}={v!r}" for k, v in self.keywords.items()) - return f"{module}.{qualname}({', '.join(args)})" - - def _make_unbound_method(self): - def _method(cls_or_self, /, *args, **keywords): - keywords = {**self.keywords, **keywords} - return self.func(cls_or_self, *self.args, *args, **keywords) - _method.__isabstractmethod__ = self.__isabstractmethod__ - _method.__partialmethod__ = self - return _method - - def __get__(self, obj, cls=None): - get = getattr(self.func, "__get__", None) - result = None - if get is not None and not isinstance(self.func, partial): - new_func = get(obj, cls) - if new_func is not self.func: - # Assume __get__ returning something new indicates the - # creation of an appropriate callable - result = partial(new_func, *self.args, **self.keywords) - try: - result.__self__ = new_func.__self__ - except AttributeError: - pass - if result is None: - # If the underlying descriptor didn't do anything, treat this - # like an instance method - result = self._make_unbound_method().__get__(obj, cls) - return result - - @property - def __isabstractmethod__(self): - return getattr(self.func, "__isabstractmethod__", False) - - __class_getitem__ = classmethod(GenericAlias) - - -# Helper functions - -def _unwrap_partial(func): - while isinstance(func, partial): - func = func.func - return func - -def _unwrap_partialmethod(func): - prev = None - while func is not prev: - prev = func - while isinstance(getattr(func, "__partialmethod__", None), partialmethod): - func = func.__partialmethod__ - while isinstance(func, partialmethod): - func = getattr(func, 'func') - func = _unwrap_partial(func) - return func - -################################################################################ -### LRU Cache function decorator -################################################################################ - -_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) - -class _HashedSeq(list): - """ This class guarantees that hash() will be called no more than once - per element. This is important because the lru_cache() will hash - the key multiple times on a cache miss. - - """ - - __slots__ = 'hashvalue' - - def __init__(self, tup, hash=hash): - self[:] = tup - self.hashvalue = hash(tup) - - def __hash__(self): - return self.hashvalue - -def _make_key(args, kwds, typed, - kwd_mark = (object(),), - fasttypes = {int, str}, - tuple=tuple, type=type, len=len): - """Make a cache key from optionally typed positional and keyword arguments - - The key is constructed in a way that is flat as possible rather than - as a nested structure that would take more memory. 
- - If there is only a single argument and its data type is known to cache - its hash value, then that argument is returned without a wrapper. This - saves space and improves lookup speed. - - """ - # All of code below relies on kwds preserving the order input by the user. - # Formerly, we sorted() the kwds before looping. The new way is *much* - # faster; however, it means that f(x=1, y=2) will now be treated as a - # distinct call from f(y=2, x=1) which will be cached separately. - key = args - if kwds: - key += kwd_mark - for item in kwds.items(): - key += item - if typed: - key += tuple(type(v) for v in args) - if kwds: - key += tuple(type(v) for v in kwds.values()) - elif len(key) == 1 and type(key[0]) in fasttypes: - return key[0] - return _HashedSeq(key) - -def lru_cache(maxsize=128, typed=False): - """Least-recently-used cache decorator. - - If *maxsize* is set to None, the LRU features are disabled and the cache - can grow without bound. - - If *typed* is True, arguments of different types will be cached separately. - For example, f(decimal.Decimal("3.0")) and f(3.0) will be treated as - distinct calls with distinct results. Some types such as str and int may - be cached separately even when typed is false. - - Arguments to the cached function must be hashable. - - View the cache statistics named tuple (hits, misses, maxsize, currsize) - with f.cache_info(). Clear the cache and statistics with f.cache_clear(). - Access the underlying function with f.__wrapped__. - - See: https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU) - - """ - - # Users should only access the lru_cache through its public API: - # cache_info, cache_clear, and f.__wrapped__ - # The internals of the lru_cache are encapsulated for thread safety and - # to allow the implementation to change (including a possible C version). 
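
Since the docstring above describes the public API (cache_info(), cache_clear(), __wrapped__), a short usage sketch may help; the cached function and the maxsize value are hypothetical:

    from functools import lru_cache

    @lru_cache(maxsize=2)
    def square(n):
        # Hypothetical stand-in for an expensive computation.
        return n * n

    square(2); square(3); square(2)   # the second square(2) call hits the cache
    print(square.cache_info())        # CacheInfo(hits=1, misses=2, maxsize=2, currsize=2)
    square.cache_clear()              # resets both the cache and the statistics
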
- - if isinstance(maxsize, int): - # Negative maxsize is treated as 0 - if maxsize < 0: - maxsize = 0 - elif callable(maxsize) and isinstance(typed, bool): - # The user_function was passed in directly via the maxsize argument - user_function, maxsize = maxsize, 128 - wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) - wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed} - return update_wrapper(wrapper, user_function) - elif maxsize is not None: - raise TypeError( - 'Expected first argument to be an integer, a callable, or None') - - def decorating_function(user_function): - wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) - wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed} - return update_wrapper(wrapper, user_function) - - return decorating_function - -def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo): - # Constants shared by all lru cache instances: - sentinel = object() # unique object used to signal cache misses - make_key = _make_key # build a key from the function arguments - PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields - - cache = {} - hits = misses = 0 - full = False - cache_get = cache.get # bound method to lookup a key or return None - cache_len = cache.__len__ # get cache size without calling len() - lock = RLock() # because linkedlist updates aren't threadsafe - root = [] # root of the circular doubly linked list - root[:] = [root, root, None, None] # initialize by pointing to self - - if maxsize == 0: - - def wrapper(*args, **kwds): - # No caching -- just a statistics update - nonlocal misses - misses += 1 - result = user_function(*args, **kwds) - return result - - elif maxsize is None: - - def wrapper(*args, **kwds): - # Simple caching without ordering or size limit - nonlocal hits, misses - key = make_key(args, kwds, typed) - result = cache_get(key, sentinel) - if result is not sentinel: - hits += 1 - return result - misses += 1 - result = user_function(*args, **kwds) - cache[key] = result - return result - - else: - - def wrapper(*args, **kwds): - # Size limited caching that tracks accesses by recency - nonlocal root, hits, misses, full - key = make_key(args, kwds, typed) - with lock: - link = cache_get(key) - if link is not None: - # Move the link to the front of the circular queue - link_prev, link_next, _key, result = link - link_prev[NEXT] = link_next - link_next[PREV] = link_prev - last = root[PREV] - last[NEXT] = root[PREV] = link - link[PREV] = last - link[NEXT] = root - hits += 1 - return result - misses += 1 - result = user_function(*args, **kwds) - with lock: - if key in cache: - # Getting here means that this same key was added to the - # cache while the lock was released. Since the link - # update is already done, we need only return the - # computed result and update the count of misses. - pass - elif full: - # Use the old root to store the new key and result. - oldroot = root - oldroot[KEY] = key - oldroot[RESULT] = result - # Empty the oldest link and make it the new root. - # Keep a reference to the old key and old result to - # prevent their ref counts from going to zero during the - # update. That will prevent potentially arbitrary object - # clean-up code (i.e. __del__) from running while we're - # still adjusting the links. - root = oldroot[NEXT] - oldkey = root[KEY] - oldresult = root[RESULT] - root[KEY] = root[RESULT] = None - # Now update the cache dictionary. 
- del cache[oldkey] - # Save the potentially reentrant cache[key] assignment - # for last, after the root and links have been put in - # a consistent state. - cache[key] = oldroot - else: - # Put result in a new link at the front of the queue. - last = root[PREV] - link = [last, root, key, result] - last[NEXT] = root[PREV] = cache[key] = link - # Use the cache_len bound method instead of the len() function - # which could potentially be wrapped in an lru_cache itself. - full = (cache_len() >= maxsize) - return result - - def cache_info(): - """Report cache statistics""" - with lock: - return _CacheInfo(hits, misses, maxsize, cache_len()) - - def cache_clear(): - """Clear the cache and cache statistics""" - nonlocal hits, misses, full - with lock: - cache.clear() - root[:] = [root, root, None, None] - hits = misses = 0 - full = False - - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear - return wrapper - -try: - from _functools import _lru_cache_wrapper -except ImportError: - pass - - -################################################################################ -### cache -- simplified access to the infinity cache -################################################################################ - -def cache(user_function, /): - 'Simple lightweight unbounded cache. Sometimes called "memoize".' - return lru_cache(maxsize=None)(user_function) - - -################################################################################ -### singledispatch() - single-dispatch generic function decorator -################################################################################ - -def _c3_merge(sequences): - """Merges MROs in *sequences* to a single MRO using the C3 algorithm. - - Adapted from https://docs.python.org/3/howto/mro.html. - - """ - result = [] - while True: - sequences = [s for s in sequences if s] # purge empty sequences - if not sequences: - return result - for s1 in sequences: # find merge candidates among seq heads - candidate = s1[0] - for s2 in sequences: - if candidate in s2[1:]: - candidate = None - break # reject the current head, it appears later - else: - break - if candidate is None: - raise RuntimeError("Inconsistent hierarchy") - result.append(candidate) - # remove the chosen candidate - for seq in sequences: - if seq[0] == candidate: - del seq[0] - -def _c3_mro(cls, abcs=None): - """Computes the method resolution order using extended C3 linearization. - - If no *abcs* are given, the algorithm works exactly like the built-in C3 - linearization used for method resolution. - - If given, *abcs* is a list of abstract base classes that should be inserted - into the resulting MRO. Unrelated ABCs are ignored and don't end up in the - result. The algorithm inserts ABCs where their functionality is introduced, - i.e. issubclass(cls, abc) returns True for the class itself but returns - False for all its direct base classes. Implicit ABCs for a given class - (either registered or inferred from the presence of a special method like - __len__) are inserted directly after the last ABC explicitly listed in the - MRO of said class. If two implicit ABCs end up next to each other in the - resulting MRO, their ordering depends on the order of types in *abcs*. - - """ - for i, base in enumerate(reversed(cls.__bases__)): - if hasattr(base, '__abstractmethods__'): - boundary = len(cls.__bases__) - i - break # Bases up to the last explicit ABC are considered first. 
- else: - boundary = 0 - abcs = list(abcs) if abcs else [] - explicit_bases = list(cls.__bases__[:boundary]) - abstract_bases = [] - other_bases = list(cls.__bases__[boundary:]) - for base in abcs: - if issubclass(cls, base) and not any( - issubclass(b, base) for b in cls.__bases__ - ): - # If *cls* is the class that introduces behaviour described by - # an ABC *base*, insert said ABC to its MRO. - abstract_bases.append(base) - for base in abstract_bases: - abcs.remove(base) - explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] - abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] - other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] - return _c3_merge( - [[cls]] + - explicit_c3_mros + abstract_c3_mros + other_c3_mros + - [explicit_bases] + [abstract_bases] + [other_bases] - ) - -def _compose_mro(cls, types): - """Calculates the method resolution order for a given class *cls*. - - Includes relevant abstract base classes (with their respective bases) from - the *types* iterable. Uses a modified C3 linearization algorithm. - - """ - bases = set(cls.__mro__) - # Remove entries which are already present in the __mro__ or unrelated. - def is_related(typ): - return (typ not in bases and hasattr(typ, '__mro__') - and not isinstance(typ, GenericAlias) - and issubclass(cls, typ)) - types = [n for n in types if is_related(n)] - # Remove entries which are strict bases of other entries (they will end up - # in the MRO anyway. - def is_strict_base(typ): - for other in types: - if typ != other and typ in other.__mro__: - return True - return False - types = [n for n in types if not is_strict_base(n)] - # Subclasses of the ABCs in *types* which are also implemented by - # *cls* can be used to stabilize ABC ordering. - type_set = set(types) - mro = [] - for typ in types: - found = [] - for sub in typ.__subclasses__(): - if sub not in bases and issubclass(cls, sub): - found.append([s for s in sub.__mro__ if s in type_set]) - if not found: - mro.append(typ) - continue - # Favor subclasses with the biggest number of useful bases - found.sort(key=len, reverse=True) - for sub in found: - for subcls in sub: - if subcls not in mro: - mro.append(subcls) - return _c3_mro(cls, abcs=mro) - -def _find_impl(cls, registry): - """Returns the best matching implementation from *registry* for type *cls*. - - Where there is no registered implementation for a specific type, its method - resolution order is used to find a more generic implementation. - - Note: if *registry* does not contain an implementation for the base - *object* type, this function may return None. - - """ - mro = _compose_mro(cls, registry.keys()) - match = None - for t in mro: - if match is not None: - # If *match* is an implicit ABC but there is another unrelated, - # equally matching implicit ABC, refuse the temptation to guess. - if (t in registry and t not in cls.__mro__ - and match not in cls.__mro__ - and not issubclass(match, t)): - raise RuntimeError("Ambiguous dispatch: {} or {}".format( - match, t)) - break - if t in registry: - match = t - return registry.get(match) - -def singledispatch(func): - """Single-dispatch generic function decorator. - - Transforms a function into a generic function, which can have different - behaviours depending upon the type of its first argument. The decorated - function acts as the default implementation, and additional - implementations can be registered using the register() attribute of the - generic function. 
-    """
-    # There are many programs that use functools without singledispatch, so we
-    # trade-off making singledispatch marginally slower for the benefit of
-    # making start-up of such applications slightly faster.
-    import types, weakref
-
-    registry = {}
-    dispatch_cache = weakref.WeakKeyDictionary()
-    cache_token = None
-
-    def dispatch(cls):
-        """generic_func.dispatch(cls) -> <impl>
-
-        Runs the dispatch algorithm to return the best available implementation
-        for the given *cls* registered on *generic_func*.
-
-        """
-        nonlocal cache_token
-        if cache_token is not None:
-            current_token = get_cache_token()
-            if cache_token != current_token:
-                dispatch_cache.clear()
-                cache_token = current_token
-        try:
-            impl = dispatch_cache[cls]
-        except KeyError:
-            try:
-                impl = registry[cls]
-            except KeyError:
-                impl = _find_impl(cls, registry)
-            dispatch_cache[cls] = impl
-        return impl
-
-    def _is_union_type(cls):
-        from typing import get_origin, Union
-        return get_origin(cls) in {Union, types.UnionType}
-
-    def _is_valid_dispatch_type(cls):
-        if isinstance(cls, type):
-            return True
-        from typing import get_args
-        return (_is_union_type(cls) and
-                all(isinstance(arg, type) for arg in get_args(cls)))
-
-    def register(cls, func=None):
-        """generic_func.register(cls, func) -> func
-
-        Registers a new implementation for the given *cls* on a *generic_func*.
-
-        """
-        nonlocal cache_token
-        if _is_valid_dispatch_type(cls):
-            if func is None:
-                return lambda f: register(cls, f)
-        else:
-            if func is not None:
-                raise TypeError(
-                    f"Invalid first argument to `register()`. "
-                    f"{cls!r} is not a class or union type."
-                )
-            ann = getattr(cls, '__annotations__', {})
-            if not ann:
-                raise TypeError(
-                    f"Invalid first argument to `register()`: {cls!r}. "
-                    f"Use either `@register(some_class)` or plain `@register` "
-                    f"on an annotated function."
-                )
-            func = cls
-
-            # only import typing if annotation parsing is necessary
-            from typing import get_type_hints
-            argname, cls = next(iter(get_type_hints(func).items()))
-            if not _is_valid_dispatch_type(cls):
-                if _is_union_type(cls):
-                    raise TypeError(
-                        f"Invalid annotation for {argname!r}. "
-                        f"{cls!r} not all arguments are classes."
-                    )
-                else:
-                    raise TypeError(
-                        f"Invalid annotation for {argname!r}. "
-                        f"{cls!r} is not a class."
-                    )
-
-        if _is_union_type(cls):
-            from typing import get_args
-
-            for arg in get_args(cls):
-                registry[arg] = func
-        else:
-            registry[cls] = func
-        if cache_token is None and hasattr(cls, '__abstractmethods__'):
-            cache_token = get_cache_token()
-        dispatch_cache.clear()
-        return func
-
-    def wrapper(*args, **kw):
-        if not args:
-            raise TypeError(f'{funcname} requires at least '
-                            '1 positional argument')
-        return dispatch(args[0].__class__)(*args, **kw)
-
-    funcname = getattr(func, '__name__', 'singledispatch function')
-    registry[object] = func
-    wrapper.register = register
-    wrapper.dispatch = dispatch
-    wrapper.registry = types.MappingProxyType(registry)
-    wrapper._clear_cache = dispatch_cache.clear
-    update_wrapper(wrapper, func)
-    return wrapper
-
-
-# Descriptor version
-class singledispatchmethod:
-    """Single-dispatch generic method descriptor.
-
-    Supports wrapping existing descriptors.
- """ - - def __init__(self, func): - if not callable(func) and not hasattr(func, "__get__"): - raise TypeError(f"{func!r} is not callable or a descriptor") - - self.dispatcher = singledispatch(func) - self.func = func - - def register(self, cls, method=None): - """generic_method.register(cls, func) -> func - - Registers a new implementation for the given *cls* on a *generic_method*. - """ - return self.dispatcher.register(cls, func=method) - - def __get__(self, obj, cls=None): - dispatch = self.dispatcher.dispatch - funcname = getattr(self.func, '__name__', 'singledispatchmethod method') - def _method(*args, **kwargs): - if not args: - raise TypeError(f'{funcname} requires at least ' - '1 positional argument') - return dispatch(args[0].__class__).__get__(obj, cls)(*args, **kwargs) - - _method.__isabstractmethod__ = self.__isabstractmethod__ - _method.register = self.register - update_wrapper(_method, self.func) - - return _method - - @property - def __isabstractmethod__(self): - return getattr(self.func, '__isabstractmethod__', False) - - -################################################################################ -### cached_property() - property result cached as instance attribute -################################################################################ - -_NOT_FOUND = object() - -class cached_property: - def __init__(self, func): - self.func = func - self.attrname = None - self.__doc__ = func.__doc__ - self.__module__ = func.__module__ - - def __set_name__(self, owner, name): - if self.attrname is None: - self.attrname = name - elif name != self.attrname: - raise TypeError( - "Cannot assign the same cached_property to two different names " - f"({self.attrname!r} and {name!r})." - ) - - def __get__(self, instance, owner=None): - if instance is None: - return self - if self.attrname is None: - raise TypeError( - "Cannot use cached_property instance without calling __set_name__ on it.") - try: - cache = instance.__dict__ - except AttributeError: # not all objects have __dict__ (e.g. class defines slots) - msg = ( - f"No '__dict__' attribute on {type(instance).__name__!r} " - f"instance to cache {self.attrname!r} property." - ) - raise TypeError(msg) from None - val = cache.get(self.attrname, _NOT_FOUND) - if val is _NOT_FOUND: - val = self.func(instance) - try: - cache[self.attrname] = val - except TypeError: - msg = ( - f"The '__dict__' attribute on {type(instance).__name__!r} instance " - f"does not support item assignment for caching {self.attrname!r} property." - ) - raise TypeError(msg) from None - return val - - __class_getitem__ = classmethod(GenericAlias) diff --git a/Python313_13_x64_Template/Lib/getopt.py b/Python313_13_x64_Template/Lib/getopt.py deleted file mode 100644 index 5419d77f..00000000 --- a/Python313_13_x64_Template/Lib/getopt.py +++ /dev/null @@ -1,215 +0,0 @@ -"""Parser for command line options. - -This module helps scripts to parse the command line arguments in -sys.argv. It supports the same conventions as the Unix getopt() -function (including the special meanings of arguments of the form `-' -and `--'). Long options similar to those supported by GNU software -may be used as well via an optional third argument. This module -provides two functions and an exception: - -getopt() -- Parse command line options -gnu_getopt() -- Like getopt(), but allow option and non-option arguments -to be intermixed. -GetoptError -- exception (class) raised with 'opt' attribute, which is the -option involved with the exception. 
-""" - -# Long option support added by Lars Wirzenius . -# -# Gerrit Holl moved the string-based exceptions -# to class-based exceptions. -# -# Peter Åstrand added gnu_getopt(). -# -# TODO for gnu_getopt(): -# -# - GNU getopt_long_only mechanism -# - allow the caller to specify ordering -# - RETURN_IN_ORDER option -# - GNU extension with '-' as first character of option string -# - optional arguments, specified by double colons -# - an option string with a W followed by semicolon should -# treat "-W foo" as "--foo" - -__all__ = ["GetoptError","error","getopt","gnu_getopt"] - -import os -try: - from gettext import gettext as _ -except ImportError: - # Bootstrapping Python: gettext's dependencies not built yet - def _(s): return s - -class GetoptError(Exception): - opt = '' - msg = '' - def __init__(self, msg, opt=''): - self.msg = msg - self.opt = opt - Exception.__init__(self, msg, opt) - - def __str__(self): - return self.msg - -error = GetoptError # backward compatibility - -def getopt(args, shortopts, longopts = []): - """getopt(args, options[, long_options]) -> opts, args - - Parses command line options and parameter list. args is the - argument list to be parsed, without the leading reference to the - running program. Typically, this means "sys.argv[1:]". shortopts - is the string of option letters that the script wants to - recognize, with options that require an argument followed by a - colon (i.e., the same format that Unix getopt() uses). If - specified, longopts is a list of strings with the names of the - long options which should be supported. The leading '--' - characters should not be included in the option name. Options - which require an argument should be followed by an equal sign - ('='). - - The return value consists of two elements: the first is a list of - (option, value) pairs; the second is the list of program arguments - left after the option list was stripped (this is a trailing slice - of the first argument). Each option-and-value pair returned has - the option as its first element, prefixed with a hyphen (e.g., - '-x'), and the option argument as its second element, or an empty - string if the option has no argument. The options occur in the - list in the same order in which they were found, thus allowing - multiple occurrences. Long and short options may be mixed. - - """ - - opts = [] - if isinstance(longopts, str): - longopts = [longopts] - else: - longopts = list(longopts) - while args and args[0].startswith('-') and args[0] != '-': - if args[0] == '--': - args = args[1:] - break - if args[0].startswith('--'): - opts, args = do_longs(opts, args[0][2:], longopts, args[1:]) - else: - opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:]) - - return opts, args - -def gnu_getopt(args, shortopts, longopts = []): - """getopt(args, options[, long_options]) -> opts, args - - This function works like getopt(), except that GNU style scanning - mode is used by default. This means that option and non-option - arguments may be intermixed. The getopt() function stops - processing options as soon as a non-option argument is - encountered. - - If the first character of the option string is `+', or if the - environment variable POSIXLY_CORRECT is set, then option - processing stops as soon as a non-option argument is encountered. - - """ - - opts = [] - prog_args = [] - if isinstance(longopts, str): - longopts = [longopts] - else: - longopts = list(longopts) - - # Allow options after non-option arguments? 
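
To make the calling convention described in the getopt() docstring above concrete, a small sketch (the option letters and the argument vector are hypothetical):

    import getopt

    # '-a' requires a value ('a:'), '-b' and '--beta' are flags, '--alpha' takes '='.
    opts, args = getopt.getopt(['-afoo', '--beta', 'input.txt'],
                               'a:b', ['alpha=', 'beta'])
    print(opts)   # [('-a', 'foo'), ('--beta', '')]
    print(args)   # ['input.txt'] -- getopt() stops at the first non-option
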
- if shortopts.startswith('+'): - shortopts = shortopts[1:] - all_options_first = True - elif os.environ.get("POSIXLY_CORRECT"): - all_options_first = True - else: - all_options_first = False - - while args: - if args[0] == '--': - prog_args += args[1:] - break - - if args[0][:2] == '--': - opts, args = do_longs(opts, args[0][2:], longopts, args[1:]) - elif args[0][:1] == '-' and args[0] != '-': - opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:]) - else: - if all_options_first: - prog_args += args - break - else: - prog_args.append(args[0]) - args = args[1:] - - return opts, prog_args - -def do_longs(opts, opt, longopts, args): - try: - i = opt.index('=') - except ValueError: - optarg = None - else: - opt, optarg = opt[:i], opt[i+1:] - - has_arg, opt = long_has_args(opt, longopts) - if has_arg: - if optarg is None: - if not args: - raise GetoptError(_('option --%s requires argument') % opt, opt) - optarg, args = args[0], args[1:] - elif optarg is not None: - raise GetoptError(_('option --%s must not have an argument') % opt, opt) - opts.append(('--' + opt, optarg or '')) - return opts, args - -# Return: -# has_arg? -# full option name -def long_has_args(opt, longopts): - possibilities = [o for o in longopts if o.startswith(opt)] - if not possibilities: - raise GetoptError(_('option --%s not recognized') % opt, opt) - # Is there an exact match? - if opt in possibilities: - return False, opt - elif opt + '=' in possibilities: - return True, opt - # No exact match, so better be unique. - if len(possibilities) > 1: - # XXX since possibilities contains all valid continuations, might be - # nice to work them into the error msg - raise GetoptError(_('option --%s not a unique prefix') % opt, opt) - assert len(possibilities) == 1 - unique_match = possibilities[0] - has_arg = unique_match.endswith('=') - if has_arg: - unique_match = unique_match[:-1] - return has_arg, unique_match - -def do_shorts(opts, optstring, shortopts, args): - while optstring != '': - opt, optstring = optstring[0], optstring[1:] - if short_has_arg(opt, shortopts): - if optstring == '': - if not args: - raise GetoptError(_('option -%s requires argument') % opt, - opt) - optstring, args = args[0], args[1:] - optarg, optstring = optstring, '' - else: - optarg = '' - opts.append(('-' + opt, optarg)) - return opts, args - -def short_has_arg(opt, shortopts): - for i in range(len(shortopts)): - if opt == shortopts[i] != ':': - return shortopts.startswith(':', i+1) - raise GetoptError(_('option -%s not recognized') % opt, opt) - -if __name__ == '__main__': - import sys - print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"])) diff --git a/Python313_13_x64_Template/Lib/getpass.py b/Python313_13_x64_Template/Lib/getpass.py deleted file mode 100644 index bd0097ce..00000000 --- a/Python313_13_x64_Template/Lib/getpass.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Utilities to get a password and/or the current user name. - -getpass(prompt[, stream]) - Prompt for a password, with echo turned off. -getuser() - Get the user name from the environment or password database. - -GetPassWarning - This UserWarning is issued when getpass() cannot prevent - echoing of the password contents while reading. - -On Windows, the msvcrt module will be used. - -""" - -# Authors: Piers Lauder (original) -# Guido van Rossum (Windows support and cleanup) -# Gregory P. 
Smith (tty support & GetPassWarning) - -import contextlib -import io -import os -import sys - -__all__ = ["getpass","getuser","GetPassWarning"] - - -class GetPassWarning(UserWarning): pass - - -def unix_getpass(prompt='Password: ', stream=None): - """Prompt for a password, with echo turned off. - - Args: - prompt: Written on stream to ask for the input. Default: 'Password: ' - stream: A writable file object to display the prompt. Defaults to - the tty. If no tty is available defaults to sys.stderr. - Returns: - The seKr3t input. - Raises: - EOFError: If our input tty or stdin was closed. - GetPassWarning: When we were unable to turn echo off on the input. - - Always restores terminal settings before returning. - """ - passwd = None - with contextlib.ExitStack() as stack: - try: - # Always try reading and writing directly on the tty first. - fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY) - tty = io.FileIO(fd, 'w+') - stack.enter_context(tty) - input = io.TextIOWrapper(tty) - stack.enter_context(input) - if not stream: - stream = input - except OSError: - # If that fails, see if stdin can be controlled. - stack.close() - try: - fd = sys.stdin.fileno() - except (AttributeError, ValueError): - fd = None - passwd = fallback_getpass(prompt, stream) - input = sys.stdin - if not stream: - stream = sys.stderr - - if fd is not None: - try: - old = termios.tcgetattr(fd) # a copy to save - new = old[:] - new[3] &= ~termios.ECHO # 3 == 'lflags' - tcsetattr_flags = termios.TCSAFLUSH - if hasattr(termios, 'TCSASOFT'): - tcsetattr_flags |= termios.TCSASOFT - try: - termios.tcsetattr(fd, tcsetattr_flags, new) - passwd = _raw_input(prompt, stream, input=input) - finally: - termios.tcsetattr(fd, tcsetattr_flags, old) - stream.flush() # issue7208 - except termios.error: - if passwd is not None: - # _raw_input succeeded. The final tcsetattr failed. Reraise - # instead of leaving the terminal in an unknown state. - raise - # We can't control the tty or stdin. Give up and use normal IO. - # fallback_getpass() raises an appropriate warning. - if stream is not input: - # clean up unused file objects before blocking - stack.close() - passwd = fallback_getpass(prompt, stream) - - stream.write('\n') - return passwd - - -def win_getpass(prompt='Password: ', stream=None): - """Prompt for password with echo off, using Windows getwch().""" - if sys.stdin is not sys.__stdin__: - return fallback_getpass(prompt, stream) - - for c in prompt: - msvcrt.putwch(c) - pw = "" - while 1: - c = msvcrt.getwch() - if c == '\r' or c == '\n': - break - if c == '\003': - raise KeyboardInterrupt - if c == '\b': - pw = pw[:-1] - else: - pw = pw + c - msvcrt.putwch('\r') - msvcrt.putwch('\n') - return pw - - -def fallback_getpass(prompt='Password: ', stream=None): - import warnings - warnings.warn("Can not control echo on the terminal.", GetPassWarning, - stacklevel=2) - if not stream: - stream = sys.stderr - print("Warning: Password input may be echoed.", file=stream) - return _raw_input(prompt, stream) - - -def _raw_input(prompt="", stream=None, input=None): - # This doesn't save the string in the GNU readline history. - if not stream: - stream = sys.stderr - if not input: - input = sys.stdin - prompt = str(prompt) - if prompt: - try: - stream.write(prompt) - except UnicodeEncodeError: - # Use replace error handler to get as much as possible printed. 
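
As a usage sketch for the functions above (the prompt text is hypothetical, and this only makes sense in an interactive session):

    import getpass

    # Dispatches to unix_getpass() or win_getpass() depending on the platform;
    # falls back to echoing input (with a GetPassWarning) if echo can't be disabled.
    secret = getpass.getpass(prompt='API token: ')
    print('user:', getpass.getuser())   # from LOGNAME/USER/LNAME/USERNAME or pwd
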
- prompt = prompt.encode(stream.encoding, 'replace') - prompt = prompt.decode(stream.encoding) - stream.write(prompt) - stream.flush() - # NOTE: The Python C API calls flockfile() (and unlock) during readline. - line = input.readline() - if not line: - raise EOFError - if line[-1] == '\n': - line = line[:-1] - return line - - -def getuser(): - """Get the username from the environment or password database. - - First try various environment variables, then the password - database. This works on Windows as long as USERNAME is set. - Any failure to find a username raises OSError. - - .. versionchanged:: 3.13 - Previously, various exceptions beyond just :exc:`OSError` - were raised. - """ - - for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'): - user = os.environ.get(name) - if user: - return user - - try: - import pwd - return pwd.getpwuid(os.getuid())[0] - except (ImportError, KeyError) as e: - raise OSError('No username set in the environment') from e - - -# Bind the name getpass to the appropriate function -try: - import termios - # it's possible there is an incompatible termios from the - # McMillan Installer, make sure we have a UNIX-compatible termios - termios.tcgetattr, termios.tcsetattr -except (ImportError, AttributeError): - try: - import msvcrt - except ImportError: - getpass = fallback_getpass - else: - getpass = win_getpass -else: - getpass = unix_getpass diff --git a/Python313_13_x64_Template/Lib/gettext.py b/Python313_13_x64_Template/Lib/gettext.py deleted file mode 100644 index 62cff81b..00000000 --- a/Python313_13_x64_Template/Lib/gettext.py +++ /dev/null @@ -1,657 +0,0 @@ -"""Internationalization and localization support. - -This module provides internationalization (I18N) and localization (L10N) -support for your Python programs by providing an interface to the GNU gettext -message catalog library. - -I18N refers to the operation by which a program is made aware of multiple -languages. L10N refers to the adaptation of your program, once -internationalized, to the local language and cultural habits. - -""" - -# This module represents the integration of work, contributions, feedback, and -# suggestions from the following people: -# -# Martin von Loewis, who wrote the initial implementation of the underlying -# C-based libintlmodule (later renamed _gettext), along with a skeletal -# gettext.py implementation. -# -# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule, -# which also included a pure-Python implementation to read .mo files if -# intlmodule wasn't available. -# -# James Henstridge, who also wrote a gettext.py module, which has some -# interesting, but currently unsupported experimental features: the notion of -# a Catalog class and instances, and the ability to add to a catalog file via -# a Python API. -# -# Barry Warsaw integrated these modules, wrote the .install() API and code, -# and conformed all C and Python code to Python's coding standards. -# -# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this -# module. -# -# J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs. -# -# TODO: -# - Lazy loading of .mo files. Currently the entire catalog is loaded into -# memory, but that's probably bad for large translated programs. Instead, -# the lexical sort of original strings in GNU .mo files should be exploited -# to do binary searches and lazy initializations. 
Or you might want to use
-#   the undocumented double-hash algorithm for .mo files with hash tables, but
-#   you'll need to study the GNU gettext code to do this.
-#
-# - Support Solaris .mo file formats.  Unfortunately, we've been unable to
-#   find this format documented anywhere.
-
-
-import operator
-import os
-import re
-import sys
-
-
-__all__ = ['NullTranslations', 'GNUTranslations', 'Catalog',
-           'bindtextdomain', 'find', 'translation', 'install',
-           'textdomain', 'dgettext', 'dngettext', 'gettext',
-           'ngettext', 'pgettext', 'dpgettext', 'npgettext',
-           'dnpgettext'
-           ]
-
-_default_localedir = os.path.join(sys.base_prefix, 'share', 'locale')
-
-# Expression parsing for plural form selection.
-#
-# The gettext library supports a small subset of C syntax.  The only
-# incompatible difference is that integer literals starting with zero are
-# decimal.
-#
-# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms
-# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y
-
-_token_pattern = re.compile(r"""
-        (?P<WHITESPACES>[ \t]+)                    | # spaces and horizontal tabs
-        (?P<NUMBER>[0-9]+\b)                       | # decimal integer
-        (?P<NAME>n\b)                              | # only n is allowed
-        (?P<PARENTHESIS>[()])                      |
-        (?P<OPERATOR>[-*/%+?:]|[><!]=?|==|&&|\|\|) | # !, *, /, %, +, -, <, >,
-                                                     # <=, >=, ==, !=, &&, ||,
-                                                     # ? :
-                                                     # unary and bitwise ops
-                                                     # not allowed
-        (?P<INVALID>\w+|.)                           # invalid token
-    """, re.VERBOSE|re.DOTALL)
-
-
-def _tokenize(plural):
-    for mo in re.finditer(_token_pattern, plural):
-        kind = mo.lastgroup
-        if kind == 'WHITESPACES':
-            continue
-        value = mo.group(kind)
-        if kind == 'INVALID':
-            raise ValueError('invalid token in plural form: %s' % value)
-        yield value
-    yield ''
-
-
-def _error(value):
-    if value:
-        return ValueError('unexpected token in plural form: %s' % value)
-    else:
-        return ValueError('unexpected end of plural form')
-
-
-_binary_ops = (
-    ('||',),
-    ('&&',),
-    ('==', '!='),
-    ('<', '>', '<=', '>='),
-    ('+', '-'),
-    ('*', '/', '%'),
-)
-_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops}
-_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'}
-
-
-def _parse(tokens, priority=-1):
-    result = ''
-    nexttok = next(tokens)
-    while nexttok == '!':
-        result += 'not '
-        nexttok = next(tokens)
-
-    if nexttok == '(':
-        sub, nexttok = _parse(tokens)
-        result = '%s(%s)' % (result, sub)
-        if nexttok != ')':
-            raise ValueError('unbalanced parenthesis in plural form')
-    elif nexttok == 'n':
-        result = '%s%s' % (result, nexttok)
-    else:
-        try:
-            value = int(nexttok, 10)
-        except ValueError:
-            raise _error(nexttok) from None
-        result = '%s%d' % (result, value)
-        nexttok = next(tokens)
-
-    j = 100
-    while nexttok in _binary_ops:
-        i = _binary_ops[nexttok]
-        if i < priority:
-            break
-        # Break chained comparisons
-        if i in (3, 4) and j in (3, 4):  # '==', '!=', '<', '>', '<=', '>='
-            result = '(%s)' % result
-        # Replace some C operators by their Python equivalents
-        op = _c2py_ops.get(nexttok, nexttok)
-        right, nexttok = _parse(tokens, i + 1)
-        result = '%s %s %s' % (result, op, right)
-        j = i
-    if j == priority == 4:  # '<', '>', '<=', '>='
-        result = '(%s)' % result
-
-    if nexttok == '?'
and priority <= 0: - if_true, nexttok = _parse(tokens, 0) - if nexttok != ':': - raise _error(nexttok) - if_false, nexttok = _parse(tokens) - result = '%s if %s else %s' % (if_true, result, if_false) - if priority == 0: - result = '(%s)' % result - - return result, nexttok - - -def _as_int(n): - try: - round(n) - except TypeError: - raise TypeError('Plural value must be an integer, got %s' % - (n.__class__.__name__,)) from None - return _as_int2(n) - -def _as_int2(n): - try: - return operator.index(n) - except TypeError: - pass - - import warnings - frame = sys._getframe(1) - stacklevel = 2 - while frame.f_back is not None and frame.f_globals.get('__name__') == __name__: - stacklevel += 1 - frame = frame.f_back - warnings.warn('Plural value must be an integer, got %s' % - (n.__class__.__name__,), - DeprecationWarning, - stacklevel) - return n - - -def c2py(plural): - """Gets a C expression as used in PO files for plural forms and returns a - Python function that implements an equivalent expression. - """ - - if len(plural) > 1000: - raise ValueError('plural form expression is too long') - try: - result, nexttok = _parse(_tokenize(plural)) - if nexttok: - raise _error(nexttok) - - depth = 0 - for c in result: - if c == '(': - depth += 1 - if depth > 20: - # Python compiler limit is about 90. - # The most complex example has 2. - raise ValueError('plural form expression is too complex') - elif c == ')': - depth -= 1 - - ns = {'_as_int': _as_int, '__name__': __name__} - exec('''if True: - def func(n): - if not isinstance(n, int): - n = _as_int(n) - return int(%s) - ''' % result, ns) - return ns['func'] - except RecursionError: - # Recursion error can be raised in _parse() or exec(). - raise ValueError('plural form expression is too complex') - - -def _expand_lang(loc): - import locale - loc = locale.normalize(loc) - COMPONENT_CODESET = 1 << 0 - COMPONENT_TERRITORY = 1 << 1 - COMPONENT_MODIFIER = 1 << 2 - # split up the locale into its base components - mask = 0 - pos = loc.find('@') - if pos >= 0: - modifier = loc[pos:] - loc = loc[:pos] - mask |= COMPONENT_MODIFIER - else: - modifier = '' - pos = loc.find('.') - if pos >= 0: - codeset = loc[pos:] - loc = loc[:pos] - mask |= COMPONENT_CODESET - else: - codeset = '' - pos = loc.find('_') - if pos >= 0: - territory = loc[pos:] - loc = loc[:pos] - mask |= COMPONENT_TERRITORY - else: - territory = '' - language = loc - ret = [] - for i in range(mask+1): - if not (i & ~mask): # if all components for this combo exist ... 
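
A quick sketch of c2py() in action, using the common germanic plural expression (the expression string is just an example of what a PO file header may carry):

    from gettext import c2py

    plural = c2py('n != 1')   # compile the C-style expression to a Python function
    print(plural(1))          # 0 -> singular form index
    print(plural(5))          # 1 -> plural form index
    print(plural(0))          # 1 -> English uses the plural form for zero as well
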
-
-
-class NullTranslations:
-    def __init__(self, fp=None):
-        self._info = {}
-        self._charset = None
-        self._fallback = None
-        if fp is not None:
-            self._parse(fp)
-
-    def _parse(self, fp):
-        pass
-
-    def add_fallback(self, fallback):
-        if self._fallback:
-            self._fallback.add_fallback(fallback)
-        else:
-            self._fallback = fallback
-
-    def gettext(self, message):
-        if self._fallback:
-            return self._fallback.gettext(message)
-        return message
-
-    def ngettext(self, msgid1, msgid2, n):
-        if self._fallback:
-            return self._fallback.ngettext(msgid1, msgid2, n)
-        n = _as_int2(n)
-        if n == 1:
-            return msgid1
-        else:
-            return msgid2
-
-    def pgettext(self, context, message):
-        if self._fallback:
-            return self._fallback.pgettext(context, message)
-        return message
-
-    def npgettext(self, context, msgid1, msgid2, n):
-        if self._fallback:
-            return self._fallback.npgettext(context, msgid1, msgid2, n)
-        n = _as_int2(n)
-        if n == 1:
-            return msgid1
-        else:
-            return msgid2
-
-    def info(self):
-        return self._info
-
-    def charset(self):
-        return self._charset
-
-    def install(self, names=None):
-        import builtins
-        builtins.__dict__['_'] = self.gettext
-        if names is not None:
-            allowed = {'gettext', 'ngettext', 'npgettext', 'pgettext'}
-            for name in allowed & set(names):
-                builtins.__dict__[name] = getattr(self, name)
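
As a usage sketch of the fallback chain that NullTranslations anchors (the 'messages' domain and the ./locale directory are illustrative values, not paths this repository ships):

    import gettext

    t = gettext.translation('messages', localedir='./locale',
                            languages=['de'], fallback=True)
    t.install()        # binds _() into builtins; a NullTranslations if no catalog was found
    print(_('Hello'))  # translated when a catalog was found, the msgid otherwise
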
-
-
-class GNUTranslations(NullTranslations):
-    # Magic number of .mo files
-    LE_MAGIC = 0x950412de
-    BE_MAGIC = 0xde120495
-
-    # The encoding of a msgctxt and a msgid in a .mo file is
-    # msgctxt + "\x04" + msgid (gettext version >= 0.15)
-    CONTEXT = "%s\x04%s"
-
-    # Acceptable .mo versions
-    VERSIONS = (0, 1)
-
-    def _get_versions(self, version):
-        """Returns a tuple of major version, minor version"""
-        return (version >> 16, version & 0xffff)
-
-    def _parse(self, fp):
-        """Override this method to support alternative .mo formats."""
-        # Delay struct import for speeding up gettext import when .mo files
-        # are not used.
-        from struct import unpack
-        filename = getattr(fp, 'name', '')
-        # Parse the .mo file header, which consists of 5 little endian 32
-        # bit words.
-        self._catalog = catalog = {}
-        self.plural = lambda n: int(n != 1) # germanic plural by default
-        buf = fp.read()
-        buflen = len(buf)
-        # Are we big endian or little endian?
-        magic = unpack('<I', buf[:4])[0]
-        if magic == self.LE_MAGIC:
-            version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
-            ii = '<II'
-        elif magic == self.BE_MAGIC:
-            version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
-            ii = '>II'
-        else:
-            raise OSError(0, 'Bad magic number', filename)
-
-        major_version, minor_version = self._get_versions(version)
-
-        if major_version not in self.VERSIONS:
-            raise OSError(0, 'Bad version number ' + str(major_version), filename)
-
-        # Now put all messages from the .mo file buffer into the catalog
-        # dictionary.
-        for i in range(0, msgcount):
-            mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
-            mend = moff + mlen
-            tlen, toff = unpack(ii, buf[transidx:transidx+8])
-            tend = toff + tlen
-            if mend < buflen and tend < buflen:
-                msg = buf[moff:mend]
-                tmsg = buf[toff:tend]
-            else:
-                raise OSError(0, 'File is corrupt', filename)
-            # See if we're looking at GNU .mo conventions for metadata
-            if mlen == 0:
-                # Catalog description
-                lastk = None
-                for b_item in tmsg.split(b'\n'):
-                    item = b_item.decode().strip()
-                    if not item:
-                        continue
-                    # Skip over comment lines:
-                    if item.startswith('#-#-#-#-#') and item.endswith('#-#-#-#-#'):
-                        continue
-                    k = v = None
-                    if ':' in item:
-                        k, v = item.split(':', 1)
-                        k = k.strip().lower()
-                        v = v.strip()
-                        self._info[k] = v
-                        lastk = k
-                    elif lastk:
-                        self._info[lastk] += '\n' + item
-                    if k == 'content-type':
-                        self._charset = v.split('charset=')[1]
-                    elif k == 'plural-forms':
-                        v = v.split(';')
-                        plural = v[1].split('plural=')[1]
-                        self.plural = c2py(plural)
-            # Note: we unconditionally convert both msgids and msgstrs to
-            # Unicode using the character encoding specified in the charset
-            # parameter of the Content-Type header. The gettext documentation
-            # strongly encourages msgids to be us-ascii, but some applications
-            # require alternative encodings (e.g. Zope's ZCML and ZPT). For
-            # traditional gettext applications, the msgid conversion will
-            # cause no problems since us-ascii should always be a subset of
-            # the charset encoding. We may want to fall back to 8-bit msgids
-            # if the Unicode conversion fails.
-            charset = self._charset or 'ascii'
-            if b'\x00' in msg:
-                # Plural forms
-                msgid1, msgid2 = msg.split(b'\x00')
-                tmsg = tmsg.split(b'\x00')
-                msgid1 = str(msgid1, charset)
-                for i, x in enumerate(tmsg):
-                    catalog[(msgid1, i)] = str(x, charset)
-            else:
-                catalog[str(msg, charset)] = str(tmsg, charset)
-            # advance to next entry in the seek tables
-            masteridx += 8
-            transidx += 8
-
-    def gettext(self, message):
-        missing = object()
-        tmsg = self._catalog.get(message, missing)
-        if tmsg is missing:
-            tmsg = self._catalog.get((message, self.plural(1)), missing)
-        if tmsg is not missing:
-            return tmsg
-        if self._fallback:
-            return self._fallback.gettext(message)
-        return message
-
-    def ngettext(self, msgid1, msgid2, n):
-        try:
-            tmsg = self._catalog[(msgid1, self.plural(n))]
-        except KeyError:
-            if self._fallback:
-                return self._fallback.ngettext(msgid1, msgid2, n)
-            if n == 1:
-                tmsg = msgid1
-            else:
-                tmsg = msgid2
-        return tmsg
-
-    def pgettext(self, context, message):
-        ctxt_msg_id = self.CONTEXT % (context, message)
-        missing = object()
-        tmsg = self._catalog.get(ctxt_msg_id, missing)
-        if tmsg is missing:
-            tmsg = self._catalog.get((ctxt_msg_id, self.plural(1)), missing)
-        if tmsg is not missing:
-            return tmsg
-        if self._fallback:
-            return self._fallback.pgettext(context, message)
-        return message
-
-    def npgettext(self, context, msgid1, msgid2, n):
-        ctxt_msg_id = self.CONTEXT % (context, msgid1)
-        try:
-            tmsg = self._catalog[ctxt_msg_id, self.plural(n)]
-        except KeyError:
-            if self._fallback:
-                return self._fallback.npgettext(context, msgid1, msgid2, n)
-            if n == 1:
-                tmsg = msgid1
-            else:
-                tmsg = msgid2
-        return tmsg
-
-
-# Locate a .mo file using the gettext strategy
-def find(domain, localedir=None, languages=None, all=False):
-    # Get some reasonable defaults for arguments that were not supplied
-    if localedir is None:
-        localedir = _default_localedir
-    if languages is None:
-        languages = []
-        for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES',
'LANG'): - val = os.environ.get(envar) - if val: - languages = val.split(':') - break - if 'C' not in languages: - languages.append('C') - # now normalize and expand the languages - nelangs = [] - for lang in languages: - for nelang in _expand_lang(lang): - if nelang not in nelangs: - nelangs.append(nelang) - # select a language - if all: - result = [] - else: - result = None - for lang in nelangs: - if lang == 'C': - break - mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain) - if os.path.exists(mofile): - if all: - result.append(mofile) - else: - return mofile - return result - - -# a mapping between absolute .mo file path and Translation object -_translations = {} - - -def translation(domain, localedir=None, languages=None, - class_=None, fallback=False): - if class_ is None: - class_ = GNUTranslations - mofiles = find(domain, localedir, languages, all=True) - if not mofiles: - if fallback: - return NullTranslations() - from errno import ENOENT - raise FileNotFoundError(ENOENT, - 'No translation file found for domain', domain) - # Avoid opening, reading, and parsing the .mo file after it's been done - # once. - result = None - for mofile in mofiles: - key = (class_, os.path.abspath(mofile)) - t = _translations.get(key) - if t is None: - with open(mofile, 'rb') as fp: - t = _translations.setdefault(key, class_(fp)) - # Copy the translation object to allow setting fallbacks and - # output charset. All other instance data is shared with the - # cached object. - # Delay copy import for speeding up gettext import when .mo files - # are not used. - import copy - t = copy.copy(t) - if result is None: - result = t - else: - result.add_fallback(t) - return result - - -def install(domain, localedir=None, *, names=None): - t = translation(domain, localedir, fallback=True) - t.install(names) - - -# a mapping b/w domains and locale directories -_localedirs = {} -# current global domain, `messages' used for compatibility w/ GNU gettext -_current_domain = 'messages' - - -def textdomain(domain=None): - global _current_domain - if domain is not None: - _current_domain = domain - return _current_domain - - -def bindtextdomain(domain, localedir=None): - global _localedirs - if localedir is not None: - _localedirs[domain] = localedir - return _localedirs.get(domain, _default_localedir) - - -def dgettext(domain, message): - try: - t = translation(domain, _localedirs.get(domain, None)) - except OSError: - return message - return t.gettext(message) - - -def dngettext(domain, msgid1, msgid2, n): - try: - t = translation(domain, _localedirs.get(domain, None)) - except OSError: - n = _as_int2(n) - if n == 1: - return msgid1 - else: - return msgid2 - return t.ngettext(msgid1, msgid2, n) - - -def dpgettext(domain, context, message): - try: - t = translation(domain, _localedirs.get(domain, None)) - except OSError: - return message - return t.pgettext(context, message) - - -def dnpgettext(domain, context, msgid1, msgid2, n): - try: - t = translation(domain, _localedirs.get(domain, None)) - except OSError: - n = _as_int2(n) - if n == 1: - return msgid1 - else: - return msgid2 - return t.npgettext(context, msgid1, msgid2, n) - - -def gettext(message): - return dgettext(_current_domain, message) - - -def ngettext(msgid1, msgid2, n): - return dngettext(_current_domain, msgid1, msgid2, n) - - -def pgettext(context, message): - return dpgettext(_current_domain, context, message) - - -def npgettext(context, msgid1, msgid2, n): - return dnpgettext(_current_domain, context, msgid1, msgid2, n) - - -# 
dcgettext() has been deemed unnecessary and is not implemented. - -# James Henstridge's Catalog constructor from GNOME gettext. Documented usage -# was: -# -# import gettext -# cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR) -# _ = cat.gettext -# print _('Hello World') - -# The resulting catalog object currently don't support access through a -# dictionary API, which was supported (but apparently unused) in GNOME -# gettext. - -Catalog = translation diff --git a/Python313_13_x64_Template/Lib/glob.py b/Python313_13_x64_Template/Lib/glob.py deleted file mode 100644 index 3fec2aa9..00000000 --- a/Python313_13_x64_Template/Lib/glob.py +++ /dev/null @@ -1,572 +0,0 @@ -"""Filename globbing utility.""" - -import contextlib -import os -import re -import fnmatch -import functools -import itertools -import operator -import stat -import sys - - -__all__ = ["glob", "iglob", "escape", "translate"] - -def glob(pathname, *, root_dir=None, dir_fd=None, recursive=False, - include_hidden=False): - """Return a list of paths matching a `pathname` pattern. - - The pattern may contain simple shell-style wildcards a la - fnmatch. Unlike fnmatch, filenames starting with a - dot are special cases that are not matched by '*' and '?' - patterns by default. - - The order of the returned list is undefined. Sort it if you need a - particular order. - - If `root_dir` is not None, it should be a path-like object specifying the - root directory for searching. It has the same effect as changing the - current directory before calling it (without actually - changing it). If pathname is relative, the result will contain - paths relative to `root_dir`. - - If `dir_fd` is not None, it should be a file descriptor referring to a - directory, and paths will then be relative to that directory. - - If `include_hidden` is true, the patterns '*', '?', '**' will match hidden - directories. - - If `recursive` is true, the pattern '**' will match any files and - zero or more directories and subdirectories. - """ - return list(iglob(pathname, root_dir=root_dir, dir_fd=dir_fd, recursive=recursive, - include_hidden=include_hidden)) - -def iglob(pathname, *, root_dir=None, dir_fd=None, recursive=False, - include_hidden=False): - """Return an iterator which yields the paths matching a `pathname` pattern. - - The pattern may contain simple shell-style wildcards a la - fnmatch. However, unlike fnmatch, filenames starting with a - dot are special cases that are not matched by '*' and '?' - patterns. - - The order of the returned paths is undefined. Sort them if you need a - particular order. - - If `root_dir` is not None, it should be a path-like object specifying - the root directory for searching. It has the same effect as changing - the current directory before calling it (without actually - changing it). If pathname is relative, the result will contain - paths relative to `root_dir`. - - If `dir_fd` is not None, it should be a file descriptor referring to a - directory, and paths will then be relative to that directory. - - If `include_hidden` is true, the patterns '*', '?', '**' will match hidden - directories. - - If `recursive` is true, the pattern '**' will match any files and - zero or more directories and subdirectories. 
- """ - sys.audit("glob.glob", pathname, recursive) - sys.audit("glob.glob/2", pathname, recursive, root_dir, dir_fd) - if root_dir is not None: - root_dir = os.fspath(root_dir) - else: - root_dir = pathname[:0] - it = _iglob(pathname, root_dir, dir_fd, recursive, False, - include_hidden=include_hidden) - if not pathname or recursive and _isrecursive(pathname[:2]): - try: - s = next(it) # skip empty string - if s: - it = itertools.chain((s,), it) - except StopIteration: - pass - return it - -def _iglob(pathname, root_dir, dir_fd, recursive, dironly, - include_hidden=False): - dirname, basename = os.path.split(pathname) - if not has_magic(pathname): - assert not dironly - if basename: - if _lexists(_join(root_dir, pathname), dir_fd): - yield pathname - else: - # Patterns ending with a slash should match only directories - if _isdir(_join(root_dir, dirname), dir_fd): - yield pathname - return - if not dirname: - if recursive and _isrecursive(basename): - yield from _glob2(root_dir, basename, dir_fd, dironly, - include_hidden=include_hidden) - else: - yield from _glob1(root_dir, basename, dir_fd, dironly, - include_hidden=include_hidden) - return - # `os.path.split()` returns the argument itself as a dirname if it is a - # drive or UNC path. Prevent an infinite recursion if a drive or UNC path - # contains magic characters (i.e. r'\\?\C:'). - if dirname != pathname and has_magic(dirname): - dirs = _iglob(dirname, root_dir, dir_fd, recursive, True, - include_hidden=include_hidden) - else: - dirs = [dirname] - if has_magic(basename): - if recursive and _isrecursive(basename): - glob_in_dir = _glob2 - else: - glob_in_dir = _glob1 - else: - glob_in_dir = _glob0 - for dirname in dirs: - for name in glob_in_dir(_join(root_dir, dirname), basename, dir_fd, dironly, - include_hidden=include_hidden): - yield os.path.join(dirname, name) - -# These 2 helper functions non-recursively glob inside a literal directory. -# They return a list of basenames. _glob1 accepts a pattern while _glob0 -# takes a literal basename (so it only has to check for its existence). - -def _glob1(dirname, pattern, dir_fd, dironly, include_hidden=False): - names = _listdir(dirname, dir_fd, dironly) - if not (include_hidden or _ishidden(pattern)): - names = (x for x in names if not _ishidden(x)) - return fnmatch.filter(names, pattern) - -def _glob0(dirname, basename, dir_fd, dironly, include_hidden=False): - if basename: - if _lexists(_join(dirname, basename), dir_fd): - return [basename] - else: - # `os.path.split()` returns an empty basename for paths ending with a - # directory separator. 'q*x/' should match only directories. - if _isdir(dirname, dir_fd): - return [basename] - return [] - -_deprecated_function_message = ( - "{name} is deprecated and will be removed in Python {remove}. Use " - "glob.glob and pass a directory to its root_dir argument instead." -) - -def glob0(dirname, pattern): - import warnings - warnings._deprecated("glob.glob0", _deprecated_function_message, remove=(3, 15)) - return _glob0(dirname, pattern, None, False) - -def glob1(dirname, pattern): - import warnings - warnings._deprecated("glob.glob1", _deprecated_function_message, remove=(3, 15)) - return _glob1(dirname, pattern, None, False) - -# This helper function recursively yields relative pathnames inside a literal -# directory. 
- -def _glob2(dirname, pattern, dir_fd, dironly, include_hidden=False): - assert _isrecursive(pattern) - if not dirname or _isdir(dirname, dir_fd): - yield pattern[:0] - yield from _rlistdir(dirname, dir_fd, dironly, - include_hidden=include_hidden) - -# If dironly is false, yields all file names inside a directory. -# If dironly is true, yields only directory names. -def _iterdir(dirname, dir_fd, dironly): - try: - fd = None - fsencode = None - if dir_fd is not None: - if dirname: - fd = arg = os.open(dirname, _dir_open_flags, dir_fd=dir_fd) - else: - arg = dir_fd - if isinstance(dirname, bytes): - fsencode = os.fsencode - elif dirname: - arg = dirname - elif isinstance(dirname, bytes): - arg = bytes(os.curdir, 'ASCII') - else: - arg = os.curdir - try: - with os.scandir(arg) as it: - for entry in it: - try: - if not dironly or entry.is_dir(): - if fsencode is not None: - yield fsencode(entry.name) - else: - yield entry.name - except OSError: - pass - finally: - if fd is not None: - os.close(fd) - except OSError: - return - -def _listdir(dirname, dir_fd, dironly): - with contextlib.closing(_iterdir(dirname, dir_fd, dironly)) as it: - return list(it) - -# Recursively yields relative pathnames inside a literal directory. -def _rlistdir(dirname, dir_fd, dironly, include_hidden=False): - names = _listdir(dirname, dir_fd, dironly) - for x in names: - if include_hidden or not _ishidden(x): - yield x - path = _join(dirname, x) if dirname else x - for y in _rlistdir(path, dir_fd, dironly, - include_hidden=include_hidden): - yield _join(x, y) - - -def _lexists(pathname, dir_fd): - # Same as os.path.lexists(), but with dir_fd - if dir_fd is None: - return os.path.lexists(pathname) - try: - os.lstat(pathname, dir_fd=dir_fd) - except (OSError, ValueError): - return False - else: - return True - -def _isdir(pathname, dir_fd): - # Same as os.path.isdir(), but with dir_fd - if dir_fd is None: - return os.path.isdir(pathname) - try: - st = os.stat(pathname, dir_fd=dir_fd) - except (OSError, ValueError): - return False - else: - return stat.S_ISDIR(st.st_mode) - -def _join(dirname, basename): - # It is common if dirname or basename is empty - if not dirname or not basename: - return dirname or basename - return os.path.join(dirname, basename) - -magic_check = re.compile('([*?[])') -magic_check_bytes = re.compile(b'([*?[])') - -def has_magic(s): - if isinstance(s, bytes): - match = magic_check_bytes.search(s) - else: - match = magic_check.search(s) - return match is not None - -def _ishidden(path): - return path[0] in ('.', b'.'[0]) - -def _isrecursive(pattern): - if isinstance(pattern, bytes): - return pattern == b'**' - else: - return pattern == '**' - -def escape(pathname): - """Escape all special characters. - """ - # Escaping is done by wrapping any of "*?[" between square brackets. - # Metacharacters do not work in the drive part and shouldn't be escaped. - drive, pathname = os.path.splitdrive(pathname) - if isinstance(pathname, bytes): - pathname = magic_check_bytes.sub(br'[\1]', pathname) - else: - pathname = magic_check.sub(r'[\1]', pathname) - return drive + pathname - - -_special_parts = ('', '.', '..') -_dir_open_flags = os.O_RDONLY | getattr(os, 'O_DIRECTORY', 0) -_no_recurse_symlinks = object() - - -def translate(pat, *, recursive=False, include_hidden=False, seps=None): - """Translate a pathname with shell wildcards to a regular expression. - - If `recursive` is true, the pattern segment '**' will match any number of - path segments. 
- - If `include_hidden` is true, wildcards can match path segments beginning - with a dot ('.'). - - If a sequence of separator characters is given to `seps`, they will be - used to split the pattern into segments and match path separators. If not - given, os.path.sep and os.path.altsep (where available) are used. - """ - if not seps: - if os.path.altsep: - seps = (os.path.sep, os.path.altsep) - else: - seps = os.path.sep - escaped_seps = ''.join(map(re.escape, seps)) - any_sep = f'[{escaped_seps}]' if len(seps) > 1 else escaped_seps - not_sep = f'[^{escaped_seps}]' - if include_hidden: - one_last_segment = f'{not_sep}+' - one_segment = f'{one_last_segment}{any_sep}' - any_segments = f'(?:.+{any_sep})?' - any_last_segments = '.*' - else: - one_last_segment = f'[^{escaped_seps}.]{not_sep}*' - one_segment = f'{one_last_segment}{any_sep}' - any_segments = f'(?:{one_segment})*' - any_last_segments = f'{any_segments}(?:{one_last_segment})?' - - results = [] - parts = re.split(any_sep, pat) - last_part_idx = len(parts) - 1 - for idx, part in enumerate(parts): - if part == '*': - results.append(one_segment if idx < last_part_idx else one_last_segment) - elif recursive and part == '**': - if idx < last_part_idx: - if parts[idx + 1] != '**': - results.append(any_segments) - else: - results.append(any_last_segments) - else: - if part: - if not include_hidden and part[0] in '*?': - results.append(r'(?!\.)') - results.extend(fnmatch._translate(part, f'{not_sep}*', not_sep)) - if idx < last_part_idx: - results.append(any_sep) - res = ''.join(results) - return fr'(?s:{res})\Z' - - -@functools.lru_cache(maxsize=512) -def _compile_pattern(pat, sep, case_sensitive, recursive=True): - """Compile given glob pattern to a re.Pattern object (observing case - sensitivity).""" - flags = re.NOFLAG if case_sensitive else re.IGNORECASE - regex = translate(pat, recursive=recursive, include_hidden=True, seps=sep) - return re.compile(regex, flags=flags).match - - -class _Globber: - """Class providing shell-style pattern matching and globbing. - """ - - def __init__(self, sep, case_sensitive, case_pedantic=False, recursive=False): - self.sep = sep - self.case_sensitive = case_sensitive - self.case_pedantic = case_pedantic - self.recursive = recursive - - # Low-level methods - - lstat = operator.methodcaller('lstat') - add_slash = operator.methodcaller('joinpath', '') - - @staticmethod - def scandir(path): - """Emulates os.scandir(), which returns an object that can be used as - a context manager. This method is called by walk() and glob(). - """ - return contextlib.nullcontext(path.iterdir()) - - @staticmethod - def concat_path(path, text): - """Appends text to the given path. - """ - return path.with_segments(path._raw_path + text) - - @staticmethod - def parse_entry(entry): - """Returns the path of an entry yielded from scandir(). - """ - return entry - - # High-level methods - - def compile(self, pat): - return _compile_pattern(pat, self.sep, self.case_sensitive, self.recursive) - - def selector(self, parts): - """Returns a function that selects from a given path, walking and - filtering according to the glob-style pattern parts in *parts*. 
- """ - if not parts: - return self.select_exists - part = parts.pop() - if self.recursive and part == '**': - selector = self.recursive_selector - elif part in _special_parts: - selector = self.special_selector - elif not self.case_pedantic and magic_check.search(part) is None: - selector = self.literal_selector - else: - selector = self.wildcard_selector - return selector(part, parts) - - def special_selector(self, part, parts): - """Returns a function that selects special children of the given path. - """ - select_next = self.selector(parts) - - def select_special(path, exists=False): - path = self.concat_path(self.add_slash(path), part) - return select_next(path, exists) - return select_special - - def literal_selector(self, part, parts): - """Returns a function that selects a literal descendant of a path. - """ - - # Optimization: consume and join any subsequent literal parts here, - # rather than leaving them for the next selector. This reduces the - # number of string concatenation operations and calls to add_slash(). - while parts and magic_check.search(parts[-1]) is None: - part += self.sep + parts.pop() - - select_next = self.selector(parts) - - def select_literal(path, exists=False): - path = self.concat_path(self.add_slash(path), part) - return select_next(path, exists=False) - return select_literal - - def wildcard_selector(self, part, parts): - """Returns a function that selects direct children of a given path, - filtering by pattern. - """ - - match = None if part == '*' else self.compile(part) - dir_only = bool(parts) - if dir_only: - select_next = self.selector(parts) - - def select_wildcard(path, exists=False): - try: - # We must close the scandir() object before proceeding to - # avoid exhausting file descriptors when globbing deep trees. - with self.scandir(path) as scandir_it: - entries = list(scandir_it) - except OSError: - pass - else: - for entry in entries: - if match is None or match(entry.name): - if dir_only: - try: - if not entry.is_dir(): - continue - except OSError: - continue - entry_path = self.parse_entry(entry) - if dir_only: - yield from select_next(entry_path, exists=True) - else: - yield entry_path - return select_wildcard - - def recursive_selector(self, part, parts): - """Returns a function that selects a given path and all its children, - recursively, filtering by pattern. - """ - # Optimization: consume following '**' parts, which have no effect. - while parts and parts[-1] == '**': - parts.pop() - - # Optimization: consume and join any following non-special parts here, - # rather than leaving them for the next selector. They're used to - # build a regular expression, which we use to filter the results of - # the recursive walk. As a result, non-special pattern segments - # following a '**' wildcard don't require additional filesystem access - # to expand. 
-        follow_symlinks = self.recursive is not _no_recurse_symlinks
-        if follow_symlinks:
-            while parts and parts[-1] not in _special_parts:
-                part += self.sep + parts.pop()
-
-        match = None if part == '**' else self.compile(part)
-        dir_only = bool(parts)
-        select_next = self.selector(parts)
-
-        def select_recursive(path, exists=False):
-            path = self.add_slash(path)
-            match_pos = len(str(path))
-            if match is None or match(str(path), match_pos):
-                yield from select_next(path, exists)
-            stack = [path]
-            while stack:
-                yield from select_recursive_step(stack, match_pos)
-
-        def select_recursive_step(stack, match_pos):
-            path = stack.pop()
-            try:
-                # We must close the scandir() object before proceeding to
-                # avoid exhausting file descriptors when globbing deep trees.
-                with self.scandir(path) as scandir_it:
-                    entries = list(scandir_it)
-            except OSError:
-                pass
-            else:
-                for entry in entries:
-                    is_dir = False
-                    try:
-                        if entry.is_dir(follow_symlinks=follow_symlinks):
-                            is_dir = True
-                    except OSError:
-                        pass
-
-                    if is_dir or not dir_only:
-                        entry_path = self.parse_entry(entry)
-                        if match is None or match(str(entry_path), match_pos):
-                            if dir_only:
-                                yield from select_next(entry_path, exists=True)
-                            else:
-                                # Optimization: directly yield the path if this is
-                                # last pattern part.
-                                yield entry_path
-                    if is_dir:
-                        stack.append(entry_path)
-
-        return select_recursive
-
-    def select_exists(self, path, exists=False):
-        """Yields the given path, if it exists.
-        """
-        if exists:
-            # Optimization: this path is already known to exist, e.g. because
-            # it was returned from os.scandir(), so we skip calling lstat().
-            yield path
-        else:
-            try:
-                self.lstat(path)
-                yield path
-            except OSError:
-                pass
-
-
-class _StringGlobber(_Globber):
-    lstat = staticmethod(os.lstat)
-    scandir = staticmethod(os.scandir)
-    parse_entry = operator.attrgetter('path')
-    concat_path = operator.add
-
-    if os.name == 'nt':
-        @staticmethod
-        def add_slash(pathname):
-            tail = os.path.splitroot(pathname)[2]
-            if not tail or tail[-1] in '\\/':
-                return pathname
-            return f'{pathname}\\'
-    else:
-        @staticmethod
-        def add_slash(pathname):
-            if not pathname or pathname[-1] == '/':
-                return pathname
-            return f'{pathname}/'
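
A minimal usage sketch of the public API implemented above (the 'src' tree and the patterns are illustrative values):

    import glob
    import re

    # '**' spans any number of directories only when recursive=True;
    # hidden names stay excluded unless include_hidden=True.
    for path in glob.iglob('src/**/*.py', recursive=True):
        print(path)

    # translate() exposes the same matching logic as a regular expression.
    rx = re.compile(glob.translate('**/*.py', recursive=True, include_hidden=True))
    print(bool(rx.match('a/b/c.py')))
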
diff --git a/Python313_13_x64_Template/Lib/graphlib.py b/Python313_13_x64_Template/Lib/graphlib.py
deleted file mode 100644
index 9512865a..00000000
--- a/Python313_13_x64_Template/Lib/graphlib.py
+++ /dev/null
@@ -1,250 +0,0 @@
-from types import GenericAlias
-
-__all__ = ["TopologicalSorter", "CycleError"]
-
-_NODE_OUT = -1
-_NODE_DONE = -2
-
-
-class _NodeInfo:
-    __slots__ = "node", "npredecessors", "successors"
-
-    def __init__(self, node):
-        # The node this class is augmenting.
-        self.node = node
-
-        # Number of predecessors, generally >= 0. When this value falls to 0,
-        # and is returned by get_ready(), this is set to _NODE_OUT and when the
-        # node is marked done by a call to done(), set to _NODE_DONE.
-        self.npredecessors = 0
-
-        # List of successor nodes. The list can contain duplicated elements as
-        # long as they're all reflected in the successor's npredecessors attribute.
-        self.successors = []
-
-
-class CycleError(ValueError):
-    """Subclass of ValueError raised by TopologicalSorter.prepare if cycles
-    exist in the working graph.
-
-    If multiple cycles exist, only one undefined choice among them will be reported
-    and included in the exception. The detected cycle can be accessed via the second
-    element in the *args* attribute of the exception instance and consists in a list
-    of nodes, such that each node is, in the graph, an immediate predecessor of the
-    next node in the list. In the reported list, the first and the last node will be
-    the same, to make it clear that it is cyclic.
-    """
-
-    pass
-
-
-class TopologicalSorter:
-    """Provides functionality to topologically sort a graph of hashable nodes"""
-
-    def __init__(self, graph=None):
-        self._node2info = {}
-        self._ready_nodes = None
-        self._npassedout = 0
-        self._nfinished = 0
-
-        if graph is not None:
-            for node, predecessors in graph.items():
-                self.add(node, *predecessors)
-
-    def _get_nodeinfo(self, node):
-        if (result := self._node2info.get(node)) is None:
-            self._node2info[node] = result = _NodeInfo(node)
-        return result
-
-    def add(self, node, *predecessors):
-        """Add a new node and its predecessors to the graph.
-
-        Both the *node* and all elements in *predecessors* must be hashable.
-
-        If called multiple times with the same node argument, the set of dependencies
-        will be the union of all dependencies passed in.
-
-        It is possible to add a node with no dependencies (*predecessors* is not provided)
-        as well as provide a dependency twice. If a node that has not been provided before
-        is included among *predecessors* it will be automatically added to the graph with
-        no predecessors of its own.
-
-        Raises ValueError if called after "prepare".
-        """
-        if self._ready_nodes is not None:
-            raise ValueError("Nodes cannot be added after a call to prepare()")
-
-        # Create the node -> predecessor edges
-        nodeinfo = self._get_nodeinfo(node)
-        nodeinfo.npredecessors += len(predecessors)
-
-        # Create the predecessor -> node edges
-        for pred in predecessors:
-            pred_info = self._get_nodeinfo(pred)
-            pred_info.successors.append(node)
-
-    def prepare(self):
-        """Mark the graph as finished and check for cycles in the graph.
-
-        If any cycle is detected, "CycleError" will be raised, but "get_ready" can
-        still be used to obtain as many nodes as possible until cycles block more
-        progress. After a call to this function, the graph cannot be modified and
-        therefore no more nodes can be added using "add".
-        """
-        if self._ready_nodes is not None:
-            raise ValueError("cannot prepare() more than once")
-
-        self._ready_nodes = [
-            i.node for i in self._node2info.values() if i.npredecessors == 0
-        ]
-        # ready_nodes is set before we look for cycles on purpose:
-        # if the user wants to catch the CycleError, that's fine,
-        # they can continue using the instance to grab as many
-        # nodes as possible before cycles block more progress
-        cycle = self._find_cycle()
-        if cycle:
-            raise CycleError(f"nodes are in a cycle", cycle)
-
-    def get_ready(self):
-        """Return a tuple of all the nodes that are ready.
-
-        Initially it returns all nodes with no predecessors; once those are marked
-        as processed by calling "done", further calls will return all new nodes that
-        have all their predecessors already processed. Once no more progress can be made,
-        empty tuples are returned.
-
-        Raises ValueError if called without calling "prepare" previously.
-        """
-        if self._ready_nodes is None:
-            raise ValueError("prepare() must be called first")
-
-        # Get the nodes that are ready and mark them
-        result = tuple(self._ready_nodes)
-        n2i = self._node2info
-        for node in result:
-            n2i[node].npredecessors = _NODE_OUT
-
-        # Clean the list of nodes that are ready and update
-        # the counter of nodes that we have returned.
- self._ready_nodes.clear() - self._npassedout += len(result) - - return result - - def is_active(self): - """Return ``True`` if more progress can be made and ``False`` otherwise. - - Progress can be made if cycles do not block the resolution and either there - are still nodes ready that haven't yet been returned by "get_ready" or the - number of nodes marked "done" is less than the number that have been returned - by "get_ready". - - Raises ValueError if called without calling "prepare" previously. - """ - if self._ready_nodes is None: - raise ValueError("prepare() must be called first") - return self._nfinished < self._npassedout or bool(self._ready_nodes) - - def __bool__(self): - return self.is_active() - - def done(self, *nodes): - """Marks a set of nodes returned by "get_ready" as processed. - - This method unblocks any successor of each node in *nodes* for being returned - in the future by a call to "get_ready". - - Raises ValueError if any node in *nodes* has already been marked as - processed by a previous call to this method, if a node was not added to the - graph by using "add" or if called without calling "prepare" previously or if - node has not yet been returned by "get_ready". - """ - - if self._ready_nodes is None: - raise ValueError("prepare() must be called first") - - n2i = self._node2info - - for node in nodes: - - # Check if we know about this node (it was added previously using add() - if (nodeinfo := n2i.get(node)) is None: - raise ValueError(f"node {node!r} was not added using add()") - - # If the node has not being returned (marked as ready) previously, inform the user. - stat = nodeinfo.npredecessors - if stat != _NODE_OUT: - if stat >= 0: - raise ValueError( - f"node {node!r} was not passed out (still not ready)" - ) - elif stat == _NODE_DONE: - raise ValueError(f"node {node!r} was already marked done") - else: - assert False, f"node {node!r}: unknown status {stat}" - - # Mark the node as processed - nodeinfo.npredecessors = _NODE_DONE - - # Go to all the successors and reduce the number of predecessors, collecting all the ones - # that are ready to be returned in the next get_ready() call. - for successor in nodeinfo.successors: - successor_info = n2i[successor] - successor_info.npredecessors -= 1 - if successor_info.npredecessors == 0: - self._ready_nodes.append(successor) - self._nfinished += 1 - - def _find_cycle(self): - n2i = self._node2info - stack = [] - itstack = [] - seen = set() - node2stacki = {} - - for node in n2i: - if node in seen: - continue - - while True: - if node in seen: - # If we have seen already the node and is in the - # current stack we have found a cycle. - if node in node2stacki: - return stack[node2stacki[node] :] + [node] - # else go on to get next successor - else: - seen.add(node) - itstack.append(iter(n2i[node].successors).__next__) - node2stacki[node] = len(stack) - stack.append(node) - - # Backtrack to the topmost stack entry with - # at least another successor. - while stack: - try: - node = itstack[-1]() - break - except StopIteration: - del node2stacki[stack.pop()] - itstack.pop() - else: - break - return None - - def static_order(self): - """Returns an iterable of nodes in a topological order. - - The particular order that is returned may depend on the specific - order in which the items were inserted in the graph. - - Using this method does not require to call "prepare" or "done". If any - cycle is detected, :exc:`CycleError` will be raised. 
- """ - self.prepare() - while self.is_active(): - node_group = self.get_ready() - yield from node_group - self.done(*node_group) - - __class_getitem__ = classmethod(GenericAlias) diff --git a/Python313_13_x64_Template/Lib/gzip.py b/Python313_13_x64_Template/Lib/gzip.py deleted file mode 100644 index a550c20a..00000000 --- a/Python313_13_x64_Template/Lib/gzip.py +++ /dev/null @@ -1,691 +0,0 @@ -"""Functions that read and write gzipped files. - -The user of the file doesn't have to worry about the compression, -but random access is not allowed.""" - -# based on Andrew Kuchling's minigzip.py distributed with the zlib module - -import _compression -import builtins -import io -import os -import struct -import sys -import time -import weakref -import zlib - -__all__ = ["BadGzipFile", "GzipFile", "open", "compress", "decompress"] - -FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16 - -READ = 'rb' -WRITE = 'wb' - -_COMPRESS_LEVEL_FAST = 1 -_COMPRESS_LEVEL_TRADEOFF = 6 -_COMPRESS_LEVEL_BEST = 9 - -READ_BUFFER_SIZE = 128 * 1024 -_WRITE_BUFFER_SIZE = 4 * io.DEFAULT_BUFFER_SIZE - - -def open(filename, mode="rb", compresslevel=_COMPRESS_LEVEL_BEST, - encoding=None, errors=None, newline=None): - """Open a gzip-compressed file in binary or text mode. - - The filename argument can be an actual filename (a str or bytes object), or - an existing file object to read from or write to. - - The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or "ab" for - binary mode, or "rt", "wt", "xt" or "at" for text mode. The default mode is - "rb", and the default compresslevel is 9. - - For binary mode, this function is equivalent to the GzipFile constructor: - GzipFile(filename, mode, compresslevel). In this case, the encoding, errors - and newline arguments must not be provided. - - For text mode, a GzipFile object is created, and wrapped in an - io.TextIOWrapper instance with the specified encoding, error handling - behavior, and line ending(s). - - """ - if "t" in mode: - if "b" in mode: - raise ValueError("Invalid mode: %r" % (mode,)) - else: - if encoding is not None: - raise ValueError("Argument 'encoding' not supported in binary mode") - if errors is not None: - raise ValueError("Argument 'errors' not supported in binary mode") - if newline is not None: - raise ValueError("Argument 'newline' not supported in binary mode") - - gz_mode = mode.replace("t", "") - if isinstance(filename, (str, bytes, os.PathLike)): - binary_file = GzipFile(filename, gz_mode, compresslevel) - elif hasattr(filename, "read") or hasattr(filename, "write"): - binary_file = GzipFile(None, gz_mode, compresslevel, filename) - else: - raise TypeError("filename must be a str or bytes object, or a file") - - if "t" in mode: - encoding = io.text_encoding(encoding) - return io.TextIOWrapper(binary_file, encoding, errors, newline) - else: - return binary_file - -def write32u(output, value): - # The L format writes the bit pattern correctly whether signed - # or unsigned. - output.write(struct.pack("' - - def _init_write(self, filename): - self.name = filename - self.crc = zlib.crc32(b"") - self.size = 0 - self.writebuf = [] - self.bufsize = 0 - self.offset = 0 # Current file offset for seek(), tell(), etc - - def tell(self): - self._check_not_closed() - self._buffer.flush() - return super().tell() - - def _write_gzip_header(self, compresslevel): - self.fileobj.write(b'\037\213') # magic header - self.fileobj.write(b'\010') # compression method - try: - # RFC 1952 requires the FNAME field to be Latin-1. 
Do not - # include filenames that cannot be represented that way. - fname = os.path.basename(self.name) - if not isinstance(fname, bytes): - fname = fname.encode('latin-1') - if fname.endswith(b'.gz'): - fname = fname[:-3] - except UnicodeEncodeError: - fname = b'' - flags = 0 - if fname: - flags = FNAME - self.fileobj.write(chr(flags).encode('latin-1')) - mtime = self._write_mtime - if mtime is None: - mtime = time.time() - write32u(self.fileobj, int(mtime)) - if compresslevel == _COMPRESS_LEVEL_BEST: - xfl = b'\002' - elif compresslevel == _COMPRESS_LEVEL_FAST: - xfl = b'\004' - else: - xfl = b'\000' - self.fileobj.write(xfl) - self.fileobj.write(b'\377') - if fname: - self.fileobj.write(fname + b'\000') - - def write(self,data): - self._check_not_closed() - if self.mode != WRITE: - import errno - raise OSError(errno.EBADF, "write() on read-only GzipFile object") - - if self.fileobj is None: - raise ValueError("write() on closed GzipFile object") - - return self._buffer.write(data) - - def _write_raw(self, data): - # Called by our self._buffer underlying WriteBufferStream. - if isinstance(data, (bytes, bytearray)): - length = len(data) - else: - # accept any data that supports the buffer protocol - data = memoryview(data) - length = data.nbytes - - if length > 0: - self.fileobj.write(self.compress.compress(data)) - self.size += length - self.crc = zlib.crc32(data, self.crc) - self.offset += length - - return length - - def read(self, size=-1): - self._check_not_closed() - if self.mode != READ: - import errno - raise OSError(errno.EBADF, "read() on write-only GzipFile object") - return self._buffer.read(size) - - def read1(self, size=-1): - """Implements BufferedIOBase.read1() - - Reads up to a buffer's worth of data if size is negative.""" - self._check_not_closed() - if self.mode != READ: - import errno - raise OSError(errno.EBADF, "read1() on write-only GzipFile object") - - if size < 0: - size = io.DEFAULT_BUFFER_SIZE - return self._buffer.read1(size) - - def peek(self, n): - self._check_not_closed() - if self.mode != READ: - import errno - raise OSError(errno.EBADF, "peek() on write-only GzipFile object") - return self._buffer.peek(n) - - @property - def closed(self): - return self.fileobj is None - - def close(self): - fileobj = self.fileobj - if fileobj is None or self._buffer.closed: - return - try: - if self.mode == WRITE: - self._buffer.flush() - fileobj.write(self.compress.flush()) - write32u(fileobj, self.crc) - # self.size may exceed 2 GiB, or even 4 GiB - write32u(fileobj, self.size & 0xffffffff) - elif self.mode == READ: - self._buffer.close() - finally: - self._close() - - def _close(self): - self.fileobj = None - myfileobj = self.myfileobj - if myfileobj is not None: - self.myfileobj = None - myfileobj.close() - - def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH): - self._check_not_closed() - if self.mode == WRITE: - self._buffer.flush() - # Ensure the compressor's buffer is flushed - self.fileobj.write(self.compress.flush(zlib_mode)) - self.fileobj.flush() - - def fileno(self): - """Invoke the underlying file object's fileno() method. - - This will raise AttributeError if the underlying file object - doesn't support fileno(). 
- """ - return self.fileobj.fileno() - - def rewind(self): - '''Return the uncompressed stream file position indicator to the - beginning of the file''' - if self.mode != READ: - raise OSError("Can't rewind in write mode") - self._buffer.seek(0) - - def readable(self): - return self.mode == READ - - def writable(self): - return self.mode == WRITE - - def seekable(self): - return True - - def seek(self, offset, whence=io.SEEK_SET): - if self.mode == WRITE: - self._check_not_closed() - # Flush buffer to ensure validity of self.offset - self._buffer.flush() - if whence != io.SEEK_SET: - if whence == io.SEEK_CUR: - offset = self.offset + offset - else: - raise ValueError('Seek from end not supported') - if offset < self.offset: - raise OSError('Negative seek in write mode') - count = offset - self.offset - chunk = b'\0' * self._buffer_size - for i in range(count // self._buffer_size): - self.write(chunk) - self.write(b'\0' * (count % self._buffer_size)) - elif self.mode == READ: - self._check_not_closed() - return self._buffer.seek(offset, whence) - - return self.offset - - def readline(self, size=-1): - self._check_not_closed() - return self._buffer.readline(size) - - -def _read_exact(fp, n): - '''Read exactly *n* bytes from `fp` - - This method is required because fp may be unbuffered, - i.e. return short reads. - ''' - data = fp.read(n) - while len(data) < n: - b = fp.read(n - len(data)) - if not b: - raise EOFError("Compressed file ended before the " - "end-of-stream marker was reached") - data += b - return data - - -def _read_gzip_header(fp): - '''Read a gzip header from `fp` and progress to the end of the header. - - Returns last mtime if header was present or None otherwise. - ''' - magic = fp.read(2) - if magic == b'': - return None - - if magic != b'\037\213': - raise BadGzipFile('Not a gzipped file (%r)' % magic) - - (method, flag, last_mtime) = struct.unpack(">> import hashlib - >>> m = hashlib.md5() - >>> m.update(b"Nobody inspects") - >>> m.update(b" the spammish repetition") - >>> m.digest() - b'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9' - -More condensed: - - >>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest() - 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' - -""" - -# This tuple and __get_builtin_constructor() must be modified if a new -# always available algorithm is added. -__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', - 'blake2b', 'blake2s', - 'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512', - 'shake_128', 'shake_256') - - -algorithms_guaranteed = set(__always_supported) -algorithms_available = set(__always_supported) - -__all__ = __always_supported + ('new', 'algorithms_guaranteed', - 'algorithms_available', 'file_digest') - - -__builtin_constructor_cache = {} - -# Prefer our blake2 implementation -# OpenSSL 1.1.0 comes with a limited implementation of blake2b/s. The OpenSSL -# implementations neither support keyed blake2 (blake2 MAC) nor advanced -# features like salt, personalization, or tree hashing. OpenSSL hash-only -# variants are available as 'blake2b512' and 'blake2s256', though. 
-
-def __get_builtin_constructor(name):
-    cache = __builtin_constructor_cache
-    constructor = cache.get(name)
-    if constructor is not None:
-        return constructor
-    try:
-        if name in {'SHA1', 'sha1'}:
-            import _sha1
-            cache['SHA1'] = cache['sha1'] = _sha1.sha1
-        elif name in {'MD5', 'md5'}:
-            import _md5
-            cache['MD5'] = cache['md5'] = _md5.md5
-        elif name in {'SHA256', 'sha256', 'SHA224', 'sha224'}:
-            import _sha2
-            cache['SHA224'] = cache['sha224'] = _sha2.sha224
-            cache['SHA256'] = cache['sha256'] = _sha2.sha256
-        elif name in {'SHA512', 'sha512', 'SHA384', 'sha384'}:
-            import _sha2
-            cache['SHA384'] = cache['sha384'] = _sha2.sha384
-            cache['SHA512'] = cache['sha512'] = _sha2.sha512
-        elif name in {'blake2b', 'blake2s'}:
-            import _blake2
-            cache['blake2b'] = _blake2.blake2b
-            cache['blake2s'] = _blake2.blake2s
-        elif name in {'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512'}:
-            import _sha3
-            cache['sha3_224'] = _sha3.sha3_224
-            cache['sha3_256'] = _sha3.sha3_256
-            cache['sha3_384'] = _sha3.sha3_384
-            cache['sha3_512'] = _sha3.sha3_512
-        elif name in {'shake_128', 'shake_256'}:
-            import _sha3
-            cache['shake_128'] = _sha3.shake_128
-            cache['shake_256'] = _sha3.shake_256
-    except ImportError:
-        pass  # no extension module, this hash is unsupported.
-
-    constructor = cache.get(name)
-    if constructor is not None:
-        return constructor
-
-    raise ValueError('unsupported hash type ' + name)
-
-
-def __get_openssl_constructor(name):
-    if name in __block_openssl_constructor:
-        # Prefer our builtin blake2 implementation.
-        return __get_builtin_constructor(name)
-    try:
-        # MD5, SHA1, and SHA2 are in all supported OpenSSL versions
-        # SHA3/shake are available in OpenSSL 1.1.1+
-        f = getattr(_hashlib, 'openssl_' + name)
-        # Allow the C module to raise ValueError. The function will be
-        # defined but the hash not actually available. Don't fall back to
-        # builtin if the current security policy blocks a digest, bpo#40695.
-        f(usedforsecurity=False)
-        # Use the C function directly (very fast)
-        return f
-    except (AttributeError, ValueError):
-        return __get_builtin_constructor(name)
-
-
-def __py_new(name, *args, **kwargs):
-    """new(name, data=b'', **kwargs) - Return a new hashing object using the
-    named algorithm; optionally initialized with data (which must be
-    a bytes-like object).
-    """
-    return __get_builtin_constructor(name)(*args, **kwargs)
-
-
-def __hash_new(name, *args, **kwargs):
-    """new(name, data=b'') - Return a new hashing object using the named algorithm;
-    optionally initialized with data (which must be a bytes-like object).
-    """
-    if name in __block_openssl_constructor:
-        # Prefer our builtin blake2 implementation.
-        return __get_builtin_constructor(name)(*args, **kwargs)
-    try:
-        return _hashlib.new(name, *args, **kwargs)
-    except ValueError:
-        # If the _hashlib module (OpenSSL) doesn't support the named
-        # hash, try using our builtin implementations.
-        # This allows for SHA224/256 and SHA384/512 support even though
-        # the OpenSSL library prior to 0.9.8 doesn't provide them.
-        return __get_builtin_constructor(name)(*args, **kwargs)
-
-
-try:
-    import _hashlib
-    new = __hash_new
-    __get_hash = __get_openssl_constructor
-    algorithms_available = algorithms_available.union(
-        _hashlib.openssl_md_meth_names)
-except ImportError:
-    _hashlib = None
-    new = __py_new
-    __get_hash = __get_builtin_constructor
-
-try:
-    # OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
-    from _hashlib import pbkdf2_hmac
-    __all__ += ('pbkdf2_hmac',)
-except ImportError:
-    pass
-
-
-try:
-    # OpenSSL's scrypt requires OpenSSL 1.1+
-    from _hashlib import scrypt
-except ImportError:
-    pass
-
-
-def file_digest(fileobj, digest, /, *, _bufsize=2**18):
-    """Hash the contents of a file-like object. Returns a digest object.
-
-    *fileobj* must be a file-like object opened for reading in binary mode.
-    It accepts file objects from open(), io.BytesIO(), and SocketIO objects.
-    The function may bypass Python's I/O and use the file descriptor *fileno*
-    directly.
-
-    *digest* must either be a hash algorithm name as a *str*, a hash
-    constructor, or a callable that returns a hash object.
-    """
-    # On Linux we could use AF_ALG sockets and sendfile() to archive zero-copy
-    # hashing with hardware acceleration.
-    if isinstance(digest, str):
-        digestobj = new(digest)
-    else:
-        digestobj = digest()
-
-    if hasattr(fileobj, "getbuffer"):
-        # io.BytesIO object, use zero-copy buffer
-        digestobj.update(fileobj.getbuffer())
-        return digestobj
-
-    # Only binary files implement readinto().
-    if not (
-        hasattr(fileobj, "readinto")
-        and hasattr(fileobj, "readable")
-        and fileobj.readable()
-    ):
-        raise ValueError(
-            f"'{fileobj!r}' is not a file-like object in binary reading mode."
-        )
-
-    # binary file, socket.SocketIO object
-    # Note: socket I/O uses different syscalls than file I/O.
-    buf = bytearray(_bufsize)  # Reusable buffer to reduce allocations.
-    view = memoryview(buf)
-    while True:
-        size = fileobj.readinto(buf)
-        if size is None:
-            raise BlockingIOError("I/O operation would block.")
-        if size == 0:
-            break  # EOF
-        digestobj.update(view[:size])
-
-    return digestobj
-
-
-for __func_name in __always_supported:
-    # try them all, some may not work due to the OpenSSL
-    # version not supporting that algorithm.
-    try:
-        globals()[__func_name] = __get_hash(__func_name)
-    except ValueError:
-        import logging
-        logging.exception('code for hash %s was not found.', __func_name)
-
-
-# Cleanup locals()
-del __always_supported, __func_name, __get_hash
-del __py_new, __hash_new, __get_openssl_constructor
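
For reference, file_digest() packages the read-into-a-reusable-buffer loop above into a single call; a minimal usage sketch (the file name is an illustrative value):

    import hashlib

    with open('CloudbaseInitSetup.msi', 'rb') as f:   # illustrative path
        digest = hashlib.file_digest(f, 'sha256')
    print(digest.hexdigest())
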
diff --git a/Python313_13_x64_Template/Lib/heapq.py b/Python313_13_x64_Template/Lib/heapq.py
deleted file mode 100644
index 2fd9d1ff..00000000
--- a/Python313_13_x64_Template/Lib/heapq.py
+++ /dev/null
@@ -1,603 +0,0 @@
-"""Heap queue algorithm (a.k.a. priority queue).
-
-Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
-all k, counting elements from 0. For the sake of comparison,
-non-existing elements are considered to be infinite. The interesting
-property of a heap is that a[0] is always its smallest element.
- -Usage: - -heap = [] # creates an empty heap -heappush(heap, item) # pushes a new item on the heap -item = heappop(heap) # pops the smallest item from the heap -item = heap[0] # smallest item on the heap without popping it -heapify(x) # transforms list into a heap, in-place, in linear time -item = heappushpop(heap, item) # pushes a new item and then returns - # the smallest item; the heap size is unchanged -item = heapreplace(heap, item) # pops and returns smallest item, and adds - # new item; the heap size is unchanged - -Our API differs from textbook heap algorithms as follows: - -- We use 0-based indexing. This makes the relationship between the - index for a node and the indexes for its children slightly less - obvious, but is more suitable since Python uses 0-based indexing. - -- Our heappop() method returns the smallest item, not the largest. - -These two make it possible to view the heap as a regular Python list -without surprises: heap[0] is the smallest item, and heap.sort() -maintains the heap invariant! -""" - -# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger - -__about__ = """Heap queues - -[explanation by François Pinard] - -Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for -all k, counting elements from 0. For the sake of comparison, -non-existing elements are considered to be infinite. The interesting -property of a heap is that a[0] is always its smallest element. - -The strange invariant above is meant to be an efficient memory -representation for a tournament. The numbers below are `k', not a[k]: - - 0 - - 1 2 - - 3 4 5 6 - - 7 8 9 10 11 12 13 14 - - 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 - - -In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In -a usual binary tournament we see in sports, each cell is the winner -over the two cells it tops, and we can trace the winner down the tree -to see all opponents s/he had. However, in many computer applications -of such tournaments, we do not need to trace the history of a winner. -To be more memory efficient, when a winner is promoted, we try to -replace it by something else at a lower level, and the rule becomes -that a cell and the two cells it tops contain three different items, -but the top cell "wins" over the two topped cells. - -If this heap invariant is protected at all time, index 0 is clearly -the overall winner. The simplest algorithmic way to remove it and -find the "next" winner is to move some loser (let's say cell 30 in the -diagram above) into the 0 position, and then percolate this new 0 down -the tree, exchanging values, until the invariant is re-established. -This is clearly logarithmic on the total number of items in the tree. -By iterating over all items, you get an O(n ln n) sort. - -A nice feature of this sort is that you can efficiently insert new -items while the sort is going on, provided that the inserted items are -not "better" than the last 0'th element you extracted. This is -especially useful in simulation contexts, where the tree holds all -incoming events, and the "win" condition means the smallest scheduled -time. When an event schedule other events for execution, they are -scheduled into the future, so they can easily go into the heap. So, a -heap is a good structure for implementing schedulers (this is what I -used for my MIDI sequencer :-). 
- -Various structures for implementing schedulers have been extensively -studied, and heaps are good for this, as they are reasonably speedy, -the speed is almost constant, and the worst case is not much different -than the average case. However, there are other representations which -are more efficient overall, yet the worst cases might be terrible. - -Heaps are also very useful in big disk sorts. You most probably all -know that a big sort implies producing "runs" (which are pre-sorted -sequences, which size is usually related to the amount of CPU memory), -followed by a merging passes for these runs, which merging is often -very cleverly organised[1]. It is very important that the initial -sort produces the longest runs possible. Tournaments are a good way -to that. If, using all the memory available to hold a tournament, you -replace and percolate items that happen to fit the current run, you'll -produce runs which are twice the size of the memory for random input, -and much better for input fuzzily ordered. - -Moreover, if you output the 0'th item on disk and get an input which -may not fit in the current tournament (because the value "wins" over -the last output value), it cannot fit in the heap, so the size of the -heap decreases. The freed memory could be cleverly reused immediately -for progressively building a second heap, which grows at exactly the -same rate the first heap is melting. When the first heap completely -vanishes, you switch heaps and start a new run. Clever and quite -effective! - -In a word, heaps are useful memory structures to know. I use them in -a few applications, and I think it is good to keep a `heap' module -around. :-) - --------------------- -[1] The disk balancing algorithms which are current, nowadays, are -more annoying than clever, and this is a consequence of the seeking -capabilities of the disks. On devices which cannot seek, like big -tape drives, the story was quite different, and one had to be very -clever to ensure (far in advance) that each tape movement will be the -most effective possible (that is, will best participate at -"progressing" the merge). Some tapes were even able to read -backwards, and this was also used to avoid the rewinding time. -Believe me, real good tape sorts were quite spectacular to watch! -From all times, sorting has always been a Great Art! :-) -""" - -__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', - 'nlargest', 'nsmallest', 'heappushpop'] - -def heappush(heap, item): - """Push item onto heap, maintaining the heap invariant.""" - heap.append(item) - _siftdown(heap, 0, len(heap)-1) - -def heappop(heap): - """Pop the smallest item off the heap, maintaining the heap invariant.""" - lastelt = heap.pop() # raises appropriate IndexError if heap is empty - if heap: - returnitem = heap[0] - heap[0] = lastelt - _siftup(heap, 0) - return returnitem - return lastelt - -def heapreplace(heap, item): - """Pop and return the current smallest value, and add the new item. - - This is more efficient than heappop() followed by heappush(), and can be - more appropriate when using a fixed-size heap. Note that the value - returned may be larger than item! 
That constrains reasonable uses of - this routine unless written as part of a conditional replacement: - - if item > heap[0]: - item = heapreplace(heap, item) - """ - returnitem = heap[0] # raises appropriate IndexError if heap is empty - heap[0] = item - _siftup(heap, 0) - return returnitem - -def heappushpop(heap, item): - """Fast version of a heappush followed by a heappop.""" - if heap and heap[0] < item: - item, heap[0] = heap[0], item - _siftup(heap, 0) - return item - -def heapify(x): - """Transform list into a heap, in-place, in O(len(x)) time.""" - n = len(x) - # Transform bottom-up. The largest index there's any point to looking at - # is the largest with a child index in-range, so must have 2*i + 1 < n, - # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so - # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is - # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. - for i in reversed(range(n//2)): - _siftup(x, i) - -def _heappop_max(heap): - """Maxheap version of a heappop.""" - lastelt = heap.pop() # raises appropriate IndexError if heap is empty - if heap: - returnitem = heap[0] - heap[0] = lastelt - _siftup_max(heap, 0) - return returnitem - return lastelt - -def _heapreplace_max(heap, item): - """Maxheap version of a heappop followed by a heappush.""" - returnitem = heap[0] # raises appropriate IndexError if heap is empty - heap[0] = item - _siftup_max(heap, 0) - return returnitem - -def _heapify_max(x): - """Transform list into a maxheap, in-place, in O(len(x)) time.""" - n = len(x) - for i in reversed(range(n//2)): - _siftup_max(x, i) - -# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos -# is the index of a leaf with a possibly out-of-order value. Restore the -# heap invariant. -def _siftdown(heap, startpos, pos): - newitem = heap[pos] - # Follow the path to the root, moving parents down until finding a place - # newitem fits. - while pos > startpos: - parentpos = (pos - 1) >> 1 - parent = heap[parentpos] - if newitem < parent: - heap[pos] = parent - pos = parentpos - continue - break - heap[pos] = newitem - -# The child indices of heap index pos are already heaps, and we want to make -# a heap at index pos too. We do this by bubbling the smaller child of -# pos up (and so on with that child's children, etc) until hitting a leaf, -# then using _siftdown to move the oddball originally at index pos into place. -# -# We *could* break out of the loop as soon as we find a pos where newitem <= -# both its children, but turns out that's not a good idea, and despite that -# many books write the algorithm that way. During a heap pop, the last array -# element is sifted in, and that tends to be large, so that comparing it -# against values starting from the root usually doesn't pay (= usually doesn't -# get us out of the loop early). See Knuth, Volume 3, where this is -# explained and quantified in an exercise. -# -# Cutting the # of comparisons is important, since these routines have no -# way to extract "the priority" from an array element, so that intelligence -# is likely to be hiding in custom comparison methods, or in array elements -# storing (priority, record) tuples. Comparisons are thus potentially -# expensive. -# -# On random arrays of length 1000, making this change cut the number of -# comparisons made by heapify() a little, and those made by exhaustive -# heappop() a lot, in accord with theory. 
Here are typical results from 3 -# runs (3 just to demonstrate how small the variance is): -# -# Compares needed by heapify Compares needed by 1000 heappops -# -------------------------- -------------------------------- -# 1837 cut to 1663 14996 cut to 8680 -# 1855 cut to 1659 14966 cut to 8678 -# 1847 cut to 1660 15024 cut to 8703 -# -# Building the heap by using heappush() 1000 times instead required -# 2198, 2148, and 2219 compares: heapify() is more efficient, when -# you can use it. -# -# The total compares needed by list.sort() on the same lists were 8627, -# 8627, and 8632 (this should be compared to the sum of heapify() and -# heappop() compares): list.sort() is (unsurprisingly!) more efficient -# for sorting. - -def _siftup(heap, pos): - endpos = len(heap) - startpos = pos - newitem = heap[pos] - # Bubble up the smaller child until hitting a leaf. - childpos = 2*pos + 1 # leftmost child position - while childpos < endpos: - # Set childpos to index of smaller child. - rightpos = childpos + 1 - if rightpos < endpos and not heap[childpos] < heap[rightpos]: - childpos = rightpos - # Move the smaller child up. - heap[pos] = heap[childpos] - pos = childpos - childpos = 2*pos + 1 - # The leaf at pos is empty now. Put newitem there, and bubble it up - # to its final resting place (by sifting its parents down). - heap[pos] = newitem - _siftdown(heap, startpos, pos) - -def _siftdown_max(heap, startpos, pos): - 'Maxheap variant of _siftdown' - newitem = heap[pos] - # Follow the path to the root, moving parents down until finding a place - # newitem fits. - while pos > startpos: - parentpos = (pos - 1) >> 1 - parent = heap[parentpos] - if parent < newitem: - heap[pos] = parent - pos = parentpos - continue - break - heap[pos] = newitem - -def _siftup_max(heap, pos): - 'Maxheap variant of _siftup' - endpos = len(heap) - startpos = pos - newitem = heap[pos] - # Bubble up the larger child until hitting a leaf. - childpos = 2*pos + 1 # leftmost child position - while childpos < endpos: - # Set childpos to index of larger child. - rightpos = childpos + 1 - if rightpos < endpos and not heap[rightpos] < heap[childpos]: - childpos = rightpos - # Move the larger child up. - heap[pos] = heap[childpos] - pos = childpos - childpos = 2*pos + 1 - # The leaf at pos is empty now. Put newitem there, and bubble it up - # to its final resting place (by sifting its parents down). - heap[pos] = newitem - _siftdown_max(heap, startpos, pos) - -def merge(*iterables, key=None, reverse=False): - '''Merge multiple sorted inputs into a single sorted output. - - Similar to sorted(itertools.chain(*iterables)) but returns a generator, - does not pull the data into memory all at once, and assumes that each of - the input streams is already sorted (smallest to largest). - - >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) - [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] - - If *key* is not None, applies a key function to each element to determine - its sort order. 
- - >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) - ['dog', 'cat', 'fish', 'horse', 'kangaroo'] - - ''' - - h = [] - h_append = h.append - - if reverse: - _heapify = _heapify_max - _heappop = _heappop_max - _heapreplace = _heapreplace_max - direction = -1 - else: - _heapify = heapify - _heappop = heappop - _heapreplace = heapreplace - direction = 1 - - if key is None: - for order, it in enumerate(map(iter, iterables)): - try: - next = it.__next__ - h_append([next(), order * direction, next]) - except StopIteration: - pass - _heapify(h) - while len(h) > 1: - try: - while True: - value, order, next = s = h[0] - yield value - s[0] = next() # raises StopIteration when exhausted - _heapreplace(h, s) # restore heap condition - except StopIteration: - _heappop(h) # remove empty iterator - if h: - # fast case when only a single iterator remains - value, order, next = h[0] - yield value - yield from next.__self__ - return - - for order, it in enumerate(map(iter, iterables)): - try: - next = it.__next__ - value = next() - h_append([key(value), order * direction, value, next]) - except StopIteration: - pass - _heapify(h) - while len(h) > 1: - try: - while True: - key_value, order, value, next = s = h[0] - yield value - value = next() - s[0] = key(value) - s[2] = value - _heapreplace(h, s) - except StopIteration: - _heappop(h) - if h: - key_value, order, value, next = h[0] - yield value - yield from next.__self__ - - -# Algorithm notes for nlargest() and nsmallest() -# ============================================== -# -# Make a single pass over the data while keeping the k most extreme values -# in a heap. Memory consumption is limited to keeping k values in a list. -# -# Measured performance for random inputs: -# -# number of comparisons -# n inputs k-extreme values (average of 5 trials) % more than min() -# ------------- ---------------- --------------------- ----------------- -# 1,000 100 3,317 231.7% -# 10,000 100 14,046 40.5% -# 100,000 100 105,749 5.7% -# 1,000,000 100 1,007,751 0.8% -# 10,000,000 100 10,009,401 0.1% -# -# Theoretical number of comparisons for k smallest of n random inputs: -# -# Step Comparisons Action -# ---- -------------------------- --------------------------- -# 1 1.66 * k heapify the first k-inputs -# 2 n - k compare remaining elements to top of heap -# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap -# 4 k * lg2(k) - (k/2) final sort of the k most extreme values -# -# Combining and simplifying for a rough estimate gives: -# -# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k)) -# -# Computing the number of comparisons for step 3: -# ----------------------------------------------- -# * For the i-th new value from the iterable, the probability of being in the -# k most extreme values is k/i. For example, the probability of the 101st -# value seen being in the 100 most extreme values is 100/101. -# * If the value is a new extreme value, the cost of inserting it into the -# heap is 1 + log(k, 2). 
-# * The probability times the cost gives: -# (k/i) * (1 + log(k, 2)) -# * Summing across the remaining n-k elements gives: -# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1)) -# * This reduces to: -# (H(n) - H(k)) * k * (1 + log(k, 2)) -# * Where H(n) is the n-th harmonic number estimated by: -# gamma = 0.5772156649 -# H(n) = log(n, e) + gamma + 1 / (2 * n) -# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence -# * Substituting the H(n) formula: -# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2) -# -# Worst-case for step 3: -# ---------------------- -# In the worst case, the input data is reversed sorted so that every new element -# must be inserted in the heap: -# -# comparisons = 1.66 * k + log(k, 2) * (n - k) -# -# Alternative Algorithms -# ---------------------- -# Other algorithms were not used because they: -# 1) Took much more auxiliary memory, -# 2) Made multiple passes over the data. -# 3) Made more comparisons in common cases (small k, large n, semi-random input). -# See the more detailed comparison of approach at: -# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest - -def nsmallest(n, iterable, key=None): - """Find the n smallest elements in a dataset. - - Equivalent to: sorted(iterable, key=key)[:n] - """ - - # Short-cut for n==1 is to use min() - if n == 1: - it = iter(iterable) - sentinel = object() - result = min(it, default=sentinel, key=key) - return [] if result is sentinel else [result] - - # When n>=size, it's faster to use sorted() - try: - size = len(iterable) - except (TypeError, AttributeError): - pass - else: - if n >= size: - return sorted(iterable, key=key)[:n] - - # When key is none, use simpler decoration - if key is None: - it = iter(iterable) - # put the range(n) first so that zip() doesn't - # consume one too many elements from the iterator - result = [(elem, i) for i, elem in zip(range(n), it)] - if not result: - return result - _heapify_max(result) - top = result[0][0] - order = n - _heapreplace = _heapreplace_max - for elem in it: - if elem < top: - _heapreplace(result, (elem, order)) - top, _order = result[0] - order += 1 - result.sort() - return [elem for (elem, order) in result] - - # General case, slowest method - it = iter(iterable) - result = [(key(elem), i, elem) for i, elem in zip(range(n), it)] - if not result: - return result - _heapify_max(result) - top = result[0][0] - order = n - _heapreplace = _heapreplace_max - for elem in it: - k = key(elem) - if k < top: - _heapreplace(result, (k, order, elem)) - top, _order, _elem = result[0] - order += 1 - result.sort() - return [elem for (k, order, elem) in result] - -def nlargest(n, iterable, key=None): - """Find the n largest elements in a dataset. 
- - Equivalent to: sorted(iterable, key=key, reverse=True)[:n] - """ - - # Short-cut for n==1 is to use max() - if n == 1: - it = iter(iterable) - sentinel = object() - result = max(it, default=sentinel, key=key) - return [] if result is sentinel else [result] - - # When n>=size, it's faster to use sorted() - try: - size = len(iterable) - except (TypeError, AttributeError): - pass - else: - if n >= size: - return sorted(iterable, key=key, reverse=True)[:n] - - # When key is none, use simpler decoration - if key is None: - it = iter(iterable) - result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)] - if not result: - return result - heapify(result) - top = result[0][0] - order = -n - _heapreplace = heapreplace - for elem in it: - if top < elem: - _heapreplace(result, (elem, order)) - top, _order = result[0] - order -= 1 - result.sort(reverse=True) - return [elem for (elem, order) in result] - - # General case, slowest method - it = iter(iterable) - result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)] - if not result: - return result - heapify(result) - top = result[0][0] - order = -n - _heapreplace = heapreplace - for elem in it: - k = key(elem) - if top < k: - _heapreplace(result, (k, order, elem)) - top, _order, _elem = result[0] - order -= 1 - result.sort(reverse=True) - return [elem for (k, order, elem) in result] - -# If available, use C implementation -try: - from _heapq import * -except ImportError: - pass -try: - from _heapq import _heapreplace_max -except ImportError: - pass -try: - from _heapq import _heapify_max -except ImportError: - pass -try: - from _heapq import _heappop_max -except ImportError: - pass - - -if __name__ == "__main__": - - import doctest # pragma: no cover - print(doctest.testmod()) # pragma: no cover diff --git a/Python313_13_x64_Template/Lib/hmac.py b/Python313_13_x64_Template/Lib/hmac.py deleted file mode 100644 index a49f2aeb..00000000 --- a/Python313_13_x64_Template/Lib/hmac.py +++ /dev/null @@ -1,220 +0,0 @@ -"""HMAC (Keyed-Hashing for Message Authentication) module. - -Implements the HMAC algorithm as described by RFC 2104. -""" - -import warnings as _warnings -try: - import _hashlib as _hashopenssl -except ImportError: - _hashopenssl = None - _functype = None - from _operator import _compare_digest as compare_digest -else: - compare_digest = _hashopenssl.compare_digest - _functype = type(_hashopenssl.openssl_sha256) # builtin type - -import hashlib as _hashlib - -trans_5C = bytes((x ^ 0x5C) for x in range(256)) -trans_36 = bytes((x ^ 0x36) for x in range(256)) - -# The size of the digests returned by HMAC depends on the underlying -# hashing module used. Use digest_size from the instance of HMAC instead. -digest_size = None - - -class HMAC: - """RFC 2104 HMAC class. Also complies with RFC 4231. - - This supports the API for Cryptographic Hash Functions (PEP 247). - """ - blocksize = 64 # 512-bit HMAC; can be changed in subclasses. - - __slots__ = ( - "_hmac", "_inner", "_outer", "block_size", "digest_size" - ) - - def __init__(self, key, msg=None, digestmod=''): - """Create a new HMAC object. - - key: bytes or buffer, key for the keyed hash object. - msg: bytes or buffer, Initial input for the hash or None. - digestmod: A hash name suitable for hashlib.new(). *OR* - A hashlib constructor returning a new hash object. *OR* - A module supporting PEP 247. - - Required as of 3.8, despite its position after the optional - msg argument. Passing it as a keyword argument is - recommended, though not required for legacy API reasons. 
- """ - - if not isinstance(key, (bytes, bytearray)): - raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__) - - if not digestmod: - raise TypeError("Missing required argument 'digestmod'.") - - if _hashopenssl and isinstance(digestmod, (str, _functype)): - try: - self._init_hmac(key, msg, digestmod) - except _hashopenssl.UnsupportedDigestmodError: - self._init_old(key, msg, digestmod) - else: - self._init_old(key, msg, digestmod) - - def _init_hmac(self, key, msg, digestmod): - self._hmac = _hashopenssl.hmac_new(key, msg, digestmod=digestmod) - self.digest_size = self._hmac.digest_size - self.block_size = self._hmac.block_size - - def _init_old(self, key, msg, digestmod): - if callable(digestmod): - digest_cons = digestmod - elif isinstance(digestmod, str): - digest_cons = lambda d=b'': _hashlib.new(digestmod, d) - else: - digest_cons = lambda d=b'': digestmod.new(d) - - self._hmac = None - self._outer = digest_cons() - self._inner = digest_cons() - self.digest_size = self._inner.digest_size - - if hasattr(self._inner, 'block_size'): - blocksize = self._inner.block_size - if blocksize < 16: - _warnings.warn('block_size of %d seems too small; using our ' - 'default of %d.' % (blocksize, self.blocksize), - RuntimeWarning, 2) - blocksize = self.blocksize - else: - _warnings.warn('No block_size attribute on given digest object; ' - 'Assuming %d.' % (self.blocksize), - RuntimeWarning, 2) - blocksize = self.blocksize - - if len(key) > blocksize: - key = digest_cons(key).digest() - - # self.blocksize is the default blocksize. self.block_size is - # effective block size as well as the public API attribute. - self.block_size = blocksize - - key = key.ljust(blocksize, b'\0') - self._outer.update(key.translate(trans_5C)) - self._inner.update(key.translate(trans_36)) - if msg is not None: - self.update(msg) - - @property - def name(self): - if self._hmac: - return self._hmac.name - else: - return f"hmac-{self._inner.name}" - - def update(self, msg): - """Feed data from msg into this hashing object.""" - inst = self._hmac or self._inner - inst.update(msg) - - def copy(self): - """Return a separate copy of this hashing object. - - An update to this copy won't affect the original object. - """ - # Call __new__ directly to avoid the expensive __init__. - other = self.__class__.__new__(self.__class__) - other.digest_size = self.digest_size - other.block_size = self.block_size - if self._hmac: - other._hmac = self._hmac.copy() - other._inner = other._outer = None - else: - other._hmac = None - other._inner = self._inner.copy() - other._outer = self._outer.copy() - return other - - def _current(self): - """Return a hash object for the current state. - - To be used only internally with digest() and hexdigest(). - """ - if self._hmac: - return self._hmac - else: - h = self._outer.copy() - h.update(self._inner.digest()) - return h - - def digest(self): - """Return the hash value of this hashing object. - - This returns the hmac value as bytes. The object is - not altered in any way by this function; you can continue - updating the object after calling this function. - """ - h = self._current() - return h.digest() - - def hexdigest(self): - """Like digest(), but returns a string of hexadecimal digits instead. - """ - h = self._current() - return h.hexdigest() - -def new(key, msg=None, digestmod=''): - """Create a new hashing object and return it. - - key: bytes or buffer, The starting key for the hash. - msg: bytes or buffer, Initial input for the hash, or None. 
- digestmod: A hash name suitable for hashlib.new(). *OR* - A hashlib constructor returning a new hash object. *OR* - A module supporting PEP 247. - - Required as of 3.8, despite its position after the optional - msg argument. Passing it as a keyword argument is - recommended, though not required for legacy API reasons. - - You can now feed arbitrary bytes into the object using its update() - method, and can ask for the hash value at any time by calling its digest() - or hexdigest() methods. - """ - return HMAC(key, msg, digestmod) - - -def digest(key, msg, digest): - """Fast inline implementation of HMAC. - - key: bytes or buffer, The key for the keyed hash object. - msg: bytes or buffer, Input message. - digest: A hash name suitable for hashlib.new() for best performance. *OR* - A hashlib constructor returning a new hash object. *OR* - A module supporting PEP 247. - """ - if _hashopenssl is not None and isinstance(digest, (str, _functype)): - try: - return _hashopenssl.hmac_digest(key, msg, digest) - except _hashopenssl.UnsupportedDigestmodError: - pass - - if callable(digest): - digest_cons = digest - elif isinstance(digest, str): - digest_cons = lambda d=b'': _hashlib.new(digest, d) - else: - digest_cons = lambda d=b'': digest.new(d) - - inner = digest_cons() - outer = digest_cons() - blocksize = getattr(inner, 'block_size', 64) - if len(key) > blocksize: - key = digest_cons(key).digest() - key = key + b'\x00' * (blocksize - len(key)) - inner.update(key.translate(trans_36)) - outer.update(key.translate(trans_5C)) - inner.update(msg) - outer.update(inner.digest()) - return outer.digest() diff --git a/Python313_13_x64_Template/Lib/html/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/html/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index d59862f5..00000000 Binary files a/Python313_13_x64_Template/Lib/html/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/html/__pycache__/entities.cpython-313.pyc b/Python313_13_x64_Template/Lib/html/__pycache__/entities.cpython-313.pyc deleted file mode 100644 index 7afe6f3d..00000000 Binary files a/Python313_13_x64_Template/Lib/html/__pycache__/entities.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/html/__pycache__/parser.cpython-313.pyc b/Python313_13_x64_Template/Lib/html/__pycache__/parser.cpython-313.pyc deleted file mode 100644 index 5d0f77b9..00000000 Binary files a/Python313_13_x64_Template/Lib/html/__pycache__/parser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/html/parser.py b/Python313_13_x64_Template/Lib/html/parser.py deleted file mode 100644 index 3aa86f8f..00000000 --- a/Python313_13_x64_Template/Lib/html/parser.py +++ /dev/null @@ -1,553 +0,0 @@ -"""A parser for HTML and XHTML.""" - -# This file is based on sgmllib.py, but the API is slightly different. - -# XXX There should be a way to distinguish between PCDATA (parsed -# character data -- the normal case), RCDATA (replaceable character -# data -- only char and entity references and end tags are special) -# and CDATA (character data -- only end tags are special). 
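A quick sketch of the PCDATA/CDATA distinction noted in the comment above, using the public html.parser API (the ShowData subclass is invented for illustration): inside a CDATA content element such as script, markup is not parsed and reaches handle_data() verbatim.

from html.parser import HTMLParser

class ShowData(HTMLParser):
    def handle_starttag(self, tag, attrs):
        print("start:", tag)

    def handle_data(self, data):
        print("data:", repr(data))

p = ShowData()
p.feed("<p><b>bold</b></p>")                      # PCDATA: <b> is parsed as a tag
p.feed("<script>if (a < b) x = '<b>';</script>")  # CDATA: content is raw data
p.close()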
- - -import re -import _markupbase - -from html import unescape -from html.entities import html5 as html5_entities - - -__all__ = ['HTMLParser'] - -# Regular expressions used for parsing - -interesting_normal = re.compile('[&<]') -incomplete = re.compile('&[a-zA-Z#]') - -entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]') -charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]') -incomplete_charref = re.compile('&#(?:[0-9]|[xX][0-9a-fA-F])') -attr_charref = re.compile(r'&(#[0-9]+|#[xX][0-9a-fA-F]+|[a-zA-Z][a-zA-Z0-9]*)[;=]?') - -starttagopen = re.compile('<[a-zA-Z]') -endtagopen = re.compile('') -commentclose = re.compile(r'--!?>') -commentabruptclose = re.compile(r'-?>') -# Note: -# 1) if you change tagfind/attrfind remember to update locatetagend too; -# 2) if you change tagfind/attrfind and/or locatetagend the parser will -# explode, so don't do it. -# see the HTML5 specs section "13.2.5.6 Tag open state", -# "13.2.5.8 Tag name state" and "13.2.5.33 Attribute name state". -# https://html.spec.whatwg.org/multipage/parsing.html#tag-open-state -# https://html.spec.whatwg.org/multipage/parsing.html#tag-name-state -# https://html.spec.whatwg.org/multipage/parsing.html#attribute-name-state -tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />]*)(?:[\t\n\r\f ]|/(?!>))*') -attrfind_tolerant = re.compile(r""" - ( - (?<=['"\t\n\r\f /])[^\t\n\r\f />][^\t\n\r\f /=>]* # attribute name - ) - ([\t\n\r\f ]*=[\t\n\r\f ]* # value indicator - ('[^']*' # LITA-enclosed value - |"[^"]*" # LIT-enclosed value - |(?!['"])[^>\t\n\r\f ]* # bare value - ) - )? - (?:[\t\n\r\f ]|/(?!>))* # possibly followed by a space -""", re.VERBOSE) -locatetagend = re.compile(r""" - [a-zA-Z][^\t\n\r\f />]* # tag name - [\t\n\r\f /]* # optional whitespace before attribute name - (?:(?<=['"\t\n\r\f /])[^\t\n\r\f />][^\t\n\r\f /=>]* # attribute name - (?:[\t\n\r\f ]*=[\t\n\r\f ]* # value indicator - (?:'[^']*' # LITA-enclosed value - |"[^"]*" # LIT-enclosed value - |(?!['"])[^>\t\n\r\f ]* # bare value - ) - )? - [\t\n\r\f /]* # possibly followed by a space - )* - >? -""", re.VERBOSE) -# The following variables are not used, but are temporarily left for -# backward compatibility. -locatestarttagend_tolerant = re.compile(r""" - <[a-zA-Z][^\t\n\r\f />\x00]* # tag name - (?:[\s/]* # optional whitespace before attribute name - (?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name - (?:\s*=+\s* # value indicator - (?:'[^']*' # LITA-enclosed value - |"[^"]*" # LIT-enclosed value - |(?!['"])[^>\s]* # bare value - ) - \s* # possibly followed by a space - )?(?:\s|/(?!>))* - )* - )? - \s* # trailing whitespace -""", re.VERBOSE) -endendtag = re.compile('>') -endtagfind = re.compile(r'') - -# Character reference processing logic specific to attribute values -# See: https://html.spec.whatwg.org/multipage/parsing.html#named-character-reference-state -def _replace_attr_charref(match): - ref = match.group(0) - # Numeric / hex char refs must always be unescaped - if ref.startswith('&#'): - return unescape(ref) - # Named character / entity references must only be unescaped - # if they are an exact match, and they are not followed by an equals sign - if not ref.endswith('=') and ref[1:] in html5_entities: - return unescape(ref) - # Otherwise do not unescape - return ref - -def _unescape_attrvalue(s): - return attr_charref.sub(_replace_attr_charref, s) - - -class HTMLParser(_markupbase.ParserBase): - """Find tags and other markup and call handler functions. - - Usage: - p = HTMLParser() - p.feed(data) - ... 
- p.close() - - Start tags are handled by calling self.handle_starttag() or - self.handle_startendtag(); end tags by self.handle_endtag(). The - data between tags is passed from the parser to the derived class - by calling self.handle_data() with the data as argument (the data - may be split up in arbitrary chunks). If convert_charrefs is - True the character references are converted automatically to the - corresponding Unicode character (and self.handle_data() is no - longer split in chunks), otherwise they are passed by calling - self.handle_entityref() or self.handle_charref() with the string - containing respectively the named or numeric reference as the - argument. - """ - - # See the HTML5 specs section "13.4 Parsing HTML fragments". - # https://html.spec.whatwg.org/multipage/parsing.html#parsing-html-fragments - # CDATA_CONTENT_ELEMENTS are parsed in RAWTEXT mode - CDATA_CONTENT_ELEMENTS = ("script", "style", "xmp", "iframe", "noembed", "noframes") - RCDATA_CONTENT_ELEMENTS = ("textarea", "title") - - def __init__(self, *, convert_charrefs=True, scripting=False): - """Initialize and reset this instance. - - If convert_charrefs is true (the default), all character references - are automatically converted to the corresponding Unicode characters. - - If *scripting* is false (the default), the content of the - ``noscript`` element is parsed normally; if it's true, - it's returned as is without being parsed. - """ - super().__init__() - self.convert_charrefs = convert_charrefs - self.scripting = scripting - self.reset() - - def reset(self): - """Reset this instance. Loses all unprocessed data.""" - self.rawdata = '' - self.lasttag = '???' - self.interesting = interesting_normal - self.cdata_elem = None - self._support_cdata = True - self._escapable = True - super().reset() - - def feed(self, data): - r"""Feed data to the parser. - - Call this as often as you want, with as little or as much text - as you want (may include '\n'). - """ - self.rawdata = self.rawdata + data - self.goahead(0) - - def close(self): - """Handle any buffered data.""" - self.goahead(1) - - __starttag_text = None - - def get_starttag_text(self): - """Return full source of start tag: '<...>'.""" - return self.__starttag_text - - def set_cdata_mode(self, elem, *, escapable=False): - self.cdata_elem = elem.lower() - self._escapable = escapable - if self.cdata_elem == 'plaintext': - self.interesting = re.compile(r'\Z') - elif escapable and not self.convert_charrefs: - self.interesting = re.compile(r'&|])' % self.cdata_elem, - re.IGNORECASE|re.ASCII) - else: - self.interesting = re.compile(r'])' % self.cdata_elem, - re.IGNORECASE|re.ASCII) - - def clear_cdata_mode(self): - self.interesting = interesting_normal - self.cdata_elem = None - self._escapable = True - - def _set_support_cdata(self, flag=True): - """Enable or disable support of the CDATA sections. - If enabled, "<[CDATA[" starts a CDATA section which ends with "]]>". - If disabled, "<[CDATA[" starts a bogus comments which ends with ">". - - This method is not called by default. Its purpose is to be called - in custom handle_starttag() and handle_endtag() methods, with - value that depends on the adjusted current node. - See https://html.spec.whatwg.org/multipage/parsing.html#markup-declaration-open-state - for details. - """ - self._support_cdata = flag - - # Internal -- handle data as far as reasonable. May leave state - # and data to be processed by a subsequent call. If 'end' is - # true, force handling all data as if followed by EOF marker. 
- def goahead(self, end): - rawdata = self.rawdata - i = 0 - n = len(rawdata) - while i < n: - if self.convert_charrefs and not self.cdata_elem: - j = rawdata.find('<', i) - if j < 0: - # if we can't find the next <, either we are at the end - # or there's more text incoming. If the latter is True, - # we can't pass the text to handle_data in case we have - # a charref cut in half at end. Try to determine if - # this is the case before proceeding by looking for an - # & near the end and see if it's followed by a space or ;. - amppos = rawdata.rfind('&', max(i, n-34)) - if (amppos >= 0 and - not re.compile(r'[\t\n\r\f ;]').search(rawdata, amppos)): - break # wait till we get all the text - j = n - else: - match = self.interesting.search(rawdata, i) # < or & - if match: - j = match.start() - else: - if self.cdata_elem: - break - j = n - if i < j: - if self.convert_charrefs and self._escapable: - self.handle_data(unescape(rawdata[i:j])) - else: - self.handle_data(rawdata[i:j]) - i = self.updatepos(i, j) - if i == n: break - startswith = rawdata.startswith - if startswith('<', i): - if starttagopen.match(rawdata, i): # < + letter - k = self.parse_starttag(i) - elif startswith("', i+9) - if j < 0: - return -1 - self.unknown_decl(rawdata[i+3: j]) - return j + 3 - elif rawdata[i:i+9].lower() == ' - gtpos = rawdata.find('>', i+9) - if gtpos == -1: - return -1 - self.handle_decl(rawdata[i+2:gtpos]) - return gtpos+1 - else: - return self.parse_bogus_comment(i) - - # Internal -- parse comment, return length or -1 if not terminated - # see https://html.spec.whatwg.org/multipage/parsing.html#comment-start-state - def parse_comment(self, i, report=True): - rawdata = self.rawdata - assert rawdata.startswith(' \n - # \" --> " - # - return _unquote_sub(_unquote_replace, str) - -# The _getdate() routine is used to set the expiration time in the cookie's HTTP -# header. By default, _getdate() returns the current time in the appropriate -# "expires" format for a Set-Cookie header. The one optional argument is an -# offset from now, in seconds. For example, an offset of -3600 means "one hour -# ago". The offset may be a floating-point number. -# - -_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - -_monthname = [None, - 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', - 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - -def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname): - from time import gmtime, time - now = time() - year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future) - return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \ - (weekdayname[wd], day, monthname[month], year, hh, mm, ss) - - -class Morsel(dict): - """A class to hold ONE (key, value) pair. - - In a cookie, each such pair may have several attributes, so this class is - used to keep the attributes associated with the appropriate key,value pair. - This class also includes a coded_value attribute, which is used to hold - the network representation of the value. - """ - # RFC 2109 lists these attributes as reserved: - # path comment domain - # max-age secure version - # - # For historical reasons, these attributes are also reserved: - # expires - # - # This is an extension from Microsoft: - # httponly - # - # This dictionary provides a mapping from the lowercase - # variant on the left to the appropriate traditional - # formatting on the right. 
- _reserved = { - "expires" : "expires", - "path" : "Path", - "comment" : "Comment", - "domain" : "Domain", - "max-age" : "Max-Age", - "secure" : "Secure", - "httponly" : "HttpOnly", - "version" : "Version", - "samesite" : "SameSite", - } - - _flags = {'secure', 'httponly'} - - def __init__(self): - # Set defaults - self._key = self._value = self._coded_value = None - - # Set default attributes - for key in self._reserved: - dict.__setitem__(self, key, "") - - @property - def key(self): - return self._key - - @property - def value(self): - return self._value - - @property - def coded_value(self): - return self._coded_value - - def __setitem__(self, K, V): - K = K.lower() - if not K in self._reserved: - raise CookieError("Invalid attribute %r" % (K,)) - if _has_control_character(K, V): - raise CookieError(f"Control characters are not allowed in cookies {K!r} {V!r}") - dict.__setitem__(self, K, V) - - def setdefault(self, key, val=None): - key = key.lower() - if key not in self._reserved: - raise CookieError("Invalid attribute %r" % (key,)) - if _has_control_character(key, val): - raise CookieError("Control characters are not allowed in cookies %r %r" % (key, val,)) - return dict.setdefault(self, key, val) - - def __eq__(self, morsel): - if not isinstance(morsel, Morsel): - return NotImplemented - return (dict.__eq__(self, morsel) and - self._value == morsel._value and - self._key == morsel._key and - self._coded_value == morsel._coded_value) - - __ne__ = object.__ne__ - - def copy(self): - morsel = Morsel() - dict.update(morsel, self) - morsel.__dict__.update(self.__dict__) - return morsel - - def update(self, values): - data = {} - for key, val in dict(values).items(): - key = key.lower() - if key not in self._reserved: - raise CookieError("Invalid attribute %r" % (key,)) - if _has_control_character(key, val): - raise CookieError("Control characters are not allowed in " - f"cookies {key!r} {val!r}") - data[key] = val - dict.update(self, data) - - def __ior__(self, values): - self.update(values) - return self - - def isReservedKey(self, K): - return K.lower() in self._reserved - - def set(self, key, val, coded_val): - if key.lower() in self._reserved: - raise CookieError('Attempt to set a reserved key %r' % (key,)) - if not _is_legal_key(key): - raise CookieError('Illegal key %r' % (key,)) - if _has_control_character(key, val, coded_val): - raise CookieError( - "Control characters are not allowed in cookies %r %r %r" % (key, val, coded_val,)) - - # It's a good key, so save it. 
- self._key = key - self._value = val - self._coded_value = coded_val - - def __getstate__(self): - return { - 'key': self._key, - 'value': self._value, - 'coded_value': self._coded_value, - } - - def __setstate__(self, state): - key = state['key'] - value = state['value'] - coded_value = state['coded_value'] - if _has_control_character(key, value, coded_value): - raise CookieError("Control characters are not allowed in cookies " - f"{key!r} {value!r} {coded_value!r}") - self._key = key - self._value = value - self._coded_value = coded_value - - def output(self, attrs=None, header="Set-Cookie:"): - return "%s %s" % (header, self.OutputString(attrs)) - - __str__ = output - - def __repr__(self): - return '<%s: %s>' % (self.__class__.__name__, self.OutputString()) - - def js_output(self, attrs=None): - # Print javascript - output_string = self.OutputString(attrs) - if _has_control_character(output_string): - raise CookieError("Control characters are not allowed in cookies") - return """ - - """ % (output_string.replace('"', r'\"')) - - def OutputString(self, attrs=None): - # Build up our result - # - result = [] - append = result.append - - # First, the key=value pair - append("%s=%s" % (self.key, self.coded_value)) - - # Now add any defined attributes - if attrs is None: - attrs = self._reserved - items = sorted(self.items()) - for key, value in items: - if value == "": - continue - if key not in attrs: - continue - if key == "expires" and isinstance(value, int): - append("%s=%s" % (self._reserved[key], _getdate(value))) - elif key == "max-age" and isinstance(value, int): - append("%s=%d" % (self._reserved[key], value)) - elif key == "comment" and isinstance(value, str): - append("%s=%s" % (self._reserved[key], _quote(value))) - elif key in self._flags: - if value: - append(str(self._reserved[key])) - else: - append("%s=%s" % (self._reserved[key], value)) - - # Return the result - return _semispacejoin(result) - - __class_getitem__ = classmethod(types.GenericAlias) - - -# -# Pattern for finding cookie -# -# This used to be strict parsing based on the RFC2109 and RFC2068 -# specifications. I have since discovered that MSIE 3.0x doesn't -# follow the character rules outlined in those specs. As a -# result, the parsing rules here are less strict. -# - -_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=" -_LegalValueChars = _LegalKeyChars + r'\[\]' -_CookiePattern = re.compile(r""" - \s* # Optional whitespace at start of cookie - (?P # Start of group 'key' - [""" + _LegalKeyChars + r"""]+? # Any word of at least one letter - ) # End of group 'key' - ( # Optional group: there may not be a value. - \s*=\s* # Equal Sign - (?P # Start of group 'val' - "(?:[^\\"]|\\.)*" # Any double-quoted string - | # or - # Special case for "expires" attr - (\w{3,6}day|\w{3}),\s # Day of the week or abbreviated day - [\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Date and time in specific format - | # or - [""" + _LegalValueChars + r"""]* # Any word or empty string - ) # End of group 'val' - )? # End of optional value group - \s* # Any number of spaces. - (\s+|;|$) # Ending either at space, semicolon, or EOS. - """, re.ASCII | re.VERBOSE) # re.ASCII may be removed if safe. - - -# At long last, here is the cookie class. Using this class is almost just like -# using a dictionary. See this module's docstring for example usage. 
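A short sketch of that dictionary-style usage, via the SimpleCookie subclass defined at the end of this module (the cookie names and values here are invented for illustration):

from http.cookies import SimpleCookie

c = SimpleCookie()
c["session"] = "abc123"            # str()-converted and quoted on assignment
c["session"]["path"] = "/"
c["session"]["httponly"] = True
print(c.output())                  # Set-Cookie: session=abc123; HttpOnly; Path=/

c.load("lang=en; theme=dark")      # parse an HTTP_COOKIE-style string
print(c["lang"].value)             # en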
-# -class BaseCookie(dict): - """A container class for a set of Morsels.""" - - def value_decode(self, val): - """real_value, coded_value = value_decode(STRING) - Called prior to setting a cookie's value from the network - representation. The VALUE is the value read from HTTP - header. - Override this function to modify the behavior of cookies. - """ - return val, val - - def value_encode(self, val): - """real_value, coded_value = value_encode(VALUE) - Called prior to setting a cookie's value from the dictionary - representation. The VALUE is the value being assigned. - Override this function to modify the behavior of cookies. - """ - strval = str(val) - return strval, strval - - def __init__(self, input=None): - if input: - self.load(input) - - def __set(self, key, real_value, coded_value): - """Private method for setting a cookie's value""" - M = self.get(key, Morsel()) - M.set(key, real_value, coded_value) - dict.__setitem__(self, key, M) - - def __setitem__(self, key, value): - """Dictionary style assignment.""" - if isinstance(value, Morsel): - # allow assignment of constructed Morsels (e.g. for pickling) - dict.__setitem__(self, key, value) - else: - rval, cval = self.value_encode(value) - self.__set(key, rval, cval) - - def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"): - """Return a string suitable for HTTP.""" - result = [] - items = sorted(self.items()) - for key, value in items: - value_output = value.output(attrs, header) - if _has_control_character(value_output): - raise CookieError("Control characters are not allowed in cookies") - result.append(value_output) - return sep.join(result) - - __str__ = output - - def __repr__(self): - l = [] - items = sorted(self.items()) - for key, value in items: - l.append('%s=%s' % (key, repr(value.value))) - return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l)) - - def js_output(self, attrs=None): - """Return a string suitable for JavaScript.""" - result = [] - items = sorted(self.items()) - for key, value in items: - result.append(value.js_output(attrs)) - return _nulljoin(result) - - def load(self, rawdata): - """Load cookies from a string (presumably HTTP_COOKIE) or - from a dictionary. Loading cookies from a dictionary 'd' - is equivalent to calling: - map(Cookie.__setitem__, d.keys(), d.values()) - """ - if isinstance(rawdata, str): - self.__parse_string(rawdata) - else: - # self.update() wouldn't call our custom __setitem__ - for key, value in rawdata.items(): - self[key] = value - return - - def __parse_string(self, str, patt=_CookiePattern): - i = 0 # Our starting point - n = len(str) # Length of string - parsed_items = [] # Parsed (type, key, value) triples - morsel_seen = False # A key=value pair was previously encountered - - TYPE_ATTRIBUTE = 1 - TYPE_KEYVALUE = 2 - - # We first parse the whole cookie string and reject it if it's - # syntactically invalid (this helps avoid some classes of injection - # attacks). - while 0 <= i < n: - # Start looking for a cookie - match = patt.match(str, i) - if not match: - # No more cookies - break - - key, value = match.group("key"), match.group("val") - i = match.end(0) - - if key[0] == "$": - if not morsel_seen: - # We ignore attributes which pertain to the cookie - # mechanism as a whole, such as "$Version". - # See RFC 2965. (Does anyone care?) 
- continue - parsed_items.append((TYPE_ATTRIBUTE, key[1:], value)) - elif key.lower() in Morsel._reserved: - if not morsel_seen: - # Invalid cookie string - return - if value is None: - if key.lower() in Morsel._flags: - parsed_items.append((TYPE_ATTRIBUTE, key, True)) - else: - # Invalid cookie string - return - else: - parsed_items.append((TYPE_ATTRIBUTE, key, _unquote(value))) - elif value is not None: - parsed_items.append((TYPE_KEYVALUE, key, self.value_decode(value))) - morsel_seen = True - else: - # Invalid cookie string - return - - # The cookie string is valid, apply it. - M = None # current morsel - for tp, key, value in parsed_items: - if tp == TYPE_ATTRIBUTE: - assert M is not None - M[key] = value - else: - assert tp == TYPE_KEYVALUE - rval, cval = value - self.__set(key, rval, cval) - M = self[key] - - -class SimpleCookie(BaseCookie): - """ - SimpleCookie supports strings as cookie values. When setting - the value using the dictionary assignment notation, SimpleCookie - calls the builtin str() to convert the value to a string. Values - received from HTTP are kept as strings. - """ - def value_decode(self, val): - return _unquote(val), val - - def value_encode(self, val): - strval = str(val) - return strval, _quote(strval) diff --git a/Python313_13_x64_Template/Lib/http/server.py b/Python313_13_x64_Template/Lib/http/server.py deleted file mode 100644 index 0ec47900..00000000 --- a/Python313_13_x64_Template/Lib/http/server.py +++ /dev/null @@ -1,1351 +0,0 @@ -"""HTTP server classes. - -Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see -SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST, -and (deprecated) CGIHTTPRequestHandler for CGI scripts. - -It does, however, optionally implement HTTP/1.1 persistent connections. - -Notes on CGIHTTPRequestHandler ------------------------------- - -This class is deprecated. It implements GET and POST requests to cgi-bin scripts. - -If the os.fork() function is not present (Windows), subprocess.Popen() is used, -with slightly altered but never documented semantics. Use from a threaded -process is likely to trigger a warning at os.fork() time. - -In all cases, the implementation is intentionally naive -- all -requests are executed synchronously. - -SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL --- it may execute arbitrary Python code or external programs. - -Note that status code 200 is sent prior to execution of a CGI script, so -scripts cannot send other status codes such as 302 (redirect). - -XXX To do: - -- log requests even later (to capture byte count) -- log user-agent header and other interesting goodies -- send error log to separate file -""" - - -# See also: -# -# HTTP Working Group T. Berners-Lee -# INTERNET-DRAFT R. T. Fielding -# H. Frystyk Nielsen -# Expires September 8, 1995 March 8, 1995 -# -# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt -# -# and -# -# Network Working Group R. Fielding -# Request for Comments: 2616 et al -# Obsoletes: 2068 June 1999 -# Category: Standards Track -# -# URL: http://www.faqs.org/rfcs/rfc2616.html - -# Log files -# --------- -# -# Here's a quote from the NCSA httpd docs about log file format. -# -# | The logfile format is as follows. Each line consists of: -# | -# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb -# | -# | host: Either the DNS name or the IP number of the remote client -# | rfc931: Any information returned by identd for this person, -# | - otherwise. 
-# | authuser: If user sent a userid for authentication, the user name, -# | - otherwise. -# | DD: Day -# | Mon: Month (calendar name) -# | YYYY: Year -# | hh: hour (24-hour format, the machine's timezone) -# | mm: minutes -# | ss: seconds -# | request: The first line of the HTTP request as sent by the client. -# | ddd: the status code returned by the server, - if not available. -# | bbbb: the total number of bytes sent, -# | *not including the HTTP/1.0 header*, - if not available -# | -# | You can determine the name of the file accessed through request. -# -# (Actually, the latter is only true if you know the server configuration -# at the time the request was made!) - -__version__ = "0.6" - -__all__ = [ - "HTTPServer", "ThreadingHTTPServer", "BaseHTTPRequestHandler", - "SimpleHTTPRequestHandler", "CGIHTTPRequestHandler", -] - -import copy -import datetime -import email.utils -import html -import http.client -import io -import itertools -import mimetypes -import os -import posixpath -import select -import shutil -import socket # For gethostbyaddr() -import socketserver -import sys -import time -import urllib.parse - -from http import HTTPStatus - - -# Default error message template -DEFAULT_ERROR_MESSAGE = """\ - - - - - Error response - - -

-        <h1>Error response</h1>
-        <p>Error code: %(code)d</p>
-        <p>Message: %(message)s.</p>
-        <p>Error code explanation: %(code)s - %(explain)s.</p>
- - -""" - -DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8" - -# Data larger than this will be read in chunks, to prevent extreme -# overallocation. -_MIN_READ_BUF_SIZE = 1 << 20 - -class HTTPServer(socketserver.TCPServer): - - allow_reuse_address = 1 # Seems to make sense in testing environment - - def server_bind(self): - """Override server_bind to store the server name.""" - socketserver.TCPServer.server_bind(self) - host, port = self.server_address[:2] - self.server_name = socket.getfqdn(host) - self.server_port = port - - -class ThreadingHTTPServer(socketserver.ThreadingMixIn, HTTPServer): - daemon_threads = True - - -class BaseHTTPRequestHandler(socketserver.StreamRequestHandler): - - """HTTP request handler base class. - - The following explanation of HTTP serves to guide you through the - code as well as to expose any misunderstandings I may have about - HTTP (so you don't need to read the code to figure out I'm wrong - :-). - - HTTP (HyperText Transfer Protocol) is an extensible protocol on - top of a reliable stream transport (e.g. TCP/IP). The protocol - recognizes three parts to a request: - - 1. One line identifying the request type and path - 2. An optional set of RFC-822-style headers - 3. An optional data part - - The headers and data are separated by a blank line. - - The first line of the request has the form - - - - where is a (case-sensitive) keyword such as GET or POST, - is a string containing path information for the request, - and should be the string "HTTP/1.0" or "HTTP/1.1". - is encoded using the URL encoding scheme (using %xx to signify - the ASCII character with hex code xx). - - The specification specifies that lines are separated by CRLF but - for compatibility with the widest range of clients recommends - servers also handle LF. Similarly, whitespace in the request line - is treated sensibly (allowing multiple spaces between components - and allowing trailing whitespace). - - Similarly, for output, lines ought to be separated by CRLF pairs - but most clients grok LF characters just fine. - - If the first line of the request has the form - - - - (i.e. is left out) then this is assumed to be an HTTP - 0.9 request; this form has no optional headers and data part and - the reply consists of just the data. - - The reply form of the HTTP 1.x protocol again has three parts: - - 1. One line giving the response code - 2. An optional set of RFC-822-style headers - 3. The data - - Again, the headers and data are separated by a blank line. - - The response code line has the form - - - - where is the protocol version ("HTTP/1.0" or "HTTP/1.1"), - is a 3-digit response code indicating success or - failure of the request, and is an optional - human-readable string explaining what the response code means. - - This server parses the request and the headers, and then calls a - function specific to the request type (). Specifically, - a request SPAM will be handled by a method do_SPAM(). If no - such method exists the server sends an error response to the - client. If it exists, it is called with no arguments: - - do_SPAM() - - Note that the request name is case sensitive (i.e. SPAM and spam - are different requests). 
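A minimal sketch of the do_<command>() dispatch just described (the handler name and port are invented for illustration): defining do_GET() is all a subclass needs in order to answer GET requests.

from http.server import BaseHTTPRequestHandler, HTTPServer

class HelloHandler(BaseHTTPRequestHandler):
    def do_GET(self):                      # invoked for every "GET <path> HTTP/1.x"
        body = b"hello\n"
        self.send_response(200)
        self.send_header("Content-Type", "text/plain")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)

# HTTPServer(("", 8000), HelloHandler).serve_forever()  # uncomment to serve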
- - The various request details are stored in instance variables: - - - client_address is the client IP address in the form (host, - port); - - - command, path and version are the broken-down request line; - - - headers is an instance of email.message.Message (or a derived - class) containing the header information; - - - rfile is a file object open for reading positioned at the - start of the optional input data part; - - - wfile is a file object open for writing. - - IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING! - - The first thing to be written must be the response line. Then - follow 0 or more header lines, then a blank line, and then the - actual data (if any). The meaning of the header lines depends on - the command executed by the server; in most cases, when data is - returned, there should be at least one header line of the form - - Content-type: / - - where and should be registered MIME types, - e.g. "text/html" or "text/plain". - - """ - - # The Python system version, truncated to its first component. - sys_version = "Python/" + sys.version.split()[0] - - # The server software version. You may want to override this. - # The format is multiple whitespace-separated strings, - # where each string is of the form name[/version]. - server_version = "BaseHTTP/" + __version__ - - error_message_format = DEFAULT_ERROR_MESSAGE - error_content_type = DEFAULT_ERROR_CONTENT_TYPE - - # The default request version. This only affects responses up until - # the point where the request line is parsed, so it mainly decides what - # the client gets back when sending a malformed request line. - # Most web servers default to HTTP 0.9, i.e. don't send a status line. - default_request_version = "HTTP/0.9" - - def parse_request(self): - """Parse a request (internal). - - The request should be stored in self.raw_requestline; the results - are in self.command, self.path, self.request_version and - self.headers. - - Return True for success, False for failure; on failure, any relevant - error response has already been sent back. - - """ - is_http_0_9 = False - self.command = None # set in case of error on the first line - self.request_version = version = self.default_request_version - self.close_connection = True - requestline = str(self.raw_requestline, 'iso-8859-1') - requestline = requestline.rstrip('\r\n') - self.requestline = requestline - words = requestline.split() - if len(words) == 0: - return False - - if len(words) >= 3: # Enough to determine protocol version - version = words[-1] - try: - if not version.startswith('HTTP/'): - raise ValueError - base_version_number = version.split('/', 1)[1] - version_number = base_version_number.split(".") - # RFC 2145 section 3.1 says there can be only one "." and - # - major and minor numbers MUST be treated as - # separate integers; - # - HTTP/2.4 is a lower version than HTTP/2.13, which in - # turn is lower than HTTP/12.3; - # - Leading zeros MUST be ignored by recipients. 
- if len(version_number) != 2: - raise ValueError - if any(not component.isdigit() for component in version_number): - raise ValueError("non digit in http version") - if any(len(component) > 10 for component in version_number): - raise ValueError("unreasonable length http version") - version_number = int(version_number[0]), int(version_number[1]) - except (ValueError, IndexError): - self.send_error( - HTTPStatus.BAD_REQUEST, - "Bad request version (%r)" % version) - return False - if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": - self.close_connection = False - if version_number >= (2, 0): - self.send_error( - HTTPStatus.HTTP_VERSION_NOT_SUPPORTED, - "Invalid HTTP version (%s)" % base_version_number) - return False - self.request_version = version - - if not 2 <= len(words) <= 3: - self.send_error( - HTTPStatus.BAD_REQUEST, - "Bad request syntax (%r)" % requestline) - return False - command, path = words[:2] - if len(words) == 2: - self.close_connection = True - if command != 'GET': - self.send_error( - HTTPStatus.BAD_REQUEST, - "Bad HTTP/0.9 request type (%r)" % command) - return False - is_http_0_9 = True - self.command, self.path = command, path - - # gh-87389: The purpose of replacing '//' with '/' is to protect - # against open redirect attacks possibly triggered if the path starts - # with '//' because http clients treat //path as an absolute URI - # without scheme (similar to http://path) rather than a path. - if self.path.startswith('//'): - self.path = '/' + self.path.lstrip('/') # Reduce to a single / - - # For HTTP/0.9, headers are not expected at all. - if is_http_0_9: - self.headers = {} - return True - - # Examine the headers and look for a Connection directive. - try: - self.headers = http.client.parse_headers(self.rfile, - _class=self.MessageClass) - except http.client.LineTooLong as err: - self.send_error( - HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE, - "Line too long", - str(err)) - return False - except http.client.HTTPException as err: - self.send_error( - HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE, - "Too many headers", - str(err) - ) - return False - - conntype = self.headers.get('Connection', "") - if conntype.lower() == 'close': - self.close_connection = True - elif (conntype.lower() == 'keep-alive' and - self.protocol_version >= "HTTP/1.1"): - self.close_connection = False - # Examine the headers and look for an Expect directive - expect = self.headers.get('Expect', "") - if (expect.lower() == "100-continue" and - self.protocol_version >= "HTTP/1.1" and - self.request_version >= "HTTP/1.1"): - if not self.handle_expect_100(): - return False - return True - - def handle_expect_100(self): - """Decide what to do with an "Expect: 100-continue" header. - - If the client is expecting a 100 Continue response, we must - respond with either a 100 Continue or a final response before - waiting for the request body. The default is to always respond - with a 100 Continue. You can behave differently (for example, - reject unauthorized requests) by overriding this method. - - This method should either return True (possibly after sending - a 100 Continue response) or send an error response and return - False. - - """ - self.send_response_only(HTTPStatus.CONTINUE) - self.end_headers() - return True - - def handle_one_request(self): - """Handle a single HTTP request. - - You normally don't need to override this method; see the class - __doc__ string for information on how to handle specific HTTP - commands such as GET and POST. 
- - """ - try: - self.raw_requestline = self.rfile.readline(65537) - if len(self.raw_requestline) > 65536: - self.requestline = '' - self.request_version = '' - self.command = '' - self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG) - return - if not self.raw_requestline: - self.close_connection = True - return - if not self.parse_request(): - # An error code has been sent, just exit - return - mname = 'do_' + self.command - if not hasattr(self, mname): - self.send_error( - HTTPStatus.NOT_IMPLEMENTED, - "Unsupported method (%r)" % self.command) - return - method = getattr(self, mname) - method() - self.wfile.flush() #actually send the response if not already done. - except TimeoutError as e: - #a read or a write timed out. Discard this connection - self.log_error("Request timed out: %r", e) - self.close_connection = True - return - - def handle(self): - """Handle multiple requests if necessary.""" - self.close_connection = True - - self.handle_one_request() - while not self.close_connection: - self.handle_one_request() - - def send_error(self, code, message=None, explain=None): - """Send and log an error reply. - - Arguments are - * code: an HTTP error code - 3 digits - * message: a simple optional 1 line reason phrase. - *( HTAB / SP / VCHAR / %x80-FF ) - defaults to short entry matching the response code - * explain: a detailed message defaults to the long entry - matching the response code. - - This sends an error response (so it must be called before any - output has been generated), logs the error, and finally sends - a piece of HTML explaining the error to the user. - - """ - - try: - shortmsg, longmsg = self.responses[code] - except KeyError: - shortmsg, longmsg = '???', '???' - if message is None: - message = shortmsg - if explain is None: - explain = longmsg - self.log_error("code %d, message %s", code, message) - self.send_response(code, message) - self.send_header('Connection', 'close') - - # Message body is omitted for cases described in: - # - RFC7230: 3.3. 1xx, 204(No Content), 304(Not Modified) - # - RFC7231: 6.3.6. 205(Reset Content) - body = None - if (code >= 200 and - code not in (HTTPStatus.NO_CONTENT, - HTTPStatus.RESET_CONTENT, - HTTPStatus.NOT_MODIFIED)): - # HTML encode to prevent Cross Site Scripting attacks - # (see bug #1100201) - content = (self.error_message_format % { - 'code': code, - 'message': html.escape(message, quote=False), - 'explain': html.escape(explain, quote=False) - }) - body = content.encode('UTF-8', 'replace') - self.send_header("Content-Type", self.error_content_type) - self.send_header('Content-Length', str(len(body))) - self.end_headers() - - if self.command != 'HEAD' and body: - self.wfile.write(body) - - def send_response(self, code, message=None): - """Add the response header to the headers buffer and log the - response code. - - Also send two standard headers with the server software - version and the current date. 
- - """ - self.log_request(code) - self.send_response_only(code, message) - self.send_header('Server', self.version_string()) - self.send_header('Date', self.date_time_string()) - - def send_response_only(self, code, message=None): - """Send the response header only.""" - if self.request_version != 'HTTP/0.9': - if message is None: - if code in self.responses: - message = self.responses[code][0] - else: - message = '' - if not hasattr(self, '_headers_buffer'): - self._headers_buffer = [] - self._headers_buffer.append(("%s %d %s\r\n" % - (self.protocol_version, code, message)).encode( - 'latin-1', 'strict')) - - def send_header(self, keyword, value): - """Send a MIME header to the headers buffer.""" - if self.request_version != 'HTTP/0.9': - if not hasattr(self, '_headers_buffer'): - self._headers_buffer = [] - self._headers_buffer.append( - ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict')) - - if keyword.lower() == 'connection': - if value.lower() == 'close': - self.close_connection = True - elif value.lower() == 'keep-alive': - self.close_connection = False - - def end_headers(self): - """Send the blank line ending the MIME headers.""" - if self.request_version != 'HTTP/0.9': - self._headers_buffer.append(b"\r\n") - self.flush_headers() - - def flush_headers(self): - if hasattr(self, '_headers_buffer'): - self.wfile.write(b"".join(self._headers_buffer)) - self._headers_buffer = [] - - def log_request(self, code='-', size='-'): - """Log an accepted request. - - This is called by send_response(). - - """ - if isinstance(code, HTTPStatus): - code = code.value - self.log_message('"%s" %s %s', - self.requestline, str(code), str(size)) - - def log_error(self, format, *args): - """Log an error. - - This is called when a request cannot be fulfilled. By - default it passes the message on to log_message(). - - Arguments are the same as for log_message(). - - XXX This should go to the separate error log. - - """ - - self.log_message(format, *args) - - # https://en.wikipedia.org/wiki/List_of_Unicode_characters#Control_codes - _control_char_table = str.maketrans( - {c: fr'\x{c:02x}' for c in itertools.chain(range(0x20), range(0x7f,0xa0))}) - _control_char_table[ord('\\')] = r'\\' - - def log_message(self, format, *args): - """Log an arbitrary message. - - This is used by all other logging functions. Override - it if you have specific logging wishes. - - The first argument, FORMAT, is a format string for the - message to be logged. If the format string contains - any % escapes requiring parameters, they should be - specified as subsequent arguments (it's just like - printf!). - - The client ip and current date/time are prefixed to - every message. - - Unicode control characters are replaced with escaped hex - before writing the output to stderr. 
- - """ - - message = format % args - sys.stderr.write("%s - - [%s] %s\n" % - (self.address_string(), - self.log_date_time_string(), - message.translate(self._control_char_table))) - - def version_string(self): - """Return the server software version string.""" - return self.server_version + ' ' + self.sys_version - - def date_time_string(self, timestamp=None): - """Return the current date and time formatted for a message header.""" - if timestamp is None: - timestamp = time.time() - return email.utils.formatdate(timestamp, usegmt=True) - - def log_date_time_string(self): - """Return the current time formatted for logging.""" - now = time.time() - year, month, day, hh, mm, ss, x, y, z = time.localtime(now) - s = "%02d/%3s/%04d %02d:%02d:%02d" % ( - day, self.monthname[month], year, hh, mm, ss) - return s - - weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - - monthname = [None, - 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', - 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - - def address_string(self): - """Return the client address.""" - - return self.client_address[0] - - # Essentially static class variables - - # The version of the HTTP protocol we support. - # Set this to HTTP/1.1 to enable automatic keepalive - protocol_version = "HTTP/1.0" - - # MessageClass used to parse headers - MessageClass = http.client.HTTPMessage - - # hack to maintain backwards compatibility - responses = { - v: (v.phrase, v.description) - for v in HTTPStatus.__members__.values() - } - - -class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): - - """Simple HTTP request handler with GET and HEAD commands. - - This serves files from the current directory and any of its - subdirectories. The MIME type for files is determined by - calling the .guess_type() method. - - The GET and HEAD requests are identical except that the HEAD - request omits the actual contents of the file. - - """ - - server_version = "SimpleHTTP/" + __version__ - index_pages = ("index.html", "index.htm") - extensions_map = _encodings_map_default = { - '.gz': 'application/gzip', - '.Z': 'application/octet-stream', - '.bz2': 'application/x-bzip2', - '.xz': 'application/x-xz', - } - - def __init__(self, *args, directory=None, **kwargs): - if directory is None: - directory = os.getcwd() - self.directory = os.fspath(directory) - super().__init__(*args, **kwargs) - - def do_GET(self): - """Serve a GET request.""" - f = self.send_head() - if f: - try: - self.copyfile(f, self.wfile) - finally: - f.close() - - def do_HEAD(self): - """Serve a HEAD request.""" - f = self.send_head() - if f: - f.close() - - def send_head(self): - """Common code for GET and HEAD commands. - - This sends the response code and MIME headers. - - Return value is either a file object (which has to be copied - to the outputfile by the caller unless the command was HEAD, - and must be closed by the caller under all circumstances), or - None, in which case the caller has nothing further to do. 
- - """ - path = self.translate_path(self.path) - f = None - if os.path.isdir(path): - parts = urllib.parse.urlsplit(self.path) - if not parts.path.endswith(('/', '%2f', '%2F')): - # redirect browser - doing basically what apache does - self.send_response(HTTPStatus.MOVED_PERMANENTLY) - new_parts = (parts[0], parts[1], parts[2] + '/', - parts[3], parts[4]) - new_url = urllib.parse.urlunsplit(new_parts) - self.send_header("Location", new_url) - self.send_header("Content-Length", "0") - self.end_headers() - return None - for index in self.index_pages: - index = os.path.join(path, index) - if os.path.isfile(index): - path = index - break - else: - return self.list_directory(path) - ctype = self.guess_type(path) - # check for trailing "/" which should return 404. See Issue17324 - # The test for this was added in test_httpserver.py - # However, some OS platforms accept a trailingSlash as a filename - # See discussion on python-dev and Issue34711 regarding - # parsing and rejection of filenames with a trailing slash - if path.endswith("/"): - self.send_error(HTTPStatus.NOT_FOUND, "File not found") - return None - try: - f = open(path, 'rb') - except OSError: - self.send_error(HTTPStatus.NOT_FOUND, "File not found") - return None - - try: - fs = os.fstat(f.fileno()) - # Use browser cache if possible - if ("If-Modified-Since" in self.headers - and "If-None-Match" not in self.headers): - # compare If-Modified-Since and time of last file modification - try: - ims = email.utils.parsedate_to_datetime( - self.headers["If-Modified-Since"]) - except (TypeError, IndexError, OverflowError, ValueError): - # ignore ill-formed values - pass - else: - if ims.tzinfo is None: - # obsolete format with no timezone, cf. - # https://tools.ietf.org/html/rfc7231#section-7.1.1.1 - ims = ims.replace(tzinfo=datetime.timezone.utc) - if ims.tzinfo is datetime.timezone.utc: - # compare to UTC datetime of last modification - last_modif = datetime.datetime.fromtimestamp( - fs.st_mtime, datetime.timezone.utc) - # remove microseconds, like in If-Modified-Since - last_modif = last_modif.replace(microsecond=0) - - if last_modif <= ims: - self.send_response(HTTPStatus.NOT_MODIFIED) - self.end_headers() - f.close() - return None - - self.send_response(HTTPStatus.OK) - self.send_header("Content-type", ctype) - self.send_header("Content-Length", str(fs[6])) - self.send_header("Last-Modified", - self.date_time_string(fs.st_mtime)) - self.end_headers() - return f - except: - f.close() - raise - - def list_directory(self, path): - """Helper to produce a directory listing (absent index.html). - - Return value is either a file object, or None (indicating an - error). In either case, the headers are sent, making the - interface the same as for send_head(). - - """ - try: - list = os.listdir(path) - except OSError: - self.send_error( - HTTPStatus.NOT_FOUND, - "No permission to list directory") - return None - list.sort(key=lambda a: a.lower()) - r = [] - displaypath = self.path - displaypath = displaypath.split('#', 1)[0] - displaypath = displaypath.split('?', 1)[0] - try: - displaypath = urllib.parse.unquote(displaypath, - errors='surrogatepass') - except UnicodeDecodeError: - displaypath = urllib.parse.unquote(displaypath) - displaypath = html.escape(displaypath, quote=False) - enc = sys.getfilesystemencoding() - title = f'Directory listing for {displaypath}' - r.append('') - r.append('') - r.append('') - r.append(f'') - r.append(f'{title}\n') - r.append(f'\n

{title}

') - r.append('
\n
    ') - for name in list: - fullname = os.path.join(path, name) - displayname = linkname = name - # Append / for directories or @ for symbolic links - if os.path.isdir(fullname): - displayname = name + "/" - linkname = name + "/" - if os.path.islink(fullname): - displayname = name + "@" - # Note: a link to a directory displays with @ and links with / - r.append('
  • %s
  • ' - % (urllib.parse.quote(linkname, - errors='surrogatepass'), - html.escape(displayname, quote=False))) - r.append('
\n
\n\n\n') - encoded = '\n'.join(r).encode(enc, 'surrogateescape') - f = io.BytesIO() - f.write(encoded) - f.seek(0) - self.send_response(HTTPStatus.OK) - self.send_header("Content-type", "text/html; charset=%s" % enc) - self.send_header("Content-Length", str(len(encoded))) - self.end_headers() - return f - - def translate_path(self, path): - """Translate a /-separated PATH to the local filename syntax. - - Components that mean special things to the local file system - (e.g. drive or directory names) are ignored. (XXX They should - probably be diagnosed.) - - """ - # abandon query parameters - path = path.split('#', 1)[0] - path = path.split('?', 1)[0] - # Don't forget explicit trailing slash when normalizing. Issue17324 - try: - path = urllib.parse.unquote(path, errors='surrogatepass') - except UnicodeDecodeError: - path = urllib.parse.unquote(path) - trailing_slash = path.endswith('/') - path = posixpath.normpath(path) - words = path.split('/') - words = filter(None, words) - path = self.directory - for word in words: - if os.path.dirname(word) or word in (os.curdir, os.pardir): - # Ignore components that are not a simple file/directory name - continue - path = os.path.join(path, word) - if trailing_slash: - path += '/' - return path - - def copyfile(self, source, outputfile): - """Copy all data between two file objects. - - The SOURCE argument is a file object open for reading - (or anything with a read() method) and the DESTINATION - argument is a file object open for writing (or - anything with a write() method). - - The only reason for overriding this would be to change - the block size or perhaps to replace newlines by CRLF - -- note however that this the default server uses this - to copy binary data as well. - - """ - shutil.copyfileobj(source, outputfile) - - def guess_type(self, path): - """Guess the type of a file. - - Argument is a PATH (a filename). - - Return value is a string of the form type/subtype, - usable for a MIME Content-type header. - - The default implementation looks the file's extension - up in the table self.extensions_map, using application/octet-stream - as a default; however it would be permissible (if - slow) to look inside the data to make a better guess. - - """ - base, ext = posixpath.splitext(path) - if ext in self.extensions_map: - return self.extensions_map[ext] - ext = ext.lower() - if ext in self.extensions_map: - return self.extensions_map[ext] - guess, _ = mimetypes.guess_file_type(path) - if guess: - return guess - return 'application/octet-stream' - - -# Utilities for CGIHTTPRequestHandler - -def _url_collapse_path(path): - """ - Given a URL path, remove extra '/'s and '.' path elements and collapse - any '..' references and returns a collapsed path. - - Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. - The utility of this function is limited to is_cgi method and helps - preventing some security attacks. - - Returns: The reconstituted URL, which will always start with a '/'. - - Raises: IndexError if too many '..' occur within the path. - - """ - # Query component should not be involved. - path, _, query = path.partition('?') - path = urllib.parse.unquote(path) - - # Similar to os.path.split(os.path.normpath(path)) but specific to URL - # path semantics rather than local operating system semantics. - path_parts = path.split('/') - head_parts = [] - for part in path_parts[:-1]: - if part == '..': - head_parts.pop() # IndexError if more '..' 
than prior parts - elif part and part != '.': - head_parts.append(part) - if path_parts: - tail_part = path_parts.pop() - if tail_part: - if tail_part == '..': - head_parts.pop() - tail_part = '' - elif tail_part == '.': - tail_part = '' - else: - tail_part = '' - - if query: - tail_part = '?'.join((tail_part, query)) - - splitpath = ('/' + '/'.join(head_parts), tail_part) - collapsed_path = "/".join(splitpath) - - return collapsed_path - - - -nobody = None - -def nobody_uid(): - """Internal routine to get nobody's uid""" - global nobody - if nobody: - return nobody - try: - import pwd - except ImportError: - return -1 - try: - nobody = pwd.getpwnam('nobody')[2] - except KeyError: - nobody = 1 + max(x[2] for x in pwd.getpwall()) - return nobody - - -def executable(path): - """Test for executable file.""" - return os.access(path, os.X_OK) - - -class CGIHTTPRequestHandler(SimpleHTTPRequestHandler): - - """Complete HTTP server with GET, HEAD and POST commands. - - GET and HEAD also support running CGI scripts. - - The POST command is *only* implemented for CGI scripts. - - """ - - def __init__(self, *args, **kwargs): - import warnings - warnings._deprecated("http.server.CGIHTTPRequestHandler", - remove=(3, 15)) - super().__init__(*args, **kwargs) - - # Determine platform specifics - have_fork = hasattr(os, 'fork') - - # Make rfile unbuffered -- we need to read one line and then pass - # the rest to a subprocess, so we can't use buffered input. - rbufsize = 0 - - def do_POST(self): - """Serve a POST request. - - This is only implemented for CGI scripts. - - """ - - if self.is_cgi(): - self.run_cgi() - else: - self.send_error( - HTTPStatus.NOT_IMPLEMENTED, - "Can only POST to CGI scripts") - - def send_head(self): - """Version of send_head that supports CGI scripts""" - if self.is_cgi(): - return self.run_cgi() - else: - return SimpleHTTPRequestHandler.send_head(self) - - def is_cgi(self): - """Test whether self.path corresponds to a CGI script. - - Returns True and updates the cgi_info attribute to the tuple - (dir, rest) if self.path requires running a CGI script. - Returns False otherwise. - - If any exception is raised, the caller should assume that - self.path was rejected as invalid and act accordingly. - - The default implementation tests whether the normalized url - path begins with one of the strings in self.cgi_directories - (and the next character is a '/' or the end of the string). - - """ - collapsed_path = _url_collapse_path(self.path) - dir_sep = collapsed_path.find('/', 1) - while dir_sep > 0 and not collapsed_path[:dir_sep] in self.cgi_directories: - dir_sep = collapsed_path.find('/', dir_sep+1) - if dir_sep > 0: - head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] - self.cgi_info = head, tail - return True - return False - - - cgi_directories = ['/cgi-bin', '/htbin'] - - def is_executable(self, path): - """Test whether argument path is an executable file.""" - return executable(path) - - def is_python(self, path): - """Test whether argument path is a Python script.""" - head, tail = os.path.splitext(path) - return tail.lower() in (".py", ".pyw") - - def run_cgi(self): - """Execute a CGI script.""" - dir, rest = self.cgi_info - path = dir + '/' + rest - i = path.find('/', len(dir)+1) - while i >= 0: - nextdir = path[:i] - nextrest = path[i+1:] - - scriptdir = self.translate_path(nextdir) - if os.path.isdir(scriptdir): - dir, rest = nextdir, nextrest - i = path.find('/', len(dir)+1) - else: - break - - # find an explicit query string, if present. 
- rest, _, query = rest.partition('?') - - # dissect the part after the directory name into a script name & - # a possible additional path, to be stored in PATH_INFO. - i = rest.find('/') - if i >= 0: - script, rest = rest[:i], rest[i:] - else: - script, rest = rest, '' - - scriptname = dir + '/' + script - scriptfile = self.translate_path(scriptname) - if not os.path.exists(scriptfile): - self.send_error( - HTTPStatus.NOT_FOUND, - "No such CGI script (%r)" % scriptname) - return - if not os.path.isfile(scriptfile): - self.send_error( - HTTPStatus.FORBIDDEN, - "CGI script is not a plain file (%r)" % scriptname) - return - ispy = self.is_python(scriptname) - if self.have_fork or not ispy: - if not self.is_executable(scriptfile): - self.send_error( - HTTPStatus.FORBIDDEN, - "CGI script is not executable (%r)" % scriptname) - return - - # Reference: https://www6.uniovi.es/~antonio/ncsa_httpd/cgi/env.html - # XXX Much of the following could be prepared ahead of time! - env = copy.deepcopy(os.environ) - env['SERVER_SOFTWARE'] = self.version_string() - env['SERVER_NAME'] = self.server.server_name - env['GATEWAY_INTERFACE'] = 'CGI/1.1' - env['SERVER_PROTOCOL'] = self.protocol_version - env['SERVER_PORT'] = str(self.server.server_port) - env['REQUEST_METHOD'] = self.command - uqrest = urllib.parse.unquote(rest) - env['PATH_INFO'] = uqrest - env['PATH_TRANSLATED'] = self.translate_path(uqrest) - env['SCRIPT_NAME'] = scriptname - env['QUERY_STRING'] = query - env['REMOTE_ADDR'] = self.client_address[0] - authorization = self.headers.get("authorization") - if authorization: - authorization = authorization.split() - if len(authorization) == 2: - import base64, binascii - env['AUTH_TYPE'] = authorization[0] - if authorization[0].lower() == "basic": - try: - authorization = authorization[1].encode('ascii') - authorization = base64.decodebytes(authorization).\ - decode('ascii') - except (binascii.Error, UnicodeError): - pass - else: - authorization = authorization.split(':') - if len(authorization) == 2: - env['REMOTE_USER'] = authorization[0] - # XXX REMOTE_IDENT - if self.headers.get('content-type') is None: - env['CONTENT_TYPE'] = self.headers.get_content_type() - else: - env['CONTENT_TYPE'] = self.headers['content-type'] - length = self.headers.get('content-length') - if length: - env['CONTENT_LENGTH'] = length - referer = self.headers.get('referer') - if referer: - env['HTTP_REFERER'] = referer - accept = self.headers.get_all('accept', ()) - env['HTTP_ACCEPT'] = ','.join(accept) - ua = self.headers.get('user-agent') - if ua: - env['HTTP_USER_AGENT'] = ua - co = filter(None, self.headers.get_all('cookie', [])) - cookie_str = ', '.join(co) - if cookie_str: - env['HTTP_COOKIE'] = cookie_str - # XXX Other HTTP_* headers - # Since we're setting the env in the parent, provide empty - # values to override previously set values - for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH', - 'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'): - env.setdefault(k, "") - - self.send_response(HTTPStatus.OK, "Script output follows") - self.flush_headers() - - decoded_query = query.replace('+', ' ') - - if self.have_fork: - # Unix -- fork as we should - args = [script] - if '=' not in decoded_query: - args.append(decoded_query) - nobody = nobody_uid() - self.wfile.flush() # Always flush before forking - pid = os.fork() - if pid != 0: - # Parent - pid, sts = os.waitpid(pid, 0) - # throw away additional data [see bug #427345] - while select.select([self.rfile], [], [], 0)[0]: - if not self.rfile.read(1): - break - 
exitcode = os.waitstatus_to_exitcode(sts) - if exitcode: - self.log_error(f"CGI script exit code {exitcode}") - return - # Child - try: - try: - os.setuid(nobody) - except OSError: - pass - os.dup2(self.rfile.fileno(), 0) - os.dup2(self.wfile.fileno(), 1) - os.execve(scriptfile, args, env) - except: - self.server.handle_error(self.request, self.client_address) - os._exit(127) - - else: - # Non-Unix -- use subprocess - import subprocess - cmdline = [scriptfile] - if self.is_python(scriptfile): - interp = sys.executable - if interp.lower().endswith("w.exe"): - # On Windows, use python.exe, not pythonw.exe - interp = interp[:-5] + interp[-4:] - cmdline = [interp, '-u'] + cmdline - if '=' not in query: - cmdline.append(query) - self.log_message("command: %s", subprocess.list2cmdline(cmdline)) - try: - nbytes = int(length) - except (TypeError, ValueError): - nbytes = 0 - p = subprocess.Popen(cmdline, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env = env - ) - if self.command.lower() == "post" and nbytes > 0: - cursize = 0 - data = self.rfile.read(min(nbytes, _MIN_READ_BUF_SIZE)) - while len(data) < nbytes and len(data) != cursize: - cursize = len(data) - # This is a geometric increase in read size (never more - # than doubling the current length of data per loop - # iteration). - delta = min(cursize, nbytes - cursize) - try: - data += self.rfile.read(delta) - except TimeoutError: - break - else: - data = None - # throw away additional data [see bug #427345] - while select.select([self.rfile._sock], [], [], 0)[0]: - if not self.rfile._sock.recv(1): - break - stdout, stderr = p.communicate(data) - self.wfile.write(stdout) - if stderr: - self.log_error('%s', stderr) - p.stderr.close() - p.stdout.close() - status = p.returncode - if status: - self.log_error("CGI script exit status %#x", status) - else: - self.log_message("CGI script exited OK") - - -def _get_best_family(*address): - infos = socket.getaddrinfo( - *address, - type=socket.SOCK_STREAM, - flags=socket.AI_PASSIVE, - ) - family, type, proto, canonname, sockaddr = next(iter(infos)) - return family, sockaddr - - -def test(HandlerClass=BaseHTTPRequestHandler, - ServerClass=ThreadingHTTPServer, - protocol="HTTP/1.0", port=8000, bind=None): - """Test the HTTP request handler class. - - This runs an HTTP server on port 8000 (or the port argument). - - """ - ServerClass.address_family, addr = _get_best_family(bind, port) - HandlerClass.protocol_version = protocol - with ServerClass(addr, HandlerClass) as httpd: - host, port = httpd.socket.getsockname()[:2] - url_host = f'[{host}]' if ':' in host else host - print( - f"Serving HTTP on {host} port {port} " - f"(http://{url_host}:{port}/) ..." 
- ) - try: - httpd.serve_forever() - except KeyboardInterrupt: - print("\nKeyboard interrupt received, exiting.") - sys.exit(0) - -if __name__ == '__main__': - import argparse - import contextlib - - parser = argparse.ArgumentParser() - parser.add_argument('--cgi', action='store_true', - help='run as CGI server') - parser.add_argument('-b', '--bind', metavar='ADDRESS', - help='bind to this address ' - '(default: all interfaces)') - parser.add_argument('-d', '--directory', default=os.getcwd(), - help='serve this directory ' - '(default: current directory)') - parser.add_argument('-p', '--protocol', metavar='VERSION', - default='HTTP/1.0', - help='conform to this HTTP version ' - '(default: %(default)s)') - parser.add_argument('port', default=8000, type=int, nargs='?', - help='bind to this port ' - '(default: %(default)s)') - args = parser.parse_args() - if args.cgi: - handler_class = CGIHTTPRequestHandler - else: - handler_class = SimpleHTTPRequestHandler - - # ensure dual-stack is not disabled; ref #38907 - class DualStackServer(ThreadingHTTPServer): - - def server_bind(self): - # suppress exception when protocol is IPv4 - with contextlib.suppress(Exception): - self.socket.setsockopt( - socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) - return super().server_bind() - - def finish_request(self, request, client_address): - self.RequestHandlerClass(request, client_address, self, - directory=args.directory) - - test( - HandlerClass=handler_class, - ServerClass=DualStackServer, - port=args.port, - bind=args.bind, - protocol=args.protocol, - ) diff --git a/Python313_13_x64_Template/Lib/imaplib.py b/Python313_13_x64_Template/Lib/imaplib.py deleted file mode 100644 index 141e6398..00000000 --- a/Python313_13_x64_Template/Lib/imaplib.py +++ /dev/null @@ -1,1651 +0,0 @@ -"""IMAP4 client. - -Based on RFC 2060. - -Public class: IMAP4 -Public variable: Debug -Public functions: Internaldate2tuple - Int2AP - ParseFlags - Time2Internaldate -""" - -# Author: Piers Lauder December 1997. -# -# Authentication code contributed by Donn Cave June 1998. -# String method conversion by ESR, February 2001. -# GET/SETACL contributed by Anthony Baxter April 2001. -# IMAP4_SSL contributed by Tino Lange March 2002. -# GET/SETQUOTA contributed by Andreas Zeidler June 2002. -# PROXYAUTH contributed by Rick Holbert November 2002. -# GET/SETANNOTATION contributed by Tomas Lindroos June 2005. - -__version__ = "2.58" - -import binascii, errno, random, re, socket, subprocess, sys, time, calendar -from datetime import datetime, timezone, timedelta -from io import DEFAULT_BUFFER_SIZE - -try: - import ssl - HAVE_SSL = True -except ImportError: - HAVE_SSL = False - -__all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple", - "Int2AP", "ParseFlags", "Time2Internaldate"] - -# Globals - -CRLF = b'\r\n' -Debug = 0 -IMAP4_PORT = 143 -IMAP4_SSL_PORT = 993 -AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first - -# Maximal line length when calling readline(). This is to prevent -# reading arbitrary length lines. RFC 3501 and 2060 (IMAP 4rev1) -# don't specify a line length. RFC 2683 suggests limiting client -# command lines to 1000 octets and that servers should be prepared -# to accept command lines up to 8000 octets, so we used to use 10K here. -# In the modern world (eg: gmail) the response to, for example, a -# search command can be quite large, so we now use 1M. -_MAXLINE = 1000000 - -# Data larger than this will be read in chunks, to prevent extreme -# overallocation. 
-_SAFE_BUF_SIZE = 1 << 20 - -# Commands - -Commands = { - # name valid states - 'APPEND': ('AUTH', 'SELECTED'), - 'AUTHENTICATE': ('NONAUTH',), - 'CAPABILITY': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'), - 'CHECK': ('SELECTED',), - 'CLOSE': ('SELECTED',), - 'COPY': ('SELECTED',), - 'CREATE': ('AUTH', 'SELECTED'), - 'DELETE': ('AUTH', 'SELECTED'), - 'DELETEACL': ('AUTH', 'SELECTED'), - 'ENABLE': ('AUTH', ), - 'EXAMINE': ('AUTH', 'SELECTED'), - 'EXPUNGE': ('SELECTED',), - 'FETCH': ('SELECTED',), - 'GETACL': ('AUTH', 'SELECTED'), - 'GETANNOTATION':('AUTH', 'SELECTED'), - 'GETQUOTA': ('AUTH', 'SELECTED'), - 'GETQUOTAROOT': ('AUTH', 'SELECTED'), - 'MYRIGHTS': ('AUTH', 'SELECTED'), - 'LIST': ('AUTH', 'SELECTED'), - 'LOGIN': ('NONAUTH',), - 'LOGOUT': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'), - 'LSUB': ('AUTH', 'SELECTED'), - 'MOVE': ('SELECTED',), - 'NAMESPACE': ('AUTH', 'SELECTED'), - 'NOOP': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'), - 'PARTIAL': ('SELECTED',), # NB: obsolete - 'PROXYAUTH': ('AUTH',), - 'RENAME': ('AUTH', 'SELECTED'), - 'SEARCH': ('SELECTED',), - 'SELECT': ('AUTH', 'SELECTED'), - 'SETACL': ('AUTH', 'SELECTED'), - 'SETANNOTATION':('AUTH', 'SELECTED'), - 'SETQUOTA': ('AUTH', 'SELECTED'), - 'SORT': ('SELECTED',), - 'STARTTLS': ('NONAUTH',), - 'STATUS': ('AUTH', 'SELECTED'), - 'STORE': ('SELECTED',), - 'SUBSCRIBE': ('AUTH', 'SELECTED'), - 'THREAD': ('SELECTED',), - 'UID': ('SELECTED',), - 'UNSUBSCRIBE': ('AUTH', 'SELECTED'), - 'UNSELECT': ('SELECTED',), - } - -# Patterns to match server responses - -Continuation = re.compile(br'\+( (?P<data>.*))?') -Flags = re.compile(br'.*FLAGS \((?P<flags>[^\)]*)\)') -InternalDate = re.compile(br'.*INTERNALDATE "' - br'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])' - br' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])' - br' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])' - br'"') -# Literal is no longer used; kept for backward compatibility. -Literal = re.compile(br'.*{(?P<size>\d+)}$', re.ASCII) -MapCRLF = re.compile(br'\r\n|\r|\n') -# We no longer exclude the ']' character from the data portion of the response -# code, even though it violates the RFC. Popular IMAP servers such as Gmail -# allow flags with ']', and there are programs (including imaplib!) that can -# produce them. The problem with this is if the 'text' portion of the response -# includes a ']' we'll parse the response wrong (which is the point of the RFC -# restriction). However, that seems less likely to be a problem in practice -# than being unable to correctly parse flags that include ']' chars, which -# was reported as a real-world problem in issue #21815. -Response_code = re.compile(br'\[(?P<type>[A-Z-]+)( (?P<data>.*))?\]') -Untagged_response = re.compile(br'\* (?P<type>[A-Z-]+)( (?P<data>.*))?') -# Untagged_status is no longer used; kept for backward compatibility -Untagged_status = re.compile( - br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?', re.ASCII) -# We compile these in _mode_xxx. -_Literal = br'.*{(?P<size>\d+)}$' -_Untagged_status = br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?' - - - -class IMAP4: - - r"""IMAP4 client class. - - Instantiate with: IMAP4([host[, port[, timeout=None]]]) - - host - host's name (default: localhost); - port - port number (default: standard IMAP4 port). - timeout - socket timeout (default: None) - If timeout is not given or is None, - the global default socket timeout is used - - All IMAP4rev1 commands are supported by methods of the same - name (in lowercase). 
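In terms of the command table and the IMAP4 class sketched above, a typical session looks like the following (host and credentials are placeholders, and the server is assumed to advertise STARTTLS):

    import imaplib

    with imaplib.IMAP4("imap.example.com") as M:   # connects on port 143
        M.starttls()                               # upgrade before LOGIN
        M.login("user", "secret")
        typ, data = M.select("INBOX")              # data[0]: EXISTS count
        typ, nums = M.search(None, "ALL")          # nums[0]: b'1 2 3 ...'
    # leaving the with-block issues LOGOUT automatically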
- - All arguments to commands are converted to strings, except for - AUTHENTICATE, and the last argument to APPEND which is passed as - an IMAP4 literal. If necessary (the string contains any - non-printing characters or white-space and isn't enclosed with - either parentheses or double quotes) each string is quoted. - However, the 'password' argument to the LOGIN command is always - quoted. If you want to avoid having an argument string quoted - (eg: the 'flags' argument to STORE) then enclose the string in - parentheses (eg: "(\Deleted)"). - - Each command returns a tuple: (type, [data, ...]) where 'type' - is usually 'OK' or 'NO', and 'data' is either the text from the - tagged response, or untagged results from command. Each 'data' - is either a string, or a tuple. If a tuple, then the first part - is the header of the response, and the second part contains - the data (ie: 'literal' value). - - Errors raise the exception class <instance>.error("<reason>"). - IMAP4 server errors raise <instance>.abort("<reason>"), - which is a sub-class of 'error'. Mailbox status changes - from READ-WRITE to READ-ONLY raise the exception class - <instance>.readonly("<reason>"), which is a sub-class of 'abort'. - - "error" exceptions imply a program error. - "abort" exceptions imply the connection should be reset, and - the command re-tried. - "readonly" exceptions imply the command should be re-tried. - - Note: to use this module, you must read the RFCs pertaining to the - IMAP4 protocol, as the semantics of the arguments to each IMAP4 - command are left to the invoker, not to mention the results. Also, - most IMAP servers implement a sub-set of the commands available here. - """ - - class error(Exception): pass # Logical errors - debug required - class abort(error): pass # Service errors - close and retry - class readonly(abort): pass # Mailbox status changed to READ-ONLY - - def __init__(self, host='', port=IMAP4_PORT, timeout=None): - self.debug = Debug - self.state = 'LOGOUT' - self.literal = None # A literal argument to a command - self.tagged_commands = {} # Tagged commands awaiting response - self.untagged_responses = {} # {typ: [data, ...], ...} - self.continuation_response = '' # Last continuation response - self.is_readonly = False # READ-ONLY desired state - self.tagnum = 0 - self._tls_established = False - self._mode_ascii() - - # Open socket to server. - - self.open(host, port, timeout) - - try: - self._connect() - except Exception: - try: - self.shutdown() - except OSError: - pass - raise - - def _mode_ascii(self): - self.utf8_enabled = False - self._encoding = 'ascii' - self.Literal = re.compile(_Literal, re.ASCII) - self.Untagged_status = re.compile(_Untagged_status, re.ASCII) - - - def _mode_utf8(self): - self.utf8_enabled = True - self._encoding = 'utf-8' - self.Literal = re.compile(_Literal) - self.Untagged_status = re.compile(_Untagged_status) - - - def _connect(self): - # Create unique tag for this session, - # and compile tagged response matcher. - - self.tagpre = Int2AP(random.randint(4096, 65535)) - self.tagre = re.compile(br'(?P<tag>' - + self.tagpre - + br'\d+) (?P<type>[A-Z]+) (?P<data>.*)', re.ASCII) - - # Get server welcome message, - # request and store CAPABILITY response. 
- - if __debug__: - self._cmd_log_len = 10 - self._cmd_log_idx = 0 - self._cmd_log = {} # Last `_cmd_log_len' interactions - if self.debug >= 1: - self._mesg('imaplib version %s' % __version__) - self._mesg('new IMAP4 connection, tag=%s' % self.tagpre) - - self.welcome = self._get_response() - if 'PREAUTH' in self.untagged_responses: - self.state = 'AUTH' - elif 'OK' in self.untagged_responses: - self.state = 'NONAUTH' - else: - raise self.error(self.welcome) - - self._get_capabilities() - if __debug__: - if self.debug >= 3: - self._mesg('CAPABILITIES: %r' % (self.capabilities,)) - - for version in AllowedVersions: - if not version in self.capabilities: - continue - self.PROTOCOL_VERSION = version - return - - raise self.error('server not IMAP4 compliant') - - - def __getattr__(self, attr): - # Allow UPPERCASE variants of IMAP4 command methods. - if attr in Commands: - return getattr(self, attr.lower()) - raise AttributeError("Unknown IMAP4 command: '%s'" % attr) - - def __enter__(self): - return self - - def __exit__(self, *args): - if self.state == "LOGOUT": - return - - try: - self.logout() - except OSError: - pass - - - # Overridable methods - - - def _create_socket(self, timeout): - # Default value of IMAP4.host is '', but socket.getaddrinfo() - # (which is used by socket.create_connection()) expects None - # as a default value for host. - if timeout is not None and not timeout: - raise ValueError('Non-blocking socket (timeout=0) is not supported') - host = None if not self.host else self.host - sys.audit("imaplib.open", self, self.host, self.port) - address = (host, self.port) - if timeout is not None: - return socket.create_connection(address, timeout) - return socket.create_connection(address) - - def open(self, host='', port=IMAP4_PORT, timeout=None): - """Setup connection to remote server on "host:port" - (default: localhost:standard IMAP4 port). - This connection will be used by the routines: - read, readline, send, shutdown. - """ - self.host = host - self.port = port - self.sock = self._create_socket(timeout) - self.file = self.sock.makefile('rb') - - - def read(self, size): - """Read 'size' bytes from remote.""" - cursize = min(size, _SAFE_BUF_SIZE) - data = self.file.read(cursize) - while cursize < size and len(data) == cursize: - delta = min(cursize, size - cursize) - data += self.file.read(delta) - cursize += delta - return data - - - def readline(self): - """Read line from remote.""" - line = self.file.readline(_MAXLINE + 1) - if len(line) > _MAXLINE: - raise self.error("got more than %d bytes" % _MAXLINE) - return line - - - def send(self, data): - """Send data to remote.""" - sys.audit("imaplib.send", self, data) - self.sock.sendall(data) - - - def shutdown(self): - """Close I/O established in "open".""" - self.file.close() - try: - self.sock.shutdown(socket.SHUT_RDWR) - except OSError as exc: - # The server might already have closed the connection. - # On Windows, this may result in WSAEINVAL (error 10022): - # An invalid operation was attempted. - if (exc.errno != errno.ENOTCONN - and getattr(exc, 'winerror', 0) != 10022): - raise - finally: - self.sock.close() - - - def socket(self): - """Return socket instance used to connect to IMAP4 server. - - socket = <instance>.socket() - """ - return self.sock - - - - # Utility methods - - - def recent(self): - """Return most recent 'RECENT' responses if any exist, - else prompt server for an update using the 'NOOP' command. 
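The read() loop above caps the first allocation at _SAFE_BUF_SIZE and then grows the buffer by at most a factor of two per pass; the same pattern in isolation, as a standalone sketch:

    def read_bounded(stream, size, safe_buf=1 << 20):
        # Mirror of IMAP4.read(): never request more than double what the
        # peer has actually delivered, so a huge advertised literal size
        # cannot force one giant allocation up front.
        cursize = min(size, safe_buf)
        data = stream.read(cursize)
        while cursize < size and len(data) == cursize:
            delta = min(cursize, size - cursize)
            data += stream.read(delta)
            cursize += delta
        return data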
- - (typ, [data]) = <instance>.recent() - - 'data' is None if no new messages, - else list of RECENT responses, most recent last. - """ - name = 'RECENT' - typ, dat = self._untagged_response('OK', [None], name) - if dat[-1]: - return typ, dat - typ, dat = self.noop() # Prod server for response - return self._untagged_response(typ, dat, name) - - - def response(self, code): - """Return data for response 'code' if received, or None. - - Old value for response 'code' is cleared. - - (code, [data]) = <instance>.response(code) - """ - return self._untagged_response(code, [None], code.upper()) - - - - # IMAP4 commands - - - def append(self, mailbox, flags, date_time, message): - """Append message to named mailbox. - - (typ, [data]) = <instance>.append(mailbox, flags, date_time, message) - - All args except `message' can be None. - """ - name = 'APPEND' - if not mailbox: - mailbox = 'INBOX' - if flags: - if (flags[0],flags[-1]) != ('(',')'): - flags = '(%s)' % flags - else: - flags = None - if date_time: - date_time = Time2Internaldate(date_time) - else: - date_time = None - literal = MapCRLF.sub(CRLF, message) - self.literal = literal - return self._simple_command(name, mailbox, flags, date_time) - - - def authenticate(self, mechanism, authobject): - """Authenticate command - requires response processing. - - 'mechanism' specifies which authentication mechanism is to - be used - it must appear in <instance>.capabilities in the - form AUTH=<mechanism>. - - 'authobject' must be a callable object: - - data = authobject(response) - - It will be called to process server continuation responses; the - response argument it is passed will be a bytes. It should return bytes - data that will be base64 encoded and sent to the server. It should - return None if the client abort response '*' should be sent instead. - """ - mech = mechanism.upper() - # XXX: shouldn't this code be removed, not commented out? - #cap = 'AUTH=%s' % mech - #if not cap in self.capabilities: # Let the server decide! - # raise self.error("Server doesn't allow %s authentication." % mech) - self.literal = _Authenticator(authobject).process - typ, dat = self._simple_command('AUTHENTICATE', mech) - if typ != 'OK': - raise self.error(dat[-1].decode('utf-8', 'replace')) - self.state = 'AUTH' - return typ, dat - - - def capability(self): - """(typ, [data]) = <instance>.capability() - Fetch capabilities list from server.""" - - name = 'CAPABILITY' - typ, dat = self._simple_command(name) - return self._untagged_response(typ, dat, name) - - - def check(self): - """Checkpoint mailbox on server. - - (typ, [data]) = <instance>.check() - """ - return self._simple_command('CHECK') - - - def close(self): - """Close currently selected mailbox. - - Deleted messages are removed from writable mailbox. - This is the recommended command before 'LOGOUT'. - - (typ, [data]) = <instance>.close() - """ - try: - typ, dat = self._simple_command('CLOSE') - finally: - self.state = 'AUTH' - return typ, dat - - - def copy(self, message_set, new_mailbox): - """Copy 'message_set' messages onto end of 'new_mailbox'. - - (typ, [data]) = <instance>.copy(message_set, new_mailbox) - """ - return self._simple_command('COPY', message_set, new_mailbox) - - - def create(self, mailbox): - """Create new mailbox. - - (typ, [data]) = <instance>.create(mailbox) - """ - return self._simple_command('CREATE', mailbox) - - - def delete(self, mailbox): - """Delete old mailbox. - - (typ, [data]) = <instance>.delete(mailbox) - """ - return self._simple_command('DELETE', mailbox) - - def deleteacl(self, mailbox, who): - """Delete the ACLs (remove any rights) set for who on mailbox. 
- - (typ, [data]) = <instance>.deleteacl(mailbox, who) - """ - return self._simple_command('DELETEACL', mailbox, who) - - def enable(self, capability): - """Send an RFC5161 enable string to the server. - - (typ, [data]) = <instance>.enable(capability) - """ - if 'ENABLE' not in self.capabilities: - raise IMAP4.error("Server does not support ENABLE") - typ, data = self._simple_command('ENABLE', capability) - if typ == 'OK' and 'UTF8=ACCEPT' in capability.upper(): - self._mode_utf8() - return typ, data - - def expunge(self): - """Permanently remove deleted items from selected mailbox. - - Generates 'EXPUNGE' response for each deleted message. - - (typ, [data]) = <instance>.expunge() - - 'data' is list of 'EXPUNGE'd message numbers in order received. - """ - name = 'EXPUNGE' - typ, dat = self._simple_command(name) - return self._untagged_response(typ, dat, name) - - - def fetch(self, message_set, message_parts): - """Fetch (parts of) messages. - - (typ, [data, ...]) = <instance>.fetch(message_set, message_parts) - - 'message_parts' should be a string of selected parts - enclosed in parentheses, eg: "(UID BODY[TEXT])". - - 'data' are tuples of message part envelope and data. - """ - name = 'FETCH' - typ, dat = self._simple_command(name, message_set, message_parts) - return self._untagged_response(typ, dat, name) - - - def getacl(self, mailbox): - """Get the ACLs for a mailbox. - - (typ, [data]) = <instance>.getacl(mailbox) - """ - typ, dat = self._simple_command('GETACL', mailbox) - return self._untagged_response(typ, dat, 'ACL') - - - def getannotation(self, mailbox, entry, attribute): - """(typ, [data]) = <instance>.getannotation(mailbox, entry, attribute) - Retrieve ANNOTATIONs.""" - - typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute) - return self._untagged_response(typ, dat, 'ANNOTATION') - - - def getquota(self, root): - """Get the quota root's resource usage and limits. - - Part of the IMAP4 QUOTA extension defined in rfc2087. - - (typ, [data]) = <instance>.getquota(root) - """ - typ, dat = self._simple_command('GETQUOTA', root) - return self._untagged_response(typ, dat, 'QUOTA') - - - def getquotaroot(self, mailbox): - """Get the list of quota roots for the named mailbox. - - (typ, [[QUOTAROOT responses...], [QUOTA responses]]) = <instance>.getquotaroot(mailbox) - """ - typ, dat = self._simple_command('GETQUOTAROOT', mailbox) - typ, quota = self._untagged_response(typ, dat, 'QUOTA') - typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT') - return typ, [quotaroot, quota] - - - def list(self, directory='""', pattern='*'): - """List mailbox names in directory matching pattern. - - (typ, [data]) = <instance>.list(directory='""', pattern='*') - - 'data' is list of LIST responses. - """ - name = 'LIST' - typ, dat = self._simple_command(name, directory, pattern) - return self._untagged_response(typ, dat, name) - - - def login(self, user, password): - """Identify client using plaintext password. - - (typ, [data]) = <instance>.login(user, password) - - NB: 'password' will be quoted. - """ - typ, dat = self._simple_command('LOGIN', user, self._quote(password)) - if typ != 'OK': - raise self.error(dat[-1]) - self.state = 'AUTH' - return typ, dat - - - def login_cram_md5(self, user, password): - """ Force use of CRAM-MD5 authentication. - - (typ, [data]) = <instance>.login_cram_md5(user, password) - """ - self.user, self.password = user, password - return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH) - - - def _CRAM_MD5_AUTH(self, challenge): - """ Authobject to use with CRAM-MD5 authentication. 
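As the fetch() docstring above notes, literal results come back as (header, bytes) tuples; a sketch, assuming M is a logged-in imaplib.IMAP4 session with a mailbox already selected:

    typ, data = M.fetch('1', '(RFC822)')
    if typ == 'OK':
        envelope, raw = data[0]   # (response header, full message bytes)
        print(raw.decode('utf-8', 'replace')[:200])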
""" - import hmac - - if isinstance(self.password, str): - password = self.password.encode('utf-8') - else: - password = self.password - - try: - authcode = hmac.HMAC(password, challenge, 'md5') - except ValueError: # HMAC-MD5 is not available - raise self.error("CRAM-MD5 authentication is not supported") - return f"{self.user} {authcode.hexdigest()}" - - - def logout(self): - """Shutdown connection to server. - - (typ, [data]) = .logout() - - Returns server 'BYE' response. - """ - self.state = 'LOGOUT' - typ, dat = self._simple_command('LOGOUT') - self.shutdown() - return typ, dat - - - def lsub(self, directory='""', pattern='*'): - """List 'subscribed' mailbox names in directory matching pattern. - - (typ, [data, ...]) = .lsub(directory='""', pattern='*') - - 'data' are tuples of message part envelope and data. - """ - name = 'LSUB' - typ, dat = self._simple_command(name, directory, pattern) - return self._untagged_response(typ, dat, name) - - def myrights(self, mailbox): - """Show my ACLs for a mailbox (i.e. the rights that I have on mailbox). - - (typ, [data]) = .myrights(mailbox) - """ - typ,dat = self._simple_command('MYRIGHTS', mailbox) - return self._untagged_response(typ, dat, 'MYRIGHTS') - - def namespace(self): - """ Returns IMAP namespaces ala rfc2342 - - (typ, [data, ...]) = .namespace() - """ - name = 'NAMESPACE' - typ, dat = self._simple_command(name) - return self._untagged_response(typ, dat, name) - - - def noop(self): - """Send NOOP command. - - (typ, [data]) = .noop() - """ - if __debug__: - if self.debug >= 3: - self._dump_ur(self.untagged_responses) - return self._simple_command('NOOP') - - - def partial(self, message_num, message_part, start, length): - """Fetch truncated part of a message. - - (typ, [data, ...]) = .partial(message_num, message_part, start, length) - - 'data' is tuple of message part envelope and data. - """ - name = 'PARTIAL' - typ, dat = self._simple_command(name, message_num, message_part, start, length) - return self._untagged_response(typ, dat, 'FETCH') - - - def proxyauth(self, user): - """Assume authentication as "user". - - Allows an authorised administrator to proxy into any user's - mailbox. - - (typ, [data]) = .proxyauth(user) - """ - - name = 'PROXYAUTH' - return self._simple_command('PROXYAUTH', user) - - - def rename(self, oldmailbox, newmailbox): - """Rename old mailbox name to new. - - (typ, [data]) = .rename(oldmailbox, newmailbox) - """ - return self._simple_command('RENAME', oldmailbox, newmailbox) - - - def search(self, charset, *criteria): - """Search mailbox for matching messages. - - (typ, [data]) = .search(charset, criterion, ...) - - 'data' is space separated list of matching message numbers. - If UTF8 is enabled, charset MUST be None. - """ - name = 'SEARCH' - if charset: - if self.utf8_enabled: - raise IMAP4.error("Non-None charset not valid in UTF8 mode") - typ, dat = self._simple_command(name, 'CHARSET', charset, *criteria) - else: - typ, dat = self._simple_command(name, *criteria) - return self._untagged_response(typ, dat, name) - - - def select(self, mailbox='INBOX', readonly=False): - """Select a mailbox. - - Flush all untagged responses. - - (typ, [data]) = .select(mailbox='INBOX', readonly=False) - - 'data' is count of messages in mailbox ('EXISTS' response). - - Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so - other responses should be obtained via .response('FLAGS') etc. - """ - self.untagged_responses = {} # Flush old responses. 
- self.is_readonly = readonly - if readonly: - name = 'EXAMINE' - else: - name = 'SELECT' - typ, dat = self._simple_command(name, mailbox) - if typ != 'OK': - self.state = 'AUTH' # Might have been 'SELECTED' - return typ, dat - self.state = 'SELECTED' - if 'READ-ONLY' in self.untagged_responses \ - and not readonly: - if __debug__: - if self.debug >= 1: - self._dump_ur(self.untagged_responses) - raise self.readonly('%s is not writable' % mailbox) - return typ, self.untagged_responses.get('EXISTS', [None]) - - - def setacl(self, mailbox, who, what): - """Set a mailbox acl. - - (typ, [data]) = <instance>.setacl(mailbox, who, what) - """ - return self._simple_command('SETACL', mailbox, who, what) - - - def setannotation(self, *args): - """(typ, [data]) = <instance>.setannotation(mailbox[, entry, attribute]+) - Set ANNOTATIONs.""" - - typ, dat = self._simple_command('SETANNOTATION', *args) - return self._untagged_response(typ, dat, 'ANNOTATION') - - - def setquota(self, root, limits): - """Set the quota root's resource limits. - - (typ, [data]) = <instance>.setquota(root, limits) - """ - typ, dat = self._simple_command('SETQUOTA', root, limits) - return self._untagged_response(typ, dat, 'QUOTA') - - - def sort(self, sort_criteria, charset, *search_criteria): - """IMAP4rev1 extension SORT command. - - (typ, [data]) = <instance>.sort(sort_criteria, charset, search_criteria, ...) - """ - name = 'SORT' - #if not name in self.capabilities: # Let the server decide! - # raise self.error('unimplemented extension command: %s' % name) - if (sort_criteria[0],sort_criteria[-1]) != ('(',')'): - sort_criteria = '(%s)' % sort_criteria - typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria) - return self._untagged_response(typ, dat, name) - - - def starttls(self, ssl_context=None): - name = 'STARTTLS' - if not HAVE_SSL: - raise self.error('SSL support missing') - if self._tls_established: - raise self.abort('TLS session already established') - if name not in self.capabilities: - raise self.abort('TLS not supported by server') - # Generate a default SSL context if none was passed. - if ssl_context is None: - ssl_context = ssl._create_stdlib_context() - typ, dat = self._simple_command(name) - if typ == 'OK': - self.sock = ssl_context.wrap_socket(self.sock, - server_hostname=self.host) - self.file = self.sock.makefile('rb') - self._tls_established = True - self._get_capabilities() - else: - raise self.error("Couldn't establish TLS session") - return self._untagged_response(typ, dat, name) - - - def status(self, mailbox, names): - """Request named status conditions for mailbox. - - (typ, [data]) = <instance>.status(mailbox, names) - """ - name = 'STATUS' - #if self.PROTOCOL_VERSION == 'IMAP4': # Let the server decide! - # raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name) - typ, dat = self._simple_command(name, mailbox, names) - return self._untagged_response(typ, dat, name) - - - def store(self, message_set, command, flags): - """Alters flag dispositions for messages in mailbox. - - (typ, [data]) = <instance>.store(message_set, command, flags) - """ - if (flags[0],flags[-1]) != ('(',')'): - flags = '(%s)' % flags # Avoid quoting the flags - typ, dat = self._simple_command('STORE', message_set, command, flags) - return self._untagged_response(typ, dat, 'FETCH') - - - def subscribe(self, mailbox): - """Subscribe to new mailbox. 
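Combining store() above with expunge() removes messages for good; a sketch against a selected, writable session M:

    M.store('1', '+FLAGS', r'(\Deleted)')   # parenthesized, so left unquoted
    typ, expunged = M.expunge()             # expunged: removed message numbers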
- - (typ, [data]) = <instance>.subscribe(mailbox) - """ - return self._simple_command('SUBSCRIBE', mailbox) - - - def thread(self, threading_algorithm, charset, *search_criteria): - """IMAP4rev1 extension THREAD command. - - (type, [data]) = <instance>.thread(threading_algorithm, charset, search_criteria, ...) - """ - name = 'THREAD' - typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria) - return self._untagged_response(typ, dat, name) - - - def uid(self, command, *args): - """Execute "command arg ..." with messages identified by UID, - rather than message number. - - (typ, [data]) = <instance>.uid(command, arg1, arg2, ...) - - Returns response appropriate to 'command'. - """ - command = command.upper() - if not command in Commands: - raise self.error("Unknown IMAP4 UID command: %s" % command) - if self.state not in Commands[command]: - raise self.error("command %s illegal in state %s, " - "only allowed in states %s" % - (command, self.state, - ', '.join(Commands[command]))) - name = 'UID' - typ, dat = self._simple_command(name, command, *args) - if command in ('SEARCH', 'SORT', 'THREAD'): - name = command - else: - name = 'FETCH' - return self._untagged_response(typ, dat, name) - - - def unsubscribe(self, mailbox): - """Unsubscribe from old mailbox. - - (typ, [data]) = <instance>.unsubscribe(mailbox) - """ - return self._simple_command('UNSUBSCRIBE', mailbox) - - - def unselect(self): - """Free server's resources associated with the selected mailbox - and returns the server to the authenticated state. - This command performs the same actions as CLOSE, except - that no messages are permanently removed from the currently - selected mailbox. - - (typ, [data]) = <instance>.unselect() - """ - try: - typ, data = self._simple_command('UNSELECT') - finally: - self.state = 'AUTH' - return typ, data - - - def xatom(self, name, *args): - """Allow simple extension commands - notified by server in CAPABILITY response. - - Assumes command is legal in current state. - - (typ, [data]) = <instance>.xatom(name, arg, ...) - - Returns response appropriate to extension command `name'. - """ - name = name.upper() - #if not name in self.capabilities: # Let the server decide! 
- # raise self.error('unknown extension command: %s' % name) - if not name in Commands: - Commands[name] = (self.state,) - return self._simple_command(name, *args) - - - - # Private methods - - - def _append_untagged(self, typ, dat): - if dat is None: - dat = b'' - ur = self.untagged_responses - if __debug__: - if self.debug >= 5: - self._mesg('untagged_responses[%s] %s += ["%r"]' % - (typ, len(ur.get(typ,'')), dat)) - if typ in ur: - ur[typ].append(dat) - else: - ur[typ] = [dat] - - - def _check_bye(self): - bye = self.untagged_responses.get('BYE') - if bye: - raise self.abort(bye[-1].decode(self._encoding, 'replace')) - - - def _command(self, name, *args): - - if self.state not in Commands[name]: - self.literal = None - raise self.error("command %s illegal in state %s, " - "only allowed in states %s" % - (name, self.state, - ', '.join(Commands[name]))) - - for typ in ('OK', 'NO', 'BAD'): - if typ in self.untagged_responses: - del self.untagged_responses[typ] - - if 'READ-ONLY' in self.untagged_responses \ - and not self.is_readonly: - raise self.readonly('mailbox status changed to READ-ONLY') - - tag = self._new_tag() - name = bytes(name, self._encoding) - data = tag + b' ' + name - for arg in args: - if arg is None: continue - if isinstance(arg, str): - arg = bytes(arg, self._encoding) - data = data + b' ' + arg - - literal = self.literal - if literal is not None: - self.literal = None - if type(literal) is type(self._command): - literator = literal - else: - literator = None - if self.utf8_enabled: - data = data + bytes(' UTF8 (~{%s}' % len(literal), self._encoding) - literal = literal + b')' - else: - data = data + bytes(' {%s}' % len(literal), self._encoding) - - if __debug__: - if self.debug >= 4: - self._mesg('> %r' % data) - else: - self._log('> %r' % data) - - try: - self.send(data + CRLF) - except OSError as val: - raise self.abort('socket error: %s' % val) - - if literal is None: - return tag - - while 1: - # Wait for continuation response - - while self._get_response(): - if self.tagged_commands[tag]: # BAD/NO? - return tag - - # Send literal - - if literator: - literal = literator(self.continuation_response) - - if __debug__: - if self.debug >= 4: - self._mesg('write literal size %s' % len(literal)) - - try: - self.send(literal) - self.send(CRLF) - except OSError as val: - raise self.abort('socket error: %s' % val) - - if not literator: - break - - return tag - - - def _command_complete(self, name, tag): - logout = (name == 'LOGOUT') - # BYE is expected after LOGOUT - if not logout: - self._check_bye() - try: - typ, data = self._get_tagged_response(tag, expect_bye=logout) - except self.abort as val: - raise self.abort('command: %s => %s' % (name, val)) - except self.error as val: - raise self.error('command: %s => %s' % (name, val)) - if not logout: - self._check_bye() - if typ == 'BAD': - raise self.error('%s command error: %s %s' % (name, typ, data)) - return typ, data - - - def _get_capabilities(self): - typ, dat = self.capability() - if dat == [None]: - raise self.error('no CAPABILITY response from server') - dat = str(dat[-1], self._encoding) - dat = dat.upper() - self.capabilities = tuple(dat.split()) - - - def _get_response(self): - - # Read response and store. - # - # Returns None for continuation responses, - # otherwise first response line received. - - resp = self._get_line() - - # Command completion response? 
- - if self._match(self.tagre, resp): - tag = self.mo.group('tag') - if not tag in self.tagged_commands: - raise self.abort('unexpected tagged response: %r' % resp) - - typ = self.mo.group('type') - typ = str(typ, self._encoding) - dat = self.mo.group('data') - self.tagged_commands[tag] = (typ, [dat]) - else: - dat2 = None - - # '*' (untagged) responses? - - if not self._match(Untagged_response, resp): - if self._match(self.Untagged_status, resp): - dat2 = self.mo.group('data2') - - if self.mo is None: - # Only other possibility is '+' (continuation) response... - - if self._match(Continuation, resp): - self.continuation_response = self.mo.group('data') - return None # NB: indicates continuation - - raise self.abort("unexpected response: %r" % resp) - - typ = self.mo.group('type') - typ = str(typ, self._encoding) - dat = self.mo.group('data') - if dat is None: dat = b'' # Null untagged response - if dat2: dat = dat + b' ' + dat2 - - # Is there a literal to come? - - while self._match(self.Literal, dat): - - # Read literal direct from connection. - - size = int(self.mo.group('size')) - if __debug__: - if self.debug >= 4: - self._mesg('read literal size %s' % size) - data = self.read(size) - - # Store response with literal as tuple - - self._append_untagged(typ, (dat, data)) - - # Read trailer - possibly containing another literal - - dat = self._get_line() - - self._append_untagged(typ, dat) - - # Bracketed response information? - - if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat): - typ = self.mo.group('type') - typ = str(typ, self._encoding) - self._append_untagged(typ, self.mo.group('data')) - - if __debug__: - if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'): - self._mesg('%s response: %r' % (typ, dat)) - - return resp - - - def _get_tagged_response(self, tag, expect_bye=False): - - while 1: - result = self.tagged_commands[tag] - if result is not None: - del self.tagged_commands[tag] - return result - - if expect_bye: - typ = 'BYE' - bye = self.untagged_responses.pop(typ, None) - if bye is not None: - # Server replies to the "LOGOUT" command with "BYE" - return (typ, bye) - - # If we've seen a BYE at this point, the socket will be - # closed, so report the BYE now. - self._check_bye() - - # Some have reported "unexpected response" exceptions. - # Note that ignoring them here causes loops. - # Instead, send me details of the unexpected response and - # I'll update the code in `_get_response()'. - - try: - self._get_response() - except self.abort as val: - if __debug__: - if self.debug >= 1: - self.print_log() - raise - - - def _get_line(self): - - line = self.readline() - if not line: - raise self.abort('socket error: EOF') - - # Protocol mandates all lines terminated by CRLF - if not line.endswith(b'\r\n'): - raise self.abort('socket error: unterminated line: %r' % line) - - line = line[:-2] - if __debug__: - if self.debug >= 4: - self._mesg('< %r' % line) - else: - self._log('< %r' % line) - return line - - - def _match(self, cre, s): - - # Run compiled regular expression match method on 's'. - # Save result, return success. 
- - self.mo = cre.match(s) - if __debug__: - if self.mo is not None and self.debug >= 5: - self._mesg("\tmatched %r => %r" % (cre.pattern, self.mo.groups())) - return self.mo is not None - - - def _new_tag(self): - - tag = self.tagpre + bytes(str(self.tagnum), self._encoding) - self.tagnum = self.tagnum + 1 - self.tagged_commands[tag] = None - return tag - - - def _quote(self, arg): - - arg = arg.replace('\\', '\\\\') - arg = arg.replace('"', '\\"') - - return '"' + arg + '"' - - - def _simple_command(self, name, *args): - - return self._command_complete(name, self._command(name, *args)) - - - def _untagged_response(self, typ, dat, name): - if typ == 'NO': - return typ, dat - if not name in self.untagged_responses: - return typ, [None] - data = self.untagged_responses.pop(name) - if __debug__: - if self.debug >= 5: - self._mesg('untagged_responses[%s] => %s' % (name, data)) - return typ, data - - - if __debug__: - - def _mesg(self, s, secs=None): - if secs is None: - secs = time.time() - tm = time.strftime('%M:%S', time.localtime(secs)) - sys.stderr.write(' %s.%02d %s\n' % (tm, (secs*100)%100, s)) - sys.stderr.flush() - - def _dump_ur(self, untagged_resp_dict): - if not untagged_resp_dict: - return - items = (f'{key}: {value!r}' - for key, value in untagged_resp_dict.items()) - self._mesg('untagged responses dump:' + '\n\t\t'.join(items)) - - def _log(self, line): - # Keep log of last `_cmd_log_len' interactions for debugging. - self._cmd_log[self._cmd_log_idx] = (line, time.time()) - self._cmd_log_idx += 1 - if self._cmd_log_idx >= self._cmd_log_len: - self._cmd_log_idx = 0 - - def print_log(self): - self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log)) - i, n = self._cmd_log_idx, self._cmd_log_len - while n: - try: - self._mesg(*self._cmd_log[i]) - except: - pass - i += 1 - if i >= self._cmd_log_len: - i = 0 - n -= 1 - - -if HAVE_SSL: - - class IMAP4_SSL(IMAP4): - - """IMAP4 client class over SSL connection - - Instantiate with: IMAP4_SSL([host[, port[, ssl_context[, timeout=None]]]]) - - host - host's name (default: localhost); - port - port number (default: standard IMAP4 SSL port); - ssl_context - a SSLContext object that contains your certificate chain - and private key (default: None) - timeout - socket timeout (default: None) If timeout is not given or is None, - the global default socket timeout is used - - for more documentation see the docstring of the parent class IMAP4. - """ - - - def __init__(self, host='', port=IMAP4_SSL_PORT, - *, ssl_context=None, timeout=None): - if ssl_context is None: - ssl_context = ssl._create_stdlib_context() - self.ssl_context = ssl_context - IMAP4.__init__(self, host, port, timeout) - - def _create_socket(self, timeout): - sock = IMAP4._create_socket(self, timeout) - return self.ssl_context.wrap_socket(sock, - server_hostname=self.host) - - def open(self, host='', port=IMAP4_SSL_PORT, timeout=None): - """Setup connection to remote server on "host:port". - (default: localhost:standard IMAP4 SSL port). - This connection will be used by the routines: - read, readline, send, shutdown. - """ - IMAP4.open(self, host, port, timeout) - - __all__.append("IMAP4_SSL") - - -class IMAP4_stream(IMAP4): - - """IMAP4 client class over a stream - - Instantiate with: IMAP4_stream(command) - - "command" - a string that can be passed to subprocess.Popen() - - for more documentation see the docstring of the parent class IMAP4. 
- """ - - - def __init__(self, command): - self.command = command - IMAP4.__init__(self) - - - def open(self, host=None, port=None, timeout=None): - """Setup a stream connection. - This connection will be used by the routines: - read, readline, send, shutdown. - """ - self.host = None # For compatibility with parent class - self.port = None - self.sock = None - self.file = None - self.process = subprocess.Popen(self.command, - bufsize=DEFAULT_BUFFER_SIZE, - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - shell=True, close_fds=True) - self.writefile = self.process.stdin - self.readfile = self.process.stdout - - def read(self, size): - """Read 'size' bytes from remote.""" - return self.readfile.read(size) - - - def readline(self): - """Read line from remote.""" - return self.readfile.readline() - - - def send(self, data): - """Send data to remote.""" - self.writefile.write(data) - self.writefile.flush() - - - def shutdown(self): - """Close I/O established in "open".""" - self.readfile.close() - self.writefile.close() - self.process.wait() - - - -class _Authenticator: - - """Private class to provide en/decoding - for base64-based authentication conversation. - """ - - def __init__(self, mechinst): - self.mech = mechinst # Callable object to provide/process data - - def process(self, data): - ret = self.mech(self.decode(data)) - if ret is None: - return b'*' # Abort conversation - return self.encode(ret) - - def encode(self, inp): - # - # Invoke binascii.b2a_base64 iteratively with - # short even length buffers, strip the trailing - # line feed from the result and append. "Even" - # means a number that factors to both 6 and 8, - # so when it gets to the end of the 8-bit input - # there's no partial 6-bit output. - # - oup = b'' - if isinstance(inp, str): - inp = inp.encode('utf-8') - while inp: - if len(inp) > 48: - t = inp[:48] - inp = inp[48:] - else: - t = inp - inp = b'' - e = binascii.b2a_base64(t) - if e: - oup = oup + e[:-1] - return oup - - def decode(self, inp): - if not inp: - return b'' - return binascii.a2b_base64(inp) - -Months = ' Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split(' ') -Mon2num = {s.encode():n+1 for n, s in enumerate(Months[1:])} - -def Internaldate2tuple(resp): - """Parse an IMAP4 INTERNALDATE string. - - Return corresponding local time. The return value is a - time.struct_time tuple or None if the string has wrong format. - """ - - mo = InternalDate.match(resp) - if not mo: - return None - - mon = Mon2num[mo.group('mon')] - zonen = mo.group('zonen') - - day = int(mo.group('day')) - year = int(mo.group('year')) - hour = int(mo.group('hour')) - min = int(mo.group('min')) - sec = int(mo.group('sec')) - zoneh = int(mo.group('zoneh')) - zonem = int(mo.group('zonem')) - - # INTERNALDATE timezone must be subtracted to get UT - - zone = (zoneh*60 + zonem)*60 - if zonen == b'-': - zone = -zone - - tt = (year, mon, day, hour, min, sec, -1, -1, -1) - utc = calendar.timegm(tt) - zone - - return time.localtime(utc) - - - -def Int2AP(num): - - """Convert integer to A-P string representation.""" - - val = b''; AP = b'ABCDEFGHIJKLMNOP' - num = int(abs(num)) - while num: - num, mod = divmod(num, 16) - val = AP[mod:mod+1] + val - return val - - - -def ParseFlags(resp): - - """Convert IMAP4 flags response to python tuple.""" - - mo = Flags.match(resp) - if not mo: - return () - - return tuple(mo.group('flags').split()) - - -def Time2Internaldate(date_time): - - """Convert date_time to IMAP4 INTERNALDATE representation. 
- - Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The - date_time argument can be a number (int or float) representing - seconds since epoch (as returned by time.time()), a 9-tuple - representing local time, an instance of time.struct_time (as - returned by time.localtime()), an aware datetime instance or a - double-quoted string. In the last case, it is assumed to already - be in the correct format. - """ - if isinstance(date_time, (int, float)): - dt = datetime.fromtimestamp(date_time, - timezone.utc).astimezone() - elif isinstance(date_time, tuple): - try: - gmtoff = date_time.tm_gmtoff - except AttributeError: - if time.daylight: - dst = date_time[8] - if dst == -1: - dst = time.localtime(time.mktime(date_time))[8] - gmtoff = -(time.timezone, time.altzone)[dst] - else: - gmtoff = -time.timezone - delta = timedelta(seconds=gmtoff) - dt = datetime(*date_time[:6], tzinfo=timezone(delta)) - elif isinstance(date_time, datetime): - if date_time.tzinfo is None: - raise ValueError("date_time must be aware") - dt = date_time - elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'): - return date_time # Assume in correct format - else: - raise ValueError("date_time not of a known type") - fmt = '"%d-{}-%Y %H:%M:%S %z"'.format(Months[dt.month]) - return dt.strftime(fmt) - - - -if __name__ == '__main__': - - # To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]' - # or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"' - # to test the IMAP4_stream class - - import getopt, getpass - - try: - optlist, args = getopt.getopt(sys.argv[1:], 'd:s:') - except getopt.error as val: - optlist, args = (), () - - stream_command = None - for opt,val in optlist: - if opt == '-d': - Debug = int(val) - elif opt == '-s': - stream_command = val - if not args: args = (stream_command,) - - if not args: args = ('',) - - host = args[0] - - USER = getpass.getuser() - PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost")) - - test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'} - test_seq1 = ( - ('login', (USER, PASSWD)), - ('create', ('/tmp/xxx 1',)), - ('rename', ('/tmp/xxx 1', '/tmp/yyy')), - ('CREATE', ('/tmp/yyz 2',)), - ('append', ('/tmp/yyz 2', None, None, test_mesg)), - ('list', ('/tmp', 'yy*')), - ('select', ('/tmp/yyz 2',)), - ('search', (None, 'SUBJECT', 'test')), - ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')), - ('store', ('1', 'FLAGS', r'(\Deleted)')), - ('namespace', ()), - ('expunge', ()), - ('recent', ()), - ('close', ()), - ) - - test_seq2 = ( - ('select', ()), - ('response',('UIDVALIDITY',)), - ('uid', ('SEARCH', 'ALL')), - ('response', ('EXISTS',)), - ('append', (None, None, None, test_mesg)), - ('recent', ()), - ('logout', ()), - ) - - def run(cmd, args): - M._mesg('%s %s' % (cmd, args)) - typ, dat = getattr(M, cmd)(*args) - M._mesg('%s => %s %s' % (cmd, typ, dat)) - if typ == 'NO': raise dat[0] - return dat - - try: - if stream_command: - M = IMAP4_stream(stream_command) - else: - M = IMAP4(host) - if M.state == 'AUTH': - test_seq1 = test_seq1[1:] # Login not needed - M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION) - M._mesg('CAPABILITIES = %r' % (M.capabilities,)) - - for cmd,args in test_seq1: - run(cmd, args) - - for ml in run('list', ('/tmp/', 'yy%')): - mo = re.match(r'.*"([^"]+)"$', ml) - if mo: path = mo.group(1) - else: path = ml.split()[-1] - run('delete', (path,)) - - for cmd,args in test_seq2: - dat = run(cmd, args) - - if 
(cmd,args) != ('uid', ('SEARCH', 'ALL')): - continue - - uid = dat[-1].split() - if not uid: continue - run('uid', ('FETCH', '%s' % uid[-1], - '(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)')) - - print('\nAll tests OK.') - - except: - print('\nTests failed.') - - if not Debug: - print(''' -If you would like to see debugging output, -try: %s -d5 -''' % sys.argv[0]) - - raise diff --git a/Python313_13_x64_Template/Lib/importlib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 49e1ab22..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/__pycache__/_abc.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/__pycache__/_abc.cpython-313.pyc deleted file mode 100644 index 6796758e..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/__pycache__/_abc.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/__pycache__/abc.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/__pycache__/abc.cpython-313.pyc deleted file mode 100644 index 3c62a310..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/__pycache__/abc.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/__pycache__/readers.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/__pycache__/readers.cpython-313.pyc deleted file mode 100644 index 89951a5d..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/__pycache__/readers.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/_bootstrap.py b/Python313_13_x64_Template/Lib/importlib/_bootstrap.py deleted file mode 100644 index aed993cc..00000000 --- a/Python313_13_x64_Template/Lib/importlib/_bootstrap.py +++ /dev/null @@ -1,1559 +0,0 @@ -"""Core implementation of import. - -This module is NOT meant to be directly imported! It has been designed such -that it can be bootstrapped into Python as the implementation of import. As -such it requires the injection of specific modules and attributes in order to -work. One should use importlib as the public-facing version of this module. - -""" -# -# IMPORTANT: Whenever making changes to this module, be sure to run a top-level -# `make regen-importlib` followed by `make` in order to get the frozen version -# of the module updated. Not doing so will result in the Makefile to fail for -# all others who don't have a ./python around to freeze the module -# in the early stages of compilation. -# - -# See importlib._setup() for what is injected into the global namespace. - -# When editing this code be aware that code executed at import time CANNOT -# reference any injected objects! This includes not only global code but also -# anything specified at the class level. 
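The docstring above points users at `importlib` as the public face of this machinery; for orientation, a short usage sketch of the public entry points that ultimately drive the functions deleted below:

import importlib
import importlib.util

mod = importlib.import_module('json')    # ends up in _gcd_import()/_find_and_load()
spec = importlib.util.find_spec('json')  # ends up in _find_spec()
print(mod.__name__, spec.origin)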
- -def _object_name(obj): - try: - return obj.__qualname__ - except AttributeError: - return type(obj).__qualname__ - -# Bootstrap-related code ###################################################### - -# Modules injected manually by _setup() -_thread = None -_warnings = None -_weakref = None - -# Import done by _install_external_importers() -_bootstrap_external = None - - -def _wrap(new, old): - """Simple substitute for functools.update_wrapper.""" - for replace in ['__module__', '__name__', '__qualname__', '__doc__']: - if hasattr(old, replace): - setattr(new, replace, getattr(old, replace)) - new.__dict__.update(old.__dict__) - - -def _new_module(name): - return type(sys)(name) - - -# Module-level locking ######################################################## - -# For a list that can have a weakref to it. -class _List(list): - __slots__ = ("__weakref__",) - - -# Copied from weakref.py with some simplifications and modifications unique to -# bootstrapping importlib. Many methods were simply deleting for simplicity, so if they -# are needed in the future they may work if simply copied back in. -class _WeakValueDictionary: - - def __init__(self): - self_weakref = _weakref.ref(self) - - # Inlined to avoid issues with inheriting from _weakref.ref before _weakref is - # set by _setup(). Since there's only one instance of this class, this is - # not expensive. - class KeyedRef(_weakref.ref): - - __slots__ = "key", - - def __new__(type, ob, key): - self = super().__new__(type, ob, type.remove) - self.key = key - return self - - def __init__(self, ob, key): - super().__init__(ob, self.remove) - - @staticmethod - def remove(wr): - nonlocal self_weakref - - self = self_weakref() - if self is not None: - if self._iterating: - self._pending_removals.append(wr.key) - else: - _weakref._remove_dead_weakref(self.data, wr.key) - - self._KeyedRef = KeyedRef - self.clear() - - def clear(self): - self._pending_removals = [] - self._iterating = set() - self.data = {} - - def _commit_removals(self): - pop = self._pending_removals.pop - d = self.data - while True: - try: - key = pop() - except IndexError: - return - _weakref._remove_dead_weakref(d, key) - - def get(self, key, default=None): - if self._pending_removals: - self._commit_removals() - try: - wr = self.data[key] - except KeyError: - return default - else: - if (o := wr()) is None: - return default - else: - return o - - def setdefault(self, key, default=None): - try: - o = self.data[key]() - except KeyError: - o = None - if o is None: - if self._pending_removals: - self._commit_removals() - self.data[key] = self._KeyedRef(default, key) - return default - else: - return o - - -# A dict mapping module names to weakrefs of _ModuleLock instances. -# Dictionary protected by the global import lock. -_module_locks = {} - -# A dict mapping thread IDs to weakref'ed lists of _ModuleLock instances. -# This maps a thread to the module locks it is blocking on acquiring. The -# values are lists because a single thread could perform a re-entrant import -# and be "in the process" of blocking on locks for more than one module. A -# thread can be "in the process" because a thread cannot actually block on -# acquiring more than one lock but it can have set up bookkeeping that reflects -# that it intends to block on acquiring more than one lock. -# -# The dictionary uses a WeakValueDictionary to avoid keeping unnecessary -# lists around, regardless of GC runs. This way there's no memory leak if -# the list is no longer needed (GH-106176). 
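The inlined `_WeakValueDictionary` above mimics the stdlib class of the same name, which cannot be imported this early in bootstrap. The leak-avoidance behaviour the comment describes can be demonstrated with the public class (entries vanish with their values; in CPython collection is immediate thanks to reference counting):

import weakref

class Lock:  # stand-in value type that supports weak references
    pass

locks = weakref.WeakValueDictionary()
lock = Lock()
locks['pkg.mod'] = lock
print('pkg.mod' in locks)   # True while a strong reference exists
del lock
print('pkg.mod' in locks)   # False: the dead entry disappeared with its value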
-_blocking_on = None
-
-
-class _BlockingOnManager:
-    """A context manager responsible to updating ``_blocking_on``."""
-    def __init__(self, thread_id, lock):
-        self.thread_id = thread_id
-        self.lock = lock
-
-    def __enter__(self):
-        """Mark the running thread as waiting for self.lock. via _blocking_on."""
-        # Interactions with _blocking_on are *not* protected by the global
-        # import lock here because each thread only touches the state that it
-        # owns (state keyed on its thread id). The global import lock is
-        # re-entrant (i.e., a single thread may take it more than once) so it
-        # wouldn't help us be correct in the face of re-entrancy either.
-
-        self.blocked_on = _blocking_on.setdefault(self.thread_id, _List())
-        self.blocked_on.append(self.lock)
-
-    def __exit__(self, *args, **kwargs):
-        """Remove self.lock from this thread's _blocking_on list."""
-        self.blocked_on.remove(self.lock)
-
-
-class _DeadlockError(RuntimeError):
-    pass
-
-
-
-def _has_deadlocked(target_id, *, seen_ids, candidate_ids, blocking_on):
-    """Check if 'target_id' is holding the same lock as another thread(s).
-
-    The search within 'blocking_on' starts with the threads listed in
-    'candidate_ids'. 'seen_ids' contains any threads that are considered
-    already traversed in the search.
-
-    Keyword arguments:
-    target_id     -- The thread id to try to reach.
-    seen_ids      -- A set of threads that have already been visited.
-    candidate_ids -- The thread ids from which to begin.
-    blocking_on   -- A dict representing the thread/blocking-on graph. This may
-                     be the same object as the global '_blocking_on' but it is
-                     a parameter to reduce the impact that global mutable
-                     state has on the result of this function.
-    """
-    if target_id in candidate_ids:
-        # If we have already reached the target_id, we're done - signal that it
-        # is reachable.
-        return True
-
-    # Otherwise, try to reach the target_id from each of the given candidate_ids.
-    for tid in candidate_ids:
-        if not (candidate_blocking_on := blocking_on.get(tid)):
-            # There are no edges out from this node, skip it.
-            continue
-        elif tid in seen_ids:
-            # bpo 38091: the chain of tid's we encounter here eventually leads
-            # to a fixed point or a cycle, but does not reach target_id.
-            # This means we would not actually deadlock. This can happen if
-            # other threads are at the beginning of acquire() below.
-            return False
-        seen_ids.add(tid)
-
-        # Follow the edges out from this thread.
-        edges = [lock.owner for lock in candidate_blocking_on]
-        if _has_deadlocked(target_id, seen_ids=seen_ids, candidate_ids=edges,
-                           blocking_on=blocking_on):
-            return True
-
-    return False
-
-
-class _ModuleLock:
-    """A recursive lock implementation which is able to detect deadlocks
-    (e.g. thread 1 trying to take locks A then B, and thread 2 trying to
-    take locks B then A).
-    """
-
-    def __init__(self, name):
-        # Create an RLock for protecting the import process for the
-        # corresponding module. Since it is an RLock, a single thread will be
-        # able to take it more than once. This is necessary to support
-        # re-entrancy in the import system that arises from (at least) signal
-        # handlers and the garbage collector. Consider the case of:
-        #
-        #  import foo
-        #  -> ...
-        #     -> importlib._bootstrap._ModuleLock.acquire
-        #        -> ...
-        #           -> <garbage collector>
-        #              -> __del__
-        #                 -> import foo
-        #                    -> ...
- # -> importlib._bootstrap._ModuleLock.acquire - # -> _BlockingOnManager.__enter__ - # - # If a different thread than the running one holds the lock then the - # thread will have to block on taking the lock, which is what we want - # for thread safety. - self.lock = _thread.RLock() - self.wakeup = _thread.allocate_lock() - - # The name of the module for which this is a lock. - self.name = name - - # Can end up being set to None if this lock is not owned by any thread - # or the thread identifier for the owning thread. - self.owner = None - - # Represent the number of times the owning thread has acquired this lock - # via a list of True. This supports RLock-like ("re-entrant lock") - # behavior, necessary in case a single thread is following a circular - # import dependency and needs to take the lock for a single module - # more than once. - # - # Counts are represented as a list of True because list.append(True) - # and list.pop() are both atomic and thread-safe in CPython and it's hard - # to find another primitive with the same properties. - self.count = [] - - # This is a count of the number of threads that are blocking on - # self.wakeup.acquire() awaiting to get their turn holding this module - # lock. When the module lock is released, if this is greater than - # zero, it is decremented and `self.wakeup` is released one time. The - # intent is that this will let one other thread make more progress on - # acquiring this module lock. This repeats until all the threads have - # gotten a turn. - # - # This is incremented in self.acquire() when a thread notices it is - # going to have to wait for another thread to finish. - # - # See the comment above count for explanation of the representation. - self.waiters = [] - - def has_deadlock(self): - # To avoid deadlocks for concurrent or re-entrant circular imports, - # look at _blocking_on to see if any threads are blocking - # on getting the import lock for any module for which the import lock - # is held by this thread. - return _has_deadlocked( - # Try to find this thread. - target_id=_thread.get_ident(), - seen_ids=set(), - # Start from the thread that holds the import lock for this - # module. - candidate_ids=[self.owner], - # Use the global "blocking on" state. - blocking_on=_blocking_on, - ) - - def acquire(self): - """ - Acquire the module lock. If a potential deadlock is detected, - a _DeadlockError is raised. - Otherwise, the lock is always acquired and True is returned. - """ - tid = _thread.get_ident() - with _BlockingOnManager(tid, self): - while True: - # Protect interaction with state on self with a per-module - # lock. This makes it safe for more than one thread to try to - # acquire the lock for a single module at the same time. - with self.lock: - if self.count == [] or self.owner == tid: - # If the lock for this module is unowned then we can - # take the lock immediately and succeed. If the lock - # for this module is owned by the running thread then - # we can also allow the acquire to succeed. This - # supports circular imports (thread T imports module A - # which imports module B which imports module A). - self.owner = tid - self.count.append(True) - return True - - # At this point we know the lock is held (because count != - # 0) by another thread (because owner != tid). We'll have - # to get in line to take the module lock. - - # But first, check to see if this thread would create a - # deadlock by acquiring this module lock. If it would - # then just stop with an error. 
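The check that `has_deadlock()` delegates to `_has_deadlocked()` is plain reachability over the waits-on graph; a toy restatement with integers standing in for thread ids (not the module's own code):

def reaches(target, start, graph, seen=None):
    # Can the waits-on edges lead from 'start' back to 'target'?
    seen = set() if seen is None else seen
    if start == target:
        return True
    if start in seen or start not in graph:
        return False
    seen.add(start)
    return any(reaches(target, nxt, graph, seen) for nxt in graph[start])

waits_on = {1: [2], 2: [3], 3: [1]}  # thread 1 waits on 2, 2 on 3, 3 on 1
print(reaches(1, 2, waits_on))       # True: 1 blocking on 2 closes a cycle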
- # - # It's not clear who is expected to handle this error. - # There is one handler in _lock_unlock_module but many - # times this method is called when entering the context - # manager _ModuleLockManager instead - so _DeadlockError - # will just propagate up to application code. - # - # This seems to be more than just a hypothetical - - # https://stackoverflow.com/questions/59509154 - # https://github.com/encode/django-rest-framework/issues/7078 - if self.has_deadlock(): - raise _DeadlockError(f'deadlock detected by {self!r}') - - # Check to see if we're going to be able to acquire the - # lock. If we are going to have to wait then increment - # the waiters so `self.release` will know to unblock us - # later on. We do this part non-blockingly so we don't - # get stuck here before we increment waiters. We have - # this extra acquire call (in addition to the one below, - # outside the self.lock context manager) to make sure - # self.wakeup is held when the next acquire is called (so - # we block). This is probably needlessly complex and we - # should just take self.wakeup in the return codepath - # above. - if self.wakeup.acquire(False): - self.waiters.append(None) - - # Now take the lock in a blocking fashion. This won't - # complete until the thread holding this lock - # (self.owner) calls self.release. - self.wakeup.acquire() - - # Taking the lock has served its purpose (making us wait), so we can - # give it up now. We'll take it w/o blocking again on the - # next iteration around this 'while' loop. - self.wakeup.release() - - def release(self): - tid = _thread.get_ident() - with self.lock: - if self.owner != tid: - raise RuntimeError('cannot release un-acquired lock') - assert len(self.count) > 0 - self.count.pop() - if not len(self.count): - self.owner = None - if len(self.waiters) > 0: - self.waiters.pop() - self.wakeup.release() - - def __repr__(self): - return f'_ModuleLock({self.name!r}) at {id(self)}' - - -class _DummyModuleLock: - """A simple _ModuleLock equivalent for Python builds without - multi-threading support.""" - - def __init__(self, name): - self.name = name - self.count = 0 - - def acquire(self): - self.count += 1 - return True - - def release(self): - if self.count == 0: - raise RuntimeError('cannot release un-acquired lock') - self.count -= 1 - - def __repr__(self): - return f'_DummyModuleLock({self.name!r}) at {id(self)}' - - -class _ModuleLockManager: - - def __init__(self, name): - self._name = name - self._lock = None - - def __enter__(self): - self._lock = _get_module_lock(self._name) - self._lock.acquire() - - def __exit__(self, *args, **kwargs): - self._lock.release() - - -# The following two functions are for consumption by Python/import.c. - -def _get_module_lock(name): - """Get or create the module lock for a given module name. - - Acquire/release internally the global import lock to protect - _module_locks.""" - - _imp.acquire_lock() - try: - try: - lock = _module_locks[name]() - except KeyError: - lock = None - - if lock is None: - if _thread is None: - lock = _DummyModuleLock(name) - else: - lock = _ModuleLock(name) - - def cb(ref, name=name): - _imp.acquire_lock() - try: - # bpo-31070: Check if another thread created a new lock - # after the previous lock was destroyed - # but before the weakref callback was called. 
-                    if _module_locks.get(name) is ref:
-                        del _module_locks[name]
-                finally:
-                    _imp.release_lock()
-
-            _module_locks[name] = _weakref.ref(lock, cb)
-    finally:
-        _imp.release_lock()
-
-    return lock
-
-
-def _lock_unlock_module(name):
-    """Acquires then releases the module lock for a given module name.
-
-    This is used to ensure a module is completely initialized, in the
-    event it is being imported by another thread.
-    """
-    lock = _get_module_lock(name)
-    try:
-        lock.acquire()
-    except _DeadlockError:
-        # Concurrent circular import, we'll accept a partially initialized
-        # module object.
-        pass
-    else:
-        lock.release()
-
-# Frame stripping magic ###############################################
-def _call_with_frames_removed(f, *args, **kwds):
-    """remove_importlib_frames in import.c will always remove sequences
-    of importlib frames that end with a call to this function
-
-    Use it instead of a normal call in places where including the importlib
-    frames introduces unwanted noise into the traceback (e.g. when executing
-    module code)
-    """
-    return f(*args, **kwds)
-
-
-def _verbose_message(message, *args, verbosity=1):
-    """Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
-    if sys.flags.verbose >= verbosity:
-        if not message.startswith(('#', 'import ')):
-            message = '# ' + message
-        print(message.format(*args), file=sys.stderr)
-
-
-def _requires_builtin(fxn):
-    """Decorator to verify the named module is built-in."""
-    def _requires_builtin_wrapper(self, fullname):
-        if fullname not in sys.builtin_module_names:
-            raise ImportError(f'{fullname!r} is not a built-in module',
-                              name=fullname)
-        return fxn(self, fullname)
-    _wrap(_requires_builtin_wrapper, fxn)
-    return _requires_builtin_wrapper
-
-
-def _requires_frozen(fxn):
-    """Decorator to verify the named module is frozen."""
-    def _requires_frozen_wrapper(self, fullname):
-        if not _imp.is_frozen(fullname):
-            raise ImportError(f'{fullname!r} is not a frozen module',
-                              name=fullname)
-        return fxn(self, fullname)
-    _wrap(_requires_frozen_wrapper, fxn)
-    return _requires_frozen_wrapper
-
-
-# Typically used by loader classes as a method replacement.
-def _load_module_shim(self, fullname):
-    """Load the specified module into sys.modules and return it.
-
-    This method is deprecated. Use loader.exec_module() instead.
-
-    """
-    msg = ("the load_module() method is deprecated and slated for removal in "
-           "Python 3.15; use exec_module() instead")
-    _warnings.warn(msg, DeprecationWarning)
-    spec = spec_from_loader(fullname, self)
-    if fullname in sys.modules:
-        module = sys.modules[fullname]
-        _exec(spec, module)
-        return sys.modules[fullname]
-    else:
-        return _load(spec)
-
-# Module specifications #######################################################
-
-def _module_repr(module):
-    """The implementation of ModuleType.__repr__()."""
-    loader = getattr(module, '__loader__', None)
-    if spec := getattr(module, "__spec__", None):
-        return _module_repr_from_spec(spec)
-    # Fall through to a catch-all which always succeeds.
-    try:
-        name = module.__name__
-    except AttributeError:
-        name = '?'
-    try:
-        filename = module.__file__
-    except AttributeError:
-        if loader is None:
-            return f'<module {name!r}>'
-        else:
-            return f'<module {name!r} ({loader!r})>'
-    else:
-        return f'<module {name!r} from {filename!r}>'
-
-
-class ModuleSpec:
-    """The specification for a module, used for loading.
-
-    A module's spec is the source for information about the module. For
-    data associated with the module, including source, use the spec's
-    loader.
-
-    `name` is the absolute name of the module.
`loader` is the loader - to use when loading the module. `parent` is the name of the - package the module is in. The parent is derived from the name. - - `is_package` determines if the module is considered a package or - not. On modules this is reflected by the `__path__` attribute. - - `origin` is the specific location used by the loader from which to - load the module, if that information is available. When filename is - set, origin will match. - - `has_location` indicates that a spec's "origin" reflects a location. - When this is True, `__file__` attribute of the module is set. - - `cached` is the location of the cached bytecode file, if any. It - corresponds to the `__cached__` attribute. - - `submodule_search_locations` is the sequence of path entries to - search when importing submodules. If set, is_package should be - True--and False otherwise. - - Packages are simply modules that (may) have submodules. If a spec - has a non-None value in `submodule_search_locations`, the import - system will consider modules loaded from the spec as packages. - - Only finders (see importlib.abc.MetaPathFinder and - importlib.abc.PathEntryFinder) should modify ModuleSpec instances. - - """ - - def __init__(self, name, loader, *, origin=None, loader_state=None, - is_package=None): - self.name = name - self.loader = loader - self.origin = origin - self.loader_state = loader_state - self.submodule_search_locations = [] if is_package else None - self._uninitialized_submodules = [] - - # file-location attributes - self._set_fileattr = False - self._cached = None - - def __repr__(self): - args = [f'name={self.name!r}', f'loader={self.loader!r}'] - if self.origin is not None: - args.append(f'origin={self.origin!r}') - if self.submodule_search_locations is not None: - args.append(f'submodule_search_locations={self.submodule_search_locations}') - return f'{self.__class__.__name__}({", ".join(args)})' - - def __eq__(self, other): - smsl = self.submodule_search_locations - try: - return (self.name == other.name and - self.loader == other.loader and - self.origin == other.origin and - smsl == other.submodule_search_locations and - self.cached == other.cached and - self.has_location == other.has_location) - except AttributeError: - return NotImplemented - - @property - def cached(self): - if self._cached is None: - if self.origin is not None and self._set_fileattr: - if _bootstrap_external is None: - raise NotImplementedError - self._cached = _bootstrap_external._get_cached(self.origin) - return self._cached - - @cached.setter - def cached(self, cached): - self._cached = cached - - @property - def parent(self): - """The name of the module's parent.""" - if self.submodule_search_locations is None: - return self.name.rpartition('.')[0] - else: - return self.name - - @property - def has_location(self): - return self._set_fileattr - - @has_location.setter - def has_location(self, value): - self._set_fileattr = bool(value) - - -def spec_from_loader(name, loader, *, origin=None, is_package=None): - """Return a module spec based on various loader methods.""" - if origin is None: - origin = getattr(loader, '_ORIGIN', None) - - if not origin and hasattr(loader, 'get_filename'): - if _bootstrap_external is None: - raise NotImplementedError - spec_from_file_location = _bootstrap_external.spec_from_file_location - - if is_package is None: - return spec_from_file_location(name, loader=loader) - search = [] if is_package else None - return spec_from_file_location(name, loader=loader, - submodule_search_locations=search) - - if 
is_package is None: - if hasattr(loader, 'is_package'): - try: - is_package = loader.is_package(name) - except ImportError: - is_package = None # aka, undefined - else: - # the default - is_package = False - - return ModuleSpec(name, loader, origin=origin, is_package=is_package) - - -def _spec_from_module(module, loader=None, origin=None): - # This function is meant for use in _setup(). - try: - spec = module.__spec__ - except AttributeError: - pass - else: - if spec is not None: - return spec - - name = module.__name__ - if loader is None: - try: - loader = module.__loader__ - except AttributeError: - # loader will stay None. - pass - try: - location = module.__file__ - except AttributeError: - location = None - if origin is None: - if loader is not None: - origin = getattr(loader, '_ORIGIN', None) - if not origin and location is not None: - origin = location - try: - cached = module.__cached__ - except AttributeError: - cached = None - try: - submodule_search_locations = list(module.__path__) - except AttributeError: - submodule_search_locations = None - - spec = ModuleSpec(name, loader, origin=origin) - spec._set_fileattr = False if location is None else (origin == location) - spec.cached = cached - spec.submodule_search_locations = submodule_search_locations - return spec - - -def _init_module_attrs(spec, module, *, override=False): - # The passed-in module may be not support attribute assignment, - # in which case we simply don't set the attributes. - # __name__ - if (override or getattr(module, '__name__', None) is None): - try: - module.__name__ = spec.name - except AttributeError: - pass - # __loader__ - if override or getattr(module, '__loader__', None) is None: - loader = spec.loader - if loader is None: - # A backward compatibility hack. - if spec.submodule_search_locations is not None: - if _bootstrap_external is None: - raise NotImplementedError - NamespaceLoader = _bootstrap_external.NamespaceLoader - - loader = NamespaceLoader.__new__(NamespaceLoader) - loader._path = spec.submodule_search_locations - spec.loader = loader - # While the docs say that module.__file__ is not set for - # built-in modules, and the code below will avoid setting it if - # spec.has_location is false, this is incorrect for namespace - # packages. Namespace packages have no location, but their - # __spec__.origin is None, and thus their module.__file__ - # should also be None for consistency. While a bit of a hack, - # this is the best place to ensure this consistency. - # - # See # https://docs.python.org/3/library/importlib.html#importlib.abc.Loader.load_module - # and bpo-32305 - module.__file__ = None - try: - module.__loader__ = loader - except AttributeError: - pass - # __package__ - if override or getattr(module, '__package__', None) is None: - try: - module.__package__ = spec.parent - except AttributeError: - pass - # __spec__ - try: - module.__spec__ = spec - except AttributeError: - pass - # __path__ - if override or getattr(module, '__path__', None) is None: - if spec.submodule_search_locations is not None: - # XXX We should extend __path__ if it's already a list. 
-            try:
-                module.__path__ = spec.submodule_search_locations
-            except AttributeError:
-                pass
-    # __file__/__cached__
-    if spec.has_location:
-        if override or getattr(module, '__file__', None) is None:
-            try:
-                module.__file__ = spec.origin
-            except AttributeError:
-                pass
-
-        if override or getattr(module, '__cached__', None) is None:
-            if spec.cached is not None:
-                try:
-                    module.__cached__ = spec.cached
-                except AttributeError:
-                    pass
-    return module
-
-
-def module_from_spec(spec):
-    """Create a module based on the provided spec."""
-    # Typically loaders will not implement create_module().
-    module = None
-    if hasattr(spec.loader, 'create_module'):
-        # If create_module() returns `None` then it means default
-        # module creation should be used.
-        module = spec.loader.create_module(spec)
-    elif hasattr(spec.loader, 'exec_module'):
-        raise ImportError('loaders that define exec_module() '
-                          'must also define create_module()')
-    if module is None:
-        module = _new_module(spec.name)
-    _init_module_attrs(spec, module)
-    return module
-
-
-def _module_repr_from_spec(spec):
-    """Return the repr to use for the module."""
-    name = '?' if spec.name is None else spec.name
-    if spec.origin is None:
-        loader = spec.loader
-        if loader is None:
-            return f'<module {name!r}>'
-        elif (
-            _bootstrap_external is not None
-            and isinstance(loader, _bootstrap_external.NamespaceLoader)
-        ):
-            return f'<module {name!r} (namespace) from {list(loader._path)}>'
-        else:
-            return f'<module {name!r} ({loader!r})>'
-    else:
-        if spec.has_location:
-            return f'<module {name!r} from {spec.origin!r}>'
-        else:
-            return f'<module {name!r} ({spec.origin})>'
-
-
-# Used by importlib.reload() and _load_module_shim().
-def _exec(spec, module):
-    """Execute the spec's specified module in an existing module's namespace."""
-    name = spec.name
-    with _ModuleLockManager(name):
-        if sys.modules.get(name) is not module:
-            msg = f'module {name!r} not in sys.modules'
-            raise ImportError(msg, name=name)
-        try:
-            if spec.loader is None:
-                if spec.submodule_search_locations is None:
-                    raise ImportError('missing loader', name=spec.name)
-                # Namespace package.
-                _init_module_attrs(spec, module, override=True)
-            else:
-                _init_module_attrs(spec, module, override=True)
-                if not hasattr(spec.loader, 'exec_module'):
-                    msg = (f"{_object_name(spec.loader)}.exec_module() not found; "
-                           "falling back to load_module()")
-                    _warnings.warn(msg, ImportWarning)
-                    spec.loader.load_module(name)
-                else:
-                    spec.loader.exec_module(module)
-        finally:
-            # Update the order of insertion into sys.modules for module
-            # clean-up at shutdown.
-            module = sys.modules.pop(spec.name)
-            sys.modules[spec.name] = module
-    return module
-
-
-def _load_backward_compatible(spec):
-    # It is assumed that all callers have been warned about using load_module()
-    # appropriately before calling this function.
-    try:
-        spec.loader.load_module(spec.name)
-    except:
-        if spec.name in sys.modules:
-            module = sys.modules.pop(spec.name)
-            sys.modules[spec.name] = module
-        raise
-    # The module must be in sys.modules at this point!
-    # Move it to the end of sys.modules.
-    module = sys.modules.pop(spec.name)
-    sys.modules[spec.name] = module
-    if getattr(module, '__loader__', None) is None:
-        try:
-            module.__loader__ = spec.loader
-        except AttributeError:
-            pass
-    if getattr(module, '__package__', None) is None:
-        try:
-            # Since module.__path__ may not line up with
-            # spec.submodule_search_paths, we can't necessarily rely
-            # on spec.parent here.
- module.__package__ = module.__name__ - if not hasattr(module, '__path__'): - module.__package__ = spec.name.rpartition('.')[0] - except AttributeError: - pass - if getattr(module, '__spec__', None) is None: - try: - module.__spec__ = spec - except AttributeError: - pass - return module - -def _load_unlocked(spec): - # A helper for direct use by the import system. - if spec.loader is not None: - # Not a namespace package. - if not hasattr(spec.loader, 'exec_module'): - msg = (f"{_object_name(spec.loader)}.exec_module() not found; " - "falling back to load_module()") - _warnings.warn(msg, ImportWarning) - return _load_backward_compatible(spec) - - module = module_from_spec(spec) - - # This must be done before putting the module in sys.modules - # (otherwise an optimization shortcut in import.c becomes - # wrong). - spec._initializing = True - try: - sys.modules[spec.name] = module - try: - if spec.loader is None: - if spec.submodule_search_locations is None: - raise ImportError('missing loader', name=spec.name) - # A namespace package so do nothing. - else: - spec.loader.exec_module(module) - except: - try: - del sys.modules[spec.name] - except KeyError: - pass - raise - # Move the module to the end of sys.modules. - # We don't ensure that the import-related module attributes get - # set in the sys.modules replacement case. Such modules are on - # their own. - module = sys.modules.pop(spec.name) - sys.modules[spec.name] = module - _verbose_message('import {!r} # {!r}', spec.name, spec.loader) - finally: - spec._initializing = False - - return module - -# A method used during testing of _load_unlocked() and by -# _load_module_shim(). -def _load(spec): - """Return a new module object, loaded by the spec's loader. - - The module is not added to its parent. - - If a module is already in sys.modules, that existing module gets - clobbered. - - """ - with _ModuleLockManager(spec.name): - return _load_unlocked(spec) - - -# Loaders ##################################################################### - -class BuiltinImporter: - - """Meta path import for built-in modules. - - All methods are either class or static methods to avoid the need to - instantiate the class. - - """ - - _ORIGIN = "built-in" - - @classmethod - def find_spec(cls, fullname, path=None, target=None): - if _imp.is_builtin(fullname): - return spec_from_loader(fullname, cls, origin=cls._ORIGIN) - else: - return None - - @staticmethod - def create_module(spec): - """Create a built-in module""" - if spec.name not in sys.builtin_module_names: - raise ImportError(f'{spec.name!r} is not a built-in module', - name=spec.name) - return _call_with_frames_removed(_imp.create_builtin, spec) - - @staticmethod - def exec_module(module): - """Exec a built-in module""" - _call_with_frames_removed(_imp.exec_builtin, module) - - @classmethod - @_requires_builtin - def get_code(cls, fullname): - """Return None as built-in modules do not have code objects.""" - return None - - @classmethod - @_requires_builtin - def get_source(cls, fullname): - """Return None as built-in modules do not have source code.""" - return None - - @classmethod - @_requires_builtin - def is_package(cls, fullname): - """Return False as built-in modules are never packages.""" - return False - - load_module = classmethod(_load_module_shim) - - -class FrozenImporter: - - """Meta path import for frozen modules. - - All methods are either class or static methods to avoid the need to - instantiate the class. 
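`BuiltinImporter` above is also reachable as `importlib.machinery.BuiltinImporter`; a quick check of the `find_spec()` behaviour its docstrings describe (module names chosen arbitrarily):

import importlib.machinery as machinery

spec = machinery.BuiltinImporter.find_spec('sys')
print(spec.origin)                                  # built-in
print(machinery.BuiltinImporter.find_spec('json'))  # None: json is not built-in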
- - """ - - _ORIGIN = "frozen" - - @classmethod - def _fix_up_module(cls, module): - spec = module.__spec__ - state = spec.loader_state - if state is None: - # The module is missing FrozenImporter-specific values. - - # Fix up the spec attrs. - origname = vars(module).pop('__origname__', None) - assert origname, 'see PyImport_ImportFrozenModuleObject()' - ispkg = hasattr(module, '__path__') - assert _imp.is_frozen_package(module.__name__) == ispkg, ispkg - filename, pkgdir = cls._resolve_filename(origname, spec.name, ispkg) - spec.loader_state = type(sys.implementation)( - filename=filename, - origname=origname, - ) - __path__ = spec.submodule_search_locations - if ispkg: - assert __path__ == [], __path__ - if pkgdir: - spec.submodule_search_locations.insert(0, pkgdir) - else: - assert __path__ is None, __path__ - - # Fix up the module attrs (the bare minimum). - assert not hasattr(module, '__file__'), module.__file__ - if filename: - try: - module.__file__ = filename - except AttributeError: - pass - if ispkg: - if module.__path__ != __path__: - assert module.__path__ == [], module.__path__ - module.__path__.extend(__path__) - else: - # These checks ensure that _fix_up_module() is only called - # in the right places. - __path__ = spec.submodule_search_locations - ispkg = __path__ is not None - # Check the loader state. - assert sorted(vars(state)) == ['filename', 'origname'], state - if state.origname: - # The only frozen modules with "origname" set are stdlib modules. - (__file__, pkgdir, - ) = cls._resolve_filename(state.origname, spec.name, ispkg) - assert state.filename == __file__, (state.filename, __file__) - if pkgdir: - assert __path__ == [pkgdir], (__path__, pkgdir) - else: - assert __path__ == ([] if ispkg else None), __path__ - else: - __file__ = None - assert state.filename is None, state.filename - assert __path__ == ([] if ispkg else None), __path__ - # Check the file attrs. - if __file__: - assert hasattr(module, '__file__') - assert module.__file__ == __file__, (module.__file__, __file__) - else: - assert not hasattr(module, '__file__'), module.__file__ - if ispkg: - assert hasattr(module, '__path__') - assert module.__path__ == __path__, (module.__path__, __path__) - else: - assert not hasattr(module, '__path__'), module.__path__ - assert not spec.has_location - - @classmethod - def _resolve_filename(cls, fullname, alias=None, ispkg=False): - if not fullname or not getattr(sys, '_stdlib_dir', None): - return None, None - try: - sep = cls._SEP - except AttributeError: - sep = cls._SEP = '\\' if sys.platform == 'win32' else '/' - - if fullname != alias: - if fullname.startswith('<'): - fullname = fullname[1:] - if not ispkg: - fullname = f'{fullname}.__init__' - else: - ispkg = False - relfile = fullname.replace('.', sep) - if ispkg: - pkgdir = f'{sys._stdlib_dir}{sep}{relfile}' - filename = f'{pkgdir}{sep}__init__.py' - else: - pkgdir = None - filename = f'{sys._stdlib_dir}{sep}{relfile}.py' - return filename, pkgdir - - @classmethod - def find_spec(cls, fullname, path=None, target=None): - info = _call_with_frames_removed(_imp.find_frozen, fullname) - if info is None: - return None - # We get the marshaled data in exec_module() (the loader - # part of the importer), instead of here (the finder part). - # The loader is the usual place to get the data that will - # be loaded into the module. (For example, see _LoaderBasics - # in _bootstrap_external.py.) Most importantly, this importer - # is simpler if we wait to get the data. 
- # However, getting as much data in the finder as possible - # to later load the module is okay, and sometimes important. - # (That's why ModuleSpec.loader_state exists.) This is - # especially true if it avoids throwing away expensive data - # the loader would otherwise duplicate later and can be done - # efficiently. In this case it isn't worth it. - _, ispkg, origname = info - spec = spec_from_loader(fullname, cls, - origin=cls._ORIGIN, - is_package=ispkg) - filename, pkgdir = cls._resolve_filename(origname, fullname, ispkg) - spec.loader_state = type(sys.implementation)( - filename=filename, - origname=origname, - ) - if pkgdir: - spec.submodule_search_locations.insert(0, pkgdir) - return spec - - @staticmethod - def create_module(spec): - """Set __file__, if able.""" - module = _new_module(spec.name) - try: - filename = spec.loader_state.filename - except AttributeError: - pass - else: - if filename: - module.__file__ = filename - return module - - @staticmethod - def exec_module(module): - spec = module.__spec__ - name = spec.name - code = _call_with_frames_removed(_imp.get_frozen_object, name) - exec(code, module.__dict__) - - @classmethod - def load_module(cls, fullname): - """Load a frozen module. - - This method is deprecated. Use exec_module() instead. - - """ - # Warning about deprecation implemented in _load_module_shim(). - module = _load_module_shim(cls, fullname) - info = _imp.find_frozen(fullname) - assert info is not None - _, ispkg, origname = info - module.__origname__ = origname - vars(module).pop('__file__', None) - if ispkg: - module.__path__ = [] - cls._fix_up_module(module) - return module - - @classmethod - @_requires_frozen - def get_code(cls, fullname): - """Return the code object for the frozen module.""" - return _imp.get_frozen_object(fullname) - - @classmethod - @_requires_frozen - def get_source(cls, fullname): - """Return None as frozen modules do not have source code.""" - return None - - @classmethod - @_requires_frozen - def is_package(cls, fullname): - """Return True if the frozen module is a package.""" - return _imp.is_frozen_package(fullname) - - -# Import itself ############################################################### - -class _ImportLockContext: - - """Context manager for the import lock.""" - - def __enter__(self): - """Acquire the import lock.""" - _imp.acquire_lock() - - def __exit__(self, exc_type, exc_value, exc_traceback): - """Release the import lock regardless of any raised exceptions.""" - _imp.release_lock() - - -def _resolve_name(name, package, level): - """Resolve a relative module name to an absolute one.""" - bits = package.rsplit('.', level - 1) - if len(bits) < level: - raise ImportError('attempted relative import beyond top-level package') - base = bits[0] - return f'{base}.{name}' if name else base - - -def _find_spec(name, path, target=None): - """Find a module's spec.""" - meta_path = sys.meta_path - if meta_path is None: - # PyImport_Cleanup() is running or has been called. - raise ImportError("sys.meta_path is None, Python is likely " - "shutting down") - - if not meta_path: - _warnings.warn('sys.meta_path is empty', ImportWarning) - - # We check sys.modules here for the reload case. While a passed-in - # target will usually indicate a reload there is no guarantee, whereas - # sys.modules provides one. 
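The `sys.meta_path` walk performed by `_find_spec` below can be observed from ordinary code; a minimal do-nothing finder (the class name is invented) shows the `find_spec(name, path, target)` contract without altering import behaviour:

import sys

class TracingFinder:
    def find_spec(self, name, path=None, target=None):
        print(f'find_spec asked for {name!r} (path={path!r})')
        return None  # defer to the remaining finders on sys.meta_path

sys.meta_path.insert(0, TracingFinder())
import colorsys   # triggers the finder, assuming colorsys is not yet imported
sys.meta_path.pop(0)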
- is_reload = name in sys.modules - for finder in meta_path: - with _ImportLockContext(): - try: - find_spec = finder.find_spec - except AttributeError: - continue - else: - spec = find_spec(name, path, target) - if spec is not None: - # The parent import may have already imported this module. - if not is_reload and name in sys.modules: - module = sys.modules[name] - try: - __spec__ = module.__spec__ - except AttributeError: - # We use the found spec since that is the one that - # we would have used if the parent module hadn't - # beaten us to the punch. - return spec - else: - if __spec__ is None: - return spec - else: - return __spec__ - else: - return spec - else: - return None - - -def _sanity_check(name, package, level): - """Verify arguments are "sane".""" - if not isinstance(name, str): - raise TypeError(f'module name must be str, not {type(name)}') - if level < 0: - raise ValueError('level must be >= 0') - if level > 0: - if not isinstance(package, str): - raise TypeError('__package__ not set to a string') - elif not package: - raise ImportError('attempted relative import with no known parent ' - 'package') - if not name and level == 0: - raise ValueError('Empty module name') - - -_ERR_MSG_PREFIX = 'No module named ' -_ERR_MSG = _ERR_MSG_PREFIX + '{!r}' - -def _find_and_load_unlocked(name, import_): - path = None - parent = name.rpartition('.')[0] - parent_spec = None - if parent: - if parent not in sys.modules: - _call_with_frames_removed(import_, parent) - # Crazy side-effects! - if name in sys.modules: - return sys.modules[name] - parent_module = sys.modules[parent] - try: - path = parent_module.__path__ - except AttributeError: - msg = f'{_ERR_MSG_PREFIX}{name!r}; {parent!r} is not a package' - raise ModuleNotFoundError(msg, name=name) from None - parent_spec = parent_module.__spec__ - child = name.rpartition('.')[2] - spec = _find_spec(name, path) - if spec is None: - raise ModuleNotFoundError(f'{_ERR_MSG_PREFIX}{name!r}', name=name) - else: - if parent_spec: - # Temporarily add child we are currently importing to parent's - # _uninitialized_submodules for circular import tracking. - parent_spec._uninitialized_submodules.append(child) - try: - module = _load_unlocked(spec) - finally: - if parent_spec: - parent_spec._uninitialized_submodules.pop() - if parent: - # Set the module as an attribute on its parent. - parent_module = sys.modules[parent] - try: - setattr(parent_module, child, module) - except AttributeError: - msg = f"Cannot set an attribute on {parent!r} for child module {child!r}" - _warnings.warn(msg, ImportWarning) - return module - - -_NEEDS_LOADING = object() - - -def _find_and_load(name, import_): - """Find and load the module.""" - - # Optimization: we avoid unneeded module locking if the module - # already exists in sys.modules and is fully initialized. - module = sys.modules.get(name, _NEEDS_LOADING) - if (module is _NEEDS_LOADING or - getattr(getattr(module, "__spec__", None), "_initializing", False)): - with _ModuleLockManager(name): - module = sys.modules.get(name, _NEEDS_LOADING) - if module is _NEEDS_LOADING: - return _find_and_load_unlocked(name, import_) - - # Optimization: only call _bootstrap._lock_unlock_module() if - # module.__spec__._initializing is True. - # NOTE: because of this, initializing must be set *before* - # putting the new module in sys.modules. - _lock_unlock_module(name) - else: - # Verify the module is still in sys.modules. 
Another thread may have
-        # removed it (due to import failure) between our sys.modules.get()
-        # above and the _initializing check. If removed, we retry the import
-        # to preserve normal semantics: the caller gets the exception from
-        # the actual import failure rather than a synthetic error.
-        if sys.modules.get(name) is not module:
-            return _find_and_load(name, import_)
-
-    if module is None:
-        message = f'import of {name} halted; None in sys.modules'
-        raise ModuleNotFoundError(message, name=name)
-
-    return module
-
-
-def _gcd_import(name, package=None, level=0):
-    """Import and return the module based on its name, the package the call is
-    being made from, and the level adjustment.
-
-    This function represents the greatest common denominator of functionality
-    between import_module and __import__. This includes setting __package__ if
-    the loader did not.
-
-    """
-    _sanity_check(name, package, level)
-    if level > 0:
-        name = _resolve_name(name, package, level)
-    return _find_and_load(name, _gcd_import)
-
-
-def _handle_fromlist(module, fromlist, import_, *, recursive=False):
-    """Figure out what __import__ should return.
-
-    The import_ parameter is a callable which takes the name of module to
-    import. It is required to decouple the function from assuming importlib's
-    import implementation is desired.
-
-    """
-    # The hell that is fromlist ...
-    # If a package was imported, try to import stuff from fromlist.
-    for x in fromlist:
-        if not isinstance(x, str):
-            if recursive:
-                where = module.__name__ + '.__all__'
-            else:
-                where = "``from list''"
-            raise TypeError(f"Item in {where} must be str, "
-                            f"not {type(x).__name__}")
-        elif x == '*':
-            if not recursive and hasattr(module, '__all__'):
-                _handle_fromlist(module, module.__all__, import_,
-                                 recursive=True)
-        elif not hasattr(module, x):
-            from_name = f'{module.__name__}.{x}'
-            try:
-                _call_with_frames_removed(import_, from_name)
-            except ModuleNotFoundError as exc:
-                # Backwards-compatibility dictates we ignore failed
-                # imports triggered by fromlist for modules that don't
-                # exist.
-                if (exc.name == from_name and
-                        sys.modules.get(from_name, _NEEDS_LOADING) is not None):
-                    continue
-                raise
-    return module
-
-
-def _calc___package__(globals):
-    """Calculate what __package__ should be.
-
-    __package__ is not guaranteed to be defined or could be set to None
-    to represent that its proper value is unknown.
-
-    """
-    package = globals.get('__package__')
-    spec = globals.get('__spec__')
-    if package is not None:
-        if spec is not None and package != spec.parent:
-            _warnings.warn("__package__ != __spec__.parent "
-                           f"({package!r} != {spec.parent!r})",
-                           DeprecationWarning, stacklevel=3)
-        return package
-    elif spec is not None:
-        return spec.parent
-    else:
-        _warnings.warn("can't resolve package from __spec__ or __package__, "
-                       "falling back on __name__ and __path__",
-                       ImportWarning, stacklevel=3)
-        package = globals['__name__']
-        if '__path__' not in globals:
-            package = package.rpartition('.')[0]
-    return package
-
-
-def __import__(name, globals=None, locals=None, fromlist=(), level=0):
-    """Import a module.
-
-    The 'globals' argument is used to infer where the import is occurring from
-    to handle relative imports. The 'locals' argument is ignored. The
-    'fromlist' argument specifies what should exist as attributes on the module
-    being imported (e.g. ``from module import <fromlist>``). The 'level'
-    argument represents the package location to import from in a relative
-    import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).
- - """ - if level == 0: - module = _gcd_import(name) - else: - globals_ = globals if globals is not None else {} - package = _calc___package__(globals_) - module = _gcd_import(name, package, level) - if not fromlist: - # Return up to the first dot in 'name'. This is complicated by the fact - # that 'name' may be relative. - if level == 0: - return _gcd_import(name.partition('.')[0]) - elif not name: - return module - else: - # Figure out where to slice the module's name up to the first dot - # in 'name'. - cut_off = len(name) - len(name.partition('.')[0]) - # Slice end needs to be positive to alleviate need to special-case - # when ``'.' not in name``. - return sys.modules[module.__name__[:len(module.__name__)-cut_off]] - elif hasattr(module, '__path__'): - return _handle_fromlist(module, fromlist, _gcd_import) - else: - return module - - -def _builtin_from_name(name): - spec = BuiltinImporter.find_spec(name) - if spec is None: - raise ImportError('no built-in module named ' + name) - return _load_unlocked(spec) - - -def _setup(sys_module, _imp_module): - """Setup importlib by importing needed built-in modules and injecting them - into the global namespace. - - As sys is needed for sys.modules access and _imp is needed to load built-in - modules, those two modules must be explicitly passed in. - - """ - global _imp, sys, _blocking_on - _imp = _imp_module - sys = sys_module - - # Set up the spec for existing builtin/frozen modules. - module_type = type(sys) - for name, module in sys.modules.items(): - if isinstance(module, module_type): - if name in sys.builtin_module_names: - loader = BuiltinImporter - elif _imp.is_frozen(name): - loader = FrozenImporter - else: - continue - spec = _spec_from_module(module, loader) - _init_module_attrs(spec, module) - if loader is FrozenImporter: - loader._fix_up_module(module) - - # Directly load built-in modules needed during bootstrap. - self_module = sys.modules[__name__] - for builtin_name in ('_thread', '_warnings', '_weakref'): - if builtin_name not in sys.modules: - builtin_module = _builtin_from_name(builtin_name) - else: - builtin_module = sys.modules[builtin_name] - setattr(self_module, builtin_name, builtin_module) - - # Instantiation requires _weakref to have been set. - _blocking_on = _WeakValueDictionary() - - -def _install(sys_module, _imp_module): - """Install importers for builtin and frozen modules""" - _setup(sys_module, _imp_module) - - sys.meta_path.append(BuiltinImporter) - sys.meta_path.append(FrozenImporter) - - -def _install_external_importers(): - """Install importers that require external filesystem access""" - global _bootstrap_external - import _frozen_importlib_external - _bootstrap_external = _frozen_importlib_external - _frozen_importlib_external._install(sys.modules[__name__]) diff --git a/Python313_13_x64_Template/Lib/importlib/_bootstrap_external.py b/Python313_13_x64_Template/Lib/importlib/_bootstrap_external.py deleted file mode 100644 index 0741f62e..00000000 --- a/Python313_13_x64_Template/Lib/importlib/_bootstrap_external.py +++ /dev/null @@ -1,1826 +0,0 @@ -"""Core implementation of path-based import. - -This module is NOT meant to be directly imported! It has been designed such -that it can be bootstrapped into Python as the implementation of import. As -such it requires the injection of specific modules and attributes in order to -work. One should use importlib as the public-facing version of this module. 
- -""" -# IMPORTANT: Whenever making changes to this module, be sure to run a top-level -# `make regen-importlib` followed by `make` in order to get the frozen version -# of the module updated. Not doing so will result in the Makefile to fail for -# all others who don't have a ./python around to freeze the module in the early -# stages of compilation. -# - -# See importlib._setup() for what is injected into the global namespace. - -# When editing this code be aware that code executed at import time CANNOT -# reference any injected objects! This includes not only global code but also -# anything specified at the class level. - -# Module injected manually by _set_bootstrap_module() -_bootstrap = None - -# Import builtin modules -import _imp -import _io -import sys -import _warnings -import marshal - - -_MS_WINDOWS = (sys.platform == 'win32') -if _MS_WINDOWS: - import nt as _os - import winreg -else: - import posix as _os - - -if _MS_WINDOWS: - path_separators = ['\\', '/'] -else: - path_separators = ['/'] -# Assumption made in _path_join() -assert all(len(sep) == 1 for sep in path_separators) -path_sep = path_separators[0] -path_sep_tuple = tuple(path_separators) -path_separators = ''.join(path_separators) -_pathseps_with_colon = {f':{s}' for s in path_separators} - - -# Bootstrap-related code ###################################################### -_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win', -_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin', 'ios', 'tvos', 'watchos' -_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY - + _CASE_INSENSITIVE_PLATFORMS_STR_KEY) - - -def _make_relax_case(): - if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): - if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY): - key = 'PYTHONCASEOK' - else: - key = b'PYTHONCASEOK' - - def _relax_case(): - """True if filenames must be checked case-insensitively and ignore environment flags are not set.""" - return not sys.flags.ignore_environment and key in _os.environ - else: - def _relax_case(): - """True if filenames must be checked case-insensitively.""" - return False - return _relax_case - -_relax_case = _make_relax_case() - - -def _pack_uint32(x): - """Convert a 32-bit integer to little-endian.""" - return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little') - - -def _unpack_uint64(data): - """Convert 8 bytes in little-endian to an integer.""" - assert len(data) == 8 - return int.from_bytes(data, 'little') - -def _unpack_uint32(data): - """Convert 4 bytes in little-endian to an integer.""" - assert len(data) == 4 - return int.from_bytes(data, 'little') - -def _unpack_uint16(data): - """Convert 2 bytes in little-endian to an integer.""" - assert len(data) == 2 - return int.from_bytes(data, 'little') - - -if _MS_WINDOWS: - def _path_join(*path_parts): - """Replacement for os.path.join().""" - if not path_parts: - return "" - if len(path_parts) == 1: - return path_parts[0] - root = "" - path = [] - for new_root, tail in map(_os._path_splitroot, path_parts): - if new_root.startswith(path_sep_tuple) or new_root.endswith(path_sep_tuple): - root = new_root.rstrip(path_separators) or root - path = [path_sep + tail] - elif new_root.endswith(':'): - if root.casefold() != new_root.casefold(): - # Drive relative paths have to be resolved by the OS, so we reset the - # tail but do not add a path_sep prefix. 
- root = new_root - path = [tail] - else: - path.append(tail) - else: - root = new_root or root - path.append(tail) - path = [p.rstrip(path_separators) for p in path if p] - if len(path) == 1 and not path[0]: - # Avoid losing the root's trailing separator when joining with nothing - return root + path_sep - return root + path_sep.join(path) - -else: - def _path_join(*path_parts): - """Replacement for os.path.join().""" - return path_sep.join([part.rstrip(path_separators) - for part in path_parts if part]) - - -def _path_split(path): - """Replacement for os.path.split().""" - i = max(path.rfind(p) for p in path_separators) - if i < 0: - return '', path - return path[:i], path[i + 1:] - - -def _path_stat(path): - """Stat the path. - - Made a separate function to make it easier to override in experiments - (e.g. cache stat results). - - """ - return _os.stat(path) - - -def _path_is_mode_type(path, mode): - """Test whether the path is the specified mode type.""" - try: - stat_info = _path_stat(path) - except OSError: - return False - return (stat_info.st_mode & 0o170000) == mode - - -def _path_isfile(path): - """Replacement for os.path.isfile.""" - return _path_is_mode_type(path, 0o100000) - - -def _path_isdir(path): - """Replacement for os.path.isdir.""" - if not path: - path = _os.getcwd() - return _path_is_mode_type(path, 0o040000) - - -if _MS_WINDOWS: - def _path_isabs(path): - """Replacement for os.path.isabs.""" - if not path: - return False - root = _os._path_splitroot(path)[0].replace('/', '\\') - return len(root) > 1 and (root.startswith('\\\\') or root.endswith('\\')) - -else: - def _path_isabs(path): - """Replacement for os.path.isabs.""" - return path.startswith(path_separators) - - -def _path_abspath(path): - """Replacement for os.path.abspath.""" - if not _path_isabs(path): - for sep in path_separators: - path = path.removeprefix(f".{sep}") - return _path_join(_os.getcwd(), path) - else: - return path - - -def _write_atomic(path, data, mode=0o666): - """Best-effort function to write data to a path atomically. - Be prepared to handle a FileExistsError if concurrent writing of the - temporary file is attempted.""" - # id() is used to generate a pseudo-random filename. - path_tmp = f'{path}.{id(path)}' - fd = _os.open(path_tmp, - _os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666) - try: - # We first write data to a temporary file, and then use os.replace() to - # perform an atomic rename. - with _io.open(fd, 'wb') as file: - file.write(data) - _os.replace(path_tmp, path) - except OSError: - try: - _os.unlink(path_tmp) - except OSError: - pass - raise - - -_code_type = type(_write_atomic.__code__) - - -# Finder/loader utility code ############################################### - -# Magic word to reject .pyc files generated by other Python versions. -# It should change for each incompatible change to the bytecode. -# -# The value of CR and LF is incorporated so if you ever read or write -# a .pyc file in text mode the magic number will be wrong; also, the -# Apple MPW compiler swaps their values, botching string constants. -# -# There were a variety of old schemes for setting the magic number. -# The current working scheme is to increment the previous value by -# 10. -# -# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic -# number also includes a new "magic tag", i.e. a human readable string used -# to represent the magic number in __pycache__ directories. When you change -# the magic number, you must also set a new unique magic tag. 
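# Illustration (editor's sketch): the write-to-temp-then-os.replace() pattern
# used by _write_atomic() above, expressed with the ordinary os/tempfile API
# (the target path is hypothetical):
import os
import tempfile

def write_atomic(path, data):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
    try:
        with os.fdopen(fd, 'wb') as f:
            f.write(data)
        os.replace(tmp, path)  # atomic rename: readers never see partial data
    except OSError:
        try:
            os.unlink(tmp)
        except OSError:
            pass
        raise

write_atomic('payload.bin', b'example bytes')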
Generally this -# can be named after the Python major version of the magic number bump, but -# it can really be anything, as long as it's different than anything else -# that's come before. The tags are included in the following table, starting -# with Python 3.2a0. -# -# Known values: -# Python 1.5: 20121 -# Python 1.5.1: 20121 -# Python 1.5.2: 20121 -# Python 1.6: 50428 -# Python 2.0: 50823 -# Python 2.0.1: 50823 -# Python 2.1: 60202 -# Python 2.1.1: 60202 -# Python 2.1.2: 60202 -# Python 2.2: 60717 -# Python 2.3a0: 62011 -# Python 2.3a0: 62021 -# Python 2.3a0: 62011 (!) -# Python 2.4a0: 62041 -# Python 2.4a3: 62051 -# Python 2.4b1: 62061 -# Python 2.5a0: 62071 -# Python 2.5a0: 62081 (ast-branch) -# Python 2.5a0: 62091 (with) -# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode) -# Python 2.5b3: 62101 (fix wrong code: for x, in ...) -# Python 2.5b3: 62111 (fix wrong code: x += yield) -# Python 2.5c1: 62121 (fix wrong lnotab with for loops and -# storing constants that should have been removed) -# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp) -# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode) -# Python 2.6a1: 62161 (WITH_CLEANUP optimization) -# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND) -# Python 2.7a0: 62181 (optimize conditional branches: -# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE) -# Python 2.7a0 62191 (introduce SETUP_WITH) -# Python 2.7a0 62201 (introduce BUILD_SET) -# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD) -# Python 3000: 3000 -# 3010 (removed UNARY_CONVERT) -# 3020 (added BUILD_SET) -# 3030 (added keyword-only parameters) -# 3040 (added signature annotations) -# 3050 (print becomes a function) -# 3060 (PEP 3115 metaclass syntax) -# 3061 (string literals become unicode) -# 3071 (PEP 3109 raise changes) -# 3081 (PEP 3137 make __file__ and __name__ unicode) -# 3091 (kill str8 interning) -# 3101 (merge from 2.6a0, see 62151) -# 3103 (__file__ points to source file) -# Python 3.0a4: 3111 (WITH_CLEANUP optimization). 
-# Python 3.0b1: 3131 (lexical exception stacking, including POP_EXCEPT - #3021) -# Python 3.1a1: 3141 (optimize list, set and dict comprehensions: -# change LIST_APPEND and SET_ADD, add MAP_ADD #2183) -# Python 3.1a1: 3151 (optimize conditional branches: -# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE - #4715) -# Python 3.2a1: 3160 (add SETUP_WITH #6101) -# tag: cpython-32 -# Python 3.2a2: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR #9225) -# tag: cpython-32 -# Python 3.2a3 3180 (add DELETE_DEREF #4617) -# Python 3.3a1 3190 (__class__ super closure changed) -# Python 3.3a1 3200 (PEP 3155 __qualname__ added #13448) -# Python 3.3a1 3210 (added size modulo 2**32 to the pyc header #13645) -# Python 3.3a2 3220 (changed PEP 380 implementation #14230) -# Python 3.3a4 3230 (revert changes to implicit __class__ closure #14857) -# Python 3.4a1 3250 (evaluate positional default arguments before -# keyword-only defaults #16967) -# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override -# free vars #17853) -# Python 3.4a1 3270 (various tweaks to the __class__ closure #12370) -# Python 3.4a1 3280 (remove implicit class argument) -# Python 3.4a4 3290 (changes to __qualname__ computation #19301) -# Python 3.4a4 3300 (more changes to __qualname__ computation #19301) -# Python 3.4rc2 3310 (alter __qualname__ computation #20625) -# Python 3.5a1 3320 (PEP 465: Matrix multiplication operator #21176) -# Python 3.5b1 3330 (PEP 448: Additional Unpacking Generalizations #2292) -# Python 3.5b2 3340 (fix dictionary display evaluation order #11205) -# Python 3.5b3 3350 (add GET_YIELD_FROM_ITER opcode #24400) -# Python 3.5.2 3351 (fix BUILD_MAP_UNPACK_WITH_CALL opcode #27286) -# Python 3.6a0 3360 (add FORMAT_VALUE opcode #25483) -# Python 3.6a1 3361 (lineno delta of code.co_lnotab becomes signed #26107) -# Python 3.6a2 3370 (16 bit wordcode #26647) -# Python 3.6a2 3371 (add BUILD_CONST_KEY_MAP opcode #27140) -# Python 3.6a2 3372 (MAKE_FUNCTION simplification, remove MAKE_CLOSURE -# #27095) -# Python 3.6b1 3373 (add BUILD_STRING opcode #27078) -# Python 3.6b1 3375 (add SETUP_ANNOTATIONS and STORE_ANNOTATION opcodes -# #27985) -# Python 3.6b1 3376 (simplify CALL_FUNCTIONs & BUILD_MAP_UNPACK_WITH_CALL - #27213) -# Python 3.6b1 3377 (set __class__ cell from type.__new__ #23722) -# Python 3.6b2 3378 (add BUILD_TUPLE_UNPACK_WITH_CALL #28257) -# Python 3.6rc1 3379 (more thorough __class__ validation #23722) -# Python 3.7a1 3390 (add LOAD_METHOD and CALL_METHOD opcodes #26110) -# Python 3.7a2 3391 (update GET_AITER #31709) -# Python 3.7a4 3392 (PEP 552: Deterministic pycs #31650) -# Python 3.7b1 3393 (remove STORE_ANNOTATION opcode #32550) -# Python 3.7b5 3394 (restored docstring as the first stmt in the body; -# this might affected the first line number #32911) -# Python 3.8a1 3400 (move frame block handling to compiler #17611) -# Python 3.8a1 3401 (add END_ASYNC_FOR #33041) -# Python 3.8a1 3410 (PEP570 Python Positional-Only Parameters #36540) -# Python 3.8b2 3411 (Reverse evaluation order of key: value in dict -# comprehensions #35224) -# Python 3.8b2 3412 (Swap the position of positional args and positional -# only args in ast.arguments #37593) -# Python 3.8b4 3413 (Fix "break" and "continue" in "finally" #37830) -# Python 3.9a0 3420 (add LOAD_ASSERTION_ERROR #34880) -# Python 3.9a0 3421 (simplified bytecode for with blocks #32949) -# Python 3.9a0 3422 (remove BEGIN_FINALLY, END_FINALLY, CALL_FINALLY, POP_FINALLY bytecodes #33387) -# Python 3.9a2 3423 (add IS_OP, CONTAINS_OP and 
JUMP_IF_NOT_EXC_MATCH bytecodes #39156) -# Python 3.9a2 3424 (simplify bytecodes for *value unpacking) -# Python 3.9a2 3425 (simplify bytecodes for **value unpacking) -# Python 3.10a1 3430 (Make 'annotations' future by default) -# Python 3.10a1 3431 (New line number table format -- PEP 626) -# Python 3.10a2 3432 (Function annotation for MAKE_FUNCTION is changed from dict to tuple bpo-42202) -# Python 3.10a2 3433 (RERAISE restores f_lasti if oparg != 0) -# Python 3.10a6 3434 (PEP 634: Structural Pattern Matching) -# Python 3.10a7 3435 Use instruction offsets (as opposed to byte offsets). -# Python 3.10b1 3436 (Add GEN_START bytecode #43683) -# Python 3.10b1 3437 (Undo making 'annotations' future by default - We like to dance among core devs!) -# Python 3.10b1 3438 Safer line number table handling. -# Python 3.10b1 3439 (Add ROT_N) -# Python 3.11a1 3450 Use exception table for unwinding ("zero cost" exception handling) -# Python 3.11a1 3451 (Add CALL_METHOD_KW) -# Python 3.11a1 3452 (drop nlocals from marshaled code objects) -# Python 3.11a1 3453 (add co_fastlocalnames and co_fastlocalkinds) -# Python 3.11a1 3454 (compute cell offsets relative to locals bpo-43693) -# Python 3.11a1 3455 (add MAKE_CELL bpo-43693) -# Python 3.11a1 3456 (interleave cell args bpo-43693) -# Python 3.11a1 3457 (Change localsplus to a bytes object bpo-43693) -# Python 3.11a1 3458 (imported objects now don't use LOAD_METHOD/CALL_METHOD) -# Python 3.11a1 3459 (PEP 657: add end line numbers and column offsets for instructions) -# Python 3.11a1 3460 (Add co_qualname field to PyCodeObject bpo-44530) -# Python 3.11a1 3461 (JUMP_ABSOLUTE must jump backwards) -# Python 3.11a2 3462 (bpo-44511: remove COPY_DICT_WITHOUT_KEYS, change -# MATCH_CLASS and MATCH_KEYS, and add COPY) -# Python 3.11a3 3463 (bpo-45711: JUMP_IF_NOT_EXC_MATCH no longer pops the -# active exception) -# Python 3.11a3 3464 (bpo-45636: Merge numeric BINARY_*/INPLACE_* into -# BINARY_OP) -# Python 3.11a3 3465 (Add COPY_FREE_VARS opcode) -# Python 3.11a4 3466 (bpo-45292: PEP-654 except*) -# Python 3.11a4 3467 (Change CALL_xxx opcodes) -# Python 3.11a4 3468 (Add SEND opcode) -# Python 3.11a4 3469 (bpo-45711: remove type, traceback from exc_info) -# Python 3.11a4 3470 (bpo-46221: PREP_RERAISE_STAR no longer pushes lasti) -# Python 3.11a4 3471 (bpo-46202: remove pop POP_EXCEPT_AND_RERAISE) -# Python 3.11a4 3472 (bpo-46009: replace GEN_START with POP_TOP) -# Python 3.11a4 3473 (Add POP_JUMP_IF_NOT_NONE/POP_JUMP_IF_NONE opcodes) -# Python 3.11a4 3474 (Add RESUME opcode) -# Python 3.11a5 3475 (Add RETURN_GENERATOR opcode) -# Python 3.11a5 3476 (Add ASYNC_GEN_WRAP opcode) -# Python 3.11a5 3477 (Replace DUP_TOP/DUP_TOP_TWO with COPY and -# ROT_TWO/ROT_THREE/ROT_FOUR/ROT_N with SWAP) -# Python 3.11a5 3478 (New CALL opcodes) -# Python 3.11a5 3479 (Add PUSH_NULL opcode) -# Python 3.11a5 3480 (New CALL opcodes, second iteration) -# Python 3.11a5 3481 (Use inline cache for BINARY_OP) -# Python 3.11a5 3482 (Use inline caching for UNPACK_SEQUENCE and LOAD_GLOBAL) -# Python 3.11a5 3483 (Use inline caching for COMPARE_OP and BINARY_SUBSCR) -# Python 3.11a5 3484 (Use inline caching for LOAD_ATTR, LOAD_METHOD, and -# STORE_ATTR) -# Python 3.11a5 3485 (Add an oparg to GET_AWAITABLE) -# Python 3.11a6 3486 (Use inline caching for PRECALL and CALL) -# Python 3.11a6 3487 (Remove the adaptive "oparg counter" mechanism) -# Python 3.11a6 3488 (LOAD_GLOBAL can push additional NULL) -# Python 3.11a6 3489 (Add JUMP_BACKWARD, remove JUMP_ABSOLUTE) -# Python 3.11a6 3490 (remove 
JUMP_IF_NOT_EXC_MATCH, add CHECK_EXC_MATCH) -# Python 3.11a6 3491 (remove JUMP_IF_NOT_EG_MATCH, add CHECK_EG_MATCH, -# add JUMP_BACKWARD_NO_INTERRUPT, make JUMP_NO_INTERRUPT virtual) -# Python 3.11a7 3492 (make POP_JUMP_IF_NONE/NOT_NONE/TRUE/FALSE relative) -# Python 3.11a7 3493 (Make JUMP_IF_TRUE_OR_POP/JUMP_IF_FALSE_OR_POP relative) -# Python 3.11a7 3494 (New location info table) -# Python 3.11b4 3495 (Set line number of module's RESUME instr to 0 per PEP 626) -# Python 3.12a1 3500 (Remove PRECALL opcode) -# Python 3.12a1 3501 (YIELD_VALUE oparg == stack_depth) -# Python 3.12a1 3502 (LOAD_FAST_CHECK, no NULL-check in LOAD_FAST) -# Python 3.12a1 3503 (Shrink LOAD_METHOD cache) -# Python 3.12a1 3504 (Merge LOAD_METHOD back into LOAD_ATTR) -# Python 3.12a1 3505 (Specialization/Cache for FOR_ITER) -# Python 3.12a1 3506 (Add BINARY_SLICE and STORE_SLICE instructions) -# Python 3.12a1 3507 (Set lineno of module's RESUME to 0) -# Python 3.12a1 3508 (Add CLEANUP_THROW) -# Python 3.12a1 3509 (Conditional jumps only jump forward) -# Python 3.12a2 3510 (FOR_ITER leaves iterator on the stack) -# Python 3.12a2 3511 (Add STOPITERATION_ERROR instruction) -# Python 3.12a2 3512 (Remove all unused consts from code objects) -# Python 3.12a4 3513 (Add CALL_INTRINSIC_1 instruction, removed STOPITERATION_ERROR, PRINT_EXPR, IMPORT_STAR) -# Python 3.12a4 3514 (Remove ASYNC_GEN_WRAP, LIST_TO_TUPLE, and UNARY_POSITIVE) -# Python 3.12a5 3515 (Embed jump mask in COMPARE_OP oparg) -# Python 3.12a5 3516 (Add COMPARE_AND_BRANCH instruction) -# Python 3.12a5 3517 (Change YIELD_VALUE oparg to exception block depth) -# Python 3.12a6 3518 (Add RETURN_CONST instruction) -# Python 3.12a6 3519 (Modify SEND instruction) -# Python 3.12a6 3520 (Remove PREP_RERAISE_STAR, add CALL_INTRINSIC_2) -# Python 3.12a7 3521 (Shrink the LOAD_GLOBAL caches) -# Python 3.12a7 3522 (Removed JUMP_IF_FALSE_OR_POP/JUMP_IF_TRUE_OR_POP) -# Python 3.12a7 3523 (Convert COMPARE_AND_BRANCH back to COMPARE_OP) -# Python 3.12a7 3524 (Shrink the BINARY_SUBSCR caches) -# Python 3.12b1 3525 (Shrink the CALL caches) -# Python 3.12b1 3526 (Add instrumentation support) -# Python 3.12b1 3527 (Add LOAD_SUPER_ATTR) -# Python 3.12b1 3528 (Add LOAD_SUPER_ATTR_METHOD specialization) -# Python 3.12b1 3529 (Inline list/dict/set comprehensions) -# Python 3.12b1 3530 (Shrink the LOAD_SUPER_ATTR caches) -# Python 3.12b1 3531 (Add PEP 695 changes) -# Python 3.13a1 3550 (Plugin optimizer support) -# Python 3.13a1 3551 (Compact superinstructions) -# Python 3.13a1 3552 (Remove LOAD_FAST__LOAD_CONST and LOAD_CONST__LOAD_FAST) -# Python 3.13a1 3553 (Add SET_FUNCTION_ATTRIBUTE) -# Python 3.13a1 3554 (more efficient bytecodes for f-strings) -# Python 3.13a1 3555 (generate specialized opcodes metadata from bytecodes.c) -# Python 3.13a1 3556 (Convert LOAD_CLOSURE to a pseudo-op) -# Python 3.13a1 3557 (Make the conversion to boolean in jumps explicit) -# Python 3.13a1 3558 (Reorder the stack items for CALL) -# Python 3.13a1 3559 (Generate opcode IDs from bytecodes.c) -# Python 3.13a1 3560 (Add RESUME_CHECK instruction) -# Python 3.13a1 3561 (Add cache entry to branch instructions) -# Python 3.13a1 3562 (Assign opcode IDs for internal ops in separate range) -# Python 3.13a1 3563 (Add CALL_KW and remove KW_NAMES) -# Python 3.13a1 3564 (Removed oparg from YIELD_VALUE, changed oparg values of RESUME) -# Python 3.13a1 3565 (Oparg of YIELD_VALUE indicates whether it is in a yield-from) -# Python 3.13a1 3566 (Emit JUMP_NO_INTERRUPT instead of JUMP for non-loop no-lineno cases) -# Python 
3.13a1 3567 (Reimplement line number propagation by the compiler) -# Python 3.13a1 3568 (Change semantics of END_FOR) -# Python 3.13a5 3569 (Specialize CONTAINS_OP) -# Python 3.13a6 3570 (Add __firstlineno__ class attribute) -# Python 3.13b1 3571 (Fix miscompilation of private names in generic classes) - -# Python 3.14 will start with 3600 - -# Please don't copy-paste the same pre-release tag for new entries above!!! -# You should always use the *upcoming* tag. For example, if 3.12a6 came out -# a week ago, I should put "Python 3.12a7" next to my new magic number. - -# MAGIC must change whenever the bytecode emitted by the compiler may no -# longer be understood by older implementations of the eval loop (usually -# due to the addition of new opcodes). -# -# Starting with Python 3.11, Python 3.n starts with magic number 2900+50n. -# -# Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array -# in PC/launcher.c must also be updated. - -MAGIC_NUMBER = (3571).to_bytes(2, 'little') + b'\r\n' - -_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c - -_PYCACHE = '__pycache__' -_OPT = 'opt-' - -SOURCE_SUFFIXES = ['.py'] -if _MS_WINDOWS: - SOURCE_SUFFIXES.append('.pyw') - -EXTENSION_SUFFIXES = _imp.extension_suffixes() - -BYTECODE_SUFFIXES = ['.pyc'] -# Deprecated. -DEBUG_BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES = BYTECODE_SUFFIXES - -def cache_from_source(path, debug_override=None, *, optimization=None): - """Given the path to a .py file, return the path to its .pyc file. - - The .py file does not need to exist; this simply returns the path to the - .pyc file calculated as if the .py file were imported. - - The 'optimization' parameter controls the presumed optimization level of - the bytecode file. If 'optimization' is not None, the string representation - of the argument is taken and verified to be alphanumeric (else ValueError - is raised). - - The debug_override parameter is deprecated. If debug_override is not None, - a True value is the same as setting 'optimization' to the empty string - while a False value is equivalent to setting 'optimization' to '1'. - - If sys.implementation.cache_tag is None then NotImplementedError is raised. - - """ - if debug_override is not None: - _warnings.warn('the debug_override parameter is deprecated; use ' - "'optimization' instead", DeprecationWarning) - if optimization is not None: - message = 'debug_override or optimization must be set to None' - raise TypeError(message) - optimization = '' if debug_override else 1 - path = _os.fspath(path) - head, tail = _path_split(path) - base, sep, rest = tail.rpartition('.') - tag = sys.implementation.cache_tag - if tag is None: - raise NotImplementedError('sys.implementation.cache_tag is None') - almost_filename = ''.join([(base if base else rest), sep, tag]) - if optimization is None: - if sys.flags.optimize == 0: - optimization = '' - else: - optimization = sys.flags.optimize - optimization = str(optimization) - if optimization != '': - if not optimization.isalnum(): - raise ValueError(f'{optimization!r} is not alphanumeric') - almost_filename = f'{almost_filename}.{_OPT}{optimization}' - filename = almost_filename + BYTECODE_SUFFIXES[0] - if sys.pycache_prefix is not None: - # We need an absolute path to the py file to avoid the possibility of - # collisions within sys.pycache_prefix, if someone has two different - # `foo/bar.py` on their system and they import both of them using the - # same sys.pycache_prefix. 
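# Illustration (editor's sketch): the sys.pycache_prefix redirection being
# described in this comment, observed through the public importlib.util
# wrapper. Paths are hypothetical and the outputs assume a CPython 3.13 build
# on Windows:
import sys
import importlib.util

print(importlib.util.cache_from_source(r'C:\proj\pkg\mod.py'))
# C:\proj\pkg\__pycache__\mod.cpython-313.pyc    (default, next to the source)
sys.pycache_prefix = r'C:\Bytecode'
print(importlib.util.cache_from_source(r'C:\proj\pkg\mod.py'))
# C:\Bytecode\proj\pkg\mod.cpython-313.pyc       (root-relative mirror)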
Let's say sys.pycache_prefix is - # `C:\Bytecode`; the idea here is that if we get `Foo\Bar`, we first - # make it absolute (`C:\Somewhere\Foo\Bar`), then make it root-relative - # (`Somewhere\Foo\Bar`), so we end up placing the bytecode file in an - # unambiguous `C:\Bytecode\Somewhere\Foo\Bar\`. - head = _path_abspath(head) - - # Strip initial drive from a Windows path. We know we have an absolute - # path here, so the second part of the check rules out a POSIX path that - # happens to contain a colon at the second character. - # Slicing avoids issues with an empty (or short) `head`. - if head[1:2] == ':' and head[0:1] not in path_separators: - head = head[2:] - - # Strip initial path separator from `head` to complete the conversion - # back to a root-relative path before joining. - return _path_join( - sys.pycache_prefix, - head.lstrip(path_separators), - filename, - ) - return _path_join(head, _PYCACHE, filename) - - -def source_from_cache(path): - """Given the path to a .pyc. file, return the path to its .py file. - - The .pyc file does not need to exist; this simply returns the path to - the .py file calculated to correspond to the .pyc file. If path does - not conform to PEP 3147/488 format, ValueError will be raised. If - sys.implementation.cache_tag is None then NotImplementedError is raised. - - """ - if sys.implementation.cache_tag is None: - raise NotImplementedError('sys.implementation.cache_tag is None') - path = _os.fspath(path) - head, pycache_filename = _path_split(path) - found_in_pycache_prefix = False - if sys.pycache_prefix is not None: - stripped_path = sys.pycache_prefix.rstrip(path_separators) - if head.startswith(stripped_path + path_sep): - head = head[len(stripped_path):] - found_in_pycache_prefix = True - if not found_in_pycache_prefix: - head, pycache = _path_split(head) - if pycache != _PYCACHE: - raise ValueError(f'{_PYCACHE} not bottom-level directory in ' - f'{path!r}') - dot_count = pycache_filename.count('.') - if dot_count not in {2, 3}: - raise ValueError(f'expected only 2 or 3 dots in {pycache_filename!r}') - elif dot_count == 3: - optimization = pycache_filename.rsplit('.', 2)[-2] - if not optimization.startswith(_OPT): - raise ValueError("optimization portion of filename does not start " - f"with {_OPT!r}") - opt_level = optimization[len(_OPT):] - if not opt_level.isalnum(): - raise ValueError(f"optimization level {optimization!r} is not an " - "alphanumeric value") - base_filename = pycache_filename.partition('.')[0] - return _path_join(head, base_filename + SOURCE_SUFFIXES[0]) - - -def _get_sourcefile(bytecode_path): - """Convert a bytecode file path to a source path (if possible). - - This function exists purely for backwards-compatibility for - PyImport_ExecCodeModuleWithFilenames() in the C API. 
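# Illustration (editor's sketch): cache_from_source()/source_from_cache() are
# exposed as importlib.util functions and round-trip as described above; the
# tag in the filename is sys.implementation.cache_tag (values shown assume a
# CPython 3.13 build):
import sys
import importlib.util

pyc = importlib.util.cache_from_source('pkg/mod.py', optimization='2')
print(pyc)                                    # pkg/__pycache__/mod.cpython-313.opt-2.pyc
print(importlib.util.source_from_cache(pyc))  # pkg/mod.py
print(sys.implementation.cache_tag)           # cpython-313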
- - """ - if len(bytecode_path) == 0: - return None - rest, _, extension = bytecode_path.rpartition('.') - if not rest or extension.lower()[-3:-1] != 'py': - return bytecode_path - try: - source_path = source_from_cache(bytecode_path) - except (NotImplementedError, ValueError): - source_path = bytecode_path[:-1] - return source_path if _path_isfile(source_path) else bytecode_path - - -def _get_cached(filename): - if filename.endswith(tuple(SOURCE_SUFFIXES)): - try: - return cache_from_source(filename) - except NotImplementedError: - pass - elif filename.endswith(tuple(BYTECODE_SUFFIXES)): - return filename - else: - return None - - -def _calc_mode(path): - """Calculate the mode permissions for a bytecode file.""" - try: - mode = _path_stat(path).st_mode - except OSError: - mode = 0o666 - # We always ensure write access so we can update cached files - # later even when the source files are read-only on Windows (#6074) - mode |= 0o200 - return mode - - -def _check_name(method): - """Decorator to verify that the module being requested matches the one the - loader can handle. - - The first argument (self) must define _name which the second argument is - compared against. If the comparison fails then ImportError is raised. - - """ - def _check_name_wrapper(self, name=None, *args, **kwargs): - if name is None: - name = self.name - elif self.name != name: - raise ImportError('loader for %s cannot handle %s' % - (self.name, name), name=name) - return method(self, name, *args, **kwargs) - - # FIXME: @_check_name is used to define class methods before the - # _bootstrap module is set by _set_bootstrap_module(). - if _bootstrap is not None: - _wrap = _bootstrap._wrap - else: - def _wrap(new, old): - for replace in ['__module__', '__name__', '__qualname__', '__doc__']: - if hasattr(old, replace): - setattr(new, replace, getattr(old, replace)) - new.__dict__.update(old.__dict__) - - _wrap(_check_name_wrapper, method) - return _check_name_wrapper - - -def _classify_pyc(data, name, exc_details): - """Perform basic validity checking of a pyc header and return the flags field, - which determines how the pyc should be further validated against the source. - - *data* is the contents of the pyc file. (Only the first 16 bytes are - required, though.) - - *name* is the name of the module being imported. It is used for logging. - - *exc_details* is a dictionary passed to ImportError if it raised for - improved debugging. - - ImportError is raised when the magic number is incorrect or when the flags - field is invalid. EOFError is raised when the data is found to be truncated. - - """ - magic = data[:4] - if magic != MAGIC_NUMBER: - message = f'bad magic number in {name!r}: {magic!r}' - _bootstrap._verbose_message('{}', message) - raise ImportError(message, **exc_details) - if len(data) < 16: - message = f'reached EOF while reading pyc header of {name!r}' - _bootstrap._verbose_message('{}', message) - raise EOFError(message) - flags = _unpack_uint32(data[4:8]) - # Only the first two flags are defined. - if flags & ~0b11: - message = f'invalid flags {flags!r} in {name!r}' - raise ImportError(message, **exc_details) - return flags - - -def _validate_timestamp_pyc(data, source_mtime, source_size, name, - exc_details): - """Validate a pyc against the source last-modified time. - - *data* is the contents of the pyc file. (Only the first 16 bytes are - required.) - - *source_mtime* is the last modified timestamp of the source file. - - *source_size* is None or the size of the source file in bytes. 
- - *name* is the name of the module being imported. It is used for logging. - - *exc_details* is a dictionary passed to ImportError if it raised for - improved debugging. - - An ImportError is raised if the bytecode is stale. - - """ - if _unpack_uint32(data[8:12]) != (source_mtime & 0xFFFFFFFF): - message = f'bytecode is stale for {name!r}' - _bootstrap._verbose_message('{}', message) - raise ImportError(message, **exc_details) - if (source_size is not None and - _unpack_uint32(data[12:16]) != (source_size & 0xFFFFFFFF)): - raise ImportError(f'bytecode is stale for {name!r}', **exc_details) - - -def _validate_hash_pyc(data, source_hash, name, exc_details): - """Validate a hash-based pyc by checking the real source hash against the one in - the pyc header. - - *data* is the contents of the pyc file. (Only the first 16 bytes are - required.) - - *source_hash* is the importlib.util.source_hash() of the source file. - - *name* is the name of the module being imported. It is used for logging. - - *exc_details* is a dictionary passed to ImportError if it raised for - improved debugging. - - An ImportError is raised if the bytecode is stale. - - """ - if data[8:16] != source_hash: - raise ImportError( - f'hash in bytecode doesn\'t match hash of source {name!r}', - **exc_details, - ) - - -def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None): - """Compile bytecode as found in a pyc.""" - code = marshal.loads(data) - if isinstance(code, _code_type): - _bootstrap._verbose_message('code object from {!r}', bytecode_path) - if source_path is not None: - _imp._fix_co_filename(code, source_path) - return code - else: - raise ImportError(f'Non-code object in {bytecode_path!r}', - name=name, path=bytecode_path) - - -def _code_to_timestamp_pyc(code, mtime=0, source_size=0): - "Produce the data for a timestamp-based pyc." - data = bytearray(MAGIC_NUMBER) - data.extend(_pack_uint32(0)) - data.extend(_pack_uint32(mtime)) - data.extend(_pack_uint32(source_size)) - data.extend(marshal.dumps(code)) - return data - - -def _code_to_hash_pyc(code, source_hash, checked=True): - "Produce the data for a hash-based pyc." - data = bytearray(MAGIC_NUMBER) - flags = 0b1 | checked << 1 - data.extend(_pack_uint32(flags)) - assert len(source_hash) == 8 - data.extend(source_hash) - data.extend(marshal.dumps(code)) - return data - - -def decode_source(source_bytes): - """Decode bytes representing source code and return the string. - - Universal newline support is used in the decoding. - """ - import tokenize # To avoid bootstrap issues. - source_bytes_readline = _io.BytesIO(source_bytes).readline - encoding = tokenize.detect_encoding(source_bytes_readline) - newline_decoder = _io.IncrementalNewlineDecoder(None, True) - return newline_decoder.decode(source_bytes.decode(encoding[0])) - - -# Module specifications ####################################################### - -_POPULATE = object() - - -def spec_from_file_location(name, location=None, *, loader=None, - submodule_search_locations=_POPULATE): - """Return a module spec based on a file location. - - To indicate that the module is a package, set - submodule_search_locations to a list of directory paths. An - empty list is sufficient, though its not otherwise useful to the - import system. - - The loader must take a spec as its only __init__() arg. - - """ - if location is None: - # The caller may simply want a partially populated location- - # oriented spec. So we set the location to a bogus value and - # fill in as much as we can. 
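# Illustration (editor's sketch): producing a hash-based pyc and repeating the
# comparison made by _validate_hash_pyc() above, using only public stdlib APIs
# ('mod.py' is hypothetical):
import py_compile
import importlib.util

pyc = py_compile.compile(
    'mod.py', invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH)
with open('mod.py', 'rb') as f:
    expected = importlib.util.source_hash(f.read())  # 8-byte hash
with open(pyc, 'rb') as f:
    header = f.read(16)
assert header[8:16] == expected  # the check _validate_hash_pyc performs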
- location = '' - if hasattr(loader, 'get_filename'): - # ExecutionLoader - try: - location = loader.get_filename(name) - except ImportError: - pass - else: - location = _os.fspath(location) - try: - location = _path_abspath(location) - except OSError: - pass - - # If the location is on the filesystem, but doesn't actually exist, - # we could return None here, indicating that the location is not - # valid. However, we don't have a good way of testing since an - # indirect location (e.g. a zip file or URL) will look like a - # non-existent file relative to the filesystem. - - spec = _bootstrap.ModuleSpec(name, loader, origin=location) - spec._set_fileattr = True - - # Pick a loader if one wasn't provided. - if loader is None: - for loader_class, suffixes in _get_supported_file_loaders(): - if location.endswith(tuple(suffixes)): - loader = loader_class(name, location) - spec.loader = loader - break - else: - return None - - # Set submodule_search_paths appropriately. - if submodule_search_locations is _POPULATE: - # Check the loader. - if hasattr(loader, 'is_package'): - try: - is_package = loader.is_package(name) - except ImportError: - pass - else: - if is_package: - spec.submodule_search_locations = [] - else: - spec.submodule_search_locations = submodule_search_locations - if spec.submodule_search_locations == []: - if location: - dirname = _path_split(location)[0] - spec.submodule_search_locations.append(dirname) - - return spec - - -def _bless_my_loader(module_globals): - """Helper function for _warnings.c - - See GH#97850 for details. - """ - # 2022-10-06(warsaw): For now, this helper is only used in _warnings.c and - # that use case only has the module globals. This function could be - # extended to accept either that or a module object. However, in the - # latter case, it would be better to raise certain exceptions when looking - # at a module, which should have either a __loader__ or __spec__.loader. - # For backward compatibility, it is possible that we'll get an empty - # dictionary for the module globals, and that cannot raise an exception. 
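# Illustration (editor's sketch): the standard client-side recipe built on
# spec_from_file_location() above -- loading a module from an explicit path.
# The path and module name are hypothetical:
import sys
import importlib.util

spec = importlib.util.spec_from_file_location('plugin', '/tmp/plugin.py')
module = importlib.util.module_from_spec(spec)
sys.modules['plugin'] = module   # register before executing, like import does
spec.loader.exec_module(module)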
- if not isinstance(module_globals, dict): - return None - - missing = object() - loader = module_globals.get('__loader__', None) - spec = module_globals.get('__spec__', missing) - - if loader is None: - if spec is missing: - # If working with a module: - # raise AttributeError('Module globals is missing a __spec__') - return None - elif spec is None: - raise ValueError('Module globals is missing a __spec__.loader') - - spec_loader = getattr(spec, 'loader', missing) - - if spec_loader in (missing, None): - if loader is None: - exc = AttributeError if spec_loader is missing else ValueError - raise exc('Module globals is missing a __spec__.loader') - _warnings.warn( - 'Module globals is missing a __spec__.loader', - DeprecationWarning) - spec_loader = loader - - assert spec_loader is not None - if loader is not None and loader != spec_loader: - _warnings.warn( - 'Module globals; __loader__ != __spec__.loader', - DeprecationWarning) - return loader - - return spec_loader - - -# Loaders ##################################################################### - -class WindowsRegistryFinder: - - """Meta path finder for modules declared in the Windows registry.""" - - REGISTRY_KEY = ( - 'Software\\Python\\PythonCore\\{sys_version}' - '\\Modules\\{fullname}') - REGISTRY_KEY_DEBUG = ( - 'Software\\Python\\PythonCore\\{sys_version}' - '\\Modules\\{fullname}\\Debug') - DEBUG_BUILD = (_MS_WINDOWS and '_d.pyd' in EXTENSION_SUFFIXES) - - @staticmethod - def _open_registry(key): - try: - return winreg.OpenKey(winreg.HKEY_CURRENT_USER, key) - except OSError: - return winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key) - - @classmethod - def _search_registry(cls, fullname): - if cls.DEBUG_BUILD: - registry_key = cls.REGISTRY_KEY_DEBUG - else: - registry_key = cls.REGISTRY_KEY - key = registry_key.format(fullname=fullname, - sys_version='%d.%d' % sys.version_info[:2]) - try: - with cls._open_registry(key) as hkey: - filepath = winreg.QueryValue(hkey, '') - except OSError: - return None - return filepath - - @classmethod - def find_spec(cls, fullname, path=None, target=None): - filepath = cls._search_registry(fullname) - if filepath is None: - return None - try: - _path_stat(filepath) - except OSError: - return None - for loader, suffixes in _get_supported_file_loaders(): - if filepath.endswith(tuple(suffixes)): - spec = _bootstrap.spec_from_loader(fullname, - loader(fullname, filepath), - origin=filepath) - return spec - - -class _LoaderBasics: - - """Base class of common code needed by both SourceLoader and - SourcelessFileLoader.""" - - def is_package(self, fullname): - """Concrete implementation of InspectLoader.is_package by checking if - the path returned by get_filename has a filename of '__init__.py'.""" - filename = _path_split(self.get_filename(fullname))[1] - filename_base = filename.rsplit('.', 1)[0] - tail_name = fullname.rpartition('.')[2] - return filename_base == '__init__' and tail_name != '__init__' - - def create_module(self, spec): - """Use default semantics for module creation.""" - - def exec_module(self, module): - """Execute the module.""" - code = self.get_code(module.__name__) - if code is None: - raise ImportError(f'cannot load module {module.__name__!r} when ' - 'get_code() returns None') - _bootstrap._call_with_frames_removed(exec, code, module.__dict__) - - def load_module(self, fullname): - """This method is deprecated.""" - # Warning implemented in _load_module_shim(). 
- return _bootstrap._load_module_shim(self, fullname) - - -class SourceLoader(_LoaderBasics): - - def path_mtime(self, path): - """Optional method that returns the modification time (an int) for the - specified path (a str). - - Raises OSError when the path cannot be handled. - """ - raise OSError - - def path_stats(self, path): - """Optional method returning a metadata dict for the specified - path (a str). - - Possible keys: - - 'mtime' (mandatory) is the numeric timestamp of last source - code modification; - - 'size' (optional) is the size in bytes of the source code. - - Implementing this method allows the loader to read bytecode files. - Raises OSError when the path cannot be handled. - """ - return {'mtime': self.path_mtime(path)} - - def _cache_bytecode(self, source_path, cache_path, data): - """Optional method which writes data (bytes) to a file path (a str). - - Implementing this method allows for the writing of bytecode files. - - The source path is needed in order to correctly transfer permissions - """ - # For backwards compatibility, we delegate to set_data() - return self.set_data(cache_path, data) - - def set_data(self, path, data): - """Optional method which writes data (bytes) to a file path (a str). - - Implementing this method allows for the writing of bytecode files. - """ - - - def get_source(self, fullname): - """Concrete implementation of InspectLoader.get_source.""" - path = self.get_filename(fullname) - try: - source_bytes = self.get_data(path) - except OSError as exc: - raise ImportError('source not available through get_data()', - name=fullname) from exc - return decode_source(source_bytes) - - def source_to_code(self, data, path, *, _optimize=-1): - """Return the code object compiled from source. - - The 'data' argument can be any object type that compile() supports. - """ - return _bootstrap._call_with_frames_removed(compile, data, path, 'exec', - dont_inherit=True, optimize=_optimize) - - def get_code(self, fullname): - """Concrete implementation of InspectLoader.get_code. - - Reading of bytecode requires path_stats to be implemented. To write - bytecode, set_data must also be implemented. 
- - """ - source_path = self.get_filename(fullname) - source_mtime = None - source_bytes = None - source_hash = None - hash_based = False - check_source = True - try: - bytecode_path = cache_from_source(source_path) - except NotImplementedError: - bytecode_path = None - else: - try: - st = self.path_stats(source_path) - except OSError: - pass - else: - source_mtime = int(st['mtime']) - try: - data = self.get_data(bytecode_path) - except OSError: - pass - else: - exc_details = { - 'name': fullname, - 'path': bytecode_path, - } - try: - flags = _classify_pyc(data, fullname, exc_details) - bytes_data = memoryview(data)[16:] - hash_based = flags & 0b1 != 0 - if hash_based: - check_source = flags & 0b10 != 0 - if (_imp.check_hash_based_pycs != 'never' and - (check_source or - _imp.check_hash_based_pycs == 'always')): - source_bytes = self.get_data(source_path) - source_hash = _imp.source_hash( - _RAW_MAGIC_NUMBER, - source_bytes, - ) - _validate_hash_pyc(data, source_hash, fullname, - exc_details) - else: - _validate_timestamp_pyc( - data, - source_mtime, - st['size'], - fullname, - exc_details, - ) - except (ImportError, EOFError): - pass - else: - _bootstrap._verbose_message('{} matches {}', bytecode_path, - source_path) - return _compile_bytecode(bytes_data, name=fullname, - bytecode_path=bytecode_path, - source_path=source_path) - if source_bytes is None: - source_bytes = self.get_data(source_path) - code_object = self.source_to_code(source_bytes, source_path) - _bootstrap._verbose_message('code object from {}', source_path) - if (not sys.dont_write_bytecode and bytecode_path is not None and - source_mtime is not None): - if hash_based: - if source_hash is None: - source_hash = _imp.source_hash(_RAW_MAGIC_NUMBER, - source_bytes) - data = _code_to_hash_pyc(code_object, source_hash, check_source) - else: - data = _code_to_timestamp_pyc(code_object, source_mtime, - len(source_bytes)) - try: - self._cache_bytecode(source_path, bytecode_path, data) - except NotImplementedError: - pass - return code_object - - -class FileLoader: - - """Base file loader class which implements the loader protocol methods that - require file system usage.""" - - def __init__(self, fullname, path): - """Cache the module name and the path to the file found by the - finder.""" - self.name = fullname - self.path = path - - def __eq__(self, other): - return (self.__class__ == other.__class__ and - self.__dict__ == other.__dict__) - - def __hash__(self): - return hash(self.name) ^ hash(self.path) - - @_check_name - def load_module(self, fullname): - """Load a module from a file. - - This method is deprecated. Use exec_module() instead. - - """ - # The only reason for this method is for the name check. - # Issue #14857: Avoid the zero-argument form of super so the implementation - # of that form can be updated without breaking the frozen module. 
- return super(FileLoader, self).load_module(fullname) - - @_check_name - def get_filename(self, fullname): - """Return the path to the source file as found by the finder.""" - return self.path - - def get_data(self, path): - """Return the data from path as raw bytes.""" - if isinstance(self, (SourceLoader, SourcelessFileLoader, ExtensionFileLoader)): - with _io.open_code(str(path)) as file: - return file.read() - else: - with _io.FileIO(path, 'r') as file: - return file.read() - - @_check_name - def get_resource_reader(self, module): - from importlib.readers import FileReader - return FileReader(self) - - -class SourceFileLoader(FileLoader, SourceLoader): - - """Concrete implementation of SourceLoader using the file system.""" - - def path_stats(self, path): - """Return the metadata for the path.""" - st = _path_stat(path) - return {'mtime': st.st_mtime, 'size': st.st_size} - - def _cache_bytecode(self, source_path, bytecode_path, data): - # Adapt between the two APIs - mode = _calc_mode(source_path) - return self.set_data(bytecode_path, data, _mode=mode) - - def set_data(self, path, data, *, _mode=0o666): - """Write bytes data to a file.""" - parent, filename = _path_split(path) - path_parts = [] - # Figure out what directories are missing. - while parent and not _path_isdir(parent): - parent, part = _path_split(parent) - path_parts.append(part) - # Create needed directories. - for part in reversed(path_parts): - parent = _path_join(parent, part) - try: - _os.mkdir(parent) - except FileExistsError: - # Probably another Python process already created the dir. - continue - except OSError as exc: - # Could be a permission error, read-only filesystem: just forget - # about writing the data. - _bootstrap._verbose_message('could not create {!r}: {!r}', - parent, exc) - return - try: - _write_atomic(path, data, _mode) - _bootstrap._verbose_message('created {!r}', path) - except OSError as exc: - # Same as above: just don't write the bytecode. - _bootstrap._verbose_message('could not create {!r}: {!r}', path, - exc) - - -class SourcelessFileLoader(FileLoader, _LoaderBasics): - - """Loader which handles sourceless file imports.""" - - def get_code(self, fullname): - path = self.get_filename(fullname) - data = self.get_data(path) - # Call _classify_pyc to do basic validation of the pyc but ignore the - # result. There's no source to check against. - exc_details = { - 'name': fullname, - 'path': path, - } - _classify_pyc(data, fullname, exc_details) - return _compile_bytecode( - memoryview(data)[16:], - name=fullname, - bytecode_path=path, - ) - - def get_source(self, fullname): - """Return None as there is no source code.""" - return None - - -class ExtensionFileLoader(FileLoader, _LoaderBasics): - - """Loader for extension modules. - - The constructor is designed to work with FileFinder. 
- - """ - - def __init__(self, name, path): - self.name = name - self.path = path - - def __eq__(self, other): - return (self.__class__ == other.__class__ and - self.__dict__ == other.__dict__) - - def __hash__(self): - return hash(self.name) ^ hash(self.path) - - def create_module(self, spec): - """Create an uninitialized extension module""" - module = _bootstrap._call_with_frames_removed( - _imp.create_dynamic, spec) - _bootstrap._verbose_message('extension module {!r} loaded from {!r}', - spec.name, self.path) - return module - - def exec_module(self, module): - """Initialize an extension module""" - _bootstrap._call_with_frames_removed(_imp.exec_dynamic, module) - _bootstrap._verbose_message('extension module {!r} executed from {!r}', - self.name, self.path) - - def is_package(self, fullname): - """Return True if the extension module is a package.""" - file_name = _path_split(self.path)[1] - return any(file_name == '__init__' + suffix - for suffix in EXTENSION_SUFFIXES) - - def get_code(self, fullname): - """Return None as an extension module cannot create a code object.""" - return None - - def get_source(self, fullname): - """Return None as extension modules have no source code.""" - return None - - @_check_name - def get_filename(self, fullname): - """Return the path to the source file as found by the finder.""" - return self.path - - -class _NamespacePath: - """Represents a namespace package's path. It uses the module name - to find its parent module, and from there it looks up the parent's - __path__. When this changes, the module's own path is recomputed, - using path_finder. For top-level modules, the parent module's path - is sys.path.""" - - # When invalidate_caches() is called, this epoch is incremented - # https://bugs.python.org/issue45703 - _epoch = 0 - - def __init__(self, name, path, path_finder): - self._name = name - self._path = path - self._last_parent_path = tuple(self._get_parent_path()) - self._last_epoch = self._epoch - self._path_finder = path_finder - - def _find_parent_path_names(self): - """Returns a tuple of (parent-module-name, parent-path-attr-name)""" - parent, dot, me = self._name.rpartition('.') - if dot == '': - # This is a top-level module. sys.path contains the parent path. - return 'sys', 'path' - # Not a top-level module. parent-module.__path__ contains the - # parent path. 
- return parent, '__path__' - - def _get_parent_path(self): - parent_module_name, path_attr_name = self._find_parent_path_names() - return getattr(sys.modules[parent_module_name], path_attr_name) - - def _recalculate(self): - # If the parent's path has changed, recalculate _path - parent_path = tuple(self._get_parent_path()) # Make a copy - if parent_path != self._last_parent_path or self._epoch != self._last_epoch: - spec = self._path_finder(self._name, parent_path) - # Note that no changes are made if a loader is returned, but we - # do remember the new parent path - if spec is not None and spec.loader is None: - if spec.submodule_search_locations: - self._path = spec.submodule_search_locations - self._last_parent_path = parent_path # Save the copy - self._last_epoch = self._epoch - return self._path - - def __iter__(self): - return iter(self._recalculate()) - - def __getitem__(self, index): - return self._recalculate()[index] - - def __setitem__(self, index, path): - self._path[index] = path - - def __len__(self): - return len(self._recalculate()) - - def __repr__(self): - return f'_NamespacePath({self._path!r})' - - def __contains__(self, item): - return item in self._recalculate() - - def append(self, item): - self._path.append(item) - - -# This class is actually exposed publicly in a namespace package's __loader__ -# attribute, so it should be available through a non-private name. -# https://github.com/python/cpython/issues/92054 -class NamespaceLoader: - def __init__(self, name, path, path_finder): - self._path = _NamespacePath(name, path, path_finder) - - def is_package(self, fullname): - return True - - def get_source(self, fullname): - return '' - - def get_code(self, fullname): - return compile('', '', 'exec', dont_inherit=True) - - def create_module(self, spec): - """Use default semantics for module creation.""" - - def exec_module(self, module): - pass - - def load_module(self, fullname): - """Load a namespace module. - - This method is deprecated. Use exec_module() instead. - - """ - # The import system never calls this method. - _bootstrap._verbose_message('namespace module loaded with path {!r}', - self._path) - # Warning implemented in _load_module_shim(). - return _bootstrap._load_module_shim(self, fullname) - - def get_resource_reader(self, module): - from importlib.readers import NamespaceReader - return NamespaceReader(self._path) - - -# We use this exclusively in module_from_spec() for backward-compatibility. -_NamespaceLoader = NamespaceLoader - - -# Finders ##################################################################### - -class PathFinder: - - """Meta path finder for sys.path and package __path__ attributes.""" - - @staticmethod - def invalidate_caches(): - """Call the invalidate_caches() method on all path entry finders - stored in sys.path_importer_cache (where implemented).""" - for name, finder in list(sys.path_importer_cache.items()): - # Drop entry if finder name is a relative path. The current - # working directory may have changed. 
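# Illustration (editor's sketch): the cache invalidation discussed above is
# what importlib.invalidate_caches() triggers; it matters when module files
# appear at runtime. Assumes the working directory is on sys.path; file and
# module names are hypothetical:
import pathlib
import importlib

pathlib.Path('generated_mod.py').write_text('VALUE = 42\n')
importlib.invalidate_caches()  # force FileFinder directory caches to refresh
import generated_mod
print(generated_mod.VALUE)     # 42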
- if finder is None or not _path_isabs(name): - del sys.path_importer_cache[name] - elif hasattr(finder, 'invalidate_caches'): - finder.invalidate_caches() - # Also invalidate the caches of _NamespacePaths - # https://bugs.python.org/issue45703 - _NamespacePath._epoch += 1 - - from importlib.metadata import MetadataPathFinder - MetadataPathFinder.invalidate_caches() - - @staticmethod - def _path_hooks(path): - """Search sys.path_hooks for a finder for 'path'.""" - if sys.path_hooks is not None and not sys.path_hooks: - _warnings.warn('sys.path_hooks is empty', ImportWarning) - for hook in sys.path_hooks: - try: - return hook(path) - except ImportError: - continue - else: - return None - - @classmethod - def _path_importer_cache(cls, path): - """Get the finder for the path entry from sys.path_importer_cache. - - If the path entry is not in the cache, find the appropriate finder - and cache it. If no finder is available, store None. - - """ - if path == '': - try: - path = _os.getcwd() - except FileNotFoundError: - # Don't cache the failure as the cwd can easily change to - # a valid directory later on. - return None - try: - finder = sys.path_importer_cache[path] - except KeyError: - finder = cls._path_hooks(path) - sys.path_importer_cache[path] = finder - return finder - - @classmethod - def _get_spec(cls, fullname, path, target=None): - """Find the loader or namespace_path for this module/package name.""" - # If this ends up being a namespace package, namespace_path is - # the list of paths that will become its __path__ - namespace_path = [] - for entry in path: - if not isinstance(entry, str): - continue - finder = cls._path_importer_cache(entry) - if finder is not None: - spec = finder.find_spec(fullname, target) - if spec is None: - continue - if spec.loader is not None: - return spec - portions = spec.submodule_search_locations - if portions is None: - raise ImportError('spec missing loader') - # This is possibly part of a namespace package. - # Remember these path entries (if any) for when we - # create a namespace package, and continue iterating - # on path. - namespace_path.extend(portions) - else: - spec = _bootstrap.ModuleSpec(fullname, None) - spec.submodule_search_locations = namespace_path - return spec - - @classmethod - def find_spec(cls, fullname, path=None, target=None): - """Try to find a spec for 'fullname' on sys.path or 'path'. - - The search is based on sys.path_hooks and sys.path_importer_cache. - """ - if path is None: - path = sys.path - spec = cls._get_spec(fullname, path, target) - if spec is None: - return None - elif spec.loader is None: - namespace_path = spec.submodule_search_locations - if namespace_path: - # We found at least one namespace path. Return a spec which - # can create the namespace package. - spec.origin = None - spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec) - return spec - else: - return None - else: - return spec - - @staticmethod - def find_distributions(*args, **kwargs): - """ - Find distributions. - - Return an iterable of all Distribution instances capable of - loading the metadata for packages matching ``context.name`` - (or all names if ``None`` indicated) along the paths in the list - of directories ``context.path``. - """ - from importlib.metadata import MetadataPathFinder - return MetadataPathFinder.find_distributions(*args, **kwargs) - - -class FileFinder: - - """File-based finder. 
- - Interactions with the file system are cached for performance, being - refreshed when the directory the finder is handling has been modified. - - """ - - def __init__(self, path, *loader_details): - """Initialize with the path to search on and a variable number of - 2-tuples containing the loader and the file suffixes the loader - recognizes.""" - loaders = [] - for loader, suffixes in loader_details: - loaders.extend((suffix, loader) for suffix in suffixes) - self._loaders = loaders - # Base (directory) path - if not path or path == '.': - self.path = _os.getcwd() - else: - self.path = _path_abspath(path) - self._path_mtime = -1 - self._path_cache = set() - self._relaxed_path_cache = set() - - def invalidate_caches(self): - """Invalidate the directory mtime.""" - self._path_mtime = -1 - - def _get_spec(self, loader_class, fullname, path, smsl, target): - loader = loader_class(fullname, path) - return spec_from_file_location(fullname, path, loader=loader, - submodule_search_locations=smsl) - - def find_spec(self, fullname, target=None): - """Try to find a spec for the specified module. - - Returns the matching spec, or None if not found. - """ - is_namespace = False - tail_module = fullname.rpartition('.')[2] - try: - mtime = _path_stat(self.path or _os.getcwd()).st_mtime - except OSError: - mtime = -1 - if mtime != self._path_mtime: - self._fill_cache() - self._path_mtime = mtime - # tail_module keeps the original casing, for __file__ and friends - if _relax_case(): - cache = self._relaxed_path_cache - cache_module = tail_module.lower() - else: - cache = self._path_cache - cache_module = tail_module - # Check if the module is the name of a directory (and thus a package). - if cache_module in cache: - base_path = _path_join(self.path, tail_module) - for suffix, loader_class in self._loaders: - init_filename = '__init__' + suffix - full_path = _path_join(base_path, init_filename) - if _path_isfile(full_path): - return self._get_spec(loader_class, fullname, full_path, [base_path], target) - else: - # If a namespace package, return the path if we don't - # find a module in the next section. - is_namespace = _path_isdir(base_path) - # Check for a file w/ a proper suffix exists. - for suffix, loader_class in self._loaders: - try: - full_path = _path_join(self.path, tail_module + suffix) - except ValueError: - return None - _bootstrap._verbose_message('trying {}', full_path, verbosity=2) - if cache_module + suffix in cache: - if _path_isfile(full_path): - return self._get_spec(loader_class, fullname, full_path, - None, target) - if is_namespace: - _bootstrap._verbose_message('possible namespace for {}', base_path) - spec = _bootstrap.ModuleSpec(fullname, None) - spec.submodule_search_locations = [base_path] - return spec - return None - - def _fill_cache(self): - """Fill the cache of potential modules and packages for this directory.""" - path = self.path - try: - contents = _os.listdir(path or _os.getcwd()) - except (FileNotFoundError, PermissionError, NotADirectoryError): - # Directory has either been removed, turned into a file, or made - # unreadable. - contents = [] - # We store two cached versions, to handle runtime changes of the - # PYTHONCASEOK environment variable. - if not sys.platform.startswith('win'): - self._path_cache = set(contents) - else: - # Windows users can import modules with case-insensitive file - # suffixes (for legacy reasons). Make the suffix lowercase here - # so it's done once instead of for every import. 
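FileFinder can also be driven directly, which makes the `find_spec()`/`_fill_cache()` flow above easy to see; a sketch in which the directory and the module name `example` are invented:

```python
import importlib.machinery as machinery
import os
import tempfile

d = tempfile.mkdtemp()
with open(os.path.join(d, "example.py"), "w") as f:  # "example" is made up
    f.write("VALUE = 42\n")

loader_details = (machinery.SourceFileLoader, machinery.SOURCE_SUFFIXES)
finder = machinery.FileFinder(d, loader_details)

print(finder.find_spec("example").origin)  # .../example.py
print(finder.find_spec("missing"))         # None (directory contents cached)
```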
This is safe as - # the specified suffixes to check against are always specified in a - # case-sensitive manner. - lower_suffix_contents = set() - for item in contents: - name, dot, suffix = item.partition('.') - if dot: - new_name = f'{name}.{suffix.lower()}' - else: - new_name = name - lower_suffix_contents.add(new_name) - self._path_cache = lower_suffix_contents - if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): - self._relaxed_path_cache = {fn.lower() for fn in contents} - - @classmethod - def path_hook(cls, *loader_details): - """A class method which returns a closure to use on sys.path_hook - which will return an instance using the specified loaders and the path - called on the closure. - - If the path called on the closure is not a directory, ImportError is - raised. - - """ - def path_hook_for_FileFinder(path): - """Path hook for importlib.machinery.FileFinder.""" - if not _path_isdir(path): - raise ImportError('only directories are supported', path=path) - return cls(path, *loader_details) - - return path_hook_for_FileFinder - - def __repr__(self): - return f'FileFinder({self.path!r})' - - -class AppleFrameworkLoader(ExtensionFileLoader): - """A loader for modules that have been packaged as frameworks for - compatibility with Apple's iOS App Store policies. - """ - def create_module(self, spec): - # If the ModuleSpec has been created by the FileFinder, it will have - # been created with an origin pointing to the .fwork file. We need to - # redirect this to the location in the Frameworks folder, using the - # content of the .fwork file. - if spec.origin.endswith(".fwork"): - with _io.FileIO(spec.origin, 'r') as file: - framework_binary = file.read().decode().strip() - bundle_path = _path_split(sys.executable)[0] - spec.origin = _path_join(bundle_path, framework_binary) - - # If the loader is created based on the spec for a loaded module, the - # path will be pointing at the Framework location. If this occurs, - # get the original .fwork location to use as the module's __file__. - if self.path.endswith(".fwork"): - path = self.path - else: - with _io.FileIO(self.path + ".origin", 'r') as file: - origin = file.read().decode().strip() - bundle_path = _path_split(sys.executable)[0] - path = _path_join(bundle_path, origin) - - module = _bootstrap._call_with_frames_removed(_imp.create_dynamic, spec) - - _bootstrap._verbose_message( - "Apple framework extension module {!r} loaded from {!r} (path {!r})", - spec.name, - spec.origin, - path, - ) - - # Ensure that the __file__ points at the .fwork location - try: - module.__file__ = path - except AttributeError: - # Not important enough to report. - # (The error is also ignored in _bootstrap._init_module_attrs or - # import_run_extension in import.c) - pass - - return module - -# Import setup ############################################################### - -def _fix_up_module(ns, name, pathname, cpathname=None): - # This function is used by PyImport_ExecCodeModuleObject(). - loader = ns.get('__loader__') - spec = ns.get('__spec__') - if not loader: - if spec: - loader = spec.loader - elif pathname == cpathname: - loader = SourcelessFileLoader(name, pathname) - else: - loader = SourceFileLoader(name, pathname) - if not spec: - spec = spec_from_file_location(name, pathname, loader=loader) - if cpathname: - spec.cached = _path_abspath(cpathname) - try: - ns['__spec__'] = spec - ns['__loader__'] = loader - ns['__file__'] = pathname - ns['__cached__'] = cpathname - except Exception: - # Not important enough to report. 
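The SourceFileLoader/spec_from_file_location pairing that `_fix_up_module()` falls back on above is the same one used to load a module from an explicit path; a minimal sketch (the file name is invented):

```python
import importlib.util
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "standalone.py")  # invented path
with open(path, "w") as f:
    f.write("GREETING = 'hello'\n")

spec = importlib.util.spec_from_file_location("standalone", path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)  # loader inferred from the .py suffix

print(module.GREETING)           # 'hello'
print(module.__spec__.origin)    # the path above
```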
- pass - - -def _get_supported_file_loaders(): - """Returns a list of file-based module loaders. - - Each item is a tuple (loader, suffixes). - """ - extension_loaders = [] - if hasattr(_imp, 'create_dynamic'): - if sys.platform in {"ios", "tvos", "watchos"}: - extension_loaders = [(AppleFrameworkLoader, [ - suffix.replace(".so", ".fwork") - for suffix in _imp.extension_suffixes() - ])] - extension_loaders.append((ExtensionFileLoader, _imp.extension_suffixes())) - source = SourceFileLoader, SOURCE_SUFFIXES - bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES - return extension_loaders + [source, bytecode] - - -def _set_bootstrap_module(_bootstrap_module): - global _bootstrap - _bootstrap = _bootstrap_module - - -def _install(_bootstrap_module): - """Install the path-based import components.""" - _set_bootstrap_module(_bootstrap_module) - supported_loaders = _get_supported_file_loaders() - sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)]) - sys.meta_path.append(PathFinder) diff --git a/Python313_13_x64_Template/Lib/importlib/abc.py b/Python313_13_x64_Template/Lib/importlib/abc.py deleted file mode 100644 index 37fef357..00000000 --- a/Python313_13_x64_Template/Lib/importlib/abc.py +++ /dev/null @@ -1,243 +0,0 @@ -"""Abstract base classes related to import.""" -from . import _bootstrap_external -from . import machinery -try: - import _frozen_importlib -except ImportError as exc: - if exc.name != '_frozen_importlib': - raise - _frozen_importlib = None -try: - import _frozen_importlib_external -except ImportError: - _frozen_importlib_external = _bootstrap_external -from ._abc import Loader -import abc -import warnings - -from .resources import abc as _resources_abc - - -__all__ = [ - 'Loader', 'MetaPathFinder', 'PathEntryFinder', - 'ResourceLoader', 'InspectLoader', 'ExecutionLoader', - 'FileLoader', 'SourceLoader', -] - - -def __getattr__(name): - """ - For backwards compatibility, continue to make names - from _resources_abc available through this module. #93963 - """ - if name in _resources_abc.__all__: - obj = getattr(_resources_abc, name) - warnings._deprecated(f"{__name__}.{name}", remove=(3, 14)) - globals()[name] = obj - return obj - raise AttributeError(f'module {__name__!r} has no attribute {name!r}') - - -def _register(abstract_cls, *classes): - for cls in classes: - abstract_cls.register(cls) - if _frozen_importlib is not None: - try: - frozen_cls = getattr(_frozen_importlib, cls.__name__) - except AttributeError: - frozen_cls = getattr(_frozen_importlib_external, cls.__name__) - abstract_cls.register(frozen_cls) - - -class MetaPathFinder(metaclass=abc.ABCMeta): - - """Abstract base class for import finders on sys.meta_path.""" - - # We don't define find_spec() here since that would break - # hasattr checks we do to support backward compatibility. - - def invalidate_caches(self): - """An optional method for clearing the finder's cache, if any. - This method is used by importlib.invalidate_caches(). - """ - -_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter, - machinery.PathFinder, machinery.WindowsRegistryFinder) - - -class PathEntryFinder(metaclass=abc.ABCMeta): - - """Abstract base class for path entry finders used by PathFinder.""" - - def invalidate_caches(self): - """An optional method for clearing the finder's cache, if any. - This method is used by PathFinder.invalidate_caches(). 
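The MetaPathFinder ABC shown above is the hook point that `_install()` wires PathFinder into; a toy finder/loader pair illustrates the contract (the module name `virtualmod` is made up):

```python
import importlib.abc
import importlib.util
import sys

class VirtualFinder(importlib.abc.MetaPathFinder, importlib.abc.Loader):
    """Serve one invented module, 'virtualmod', entirely from memory."""

    def find_spec(self, fullname, path=None, target=None):
        if fullname != "virtualmod":
            return None
        return importlib.util.spec_from_loader(fullname, self)

    def create_module(self, spec):
        return None  # use default module creation semantics

    def exec_module(self, module):
        module.answer = 42

sys.meta_path.insert(0, VirtualFinder())
import virtualmod
print(virtualmod.answer)  # 42
```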
- """ - -_register(PathEntryFinder, machinery.FileFinder) - - -class ResourceLoader(Loader): - - """Abstract base class for loaders which can return data from their - back-end storage. - - This ABC represents one of the optional protocols specified by PEP 302. - - """ - - @abc.abstractmethod - def get_data(self, path): - """Abstract method which when implemented should return the bytes for - the specified path. The path must be a str.""" - raise OSError - - -class InspectLoader(Loader): - - """Abstract base class for loaders which support inspection about the - modules they can load. - - This ABC represents one of the optional protocols specified by PEP 302. - - """ - - def is_package(self, fullname): - """Optional method which when implemented should return whether the - module is a package. The fullname is a str. Returns a bool. - - Raises ImportError if the module cannot be found. - """ - raise ImportError - - def get_code(self, fullname): - """Method which returns the code object for the module. - - The fullname is a str. Returns a types.CodeType if possible, else - returns None if a code object does not make sense - (e.g. built-in module). Raises ImportError if the module cannot be - found. - """ - source = self.get_source(fullname) - if source is None: - return None - return self.source_to_code(source) - - @abc.abstractmethod - def get_source(self, fullname): - """Abstract method which should return the source code for the - module. The fullname is a str. Returns a str. - - Raises ImportError if the module cannot be found. - """ - raise ImportError - - @staticmethod - def source_to_code(data, path=''): - """Compile 'data' into a code object. - - The 'data' argument can be anything that compile() can handle. The'path' - argument should be where the data was retrieved (when applicable).""" - return compile(data, path, 'exec', dont_inherit=True) - - exec_module = _bootstrap_external._LoaderBasics.exec_module - load_module = _bootstrap_external._LoaderBasics.load_module - -_register(InspectLoader, machinery.BuiltinImporter, machinery.FrozenImporter, machinery.NamespaceLoader) - - -class ExecutionLoader(InspectLoader): - - """Abstract base class for loaders that wish to support the execution of - modules as scripts. - - This ABC represents one of the optional protocols specified in PEP 302. - - """ - - @abc.abstractmethod - def get_filename(self, fullname): - """Abstract method which should return the value that __file__ is to be - set to. - - Raises ImportError if the module cannot be found. - """ - raise ImportError - - def get_code(self, fullname): - """Method to return the code object for fullname. - - Should return None if not applicable (e.g. built-in module). - Raise ImportError if the module cannot be found. 
- """ - source = self.get_source(fullname) - if source is None: - return None - try: - path = self.get_filename(fullname) - except ImportError: - return self.source_to_code(source) - else: - return self.source_to_code(source, path) - -_register( - ExecutionLoader, - machinery.ExtensionFileLoader, - machinery.AppleFrameworkLoader, -) - - -class FileLoader(_bootstrap_external.FileLoader, ResourceLoader, ExecutionLoader): - - """Abstract base class partially implementing the ResourceLoader and - ExecutionLoader ABCs.""" - -_register(FileLoader, machinery.SourceFileLoader, - machinery.SourcelessFileLoader) - - -class SourceLoader(_bootstrap_external.SourceLoader, ResourceLoader, ExecutionLoader): - - """Abstract base class for loading source code (and optionally any - corresponding bytecode). - - To support loading from source code, the abstractmethods inherited from - ResourceLoader and ExecutionLoader need to be implemented. To also support - loading from bytecode, the optional methods specified directly by this ABC - is required. - - Inherited abstractmethods not implemented in this ABC: - - * ResourceLoader.get_data - * ExecutionLoader.get_filename - - """ - - def path_mtime(self, path): - """Return the (int) modification time for the path (str).""" - if self.path_stats.__func__ is SourceLoader.path_stats: - raise OSError - return int(self.path_stats(path)['mtime']) - - def path_stats(self, path): - """Return a metadata dict for the source pointed to by the path (str). - Possible keys: - - 'mtime' (mandatory) is the numeric timestamp of last source - code modification; - - 'size' (optional) is the size in bytes of the source code. - """ - if self.path_mtime.__func__ is SourceLoader.path_mtime: - raise OSError - return {'mtime': self.path_mtime(path)} - - def set_data(self, path, data): - """Write the bytes to the path (if possible). - - Accepts a str path and data as bytes. - - Any needed intermediary directories are to be created. If for some - reason the file cannot be written because of permissions, fail - silently. 
- """ - -_register(SourceLoader, machinery.SourceFileLoader) diff --git a/Python313_13_x64_Template/Lib/importlib/machinery.py b/Python313_13_x64_Template/Lib/importlib/machinery.py deleted file mode 100644 index fbd30b15..00000000 --- a/Python313_13_x64_Template/Lib/importlib/machinery.py +++ /dev/null @@ -1,21 +0,0 @@ -"""The machinery of importlib: finders, loaders, hooks, etc.""" - -from ._bootstrap import ModuleSpec -from ._bootstrap import BuiltinImporter -from ._bootstrap import FrozenImporter -from ._bootstrap_external import (SOURCE_SUFFIXES, DEBUG_BYTECODE_SUFFIXES, - OPTIMIZED_BYTECODE_SUFFIXES, BYTECODE_SUFFIXES, - EXTENSION_SUFFIXES) -from ._bootstrap_external import WindowsRegistryFinder -from ._bootstrap_external import PathFinder -from ._bootstrap_external import FileFinder -from ._bootstrap_external import SourceFileLoader -from ._bootstrap_external import SourcelessFileLoader -from ._bootstrap_external import ExtensionFileLoader -from ._bootstrap_external import AppleFrameworkLoader -from ._bootstrap_external import NamespaceLoader - - -def all_suffixes(): - """Returns a list of all recognized module suffixes for this process""" - return SOURCE_SUFFIXES + BYTECODE_SUFFIXES + EXTENSION_SUFFIXES diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 95887b5a..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-313.pyc deleted file mode 100644 index 7090c6e1..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-313.pyc deleted file mode 100644 index 22bb104e..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-313.pyc deleted file mode 100644 index 575f9e08..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-313.pyc deleted file mode 100644 index 4f3c9685..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-313.pyc deleted file mode 100644 index 1b3267ad..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_text.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_text.cpython-313.pyc deleted file mode 100644 index 993e14aa..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/metadata/__pycache__/_text.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 54cf4f72..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-313.pyc deleted file mode 100644 index 20ec09d5..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_common.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_common.cpython-313.pyc deleted file mode 100644 index ec35f9b8..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_common.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_functional.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_functional.cpython-313.pyc deleted file mode 100644 index a51b3055..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_functional.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-313.pyc deleted file mode 100644 index 6a499edd..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/abc.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/abc.cpython-313.pyc deleted file mode 100644 index e3b33334..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/abc.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/readers.cpython-313.pyc b/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/readers.cpython-313.pyc deleted file mode 100644 index 7fc1a1be..00000000 Binary files a/Python313_13_x64_Template/Lib/importlib/resources/__pycache__/readers.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/importlib/resources/_common.py b/Python313_13_x64_Template/Lib/importlib/resources/_common.py deleted file mode 100644 index cae4699f..00000000 --- a/Python313_13_x64_Template/Lib/importlib/resources/_common.py +++ /dev/null @@ -1,211 +0,0 @@ -import os -import pathlib -import tempfile -import functools -import contextlib -import types -import importlib -import inspect -import warnings -import itertools - -from typing import Union, Optional, cast -from .abc import ResourceReader, Traversable - -Package = Union[types.ModuleType, str] -Anchor = Package - - -def package_to_anchor(func): - """ - Replace 'package' parameter as 'anchor' and warn about the change. - - Other errors should fall through. 
- - >>> files('a', 'b') - Traceback (most recent call last): - TypeError: files() takes from 0 to 1 positional arguments but 2 were given - - Remove this compatibility in Python 3.14. - """ - undefined = object() - - @functools.wraps(func) - def wrapper(anchor=undefined, package=undefined): - if package is not undefined: - if anchor is not undefined: - return func(anchor, package) - warnings.warn( - "First parameter to files is renamed to 'anchor'", - DeprecationWarning, - stacklevel=2, - ) - return func(package) - elif anchor is undefined: - return func() - return func(anchor) - - return wrapper - - -@package_to_anchor -def files(anchor: Optional[Anchor] = None) -> Traversable: - """ - Get a Traversable resource for an anchor. - """ - return from_package(resolve(anchor)) - - -def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]: - """ - Return the package's loader if it's a ResourceReader. - """ - # We can't use - # a issubclass() check here because apparently abc.'s __subclasscheck__() - # hook wants to create a weak reference to the object, but - # zipimport.zipimporter does not support weak references, resulting in a - # TypeError. That seems terrible. - spec = package.__spec__ - reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore[union-attr] - if reader is None: - return None - return reader(spec.name) # type: ignore[union-attr] - - -@functools.singledispatch -def resolve(cand: Optional[Anchor]) -> types.ModuleType: - return cast(types.ModuleType, cand) - - -@resolve.register -def _(cand: str) -> types.ModuleType: - return importlib.import_module(cand) - - -@resolve.register -def _(cand: None) -> types.ModuleType: - return resolve(_infer_caller().f_globals['__name__']) - - -def _infer_caller(): - """ - Walk the stack and find the frame of the first caller not in this module. - """ - - def is_this_file(frame_info): - return frame_info.filename == stack[0].filename - - def is_wrapper(frame_info): - return frame_info.function == 'wrapper' - - stack = inspect.stack() - not_this_file = itertools.filterfalse(is_this_file, stack) - # also exclude 'wrapper' due to singledispatch in the call stack - callers = itertools.filterfalse(is_wrapper, not_this_file) - return next(callers).frame - - -def from_package(package: types.ModuleType): - """ - Return a Traversable object for the given package. - - """ - # deferred for performance (python/cpython#109829) - from ._adapters import wrap_spec - - spec = wrap_spec(package) - reader = spec.loader.get_resource_reader(spec.name) - return reader.files() - - -@contextlib.contextmanager -def _tempfile( - reader, - suffix='', - # gh-93353: Keep a reference to call os.remove() in late Python - # finalization. - *, - _os_remove=os.remove, -): - # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' - # blocks due to the need to close the temporary file to work on Windows - # properly. - fd, raw_path = tempfile.mkstemp(suffix=suffix) - try: - try: - os.write(fd, reader()) - finally: - os.close(fd) - del reader - yield pathlib.Path(raw_path) - finally: - try: - _os_remove(raw_path) - except FileNotFoundError: - pass - - -def _temp_file(path): - return _tempfile(path.read_bytes, suffix=path.name) - - -def _is_present_dir(path: Traversable) -> bool: - """ - Some Traversables implement ``is_dir()`` to raise an - exception (i.e. ``FileNotFoundError``) when the - directory doesn't exist. This function wraps that call - to always return a boolean and only return True - if there's a dir and it exists. 
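The `files()`/`as_file()` pair whose internals are deleted in this hunk remains available via `importlib.resources`; a short sketch using the stdlib `json` package as a convenient anchor:

```python
from importlib.resources import as_file, files

resource = files("json") / "__init__.py"   # any importable package works
print(resource.read_text(encoding="utf-8")[:40])

# as_file() hands out a real filesystem path, copying to a temporary
# file only when the package is not stored as plain files (e.g. a zip).
with as_file(resource) as path:
    print(path)
```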
- """ - with contextlib.suppress(FileNotFoundError): - return path.is_dir() - return False - - -@functools.singledispatch -def as_file(path): - """ - Given a Traversable object, return that object as a - path on the local file system in a context manager. - """ - return _temp_dir(path) if _is_present_dir(path) else _temp_file(path) - - -@as_file.register(pathlib.Path) -@contextlib.contextmanager -def _(path): - """ - Degenerate behavior for pathlib.Path objects. - """ - yield path - - -@contextlib.contextmanager -def _temp_path(dir: tempfile.TemporaryDirectory): - """ - Wrap tempfile.TemporyDirectory to return a pathlib object. - """ - with dir as result: - yield pathlib.Path(result) - - -@contextlib.contextmanager -def _temp_dir(path): - """ - Given a traversable dir, recursively replicate the whole tree - to the file system in a context manager. - """ - assert path.is_dir() - with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: - yield _write_contents(temp_dir, path) - - -def _write_contents(target, source): - child = target.joinpath(source.name) - if source.is_dir(): - child.mkdir() - for item in source.iterdir(): - _write_contents(child, item) - else: - child.write_bytes(source.read_bytes()) - return child diff --git a/Python313_13_x64_Template/Lib/importlib/util.py b/Python313_13_x64_Template/Lib/importlib/util.py deleted file mode 100644 index 284206b6..00000000 --- a/Python313_13_x64_Template/Lib/importlib/util.py +++ /dev/null @@ -1,274 +0,0 @@ -"""Utility code for constructing importers, etc.""" -from ._abc import Loader -from ._bootstrap import module_from_spec -from ._bootstrap import _resolve_name -from ._bootstrap import spec_from_loader -from ._bootstrap import _find_spec -from ._bootstrap_external import MAGIC_NUMBER -from ._bootstrap_external import _RAW_MAGIC_NUMBER -from ._bootstrap_external import cache_from_source -from ._bootstrap_external import decode_source -from ._bootstrap_external import source_from_cache -from ._bootstrap_external import spec_from_file_location - -import _imp -import sys -import types - - -def source_hash(source_bytes): - "Return the hash of *source_bytes* as used in hash-based pyc files." - return _imp.source_hash(_RAW_MAGIC_NUMBER, source_bytes) - - -def resolve_name(name, package): - """Resolve a relative module name to an absolute one.""" - if not name.startswith('.'): - return name - elif not package: - raise ImportError(f'no package specified for {repr(name)} ' - '(required for relative module names)') - level = 0 - for character in name: - if character != '.': - break - level += 1 - return _resolve_name(name[level:], package, level) - - -def _find_spec_from_path(name, path=None): - """Return the spec for the specified module. - - First, sys.modules is checked to see if the module was already imported. If - so, then sys.modules[name].__spec__ is returned. If that happens to be - set to None, then ValueError is raised. If the module is not in - sys.modules, then sys.meta_path is searched for a suitable spec with the - value of 'path' given to the finders. None is returned if no spec could - be found. - - Dotted names do not have their parent packages implicitly imported. You will - most likely need to explicitly import all parent packages in the proper - order for a submodule to get the correct spec. 
-
-    """
-    if name not in sys.modules:
-        return _find_spec(name, path)
-    else:
-        module = sys.modules[name]
-        if module is None:
-            return None
-        try:
-            spec = module.__spec__
-        except AttributeError:
-            raise ValueError(f'{name}.__spec__ is not set') from None
-        else:
-            if spec is None:
-                raise ValueError(f'{name}.__spec__ is None')
-            return spec
-
-
-def find_spec(name, package=None):
-    """Return the spec for the specified module.
-
-    First, sys.modules is checked to see if the module was already imported. If
-    so, then sys.modules[name].__spec__ is returned. If that happens to be
-    set to None, then ValueError is raised. If the module is not in
-    sys.modules, then sys.meta_path is searched for a suitable spec with the
-    value of 'path' given to the finders. None is returned if no spec could
-    be found.
-
-    If the name is for a submodule (contains a dot), the parent module is
-    automatically imported.
-
-    The name and package arguments work the same as importlib.import_module().
-    In other words, relative module names (with leading dots) work.
-
-    """
-    fullname = resolve_name(name, package) if name.startswith('.') else name
-    if fullname not in sys.modules:
-        parent_name = fullname.rpartition('.')[0]
-        if parent_name:
-            parent = __import__(parent_name, fromlist=['__path__'])
-            try:
-                parent_path = parent.__path__
-            except AttributeError as e:
-                raise ModuleNotFoundError(
-                    f"__path__ attribute not found on {parent_name!r} "
-                    f"while trying to find {fullname!r}", name=fullname) from e
-        else:
-            parent_path = None
-        return _find_spec(fullname, parent_path)
-    else:
-        module = sys.modules[fullname]
-        if module is None:
-            return None
-        try:
-            spec = module.__spec__
-        except AttributeError:
-            raise ValueError(f'{name}.__spec__ is not set') from None
-        else:
-            if spec is None:
-                raise ValueError(f'{name}.__spec__ is None')
-            return spec
-
-
-# Normally we would use contextlib.contextmanager. However, this module
-# is imported by runpy, which means we want to avoid any unnecessary
-# dependencies. Thus we use a class.
-
-class _incompatible_extension_module_restrictions:
-    """A context manager that can temporarily skip the compatibility check.
-
-    NOTE: This function is meant to accommodate an unusual case; one
-    which is likely to eventually go away. There's a pretty good
-    chance this is not what you were looking for.
-
-    WARNING: Using this function to disable the check can lead to
-    unexpected behavior and even crashes. It should only be used during
-    extension module development.
-
-    If "disable_check" is True then the compatibility check will not
-    happen while the context manager is active. Otherwise the check
-    *will* happen.
-
-    Normally, extensions that do not support multiple interpreters
-    may not be imported in a subinterpreter. That implies modules
-    that do not implement multi-phase init or that explicitly opt out.
-
-    Likewise for modules imported in a subinterpreter with its own GIL
-    when the extension does not support a per-interpreter GIL. This
-    implies the module does not have a Py_mod_multiple_interpreters slot
-    set to Py_MOD_PER_INTERPRETER_GIL_SUPPORTED.
-
-    In both cases, this context manager may be used to temporarily
-    disable the check for compatible extension modules.
-
-    You can get the same effect as this function by implementing the
-    basic interface of multi-phase init (PEP 489) and lying about
-    support for multiple interpreters (or per-interpreter GIL).
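The two public helpers deleted above are handy for probing importability without executing the module itself (only parent packages get imported); a sketch, with the missing-module name invented:

```python
import importlib.util

# Relative names are resolved against a package before searching:
print(importlib.util.resolve_name(".encoder", "json"))  # 'json.encoder'

# find_spec() reports where a module would come from:
print(importlib.util.find_spec("json.tool").origin)

# Unknown top-level names simply yield None:
print(importlib.util.find_spec("definitely_not_a_module"))  # invented name
```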
- """ - - def __init__(self, *, disable_check): - self.disable_check = bool(disable_check) - - def __enter__(self): - self.old = _imp._override_multi_interp_extensions_check(self.override) - return self - - def __exit__(self, *args): - old = self.old - del self.old - _imp._override_multi_interp_extensions_check(old) - - @property - def override(self): - return -1 if self.disable_check else 1 - - -class _LazyModule(types.ModuleType): - - """A subclass of the module type which triggers loading upon attribute access.""" - - def __getattribute__(self, attr): - """Trigger the load of the module and return the attribute.""" - __spec__ = object.__getattribute__(self, '__spec__') - loader_state = __spec__.loader_state - with loader_state['lock']: - # Only the first thread to get the lock should trigger the load - # and reset the module's class. The rest can now getattr(). - if object.__getattribute__(self, '__class__') is _LazyModule: - __class__ = loader_state['__class__'] - - # Reentrant calls from the same thread must be allowed to proceed without - # triggering the load again. - # exec_module() and self-referential imports are the primary ways this can - # happen, but in any case we must return something to avoid deadlock. - if loader_state['is_loading']: - return __class__.__getattribute__(self, attr) - loader_state['is_loading'] = True - - __dict__ = __class__.__getattribute__(self, '__dict__') - - # All module metadata must be gathered from __spec__ in order to avoid - # using mutated values. - # Get the original name to make sure no object substitution occurred - # in sys.modules. - original_name = __spec__.name - # Figure out exactly what attributes were mutated between the creation - # of the module and now. - attrs_then = loader_state['__dict__'] - attrs_now = __dict__ - attrs_updated = {} - for key, value in attrs_now.items(): - # Code that set an attribute may have kept a reference to the - # assigned object, making identity more important than equality. - if key not in attrs_then: - attrs_updated[key] = value - elif id(attrs_now[key]) != id(attrs_then[key]): - attrs_updated[key] = value - __spec__.loader.exec_module(self) - # If exec_module() was used directly there is no guarantee the module - # object was put into sys.modules. - if original_name in sys.modules: - if id(self) != id(sys.modules[original_name]): - raise ValueError(f"module object for {original_name!r} " - "substituted in sys.modules during a lazy " - "load") - # Update after loading since that's what would happen in an eager - # loading situation. - __dict__.update(attrs_updated) - # Finally, stop triggering this method, if the module did not - # already update its own __class__. - if isinstance(self, _LazyModule): - object.__setattr__(self, '__class__', __class__) - - return getattr(self, attr) - - def __delattr__(self, attr): - """Trigger the load and then perform the deletion.""" - # To trigger the load and raise an exception if the attribute - # doesn't exist. 
- self.__getattribute__(attr) - delattr(self, attr) - - -class LazyLoader(Loader): - - """A loader that creates a module which defers loading until attribute access.""" - - @staticmethod - def __check_eager_loader(loader): - if not hasattr(loader, 'exec_module'): - raise TypeError('loader must define exec_module()') - - @classmethod - def factory(cls, loader): - """Construct a callable which returns the eager loader made lazy.""" - cls.__check_eager_loader(loader) - return lambda *args, **kwargs: cls(loader(*args, **kwargs)) - - def __init__(self, loader): - self.__check_eager_loader(loader) - self.loader = loader - - def create_module(self, spec): - return self.loader.create_module(spec) - - def exec_module(self, module): - """Make the module load lazily.""" - # Threading is only needed for lazy loading, and importlib.util can - # be pulled in at interpreter startup, so defer until needed. - import threading - module.__spec__.loader = self.loader - module.__loader__ = self.loader - # Don't need to worry about deep-copying as trying to set an attribute - # on an object would have triggered the load, - # e.g. ``module.__spec__.loader = None`` would trigger a load from - # trying to access module.__spec__. - loader_state = {} - loader_state['__dict__'] = module.__dict__.copy() - loader_state['__class__'] = module.__class__ - loader_state['lock'] = threading.RLock() - loader_state['is_loading'] = False - module.__spec__.loader_state = loader_state - module.__class__ = _LazyModule diff --git a/Python313_13_x64_Template/Lib/inspect.py b/Python313_13_x64_Template/Lib/inspect.py deleted file mode 100644 index d74444e2..00000000 --- a/Python313_13_x64_Template/Lib/inspect.py +++ /dev/null @@ -1,3474 +0,0 @@ -"""Get useful information from live Python objects. - -This module encapsulates the interface provided by the internal special -attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion. -It also provides some help for examining source code and class layout. - -Here are some of the useful functions provided by this module: - - ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(), - isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(), - isroutine() - check object types - getmembers() - get members of an object that satisfy a given condition - - getfile(), getsourcefile(), getsource() - find an object's source code - getdoc(), getcomments() - get documentation on an object - getmodule() - determine the module that an object came from - getclasstree() - arrange classes so as to represent their hierarchy - - getargvalues(), getcallargs() - get info about function arguments - getfullargspec() - same, with support for Python 3 features - formatargvalues() - format an argument spec - getouterframes(), getinnerframes() - get info about frames - currentframe() - get the current stack frame - stack(), trace() - get info about frames on the stack or in a traceback - - signature() - get a Signature object for the callable - - get_annotations() - safely compute an object's annotations -""" - -# This module is in the public domain. No warranties. 
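Returning to importlib.util just above: LazyLoader plus _LazyModule implement the documented lazy-import recipe; a minimal sketch (difflib is an arbitrary example module):

```python
import importlib.util
import sys

def lazy_import(name):
    """Create the module now but defer executing its body until first use."""
    spec = importlib.util.find_spec(name)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    loader.exec_module(module)  # installs _LazyModule; body not run yet
    return module

difflib = lazy_import("difflib")
print(type(difflib).__name__)   # '_LazyModule': still unloaded
print(difflib.SequenceMatcher)  # first attribute access triggers the load
```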
-
-__author__ = ('Ka-Ping Yee <ping@lfw.org>',
-              'Yury Selivanov <yselivanov@sprymix.com>')
-
-__all__ = [
-    "AGEN_CLOSED",
-    "AGEN_CREATED",
-    "AGEN_RUNNING",
-    "AGEN_SUSPENDED",
-    "ArgInfo",
-    "Arguments",
-    "Attribute",
-    "BlockFinder",
-    "BoundArguments",
-    "BufferFlags",
-    "CORO_CLOSED",
-    "CORO_CREATED",
-    "CORO_RUNNING",
-    "CORO_SUSPENDED",
-    "CO_ASYNC_GENERATOR",
-    "CO_COROUTINE",
-    "CO_GENERATOR",
-    "CO_ITERABLE_COROUTINE",
-    "CO_NESTED",
-    "CO_NEWLOCALS",
-    "CO_NOFREE",
-    "CO_OPTIMIZED",
-    "CO_VARARGS",
-    "CO_VARKEYWORDS",
-    "ClassFoundException",
-    "ClosureVars",
-    "EndOfBlock",
-    "FrameInfo",
-    "FullArgSpec",
-    "GEN_CLOSED",
-    "GEN_CREATED",
-    "GEN_RUNNING",
-    "GEN_SUSPENDED",
-    "Parameter",
-    "Signature",
-    "TPFLAGS_IS_ABSTRACT",
-    "Traceback",
-    "classify_class_attrs",
-    "cleandoc",
-    "currentframe",
-    "findsource",
-    "formatannotation",
-    "formatannotationrelativeto",
-    "formatargvalues",
-    "get_annotations",
-    "getabsfile",
-    "getargs",
-    "getargvalues",
-    "getasyncgenlocals",
-    "getasyncgenstate",
-    "getattr_static",
-    "getblock",
-    "getcallargs",
-    "getclasstree",
-    "getclosurevars",
-    "getcomments",
-    "getcoroutinelocals",
-    "getcoroutinestate",
-    "getdoc",
-    "getfile",
-    "getframeinfo",
-    "getfullargspec",
-    "getgeneratorlocals",
-    "getgeneratorstate",
-    "getinnerframes",
-    "getlineno",
-    "getmembers",
-    "getmembers_static",
-    "getmodule",
-    "getmodulename",
-    "getmro",
-    "getouterframes",
-    "getsource",
-    "getsourcefile",
-    "getsourcelines",
-    "indentsize",
-    "isabstract",
-    "isasyncgen",
-    "isasyncgenfunction",
-    "isawaitable",
-    "isbuiltin",
-    "isclass",
-    "iscode",
-    "iscoroutine",
-    "iscoroutinefunction",
-    "isdatadescriptor",
-    "isframe",
-    "isfunction",
-    "isgenerator",
-    "isgeneratorfunction",
-    "isgetsetdescriptor",
-    "ismemberdescriptor",
-    "ismethod",
-    "ismethoddescriptor",
-    "ismethodwrapper",
-    "ismodule",
-    "isroutine",
-    "istraceback",
-    "markcoroutinefunction",
-    "signature",
-    "stack",
-    "trace",
-    "unwrap",
-    "walktree",
-]
-
-
-import abc
-import ast
-import dis
-import collections.abc
-import enum
-import importlib.machinery
-import itertools
-import linecache
-import os
-import re
-import sys
-import tokenize
-import token
-import types
-import functools
-import builtins
-from keyword import iskeyword
-from operator import attrgetter
-from collections import namedtuple, OrderedDict
-from weakref import ref as make_weakref
-
-# Create constants for the compiler flags in Include/code.h
-# We try to get them from dis to avoid duplication
-mod_dict = globals()
-for k, v in dis.COMPILER_FLAG_NAMES.items():
-    mod_dict["CO_" + v] = k
-del k, v, mod_dict
-
-# See Include/object.h
-TPFLAGS_IS_ABSTRACT = 1 << 20
-
-
-def get_annotations(obj, *, globals=None, locals=None, eval_str=False):
-    """Compute the annotations dict for an object.
-
-    obj may be a callable, class, or module.
-    Passing in an object of any other type raises TypeError.
-
-    Returns a dict. get_annotations() returns a new dict every time
-    it's called; calling it twice on the same object will return two
-    different but equivalent dicts.
-
-    This function handles several details for you:
-
-    * If eval_str is true, values of type str will
-      be un-stringized using eval(). This is intended
-      for use with stringized annotations
-      ("from __future__ import annotations").
-    * If obj doesn't have an annotations dict, returns an
-      empty dict. (Functions and methods always have an
-      annotations dict; classes, modules, and other types of
-      callables may not.)
- * Ignores inherited annotations on classes. If a class - doesn't have its own annotations dict, returns an empty dict. - * All accesses to object members and dict values are done - using getattr() and dict.get() for safety. - * Always, always, always returns a freshly-created dict. - - eval_str controls whether or not values of type str are replaced - with the result of calling eval() on those values: - - * If eval_str is true, eval() is called on values of type str. - * If eval_str is false (the default), values of type str are unchanged. - - globals and locals are passed in to eval(); see the documentation - for eval() for more information. If either globals or locals is - None, this function may replace that value with a context-specific - default, contingent on type(obj): - - * If obj is a module, globals defaults to obj.__dict__. - * If obj is a class, globals defaults to - sys.modules[obj.__module__].__dict__ and locals - defaults to the obj class namespace. - * If obj is a callable, globals defaults to obj.__globals__, - although if obj is a wrapped function (using - functools.update_wrapper()) it is first unwrapped. - """ - if isinstance(obj, type): - # class - obj_dict = getattr(obj, '__dict__', None) - if obj_dict and hasattr(obj_dict, 'get'): - ann = obj_dict.get('__annotations__', None) - if isinstance(ann, types.GetSetDescriptorType): - ann = None - else: - ann = None - - obj_globals = None - module_name = getattr(obj, '__module__', None) - if module_name: - module = sys.modules.get(module_name, None) - if module: - obj_globals = getattr(module, '__dict__', None) - obj_locals = dict(vars(obj)) - unwrap = obj - elif isinstance(obj, types.ModuleType): - # module - ann = getattr(obj, '__annotations__', None) - obj_globals = getattr(obj, '__dict__') - obj_locals = None - unwrap = None - elif callable(obj): - # this includes types.Function, types.BuiltinFunctionType, - # types.BuiltinMethodType, functools.partial, functools.singledispatch, - # "class funclike" from Lib/test/test_inspect... on and on it goes. 
-        ann = getattr(obj, '__annotations__', None)
-        obj_globals = getattr(obj, '__globals__', None)
-        obj_locals = None
-        unwrap = obj
-    else:
-        raise TypeError(f"{obj!r} is not a module, class, or callable.")
-
-    if ann is None:
-        return {}
-
-    if not isinstance(ann, dict):
-        raise ValueError(f"{obj!r}.__annotations__ is neither a dict nor None")
-
-    if not ann:
-        return {}
-
-    if not eval_str:
-        return dict(ann)
-
-    if unwrap is not None:
-        while True:
-            if hasattr(unwrap, '__wrapped__'):
-                unwrap = unwrap.__wrapped__
-                continue
-            if isinstance(unwrap, functools.partial):
-                unwrap = unwrap.func
-                continue
-            break
-        if hasattr(unwrap, "__globals__"):
-            obj_globals = unwrap.__globals__
-
-    if globals is None:
-        globals = obj_globals
-    if locals is None:
-        locals = obj_locals or {}
-
-    # "Inject" type parameters into the local namespace
-    # (unless they are shadowed by assignments *in* the local namespace),
-    # as a way of emulating annotation scopes when calling `eval()`
-    if type_params := getattr(obj, "__type_params__", ()):
-        locals = {param.__name__: param for param in type_params} | locals
-
-    return_value = {
-        key: value if not isinstance(value, str)
-        else eval(value, globals, locals)
-        for key, value in ann.items() }
-    return return_value
-
-
-# ----------------------------------------------------------- type-checking
-def ismodule(object):
-    """Return true if the object is a module."""
-    return isinstance(object, types.ModuleType)
-
-def isclass(object):
-    """Return true if the object is a class."""
-    return isinstance(object, type)
-
-def ismethod(object):
-    """Return true if the object is an instance method."""
-    return isinstance(object, types.MethodType)
-
-def ismethoddescriptor(object):
-    """Return true if the object is a method descriptor.
-
-    But not if ismethod() or isclass() or isfunction() are true.
-
-    This is new in Python 2.2, and, for example, is true of int.__add__.
-    An object passing this test has a __get__ attribute, but not a
-    __set__ attribute or a __delete__ attribute. Beyond that, the set
-    of attributes varies; __name__ is usually sensible, and __doc__
-    often is.
-
-    Methods implemented via descriptors that also pass one of the other
-    tests return false from the ismethoddescriptor() test, simply because
-    the other tests promise more -- you can, e.g., count on having the
-    __func__ attribute (etc) when an object passes ismethod()."""
-    if isclass(object) or ismethod(object) or isfunction(object):
-        # mutual exclusion
-        return False
-    if isinstance(object, functools.partial):
-        # Lie for children. The addition of partial.__get__
-        # doesn't currently change the partial object's behaviour,
-        # not counting a warning about future changes.
-        return False
-    tp = type(object)
-    return (hasattr(tp, "__get__")
-            and not hasattr(tp, "__set__")
-            and not hasattr(tp, "__delete__"))
-
-def isdatadescriptor(object):
-    """Return true if the object is a data descriptor.
-
-    Data descriptors have a __set__ or a __delete__ attribute. Examples are
-    properties (defined in Python) and getsets and members (defined in C).
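A compact illustration of the eval_str switch implemented above (the annotated function is invented):

```python
import inspect

def greet(name: "str", excited: "bool" = False) -> "str":
    return name.upper() if excited else name

# Stringized annotations stay strings by default...
print(inspect.get_annotations(greet))
# {'name': 'str', 'excited': 'bool', 'return': 'str'}

# ...and are evaluated on request:
print(inspect.get_annotations(greet, eval_str=True))
# {'name': <class 'str'>, 'excited': <class 'bool'>, 'return': <class 'str'>}
```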
- Typically, data descriptors will also have __name__ and __doc__ attributes - (properties, getsets, and members have both of these attributes), but this - is not guaranteed.""" - if isclass(object) or ismethod(object) or isfunction(object): - # mutual exclusion - return False - tp = type(object) - return hasattr(tp, "__set__") or hasattr(tp, "__delete__") - -if hasattr(types, 'MemberDescriptorType'): - # CPython and equivalent - def ismemberdescriptor(object): - """Return true if the object is a member descriptor. - - Member descriptors are specialized descriptors defined in extension - modules.""" - return isinstance(object, types.MemberDescriptorType) -else: - # Other implementations - def ismemberdescriptor(object): - """Return true if the object is a member descriptor. - - Member descriptors are specialized descriptors defined in extension - modules.""" - return False - -if hasattr(types, 'GetSetDescriptorType'): - # CPython and equivalent - def isgetsetdescriptor(object): - """Return true if the object is a getset descriptor. - - getset descriptors are specialized descriptors defined in extension - modules.""" - return isinstance(object, types.GetSetDescriptorType) -else: - # Other implementations - def isgetsetdescriptor(object): - """Return true if the object is a getset descriptor. - - getset descriptors are specialized descriptors defined in extension - modules.""" - return False - -def isfunction(object): - """Return true if the object is a user-defined function. - - Function objects provide these attributes: - __doc__ documentation string - __name__ name with which this function was defined - __code__ code object containing compiled function bytecode - __defaults__ tuple of any default values for arguments - __globals__ global namespace in which this function was defined - __annotations__ dict of parameter annotations - __kwdefaults__ dict of keyword only parameters with defaults""" - return isinstance(object, types.FunctionType) - -def _has_code_flag(f, flag): - """Return true if ``f`` is a function (or a method or functools.partial - wrapper wrapping a function or a functools.partialmethod wrapping a - function) whose code object has the given ``flag`` - set in its flags.""" - f = functools._unwrap_partialmethod(f) - while ismethod(f): - f = f.__func__ - f = functools._unwrap_partial(f) - if not (isfunction(f) or _signature_is_functionlike(f)): - return False - return bool(f.__code__.co_flags & flag) - -def isgeneratorfunction(obj): - """Return true if the object is a user-defined generator function. - - Generator function objects provide the same attributes as functions. - See help(isfunction) for a list of attributes.""" - return _has_code_flag(obj, CO_GENERATOR) - -# A marker for markcoroutinefunction and iscoroutinefunction. -_is_coroutine_mark = object() - -def _has_coroutine_mark(f): - while ismethod(f): - f = f.__func__ - f = functools._unwrap_partial(f) - return getattr(f, "_is_coroutine_marker", None) is _is_coroutine_mark - -def markcoroutinefunction(func): - """ - Decorator to ensure callable is recognised as a coroutine function. - """ - if hasattr(func, '__func__'): - func = func.__func__ - func._is_coroutine_marker = _is_coroutine_mark - return func - -def iscoroutinefunction(obj): - """Return true if the object is a coroutine function. - - Coroutine functions are normally defined with "async def" syntax, but may - be marked via markcoroutinefunction. 
- """ - return _has_code_flag(obj, CO_COROUTINE) or _has_coroutine_mark(obj) - -def isasyncgenfunction(obj): - """Return true if the object is an asynchronous generator function. - - Asynchronous generator functions are defined with "async def" - syntax and have "yield" expressions in their body. - """ - return _has_code_flag(obj, CO_ASYNC_GENERATOR) - -def isasyncgen(object): - """Return true if the object is an asynchronous generator.""" - return isinstance(object, types.AsyncGeneratorType) - -def isgenerator(object): - """Return true if the object is a generator. - - Generator objects provide these attributes: - __iter__ defined to support iteration over container - close raises a new GeneratorExit exception inside the - generator to terminate the iteration - gi_code code object - gi_frame frame object or possibly None once the generator has - been exhausted - gi_running set to 1 when generator is executing, 0 otherwise - gi_suspended set to 1 when the generator is suspended at a yield point, 0 otherwise - gi_yieldfrom object being iterated by yield from or None - next return the next item from the container - send resumes the generator and "sends" a value that becomes - the result of the current yield-expression - throw used to raise an exception inside the generator""" - return isinstance(object, types.GeneratorType) - -def iscoroutine(object): - """Return true if the object is a coroutine.""" - return isinstance(object, types.CoroutineType) - -def isawaitable(object): - """Return true if object can be passed to an ``await`` expression.""" - return (isinstance(object, types.CoroutineType) or - isinstance(object, types.GeneratorType) and - bool(object.gi_code.co_flags & CO_ITERABLE_COROUTINE) or - isinstance(object, collections.abc.Awaitable)) - -def istraceback(object): - """Return true if the object is a traceback. - - Traceback objects provide these attributes: - tb_frame frame object at this level - tb_lasti index of last attempted instruction in bytecode - tb_lineno current line number in Python source code - tb_next next inner traceback object (called by this level)""" - return isinstance(object, types.TracebackType) - -def isframe(object): - """Return true if the object is a frame object. - - Frame objects provide these attributes: - f_back next outer frame object (this frame's caller) - f_builtins built-in namespace seen by this frame - f_code code object being executed in this frame - f_globals global namespace seen by this frame - f_lasti index of last attempted instruction in bytecode - f_lineno current line number in Python source code - f_locals local namespace seen by this frame - f_trace tracing function for this frame, or None""" - return isinstance(object, types.FrameType) - -def iscode(object): - """Return true if the object is a code object. 
- - Code objects provide these attributes: - co_argcount number of arguments (not including *, ** args - or keyword only arguments) - co_code string of raw compiled bytecode - co_cellvars tuple of names of cell variables - co_consts tuple of constants used in the bytecode - co_filename name of file in which this code object was created - co_firstlineno number of first line in Python source code - co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg - | 16=nested | 32=generator | 64=nofree | 128=coroutine - | 256=iterable_coroutine | 512=async_generator - co_freevars tuple of names of free variables - co_posonlyargcount number of positional only arguments - co_kwonlyargcount number of keyword only arguments (not including ** arg) - co_lnotab encoded mapping of line numbers to bytecode indices - co_name name with which this code object was defined - co_names tuple of names other than arguments and function locals - co_nlocals number of local variables - co_stacksize virtual machine stack space required - co_varnames tuple of names of arguments and local variables""" - return isinstance(object, types.CodeType) - -def isbuiltin(object): - """Return true if the object is a built-in function or method. - - Built-in functions and methods provide these attributes: - __doc__ documentation string - __name__ original name of this function or method - __self__ instance to which a method is bound, or None""" - return isinstance(object, types.BuiltinFunctionType) - -def ismethodwrapper(object): - """Return true if the object is a method wrapper.""" - return isinstance(object, types.MethodWrapperType) - -def isroutine(object): - """Return true if the object is any kind of function or method.""" - return (isbuiltin(object) - or isfunction(object) - or ismethod(object) - or ismethoddescriptor(object) - or ismethodwrapper(object)) - -def isabstract(object): - """Return true if the object is an abstract base class (ABC).""" - if not isinstance(object, type): - return False - if object.__flags__ & TPFLAGS_IS_ABSTRACT: - return True - if not issubclass(type(object), abc.ABCMeta): - return False - if hasattr(object, '__abstractmethods__'): - # It looks like ABCMeta.__new__ has finished running; - # TPFLAGS_IS_ABSTRACT should have been accurate. - return False - # It looks like ABCMeta.__new__ has not finished running yet; we're - # probably in __init_subclass__. We'll look for abstractmethods manually. - for name, value in object.__dict__.items(): - if getattr(value, "__isabstractmethod__", False): - return True - for base in object.__bases__: - for name in getattr(base, "__abstractmethods__", ()): - value = getattr(object, name, None) - if getattr(value, "__isabstractmethod__", False): - return True - return False - -def _getmembers(object, predicate, getter): - results = [] - processed = set() - names = dir(object) - if isclass(object): - mro = getmro(object) - # add any DynamicClassAttributes to the list of names if object is a class; - # this may result in duplicate entries if, for example, a virtual - # attribute with the same name as a DynamicClassAttribute exists - try: - for base in object.__bases__: - for k, v in base.__dict__.items(): - if isinstance(v, types.DynamicClassAttribute): - names.append(k) - except AttributeError: - pass - else: - mro = () - for key in names: - # First try to get the value via getattr. Some descriptors don't - # like calling their __get__ (see bug #1785), so fall back to - # looking in the __dict__. 
-        try:
-            value = getter(object, key)
-            # handle the duplicate key
-            if key in processed:
-                raise AttributeError
-        except AttributeError:
-            for base in mro:
-                if key in base.__dict__:
-                    value = base.__dict__[key]
-                    break
-            else:
-                # could be a (currently) missing slot member, or a buggy
-                # __dir__; discard and move on
-                continue
-        if not predicate or predicate(value):
-            results.append((key, value))
-        processed.add(key)
-    results.sort(key=lambda pair: pair[0])
-    return results
-
-def getmembers(object, predicate=None):
-    """Return all members of an object as (name, value) pairs sorted by name.
-    Optionally, only return members that satisfy a given predicate."""
-    return _getmembers(object, predicate, getattr)
-
-def getmembers_static(object, predicate=None):
-    """Return all members of an object as (name, value) pairs sorted by name
-    without triggering dynamic lookup via the descriptor protocol,
-    __getattr__ or __getattribute__. Optionally, only return members that
-    satisfy a given predicate.
-
-    Note: this function may not be able to retrieve all members
-    that getmembers can fetch (like dynamically created attributes)
-    and may find members that getmembers can't (like descriptors
-    that raise AttributeError). It can also return descriptor objects
-    instead of instance members in some cases.
-    """
-    return _getmembers(object, predicate, getattr_static)
-
-Attribute = namedtuple('Attribute', 'name kind defining_class object')
-
-def classify_class_attrs(cls):
-    """Return list of attribute-descriptor tuples.
-
-    For each name in dir(cls), the return list contains a 4-tuple
-    with these elements:
-
-    0. The name (a string).
-
-    1. The kind of attribute this is, one of these strings:
-           'class method'    created via classmethod()
-           'static method'   created via staticmethod()
-           'property'        created via property()
-           'method'          any other flavor of method or descriptor
-           'data'            not a method
-
-    2. The class which defined this attribute (a class).
-
-    3. The object as obtained by calling getattr; if this fails, or if the
-       resulting object does not live anywhere in the class' mro (including
-       metaclasses) then the object is looked up in the defining class's
-       dict (found by walking the mro).
-
-    If one of the items in dir(cls) is stored in the metaclass it will now
-    be discovered and not have None be listed as the class in which it was
-    defined. Any items whose home class cannot be discovered are skipped.
-    """
-
-    mro = getmro(cls)
-    metamro = getmro(type(cls)) # for attributes stored in the metaclass
-    metamro = tuple(cls for cls in metamro if cls not in (type, object))
-    class_bases = (cls,) + mro
-    all_bases = class_bases + metamro
-    names = dir(cls)
-    # add any DynamicClassAttributes to the list of names;
-    # this may result in duplicate entries if, for example, a virtual
-    # attribute with the same name as a DynamicClassAttribute exists.
-    for base in mro:
-        for k, v in base.__dict__.items():
-            if isinstance(v, types.DynamicClassAttribute) and v.fget is not None:
-                names.append(k)
-    result = []
-    processed = set()
-
-    for name in names:
-        # Get the object associated with the name, and where it was defined.
-        # Normal objects will be looked up with both getattr and directly in
-        # its class' dict (in case getattr fails [bug #1785], and also to look
-        # for a docstring).
-        # For DynamicClassAttributes on the second pass we only look in the
-        # class's dict.
-        #
-        # Getting an obj from the __dict__ sometimes reveals more than
-        # using getattr. Static and class methods are dramatic examples.
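An aside on the two public wrappers just shown: a minimal sketch of how their results differ on a descriptor that raises; the `Lazy` class is hypothetical:

import inspect

class Lazy:
    @property
    def value(self):
        raise AttributeError("computed lazily")

# getmembers() goes through getattr(), so the raising property is skipped;
# getmembers_static() sees the property descriptor itself without invoking it.
dynamic = dict(inspect.getmembers(Lazy()))
static = dict(inspect.getmembers_static(Lazy()))
assert "value" not in dynamic
assert isinstance(static["value"], property)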
- homecls = None - get_obj = None - dict_obj = None - if name not in processed: - try: - if name == '__dict__': - raise Exception("__dict__ is special, don't want the proxy") - get_obj = getattr(cls, name) - except Exception: - pass - else: - homecls = getattr(get_obj, "__objclass__", homecls) - if homecls not in class_bases: - # if the resulting object does not live somewhere in the - # mro, drop it and search the mro manually - homecls = None - last_cls = None - # first look in the classes - for srch_cls in class_bases: - srch_obj = getattr(srch_cls, name, None) - if srch_obj is get_obj: - last_cls = srch_cls - # then check the metaclasses - for srch_cls in metamro: - try: - srch_obj = srch_cls.__getattr__(cls, name) - except AttributeError: - continue - if srch_obj is get_obj: - last_cls = srch_cls - if last_cls is not None: - homecls = last_cls - for base in all_bases: - if name in base.__dict__: - dict_obj = base.__dict__[name] - if homecls not in metamro: - homecls = base - break - if homecls is None: - # unable to locate the attribute anywhere, most likely due to - # buggy custom __dir__; discard and move on - continue - obj = get_obj if get_obj is not None else dict_obj - # Classify the object or its descriptor. - if isinstance(dict_obj, (staticmethod, types.BuiltinMethodType)): - kind = "static method" - obj = dict_obj - elif isinstance(dict_obj, (classmethod, types.ClassMethodDescriptorType)): - kind = "class method" - obj = dict_obj - elif isinstance(dict_obj, property): - kind = "property" - obj = dict_obj - elif isroutine(obj): - kind = "method" - else: - kind = "data" - result.append(Attribute(name, kind, homecls, obj)) - processed.add(name) - return result - -# ----------------------------------------------------------- class helpers - -def getmro(cls): - "Return tuple of base classes (including cls) in method resolution order." - return cls.__mro__ - -# -------------------------------------------------------- function helpers - -def unwrap(func, *, stop=None): - """Get the object wrapped by *func*. - - Follows the chain of :attr:`__wrapped__` attributes returning the last - object in the chain. - - *stop* is an optional callback accepting an object in the wrapper chain - as its sole argument that allows the unwrapping to be terminated early if - the callback returns a true value. If the callback never returns a true - value, the last object in the chain is returned as usual. For example, - :func:`signature` uses this to stop unwrapping if any object in the - chain has a ``__signature__`` attribute defined. - - :exc:`ValueError` is raised if a cycle is encountered. - - """ - f = func # remember the original func for error reporting - # Memoise by id to tolerate non-hashable objects, but store objects to - # ensure they aren't destroyed, which would allow their IDs to be reused. 
- memo = {id(f): f} - recursion_limit = sys.getrecursionlimit() - while not isinstance(func, type) and hasattr(func, '__wrapped__'): - if stop is not None and stop(func): - break - func = func.__wrapped__ - id_func = id(func) - if (id_func in memo) or (len(memo) >= recursion_limit): - raise ValueError('wrapper loop when unwrapping {!r}'.format(f)) - memo[id_func] = func - return func - -# -------------------------------------------------- source code extraction -def indentsize(line): - """Return the indent size, in spaces, at the start of a line of text.""" - expline = line.expandtabs() - return len(expline) - len(expline.lstrip()) - -def _findclass(func): - cls = sys.modules.get(func.__module__) - if cls is None: - return None - for name in func.__qualname__.split('.')[:-1]: - cls = getattr(cls, name) - if not isclass(cls): - return None - return cls - -def _finddoc(obj): - if isclass(obj): - for base in obj.__mro__: - if base is not object: - try: - doc = base.__doc__ - except AttributeError: - continue - if doc is not None: - return doc - return None - - if ismethod(obj): - name = obj.__func__.__name__ - self = obj.__self__ - if (isclass(self) and - getattr(getattr(self, name, None), '__func__') is obj.__func__): - # classmethod - cls = self - else: - cls = self.__class__ - elif isfunction(obj): - name = obj.__name__ - cls = _findclass(obj) - if cls is None or getattr(cls, name) is not obj: - return None - elif isbuiltin(obj): - name = obj.__name__ - self = obj.__self__ - if (isclass(self) and - self.__qualname__ + '.' + name == obj.__qualname__): - # classmethod - cls = self - else: - cls = self.__class__ - # Should be tested before isdatadescriptor(). - elif isinstance(obj, property): - name = obj.__name__ - cls = _findclass(obj.fget) - if cls is None or getattr(cls, name) is not obj: - return None - elif ismethoddescriptor(obj) or isdatadescriptor(obj): - name = obj.__name__ - cls = obj.__objclass__ - if getattr(cls, name) is not obj: - return None - if ismemberdescriptor(obj): - slots = getattr(cls, '__slots__', None) - if isinstance(slots, dict) and name in slots: - return slots[name] - else: - return None - for base in cls.__mro__: - try: - doc = getattr(base, name).__doc__ - except AttributeError: - continue - if doc is not None: - return doc - return None - -def getdoc(object): - """Get the documentation string for an object. - - All tabs are expanded to spaces. To clean up docstrings that are - indented to line up with blocks of code, any whitespace than can be - uniformly removed from the second line onwards is removed.""" - try: - doc = object.__doc__ - except AttributeError: - return None - if doc is None: - try: - doc = _finddoc(object) - except (AttributeError, TypeError): - return None - if not isinstance(doc, str): - return None - return cleandoc(doc) - -def cleandoc(doc): - """Clean up indentation from docstrings. - - Any whitespace that can be uniformly removed from the second line - onwards is removed.""" - lines = doc.expandtabs().split('\n') - - # Find minimum indentation of any non-blank lines after first line. - margin = sys.maxsize - for line in lines[1:]: - content = len(line.lstrip(' ')) - if content: - indent = len(line) - content - margin = min(margin, indent) - # Remove indentation. - if lines: - lines[0] = lines[0].lstrip(' ') - if margin < sys.maxsize: - for i in range(1, len(lines)): - lines[i] = lines[i][margin:] - # Remove any trailing or leading blank lines. 
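For illustration, a small example of the __wrapped__ chain that unwrap() walks; `logged` and `greet` are made-up names:

import functools
import inspect

def logged(func):
    @functools.wraps(func)          # sets wrapper.__wrapped__ = func
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

@logged
@logged
def greet(name):
    return f"hello {name}"

# unwrap() follows the __wrapped__ chain back to the innermost function.
assert inspect.unwrap(greet) is greet.__wrapped__.__wrapped__
assert greet("py") == "hello py"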
- while lines and not lines[-1]: - lines.pop() - while lines and not lines[0]: - lines.pop(0) - return '\n'.join(lines) - - -def getfile(object): - """Work out which source or compiled file an object was defined in.""" - if ismodule(object): - if getattr(object, '__file__', None): - return object.__file__ - raise TypeError('{!r} is a built-in module'.format(object)) - if isclass(object): - if hasattr(object, '__module__'): - module = sys.modules.get(object.__module__) - if getattr(module, '__file__', None): - return module.__file__ - if object.__module__ == '__main__': - raise OSError('source code not available') - raise TypeError('{!r} is a built-in class'.format(object)) - if ismethod(object): - object = object.__func__ - if isfunction(object): - object = object.__code__ - if istraceback(object): - object = object.tb_frame - if isframe(object): - object = object.f_code - if iscode(object): - return object.co_filename - raise TypeError('module, class, method, function, traceback, frame, or ' - 'code object was expected, got {}'.format( - type(object).__name__)) - -def getmodulename(path): - """Return the module name for a given file, or None.""" - fname = os.path.basename(path) - # Check for paths that look like an actual module file - suffixes = [(-len(suffix), suffix) - for suffix in importlib.machinery.all_suffixes()] - suffixes.sort() # try longest suffixes first, in case they overlap - for neglen, suffix in suffixes: - if fname.endswith(suffix): - return fname[:neglen] - return None - -def getsourcefile(object): - """Return the filename that can be used to locate an object's source. - Return None if no way can be identified to get the source. - """ - filename = getfile(object) - all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:] - all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:] - if any(filename.endswith(s) for s in all_bytecode_suffixes): - filename = (os.path.splitext(filename)[0] + - importlib.machinery.SOURCE_SUFFIXES[0]) - elif any(filename.endswith(s) for s in - importlib.machinery.EXTENSION_SUFFIXES): - return None - elif filename.endswith(".fwork"): - # Apple mobile framework markers are another type of non-source file - return None - - # return a filename found in the linecache even if it doesn't exist on disk - if filename in linecache.cache: - return filename - if os.path.exists(filename): - return filename - # only return a non-existent filename if the module has a PEP 302 loader - module = getmodule(object, filename) - if getattr(module, '__loader__', None) is not None: - return filename - elif getattr(getattr(module, "__spec__", None), "loader", None) is not None: - return filename - -def getabsfile(object, _filename=None): - """Return an absolute path to the source or compiled file for an object. 
-
-    The idea is for each object to have a unique origin, so this routine
-    normalizes the result as much as possible."""
-    if _filename is None:
-        _filename = getsourcefile(object) or getfile(object)
-    return os.path.normcase(os.path.abspath(_filename))
-
-modulesbyfile = {}
-_filesbymodname = {}
-
-def getmodule(object, _filename=None):
-    """Return the module an object was defined in, or None if not found."""
-    if ismodule(object):
-        return object
-    if hasattr(object, '__module__'):
-        return sys.modules.get(object.__module__)
-
-    # Try the filename to modulename cache
-    if _filename is not None and _filename in modulesbyfile:
-        return sys.modules.get(modulesbyfile[_filename])
-    # Try the cache again with the absolute file name
-    try:
-        file = getabsfile(object, _filename)
-    except (TypeError, FileNotFoundError):
-        return None
-    if file in modulesbyfile:
-        return sys.modules.get(modulesbyfile[file])
-    # Update the filename to module name cache and check yet again
-    # Copy sys.modules in order to cope with changes while iterating
-    for modname, module in sys.modules.copy().items():
-        if ismodule(module) and hasattr(module, '__file__'):
-            f = module.__file__
-            if f == _filesbymodname.get(modname, None):
-                # Have already mapped this module, so skip it
-                continue
-            _filesbymodname[modname] = f
-            f = getabsfile(module)
-            # Always map to the name the module knows itself by
-            modulesbyfile[f] = modulesbyfile[
-                os.path.realpath(f)] = module.__name__
-    if file in modulesbyfile:
-        return sys.modules.get(modulesbyfile[file])
-    # Check the main module
-    main = sys.modules['__main__']
-    if not hasattr(object, '__name__'):
-        return None
-    if hasattr(main, object.__name__):
-        mainobject = getattr(main, object.__name__)
-        if mainobject is object:
-            return main
-    # Check builtins
-    builtin = sys.modules['builtins']
-    if hasattr(builtin, object.__name__):
-        builtinobject = getattr(builtin, object.__name__)
-        if builtinobject is object:
-            return builtin
-
-
-class ClassFoundException(Exception):
-    pass
-
-
-def findsource(object):
-    """Return the entire source file and starting line number for an object.
-
-    The argument may be a module, class, method, function, traceback, frame,
-    or code object. The source code is returned as a list of all the lines
-    in the file and the line number indexes a line in that list. An OSError
-    is raised if the source code cannot be retrieved."""
-
-    file = getsourcefile(object)
-    if file:
-        # Invalidate cache if needed.
-        linecache.checkcache(file)
-    else:
-        file = getfile(object)
-        # Allow filenames in form of "<something>" to pass through.
-        # `doctest` monkeypatches `linecache` module to enable
-        # inspection, so let `linecache.getlines` to be called.
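A brief usage sketch of getmodule() against stdlib objects:

import inspect
import json

# Functions carry __module__, so the lookup is a plain sys.modules fetch;
# the filename caches above handle objects that lack __module__.
assert inspect.getmodule(json.dumps) is json
assert inspect.getmodule(inspect.getmodule) is inspect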
- if (not (file.startswith('<') and file.endswith('>'))) or file.endswith('.fwork'): - raise OSError('source code not available') - - module = getmodule(object, file) - if module: - lines = linecache.getlines(file, module.__dict__) - if not lines and file.startswith('<') and hasattr(object, "__code__"): - lines = linecache._getlines_from_code(object.__code__) - else: - lines = linecache.getlines(file) - if not lines: - raise OSError('could not get source code') - - if ismodule(object): - return lines, 0 - - if isclass(object): - try: - lnum = vars(object)['__firstlineno__'] - 1 - except (TypeError, KeyError): - raise OSError('source code not available') - if lnum >= len(lines): - raise OSError('lineno is out of bounds') - return lines, lnum - - if ismethod(object): - object = object.__func__ - if isfunction(object): - object = object.__code__ - if istraceback(object): - object = object.tb_frame - if isframe(object): - object = object.f_code - if iscode(object): - if not hasattr(object, 'co_firstlineno'): - raise OSError('could not find function definition') - lnum = object.co_firstlineno - 1 - if lnum >= len(lines): - raise OSError('lineno is out of bounds') - return lines, lnum - raise OSError('could not find code object') - -def getcomments(object): - """Get lines of comments immediately preceding an object's source code. - - Returns None when source can't be found. - """ - try: - lines, lnum = findsource(object) - except (OSError, TypeError): - return None - - if ismodule(object): - # Look for a comment block at the top of the file. - start = 0 - if lines and lines[0][:2] == '#!': start = 1 - while start < len(lines) and lines[start].strip() in ('', '#'): - start = start + 1 - if start < len(lines) and lines[start][:1] == '#': - comments = [] - end = start - while end < len(lines) and lines[end][:1] == '#': - comments.append(lines[end].expandtabs()) - end = end + 1 - return ''.join(comments) - - # Look for a preceding block of comments at the same indentation. - elif lnum > 0: - indent = indentsize(lines[lnum]) - end = lnum - 1 - if end >= 0 and lines[end].lstrip()[:1] == '#' and \ - indentsize(lines[end]) == indent: - comments = [lines[end].expandtabs().lstrip()] - if end > 0: - end = end - 1 - comment = lines[end].expandtabs().lstrip() - while comment[:1] == '#' and indentsize(lines[end]) == indent: - comments[:0] = [comment] - end = end - 1 - if end < 0: break - comment = lines[end].expandtabs().lstrip() - while comments and comments[0].strip() == '#': - comments[:1] = [] - while comments and comments[-1].strip() == '#': - comments[-1:] = [] - return ''.join(comments) - -class EndOfBlock(Exception): pass - -class BlockFinder: - """Provide a tokeneater() method to detect the end of a code block.""" - def __init__(self): - self.indent = 0 - self.singleline = False - self.started = False - self.passline = False - self.indecorator = False - self.last = 1 - self.body_col0 = None - - def tokeneater(self, type, token, srowcol, erowcol, line): - if not self.started and not self.indecorator: - if type in (tokenize.INDENT, tokenize.COMMENT, tokenize.NL): - pass - elif token == "async": - pass - # skip any decorators - elif token == "@": - self.indecorator = True - else: - # For "def" and "class" scan to the end of the block. - # For "lambda" and generator expression scan to - # the end of the logical line. 
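A minimal sketch of getcomments(); it needs a reachable source file (run the snippet from a file, not a REPL), and `double` is an invented name:

import inspect

# doubles its argument
def double(x):
    return x * 2

# The comment block directly above the definition comes back verbatim.
print(inspect.getcomments(double))  # -> "# doubles its argument\n"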
-                self.singleline = token not in ("def", "class")
-                self.started = True
-            self.passline = True    # skip to the end of the line
-        elif type == tokenize.NEWLINE:
-            self.passline = False   # stop skipping when a NEWLINE is seen
-            self.last = srowcol[0]
-            if self.singleline:
-                raise EndOfBlock
-            # hitting a NEWLINE when in a decorator without args
-            # ends the decorator
-            if self.indecorator:
-                self.indecorator = False
-        elif self.passline:
-            pass
-        elif type == tokenize.INDENT:
-            if self.body_col0 is None and self.started:
-                self.body_col0 = erowcol[1]
-            self.indent = self.indent + 1
-            self.passline = True
-        elif type == tokenize.DEDENT:
-            self.indent = self.indent - 1
-            # the end of matching indent/dedent pairs end a block
-            # (note that this only works for "def"/"class" blocks,
-            # not e.g. for "if: else:" or "try: finally:" blocks)
-            if self.indent <= 0:
-                raise EndOfBlock
-        elif type == tokenize.COMMENT:
-            if self.body_col0 is not None and srowcol[1] >= self.body_col0:
-                # Include comments if indented at least as much as the block
-                self.last = srowcol[0]
-        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
-            # any other token on the same indentation level end the previous
-            # block as well, except the pseudo-tokens COMMENT and NL.
-            raise EndOfBlock
-
-def getblock(lines):
-    """Extract the block of code at the top of the given list of lines."""
-    blockfinder = BlockFinder()
-    try:
-        tokens = tokenize.generate_tokens(iter(lines).__next__)
-        for _token in tokens:
-            blockfinder.tokeneater(*_token)
-    except (EndOfBlock, IndentationError):
-        pass
-    except SyntaxError as e:
-        if "unmatched" not in e.msg:
-            raise e from None
-        _, *_token_info = _token
-        try:
-            blockfinder.tokeneater(tokenize.NEWLINE, *_token_info)
-        except (EndOfBlock, IndentationError):
-            pass
-    return lines[:blockfinder.last]
-
-def getsourcelines(object):
-    """Return a list of source lines and starting line number for an object.
-
-    The argument may be a module, class, method, function, traceback, frame,
-    or code object. The source code is returned as a list of the lines
-    corresponding to the object and the line number indicates where in the
-    original source file the first line of code was found. An OSError is
-    raised if the source code cannot be retrieved."""
-    object = unwrap(object)
-    lines, lnum = findsource(object)
-
-    if istraceback(object):
-        object = object.tb_frame
-
-    # for module or frame that corresponds to module, return all source lines
-    if (ismodule(object) or
-        (isframe(object) and object.f_code.co_name == "<module>")):
-        return lines, 0
-    else:
-        return getblock(lines[lnum:]), lnum + 1
-
-def getsource(object):
-    """Return the text of the source code for an object.
-
-    The argument may be a module, class, method, function, traceback, frame,
-    or code object. The source code is returned as a single string. An
-    OSError is raised if the source code cannot be retrieved."""
-    lines, lnum = getsourcelines(object)
-    return ''.join(lines)
-
-# --------------------------------------------------- class tree extraction
-def walktree(classes, children, parent):
-    """Recursive helper function for getclasstree()."""
-    results = []
-    classes.sort(key=attrgetter('__module__', '__name__'))
-    for c in classes:
-        results.append((c, c.__bases__))
-        if c in children:
-            results.append(walktree(children[c], children, c))
-    return results
-
-def getclasstree(classes, unique=False):
-    """Arrange the given list of classes into a hierarchy of nested lists.
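A short usage sketch of the source-extraction helpers, targeting a stdlib function:

import inspect
import textwrap

# getsourcelines() returns (lines, first_line_number); getsource() joins the lines.
lines, lineno = inspect.getsourcelines(textwrap.dedent)
assert inspect.getsource(textwrap.dedent) == ''.join(lines)
assert lines[0].startswith('def dedent')
print(f"textwrap.dedent starts at line {lineno}")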
- - Where a nested list appears, it contains classes derived from the class - whose entry immediately precedes the list. Each entry is a 2-tuple - containing a class and a tuple of its base classes. If the 'unique' - argument is true, exactly one entry appears in the returned structure - for each class in the given list. Otherwise, classes using multiple - inheritance and their descendants will appear multiple times.""" - children = {} - roots = [] - for c in classes: - if c.__bases__: - for parent in c.__bases__: - if parent not in children: - children[parent] = [] - if c not in children[parent]: - children[parent].append(c) - if unique and parent in classes: break - elif c not in roots: - roots.append(c) - for parent in children: - if parent not in classes: - roots.append(parent) - return walktree(roots, children, None) - -# ------------------------------------------------ argument list extraction -Arguments = namedtuple('Arguments', 'args, varargs, varkw') - -def getargs(co): - """Get information about the arguments accepted by a code object. - - Three things are returned: (args, varargs, varkw), where - 'args' is the list of argument names. Keyword-only arguments are - appended. 'varargs' and 'varkw' are the names of the * and ** - arguments or None.""" - if not iscode(co): - raise TypeError('{!r} is not a code object'.format(co)) - - names = co.co_varnames - nargs = co.co_argcount - nkwargs = co.co_kwonlyargcount - args = list(names[:nargs]) - kwonlyargs = list(names[nargs:nargs+nkwargs]) - - nargs += nkwargs - varargs = None - if co.co_flags & CO_VARARGS: - varargs = co.co_varnames[nargs] - nargs = nargs + 1 - varkw = None - if co.co_flags & CO_VARKEYWORDS: - varkw = co.co_varnames[nargs] - return Arguments(args + kwonlyargs, varargs, varkw) - - -FullArgSpec = namedtuple('FullArgSpec', - 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations') - -def getfullargspec(func): - """Get the names and default values of a callable object's parameters. - - A tuple of seven things is returned: - (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations). - 'args' is a list of the parameter names. - 'varargs' and 'varkw' are the names of the * and ** parameters or None. - 'defaults' is an n-tuple of the default values of the last n parameters. - 'kwonlyargs' is a list of keyword-only parameter names. - 'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults. - 'annotations' is a dictionary mapping parameter names to annotations. - - Notable differences from inspect.signature(): - - the "self" parameter is always reported, even for bound methods - - wrapper chains defined by __wrapped__ *not* unwrapped automatically - """ - try: - # Re: `skip_bound_arg=False` - # - # There is a notable difference in behaviour between getfullargspec - # and Signature: the former always returns 'self' parameter for bound - # methods, whereas the Signature always shows the actual calling - # signature of the passed object. - # - # To simulate this behaviour, we "unbind" bound methods, to trick - # inspect.signature to always return their first parameter ("self", - # usually) - - # Re: `follow_wrapper_chains=False` - # - # getfullargspec() historically ignored __wrapped__ attributes, - # so we ensure that remains the case in 3.3+ - - sig = _signature_from_callable(func, - follow_wrapper_chains=False, - skip_bound_arg=False, - sigcls=Signature, - eval_str=False) - except Exception as ex: - # Most of the times 'signature' will raise ValueError. 
- # But, it can also raise AttributeError, and, maybe something - # else. So to be fully backwards compatible, we catch all - # possible exceptions here, and reraise a TypeError. - raise TypeError('unsupported callable') from ex - - args = [] - varargs = None - varkw = None - posonlyargs = [] - kwonlyargs = [] - annotations = {} - defaults = () - kwdefaults = {} - - if sig.return_annotation is not sig.empty: - annotations['return'] = sig.return_annotation - - for param in sig.parameters.values(): - kind = param.kind - name = param.name - - if kind is _POSITIONAL_ONLY: - posonlyargs.append(name) - if param.default is not param.empty: - defaults += (param.default,) - elif kind is _POSITIONAL_OR_KEYWORD: - args.append(name) - if param.default is not param.empty: - defaults += (param.default,) - elif kind is _VAR_POSITIONAL: - varargs = name - elif kind is _KEYWORD_ONLY: - kwonlyargs.append(name) - if param.default is not param.empty: - kwdefaults[name] = param.default - elif kind is _VAR_KEYWORD: - varkw = name - - if param.annotation is not param.empty: - annotations[name] = param.annotation - - if not kwdefaults: - # compatibility with 'func.__kwdefaults__' - kwdefaults = None - - if not defaults: - # compatibility with 'func.__defaults__' - defaults = None - - return FullArgSpec(posonlyargs + args, varargs, varkw, defaults, - kwonlyargs, kwdefaults, annotations) - - -ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals') - -def getargvalues(frame): - """Get information about arguments passed into a particular frame. - - A tuple of four things is returned: (args, varargs, varkw, locals). - 'args' is a list of the argument names. - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'locals' is the locals dictionary of the given frame.""" - args, varargs, varkw = getargs(frame.f_code) - return ArgInfo(args, varargs, varkw, frame.f_locals) - -def formatannotation(annotation, base_module=None): - if getattr(annotation, '__module__', None) == 'typing': - def repl(match): - text = match.group() - return text.removeprefix('typing.') - return re.sub(r'[\w\.]+', repl, repr(annotation)) - if isinstance(annotation, types.GenericAlias): - return str(annotation) - if isinstance(annotation, type): - if annotation.__module__ in ('builtins', base_module): - return annotation.__qualname__ - return annotation.__module__+'.'+annotation.__qualname__ - return repr(annotation) - -def formatannotationrelativeto(object): - module = getattr(object, '__module__', None) - def _formatannotation(annotation): - return formatannotation(annotation, module) - return _formatannotation - - -def formatargvalues(args, varargs, varkw, locals, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value)): - """Format an argument spec from the 4 values returned by getargvalues. - - The first four arguments are (args, varargs, varkw, locals). The - next four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. 
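A compact check of what getfullargspec() reports for each parameter kind; `f` is an arbitrary example function:

import inspect

def f(a, b=1, *args, c, d=2, **kwargs):
    pass

spec = inspect.getfullargspec(f)
assert spec.args == ['a', 'b']                 # positional-or-keyword
assert spec.varargs == 'args' and spec.varkw == 'kwargs'
assert spec.defaults == (1,)                   # defaults for the last n args
assert spec.kwonlyargs == ['c', 'd']
assert spec.kwonlydefaults == {'d': 2}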
The ninth - argument is an optional function to format the sequence of arguments.""" - def convert(name, locals=locals, - formatarg=formatarg, formatvalue=formatvalue): - return formatarg(name) + formatvalue(locals[name]) - specs = [] - for i in range(len(args)): - specs.append(convert(args[i])) - if varargs: - specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) - if varkw: - specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) - return '(' + ', '.join(specs) + ')' - -def _missing_arguments(f_name, argnames, pos, values): - names = [repr(name) for name in argnames if name not in values] - missing = len(names) - if missing == 1: - s = names[0] - elif missing == 2: - s = "{} and {}".format(*names) - else: - tail = ", {} and {}".format(*names[-2:]) - del names[-2:] - s = ", ".join(names) + tail - raise TypeError("%s() missing %i required %s argument%s: %s" % - (f_name, missing, - "positional" if pos else "keyword-only", - "" if missing == 1 else "s", s)) - -def _too_many(f_name, args, kwonly, varargs, defcount, given, values): - atleast = len(args) - defcount - kwonly_given = len([arg for arg in kwonly if arg in values]) - if varargs: - plural = atleast != 1 - sig = "at least %d" % (atleast,) - elif defcount: - plural = True - sig = "from %d to %d" % (atleast, len(args)) - else: - plural = len(args) != 1 - sig = str(len(args)) - kwonly_sig = "" - if kwonly_given: - msg = " positional argument%s (and %d keyword-only argument%s)" - kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given, - "s" if kwonly_given != 1 else "")) - raise TypeError("%s() takes %s positional argument%s but %d%s %s given" % - (f_name, sig, "s" if plural else "", given, kwonly_sig, - "was" if given == 1 and not kwonly_given else "were")) - -def getcallargs(func, /, *positional, **named): - """Get the mapping of arguments to values. 
- - A dict is returned, with keys the function argument names (including the - names of the * and ** arguments, if any), and values the respective bound - values from 'positional' and 'named'.""" - spec = getfullargspec(func) - args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec - f_name = func.__name__ - arg2value = {} - - - if ismethod(func) and func.__self__ is not None: - # implicit 'self' (or 'cls' for classmethods) argument - positional = (func.__self__,) + positional - num_pos = len(positional) - num_args = len(args) - num_defaults = len(defaults) if defaults else 0 - - n = min(num_pos, num_args) - for i in range(n): - arg2value[args[i]] = positional[i] - if varargs: - arg2value[varargs] = tuple(positional[n:]) - possible_kwargs = set(args + kwonlyargs) - if varkw: - arg2value[varkw] = {} - for kw, value in named.items(): - if kw not in possible_kwargs: - if not varkw: - raise TypeError("%s() got an unexpected keyword argument %r" % - (f_name, kw)) - arg2value[varkw][kw] = value - continue - if kw in arg2value: - raise TypeError("%s() got multiple values for argument %r" % - (f_name, kw)) - arg2value[kw] = value - if num_pos > num_args and not varargs: - _too_many(f_name, args, kwonlyargs, varargs, num_defaults, - num_pos, arg2value) - if num_pos < num_args: - req = args[:num_args - num_defaults] - for arg in req: - if arg not in arg2value: - _missing_arguments(f_name, req, True, arg2value) - for i, arg in enumerate(args[num_args - num_defaults:]): - if arg not in arg2value: - arg2value[arg] = defaults[i] - missing = 0 - for kwarg in kwonlyargs: - if kwarg not in arg2value: - if kwonlydefaults and kwarg in kwonlydefaults: - arg2value[kwarg] = kwonlydefaults[kwarg] - else: - missing += 1 - if missing: - _missing_arguments(f_name, kwonlyargs, False, arg2value) - return arg2value - -ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound') - -def getclosurevars(func): - """ - Get the mapping of free variables to their current values. - - Returns a named tuple of dicts mapping the current nonlocal, global - and builtin references as seen by the body of the function. A final - set of unbound names that could not be resolved is also provided. 
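A sketch of getcallargs() reproducing the interpreter's binding rules; `connect` is a made-up function:

import inspect

def connect(host, port=80, *, timeout=10.0):
    pass

# The unfilled default for "port" is applied, just as in a real call.
bound = inspect.getcallargs(connect, 'example.com', timeout=2.5)
assert bound == {'host': 'example.com', 'port': 80, 'timeout': 2.5}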
- """ - - if ismethod(func): - func = func.__func__ - - if not isfunction(func): - raise TypeError("{!r} is not a Python function".format(func)) - - code = func.__code__ - # Nonlocal references are named in co_freevars and resolved - # by looking them up in __closure__ by positional index - if func.__closure__ is None: - nonlocal_vars = {} - else: - nonlocal_vars = { - var : cell.cell_contents - for var, cell in zip(code.co_freevars, func.__closure__) - } - - # Global and builtin references are named in co_names and resolved - # by looking them up in __globals__ or __builtins__ - global_ns = func.__globals__ - builtin_ns = global_ns.get("__builtins__", builtins.__dict__) - if ismodule(builtin_ns): - builtin_ns = builtin_ns.__dict__ - global_vars = {} - builtin_vars = {} - unbound_names = set() - global_names = set() - for instruction in dis.get_instructions(code): - opname = instruction.opname - name = instruction.argval - if opname == "LOAD_ATTR": - unbound_names.add(name) - elif opname == "LOAD_GLOBAL": - global_names.add(name) - for name in global_names: - try: - global_vars[name] = global_ns[name] - except KeyError: - try: - builtin_vars[name] = builtin_ns[name] - except KeyError: - unbound_names.add(name) - - return ClosureVars(nonlocal_vars, global_vars, - builtin_vars, unbound_names) - -# -------------------------------------------------- stack frame extraction - -_Traceback = namedtuple('_Traceback', 'filename lineno function code_context index') - -class Traceback(_Traceback): - def __new__(cls, filename, lineno, function, code_context, index, *, positions=None): - instance = super().__new__(cls, filename, lineno, function, code_context, index) - instance.positions = positions - return instance - - def __repr__(self): - return ('Traceback(filename={!r}, lineno={!r}, function={!r}, ' - 'code_context={!r}, index={!r}, positions={!r})'.format( - self.filename, self.lineno, self.function, self.code_context, - self.index, self.positions)) - -def _get_code_position_from_tb(tb): - code, instruction_index = tb.tb_frame.f_code, tb.tb_lasti - return _get_code_position(code, instruction_index) - -def _get_code_position(code, instruction_index): - if instruction_index < 0: - return (None, None, None, None) - positions_gen = code.co_positions() - # The nth entry in code.co_positions() corresponds to instruction (2*n)th since Python 3.10+ - return next(itertools.islice(positions_gen, instruction_index // 2, None)) - -def getframeinfo(frame, context=1): - """Get information about a frame or traceback object. - - A tuple of five things is returned: the filename, the line number of - the current line, the function name, a list of lines of context from - the source code, and the index of the current line within that list. 
- The optional second argument specifies the number of lines of context - to return, which are centered around the current line.""" - if istraceback(frame): - positions = _get_code_position_from_tb(frame) - lineno = frame.tb_lineno - frame = frame.tb_frame - else: - lineno = frame.f_lineno - positions = _get_code_position(frame.f_code, frame.f_lasti) - - if positions[0] is None: - frame, *positions = (frame, lineno, *positions[1:]) - else: - frame, *positions = (frame, *positions) - - lineno = positions[0] - - if not isframe(frame): - raise TypeError('{!r} is not a frame or traceback object'.format(frame)) - - filename = getsourcefile(frame) or getfile(frame) - if context > 0: - start = lineno - 1 - context//2 - try: - lines, lnum = findsource(frame) - except OSError: - lines = index = None - else: - start = max(0, min(start, len(lines) - context)) - lines = lines[start:start+context] - index = lineno - 1 - start - else: - lines = index = None - - return Traceback(filename, lineno, frame.f_code.co_name, lines, - index, positions=dis.Positions(*positions)) - -def getlineno(frame): - """Get the line number from a frame object, allowing for optimization.""" - # FrameType.f_lineno is now a descriptor that grovels co_lnotab - return frame.f_lineno - -_FrameInfo = namedtuple('_FrameInfo', ('frame',) + Traceback._fields) -class FrameInfo(_FrameInfo): - def __new__(cls, frame, filename, lineno, function, code_context, index, *, positions=None): - instance = super().__new__(cls, frame, filename, lineno, function, code_context, index) - instance.positions = positions - return instance - - def __repr__(self): - return ('FrameInfo(frame={!r}, filename={!r}, lineno={!r}, function={!r}, ' - 'code_context={!r}, index={!r}, positions={!r})'.format( - self.frame, self.filename, self.lineno, self.function, - self.code_context, self.index, self.positions)) - -def getouterframes(frame, context=1): - """Get a list of records for a frame and all higher (calling) frames. - - Each record contains a frame object, filename, line number, function - name, a list of lines of context, and index within the context.""" - framelist = [] - while frame: - traceback_info = getframeinfo(frame, context) - frameinfo = (frame,) + traceback_info - framelist.append(FrameInfo(*frameinfo, positions=traceback_info.positions)) - frame = frame.f_back - return framelist - -def getinnerframes(tb, context=1): - """Get a list of records for a traceback's frame and all lower frames. 
- - Each record contains a frame object, filename, line number, function - name, a list of lines of context, and index within the context.""" - framelist = [] - while tb: - traceback_info = getframeinfo(tb, context) - frameinfo = (tb.tb_frame,) + traceback_info - framelist.append(FrameInfo(*frameinfo, positions=traceback_info.positions)) - tb = tb.tb_next - return framelist - -def currentframe(): - """Return the frame of the caller or None if this is not possible.""" - return sys._getframe(1) if hasattr(sys, "_getframe") else None - -def stack(context=1): - """Return a list of records for the stack above the caller's frame.""" - return getouterframes(sys._getframe(1), context) - -def trace(context=1): - """Return a list of records for the stack below the current exception.""" - exc = sys.exception() - tb = None if exc is None else exc.__traceback__ - return getinnerframes(tb, context) - - -# ------------------------------------------------ static version of getattr - -_sentinel = object() -_static_getmro = type.__dict__['__mro__'].__get__ -_get_dunder_dict_of_class = type.__dict__["__dict__"].__get__ - - -def _check_instance(obj, attr): - instance_dict = {} - try: - instance_dict = object.__getattribute__(obj, "__dict__") - except AttributeError: - pass - return dict.get(instance_dict, attr, _sentinel) - - -def _check_class(klass, attr): - for entry in _static_getmro(klass): - if _shadowed_dict(type(entry)) is _sentinel and attr in entry.__dict__: - return entry.__dict__[attr] - return _sentinel - - -@functools.lru_cache() -def _shadowed_dict_from_weakref_mro_tuple(*weakref_mro): - for weakref_entry in weakref_mro: - # Normally we'd have to check whether the result of weakref_entry() - # is None here, in case the object the weakref is pointing to has died. - # In this specific case, however, we know that the only caller of this - # function is `_shadowed_dict()`, and that therefore this weakref is - # guaranteed to point to an object that is still alive. - entry = weakref_entry() - dunder_dict = _get_dunder_dict_of_class(entry) - if '__dict__' in dunder_dict: - class_dict = dunder_dict['__dict__'] - if not (type(class_dict) is types.GetSetDescriptorType and - class_dict.__name__ == "__dict__" and - class_dict.__objclass__ is entry): - return class_dict - return _sentinel - - -def _shadowed_dict(klass): - # gh-118013: the inner function here is decorated with lru_cache for - # performance reasons, *but* make sure not to pass strong references - # to the items in the mro. Doing so can lead to unexpected memory - # consumption in cases where classes are dynamically created and - # destroyed, and the dynamically created classes happen to be the only - # objects that hold strong references to other objects that take up a - # significant amount of memory. - return _shadowed_dict_from_weakref_mro_tuple( - *[make_weakref(entry) for entry in _static_getmro(klass)] - ) - - -def getattr_static(obj, attr, default=_sentinel): - """Retrieve attributes without triggering dynamic lookup via the - descriptor protocol, __getattr__ or __getattribute__. - - Note: this function may not be able to retrieve all attributes - that getattr can fetch (like dynamically created attributes) - and may find attributes that getattr can't (like descriptors - that raise AttributeError). It can also return descriptor objects - instead of instance members in some cases. See the - documentation for details. 
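A small sketch of walking the call stack with the helpers above; the function names are arbitrary:

import inspect

def who_called_me():
    # stack()[0] is this frame's record, stack()[1] is the caller's.
    caller = inspect.stack()[1]
    return caller.function

def outer():
    return who_called_me()

assert outer() == 'outer'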
- """ - instance_result = _sentinel - - objtype = type(obj) - if type not in _static_getmro(objtype): - klass = objtype - dict_attr = _shadowed_dict(klass) - if (dict_attr is _sentinel or - type(dict_attr) is types.MemberDescriptorType): - instance_result = _check_instance(obj, attr) - else: - klass = obj - - klass_result = _check_class(klass, attr) - - if instance_result is not _sentinel and klass_result is not _sentinel: - if _check_class(type(klass_result), "__get__") is not _sentinel and ( - _check_class(type(klass_result), "__set__") is not _sentinel - or _check_class(type(klass_result), "__delete__") is not _sentinel - ): - return klass_result - - if instance_result is not _sentinel: - return instance_result - if klass_result is not _sentinel: - return klass_result - - if obj is klass: - # for types we check the metaclass too - for entry in _static_getmro(type(klass)): - if ( - _shadowed_dict(type(entry)) is _sentinel - and attr in entry.__dict__ - ): - return entry.__dict__[attr] - if default is not _sentinel: - return default - raise AttributeError(attr) - - -# ------------------------------------------------ generator introspection - -GEN_CREATED = 'GEN_CREATED' -GEN_RUNNING = 'GEN_RUNNING' -GEN_SUSPENDED = 'GEN_SUSPENDED' -GEN_CLOSED = 'GEN_CLOSED' - -def getgeneratorstate(generator): - """Get current state of a generator-iterator. - - Possible states are: - GEN_CREATED: Waiting to start execution. - GEN_RUNNING: Currently being executed by the interpreter. - GEN_SUSPENDED: Currently suspended at a yield expression. - GEN_CLOSED: Execution has completed. - """ - if generator.gi_running: - return GEN_RUNNING - if generator.gi_suspended: - return GEN_SUSPENDED - if generator.gi_frame is None: - return GEN_CLOSED - return GEN_CREATED - - -def getgeneratorlocals(generator): - """ - Get the mapping of generator local variables to their current values. - - A dict is returned, with the keys the local variable names and values the - bound values.""" - - if not isgenerator(generator): - raise TypeError("{!r} is not a Python generator".format(generator)) - - frame = getattr(generator, "gi_frame", None) - if frame is not None: - return generator.gi_frame.f_locals - else: - return {} - - -# ------------------------------------------------ coroutine introspection - -CORO_CREATED = 'CORO_CREATED' -CORO_RUNNING = 'CORO_RUNNING' -CORO_SUSPENDED = 'CORO_SUSPENDED' -CORO_CLOSED = 'CORO_CLOSED' - -def getcoroutinestate(coroutine): - """Get current state of a coroutine object. - - Possible states are: - CORO_CREATED: Waiting to start execution. - CORO_RUNNING: Currently being executed by the interpreter. - CORO_SUSPENDED: Currently suspended at an await expression. - CORO_CLOSED: Execution has completed. - """ - if coroutine.cr_running: - return CORO_RUNNING - if coroutine.cr_suspended: - return CORO_SUSPENDED - if coroutine.cr_frame is None: - return CORO_CLOSED - return CORO_CREATED - - -def getcoroutinelocals(coroutine): - """ - Get the mapping of coroutine local variables to their current values. 
- - A dict is returned, with the keys the local variable names and values the - bound values.""" - frame = getattr(coroutine, "cr_frame", None) - if frame is not None: - return frame.f_locals - else: - return {} - - -# ----------------------------------- asynchronous generator introspection - -AGEN_CREATED = 'AGEN_CREATED' -AGEN_RUNNING = 'AGEN_RUNNING' -AGEN_SUSPENDED = 'AGEN_SUSPENDED' -AGEN_CLOSED = 'AGEN_CLOSED' - - -def getasyncgenstate(agen): - """Get current state of an asynchronous generator object. - - Possible states are: - AGEN_CREATED: Waiting to start execution. - AGEN_RUNNING: Currently being executed by the interpreter. - AGEN_SUSPENDED: Currently suspended at a yield expression. - AGEN_CLOSED: Execution has completed. - """ - if agen.ag_running: - return AGEN_RUNNING - if agen.ag_suspended: - return AGEN_SUSPENDED - if agen.ag_frame is None: - return AGEN_CLOSED - return AGEN_CREATED - - -def getasyncgenlocals(agen): - """ - Get the mapping of asynchronous generator local variables to their current - values. - - A dict is returned, with the keys the local variable names and values the - bound values.""" - - if not isasyncgen(agen): - raise TypeError(f"{agen!r} is not a Python async generator") - - frame = getattr(agen, "ag_frame", None) - if frame is not None: - return agen.ag_frame.f_locals - else: - return {} - - -############################################################################### -### Function Signature Object (PEP 362) -############################################################################### - - -_NonUserDefinedCallables = (types.WrapperDescriptorType, - types.MethodWrapperType, - types.ClassMethodDescriptorType, - types.BuiltinFunctionType) - - -def _signature_get_user_defined_method(cls, method_name, *, follow_wrapper_chains=True): - """Private helper. Checks if ``cls`` has an attribute - named ``method_name`` and returns it only if it is a - pure python function. - """ - if method_name == '__new__': - meth = getattr(cls, method_name, None) - else: - meth = getattr_static(cls, method_name, None) - if meth is None: - return None - - # NOTE: The meth may wraps a non-user-defined callable. - # In this case, we treat the meth as non-user-defined callable too. - # (e.g. cls.__new__ generated by @warnings.deprecated) - unwrapped_meth = None - if follow_wrapper_chains: - unwrapped_meth = unwrap(meth, stop=(lambda m: hasattr(m, "__signature__") - or _signature_is_builtin(m))) - - if (isinstance(meth, _NonUserDefinedCallables) - or isinstance(unwrapped_meth, _NonUserDefinedCallables)): - # Once '__signature__' will be added to 'C'-level - # callables, this check won't be necessary - return None - if method_name != '__new__': - meth = _descriptor_get(meth, cls) - return meth - - -def _signature_get_partial(wrapped_sig, partial, extra_args=()): - """Private helper to calculate how 'wrapped_sig' signature will - look like after applying a 'functools.partial' object (or alike) - on it. 
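A quick tour of the generator state machine defined above:

import inspect

def gen():
    yield 1

g = gen()
assert inspect.getgeneratorstate(g) == inspect.GEN_CREATED    # not started
next(g)
assert inspect.getgeneratorstate(g) == inspect.GEN_SUSPENDED  # parked at yield
g.close()
assert inspect.getgeneratorstate(g) == inspect.GEN_CLOSED     # finished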
- """ - - old_params = wrapped_sig.parameters - new_params = OrderedDict(old_params.items()) - - partial_args = partial.args or () - partial_keywords = partial.keywords or {} - - if extra_args: - partial_args = extra_args + partial_args - - try: - ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords) - except TypeError as ex: - msg = 'partial object {!r} has incorrect arguments'.format(partial) - raise ValueError(msg) from ex - - - transform_to_kwonly = False - for param_name, param in old_params.items(): - try: - arg_value = ba.arguments[param_name] - except KeyError: - pass - else: - if param.kind is _POSITIONAL_ONLY: - # If positional-only parameter is bound by partial, - # it effectively disappears from the signature - new_params.pop(param_name) - continue - - if param.kind is _POSITIONAL_OR_KEYWORD: - if param_name in partial_keywords: - # This means that this parameter, and all parameters - # after it should be keyword-only (and var-positional - # should be removed). Here's why. Consider the following - # function: - # foo(a, b, *args, c): - # pass - # - # "partial(foo, a='spam')" will have the following - # signature: "(*, a='spam', b, c)". Because attempting - # to call that partial with "(10, 20)" arguments will - # raise a TypeError, saying that "a" argument received - # multiple values. - transform_to_kwonly = True - # Set the new default value - new_params[param_name] = param.replace(default=arg_value) - else: - # was passed as a positional argument - new_params.pop(param.name) - continue - - if param.kind is _KEYWORD_ONLY: - # Set the new default value - new_params[param_name] = param.replace(default=arg_value) - - if transform_to_kwonly: - assert param.kind is not _POSITIONAL_ONLY - - if param.kind is _POSITIONAL_OR_KEYWORD: - new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY) - new_params[param_name] = new_param - new_params.move_to_end(param_name) - elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD): - new_params.move_to_end(param_name) - elif param.kind is _VAR_POSITIONAL: - new_params.pop(param.name) - - return wrapped_sig.replace(parameters=new_params.values()) - - -def _signature_bound_method(sig): - """Private helper to transform signatures for unbound - functions to bound methods. - """ - - params = tuple(sig.parameters.values()) - - if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY): - raise ValueError('invalid method signature') - - kind = params[0].kind - if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY): - # Drop first parameter: - # '(p1, p2[, ...])' -> '(p2[, ...])' - params = params[1:] - else: - if kind is not _VAR_POSITIONAL: - # Unless we add a new parameter type we never - # get here - raise ValueError('invalid argument type') - # It's a var-positional parameter. - # Do nothing. '(*args[, ...])' -> '(*args[, ...])' - - return sig.replace(parameters=params) - - -def _signature_is_builtin(obj): - """Private helper to test if `obj` is a callable that might - support Argument Clinic's __text_signature__ protocol. - """ - return (isbuiltin(obj) or - ismethoddescriptor(obj) or - isinstance(obj, _NonUserDefinedCallables) or - # Can't test 'isinstance(type)' here, as it would - # also be True for regular python classes. - # Can't use the `in` operator here, as it would - # invoke the custom __eq__ method. - obj is type or obj is object) - - -def _signature_is_functionlike(obj): - """Private helper to test if `obj` is a duck type of FunctionType. 
- A good example of such objects are functions compiled with - Cython, which have all attributes that a pure Python function - would have, but have their code statically compiled. - """ - - if not callable(obj) or isclass(obj): - # All function-like objects are obviously callables, - # and not classes. - return False - - name = getattr(obj, '__name__', None) - code = getattr(obj, '__code__', None) - defaults = getattr(obj, '__defaults__', _void) # Important to use _void ... - kwdefaults = getattr(obj, '__kwdefaults__', _void) # ... and not None here - annotations = getattr(obj, '__annotations__', None) - - return (isinstance(code, types.CodeType) and - isinstance(name, str) and - (defaults is None or isinstance(defaults, tuple)) and - (kwdefaults is None or isinstance(kwdefaults, dict)) and - (isinstance(annotations, (dict)) or annotations is None) ) - - -def _signature_strip_non_python_syntax(signature): - """ - Private helper function. Takes a signature in Argument Clinic's - extended signature format. - - Returns a tuple of two things: - * that signature re-rendered in standard Python syntax, and - * the index of the "self" parameter (generally 0), or None if - the function does not have a "self" parameter. - """ - - if not signature: - return signature, None - - self_parameter = None - - lines = [l.encode('ascii') for l in signature.split('\n') if l] - generator = iter(lines).__next__ - token_stream = tokenize.tokenize(generator) - - text = [] - add = text.append - - current_parameter = 0 - OP = token.OP - ERRORTOKEN = token.ERRORTOKEN - - # token stream always starts with ENCODING token, skip it - t = next(token_stream) - assert t.type == tokenize.ENCODING - - for t in token_stream: - type, string = t.type, t.string - - if type == OP: - if string == ',': - current_parameter += 1 - - if (type == OP) and (string == '$'): - assert self_parameter is None - self_parameter = current_parameter - continue - - add(string) - if (string == ','): - add(' ') - clean_signature = ''.join(text).strip().replace("\n", "") - return clean_signature, self_parameter - - -def _signature_fromstr(cls, obj, s, skip_bound_arg=True): - """Private helper to parse content of '__text_signature__' - and return a Signature based on it. 
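For context, a builtin whose Argument Clinic text signature feeds this parser; the exact text signature string can vary between CPython versions, so it is printed rather than asserted:

import inspect

# divmod's __text_signature__ contains the '$' self marker stripped above;
# inspect.signature() turns it into a real Signature object.
print(divmod.__text_signature__)              # e.g. '($module, x, y, /)'
assert str(inspect.signature(divmod)) == '(x, y, /)'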
- """ - Parameter = cls._parameter_cls - - clean_signature, self_parameter = _signature_strip_non_python_syntax(s) - - program = "def foo" + clean_signature + ": pass" - - try: - module = ast.parse(program) - except SyntaxError: - module = None - - if not isinstance(module, ast.Module): - raise ValueError("{!r} builtin has invalid signature".format(obj)) - - f = module.body[0] - - parameters = [] - empty = Parameter.empty - - module = None - module_dict = {} - - module_name = getattr(obj, '__module__', None) - if not module_name: - objclass = getattr(obj, '__objclass__', None) - module_name = getattr(objclass, '__module__', None) - - if module_name: - module = sys.modules.get(module_name, None) - if module: - module_dict = module.__dict__ - sys_module_dict = sys.modules.copy() - - def parse_name(node): - assert isinstance(node, ast.arg) - if node.annotation is not None: - raise ValueError("Annotations are not currently supported") - return node.arg - - def wrap_value(s): - try: - value = eval(s, module_dict) - except NameError: - try: - value = eval(s, sys_module_dict) - except NameError: - raise ValueError - - if isinstance(value, (str, int, float, bytes, bool, type(None))): - return ast.Constant(value) - raise ValueError - - class RewriteSymbolics(ast.NodeTransformer): - def visit_Attribute(self, node): - a = [] - n = node - while isinstance(n, ast.Attribute): - a.append(n.attr) - n = n.value - if not isinstance(n, ast.Name): - raise ValueError - a.append(n.id) - value = ".".join(reversed(a)) - return wrap_value(value) - - def visit_Name(self, node): - if not isinstance(node.ctx, ast.Load): - raise ValueError() - return wrap_value(node.id) - - def visit_BinOp(self, node): - # Support constant folding of a couple simple binary operations - # commonly used to define default values in text signatures - left = self.visit(node.left) - right = self.visit(node.right) - if not isinstance(left, ast.Constant) or not isinstance(right, ast.Constant): - raise ValueError - if isinstance(node.op, ast.Add): - return ast.Constant(left.value + right.value) - elif isinstance(node.op, ast.Sub): - return ast.Constant(left.value - right.value) - elif isinstance(node.op, ast.BitOr): - return ast.Constant(left.value | right.value) - raise ValueError - - def p(name_node, default_node, default=empty): - name = parse_name(name_node) - if default_node and default_node is not _empty: - try: - default_node = RewriteSymbolics().visit(default_node) - default = ast.literal_eval(default_node) - except ValueError: - raise ValueError("{!r} builtin has invalid signature".format(obj)) from None - parameters.append(Parameter(name, kind, default=default, annotation=empty)) - - # non-keyword-only parameters - total_non_kw_args = len(f.args.posonlyargs) + len(f.args.args) - required_non_kw_args = total_non_kw_args - len(f.args.defaults) - defaults = itertools.chain(itertools.repeat(None, required_non_kw_args), f.args.defaults) - - kind = Parameter.POSITIONAL_ONLY - for (name, default) in zip(f.args.posonlyargs, defaults): - p(name, default) - - kind = Parameter.POSITIONAL_OR_KEYWORD - for (name, default) in zip(f.args.args, defaults): - p(name, default) - - # *args - if f.args.vararg: - kind = Parameter.VAR_POSITIONAL - p(f.args.vararg, empty) - - # keyword-only arguments - kind = Parameter.KEYWORD_ONLY - for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults): - p(name, default) - - # **kwargs - if f.args.kwarg: - kind = Parameter.VAR_KEYWORD - p(f.args.kwarg, empty) - - if self_parameter is not None: - # Possibly strip 
the bound argument: - # - We *always* strip first bound argument if - # it is a module. - # - We don't strip first bound argument if - # skip_bound_arg is False. - assert parameters - _self = getattr(obj, '__self__', None) - self_isbound = _self is not None - self_ismodule = ismodule(_self) - if self_isbound and (self_ismodule or skip_bound_arg): - parameters.pop(0) - else: - # for builtins, self parameter is always positional-only! - p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY) - parameters[0] = p - - return cls(parameters, return_annotation=cls.empty) - - -def _signature_from_builtin(cls, func, skip_bound_arg=True): - """Private helper function to get signature for - builtin callables. - """ - - if not _signature_is_builtin(func): - raise TypeError("{!r} is not a Python builtin " - "function".format(func)) - - s = getattr(func, "__text_signature__", None) - if not s: - raise ValueError("no signature found for builtin {!r}".format(func)) - - return _signature_fromstr(cls, func, s, skip_bound_arg) - - -def _signature_from_function(cls, func, skip_bound_arg=True, - globals=None, locals=None, eval_str=False): - """Private helper: constructs Signature for the given python function.""" - - is_duck_function = False - if not isfunction(func): - if _signature_is_functionlike(func): - is_duck_function = True - else: - # If it's not a pure Python function, and not a duck type - # of pure function: - raise TypeError('{!r} is not a Python function'.format(func)) - - s = getattr(func, "__text_signature__", None) - if s: - return _signature_fromstr(cls, func, s, skip_bound_arg) - - Parameter = cls._parameter_cls - - # Parameter information. - func_code = func.__code__ - pos_count = func_code.co_argcount - arg_names = func_code.co_varnames - posonly_count = func_code.co_posonlyargcount - positional = arg_names[:pos_count] - keyword_only_count = func_code.co_kwonlyargcount - keyword_only = arg_names[pos_count:pos_count + keyword_only_count] - annotations = get_annotations(func, globals=globals, locals=locals, eval_str=eval_str) - defaults = func.__defaults__ - kwdefaults = func.__kwdefaults__ - - if defaults: - pos_default_count = len(defaults) - else: - pos_default_count = 0 - - parameters = [] - - non_default_count = pos_count - pos_default_count - posonly_left = posonly_count - - # Non-keyword-only parameters w/o defaults. - for name in positional[:non_default_count]: - kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD - annotation = annotations.get(name, _empty) - parameters.append(Parameter(name, annotation=annotation, - kind=kind)) - if posonly_left: - posonly_left -= 1 - - # ... w/ defaults. - for offset, name in enumerate(positional[non_default_count:]): - kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD - annotation = annotations.get(name, _empty) - parameters.append(Parameter(name, annotation=annotation, - kind=kind, - default=defaults[offset])) - if posonly_left: - posonly_left -= 1 - - # *args - if func_code.co_flags & CO_VARARGS: - name = arg_names[pos_count + keyword_only_count] - annotation = annotations.get(name, _empty) - parameters.append(Parameter(name, annotation=annotation, - kind=_VAR_POSITIONAL)) - - # Keyword-only parameters. 
-    for name in keyword_only:
-        default = _empty
-        if kwdefaults is not None:
-            default = kwdefaults.get(name, _empty)
-
-        annotation = annotations.get(name, _empty)
-        parameters.append(Parameter(name, annotation=annotation,
-                                    kind=_KEYWORD_ONLY,
-                                    default=default))
-    # **kwargs
-    if func_code.co_flags & CO_VARKEYWORDS:
-        index = pos_count + keyword_only_count
-        if func_code.co_flags & CO_VARARGS:
-            index += 1
-
-        name = arg_names[index]
-        annotation = annotations.get(name, _empty)
-        parameters.append(Parameter(name, annotation=annotation,
-                                    kind=_VAR_KEYWORD))
-
-    # If 'func' is a pure Python function, don't validate the
-    # parameters list (for correct order and defaults); it should be OK.
-    return cls(parameters,
-               return_annotation=annotations.get('return', _empty),
-               __validate_parameters__=is_duck_function)
-
-
-def _descriptor_get(descriptor, obj):
-    if isclass(descriptor):
-        return descriptor
-    get = getattr(type(descriptor), '__get__', _sentinel)
-    if get is _sentinel:
-        return descriptor
-    return get(descriptor, obj, type(obj))
-
-
-def _signature_from_callable(obj, *,
-                             follow_wrapper_chains=True,
-                             skip_bound_arg=True,
-                             globals=None,
-                             locals=None,
-                             eval_str=False,
-                             sigcls):
-
-    """Private helper function to get signature for arbitrary
-    callable objects.
-    """
-
-    _get_signature_of = functools.partial(_signature_from_callable,
-                                follow_wrapper_chains=follow_wrapper_chains,
-                                skip_bound_arg=skip_bound_arg,
-                                globals=globals,
-                                locals=locals,
-                                sigcls=sigcls,
-                                eval_str=eval_str)
-
-    if not callable(obj):
-        raise TypeError('{!r} is not a callable object'.format(obj))
-
-    if isinstance(obj, types.MethodType):
-        # In this case we skip the first parameter of the underlying
-        # function (usually `self` or `cls`).
-        sig = _get_signature_of(obj.__func__)
-
-        if skip_bound_arg:
-            return _signature_bound_method(sig)
-        else:
-            return sig
-
-    # Was this function wrapped by a decorator?
-    if follow_wrapper_chains:
-        # Unwrap until we find an explicit signature or a MethodType (which will be
-        # handled explicitly below).
-        obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")
-                                or isinstance(f, types.MethodType)))
-        if isinstance(obj, types.MethodType):
-            # If the unwrapped object is a *method*, we might want to
-            # skip its first parameter (self).
-            # See test_signature_wrapped_bound_method for details.
- return _get_signature_of(obj) - - try: - sig = obj.__signature__ - except AttributeError: - pass - else: - if sig is not None: - # since __text_signature__ is not writable on classes, __signature__ - # may contain text (or be a callable that returns text); - # if so, convert it - o_sig = sig - if not isinstance(sig, (Signature, str)) and callable(sig): - sig = sig() - if isinstance(sig, str): - sig = _signature_fromstr(sigcls, obj, sig) - if not isinstance(sig, Signature): - raise TypeError( - 'unexpected object {!r} in __signature__ ' - 'attribute'.format(o_sig)) - return sig - - try: - partialmethod = obj.__partialmethod__ - except AttributeError: - pass - else: - if isinstance(partialmethod, functools.partialmethod): - # Unbound partialmethod (see functools.partialmethod) - # This means, that we need to calculate the signature - # as if it's a regular partial object, but taking into - # account that the first positional argument - # (usually `self`, or `cls`) will not be passed - # automatically (as for boundmethods) - - wrapped_sig = _get_signature_of(partialmethod.func) - - sig = _signature_get_partial(wrapped_sig, partialmethod, (None,)) - first_wrapped_param = tuple(wrapped_sig.parameters.values())[0] - if first_wrapped_param.kind is Parameter.VAR_POSITIONAL: - # First argument of the wrapped callable is `*args`, as in - # `partialmethod(lambda *args)`. - return sig - else: - sig_params = tuple(sig.parameters.values()) - assert (not sig_params or - first_wrapped_param is not sig_params[0]) - new_params = (first_wrapped_param,) + sig_params - return sig.replace(parameters=new_params) - - if isinstance(obj, functools.partial): - wrapped_sig = _get_signature_of(obj.func) - return _signature_get_partial(wrapped_sig, obj) - - if isfunction(obj) or _signature_is_functionlike(obj): - # If it's a pure Python function, or an object that is duck type - # of a Python function (Cython functions, for instance), then: - return _signature_from_function(sigcls, obj, - skip_bound_arg=skip_bound_arg, - globals=globals, locals=locals, eval_str=eval_str) - - if _signature_is_builtin(obj): - return _signature_from_builtin(sigcls, obj, - skip_bound_arg=skip_bound_arg) - - if isinstance(obj, type): - # obj is a class or a metaclass - - # First, let's see if it has an overloaded __call__ defined - # in its metaclass - call = _signature_get_user_defined_method( - type(obj), - '__call__', - follow_wrapper_chains=follow_wrapper_chains, - ) - if call is not None: - return _get_signature_of(call) - - # NOTE: The user-defined method can be a function with a thin wrapper - # around object.__new__ (e.g., generated by `@warnings.deprecated`) - new = _signature_get_user_defined_method( - obj, - '__new__', - follow_wrapper_chains=follow_wrapper_chains, - ) - init = _signature_get_user_defined_method( - obj, - '__init__', - follow_wrapper_chains=follow_wrapper_chains, - ) - - # Go through the MRO and see if any class has user-defined - # pure Python __new__ or __init__ method - for base in obj.__mro__: - # Now we check if the 'obj' class has an own '__new__' method - if new is not None and '__new__' in base.__dict__: - sig = _get_signature_of(new) - if skip_bound_arg: - sig = _signature_bound_method(sig) - return sig - # or an own '__init__' method - elif init is not None and '__init__' in base.__dict__: - return _get_signature_of(init) - - # At this point we know, that `obj` is a class, with no user- - # defined '__init__', '__new__', or class-level '__call__' - - for base in obj.__mro__[:-1]: - # Since 
'__text_signature__' is implemented as a - # descriptor that extracts text signature from the - # class docstring, if 'obj' is derived from a builtin - # class, its own '__text_signature__' may be 'None'. - # Therefore, we go through the MRO (except the last - # class in there, which is 'object') to find the first - # class with non-empty text signature. - try: - text_sig = base.__text_signature__ - except AttributeError: - pass - else: - if text_sig: - # If 'base' class has a __text_signature__ attribute: - # return a signature based on it - return _signature_fromstr(sigcls, base, text_sig) - - # No '__text_signature__' was found for the 'obj' class. - # Last option is to check if its '__init__' is - # object.__init__ or type.__init__. - if type not in obj.__mro__: - obj_init = obj.__init__ - obj_new = obj.__new__ - if follow_wrapper_chains: - obj_init = unwrap(obj_init) - obj_new = unwrap(obj_new) - # We have a class (not metaclass), but no user-defined - # __init__ or __new__ for it - if obj_init is object.__init__ and obj_new is object.__new__: - # Return a signature of 'object' builtin. - return sigcls.from_callable(object) - else: - raise ValueError( - 'no signature found for builtin type {!r}'.format(obj)) - - else: - # An object with __call__ - call = getattr_static(type(obj), '__call__', None) - if call is not None: - try: - text_sig = obj.__text_signature__ - except AttributeError: - pass - else: - if text_sig: - return _signature_fromstr(sigcls, obj, text_sig) - call = _descriptor_get(call, obj) - return _get_signature_of(call) - - raise ValueError('callable {!r} is not supported by signature'.format(obj)) - - -class _void: - """A private marker - used in Parameter & Signature.""" - - -class _empty: - """Marker object for Signature.empty and Parameter.empty.""" - - -class _ParameterKind(enum.IntEnum): - POSITIONAL_ONLY = 'positional-only' - POSITIONAL_OR_KEYWORD = 'positional or keyword' - VAR_POSITIONAL = 'variadic positional' - KEYWORD_ONLY = 'keyword-only' - VAR_KEYWORD = 'variadic keyword' - - def __new__(cls, description): - value = len(cls.__members__) - member = int.__new__(cls, value) - member._value_ = value - member.description = description - return member - - def __str__(self): - return self.name - -_POSITIONAL_ONLY = _ParameterKind.POSITIONAL_ONLY -_POSITIONAL_OR_KEYWORD = _ParameterKind.POSITIONAL_OR_KEYWORD -_VAR_POSITIONAL = _ParameterKind.VAR_POSITIONAL -_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY -_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD - - -class Parameter: - """Represents a parameter in a function signature. - - Has the following public attributes: - - * name : str - The name of the parameter as a string. - * default : object - The default value for the parameter if specified. If the - parameter has no default value, this attribute is set to - `Parameter.empty`. - * annotation - The annotation for the parameter if specified. If the - parameter has no annotation, this attribute is set to - `Parameter.empty`. - * kind - Describes how argument values are bound to the parameter. - Possible values: `Parameter.POSITIONAL_ONLY`, - `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`, - `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`. - Every value has a `description` attribute describing meaning. 
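-
-    For example (an illustrative sketch of the formatting behavior):
-
-        >>> p = Parameter('x', Parameter.POSITIONAL_OR_KEYWORD, default=42)
-        >>> str(p)
-        'x=42'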
- """ - - __slots__ = ('_name', '_kind', '_default', '_annotation') - - POSITIONAL_ONLY = _POSITIONAL_ONLY - POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD - VAR_POSITIONAL = _VAR_POSITIONAL - KEYWORD_ONLY = _KEYWORD_ONLY - VAR_KEYWORD = _VAR_KEYWORD - - empty = _empty - - def __init__(self, name, kind, *, default=_empty, annotation=_empty): - try: - self._kind = _ParameterKind(kind) - except ValueError: - raise ValueError(f'value {kind!r} is not a valid Parameter.kind') - if default is not _empty: - if self._kind in (_VAR_POSITIONAL, _VAR_KEYWORD): - msg = '{} parameters cannot have default values' - msg = msg.format(self._kind.description) - raise ValueError(msg) - self._default = default - self._annotation = annotation - - if name is _empty: - raise ValueError('name is a required attribute for Parameter') - - if not isinstance(name, str): - msg = 'name must be a str, not a {}'.format(type(name).__name__) - raise TypeError(msg) - - if name[0] == '.' and name[1:].isdigit(): - # These are implicit arguments generated by comprehensions. In - # order to provide a friendlier interface to users, we recast - # their name as "implicitN" and treat them as positional-only. - # See issue 19611. - if self._kind != _POSITIONAL_OR_KEYWORD: - msg = ( - 'implicit arguments must be passed as ' - 'positional or keyword arguments, not {}' - ) - msg = msg.format(self._kind.description) - raise ValueError(msg) - self._kind = _POSITIONAL_ONLY - name = 'implicit{}'.format(name[1:]) - - # It's possible for C functions to have a positional-only parameter - # where the name is a keyword, so for compatibility we'll allow it. - is_keyword = iskeyword(name) and self._kind is not _POSITIONAL_ONLY - if is_keyword or not name.isidentifier(): - raise ValueError('{!r} is not a valid parameter name'.format(name)) - - self._name = name - - def __reduce__(self): - return (type(self), - (self._name, self._kind), - {'_default': self._default, - '_annotation': self._annotation}) - - def __setstate__(self, state): - self._default = state['_default'] - self._annotation = state['_annotation'] - - @property - def name(self): - return self._name - - @property - def default(self): - return self._default - - @property - def annotation(self): - return self._annotation - - @property - def kind(self): - return self._kind - - def replace(self, *, name=_void, kind=_void, - annotation=_void, default=_void): - """Creates a customized copy of the Parameter.""" - - if name is _void: - name = self._name - - if kind is _void: - kind = self._kind - - if annotation is _void: - annotation = self._annotation - - if default is _void: - default = self._default - - return type(self)(name, kind, default=default, annotation=annotation) - - def __str__(self): - kind = self.kind - formatted = self._name - - # Add annotation and default value - if self._annotation is not _empty: - formatted = '{}: {}'.format(formatted, - formatannotation(self._annotation)) - - if self._default is not _empty: - if self._annotation is not _empty: - formatted = '{} = {}'.format(formatted, repr(self._default)) - else: - formatted = '{}={}'.format(formatted, repr(self._default)) - - if kind == _VAR_POSITIONAL: - formatted = '*' + formatted - elif kind == _VAR_KEYWORD: - formatted = '**' + formatted - - return formatted - - __replace__ = replace - - def __repr__(self): - return '<{} "{}">'.format(self.__class__.__name__, self) - - def __hash__(self): - return hash((self._name, self._kind, self._annotation, self._default)) - - def __eq__(self, other): - if self is other: - return 
True - if not isinstance(other, Parameter): - return NotImplemented - return (self._name == other._name and - self._kind == other._kind and - self._default == other._default and - self._annotation == other._annotation) - - -class BoundArguments: - """Result of `Signature.bind` call. Holds the mapping of arguments - to the function's parameters. - - Has the following public attributes: - - * arguments : dict - An ordered mutable mapping of parameters' names to arguments' values. - Does not contain arguments' default values. - * signature : Signature - The Signature object that created this instance. - * args : tuple - Tuple of positional arguments values. - * kwargs : dict - Dict of keyword arguments values. - """ - - __slots__ = ('arguments', '_signature', '__weakref__') - - def __init__(self, signature, arguments): - self.arguments = arguments - self._signature = signature - - @property - def signature(self): - return self._signature - - @property - def args(self): - args = [] - for param_name, param in self._signature.parameters.items(): - if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): - break - - try: - arg = self.arguments[param_name] - except KeyError: - # We're done here. Other arguments - # will be mapped in 'BoundArguments.kwargs' - break - else: - if param.kind == _VAR_POSITIONAL: - # *args - args.extend(arg) - else: - # plain argument - args.append(arg) - - return tuple(args) - - @property - def kwargs(self): - kwargs = {} - kwargs_started = False - for param_name, param in self._signature.parameters.items(): - if not kwargs_started: - if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): - kwargs_started = True - else: - if param_name not in self.arguments: - kwargs_started = True - continue - - if not kwargs_started: - continue - - try: - arg = self.arguments[param_name] - except KeyError: - pass - else: - if param.kind == _VAR_KEYWORD: - # **kwargs - kwargs.update(arg) - else: - # plain keyword argument - kwargs[param_name] = arg - - return kwargs - - def apply_defaults(self): - """Set default values for missing arguments. - - For variable-positional arguments (*args) the default is an - empty tuple. - - For variable-keyword arguments (**kwargs) the default is an - empty dict. - """ - arguments = self.arguments - new_arguments = [] - for name, param in self._signature.parameters.items(): - try: - new_arguments.append((name, arguments[name])) - except KeyError: - if param.default is not _empty: - val = param.default - elif param.kind is _VAR_POSITIONAL: - val = () - elif param.kind is _VAR_KEYWORD: - val = {} - else: - # This BoundArguments was likely produced by - # Signature.bind_partial(). - continue - new_arguments.append((name, val)) - self.arguments = dict(new_arguments) - - def __eq__(self, other): - if self is other: - return True - if not isinstance(other, BoundArguments): - return NotImplemented - return (self.signature == other.signature and - self.arguments == other.arguments) - - def __setstate__(self, state): - self._signature = state['_signature'] - self.arguments = state['arguments'] - - def __getstate__(self): - return {'_signature': self._signature, 'arguments': self.arguments} - - def __repr__(self): - args = [] - for arg, value in self.arguments.items(): - args.append('{}={!r}'.format(arg, value)) - return '<{} ({})>'.format(self.__class__.__name__, ', '.join(args)) - - -class Signature: - """A Signature object represents the overall signature of a function. 
-    It stores a Parameter object for each parameter accepted by the
-    function, as well as information specific to the function itself.
-
-    A Signature object has the following public attributes and methods:
-
-    * parameters : OrderedDict
-        An ordered mapping of parameters' names to the corresponding
-        Parameter objects (keyword-only arguments are in the same order
-        as listed in `code.co_varnames`).
-    * return_annotation : object
-        The annotation for the return type of the function if specified.
-        If the function has no annotation for its return type, this
-        attribute is set to `Signature.empty`.
-    * bind(*args, **kwargs) -> BoundArguments
-        Creates a mapping from positional and keyword arguments to
-        parameters.
-    * bind_partial(*args, **kwargs) -> BoundArguments
-        Creates a partial mapping from positional and keyword arguments
-        to parameters (simulating 'functools.partial' behavior.)
-    """
-
-    __slots__ = ('_return_annotation', '_parameters')
-
-    _parameter_cls = Parameter
-    _bound_arguments_cls = BoundArguments
-
-    empty = _empty
-
-    def __init__(self, parameters=None, *, return_annotation=_empty,
-                 __validate_parameters__=True):
-        """Constructs Signature from the given list of Parameter
-        objects and 'return_annotation'. All arguments are optional.
-        """
-
-        if parameters is None:
-            params = OrderedDict()
-        else:
-            if __validate_parameters__:
-                params = OrderedDict()
-                top_kind = _POSITIONAL_ONLY
-                seen_default = False
-
-                for param in parameters:
-                    kind = param.kind
-                    name = param.name
-
-                    if kind < top_kind:
-                        msg = (
-                            'wrong parameter order: {} parameter before {} '
-                            'parameter'
-                        )
-                        msg = msg.format(top_kind.description,
-                                         kind.description)
-                        raise ValueError(msg)
-                    elif kind > top_kind:
-                        top_kind = kind
-
-                    if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):
-                        if param.default is _empty:
-                            if seen_default:
-                                # No default for this parameter, but the
-                                # previous parameter had a default
-                                msg = 'non-default argument follows default ' \
-                                      'argument'
-                                raise ValueError(msg)
-                        else:
-                            # There is a default for this parameter.
-                            seen_default = True
-
-                    if name in params:
-                        msg = 'duplicate parameter name: {!r}'.format(name)
-                        raise ValueError(msg)
-
-                    params[name] = param
-            else:
-                params = OrderedDict((param.name, param) for param in parameters)
-
-        self._parameters = types.MappingProxyType(params)
-        self._return_annotation = return_annotation
-
-    @classmethod
-    def from_callable(cls, obj, *,
-                      follow_wrapped=True, globals=None, locals=None, eval_str=False):
-        """Constructs Signature for the given callable object."""
-        return _signature_from_callable(obj, sigcls=cls,
-                                        follow_wrapper_chains=follow_wrapped,
-                                        globals=globals, locals=locals, eval_str=eval_str)
-
-    @property
-    def parameters(self):
-        return self._parameters
-
-    @property
-    def return_annotation(self):
-        return self._return_annotation
-
-    def replace(self, *, parameters=_void, return_annotation=_void):
-        """Creates a customized copy of the Signature.
-        Pass 'parameters' and/or 'return_annotation' arguments
-        to override them in the new copy.
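-
-        For example (illustrative; assumes 'sig' is the Signature of a
-        function defined as 'def foo(a, b=1)'):
-
-            >>> str(sig.replace(return_annotation=int))
-            '(a, b=1) -> int'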
- """ - - if parameters is _void: - parameters = self.parameters.values() - - if return_annotation is _void: - return_annotation = self._return_annotation - - return type(self)(parameters, - return_annotation=return_annotation) - - __replace__ = replace - - def _hash_basis(self): - params = tuple(param for param in self.parameters.values() - if param.kind != _KEYWORD_ONLY) - - kwo_params = {param.name: param for param in self.parameters.values() - if param.kind == _KEYWORD_ONLY} - - return params, kwo_params, self.return_annotation - - def __hash__(self): - params, kwo_params, return_annotation = self._hash_basis() - kwo_params = frozenset(kwo_params.values()) - return hash((params, kwo_params, return_annotation)) - - def __eq__(self, other): - if self is other: - return True - if not isinstance(other, Signature): - return NotImplemented - return self._hash_basis() == other._hash_basis() - - def _bind(self, args, kwargs, *, partial=False): - """Private method. Don't use directly.""" - - arguments = {} - - parameters = iter(self.parameters.values()) - parameters_ex = () - arg_vals = iter(args) - - pos_only_param_in_kwargs = [] - - while True: - # Let's iterate through the positional arguments and corresponding - # parameters - try: - arg_val = next(arg_vals) - except StopIteration: - # No more positional arguments - try: - param = next(parameters) - except StopIteration: - # No more parameters. That's it. Just need to check that - # we have no `kwargs` after this while loop - break - else: - if param.kind == _VAR_POSITIONAL: - # That's OK, just empty *args. Let's start parsing - # kwargs - break - elif param.name in kwargs: - if param.kind == _POSITIONAL_ONLY: - if param.default is _empty: - msg = f'missing a required positional-only argument: {param.name!r}' - raise TypeError(msg) - # Raise a TypeError once we are sure there is no - # **kwargs param later. - pos_only_param_in_kwargs.append(param) - continue - parameters_ex = (param,) - break - elif (param.kind == _VAR_KEYWORD or - param.default is not _empty): - # That's fine too - we have a default value for this - # parameter. 
So, let's start parsing `kwargs`, starting
-                    # with the current parameter
-                    parameters_ex = (param,)
-                    break
-                else:
-                    # No default, not VAR_KEYWORD, not VAR_POSITIONAL,
-                    # not in `kwargs`
-                    if partial:
-                        parameters_ex = (param,)
-                        break
-                    else:
-                        if param.kind == _KEYWORD_ONLY:
-                            argtype = ' keyword-only'
-                        else:
-                            argtype = ''
-                        msg = 'missing a required{argtype} argument: {arg!r}'
-                        msg = msg.format(arg=param.name, argtype=argtype)
-                        raise TypeError(msg) from None
-            else:
-                # We have a positional argument to process
-                try:
-                    param = next(parameters)
-                except StopIteration:
-                    raise TypeError('too many positional arguments') from None
-                else:
-                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
-                        # Looks like we have no parameter for this positional
-                        # argument
-                        raise TypeError(
-                            'too many positional arguments') from None
-
-                    if param.kind == _VAR_POSITIONAL:
-                        # We have an '*args'-like argument, let's fill it with
-                        # all positional arguments we have left and move on to
-                        # the next phase
-                        values = [arg_val]
-                        values.extend(arg_vals)
-                        arguments[param.name] = tuple(values)
-                        break
-
-                    if param.name in kwargs and param.kind != _POSITIONAL_ONLY:
-                        raise TypeError(
-                            'multiple values for argument {arg!r}'.format(
-                                arg=param.name)) from None
-
-                    arguments[param.name] = arg_val
-
-        # Now, we iterate through the remaining parameters to process
-        # keyword arguments
-        kwargs_param = None
-        for param in itertools.chain(parameters_ex, parameters):
-            if param.kind == _VAR_KEYWORD:
-                # Memorize that we have a '**kwargs'-like parameter
-                kwargs_param = param
-                continue
-
-            if param.kind == _VAR_POSITIONAL:
-                # Named arguments don't refer to '*args'-like parameters.
-                # We only arrive here if the positional arguments ended
-                # before reaching the last parameter before *args.
-                continue
-
-            param_name = param.name
-            try:
-                arg_val = kwargs.pop(param_name)
-            except KeyError:
-                # We have no value for this parameter. It's fine though,
-                # if it has a default value, or it is an '*args'-like
-                # parameter, left alone by the processing of positional
-                # arguments.
-                if (not partial and param.kind != _VAR_POSITIONAL and
-                                                param.default is _empty):
-                    raise TypeError('missing a required argument: {arg!r}'. \
-                                    format(arg=param_name)) from None
-
-            else:
-                arguments[param_name] = arg_val
-
-        if kwargs:
-            if kwargs_param is not None:
-                # Process our '**kwargs'-like parameter
-                arguments[kwargs_param.name] = kwargs
-            elif pos_only_param_in_kwargs:
-                raise TypeError(
-                    'got some positional-only arguments passed as '
-                    'keyword arguments: {arg!r}'.format(
-                        arg=', '.join(
-                            param.name
-                            for param in pos_only_param_in_kwargs
-                        ),
-                    ),
-                )
-            else:
-                raise TypeError(
-                    'got an unexpected keyword argument {arg!r}'.format(
-                        arg=next(iter(kwargs))))
-
-        return self._bound_arguments_cls(self, arguments)
-
-    def bind(self, /, *args, **kwargs):
-        """Get a BoundArguments object, that maps the passed `args`
-        and `kwargs` to the function's signature. Raises `TypeError`
-        if the passed arguments cannot be bound.
-        """
-        return self._bind(args, kwargs)
-
-    def bind_partial(self, /, *args, **kwargs):
-        """Get a BoundArguments object, that partially maps the
-        passed `args` and `kwargs` to the function's signature.
-        Raises `TypeError` if the passed arguments cannot be bound.
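-
-        For example (illustrative):
-
-            >>> def foo(a, b, c=3): pass
-            >>> Signature.from_callable(foo).bind_partial(1).arguments
-            {'a': 1}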
- """ - return self._bind(args, kwargs, partial=True) - - def __reduce__(self): - return (type(self), - (tuple(self._parameters.values()),), - {'_return_annotation': self._return_annotation}) - - def __setstate__(self, state): - self._return_annotation = state['_return_annotation'] - - def __repr__(self): - return '<{} {}>'.format(self.__class__.__name__, self) - - def __str__(self): - return self.format() - - def format(self, *, max_width=None): - """Create a string representation of the Signature object. - - If *max_width* integer is passed, - signature will try to fit into the *max_width*. - If signature is longer than *max_width*, - all parameters will be on separate lines. - """ - result = [] - render_pos_only_separator = False - render_kw_only_separator = True - for param in self.parameters.values(): - formatted = str(param) - - kind = param.kind - - if kind == _POSITIONAL_ONLY: - render_pos_only_separator = True - elif render_pos_only_separator: - # It's not a positional-only parameter, and the flag - # is set to 'True' (there were pos-only params before.) - result.append('/') - render_pos_only_separator = False - - if kind == _VAR_POSITIONAL: - # OK, we have an '*args'-like parameter, so we won't need - # a '*' to separate keyword-only arguments - render_kw_only_separator = False - elif kind == _KEYWORD_ONLY and render_kw_only_separator: - # We have a keyword-only parameter to render and we haven't - # rendered an '*args'-like parameter before, so add a '*' - # separator to the parameters list ("foo(arg1, *, arg2)" case) - result.append('*') - # This condition should be only triggered once, so - # reset the flag - render_kw_only_separator = False - - result.append(formatted) - - if render_pos_only_separator: - # There were only positional-only parameters, hence the - # flag was not reset to 'False' - result.append('/') - - rendered = '({})'.format(', '.join(result)) - if max_width is not None and len(rendered) > max_width: - rendered = '(\n {}\n)'.format(',\n '.join(result)) - - if self.return_annotation is not _empty: - anno = formatannotation(self.return_annotation) - rendered += ' -> {}'.format(anno) - - return rendered - - -def signature(obj, *, follow_wrapped=True, globals=None, locals=None, eval_str=False): - """Get a signature object for the passed callable.""" - return Signature.from_callable(obj, follow_wrapped=follow_wrapped, - globals=globals, locals=locals, eval_str=eval_str) - - -class BufferFlags(enum.IntFlag): - SIMPLE = 0x0 - WRITABLE = 0x1 - FORMAT = 0x4 - ND = 0x8 - STRIDES = 0x10 | ND - C_CONTIGUOUS = 0x20 | STRIDES - F_CONTIGUOUS = 0x40 | STRIDES - ANY_CONTIGUOUS = 0x80 | STRIDES - INDIRECT = 0x100 | STRIDES - CONTIG = ND | WRITABLE - CONTIG_RO = ND - STRIDED = STRIDES | WRITABLE - STRIDED_RO = STRIDES - RECORDS = STRIDES | WRITABLE | FORMAT - RECORDS_RO = STRIDES | FORMAT - FULL = INDIRECT | WRITABLE | FORMAT - FULL_RO = INDIRECT | FORMAT - READ = 0x100 - WRITE = 0x200 - - -def _main(): - """ Logic for inspecting an object given at command line """ - import argparse - import importlib - - parser = argparse.ArgumentParser() - parser.add_argument( - 'object', - help="The object to be analysed. 
" - "It supports the 'module:qualname' syntax") - parser.add_argument( - '-d', '--details', action='store_true', - help='Display info about the module rather than its source code') - - args = parser.parse_args() - - target = args.object - mod_name, has_attrs, attrs = target.partition(":") - try: - obj = module = importlib.import_module(mod_name) - except Exception as exc: - msg = "Failed to import {} ({}: {})".format(mod_name, - type(exc).__name__, - exc) - print(msg, file=sys.stderr) - sys.exit(2) - - if has_attrs: - parts = attrs.split(".") - obj = module - for part in parts: - obj = getattr(obj, part) - - if module.__name__ in sys.builtin_module_names: - print("Can't get info for builtin modules.", file=sys.stderr) - sys.exit(1) - - if args.details: - print('Target: {}'.format(target)) - print('Origin: {}'.format(getsourcefile(module))) - print('Cached: {}'.format(module.__cached__)) - if obj is module: - print('Loader: {}'.format(repr(module.__loader__))) - if hasattr(module, '__path__'): - print('Submodule search path: {}'.format(module.__path__)) - else: - try: - __, lineno = findsource(obj) - except Exception: - pass - else: - print('Line: {}'.format(lineno)) - - print('\n') - else: - print(getsource(obj)) - - -if __name__ == "__main__": - _main() diff --git a/Python313_13_x64_Template/Lib/io.py b/Python313_13_x64_Template/Lib/io.py deleted file mode 100644 index f0e2fa15..00000000 --- a/Python313_13_x64_Template/Lib/io.py +++ /dev/null @@ -1,99 +0,0 @@ -"""The io module provides the Python interfaces to stream handling. The -builtin open function is defined in this module. - -At the top of the I/O hierarchy is the abstract base class IOBase. It -defines the basic interface to a stream. Note, however, that there is no -separation between reading and writing to streams; implementations are -allowed to raise an OSError if they do not support a given operation. - -Extending IOBase is RawIOBase which deals simply with the reading and -writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide -an interface to OS files. - -BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its -subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer -streams that are readable, writable, and both respectively. -BufferedRandom provides a buffered interface to random access -streams. BytesIO is a simple stream of in-memory bytes. - -Another IOBase subclass, TextIOBase, deals with the encoding and decoding -of streams into text. TextIOWrapper, which extends it, is a buffered text -interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO -is an in-memory stream for text. - -Argument names are not part of the specification, and only the arguments -of open() are intended to be used as keyword arguments. - -data: - -DEFAULT_BUFFER_SIZE - - An int containing the default buffer size used by the module's buffered - I/O classes. open() uses the file's blksize (as obtained by os.stat) if - possible. -""" -# New I/O library conforming to PEP 3116. 
- -__author__ = ("Guido van Rossum , " - "Mike Verdone , " - "Mark Russell , " - "Antoine Pitrou , " - "Amaury Forgeot d'Arc , " - "Benjamin Peterson ") - -__all__ = ["BlockingIOError", "open", "open_code", "IOBase", "RawIOBase", - "FileIO", "BytesIO", "StringIO", "BufferedIOBase", - "BufferedReader", "BufferedWriter", "BufferedRWPair", - "BufferedRandom", "TextIOBase", "TextIOWrapper", - "UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END", - "DEFAULT_BUFFER_SIZE", "text_encoding", "IncrementalNewlineDecoder"] - - -import _io -import abc - -from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation, - open, open_code, FileIO, BytesIO, StringIO, BufferedReader, - BufferedWriter, BufferedRWPair, BufferedRandom, - IncrementalNewlineDecoder, text_encoding, TextIOWrapper) - - -# Pretend this exception was created here. -UnsupportedOperation.__module__ = "io" - -# for seek() -SEEK_SET = 0 -SEEK_CUR = 1 -SEEK_END = 2 - -# Declaring ABCs in C is tricky so we do it here. -# Method descriptions and default implementations are inherited from the C -# version however. -class IOBase(_io._IOBase, metaclass=abc.ABCMeta): - __doc__ = _io._IOBase.__doc__ - -class RawIOBase(_io._RawIOBase, IOBase): - __doc__ = _io._RawIOBase.__doc__ - -class BufferedIOBase(_io._BufferedIOBase, IOBase): - __doc__ = _io._BufferedIOBase.__doc__ - -class TextIOBase(_io._TextIOBase, IOBase): - __doc__ = _io._TextIOBase.__doc__ - -RawIOBase.register(FileIO) - -for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom, - BufferedRWPair): - BufferedIOBase.register(klass) - -for klass in (StringIO, TextIOWrapper): - TextIOBase.register(klass) -del klass - -try: - from _io import _WindowsConsoleIO -except ImportError: - pass -else: - RawIOBase.register(_WindowsConsoleIO) diff --git a/Python313_13_x64_Template/Lib/ipaddress.py b/Python313_13_x64_Template/Lib/ipaddress.py deleted file mode 100644 index 4235ed87..00000000 --- a/Python313_13_x64_Template/Lib/ipaddress.py +++ /dev/null @@ -1,2440 +0,0 @@ -# Copyright 2007 Google Inc. -# Licensed to PSF under a Contributor Agreement. - -"""A fast, lightweight IPv4/IPv6 manipulation library in Python. - -This library is used to create/poke/manipulate IPv4 and IPv6 addresses -and networks. - -""" - -__version__ = '1.0' - - -import functools - -IPV4LENGTH = 32 -IPV6LENGTH = 128 - - -class AddressValueError(ValueError): - """A Value Error related to the address.""" - - -class NetmaskValueError(ValueError): - """A Value Error related to the netmask.""" - - -def ip_address(address): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Address or IPv6Address object. - - Raises: - ValueError: if the *address* passed isn't either a v4 or a v6 - address - - """ - try: - return IPv4Address(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Address(address) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 address') - - -def ip_network(address, strict=True): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP network. Either IPv4 or - IPv6 networks may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. 
- - Returns: - An IPv4Network or IPv6Network object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. Or if the network has host bits set. - - """ - try: - return IPv4Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 network') - - -def ip_interface(address): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Interface or IPv6Interface object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. - - Notes: - The IPv?Interface classes describe an Address on a particular - Network, so they're basically a combination of both the Address - and Network classes. - - """ - try: - return IPv4Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 interface') - - -def v4_int_to_packed(address): - """Represent an address as 4 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv4 IP address. - - Returns: - The integer address packed as 4 bytes in network (big-endian) order. - - Raises: - ValueError: If the integer is negative or too large to be an - IPv4 IP address. - - """ - try: - return address.to_bytes(4) # big endian - except OverflowError: - raise ValueError("Address negative or too large for IPv4") - - -def v6_int_to_packed(address): - """Represent an address as 16 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv6 IP address. - - Returns: - The integer address packed as 16 bytes in network (big-endian) order. - - """ - try: - return address.to_bytes(16) # big endian - except OverflowError: - raise ValueError("Address negative or too large for IPv6") - - -def _split_optional_netmask(address): - """Helper to split the netmask and raise AddressValueError if needed""" - addr = str(address).split('/') - if len(addr) > 2: - raise AddressValueError(f"Only one '/' permitted in {address!r}") - return addr - - -def _find_address_range(addresses): - """Find a sequence of sorted deduplicated IPv#Address. - - Args: - addresses: a list of IPv#Address objects. - - Yields: - A tuple containing the first and last IP addresses in the sequence. - - """ - it = iter(addresses) - first = last = next(it) - for ip in it: - if ip._ip != last._ip + 1: - yield first, last - first = ip - last = ip - yield first, last - - -def _count_righthand_zero_bits(number, bits): - """Count the number of zero bits on the right hand side. - - Args: - number: an integer. - bits: maximum number of bits to count. - - Returns: - The number of zero bits on the right hand side of the number. - - """ - if number == 0: - return bits - return min(bits, (~number & (number-1)).bit_length()) - - -def summarize_address_range(first, last): - """Summarize a network range given the first and last IP addresses. - - Example: - >>> list(summarize_address_range(IPv4Address('192.0.2.0'), - ... IPv4Address('192.0.2.130'))) - ... 
#doctest: +NORMALIZE_WHITESPACE
-        [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
-        IPv4Network('192.0.2.130/32')]
-
-    Args:
-        first: the first IPv4Address or IPv6Address in the range.
-        last: the last IPv4Address or IPv6Address in the range.
-
-    Returns:
-        An iterator of the summarized IPv(4|6) network objects.
-
-    Raises:
-        TypeError:
-            If the first and last objects are not IP addresses.
-            If the first and last objects are not the same version.
-        ValueError:
-            If the last object is not greater than the first.
-            If the version of the first address is not 4 or 6.
-
-    """
-    if (not (isinstance(first, _BaseAddress) and
-             isinstance(last, _BaseAddress))):
-        raise TypeError('first and last must be IP addresses, not networks')
-    if first.version != last.version:
-        raise TypeError("%s and %s are not of the same version" % (
-                         first, last))
-    if first > last:
-        raise ValueError('last IP address must be greater than first')
-
-    if first.version == 4:
-        ip = IPv4Network
-    elif first.version == 6:
-        ip = IPv6Network
-    else:
-        raise ValueError('unknown IP version')
-
-    ip_bits = first._max_prefixlen
-    first_int = first._ip
-    last_int = last._ip
-    while first_int <= last_int:
-        nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
-                    (last_int - first_int + 1).bit_length() - 1)
-        net = ip((first_int, ip_bits - nbits))
-        yield net
-        first_int += 1 << nbits
-        if first_int - 1 == ip._ALL_ONES:
-            break
-
-
-def _collapse_addresses_internal(addresses):
-    """Loops through the addresses, collapsing concurrent netblocks.
-
-    Example:
-
-        ip1 = IPv4Network('192.0.2.0/26')
-        ip2 = IPv4Network('192.0.2.64/26')
-        ip3 = IPv4Network('192.0.2.128/26')
-        ip4 = IPv4Network('192.0.2.192/26')
-
-        _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
-          [IPv4Network('192.0.2.0/24')]
-
-        This shouldn't be called directly; it is called via
-          collapse_addresses([]).
-
-    Args:
-        addresses: A list of IPv4Network's or IPv6Network's
-
-    Returns:
-        An iterator of IPv4Network's or IPv6Network's depending on what we
-        were passed.
-
-    """
-    # First merge
-    to_merge = list(addresses)
-    subnets = {}
-    while to_merge:
-        net = to_merge.pop()
-        supernet = net.supernet()
-        existing = subnets.get(supernet)
-        if existing is None:
-            subnets[supernet] = net
-        elif existing != net:
-            # Merge consecutive subnets
-            del subnets[supernet]
-            to_merge.append(supernet)
-    # Then iterate over resulting networks, skipping subsumed subnets
-    last = None
-    for net in sorted(subnets.values()):
-        if last is not None:
-            # Since they are sorted, last.network_address <= net.network_address
-            # is a given.
-            if last.broadcast_address >= net.broadcast_address:
-                continue
-        yield net
-        last = net
-
-
-def collapse_addresses(addresses):
-    """Collapse a list of IP objects.
-
-    Example:
-        collapse_addresses([IPv4Network('192.0.2.0/25'),
-                            IPv4Network('192.0.2.128/25')]) ->
-                           [IPv4Network('192.0.2.0/24')]
-
-    Args:
-        addresses: An iterable of IPv4Network or IPv6Network objects.
-
-    Returns:
-        An iterator of the collapsed IPv(4|6)Network objects.
-
-    Raises:
-        TypeError: If passed a list of mixed version objects.
- - """ - addrs = [] - ips = [] - nets = [] - - # split IP addresses and networks - for ip in addresses: - if isinstance(ip, _BaseAddress): - if ips and ips[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, ips[-1])) - ips.append(ip) - elif ip._prefixlen == ip._max_prefixlen: - if ips and ips[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, ips[-1])) - try: - ips.append(ip.ip) - except AttributeError: - ips.append(ip.network_address) - else: - if nets and nets[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, nets[-1])) - nets.append(ip) - - # sort and dedup - ips = sorted(set(ips)) - - # find consecutive address ranges in the sorted sequence and summarize them - if ips: - for first, last in _find_address_range(ips): - addrs.extend(summarize_address_range(first, last)) - - return _collapse_addresses_internal(addrs + nets) - - -def get_mixed_type_key(obj): - """Return a key suitable for sorting between networks and addresses. - - Address and Network objects are not sortable by default; they're - fundamentally different so the expression - - IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') - - doesn't make any sense. There are some times however, where you may wish - to have ipaddress sort these for you anyway. If you need to do this, you - can use this function as the key= argument to sorted(). - - Args: - obj: either a Network or Address object. - Returns: - appropriate key. - - """ - if isinstance(obj, _BaseNetwork): - return obj._get_networks_key() - elif isinstance(obj, _BaseAddress): - return obj._get_address_key() - return NotImplemented - - -class _IPAddressBase: - - """The mother class.""" - - __slots__ = () - - @property - def exploded(self): - """Return the longhand version of the IP address as a string.""" - return self._explode_shorthand_ip_string() - - @property - def compressed(self): - """Return the shorthand version of the IP address as a string.""" - return str(self) - - @property - def reverse_pointer(self): - """The name of the reverse DNS pointer for the IP address, e.g.: - >>> ipaddress.ip_address("127.0.0.1").reverse_pointer - '1.0.0.127.in-addr.arpa' - >>> ipaddress.ip_address("2001:db8::1").reverse_pointer - '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' - - """ - return self._reverse_pointer() - - @property - def version(self): - msg = '%200s has no version specified' % (type(self),) - raise NotImplementedError(msg) - - def _check_int_address(self, address): - if address < 0: - msg = "%d (< 0) is not permitted as an IPv%d address" - raise AddressValueError(msg % (address, self._version)) - if address > self._ALL_ONES: - msg = "%d (>= 2**%d) is not permitted as an IPv%d address" - raise AddressValueError(msg % (address, self._max_prefixlen, - self._version)) - - def _check_packed_address(self, address, expected_len): - address_len = len(address) - if address_len != expected_len: - msg = "%r (len %d != %d) is not permitted as an IPv%d address" - raise AddressValueError(msg % (address, address_len, - expected_len, self._version)) - - @classmethod - def _ip_int_from_prefix(cls, prefixlen): - """Turn the prefix length into a bitwise netmask - - Args: - prefixlen: An integer, the prefix length. - - Returns: - An integer. - - """ - return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) - - @classmethod - def _prefix_from_ip_int(cls, ip_int): - """Return prefix length from the bitwise netmask. 
- - Args: - ip_int: An integer, the netmask in expanded bitwise format - - Returns: - An integer, the prefix length. - - Raises: - ValueError: If the input intermingles zeroes & ones - """ - trailing_zeroes = _count_righthand_zero_bits(ip_int, - cls._max_prefixlen) - prefixlen = cls._max_prefixlen - trailing_zeroes - leading_ones = ip_int >> trailing_zeroes - all_ones = (1 << prefixlen) - 1 - if leading_ones != all_ones: - byteslen = cls._max_prefixlen // 8 - details = ip_int.to_bytes(byteslen, 'big') - msg = 'Netmask pattern %r mixes zeroes & ones' - raise ValueError(msg % details) - return prefixlen - - @classmethod - def _report_invalid_netmask(cls, netmask_str): - msg = '%r is not a valid netmask' % netmask_str - raise NetmaskValueError(msg) from None - - @classmethod - def _prefix_from_prefix_string(cls, prefixlen_str): - """Return prefix length from a numeric string - - Args: - prefixlen_str: The string to be converted - - Returns: - An integer, the prefix length. - - Raises: - NetmaskValueError: If the input is not a valid netmask - """ - # int allows a leading +/- as well as surrounding whitespace, - # so we ensure that isn't the case - if not (prefixlen_str.isascii() and prefixlen_str.isdigit()): - cls._report_invalid_netmask(prefixlen_str) - try: - prefixlen = int(prefixlen_str) - except ValueError: - cls._report_invalid_netmask(prefixlen_str) - if not (0 <= prefixlen <= cls._max_prefixlen): - cls._report_invalid_netmask(prefixlen_str) - return prefixlen - - @classmethod - def _prefix_from_ip_string(cls, ip_str): - """Turn a netmask/hostmask string into a prefix length - - Args: - ip_str: The netmask/hostmask to be converted - - Returns: - An integer, the prefix length. - - Raises: - NetmaskValueError: If the input is not a valid netmask/hostmask - """ - # Parse the netmask/hostmask like an IP address. - try: - ip_int = cls._ip_int_from_string(ip_str) - except AddressValueError: - cls._report_invalid_netmask(ip_str) - - # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). - # Note that the two ambiguous cases (all-ones and all-zeroes) are - # treated as netmasks. - try: - return cls._prefix_from_ip_int(ip_int) - except ValueError: - pass - - # Invert the bits, and try matching a /0+1+/ hostmask instead. - ip_int ^= cls._ALL_ONES - try: - return cls._prefix_from_ip_int(ip_int) - except ValueError: - cls._report_invalid_netmask(ip_str) - - @classmethod - def _split_addr_prefix(cls, address): - """Helper function to parse address of Network/Interface. - - Arg: - address: Argument of Network/Interface. - - Returns: - (addr, prefix) tuple. - """ - # a packed address or integer - if isinstance(address, (bytes, int)): - return address, cls._max_prefixlen - - if not isinstance(address, tuple): - # Assume input argument to be string or any object representation - # which converts into a formatted IP prefix string. - address = _split_optional_netmask(address) - - # Constructing from a tuple (addr, [mask]) - if len(address) > 1: - return address - return address[0], cls._max_prefixlen - - def __reduce__(self): - return self.__class__, (str(self),) - - -_address_fmt_re = None - -@functools.total_ordering -class _BaseAddress(_IPAddressBase): - - """A generic IP object. - - This IP class contains the version independent methods which are - used by single IP addresses. 
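-
-    For example (illustrative):
-
-        >>> int(IPv4Address('192.0.2.1'))
-        3221225985
-        >>> IPv4Address('192.0.2.1') + 1
-        IPv4Address('192.0.2.2')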
- """ - - __slots__ = () - - def __int__(self): - return self._ip - - def __eq__(self, other): - try: - return (self._ip == other._ip - and self._version == other._version) - except AttributeError: - return NotImplemented - - def __lt__(self, other): - if not isinstance(other, _BaseAddress): - return NotImplemented - if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - self, other)) - if self._ip != other._ip: - return self._ip < other._ip - return False - - # Shorthand for Integer addition and subtraction. This is not - # meant to ever support addition/subtraction of addresses. - def __add__(self, other): - if not isinstance(other, int): - return NotImplemented - return self.__class__(int(self) + other) - - def __sub__(self, other): - if not isinstance(other, int): - return NotImplemented - return self.__class__(int(self) - other) - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, str(self)) - - def __str__(self): - return str(self._string_from_ip_int(self._ip)) - - def __hash__(self): - return hash(hex(int(self._ip))) - - def _get_address_key(self): - return (self._version, self) - - def __reduce__(self): - return self.__class__, (self._ip,) - - def __format__(self, fmt): - """Returns an IP address as a formatted string. - - Supported presentation types are: - 's': returns the IP address as a string (default) - 'b': converts to binary and returns a zero-padded string - 'X' or 'x': converts to upper- or lower-case hex and returns a zero-padded string - 'n': the same as 'b' for IPv4 and 'x' for IPv6 - - For binary and hex presentation types, the alternate form specifier - '#' and the grouping option '_' are supported. - """ - - # Support string formatting - if not fmt or fmt[-1] == 's': - return format(str(self), fmt) - - # From here on down, support for 'bnXx' - global _address_fmt_re - if _address_fmt_re is None: - import re - _address_fmt_re = re.compile('(#?)(_?)([xbnX])') - - m = _address_fmt_re.fullmatch(fmt) - if not m: - return super().__format__(fmt) - - alternate, grouping, fmt_base = m.groups() - - # Set some defaults - if fmt_base == 'n': - if self._version == 4: - fmt_base = 'b' # Binary is default for ipv4 - else: - fmt_base = 'x' # Hex is default for ipv6 - - if fmt_base == 'b': - padlen = self._max_prefixlen - else: - padlen = self._max_prefixlen // 4 - - if grouping: - padlen += padlen // 4 - 1 - - if alternate: - padlen += 2 # 0b or 0x - - return format(int(self), f'{alternate}0{padlen}{grouping}{fmt_base}') - - -@functools.total_ordering -class _BaseNetwork(_IPAddressBase): - """A generic IP network object. - - This IP class contains the version independent methods which are - used by networks. - """ - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, str(self)) - - def __str__(self): - return '%s/%d' % (self.network_address, self.prefixlen) - - def hosts(self): - """Generate Iterator over usable hosts in a network. - - This is like __iter__ except it doesn't return the network - or broadcast addresses. 
- - """ - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in range(network + 1, broadcast): - yield self._address_class(x) - - def __iter__(self): - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in range(network, broadcast + 1): - yield self._address_class(x) - - def __getitem__(self, n): - network = int(self.network_address) - broadcast = int(self.broadcast_address) - if n >= 0: - if network + n > broadcast: - raise IndexError('address out of range') - return self._address_class(network + n) - else: - n += 1 - if broadcast + n < network: - raise IndexError('address out of range') - return self._address_class(broadcast + n) - - def __lt__(self, other): - if not isinstance(other, _BaseNetwork): - return NotImplemented - if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - self, other)) - if self.network_address != other.network_address: - return self.network_address < other.network_address - if self.netmask != other.netmask: - return self.netmask < other.netmask - return False - - def __eq__(self, other): - try: - return (self._version == other._version and - self.network_address == other.network_address and - int(self.netmask) == int(other.netmask)) - except AttributeError: - return NotImplemented - - def __hash__(self): - return hash((int(self.network_address), int(self.netmask))) - - def __contains__(self, other): - # always false if one is v4 and the other is v6. - if self._version != other._version: - return False - # dealing with another network. - if isinstance(other, _BaseNetwork): - return False - # dealing with another address - else: - # address - return other._ip & self.netmask._ip == self.network_address._ip - - def overlaps(self, other): - """Tell if self is partly contained in other.""" - return self.network_address in other or ( - self.broadcast_address in other or ( - other.network_address in self or ( - other.broadcast_address in self))) - - @functools.cached_property - def broadcast_address(self): - return self._address_class(int(self.network_address) | - int(self.hostmask)) - - @functools.cached_property - def hostmask(self): - return self._address_class(int(self.netmask) ^ self._ALL_ONES) - - @property - def with_prefixlen(self): - return '%s/%d' % (self.network_address, self._prefixlen) - - @property - def with_netmask(self): - return '%s/%s' % (self.network_address, self.netmask) - - @property - def with_hostmask(self): - return '%s/%s' % (self.network_address, self.hostmask) - - @property - def num_addresses(self): - """Number of hosts in the current subnet.""" - return int(self.broadcast_address) - int(self.network_address) + 1 - - @property - def _address_class(self): - # Returning bare address objects (rather than interfaces) allows for - # more consistent behaviour across the network address, broadcast - # address and individual host addresses. - msg = '%200s has no associated address class' % (type(self),) - raise NotImplementedError(msg) - - @property - def prefixlen(self): - return self._prefixlen - - def address_exclude(self, other): - """Remove an address from a larger block. 
- - For example: - - addr1 = ip_network('192.0.2.0/28') - addr2 = ip_network('192.0.2.1/32') - list(addr1.address_exclude(addr2)) = - [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), - IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] - - or IPv6: - - addr1 = ip_network('2001:db8::1/32') - addr2 = ip_network('2001:db8::1/128') - list(addr1.address_exclude(addr2)) = - [ip_network('2001:db8::1/128'), - ip_network('2001:db8::2/127'), - ip_network('2001:db8::4/126'), - ip_network('2001:db8::8/125'), - ... - ip_network('2001:db8:8000::/33')] - - Args: - other: An IPv4Network or IPv6Network object of the same type. - - Returns: - An iterator of the IPv(4|6)Network objects which is self - minus other. - - Raises: - TypeError: If self and other are of differing address - versions, or if other is not a network object. - ValueError: If other is not completely contained by self. - - """ - if not self._version == other._version: - raise TypeError("%s and %s are not of the same version" % ( - self, other)) - - if not isinstance(other, _BaseNetwork): - raise TypeError("%s is not a network object" % other) - - if not other.subnet_of(self): - raise ValueError('%s not contained in %s' % (other, self)) - if other == self: - return - - # Make sure we're comparing the network of other. - other = other.__class__('%s/%s' % (other.network_address, - other.prefixlen)) - - s1, s2 = self.subnets() - while s1 != other and s2 != other: - if other.subnet_of(s1): - yield s2 - s1, s2 = s1.subnets() - elif other.subnet_of(s2): - yield s1 - s1, s2 = s2.subnets() - else: - # If we got here, there's a bug somewhere. - raise AssertionError('Error performing exclusion: ' - 's1: %s s2: %s other: %s' % - (s1, s2, other)) - if s1 == other: - yield s2 - elif s2 == other: - yield s1 - else: - # If we got here, there's a bug somewhere. - raise AssertionError('Error performing exclusion: ' - 's1: %s s2: %s other: %s' % - (s1, s2, other)) - - def compare_networks(self, other): - """Compare two IP objects. - - This is only concerned about the comparison of the integer - representation of the network addresses. This means that the - host bits aren't considered at all in this method. If you want - to compare host bits, you can easily enough do a - 'HostA._ip < HostB._ip' - - Args: - other: An IP object. - - Returns: - If the IP versions of self and other are the same, returns: - - -1 if self < other: - eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') - IPv6Network('2001:db8::1000/124') < - IPv6Network('2001:db8::2000/124') - 0 if self == other - eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') - IPv6Network('2001:db8::1000/124') == - IPv6Network('2001:db8::1000/124') - 1 if self > other - eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') - IPv6Network('2001:db8::2000/124') > - IPv6Network('2001:db8::1000/124') - - Raises: - TypeError if the IP versions are different. - - """ - # does this need to raise a ValueError? - if self._version != other._version: - raise TypeError('%s and %s are not of the same type' % ( - self, other)) - # self._version == other._version below here: - if self.network_address < other.network_address: - return -1 - if self.network_address > other.network_address: - return 1 - # self.network_address == other.network_address below here: - if self.netmask < other.netmask: - return -1 - if self.netmask > other.netmask: - return 1 - return 0 - - def _get_networks_key(self): - """Network-only key function. 
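# Minimal sketch of the ordering semantics above (stdlib ipaddress
# assumed): comparison considers only (version, network_address, netmask),
# never host bits.
import ipaddress

a = ipaddress.ip_network('192.0.2.0/25')
b = ipaddress.ip_network('192.0.2.128/25')
a.compare_networks(b)   # -1: the lower network address sorts first
sorted([b, a])          # [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]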
- - Returns an object that identifies this address' network and - netmask. This function is a suitable "key" argument for sorted() - and list.sort(). - - """ - return (self._version, self.network_address, self.netmask) - - def subnets(self, prefixlen_diff=1, new_prefix=None): - """The subnets which join to make the current subnet. - - In the case that self contains only one IP - (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 - for IPv6), yield an iterator with just ourself. - - Args: - prefixlen_diff: An integer, the amount the prefix length - should be increased by. This should not be set if - new_prefix is also set. - new_prefix: The desired new prefix length. This must be a - larger number (smaller prefix) than the existing prefix. - This should not be set if prefixlen_diff is also set. - - Returns: - An iterator of IPv(4|6) objects. - - Raises: - ValueError: The prefixlen_diff is too small or too large. - OR - prefixlen_diff and new_prefix are both set or new_prefix - is a smaller number than the current prefix (smaller - number means a larger network) - - """ - if self._prefixlen == self._max_prefixlen: - yield self - return - - if new_prefix is not None: - if new_prefix < self._prefixlen: - raise ValueError('new prefix must be longer') - if prefixlen_diff != 1: - raise ValueError('cannot set prefixlen_diff and new_prefix') - prefixlen_diff = new_prefix - self._prefixlen - - if prefixlen_diff < 0: - raise ValueError('prefix length diff must be > 0') - new_prefixlen = self._prefixlen + prefixlen_diff - - if new_prefixlen > self._max_prefixlen: - raise ValueError( - 'prefix length diff %d is invalid for netblock %s' % ( - new_prefixlen, self)) - - start = int(self.network_address) - end = int(self.broadcast_address) + 1 - step = (int(self.hostmask) + 1) >> prefixlen_diff - for new_addr in range(start, end, step): - current = self.__class__((new_addr, new_prefixlen)) - yield current - - def supernet(self, prefixlen_diff=1, new_prefix=None): - """The supernet containing the current network. - - Args: - prefixlen_diff: An integer, the amount the prefix length of - the network should be decreased by. For example, given a - /24 network and a prefixlen_diff of 3, a supernet with a - /21 netmask is returned. - - Returns: - An IPv4 network object. - - Raises: - ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have - a negative prefix length. - OR - If prefixlen_diff and new_prefix are both set or new_prefix is a - larger number than the current prefix (larger number means a - smaller network) - - """ - if self._prefixlen == 0: - return self - - if new_prefix is not None: - if new_prefix > self._prefixlen: - raise ValueError('new prefix must be shorter') - if prefixlen_diff != 1: - raise ValueError('cannot set prefixlen_diff and new_prefix') - prefixlen_diff = self._prefixlen - new_prefix - - new_prefixlen = self.prefixlen - prefixlen_diff - if new_prefixlen < 0: - raise ValueError( - 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % - (self.prefixlen, prefixlen_diff)) - return self.__class__(( - int(self.network_address) & (int(self.netmask) << prefixlen_diff), - new_prefixlen - )) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is a multicast address. - See RFC 2373 2.7 for details. 
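# Sketch of subnets()/supernet() from the code above, assuming the stdlib
# ipaddress module; prefixlen_diff and new_prefix are mutually exclusive.
import ipaddress

net = ipaddress.ip_network('192.0.2.0/24')
list(net.subnets())                 # two /25 subnets
list(net.subnets(new_prefix=26))    # four /26 subnets
net.supernet(prefixlen_diff=2)      # IPv4Network('192.0.0.0/22')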
- - """ - return (self.network_address.is_multicast and - self.broadcast_address.is_multicast) - - @staticmethod - def _is_subnet_of(a, b): - try: - # Always false if one is v4 and the other is v6. - if a._version != b._version: - raise TypeError(f"{a} and {b} are not of the same version") - return (b.network_address <= a.network_address and - b.broadcast_address >= a.broadcast_address) - except AttributeError: - raise TypeError(f"Unable to test subnet containment " - f"between {a} and {b}") - - def subnet_of(self, other): - """Return True if this network is a subnet of other.""" - return self._is_subnet_of(self, other) - - def supernet_of(self, other): - """Return True if this network is a supernet of other.""" - return self._is_subnet_of(other, self) - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within one of the - reserved IPv6 Network ranges. - - """ - return (self.network_address.is_reserved and - self.broadcast_address.is_reserved) - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is reserved per RFC 4291. - - """ - return (self.network_address.is_link_local and - self.broadcast_address.is_link_local) - - @property - def is_private(self): - """Test if this network belongs to a private range. - - Returns: - A boolean, True if the network is reserved per - iana-ipv4-special-registry or iana-ipv6-special-registry. - - """ - return any(self.network_address in priv_network and - self.broadcast_address in priv_network - for priv_network in self._constants._private_networks) and all( - self.network_address not in network and - self.broadcast_address not in network - for network in self._constants._private_networks_exceptions - ) - - @property - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, True if the address is not reserved per - iana-ipv4-special-registry or iana-ipv6-special-registry. - - """ - return not self.is_private - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 2373 2.5.2. - - """ - return (self.network_address.is_unspecified and - self.broadcast_address.is_unspecified) - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback address as defined in - RFC 2373 2.5.3. - - """ - return (self.network_address.is_loopback and - self.broadcast_address.is_loopback) - - -class _BaseConstants: - - _private_networks = [] - - -_BaseNetwork._constants = _BaseConstants - - -class _BaseV4: - - """Base IPv4 object. - - The following methods are used by IPv4 objects in both single IP - addresses and networks. - - """ - - __slots__ = () - _version = 4 - # Equivalent to 255.255.255.255 or 32 bits of 1's. - _ALL_ONES = (2**IPV4LENGTH) - 1 - - _max_prefixlen = IPV4LENGTH - # There are only a handful of valid v4 netmasks, so we cache them all - # when constructed (see _make_netmask()). - _netmask_cache = {} - - def _explode_shorthand_ip_string(self): - return str(self) - - @classmethod - def _make_netmask(cls, arg): - """Make a (netmask, prefix_len) tuple from the given argument. - - Argument can be: - - an integer (the prefix length) - - a string representing the prefix length (e.g. "24") - - a string representing the prefix netmask (e.g. 
"255.255.255.0") - """ - if arg not in cls._netmask_cache: - if isinstance(arg, int): - prefixlen = arg - if not (0 <= prefixlen <= cls._max_prefixlen): - cls._report_invalid_netmask(prefixlen) - else: - try: - # Check for a netmask in prefix length form - prefixlen = cls._prefix_from_prefix_string(arg) - except NetmaskValueError: - # Check for a netmask or hostmask in dotted-quad form. - # This may raise NetmaskValueError. - prefixlen = cls._prefix_from_ip_string(arg) - netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) - cls._netmask_cache[arg] = netmask, prefixlen - return cls._netmask_cache[arg] - - @classmethod - def _ip_int_from_string(cls, ip_str): - """Turn the given IP string into an integer for comparison. - - Args: - ip_str: A string, the IP ip_str. - - Returns: - The IP ip_str as an integer. - - Raises: - AddressValueError: if ip_str isn't a valid IPv4 Address. - - """ - if not ip_str: - raise AddressValueError('Address cannot be empty') - - octets = ip_str.split('.') - if len(octets) != 4: - raise AddressValueError("Expected 4 octets in %r" % ip_str) - - try: - return int.from_bytes(map(cls._parse_octet, octets), 'big') - except ValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) from None - - @classmethod - def _parse_octet(cls, octet_str): - """Convert a decimal octet into an integer. - - Args: - octet_str: A string, the number to parse. - - Returns: - The octet as an integer. - - Raises: - ValueError: if the octet isn't strictly a decimal from [0..255]. - - """ - if not octet_str: - raise ValueError("Empty octet not permitted") - # Reject non-ASCII digits. - if not (octet_str.isascii() and octet_str.isdigit()): - msg = "Only decimal digits permitted in %r" - raise ValueError(msg % octet_str) - # We do the length check second, since the invalid character error - # is likely to be more informative for the user - if len(octet_str) > 3: - msg = "At most 3 characters permitted in %r" - raise ValueError(msg % octet_str) - # Handle leading zeros as strict as glibc's inet_pton() - # See security bug bpo-36384 - if octet_str != '0' and octet_str[0] == '0': - msg = "Leading zeros are not permitted in %r" - raise ValueError(msg % octet_str) - # Convert to integer (we know digits are legal) - octet_int = int(octet_str, 10) - if octet_int > 255: - raise ValueError("Octet %d (> 255) not permitted" % octet_int) - return octet_int - - @classmethod - def _string_from_ip_int(cls, ip_int): - """Turns a 32-bit integer into dotted decimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - The IP address as a string in dotted decimal notation. - - """ - return '.'.join(map(str, ip_int.to_bytes(4, 'big'))) - - def _reverse_pointer(self): - """Return the reverse DNS pointer name for the IPv4 address. - - This implements the method described in RFC1035 3.5. - - """ - reverse_octets = str(self).split('.')[::-1] - return '.'.join(reverse_octets) + '.in-addr.arpa' - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def version(self): - return self._version - - -class IPv4Address(_BaseV4, _BaseAddress): - - """Represent and manipulate single IPv4 Addresses.""" - - __slots__ = ('_ip', '__weakref__') - - def __init__(self, address): - - """ - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv4Address('192.0.2.1') == IPv4Address(3221225985). 
- or, more generally - IPv4Address(int(IPv4Address('192.0.2.1'))) == - IPv4Address('192.0.2.1') - - Raises: - AddressValueError: If ipaddress isn't a valid IPv4 address. - - """ - # Efficient constructor from integer. - if isinstance(address, int): - self._check_int_address(address) - self._ip = address - return - - # Constructing from a packed address - if isinstance(address, bytes): - self._check_packed_address(address, 4) - self._ip = int.from_bytes(address) # big endian - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. - addr_str = str(address) - if '/' in addr_str: - raise AddressValueError(f"Unexpected '/' in {address!r}") - self._ip = self._ip_int_from_string(addr_str) - - @property - def packed(self): - """The binary representation of this address.""" - return v4_int_to_packed(self._ip) - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within the - reserved IPv4 Network range. - - """ - return self in self._constants._reserved_network - - @property - @functools.lru_cache() - def is_private(self): - """``True`` if the address is defined as not globally reachable by - iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ - (for IPv6) with the following exceptions: - - * ``is_private`` is ``False`` for ``100.64.0.0/10`` - * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the - semantics of the underlying IPv4 addresses and the following condition holds - (see :attr:`IPv6Address.ipv4_mapped`):: - - address.is_private == address.ipv4_mapped.is_private - - ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10`` - IPv4 range where they are both ``False``. - """ - return ( - any(self in net for net in self._constants._private_networks) - and all(self not in net for net in self._constants._private_networks_exceptions) - ) - - @property - @functools.lru_cache() - def is_global(self): - """``True`` if the address is defined as globally reachable by - iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ - (for IPv6) with the following exception: - - For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the - semantics of the underlying IPv4 addresses and the following condition holds - (see :attr:`IPv6Address.ipv4_mapped`):: - - address.is_global == address.ipv4_mapped.is_global - - ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10`` - IPv4 range where they are both ``False``. - """ - return self not in self._constants._public_network and not self.is_private - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is multicast. - See RFC 3171 for details. - - """ - return self in self._constants._multicast_network - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 5735 3. - - """ - return self == self._constants._unspecified_address - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback per RFC 3330. - - """ - return self in self._constants._loopback_network - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. 
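# Quick sketch of the registry-based checks above (stdlib ipaddress
# assumed). Note the documented 100.64.0.0/10 exception, where is_private
# and is_global are both False.
import ipaddress

ipaddress.ip_address('10.1.2.3').is_private   # True (10.0.0.0/8)
ipaddress.ip_address('10.1.2.3').is_global    # False
cgn = ipaddress.ip_address('100.64.0.1')      # carrier-grade NAT range
cgn.is_private, cgn.is_global                 # (False, False)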
- 
- Returns:
- A boolean, True if the address is link-local per RFC 3927.
- 
- """
- return self in self._constants._linklocal_network
- 
- @property
- def ipv6_mapped(self):
- """Return the IPv4-mapped IPv6 address.
- 
- Returns:
- The IPv4-mapped IPv6 address per RFC 4291.
- 
- """
- return IPv6Address(f'::ffff:{self}')
- 
- 
-class IPv4Interface(IPv4Address):
- 
- def __init__(self, address):
- addr, mask = self._split_addr_prefix(address)
- 
- IPv4Address.__init__(self, addr)
- self.network = IPv4Network((addr, mask), strict=False)
- self.netmask = self.network.netmask
- self._prefixlen = self.network._prefixlen
- 
- @functools.cached_property
- def hostmask(self):
- return self.network.hostmask
- 
- def __str__(self):
- return '%s/%d' % (self._string_from_ip_int(self._ip),
- self._prefixlen)
- 
- def __eq__(self, other):
- address_equal = IPv4Address.__eq__(self, other)
- if address_equal is NotImplemented or not address_equal:
- return address_equal
- try:
- return self.network == other.network
- except AttributeError:
- # An interface with an associated network is NOT the
- # same as an unassociated address. That's why the hash
- # takes the extra info into account.
- return False
- 
- def __lt__(self, other):
- address_less = IPv4Address.__lt__(self, other)
- if address_less is NotImplemented:
- return NotImplemented
- try:
- return (self.network < other.network or
- self.network == other.network and address_less)
- except AttributeError:
- # We *do* allow addresses and interfaces to be sorted. The
- # unassociated address is considered less than all interfaces.
- return False
- 
- def __hash__(self):
- return hash((self._ip, self._prefixlen, int(self.network.network_address)))
- 
- __reduce__ = _IPAddressBase.__reduce__
- 
- @property
- def ip(self):
- return IPv4Address(self._ip)
- 
- @property
- def with_prefixlen(self):
- return '%s/%s' % (self._string_from_ip_int(self._ip),
- self._prefixlen)
- 
- @property
- def with_netmask(self):
- return '%s/%s' % (self._string_from_ip_int(self._ip),
- self.netmask)
- 
- @property
- def with_hostmask(self):
- return '%s/%s' % (self._string_from_ip_int(self._ip),
- self.hostmask)
- 
- 
-class IPv4Network(_BaseV4, _BaseNetwork):
- 
- """This class represents and manipulates 32-bit IPv4 network + addresses.
- 
- Attributes: [examples for IPv4Network('192.0.2.0/27')]
- .network_address: IPv4Address('192.0.2.0')
- .hostmask: IPv4Address('0.0.0.31')
- .broadcast_address: IPv4Address('192.0.2.31')
- .netmask: IPv4Address('255.255.255.224')
- .prefixlen: 27
- 
- """
- # Class to use when creating address objects
- _address_class = IPv4Address
- 
- def __init__(self, address, strict=True):
- """Instantiate a new IPv4 network object.
- 
- Args:
- address: A string or integer representing the IP [& network].
- '192.0.2.0/24'
- '192.0.2.0/255.255.255.0'
- '192.0.2.0/0.0.0.255'
- are all functionally the same in IPv4. Similarly,
- '192.0.2.1'
- '192.0.2.1/255.255.255.255'
- '192.0.2.1/32'
- are also functionally equivalent. That is to say, failing to
- provide a subnetmask will create an object with a mask of /32.
- 
- If the mask (portion after the / in the argument) is given in
- dotted quad form, it is treated as a netmask if it starts with a
- non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
- starts with a zero field (e.g. 0.255.255.255 == /8), with the
- single exception of an all-zero mask which is treated as a
- netmask == /0. If no mask is given, a default of /32 is used.
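# The three equivalent mask notations described above, as a sketch
# (stdlib ipaddress assumed):
import ipaddress

ipaddress.IPv4Network('192.0.2.0/24')             # prefix length
ipaddress.IPv4Network('192.0.2.0/255.255.255.0')  # dotted-quad netmask
ipaddress.IPv4Network('192.0.2.0/0.0.0.255')      # dotted-quad hostmask
# all three compare equal: IPv4Network('192.0.2.0/24')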
- - Additionally, an integer can be passed, so - IPv4Network('192.0.2.1') == IPv4Network(3221225985) - or, more generally - IPv4Interface(int(IPv4Interface('192.0.2.1'))) == - IPv4Interface('192.0.2.1') - - Raises: - AddressValueError: If ipaddress isn't a valid IPv4 address. - NetmaskValueError: If the netmask isn't valid for - an IPv4 address. - ValueError: If strict is True and a network address is not - supplied. - """ - addr, mask = self._split_addr_prefix(address) - - self.network_address = IPv4Address(addr) - self.netmask, self._prefixlen = self._make_netmask(mask) - packed = int(self.network_address) - if packed & int(self.netmask) != packed: - if strict: - raise ValueError('%s has host bits set' % self) - else: - self.network_address = IPv4Address(packed & - int(self.netmask)) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - elif self._prefixlen == (self._max_prefixlen): - self.hosts = lambda: iter((IPv4Address(addr),)) - - @property - @functools.lru_cache() - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, True if the address is not reserved per - iana-ipv4-special-registry. - - """ - return (not (self.network_address in IPv4Network('100.64.0.0/10') and - self.broadcast_address in IPv4Network('100.64.0.0/10')) and - not self.is_private) - - -class _IPv4Constants: - _linklocal_network = IPv4Network('169.254.0.0/16') - - _loopback_network = IPv4Network('127.0.0.0/8') - - _multicast_network = IPv4Network('224.0.0.0/4') - - _public_network = IPv4Network('100.64.0.0/10') - - # Not globally reachable address blocks listed on - # https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml - _private_networks = [ - IPv4Network('0.0.0.0/8'), - IPv4Network('10.0.0.0/8'), - IPv4Network('127.0.0.0/8'), - IPv4Network('169.254.0.0/16'), - IPv4Network('172.16.0.0/12'), - IPv4Network('192.0.0.0/24'), - IPv4Network('192.0.0.170/31'), - IPv4Network('192.0.2.0/24'), - IPv4Network('192.168.0.0/16'), - IPv4Network('198.18.0.0/15'), - IPv4Network('198.51.100.0/24'), - IPv4Network('203.0.113.0/24'), - IPv4Network('240.0.0.0/4'), - IPv4Network('255.255.255.255/32'), - ] - - _private_networks_exceptions = [ - IPv4Network('192.0.0.9/32'), - IPv4Network('192.0.0.10/32'), - ] - - _reserved_network = IPv4Network('240.0.0.0/4') - - _unspecified_address = IPv4Address('0.0.0.0') - - -IPv4Address._constants = _IPv4Constants -IPv4Network._constants = _IPv4Constants - - -class _BaseV6: - - """Base IPv6 object. - - The following methods are used by IPv6 objects in both single IP - addresses and networks. - - """ - - __slots__ = () - _version = 6 - _ALL_ONES = (2**IPV6LENGTH) - 1 - _HEXTET_COUNT = 8 - _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef') - _max_prefixlen = IPV6LENGTH - - # There are only a bunch of valid v6 netmasks, so we cache them all - # when constructed (see _make_netmask()). - _netmask_cache = {} - - @classmethod - def _make_netmask(cls, arg): - """Make a (netmask, prefix_len) tuple from the given argument. - - Argument can be: - - an integer (the prefix length) - - a string representing the prefix length (e.g. "24") - - a string representing the prefix netmask (e.g. 
"255.255.255.0") - """ - if arg not in cls._netmask_cache: - if isinstance(arg, int): - prefixlen = arg - if not (0 <= prefixlen <= cls._max_prefixlen): - cls._report_invalid_netmask(prefixlen) - else: - prefixlen = cls._prefix_from_prefix_string(arg) - netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) - cls._netmask_cache[arg] = netmask, prefixlen - return cls._netmask_cache[arg] - - @classmethod - def _ip_int_from_string(cls, ip_str): - """Turn an IPv6 ip_str into an integer. - - Args: - ip_str: A string, the IPv6 ip_str. - - Returns: - An int, the IPv6 address - - Raises: - AddressValueError: if ip_str isn't a valid IPv6 Address. - - """ - if not ip_str: - raise AddressValueError('Address cannot be empty') - if len(ip_str) > 45: - shorten = ip_str - if len(shorten) > 100: - shorten = f'{ip_str[:45]}({len(ip_str)-90} chars elided){ip_str[-45:]}' - raise AddressValueError(f"At most 45 characters expected in " - f"{shorten!r}") - - # We want to allow more parts than the max to be 'split' - # to preserve the correct error message when there are - # too many parts combined with '::' - _max_parts = cls._HEXTET_COUNT + 1 - parts = ip_str.split(':', maxsplit=_max_parts) - - # An IPv6 address needs at least 2 colons (3 parts). - _min_parts = 3 - if len(parts) < _min_parts: - msg = "At least %d parts expected in %r" % (_min_parts, ip_str) - raise AddressValueError(msg) - - # If the address has an IPv4-style suffix, convert it to hexadecimal. - if '.' in parts[-1]: - try: - ipv4_int = IPv4Address(parts.pop())._ip - except AddressValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) from None - parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) - parts.append('%x' % (ipv4_int & 0xFFFF)) - - # An IPv6 address can't have more than 8 colons (9 parts). - # The extra colon comes from using the "::" notation for a single - # leading or trailing zero part. - if len(parts) > _max_parts: - msg = "At most %d colons permitted in %r" % (_max_parts-1, ip_str) - raise AddressValueError(msg) - - # Disregarding the endpoints, find '::' with nothing in between. - # This indicates that a run of zeroes has been skipped. - skip_index = None - for i in range(1, len(parts) - 1): - if not parts[i]: - if skip_index is not None: - # Can't have more than one '::' - msg = "At most one '::' permitted in %r" % ip_str - raise AddressValueError(msg) - skip_index = i - - # parts_hi is the number of parts to copy from above/before the '::' - # parts_lo is the number of parts to copy from below/after the '::' - if skip_index is not None: - # If we found a '::', then check if it also covers the endpoints. - parts_hi = skip_index - parts_lo = len(parts) - skip_index - 1 - if not parts[0]: - parts_hi -= 1 - if parts_hi: - msg = "Leading ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # ^: requires ^:: - if not parts[-1]: - parts_lo -= 1 - if parts_lo: - msg = "Trailing ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # :$ requires ::$ - parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) - if parts_skipped < 1: - msg = "Expected at most %d other parts with '::' in %r" - raise AddressValueError(msg % (cls._HEXTET_COUNT-1, ip_str)) - else: - # Otherwise, allocate the entire address to parts_hi. The - # endpoints could still be empty, but _parse_hextet() will check - # for that. 
- if len(parts) != cls._HEXTET_COUNT: - msg = "Exactly %d parts expected without '::' in %r" - raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) - if not parts[0]: - msg = "Leading ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # ^: requires ^:: - if not parts[-1]: - msg = "Trailing ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # :$ requires ::$ - parts_hi = len(parts) - parts_lo = 0 - parts_skipped = 0 - - try: - # Now, parse the hextets into a 128-bit integer. - ip_int = 0 - for i in range(parts_hi): - ip_int <<= 16 - ip_int |= cls._parse_hextet(parts[i]) - ip_int <<= 16 * parts_skipped - for i in range(-parts_lo, 0): - ip_int <<= 16 - ip_int |= cls._parse_hextet(parts[i]) - return ip_int - except ValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) from None - - @classmethod - def _parse_hextet(cls, hextet_str): - """Convert an IPv6 hextet string into an integer. - - Args: - hextet_str: A string, the number to parse. - - Returns: - The hextet as an integer. - - Raises: - ValueError: if the input isn't strictly a hex number from - [0..FFFF]. - - """ - # Reject non-ASCII digits. - if not cls._HEX_DIGITS.issuperset(hextet_str): - raise ValueError("Only hex digits permitted in %r" % hextet_str) - # We do the length check second, since the invalid character error - # is likely to be more informative for the user - if len(hextet_str) > 4: - msg = "At most 4 characters permitted in %r" - raise ValueError(msg % hextet_str) - # Length check means we can skip checking the integer value - return int(hextet_str, 16) - - @classmethod - def _compress_hextets(cls, hextets): - """Compresses a list of hextets. - - Compresses a list of strings, replacing the longest continuous - sequence of "0" in the list with "" and adding empty strings at - the beginning or at the end of the string such that subsequently - calling ":".join(hextets) will produce the compressed version of - the IPv6 address. - - Args: - hextets: A list of strings, the hextets to compress. - - Returns: - A list of strings. - - """ - best_doublecolon_start = -1 - best_doublecolon_len = 0 - doublecolon_start = -1 - doublecolon_len = 0 - for index, hextet in enumerate(hextets): - if hextet == '0': - doublecolon_len += 1 - if doublecolon_start == -1: - # Start of a sequence of zeros. - doublecolon_start = index - if doublecolon_len > best_doublecolon_len: - # This is the longest sequence of zeros so far. - best_doublecolon_len = doublecolon_len - best_doublecolon_start = doublecolon_start - else: - doublecolon_len = 0 - doublecolon_start = -1 - - if best_doublecolon_len > 1: - best_doublecolon_end = (best_doublecolon_start + - best_doublecolon_len) - # For zeros at the end of the address. - if best_doublecolon_end == len(hextets): - hextets += [''] - hextets[best_doublecolon_start:best_doublecolon_end] = [''] - # For zeros at the beginning of the address. - if best_doublecolon_start == 0: - hextets = [''] + hextets - - return hextets - - @classmethod - def _string_from_ip_int(cls, ip_int=None): - """Turns a 128-bit integer into hexadecimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - A string, the hexadecimal representation of the address. - - Raises: - ValueError: The address is bigger than 128 bits of all ones. 
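# Sketch of the '::' compression implemented by _compress_hextets above,
# assuming the stdlib ipaddress module: the longest run of zero hextets is
# collapsed on output, while .exploded restores the full form.
import ipaddress

a = ipaddress.IPv6Address('2001:0db8:0000:0000:0000:0000:0000:0001')
str(a)        # '2001:db8::1'
a.exploded    # '2001:0db8:0000:0000:0000:0000:0000:0001'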
- - """ - if ip_int is None: - ip_int = int(cls._ip) - - if ip_int > cls._ALL_ONES: - raise ValueError('IPv6 address is too large') - - hex_str = '%032x' % ip_int - hextets = ['%x' % int(hex_str[x:x+4], 16) for x in range(0, 32, 4)] - - hextets = cls._compress_hextets(hextets) - return ':'.join(hextets) - - def _explode_shorthand_ip_string(self): - """Expand a shortened IPv6 address. - - Returns: - A string, the expanded IPv6 address. - - """ - if isinstance(self, IPv6Network): - ip_str = str(self.network_address) - elif isinstance(self, IPv6Interface): - ip_str = str(self.ip) - else: - ip_str = str(self) - - ip_int = self._ip_int_from_string(ip_str) - hex_str = '%032x' % ip_int - parts = [hex_str[x:x+4] for x in range(0, 32, 4)] - if isinstance(self, (_BaseNetwork, IPv6Interface)): - return '%s/%d' % (':'.join(parts), self._prefixlen) - return ':'.join(parts) - - def _reverse_pointer(self): - """Return the reverse DNS pointer name for the IPv6 address. - - This implements the method described in RFC3596 2.5. - - """ - reverse_chars = self.exploded[::-1].replace(':', '') - return '.'.join(reverse_chars) + '.ip6.arpa' - - @staticmethod - def _split_scope_id(ip_str): - """Helper function to parse IPv6 string address with scope id. - - See RFC 4007 for details. - - Args: - ip_str: A string, the IPv6 address. - - Returns: - (addr, scope_id) tuple. - - """ - addr, sep, scope_id = ip_str.partition('%') - if not sep: - scope_id = None - elif not scope_id or '%' in scope_id: - raise AddressValueError('Invalid IPv6 address: "%r"' % ip_str) - return addr, scope_id - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def version(self): - return self._version - - -class IPv6Address(_BaseV6, _BaseAddress): - - """Represent and manipulate single IPv6 Addresses.""" - - __slots__ = ('_ip', '_scope_id', '__weakref__') - - def __init__(self, address): - """Instantiate a new IPv6 address object. - - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv6Address('2001:db8::') == - IPv6Address(42540766411282592856903984951653826560) - or, more generally - IPv6Address(int(IPv6Address('2001:db8::'))) == - IPv6Address('2001:db8::') - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - - """ - # Efficient constructor from integer. - if isinstance(address, int): - self._check_int_address(address) - self._ip = address - self._scope_id = None - return - - # Constructing from a packed address - if isinstance(address, bytes): - self._check_packed_address(address, 16) - self._ip = int.from_bytes(address, 'big') - self._scope_id = None - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. 
- addr_str = str(address) - if '/' in addr_str: - raise AddressValueError(f"Unexpected '/' in {address!r}") - addr_str, self._scope_id = self._split_scope_id(addr_str) - - self._ip = self._ip_int_from_string(addr_str) - - def _explode_shorthand_ip_string(self): - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is None: - return super()._explode_shorthand_ip_string() - prefix_len = 30 - raw_exploded_str = super()._explode_shorthand_ip_string() - return f"{raw_exploded_str[:prefix_len]}{ipv4_mapped!s}" - - def _reverse_pointer(self): - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is None: - return super()._reverse_pointer() - prefix_len = 30 - raw_exploded_str = super()._explode_shorthand_ip_string()[:prefix_len] - # ipv4 encoded using hexadecimal nibbles instead of decimals - ipv4_int = ipv4_mapped._ip - reverse_chars = f"{raw_exploded_str}{ipv4_int:008x}"[::-1].replace(':', '') - return '.'.join(reverse_chars) + '.ip6.arpa' - - def _ipv4_mapped_ipv6_to_str(self): - """Return convenient text representation of IPv4-mapped IPv6 address - - See RFC 4291 2.5.5.2, 2.2 p.3 for details. - - Returns: - A string, 'x:x:x:x:x:x:d.d.d.d', where the 'x's are the hexadecimal values of - the six high-order 16-bit pieces of the address, and the 'd's are - the decimal values of the four low-order 8-bit pieces of the - address (standard IPv4 representation) as defined in RFC 4291 2.2 p.3. - - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is None: - raise AddressValueError("Can not apply to non-IPv4-mapped IPv6 address %s" % str(self)) - high_order_bits = self._ip >> 32 - return "%s:%s" % (self._string_from_ip_int(high_order_bits), str(ipv4_mapped)) - - def __str__(self): - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is None: - ip_str = super().__str__() - else: - ip_str = self._ipv4_mapped_ipv6_to_str() - return ip_str + '%' + self._scope_id if self._scope_id else ip_str - - def __hash__(self): - return hash((self._ip, self._scope_id)) - - def __eq__(self, other): - address_equal = super().__eq__(other) - if address_equal is NotImplemented: - return NotImplemented - if not address_equal: - return False - return self._scope_id == getattr(other, '_scope_id', None) - - def __reduce__(self): - return (self.__class__, (str(self),)) - - @property - def scope_id(self): - """Identifier of a particular zone of the address's scope. - - See RFC 4007 for details. - - Returns: - A string identifying the zone of the address if specified, else None. - - """ - return self._scope_id - - @property - def packed(self): - """The binary representation of this address.""" - return v6_int_to_packed(self._ip) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is a multicast address. - See RFC 2373 2.7 for details. - - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_multicast - return self in self._constants._multicast_network - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within one of the - reserved IPv6 Network ranges. - - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_reserved - return any(self in x for x in self._constants._reserved_networks) - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is reserved per RFC 4291. 
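# Scope-id handling from the constructor above, sketched against the
# stdlib ipaddress module (RFC 4007 zone identifiers):
import ipaddress

addr = ipaddress.IPv6Address('fe80::1%eth0')
addr.scope_id   # 'eth0'
str(addr)       # 'fe80::1%eth0' — the zone id round-trips through __str__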
- - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_link_local - return self in self._constants._linklocal_network - - @property - def is_site_local(self): - """Test if the address is reserved for site-local. - - Note that the site-local address space has been deprecated by RFC 3879. - Use is_private to test if this address is in the space of unique local - addresses as defined by RFC 4193. - - Returns: - A boolean, True if the address is reserved per RFC 3513 2.5.6. - - """ - return self in self._constants._sitelocal_network - - @property - @functools.lru_cache() - def is_private(self): - """``True`` if the address is defined as not globally reachable by - iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ - (for IPv6) with the following exceptions: - - * ``is_private`` is ``False`` for ``100.64.0.0/10`` - * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the - semantics of the underlying IPv4 addresses and the following condition holds - (see :attr:`IPv6Address.ipv4_mapped`):: - - address.is_private == address.ipv4_mapped.is_private - - ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10`` - IPv4 range where they are both ``False``. - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_private - return ( - any(self in net for net in self._constants._private_networks) - and all(self not in net for net in self._constants._private_networks_exceptions) - ) - - @property - def is_global(self): - """``True`` if the address is defined as globally reachable by - iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ - (for IPv6) with the following exception: - - For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the - semantics of the underlying IPv4 addresses and the following condition holds - (see :attr:`IPv6Address.ipv4_mapped`):: - - address.is_global == address.ipv4_mapped.is_global - - ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10`` - IPv4 range where they are both ``False``. - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_global - return not self.is_private - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 2373 2.5.2. - - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_unspecified - return self._ip == 0 - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback address as defined in - RFC 2373 2.5.3. - - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_loopback - return self._ip == 1 - - @property - def ipv4_mapped(self): - """Return the IPv4 mapped address. - - Returns: - If the IPv6 address is a v4 mapped address, return the - IPv4 mapped address. Return None otherwise. - - """ - if (self._ip >> 32) != 0xFFFF: - return None - return IPv4Address(self._ip & 0xFFFFFFFF) - - @property - def teredo(self): - """Tuple of embedded teredo IPs. 
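# Sketch of the IPv4-mapped delegation above (stdlib ipaddress assumed):
# classification of ::ffff:0:0/96 addresses follows the embedded IPv4
# address.
import ipaddress

m = ipaddress.IPv6Address('::ffff:10.0.0.1')
m.ipv4_mapped   # IPv4Address('10.0.0.1')
m.is_private    # True, taken from the mapped IPv4 address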
- - Returns: - Tuple of the (server, client) IPs or None if the address - doesn't appear to be a teredo address (doesn't start with - 2001::/32) - - """ - if (self._ip >> 96) != 0x20010000: - return None - return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), - IPv4Address(~self._ip & 0xFFFFFFFF)) - - @property - def sixtofour(self): - """Return the IPv4 6to4 embedded address. - - Returns: - The IPv4 6to4-embedded address if present or None if the - address doesn't appear to contain a 6to4 embedded address. - - """ - if (self._ip >> 112) != 0x2002: - return None - return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) - - -class IPv6Interface(IPv6Address): - - def __init__(self, address): - addr, mask = self._split_addr_prefix(address) - - IPv6Address.__init__(self, addr) - self.network = IPv6Network((addr, mask), strict=False) - self.netmask = self.network.netmask - self._prefixlen = self.network._prefixlen - - @functools.cached_property - def hostmask(self): - return self.network.hostmask - - def __str__(self): - return '%s/%d' % (super().__str__(), - self._prefixlen) - - def __eq__(self, other): - address_equal = IPv6Address.__eq__(self, other) - if address_equal is NotImplemented or not address_equal: - return address_equal - try: - return self.network == other.network - except AttributeError: - # An interface with an associated network is NOT the - # same as an unassociated address. That's why the hash - # takes the extra info into account. - return False - - def __lt__(self, other): - address_less = IPv6Address.__lt__(self, other) - if address_less is NotImplemented: - return address_less - try: - return (self.network < other.network or - self.network == other.network and address_less) - except AttributeError: - # We *do* allow addresses and interfaces to be sorted. The - # unassociated address is considered less than all interfaces. - return False - - def __hash__(self): - return hash((self._ip, self._prefixlen, int(self.network.network_address))) - - __reduce__ = _IPAddressBase.__reduce__ - - @property - def ip(self): - return IPv6Address(self._ip) - - @property - def with_prefixlen(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self._prefixlen) - - @property - def with_netmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.netmask) - - @property - def with_hostmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.hostmask) - - @property - def is_unspecified(self): - return self._ip == 0 and self.network.is_unspecified - - @property - def is_loopback(self): - return super().is_loopback and self.network.is_loopback - - -class IPv6Network(_BaseV6, _BaseNetwork): - - """This class represents and manipulates 128-bit IPv6 networks. - - Attributes: [examples for IPv6('2001:db8::1000/124')] - .network_address: IPv6Address('2001:db8::1000') - .hostmask: IPv6Address('::f') - .broadcast_address: IPv6Address('2001:db8::100f') - .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') - .prefixlen: 124 - - """ - - # Class to use when creating address objects - _address_class = IPv6Address - - def __init__(self, address, strict=True): - """Instantiate a new IPv6 Network object. - - Args: - address: A string or integer representing the IPv6 network or the - IP and prefix/netmask. - '2001:db8::/128' - '2001:db8:0000:0000:0000:0000:0000:0000/128' - '2001:db8::' - are all functionally the same in IPv6. That is to say, - failing to provide a subnetmask will create an object with - a mask of /128. 
- - Additionally, an integer can be passed, so - IPv6Network('2001:db8::') == - IPv6Network(42540766411282592856903984951653826560) - or, more generally - IPv6Network(int(IPv6Network('2001:db8::'))) == - IPv6Network('2001:db8::') - - strict: A boolean. If true, ensure that we have been passed - A true network address, eg, 2001:db8::1000/124 and not an - IP address on a network, eg, 2001:db8::1/124. - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - NetmaskValueError: If the netmask isn't valid for - an IPv6 address. - ValueError: If strict was True and a network address was not - supplied. - """ - addr, mask = self._split_addr_prefix(address) - - self.network_address = IPv6Address(addr) - self.netmask, self._prefixlen = self._make_netmask(mask) - packed = int(self.network_address) - if packed & int(self.netmask) != packed: - if strict: - raise ValueError('%s has host bits set' % self) - else: - self.network_address = IPv6Address(packed & - int(self.netmask)) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - elif self._prefixlen == self._max_prefixlen: - self.hosts = lambda: iter((IPv6Address(addr),)) - - def hosts(self): - """Generate Iterator over usable hosts in a network. - - This is like __iter__ except it doesn't return the - Subnet-Router anycast address. - - """ - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in range(network + 1, broadcast + 1): - yield self._address_class(x) - - @property - def is_site_local(self): - """Test if the address is reserved for site-local. - - Note that the site-local address space has been deprecated by RFC 3879. - Use is_private to test if this address is in the space of unique local - addresses as defined by RFC 4193. - - Returns: - A boolean, True if the address is reserved per RFC 3513 2.5.6. 
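# Sketch of the IPv6 hosts() override above (stdlib ipaddress assumed):
# unlike IPv4, only the Subnet-Router anycast address is skipped, since
# IPv6 has no broadcast address.
import ipaddress

net = ipaddress.ip_network('2001:db8::/126')
len(list(net))          # 4 addresses in total
len(list(net.hosts()))  # 3 — only 2001:db8:: (the anycast address) is skipped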
- - """ - return (self.network_address.is_site_local and - self.broadcast_address.is_site_local) - - -class _IPv6Constants: - - _linklocal_network = IPv6Network('fe80::/10') - - _multicast_network = IPv6Network('ff00::/8') - - # Not globally reachable address blocks listed on - # https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml - _private_networks = [ - IPv6Network('::1/128'), - IPv6Network('::/128'), - IPv6Network('::ffff:0:0/96'), - IPv6Network('64:ff9b:1::/48'), - IPv6Network('100::/64'), - IPv6Network('2001::/23'), - IPv6Network('2001:db8::/32'), - # IANA says N/A, let's consider it not globally reachable to be safe - IPv6Network('2002::/16'), - # RFC 9637: https://www.rfc-editor.org/rfc/rfc9637.html#section-6-2.2 - IPv6Network('3fff::/20'), - IPv6Network('fc00::/7'), - IPv6Network('fe80::/10'), - ] - - _private_networks_exceptions = [ - IPv6Network('2001:1::1/128'), - IPv6Network('2001:1::2/128'), - IPv6Network('2001:3::/32'), - IPv6Network('2001:4:112::/48'), - IPv6Network('2001:20::/28'), - IPv6Network('2001:30::/28'), - ] - - _reserved_networks = [ - IPv6Network('::/8'), IPv6Network('100::/8'), - IPv6Network('200::/7'), IPv6Network('400::/6'), - IPv6Network('800::/5'), IPv6Network('1000::/4'), - IPv6Network('4000::/3'), IPv6Network('6000::/3'), - IPv6Network('8000::/3'), IPv6Network('A000::/3'), - IPv6Network('C000::/3'), IPv6Network('E000::/4'), - IPv6Network('F000::/5'), IPv6Network('F800::/6'), - IPv6Network('FE00::/9'), - ] - - _sitelocal_network = IPv6Network('fec0::/10') - - -IPv6Address._constants = _IPv6Constants -IPv6Network._constants = _IPv6Constants diff --git a/Python313_13_x64_Template/Lib/json/__init__.py b/Python313_13_x64_Template/Lib/json/__init__.py deleted file mode 100644 index c7a6dcdf..00000000 --- a/Python313_13_x64_Template/Lib/json/__init__.py +++ /dev/null @@ -1,365 +0,0 @@ -r"""JSON (JavaScript Object Notation) is a subset of -JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data -interchange format. - -:mod:`json` exposes an API familiar to users of the standard library -:mod:`marshal` and :mod:`pickle` modules. It is derived from a -version of the externally maintained simplejson library. - -Encoding basic Python object hierarchies:: - - >>> import json - >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) - '["foo", {"bar": ["baz", null, 1.0, 2]}]' - >>> print(json.dumps("\"foo\bar")) - "\"foo\bar" - >>> print(json.dumps('\u1234')) - "\u1234" - >>> print(json.dumps('\\')) - "\\" - >>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)) - {"a": 0, "b": 0, "c": 0} - >>> from io import StringIO - >>> io = StringIO() - >>> json.dump(['streaming API'], io) - >>> io.getvalue() - '["streaming API"]' - -Compact encoding:: - - >>> import json - >>> mydict = {'4': 5, '6': 7} - >>> json.dumps([1,2,3,mydict], separators=(',', ':')) - '[1,2,3,{"4":5,"6":7}]' - -Pretty printing:: - - >>> import json - >>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)) - { - "4": 5, - "6": 7 - } - -Decoding JSON:: - - >>> import json - >>> obj = ['foo', {'bar': ['baz', None, 1.0, 2]}] - >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj - True - >>> json.loads('"\\"foo\\bar"') == '"foo\x08ar' - True - >>> from io import StringIO - >>> io = StringIO('["streaming API"]') - >>> json.load(io)[0] == 'streaming API' - True - -Specializing JSON object decoding:: - - >>> import json - >>> def as_complex(dct): - ... if '__complex__' in dct: - ... return complex(dct['real'], dct['imag']) - ... 
return dct
- ...
- >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
- ... object_hook=as_complex)
- (1+2j)
- >>> from decimal import Decimal
- >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
- True
- 
-Specializing JSON object encoding::
- 
- >>> import json
- >>> def encode_complex(obj):
- ... if isinstance(obj, complex):
- ... return [obj.real, obj.imag]
- ... raise TypeError(f'Object of type {obj.__class__.__name__} '
- ... f'is not JSON serializable')
- ...
- >>> json.dumps(2 + 1j, default=encode_complex)
- '[2.0, 1.0]'
- >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
- '[2.0, 1.0]'
- >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
- '[2.0, 1.0]'
- 
- 
-Using json.tool from the shell to validate and pretty-print::
- 
- $ echo '{"json":"obj"}' | python -m json.tool
- {
- "json": "obj"
- }
- $ echo '{ 1.2:3.4}' | python -m json.tool
- Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
-"""
-__version__ = '2.0.9'
-__all__ = [
- 'dump', 'dumps', 'load', 'loads',
- 'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
-]
- 
-__author__ = 'Bob Ippolito <bob@redivi.com>'
- 
-from .decoder import JSONDecoder, JSONDecodeError
-from .encoder import JSONEncoder
-import codecs
- 
-_default_encoder = JSONEncoder(
- skipkeys=False,
- ensure_ascii=True,
- check_circular=True,
- allow_nan=True,
- indent=None,
- separators=None,
- default=None,
-)
- 
-def dump(obj, fp, *, skipkeys=False, ensure_ascii=True, check_circular=True,
- allow_nan=True, cls=None, indent=None, separators=None,
- default=None, sort_keys=False, **kw):
- """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
- ``.write()``-supporting file-like object).
- 
- If ``skipkeys`` is true then ``dict`` keys that are not basic types
- (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
- instead of raising a ``TypeError``.
- 
- If ``ensure_ascii`` is false, then the strings written to ``fp`` can
- contain non-ASCII and non-printable characters if they appear in strings
- contained in ``obj``. Otherwise, all such characters are escaped in JSON
- strings.
- 
- If ``check_circular`` is false, then the circular reference check
- for container types will be skipped and a circular reference will
- result in an ``RecursionError`` (or worse).
- 
- If ``allow_nan`` is false, then it will be a ``ValueError`` to
- serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
- in strict compliance of the JSON specification, instead of using the
- JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
- 
- If ``indent`` is a non-negative integer, then JSON array elements and
- object members will be pretty-printed with that indent level. An indent
- level of 0 will only insert newlines. ``None`` is the most compact
- representation.
- 
- If specified, ``separators`` should be an ``(item_separator,
- key_separator)`` tuple. The default is ``(', ', ': ')`` if *indent* is
- ``None`` and ``(',', ': ')`` otherwise. To get the most compact JSON
- representation, you should specify ``(',', ':')`` to eliminate
- whitespace.
- 
- ``default(obj)`` is a function that should return a serializable version
- of obj or raise TypeError. The default simply raises TypeError.
- 
- If *sort_keys* is true (default: ``False``), then the output of
- dictionaries will be sorted by key.
- 
- To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
- ``.default()`` method to serialize additional types), specify it with
- the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
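# Minimal usage sketch for dump() as documented above (stdlib json
# assumed): serialize into any .write()-supporting object.
import io
import json

buf = io.StringIO()
json.dump({'b': 1, 'a': 2}, buf, sort_keys=True)
buf.getvalue()   # '{"a": 2, "b": 1}'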
- - """ - # cached encoder - if (not skipkeys and ensure_ascii and - check_circular and allow_nan and - cls is None and indent is None and separators is None and - default is None and not sort_keys and not kw): - iterable = _default_encoder.iterencode(obj) - else: - if cls is None: - cls = JSONEncoder - iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, - check_circular=check_circular, allow_nan=allow_nan, indent=indent, - separators=separators, - default=default, sort_keys=sort_keys, **kw).iterencode(obj) - # could accelerate with writelines in some versions of Python, at - # a debuggability cost - for chunk in iterable: - fp.write(chunk) - - -def dumps(obj, *, skipkeys=False, ensure_ascii=True, check_circular=True, - allow_nan=True, cls=None, indent=None, separators=None, - default=None, sort_keys=False, **kw): - """Serialize ``obj`` to a JSON formatted ``str``. - - If ``skipkeys`` is true then ``dict`` keys that are not basic types - (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped - instead of raising a ``TypeError``. - - If ``ensure_ascii`` is false, then the return value can contain - non-ASCII and non-printable characters if they appear in strings - contained in ``obj``. Otherwise, all such characters are escaped in - JSON strings. - - If ``check_circular`` is false, then the circular reference check - for container types will be skipped and a circular reference will - result in an ``RecursionError`` (or worse). - - If ``allow_nan`` is false, then it will be a ``ValueError`` to - serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in - strict compliance of the JSON specification, instead of using the - JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). - - If ``indent`` is a non-negative integer, then JSON array elements and - object members will be pretty-printed with that indent level. An indent - level of 0 will only insert newlines. ``None`` is the most compact - representation. - - If specified, ``separators`` should be an ``(item_separator, - key_separator)`` tuple. The default is ``(', ', ': ')`` if *indent* is - ``None`` and ``(',', ': ')`` otherwise. To get the most compact JSON - representation, you should specify ``(',', ':')`` to eliminate - whitespace. - - ``default(obj)`` is a function that should return a serializable version - of obj or raise TypeError. The default simply raises TypeError. - - If *sort_keys* is true (default: ``False``), then the output of - dictionaries will be sorted by key. - - To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the - ``.default()`` method to serialize additional types), specify it with - the ``cls`` kwarg; otherwise ``JSONEncoder`` is used. 
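# Sketch of the separators/indent interplay described above (stdlib json
# assumed): compact output versus pretty-printing from dumps().
import json

json.dumps({'a': 1, 'b': 2}, separators=(',', ':'))   # '{"a":1,"b":2}'
json.dumps({'a': 1}, indent=2)                        # '{\n  "a": 1\n}'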
- - """ - # cached encoder - if (not skipkeys and ensure_ascii and - check_circular and allow_nan and - cls is None and indent is None and separators is None and - default is None and not sort_keys and not kw): - return _default_encoder.encode(obj) - if cls is None: - cls = JSONEncoder - return cls( - skipkeys=skipkeys, ensure_ascii=ensure_ascii, - check_circular=check_circular, allow_nan=allow_nan, indent=indent, - separators=separators, default=default, sort_keys=sort_keys, - **kw).encode(obj) - - -_default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None) - - -def detect_encoding(b): - bstartswith = b.startswith - if bstartswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)): - return 'utf-32' - if bstartswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)): - return 'utf-16' - if bstartswith(codecs.BOM_UTF8): - return 'utf-8-sig' - - if len(b) >= 4: - if not b[0]: - # 00 00 -- -- - utf-32-be - # 00 XX -- -- - utf-16-be - return 'utf-16-be' if b[1] else 'utf-32-be' - if not b[1]: - # XX 00 00 00 - utf-32-le - # XX 00 00 XX - utf-16-le - # XX 00 XX -- - utf-16-le - return 'utf-16-le' if b[2] or b[3] else 'utf-32-le' - elif len(b) == 2: - if not b[0]: - # 00 XX - utf-16-be - return 'utf-16-be' - if not b[1]: - # XX 00 - utf-16-le - return 'utf-16-le' - # default - return 'utf-8' - - -def load(fp, *, cls=None, object_hook=None, parse_float=None, - parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): - """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing - a JSON document) to a Python object. - - ``object_hook`` is an optional function that will be called with the - result of any object literal decode (a ``dict``). The return value of - ``object_hook`` will be used instead of the ``dict``. This feature - can be used to implement custom decoders (e.g. JSON-RPC class hinting). - - ``object_pairs_hook`` is an optional function that will be called with - the result of any object literal decoded with an ordered list of pairs. - The return value of ``object_pairs_hook`` will be used instead of the - ``dict``. This feature can be used to implement custom decoders. If - ``object_hook`` is also defined, the ``object_pairs_hook`` takes - priority. - - To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` - kwarg; otherwise ``JSONDecoder`` is used. - """ - return loads(fp.read(), - cls=cls, object_hook=object_hook, - parse_float=parse_float, parse_int=parse_int, - parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw) - - -def loads(s, *, cls=None, object_hook=None, parse_float=None, - parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): - """Deserialize ``s`` (a ``str``, ``bytes`` or ``bytearray`` instance - containing a JSON document) to a Python object. - - ``object_hook`` is an optional function that will be called with the - result of any object literal decode (a ``dict``). The return value of - ``object_hook`` will be used instead of the ``dict``. This feature - can be used to implement custom decoders (e.g. JSON-RPC class hinting). - - ``object_pairs_hook`` is an optional function that will be called with - the result of any object literal decoded with an ordered list of pairs. - The return value of ``object_pairs_hook`` will be used instead of the - ``dict``. This feature can be used to implement custom decoders. If - ``object_hook`` is also defined, the ``object_pairs_hook`` takes - priority. - - ``parse_float``, if specified, will be called with the string - of every JSON float to be decoded. 
By default this is equivalent to - float(num_str). This can be used to use another datatype or parser - for JSON floats (e.g. decimal.Decimal). - - ``parse_int``, if specified, will be called with the string - of every JSON int to be decoded. By default this is equivalent to - int(num_str). This can be used to use another datatype or parser - for JSON integers (e.g. float). - - ``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN. - This can be used to raise an exception if invalid JSON numbers - are encountered. - - To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` - kwarg; otherwise ``JSONDecoder`` is used. - """ - if isinstance(s, str): - if s.startswith('\ufeff'): - raise JSONDecodeError("Unexpected UTF-8 BOM (decode using utf-8-sig)", - s, 0) - else: - if not isinstance(s, (bytes, bytearray)): - raise TypeError(f'the JSON object must be str, bytes or bytearray, ' - f'not {s.__class__.__name__}') - s = s.decode(detect_encoding(s), 'surrogatepass') - - if (cls is None and object_hook is None and - parse_int is None and parse_float is None and - parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) - if cls is None: - cls = JSONDecoder - if object_hook is not None: - kw['object_hook'] = object_hook - if object_pairs_hook is not None: - kw['object_pairs_hook'] = object_pairs_hook - if parse_float is not None: - kw['parse_float'] = parse_float - if parse_int is not None: - kw['parse_int'] = parse_int - if parse_constant is not None: - kw['parse_constant'] = parse_constant - return cls(**kw).decode(s) diff --git a/Python313_13_x64_Template/Lib/json/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/json/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 7ac7b410..00000000 Binary files a/Python313_13_x64_Template/Lib/json/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/json/__pycache__/decoder.cpython-313.pyc b/Python313_13_x64_Template/Lib/json/__pycache__/decoder.cpython-313.pyc deleted file mode 100644 index 48d0b476..00000000 Binary files a/Python313_13_x64_Template/Lib/json/__pycache__/decoder.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/json/__pycache__/encoder.cpython-313.pyc b/Python313_13_x64_Template/Lib/json/__pycache__/encoder.cpython-313.pyc deleted file mode 100644 index 05ed6f3e..00000000 Binary files a/Python313_13_x64_Template/Lib/json/__pycache__/encoder.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/json/__pycache__/scanner.cpython-313.pyc b/Python313_13_x64_Template/Lib/json/__pycache__/scanner.cpython-313.pyc deleted file mode 100644 index 148e3aca..00000000 Binary files a/Python313_13_x64_Template/Lib/json/__pycache__/scanner.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/json/encoder.py b/Python313_13_x64_Template/Lib/json/encoder.py deleted file mode 100644 index 0671500d..00000000 --- a/Python313_13_x64_Template/Lib/json/encoder.py +++ /dev/null @@ -1,446 +0,0 @@ -"""Implementation of JSONEncoder -""" -import re - -try: - from _json import encode_basestring_ascii as c_encode_basestring_ascii -except ImportError: - c_encode_basestring_ascii = None -try: - from _json import encode_basestring as c_encode_basestring -except ImportError: - c_encode_basestring = None -try: - from _json import make_encoder as c_make_encoder -except ImportError: - c_make_encoder = 
None - -ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') -ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') -HAS_UTF8 = re.compile(b'[\x80-\xff]') -ESCAPE_DCT = { - '\\': '\\\\', - '"': '\\"', - '\b': '\\b', - '\f': '\\f', - '\n': '\\n', - '\r': '\\r', - '\t': '\\t', -} -for i in range(0x20): - ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) - #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) -del i - -INFINITY = float('inf') - -def py_encode_basestring(s): - """Return a JSON representation of a Python string - - """ - def replace(match): - return ESCAPE_DCT[match.group(0)] - return '"' + ESCAPE.sub(replace, s) + '"' - - -encode_basestring = (c_encode_basestring or py_encode_basestring) - - -def py_encode_basestring_ascii(s): - """Return an ASCII-only JSON representation of a Python string - - """ - def replace(match): - s = match.group(0) - try: - return ESCAPE_DCT[s] - except KeyError: - n = ord(s) - if n < 0x10000: - return '\\u{0:04x}'.format(n) - #return '\\u%04x' % (n,) - else: - # surrogate pair - n -= 0x10000 - s1 = 0xd800 | ((n >> 10) & 0x3ff) - s2 = 0xdc00 | (n & 0x3ff) - return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) - return '"' + ESCAPE_ASCII.sub(replace, s) + '"' - - -encode_basestring_ascii = ( - c_encode_basestring_ascii or py_encode_basestring_ascii) - -class JSONEncoder(object): - """Extensible JSON encoder for Python data structures. - - Supports the following objects and types by default: - - +-------------------+---------------+ - | Python | JSON | - +===================+===============+ - | dict | object | - +-------------------+---------------+ - | list, tuple | array | - +-------------------+---------------+ - | str | string | - +-------------------+---------------+ - | int, float | number | - +-------------------+---------------+ - | True | true | - +-------------------+---------------+ - | False | false | - +-------------------+---------------+ - | None | null | - +-------------------+---------------+ - - To extend this to recognize other objects, subclass and implement a - ``.default()`` method with another method that returns a serializable - object for ``o`` if possible, otherwise it should call the superclass - implementation (to raise ``TypeError``). - - """ - item_separator = ', ' - key_separator = ': ' - def __init__(self, *, skipkeys=False, ensure_ascii=True, - check_circular=True, allow_nan=True, sort_keys=False, - indent=None, separators=None, default=None): - """Constructor for JSONEncoder, with sensible defaults. - - If skipkeys is false, then it is a TypeError to attempt - encoding of keys that are not str, int, float, bool or None. - If skipkeys is True, such items are simply skipped. - - If ensure_ascii is true, the output is guaranteed to be str objects - with all incoming non-ASCII and non-printable characters escaped. - If ensure_ascii is false, the output can contain non-ASCII and - non-printable characters. - - If check_circular is true, then lists, dicts, and custom encoded - objects will be checked for circular references during encoding to - prevent an infinite recursion (which would cause an RecursionError). - Otherwise, no such check takes place. - - If allow_nan is true, then NaN, Infinity, and -Infinity will be - encoded as such. This behavior is not JSON specification compliant, - but is consistent with most JavaScript based encoders and decoders. - Otherwise, it will be a ValueError to encode such floats. 
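[Editorial note on py_encode_basestring_ascii above: characters outside the Basic Multilingual Plane are escaped as a UTF-16 surrogate pair. A small worked check of that arithmetic, using U+1D11E (MUSICAL SYMBOL G CLEF) as an arbitrary example:

import json.encoder as enc

n = ord('\U0001D11E') - 0x10000      # 0xD11E
hi = 0xd800 | ((n >> 10) & 0x3ff)    # high surrogate: 0xD834
lo = 0xdc00 | (n & 0x3ff)            # low surrogate:  0xDD1E
assert (hi, lo) == (0xd834, 0xdd1e)
# The pure-Python encoder emits exactly that pair, lowercase hex:
assert enc.py_encode_basestring_ascii('\U0001D11E') == '"\\ud834\\udd1e"'
]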
- - If sort_keys is true, then the output of dictionaries will be - sorted by key; this is useful for regression tests to ensure - that JSON serializations can be compared on a day-to-day basis. - - If indent is a non-negative integer, then JSON array - elements and object members will be pretty-printed with that - indent level. An indent level of 0 will only insert newlines. - None is the most compact representation. - - If specified, separators should be an (item_separator, - key_separator) tuple. The default is (', ', ': ') if *indent* is - ``None`` and (',', ': ') otherwise. To get the most compact JSON - representation, you should specify (',', ':') to eliminate - whitespace. - - If specified, default is a function that gets called for objects - that can't otherwise be serialized. It should return a JSON - encodable version of the object or raise a ``TypeError``. - - """ - - self.skipkeys = skipkeys - self.ensure_ascii = ensure_ascii - self.check_circular = check_circular - self.allow_nan = allow_nan - self.sort_keys = sort_keys - self.indent = indent - if separators is not None: - self.item_separator, self.key_separator = separators - elif indent is not None: - self.item_separator = ',' - if default is not None: - self.default = default - - def default(self, o): - """Implement this method in a subclass such that it returns - a serializable object for ``o``, or calls the base implementation - (to raise a ``TypeError``). - - For example, to support arbitrary iterators, you could - implement default like this:: - - def default(self, o): - try: - iterable = iter(o) - except TypeError: - pass - else: - return list(iterable) - # Let the base class default method raise the TypeError - return super().default(o) - - """ - raise TypeError(f'Object of type {o.__class__.__name__} ' - f'is not JSON serializable') - - def encode(self, o): - """Return a JSON string representation of a Python data structure. - - >>> from json.encoder import JSONEncoder - >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) - '{"foo": ["bar", "baz"]}' - - """ - # This is for extremely simple cases and benchmarks. - if isinstance(o, str): - if self.ensure_ascii: - return encode_basestring_ascii(o) - else: - return encode_basestring(o) - # This doesn't pass the iterator directly to ''.join() because the - # exceptions aren't as detailed. The list call should be roughly - # equivalent to the PySequence_Fast that ''.join() would do. - chunks = self.iterencode(o, _one_shot=True) - if not isinstance(chunks, (list, tuple)): - chunks = list(chunks) - return ''.join(chunks) - - def iterencode(self, o, _one_shot=False): - """Encode the given object and yield each string - representation as available. - - For example:: - - for chunk in JSONEncoder().iterencode(bigobject): - mysocket.write(chunk) - - """ - if self.check_circular: - markers = {} - else: - markers = None - if self.ensure_ascii: - _encoder = encode_basestring_ascii - else: - _encoder = encode_basestring - - def floatstr(o, allow_nan=self.allow_nan, - _repr=float.__repr__, _inf=INFINITY, _neginf=-INFINITY): - # Check for specials. Note that this type of test is processor - # and/or platform-specific, so do tests which don't depend on the - # internals. 
- - if o != o: - text = 'NaN' - elif o == _inf: - text = 'Infinity' - elif o == _neginf: - text = '-Infinity' - else: - return _repr(o) - - if not allow_nan: - raise ValueError( - "Out of range float values are not JSON compliant: " + - repr(o)) - - return text - - - if self.indent is None or isinstance(self.indent, str): - indent = self.indent - else: - indent = ' ' * self.indent - if _one_shot and c_make_encoder is not None: - _iterencode = c_make_encoder( - markers, self.default, _encoder, indent, - self.key_separator, self.item_separator, self.sort_keys, - self.skipkeys, self.allow_nan) - else: - _iterencode = _make_iterencode( - markers, self.default, _encoder, indent, floatstr, - self.key_separator, self.item_separator, self.sort_keys, - self.skipkeys, _one_shot) - return _iterencode(o, 0) - -def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, - _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, - ## HACK: hand-optimized bytecode; turn globals into locals - ValueError=ValueError, - dict=dict, - float=float, - id=id, - int=int, - isinstance=isinstance, - list=list, - str=str, - tuple=tuple, - _intstr=int.__repr__, - ): - - def _iterencode_list(lst, _current_indent_level): - if not lst: - yield '[]' - return - if markers is not None: - markerid = id(lst) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = lst - buf = '[' - if _indent is not None: - _current_indent_level += 1 - newline_indent = '\n' + _indent * _current_indent_level - separator = _item_separator + newline_indent - buf += newline_indent - else: - newline_indent = None - separator = _item_separator - first = True - for value in lst: - if first: - first = False - else: - buf = separator - if isinstance(value, str): - yield buf + _encoder(value) - elif value is None: - yield buf + 'null' - elif value is True: - yield buf + 'true' - elif value is False: - yield buf + 'false' - elif isinstance(value, int): - # Subclasses of int/float may override __repr__, but we still - # want to encode them as integers/floats in JSON. One example - # within the standard library is IntEnum. - yield buf + _intstr(value) - elif isinstance(value, float): - # see comment above for int - yield buf + _floatstr(value) - else: - yield buf - if isinstance(value, (list, tuple)): - chunks = _iterencode_list(value, _current_indent_level) - elif isinstance(value, dict): - chunks = _iterencode_dict(value, _current_indent_level) - else: - chunks = _iterencode(value, _current_indent_level) - yield from chunks - if newline_indent is not None: - _current_indent_level -= 1 - yield '\n' + _indent * _current_indent_level - yield ']' - if markers is not None: - del markers[markerid] - - def _iterencode_dict(dct, _current_indent_level): - if not dct: - yield '{}' - return - if markers is not None: - markerid = id(dct) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = dct - yield '{' - if _indent is not None: - _current_indent_level += 1 - newline_indent = '\n' + _indent * _current_indent_level - item_separator = _item_separator + newline_indent - else: - newline_indent = None - item_separator = _item_separator - first = True - if _sort_keys: - items = sorted(dct.items()) - else: - items = dct.items() - for key, value in items: - if isinstance(key, str): - pass - # JavaScript is weakly typed for these, so it makes sense to - # also allow them. Many encoders seem to do something like this. 
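[Editorial note on the markers dict threaded through _make_iterencode above: it is what turns a self-referencing container into a clean ValueError instead of unbounded recursion. A short sketch; per the dumps docstring earlier in this diff, passing check_circular=False would instead end in RecursionError (or worse):

import json

lst = []
lst.append(lst)               # a list that contains itself
try:
    json.dumps(lst)
except ValueError as exc:
    assert 'Circular reference' in str(exc)
]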
- elif isinstance(key, float): - # see comment for int/float in _make_iterencode - key = _floatstr(key) - elif key is True: - key = 'true' - elif key is False: - key = 'false' - elif key is None: - key = 'null' - elif isinstance(key, int): - # see comment for int/float in _make_iterencode - key = _intstr(key) - elif _skipkeys: - continue - else: - raise TypeError(f'keys must be str, int, float, bool or None, ' - f'not {key.__class__.__name__}') - if first: - first = False - if newline_indent is not None: - yield newline_indent - else: - yield item_separator - yield _encoder(key) - yield _key_separator - if isinstance(value, str): - yield _encoder(value) - elif value is None: - yield 'null' - elif value is True: - yield 'true' - elif value is False: - yield 'false' - elif isinstance(value, int): - # see comment for int/float in _make_iterencode - yield _intstr(value) - elif isinstance(value, float): - # see comment for int/float in _make_iterencode - yield _floatstr(value) - else: - if isinstance(value, (list, tuple)): - chunks = _iterencode_list(value, _current_indent_level) - elif isinstance(value, dict): - chunks = _iterencode_dict(value, _current_indent_level) - else: - chunks = _iterencode(value, _current_indent_level) - yield from chunks - if not first and newline_indent is not None: - _current_indent_level -= 1 - yield '\n' + _indent * _current_indent_level - yield '}' - if markers is not None: - del markers[markerid] - - def _iterencode(o, _current_indent_level): - if isinstance(o, str): - yield _encoder(o) - elif o is None: - yield 'null' - elif o is True: - yield 'true' - elif o is False: - yield 'false' - elif isinstance(o, int): - # see comment for int/float in _make_iterencode - yield _intstr(o) - elif isinstance(o, float): - # see comment for int/float in _make_iterencode - yield _floatstr(o) - elif isinstance(o, (list, tuple)): - yield from _iterencode_list(o, _current_indent_level) - elif isinstance(o, dict): - yield from _iterencode_dict(o, _current_indent_level) - else: - if markers is not None: - markerid = id(o) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = o - o = _default(o) - yield from _iterencode(o, _current_indent_level) - if markers is not None: - del markers[markerid] - return _iterencode diff --git a/Python313_13_x64_Template/Lib/json/tool.py b/Python313_13_x64_Template/Lib/json/tool.py deleted file mode 100644 index fdfc3372..00000000 --- a/Python313_13_x64_Template/Lib/json/tool.py +++ /dev/null @@ -1,89 +0,0 @@ -r"""Command-line tool to validate and pretty-print JSON - -Usage:: - - $ echo '{"json":"obj"}' | python -m json.tool - { - "json": "obj" - } - $ echo '{ 1.2:3.4}' | python -m json.tool - Expecting property name enclosed in double quotes: line 1 column 3 (char 2) - -""" -import argparse -import json -import sys - - -def main(): - prog = 'python -m json.tool' - description = ('A simple command line interface for json module ' - 'to validate and pretty-print JSON objects.') - parser = argparse.ArgumentParser(prog=prog, description=description) - parser.add_argument('infile', nargs='?', - help='a JSON file to be validated or pretty-printed', - default='-') - parser.add_argument('outfile', nargs='?', - help='write the output of infile to outfile', - default=None) - parser.add_argument('--sort-keys', action='store_true', default=False, - help='sort the output of dictionaries alphabetically by key') - parser.add_argument('--no-ensure-ascii', dest='ensure_ascii', action='store_false', - help='disable 
escaping of non-ASCII characters') - parser.add_argument('--json-lines', action='store_true', default=False, - help='parse input using the JSON Lines format. ' - 'Use with --no-indent or --compact to produce valid JSON Lines output.') - group = parser.add_mutually_exclusive_group() - group.add_argument('--indent', default=4, type=int, - help='separate items with newlines and use this number ' - 'of spaces for indentation') - group.add_argument('--tab', action='store_const', dest='indent', - const='\t', help='separate items with newlines and use ' - 'tabs for indentation') - group.add_argument('--no-indent', action='store_const', dest='indent', - const=None, - help='separate items with spaces rather than newlines') - group.add_argument('--compact', action='store_true', - help='suppress all whitespace separation (most compact)') - options = parser.parse_args() - - dump_args = { - 'sort_keys': options.sort_keys, - 'indent': options.indent, - 'ensure_ascii': options.ensure_ascii, - } - if options.compact: - dump_args['indent'] = None - dump_args['separators'] = ',', ':' - - try: - if options.infile == '-': - infile = sys.stdin - else: - infile = open(options.infile, encoding='utf-8') - try: - if options.json_lines: - objs = (json.loads(line) for line in infile) - else: - objs = (json.load(infile),) - finally: - if infile is not sys.stdin: - infile.close() - - if options.outfile is None: - outfile = sys.stdout - else: - outfile = open(options.outfile, 'w', encoding='utf-8') - with outfile: - for obj in objs: - json.dump(obj, outfile, **dump_args) - outfile.write('\n') - except ValueError as e: - raise SystemExit(e) - - -if __name__ == '__main__': - try: - main() - except BrokenPipeError as exc: - sys.exit(exc.errno) diff --git a/Python313_13_x64_Template/Lib/linecache.py b/Python313_13_x64_Template/Lib/linecache.py deleted file mode 100644 index f2bb0bc9..00000000 --- a/Python313_13_x64_Template/Lib/linecache.py +++ /dev/null @@ -1,236 +0,0 @@ -"""Cache lines from Python source files. - -This is intended to read lines from modules imported -- hence if a filename -is not found, it will look down the module search path for a file by -that name. -""" - -__all__ = ["getline", "clearcache", "checkcache", "lazycache"] - - -# The cache. Maps filenames to either a thunk which will provide source code, -# or a tuple (size, mtime, lines, fullname) once loaded. -cache = {} -_interactive_cache = {} - - -def clearcache(): - """Clear the cache entirely.""" - cache.clear() - - -def getline(filename, lineno, module_globals=None): - """Get a line for a Python source file from the cache. - Update the cache if it doesn't contain an entry for this file already.""" - - lines = getlines(filename, module_globals) - if 1 <= lineno <= len(lines): - return lines[lineno - 1] - return '' - - -def getlines(filename, module_globals=None): - """Get the lines for a Python source file from the cache. 
- Update the cache if it doesn't contain an entry for this file already.""" - - entry = cache.get(filename, None) - if entry is not None and len(entry) != 1: - return entry[2] - - try: - return updatecache(filename, module_globals) - except MemoryError: - clearcache() - return [] - - -def _getline_from_code(filename, lineno): - lines = _getlines_from_code(filename) - if 1 <= lineno <= len(lines): - return lines[lineno - 1] - return '' - -def _make_key(code): - return (code.co_filename, code.co_qualname, code.co_firstlineno) - -def _getlines_from_code(code): - code_id = _make_key(code) - entry = _interactive_cache.get(code_id, None) - if entry is not None and len(entry) != 1: - return entry[2] - return [] - - -def checkcache(filename=None): - """Discard cache entries that are out of date. - (This is not checked upon each call!)""" - - if filename is None: - # get keys atomically - filenames = cache.copy().keys() - else: - filenames = [filename] - - for filename in filenames: - entry = cache.get(filename, None) - if entry is None or len(entry) == 1: - # lazy cache entry, leave it lazy. - continue - size, mtime, lines, fullname = entry - if mtime is None: - continue # no-op for files loaded via a __loader__ - try: - # This import can fail if the interpreter is shutting down - import os - except ImportError: - return - try: - stat = os.stat(fullname) - except (OSError, ValueError): - cache.pop(filename, None) - continue - if size != stat.st_size or mtime != stat.st_mtime: - cache.pop(filename, None) - - -def updatecache(filename, module_globals=None): - """Update a cache entry and return its list of lines. - If something's wrong, print a message, discard the cache entry, - and return an empty list.""" - - # These imports are not at top level because linecache is in the critical - # path of the interpreter startup and importing os and sys take a lot of time - # and slows down the startup sequence. - try: - import os - import sys - import tokenize - except ImportError: - # These import can fail if the interpreter is shutting down - return [] - - entry = cache.pop(filename, None) - if not filename or (filename.startswith('<') and filename.endswith('>')): - return [] - - fullname = filename - try: - stat = os.stat(fullname) - except OSError: - basename = filename - - # Realise a lazy loader based lookup if there is one - # otherwise try to lookup right now. - lazy_entry = entry if entry is not None and len(entry) == 1 else None - if lazy_entry is None: - lazy_entry = _make_lazycache_entry(filename, module_globals) - if lazy_entry is not None: - try: - data = lazy_entry[0]() - except (ImportError, OSError): - pass - else: - if data is None: - # No luck, the PEP302 loader cannot find the source - # for this module. - return [] - entry = ( - len(data), - None, - [line + '\n' for line in data.splitlines()], - fullname - ) - cache[filename] = entry - return entry[2] - - # Try looking through the module search path, which is only useful - # when handling a relative filename. - if os.path.isabs(filename): - return [] - - for dirname in sys.path: - try: - fullname = os.path.join(dirname, basename) - except (TypeError, AttributeError): - # Not sufficiently string-like to do anything useful with. 
- continue - try: - stat = os.stat(fullname) - break - except (OSError, ValueError): - pass - else: - return [] - except ValueError: # may be raised by os.stat() - return [] - try: - with tokenize.open(fullname) as fp: - lines = fp.readlines() - except (OSError, UnicodeDecodeError, SyntaxError): - return [] - if not lines: - lines = ['\n'] - elif not lines[-1].endswith('\n'): - lines[-1] += '\n' - size, mtime = stat.st_size, stat.st_mtime - cache[filename] = size, mtime, lines, fullname - return lines - - -def lazycache(filename, module_globals): - """Seed the cache for filename with module_globals. - - The module loader will be asked for the source only when getlines is - called, not immediately. - - If there is an entry in the cache already, it is not altered. - - :return: True if a lazy load is registered in the cache, - otherwise False. To register such a load a module loader with a - get_source method must be found, the filename must be a cacheable - filename, and the filename must not be already cached. - """ - entry = cache.get(filename, None) - if entry is not None: - return len(entry) == 1 - - lazy_entry = _make_lazycache_entry(filename, module_globals) - if lazy_entry is not None: - cache[filename] = lazy_entry - return True - return False - - -def _make_lazycache_entry(filename, module_globals): - if not filename or (filename.startswith('<') and filename.endswith('>')): - return None - # Try for a __loader__, if available - if module_globals and '__name__' in module_globals: - spec = module_globals.get('__spec__') - name = getattr(spec, 'name', None) or module_globals['__name__'] - loader = getattr(spec, 'loader', None) - if loader is None: - loader = module_globals.get('__loader__') - get_source = getattr(loader, 'get_source', None) - - if name and get_source: - def get_lines(name=name, *args, **kwargs): - return get_source(name, *args, **kwargs) - return (get_lines,) - return None - - - -def _register_code(code, string, name): - entry = (len(string), - None, - [line + '\n' for line in string.splitlines()], - name) - stack = [code] - while stack: - code = stack.pop() - for const in code.co_consts: - if isinstance(const, type(code)): - stack.append(const) - key = _make_key(code) - _interactive_cache[key] = entry diff --git a/Python313_13_x64_Template/Lib/locale.py b/Python313_13_x64_Template/Lib/locale.py deleted file mode 100644 index db6d0abb..00000000 --- a/Python313_13_x64_Template/Lib/locale.py +++ /dev/null @@ -1,1778 +0,0 @@ -"""Locale support module. - -The module provides low-level access to the C lib's locale APIs and adds high -level number formatting APIs as well as a locale aliasing engine to complement -these. - -The aliasing engine includes support for many commonly used locale names and -maps them to values suitable for passing to the C lib's setlocale() function. It -also includes default encodings for all supported locale names. - -""" - -import sys -import encodings -import encodings.aliases -import re -import _collections_abc -from builtins import str as _builtin_str -import functools - -# Try importing the _locale module. -# -# If this fails, fall back on a basic 'C' locale emulation. - -# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before -# trying the import. So __all__ is also fiddled at the end of the file. 
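[Editorial note on the linecache API removed above: it is deliberately forgiving — getline() is 1-indexed and returns '' rather than raising for out-of-range lines, while checkcache()/clearcache() invalidate stale entries. A minimal sketch, using linecache's own source file as input:

import linecache

first = linecache.getline(linecache.__file__, 1)
assert first.endswith('\n')                            # cached source line
assert linecache.getline(linecache.__file__, 10**9) == ''
linecache.checkcache()   # drop entries whose size/mtime changed on disk
linecache.clearcache()   # empty the cache entirely
]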
-__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error", - "setlocale", "localeconv", "strcoll", "strxfrm", - "str", "atof", "atoi", "format_string", "currency", - "normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY", - "LC_NUMERIC", "LC_ALL", "CHAR_MAX", "getencoding"] - -def _strcoll(a,b): - """ strcoll(string,string) -> int. - Compares two strings according to the locale. - """ - return (a > b) - (a < b) - -def _strxfrm(s): - """ strxfrm(string) -> string. - Returns a string that behaves for cmp locale-aware. - """ - return s - -try: - - from _locale import * - -except ImportError: - - # Locale emulation - - CHAR_MAX = 127 - LC_ALL = 6 - LC_COLLATE = 3 - LC_CTYPE = 0 - LC_MESSAGES = 5 - LC_MONETARY = 4 - LC_NUMERIC = 1 - LC_TIME = 2 - Error = ValueError - - def localeconv(): - """ localeconv() -> dict. - Returns numeric and monetary locale-specific parameters. - """ - # 'C' locale default values - return {'grouping': [127], - 'currency_symbol': '', - 'n_sign_posn': 127, - 'p_cs_precedes': 127, - 'n_cs_precedes': 127, - 'mon_grouping': [], - 'n_sep_by_space': 127, - 'decimal_point': '.', - 'negative_sign': '', - 'positive_sign': '', - 'p_sep_by_space': 127, - 'int_curr_symbol': '', - 'p_sign_posn': 127, - 'thousands_sep': '', - 'mon_thousands_sep': '', - 'frac_digits': 127, - 'mon_decimal_point': '', - 'int_frac_digits': 127} - - def setlocale(category, value=None): - """ setlocale(integer,string=None) -> string. - Activates/queries locale processing. - """ - if value not in (None, '', 'C'): - raise Error('_locale emulation only supports "C" locale') - return 'C' - -# These may or may not exist in _locale, so be sure to set them. -if 'strxfrm' not in globals(): - strxfrm = _strxfrm -if 'strcoll' not in globals(): - strcoll = _strcoll - - -_localeconv = localeconv - -# With this dict, you can override some items of localeconv's return value. -# This is useful for testing purposes. 
-_override_localeconv = {} - -@functools.wraps(_localeconv) -def localeconv(): - d = _localeconv() - if _override_localeconv: - d.update(_override_localeconv) - return d - - -### Number formatting APIs - -# Author: Martin von Loewis -# improved by Georg Brandl - -# Iterate over grouping intervals -def _grouping_intervals(grouping): - last_interval = None - for interval in grouping: - # if grouping is -1, we are done - if interval == CHAR_MAX: - return - # 0: re-use last group ad infinitum - if interval == 0: - if last_interval is None: - raise ValueError("invalid grouping") - while True: - yield last_interval - yield interval - last_interval = interval - -#perform the grouping from right to left -def _group(s, monetary=False): - conv = localeconv() - thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep'] - grouping = conv[monetary and 'mon_grouping' or 'grouping'] - if not grouping: - return (s, 0) - if s[-1] == ' ': - stripped = s.rstrip() - right_spaces = s[len(stripped):] - s = stripped - else: - right_spaces = '' - left_spaces = '' - groups = [] - for interval in _grouping_intervals(grouping): - if not s or s[-1] not in "0123456789": - # only non-digit characters remain (sign, spaces) - left_spaces = s - s = '' - break - groups.append(s[-interval:]) - s = s[:-interval] - if s: - groups.append(s) - groups.reverse() - return ( - left_spaces + thousands_sep.join(groups) + right_spaces, - len(thousands_sep) * (len(groups) - 1) - ) - -# Strip a given amount of excess padding from the given string -def _strip_padding(s, amount): - lpos = 0 - while amount and s[lpos] == ' ': - lpos += 1 - amount -= 1 - rpos = len(s) - 1 - while amount and s[rpos] == ' ': - rpos -= 1 - amount -= 1 - return s[lpos:rpos+1] - -_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?' - r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]') - -def _format(percent, value, grouping=False, monetary=False, *additional): - if additional: - formatted = percent % ((value,) + additional) - else: - formatted = percent % value - if percent[-1] in 'eEfFgGdiu': - formatted = _localize(formatted, grouping, monetary) - return formatted - -# Transform formatted as locale number according to the locale settings -def _localize(formatted, grouping=False, monetary=False): - # floats and decimal ints need special action! - if '.' in formatted: - seps = 0 - parts = formatted.split('.') - if grouping: - parts[0], seps = _group(parts[0], monetary=monetary) - decimal_point = localeconv()[monetary and 'mon_decimal_point' - or 'decimal_point'] - formatted = decimal_point.join(parts) - if seps: - formatted = _strip_padding(formatted, seps) - else: - seps = 0 - if grouping: - formatted, seps = _group(formatted, monetary=monetary) - if seps: - formatted = _strip_padding(formatted, seps) - return formatted - -def format_string(f, val, grouping=False, monetary=False): - """Formats a string in the same way that the % formatting would use, - but takes the current locale into account. - - Grouping is applied if the third parameter is true.
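[Editorial note on the _grouping_intervals generator above, which drives digit grouping: a 0 entry repeats the previous group width indefinitely, and CHAR_MAX ends grouping altogether. A sketch of both cases; these helpers are private, so this leans on implementation details:

from itertools import islice
from locale import CHAR_MAX, _grouping_intervals

# 0 means "reuse the last interval forever"
assert list(islice(_grouping_intervals([3, 0]), 5)) == [3, 3, 3, 3, 3]
# CHAR_MAX terminates the sequence
assert list(_grouping_intervals([3, 2, CHAR_MAX])) == [3, 2]
]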
- Conversion uses monetary thousands separator and grouping strings if - forth parameter monetary is true.""" - percents = list(_percent_re.finditer(f)) - new_f = _percent_re.sub('%s', f) - - if isinstance(val, _collections_abc.Mapping): - new_val = [] - for perc in percents: - if perc.group()[-1]=='%': - new_val.append('%') - else: - new_val.append(_format(perc.group(), val, grouping, monetary)) - else: - if not isinstance(val, tuple): - val = (val,) - new_val = [] - i = 0 - for perc in percents: - if perc.group()[-1]=='%': - new_val.append('%') - else: - starcount = perc.group('modifiers').count('*') - new_val.append(_format(perc.group(), - val[i], - grouping, - monetary, - *val[i+1:i+1+starcount])) - i += (1 + starcount) - val = tuple(new_val) - - return new_f % val - -def currency(val, symbol=True, grouping=False, international=False): - """Formats val according to the currency settings - in the current locale.""" - conv = localeconv() - - # check for illegal values - digits = conv[international and 'int_frac_digits' or 'frac_digits'] - if digits == 127: - raise ValueError("Currency formatting is not possible using " - "the 'C' locale.") - - s = _localize(f'{abs(val):.{digits}f}', grouping, monetary=True) - # '<' and '>' are markers if the sign must be inserted between symbol and value - s = '<' + s + '>' - - if symbol: - smb = conv[international and 'int_curr_symbol' or 'currency_symbol'] - precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes'] - separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space'] - - if precedes: - s = smb + (separated and ' ' or '') + s - else: - if international and smb[-1] == ' ': - smb = smb[:-1] - s = s + (separated and ' ' or '') + smb - - sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn'] - sign = conv[val<0 and 'negative_sign' or 'positive_sign'] - - if sign_pos == 0: - s = '(' + s + ')' - elif sign_pos == 1: - s = sign + s - elif sign_pos == 2: - s = s + sign - elif sign_pos == 3: - s = s.replace('<', sign) - elif sign_pos == 4: - s = s.replace('>', sign) - else: - # the default if nothing specified; - # this should be the most fitting sign position - s = sign + s - - return s.replace('<', '').replace('>', '') - -def str(val): - """Convert float to string, taking the locale into account.""" - return _format("%.12g", val) - -def delocalize(string): - "Parses a string as a normalized number according to the locale settings." - - conv = localeconv() - - #First, get rid of the grouping - ts = conv['thousands_sep'] - if ts: - string = string.replace(ts, '') - - #next, replace the decimal point with a dot - dd = conv['decimal_point'] - if dd: - string = string.replace(dd, '.') - return string - -def localize(string, grouping=False, monetary=False): - """Parses a string as locale number according to the locale settings.""" - return _localize(string, grouping, monetary) - -def atof(string, func=float): - "Parses a string as a float according to the locale settings." - return func(delocalize(string)) - -def atoi(string): - "Converts a string to an integer according to the locale settings." 
- return int(delocalize(string)) - -def _test(): - setlocale(LC_ALL, "") - #do grouping - s1 = format_string("%d", 123456789,1) - print(s1, "is", atoi(s1)) - #standard formatting - s1 = str(3.14) - print(s1, "is", atof(s1)) - -### Locale name aliasing engine - -# Author: Marc-Andre Lemburg, mal@lemburg.com -# Various tweaks by Fredrik Lundh - -# store away the low-level version of setlocale (it's -# overridden below) -_setlocale = setlocale - -def _replace_encoding(code, encoding): - if '.' in code: - langname = code[:code.index('.')] - else: - langname = code - # Convert the encoding to a C lib compatible encoding string - norm_encoding = encodings.normalize_encoding(encoding) - #print('norm encoding: %r' % norm_encoding) - norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(), - norm_encoding) - #print('aliased encoding: %r' % norm_encoding) - encoding = norm_encoding - norm_encoding = norm_encoding.lower() - if norm_encoding in locale_encoding_alias: - encoding = locale_encoding_alias[norm_encoding] - else: - norm_encoding = norm_encoding.replace('_', '') - norm_encoding = norm_encoding.replace('-', '') - if norm_encoding in locale_encoding_alias: - encoding = locale_encoding_alias[norm_encoding] - #print('found encoding %r' % encoding) - return langname + '.' + encoding - -def _append_modifier(code, modifier): - if modifier == 'euro': - if '.' not in code: - return code + '.ISO8859-15' - _, _, encoding = code.partition('.') - if encoding in ('ISO8859-15', 'UTF-8'): - return code - if encoding == 'ISO8859-1': - return _replace_encoding(code, 'ISO8859-15') - return code + '@' + modifier - -def normalize(localename): - - """ Returns a normalized locale code for the given locale - name. - - The returned locale code is formatted for use with - setlocale(). - - If normalization fails, the original name is returned - unchanged. - - If the given encoding is not known, the function defaults to - the default encoding for the locale code just like setlocale() - does. - - """ - # Normalize the locale name and extract the encoding and modifier - code = localename.lower() - if ':' in code: - # ':' is sometimes used as encoding delimiter. - code = code.replace(':', '.') - if '@' in code: - code, modifier = code.split('@', 1) - else: - modifier = '' - if '.' in code: - langname, encoding = code.split('.')[:2] - else: - langname = code - encoding = '' - - # First lookup: fullname (possibly with encoding and modifier) - lang_enc = langname - if encoding: - norm_encoding = encoding.replace('-', '') - norm_encoding = norm_encoding.replace('_', '') - lang_enc += '.' 
+ norm_encoding - lookup_name = lang_enc - if modifier: - lookup_name += '@' + modifier - code = locale_alias.get(lookup_name, None) - if code is not None: - return code - #print('first lookup failed') - - if modifier: - # Second try: fullname without modifier (possibly with encoding) - code = locale_alias.get(lang_enc, None) - if code is not None: - #print('lookup without modifier succeeded') - if '@' not in code: - return _append_modifier(code, modifier) - if code.split('@', 1)[1].lower() == modifier: - return code - #print('second lookup failed') - - if encoding: - # Third try: langname (without encoding, possibly with modifier) - lookup_name = langname - if modifier: - lookup_name += '@' + modifier - code = locale_alias.get(lookup_name, None) - if code is not None: - #print('lookup without encoding succeeded') - if '@' not in code: - return _replace_encoding(code, encoding) - code, modifier = code.split('@', 1) - return _replace_encoding(code, encoding) + '@' + modifier - - if modifier: - # Fourth try: langname (without encoding and modifier) - code = locale_alias.get(langname, None) - if code is not None: - #print('lookup without modifier and encoding succeeded') - if '@' not in code: - code = _replace_encoding(code, encoding) - return _append_modifier(code, modifier) - code, defmod = code.split('@', 1) - if defmod.lower() == modifier: - return _replace_encoding(code, encoding) + '@' + defmod - - return localename - -def _parse_localename(localename): - - """ Parses the locale code for localename and returns the - result as tuple (language code, encoding). - - The localename is normalized and passed through the locale - alias engine. A ValueError is raised in case the locale name - cannot be parsed. - - The language code corresponds to RFC 1766. code and encoding - can be None in case the values cannot be determined or are - unknown to this implementation. - - """ - code = normalize(localename) - if '@' in code: - # Deal with locale modifiers - code, modifier = code.split('@', 1) - if modifier == 'euro' and '.' not in code: - # Assume Latin-9 for @euro locales. This is bogus, - # since some systems may use other encodings for these - # locales. Also, we ignore other modifiers. - return code, 'iso-8859-15' - - if '.' in code: - return tuple(code.split('.')[:2]) - elif code == 'C': - return None, None - elif code == 'UTF-8': - # On macOS "LC_CTYPE=UTF-8" is a valid locale setting - # for getting UTF-8 handling for text. - return None, 'UTF-8' - raise ValueError('unknown locale: %s' % localename) - -def _build_localename(localetuple): - - """ Builds a locale code from the given tuple (language code, - encoding). - - No aliasing or normalizing takes place. - - """ - try: - language, encoding = localetuple - - if language is None: - language = 'C' - if encoding is None: - return language - else: - return language + '.' + encoding - except (TypeError, ValueError): - raise TypeError('Locale must be None, a string, or an iterable of ' - 'two strings -- language code, encoding.') from None - -def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): - - """ Tries to determine the default locale settings and returns - them as tuple (language code, encoding). - - According to POSIX, a program which has not called - setlocale(LC_ALL, "") runs using the portable 'C' locale. - Calling setlocale(LC_ALL, "") lets it use the default locale as - defined by the LANG variable. 
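[Editorial note on the aliasing pipeline above (normalize -> _parse_localename / _build_localename). A sketch of how names round-trip through it, hedged because exact results depend on the bundled locale_alias table, and two of these names are private:

import locale

# @euro on a Latin-1 locale is rewritten to Latin-9 by _append_modifier
assert locale.normalize('de_DE@euro') == 'de_DE.ISO8859-15'
assert locale._parse_localename('C') == (None, None)
assert locale._parse_localename('en_US.UTF-8') == ('en_US', 'UTF-8')
assert locale._build_localename(('en_US', 'UTF-8')) == 'en_US.UTF-8'
]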
Since we don't want to interfere - with the current locale setting we thus emulate the behavior - in the way described above. - - To maintain compatibility with other platforms, not only the - LANG variable is tested, but a list of variables given as - envvars parameter. The first found to be defined will be - used. envvars defaults to the search path used in GNU gettext; - it must always contain the variable name 'LANG'. - - Except for the code 'C', the language code corresponds to RFC - 1766. code and encoding can be None in case the values cannot - be determined. - - """ - - import warnings - warnings._deprecated( - "locale.getdefaultlocale", - "{name!r} is deprecated and slated for removal in Python {remove}. " - "Use setlocale(), getencoding() and getlocale() instead.", - remove=(3, 15)) - return _getdefaultlocale(envvars) - - -def _getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): - try: - # check if it's supported by the _locale module - import _locale - code, encoding = _locale._getdefaultlocale() - except (ImportError, AttributeError): - pass - else: - # make sure the code/encoding values are valid - if sys.platform == "win32" and code and code[:2] == "0x": - # map windows language identifier to language name - code = windows_locale.get(int(code, 0)) - # ...add other platform-specific processing here, if - # necessary... - return code, encoding - - # fall back on POSIX behaviour - import os - lookup = os.environ.get - for variable in envvars: - localename = lookup(variable,None) - if localename: - if variable == 'LANGUAGE': - localename = localename.split(':')[0] - break - else: - localename = 'C' - return _parse_localename(localename) - - -def getlocale(category=LC_CTYPE): - - """ Returns the current setting for the given locale category as - tuple (language code, encoding). - - category may be one of the LC_* value except LC_ALL. It - defaults to LC_CTYPE. - - Except for the code 'C', the language code corresponds to RFC - 1766. code and encoding can be None in case the values cannot - be determined. - - """ - localename = _setlocale(category) - if category == LC_ALL and ';' in localename: - raise TypeError('category LC_ALL is not supported') - return _parse_localename(localename) - -def setlocale(category, locale=None): - - """ Set the locale for the given category. The locale can be - a string, an iterable of two strings (language code and encoding), - or None. - - Iterables are converted to strings using the locale aliasing - engine. Locale strings are passed directly to the C lib. - - category may be given as one of the LC_* values. - - """ - if locale and not isinstance(locale, _builtin_str): - # convert to string - locale = normalize(_build_localename(locale)) - return _setlocale(category, locale) - - -try: - from _locale import getencoding -except ImportError: - # When _locale.getencoding() is missing, locale.getencoding() uses the - # Python filesystem encoding. - def getencoding(): - return sys.getfilesystemencoding() - - -try: - CODESET -except NameError: - def getpreferredencoding(do_setlocale=True): - """Return the charset that the user is likely using.""" - if sys.flags.warn_default_encoding: - import warnings - warnings.warn( - "UTF-8 Mode affects locale.getpreferredencoding(). Consider locale.getencoding() instead.", - EncodingWarning, 2) - if sys.flags.utf8_mode: - return 'utf-8' - return getencoding() -else: - # On Unix, if CODESET is available, use that. 
- def getpreferredencoding(do_setlocale=True): - """Return the charset that the user is likely using, - according to the system configuration.""" - - if sys.flags.warn_default_encoding: - import warnings - warnings.warn( - "UTF-8 Mode affects locale.getpreferredencoding(). Consider locale.getencoding() instead.", - EncodingWarning, 2) - if sys.flags.utf8_mode: - return 'utf-8' - - if not do_setlocale: - return getencoding() - - old_loc = setlocale(LC_CTYPE) - try: - try: - setlocale(LC_CTYPE, "") - except Error: - pass - return getencoding() - finally: - setlocale(LC_CTYPE, old_loc) - - -### Database -# -# The following data was extracted from the locale.alias file which -# comes with X11 and then hand edited removing the explicit encoding -# definitions and adding some more aliases. The file is usually -# available as /usr/lib/X11/locale/locale.alias. -# - -# -# The local_encoding_alias table maps lowercase encoding alias names -# to C locale encoding names (case-sensitive). Note that normalize() -# first looks up the encoding in the encodings.aliases dictionary and -# then applies this mapping to find the correct C lib name for the -# encoding. -# -locale_encoding_alias = { - - # Mappings for non-standard encoding names used in locale names - '437': 'C', - 'c': 'C', - 'en': 'ISO8859-1', - 'jis': 'JIS7', - 'jis7': 'JIS7', - 'ajec': 'eucJP', - 'koi8c': 'KOI8-C', - 'microsoftcp1251': 'CP1251', - 'microsoftcp1255': 'CP1255', - 'microsoftcp1256': 'CP1256', - '88591': 'ISO8859-1', - '88592': 'ISO8859-2', - '88595': 'ISO8859-5', - '885915': 'ISO8859-15', - - # Mappings from Python codec names to C lib encoding names - 'ascii': 'ISO8859-1', - 'latin_1': 'ISO8859-1', - 'iso8859_1': 'ISO8859-1', - 'iso8859_10': 'ISO8859-10', - 'iso8859_11': 'ISO8859-11', - 'iso8859_13': 'ISO8859-13', - 'iso8859_14': 'ISO8859-14', - 'iso8859_15': 'ISO8859-15', - 'iso8859_16': 'ISO8859-16', - 'iso8859_2': 'ISO8859-2', - 'iso8859_3': 'ISO8859-3', - 'iso8859_4': 'ISO8859-4', - 'iso8859_5': 'ISO8859-5', - 'iso8859_6': 'ISO8859-6', - 'iso8859_7': 'ISO8859-7', - 'iso8859_8': 'ISO8859-8', - 'iso8859_9': 'ISO8859-9', - 'iso2022_jp': 'JIS7', - 'shift_jis': 'SJIS', - 'tactis': 'TACTIS', - 'euc_jp': 'eucJP', - 'euc_kr': 'eucKR', - 'utf_8': 'UTF-8', - 'koi8_r': 'KOI8-R', - 'koi8_t': 'KOI8-T', - 'koi8_u': 'KOI8-U', - 'kz1048': 'RK1048', - 'cp1251': 'CP1251', - 'cp1255': 'CP1255', - 'cp1256': 'CP1256', - - # XXX This list is still incomplete. If you know more - # mappings, please file a bug report. Thanks. -} - -for k, v in sorted(locale_encoding_alias.items()): - k = k.replace('_', '') - locale_encoding_alias.setdefault(k, v) -del k, v - -# -# The locale_alias table maps lowercase alias names to C locale names -# (case-sensitive). Encodings are always separated from the locale -# name using a dot ('.'); they should only be given in case the -# language name is needed to interpret the given encoding alias -# correctly (CJK codes often have this need). -# -# Note that the normalize() function which uses this tables -# removes '_' and '-' characters from the encoding part of the -# locale name before doing the lookup. This saves a lot of -# space in the table. -# -# MAL 2004-12-10: -# Updated alias mapping to most recent locale.alias file -# from X.org distribution using makelocalealias.py. 
-# -# These are the differences compared to the old mapping (Python 2.4 -# and older): -# -# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' -# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' -# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' -# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' -# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' -# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' -# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1' -# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' -# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' -# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' -# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' -# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' -# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' -# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP' -# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13' -# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13' -# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' -# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' -# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11' -# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312' -# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5' -# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5' -# -# MAL 2008-05-30: -# Updated alias mapping to most recent locale.alias file -# from X.org distribution using makelocalealias.py. -# -# These are the differences compared to the old mapping (Python 2.5 -# and older): -# -# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2' -# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2' -# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' -# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' -# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8' -# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# -# AP 2010-04-12: -# Updated alias mapping to most recent locale.alias file -# from X.org distribution using makelocalealias.py. 
-# -# These are the differences compared to the old mapping (Python 2.6.5 -# and older): -# -# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' -# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' -# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' -# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' -# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' -# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' -# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' -# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' -# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin' -# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' -# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin' -# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8' -# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' -# -# SS 2013-12-20: -# Updated alias mapping to most recent locale.alias file -# from X.org distribution using makelocalealias.py. -# -# These are the differences compared to the old mapping (Python 3.3.3 -# and older): -# -# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' -# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' -# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' -# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' -# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' -# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' -# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8' -# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' -# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8' -# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' -# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' -# -# SS 2014-10-01: -# Updated alias mapping with glibc 2.19 supported locales. -# -# SS 2018-05-05: -# Updated alias mapping with glibc 2.27 supported locales. -# -# These are the differences compared to the old mapping (Python 3.6.5 -# and older): -# -# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia' -# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154' -# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R' -# -# SS 2025-02-04: -# Updated alias mapping with glibc 2.41 supported locales and the latest -# X lib alias mapping. -# -# These are the differences compared to the old mapping (Python 3.13.1 -# and older): -# -# updated 'c.utf8' -> 'C.UTF-8' to 'en_US.UTF-8' -# updated 'de_it' -> 'de_IT.ISO8859-1' to 'de_IT.UTF-8' -# removed 'de_li.utf8' -# updated 'en_il' -> 'en_IL.UTF-8' to 'en_IL.ISO8859-1' -# removed 'english.iso88591' -# updated 'es_cu' -> 'es_CU.UTF-8' to 'es_CU.ISO8859-1' -# updated 'russian' -> 'ru_RU.KOI8-R' to 'ru_RU.ISO8859-5' -# updated 'sr@latn' -> 'sr_CS.UTF-8@latin' to 'sr_RS.UTF-8@latin' -# removed 'univ' -# removed 'universal' -# -# SS 2025-06-10: -# Remove 'c.utf8' -> 'en_US.UTF-8' because 'en_US.UTF-8' does not exist -# on all platforms. 
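[Editorial note on the changelog comments above, which track how this table has churned across X.org and glibc updates. Lookups go through normalize(), which lowercases the name before consulting the table; a sketch against entries that appear below (values may differ in other releases):

import locale

assert locale.locale_alias['czech'] == 'cs_CZ.ISO8859-2'
assert locale.normalize('DUTCH') == 'nl_NL.ISO8859-1'
]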
- -locale_alias = { - 'a3': 'az_AZ.KOI8-C', - 'a3_az': 'az_AZ.KOI8-C', - 'a3_az.koic': 'az_AZ.KOI8-C', - 'aa_dj': 'aa_DJ.ISO8859-1', - 'aa_er': 'aa_ER.UTF-8', - 'aa_et': 'aa_ET.UTF-8', - 'af': 'af_ZA.ISO8859-1', - 'af_za': 'af_ZA.ISO8859-1', - 'agr_pe': 'agr_PE.UTF-8', - 'ak_gh': 'ak_GH.UTF-8', - 'am': 'am_ET.UTF-8', - 'am_et': 'am_ET.UTF-8', - 'american': 'en_US.ISO8859-1', - 'an_es': 'an_ES.ISO8859-15', - 'anp_in': 'anp_IN.UTF-8', - 'ar': 'ar_AA.ISO8859-6', - 'ar_aa': 'ar_AA.ISO8859-6', - 'ar_ae': 'ar_AE.ISO8859-6', - 'ar_bh': 'ar_BH.ISO8859-6', - 'ar_dz': 'ar_DZ.ISO8859-6', - 'ar_eg': 'ar_EG.ISO8859-6', - 'ar_in': 'ar_IN.UTF-8', - 'ar_iq': 'ar_IQ.ISO8859-6', - 'ar_jo': 'ar_JO.ISO8859-6', - 'ar_kw': 'ar_KW.ISO8859-6', - 'ar_lb': 'ar_LB.ISO8859-6', - 'ar_ly': 'ar_LY.ISO8859-6', - 'ar_ma': 'ar_MA.ISO8859-6', - 'ar_om': 'ar_OM.ISO8859-6', - 'ar_qa': 'ar_QA.ISO8859-6', - 'ar_sa': 'ar_SA.ISO8859-6', - 'ar_sd': 'ar_SD.ISO8859-6', - 'ar_ss': 'ar_SS.UTF-8', - 'ar_sy': 'ar_SY.ISO8859-6', - 'ar_tn': 'ar_TN.ISO8859-6', - 'ar_ye': 'ar_YE.ISO8859-6', - 'arabic': 'ar_AA.ISO8859-6', - 'as': 'as_IN.UTF-8', - 'as_in': 'as_IN.UTF-8', - 'ast_es': 'ast_ES.ISO8859-15', - 'ayc_pe': 'ayc_PE.UTF-8', - 'az': 'az_AZ.ISO8859-9E', - 'az_az': 'az_AZ.ISO8859-9E', - 'az_az.iso88599e': 'az_AZ.ISO8859-9E', - 'az_ir': 'az_IR.UTF-8', - 'be': 'be_BY.CP1251', - 'be@latin': 'be_BY.UTF-8@latin', - 'be_bg.utf8': 'bg_BG.UTF-8', - 'be_by': 'be_BY.CP1251', - 'be_by@latin': 'be_BY.UTF-8@latin', - 'bem_zm': 'bem_ZM.UTF-8', - 'ber_dz': 'ber_DZ.UTF-8', - 'ber_ma': 'ber_MA.UTF-8', - 'bg': 'bg_BG.CP1251', - 'bg_bg': 'bg_BG.CP1251', - 'bhb_in.utf8': 'bhb_IN.UTF-8', - 'bho_in': 'bho_IN.UTF-8', - 'bho_np': 'bho_NP.UTF-8', - 'bi_vu': 'bi_VU.UTF-8', - 'bn_bd': 'bn_BD.UTF-8', - 'bn_in': 'bn_IN.UTF-8', - 'bo_cn': 'bo_CN.UTF-8', - 'bo_in': 'bo_IN.UTF-8', - 'bokmal': 'nb_NO.ISO8859-1', - 'bokm\xe5l': 'nb_NO.ISO8859-1', - 'br': 'br_FR.ISO8859-1', - 'br_fr': 'br_FR.ISO8859-1', - 'brx_in': 'brx_IN.UTF-8', - 'bs': 'bs_BA.ISO8859-2', - 'bs_ba': 'bs_BA.ISO8859-2', - 'bulgarian': 'bg_BG.CP1251', - 'byn_er': 'byn_ER.UTF-8', - 'c': 'C', - 'c-french': 'fr_CA.ISO8859-1', - 'c.ascii': 'C', - 'c.en': 'C', - 'c.iso88591': 'en_US.ISO8859-1', - 'c_c': 'C', - 'c_c.c': 'C', - 'ca': 'ca_ES.ISO8859-1', - 'ca_ad': 'ca_AD.ISO8859-1', - 'ca_es': 'ca_ES.ISO8859-1', - 'ca_es@valencia': 'ca_ES.UTF-8@valencia', - 'ca_fr': 'ca_FR.ISO8859-1', - 'ca_it': 'ca_IT.ISO8859-1', - 'catalan': 'ca_ES.ISO8859-1', - 'ce_ru': 'ce_RU.UTF-8', - 'cextend': 'en_US.ISO8859-1', - 'chinese-s': 'zh_CN.eucCN', - 'chinese-t': 'zh_TW.eucTW', - 'chr_us': 'chr_US.UTF-8', - 'ckb_iq': 'ckb_IQ.UTF-8', - 'cmn_tw': 'cmn_TW.UTF-8', - 'crh_ru': 'crh_RU.UTF-8', - 'crh_ua': 'crh_UA.UTF-8', - 'croatian': 'hr_HR.ISO8859-2', - 'cs': 'cs_CZ.ISO8859-2', - 'cs_cs': 'cs_CZ.ISO8859-2', - 'cs_cz': 'cs_CZ.ISO8859-2', - 'csb_pl': 'csb_PL.UTF-8', - 'cv_ru': 'cv_RU.UTF-8', - 'cy': 'cy_GB.ISO8859-1', - 'cy_gb': 'cy_GB.ISO8859-1', - 'cz': 'cs_CZ.ISO8859-2', - 'cz_cz': 'cs_CZ.ISO8859-2', - 'czech': 'cs_CZ.ISO8859-2', - 'da': 'da_DK.ISO8859-1', - 'da_dk': 'da_DK.ISO8859-1', - 'danish': 'da_DK.ISO8859-1', - 'dansk': 'da_DK.ISO8859-1', - 'de': 'de_DE.ISO8859-1', - 'de_at': 'de_AT.ISO8859-1', - 'de_be': 'de_BE.ISO8859-1', - 'de_ch': 'de_CH.ISO8859-1', - 'de_de': 'de_DE.ISO8859-1', - 'de_it': 'de_IT.UTF-8', - 'de_li': 'de_LI.ISO8859-1', - 'de_lu': 'de_LU.ISO8859-1', - 'deutsch': 'de_DE.ISO8859-1', - 'doi_in': 'doi_IN.UTF-8', - 'dsb_de': 'dsb_DE.UTF-8', - 'dutch': 'nl_NL.ISO8859-1', - 'dutch.iso88591': 'nl_BE.ISO8859-1', - 
'dv_mv': 'dv_MV.UTF-8', - 'dz_bt': 'dz_BT.UTF-8', - 'ee': 'ee_EE.ISO8859-4', - 'ee_ee': 'ee_EE.ISO8859-4', - 'eesti': 'et_EE.ISO8859-1', - 'el': 'el_GR.ISO8859-7', - 'el_cy': 'el_CY.ISO8859-7', - 'el_gr': 'el_GR.ISO8859-7', - 'el_gr@euro': 'el_GR.ISO8859-15', - 'en': 'en_US.ISO8859-1', - 'en_ag': 'en_AG.UTF-8', - 'en_au': 'en_AU.ISO8859-1', - 'en_be': 'en_BE.ISO8859-1', - 'en_bw': 'en_BW.ISO8859-1', - 'en_ca': 'en_CA.ISO8859-1', - 'en_dk': 'en_DK.ISO8859-1', - 'en_dl.utf8': 'en_DL.UTF-8', - 'en_gb': 'en_GB.ISO8859-1', - 'en_hk': 'en_HK.ISO8859-1', - 'en_ie': 'en_IE.ISO8859-1', - 'en_il': 'en_IL.ISO8859-1', - 'en_in': 'en_IN.ISO8859-1', - 'en_ng': 'en_NG.UTF-8', - 'en_nz': 'en_NZ.ISO8859-1', - 'en_ph': 'en_PH.ISO8859-1', - 'en_sc.utf8': 'en_SC.UTF-8', - 'en_sg': 'en_SG.ISO8859-1', - 'en_uk': 'en_GB.ISO8859-1', - 'en_us': 'en_US.ISO8859-1', - 'en_us@euro@euro': 'en_US.ISO8859-15', - 'en_za': 'en_ZA.ISO8859-1', - 'en_zm': 'en_ZM.UTF-8', - 'en_zw': 'en_ZW.ISO8859-1', - 'en_zw.utf8': 'en_ZS.UTF-8', - 'eng_gb': 'en_GB.ISO8859-1', - 'english': 'en_EN.ISO8859-1', - 'english_uk': 'en_GB.ISO8859-1', - 'english_united-states': 'en_US.ISO8859-1', - 'english_united-states.437': 'C', - 'english_us': 'en_US.ISO8859-1', - 'eo': 'eo_XX.ISO8859-3', - 'eo.utf8': 'eo.UTF-8', - 'eo_eo': 'eo_EO.ISO8859-3', - 'eo_us.utf8': 'eo_US.UTF-8', - 'eo_xx': 'eo_XX.ISO8859-3', - 'es': 'es_ES.ISO8859-1', - 'es_ar': 'es_AR.ISO8859-1', - 'es_bo': 'es_BO.ISO8859-1', - 'es_cl': 'es_CL.ISO8859-1', - 'es_co': 'es_CO.ISO8859-1', - 'es_cr': 'es_CR.ISO8859-1', - 'es_cu': 'es_CU.ISO8859-1', - 'es_do': 'es_DO.ISO8859-1', - 'es_ec': 'es_EC.ISO8859-1', - 'es_es': 'es_ES.ISO8859-1', - 'es_gt': 'es_GT.ISO8859-1', - 'es_hn': 'es_HN.ISO8859-1', - 'es_mx': 'es_MX.ISO8859-1', - 'es_ni': 'es_NI.ISO8859-1', - 'es_pa': 'es_PA.ISO8859-1', - 'es_pe': 'es_PE.ISO8859-1', - 'es_pr': 'es_PR.ISO8859-1', - 'es_py': 'es_PY.ISO8859-1', - 'es_sv': 'es_SV.ISO8859-1', - 'es_us': 'es_US.ISO8859-1', - 'es_uy': 'es_UY.ISO8859-1', - 'es_ve': 'es_VE.ISO8859-1', - 'estonian': 'et_EE.ISO8859-1', - 'et': 'et_EE.ISO8859-15', - 'et_ee': 'et_EE.ISO8859-15', - 'eu': 'eu_ES.ISO8859-1', - 'eu_es': 'eu_ES.ISO8859-1', - 'eu_fr': 'eu_FR.ISO8859-1', - 'fa': 'fa_IR.UTF-8', - 'fa_ir': 'fa_IR.UTF-8', - 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342', - 'ff_sn': 'ff_SN.UTF-8', - 'fi': 'fi_FI.ISO8859-15', - 'fi_fi': 'fi_FI.ISO8859-15', - 'fil_ph': 'fil_PH.UTF-8', - 'finnish': 'fi_FI.ISO8859-1', - 'fo': 'fo_FO.ISO8859-1', - 'fo_fo': 'fo_FO.ISO8859-1', - 'fr': 'fr_FR.ISO8859-1', - 'fr_be': 'fr_BE.ISO8859-1', - 'fr_ca': 'fr_CA.ISO8859-1', - 'fr_ch': 'fr_CH.ISO8859-1', - 'fr_fr': 'fr_FR.ISO8859-1', - 'fr_lu': 'fr_LU.ISO8859-1', - 'fran\xe7ais': 'fr_FR.ISO8859-1', - 'fre_fr': 'fr_FR.ISO8859-1', - 'french': 'fr_FR.ISO8859-1', - 'french.iso88591': 'fr_CH.ISO8859-1', - 'french_france': 'fr_FR.ISO8859-1', - 'fur_it': 'fur_IT.UTF-8', - 'fy_de': 'fy_DE.UTF-8', - 'fy_nl': 'fy_NL.UTF-8', - 'ga': 'ga_IE.ISO8859-1', - 'ga_ie': 'ga_IE.ISO8859-1', - 'galego': 'gl_ES.ISO8859-1', - 'galician': 'gl_ES.ISO8859-1', - 'gbm_in': 'gbm_IN.UTF-8', - 'gd': 'gd_GB.ISO8859-1', - 'gd_gb': 'gd_GB.ISO8859-1', - 'ger_de': 'de_DE.ISO8859-1', - 'german': 'de_DE.ISO8859-1', - 'german.iso88591': 'de_CH.ISO8859-1', - 'german_germany': 'de_DE.ISO8859-1', - 'gez_er': 'gez_ER.UTF-8', - 'gez_et': 'gez_ET.UTF-8', - 'gl': 'gl_ES.ISO8859-1', - 'gl_es': 'gl_ES.ISO8859-1', - 'greek': 'el_GR.ISO8859-7', - 'gu_in': 'gu_IN.UTF-8', - 'gv': 'gv_GB.ISO8859-1', - 'gv_gb': 'gv_GB.ISO8859-1', - 'ha_ng': 'ha_NG.UTF-8', - 'hak_tw': 'hak_TW.UTF-8', 
- 'he': 'he_IL.ISO8859-8', - 'he_il': 'he_IL.ISO8859-8', - 'hebrew': 'he_IL.ISO8859-8', - 'hi': 'hi_IN.ISCII-DEV', - 'hi_in': 'hi_IN.ISCII-DEV', - 'hi_in.isciidev': 'hi_IN.ISCII-DEV', - 'hif_fj': 'hif_FJ.UTF-8', - 'hne': 'hne_IN.UTF-8', - 'hne_in': 'hne_IN.UTF-8', - 'hr': 'hr_HR.ISO8859-2', - 'hr_hr': 'hr_HR.ISO8859-2', - 'hrvatski': 'hr_HR.ISO8859-2', - 'hsb_de': 'hsb_DE.ISO8859-2', - 'ht_ht': 'ht_HT.UTF-8', - 'hu': 'hu_HU.ISO8859-2', - 'hu_hu': 'hu_HU.ISO8859-2', - 'hungarian': 'hu_HU.ISO8859-2', - 'hy_am': 'hy_AM.UTF-8', - 'hy_am.armscii8': 'hy_AM.ARMSCII_8', - 'ia': 'ia.UTF-8', - 'ia_fr': 'ia_FR.UTF-8', - 'icelandic': 'is_IS.ISO8859-1', - 'id': 'id_ID.ISO8859-1', - 'id_id': 'id_ID.ISO8859-1', - 'ie': 'ie.UTF-8', - 'ig_ng': 'ig_NG.UTF-8', - 'ik_ca': 'ik_CA.UTF-8', - 'in': 'id_ID.ISO8859-1', - 'in_id': 'id_ID.ISO8859-1', - 'is': 'is_IS.ISO8859-1', - 'is_is': 'is_IS.ISO8859-1', - 'iso-8859-1': 'en_US.ISO8859-1', - 'iso-8859-15': 'en_US.ISO8859-15', - 'iso8859-1': 'en_US.ISO8859-1', - 'iso8859-15': 'en_US.ISO8859-15', - 'iso_8859_1': 'en_US.ISO8859-1', - 'iso_8859_15': 'en_US.ISO8859-15', - 'it': 'it_IT.ISO8859-1', - 'it_ch': 'it_CH.ISO8859-1', - 'it_it': 'it_IT.ISO8859-1', - 'italian': 'it_IT.ISO8859-1', - 'iu': 'iu_CA.NUNACOM-8', - 'iu_ca': 'iu_CA.NUNACOM-8', - 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8', - 'iw': 'he_IL.ISO8859-8', - 'iw_il': 'he_IL.ISO8859-8', - 'iw_il.utf8': 'iw_IL.UTF-8', - 'ja': 'ja_JP.eucJP', - 'ja_jp': 'ja_JP.eucJP', - 'ja_jp.euc': 'ja_JP.eucJP', - 'ja_jp.mscode': 'ja_JP.SJIS', - 'ja_jp.pck': 'ja_JP.SJIS', - 'japan': 'ja_JP.eucJP', - 'japanese': 'ja_JP.eucJP', - 'japanese-euc': 'ja_JP.eucJP', - 'japanese.euc': 'ja_JP.eucJP', - 'jp_jp': 'ja_JP.eucJP', - 'ka': 'ka_GE.GEORGIAN-ACADEMY', - 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY', - 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY', - 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS', - 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY', - 'kab_dz': 'kab_DZ.UTF-8', - 'kk_kz': 'kk_KZ.ptcp154', - 'kl': 'kl_GL.ISO8859-1', - 'kl_gl': 'kl_GL.ISO8859-1', - 'km_kh': 'km_KH.UTF-8', - 'kn': 'kn_IN.UTF-8', - 'kn_in': 'kn_IN.UTF-8', - 'ko': 'ko_KR.eucKR', - 'ko_kr': 'ko_KR.eucKR', - 'ko_kr.euc': 'ko_KR.eucKR', - 'kok_in': 'kok_IN.UTF-8', - 'korean': 'ko_KR.eucKR', - 'korean.euc': 'ko_KR.eucKR', - 'ks': 'ks_IN.UTF-8', - 'ks_in': 'ks_IN.UTF-8', - 'ks_in@devanagari.utf8': 'ks_IN.UTF-8@devanagari', - 'ku_tr': 'ku_TR.ISO8859-9', - 'kv_ru': 'kv_RU.UTF-8', - 'kw': 'kw_GB.ISO8859-1', - 'kw_gb': 'kw_GB.ISO8859-1', - 'ky': 'ky_KG.UTF-8', - 'ky_kg': 'ky_KG.UTF-8', - 'lb_lu': 'lb_LU.UTF-8', - 'lg_ug': 'lg_UG.ISO8859-10', - 'li_be': 'li_BE.UTF-8', - 'li_nl': 'li_NL.UTF-8', - 'lij_it': 'lij_IT.UTF-8', - 'lithuanian': 'lt_LT.ISO8859-13', - 'ln_cd': 'ln_CD.UTF-8', - 'lo': 'lo_LA.MULELAO-1', - 'lo_la': 'lo_LA.MULELAO-1', - 'lo_la.cp1133': 'lo_LA.IBM-CP1133', - 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133', - 'lo_la.mulelao1': 'lo_LA.MULELAO-1', - 'lt': 'lt_LT.ISO8859-13', - 'lt_lt': 'lt_LT.ISO8859-13', - 'ltg_lv.utf8': 'ltg_LV.UTF-8', - 'lv': 'lv_LV.ISO8859-13', - 'lv_lv': 'lv_LV.ISO8859-13', - 'lzh_tw': 'lzh_TW.UTF-8', - 'mag_in': 'mag_IN.UTF-8', - 'mai': 'mai_IN.UTF-8', - 'mai_in': 'mai_IN.UTF-8', - 'mai_np': 'mai_NP.UTF-8', - 'mdf_ru': 'mdf_RU.UTF-8', - 'mfe_mu': 'mfe_MU.UTF-8', - 'mg_mg': 'mg_MG.ISO8859-15', - 'mhr_ru': 'mhr_RU.UTF-8', - 'mi': 'mi_NZ.ISO8859-1', - 'mi_nz': 'mi_NZ.ISO8859-1', - 'miq_ni': 'miq_NI.UTF-8', - 'mjw_in': 'mjw_IN.UTF-8', - 'mk': 'mk_MK.ISO8859-5', - 'mk_mk': 'mk_MK.ISO8859-5', - 'ml': 'ml_IN.UTF-8', - 'ml_in': 'ml_IN.UTF-8', - 'mn_mn': 'mn_MN.UTF-8', - 
'mni_in': 'mni_IN.UTF-8', - 'mnw_mm': 'mnw_MM.UTF-8', - 'mr': 'mr_IN.UTF-8', - 'mr_in': 'mr_IN.UTF-8', - 'ms': 'ms_MY.ISO8859-1', - 'ms_my': 'ms_MY.ISO8859-1', - 'mt': 'mt_MT.ISO8859-3', - 'mt_mt': 'mt_MT.ISO8859-3', - 'my_mm': 'my_MM.UTF-8', - 'nan_tw': 'nan_TW.UTF-8', - 'nb': 'nb_NO.ISO8859-1', - 'nb_no': 'nb_NO.ISO8859-1', - 'nds_de': 'nds_DE.UTF-8', - 'nds_nl': 'nds_NL.UTF-8', - 'ne_np': 'ne_NP.UTF-8', - 'nhn_mx': 'nhn_MX.UTF-8', - 'niu_nu': 'niu_NU.UTF-8', - 'niu_nz': 'niu_NZ.UTF-8', - 'nl': 'nl_NL.ISO8859-1', - 'nl_aw': 'nl_AW.UTF-8', - 'nl_be': 'nl_BE.ISO8859-1', - 'nl_nl': 'nl_NL.ISO8859-1', - 'nn': 'nn_NO.ISO8859-1', - 'nn_no': 'nn_NO.ISO8859-1', - 'no': 'no_NO.ISO8859-1', - 'no@nynorsk': 'ny_NO.ISO8859-1', - 'no_no': 'no_NO.ISO8859-1', - 'no_no.iso88591@bokmal': 'no_NO.ISO8859-1', - 'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1', - 'norwegian': 'no_NO.ISO8859-1', - 'nr': 'nr_ZA.ISO8859-1', - 'nr_za': 'nr_ZA.ISO8859-1', - 'nso': 'nso_ZA.ISO8859-15', - 'nso_za': 'nso_ZA.ISO8859-15', - 'ny': 'ny_NO.ISO8859-1', - 'ny_no': 'ny_NO.ISO8859-1', - 'nynorsk': 'nn_NO.ISO8859-1', - 'oc': 'oc_FR.ISO8859-1', - 'oc_fr': 'oc_FR.ISO8859-1', - 'om_et': 'om_ET.UTF-8', - 'om_ke': 'om_KE.ISO8859-1', - 'or': 'or_IN.UTF-8', - 'or_in': 'or_IN.UTF-8', - 'os_ru': 'os_RU.UTF-8', - 'pa': 'pa_IN.UTF-8', - 'pa_in': 'pa_IN.UTF-8', - 'pa_pk': 'pa_PK.UTF-8', - 'pap_an': 'pap_AN.UTF-8', - 'pap_aw': 'pap_AW.UTF-8', - 'pap_cw': 'pap_CW.UTF-8', - 'pd': 'pd_US.ISO8859-1', - 'pd_de': 'pd_DE.ISO8859-1', - 'pd_us': 'pd_US.ISO8859-1', - 'ph': 'ph_PH.ISO8859-1', - 'ph_ph': 'ph_PH.ISO8859-1', - 'pl': 'pl_PL.ISO8859-2', - 'pl_pl': 'pl_PL.ISO8859-2', - 'polish': 'pl_PL.ISO8859-2', - 'portuguese': 'pt_PT.ISO8859-1', - 'portuguese_brazil': 'pt_BR.ISO8859-1', - 'posix': 'C', - 'posix-utf2': 'C', - 'pp': 'pp_AN.ISO8859-1', - 'pp_an': 'pp_AN.ISO8859-1', - 'ps_af': 'ps_AF.UTF-8', - 'pt': 'pt_PT.ISO8859-1', - 'pt_br': 'pt_BR.ISO8859-1', - 'pt_pt': 'pt_PT.ISO8859-1', - 'quz_pe': 'quz_PE.UTF-8', - 'raj_in': 'raj_IN.UTF-8', - 'rif_ma': 'rif_MA.UTF-8', - 'ro': 'ro_RO.ISO8859-2', - 'ro_ro': 'ro_RO.ISO8859-2', - 'romanian': 'ro_RO.ISO8859-2', - 'ru': 'ru_RU.UTF-8', - 'ru_ru': 'ru_RU.UTF-8', - 'ru_ua': 'ru_UA.KOI8-U', - 'rumanian': 'ro_RO.ISO8859-2', - 'russian': 'ru_RU.ISO8859-5', - 'rw': 'rw_RW.ISO8859-1', - 'rw_rw': 'rw_RW.ISO8859-1', - 'sa_in': 'sa_IN.UTF-8', - 'sah_ru': 'sah_RU.UTF-8', - 'sat_in': 'sat_IN.UTF-8', - 'sc_it': 'sc_IT.UTF-8', - 'scn_it': 'scn_IT.UTF-8', - 'sd': 'sd_IN.UTF-8', - 'sd_in': 'sd_IN.UTF-8', - 'sd_in@devanagari.utf8': 'sd_IN.UTF-8@devanagari', - 'sd_pk': 'sd_PK.UTF-8', - 'se_no': 'se_NO.UTF-8', - 'serbocroatian': 'sr_RS.UTF-8@latin', - 'sgs_lt': 'sgs_LT.UTF-8', - 'sh': 'sr_RS.UTF-8@latin', - 'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2', - 'sh_hr': 'sh_HR.ISO8859-2', - 'sh_hr.iso88592': 'hr_HR.ISO8859-2', - 'sh_sp': 'sr_CS.ISO8859-2', - 'sh_yu': 'sr_RS.UTF-8@latin', - 'shn_mm': 'shn_MM.UTF-8', - 'shs_ca': 'shs_CA.UTF-8', - 'si': 'si_LK.UTF-8', - 'si_lk': 'si_LK.UTF-8', - 'sid_et': 'sid_ET.UTF-8', - 'sinhala': 'si_LK.UTF-8', - 'sk': 'sk_SK.ISO8859-2', - 'sk_sk': 'sk_SK.ISO8859-2', - 'sl': 'sl_SI.ISO8859-2', - 'sl_cs': 'sl_CS.ISO8859-2', - 'sl_si': 'sl_SI.ISO8859-2', - 'slovak': 'sk_SK.ISO8859-2', - 'slovene': 'sl_SI.ISO8859-2', - 'slovenian': 'sl_SI.ISO8859-2', - 'sm_ws': 'sm_WS.UTF-8', - 'so_dj': 'so_DJ.ISO8859-1', - 'so_et': 'so_ET.UTF-8', - 'so_ke': 'so_KE.ISO8859-1', - 'so_so': 'so_SO.ISO8859-1', - 'sp': 'sr_CS.ISO8859-5', - 'sp_yu': 'sr_CS.ISO8859-5', - 'spanish': 'es_ES.ISO8859-1', - 'spanish_spain': 
'es_ES.ISO8859-1', - 'sq': 'sq_AL.ISO8859-2', - 'sq_al': 'sq_AL.ISO8859-2', - 'sq_mk': 'sq_MK.UTF-8', - 'sr': 'sr_RS.UTF-8', - 'sr@cyrillic': 'sr_RS.UTF-8', - 'sr@latn': 'sr_RS.UTF-8@latin', - 'sr_cs': 'sr_CS.UTF-8', - 'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2', - 'sr_cs@latn': 'sr_CS.UTF-8@latin', - 'sr_me': 'sr_ME.UTF-8', - 'sr_rs': 'sr_RS.UTF-8', - 'sr_rs@latn': 'sr_RS.UTF-8@latin', - 'sr_sp': 'sr_CS.ISO8859-2', - 'sr_yu': 'sr_RS.UTF-8@latin', - 'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251', - 'sr_yu.iso88592': 'sr_CS.ISO8859-2', - 'sr_yu.iso88595': 'sr_CS.ISO8859-5', - 'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5', - 'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251', - 'sr_yu.utf8': 'sr_RS.UTF-8', - 'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8', - 'sr_yu@cyrillic': 'sr_RS.UTF-8', - 'ss': 'ss_ZA.ISO8859-1', - 'ss_za': 'ss_ZA.ISO8859-1', - 'ssy_er': 'ssy_ER.UTF-8', - 'st': 'st_ZA.ISO8859-1', - 'st_za': 'st_ZA.ISO8859-1', - 'su_id': 'su_ID.UTF-8', - 'sv': 'sv_SE.ISO8859-1', - 'sv_fi': 'sv_FI.ISO8859-1', - 'sv_se': 'sv_SE.ISO8859-1', - 'sw_ke': 'sw_KE.UTF-8', - 'sw_tz': 'sw_TZ.UTF-8', - 'swedish': 'sv_SE.ISO8859-1', - 'syr': 'syr.UTF-8', - 'szl_pl': 'szl_PL.UTF-8', - 'ta': 'ta_IN.TSCII-0', - 'ta_in': 'ta_IN.TSCII-0', - 'ta_in.tscii': 'ta_IN.TSCII-0', - 'ta_in.tscii0': 'ta_IN.TSCII-0', - 'ta_lk': 'ta_LK.UTF-8', - 'tcy_in.utf8': 'tcy_IN.UTF-8', - 'te': 'te_IN.UTF-8', - 'te_in': 'te_IN.UTF-8', - 'tg': 'tg_TJ.KOI8-C', - 'tg_tj': 'tg_TJ.KOI8-C', - 'th': 'th_TH.ISO8859-11', - 'th_th': 'th_TH.ISO8859-11', - 'th_th.tactis': 'th_TH.TIS620', - 'th_th.tis620': 'th_TH.TIS620', - 'thai': 'th_TH.ISO8859-11', - 'the_np': 'the_NP.UTF-8', - 'ti_er': 'ti_ER.UTF-8', - 'ti_et': 'ti_ET.UTF-8', - 'tig_er': 'tig_ER.UTF-8', - 'tk_tm': 'tk_TM.UTF-8', - 'tl': 'tl_PH.ISO8859-1', - 'tl_ph': 'tl_PH.ISO8859-1', - 'tn': 'tn_ZA.ISO8859-15', - 'tn_za': 'tn_ZA.ISO8859-15', - 'to_to': 'to_TO.UTF-8', - 'tok': 'tok.UTF-8', - 'tpi_pg': 'tpi_PG.UTF-8', - 'tr': 'tr_TR.ISO8859-9', - 'tr_cy': 'tr_CY.ISO8859-9', - 'tr_tr': 'tr_TR.ISO8859-9', - 'ts': 'ts_ZA.ISO8859-1', - 'ts_za': 'ts_ZA.ISO8859-1', - 'tt': 'tt_RU.TATAR-CYR', - 'tt_ru': 'tt_RU.TATAR-CYR', - 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR', - 'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif', - 'turkish': 'tr_TR.ISO8859-9', - 'ug_cn': 'ug_CN.UTF-8', - 'uk': 'uk_UA.KOI8-U', - 'uk_ua': 'uk_UA.KOI8-U', - 'univ.utf8': 'en_US.UTF-8', - 'universal.utf8@ucs4': 'en_US.UTF-8', - 'unm_us': 'unm_US.UTF-8', - 'ur': 'ur_PK.CP1256', - 'ur_in': 'ur_IN.UTF-8', - 'ur_pk': 'ur_PK.CP1256', - 'uz': 'uz_UZ.UTF-8', - 'uz_uz': 'uz_UZ.UTF-8', - 'uz_uz@cyrillic': 'uz_UZ.UTF-8', - 've': 've_ZA.UTF-8', - 've_za': 've_ZA.UTF-8', - 'vi': 'vi_VN.TCVN', - 'vi_vn': 'vi_VN.TCVN', - 'vi_vn.tcvn': 'vi_VN.TCVN', - 'vi_vn.tcvn5712': 'vi_VN.TCVN', - 'vi_vn.viscii': 'vi_VN.VISCII', - 'vi_vn.viscii111': 'vi_VN.VISCII', - 'wa': 'wa_BE.ISO8859-1', - 'wa_be': 'wa_BE.ISO8859-1', - 'wae_ch': 'wae_CH.UTF-8', - 'wal_et': 'wal_ET.UTF-8', - 'wo_sn': 'wo_SN.UTF-8', - 'xh': 'xh_ZA.ISO8859-1', - 'xh_za': 'xh_ZA.ISO8859-1', - 'yi': 'yi_US.CP1255', - 'yi_us': 'yi_US.CP1255', - 'yo_ng': 'yo_NG.UTF-8', - 'yue_hk': 'yue_HK.UTF-8', - 'yuw_pg': 'yuw_PG.UTF-8', - 'zgh_ma': 'zgh_MA.UTF-8', - 'zh': 'zh_CN.eucCN', - 'zh_cn': 'zh_CN.gb2312', - 'zh_cn.big5': 'zh_TW.big5', - 'zh_cn.euc': 'zh_CN.eucCN', - 'zh_hk': 'zh_HK.big5hkscs', - 'zh_hk.big5hk': 'zh_HK.big5hkscs', - 'zh_sg': 'zh_SG.GB2312', - 'zh_sg.gbk': 'zh_SG.GBK', - 'zh_tw': 'zh_TW.big5', - 'zh_tw.euc': 'zh_TW.eucTW', - 'zh_tw.euctw': 'zh_TW.eucTW', - 'zu': 'zu_ZA.ISO8859-1', - 'zu_za': 'zu_ZA.ISO8859-1', -} - -# -# 
This maps Windows language identifiers to locale strings. -# -# This list has been updated from -# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp -# to include every locale up to Windows Vista. -# -# NOTE: this mapping is incomplete. If your language is missing, please -# submit a bug report as detailed in the Python devguide at: -# https://devguide.python.org/triage/issue-tracker/ -# Make sure you include the missing language identifier and the suggested -# locale code. -# - -windows_locale = { - 0x0436: "af_ZA", # Afrikaans - 0x041c: "sq_AL", # Albanian - 0x0484: "gsw_FR",# Alsatian - France - 0x045e: "am_ET", # Amharic - Ethiopia - 0x0401: "ar_SA", # Arabic - Saudi Arabia - 0x0801: "ar_IQ", # Arabic - Iraq - 0x0c01: "ar_EG", # Arabic - Egypt - 0x1001: "ar_LY", # Arabic - Libya - 0x1401: "ar_DZ", # Arabic - Algeria - 0x1801: "ar_MA", # Arabic - Morocco - 0x1c01: "ar_TN", # Arabic - Tunisia - 0x2001: "ar_OM", # Arabic - Oman - 0x2401: "ar_YE", # Arabic - Yemen - 0x2801: "ar_SY", # Arabic - Syria - 0x2c01: "ar_JO", # Arabic - Jordan - 0x3001: "ar_LB", # Arabic - Lebanon - 0x3401: "ar_KW", # Arabic - Kuwait - 0x3801: "ar_AE", # Arabic - United Arab Emirates - 0x3c01: "ar_BH", # Arabic - Bahrain - 0x4001: "ar_QA", # Arabic - Qatar - 0x042b: "hy_AM", # Armenian - 0x044d: "as_IN", # Assamese - India - 0x042c: "az_AZ", # Azeri - Latin - 0x082c: "az_AZ", # Azeri - Cyrillic - 0x046d: "ba_RU", # Bashkir - 0x042d: "eu_ES", # Basque - Russia - 0x0423: "be_BY", # Belarusian - 0x0445: "bn_IN", # Begali - 0x201a: "bs_BA", # Bosnian - Cyrillic - 0x141a: "bs_BA", # Bosnian - Latin - 0x047e: "br_FR", # Breton - France - 0x0402: "bg_BG", # Bulgarian -# 0x0455: "my_MM", # Burmese - Not supported - 0x0403: "ca_ES", # Catalan - 0x0004: "zh_CHS",# Chinese - Simplified - 0x0404: "zh_TW", # Chinese - Taiwan - 0x0804: "zh_CN", # Chinese - PRC - 0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R. - 0x1004: "zh_SG", # Chinese - Singapore - 0x1404: "zh_MO", # Chinese - Macao S.A.R. 
- 0x7c04: "zh_CHT",# Chinese - Traditional - 0x0483: "co_FR", # Corsican - France - 0x041a: "hr_HR", # Croatian - 0x101a: "hr_BA", # Croatian - Bosnia - 0x0405: "cs_CZ", # Czech - 0x0406: "da_DK", # Danish - 0x048c: "gbz_AF",# Dari - Afghanistan - 0x0465: "div_MV",# Divehi - Maldives - 0x0413: "nl_NL", # Dutch - The Netherlands - 0x0813: "nl_BE", # Dutch - Belgium - 0x0409: "en_US", # English - United States - 0x0809: "en_GB", # English - United Kingdom - 0x0c09: "en_AU", # English - Australia - 0x1009: "en_CA", # English - Canada - 0x1409: "en_NZ", # English - New Zealand - 0x1809: "en_IE", # English - Ireland - 0x1c09: "en_ZA", # English - South Africa - 0x2009: "en_JA", # English - Jamaica - 0x2409: "en_CB", # English - Caribbean - 0x2809: "en_BZ", # English - Belize - 0x2c09: "en_TT", # English - Trinidad - 0x3009: "en_ZW", # English - Zimbabwe - 0x3409: "en_PH", # English - Philippines - 0x4009: "en_IN", # English - India - 0x4409: "en_MY", # English - Malaysia - 0x4809: "en_IN", # English - Singapore - 0x0425: "et_EE", # Estonian - 0x0438: "fo_FO", # Faroese - 0x0464: "fil_PH",# Filipino - 0x040b: "fi_FI", # Finnish - 0x040c: "fr_FR", # French - France - 0x080c: "fr_BE", # French - Belgium - 0x0c0c: "fr_CA", # French - Canada - 0x100c: "fr_CH", # French - Switzerland - 0x140c: "fr_LU", # French - Luxembourg - 0x180c: "fr_MC", # French - Monaco - 0x0462: "fy_NL", # Frisian - Netherlands - 0x0456: "gl_ES", # Galician - 0x0437: "ka_GE", # Georgian - 0x0407: "de_DE", # German - Germany - 0x0807: "de_CH", # German - Switzerland - 0x0c07: "de_AT", # German - Austria - 0x1007: "de_LU", # German - Luxembourg - 0x1407: "de_LI", # German - Liechtenstein - 0x0408: "el_GR", # Greek - 0x046f: "kl_GL", # Greenlandic - Greenland - 0x0447: "gu_IN", # Gujarati - 0x0468: "ha_NG", # Hausa - Latin - 0x040d: "he_IL", # Hebrew - 0x0439: "hi_IN", # Hindi - 0x040e: "hu_HU", # Hungarian - 0x040f: "is_IS", # Icelandic - 0x0421: "id_ID", # Indonesian - 0x045d: "iu_CA", # Inuktitut - Syllabics - 0x085d: "iu_CA", # Inuktitut - Latin - 0x083c: "ga_IE", # Irish - Ireland - 0x0410: "it_IT", # Italian - Italy - 0x0810: "it_CH", # Italian - Switzerland - 0x0411: "ja_JP", # Japanese - 0x044b: "kn_IN", # Kannada - India - 0x043f: "kk_KZ", # Kazakh - 0x0453: "kh_KH", # Khmer - Cambodia - 0x0486: "qut_GT",# K'iche - Guatemala - 0x0487: "rw_RW", # Kinyarwanda - Rwanda - 0x0457: "kok_IN",# Konkani - 0x0412: "ko_KR", # Korean - 0x0440: "ky_KG", # Kyrgyz - 0x0454: "lo_LA", # Lao - Lao PDR - 0x0426: "lv_LV", # Latvian - 0x0427: "lt_LT", # Lithuanian - 0x082e: "dsb_DE",# Lower Sorbian - Germany - 0x046e: "lb_LU", # Luxembourgish - 0x042f: "mk_MK", # FYROM Macedonian - 0x043e: "ms_MY", # Malay - Malaysia - 0x083e: "ms_BN", # Malay - Brunei Darussalam - 0x044c: "ml_IN", # Malayalam - India - 0x043a: "mt_MT", # Maltese - 0x0481: "mi_NZ", # Maori - 0x047a: "arn_CL",# Mapudungun - 0x044e: "mr_IN", # Marathi - 0x047c: "moh_CA",# Mohawk - Canada - 0x0450: "mn_MN", # Mongolian - Cyrillic - 0x0850: "mn_CN", # Mongolian - PRC - 0x0461: "ne_NP", # Nepali - 0x0414: "nb_NO", # Norwegian - Bokmal - 0x0814: "nn_NO", # Norwegian - Nynorsk - 0x0482: "oc_FR", # Occitan - France - 0x0448: "or_IN", # Oriya - India - 0x0463: "ps_AF", # Pashto - Afghanistan - 0x0429: "fa_IR", # Persian - 0x0415: "pl_PL", # Polish - 0x0416: "pt_BR", # Portuguese - Brazil - 0x0816: "pt_PT", # Portuguese - Portugal - 0x0446: "pa_IN", # Punjabi - 0x046b: "quz_BO",# Quechua (Bolivia) - 0x086b: "quz_EC",# Quechua (Ecuador) - 0x0c6b: "quz_PE",# Quechua (Peru) - 0x0418: 
"ro_RO", # Romanian - Romania - 0x0417: "rm_CH", # Romansh - 0x0419: "ru_RU", # Russian - 0x243b: "smn_FI",# Sami Finland - 0x103b: "smj_NO",# Sami Norway - 0x143b: "smj_SE",# Sami Sweden - 0x043b: "se_NO", # Sami Northern Norway - 0x083b: "se_SE", # Sami Northern Sweden - 0x0c3b: "se_FI", # Sami Northern Finland - 0x203b: "sms_FI",# Sami Skolt - 0x183b: "sma_NO",# Sami Southern Norway - 0x1c3b: "sma_SE",# Sami Southern Sweden - 0x044f: "sa_IN", # Sanskrit - 0x0c1a: "sr_SP", # Serbian - Cyrillic - 0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic - 0x081a: "sr_SP", # Serbian - Latin - 0x181a: "sr_BA", # Serbian - Bosnia Latin - 0x045b: "si_LK", # Sinhala - Sri Lanka - 0x046c: "ns_ZA", # Northern Sotho - 0x0432: "tn_ZA", # Setswana - Southern Africa - 0x041b: "sk_SK", # Slovak - 0x0424: "sl_SI", # Slovenian - 0x040a: "es_ES", # Spanish - Spain - 0x080a: "es_MX", # Spanish - Mexico - 0x0c0a: "es_ES", # Spanish - Spain (Modern) - 0x100a: "es_GT", # Spanish - Guatemala - 0x140a: "es_CR", # Spanish - Costa Rica - 0x180a: "es_PA", # Spanish - Panama - 0x1c0a: "es_DO", # Spanish - Dominican Republic - 0x200a: "es_VE", # Spanish - Venezuela - 0x240a: "es_CO", # Spanish - Colombia - 0x280a: "es_PE", # Spanish - Peru - 0x2c0a: "es_AR", # Spanish - Argentina - 0x300a: "es_EC", # Spanish - Ecuador - 0x340a: "es_CL", # Spanish - Chile - 0x380a: "es_UR", # Spanish - Uruguay - 0x3c0a: "es_PY", # Spanish - Paraguay - 0x400a: "es_BO", # Spanish - Bolivia - 0x440a: "es_SV", # Spanish - El Salvador - 0x480a: "es_HN", # Spanish - Honduras - 0x4c0a: "es_NI", # Spanish - Nicaragua - 0x500a: "es_PR", # Spanish - Puerto Rico - 0x540a: "es_US", # Spanish - United States -# 0x0430: "", # Sutu - Not supported - 0x0441: "sw_KE", # Swahili - 0x041d: "sv_SE", # Swedish - Sweden - 0x081d: "sv_FI", # Swedish - Finland - 0x045a: "syr_SY",# Syriac - 0x0428: "tg_TJ", # Tajik - Cyrillic - 0x085f: "tmz_DZ",# Tamazight - Latin - 0x0449: "ta_IN", # Tamil - 0x0444: "tt_RU", # Tatar - 0x044a: "te_IN", # Telugu - 0x041e: "th_TH", # Thai - 0x0851: "bo_BT", # Tibetan - Bhutan - 0x0451: "bo_CN", # Tibetan - PRC - 0x041f: "tr_TR", # Turkish - 0x0442: "tk_TM", # Turkmen - Cyrillic - 0x0480: "ug_CN", # Uighur - Arabic - 0x0422: "uk_UA", # Ukrainian - 0x042e: "wen_DE",# Upper Sorbian - Germany - 0x0420: "ur_PK", # Urdu - 0x0820: "ur_IN", # Urdu - India - 0x0443: "uz_UZ", # Uzbek - Latin - 0x0843: "uz_UZ", # Uzbek - Cyrillic - 0x042a: "vi_VN", # Vietnamese - 0x0452: "cy_GB", # Welsh - 0x0488: "wo_SN", # Wolof - Senegal - 0x0434: "xh_ZA", # Xhosa - South Africa - 0x0485: "sah_RU",# Yakut - Cyrillic - 0x0478: "ii_CN", # Yi - PRC - 0x046a: "yo_NG", # Yoruba - Nigeria - 0x0435: "zu_ZA", # Zulu -} - -def _print_locale(): - - """ Test function. 
- """ - categories = {} - def _init_categories(categories=categories): - for k,v in globals().items(): - if k[:3] == 'LC_': - categories[k] = v - _init_categories() - del categories['LC_ALL'] - - print('Locale defaults as determined by getdefaultlocale():') - print('-'*72) - lang, enc = getdefaultlocale() - print('Language: ', lang or '(undefined)') - print('Encoding: ', enc or '(undefined)') - print() - - print('Locale settings on startup:') - print('-'*72) - for name,category in categories.items(): - print(name, '...') - lang, enc = getlocale(category) - print(' Language: ', lang or '(undefined)') - print(' Encoding: ', enc or '(undefined)') - print() - - try: - setlocale(LC_ALL, "") - except: - print('NOTE:') - print('setlocale(LC_ALL, "") does not support the default locale') - print('given in the OS environment variables.') - else: - print() - print('Locale settings after calling setlocale(LC_ALL, ""):') - print('-'*72) - for name,category in categories.items(): - print(name, '...') - lang, enc = getlocale(category) - print(' Language: ', lang or '(undefined)') - print(' Encoding: ', enc or '(undefined)') - print() - -### - -try: - LC_MESSAGES -except NameError: - pass -else: - __all__.append("LC_MESSAGES") - -if __name__=='__main__': - print('Locale aliasing:') - print() - _print_locale() - print() - print('Number formatting:') - print() - _test() diff --git a/Python313_13_x64_Template/Lib/logging/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/logging/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 77594651..00000000 Binary files a/Python313_13_x64_Template/Lib/logging/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/logging/__pycache__/config.cpython-313.pyc b/Python313_13_x64_Template/Lib/logging/__pycache__/config.cpython-313.pyc deleted file mode 100644 index 1c7cb798..00000000 Binary files a/Python313_13_x64_Template/Lib/logging/__pycache__/config.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/logging/__pycache__/handlers.cpython-313.pyc b/Python313_13_x64_Template/Lib/logging/__pycache__/handlers.cpython-313.pyc deleted file mode 100644 index 41bb9b44..00000000 Binary files a/Python313_13_x64_Template/Lib/logging/__pycache__/handlers.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/logging/config.py b/Python313_13_x64_Template/Lib/logging/config.py deleted file mode 100644 index 190b4f92..00000000 --- a/Python313_13_x64_Template/Lib/logging/config.py +++ /dev/null @@ -1,1065 +0,0 @@ -# Copyright 2001-2023 by Vinay Sajip. All Rights Reserved. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose and without fee is hereby granted, -# provided that the above copyright notice appear in all copies and that -# both that copyright notice and this permission notice appear in -# supporting documentation, and that the name of Vinay Sajip -# not be used in advertising or publicity pertaining to distribution -# of the software without specific, written prior permission. -# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING -# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL -# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR -# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER -# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -""" -Configuration functions for the logging package for Python. The core package -is based on PEP 282 and comments thereto in comp.lang.python, and influenced -by Apache's log4j system. - -Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved. - -To use, simply 'import logging' and log away! -""" - -import errno -import functools -import io -import logging -import logging.handlers -import os -import queue -import re -import struct -import threading -import traceback - -from socketserver import ThreadingTCPServer, StreamRequestHandler - - -DEFAULT_LOGGING_CONFIG_PORT = 9030 - -RESET_ERROR = errno.ECONNRESET - -# -# The following code implements a socket listener for on-the-fly -# reconfiguration of logging. -# -# _listener holds the server object doing the listening -_listener = None - -def fileConfig(fname, defaults=None, disable_existing_loggers=True, encoding=None): - """ - Read the logging configuration from a ConfigParser-format file. - - This can be called several times from an application, allowing an end user - the ability to select from various pre-canned configurations (if the - developer provides a mechanism to present the choices and load the chosen - configuration). - """ - import configparser - - if isinstance(fname, str): - if not os.path.exists(fname): - raise FileNotFoundError(f"{fname} doesn't exist") - elif not os.path.getsize(fname): - raise RuntimeError(f'{fname} is an empty file') - - if isinstance(fname, configparser.RawConfigParser): - cp = fname - else: - try: - cp = configparser.ConfigParser(defaults) - if hasattr(fname, 'readline'): - cp.read_file(fname) - else: - encoding = io.text_encoding(encoding) - cp.read(fname, encoding=encoding) - except configparser.ParsingError as e: - raise RuntimeError(f'{fname} is invalid: {e}') - - formatters = _create_formatters(cp) - - # critical section - with logging._lock: - _clearExistingHandlers() - - # Handlers add themselves to logging._handlers - handlers = _install_handlers(cp, formatters) - _install_loggers(cp, handlers, disable_existing_loggers) - - -def _resolve(name): - """Resolve a dotted name to a global object.""" - name = name.split('.') - used = name.pop(0) - found = __import__(used) - for n in name: - used = used + '.' 
+ n - try: - found = getattr(found, n) - except AttributeError: - __import__(used) - found = getattr(found, n) - return found - -def _strip_spaces(alist): - return map(str.strip, alist) - -def _create_formatters(cp): - """Create and return formatters""" - flist = cp["formatters"]["keys"] - if not len(flist): - return {} - flist = flist.split(",") - flist = _strip_spaces(flist) - formatters = {} - for form in flist: - sectname = "formatter_%s" % form - fs = cp.get(sectname, "format", raw=True, fallback=None) - dfs = cp.get(sectname, "datefmt", raw=True, fallback=None) - stl = cp.get(sectname, "style", raw=True, fallback='%') - defaults = cp.get(sectname, "defaults", raw=True, fallback=None) - - c = logging.Formatter - class_name = cp[sectname].get("class") - if class_name: - c = _resolve(class_name) - - if defaults is not None: - defaults = eval(defaults, vars(logging)) - f = c(fs, dfs, stl, defaults=defaults) - else: - f = c(fs, dfs, stl) - formatters[form] = f - return formatters - - -def _install_handlers(cp, formatters): - """Install and return handlers""" - hlist = cp["handlers"]["keys"] - if not len(hlist): - return {} - hlist = hlist.split(",") - hlist = _strip_spaces(hlist) - handlers = {} - fixups = [] #for inter-handler references - for hand in hlist: - section = cp["handler_%s" % hand] - klass = section["class"] - fmt = section.get("formatter", "") - try: - klass = eval(klass, vars(logging)) - except (AttributeError, NameError): - klass = _resolve(klass) - args = section.get("args", '()') - args = eval(args, vars(logging)) - kwargs = section.get("kwargs", '{}') - kwargs = eval(kwargs, vars(logging)) - h = klass(*args, **kwargs) - h.name = hand - if "level" in section: - level = section["level"] - h.setLevel(level) - if len(fmt): - h.setFormatter(formatters[fmt]) - if issubclass(klass, logging.handlers.MemoryHandler): - target = section.get("target", "") - if len(target): #the target handler may not be loaded yet, so keep for later... - fixups.append((h, target)) - handlers[hand] = h - #now all handlers are loaded, fixup inter-handler references... - for h, t in fixups: - h.setTarget(handlers[t]) - return handlers - -def _handle_existing_loggers(existing, child_loggers, disable_existing): - """ - When (re)configuring logging, handle loggers which were in the previous - configuration but are not in the new configuration. There's no point - deleting them as other threads may continue to hold references to them; - and by disabling them, you stop them doing any logging. - - However, don't disable children of named loggers, as that's probably not - what was intended by the user. Also, allow existing loggers to NOT be - disabled if disable_existing is false. 
- """ - root = logging.root - for log in existing: - logger = root.manager.loggerDict[log] - if log in child_loggers: - if not isinstance(logger, logging.PlaceHolder): - logger.setLevel(logging.NOTSET) - logger.handlers = [] - logger.propagate = True - else: - logger.disabled = disable_existing - -def _install_loggers(cp, handlers, disable_existing): - """Create and install loggers""" - - # configure the root first - llist = cp["loggers"]["keys"] - llist = llist.split(",") - llist = list(_strip_spaces(llist)) - llist.remove("root") - section = cp["logger_root"] - root = logging.root - log = root - if "level" in section: - level = section["level"] - log.setLevel(level) - for h in root.handlers[:]: - root.removeHandler(h) - hlist = section["handlers"] - if len(hlist): - hlist = hlist.split(",") - hlist = _strip_spaces(hlist) - for hand in hlist: - log.addHandler(handlers[hand]) - - #and now the others... - #we don't want to lose the existing loggers, - #since other threads may have pointers to them. - #existing is set to contain all existing loggers, - #and as we go through the new configuration we - #remove any which are configured. At the end, - #what's left in existing is the set of loggers - #which were in the previous configuration but - #which are not in the new configuration. - existing = list(root.manager.loggerDict.keys()) - #The list needs to be sorted so that we can - #avoid disabling child loggers of explicitly - #named loggers. With a sorted list it is easier - #to find the child loggers. - existing.sort() - #We'll keep the list of existing loggers - #which are children of named loggers here... - child_loggers = [] - #now set up the new ones... - for log in llist: - section = cp["logger_%s" % log] - qn = section["qualname"] - propagate = section.getint("propagate", fallback=1) - logger = logging.getLogger(qn) - if qn in existing: - i = existing.index(qn) + 1 # start with the entry after qn - prefixed = qn + "." - pflen = len(prefixed) - num_existing = len(existing) - while i < num_existing: - if existing[i][:pflen] == prefixed: - child_loggers.append(existing[i]) - i += 1 - existing.remove(qn) - if "level" in section: - level = section["level"] - logger.setLevel(level) - for h in logger.handlers[:]: - logger.removeHandler(h) - logger.propagate = propagate - logger.disabled = 0 - hlist = section["handlers"] - if len(hlist): - hlist = hlist.split(",") - hlist = _strip_spaces(hlist) - for hand in hlist: - logger.addHandler(handlers[hand]) - - #Disable any old loggers. There's no point deleting - #them as other threads may continue to hold references - #and by disabling them, you stop them doing any logging. - #However, don't disable children of named loggers, as that's - #probably not what was intended by the user. 
- #for log in existing: - # logger = root.manager.loggerDict[log] - # if log in child_loggers: - # logger.level = logging.NOTSET - # logger.handlers = [] - # logger.propagate = 1 - # elif disable_existing_loggers: - # logger.disabled = 1 - _handle_existing_loggers(existing, child_loggers, disable_existing) - - -def _clearExistingHandlers(): - """Clear and close existing handlers""" - logging._handlers.clear() - logging.shutdown(logging._handlerList[:]) - del logging._handlerList[:] - - -IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) - - -def valid_ident(s): - m = IDENTIFIER.match(s) - if not m: - raise ValueError('Not a valid Python identifier: %r' % s) - return True - - -class ConvertingMixin(object): - """For ConvertingXXX's, this mixin class provides common functions""" - - def convert_with_key(self, key, value, replace=True): - result = self.configurator.convert(value) - #If the converted value is different, save for next time - if value is not result: - if replace: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def convert(self, value): - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - return result - - -# The ConvertingXXX classes are wrappers around standard Python containers, -# and they serve to convert any suitable values in the container. The -# conversion converts base dicts, lists and tuples to their wrapped -# equivalents, whereas strings which match a conversion format are converted -# appropriately. -# -# Each wrapper should have a configurator attribute holding the actual -# configurator to use for conversion. - -class ConvertingDict(dict, ConvertingMixin): - """A converting dictionary wrapper.""" - - def __getitem__(self, key): - value = dict.__getitem__(self, key) - return self.convert_with_key(key, value) - - def get(self, key, default=None): - value = dict.get(self, key, default) - return self.convert_with_key(key, value) - - def pop(self, key, default=None): - value = dict.pop(self, key, default) - return self.convert_with_key(key, value, replace=False) - -class ConvertingList(list, ConvertingMixin): - """A converting list wrapper.""" - def __getitem__(self, key): - value = list.__getitem__(self, key) - return self.convert_with_key(key, value) - - def pop(self, idx=-1): - value = list.pop(self, idx) - return self.convert(value) - -class ConvertingTuple(tuple, ConvertingMixin): - """A converting tuple wrapper.""" - def __getitem__(self, key): - value = tuple.__getitem__(self, key) - # Can't replace a tuple entry. - return self.convert_with_key(key, value, replace=False) - -class BaseConfigurator(object): - """ - The configurator base class which defines some useful defaults. - """ - - CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$') - - WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') - DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') - INDEX_PATTERN = re.compile(r'^\[([^\[\]]*)\]\s*') - DIGIT_PATTERN = re.compile(r'^\d+$') - - value_converters = { - 'ext' : 'ext_convert', - 'cfg' : 'cfg_convert', - } - - # We might want to use a different one, e.g. importlib - importer = staticmethod(__import__) - - def __init__(self, config): - self.config = ConvertingDict(config) - self.config.configurator = self - - def resolve(self, s): - """ - Resolve strings to objects using standard import and attribute - syntax.
- """ - name = s.split('.') - used = name.pop(0) - try: - found = self.importer(used) - for frag in name: - used += '.' + frag - try: - found = getattr(found, frag) - except AttributeError: - self.importer(used) - found = getattr(found, frag) - return found - except ImportError as e: - v = ValueError('Cannot resolve %r: %s' % (s, e)) - raise v from e - - def ext_convert(self, value): - """Default converter for the ext:// protocol.""" - return self.resolve(value) - - def cfg_convert(self, value): - """Default converter for the cfg:// protocol.""" - rest = value - m = self.WORD_PATTERN.match(rest) - if m is None: - raise ValueError("Unable to convert %r" % value) - else: - rest = rest[m.end():] - d = self.config[m.groups()[0]] - #print d, rest - while rest: - m = self.DOT_PATTERN.match(rest) - if m: - d = d[m.groups()[0]] - else: - m = self.INDEX_PATTERN.match(rest) - if m: - idx = m.groups()[0] - if not self.DIGIT_PATTERN.match(idx): - d = d[idx] - else: - try: - n = int(idx) # try as number first (most likely) - d = d[n] - except TypeError: - d = d[idx] - if m: - rest = rest[m.end():] - else: - raise ValueError('Unable to convert ' - '%r at %r' % (value, rest)) - #rest should be empty - return d - - def convert(self, value): - """ - Convert values to an appropriate type. dicts, lists and tuples are - replaced by their converting alternatives. Strings are checked to - see if they have a conversion format and are converted if they do. - """ - if not isinstance(value, ConvertingDict) and isinstance(value, dict): - value = ConvertingDict(value) - value.configurator = self - elif not isinstance(value, ConvertingList) and isinstance(value, list): - value = ConvertingList(value) - value.configurator = self - elif not isinstance(value, ConvertingTuple) and\ - isinstance(value, tuple) and not hasattr(value, '_fields'): - value = ConvertingTuple(value) - value.configurator = self - elif isinstance(value, str): # str for py3k - m = self.CONVERT_PATTERN.match(value) - if m: - d = m.groupdict() - prefix = d['prefix'] - converter = self.value_converters.get(prefix, None) - if converter: - suffix = d['suffix'] - converter = getattr(self, converter) - value = converter(suffix) - return value - - def configure_custom(self, config): - """Configure an object with a user-supplied factory.""" - c = config.pop('()') - if not callable(c): - c = self.resolve(c) - # Check for valid identifiers - kwargs = {k: config[k] for k in config if (k != '.' and valid_ident(k))} - result = c(**kwargs) - props = config.pop('.', None) - if props: - for name, value in props.items(): - setattr(result, name, value) - return result - - def as_tuple(self, value): - """Utility function which converts lists to tuples.""" - if isinstance(value, list): - value = tuple(value) - return value - -def _is_queue_like_object(obj): - """Check that *obj* implements the Queue API.""" - if isinstance(obj, (queue.Queue, queue.SimpleQueue)): - return True - # defer importing multiprocessing as much as possible - from multiprocessing.queues import Queue as MPQueue - if isinstance(obj, MPQueue): - return True - # Depending on the multiprocessing start context, we cannot create - # a multiprocessing.managers.BaseManager instance 'mm' to get the - # runtime type of mm.Queue() or mm.JoinableQueue() (see gh-119819). - # - # Since we only need an object implementing the Queue API, we only - # do a protocol check, but we do not use typing.runtime_checkable() - # and typing.Protocol to reduce import time (see gh-121723). 
- # - # Ideally, we would have wanted to simply use strict type checking - # instead of a protocol-based type checking since the latter does - # not check the method signatures. - # - # Note that only 'put_nowait' and 'get' are required by the logging - # queue handler and queue listener (see gh-124653) and that other - # methods are either optional or unused. - minimal_queue_interface = ['put_nowait', 'get'] - return all(callable(getattr(obj, method, None)) - for method in minimal_queue_interface) - -class DictConfigurator(BaseConfigurator): - """ - Configure logging using a dictionary-like object to describe the - configuration. - """ - - def configure(self): - """Do the configuration.""" - - config = self.config - if 'version' not in config: - raise ValueError("dictionary doesn't specify a version") - if config['version'] != 1: - raise ValueError("Unsupported version: %s" % config['version']) - incremental = config.pop('incremental', False) - EMPTY_DICT = {} - with logging._lock: - if incremental: - handlers = config.get('handlers', EMPTY_DICT) - for name in handlers: - if name not in logging._handlers: - raise ValueError('No handler found with ' - 'name %r' % name) - else: - try: - handler = logging._handlers[name] - handler_config = handlers[name] - level = handler_config.get('level', None) - if level: - handler.setLevel(logging._checkLevel(level)) - except Exception as e: - raise ValueError('Unable to configure handler ' - '%r' % name) from e - loggers = config.get('loggers', EMPTY_DICT) - for name in loggers: - try: - self.configure_logger(name, loggers[name], True) - except Exception as e: - raise ValueError('Unable to configure logger ' - '%r' % name) from e - root = config.get('root', None) - if root: - try: - self.configure_root(root, True) - except Exception as e: - raise ValueError('Unable to configure root ' - 'logger') from e - else: - disable_existing = config.pop('disable_existing_loggers', True) - - _clearExistingHandlers() - - # Do formatters first - they don't refer to anything else - formatters = config.get('formatters', EMPTY_DICT) - for name in formatters: - try: - formatters[name] = self.configure_formatter( - formatters[name]) - except Exception as e: - raise ValueError('Unable to configure ' - 'formatter %r' % name) from e - # Next, do filters - they don't refer to anything else, either - filters = config.get('filters', EMPTY_DICT) - for name in filters: - try: - filters[name] = self.configure_filter(filters[name]) - except Exception as e: - raise ValueError('Unable to configure ' - 'filter %r' % name) from e - - # Next, do handlers - they refer to formatters and filters - # As handlers can refer to other handlers, sort the keys - # to allow a deterministic order of configuration - handlers = config.get('handlers', EMPTY_DICT) - deferred = [] - for name in sorted(handlers): - try: - handler = self.configure_handler(handlers[name]) - handler.name = name - handlers[name] = handler - except Exception as e: - if ' not configured yet' in str(e.__cause__): - deferred.append(name) - else: - raise ValueError('Unable to configure handler ' - '%r' % name) from e - - # Now do any that were deferred - for name in deferred: - try: - handler = self.configure_handler(handlers[name]) - handler.name = name - handlers[name] = handler - except Exception as e: - raise ValueError('Unable to configure handler ' - '%r' % name) from e - - # Next, do loggers - they refer to handlers and filters - - #we don't want to lose the existing loggers, - #since other threads may have pointers to 
them. - #existing is set to contain all existing loggers, - #and as we go through the new configuration we - #remove any which are configured. At the end, - #what's left in existing is the set of loggers - #which were in the previous configuration but - #which are not in the new configuration. - root = logging.root - existing = list(root.manager.loggerDict.keys()) - #The list needs to be sorted so that we can - #avoid disabling child loggers of explicitly - #named loggers. With a sorted list it is easier - #to find the child loggers. - existing.sort() - #We'll keep the list of existing loggers - #which are children of named loggers here... - child_loggers = [] - #now set up the new ones... - loggers = config.get('loggers', EMPTY_DICT) - for name in loggers: - if name in existing: - i = existing.index(name) + 1 # look after name - prefixed = name + "." - pflen = len(prefixed) - num_existing = len(existing) - while i < num_existing: - if existing[i][:pflen] == prefixed: - child_loggers.append(existing[i]) - i += 1 - existing.remove(name) - try: - self.configure_logger(name, loggers[name]) - except Exception as e: - raise ValueError('Unable to configure logger ' - '%r' % name) from e - - #Disable any old loggers. There's no point deleting - #them as other threads may continue to hold references - #and by disabling them, you stop them doing any logging. - #However, don't disable children of named loggers, as that's - #probably not what was intended by the user. - #for log in existing: - # logger = root.manager.loggerDict[log] - # if log in child_loggers: - # logger.level = logging.NOTSET - # logger.handlers = [] - # logger.propagate = True - # elif disable_existing: - # logger.disabled = True - _handle_existing_loggers(existing, child_loggers, - disable_existing) - - # And finally, do the root logger - root = config.get('root', None) - if root: - try: - self.configure_root(root) - except Exception as e: - raise ValueError('Unable to configure root ' - 'logger') from e - - def configure_formatter(self, config): - """Configure a formatter from a dictionary.""" - if '()' in config: - factory = config['()'] # for use in exception handler - try: - result = self.configure_custom(config) - except TypeError as te: - if "'format'" not in str(te): - raise - # logging.Formatter and its subclasses expect the `fmt` - # parameter instead of `format`. Retry passing configuration - # with `fmt`. - config['fmt'] = config.pop('format') - config['()'] = factory - result = self.configure_custom(config) - else: - fmt = config.get('format', None) - dfmt = config.get('datefmt', None) - style = config.get('style', '%') - cname = config.get('class', None) - defaults = config.get('defaults', None) - - if not cname: - c = logging.Formatter - else: - c = _resolve(cname) - - kwargs = {} - - # Add defaults only if it exists. - # Prevents TypeError in custom formatter callables that do not - # accept it. 
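# Illustrative dictConfig fragment for this 'defaults' branch (a sketch, not
# part of the deleted file): with
#   {'version': 1,
#    'formatters': {'f': {'format': '%(ip)s %(message)s',
#                         'defaults': {'ip': '-'}}}}
# records that lack an 'ip' attribute still format, falling back to '-'.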
- if defaults is not None: - kwargs['defaults'] = defaults - - # A TypeError would be raised if "validate" key is passed in with a formatter callable - # that does not accept "validate" as a parameter - if 'validate' in config: # if user hasn't mentioned it, the default will be fine - result = c(fmt, dfmt, style, config['validate'], **kwargs) - else: - result = c(fmt, dfmt, style, **kwargs) - - return result - - def configure_filter(self, config): - """Configure a filter from a dictionary.""" - if '()' in config: - result = self.configure_custom(config) - else: - name = config.get('name', '') - result = logging.Filter(name) - return result - - def add_filters(self, filterer, filters): - """Add filters to a filterer from a list of names.""" - for f in filters: - try: - if callable(f) or callable(getattr(f, 'filter', None)): - filter_ = f - else: - filter_ = self.config['filters'][f] - filterer.addFilter(filter_) - except Exception as e: - raise ValueError('Unable to add filter %r' % f) from e - - def _configure_queue_handler(self, klass, **kwargs): - if 'queue' in kwargs: - q = kwargs.pop('queue') - else: - q = queue.Queue() # unbounded - - rhl = kwargs.pop('respect_handler_level', False) - lklass = kwargs.pop('listener', logging.handlers.QueueListener) - handlers = kwargs.pop('handlers', []) - - listener = lklass(q, *handlers, respect_handler_level=rhl) - handler = klass(q, **kwargs) - handler.listener = listener - return handler - - def configure_handler(self, config): - """Configure a handler from a dictionary.""" - config_copy = dict(config) # for restoring in case of error - formatter = config.pop('formatter', None) - if formatter: - try: - formatter = self.config['formatters'][formatter] - except Exception as e: - raise ValueError('Unable to set formatter ' - '%r' % formatter) from e - level = config.pop('level', None) - filters = config.pop('filters', None) - if '()' in config: - c = config.pop('()') - if not callable(c): - c = self.resolve(c) - factory = c - else: - cname = config.pop('class') - if callable(cname): - klass = cname - else: - klass = self.resolve(cname) - if issubclass(klass, logging.handlers.MemoryHandler): - if 'flushLevel' in config: - config['flushLevel'] = logging._checkLevel(config['flushLevel']) - if 'target' in config: - # Special case for handler which refers to another handler - try: - tn = config['target'] - th = self.config['handlers'][tn] - if not isinstance(th, logging.Handler): - config.update(config_copy) # restore for deferred cfg - raise TypeError('target not configured yet') - config['target'] = th - except Exception as e: - raise ValueError('Unable to set target handler %r' % tn) from e - elif issubclass(klass, logging.handlers.QueueHandler): - # Another special case for handler which refers to other handlers - # if 'handlers' not in config: - # raise ValueError('No handlers specified for a QueueHandler') - if 'queue' in config: - qspec = config['queue'] - - if isinstance(qspec, str): - q = self.resolve(qspec) - if not callable(q): - raise TypeError('Invalid queue specifier %r' % qspec) - config['queue'] = q() - elif isinstance(qspec, dict): - if '()' not in qspec: - raise TypeError('Invalid queue specifier %r' % qspec) - config['queue'] = self.configure_custom(dict(qspec)) - elif not _is_queue_like_object(qspec): - raise TypeError('Invalid queue specifier %r' % qspec) - - if 'listener' in config: - lspec = config['listener'] - if isinstance(lspec, type): - if not issubclass(lspec, logging.handlers.QueueListener): - raise TypeError('Invalid 
listener specifier %r' % lspec) - else: - if isinstance(lspec, str): - listener = self.resolve(lspec) - if isinstance(listener, type) and\ - not issubclass(listener, logging.handlers.QueueListener): - raise TypeError('Invalid listener specifier %r' % lspec) - elif isinstance(lspec, dict): - if '()' not in lspec: - raise TypeError('Invalid listener specifier %r' % lspec) - listener = self.configure_custom(dict(lspec)) - else: - raise TypeError('Invalid listener specifier %r' % lspec) - if not callable(listener): - raise TypeError('Invalid listener specifier %r' % lspec) - config['listener'] = listener - if 'handlers' in config: - hlist = [] - try: - for hn in config['handlers']: - h = self.config['handlers'][hn] - if not isinstance(h, logging.Handler): - config.update(config_copy) # restore for deferred cfg - raise TypeError('Required handler %r ' - 'is not configured yet' % hn) - hlist.append(h) - except Exception as e: - raise ValueError('Unable to set required handler %r' % hn) from e - config['handlers'] = hlist - elif issubclass(klass, logging.handlers.SMTPHandler) and\ - 'mailhost' in config: - config['mailhost'] = self.as_tuple(config['mailhost']) - elif issubclass(klass, logging.handlers.SysLogHandler) and\ - 'address' in config: - config['address'] = self.as_tuple(config['address']) - if issubclass(klass, logging.handlers.QueueHandler): - factory = functools.partial(self._configure_queue_handler, klass) - else: - factory = klass - kwargs = {k: config[k] for k in config if (k != '.' and valid_ident(k))} - try: - result = factory(**kwargs) - except TypeError as te: - if "'stream'" not in str(te): - raise - #The argument name changed from strm to stream - #Retry with old name. - #This is so that code can be used with older Python versions - #(e.g. by Django) - kwargs['strm'] = kwargs.pop('stream') - result = factory(**kwargs) - if formatter: - result.setFormatter(formatter) - if level is not None: - result.setLevel(logging._checkLevel(level)) - if filters: - self.add_filters(result, filters) - props = config.pop('.', None) - if props: - for name, value in props.items(): - setattr(result, name, value) - return result - - def add_handlers(self, logger, handlers): - """Add handlers to a logger from a list of names.""" - for h in handlers: - try: - logger.addHandler(self.config['handlers'][h]) - except Exception as e: - raise ValueError('Unable to add handler %r' % h) from e - - def common_logger_config(self, logger, config, incremental=False): - """ - Perform configuration which is common to root and non-root loggers. 
- """ - level = config.get('level', None) - if level is not None: - logger.setLevel(logging._checkLevel(level)) - if not incremental: - #Remove any existing handlers - for h in logger.handlers[:]: - logger.removeHandler(h) - handlers = config.get('handlers', None) - if handlers: - self.add_handlers(logger, handlers) - filters = config.get('filters', None) - if filters: - self.add_filters(logger, filters) - - def configure_logger(self, name, config, incremental=False): - """Configure a non-root logger from a dictionary.""" - logger = logging.getLogger(name) - self.common_logger_config(logger, config, incremental) - logger.disabled = False - propagate = config.get('propagate', None) - if propagate is not None: - logger.propagate = propagate - - def configure_root(self, config, incremental=False): - """Configure a root logger from a dictionary.""" - root = logging.getLogger() - self.common_logger_config(root, config, incremental) - -dictConfigClass = DictConfigurator - -def dictConfig(config): - """Configure logging using a dictionary.""" - dictConfigClass(config).configure() - - -def listen(port=DEFAULT_LOGGING_CONFIG_PORT, verify=None): - """ - Start up a socket server on the specified port, and listen for new - configurations. - - These will be sent as a file suitable for processing by fileConfig(). - Returns a Thread object on which you can call start() to start the server, - and which you can join() when appropriate. To stop the server, call - stopListening(). - - Use the ``verify`` argument to verify any bytes received across the wire - from a client. If specified, it should be a callable which receives a - single argument - the bytes of configuration data received across the - network - and it should return either ``None``, to indicate that the - passed in bytes could not be verified and should be discarded, or a - byte string which is then passed to the configuration machinery as - normal. Note that you can return transformed bytes, e.g. by decrypting - the bytes passed in. - """ - - class ConfigStreamHandler(StreamRequestHandler): - """ - Handler for a logging configuration request. - - It expects a completely new logging configuration and uses fileConfig - to install it. - """ - def handle(self): - """ - Handle a request. - - Each request is expected to be a 4-byte length, packed using - struct.pack(">L", n), followed by the config file. - Uses fileConfig() to do the grunt work. - """ - try: - conn = self.connection - chunk = conn.recv(4) - if len(chunk) == 4: - slen = struct.unpack(">L", chunk)[0] - chunk = self.connection.recv(slen) - while len(chunk) < slen: - chunk = chunk + conn.recv(slen - len(chunk)) - if self.server.verify is not None: - chunk = self.server.verify(chunk) - if chunk is not None: # verified, can process - chunk = chunk.decode("utf-8") - try: - import json - d =json.loads(chunk) - assert isinstance(d, dict) - dictConfig(d) - except Exception: - #Apply new configuration. - - file = io.StringIO(chunk) - try: - fileConfig(file) - except Exception: - traceback.print_exc() - if self.server.ready: - self.server.ready.set() - except OSError as e: - if e.errno != RESET_ERROR: - raise - - class ConfigSocketReceiver(ThreadingTCPServer): - """ - A simple TCP socket-based logging config receiver. 
- """ - - allow_reuse_address = 1 - - def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, - handler=None, ready=None, verify=None): - ThreadingTCPServer.__init__(self, (host, port), handler) - with logging._lock: - self.abort = 0 - self.timeout = 1 - self.ready = ready - self.verify = verify - - def serve_until_stopped(self): - import select - abort = 0 - while not abort: - rd, wr, ex = select.select([self.socket.fileno()], - [], [], - self.timeout) - if rd: - self.handle_request() - with logging._lock: - abort = self.abort - self.server_close() - - class Server(threading.Thread): - - def __init__(self, rcvr, hdlr, port, verify): - super(Server, self).__init__() - self.rcvr = rcvr - self.hdlr = hdlr - self.port = port - self.verify = verify - self.ready = threading.Event() - - def run(self): - server = self.rcvr(port=self.port, handler=self.hdlr, - ready=self.ready, - verify=self.verify) - if self.port == 0: - self.port = server.server_address[1] - self.ready.set() - global _listener - with logging._lock: - _listener = server - server.serve_until_stopped() - - return Server(ConfigSocketReceiver, ConfigStreamHandler, port, verify) - -def stopListening(): - """ - Stop the listening server which was created with a call to listen(). - """ - global _listener - with logging._lock: - if _listener: - _listener.abort = 1 - _listener = None diff --git a/Python313_13_x64_Template/Lib/logging/handlers.py b/Python313_13_x64_Template/Lib/logging/handlers.py deleted file mode 100644 index 480dbd08..00000000 --- a/Python313_13_x64_Template/Lib/logging/handlers.py +++ /dev/null @@ -1,1629 +0,0 @@ -# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose and without fee is hereby granted, -# provided that the above copyright notice appear in all copies and that -# both that copyright notice and this permission notice appear in -# supporting documentation, and that the name of Vinay Sajip -# not be used in advertising or publicity pertaining to distribution -# of the software without specific, written prior permission. -# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING -# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL -# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR -# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER -# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -""" -Additional handlers for the logging package for Python. The core package is -based on PEP 282 and comments thereto in comp.lang.python. - -Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved. - -To use, simply 'import logging.handlers' and log away! -""" - -import copy -import io -import logging -import os -import pickle -import queue -import re -import socket -import struct -import threading -import time - -# -# Some constants... -# - -DEFAULT_TCP_LOGGING_PORT = 9020 -DEFAULT_UDP_LOGGING_PORT = 9021 -DEFAULT_HTTP_LOGGING_PORT = 9022 -DEFAULT_SOAP_LOGGING_PORT = 9023 -SYSLOG_UDP_PORT = 514 -SYSLOG_TCP_PORT = 514 - -_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day - -class BaseRotatingHandler(logging.FileHandler): - """ - Base class for handlers that rotate log files at a certain point. - Not meant to be instantiated directly. Instead, use RotatingFileHandler - or TimedRotatingFileHandler. 
- """ - namer = None - rotator = None - - def __init__(self, filename, mode, encoding=None, delay=False, errors=None): - """ - Use the specified filename for streamed logging - """ - logging.FileHandler.__init__(self, filename, mode=mode, - encoding=encoding, delay=delay, - errors=errors) - self.mode = mode - self.encoding = encoding - self.errors = errors - - def emit(self, record): - """ - Emit a record. - - Output the record to the file, catering for rollover as described - in doRollover(). - """ - try: - if self.shouldRollover(record): - self.doRollover() - logging.FileHandler.emit(self, record) - except Exception: - self.handleError(record) - - def rotation_filename(self, default_name): - """ - Modify the filename of a log file when rotating. - - This is provided so that a custom filename can be provided. - - The default implementation calls the 'namer' attribute of the - handler, if it's callable, passing the default name to - it. If the attribute isn't callable (the default is None), the name - is returned unchanged. - - :param default_name: The default name for the log file. - """ - if not callable(self.namer): - result = default_name - else: - result = self.namer(default_name) - return result - - def rotate(self, source, dest): - """ - When rotating, rotate the current log. - - The default implementation calls the 'rotator' attribute of the - handler, if it's callable, passing the source and dest arguments to - it. If the attribute isn't callable (the default is None), the source - is simply renamed to the destination. - - :param source: The source filename. This is normally the base - filename, e.g. 'test.log' - :param dest: The destination filename. This is normally - what the source is rotated to, e.g. 'test.log.1'. - """ - if not callable(self.rotator): - # Issue 18940: A file may not have been created if delay is True. - if os.path.exists(source): - os.rename(source, dest) - else: - self.rotator(source, dest) - -class RotatingFileHandler(BaseRotatingHandler): - """ - Handler for logging to a set of files, which switches from one file - to the next when the current file reaches a certain size. - """ - def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, - encoding=None, delay=False, errors=None): - """ - Open the specified file and use it as the stream for logging. - - By default, the file grows indefinitely. You can specify particular - values of maxBytes and backupCount to allow the file to rollover at - a predetermined size. - - Rollover occurs whenever the current log file is nearly maxBytes in - length. If backupCount is >= 1, the system will successively create - new files with the same pathname as the base file, but with extensions - ".1", ".2" etc. appended to it. For example, with a backupCount of 5 - and a base file name of "app.log", you would get "app.log", - "app.log.1", "app.log.2", ... through to "app.log.5". The file being - written to is always "app.log" - when it gets filled up, it is closed - and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. - exist, then they are renamed to "app.log.2", "app.log.3" etc. - respectively. - - If maxBytes is zero, rollover never occurs. - """ - # If rotation/rollover is wanted, it doesn't make sense to use another - # mode. If for example 'w' were specified, then if there were multiple - # runs of the calling application, the logs from previous runs would be - # lost if the 'w' is respected, because the log file would be truncated - # on each run. 
-        if maxBytes > 0:
-            mode = 'a'
-        if "b" not in mode:
-            encoding = io.text_encoding(encoding)
-        BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
-                                     delay=delay, errors=errors)
-        self.maxBytes = maxBytes
-        self.backupCount = backupCount
-
-    def doRollover(self):
-        """
-        Do a rollover, as described in __init__().
-        """
-        if self.stream:
-            self.stream.close()
-            self.stream = None
-        if self.backupCount > 0:
-            for i in range(self.backupCount - 1, 0, -1):
-                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
-                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
-                                                        i + 1))
-                if os.path.exists(sfn):
-                    if os.path.exists(dfn):
-                        os.remove(dfn)
-                    os.rename(sfn, dfn)
-            dfn = self.rotation_filename(self.baseFilename + ".1")
-            if os.path.exists(dfn):
-                os.remove(dfn)
-            self.rotate(self.baseFilename, dfn)
-        if not self.delay:
-            self.stream = self._open()
-
-    def shouldRollover(self, record):
-        """
-        Determine if rollover should occur.
-
-        Basically, see if the supplied record would cause the file to exceed
-        the size limit we have.
-        """
-        if self.stream is None:                 # delay was set...
-            self.stream = self._open()
-        if self.maxBytes > 0:                   # are we rolling over?
-            try:
-                pos = self.stream.tell()
-            except io.UnsupportedOperation:
-                # gh-143237: Never rollover a named pipe.
-                return False
-            if not pos:
-                # gh-116263: Never rollover an empty file
-                return False
-            msg = "%s\n" % self.format(record)
-            if pos + len(msg) >= self.maxBytes:
-                # See bpo-45401: Never rollover anything other than regular files
-                if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
-                    return False
-                return True
-        return False
-
-class TimedRotatingFileHandler(BaseRotatingHandler):
-    """
-    Handler for logging to a file, rotating the log file at certain timed
-    intervals.
-
-    If backupCount is > 0, when rollover is done, no more than backupCount
-    files are kept - the oldest ones are deleted.
-    """
-    def __init__(self, filename, when='h', interval=1, backupCount=0,
-                 encoding=None, delay=False, utc=False, atTime=None,
-                 errors=None):
-        encoding = io.text_encoding(encoding)
-        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
-                                     delay=delay, errors=errors)
-        self.when = when.upper()
-        self.backupCount = backupCount
-        self.utc = utc
-        self.atTime = atTime
-        # Calculate the real rollover interval, which is just the number of
-        # seconds between rollovers. Also set the filename suffix used when
-        # a rollover occurs. Current 'when' events supported:
-        # S - Seconds
-        # M - Minutes
-        # H - Hours
-        # D - Days
-        # midnight - roll over at midnight
-        # W{0-6} - roll over on a certain day; 0 - Monday
-        #
-        # Case of the 'when' specifier is not important; lower or upper case
-        # will work.
-        if self.when == 'S':
-            self.interval = 1 # one second
-            self.suffix = "%Y-%m-%d_%H-%M-%S"
-            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(?!\d)"
-        elif self.when == 'M':
-            self.interval = 60 # one minute
-            self.suffix = "%Y-%m-%d_%H-%M"
-            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(?!\d)"
-        elif self.when == 'H':
-            self.interval = 60 * 60 # one hour
-            self.suffix = "%Y-%m-%d_%H"
-            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}(?!\d)"
-        elif self.when == 'D' or self.when == 'MIDNIGHT':
-            self.interval = 60 * 60 * 24 # one day
-            self.suffix = "%Y-%m-%d"
-            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}(?!\d)"
-        elif self.when.startswith('W'):
-            self.interval = 60 * 60 * 24 * 7 # one week
-            if len(self.when) != 2:
-                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
-            if self.when[1] < '0' or self.when[1] > '6':
-                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
-            self.dayOfWeek = int(self.when[1])
-            self.suffix = "%Y-%m-%d"
-            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}(?!\d)"
-        else:
-            raise ValueError("Invalid rollover interval specified: %s" % self.when)
-
-        self.extMatch = re.compile(extMatch, re.ASCII)
-        self.interval = self.interval * interval # multiply by units requested
-        # The following line added because the filename passed in could be a
-        # path object (see Issue #27493), but self.baseFilename will be a string
-        filename = self.baseFilename
-        if os.path.exists(filename):
-            t = int(os.stat(filename).st_mtime)
-        else:
-            t = int(time.time())
-        self.rolloverAt = self.computeRollover(t)
-
-    def computeRollover(self, currentTime):
-        """
-        Work out the rollover time based on the specified time.
-        """
-        result = currentTime + self.interval
-        # If we are rolling over at midnight or weekly, then the interval is already known.
-        # What we need to figure out is WHEN the next interval is.  In other words,
-        # if you are rolling over at midnight, then your base interval is 1 day,
-        # but you want to start that one day clock at midnight, not now.  So, we
-        # have to fudge the rolloverAt value in order to trigger the first rollover
-        # at the right time.  After that, the regular interval will take care of
-        # the rest.  Note that this code doesn't care about leap seconds. :)
-        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
-            # This could be done with less code, but I wanted it to be clear
-            if self.utc:
-                t = time.gmtime(currentTime)
-            else:
-                t = time.localtime(currentTime)
-            currentHour = t[3]
-            currentMinute = t[4]
-            currentSecond = t[5]
-            currentDay = t[6]
-            # r is the number of seconds left between now and the next rotation
-            if self.atTime is None:
-                rotate_ts = _MIDNIGHT
-            else:
-                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute) * 60 +
-                    self.atTime.second)
-
-            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
-                currentSecond)
-            if r <= 0:
-                # Rotate time is before the current time (for example when
-                # self.rotateAt is 13:45 and it now 14:15), rotation is
-                # tomorrow.
-                r += _MIDNIGHT
-                currentDay = (currentDay + 1) % 7
-            result = currentTime + r
-            # If we are rolling over on a certain day, add in the number of days until
-            # the next rollover, but offset by 1 since we just calculated the time
-            # until the next day starts.  There are three cases:
-            # Case 1) The day to rollover is today; in this case, do nothing
-            # Case 2) The day to rollover is further in the interval (i.e., today is
-            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
-            #         next rollover is simply 6 - 2 - 1, or 3.
-            # Case 3) The day to rollover is behind us in the interval (i.e., today
-            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
-            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
-            #         number of days left in the current week (1) plus the number
-            #         of days in the next week until the rollover day (3).
-            # The calculations described in 2) and 3) above need to have a day added.
-            # This is because the above time calculation takes us to midnight on this
-            # day, i.e. the start of the next day.
-            if self.when.startswith('W'):
-                day = currentDay # 0 is Monday
-                if day != self.dayOfWeek:
-                    if day < self.dayOfWeek:
-                        daysToWait = self.dayOfWeek - day
-                    else:
-                        daysToWait = 6 - day + self.dayOfWeek + 1
-                    result += daysToWait * _MIDNIGHT
-                result += self.interval - _MIDNIGHT * 7
-            else:
-                result += self.interval - _MIDNIGHT
-            if not self.utc:
-                dstNow = t[-1]
-                dstAtRollover = time.localtime(result)[-1]
-                if dstNow != dstAtRollover:
-                    if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
-                        addend = -3600
-                        if not time.localtime(result - 3600)[-1]:
-                            addend = 0
-                    else:           # DST bows out before next rollover, so we need to add an hour
-                        addend = 3600
-                    result += addend
-        return result
-
-    def shouldRollover(self, record):
-        """
-        Determine if rollover should occur.
-
-        record is not used, as we are just comparing times, but it is needed so
-        the method signatures are the same
-        """
-        t = int(time.time())
-        if t >= self.rolloverAt:
-            # See #89564: Never rollover anything other than regular files
-            if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
-                # The file is not a regular file, so do not rollover, but do
-                # set the next rollover time to avoid repeated checks.
-                self.rolloverAt = self.computeRollover(t)
-                return False
-
-            return True
-        return False
-
-    def getFilesToDelete(self):
-        """
-        Determine the files to delete when rolling over.
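# --- Editorial sketch: typical use of the timed handler defined above; the
# log rolls at midnight and the last seven dated files are kept. The file
# name is an illustrative assumption.
import logging
import logging.handlers

timed = logging.handlers.TimedRotatingFileHandler(
    "app.log", when="midnight", backupCount=7)
logging.getLogger().addHandler(timed)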
- - More specific than the earlier method, which just used glob.glob(). - """ - dirName, baseName = os.path.split(self.baseFilename) - fileNames = os.listdir(dirName) - result = [] - if self.namer is None: - prefix = baseName + '.' - plen = len(prefix) - for fileName in fileNames: - if fileName[:plen] == prefix: - suffix = fileName[plen:] - if self.extMatch.fullmatch(suffix): - result.append(os.path.join(dirName, fileName)) - else: - for fileName in fileNames: - # Our files could be just about anything after custom naming, - # but they should contain the datetime suffix. - # Try to find the datetime suffix in the file name and verify - # that the file name can be generated by this handler. - m = self.extMatch.search(fileName) - while m: - dfn = self.namer(self.baseFilename + "." + m[0]) - if os.path.basename(dfn) == fileName: - result.append(os.path.join(dirName, fileName)) - break - m = self.extMatch.search(fileName, m.start() + 1) - - if len(result) < self.backupCount: - result = [] - else: - result.sort() - result = result[:len(result) - self.backupCount] - return result - - def doRollover(self): - """ - do a rollover; in this case, a date/time stamp is appended to the filename - when the rollover happens. However, you want the file to be named for the - start of the interval, not the current time. If there is a backup count, - then we have to get a list of matching filenames, sort them and remove - the one with the oldest suffix. - """ - # get the time that this sequence started at and make it a TimeTuple - currentTime = int(time.time()) - t = self.rolloverAt - self.interval - if self.utc: - timeTuple = time.gmtime(t) - else: - timeTuple = time.localtime(t) - dstNow = time.localtime(currentTime)[-1] - dstThen = timeTuple[-1] - if dstNow != dstThen: - if dstNow: - addend = 3600 - else: - addend = -3600 - timeTuple = time.localtime(t + addend) - dfn = self.rotation_filename(self.baseFilename + "." + - time.strftime(self.suffix, timeTuple)) - if os.path.exists(dfn): - # Already rolled over. - return - - if self.stream: - self.stream.close() - self.stream = None - self.rotate(self.baseFilename, dfn) - if self.backupCount > 0: - for s in self.getFilesToDelete(): - os.remove(s) - if not self.delay: - self.stream = self._open() - self.rolloverAt = self.computeRollover(currentTime) - -class WatchedFileHandler(logging.FileHandler): - """ - A handler for logging to a file, which watches the file - to see if it has changed while in use. This can happen because of - usage of programs such as newsyslog and logrotate which perform - log file rotation. This handler, intended for use under Unix, - watches the file to see if it has changed since the last emit. - (A file has changed if its device or inode have changed.) - If it has changed, the old file stream is closed, and the file - opened to get a new stream. - - This handler is not appropriate for use under Windows, because - under Windows open files cannot be moved or renamed - logging - opens the files with exclusive locks - and so there is no need - for such a handler. - - This handler is based on a suggestion and patch by Chad J. - Schroeder. 
- """ - def __init__(self, filename, mode='a', encoding=None, delay=False, - errors=None): - if "b" not in mode: - encoding = io.text_encoding(encoding) - logging.FileHandler.__init__(self, filename, mode=mode, - encoding=encoding, delay=delay, - errors=errors) - self.dev, self.ino = -1, -1 - self._statstream() - - def _statstream(self): - if self.stream is None: - return - sres = os.fstat(self.stream.fileno()) - self.dev = sres.st_dev - self.ino = sres.st_ino - - def reopenIfNeeded(self): - """ - Reopen log file if needed. - - Checks if the underlying file has changed, and if it - has, close the old stream and reopen the file to get the - current stream. - """ - if self.stream is None: - return - - # Reduce the chance of race conditions by stat'ing by path only - # once and then fstat'ing our new fd if we opened a new log stream. - # See issue #14632: Thanks to John Mulligan for the problem report - # and patch. - try: - # stat the file by path, checking for existence - sres = os.stat(self.baseFilename) - - # compare file system stat with that of our stream file handle - reopen = (sres.st_dev != self.dev or sres.st_ino != self.ino) - except FileNotFoundError: - reopen = True - - if not reopen: - return - - # we have an open file handle, clean it up - self.stream.flush() - self.stream.close() - self.stream = None # See Issue #21742: _open () might fail. - - # open a new file handle and get new stat info from that fd - self.stream = self._open() - self._statstream() - - def emit(self, record): - """ - Emit a record. - - If underlying file has changed, reopen the file before emitting the - record to it. - """ - self.reopenIfNeeded() - logging.FileHandler.emit(self, record) - - -class SocketHandler(logging.Handler): - """ - A handler class which writes logging records, in pickle format, to - a streaming socket. The socket is kept open across logging calls. - If the peer resets it, an attempt is made to reconnect on the next call. - The pickle which is sent is that of the LogRecord's attribute dictionary - (__dict__), so that the receiver does not need to have the logging module - installed in order to process the logging event. - - To unpickle the record at the receiving end into a LogRecord, use the - makeLogRecord function. - """ - - def __init__(self, host, port): - """ - Initializes the handler with a specific host address and port. - - When the attribute *closeOnError* is set to True - if a socket error - occurs, the socket is silently closed and then reopened on the next - logging call. - """ - logging.Handler.__init__(self) - self.host = host - self.port = port - if port is None: - self.address = host - else: - self.address = (host, port) - self.sock = None - self.closeOnError = False - self.retryTime = None - # - # Exponential backoff parameters. - # - self.retryStart = 1.0 - self.retryMax = 30.0 - self.retryFactor = 2.0 - - def makeSocket(self, timeout=1): - """ - A factory method which allows subclasses to define the precise - type of socket they want. - """ - if self.port is not None: - result = socket.create_connection(self.address, timeout=timeout) - else: - result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - result.settimeout(timeout) - try: - result.connect(self.address) - except OSError: - result.close() # Issue 19182 - raise - return result - - def createSocket(self): - """ - Try to create a socket, using an exponential backoff with - a max retry time. Thanks to Robert Olson for the original patch - (SF #815911) which has been slightly refactored. 
- """ - now = time.time() - # Either retryTime is None, in which case this - # is the first time back after a disconnect, or - # we've waited long enough. - if self.retryTime is None: - attempt = True - else: - attempt = (now >= self.retryTime) - if attempt: - try: - self.sock = self.makeSocket() - self.retryTime = None # next time, no delay before trying - except OSError: - #Creation failed, so set the retry time and return. - if self.retryTime is None: - self.retryPeriod = self.retryStart - else: - self.retryPeriod = self.retryPeriod * self.retryFactor - if self.retryPeriod > self.retryMax: - self.retryPeriod = self.retryMax - self.retryTime = now + self.retryPeriod - - def send(self, s): - """ - Send a pickled string to the socket. - - This function allows for partial sends which can happen when the - network is busy. - """ - if self.sock is None: - self.createSocket() - #self.sock can be None either because we haven't reached the retry - #time yet, or because we have reached the retry time and retried, - #but are still unable to connect. - if self.sock: - try: - self.sock.sendall(s) - except OSError: #pragma: no cover - self.sock.close() - self.sock = None # so we can call createSocket next time - - def makePickle(self, record): - """ - Pickles the record in binary format with a length prefix, and - returns it ready for transmission across the socket. - """ - ei = record.exc_info - if ei: - # just to get traceback text into record.exc_text ... - dummy = self.format(record) - # See issue #14436: If msg or args are objects, they may not be - # available on the receiving end. So we convert the msg % args - # to a string, save it as msg and zap the args. - d = dict(record.__dict__) - d['msg'] = record.getMessage() - d['args'] = None - d['exc_info'] = None - # Issue #25685: delete 'message' if present: redundant with 'msg' - d.pop('message', None) - s = pickle.dumps(d, 1) - slen = struct.pack(">L", len(s)) - return slen + s - - def handleError(self, record): - """ - Handle an error during logging. - - An error has occurred during logging. Most likely cause - - connection lost. Close the socket so that we can retry on the - next event. - """ - if self.closeOnError and self.sock: - self.sock.close() - self.sock = None #try to reconnect next time - else: - logging.Handler.handleError(self, record) - - def emit(self, record): - """ - Emit a record. - - Pickles the record and writes it to the socket in binary format. - If there is an error with the socket, silently drop the packet. - If there was a problem with the socket, re-establishes the - socket. - """ - try: - s = self.makePickle(record) - self.send(s) - except Exception: - self.handleError(record) - - def close(self): - """ - Closes the socket. - """ - with self.lock: - sock = self.sock - if sock: - self.sock = None - sock.close() - logging.Handler.close(self) - -class DatagramHandler(SocketHandler): - """ - A handler class which writes logging records, in pickle format, to - a datagram socket. The pickle which is sent is that of the LogRecord's - attribute dictionary (__dict__), so that the receiver does not need to - have the logging module installed in order to process the logging event. - - To unpickle the record at the receiving end into a LogRecord, use the - makeLogRecord function. - - """ - def __init__(self, host, port): - """ - Initializes the handler with a specific host address and port. 
- """ - SocketHandler.__init__(self, host, port) - self.closeOnError = False - - def makeSocket(self): - """ - The factory method of SocketHandler is here overridden to create - a UDP socket (SOCK_DGRAM). - """ - if self.port is None: - family = socket.AF_UNIX - else: - family = socket.AF_INET - s = socket.socket(family, socket.SOCK_DGRAM) - return s - - def send(self, s): - """ - Send a pickled string to a socket. - - This function no longer allows for partial sends which can happen - when the network is busy - UDP does not guarantee delivery and - can deliver packets out of sequence. - """ - if self.sock is None: - self.createSocket() - self.sock.sendto(s, self.address) - -class SysLogHandler(logging.Handler): - """ - A handler class which sends formatted logging records to a syslog - server. Based on Sam Rushing's syslog module: - http://www.nightmare.com/squirl/python-ext/misc/syslog.py - Contributed by Nicolas Untz (after which minor refactoring changes - have been made). - """ - - # from : - # ====================================================================== - # priorities/facilities are encoded into a single 32-bit quantity, where - # the bottom 3 bits are the priority (0-7) and the top 28 bits are the - # facility (0-big number). Both the priorities and the facilities map - # roughly one-to-one to strings in the syslogd(8) source code. This - # mapping is included in this file. - # - # priorities (these are ordered) - - LOG_EMERG = 0 # system is unusable - LOG_ALERT = 1 # action must be taken immediately - LOG_CRIT = 2 # critical conditions - LOG_ERR = 3 # error conditions - LOG_WARNING = 4 # warning conditions - LOG_NOTICE = 5 # normal but significant condition - LOG_INFO = 6 # informational - LOG_DEBUG = 7 # debug-level messages - - # facility codes - LOG_KERN = 0 # kernel messages - LOG_USER = 1 # random user-level messages - LOG_MAIL = 2 # mail system - LOG_DAEMON = 3 # system daemons - LOG_AUTH = 4 # security/authorization messages - LOG_SYSLOG = 5 # messages generated internally by syslogd - LOG_LPR = 6 # line printer subsystem - LOG_NEWS = 7 # network news subsystem - LOG_UUCP = 8 # UUCP subsystem - LOG_CRON = 9 # clock daemon - LOG_AUTHPRIV = 10 # security/authorization messages (private) - LOG_FTP = 11 # FTP daemon - LOG_NTP = 12 # NTP subsystem - LOG_SECURITY = 13 # Log audit - LOG_CONSOLE = 14 # Log alert - LOG_SOLCRON = 15 # Scheduling daemon (Solaris) - - # other codes through 15 reserved for system use - LOG_LOCAL0 = 16 # reserved for local use - LOG_LOCAL1 = 17 # reserved for local use - LOG_LOCAL2 = 18 # reserved for local use - LOG_LOCAL3 = 19 # reserved for local use - LOG_LOCAL4 = 20 # reserved for local use - LOG_LOCAL5 = 21 # reserved for local use - LOG_LOCAL6 = 22 # reserved for local use - LOG_LOCAL7 = 23 # reserved for local use - - priority_names = { - "alert": LOG_ALERT, - "crit": LOG_CRIT, - "critical": LOG_CRIT, - "debug": LOG_DEBUG, - "emerg": LOG_EMERG, - "err": LOG_ERR, - "error": LOG_ERR, # DEPRECATED - "info": LOG_INFO, - "notice": LOG_NOTICE, - "panic": LOG_EMERG, # DEPRECATED - "warn": LOG_WARNING, # DEPRECATED - "warning": LOG_WARNING, - } - - facility_names = { - "auth": LOG_AUTH, - "authpriv": LOG_AUTHPRIV, - "console": LOG_CONSOLE, - "cron": LOG_CRON, - "daemon": LOG_DAEMON, - "ftp": LOG_FTP, - "kern": LOG_KERN, - "lpr": LOG_LPR, - "mail": LOG_MAIL, - "news": LOG_NEWS, - "ntp": LOG_NTP, - "security": LOG_SECURITY, - "solaris-cron": LOG_SOLCRON, - "syslog": LOG_SYSLOG, - "user": LOG_USER, - "uucp": LOG_UUCP, - "local0": LOG_LOCAL0, - 
"local1": LOG_LOCAL1, - "local2": LOG_LOCAL2, - "local3": LOG_LOCAL3, - "local4": LOG_LOCAL4, - "local5": LOG_LOCAL5, - "local6": LOG_LOCAL6, - "local7": LOG_LOCAL7, - } - - # Originally added to work around GH-43683. Unnecessary since GH-50043 but kept - # for backwards compatibility. - priority_map = { - "DEBUG" : "debug", - "INFO" : "info", - "WARNING" : "warning", - "ERROR" : "error", - "CRITICAL" : "critical" - } - - def __init__(self, address=('localhost', SYSLOG_UDP_PORT), - facility=LOG_USER, socktype=None): - """ - Initialize a handler. - - If address is specified as a string, a UNIX socket is used. To log to a - local syslogd, "SysLogHandler(address="/dev/log")" can be used. - If facility is not specified, LOG_USER is used. If socktype is - specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific - socket type will be used. For Unix sockets, you can also specify a - socktype of None, in which case socket.SOCK_DGRAM will be used, falling - back to socket.SOCK_STREAM. - """ - logging.Handler.__init__(self) - - self.address = address - self.facility = facility - self.socktype = socktype - self.socket = None - self.createSocket() - - def _connect_unixsocket(self, address): - use_socktype = self.socktype - if use_socktype is None: - use_socktype = socket.SOCK_DGRAM - self.socket = socket.socket(socket.AF_UNIX, use_socktype) - try: - self.socket.connect(address) - # it worked, so set self.socktype to the used type - self.socktype = use_socktype - except OSError: - self.socket.close() - if self.socktype is not None: - # user didn't specify falling back, so fail - raise - use_socktype = socket.SOCK_STREAM - self.socket = socket.socket(socket.AF_UNIX, use_socktype) - try: - self.socket.connect(address) - # it worked, so set self.socktype to the used type - self.socktype = use_socktype - except OSError: - self.socket.close() - raise - - def createSocket(self): - """ - Try to create a socket and, if it's not a datagram socket, connect it - to the other end. This method is called during handler initialization, - but it's not regarded as an error if the other end isn't listening yet - --- the method will be called again when emitting an event, - if there is no socket at that point. - """ - address = self.address - socktype = self.socktype - - if isinstance(address, str): - self.unixsocket = True - # Syslog server may be unavailable during handler initialisation. - # C's openlog() function also ignores connection errors. - # Moreover, we ignore these errors while logging, so it's not worse - # to ignore it also here. - try: - self._connect_unixsocket(address) - except OSError: - pass - else: - self.unixsocket = False - if socktype is None: - socktype = socket.SOCK_DGRAM - host, port = address - ress = socket.getaddrinfo(host, port, 0, socktype) - if not ress: - raise OSError("getaddrinfo returns an empty list") - for res in ress: - af, socktype, proto, _, sa = res - err = sock = None - try: - sock = socket.socket(af, socktype, proto) - if socktype == socket.SOCK_STREAM: - sock.connect(sa) - break - except OSError as exc: - err = exc - if sock is not None: - sock.close() - if err is not None: - raise err - self.socket = sock - self.socktype = socktype - - def encodePriority(self, facility, priority): - """ - Encode the facility and priority. You can pass in strings or - integers - if strings are passed, the facility_names and - priority_names mapping dictionaries are used to convert them to - integers. 
- """ - if isinstance(facility, str): - facility = self.facility_names[facility] - if isinstance(priority, str): - priority = self.priority_names[priority] - return (facility << 3) | priority - - def close(self): - """ - Closes the socket. - """ - with self.lock: - sock = self.socket - if sock: - self.socket = None - sock.close() - logging.Handler.close(self) - - def mapPriority(self, levelName): - """ - Map a logging level name to a key in the priority_names map. - This is useful in two scenarios: when custom levels are being - used, and in the case where you can't do a straightforward - mapping by lowercasing the logging level name because of locale- - specific issues (see SF #1524081). - """ - return self.priority_map.get(levelName, "warning") - - ident = '' # prepended to all messages - append_nul = True # some old syslog daemons expect a NUL terminator - - def emit(self, record): - """ - Emit a record. - - The record is formatted, and then sent to the syslog server. If - exception information is present, it is NOT sent to the server. - """ - try: - msg = self.format(record) - if self.ident: - msg = self.ident + msg - if self.append_nul: - msg += '\000' - - # We need to convert record level to lowercase, maybe this will - # change in the future. - prio = '<%d>' % self.encodePriority(self.facility, - self.mapPriority(record.levelname)) - prio = prio.encode('utf-8') - # Message is a string. Convert to bytes as required by RFC 5424 - msg = msg.encode('utf-8') - msg = prio + msg - - if not self.socket: - self.createSocket() - - if self.unixsocket: - try: - self.socket.send(msg) - except OSError: - self.socket.close() - self._connect_unixsocket(self.address) - self.socket.send(msg) - elif self.socktype == socket.SOCK_DGRAM: - self.socket.sendto(msg, self.address) - else: - self.socket.sendall(msg) - except Exception: - self.handleError(record) - -class SMTPHandler(logging.Handler): - """ - A handler class which sends an SMTP email for each logging event. - """ - def __init__(self, mailhost, fromaddr, toaddrs, subject, - credentials=None, secure=None, timeout=5.0): - """ - Initialize the handler. - - Initialize the instance with the from and to addresses and subject - line of the email. To specify a non-standard SMTP port, use the - (host, port) tuple format for the mailhost argument. To specify - authentication credentials, supply a (username, password) tuple - for the credentials argument. To specify the use of a secure - protocol (TLS), pass in a tuple for the secure argument. This will - only be used when authentication credentials are supplied. The tuple - will be either an empty tuple, or a single-value tuple with the name - of a keyfile, or a 2-value tuple with the names of the keyfile and - certificate file. (This tuple is passed to the - `ssl.SSLContext.load_cert_chain` method). - A timeout in seconds can be specified for the SMTP connection (the - default is one second). - """ - logging.Handler.__init__(self) - if isinstance(mailhost, (list, tuple)): - self.mailhost, self.mailport = mailhost - else: - self.mailhost, self.mailport = mailhost, None - if isinstance(credentials, (list, tuple)): - self.username, self.password = credentials - else: - self.username = None - self.fromaddr = fromaddr - if isinstance(toaddrs, str): - toaddrs = [toaddrs] - self.toaddrs = toaddrs - self.subject = subject - self.secure = secure - self.timeout = timeout - - def getSubject(self, record): - """ - Determine the subject for the email. 
- - If you want to specify a subject line which is record-dependent, - override this method. - """ - return self.subject - - def emit(self, record): - """ - Emit a record. - - Format the record and send it to the specified addressees. - """ - try: - import smtplib - from email.message import EmailMessage - import email.utils - - port = self.mailport - if not port: - port = smtplib.SMTP_PORT - smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout) - msg = EmailMessage() - msg['From'] = self.fromaddr - msg['To'] = ','.join(self.toaddrs) - msg['Subject'] = self.getSubject(record) - msg['Date'] = email.utils.localtime() - msg.set_content(self.format(record)) - if self.username: - if self.secure is not None: - import ssl - - try: - keyfile = self.secure[0] - except IndexError: - keyfile = None - - try: - certfile = self.secure[1] - except IndexError: - certfile = None - - context = ssl._create_stdlib_context( - certfile=certfile, keyfile=keyfile - ) - smtp.ehlo() - smtp.starttls(context=context) - smtp.ehlo() - smtp.login(self.username, self.password) - smtp.send_message(msg) - smtp.quit() - except Exception: - self.handleError(record) - -class NTEventLogHandler(logging.Handler): - """ - A handler class which sends events to the NT Event Log. Adds a - registry entry for the specified application name. If no dllname is - provided, win32service.pyd (which contains some basic message - placeholders) is used. Note that use of these placeholders will make - your event logs big, as the entire message source is held in the log. - If you want slimmer logs, you have to pass in the name of your own DLL - which contains the message definitions you want to use in the event log. - """ - def __init__(self, appname, dllname=None, logtype="Application"): - logging.Handler.__init__(self) - try: - import win32evtlogutil, win32evtlog - self.appname = appname - self._welu = win32evtlogutil - if not dllname: - dllname = os.path.split(self._welu.__file__) - dllname = os.path.split(dllname[0]) - dllname = os.path.join(dllname[0], r'win32service.pyd') - self.dllname = dllname - self.logtype = logtype - # Administrative privileges are required to add a source to the registry. - # This may not be available for a user that just wants to add to an - # existing source - handle this specific case. - try: - self._welu.AddSourceToRegistry(appname, dllname, logtype) - except Exception as e: - # This will probably be a pywintypes.error. Only raise if it's not - # an "access denied" error, else let it pass - if getattr(e, 'winerror', None) != 5: # not access denied - raise - self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE - self.typemap = { - logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, - logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, - logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, - logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, - logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, - } - except ImportError: - print("The Python Win32 extensions for NT (service, event "\ - "logging) appear not to be available.") - self._welu = None - - def getMessageID(self, record): - """ - Return the message ID for the event record. If you are using your - own messages, you could do this by having the msg passed to the - logger being an ID rather than a formatting string. Then, in here, - you could use a dictionary lookup to get the message ID. This - version returns 1, which is the base message ID in win32service.pyd. 
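# --- Editorial sketch: wiring the SMTPHandler described above so that only
# CRITICAL records trigger mail. Host names, addresses, and credentials are
# placeholders; secure=() requests STARTTLS without a client certificate.
import logging
from logging.handlers import SMTPHandler

mailer = SMTPHandler(mailhost=("smtp.example.com", 587),
                     fromaddr="app@example.com",
                     toaddrs=["oncall@example.com"],
                     subject="Application failure",
                     credentials=("user", "secret"),
                     secure=())
mailer.setLevel(logging.CRITICAL)
logging.getLogger().addHandler(mailer)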
- """ - return 1 - - def getEventCategory(self, record): - """ - Return the event category for the record. - - Override this if you want to specify your own categories. This version - returns 0. - """ - return 0 - - def getEventType(self, record): - """ - Return the event type for the record. - - Override this if you want to specify your own types. This version does - a mapping using the handler's typemap attribute, which is set up in - __init__() to a dictionary which contains mappings for DEBUG, INFO, - WARNING, ERROR and CRITICAL. If you are using your own levels you will - either need to override this method or place a suitable dictionary in - the handler's typemap attribute. - """ - return self.typemap.get(record.levelno, self.deftype) - - def emit(self, record): - """ - Emit a record. - - Determine the message ID, event category and event type. Then - log the message in the NT event log. - """ - if self._welu: - try: - id = self.getMessageID(record) - cat = self.getEventCategory(record) - type = self.getEventType(record) - msg = self.format(record) - self._welu.ReportEvent(self.appname, id, cat, type, [msg]) - except Exception: - self.handleError(record) - - def close(self): - """ - Clean up this handler. - - You can remove the application name from the registry as a - source of event log entries. However, if you do this, you will - not be able to see the events as you intended in the Event Log - Viewer - it needs to be able to access the registry to get the - DLL name. - """ - #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) - logging.Handler.close(self) - -class HTTPHandler(logging.Handler): - """ - A class which sends records to a web server, using either GET or - POST semantics. - """ - def __init__(self, host, url, method="GET", secure=False, credentials=None, - context=None): - """ - Initialize the instance with the host, the request URL, and the method - ("GET" or "POST") - """ - logging.Handler.__init__(self) - method = method.upper() - if method not in ["GET", "POST"]: - raise ValueError("method must be GET or POST") - if not secure and context is not None: - raise ValueError("context parameter only makes sense " - "with secure=True") - self.host = host - self.url = url - self.method = method - self.secure = secure - self.credentials = credentials - self.context = context - - def mapLogRecord(self, record): - """ - Default implementation of mapping the log record into a dict - that is sent as the CGI data. Overwrite in your class. - Contributed by Franz Glasner. - """ - return record.__dict__ - - def getConnection(self, host, secure): - """ - get a HTTP[S]Connection. - - Override when a custom connection is required, for example if - there is a proxy. - """ - import http.client - if secure: - connection = http.client.HTTPSConnection(host, context=self.context) - else: - connection = http.client.HTTPConnection(host) - return connection - - def emit(self, record): - """ - Emit a record. - - Send the record to the web server as a percent-encoded dictionary - """ - try: - import urllib.parse - host = self.host - h = self.getConnection(host, self.secure) - url = self.url - data = urllib.parse.urlencode(self.mapLogRecord(record)) - if self.method == "GET": - if (url.find('?') >= 0): - sep = '&' - else: - sep = '?' - url = url + "%c%s" % (sep, data) - h.putrequest(self.method, url) - # support multiple hosts on one IP address... 
- # need to strip optional :port from host, if present - i = host.find(":") - if i >= 0: - host = host[:i] - # See issue #30904: putrequest call above already adds this header - # on Python 3.x. - # h.putheader("Host", host) - if self.method == "POST": - h.putheader("Content-type", - "application/x-www-form-urlencoded") - h.putheader("Content-length", str(len(data))) - if self.credentials: - import base64 - s = ('%s:%s' % self.credentials).encode('utf-8') - s = 'Basic ' + base64.b64encode(s).strip().decode('ascii') - h.putheader('Authorization', s) - h.endheaders() - if self.method == "POST": - h.send(data.encode('utf-8')) - h.getresponse() #can't do anything with the result - except Exception: - self.handleError(record) - -class BufferingHandler(logging.Handler): - """ - A handler class which buffers logging records in memory. Whenever each - record is added to the buffer, a check is made to see if the buffer should - be flushed. If it should, then flush() is expected to do what's needed. - """ - def __init__(self, capacity): - """ - Initialize the handler with the buffer size. - """ - logging.Handler.__init__(self) - self.capacity = capacity - self.buffer = [] - - def shouldFlush(self, record): - """ - Should the handler flush its buffer? - - Returns true if the buffer is up to capacity. This method can be - overridden to implement custom flushing strategies. - """ - return (len(self.buffer) >= self.capacity) - - def emit(self, record): - """ - Emit a record. - - Append the record. If shouldFlush() tells us to, call flush() to process - the buffer. - """ - self.buffer.append(record) - if self.shouldFlush(record): - self.flush() - - def flush(self): - """ - Override to implement custom flushing behaviour. - - This version just zaps the buffer to empty. - """ - with self.lock: - self.buffer.clear() - - def close(self): - """ - Close the handler. - - This version just flushes and chains to the parent class' close(). - """ - try: - self.flush() - finally: - logging.Handler.close(self) - -class MemoryHandler(BufferingHandler): - """ - A handler class which buffers logging records in memory, periodically - flushing them to a target handler. Flushing occurs whenever the buffer - is full, or when an event of a certain severity or greater is seen. - """ - def __init__(self, capacity, flushLevel=logging.ERROR, target=None, - flushOnClose=True): - """ - Initialize the handler with the buffer size, the level at which - flushing should occur and an optional target. - - Note that without a target being set either here or via setTarget(), - a MemoryHandler is no use to anyone! - - The ``flushOnClose`` argument is ``True`` for backward compatibility - reasons - the old behaviour is that when the handler is closed, the - buffer is flushed, even if the flush level hasn't been exceeded nor the - capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``. - """ - BufferingHandler.__init__(self, capacity) - self.flushLevel = flushLevel - self.target = target - # See Issue #26559 for why this has been added - self.flushOnClose = flushOnClose - - def shouldFlush(self, record): - """ - Check for buffer full or a record at the flushLevel or higher. - """ - return (len(self.buffer) >= self.capacity) or \ - (record.levelno >= self.flushLevel) - - def setTarget(self, target): - """ - Set the target handler for this handler. - """ - with self.lock: - self.target = target - - def flush(self): - """ - For a MemoryHandler, flushing means just sending the buffered - records to the target, if there is one. 
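# --- Editorial sketch: MemoryHandler as described above, buffering up to 100
# records and flushing them all to a file once an ERROR (or worse) arrives.
# The target file name is an illustrative assumption.
import logging
import logging.handlers

target = logging.FileHandler("app.log")
buffered = logging.handlers.MemoryHandler(capacity=100,
                                          flushLevel=logging.ERROR,
                                          target=target)
logging.getLogger().addHandler(buffered)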
Override if you want - different behaviour. - - The record buffer is only cleared if a target has been set. - """ - with self.lock: - if self.target: - for record in self.buffer: - self.target.handle(record) - self.buffer.clear() - - def close(self): - """ - Flush, if appropriately configured, set the target to None and lose the - buffer. - """ - try: - if self.flushOnClose: - self.flush() - finally: - with self.lock: - self.target = None - BufferingHandler.close(self) - - -class QueueHandler(logging.Handler): - """ - This handler sends events to a queue. Typically, it would be used together - with a multiprocessing Queue to centralise logging to file in one process - (in a multi-process application), so as to avoid file write contention - between processes. - - This code is new in Python 3.2, but this class can be copy pasted into - user code for use with earlier Python versions. - """ - - def __init__(self, queue): - """ - Initialise an instance, using the passed queue. - """ - logging.Handler.__init__(self) - self.queue = queue - self.listener = None # will be set to listener if configured via dictConfig() - - def enqueue(self, record): - """ - Enqueue a record. - - The base implementation uses put_nowait. You may want to override - this method if you want to use blocking, timeouts or custom queue - implementations. - """ - self.queue.put_nowait(record) - - def prepare(self, record): - """ - Prepare a record for queuing. The object returned by this method is - enqueued. - - The base implementation formats the record to merge the message and - arguments, and removes unpickleable items from the record in-place. - Specifically, it overwrites the record's `msg` and - `message` attributes with the merged message (obtained by - calling the handler's `format` method), and sets the `args`, - `exc_info` and `exc_text` attributes to None. - - You might want to override this method if you want to convert - the record to a dict or JSON string, or send a modified copy - of the record while leaving the original intact. - """ - # The format operation gets traceback text into record.exc_text - # (if there's exception data), and also returns the formatted - # message. We can then use this to replace the original - # msg + args, as these might be unpickleable. We also zap the - # exc_info, exc_text and stack_info attributes, as they are no longer - # needed and, if not None, will typically not be pickleable. - msg = self.format(record) - # bpo-35726: make copy of record to avoid affecting other handlers in the chain. - record = copy.copy(record) - record.message = msg - record.msg = msg - record.args = None - record.exc_info = None - record.exc_text = None - record.stack_info = None - return record - - def emit(self, record): - """ - Emit a record. - - Writes the LogRecord to the queue, preparing it for pickling first. - """ - try: - self.enqueue(self.prepare(record)) - except Exception: - self.handleError(record) - - -class QueueListener(object): - """ - This class implements an internal threaded listener which watches for - LogRecords being added to a queue, removes them and passes them to a - list of handlers for processing. - """ - _sentinel = None - - def __init__(self, queue, *handlers, respect_handler_level=False): - """ - Initialise an instance with the specified queue and - handlers. 
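# --- Editorial sketch: the usual pairing of the two queue classes above.
# QueueHandler enqueues records from any thread or process; QueueListener
# drains the queue on its own thread and fans records out to the real
# handlers.
import logging
import queue
from logging.handlers import QueueHandler, QueueListener

q = queue.Queue(-1)
logging.getLogger().addHandler(QueueHandler(q))
listener = QueueListener(q, logging.StreamHandler(),
                         respect_handler_level=True)
listener.start()
# ... application runs and logs ...
listener.stop()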
- """ - self.queue = queue - self.handlers = handlers - self._thread = None - self.respect_handler_level = respect_handler_level - - def dequeue(self, block): - """ - Dequeue a record and return it, optionally blocking. - - The base implementation uses get. You may want to override this method - if you want to use timeouts or work with custom queue implementations. - """ - return self.queue.get(block) - - def start(self): - """ - Start the listener. - - This starts up a background thread to monitor the queue for - LogRecords to process. - """ - if self._thread is not None: - raise RuntimeError("Listener already started") - - self._thread = t = threading.Thread(target=self._monitor) - t.daemon = True - t.start() - - def prepare(self, record): - """ - Prepare a record for handling. - - This method just returns the passed-in record. You may want to - override this method if you need to do any custom marshalling or - manipulation of the record before passing it to the handlers. - """ - return record - - def handle(self, record): - """ - Handle a record. - - This just loops through the handlers offering them the record - to handle. - """ - record = self.prepare(record) - for handler in self.handlers: - if not self.respect_handler_level: - process = True - else: - process = record.levelno >= handler.level - if process: - handler.handle(record) - - def _monitor(self): - """ - Monitor the queue for records, and ask the handler - to deal with them. - - This method runs on a separate, internal thread. - The thread will terminate if it sees a sentinel object in the queue. - """ - q = self.queue - has_task_done = hasattr(q, 'task_done') - while True: - try: - record = self.dequeue(True) - if record is self._sentinel: - if has_task_done: - q.task_done() - break - self.handle(record) - if has_task_done: - q.task_done() - except queue.Empty: - break - - def enqueue_sentinel(self): - """ - This is used to enqueue the sentinel record. - - The base implementation uses put_nowait. You may want to override this - method if you want to use timeouts or work with custom queue - implementations. - """ - self.queue.put_nowait(self._sentinel) - - def stop(self): - """ - Stop the listener. - - This asks the thread to terminate, and then waits for it to do so. - Note that if you don't call this before your application exits, there - may be some records still left on the queue, which won't be processed. - """ - if self._thread: # see gh-114706 - allow calling this more than once - self.enqueue_sentinel() - self._thread.join() - self._thread = None diff --git a/Python313_13_x64_Template/Lib/lzma.py b/Python313_13_x64_Template/Lib/lzma.py deleted file mode 100644 index c1e3d33d..00000000 --- a/Python313_13_x64_Template/Lib/lzma.py +++ /dev/null @@ -1,364 +0,0 @@ -"""Interface to the liblzma compression library. - -This module provides a class for reading and writing compressed files, -classes for incremental (de)compression, and convenience functions for -one-shot (de)compression. - -These classes and functions support both the XZ and legacy LZMA -container formats, as well as raw compressed data streams. 
-""" - -__all__ = [ - "CHECK_NONE", "CHECK_CRC32", "CHECK_CRC64", "CHECK_SHA256", - "CHECK_ID_MAX", "CHECK_UNKNOWN", - "FILTER_LZMA1", "FILTER_LZMA2", "FILTER_DELTA", "FILTER_X86", "FILTER_IA64", - "FILTER_ARM", "FILTER_ARMTHUMB", "FILTER_POWERPC", "FILTER_SPARC", - "FORMAT_AUTO", "FORMAT_XZ", "FORMAT_ALONE", "FORMAT_RAW", - "MF_HC3", "MF_HC4", "MF_BT2", "MF_BT3", "MF_BT4", - "MODE_FAST", "MODE_NORMAL", "PRESET_DEFAULT", "PRESET_EXTREME", - - "LZMACompressor", "LZMADecompressor", "LZMAFile", "LZMAError", - "open", "compress", "decompress", "is_check_supported", -] - -import builtins -import io -import os -from _lzma import * -from _lzma import _encode_filter_properties, _decode_filter_properties -import _compression - - -# Value 0 no longer used -_MODE_READ = 1 -# Value 2 no longer used -_MODE_WRITE = 3 - - -class LZMAFile(_compression.BaseStream): - - """A file object providing transparent LZMA (de)compression. - - An LZMAFile can act as a wrapper for an existing file object, or - refer directly to a named file on disk. - - Note that LZMAFile provides a *binary* file interface - data read - is returned as bytes, and data to be written must be given as bytes. - """ - - def __init__(self, filename=None, mode="r", *, - format=None, check=-1, preset=None, filters=None): - """Open an LZMA-compressed file in binary mode. - - filename can be either an actual file name (given as a str, - bytes, or PathLike object), in which case the named file is - opened, or it can be an existing file object to read from or - write to. - - mode can be "r" for reading (default), "w" for (over)writing, - "x" for creating exclusively, or "a" for appending. These can - equivalently be given as "rb", "wb", "xb" and "ab" respectively. - - format specifies the container format to use for the file. - If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the - default is FORMAT_XZ. - - check specifies the integrity check to use. This argument can - only be used when opening a file for writing. For FORMAT_XZ, - the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not - support integrity checks - for these formats, check must be - omitted, or be CHECK_NONE. - - When opening a file for reading, the *preset* argument is not - meaningful, and should be omitted. The *filters* argument should - also be omitted, except when format is FORMAT_RAW (in which case - it is required). - - When opening a file for writing, the settings used by the - compressor can be specified either as a preset compression - level (with the *preset* argument), or in detail as a custom - filter chain (with the *filters* argument). For FORMAT_XZ and - FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset - level. For FORMAT_RAW, the caller must always specify a filter - chain; the raw compressor does not support preset compression - levels. - - preset (if provided) should be an integer in the range 0-9, - optionally OR-ed with the constant PRESET_EXTREME. - - filters (if provided) should be a sequence of dicts. Each dict - should have an entry for "id" indicating ID of the filter, plus - additional entries for options to the filter. 
- """ - self._fp = None - self._closefp = False - self._mode = None - - if mode in ("r", "rb"): - if check != -1: - raise ValueError("Cannot specify an integrity check " - "when opening a file for reading") - if preset is not None: - raise ValueError("Cannot specify a preset compression " - "level when opening a file for reading") - if format is None: - format = FORMAT_AUTO - mode_code = _MODE_READ - elif mode in ("w", "wb", "a", "ab", "x", "xb"): - if format is None: - format = FORMAT_XZ - mode_code = _MODE_WRITE - self._compressor = LZMACompressor(format=format, check=check, - preset=preset, filters=filters) - self._pos = 0 - else: - raise ValueError("Invalid mode: {!r}".format(mode)) - - if isinstance(filename, (str, bytes, os.PathLike)): - if "b" not in mode: - mode += "b" - self._fp = builtins.open(filename, mode) - self._closefp = True - self._mode = mode_code - elif hasattr(filename, "read") or hasattr(filename, "write"): - self._fp = filename - self._mode = mode_code - else: - raise TypeError("filename must be a str, bytes, file or PathLike object") - - if self._mode == _MODE_READ: - raw = _compression.DecompressReader(self._fp, LZMADecompressor, - trailing_error=LZMAError, format=format, filters=filters) - self._buffer = io.BufferedReader(raw) - - def close(self): - """Flush and close the file. - - May be called more than once without error. Once the file is - closed, any other operation on it will raise a ValueError. - """ - if self.closed: - return - try: - if self._mode == _MODE_READ: - self._buffer.close() - self._buffer = None - elif self._mode == _MODE_WRITE: - self._fp.write(self._compressor.flush()) - self._compressor = None - finally: - try: - if self._closefp: - self._fp.close() - finally: - self._fp = None - self._closefp = False - - @property - def closed(self): - """True if this file is closed.""" - return self._fp is None - - @property - def name(self): - self._check_not_closed() - return self._fp.name - - @property - def mode(self): - return 'wb' if self._mode == _MODE_WRITE else 'rb' - - def fileno(self): - """Return the file descriptor for the underlying file.""" - self._check_not_closed() - return self._fp.fileno() - - def seekable(self): - """Return whether the file supports seeking.""" - return self.readable() and self._buffer.seekable() - - def readable(self): - """Return whether the file was opened for reading.""" - self._check_not_closed() - return self._mode == _MODE_READ - - def writable(self): - """Return whether the file was opened for writing.""" - self._check_not_closed() - return self._mode == _MODE_WRITE - - def peek(self, size=-1): - """Return buffered data without advancing the file position. - - Always returns at least one byte of data, unless at EOF. - The exact number of bytes returned is unspecified. - """ - self._check_can_read() - # Relies on the undocumented fact that BufferedReader.peek() always - # returns at least one byte (except at EOF) - return self._buffer.peek(size) - - def read(self, size=-1): - """Read up to size uncompressed bytes from the file. - - If size is negative or omitted, read until EOF is reached. - Returns b"" if the file is already at EOF. - """ - self._check_can_read() - return self._buffer.read(size) - - def read1(self, size=-1): - """Read up to size uncompressed bytes, while trying to avoid - making multiple reads from the underlying stream. Reads up to a - buffer's worth of data if size is negative. - - Returns b"" if the file is at EOF. 
- """ - self._check_can_read() - if size < 0: - size = io.DEFAULT_BUFFER_SIZE - return self._buffer.read1(size) - - def readline(self, size=-1): - """Read a line of uncompressed bytes from the file. - - The terminating newline (if present) is retained. If size is - non-negative, no more than size bytes will be read (in which - case the line may be incomplete). Returns b'' if already at EOF. - """ - self._check_can_read() - return self._buffer.readline(size) - - def write(self, data): - """Write a bytes object to the file. - - Returns the number of uncompressed bytes written, which is - always the length of data in bytes. Note that due to buffering, - the file on disk may not reflect the data written until close() - is called. - """ - self._check_can_write() - if isinstance(data, (bytes, bytearray)): - length = len(data) - else: - # accept any data that supports the buffer protocol - data = memoryview(data) - length = data.nbytes - - compressed = self._compressor.compress(data) - self._fp.write(compressed) - self._pos += length - return length - - def seek(self, offset, whence=io.SEEK_SET): - """Change the file position. - - The new position is specified by offset, relative to the - position indicated by whence. Possible values for whence are: - - 0: start of stream (default): offset must not be negative - 1: current stream position - 2: end of stream; offset must not be positive - - Returns the new file position. - - Note that seeking is emulated, so depending on the parameters, - this operation may be extremely slow. - """ - self._check_can_seek() - return self._buffer.seek(offset, whence) - - def tell(self): - """Return the current file position.""" - self._check_not_closed() - if self._mode == _MODE_READ: - return self._buffer.tell() - return self._pos - - -def open(filename, mode="rb", *, - format=None, check=-1, preset=None, filters=None, - encoding=None, errors=None, newline=None): - """Open an LZMA-compressed file in binary or text mode. - - filename can be either an actual file name (given as a str, bytes, - or PathLike object), in which case the named file is opened, or it - can be an existing file object to read from or write to. - - The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb", - "a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text - mode. - - The format, check, preset and filters arguments specify the - compression settings, as for LZMACompressor, LZMADecompressor and - LZMAFile. - - For binary mode, this function is equivalent to the LZMAFile - constructor: LZMAFile(filename, mode, ...). In this case, the - encoding, errors and newline arguments must not be provided. - - For text mode, an LZMAFile object is created, and wrapped in an - io.TextIOWrapper instance with the specified encoding, error - handling behavior, and line ending(s). 
- - """ - if "t" in mode: - if "b" in mode: - raise ValueError("Invalid mode: %r" % (mode,)) - else: - if encoding is not None: - raise ValueError("Argument 'encoding' not supported in binary mode") - if errors is not None: - raise ValueError("Argument 'errors' not supported in binary mode") - if newline is not None: - raise ValueError("Argument 'newline' not supported in binary mode") - - lz_mode = mode.replace("t", "") - binary_file = LZMAFile(filename, lz_mode, format=format, check=check, - preset=preset, filters=filters) - - if "t" in mode: - encoding = io.text_encoding(encoding) - return io.TextIOWrapper(binary_file, encoding, errors, newline) - else: - return binary_file - - -def compress(data, format=FORMAT_XZ, check=-1, preset=None, filters=None): - """Compress a block of data. - - Refer to LZMACompressor's docstring for a description of the - optional arguments *format*, *check*, *preset* and *filters*. - - For incremental compression, use an LZMACompressor instead. - """ - comp = LZMACompressor(format, check, preset, filters) - return comp.compress(data) + comp.flush() - - -def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None): - """Decompress a block of data. - - Refer to LZMADecompressor's docstring for a description of the - optional arguments *format*, *check* and *filters*. - - For incremental decompression, use an LZMADecompressor instead. - """ - results = [] - while True: - decomp = LZMADecompressor(format, memlimit, filters) - try: - res = decomp.decompress(data) - except LZMAError: - if results: - break # Leftover data is not a valid LZMA/XZ stream; ignore it. - else: - raise # Error on the first iteration; bail out. - results.append(res) - if not decomp.eof: - raise LZMAError("Compressed data ended before the " - "end-of-stream marker was reached") - data = decomp.unused_data - if not data: - break - return b"".join(results) diff --git a/Python313_13_x64_Template/Lib/mimetypes.py b/Python313_13_x64_Template/Lib/mimetypes.py deleted file mode 100644 index 2af7c4b7..00000000 --- a/Python313_13_x64_Template/Lib/mimetypes.py +++ /dev/null @@ -1,679 +0,0 @@ -"""Guess the MIME type of a file. - -This module defines two useful functions: - -guess_type(url, strict=True) -- guess the MIME type and encoding of a URL. - -guess_extension(type, strict=True) -- guess the extension for a given MIME type. 
- -It also contains the following, for tuning the behavior: - -Data: - -knownfiles -- list of files to parse -inited -- flag set when init() has been called -suffix_map -- dictionary mapping suffixes to suffixes -encodings_map -- dictionary mapping suffixes to encodings -types_map -- dictionary mapping suffixes to types - -Functions: - -init([files]) -- parse a list of files, default knownfiles (on Windows, the - default values are taken from the registry) -read_mime_types(file) -- parse one file, return a dictionary or None -""" - -import os -import sys -import posixpath -import urllib.parse - -try: - from _winapi import _mimetypes_read_windows_registry -except ImportError: - _mimetypes_read_windows_registry = None - -try: - import winreg as _winreg -except ImportError: - _winreg = None - -__all__ = [ - "knownfiles", "inited", "MimeTypes", - "guess_type", "guess_file_type", "guess_all_extensions", "guess_extension", - "add_type", "init", "read_mime_types", - "suffix_map", "encodings_map", "types_map", "common_types" -] - -knownfiles = [ - "/etc/mime.types", - "/etc/httpd/mime.types", # Mac OS X - "/etc/httpd/conf/mime.types", # Apache - "/etc/apache/mime.types", # Apache 1 - "/etc/apache2/mime.types", # Apache 2 - "/usr/local/etc/httpd/conf/mime.types", - "/usr/local/lib/netscape/mime.types", - "/usr/local/etc/httpd/conf/mime.types", # Apache 1.2 - "/usr/local/etc/mime.types", # Apache 1.3 - ] - -inited = False -_db = None - - -class MimeTypes: - """MIME-types datastore. - - This datastore can handle information from mime.types-style files - and supports basic determination of MIME type from a filename or - URL, and can guess a reasonable extension given a MIME type. - """ - - def __init__(self, filenames=(), strict=True): - if not inited: - init() - self.encodings_map = _encodings_map_default.copy() - self.suffix_map = _suffix_map_default.copy() - self.types_map = ({}, {}) # dict for (non-strict, strict) - self.types_map_inv = ({}, {}) - for (ext, type) in _types_map_default.items(): - self.add_type(type, ext, True) - for (ext, type) in _common_types_default.items(): - self.add_type(type, ext, False) - for name in filenames: - self.read(name, strict) - - def add_type(self, type, ext, strict=True): - """Add a mapping between a type and an extension. - - When the extension is already known, the new - type will replace the old one. When the type - is already known the extension will be added - to the list of known extensions. - - If strict is true, information will be added to - list of standard types, else to the list of non-standard - types. - """ - if not type: - return - self.types_map[strict][ext] = type - exts = self.types_map_inv[strict].setdefault(type, []) - if ext not in exts: - exts.append(ext) - - def guess_type(self, url, strict=True): - """Guess the type of a file which is either a URL or a path-like object. - - Return value is a tuple (type, encoding) where type is None if - the type can't be guessed (no or unknown suffix) or a string - of the form type/subtype, usable for a MIME Content-type - header; and encoding is None for no encoding or the name of - the program used to encode (e.g. compress or gzip). The - mappings are table driven. Encoding suffixes are case - sensitive; type suffixes are first tried case sensitive, then - case insensitive. - - The suffixes .tgz, .taz and .tz (case sensitive!) are all - mapped to '.tar.gz'. (This is table-driven too, using the - dictionary suffix_map.) 
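# --- Editorial sketch: the table-driven lookups described above, including
# the .tgz -> .tar.gz suffix mapping.
import mimetypes

assert mimetypes.guess_type("archive.tgz") == ("application/x-tar", "gzip")
assert mimetypes.guess_type("page.html")[0] == "text/html"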
-
-        Optional `strict' argument when False adds a bunch of commonly found,
-        but non-standard types.
-        """
-        # TODO: Deprecate accepting file paths (in particular path-like objects).
-        url = os.fspath(url)
-        p = urllib.parse.urlparse(url)
-        if p.scheme and len(p.scheme) > 1:
-            scheme = p.scheme
-            url = p.path
-        else:
-            return self.guess_file_type(url, strict=strict)
-        if scheme == 'data':
-            # syntax of data URLs:
-            # dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
-            # mediatype := [ type "/" subtype ] *( ";" parameter )
-            # data      := *urlchar
-            # parameter := attribute "=" value
-            # type/subtype defaults to "text/plain"
-            comma = url.find(',')
-            if comma < 0:
-                # bad data URL
-                return None, None
-            semi = url.find(';', 0, comma)
-            if semi >= 0:
-                type = url[:semi]
-            else:
-                type = url[:comma]
-            if '=' in type or '/' not in type:
-                type = 'text/plain'
-            return type, None           # never compressed, so encoding is None
-        return self._guess_file_type(url, strict, posixpath.splitext)
-
-    def guess_file_type(self, path, *, strict=True):
-        """Guess the type of a file based on its path.
-
-        Similar to guess_type(), but takes file path instead of URL.
-        """
-        path = os.fsdecode(path)
-        path = os.path.splitdrive(path)[1]
-        return self._guess_file_type(path, strict, os.path.splitext)
-
-    def _guess_file_type(self, path, strict, splitext):
-        base, ext = splitext(path)
-        while (ext_lower := ext.lower()) in self.suffix_map:
-            base, ext = splitext(base + self.suffix_map[ext_lower])
-        # encodings_map is case sensitive
-        if ext in self.encodings_map:
-            encoding = self.encodings_map[ext]
-            base, ext = splitext(base)
-        else:
-            encoding = None
-        ext = ext.lower()
-        types_map = self.types_map[True]
-        if ext in types_map:
-            return types_map[ext], encoding
-        elif strict:
-            return None, encoding
-        types_map = self.types_map[False]
-        if ext in types_map:
-            return types_map[ext], encoding
-        else:
-            return None, encoding
-
-    def guess_all_extensions(self, type, strict=True):
-        """Guess the extensions for a file based on its MIME type.
-
-        Return value is a list of strings giving the possible filename
-        extensions, including the leading dot ('.'). The extension is not
-        guaranteed to have been associated with any particular data stream,
-        but would be mapped to the MIME type `type' by guess_type().
-
-        Optional `strict' argument when false adds a bunch of commonly found,
-        but non-standard types.
-        """
-        type = type.lower()
-        extensions = list(self.types_map_inv[True].get(type, []))
-        if not strict:
-            for ext in self.types_map_inv[False].get(type, []):
-                if ext not in extensions:
-                    extensions.append(ext)
-        return extensions
-
-    def guess_extension(self, type, strict=True):
-        """Guess the extension for a file based on its MIME type.
-
-        Return value is a string giving a filename extension,
-        including the leading dot ('.'). The extension is not
-        guaranteed to have been associated with any particular data
-        stream, but would be mapped to the MIME type `type' by
-        guess_type(). If no extension can be guessed for `type', None
-        is returned.
-
-        Optional `strict' argument when false adds a bunch of commonly found,
-        but non-standard types.
-        """
-        extensions = self.guess_all_extensions(type, strict)
-        if not extensions:
-            return None
-        return extensions[0]
-
-    def read(self, filename, strict=True):
-        """
-        Read a single mime.types-format file, specified by pathname.
-
-        If strict is true, information will be added to
-        list of standard types, else to the list of non-standard
-        types.
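# --- Editorial aside (not part of the diff): the data: URL branch shown above.
# The mediatype defaults to text/plain, and the encoding slot is always None:
import mimetypes

assert mimetypes.guess_type("data:,Hello") == ("text/plain", None)
assert mimetypes.guess_type("data:image/png;base64,iVBORw0K") == ("image/png", None)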
- """ - with open(filename, encoding='utf-8') as fp: - self.readfp(fp, strict) - - def readfp(self, fp, strict=True): - """ - Read a single mime.types-format file. - - If strict is true, information will be added to - list of standard types, else to the list of non-standard - types. - """ - while line := fp.readline(): - words = line.split() - for i in range(len(words)): - if words[i][0] == '#': - del words[i:] - break - if not words: - continue - type, suffixes = words[0], words[1:] - for suff in suffixes: - self.add_type(type, '.' + suff, strict) - - def read_windows_registry(self, strict=True): - """ - Load the MIME types database from Windows registry. - - If strict is true, information will be added to - list of standard types, else to the list of non-standard - types. - """ - - if not _mimetypes_read_windows_registry and not _winreg: - return - - add_type = self.add_type - if strict: - add_type = lambda type, ext: self.add_type(type, ext, True) - - # Accelerated function if it is available - if _mimetypes_read_windows_registry: - _mimetypes_read_windows_registry(add_type) - elif _winreg: - self._read_windows_registry(add_type) - - @classmethod - def _read_windows_registry(cls, add_type): - def enum_types(mimedb): - i = 0 - while True: - try: - ctype = _winreg.EnumKey(mimedb, i) - except OSError: - break - else: - if '\0' not in ctype: - yield ctype - i += 1 - - with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '') as hkcr: - for subkeyname in enum_types(hkcr): - try: - with _winreg.OpenKey(hkcr, subkeyname) as subkey: - # Only check file extensions - if not subkeyname.startswith("."): - continue - # raises OSError if no 'Content Type' value - mimetype, datatype = _winreg.QueryValueEx( - subkey, 'Content Type') - if datatype != _winreg.REG_SZ: - continue - add_type(mimetype, subkeyname) - except OSError: - continue - -def guess_type(url, strict=True): - """Guess the type of a file based on its URL. - - Return value is a tuple (type, encoding) where type is None if the - type can't be guessed (no or unknown suffix) or a string of the - form type/subtype, usable for a MIME Content-type header; and - encoding is None for no encoding or the name of the program used - to encode (e.g. compress or gzip). The mappings are table - driven. Encoding suffixes are case sensitive; type suffixes are - first tried case sensitive, then case insensitive. - - The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped - to ".tar.gz". (This is table-driven too, using the dictionary - suffix_map). - - Optional `strict' argument when false adds a bunch of commonly found, but - non-standard types. - """ - if _db is None: - init() - return _db.guess_type(url, strict) - - -def guess_file_type(path, *, strict=True): - """Guess the type of a file based on its path. - - Similar to guess_type(), but takes file path istead of URL. - """ - if _db is None: - init() - return _db.guess_file_type(path, strict=strict) - - -def guess_all_extensions(type, strict=True): - """Guess the extensions for a file based on its MIME type. - - Return value is a list of strings giving the possible filename - extensions, including the leading dot ('.'). The extension is not - guaranteed to have been associated with any particular data - stream, but would be mapped to the MIME type `type' by - guess_type(). If no extension can be guessed for `type', None - is returned. - - Optional `strict' argument when false adds a bunch of commonly found, - but non-standard types. 
- """ - if _db is None: - init() - return _db.guess_all_extensions(type, strict) - -def guess_extension(type, strict=True): - """Guess the extension for a file based on its MIME type. - - Return value is a string giving a filename extension, including the - leading dot ('.'). The extension is not guaranteed to have been - associated with any particular data stream, but would be mapped to the - MIME type `type' by guess_type(). If no extension can be guessed for - `type', None is returned. - - Optional `strict' argument when false adds a bunch of commonly found, - but non-standard types. - """ - if _db is None: - init() - return _db.guess_extension(type, strict) - -def add_type(type, ext, strict=True): - """Add a mapping between a type and an extension. - - When the extension is already known, the new - type will replace the old one. When the type - is already known the extension will be added - to the list of known extensions. - - If strict is true, information will be added to - list of standard types, else to the list of non-standard - types. - """ - if _db is None: - init() - return _db.add_type(type, ext, strict) - - -def init(files=None): - global suffix_map, types_map, encodings_map, common_types - global inited, _db - inited = True # so that MimeTypes.__init__() doesn't call us again - - if files is None or _db is None: - db = MimeTypes() - # Quick return if not supported - db.read_windows_registry() - - if files is None: - files = knownfiles - else: - files = knownfiles + list(files) - else: - db = _db - - for file in files: - if os.path.isfile(file): - db.read(file) - encodings_map = db.encodings_map - suffix_map = db.suffix_map - types_map = db.types_map[True] - common_types = db.types_map[False] - # Make the DB a global variable now that it is fully initialized - _db = db - - -def read_mime_types(file): - try: - f = open(file, encoding='utf-8') - except OSError: - return None - with f: - db = MimeTypes() - db.readfp(f, True) - return db.types_map[True] - - -def _default_mime_types(): - global suffix_map, _suffix_map_default - global encodings_map, _encodings_map_default - global types_map, _types_map_default - global common_types, _common_types_default - - suffix_map = _suffix_map_default = { - '.svgz': '.svg.gz', - '.tgz': '.tar.gz', - '.taz': '.tar.gz', - '.tz': '.tar.gz', - '.tbz2': '.tar.bz2', - '.txz': '.tar.xz', - } - - encodings_map = _encodings_map_default = { - '.gz': 'gzip', - '.Z': 'compress', - '.bz2': 'bzip2', - '.xz': 'xz', - '.br': 'br', - } - - # Before adding new types, make sure they are either registered with IANA, - # at http://www.iana.org/assignments/media-types - # or extensions, i.e. using the x- prefix - - # If you add to these, please keep them sorted by mime type. - # Make sure the entry with the preferred file extension for a particular mime type - # appears before any others of the same mimetype. 
- types_map = _types_map_default = { - '.js' : 'text/javascript', - '.mjs' : 'text/javascript', - '.json' : 'application/json', - '.webmanifest': 'application/manifest+json', - '.doc' : 'application/msword', - '.dot' : 'application/msword', - '.wiz' : 'application/msword', - '.nq' : 'application/n-quads', - '.nt' : 'application/n-triples', - '.bin' : 'application/octet-stream', - '.a' : 'application/octet-stream', - '.dll' : 'application/octet-stream', - '.exe' : 'application/octet-stream', - '.o' : 'application/octet-stream', - '.obj' : 'application/octet-stream', - '.so' : 'application/octet-stream', - '.oda' : 'application/oda', - '.pdf' : 'application/pdf', - '.p7c' : 'application/pkcs7-mime', - '.ps' : 'application/postscript', - '.ai' : 'application/postscript', - '.eps' : 'application/postscript', - '.trig' : 'application/trig', - '.m3u' : 'application/vnd.apple.mpegurl', - '.m3u8' : 'application/vnd.apple.mpegurl', - '.xls' : 'application/vnd.ms-excel', - '.xlb' : 'application/vnd.ms-excel', - '.ppt' : 'application/vnd.ms-powerpoint', - '.pot' : 'application/vnd.ms-powerpoint', - '.ppa' : 'application/vnd.ms-powerpoint', - '.pps' : 'application/vnd.ms-powerpoint', - '.pwz' : 'application/vnd.ms-powerpoint', - '.wasm' : 'application/wasm', - '.bcpio' : 'application/x-bcpio', - '.cpio' : 'application/x-cpio', - '.csh' : 'application/x-csh', - '.dvi' : 'application/x-dvi', - '.gtar' : 'application/x-gtar', - '.hdf' : 'application/x-hdf', - '.h5' : 'application/x-hdf5', - '.latex' : 'application/x-latex', - '.mif' : 'application/x-mif', - '.cdf' : 'application/x-netcdf', - '.nc' : 'application/x-netcdf', - '.p12' : 'application/x-pkcs12', - '.pfx' : 'application/x-pkcs12', - '.ram' : 'application/x-pn-realaudio', - '.pyc' : 'application/x-python-code', - '.pyo' : 'application/x-python-code', - '.sh' : 'application/x-sh', - '.shar' : 'application/x-shar', - '.swf' : 'application/x-shockwave-flash', - '.sv4cpio': 'application/x-sv4cpio', - '.sv4crc' : 'application/x-sv4crc', - '.tar' : 'application/x-tar', - '.tcl' : 'application/x-tcl', - '.tex' : 'application/x-tex', - '.texi' : 'application/x-texinfo', - '.texinfo': 'application/x-texinfo', - '.roff' : 'application/x-troff', - '.t' : 'application/x-troff', - '.tr' : 'application/x-troff', - '.man' : 'application/x-troff-man', - '.me' : 'application/x-troff-me', - '.ms' : 'application/x-troff-ms', - '.ustar' : 'application/x-ustar', - '.src' : 'application/x-wais-source', - '.xsl' : 'application/xml', - '.rdf' : 'application/xml', - '.wsdl' : 'application/xml', - '.xpdl' : 'application/xml', - '.zip' : 'application/zip', - '.3gp' : 'audio/3gpp', - '.3gpp' : 'audio/3gpp', - '.3g2' : 'audio/3gpp2', - '.3gpp2' : 'audio/3gpp2', - '.aac' : 'audio/aac', - '.adts' : 'audio/aac', - '.loas' : 'audio/aac', - '.ass' : 'audio/aac', - '.au' : 'audio/basic', - '.snd' : 'audio/basic', - '.mp3' : 'audio/mpeg', - '.mp2' : 'audio/mpeg', - '.opus' : 'audio/opus', - '.aif' : 'audio/x-aiff', - '.aifc' : 'audio/x-aiff', - '.aiff' : 'audio/x-aiff', - '.ra' : 'audio/x-pn-realaudio', - '.wav' : 'audio/x-wav', - '.avif' : 'image/avif', - '.bmp' : 'image/bmp', - '.gif' : 'image/gif', - '.ief' : 'image/ief', - '.jpg' : 'image/jpeg', - '.jpe' : 'image/jpeg', - '.jpeg' : 'image/jpeg', - '.heic' : 'image/heic', - '.heif' : 'image/heif', - '.png' : 'image/png', - '.svg' : 'image/svg+xml', - '.tiff' : 'image/tiff', - '.tif' : 'image/tiff', - '.ico' : 'image/vnd.microsoft.icon', - '.webp' : 'image/webp', - '.ras' : 'image/x-cmu-raster', - '.pnm' : 
'image/x-portable-anymap', - '.pbm' : 'image/x-portable-bitmap', - '.pgm' : 'image/x-portable-graymap', - '.ppm' : 'image/x-portable-pixmap', - '.rgb' : 'image/x-rgb', - '.xbm' : 'image/x-xbitmap', - '.xpm' : 'image/x-xpixmap', - '.xwd' : 'image/x-xwindowdump', - '.eml' : 'message/rfc822', - '.mht' : 'message/rfc822', - '.mhtml' : 'message/rfc822', - '.nws' : 'message/rfc822', - '.css' : 'text/css', - '.csv' : 'text/csv', - '.html' : 'text/html', - '.htm' : 'text/html', - '.md' : 'text/markdown', - '.markdown': 'text/markdown', - '.n3' : 'text/n3', - '.txt' : 'text/plain', - '.bat' : 'text/plain', - '.c' : 'text/plain', - '.h' : 'text/plain', - '.ksh' : 'text/plain', - '.pl' : 'text/plain', - '.srt' : 'text/plain', - '.rtx' : 'text/richtext', - '.rtf' : 'text/rtf', - '.tsv' : 'text/tab-separated-values', - '.vtt' : 'text/vtt', - '.py' : 'text/x-python', - '.rst' : 'text/x-rst', - '.etx' : 'text/x-setext', - '.sgm' : 'text/x-sgml', - '.sgml' : 'text/x-sgml', - '.vcf' : 'text/x-vcard', - '.xml' : 'text/xml', - '.mp4' : 'video/mp4', - '.mpeg' : 'video/mpeg', - '.m1v' : 'video/mpeg', - '.mpa' : 'video/mpeg', - '.mpe' : 'video/mpeg', - '.mpg' : 'video/mpeg', - '.mov' : 'video/quicktime', - '.qt' : 'video/quicktime', - '.webm' : 'video/webm', - '.avi' : 'video/x-msvideo', - '.movie' : 'video/x-sgi-movie', - } - - # These are non-standard types, commonly found in the wild. They will - # only match if strict=0 flag is given to the API methods. - - # Please sort these too - common_types = _common_types_default = { - '.rtf' : 'application/rtf', - '.midi': 'audio/midi', - '.mid' : 'audio/midi', - '.jpg' : 'image/jpg', - '.pict': 'image/pict', - '.pct' : 'image/pict', - '.pic' : 'image/pict', - '.xul' : 'text/xul', - } - - -_default_mime_types() - - -def _main(): - import getopt - - USAGE = """\ -Usage: mimetypes.py [options] type - -Options: - --help / -h -- print this message and exit - --lenient / -l -- additionally search of some common, but non-standard - types. - --extension / -e -- guess extension instead of type - -More than one type argument may be given. -""" - - def usage(code, msg=''): - print(USAGE) - if msg: print(msg) - sys.exit(code) - - try: - opts, args = getopt.getopt(sys.argv[1:], 'hle', - ['help', 'lenient', 'extension']) - except getopt.error as msg: - usage(1, msg) - - strict = 1 - extension = 0 - for opt, arg in opts: - if opt in ('-h', '--help'): - usage(0) - elif opt in ('-l', '--lenient'): - strict = 0 - elif opt in ('-e', '--extension'): - extension = 1 - for gtype in args: - if extension: - guess = guess_extension(gtype, strict) - if not guess: print("I don't know anything about type", gtype) - else: print(guess) - else: - guess, encoding = guess_type(gtype, strict) - if not guess: print("I don't know anything about type", gtype) - else: print('type:', guess, 'encoding:', encoding) - - -if __name__ == '__main__': - _main() diff --git a/Python313_13_x64_Template/Lib/multiprocessing/connection.py b/Python313_13_x64_Template/Lib/multiprocessing/connection.py deleted file mode 100644 index efb9ea95..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/connection.py +++ /dev/null @@ -1,1212 +0,0 @@ -# -# A higher level module for using sockets (or Windows named pipes) -# -# multiprocessing/connection.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. 
-# - -__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] - -import errno -import io -import os -import sys -import socket -import struct -import time -import tempfile -import itertools - - -from . import util - -from . import AuthenticationError, BufferTooShort -from .context import reduction -_ForkingPickler = reduction.ForkingPickler - -try: - import _multiprocessing - import _winapi - from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE -except ImportError: - if sys.platform == 'win32': - raise - _winapi = None - -# -# -# - -BUFSIZE = 8192 -# A very generous timeout when it comes to local connections... -CONNECTION_TIMEOUT = 20. - -_mmap_counter = itertools.count() -_MAX_PIPE_ATTEMPTS = 100 - -default_family = 'AF_INET' -families = ['AF_INET'] - -if hasattr(socket, 'AF_UNIX'): - default_family = 'AF_UNIX' - families += ['AF_UNIX'] - -if sys.platform == 'win32': - default_family = 'AF_PIPE' - families += ['AF_PIPE'] - - -def _init_timeout(timeout=CONNECTION_TIMEOUT): - return time.monotonic() + timeout - -def _check_timeout(t): - return time.monotonic() > t - -# -# -# - -def arbitrary_address(family): - ''' - Return an arbitrary free address for the given family - ''' - if family == 'AF_INET': - return ('localhost', 0) - elif family == 'AF_UNIX': - return tempfile.mktemp(prefix='sock-', dir=util.get_temp_dir()) - elif family == 'AF_PIPE': - return (r'\\.\pipe\pyc-%d-%d-%s' % - (os.getpid(), next(_mmap_counter), os.urandom(8).hex())) - else: - raise ValueError('unrecognized family') - -def _validate_family(family): - ''' - Checks if the family is valid for the current environment. - ''' - if sys.platform != 'win32' and family == 'AF_PIPE': - raise ValueError('Family %s is not recognized.' % family) - - if sys.platform == 'win32' and family == 'AF_UNIX': - # double check - if not hasattr(socket, family): - raise ValueError('Family %s is not recognized.' % family) - -def address_type(address): - ''' - Return the types of the address - - This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' - ''' - if type(address) == tuple: - return 'AF_INET' - elif type(address) is str and address.startswith('\\\\'): - return 'AF_PIPE' - elif type(address) is str or util.is_abstract_socket_namespace(address): - return 'AF_UNIX' - else: - raise ValueError('address type of %r unrecognized' % address) - -# -# Connection classes -# - -class _ConnectionBase: - _handle = None - - def __init__(self, handle, readable=True, writable=True): - handle = handle.__index__() - if handle < 0: - raise ValueError("invalid handle") - if not readable and not writable: - raise ValueError( - "at least one of `readable` and `writable` must be True") - self._handle = handle - self._readable = readable - self._writable = writable - - # XXX should we use util.Finalize instead of a __del__? 
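# --- Editorial aside (not part of the diff): how address_type() above (an
# internal helper) dispatches on the address shape for the three families:
from multiprocessing.connection import address_type

assert address_type(("localhost", 0)) == "AF_INET"   # (host, port) tuple
assert address_type(r"\\.\pipe\demo") == "AF_PIPE"   # Windows named pipe path
assert address_type("/tmp/sock-demo") == "AF_UNIX"   # filesystem socket path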
- - def __del__(self): - if self._handle is not None: - self._close() - - def _check_closed(self): - if self._handle is None: - raise OSError("handle is closed") - - def _check_readable(self): - if not self._readable: - raise OSError("connection is write-only") - - def _check_writable(self): - if not self._writable: - raise OSError("connection is read-only") - - def _bad_message_length(self): - if self._writable: - self._readable = False - else: - self.close() - raise OSError("bad message length") - - @property - def closed(self): - """True if the connection is closed""" - return self._handle is None - - @property - def readable(self): - """True if the connection is readable""" - return self._readable - - @property - def writable(self): - """True if the connection is writable""" - return self._writable - - def fileno(self): - """File descriptor or handle of the connection""" - self._check_closed() - return self._handle - - def close(self): - """Close the connection""" - if self._handle is not None: - try: - self._close() - finally: - self._handle = None - - def send_bytes(self, buf, offset=0, size=None): - """Send the bytes data from a bytes-like object""" - self._check_closed() - self._check_writable() - m = memoryview(buf) - if m.itemsize > 1: - m = m.cast('B') - n = m.nbytes - if offset < 0: - raise ValueError("offset is negative") - if n < offset: - raise ValueError("buffer length < offset") - if size is None: - size = n - offset - elif size < 0: - raise ValueError("size is negative") - elif offset + size > n: - raise ValueError("buffer length < offset + size") - self._send_bytes(m[offset:offset + size]) - - def send(self, obj): - """Send a (picklable) object""" - self._check_closed() - self._check_writable() - self._send_bytes(_ForkingPickler.dumps(obj)) - - def recv_bytes(self, maxlength=None): - """ - Receive bytes data as a bytes object. - """ - self._check_closed() - self._check_readable() - if maxlength is not None and maxlength < 0: - raise ValueError("negative maxlength") - buf = self._recv_bytes(maxlength) - if buf is None: - self._bad_message_length() - return buf.getvalue() - - def recv_bytes_into(self, buf, offset=0): - """ - Receive bytes data into a writeable bytes-like object. - Return the number of bytes read. - """ - self._check_closed() - self._check_readable() - with memoryview(buf) as m: - # Get bytesize of arbitrary buffer - itemsize = m.itemsize - bytesize = itemsize * len(m) - if offset < 0: - raise ValueError("negative offset") - elif offset > bytesize: - raise ValueError("offset too large") - result = self._recv_bytes() - size = result.tell() - if bytesize < offset + size: - raise BufferTooShort(result.getvalue()) - # Message can fit in dest - result.seek(0) - result.readinto(m[offset // itemsize : - (offset + size) // itemsize]) - return size - - def recv(self): - """Receive a (picklable) object""" - self._check_closed() - self._check_readable() - buf = self._recv_bytes() - return _ForkingPickler.loads(buf.getbuffer()) - - def poll(self, timeout=0.0): - """Whether there is any input available to be read""" - self._check_closed() - self._check_readable() - return self._poll(timeout) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - self.close() - - -if _winapi: - - class PipeConnection(_ConnectionBase): - """ - Connection class based on a Windows named pipe. - Overlapped I/O is used, so the handles must have been created - with FILE_FLAG_OVERLAPPED. 
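# --- Editorial aside (not part of the diff): the _ConnectionBase API above in
# a round trip over a duplex pipe (send() pickles, recv() unpickles):
from multiprocessing import Pipe

parent, child = Pipe()              # duplex by default
child.send({"answer": 42})
assert parent.poll(1.0)             # readable within the timeout
assert parent.recv() == {"answer": 42}
parent.close()
child.close()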
- """ - _got_empty_message = False - _send_ov = None - - def _close(self, _CloseHandle=_winapi.CloseHandle): - ov = self._send_ov - if ov is not None: - # Interrupt WaitForMultipleObjects() in _send_bytes() - ov.cancel() - _CloseHandle(self._handle) - - def _send_bytes(self, buf): - if self._send_ov is not None: - # A connection should only be used by a single thread - raise ValueError("concurrent send_bytes() calls " - "are not supported") - ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) - self._send_ov = ov - try: - if err == _winapi.ERROR_IO_PENDING: - waitres = _winapi.WaitForMultipleObjects( - [ov.event], False, INFINITE) - assert waitres == WAIT_OBJECT_0 - except: - ov.cancel() - raise - finally: - self._send_ov = None - nwritten, err = ov.GetOverlappedResult(True) - if err == _winapi.ERROR_OPERATION_ABORTED: - # close() was called by another thread while - # WaitForMultipleObjects() was waiting for the overlapped - # operation. - raise OSError(errno.EPIPE, "handle is closed") - assert err == 0 - assert nwritten == len(buf) - - def _recv_bytes(self, maxsize=None): - if self._got_empty_message: - self._got_empty_message = False - return io.BytesIO() - else: - bsize = 128 if maxsize is None else min(maxsize, 128) - try: - ov, err = _winapi.ReadFile(self._handle, bsize, - overlapped=True) - try: - if err == _winapi.ERROR_IO_PENDING: - waitres = _winapi.WaitForMultipleObjects( - [ov.event], False, INFINITE) - assert waitres == WAIT_OBJECT_0 - except: - ov.cancel() - raise - finally: - nread, err = ov.GetOverlappedResult(True) - if err == 0: - f = io.BytesIO() - f.write(ov.getbuffer()) - return f - elif err == _winapi.ERROR_MORE_DATA: - return self._get_more_data(ov, maxsize) - except OSError as e: - if e.winerror == _winapi.ERROR_BROKEN_PIPE: - raise EOFError - else: - raise - raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") - - def _poll(self, timeout): - if (self._got_empty_message or - _winapi.PeekNamedPipe(self._handle)[0] != 0): - return True - return bool(wait([self], timeout)) - - def _get_more_data(self, ov, maxsize): - buf = ov.getbuffer() - f = io.BytesIO() - f.write(buf) - left = _winapi.PeekNamedPipe(self._handle)[1] - assert left > 0 - if maxsize is not None and len(buf) + left > maxsize: - self._bad_message_length() - ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) - rbytes, err = ov.GetOverlappedResult(True) - assert err == 0 - assert rbytes == left - f.write(ov.getbuffer()) - return f - - -class Connection(_ConnectionBase): - """ - Connection class based on an arbitrary file descriptor (Unix only), or - a socket handle (Windows). 
- """ - - if _winapi: - def _close(self, _close=_multiprocessing.closesocket): - _close(self._handle) - _write = _multiprocessing.send - _read = _multiprocessing.recv - else: - def _close(self, _close=os.close): - _close(self._handle) - _write = os.write - _read = os.read - - def _send(self, buf, write=_write): - remaining = len(buf) - while True: - n = write(self._handle, buf) - remaining -= n - if remaining == 0: - break - buf = buf[n:] - - def _recv(self, size, read=_read): - buf = io.BytesIO() - handle = self._handle - remaining = size - while remaining > 0: - chunk = read(handle, remaining) - n = len(chunk) - if n == 0: - if remaining == size: - raise EOFError - else: - raise OSError("got end of file during message") - buf.write(chunk) - remaining -= n - return buf - - def _send_bytes(self, buf): - n = len(buf) - if n > 0x7fffffff: - pre_header = struct.pack("!i", -1) - header = struct.pack("!Q", n) - self._send(pre_header) - self._send(header) - self._send(buf) - else: - # For wire compatibility with 3.7 and lower - header = struct.pack("!i", n) - if n > 16384: - # The payload is large so Nagle's algorithm won't be triggered - # and we'd better avoid the cost of concatenation. - self._send(header) - self._send(buf) - else: - # Issue #20540: concatenate before sending, to avoid delays due - # to Nagle's algorithm on a TCP socket. - # Also note we want to avoid sending a 0-length buffer separately, - # to avoid "broken pipe" errors if the other end closed the pipe. - self._send(header + buf) - - def _recv_bytes(self, maxsize=None): - buf = self._recv(4) - size, = struct.unpack("!i", buf.getvalue()) - if size == -1: - buf = self._recv(8) - size, = struct.unpack("!Q", buf.getvalue()) - if maxsize is not None and size > maxsize: - return None - return self._recv(size) - - def _poll(self, timeout): - r = wait([self], timeout) - return bool(r) - - -# -# Public functions -# - -class Listener(object): - ''' - Returns a listener object. - - This is a wrapper for a bound socket which is 'listening' for - connections, or for a Windows named pipe. - ''' - def __init__(self, address=None, family=None, backlog=1, authkey=None): - family = family or (address and address_type(address)) \ - or default_family - _validate_family(family) - if authkey is not None and not isinstance(authkey, bytes): - raise TypeError('authkey should be a byte string') - - if family == 'AF_PIPE': - if address: - self._listener = PipeListener(address, backlog) - else: - for attempts in itertools.count(): - address = arbitrary_address(family) - try: - self._listener = PipeListener(address, backlog) - break - except OSError as e: - if attempts >= _MAX_PIPE_ATTEMPTS: - raise - if e.winerror not in (_winapi.ERROR_PIPE_BUSY, - _winapi.ERROR_ACCESS_DENIED): - raise - else: - address = address or arbitrary_address(family) - self._listener = SocketListener(address, family, backlog) - - self._authkey = authkey - - def accept(self): - ''' - Accept a connection on the bound socket or named pipe of `self`. - - Returns a `Connection` object. - ''' - if self._listener is None: - raise OSError('listener is closed') - - c = self._listener.accept() - if self._authkey is not None: - deliver_challenge(c, self._authkey) - answer_challenge(c, self._authkey) - return c - - def close(self): - ''' - Close the bound socket or named pipe of `self`. 
- ''' - listener = self._listener - if listener is not None: - self._listener = None - listener.close() - - @property - def address(self): - return self._listener._address - - @property - def last_accepted(self): - return self._listener._last_accepted - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - self.close() - - -def Client(address, family=None, authkey=None): - ''' - Returns a connection to the address of a `Listener` - ''' - family = family or address_type(address) - _validate_family(family) - if family == 'AF_PIPE': - c = PipeClient(address) - else: - c = SocketClient(address) - - if authkey is not None and not isinstance(authkey, bytes): - raise TypeError('authkey should be a byte string') - - if authkey is not None: - answer_challenge(c, authkey) - deliver_challenge(c, authkey) - - return c - - -if sys.platform != 'win32': - - def Pipe(duplex=True): - ''' - Returns pair of connection objects at either end of a pipe - ''' - if duplex: - s1, s2 = socket.socketpair() - s1.setblocking(True) - s2.setblocking(True) - c1 = Connection(s1.detach()) - c2 = Connection(s2.detach()) - else: - fd1, fd2 = os.pipe() - c1 = Connection(fd1, writable=False) - c2 = Connection(fd2, readable=False) - - return c1, c2 - -else: - - def Pipe(duplex=True): - ''' - Returns pair of connection objects at either end of a pipe - ''' - if duplex: - openmode = _winapi.PIPE_ACCESS_DUPLEX - access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE - obsize, ibsize = BUFSIZE, BUFSIZE - else: - openmode = _winapi.PIPE_ACCESS_INBOUND - access = _winapi.GENERIC_WRITE - obsize, ibsize = 0, BUFSIZE - - for attempts in itertools.count(): - address = arbitrary_address('AF_PIPE') - try: - h1 = _winapi.CreateNamedPipe( - address, openmode | _winapi.FILE_FLAG_OVERLAPPED | - _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, - _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | - _winapi.PIPE_WAIT, - 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, - # default security descriptor: the handle cannot be inherited - _winapi.NULL - ) - break - except OSError as e: - if attempts >= _MAX_PIPE_ATTEMPTS: - raise - if e.winerror not in (_winapi.ERROR_PIPE_BUSY, - _winapi.ERROR_ACCESS_DENIED): - raise - h2 = _winapi.CreateFile( - address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, - _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL - ) - _winapi.SetNamedPipeHandleState( - h2, _winapi.PIPE_READMODE_MESSAGE, None, None - ) - - overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) - _, err = overlapped.GetOverlappedResult(True) - assert err == 0 - - c1 = PipeConnection(h1, writable=duplex) - c2 = PipeConnection(h2, readable=duplex) - - return c1, c2 - -# -# Definitions for connections based on sockets -# - -class SocketListener(object): - ''' - Representation of a socket which is bound to an address and listening - ''' - def __init__(self, address, family, backlog=1): - self._socket = socket.socket(getattr(socket, family)) - try: - # SO_REUSEADDR has different semantics on Windows (issue #2550). 
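# --- Editorial aside (not part of the diff): Listener/Client above in a
# minimal authenticated echo, with a thread standing in for a second process:
import threading
from multiprocessing.connection import Client, Listener

def _serve(listener):
    with listener.accept() as conn:     # challenge/response runs inside accept()
        conn.send(conn.recv())

listener = Listener(("localhost", 0), authkey=b"secret")
t = threading.Thread(target=_serve, args=(listener,))
t.start()
with Client(listener.address, authkey=b"secret") as conn:
    conn.send("ping")
    assert conn.recv() == "ping"
t.join()
listener.close()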
- if os.name == 'posix': - self._socket.setsockopt(socket.SOL_SOCKET, - socket.SO_REUSEADDR, 1) - self._socket.setblocking(True) - self._socket.bind(address) - self._socket.listen(backlog) - self._address = self._socket.getsockname() - except OSError: - self._socket.close() - raise - self._family = family - self._last_accepted = None - - if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): - # Linux abstract socket namespaces do not need to be explicitly unlinked - self._unlink = util.Finalize( - self, os.unlink, args=(address,), exitpriority=0 - ) - else: - self._unlink = None - - def accept(self): - s, self._last_accepted = self._socket.accept() - s.setblocking(True) - return Connection(s.detach()) - - def close(self): - try: - self._socket.close() - finally: - unlink = self._unlink - if unlink is not None: - self._unlink = None - unlink() - - -def SocketClient(address): - ''' - Return a connection object connected to the socket given by `address` - ''' - family = address_type(address) - with socket.socket( getattr(socket, family) ) as s: - s.setblocking(True) - s.connect(address) - return Connection(s.detach()) - -# -# Definitions for connections based on named pipes -# - -if sys.platform == 'win32': - - class PipeListener(object): - ''' - Representation of a named pipe - ''' - def __init__(self, address, backlog=None): - self._address = address - self._handle_queue = [self._new_handle(first=True)] - - self._last_accepted = None - util.sub_debug('listener created with address=%r', self._address) - self.close = util.Finalize( - self, PipeListener._finalize_pipe_listener, - args=(self._handle_queue, self._address), exitpriority=0 - ) - - def _new_handle(self, first=False): - flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED - if first: - flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE - return _winapi.CreateNamedPipe( - self._address, flags, - _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | - _winapi.PIPE_WAIT, - _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, - _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL - ) - - def accept(self): - self._handle_queue.append(self._new_handle()) - handle = self._handle_queue.pop(0) - try: - ov = _winapi.ConnectNamedPipe(handle, overlapped=True) - except OSError as e: - if e.winerror != _winapi.ERROR_NO_DATA: - raise - # ERROR_NO_DATA can occur if a client has already connected, - # written data and then disconnected -- see Issue 14725. 
- else: - try: - res = _winapi.WaitForMultipleObjects( - [ov.event], False, INFINITE) - except: - ov.cancel() - _winapi.CloseHandle(handle) - raise - finally: - _, err = ov.GetOverlappedResult(True) - assert err == 0 - return PipeConnection(handle) - - @staticmethod - def _finalize_pipe_listener(queue, address): - util.sub_debug('closing listener with address=%r', address) - for handle in queue: - _winapi.CloseHandle(handle) - - def PipeClient(address): - ''' - Return a connection object connected to the pipe given by `address` - ''' - t = _init_timeout() - while 1: - try: - _winapi.WaitNamedPipe(address, 1000) - h = _winapi.CreateFile( - address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, - 0, _winapi.NULL, _winapi.OPEN_EXISTING, - _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL - ) - except OSError as e: - if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, - _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): - raise - else: - break - else: - raise - - _winapi.SetNamedPipeHandleState( - h, _winapi.PIPE_READMODE_MESSAGE, None, None - ) - return PipeConnection(h) - -# -# Authentication stuff -# - -MESSAGE_LENGTH = 40 # MUST be > 20 - -_CHALLENGE = b'#CHALLENGE#' -_WELCOME = b'#WELCOME#' -_FAILURE = b'#FAILURE#' - -# multiprocessing.connection Authentication Handshake Protocol Description -# (as documented for reference after reading the existing code) -# ============================================================================= -# -# On Windows: native pipes with "overlapped IO" are used to send the bytes, -# instead of the length prefix SIZE scheme described below. (ie: the OS deals -# with message sizes for us) -# -# Protocol error behaviors: -# -# On POSIX, any failure to receive the length prefix into SIZE, for SIZE greater -# than the requested maxsize to receive, or receiving fewer than SIZE bytes -# results in the connection being closed and auth to fail. -# -# On Windows, receiving too few bytes is never a low level _recv_bytes read -# error, receiving too many will trigger an error only if receive maxsize -# value was larger than 128 OR the if the data arrived in smaller pieces. -# -# Serving side Client side -# ------------------------------ --------------------------------------- -# 0. Open a connection on the pipe. -# 1. Accept connection. -# 2. Random 20+ bytes -> MESSAGE -# Modern servers always send -# more than 20 bytes and include -# a {digest} prefix on it with -# their preferred HMAC digest. -# Legacy ones send ==20 bytes. -# 3. send 4 byte length (net order) -# prefix followed by: -# b'#CHALLENGE#' + MESSAGE -# 4. Receive 4 bytes, parse as network byte -# order integer. If it is -1, receive an -# additional 8 bytes, parse that as network -# byte order. The result is the length of -# the data that follows -> SIZE. -# 5. Receive min(SIZE, 256) bytes -> M1 -# 6. Assert that M1 starts with: -# b'#CHALLENGE#' -# 7. Strip that prefix from M1 into -> M2 -# 7.1. Parse M2: if it is exactly 20 bytes in -# length this indicates a legacy server -# supporting only HMAC-MD5. Otherwise the -# 7.2. preferred digest is looked up from an -# expected "{digest}" prefix on M2. No prefix -# or unsupported digest? <- AuthenticationError -# 7.3. Put divined algorithm name in -> D_NAME -# 8. Compute HMAC-D_NAME of AUTHKEY, M2 -> C_DIGEST -# 9. Send 4 byte length prefix (net order) -# followed by C_DIGEST bytes. -# 10. Receive 4 or 4+8 byte length -# prefix (#4 dance) -> SIZE. -# 11. Receive min(SIZE, 256) -> C_D. -# 11.1. Parse C_D: legacy servers -# accept it as is, "md5" -> D_NAME -# 11.2. 
modern servers check the length -# of C_D, IF it is 16 bytes? -# 11.2.1. "md5" -> D_NAME -# and skip to step 12. -# 11.3. longer? expect and parse a "{digest}" -# prefix into -> D_NAME. -# Strip the prefix and store remaining -# bytes in -> C_D. -# 11.4. Don't like D_NAME? <- AuthenticationError -# 12. Compute HMAC-D_NAME of AUTHKEY, -# MESSAGE into -> M_DIGEST. -# 13. Compare M_DIGEST == C_D: -# 14a: Match? Send length prefix & -# b'#WELCOME#' -# <- RETURN -# 14b: Mismatch? Send len prefix & -# b'#FAILURE#' -# <- CLOSE & AuthenticationError -# 15. Receive 4 or 4+8 byte length prefix (net -# order) again as in #4 into -> SIZE. -# 16. Receive min(SIZE, 256) bytes -> M3. -# 17. Compare M3 == b'#WELCOME#': -# 17a. Match? <- RETURN -# 17b. Mismatch? <- CLOSE & AuthenticationError -# -# If this RETURNed, the connection remains open: it has been authenticated. -# -# Length prefixes are used consistently. Even on the legacy protocol, this -# was good fortune and allowed us to evolve the protocol by using the length -# of the opening challenge or length of the returned digest as a signal as -# to which protocol the other end supports. - -_ALLOWED_DIGESTS = frozenset( - {b'md5', b'sha256', b'sha384', b'sha3_256', b'sha3_384'}) -_MAX_DIGEST_LEN = max(len(_) for _ in _ALLOWED_DIGESTS) - -# Old hmac-md5 only server versions from Python <=3.11 sent a message of this -# length. It happens to not match the length of any supported digest so we can -# use a message of this length to indicate that we should work in backwards -# compatible md5-only mode without a {digest_name} prefix on our response. -_MD5ONLY_MESSAGE_LENGTH = 20 -_MD5_DIGEST_LEN = 16 -_LEGACY_LENGTHS = (_MD5ONLY_MESSAGE_LENGTH, _MD5_DIGEST_LEN) - - -def _get_digest_name_and_payload(message): # type: (bytes) -> tuple[str, bytes] - """Returns a digest name and the payload for a response hash. - - If a legacy protocol is detected based on the message length - or contents the digest name returned will be empty to indicate - legacy mode where MD5 and no digest prefix should be sent. - """ - # modern message format: b"{digest}payload" longer than 20 bytes - # legacy message format: 16 or 20 byte b"payload" - if len(message) in _LEGACY_LENGTHS: - # Either this was a legacy server challenge, or we're processing - # a reply from a legacy client that sent an unprefixed 16-byte - # HMAC-MD5 response. All messages using the modern protocol will - # be longer than either of these lengths. - return '', message - if (message.startswith(b'{') and - (curly := message.find(b'}', 1, _MAX_DIGEST_LEN+2)) > 0): - digest = message[1:curly] - if digest in _ALLOWED_DIGESTS: - payload = message[curly+1:] - return digest.decode('ascii'), payload - raise AuthenticationError( - 'unsupported message length, missing digest prefix, ' - f'or unsupported digest: {message=}') - - -def _create_response(authkey, message): - """Create a MAC based on authkey and message - - The MAC algorithm defaults to HMAC-MD5, unless MD5 is not available or - the message has a '{digest_name}' prefix. For legacy HMAC-MD5, the response - is the raw MAC, otherwise the response is prefixed with '{digest_name}', - e.g. b'{sha256}abcdefg...' - - Note: The MAC protects the entire message including the digest_name prefix. - """ - import hmac - digest_name = _get_digest_name_and_payload(message)[0] - # The MAC protects the entire message: digest header and payload. - if not digest_name: - # Legacy server without a {digest} prefix on message. - # Generate a legacy non-prefixed HMAC-MD5 reply. 
- try: - return hmac.new(authkey, message, 'md5').digest() - except ValueError: - # HMAC-MD5 is not available (FIPS mode?), fall back to - # HMAC-SHA2-256 modern protocol. The legacy server probably - # doesn't support it and will reject us anyways. :shrug: - digest_name = 'sha256' - # Modern protocol, indicate the digest used in the reply. - response = hmac.new(authkey, message, digest_name).digest() - return b'{%s}%s' % (digest_name.encode('ascii'), response) - - -def _verify_challenge(authkey, message, response): - """Verify MAC challenge - - If our message did not include a digest_name prefix, the client is allowed - to select a stronger digest_name from _ALLOWED_DIGESTS. - - In case our message is prefixed, a client cannot downgrade to a weaker - algorithm, because the MAC is calculated over the entire message - including the '{digest_name}' prefix. - """ - import hmac - response_digest, response_mac = _get_digest_name_and_payload(response) - response_digest = response_digest or 'md5' - try: - expected = hmac.new(authkey, message, response_digest).digest() - except ValueError: - raise AuthenticationError(f'{response_digest=} unsupported') - if len(expected) != len(response_mac): - raise AuthenticationError( - f'expected {response_digest!r} of length {len(expected)} ' - f'got {len(response_mac)}') - if not hmac.compare_digest(expected, response_mac): - raise AuthenticationError('digest received was wrong') - - -def deliver_challenge(connection, authkey: bytes, digest_name='sha256'): - if not isinstance(authkey, bytes): - raise ValueError( - "Authkey must be bytes, not {0!s}".format(type(authkey))) - assert MESSAGE_LENGTH > _MD5ONLY_MESSAGE_LENGTH, "protocol constraint" - message = os.urandom(MESSAGE_LENGTH) - message = b'{%s}%s' % (digest_name.encode('ascii'), message) - # Even when sending a challenge to a legacy client that does not support - # digest prefixes, they'll take the entire thing as a challenge and - # respond to it with a raw HMAC-MD5. 
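# --- Editorial aside (not part of the diff): the shape of a modern
# challenge/response built by the helpers above. The client's HMAC covers the
# entire message *including* the '{digest}' prefix, which is what prevents a
# downgrade to a weaker digest:
import hmac
import os

authkey = b"secret"
message = b"{sha256}" + os.urandom(40)                          # server challenge
response = b"{sha256}" + hmac.new(authkey, message, "sha256").digest()
# _verify_challenge() recomputes the same HMAC and uses hmac.compare_digest().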
-    connection.send_bytes(_CHALLENGE + message)
-    response = connection.recv_bytes(256)        # reject large message
-    try:
-        _verify_challenge(authkey, message, response)
-    except AuthenticationError:
-        connection.send_bytes(_FAILURE)
-        raise
-    else:
-        connection.send_bytes(_WELCOME)
-
-
-def answer_challenge(connection, authkey: bytes):
-    if not isinstance(authkey, bytes):
-        raise ValueError(
-            "Authkey must be bytes, not {0!s}".format(type(authkey)))
-    message = connection.recv_bytes(256)         # reject large message
-    if not message.startswith(_CHALLENGE):
-        raise AuthenticationError(
-            f'Protocol error, expected challenge: {message=}')
-    message = message[len(_CHALLENGE):]
-    if len(message) < _MD5ONLY_MESSAGE_LENGTH:
-        raise AuthenticationError(f'challenge too short: {len(message)} bytes')
-    digest = _create_response(authkey, message)
-    connection.send_bytes(digest)
-    response = connection.recv_bytes(256)        # reject large message
-    if response != _WELCOME:
-        raise AuthenticationError('digest sent was rejected')
-
-#
-# Support for using xmlrpclib for serialization
-#
-
-class ConnectionWrapper(object):
-    def __init__(self, conn, dumps, loads):
-        self._conn = conn
-        self._dumps = dumps
-        self._loads = loads
-        for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
-            obj = getattr(conn, attr)
-            setattr(self, attr, obj)
-    def send(self, obj):
-        s = self._dumps(obj)
-        self._conn.send_bytes(s)
-    def recv(self):
-        s = self._conn.recv_bytes()
-        return self._loads(s)
-
-def _xml_dumps(obj):
-    return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')
-
-def _xml_loads(s):
-    (obj,), method = xmlrpclib.loads(s.decode('utf-8'))
-    return obj
-
-class XmlListener(Listener):
-    def accept(self):
-        global xmlrpclib
-        import xmlrpc.client as xmlrpclib
-        obj = Listener.accept(self)
-        return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
-
-def XmlClient(*args, **kwds):
-    global xmlrpclib
-    import xmlrpc.client as xmlrpclib
-    return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
-
-#
-# Wait
-#
-
-if sys.platform == 'win32':
-
-    def _exhaustive_wait(handles, timeout):
-        # Return ALL handles which are currently signalled.  (Only
-        # returning the first signalled might create starvation issues.)
-        L = list(handles)
-        ready = []
-        # Windows limits WaitForMultipleObjects at 64 handles, and we use a
-        # few for synchronisation, so we switch to batched waits at 60.
-        if len(L) > 60:
-            try:
-                res = _winapi.BatchedWaitForMultipleObjects(L, False, timeout)
-            except TimeoutError:
-                return []
-            ready.extend(L[i] for i in res)
-            if res:
-                L = [h for i, h in enumerate(L) if i > res[0] and i not in res]
-            timeout = 0
-        while L:
-            short_L = L[:60] if len(L) > 60 else L
-            res = _winapi.WaitForMultipleObjects(short_L, False, timeout)
-            if res == WAIT_TIMEOUT:
-                break
-            elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
-                res -= WAIT_OBJECT_0
-            elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
-                res -= WAIT_ABANDONED_0
-            else:
-                raise RuntimeError('Should not get here')
-            ready.append(L[res])
-            L = L[res+1:]
-            timeout = 0
-        return ready
-
-    _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}
-
-    def wait(object_list, timeout=None):
-        '''
-        Wait till an object in object_list is ready/readable.
-
-        Returns list of those objects in object_list which are ready/readable.
- ''' - if timeout is None: - timeout = INFINITE - elif timeout < 0: - timeout = 0 - else: - timeout = int(timeout * 1000 + 0.5) - - object_list = list(object_list) - waithandle_to_obj = {} - ov_list = [] - ready_objects = set() - ready_handles = set() - - try: - for o in object_list: - try: - fileno = getattr(o, 'fileno') - except AttributeError: - waithandle_to_obj[o.__index__()] = o - else: - # start an overlapped read of length zero - try: - ov, err = _winapi.ReadFile(fileno(), 0, True) - except OSError as e: - ov, err = None, e.winerror - if err not in _ready_errors: - raise - if err == _winapi.ERROR_IO_PENDING: - ov_list.append(ov) - waithandle_to_obj[ov.event] = o - else: - # If o.fileno() is an overlapped pipe handle and - # err == 0 then there is a zero length message - # in the pipe, but it HAS NOT been consumed... - if ov and sys.getwindowsversion()[:2] >= (6, 2): - # ... except on Windows 8 and later, where - # the message HAS been consumed. - try: - _, err = ov.GetOverlappedResult(False) - except OSError as e: - err = e.winerror - if not err and hasattr(o, '_got_empty_message'): - o._got_empty_message = True - ready_objects.add(o) - timeout = 0 - - ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) - finally: - # request that overlapped reads stop - for ov in ov_list: - ov.cancel() - - # wait for all overlapped reads to stop - for ov in ov_list: - try: - _, err = ov.GetOverlappedResult(True) - except OSError as e: - err = e.winerror - if err not in _ready_errors: - raise - if err != _winapi.ERROR_OPERATION_ABORTED: - o = waithandle_to_obj[ov.event] - ready_objects.add(o) - if err == 0: - # If o.fileno() is an overlapped pipe handle then - # a zero length message HAS been consumed. - if hasattr(o, '_got_empty_message'): - o._got_empty_message = True - - ready_objects.update(waithandle_to_obj[h] for h in ready_handles) - return [o for o in object_list if o in ready_objects] - -else: - - import selectors - - # poll/select have the advantage of not requiring any extra file - # descriptor, contrarily to epoll/kqueue (also, they require a single - # syscall). - if hasattr(selectors, 'PollSelector'): - _WaitSelector = selectors.PollSelector - else: - _WaitSelector = selectors.SelectSelector - - def wait(object_list, timeout=None): - ''' - Wait till an object in object_list is ready/readable. - - Returns list of those objects in object_list which are ready/readable. - ''' - with _WaitSelector() as selector: - for obj in object_list: - selector.register(obj, selectors.EVENT_READ) - - if timeout is not None: - deadline = time.monotonic() + timeout - - while True: - ready = selector.select(timeout) - if ready: - return [key.fileobj for (key, events) in ready] - else: - if timeout is not None: - timeout = deadline - time.monotonic() - if timeout < 0: - return ready - -# -# Make connection and socket objects shareable if possible -# - -if sys.platform == 'win32': - def reduce_connection(conn): - handle = conn.fileno() - with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: - from . 
import resource_sharer - ds = resource_sharer.DupSocket(s) - return rebuild_connection, (ds, conn.readable, conn.writable) - def rebuild_connection(ds, readable, writable): - sock = ds.detach() - return Connection(sock.detach(), readable, writable) - reduction.register(Connection, reduce_connection) - - def reduce_pipe_connection(conn): - access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | - (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) - dh = reduction.DupHandle(conn.fileno(), access) - return rebuild_pipe_connection, (dh, conn.readable, conn.writable) - def rebuild_pipe_connection(dh, readable, writable): - handle = dh.detach() - return PipeConnection(handle, readable, writable) - reduction.register(PipeConnection, reduce_pipe_connection) - -else: - def reduce_connection(conn): - df = reduction.DupFd(conn.fileno()) - return rebuild_connection, (df, conn.readable, conn.writable) - def rebuild_connection(df, readable, writable): - fd = df.detach() - return Connection(fd, readable, writable) - reduction.register(Connection, reduce_connection) diff --git a/Python313_13_x64_Template/Lib/multiprocessing/context.py b/Python313_13_x64_Template/Lib/multiprocessing/context.py deleted file mode 100644 index 07c8a5d1..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/context.py +++ /dev/null @@ -1,383 +0,0 @@ -import os -import sys -import threading - -from . import process -from . import reduction - -__all__ = () - -# -# Exceptions -# - -class ProcessError(Exception): - pass - -class BufferTooShort(ProcessError): - pass - -class TimeoutError(ProcessError): - pass - -class AuthenticationError(ProcessError): - pass - -# -# Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py -# - -class BaseContext(object): - - ProcessError = ProcessError - BufferTooShort = BufferTooShort - TimeoutError = TimeoutError - AuthenticationError = AuthenticationError - - current_process = staticmethod(process.current_process) - parent_process = staticmethod(process.parent_process) - active_children = staticmethod(process.active_children) - - def cpu_count(self): - '''Returns the number of CPUs in the system''' - num = os.cpu_count() - if num is None: - raise NotImplementedError('cannot determine number of cpus') - else: - return num - - def Manager(self): - '''Returns a manager associated with a running server process - - The managers methods such as `Lock()`, `Condition()` and `Queue()` - can be used to create shared objects. 
- ''' - from .managers import SyncManager - m = SyncManager(ctx=self.get_context()) - m.start() - return m - - def Pipe(self, duplex=True): - '''Returns two connection object connected by a pipe''' - from .connection import Pipe - return Pipe(duplex) - - def Lock(self): - '''Returns a non-recursive lock object''' - from .synchronize import Lock - return Lock(ctx=self.get_context()) - - def RLock(self): - '''Returns a recursive lock object''' - from .synchronize import RLock - return RLock(ctx=self.get_context()) - - def Condition(self, lock=None): - '''Returns a condition object''' - from .synchronize import Condition - return Condition(lock, ctx=self.get_context()) - - def Semaphore(self, value=1): - '''Returns a semaphore object''' - from .synchronize import Semaphore - return Semaphore(value, ctx=self.get_context()) - - def BoundedSemaphore(self, value=1): - '''Returns a bounded semaphore object''' - from .synchronize import BoundedSemaphore - return BoundedSemaphore(value, ctx=self.get_context()) - - def Event(self): - '''Returns an event object''' - from .synchronize import Event - return Event(ctx=self.get_context()) - - def Barrier(self, parties, action=None, timeout=None): - '''Returns a barrier object''' - from .synchronize import Barrier - return Barrier(parties, action, timeout, ctx=self.get_context()) - - def Queue(self, maxsize=0): - '''Returns a queue object''' - from .queues import Queue - return Queue(maxsize, ctx=self.get_context()) - - def JoinableQueue(self, maxsize=0): - '''Returns a queue object''' - from .queues import JoinableQueue - return JoinableQueue(maxsize, ctx=self.get_context()) - - def SimpleQueue(self): - '''Returns a queue object''' - from .queues import SimpleQueue - return SimpleQueue(ctx=self.get_context()) - - def Pool(self, processes=None, initializer=None, initargs=(), - maxtasksperchild=None): - '''Returns a process pool object''' - from .pool import Pool - return Pool(processes, initializer, initargs, maxtasksperchild, - context=self.get_context()) - - def RawValue(self, typecode_or_type, *args): - '''Returns a shared object''' - from .sharedctypes import RawValue - return RawValue(typecode_or_type, *args) - - def RawArray(self, typecode_or_type, size_or_initializer): - '''Returns a shared array''' - from .sharedctypes import RawArray - return RawArray(typecode_or_type, size_or_initializer) - - def Value(self, typecode_or_type, *args, lock=True): - '''Returns a synchronized shared object''' - from .sharedctypes import Value - return Value(typecode_or_type, *args, lock=lock, - ctx=self.get_context()) - - def Array(self, typecode_or_type, size_or_initializer, *, lock=True): - '''Returns a synchronized shared array''' - from .sharedctypes import Array - return Array(typecode_or_type, size_or_initializer, lock=lock, - ctx=self.get_context()) - - def freeze_support(self): - '''Check whether this is a fake forked process in a frozen executable. - If so then run code specified by commandline and exit. - ''' - # gh-140814: allow_none=True avoids locking in the default start - # method, which would cause a later set_start_method() to fail. - # None is safe to pass through: spawn.freeze_support() - # independently detects whether this process is a spawned - # child, so the start method check here is only an optimization. 
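# --- Editorial aside (not part of the diff): the BaseContext factories above
# let callers pin a start method without touching global state, e.g.:
import multiprocessing as mp

def _square(q, n):
    q.put(n * n)

if __name__ == "__main__":
    ctx = mp.get_context("spawn")          # a concrete context (see below)
    q = ctx.Queue()                        # BaseContext.Queue()
    p = ctx.Process(target=_square, args=(q, 7))
    p.start()
    p.join()
    assert q.get() == 49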
- if (getattr(sys, 'frozen', False) - and self.get_start_method(allow_none=True) in ('spawn', None)): - from .spawn import freeze_support - freeze_support() - - def get_logger(self): - '''Return package logger -- if it does not already exist then - it is created. - ''' - from .util import get_logger - return get_logger() - - def log_to_stderr(self, level=None): - '''Turn on logging and add a handler which prints to stderr''' - from .util import log_to_stderr - return log_to_stderr(level) - - def allow_connection_pickling(self): - '''Install support for sending connections and sockets - between processes - ''' - # This is undocumented. In previous versions of multiprocessing - # its only effect was to make socket objects inheritable on Windows. - from . import connection - - def set_executable(self, executable): - '''Sets the path to a python.exe or pythonw.exe binary used to run - child processes instead of sys.executable when using the 'spawn' - start method. Useful for people embedding Python. - ''' - from .spawn import set_executable - set_executable(executable) - - def set_forkserver_preload(self, module_names): - '''Set list of module names to try to load in forkserver process. - This is really just a hint. - ''' - from .forkserver import set_forkserver_preload - set_forkserver_preload(module_names) - - def get_context(self, method=None): - if method is None: - return self - try: - ctx = _concrete_contexts[method] - except KeyError: - raise ValueError('cannot find context for %r' % method) from None - ctx._check_available() - return ctx - - def get_start_method(self, allow_none=False): - return self._name - - def set_start_method(self, method, force=False): - raise ValueError('cannot set start method of concrete context') - - @property - def reducer(self): - '''Controls how objects will be reduced to a form that can be - shared with other processes.''' - return globals().get('reduction') - - @reducer.setter - def reducer(self, reduction): - globals()['reduction'] = reduction - - def _check_available(self): - pass - -# -# Type of default context -- underlying context can be set at most once -# - -class Process(process.BaseProcess): - _start_method = None - @staticmethod - def _Popen(process_obj): - return _default_context.get_context().Process._Popen(process_obj) - - @staticmethod - def _after_fork(): - return _default_context.get_context().Process._after_fork() - -class DefaultContext(BaseContext): - Process = Process - - def __init__(self, context): - self._default_context = context - self._actual_context = None - - def get_context(self, method=None): - if method is None: - if self._actual_context is None: - self._actual_context = self._default_context - return self._actual_context - else: - return super().get_context(method) - - def set_start_method(self, method, force=False): - if self._actual_context is not None and not force: - raise RuntimeError('context has already been set') - if method is None and force: - self._actual_context = None - return - self._actual_context = self.get_context(method) - - def get_start_method(self, allow_none=False): - if self._actual_context is None: - if allow_none: - return None - self._actual_context = self._default_context - return self._actual_context._name - - def get_all_start_methods(self): - """Returns a list of the supported start methods, default first.""" - if sys.platform == 'win32': - return ['spawn'] - else: - methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] - if reduction.HAVE_SEND_HANDLE: - 
methods.append('forkserver') - return methods - - -# -# Context types for fixed start method -# - -if sys.platform != 'win32': - - class ForkProcess(process.BaseProcess): - _start_method = 'fork' - @staticmethod - def _Popen(process_obj): - from .popen_fork import Popen - return Popen(process_obj) - - class SpawnProcess(process.BaseProcess): - _start_method = 'spawn' - @staticmethod - def _Popen(process_obj): - from .popen_spawn_posix import Popen - return Popen(process_obj) - - @staticmethod - def _after_fork(): - # process is spawned, nothing to do - pass - - class ForkServerProcess(process.BaseProcess): - _start_method = 'forkserver' - @staticmethod - def _Popen(process_obj): - from .popen_forkserver import Popen - return Popen(process_obj) - - class ForkContext(BaseContext): - _name = 'fork' - Process = ForkProcess - - class SpawnContext(BaseContext): - _name = 'spawn' - Process = SpawnProcess - - class ForkServerContext(BaseContext): - _name = 'forkserver' - Process = ForkServerProcess - def _check_available(self): - if not reduction.HAVE_SEND_HANDLE: - raise ValueError('forkserver start method not available') - - _concrete_contexts = { - 'fork': ForkContext(), - 'spawn': SpawnContext(), - 'forkserver': ForkServerContext(), - } - if sys.platform == 'darwin': - # bpo-33725: running arbitrary code after fork() is no longer reliable - # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. - _default_context = DefaultContext(_concrete_contexts['spawn']) - else: - _default_context = DefaultContext(_concrete_contexts['fork']) - -else: - - class SpawnProcess(process.BaseProcess): - _start_method = 'spawn' - @staticmethod - def _Popen(process_obj): - from .popen_spawn_win32 import Popen - return Popen(process_obj) - - @staticmethod - def _after_fork(): - # process is spawned, nothing to do - pass - - class SpawnContext(BaseContext): - _name = 'spawn' - Process = SpawnProcess - - _concrete_contexts = { - 'spawn': SpawnContext(), - } - _default_context = DefaultContext(_concrete_contexts['spawn']) - -# -# Force the start method -# - -def _force_start_method(method): - _default_context._actual_context = _concrete_contexts[method] - -# -# Check that the current thread is spawning a child process -# - -_tls = threading.local() - -def get_spawning_popen(): - return getattr(_tls, 'spawning_popen', None) - -def set_spawning_popen(popen): - _tls.spawning_popen = popen - -def assert_spawning(obj): - if get_spawning_popen() is None: - raise RuntimeError( - '%s objects should only be shared between processes' - ' through inheritance' % type(obj).__name__ - ) diff --git a/Python313_13_x64_Template/Lib/multiprocessing/dummy/__init__.py b/Python313_13_x64_Template/Lib/multiprocessing/dummy/__init__.py deleted file mode 100644 index 6a146860..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/dummy/__init__.py +++ /dev/null @@ -1,126 +0,0 @@ -# -# Support for the API of the multiprocessing package using threads -# -# multiprocessing/dummy/__init__.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. 
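The `assert_spawning()` guard at the end of context.py above is what makes ordinary pickling of synchronization primitives fail: outside of a spawning `Popen`, `get_spawning_popen()` is None. A small illustration:

```python
import multiprocessing as mp
import pickle

if __name__ == '__main__':
    lock = mp.Lock()
    try:
        pickle.dumps(lock)      # no spawning Popen on this thread
    except RuntimeError as err:
        # 'Lock objects should only be shared between processes
        #  through inheritance'
        print(err)
```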
-# - -__all__ = [ - 'Process', 'current_process', 'active_children', 'freeze_support', - 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', - 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' - ] - -# -# Imports -# - -import threading -import sys -import weakref -import array - -from .connection import Pipe -from threading import Lock, RLock, Semaphore, BoundedSemaphore -from threading import Event, Condition, Barrier -from queue import Queue - -# -# -# - -class DummyProcess(threading.Thread): - - def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): - threading.Thread.__init__(self, group, target, name, args, kwargs) - self._pid = None - self._children = weakref.WeakKeyDictionary() - self._start_called = False - self._parent = current_process() - - def start(self): - if self._parent is not current_process(): - raise RuntimeError( - "Parent is {0!r} but current_process is {1!r}".format( - self._parent, current_process())) - self._start_called = True - if hasattr(self._parent, '_children'): - self._parent._children[self] = None - threading.Thread.start(self) - - @property - def exitcode(self): - if self._start_called and not self.is_alive(): - return 0 - else: - return None - -# -# -# - -Process = DummyProcess -current_process = threading.current_thread -current_process()._children = weakref.WeakKeyDictionary() - -def active_children(): - children = current_process()._children - for p in list(children): - if not p.is_alive(): - children.pop(p, None) - return list(children) - -def freeze_support(): - pass - -# -# -# - -class Namespace(object): - def __init__(self, /, **kwds): - self.__dict__.update(kwds) - def __repr__(self): - items = list(self.__dict__.items()) - temp = [] - for name, value in items: - if not name.startswith('_'): - temp.append('%s=%r' % (name, value)) - temp.sort() - return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) - -dict = dict -list = list - -def Array(typecode, sequence, lock=True): - return array.array(typecode, sequence) - -class Value(object): - def __init__(self, typecode, value, lock=True): - self._typecode = typecode - self._value = value - - @property - def value(self): - return self._value - - @value.setter - def value(self, value): - self._value = value - - def __repr__(self): - return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) - -def Manager(): - return sys.modules[__name__] - -def shutdown(): - pass - -def Pool(processes=None, initializer=None, initargs=()): - from ..pool import ThreadPool - return ThreadPool(processes, initializer, initargs) - -JoinableQueue = Queue diff --git a/Python313_13_x64_Template/Lib/multiprocessing/forkserver.py b/Python313_13_x64_Template/Lib/multiprocessing/forkserver.py deleted file mode 100644 index 5eacb53f..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/forkserver.py +++ /dev/null @@ -1,373 +0,0 @@ -import atexit -import errno -import os -import selectors -import signal -import socket -import struct -import sys -import threading -import warnings - -from . import connection -from . import process -from .context import reduction -from . import resource_tracker -from . import spawn -from . 
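The deleted `multiprocessing.dummy` package above mirrors the multiprocessing API with threads, so the two are drop-in swappable, which is handy for I/O-bound work or debugging. A minimal sketch:

```python
from multiprocessing.dummy import Pool as ThreadPool   # thread-backed Pool

def square(n):
    return n * n        # stand-in for an I/O-bound call

if __name__ == '__main__':
    with ThreadPool(4) as pool:     # dummy.Pool() returns a ThreadPool
        print(pool.map(square, range(8)))   # [0, 1, 4, 9, 16, 25, 36, 49]
```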
import util - -__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', - 'set_forkserver_preload'] - -# -# -# - -MAXFDS_TO_SEND = 256 -SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t - -# -# Forkserver class -# - -class ForkServer(object): - - def __init__(self): - self._forkserver_address = None - self._forkserver_alive_fd = None - self._forkserver_pid = None - self._inherited_fds = None - self._lock = threading.Lock() - self._preload_modules = ['__main__'] - - def _stop(self): - # Method used by unit tests to stop the server - with self._lock: - self._stop_unlocked() - - def _stop_unlocked(self): - if self._forkserver_pid is None: - return - - # close the "alive" file descriptor asks the server to stop - os.close(self._forkserver_alive_fd) - self._forkserver_alive_fd = None - - os.waitpid(self._forkserver_pid, 0) - self._forkserver_pid = None - - if not util.is_abstract_socket_namespace(self._forkserver_address): - os.unlink(self._forkserver_address) - self._forkserver_address = None - - def set_forkserver_preload(self, modules_names): - '''Set list of module names to try to load in forkserver process.''' - if not all(type(mod) is str for mod in modules_names): - raise TypeError('module_names must be a list of strings') - self._preload_modules = modules_names - - def get_inherited_fds(self): - '''Return list of fds inherited from parent process. - - This returns None if the current process was not started by fork - server. - ''' - return self._inherited_fds - - def connect_to_new_process(self, fds): - '''Request forkserver to create a child process. - - Returns a pair of fds (status_r, data_w). The calling process can read - the child process's pid and (eventually) its returncode from status_r. - The calling process should write to data_w the pickled preparation and - process data. - ''' - self.ensure_running() - if len(fds) + 4 >= MAXFDS_TO_SEND: - raise ValueError('too many fds') - with socket.socket(socket.AF_UNIX) as client: - client.connect(self._forkserver_address) - parent_r, child_w = os.pipe() - child_r, parent_w = os.pipe() - allfds = [child_r, child_w, self._forkserver_alive_fd, - resource_tracker.getfd()] - allfds += fds - try: - reduction.sendfds(client, allfds) - return parent_r, parent_w - except: - os.close(parent_r) - os.close(parent_w) - raise - finally: - os.close(child_r) - os.close(child_w) - - def ensure_running(self): - '''Make sure that a fork server is running. - - This can be called from any process. Note that usually a child - process will just reuse the forkserver started by its parent, so - ensure_running() will do nothing. - ''' - with self._lock: - resource_tracker.ensure_running() - if self._forkserver_pid is not None: - # forkserver was launched before, is it still running? - pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) - if not pid: - # still alive - return - # dead, launch it again - os.close(self._forkserver_alive_fd) - self._forkserver_address = None - self._forkserver_alive_fd = None - self._forkserver_pid = None - - # gh-144503: sys_argv is passed as real argv elements after the - # ``-c cmd`` rather than repr'd into main_kws so that a large - # parent sys.argv cannot push the single ``-c`` command string - # over the OS per-argument length limit (MAX_ARG_STRLEN on Linux). - # The child sees them as sys.argv[1:]. 
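From the caller's side, the `ForkServer` machinery above is reached simply by selecting the 'forkserver' start method: one long-lived server process imports the preload modules once and forks each worker from that warm state. A hedged sketch (POSIX-only; the preload list is "really just a hint", as the code notes):

```python
import multiprocessing as mp

def work(x):
    return x + 1

if __name__ == '__main__':
    mp.set_forkserver_preload(['__main__'])
    ctx = mp.get_context('forkserver')   # raises ValueError on Windows
    with ctx.Pool(2) as pool:
        print(pool.map(work, range(4)))  # [1, 2, 3, 4]
```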
- cmd = ('import sys; ' - 'from multiprocessing.forkserver import main; ' - 'main(%d, %d, %r, sys_argv=sys.argv[1:], **%r)') - - main_kws = {} - sys_argv = None - if self._preload_modules: - data = spawn.get_preparation_data('ignore') - if 'sys_path' in data: - main_kws['sys_path'] = data['sys_path'] - if 'init_main_from_path' in data: - main_kws['main_path'] = data['init_main_from_path'] - if 'sys_argv' in data: - sys_argv = data['sys_argv'] - - with socket.socket(socket.AF_UNIX) as listener: - address = connection.arbitrary_address('AF_UNIX') - listener.bind(address) - if not util.is_abstract_socket_namespace(address): - os.chmod(address, 0o600) - listener.listen() - - # all client processes own the write end of the "alive" pipe; - # when they all terminate the read end becomes ready. - alive_r, alive_w = os.pipe() - try: - fds_to_pass = [listener.fileno(), alive_r] - cmd %= (listener.fileno(), alive_r, self._preload_modules, - main_kws) - exe = spawn.get_executable() - args = [exe] + util._args_from_interpreter_flags() - args += ['-c', cmd] - if sys_argv is not None: - args += sys_argv - pid = util.spawnv_passfds(exe, args, fds_to_pass) - except: - os.close(alive_w) - raise - finally: - os.close(alive_r) - self._forkserver_address = address - self._forkserver_alive_fd = alive_w - self._forkserver_pid = pid - -# -# -# - -def main(listener_fd, alive_r, preload, main_path=None, sys_path=None, - *, sys_argv=None): - '''Run forkserver.''' - if preload: - if sys_argv is not None: - sys.argv[:] = sys_argv - if sys_path is not None: - sys.path[:] = sys_path - if '__main__' in preload and main_path is not None: - process.current_process()._inheriting = True - try: - spawn.import_main_path(main_path) - finally: - del process.current_process()._inheriting - for modname in preload: - try: - __import__(modname) - except ImportError: - pass - - # gh-135335: flush stdout/stderr in case any of the preloaded modules - # wrote to them, otherwise children might inherit buffered data - util._flush_std_streams() - - util._close_stdin() - - sig_r, sig_w = os.pipe() - os.set_blocking(sig_r, False) - os.set_blocking(sig_w, False) - - def sigchld_handler(*_unused): - # Dummy signal handler, doesn't do anything - pass - - handlers = { - # unblocking SIGCHLD allows the wakeup fd to notify our event loop - signal.SIGCHLD: sigchld_handler, - # protect the process from ^C - signal.SIGINT: signal.SIG_IGN, - } - old_handlers = {sig: signal.signal(sig, val) - for (sig, val) in handlers.items()} - - # calling os.write() in the Python signal handler is racy - signal.set_wakeup_fd(sig_w) - - # map child pids to client fds - pid_to_fd = {} - - with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ - selectors.DefaultSelector() as selector: - _forkserver._forkserver_address = listener.getsockname() - - selector.register(listener, selectors.EVENT_READ) - selector.register(alive_r, selectors.EVENT_READ) - selector.register(sig_r, selectors.EVENT_READ) - - while True: - try: - while True: - rfds = [key.fileobj for (key, events) in selector.select()] - if rfds: - break - - if alive_r in rfds: - # EOF because no more client processes left - assert os.read(alive_r, 1) == b'', "Not at EOF?" 
- raise SystemExit - - if sig_r in rfds: - # Got SIGCHLD - os.read(sig_r, 65536) # exhaust - while True: - # Scan for child processes - try: - pid, sts = os.waitpid(-1, os.WNOHANG) - except ChildProcessError: - break - if pid == 0: - break - child_w = pid_to_fd.pop(pid, None) - if child_w is not None: - returncode = os.waitstatus_to_exitcode(sts) - - # Send exit code to client process - try: - write_signed(child_w, returncode) - except BrokenPipeError: - # client vanished - pass - os.close(child_w) - else: - # This shouldn't happen really - warnings.warn('forkserver: waitpid returned ' - 'unexpected pid %d' % pid) - - if listener in rfds: - # Incoming fork request - with listener.accept()[0] as s: - # Receive fds from client - fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) - if len(fds) > MAXFDS_TO_SEND: - raise RuntimeError( - "Too many ({0:n}) fds to send".format( - len(fds))) - child_r, child_w, *fds = fds - s.close() - pid = os.fork() - if pid == 0: - # Child - code = 1 - try: - listener.close() - selector.close() - unused_fds = [alive_r, child_w, sig_r, sig_w] - unused_fds.extend(pid_to_fd.values()) - atexit._clear() - atexit.register(util._exit_function) - code = _serve_one(child_r, fds, - unused_fds, - old_handlers) - except Exception: - sys.excepthook(*sys.exc_info()) - sys.stderr.flush() - finally: - atexit._run_exitfuncs() - os._exit(code) - else: - # Send pid to client process - try: - write_signed(child_w, pid) - except BrokenPipeError: - # client vanished - pass - pid_to_fd[pid] = child_w - os.close(child_r) - for fd in fds: - os.close(fd) - - except OSError as e: - if e.errno != errno.ECONNABORTED: - raise - - -def _serve_one(child_r, fds, unused_fds, handlers): - # close unnecessary stuff and reset signal handlers - signal.set_wakeup_fd(-1) - for sig, val in handlers.items(): - signal.signal(sig, val) - for fd in unused_fds: - os.close(fd) - - (_forkserver._forkserver_alive_fd, - resource_tracker._resource_tracker._fd, - *_forkserver._inherited_fds) = fds - - # Run process object received over pipe - parent_sentinel = os.dup(child_r) - code = spawn._main(child_r, parent_sentinel) - - return code - - -# -# Read and write signed numbers -# - -def read_signed(fd): - data = b'' - length = SIGNED_STRUCT.size - while len(data) < length: - s = os.read(fd, length - len(data)) - if not s: - raise EOFError('unexpected EOF') - data += s - return SIGNED_STRUCT.unpack(data)[0] - -def write_signed(fd, n): - msg = SIGNED_STRUCT.pack(n) - while msg: - nbytes = os.write(fd, msg) - if nbytes == 0: - raise RuntimeError('should not get here') - msg = msg[nbytes:] - -# -# -# - -_forkserver = ForkServer() -ensure_running = _forkserver.ensure_running -get_inherited_fds = _forkserver.get_inherited_fds -connect_to_new_process = _forkserver.connect_to_new_process -set_forkserver_preload = _forkserver.set_forkserver_preload diff --git a/Python313_13_x64_Template/Lib/multiprocessing/managers.py b/Python313_13_x64_Template/Lib/multiprocessing/managers.py deleted file mode 100644 index ef791c27..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/managers.py +++ /dev/null @@ -1,1397 +0,0 @@ -# -# Module providing manager classes for dealing -# with shared objects -# -# multiprocessing/managers.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. 
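The `read_signed()`/`write_signed()` helpers that close forkserver.py above frame every pid and returncode as a fixed-width signed 64-bit integer, so the reader always knows how many bytes to expect. The framing in isolation:

```python
import os
import struct

SIGNED_STRUCT = struct.Struct('q')      # same 8-byte format as above

r, w = os.pipe()
os.write(w, SIGNED_STRUCT.pack(-15))    # e.g. a negative returncode
print(SIGNED_STRUCT.unpack(os.read(r, SIGNED_STRUCT.size))[0])   # -15
os.close(r)
os.close(w)
```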
-# - -__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] - -# -# Imports -# - -import sys -import threading -import signal -import array -import queue -import time -import types -import os -from os import getpid - -from traceback import format_exc - -from . import connection -from .context import reduction, get_spawning_popen, ProcessError -from . import pool -from . import process -from . import util -from . import get_context -try: - from . import shared_memory -except ImportError: - HAS_SHMEM = False -else: - HAS_SHMEM = True - __all__.append('SharedMemoryManager') - -# -# Register some things for pickling -# - -def reduce_array(a): - return array.array, (a.typecode, a.tobytes()) -reduction.register(array.array, reduce_array) - -view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] -def rebuild_as_list(obj): - return list, (list(obj),) -for view_type in view_types: - reduction.register(view_type, rebuild_as_list) -del view_type, view_types - -# -# Type for identifying shared objects -# - -class Token(object): - ''' - Type to uniquely identify a shared object - ''' - __slots__ = ('typeid', 'address', 'id') - - def __init__(self, typeid, address, id): - (self.typeid, self.address, self.id) = (typeid, address, id) - - def __getstate__(self): - return (self.typeid, self.address, self.id) - - def __setstate__(self, state): - (self.typeid, self.address, self.id) = state - - def __repr__(self): - return '%s(typeid=%r, address=%r, id=%r)' % \ - (self.__class__.__name__, self.typeid, self.address, self.id) - -# -# Function for communication with a manager's server process -# - -def dispatch(c, id, methodname, args=(), kwds={}): - ''' - Send a message to manager using connection `c` and return response - ''' - c.send((id, methodname, args, kwds)) - kind, result = c.recv() - if kind == '#RETURN': - return result - try: - raise convert_to_error(kind, result) - finally: - del result # break reference cycle - -def convert_to_error(kind, result): - if kind == '#ERROR': - return result - elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): - if not isinstance(result, str): - raise TypeError( - "Result {0!r} (kind '{1}') type is {2}, not str".format( - result, kind, type(result))) - if kind == '#UNSERIALIZABLE': - return RemoteError('Unserializable message: %s\n' % result) - else: - return RemoteError(result) - else: - return ValueError('Unrecognized message type {!r}'.format(kind)) - -class RemoteError(Exception): - def __str__(self): - return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) - -# -# Functions for finding the method names of an object -# - -def all_methods(obj): - ''' - Return a list of names of methods of `obj` - ''' - temp = [] - for name in dir(obj): - func = getattr(obj, name) - if callable(func): - temp.append(name) - return temp - -def public_methods(obj): - ''' - Return a list of names of methods of `obj` which do not start with '_' - ''' - return [name for name in all_methods(obj) if name[0] != '_'] - -# -# Server which is run in a process controlled by a manager -# - -class Server(object): - ''' - Server class which runs in a process controlled by a manager object - ''' - public = ['shutdown', 'create', 'accept_connection', 'get_methods', - 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] - - def __init__(self, registry, address, authkey, serializer): - if not isinstance(authkey, bytes): - raise TypeError( - "Authkey {0!r} is type {1!s}, not bytes".format( - authkey, type(authkey))) - self.registry = registry - self.authkey = 
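The `dispatch()` function above defines the manager wire protocol: a 4-tuple request and a `(kind, result)` reply. A local `Pipe` can stand in for the authenticated manager connection to show the shape of the exchange (this is a sketch of the message format only, not a real manager session):

```python
from multiprocessing.connection import Pipe

client, server = Pipe()
client.send(('0', 'dummy', (), {}))   # (id, methodname, args, kwds)
ident, methodname, args, kwds = server.recv()
server.send(('#RETURN', None))        # '#TRACEBACK' would carry format_exc()
kind, result = client.recv()
print(kind, result)                   # #RETURN None
```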
process.AuthenticationString(authkey) - Listener, Client = listener_client[serializer] - - # do authentication later - self.listener = Listener(address=address, backlog=128) - self.address = self.listener.address - - self.id_to_obj = {'0': (None, ())} - self.id_to_refcount = {} - self.id_to_local_proxy_obj = {} - self.mutex = threading.Lock() - - def serve_forever(self): - ''' - Run the server forever - ''' - self.stop_event = threading.Event() - process.current_process()._manager_server = self - try: - accepter = threading.Thread(target=self.accepter) - accepter.daemon = True - accepter.start() - try: - while not self.stop_event.is_set(): - self.stop_event.wait(1) - except (KeyboardInterrupt, SystemExit): - pass - finally: - if sys.stdout != sys.__stdout__: # what about stderr? - util.debug('resetting stdout, stderr') - sys.stdout = sys.__stdout__ - sys.stderr = sys.__stderr__ - sys.exit(0) - - def accepter(self): - while True: - try: - c = self.listener.accept() - except OSError: - continue - t = threading.Thread(target=self.handle_request, args=(c,)) - t.daemon = True - t.start() - - def _handle_request(self, c): - request = None - try: - connection.deliver_challenge(c, self.authkey) - connection.answer_challenge(c, self.authkey) - request = c.recv() - ignore, funcname, args, kwds = request - assert funcname in self.public, '%r unrecognized' % funcname - func = getattr(self, funcname) - except Exception: - msg = ('#TRACEBACK', format_exc()) - else: - try: - result = func(c, *args, **kwds) - except Exception: - msg = ('#TRACEBACK', format_exc()) - else: - msg = ('#RETURN', result) - - try: - c.send(msg) - except Exception as e: - try: - c.send(('#TRACEBACK', format_exc())) - except Exception: - pass - util.info('Failure to send message: %r', msg) - util.info(' ... request was %r', request) - util.info(' ... 
exception was %r', e) - - def handle_request(self, conn): - ''' - Handle a new connection - ''' - try: - self._handle_request(conn) - except SystemExit: - # Server.serve_client() calls sys.exit(0) on EOF - pass - finally: - conn.close() - - def serve_client(self, conn): - ''' - Handle requests from the proxies in a particular process/thread - ''' - util.debug('starting server thread to service %r', - threading.current_thread().name) - - recv = conn.recv - send = conn.send - id_to_obj = self.id_to_obj - - while not self.stop_event.is_set(): - - try: - methodname = obj = None - request = recv() - ident, methodname, args, kwds = request - try: - obj, exposed, gettypeid = id_to_obj[ident] - except KeyError as ke: - try: - obj, exposed, gettypeid = \ - self.id_to_local_proxy_obj[ident] - except KeyError: - raise ke - - if methodname not in exposed: - raise AttributeError( - 'method %r of %r object is not in exposed=%r' % - (methodname, type(obj), exposed) - ) - - function = getattr(obj, methodname) - - try: - res = function(*args, **kwds) - except Exception as e: - msg = ('#ERROR', e) - else: - typeid = gettypeid and gettypeid.get(methodname, None) - if typeid: - rident, rexposed = self.create(conn, typeid, res) - token = Token(typeid, self.address, rident) - msg = ('#PROXY', (rexposed, token)) - else: - msg = ('#RETURN', res) - - except AttributeError: - if methodname is None: - msg = ('#TRACEBACK', format_exc()) - else: - try: - fallback_func = self.fallback_mapping[methodname] - result = fallback_func( - self, conn, ident, obj, *args, **kwds - ) - msg = ('#RETURN', result) - except Exception: - msg = ('#TRACEBACK', format_exc()) - - except EOFError: - util.debug('got EOF -- exiting thread serving %r', - threading.current_thread().name) - sys.exit(0) - - except Exception: - msg = ('#TRACEBACK', format_exc()) - - try: - try: - send(msg) - except Exception: - send(('#UNSERIALIZABLE', format_exc())) - except Exception as e: - util.info('exception in thread serving %r', - threading.current_thread().name) - util.info(' ... message was %r', msg) - util.info(' ... exception was %r', e) - conn.close() - sys.exit(1) - - def fallback_getvalue(self, conn, ident, obj): - return obj - - def fallback_str(self, conn, ident, obj): - return str(obj) - - def fallback_repr(self, conn, ident, obj): - return repr(obj) - - fallback_mapping = { - '__str__':fallback_str, - '__repr__':fallback_repr, - '#GETVALUE':fallback_getvalue - } - - def dummy(self, c): - pass - - def debug_info(self, c): - ''' - Return some info --- useful to spot problems with refcounting - ''' - # Perhaps include debug info about 'c'? 
- with self.mutex: - result = [] - keys = list(self.id_to_refcount.keys()) - keys.sort() - for ident in keys: - if ident != '0': - result.append(' %s: refcount=%s\n %s' % - (ident, self.id_to_refcount[ident], - str(self.id_to_obj[ident][0])[:75])) - return '\n'.join(result) - - def number_of_objects(self, c): - ''' - Number of shared objects - ''' - # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' - return len(self.id_to_refcount) - - def shutdown(self, c): - ''' - Shutdown this process - ''' - try: - util.debug('manager received shutdown message') - c.send(('#RETURN', None)) - except: - import traceback - traceback.print_exc() - finally: - self.stop_event.set() - - def create(self, c, typeid, /, *args, **kwds): - ''' - Create a new shared object and return its id - ''' - with self.mutex: - callable, exposed, method_to_typeid, proxytype = \ - self.registry[typeid] - - if callable is None: - if kwds or (len(args) != 1): - raise ValueError( - "Without callable, must have one non-keyword argument") - obj = args[0] - else: - obj = callable(*args, **kwds) - - if exposed is None: - exposed = public_methods(obj) - if method_to_typeid is not None: - if not isinstance(method_to_typeid, dict): - raise TypeError( - "Method_to_typeid {0!r}: type {1!s}, not dict".format( - method_to_typeid, type(method_to_typeid))) - exposed = list(exposed) + list(method_to_typeid) - - ident = '%x' % id(obj) # convert to string because xmlrpclib - # only has 32 bit signed integers - util.debug('%r callable returned object with id %r', typeid, ident) - - self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) - if ident not in self.id_to_refcount: - self.id_to_refcount[ident] = 0 - - self.incref(c, ident) - return ident, tuple(exposed) - - def get_methods(self, c, token): - ''' - Return the methods of the shared object indicated by token - ''' - return tuple(self.id_to_obj[token.id][1]) - - def accept_connection(self, c, name): - ''' - Spawn a new thread to serve this connection - ''' - threading.current_thread().name = name - c.send(('#RETURN', None)) - self.serve_client(c) - - def incref(self, c, ident): - with self.mutex: - try: - self.id_to_refcount[ident] += 1 - except KeyError as ke: - # If no external references exist but an internal (to the - # manager) still does and a new external reference is created - # from it, restore the manager's tracking of it from the - # previously stashed internal ref. - if ident in self.id_to_local_proxy_obj: - self.id_to_refcount[ident] = 1 - self.id_to_obj[ident] = \ - self.id_to_local_proxy_obj[ident] - util.debug('Server re-enabled tracking & INCREF %r', ident) - else: - raise ke - - def decref(self, c, ident): - if ident not in self.id_to_refcount and \ - ident in self.id_to_local_proxy_obj: - util.debug('Server DECREF skipping %r', ident) - return - - with self.mutex: - if self.id_to_refcount[ident] <= 0: - raise AssertionError( - "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( - ident, self.id_to_obj[ident], - self.id_to_refcount[ident])) - self.id_to_refcount[ident] -= 1 - if self.id_to_refcount[ident] == 0: - del self.id_to_refcount[ident] - - if ident not in self.id_to_refcount: - # Two-step process in case the object turns out to contain other - # proxy objects (e.g. a managed list of managed lists). - # Otherwise, deleting self.id_to_obj[ident] would trigger the - # deleting of the stored value (another managed object) which would - # in turn attempt to acquire the mutex that is already held here. 
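The two-step `decref()` above exists precisely for referents that themselves hold proxies, such as a managed list of managed lists. A small illustration of that nested case (nested proxy support is documented behaviour, but the exact round-tripping here is an assumption worth testing):

```python
import multiprocessing as mp

if __name__ == '__main__':
    with mp.Manager() as m:
        inner = m.list([1, 2])
        outer = m.list([inner])   # the referent now stores another proxy
        print(outer[0][:])        # [1, 2], resolved through two proxies
```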
- self.id_to_obj[ident] = (None, (), None) # thread-safe - util.debug('disposing of obj with id %r', ident) - with self.mutex: - del self.id_to_obj[ident] - - -# -# Class to represent state of a manager -# - -class State(object): - __slots__ = ['value'] - INITIAL = 0 - STARTED = 1 - SHUTDOWN = 2 - -# -# Mapping from serializer name to Listener and Client types -# - -listener_client = { - 'pickle' : (connection.Listener, connection.Client), - 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) - } - -# -# Definition of BaseManager -# - -class BaseManager(object): - ''' - Base class for managers - ''' - _registry = {} - _Server = Server - - def __init__(self, address=None, authkey=None, serializer='pickle', - ctx=None, *, shutdown_timeout=1.0): - if authkey is None: - authkey = process.current_process().authkey - self._address = address # XXX not final address if eg ('', 0) - self._authkey = process.AuthenticationString(authkey) - self._state = State() - self._state.value = State.INITIAL - self._serializer = serializer - self._Listener, self._Client = listener_client[serializer] - self._ctx = ctx or get_context() - self._shutdown_timeout = shutdown_timeout - - def get_server(self): - ''' - Return server object with serve_forever() method and address attribute - ''' - if self._state.value != State.INITIAL: - if self._state.value == State.STARTED: - raise ProcessError("Already started server") - elif self._state.value == State.SHUTDOWN: - raise ProcessError("Manager has shut down") - else: - raise ProcessError( - "Unknown state {!r}".format(self._state.value)) - return Server(self._registry, self._address, - self._authkey, self._serializer) - - def connect(self): - ''' - Connect manager object to the server process - ''' - Listener, Client = listener_client[self._serializer] - conn = Client(self._address, authkey=self._authkey) - dispatch(conn, None, 'dummy') - self._state.value = State.STARTED - - def start(self, initializer=None, initargs=()): - ''' - Spawn a server process for this manager object - ''' - if self._state.value != State.INITIAL: - if self._state.value == State.STARTED: - raise ProcessError("Already started server") - elif self._state.value == State.SHUTDOWN: - raise ProcessError("Manager has shut down") - else: - raise ProcessError( - "Unknown state {!r}".format(self._state.value)) - - if initializer is not None and not callable(initializer): - raise TypeError('initializer must be a callable') - - # pipe over which we will retrieve address of server - reader, writer = connection.Pipe(duplex=False) - - # spawn process which runs a server - self._process = self._ctx.Process( - target=type(self)._run_server, - args=(self._registry, self._address, self._authkey, - self._serializer, writer, initializer, initargs), - ) - ident = ':'.join(str(i) for i in self._process._identity) - self._process.name = type(self).__name__ + '-' + ident - self._process.start() - - # get address of server - writer.close() - self._address = reader.recv() - reader.close() - - # register a finalizer - self._state.value = State.STARTED - self.shutdown = util.Finalize( - self, type(self)._finalize_manager, - args=(self._process, self._address, self._authkey, self._state, - self._Client, self._shutdown_timeout), - exitpriority=0 - ) - - @classmethod - def _run_server(cls, registry, address, authkey, serializer, writer, - initializer=None, initargs=()): - ''' - Create a server, report its address and run it - ''' - # bpo-36368: protect server process from KeyboardInterrupt signals - 
signal.signal(signal.SIGINT, signal.SIG_IGN) - - if initializer is not None: - initializer(*initargs) - - # create server - server = cls._Server(registry, address, authkey, serializer) - - # inform parent process of the server's address - writer.send(server.address) - writer.close() - - # run the manager - util.info('manager serving at %r', server.address) - server.serve_forever() - - def _create(self, typeid, /, *args, **kwds): - ''' - Create a new shared object; return the token and exposed tuple - ''' - assert self._state.value == State.STARTED, 'server not yet started' - conn = self._Client(self._address, authkey=self._authkey) - try: - id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) - finally: - conn.close() - return Token(typeid, self._address, id), exposed - - def join(self, timeout=None): - ''' - Join the manager process (if it has been spawned) - ''' - if self._process is not None: - self._process.join(timeout) - if not self._process.is_alive(): - self._process = None - - def _debug_info(self): - ''' - Return some info about the servers shared objects and connections - ''' - conn = self._Client(self._address, authkey=self._authkey) - try: - return dispatch(conn, None, 'debug_info') - finally: - conn.close() - - def _number_of_objects(self): - ''' - Return the number of shared objects - ''' - conn = self._Client(self._address, authkey=self._authkey) - try: - return dispatch(conn, None, 'number_of_objects') - finally: - conn.close() - - def __enter__(self): - if self._state.value == State.INITIAL: - self.start() - if self._state.value != State.STARTED: - if self._state.value == State.INITIAL: - raise ProcessError("Unable to start server") - elif self._state.value == State.SHUTDOWN: - raise ProcessError("Manager has shut down") - else: - raise ProcessError( - "Unknown state {!r}".format(self._state.value)) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.shutdown() - - @staticmethod - def _finalize_manager(process, address, authkey, state, _Client, - shutdown_timeout): - ''' - Shutdown the manager process; will be registered as a finalizer - ''' - if process.is_alive(): - util.info('sending shutdown message to manager') - try: - conn = _Client(address, authkey=authkey) - try: - dispatch(conn, None, 'shutdown') - finally: - conn.close() - except Exception: - pass - - process.join(timeout=shutdown_timeout) - if process.is_alive(): - util.info('manager still alive') - if hasattr(process, 'terminate'): - util.info('trying to `terminate()` manager process') - process.terminate() - process.join(timeout=shutdown_timeout) - if process.is_alive(): - util.info('manager still alive after terminate') - process.kill() - process.join() - - state.value = State.SHUTDOWN - try: - del BaseProxy._address_to_local[address] - except KeyError: - pass - - @property - def address(self): - return self._address - - @classmethod - def register(cls, typeid, callable=None, proxytype=None, exposed=None, - method_to_typeid=None, create_method=True): - ''' - Register a typeid with the manager type - ''' - if '_registry' not in cls.__dict__: - cls._registry = cls._registry.copy() - - if proxytype is None: - proxytype = AutoProxy - - exposed = exposed or getattr(proxytype, '_exposed_', None) - - method_to_typeid = method_to_typeid or \ - getattr(proxytype, '_method_to_typeid_', None) - - if method_to_typeid: - for key, value in list(method_to_typeid.items()): # isinstance? 
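`BaseManager.register()` above is the extension point for custom managed types: each registered typeid becomes a factory method whose calls are dispatched to the server process. A minimal sketch with a hypothetical `Counter` class:

```python
from multiprocessing.managers import BaseManager

class Counter:
    def __init__(self):
        self.n = 0
    def bump(self):
        self.n += 1
        return self.n

class MyManager(BaseManager):
    pass

MyManager.register('Counter', Counter)   # adds a Counter() factory method

if __name__ == '__main__':
    with MyManager() as m:
        c = m.Counter()            # an AutoProxy around the server-side object
        print(c.bump(), c.bump())  # 1 2
```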
- assert type(key) is str, '%r is not a string' % key - assert type(value) is str, '%r is not a string' % value - - cls._registry[typeid] = ( - callable, exposed, method_to_typeid, proxytype - ) - - if create_method: - def temp(self, /, *args, **kwds): - util.debug('requesting creation of a shared %r object', typeid) - token, exp = self._create(typeid, *args, **kwds) - proxy = proxytype( - token, self._serializer, manager=self, - authkey=self._authkey, exposed=exp - ) - conn = self._Client(token.address, authkey=self._authkey) - dispatch(conn, None, 'decref', (token.id,)) - return proxy - temp.__name__ = typeid - setattr(cls, typeid, temp) - -# -# Subclass of set which get cleared after a fork -# - -class ProcessLocalSet(set): - def __init__(self): - util.register_after_fork(self, lambda obj: obj.clear()) - def __reduce__(self): - return type(self), () - -# -# Definition of BaseProxy -# - -class BaseProxy(object): - ''' - A base for proxies of shared objects - ''' - _address_to_local = {} - _mutex = util.ForkAwareThreadLock() - - # Each instance gets a `_serial` number. Unlike `id(...)`, this number - # is never reused. - _next_serial = 1 - - def __init__(self, token, serializer, manager=None, - authkey=None, exposed=None, incref=True, manager_owned=False): - with BaseProxy._mutex: - tls_serials = BaseProxy._address_to_local.get(token.address, None) - if tls_serials is None: - tls_serials = util.ForkAwareLocal(), ProcessLocalSet() - BaseProxy._address_to_local[token.address] = tls_serials - - self._serial = BaseProxy._next_serial - BaseProxy._next_serial += 1 - - # self._tls is used to record the connection used by this - # thread to communicate with the manager at token.address - self._tls = tls_serials[0] - - # self._all_serials is a set used to record the identities of all - # shared objects for which the current process owns references and - # which are in the manager at token.address - self._all_serials = tls_serials[1] - - self._token = token - self._id = self._token.id - self._manager = manager - self._serializer = serializer - self._Client = listener_client[serializer][1] - - # Should be set to True only when a proxy object is being created - # on the manager server; primary use case: nested proxy objects. - # RebuildProxy detects when a proxy is being created on the manager - # and sets this value appropriately. 
- self._owned_by_manager = manager_owned - - if authkey is not None: - self._authkey = process.AuthenticationString(authkey) - elif self._manager is not None: - self._authkey = self._manager._authkey - else: - self._authkey = process.current_process().authkey - - if incref: - self._incref() - - util.register_after_fork(self, BaseProxy._after_fork) - - def _connect(self): - util.debug('making connection to manager') - name = process.current_process().name - if threading.current_thread().name != 'MainThread': - name += '|' + threading.current_thread().name - conn = self._Client(self._token.address, authkey=self._authkey) - dispatch(conn, None, 'accept_connection', (name,)) - self._tls.connection = conn - - def _callmethod(self, methodname, args=(), kwds={}): - ''' - Try to call a method of the referent and return a copy of the result - ''' - try: - conn = self._tls.connection - except AttributeError: - util.debug('thread %r does not own a connection', - threading.current_thread().name) - self._connect() - conn = self._tls.connection - - conn.send((self._id, methodname, args, kwds)) - kind, result = conn.recv() - - if kind == '#RETURN': - return result - elif kind == '#PROXY': - exposed, token = result - proxytype = self._manager._registry[token.typeid][-1] - token.address = self._token.address - proxy = proxytype( - token, self._serializer, manager=self._manager, - authkey=self._authkey, exposed=exposed - ) - conn = self._Client(token.address, authkey=self._authkey) - dispatch(conn, None, 'decref', (token.id,)) - return proxy - try: - raise convert_to_error(kind, result) - finally: - del result # break reference cycle - - def _getvalue(self): - ''' - Get a copy of the value of the referent - ''' - return self._callmethod('#GETVALUE') - - def _incref(self): - if self._owned_by_manager: - util.debug('owned_by_manager skipped INCREF of %r', self._token.id) - return - - conn = self._Client(self._token.address, authkey=self._authkey) - dispatch(conn, None, 'incref', (self._id,)) - util.debug('INCREF %r', self._token.id) - - self._all_serials.add(self._serial) - - state = self._manager and self._manager._state - - self._close = util.Finalize( - self, BaseProxy._decref, - args=(self._token, self._serial, self._authkey, state, - self._tls, self._all_serials, self._Client), - exitpriority=10 - ) - - @staticmethod - def _decref(token, serial, authkey, state, tls, idset, _Client): - idset.discard(serial) - - # check whether manager is still alive - if state is None or state.value == State.STARTED: - # tell manager this process no longer cares about referent - try: - util.debug('DECREF %r', token.id) - conn = _Client(token.address, authkey=authkey) - dispatch(conn, None, 'decref', (token.id,)) - except Exception as e: - util.debug('... 
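In practice, `_callmethod()` and the `__reduce__`/`RebuildProxy` pickling above are what let a proxy be handed to a child process while the manager keeps the referent alive. A short sketch:

```python
import multiprocessing as mp

def appender(shared):
    shared.append('from child')   # proxy._callmethod('append', ...)

if __name__ == '__main__':
    with mp.Manager() as m:
        lst = m.list(['from parent'])   # a ListProxy
        p = mp.Process(target=appender, args=(lst,))
        p.start()
        p.join()
        print(list(lst))   # ['from parent', 'from child']
```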
decref failed %s', e) - - else: - util.debug('DECREF %r -- manager already shutdown', token.id) - - # check whether we can close this thread's connection because - # the process owns no more references to objects for this manager - if not idset and hasattr(tls, 'connection'): - util.debug('thread %r has no more proxies so closing conn', - threading.current_thread().name) - tls.connection.close() - del tls.connection - - def _after_fork(self): - self._manager = None - try: - self._incref() - except Exception as e: - # the proxy may just be for a manager which has shutdown - util.info('incref failed: %s' % e) - - def __reduce__(self): - kwds = {} - if get_spawning_popen() is not None: - kwds['authkey'] = self._authkey - - if getattr(self, '_isauto', False): - kwds['exposed'] = self._exposed_ - return (RebuildProxy, - (AutoProxy, self._token, self._serializer, kwds)) - else: - return (RebuildProxy, - (type(self), self._token, self._serializer, kwds)) - - def __deepcopy__(self, memo): - return self._getvalue() - - def __repr__(self): - return '<%s object, typeid %r at %#x>' % \ - (type(self).__name__, self._token.typeid, id(self)) - - def __str__(self): - ''' - Return representation of the referent (or a fall-back if that fails) - ''' - try: - return self._callmethod('__repr__') - except Exception: - return repr(self)[:-1] + "; '__str__()' failed>" - -# -# Function used for unpickling -# - -def RebuildProxy(func, token, serializer, kwds): - ''' - Function used for unpickling proxy objects. - ''' - server = getattr(process.current_process(), '_manager_server', None) - if server and server.address == token.address: - util.debug('Rebuild a proxy owned by manager, token=%r', token) - kwds['manager_owned'] = True - if token.id not in server.id_to_local_proxy_obj: - server.id_to_local_proxy_obj[token.id] = \ - server.id_to_obj[token.id] - incref = ( - kwds.pop('incref', True) and - not getattr(process.current_process(), '_inheriting', False) - ) - return func(token, serializer, incref=incref, **kwds) - -# -# Functions to create proxies and proxy types -# - -def MakeProxyType(name, exposed, _cache={}): - ''' - Return a proxy type whose methods are given by `exposed` - ''' - exposed = tuple(exposed) - try: - return _cache[(name, exposed)] - except KeyError: - pass - - dic = {} - - for meth in exposed: - exec('''def %s(self, /, *args, **kwds): - return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) - - ProxyType = type(name, (BaseProxy,), dic) - ProxyType._exposed_ = exposed - _cache[(name, exposed)] = ProxyType - return ProxyType - - -def AutoProxy(token, serializer, manager=None, authkey=None, - exposed=None, incref=True, manager_owned=False): - ''' - Return an auto-proxy for `token` - ''' - _Client = listener_client[serializer][1] - - if exposed is None: - conn = _Client(token.address, authkey=authkey) - try: - exposed = dispatch(conn, None, 'get_methods', (token,)) - finally: - conn.close() - - if authkey is None and manager is not None: - authkey = manager._authkey - if authkey is None: - authkey = process.current_process().authkey - - ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) - proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, - incref=incref, manager_owned=manager_owned) - proxy._isauto = True - return proxy - -# -# Types/callables which we will register with SyncManager -# - -class Namespace(object): - def __init__(self, /, **kwds): - self.__dict__.update(kwds) - def __repr__(self): - items = list(self.__dict__.items()) - temp = [] - for 
name, value in items: - if not name.startswith('_'): - temp.append('%s=%r' % (name, value)) - temp.sort() - return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) - -class Value(object): - def __init__(self, typecode, value, lock=True): - self._typecode = typecode - self._value = value - def get(self): - return self._value - def set(self, value): - self._value = value - def __repr__(self): - return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) - value = property(get, set) - -def Array(typecode, sequence, lock=True): - return array.array(typecode, sequence) - -# -# Proxy types used by SyncManager -# - -class IteratorProxy(BaseProxy): - _exposed_ = ('__next__', 'send', 'throw', 'close') - def __iter__(self): - return self - def __next__(self, *args): - return self._callmethod('__next__', args) - def send(self, *args): - return self._callmethod('send', args) - def throw(self, *args): - return self._callmethod('throw', args) - def close(self, *args): - return self._callmethod('close', args) - - -class AcquirerProxy(BaseProxy): - _exposed_ = ('acquire', 'release') - def acquire(self, blocking=True, timeout=None): - args = (blocking,) if timeout is None else (blocking, timeout) - return self._callmethod('acquire', args) - def release(self): - return self._callmethod('release') - def __enter__(self): - return self._callmethod('acquire') - def __exit__(self, exc_type, exc_val, exc_tb): - return self._callmethod('release') - - -class ConditionProxy(AcquirerProxy): - _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') - def wait(self, timeout=None): - return self._callmethod('wait', (timeout,)) - def notify(self, n=1): - return self._callmethod('notify', (n,)) - def notify_all(self): - return self._callmethod('notify_all') - def wait_for(self, predicate, timeout=None): - result = predicate() - if result: - return result - if timeout is not None: - endtime = time.monotonic() + timeout - else: - endtime = None - waittime = None - while not result: - if endtime is not None: - waittime = endtime - time.monotonic() - if waittime <= 0: - break - self.wait(waittime) - result = predicate() - return result - - -class EventProxy(BaseProxy): - _exposed_ = ('is_set', 'set', 'clear', 'wait') - def is_set(self): - return self._callmethod('is_set') - def set(self): - return self._callmethod('set') - def clear(self): - return self._callmethod('clear') - def wait(self, timeout=None): - return self._callmethod('wait', (timeout,)) - - -class BarrierProxy(BaseProxy): - _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') - def wait(self, timeout=None): - return self._callmethod('wait', (timeout,)) - def abort(self): - return self._callmethod('abort') - def reset(self): - return self._callmethod('reset') - @property - def parties(self): - return self._callmethod('__getattribute__', ('parties',)) - @property - def n_waiting(self): - return self._callmethod('__getattribute__', ('n_waiting',)) - @property - def broken(self): - return self._callmethod('__getattribute__', ('broken',)) - - -class NamespaceProxy(BaseProxy): - _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') - def __getattr__(self, key): - if key[0] == '_': - return object.__getattribute__(self, key) - callmethod = object.__getattribute__(self, '_callmethod') - return callmethod('__getattribute__', (key,)) - def __setattr__(self, key, value): - if key[0] == '_': - return object.__setattr__(self, key, value) - callmethod = object.__getattribute__(self, '_callmethod') - return callmethod('__setattr__', 
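The proxy classes above forward each call to the server-side referent, so a manager `Event` coordinates processes exactly as `threading.Event` coordinates threads. A minimal sketch using `EventProxy`:

```python
import multiprocessing as mp

def waiter(ev):
    ev.wait()          # EventProxy._callmethod('wait', (None,))
    print('released')

if __name__ == '__main__':
    with mp.Manager() as m:
        ev = m.Event()
        p = mp.Process(target=waiter, args=(ev,))
        p.start()
        ev.set()       # runs threading.Event.set() in the server
        p.join()
```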
(key, value)) - def __delattr__(self, key): - if key[0] == '_': - return object.__delattr__(self, key) - callmethod = object.__getattribute__(self, '_callmethod') - return callmethod('__delattr__', (key,)) - - -class ValueProxy(BaseProxy): - _exposed_ = ('get', 'set') - def get(self): - return self._callmethod('get') - def set(self, value): - return self._callmethod('set', (value,)) - value = property(get, set) - - __class_getitem__ = classmethod(types.GenericAlias) - - -BaseListProxy = MakeProxyType('BaseListProxy', ( - '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', - '__mul__', '__reversed__', '__rmul__', '__setitem__', - 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', - 'reverse', 'sort', '__imul__' - )) -class ListProxy(BaseListProxy): - def __iadd__(self, value): - self._callmethod('extend', (value,)) - return self - def __imul__(self, value): - self._callmethod('__imul__', (value,)) - return self - - __class_getitem__ = classmethod(types.GenericAlias) - - -_BaseDictProxy = MakeProxyType('DictProxy', ( - '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', - '__setitem__', 'clear', 'copy', 'get', 'items', - 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' - )) -_BaseDictProxy._method_to_typeid_ = { - '__iter__': 'Iterator', - } -class DictProxy(_BaseDictProxy): - __class_getitem__ = classmethod(types.GenericAlias) - - -ArrayProxy = MakeProxyType('ArrayProxy', ( - '__len__', '__getitem__', '__setitem__' - )) - - -BasePoolProxy = MakeProxyType('PoolProxy', ( - 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', - 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', - )) -BasePoolProxy._method_to_typeid_ = { - 'apply_async': 'AsyncResult', - 'map_async': 'AsyncResult', - 'starmap_async': 'AsyncResult', - 'imap': 'Iterator', - 'imap_unordered': 'Iterator' - } -class PoolProxy(BasePoolProxy): - def __enter__(self): - return self - def __exit__(self, exc_type, exc_val, exc_tb): - self.terminate() - -# -# Definition of SyncManager -# - -class SyncManager(BaseManager): - ''' - Subclass of `BaseManager` which supports a number of shared object types. - - The types registered are those intended for the synchronization - of threads, plus `dict`, `list` and `Namespace`. - - The `multiprocessing.Manager()` function creates started instances of - this class. 
- ''' - -SyncManager.register('Queue', queue.Queue) -SyncManager.register('JoinableQueue', queue.Queue) -SyncManager.register('Event', threading.Event, EventProxy) -SyncManager.register('Lock', threading.Lock, AcquirerProxy) -SyncManager.register('RLock', threading.RLock, AcquirerProxy) -SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) -SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, - AcquirerProxy) -SyncManager.register('Condition', threading.Condition, ConditionProxy) -SyncManager.register('Barrier', threading.Barrier, BarrierProxy) -SyncManager.register('Pool', pool.Pool, PoolProxy) -SyncManager.register('list', list, ListProxy) -SyncManager.register('dict', dict, DictProxy) -SyncManager.register('Value', Value, ValueProxy) -SyncManager.register('Array', Array, ArrayProxy) -SyncManager.register('Namespace', Namespace, NamespaceProxy) - -# types returned by methods of PoolProxy -SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) -SyncManager.register('AsyncResult', create_method=False) - -# -# Definition of SharedMemoryManager and SharedMemoryServer -# - -if HAS_SHMEM: - class _SharedMemoryTracker: - "Manages one or more shared memory segments." - - def __init__(self, name, segment_names=[]): - self.shared_memory_context_name = name - self.segment_names = segment_names - - def register_segment(self, segment_name): - "Adds the supplied shared memory block name to tracker." - util.debug(f"Register segment {segment_name!r} in pid {getpid()}") - self.segment_names.append(segment_name) - - def destroy_segment(self, segment_name): - """Calls unlink() on the shared memory block with the supplied name - and removes it from the list of blocks being tracked.""" - util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") - self.segment_names.remove(segment_name) - segment = shared_memory.SharedMemory(segment_name) - segment.close() - segment.unlink() - - def unlink(self): - "Calls destroy_segment() on all tracked shared memory blocks." - for segment_name in self.segment_names[:]: - self.destroy_segment(segment_name) - - def __del__(self): - util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") - self.unlink() - - def __getstate__(self): - return (self.shared_memory_context_name, self.segment_names) - - def __setstate__(self, state): - self.__init__(*state) - - - class SharedMemoryServer(Server): - - public = Server.public + \ - ['track_segment', 'release_segment', 'list_segments'] - - def __init__(self, *args, **kwargs): - Server.__init__(self, *args, **kwargs) - address = self.address - # The address of Linux abstract namespaces can be bytes - if isinstance(address, bytes): - address = os.fsdecode(address) - self.shared_memory_context = \ - _SharedMemoryTracker(f"shm_{address}_{getpid()}") - util.debug(f"SharedMemoryServer started by pid {getpid()}") - - def create(self, c, typeid, /, *args, **kwargs): - """Create a new distributed-shared object (not backed by a shared - memory block) and return its id to be used in a Proxy Object.""" - # Unless set up as a shared proxy, don't make shared_memory_context - # a standard part of kwargs. This makes things easier for supplying - # simple functions. - if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): - kwargs['shared_memory_context'] = self.shared_memory_context - return Server.create(self, c, typeid, *args, **kwargs) - - def shutdown(self, c): - "Call unlink() on all tracked shared memory, terminate the Server." 
- self.shared_memory_context.unlink() - return Server.shutdown(self, c) - - def track_segment(self, c, segment_name): - "Adds the supplied shared memory block name to Server's tracker." - self.shared_memory_context.register_segment(segment_name) - - def release_segment(self, c, segment_name): - """Calls unlink() on the shared memory block with the supplied name - and removes it from the tracker instance inside the Server.""" - self.shared_memory_context.destroy_segment(segment_name) - - def list_segments(self, c): - """Returns a list of names of shared memory blocks that the Server - is currently tracking.""" - return self.shared_memory_context.segment_names - - - class SharedMemoryManager(BaseManager): - """Like SyncManager but uses SharedMemoryServer instead of Server. - - It provides methods for creating and returning SharedMemory instances - and for creating a list-like object (ShareableList) backed by shared - memory. It also provides methods that create and return Proxy Objects - that support synchronization across processes (i.e. multi-process-safe - locks and semaphores). - """ - - _Server = SharedMemoryServer - - def __init__(self, *args, **kwargs): - if os.name == "posix": - # bpo-36867: Ensure the resource_tracker is running before - # launching the manager process, so that concurrent - # shared_memory manipulation both in the manager and in the - # current process does not create two resource_tracker - # processes. - from . import resource_tracker - resource_tracker.ensure_running() - BaseManager.__init__(self, *args, **kwargs) - util.debug(f"{self.__class__.__name__} created by pid {getpid()}") - - def __del__(self): - util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") - - def get_server(self): - 'Better than monkeypatching for now; merge into Server ultimately' - if self._state.value != State.INITIAL: - if self._state.value == State.STARTED: - raise ProcessError("Already started SharedMemoryServer") - elif self._state.value == State.SHUTDOWN: - raise ProcessError("SharedMemoryManager has shut down") - else: - raise ProcessError( - "Unknown state {!r}".format(self._state.value)) - return self._Server(self._registry, self._address, - self._authkey, self._serializer) - - def SharedMemory(self, size): - """Returns a new SharedMemory instance with the specified size in - bytes, to be tracked by the manager.""" - with self._Client(self._address, authkey=self._authkey) as conn: - sms = shared_memory.SharedMemory(None, create=True, size=size) - try: - dispatch(conn, None, 'track_segment', (sms.name,)) - except BaseException as e: - sms.unlink() - raise e - return sms - - def ShareableList(self, sequence): - """Returns a new ShareableList instance populated with the values - from the input sequence, to be tracked by the manager.""" - with self._Client(self._address, authkey=self._authkey) as conn: - sl = shared_memory.ShareableList(sequence) - try: - dispatch(conn, None, 'track_segment', (sl.shm.name,)) - except BaseException as e: - sl.shm.unlink() - raise e - return sl diff --git a/Python313_13_x64_Template/Lib/multiprocessing/popen_fork.py b/Python313_13_x64_Template/Lib/multiprocessing/popen_fork.py deleted file mode 100644 index a57ef6bd..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/popen_fork.py +++ /dev/null @@ -1,87 +0,0 @@ -import atexit -import os -import signal - -from . 
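The `SharedMemoryManager` removed above tracks every segment created through it and unlinks them all at shutdown, which is the whole point of the `track_segment`/`release_segment` plumbing. Typical usage:

```python
from multiprocessing.managers import SharedMemoryManager

if __name__ == '__main__':
    with SharedMemoryManager() as smm:
        shm = smm.SharedMemory(size=128)        # tracked via 'track_segment'
        sl = smm.ShareableList([1, 'two', 3.0])
        shm.buf[:3] = b'abc'
        print(bytes(shm.buf[:3]), sl[1])        # b'abc' two
    # on exit the server calls unlink() on both tracked segments
```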
import util - -__all__ = ['Popen'] - -# -# Start child process using fork -# - -class Popen(object): - method = 'fork' - - def __init__(self, process_obj): - util._flush_std_streams() - self.returncode = None - self.finalizer = None - self._launch(process_obj) - - def duplicate_for_child(self, fd): - return fd - - def poll(self, flag=os.WNOHANG): - if self.returncode is None: - try: - pid, sts = os.waitpid(self.pid, flag) - except OSError: - # Child process not yet created. See #1731717 - # e.errno == errno.ECHILD == 10 - return None - if pid == self.pid: - self.returncode = os.waitstatus_to_exitcode(sts) - return self.returncode - - def wait(self, timeout=None): - if self.returncode is None: - if timeout is not None: - from multiprocessing.connection import wait - if not wait([self.sentinel], timeout): - return None - # This shouldn't block if wait() returned successfully. - return self.poll(os.WNOHANG if timeout == 0.0 else 0) - return self.returncode - - def _send_signal(self, sig): - if self.returncode is None: - try: - os.kill(self.pid, sig) - except ProcessLookupError: - pass - except OSError: - if self.wait(timeout=0.1) is None: - raise - - def terminate(self): - self._send_signal(signal.SIGTERM) - - def kill(self): - self._send_signal(signal.SIGKILL) - - def _launch(self, process_obj): - code = 1 - parent_r, child_w = os.pipe() - child_r, parent_w = os.pipe() - self.pid = os.fork() - if self.pid == 0: - try: - atexit._clear() - atexit.register(util._exit_function) - os.close(parent_r) - os.close(parent_w) - code = process_obj._bootstrap(parent_sentinel=child_r) - finally: - atexit._run_exitfuncs() - os._exit(code) - else: - os.close(child_w) - os.close(child_r) - self.finalizer = util.Finalize(self, util.close_fds, - (parent_r, parent_w,)) - self.sentinel = parent_r - - def close(self): - if self.finalizer is not None: - self.finalizer() diff --git a/Python313_13_x64_Template/Lib/multiprocessing/process.py b/Python313_13_x64_Template/Lib/multiprocessing/process.py deleted file mode 100644 index b45f7df4..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/process.py +++ /dev/null @@ -1,436 +0,0 @@ -# -# Module providing the `Process` class which emulates `threading.Thread` -# -# multiprocessing/process.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. 
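The `sentinel` attribute set in `_launch()` above is the read end of a pipe that becomes ready when the child exits; it is what `wait(timeout)` polls via `multiprocessing.connection.wait()`. A sketch of observing it directly:

```python
import multiprocessing as mp
import time
from multiprocessing.connection import wait

def napper():
    time.sleep(0.2)

if __name__ == '__main__':
    p = mp.Process(target=napper)
    p.start()
    print(wait([p.sentinel], timeout=0.05))   # [] while the child still runs
    print(wait([p.sentinel]))                 # [sentinel] once it exits
    p.join()
```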
-# - -__all__ = ['BaseProcess', 'current_process', 'active_children', - 'parent_process'] - -# -# Imports -# - -import os -import sys -import signal -import itertools -import threading -from _weakrefset import WeakSet - -# -# -# - -try: - ORIGINAL_DIR = os.path.abspath(os.getcwd()) -except OSError: - ORIGINAL_DIR = None - -# -# Public functions -# - -def current_process(): - ''' - Return process object representing the current process - ''' - return _current_process - -def active_children(): - ''' - Return list of process objects corresponding to live child processes - ''' - _cleanup() - return list(_children) - - -def parent_process(): - ''' - Return process object representing the parent process - ''' - return _parent_process - -# -# -# - -def _cleanup(): - # check for processes which have finished - for p in list(_children): - if (child_popen := p._popen) and child_popen.poll() is not None: - _children.discard(p) - -# -# The `Process` class -# - -class BaseProcess(object): - ''' - Process objects represent activity that is run in a separate process - - The class is analogous to `threading.Thread` - ''' - def _Popen(self): - raise NotImplementedError - - def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, - *, daemon=None): - assert group is None, 'group argument must be None for now' - count = next(_process_counter) - self._identity = _current_process._identity + (count,) - self._config = _current_process._config.copy() - self._parent_pid = os.getpid() - self._parent_name = _current_process.name - self._popen = None - self._closed = False - self._target = target - self._args = tuple(args) - self._kwargs = dict(kwargs) - self._name = name or type(self).__name__ + '-' + \ - ':'.join(str(i) for i in self._identity) - if daemon is not None: - self.daemon = daemon - _dangling.add(self) - - def _check_closed(self): - if self._closed: - raise ValueError("process object is closed") - - def run(self): - ''' - Method to be run in sub-process; can be overridden in sub-class - ''' - if self._target: - self._target(*self._args, **self._kwargs) - - def start(self): - ''' - Start child process - ''' - self._check_closed() - assert self._popen is None, 'cannot start a process twice' - assert self._parent_pid == os.getpid(), \ - 'can only start a process object created by current process' - assert not _current_process._config.get('daemon'), \ - 'daemonic processes are not allowed to have children' - _cleanup() - self._popen = self._Popen(self) - self._sentinel = self._popen.sentinel - # Avoid a refcycle if the target function holds an indirect - # reference to the process object (see bpo-30775) - del self._target, self._args, self._kwargs - _children.add(self) - - def terminate(self): - ''' - Terminate process; sends SIGTERM signal or uses TerminateProcess() - ''' - self._check_closed() - self._popen.terminate() - - def kill(self): - ''' - Terminate process; sends SIGKILL signal or uses TerminateProcess() - ''' - self._check_closed() - self._popen.kill() - - def join(self, timeout=None): - ''' - Wait until child process terminates - ''' - self._check_closed() - assert self._parent_pid == os.getpid(), 'can only join a child process' - assert self._popen is not None, 'can only join a started process' - res = self._popen.wait(timeout) - if res is not None: - _children.discard(self) - - def is_alive(self): - ''' - Return whether process is alive - ''' - self._check_closed() - if self is _current_process: - return True - assert self._parent_pid == os.getpid(), 'can only test a child 
process' - - if self._popen is None: - return False - - returncode = self._popen.poll() - if returncode is None: - return True - else: - _children.discard(self) - return False - - def close(self): - ''' - Close the Process object. - - This method releases resources held by the Process object. It is - an error to call this method if the child process is still running. - ''' - if self._popen is not None: - if self._popen.poll() is None: - raise ValueError("Cannot close a process while it is still running. " - "You should first call join() or terminate().") - self._popen.close() - self._popen = None - del self._sentinel - _children.discard(self) - self._closed = True - - @property - def name(self): - return self._name - - @name.setter - def name(self, name): - assert isinstance(name, str), 'name must be a string' - self._name = name - - @property - def daemon(self): - ''' - Return whether process is a daemon - ''' - return self._config.get('daemon', False) - - @daemon.setter - def daemon(self, daemonic): - ''' - Set whether process is a daemon - ''' - assert self._popen is None, 'process has already started' - self._config['daemon'] = daemonic - - @property - def authkey(self): - return self._config['authkey'] - - @authkey.setter - def authkey(self, authkey): - ''' - Set authorization key of process - ''' - self._config['authkey'] = AuthenticationString(authkey) - - @property - def exitcode(self): - ''' - Return exit code of process or `None` if it has yet to stop - ''' - self._check_closed() - if self._popen is None: - return self._popen - return self._popen.poll() - - @property - def ident(self): - ''' - Return identifier (PID) of process or `None` if it has yet to start - ''' - self._check_closed() - if self is _current_process: - return os.getpid() - else: - return self._popen and self._popen.pid - - pid = ident - - @property - def sentinel(self): - ''' - Return a file descriptor (Unix) or handle (Windows) suitable for - waiting for process termination. - ''' - self._check_closed() - try: - return self._sentinel - except AttributeError: - raise ValueError("process not started") from None - - def __repr__(self): - exitcode = None - if self is _current_process: - status = 'started' - elif self._closed: - status = 'closed' - elif self._parent_pid != os.getpid(): - status = 'unknown' - elif self._popen is None: - status = 'initial' - else: - exitcode = self._popen.poll() - if exitcode is not None: - status = 'stopped' - else: - status = 'started' - - info = [type(self).__name__, 'name=%r' % self._name] - if self._popen is not None: - info.append('pid=%s' % self._popen.pid) - info.append('parent=%s' % self._parent_pid) - info.append(status) - if exitcode is not None: - exitcode = _exitcode_to_name.get(exitcode, exitcode) - info.append('exitcode=%s' % exitcode) - if self.daemon: - info.append('daemon') - return '<%s>' % ' '.join(info) - - ## - - def _bootstrap(self, parent_sentinel=None): - from . 
import util, context - global _current_process, _parent_process, _process_counter, _children - - try: - if self._start_method is not None: - context._force_start_method(self._start_method) - _process_counter = itertools.count(1) - _children = set() - util._close_stdin() - old_process = _current_process - _current_process = self - _parent_process = _ParentProcess( - self._parent_name, self._parent_pid, parent_sentinel) - if threading._HAVE_THREAD_NATIVE_ID: - threading.main_thread()._set_native_id() - try: - self._after_fork() - finally: - # delay finalization of the old process object until after - # _run_after_forkers() is executed - del old_process - util.info('child process calling self.run()') - self.run() - exitcode = 0 - except SystemExit as e: - if e.code is None: - exitcode = 0 - elif isinstance(e.code, int): - exitcode = e.code - else: - sys.stderr.write(str(e.code) + '\n') - exitcode = 1 - except: - exitcode = 1 - import traceback - sys.stderr.write('Process %s:\n' % self.name) - traceback.print_exc() - finally: - threading._shutdown() - util.info('process exiting with exitcode %d' % exitcode) - util._flush_std_streams() - - return exitcode - - @staticmethod - def _after_fork(): - from . import util - util._finalizer_registry.clear() - util._run_after_forkers() - - -# -# We subclass bytes to avoid accidental transmission of auth keys over network -# - -class AuthenticationString(bytes): - def __reduce__(self): - from .context import get_spawning_popen - if get_spawning_popen() is None: - raise TypeError( - 'Pickling an AuthenticationString object is ' - 'disallowed for security reasons' - ) - return AuthenticationString, (bytes(self),) - - -# -# Create object representing the parent process -# - -class _ParentProcess(BaseProcess): - - def __init__(self, name, pid, sentinel): - self._identity = () - self._name = name - self._pid = pid - self._parent_pid = None - self._popen = None - self._closed = False - self._sentinel = sentinel - self._config = {} - - def is_alive(self): - from multiprocessing.connection import wait - return not wait([self._sentinel], timeout=0) - - @property - def ident(self): - return self._pid - - def join(self, timeout=None): - ''' - Wait until parent process terminates - ''' - from multiprocessing.connection import wait - wait([self._sentinel], timeout=timeout) - - pid = ident - -# -# Create object representing the main process -# - -class _MainProcess(BaseProcess): - - def __init__(self): - self._identity = () - self._name = 'MainProcess' - self._parent_pid = None - self._popen = None - self._closed = False - self._config = {'authkey': AuthenticationString(os.urandom(32)), - 'semprefix': '/mp'} - # Note that some versions of FreeBSD only allow named - # semaphores to have names of up to 14 characters. Therefore - # we choose a short prefix. - # - # On MacOSX in a sandbox it may be necessary to use a - # different prefix -- see #19478. - # - # Everything in self._config will be inherited by descendant - # processes. 
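Since everything in _MainProcess._config is inherited by descendant processes, a child sees the same authkey bytes as its parent. A short self-check sketch using only the public API (function and variable names are illustrative):

    import multiprocessing as mp

    def check(parent_key):
        # The child's inherited authkey must equal the parent's.
        assert bytes(mp.current_process().authkey) == parent_key

    if __name__ == '__main__':
        key = bytes(mp.current_process().authkey)
        p = mp.Process(target=check, args=(key,))
        p.start()
        p.join()
        assert p.exitcode == 0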
- - def close(self): - pass - - -_parent_process = None -_current_process = _MainProcess() -_process_counter = itertools.count(1) -_children = set() -del _MainProcess - -# -# Give names to some return codes -# - -_exitcode_to_name = {} - -for name, signum in list(signal.__dict__.items()): - if name[:3]=='SIG' and '_' not in name: - _exitcode_to_name[-signum] = f'-{name}' -del name, signum - -# For debug and leak testing -_dangling = WeakSet() diff --git a/Python313_13_x64_Template/Lib/multiprocessing/queues.py b/Python313_13_x64_Template/Lib/multiprocessing/queues.py deleted file mode 100644 index 925f0439..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/queues.py +++ /dev/null @@ -1,399 +0,0 @@ -# -# Module implementing queues -# -# multiprocessing/queues.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. -# - -__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] - -import sys -import os -import threading -import collections -import time -import types -import weakref -import errno - -from queue import Empty, Full - -from . import connection -from . import context -_ForkingPickler = context.reduction.ForkingPickler - -from .util import debug, info, Finalize, register_after_fork, is_exiting - -# -# Queue type using a pipe, buffer and thread -# - -class Queue(object): - - def __init__(self, maxsize=0, *, ctx): - if maxsize <= 0: - # Can raise ImportError (see issues #3770 and #23400) - from .synchronize import SEM_VALUE_MAX as maxsize - self._maxsize = maxsize - self._reader, self._writer = connection.Pipe(duplex=False) - self._rlock = ctx.Lock() - self._opid = os.getpid() - if sys.platform == 'win32': - self._wlock = None - else: - self._wlock = ctx.Lock() - self._sem = ctx.BoundedSemaphore(maxsize) - # For use by concurrent.futures - self._ignore_epipe = False - self._reset() - - if sys.platform != 'win32': - register_after_fork(self, Queue._after_fork) - - def __getstate__(self): - context.assert_spawning(self) - return (self._ignore_epipe, self._maxsize, self._reader, self._writer, - self._rlock, self._wlock, self._sem, self._opid) - - def __setstate__(self, state): - (self._ignore_epipe, self._maxsize, self._reader, self._writer, - self._rlock, self._wlock, self._sem, self._opid) = state - self._reset() - - def _after_fork(self): - debug('Queue._after_fork()') - self._reset(after_fork=True) - - def _reset(self, after_fork=False): - if after_fork: - self._notempty._at_fork_reinit() - else: - self._notempty = threading.Condition(threading.Lock()) - self._buffer = collections.deque() - self._thread = None - self._jointhread = None - self._joincancelled = False - self._closed = False - self._close = None - self._send_bytes = self._writer.send_bytes - self._recv_bytes = self._reader.recv_bytes - self._poll = self._reader.poll - - def put(self, obj, block=True, timeout=None): - if self._closed: - raise ValueError(f"Queue {self!r} is closed") - if not self._sem.acquire(block, timeout): - raise Full - - with self._notempty: - if self._thread is None: - self._start_thread() - self._buffer.append(obj) - self._notempty.notify() - - def get(self, block=True, timeout=None): - if self._closed: - raise ValueError(f"Queue {self!r} is closed") - if block and timeout is None: - with self._rlock: - res = self._recv_bytes() - self._sem.release() - else: - if block: - deadline = time.monotonic() + timeout - if not self._rlock.acquire(block, timeout): - raise Empty - try: - if block: - timeout = deadline - time.monotonic() - if not self._poll(timeout): - 
raise Empty - elif not self._poll(): - raise Empty - res = self._recv_bytes() - self._sem.release() - finally: - self._rlock.release() - # unserialize the data after having released the lock - return _ForkingPickler.loads(res) - - def qsize(self): - # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() - return self._maxsize - self._sem._semlock._get_value() - - def empty(self): - return not self._poll() - - def full(self): - return self._sem._semlock._is_zero() - - def get_nowait(self): - return self.get(False) - - def put_nowait(self, obj): - return self.put(obj, False) - - def close(self): - self._closed = True - close = self._close - if close: - self._close = None - close() - - def join_thread(self): - debug('Queue.join_thread()') - assert self._closed, "Queue {0!r} not closed".format(self) - if self._jointhread: - self._jointhread() - - def cancel_join_thread(self): - debug('Queue.cancel_join_thread()') - self._joincancelled = True - try: - self._jointhread.cancel() - except AttributeError: - pass - - def _terminate_broken(self): - # Close a Queue on error. - - # gh-94777: Prevent queue writing to a pipe which is no longer read. - self._reader.close() - - # gh-107219: Close the connection writer which can unblock - # Queue._feed() if it was stuck in send_bytes(). - if sys.platform == 'win32': - self._writer.close() - - self.close() - self.join_thread() - - def _start_thread(self): - debug('Queue._start_thread()') - - # Start thread which transfers data from buffer to pipe - self._buffer.clear() - self._thread = threading.Thread( - target=Queue._feed, - args=(self._buffer, self._notempty, self._send_bytes, - self._wlock, self._reader.close, self._writer.close, - self._ignore_epipe, self._on_queue_feeder_error, - self._sem), - name='QueueFeederThread', - daemon=True, - ) - - try: - debug('doing self._thread.start()') - self._thread.start() - debug('... done self._thread.start()') - except: - # gh-109047: During Python finalization, creating a thread - # can fail with RuntimeError. - self._thread = None - raise - - if not self._joincancelled: - self._jointhread = Finalize( - self._thread, Queue._finalize_join, - [weakref.ref(self._thread)], - exitpriority=-5 - ) - - # Send sentinel to the thread queue object when garbage collected - self._close = Finalize( - self, Queue._finalize_close, - [self._buffer, self._notempty], - exitpriority=10 - ) - - @staticmethod - def _finalize_join(twr): - debug('joining queue thread') - thread = twr() - if thread is not None: - thread.join() - debug('... queue thread joined') - else: - debug('... 
queue thread already dead')
-
-    @staticmethod
-    def _finalize_close(buffer, notempty):
-        debug('telling queue thread to quit')
-        with notempty:
-            buffer.append(_sentinel)
-            notempty.notify()
-
-    @staticmethod
-    def _feed(buffer, notempty, send_bytes, writelock, reader_close,
-              writer_close, ignore_epipe, onerror, queue_sem):
-        debug('starting thread to feed data to pipe')
-        nacquire = notempty.acquire
-        nrelease = notempty.release
-        nwait = notempty.wait
-        bpopleft = buffer.popleft
-        sentinel = _sentinel
-        if sys.platform != 'win32':
-            wacquire = writelock.acquire
-            wrelease = writelock.release
-        else:
-            wacquire = None
-
-        while 1:
-            try:
-                nacquire()
-                try:
-                    if not buffer:
-                        nwait()
-                finally:
-                    nrelease()
-                try:
-                    while 1:
-                        obj = bpopleft()
-                        if obj is sentinel:
-                            debug('feeder thread got sentinel -- exiting')
-                            reader_close()
-                            writer_close()
-                            return
-
-                        # serialize the data before acquiring the lock
-                        obj = _ForkingPickler.dumps(obj)
-                        if wacquire is None:
-                            send_bytes(obj)
-                        else:
-                            wacquire()
-                            try:
-                                send_bytes(obj)
-                            finally:
-                                wrelease()
-                except IndexError:
-                    pass
-            except Exception as e:
-                if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
-                    return
-                # Since this runs in a daemon thread the resources it uses
-                # may become unusable while the process is cleaning up.
-                # We ignore errors which happen after the process has
-                # started to clean up.
-                if is_exiting():
-                    info('error in queue thread: %s', e)
-                    return
-                else:
-                    # Since the object has not been sent in the queue, we need
-                    # to decrease the size of the queue. The error acts as
-                    # if the object had been silently removed from the queue
-                    # and this step is necessary to have a properly working
-                    # queue.
-                    queue_sem.release()
-                    onerror(e, obj)
-
-    @staticmethod
-    def _on_queue_feeder_error(e, obj):
-        """
-        Private API hook called when feeding data in the background thread
-        raises an exception. For overriding by concurrent.futures.
-        """
-        import traceback
-        traceback.print_exc()
-
-    __class_getitem__ = classmethod(types.GenericAlias)
-
-
-_sentinel = object()
-
-#
-# A queue type which also supports join() and task_done() methods
-#
-# Note that if you do not call task_done() for each finished task then
-# eventually the counter's semaphore may overflow causing Bad Things
-# to happen.
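The comment above is the crux of JoinableQueue: every get() must be paired with exactly one task_done(), or join() blocks forever and the unfinished-task semaphore can eventually overflow. A minimal consumer sketch (names are illustrative):

    import multiprocessing as mp

    def consumer(q):
        while True:
            item = q.get()
            try:
                if item is None:
                    break
            finally:
                q.task_done()  # pair every get() with task_done()

    if __name__ == '__main__':
        q = mp.JoinableQueue()
        p = mp.Process(target=consumer, args=(q,))
        p.start()
        for i in range(3):
            q.put(i)
        q.put(None)
        q.join()   # returns once task_done() has been called for every item
        p.join()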
-# - -class JoinableQueue(Queue): - - def __init__(self, maxsize=0, *, ctx): - Queue.__init__(self, maxsize, ctx=ctx) - self._unfinished_tasks = ctx.Semaphore(0) - self._cond = ctx.Condition() - - def __getstate__(self): - return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) - - def __setstate__(self, state): - Queue.__setstate__(self, state[:-2]) - self._cond, self._unfinished_tasks = state[-2:] - - def put(self, obj, block=True, timeout=None): - if self._closed: - raise ValueError(f"Queue {self!r} is closed") - if not self._sem.acquire(block, timeout): - raise Full - - with self._notempty, self._cond: - if self._thread is None: - self._start_thread() - self._buffer.append(obj) - self._unfinished_tasks.release() - self._notempty.notify() - - def task_done(self): - with self._cond: - if not self._unfinished_tasks.acquire(False): - raise ValueError('task_done() called too many times') - if self._unfinished_tasks._semlock._is_zero(): - self._cond.notify_all() - - def join(self): - with self._cond: - if not self._unfinished_tasks._semlock._is_zero(): - self._cond.wait() - -# -# Simplified Queue type -- really just a locked pipe -# - -class SimpleQueue(object): - - def __init__(self, *, ctx): - self._reader, self._writer = connection.Pipe(duplex=False) - self._rlock = ctx.Lock() - self._poll = self._reader.poll - if sys.platform == 'win32': - self._wlock = None - else: - self._wlock = ctx.Lock() - - def close(self): - self._reader.close() - self._writer.close() - - def empty(self): - return not self._poll() - - def __getstate__(self): - context.assert_spawning(self) - return (self._reader, self._writer, self._rlock, self._wlock) - - def __setstate__(self, state): - (self._reader, self._writer, self._rlock, self._wlock) = state - self._poll = self._reader.poll - - def get(self): - with self._rlock: - res = self._reader.recv_bytes() - # unserialize the data after having released the lock - return _ForkingPickler.loads(res) - - def put(self, obj): - # serialize the data before acquiring the lock - obj = _ForkingPickler.dumps(obj) - if self._wlock is None: - # writes to a message oriented win32 pipe are atomic - self._writer.send_bytes(obj) - else: - with self._wlock: - self._writer.send_bytes(obj) - - __class_getitem__ = classmethod(types.GenericAlias) diff --git a/Python313_13_x64_Template/Lib/multiprocessing/reduction.py b/Python313_13_x64_Template/Lib/multiprocessing/reduction.py deleted file mode 100644 index 5593f068..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/reduction.py +++ /dev/null @@ -1,281 +0,0 @@ -# -# Module which deals with pickling of objects. -# -# multiprocessing/reduction.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. -# - -from abc import ABCMeta -import copyreg -import functools -import io -import os -import pickle -import socket -import sys - -from . 
import context - -__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] - - -HAVE_SEND_HANDLE = (sys.platform == 'win32' or - (hasattr(socket, 'CMSG_LEN') and - hasattr(socket, 'SCM_RIGHTS') and - hasattr(socket.socket, 'sendmsg'))) - -# -# Pickler subclass -# - -class ForkingPickler(pickle.Pickler): - '''Pickler subclass used by multiprocessing.''' - _extra_reducers = {} - _copyreg_dispatch_table = copyreg.dispatch_table - - def __init__(self, *args): - super().__init__(*args) - self.dispatch_table = self._copyreg_dispatch_table.copy() - self.dispatch_table.update(self._extra_reducers) - - @classmethod - def register(cls, type, reduce): - '''Register a reduce function for a type.''' - cls._extra_reducers[type] = reduce - - @classmethod - def dumps(cls, obj, protocol=None): - buf = io.BytesIO() - cls(buf, protocol).dump(obj) - return buf.getbuffer() - - loads = pickle.loads - -register = ForkingPickler.register - -def dump(obj, file, protocol=None): - '''Replacement for pickle.dump() using ForkingPickler.''' - ForkingPickler(file, protocol).dump(obj) - -# -# Platform specific definitions -# - -if sys.platform == 'win32': - # Windows - __all__ += ['DupHandle', 'duplicate', 'steal_handle'] - import _winapi - - def duplicate(handle, target_process=None, inheritable=False, - *, source_process=None): - '''Duplicate a handle. (target_process is a handle not a pid!)''' - current_process = _winapi.GetCurrentProcess() - if source_process is None: - source_process = current_process - if target_process is None: - target_process = current_process - return _winapi.DuplicateHandle( - source_process, handle, target_process, - 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) - - def steal_handle(source_pid, handle): - '''Steal a handle from process identified by source_pid.''' - source_process_handle = _winapi.OpenProcess( - _winapi.PROCESS_DUP_HANDLE, False, source_pid) - try: - return _winapi.DuplicateHandle( - source_process_handle, handle, - _winapi.GetCurrentProcess(), 0, False, - _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) - finally: - _winapi.CloseHandle(source_process_handle) - - def send_handle(conn, handle, destination_pid): - '''Send a handle over a local connection.''' - dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) - conn.send(dh) - - def recv_handle(conn): - '''Receive a handle over a local connection.''' - return conn.recv().detach() - - class DupHandle(object): - '''Picklable wrapper for a handle.''' - def __init__(self, handle, access, pid=None): - if pid is None: - # We just duplicate the handle in the current process and - # let the receiving process steal the handle. - pid = os.getpid() - proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) - try: - self._handle = _winapi.DuplicateHandle( - _winapi.GetCurrentProcess(), - handle, proc, access, False, 0) - finally: - _winapi.CloseHandle(proc) - self._access = access - self._pid = pid - - def detach(self): - '''Get the handle. This should only be called once.''' - # retrieve handle from process which currently owns it - if self._pid == os.getpid(): - # The handle has already been duplicated for this process. - return self._handle - # We must steal the handle from the process whose pid is self._pid. 
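# DUPLICATE_CLOSE_SOURCE in the call below closes the source handle in the
# owning process as a side effect of the duplication, so the handle that
# __init__ parked in the remote process does not leak once detach() has run.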
- proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, - self._pid) - try: - return _winapi.DuplicateHandle( - proc, self._handle, _winapi.GetCurrentProcess(), - self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) - finally: - _winapi.CloseHandle(proc) - -else: - # Unix - __all__ += ['DupFd', 'sendfds', 'recvfds'] - import array - - # On MacOSX we should acknowledge receipt of fds -- see Issue14669 - ACKNOWLEDGE = sys.platform == 'darwin' - - def sendfds(sock, fds): - '''Send an array of fds over an AF_UNIX socket.''' - fds = array.array('i', fds) - msg = bytes([len(fds) % 256]) - sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) - if ACKNOWLEDGE and sock.recv(1) != b'A': - raise RuntimeError('did not receive acknowledgement of fd') - - def recvfds(sock, size): - '''Receive an array of fds over an AF_UNIX socket.''' - a = array.array('i') - bytes_size = a.itemsize * size - msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) - if not msg and not ancdata: - raise EOFError - try: - if ACKNOWLEDGE: - sock.send(b'A') - if len(ancdata) != 1: - raise RuntimeError('received %d items of ancdata' % - len(ancdata)) - cmsg_level, cmsg_type, cmsg_data = ancdata[0] - if (cmsg_level == socket.SOL_SOCKET and - cmsg_type == socket.SCM_RIGHTS): - if len(cmsg_data) % a.itemsize != 0: - raise ValueError - a.frombytes(cmsg_data) - if len(a) % 256 != msg[0]: - raise AssertionError( - "Len is {0:n} but msg[0] is {1!r}".format( - len(a), msg[0])) - return list(a) - except (ValueError, IndexError): - pass - raise RuntimeError('Invalid data received') - - def send_handle(conn, handle, destination_pid): - '''Send a handle over a local connection.''' - with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: - sendfds(s, [handle]) - - def recv_handle(conn): - '''Receive a handle over a local connection.''' - with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: - return recvfds(s, 1)[0] - - def DupFd(fd): - '''Return a wrapper for an fd.''' - popen_obj = context.get_spawning_popen() - if popen_obj is not None: - return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) - elif HAVE_SEND_HANDLE: - from . 
import resource_sharer - return resource_sharer.DupFd(fd) - else: - raise ValueError('SCM_RIGHTS appears not to be available') - -# -# Try making some callable types picklable -# - -def _reduce_method(m): - if m.__self__ is None: - return getattr, (m.__class__, m.__func__.__name__) - else: - return getattr, (m.__self__, m.__func__.__name__) -class _C: - def f(self): - pass -register(type(_C().f), _reduce_method) - - -def _reduce_method_descriptor(m): - return getattr, (m.__objclass__, m.__name__) -register(type(list.append), _reduce_method_descriptor) -register(type(int.__add__), _reduce_method_descriptor) - - -def _reduce_partial(p): - return _rebuild_partial, (p.func, p.args, p.keywords or {}) -def _rebuild_partial(func, args, keywords): - return functools.partial(func, *args, **keywords) -register(functools.partial, _reduce_partial) - -# -# Make sockets picklable -# - -if sys.platform == 'win32': - def _reduce_socket(s): - from .resource_sharer import DupSocket - return _rebuild_socket, (DupSocket(s),) - def _rebuild_socket(ds): - return ds.detach() - register(socket.socket, _reduce_socket) - -else: - def _reduce_socket(s): - df = DupFd(s.fileno()) - return _rebuild_socket, (df, s.family, s.type, s.proto) - def _rebuild_socket(df, family, type, proto): - fd = df.detach() - return socket.socket(family, type, proto, fileno=fd) - register(socket.socket, _reduce_socket) - - -class AbstractReducer(metaclass=ABCMeta): - '''Abstract base class for use in implementing a Reduction class - suitable for use in replacing the standard reduction mechanism - used in multiprocessing.''' - ForkingPickler = ForkingPickler - register = register - dump = dump - send_handle = send_handle - recv_handle = recv_handle - - if sys.platform == 'win32': - steal_handle = steal_handle - duplicate = duplicate - DupHandle = DupHandle - else: - sendfds = sendfds - recvfds = recvfds - DupFd = DupFd - - _reduce_method = _reduce_method - _reduce_method_descriptor = _reduce_method_descriptor - _rebuild_partial = _rebuild_partial - _reduce_socket = _reduce_socket - _rebuild_socket = _rebuild_socket - - def __init__(self, *args): - register(type(_C().f), _reduce_method) - register(type(list.append), _reduce_method_descriptor) - register(type(int.__add__), _reduce_method_descriptor) - register(functools.partial, _reduce_partial) - register(socket.socket, _reduce_socket) diff --git a/Python313_13_x64_Template/Lib/multiprocessing/resource_tracker.py b/Python313_13_x64_Template/Lib/multiprocessing/resource_tracker.py deleted file mode 100644 index 22e3bbcf..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/resource_tracker.py +++ /dev/null @@ -1,420 +0,0 @@ -############################################################################### -# Server process to keep track of unlinked resources (like shared memory -# segments, semaphores etc.) and clean them. -# -# On Unix we run a server process which keeps track of unlinked -# resources. The server ignores SIGINT and SIGTERM and reads from a -# pipe. Every other process of the program has a copy of the writable -# end of the pipe, so we get EOF when all other processes have exited. -# Then the server process unlinks any remaining resource names. -# -# This is important because there may be system limits for such resources: for -# instance, the system only supports a limited number of named semaphores, and -# shared-memory segments live in the RAM. If a python process leaks such a -# resource, this resource will not be removed till the next reboot. 
Without
-# this resource tracker process, "killall python" would probably leave unlinked
-# resources.

-import base64
-import os
-import signal
-import sys
-import threading
-import warnings
-from collections import deque
-
-import json
-
-from . import spawn
-from . import util
-
-__all__ = ['ensure_running', 'register', 'unregister']
-
-_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')
-_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
-
-def cleanup_noop(name):
-    raise RuntimeError('noop should never be registered or cleaned up')
-
-_CLEANUP_FUNCS = {
-    'noop': cleanup_noop,
-    'dummy': lambda name: None,  # Dummy resource used in tests
-}
-
-if os.name == 'posix':
-    import _multiprocessing
-    import _posixshmem
-
-    # Use sem_unlink() to clean up named semaphores.
-    #
-    # sem_unlink() may be missing if the Python build process detected the
-    # absence of POSIX named semaphores. In that case, no named semaphores were
-    # ever opened, so no cleanup would be necessary.
-    if hasattr(_multiprocessing, 'sem_unlink'):
-        _CLEANUP_FUNCS.update({
-            'semaphore': _multiprocessing.sem_unlink,
-        })
-    _CLEANUP_FUNCS.update({
-        'shared_memory': _posixshmem.shm_unlink,
-    })
-
-
-class ReentrantCallError(RuntimeError):
-    pass
-
-
-class ResourceTracker(object):
-
-    def __init__(self):
-        self._lock = threading.RLock()
-        self._fd = None
-        self._pid = None
-        self._exitcode = None
-        self._reentrant_messages = deque()
-
-        # True to use colon-separated lines, rather than JSON lines,
-        # for internal communication. (Mainly for testing).
-        # Filenames not supported by the simple format will always be sent
-        # using JSON.
-        # The reader should understand all formats.
-        self._use_simple_format = True
-
-    def _reentrant_call_error(self):
-        # gh-109629: this happens if an explicit call to the ResourceTracker
-        # gets interrupted by a garbage collection, invoking a finalizer (*)
-        # that itself calls back into ResourceTracker.
-        # (*) for example the SemLock finalizer
-        raise ReentrantCallError(
-            "Reentrant call into the multiprocessing resource tracker")
-
-    def __del__(self):
-        # making sure child processes are cleaned before ResourceTracker
-        # gets destructed.
-        # see https://github.com/python/cpython/issues/88887
-        self._stop(use_blocking_lock=False)
-
-    def _stop(self, use_blocking_lock=True):
-        if use_blocking_lock:
-            with self._lock:
-                self._stop_locked()
-        else:
-            acquired = self._lock.acquire(blocking=False)
-            try:
-                self._stop_locked()
-            finally:
-                if acquired:
-                    self._lock.release()
-
-    def _stop_locked(
-        self,
-        close=os.close,
-        waitpid=os.waitpid,
-        waitstatus_to_exitcode=os.waitstatus_to_exitcode,
-    ):
-        # This shouldn't happen (it might when called by a finalizer)
-        # so we check for it anyway.
-        if self._lock._recursion_count() > 1:
-            raise self._reentrant_call_error()
-        if self._fd is None:
-            # not running
-            return
-        if self._pid is None:
-            return
-
-        # closing the "alive" file descriptor stops main()
-        close(self._fd)
-        self._fd = None
-
-        try:
-            _, status = waitpid(self._pid, 0)
-        except ChildProcessError:
-            self._pid = None
-            self._exitcode = None
-            return
-
-        self._pid = None
-
-        try:
-            self._exitcode = waitstatus_to_exitcode(status)
-        except ValueError:
-            # os.waitstatus_to_exitcode may raise an exception for invalid values
-            self._exitcode = None
-
-    def getfd(self):
-        self.ensure_running()
-        return self._fd
-
-    def ensure_running(self):
-        '''Make sure that resource tracker process is running.
-
-        This can be run from any process. Usually a child process will use
-        the resource created by its parent.'''
-        return self._ensure_running_and_write()
-
-    def _teardown_dead_process(self):
-        os.close(self._fd)
-
-        # Clean-up to avoid dangling processes.
-        try:
-            # _pid can be None if this process is a child from another
-            # python process, which has started the resource_tracker.
-            if self._pid is not None:
-                os.waitpid(self._pid, 0)
-        except ChildProcessError:
-            # The resource_tracker has already been terminated.
-            pass
-        self._fd = None
-        self._pid = None
-        self._exitcode = None
-
-        warnings.warn('resource_tracker: process died unexpectedly, '
-                      'relaunching. Some resources might leak.')
-
-    def _launch(self):
-        fds_to_pass = []
-        try:
-            fds_to_pass.append(sys.stderr.fileno())
-        except Exception:
-            pass
-        r, w = os.pipe()
-        try:
-            fds_to_pass.append(r)
-            # process will outlive us, so no need to wait on pid
-            exe = spawn.get_executable()
-            args = [
-                exe,
-                *util._args_from_interpreter_flags(),
-                '-c',
-                f'from multiprocessing.resource_tracker import main;main({r})',
-            ]
-            # bpo-33613: Register a signal mask that will block the signals.
-            # This signal mask will be inherited by the child that is going
-            # to be spawned and will protect the child from a race condition
-            # that can make the child die before it registers signal handlers
-            # for SIGINT and SIGTERM. The mask is unregistered after spawning
-            # the child.
-            prev_sigmask = None
-            try:
-                if _HAVE_SIGMASK:
-                    prev_sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)
-                pid = util.spawnv_passfds(exe, args, fds_to_pass)
-            finally:
-                if prev_sigmask is not None:
-                    signal.pthread_sigmask(signal.SIG_SETMASK, prev_sigmask)
-        except:
-            os.close(w)
-            raise
-        else:
-            self._fd = w
-            self._pid = pid
-        finally:
-            os.close(r)
-
-    def _make_probe_message(self):
-        """Return a probe message."""
-        if self._use_simple_format:
-            return b'PROBE:0:noop\n'
-        return (
-            json.dumps(
-                {"cmd": "PROBE", "rtype": "noop"},
-                ensure_ascii=True,
-                separators=(",", ":"),
-            )
-            + "\n"
-        ).encode("ascii")
-
-    def _ensure_running_and_write(self, msg=None):
-        with self._lock:
-            if self._lock._recursion_count() > 1:
-                # The code below is certainly not reentrant-safe, so bail out
-                if msg is None:
-                    raise self._reentrant_call_error()
-                return self._reentrant_messages.append(msg)
-
-            if self._fd is not None:
-                # resource tracker was launched before, is it still running?
-                if msg is None:
-                    to_send = self._make_probe_message()
-                else:
-                    to_send = msg
-                try:
-                    self._write(to_send)
-                except OSError:
-                    self._teardown_dead_process()
-                    self._launch()
-
-                msg = None  # message was sent in probe
-            else:
-                self._launch()
-
-            while True:
-                try:
-                    reentrant_msg = self._reentrant_messages.popleft()
-                except IndexError:
-                    break
-                self._write(reentrant_msg)
-            if msg is not None:
-                self._write(msg)
-
-    def _check_alive(self):
-        '''Check that the pipe has not been closed by sending a probe.'''
-        try:
-            # We cannot use send here as it calls ensure_running, creating
-            # a cycle.
-            os.write(self._fd, self._make_probe_message())
-        except OSError:
-            return False
-        else:
-            return True
-
-    def register(self, name, rtype):
-        '''Register name of resource with resource tracker.'''
-        self._send('REGISTER', name, rtype)
-
-    def unregister(self, name, rtype):
-        '''Unregister name of resource with resource tracker.'''
-        self._send('UNREGISTER', name, rtype)
-
-    def _write(self, msg):
-        nbytes = os.write(self._fd, msg)
-        assert nbytes == len(msg), f"{nbytes=} != {len(msg)=}"
-
-    def _send(self, cmd, name, rtype):
-        if self._use_simple_format and '\n' not in name:
-            msg = f"{cmd}:{name}:{rtype}\n".encode("ascii")
-            if len(msg) > 512:
-                # posix guarantees that writes to a pipe of less than PIPE_BUF
-                # bytes are atomic, and that PIPE_BUF >= 512
-                raise ValueError('msg too long')
-            self._ensure_running_and_write(msg)
-            return
-
-        # POSIX guarantees that writes to a pipe of less than PIPE_BUF (512 on Linux)
-        # bytes are atomic. Therefore, we want the message to be shorter than 512 bytes.
-        # POSIX shm_open() and sem_open() require the name, including its leading slash,
-        # to be at most NAME_MAX bytes (255 on Linux).
-        # With json.dumps(..., ensure_ascii=True) every non-ASCII byte becomes a 6-char
-        # escape like \uDC80.
-        # As we want the overall message to be kept atomic and therefore smaller than
-        # 512 bytes, we encode the raw name bytes with URL-safe Base64 - so a 255-byte
-        # name will not exceed 340 bytes.
-        b = name.encode('utf-8', 'surrogateescape')
-        if len(b) > 255:
-            raise ValueError('shared memory name too long (max 255 bytes)')
-        b64 = base64.urlsafe_b64encode(b).decode('ascii')
-
-        payload = {"cmd": cmd, "rtype": rtype, "base64_name": b64}
-        msg = (json.dumps(payload, ensure_ascii=True, separators=(",", ":")) + "\n").encode("ascii")
-
-        # The entire JSON message is guaranteed < PIPE_BUF (512 bytes) by construction.
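-        # Worst case: base64 of a 255-byte name is ceil(255 / 3) * 4 = 340
-        # bytes, and the fixed JSON framing plus the longest cmd/rtype
-        # strings add well under the remaining 172 bytes.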
- assert len(msg) <= 512, f"internal error: message too long ({len(msg)} bytes)" - assert msg.startswith(b'{') - - self._ensure_running_and_write(msg) - -_resource_tracker = ResourceTracker() -ensure_running = _resource_tracker.ensure_running -register = _resource_tracker.register -unregister = _resource_tracker.unregister -getfd = _resource_tracker.getfd - - -def _decode_message(line): - if line.startswith(b'{'): - try: - obj = json.loads(line.decode('ascii')) - except Exception as e: - raise ValueError("malformed resource_tracker message: %r" % (line,)) from e - - cmd = obj["cmd"] - rtype = obj["rtype"] - b64 = obj.get("base64_name", "") - - if not isinstance(cmd, str) or not isinstance(rtype, str) or not isinstance(b64, str): - raise ValueError("malformed resource_tracker fields: %r" % (obj,)) - - try: - name = base64.urlsafe_b64decode(b64).decode('utf-8', 'surrogateescape') - except ValueError as e: - raise ValueError("malformed resource_tracker base64_name: %r" % (b64,)) from e - else: - cmd, rest = line.strip().decode('ascii').split(':', maxsplit=1) - name, rtype = rest.rsplit(':', maxsplit=1) - return cmd, rtype, name - - -def main(fd): - '''Run resource tracker.''' - # protect the process from ^C and "killall python" etc - signal.signal(signal.SIGINT, signal.SIG_IGN) - signal.signal(signal.SIGTERM, signal.SIG_IGN) - if _HAVE_SIGMASK: - signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) - - for f in (sys.stdin, sys.stdout): - try: - f.close() - except Exception: - pass - - cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} - exit_code = 0 - - try: - # keep track of registered/unregistered resources - with open(fd, 'rb') as f: - for line in f: - try: - cmd, rtype, name = _decode_message(line) - cleanup_func = _CLEANUP_FUNCS.get(rtype, None) - if cleanup_func is None: - raise ValueError( - f'Cannot register {name} for automatic cleanup: ' - f'unknown resource type {rtype}') - - if cmd == 'REGISTER': - cache[rtype].add(name) - elif cmd == 'UNREGISTER': - cache[rtype].remove(name) - elif cmd == 'PROBE': - pass - else: - raise RuntimeError('unrecognized command %r' % cmd) - except Exception: - exit_code = 3 - try: - sys.excepthook(*sys.exc_info()) - except: - pass - finally: - # all processes have terminated; cleanup any remaining resources - for rtype, rtype_cache in cache.items(): - if rtype_cache: - try: - exit_code = 1 - if rtype == 'dummy': - # The test 'dummy' resource is expected to leak. - # We skip the warning (and *only* the warning) for it. - pass - else: - warnings.warn( - f'resource_tracker: There appear to be ' - f'{len(rtype_cache)} leaked {rtype} objects to ' - f'clean up at shutdown: {rtype_cache}' - ) - except Exception: - pass - for name in rtype_cache: - # For some reason the process which created and registered this - # resource has failed to unregister it. Presumably it has - # died. We therefore unlink it. - try: - try: - _CLEANUP_FUNCS[rtype](name) - except Exception as e: - exit_code = 2 - warnings.warn('resource_tracker: %r: %s' % (name, e)) - finally: - pass - - sys.exit(exit_code) diff --git a/Python313_13_x64_Template/Lib/multiprocessing/shared_memory.py b/Python313_13_x64_Template/Lib/multiprocessing/shared_memory.py deleted file mode 100644 index 67e70fdc..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/shared_memory.py +++ /dev/null @@ -1,544 +0,0 @@ -"""Provides shared memory for direct access across processes. - -The API of this package is currently provisional. Refer to the -documentation for details. 
-""" - - -__all__ = [ 'SharedMemory', 'ShareableList' ] - - -from functools import partial -import mmap -import os -import errno -import struct -import secrets -import types - -if os.name == "nt": - import _winapi - _USE_POSIX = False -else: - import _posixshmem - _USE_POSIX = True - -from . import resource_tracker - -_O_CREX = os.O_CREAT | os.O_EXCL - -# FreeBSD (and perhaps other BSDs) limit names to 14 characters. -_SHM_SAFE_NAME_LENGTH = 14 - -# Shared memory block name prefix -if _USE_POSIX: - _SHM_NAME_PREFIX = '/psm_' -else: - _SHM_NAME_PREFIX = 'wnsm_' - - -def _make_filename(): - "Create a random filename for the shared memory object." - # number of random bytes to use for name - nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 - assert nbytes >= 2, '_SHM_NAME_PREFIX too long' - name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) - assert len(name) <= _SHM_SAFE_NAME_LENGTH - return name - - -class SharedMemory: - """Creates a new shared memory block or attaches to an existing - shared memory block. - - Every shared memory block is assigned a unique name. This enables - one process to create a shared memory block with a particular name - so that a different process can attach to that same shared memory - block using that same name. - - As a resource for sharing data across processes, shared memory blocks - may outlive the original process that created them. When one process - no longer needs access to a shared memory block that might still be - needed by other processes, the close() method should be called. - When a shared memory block is no longer needed by any process, the - unlink() method should be called to ensure proper cleanup.""" - - # Defaults; enables close() and unlink() to run without errors. - _name = None - _fd = -1 - _mmap = None - _buf = None - _flags = os.O_RDWR - _mode = 0o600 - _prepend_leading_slash = True if _USE_POSIX else False - _track = True - - def __init__(self, name=None, create=False, size=0, *, track=True): - if not size >= 0: - raise ValueError("'size' must be a positive integer") - if create: - self._flags = _O_CREX | os.O_RDWR - if size == 0: - raise ValueError("'size' must be a positive number different from zero") - if name is None and not self._flags & os.O_EXCL: - raise ValueError("'name' can only be None if create=True") - - self._track = track - if _USE_POSIX: - - # POSIX Shared Memory - - if name is None: - while True: - name = _make_filename() - try: - self._fd = _posixshmem.shm_open( - name, - self._flags, - mode=self._mode - ) - except FileExistsError: - continue - self._name = name - break - else: - name = "/" + name if self._prepend_leading_slash else name - self._fd = _posixshmem.shm_open( - name, - self._flags, - mode=self._mode - ) - self._name = name - try: - if create and size: - os.ftruncate(self._fd, size) - stats = os.fstat(self._fd) - size = stats.st_size - self._mmap = mmap.mmap(self._fd, size) - except OSError: - self.unlink() - raise - if self._track: - resource_tracker.register(self._name, "shared_memory") - - else: - - # Windows Named Shared Memory - - if create: - while True: - temp_name = _make_filename() if name is None else name - # Create and reserve shared memory block with this name - # until it can be attached to by mmap. 
- h_map = _winapi.CreateFileMapping( - _winapi.INVALID_HANDLE_VALUE, - _winapi.NULL, - _winapi.PAGE_READWRITE, - (size >> 32) & 0xFFFFFFFF, - size & 0xFFFFFFFF, - temp_name - ) - try: - last_error_code = _winapi.GetLastError() - if last_error_code == _winapi.ERROR_ALREADY_EXISTS: - if name is not None: - raise FileExistsError( - errno.EEXIST, - os.strerror(errno.EEXIST), - name, - _winapi.ERROR_ALREADY_EXISTS - ) - else: - continue - self._mmap = mmap.mmap(-1, size, tagname=temp_name) - finally: - _winapi.CloseHandle(h_map) - self._name = temp_name - break - - else: - self._name = name - # Dynamically determine the existing named shared memory - # block's size which is likely a multiple of mmap.PAGESIZE. - h_map = _winapi.OpenFileMapping( - _winapi.FILE_MAP_READ, - False, - name - ) - try: - p_buf = _winapi.MapViewOfFile( - h_map, - _winapi.FILE_MAP_READ, - 0, - 0, - 0 - ) - finally: - _winapi.CloseHandle(h_map) - try: - size = _winapi.VirtualQuerySize(p_buf) - finally: - _winapi.UnmapViewOfFile(p_buf) - self._mmap = mmap.mmap(-1, size, tagname=name) - - self._size = size - self._buf = memoryview(self._mmap) - - def __del__(self): - try: - self.close() - except OSError: - pass - - def __reduce__(self): - return ( - self.__class__, - ( - self.name, - False, - self.size, - ), - ) - - def __repr__(self): - return f'{self.__class__.__name__}({self.name!r}, size={self.size})' - - @property - def buf(self): - "A memoryview of contents of the shared memory block." - return self._buf - - @property - def name(self): - "Unique name that identifies the shared memory block." - reported_name = self._name - if _USE_POSIX and self._prepend_leading_slash: - if self._name.startswith("/"): - reported_name = self._name[1:] - return reported_name - - @property - def size(self): - "Size in bytes." - return self._size - - def close(self): - """Closes access to the shared memory from this instance but does - not destroy the shared memory block.""" - if self._buf is not None: - self._buf.release() - self._buf = None - if self._mmap is not None: - self._mmap.close() - self._mmap = None - if _USE_POSIX and self._fd >= 0: - os.close(self._fd) - self._fd = -1 - - def unlink(self): - """Requests that the underlying shared memory block be destroyed. - - Unlink should be called once (and only once) across all handles - which have access to the shared memory block, even if these - handles belong to different processes. Closing and unlinking may - happen in any order, but trying to access data inside a shared - memory block after unlinking may result in memory errors, - depending on platform. - - This method has no effect on Windows, where the only way to - delete a shared memory block is to close all handles.""" - - if _USE_POSIX and self._name: - _posixshmem.shm_unlink(self._name) - if self._track: - resource_tracker.unregister(self._name, "shared_memory") - - -_encoding = "utf8" - -class ShareableList: - """Pattern for a mutable list-like object shareable via a shared - memory block. It differs from the built-in list type in that these - lists can not change their overall length (i.e. no append, insert, - etc.) 
- - Because values are packed into a memoryview as bytes, the struct - packing format for any storable value must require no more than 8 - characters to describe its format.""" - - # The shared memory area is organized as follows: - # - 8 bytes: number of items (N) as a 64-bit integer - # - (N + 1) * 8 bytes: offsets of each element from the start of the - # data area - # - K bytes: the data area storing item values (with encoding and size - # depending on their respective types) - # - N * 8 bytes: `struct` format string for each element - # - N bytes: index into _back_transforms_mapping for each element - # (for reconstructing the corresponding Python value) - _types_mapping = { - int: "q", - float: "d", - bool: "xxxxxxx?", - str: "%ds", - bytes: "%ds", - None.__class__: "xxxxxx?x", - } - _alignment = 8 - _back_transforms_mapping = { - 0: lambda value: value, # int, float, bool - 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str - 2: lambda value: value.rstrip(b'\x00'), # bytes - 3: lambda _value: None, # None - } - - @staticmethod - def _extract_recreation_code(value): - """Used in concert with _back_transforms_mapping to convert values - into the appropriate Python objects when retrieving them from - the list as well as when storing them.""" - if not isinstance(value, (str, bytes, None.__class__)): - return 0 - elif isinstance(value, str): - return 1 - elif isinstance(value, bytes): - return 2 - else: - return 3 # NoneType - - def __init__(self, sequence=None, *, name=None): - if name is None or sequence is not None: - sequence = sequence or () - _formats = [ - self._types_mapping[type(item)] - if not isinstance(item, (str, bytes)) - else self._types_mapping[type(item)] % ( - self._alignment * (len(item) // self._alignment + 1), - ) - for item in sequence - ] - self._list_len = len(_formats) - assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len - offset = 0 - # The offsets of each list element into the shared memory's - # data area (0 meaning the start of the data area, not the start - # of the shared memory area). - self._allocated_offsets = [0] - for fmt in _formats: - offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) - self._allocated_offsets.append(offset) - _recreation_codes = [ - self._extract_recreation_code(item) for item in sequence - ] - requested_size = struct.calcsize( - "q" + self._format_size_metainfo + - "".join(_formats) + - self._format_packing_metainfo + - self._format_back_transform_codes - ) - - self.shm = SharedMemory(name, create=True, size=requested_size) - else: - self.shm = SharedMemory(name) - - if sequence is not None: - _enc = _encoding - struct.pack_into( - "q" + self._format_size_metainfo, - self.shm.buf, - 0, - self._list_len, - *(self._allocated_offsets) - ) - struct.pack_into( - "".join(_formats), - self.shm.buf, - self._offset_data_start, - *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) - ) - struct.pack_into( - self._format_packing_metainfo, - self.shm.buf, - self._offset_packing_formats, - *(v.encode(_enc) for v in _formats) - ) - struct.pack_into( - self._format_back_transform_codes, - self.shm.buf, - self._offset_back_transform_codes, - *(_recreation_codes) - ) - - else: - self._list_len = len(self) # Obtains size from offset 0 in buffer. - self._allocated_offsets = list( - struct.unpack_from( - self._format_size_metainfo, - self.shm.buf, - 1 * 8 - ) - ) - - def _get_packing_format(self, position): - "Gets the packing format for a single value stored in the list." 
- position = position if position >= 0 else position + self._list_len - if (position >= self._list_len) or (self._list_len < 0): - raise IndexError("Requested position out of range.") - - v = struct.unpack_from( - "8s", - self.shm.buf, - self._offset_packing_formats + position * 8 - )[0] - fmt = v.rstrip(b'\x00') - fmt_as_str = fmt.decode(_encoding) - - return fmt_as_str - - def _get_back_transform(self, position): - "Gets the back transformation function for a single value." - - if (position >= self._list_len) or (self._list_len < 0): - raise IndexError("Requested position out of range.") - - transform_code = struct.unpack_from( - "b", - self.shm.buf, - self._offset_back_transform_codes + position - )[0] - transform_function = self._back_transforms_mapping[transform_code] - - return transform_function - - def _set_packing_format_and_transform(self, position, fmt_as_str, value): - """Sets the packing format and back transformation code for a - single value in the list at the specified position.""" - - if (position >= self._list_len) or (self._list_len < 0): - raise IndexError("Requested position out of range.") - - struct.pack_into( - "8s", - self.shm.buf, - self._offset_packing_formats + position * 8, - fmt_as_str.encode(_encoding) - ) - - transform_code = self._extract_recreation_code(value) - struct.pack_into( - "b", - self.shm.buf, - self._offset_back_transform_codes + position, - transform_code - ) - - def __getitem__(self, position): - position = position if position >= 0 else position + self._list_len - try: - offset = self._offset_data_start + self._allocated_offsets[position] - (v,) = struct.unpack_from( - self._get_packing_format(position), - self.shm.buf, - offset - ) - except IndexError: - raise IndexError("index out of range") - - back_transform = self._get_back_transform(position) - v = back_transform(v) - - return v - - def __setitem__(self, position, value): - position = position if position >= 0 else position + self._list_len - try: - item_offset = self._allocated_offsets[position] - offset = self._offset_data_start + item_offset - current_format = self._get_packing_format(position) - except IndexError: - raise IndexError("assignment index out of range") - - if not isinstance(value, (str, bytes)): - new_format = self._types_mapping[type(value)] - encoded_value = value - else: - allocated_length = self._allocated_offsets[position + 1] - item_offset - - encoded_value = (value.encode(_encoding) - if isinstance(value, str) else value) - if len(encoded_value) > allocated_length: - raise ValueError("bytes/str item exceeds available storage") - if current_format[-1] == "s": - new_format = current_format - else: - new_format = self._types_mapping[str] % ( - allocated_length, - ) - - self._set_packing_format_and_transform( - position, - new_format, - value - ) - struct.pack_into(new_format, self.shm.buf, offset, encoded_value) - - def __reduce__(self): - return partial(self.__class__, name=self.shm.name), () - - def __len__(self): - return struct.unpack_from("q", self.shm.buf, 0)[0] - - def __repr__(self): - return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' - - @property - def format(self): - "The struct packing format used by all currently stored items." - return "".join( - self._get_packing_format(i) for i in range(self._list_len) - ) - - @property - def _format_size_metainfo(self): - "The struct packing format used for the items' storage offsets." 
- return "q" * (self._list_len + 1) - - @property - def _format_packing_metainfo(self): - "The struct packing format used for the items' packing formats." - return "8s" * self._list_len - - @property - def _format_back_transform_codes(self): - "The struct packing format used for the items' back transforms." - return "b" * self._list_len - - @property - def _offset_data_start(self): - # - 8 bytes for the list length - # - (N + 1) * 8 bytes for the element offsets - return (self._list_len + 2) * 8 - - @property - def _offset_packing_formats(self): - return self._offset_data_start + self._allocated_offsets[-1] - - @property - def _offset_back_transform_codes(self): - return self._offset_packing_formats + self._list_len * 8 - - def count(self, value): - "L.count(value) -> integer -- return number of occurrences of value." - - return sum(value == entry for entry in self) - - def index(self, value): - """L.index(value) -> integer -- return first index of value. - Raises ValueError if the value is not present.""" - - for position, entry in enumerate(self): - if value == entry: - return position - else: - raise ValueError(f"{value!r} not in this container") - - __class_getitem__ = classmethod(types.GenericAlias) diff --git a/Python313_13_x64_Template/Lib/multiprocessing/synchronize.py b/Python313_13_x64_Template/Lib/multiprocessing/synchronize.py deleted file mode 100644 index 870c9134..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/synchronize.py +++ /dev/null @@ -1,404 +0,0 @@ -# -# Module implementing synchronization primitives -# -# multiprocessing/synchronize.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. -# - -__all__ = [ - 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' - ] - -import threading -import sys -import tempfile -import _multiprocessing -import time - -from . import context -from . import process -from . import util - -# Try to import the mp.synchronize module cleanly, if it fails -# raise ImportError for platforms lacking a working sem_open implementation. -# See issue 3770 -try: - from _multiprocessing import SemLock, sem_unlink -except (ImportError): - raise ImportError("This platform lacks a functioning sem_open" + - " implementation, therefore, the required" + - " synchronization primitives needed will not" + - " function, see issue 3770.") - -# -# Constants -# - -RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) -SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX - -# -# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` -# - -class SemLock(object): - - _rand = tempfile._RandomNameSequence() - - def __init__(self, kind, value, maxvalue, *, ctx): - if ctx is None: - ctx = context._default_context.get_context() - self._is_fork_ctx = ctx.get_start_method() == 'fork' - unlink_now = sys.platform == 'win32' or self._is_fork_ctx - for i in range(100): - try: - sl = self._semlock = _multiprocessing.SemLock( - kind, value, maxvalue, self._make_name(), - unlink_now) - except FileExistsError: - pass - else: - break - else: - raise FileExistsError('cannot find name for semaphore') - - util.debug('created semlock with handle %s' % sl.handle) - self._make_methods() - - if sys.platform != 'win32': - def _after_fork(obj): - obj._semlock._after_fork() - util.register_after_fork(self, _after_fork) - - if self._semlock.name is not None: - # We only get here if we are on Unix with forking - # disabled. 
When the object is garbage collected or the - # process shuts down we unlink the semaphore name - from .resource_tracker import register - register(self._semlock.name, "semaphore") - util.Finalize(self, SemLock._cleanup, (self._semlock.name,), - exitpriority=0) - - @staticmethod - def _cleanup(name): - from .resource_tracker import unregister - sem_unlink(name) - unregister(name, "semaphore") - - def _make_methods(self): - self.acquire = self._semlock.acquire - self.release = self._semlock.release - - def __enter__(self): - return self._semlock.__enter__() - - def __exit__(self, *args): - return self._semlock.__exit__(*args) - - def __getstate__(self): - context.assert_spawning(self) - sl = self._semlock - if sys.platform == 'win32': - h = context.get_spawning_popen().duplicate_for_child(sl.handle) - else: - if self._is_fork_ctx: - raise RuntimeError('A SemLock created in a fork context is being ' - 'shared with a process in a spawn context. This is ' - 'not supported. Please use the same context to create ' - 'multiprocessing objects and Process.') - h = sl.handle - return (h, sl.kind, sl.maxvalue, sl.name) - - def __setstate__(self, state): - self._semlock = _multiprocessing.SemLock._rebuild(*state) - util.debug('recreated blocker with handle %r' % state[0]) - self._make_methods() - # Ensure that deserialized SemLock can be serialized again (gh-108520). - self._is_fork_ctx = False - - @staticmethod - def _make_name(): - return '%s-%s' % (process.current_process()._config['semprefix'], - next(SemLock._rand)) - -# -# Semaphore -# - -class Semaphore(SemLock): - - def __init__(self, value=1, *, ctx): - SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) - - def get_value(self): - return self._semlock._get_value() - - def __repr__(self): - try: - value = self._semlock._get_value() - except Exception: - value = 'unknown' - return '<%s(value=%s)>' % (self.__class__.__name__, value) - -# -# Bounded semaphore -# - -class BoundedSemaphore(Semaphore): - - def __init__(self, value=1, *, ctx): - SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) - - def __repr__(self): - try: - value = self._semlock._get_value() - except Exception: - value = 'unknown' - return '<%s(value=%s, maxvalue=%s)>' % \ - (self.__class__.__name__, value, self._semlock.maxvalue) - -# -# Non-recursive lock -# - -class Lock(SemLock): - - def __init__(self, *, ctx): - SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) - - def __repr__(self): - try: - if self._semlock._is_mine(): - name = process.current_process().name - if threading.current_thread().name != 'MainThread': - name += '|' + threading.current_thread().name - elif not self._semlock._is_zero(): - name = 'None' - elif self._semlock._count() > 0: - name = 'SomeOtherThread' - else: - name = 'SomeOtherProcess' - except Exception: - name = 'unknown' - return '<%s(owner=%s)>' % (self.__class__.__name__, name) - -# -# Recursive lock -# - -class RLock(SemLock): - - def __init__(self, *, ctx): - SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) - - def __repr__(self): - try: - if self._semlock._is_mine(): - name = process.current_process().name - if threading.current_thread().name != 'MainThread': - name += '|' + threading.current_thread().name - count = self._semlock._count() - elif not self._semlock._is_zero(): - name, count = 'None', 0 - elif self._semlock._count() > 0: - name, count = 'SomeOtherThread', 'nonzero' - else: - name, count = 'SomeOtherProcess', 'nonzero' - except Exception: - name, count = 'unknown', 'unknown' - return '<%s(%s, %s)>' % 
(self.__class__.__name__, name, count) - -# -# Condition variable -# - -class Condition(object): - - def __init__(self, lock=None, *, ctx): - self._lock = lock or ctx.RLock() - self._sleeping_count = ctx.Semaphore(0) - self._woken_count = ctx.Semaphore(0) - self._wait_semaphore = ctx.Semaphore(0) - self._make_methods() - - def __getstate__(self): - context.assert_spawning(self) - return (self._lock, self._sleeping_count, - self._woken_count, self._wait_semaphore) - - def __setstate__(self, state): - (self._lock, self._sleeping_count, - self._woken_count, self._wait_semaphore) = state - self._make_methods() - - def __enter__(self): - return self._lock.__enter__() - - def __exit__(self, *args): - return self._lock.__exit__(*args) - - def _make_methods(self): - self.acquire = self._lock.acquire - self.release = self._lock.release - - def __repr__(self): - try: - num_waiters = (self._sleeping_count._semlock._get_value() - - self._woken_count._semlock._get_value()) - except Exception: - num_waiters = 'unknown' - return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) - - def wait(self, timeout=None): - assert self._lock._semlock._is_mine(), \ - 'must acquire() condition before using wait()' - - # indicate that this thread is going to sleep - self._sleeping_count.release() - - # release lock - count = self._lock._semlock._count() - for i in range(count): - self._lock.release() - - try: - # wait for notification or timeout - return self._wait_semaphore.acquire(True, timeout) - finally: - # indicate that this thread has woken - self._woken_count.release() - - # reacquire lock - for i in range(count): - self._lock.acquire() - - def notify(self, n=1): - assert self._lock._semlock._is_mine(), 'lock is not owned' - assert not self._wait_semaphore.acquire( - False), ('notify: Should not have been able to acquire ' - + '_wait_semaphore') - - # to take account of timeouts since last notify*() we subtract - # woken_count from sleeping_count and rezero woken_count - while self._woken_count.acquire(False): - res = self._sleeping_count.acquire(False) - assert res, ('notify: Bug in sleeping_count.acquire' - + '- res should not be False') - - sleepers = 0 - while sleepers < n and self._sleeping_count.acquire(False): - self._wait_semaphore.release() # wake up one sleeper - sleepers += 1 - - if sleepers: - for i in range(sleepers): - self._woken_count.acquire() # wait for a sleeper to wake - - # rezero wait_semaphore in case some timeouts just happened - while self._wait_semaphore.acquire(False): - pass - - def notify_all(self): - self.notify(n=sys.maxsize) - - def wait_for(self, predicate, timeout=None): - result = predicate() - if result: - return result - if timeout is not None: - endtime = time.monotonic() + timeout - else: - endtime = None - waittime = None - while not result: - if endtime is not None: - waittime = endtime - time.monotonic() - if waittime <= 0: - break - self.wait(waittime) - result = predicate() - return result - -# -# Event -# - -class Event(object): - - def __init__(self, *, ctx): - self._cond = ctx.Condition(ctx.Lock()) - self._flag = ctx.Semaphore(0) - - def is_set(self): - with self._cond: - if self._flag.acquire(False): - self._flag.release() - return True - return False - - def set(self): - with self._cond: - self._flag.acquire(False) - self._flag.release() - self._cond.notify_all() - - def clear(self): - with self._cond: - self._flag.acquire(False) - - def wait(self, timeout=None): - with self._cond: - if self._flag.acquire(False): - self._flag.release() - else: 
- self._cond.wait(timeout) - - if self._flag.acquire(False): - self._flag.release() - return True - return False - - def __repr__(self): - set_status = 'set' if self.is_set() else 'unset' - return f"<{type(self).__qualname__} at {id(self):#x} {set_status}>" -# -# Barrier -# - -class Barrier(threading.Barrier): - - def __init__(self, parties, action=None, timeout=None, *, ctx): - import struct - from .heap import BufferWrapper - wrapper = BufferWrapper(struct.calcsize('i') * 2) - cond = ctx.Condition() - self.__setstate__((parties, action, timeout, cond, wrapper)) - self._state = 0 - self._count = 0 - - def __setstate__(self, state): - (self._parties, self._action, self._timeout, - self._cond, self._wrapper) = state - self._array = self._wrapper.create_memoryview().cast('i') - - def __getstate__(self): - return (self._parties, self._action, self._timeout, - self._cond, self._wrapper) - - @property - def _state(self): - return self._array[0] - - @_state.setter - def _state(self, value): - self._array[0] = value - - @property - def _count(self): - return self._array[1] - - @_count.setter - def _count(self, value): - self._array[1] = value diff --git a/Python313_13_x64_Template/Lib/multiprocessing/util.py b/Python313_13_x64_Template/Lib/multiprocessing/util.py deleted file mode 100644 index b8bfea04..00000000 --- a/Python313_13_x64_Template/Lib/multiprocessing/util.py +++ /dev/null @@ -1,562 +0,0 @@ -# -# Module providing various facilities to other parts of the package -# -# multiprocessing/util.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. -# - -import os -import itertools -import sys -import weakref -import atexit -import threading # we want threading to install it's - # cleanup function before multiprocessing does -from subprocess import _args_from_interpreter_flags - -from . 
import process - -__all__ = [ - 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', - 'log_to_stderr', 'get_temp_dir', 'register_after_fork', - 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', - 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', - ] - -# -# Logging -# - -NOTSET = 0 -SUBDEBUG = 5 -DEBUG = 10 -INFO = 20 -SUBWARNING = 25 -WARNING = 30 - -LOGGER_NAME = 'multiprocessing' -DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' - -_logger = None -_log_to_stderr = False - -def sub_debug(msg, *args): - if _logger: - _logger.log(SUBDEBUG, msg, *args, stacklevel=2) - -def debug(msg, *args): - if _logger: - _logger.log(DEBUG, msg, *args, stacklevel=2) - -def info(msg, *args): - if _logger: - _logger.log(INFO, msg, *args, stacklevel=2) - -def _warn(msg, *args): - if _logger: - _logger.log(WARNING, msg, *args, stacklevel=2) - -def sub_warning(msg, *args): - if _logger: - _logger.log(SUBWARNING, msg, *args, stacklevel=2) - -def get_logger(): - ''' - Returns logger used by multiprocessing - ''' - global _logger - import logging - - with logging._lock: - if not _logger: - - _logger = logging.getLogger(LOGGER_NAME) - _logger.propagate = 0 - - # XXX multiprocessing should cleanup before logging - if hasattr(atexit, 'unregister'): - atexit.unregister(_exit_function) - atexit.register(_exit_function) - else: - atexit._exithandlers.remove((_exit_function, (), {})) - atexit._exithandlers.append((_exit_function, (), {})) - - return _logger - -def log_to_stderr(level=None): - ''' - Turn on logging and add a handler which prints to stderr - ''' - global _log_to_stderr - import logging - - logger = get_logger() - formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) - handler = logging.StreamHandler() - handler.setFormatter(formatter) - logger.addHandler(handler) - - if level: - logger.setLevel(level) - _log_to_stderr = True - return _logger - - -# Abstract socket support - -def _platform_supports_abstract_sockets(): - return sys.platform in ("linux", "android") - - -def is_abstract_socket_namespace(address): - if not address: - return False - if isinstance(address, bytes): - return address[0] == 0 - elif isinstance(address, str): - return address[0] == "\0" - raise TypeError(f'address type of {address!r} unrecognized') - - -abstract_sockets_supported = _platform_supports_abstract_sockets() - -# -# Function returning a temp directory which will be removed on exit -# - -# Maximum length of a NULL-terminated [1] socket file path is usually -# between 92 and 108 [2], but Linux is known to use a size of 108 [3]. -# BSD-based systems usually use a size of 104 or 108 and Windows does -# not create AF_UNIX sockets. -# -# [1]: https://github.com/python/cpython/issues/140734 -# [2]: https://pubs.opengroup.org/onlinepubs/9799919799/basedefs/sys_un.h.html -# [3]: https://man7.org/linux/man-pages/man7/unix.7.html - -if sys.platform == 'linux': - _SUN_PATH_MAX = 108 -elif sys.platform.startswith(('openbsd', 'freebsd')): - _SUN_PATH_MAX = 104 -else: - # On Windows platforms, we do not create AF_UNIX sockets. - _SUN_PATH_MAX = None if os.name == 'nt' else 92 - -def _remove_temp_dir(rmtree, tempdir): - rmtree(tempdir) - - current_process = process.current_process() - # current_process() can be None if the finalizer is called - # late during Python finalization - if current_process is not None: - current_process._config['tempdir'] = None - -def _get_base_temp_dir(tempfile): - """Get a temporary directory where socket files will be created. 
- - To prevent additional imports, pass a pre-imported 'tempfile' module. - """ - if os.name == 'nt': - return None - # Most of the time, the default temporary directory is /tmp. Thus, - # listener sockets files "$TMPDIR/pymp-XXXXXXXX/sock-XXXXXXXX" do - # not have a path length exceeding SUN_PATH_MAX. - # - # If users specify their own temporary directory, we may be unable - # to create those files. Therefore, we fall back to the system-wide - # temporary directory /tmp, assumed to exist on POSIX systems. - # - # See https://github.com/python/cpython/issues/132124. - base_tempdir = tempfile.gettempdir() - # Files created in a temporary directory are suffixed by a string - # generated by tempfile._RandomNameSequence, which, by design, - # is 8 characters long. - # - # Thus, the socket file path length (without NULL terminator) will be: - # - # len(base_tempdir + '/pymp-XXXXXXXX' + '/sock-XXXXXXXX') - sun_path_len = len(base_tempdir) + 14 + 14 - # Strict inequality to account for the NULL terminator. - # See https://github.com/python/cpython/issues/140734. - if sun_path_len < _SUN_PATH_MAX: - return base_tempdir - # Fallback to the default system-wide temporary directory. - # This ignores user-defined environment variables. - # - # On POSIX systems, /tmp MUST be writable by any application [1]. - # We however emit a warning if this is not the case to prevent - # obscure errors later in the execution. - # - # On some legacy systems, /var/tmp and /usr/tmp can be present - # and will be used instead. - # - # [1]: https://refspecs.linuxfoundation.org/FHS_3.0/fhs/ch03s18.html - dirlist = ['/tmp', '/var/tmp', '/usr/tmp'] - try: - base_system_tempdir = tempfile._get_default_tempdir(dirlist) - except FileNotFoundError: - _warn("Process-wide temporary directory %s will not be usable for " - "creating socket files and no usable system-wide temporary " - "directory was found in %s", base_tempdir, dirlist) - # At this point, the system-wide temporary directory is not usable - # but we may assume that the user-defined one is, even if we will - # not be able to write socket files out there. 
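# A quick check of the arithmetic used throughout this function, assuming
# the Linux limit of 108 bytes and the two fixed 14-character components
# "/pymp-XXXXXXXX" and "/sock-XXXXXXXX":
#
#     >>> len('/tmp') + 14 + 14 < 108        # default tempdir: fits easily
#     True
#     >>> deep = '/home/user/some/very/deeply/nested/ci/workspace/tmp' * 2
#     >>> len(deep) + 14 + 14 < 108          # user override: falls back
#     False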
- return base_tempdir - _warn("Ignoring user-defined temporary directory: %s", base_tempdir) - # at most max(map(len, dirlist)) + 14 + 14 = 36 characters - assert len(base_system_tempdir) + 14 + 14 < _SUN_PATH_MAX - return base_system_tempdir - -def get_temp_dir(): - # get name of a temp directory which will be automatically cleaned up - tempdir = process.current_process()._config.get('tempdir') - if tempdir is None: - import shutil, tempfile - base_tempdir = _get_base_temp_dir(tempfile) - tempdir = tempfile.mkdtemp(prefix='pymp-', dir=base_tempdir) - info('created temp directory %s', tempdir) - # keep a strong reference to shutil.rmtree(), since the finalizer - # can be called late during Python shutdown - Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), - exitpriority=-100) - process.current_process()._config['tempdir'] = tempdir - return tempdir - -# -# Support for reinitialization of objects when bootstrapping a child process -# - -_afterfork_registry = weakref.WeakValueDictionary() -_afterfork_counter = itertools.count() - -def _run_after_forkers(): - items = list(_afterfork_registry.items()) - items.sort() - for (index, ident, func), obj in items: - try: - func(obj) - except Exception as e: - info('after forker raised exception %s', e) - -def register_after_fork(obj, func): - _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj - -# -# Finalization using weakrefs -# - -_finalizer_registry = {} -_finalizer_counter = itertools.count() - - -class Finalize(object): - ''' - Class which supports object finalization using weakrefs - ''' - def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): - if (exitpriority is not None) and not isinstance(exitpriority,int): - raise TypeError( - "Exitpriority ({0!r}) must be None or int, not {1!s}".format( - exitpriority, type(exitpriority))) - - if obj is not None: - self._weakref = weakref.ref(obj, self) - elif exitpriority is None: - raise ValueError("Without object, exitpriority cannot be None") - - self._callback = callback - self._args = args - self._kwargs = kwargs or {} - self._key = (exitpriority, next(_finalizer_counter)) - self._pid = os.getpid() - - _finalizer_registry[self._key] = self - - def __call__(self, wr=None, - # Need to bind these locally because the globals can have - # been cleared at shutdown - _finalizer_registry=_finalizer_registry, - sub_debug=sub_debug, getpid=os.getpid): - ''' - Run the callback unless it has already been called or cancelled - ''' - try: - del _finalizer_registry[self._key] - except KeyError: - sub_debug('finalizer no longer registered') - else: - if self._pid != getpid(): - sub_debug('finalizer ignored because different process') - res = None - else: - sub_debug('finalizer calling %s with args %s and kwargs %s', - self._callback, self._args, self._kwargs) - res = self._callback(*self._args, **self._kwargs) - self._weakref = self._callback = self._args = \ - self._kwargs = self._key = None - return res - - def cancel(self): - ''' - Cancel finalization of the object - ''' - try: - del _finalizer_registry[self._key] - except KeyError: - pass - else: - self._weakref = self._callback = self._args = \ - self._kwargs = self._key = None - - def still_active(self): - ''' - Return whether this finalizer is still waiting to invoke callback - ''' - return self._key in _finalizer_registry - - def __repr__(self): - try: - obj = self._weakref() - except (AttributeError, TypeError): - obj = None - - if obj is None: - return '<%s object, dead>' % self.__class__.__name__ - 
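# A short usage sketch of this class (illustrative names; relies on
# CPython's immediate refcounting to fire the weakref callback):
#
#     >>> from multiprocessing.util import Finalize
#     >>> class Holder: pass
#     >>> h = Holder()
#     >>> f = Finalize(h, print, args=('resource released',), exitpriority=0)
#     >>> f.still_active()
#     True
#     >>> del h                  # weakref death runs the callback right away
#     resource released
#     >>> f.still_active()
#     False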
- x = '<%s object, callback=%s' % ( - self.__class__.__name__, - getattr(self._callback, '__name__', self._callback)) - if self._args: - x += ', args=' + str(self._args) - if self._kwargs: - x += ', kwargs=' + str(self._kwargs) - if self._key[0] is not None: - x += ', exitpriority=' + str(self._key[0]) - return x + '>' - - -def _run_finalizers(minpriority=None): - ''' - Run all finalizers whose exit priority is not None and at least minpriority - - Finalizers with highest priority are called first; finalizers with - the same priority will be called in reverse order of creation. - ''' - if _finalizer_registry is None: - # This function may be called after this module's globals are - # destroyed. See the _exit_function function in this module for more - # notes. - return - - if minpriority is None: - f = lambda p : p[0] is not None - else: - f = lambda p : p[0] is not None and p[0] >= minpriority - - # Careful: _finalizer_registry may be mutated while this function - # is running (either by a GC run or by another thread). - - # list(_finalizer_registry) should be atomic, while - # list(_finalizer_registry.items()) is not. - keys = [key for key in list(_finalizer_registry) if f(key)] - keys.sort(reverse=True) - - for key in keys: - finalizer = _finalizer_registry.get(key) - # key may have been removed from the registry - if finalizer is not None: - sub_debug('calling %s', finalizer) - try: - finalizer() - except Exception: - import traceback - traceback.print_exc() - - if minpriority is None: - _finalizer_registry.clear() - -# -# Clean up on exit -# - -def is_exiting(): - ''' - Returns true if the process is shutting down - ''' - return _exiting or _exiting is None - -_exiting = False - -def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, - active_children=process.active_children, - current_process=process.current_process): - # We hold on to references to functions in the arglist due to the - # situation described below, where this function is called after this - # module's globals are destroyed. - - global _exiting - - if not _exiting: - _exiting = True - - info('process shutting down') - debug('running all "atexit" finalizers with priority >= 0') - _run_finalizers(0) - - if current_process() is not None: - # We check if the current process is None here because if - # it's None, any call to ``active_children()`` will raise - # an AttributeError (active_children winds up trying to - # get attributes from util._current_process). One - # situation where this can happen is if someone has - # manipulated sys.modules, causing this module to be - # garbage collected. The destructor for the module type - # then replaces all values in the module dict with None. - # For instance, after setuptools runs a test it replaces - # sys.modules with a copy created earlier. See issues - # #9775 and #15881. Also related: #4106, #9205, and - # #9207. 
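# The two loops that follow encode the shutdown contract: daemonic
# children are terminated outright, regular children are joined. A hedged
# script-form sketch of the observable effect (names illustrative):
#
#     import multiprocessing as mp
#     import time
#
#     def sleeper():
#         time.sleep(60)
#
#     if __name__ == '__main__':
#         d = mp.Process(target=sleeper, daemon=True)
#         d.start()
#         # On interpreter exit, _exit_function terminates d instead of
#         # waiting out the sleep; a non-daemon Process would be joined.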
- - for p in active_children(): - if p.daemon: - info('calling terminate() for daemon %s', p.name) - p._popen.terminate() - - for p in active_children(): - info('calling join() for process %s', p.name) - p.join() - - debug('running the remaining "atexit" finalizers') - _run_finalizers() - -atexit.register(_exit_function) - -# -# Some fork aware types -# - -class ForkAwareThreadLock(object): - def __init__(self): - self._lock = threading.Lock() - self.acquire = self._lock.acquire - self.release = self._lock.release - register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) - - def _at_fork_reinit(self): - self._lock._at_fork_reinit() - - def __enter__(self): - return self._lock.__enter__() - - def __exit__(self, *args): - return self._lock.__exit__(*args) - - -class ForkAwareLocal(threading.local): - def __init__(self): - register_after_fork(self, lambda obj : obj.__dict__.clear()) - def __reduce__(self): - return type(self), () - -# -# Close fds except those specified -# - -try: - MAXFD = os.sysconf("SC_OPEN_MAX") -except Exception: - MAXFD = 256 - -def close_all_fds_except(fds): - fds = list(fds) + [-1, MAXFD] - fds.sort() - assert fds[-1] == MAXFD, 'fd too large' - for i in range(len(fds) - 1): - os.closerange(fds[i]+1, fds[i+1]) -# -# Close sys.stdin and replace stdin with os.devnull -# - -def _close_stdin(): - if sys.stdin is None: - return - - try: - sys.stdin.close() - except (OSError, ValueError): - pass - - try: - fd = os.open(os.devnull, os.O_RDONLY) - try: - sys.stdin = open(fd, encoding="utf-8", closefd=False) - except: - os.close(fd) - raise - except (OSError, ValueError): - pass - -# -# Flush standard streams, if any -# - -def _flush_std_streams(): - try: - sys.stdout.flush() - except (AttributeError, ValueError): - pass - try: - sys.stderr.flush() - except (AttributeError, ValueError): - pass - -# -# Start a program with only specified fds kept open -# - -def spawnv_passfds(path, args, passfds): - import _posixsubprocess - import subprocess - passfds = tuple(sorted(map(int, passfds))) - errpipe_read, errpipe_write = os.pipe() - try: - return _posixsubprocess.fork_exec( - args, [path], True, passfds, None, None, - -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, - False, False, -1, None, None, None, -1, None, - subprocess._USE_VFORK) - finally: - os.close(errpipe_read) - os.close(errpipe_write) - - -def close_fds(*fds): - """Close each file descriptor given as an argument""" - for fd in fds: - os.close(fd) - - -def _cleanup_tests(): - """Cleanup multiprocessing resources when multiprocessing tests - completed.""" - - from test import support - - # cleanup multiprocessing - process._cleanup() - - # Stop the ForkServer process if it's running - from multiprocessing import forkserver - forkserver._forkserver._stop() - - # Stop the ResourceTracker process if it's running - from multiprocessing import resource_tracker - resource_tracker._resource_tracker._stop() - - # bpo-37421: Explicitly call _run_finalizers() to remove immediately - # temporary directories created by multiprocessing.util.get_temp_dir(). - _run_finalizers() - support.gc_collect() - - support.reap_children() diff --git a/Python313_13_x64_Template/Lib/nturl2path.py b/Python313_13_x64_Template/Lib/nturl2path.py deleted file mode 100644 index 757fd01b..00000000 --- a/Python313_13_x64_Template/Lib/nturl2path.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Convert a NT pathname to a file URL and vice versa. - -This module only exists to provide OS-specific code -for urllib.requests, thus do not use directly. 
-""" -# Testing is done through test_urllib. - -def url2pathname(url): - """OS-specific conversion from a relative URL of the 'file' scheme - to a file system path; not recommended for general use.""" - # e.g. - # ///C|/foo/bar/spam.foo - # and - # ///C:/foo/bar/spam.foo - # become - # C:\foo\bar\spam.foo - import string, urllib.parse - if url[:3] == '///': - # URL has an empty authority section, so the path begins on the third - # character. - url = url[2:] - elif url[:12] == '//localhost/': - # Skip past 'localhost' authority. - url = url[11:] - if url[:3] == '///': - # Skip past extra slash before UNC drive in URL path. - url = url[1:] - # Windows itself uses ":" even in URLs. - url = url.replace(':', '|') - if not '|' in url: - # No drive specifier, just convert slashes - # make sure not to convert quoted slashes :-) - return urllib.parse.unquote(url.replace('/', '\\')) - comp = url.split('|') - if len(comp) != 2 or comp[0][-1] not in string.ascii_letters: - error = 'Bad URL: ' + url - raise OSError(error) - drive = comp[0][-1].upper() - tail = urllib.parse.unquote(comp[1].replace('/', '\\')) - return drive + ':' + tail - -def pathname2url(p): - """OS-specific conversion from a file system path to a relative URL - of the 'file' scheme; not recommended for general use.""" - # e.g. - # C:\foo\bar\spam.foo - # becomes - # ///C:/foo/bar/spam.foo - import urllib.parse - # First, clean up some special forms. We are going to sacrifice - # the additional information anyway - p = p.replace('\\', '/') - if p[:4] == '//?/': - p = p[4:] - if p[:4].upper() == 'UNC/': - p = '//' + p[4:] - elif p[1:2] != ':': - raise OSError('Bad path: ' + p) - if not ':' in p: - # No DOS drive specified, just quote the pathname - return urllib.parse.quote(p) - comp = p.split(':', maxsplit=2) - if len(comp) != 2 or len(comp[0]) > 1: - error = 'Bad path: ' + p - raise OSError(error) - - drive = urllib.parse.quote(comp[0].upper()) - tail = urllib.parse.quote(comp[1]) - return '///' + drive + ':' + tail diff --git a/Python313_13_x64_Template/Lib/opcode.py b/Python313_13_x64_Template/Lib/opcode.py deleted file mode 100644 index 5735686f..00000000 --- a/Python313_13_x64_Template/Lib/opcode.py +++ /dev/null @@ -1,115 +0,0 @@ - -""" -opcode module - potentially shared between dis and other modules which -operate on bytecodes (e.g. peephole optimizers). 
-""" - - -__all__ = ["cmp_op", "stack_effect", "hascompare", "opname", "opmap", - "HAVE_ARGUMENT", "EXTENDED_ARG", "hasarg", "hasconst", "hasname", - "hasjump", "hasjrel", "hasjabs", "hasfree", "haslocal", "hasexc"] - -import _opcode -from _opcode import stack_effect - -from _opcode_metadata import (_specializations, _specialized_opmap, opmap, - HAVE_ARGUMENT, MIN_INSTRUMENTED_OPCODE) -EXTENDED_ARG = opmap['EXTENDED_ARG'] - -opname = ['<%r>' % (op,) for op in range(max(opmap.values()) + 1)] -for op, i in opmap.items(): - opname[i] = op - -cmp_op = ('<', '<=', '==', '!=', '>', '>=') - -# These lists are documented as part of the dis module's API -hasarg = [op for op in opmap.values() if _opcode.has_arg(op)] -hasconst = [op for op in opmap.values() if _opcode.has_const(op)] -hasname = [op for op in opmap.values() if _opcode.has_name(op)] -hasjump = [op for op in opmap.values() if _opcode.has_jump(op)] -hasjrel = hasjump # for backward compatibility -hasjabs = [] -hasfree = [op for op in opmap.values() if _opcode.has_free(op)] -haslocal = [op for op in opmap.values() if _opcode.has_local(op)] -hasexc = [op for op in opmap.values() if _opcode.has_exc(op)] - - -_intrinsic_1_descs = _opcode.get_intrinsic1_descs() -_intrinsic_2_descs = _opcode.get_intrinsic2_descs() -_nb_ops = _opcode.get_nb_ops() - -hascompare = [opmap["COMPARE_OP"]] - -_cache_format = { - "LOAD_GLOBAL": { - "counter": 1, - "index": 1, - "module_keys_version": 1, - "builtin_keys_version": 1, - }, - "BINARY_OP": { - "counter": 1, - }, - "UNPACK_SEQUENCE": { - "counter": 1, - }, - "COMPARE_OP": { - "counter": 1, - }, - "CONTAINS_OP": { - "counter": 1, - }, - "BINARY_SUBSCR": { - "counter": 1, - }, - "FOR_ITER": { - "counter": 1, - }, - "LOAD_SUPER_ATTR": { - "counter": 1, - }, - "LOAD_ATTR": { - "counter": 1, - "version": 2, - "keys_version": 2, - "descr": 4, - }, - "STORE_ATTR": { - "counter": 1, - "version": 2, - "index": 1, - }, - "CALL": { - "counter": 1, - "func_version": 2, - }, - "STORE_SUBSCR": { - "counter": 1, - }, - "SEND": { - "counter": 1, - }, - "JUMP_BACKWARD": { - "counter": 1, - }, - "TO_BOOL": { - "counter": 1, - "version": 2, - }, - "POP_JUMP_IF_TRUE": { - "counter": 1, - }, - "POP_JUMP_IF_FALSE": { - "counter": 1, - }, - "POP_JUMP_IF_NONE": { - "counter": 1, - }, - "POP_JUMP_IF_NOT_NONE": { - "counter": 1, - }, -} - -_inline_cache_entries = { - name : sum(value.values()) for (name, value) in _cache_format.items() -} diff --git a/Python313_13_x64_Template/Lib/operator.py b/Python313_13_x64_Template/Lib/operator.py deleted file mode 100644 index 02ccdaa1..00000000 --- a/Python313_13_x64_Template/Lib/operator.py +++ /dev/null @@ -1,467 +0,0 @@ -""" -Operator Interface - -This module exports a set of functions corresponding to the intrinsic -operators of Python. For example, operator.add(x, y) is equivalent -to the expression x+y. The function names are those used for special -methods; variants without leading and trailing '__' are also provided -for convenience. - -This is the pure Python implementation of the module. 
-""" - -__all__ = ['abs', 'add', 'and_', 'attrgetter', 'call', 'concat', 'contains', 'countOf', - 'delitem', 'eq', 'floordiv', 'ge', 'getitem', 'gt', 'iadd', 'iand', - 'iconcat', 'ifloordiv', 'ilshift', 'imatmul', 'imod', 'imul', - 'index', 'indexOf', 'inv', 'invert', 'ior', 'ipow', 'irshift', - 'is_', 'is_not', 'isub', 'itemgetter', 'itruediv', 'ixor', 'le', - 'length_hint', 'lshift', 'lt', 'matmul', 'methodcaller', 'mod', - 'mul', 'ne', 'neg', 'not_', 'or_', 'pos', 'pow', 'rshift', - 'setitem', 'sub', 'truediv', 'truth', 'xor'] - -from builtins import abs as _abs - - -# Comparison Operations *******************************************************# - -def lt(a, b): - "Same as a < b." - return a < b - -def le(a, b): - "Same as a <= b." - return a <= b - -def eq(a, b): - "Same as a == b." - return a == b - -def ne(a, b): - "Same as a != b." - return a != b - -def ge(a, b): - "Same as a >= b." - return a >= b - -def gt(a, b): - "Same as a > b." - return a > b - -# Logical Operations **********************************************************# - -def not_(a): - "Same as not a." - return not a - -def truth(a): - "Return True if a is true, False otherwise." - return True if a else False - -def is_(a, b): - "Same as a is b." - return a is b - -def is_not(a, b): - "Same as a is not b." - return a is not b - -# Mathematical/Bitwise Operations *********************************************# - -def abs(a): - "Same as abs(a)." - return _abs(a) - -def add(a, b): - "Same as a + b." - return a + b - -def and_(a, b): - "Same as a & b." - return a & b - -def floordiv(a, b): - "Same as a // b." - return a // b - -def index(a): - "Same as a.__index__()." - return a.__index__() - -def inv(a): - "Same as ~a." - return ~a -invert = inv - -def lshift(a, b): - "Same as a << b." - return a << b - -def mod(a, b): - "Same as a % b." - return a % b - -def mul(a, b): - "Same as a * b." - return a * b - -def matmul(a, b): - "Same as a @ b." - return a @ b - -def neg(a): - "Same as -a." - return -a - -def or_(a, b): - "Same as a | b." - return a | b - -def pos(a): - "Same as +a." - return +a - -def pow(a, b): - "Same as a ** b." - return a ** b - -def rshift(a, b): - "Same as a >> b." - return a >> b - -def sub(a, b): - "Same as a - b." - return a - b - -def truediv(a, b): - "Same as a / b." - return a / b - -def xor(a, b): - "Same as a ^ b." - return a ^ b - -# Sequence Operations *********************************************************# - -def concat(a, b): - "Same as a + b, for a and b sequences." - if not hasattr(a, '__getitem__'): - msg = "'%s' object can't be concatenated" % type(a).__name__ - raise TypeError(msg) - return a + b - -def contains(a, b): - "Same as b in a (note reversed operands)." - return b in a - -def countOf(a, b): - "Return the number of items in a which are, or which equal, b." - count = 0 - for i in a: - if i is b or i == b: - count += 1 - return count - -def delitem(a, b): - "Same as del a[b]." - del a[b] - -def getitem(a, b): - "Same as a[b]." - return a[b] - -def indexOf(a, b): - "Return the first index of b in a." - for i, j in enumerate(a): - if j is b or j == b: - return i - else: - raise ValueError('sequence.index(x): x not in sequence') - -def setitem(a, b, c): - "Same as a[b] = c." - a[b] = c - -def length_hint(obj, default=0): - """ - Return an estimate of the number of items in obj. - This is useful for presizing containers when building from an iterable. - - If the object supports len(), the result will be exact. Otherwise, it may - over- or under-estimate by an arbitrary amount. 
The result will be an - integer >= 0. - """ - if not isinstance(default, int): - msg = ("'%s' object cannot be interpreted as an integer" % - type(default).__name__) - raise TypeError(msg) - - try: - return len(obj) - except TypeError: - pass - - try: - hint = type(obj).__length_hint__ - except AttributeError: - return default - - try: - val = hint(obj) - except TypeError: - return default - if val is NotImplemented: - return default - if not isinstance(val, int): - msg = ('__length_hint__ must be integer, not %s' % - type(val).__name__) - raise TypeError(msg) - if val < 0: - msg = '__length_hint__() should return >= 0' - raise ValueError(msg) - return val - -# Other Operations ************************************************************# - -def call(obj, /, *args, **kwargs): - """Same as obj(*args, **kwargs).""" - return obj(*args, **kwargs) - -# Generalized Lookup Objects **************************************************# - -class attrgetter: - """ - Return a callable object that fetches the given attribute(s) from its operand. - After f = attrgetter('name'), the call f(r) returns r.name. - After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date). - After h = attrgetter('name.first', 'name.last'), the call h(r) returns - (r.name.first, r.name.last). - """ - __slots__ = ('_attrs', '_call') - - def __init__(self, attr, /, *attrs): - if not attrs: - if not isinstance(attr, str): - raise TypeError('attribute name must be a string') - self._attrs = (attr,) - names = attr.split('.') - def func(obj): - for name in names: - obj = getattr(obj, name) - return obj - self._call = func - else: - self._attrs = (attr,) + attrs - getters = tuple(map(attrgetter, self._attrs)) - def func(obj): - return tuple(getter(obj) for getter in getters) - self._call = func - - def __call__(self, obj, /): - return self._call(obj) - - def __repr__(self): - return '%s.%s(%s)' % (self.__class__.__module__, - self.__class__.__qualname__, - ', '.join(map(repr, self._attrs))) - - def __reduce__(self): - return self.__class__, self._attrs - -class itemgetter: - """ - Return a callable object that fetches the given item(s) from its operand. - After f = itemgetter(2), the call f(r) returns r[2]. - After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) - """ - __slots__ = ('_items', '_call') - - def __init__(self, item, /, *items): - if not items: - self._items = (item,) - def func(obj): - return obj[item] - self._call = func - else: - self._items = items = (item,) + items - def func(obj): - return tuple(obj[i] for i in items) - self._call = func - - def __call__(self, obj, /): - return self._call(obj) - - def __repr__(self): - return '%s.%s(%s)' % (self.__class__.__module__, - self.__class__.__name__, - ', '.join(map(repr, self._items))) - - def __reduce__(self): - return self.__class__, self._items - -class methodcaller: - """ - Return a callable object that calls the given method on its operand. - After f = methodcaller('name'), the call f(r) returns r.name(). - After g = methodcaller('name', 'date', foo=1), the call g(r) returns - r.name('date', foo=1). 
- """ - __slots__ = ('_name', '_args', '_kwargs') - - def __init__(self, name, /, *args, **kwargs): - self._name = name - if not isinstance(self._name, str): - raise TypeError('method name must be a string') - self._args = args - self._kwargs = kwargs - - def __call__(self, obj, /): - return getattr(obj, self._name)(*self._args, **self._kwargs) - - def __repr__(self): - args = [repr(self._name)] - args.extend(map(repr, self._args)) - args.extend('%s=%r' % (k, v) for k, v in self._kwargs.items()) - return '%s.%s(%s)' % (self.__class__.__module__, - self.__class__.__name__, - ', '.join(args)) - - def __reduce__(self): - if not self._kwargs: - return self.__class__, (self._name,) + self._args - else: - from functools import partial - return partial(self.__class__, self._name, **self._kwargs), self._args - - -# In-place Operations *********************************************************# - -def iadd(a, b): - "Same as a += b." - a += b - return a - -def iand(a, b): - "Same as a &= b." - a &= b - return a - -def iconcat(a, b): - "Same as a += b, for a and b sequences." - if not hasattr(a, '__getitem__'): - msg = "'%s' object can't be concatenated" % type(a).__name__ - raise TypeError(msg) - a += b - return a - -def ifloordiv(a, b): - "Same as a //= b." - a //= b - return a - -def ilshift(a, b): - "Same as a <<= b." - a <<= b - return a - -def imod(a, b): - "Same as a %= b." - a %= b - return a - -def imul(a, b): - "Same as a *= b." - a *= b - return a - -def imatmul(a, b): - "Same as a @= b." - a @= b - return a - -def ior(a, b): - "Same as a |= b." - a |= b - return a - -def ipow(a, b): - "Same as a **= b." - a **=b - return a - -def irshift(a, b): - "Same as a >>= b." - a >>= b - return a - -def isub(a, b): - "Same as a -= b." - a -= b - return a - -def itruediv(a, b): - "Same as a /= b." - a /= b - return a - -def ixor(a, b): - "Same as a ^= b." - a ^= b - return a - - -try: - from _operator import * -except ImportError: - pass -else: - from _operator import __doc__ - -# All of these "__func__ = func" assignments have to happen after importing -# from _operator to make sure they're set to the right function -__lt__ = lt -__le__ = le -__eq__ = eq -__ne__ = ne -__ge__ = ge -__gt__ = gt -__not__ = not_ -__abs__ = abs -__add__ = add -__and__ = and_ -__call__ = call -__floordiv__ = floordiv -__index__ = index -__inv__ = inv -__invert__ = invert -__lshift__ = lshift -__mod__ = mod -__mul__ = mul -__matmul__ = matmul -__neg__ = neg -__or__ = or_ -__pos__ = pos -__pow__ = pow -__rshift__ = rshift -__sub__ = sub -__truediv__ = truediv -__xor__ = xor -__concat__ = concat -__contains__ = contains -__delitem__ = delitem -__getitem__ = getitem -__setitem__ = setitem -__iadd__ = iadd -__iand__ = iand -__iconcat__ = iconcat -__ifloordiv__ = ifloordiv -__ilshift__ = ilshift -__imod__ = imod -__imul__ = imul -__imatmul__ = imatmul -__ior__ = ior -__ipow__ = ipow -__irshift__ = irshift -__isub__ = isub -__itruediv__ = itruediv -__ixor__ = ixor diff --git a/Python313_13_x64_Template/Lib/optparse.py b/Python313_13_x64_Template/Lib/optparse.py deleted file mode 100644 index 1c450c6f..00000000 --- a/Python313_13_x64_Template/Lib/optparse.py +++ /dev/null @@ -1,1681 +0,0 @@ -"""A powerful, extensible, and easy-to-use option parser. - -By Greg Ward - -Originally distributed as Optik. - -For support, use the optik-users@lists.sourceforge.net mailing list -(http://lists.sourceforge.net/lists/listinfo/optik-users). 
- -Simple usage example: - - from optparse import OptionParser - - parser = OptionParser() - parser.add_option("-f", "--file", dest="filename", - help="write report to FILE", metavar="FILE") - parser.add_option("-q", "--quiet", - action="store_false", dest="verbose", default=True, - help="don't print status messages to stdout") - - (options, args) = parser.parse_args() -""" - -__version__ = "1.5.3" - -__all__ = ['Option', - 'make_option', - 'SUPPRESS_HELP', - 'SUPPRESS_USAGE', - 'Values', - 'OptionContainer', - 'OptionGroup', - 'OptionParser', - 'HelpFormatter', - 'IndentedHelpFormatter', - 'TitledHelpFormatter', - 'OptParseError', - 'OptionError', - 'OptionConflictError', - 'OptionValueError', - 'BadOptionError', - 'check_choice'] - -__copyright__ = """ -Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved. -Copyright (c) 2002-2006 Python Software Foundation. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - * Neither the name of the author nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -""" - -import sys, os -import textwrap - -def _repr(self): - return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self) - - -# This file was generated from: -# Id: option_parser.py 527 2006-07-23 15:21:30Z greg -# Id: option.py 522 2006-06-11 16:22:03Z gward -# Id: help.py 527 2006-07-23 15:21:30Z greg -# Id: errors.py 509 2006-04-20 00:58:24Z gward - -try: - from gettext import gettext, ngettext -except ImportError: - def gettext(message): - return message - - def ngettext(singular, plural, n): - if n == 1: - return singular - return plural - -_ = gettext - - -class OptParseError (Exception): - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - - -class OptionError (OptParseError): - """ - Raised if an Option instance is created with invalid or - inconsistent arguments. - """ - - def __init__(self, msg, option): - self.msg = msg - self.option_id = str(option) - - def __str__(self): - if self.option_id: - return "option %s: %s" % (self.option_id, self.msg) - else: - return self.msg - -class OptionConflictError (OptionError): - """ - Raised if conflicting options are added to an OptionParser. 
- """ - -class OptionValueError (OptParseError): - """ - Raised if an invalid option value is encountered on the command - line. - """ - -class BadOptionError (OptParseError): - """ - Raised if an invalid option is seen on the command line. - """ - def __init__(self, opt_str): - self.opt_str = opt_str - - def __str__(self): - return _("no such option: %s") % self.opt_str - -class AmbiguousOptionError (BadOptionError): - """ - Raised if an ambiguous option is seen on the command line. - """ - def __init__(self, opt_str, possibilities): - BadOptionError.__init__(self, opt_str) - self.possibilities = possibilities - - def __str__(self): - return (_("ambiguous option: %s (%s?)") - % (self.opt_str, ", ".join(self.possibilities))) - - -class HelpFormatter: - - """ - Abstract base class for formatting option help. OptionParser - instances should use one of the HelpFormatter subclasses for - formatting help; by default IndentedHelpFormatter is used. - - Instance attributes: - parser : OptionParser - the controlling OptionParser instance - indent_increment : int - the number of columns to indent per nesting level - max_help_position : int - the maximum starting column for option help text - help_position : int - the calculated starting column for option help text; - initially the same as the maximum - width : int - total number of columns for output (pass None to constructor for - this value to be taken from the $COLUMNS environment variable) - level : int - current indentation level - current_indent : int - current indentation level (in columns) - help_width : int - number of columns available for option help text (calculated) - default_tag : str - text to replace with each option's default value, "%default" - by default. Set to false value to disable default value expansion. - option_strings : { Option : str } - maps Option instances to the snippet of help text explaining - the syntax of that option, e.g. "-h, --help" or - "-fFILE, --file=FILE" - _short_opt_fmt : str - format string controlling how short options with values are - printed in help text. Must be either "%s%s" ("-fFILE") or - "%s %s" ("-f FILE"), because those are the two syntaxes that - Optik supports. - _long_opt_fmt : str - similar but for long options; must be either "%s %s" ("--file FILE") - or "%s=%s" ("--file=FILE"). 
- """ - - NO_DEFAULT_VALUE = "none" - - def __init__(self, - indent_increment, - max_help_position, - width, - short_first): - self.parser = None - self.indent_increment = indent_increment - if width is None: - try: - width = int(os.environ['COLUMNS']) - except (KeyError, ValueError): - width = 80 - width -= 2 - self.width = width - self.help_position = self.max_help_position = \ - min(max_help_position, max(width - 20, indent_increment * 2)) - self.current_indent = 0 - self.level = 0 - self.help_width = None # computed later - self.short_first = short_first - self.default_tag = "%default" - self.option_strings = {} - self._short_opt_fmt = "%s %s" - self._long_opt_fmt = "%s=%s" - - def set_parser(self, parser): - self.parser = parser - - def set_short_opt_delimiter(self, delim): - if delim not in ("", " "): - raise ValueError( - "invalid metavar delimiter for short options: %r" % delim) - self._short_opt_fmt = "%s" + delim + "%s" - - def set_long_opt_delimiter(self, delim): - if delim not in ("=", " "): - raise ValueError( - "invalid metavar delimiter for long options: %r" % delim) - self._long_opt_fmt = "%s" + delim + "%s" - - def indent(self): - self.current_indent += self.indent_increment - self.level += 1 - - def dedent(self): - self.current_indent -= self.indent_increment - assert self.current_indent >= 0, "Indent decreased below 0." - self.level -= 1 - - def format_usage(self, usage): - raise NotImplementedError("subclasses must implement") - - def format_heading(self, heading): - raise NotImplementedError("subclasses must implement") - - def _format_text(self, text): - """ - Format a paragraph of free-form text for inclusion in the - help output at the current indentation level. - """ - text_width = max(self.width - self.current_indent, 11) - indent = " "*self.current_indent - return textwrap.fill(text, - text_width, - initial_indent=indent, - subsequent_indent=indent) - - def format_description(self, description): - if description: - return self._format_text(description) + "\n" - else: - return "" - - def format_epilog(self, epilog): - if epilog: - return "\n" + self._format_text(epilog) + "\n" - else: - return "" - - - def expand_default(self, option): - if self.parser is None or not self.default_tag: - return option.help - - default_value = self.parser.defaults.get(option.dest) - if default_value is NO_DEFAULT or default_value is None: - default_value = self.NO_DEFAULT_VALUE - - return option.help.replace(self.default_tag, str(default_value)) - - def format_option(self, option): - # The help for each option consists of two parts: - # * the opt strings and metavars - # eg. ("-x", or "-fFILENAME, --file=FILENAME") - # * the user-supplied help string - # eg. ("turn on expert mode", "read data from FILENAME") - # - # If possible, we write both of these on the same line: - # -x turn on expert mode - # - # But if the opt string list is too long, we put the help - # string on a second line, indented to the same column it would - # start in if it fit on the first line. 
- # -fFILENAME, --file=FILENAME - # read data from FILENAME - result = [] - opts = self.option_strings[option] - opt_width = self.help_position - self.current_indent - 2 - if len(opts) > opt_width: - opts = "%*s%s\n" % (self.current_indent, "", opts) - indent_first = self.help_position - else: # start help on same line as opts - opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) - indent_first = 0 - result.append(opts) - if option.help: - help_text = self.expand_default(option) - help_lines = textwrap.wrap(help_text, self.help_width) - result.append("%*s%s\n" % (indent_first, "", help_lines[0])) - result.extend(["%*s%s\n" % (self.help_position, "", line) - for line in help_lines[1:]]) - elif opts[-1] != "\n": - result.append("\n") - return "".join(result) - - def store_option_strings(self, parser): - self.indent() - max_len = 0 - for opt in parser.option_list: - strings = self.format_option_strings(opt) - self.option_strings[opt] = strings - max_len = max(max_len, len(strings) + self.current_indent) - self.indent() - for group in parser.option_groups: - for opt in group.option_list: - strings = self.format_option_strings(opt) - self.option_strings[opt] = strings - max_len = max(max_len, len(strings) + self.current_indent) - self.dedent() - self.dedent() - self.help_position = min(max_len + 2, self.max_help_position) - self.help_width = max(self.width - self.help_position, 11) - - def format_option_strings(self, option): - """Return a comma-separated list of option strings & metavariables.""" - if option.takes_value(): - metavar = option.metavar or option.dest.upper() - short_opts = [self._short_opt_fmt % (sopt, metavar) - for sopt in option._short_opts] - long_opts = [self._long_opt_fmt % (lopt, metavar) - for lopt in option._long_opts] - else: - short_opts = option._short_opts - long_opts = option._long_opts - - if self.short_first: - opts = short_opts + long_opts - else: - opts = long_opts + short_opts - - return ", ".join(opts) - -class IndentedHelpFormatter (HelpFormatter): - """Format help with indented section bodies. - """ - - def __init__(self, - indent_increment=2, - max_help_position=24, - width=None, - short_first=1): - HelpFormatter.__init__( - self, indent_increment, max_help_position, width, short_first) - - def format_usage(self, usage): - return _("Usage: %s\n") % usage - - def format_heading(self, heading): - return "%*s%s:\n" % (self.current_indent, "", heading) - - -class TitledHelpFormatter (HelpFormatter): - """Format help with underlined section headers. 
- """ - - def __init__(self, - indent_increment=0, - max_help_position=24, - width=None, - short_first=0): - HelpFormatter.__init__ ( - self, indent_increment, max_help_position, width, short_first) - - def format_usage(self, usage): - return "%s %s\n" % (self.format_heading(_("Usage")), usage) - - def format_heading(self, heading): - return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading)) - - -def _parse_num(val, type): - if val[:2].lower() == "0x": # hexadecimal - radix = 16 - elif val[:2].lower() == "0b": # binary - radix = 2 - val = val[2:] or "0" # have to remove "0b" prefix - elif val[:1] == "0": # octal - radix = 8 - else: # decimal - radix = 10 - - return type(val, radix) - -def _parse_int(val): - return _parse_num(val, int) - -_builtin_cvt = { "int" : (_parse_int, _("integer")), - "long" : (_parse_int, _("integer")), - "float" : (float, _("floating-point")), - "complex" : (complex, _("complex")) } - -def check_builtin(option, opt, value): - (cvt, what) = _builtin_cvt[option.type] - try: - return cvt(value) - except ValueError: - raise OptionValueError( - _("option %s: invalid %s value: %r") % (opt, what, value)) - -def check_choice(option, opt, value): - if value in option.choices: - return value - else: - choices = ", ".join(map(repr, option.choices)) - raise OptionValueError( - _("option %s: invalid choice: %r (choose from %s)") - % (opt, value, choices)) - -# Not supplying a default is different from a default of None, -# so we need an explicit "not supplied" value. -NO_DEFAULT = ("NO", "DEFAULT") - - -class Option: - """ - Instance attributes: - _short_opts : [string] - _long_opts : [string] - - action : string - type : string - dest : string - default : any - nargs : int - const : any - choices : [string] - callback : function - callback_args : (any*) - callback_kwargs : { string : any } - help : string - metavar : string - """ - - # The list of instance attributes that may be set through - # keyword args to the constructor. - ATTRS = ['action', - 'type', - 'dest', - 'default', - 'nargs', - 'const', - 'choices', - 'callback', - 'callback_args', - 'callback_kwargs', - 'help', - 'metavar'] - - # The set of actions allowed by option parsers. Explicitly listed - # here so the constructor can validate its arguments. - ACTIONS = ("store", - "store_const", - "store_true", - "store_false", - "append", - "append_const", - "count", - "callback", - "help", - "version") - - # The set of actions that involve storing a value somewhere; - # also listed just for constructor argument validation. (If - # the action is one of these, there must be a destination.) - STORE_ACTIONS = ("store", - "store_const", - "store_true", - "store_false", - "append", - "append_const", - "count") - - # The set of actions for which it makes sense to supply a value - # type, ie. which may consume an argument from the command line. - TYPED_ACTIONS = ("store", - "append", - "callback") - - # The set of actions which *require* a value type, ie. that - # always consume an argument from the command line. - ALWAYS_TYPED_ACTIONS = ("store", - "append") - - # The set of actions which take a 'const' attribute. - CONST_ACTIONS = ("store_const", - "append_const") - - # The set of known types for option parsers. Again, listed here for - # constructor argument validation. - TYPES = ("string", "int", "long", "float", "complex", "choice") - - # Dictionary of argument checking functions, which convert and - # validate option arguments according to the option type. 
- # - # Signature of checking functions is: - # check(option : Option, opt : string, value : string) -> any - # where - # option is the Option instance calling the checker - # opt is the actual option seen on the command-line - # (eg. "-a", "--file") - # value is the option argument seen on the command-line - # - # The return value should be in the appropriate Python type - # for option.type -- eg. an integer if option.type == "int". - # - # If no checker is defined for a type, arguments will be - # unchecked and remain strings. - TYPE_CHECKER = { "int" : check_builtin, - "long" : check_builtin, - "float" : check_builtin, - "complex": check_builtin, - "choice" : check_choice, - } - - - # CHECK_METHODS is a list of unbound method objects; they are called - # by the constructor, in order, after all attributes are - # initialized. The list is created and filled in later, after all - # the methods are actually defined. (I just put it here because I - # like to define and document all class attributes in the same - # place.) Subclasses that add another _check_*() method should - # define their own CHECK_METHODS list that adds their check method - # to those from this class. - CHECK_METHODS = None - - - # -- Constructor/initialization methods ---------------------------- - - def __init__(self, *opts, **attrs): - # Set _short_opts, _long_opts attrs from 'opts' tuple. - # Have to be set now, in case no option strings are supplied. - self._short_opts = [] - self._long_opts = [] - opts = self._check_opt_strings(opts) - self._set_opt_strings(opts) - - # Set all other attrs (action, type, etc.) from 'attrs' dict - self._set_attrs(attrs) - - # Check all the attributes we just set. There are lots of - # complicated interdependencies, but luckily they can be farmed - # out to the _check_*() methods listed in CHECK_METHODS -- which - # could be handy for subclasses! The one thing these all share - # is that they raise OptionError if they discover a problem. - for checker in self.CHECK_METHODS: - checker(self) - - def _check_opt_strings(self, opts): - # Filter out None because early versions of Optik had exactly - # one short option and one long option, either of which - # could be None. 
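# The constructor pipeline above can be exercised directly; a small
# doctest-style sketch using only what optparse exports:
#
#     >>> from optparse import Option, OptionError
#     >>> str(Option('-f', '--file', dest='filename'))
#     '-f/--file'
#     >>> try:
#     ...     Option('--bad', action='nonsense')
#     ... except OptionError as e:
#     ...     print(e)
#     option --bad: invalid action: 'nonsense'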
- opts = [opt for opt in opts if opt] - if not opts: - raise TypeError("at least one option string must be supplied") - return opts - - def _set_opt_strings(self, opts): - for opt in opts: - if len(opt) < 2: - raise OptionError( - "invalid option string %r: " - "must be at least two characters long" % opt, self) - elif len(opt) == 2: - if not (opt[0] == "-" and opt[1] != "-"): - raise OptionError( - "invalid short option string %r: " - "must be of the form -x, (x any non-dash char)" % opt, - self) - self._short_opts.append(opt) - else: - if not (opt[0:2] == "--" and opt[2] != "-"): - raise OptionError( - "invalid long option string %r: " - "must start with --, followed by non-dash" % opt, - self) - self._long_opts.append(opt) - - def _set_attrs(self, attrs): - for attr in self.ATTRS: - if attr in attrs: - setattr(self, attr, attrs[attr]) - del attrs[attr] - else: - if attr == 'default': - setattr(self, attr, NO_DEFAULT) - else: - setattr(self, attr, None) - if attrs: - attrs = sorted(attrs.keys()) - raise OptionError( - "invalid keyword arguments: %s" % ", ".join(attrs), - self) - - - # -- Constructor validation methods -------------------------------- - - def _check_action(self): - if self.action is None: - self.action = "store" - elif self.action not in self.ACTIONS: - raise OptionError("invalid action: %r" % self.action, self) - - def _check_type(self): - if self.type is None: - if self.action in self.ALWAYS_TYPED_ACTIONS: - if self.choices is not None: - # The "choices" attribute implies "choice" type. - self.type = "choice" - else: - # No type given? "string" is the most sensible default. - self.type = "string" - else: - # Allow type objects or builtin type conversion functions - # (int, str, etc.) as an alternative to their names. - if isinstance(self.type, type): - self.type = self.type.__name__ - - if self.type == "str": - self.type = "string" - - if self.type not in self.TYPES: - raise OptionError("invalid option type: %r" % self.type, self) - if self.action not in self.TYPED_ACTIONS: - raise OptionError( - "must not supply a type for action %r" % self.action, self) - - def _check_choice(self): - if self.type == "choice": - if self.choices is None: - raise OptionError( - "must supply a list of choices for type 'choice'", self) - elif not isinstance(self.choices, (tuple, list)): - raise OptionError( - "choices must be a list of strings ('%s' supplied)" - % str(type(self.choices)).split("'")[1], self) - elif self.choices is not None: - raise OptionError( - "must not supply choices for type %r" % self.type, self) - - def _check_dest(self): - # No destination given, and we need one for this action. The - # self.type check is for callbacks that take a value. - takes_value = (self.action in self.STORE_ACTIONS or - self.type is not None) - if self.dest is None and takes_value: - - # Glean a destination from the first long option string, - # or from the first short option string if no long options. - if self._long_opts: - # eg. 
"--foo-bar" -> "foo_bar" - self.dest = self._long_opts[0][2:].replace('-', '_') - else: - self.dest = self._short_opts[0][1] - - def _check_const(self): - if self.action not in self.CONST_ACTIONS and self.const is not None: - raise OptionError( - "'const' must not be supplied for action %r" % self.action, - self) - - def _check_nargs(self): - if self.action in self.TYPED_ACTIONS: - if self.nargs is None: - self.nargs = 1 - elif self.nargs is not None: - raise OptionError( - "'nargs' must not be supplied for action %r" % self.action, - self) - - def _check_callback(self): - if self.action == "callback": - if not callable(self.callback): - raise OptionError( - "callback not callable: %r" % self.callback, self) - if (self.callback_args is not None and - not isinstance(self.callback_args, tuple)): - raise OptionError( - "callback_args, if supplied, must be a tuple: not %r" - % self.callback_args, self) - if (self.callback_kwargs is not None and - not isinstance(self.callback_kwargs, dict)): - raise OptionError( - "callback_kwargs, if supplied, must be a dict: not %r" - % self.callback_kwargs, self) - else: - if self.callback is not None: - raise OptionError( - "callback supplied (%r) for non-callback option" - % self.callback, self) - if self.callback_args is not None: - raise OptionError( - "callback_args supplied for non-callback option", self) - if self.callback_kwargs is not None: - raise OptionError( - "callback_kwargs supplied for non-callback option", self) - - - CHECK_METHODS = [_check_action, - _check_type, - _check_choice, - _check_dest, - _check_const, - _check_nargs, - _check_callback] - - - # -- Miscellaneous methods ----------------------------------------- - - def __str__(self): - return "/".join(self._short_opts + self._long_opts) - - __repr__ = _repr - - def takes_value(self): - return self.type is not None - - def get_opt_string(self): - if self._long_opts: - return self._long_opts[0] - else: - return self._short_opts[0] - - - # -- Processing methods -------------------------------------------- - - def check_value(self, opt, value): - checker = self.TYPE_CHECKER.get(self.type) - if checker is None: - return value - else: - return checker(self, opt, value) - - def convert_value(self, opt, value): - if value is not None: - if self.nargs == 1: - return self.check_value(opt, value) - else: - return tuple([self.check_value(opt, v) for v in value]) - - def process(self, opt, value, values, parser): - - # First, convert the value(s) to the right type. Howl if any - # value(s) are bogus. - value = self.convert_value(opt, value) - - # And then take whatever action is expected of us. - # This is a separate method to make life easier for - # subclasses to add new actions. 
- return self.take_action( - self.action, self.dest, opt, value, values, parser) - - def take_action(self, action, dest, opt, value, values, parser): - if action == "store": - setattr(values, dest, value) - elif action == "store_const": - setattr(values, dest, self.const) - elif action == "store_true": - setattr(values, dest, True) - elif action == "store_false": - setattr(values, dest, False) - elif action == "append": - values.ensure_value(dest, []).append(value) - elif action == "append_const": - values.ensure_value(dest, []).append(self.const) - elif action == "count": - setattr(values, dest, values.ensure_value(dest, 0) + 1) - elif action == "callback": - args = self.callback_args or () - kwargs = self.callback_kwargs or {} - self.callback(self, opt, value, parser, *args, **kwargs) - elif action == "help": - parser.print_help() - parser.exit() - elif action == "version": - parser.print_version() - parser.exit() - else: - raise ValueError("unknown action %r" % self.action) - - return 1 - -# class Option - - -SUPPRESS_HELP = "SUPPRESS"+"HELP" -SUPPRESS_USAGE = "SUPPRESS"+"USAGE" - -class Values: - - def __init__(self, defaults=None): - if defaults: - for (attr, val) in defaults.items(): - setattr(self, attr, val) - - def __str__(self): - return str(self.__dict__) - - __repr__ = _repr - - def __eq__(self, other): - if isinstance(other, Values): - return self.__dict__ == other.__dict__ - elif isinstance(other, dict): - return self.__dict__ == other - else: - return NotImplemented - - def _update_careful(self, dict): - """ - Update the option values from an arbitrary dictionary, but only - use keys from dict that already have a corresponding attribute - in self. Any keys in dict without a corresponding attribute - are silently ignored. - """ - for attr in dir(self): - if attr in dict: - dval = dict[attr] - if dval is not None: - setattr(self, attr, dval) - - def _update_loose(self, dict): - """ - Update the option values from an arbitrary dictionary, - using all keys from the dictionary regardless of whether - they have a corresponding attribute in self or not. - """ - self.__dict__.update(dict) - - def _update(self, dict, mode): - if mode == "careful": - self._update_careful(dict) - elif mode == "loose": - self._update_loose(dict) - else: - raise ValueError("invalid update mode: %r" % mode) - - def read_module(self, modname, mode="careful"): - __import__(modname) - mod = sys.modules[modname] - self._update(vars(mod), mode) - - def read_file(self, filename, mode="careful"): - vars = {} - exec(open(filename).read(), vars) - self._update(vars, mode) - - def ensure_value(self, attr, value): - if not hasattr(self, attr) or getattr(self, attr) is None: - setattr(self, attr, value) - return getattr(self, attr) - - -class OptionContainer: - - """ - Abstract base class. - - Class attributes: - standard_option_list : [Option] - list of standard options that will be accepted by all instances - of this parser class (intended to be overridden by subclasses). - - Instance attributes: - option_list : [Option] - the list of Option objects contained by this OptionContainer - _short_opt : { string : Option } - dictionary mapping short option strings, eg. "-f" or "-X", - to the Option instances that implement them. If an Option - has multiple short option strings, it will appear in this - dictionary multiple times. [1] - _long_opt : { string : Option } - dictionary mapping long option strings, eg. "--file" or - "--exclude", to the Option instances that implement them. 
- Again, a given Option can occur multiple times in this - dictionary. [1] - defaults : { string : any } - dictionary mapping option destination names to default - values for each destination [1] - - [1] These mappings are common to (shared by) all components of the - controlling OptionParser, where they are initially created. - - """ - - def __init__(self, option_class, conflict_handler, description): - # Initialize the option list and related data structures. - # This method must be provided by subclasses, and it must - # initialize at least the following instance attributes: - # option_list, _short_opt, _long_opt, defaults. - self._create_option_list() - - self.option_class = option_class - self.set_conflict_handler(conflict_handler) - self.set_description(description) - - def _create_option_mappings(self): - # For use by OptionParser constructor -- create the main - # option mappings used by this OptionParser and all - # OptionGroups that it owns. - self._short_opt = {} # single letter -> Option instance - self._long_opt = {} # long option -> Option instance - self.defaults = {} # maps option dest -> default value - - - def _share_option_mappings(self, parser): - # For use by OptionGroup constructor -- use shared option - # mappings from the OptionParser that owns this OptionGroup. - self._short_opt = parser._short_opt - self._long_opt = parser._long_opt - self.defaults = parser.defaults - - def set_conflict_handler(self, handler): - if handler not in ("error", "resolve"): - raise ValueError("invalid conflict_resolution value %r" % handler) - self.conflict_handler = handler - - def set_description(self, description): - self.description = description - - def get_description(self): - return self.description - - - def destroy(self): - """see OptionParser.destroy().""" - del self._short_opt - del self._long_opt - del self.defaults - - - # -- Option-adding methods ----------------------------------------- - - def _check_conflict(self, option): - conflict_opts = [] - for opt in option._short_opts: - if opt in self._short_opt: - conflict_opts.append((opt, self._short_opt[opt])) - for opt in option._long_opts: - if opt in self._long_opt: - conflict_opts.append((opt, self._long_opt[opt])) - - if conflict_opts: - handler = self.conflict_handler - if handler == "error": - raise OptionConflictError( - "conflicting option string(s): %s" - % ", ".join([co[0] for co in conflict_opts]), - option) - elif handler == "resolve": - for (opt, c_option) in conflict_opts: - if opt.startswith("--"): - c_option._long_opts.remove(opt) - del self._long_opt[opt] - else: - c_option._short_opts.remove(opt) - del self._short_opt[opt] - if not (c_option._short_opts or c_option._long_opts): - c_option.container.option_list.remove(c_option) - - def add_option(self, *args, **kwargs): - """add_option(Option) - add_option(opt_str, ..., kwarg=val, ...) 
- """ - if isinstance(args[0], str): - option = self.option_class(*args, **kwargs) - elif len(args) == 1 and not kwargs: - option = args[0] - if not isinstance(option, Option): - raise TypeError("not an Option instance: %r" % option) - else: - raise TypeError("invalid arguments") - - self._check_conflict(option) - - self.option_list.append(option) - option.container = self - for opt in option._short_opts: - self._short_opt[opt] = option - for opt in option._long_opts: - self._long_opt[opt] = option - - if option.dest is not None: # option has a dest, we need a default - if option.default is not NO_DEFAULT: - self.defaults[option.dest] = option.default - elif option.dest not in self.defaults: - self.defaults[option.dest] = None - - return option - - def add_options(self, option_list): - for option in option_list: - self.add_option(option) - - # -- Option query/removal methods ---------------------------------- - - def get_option(self, opt_str): - return (self._short_opt.get(opt_str) or - self._long_opt.get(opt_str)) - - def has_option(self, opt_str): - return (opt_str in self._short_opt or - opt_str in self._long_opt) - - def remove_option(self, opt_str): - option = self._short_opt.get(opt_str) - if option is None: - option = self._long_opt.get(opt_str) - if option is None: - raise ValueError("no such option %r" % opt_str) - - for opt in option._short_opts: - del self._short_opt[opt] - for opt in option._long_opts: - del self._long_opt[opt] - option.container.option_list.remove(option) - - - # -- Help-formatting methods --------------------------------------- - - def format_option_help(self, formatter): - if not self.option_list: - return "" - result = [] - for option in self.option_list: - if not option.help is SUPPRESS_HELP: - result.append(formatter.format_option(option)) - return "".join(result) - - def format_description(self, formatter): - return formatter.format_description(self.get_description()) - - def format_help(self, formatter): - result = [] - if self.description: - result.append(self.format_description(formatter)) - if self.option_list: - result.append(self.format_option_help(formatter)) - return "\n".join(result) - - -class OptionGroup (OptionContainer): - - def __init__(self, parser, title, description=None): - self.parser = parser - OptionContainer.__init__( - self, parser.option_class, parser.conflict_handler, description) - self.title = title - - def _create_option_list(self): - self.option_list = [] - self._share_option_mappings(self.parser) - - def set_title(self, title): - self.title = title - - def destroy(self): - """see OptionParser.destroy().""" - OptionContainer.destroy(self) - del self.option_list - - # -- Help-formatting methods --------------------------------------- - - def format_help(self, formatter): - result = formatter.format_heading(self.title) - formatter.indent() - result += OptionContainer.format_help(self, formatter) - formatter.dedent() - return result - - -class OptionParser (OptionContainer): - - """ - Class attributes: - standard_option_list : [Option] - list of standard options that will be accepted by all instances - of this parser class (intended to be overridden by subclasses). - - Instance attributes: - usage : string - a usage string for your program. Before it is displayed - to the user, "%prog" will be expanded to the name of - your program (self.prog or os.path.basename(sys.argv[0])). - prog : string - the name of the current program (to override - os.path.basename(sys.argv[0])). 
- description : string - A paragraph of text giving a brief overview of your program. - optparse reformats this paragraph to fit the current terminal - width and prints it when the user requests help (after usage, - but before the list of options). - epilog : string - paragraph of help text to print after option help - - option_groups : [OptionGroup] - list of option groups in this parser (option groups are - irrelevant for parsing the command-line, but very useful - for generating help) - - allow_interspersed_args : bool = true - if true, positional arguments may be interspersed with options. - Assuming -a and -b each take a single argument, the command-line - -ablah foo bar -bboo baz - will be interpreted the same as - -ablah -bboo -- foo bar baz - If this flag were false, that command line would be interpreted as - -ablah -- foo bar -bboo baz - -- ie. we stop processing options as soon as we see the first - non-option argument. (This is the tradition followed by - Python's getopt module, Perl's Getopt::Std, and other argument- - parsing libraries, but it is generally annoying to users.) - - process_default_values : bool = true - if true, option default values are processed similarly to option - values from the command line: that is, they are passed to the - type-checking function for the option's type (as long as the - default value is a string). (This really only matters if you - have defined custom types; see SF bug #955889.) Set it to false - to restore the behaviour of Optik 1.4.1 and earlier. - - rargs : [string] - the argument list currently being parsed. Only set when - parse_args() is active, and continually trimmed down as - we consume arguments. Mainly there for the benefit of - callback options. - largs : [string] - the list of leftover arguments that we have skipped while - parsing options. If allow_interspersed_args is false, this - list is always empty. - values : Values - the set of option values currently being accumulated. Only - set when parse_args() is active. Also mainly for callbacks. - - Because of the 'rargs', 'largs', and 'values' attributes, - OptionParser is not thread-safe. If, for some perverse reason, you - need to parse command-line arguments simultaneously in different - threads, use different OptionParser instances. - - """ - - standard_option_list = [] - - def __init__(self, - usage=None, - option_list=None, - option_class=Option, - version=None, - conflict_handler="error", - description=None, - formatter=None, - add_help_option=True, - prog=None, - epilog=None): - OptionContainer.__init__( - self, option_class, conflict_handler, description) - self.set_usage(usage) - self.prog = prog - self.version = version - self.allow_interspersed_args = True - self.process_default_values = True - if formatter is None: - formatter = IndentedHelpFormatter() - self.formatter = formatter - self.formatter.set_parser(self) - self.epilog = epilog - - # Populate the option list; initial sources are the - # standard_option_list class attribute, the 'option_list' - # argument, and (if applicable) the _add_version_option() and - # _add_help_option() methods. - self._populate_option_list(option_list, - add_help=add_help_option) - - self._init_parsing_state() - - - def destroy(self): - """ - Declare that you are done with this OptionParser. This cleans up - reference cycles so the OptionParser (and all objects referenced by - it) can be garbage-collected promptly. After calling destroy(), the - OptionParser is unusable. 
- """ - OptionContainer.destroy(self) - for group in self.option_groups: - group.destroy() - del self.option_list - del self.option_groups - del self.formatter - - - # -- Private methods ----------------------------------------------- - # (used by our or OptionContainer's constructor) - - def _create_option_list(self): - self.option_list = [] - self.option_groups = [] - self._create_option_mappings() - - def _add_help_option(self): - self.add_option("-h", "--help", - action="help", - help=_("show this help message and exit")) - - def _add_version_option(self): - self.add_option("--version", - action="version", - help=_("show program's version number and exit")) - - def _populate_option_list(self, option_list, add_help=True): - if self.standard_option_list: - self.add_options(self.standard_option_list) - if option_list: - self.add_options(option_list) - if self.version: - self._add_version_option() - if add_help: - self._add_help_option() - - def _init_parsing_state(self): - # These are set in parse_args() for the convenience of callbacks. - self.rargs = None - self.largs = None - self.values = None - - - # -- Simple modifier methods --------------------------------------- - - def set_usage(self, usage): - if usage is None: - self.usage = _("%prog [options]") - elif usage is SUPPRESS_USAGE: - self.usage = None - # For backwards compatibility with Optik 1.3 and earlier. - elif usage.lower().startswith("usage: "): - self.usage = usage[7:] - else: - self.usage = usage - - def enable_interspersed_args(self): - """Set parsing to not stop on the first non-option, allowing - interspersing switches with command arguments. This is the - default behavior. See also disable_interspersed_args() and the - class documentation description of the attribute - allow_interspersed_args.""" - self.allow_interspersed_args = True - - def disable_interspersed_args(self): - """Set parsing to stop on the first non-option. Use this if - you have a command processor which runs another command that - has options of its own and you want to make sure these options - don't get confused. - """ - self.allow_interspersed_args = False - - def set_process_default_values(self, process): - self.process_default_values = process - - def set_default(self, dest, value): - self.defaults[dest] = value - - def set_defaults(self, **kwargs): - self.defaults.update(kwargs) - - def _get_all_options(self): - options = self.option_list[:] - for group in self.option_groups: - options.extend(group.option_list) - return options - - def get_default_values(self): - if not self.process_default_values: - # Old, pre-Optik 1.5 behaviour. 
- return Values(self.defaults) - - defaults = self.defaults.copy() - for option in self._get_all_options(): - default = defaults.get(option.dest) - if isinstance(default, str): - opt_str = option.get_opt_string() - defaults[option.dest] = option.check_value(opt_str, default) - - return Values(defaults) - - - # -- OptionGroup methods ------------------------------------------- - - def add_option_group(self, *args, **kwargs): - # XXX lots of overlap with OptionContainer.add_option() - if isinstance(args[0], str): - group = OptionGroup(self, *args, **kwargs) - elif len(args) == 1 and not kwargs: - group = args[0] - if not isinstance(group, OptionGroup): - raise TypeError("not an OptionGroup instance: %r" % group) - if group.parser is not self: - raise ValueError("invalid OptionGroup (wrong parser)") - else: - raise TypeError("invalid arguments") - - self.option_groups.append(group) - return group - - def get_option_group(self, opt_str): - option = (self._short_opt.get(opt_str) or - self._long_opt.get(opt_str)) - if option and option.container is not self: - return option.container - return None - - - # -- Option-parsing methods ---------------------------------------- - - def _get_args(self, args): - if args is None: - return sys.argv[1:] - else: - return args[:] # don't modify caller's list - - def parse_args(self, args=None, values=None): - """ - parse_args(args : [string] = sys.argv[1:], - values : Values = None) - -> (values : Values, args : [string]) - - Parse the command-line options found in 'args' (default: - sys.argv[1:]). Any errors result in a call to 'error()', which - by default prints the usage message to stderr and calls - sys.exit() with an error message. On success returns a pair - (values, args) where 'values' is a Values instance (with all - your option values) and 'args' is the list of arguments left - over after parsing options. - """ - rargs = self._get_args(args) - if values is None: - values = self.get_default_values() - - # Store the halves of the argument list as attributes for the - # convenience of callbacks: - # rargs - # the rest of the command-line (the "r" stands for - # "remaining" or "right-hand") - # largs - # the leftover arguments -- ie. what's left after removing - # options and their arguments (the "l" stands for "leftover" - # or "left-hand") - self.rargs = rargs - self.largs = largs = [] - self.values = values - - try: - stop = self._process_args(largs, rargs, values) - except (BadOptionError, OptionValueError) as err: - self.error(str(err)) - - args = largs + rargs - return self.check_values(values, args) - - def check_values(self, values, args): - """ - check_values(values : Values, args : [string]) - -> (values : Values, args : [string]) - - Check that the supplied option values and leftover arguments are - valid. Returns the option values and leftover arguments - (possibly adjusted, possibly completely new -- whatever you - like). Default implementation just returns the passed-in - values; subclasses may override as desired. - """ - return (values, args) - - def _process_args(self, largs, rargs, values): - """_process_args(largs : [string], - rargs : [string], - values : Values) - - Process command-line arguments and populate 'values', consuming - options and arguments from 'rargs'. If 'allow_interspersed_args' is - false, stop at the first non-option argument. If true, accumulate any - interspersed non-option arguments in 'largs'. 
- """ - while rargs: - arg = rargs[0] - # We handle bare "--" explicitly, and bare "-" is handled by the - # standard arg handler since the short arg case ensures that the - # len of the opt string is greater than 1. - if arg == "--": - del rargs[0] - return - elif arg[0:2] == "--": - # process a single long option (possibly with value(s)) - self._process_long_opt(rargs, values) - elif arg[:1] == "-" and len(arg) > 1: - # process a cluster of short options (possibly with - # value(s) for the last one only) - self._process_short_opts(rargs, values) - elif self.allow_interspersed_args: - largs.append(arg) - del rargs[0] - else: - return # stop now, leave this arg in rargs - - # Say this is the original argument list: - # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] - # ^ - # (we are about to process arg(i)). - # - # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of - # [arg0, ..., arg(i-1)] (any options and their arguments will have - # been removed from largs). - # - # The while loop will usually consume 1 or more arguments per pass. - # If it consumes 1 (eg. arg is an option that takes no arguments), - # then after _process_arg() is done the situation is: - # - # largs = subset of [arg0, ..., arg(i)] - # rargs = [arg(i+1), ..., arg(N-1)] - # - # If allow_interspersed_args is false, largs will always be - # *empty* -- still a subset of [arg0, ..., arg(i-1)], but - # not a very interesting subset! - - def _match_long_opt(self, opt): - """_match_long_opt(opt : string) -> string - - Determine which long option string 'opt' matches, ie. which one - it is an unambiguous abbreviation for. Raises BadOptionError if - 'opt' doesn't unambiguously match any long option string. - """ - return _match_abbrev(opt, self._long_opt) - - def _process_long_opt(self, rargs, values): - arg = rargs.pop(0) - - # Value explicitly attached to arg? Pretend it's the next - # argument. - if "=" in arg: - (opt, next_arg) = arg.split("=", 1) - rargs.insert(0, next_arg) - had_explicit_value = True - else: - opt = arg - had_explicit_value = False - - opt = self._match_long_opt(opt) - option = self._long_opt[opt] - if option.takes_value(): - nargs = option.nargs - if len(rargs) < nargs: - self.error(ngettext( - "%(option)s option requires %(number)d argument", - "%(option)s option requires %(number)d arguments", - nargs) % {"option": opt, "number": nargs}) - elif nargs == 1: - value = rargs.pop(0) - else: - value = tuple(rargs[0:nargs]) - del rargs[0:nargs] - - elif had_explicit_value: - self.error(_("%s option does not take a value") % opt) - - else: - value = None - - option.process(opt, value, values, self) - - def _process_short_opts(self, rargs, values): - arg = rargs.pop(0) - stop = False - i = 1 - for ch in arg[1:]: - opt = "-" + ch - option = self._short_opt.get(opt) - i += 1 # we have consumed a character - - if not option: - raise BadOptionError(opt) - if option.takes_value(): - # Any characters left in arg? Pretend they're the - # next arg, and stop consuming characters of arg. 
- if i < len(arg): - rargs.insert(0, arg[i:]) - stop = True - - nargs = option.nargs - if len(rargs) < nargs: - self.error(ngettext( - "%(option)s option requires %(number)d argument", - "%(option)s option requires %(number)d arguments", - nargs) % {"option": opt, "number": nargs}) - elif nargs == 1: - value = rargs.pop(0) - else: - value = tuple(rargs[0:nargs]) - del rargs[0:nargs] - - else: # option doesn't take a value - value = None - - option.process(opt, value, values, self) - - if stop: - break - - - # -- Feedback methods ---------------------------------------------- - - def get_prog_name(self): - if self.prog is None: - return os.path.basename(sys.argv[0]) - else: - return self.prog - - def expand_prog_name(self, s): - return s.replace("%prog", self.get_prog_name()) - - def get_description(self): - return self.expand_prog_name(self.description) - - def exit(self, status=0, msg=None): - if msg: - sys.stderr.write(msg) - sys.exit(status) - - def error(self, msg): - """error(msg : string) - - Print a usage message incorporating 'msg' to stderr and exit. - If you override this in a subclass, it should not return -- it - should either exit or raise an exception. - """ - self.print_usage(sys.stderr) - self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg)) - - def get_usage(self): - if self.usage: - return self.formatter.format_usage( - self.expand_prog_name(self.usage)) - else: - return "" - - def print_usage(self, file=None): - """print_usage(file : file = stdout) - - Print the usage message for the current program (self.usage) to - 'file' (default stdout). Any occurrence of the string "%prog" in - self.usage is replaced with the name of the current program - (basename of sys.argv[0]). Does nothing if self.usage is empty - or not defined. - """ - if self.usage: - print(self.get_usage(), file=file) - - def get_version(self): - if self.version: - return self.expand_prog_name(self.version) - else: - return "" - - def print_version(self, file=None): - """print_version(file : file = stdout) - - Print the version message for this program (self.version) to - 'file' (default stdout). As with print_usage(), any occurrence - of "%prog" in self.version is replaced by the current program's - name. Does nothing if self.version is empty or undefined. 
- """ - if self.version: - print(self.get_version(), file=file) - - def format_option_help(self, formatter=None): - if formatter is None: - formatter = self.formatter - formatter.store_option_strings(self) - result = [] - result.append(formatter.format_heading(_("Options"))) - formatter.indent() - if self.option_list: - result.append(OptionContainer.format_option_help(self, formatter)) - result.append("\n") - for group in self.option_groups: - result.append(group.format_help(formatter)) - result.append("\n") - formatter.dedent() - # Drop the last "\n", or the header if no options or option groups: - return "".join(result[:-1]) - - def format_epilog(self, formatter): - return formatter.format_epilog(self.epilog) - - def format_help(self, formatter=None): - if formatter is None: - formatter = self.formatter - result = [] - if self.usage: - result.append(self.get_usage() + "\n") - if self.description: - result.append(self.format_description(formatter) + "\n") - result.append(self.format_option_help(formatter)) - result.append(self.format_epilog(formatter)) - return "".join(result) - - def print_help(self, file=None): - """print_help(file : file = stdout) - - Print an extended help message, listing all options and any - help text provided with them, to 'file' (default stdout). - """ - if file is None: - file = sys.stdout - file.write(self.format_help()) - -# class OptionParser - - -def _match_abbrev(s, wordmap): - """_match_abbrev(s : string, wordmap : {string : Option}) -> string - - Return the string key in 'wordmap' for which 's' is an unambiguous - abbreviation. If 's' is found to be ambiguous or doesn't match any of - 'words', raise BadOptionError. - """ - # Is there an exact match? - if s in wordmap: - return s - else: - # Isolate all words with s as a prefix. - possibilities = [word for word in wordmap.keys() - if word.startswith(s)] - # No exact match, so there had better be just one possibility. - if len(possibilities) == 1: - return possibilities[0] - elif not possibilities: - raise BadOptionError(s) - else: - # More than one possible completion: ambiguous prefix. - possibilities.sort() - raise AmbiguousOptionError(s, possibilities) - - -# Some day, there might be many Option classes. As of Optik 1.3, the -# preferred way to instantiate Options is indirectly, via make_option(), -# which will become a factory function when there are many Option -# classes. -make_option = Option diff --git a/Python313_13_x64_Template/Lib/os.py b/Python313_13_x64_Template/Lib/os.py deleted file mode 100644 index 1b1645f4..00000000 --- a/Python313_13_x64_Template/Lib/os.py +++ /dev/null @@ -1,1184 +0,0 @@ -r"""OS routines for NT or Posix depending on what system we're on. - -This exports: - - all functions from posix or nt, e.g. unlink, stat, etc. - - os.path is either posixpath or ntpath - - os.name is either 'posix' or 'nt' - - os.curdir is a string representing the current directory (always '.') - - os.pardir is a string representing the parent directory (always '..') - - os.sep is the (or a most common) pathname separator ('/' or '\\') - - os.extsep is the extension separator (always '.') - - os.altsep is the alternate pathname separator (None or '/') - - os.pathsep is the component separator used in $PATH etc - - os.linesep is the line separator in text files ('\n' or '\r\n') - - os.defpath is the default search path for executables - - os.devnull is the file path of the null device ('/dev/null', etc.) 
- -Programs that import and use 'os' stand a better chance of being -portable between different platforms. Of course, they must then -only use functions that are defined by all platforms (e.g., unlink -and opendir), and leave all pathname manipulation to os.path -(e.g., split and join). -""" - -#' -import abc -import sys -import stat as st - -from _collections_abc import _check_methods - -GenericAlias = type(list[int]) - -_names = sys.builtin_module_names - -# Note: more names are added to __all__ later. -__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", - "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", - "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", - "extsep"] - -def _exists(name): - return name in globals() - -def _get_exports_list(module): - try: - return list(module.__all__) - except AttributeError: - return [n for n in dir(module) if n[0] != '_'] - -# Any new dependencies of the os module and/or changes in path separator -# requires updating importlib as well. -if 'posix' in _names: - name = 'posix' - linesep = '\n' - from posix import * - try: - from posix import _exit - __all__.append('_exit') - except ImportError: - pass - import posixpath as path - - try: - from posix import _have_functions - except ImportError: - pass - - import posix - __all__.extend(_get_exports_list(posix)) - del posix - -elif 'nt' in _names: - name = 'nt' - linesep = '\r\n' - from nt import * - try: - from nt import _exit - __all__.append('_exit') - except ImportError: - pass - import ntpath as path - - import nt - __all__.extend(_get_exports_list(nt)) - del nt - - try: - from nt import _have_functions - except ImportError: - pass - -else: - raise ImportError('no os specific module found') - -sys.modules['os.path'] = path -from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, - devnull) - -del _names - - -if _exists("_have_functions"): - _globals = globals() - def _add(str, fn): - if (fn in _globals) and (str in _have_functions): - _set.add(_globals[fn]) - - _set = set() - _add("HAVE_FACCESSAT", "access") - _add("HAVE_FCHMODAT", "chmod") - _add("HAVE_FCHOWNAT", "chown") - _add("HAVE_FSTATAT", "stat") - _add("HAVE_LSTAT", "lstat") - _add("HAVE_FUTIMESAT", "utime") - _add("HAVE_LINKAT", "link") - _add("HAVE_MKDIRAT", "mkdir") - _add("HAVE_MKFIFOAT", "mkfifo") - _add("HAVE_MKNODAT", "mknod") - _add("HAVE_OPENAT", "open") - _add("HAVE_READLINKAT", "readlink") - _add("HAVE_RENAMEAT", "rename") - _add("HAVE_SYMLINKAT", "symlink") - _add("HAVE_UNLINKAT", "unlink") - _add("HAVE_UNLINKAT", "rmdir") - _add("HAVE_UTIMENSAT", "utime") - supports_dir_fd = _set - - _set = set() - _add("HAVE_FACCESSAT", "access") - supports_effective_ids = _set - - _set = set() - _add("HAVE_FCHDIR", "chdir") - _add("HAVE_FCHMOD", "chmod") - _add("MS_WINDOWS", "chmod") - _add("HAVE_FCHOWN", "chown") - _add("HAVE_FDOPENDIR", "listdir") - _add("HAVE_FDOPENDIR", "scandir") - _add("HAVE_FEXECVE", "execve") - _set.add(stat) # fstat always works - _add("HAVE_FTRUNCATE", "truncate") - _add("HAVE_FUTIMENS", "utime") - _add("HAVE_FUTIMES", "utime") - _add("HAVE_FPATHCONF", "pathconf") - if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3 - _add("HAVE_FSTATVFS", "statvfs") - supports_fd = _set - - _set = set() - _add("HAVE_FACCESSAT", "access") - # Some platforms don't support lchmod(). Often the function exists - # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP. - # (No, I don't know why that's a good design.) 
./configure will detect - # this and reject it--so HAVE_LCHMOD still won't be defined on such - # platforms. This is Very Helpful. - # - # However, sometimes platforms without a working lchmod() *do* have - # fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15, - # OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes - # it behave like lchmod(). So in theory it would be a suitable - # replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s - # flag doesn't work *either*. Sadly ./configure isn't sophisticated - # enough to detect this condition--it only determines whether or not - # fchmodat() minimally works. - # - # Therefore we simply ignore fchmodat() when deciding whether or not - # os.chmod supports follow_symlinks. Just checking lchmod() is - # sufficient. After all--if you have a working fchmodat(), your - # lchmod() almost certainly works too. - # - # _add("HAVE_FCHMODAT", "chmod") - _add("HAVE_FCHOWNAT", "chown") - _add("HAVE_FSTATAT", "stat") - _add("HAVE_LCHFLAGS", "chflags") - _add("HAVE_LCHMOD", "chmod") - _add("MS_WINDOWS", "chmod") - if _exists("lchown"): # mac os x10.3 - _add("HAVE_LCHOWN", "chown") - _add("HAVE_LINKAT", "link") - _add("HAVE_LUTIMES", "utime") - _add("HAVE_LSTAT", "stat") - _add("HAVE_FSTATAT", "stat") - _add("HAVE_UTIMENSAT", "utime") - _add("MS_WINDOWS", "stat") - supports_follow_symlinks = _set - - del _set - del _have_functions - del _globals - del _add - - -# Python uses fixed values for the SEEK_ constants; they are mapped -# to native constants if necessary in posixmodule.c -# Other possible SEEK values are directly imported from posixmodule.c -SEEK_SET = 0 -SEEK_CUR = 1 -SEEK_END = 2 - -# Super directory utilities. -# (Inspired by Eric Raymond; the doc strings are mostly his) - -def makedirs(name, mode=0o777, exist_ok=False): - """makedirs(name [, mode=0o777][, exist_ok=False]) - - Super-mkdir; create a leaf directory and all intermediate ones. Works like - mkdir, except that any intermediate path segment (not just the rightmost) - will be created if it does not exist. If the target directory already - exists, raise an OSError if exist_ok is False. Otherwise no exception is - raised. This is recursive. - - """ - head, tail = path.split(name) - if not tail: - head, tail = path.split(head) - if head and tail and not path.exists(head): - try: - makedirs(head, exist_ok=exist_ok) - except FileExistsError: - # Defeats race condition when another thread created the path - pass - cdir = curdir - if isinstance(tail, bytes): - cdir = bytes(curdir, 'ASCII') - if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists - return - try: - mkdir(name, mode) - except OSError: - # Cannot rely on checking for EEXIST, since the operating system - # could give priority to other errors like EACCES or EROFS - if not exist_ok or not path.isdir(name): - raise - -def removedirs(name): - """removedirs(name) - - Super-rmdir; remove a leaf directory and all empty intermediate - ones. Works like rmdir except that, if the leaf directory is - successfully removed, directories corresponding to rightmost path - segments will be pruned away until either the whole path is - consumed or an error occurs. Errors during this latter phase are - ignored -- they generally mean that a directory was not empty. 
- - """ - rmdir(name) - head, tail = path.split(name) - if not tail: - head, tail = path.split(head) - while head and tail: - try: - rmdir(head) - except OSError: - break - head, tail = path.split(head) - -def renames(old, new): - """renames(old, new) - - Super-rename; create directories as necessary and delete any left - empty. Works like rename, except creation of any intermediate - directories needed to make the new pathname good is attempted - first. After the rename, directories corresponding to rightmost - path segments of the old name will be pruned until either the - whole path is consumed or a nonempty directory is found. - - Note: this function can fail with the new directory structure made - if you lack permissions needed to unlink the leaf directory or - file. - - """ - head, tail = path.split(new) - if head and tail and not path.exists(head): - makedirs(head) - rename(old, new) - head, tail = path.split(old) - if head and tail: - try: - removedirs(head) - except OSError: - pass - -__all__.extend(["makedirs", "removedirs", "renames"]) - -# Private sentinel that makes walk() classify all symlinks and junctions as -# regular files. -_walk_symlinks_as_files = object() - -def walk(top, topdown=True, onerror=None, followlinks=False): - """Directory tree generator. - - For each directory in the directory tree rooted at top (including top - itself, but excluding '.' and '..'), yields a 3-tuple - - dirpath, dirnames, filenames - - dirpath is a string, the path to the directory. dirnames is a list of - the names of the subdirectories in dirpath (including symlinks to directories, - and excluding '.' and '..'). - filenames is a list of the names of the non-directory files in dirpath. - Note that the names in the lists are just names, with no path components. - To get a full path (which begins with top) to a file or directory in - dirpath, do os.path.join(dirpath, name). - - If optional arg 'topdown' is true or not specified, the triple for a - directory is generated before the triples for any of its subdirectories - (directories are generated top down). If topdown is false, the triple - for a directory is generated after the triples for all of its - subdirectories (directories are generated bottom up). - - When topdown is true, the caller can modify the dirnames list in-place - (e.g., via del or slice assignment), and walk will only recurse into the - subdirectories whose names remain in dirnames; this can be used to prune the - search, or to impose a specific order of visiting. Modifying dirnames when - topdown is false has no effect on the behavior of os.walk(), since the - directories in dirnames have already been generated by the time dirnames - itself is generated. No matter the value of topdown, the list of - subdirectories is retrieved before the tuples for the directory and its - subdirectories are generated. - - By default errors from the os.scandir() call are ignored. If - optional arg 'onerror' is specified, it should be a function; it - will be called with one argument, an OSError instance. It can - report the error to continue with the walk, or raise the exception - to abort the walk. Note that the filename is available as the - filename attribute of the exception object. - - By default, os.walk does not follow symbolic links to subdirectories on - systems that support them. In order to get this functionality, set the - optional argument 'followlinks' to true. 
- - Caution: if you pass a relative pathname for top, don't change the - current working directory between resumptions of walk. walk never - changes the current directory, and assumes that the client doesn't - either. - - Example: - - import os - from os.path import join, getsize - for root, dirs, files in os.walk('python/Lib/xml'): - print(root, "consumes", end=" ") - print(sum(getsize(join(root, name)) for name in files), end=" ") - print("bytes in", len(files), "non-directory files") - if '__pycache__' in dirs: - dirs.remove('__pycache__') # don't visit __pycache__ directories - - """ - sys.audit("os.walk", top, topdown, onerror, followlinks) - - stack = [fspath(top)] - islink, join = path.islink, path.join - while stack: - top = stack.pop() - if isinstance(top, tuple): - yield top - continue - - dirs = [] - nondirs = [] - walk_dirs = [] - - # We may not have read permission for top, in which case we can't - # get a list of the files the directory contains. - # We suppress the exception here, rather than blow up for a - # minor reason when (say) a thousand readable directories are still - # left to visit. - try: - scandir_it = scandir(top) - except OSError as error: - if onerror is not None: - onerror(error) - continue - - cont = False - with scandir_it: - while True: - try: - try: - entry = next(scandir_it) - except StopIteration: - break - except OSError as error: - if onerror is not None: - onerror(error) - cont = True - break - - try: - if followlinks is _walk_symlinks_as_files: - is_dir = entry.is_dir(follow_symlinks=False) and not entry.is_junction() - else: - is_dir = entry.is_dir() - except OSError: - # If is_dir() raises an OSError, consider the entry not to - # be a directory, same behaviour as os.path.isdir(). - is_dir = False - - if is_dir: - dirs.append(entry.name) - else: - nondirs.append(entry.name) - - if not topdown and is_dir: - # Bottom-up: traverse into sub-directory, but exclude - # symlinks to directories if followlinks is False - if followlinks: - walk_into = True - else: - try: - is_symlink = entry.is_symlink() - except OSError: - # If is_symlink() raises an OSError, consider the - # entry not to be a symbolic link, same behaviour - # as os.path.islink(). - is_symlink = False - walk_into = not is_symlink - - if walk_into: - walk_dirs.append(entry.path) - if cont: - continue - - if topdown: - # Yield before sub-directory traversal if going top down - yield top, dirs, nondirs - # Traverse into sub-directories - for dirname in reversed(dirs): - new_path = join(top, dirname) - # bpo-23605: os.path.islink() is used instead of caching - # entry.is_symlink() result during the loop on os.scandir() because - # the caller can replace the directory entry during the "yield" - # above. - if followlinks or not islink(new_path): - stack.append(new_path) - else: - # Yield after sub-directory traversal if going bottom up - stack.append((top, dirs, nondirs)) - # Traverse into sub-directories - for new_path in reversed(walk_dirs): - stack.append(new_path) - -__all__.append("walk") - -if {open, stat} <= supports_dir_fd and {scandir, stat} <= supports_fd: - - def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None): - """Directory tree generator. - - This behaves exactly like walk(), except that it yields a 4-tuple - - dirpath, dirnames, filenames, dirfd - - `dirpath`, `dirnames` and `filenames` are identical to walk() output, - and `dirfd` is a file descriptor referring to the directory `dirpath`.
- - The advantage of fwalk() over walk() is that it's safe against symlink - races (when follow_symlinks is False). - - If dir_fd is not None, it should be a file descriptor open to a directory, - and top should be relative; top will then be relative to that directory. - (dir_fd is always supported for fwalk.) - - Caution: - Since fwalk() yields file descriptors, those are only valid until the - next iteration step, so you should dup() them if you want to keep them - for a longer period. - - Example: - - import os - for root, dirs, files, rootfd in os.fwalk('python/Lib/xml'): - print(root, "consumes", end="") - print(sum(os.stat(name, dir_fd=rootfd).st_size for name in files), - end="") - print("bytes in", len(files), "non-directory files") - if '__pycache__' in dirs: - dirs.remove('__pycache__') # don't visit __pycache__ directories - """ - sys.audit("os.fwalk", top, topdown, onerror, follow_symlinks, dir_fd) - top = fspath(top) - stack = [(_fwalk_walk, (True, dir_fd, top, top, None))] - isbytes = isinstance(top, bytes) - try: - while stack: - yield from _fwalk(stack, isbytes, topdown, onerror, follow_symlinks) - finally: - # Close any file descriptors still on the stack. - while stack: - action, value = stack.pop() - if action == _fwalk_close: - close(value) - - # Each item in the _fwalk() stack is a pair (action, args). - _fwalk_walk = 0 # args: (isroot, dirfd, toppath, topname, entry) - _fwalk_yield = 1 # args: (toppath, dirnames, filenames, topfd) - _fwalk_close = 2 # args: dirfd - - def _fwalk(stack, isbytes, topdown, onerror, follow_symlinks): - # Note: This uses O(depth of the directory tree) file descriptors: if - # necessary, it can be adapted to only require O(1) FDs, see issue - # #13734. - - action, value = stack.pop() - if action == _fwalk_close: - close(value) - return - elif action == _fwalk_yield: - yield value - return - assert action == _fwalk_walk - isroot, dirfd, toppath, topname, entry = value - try: - if not follow_symlinks: - # Note: To guard against symlink races, we use the standard - # lstat()/open()/fstat() trick. - if entry is None: - orig_st = stat(topname, follow_symlinks=False, dir_fd=dirfd) - else: - orig_st = entry.stat(follow_symlinks=False) - topfd = open(topname, O_RDONLY | O_NONBLOCK, dir_fd=dirfd) - except OSError as err: - if isroot: - raise - if onerror is not None: - onerror(err) - return - stack.append((_fwalk_close, topfd)) - if not follow_symlinks: - if isroot and not st.S_ISDIR(orig_st.st_mode): - return - if not path.samestat(orig_st, stat(topfd)): - return - - scandir_it = scandir(topfd) - dirs = [] - nondirs = [] - entries = None if topdown or follow_symlinks else [] - for entry in scandir_it: - name = entry.name - if isbytes: - name = fsencode(name) - try: - if entry.is_dir(): - dirs.append(name) - if entries is not None: - entries.append(entry) - else: - nondirs.append(name) - except OSError: - try: - # Add dangling symlinks, ignore disappeared files - if entry.is_symlink(): - nondirs.append(name) - except OSError: - pass - - if topdown: - yield toppath, dirs, nondirs, topfd - else: - stack.append((_fwalk_yield, (toppath, dirs, nondirs, topfd))) - - toppath = path.join(toppath, toppath[:0]) # Add trailing slash. 
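# As an illustration of the idiom above: toppath[:0] is an empty str or
# bytes of the same type as toppath, and joining with an empty final
# component appends a separator, so the same line serves both path types
# (POSIX separators shown):
#
#   os.path.join("python/Lib", "") == "python/Lib/"
#   os.path.join(b"python/Lib", b"") == b"python/Lib/"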
- if entries is None: - stack.extend( - (_fwalk_walk, (False, topfd, toppath + name, name, None)) - for name in dirs[::-1]) - else: - stack.extend( - (_fwalk_walk, (False, topfd, toppath + name, name, entry)) - for name, entry in zip(dirs[::-1], entries[::-1])) - - __all__.append("fwalk") - -def execl(file, *args): - """execl(file, *args) - - Execute the executable file with argument list args, replacing the - current process. """ - execv(file, args) - -def execle(file, *args): - """execle(file, *args, env) - - Execute the executable file with argument list args and - environment env, replacing the current process. """ - env = args[-1] - execve(file, args[:-1], env) - -def execlp(file, *args): - """execlp(file, *args) - - Execute the executable file (which is searched for along $PATH) - with argument list args, replacing the current process. """ - execvp(file, args) - -def execlpe(file, *args): - """execlpe(file, *args, env) - - Execute the executable file (which is searched for along $PATH) - with argument list args and environment env, replacing the current - process. """ - env = args[-1] - execvpe(file, args[:-1], env) - -def execvp(file, args): - """execvp(file, args) - - Execute the executable file (which is searched for along $PATH) - with argument list args, replacing the current process. - args may be a list or tuple of strings. """ - _execvpe(file, args) - -def execvpe(file, args, env): - """execvpe(file, args, env) - - Execute the executable file (which is searched for along $PATH) - with argument list args and environment env, replacing the - current process. - args may be a list or tuple of strings. """ - _execvpe(file, args, env) - -__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) - -def _execvpe(file, args, env=None): - if env is not None: - exec_func = execve - argrest = (args, env) - else: - exec_func = execv - argrest = (args,) - env = environ - - if path.dirname(file): - exec_func(file, *argrest) - return - saved_exc = None - path_list = get_exec_path(env) - if name != 'nt': - file = fsencode(file) - path_list = map(fsencode, path_list) - for dir in path_list: - fullname = path.join(dir, file) - try: - exec_func(fullname, *argrest) - except (FileNotFoundError, NotADirectoryError) as e: - last_exc = e - except OSError as e: - last_exc = e - if saved_exc is None: - saved_exc = e - if saved_exc is not None: - raise saved_exc - raise last_exc - - -def get_exec_path(env=None): - """Returns the sequence of directories that will be searched for the - named executable (similar to a shell) when launching a process. - - *env* must be an environment variable dict or None. If *env* is None, - os.environ will be used. - """ - # Use a local import instead of a global import to limit the number of - # modules loaded at startup: the os module is always loaded at startup by - # Python. It may also avoid a bootstrap issue. 
- import warnings - - if env is None: - env = environ - - # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a - # BytesWarning when using python -b or python -bb: ignore the warning - with warnings.catch_warnings(): - warnings.simplefilter("ignore", BytesWarning) - - try: - path_list = env.get('PATH') - except TypeError: - path_list = None - - if supports_bytes_environ: - try: - path_listb = env[b'PATH'] - except (KeyError, TypeError): - pass - else: - if path_list is not None: - raise ValueError( - "env cannot contain 'PATH' and b'PATH' keys") - path_list = path_listb - - if path_list is not None and isinstance(path_list, bytes): - path_list = fsdecode(path_list) - - if path_list is None: - path_list = defpath - return path_list.split(pathsep) - - -# Change environ to automatically call putenv() and unsetenv() -from _collections_abc import MutableMapping, Mapping - -class _Environ(MutableMapping): - def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue): - self.encodekey = encodekey - self.decodekey = decodekey - self.encodevalue = encodevalue - self.decodevalue = decodevalue - self._data = data - - def __getitem__(self, key): - try: - value = self._data[self.encodekey(key)] - except KeyError: - # raise KeyError with the original key value - raise KeyError(key) from None - return self.decodevalue(value) - - def __setitem__(self, key, value): - key = self.encodekey(key) - value = self.encodevalue(value) - putenv(key, value) - self._data[key] = value - - def __delitem__(self, key): - encodedkey = self.encodekey(key) - unsetenv(encodedkey) - try: - del self._data[encodedkey] - except KeyError: - # raise KeyError with the original key value - raise KeyError(key) from None - - def __iter__(self): - # list() from dict object is an atomic operation - keys = list(self._data) - for key in keys: - yield self.decodekey(key) - - def __len__(self): - return len(self._data) - - def __repr__(self): - formatted_items = ", ".join( - f"{self.decodekey(key)!r}: {self.decodevalue(value)!r}" - for key, value in self._data.items() - ) - return f"environ({{{formatted_items}}})" - - def copy(self): - return dict(self) - - def setdefault(self, key, value): - if key not in self: - self[key] = value - return self[key] - - def __ior__(self, other): - self.update(other) - return self - - def __or__(self, other): - if not isinstance(other, Mapping): - return NotImplemented - new = dict(self) - new.update(other) - return new - - def __ror__(self, other): - if not isinstance(other, Mapping): - return NotImplemented - new = dict(other) - new.update(self) - return new - -def _createenviron(): - if name == 'nt': - # Where Env Var Names Must Be UPPERCASE - def check_str(value): - if not isinstance(value, str): - raise TypeError("str expected, not %s" % type(value).__name__) - return value - encode = check_str - decode = str - def encodekey(key): - return encode(key).upper() - data = {} - for key, value in environ.items(): - data[encodekey(key)] = value - else: - # Where Env Var Names Can Be Mixed Case - encoding = sys.getfilesystemencoding() - def encode(value): - if not isinstance(value, str): - raise TypeError("str expected, not %s" % type(value).__name__) - return value.encode(encoding, 'surrogateescape') - def decode(value): - return value.decode(encoding, 'surrogateescape') - encodekey = encode - data = environ - return _Environ(data, - encodekey, decode, - encode, decode) - -# unicode environ -environ = _createenviron() -del _createenviron - - -def getenv(key, default=None): - """Get 
an environment variable, return None if it doesn't exist. - The optional second argument can specify an alternate default. - key, default and the result are str.""" - return environ.get(key, default) - -supports_bytes_environ = (name != 'nt') -__all__.extend(("getenv", "supports_bytes_environ")) - -if supports_bytes_environ: - def _check_bytes(value): - if not isinstance(value, bytes): - raise TypeError("bytes expected, not %s" % type(value).__name__) - return value - - # bytes environ - environb = _Environ(environ._data, - _check_bytes, bytes, - _check_bytes, bytes) - del _check_bytes - - def getenvb(key, default=None): - """Get an environment variable, return None if it doesn't exist. - The optional second argument can specify an alternate default. - key, default and the result are bytes.""" - return environb.get(key, default) - - __all__.extend(("environb", "getenvb")) - -def _fscodec(): - encoding = sys.getfilesystemencoding() - errors = sys.getfilesystemencodeerrors() - - def fsencode(filename): - """Encode filename (an os.PathLike, bytes, or str) to the filesystem - encoding with 'surrogateescape' error handler, return bytes unchanged. - On Windows, use 'strict' error handler if the file system encoding is - 'mbcs' (which is the default encoding). - """ - filename = fspath(filename) # Does type-checking of `filename`. - if isinstance(filename, str): - return filename.encode(encoding, errors) - else: - return filename - - def fsdecode(filename): - """Decode filename (an os.PathLike, bytes, or str) from the filesystem - encoding with 'surrogateescape' error handler, return str unchanged. On - Windows, use 'strict' error handler if the file system encoding is - 'mbcs' (which is the default encoding). - """ - filename = fspath(filename) # Does type-checking of `filename`. - if isinstance(filename, bytes): - return filename.decode(encoding, errors) - else: - return filename - - return fsencode, fsdecode - -fsencode, fsdecode = _fscodec() -del _fscodec - -# Supply spawn*() (probably only for Unix) -if _exists("fork") and not _exists("spawnv") and _exists("execv"): - - P_WAIT = 0 - P_NOWAIT = P_NOWAITO = 1 - - __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"]) - - # XXX Should we support P_DETACH? I suppose it could fork()**2 - # and close the std I/O streams. Also, P_OVERLAY is the same - # as execv*()? - - def _spawnvef(mode, file, args, env, func): - # Internal helper; func is the exec*() function to use - if not isinstance(args, (tuple, list)): - raise TypeError('argv must be a tuple or a list') - if not args or not args[0]: - raise ValueError('argv first element cannot be empty') - pid = fork() - if not pid: - # Child - try: - if env is None: - func(file, args) - else: - func(file, args, env) - except: - _exit(127) - else: - # Parent - if mode == P_NOWAIT: - return pid # Caller is responsible for waiting! - while 1: - wpid, sts = waitpid(pid, 0) - if WIFSTOPPED(sts): - continue - - return waitstatus_to_exitcode(sts) - - def spawnv(mode, file, args): - """spawnv(mode, file, args) -> integer - -Execute file with arguments from args in a subprocess. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return _spawnvef(mode, file, args, None, execv) - - def spawnve(mode, file, args, env): - """spawnve(mode, file, args, env) -> integer - -Execute file with arguments from args in a subprocess with the -specified environment. 
-If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return _spawnvef(mode, file, args, env, execve) - - # Note: spawnvp[e] isn't currently supported on Windows - - def spawnvp(mode, file, args): - """spawnvp(mode, file, args) -> integer - -Execute file (which is looked for along $PATH) with arguments from -args in a subprocess. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return _spawnvef(mode, file, args, None, execvp) - - def spawnvpe(mode, file, args, env): - """spawnvpe(mode, file, args, env) -> integer - -Execute file (which is looked for along $PATH) with arguments from -args in a subprocess with the supplied environment. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return _spawnvef(mode, file, args, env, execvpe) - - - __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"]) - - -if _exists("spawnv"): - # These aren't supplied by the basic Windows code - # but can be easily implemented in Python - - def spawnl(mode, file, *args): - """spawnl(mode, file, *args) -> integer - -Execute file with arguments from args in a subprocess. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return spawnv(mode, file, args) - - def spawnle(mode, file, *args): - """spawnle(mode, file, *args, env) -> integer - -Execute file with arguments from args in a subprocess with the -supplied environment. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - env = args[-1] - return spawnve(mode, file, args[:-1], env) - - - __all__.extend(["spawnl", "spawnle"]) - - -if _exists("spawnvp"): - # At the moment, Windows doesn't implement spawnvp[e], - # so it won't have spawnlp[e] either. - def spawnlp(mode, file, *args): - """spawnlp(mode, file, *args) -> integer - -Execute file (which is looked for along $PATH) with arguments from -args in a subprocess with the supplied environment. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return spawnvp(mode, file, args) - - def spawnlpe(mode, file, *args): - """spawnlpe(mode, file, *args, env) -> integer - -Execute file (which is looked for along $PATH) with arguments from -args in a subprocess with the supplied environment. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - env = args[-1] - return spawnvpe(mode, file, args[:-1], env) - - - __all__.extend(["spawnlp", "spawnlpe"]) - -# VxWorks has no user space shell provided. As a result, running -# command in a shell can't be supported. 
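A minimal usage sketch of the spawn* calling convention defined above (a hedged illustration, not part of the diff: POSIX-only, since this fallback relies on fork(), and the executable paths are illustrative):

import os

# P_WAIT blocks until the child exits and returns its exit code
# (via waitstatus_to_exitcode() in _spawnvef above).
rc = os.spawnv(os.P_WAIT, "/bin/echo", ["echo", "hello"])

# P_NOWAIT returns the child's pid immediately; the caller must reap it.
pid = os.spawnv(os.P_NOWAIT, "/bin/sleep", ["sleep", "1"])
os.waitpid(pid, 0)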
-if sys.platform != 'vxworks': - # Supply os.popen() - def popen(cmd, mode="r", buffering=-1): - if not isinstance(cmd, str): - raise TypeError("invalid cmd type (%s, expected string)" % type(cmd)) - if mode not in ("r", "w"): - raise ValueError("invalid mode %r" % mode) - if buffering == 0 or buffering is None: - raise ValueError("popen() does not support unbuffered streams") - import subprocess - if mode == "r": - proc = subprocess.Popen(cmd, - shell=True, text=True, - stdout=subprocess.PIPE, - bufsize=buffering) - return _wrap_close(proc.stdout, proc) - else: - proc = subprocess.Popen(cmd, - shell=True, text=True, - stdin=subprocess.PIPE, - bufsize=buffering) - return _wrap_close(proc.stdin, proc) - - # Helper for popen() -- a proxy for a file whose close waits for the process - class _wrap_close: - def __init__(self, stream, proc): - self._stream = stream - self._proc = proc - def close(self): - self._stream.close() - returncode = self._proc.wait() - if returncode == 0: - return None - if name == 'nt': - return returncode - else: - return returncode << 8 # Shift left to match old behavior - def __enter__(self): - return self - def __exit__(self, *args): - self.close() - def __getattr__(self, name): - return getattr(self._stream, name) - def __iter__(self): - return iter(self._stream) - - __all__.append("popen") - -# Supply os.fdopen() -def fdopen(fd, mode="r", buffering=-1, encoding=None, *args, **kwargs): - if not isinstance(fd, int): - raise TypeError("invalid fd type (%s, expected integer)" % type(fd)) - import io - if "b" not in mode: - encoding = io.text_encoding(encoding) - return io.open(fd, mode, buffering, encoding, *args, **kwargs) - - -# For testing purposes, make sure the function is available when the C -# implementation exists. -def _fspath(path): - """Return the path representation of a path-like object. - - If str or bytes is passed in, it is returned unchanged. Otherwise the - os.PathLike interface is used to get the path representation. If the - path representation is not str or bytes, TypeError is raised. If the - provided path is not str, bytes, or os.PathLike, TypeError is raised. - """ - if isinstance(path, (str, bytes)): - return path - - # Work from the object's type to match method resolution of other magic - # methods. - path_type = type(path) - try: - path_repr = path_type.__fspath__(path) - except AttributeError: - if hasattr(path_type, '__fspath__'): - raise - else: - raise TypeError("expected str, bytes or os.PathLike object, " - "not " + path_type.__name__) - except TypeError: - if path_type.__fspath__ is None: - raise TypeError("expected str, bytes or os.PathLike object, " - "not " + path_type.__name__) from None - else: - raise - if isinstance(path_repr, (str, bytes)): - return path_repr - else: - raise TypeError("expected {}.__fspath__() to return str or bytes, " - "not {}".format(path_type.__name__, - type(path_repr).__name__)) - -# If there is no C implementation, make the pure Python version the -# implementation as transparently as possible. 
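A minimal sketch of the path protocol that _fspath() implements: str and bytes pass through unchanged, and any other object must supply __fspath__() returning str or bytes (the Workspace class below is hypothetical, purely for illustration):

import os

class Workspace:
    def __init__(self, root):
        self.root = root
    def __fspath__(self):
        # Must return str or bytes; anything else makes os.fspath() raise TypeError.
        return self.root

assert os.fspath(Workspace("/tmp/ws")) == "/tmp/ws"
assert os.fspath(b"raw") == b"raw"  # bytes pass through unchanged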
-if not _exists('fspath'): - fspath = _fspath - fspath.__name__ = "fspath" - - -class PathLike(abc.ABC): - - """Abstract base class for implementing the file system path protocol.""" - - __slots__ = () - - @abc.abstractmethod - def __fspath__(self): - """Return the file system path representation of the object.""" - raise NotImplementedError - - @classmethod - def __subclasshook__(cls, subclass): - if cls is PathLike: - return _check_methods(subclass, '__fspath__') - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -if name == 'nt': - class _AddedDllDirectory: - def __init__(self, path, cookie, remove_dll_directory): - self.path = path - self._cookie = cookie - self._remove_dll_directory = remove_dll_directory - def close(self): - self._remove_dll_directory(self._cookie) - self.path = None - def __enter__(self): - return self - def __exit__(self, *args): - self.close() - def __repr__(self): - if self.path: - return "<AddedDllDirectory({!r})>".format(self.path) - return "<AddedDllDirectory()>" - - def add_dll_directory(path): - """Add a path to the DLL search path. - - This search path is used when resolving dependencies for imported - extension modules (the module itself is resolved through sys.path), - and also by ctypes. - - Remove the directory by calling close() on the returned object or - using it in a with statement. - """ - import nt - cookie = nt._add_dll_directory(path) - return _AddedDllDirectory( - path, - cookie, - nt._remove_dll_directory - ) - - -if _exists('sched_getaffinity') and sys._get_cpu_count_config() < 0: - def process_cpu_count(): - """ - Get the number of CPUs of the current process. - - Return the number of logical CPUs usable by the calling thread of the - current process. Return None if indeterminable. - """ - return len(sched_getaffinity(0)) -else: - # Just an alias to cpu_count() (same docstring) - process_cpu_count = cpu_count diff --git a/Python313_13_x64_Template/Lib/pathlib/__init__.py b/Python313_13_x64_Template/Lib/pathlib/__init__.py deleted file mode 100644 index 4b3edf53..00000000 --- a/Python313_13_x64_Template/Lib/pathlib/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Object-oriented filesystem paths. - -This module provides classes to represent abstract paths and concrete -paths with operations that have semantics appropriate for different -operating systems. 
-""" - -from ._abc import * -from ._local import * - -__all__ = (_abc.__all__ + - _local.__all__) diff --git a/Python313_13_x64_Template/Lib/pathlib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/pathlib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 3899d87a..00000000 Binary files a/Python313_13_x64_Template/Lib/pathlib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/pathlib/__pycache__/_abc.cpython-313.pyc b/Python313_13_x64_Template/Lib/pathlib/__pycache__/_abc.cpython-313.pyc deleted file mode 100644 index 15d4d12c..00000000 Binary files a/Python313_13_x64_Template/Lib/pathlib/__pycache__/_abc.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/pathlib/__pycache__/_local.cpython-313.pyc b/Python313_13_x64_Template/Lib/pathlib/__pycache__/_local.cpython-313.pyc deleted file mode 100644 index 45321ad1..00000000 Binary files a/Python313_13_x64_Template/Lib/pathlib/__pycache__/_local.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/pathlib/_abc.py b/Python313_13_x64_Template/Lib/pathlib/_abc.py deleted file mode 100644 index 4d24146a..00000000 --- a/Python313_13_x64_Template/Lib/pathlib/_abc.py +++ /dev/null @@ -1,930 +0,0 @@ -""" -Abstract base classes for rich path objects. - -This module is published as a PyPI package called "pathlib-abc". - -This module is also a *PRIVATE* part of the Python standard library, where -it's developed alongside pathlib. If it finds success and maturity as a PyPI -package, it could become a public part of the standard library. - -Two base classes are defined here -- PurePathBase and PathBase -- that -resemble pathlib's PurePath and Path respectively. -""" - -import functools -from glob import _Globber, _no_recurse_symlinks -from errno import ENOENT, ENOTDIR, EBADF, ELOOP, EINVAL -from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO - - -__all__ = ["UnsupportedOperation"] - -# -# Internals -# - -_WINERROR_NOT_READY = 21 # drive exists but is not accessible -_WINERROR_INVALID_NAME = 123 # fix for bpo-35306 -_WINERROR_CANT_RESOLVE_FILENAME = 1921 # broken symlink pointing to itself - -# EBADF - guard against macOS `stat` throwing EBADF -_IGNORED_ERRNOS = (ENOENT, ENOTDIR, EBADF, ELOOP) - -_IGNORED_WINERRORS = ( - _WINERROR_NOT_READY, - _WINERROR_INVALID_NAME, - _WINERROR_CANT_RESOLVE_FILENAME) - -def _ignore_error(exception): - return (getattr(exception, 'errno', None) in _IGNORED_ERRNOS or - getattr(exception, 'winerror', None) in _IGNORED_WINERRORS) - - -@functools.cache -def _is_case_sensitive(parser): - return parser.normcase('Aa') == 'Aa' - - -class UnsupportedOperation(NotImplementedError): - """An exception that is raised when an unsupported operation is called on - a path object. - """ - pass - - -class ParserBase: - """Base class for path parsers, which do low-level path manipulation. - - Path parsers provide a subset of the os.path API, specifically those - functions needed to provide PurePathBase functionality. Each PurePathBase - subclass references its path parser via a 'parser' class attribute. - - Every method in this base class raises an UnsupportedOperation exception. 
- """ - - @classmethod - def _unsupported_msg(cls, attribute): - return f"{cls.__name__}.{attribute} is unsupported" - - @property - def sep(self): - """The character used to separate path components.""" - raise UnsupportedOperation(self._unsupported_msg('sep')) - - def join(self, path, *paths): - """Join path segments.""" - raise UnsupportedOperation(self._unsupported_msg('join()')) - - def split(self, path): - """Split the path into a pair (head, tail), where *head* is everything - before the final path separator, and *tail* is everything after. - Either part may be empty. - """ - raise UnsupportedOperation(self._unsupported_msg('split()')) - - def splitdrive(self, path): - """Split the path into a 2-item tuple (drive, tail), where *drive* is - a device name or mount point, and *tail* is everything after the - drive. Either part may be empty.""" - raise UnsupportedOperation(self._unsupported_msg('splitdrive()')) - - def normcase(self, path): - """Normalize the case of the path.""" - raise UnsupportedOperation(self._unsupported_msg('normcase()')) - - def isabs(self, path): - """Returns whether the path is absolute, i.e. unaffected by the - current directory or drive.""" - raise UnsupportedOperation(self._unsupported_msg('isabs()')) - - -class PurePathBase: - """Base class for pure path objects. - - This class *does not* provide several magic methods that are defined in - its subclass PurePath. They are: __fspath__, __bytes__, __reduce__, - __hash__, __eq__, __lt__, __le__, __gt__, __ge__. Its initializer and path - joining methods accept only strings, not os.PathLike objects more broadly. - """ - - __slots__ = ( - # The `_raw_path` slot store a joined string path. This is set in the - # `__init__()` method. - '_raw_path', - - # The '_resolving' slot stores a boolean indicating whether the path - # is being processed by `PathBase.resolve()`. This prevents duplicate - # work from occurring when `resolve()` calls `stat()` or `readlink()`. - '_resolving', - ) - parser = ParserBase() - _globber = _Globber - - def __init__(self, path, *paths): - self._raw_path = self.parser.join(path, *paths) if paths else path - if not isinstance(self._raw_path, str): - raise TypeError( - f"path should be a str, not {type(self._raw_path).__name__!r}") - self._resolving = False - - def with_segments(self, *pathsegments): - """Construct a new path object from any number of path-like objects. - Subclasses may override this method to customize how new path objects - are created from methods like `iterdir()`. - """ - return type(self)(*pathsegments) - - def __str__(self): - """Return the string representation of the path, suitable for - passing to system calls.""" - return self._raw_path - - def as_posix(self): - """Return the string representation of the path with forward (/) - slashes.""" - return str(self).replace(self.parser.sep, '/') - - @property - def drive(self): - """The drive prefix (letter or UNC path), if any.""" - return self.parser.splitdrive(self.anchor)[0] - - @property - def root(self): - """The root of the path, if any.""" - return self.parser.splitdrive(self.anchor)[1] - - @property - def anchor(self): - """The concatenation of the drive and root, or ''.""" - return self._stack[0] - - @property - def name(self): - """The final path component, if any.""" - return self.parser.split(self._raw_path)[1] - - @property - def suffix(self): - """ - The final component's last suffix, if any. - - This includes the leading period. 
For example: '.txt' - """ - name = self.name - i = name.rfind('.') - if 0 < i < len(name) - 1: - return name[i:] - else: - return '' - - @property - def suffixes(self): - """ - A list of the final component's suffixes, if any. - - These include the leading periods. For example: ['.tar', '.gz'] - """ - name = self.name - if name.endswith('.'): - return [] - name = name.lstrip('.') - return ['.' + suffix for suffix in name.split('.')[1:]] - - @property - def stem(self): - """The final path component, minus its last suffix.""" - name = self.name - i = name.rfind('.') - if 0 < i < len(name) - 1: - return name[:i] - else: - return name - - def with_name(self, name): - """Return a new path with the file name changed.""" - split = self.parser.split - if split(name)[0]: - raise ValueError(f"Invalid name {name!r}") - return self.with_segments(split(self._raw_path)[0], name) - - def with_stem(self, stem): - """Return a new path with the stem changed.""" - suffix = self.suffix - if not suffix: - return self.with_name(stem) - elif not stem: - # If the suffix is non-empty, we can't make the stem empty. - raise ValueError(f"{self!r} has a non-empty suffix") - else: - return self.with_name(stem + suffix) - - def with_suffix(self, suffix): - """Return a new path with the file suffix changed. If the path - has no suffix, add given suffix. If the given suffix is an empty - string, remove the suffix from the path. - """ - stem = self.stem - if not stem: - # If the stem is empty, we can't make the suffix non-empty. - raise ValueError(f"{self!r} has an empty name") - elif suffix and not (suffix.startswith('.') and len(suffix) > 1): - raise ValueError(f"Invalid suffix {suffix!r}") - else: - return self.with_name(stem + suffix) - - def relative_to(self, other, *, walk_up=False): - """Return the relative path to another path identified by the passed - arguments. If the operation is not possible (because this is not - related to the other path), raise ValueError. - - The *walk_up* parameter controls whether `..` may be used to resolve - the path. - """ - if not isinstance(other, PurePathBase): - other = self.with_segments(other) - anchor0, parts0 = self._stack - anchor1, parts1 = other._stack - if anchor0 != anchor1: - raise ValueError(f"{self._raw_path!r} and {other._raw_path!r} have different anchors") - while parts0 and parts1 and parts0[-1] == parts1[-1]: - parts0.pop() - parts1.pop() - for part in parts1: - if not part or part == '.': - pass - elif not walk_up: - raise ValueError(f"{self._raw_path!r} is not in the subpath of {other._raw_path!r}") - elif part == '..': - raise ValueError(f"'..' segment in {other._raw_path!r} cannot be walked") - else: - parts0.append('..') - return self.with_segments('', *reversed(parts0)) - - def is_relative_to(self, other): - """Return True if the path is relative to another path or False. 
- """ - if not isinstance(other, PurePathBase): - other = self.with_segments(other) - anchor0, parts0 = self._stack - anchor1, parts1 = other._stack - if anchor0 != anchor1: - return False - while parts0 and parts1 and parts0[-1] == parts1[-1]: - parts0.pop() - parts1.pop() - for part in parts1: - if part and part != '.': - return False - return True - - @property - def parts(self): - """An object providing sequence-like access to the - components in the filesystem path.""" - anchor, parts = self._stack - if anchor: - parts.append(anchor) - return tuple(reversed(parts)) - - def joinpath(self, *pathsegments): - """Combine this path with one or several arguments, and return a - new path representing either a subpath (if all arguments are relative - paths) or a totally different path (if one of the arguments is - anchored). - """ - return self.with_segments(self._raw_path, *pathsegments) - - def __truediv__(self, key): - try: - return self.with_segments(self._raw_path, key) - except TypeError: - return NotImplemented - - def __rtruediv__(self, key): - try: - return self.with_segments(key, self._raw_path) - except TypeError: - return NotImplemented - - @property - def _stack(self): - """ - Split the path into a 2-tuple (anchor, parts), where *anchor* is the - uppermost parent of the path (equivalent to path.parents[-1]), and - *parts* is a reversed list of parts following the anchor. - """ - split = self.parser.split - path = self._raw_path - parent, name = split(path) - names = [] - while path != parent: - names.append(name) - path = parent - parent, name = split(path) - return path, names - - @property - def parent(self): - """The logical parent of the path.""" - path = self._raw_path - parent = self.parser.split(path)[0] - if path != parent: - parent = self.with_segments(parent) - parent._resolving = self._resolving - return parent - return self - - @property - def parents(self): - """A sequence of this path's logical parents.""" - split = self.parser.split - path = self._raw_path - parent = split(path)[0] - parents = [] - while path != parent: - parents.append(self.with_segments(parent)) - path = parent - parent = split(path)[0] - return tuple(parents) - - def is_absolute(self): - """True if the path is absolute (has both a root and, if applicable, - a drive).""" - return self.parser.isabs(self._raw_path) - - @property - def _pattern_str(self): - """The path expressed as a string, for use in pattern-matching.""" - return str(self) - - def match(self, path_pattern, *, case_sensitive=None): - """ - Return True if this path matches the given pattern. If the pattern is - relative, matching is done from the right; otherwise, the entire path - is matched. The recursive wildcard '**' is *not* supported by this - method. 
- """ - if not isinstance(path_pattern, PurePathBase): - path_pattern = self.with_segments(path_pattern) - if case_sensitive is None: - case_sensitive = _is_case_sensitive(self.parser) - sep = path_pattern.parser.sep - path_parts = self.parts[::-1] - pattern_parts = path_pattern.parts[::-1] - if not pattern_parts: - raise ValueError("empty pattern") - if len(path_parts) < len(pattern_parts): - return False - if len(path_parts) > len(pattern_parts) and path_pattern.anchor: - return False - globber = self._globber(sep, case_sensitive) - for path_part, pattern_part in zip(path_parts, pattern_parts): - match = globber.compile(pattern_part) - if match(path_part) is None: - return False - return True - - def full_match(self, pattern, *, case_sensitive=None): - """ - Return True if this path matches the given glob-style pattern. The - pattern is matched against the entire path. - """ - if not isinstance(pattern, PurePathBase): - pattern = self.with_segments(pattern) - if case_sensitive is None: - case_sensitive = _is_case_sensitive(self.parser) - globber = self._globber(pattern.parser.sep, case_sensitive, recursive=True) - match = globber.compile(pattern._pattern_str) - return match(self._pattern_str) is not None - - - -class PathBase(PurePathBase): - """Base class for concrete path objects. - - This class provides dummy implementations for many methods that derived - classes can override selectively; the default implementations raise - UnsupportedOperation. The most basic methods, such as stat() and open(), - directly raise UnsupportedOperation; these basic methods are called by - other methods such as is_dir() and read_text(). - - The Path class derives this class to implement local filesystem paths. - Users may derive their own classes to implement virtual filesystem paths, - such as paths in archive files or on remote storage systems. - """ - __slots__ = () - - # Maximum number of symlinks to follow in resolve() - _max_symlinks = 40 - - @classmethod - def _unsupported_msg(cls, attribute): - return f"{cls.__name__}.{attribute} is unsupported" - - def stat(self, *, follow_symlinks=True): - """ - Return the result of the stat() system call on this path, like - os.stat() does. - """ - raise UnsupportedOperation(self._unsupported_msg('stat()')) - - def lstat(self): - """ - Like stat(), except if the path points to a symlink, the symlink's - status information is returned, rather than its target's. - """ - return self.stat(follow_symlinks=False) - - - # Convenience functions for querying the stat results - - def exists(self, *, follow_symlinks=True): - """ - Whether this path exists. - - This method normally follows symlinks; to check whether a symlink exists, - add the argument follow_symlinks=False. - """ - try: - self.stat(follow_symlinks=follow_symlinks) - except OSError as e: - if not _ignore_error(e): - raise - return False - except ValueError: - # Non-encodable path - return False - return True - - def is_dir(self, *, follow_symlinks=True): - """ - Whether this path is a directory. - """ - try: - return S_ISDIR(self.stat(follow_symlinks=follow_symlinks).st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def is_file(self, *, follow_symlinks=True): - """ - Whether this path is a regular file (also True for symlinks pointing - to regular files). 
- """ - try: - return S_ISREG(self.stat(follow_symlinks=follow_symlinks).st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def is_mount(self): - """ - Check if this path is a mount point - """ - # Need to exist and be a dir - if not self.exists() or not self.is_dir(): - return False - - try: - parent_dev = self.parent.stat().st_dev - except OSError: - return False - - dev = self.stat().st_dev - if dev != parent_dev: - return True - ino = self.stat().st_ino - parent_ino = self.parent.stat().st_ino - return ino == parent_ino - - def is_symlink(self): - """ - Whether this path is a symbolic link. - """ - try: - return S_ISLNK(self.lstat().st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist - return False - except ValueError: - # Non-encodable path - return False - - def is_junction(self): - """ - Whether this path is a junction. - """ - # Junctions are a Windows-only feature, not present in POSIX nor the - # majority of virtual filesystems. There is no cross-platform idiom - # to check for junctions (using stat().st_mode). - return False - - def is_block_device(self): - """ - Whether this path is a block device. - """ - try: - return S_ISBLK(self.stat().st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def is_char_device(self): - """ - Whether this path is a character device. - """ - try: - return S_ISCHR(self.stat().st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def is_fifo(self): - """ - Whether this path is a FIFO. - """ - try: - return S_ISFIFO(self.stat().st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def is_socket(self): - """ - Whether this path is a socket. - """ - try: - return S_ISSOCK(self.stat().st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def samefile(self, other_path): - """Return whether other_path is the same or not as this file - (as returned by os.path.samefile()). - """ - st = self.stat() - try: - other_st = other_path.stat() - except AttributeError: - other_st = self.with_segments(other_path).stat() - return (st.st_ino == other_st.st_ino and - st.st_dev == other_st.st_dev) - - def open(self, mode='r', buffering=-1, encoding=None, - errors=None, newline=None): - """ - Open the file pointed to by this path and return a file object, as - the built-in open() function does. 
- """ - raise UnsupportedOperation(self._unsupported_msg('open()')) - - def read_bytes(self): - """ - Open the file in bytes mode, read it, and close the file. - """ - with self.open(mode='rb') as f: - return f.read() - - def read_text(self, encoding=None, errors=None, newline=None): - """ - Open the file in text mode, read it, and close the file. - """ - with self.open(mode='r', encoding=encoding, errors=errors, newline=newline) as f: - return f.read() - - def write_bytes(self, data): - """ - Open the file in bytes mode, write to it, and close the file. - """ - # type-check for the buffer interface before truncating the file - view = memoryview(data) - with self.open(mode='wb') as f: - return f.write(view) - - def write_text(self, data, encoding=None, errors=None, newline=None): - """ - Open the file in text mode, write to it, and close the file. - """ - if not isinstance(data, str): - raise TypeError('data must be str, not %s' % - data.__class__.__name__) - with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f: - return f.write(data) - - def iterdir(self): - """Yield path objects of the directory contents. - - The children are yielded in arbitrary order, and the - special entries '.' and '..' are not included. - """ - raise UnsupportedOperation(self._unsupported_msg('iterdir()')) - - def _glob_selector(self, parts, case_sensitive, recurse_symlinks): - if case_sensitive is None: - case_sensitive = _is_case_sensitive(self.parser) - case_pedantic = False - else: - # The user has expressed a case sensitivity choice, but we don't - # know the case sensitivity of the underlying filesystem, so we - # must use scandir() for everything, including non-wildcard parts. - case_pedantic = True - recursive = True if recurse_symlinks else _no_recurse_symlinks - globber = self._globber(self.parser.sep, case_sensitive, case_pedantic, recursive) - return globber.selector(parts) - - def glob(self, pattern, *, case_sensitive=None, recurse_symlinks=True): - """Iterate over this subtree and yield all existing files (of any - kind, including directories) matching the given relative pattern. - """ - if not isinstance(pattern, PurePathBase): - pattern = self.with_segments(pattern) - anchor, parts = pattern._stack - if anchor: - raise NotImplementedError("Non-relative patterns are unsupported") - select = self._glob_selector(parts, case_sensitive, recurse_symlinks) - return select(self) - - def rglob(self, pattern, *, case_sensitive=None, recurse_symlinks=True): - """Recursively yield all existing files (of any kind, including - directories) matching the given relative pattern, anywhere in - this subtree. 
- """ - if not isinstance(pattern, PurePathBase): - pattern = self.with_segments(pattern) - pattern = '**' / pattern - return self.glob(pattern, case_sensitive=case_sensitive, recurse_symlinks=recurse_symlinks) - - def walk(self, top_down=True, on_error=None, follow_symlinks=False): - """Walk the directory tree from this directory, similar to os.walk().""" - paths = [self] - while paths: - path = paths.pop() - if isinstance(path, tuple): - yield path - continue - dirnames = [] - filenames = [] - if not top_down: - paths.append((path, dirnames, filenames)) - try: - for child in path.iterdir(): - try: - if child.is_dir(follow_symlinks=follow_symlinks): - if not top_down: - paths.append(child) - dirnames.append(child.name) - else: - filenames.append(child.name) - except OSError: - filenames.append(child.name) - except OSError as error: - if on_error is not None: - on_error(error) - if not top_down: - while not isinstance(paths.pop(), tuple): - pass - continue - if top_down: - yield path, dirnames, filenames - paths += [path.joinpath(d) for d in reversed(dirnames)] - - def absolute(self): - """Return an absolute version of this path - No normalization or symlink resolution is performed. - - Use resolve() to resolve symlinks and remove '..' segments. - """ - raise UnsupportedOperation(self._unsupported_msg('absolute()')) - - @classmethod - def cwd(cls): - """Return a new path pointing to the current working directory.""" - # We call 'absolute()' rather than using 'os.getcwd()' directly to - # enable users to replace the implementation of 'absolute()' in a - # subclass and benefit from the new behaviour here. This works because - # os.path.abspath('.') == os.getcwd(). - return cls('').absolute() - - def expanduser(self): - """ Return a new path with expanded ~ and ~user constructs - (as returned by os.path.expanduser) - """ - raise UnsupportedOperation(self._unsupported_msg('expanduser()')) - - @classmethod - def home(cls): - """Return a new path pointing to expanduser('~'). - """ - return cls("~").expanduser() - - def readlink(self): - """ - Return the path to which the symbolic link points. - """ - raise UnsupportedOperation(self._unsupported_msg('readlink()')) - readlink._supported = False - - def resolve(self, strict=False): - """ - Make the path absolute, resolving all symlinks on the way and also - normalizing it. - """ - if self._resolving: - return self - path_root, parts = self._stack - path = self.with_segments(path_root) - try: - path = path.absolute() - except UnsupportedOperation: - path_tail = [] - else: - path_root, path_tail = path._stack - path_tail.reverse() - - # If the user has *not* overridden the `readlink()` method, then symlinks are unsupported - # and (in non-strict mode) we can improve performance by not calling `stat()`. - querying = strict or getattr(self.readlink, '_supported', True) - link_count = 0 - while parts: - part = parts.pop() - if not part or part == '.': - continue - if part == '..': - if not path_tail: - if path_root: - # Delete '..' segment immediately following root - continue - elif path_tail[-1] != '..': - # Delete '..' segment and its predecessor - path_tail.pop() - continue - path_tail.append(part) - if querying and part != '..': - path = self.with_segments(path_root + self.parser.sep.join(path_tail)) - path._resolving = True - try: - st = path.stat(follow_symlinks=False) - if S_ISLNK(st.st_mode): - # Like Linux and macOS, raise OSError(errno.ELOOP) if too many symlinks are - # encountered during resolution. 
- link_count += 1 - if link_count >= self._max_symlinks: - raise OSError(ELOOP, "Too many symbolic links in path", self._raw_path) - target_root, target_parts = path.readlink()._stack - # If the symlink target is absolute (like '/etc/hosts'), set the current - # path to its uppermost parent (like '/'). - if target_root: - path_root = target_root - path_tail.clear() - else: - path_tail.pop() - # Add the symlink target's reversed tail parts (like ['hosts', 'etc']) to - # the stack of unresolved path parts. - parts.extend(target_parts) - continue - elif parts and not S_ISDIR(st.st_mode): - raise NotADirectoryError(ENOTDIR, "Not a directory", self._raw_path) - except OSError: - if strict: - raise - else: - querying = False - return self.with_segments(path_root + self.parser.sep.join(path_tail)) - - def symlink_to(self, target, target_is_directory=False): - """ - Make this path a symlink pointing to the target path. - Note the order of arguments (link, target) is the reverse of os.symlink. - """ - raise UnsupportedOperation(self._unsupported_msg('symlink_to()')) - - def hardlink_to(self, target): - """ - Make this path a hard link pointing to the same file as *target*. - - Note the order of arguments (self, target) is the reverse of os.link's. - """ - raise UnsupportedOperation(self._unsupported_msg('hardlink_to()')) - - def touch(self, mode=0o666, exist_ok=True): - """ - Create this file with the given access mode, if it doesn't exist. - """ - raise UnsupportedOperation(self._unsupported_msg('touch()')) - - def mkdir(self, mode=0o777, parents=False, exist_ok=False): - """ - Create a new directory at this given path. - """ - raise UnsupportedOperation(self._unsupported_msg('mkdir()')) - - def rename(self, target): - """ - Rename this path to the target path. - - The target path may be absolute or relative. Relative paths are - interpreted relative to the current working directory, *not* the - directory of the Path object. - - Returns the new Path instance pointing to the target path. - """ - raise UnsupportedOperation(self._unsupported_msg('rename()')) - - def replace(self, target): - """ - Rename this path to the target path, overwriting if that path exists. - - The target path may be absolute or relative. Relative paths are - interpreted relative to the current working directory, *not* the - directory of the Path object. - - Returns the new Path instance pointing to the target path. - """ - raise UnsupportedOperation(self._unsupported_msg('replace()')) - - def chmod(self, mode, *, follow_symlinks=True): - """ - Change the permissions of the path, like os.chmod(). - """ - raise UnsupportedOperation(self._unsupported_msg('chmod()')) - - def lchmod(self, mode): - """ - Like chmod(), except if the path points to a symlink, the symlink's - permissions are changed, rather than its target's. - """ - self.chmod(mode, follow_symlinks=False) - - def unlink(self, missing_ok=False): - """ - Remove this file or link. - If the path is a directory, use rmdir() instead. - """ - raise UnsupportedOperation(self._unsupported_msg('unlink()')) - - def rmdir(self): - """ - Remove this directory. The directory must be empty. - """ - raise UnsupportedOperation(self._unsupported_msg('rmdir()')) - - def owner(self, *, follow_symlinks=True): - """ - Return the login name of the file owner. - """ - raise UnsupportedOperation(self._unsupported_msg('owner()')) - - def group(self, *, follow_symlinks=True): - """ - Return the group name of the file gid. 
- """ - raise UnsupportedOperation(self._unsupported_msg('group()')) - - @classmethod - def from_uri(cls, uri): - """Return a new path from the given 'file' URI.""" - raise UnsupportedOperation(cls._unsupported_msg('from_uri()')) - - def as_uri(self): - """Return the path as a URI.""" - raise UnsupportedOperation(self._unsupported_msg('as_uri()')) diff --git a/Python313_13_x64_Template/Lib/pathlib/_local.py b/Python313_13_x64_Template/Lib/pathlib/_local.py deleted file mode 100644 index 0188e7c7..00000000 --- a/Python313_13_x64_Template/Lib/pathlib/_local.py +++ /dev/null @@ -1,861 +0,0 @@ -import io -import ntpath -import operator -import os -import posixpath -import sys -import warnings -from glob import _StringGlobber -from itertools import chain -from _collections_abc import Sequence - -try: - import pwd -except ImportError: - pwd = None -try: - import grp -except ImportError: - grp = None - -from ._abc import UnsupportedOperation, PurePathBase, PathBase - - -__all__ = [ - "PurePath", "PurePosixPath", "PureWindowsPath", - "Path", "PosixPath", "WindowsPath", - ] - - -class _PathParents(Sequence): - """This object provides sequence-like access to the logical ancestors - of a path. Don't try to construct it yourself.""" - __slots__ = ('_path', '_drv', '_root', '_tail') - - def __init__(self, path): - self._path = path - self._drv = path.drive - self._root = path.root - self._tail = path._tail - - def __len__(self): - return len(self._tail) - - def __getitem__(self, idx): - if isinstance(idx, slice): - return tuple(self[i] for i in range(*idx.indices(len(self)))) - - if idx >= len(self) or idx < -len(self): - raise IndexError(idx) - if idx < 0: - idx += len(self) - return self._path._from_parsed_parts(self._drv, self._root, - self._tail[:-idx - 1]) - - def __repr__(self): - return "<{}.parents>".format(type(self._path).__name__) - - -class PurePath(PurePathBase): - """Base class for manipulating paths without I/O. - - PurePath represents a filesystem path and offers operations which - don't imply any actual filesystem I/O. Depending on your system, - instantiating a PurePath will return either a PurePosixPath or a - PureWindowsPath object. You can also instantiate either of these classes - directly, regardless of your system. - """ - - __slots__ = ( - # The `_raw_paths` slot stores unnormalized string paths. This is set - # in the `__init__()` method. - '_raw_paths', - - # The `_drv`, `_root` and `_tail_cached` slots store parsed and - # normalized parts of the path. They are set when any of the `drive`, - # `root` or `_tail` properties are accessed for the first time. The - # three-part division corresponds to the result of - # `os.path.splitroot()`, except that the tail is further split on path - # separators (i.e. it is a list of strings), and that the root and - # tail are normalized. - '_drv', '_root', '_tail_cached', - - # The `_str` slot stores the string representation of the path, - # computed from the drive, root and tail when `__str__()` is called - # for the first time. It's used to implement `_str_normcase` - '_str', - - # The `_str_normcase_cached` slot stores the string path with - # normalized case. It is set when the `_str_normcase` property is - # accessed for the first time. It's used to implement `__eq__()` - # `__hash__()`, and `_parts_normcase` - '_str_normcase_cached', - - # The `_parts_normcase_cached` slot stores the case-normalized - # string path after splitting on path separators. It's set when the - # `_parts_normcase` property is accessed for the first time. 
It's used - # to implement comparison methods like `__lt__()`. - '_parts_normcase_cached', - - # The `_hash` slot stores the hash of the case-normalized string - # path. It's set when `__hash__()` is called for the first time. - '_hash', - ) - parser = os.path - _globber = _StringGlobber - - def __new__(cls, *args, **kwargs): - """Construct a PurePath from one or several strings and or existing - PurePath objects. The strings and path objects are combined so as - to yield a canonicalized path, which is incorporated into the - new PurePath object. - """ - if cls is PurePath: - cls = PureWindowsPath if os.name == 'nt' else PurePosixPath - return object.__new__(cls) - - def __init__(self, *args): - paths = [] - for arg in args: - if isinstance(arg, PurePath): - if arg.parser is not self.parser: - # GH-103631: Convert separators for backwards compatibility. - paths.append(arg.as_posix()) - else: - paths.extend(arg._raw_paths) - else: - try: - path = os.fspath(arg) - except TypeError: - path = arg - if not isinstance(path, str): - raise TypeError( - "argument should be a str or an os.PathLike " - "object where __fspath__ returns a str, " - f"not {type(path).__name__!r}") - paths.append(path) - # Avoid calling super().__init__, as an optimisation - self._raw_paths = paths - - def joinpath(self, *pathsegments): - """Combine this path with one or several arguments, and return a - new path representing either a subpath (if all arguments are relative - paths) or a totally different path (if one of the arguments is - anchored). - """ - return self.with_segments(self, *pathsegments) - - def __truediv__(self, key): - try: - return self.with_segments(self, key) - except TypeError: - return NotImplemented - - def __rtruediv__(self, key): - try: - return self.with_segments(key, self) - except TypeError: - return NotImplemented - - def __reduce__(self): - return self.__class__, tuple(self._raw_paths) - - def __repr__(self): - return "{}({!r})".format(self.__class__.__name__, self.as_posix()) - - def __fspath__(self): - return str(self) - - def __bytes__(self): - """Return the bytes representation of the path. This is only - recommended to use under Unix.""" - return os.fsencode(self) - - @property - def _str_normcase(self): - # String with normalized case, for hashing and equality checks - try: - return self._str_normcase_cached - except AttributeError: - if self.parser is posixpath: - self._str_normcase_cached = str(self) - else: - self._str_normcase_cached = str(self).lower() - return self._str_normcase_cached - - def __hash__(self): - try: - return self._hash - except AttributeError: - self._hash = hash(self._str_normcase) - return self._hash - - def __eq__(self, other): - if not isinstance(other, PurePath): - return NotImplemented - return self._str_normcase == other._str_normcase and self.parser is other.parser - - @property - def _parts_normcase(self): - # Cached parts with normalized case, for comparisons. 
- try: - return self._parts_normcase_cached - except AttributeError: - self._parts_normcase_cached = self._str_normcase.split(self.parser.sep) - return self._parts_normcase_cached - - def __lt__(self, other): - if not isinstance(other, PurePath) or self.parser is not other.parser: - return NotImplemented - return self._parts_normcase < other._parts_normcase - - def __le__(self, other): - if not isinstance(other, PurePath) or self.parser is not other.parser: - return NotImplemented - return self._parts_normcase <= other._parts_normcase - - def __gt__(self, other): - if not isinstance(other, PurePath) or self.parser is not other.parser: - return NotImplemented - return self._parts_normcase > other._parts_normcase - - def __ge__(self, other): - if not isinstance(other, PurePath) or self.parser is not other.parser: - return NotImplemented - return self._parts_normcase >= other._parts_normcase - - def __str__(self): - """Return the string representation of the path, suitable for - passing to system calls.""" - try: - return self._str - except AttributeError: - self._str = self._format_parsed_parts(self.drive, self.root, - self._tail) or '.' - return self._str - - @classmethod - def _format_parsed_parts(cls, drv, root, tail): - if drv or root: - return drv + root + cls.parser.sep.join(tail) - elif tail and cls.parser.splitdrive(tail[0])[0]: - tail = ['.'] + tail - return cls.parser.sep.join(tail) - - def _from_parsed_parts(self, drv, root, tail): - path = self._from_parsed_string(self._format_parsed_parts(drv, root, tail)) - path._drv = drv - path._root = root - path._tail_cached = tail - return path - - def _from_parsed_string(self, path_str): - path = self.with_segments(path_str) - path._str = path_str or '.' - return path - - @classmethod - def _parse_path(cls, path): - if not path: - return '', '', [] - sep = cls.parser.sep - altsep = cls.parser.altsep - if altsep: - path = path.replace(altsep, sep) - drv, root, rel = cls.parser.splitroot(path) - if not root and drv.startswith(sep) and not drv.endswith(sep): - drv_parts = drv.split(sep) - if len(drv_parts) == 4 and drv_parts[2] not in '?.': - # e.g. //server/share - root = sep - elif len(drv_parts) == 6: - # e.g. 
//?/unc/server/share - root = sep - parsed = [sys.intern(str(x)) for x in rel.split(sep) if x and x != '.'] - return drv, root, parsed - - @property - def _raw_path(self): - """The joined but unnormalized path.""" - paths = self._raw_paths - if len(paths) == 0: - path = '' - elif len(paths) == 1: - path = paths[0] - else: - path = self.parser.join(*paths) - return path - - @property - def drive(self): - """The drive prefix (letter or UNC path), if any.""" - try: - return self._drv - except AttributeError: - self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path) - return self._drv - - @property - def root(self): - """The root of the path, if any.""" - try: - return self._root - except AttributeError: - self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path) - return self._root - - @property - def _tail(self): - try: - return self._tail_cached - except AttributeError: - self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path) - return self._tail_cached - - @property - def anchor(self): - """The concatenation of the drive and root, or ''.""" - return self.drive + self.root - - @property - def parts(self): - """An object providing sequence-like access to the - components in the filesystem path.""" - if self.drive or self.root: - return (self.drive + self.root,) + tuple(self._tail) - else: - return tuple(self._tail) - - @property - def parent(self): - """The logical parent of the path.""" - drv = self.drive - root = self.root - tail = self._tail - if not tail: - return self - return self._from_parsed_parts(drv, root, tail[:-1]) - - @property - def parents(self): - """A sequence of this path's logical parents.""" - # The value of this property should not be cached on the path object, - # as doing so would introduce a reference cycle. - return _PathParents(self) - - @property - def name(self): - """The final path component, if any.""" - tail = self._tail - if not tail: - return '' - return tail[-1] - - def with_name(self, name): - """Return a new path with the file name changed.""" - p = self.parser - if not name or p.sep in name or (p.altsep and p.altsep in name) or name == '.': - raise ValueError(f"Invalid name {name!r}") - tail = self._tail.copy() - if not tail: - raise ValueError(f"{self!r} has an empty name") - tail[-1] = name - return self._from_parsed_parts(self.drive, self.root, tail) - - def relative_to(self, other, /, *_deprecated, walk_up=False): - """Return the relative path to another path identified by the passed - arguments. If the operation is not possible (because this is not - related to the other path), raise ValueError. - - The *walk_up* parameter controls whether `..` may be used to resolve - the path. - """ - if _deprecated: - msg = ("support for supplying more than one positional argument " - "to pathlib.PurePath.relative_to() is deprecated and " - "scheduled for removal in Python 3.14") - warnings.warn(msg, DeprecationWarning, stacklevel=2) - other = self.with_segments(other, *_deprecated) - elif not isinstance(other, PurePath): - other = self.with_segments(other) - for step, path in enumerate(chain([other], other.parents)): - if path == self or path in self.parents: - break - elif not walk_up: - raise ValueError(f"{str(self)!r} is not in the subpath of {str(other)!r}") - elif path.name == '..': - raise ValueError(f"'..' 
segment in {str(other)!r} cannot be walked") - else: - raise ValueError(f"{str(self)!r} and {str(other)!r} have different anchors") - parts = ['..'] * step + self._tail[len(path._tail):] - return self._from_parsed_parts('', '', parts) - - def is_relative_to(self, other, /, *_deprecated): - """Return True if the path is relative to another path or False. - """ - if _deprecated: - msg = ("support for supplying more than one argument to " - "pathlib.PurePath.is_relative_to() is deprecated and " - "scheduled for removal in Python 3.14") - warnings.warn(msg, DeprecationWarning, stacklevel=2) - other = self.with_segments(other, *_deprecated) - elif not isinstance(other, PurePath): - other = self.with_segments(other) - return other == self or other in self.parents - - def is_absolute(self): - """True if the path is absolute (has both a root and, if applicable, - a drive).""" - if self.parser is posixpath: - # Optimization: work with raw paths on POSIX. - for path in self._raw_paths: - if path.startswith('/'): - return True - return False - return self.parser.isabs(self) - - def is_reserved(self): - """Return True if the path contains one of the special names reserved - by the system, if any.""" - msg = ("pathlib.PurePath.is_reserved() is deprecated and scheduled " - "for removal in Python 3.15. Use os.path.isreserved() to " - "detect reserved paths on Windows.") - warnings.warn(msg, DeprecationWarning, stacklevel=2) - if self.parser is ntpath: - return self.parser.isreserved(self) - return False - - def as_uri(self): - """Return the path as a URI.""" - if not self.is_absolute(): - raise ValueError("relative path can't be expressed as a file URI") - - drive = self.drive - if len(drive) == 2 and drive[1] == ':': - # It's a path on a local drive => 'file:///c:/a/b' - prefix = 'file:///' + drive - path = self.as_posix()[2:] - elif drive: - # It's a path on a network drive => 'file://host/share/a/b' - prefix = 'file:' - path = self.as_posix() - else: - # It's a posix path => 'file:///etc/hosts' - prefix = 'file://' - path = str(self) - from urllib.parse import quote_from_bytes - return prefix + quote_from_bytes(os.fsencode(path)) - - @property - def _pattern_str(self): - """The path expressed as a string, for use in pattern-matching.""" - # The string representation of an empty path is a single dot ('.'). Empty - # paths shouldn't match wildcards, so we change it to the empty string. - path_str = str(self) - return '' if path_str == '.' else path_str - -# Subclassing os.PathLike makes isinstance() checks slower, -# which in turn makes Path construction slower. Register instead! -os.PathLike.register(PurePath) - - -class PurePosixPath(PurePath): - """PurePath subclass for non-Windows systems. - - On a POSIX system, instantiating a PurePath should return this object. - However, you can also instantiate it directly on any system. - """ - parser = posixpath - __slots__ = () - - -class PureWindowsPath(PurePath): - """PurePath subclass for Windows systems. - - On a Windows system, instantiating a PurePath should return this object. - However, you can also instantiate it directly on any system. - """ - parser = ntpath - __slots__ = () - - -class Path(PathBase, PurePath): - """PurePath subclass that can make system calls. - - Path represents a filesystem path but unlike PurePath, also offers - methods to do system calls on path objects. Depending on your system, - instantiating a Path will return either a PosixPath or a WindowsPath - object. 
You can also instantiate a PosixPath or WindowsPath directly, - but cannot instantiate a WindowsPath on a POSIX system or vice versa. - """ - __slots__ = () - as_uri = PurePath.as_uri - - @classmethod - def _unsupported_msg(cls, attribute): - return f"{cls.__name__}.{attribute} is unsupported on this system" - - def __init__(self, *args, **kwargs): - if kwargs: - msg = ("support for supplying keyword arguments to pathlib.PurePath " - "is deprecated and scheduled for removal in Python {remove}") - warnings._deprecated("pathlib.PurePath(**kwargs)", msg, remove=(3, 14)) - super().__init__(*args) - - def __new__(cls, *args, **kwargs): - if cls is Path: - cls = WindowsPath if os.name == 'nt' else PosixPath - return object.__new__(cls) - - def stat(self, *, follow_symlinks=True): - """ - Return the result of the stat() system call on this path, like - os.stat() does. - """ - return os.stat(self, follow_symlinks=follow_symlinks) - - def is_mount(self): - """ - Check if this path is a mount point - """ - return os.path.ismount(self) - - def is_junction(self): - """ - Whether this path is a junction. - """ - return os.path.isjunction(self) - - def open(self, mode='r', buffering=-1, encoding=None, - errors=None, newline=None): - """ - Open the file pointed to by this path and return a file object, as - the built-in open() function does. - """ - if "b" not in mode: - encoding = io.text_encoding(encoding) - return io.open(self, mode, buffering, encoding, errors, newline) - - def read_text(self, encoding=None, errors=None, newline=None): - """ - Open the file in text mode, read it, and close the file. - """ - # Call io.text_encoding() here to ensure any warning is raised at an - # appropriate stack level. - encoding = io.text_encoding(encoding) - return PathBase.read_text(self, encoding, errors, newline) - - def write_text(self, data, encoding=None, errors=None, newline=None): - """ - Open the file in text mode, write to it, and close the file. - """ - # Call io.text_encoding() here to ensure any warning is raised at an - # appropriate stack level. - encoding = io.text_encoding(encoding) - return PathBase.write_text(self, data, encoding, errors, newline) - - _remove_leading_dot = operator.itemgetter(slice(2, None)) - _remove_trailing_slash = operator.itemgetter(slice(-1)) - - def _filter_trailing_slash(self, paths): - sep = self.parser.sep - anchor_len = len(self.anchor) - for path_str in paths: - if len(path_str) > anchor_len and path_str[-1] == sep: - path_str = path_str[:-1] - yield path_str - - def iterdir(self): - """Yield path objects of the directory contents. - - The children are yielded in arbitrary order, and the - special entries '.' and '..' are not included. - """ - root_dir = str(self) - with os.scandir(root_dir) as scandir_it: - paths = [entry.path for entry in scandir_it] - if root_dir == '.': - paths = map(self._remove_leading_dot, paths) - return map(self._from_parsed_string, paths) - - def glob(self, pattern, *, case_sensitive=None, recurse_symlinks=False): - """Iterate over this subtree and yield all existing files (of any - kind, including directories) matching the given relative pattern. 
- """ - sys.audit("pathlib.Path.glob", self, pattern) - if not isinstance(pattern, PurePath): - pattern = self.with_segments(pattern) - if pattern.anchor: - raise NotImplementedError("Non-relative patterns are unsupported") - parts = pattern._tail.copy() - if not parts: - raise ValueError("Unacceptable pattern: {!r}".format(pattern)) - raw = pattern._raw_path - if raw[-1] in (self.parser.sep, self.parser.altsep): - # GH-65238: pathlib doesn't preserve trailing slash. Add it back. - parts.append('') - select = self._glob_selector(parts[::-1], case_sensitive, recurse_symlinks) - root = str(self) - paths = select(root) - - # Normalize results - if root == '.': - paths = map(self._remove_leading_dot, paths) - if parts[-1] == '': - paths = map(self._remove_trailing_slash, paths) - elif parts[-1] == '**': - paths = self._filter_trailing_slash(paths) - paths = map(self._from_parsed_string, paths) - return paths - - def rglob(self, pattern, *, case_sensitive=None, recurse_symlinks=False): - """Recursively yield all existing files (of any kind, including - directories) matching the given relative pattern, anywhere in - this subtree. - """ - sys.audit("pathlib.Path.rglob", self, pattern) - if not isinstance(pattern, PurePath): - pattern = self.with_segments(pattern) - pattern = '**' / pattern - return self.glob(pattern, case_sensitive=case_sensitive, recurse_symlinks=recurse_symlinks) - - def walk(self, top_down=True, on_error=None, follow_symlinks=False): - """Walk the directory tree from this directory, similar to os.walk().""" - sys.audit("pathlib.Path.walk", self, on_error, follow_symlinks) - root_dir = str(self) - if not follow_symlinks: - follow_symlinks = os._walk_symlinks_as_files - results = os.walk(root_dir, top_down, on_error, follow_symlinks) - for path_str, dirnames, filenames in results: - if root_dir == '.': - path_str = path_str[2:] - yield self._from_parsed_string(path_str), dirnames, filenames - - def absolute(self): - """Return an absolute version of this path - No normalization or symlink resolution is performed. - - Use resolve() to resolve symlinks and remove '..' segments. - """ - if self.is_absolute(): - return self - if self.root: - drive = os.path.splitroot(os.getcwd())[0] - return self._from_parsed_parts(drive, self.root, self._tail) - if self.drive: - # There is a CWD on each drive-letter drive. - cwd = os.path.abspath(self.drive) - else: - cwd = os.getcwd() - if not self._tail: - # Fast path for "empty" paths, e.g. Path("."), Path("") or Path(). - # We pass only one argument to with_segments() to avoid the cost - # of joining, and we exploit the fact that getcwd() returns a - # fully-normalized string by storing it in _str. This is used to - # implement Path.cwd(). - return self._from_parsed_string(cwd) - drive, root, rel = os.path.splitroot(cwd) - if not rel: - return self._from_parsed_parts(drive, root, self._tail) - tail = rel.split(self.parser.sep) - tail.extend(self._tail) - return self._from_parsed_parts(drive, root, tail) - - def resolve(self, strict=False): - """ - Make the path absolute, resolving all symlinks on the way and also - normalizing it. - """ - - return self.with_segments(os.path.realpath(self, strict=strict)) - - if pwd: - def owner(self, *, follow_symlinks=True): - """ - Return the login name of the file owner. - """ - uid = self.stat(follow_symlinks=follow_symlinks).st_uid - return pwd.getpwuid(uid).pw_name - - if grp: - def group(self, *, follow_symlinks=True): - """ - Return the group name of the file gid. 
- """ - gid = self.stat(follow_symlinks=follow_symlinks).st_gid - return grp.getgrgid(gid).gr_name - - if hasattr(os, "readlink"): - def readlink(self): - """ - Return the path to which the symbolic link points. - """ - return self.with_segments(os.readlink(self)) - - def touch(self, mode=0o666, exist_ok=True): - """ - Create this file with the given access mode, if it doesn't exist. - """ - - if exist_ok: - # First try to bump modification time - # Implementation note: GNU touch uses the UTIME_NOW option of - # the utimensat() / futimens() functions. - try: - os.utime(self, None) - except OSError: - # Avoid exception chaining - pass - else: - return - flags = os.O_CREAT | os.O_WRONLY - if not exist_ok: - flags |= os.O_EXCL - fd = os.open(self, flags, mode) - os.close(fd) - - def mkdir(self, mode=0o777, parents=False, exist_ok=False): - """ - Create a new directory at this given path. - """ - try: - os.mkdir(self, mode) - except FileNotFoundError: - if not parents or self.parent == self: - raise - self.parent.mkdir(parents=True, exist_ok=True) - self.mkdir(mode, parents=False, exist_ok=exist_ok) - except OSError: - # Cannot rely on checking for EEXIST, since the operating system - # could give priority to other errors like EACCES or EROFS - if not exist_ok or not self.is_dir(): - raise - - def chmod(self, mode, *, follow_symlinks=True): - """ - Change the permissions of the path, like os.chmod(). - """ - os.chmod(self, mode, follow_symlinks=follow_symlinks) - - def unlink(self, missing_ok=False): - """ - Remove this file or link. - If the path is a directory, use rmdir() instead. - """ - try: - os.unlink(self) - except FileNotFoundError: - if not missing_ok: - raise - - def rmdir(self): - """ - Remove this directory. The directory must be empty. - """ - os.rmdir(self) - - def rename(self, target): - """ - Rename this path to the target path. - - The target path may be absolute or relative. Relative paths are - interpreted relative to the current working directory, *not* the - directory of the Path object. - - Returns the new Path instance pointing to the target path. - """ - os.rename(self, target) - return self.with_segments(target) - - def replace(self, target): - """ - Rename this path to the target path, overwriting if that path exists. - - The target path may be absolute or relative. Relative paths are - interpreted relative to the current working directory, *not* the - directory of the Path object. - - Returns the new Path instance pointing to the target path. - """ - os.replace(self, target) - return self.with_segments(target) - - if hasattr(os, "symlink"): - def symlink_to(self, target, target_is_directory=False): - """ - Make this path a symlink pointing to the target path. - Note the order of arguments (link, target) is the reverse of os.symlink. - """ - os.symlink(target, self, target_is_directory) - - if hasattr(os, "link"): - def hardlink_to(self, target): - """ - Make this path a hard link pointing to the same file as *target*. - - Note the order of arguments (self, target) is the reverse of os.link's. 
- """ - os.link(target, self) - - def expanduser(self): - """ Return a new path with expanded ~ and ~user constructs - (as returned by os.path.expanduser) - """ - if (not (self.drive or self.root) and - self._tail and self._tail[0][:1] == '~'): - homedir = os.path.expanduser(self._tail[0]) - if homedir[:1] == "~": - raise RuntimeError("Could not determine home directory.") - drv, root, tail = self._parse_path(homedir) - return self._from_parsed_parts(drv, root, tail + self._tail[1:]) - - return self - - @classmethod - def from_uri(cls, uri): - """Return a new path from the given 'file' URI.""" - if not uri.startswith('file:'): - raise ValueError(f"URI does not start with 'file:': {uri!r}") - path = uri[5:] - if path[:3] == '///': - # Remove empty authority - path = path[2:] - elif path[:12] == '//localhost/': - # Remove 'localhost' authority - path = path[11:] - if path[:3] == '///' or (path[:1] == '/' and path[2:3] in ':|'): - # Remove slash before DOS device/UNC path - path = path[1:] - if path[1:2] == '|': - # Replace bar with colon in DOS drive - path = path[:1] + ':' + path[2:] - from urllib.parse import unquote_to_bytes - path = cls(os.fsdecode(unquote_to_bytes(path))) - if not path.is_absolute(): - raise ValueError(f"URI is not absolute: {uri!r}") - return path - - -class PosixPath(Path, PurePosixPath): - """Path subclass for non-Windows systems. - - On a POSIX system, instantiating a Path should return this object. - """ - __slots__ = () - - if os.name == 'nt': - def __new__(cls, *args, **kwargs): - raise UnsupportedOperation( - f"cannot instantiate {cls.__name__!r} on your system") - -class WindowsPath(Path, PureWindowsPath): - """Path subclass for Windows systems. - - On a Windows system, instantiating a Path should return this object. - """ - __slots__ = () - - if os.name != 'nt': - def __new__(cls, *args, **kwargs): - raise UnsupportedOperation( - f"cannot instantiate {cls.__name__!r} on your system") diff --git a/Python313_13_x64_Template/Lib/pdb.py b/Python313_13_x64_Template/Lib/pdb.py deleted file mode 100644 index 5c9be23e..00000000 --- a/Python313_13_x64_Template/Lib/pdb.py +++ /dev/null @@ -1,2550 +0,0 @@ -#! /usr/bin/env python3 - -""" -The Python Debugger Pdb -======================= - -To use the debugger in its simplest form: - - >>> import pdb - >>> pdb.run('') - -The debugger's prompt is '(Pdb) '. This will stop in the first -function call in . - -Alternatively, if a statement terminated with an unhandled exception, -you can use pdb's post-mortem facility to inspect the contents of the -traceback: - - >>> - - >>> import pdb - >>> pdb.pm() - -The commands recognized by the debugger are listed in the next -section. Most can be abbreviated as indicated; e.g., h(elp) means -that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel', -nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in -square brackets. Alternatives in the command syntax are separated -by a vertical bar (|). - -A blank line repeats the previous command literally, except for -'list', where it lists the next 11 lines. - -Commands that the debugger doesn't recognize are assumed to be Python -statements and are executed in the context of the program being -debugged. Python statements can also be prefixed with an exclamation -point ('!'). This is a powerful way to inspect the program being -debugged; it is even possible to change variables or call functions. -When an exception occurs in such a statement, the exception name is -printed but the debugger's state is not changed. 
- -The debugger supports aliases, which can save typing. And aliases can -have parameters (see the alias help entry) which allows one a certain -level of adaptability to the context under examination. - -Multiple commands may be entered on a single line, separated by the -pair ';;'. No intelligence is applied to separating the commands; the -input is split at the first ';;', even if it is in the middle of a -quoted string. - -If a file ".pdbrc" exists in your home directory or in the current -directory, it is read in and executed as if it had been typed at the -debugger prompt. This is particularly useful for aliases. If both -files exist, the one in the home directory is read first and aliases -defined there can be overridden by the local file. This behavior can be -disabled by passing the "readrc=False" argument to the Pdb constructor. - -Aside from aliases, the debugger is not directly programmable; but it -is implemented as a class from which you can derive your own debugger -class, which you can make as fancy as you like. - - -Debugger commands -================= - -""" -# NOTE: the actual command documentation is collected from docstrings of the -# commands and is appended to __doc__ after the class has been defined. - -import os -import io -import re -import sys -import cmd -import bdb -import dis -import code -import glob -import token -import types -import codeop -import pprint -import signal -import inspect -import textwrap -import tokenize -import itertools -import traceback -import linecache -import _colorize - -from contextlib import contextmanager -from types import CodeType - - -class Restart(Exception): - """Causes a debugger to be restarted for the debugged python program.""" - pass - -__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace", - "post_mortem", "help"] - - -def find_first_executable_line(code): - """ Try to find the first executable line of the code object. - - Equivalently, find the line number of the instruction that's - after RESUME - - Return code.co_firstlineno if no executable line is found. 
- """ - prev = None - for instr in dis.get_instructions(code): - if prev is not None and prev.opname == 'RESUME': - if instr.positions.lineno is not None: - return instr.positions.lineno - return code.co_firstlineno - prev = instr - return code.co_firstlineno - -def find_function(funcname, filename): - cre = re.compile(r'def\s+%s(\s*\[.+\])?\s*[(]' % re.escape(funcname)) - try: - fp = tokenize.open(filename) - except OSError: - lines = linecache.getlines(filename) - if not lines: - return None - fp = io.StringIO(''.join(lines)) - funcdef = "" - funcstart = None - # consumer of this info expects the first line to be 1 - with fp: - for lineno, line in enumerate(fp, start=1): - if cre.match(line): - funcstart, funcdef = lineno, line - elif funcdef: - funcdef += line - - if funcdef: - try: - code = compile(funcdef, filename, 'exec') - except SyntaxError: - continue - # We should always be able to find the code object here - funccode = next(c for c in code.co_consts if - isinstance(c, CodeType) and c.co_name == funcname) - lineno_offset = find_first_executable_line(funccode) - return funcname, filename, funcstart + lineno_offset - 1 - return None - -def lasti2lineno(code, lasti): - linestarts = list(dis.findlinestarts(code)) - linestarts.reverse() - for i, lineno in linestarts: - if lasti >= i: - return lineno - return 0 - - -class _rstr(str): - """String that doesn't quote its repr.""" - def __repr__(self): - return self - - -class _ExecutableTarget: - filename: str - code: CodeType | str - namespace: dict - - -class _ScriptTarget(_ExecutableTarget): - def __init__(self, target): - self._check(target) - self._target = self._safe_realpath(target) - - # If PYTHONSAFEPATH (-P) is not set, sys.path[0] is the directory - # of pdb, and we should replace it with the directory of the script - if not sys.flags.safe_path: - sys.path[0] = os.path.dirname(self._target) - - @staticmethod - def _check(target): - """ - Check that target is plausibly a script. - """ - if not os.path.exists(target): - print(f'Error: {target} does not exist') - sys.exit(1) - if os.path.isdir(target): - print(f'Error: {target} is a directory') - sys.exit(1) - - @staticmethod - def _safe_realpath(path): - """ - Return the canonical path (realpath) if it is accessible from the userspace. - Otherwise (for example, if the path is a symlink to an anonymous pipe), - return the original path. - - See GH-142315. 
- """ - realpath = os.path.realpath(path) - return realpath if os.path.exists(realpath) else path - - def __repr__(self): - return self._target - - @property - def filename(self): - return self._target - - @property - def code(self): - # Open the file each time because the file may be modified - with io.open_code(self._target) as fp: - return f"exec(compile({fp.read()!r}, {self._target!r}, 'exec'))" - - @property - def namespace(self): - return dict( - __name__='__main__', - __file__=self._target, - __builtins__=__builtins__, - __spec__=None, - ) - - -class _ModuleTarget(_ExecutableTarget): - def __init__(self, target): - self._target = target - - import runpy - try: - _, self._spec, self._code = runpy._get_module_details(self._target) - except ImportError as e: - print(f"ImportError: {e}") - sys.exit(1) - except Exception: - traceback.print_exc() - sys.exit(1) - - def __repr__(self): - return self._target - - @property - def filename(self): - return self._code.co_filename - - @property - def code(self): - return self._code - - @property - def namespace(self): - return dict( - __name__='__main__', - __file__=os.path.normcase(os.path.abspath(self.filename)), - __package__=self._spec.parent, - __loader__=self._spec.loader, - __spec__=self._spec, - __builtins__=__builtins__, - ) - - -class _ZipTarget(_ExecutableTarget): - def __init__(self, target): - import runpy - - self._target = os.path.realpath(target) - sys.path.insert(0, self._target) - try: - _, self._spec, self._code = runpy._get_main_module_details() - except ImportError as e: - print(f"ImportError: {e}") - sys.exit(1) - except Exception: - traceback.print_exc() - sys.exit(1) - - def __repr__(self): - return self._target - - @property - def filename(self): - return self._code.co_filename - - @property - def code(self): - return self._code - - @property - def namespace(self): - return dict( - __name__='__main__', - __file__=os.path.normcase(os.path.abspath(self.filename)), - __package__=self._spec.parent, - __loader__=self._spec.loader, - __spec__=self._spec, - __builtins__=__builtins__, - ) - - -class _PdbInteractiveConsole(code.InteractiveConsole): - def __init__(self, ns, message): - self._message = message - super().__init__(locals=ns, local_exit=True) - - def write(self, data): - self._message(data, end='') - - -# Interaction prompt line will separate file and call info from code -# text using value of line_prefix string. A newline and arrow may -# be to your liking. You can set it once pdb is imported using the -# command "pdb.line_prefix = '\n% '". -# line_prefix = ': ' # Use this to get the old situation back -line_prefix = '\n-> ' # Probably a better default - - - -class Pdb(bdb.Bdb, cmd.Cmd): - _previous_sigint_handler = None - - # Limit the maximum depth of chained exceptions, we should be handling cycles, - # but in case there are recursions, we stop at 999. 
- MAX_CHAINED_EXCEPTION_DEPTH = 999 - - _file_mtime_table = {} - - def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None, - nosigint=False, readrc=True): - bdb.Bdb.__init__(self, skip=skip) - cmd.Cmd.__init__(self, completekey, stdin, stdout) - sys.audit("pdb.Pdb") - if stdout: - self.use_rawinput = 0 - self.prompt = '(Pdb) ' - self.aliases = {} - self.displaying = {} - self.mainpyfile = '' - self._wait_for_mainpyfile = False - self.tb_lineno = {} - # Try to load readline if it exists - try: - import readline - # remove some common file name delimiters - readline.set_completer_delims(' \t\n`@#%^&*()=+[{]}\\|;:\'",<>?') - except ImportError: - pass - - self.allow_kbdint = False - self.nosigint = nosigint - # Consider these characters as part of the command so when the users type - # c.a or c['a'], it won't be recognized as a c(ontinue) command - self.identchars = cmd.Cmd.identchars + '=.[](),"\'+-*/%@&|<>~^' - - # Read ~/.pdbrc and ./.pdbrc - self.rcLines = [] - if readrc: - try: - with open(os.path.expanduser('~/.pdbrc'), encoding='utf-8') as rcFile: - self.rcLines.extend(rcFile) - except OSError: - pass - try: - with open(".pdbrc", encoding='utf-8') as rcFile: - self.rcLines.extend(rcFile) - except OSError: - pass - - self.commands = {} # associates a command list to breakpoint numbers - self.commands_doprompt = {} # for each bp num, tells if the prompt - # must be disp. after execing the cmd list - self.commands_silent = {} # for each bp num, tells if the stack trace - # must be disp. after execing the cmd list - self.commands_defining = False # True while in the process of defining - # a command list - self.commands_bnum = None # The breakpoint number for which we are - # defining a list - - self._chained_exceptions = tuple() - self._chained_exception_index = 0 - - def sigint_handler(self, signum, frame): - if self.allow_kbdint: - raise KeyboardInterrupt - self.message("\nProgram interrupted. (Use 'cont' to resume).") - self.set_step() - self.set_trace(frame) - - def reset(self): - bdb.Bdb.reset(self) - self.forget() - - def forget(self): - self.lineno = None - self.stack = [] - self.curindex = 0 - if hasattr(self, 'curframe') and self.curframe: - self.curframe.f_globals.pop('__pdb_convenience_variables', None) - self.curframe = None - self.curframe_locals = {} - self.tb_lineno.clear() - - def setup(self, f, tb): - self.forget() - self.stack, self.curindex = self.get_stack(f, tb) - while tb: - # when setting up post-mortem debugging with a traceback, save all - # the original line numbers to be displayed along the current line - # numbers (which can be different, e.g. due to finally clauses) - lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti) - self.tb_lineno[tb.tb_frame] = lineno - tb = tb.tb_next - self.curframe = self.stack[self.curindex][0] - # The f_locals dictionary used to be updated from the actual frame - # locals whenever the .f_locals accessor was called, so it was - # cached here to ensure that modifications were not overwritten. While - # the caching is no longer required now that f_locals is a direct proxy - # on optimized frames, it's also harmless, so the code structure has - # been left unchanged. 
- self.curframe_locals = self.curframe.f_locals - self.set_convenience_variable(self.curframe, '_frame', self.curframe) - - if self._chained_exceptions: - self.set_convenience_variable( - self.curframe, - '_exception', - self._chained_exceptions[self._chained_exception_index], - ) - - if self.rcLines: - self.cmdqueue = [ - line for line in self.rcLines - if line.strip() and not line.strip().startswith("#") - ] - self.rcLines = [] - - # Override Bdb methods - - def user_call(self, frame, argument_list): - """This method is called when there is the remote possibility - that we ever need to stop in this function.""" - if self._wait_for_mainpyfile: - return - if self.stop_here(frame): - self.message('--Call--') - self.interaction(frame, None) - - def user_line(self, frame): - """This function is called when we stop or break at this line.""" - if self._wait_for_mainpyfile: - if (self.mainpyfile != self.canonic(frame.f_code.co_filename)): - return - self._wait_for_mainpyfile = False - if self.trace_opcodes: - # GH-127321 - # We want to avoid stopping at an opcode that does not have - # an associated line number because pdb does not like it - if frame.f_lineno is None: - self.set_stepinstr() - return - if self.bp_commands(frame): - self.interaction(frame, None) - - user_opcode = user_line - - def bp_commands(self, frame): - """Call every command that was set for the current active breakpoint - (if there is one). - - Returns True if the normal interaction function must be called, - False otherwise.""" - # self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit - if getattr(self, "currentbp", False) and \ - self.currentbp in self.commands: - currentbp = self.currentbp - self.currentbp = 0 - lastcmd_back = self.lastcmd - self.setup(frame, None) - for line in self.commands[currentbp]: - self.onecmd(line) - self.lastcmd = lastcmd_back - if not self.commands_silent[currentbp]: - self.print_stack_entry(self.stack[self.curindex]) - if self.commands_doprompt[currentbp]: - self._cmdloop() - self.forget() - return - return 1 - - def user_return(self, frame, return_value): - """This function is called when a return trap is set here.""" - if self._wait_for_mainpyfile: - return - frame.f_locals['__return__'] = return_value - self.set_convenience_variable(frame, '_retval', return_value) - self.message('--Return--') - self.interaction(frame, None) - - def user_exception(self, frame, exc_info): - """This function is called if an exception occurs, - but only if we are to stop at or just below this level.""" - if self._wait_for_mainpyfile: - return - exc_type, exc_value, exc_traceback = exc_info - frame.f_locals['__exception__'] = exc_type, exc_value - self.set_convenience_variable(frame, '_exception', exc_value) - - # An 'Internal StopIteration' exception is an exception debug event - # issued by the interpreter when handling a subgenerator run with - # 'yield from' or a generator controlled by a for loop. No exception has - # actually occurred in this case. The debugger uses this debug event to - # stop when the debuggee is returning from such generators. 
- prefix = 'Internal ' if (not exc_traceback - and exc_type is StopIteration) else '' - self.message('%s%s' % (prefix, self._format_exc(exc_value))) - self.interaction(frame, exc_traceback) - - # General interaction function - def _cmdloop(self): - while True: - try: - # keyboard interrupts allow for an easy way to cancel - # the current command, so allow them during interactive input - self.allow_kbdint = True - self.cmdloop() - self.allow_kbdint = False - break - except KeyboardInterrupt: - self.message('--KeyboardInterrupt--') - - def _validate_file_mtime(self): - """Check if the source file of the current frame has been modified since - the last time we saw it. If so, give a warning.""" - try: - filename = self.curframe.f_code.co_filename - mtime = os.path.getmtime(filename) - except Exception: - return - if (filename in self._file_mtime_table and - mtime != self._file_mtime_table[filename]): - self.message(f"*** WARNING: file '{filename}' was edited, " - "running stale code until the program is rerun") - self._file_mtime_table[filename] = mtime - - # Called before loop, handles display expressions - # Set up convenience variable containers - def _show_display(self): - displaying = self.displaying.get(self.curframe) - if displaying: - for expr, oldvalue in displaying.items(): - newvalue = self._getval_except(expr) - # check for identity first; this prevents custom __eq__ to - # be called at every loop, and also prevents instances whose - # fields are changed to be displayed - if newvalue is not oldvalue and newvalue != oldvalue: - displaying[expr] = newvalue - self.message('display %s: %s [old: %s]' % - (expr, self._safe_repr(newvalue, expr), - self._safe_repr(oldvalue, expr))) - - def _get_tb_and_exceptions(self, tb_or_exc): - """ - Given a tracecack or an exception, return a tuple of chained exceptions - and current traceback to inspect. - - This will deal with selecting the right ``__cause__`` or ``__context__`` - as well as handling cycles, and return a flattened list of exceptions we - can jump to with do_exceptions. - - """ - _exceptions = [] - if isinstance(tb_or_exc, BaseException): - traceback, current = tb_or_exc.__traceback__, tb_or_exc - - while current is not None: - if current in _exceptions: - break - _exceptions.append(current) - if current.__cause__ is not None: - current = current.__cause__ - elif ( - current.__context__ is not None and not current.__suppress_context__ - ): - current = current.__context__ - - if len(_exceptions) >= self.MAX_CHAINED_EXCEPTION_DEPTH: - self.message( - f"More than {self.MAX_CHAINED_EXCEPTION_DEPTH}" - " chained exceptions found, not all exceptions" - "will be browsable with `exceptions`." - ) - break - else: - traceback = tb_or_exc - return tuple(reversed(_exceptions)), traceback - - @contextmanager - def _hold_exceptions(self, exceptions): - """ - Context manager to ensure proper cleaning of exceptions references - - When given a chained exception instead of a traceback, - pdb may hold references to many objects which may leak memory. - - We use this context manager to make sure everything is properly cleaned - - """ - try: - self._chained_exceptions = exceptions - self._chained_exception_index = len(exceptions) - 1 - yield - finally: - # we can't put those in forget as otherwise they would - # be cleared on exception change - self._chained_exceptions = tuple() - self._chained_exception_index = 0 - - def interaction(self, frame, tb_or_exc): - # Restore the previous signal handler at the Pdb prompt. 
- if Pdb._previous_sigint_handler: - try: - signal.signal(signal.SIGINT, Pdb._previous_sigint_handler) - except ValueError: # ValueError: signal only works in main thread - pass - else: - Pdb._previous_sigint_handler = None - - _chained_exceptions, tb = self._get_tb_and_exceptions(tb_or_exc) - if isinstance(tb_or_exc, BaseException): - assert tb is not None, "main exception must have a traceback" - with self._hold_exceptions(_chained_exceptions): - self.setup(frame, tb) - # We should print the stack entry if and only if the user input - # is expected, and we should print it right before the user input. - # We achieve this by appending _pdbcmd_print_frame_status to the - # command queue. If cmdqueue is not exausted, the user input is - # not expected and we will not print the stack entry. - self.cmdqueue.append('_pdbcmd_print_frame_status') - self._cmdloop() - # If _pdbcmd_print_frame_status is not used, pop it out - if self.cmdqueue and self.cmdqueue[-1] == '_pdbcmd_print_frame_status': - self.cmdqueue.pop() - self.forget() - - def displayhook(self, obj): - """Custom displayhook for the exec in default(), which prevents - assignment of the _ variable in the builtins. - """ - # reproduce the behavior of the standard displayhook, not printing None - if obj is not None: - self.message(repr(obj)) - - @contextmanager - def _disable_command_completion(self): - completenames = self.completenames - try: - self.completenames = self.completedefault - yield - finally: - self.completenames = completenames - return - - def _exec_in_closure(self, source, globals, locals): - """ Run source code in closure so code object created within source - can find variables in locals correctly - - returns True if the source is executed, False otherwise - """ - - # Determine if the source should be executed in closure. Only when the - # source compiled to multiple code objects, we should use this feature. - # Otherwise, we can just raise an exception and normal exec will be used. 
- - code = compile(source, "", "exec") - if not any(isinstance(const, CodeType) for const in code.co_consts): - return False - - # locals could be a proxy which does not support pop - # copy it first to avoid modifying the original locals - locals_copy = dict(locals) - - locals_copy["__pdb_eval__"] = { - "result": None, - "write_back": {} - } - - # If the source is an expression, we need to print its value - try: - compile(source, "", "eval") - except SyntaxError: - pass - else: - source = "__pdb_eval__['result'] = " + source - - # Add write-back to update the locals - source = ("try:\n" + - textwrap.indent(source, " ") + "\n" + - "finally:\n" + - " __pdb_eval__['write_back'] = locals()") - - # Build a closure source code with freevars from locals like: - # def __pdb_outer(): - # var = None - # def __pdb_scope(): # This is the code object we want to execute - # nonlocal var - # - # return __pdb_scope.__code__ - source_with_closure = ("def __pdb_outer():\n" + - "\n".join(f" {var} = None" for var in locals_copy) + "\n" + - " def __pdb_scope():\n" + - "\n".join(f" nonlocal {var}" for var in locals_copy) + "\n" + - textwrap.indent(source, " ") + "\n" + - " return __pdb_scope.__code__" - ) - - # Get the code object of __pdb_scope() - # The exec fills locals_copy with the __pdb_outer() function and we can call - # that to get the code object of __pdb_scope() - ns = {} - try: - exec(source_with_closure, {}, ns) - except Exception: - return False - code = ns["__pdb_outer"]() - - cells = tuple(types.CellType(locals_copy.get(var)) for var in code.co_freevars) - - try: - exec(code, globals, locals_copy, closure=cells) - except Exception: - return False - - # get the data we need from the statement - pdb_eval = locals_copy["__pdb_eval__"] - - # __pdb_eval__ should not be updated back to locals - pdb_eval["write_back"].pop("__pdb_eval__") - - # Write all local variables back to locals - locals.update(pdb_eval["write_back"]) - eval_result = pdb_eval["result"] - if eval_result is not None: - print(repr(eval_result)) - - return True - - def default(self, line): - if line[:1] == '!': line = line[1:].strip() - locals = self.curframe_locals - globals = self.curframe.f_globals - try: - buffer = line - if (code := codeop.compile_command(line + '\n', '', 'single')) is None: - # Multi-line mode - with self._disable_command_completion(): - buffer = line - continue_prompt = "... " - while (code := codeop.compile_command(buffer, '', 'single')) is None: - if self.use_rawinput: - try: - line = input(continue_prompt) - except (EOFError, KeyboardInterrupt): - self.lastcmd = "" - print('\n') - return - else: - self.stdout.write(continue_prompt) - self.stdout.flush() - line = self.stdin.readline() - if not len(line): - self.lastcmd = "" - self.stdout.write('\n') - self.stdout.flush() - return - else: - line = line.rstrip('\r\n') - buffer += '\n' + line - self.lastcmd = buffer - save_stdout = sys.stdout - save_stdin = sys.stdin - save_displayhook = sys.displayhook - try: - sys.stdin = self.stdin - sys.stdout = self.stdout - sys.displayhook = self.displayhook - if not self._exec_in_closure(buffer, globals, locals): - exec(code, globals, locals) - finally: - sys.stdout = save_stdout - sys.stdin = save_stdin - sys.displayhook = save_displayhook - except: - self._error_exc() - - def _replace_convenience_variables(self, line): - """Replace the convenience variables in 'line' with their values. - e.g. $foo is replaced by __pdb_convenience_variables["foo"]. 
- Note: such pattern in string literals will be skipped""" - - if "$" not in line: - return line - - dollar_start = dollar_end = -1 - replace_variables = [] - try: - for t in tokenize.generate_tokens(io.StringIO(line).readline): - token_type, token_string, start, end, _ = t - if token_type == token.OP and token_string == '$': - dollar_start, dollar_end = start, end - elif start == dollar_end and token_type == token.NAME: - # line is a one-line command so we only care about column - replace_variables.append((dollar_start[1], end[1], token_string)) - except tokenize.TokenError: - return line - - if not replace_variables: - return line - - last_end = 0 - line_pieces = [] - for start, end, name in replace_variables: - line_pieces.append(line[last_end:start] + f'__pdb_convenience_variables["{name}"]') - last_end = end - line_pieces.append(line[last_end:]) - - return ''.join(line_pieces) - - def precmd(self, line): - """Handle alias expansion and ';;' separator.""" - if not line.strip(): - return line - args = line.split() - while args[0] in self.aliases: - line = self.aliases[args[0]] - for idx in range(1, 10): - if f'%{idx}' in line: - if idx >= len(args): - self.error(f"Not enough arguments for alias '{args[0]}'") - # This is a no-op - return "!" - line = line.replace(f'%{idx}', args[idx]) - elif '%*' not in line: - if idx < len(args): - self.error(f"Too many arguments for alias '{args[0]}'") - # This is a no-op - return "!" - break - - line = line.replace("%*", ' '.join(args[1:])) - args = line.split() - # split into ';;' separated commands - # unless it's an alias command - if args[0] != 'alias': - marker = line.find(';;') - if marker >= 0: - # queue up everything after marker - next = line[marker+2:].lstrip() - self.cmdqueue.insert(0, next) - line = line[:marker].rstrip() - - # Replace all the convenience variables - line = self._replace_convenience_variables(line) - - return line - - def onecmd(self, line): - """Interpret the argument as though it had been typed in response - to the prompt. - - Checks whether this line is typed at the normal prompt or in - a breakpoint command list definition. 
- """ - if not self.commands_defining: - self._validate_file_mtime() - if line.startswith('_pdbcmd'): - command, arg, line = self.parseline(line) - if hasattr(self, command): - return getattr(self, command)(arg) - return cmd.Cmd.onecmd(self, line) - else: - return self.handle_command_def(line) - - def handle_command_def(self, line): - """Handles one command line during command list definition.""" - cmd, arg, line = self.parseline(line) - if not cmd: - return False - if cmd == 'silent': - self.commands_silent[self.commands_bnum] = True - return False # continue to handle other cmd def in the cmd list - elif cmd == 'end': - return True # end of cmd list - cmdlist = self.commands[self.commands_bnum] - if arg: - cmdlist.append(cmd+' '+arg) - else: - cmdlist.append(cmd) - # Determine if we must stop - try: - func = getattr(self, 'do_' + cmd) - except AttributeError: - func = self.default - # one of the resuming commands - if func.__name__ in self.commands_resuming: - self.commands_doprompt[self.commands_bnum] = False - return True - return False - - # interface abstraction functions - - def message(self, msg, end='\n'): - print(msg, end=end, file=self.stdout) - - def error(self, msg): - print('***', msg, file=self.stdout) - - # convenience variables - - def set_convenience_variable(self, frame, name, value): - if '__pdb_convenience_variables' not in frame.f_globals: - frame.f_globals['__pdb_convenience_variables'] = {} - frame.f_globals['__pdb_convenience_variables'][name] = value - - # Generic completion functions. Individual complete_foo methods can be - # assigned below to one of these functions. - - @property - def rlcompleter(self): - """Return the `Completer` class from `rlcompleter`, while avoiding the - side effects of changing the completer from `import rlcompleter`. - - This is a compromise between GH-138860 and GH-139289. If GH-139289 is - fixed, then we don't need this and we can just `import rlcompleter` in - `Pdb.__init__`. - """ - if not hasattr(self, "_rlcompleter"): - try: - import readline - except ImportError: - # readline is not available, just get the Completer - from rlcompleter import Completer - self._rlcompleter = Completer - else: - # importing rlcompleter could have side effect of changing - # the current completer, we need to restore it - prev_completer = readline.get_completer() - from rlcompleter import Completer - self._rlcompleter = Completer - readline.set_completer(prev_completer) - return self._rlcompleter - - def completenames(self, text, line, begidx, endidx): - # Overwrite completenames() of cmd so for the command completion, - # if no current command matches, check for expressions as well - commands = super().completenames(text, line, begidx, endidx) - for alias in self.aliases: - if alias.startswith(text): - commands.append(alias) - if commands: - return commands - else: - expressions = self._complete_expression(text, line, begidx, endidx) - if expressions: - return expressions - return self.completedefault(text, line, begidx, endidx) - - def _complete_location(self, text, line, begidx, endidx): - # Complete a file/module/function location for break/tbreak/clear. - if line.strip().endswith((':', ',')): - # Here comes a line number or a condition which we can't complete. - return [] - # First, try to find matching functions (i.e. expressions). - try: - ret = self._complete_expression(text, line, begidx, endidx) - except Exception: - ret = [] - # Then, try to complete file names as well. 
- globs = glob.glob(glob.escape(text) + '*') - for fn in globs: - if os.path.isdir(fn): - ret.append(fn + '/') - elif os.path.isfile(fn) and fn.lower().endswith(('.py', '.pyw')): - ret.append(fn + ':') - return ret - - def _complete_bpnumber(self, text, line, begidx, endidx): - # Complete a breakpoint number. (This would be more helpful if we could - # display additional info along with the completions, such as file/line - # of the breakpoint.) - return [str(i) for i, bp in enumerate(bdb.Breakpoint.bpbynumber) - if bp is not None and str(i).startswith(text)] - - def _complete_expression(self, text, line, begidx, endidx): - # Complete an arbitrary expression. - if not self.curframe: - return [] - # Collect globals and locals. It is usually not really sensible to also - # complete builtins, and they clutter the namespace quite heavily, so we - # leave them out. - ns = {**self.curframe.f_globals, **self.curframe_locals} - if text.startswith("$"): - # Complete convenience variables - conv_vars = self.curframe.f_globals.get('__pdb_convenience_variables', {}) - return [f"${name}" for name in conv_vars if name.startswith(text[1:])] - if '.' in text: - # Walk an attribute chain up to the last part, similar to what - # rlcompleter does. This will bail if any of the parts are not - # simple attribute access, which is what we want. - dotted = text.split('.') - try: - obj = ns[dotted[0]] - for part in dotted[1:-1]: - obj = getattr(obj, part) - except (KeyError, AttributeError): - return [] - prefix = '.'.join(dotted[:-1]) + '.' - return [prefix + n for n in dir(obj) if n.startswith(dotted[-1])] - else: - # Complete a simple name. - return [n for n in ns.keys() if n.startswith(text)] - - def completedefault(self, text, line, begidx, endidx): - if text.startswith("$"): - # Complete convenience variables - conv_vars = self.curframe.f_globals.get('__pdb_convenience_variables', {}) - return [f"${name}" for name in conv_vars if name.startswith(text[1:])] - - state = 0 - matches = [] - completer = self.rlcompleter(self.curframe.f_globals | self.curframe_locals) - while (match := completer.complete(text, state)) is not None: - matches.append(match) - state += 1 - return matches - - # Pdb meta commands, only intended to be used internally by pdb - - def _pdbcmd_print_frame_status(self, arg): - self.print_stack_entry(self.stack[self.curindex]) - self._show_display() - - # Command definitions, called by cmdloop() - # The argument is the remaining string on the command line - # Return true to exit from the command loop - - def do_commands(self, arg): - """(Pdb) commands [bpnumber] - (com) ... - (com) end - (Pdb) - - Specify a list of commands for breakpoint number bpnumber. - The commands themselves are entered on the following lines. - Type a line containing just 'end' to terminate the commands. - The commands are executed when the breakpoint is hit. - - To remove all commands from a breakpoint, type commands and - follow it immediately with end; that is, give no commands. - - With no bpnumber argument, commands refers to the last - breakpoint set. - - You can use breakpoint commands to start your program up - again. Simply use the continue command, or step, or any other - command that resumes execution. - - Specifying any command resuming execution (currently continue, - step, next, return, jump, quit and their abbreviations) - terminates the command list (as if that command was - immediately followed by end). 
This is because any time you - resume execution (even with a simple next or step), you may - encounter another breakpoint -- which could have its own - command list, leading to ambiguities about which list to - execute. - - If you use the 'silent' command in the command list, the usual - message about stopping at a breakpoint is not printed. This - may be desirable for breakpoints that are to print a specific - message and then continue. If none of the other commands - print anything, you will see no sign that the breakpoint was - reached. - """ - if not arg: - bnum = len(bdb.Breakpoint.bpbynumber) - 1 - else: - try: - bnum = int(arg) - except: - self._print_invalid_arg(arg) - return - try: - self.get_bpbynumber(bnum) - except ValueError as err: - self.error('cannot set commands: %s' % err) - return - - self.commands_bnum = bnum - # Save old definitions for the case of a keyboard interrupt. - if bnum in self.commands: - old_command_defs = (self.commands[bnum], - self.commands_doprompt[bnum], - self.commands_silent[bnum]) - else: - old_command_defs = None - self.commands[bnum] = [] - self.commands_doprompt[bnum] = True - self.commands_silent[bnum] = False - - prompt_back = self.prompt - self.prompt = '(com) ' - self.commands_defining = True - try: - self.cmdloop() - except KeyboardInterrupt: - # Restore old definitions. - if old_command_defs: - self.commands[bnum] = old_command_defs[0] - self.commands_doprompt[bnum] = old_command_defs[1] - self.commands_silent[bnum] = old_command_defs[2] - else: - del self.commands[bnum] - del self.commands_doprompt[bnum] - del self.commands_silent[bnum] - self.error('command definition aborted, old commands restored') - finally: - self.commands_defining = False - self.prompt = prompt_back - - complete_commands = _complete_bpnumber - - def do_break(self, arg, temporary = 0): - """b(reak) [ ([filename:]lineno | function) [, condition] ] - - Without argument, list all breaks. - - With a line number argument, set a break at this line in the - current file. With a function name, set a break at the first - executable line of that function. If a second argument is - present, it is a string specifying an expression which must - evaluate to true before the breakpoint is honored. - - The line number may be prefixed with a filename and a colon, - to specify a breakpoint in another file (probably one that - hasn't been loaded yet). The file is searched for on - sys.path; the .py suffix may be omitted. 
- """ - if not arg: - if self.breaks: # There's at least one - self.message("Num Type Disp Enb Where") - for bp in bdb.Breakpoint.bpbynumber: - if bp: - self.message(bp.bpformat()) - return - # parse arguments; comma has lowest precedence - # and cannot occur in filename - filename = None - lineno = None - cond = None - comma = arg.find(',') - if comma > 0: - # parse stuff after comma: "condition" - cond = arg[comma+1:].lstrip() - if err := self._compile_error_message(cond): - self.error('Invalid condition %s: %r' % (cond, err)) - return - arg = arg[:comma].rstrip() - # parse stuff before comma: [filename:]lineno | function - colon = arg.rfind(':') - funcname = None - if colon >= 0: - filename = arg[:colon].rstrip() - f = self.lookupmodule(filename) - if not f: - self.error('%r not found from sys.path' % filename) - return - else: - filename = f - arg = arg[colon+1:].lstrip() - try: - lineno = int(arg) - except ValueError: - self.error('Bad lineno: %s' % arg) - return - else: - # no colon; can be lineno or function - try: - lineno = int(arg) - except ValueError: - try: - func = eval(arg, - self.curframe.f_globals, - self.curframe_locals) - except: - func = arg - try: - if hasattr(func, '__func__'): - func = func.__func__ - code = func.__code__ - #use co_name to identify the bkpt (function names - #could be aliased, but co_name is invariant) - funcname = code.co_name - lineno = find_first_executable_line(code) - filename = code.co_filename - except: - # last thing to try - (ok, filename, ln) = self.lineinfo(arg) - if not ok: - self.error('The specified object %r is not a function ' - 'or was not found along sys.path.' % arg) - return - funcname = ok # ok contains a function name - lineno = int(ln) - if not filename: - filename = self.defaultFile() - # Check for reasonable breakpoint - line = self.checkline(filename, lineno) - if line: - # now set the break point - err = self.set_break(filename, line, temporary, cond, funcname) - if err: - self.error(err) - else: - bp = self.get_breaks(filename, line)[-1] - self.message("Breakpoint %d at %s:%d" % - (bp.number, bp.file, bp.line)) - - # To be overridden in derived debuggers - def defaultFile(self): - """Produce a reasonable default.""" - filename = self.curframe.f_code.co_filename - if filename == '' and self.mainpyfile: - filename = self.mainpyfile - return filename - - do_b = do_break - - complete_break = _complete_location - complete_b = _complete_location - - def do_tbreak(self, arg): - """tbreak [ ([filename:]lineno | function) [, condition] ] - - Same arguments as break, but sets a temporary breakpoint: it - is automatically deleted when first hit. - """ - self.do_break(arg, 1) - - complete_tbreak = _complete_location - - def lineinfo(self, identifier): - failed = (None, None, None) - # Input is identifier, may be in single quotes - idstring = identifier.split("'") - if len(idstring) == 1: - # not in single quotes - id = idstring[0].strip() - elif len(idstring) == 3: - # quoted - id = idstring[1].strip() - else: - return failed - if id == '': return failed - parts = id.split('.') - # Protection for derived debuggers - if parts[0] == 'self': - del parts[0] - if len(parts) == 0: - return failed - # Best first guess at file to look at - fname = self.defaultFile() - if len(parts) == 1: - item = parts[0] - else: - # More than one part. 
- # First is module, second is method/class - f = self.lookupmodule(parts[0]) - if f: - fname = f - item = parts[1] - else: - return failed - answer = find_function(item, self.canonic(fname)) - return answer or failed - - def checkline(self, filename, lineno): - """Check whether specified line seems to be executable. - - Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank - line or EOF). Warning: testing is not comprehensive. - """ - # this method should be callable before starting debugging, so default - # to "no globals" if there is no current frame - frame = getattr(self, 'curframe', None) - globs = frame.f_globals if frame else None - line = linecache.getline(filename, lineno, globs) - if not line: - self.message('End of file') - return 0 - line = line.strip() - # Don't allow setting breakpoint at a blank line - if (not line or (line[0] == '#') or - (line[:3] == '"""') or line[:3] == "'''"): - self.error('Blank or comment') - return 0 - return lineno - - def do_enable(self, arg): - """enable bpnumber [bpnumber ...] - - Enables the breakpoints given as a space separated list of - breakpoint numbers. - """ - args = arg.split() - for i in args: - try: - bp = self.get_bpbynumber(i) - except ValueError as err: - self.error(err) - else: - bp.enable() - self.message('Enabled %s' % bp) - - complete_enable = _complete_bpnumber - - def do_disable(self, arg): - """disable bpnumber [bpnumber ...] - - Disables the breakpoints given as a space separated list of - breakpoint numbers. Disabling a breakpoint means it cannot - cause the program to stop execution, but unlike clearing a - breakpoint, it remains in the list of breakpoints and can be - (re-)enabled. - """ - args = arg.split() - for i in args: - try: - bp = self.get_bpbynumber(i) - except ValueError as err: - self.error(err) - else: - bp.disable() - self.message('Disabled %s' % bp) - - complete_disable = _complete_bpnumber - - def do_condition(self, arg): - """condition bpnumber [condition] - - Set a new condition for the breakpoint, an expression which - must evaluate to true before the breakpoint is honored. If - condition is absent, any existing condition is removed; i.e., - the breakpoint is made unconditional. - """ - args = arg.split(' ', 1) - try: - cond = args[1] - if err := self._compile_error_message(cond): - self.error('Invalid condition %s: %r' % (cond, err)) - return - except IndexError: - cond = None - try: - bp = self.get_bpbynumber(args[0].strip()) - except IndexError: - self.error('Breakpoint number expected') - except ValueError as err: - self.error(err) - else: - bp.cond = cond - if not cond: - self.message('Breakpoint %d is now unconditional.' % bp.number) - else: - self.message('New condition set for breakpoint %d.' % bp.number) - - complete_condition = _complete_bpnumber - - def do_ignore(self, arg): - """ignore bpnumber [count] - - Set the ignore count for the given breakpoint number. If - count is omitted, the ignore count is set to 0. A breakpoint - becomes active when the ignore count is zero. When non-zero, - the count is decremented each time the breakpoint is reached - and the breakpoint is not disabled and any associated - condition evaluates to true. 
- """ - args = arg.split() - if not args: - self.error('Breakpoint number expected') - return - if len(args) == 1: - count = 0 - elif len(args) == 2: - try: - count = int(args[1]) - except ValueError: - self._print_invalid_arg(arg) - return - else: - self._print_invalid_arg(arg) - return - try: - bp = self.get_bpbynumber(args[0].strip()) - except ValueError as err: - self.error(err) - else: - bp.ignore = count - if count > 0: - if count > 1: - countstr = '%d crossings' % count - else: - countstr = '1 crossing' - self.message('Will ignore next %s of breakpoint %d.' % - (countstr, bp.number)) - else: - self.message('Will stop next time breakpoint %d is reached.' - % bp.number) - - complete_ignore = _complete_bpnumber - - def do_clear(self, arg): - """cl(ear) [filename:lineno | bpnumber ...] - - With a space separated list of breakpoint numbers, clear - those breakpoints. Without argument, clear all breaks (but - first ask confirmation). With a filename:lineno argument, - clear all breaks at that line in that file. - """ - if not arg: - try: - reply = input('Clear all breaks? ') - except EOFError: - reply = 'no' - reply = reply.strip().lower() - if reply in ('y', 'yes'): - bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp] - self.clear_all_breaks() - for bp in bplist: - self.message('Deleted %s' % bp) - return - if ':' in arg: - # Make sure it works for "clear C:\foo\bar.py:12" - i = arg.rfind(':') - filename = arg[:i] - arg = arg[i+1:] - try: - lineno = int(arg) - except ValueError: - err = "Invalid line number (%s)" % arg - else: - bplist = self.get_breaks(filename, lineno)[:] - err = self.clear_break(filename, lineno) - if err: - self.error(err) - else: - for bp in bplist: - self.message('Deleted %s' % bp) - return - numberlist = arg.split() - for i in numberlist: - try: - bp = self.get_bpbynumber(i) - except ValueError as err: - self.error(err) - else: - self.clear_bpbynumber(i) - self.message('Deleted %s' % bp) - do_cl = do_clear # 'c' is already an abbreviation for 'continue' - - complete_clear = _complete_location - complete_cl = _complete_location - - def do_where(self, arg): - """w(here) - - Print a stack trace, with the most recent frame at the bottom. - An arrow indicates the "current frame", which determines the - context of most commands. 'bt' is an alias for this command. - """ - if arg: - self._print_invalid_arg(arg) - return - self.print_stack_trace() - do_w = do_where - do_bt = do_where - - def _select_frame(self, number): - assert 0 <= number < len(self.stack) - self.curindex = number - self.curframe = self.stack[self.curindex][0] - self.curframe_locals = self.curframe.f_locals - self.set_convenience_variable(self.curframe, '_frame', self.curframe) - self.print_stack_entry(self.stack[self.curindex]) - self.lineno = None - - def do_exceptions(self, arg): - """exceptions [number] - - List or change current exception in an exception chain. - - Without arguments, list all the current exception in the exception - chain. Exceptions will be numbered, with the current exception indicated - with an arrow. - - If given an integer as argument, switch to the exception at that index. - """ - if not self._chained_exceptions: - self.message( - "Did not find chained exceptions. To move between" - " exceptions, pdb/post_mortem must be given an exception" - " object rather than a traceback." 
- ) - return - if not arg: - for ix, exc in enumerate(self._chained_exceptions): - prompt = ">" if ix == self._chained_exception_index else " " - rep = repr(exc) - if len(rep) > 80: - rep = rep[:77] + "..." - indicator = ( - " -" - if self._chained_exceptions[ix].__traceback__ is None - else f"{ix:>3}" - ) - self.message(f"{prompt} {indicator} {rep}") - else: - try: - number = int(arg) - except ValueError: - self.error("Argument must be an integer") - return - if 0 <= number < len(self._chained_exceptions): - if self._chained_exceptions[number].__traceback__ is None: - self.error("This exception does not have a traceback, cannot jump to it") - return - - self._chained_exception_index = number - self.setup(None, self._chained_exceptions[number].__traceback__) - self.print_stack_entry(self.stack[self.curindex]) - else: - self.error("No exception with that number") - - def do_up(self, arg): - """u(p) [count] - - Move the current frame count (default one) levels up in the - stack trace (to an older frame). - """ - if self.curindex == 0: - self.error('Oldest frame') - return - try: - count = int(arg or 1) - except ValueError: - self.error('Invalid frame count (%s)' % arg) - return - if count < 0: - newframe = 0 - else: - newframe = max(0, self.curindex - count) - self._select_frame(newframe) - do_u = do_up - - def do_down(self, arg): - """d(own) [count] - - Move the current frame count (default one) levels down in the - stack trace (to a newer frame). - """ - if self.curindex + 1 == len(self.stack): - self.error('Newest frame') - return - try: - count = int(arg or 1) - except ValueError: - self.error('Invalid frame count (%s)' % arg) - return - if count < 0: - newframe = len(self.stack) - 1 - else: - newframe = min(len(self.stack) - 1, self.curindex + count) - self._select_frame(newframe) - do_d = do_down - - def do_until(self, arg): - """unt(il) [lineno] - - Without argument, continue execution until the line with a - number greater than the current one is reached. With a line - number, continue execution until a line with a number greater - or equal to that is reached. In both cases, also stop when - the current frame returns. - """ - if arg: - try: - lineno = int(arg) - except ValueError: - self.error('Error in argument: %r' % arg) - return - if lineno <= self.curframe.f_lineno: - self.error('"until" line number is smaller than current ' - 'line number') - return - else: - lineno = None - self.set_until(self.curframe, lineno) - return 1 - do_unt = do_until - - def do_step(self, arg): - """s(tep) - - Execute the current line, stop at the first possible occasion - (either in a function that is called or in the current - function). - """ - if arg: - self._print_invalid_arg(arg) - return - self.set_step() - return 1 - do_s = do_step - - def do_next(self, arg): - """n(ext) - - Continue execution until the next line in the current function - is reached or it returns. - """ - if arg: - self._print_invalid_arg(arg) - return - self.set_next(self.curframe) - return 1 - do_n = do_next - - def do_run(self, arg): - """run [args...] - - Restart the debugged python program. If a string is supplied - it is split with "shlex", and the result is used as the new - sys.argv. History, breakpoints, actions and debugger options - are preserved. "restart" is an alias for "run". 
- """ - if arg: - import shlex - argv0 = sys.argv[0:1] - try: - sys.argv = shlex.split(arg) - except ValueError as e: - self.error('Cannot run %s: %s' % (arg, e)) - return - sys.argv[:0] = argv0 - # this is caught in the main debugger loop - raise Restart - - do_restart = do_run - - def do_return(self, arg): - """r(eturn) - - Continue execution until the current function returns. - """ - if arg: - self._print_invalid_arg(arg) - return - self.set_return(self.curframe) - return 1 - do_r = do_return - - def do_continue(self, arg): - """c(ont(inue)) - - Continue execution, only stop when a breakpoint is encountered. - """ - if arg: - self._print_invalid_arg(arg) - return - if not self.nosigint: - try: - Pdb._previous_sigint_handler = \ - signal.signal(signal.SIGINT, self.sigint_handler) - except ValueError: - # ValueError happens when do_continue() is invoked from - # a non-main thread in which case we just continue without - # SIGINT set. Would printing a message here (once) make - # sense? - pass - self.set_continue() - return 1 - do_c = do_cont = do_continue - - def do_jump(self, arg): - """j(ump) lineno - - Set the next line that will be executed. Only available in - the bottom-most frame. This lets you jump back and execute - code again, or jump forward to skip code that you don't want - to run. - - It should be noted that not all jumps are allowed -- for - instance it is not possible to jump into the middle of a - for loop or out of a finally clause. - """ - if self.curindex + 1 != len(self.stack): - self.error('You can only jump within the bottom frame') - return - try: - arg = int(arg) - except ValueError: - self.error("The 'jump' command requires a line number") - else: - try: - # Do the jump, fix up our copy of the stack, and display the - # new position - self.curframe.f_lineno = arg - self.stack[self.curindex] = self.stack[self.curindex][0], arg - self.print_stack_entry(self.stack[self.curindex]) - except ValueError as e: - self.error('Jump failed: %s' % e) - do_j = do_jump - - def do_debug(self, arg): - """debug code - - Enter a recursive debugger that steps through the code - argument (which is an arbitrary expression or statement to be - executed in the current environment). - """ - sys.settrace(None) - globals = self.curframe.f_globals - locals = self.curframe_locals - p = Pdb(self.completekey, self.stdin, self.stdout) - p.prompt = "(%s) " % self.prompt.strip() - self.message("ENTERING RECURSIVE DEBUGGER") - try: - sys.call_tracing(p.run, (arg, globals, locals)) - except Exception: - self._error_exc() - self.message("LEAVING RECURSIVE DEBUGGER") - sys.settrace(self.trace_dispatch) - self.lastcmd = p.lastcmd - - complete_debug = _complete_expression - - def do_quit(self, arg): - """q(uit) | exit - - Quit from the debugger. The program being executed is aborted. - """ - self._user_requested_quit = True - self.set_quit() - return 1 - - do_q = do_quit - do_exit = do_quit - - def do_EOF(self, arg): - """EOF - - Handles the receipt of EOF as a command. - """ - self.message('') - self._user_requested_quit = True - self.set_quit() - return 1 - - def do_args(self, arg): - """a(rgs) - - Print the argument list of the current function. 
- """ - if arg: - self._print_invalid_arg(arg) - return - co = self.curframe.f_code - dict = self.curframe_locals - n = co.co_argcount + co.co_kwonlyargcount - if co.co_flags & inspect.CO_VARARGS: n = n+1 - if co.co_flags & inspect.CO_VARKEYWORDS: n = n+1 - for i in range(n): - name = co.co_varnames[i] - if name in dict: - self.message('%s = %s' % (name, self._safe_repr(dict[name], name))) - else: - self.message('%s = *** undefined ***' % (name,)) - do_a = do_args - - def do_retval(self, arg): - """retval - - Print the return value for the last return of a function. - """ - if arg: - self._print_invalid_arg(arg) - return - if '__return__' in self.curframe_locals: - self.message(self._safe_repr(self.curframe_locals['__return__'], "retval")) - else: - self.error('Not yet returned!') - do_rv = do_retval - - def _getval(self, arg): - try: - return eval(arg, self.curframe.f_globals, self.curframe_locals) - except: - self._error_exc() - raise - - def _getval_except(self, arg, frame=None): - try: - if frame is None: - return eval(arg, self.curframe.f_globals, self.curframe_locals) - else: - return eval(arg, frame.f_globals, frame.f_locals) - except BaseException as exc: - return _rstr('** raised %s **' % self._format_exc(exc)) - - def _error_exc(self): - exc = sys.exception() - self.error(self._format_exc(exc)) - - def _msg_val_func(self, arg, func): - try: - val = self._getval(arg) - except: - return # _getval() has displayed the error - try: - self.message(func(val)) - except: - self._error_exc() - - def _safe_repr(self, obj, expr): - try: - return repr(obj) - except Exception as e: - return _rstr(f"*** repr({expr}) failed: {self._format_exc(e)} ***") - - def do_p(self, arg): - """p expression - - Print the value of the expression. - """ - self._msg_val_func(arg, repr) - - def do_pp(self, arg): - """pp expression - - Pretty-print the value of the expression. - """ - self._msg_val_func(arg, pprint.pformat) - - complete_print = _complete_expression - complete_p = _complete_expression - complete_pp = _complete_expression - - def do_list(self, arg): - """l(ist) [first[, last] | .] - - List source code for the current file. Without arguments, - list 11 lines around the current line or continue the previous - listing. With . as argument, list 11 lines around the current - line. With one argument, list 11 lines starting at that line. - With two arguments, list the given range; if the second - argument is less than the first, it is a count. - - The current line in the current frame is indicated by "->". - If an exception is being debugged, the line where the - exception was originally raised or propagated is indicated by - ">>", if it differs from the current line. - """ - self.lastcmd = 'list' - last = None - if arg and arg != '.': - try: - if ',' in arg: - first, last = arg.split(',') - first = int(first.strip()) - last = int(last.strip()) - if last < first: - # assume it's a count - last = first + last - else: - first = int(arg.strip()) - first = max(1, first - 5) - except ValueError: - self.error('Error in argument: %r' % arg) - return - elif self.lineno is None or arg == '.': - first = max(1, self.curframe.f_lineno - 5) - else: - first = self.lineno + 1 - if last is None: - last = first + 10 - filename = self.curframe.f_code.co_filename - # gh-93696: stdlib frozen modules provide a useful __file__ - # this workaround can be removed with the closure of gh-89815 - if filename.startswith("

-    def section(self, title, cls, contents, width=6,
-                prelude='', marginalia=None, gap='&nbsp;'):
-        """Format a section with a heading."""
-        if marginalia is None:
-            marginalia = '<span class="code">' + '&nbsp;' * width + '</span>'
-        result = '''<p>
-<table class="section">
-<tr class="decor %s-decor heading-text">
-<td class="section-title" colspan=3>&nbsp;<br>%s</td></tr>
-    ''' % (cls, title)
-        if prelude:
-            result = result + '''
-<tr><td class="decor %s-decor" rowspan=2>%s</td>
-<td class="decor %s-decor" colspan=2>%s</td></tr>
-<tr><td>%s</td>''' % (cls, marginalia, cls, prelude, gap)
-        else:
-            result = result + '''
-<tr><td class="decor %s-decor">%s</td><td>%s</td>''' % (cls, marginalia, gap)
-
-        return result + '\n<td class="singlecolumn">%s</td></tr></table>' % contents
-
-    def bigsection(self, title, *args):
-        """Format a section with a big heading."""
-        title = '<strong class="bigsection">%s</strong>' % title
-        return self.section(title, *args)
-
-    def preformat(self, text):
-        """Format literal preformatted text."""
-        text = self.escape(text.expandtabs())
-        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
-                             ' ', '&nbsp;', '\n', '<br>\n')
-
-    def multicolumn(self, list, format):
-        """Format a list of items into a multi-column list."""
-        result = ''
-        rows = (len(list) + 3) // 4
-        for col in range(4):
-            result = result + '<td class="multicolumn">'
-            for i in range(rows*col, rows*col+rows):
-                if i < len(list):
-                    result = result + format(list[i]) + '<br>\n'
-            result = result + '</td>'
-        return '<table><tr>%s</tr></table>' % result
-
-    def grey(self, text): return '<span class="grey">%s</span>' % text
-
-    def namelink(self, name, *dicts):
-        """Make a link for an identifier, given name-to-URL mappings."""
-        for dict in dicts:
-            if name in dict:
-                return '<a href="%s">%s</a>' % (dict[name], name)
-        return name
-
-    def classlink(self, object, modname):
-        """Make a link for a class."""
-        name, module = object.__name__, sys.modules.get(object.__module__)
-        if hasattr(module, name) and getattr(module, name) is object:
-            return '<a href="%s.html#%s">%s</a>' % (
-                module.__name__, name, classname(object, modname))
-        return classname(object, modname)
-
-    def parentlink(self, object, modname):
-        """Make a link for the enclosing class or module."""
-        link = None
-        name, module = object.__name__, sys.modules.get(object.__module__)
-        if hasattr(module, name) and getattr(module, name) is object:
-            if '.' in object.__qualname__:
-                name = object.__qualname__.rpartition('.')[0]
-                if object.__module__ != modname:
-                    link = '%s.html#%s' % (module.__name__, name)
-                else:
-                    link = '#%s' % name
-            else:
-                if object.__module__ != modname:
-                    link = '%s.html' % module.__name__
-        if link:
-            return '<a href="%s">%s</a>' % (link, parentname(object, modname))
-        else:
-            return parentname(object, modname)
-
-    def modulelink(self, object):
-        """Make a link for a module."""
-        return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
-
-    def modpkglink(self, modpkginfo):
-        """Make a link for a module or package to display in an index."""
-        name, path, ispackage, shadowed = modpkginfo
-        if shadowed:
-            return self.grey(name)
-        if path:
-            url = '%s.%s.html' % (path, name)
-        else:
-            url = '%s.html' % name
-        if ispackage:
-            text = '<strong>%s</strong>&nbsp;(package)' % name
-        else:
-            text = name
-        return '<a href="%s">%s</a>' % (url, text)
-
-    def filelink(self, url, path):
-        """Make a link to source file."""
-        return '<a href="file:%s">%s</a>' % (url, path)
-
-    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
-        """Mark up some plain text, given a context of symbols to look for.
-        Each context dictionary maps object names to anchor names."""
-        escape = escape or self.escape
-        results = []
-        here = 0
-        pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|'
-                                r'RFC[- ]?(\d+)|'
-                                r'PEP[- ]?(\d+)|'
-                                r'(self\.)?(\w+))')
-        while match := pattern.search(text, here):
-            start, end = match.span()
-            results.append(escape(text[here:start]))
-
-            all, scheme, rfc, pep, selfdot, name = match.groups()
-            if scheme:
-                url = escape(all).replace('"', '&quot;')
-                results.append('<a href="%s">%s</a>' % (url, url))
-            elif rfc:
-                url = 'https://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
-                results.append('<a href="%s">%s</a>' % (url, escape(all)))
-            elif pep:
-                url = 'https://peps.python.org/pep-%04d/' % int(pep)
-                results.append('<a href="%s">%s</a>' % (url, escape(all)))
-            elif selfdot:
-                # Create a link for methods like 'self.method(...)'
-                # and use <strong> for attributes like 'self.attr'
-                if text[end:end+1] == '(':
-                    results.append('self.' + self.namelink(name, methods))
-                else:
-                    results.append('self.<strong>%s</strong>' % name)
-            elif text[end:end+1] == '(':
-                results.append(self.namelink(name, methods, funcs, classes))
-            else:
-                results.append(self.namelink(name, classes))
-            here = end
-        results.append(escape(text[here:]))
-        return ''.join(results)
-
-    # ---------------------------------------------- type-specific routines
-
-    def formattree(self, tree, modname, parent=None):
-        """Produce HTML for a class tree as given by inspect.getclasstree()."""
-        result = ''
-        for entry in tree:
-            if isinstance(entry, tuple):
-                c, bases = entry
-                result = result + '<dt class="heading-text">'
-                result = result + self.classlink(c, modname)
-                if bases and bases != (parent,):
-                    parents = []
-                    for base in bases:
-                        parents.append(self.classlink(base, modname))
-                    result = result + '(' + ', '.join(parents) + ')'
-                result = result + '\n</dt>'
-            elif isinstance(entry, list):
-                result = result + '<dd>\n%s</dd>\n' % self.formattree(
-                    entry, modname, c)
-        return '<dl>\n%s</dl>\n' % result
-
-    def docmodule(self, object, name=None, mod=None, *ignored):
-        """Produce HTML documentation for a module object."""
-        name = object.__name__ # ignore the passed-in name
-        try:
-            all = object.__all__
-        except AttributeError:
-            all = None
-        parts = name.split('.')
-        links = []
-        for i in range(len(parts)-1):
-            links.append(
-                '<a href="%s.html" class="white">%s</a>' %
-                ('.'.join(parts[:i+1]), parts[i]))
-        linkedname = '.'.join(links + parts[-1:])
-        head = '<strong class="title">%s</strong>' % linkedname
-        try:
-            path = inspect.getabsfile(object)
-            url = urllib.parse.quote(path)
-            filelink = self.filelink(url, path)
-        except TypeError:
-            filelink = '(built-in)'
-        info = []
-        if hasattr(object, '__version__'):
-            version = str(object.__version__)
-            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
-                version = version[11:-1].strip()
-            info.append('version %s' % self.escape(version))
-        if hasattr(object, '__date__'):
-            info.append(self.escape(str(object.__date__)))
-        if info:
-            head = head + ' (%s)' % ', '.join(info)
-        docloc = self.getdocloc(object)
-        if docloc is not None:
-            docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
-        else:
-            docloc = ''
-        result = self.heading(head, '<a href=".">index</a><br>
' + filelink + docloc) - - modules = inspect.getmembers(object, inspect.ismodule) - - classes, cdict = [], {} - for key, value in inspect.getmembers(object, inspect.isclass): - # if __all__ exists, believe it. Otherwise use old heuristic. - if (all is not None or - (inspect.getmodule(value) or object) is object): - if visiblename(key, all, object): - classes.append((key, value)) - cdict[key] = cdict[value] = '#' + key - for key, value in classes: - for base in value.__bases__: - key, modname = base.__name__, base.__module__ - module = sys.modules.get(modname) - if modname != name and module and hasattr(module, key): - if getattr(module, key) is base: - if not key in cdict: - cdict[key] = cdict[base] = modname + '.html#' + key - funcs, fdict = [], {} - for key, value in inspect.getmembers(object, inspect.isroutine): - # if __all__ exists, believe it. Otherwise use a heuristic. - if (all is not None - or inspect.isbuiltin(value) - or (inspect.getmodule(value) or object) is object): - if visiblename(key, all, object): - funcs.append((key, value)) - fdict[key] = '#-' + key - if inspect.isfunction(value): fdict[value] = fdict[key] - data = [] - for key, value in inspect.getmembers(object, isdata): - if visiblename(key, all, object): - data.append((key, value)) - - doc = self.markup(getdoc(object), self.preformat, fdict, cdict) - doc = doc and '%s' % doc - result = result + '

<p>%s</p>

\n' % doc - - if hasattr(object, '__path__'): - modpkgs = [] - for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): - modpkgs.append((modname, name, ispkg, 0)) - modpkgs.sort() - contents = self.multicolumn(modpkgs, self.modpkglink) - result = result + self.bigsection( - 'Package Contents', 'pkg-content', contents) - elif modules: - contents = self.multicolumn( - modules, lambda t: self.modulelink(t[1])) - result = result + self.bigsection( - 'Modules', 'pkg-content', contents) - - if classes: - classlist = [value for (key, value) in classes] - contents = [ - self.formattree(inspect.getclasstree(classlist, 1), name)] - for key, value in classes: - contents.append(self.document(value, key, name, fdict, cdict)) - result = result + self.bigsection( - 'Classes', 'index', ' '.join(contents)) - if funcs: - contents = [] - for key, value in funcs: - contents.append(self.document(value, key, name, fdict, cdict)) - result = result + self.bigsection( - 'Functions', 'functions', ' '.join(contents)) - if data: - contents = [] - for key, value in data: - contents.append(self.document(value, key)) - result = result + self.bigsection( - 'Data', 'data', '
<br>\n'.join(contents))
-        if hasattr(object, '__author__'):
-            contents = self.markup(str(object.__author__), self.preformat)
-            result = result + self.bigsection('Author', 'author', contents)
-        if hasattr(object, '__credits__'):
-            contents = self.markup(str(object.__credits__), self.preformat)
-            result = result + self.bigsection('Credits', 'credits', contents)
-
-        return result
-
-    def docclass(self, object, name=None, mod=None, funcs={}, classes={},
-                 *ignored):
-        """Produce HTML documentation for a class object."""
-        realname = object.__name__
-        name = name or realname
-        bases = object.__bases__
-
-        contents = []
-        push = contents.append
-
-        # Cute little class to pump out a horizontal rule between sections.
-        class HorizontalRule:
-            def __init__(self):
-                self.needone = 0
-            def maybe(self):
-                if self.needone:
-                    push('<hr>
\n')
-                self.needone = 1
-        hr = HorizontalRule()
-
-        # List the mro, if non-trivial.
-        mro = deque(inspect.getmro(object))
-        if len(mro) > 2:
-            hr.maybe()
-            push('<dl><dt>Method resolution order:</dt>\n')
-            for base in mro:
-                push('<dd>%s</dd>\n' % self.classlink(base,
-                                                      object.__module__))
-            push('</dl>
\n') - - def spill(msg, attrs, predicate): - ok, attrs = _split_list(attrs, predicate) - if ok: - hr.maybe() - push(msg) - for name, kind, homecls, value in ok: - try: - value = getattr(object, name) - except Exception: - # Some descriptors may meet a failure in their __get__. - # (bug #1785) - push(self.docdata(value, name, mod)) - else: - push(self.document(value, name, mod, - funcs, classes, mdict, object, homecls)) - push('\n') - return attrs - - def spilldescriptors(msg, attrs, predicate): - ok, attrs = _split_list(attrs, predicate) - if ok: - hr.maybe() - push(msg) - for name, kind, homecls, value in ok: - push(self.docdata(value, name, mod)) - return attrs - - def spilldata(msg, attrs, predicate): - ok, attrs = _split_list(attrs, predicate) - if ok: - hr.maybe() - push(msg) - for name, kind, homecls, value in ok: - base = self.docother(getattr(object, name), name, mod) - doc = getdoc(value) - if not doc: - push('
<dl><dt>%s</dl>\n' % base)
-                    else:
-                        doc = self.markup(getdoc(value), self.preformat,
-                                          funcs, classes, mdict)
-                        doc = '<dd><span class="code">%s</span></dd>' % doc
-                        push('<dl><dt>%s%s</dl>
\n' % (base, doc)) - push('\n') - return attrs - - attrs = [(name, kind, cls, value) - for name, kind, cls, value in classify_class_attrs(object) - if visiblename(name, obj=object)] - - mdict = {} - for key, kind, homecls, value in attrs: - mdict[key] = anchor = '#' + name + '-' + key - try: - value = getattr(object, name) - except Exception: - # Some descriptors may meet a failure in their __get__. - # (bug #1785) - pass - try: - # The value may not be hashable (e.g., a data attr with - # a dict or list value). - mdict[value] = anchor - except TypeError: - pass - - while attrs: - if mro: - thisclass = mro.popleft() - else: - thisclass = attrs[0][2] - attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) - - if object is not builtins.object and thisclass is builtins.object: - attrs = inherited - continue - elif thisclass is object: - tag = 'defined here' - else: - tag = 'inherited from %s' % self.classlink(thisclass, - object.__module__) - tag += ':
<br>\n'
-
-            sort_attributes(attrs, object)
-
-            # Pump out the attrs, segregated by kind.
-            attrs = spill('Methods %s' % tag, attrs,
-                          lambda t: t[1] == 'method')
-            attrs = spill('Class methods %s' % tag, attrs,
-                          lambda t: t[1] == 'class method')
-            attrs = spill('Static methods %s' % tag, attrs,
-                          lambda t: t[1] == 'static method')
-            attrs = spilldescriptors("Readonly properties %s" % tag, attrs,
-                                     lambda t: t[1] == 'readonly property')
-            attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
-                                     lambda t: t[1] == 'data descriptor')
-            attrs = spilldata('Data and other attributes %s' % tag, attrs,
-                              lambda t: t[1] == 'data')
-            assert attrs == []
-            attrs = inherited
-
-        contents = ''.join(contents)
-
-        if name == realname:
-            title = '<a name="%s">class <strong>%s</strong></a>' % (
-                name, realname)
-        else:
-            title = '<strong>%s</strong> = <a name="%s">class <strong>%s</strong></a>' % (
-                name, name, realname)
-        if bases:
-            parents = []
-            for base in bases:
-                parents.append(self.classlink(base, object.__module__))
-            title = title + '(%s)' % ', '.join(parents)
-
-        decl = ''
-        argspec = _getargspec(object)
-        if argspec and argspec != '()':
-            decl = name + self.escape(argspec) + '\n\n'
-
-        doc = getdoc(object)
-        if decl:
-            doc = decl + (doc or '')
-        doc = self.markup(doc, self.preformat, funcs, classes, mdict)
-        doc = doc and '<span class="code">%s<br>&nbsp;</span>
' % doc
-
-        return self.section(title, 'title', contents, 3, doc)
-
-    def formatvalue(self, object):
-        """Format an argument default value as text."""
-        return self.grey('=' + self.repr(object))
-
-    def docroutine(self, object, name=None, mod=None,
-                   funcs={}, classes={}, methods={}, cl=None, homecls=None):
-        """Produce HTML documentation for a function or method object."""
-        realname = object.__name__
-        name = name or realname
-        if homecls is None:
-            homecls = cl
-        anchor = ('' if cl is None else cl.__name__) + '-' + name
-        note = ''
-        skipdocs = False
-        imfunc = None
-        if _is_bound_method(object):
-            imself = object.__self__
-            if imself is cl:
-                imfunc = getattr(object, '__func__', None)
-            elif inspect.isclass(imself):
-                note = ' class method of %s' % self.classlink(imself, mod)
-            else:
-                note = ' method of %s instance' % self.classlink(
-                    imself.__class__, mod)
-        elif (inspect.ismethoddescriptor(object) or
-              inspect.ismethodwrapper(object)):
-            try:
-                objclass = object.__objclass__
-            except AttributeError:
-                pass
-            else:
-                if cl is None:
-                    note = ' unbound %s method' % self.classlink(objclass, mod)
-                elif objclass is not homecls:
-                    note = ' from ' + self.classlink(objclass, mod)
-        else:
-            imfunc = object
-        if inspect.isfunction(imfunc) and homecls is not None and (
-            imfunc.__module__ != homecls.__module__ or
-            imfunc.__qualname__ != homecls.__qualname__ + '.' + realname):
-            pname = self.parentlink(imfunc, mod)
-            if pname:
-                note = ' from %s' % pname
-
-        if (inspect.iscoroutinefunction(object) or
-            inspect.isasyncgenfunction(object)):
-            asyncqualifier = 'async '
-        else:
-            asyncqualifier = ''
-
-        if name == realname:
-            title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
-        else:
-            if (cl is not None and
-                inspect.getattr_static(cl, realname, []) is object):
-                reallink = '<a href="#%s">%s</a>' % (
-                    cl.__name__ + '-' + realname, realname)
-                skipdocs = True
-                if note.startswith(' from '):
-                    note = ''
-            else:
-                reallink = realname
-            title = '<a name="%s"><strong>%s</strong></a> = %s' % (
-                anchor, name, reallink)
-        argspec = None
-        if inspect.isroutine(object):
-            argspec = _getargspec(object)
-            if argspec and realname == '<lambda>':
-                title = '<strong>%s</strong> <em>lambda</em> ' % name
-                # XXX lambda's won't usually have func_annotations['return']
-                # since the syntax doesn't support but it is possible.
-                # So removing parentheses isn't truly safe.
-                if not object.__annotations__:
-                    argspec = argspec[1:-1] # remove parentheses
-        if not argspec:
-            argspec = '(...)'
-
-        decl = asyncqualifier + title + self.escape(argspec) + (note and
-               self.grey('<span class="heading-text">%s</span>' % note))
-
-        if skipdocs:
-            return '<dl><dt>%s</dt></dl>\n' % decl
-        else:
-            doc = self.markup(
-                getdoc(object), self.preformat, funcs, classes, methods)
-            doc = doc and '<dd><span class="code">%s</span></dd>' % doc
-            return '<dl><dt>%s</dt>%s</dl>
\n' % (decl, doc) - - def docdata(self, object, name=None, mod=None, cl=None, *ignored): - """Produce html documentation for a data descriptor.""" - results = [] - push = results.append - - if name: - push('
<dl><dt><strong>%s</strong></dt>\n' % name)
-        doc = self.markup(getdoc(object), self.preformat)
-        if doc:
-            push('<dd>%s</dd>\n' % doc)
-        push('</dl>
\n') - - return ''.join(results) - - docproperty = docdata - - def docother(self, object, name=None, mod=None, *ignored): - """Produce HTML documentation for a data object.""" - lhs = name and '%s = ' % name or '' - return lhs + self.repr(object) - - def index(self, dir, shadowed=None): - """Generate an HTML index for a directory of modules.""" - modpkgs = [] - if shadowed is None: shadowed = {} - for importer, name, ispkg in pkgutil.iter_modules([dir]): - if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name): - # ignore a module if its name contains a surrogate character - continue - modpkgs.append((name, '', ispkg, name in shadowed)) - shadowed[name] = 1 - - modpkgs.sort() - contents = self.multicolumn(modpkgs, self.modpkglink) - return self.bigsection(dir, 'index', contents) - -# -------------------------------------------- text documentation generator - -class TextRepr(Repr): - """Class for safely making a text representation of a Python object.""" - def __init__(self): - Repr.__init__(self) - self.maxlist = self.maxtuple = 20 - self.maxdict = 10 - self.maxstring = self.maxother = 100 - - def repr1(self, x, level): - if hasattr(type(x), '__name__'): - methodname = 'repr_' + '_'.join(type(x).__name__.split()) - if hasattr(self, methodname): - return getattr(self, methodname)(x, level) - return cram(stripid(repr(x)), self.maxother) - - def repr_string(self, x, level): - test = cram(x, self.maxstring) - testrepr = repr(test) - if '\\' in test and '\\' not in replace(testrepr, r'\\', ''): - # Backslashes are only literal in the string and are never - # needed to make any special characters, so show a raw string. - return 'r' + testrepr[0] + test + testrepr[0] - return testrepr - - repr_str = repr_string - - def repr_instance(self, x, level): - try: - return cram(stripid(repr(x)), self.maxstring) - except: - return '<%s instance>' % x.__class__.__name__ - -class TextDoc(Doc): - """Formatter class for text documentation.""" - - # ------------------------------------------- text formatting utilities - - _repr_instance = TextRepr() - repr = _repr_instance.repr - - def bold(self, text): - """Format a string in bold by overstriking.""" - return ''.join(ch + '\b' + ch for ch in text) - - def indent(self, text, prefix=' '): - """Indent text by prepending a given prefix to each line.""" - if not text: return '' - lines = [(prefix + line).rstrip() for line in text.split('\n')] - return '\n'.join(lines) - - def section(self, title, contents): - """Format a section with a given heading.""" - clean_contents = self.indent(contents).rstrip() - return self.bold(title) + '\n' + clean_contents + '\n\n' - - # ---------------------------------------------- type-specific routines - - def formattree(self, tree, modname, parent=None, prefix=''): - """Render in text a class tree as returned by inspect.getclasstree().""" - result = '' - for entry in tree: - if isinstance(entry, tuple): - c, bases = entry - result = result + prefix + classname(c, modname) - if bases and bases != (parent,): - parents = (classname(c, modname) for c in bases) - result = result + '(%s)' % ', '.join(parents) - result = result + '\n' - elif isinstance(entry, list): - result = result + self.formattree( - entry, modname, c, prefix + ' ') - return result - - def docmodule(self, object, name=None, mod=None, *ignored): - """Produce text documentation for a given module object.""" - name = object.__name__ # ignore the passed-in name - synop, desc = splitdoc(getdoc(object)) - result = self.section('NAME', name + (synop and ' - ' + synop)) - all 
= getattr(object, '__all__', None) - docloc = self.getdocloc(object) - if docloc is not None: - result = result + self.section('MODULE REFERENCE', docloc + """ - -The following documentation is automatically generated from the Python -source files. It may be incomplete, incorrect or include features that -are considered implementation detail and may vary between Python -implementations. When in doubt, consult the module reference at the -location listed above. -""") - - if desc: - result = result + self.section('DESCRIPTION', desc) - - classes = [] - for key, value in inspect.getmembers(object, inspect.isclass): - # if __all__ exists, believe it. Otherwise use old heuristic. - if (all is not None - or (inspect.getmodule(value) or object) is object): - if visiblename(key, all, object): - classes.append((key, value)) - funcs = [] - for key, value in inspect.getmembers(object, inspect.isroutine): - # if __all__ exists, believe it. Otherwise use a heuristic. - if (all is not None - or inspect.isbuiltin(value) - or (inspect.getmodule(value) or object) is object): - if visiblename(key, all, object): - funcs.append((key, value)) - data = [] - for key, value in inspect.getmembers(object, isdata): - if visiblename(key, all, object): - data.append((key, value)) - - modpkgs = [] - modpkgs_names = set() - if hasattr(object, '__path__'): - for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): - modpkgs_names.add(modname) - if ispkg: - modpkgs.append(modname + ' (package)') - else: - modpkgs.append(modname) - - modpkgs.sort() - result = result + self.section( - 'PACKAGE CONTENTS', '\n'.join(modpkgs)) - - # Detect submodules as sometimes created by C extensions - submodules = [] - for key, value in inspect.getmembers(object, inspect.ismodule): - if value.__name__.startswith(name + '.') and key not in modpkgs_names: - submodules.append(key) - if submodules: - submodules.sort() - result = result + self.section( - 'SUBMODULES', '\n'.join(submodules)) - - if classes: - classlist = [value for key, value in classes] - contents = [self.formattree( - inspect.getclasstree(classlist, 1), name)] - for key, value in classes: - contents.append(self.document(value, key, name)) - result = result + self.section('CLASSES', '\n'.join(contents)) - - if funcs: - contents = [] - for key, value in funcs: - contents.append(self.document(value, key, name)) - result = result + self.section('FUNCTIONS', '\n'.join(contents)) - - if data: - contents = [] - for key, value in data: - contents.append(self.docother(value, key, name, maxlen=70)) - result = result + self.section('DATA', '\n'.join(contents)) - - if hasattr(object, '__version__'): - version = str(object.__version__) - if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': - version = version[11:-1].strip() - result = result + self.section('VERSION', version) - if hasattr(object, '__date__'): - result = result + self.section('DATE', str(object.__date__)) - if hasattr(object, '__author__'): - result = result + self.section('AUTHOR', str(object.__author__)) - if hasattr(object, '__credits__'): - result = result + self.section('CREDITS', str(object.__credits__)) - try: - file = inspect.getabsfile(object) - except TypeError: - file = '(built-in)' - result = result + self.section('FILE', file) - return result - - def docclass(self, object, name=None, mod=None, *ignored): - """Produce text documentation for a given class object.""" - realname = object.__name__ - name = name or realname - bases = object.__bases__ - - def makename(c, m=object.__module__): - 
return classname(c, m) - - if name == realname: - title = 'class ' + self.bold(realname) - else: - title = self.bold(name) + ' = class ' + realname - if bases: - parents = map(makename, bases) - title = title + '(%s)' % ', '.join(parents) - - contents = [] - push = contents.append - - argspec = _getargspec(object) - if argspec and argspec != '()': - push(name + argspec + '\n') - - doc = getdoc(object) - if doc: - push(doc + '\n') - - # List the mro, if non-trivial. - mro = deque(inspect.getmro(object)) - if len(mro) > 2: - push("Method resolution order:") - for base in mro: - push(' ' + makename(base)) - push('') - - # List the built-in subclasses, if any: - subclasses = sorted( - (str(cls.__name__) for cls in type.__subclasses__(object) - if not cls.__name__.startswith("_") and cls.__module__ == "builtins"), - key=str.lower - ) - no_of_subclasses = len(subclasses) - MAX_SUBCLASSES_TO_DISPLAY = 4 - if subclasses: - push("Built-in subclasses:") - for subclassname in subclasses[:MAX_SUBCLASSES_TO_DISPLAY]: - push(' ' + subclassname) - if no_of_subclasses > MAX_SUBCLASSES_TO_DISPLAY: - push(' ... and ' + - str(no_of_subclasses - MAX_SUBCLASSES_TO_DISPLAY) + - ' other subclasses') - push('') - - # Cute little class to pump out a horizontal rule between sections. - class HorizontalRule: - def __init__(self): - self.needone = 0 - def maybe(self): - if self.needone: - push('-' * 70) - self.needone = 1 - hr = HorizontalRule() - - def spill(msg, attrs, predicate): - ok, attrs = _split_list(attrs, predicate) - if ok: - hr.maybe() - push(msg) - for name, kind, homecls, value in ok: - try: - value = getattr(object, name) - except Exception: - # Some descriptors may meet a failure in their __get__. - # (bug #1785) - push(self.docdata(value, name, mod)) - else: - push(self.document(value, - name, mod, object, homecls)) - return attrs - - def spilldescriptors(msg, attrs, predicate): - ok, attrs = _split_list(attrs, predicate) - if ok: - hr.maybe() - push(msg) - for name, kind, homecls, value in ok: - push(self.docdata(value, name, mod)) - return attrs - - def spilldata(msg, attrs, predicate): - ok, attrs = _split_list(attrs, predicate) - if ok: - hr.maybe() - push(msg) - for name, kind, homecls, value in ok: - doc = getdoc(value) - try: - obj = getattr(object, name) - except AttributeError: - obj = homecls.__dict__[name] - push(self.docother(obj, name, mod, maxlen=70, doc=doc) + - '\n') - return attrs - - attrs = [(name, kind, cls, value) - for name, kind, cls, value in classify_class_attrs(object) - if visiblename(name, obj=object)] - - while attrs: - if mro: - thisclass = mro.popleft() - else: - thisclass = attrs[0][2] - attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) - - if object is not builtins.object and thisclass is builtins.object: - attrs = inherited - continue - elif thisclass is object: - tag = "defined here" - else: - tag = "inherited from %s" % classname(thisclass, - object.__module__) - - sort_attributes(attrs, object) - - # Pump out the attrs, segregated by kind. 
- attrs = spill("Methods %s:\n" % tag, attrs, - lambda t: t[1] == 'method') - attrs = spill("Class methods %s:\n" % tag, attrs, - lambda t: t[1] == 'class method') - attrs = spill("Static methods %s:\n" % tag, attrs, - lambda t: t[1] == 'static method') - attrs = spilldescriptors("Readonly properties %s:\n" % tag, attrs, - lambda t: t[1] == 'readonly property') - attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs, - lambda t: t[1] == 'data descriptor') - attrs = spilldata("Data and other attributes %s:\n" % tag, attrs, - lambda t: t[1] == 'data') - - assert attrs == [] - attrs = inherited - - contents = '\n'.join(contents) - if not contents: - return title + '\n' - return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n' - - def formatvalue(self, object): - """Format an argument default value as text.""" - return '=' + self.repr(object) - - def docroutine(self, object, name=None, mod=None, cl=None, homecls=None): - """Produce text documentation for a function or method object.""" - realname = object.__name__ - name = name or realname - if homecls is None: - homecls = cl - note = '' - skipdocs = False - imfunc = None - if _is_bound_method(object): - imself = object.__self__ - if imself is cl: - imfunc = getattr(object, '__func__', None) - elif inspect.isclass(imself): - note = ' class method of %s' % classname(imself, mod) - else: - note = ' method of %s instance' % classname( - imself.__class__, mod) - elif (inspect.ismethoddescriptor(object) or - inspect.ismethodwrapper(object)): - try: - objclass = object.__objclass__ - except AttributeError: - pass - else: - if cl is None: - note = ' unbound %s method' % classname(objclass, mod) - elif objclass is not homecls: - note = ' from ' + classname(objclass, mod) - else: - imfunc = object - if inspect.isfunction(imfunc) and homecls is not None and ( - imfunc.__module__ != homecls.__module__ or - imfunc.__qualname__ != homecls.__qualname__ + '.' + realname): - pname = parentname(imfunc, mod) - if pname: - note = ' from %s' % pname - - if (inspect.iscoroutinefunction(object) or - inspect.isasyncgenfunction(object)): - asyncqualifier = 'async ' - else: - asyncqualifier = '' - - if name == realname: - title = self.bold(realname) - else: - if (cl is not None and - inspect.getattr_static(cl, realname, []) is object): - skipdocs = True - if note.startswith(' from '): - note = '' - title = self.bold(name) + ' = ' + realname - argspec = None - - if inspect.isroutine(object): - argspec = _getargspec(object) - if argspec and realname == '': - title = self.bold(name) + ' lambda ' - # XXX lambda's won't usually have func_annotations['return'] - # since the syntax doesn't support but it is possible. - # So removing parentheses isn't truly safe. 
- if not object.__annotations__: - argspec = argspec[1:-1] - if not argspec: - argspec = '(...)' - decl = asyncqualifier + title + argspec + note - - if skipdocs: - return decl + '\n' - else: - doc = getdoc(object) or '' - return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n') - - def docdata(self, object, name=None, mod=None, cl=None, *ignored): - """Produce text documentation for a data descriptor.""" - results = [] - push = results.append - - if name: - push(self.bold(name)) - push('\n') - doc = getdoc(object) or '' - if doc: - push(self.indent(doc)) - push('\n') - return ''.join(results) - - docproperty = docdata - - def docother(self, object, name=None, mod=None, parent=None, *ignored, - maxlen=None, doc=None): - """Produce text documentation for a data object.""" - repr = self.repr(object) - if maxlen: - line = (name and name + ' = ' or '') + repr - chop = maxlen - len(line) - if chop < 0: repr = repr[:chop] + '...' - line = (name and self.bold(name) + ' = ' or '') + repr - if not doc: - doc = getdoc(object) - if doc: - line += '\n' + self.indent(str(doc)) + '\n' - return line - -class _PlainTextDoc(TextDoc): - """Subclass of TextDoc which overrides string styling""" - def bold(self, text): - return text - -# --------------------------------------------------------- user interfaces - -def pager(text, title=''): - """The first time this is called, determine what kind of pager to use.""" - global pager - pager = get_pager() - pager(text, title) - -def describe(thing): - """Produce a short description of the given thing.""" - if inspect.ismodule(thing): - if thing.__name__ in sys.builtin_module_names: - return 'built-in module ' + thing.__name__ - if hasattr(thing, '__path__'): - return 'package ' + thing.__name__ - else: - return 'module ' + thing.__name__ - if inspect.isbuiltin(thing): - return 'built-in function ' + thing.__name__ - if inspect.isgetsetdescriptor(thing): - return 'getset descriptor %s.%s.%s' % ( - thing.__objclass__.__module__, thing.__objclass__.__name__, - thing.__name__) - if inspect.ismemberdescriptor(thing): - return 'member descriptor %s.%s.%s' % ( - thing.__objclass__.__module__, thing.__objclass__.__name__, - thing.__name__) - if inspect.isclass(thing): - return 'class ' + thing.__name__ - if inspect.isfunction(thing): - return 'function ' + thing.__name__ - if inspect.ismethod(thing): - return 'method ' + thing.__name__ - return type(thing).__name__ - -def locate(path, forceload=0): - """Locate an object by name or dotted path, importing as necessary.""" - parts = [part for part in path.split('.') if part] - module, n = None, 0 - while n < len(parts): - nextmodule = safeimport('.'.join(parts[:n+1]), forceload) - if nextmodule: module, n = nextmodule, n + 1 - else: break - if module: - object = module - else: - object = builtins - for part in parts[n:]: - try: - object = getattr(object, part) - except AttributeError: - return None - return object - -# --------------------------------------- interactive interpreter interface - -text = TextDoc() -plaintext = _PlainTextDoc() -html = HTMLDoc() - -def resolve(thing, forceload=0): - """Given an object or a path to an object, get the object and its name.""" - if isinstance(thing, str): - object = locate(thing, forceload) - if object is None: - raise ImportError('''\ -No Python documentation found for %r. -Use help() to get the interactive help utility. 
-Use help(str) for help on the str class.''' % thing) - return object, thing - else: - name = getattr(thing, '__name__', None) - return thing, name if isinstance(name, str) else None - -def render_doc(thing, title='Python Library Documentation: %s', forceload=0, - renderer=None): - """Render text documentation, given an object or a path to an object.""" - if renderer is None: - renderer = text - object, name = resolve(thing, forceload) - desc = describe(object) - module = inspect.getmodule(object) - if name and '.' in name: - desc += ' in ' + name[:name.rfind('.')] - elif module and module is not object: - desc += ' in module ' + module.__name__ - - if not (inspect.ismodule(object) or - inspect.isclass(object) or - inspect.isroutine(object) or - inspect.isdatadescriptor(object) or - _getdoc(object)): - # If the passed object is a piece of data or an instance, - # document its available methods instead of its value. - if hasattr(object, '__origin__'): - object = object.__origin__ - else: - object = type(object) - desc += ' object' - return title % desc + '\n\n' + renderer.document(object, name) - -def doc(thing, title='Python Library Documentation: %s', forceload=0, - output=None, is_cli=False): - """Display text documentation, given an object or a path to an object.""" - if output is None: - try: - if isinstance(thing, str): - what = thing - else: - what = getattr(thing, '__qualname__', None) - if not isinstance(what, str): - what = getattr(thing, '__name__', None) - if not isinstance(what, str): - what = type(thing).__name__ + ' object' - pager(render_doc(thing, title, forceload), f'Help on {what!s}') - except ImportError as exc: - if is_cli: - raise - print(exc) - else: - try: - s = render_doc(thing, title, forceload, plaintext) - except ImportError as exc: - s = str(exc) - output.write(s) - -def writedoc(thing, forceload=0): - """Write HTML documentation to a file in the current directory.""" - object, name = resolve(thing, forceload) - page = html.page(describe(object), html.document(object, name)) - with open(name + '.html', 'w', encoding='utf-8') as file: - file.write(page) - print('wrote', name + '.html') - -def writedocs(dir, pkgpath='', done=None): - """Write out HTML documentation for all modules in a directory tree.""" - if done is None: done = {} - for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath): - writedoc(modname) - return - -class Helper: - - # These dictionaries map a topic name to either an alias, or a tuple - # (label, seealso-items). The "label" is the label of the corresponding - # section in the .rst file under Doc/ and an index into the dictionary - # in pydoc_data/topics.py. - # - # CAUTION: if you change one of these dictionaries, be sure to adapt the - # list of needed labels in Doc/tools/extensions/pyspecific.py and - # regenerate the pydoc_data/topics.py file by running - # make pydoc-topics - # in Doc/ and copying the output file into the Lib/ directory. 
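# Context sketch: how the tables below are consumed. An entry such as
# 'while': ('while', 'break continue if TRUTHVALUE') maps the keyword to the
# label 'while' in pydoc_data/topics.py plus its cross references:
import pydoc

helper = pydoc.Helper()
helper.help('while')     # renders the 'while' topic and its related topics
helper.help('keywords')  # lists every key of the keywords table below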
- - keywords = { - 'False': '', - 'None': '', - 'True': '', - 'and': 'BOOLEAN', - 'as': 'with', - 'assert': ('assert', ''), - 'async': ('async', ''), - 'await': ('await', ''), - 'break': ('break', 'while for'), - 'class': ('class', 'CLASSES SPECIALMETHODS'), - 'continue': ('continue', 'while for'), - 'def': ('function', ''), - 'del': ('del', 'BASICMETHODS'), - 'elif': 'if', - 'else': ('else', 'while for'), - 'except': 'try', - 'finally': 'try', - 'for': ('for', 'break continue while'), - 'from': 'import', - 'global': ('global', 'nonlocal NAMESPACES'), - 'if': ('if', 'TRUTHVALUE'), - 'import': ('import', 'MODULES'), - 'in': ('in', 'SEQUENCEMETHODS'), - 'is': 'COMPARISON', - 'lambda': ('lambda', 'FUNCTIONS'), - 'nonlocal': ('nonlocal', 'global NAMESPACES'), - 'not': 'BOOLEAN', - 'or': 'BOOLEAN', - 'pass': ('pass', ''), - 'raise': ('raise', 'EXCEPTIONS'), - 'return': ('return', 'FUNCTIONS'), - 'try': ('try', 'EXCEPTIONS'), - 'while': ('while', 'break continue if TRUTHVALUE'), - 'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'), - 'yield': ('yield', ''), - } - # Either add symbols to this dictionary or to the symbols dictionary - # directly: Whichever is easier. They are merged later. - _strprefixes = [p + q for p in ('b', 'f', 'r', 'u') for q in ("'", '"')] - _symbols_inverse = { - 'STRINGS' : ("'", "'''", '"', '"""', *_strprefixes), - 'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&', - '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'), - 'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'), - 'UNARY' : ('-', '~'), - 'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=', - '^=', '<<=', '>>=', '**=', '//='), - 'BITWISE' : ('<<', '>>', '&', '|', '^', '~'), - 'COMPLEX' : ('j', 'J') - } - symbols = { - '%': 'OPERATORS FORMATTING', - '**': 'POWER', - ',': 'TUPLES LISTS FUNCTIONS', - '.': 'ATTRIBUTES FLOAT MODULES OBJECTS', - '...': 'ELLIPSIS', - ':': 'SLICINGS DICTIONARYLITERALS', - '@': 'def class', - '\\': 'STRINGS', - ':=': 'ASSIGNMENTEXPRESSIONS', - '_': 'PRIVATENAMES', - '__': 'PRIVATENAMES SPECIALMETHODS', - '`': 'BACKQUOTES', - '(': 'TUPLES FUNCTIONS CALLS', - ')': 'TUPLES FUNCTIONS CALLS', - '[': 'LISTS SUBSCRIPTS SLICINGS', - ']': 'LISTS SUBSCRIPTS SLICINGS' - } - for topic, symbols_ in _symbols_inverse.items(): - for symbol in symbols_: - topics = symbols.get(symbol, topic) - if topic not in topics: - topics = topics + ' ' + topic - symbols[symbol] = topics - del topic, symbols_, symbol, topics - - topics = { - 'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS ' - 'FUNCTIONS CLASSES MODULES FILES inspect'), - 'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS ' - 'FORMATTING TYPES'), - 'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'), - 'FORMATTING': ('formatstrings', 'OPERATORS'), - 'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS ' - 'FORMATTING TYPES'), - 'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'), - 'INTEGER': ('integers', 'int range'), - 'FLOAT': ('floating', 'float math'), - 'COMPLEX': ('imaginary', 'complex cmath'), - 'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'), - 'MAPPINGS': 'DICTIONARIES', - 'FUNCTIONS': ('typesfunctions', 'def TYPES'), - 'METHODS': ('typesmethods', 'class def CLASSES TYPES'), - 'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'), - 'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'), - 'FRAMEOBJECTS': 'TYPES', - 'TRACEBACKS': 'TYPES', - 'NONE': ('bltin-null-object', ''), - 'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'), - 
'SPECIALATTRIBUTES': ('specialattrs', ''), - 'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'), - 'MODULES': ('typesmodules', 'import'), - 'PACKAGES': 'import', - 'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN ' - 'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER ' - 'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES ' - 'LISTS DICTIONARIES'), - 'OPERATORS': 'EXPRESSIONS', - 'PRECEDENCE': 'EXPRESSIONS', - 'OBJECTS': ('objects', 'TYPES'), - 'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS ' - 'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS ' - 'NUMBERMETHODS CLASSES'), - 'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'), - 'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'), - 'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'), - 'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS ' - 'SPECIALMETHODS'), - 'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'), - 'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT ' - 'SPECIALMETHODS'), - 'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'), - 'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'), - 'DYNAMICFEATURES': ('dynamic-features', ''), - 'SCOPING': 'NAMESPACES', - 'FRAMES': 'NAMESPACES', - 'EXCEPTIONS': ('exceptions', 'try except finally raise'), - 'CONVERSIONS': ('conversions', ''), - 'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'), - 'SPECIALIDENTIFIERS': ('id-classes', ''), - 'PRIVATENAMES': ('atom-identifiers', ''), - 'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS ' - 'LISTLITERALS DICTIONARYLITERALS'), - 'TUPLES': 'SEQUENCES', - 'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'), - 'LISTS': ('typesseq-mutable', 'LISTLITERALS'), - 'LISTLITERALS': ('lists', 'LISTS LITERALS'), - 'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'), - 'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'), - 'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'), - 'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'), - 'SLICINGS': ('slicings', 'SEQUENCEMETHODS'), - 'CALLS': ('calls', 'EXPRESSIONS'), - 'POWER': ('power', 'EXPRESSIONS'), - 'UNARY': ('unary', 'EXPRESSIONS'), - 'BINARY': ('binary', 'EXPRESSIONS'), - 'SHIFTING': ('shifting', 'EXPRESSIONS'), - 'BITWISE': ('bitwise', 'EXPRESSIONS'), - 'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'), - 'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'), - 'ASSERTION': 'assert', - 'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'), - 'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'), - 'ASSIGNMENTEXPRESSIONS': ('assignment-expressions', ''), - 'DELETION': 'del', - 'RETURNING': 'return', - 'IMPORTING': 'import', - 'CONDITIONAL': 'if', - 'LOOPING': ('compound', 'for while break continue'), - 'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'), - 'DEBUGGING': ('debugger', 'pdb'), - 'CONTEXTMANAGERS': ('context-managers', 'with'), - } - - def __init__(self, input=None, output=None): - self._input = input - self._output = output - - @property - def input(self): - return self._input or sys.stdin - - @property - def output(self): - return self._output or sys.stdout - - def __repr__(self): - if inspect.stack()[1][3] == '?': - self() - return '' - return '<%s.%s instance>' % (self.__class__.__module__, - self.__class__.__qualname__) - - _GoInteractive = object() - def __call__(self, request=_GoInteractive): - if request is not self._GoInteractive: - try: - 
self.help(request) - except ImportError as err: - self.output.write(f'{err}\n') - else: - self.intro() - self.interact() - self.output.write(''' -You are now leaving help and returning to the Python interpreter. -If you want to ask for help on a particular object directly from the -interpreter, you can type "help(object)". Executing "help('string')" -has the same effect as typing a particular string at the help> prompt. -''') - - def interact(self): - self.output.write('\n') - while True: - try: - request = self.getline('help> ') - except (KeyboardInterrupt, EOFError): - break - request = request.strip() - if not request: - continue # back to the prompt - - # Make sure significant trailing quoting marks of literals don't - # get deleted while cleaning input - if (len(request) > 2 and request[0] == request[-1] in ("'", '"') - and request[0] not in request[1:-1]): - request = request[1:-1] - if request.lower() in ('q', 'quit', 'exit'): break - if request == 'help': - self.intro() - else: - self.help(request) - - def getline(self, prompt): - """Read one line, using input() when appropriate.""" - if self.input is sys.stdin: - return input(prompt) - else: - self.output.write(prompt) - self.output.flush() - return self.input.readline() - - def help(self, request, is_cli=False): - if isinstance(request, str): - request = request.strip() - if request == 'keywords': self.listkeywords() - elif request == 'symbols': self.listsymbols() - elif request == 'topics': self.listtopics() - elif request == 'modules': self.listmodules() - elif request[:8] == 'modules ': - self.listmodules(request.split()[1]) - elif request in self.symbols: self.showsymbol(request) - elif request in ['True', 'False', 'None']: - # special case these keywords since they are objects too - doc(eval(request), 'Help on %s:', output=self._output, is_cli=is_cli) - elif request in self.keywords: self.showtopic(request) - elif request in self.topics: self.showtopic(request) - elif request: doc(request, 'Help on %s:', output=self._output, is_cli=is_cli) - else: doc(str, 'Help on %s:', output=self._output, is_cli=is_cli) - elif isinstance(request, Helper): self() - else: doc(request, 'Help on %s:', output=self._output, is_cli=is_cli) - self.output.write('\n') - - def intro(self): - self.output.write('''\ -Welcome to Python {0}'s help utility! If this is your first time using -Python, you should definitely check out the tutorial at -https://docs.python.org/{0}/tutorial/. - -Enter the name of any module, keyword, or topic to get help on writing -Python programs and using Python modules. To get a list of available -modules, keywords, symbols, or topics, enter "modules", "keywords", -"symbols", or "topics". - -Each module also comes with a one-line summary of what it does; to list -the modules whose name or summary contain a given string such as "spam", -enter "modules spam". - -To quit this help utility and return to the interpreter, -enter "q", "quit" or "exit". -'''.format('%d.%d' % sys.version_info[:2])) - - def list(self, items, columns=4, width=80): - items = sorted(items) - colw = width // columns - rows = (len(items) + columns - 1) // columns - for row in range(rows): - for col in range(columns): - i = col * rows + row - if i < len(items): - self.output.write(items[i]) - if col < columns - 1: - self.output.write(' ' + ' ' * (colw - 1 - len(items[i]))) - self.output.write('\n') - - def listkeywords(self): - self.output.write(''' -Here is a list of the Python keywords. Enter any keyword to get more help. 
- -''') - self.list(self.keywords.keys()) - - def listsymbols(self): - self.output.write(''' -Here is a list of the punctuation symbols which Python assigns special meaning -to. Enter any symbol to get more help. - -''') - self.list(self.symbols.keys()) - - def listtopics(self): - self.output.write(''' -Here is a list of available topics. Enter any topic name to get more help. - -''') - self.list(self.topics.keys(), columns=3) - - def showtopic(self, topic, more_xrefs=''): - try: - import pydoc_data.topics - except ImportError: - self.output.write(''' -Sorry, topic and keyword documentation is not available because the -module "pydoc_data.topics" could not be found. -''') - return - target = self.topics.get(topic, self.keywords.get(topic)) - if not target: - self.output.write('no documentation found for %s\n' % repr(topic)) - return - if isinstance(target, str): - return self.showtopic(target, more_xrefs) - - label, xrefs = target - try: - doc = pydoc_data.topics.topics[label] - except KeyError: - self.output.write('no documentation found for %s\n' % repr(topic)) - return - doc = doc.strip() + '\n' - if more_xrefs: - xrefs = (xrefs or '') + ' ' + more_xrefs - if xrefs: - import textwrap - text = 'Related help topics: ' + ', '.join(xrefs.split()) + '\n' - wrapped_text = textwrap.wrap(text, 72) - doc += '\n%s\n' % '\n'.join(wrapped_text) - - if self._output is None: - pager(doc, f'Help on {topic!s}') - else: - self.output.write(doc) - - def _gettopic(self, topic, more_xrefs=''): - """Return unbuffered tuple of (topic, xrefs). - - If an error occurs here, the exception is caught and displayed by - the url handler. - - This function duplicates the showtopic method but returns its - result directly so it can be formatted for display in an html page. - """ - try: - import pydoc_data.topics - except ImportError: - return(''' -Sorry, topic and keyword documentation is not available because the -module "pydoc_data.topics" could not be found. -''' , '') - target = self.topics.get(topic, self.keywords.get(topic)) - if not target: - raise ValueError('could not find topic') - if isinstance(target, str): - return self._gettopic(target, more_xrefs) - label, xrefs = target - doc = pydoc_data.topics.topics[label] - if more_xrefs: - xrefs = (xrefs or '') + ' ' + more_xrefs - return doc, xrefs - - def showsymbol(self, symbol): - target = self.symbols[symbol] - topic, _, xrefs = target.partition(' ') - self.showtopic(topic, xrefs) - - def listmodules(self, key=''): - if key: - self.output.write(''' -Here is a list of modules whose name or summary contains '{}'. -If there are any, enter a module name to get more help. - -'''.format(key)) - apropos(key) - else: - self.output.write(''' -Please wait a moment while I gather a list of all available modules... - -''') - modules = {} - def callback(path, modname, desc, modules=modules): - if modname and modname[-9:] == '.__init__': - modname = modname[:-9] + ' (package)' - if modname.find('.') < 0: - modules[modname] = 1 - def onerror(modname): - callback(None, modname, None) - ModuleScanner().run(callback, onerror=onerror) - self.list(modules.keys()) - self.output.write(''' -Enter any module name to get more help. Or, type "modules spam" to search -for modules whose name or summary contain the string "spam". 
-''') - -help = Helper() - -class ModuleScanner: - """An interruptible scanner that searches module synopses.""" - - def run(self, callback, key=None, completer=None, onerror=None): - if key: key = key.lower() - self.quit = False - seen = {} - - for modname in sys.builtin_module_names: - if modname != '__main__': - seen[modname] = 1 - if key is None: - callback(None, modname, '') - else: - name = __import__(modname).__doc__ or '' - desc = name.split('\n')[0] - name = modname + ' - ' + desc - if name.lower().find(key) >= 0: - callback(None, modname, desc) - - for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror): - if self.quit: - break - - if key is None: - callback(None, modname, '') - else: - try: - spec = importer.find_spec(modname) - except SyntaxError: - # raised by tests for bad coding cookies or BOM - continue - loader = spec.loader - if hasattr(loader, 'get_source'): - try: - source = loader.get_source(modname) - except Exception: - if onerror: - onerror(modname) - continue - desc = source_synopsis(io.StringIO(source)) or '' - if hasattr(loader, 'get_filename'): - path = loader.get_filename(modname) - else: - path = None - else: - try: - module = importlib._bootstrap._load(spec) - except ImportError: - if onerror: - onerror(modname) - continue - desc = module.__doc__.splitlines()[0] if module.__doc__ else '' - path = getattr(module,'__file__',None) - name = modname + ' - ' + desc - if name.lower().find(key) >= 0: - callback(path, modname, desc) - - if completer: - completer() - -def apropos(key): - """Print all the one-line module summaries that contain a substring.""" - def callback(path, modname, desc): - if modname[-9:] == '.__init__': - modname = modname[:-9] + ' (package)' - print(modname, desc and '- ' + desc) - def onerror(modname): - pass - with warnings.catch_warnings(): - warnings.filterwarnings('ignore') # ignore problems during import - ModuleScanner().run(callback, key, onerror=onerror) - -# --------------------------------------- enhanced web browser interface - -def _start_server(urlhandler, hostname, port): - """Start an HTTP server thread on a specific port. - - Start an HTML/text server thread, so HTML or text documents can be - browsed dynamically and interactively with a web browser. Example use: - - >>> import time - >>> import pydoc - - Define a URL handler. To determine what the client is asking - for, check the URL and content_type. - - Then get or generate some text or HTML code and return it. - - >>> def my_url_handler(url, content_type): - ... text = 'the URL sent was: (%s, %s)' % (url, content_type) - ... return text - - Start server thread on port 0. - If you use port 0, the server will pick a random port number. - You can then use serverthread.port to get the port number. - - >>> port = 0 - >>> serverthread = pydoc._start_server(my_url_handler, port) - - Check that the server is really started. If it is, open browser - and get first page. Use serverthread.url as the starting page. - - >>> if serverthread.serving: - ... import webbrowser - - The next two lines are commented out so a browser doesn't open if - doctest is run on this module. - - #... webbrowser.open(serverthread.url) - #True - - Let the server do its thing. We just need to monitor its status. - Use time.sleep so the loop doesn't hog the CPU. - - >>> starttime = time.monotonic() - >>> timeout = 1 #seconds - - This is a short timeout for testing purposes. - - >>> while serverthread.serving: - ... time.sleep(.01) - ... 
if serverthread.serving and time.monotonic() - starttime > timeout:
-    ...          serverthread.stop()
-    ...          break
-
-    Print any errors that may have occurred.
-
-    >>> print(serverthread.error)
-    None
-    """
-    import http.server
-    import email.message
-    import select
-    import threading
-
-    class DocHandler(http.server.BaseHTTPRequestHandler):
-
-        def do_GET(self):
-            """Process a request from an HTML browser.
-
-            The URL received is in self.path.
-            Get an HTML page from self.urlhandler and send it.
-            """
-            if self.path.endswith('.css'):
-                content_type = 'text/css'
-            else:
-                content_type = 'text/html'
-            self.send_response(200)
-            self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
-            self.end_headers()
-            self.wfile.write(self.urlhandler(
-                self.path, content_type).encode('utf-8'))
-
-        def log_message(self, *args):
-            # Don't log messages.
-            pass
-
-    class DocServer(http.server.HTTPServer):
-
-        def __init__(self, host, port, callback):
-            self.host = host
-            self.address = (self.host, port)
-            self.callback = callback
-            self.base.__init__(self, self.address, self.handler)
-            self.quit = False
-
-        def serve_until_quit(self):
-            while not self.quit:
-                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
-                if rd:
-                    self.handle_request()
-            self.server_close()
-
-        def server_activate(self):
-            self.base.server_activate(self)
-            if self.callback:
-                self.callback(self)
-
-    class ServerThread(threading.Thread):
-
-        def __init__(self, urlhandler, host, port):
-            self.urlhandler = urlhandler
-            self.host = host
-            self.port = int(port)
-            threading.Thread.__init__(self)
-            self.serving = False
-            self.error = None
-            self.docserver = None
-
-        def run(self):
-            """Start the server."""
-            try:
-                DocServer.base = http.server.HTTPServer
-                DocServer.handler = DocHandler
-                DocHandler.MessageClass = email.message.Message
-                DocHandler.urlhandler = staticmethod(self.urlhandler)
-                docsvr = DocServer(self.host, self.port, self.ready)
-                self.docserver = docsvr
-                docsvr.serve_until_quit()
-            except Exception as err:
-                self.error = err
-
-        def ready(self, server):
-            self.serving = True
-            self.host = server.host
-            self.port = server.server_port
-            self.url = 'http://%s:%d/' % (self.host, self.port)
-
-        def stop(self):
-            """Stop the server and this thread nicely"""
-            self.docserver.quit = True
-            self.join()
-            # explicitly break a reference cycle: DocServer.callback
-            # has indirectly a reference to ServerThread.
-            self.docserver = None
-            self.serving = False
-            self.url = None
-
-    thread = ServerThread(urlhandler, hostname, port)
-    thread.start()
-    # Wait until thread.serving is True and thread.docserver is set
-    # to make sure we are really up before returning.
-    while not thread.error and not (thread.serving and thread.docserver):
-        time.sleep(.01)
-    return thread
-
-
-def _url_handler(url, content_type="text/html"):
-    """The pydoc url handler for use with the pydoc server.
-
-    If the content_type is 'text/css', the _pydoc.css style
-    sheet is read and returned if it exists.
-
-    If the content_type is 'text/html', then the result of
-    get_html_page(url) is returned.
-    """
-    class _HTMLDoc(HTMLDoc):
-
-        def page(self, title, contents):
-            """Format an HTML page."""
-            css_path = "pydoc_data/_pydoc.css"
-            css_link = (
-                '<link rel="stylesheet" type="text/css" href="%s">' %
-                css_path)
-            return '''\
-<!DOCTYPE html>
-<html lang="en">
-<head>
-<meta charset="utf-8">
-<title>Pydoc: %s</title>
-%s</head><body>%s<div style="clear:both;padding-top:.5em;">
-%s</div>
-</body></html>''' % (title, css_link, html_navbar(), contents)
-
-
-    html = _HTMLDoc()
-
-    def html_navbar():
-        version = html.escape("%s [%s, %s]" % (platform.python_version(),
-                                               platform.python_build()[0],
-                                               platform.python_compiler()))
-        return """
-            <div style='float:left'>
-                Python %s<br>%s
-            </div>
-            <div style='float:right'>
-                <div style='text-align:right'>
-                    <div style='margin-bottom:.7em'>
-                        <a href="index.html">Module Index</a>
-                        : <a href="topics.html">Topics</a>
-                        : <a href="keywords.html">Keywords</a>
-                    </div>
-                    <form action="get" style='display:inline;'>
-                        <input class="input-search" type=text name=key size="15">
-                        <input class="submit-search" type=submit value="Get">
-                    </form>&nbsp;
-                    <form action="search" style='display:inline;'>
-                        <input class="input-search" type=text name=key size="15">
-                        <input class="submit-search" type=submit value="Search">
-                    </form>
-                </div>
-            </div>
-            """ % (version, html.escape(platform.platform(terse=True)))
-
-    def html_index():
-        """Module Index page."""
-
-        def bltinlink(name):
-            return '<a href="%s.html">%s</a>' % (name, name)
-
-        heading = html.heading(
-            '<strong class="title">Index of Modules</strong>'
-        )
-        names = [name for name in sys.builtin_module_names
-                 if name != '__main__']
-        contents = html.multicolumn(names, bltinlink)
-        contents = [heading, '<p>' + html.bigsection(
-            'Built-in Modules', 'index', contents)]
-
-        seen = {}
-        for dir in sys.path:
-            contents.append(html.index(dir, seen))
-
-        contents.append(
-            '<p align=right class="heading-text grey"><strong>pydoc</strong> by Ka-Ping Yee'
-            '&lt;ping@lfw.org&gt;</p>')
-        return 'Index of Modules', ''.join(contents)
-
-    def html_search(key):
-        """Search results page."""
-        # scan for modules
-        search_result = []
-
-        def callback(path, modname, desc):
-            if modname[-9:] == '.__init__':
-                modname = modname[:-9] + ' (package)'
-            search_result.append((modname, desc and '- ' + desc))
-
-        with warnings.catch_warnings():
-            warnings.filterwarnings('ignore')  # ignore problems during import
-            def onerror(modname):
-                pass
-            ModuleScanner().run(callback, key, onerror=onerror)
-
-        # format page
-        def bltinlink(name):
-            return '<a href="%s.html">%s</a>' % (name, name)
-
-        results = []
-        heading = html.heading(
-            '<strong class="title">Search Results</strong>',
-        )
-        for name, desc in search_result:
-            results.append(bltinlink(name) + desc)
-        contents = heading + html.bigsection(
-            'key = %s' % key, 'index', '<br>'.join(results))
-        return 'Search Results', contents
-
-    def html_topics():
-        """Index of topic texts available."""
-
-        def bltinlink(name):
-            return '<a href="topic?key=%s">%s</a>' % (name, name)
-
-        heading = html.heading(
-            '<strong class="title">INDEX</strong>',
-        )
-        names = sorted(Helper.topics.keys())
-
-        contents = html.multicolumn(names, bltinlink)
-        contents = heading + html.bigsection(
-            'Topics', 'index', contents)
-        return 'Topics', contents
-
-    def html_keywords():
-        """Index of keywords."""
-        heading = html.heading(
-            '<strong class="title">INDEX</strong>',
-        )
-        names = sorted(Helper.keywords.keys())
-
-        def bltinlink(name):
-            return '<a href="topic?key=%s">%s</a>' % (name, name)
-
-        contents = html.multicolumn(names, bltinlink)
-        contents = heading + html.bigsection(
-            'Keywords', 'index', contents)
-        return 'Keywords', contents
-
-    def html_topicpage(topic):
-        """Topic or keyword help page."""
-        buf = io.StringIO()
-        htmlhelp = Helper(buf, buf)
-        contents, xrefs = htmlhelp._gettopic(topic)
-        if topic in htmlhelp.keywords:
-            title = 'KEYWORD'
-        else:
-            title = 'TOPIC'
-        heading = html.heading(
-            '<strong class="title">%s</strong>' % title,
-        )
-        contents = '<pre>%s</pre>' % html.markup(contents)
-        contents = html.bigsection(topic , 'index', contents)
-        if xrefs:
-            xrefs = sorted(xrefs.split())
-
-            def bltinlink(name):
-                return '<a href="topic?key=%s">%s</a>' % (name, name)
-
-            xrefs = html.multicolumn(xrefs, bltinlink)
-            xrefs = html.section('Related help topics: ', 'index', xrefs)
-        return ('%s %s' % (title, topic),
-                ''.join((heading, contents, xrefs)))
-
-    def html_getobj(url):
-        obj = locate(url, forceload=1)
-        if obj is None and url != 'None':
-            raise ValueError('could not find object')
-        title = describe(obj)
-        content = html.document(obj, url)
-        return title, content
-
-    def html_error(url, exc):
-        heading = html.heading(
-            '<strong class="title">Error</strong>',
-        )
-        contents = '<br>'.join(html.escape(line) for line in
-                               format_exception_only(type(exc), exc))
-        contents = heading + html.bigsection(url, 'error', contents)
-        return "Error - %s" % url, contents
-
-    def get_html_page(url):
-        """Generate an HTML page for url."""
-        complete_url = url
-        if url.endswith('.html'):
-            url = url[:-5]
-        try:
-            if url in ("", "index"):
-                title, content = html_index()
-            elif url == "topics":
-                title, content = html_topics()
-            elif url == "keywords":
-                title, content = html_keywords()
-            elif '=' in url:
-                op, _, url = url.partition('=')
-                if op == "search?key":
-                    title, content = html_search(url)
-                elif op == "topic?key":
-                    # try topics first, then objects.
-                    try:
-                        title, content = html_topicpage(url)
-                    except ValueError:
-                        title, content = html_getobj(url)
-                elif op == "get?key":
-                    # try objects first, then topics.
-                    if url in ("", "index"):
-                        title, content = html_index()
-                    else:
-                        try:
-                            title, content = html_getobj(url)
-                        except ValueError:
-                            title, content = html_topicpage(url)
-                else:
-                    raise ValueError('bad pydoc url')
-            else:
-                title, content = html_getobj(url)
-        except Exception as exc:
-            # Catch any errors and display them in an error page.
-            title, content = html_error(complete_url, exc)
-        return html.page(title, content)
-
-    if url.startswith('/'):
-        url = url[1:]
-    if content_type == 'text/css':
-        path_here = os.path.dirname(os.path.realpath(__file__))
-        css_path = os.path.join(path_here, url)
-        with open(css_path) as fp:
-            return ''.join(fp.readlines())
-    elif content_type == 'text/html':
-        return get_html_page(url)
-    # Errors outside the url handler are caught by the server.
-    raise TypeError('unknown content type %r for url %s' % (content_type, url))
-
-
-def browse(port=0, *, open_browser=True, hostname='localhost'):
-    """Start the enhanced pydoc web server and open a web browser.
-
-    Use port '0' to start the server on an arbitrary port.
-    Set open_browser to False to suppress opening a browser.
-    """
-    import webbrowser
-    serverthread = _start_server(_url_handler, hostname, port)
-    if serverthread.error:
-        print(serverthread.error)
-        return
-    if serverthread.serving:
-        server_help_msg = 'Server commands: [b]rowser, [q]uit'
-        if open_browser:
-            webbrowser.open(serverthread.url)
-        try:
-            print('Server ready at', serverthread.url)
-            print(server_help_msg)
-            while serverthread.serving:
-                cmd = input('server> ')
-                cmd = cmd.lower()
-                if cmd == 'q':
-                    break
-                elif cmd == 'b':
-                    webbrowser.open(serverthread.url)
-                else:
-                    print(server_help_msg)
-        except (KeyboardInterrupt, EOFError):
-            print()
-        finally:
-            if serverthread.serving:
-                serverthread.stop()
-                print('Server stopped')
-
-
-# -------------------------------------------------- command-line interface
-
-def ispath(x):
-    return isinstance(x, str) and x.find(os.sep) >= 0
-
-def _get_revised_path(given_path, argv0):
-    """Ensures current directory is on returned path, and argv0 directory is not
-
-    Exception: argv0 dir is left alone if it's also pydoc's directory.
-
-    Returns a new path entry list, or None if no adjustment is needed.
-    """
-    # Scripts may get the current directory in their path by default if they're
-    # run with the -m switch, or directly from the current directory.
-    # The interactive prompt also allows imports from the current directory.
-
-    # Accordingly, if the current directory is already present, don't make
-    # any changes to the given_path
-    if '' in given_path or os.curdir in given_path or os.getcwd() in given_path:
-        return None
-
-    # Otherwise, add the current directory to the given path, and remove the
-    # script directory (as long as the latter isn't also pydoc's directory.
-    stdlib_dir = os.path.dirname(__file__)
-    script_dir = os.path.dirname(argv0)
-    revised_path = given_path.copy()
-    if script_dir in given_path and not os.path.samefile(script_dir, stdlib_dir):
-        revised_path.remove(script_dir)
-    revised_path.insert(0, os.getcwd())
-    return revised_path
-
-
-# Note: the tests only cover _get_revised_path, not _adjust_cli_path itself
-def _adjust_cli_sys_path():
-    """Ensures current directory is on sys.path, and __main__ directory is not.
-
-    Exception: __main__ dir is left alone if it's also pydoc's directory.
-    """
-    revised_path = _get_revised_path(sys.path, sys.argv[0])
-    if revised_path is not None:
-        sys.path[:] = revised_path
-
-
-def cli():
-    """Command-line interface (looks at sys.argv to decide what to do)."""
-    import getopt
-    class BadUsage(Exception): pass
-
-    _adjust_cli_sys_path()
-
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], 'bk:n:p:w')
-        writing = False
-        start_server = False
-        open_browser = False
-        port = 0
-        hostname = 'localhost'
-        for opt, val in opts:
-            if opt == '-b':
-                start_server = True
-                open_browser = True
-            if opt == '-k':
-                apropos(val)
-                return
-            if opt == '-p':
-                start_server = True
-                port = val
-            if opt == '-w':
-                writing = True
-            if opt == '-n':
-                start_server = True
-                hostname = val
-
-        if start_server:
-            browse(port, hostname=hostname, open_browser=open_browser)
-            return
-
-        if not args: raise BadUsage
-        for arg in args:
-            if ispath(arg) and not os.path.exists(arg):
-                print('file %r does not exist' % arg)
-                sys.exit(1)
-            try:
-                if ispath(arg) and os.path.isfile(arg):
-                    arg = importfile(arg)
-                if writing:
-                    if ispath(arg) and os.path.isdir(arg):
-                        writedocs(arg)
-                    else:
-                        writedoc(arg)
-                else:
-                    help.help(arg, is_cli=True)
-            except (ImportError, ErrorDuringImport) as value:
-                print(value)
-                sys.exit(1)
-
-    except (getopt.error, BadUsage):
-        cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
-        print("""pydoc - the Python documentation tool
-
-{cmd} <name> ...
-    Show text documentation on something.  <name> may be the name of a
-    Python keyword, topic, function, module, or package, or a dotted
-    reference to a class or function within a module or module in a
-    package.  If <name> contains a '{sep}', it is used as the path to a
-    Python source file to document. If name is 'keywords', 'topics',
-    or 'modules', a listing of these things is displayed.
-
-{cmd} -k <keyword>
-    Search for a keyword in the synopsis lines of all available modules.
-
-{cmd} -n <hostname>
-    Start an HTTP server with the given hostname (default: localhost).
-
-{cmd} -p <port>
-    Start an HTTP server on the given port on the local machine.  Port
-    number 0 can be used to get an arbitrary unused port.
-
-{cmd} -b
-    Start an HTTP server on an arbitrary unused port and open a web browser
-    to interactively browse documentation.  This option can be used in
-    combination with -n and/or -p.
-
-{cmd} -w <name> ...
-    Write out the HTML documentation for a module to a file in the current
-    directory.  If <name> contains a '{sep}', it is treated as a filename; if
-    it names a directory, documentation is written for all the contents.
-""".format(cmd=cmd, sep=os.sep)) - -if __name__ == '__main__': - cli() diff --git a/Python313_13_x64_Template/Lib/pydoc_data/module_docs.py b/Python313_13_x64_Template/Lib/pydoc_data/module_docs.py deleted file mode 100644 index 8c401360..00000000 --- a/Python313_13_x64_Template/Lib/pydoc_data/module_docs.py +++ /dev/null @@ -1,314 +0,0 @@ -# Autogenerated by Sphinx on Tue Apr 7 20:18:56 2026 -# as part of the release process. - -module_docs = { - '__future__': '__future__#module-__future__', - '__main__': '__main__#module-__main__', - '_thread': '_thread#module-_thread', - '_tkinter': 'tkinter#module-_tkinter', - 'abc': 'abc#module-abc', - 'aifc': 'aifc#module-aifc', - 'argparse': 'argparse#module-argparse', - 'array': 'array#module-array', - 'ast': 'ast#module-ast', - 'asynchat': 'asynchat#module-asynchat', - 'asyncio': 'asyncio#module-asyncio', - 'asyncore': 'asyncore#module-asyncore', - 'atexit': 'atexit#module-atexit', - 'audioop': 'audioop#module-audioop', - 'base64': 'base64#module-base64', - 'bdb': 'bdb#module-bdb', - 'binascii': 'binascii#module-binascii', - 'bisect': 'bisect#module-bisect', - 'builtins': 'builtins#module-builtins', - 'bz2': 'bz2#module-bz2', - 'cProfile': 'profile#module-cProfile', - 'calendar': 'calendar#module-calendar', - 'cgi': 'cgi#module-cgi', - 'cgitb': 'cgitb#module-cgitb', - 'chunk': 'chunk#module-chunk', - 'cmath': 'cmath#module-cmath', - 'cmd': 'cmd#module-cmd', - 'code': 'code#module-code', - 'codecs': 'codecs#module-codecs', - 'codeop': 'codeop#module-codeop', - 'collections': 'collections#module-collections', - 'collections.abc': 'collections.abc#module-collections.abc', - 'colorsys': 'colorsys#module-colorsys', - 'compileall': 'compileall#module-compileall', - 'concurrent.futures': 'concurrent.futures#module-concurrent.futures', - 'configparser': 'configparser#module-configparser', - 'contextlib': 'contextlib#module-contextlib', - 'contextvars': 'contextvars#module-contextvars', - 'copy': 'copy#module-copy', - 'copyreg': 'copyreg#module-copyreg', - 'crypt': 'crypt#module-crypt', - 'csv': 'csv#module-csv', - 'ctypes': 'ctypes#module-ctypes', - 'curses': 'curses#module-curses', - 'curses.ascii': 'curses.ascii#module-curses.ascii', - 'curses.panel': 'curses.panel#module-curses.panel', - 'curses.textpad': 'curses#module-curses.textpad', - 'dataclasses': 'dataclasses#module-dataclasses', - 'datetime': 'datetime#module-datetime', - 'dbm': 'dbm#module-dbm', - 'dbm.dumb': 'dbm#module-dbm.dumb', - 'dbm.gnu': 'dbm#module-dbm.gnu', - 'dbm.ndbm': 'dbm#module-dbm.ndbm', - 'dbm.sqlite3': 'dbm#module-dbm.sqlite3', - 'decimal': 'decimal#module-decimal', - 'difflib': 'difflib#module-difflib', - 'dis': 'dis#module-dis', - 'distutils': 'distutils#module-distutils', - 'doctest': 'doctest#module-doctest', - 'email': 'email#module-email', - 'email.charset': 'email.charset#module-email.charset', - 'email.contentmanager': 'email.contentmanager#module-email.contentmanager', - 'email.encoders': 'email.encoders#module-email.encoders', - 'email.errors': 'email.errors#module-email.errors', - 'email.generator': 'email.generator#module-email.generator', - 'email.header': 'email.header#module-email.header', - 'email.headerregistry': 'email.headerregistry#module-email.headerregistry', - 'email.iterators': 'email.iterators#module-email.iterators', - 'email.message': 'email.message#module-email.message', - 'email.mime': 'email.mime#module-email.mime', - 'email.mime.application': 'email.mime#module-email.mime.application', - 'email.mime.audio': 'email.mime#module-email.mime.audio', 
- 'email.mime.base': 'email.mime#module-email.mime.base', - 'email.mime.image': 'email.mime#module-email.mime.image', - 'email.mime.message': 'email.mime#module-email.mime.message', - 'email.mime.multipart': 'email.mime#module-email.mime.multipart', - 'email.mime.nonmultipart': 'email.mime#module-email.mime.nonmultipart', - 'email.mime.text': 'email.mime#module-email.mime.text', - 'email.parser': 'email.parser#module-email.parser', - 'email.policy': 'email.policy#module-email.policy', - 'email.utils': 'email.utils#module-email.utils', - 'encodings': 'codecs#module-encodings', - 'encodings.idna': 'codecs#module-encodings.idna', - 'encodings.mbcs': 'codecs#module-encodings.mbcs', - 'encodings.utf_8_sig': 'codecs#module-encodings.utf_8_sig', - 'ensurepip': 'ensurepip#module-ensurepip', - 'enum': 'enum#module-enum', - 'errno': 'errno#module-errno', - 'faulthandler': 'faulthandler#module-faulthandler', - 'fcntl': 'fcntl#module-fcntl', - 'filecmp': 'filecmp#module-filecmp', - 'fileinput': 'fileinput#module-fileinput', - 'fnmatch': 'fnmatch#module-fnmatch', - 'fractions': 'fractions#module-fractions', - 'ftplib': 'ftplib#module-ftplib', - 'functools': 'functools#module-functools', - 'gc': 'gc#module-gc', - 'getopt': 'getopt#module-getopt', - 'getpass': 'getpass#module-getpass', - 'gettext': 'gettext#module-gettext', - 'glob': 'glob#module-glob', - 'graphlib': 'graphlib#module-graphlib', - 'grp': 'grp#module-grp', - 'gzip': 'gzip#module-gzip', - 'hashlib': 'hashlib#module-hashlib', - 'heapq': 'heapq#module-heapq', - 'hmac': 'hmac#module-hmac', - 'html': 'html#module-html', - 'html.entities': 'html.entities#module-html.entities', - 'html.parser': 'html.parser#module-html.parser', - 'http': 'http#module-http', - 'http.client': 'http.client#module-http.client', - 'http.cookiejar': 'http.cookiejar#module-http.cookiejar', - 'http.cookies': 'http.cookies#module-http.cookies', - 'http.server': 'http.server#module-http.server', - 'idlelib': 'idle#module-idlelib', - 'imaplib': 'imaplib#module-imaplib', - 'imghdr': 'imghdr#module-imghdr', - 'imp': 'imp#module-imp', - 'importlib': 'importlib#module-importlib', - 'importlib.abc': 'importlib#module-importlib.abc', - 'importlib.machinery': 'importlib#module-importlib.machinery', - 'importlib.metadata': 'importlib.metadata#module-importlib.metadata', - 'importlib.resources': 'importlib.resources#module-importlib.resources', - 'importlib.resources.abc': 'importlib.resources.abc#module-importlib.resources.abc', - 'importlib.util': 'importlib#module-importlib.util', - 'inspect': 'inspect#module-inspect', - 'io': 'io#module-io', - 'ipaddress': 'ipaddress#module-ipaddress', - 'itertools': 'itertools#module-itertools', - 'json': 'json#module-json', - 'json.tool': 'json#module-json.tool', - 'keyword': 'keyword#module-keyword', - 'linecache': 'linecache#module-linecache', - 'locale': 'locale#module-locale', - 'logging': 'logging#module-logging', - 'logging.config': 'logging.config#module-logging.config', - 'logging.handlers': 'logging.handlers#module-logging.handlers', - 'lzma': 'lzma#module-lzma', - 'mailbox': 'mailbox#module-mailbox', - 'mailcap': 'mailcap#module-mailcap', - 'marshal': 'marshal#module-marshal', - 'math': 'math#module-math', - 'mimetypes': 'mimetypes#module-mimetypes', - 'mmap': 'mmap#module-mmap', - 'modulefinder': 'modulefinder#module-modulefinder', - 'msilib': 'msilib#module-msilib', - 'msvcrt': 'msvcrt#module-msvcrt', - 'multiprocessing': 'multiprocessing#module-multiprocessing', - 'multiprocessing.connection': 
'multiprocessing#module-multiprocessing.connection', - 'multiprocessing.dummy': 'multiprocessing#module-multiprocessing.dummy', - 'multiprocessing.managers': 'multiprocessing#module-multiprocessing.managers', - 'multiprocessing.pool': 'multiprocessing#module-multiprocessing.pool', - 'multiprocessing.shared_memory': 'multiprocessing.shared_memory#module-multiprocessing.shared_memory', - 'multiprocessing.sharedctypes': 'multiprocessing#module-multiprocessing.sharedctypes', - 'netrc': 'netrc#module-netrc', - 'nis': 'nis#module-nis', - 'nntplib': 'nntplib#module-nntplib', - 'numbers': 'numbers#module-numbers', - 'operator': 'operator#module-operator', - 'optparse': 'optparse#module-optparse', - 'os': 'os#module-os', - 'os.path': 'os.path#module-os.path', - 'ossaudiodev': 'ossaudiodev#module-ossaudiodev', - 'pathlib': 'pathlib#module-pathlib', - 'pdb': 'pdb#module-pdb', - 'pickle': 'pickle#module-pickle', - 'pickletools': 'pickletools#module-pickletools', - 'pipes': 'pipes#module-pipes', - 'pkgutil': 'pkgutil#module-pkgutil', - 'platform': 'platform#module-platform', - 'plistlib': 'plistlib#module-plistlib', - 'poplib': 'poplib#module-poplib', - 'posix': 'posix#module-posix', - 'pprint': 'pprint#module-pprint', - 'profile': 'profile#module-profile', - 'pstats': 'profile#module-pstats', - 'pty': 'pty#module-pty', - 'pwd': 'pwd#module-pwd', - 'py_compile': 'py_compile#module-py_compile', - 'pyclbr': 'pyclbr#module-pyclbr', - 'pydoc': 'pydoc#module-pydoc', - 'queue': 'queue#module-queue', - 'quopri': 'quopri#module-quopri', - 'random': 'random#module-random', - 're': 're#module-re', - 'readline': 'readline#module-readline', - 'reprlib': 'reprlib#module-reprlib', - 'resource': 'resource#module-resource', - 'rlcompleter': 'rlcompleter#module-rlcompleter', - 'runpy': 'runpy#module-runpy', - 'sched': 'sched#module-sched', - 'secrets': 'secrets#module-secrets', - 'select': 'select#module-select', - 'selectors': 'selectors#module-selectors', - 'shelve': 'shelve#module-shelve', - 'shlex': 'shlex#module-shlex', - 'shutil': 'shutil#module-shutil', - 'signal': 'signal#module-signal', - 'site': 'site#module-site', - 'sitecustomize': 'site#module-sitecustomize', - 'smtpd': 'smtpd#module-smtpd', - 'smtplib': 'smtplib#module-smtplib', - 'sndhdr': 'sndhdr#module-sndhdr', - 'socket': 'socket#module-socket', - 'socketserver': 'socketserver#module-socketserver', - 'spwd': 'spwd#module-spwd', - 'sqlite3': 'sqlite3#module-sqlite3', - 'ssl': 'ssl#module-ssl', - 'stat': 'stat#module-stat', - 'statistics': 'statistics#module-statistics', - 'string': 'string#module-string', - 'stringprep': 'stringprep#module-stringprep', - 'struct': 'struct#module-struct', - 'subprocess': 'subprocess#module-subprocess', - 'sunau': 'sunau#module-sunau', - 'symtable': 'symtable#module-symtable', - 'sys': 'sys#module-sys', - 'sys.monitoring': 'sys.monitoring#module-sys.monitoring', - 'sysconfig': 'sysconfig#module-sysconfig', - 'syslog': 'syslog#module-syslog', - 'tabnanny': 'tabnanny#module-tabnanny', - 'tarfile': 'tarfile#module-tarfile', - 'telnetlib': 'telnetlib#module-telnetlib', - 'tempfile': 'tempfile#module-tempfile', - 'termios': 'termios#module-termios', - 'test': 'test#module-test', - 'test.regrtest': 'test#module-test.regrtest', - 'test.support': 'test#module-test.support', - 'test.support.bytecode_helper': 'test#module-test.support.bytecode_helper', - 'test.support.import_helper': 'test#module-test.support.import_helper', - 'test.support.os_helper': 'test#module-test.support.os_helper', - 'test.support.script_helper': 
'test#module-test.support.script_helper', - 'test.support.socket_helper': 'test#module-test.support.socket_helper', - 'test.support.threading_helper': 'test#module-test.support.threading_helper', - 'test.support.warnings_helper': 'test#module-test.support.warnings_helper', - 'textwrap': 'textwrap#module-textwrap', - 'threading': 'threading#module-threading', - 'time': 'time#module-time', - 'timeit': 'timeit#module-timeit', - 'tkinter': 'tkinter#module-tkinter', - 'tkinter.colorchooser': 'tkinter.colorchooser#module-tkinter.colorchooser', - 'tkinter.commondialog': 'dialog#module-tkinter.commondialog', - 'tkinter.dnd': 'tkinter.dnd#module-tkinter.dnd', - 'tkinter.filedialog': 'dialog#module-tkinter.filedialog', - 'tkinter.font': 'tkinter.font#module-tkinter.font', - 'tkinter.messagebox': 'tkinter.messagebox#module-tkinter.messagebox', - 'tkinter.scrolledtext': 'tkinter.scrolledtext#module-tkinter.scrolledtext', - 'tkinter.simpledialog': 'dialog#module-tkinter.simpledialog', - 'tkinter.ttk': 'tkinter.ttk#module-tkinter.ttk', - 'token': 'token#module-token', - 'tokenize': 'tokenize#module-tokenize', - 'tomllib': 'tomllib#module-tomllib', - 'trace': 'trace#module-trace', - 'traceback': 'traceback#module-traceback', - 'tracemalloc': 'tracemalloc#module-tracemalloc', - 'tty': 'tty#module-tty', - 'turtle': 'turtle#module-turtle', - 'turtledemo': 'turtle#module-turtledemo', - 'types': 'types#module-types', - 'typing': 'typing#module-typing', - 'unicodedata': 'unicodedata#module-unicodedata', - 'unittest': 'unittest#module-unittest', - 'unittest.mock': 'unittest.mock#module-unittest.mock', - 'urllib': 'urllib#module-urllib', - 'urllib.error': 'urllib.error#module-urllib.error', - 'urllib.parse': 'urllib.parse#module-urllib.parse', - 'urllib.request': 'urllib.request#module-urllib.request', - 'urllib.response': 'urllib.request#module-urllib.response', - 'urllib.robotparser': 'urllib.robotparser#module-urllib.robotparser', - 'usercustomize': 'site#module-usercustomize', - 'uu': 'uu#module-uu', - 'uuid': 'uuid#module-uuid', - 'venv': 'venv#module-venv', - 'warnings': 'warnings#module-warnings', - 'wave': 'wave#module-wave', - 'weakref': 'weakref#module-weakref', - 'webbrowser': 'webbrowser#module-webbrowser', - 'winreg': 'winreg#module-winreg', - 'winsound': 'winsound#module-winsound', - 'wsgiref': 'wsgiref#module-wsgiref', - 'wsgiref.handlers': 'wsgiref#module-wsgiref.handlers', - 'wsgiref.headers': 'wsgiref#module-wsgiref.headers', - 'wsgiref.simple_server': 'wsgiref#module-wsgiref.simple_server', - 'wsgiref.types': 'wsgiref#module-wsgiref.types', - 'wsgiref.util': 'wsgiref#module-wsgiref.util', - 'wsgiref.validate': 'wsgiref#module-wsgiref.validate', - 'xdrlib': 'xdrlib#module-xdrlib', - 'xml': 'xml#module-xml', - 'xml.dom': 'xml.dom#module-xml.dom', - 'xml.dom.minidom': 'xml.dom.minidom#module-xml.dom.minidom', - 'xml.dom.pulldom': 'xml.dom.pulldom#module-xml.dom.pulldom', - 'xml.etree.ElementInclude': 'xml.etree.elementtree#module-xml.etree.ElementInclude', - 'xml.etree.ElementTree': 'xml.etree.elementtree#module-xml.etree.ElementTree', - 'xml.parsers.expat': 'pyexpat#module-xml.parsers.expat', - 'xml.parsers.expat.errors': 'pyexpat#module-xml.parsers.expat.errors', - 'xml.parsers.expat.model': 'pyexpat#module-xml.parsers.expat.model', - 'xml.sax': 'xml.sax#module-xml.sax', - 'xml.sax.handler': 'xml.sax.handler#module-xml.sax.handler', - 'xml.sax.saxutils': 'xml.sax.utils#module-xml.sax.saxutils', - 'xml.sax.xmlreader': 'xml.sax.reader#module-xml.sax.xmlreader', - 'xmlrpc': 'xmlrpc#module-xmlrpc', 
- 'xmlrpc.client': 'xmlrpc.client#module-xmlrpc.client', - 'xmlrpc.server': 'xmlrpc.server#module-xmlrpc.server', - 'zipapp': 'zipapp#module-zipapp', - 'zipfile': 'zipfile#module-zipfile', - 'zipimport': 'zipimport#module-zipimport', - 'zlib': 'zlib#module-zlib', - 'zoneinfo': 'zoneinfo#module-zoneinfo', -} diff --git a/Python313_13_x64_Template/Lib/pydoc_data/topics.py b/Python313_13_x64_Template/Lib/pydoc_data/topics.py deleted file mode 100644 index bbbd6a3e..00000000 --- a/Python313_13_x64_Template/Lib/pydoc_data/topics.py +++ /dev/null @@ -1,13095 +0,0 @@ -# Autogenerated by Sphinx on Tue Apr 7 20:18:56 2026 -# as part of the release process. - -topics = { - 'assert': r'''The "assert" statement -********************** - -Assert statements are a convenient way to insert debugging assertions -into a program: - - assert_stmt ::= "assert" expression ["," expression] - -The simple form, "assert expression", is equivalent to - - if __debug__: - if not expression: raise AssertionError - -The extended form, "assert expression1, expression2", is equivalent to - - if __debug__: - if not expression1: raise AssertionError(expression2) - -These equivalences assume that "__debug__" and "AssertionError" refer -to the built-in variables with those names. In the current -implementation, the built-in variable "__debug__" is "True" under -normal circumstances, "False" when optimization is requested (command -line option "-O"). The current code generator emits no code for an -"assert" statement when optimization is requested at compile time. -Note that it is unnecessary to include the source code for the -expression that failed in the error message; it will be displayed as -part of the stack trace. - -Assignments to "__debug__" are illegal. The value for the built-in -variable is determined when the interpreter starts. -''', - 'assignment': r'''Assignment statements -********************* - -Assignment statements are used to (re)bind names to values and to -modify attributes or items of mutable objects: - - assignment_stmt ::= (target_list "=")+ (starred_expression | yield_expression) - target_list ::= target ("," target)* [","] - target ::= identifier - | "(" [target_list] ")" - | "[" [target_list] "]" - | attributeref - | subscription - | slicing - | "*" target - -(See section Primaries for the syntax definitions for *attributeref*, -*subscription*, and *slicing*.) - -An assignment statement evaluates the expression list (remember that -this can be a single expression or a comma-separated list, the latter -yielding a tuple) and assigns the single resulting object to each of -the target lists, from left to right. - -Assignment is defined recursively depending on the form of the target -(list). When a target is part of a mutable object (an attribute -reference, subscription or slicing), the mutable object must -ultimately perform the assignment and decide about its validity, and -may raise an exception if the assignment is unacceptable. The rules -observed by various types and the exceptions raised are given with the -definition of the object types (see section The standard type -hierarchy). - -Assignment of an object to a target list, optionally enclosed in -parentheses or square brackets, is recursively defined as follows. - -* If the target list is a single target with no trailing comma, - optionally in parentheses, the object is assigned to that target. 
- -* Else: - - * If the target list contains one target prefixed with an asterisk, - called a “starred” target: The object must be an iterable with at - least as many items as there are targets in the target list, minus - one. The first items of the iterable are assigned, from left to - right, to the targets before the starred target. The final items - of the iterable are assigned to the targets after the starred - target. A list of the remaining items in the iterable is then - assigned to the starred target (the list can be empty). - - * Else: The object must be an iterable with the same number of items - as there are targets in the target list, and the items are - assigned, from left to right, to the corresponding targets. - -Assignment of an object to a single target is recursively defined as -follows. - -* If the target is an identifier (name): - - * If the name does not occur in a "global" or "nonlocal" statement - in the current code block: the name is bound to the object in the - current local namespace. - - * Otherwise: the name is bound to the object in the global namespace - or the outer namespace determined by "nonlocal", respectively. - - The name is rebound if it was already bound. This may cause the - reference count for the object previously bound to the name to reach - zero, causing the object to be deallocated and its destructor (if it - has one) to be called. - -* If the target is an attribute reference: The primary expression in - the reference is evaluated. It should yield an object with - assignable attributes; if this is not the case, "TypeError" is - raised. That object is then asked to assign the assigned object to - the given attribute; if it cannot perform the assignment, it raises - an exception (usually but not necessarily "AttributeError"). - - Note: If the object is a class instance and the attribute reference - occurs on both sides of the assignment operator, the right-hand side - expression, "a.x" can access either an instance attribute or (if no - instance attribute exists) a class attribute. The left-hand side - target "a.x" is always set as an instance attribute, creating it if - necessary. Thus, the two occurrences of "a.x" do not necessarily - refer to the same attribute: if the right-hand side expression - refers to a class attribute, the left-hand side creates a new - instance attribute as the target of the assignment: - - class Cls: - x = 3 # class variable - inst = Cls() - inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3 - - This description does not necessarily apply to descriptor - attributes, such as properties created with "property()". - -* If the target is a subscription: The primary expression in the - reference is evaluated. It should yield either a mutable sequence - object (such as a list) or a mapping object (such as a dictionary). - Next, the subscript expression is evaluated. - - If the primary is a mutable sequence object (such as a list), the - subscript must yield an integer. If it is negative, the sequence’s - length is added to it. The resulting value must be a nonnegative - integer less than the sequence’s length, and the sequence is asked - to assign the assigned object to its item with that index. If the - index is out of range, "IndexError" is raised (assignment to a - subscripted sequence cannot add new items to a list). 
- - If the primary is a mapping object (such as a dictionary), the - subscript must have a type compatible with the mapping’s key type, - and the mapping is then asked to create a key/value pair which maps - the subscript to the assigned object. This can either replace an - existing key/value pair with the same key value, or insert a new - key/value pair (if no key with the same value existed). - - For user-defined objects, the "__setitem__()" method is called with - appropriate arguments. - -* If the target is a slicing: The primary expression in the reference - is evaluated. It should yield a mutable sequence object (such as a - list). The assigned object should be a sequence object of the same - type. Next, the lower and upper bound expressions are evaluated, - insofar they are present; defaults are zero and the sequence’s - length. The bounds should evaluate to integers. If either bound is - negative, the sequence’s length is added to it. The resulting - bounds are clipped to lie between zero and the sequence’s length, - inclusive. Finally, the sequence object is asked to replace the - slice with the items of the assigned sequence. The length of the - slice may be different from the length of the assigned sequence, - thus changing the length of the target sequence, if the target - sequence allows it. - -**CPython implementation detail:** In the current implementation, the -syntax for targets is taken to be the same as for expressions, and -invalid syntax is rejected during the code generation phase, causing -less detailed error messages. - -Although the definition of assignment implies that overlaps between -the left-hand side and the right-hand side are ‘simultaneous’ (for -example "a, b = b, a" swaps two variables), overlaps *within* the -collection of assigned-to variables occur left-to-right, sometimes -resulting in confusion. For instance, the following program prints -"[0, 2]": - - x = [0, 1] - i = 0 - i, x[i] = 1, 2 # i is updated, then x[i] is updated - print(x) - -See also: - - **PEP 3132** - Extended Iterable Unpacking - The specification for the "*target" feature. - - -Augmented assignment statements -=============================== - -Augmented assignment is the combination, in a single statement, of a -binary operation and an assignment statement: - - augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression) - augtarget ::= identifier | attributeref | subscription | slicing - augop ::= "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**=" - | ">>=" | "<<=" | "&=" | "^=" | "|=" - -(See section Primaries for the syntax definitions of the last three -symbols.) - -An augmented assignment evaluates the target (which, unlike normal -assignment statements, cannot be an unpacking) and the expression -list, performs the binary operation specific to the type of assignment -on the two operands, and assigns the result to the original target. -The target is only evaluated once. - -An augmented assignment statement like "x += 1" can be rewritten as "x -= x + 1" to achieve a similar, but not exactly equal effect. In the -augmented version, "x" is only evaluated once. Also, when possible, -the actual operation is performed *in-place*, meaning that rather than -creating a new object and assigning that to the target, the old object -is modified instead. - -Unlike normal assignments, augmented assignments evaluate the left- -hand side *before* evaluating the right-hand side. 
For example, "a[i] -+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs -the addition, and lastly, it writes the result back to "a[i]". - -With the exception of assigning to tuples and multiple targets in a -single statement, the assignment done by augmented assignment -statements is handled the same way as normal assignments. Similarly, -with the exception of the possible *in-place* behavior, the binary -operation performed by augmented assignment is the same as the normal -binary operations. - -For targets which are attribute references, the same caveat about -class and instance attributes applies as for regular assignments. - - -Annotated assignment statements -=============================== - -*Annotation* assignment is the combination, in a single statement, of -a variable or attribute annotation and an optional assignment -statement: - - annotated_assignment_stmt ::= augtarget ":" expression - ["=" (starred_expression | yield_expression)] - -The difference from normal Assignment statements is that only a single -target is allowed. - -The assignment target is considered “simple” if it consists of a -single name that is not enclosed in parentheses. For simple assignment -targets, if in class or module scope, the annotations are evaluated -and stored in a special class or module attribute "__annotations__" -that is a dictionary mapping from variable names (mangled if private) -to evaluated annotations. This attribute is writable and is -automatically created at the start of class or module body execution, -if annotations are found statically. - -If the assignment target is not simple (an attribute, subscript node, -or parenthesized name), the annotation is evaluated if in class or -module scope, but not stored. - -If a name is annotated in a function scope, then this name is local -for that scope. Annotations are never evaluated and stored in function -scopes. - -If the right hand side is present, an annotated assignment performs -the actual assignment before evaluating annotations (where -applicable). If the right hand side is not present for an expression -target, then the interpreter evaluates the target except for the last -"__setitem__()" or "__setattr__()" call. - -See also: - - **PEP 526** - Syntax for Variable Annotations - The proposal that added syntax for annotating the types of - variables (including class variables and instance variables), - instead of expressing them through comments. - - **PEP 484** - Type hints - The proposal that added the "typing" module to provide a standard - syntax for type annotations that can be used in static analysis - tools and IDEs. - -Changed in version 3.8: Now annotated assignments allow the same -expressions in the right hand side as regular assignments. Previously, -some expressions (like un-parenthesized tuple expressions) caused a -syntax error. -''', - 'assignment-expressions': r'''Assignment expressions -********************** - - assignment_expression ::= [identifier ":="] expression - -An assignment expression (sometimes also called a “named expression” -or “walrus”) assigns an "expression" to an "identifier", while also -returning the value of the "expression". 
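
Concretely, the expression itself yields the assigned value, so it can appear where a plain assignment statement cannot; a minimal interactive sketch (the name "answer" is arbitrary):

    >>> (answer := 42)    # binds "answer" and also evaluates to 42
    42
    >>> answer
    42
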
- -One common use case is when handling matched regular expressions: - - if matching := pattern.search(data): - do_something(matching) - -Or, when processing a file stream in chunks: - - while chunk := file.read(9000): - process(chunk) - -Assignment expressions must be surrounded by parentheses when used as -expression statements and when used as sub-expressions in slicing, -conditional, lambda, keyword-argument, and comprehension-if -expressions and in "assert", "with", and "assignment" statements. In -all other places where they can be used, parentheses are not required, -including in "if" and "while" statements. - -Added in version 3.8: See **PEP 572** for more details about -assignment expressions. -''', - 'async': r'''Coroutines -********** - -Added in version 3.5. - - -Coroutine function definition -============================= - - async_funcdef ::= [decorators] "async" "def" funcname "(" [parameter_list] ")" - ["->" expression] ":" suite - -Execution of Python coroutines can be suspended and resumed at many -points (see *coroutine*). "await" expressions, "async for" and "async -with" can only be used in the body of a coroutine function. - -Functions defined with "async def" syntax are always coroutine -functions, even if they do not contain "await" or "async" keywords. - -It is a "SyntaxError" to use a "yield from" expression inside the body -of a coroutine function. - -An example of a coroutine function: - - async def func(param1, param2): - do_stuff() - await some_coroutine() - -Changed in version 3.7: "await" and "async" are now keywords; -previously they were only treated as such inside the body of a -coroutine function. - - -The "async for" statement -========================= - - async_for_stmt ::= "async" for_stmt - -An *asynchronous iterable* provides an "__aiter__" method that -directly returns an *asynchronous iterator*, which can call -asynchronous code in its "__anext__" method. - -The "async for" statement allows convenient iteration over -asynchronous iterables. - -The following code: - - async for TARGET in ITER: - SUITE - else: - SUITE2 - -Is semantically equivalent to: - - iter = (ITER).__aiter__() - running = True - - while running: - try: - TARGET = await iter.__anext__() - except StopAsyncIteration: - running = False - else: - SUITE - else: - SUITE2 - -except that implicit special method lookup is used for "__aiter__()" -and "__anext__()". - -It is a "SyntaxError" to use an "async for" statement outside the body -of a coroutine function. - - -The "async with" statement -========================== - - async_with_stmt ::= "async" with_stmt - -An *asynchronous context manager* is a *context manager* that is able -to suspend execution in its *enter* and *exit* methods. - -The following code: - - async with EXPRESSION as TARGET: - SUITE - -is semantically equivalent to: - - manager = (EXPRESSION) - aenter = manager.__aenter__ - aexit = manager.__aexit__ - value = await aenter() - hit_except = False - - try: - TARGET = value - SUITE - except: - hit_except = True - if not await aexit(*sys.exc_info()): - raise - finally: - if not hit_except: - await aexit(None, None, None) - -except that implicit special method lookup is used for "__aenter__()" -and "__aexit__()". - -It is a "SyntaxError" to use an "async with" statement outside the -body of a coroutine function. - -See also: - - **PEP 492** - Coroutines with async and await syntax - The proposal that made coroutines a proper standalone concept in - Python, and added supporting syntax. 
-''', - 'atom-identifiers': r'''Identifiers (Names) -******************* - -An identifier occurring as an atom is a name. See section Identifiers -and keywords for lexical definition and section Naming and binding for -documentation of naming and binding. - -When the name is bound to an object, evaluation of the atom yields -that object. When a name is not bound, an attempt to evaluate it -raises a "NameError" exception. - - -Private name mangling -===================== - -When an identifier that textually occurs in a class definition begins -with two or more underscore characters and does not end in two or more -underscores, it is considered a *private name* of that class. - -See also: The class specifications. - -More precisely, private names are transformed to a longer form before -code is generated for them. If the transformed name is longer than -255 characters, implementation-defined truncation may happen. - -The transformation is independent of the syntactical context in which -the identifier is used but only the following private identifiers are -mangled: - -* Any name used as the name of a variable that is assigned or read or - any name of an attribute being accessed. - - The "__name__" attribute of nested functions, classes, and type - aliases is however not mangled. - -* The name of imported modules, e.g., "__spam" in "import __spam". If - the module is part of a package (i.e., its name contains a dot), the - name is *not* mangled, e.g., the "__foo" in "import __foo.bar" is - not mangled. - -* The name of an imported member, e.g., "__f" in "from spam import - __f". - -The transformation rule is defined as follows: - -* The class name, with leading underscores removed and a single - leading underscore inserted, is inserted in front of the identifier, - e.g., the identifier "__spam" occurring in a class named "Foo", - "_Foo" or "__Foo" is transformed to "_Foo__spam". - -* If the class name consists only of underscores, the transformation - is the identity, e.g., the identifier "__spam" occurring in a class - named "_" or "__" is left as is. -''', - 'atom-literals': r'''Literals -******** - -Python supports string and bytes literals and various numeric -literals: - - literal ::= stringliteral | bytesliteral - | integer | floatnumber | imagnumber - -Evaluation of a literal yields an object of the given type (string, -bytes, integer, floating-point number, complex number) with the given -value. The value may be approximated in the case of floating-point -and imaginary (complex) literals. See section Literals for details. - -All literals correspond to immutable data types, and hence the -object’s identity is less important than its value. Multiple -evaluations of literals with the same value (either the same -occurrence in the program text or a different occurrence) may obtain -the same object or a different object with the same value. -''', - 'attribute-access': r'''Customizing attribute access -**************************** - -The following methods can be defined to customize the meaning of -attribute access (use of, assignment to, or deletion of "x.name") for -class instances. - -object.__getattr__(self, name) - - Called when the default attribute access fails with an - "AttributeError" (either "__getattribute__()" raises an - "AttributeError" because *name* is not an instance attribute or an - attribute in the class tree for "self"; or "__get__()" of a *name* - property raises "AttributeError"). 
This method should either - return the (computed) attribute value or raise an "AttributeError" - exception. The "object" class itself does not provide this method. - - Note that if the attribute is found through the normal mechanism, - "__getattr__()" is not called. (This is an intentional asymmetry - between "__getattr__()" and "__setattr__()".) This is done both for - efficiency reasons and because otherwise "__getattr__()" would have - no way to access other attributes of the instance. Note that at - least for instance variables, you can take total control by not - inserting any values in the instance attribute dictionary (but - instead inserting them in another object). See the - "__getattribute__()" method below for a way to actually get total - control over attribute access. - -object.__getattribute__(self, name) - - Called unconditionally to implement attribute accesses for - instances of the class. If the class also defines "__getattr__()", - the latter will not be called unless "__getattribute__()" either - calls it explicitly or raises an "AttributeError". This method - should return the (computed) attribute value or raise an - "AttributeError" exception. In order to avoid infinite recursion in - this method, its implementation should always call the base class - method with the same name to access any attributes it needs, for - example, "object.__getattribute__(self, name)". - - Note: - - This method may still be bypassed when looking up special methods - as the result of implicit invocation via language syntax or - built-in functions. See Special method lookup. - - For certain sensitive attribute accesses, raises an auditing event - "object.__getattr__" with arguments "obj" and "name". - -object.__setattr__(self, name, value) - - Called when an attribute assignment is attempted. This is called - instead of the normal mechanism (i.e. store the value in the - instance dictionary). *name* is the attribute name, *value* is the - value to be assigned to it. - - If "__setattr__()" wants to assign to an instance attribute, it - should call the base class method with the same name, for example, - "object.__setattr__(self, name, value)". - - For certain sensitive attribute assignments, raises an auditing - event "object.__setattr__" with arguments "obj", "name", "value". - -object.__delattr__(self, name) - - Like "__setattr__()" but for attribute deletion instead of - assignment. This should only be implemented if "del obj.name" is - meaningful for the object. - - For certain sensitive attribute deletions, raises an auditing event - "object.__delattr__" with arguments "obj" and "name". - -object.__dir__(self) - - Called when "dir()" is called on the object. An iterable must be - returned. "dir()" converts the returned iterable to a list and - sorts it. - - -Customizing module attribute access -=================================== - -module.__getattr__() -module.__dir__() - -Special names "__getattr__" and "__dir__" can be also used to -customize access to module attributes. The "__getattr__" function at -the module level should accept one argument which is the name of an -attribute and return the computed value or raise an "AttributeError". -If an attribute is not found on a module object through the normal -lookup, i.e. "object.__getattribute__()", then "__getattr__" is -searched in the module "__dict__" before raising an "AttributeError". -If found, it is called with the attribute name and the result is -returned. 
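
The module-level hook described above is easiest to see in a minimal sketch (the module name "mymod" and the attribute "answer" are illustrative, not from the text above):

    # mymod.py -- fallback for attributes not found by normal lookup
    def __getattr__(name):
        if name == 'answer':
            return 42
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

    # After "import mymod": mymod.answer -> 42; mymod.missing raises AttributeError.
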
- -The "__dir__" function should accept no arguments, and return an -iterable of strings that represents the names accessible on module. If -present, this function overrides the standard "dir()" search on a -module. - -module.__class__ - -For a more fine grained customization of the module behavior (setting -attributes, properties, etc.), one can set the "__class__" attribute -of a module object to a subclass of "types.ModuleType". For example: - - import sys - from types import ModuleType - - class VerboseModule(ModuleType): - def __repr__(self): - return f'Verbose {self.__name__}' - - def __setattr__(self, attr, value): - print(f'Setting {attr}...') - super().__setattr__(attr, value) - - sys.modules[__name__].__class__ = VerboseModule - -Note: - - Defining module "__getattr__" and setting module "__class__" only - affect lookups made using the attribute access syntax – directly - accessing the module globals (whether by code within the module, or - via a reference to the module’s globals dictionary) is unaffected. - -Changed in version 3.5: "__class__" module attribute is now writable. - -Added in version 3.7: "__getattr__" and "__dir__" module attributes. - -See also: - - **PEP 562** - Module __getattr__ and __dir__ - Describes the "__getattr__" and "__dir__" functions on modules. - - -Implementing Descriptors -======================== - -The following methods only apply when an instance of the class -containing the method (a so-called *descriptor* class) appears in an -*owner* class (the descriptor must be in either the owner’s class -dictionary or in the class dictionary for one of its parents). In the -examples below, “the attribute” refers to the attribute whose name is -the key of the property in the owner class’ "__dict__". The "object" -class itself does not implement any of these protocols. - -object.__get__(self, instance, owner=None) - - Called to get the attribute of the owner class (class attribute - access) or of an instance of that class (instance attribute - access). The optional *owner* argument is the owner class, while - *instance* is the instance that the attribute was accessed through, - or "None" when the attribute is accessed through the *owner*. - - This method should return the computed attribute value or raise an - "AttributeError" exception. - - **PEP 252** specifies that "__get__()" is callable with one or two - arguments. Python’s own built-in descriptors support this - specification; however, it is likely that some third-party tools - have descriptors that require both arguments. Python’s own - "__getattribute__()" implementation always passes in both arguments - whether they are required or not. - -object.__set__(self, instance, value) - - Called to set the attribute on an instance *instance* of the owner - class to a new value, *value*. - - Note, adding "__set__()" or "__delete__()" changes the kind of - descriptor to a “data descriptor”. See Invoking Descriptors for - more details. - -object.__delete__(self, instance) - - Called to delete the attribute on an instance *instance* of the - owner class. - -Instances of descriptors may also have the "__objclass__" attribute -present: - -object.__objclass__ - - The attribute "__objclass__" is interpreted by the "inspect" module - as specifying the class where this object was defined (setting this - appropriately can assist in runtime introspection of dynamic class - attributes). 
For callables, it may indicate that an instance of the - given type (or a subclass) is expected or required as the first - positional argument (for example, CPython sets this attribute for - unbound methods that are implemented in C). - - -Invoking Descriptors -==================== - -In general, a descriptor is an object attribute with “binding -behavior”, one whose attribute access has been overridden by methods -in the descriptor protocol: "__get__()", "__set__()", and -"__delete__()". If any of those methods are defined for an object, it -is said to be a descriptor. - -The default behavior for attribute access is to get, set, or delete -the attribute from an object’s dictionary. For instance, "a.x" has a -lookup chain starting with "a.__dict__['x']", then -"type(a).__dict__['x']", and continuing through the base classes of -"type(a)" excluding metaclasses. - -However, if the looked-up value is an object defining one of the -descriptor methods, then Python may override the default behavior and -invoke the descriptor method instead. Where this occurs in the -precedence chain depends on which descriptor methods were defined and -how they were called. - -The starting point for descriptor invocation is a binding, "a.x". How -the arguments are assembled depends on "a": - -Direct Call - The simplest and least common call is when user code directly - invokes a descriptor method: "x.__get__(a)". - -Instance Binding - If binding to an object instance, "a.x" is transformed into the - call: "type(a).__dict__['x'].__get__(a, type(a))". - -Class Binding - If binding to a class, "A.x" is transformed into the call: - "A.__dict__['x'].__get__(None, A)". - -Super Binding - A dotted lookup such as "super(A, a).x" searches - "a.__class__.__mro__" for a base class "B" following "A" and then - returns "B.__dict__['x'].__get__(a, A)". If not a descriptor, "x" - is returned unchanged. - -For instance bindings, the precedence of descriptor invocation depends -on which descriptor methods are defined. A descriptor can define any -combination of "__get__()", "__set__()" and "__delete__()". If it -does not define "__get__()", then accessing the attribute will return -the descriptor object itself unless there is a value in the object’s -instance dictionary. If the descriptor defines "__set__()" and/or -"__delete__()", it is a data descriptor; if it defines neither, it is -a non-data descriptor. Normally, data descriptors define both -"__get__()" and "__set__()", while non-data descriptors have just the -"__get__()" method. Data descriptors with "__get__()" and "__set__()" -(and/or "__delete__()") defined always override a redefinition in an -instance dictionary. In contrast, non-data descriptors can be -overridden by instances. - -Python methods (including those decorated with "@staticmethod" and -"@classmethod") are implemented as non-data descriptors. Accordingly, -instances can redefine and override methods. This allows individual -instances to acquire behaviors that differ from other instances of the -same class. - -The "property()" function is implemented as a data descriptor. -Accordingly, instances cannot override the behavior of a property. - - -__slots__ -========= - -*__slots__* allow us to explicitly declare data members (like -properties) and deny the creation of "__dict__" and *__weakref__* -(unless explicitly declared in *__slots__* or available in a parent.) - -The space saved over using "__dict__" can be significant. Attribute -lookup speed can be significantly improved as well. 
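
A compact sketch of the behavior described above (the "Point" class is illustrative):

    class Point:
        __slots__ = ('x', 'y')      # no per-instance __dict__ is created

        def __init__(self, x, y):
            self.x = x
            self.y = y

    p = Point(1.0, 2.0)
    p.x = 3.0                       # fine: 'x' is a declared slot
    # p.z = 4.0 would raise AttributeError: 'Point' object has no attribute 'z'
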
- -object.__slots__ - - This class variable can be assigned a string, iterable, or sequence - of strings with variable names used by instances. *__slots__* - reserves space for the declared variables and prevents the - automatic creation of "__dict__" and *__weakref__* for each - instance. - -Notes on using *__slots__*: - -* When inheriting from a class without *__slots__*, the "__dict__" and - *__weakref__* attribute of the instances will always be accessible. - -* Without a "__dict__" variable, instances cannot be assigned new - variables not listed in the *__slots__* definition. Attempts to - assign to an unlisted variable name raises "AttributeError". If - dynamic assignment of new variables is desired, then add - "'__dict__'" to the sequence of strings in the *__slots__* - declaration. - -* Without a *__weakref__* variable for each instance, classes defining - *__slots__* do not support "weak references" to its instances. If - weak reference support is needed, then add "'__weakref__'" to the - sequence of strings in the *__slots__* declaration. - -* *__slots__* are implemented at the class level by creating - descriptors for each variable name. As a result, class attributes - cannot be used to set default values for instance variables defined - by *__slots__*; otherwise, the class attribute would overwrite the - descriptor assignment. - -* The action of a *__slots__* declaration is not limited to the class - where it is defined. *__slots__* declared in parents are available - in child classes. However, instances of a child subclass will get a - "__dict__" and *__weakref__* unless the subclass also defines - *__slots__* (which should only contain names of any *additional* - slots). - -* If a class defines a slot also defined in a base class, the instance - variable defined by the base class slot is inaccessible (except by - retrieving its descriptor directly from the base class). This - renders the meaning of the program undefined. In the future, a - check may be added to prevent this. - -* "TypeError" will be raised if nonempty *__slots__* are defined for a - class derived from a ""variable-length" built-in type" such as - "int", "bytes", and "tuple". - -* Any non-string *iterable* may be assigned to *__slots__*. - -* If a "dictionary" is used to assign *__slots__*, the dictionary keys - will be used as the slot names. The values of the dictionary can be - used to provide per-attribute docstrings that will be recognised by - "inspect.getdoc()" and displayed in the output of "help()". - -* "__class__" assignment works only if both classes have the same - *__slots__*. - -* Multiple inheritance with multiple slotted parent classes can be - used, but only one parent is allowed to have attributes created by - slots (the other bases must have empty slot layouts) - violations - raise "TypeError". - -* If an *iterator* is used for *__slots__* then a *descriptor* is - created for each of the iterator’s values. However, the *__slots__* - attribute will be an empty iterator. -''', - 'attribute-references': r'''Attribute references -******************** - -An attribute reference is a primary followed by a period and a name: - - attributeref ::= primary "." identifier - -The primary must evaluate to an object of a type that supports -attribute references, which most objects do. This object is then -asked to produce the attribute whose name is the identifier. The type -and value produced is determined by the object. Multiple evaluations -of the same attribute reference may yield different objects. 
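-
-For instance, a property may compute a new object on every access, so
-two evaluations of the same reference need not yield the same object
-(the "Box" class is invented for this illustration):
-
-   class Box:
-       @property
-       def items(self):
-           return []          # a fresh list on each attribute access
-
-   b = Box()
-   b.items is b.items         # False: each evaluation yields a new object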
- -This production can be customized by overriding the -"__getattribute__()" method or the "__getattr__()" method. The -"__getattribute__()" method is called first and either returns a value -or raises "AttributeError" if the attribute is not available. - -If an "AttributeError" is raised and the object has a "__getattr__()" -method, that method is called as a fallback. -''', - 'augassign': r'''Augmented assignment statements -******************************* - -Augmented assignment is the combination, in a single statement, of a -binary operation and an assignment statement: - - augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression) - augtarget ::= identifier | attributeref | subscription | slicing - augop ::= "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**=" - | ">>=" | "<<=" | "&=" | "^=" | "|=" - -(See section Primaries for the syntax definitions of the last three -symbols.) - -An augmented assignment evaluates the target (which, unlike normal -assignment statements, cannot be an unpacking) and the expression -list, performs the binary operation specific to the type of assignment -on the two operands, and assigns the result to the original target. -The target is only evaluated once. - -An augmented assignment statement like "x += 1" can be rewritten as "x -= x + 1" to achieve a similar, but not exactly equal effect. In the -augmented version, "x" is only evaluated once. Also, when possible, -the actual operation is performed *in-place*, meaning that rather than -creating a new object and assigning that to the target, the old object -is modified instead. - -Unlike normal assignments, augmented assignments evaluate the left- -hand side *before* evaluating the right-hand side. For example, "a[i] -+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs -the addition, and lastly, it writes the result back to "a[i]". - -With the exception of assigning to tuples and multiple targets in a -single statement, the assignment done by augmented assignment -statements is handled the same way as normal assignments. Similarly, -with the exception of the possible *in-place* behavior, the binary -operation performed by augmented assignment is the same as the normal -binary operations. - -For targets which are attribute references, the same caveat about -class and instance attributes applies as for regular assignments. -''', - 'await': r'''Await expression -**************** - -Suspend the execution of *coroutine* on an *awaitable* object. Can -only be used inside a *coroutine function*. - - await_expr ::= "await" primary - -Added in version 3.5. -''', - 'binary': r'''Binary arithmetic operations -**************************** - -The binary arithmetic operations have the conventional priority -levels. Note that some of these operations also apply to certain non- -numeric types. Apart from the power operator, there are only two -levels, one for multiplicative operators and one for additive -operators: - - m_expr ::= u_expr | m_expr "*" u_expr | m_expr "@" m_expr | - m_expr "//" u_expr | m_expr "/" u_expr | - m_expr "%" u_expr - a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr - -The "*" (multiplication) operator yields the product of its arguments. -The arguments must either both be numbers, or one argument must be an -integer and the other must be a sequence. In the former case, the -numbers are converted to a common type and then multiplied together. 
-In the latter case, sequence repetition is performed; a negative -repetition factor yields an empty sequence. - -This operation can be customized using the special "__mul__()" and -"__rmul__()" methods. - -The "@" (at) operator is intended to be used for matrix -multiplication. No builtin Python types implement this operator. - -This operation can be customized using the special "__matmul__()" and -"__rmatmul__()" methods. - -Added in version 3.5. - -The "/" (division) and "//" (floor division) operators yield the -quotient of their arguments. The numeric arguments are first -converted to a common type. Division of integers yields a float, while -floor division of integers results in an integer; the result is that -of mathematical division with the ‘floor’ function applied to the -result. Division by zero raises the "ZeroDivisionError" exception. - -The division operation can be customized using the special -"__truediv__()" and "__rtruediv__()" methods. The floor division -operation can be customized using the special "__floordiv__()" and -"__rfloordiv__()" methods. - -The "%" (modulo) operator yields the remainder from the division of -the first argument by the second. The numeric arguments are first -converted to a common type. A zero right argument raises the -"ZeroDivisionError" exception. The arguments may be floating-point -numbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 + -0.34".) The modulo operator always yields a result with the same sign -as its second operand (or zero); the absolute value of the result is -strictly smaller than the absolute value of the second operand [1]. - -The floor division and modulo operators are connected by the following -identity: "x == (x//y)*y + (x%y)". Floor division and modulo are also -connected with the built-in function "divmod()": "divmod(x, y) == -(x//y, x%y)". [2]. - -In addition to performing the modulo operation on numbers, the "%" -operator is also overloaded by string objects to perform old-style -string formatting (also known as interpolation). The syntax for -string formatting is described in the Python Library Reference, -section printf-style String Formatting. - -The *modulo* operation can be customized using the special "__mod__()" -and "__rmod__()" methods. - -The floor division operator, the modulo operator, and the "divmod()" -function are not defined for complex numbers. Instead, convert to a -floating-point number using the "abs()" function if appropriate. - -The "+" (addition) operator yields the sum of its arguments. The -arguments must either both be numbers or both be sequences of the same -type. In the former case, the numbers are converted to a common type -and then added together. In the latter case, the sequences are -concatenated. - -This operation can be customized using the special "__add__()" and -"__radd__()" methods. - -The "-" (subtraction) operator yields the difference of its arguments. -The numeric arguments are first converted to a common type. - -This operation can be customized using the special "__sub__()" and -"__rsub__()" methods. 
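-
-A quick numeric illustration of the floor division and modulo
-identities above, using a negative divisor:
-
-   >>> x, y = 17, -5
-   >>> x // y, x % y      # the remainder takes the sign of the divisor
-   (-4, -3)
-   >>> x == (x // y) * y + (x % y)
-   True
-   >>> divmod(x, y) == (x // y, x % y)
-   True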
-''', - 'bitwise': r'''Binary bitwise operations -************************* - -Each of the three bitwise operations has a different priority level: - - and_expr ::= shift_expr | and_expr "&" shift_expr - xor_expr ::= and_expr | xor_expr "^" and_expr - or_expr ::= xor_expr | or_expr "|" xor_expr - -The "&" operator yields the bitwise AND of its arguments, which must -be integers or one of them must be a custom object overriding -"__and__()" or "__rand__()" special methods. - -The "^" operator yields the bitwise XOR (exclusive OR) of its -arguments, which must be integers or one of them must be a custom -object overriding "__xor__()" or "__rxor__()" special methods. - -The "|" operator yields the bitwise (inclusive) OR of its arguments, -which must be integers or one of them must be a custom object -overriding "__or__()" or "__ror__()" special methods. -''', - 'bltin-code-objects': r'''Code Objects -************ - -Code objects are used by the implementation to represent “pseudo- -compiled” executable Python code such as a function body. They differ -from function objects because they don’t contain a reference to their -global execution environment. Code objects are returned by the built- -in "compile()" function and can be extracted from function objects -through their "__code__" attribute. See also the "code" module. - -Accessing "__code__" raises an auditing event "object.__getattr__" -with arguments "obj" and ""__code__"". - -A code object can be executed or evaluated by passing it (instead of a -source string) to the "exec()" or "eval()" built-in functions. - -See The standard type hierarchy for more information. -''', - 'bltin-ellipsis-object': r'''The Ellipsis Object -******************* - -This object is commonly used to indicate that something is omitted. It -supports no special operations. There is exactly one ellipsis object, -named "Ellipsis" (a built-in name). "type(Ellipsis)()" produces the -"Ellipsis" singleton. - -It is written as "Ellipsis" or "...". - -In typical use, "..." as the "Ellipsis" object appears in a few -different places, for instance: - -* In type annotations, such as callable arguments or tuple elements. - -* As the body of a function instead of a pass statement. - -* In third-party libraries, such as Numpy’s slicing and striding. - -Python also uses three dots in ways that are not "Ellipsis" objects, -for instance: - -* Doctest’s "ELLIPSIS", as a pattern for missing content. - -* The default Python prompt of the *interactive* shell when partial - input is incomplete. - -Lastly, the Python documentation often uses three dots in conventional -English usage to mean omitted content, even in code examples that also -use them as the "Ellipsis". -''', - 'bltin-null-object': r'''The Null Object -*************** - -This object is returned by functions that don’t explicitly return a -value. It supports no special operations. There is exactly one null -object, named "None" (a built-in name). "type(None)()" produces the -same singleton. - -It is written as "None". -''', - 'bltin-type-objects': r'''Type Objects -************ - -Type objects represent the various object types. An object’s type is -accessed by the built-in function "type()". There are no special -operations on types. The standard module "types" defines names for -all standard built-in types. - -Types are written like this: "<class 'int'>".
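-
-For example, at the interactive prompt:
-
-   >>> type(1)
-   <class 'int'>
-   >>> import types
-   >>> type(lambda: None) is types.FunctionType
-   True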
-''', - 'booleans': r'''Boolean operations -****************** - - or_test ::= and_test | or_test "or" and_test - and_test ::= not_test | and_test "and" not_test - not_test ::= comparison | "not" not_test - -In the context of Boolean operations, and also when expressions are -used by control flow statements, the following values are interpreted -as false: "False", "None", numeric zero of all types, and empty -strings and containers (including strings, tuples, lists, -dictionaries, sets and frozensets). All other values are interpreted -as true. User-defined objects can customize their truth value by -providing a "__bool__()" method. - -The operator "not" yields "True" if its argument is false, "False" -otherwise. - -The expression "x and y" first evaluates *x*; if *x* is false, its -value is returned; otherwise, *y* is evaluated and the resulting value -is returned. - -The expression "x or y" first evaluates *x*; if *x* is true, its value -is returned; otherwise, *y* is evaluated and the resulting value is -returned. - -Note that neither "and" nor "or" restrict the value and type they -return to "False" and "True", but rather return the last evaluated -argument. This is sometimes useful, e.g., if "s" is a string that -should be replaced by a default value if it is empty, the expression -"s or 'foo'" yields the desired value. Because "not" has to create a -new value, it returns a boolean value regardless of the type of its -argument (for example, "not 'foo'" produces "False" rather than "''".) -''', - 'break': r'''The "break" statement -********************* - - break_stmt ::= "break" - -"break" may only occur syntactically nested in a "for" or "while" -loop, but not nested in a function or class definition within that -loop. - -It terminates the nearest enclosing loop, skipping the optional "else" -clause if the loop has one. - -If a "for" loop is terminated by "break", the loop control target -keeps its current value. - -When "break" passes control out of a "try" statement with a "finally" -clause, that "finally" clause is executed before really leaving the -loop. -''', - 'callable-types': r'''Emulating callable objects -************************** - -object.__call__(self[, args...]) - - Called when the instance is “called” as a function; if this method - is defined, "x(arg1, arg2, ...)" roughly translates to - "type(x).__call__(x, arg1, ...)". The "object" class itself does - not provide this method. -''', - 'calls': r'''Calls -***** - -A call calls a callable object (e.g., a *function*) with a possibly -empty series of *arguments*: - - call ::= primary "(" [argument_list [","] | comprehension] ")" - argument_list ::= positional_arguments ["," starred_and_keywords] - ["," keywords_arguments] - | starred_and_keywords ["," keywords_arguments] - | keywords_arguments - positional_arguments ::= positional_item ("," positional_item)* - positional_item ::= assignment_expression | "*" expression - starred_and_keywords ::= ("*" expression | keyword_item) - ("," "*" expression | "," keyword_item)* - keywords_arguments ::= (keyword_item | "**" expression) - ("," keyword_item | "," "**" expression)* - keyword_item ::= identifier "=" expression - -An optional trailing comma may be present after the positional and -keyword arguments but does not affect the semantics. - -The primary must evaluate to a callable object (user-defined -functions, built-in functions, methods of built-in objects, class -objects, methods of class instances, and all objects having a -"__call__()" method are callable). 
All argument expressions are -evaluated before the call is attempted. Please refer to section -Function definitions for the syntax of formal *parameter* lists. - -If keyword arguments are present, they are first converted to -positional arguments, as follows. First, a list of unfilled slots is -created for the formal parameters. If there are N positional -arguments, they are placed in the first N slots. Next, for each -keyword argument, the identifier is used to determine the -corresponding slot (if the identifier is the same as the first formal -parameter name, the first slot is used, and so on). If the slot is -already filled, a "TypeError" exception is raised. Otherwise, the -argument is placed in the slot, filling it (even if the expression is -"None", it fills the slot). When all arguments have been processed, -the slots that are still unfilled are filled with the corresponding -default value from the function definition. (Default values are -calculated, once, when the function is defined; thus, a mutable object -such as a list or dictionary used as default value will be shared by -all calls that don’t specify an argument value for the corresponding -slot; this should usually be avoided.) If there are any unfilled -slots for which no default value is specified, a "TypeError" exception -is raised. Otherwise, the list of filled slots is used as the -argument list for the call. - -**CPython implementation detail:** An implementation may provide -built-in functions whose positional parameters do not have names, even -if they are ‘named’ for the purpose of documentation, and which -therefore cannot be supplied by keyword. In CPython, this is the case -for functions implemented in C that use "PyArg_ParseTuple()" to parse -their arguments. - -If there are more positional arguments than there are formal parameter -slots, a "TypeError" exception is raised, unless a formal parameter -using the syntax "*identifier" is present; in this case, that formal -parameter receives a tuple containing the excess positional arguments -(or an empty tuple if there were no excess positional arguments). - -If any keyword argument does not correspond to a formal parameter -name, a "TypeError" exception is raised, unless a formal parameter -using the syntax "**identifier" is present; in this case, that formal -parameter receives a dictionary containing the excess keyword -arguments (using the keywords as keys and the argument values as -corresponding values), or a (new) empty dictionary if there were no -excess keyword arguments. - -If the syntax "*expression" appears in the function call, "expression" -must evaluate to an *iterable*. Elements from these iterables are -treated as if they were additional positional arguments. For the call -"f(x1, x2, *y, x3, x4)", if *y* evaluates to a sequence *y1*, …, *yM*, -this is equivalent to a call with M+4 positional arguments *x1*, *x2*, -*y1*, …, *yM*, *x3*, *x4*. - -A consequence of this is that although the "*expression" syntax may -appear *after* explicit keyword arguments, it is processed *before* -the keyword arguments (and any "**expression" arguments – see below). -So: - - >>> def f(a, b): - ... print(a, b) - ... - >>> f(b=1, *(2,)) - 2 1 - >>> f(a=1, *(2,)) - Traceback (most recent call last): - File "<stdin>", line 1, in <module> - TypeError: f() got multiple values for keyword argument 'a' - >>> f(1, *(2,)) - 1 2 - -It is unusual for both keyword arguments and the "*expression" syntax -to be used in the same call, so in practice this confusion does not -often arise.
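-
-The note above about default values being evaluated only once can be
-seen directly (the function name here is invented for illustration):
-
-   >>> def append_to(item, bucket=[]):    # the default list is created once
-   ...     bucket.append(item)
-   ...     return bucket
-   ...
-   >>> append_to(1)
-   [1]
-   >>> append_to(2)    # the same list object is reused across calls
-   [1, 2]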
- -If the syntax "**expression" appears in the function call, -"expression" must evaluate to a *mapping*, the contents of which are -treated as additional keyword arguments. If a parameter matching a key -has already been given a value (by an explicit keyword argument, or -from another unpacking), a "TypeError" exception is raised. - -When "**expression" is used, each key in this mapping must be a -string. Each value from the mapping is assigned to the first formal -parameter eligible for keyword assignment whose name is equal to the -key. A key need not be a Python identifier (e.g. ""max-temp °F"" is -acceptable, although it will not match any formal parameter that could -be declared). If there is no match to a formal parameter the key-value -pair is collected by the "**" parameter, if there is one, or if there -is not, a "TypeError" exception is raised. - -Formal parameters using the syntax "*identifier" or "**identifier" -cannot be used as positional argument slots or as keyword argument -names. - -Changed in version 3.5: Function calls accept any number of "*" and -"**" unpackings, positional arguments may follow iterable unpackings -("*"), and keyword arguments may follow dictionary unpackings ("**"). -Originally proposed by **PEP 448**. - -A call always returns some value, possibly "None", unless it raises an -exception. How this value is computed depends on the type of the -callable object. - -If it is— - -a user-defined function: - The code block for the function is executed, passing it the - argument list. The first thing the code block will do is bind the - formal parameters to the arguments; this is described in section - Function definitions. When the code block executes a "return" - statement, this specifies the return value of the function call. - If execution reaches the end of the code block without executing a - "return" statement, the return value is "None". - -a built-in function or method: - The result is up to the interpreter; see Built-in Functions for the - descriptions of built-in functions and methods. - -a class object: - A new instance of that class is returned. - -a class instance method: - The corresponding user-defined function is called, with an argument - list that is one longer than the argument list of the call: the - instance becomes the first argument. - -a class instance: - The class must define a "__call__()" method; the effect is then the - same as if that method was called. -''', - 'class': r'''Class definitions -***************** - -A class definition defines a class object (see section The standard -type hierarchy): - - classdef ::= [decorators] "class" classname [type_params] [inheritance] ":" suite - inheritance ::= "(" [argument_list] ")" - classname ::= identifier - -A class definition is an executable statement. The inheritance list -usually gives a list of base classes (see Metaclasses for more -advanced uses), so each item in the list should evaluate to a class -object which allows subclassing. Classes without an inheritance list -inherit, by default, from the base class "object"; hence, - - class Foo: - pass - -is equivalent to - - class Foo(object): - pass - -The class’s suite is then executed in a new execution frame (see -Naming and binding), using a newly created local namespace and the -original global namespace. (Usually, the suite contains mostly -function definitions.) When the class’s suite finishes execution, its -execution frame is discarded but its local namespace is saved. 
[5] A -class object is then created using the inheritance list for the base -classes and the saved local namespace for the attribute dictionary. -The class name is bound to this class object in the original local -namespace. - -The order in which attributes are defined in the class body is -preserved in the new class’s "__dict__". Note that this is reliable -only right after the class is created and only for classes that were -defined using the definition syntax. - -Class creation can be customized heavily using metaclasses. - -Classes can also be decorated: just like when decorating functions, - - @f1(arg) - @f2 - class Foo: pass - -is roughly equivalent to - - class Foo: pass - Foo = f1(arg)(f2(Foo)) - -The evaluation rules for the decorator expressions are the same as for -function decorators. The result is then bound to the class name. - -Changed in version 3.9: Classes may be decorated with any valid -"assignment_expression". Previously, the grammar was much more -restrictive; see **PEP 614** for details. - -A list of type parameters may be given in square brackets immediately -after the class’s name. This indicates to static type checkers that -the class is generic. At runtime, the type parameters can be retrieved -from the class’s "__type_params__" attribute. See Generic classes for -more. - -Changed in version 3.12: Type parameter lists are new in Python 3.12. - -**Programmer’s note:** Variables defined in the class definition are -class attributes; they are shared by instances. Instance attributes -can be set in a method with "self.name = value". Both class and -instance attributes are accessible through the notation “"self.name"”, -and an instance attribute hides a class attribute with the same name -when accessed in this way. Class attributes can be used as defaults -for instance attributes, but using mutable values there can lead to -unexpected results. Descriptors can be used to create instance -variables with different implementation details. - -See also: - - **PEP 3115** - Metaclasses in Python 3000 - The proposal that changed the declaration of metaclasses to the - current syntax, and the semantics for how classes with - metaclasses are constructed. - - **PEP 3129** - Class Decorators - The proposal that added class decorators. Function and method - decorators were introduced in **PEP 318**. -''', - 'comparisons': r'''Comparisons -*********** - -Unlike C, all comparison operations in Python have the same priority, -which is lower than that of any arithmetic, shifting or bitwise -operation. Also unlike C, expressions like "a < b < c" have the -interpretation that is conventional in mathematics: - - comparison ::= or_expr (comp_operator or_expr)* - comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!=" - | "is" ["not"] | ["not"] "in" - -Comparisons yield boolean values: "True" or "False". Custom *rich -comparison methods* may return non-boolean values. In this case Python -will call "bool()" on such value in boolean contexts. - -Comparisons can be chained arbitrarily, e.g., "x < y <= z" is -equivalent to "x < y and y <= z", except that "y" is evaluated only -once (but in both cases "z" is not evaluated at all when "x < y" is -found to be false). - -Formally, if *a*, *b*, *c*, …, *y*, *z* are expressions and *op1*, -*op2*, …, *opN* are comparison operators, then "a op1 b op2 c ... y -opN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except -that each expression is evaluated at most once. 
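-
-For instance, the middle expression in a chain is evaluated only once
-(the helper function here is invented for illustration):
-
-   >>> def middle():
-   ...     print('evaluated')
-   ...     return 5
-   ...
-   >>> 1 < middle() <= 10
-   evaluated
-   True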
- -Note that "a op1 b op2 c" doesn’t imply any kind of comparison between -*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though -perhaps not pretty). - - -Value comparisons -================= - -The operators "<", ">", "==", ">=", "<=", and "!=" compare the values -of two objects. The objects do not need to have the same type. - -Chapter Objects, values and types states that objects have a value (in -addition to type and identity). The value of an object is a rather -abstract notion in Python: For example, there is no canonical access -method for an object’s value. Also, there is no requirement that the -value of an object should be constructed in a particular way, e.g. -comprised of all its data attributes. Comparison operators implement a -particular notion of what the value of an object is. One can think of -them as defining the value of an object indirectly, by means of their -comparison implementation. - -Because all types are (direct or indirect) subtypes of "object", they -inherit the default comparison behavior from "object". Types can -customize their comparison behavior by implementing *rich comparison -methods* like "__lt__()", described in Basic customization. - -The default behavior for equality comparison ("==" and "!=") is based -on the identity of the objects. Hence, equality comparison of -instances with the same identity results in equality, and equality -comparison of instances with different identities results in -inequality. A motivation for this default behavior is the desire that -all objects should be reflexive (i.e. "x is y" implies "x == y"). - -A default order comparison ("<", ">", "<=", and ">=") is not provided; -an attempt raises "TypeError". A motivation for this default behavior -is the lack of a similar invariant as for equality. - -The behavior of the default equality comparison, that instances with -different identities are always unequal, may be in contrast to what -types will need that have a sensible definition of object value and -value-based equality. Such types will need to customize their -comparison behavior, and in fact, a number of built-in types have done -that. - -The following list describes the comparison behavior of the most -important built-in types. - -* Numbers of built-in numeric types (Numeric Types — int, float, - complex) and of the standard library types "fractions.Fraction" and - "decimal.Decimal" can be compared within and across their types, - with the restriction that complex numbers do not support order - comparison. Within the limits of the types involved, they compare - mathematically (algorithmically) correct without loss of precision. - - The not-a-number values "float('NaN')" and "decimal.Decimal('NaN')" - are special. Any ordered comparison of a number to a not-a-number - value is false. A counter-intuitive implication is that not-a-number - values are not equal to themselves. For example, if "x = - float('NaN')", "3 < x", "x < 3" and "x == x" are all false, while "x - != x" is true. This behavior is compliant with IEEE 754. - -* "None" and "NotImplemented" are singletons. **PEP 8** advises that - comparisons for singletons should always be done with "is" or "is - not", never the equality operators. - -* Binary sequences (instances of "bytes" or "bytearray") can be - compared within and across their types. They compare - lexicographically using the numeric values of their elements. 
- -* Strings (instances of "str") compare lexicographically using the - numerical Unicode code points (the result of the built-in function - "ord()") of their characters. [3] - - Strings and binary sequences cannot be directly compared. - -* Sequences (instances of "tuple", "list", or "range") can be compared - only within each of their types, with the restriction that ranges do - not support order comparison. Equality comparison across these - types results in inequality, and ordering comparison across these - types raises "TypeError". - - Sequences compare lexicographically using comparison of - corresponding elements. The built-in containers typically assume - identical objects are equal to themselves. That lets them bypass - equality tests for identical objects to improve performance and to - maintain their internal invariants. - - Lexicographical comparison between built-in collections works as - follows: - - * For two collections to compare equal, they must be of the same - type, have the same length, and each pair of corresponding - elements must compare equal (for example, "[1,2] == (1,2)" is - false because the type is not the same). - - * Collections that support order comparison are ordered the same as - their first unequal elements (for example, "[1,2,x] <= [1,2,y]" - has the same value as "x <= y"). If a corresponding element does - not exist, the shorter collection is ordered first (for example, - "[1,2] < [1,2,3]" is true). - -* Mappings (instances of "dict") compare equal if and only if they - have equal "(key, value)" pairs. Equality comparison of the keys and - values enforces reflexivity. - - Order comparisons ("<", ">", "<=", and ">=") raise "TypeError". - -* Sets (instances of "set" or "frozenset") can be compared within and - across their types. - - They define order comparison operators to mean subset and superset - tests. Those relations do not define total orderings (for example, - the two sets "{1,2}" and "{2,3}" are not equal, nor subsets of one - another, nor supersets of one another). Accordingly, sets are not - appropriate arguments for functions which depend on total ordering - (for example, "min()", "max()", and "sorted()" produce undefined - results given a list of sets as inputs). - - Comparison of sets enforces reflexivity of its elements. - -* Most other built-in types have no comparison methods implemented, so - they inherit the default comparison behavior. - -User-defined classes that customize their comparison behavior should -follow some consistency rules, if possible: - -* Equality comparison should be reflexive. In other words, identical - objects should compare equal: - - "x is y" implies "x == y" - -* Comparison should be symmetric. In other words, the following - expressions should have the same result: - - "x == y" and "y == x" - - "x != y" and "y != x" - - "x < y" and "y > x" - - "x <= y" and "y >= x" - -* Comparison should be transitive. The following (non-exhaustive) - examples illustrate that: - - "x > y and y > z" implies "x > z" - - "x < y and y <= z" implies "x < z" - -* Inverse comparison should result in the boolean negation. In other - words, the following expressions should have the same result: - - "x == y" and "not x != y" - - "x < y" and "not x >= y" (for total ordering) - - "x > y" and "not x <= y" (for total ordering) - - The last two expressions apply to totally ordered collections (e.g. - to sequences, but not to sets or mappings). See also the - "total_ordering()" decorator. 
- -* The "hash()" result should be consistent with equality. Objects that - are equal should either have the same hash value, or be marked as - unhashable. - -Python does not enforce these consistency rules. In fact, the -not-a-number values are an example for not following these rules. - - -Membership test operations -========================== - -The operators "in" and "not in" test for membership. "x in s" -evaluates to "True" if *x* is a member of *s*, and "False" otherwise. -"x not in s" returns the negation of "x in s". All built-in sequences -and set types support this as well as dictionary, for which "in" tests -whether the dictionary has a given key. For container types such as -list, tuple, set, frozenset, dict, or collections.deque, the -expression "x in y" is equivalent to "any(x is e or x == e for e in -y)". - -For the string and bytes types, "x in y" is "True" if and only if *x* -is a substring of *y*. An equivalent test is "y.find(x) != -1". -Empty strings are always considered to be a substring of any other -string, so """ in "abc"" will return "True". - -For user-defined classes which define the "__contains__()" method, "x -in y" returns "True" if "y.__contains__(x)" returns a true value, and -"False" otherwise. - -For user-defined classes which do not define "__contains__()" but do -define "__iter__()", "x in y" is "True" if some value "z", for which -the expression "x is z or x == z" is true, is produced while iterating -over "y". If an exception is raised during the iteration, it is as if -"in" raised that exception. - -Lastly, the old-style iteration protocol is tried: if a class defines -"__getitem__()", "x in y" is "True" if and only if there is a non- -negative integer index *i* such that "x is y[i] or x == y[i]", and no -lower integer index raises the "IndexError" exception. (If any other -exception is raised, it is as if "in" raised that exception). - -The operator "not in" is defined to have the inverse truth value of -"in". - - -Identity comparisons -==================== - -The operators "is" and "is not" test for an object’s identity: "x is -y" is true if and only if *x* and *y* are the same object. An -Object’s identity is determined using the "id()" function. "x is not -y" yields the inverse truth value. [4] -''', - 'compound': r'''Compound statements -******************* - -Compound statements contain (groups of) other statements; they affect -or control the execution of those other statements in some way. In -general, compound statements span multiple lines, although in simple -incarnations a whole compound statement may be contained in one line. - -The "if", "while" and "for" statements implement traditional control -flow constructs. "try" specifies exception handlers and/or cleanup -code for a group of statements, while the "with" statement allows the -execution of initialization and finalization code around a block of -code. Function and class definitions are also syntactically compound -statements. - -A compound statement consists of one or more ‘clauses.’ A clause -consists of a header and a ‘suite.’ The clause headers of a -particular compound statement are all at the same indentation level. -Each clause header begins with a uniquely identifying keyword and ends -with a colon. A suite is a group of statements controlled by a -clause. A suite can be one or more semicolon-separated simple -statements on the same line as the header, following the header’s -colon, or it can be one or more indented statements on subsequent -lines. 
Only the latter form of a suite can contain nested compound -statements; the following is illegal, mostly because it wouldn’t be -clear to which "if" clause a following "else" clause would belong: - - if test1: if test2: print(x) - -Also note that the semicolon binds tighter than the colon in this -context, so that in the following example, either all or none of the -"print()" calls are executed: - - if x < y < z: print(x); print(y); print(z) - -Summarizing: - - compound_stmt ::= if_stmt - | while_stmt - | for_stmt - | try_stmt - | with_stmt - | match_stmt - | funcdef - | classdef - | async_with_stmt - | async_for_stmt - | async_funcdef - suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT - statement ::= stmt_list NEWLINE | compound_stmt - stmt_list ::= simple_stmt (";" simple_stmt)* [";"] - -Note that statements always end in a "NEWLINE" possibly followed by a -"DEDENT". Also note that optional continuation clauses always begin -with a keyword that cannot start a statement, thus there are no -ambiguities (the ‘dangling "else"’ problem is solved in Python by -requiring nested "if" statements to be indented). - -The formatting of the grammar rules in the following sections places -each clause on a separate line for clarity. - - -The "if" statement -================== - -The "if" statement is used for conditional execution: - - if_stmt ::= "if" assignment_expression ":" suite - ("elif" assignment_expression ":" suite)* - ["else" ":" suite] - -It selects exactly one of the suites by evaluating the expressions one -by one until one is found to be true (see section Boolean operations -for the definition of true and false); then that suite is executed -(and no other part of the "if" statement is executed or evaluated). -If all expressions are false, the suite of the "else" clause, if -present, is executed. - - -The "while" statement -===================== - -The "while" statement is used for repeated execution as long as an -expression is true: - - while_stmt ::= "while" assignment_expression ":" suite - ["else" ":" suite] - -This repeatedly tests the expression and, if it is true, executes the -first suite; if the expression is false (which may be the first time -it is tested) the suite of the "else" clause, if present, is executed -and the loop terminates. - -A "break" statement executed in the first suite terminates the loop -without executing the "else" clause’s suite. A "continue" statement -executed in the first suite skips the rest of the suite and goes back -to testing the expression. - - -The "for" statement -=================== - -The "for" statement is used to iterate over the elements of a sequence -(such as a string, tuple or list) or other iterable object: - - for_stmt ::= "for" target_list "in" `!starred_list` ":" suite - ["else" ":" suite] - -The "starred_list" expression is evaluated once; it should yield an -*iterable* object. An *iterator* is created for that iterable. The -first item provided by the iterator is then assigned to the target -list using the standard rules for assignments (see Assignment -statements), and the suite is executed. This repeats for each item -provided by the iterator. When the iterator is exhausted, the suite -in the "else" clause, if present, is executed, and the loop -terminates. - -A "break" statement executed in the first suite terminates the loop -without executing the "else" clause’s suite. 
A "continue" statement -executed in the first suite skips the rest of the suite and continues -with the next item, or with the "else" clause if there is no next -item. - -The for-loop makes assignments to the variables in the target list. -This overwrites all previous assignments to those variables including -those made in the suite of the for-loop: - - for i in range(10): - print(i) - i = 5 # this will not affect the for-loop - # because i will be overwritten with the next - # index in the range - -Names in the target list are not deleted when the loop is finished, -but if the sequence is empty, they will not have been assigned to at -all by the loop. Hint: the built-in type "range()" represents -immutable arithmetic sequences of integers. For instance, iterating -"range(3)" successively yields 0, 1, and then 2. - -Changed in version 3.11: Starred elements are now allowed in the -expression list. - - -The "try" statement -=================== - -The "try" statement specifies exception handlers and/or cleanup code -for a group of statements: - - try_stmt ::= try1_stmt | try2_stmt | try3_stmt - try1_stmt ::= "try" ":" suite - ("except" [expression ["as" identifier]] ":" suite)+ - ["else" ":" suite] - ["finally" ":" suite] - try2_stmt ::= "try" ":" suite - ("except" "*" expression ["as" identifier] ":" suite)+ - ["else" ":" suite] - ["finally" ":" suite] - try3_stmt ::= "try" ":" suite - "finally" ":" suite - -Additional information on exceptions can be found in section -Exceptions, and information on using the "raise" statement to generate -exceptions may be found in section The raise statement. - - -"except" clause ---------------- - -The "except" clause(s) specify one or more exception handlers. When no -exception occurs in the "try" clause, no exception handler is -executed. When an exception occurs in the "try" suite, a search for an -exception handler is started. This search inspects the "except" -clauses in turn until one is found that matches the exception. An -expression-less "except" clause, if present, must be last; it matches -any exception. - -For an "except" clause with an expression, the expression must -evaluate to an exception type or a tuple of exception types. The -raised exception matches an "except" clause whose expression evaluates -to the class or a *non-virtual base class* of the exception object, or -to a tuple that contains such a class. - -If no "except" clause matches the exception, the search for an -exception handler continues in the surrounding code and on the -invocation stack. [1] - -If the evaluation of an expression in the header of an "except" clause -raises an exception, the original search for a handler is canceled and -a search starts for the new exception in the surrounding code and on -the call stack (it is treated as if the entire "try" statement raised -the exception). - -When a matching "except" clause is found, the exception is assigned to -the target specified after the "as" keyword in that "except" clause, -if present, and the "except" clause’s suite is executed. All "except" -clauses must have an executable block. When the end of this block is -reached, execution continues normally after the entire "try" -statement. (This means that if two nested handlers exist for the same -exception, and the exception occurs in the "try" clause of the inner -handler, the outer handler will not handle the exception.) - -When an exception has been assigned using "as target", it is cleared -at the end of the "except" clause. 
This is as if - - except E as N: - foo - -was translated to - - except E as N: - try: - foo - finally: - del N - -This means the exception must be assigned to a different name to be -able to refer to it after the "except" clause. Exceptions are cleared -because with the traceback attached to them, they form a reference -cycle with the stack frame, keeping all locals in that frame alive -until the next garbage collection occurs. - -Before an "except" clause’s suite is executed, the exception is stored -in the "sys" module, where it can be accessed from within the body of -the "except" clause by calling "sys.exception()". When leaving an -exception handler, the exception stored in the "sys" module is reset -to its previous value: - - >>> print(sys.exception()) - None - >>> try: - ... raise TypeError - ... except: - ... print(repr(sys.exception())) - ... try: - ... raise ValueError - ... except: - ... print(repr(sys.exception())) - ... print(repr(sys.exception())) - ... - TypeError() - ValueError() - TypeError() - >>> print(sys.exception()) - None - - -"except*" clause ----------------- - -The "except*" clause(s) specify one or more handlers for groups of -exceptions ("BaseExceptionGroup" instances). A "try" statement can -have either "except" or "except*" clauses, but not both. The exception -type for matching is mandatory in the case of "except*", so "except*:" -is a syntax error. The type is interpreted as in the case of "except", -but matching is performed on the exceptions contained in the group -that is being handled. A "TypeError" is raised if a matching type is -a subclass of "BaseExceptionGroup", because that would have ambiguous -semantics. - -When an exception group is raised in the try block, each "except*" -clause splits (see "split()") it into the subgroups of matching and -non-matching exceptions. If the matching subgroup is not empty, it -becomes the handled exception (the value returned from -"sys.exception()") and assigned to the target of the "except*" clause -(if there is one). Then, the body of the "except*" clause executes. If -the non-matching subgroup is not empty, it is processed by the next -"except*" in the same manner. This continues until all exceptions in -the group have been matched, or the last "except*" clause has run. - -After all "except*" clauses execute, the group of unhandled exceptions -is merged with any exceptions that were raised or re-raised from -within "except*" clauses. This merged exception group propagates on: - - >>> try: - ... raise ExceptionGroup("eg", - ... [ValueError(1), TypeError(2), OSError(3), OSError(4)]) - ... except* TypeError as e: - ... print(f'caught {type(e)} with nested {e.exceptions}') - ... except* OSError as e: - ... print(f'caught {type(e)} with nested {e.exceptions}') - ... - caught <class 'ExceptionGroup'> with nested (TypeError(2),) - caught <class 'ExceptionGroup'> with nested (OSError(3), OSError(4)) - + Exception Group Traceback (most recent call last): - | File "<stdin>", line 2, in <module> - | raise ExceptionGroup("eg", - | [ValueError(1), TypeError(2), OSError(3), OSError(4)]) - | ExceptionGroup: eg (1 sub-exception) - +-+---------------- 1 ---------------- - | ValueError: 1 - +------------------------------------ - -If the exception raised from the "try" block is not an exception group -and its type matches one of the "except*" clauses, it is caught and -wrapped by an exception group with an empty message string. This -ensures that the type of the target "e" is consistently -"BaseExceptionGroup": - - >>> try: - ... raise BlockingIOError - ...
except* BlockingIOError as e: - ... print(repr(e)) - ... - ExceptionGroup('', (BlockingIOError(),)) - -"break", "continue" and "return" cannot appear in an "except*" clause. - - -"else" clause -------------- - -The optional "else" clause is executed if the control flow leaves the -"try" suite, no exception was raised, and no "return", "continue", or -"break" statement was executed. Exceptions in the "else" clause are -not handled by the preceding "except" clauses. - - -"finally" clause ----------------- - -If "finally" is present, it specifies a ‘cleanup’ handler. The "try" -clause is executed, including any "except" and "else" clauses. If an -exception occurs in any of the clauses and is not handled, the -exception is temporarily saved. The "finally" clause is executed. If -there is a saved exception it is re-raised at the end of the "finally" -clause. If the "finally" clause raises another exception, the saved -exception is set as the context of the new exception. If the "finally" -clause executes a "return", "break" or "continue" statement, the saved -exception is discarded: - - >>> def f(): - ... try: - ... 1/0 - ... finally: - ... return 42 - ... - >>> f() - 42 - -The exception information is not available to the program during -execution of the "finally" clause. - -When a "return", "break" or "continue" statement is executed in the -"try" suite of a "try"…"finally" statement, the "finally" clause is -also executed ‘on the way out.’ - -The return value of a function is determined by the last "return" -statement executed. Since the "finally" clause always executes, a -"return" statement executed in the "finally" clause will always be the -last one executed: - - >>> def foo(): - ... try: - ... return 'try' - ... finally: - ... return 'finally' - ... - >>> foo() - 'finally' - -Changed in version 3.8: Prior to Python 3.8, a "continue" statement -was illegal in the "finally" clause due to a problem with the -implementation. - - -The "with" statement -==================== - -The "with" statement is used to wrap the execution of a block with -methods defined by a context manager (see section With Statement -Context Managers). This allows common "try"…"except"…"finally" usage -patterns to be encapsulated for convenient reuse. - - with_stmt ::= "with" ( "(" with_stmt_contents ","? ")" | with_stmt_contents ) ":" suite - with_stmt_contents ::= with_item ("," with_item)* - with_item ::= expression ["as" target] - -The execution of the "with" statement with one “item” proceeds as -follows: - -1. The context expression (the expression given in the "with_item") is - evaluated to obtain a context manager. - -2. The context manager’s "__enter__()" is loaded for later use. - -3. The context manager’s "__exit__()" is loaded for later use. - -4. The context manager’s "__enter__()" method is invoked. - -5. If a target was included in the "with" statement, the return value - from "__enter__()" is assigned to it. - - Note: - - The "with" statement guarantees that if the "__enter__()" method - returns without an error, then "__exit__()" will always be - called. Thus, if an error occurs during the assignment to the - target list, it will be treated the same as an error occurring - within the suite would be. See step 7 below. - -6. The suite is executed. - -7. The context manager’s "__exit__()" method is invoked. If an - exception caused the suite to be exited, its type, value, and - traceback are passed as arguments to "__exit__()". Otherwise, three - "None" arguments are supplied. 
- - If the suite was exited due to an exception, and the return value - from the "__exit__()" method was false, the exception is reraised. - If the return value was true, the exception is suppressed, and - execution continues with the statement following the "with" - statement. - - If the suite was exited for any reason other than an exception, the - return value from "__exit__()" is ignored, and execution proceeds - at the normal location for the kind of exit that was taken. - -The following code: - - with EXPRESSION as TARGET: - SUITE - -is semantically equivalent to: - - manager = (EXPRESSION) - enter = manager.__enter__ - exit = manager.__exit__ - value = enter() - hit_except = False - - try: - TARGET = value - SUITE - except: - hit_except = True - if not exit(*sys.exc_info()): - raise - finally: - if not hit_except: - exit(None, None, None) - -except that implicit special method lookup is used for "__enter__()" -and "__exit__()". - -With more than one item, the context managers are processed as if -multiple "with" statements were nested: - - with A() as a, B() as b: - SUITE - -is semantically equivalent to: - - with A() as a: - with B() as b: - SUITE - -You can also write multi-item context managers in multiple lines if -the items are surrounded by parentheses. For example: - - with ( - A() as a, - B() as b, - ): - SUITE - -Changed in version 3.1: Support for multiple context expressions. - -Changed in version 3.10: Support for using grouping parentheses to -break the statement in multiple lines. - -See also: - - **PEP 343** - The “with” statement - The specification, background, and examples for the Python "with" - statement. - - -The "match" statement -===================== - -Added in version 3.10. - -The match statement is used for pattern matching. Syntax: - - match_stmt ::= 'match' subject_expr ":" NEWLINE INDENT case_block+ DEDENT - subject_expr ::= `!star_named_expression` "," `!star_named_expressions`? - | `!named_expression` - case_block ::= 'case' patterns [guard] ":" `!block` - -Note: - - This section uses single quotes to denote soft keywords. - -Pattern matching takes a pattern as input (following "case") and a -subject value (following "match"). The pattern (which may contain -subpatterns) is matched against the subject value. The outcomes are: - -* A match success or failure (also termed a pattern success or - failure). - -* Possible binding of matched values to a name. The prerequisites for - this are further discussed below. - -The "match" and "case" keywords are soft keywords. - -See also: - - * **PEP 634** – Structural Pattern Matching: Specification - - * **PEP 636** – Structural Pattern Matching: Tutorial - - -Overview --------- - -Here’s an overview of the logical flow of a match statement: - -1. The subject expression "subject_expr" is evaluated and a resulting - subject value obtained. If the subject expression contains a comma, - a tuple is constructed using the standard rules. - -2. Each pattern in a "case_block" is attempted to match with the - subject value. The specific rules for success or failure are - described below. The match attempt can also bind some or all of the - standalone names within the pattern. The precise pattern binding - rules vary per pattern type and are specified below. **Name - bindings made during a successful pattern match outlive the - executed block and can be used after the match statement**. - - Note: - - During failed pattern matches, some subpatterns may succeed. Do - not rely on bindings being made for a failed match. 
Conversely, - do not rely on variables remaining unchanged after a failed - match. The exact behavior is dependent on implementation and may - vary. This is an intentional decision made to allow different - implementations to add optimizations. - -3. If the pattern succeeds, the corresponding guard (if present) is - evaluated. In this case all name bindings are guaranteed to have - happened. - - * If the guard evaluates as true or is missing, the "block" inside - "case_block" is executed. - - * Otherwise, the next "case_block" is attempted as described above. - - * If there are no further case blocks, the match statement is - completed. - -Note: - - Users should generally never rely on a pattern being evaluated. - Depending on implementation, the interpreter may cache values or use - other optimizations which skip repeated evaluations. - -A sample match statement: - - >>> flag = False - >>> match (100, 200): - ... case (100, 300): # Mismatch: 200 != 300 - ... print('Case 1') - ... case (100, 200) if flag: # Successful match, but guard fails - ... print('Case 2') - ... case (100, y): # Matches and binds y to 200 - ... print(f'Case 3, y: {y}') - ... case _: # Pattern not attempted - ... print('Case 4, I match anything!') - ... - Case 3, y: 200 - -In this case, "if flag" is a guard. Read more about that in the next -section. - - -Guards ------- - - guard ::= "if" `!named_expression` - -A "guard" (which is part of the "case") must succeed for code inside -the "case" block to execute. It takes the form: "if" followed by an -expression. - -The logical flow of a "case" block with a "guard" follows: - -1. Check that the pattern in the "case" block succeeded. If the - pattern failed, the "guard" is not evaluated and the next "case" - block is checked. - -2. If the pattern succeeded, evaluate the "guard". - - * If the "guard" condition evaluates as true, the case block is - selected. - - * If the "guard" condition evaluates as false, the case block is - not selected. - - * If the "guard" raises an exception during evaluation, the - exception bubbles up. - -Guards are allowed to have side effects as they are expressions. -Guard evaluation must proceed from the first to the last case block, -one at a time, skipping case blocks whose pattern(s) don’t all -succeed. (I.e., guard evaluation must happen in order.) Guard -evaluation must stop once a case block is selected. - - -Irrefutable Case Blocks ------------------------ - -An irrefutable case block is a match-all case block. A match -statement may have at most one irrefutable case block, and it must be -last. - -A case block is considered irrefutable if it has no guard and its -pattern is irrefutable. A pattern is considered irrefutable if we can -prove from its syntax alone that it will always succeed. 
Only the -following patterns are irrefutable: - -* AS Patterns whose left-hand side is irrefutable - -* OR Patterns containing at least one irrefutable pattern - -* Capture Patterns - -* Wildcard Patterns - -* parenthesized irrefutable patterns - - -Patterns -------- - -Note: - - This section uses grammar notations beyond standard EBNF: - - * the notation "SEP.RULE+" is shorthand for "RULE (SEP RULE)*" - - * the notation "!RULE" is shorthand for a negative lookahead - assertion - -The top-level syntax for "patterns" is: - - patterns ::= open_sequence_pattern | pattern - pattern ::= as_pattern | or_pattern - closed_pattern ::= | literal_pattern - | capture_pattern - | wildcard_pattern - | value_pattern - | group_pattern - | sequence_pattern - | mapping_pattern - | class_pattern - -The descriptions below will include a description “in simple terms” of -what a pattern does for illustration purposes (credits to Raymond -Hettinger for a document that inspired most of the descriptions). Note -that these descriptions are purely for illustration purposes and **may -not** reflect the underlying implementation. Furthermore, they do not -cover all valid forms. - - -OR Patterns -~~~~~~~~~~~ - -An OR pattern is two or more patterns separated by vertical bars "|". -Syntax: - - or_pattern ::= "|".closed_pattern+ - -Only the final subpattern may be irrefutable, and each subpattern must -bind the same set of names to avoid ambiguity. - -An OR pattern matches each of its subpatterns in turn to the subject -value, until one succeeds. The OR pattern is then considered -successful. Otherwise, if none of the subpatterns succeed, the OR -pattern fails. - -In simple terms, "P1 | P2 | ..." will try to match "P1", if it fails -it will try to match "P2", succeeding immediately if any succeeds, -failing otherwise. - - -AS Patterns -~~~~~~~~~~~ - -An AS pattern matches an OR pattern on the left of the "as" keyword -against a subject. Syntax: - - as_pattern ::= or_pattern "as" capture_pattern - -If the OR pattern fails, the AS pattern fails. Otherwise, the AS -pattern binds the subject to the name on the right of the as keyword -and succeeds. "capture_pattern" cannot be a "_". - -In simple terms "P as NAME" will match with "P", and on success it -will set "NAME = <subject>". - - -Literal Patterns -~~~~~~~~~~~~~~~~ - -A literal pattern corresponds to most literals in Python. Syntax: - - literal_pattern ::= signed_number - | signed_number "+" NUMBER - | signed_number "-" NUMBER - | `!strings` - | "None" - | "True" - | "False" - signed_number ::= ["-"] NUMBER - -The rule "strings" and the token "NUMBER" are defined in the standard -Python grammar. Triple-quoted strings are supported. Raw strings and -byte strings are supported. f-strings are not supported. - -The forms "signed_number '+' NUMBER" and "signed_number '-' NUMBER" -are for expressing complex numbers; they require a real number on the -left and an imaginary number on the right. E.g. "3 + 4j". - -In simple terms, "LITERAL" will succeed only if "<subject> == -LITERAL". For the singletons "None", "True" and "False", the "is" -operator is used. - - -Capture Patterns -~~~~~~~~~~~~~~~~ - -A capture pattern binds the subject value to a name. Syntax: - - capture_pattern ::= !'_' NAME - -A single underscore "_" is not a capture pattern (this is what "!'_'" -expresses). It is instead treated as a "wildcard_pattern". - -In a given pattern, a given name can only be bound once. E.g. "case -x, x: ..." is invalid while "case [x] | x: ..." is allowed. - -Capture patterns always succeed.
The binding follows scoping rules -established by the assignment expression operator in **PEP 572**; the -name becomes a local variable in the closest containing function scope -unless there’s an applicable "global" or "nonlocal" statement. - -In simple terms "NAME" will always succeed and it will set "NAME = -<subject>". - - -Wildcard Patterns -~~~~~~~~~~~~~~~~~ - -A wildcard pattern always succeeds (matches anything) and binds no -name. Syntax: - - wildcard_pattern ::= '_' - -"_" is a soft keyword within any pattern, but only within patterns. -It is an identifier, as usual, even within "match" subject -expressions, "guard"s, and "case" blocks. - -In simple terms, "_" will always succeed. - - -Value Patterns -~~~~~~~~~~~~~~ - -A value pattern represents a named value in Python. Syntax: - - value_pattern ::= attr - attr ::= name_or_attr "." NAME - name_or_attr ::= attr | NAME - -The dotted name in the pattern is looked up using standard Python name -resolution rules. The pattern succeeds if the value found compares -equal to the subject value (using the "==" equality operator). - -In simple terms "NAME1.NAME2" will succeed only if "<subject> == -NAME1.NAME2" - -Note: - - If the same value occurs multiple times in the same match statement, - the interpreter may cache the first value found and reuse it rather - than repeat the same lookup. This cache is strictly tied to a given - execution of a given match statement. - - -Group Patterns -~~~~~~~~~~~~~~ - -A group pattern allows users to add parentheses around patterns to -emphasize the intended grouping. Otherwise, it has no additional -syntax. Syntax: - - group_pattern ::= "(" pattern ")" - -In simple terms "(P)" has the same effect as "P". - - -Sequence Patterns -~~~~~~~~~~~~~~~~~ - -A sequence pattern contains several subpatterns to be matched against -sequence elements. The syntax is similar to the unpacking of a list or -tuple. - - sequence_pattern ::= "[" [maybe_sequence_pattern] "]" - | "(" [open_sequence_pattern] ")" - open_sequence_pattern ::= maybe_star_pattern "," [maybe_sequence_pattern] - maybe_sequence_pattern ::= ",".maybe_star_pattern+ ","? - maybe_star_pattern ::= star_pattern | pattern - star_pattern ::= "*" (capture_pattern | wildcard_pattern) - -There is no difference if parentheses or square brackets are used for -sequence patterns (i.e. "(...)" vs "[...]" ). - -Note: - - A single pattern enclosed in parentheses without a trailing comma - (e.g. "(3 | 4)") is a group pattern. While a single pattern enclosed - in square brackets (e.g. "[3 | 4]") is still a sequence pattern. - -At most one star subpattern may be in a sequence pattern. The star -subpattern may occur in any position. If no star subpattern is -present, the sequence pattern is a fixed-length sequence pattern; -otherwise it is a variable-length sequence pattern. - -The following is the logical flow for matching a sequence pattern -against a subject value: - -1. If the subject value is not a sequence [2], the sequence pattern - fails. - -2. If the subject value is an instance of "str", "bytes" or - "bytearray" the sequence pattern fails. - -3. The subsequent steps depend on whether the sequence pattern is - fixed or variable-length. - - If the sequence pattern is fixed-length: - - 1. If the length of the subject sequence is not equal to the number - of subpatterns, the sequence pattern fails - - 2. Subpatterns in the sequence pattern are matched to their - corresponding items in the subject sequence from left to right. - Matching stops as soon as a subpattern fails.
If all - subpatterns succeed in matching their corresponding item, the - sequence pattern succeeds. - - Otherwise, if the sequence pattern is variable-length: - - 1. If the length of the subject sequence is less than the number of - non-star subpatterns, the sequence pattern fails. - - 2. The leading non-star subpatterns are matched to their - corresponding items as for fixed-length sequences. - - 3. If the previous step succeeds, the star subpattern matches a - list formed of the remaining subject items, excluding the - remaining items corresponding to non-star subpatterns following - the star subpattern. - - 4. Remaining non-star subpatterns are matched to their - corresponding subject items, as for a fixed-length sequence. - - Note: - - The length of the subject sequence is obtained via "len()" (i.e. - via the "__len__()" protocol). This length may be cached by the - interpreter in a similar manner as value patterns. - -In simple terms "[P1, P2, P3," … ", P<N>]" matches only if all the -following happens: - -* check "<subject>" is a sequence - -* "len(subject) == <N>" - -* "P1" matches "<subject>[0]" (note that this match can also bind - names) - -* "P2" matches "<subject>[1]" (note that this match can also bind - names) - -* … and so on for the corresponding pattern/element. - - -Mapping Patterns -~~~~~~~~~~~~~~~~ - -A mapping pattern contains one or more key-value patterns. The syntax -is similar to the construction of a dictionary. Syntax: - - mapping_pattern ::= "{" [items_pattern] "}" - items_pattern ::= ",".key_value_pattern+ ","? - key_value_pattern ::= (literal_pattern | value_pattern) ":" pattern - | double_star_pattern - double_star_pattern ::= "**" capture_pattern - -At most one double star pattern may be in a mapping pattern. The -double star pattern must be the last subpattern in the mapping -pattern. - -Duplicate keys in mapping patterns are disallowed. Duplicate literal -keys will raise a "SyntaxError". Two keys that otherwise have the same -value will raise a "ValueError" at runtime. - -The following is the logical flow for matching a mapping pattern -against a subject value: - -1. If the subject value is not a mapping [3], the mapping pattern - fails. - -2. If every key given in the mapping pattern is present in the subject - mapping, and the pattern for each key matches the corresponding - item of the subject mapping, the mapping pattern succeeds. - -3. If duplicate keys are detected in the mapping pattern, the pattern - is considered invalid. A "SyntaxError" is raised for duplicate - literal values; or a "ValueError" for named keys of the same value. - -Note: - - Key-value pairs are matched using the two-argument form of the - mapping subject’s "get()" method. Matched key-value pairs must - already be present in the mapping, and not created on-the-fly via - "__missing__()" or "__getitem__()". - -In simple terms "{KEY1: P1, KEY2: P2, ... }" matches only if all the -following happens: - -* check "<subject>" is a mapping - -* "KEY1 in <subject>" - -* "P1" matches "<subject>[KEY1]" - -* … and so on for the corresponding KEY/pattern pair. - - -Class Patterns -~~~~~~~~~~~~~~ - -A class pattern represents a class and its positional and keyword -arguments (if any). Syntax: - - class_pattern ::= name_or_attr "(" [pattern_arguments ","?] ")" - pattern_arguments ::= positional_patterns ["," keyword_patterns] - | keyword_patterns - positional_patterns ::= ",".pattern+ - keyword_patterns ::= ",".keyword_pattern+ - keyword_pattern ::= NAME "=" pattern - -The same keyword should not be repeated in class patterns.
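-
-Before the class-pattern matching flow below, here is a brief, hedged
-sketch tying together the sequence and mapping patterns described
-above (the "describe()" function and its subject values are invented
-for this illustration):
-
-   def describe(subject):
-       match subject:
-           case [x, y, *rest]:             # variable-length sequence pattern
-               return f"sequence: {x}, {y}, then {len(rest)} more"
-           case {"name": name, **extra}:   # mapping pattern with double star
-               return f"mapping named {name!r}, {len(extra)} extra keys"
-           case _:                         # wildcard: irrefutable
-               return "no match"
-
-   print(describe([1, 2, 3, 4]))               # sequence: 1, 2, then 2 more
-   print(describe({"name": "spam", "id": 7}))  # mapping named 'spam', 1 extra keys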
- -The following is the logical flow for matching a class pattern against -a subject value: - -1. If "name_or_attr" is not an instance of the builtin "type", raise - "TypeError". - -2. If the subject value is not an instance of "name_or_attr" (tested - via "isinstance()"), the class pattern fails. - -3. If no pattern arguments are present, the pattern succeeds. - Otherwise, the subsequent steps depend on whether keyword or - positional argument patterns are present. - - For a number of built-in types (specified below), a single - positional subpattern is accepted which will match the entire - subject; for these types keyword patterns also work as for other - types. - - If only keyword patterns are present, they are processed as - follows, one by one: - - 1. The keyword is looked up as an attribute on the subject. - - * If this raises an exception other than "AttributeError", the - exception bubbles up. - - * If this raises "AttributeError", the class pattern has failed. - - * Else, the subpattern associated with the keyword pattern is - matched against the subject’s attribute value. If this fails, - the class pattern fails; if this succeeds, the match proceeds - to the next keyword. - - 2. If all keyword patterns succeed, the class pattern succeeds. - - If any positional patterns are present, they are converted to - keyword patterns using the "__match_args__" attribute on the class - "name_or_attr" before matching: - - 1. The equivalent of "getattr(cls, "__match_args__", ())" is - called. - - * If this raises an exception, the exception bubbles up. - - * If the returned value is not a tuple, the conversion fails and - "TypeError" is raised. - - * If there are more positional patterns than - "len(cls.__match_args__)", "TypeError" is raised. - - * Otherwise, positional pattern "i" is converted to a keyword - pattern using "__match_args__[i]" as the keyword. - "__match_args__[i]" must be a string; if not "TypeError" is - raised. - - * If there are duplicate keywords, "TypeError" is raised. - - See also: - - Customizing positional arguments in class pattern matching - - 2. Once all positional patterns have been converted to keyword - patterns, the match proceeds as if there were only keyword - patterns. - - For the following built-in types the handling of positional - subpatterns is different: - - * "bool" - - * "bytearray" - - * "bytes" - - * "dict" - - * "float" - - * "frozenset" - - * "int" - - * "list" - - * "set" - - * "str" - - * "tuple" - - These classes accept a single positional argument, and the pattern - there is matched against the whole object rather than an attribute. - For example "int(0|1)" matches the value "0", but not the value - "0.0". - -In simple terms "CLS(P1, attr=P2)" matches only if the following -happens: - -* "isinstance(<subject>, CLS)" - -* convert "P1" to a keyword pattern using "CLS.__match_args__" - -* For each keyword argument "attr=P2": - - * "hasattr(<subject>, "attr")" - - * "P2" matches "<subject>.attr" - -* … and so on for the corresponding keyword argument/pattern pair.
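-
-As a short, hedged sketch of the above (the "Point" class and its
-"__match_args__" tuple are invented for this example):
-
-   class Point:
-       __match_args__ = ("x", "y")
-       def __init__(self, x, y):
-           self.x = x
-           self.y = y
-
-   match Point(0, 5):
-       case Point(0, y):   # positional 0 becomes x=0 via __match_args__
-           print(f"on the y-axis at height {y}")   # prints: ... height 5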
- -See also: - - * **PEP 634** – Structural Pattern Matching: Specification - - * **PEP 636** – Structural Pattern Matching: Tutorial - - -Function definitions -==================== - -A function definition defines a user-defined function object (see -section The standard type hierarchy): - - funcdef ::= [decorators] "def" funcname [type_params] "(" [parameter_list] ")" - ["->" expression] ":" suite - decorators ::= decorator+ - decorator ::= "@" assignment_expression NEWLINE - parameter_list ::= defparameter ("," defparameter)* "," "/" ["," [parameter_list_no_posonly]] - | parameter_list_no_posonly - parameter_list_no_posonly ::= defparameter ("," defparameter)* ["," [parameter_list_starargs]] - | parameter_list_starargs - parameter_list_starargs ::= "*" [star_parameter] ("," defparameter)* ["," [parameter_star_kwargs]] - | "*" ("," defparameter)+ ["," [parameter_star_kwargs]] - | parameter_star_kwargs - parameter_star_kwargs ::= "**" parameter [","] - parameter ::= identifier [":" expression] - star_parameter ::= identifier [":" ["*"] expression] - defparameter ::= parameter ["=" expression] - funcname ::= identifier - -A function definition is an executable statement. Its execution binds -the function name in the current local namespace to a function object -(a wrapper around the executable code for the function). This -function object contains a reference to the current global namespace -as the global namespace to be used when the function is called. - -The function definition does not execute the function body; this gets -executed only when the function is called. [4] - -A function definition may be wrapped by one or more *decorator* -expressions. Decorator expressions are evaluated when the function is -defined, in the scope that contains the function definition. The -result must be a callable, which is invoked with the function object -as the only argument. The returned value is bound to the function name -instead of the function object. Multiple decorators are applied in -nested fashion. For example, the following code - - @f1(arg) - @f2 - def func(): pass - -is roughly equivalent to - - def func(): pass - func = f1(arg)(f2(func)) - -except that the original function is not temporarily bound to the name -"func". - -Changed in version 3.9: Functions may be decorated with any valid -"assignment_expression". Previously, the grammar was much more -restrictive; see **PEP 614** for details. - -A list of type parameters may be given in square brackets between the -function’s name and the opening parenthesis for its parameter list. -This indicates to static type checkers that the function is generic. -At runtime, the type parameters can be retrieved from the function’s -"__type_params__" attribute. See Generic functions for more. - -Changed in version 3.12: Type parameter lists are new in Python 3.12. - -When one or more *parameters* have the form *parameter* "=" -*expression*, the function is said to have “default parameter values.” -For a parameter with a default value, the corresponding *argument* may -be omitted from a call, in which case the parameter’s default value is -substituted. If a parameter has a default value, all following -parameters up until the “"*"” must also have a default value — this is -a syntactic restriction that is not expressed by the grammar. 
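-
-A small, hedged illustration of this restriction (the function names
-are invented):
-
-   def ok(a, b=1, *, c):    # valid: "c" is keyword-only, so it may
-       return a + b + c     # lack a default even though "b" has one
-
-   # def bad(a=1, b): ...   # SyntaxError: a parameter without a
-   #                        # default follows one with a default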
- -**Default parameter values are evaluated from left to right when the -function definition is executed.** This means that the expression is -evaluated once, when the function is defined, and that the same “pre- -computed” value is used for each call. This is especially important -to understand when a default parameter value is a mutable object, such -as a list or a dictionary: if the function modifies the object (e.g. -by appending an item to a list), the default parameter value is in -effect modified. This is generally not what was intended. A way -around this is to use "None" as the default, and explicitly test for -it in the body of the function, e.g.: - - def whats_on_the_telly(penguin=None): - if penguin is None: - penguin = [] - penguin.append("property of the zoo") - return penguin - -Function call semantics are described in more detail in section Calls. -A function call always assigns values to all parameters mentioned in -the parameter list, either from positional arguments, from keyword -arguments, or from default values. If the form “"*identifier"” is -present, it is initialized to a tuple receiving any excess positional -parameters, defaulting to the empty tuple. If the form -“"**identifier"” is present, it is initialized to a new ordered -mapping receiving any excess keyword arguments, defaulting to a new -empty mapping of the same type. Parameters after “"*"” or -“"*identifier"” are keyword-only parameters and may only be passed by -keyword arguments. Parameters before “"/"” are positional-only -parameters and may only be passed by positional arguments. - -Changed in version 3.8: The "/" function parameter syntax may be used -to indicate positional-only parameters. See **PEP 570** for details. - -Parameters may have an *annotation* of the form “": expression"” -following the parameter name. Any parameter may have an annotation, -even those of the form "*identifier" or "**identifier". (As a special -case, parameters of the form "*identifier" may have an annotation “": -*expression"”.) Functions may have “return” annotation of the form -“"-> expression"” after the parameter list. These annotations can be -any valid Python expression. The presence of annotations does not -change the semantics of a function. The annotation values are -available as values of a dictionary keyed by the parameters’ names in -the "__annotations__" attribute of the function object. If the -"annotations" import from "__future__" is used, annotations are -preserved as strings at runtime which enables postponed evaluation. -Otherwise, they are evaluated when the function definition is -executed. In this case annotations may be evaluated in a different -order than they appear in the source code. - -Changed in version 3.11: Parameters of the form “"*identifier"” may -have an annotation “": *expression"”. See **PEP 646**. - -It is also possible to create anonymous functions (functions not bound -to a name), for immediate use in expressions. This uses lambda -expressions, described in section Lambdas. Note that the lambda -expression is merely a shorthand for a simplified function definition; -a function defined in a “"def"” statement can be passed around or -assigned to another name just like a function defined by a lambda -expression. The “"def"” form is actually more powerful since it -allows the execution of multiple statements and annotations. - -**Programmer’s note:** Functions are first-class objects. 
A “"def"” -statement executed inside a function definition defines a local -function that can be returned or passed around. Free variables used -in the nested function can access the local variables of the function -containing the def. See section Naming and binding for details. - -See also: - - **PEP 3107** - Function Annotations - The original specification for function annotations. - - **PEP 484** - Type Hints - Definition of a standard meaning for annotations: type hints. - - **PEP 526** - Syntax for Variable Annotations - Ability to type hint variable declarations, including class - variables and instance variables. - - **PEP 563** - Postponed Evaluation of Annotations - Support for forward references within annotations by preserving - annotations in a string form at runtime instead of eager - evaluation. - - **PEP 318** - Decorators for Functions and Methods - Function and method decorators were introduced. Class decorators - were introduced in **PEP 3129**. - - -Class definitions -================= - -A class definition defines a class object (see section The standard -type hierarchy): - - classdef ::= [decorators] "class" classname [type_params] [inheritance] ":" suite - inheritance ::= "(" [argument_list] ")" - classname ::= identifier - -A class definition is an executable statement. The inheritance list -usually gives a list of base classes (see Metaclasses for more -advanced uses), so each item in the list should evaluate to a class -object which allows subclassing. Classes without an inheritance list -inherit, by default, from the base class "object"; hence, - - class Foo: - pass - -is equivalent to - - class Foo(object): - pass - -The class’s suite is then executed in a new execution frame (see -Naming and binding), using a newly created local namespace and the -original global namespace. (Usually, the suite contains mostly -function definitions.) When the class’s suite finishes execution, its -execution frame is discarded but its local namespace is saved. [5] A -class object is then created using the inheritance list for the base -classes and the saved local namespace for the attribute dictionary. -The class name is bound to this class object in the original local -namespace. - -The order in which attributes are defined in the class body is -preserved in the new class’s "__dict__". Note that this is reliable -only right after the class is created and only for classes that were -defined using the definition syntax. - -Class creation can be customized heavily using metaclasses. - -Classes can also be decorated: just like when decorating functions, - - @f1(arg) - @f2 - class Foo: pass - -is roughly equivalent to - - class Foo: pass - Foo = f1(arg)(f2(Foo)) - -The evaluation rules for the decorator expressions are the same as for -function decorators. The result is then bound to the class name. - -Changed in version 3.9: Classes may be decorated with any valid -"assignment_expression". Previously, the grammar was much more -restrictive; see **PEP 614** for details. - -A list of type parameters may be given in square brackets immediately -after the class’s name. This indicates to static type checkers that -the class is generic. At runtime, the type parameters can be retrieved -from the class’s "__type_params__" attribute. See Generic classes for -more. - -Changed in version 3.12: Type parameter lists are new in Python 3.12. - -**Programmer’s note:** Variables defined in the class definition are -class attributes; they are shared by instances. 
Instance attributes -can be set in a method with "self.name = value". Both class and -instance attributes are accessible through the notation “"self.name"”, -and an instance attribute hides a class attribute with the same name -when accessed in this way. Class attributes can be used as defaults -for instance attributes, but using mutable values there can lead to -unexpected results. Descriptors can be used to create instance -variables with different implementation details. - -See also: - - **PEP 3115** - Metaclasses in Python 3000 - The proposal that changed the declaration of metaclasses to the - current syntax, and the semantics for how classes with - metaclasses are constructed. - - **PEP 3129** - Class Decorators - The proposal that added class decorators. Function and method - decorators were introduced in **PEP 318**. - - -Coroutines -========== - -Added in version 3.5. - - -Coroutine function definition ------------------------------ - - async_funcdef ::= [decorators] "async" "def" funcname "(" [parameter_list] ")" - ["->" expression] ":" suite - -Execution of Python coroutines can be suspended and resumed at many -points (see *coroutine*). "await" expressions, "async for" and "async -with" can only be used in the body of a coroutine function. - -Functions defined with "async def" syntax are always coroutine -functions, even if they do not contain "await" or "async" keywords. - -It is a "SyntaxError" to use a "yield from" expression inside the body -of a coroutine function. - -An example of a coroutine function: - - async def func(param1, param2): - do_stuff() - await some_coroutine() - -Changed in version 3.7: "await" and "async" are now keywords; -previously they were only treated as such inside the body of a -coroutine function. - - -The "async for" statement -------------------------- - - async_for_stmt ::= "async" for_stmt - -An *asynchronous iterable* provides an "__aiter__" method that -directly returns an *asynchronous iterator*, which can call -asynchronous code in its "__anext__" method. - -The "async for" statement allows convenient iteration over -asynchronous iterables. - -The following code: - - async for TARGET in ITER: - SUITE - else: - SUITE2 - -Is semantically equivalent to: - - iter = (ITER).__aiter__() - running = True - - while running: - try: - TARGET = await iter.__anext__() - except StopAsyncIteration: - running = False - else: - SUITE - else: - SUITE2 - -except that implicit special method lookup is used for "__aiter__()" -and "__anext__()". - -It is a "SyntaxError" to use an "async for" statement outside the body -of a coroutine function. - - -The "async with" statement --------------------------- - - async_with_stmt ::= "async" with_stmt - -An *asynchronous context manager* is a *context manager* that is able -to suspend execution in its *enter* and *exit* methods. - -The following code: - - async with EXPRESSION as TARGET: - SUITE - -is semantically equivalent to: - - manager = (EXPRESSION) - aenter = manager.__aenter__ - aexit = manager.__aexit__ - value = await aenter() - hit_except = False - - try: - TARGET = value - SUITE - except: - hit_except = True - if not await aexit(*sys.exc_info()): - raise - finally: - if not hit_except: - await aexit(None, None, None) - -except that implicit special method lookup is used for "__aenter__()" -and "__aexit__()". - -It is a "SyntaxError" to use an "async with" statement outside the -body of a coroutine function. 
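-
-A minimal, hedged sketch of an asynchronous context manager in use
-(the "Resource" class and "main()" coroutine are invented; only the
-standard "asyncio" module is assumed):
-
-   import asyncio
-
-   class Resource:
-       async def __aenter__(self):
-           return self                  # value bound by "as"
-       async def __aexit__(self, exc_type, exc_value, traceback):
-           return False                 # do not suppress exceptions
-
-   async def main():
-       async with Resource() as r:
-           print("inside", type(r).__name__)
-
-   asyncio.run(main())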
- -See also: - - **PEP 492** - Coroutines with async and await syntax - The proposal that made coroutines a proper standalone concept in - Python, and added supporting syntax. - - -Type parameter lists -==================== - -Added in version 3.12. - -Changed in version 3.13: Support for default values was added (see -**PEP 696**). - - type_params ::= "[" type_param ("," type_param)* "]" - type_param ::= typevar | typevartuple | paramspec - typevar ::= identifier (":" expression)? ("=" expression)? - typevartuple ::= "*" identifier ("=" expression)? - paramspec ::= "**" identifier ("=" expression)? - -Functions (including coroutines), classes and type aliases may contain -a type parameter list: - - def max[T](args: list[T]) -> T: - ... - - async def amax[T](args: list[T]) -> T: - ... - - class Bag[T]: - def __iter__(self) -> Iterator[T]: - ... - - def add(self, arg: T) -> None: - ... - - type ListOrSet[T] = list[T] | set[T] - -Semantically, this indicates that the function, class, or type alias -is generic over a type variable. This information is primarily used by -static type checkers, and at runtime, generic objects behave much like -their non-generic counterparts. - -Type parameters are declared in square brackets ("[]") immediately -after the name of the function, class, or type alias. The type -parameters are accessible within the scope of the generic object, but -not elsewhere. Thus, after a declaration "def func[T](): pass", the -name "T" is not available in the module scope. Below, the semantics of -generic objects are described with more precision. The scope of type -parameters is modeled with a special function (technically, an -annotation scope) that wraps the creation of the generic object. - -Generic functions, classes, and type aliases have a "__type_params__" -attribute listing their type parameters. - -Type parameters come in three kinds: - -* "typing.TypeVar", introduced by a plain name (e.g., "T"). - Semantically, this represents a single type to a type checker. - -* "typing.TypeVarTuple", introduced by a name prefixed with a single - asterisk (e.g., "*Ts"). Semantically, this stands for a tuple of any - number of types. - -* "typing.ParamSpec", introduced by a name prefixed with two asterisks - (e.g., "**P"). Semantically, this stands for the parameters of a - callable. - -"typing.TypeVar" declarations can define *bounds* and *constraints* -with a colon (":") followed by an expression. A single expression -after the colon indicates a bound (e.g. "T: int"). Semantically, this -means that the "typing.TypeVar" can only represent types that are a -subtype of this bound. A parenthesized tuple of expressions after the -colon indicates a set of constraints (e.g. "T: (str, bytes)"). Each -member of the tuple should be a type (again, this is not enforced at -runtime). Constrained type variables can only take on one of the types -in the list of constraints. - -For "typing.TypeVar"s declared using the type parameter list syntax, -the bound and constraints are not evaluated when the generic object is -created, but only when the value is explicitly accessed through the -attributes "__bound__" and "__constraints__". To accomplish this, the -bounds or constraints are evaluated in a separate annotation scope. - -"typing.TypeVarTuple"s and "typing.ParamSpec"s cannot have bounds or -constraints. - -All three flavors of type parameters can also have a *default value*, -which is used when the type parameter is not explicitly provided. 
This -is added by appending a single equals sign ("=") followed by an -expression. Like the bounds and constraints of type variables, the -default value is not evaluated when the object is created, but only -when the type parameter’s "__default__" attribute is accessed. To this -end, the default value is evaluated in a separate annotation scope. If -no default value is specified for a type parameter, the "__default__" -attribute is set to the special sentinel object "typing.NoDefault". - -The following example indicates the full set of allowed type parameter -declarations: - - def overly_generic[ - SimpleTypeVar, - TypeVarWithDefault = int, - TypeVarWithBound: int, - TypeVarWithConstraints: (str, bytes), - *SimpleTypeVarTuple = (int, float), - **SimpleParamSpec = (str, bytearray), - ]( - a: SimpleTypeVar, - b: TypeVarWithDefault, - c: TypeVarWithBound, - d: Callable[SimpleParamSpec, TypeVarWithConstraints], - *e: SimpleTypeVarTuple, - ): ... - - -Generic functions ------------------ - -Generic functions are declared as follows: - - def func[T](arg: T): ... - -This syntax is equivalent to: - - annotation-def TYPE_PARAMS_OF_func(): - T = typing.TypeVar("T") - def func(arg: T): ... - func.__type_params__ = (T,) - return func - func = TYPE_PARAMS_OF_func() - -Here "annotation-def" indicates an annotation scope, which is not -actually bound to any name at runtime. (One other liberty is taken in -the translation: the syntax does not go through attribute access on -the "typing" module, but creates an instance of "typing.TypeVar" -directly.) - -The annotations of generic functions are evaluated within the -annotation scope used for declaring the type parameters, but the -function’s defaults and decorators are not. - -The following example illustrates the scoping rules for these cases, -as well as for additional flavors of type parameters: - - @decorator - def func[T: int, *Ts, **P](*args: *Ts, arg: Callable[P, T] = some_default): - ... - -Except for the lazy evaluation of the "TypeVar" bound, this is -equivalent to: - - DEFAULT_OF_arg = some_default - - annotation-def TYPE_PARAMS_OF_func(): - - annotation-def BOUND_OF_T(): - return int - # In reality, BOUND_OF_T() is evaluated only on demand. - T = typing.TypeVar("T", bound=BOUND_OF_T()) - - Ts = typing.TypeVarTuple("Ts") - P = typing.ParamSpec("P") - - def func(*args: *Ts, arg: Callable[P, T] = DEFAULT_OF_arg): - ... - - func.__type_params__ = (T, Ts, P) - return func - func = decorator(TYPE_PARAMS_OF_func()) - -The capitalized names like "DEFAULT_OF_arg" are not actually bound at -runtime. - - -Generic classes ---------------- - -Generic classes are declared as follows: - - class Bag[T]: ... - -This syntax is equivalent to: - - annotation-def TYPE_PARAMS_OF_Bag(): - T = typing.TypeVar("T") - class Bag(typing.Generic[T]): - __type_params__ = (T,) - ... - return Bag - Bag = TYPE_PARAMS_OF_Bag() - -Here again "annotation-def" (not a real keyword) indicates an -annotation scope, and the name "TYPE_PARAMS_OF_Bag" is not actually -bound at runtime. - -Generic classes implicitly inherit from "typing.Generic". The base -classes and keyword arguments of generic classes are evaluated within -the type scope for the type parameters, and decorators are evaluated -outside that scope. This is illustrated by this example: - - @decorator - class Bag(Base[T], arg=T): ... - -This is equivalent to: - - annotation-def TYPE_PARAMS_OF_Bag(): - T = typing.TypeVar("T") - class Bag(Base[T], typing.Generic[T], arg=T): - __type_params__ = (T,) - ... 
- return Bag - Bag = decorator(TYPE_PARAMS_OF_Bag()) - - -Generic type aliases --------------------- - -The "type" statement can also be used to create a generic type alias: - - type ListOrSet[T] = list[T] | set[T] - -Except for the lazy evaluation of the value, this is equivalent to: - - annotation-def TYPE_PARAMS_OF_ListOrSet(): - T = typing.TypeVar("T") - - annotation-def VALUE_OF_ListOrSet(): - return list[T] | set[T] - # In reality, the value is lazily evaluated - return typing.TypeAliasType("ListOrSet", VALUE_OF_ListOrSet(), type_params=(T,)) - ListOrSet = TYPE_PARAMS_OF_ListOrSet() - -Here, "annotation-def" (not a real keyword) indicates an annotation -scope. The capitalized names like "TYPE_PARAMS_OF_ListOrSet" are not -actually bound at runtime. - --[ Footnotes ]- - -[1] The exception is propagated to the invocation stack unless there - is a "finally" clause which happens to raise another exception. - That new exception causes the old one to be lost. - -[2] In pattern matching, a sequence is defined as one of the - following: - - * a class that inherits from "collections.abc.Sequence" - - * a Python class that has been registered as - "collections.abc.Sequence" - - * a builtin class that has its (CPython) "Py_TPFLAGS_SEQUENCE" bit - set - - * a class that inherits from any of the above - - The following standard library classes are sequences: - - * "array.array" - - * "collections.deque" - - * "list" - - * "memoryview" - - * "range" - - * "tuple" - - Note: - - Subject values of type "str", "bytes", and "bytearray" do not - match sequence patterns. - -[3] In pattern matching, a mapping is defined as one of the following: - - * a class that inherits from "collections.abc.Mapping" - - * a Python class that has been registered as - "collections.abc.Mapping" - - * a builtin class that has its (CPython) "Py_TPFLAGS_MAPPING" bit - set - - * a class that inherits from any of the above - - The standard library classes "dict" and "types.MappingProxyType" - are mappings. - -[4] A string literal appearing as the first statement in the function - body is transformed into the function’s "__doc__" attribute and - therefore the function’s *docstring*. - -[5] A string literal appearing as the first statement in the class - body is transformed into the namespace’s "__doc__" item and - therefore the class’s *docstring*. -''', - 'context-managers': r'''With Statement Context Managers -******************************* - -A *context manager* is an object that defines the runtime context to -be established when executing a "with" statement. The context manager -handles the entry into, and the exit from, the desired runtime context -for the execution of the block of code. Context managers are normally -invoked using the "with" statement (described in section The with -statement), but can also be used by directly invoking their methods. - -Typical uses of context managers include saving and restoring various -kinds of global state, locking and unlocking resources, closing opened -files, etc. - -For more information on context managers, see Context Manager Types. -The "object" class itself does not provide the context manager -methods. - -object.__enter__(self) - - Enter the runtime context related to this object. The "with" - statement will bind this method’s return value to the target(s) - specified in the "as" clause of the statement, if any. - -object.__exit__(self, exc_type, exc_value, traceback) - - Exit the runtime context related to this object. 
The parameters - describe the exception that caused the context to be exited. If the - context was exited without an exception, all three arguments will - be "None". - - If an exception is supplied, and the method wishes to suppress the - exception (i.e., prevent it from being propagated), it should - return a true value. Otherwise, the exception will be processed - normally upon exit from this method. - - Note that "__exit__()" methods should not reraise the passed-in - exception; this is the caller’s responsibility. - -See also: - - **PEP 343** - The “with” statement - The specification, background, and examples for the Python "with" - statement. -''', - 'continue': r'''The "continue" statement -************************ - - continue_stmt ::= "continue" - -"continue" may only occur syntactically nested in a "for" or "while" -loop, but not nested in a function or class definition within that -loop. It continues with the next cycle of the nearest enclosing loop. - -When "continue" passes control out of a "try" statement with a -"finally" clause, that "finally" clause is executed before really -starting the next loop cycle. -''', - 'conversions': r'''Arithmetic conversions -********************** - -When a description of an arithmetic operator below uses the phrase -“the numeric arguments are converted to a common type”, this means -that the operator implementation for built-in types works as follows: - -* If either argument is a complex number, the other is converted to - complex; - -* otherwise, if either argument is a floating-point number, the other - is converted to floating point; - -* otherwise, both must be integers and no conversion is necessary. - -Some additional rules apply for certain operators (e.g., a string as a -left argument to the ‘%’ operator). Extensions must define their own -conversion behavior. -''', - 'customization': r'''Basic customization -******************* - -object.__new__(cls[, ...]) - - Called to create a new instance of class *cls*. "__new__()" is a - static method (special-cased so you need not declare it as such) - that takes the class of which an instance was requested as its - first argument. The remaining arguments are those passed to the - object constructor expression (the call to the class). The return - value of "__new__()" should be the new object instance (usually an - instance of *cls*). - - Typical implementations create a new instance of the class by - invoking the superclass’s "__new__()" method using - "super().__new__(cls[, ...])" with appropriate arguments and then - modifying the newly created instance as necessary before returning - it. - - If "__new__()" is invoked during object construction and it returns - an instance of *cls*, then the new instance’s "__init__()" method - will be invoked like "__init__(self[, ...])", where *self* is the - new instance and the remaining arguments are the same as were - passed to the object constructor. - - If "__new__()" does not return an instance of *cls*, then the new - instance’s "__init__()" method will not be invoked. - - "__new__()" is intended mainly to allow subclasses of immutable - types (like int, str, or tuple) to customize instance creation. It - is also commonly overridden in custom metaclasses in order to - customize class creation. - -object.__init__(self[, ...]) - - Called after the instance has been created (by "__new__()"), but - before it is returned to the caller. The arguments are those - passed to the class constructor expression. 
If a base class has an - "__init__()" method, the derived class’s "__init__()" method, if - any, must explicitly call it to ensure proper initialization of the - base class part of the instance; for example: - "super().__init__([args...])". - - Because "__new__()" and "__init__()" work together in constructing - objects ("__new__()" to create it, and "__init__()" to customize - it), no non-"None" value may be returned by "__init__()"; doing so - will cause a "TypeError" to be raised at runtime. - -object.__del__(self) - - Called when the instance is about to be destroyed. This is also - called a finalizer or (improperly) a destructor. If a base class - has a "__del__()" method, the derived class’s "__del__()" method, - if any, must explicitly call it to ensure proper deletion of the - base class part of the instance. - - It is possible (though not recommended!) for the "__del__()" method - to postpone destruction of the instance by creating a new reference - to it. This is called object *resurrection*. It is - implementation-dependent whether "__del__()" is called a second - time when a resurrected object is about to be destroyed; the - current *CPython* implementation only calls it once. - - It is not guaranteed that "__del__()" methods are called for - objects that still exist when the interpreter exits. - "weakref.finalize" provides a straightforward way to register a - cleanup function to be called when an object is garbage collected. - - Note: - - "del x" doesn’t directly call "x.__del__()" — the former - decrements the reference count for "x" by one, and the latter is - only called when "x"’s reference count reaches zero. - - **CPython implementation detail:** It is possible for a reference - cycle to prevent the reference count of an object from going to - zero. In this case, the cycle will be later detected and deleted - by the *cyclic garbage collector*. A common cause of reference - cycles is when an exception has been caught in a local variable. - The frame’s locals then reference the exception, which references - its own traceback, which references the locals of all frames caught - in the traceback. - - See also: Documentation for the "gc" module. - - Warning: - - Due to the precarious circumstances under which "__del__()" - methods are invoked, exceptions that occur during their execution - are ignored, and a warning is printed to "sys.stderr" instead. - In particular: - - * "__del__()" can be invoked when arbitrary code is being - executed, including from any arbitrary thread. If "__del__()" - needs to take a lock or invoke any other blocking resource, it - may deadlock as the resource may already be taken by the code - that gets interrupted to execute "__del__()". - - * "__del__()" can be executed during interpreter shutdown. As a - consequence, the global variables it needs to access (including - other modules) may already have been deleted or set to "None". - Python guarantees that globals whose name begins with a single - underscore are deleted from their module before other globals - are deleted; if no other references to such globals exist, this - may help in assuring that imported modules are still available - at the time when the "__del__()" method is called. - -object.__repr__(self) - - Called by the "repr()" built-in function to compute the “official” - string representation of an object. If at all possible, this - should look like a valid Python expression that could be used to - recreate an object with the same value (given an appropriate - environment). 
If this is not possible, a string of the form - "<...some useful description...>" should be returned. The return - value must be a string object. If a class defines "__repr__()" but - not "__str__()", then "__repr__()" is also used when an “informal” - string representation of instances of that class is required. - - This is typically used for debugging, so it is important that the - representation is information-rich and unambiguous. A default - implementation is provided by the "object" class itself. - -object.__str__(self) - - Called by "str(object)", the default "__format__()" implementation, - and the built-in function "print()", to compute the “informal” or - nicely printable string representation of an object. The return - value must be a str object. - - This method differs from "object.__repr__()" in that there is no - expectation that "__str__()" return a valid Python expression: a - more convenient or concise representation can be used. - - The default implementation defined by the built-in type "object" - calls "object.__repr__()". - -object.__bytes__(self) - - Called by bytes to compute a byte-string representation of an - object. This should return a "bytes" object. The "object" class - itself does not provide this method. - -object.__format__(self, format_spec) - - Called by the "format()" built-in function, and by extension, - evaluation of formatted string literals and the "str.format()" - method, to produce a “formatted” string representation of an - object. The *format_spec* argument is a string that contains a - description of the formatting options desired. The interpretation - of the *format_spec* argument is up to the type implementing - "__format__()", however most classes will either delegate - formatting to one of the built-in types, or use a similar - formatting option syntax. - - See Format specification mini-language for a description of the - standard formatting syntax. - - The return value must be a string object. - - The default implementation by the "object" class should be given an - empty *format_spec* string. It delegates to "__str__()". - - Changed in version 3.4: The __format__ method of "object" itself - raises a "TypeError" if passed any non-empty string. - - Changed in version 3.7: "object.__format__(x, '')" is now - equivalent to "str(x)" rather than "format(str(x), '')". - -object.__lt__(self, other) -object.__le__(self, other) -object.__eq__(self, other) -object.__ne__(self, other) -object.__gt__(self, other) -object.__ge__(self, other) - - These are the so-called “rich comparison” methods. The - correspondence between operator symbols and method names is as - follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)", - "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls - "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)". - - A rich comparison method may return the singleton "NotImplemented" - if it does not implement the operation for a given pair of - arguments. By convention, "False" and "True" are returned for a - successful comparison. However, these methods can return any value, - so if the comparison operator is used in a Boolean context (e.g., - in the condition of an "if" statement), Python will call "bool()" - on the value to determine if the result is true or false. - - By default, "object" implements "__eq__()" by using "is", returning - "NotImplemented" in the case of a false comparison: "True if x is y - else NotImplemented". For "__ne__()", by default it delegates to - "__eq__()" and inverts the result unless it is "NotImplemented".
- There are no other implied relationships among the comparison - operators or default implementations; for example, the truth of - "(x<y or x==y)" does not imply "x<=y". To automatically generate - ordering operations from a single root operation, see - "functools.total_ordering()". - - See the paragraph on "__hash__()" for some important notes on - creating *hashable* objects which support custom comparison - operations and are usable as dictionary keys. - - There are no swapped-argument versions of these methods (to be used - when the left argument does not support the operation but the right - argument does); rather, "__lt__()" and "__gt__()" are each other’s - reflection, and so are "__le__()" and "__ge__()". "__eq__()" and - "__ne__()" are their own reflection. If the operands are of - different types, and the right operand’s type is a direct or - indirect subclass of the left operand’s type, the reflected method - of the right operand has priority, otherwise the left operand’s - method has priority. Virtual subclassing is not considered. - - When no appropriate method returns any value other than - "NotImplemented", the "==" and "!=" operators will fall back to - "is" and "is not", respectively. - -object.__hash__(self) - - Called by built-in function "hash()" and for operations on members - of hashed collections including "set", "frozenset", and "dict". - The "__hash__()" method should return an integer. The only required - property is that objects which compare equal have the same hash - value; it is advised to mix together the hash values of the - components of the object that also play a part in comparison of - objects by packing them into a tuple and hashing the tuple. - Example: - - def __hash__(self): - return hash((self.name, self.nick, self.color)) - - Note: - - "hash()" truncates the value returned from an object’s custom - "__hash__()" method to the size of a "Py_ssize_t". This is - typically 8 bytes on 64-bit builds and 4 bytes on 32-bit builds. - - If a class does not define an "__eq__()" method it should not - define a "__hash__()" operation either; if it defines "__eq__()" - but not "__hash__()", its instances will not be usable as items in - hashable collections. If a class defines mutable objects and - implements an "__eq__()" method, it should not implement - "__hash__()", since the implementation of hashable collections - requires that a key’s hash value is immutable (if the object’s hash - value changes, it will be in the wrong hash bucket). - - User-defined classes have "__eq__()" and "__hash__()" methods by - default; with them, all objects compare unequal (except with - themselves) and "x.__hash__()" returns an appropriate value such - that "x == y" implies both that "x is y" and "hash(x) == hash(y)". - - A class that overrides "__eq__()" and does not define "__hash__()" - will have its "__hash__()" implicitly set to "None". When the - "__hash__()" method of a class is "None", instances of the class - will raise an appropriate "TypeError" when a program attempts to - retrieve their hash value. - - If a class that overrides "__eq__()" needs to retain the hash - implementation from a parent class, the interpreter must be told - this explicitly by setting "__hash__ = <ParentClass>.__hash__". - - If a class that does not override "__eq__()" wishes to suppress - hash support, it should include "__hash__ = None" in the class - definition. A class which defines its own "__hash__()" that - explicitly raises a "TypeError" would be incorrectly identified as - hashable by an "isinstance(obj, collections.abc.Hashable)" call. - - Note: - - By default, the "__hash__()" values of str and bytes objects are - “salted” with an unpredictable random value. Although they - remain constant within an individual Python process, they are not - predictable between repeated invocations of Python. This is - intended to provide protection against a denial-of-service caused - by carefully chosen inputs that exploit the worst case - performance of a dict insertion, *O*(*n*^2) complexity. See - https://ocert.org/advisories/ocert-2011-003.html for - details. Changing hash values affects the iteration order of sets. - Python has never made guarantees about this ordering (and it - typically varies between 32-bit and 64-bit builds). See also - "PYTHONHASHSEED". - - Changed in version 3.3: Hash randomization is enabled by default. - -object.__bool__(self) - - Called to implement truth value testing and the built-in operation - "bool()"; should return "False" or "True". When this method is not - defined, "__len__()" is called, if it is defined, and the object is - considered true if its result is nonzero. If a class defines - neither "__len__()" nor "__bool__()" (which is true of the "object" - class itself), all its instances are considered true. -''', - 'debugger': r'''"pdb" — The Python Debugger -*************************** - -**Source code:** Lib/pdb.py - -====================================================================== - -The module "pdb" defines an interactive source code debugger for -Python programs. It supports setting (conditional) breakpoints and -single stepping at the source line level, inspection of stack frames, -source code listing, and evaluation of arbitrary Python code in the -context of any stack frame. It also supports post-mortem debugging -and can be called under program control. - -The debugger is extensible – it is actually defined as the class -"Pdb". This is currently undocumented but easily understood by reading -the source. The extension interface uses the modules "bdb" and "cmd". - -See also: - - Module "faulthandler" - Used to dump Python tracebacks explicitly, on a fault, after a - timeout, or on a user signal. - - Module "traceback" - Standard interface to extract, format and print stack traces of - Python programs. - -The typical usage to break into the debugger is to insert: - - import pdb; pdb.set_trace() - -Or: - - breakpoint() - -at the location you want to break into the debugger, and then run the -program. You can then step through the code following this statement, -and continue running without the debugger using the "continue" -command. - -Changed in version 3.7: The built-in "breakpoint()", when called with -defaults, can be used instead of "import pdb; pdb.set_trace()".
- - def double(x): - breakpoint() - return x * 2 - val = 3 - print(f"{val} * 2 is {double(val)}") - -The debugger’s prompt is "(Pdb)", which is the indicator that you are -in debug mode: - - > ...(2)double() - -> breakpoint() - (Pdb) p x - 3 - (Pdb) continue - 3 * 2 is 6 - -Changed in version 3.3: Tab-completion via the "readline" module is -available for commands and command arguments, e.g. the current global -and local names are offered as arguments of the "p" command. - - -Command-line interface -====================== - -You can also invoke "pdb" from the command line to debug other -scripts. For example: - - python -m pdb [-c command] (-m module | pyfile) [args ...] - -When invoked as a module, pdb will automatically enter post-mortem -debugging if the program being debugged exits abnormally. After post- -mortem debugging (or after normal exit of the program), pdb will -restart the program. Automatic restarting preserves pdb’s state (such -as breakpoints) and in most cases is more useful than quitting the -debugger upon program’s exit. - --c, --command <command> - - To execute commands as if given in a ".pdbrc" file; see Debugger - commands. - - Changed in version 3.2: Added the "-c" option. - --m <module> - - To execute modules similar to the way "python -m" does. As with a - script, the debugger will pause execution just before the first - line of the module. - - Changed in version 3.7: Added the "-m" option. - -Typical usage to execute a statement under control of the debugger is: - - >>> import pdb - >>> def f(x): - ... print(1 / x) - >>> pdb.run("f(2)") - > <string>(1)<module>() - (Pdb) continue - 0.5 - >>> - -The typical usage to inspect a crashed program is: - - >>> import pdb - >>> def f(x): - ... print(1 / x) - ... - >>> f(0) - Traceback (most recent call last): - File "<stdin>", line 1, in <module> - File "<stdin>", line 2, in f - ZeroDivisionError: division by zero - >>> pdb.pm() - > <stdin>(2)f() - (Pdb) p x - 0 - (Pdb) - -Changed in version 3.13: The implementation of **PEP 667** means that -name assignments made via "pdb" will immediately affect the active -scope, even when running inside an *optimized scope*. - -The module defines the following functions; each enters the debugger -in a slightly different way: - -pdb.run(statement, globals=None, locals=None) - - Execute the *statement* (given as a string or a code object) under - debugger control. The debugger prompt appears before any code is - executed; you can set breakpoints and type "continue", or you can - step through the statement using "step" or "next" (all these - commands are explained below). The optional *globals* and *locals* - arguments specify the environment in which the code is executed; by - default the dictionary of the module "__main__" is used. (See the - explanation of the built-in "exec()" or "eval()" functions.) - -pdb.runeval(expression, globals=None, locals=None) - - Evaluate the *expression* (given as a string or a code object) - under debugger control. When "runeval()" returns, it returns the - value of the *expression*. Otherwise this function is similar to - "run()". - -pdb.runcall(function, *args, **kwds) - - Call the *function* (a function or method object, not a string) - with the given arguments. When "runcall()" returns, it returns - whatever the function call returned. The debugger prompt appears - as soon as the function is entered. - -pdb.set_trace(*, header=None) - - Enter the debugger at the calling stack frame. This is useful to - hard-code a breakpoint at a given point in a program, even if the - code is not otherwise being debugged (e.g.
when an assertion - fails). If given, *header* is printed to the console just before - debugging begins. - - Changed in version 3.7: The keyword-only argument *header*. - - Changed in version 3.13: "set_trace()" will enter the debugger - immediately, rather than on the next line of code to be executed. - -pdb.post_mortem(t=None) - - Enter post-mortem debugging of the given exception or traceback - object. If no value is given, it uses the exception that is - currently being handled, or raises "ValueError" if there isn’t one. - - Changed in version 3.13: Support for exception objects was added. - -pdb.pm() - - Enter post-mortem debugging of the exception found in - "sys.last_exc". - -The "run*" functions and "set_trace()" are aliases for instantiating -the "Pdb" class and calling the method of the same name. If you want -to access further features, you have to do this yourself: - -class pdb.Pdb(completekey='tab', stdin=None, stdout=None, skip=None, nosigint=False, readrc=True) - - "Pdb" is the debugger class. - - The *completekey*, *stdin* and *stdout* arguments are passed to the - underlying "cmd.Cmd" class; see the description there. - - The *skip* argument, if given, must be an iterable of glob-style - module name patterns. The debugger will not step into frames that - originate in a module that matches one of these patterns. [1] - - By default, Pdb sets a handler for the SIGINT signal (which is sent - when the user presses "Ctrl"-"C" on the console) when you give a - "continue" command. This allows you to break into the debugger - again by pressing "Ctrl"-"C". If you want Pdb not to touch the - SIGINT handler, set *nosigint* to true. - - The *readrc* argument defaults to true and controls whether Pdb - will load .pdbrc files from the filesystem. - - Example call to enable tracing with *skip*: - - import pdb; pdb.Pdb(skip=['django.*']).set_trace() - - Raises an auditing event "pdb.Pdb" with no arguments. - - Changed in version 3.1: Added the *skip* parameter. - - Changed in version 3.2: Added the *nosigint* parameter. Previously, - a SIGINT handler was never set by Pdb. - - Changed in version 3.6: The *readrc* argument. - - run(statement, globals=None, locals=None) - runeval(expression, globals=None, locals=None) - runcall(function, *args, **kwds) - set_trace() - - See the documentation for the functions explained above. - - -Debugger commands -================= - -The commands recognized by the debugger are listed below. Most -commands can be abbreviated to one or two letters as indicated; e.g. -"h(elp)" means that either "h" or "help" can be used to enter the help -command (but not "he" or "hel", nor "H" or "Help" or "HELP"). -Arguments to commands must be separated by whitespace (spaces or -tabs). Optional arguments are enclosed in square brackets ("[]") in -the command syntax; the square brackets must not be typed. -Alternatives in the command syntax are separated by a vertical bar -("|"). - -Entering a blank line repeats the last command entered. Exception: if -the last command was a "list" command, the next 11 lines are listed. - -Commands that the debugger doesn’t recognize are assumed to be Python -statements and are executed in the context of the program being -debugged. Python statements can also be prefixed with an exclamation -point ("!"). This is a powerful way to inspect the program being -debugged; it is even possible to change a variable or call a function. 
-When an exception occurs in such a statement, the exception name is
-printed but the debugger's state is not changed.
-
-Changed in version 3.13: Expressions/Statements whose prefix is a pdb
-command are now correctly identified and executed.
-
-The debugger supports aliases. Aliases can have parameters, which
-allows a certain level of adaptability to the context under
-examination.
-
-Multiple commands may be entered on a single line, separated by ";;".
-(A single ";" is not used as it is the separator for multiple
-commands in a line that is passed to the Python parser.) No
-intelligence is applied to separating the commands; the input is
-split at the first ";;" pair, even if it is in the middle of a quoted
-string. A workaround for strings with double semicolons is to use
-implicit string concatenation "';'';'" or "";"";"".
-
-To set a temporary global variable, use a *convenience variable*. A
-*convenience variable* is a variable whose name starts with "$". For
-example, "$foo = 1" sets a global variable "$foo" which you can use
-in the debugger session. The *convenience variables* are cleared
-when the program resumes execution, so they are less likely to
-interfere with your program than normal variables like "foo = 1".
-
-There are three preset *convenience variables*:
-
-* "$_frame": the current frame you are debugging
-
-* "$_retval": the return value if the frame is returning
-
-* "$_exception": the exception if the frame is raising an exception
-
-Added in version 3.12: Added the *convenience variable* feature.
-
-If a file ".pdbrc" exists in the user's home directory or in the
-current directory, it is read with "'utf-8'" encoding and executed as
-if it had been typed at the debugger prompt, with the exception that
-empty lines and lines starting with "#" are ignored. This is
-particularly useful for aliases. If both files exist, the one in the
-home directory is read first and aliases defined there can be
-overridden by the local file.
-
-Changed in version 3.2: ".pdbrc" can now contain commands that
-continue debugging, such as "continue" or "next". Previously, these
-commands had no effect.
-
-Changed in version 3.11: ".pdbrc" is now read with "'utf-8'"
-encoding. Previously, it was read with the system locale encoding.
-
-h(elp) [command]
-
-   Without argument, print the list of available commands. With a
-   *command* as argument, print help about that command. "help pdb"
-   displays the full documentation (the docstring of the "pdb"
-   module). Since the *command* argument must be an identifier,
-   "help exec" must be entered to get help on the "!" command.
-
-w(here)
-
-   Print a stack trace, with the most recent frame at the bottom. An
-   arrow (">") indicates the current frame, which determines the
-   context of most commands.
-
-d(own) [count]
-
-   Move the current frame *count* (default one) levels down in the
-   stack trace (to a newer frame).
-
-u(p) [count]
-
-   Move the current frame *count* (default one) levels up in the
-   stack trace (to an older frame).
-
-b(reak) [([filename:]lineno | function) [, condition]]
-
-   With a *lineno* argument, set a break at line *lineno* in the
-   current file. The line number may be prefixed with a *filename*
-   and a colon, to specify a breakpoint in another file (possibly one
-   that hasn't been loaded yet). The file is searched on "sys.path".
-   Acceptable forms of *filename* are "/abspath/to/file.py",
-   "relpath/file.py", "module" and "package.module". 
- - With a *function* argument, set a break at the first executable - statement within that function. *function* can be any expression - that evaluates to a function in the current namespace. - - If a second argument is present, it is an expression which must - evaluate to true before the breakpoint is honored. - - Without argument, list all breaks, including for each breakpoint, - the number of times that breakpoint has been hit, the current - ignore count, and the associated condition if any. - - Each breakpoint is assigned a number to which all the other - breakpoint commands refer. - -tbreak [([filename:]lineno | function) [, condition]] - - Temporary breakpoint, which is removed automatically when it is - first hit. The arguments are the same as for "break". - -cl(ear) [filename:lineno | bpnumber ...] - - With a *filename:lineno* argument, clear all the breakpoints at - this line. With a space separated list of breakpoint numbers, clear - those breakpoints. Without argument, clear all breaks (but first - ask confirmation). - -disable bpnumber [bpnumber ...] - - Disable the breakpoints given as a space separated list of - breakpoint numbers. Disabling a breakpoint means it cannot cause - the program to stop execution, but unlike clearing a breakpoint, it - remains in the list of breakpoints and can be (re-)enabled. - -enable bpnumber [bpnumber ...] - - Enable the breakpoints specified. - -ignore bpnumber [count] - - Set the ignore count for the given breakpoint number. If *count* - is omitted, the ignore count is set to 0. A breakpoint becomes - active when the ignore count is zero. When non-zero, the *count* - is decremented each time the breakpoint is reached and the - breakpoint is not disabled and any associated condition evaluates - to true. - -condition bpnumber [condition] - - Set a new *condition* for the breakpoint, an expression which must - evaluate to true before the breakpoint is honored. If *condition* - is absent, any existing condition is removed; i.e., the breakpoint - is made unconditional. - -commands [bpnumber] - - Specify a list of commands for breakpoint number *bpnumber*. The - commands themselves appear on the following lines. Type a line - containing just "end" to terminate the commands. An example: - - (Pdb) commands 1 - (com) p some_variable - (com) end - (Pdb) - - To remove all commands from a breakpoint, type "commands" and - follow it immediately with "end"; that is, give no commands. - - With no *bpnumber* argument, "commands" refers to the last - breakpoint set. - - You can use breakpoint commands to start your program up again. - Simply use the "continue" command, or "step", or any other command - that resumes execution. - - Specifying any command resuming execution (currently "continue", - "step", "next", "return", "jump", "quit" and their abbreviations) - terminates the command list (as if that command was immediately - followed by end). This is because any time you resume execution - (even with a simple next or step), you may encounter another - breakpoint—which could have its own command list, leading to - ambiguities about which list to execute. - - If you use the "silent" command in the command list, the usual - message about stopping at a breakpoint is not printed. This may be - desirable for breakpoints that are to print a specific message and - then continue. If none of the other commands print anything, you - see no sign that the breakpoint was reached. 
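-
-   For example, the following sketch (assuming a breakpoint numbered
-   1 exists and a variable "x" is visible in that frame) prints a
-   message and resumes execution without the usual stop message:
-
-      (Pdb) commands 1
-      (com) silent
-      (com) p f"x is now {x}"
-      (com) continue
-      (Pdb)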
-
-s(tep)
-
-   Execute the current line, stop at the first possible occasion
-   (either in a function that is called or on the next line in the
-   current function).
-
-n(ext)
-
-   Continue execution until the next line in the current function is
-   reached or it returns. (The difference between "next" and "step"
-   is that "step" stops inside a called function, while "next"
-   executes called functions at (nearly) full speed, only stopping
-   at the next line in the current function.)
-
-unt(il) [lineno]
-
-   Without argument, continue execution until the line with a number
-   greater than the current one is reached.
-
-   With *lineno*, continue execution until a line with a number
-   greater or equal to *lineno* is reached. In both cases, also stop
-   when the current frame returns.
-
-   Changed in version 3.2: Allow giving an explicit line number.
-
-r(eturn)
-
-   Continue execution until the current function returns.
-
-c(ont(inue))
-
-   Continue execution, only stop when a breakpoint is encountered.
-
-j(ump) lineno
-
-   Set the next line that will be executed. Only available in the
-   bottom-most frame. This lets you jump back and execute code
-   again, or jump forward to skip code that you don't want to run.
-
-   It should be noted that not all jumps are allowed – for instance
-   it is not possible to jump into the middle of a "for" loop or out
-   of a "finally" clause.
-
-l(ist) [first[, last]]
-
-   List source code for the current file. Without arguments, list
-   11 lines around the current line or continue the previous
-   listing. With "." as argument, list 11 lines around the current
-   line. With one argument, list 11 lines around that line. With
-   two arguments, list the given range; if the second argument is
-   less than the first, it is interpreted as a count.
-
-   The current line in the current frame is indicated by "->". If
-   an exception is being debugged, the line where the exception was
-   originally raised or propagated is indicated by ">>", if it
-   differs from the current line.
-
-   Changed in version 3.2: Added the ">>" marker.
-
-ll | longlist
-
-   List all source code for the current function or frame.
-   Interesting lines are marked as for "list".
-
-   Added in version 3.2.
-
-a(rgs)
-
-   Print the arguments of the current function and their current
-   values.
-
-p expression
-
-   Evaluate *expression* in the current context and print its value.
-
-   Note:
-
-     "print()" can also be used, but is not a debugger command — this
-     executes the Python "print()" function.
-
-pp expression
-
-   Like the "p" command, except the value of *expression* is
-   pretty-printed using the "pprint" module.
-
-whatis expression
-
-   Print the type of *expression*.
-
-source expression
-
-   Try to get source code of *expression* and display it.
-
-   Added in version 3.2.
-
-display [expression]
-
-   Display the value of *expression* if it changed, each time
-   execution stops in the current frame.
-
-   Without *expression*, list all display expressions for the
-   current frame.
-
-   Note:
-
-     Display evaluates *expression* and compares it to the result of
-     the previous evaluation of *expression*, so when the result is
-     mutable, display may not be able to pick up the changes. 
-
-   Example:
-
-      lst = []
-      breakpoint()
-      pass
-      lst.append(1)
-      print(lst)
-
-   Display won't realize "lst" has been changed because the result
-   of evaluation is modified in place by "lst.append(1)" before
-   being compared:
-
-      > example.py(3)<module>()
-      -> pass
-      (Pdb) display lst
-      display lst: []
-      (Pdb) n
-      > example.py(4)<module>()
-      -> lst.append(1)
-      (Pdb) n
-      > example.py(5)<module>()
-      -> print(lst)
-      (Pdb)
-
-   You can work around this by displaying a copy of the value, so
-   that a fresh object is compared on each evaluation:
-
-      > example.py(3)<module>()
-      -> pass
-      (Pdb) display lst[:]
-      display lst[:]: []
-      (Pdb) n
-      > example.py(4)<module>()
-      -> lst.append(1)
-      (Pdb) n
-      > example.py(5)<module>()
-      -> print(lst)
-      display lst[:]: [1] [old: []]
-      (Pdb)
-
-   Added in version 3.2.
-
-undisplay [expression]
-
-   Do not display *expression* anymore in the current frame.
-   Without *expression*, clear all display expressions for the
-   current frame.
-
-   Added in version 3.2.
-
-interact
-
-   Start an interactive interpreter (using the "code" module) in a
-   new global namespace initialised from the local and global
-   namespaces for the current scope. Use "exit()" or "quit()" to
-   exit the interpreter and return to the debugger.
-
-   Note:
-
-     As "interact" creates a new dedicated namespace for code
-     execution, assignments to variables will not affect the
-     original namespaces. However, modifications to any referenced
-     mutable objects will be reflected in the original namespaces as
-     usual.
-
-   Added in version 3.2.
-
-   Changed in version 3.13: "exit()" and "quit()" can be used to
-   exit the "interact" command.
-
-   Changed in version 3.13: "interact" directs its output to the
-   debugger's output channel rather than "sys.stderr".
-
-alias [name [command]]
-
-   Create an alias called *name* that executes *command*. The
-   *command* must *not* be enclosed in quotes. Replaceable
-   parameters can be indicated by "%1", "%2", … and "%9", while "%*"
-   is replaced by all the parameters. If *command* is omitted, the
-   current alias for *name* is shown. If no arguments are given,
-   all aliases are listed.
-
-   Aliases may be nested and can contain anything that can be
-   legally typed at the pdb prompt. Note that internal pdb commands
-   *can* be overridden by aliases. Such a command is then hidden
-   until the alias is removed. Aliasing is recursively applied to
-   the first word of the command line; all other words in the line
-   are left alone.
-
-   As an example, here are two useful aliases (especially when
-   placed in the ".pdbrc" file):
-
-      # Print instance variables (usage "pi classInst")
-      alias pi for k in %1.__dict__.keys(): print(f"%1.{k} = {%1.__dict__[k]}")
-      # Print instance variables in self
-      alias ps pi self
-
-unalias name
-
-   Delete the specified alias *name*.
-
-! statement
-
-   Execute the (one-line) *statement* in the context of the current
-   stack frame. The exclamation point can be omitted unless the
-   first word of the statement resembles a debugger command, e.g.:
-
-      (Pdb) ! n=42
-      (Pdb)
-
-   To set a global variable, you can prefix the assignment command
-   with a "global" statement on the same line, e.g.:
-
-      (Pdb) global list_options; list_options = ['-l']
-      (Pdb)
-
-run [args ...]
-restart [args ...]
-
-   Restart the debugged Python program. If *args* is supplied, it
-   is split with "shlex" and the result is used as the new
-   "sys.argv". History, breakpoints, actions and debugger options
-   are preserved. "restart" is an alias for "run".
-
-q(uit)
-
-   Quit from the debugger. The program being executed is aborted. 
-
-debug code
-
-   Enter a recursive debugger that steps through *code* (which is an
-   arbitrary expression or statement to be executed in the current
-   environment).
-
-retval
-
-   Print the return value for the last return of the current
-   function.
-
-exceptions [excnumber]
-
-   List or jump between chained exceptions.
-
-   When using "pdb.pm()" or "Pdb.post_mortem(...)" with a chained
-   exception instead of a traceback, it allows the user to move
-   between the chained exceptions, using the "exceptions" command to
-   list exceptions and "exceptions <number>" to switch to that
-   exception.
-
-   Example:
-
-      def out():
-          try:
-              middle()
-          except Exception as e:
-              raise ValueError("reraise middle() error") from e
-
-      def middle():
-          try:
-              return inner(0)
-          except Exception as e:
-              raise ValueError("Middle fail")
-
-      def inner(x):
-          1 / x
-
-      out()
-
-   Calling "pdb.pm()" will allow you to move between exceptions:
-
-      > example.py(5)out()
-      -> raise ValueError("reraise middle() error") from e
-
-      (Pdb) exceptions
-        0 ZeroDivisionError('division by zero')
-        1 ValueError('Middle fail')
-      > 2 ValueError('reraise middle() error')
-
-      (Pdb) exceptions 0
-      > example.py(16)inner()
-      -> 1 / x
-
-      (Pdb) up
-      > example.py(10)middle()
-      -> return inner(0)
-
-   Added in version 3.13.
-
--[ Footnotes ]-
-
-[1] Whether a frame is considered to originate in a certain module
-    is determined by the "__name__" in the frame globals.
-''',
- 'del': r'''The "del" statement
-*******************
-
-   del_stmt ::= "del" target_list
-
-Deletion is recursively defined very similarly to the way assignment
-is defined. Rather than spelling it out in full detail, here are
-some hints.
-
-Deletion of a target list recursively deletes each target, from left
-to right.
-
-Deletion of a name removes the binding of that name from the local
-or global namespace, depending on whether the name occurs in a
-"global" statement in the same code block. Trying to delete an
-unbound name raises a "NameError" exception.
-
-Deletion of attribute references, subscriptions and slicings is
-passed to the primary object involved; deletion of a slicing is in
-general equivalent to assignment of an empty slice of the right type
-(but even this is determined by the sliced object).
-
-Changed in version 3.2: Previously it was illegal to delete a name
-from the local namespace if it occurs as a free variable in a nested
-block.
-''',
- 'dict': r'''Dictionary displays
-*******************
-
-A dictionary display is a possibly empty series of dict items
-(key/value pairs) enclosed in curly braces:
-
-   dict_display       ::= "{" [dict_item_list | dict_comprehension] "}"
-   dict_item_list     ::= dict_item ("," dict_item)* [","]
-   dict_item          ::= expression ":" expression | "**" or_expr
-   dict_comprehension ::= expression ":" expression comp_for
-
-A dictionary display yields a new dictionary object.
-
-If a comma-separated sequence of dict items is given, they are
-evaluated from left to right to define the entries of the
-dictionary: each key object is used as a key into the dictionary to
-store the corresponding value. This means that you can specify the
-same key multiple times in the dict item list, and the final
-dictionary's value for that key will be the last one given.
-
-A double asterisk "**" denotes *dictionary unpacking*. Its operand
-must be a *mapping*. Each mapping item is added to the new
-dictionary. Later values replace values already set by earlier dict
-items and earlier dictionary unpackings. 
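-
-For example (an illustrative sketch), since later items and
-unpackings override earlier ones:
-
-   >>> base = {"a": 1, "b": 2}
-   >>> {"a": 0, **base, "b": 99}
-   {'a': 1, 'b': 99}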
- -Added in version 3.5: Unpacking into dictionary displays, originally -proposed by **PEP 448**. - -A dict comprehension, in contrast to list and set comprehensions, -needs two expressions separated with a colon followed by the usual -“for” and “if” clauses. When the comprehension is run, the resulting -key and value elements are inserted in the new dictionary in the order -they are produced. - -Restrictions on the types of the key values are listed earlier in -section The standard type hierarchy. (To summarize, the key type -should be *hashable*, which excludes all mutable objects.) Clashes -between duplicate keys are not detected; the last value (textually -rightmost in the display) stored for a given key value prevails. - -Changed in version 3.8: Prior to Python 3.8, in dict comprehensions, -the evaluation order of key and value was not well-defined. In -CPython, the value was evaluated before the key. Starting with 3.8, -the key is evaluated before the value, as proposed by **PEP 572**. -''', - 'dynamic-features': r'''Interaction with dynamic features -********************************* - -Name resolution of free variables occurs at runtime, not at compile -time. This means that the following code will print 42: - - i = 10 - def f(): - print(i) - i = 42 - f() - -The "eval()" and "exec()" functions do not have access to the full -environment for resolving names. Names may be resolved in the local -and global namespaces of the caller. Free variables are not resolved -in the nearest enclosing namespace, but in the global namespace. [1] -The "exec()" and "eval()" functions have optional arguments to -override the global and local namespace. If only one namespace is -specified, it is used for both. -''', - 'else': r'''The "if" statement -****************** - -The "if" statement is used for conditional execution: - - if_stmt ::= "if" assignment_expression ":" suite - ("elif" assignment_expression ":" suite)* - ["else" ":" suite] - -It selects exactly one of the suites by evaluating the expressions one -by one until one is found to be true (see section Boolean operations -for the definition of true and false); then that suite is executed -(and no other part of the "if" statement is executed or evaluated). -If all expressions are false, the suite of the "else" clause, if -present, is executed. -''', - 'exceptions': r'''Exceptions -********** - -Exceptions are a means of breaking out of the normal flow of control -of a code block in order to handle errors or other exceptional -conditions. An exception is *raised* at the point where the error is -detected; it may be *handled* by the surrounding code block or by any -code block that directly or indirectly invoked the code block where -the error occurred. - -The Python interpreter raises an exception when it detects a run-time -error (such as division by zero). A Python program can also -explicitly raise an exception with the "raise" statement. Exception -handlers are specified with the "try" … "except" statement. The -"finally" clause of such a statement can be used to specify cleanup -code which does not handle the exception, but is executed whether an -exception occurred or not in the preceding code. - -Python uses the “termination” model of error handling: an exception -handler can find out what happened and continue execution at an outer -level, but it cannot repair the cause of the error and retry the -failing operation (except by re-entering the offending piece of code -from the top). 
- -When an exception is not handled at all, the interpreter terminates -execution of the program, or returns to its interactive main loop. In -either case, it prints a stack traceback, except when the exception is -"SystemExit". - -Exceptions are identified by class instances. The "except" clause is -selected depending on the class of the instance: it must reference the -class of the instance or a *non-virtual base class* thereof. The -instance can be received by the handler and can carry additional -information about the exceptional condition. - -Note: - - Exception messages are not part of the Python API. Their contents - may change from one version of Python to the next without warning - and should not be relied on by code which will run under multiple - versions of the interpreter. - -See also the description of the "try" statement in section The try -statement and "raise" statement in section The raise statement. - --[ Footnotes ]- - -[1] This limitation occurs because the code that is executed by these - operations is not available at the time the module is compiled. -''', - 'execmodel': r'''Execution model -*************** - - -Structure of a program -====================== - -A Python program is constructed from code blocks. A *block* is a piece -of Python program text that is executed as a unit. The following are -blocks: a module, a function body, and a class definition. Each -command typed interactively is a block. A script file (a file given -as standard input to the interpreter or specified as a command line -argument to the interpreter) is a code block. A script command (a -command specified on the interpreter command line with the "-c" -option) is a code block. A module run as a top level script (as module -"__main__") from the command line using a "-m" argument is also a code -block. The string argument passed to the built-in functions "eval()" -and "exec()" is a code block. - -A code block is executed in an *execution frame*. A frame contains -some administrative information (used for debugging) and determines -where and how execution continues after the code block’s execution has -completed. - - -Naming and binding -================== - - -Binding of names ----------------- - -*Names* refer to objects. Names are introduced by name binding -operations. - -The following constructs bind names: - -* formal parameters to functions, - -* class definitions, - -* function definitions, - -* assignment expressions, - -* targets that are identifiers if occurring in an assignment: - - * "for" loop header, - - * after "as" in a "with" statement, "except" clause, "except*" - clause, or in the as-pattern in structural pattern matching, - - * in a capture pattern in structural pattern matching - -* "import" statements. - -* "type" statements. - -* type parameter lists. - -The "import" statement of the form "from ... import *" binds all names -defined in the imported module, except those beginning with an -underscore. This form may only be used at the module level. - -A target occurring in a "del" statement is also considered bound for -this purpose (though the actual semantics are to unbind the name). - -Each assignment or import statement occurs within a block defined by a -class or function definition or at the module level (the top-level -code block). - -If a name is bound in a block, it is a local variable of that block, -unless declared as "nonlocal" or "global". If a name is bound at the -module level, it is a global variable. 
(The variables of the module -code block are local and global.) If a variable is used in a code -block but not defined there, it is a *free variable*. - -Each occurrence of a name in the program text refers to the *binding* -of that name established by the following name resolution rules. - - -Resolution of names -------------------- - -A *scope* defines the visibility of a name within a block. If a local -variable is defined in a block, its scope includes that block. If the -definition occurs in a function block, the scope extends to any blocks -contained within the defining one, unless a contained block introduces -a different binding for the name. - -When a name is used in a code block, it is resolved using the nearest -enclosing scope. The set of all such scopes visible to a code block -is called the block’s *environment*. - -When a name is not found at all, a "NameError" exception is raised. If -the current scope is a function scope, and the name refers to a local -variable that has not yet been bound to a value at the point where the -name is used, an "UnboundLocalError" exception is raised. -"UnboundLocalError" is a subclass of "NameError". - -If a name binding operation occurs anywhere within a code block, all -uses of the name within the block are treated as references to the -current block. This can lead to errors when a name is used within a -block before it is bound. This rule is subtle. Python lacks -declarations and allows name binding operations to occur anywhere -within a code block. The local variables of a code block can be -determined by scanning the entire text of the block for name binding -operations. See the FAQ entry on UnboundLocalError for examples. - -If the "global" statement occurs within a block, all uses of the names -specified in the statement refer to the bindings of those names in the -top-level namespace. Names are resolved in the top-level namespace by -searching the global namespace, i.e. the namespace of the module -containing the code block, and the builtins namespace, the namespace -of the module "builtins". The global namespace is searched first. If -the names are not found there, the builtins namespace is searched -next. If the names are also not found in the builtins namespace, new -variables are created in the global namespace. The global statement -must precede all uses of the listed names. - -The "global" statement has the same scope as a name binding operation -in the same block. If the nearest enclosing scope for a free variable -contains a global statement, the free variable is treated as a global. - -The "nonlocal" statement causes corresponding names to refer to -previously bound variables in the nearest enclosing function scope. -"SyntaxError" is raised at compile time if the given name does not -exist in any enclosing function scope. Type parameters cannot be -rebound with the "nonlocal" statement. - -The namespace for a module is automatically created the first time a -module is imported. The main module for a script is always called -"__main__". - -Class definition blocks and arguments to "exec()" and "eval()" are -special in the context of name resolution. A class definition is an -executable statement that may use and define names. These references -follow the normal rules for name resolution with an exception that -unbound local variables are looked up in the global namespace. The -namespace of the class definition becomes the attribute dictionary of -the class. 
The scope of names defined in a class block is limited to
-the class block; it does not extend to the code blocks of methods.
-This includes comprehensions and generator expressions, but it does
-not include annotation scopes, which have access to their enclosing
-class scopes. This means that the following will fail:
-
-   class A:
-       a = 42
-       b = list(a + i for i in range(10))
-
-However, the following will succeed:
-
-   class A:
-       type Alias = Nested
-       class Nested: pass
-
-   print(A.Alias.__value__)  # <class 'A.Nested'>
-
-
-Annotation scopes
------------------
-
-Type parameter lists and "type" statements introduce *annotation
-scopes*, which behave mostly like function scopes, but with some
-exceptions discussed below. *Annotations* currently do not use
-annotation scopes, but they are expected to use annotation scopes in
-Python 3.13 when **PEP 649** is implemented.
-
-Annotation scopes are used in the following contexts:
-
-* Type parameter lists for generic type aliases.
-
-* Type parameter lists for generic functions. A generic function's
-  annotations are executed within the annotation scope, but its
-  defaults and decorators are not.
-
-* Type parameter lists for generic classes. A generic class's base
-  classes and keyword arguments are executed within the annotation
-  scope, but its decorators are not.
-
-* The bounds, constraints, and default values for type parameters
-  (lazily evaluated).
-
-* The value of type aliases (lazily evaluated).
-
-Annotation scopes differ from function scopes in the following ways:
-
-* Annotation scopes have access to their enclosing class namespace.
-  If an annotation scope is immediately within a class scope, or
-  within another annotation scope that is immediately within a class
-  scope, the code in the annotation scope can use names defined in
-  the class scope as if it were executed directly within the class
-  body. This contrasts with regular functions defined within
-  classes, which cannot access names defined in the class scope.
-
-* Expressions in annotation scopes cannot contain "yield", "yield
-  from", "await", or ":=" expressions. (These expressions are
-  allowed in other scopes contained within the annotation scope.)
-
-* Names defined in annotation scopes cannot be rebound with
-  "nonlocal" statements in inner scopes. This includes only type
-  parameters, as no other syntactic elements that can appear within
-  annotation scopes can introduce new names.
-
-* While annotation scopes have an internal name, that name is not
-  reflected in the *qualified name* of objects defined within the
-  scope. Instead, the "__qualname__" of such objects is as if the
-  object were defined in the enclosing scope.
-
-Added in version 3.12: Annotation scopes were introduced in Python
-3.12 as part of **PEP 695**.
-
-Changed in version 3.13: Annotation scopes are also used for type
-parameter defaults, as introduced by **PEP 696**.
-
-
-Lazy evaluation
----------------
-
-The values of type aliases created through the "type" statement are
-*lazily evaluated*. The same applies to the bounds, constraints, and
-default values of type variables created through the type parameter
-syntax. This means that they are not evaluated when the type alias
-or type variable is created. Instead, they are only evaluated when
-doing so is necessary to resolve an attribute access.
-
-Example:
-
-   >>> type Alias = 1/0
-   >>> Alias.__value__
-   Traceback (most recent call last):
-     ... 
- ZeroDivisionError: division by zero - >>> def func[T: 1/0](): pass - >>> T = func.__type_params__[0] - >>> T.__bound__ - Traceback (most recent call last): - ... - ZeroDivisionError: division by zero - -Here the exception is raised only when the "__value__" attribute of -the type alias or the "__bound__" attribute of the type variable is -accessed. - -This behavior is primarily useful for references to types that have -not yet been defined when the type alias or type variable is created. -For example, lazy evaluation enables creation of mutually recursive -type aliases: - - from typing import Literal - - type SimpleExpr = int | Parenthesized - type Parenthesized = tuple[Literal["("], Expr, Literal[")"]] - type Expr = SimpleExpr | tuple[SimpleExpr, Literal["+", "-"], Expr] - -Lazily evaluated values are evaluated in annotation scope, which means -that names that appear inside the lazily evaluated value are looked up -as if they were used in the immediately enclosing scope. - -Added in version 3.12. - - -Builtins and restricted execution ---------------------------------- - -**CPython implementation detail:** Users should not touch -"__builtins__"; it is strictly an implementation detail. Users -wanting to override values in the builtins namespace should "import" -the "builtins" module and modify its attributes appropriately. - -The builtins namespace associated with the execution of a code block -is actually found by looking up the name "__builtins__" in its global -namespace; this should be a dictionary or a module (in the latter case -the module’s dictionary is used). By default, when in the "__main__" -module, "__builtins__" is the built-in module "builtins"; when in any -other module, "__builtins__" is an alias for the dictionary of the -"builtins" module itself. - - -Interaction with dynamic features ---------------------------------- - -Name resolution of free variables occurs at runtime, not at compile -time. This means that the following code will print 42: - - i = 10 - def f(): - print(i) - i = 42 - f() - -The "eval()" and "exec()" functions do not have access to the full -environment for resolving names. Names may be resolved in the local -and global namespaces of the caller. Free variables are not resolved -in the nearest enclosing namespace, but in the global namespace. [1] -The "exec()" and "eval()" functions have optional arguments to -override the global and local namespace. If only one namespace is -specified, it is used for both. - - -Exceptions -========== - -Exceptions are a means of breaking out of the normal flow of control -of a code block in order to handle errors or other exceptional -conditions. An exception is *raised* at the point where the error is -detected; it may be *handled* by the surrounding code block or by any -code block that directly or indirectly invoked the code block where -the error occurred. - -The Python interpreter raises an exception when it detects a run-time -error (such as division by zero). A Python program can also -explicitly raise an exception with the "raise" statement. Exception -handlers are specified with the "try" … "except" statement. The -"finally" clause of such a statement can be used to specify cleanup -code which does not handle the exception, but is executed whether an -exception occurred or not in the preceding code. 
- -Python uses the “termination” model of error handling: an exception -handler can find out what happened and continue execution at an outer -level, but it cannot repair the cause of the error and retry the -failing operation (except by re-entering the offending piece of code -from the top). - -When an exception is not handled at all, the interpreter terminates -execution of the program, or returns to its interactive main loop. In -either case, it prints a stack traceback, except when the exception is -"SystemExit". - -Exceptions are identified by class instances. The "except" clause is -selected depending on the class of the instance: it must reference the -class of the instance or a *non-virtual base class* thereof. The -instance can be received by the handler and can carry additional -information about the exceptional condition. - -Note: - - Exception messages are not part of the Python API. Their contents - may change from one version of Python to the next without warning - and should not be relied on by code which will run under multiple - versions of the interpreter. - -See also the description of the "try" statement in section The try -statement and "raise" statement in section The raise statement. - --[ Footnotes ]- - -[1] This limitation occurs because the code that is executed by these - operations is not available at the time the module is compiled. -''', - 'exprlists': r'''Expression lists -**************** - - starred_expression ::= ["*"] or_expr - flexible_expression ::= assignment_expression | starred_expression - flexible_expression_list ::= flexible_expression ("," flexible_expression)* [","] - starred_expression_list ::= starred_expression ("," starred_expression)* [","] - expression_list ::= expression ("," expression)* [","] - yield_list ::= expression_list | starred_expression "," [starred_expression_list] - -Except when part of a list or set display, an expression list -containing at least one comma yields a tuple. The length of the tuple -is the number of expressions in the list. The expressions are -evaluated from left to right. - -An asterisk "*" denotes *iterable unpacking*. Its operand must be an -*iterable*. The iterable is expanded into a sequence of items, which -are included in the new tuple, list, or set, at the site of the -unpacking. - -Added in version 3.5: Iterable unpacking in expression lists, -originally proposed by **PEP 448**. - -Added in version 3.11: Any item in an expression list may be starred. -See **PEP 646**. - -A trailing comma is required only to create a one-item tuple, such as -"1,"; it is optional in all other cases. A single expression without a -trailing comma doesn’t create a tuple, but rather yields the value of -that expression. (To create an empty tuple, use an empty pair of -parentheses: "()".) -''', - 'floating': r'''Floating-point literals -*********************** - -Floating-point literals are described by the following lexical -definitions: - - floatnumber ::= pointfloat | exponentfloat - pointfloat ::= [digitpart] fraction | digitpart "." - exponentfloat ::= (digitpart | pointfloat) exponent - digitpart ::= digit (["_"] digit)* - fraction ::= "." digitpart - exponent ::= ("e" | "E") ["+" | "-"] digitpart - -Note that the integer and exponent parts are always interpreted using -radix 10. For example, "077e010" is legal, and denotes the same number -as "77e10". The allowed range of floating-point literals is -implementation-dependent. As in integer literals, underscores are -supported for digit grouping. 
-
-Some examples of floating-point literals:
-
-   3.14    10.    .001    1e100    3.14e-10    0e0    3.14_15_93
-
-Changed in version 3.6: Underscores are now allowed for grouping
-purposes in literals.
-''',
- 'for': r'''The "for" statement
-*******************
-
-The "for" statement is used to iterate over the elements of a
-sequence (such as a string, tuple or list) or other iterable object:
-
-   for_stmt ::= "for" target_list "in" starred_list ":" suite
-                ["else" ":" suite]
-
-The "starred_list" expression is evaluated once; it should yield an
-*iterable* object. An *iterator* is created for that iterable. The
-first item provided by the iterator is then assigned to the target
-list using the standard rules for assignments (see Assignment
-statements), and the suite is executed. This repeats for each item
-provided by the iterator. When the iterator is exhausted, the suite
-in the "else" clause, if present, is executed, and the loop
-terminates.
-
-A "break" statement executed in the first suite terminates the loop
-without executing the "else" clause's suite. A "continue" statement
-executed in the first suite skips the rest of the suite and
-continues with the next item, or with the "else" clause if there is
-no next item.
-
-The for-loop makes assignments to the variables in the target list.
-This overwrites all previous assignments to those variables,
-including those made in the suite of the for-loop:
-
-   for i in range(10):
-       print(i)
-       i = 5              # this will not affect the for-loop
-                          # because i will be overwritten with the next
-                          # index in the range
-
-Names in the target list are not deleted when the loop is finished,
-but if the sequence is empty, they will not have been assigned to at
-all by the loop. Hint: the built-in type "range()" represents
-immutable arithmetic sequences of integers. For instance, iterating
-"range(3)" successively yields 0, 1, and then 2.
-
-Changed in version 3.11: Starred elements are now allowed in the
-expression list.
-''',
- 'formatstrings': r'''Format string syntax
-********************
-
-The "str.format()" method and the "Formatter" class share the same
-syntax for format strings (although in the case of "Formatter",
-subclasses can define their own format string syntax). The syntax
-is related to that of formatted string literals, but it is less
-sophisticated and, in particular, does not support arbitrary
-expressions.
-
-Format strings contain “replacement fields” surrounded by curly
-braces "{}". Anything that is not contained in braces is considered
-literal text, which is copied unchanged to the output. If you need
-to include a brace character in the literal text, it can be escaped
-by doubling: "{{" and "}}".
-
-The grammar for a replacement field is as follows:
-
-   replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"
-   field_name        ::= arg_name ("." attribute_name | "[" element_index "]")*
-   arg_name          ::= [identifier | digit+]
-   attribute_name    ::= identifier
-   element_index     ::= digit+ | index_string
-   index_string      ::= <any source character except "]"> +
-   conversion        ::= "r" | "s" | "a"
-   format_spec       ::= format-spec:format_spec
-
-In less formal terms, the replacement field can start with a
-*field_name* that specifies the object whose value is to be
-formatted and inserted into the output instead of the replacement
-field. The *field_name* is optionally followed by a *conversion*
-field, which is preceded by an exclamation point "'!'", and a
-*format_spec*, which is preceded by a colon "':'". These specify a
-non-default format for the replacement value. 
- -See also the Format specification mini-language section. - -The *field_name* itself begins with an *arg_name* that is either a -number or a keyword. If it’s a number, it refers to a positional -argument, and if it’s a keyword, it refers to a named keyword -argument. An *arg_name* is treated as a number if a call to -"str.isdecimal()" on the string would return true. If the numerical -arg_names in a format string are 0, 1, 2, … in sequence, they can all -be omitted (not just some) and the numbers 0, 1, 2, … will be -automatically inserted in that order. Because *arg_name* is not quote- -delimited, it is not possible to specify arbitrary dictionary keys -(e.g., the strings "'10'" or "':-]'") within a format string. The -*arg_name* can be followed by any number of index or attribute -expressions. An expression of the form "'.name'" selects the named -attribute using "getattr()", while an expression of the form -"'[index]'" does an index lookup using "__getitem__()". - -Changed in version 3.1: The positional argument specifiers can be -omitted for "str.format()", so "'{} {}'.format(a, b)" is equivalent to -"'{0} {1}'.format(a, b)". - -Changed in version 3.4: The positional argument specifiers can be -omitted for "Formatter". - -Some simple format string examples: - - "First, thou shalt count to {0}" # References first positional argument - "Bring me a {}" # Implicitly references the first positional argument - "From {} to {}" # Same as "From {0} to {1}" - "My quest is {name}" # References keyword argument 'name' - "Weight in tons {0.weight}" # 'weight' attribute of first positional arg - "Units destroyed: {players[0]}" # First element of keyword argument 'players'. - -The *conversion* field causes a type coercion before formatting. -Normally, the job of formatting a value is done by the "__format__()" -method of the value itself. However, in some cases it is desirable to -force a type to be formatted as a string, overriding its own -definition of formatting. By converting the value to a string before -calling "__format__()", the normal formatting logic is bypassed. - -Three conversion flags are currently supported: "'!s'" which calls -"str()" on the value, "'!r'" which calls "repr()" and "'!a'" which -calls "ascii()". - -Some examples: - - "Harold's a clever {0!s}" # Calls str() on the argument first - "Bring out the holy {name!r}" # Calls repr() on the argument first - "More {!a}" # Calls ascii() on the argument first - -The *format_spec* field contains a specification of how the value -should be presented, including such details as field width, alignment, -padding, decimal precision and so on. Each value type can define its -own “formatting mini-language” or interpretation of the *format_spec*. - -Most built-in types support a common formatting mini-language, which -is described in the next section. - -A *format_spec* field can also include nested replacement fields -within it. These nested replacement fields may contain a field name, -conversion flag and format specification, but deeper nesting is not -allowed. The replacement fields within the format_spec are -substituted before the *format_spec* string is interpreted. This -allows the formatting of a value to be dynamically specified. - -See the Format examples section for some examples. 
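-
-For example (an illustrative sketch), the width and precision of a
-formatted number can be supplied at run time through nested
-replacement fields:
-
-   >>> '{:{width}.{prec}f}'.format(3.14159, width=10, prec=2)
-   '      3.14'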
-
-
-Format specification mini-language
-==================================
-
-“Format specifications” are used within replacement fields contained
-within a format string to define how individual values are presented
-(see Format string syntax and f-strings). They can also be passed
-directly to the built-in "format()" function. Each formattable type
-may define how the format specification is to be interpreted.
-
-Most built-in types implement the following options for format
-specifications, although some of the formatting options are only
-supported by the numeric types.
-
-A general convention is that an empty format specification produces
-the same result as if you had called "str()" on the value. A
-non-empty format specification typically modifies the result.
-
-The general form of a *standard format specifier* is:
-
-   format_spec ::= [options][width][grouping]["." precision][type]
-   options     ::= [[fill]align][sign]["z"]["#"]["0"]
-   fill        ::= <any character>
-   align       ::= "<" | ">" | "=" | "^"
-   sign        ::= "+" | "-" | " "
-   width       ::= digit+
-   grouping    ::= "," | "_"
-   precision   ::= digit+
-   type        ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g"
-                   | "G" | "n" | "o" | "s" | "x" | "X" | "%"
-
-If a valid *align* value is specified, it can be preceded by a
-*fill* character that can be any character and defaults to a space
-if omitted. It is not possible to use a literal curly brace ("{" or
-"}") as the *fill* character in a formatted string literal or when
-using the "str.format()" method. However, it is possible to insert
-a curly brace with a nested replacement field. This limitation
-doesn't affect the "format()" function.
-
-The meaning of the various alignment options is as follows:
-
-+-----------+------------------------------------------------------------+
-| Option    | Meaning                                                    |
-|===========|============================================================|
-| "'<'"     | Forces the field to be left-aligned within the available   |
-|           | space (this is the default for most objects).              |
-+-----------+------------------------------------------------------------+
-| "'>'"     | Forces the field to be right-aligned within the available  |
-|           | space (this is the default for numbers).                   |
-+-----------+------------------------------------------------------------+
-| "'='"     | Forces the padding to be placed after the sign (if any)    |
-|           | but before the digits. This is used for printing fields    |
-|           | in the form ‘+000000120’. This alignment option is only    |
-|           | valid for numeric types, excluding "complex". It becomes   |
-|           | the default for numbers when ‘0’ immediately precedes the  |
-|           | field width.                                               |
-+-----------+------------------------------------------------------------+
-| "'^'"     | Forces the field to be centered within the available       |
-|           | space.                                                     |
-+-----------+------------------------------------------------------------+
-
-Note that unless a minimum field width is defined, the field width
-will always be the same size as the data to fill it, so that the
-alignment option has no meaning in this case.
-
-The *sign* option is only valid for number types, and can be one of
-the following:
-
-+-----------+------------------------------------------------------------+
-| Option    | Meaning                                                    |
-|===========|============================================================|
-| "'+'"     | Indicates that a sign should be used for both positive as  |
-|           | well as negative numbers. 
| -+-----------+------------------------------------------------------------+ -| "'-'" | Indicates that a sign should be used only for negative | -| | numbers (this is the default behavior). | -+-----------+------------------------------------------------------------+ -| space | Indicates that a leading space should be used on positive | -| | numbers, and a minus sign on negative numbers. | -+-----------+------------------------------------------------------------+ - -The "'z'" option coerces negative zero floating-point values to -positive zero after rounding to the format precision. This option is -only valid for floating-point presentation types. - -Changed in version 3.11: Added the "'z'" option (see also **PEP -682**). - -The "'#'" option causes the “alternate form” to be used for the -conversion. The alternate form is defined differently for different -types. This option is only valid for integer, float and complex -types. For integers, when binary, octal, or hexadecimal output is -used, this option adds the respective prefix "'0b'", "'0o'", "'0x'", -or "'0X'" to the output value. For float and complex the alternate -form causes the result of the conversion to always contain a decimal- -point character, even if no digits follow it. Normally, a decimal- -point character appears in the result of these conversions only if a -digit follows it. In addition, for "'g'" and "'G'" conversions, -trailing zeros are not removed from the result. - -The *width* is a decimal integer defining the minimum total field -width, including any prefixes, separators, and other formatting -characters. If not specified, then the field width will be determined -by the content. - -When no explicit alignment is given, preceding the *width* field by a -zero ("'0'") character enables sign-aware zero-padding for numeric -types, excluding "complex". This is equivalent to a *fill* character -of "'0'" with an *alignment* type of "'='". - -Changed in version 3.10: Preceding the *width* field by "'0'" no -longer affects the default alignment for strings. - -The *grouping* option after the *width* field specifies a digit group -separator for the integral part of a number. It can be one of the -following: - -+-----------+------------------------------------------------------------+ -| Option | Meaning | -|===========|============================================================| -| "','" | Inserts a comma every 3 digits for integer presentation | -| | type "'d'" and floating-point presentation types, | -| | excluding "'n'". For other presentation types, this option | -| | is not supported. | -+-----------+------------------------------------------------------------+ -| "'_'" | Inserts an underscore every 3 digits for integer | -| | presentation type "'d'" and floating-point presentation | -| | types, excluding "'n'". For integer presentation types | -| | "'b'", "'o'", "'x'", and "'X'", underscores are inserted | -| | every 4 digits. For other presentation types, this option | -| | is not supported. | -+-----------+------------------------------------------------------------+ - -For a locale aware separator, use the "'n'" presentation type instead. - -Changed in version 3.1: Added the "','" option (see also **PEP 378**). - -Changed in version 3.6: Added the "'_'" option (see also **PEP 515**). - -The *precision* is a decimal integer indicating how many digits should -be displayed after the decimal point for presentation types "'f'" and -"'F'", or before and after the decimal point for presentation types -"'g'" or "'G'". 
For string presentation types the field indicates the -maximum field size - in other words, how many characters will be used -from the field content. The *precision* is not allowed for integer -presentation types. - -Finally, the *type* determines how the data should be presented. - -The available string presentation types are: - - +-----------+------------------------------------------------------------+ - | Type | Meaning | - |===========|============================================================| - | "'s'" | String format. This is the default type for strings and | - | | may be omitted. | - +-----------+------------------------------------------------------------+ - | None | The same as "'s'". | - +-----------+------------------------------------------------------------+ - -The available integer presentation types are: - - +-----------+------------------------------------------------------------+ - | Type | Meaning | - |===========|============================================================| - | "'b'" | Binary format. Outputs the number in base 2. | - +-----------+------------------------------------------------------------+ - | "'c'" | Character. Converts the integer to the corresponding | - | | unicode character before printing. | - +-----------+------------------------------------------------------------+ - | "'d'" | Decimal Integer. Outputs the number in base 10. | - +-----------+------------------------------------------------------------+ - | "'o'" | Octal format. Outputs the number in base 8. | - +-----------+------------------------------------------------------------+ - | "'x'" | Hex format. Outputs the number in base 16, using lower- | - | | case letters for the digits above 9. | - +-----------+------------------------------------------------------------+ - | "'X'" | Hex format. Outputs the number in base 16, using upper- | - | | case letters for the digits above 9. In case "'#'" is | - | | specified, the prefix "'0x'" will be upper-cased to "'0X'" | - | | as well. | - +-----------+------------------------------------------------------------+ - | "'n'" | Number. This is the same as "'d'", except that it uses the | - | | current locale setting to insert the appropriate digit | - | | group separators. | - +-----------+------------------------------------------------------------+ - | None | The same as "'d'". | - +-----------+------------------------------------------------------------+ - -In addition to the above presentation types, integers can be formatted -with the floating-point presentation types listed below (except "'n'" -and "None"). When doing so, "float()" is used to convert the integer -to a floating-point number before formatting. - -The available presentation types for "float" and "Decimal" values are: - - +-----------+------------------------------------------------------------+ - | Type | Meaning | - |===========|============================================================| - | "'e'" | Scientific notation. For a given precision "p", formats | - | | the number in scientific notation with the letter ‘e’ | - | | separating the coefficient from the exponent. The | - | | coefficient has one digit before and "p" digits after the | - | | decimal point, for a total of "p + 1" significant digits. | - | | With no precision given, uses a precision of "6" digits | - | | after the decimal point for "float", and shows all | - | | coefficient digits for "Decimal". If "p=0", the decimal | - | | point is omitted unless the "#" option is used. 
For | - | | "float", the exponent always contains at least two digits, | - | | and is zero if the value is zero. | - +-----------+------------------------------------------------------------+ - | "'E'" | Scientific notation. Same as "'e'" except it uses an upper | - | | case ‘E’ as the separator character. | - +-----------+------------------------------------------------------------+ - | "'f'" | Fixed-point notation. For a given precision "p", formats | - | | the number as a decimal number with exactly "p" digits | - | | following the decimal point. With no precision given, uses | - | | a precision of "6" digits after the decimal point for | - | | "float", and uses a precision large enough to show all | - | | coefficient digits for "Decimal". If "p=0", the decimal | - | | point is omitted unless the "#" option is used. | - +-----------+------------------------------------------------------------+ - | "'F'" | Fixed-point notation. Same as "'f'", but converts "nan" to | - | | "NAN" and "inf" to "INF". | - +-----------+------------------------------------------------------------+ - | "'g'" | General format. For a given precision "p >= 1", this | - | | rounds the number to "p" significant digits and then | - | | formats the result in either fixed-point format or in | - | | scientific notation, depending on its magnitude. A | - | | precision of "0" is treated as equivalent to a precision | - | | of "1". The precise rules are as follows: suppose that | - | | the result formatted with presentation type "'e'" and | - | | precision "p-1" would have exponent "exp". Then, if "m <= | - | | exp < p", where "m" is -4 for floats and -6 for | - | | "Decimals", the number is formatted with presentation type | - | | "'f'" and precision "p-1-exp". Otherwise, the number is | - | | formatted with presentation type "'e'" and precision | - | | "p-1". In both cases insignificant trailing zeros are | - | | removed from the significand, and the decimal point is | - | | also removed if there are no remaining digits following | - | | it, unless the "'#'" option is used. With no precision | - | | given, uses a precision of "6" significant digits for | - | | "float". For "Decimal", the coefficient of the result is | - | | formed from the coefficient digits of the value; | - | | scientific notation is used for values smaller than "1e-6" | - | | in absolute value and values where the place value of the | - | | least significant digit is larger than 1, and fixed-point | - | | notation is used otherwise. Positive and negative | - | | infinity, positive and negative zero, and nans, are | - | | formatted as "inf", "-inf", "0", "-0" and "nan" | - | | respectively, regardless of the precision. | - +-----------+------------------------------------------------------------+ - | "'G'" | General format. Same as "'g'" except switches to "'E'" if | - | | the number gets too large. The representations of infinity | - | | and NaN are uppercased, too. | - +-----------+------------------------------------------------------------+ - | "'n'" | Number. This is the same as "'g'", except that it uses the | - | | current locale setting to insert the appropriate digit | - | | group separators for the integral part of a number. | - +-----------+------------------------------------------------------------+ - | "'%'" | Percentage. Multiplies the number by 100 and displays in | - | | fixed ("'f'") format, followed by a percent sign. 
| - +-----------+------------------------------------------------------------+ - | None | For "float" this is like the "'g'" type, except that when | - | | fixed- point notation is used to format the result, it | - | | always includes at least one digit past the decimal point, | - | | and switches to the scientific notation when "exp >= p - | - | | 1". When the precision is not specified, the latter will | - | | be as large as needed to represent the given value | - | | faithfully. For "Decimal", this is the same as either | - | | "'g'" or "'G'" depending on the value of | - | | "context.capitals" for the current decimal context. The | - | | overall effect is to match the output of "str()" as | - | | altered by the other format modifiers. | - +-----------+------------------------------------------------------------+ - -The result should be correctly rounded to a given precision "p" of -digits after the decimal point. The rounding mode for "float" matches -that of the "round()" builtin. For "Decimal", the rounding mode of -the current context will be used. - -The available presentation types for "complex" are the same as those -for "float" ("'%'" is not allowed). Both the real and imaginary -components of a complex number are formatted as floating-point -numbers, according to the specified presentation type. They are -separated by the mandatory sign of the imaginary part, the latter -being terminated by a "j" suffix. If the presentation type is -missing, the result will match the output of "str()" (complex numbers -with a non-zero real part are also surrounded by parentheses), -possibly altered by other format modifiers. - - -Format examples -=============== - -This section contains examples of the "str.format()" syntax and -comparison with the old "%"-formatting. - -In most of the cases the syntax is similar to the old "%"-formatting, -with the addition of the "{}" and with ":" used instead of "%". For -example, "'%03.2f'" can be translated to "'{:03.2f}'". - -The new format syntax also supports new and different options, shown -in the following examples. - -Accessing arguments by position: - - >>> '{0}, {1}, {2}'.format('a', 'b', 'c') - 'a, b, c' - >>> '{}, {}, {}'.format('a', 'b', 'c') # 3.1+ only - 'a, b, c' - >>> '{2}, {1}, {0}'.format('a', 'b', 'c') - 'c, b, a' - >>> '{2}, {1}, {0}'.format(*'abc') # unpacking argument sequence - 'c, b, a' - >>> '{0}{1}{0}'.format('abra', 'cad') # arguments' indices can be repeated - 'abracadabra' - -Accessing arguments by name: - - >>> 'Coordinates: {latitude}, {longitude}'.format(latitude='37.24N', longitude='-115.81W') - 'Coordinates: 37.24N, -115.81W' - >>> coord = {'latitude': '37.24N', 'longitude': '-115.81W'} - >>> 'Coordinates: {latitude}, {longitude}'.format(**coord) - 'Coordinates: 37.24N, -115.81W' - -Accessing arguments’ attributes: - - >>> c = 3-5j - >>> ('The complex number {0} is formed from the real part {0.real} ' - ... 'and the imaginary part {0.imag}.').format(c) - 'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.' - >>> class Point: - ... def __init__(self, x, y): - ... self.x, self.y = x, y - ... def __str__(self): - ... return 'Point({self.x}, {self.y})'.format(self=self) - ... 
- >>> str(Point(4, 2)) - 'Point(4, 2)' - -Accessing arguments’ items: - - >>> coord = (3, 5) - >>> 'X: {0[0]}; Y: {0[1]}'.format(coord) - 'X: 3; Y: 5' - -Replacing "%s" and "%r": - - >>> "repr() shows quotes: {!r}; str() doesn't: {!s}".format('test1', 'test2') - "repr() shows quotes: 'test1'; str() doesn't: test2" - -Aligning the text and specifying a width: - - >>> '{:<30}'.format('left aligned') - 'left aligned ' - >>> '{:>30}'.format('right aligned') - ' right aligned' - >>> '{:^30}'.format('centered') - ' centered ' - >>> '{:*^30}'.format('centered') # use '*' as a fill char - '***********centered***********' - -Replacing "%+f", "%-f", and "% f" and specifying a sign: - - >>> '{:+f}; {:+f}'.format(3.14, -3.14) # show it always - '+3.140000; -3.140000' - >>> '{: f}; {: f}'.format(3.14, -3.14) # show a space for positive numbers - ' 3.140000; -3.140000' - >>> '{:-f}; {:-f}'.format(3.14, -3.14) # show only the minus -- same as '{:f}; {:f}' - '3.140000; -3.140000' - -Replacing "%x" and "%o" and converting the value to different bases: - - >>> # format also supports binary numbers - >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42) - 'int: 42; hex: 2a; oct: 52; bin: 101010' - >>> # with 0x, 0o, or 0b as prefix: - >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42) - 'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010' - -Using the comma or the underscore as a digit group separator: - - >>> '{:,}'.format(1234567890) - '1,234,567,890' - >>> '{:_}'.format(1234567890) - '1_234_567_890' - >>> '{:_b}'.format(1234567890) - '100_1001_1001_0110_0000_0010_1101_0010' - >>> '{:_x}'.format(1234567890) - '4996_02d2' - -Expressing a percentage: - - >>> points = 19 - >>> total = 22 - >>> 'Correct answers: {:.2%}'.format(points/total) - 'Correct answers: 86.36%' - -Using type-specific formatting: - - >>> import datetime as dt - >>> d = dt.datetime(2010, 7, 4, 12, 15, 58) - >>> '{:%Y-%m-%d %H:%M:%S}'.format(d) - '2010-07-04 12:15:58' - -Nesting arguments and more complex examples: - - >>> for align, text in zip('<^>', ['left', 'center', 'right']): - ... '{0:{fill}{align}16}'.format(text, fill=align, align=align) - ... - 'left<<<<<<<<<<<<' - '^^^^^center^^^^^' - '>>>>>>>>>>>right' - >>> - >>> octets = [192, 168, 0, 1] - >>> '{:02X}{:02X}{:02X}{:02X}'.format(*octets) - 'C0A80001' - >>> int(_, 16) - 3232235521 - >>> - >>> width = 5 - >>> for num in range(5,12): - ... for base in 'dXob': - ... print('{0:{width}{base}}'.format(num, base=base, width=width), end=' ') - ... print() - ... 
- 5 5 5 101 - 6 6 6 110 - 7 7 7 111 - 8 8 10 1000 - 9 9 11 1001 - 10 A 12 1010 - 11 B 13 1011 -''', - 'function': r'''Function definitions -******************** - -A function definition defines a user-defined function object (see -section The standard type hierarchy): - - funcdef ::= [decorators] "def" funcname [type_params] "(" [parameter_list] ")" - ["->" expression] ":" suite - decorators ::= decorator+ - decorator ::= "@" assignment_expression NEWLINE - parameter_list ::= defparameter ("," defparameter)* "," "/" ["," [parameter_list_no_posonly]] - | parameter_list_no_posonly - parameter_list_no_posonly ::= defparameter ("," defparameter)* ["," [parameter_list_starargs]] - | parameter_list_starargs - parameter_list_starargs ::= "*" [star_parameter] ("," defparameter)* ["," [parameter_star_kwargs]] - | "*" ("," defparameter)+ ["," [parameter_star_kwargs]] - | parameter_star_kwargs - parameter_star_kwargs ::= "**" parameter [","] - parameter ::= identifier [":" expression] - star_parameter ::= identifier [":" ["*"] expression] - defparameter ::= parameter ["=" expression] - funcname ::= identifier - -A function definition is an executable statement. Its execution binds -the function name in the current local namespace to a function object -(a wrapper around the executable code for the function). This -function object contains a reference to the current global namespace -as the global namespace to be used when the function is called. - -The function definition does not execute the function body; this gets -executed only when the function is called. [4] - -A function definition may be wrapped by one or more *decorator* -expressions. Decorator expressions are evaluated when the function is -defined, in the scope that contains the function definition. The -result must be a callable, which is invoked with the function object -as the only argument. The returned value is bound to the function name -instead of the function object. Multiple decorators are applied in -nested fashion. For example, the following code - - @f1(arg) - @f2 - def func(): pass - -is roughly equivalent to - - def func(): pass - func = f1(arg)(f2(func)) - -except that the original function is not temporarily bound to the name -"func". - -Changed in version 3.9: Functions may be decorated with any valid -"assignment_expression". Previously, the grammar was much more -restrictive; see **PEP 614** for details. - -A list of type parameters may be given in square brackets between the -function’s name and the opening parenthesis for its parameter list. -This indicates to static type checkers that the function is generic. -At runtime, the type parameters can be retrieved from the function’s -"__type_params__" attribute. See Generic functions for more. - -Changed in version 3.12: Type parameter lists are new in Python 3.12. - -When one or more *parameters* have the form *parameter* "=" -*expression*, the function is said to have “default parameter values.” -For a parameter with a default value, the corresponding *argument* may -be omitted from a call, in which case the parameter’s default value is -substituted. If a parameter has a default value, all following -parameters up until the “"*"” must also have a default value — this is -a syntactic restriction that is not expressed by the grammar. 
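-
-As an illustrative sketch (an assumed interactive session, not part of
-the original text; the exact "SyntaxError" message varies between
-Python versions), the restriction is easy to observe:
-
-   >>> def f(a=1, b): pass
-   Traceback (most recent call last):
-     ...
-   SyntaxError: parameter without a default follows parameter with a default
-   >>> def g(a=1, *, b): pass   # accepted: "b" is keyword-only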
- -**Default parameter values are evaluated from left to right when the -function definition is executed.** This means that the expression is -evaluated once, when the function is defined, and that the same “pre- -computed” value is used for each call. This is especially important -to understand when a default parameter value is a mutable object, such -as a list or a dictionary: if the function modifies the object (e.g. -by appending an item to a list), the default parameter value is in -effect modified. This is generally not what was intended. A way -around this is to use "None" as the default, and explicitly test for -it in the body of the function, e.g.: - - def whats_on_the_telly(penguin=None): - if penguin is None: - penguin = [] - penguin.append("property of the zoo") - return penguin - -Function call semantics are described in more detail in section Calls. -A function call always assigns values to all parameters mentioned in -the parameter list, either from positional arguments, from keyword -arguments, or from default values. If the form “"*identifier"” is -present, it is initialized to a tuple receiving any excess positional -parameters, defaulting to the empty tuple. If the form -“"**identifier"” is present, it is initialized to a new ordered -mapping receiving any excess keyword arguments, defaulting to a new -empty mapping of the same type. Parameters after “"*"” or -“"*identifier"” are keyword-only parameters and may only be passed by -keyword arguments. Parameters before “"/"” are positional-only -parameters and may only be passed by positional arguments. - -Changed in version 3.8: The "/" function parameter syntax may be used -to indicate positional-only parameters. See **PEP 570** for details. - -Parameters may have an *annotation* of the form “": expression"” -following the parameter name. Any parameter may have an annotation, -even those of the form "*identifier" or "**identifier". (As a special -case, parameters of the form "*identifier" may have an annotation “": -*expression"”.) Functions may have “return” annotation of the form -“"-> expression"” after the parameter list. These annotations can be -any valid Python expression. The presence of annotations does not -change the semantics of a function. The annotation values are -available as values of a dictionary keyed by the parameters’ names in -the "__annotations__" attribute of the function object. If the -"annotations" import from "__future__" is used, annotations are -preserved as strings at runtime which enables postponed evaluation. -Otherwise, they are evaluated when the function definition is -executed. In this case annotations may be evaluated in a different -order than they appear in the source code. - -Changed in version 3.11: Parameters of the form “"*identifier"” may -have an annotation “": *expression"”. See **PEP 646**. - -It is also possible to create anonymous functions (functions not bound -to a name), for immediate use in expressions. This uses lambda -expressions, described in section Lambdas. Note that the lambda -expression is merely a shorthand for a simplified function definition; -a function defined in a “"def"” statement can be passed around or -assigned to another name just like a function defined by a lambda -expression. The “"def"” form is actually more powerful since it -allows the execution of multiple statements and annotations. - -**Programmer’s note:** Functions are first-class objects. 
A “"def"” -statement executed inside a function definition defines a local -function that can be returned or passed around. Free variables used -in the nested function can access the local variables of the function -containing the def. See section Naming and binding for details. - -See also: - - **PEP 3107** - Function Annotations - The original specification for function annotations. - - **PEP 484** - Type Hints - Definition of a standard meaning for annotations: type hints. - - **PEP 526** - Syntax for Variable Annotations - Ability to type hint variable declarations, including class - variables and instance variables. - - **PEP 563** - Postponed Evaluation of Annotations - Support for forward references within annotations by preserving - annotations in a string form at runtime instead of eager - evaluation. - - **PEP 318** - Decorators for Functions and Methods - Function and method decorators were introduced. Class decorators - were introduced in **PEP 3129**. -''', - 'global': r'''The "global" statement -********************** - - global_stmt ::= "global" identifier ("," identifier)* - -The "global" statement causes the listed identifiers to be interpreted -as globals. It would be impossible to assign to a global variable -without "global", although free variables may refer to globals without -being declared global. - -The "global" statement applies to the entire current scope (module, -function body or class definition). A "SyntaxError" is raised if a -variable is used or assigned to prior to its global declaration in the -scope. - -At the module level, all variables are global, so a "global" statement -has no effect. However, variables must still not be used or assigned -to prior to their "global" declaration. This requirement is relaxed in -the interactive prompt (*REPL*). - -**Programmer’s note:** "global" is a directive to the parser. It -applies only to code parsed at the same time as the "global" -statement. In particular, a "global" statement contained in a string -or code object supplied to the built-in "exec()" function does not -affect the code block *containing* the function call, and code -contained in such a string is unaffected by "global" statements in the -code containing the function call. The same applies to the "eval()" -and "compile()" functions. -''', - 'id-classes': r'''Reserved classes of identifiers -******************************* - -Certain classes of identifiers (besides keywords) have special -meanings. These classes are identified by the patterns of leading and -trailing underscore characters: - -"_*" - Not imported by "from module import *". - -"_" - In a "case" pattern within a "match" statement, "_" is a soft - keyword that denotes a wildcard. - - Separately, the interactive interpreter makes the result of the - last evaluation available in the variable "_". (It is stored in the - "builtins" module, alongside built-in functions like "print".) - - Elsewhere, "_" is a regular identifier. It is often used to name - “special” items, but it is not special to Python itself. - - Note: - - The name "_" is often used in conjunction with - internationalization; refer to the documentation for the - "gettext" module for more information on this convention.It is - also commonly used for unused variables. - -"__*__" - System-defined names, informally known as “dunder” names. These - names are defined by the interpreter and its implementation - (including the standard library). Current system names are - discussed in the Special method names section and elsewhere. 
More - will likely be defined in future versions of Python. *Any* use of - "__*__" names, in any context, that does not follow explicitly - documented use, is subject to breakage without warning. - -"__*" - Class-private names. Names in this category, when used within the - context of a class definition, are re-written to use a mangled form - to help avoid name clashes between “private” attributes of base and - derived classes. See section Identifiers (Names). -''', - 'identifiers': r'''Identifiers and keywords -************************ - -Identifiers (also referred to as *names*) are described by the -following lexical definitions. - -The syntax of identifiers in Python is based on the Unicode standard -annex UAX-31, with elaboration and changes as defined below; see also -**PEP 3131** for further details. - -Within the ASCII range (U+0001..U+007F), the valid characters for -identifiers include the uppercase and lowercase letters "A" through -"Z", the underscore "_" and, except for the first character, the -digits "0" through "9". Python 3.0 introduced additional characters -from outside the ASCII range (see **PEP 3131**). For these -characters, the classification uses the version of the Unicode -Character Database as included in the "unicodedata" module. - -Identifiers are unlimited in length. Case is significant. - - identifier ::= xid_start xid_continue* - id_start ::= - id_continue ::= - xid_start ::= - xid_continue ::= - -The Unicode category codes mentioned above stand for: - -* *Lu* - uppercase letters - -* *Ll* - lowercase letters - -* *Lt* - titlecase letters - -* *Lm* - modifier letters - -* *Lo* - other letters - -* *Nl* - letter numbers - -* *Mn* - nonspacing marks - -* *Mc* - spacing combining marks - -* *Nd* - decimal numbers - -* *Pc* - connector punctuations - -* *Other_ID_Start* - explicit list of characters in PropList.txt to - support backwards compatibility - -* *Other_ID_Continue* - likewise - -All identifiers are converted into the normal form NFKC while parsing; -comparison of identifiers is based on NFKC. - -A non-normative HTML file listing all valid identifier characters for -Unicode 15.1.0 can be found at -https://www.unicode.org/Public/15.1.0/ucd/DerivedCoreProperties.txt - - -Keywords -======== - -The following identifiers are used as reserved words, or *keywords* of -the language, and cannot be used as ordinary identifiers. They must -be spelled exactly as written here: - - False await else import pass - None break except in raise - True class finally is return - and continue for lambda try - as def from nonlocal while - assert del global not with - async elif if or yield - - -Soft Keywords -============= - -Added in version 3.10. - -Some identifiers are only reserved under specific contexts. These are -known as *soft keywords*. The identifiers "match", "case", "type" and -"_" can syntactically act as keywords in certain contexts, but this -distinction is done at the parser level, not when tokenizing. - -As soft keywords, their use in the grammar is possible while still -preserving compatibility with existing code that uses these names as -identifier names. - -"match", "case", and "_" are used in the "match" statement. "type" is -used in the "type" statement. - -Changed in version 3.12: "type" is now a soft keyword. - - -Reserved classes of identifiers -=============================== - -Certain classes of identifiers (besides keywords) have special -meanings. 
These classes are identified by the patterns of leading and -trailing underscore characters: - -"_*" - Not imported by "from module import *". - -"_" - In a "case" pattern within a "match" statement, "_" is a soft - keyword that denotes a wildcard. - - Separately, the interactive interpreter makes the result of the - last evaluation available in the variable "_". (It is stored in the - "builtins" module, alongside built-in functions like "print".) - - Elsewhere, "_" is a regular identifier. It is often used to name - “special” items, but it is not special to Python itself. - - Note: - - The name "_" is often used in conjunction with - internationalization; refer to the documentation for the - "gettext" module for more information on this convention.It is - also commonly used for unused variables. - -"__*__" - System-defined names, informally known as “dunder” names. These - names are defined by the interpreter and its implementation - (including the standard library). Current system names are - discussed in the Special method names section and elsewhere. More - will likely be defined in future versions of Python. *Any* use of - "__*__" names, in any context, that does not follow explicitly - documented use, is subject to breakage without warning. - -"__*" - Class-private names. Names in this category, when used within the - context of a class definition, are re-written to use a mangled form - to help avoid name clashes between “private” attributes of base and - derived classes. See section Identifiers (Names). -''', - 'if': r'''The "if" statement -****************** - -The "if" statement is used for conditional execution: - - if_stmt ::= "if" assignment_expression ":" suite - ("elif" assignment_expression ":" suite)* - ["else" ":" suite] - -It selects exactly one of the suites by evaluating the expressions one -by one until one is found to be true (see section Boolean operations -for the definition of true and false); then that suite is executed -(and no other part of the "if" statement is executed or evaluated). -If all expressions are false, the suite of the "else" clause, if -present, is executed. -''', - 'imaginary': r'''Imaginary literals -****************** - -Imaginary literals are described by the following lexical definitions: - - imagnumber ::= (floatnumber | digitpart) ("j" | "J") - -An imaginary literal yields a complex number with a real part of 0.0. -Complex numbers are represented as a pair of floating-point numbers -and have the same restrictions on their range. To create a complex -number with a nonzero real part, add a floating-point number to it, -e.g., "(3+4j)". Some examples of imaginary literals: - - 3.14j 10.j 10j .001j 1e100j 3.14e-10j 3.14_15_93j -''', - 'import': r'''The "import" statement -********************** - - import_stmt ::= "import" module ["as" identifier] ("," module ["as" identifier])* - | "from" relative_module "import" identifier ["as" identifier] - ("," identifier ["as" identifier])* - | "from" relative_module "import" "(" identifier ["as" identifier] - ("," identifier ["as" identifier])* [","] ")" - | "from" relative_module "import" "*" - module ::= (identifier ".")* identifier - relative_module ::= "."* module | "."+ - -The basic import statement (no "from" clause) is executed in two -steps: - -1. find a module, loading and initializing it if necessary - -2. define a name or names in the local namespace for the scope where - the "import" statement occurs. 
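-
-Informally (a rough approximation, not an exact equivalence), the
-statement "import spam" for a hypothetical top level module "spam"
-behaves like:
-
-   import importlib
-
-   spam = importlib.import_module("spam")   # step 1: find and load
-                                            # step 2: bind the name "spam"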
- -When the statement contains multiple clauses (separated by commas) the -two steps are carried out separately for each clause, just as though -the clauses had been separated out into individual import statements. - -The details of the first step, finding and loading modules, are -described in greater detail in the section on the import system, which -also describes the various types of packages and modules that can be -imported, as well as all the hooks that can be used to customize the -import system. Note that failures in this step may indicate either -that the module could not be located, *or* that an error occurred -while initializing the module, which includes execution of the -module’s code. - -If the requested module is retrieved successfully, it will be made -available in the local namespace in one of three ways: - -* If the module name is followed by "as", then the name following "as" - is bound directly to the imported module. - -* If no other name is specified, and the module being imported is a - top level module, the module’s name is bound in the local namespace - as a reference to the imported module - -* If the module being imported is *not* a top level module, then the - name of the top level package that contains the module is bound in - the local namespace as a reference to the top level package. The - imported module must be accessed using its full qualified name - rather than directly - -The "from" form uses a slightly more complex process: - -1. find the module specified in the "from" clause, loading and - initializing it if necessary; - -2. for each of the identifiers specified in the "import" clauses: - - 1. check if the imported module has an attribute by that name - - 2. if not, attempt to import a submodule with that name and then - check the imported module again for that attribute - - 3. if the attribute is not found, "ImportError" is raised. - - 4. otherwise, a reference to that value is stored in the local - namespace, using the name in the "as" clause if it is present, - otherwise using the attribute name - -Examples: - - import foo # foo imported and bound locally - import foo.bar.baz # foo, foo.bar, and foo.bar.baz imported, foo bound locally - import foo.bar.baz as fbb # foo, foo.bar, and foo.bar.baz imported, foo.bar.baz bound as fbb - from foo.bar import baz # foo, foo.bar, and foo.bar.baz imported, foo.bar.baz bound as baz - from foo import attr # foo imported and foo.attr bound as attr - -If the list of identifiers is replaced by a star ("'*'"), all public -names defined in the module are bound in the local namespace for the -scope where the "import" statement occurs. - -The *public names* defined by a module are determined by checking the -module’s namespace for a variable named "__all__"; if defined, it must -be a sequence of strings which are names defined or imported by that -module. Names containing non-ASCII characters must be in the -normalization form NFKC. The names given in "__all__" are all -considered public and are required to exist. If "__all__" is not -defined, the set of public names includes all names found in the -module’s namespace which do not begin with an underscore character -("'_'"). "__all__" should contain the entire public API. It is -intended to avoid accidentally exporting items that are not part of -the API (such as library modules which were imported and used within -the module). - -The wild card form of import — "from module import *" — is only -allowed at the module level. 
Attempting to use it in class or -function definitions will raise a "SyntaxError". - -When specifying what module to import you do not have to specify the -absolute name of the module. When a module or package is contained -within another package it is possible to make a relative import within -the same top package without having to mention the package name. By -using leading dots in the specified module or package after "from" you -can specify how high to traverse up the current package hierarchy -without specifying exact names. One leading dot means the current -package where the module making the import exists. Two dots means up -one package level. Three dots is up two levels, etc. So if you execute -"from . import mod" from a module in the "pkg" package then you will -end up importing "pkg.mod". If you execute "from ..subpkg2 import mod" -from within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The -specification for relative imports is contained in the Package -Relative Imports section. - -"importlib.import_module()" is provided to support applications that -determine dynamically the modules to be loaded. - -Raises an auditing event "import" with arguments "module", "filename", -"sys.path", "sys.meta_path", "sys.path_hooks". - - -Future statements -================= - -A *future statement* is a directive to the compiler that a particular -module should be compiled using syntax or semantics that will be -available in a specified future release of Python where the feature -becomes standard. - -The future statement is intended to ease migration to future versions -of Python that introduce incompatible changes to the language. It -allows use of the new features on a per-module basis before the -release in which the feature becomes standard. - - future_stmt ::= "from" "__future__" "import" feature ["as" identifier] - ("," feature ["as" identifier])* - | "from" "__future__" "import" "(" feature ["as" identifier] - ("," feature ["as" identifier])* [","] ")" - feature ::= identifier - -A future statement must appear near the top of the module. The only -lines that can appear before a future statement are: - -* the module docstring (if any), - -* comments, - -* blank lines, and - -* other future statements. - -The only feature that requires using the future statement is -"annotations" (see **PEP 563**). - -All historical features enabled by the future statement are still -recognized by Python 3. The list includes "absolute_import", -"division", "generators", "generator_stop", "unicode_literals", -"print_function", "nested_scopes" and "with_statement". They are all -redundant because they are always enabled, and only kept for backwards -compatibility. - -A future statement is recognized and treated specially at compile -time: Changes to the semantics of core constructs are often -implemented by generating different code. It may even be the case -that a new feature introduces new incompatible syntax (such as a new -reserved word), in which case the compiler may need to parse the -module differently. Such decisions cannot be pushed off until -runtime. - -For any given release, the compiler knows which feature names have -been defined, and raises a compile-time error if a future statement -contains a feature not known to it. - -The direct runtime semantics are the same as for any import statement: -there is a standard module "__future__", described later, and it will -be imported in the usual way at the time the future statement is -executed. 
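-
-For example (an illustrative module assuming the "annotations" future
-feature from **PEP 563**), the directive changes how the rest of the
-module is compiled:
-
-   from __future__ import annotations
-
-   def make_root() -> Node:   # "Node" is not defined yet; accepted,
-       return Node()          # since the annotation stays a string
-
-   class Node: pass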
- -The interesting runtime semantics depend on the specific feature -enabled by the future statement. - -Note that there is nothing special about the statement: - - import __future__ [as name] - -That is not a future statement; it’s an ordinary import statement with -no special semantics or syntax restrictions. - -Code compiled by calls to the built-in functions "exec()" and -"compile()" that occur in a module "M" containing a future statement -will, by default, use the new syntax or semantics associated with the -future statement. This can be controlled by optional arguments to -"compile()" — see the documentation of that function for details. - -A future statement typed at an interactive interpreter prompt will -take effect for the rest of the interpreter session. If an -interpreter is started with the "-i" option, is passed a script name -to execute, and the script includes a future statement, it will be in -effect in the interactive session started after the script is -executed. - -See also: - - **PEP 236** - Back to the __future__ - The original proposal for the __future__ mechanism. -''', - 'in': r'''Membership test operations -************************** - -The operators "in" and "not in" test for membership. "x in s" -evaluates to "True" if *x* is a member of *s*, and "False" otherwise. -"x not in s" returns the negation of "x in s". All built-in sequences -and set types support this as well as dictionary, for which "in" tests -whether the dictionary has a given key. For container types such as -list, tuple, set, frozenset, dict, or collections.deque, the -expression "x in y" is equivalent to "any(x is e or x == e for e in -y)". - -For the string and bytes types, "x in y" is "True" if and only if *x* -is a substring of *y*. An equivalent test is "y.find(x) != -1". -Empty strings are always considered to be a substring of any other -string, so """ in "abc"" will return "True". - -For user-defined classes which define the "__contains__()" method, "x -in y" returns "True" if "y.__contains__(x)" returns a true value, and -"False" otherwise. - -For user-defined classes which do not define "__contains__()" but do -define "__iter__()", "x in y" is "True" if some value "z", for which -the expression "x is z or x == z" is true, is produced while iterating -over "y". If an exception is raised during the iteration, it is as if -"in" raised that exception. - -Lastly, the old-style iteration protocol is tried: if a class defines -"__getitem__()", "x in y" is "True" if and only if there is a non- -negative integer index *i* such that "x is y[i] or x == y[i]", and no -lower integer index raises the "IndexError" exception. (If any other -exception is raised, it is as if "in" raised that exception). - -The operator "not in" is defined to have the inverse truth value of -"in". -''', - 'integers': r'''Integer literals -**************** - -Integer literals are described by the following lexical definitions: - - integer ::= decinteger | bininteger | octinteger | hexinteger - decinteger ::= nonzerodigit (["_"] digit)* | "0"+ (["_"] "0")* - bininteger ::= "0" ("b" | "B") (["_"] bindigit)+ - octinteger ::= "0" ("o" | "O") (["_"] octdigit)+ - hexinteger ::= "0" ("x" | "X") (["_"] hexdigit)+ - nonzerodigit ::= "1"..."9" - digit ::= "0"..."9" - bindigit ::= "0" | "1" - octdigit ::= "0"..."7" - hexdigit ::= digit | "a"..."f" | "A"..."F" - -There is no limit for the length of integer literals apart from what -can be stored in available memory. 
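-
-For instance (an assumed interactive session), a literal far wider
-than any machine word is a perfectly ordinary "int":
-
-   >>> 340282366920938463463374607431768211456 == 2 ** 128
-   True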
- -Underscores are ignored for determining the numeric value of the -literal. They can be used to group digits for enhanced readability. -One underscore can occur between digits, and after base specifiers -like "0x". - -Note that leading zeros in a non-zero decimal number are not allowed. -This is for disambiguation with C-style octal literals, which Python -used before version 3.0. - -Some examples of integer literals: - - 7 2147483647 0o177 0b100110111 - 3 79228162514264337593543950336 0o377 0xdeadbeef - 100_000_000_000 0b_1110_0101 - -Changed in version 3.6: Underscores are now allowed for grouping -purposes in literals. -''', - 'lambda': r'''Lambdas -******* - - lambda_expr ::= "lambda" [parameter_list] ":" expression - -Lambda expressions (sometimes called lambda forms) are used to create -anonymous functions. The expression "lambda parameters: expression" -yields a function object. The unnamed object behaves like a function -object defined with: - - def (parameters): - return expression - -See section Function definitions for the syntax of parameter lists. -Note that functions created with lambda expressions cannot contain -statements or annotations. -''', - 'lists': r'''List displays -************* - -A list display is a possibly empty series of expressions enclosed in -square brackets: - - list_display ::= "[" [flexible_expression_list | comprehension] "]" - -A list display yields a new list object, the contents being specified -by either a list of expressions or a comprehension. When a comma- -separated list of expressions is supplied, its elements are evaluated -from left to right and placed into the list object in that order. -When a comprehension is supplied, the list is constructed from the -elements resulting from the comprehension. -''', - 'naming': r'''Naming and binding -****************** - - -Binding of names -================ - -*Names* refer to objects. Names are introduced by name binding -operations. - -The following constructs bind names: - -* formal parameters to functions, - -* class definitions, - -* function definitions, - -* assignment expressions, - -* targets that are identifiers if occurring in an assignment: - - * "for" loop header, - - * after "as" in a "with" statement, "except" clause, "except*" - clause, or in the as-pattern in structural pattern matching, - - * in a capture pattern in structural pattern matching - -* "import" statements. - -* "type" statements. - -* type parameter lists. - -The "import" statement of the form "from ... import *" binds all names -defined in the imported module, except those beginning with an -underscore. This form may only be used at the module level. - -A target occurring in a "del" statement is also considered bound for -this purpose (though the actual semantics are to unbind the name). - -Each assignment or import statement occurs within a block defined by a -class or function definition or at the module level (the top-level -code block). - -If a name is bound in a block, it is a local variable of that block, -unless declared as "nonlocal" or "global". If a name is bound at the -module level, it is a global variable. (The variables of the module -code block are local and global.) If a variable is used in a code -block but not defined there, it is a *free variable*. - -Each occurrence of a name in the program text refers to the *binding* -of that name established by the following name resolution rules. - - -Resolution of names -=================== - -A *scope* defines the visibility of a name within a block. 
If a local -variable is defined in a block, its scope includes that block. If the -definition occurs in a function block, the scope extends to any blocks -contained within the defining one, unless a contained block introduces -a different binding for the name. - -When a name is used in a code block, it is resolved using the nearest -enclosing scope. The set of all such scopes visible to a code block -is called the block’s *environment*. - -When a name is not found at all, a "NameError" exception is raised. If -the current scope is a function scope, and the name refers to a local -variable that has not yet been bound to a value at the point where the -name is used, an "UnboundLocalError" exception is raised. -"UnboundLocalError" is a subclass of "NameError". - -If a name binding operation occurs anywhere within a code block, all -uses of the name within the block are treated as references to the -current block. This can lead to errors when a name is used within a -block before it is bound. This rule is subtle. Python lacks -declarations and allows name binding operations to occur anywhere -within a code block. The local variables of a code block can be -determined by scanning the entire text of the block for name binding -operations. See the FAQ entry on UnboundLocalError for examples. - -If the "global" statement occurs within a block, all uses of the names -specified in the statement refer to the bindings of those names in the -top-level namespace. Names are resolved in the top-level namespace by -searching the global namespace, i.e. the namespace of the module -containing the code block, and the builtins namespace, the namespace -of the module "builtins". The global namespace is searched first. If -the names are not found there, the builtins namespace is searched -next. If the names are also not found in the builtins namespace, new -variables are created in the global namespace. The global statement -must precede all uses of the listed names. - -The "global" statement has the same scope as a name binding operation -in the same block. If the nearest enclosing scope for a free variable -contains a global statement, the free variable is treated as a global. - -The "nonlocal" statement causes corresponding names to refer to -previously bound variables in the nearest enclosing function scope. -"SyntaxError" is raised at compile time if the given name does not -exist in any enclosing function scope. Type parameters cannot be -rebound with the "nonlocal" statement. - -The namespace for a module is automatically created the first time a -module is imported. The main module for a script is always called -"__main__". - -Class definition blocks and arguments to "exec()" and "eval()" are -special in the context of name resolution. A class definition is an -executable statement that may use and define names. These references -follow the normal rules for name resolution with an exception that -unbound local variables are looked up in the global namespace. The -namespace of the class definition becomes the attribute dictionary of -the class. The scope of names defined in a class block is limited to -the class block; it does not extend to the code blocks of methods. -This includes comprehensions and generator expressions, but it does -not include annotation scopes, which have access to their enclosing -class scopes. 
This means that the following will fail: - - class A: - a = 42 - b = list(a + i for i in range(10)) - -However, the following will succeed: - - class A: - type Alias = Nested - class Nested: pass - - print(A.Alias.__value__) # - - -Annotation scopes -================= - -Type parameter lists and "type" statements introduce *annotation -scopes*, which behave mostly like function scopes, but with some -exceptions discussed below. *Annotations* currently do not use -annotation scopes, but they are expected to use annotation scopes in -Python 3.13 when **PEP 649** is implemented. - -Annotation scopes are used in the following contexts: - -* Type parameter lists for generic type aliases. - -* Type parameter lists for generic functions. A generic function’s - annotations are executed within the annotation scope, but its - defaults and decorators are not. - -* Type parameter lists for generic classes. A generic class’s base - classes and keyword arguments are executed within the annotation - scope, but its decorators are not. - -* The bounds, constraints, and default values for type parameters - (lazily evaluated). - -* The value of type aliases (lazily evaluated). - -Annotation scopes differ from function scopes in the following ways: - -* Annotation scopes have access to their enclosing class namespace. If - an annotation scope is immediately within a class scope, or within - another annotation scope that is immediately within a class scope, - the code in the annotation scope can use names defined in the class - scope as if it were executed directly within the class body. This - contrasts with regular functions defined within classes, which - cannot access names defined in the class scope. - -* Expressions in annotation scopes cannot contain "yield", "yield - from", "await", or ":=" expressions. (These expressions are allowed - in other scopes contained within the annotation scope.) - -* Names defined in annotation scopes cannot be rebound with "nonlocal" - statements in inner scopes. This includes only type parameters, as - no other syntactic elements that can appear within annotation scopes - can introduce new names. - -* While annotation scopes have an internal name, that name is not - reflected in the *qualified name* of objects defined within the - scope. Instead, the "__qualname__" of such objects is as if the - object were defined in the enclosing scope. - -Added in version 3.12: Annotation scopes were introduced in Python -3.12 as part of **PEP 695**. - -Changed in version 3.13: Annotation scopes are also used for type -parameter defaults, as introduced by **PEP 696**. - - -Lazy evaluation -=============== - -The values of type aliases created through the "type" statement are -*lazily evaluated*. The same applies to the bounds, constraints, and -default values of type variables created through the type parameter -syntax. This means that they are not evaluated when the type alias or -type variable is created. Instead, they are only evaluated when doing -so is necessary to resolve an attribute access. - -Example: - - >>> type Alias = 1/0 - >>> Alias.__value__ - Traceback (most recent call last): - ... - ZeroDivisionError: division by zero - >>> def func[T: 1/0](): pass - >>> T = func.__type_params__[0] - >>> T.__bound__ - Traceback (most recent call last): - ... - ZeroDivisionError: division by zero - -Here the exception is raised only when the "__value__" attribute of -the type alias or the "__bound__" attribute of the type variable is -accessed. 
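-
-Conversely (an assumed interactive session), the same access succeeds
-once the referenced name exists, because the value is only looked up
-at that point:
-
-   >>> type Alias = Later      # "Later" is not defined yet
-   >>> class Later: pass
-   >>> Alias.__value__ is Later
-   True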
- -This behavior is primarily useful for references to types that have -not yet been defined when the type alias or type variable is created. -For example, lazy evaluation enables creation of mutually recursive -type aliases: - - from typing import Literal - - type SimpleExpr = int | Parenthesized - type Parenthesized = tuple[Literal["("], Expr, Literal[")"]] - type Expr = SimpleExpr | tuple[SimpleExpr, Literal["+", "-"], Expr] - -Lazily evaluated values are evaluated in annotation scope, which means -that names that appear inside the lazily evaluated value are looked up -as if they were used in the immediately enclosing scope. - -Added in version 3.12. - - -Builtins and restricted execution -================================= - -**CPython implementation detail:** Users should not touch -"__builtins__"; it is strictly an implementation detail. Users -wanting to override values in the builtins namespace should "import" -the "builtins" module and modify its attributes appropriately. - -The builtins namespace associated with the execution of a code block -is actually found by looking up the name "__builtins__" in its global -namespace; this should be a dictionary or a module (in the latter case -the module’s dictionary is used). By default, when in the "__main__" -module, "__builtins__" is the built-in module "builtins"; when in any -other module, "__builtins__" is an alias for the dictionary of the -"builtins" module itself. - - -Interaction with dynamic features -================================= - -Name resolution of free variables occurs at runtime, not at compile -time. This means that the following code will print 42: - - i = 10 - def f(): - print(i) - i = 42 - f() - -The "eval()" and "exec()" functions do not have access to the full -environment for resolving names. Names may be resolved in the local -and global namespaces of the caller. Free variables are not resolved -in the nearest enclosing namespace, but in the global namespace. [1] -The "exec()" and "eval()" functions have optional arguments to -override the global and local namespace. If only one namespace is -specified, it is used for both. -''', - 'nonlocal': r'''The "nonlocal" statement -************************ - - nonlocal_stmt ::= "nonlocal" identifier ("," identifier)* - -When the definition of a function or class is nested (enclosed) within -the definitions of other functions, its nonlocal scopes are the local -scopes of the enclosing functions. The "nonlocal" statement causes the -listed identifiers to refer to names previously bound in nonlocal -scopes. It allows encapsulated code to rebind such nonlocal -identifiers. If a name is bound in more than one nonlocal scope, the -nearest binding is used. If a name is not bound in any nonlocal scope, -or if there is no nonlocal scope, a "SyntaxError" is raised. - -The "nonlocal" statement applies to the entire scope of a function or -class body. A "SyntaxError" is raised if a variable is used or -assigned to prior to its nonlocal declaration in the scope. - -See also: - - **PEP 3104** - Access to Names in Outer Scopes - The specification for the "nonlocal" statement. - -**Programmer’s note:** "nonlocal" is a directive to the parser and -applies only to code parsed along with it. See the note for the -"global" statement. -''', - 'numbers': r'''Numeric literals -**************** - -There are three types of numeric literals: integers, floating-point -numbers, and imaginary numbers. 
There are no complex literals -(complex numbers can be formed by adding a real number and an -imaginary number). - -Note that numeric literals do not include a sign; a phrase like "-1" -is actually an expression composed of the unary operator ‘"-"’ and the -literal "1". -''', - 'numeric-types': r'''Emulating numeric types -*********************** - -The following methods can be defined to emulate numeric objects. -Methods corresponding to operations that are not supported by the -particular kind of number implemented (e.g., bitwise operations for -non-integral numbers) should be left undefined. - -object.__add__(self, other) -object.__sub__(self, other) -object.__mul__(self, other) -object.__matmul__(self, other) -object.__truediv__(self, other) -object.__floordiv__(self, other) -object.__mod__(self, other) -object.__divmod__(self, other) -object.__pow__(self, other[, modulo]) -object.__lshift__(self, other) -object.__rshift__(self, other) -object.__and__(self, other) -object.__xor__(self, other) -object.__or__(self, other) - - These methods are called to implement the binary arithmetic - operations ("+", "-", "*", "@", "/", "//", "%", "divmod()", - "pow()", "**", "<<", ">>", "&", "^", "|"). For instance, to - evaluate the expression "x + y", where *x* is an instance of a - class that has an "__add__()" method, "type(x).__add__(x, y)" is - called. The "__divmod__()" method should be the equivalent to - using "__floordiv__()" and "__mod__()"; it should not be related to - "__truediv__()". Note that "__pow__()" should be defined to accept - an optional third argument if the ternary version of the built-in - "pow()" function is to be supported. - - If one of those methods does not support the operation with the - supplied arguments, it should return "NotImplemented". - -object.__radd__(self, other) -object.__rsub__(self, other) -object.__rmul__(self, other) -object.__rmatmul__(self, other) -object.__rtruediv__(self, other) -object.__rfloordiv__(self, other) -object.__rmod__(self, other) -object.__rdivmod__(self, other) -object.__rpow__(self, other[, modulo]) -object.__rlshift__(self, other) -object.__rrshift__(self, other) -object.__rand__(self, other) -object.__rxor__(self, other) -object.__ror__(self, other) - - These methods are called to implement the binary arithmetic - operations ("+", "-", "*", "@", "/", "//", "%", "divmod()", - "pow()", "**", "<<", ">>", "&", "^", "|") with reflected (swapped) - operands. These functions are only called if the left operand does - not support the corresponding operation [3] and the operands are of - different types. [4] For instance, to evaluate the expression "x - - y", where *y* is an instance of a class that has an "__rsub__()" - method, "type(y).__rsub__(y, x)" is called if "type(x).__sub__(x, - y)" returns "NotImplemented". - - Note that ternary "pow()" will not try calling "__rpow__()" (the - coercion rules would become too complicated). - - Note: - - If the right operand’s type is a subclass of the left operand’s - type and that subclass provides a different implementation of the - reflected method for the operation, this method will be called - before the left operand’s non-reflected method. This behavior - allows subclasses to override their ancestors’ operations. 
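-
-A minimal sketch (a hypothetical class, not from the original text) of
-a reflected method being selected:
-
-   class Meters:
-       def __init__(self, n):
-           self.n = n
-       def __radd__(self, other):
-           # reached because int.__add__(3, Meters(2)) returns
-           # NotImplemented and the operand types differ
-           return Meters(other + self.n)
-
-   total = 3 + Meters(2)   # calls Meters.__radd__; total.n == 5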
- -object.__iadd__(self, other) -object.__isub__(self, other) -object.__imul__(self, other) -object.__imatmul__(self, other) -object.__itruediv__(self, other) -object.__ifloordiv__(self, other) -object.__imod__(self, other) -object.__ipow__(self, other[, modulo]) -object.__ilshift__(self, other) -object.__irshift__(self, other) -object.__iand__(self, other) -object.__ixor__(self, other) -object.__ior__(self, other) - - These methods are called to implement the augmented arithmetic - assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", "**=", - "<<=", ">>=", "&=", "^=", "|="). These methods should attempt to - do the operation in-place (modifying *self*) and return the result - (which could be, but does not have to be, *self*). If a specific - method is not defined, or if that method returns "NotImplemented", - the augmented assignment falls back to the normal methods. For - instance, if *x* is an instance of a class with an "__iadd__()" - method, "x += y" is equivalent to "x = x.__iadd__(y)" . If - "__iadd__()" does not exist, or if "x.__iadd__(y)" returns - "NotImplemented", "x.__add__(y)" and "y.__radd__(x)" are - considered, as with the evaluation of "x + y". In certain - situations, augmented assignment can result in unexpected errors - (see Why does a_tuple[i] += [‘item’] raise an exception when the - addition works?), but this behavior is in fact part of the data - model. - -object.__neg__(self) -object.__pos__(self) -object.__abs__(self) -object.__invert__(self) - - Called to implement the unary arithmetic operations ("-", "+", - "abs()" and "~"). - -object.__complex__(self) -object.__int__(self) -object.__float__(self) - - Called to implement the built-in functions "complex()", "int()" and - "float()". Should return a value of the appropriate type. - -object.__index__(self) - - Called to implement "operator.index()", and whenever Python needs - to losslessly convert the numeric object to an integer object (such - as in slicing, or in the built-in "bin()", "hex()" and "oct()" - functions). Presence of this method indicates that the numeric - object is an integer type. Must return an integer. - - If "__int__()", "__float__()" and "__complex__()" are not defined - then corresponding built-in functions "int()", "float()" and - "complex()" fall back to "__index__()". - -object.__round__(self[, ndigits]) -object.__trunc__(self) -object.__floor__(self) -object.__ceil__(self) - - Called to implement the built-in function "round()" and "math" - functions "trunc()", "floor()" and "ceil()". Unless *ndigits* is - passed to "__round__()" all these methods should return the value - of the object truncated to an "Integral" (typically an "int"). - - The built-in function "int()" falls back to "__trunc__()" if - neither "__int__()" nor "__index__()" is defined. - - Changed in version 3.11: The delegation of "int()" to "__trunc__()" - is deprecated. -''', - 'objects': r'''Objects, values and types -************************* - -*Objects* are Python’s abstraction for data. All data in a Python -program is represented by objects or by relations between objects. -Even code is represented by objects. - -Every object has an identity, a type and a value. An object’s -*identity* never changes once it has been created; you may think of it -as the object’s address in memory. The "is" operator compares the -identity of two objects; the "id()" function returns an integer -representing its identity. - -**CPython implementation detail:** For CPython, "id(x)" is the memory -address where "x" is stored. 
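-
-A short illustration (an assumed interactive session) of identity
-versus equality:
-
-   >>> a = [1, 2]
-   >>> b = a          # a second name for the same object
-   >>> c = [1, 2]     # an equal but distinct object
-   >>> a is b, a is c, a == c
-   (True, False, True)
-   >>> id(a) == id(b)
-   True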
- -An object’s type determines the operations that the object supports -(e.g., “does it have a length?”) and also defines the possible values -for objects of that type. The "type()" function returns an object’s -type (which is an object itself). Like its identity, an object’s -*type* is also unchangeable. [1] - -The *value* of some objects can change. Objects whose value can -change are said to be *mutable*; objects whose value is unchangeable -once they are created are called *immutable*. (The value of an -immutable container object that contains a reference to a mutable -object can change when the latter’s value is changed; however the -container is still considered immutable, because the collection of -objects it contains cannot be changed. So, immutability is not -strictly the same as having an unchangeable value, it is more subtle.) -An object’s mutability is determined by its type; for instance, -numbers, strings and tuples are immutable, while dictionaries and -lists are mutable. - -Objects are never explicitly destroyed; however, when they become -unreachable they may be garbage-collected. An implementation is -allowed to postpone garbage collection or omit it altogether — it is a -matter of implementation quality how garbage collection is -implemented, as long as no objects are collected that are still -reachable. - -**CPython implementation detail:** CPython currently uses a reference- -counting scheme with (optional) delayed detection of cyclically linked -garbage, which collects most objects as soon as they become -unreachable, but is not guaranteed to collect garbage containing -circular references. See the documentation of the "gc" module for -information on controlling the collection of cyclic garbage. Other -implementations act differently and CPython may change. Do not depend -on immediate finalization of objects when they become unreachable (so -you should always close files explicitly). - -Note that the use of the implementation’s tracing or debugging -facilities may keep objects alive that would normally be collectable. -Also note that catching an exception with a "try"…"except" statement -may keep objects alive. - -Some objects contain references to “external” resources such as open -files or windows. It is understood that these resources are freed -when the object is garbage-collected, but since garbage collection is -not guaranteed to happen, such objects also provide an explicit way to -release the external resource, usually a "close()" method. Programs -are strongly recommended to explicitly close such objects. The -"try"…"finally" statement and the "with" statement provide convenient -ways to do this. - -Some objects contain references to other objects; these are called -*containers*. Examples of containers are tuples, lists and -dictionaries. The references are part of a container’s value. In -most cases, when we talk about the value of a container, we imply the -values, not the identities of the contained objects; however, when we -talk about the mutability of a container, only the identities of the -immediately contained objects are implied. So, if an immutable -container (like a tuple) contains a reference to a mutable object, its -value changes if that mutable object is changed. - -Types affect almost all aspects of object behavior. 
Even the -importance of object identity is affected in some sense: for immutable -types, operations that compute new values may actually return a -reference to any existing object with the same type and value, while -for mutable objects this is not allowed. For example, after "a = 1; b -= 1", *a* and *b* may or may not refer to the same object with the -value one, depending on the implementation. This is because "int" is -an immutable type, so the reference to "1" can be reused. This -behaviour depends on the implementation used, so should not be relied -upon, but is something to be aware of when making use of object -identity tests. However, after "c = []; d = []", *c* and *d* are -guaranteed to refer to two different, unique, newly created empty -lists. (Note that "e = f = []" assigns the *same* object to both *e* -and *f*.) -''', - 'operator-summary': r'''Operator precedence -******************* - -The following table summarizes the operator precedence in Python, from -highest precedence (most binding) to lowest precedence (least -binding). Operators in the same box have the same precedence. Unless -the syntax is explicitly given, operators are binary. Operators in -the same box group left to right (except for exponentiation and -conditional expressions, which group from right to left). - -Note that comparisons, membership tests, and identity tests, all have -the same precedence and have a left-to-right chaining feature as -described in the Comparisons section. - -+-------------------------------------------------+---------------------------------------+ -| Operator | Description | -|=================================================|=======================================| -| "(expressions...)", "[expressions...]", "{key: | Binding or parenthesized expression, | -| value...}", "{expressions...}" | list display, dictionary display, set | -| | display | -+-------------------------------------------------+---------------------------------------+ -| "x[index]", "x[index:index]", | Subscription, slicing, call, | -| "x(arguments...)", "x.attribute" | attribute reference | -+-------------------------------------------------+---------------------------------------+ -| "await x" | Await expression | -+-------------------------------------------------+---------------------------------------+ -| "**" | Exponentiation [5] | -+-------------------------------------------------+---------------------------------------+ -| "+x", "-x", "~x" | Positive, negative, bitwise NOT | -+-------------------------------------------------+---------------------------------------+ -| "*", "@", "/", "//", "%" | Multiplication, matrix | -| | multiplication, division, floor | -| | division, remainder [6] | -+-------------------------------------------------+---------------------------------------+ -| "+", "-" | Addition and subtraction | -+-------------------------------------------------+---------------------------------------+ -| "<<", ">>" | Shifts | -+-------------------------------------------------+---------------------------------------+ -| "&" | Bitwise AND | -+-------------------------------------------------+---------------------------------------+ -| "^" | Bitwise XOR | -+-------------------------------------------------+---------------------------------------+ -| "|" | Bitwise OR | -+-------------------------------------------------+---------------------------------------+ -| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership | -| ">=", "!=", "==" | tests and identity tests | 
-+-------------------------------------------------+---------------------------------------+ -| "not x" | Boolean NOT | -+-------------------------------------------------+---------------------------------------+ -| "and" | Boolean AND | -+-------------------------------------------------+---------------------------------------+ -| "or" | Boolean OR | -+-------------------------------------------------+---------------------------------------+ -| "if" – "else" | Conditional expression | -+-------------------------------------------------+---------------------------------------+ -| "lambda" | Lambda expression | -+-------------------------------------------------+---------------------------------------+ -| ":=" | Assignment expression | -+-------------------------------------------------+---------------------------------------+ - --[ Footnotes ]- - -[1] While "abs(x%y) < abs(y)" is true mathematically, for floats it - may not be true numerically due to roundoff. For example, and - assuming a platform on which a Python float is an IEEE 754 double- - precision number, in order that "-1e-100 % 1e100" have the same - sign as "1e100", the computed result is "-1e-100 + 1e100", which - is numerically exactly equal to "1e100". The function - "math.fmod()" returns a result whose sign matches the sign of the - first argument instead, and so returns "-1e-100" in this case. - Which approach is more appropriate depends on the application. - -[2] If x is very close to an exact integer multiple of y, it’s - possible for "x//y" to be one larger than "(x-x%y)//y" due to - rounding. In such cases, Python returns the latter result, in - order to preserve that "divmod(x,y)[0] * y + x % y" be very close - to "x". - -[3] The Unicode standard distinguishes between *code points* (e.g. - U+0041) and *abstract characters* (e.g. “LATIN CAPITAL LETTER A”). - While most abstract characters in Unicode are only represented - using one code point, there is a number of abstract characters - that can in addition be represented using a sequence of more than - one code point. For example, the abstract character “LATIN - CAPITAL LETTER C WITH CEDILLA” can be represented as a single - *precomposed character* at code position U+00C7, or as a sequence - of a *base character* at code position U+0043 (LATIN CAPITAL - LETTER C), followed by a *combining character* at code position - U+0327 (COMBINING CEDILLA). - - The comparison operators on strings compare at the level of - Unicode code points. This may be counter-intuitive to humans. For - example, ""\u00C7" == "\u0043\u0327"" is "False", even though both - strings represent the same abstract character “LATIN CAPITAL - LETTER C WITH CEDILLA”. - - To compare strings at the level of abstract characters (that is, - in a way intuitive to humans), use "unicodedata.normalize()". - -[4] Due to automatic garbage-collection, free lists, and the dynamic - nature of descriptors, you may notice seemingly unusual behaviour - in certain uses of the "is" operator, like those involving - comparisons between instance methods, or constants. Check their - documentation for more info. - -[5] The power operator "**" binds less tightly than an arithmetic or - bitwise unary operator on its right, that is, "2**-1" is "0.5". - -[6] The "%" operator is also used for string formatting; the same - precedence applies. -''', - 'pass': r'''The "pass" statement -******************** - - pass_stmt ::= "pass" - -"pass" is a null operation — when it is executed, nothing happens. 
It -is useful as a placeholder when a statement is required syntactically, -but no code needs to be executed, for example: - - def f(arg): pass # a function that does nothing (yet) - - class C: pass # a class with no methods (yet) -''', - 'power': r'''The power operator -****************** - -The power operator binds more tightly than unary operators on its -left; it binds less tightly than unary operators on its right. The -syntax is: - - power ::= (await_expr | primary) ["**" u_expr] - -Thus, in an unparenthesized sequence of power and unary operators, the -operators are evaluated from right to left (this does not constrain -the evaluation order for the operands): "-1**2" results in "-1". - -The power operator has the same semantics as the built-in "pow()" -function, when called with two arguments: it yields its left argument -raised to the power of its right argument. The numeric arguments are -first converted to a common type, and the result is of that type. - -For int operands, the result has the same type as the operands unless -the second argument is negative; in that case, all arguments are -converted to float and a float result is delivered. For example, -"10**2" returns "100", but "10**-2" returns "0.01". - -Raising "0.0" to a negative power results in a "ZeroDivisionError". -Raising a negative number to a fractional power results in a "complex" -number. (In earlier versions it raised a "ValueError".) - -This operation can be customized using the special "__pow__()" and -"__rpow__()" methods. -''', - 'raise': r'''The "raise" statement -********************* - - raise_stmt ::= "raise" [expression ["from" expression]] - -If no expressions are present, "raise" re-raises the exception that is -currently being handled, which is also known as the *active -exception*. If there isn’t currently an active exception, a -"RuntimeError" exception is raised indicating that this is an error. - -Otherwise, "raise" evaluates the first expression as the exception -object. It must be either a subclass or an instance of -"BaseException". If it is a class, the exception instance will be -obtained when needed by instantiating the class with no arguments. - -The *type* of the exception is the exception instance’s class, the -*value* is the instance itself. - -A traceback object is normally created automatically when an exception -is raised and attached to it as the "__traceback__" attribute. You can -create an exception and set your own traceback in one step using the -"with_traceback()" exception method (which returns the same exception -instance, with its traceback set to its argument), like so: - - raise Exception("foo occurred").with_traceback(tracebackobj) - -The "from" clause is used for exception chaining: if given, the second -*expression* must be another exception class or instance. If the -second expression is an exception instance, it will be attached to the -raised exception as the "__cause__" attribute (which is writable). If -the expression is an exception class, the class will be instantiated -and the resulting exception instance will be attached to the raised -exception as the "__cause__" attribute. If the raised exception is not -handled, both exceptions will be printed: - - >>> try: - ... print(1 / 0) - ... except Exception as exc: - ... raise RuntimeError("Something bad happened") from exc - ... 
-   Traceback (most recent call last):
-     File "<stdin>", line 2, in <module>
-       print(1 / 0)
-       ~~^~~
-   ZeroDivisionError: division by zero
-
-   The above exception was the direct cause of the following exception:
-
-   Traceback (most recent call last):
-     File "<stdin>", line 4, in <module>
-       raise RuntimeError("Something bad happened") from exc
-   RuntimeError: Something bad happened
-
-A similar mechanism works implicitly if a new exception is raised when
-an exception is already being handled. An exception may be handled
-when an "except" or "finally" clause, or a "with" statement, is used.
-The previous exception is then attached as the new exception’s
-"__context__" attribute:
-
-   >>> try:
-   ...     print(1 / 0)
-   ... except:
-   ...     raise RuntimeError("Something bad happened")
-   ...
-   Traceback (most recent call last):
-     File "<stdin>", line 2, in <module>
-       print(1 / 0)
-       ~~^~~
-   ZeroDivisionError: division by zero
-
-   During handling of the above exception, another exception occurred:
-
-   Traceback (most recent call last):
-     File "<stdin>", line 4, in <module>
-       raise RuntimeError("Something bad happened")
-   RuntimeError: Something bad happened
-
-Exception chaining can be explicitly suppressed by specifying "None"
-in the "from" clause:
-
-   >>> try:
-   ...     print(1 / 0)
-   ... except:
-   ...     raise RuntimeError("Something bad happened") from None
-   ...
-   Traceback (most recent call last):
-     File "<stdin>", line 4, in <module>
-   RuntimeError: Something bad happened
-
-Additional information on exceptions can be found in section
-Exceptions, and information about handling exceptions is in section
-The try statement.
-
-Changed in version 3.3: "None" is now permitted as "Y" in "raise X
-from Y". Added the "__suppress_context__" attribute to suppress
-automatic display of the exception context.
-
-Changed in version 3.11: If the traceback of the active exception is
-modified in an "except" clause, a subsequent "raise" statement re-
-raises the exception with the modified traceback. Previously, the
-exception was re-raised with the traceback it had when it was caught.
-''',
- 'return': r'''The "return" statement
-**********************
-
-   return_stmt ::= "return" [expression_list]
-
-"return" may only occur syntactically nested in a function definition,
-not within a nested class definition.
-
-If an expression list is present, it is evaluated, else "None" is
-substituted.
-
-"return" leaves the current function call with the expression list (or
-"None") as return value.
-
-When "return" passes control out of a "try" statement with a "finally"
-clause, that "finally" clause is executed before really leaving the
-function.
-
-In a generator function, the "return" statement indicates that the
-generator is done and will cause "StopIteration" to be raised. The
-returned value (if any) is used as an argument to construct
-"StopIteration" and becomes the "StopIteration.value" attribute.
-
-In an asynchronous generator function, an empty "return" statement
-indicates that the asynchronous generator is done and will cause
-"StopAsyncIteration" to be raised. A non-empty "return" statement is
-a syntax error in an asynchronous generator function.
-''',
- 'sequence-types': r'''Emulating container types
-*************************
-
-The following methods can be defined to implement container objects.
-None of them are provided by the "object" class itself. Containers
-usually are *sequences* (such as "lists" or "tuples") or *mappings*
-(like *dictionaries*), but can represent other containers as well.
-The first set of methods is used either to emulate a sequence or to -emulate a mapping; the difference is that for a sequence, the -allowable keys should be the integers *k* for which "0 <= k < N" where -*N* is the length of the sequence, or "slice" objects, which define a -range of items. It is also recommended that mappings provide the -methods "keys()", "values()", "items()", "get()", "clear()", -"setdefault()", "pop()", "popitem()", "copy()", and "update()" -behaving similar to those for Python’s standard "dictionary" objects. -The "collections.abc" module provides a "MutableMapping" *abstract -base class* to help create those methods from a base set of -"__getitem__()", "__setitem__()", "__delitem__()", and "keys()". - -Mutable sequences should provide methods "append()", "clear()", -"count()", "extend()", "index()", "insert()", "pop()", "remove()", and -"reverse()", like Python standard "list" objects. Finally, sequence -types should implement addition (meaning concatenation) and -multiplication (meaning repetition) by defining the methods -"__add__()", "__radd__()", "__iadd__()", "__mul__()", "__rmul__()" and -"__imul__()" described below; they should not define other numerical -operators. - -It is recommended that both mappings and sequences implement the -"__contains__()" method to allow efficient use of the "in" operator; -for mappings, "in" should search the mapping’s keys; for sequences, it -should search through the values. It is further recommended that both -mappings and sequences implement the "__iter__()" method to allow -efficient iteration through the container; for mappings, "__iter__()" -should iterate through the object’s keys; for sequences, it should -iterate through the values. - -object.__len__(self) - - Called to implement the built-in function "len()". Should return - the length of the object, an integer ">=" 0. Also, an object that - doesn’t define a "__bool__()" method and whose "__len__()" method - returns zero is considered to be false in a Boolean context. - - **CPython implementation detail:** In CPython, the length is - required to be at most "sys.maxsize". If the length is larger than - "sys.maxsize" some features (such as "len()") may raise - "OverflowError". To prevent raising "OverflowError" by truth value - testing, an object must define a "__bool__()" method. - -object.__length_hint__(self) - - Called to implement "operator.length_hint()". Should return an - estimated length for the object (which may be greater or less than - the actual length). The length must be an integer ">=" 0. The - return value may also be "NotImplemented", which is treated the - same as if the "__length_hint__" method didn’t exist at all. This - method is purely an optimization and is never required for - correctness. - - Added in version 3.4. - -Note: - - Slicing is done exclusively with the following three methods. A - call like - - a[1:2] = b - - is translated to - - a[slice(1, 2, None)] = b - - and so forth. Missing slice items are always filled in with "None". - -object.__getitem__(self, key) - - Called to implement evaluation of "self[key]". For *sequence* - types, the accepted keys should be integers. Optionally, they may - support "slice" objects as well. Negative index support is also - optional. If *key* is of an inappropriate type, "TypeError" may be - raised; if *key* is a value outside the set of indexes for the - sequence (after any special interpretation of negative values), - "IndexError" should be raised. 
For *mapping* types, if *key* is - missing (not in the container), "KeyError" should be raised. - - Note: - - "for" loops expect that an "IndexError" will be raised for - illegal indexes to allow proper detection of the end of the - sequence. - - Note: - - When subscripting a *class*, the special class method - "__class_getitem__()" may be called instead of "__getitem__()". - See __class_getitem__ versus __getitem__ for more details. - -object.__setitem__(self, key, value) - - Called to implement assignment to "self[key]". Same note as for - "__getitem__()". This should only be implemented for mappings if - the objects support changes to the values for keys, or if new keys - can be added, or for sequences if elements can be replaced. The - same exceptions should be raised for improper *key* values as for - the "__getitem__()" method. - -object.__delitem__(self, key) - - Called to implement deletion of "self[key]". Same note as for - "__getitem__()". This should only be implemented for mappings if - the objects support removal of keys, or for sequences if elements - can be removed from the sequence. The same exceptions should be - raised for improper *key* values as for the "__getitem__()" method. - -object.__missing__(self, key) - - Called by "dict"."__getitem__()" to implement "self[key]" for dict - subclasses when key is not in the dictionary. - -object.__iter__(self) - - This method is called when an *iterator* is required for a - container. This method should return a new iterator object that can - iterate over all the objects in the container. For mappings, it - should iterate over the keys of the container. - -object.__reversed__(self) - - Called (if present) by the "reversed()" built-in to implement - reverse iteration. It should return a new iterator object that - iterates over all the objects in the container in reverse order. - - If the "__reversed__()" method is not provided, the "reversed()" - built-in will fall back to using the sequence protocol ("__len__()" - and "__getitem__()"). Objects that support the sequence protocol - should only provide "__reversed__()" if they can provide an - implementation that is more efficient than the one provided by - "reversed()". - -The membership test operators ("in" and "not in") are normally -implemented as an iteration through a container. However, container -objects can supply the following special method with a more efficient -implementation, which also does not require the object be iterable. - -object.__contains__(self, item) - - Called to implement membership test operators. Should return true - if *item* is in *self*, false otherwise. For mapping objects, this - should consider the keys of the mapping rather than the values or - the key-item pairs. - - For objects that don’t define "__contains__()", the membership test - first tries iteration via "__iter__()", then the old sequence - iteration protocol via "__getitem__()", see this section in the - language reference. -''', - 'shifting': r'''Shifting operations -******************* - -The shifting operations have lower priority than the arithmetic -operations: - - shift_expr ::= a_expr | shift_expr ("<<" | ">>") a_expr - -These operators accept integers as arguments. They shift the first -argument to the left or right by the number of bits given by the -second argument. - -The left shift operation can be customized using the special -"__lshift__()" and "__rlshift__()" methods. 
The right shift operation -can be customized using the special "__rshift__()" and "__rrshift__()" -methods. - -A right shift by *n* bits is defined as floor division by "pow(2,n)". -A left shift by *n* bits is defined as multiplication with "pow(2,n)". -''', - 'slicings': r'''Slicings -******** - -A slicing selects a range of items in a sequence object (e.g., a -string, tuple or list). Slicings may be used as expressions or as -targets in assignment or "del" statements. The syntax for a slicing: - - slicing ::= primary "[" slice_list "]" - slice_list ::= slice_item ("," slice_item)* [","] - slice_item ::= expression | proper_slice - proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ] - lower_bound ::= expression - upper_bound ::= expression - stride ::= expression - -There is ambiguity in the formal syntax here: anything that looks like -an expression list also looks like a slice list, so any subscription -can be interpreted as a slicing. Rather than further complicating the -syntax, this is disambiguated by defining that in this case the -interpretation as a subscription takes priority over the -interpretation as a slicing (this is the case if the slice list -contains no proper slice). - -The semantics for a slicing are as follows. The primary is indexed -(using the same "__getitem__()" method as normal subscription) with a -key that is constructed from the slice list, as follows. If the slice -list contains at least one comma, the key is a tuple containing the -conversion of the slice items; otherwise, the conversion of the lone -slice item is the key. The conversion of a slice item that is an -expression is that expression. The conversion of a proper slice is a -slice object (see section The standard type hierarchy) whose "start", -"stop" and "step" attributes are the values of the expressions given -as lower bound, upper bound and stride, respectively, substituting -"None" for missing expressions. -''', - 'specialattrs': r'''Special Attributes -****************** - -The implementation adds a few special read-only attributes to several -object types, where they are relevant. Some of these are not reported -by the "dir()" built-in function. - -definition.__name__ - - The name of the class, function, method, descriptor, or generator - instance. - -definition.__qualname__ - - The *qualified name* of the class, function, method, descriptor, or - generator instance. - - Added in version 3.3. - -definition.__module__ - - The name of the module in which a class or function was defined. - -definition.__doc__ - - The documentation string of a class or function, or "None" if - undefined. - -definition.__type_params__ - - The type parameters of generic classes, functions, and type - aliases. For classes and functions that are not generic, this will - be an empty tuple. - - Added in version 3.12. -''', - 'specialnames': r'''Special method names -******************** - -A class can implement certain operations that are invoked by special -syntax (such as arithmetic operations or subscripting and slicing) by -defining methods with special names. This is Python’s approach to -*operator overloading*, allowing classes to define their own behavior -with respect to language operators. For instance, if a class defines -a method named "__getitem__()", and "x" is an instance of this class, -then "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)". 
-Except where mentioned, attempts to execute an operation raise an -exception when no appropriate method is defined (typically -"AttributeError" or "TypeError"). - -Setting a special method to "None" indicates that the corresponding -operation is not available. For example, if a class sets "__iter__()" -to "None", the class is not iterable, so calling "iter()" on its -instances will raise a "TypeError" (without falling back to -"__getitem__()"). [2] - -When implementing a class that emulates any built-in type, it is -important that the emulation only be implemented to the degree that it -makes sense for the object being modelled. For example, some -sequences may work well with retrieval of individual elements, but -extracting a slice may not make sense. (One example of this is the -NodeList interface in the W3C’s Document Object Model.) - - -Basic customization -=================== - -object.__new__(cls[, ...]) - - Called to create a new instance of class *cls*. "__new__()" is a - static method (special-cased so you need not declare it as such) - that takes the class of which an instance was requested as its - first argument. The remaining arguments are those passed to the - object constructor expression (the call to the class). The return - value of "__new__()" should be the new object instance (usually an - instance of *cls*). - - Typical implementations create a new instance of the class by - invoking the superclass’s "__new__()" method using - "super().__new__(cls[, ...])" with appropriate arguments and then - modifying the newly created instance as necessary before returning - it. - - If "__new__()" is invoked during object construction and it returns - an instance of *cls*, then the new instance’s "__init__()" method - will be invoked like "__init__(self[, ...])", where *self* is the - new instance and the remaining arguments are the same as were - passed to the object constructor. - - If "__new__()" does not return an instance of *cls*, then the new - instance’s "__init__()" method will not be invoked. - - "__new__()" is intended mainly to allow subclasses of immutable - types (like int, str, or tuple) to customize instance creation. It - is also commonly overridden in custom metaclasses in order to - customize class creation. - -object.__init__(self[, ...]) - - Called after the instance has been created (by "__new__()"), but - before it is returned to the caller. The arguments are those - passed to the class constructor expression. If a base class has an - "__init__()" method, the derived class’s "__init__()" method, if - any, must explicitly call it to ensure proper initialization of the - base class part of the instance; for example: - "super().__init__([args...])". - - Because "__new__()" and "__init__()" work together in constructing - objects ("__new__()" to create it, and "__init__()" to customize - it), no non-"None" value may be returned by "__init__()"; doing so - will cause a "TypeError" to be raised at runtime. - -object.__del__(self) - - Called when the instance is about to be destroyed. This is also - called a finalizer or (improperly) a destructor. If a base class - has a "__del__()" method, the derived class’s "__del__()" method, - if any, must explicitly call it to ensure proper deletion of the - base class part of the instance. - - It is possible (though not recommended!) for the "__del__()" method - to postpone destruction of the instance by creating a new reference - to it. This is called object *resurrection*. 
It is - implementation-dependent whether "__del__()" is called a second - time when a resurrected object is about to be destroyed; the - current *CPython* implementation only calls it once. - - It is not guaranteed that "__del__()" methods are called for - objects that still exist when the interpreter exits. - "weakref.finalize" provides a straightforward way to register a - cleanup function to be called when an object is garbage collected. - - Note: - - "del x" doesn’t directly call "x.__del__()" — the former - decrements the reference count for "x" by one, and the latter is - only called when "x"’s reference count reaches zero. - - **CPython implementation detail:** It is possible for a reference - cycle to prevent the reference count of an object from going to - zero. In this case, the cycle will be later detected and deleted - by the *cyclic garbage collector*. A common cause of reference - cycles is when an exception has been caught in a local variable. - The frame’s locals then reference the exception, which references - its own traceback, which references the locals of all frames caught - in the traceback. - - See also: Documentation for the "gc" module. - - Warning: - - Due to the precarious circumstances under which "__del__()" - methods are invoked, exceptions that occur during their execution - are ignored, and a warning is printed to "sys.stderr" instead. - In particular: - - * "__del__()" can be invoked when arbitrary code is being - executed, including from any arbitrary thread. If "__del__()" - needs to take a lock or invoke any other blocking resource, it - may deadlock as the resource may already be taken by the code - that gets interrupted to execute "__del__()". - - * "__del__()" can be executed during interpreter shutdown. As a - consequence, the global variables it needs to access (including - other modules) may already have been deleted or set to "None". - Python guarantees that globals whose name begins with a single - underscore are deleted from their module before other globals - are deleted; if no other references to such globals exist, this - may help in assuring that imported modules are still available - at the time when the "__del__()" method is called. - -object.__repr__(self) - - Called by the "repr()" built-in function to compute the “official” - string representation of an object. If at all possible, this - should look like a valid Python expression that could be used to - recreate an object with the same value (given an appropriate - environment). If this is not possible, a string of the form - "<...some useful description...>" should be returned. The return - value must be a string object. If a class defines "__repr__()" but - not "__str__()", then "__repr__()" is also used when an “informal” - string representation of instances of that class is required. - - This is typically used for debugging, so it is important that the - representation is information-rich and unambiguous. A default - implementation is provided by the "object" class itself. - -object.__str__(self) - - Called by "str(object)", the default "__format__()" implementation, - and the built-in function "print()", to compute the “informal” or - nicely printable string representation of an object. The return - value must be a str object. - - This method differs from "object.__repr__()" in that there is no - expectation that "__str__()" return a valid Python expression: a - more convenient or concise representation can be used. 
-
-   The default implementation defined by the built-in type "object"
-   calls "object.__repr__()".
-
-object.__bytes__(self)
-
-   Called by bytes to compute a byte-string representation of an
-   object. This should return a "bytes" object. The "object" class
-   itself does not provide this method.
-
-object.__format__(self, format_spec)
-
-   Called by the "format()" built-in function, and by extension,
-   evaluation of formatted string literals and the "str.format()"
-   method, to produce a “formatted” string representation of an
-   object. The *format_spec* argument is a string that contains a
-   description of the formatting options desired. The interpretation
-   of the *format_spec* argument is up to the type implementing
-   "__format__()", however most classes will either delegate
-   formatting to one of the built-in types, or use a similar
-   formatting option syntax.
-
-   See Format specification mini-language for a description of the
-   standard formatting syntax.
-
-   The return value must be a string object.
-
-   The default implementation by the "object" class should be given an
-   empty *format_spec* string. It delegates to "__str__()".
-
-   Changed in version 3.4: The __format__ method of "object" itself
-   raises a "TypeError" if passed any non-empty string.
-
-   Changed in version 3.7: "object.__format__(x, '')" is now
-   equivalent to "str(x)" rather than "format(str(x), '')".
-
-object.__lt__(self, other)
-object.__le__(self, other)
-object.__eq__(self, other)
-object.__ne__(self, other)
-object.__gt__(self, other)
-object.__ge__(self, other)
-
-   These are the so-called “rich comparison” methods. The
-   correspondence between operator symbols and method names is as
-   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",
-   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls
-   "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".
-
-   A rich comparison method may return the singleton "NotImplemented"
-   if it does not implement the operation for a given pair of
-   arguments. By convention, "False" and "True" are returned for a
-   successful comparison. However, these methods can return any value,
-   so if the comparison operator is used in a Boolean context (e.g.,
-   in the condition of an "if" statement), Python will call "bool()"
-   on the value to determine if the result is true or false.
-
-   By default, "object" implements "__eq__()" by using "is", returning
-   "NotImplemented" in the case of a false comparison: "True if x is y
-   else NotImplemented". For "__ne__()", by default it delegates to
-   "__eq__()" and inverts the result unless it is "NotImplemented".
-   There are no other implied relationships among the comparison
-   operators or default implementations; for example, the truth of
-   "(x<y or x==y)" does not imply "x<=y". To automatically generate
-   ordering operations from a single root operation, see
-   "functools.total_ordering()".
-
-   There are no swapped-argument versions of these methods (to be used
-   when the left argument does not support the operation but the right
-   argument does); rather, "__lt__()" and "__gt__()" are each other’s
-   reflection, and so are "__le__()" and "__ge__()". If the operands
-   are of different types, and the right operand’s type is a direct or
-   indirect subclass of the left operand’s type, the reflected method
-   of the right operand has priority, otherwise the left operand’s
-   method has priority. Virtual subclassing is not considered.
-
-   When no appropriate method returns any value other than
-   "NotImplemented", the "==" and "!=" operators will fall back to
-   "is" and "is not", respectively.
-
-object.__hash__(self)
-
-   Called by built-in function "hash()" and for operations on members
-   of hashed collections including "set", "frozenset", and "dict".
-   The "__hash__()" method should return an integer. The only required
-   property is that objects which compare equal have the same hash
-   value; it is advised to mix together the hash values of the
-   components of the object that also play a part in comparison of
-   objects by packing them into a tuple and hashing the tuple.
-   Example:
-
-      def __hash__(self):
-          return hash((self.name, self.nick, self.color))
-
-   If a class does not define an "__eq__()" method it should not
-   define a "__hash__()" operation either; if it defines "__eq__()"
-   but not "__hash__()", its instances will not be usable as items in
-   hashable collections. If a class defines mutable objects and
-   implements an "__eq__()" method, it should not implement
-   "__hash__()", since the implementation of hashable collections
-   requires that a key’s hash value is immutable.
-
-   A class that overrides "__eq__()" and does not define "__hash__()"
-   will have its "__hash__()" implicitly set to "None". When the
-   "__hash__()" method of a class is "None", instances of the class
-   will raise an appropriate "TypeError" when a program attempts to
-   retrieve their hash value.
-
-   If a class that overrides "__eq__()" needs to retain the hash
-   implementation from a parent class, the interpreter must be told
-   this explicitly by setting "__hash__ = <ParentClass>.__hash__".
-
-   If a class that does not override "__eq__()" wishes to suppress
-   hash support, it should include "__hash__ = None" in the class
-   definition. A class which defines its own "__hash__()" that
-   explicitly raises a "TypeError" would be incorrectly identified as
-   hashable by an "isinstance(obj, collections.abc.Hashable)" call.
-
-   Note:
-
-     By default, the "__hash__()" values of str and bytes objects are
-     “salted” with an unpredictable random value. Although they
-     remain constant within an individual Python process, they are not
-     predictable between repeated invocations of Python. This is
-     intended to provide protection against a denial-of-service caused
-     by carefully chosen inputs that exploit the worst case
-     performance of a dict insertion, *O*(*n*^2) complexity. See
-     https://ocert.org/advisories/ocert-2011-003.html for details.
-     Changing hash values affects the iteration order of sets.
-     Python has never made guarantees about this ordering (and it
-     typically varies between 32-bit and 64-bit builds). See also
-     "PYTHONHASHSEED".
-
-   Changed in version 3.3: Hash randomization is enabled by default.
-
-object.__bool__(self)
-
-   Called to implement truth value testing and the built-in operation
-   "bool()"; should return "False" or "True". When this method is not
-   defined, "__len__()" is called, if it is defined, and the object is
-   considered true if its result is nonzero. If a class defines
-   neither "__len__()" nor "__bool__()" (which is true of the "object"
-   class itself), all its instances are considered true.
-
-
-Customizing attribute access
-============================
-
-The following methods can be defined to customize the meaning of
-attribute access (use of, assignment to, or deletion of "x.name") for
-class instances.
-
-object.__getattr__(self, name)
-
-   Called when the default attribute access fails with an
-   "AttributeError" (either "__getattribute__()" raises an
-   "AttributeError" because *name* is not an instance attribute or an
-   attribute in the class tree for "self"; or "__get__()" of a *name*
-   property raises "AttributeError"). This method should either
-   return the (computed) attribute value or raise an "AttributeError"
-   exception. The "object" class itself does not provide this method.
-
-   Note that if the attribute is found through the normal mechanism,
-   "__getattr__()" is not called. (This is an intentional asymmetry
-   between "__getattr__()" and "__setattr__()".) This is done both for
-   efficiency reasons and because otherwise "__getattr__()" would have
-   no way to access other attributes of the instance. Note that at
-   least for instance variables, you can take total control by not
-   inserting any values in the instance attribute dictionary (but
-   instead inserting them in another object). See the
-   "__getattribute__()" method below for a way to actually get total
-   control over attribute access.
-
-object.__getattribute__(self, name)
-
-   Called unconditionally to implement attribute accesses for
-   instances of the class. If the class also defines "__getattr__()",
-   the latter will not be called unless "__getattribute__()" either
-   calls it explicitly or raises an "AttributeError". This method
-   should return the (computed) attribute value or raise an
-   "AttributeError" exception. In order to avoid infinite recursion in
-   this method, its implementation should always call the base class
-   method with the same name to access any attributes it needs, for
-   example, "object.__getattribute__(self, name)".
-
-   Note:
-
-     This method may still be bypassed when looking up special methods
-     as the result of implicit invocation via language syntax or
-     built-in functions. See Special method lookup.
-
-   For certain sensitive attribute accesses, raises an auditing event
-   "object.__getattr__" with arguments "obj" and "name".
-
-object.__setattr__(self, name, value)
-
-   Called when an attribute assignment is attempted. This is called
-   instead of the normal mechanism (i.e. store the value in the
-   instance dictionary). *name* is the attribute name, *value* is the
-   value to be assigned to it.
-
-   If "__setattr__()" wants to assign to an instance attribute, it
-   should call the base class method with the same name, for example,
-   "object.__setattr__(self, name, value)".
-
-   For certain sensitive attribute assignments, raises an auditing
-   event "object.__setattr__" with arguments "obj", "name", "value".
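-
-   A minimal sketch (an editorial illustration, not normative text)
-   of a "__setattr__()" that validates values before delegating the
-   actual storage to "object.__setattr__()":
-
-      class Positive:
-          def __setattr__(self, name, value):
-              if value <= 0:
-                  raise ValueError(f'{name} must be positive')
-              object.__setattr__(self, name, value)
-
-      p = Positive()
-      p.size = 3        # stored via object.__setattr__()
-      # p.size = -1     would raise ValueError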
- -object.__delattr__(self, name) - - Like "__setattr__()" but for attribute deletion instead of - assignment. This should only be implemented if "del obj.name" is - meaningful for the object. - - For certain sensitive attribute deletions, raises an auditing event - "object.__delattr__" with arguments "obj" and "name". - -object.__dir__(self) - - Called when "dir()" is called on the object. An iterable must be - returned. "dir()" converts the returned iterable to a list and - sorts it. - - -Customizing module attribute access ------------------------------------ - -module.__getattr__() -module.__dir__() - -Special names "__getattr__" and "__dir__" can be also used to -customize access to module attributes. The "__getattr__" function at -the module level should accept one argument which is the name of an -attribute and return the computed value or raise an "AttributeError". -If an attribute is not found on a module object through the normal -lookup, i.e. "object.__getattribute__()", then "__getattr__" is -searched in the module "__dict__" before raising an "AttributeError". -If found, it is called with the attribute name and the result is -returned. - -The "__dir__" function should accept no arguments, and return an -iterable of strings that represents the names accessible on module. If -present, this function overrides the standard "dir()" search on a -module. - -module.__class__ - -For a more fine grained customization of the module behavior (setting -attributes, properties, etc.), one can set the "__class__" attribute -of a module object to a subclass of "types.ModuleType". For example: - - import sys - from types import ModuleType - - class VerboseModule(ModuleType): - def __repr__(self): - return f'Verbose {self.__name__}' - - def __setattr__(self, attr, value): - print(f'Setting {attr}...') - super().__setattr__(attr, value) - - sys.modules[__name__].__class__ = VerboseModule - -Note: - - Defining module "__getattr__" and setting module "__class__" only - affect lookups made using the attribute access syntax – directly - accessing the module globals (whether by code within the module, or - via a reference to the module’s globals dictionary) is unaffected. - -Changed in version 3.5: "__class__" module attribute is now writable. - -Added in version 3.7: "__getattr__" and "__dir__" module attributes. - -See also: - - **PEP 562** - Module __getattr__ and __dir__ - Describes the "__getattr__" and "__dir__" functions on modules. - - -Implementing Descriptors ------------------------- - -The following methods only apply when an instance of the class -containing the method (a so-called *descriptor* class) appears in an -*owner* class (the descriptor must be in either the owner’s class -dictionary or in the class dictionary for one of its parents). In the -examples below, “the attribute” refers to the attribute whose name is -the key of the property in the owner class’ "__dict__". The "object" -class itself does not implement any of these protocols. - -object.__get__(self, instance, owner=None) - - Called to get the attribute of the owner class (class attribute - access) or of an instance of that class (instance attribute - access). The optional *owner* argument is the owner class, while - *instance* is the instance that the attribute was accessed through, - or "None" when the attribute is accessed through the *owner*. - - This method should return the computed attribute value or raise an - "AttributeError" exception. 
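-
-   As an illustrative sketch (an editorial addition, not part of the
-   original text), a minimal read-only non-data descriptor:
-
-      class Ten:
-          def __get__(self, instance, owner=None):
-              return 10
-
-      class A:
-          x = Ten()
-
-      assert A().x == 10    # instance binding: Ten.__get__(a, A)
-      assert A.x == 10      # class binding: Ten.__get__(None, A)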
- - **PEP 252** specifies that "__get__()" is callable with one or two - arguments. Python’s own built-in descriptors support this - specification; however, it is likely that some third-party tools - have descriptors that require both arguments. Python’s own - "__getattribute__()" implementation always passes in both arguments - whether they are required or not. - -object.__set__(self, instance, value) - - Called to set the attribute on an instance *instance* of the owner - class to a new value, *value*. - - Note, adding "__set__()" or "__delete__()" changes the kind of - descriptor to a “data descriptor”. See Invoking Descriptors for - more details. - -object.__delete__(self, instance) - - Called to delete the attribute on an instance *instance* of the - owner class. - -Instances of descriptors may also have the "__objclass__" attribute -present: - -object.__objclass__ - - The attribute "__objclass__" is interpreted by the "inspect" module - as specifying the class where this object was defined (setting this - appropriately can assist in runtime introspection of dynamic class - attributes). For callables, it may indicate that an instance of the - given type (or a subclass) is expected or required as the first - positional argument (for example, CPython sets this attribute for - unbound methods that are implemented in C). - - -Invoking Descriptors --------------------- - -In general, a descriptor is an object attribute with “binding -behavior”, one whose attribute access has been overridden by methods -in the descriptor protocol: "__get__()", "__set__()", and -"__delete__()". If any of those methods are defined for an object, it -is said to be a descriptor. - -The default behavior for attribute access is to get, set, or delete -the attribute from an object’s dictionary. For instance, "a.x" has a -lookup chain starting with "a.__dict__['x']", then -"type(a).__dict__['x']", and continuing through the base classes of -"type(a)" excluding metaclasses. - -However, if the looked-up value is an object defining one of the -descriptor methods, then Python may override the default behavior and -invoke the descriptor method instead. Where this occurs in the -precedence chain depends on which descriptor methods were defined and -how they were called. - -The starting point for descriptor invocation is a binding, "a.x". How -the arguments are assembled depends on "a": - -Direct Call - The simplest and least common call is when user code directly - invokes a descriptor method: "x.__get__(a)". - -Instance Binding - If binding to an object instance, "a.x" is transformed into the - call: "type(a).__dict__['x'].__get__(a, type(a))". - -Class Binding - If binding to a class, "A.x" is transformed into the call: - "A.__dict__['x'].__get__(None, A)". - -Super Binding - A dotted lookup such as "super(A, a).x" searches - "a.__class__.__mro__" for a base class "B" following "A" and then - returns "B.__dict__['x'].__get__(a, A)". If not a descriptor, "x" - is returned unchanged. - -For instance bindings, the precedence of descriptor invocation depends -on which descriptor methods are defined. A descriptor can define any -combination of "__get__()", "__set__()" and "__delete__()". If it -does not define "__get__()", then accessing the attribute will return -the descriptor object itself unless there is a value in the object’s -instance dictionary. If the descriptor defines "__set__()" and/or -"__delete__()", it is a data descriptor; if it defines neither, it is -a non-data descriptor. 
Normally, data descriptors define both -"__get__()" and "__set__()", while non-data descriptors have just the -"__get__()" method. Data descriptors with "__get__()" and "__set__()" -(and/or "__delete__()") defined always override a redefinition in an -instance dictionary. In contrast, non-data descriptors can be -overridden by instances. - -Python methods (including those decorated with "@staticmethod" and -"@classmethod") are implemented as non-data descriptors. Accordingly, -instances can redefine and override methods. This allows individual -instances to acquire behaviors that differ from other instances of the -same class. - -The "property()" function is implemented as a data descriptor. -Accordingly, instances cannot override the behavior of a property. - - -__slots__ ---------- - -*__slots__* allow us to explicitly declare data members (like -properties) and deny the creation of "__dict__" and *__weakref__* -(unless explicitly declared in *__slots__* or available in a parent.) - -The space saved over using "__dict__" can be significant. Attribute -lookup speed can be significantly improved as well. - -object.__slots__ - - This class variable can be assigned a string, iterable, or sequence - of strings with variable names used by instances. *__slots__* - reserves space for the declared variables and prevents the - automatic creation of "__dict__" and *__weakref__* for each - instance. - -Notes on using *__slots__*: - -* When inheriting from a class without *__slots__*, the "__dict__" and - *__weakref__* attribute of the instances will always be accessible. - -* Without a "__dict__" variable, instances cannot be assigned new - variables not listed in the *__slots__* definition. Attempts to - assign to an unlisted variable name raises "AttributeError". If - dynamic assignment of new variables is desired, then add - "'__dict__'" to the sequence of strings in the *__slots__* - declaration. - -* Without a *__weakref__* variable for each instance, classes defining - *__slots__* do not support "weak references" to its instances. If - weak reference support is needed, then add "'__weakref__'" to the - sequence of strings in the *__slots__* declaration. - -* *__slots__* are implemented at the class level by creating - descriptors for each variable name. As a result, class attributes - cannot be used to set default values for instance variables defined - by *__slots__*; otherwise, the class attribute would overwrite the - descriptor assignment. - -* The action of a *__slots__* declaration is not limited to the class - where it is defined. *__slots__* declared in parents are available - in child classes. However, instances of a child subclass will get a - "__dict__" and *__weakref__* unless the subclass also defines - *__slots__* (which should only contain names of any *additional* - slots). - -* If a class defines a slot also defined in a base class, the instance - variable defined by the base class slot is inaccessible (except by - retrieving its descriptor directly from the base class). This - renders the meaning of the program undefined. In the future, a - check may be added to prevent this. - -* "TypeError" will be raised if nonempty *__slots__* are defined for a - class derived from a ""variable-length" built-in type" such as - "int", "bytes", and "tuple". - -* Any non-string *iterable* may be assigned to *__slots__*. - -* If a "dictionary" is used to assign *__slots__*, the dictionary keys - will be used as the slot names. 
The values of the dictionary can be - used to provide per-attribute docstrings that will be recognised by - "inspect.getdoc()" and displayed in the output of "help()". - -* "__class__" assignment works only if both classes have the same - *__slots__*. - -* Multiple inheritance with multiple slotted parent classes can be - used, but only one parent is allowed to have attributes created by - slots (the other bases must have empty slot layouts) - violations - raise "TypeError". - -* If an *iterator* is used for *__slots__* then a *descriptor* is - created for each of the iterator’s values. However, the *__slots__* - attribute will be an empty iterator. - - -Customizing class creation -========================== - -Whenever a class inherits from another class, "__init_subclass__()" is -called on the parent class. This way, it is possible to write classes -which change the behavior of subclasses. This is closely related to -class decorators, but where class decorators only affect the specific -class they’re applied to, "__init_subclass__" solely applies to future -subclasses of the class defining the method. - -classmethod object.__init_subclass__(cls) - - This method is called whenever the containing class is subclassed. - *cls* is then the new subclass. If defined as a normal instance - method, this method is implicitly converted to a class method. - - Keyword arguments which are given to a new class are passed to the - parent class’s "__init_subclass__". For compatibility with other - classes using "__init_subclass__", one should take out the needed - keyword arguments and pass the others over to the base class, as - in: - - class Philosopher: - def __init_subclass__(cls, /, default_name, **kwargs): - super().__init_subclass__(**kwargs) - cls.default_name = default_name - - class AustralianPhilosopher(Philosopher, default_name="Bruce"): - pass - - The default implementation "object.__init_subclass__" does nothing, - but raises an error if it is called with any arguments. - - Note: - - The metaclass hint "metaclass" is consumed by the rest of the - type machinery, and is never passed to "__init_subclass__" - implementations. The actual metaclass (rather than the explicit - hint) can be accessed as "type(cls)". - - Added in version 3.6. - -When a class is created, "type.__new__()" scans the class variables -and makes callbacks to those with a "__set_name__()" hook. - -object.__set_name__(self, owner, name) - - Automatically called at the time the owning class *owner* is - created. The object has been assigned to *name* in that class: - - class A: - x = C() # Automatically calls: x.__set_name__(A, 'x') - - If the class variable is assigned after the class is created, - "__set_name__()" will not be called automatically. If needed, - "__set_name__()" can be called directly: - - class A: - pass - - c = C() - A.x = c # The hook is not called - c.__set_name__(A, 'x') # Manually invoke the hook - - See Creating the class object for more details. - - Added in version 3.6. - - -Metaclasses ------------ - -By default, classes are constructed using "type()". The class body is -executed in a new namespace and the class name is bound locally to the -result of "type(name, bases, namespace)". - -The class creation process can be customized by passing the -"metaclass" keyword argument in the class definition line, or by -inheriting from an existing class that included such an argument. 
In -the following example, both "MyClass" and "MySubclass" are instances -of "Meta": - - class Meta(type): - pass - - class MyClass(metaclass=Meta): - pass - - class MySubclass(MyClass): - pass - -Any other keyword arguments that are specified in the class definition -are passed through to all metaclass operations described below. - -When a class definition is executed, the following steps occur: - -* MRO entries are resolved; - -* the appropriate metaclass is determined; - -* the class namespace is prepared; - -* the class body is executed; - -* the class object is created. - - -Resolving MRO entries ---------------------- - -object.__mro_entries__(self, bases) - - If a base that appears in a class definition is not an instance of - "type", then an "__mro_entries__()" method is searched on the base. - If an "__mro_entries__()" method is found, the base is substituted - with the result of a call to "__mro_entries__()" when creating the - class. The method is called with the original bases tuple passed to - the *bases* parameter, and must return a tuple of classes that will - be used instead of the base. The returned tuple may be empty: in - these cases, the original base is ignored. - -See also: - - "types.resolve_bases()" - Dynamically resolve bases that are not instances of "type". - - "types.get_original_bases()" - Retrieve a class’s “original bases” prior to modifications by - "__mro_entries__()". - - **PEP 560** - Core support for typing module and generic types. - - -Determining the appropriate metaclass -------------------------------------- - -The appropriate metaclass for a class definition is determined as -follows: - -* if no bases and no explicit metaclass are given, then "type()" is - used; - -* if an explicit metaclass is given and it is *not* an instance of - "type()", then it is used directly as the metaclass; - -* if an instance of "type()" is given as the explicit metaclass, or - bases are defined, then the most derived metaclass is used. - -The most derived metaclass is selected from the explicitly specified -metaclass (if any) and the metaclasses (i.e. "type(cls)") of all -specified base classes. The most derived metaclass is one which is a -subtype of *all* of these candidate metaclasses. If none of the -candidate metaclasses meets that criterion, then the class definition -will fail with "TypeError". - - -Preparing the class namespace ------------------------------ - -Once the appropriate metaclass has been identified, then the class -namespace is prepared. If the metaclass has a "__prepare__" attribute, -it is called as "namespace = metaclass.__prepare__(name, bases, -**kwds)" (where the additional keyword arguments, if any, come from -the class definition). The "__prepare__" method should be implemented -as a "classmethod". The namespace returned by "__prepare__" is passed -in to "__new__", but when the final class object is created the -namespace is copied into a new "dict". - -If the metaclass has no "__prepare__" attribute, then the class -namespace is initialised as an empty ordered mapping. - -See also: - - **PEP 3115** - Metaclasses in Python 3000 - Introduced the "__prepare__" namespace hook - - -Executing the class body ------------------------- - -The class body is executed (approximately) as "exec(body, globals(), -namespace)". The key difference from a normal call to "exec()" is that -lexical scoping allows the class body (including any methods) to -reference names from the current and outer scopes when the class -definition occurs inside a function. 
- -However, even when the class definition occurs inside the function, -methods defined inside the class still cannot see names defined at the -class scope. Class variables must be accessed through the first -parameter of instance or class methods, or through the implicit -lexically scoped "__class__" reference described in the next section. - - -Creating the class object -------------------------- - -Once the class namespace has been populated by executing the class -body, the class object is created by calling "metaclass(name, bases, -namespace, **kwds)" (the additional keywords passed here are the same -as those passed to "__prepare__"). - -This class object is the one that will be referenced by the zero- -argument form of "super()". "__class__" is an implicit closure -reference created by the compiler if any methods in a class body refer -to either "__class__" or "super". This allows the zero argument form -of "super()" to correctly identify the class being defined based on -lexical scoping, while the class or instance that was used to make the -current call is identified based on the first argument passed to the -method. - -**CPython implementation detail:** In CPython 3.6 and later, the -"__class__" cell is passed to the metaclass as a "__classcell__" entry -in the class namespace. If present, this must be propagated up to the -"type.__new__" call in order for the class to be initialised -correctly. Failing to do so will result in a "RuntimeError" in Python -3.8. - -When using the default metaclass "type", or any metaclass that -ultimately calls "type.__new__", the following additional -customization steps are invoked after creating the class object: - -1. The "type.__new__" method collects all of the attributes in the - class namespace that define a "__set_name__()" method; - -2. Those "__set_name__" methods are called with the class being - defined and the assigned name of that particular attribute; - -3. The "__init_subclass__()" hook is called on the immediate parent of - the new class in its method resolution order. - -After the class object is created, it is passed to the class -decorators included in the class definition (if any) and the resulting -object is bound in the local namespace as the defined class. - -When a new class is created by "type.__new__", the object provided as -the namespace parameter is copied to a new ordered mapping and the -original object is discarded. The new copy is wrapped in a read-only -proxy, which becomes the "__dict__" attribute of the class object. - -See also: - - **PEP 3135** - New super - Describes the implicit "__class__" closure reference - - -Uses for metaclasses --------------------- - -The potential uses for metaclasses are boundless. Some ideas that have -been explored include enum, logging, interface checking, automatic -delegation, automatic property creation, proxies, frameworks, and -automatic resource locking/synchronization. - - -Customizing instance and subclass checks -======================================== - -The following methods are used to override the default behavior of the -"isinstance()" and "issubclass()" built-in functions. - -In particular, the metaclass "abc.ABCMeta" implements these methods in -order to allow the addition of Abstract Base Classes (ABCs) as -“virtual base classes” to any class or type (including built-in -types), including other ABCs. - -type.__instancecheck__(self, instance) - - Return true if *instance* should be considered a (direct or - indirect) instance of *class*. 
If defined, called to implement - "isinstance(instance, class)". - -type.__subclasscheck__(self, subclass) - - Return true if *subclass* should be considered a (direct or - indirect) subclass of *class*. If defined, called to implement - "issubclass(subclass, class)". - -Note that these methods are looked up on the type (metaclass) of a -class. They cannot be defined as class methods in the actual class. -This is consistent with the lookup of special methods that are called -on instances, only in this case the instance is itself a class. - -See also: - - **PEP 3119** - Introducing Abstract Base Classes - Includes the specification for customizing "isinstance()" and - "issubclass()" behavior through "__instancecheck__()" and - "__subclasscheck__()", with motivation for this functionality in - the context of adding Abstract Base Classes (see the "abc" - module) to the language. - - -Emulating generic types -======================= - -When using *type annotations*, it is often useful to *parameterize* a -*generic type* using Python’s square-brackets notation. For example, -the annotation "list[int]" might be used to signify a "list" in which -all the elements are of type "int". - -See also: - - **PEP 484** - Type Hints - Introducing Python’s framework for type annotations - - Generic Alias Types - Documentation for objects representing parameterized generic - classes - - Generics, user-defined generics and "typing.Generic" - Documentation on how to implement generic classes that can be - parameterized at runtime and understood by static type-checkers. - -A class can *generally* only be parameterized if it defines the -special class method "__class_getitem__()". - -classmethod object.__class_getitem__(cls, key) - - Return an object representing the specialization of a generic class - by type arguments found in *key*. - - When defined on a class, "__class_getitem__()" is automatically a - class method. As such, there is no need for it to be decorated with - "@classmethod" when it is defined. - - -The purpose of *__class_getitem__* ----------------------------------- - -The purpose of "__class_getitem__()" is to allow runtime -parameterization of standard-library generic classes in order to more -easily apply *type hints* to these classes. - -To implement custom generic classes that can be parameterized at -runtime and understood by static type-checkers, users should either -inherit from a standard library class that already implements -"__class_getitem__()", or inherit from "typing.Generic", which has its -own implementation of "__class_getitem__()". - -Custom implementations of "__class_getitem__()" on classes defined -outside of the standard library may not be understood by third-party -type-checkers such as mypy. Using "__class_getitem__()" on any class -for purposes other than type hinting is discouraged. - - -*__class_getitem__* versus *__getitem__* ----------------------------------------- - -Usually, the subscription of an object using square brackets will call -the "__getitem__()" instance method defined on the object’s class. -However, if the object being subscribed is itself a class, the class -method "__class_getitem__()" may be called instead. -"__class_getitem__()" should return a GenericAlias object if it is -properly defined. 
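As an illustration, a user-defined class (the name "SortedList" is
invented here) can delegate to "types.GenericAlias", which is
effectively what the built-in containers do:

   import types

   class SortedList:
       def __class_getitem__(cls, item):
           # Return a GenericAlias so that SortedList[int] behaves
           # like list[int] does for type-hinting purposes.
           return types.GenericAlias(cls, item)

   alias = SortedList[int]
   print(alias)         # __main__.SortedList[int]
   print(type(alias))   # <class 'types.GenericAlias'>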
Presented with the *expression* "obj[x]", the Python interpreter
follows something like the following process to decide whether
"__getitem__()" or "__class_getitem__()" should be called:

   from inspect import isclass

   def subscribe(obj, x):
       """Return the result of the expression 'obj[x]'"""

       class_of_obj = type(obj)

       # If the class of obj defines __getitem__,
       # call class_of_obj.__getitem__(obj, x)
       if hasattr(class_of_obj, '__getitem__'):
           return class_of_obj.__getitem__(obj, x)

       # Else, if obj is a class and defines __class_getitem__,
       # call obj.__class_getitem__(x)
       elif isclass(obj) and hasattr(obj, '__class_getitem__'):
           return obj.__class_getitem__(x)

       # Else, raise an exception
       else:
           raise TypeError(
               f"'{class_of_obj.__name__}' object is not subscriptable"
           )

In Python, all classes are themselves instances of other classes. The
class of a class is known as that class’s *metaclass*, and most
classes have the "type" class as their metaclass. "type" does not
define "__getitem__()", meaning that expressions such as "list[int]",
"dict[str, float]" and "tuple[str, bytes]" all result in
"__class_getitem__()" being called:

   >>> # list has class "type" as its metaclass, like most classes:
   >>> type(list)
   <class 'type'>
   >>> type(dict) == type(list) == type(tuple) == type(str) == type(bytes)
   True
   >>> # "list[int]" calls "list.__class_getitem__(int)"
   >>> list[int]
   list[int]
   >>> # list.__class_getitem__ returns a GenericAlias object:
   >>> type(list[int])
   <class 'types.GenericAlias'>

However, if a class has a custom metaclass that defines
"__getitem__()", subscribing the class may result in different
behaviour. An example of this can be found in the "enum" module:

   >>> from enum import Enum
   >>> class Menu(Enum):
   ...     """A breakfast menu"""
   ...     SPAM = 'spam'
   ...     BACON = 'bacon'
   ...
   >>> # Enum classes have a custom metaclass:
   >>> type(Menu)
   <class 'enum.EnumMeta'>
   >>> # EnumMeta defines __getitem__,
   >>> # so __class_getitem__ is not called,
   >>> # and the result is not a GenericAlias object:
   >>> Menu['SPAM']
   <Menu.SPAM: 'spam'>
   >>> type(Menu['SPAM'])
   <enum 'Menu'>

See also:

  **PEP 560** - Core Support for typing module and generic types
     Introducing "__class_getitem__()", and outlining when a
     subscription results in "__class_getitem__()" being called
     instead of "__getitem__()"


Emulating callable objects
==========================

object.__call__(self[, args...])

   Called when the instance is “called” as a function; if this method
   is defined, "x(arg1, arg2, ...)" roughly translates to
   "type(x).__call__(x, arg1, ...)". The "object" class itself does
   not provide this method.


Emulating container types
=========================

The following methods can be defined to implement container objects.
None of them are provided by the "object" class itself. Containers
usually are *sequences* (such as "lists" or "tuples") or *mappings*
(like *dictionaries*), but can represent other containers as well.
The first set of methods is used either to emulate a sequence or to
emulate a mapping; the difference is that for a sequence, the
allowable keys should be the integers *k* for which "0 <= k < N"
where *N* is the length of the sequence, or "slice" objects, which
define a range of items. It is also recommended that mappings provide
the methods "keys()", "values()", "items()", "get()", "clear()",
"setdefault()", "pop()", "popitem()", "copy()", and "update()"
behaving similar to those for Python’s standard "dictionary" objects.
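For instance, a bare-bones read-only mapping might be sketched as
follows (the class name is invented; real code would normally build
on the "collections.abc" helpers discussed next):

   class LowerCaseMapping:
       """Read-only mapping that lowercases its keys on lookup."""

       def __init__(self, data):
           self._data = {k.lower(): v for k, v in data.items()}

       def __getitem__(self, key):
           return self._data[key.lower()]   # raises KeyError if absent

       def __len__(self):
           return len(self._data)

       def __iter__(self):
           return iter(self._data)          # iterates over the keys

       def __contains__(self, key):
           return key.lower() in self._data

   m = LowerCaseMapping({'Spam': 1})
   print(m['SPAM'], len(m), 'spam' in m)    # 1 1 True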
-The "collections.abc" module provides a "MutableMapping" *abstract -base class* to help create those methods from a base set of -"__getitem__()", "__setitem__()", "__delitem__()", and "keys()". - -Mutable sequences should provide methods "append()", "clear()", -"count()", "extend()", "index()", "insert()", "pop()", "remove()", and -"reverse()", like Python standard "list" objects. Finally, sequence -types should implement addition (meaning concatenation) and -multiplication (meaning repetition) by defining the methods -"__add__()", "__radd__()", "__iadd__()", "__mul__()", "__rmul__()" and -"__imul__()" described below; they should not define other numerical -operators. - -It is recommended that both mappings and sequences implement the -"__contains__()" method to allow efficient use of the "in" operator; -for mappings, "in" should search the mapping’s keys; for sequences, it -should search through the values. It is further recommended that both -mappings and sequences implement the "__iter__()" method to allow -efficient iteration through the container; for mappings, "__iter__()" -should iterate through the object’s keys; for sequences, it should -iterate through the values. - -object.__len__(self) - - Called to implement the built-in function "len()". Should return - the length of the object, an integer ">=" 0. Also, an object that - doesn’t define a "__bool__()" method and whose "__len__()" method - returns zero is considered to be false in a Boolean context. - - **CPython implementation detail:** In CPython, the length is - required to be at most "sys.maxsize". If the length is larger than - "sys.maxsize" some features (such as "len()") may raise - "OverflowError". To prevent raising "OverflowError" by truth value - testing, an object must define a "__bool__()" method. - -object.__length_hint__(self) - - Called to implement "operator.length_hint()". Should return an - estimated length for the object (which may be greater or less than - the actual length). The length must be an integer ">=" 0. The - return value may also be "NotImplemented", which is treated the - same as if the "__length_hint__" method didn’t exist at all. This - method is purely an optimization and is never required for - correctness. - - Added in version 3.4. - -Note: - - Slicing is done exclusively with the following three methods. A - call like - - a[1:2] = b - - is translated to - - a[slice(1, 2, None)] = b - - and so forth. Missing slice items are always filled in with "None". - -object.__getitem__(self, key) - - Called to implement evaluation of "self[key]". For *sequence* - types, the accepted keys should be integers. Optionally, they may - support "slice" objects as well. Negative index support is also - optional. If *key* is of an inappropriate type, "TypeError" may be - raised; if *key* is a value outside the set of indexes for the - sequence (after any special interpretation of negative values), - "IndexError" should be raised. For *mapping* types, if *key* is - missing (not in the container), "KeyError" should be raised. - - Note: - - "for" loops expect that an "IndexError" will be raised for - illegal indexes to allow proper detection of the end of the - sequence. - - Note: - - When subscripting a *class*, the special class method - "__class_getitem__()" may be called instead of "__getitem__()". - See __class_getitem__ versus __getitem__ for more details. - -object.__setitem__(self, key, value) - - Called to implement assignment to "self[key]". Same note as for - "__getitem__()". 
This should only be implemented for mappings if - the objects support changes to the values for keys, or if new keys - can be added, or for sequences if elements can be replaced. The - same exceptions should be raised for improper *key* values as for - the "__getitem__()" method. - -object.__delitem__(self, key) - - Called to implement deletion of "self[key]". Same note as for - "__getitem__()". This should only be implemented for mappings if - the objects support removal of keys, or for sequences if elements - can be removed from the sequence. The same exceptions should be - raised for improper *key* values as for the "__getitem__()" method. - -object.__missing__(self, key) - - Called by "dict"."__getitem__()" to implement "self[key]" for dict - subclasses when key is not in the dictionary. - -object.__iter__(self) - - This method is called when an *iterator* is required for a - container. This method should return a new iterator object that can - iterate over all the objects in the container. For mappings, it - should iterate over the keys of the container. - -object.__reversed__(self) - - Called (if present) by the "reversed()" built-in to implement - reverse iteration. It should return a new iterator object that - iterates over all the objects in the container in reverse order. - - If the "__reversed__()" method is not provided, the "reversed()" - built-in will fall back to using the sequence protocol ("__len__()" - and "__getitem__()"). Objects that support the sequence protocol - should only provide "__reversed__()" if they can provide an - implementation that is more efficient than the one provided by - "reversed()". - -The membership test operators ("in" and "not in") are normally -implemented as an iteration through a container. However, container -objects can supply the following special method with a more efficient -implementation, which also does not require the object be iterable. - -object.__contains__(self, item) - - Called to implement membership test operators. Should return true - if *item* is in *self*, false otherwise. For mapping objects, this - should consider the keys of the mapping rather than the values or - the key-item pairs. - - For objects that don’t define "__contains__()", the membership test - first tries iteration via "__iter__()", then the old sequence - iteration protocol via "__getitem__()", see this section in the - language reference. - - -Emulating numeric types -======================= - -The following methods can be defined to emulate numeric objects. -Methods corresponding to operations that are not supported by the -particular kind of number implemented (e.g., bitwise operations for -non-integral numbers) should be left undefined. - -object.__add__(self, other) -object.__sub__(self, other) -object.__mul__(self, other) -object.__matmul__(self, other) -object.__truediv__(self, other) -object.__floordiv__(self, other) -object.__mod__(self, other) -object.__divmod__(self, other) -object.__pow__(self, other[, modulo]) -object.__lshift__(self, other) -object.__rshift__(self, other) -object.__and__(self, other) -object.__xor__(self, other) -object.__or__(self, other) - - These methods are called to implement the binary arithmetic - operations ("+", "-", "*", "@", "/", "//", "%", "divmod()", - "pow()", "**", "<<", ">>", "&", "^", "|"). For instance, to - evaluate the expression "x + y", where *x* is an instance of a - class that has an "__add__()" method, "type(x).__add__(x, y)" is - called. 
The "__divmod__()" method should be the equivalent to - using "__floordiv__()" and "__mod__()"; it should not be related to - "__truediv__()". Note that "__pow__()" should be defined to accept - an optional third argument if the ternary version of the built-in - "pow()" function is to be supported. - - If one of those methods does not support the operation with the - supplied arguments, it should return "NotImplemented". - -object.__radd__(self, other) -object.__rsub__(self, other) -object.__rmul__(self, other) -object.__rmatmul__(self, other) -object.__rtruediv__(self, other) -object.__rfloordiv__(self, other) -object.__rmod__(self, other) -object.__rdivmod__(self, other) -object.__rpow__(self, other[, modulo]) -object.__rlshift__(self, other) -object.__rrshift__(self, other) -object.__rand__(self, other) -object.__rxor__(self, other) -object.__ror__(self, other) - - These methods are called to implement the binary arithmetic - operations ("+", "-", "*", "@", "/", "//", "%", "divmod()", - "pow()", "**", "<<", ">>", "&", "^", "|") with reflected (swapped) - operands. These functions are only called if the left operand does - not support the corresponding operation [3] and the operands are of - different types. [4] For instance, to evaluate the expression "x - - y", where *y* is an instance of a class that has an "__rsub__()" - method, "type(y).__rsub__(y, x)" is called if "type(x).__sub__(x, - y)" returns "NotImplemented". - - Note that ternary "pow()" will not try calling "__rpow__()" (the - coercion rules would become too complicated). - - Note: - - If the right operand’s type is a subclass of the left operand’s - type and that subclass provides a different implementation of the - reflected method for the operation, this method will be called - before the left operand’s non-reflected method. This behavior - allows subclasses to override their ancestors’ operations. - -object.__iadd__(self, other) -object.__isub__(self, other) -object.__imul__(self, other) -object.__imatmul__(self, other) -object.__itruediv__(self, other) -object.__ifloordiv__(self, other) -object.__imod__(self, other) -object.__ipow__(self, other[, modulo]) -object.__ilshift__(self, other) -object.__irshift__(self, other) -object.__iand__(self, other) -object.__ixor__(self, other) -object.__ior__(self, other) - - These methods are called to implement the augmented arithmetic - assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", "**=", - "<<=", ">>=", "&=", "^=", "|="). These methods should attempt to - do the operation in-place (modifying *self*) and return the result - (which could be, but does not have to be, *self*). If a specific - method is not defined, or if that method returns "NotImplemented", - the augmented assignment falls back to the normal methods. For - instance, if *x* is an instance of a class with an "__iadd__()" - method, "x += y" is equivalent to "x = x.__iadd__(y)" . If - "__iadd__()" does not exist, or if "x.__iadd__(y)" returns - "NotImplemented", "x.__add__(y)" and "y.__radd__(x)" are - considered, as with the evaluation of "x + y". In certain - situations, augmented assignment can result in unexpected errors - (see Why does a_tuple[i] += [‘item’] raise an exception when the - addition works?), but this behavior is in fact part of the data - model. - -object.__neg__(self) -object.__pos__(self) -object.__abs__(self) -object.__invert__(self) - - Called to implement the unary arithmetic operations ("-", "+", - "abs()" and "~"). 
- -object.__complex__(self) -object.__int__(self) -object.__float__(self) - - Called to implement the built-in functions "complex()", "int()" and - "float()". Should return a value of the appropriate type. - -object.__index__(self) - - Called to implement "operator.index()", and whenever Python needs - to losslessly convert the numeric object to an integer object (such - as in slicing, or in the built-in "bin()", "hex()" and "oct()" - functions). Presence of this method indicates that the numeric - object is an integer type. Must return an integer. - - If "__int__()", "__float__()" and "__complex__()" are not defined - then corresponding built-in functions "int()", "float()" and - "complex()" fall back to "__index__()". - -object.__round__(self[, ndigits]) -object.__trunc__(self) -object.__floor__(self) -object.__ceil__(self) - - Called to implement the built-in function "round()" and "math" - functions "trunc()", "floor()" and "ceil()". Unless *ndigits* is - passed to "__round__()" all these methods should return the value - of the object truncated to an "Integral" (typically an "int"). - - The built-in function "int()" falls back to "__trunc__()" if - neither "__int__()" nor "__index__()" is defined. - - Changed in version 3.11: The delegation of "int()" to "__trunc__()" - is deprecated. - - -With Statement Context Managers -=============================== - -A *context manager* is an object that defines the runtime context to -be established when executing a "with" statement. The context manager -handles the entry into, and the exit from, the desired runtime context -for the execution of the block of code. Context managers are normally -invoked using the "with" statement (described in section The with -statement), but can also be used by directly invoking their methods. - -Typical uses of context managers include saving and restoring various -kinds of global state, locking and unlocking resources, closing opened -files, etc. - -For more information on context managers, see Context Manager Types. -The "object" class itself does not provide the context manager -methods. - -object.__enter__(self) - - Enter the runtime context related to this object. The "with" - statement will bind this method’s return value to the target(s) - specified in the "as" clause of the statement, if any. - -object.__exit__(self, exc_type, exc_value, traceback) - - Exit the runtime context related to this object. The parameters - describe the exception that caused the context to be exited. If the - context was exited without an exception, all three arguments will - be "None". - - If an exception is supplied, and the method wishes to suppress the - exception (i.e., prevent it from being propagated), it should - return a true value. Otherwise, the exception will be processed - normally upon exit from this method. - - Note that "__exit__()" methods should not reraise the passed-in - exception; this is the caller’s responsibility. - -See also: - - **PEP 343** - The “with” statement - The specification, background, and examples for the Python "with" - statement. - - -Customizing positional arguments in class pattern matching -========================================================== - -When using a class name in a pattern, positional arguments in the -pattern are not allowed by default, i.e. "case MyClass(x, y)" is -typically invalid without special support in "MyClass". To be able to -use that kind of pattern, the class needs to define a *__match_args__* -attribute. 
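For instance (a sketch; the "Point" class is invented for
illustration):

   class Point:
       __match_args__ = ('x', 'y')

       def __init__(self, x, y):
           self.x = x
           self.y = y

   def where(p):
       match p:
           case Point(0, 0):        # equivalent to Point(x=0, y=0)
               return 'origin'
           case Point(x, y):
               return f'({x}, {y})'

   print(where(Point(0, 0)))   # origin
   print(where(Point(3, 4)))   # (3, 4)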
- -object.__match_args__ - - This class variable can be assigned a tuple of strings. When this - class is used in a class pattern with positional arguments, each - positional argument will be converted into a keyword argument, - using the corresponding value in *__match_args__* as the keyword. - The absence of this attribute is equivalent to setting it to "()". - -For example, if "MyClass.__match_args__" is "("left", "center", -"right")" that means that "case MyClass(x, y)" is equivalent to "case -MyClass(left=x, center=y)". Note that the number of arguments in the -pattern must be smaller than or equal to the number of elements in -*__match_args__*; if it is larger, the pattern match attempt will -raise a "TypeError". - -Added in version 3.10. - -See also: - - **PEP 634** - Structural Pattern Matching - The specification for the Python "match" statement. - - -Emulating buffer types -====================== - -The buffer protocol provides a way for Python objects to expose -efficient access to a low-level memory array. This protocol is -implemented by builtin types such as "bytes" and "memoryview", and -third-party libraries may define additional buffer types. - -While buffer types are usually implemented in C, it is also possible -to implement the protocol in Python. - -object.__buffer__(self, flags) - - Called when a buffer is requested from *self* (for example, by the - "memoryview" constructor). The *flags* argument is an integer - representing the kind of buffer requested, affecting for example - whether the returned buffer is read-only or writable. - "inspect.BufferFlags" provides a convenient way to interpret the - flags. The method must return a "memoryview" object. - -object.__release_buffer__(self, buffer) - - Called when a buffer is no longer needed. The *buffer* argument is - a "memoryview" object that was previously returned by - "__buffer__()". The method must release any resources associated - with the buffer. This method should return "None". Buffer objects - that do not need to perform any cleanup are not required to - implement this method. - -Added in version 3.12. - -See also: - - **PEP 688** - Making the buffer protocol accessible in Python - Introduces the Python "__buffer__" and "__release_buffer__" - methods. - - "collections.abc.Buffer" - ABC for buffer types. - - -Special method lookup -===================== - -For custom classes, implicit invocations of special methods are only -guaranteed to work correctly if defined on an object’s type, not in -the object’s instance dictionary. That behaviour is the reason why -the following code raises an exception: - - >>> class C: - ... pass - ... - >>> c = C() - >>> c.__len__ = lambda: 5 - >>> len(c) - Traceback (most recent call last): - File "", line 1, in - TypeError: object of type 'C' has no len() - -The rationale behind this behaviour lies with a number of special -methods such as "__hash__()" and "__repr__()" that are implemented by -all objects, including type objects. 
If the implicit lookup of these methods used the conventional lookup
process, they would fail when invoked on the type object itself:

   >>> 1 .__hash__() == hash(1)
   True
   >>> int.__hash__() == hash(int)
   Traceback (most recent call last):
     File "<stdin>", line 1, in <module>
   TypeError: descriptor '__hash__' of 'int' object needs an argument

Incorrectly attempting to invoke an unbound method of a class in this
way is sometimes referred to as ‘metaclass confusion’, and is avoided
by bypassing the instance when looking up special methods:

   >>> type(1).__hash__(1) == hash(1)
   True
   >>> type(int).__hash__(int) == hash(int)
   True

In addition to bypassing any instance attributes in the interest of
correctness, implicit special method lookup generally also bypasses
the "__getattribute__()" method even of the object’s metaclass:

   >>> class Meta(type):
   ...     def __getattribute__(*args):
   ...         print("Metaclass getattribute invoked")
   ...         return type.__getattribute__(*args)
   ...
   >>> class C(object, metaclass=Meta):
   ...     def __len__(self):
   ...         return 10
   ...     def __getattribute__(*args):
   ...         print("Class getattribute invoked")
   ...         return object.__getattribute__(*args)
   ...
   >>> c = C()
   >>> c.__len__()          # Explicit lookup via instance
   Class getattribute invoked
   10
   >>> type(c).__len__(c)   # Explicit lookup via type
   Metaclass getattribute invoked
   10
   >>> len(c)               # Implicit lookup
   10

Bypassing the "__getattribute__()" machinery in this fashion provides
significant scope for speed optimisations within the interpreter, at
the cost of some flexibility in the handling of special methods (the
special method *must* be set on the class object itself in order to
be consistently invoked by the interpreter).
''',
 'string-methods': r'''String Methods
**************

Strings implement all of the common sequence operations, along with
the additional methods described below.

Strings also support two styles of string formatting, one providing a
large degree of flexibility and customization (see "str.format()",
Format string syntax and Custom string formatting) and the other
based on C "printf" style formatting that handles a narrower range of
types and is slightly harder to use correctly, but is often faster
for the cases it can handle (printf-style String Formatting).

The Text Processing Services section of the standard library covers a
number of other modules that provide various text related utilities
(including regular expression support in the "re" module).

str.capitalize()

   Return a copy of the string with its first character capitalized
   and the rest lowercased.

   Changed in version 3.8: The first character is now put into
   titlecase rather than uppercase. This means that characters like
   digraphs will only have their first letter capitalized, instead of
   the full character.

str.casefold()

   Return a casefolded copy of the string. Casefolded strings may be
   used for caseless matching.

   Casefolding is similar to lowercasing but more aggressive because
   it is intended to remove all case distinctions in a string. For
   example, the German lowercase letter "'ß'" is equivalent to ""ss"".
   Since it is already lowercase, "lower()" would do nothing to "'ß'";
   "casefold()" converts it to ""ss"". For example:

   >>> 'straße'.lower()
   'straße'
   >>> 'straße'.casefold()
   'strasse'

   The casefolding algorithm is described in section 3.13 ‘Default
   Case Folding’ of the Unicode Standard.

   Added in version 3.3.
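   Since casefolding exists for caseless matching, a typical use is a
   small comparison helper like this sketch:

   >>> def caseless_equal(a, b):
   ...     return a.casefold() == b.casefold()
   ...
   >>> caseless_equal('MASSE', 'maße')
   True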
- -str.center(width, fillchar=' ', /) - - Return centered in a string of length *width*. Padding is done - using the specified *fillchar* (default is an ASCII space). The - original string is returned if *width* is less than or equal to - "len(s)". For example: - - >>> 'Python'.center(10) - ' Python ' - >>> 'Python'.center(10, '-') - '--Python--' - >>> 'Python'.center(4) - 'Python' - -str.count(sub[, start[, end]]) - - Return the number of non-overlapping occurrences of substring *sub* - in the range [*start*, *end*]. Optional arguments *start* and - *end* are interpreted as in slice notation. - - If *sub* is empty, returns the number of empty strings between - characters which is the length of the string plus one. For example: - - >>> 'spam, spam, spam'.count('spam') - 3 - >>> 'spam, spam, spam'.count('spam', 5) - 2 - >>> 'spam, spam, spam'.count('spam', 5, 10) - 1 - >>> 'spam, spam, spam'.count('eggs') - 0 - >>> 'spam, spam, spam'.count('') - 17 - -str.encode(encoding='utf-8', errors='strict') - - Return the string encoded to "bytes". - - *encoding* defaults to "'utf-8'"; see Standard Encodings for - possible values. - - *errors* controls how encoding errors are handled. If "'strict'" - (the default), a "UnicodeError" exception is raised. Other possible - values are "'ignore'", "'replace'", "'xmlcharrefreplace'", - "'backslashreplace'" and any other name registered via - "codecs.register_error()". See Error Handlers for details. - - For performance reasons, the value of *errors* is not checked for - validity unless an encoding error actually occurs, Python - Development Mode is enabled or a debug build is used. For example: - - >>> encoded_str_to_bytes = 'Python'.encode() - >>> type(encoded_str_to_bytes) - - >>> encoded_str_to_bytes - b'Python' - - Changed in version 3.1: Added support for keyword arguments. - - Changed in version 3.9: The value of the *errors* argument is now - checked in Python Development Mode and in debug mode. - -str.endswith(suffix[, start[, end]]) - - Return "True" if the string ends with the specified *suffix*, - otherwise return "False". *suffix* can also be a tuple of suffixes - to look for. With optional *start*, test beginning at that - position. With optional *end*, stop comparing at that position. - Using *start* and *end* is equivalent to - "str[start:end].endswith(suffix)". For example: - - >>> 'Python'.endswith('on') - True - >>> 'a tuple of suffixes'.endswith(('at', 'in')) - False - >>> 'a tuple of suffixes'.endswith(('at', 'es')) - True - >>> 'Python is amazing'.endswith('is', 0, 9) - True - - See also "startswith()" and "removesuffix()". - -str.expandtabs(tabsize=8) - - Return a copy of the string where all tab characters are replaced - by one or more spaces, depending on the current column and the - given tab size. Tab positions occur every *tabsize* characters - (default is 8, giving tab positions at columns 0, 8, 16 and so on). - To expand the string, the current column is set to zero and the - string is examined character by character. If the character is a - tab ("\t"), one or more space characters are inserted in the result - until the current column is equal to the next tab position. (The - tab character itself is not copied.) If the character is a newline - ("\n") or return ("\r"), it is copied and the current column is - reset to zero. Any other character is copied unchanged and the - current column is incremented by one regardless of how the - character is represented when printed. 
For example: - - >>> '01\t012\t0123\t01234'.expandtabs() - '01 012 0123 01234' - >>> '01\t012\t0123\t01234'.expandtabs(4) - '01 012 0123 01234' - >>> print('01\t012\n0123\t01234'.expandtabs(4)) - 01 012 - 0123 01234 - -str.find(sub[, start[, end]]) - - Return the lowest index in the string where substring *sub* is - found within the slice "s[start:end]". Optional arguments *start* - and *end* are interpreted as in slice notation. Return "-1" if - *sub* is not found. For example: - - >>> 'spam, spam, spam'.find('sp') - 0 - >>> 'spam, spam, spam'.find('sp', 5) - 6 - - See also "rfind()" and "index()". - - Note: - - The "find()" method should be used only if you need to know the - position of *sub*. To check if *sub* is a substring or not, use - the "in" operator: - - >>> 'Py' in 'Python' - True - -str.format(*args, **kwargs) - - Perform a string formatting operation. The string on which this - method is called can contain literal text or replacement fields - delimited by braces "{}". Each replacement field contains either - the numeric index of a positional argument, or the name of a - keyword argument. Returns a copy of the string where each - replacement field is replaced with the string value of the - corresponding argument. For example: - - >>> "The sum of 1 + 2 is {0}".format(1+2) - 'The sum of 1 + 2 is 3' - >>> "The sum of {a} + {b} is {answer}".format(answer=1+2, a=1, b=2) - 'The sum of 1 + 2 is 3' - >>> "{1} expects the {0} Inquisition!".format("Spanish", "Nobody") - 'Nobody expects the Spanish Inquisition!' - - See Format string syntax for a description of the various - formatting options that can be specified in format strings. - - Note: - - When formatting a number ("int", "float", "complex", - "decimal.Decimal" and subclasses) with the "n" type (ex: - "'{:n}'.format(1234)"), the function temporarily sets the - "LC_CTYPE" locale to the "LC_NUMERIC" locale to decode - "decimal_point" and "thousands_sep" fields of "localeconv()" if - they are non-ASCII or longer than 1 byte, and the "LC_NUMERIC" - locale is different than the "LC_CTYPE" locale. This temporary - change affects other threads. - - Changed in version 3.7: When formatting a number with the "n" type, - the function sets temporarily the "LC_CTYPE" locale to the - "LC_NUMERIC" locale in some cases. - -str.format_map(mapping, /) - - Similar to "str.format(**mapping)", except that "mapping" is used - directly and not copied to a "dict". This is useful if for example - "mapping" is a dict subclass: - - >>> class Default(dict): - ... def __missing__(self, key): - ... return key - ... - >>> '{name} was born in {country}'.format_map(Default(name='Guido')) - 'Guido was born in country' - - Added in version 3.2. - -str.index(sub[, start[, end]]) - - Like "find()", but raise "ValueError" when the substring is not - found. For example: - - >>> 'spam, spam, spam'.index('spam') - 0 - >>> 'spam, spam, spam'.index('eggs') - Traceback (most recent call last): - File "", line 1, in - 'spam, spam, spam'.index('eggs') - ~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^ - ValueError: substring not found - - See also "rindex()". - -str.isalnum() - - Return "True" if all characters in the string are alphanumeric and - there is at least one character, "False" otherwise. A character - "c" is alphanumeric if one of the following returns "True": - "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()". 
- For example: - - >>> 'abc123'.isalnum() - True - >>> 'abc123!@#'.isalnum() - False - >>> ''.isalnum() - False - >>> ' '.isalnum() - False - -str.isalpha() - - Return "True" if all characters in the string are alphabetic and - there is at least one character, "False" otherwise. Alphabetic - characters are those characters defined in the Unicode character - database as “Letter”, i.e., those with general category property - being one of “Lm”, “Lt”, “Lu”, “Ll”, or “Lo”. Note that this is - different from the Alphabetic property defined in the section 4.10 - ‘Letters, Alphabetic, and Ideographic’ of the Unicode Standard. For - example: - - >>> 'Letters and spaces'.isalpha() - False - >>> 'LettersOnly'.isalpha() - True - >>> 'µ'.isalpha() # non-ASCII characters can be considered alphabetical too - True - - See Unicode Properties. - -str.isascii() - - Return "True" if the string is empty or all characters in the - string are ASCII, "False" otherwise. ASCII characters have code - points in the range U+0000-U+007F. For example: - - >>> 'ASCII characters'.isascii() - True - >>> 'µ'.isascii() - False - - Added in version 3.7. - -str.isdecimal() - - Return "True" if all characters in the string are decimal - characters and there is at least one character, "False" otherwise. - Decimal characters are those that can be used to form numbers in - base 10, such as U+0660, ARABIC-INDIC DIGIT ZERO. Formally a - decimal character is a character in the Unicode General Category - “Nd”. For example: - - >>> '0123456789'.isdecimal() - True - >>> '٠١٢٣٤٥٦٧٨٩'.isdecimal() # Arabic-Indic digits zero to nine - True - >>> 'alphabetic'.isdecimal() - False - -str.isdigit() - - Return "True" if all characters in the string are digits and there - is at least one character, "False" otherwise. Digits include - decimal characters and digits that need special handling, such as - the compatibility superscript digits. This covers digits which - cannot be used to form numbers in base 10, like the Kharosthi - numbers. Formally, a digit is a character that has the property - value Numeric_Type=Digit or Numeric_Type=Decimal. - -str.isidentifier() - - Return "True" if the string is a valid identifier according to the - language definition, section Identifiers and keywords. - - "keyword.iskeyword()" can be used to test whether string "s" is a - reserved identifier, such as "def" and "class". - - Example: - - >>> from keyword import iskeyword - - >>> 'hello'.isidentifier(), iskeyword('hello') - (True, False) - >>> 'def'.isidentifier(), iskeyword('def') - (True, True) - -str.islower() - - Return "True" if all cased characters [4] in the string are - lowercase and there is at least one cased character, "False" - otherwise. - -str.isnumeric() - - Return "True" if all characters in the string are numeric - characters, and there is at least one character, "False" otherwise. - Numeric characters include digit characters, and all characters - that have the Unicode numeric value property, e.g. U+2155, VULGAR - FRACTION ONE FIFTH. Formally, numeric characters are those with - the property value Numeric_Type=Digit, Numeric_Type=Decimal or - Numeric_Type=Numeric. For example: - - >>> '0123456789'.isnumeric() - True - >>> '٠١٢٣٤٥٦٧٨٩'.isnumeric() # Arabic-indic digit zero to nine - True - >>> '⅕'.isnumeric() # Vulgar fraction one fifth - True - >>> '²'.isdecimal(), '²'.isdigit(), '²'.isnumeric() - (False, True, True) - - See also "isdecimal()" and "isdigit()". Numeric characters are a - superset of decimal numbers. 
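   The related predicate "islower()" (described above) behaves as
   follows:

   >>> 'banana'.islower()
   True
   >>> 'BaNana'.islower()
   False
   >>> '123'.islower()   # no cased characters at all
   False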
- -str.isprintable() - - Return "True" if all characters in the string are printable, - "False" if it contains at least one non-printable character. - - Here “printable” means the character is suitable for "repr()" to - use in its output; “non-printable” means that "repr()" on built-in - types will hex-escape the character. It has no bearing on the - handling of strings written to "sys.stdout" or "sys.stderr". - - The printable characters are those which in the Unicode character - database (see "unicodedata") have a general category in group - Letter, Mark, Number, Punctuation, or Symbol (L, M, N, P, or S); - plus the ASCII space 0x20. Nonprintable characters are those in - group Separator or Other (Z or C), except the ASCII space. - - For example: - - >>> ''.isprintable(), ' '.isprintable() - (True, True) - >>> '\t'.isprintable(), '\n'.isprintable() - (False, False) - - See also "isspace()". - -str.isspace() - - Return "True" if there are only whitespace characters in the string - and there is at least one character, "False" otherwise. - - For example: - - >>> ''.isspace() - False - >>> ' '.isspace() - True - >>> '\t\n'.isspace() # TAB and BREAK LINE - True - >>> '\u3000'.isspace() # IDEOGRAPHIC SPACE - True - - A character is *whitespace* if in the Unicode character database - (see "unicodedata"), either its general category is "Zs" - (“Separator, space”), or its bidirectional class is one of "WS", - "B", or "S". - - See also "isprintable()". - -str.istitle() - - Return "True" if the string is a titlecased string and there is at - least one character, for example uppercase characters may only - follow uncased characters and lowercase characters only cased ones. - Return "False" otherwise. - - For example: - - >>> 'Spam, Spam, Spam'.istitle() - True - >>> 'spam, spam, spam'.istitle() - False - >>> 'SPAM, SPAM, SPAM'.istitle() - False - - See also "title()". - -str.isupper() - - Return "True" if all cased characters [4] in the string are - uppercase and there is at least one cased character, "False" - otherwise. - - >>> 'BANANA'.isupper() - True - >>> 'banana'.isupper() - False - >>> 'baNana'.isupper() - False - >>> ' '.isupper() - False - -str.join(iterable, /) - - Return a string which is the concatenation of the strings in - *iterable*. A "TypeError" will be raised if there are any non- - string values in *iterable*, including "bytes" objects. The - separator between elements is the string providing this method. For - example: - - >>> ', '.join(['spam', 'spam', 'spam']) - 'spam, spam, spam' - >>> '-'.join('Python') - 'P-y-t-h-o-n' - - See also "split()". - -str.ljust(width, fillchar=' ', /) - - Return the string left justified in a string of length *width*. - Padding is done using the specified *fillchar* (default is an ASCII - space). The original string is returned if *width* is less than or - equal to "len(s)". - - For example: - - >>> 'Python'.ljust(10) - 'Python ' - >>> 'Python'.ljust(10, '.') - 'Python....' - >>> 'Monty Python'.ljust(10, '.') - 'Monty Python' - - See also "rjust()". - -str.lower() - - Return a copy of the string with all the cased characters [4] - converted to lowercase. For example: - - >>> 'Lower Method Example'.lower() - 'lower method example' - - The lowercasing algorithm used is described in section 3.13 - ‘Default Case Folding’ of the Unicode Standard. - -str.lstrip(chars=None, /) - - Return a copy of the string with leading characters removed. The - *chars* argument is a string specifying the set of characters to be - removed. 
If omitted or "None", the *chars* argument defaults to - removing whitespace. The *chars* argument is not a prefix; rather, - all combinations of its values are stripped: - - >>> ' spacious '.lstrip() - 'spacious ' - >>> 'www.example.com'.lstrip('cmowz.') - 'example.com' - - See "str.removeprefix()" for a method that will remove a single - prefix string rather than all of a set of characters. For example: - - >>> 'Arthur: three!'.lstrip('Arthur: ') - 'ee!' - >>> 'Arthur: three!'.removeprefix('Arthur: ') - 'three!' - -static str.maketrans(dict, /) -static str.maketrans(from, to, remove='', /) - - This static method returns a translation table usable for - "str.translate()". - - If there is only one argument, it must be a dictionary mapping - Unicode ordinals (integers) or characters (strings of length 1) to - Unicode ordinals, strings (of arbitrary lengths) or "None". - Character keys will then be converted to ordinals. - - If there are two arguments, they must be strings of equal length, - and in the resulting dictionary, each character in *from* will be - mapped to the character at the same position in *to*. If there is - a third argument, it must be a string, whose characters will be - mapped to "None" in the result. - -str.partition(sep, /) - - Split the string at the first occurrence of *sep*, and return a - 3-tuple containing the part before the separator, the separator - itself, and the part after the separator. If the separator is not - found, return a 3-tuple containing the string itself, followed by - two empty strings. - - For example: - - >>> 'Monty Python'.partition(' ') - ('Monty', ' ', 'Python') - >>> "Monty Python's Flying Circus".partition(' ') - ('Monty', ' ', "Python's Flying Circus") - >>> 'Monty Python'.partition('-') - ('Monty Python', '', '') - - See also "rpartition()". - -str.removeprefix(prefix, /) - - If the string starts with the *prefix* string, return - "string[len(prefix):]". Otherwise, return a copy of the original - string: - - >>> 'TestHook'.removeprefix('Test') - 'Hook' - >>> 'BaseTestCase'.removeprefix('Test') - 'BaseTestCase' - - Added in version 3.9. - - See also "removesuffix()" and "startswith()". - -str.removesuffix(suffix, /) - - If the string ends with the *suffix* string and that *suffix* is - not empty, return "string[:-len(suffix)]". Otherwise, return a copy - of the original string: - - >>> 'MiscTests'.removesuffix('Tests') - 'Misc' - >>> 'TmpDirMixin'.removesuffix('Tests') - 'TmpDirMixin' - - Added in version 3.9. - - See also "removeprefix()" and "endswith()". - -str.replace(old, new, /, count=-1) - - Return a copy of the string with all occurrences of substring *old* - replaced by *new*. If *count* is given, only the first *count* - occurrences are replaced. If *count* is not specified or "-1", then - all occurrences are replaced. For example: - - >>> 'spam, spam, spam'.replace('spam', 'eggs') - 'eggs, eggs, eggs' - >>> 'spam, spam, spam'.replace('spam', 'eggs', 1) - 'eggs, spam, spam' - - Changed in version 3.13: *count* is now supported as a keyword - argument. - -str.rfind(sub[, start[, end]]) - - Return the highest index in the string where substring *sub* is - found, such that *sub* is contained within "s[start:end]". - Optional arguments *start* and *end* are interpreted as in slice - notation. Return "-1" on failure. For example: - - >>> 'spam, spam, spam'.rfind('sp') - 12 - >>> 'spam, spam, spam'.rfind('sp', 0, 10) - 6 - - See also "find()" and "rindex()". 
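Relatedly, "maketrans()" (described above) and "translate()"
(described further below) are typically used together; a brief
illustration:

   >>> table = str.maketrans({'a': '4', 'e': None})
   >>> 'alphabet'.translate(table)
   '4lph4bt'
   >>> 'spam'.translate(str.maketrans('sp', 'SP', 'a'))
   'SPm'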
- -str.rindex(sub[, start[, end]]) - - Like "rfind()" but raises "ValueError" when the substring *sub* is - not found. For example: - - >>> 'spam, spam, spam'.rindex('spam') - 12 - >>> 'spam, spam, spam'.rindex('eggs') - Traceback (most recent call last): - File "", line 1, in - 'spam, spam, spam'.rindex('eggs') - ~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^ - ValueError: substring not found - - See also "index()" and "find()". - -str.rjust(width, fillchar=' ', /) - - Return the string right justified in a string of length *width*. - Padding is done using the specified *fillchar* (default is an ASCII - space). The original string is returned if *width* is less than or - equal to "len(s)". - - For example: - - >>> 'Python'.rjust(10) - ' Python' - >>> 'Python'.rjust(10, '.') - '....Python' - >>> 'Monty Python'.rjust(10, '.') - 'Monty Python' - - See also "ljust()" and "zfill()". - -str.rpartition(sep, /) - - Split the string at the last occurrence of *sep*, and return a - 3-tuple containing the part before the separator, the separator - itself, and the part after the separator. If the separator is not - found, return a 3-tuple containing two empty strings, followed by - the string itself. - - For example: - - >>> 'Monty Python'.rpartition(' ') - ('Monty', ' ', 'Python') - >>> "Monty Python's Flying Circus".rpartition(' ') - ("Monty Python's Flying", ' ', 'Circus') - >>> 'Monty Python'.rpartition('-') - ('', '', 'Monty Python') - - See also "partition()". - -str.rsplit(sep=None, maxsplit=-1) - - Return a list of the words in the string, using *sep* as the - delimiter string. If *maxsplit* is given, at most *maxsplit* splits - are done, the *rightmost* ones. If *sep* is not specified or - "None", any whitespace string is a separator. Except for splitting - from the right, "rsplit()" behaves like "split()" which is - described in detail below. - -str.rstrip(chars=None, /) - - Return a copy of the string with trailing characters removed. The - *chars* argument is a string specifying the set of characters to be - removed. If omitted or "None", the *chars* argument defaults to - removing whitespace. The *chars* argument is not a suffix; rather, - all combinations of its values are stripped. For example: - - >>> ' spacious '.rstrip() - ' spacious' - >>> 'mississippi'.rstrip('ipz') - 'mississ' - - See "removesuffix()" for a method that will remove a single suffix - string rather than all of a set of characters. For example: - - >>> 'Monty Python'.rstrip(' Python') - 'M' - >>> 'Monty Python'.removesuffix(' Python') - 'Monty' - - See also "strip()". - -str.split(sep=None, maxsplit=-1) - - Return a list of the words in the string, using *sep* as the - delimiter string. If *maxsplit* is given, at most *maxsplit* - splits are done (thus, the list will have at most "maxsplit+1" - elements). If *maxsplit* is not specified or "-1", then there is - no limit on the number of splits (all possible splits are made). - - If *sep* is given, consecutive delimiters are not grouped together - and are deemed to delimit empty strings (for example, - "'1,,2'.split(',')" returns "['1', '', '2']"). The *sep* argument - may consist of multiple characters as a single delimiter (to split - with multiple delimiters, use "re.split()"). Splitting an empty - string with a specified separator returns "['']". 
- - For example: - - >>> '1,2,3'.split(',') - ['1', '2', '3'] - >>> '1,2,3'.split(',', maxsplit=1) - ['1', '2,3'] - >>> '1,2,,3,'.split(',') - ['1', '2', '', '3', ''] - >>> '1<>2<>3<4'.split('<>') - ['1', '2', '3<4'] - - If *sep* is not specified or is "None", a different splitting - algorithm is applied: runs of consecutive whitespace are regarded - as a single separator, and the result will contain no empty strings - at the start or end if the string has leading or trailing - whitespace. Consequently, splitting an empty string or a string - consisting of just whitespace with a "None" separator returns "[]". - - For example: - - >>> '1 2 3'.split() - ['1', '2', '3'] - >>> '1 2 3'.split(maxsplit=1) - ['1', '2 3'] - >>> ' 1 2 3 '.split() - ['1', '2', '3'] - - If *sep* is not specified or is "None" and *maxsplit* is "0", only - leading runs of consecutive whitespace are considered. - - For example: - - >>> "".split(None, 0) - [] - >>> " ".split(None, 0) - [] - >>> " foo ".split(maxsplit=0) - ['foo '] - - See also "join()". - -str.splitlines(keepends=False) - - Return a list of the lines in the string, breaking at line - boundaries. Line breaks are not included in the resulting list - unless *keepends* is given and true. - - This method splits on the following line boundaries. In - particular, the boundaries are a superset of *universal newlines*. - - +-------------------------+-------------------------------+ - | Representation | Description | - |=========================|===============================| - | "\n" | Line Feed | - +-------------------------+-------------------------------+ - | "\r" | Carriage Return | - +-------------------------+-------------------------------+ - | "\r\n" | Carriage Return + Line Feed | - +-------------------------+-------------------------------+ - | "\v" or "\x0b" | Line Tabulation | - +-------------------------+-------------------------------+ - | "\f" or "\x0c" | Form Feed | - +-------------------------+-------------------------------+ - | "\x1c" | File Separator | - +-------------------------+-------------------------------+ - | "\x1d" | Group Separator | - +-------------------------+-------------------------------+ - | "\x1e" | Record Separator | - +-------------------------+-------------------------------+ - | "\x85" | Next Line (C1 Control Code) | - +-------------------------+-------------------------------+ - | "\u2028" | Line Separator | - +-------------------------+-------------------------------+ - | "\u2029" | Paragraph Separator | - +-------------------------+-------------------------------+ - - Changed in version 3.2: "\v" and "\f" added to list of line - boundaries. - - For example: - - >>> 'ab c\n\nde fg\rkl\r\n'.splitlines() - ['ab c', '', 'de fg', 'kl'] - >>> 'ab c\n\nde fg\rkl\r\n'.splitlines(keepends=True) - ['ab c\n', '\n', 'de fg\r', 'kl\r\n'] - - Unlike "split()" when a delimiter string *sep* is given, this - method returns an empty list for the empty string, and a terminal - line break does not result in an extra line: - - >>> "".splitlines() - [] - >>> "One line\n".splitlines() - ['One line'] - - For comparison, "split('\n')" gives: - - >>> ''.split('\n') - [''] - >>> 'Two lines\n'.split('\n') - ['Two lines', ''] - -str.startswith(prefix[, start[, end]]) - - Return "True" if string starts with the *prefix*, otherwise return - "False". *prefix* can also be a tuple of prefixes to look for. - With optional *start*, test string beginning at that position. - With optional *end*, stop comparing string at that position. 
- - For example: - - >>> 'Python'.startswith('Py') - True - >>> 'a tuple of prefixes'.startswith(('at', 'a')) - True - >>> 'Python is amazing'.startswith('is', 7) - True - - See also "endswith()" and "removeprefix()". - -str.strip(chars=None, /) - - Return a copy of the string with the leading and trailing - characters removed. The *chars* argument is a string specifying the - set of characters to be removed. If omitted or "None", the *chars* - argument defaults to removing whitespace. The *chars* argument is - not a prefix or suffix; rather, all combinations of its values are - stripped. - - For example: - - >>> ' spacious '.strip() - 'spacious' - >>> 'www.example.com'.strip('cmowz.') - 'example' - - The outermost leading and trailing *chars* argument values are - stripped from the string. Characters are removed from the leading - end until reaching a string character that is not contained in the - set of characters in *chars*. A similar action takes place on the - trailing end. - - For example: - - >>> comment_string = '#....... Section 3.2.1 Issue #32 .......' - >>> comment_string.strip('.#! ') - 'Section 3.2.1 Issue #32' - - See also "rstrip()". - -str.swapcase() - - Return a copy of the string with uppercase characters converted to - lowercase and vice versa. Note that it is not necessarily true that - "s.swapcase().swapcase() == s". - -str.title() - - Return a titlecased version of the string where words start with an - uppercase character and the remaining characters are lowercase. - - For example: - - >>> 'Hello world'.title() - 'Hello World' - - The algorithm uses a simple language-independent definition of a - word as groups of consecutive letters. The definition works in - many contexts but it means that apostrophes in contractions and - possessives form word boundaries, which may not be the desired - result: - - >>> "they're bill's friends from the UK".title() - "They'Re Bill'S Friends From The Uk" - - The "string.capwords()" function does not have this problem, as it - splits words on spaces only. - - Alternatively, a workaround for apostrophes can be constructed - using regular expressions: - - >>> import re - >>> def titlecase(s): - ... return re.sub(r"[A-Za-z]+('[A-Za-z]+)?", - ... lambda mo: mo.group(0).capitalize(), - ... s) - ... - >>> titlecase("they're bill's friends.") - "They're Bill's Friends." - - See also "istitle()". - -str.translate(table, /) - - Return a copy of the string in which each character has been mapped - through the given translation table. The table must be an object - that implements indexing via "__getitem__()", typically a *mapping* - or *sequence*. When indexed by a Unicode ordinal (an integer), the - table object can do any of the following: return a Unicode ordinal - or a string, to map the character to one or more other characters; - return "None", to delete the character from the return string; or - raise a "LookupError" exception, to map the character to itself. - - You can use "str.maketrans()" to create a translation map from - character-to-character mappings in different formats. - - See also the "codecs" module for a more flexible approach to custom - character mappings. - -str.upper() - - Return a copy of the string with all the cased characters [4] - converted to uppercase. Note that "s.upper().isupper()" might be - "False" if "s" contains uncased characters or if the Unicode - category of the resulting character(s) is not “Lu” (Letter, - uppercase), but e.g. “Lt” (Letter, titlecase). 
The uppercasing algorithm used is described in section 3.13 ‘Default
Case Folding’ of the Unicode Standard.

str.zfill(width, /)

   Return a copy of the string left filled with ASCII "'0'" digits to
   make a string of length *width*. A leading sign prefix
   ("'+'"/"'-'") is handled by inserting the padding *after* the sign
   character rather than before. The original string is returned if
   *width* is less than or equal to "len(s)".

   For example:

   >>> "42".zfill(5)
   '00042'
   >>> "-42".zfill(5)
   '-0042'

   See also "rjust()".
''',
 'strings': '''String and Bytes literals
*************************

String literals are described by the following lexical definitions:

   stringliteral   ::= [stringprefix](shortstring | longstring)
   stringprefix    ::= "r" | "u" | "R" | "U" | "f" | "F"
                    | "fr" | "Fr" | "fR" | "FR" | "rf" | "rF" | "Rf" | "RF"
   shortstring     ::= "'" shortstringitem* "'" | '"' shortstringitem* '"'
   longstring      ::= "\'\'\'" longstringitem* "\'\'\'" | '"""' longstringitem* '"""'
   shortstringitem ::= shortstringchar | stringescapeseq
   longstringitem  ::= longstringchar | stringescapeseq
   shortstringchar ::= <any source character except "\\" or newline or the quote>
   longstringchar  ::= <any source character except "\\">
   stringescapeseq ::= "\\" <any source character>

   bytesliteral   ::= bytesprefix(shortbytes | longbytes)
   bytesprefix    ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"
   shortbytes     ::= "'" shortbytesitem* "'" | '"' shortbytesitem* '"'
   longbytes      ::= "\'\'\'" longbytesitem* "\'\'\'" | '"""' longbytesitem* '"""'
   shortbytesitem ::= shortbyteschar | bytesescapeseq
   longbytesitem  ::= longbyteschar | bytesescapeseq
   shortbyteschar ::= <any ASCII character except "\\" or newline or the quote>
   longbyteschar  ::= <any ASCII character except "\\">
   bytesescapeseq ::= "\\" <any ASCII character>

One syntactic restriction not indicated by these productions is that
whitespace is not allowed between the "stringprefix" or "bytesprefix"
and the rest of the literal. The source character set is defined by
the encoding declaration; it is UTF-8 if no encoding declaration is
given in the source file; see section Encoding declarations.

In plain English: Both types of literals can be enclosed in matching
single quotes ("'") or double quotes ("""). They can also be enclosed
in matching groups of three single or double quotes (these are
generally referred to as *triple-quoted strings*). The backslash
("\\") character is used to give special meaning to otherwise
ordinary characters like "n", which means ‘newline’ when escaped
("\\n"). It can also be used to escape characters that otherwise have
a special meaning, such as newline, backslash itself, or the quote
character. See escape sequences below for examples.

Bytes literals are always prefixed with "'b'" or "'B'"; they produce
an instance of the "bytes" type instead of the "str" type. They may
only contain ASCII characters; bytes with a numeric value of 128 or
greater must be expressed with escapes.

Both string and bytes literals may optionally be prefixed with a
letter "'r'" or "'R'"; such constructs are called *raw string
literals* and *raw bytes literals* respectively and treat backslashes
as literal characters. As a result, in raw string literals, "'\\U'"
and "'\\u'" escapes are not treated specially.

Added in version 3.3: The "'rb'" prefix of raw bytes literals has
been added as a synonym of "'br'". Support for the unicode legacy
literal ("u'value'") was reintroduced to simplify the maintenance of
dual Python 2.x and 3.x codebases. See **PEP 414** for more
information.

A string literal with "'f'" or "'F'" in its prefix is a *formatted
string literal*; see f-strings.
The "'f'" may be combined with "'r'", -but not with "'b'" or "'u'", therefore raw formatted strings are -possible, but formatted bytes literals are not. - -In triple-quoted literals, unescaped newlines and quotes are allowed -(and are retained), except that three unescaped quotes in a row -terminate the literal. (A “quote” is the character used to open the -literal, i.e. either "'" or """.) - - -Escape sequences -================ - -Unless an "'r'" or "'R'" prefix is present, escape sequences in string -and bytes literals are interpreted according to rules similar to those -used by Standard C. The recognized escape sequences are: - -+---------------------------+-----------------------------------+---------+ -| Escape Sequence | Meaning | Notes | -|===========================|===================================|=========| -| "\\" | Backslash and newline ignored | (1) | -+---------------------------+-----------------------------------+---------+ -| "\\\\" | Backslash ("\\") | | -+---------------------------+-----------------------------------+---------+ -| "\\'" | Single quote ("'") | | -+---------------------------+-----------------------------------+---------+ -| "\\"" | Double quote (""") | | -+---------------------------+-----------------------------------+---------+ -| "\\a" | ASCII Bell (BEL) | | -+---------------------------+-----------------------------------+---------+ -| "\\b" | ASCII Backspace (BS) | | -+---------------------------+-----------------------------------+---------+ -| "\\f" | ASCII Formfeed (FF) | | -+---------------------------+-----------------------------------+---------+ -| "\\n" | ASCII Linefeed (LF) | | -+---------------------------+-----------------------------------+---------+ -| "\\r" | ASCII Carriage Return (CR) | | -+---------------------------+-----------------------------------+---------+ -| "\\t" | ASCII Horizontal Tab (TAB) | | -+---------------------------+-----------------------------------+---------+ -| "\\v" | ASCII Vertical Tab (VT) | | -+---------------------------+-----------------------------------+---------+ -| "\\*ooo*" | Character with octal value *ooo* | (2,4) | -+---------------------------+-----------------------------------+---------+ -| "\\x*hh*" | Character with hex value *hh* | (3,4) | -+---------------------------+-----------------------------------+---------+ - -Escape sequences only recognized in string literals are: - -+---------------------------+-----------------------------------+---------+ -| Escape Sequence | Meaning | Notes | -|===========================|===================================|=========| -| "\\N{*name*}" | Character named *name* in the | (5) | -| | Unicode database | | -+---------------------------+-----------------------------------+---------+ -| "\\u*xxxx*" | Character with 16-bit hex value | (6) | -| | *xxxx* | | -+---------------------------+-----------------------------------+---------+ -| "\\U*xxxxxxxx*" | Character with 32-bit hex value | (7) | -| | *xxxxxxxx* | | -+---------------------------+-----------------------------------+---------+ - -Notes: - -1. A backslash can be added at the end of a line to ignore the - newline: - - >>> 'This string will not include \\ - ... backslashes or newline characters.' - 'This string will not include backslashes or newline characters.' - - The same result can be achieved using triple-quoted strings, or - parentheses and string literal concatenation. - -2. As in Standard C, up to three octal digits are accepted. 
- - Changed in version 3.11: Octal escapes with value larger than - "0o377" produce a "DeprecationWarning". - - Changed in version 3.12: Octal escapes with value larger than - "0o377" produce a "SyntaxWarning". In a future Python version they - will eventually become a "SyntaxError". - -3. Unlike in Standard C, exactly two hex digits are required. - -4. In a bytes literal, hexadecimal and octal escapes denote the byte - with the given value. In a string literal, these escapes denote a - Unicode character with the given value. - -5. Changed in version 3.3: Support for name aliases [1] has been - added. - -6. Exactly four hex digits are required. - -7. Any Unicode character can be encoded this way. Exactly eight hex - digits are required. - -Unlike Standard C, all unrecognized escape sequences are left in the -string unchanged, i.e., *the backslash is left in the result*. (This -behavior is useful when debugging: if an escape sequence is mistyped, -the resulting output is more easily recognized as broken.) It is also -important to note that the escape sequences only recognized in string -literals fall into the category of unrecognized escapes for bytes -literals. - -Changed in version 3.6: Unrecognized escape sequences produce a -"DeprecationWarning". - -Changed in version 3.12: Unrecognized escape sequences produce a -"SyntaxWarning". In a future Python version they will eventually become a -"SyntaxError". - -Even in a raw literal, quotes can be escaped with a backslash, but the -backslash remains in the result; for example, "r"\\""" is a valid -string literal consisting of two characters: a backslash and a double -quote; "r"\\"" is not a valid string literal (even a raw string cannot -end in an odd number of backslashes). Specifically, *a raw literal -cannot end in a single backslash* (since the backslash would escape -the following quote character). Note also that a single backslash -followed by a newline is interpreted as those two characters as part -of the literal, *not* as a line continuation. -''', - 'subscriptions': r'''Subscriptions -************* - -The subscription of an instance of a container class will generally -select an element from the container. The subscription of a *generic -class* will generally return a GenericAlias object. - - subscription ::= primary "[" flexible_expression_list "]" - -When an object is subscripted, the interpreter will evaluate the -primary and the expression list. - -The primary must evaluate to an object that supports subscription. An -object may support subscription through defining one or both of -"__getitem__()" and "__class_getitem__()". When the primary is -subscripted, the evaluated result of the expression list will be -passed to one of these methods. For more details on when -"__class_getitem__" is called instead of "__getitem__", see -__class_getitem__ versus __getitem__. - -If the expression list contains at least one comma, or if any of the -expressions are starred, the expression list will evaluate to a -"tuple" containing the items of the expression list. Otherwise, the -expression list will evaluate to the value of the list’s sole member. - -Changed in version 3.11: Expressions in an expression list may be -starred. See **PEP 646**. - -For built-in objects, there are two types of objects that support -subscription via "__getitem__()": - -1. Mappings. 
If the primary is a *mapping*, the expression list must - evaluate to an object whose value is one of the keys of the - mapping, and the subscription selects the value in the mapping that - corresponds to that key. An example of a builtin mapping class is - the "dict" class. - -2. Sequences. If the primary is a *sequence*, the expression list must - evaluate to an "int" or a "slice" (as discussed in the following - section). Examples of builtin sequence classes include the "str", - "list" and "tuple" classes. - -The formal syntax makes no special provision for negative indices in -*sequences*. However, built-in sequences all provide a "__getitem__()" -method that interprets negative indices by adding the length of the -sequence to the index so that, for example, "x[-1]" selects the last -item of "x". The resulting value must be a nonnegative integer less -than the number of items in the sequence, and the subscription selects -the item whose index is that value (counting from zero). Since the -support for negative indices and slicing occurs in the object’s -"__getitem__()" method, subclasses overriding this method will need to -explicitly add that support. - -A "string" is a special kind of sequence whose items are *characters*. -A character is not a separate data type but a string of exactly one -character. -''', - 'truth': r'''Truth Value Testing -******************* - -Any object can be tested for truth value, for use in an "if" or -"while" condition or as operand of the Boolean operations below. - -By default, an object is considered true unless its class defines -either a "__bool__()" method that returns "False" or a "__len__()" -method that returns zero, when called with the object. [1] Here are -most of the built-in objects considered false: - -* constants defined to be false: "None" and "False" - -* zero of any numeric type: "0", "0.0", "0j", "Decimal(0)", - "Fraction(0, 1)" - -* empty sequences and collections: "''", "()", "[]", "{}", "set()", - "range(0)" - -Operations and built-in functions that have a Boolean result always -return "0" or "False" for false and "1" or "True" for true, unless -otherwise stated. (Important exception: the Boolean operations "or" -and "and" always return one of their operands.) -''', - 'try': r'''The "try" statement -******************* - -The "try" statement specifies exception handlers and/or cleanup code -for a group of statements: - - try_stmt ::= try1_stmt | try2_stmt | try3_stmt - try1_stmt ::= "try" ":" suite - ("except" [expression ["as" identifier]] ":" suite)+ - ["else" ":" suite] - ["finally" ":" suite] - try2_stmt ::= "try" ":" suite - ("except" "*" expression ["as" identifier] ":" suite)+ - ["else" ":" suite] - ["finally" ":" suite] - try3_stmt ::= "try" ":" suite - "finally" ":" suite - -Additional information on exceptions can be found in section -Exceptions, and information on using the "raise" statement to generate -exceptions may be found in section The raise statement. - - -"except" clause -=============== - -The "except" clause(s) specify one or more exception handlers. When no -exception occurs in the "try" clause, no exception handler is -executed. When an exception occurs in the "try" suite, a search for an -exception handler is started. This search inspects the "except" -clauses in turn until one is found that matches the exception. An -expression-less "except" clause, if present, must be last; it matches -any exception. 
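- - For example, because the clauses are inspected in turn, a handler for a base class placed first will also catch instances of its subclasses (an illustrative sketch): - - >>> try: - ... raise ZeroDivisionError - ... except ArithmeticError: - ... print('handled by the base class clause') - ... except ZeroDivisionError: - ... print('never reached') - ... - handled by the base class clause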
- -For an "except" clause with an expression, the expression must -evaluate to an exception type or a tuple of exception types. The -raised exception matches an "except" clause whose expression evaluates -to the class or a *non-virtual base class* of the exception object, or -to a tuple that contains such a class. - -If no "except" clause matches the exception, the search for an -exception handler continues in the surrounding code and on the -invocation stack. [1] - -If the evaluation of an expression in the header of an "except" clause -raises an exception, the original search for a handler is canceled and -a search starts for the new exception in the surrounding code and on -the call stack (it is treated as if the entire "try" statement raised -the exception). - -When a matching "except" clause is found, the exception is assigned to -the target specified after the "as" keyword in that "except" clause, -if present, and the "except" clause’s suite is executed. All "except" -clauses must have an executable block. When the end of this block is -reached, execution continues normally after the entire "try" -statement. (This means that if two nested handlers exist for the same -exception, and the exception occurs in the "try" clause of the inner -handler, the outer handler will not handle the exception.) - -When an exception has been assigned using "as target", it is cleared -at the end of the "except" clause. This is as if - - except E as N: - foo - -was translated to - - except E as N: - try: - foo - finally: - del N - -This means the exception must be assigned to a different name to be -able to refer to it after the "except" clause. Exceptions are cleared -because with the traceback attached to them, they form a reference -cycle with the stack frame, keeping all locals in that frame alive -until the next garbage collection occurs. - -Before an "except" clause’s suite is executed, the exception is stored -in the "sys" module, where it can be accessed from within the body of -the "except" clause by calling "sys.exception()". When leaving an -exception handler, the exception stored in the "sys" module is reset -to its previous value: - - >>> print(sys.exception()) - None - >>> try: - ... raise TypeError - ... except: - ... print(repr(sys.exception())) - ... try: - ... raise ValueError - ... except: - ... print(repr(sys.exception())) - ... print(repr(sys.exception())) - ... - TypeError() - ValueError() - TypeError() - >>> print(sys.exception()) - None - - -"except*" clause -================ - -The "except*" clause(s) specify one or more handlers for groups of -exceptions ("BaseExceptionGroup" instances). A "try" statement can -have either "except" or "except*" clauses, but not both. The exception -type for matching is mandatory in the case of "except*", so "except*:" -is a syntax error. The type is interpreted as in the case of "except", -but matching is performed on the exceptions contained in the group -that is being handled. A "TypeError" is raised if a matching type is -a subclass of "BaseExceptionGroup", because that would have ambiguous -semantics. - -When an exception group is raised in the try block, each "except*" -clause splits (see "split()") it into the subgroups of matching and -non-matching exceptions. If the matching subgroup is not empty, it -becomes the handled exception (the value returned from -"sys.exception()") and is assigned to the target of the "except*" clause -(if there is one). Then, the body of the "except*" clause executes. 
If -the non-matching subgroup is not empty, it is processed by the next -"except*" in the same manner. This continues until all exceptions in -the group have been matched, or the last "except*" clause has run. - -After all "except*" clauses execute, the group of unhandled exceptions -is merged with any exceptions that were raised or re-raised from -within "except*" clauses. This merged exception group propagates on: - - >>> try: - ... raise ExceptionGroup("eg", - ... [ValueError(1), TypeError(2), OSError(3), OSError(4)]) - ... except* TypeError as e: - ... print(f'caught {type(e)} with nested {e.exceptions}') - ... except* OSError as e: - ... print(f'caught {type(e)} with nested {e.exceptions}') - ... - caught <class 'ExceptionGroup'> with nested (TypeError(2),) - caught <class 'ExceptionGroup'> with nested (OSError(3), OSError(4)) - + Exception Group Traceback (most recent call last): - | File "<stdin>", line 2, in <module> - | raise ExceptionGroup("eg", - | [ValueError(1), TypeError(2), OSError(3), OSError(4)]) - | ExceptionGroup: eg (1 sub-exception) - +-+---------------- 1 ---------------- - | ValueError: 1 - +------------------------------------ - -If the exception raised from the "try" block is not an exception group -and its type matches one of the "except*" clauses, it is caught and -wrapped by an exception group with an empty message string. This -ensures that the type of the target "e" is consistently -"BaseExceptionGroup": - - >>> try: - ... raise BlockingIOError - ... except* BlockingIOError as e: - ... print(repr(e)) - ... - ExceptionGroup('', (BlockingIOError(),)) - -"break", "continue" and "return" cannot appear in an "except*" clause. - - -"else" clause -============= - -The optional "else" clause is executed if the control flow leaves the -"try" suite, no exception was raised, and no "return", "continue", or -"break" statement was executed. Exceptions in the "else" clause are -not handled by the preceding "except" clauses. - - -"finally" clause -================ - -If "finally" is present, it specifies a ‘cleanup’ handler. The "try" -clause is executed, including any "except" and "else" clauses. If an -exception occurs in any of the clauses and is not handled, the -exception is temporarily saved. The "finally" clause is executed. If -there is a saved exception it is re-raised at the end of the "finally" -clause. If the "finally" clause raises another exception, the saved -exception is set as the context of the new exception. If the "finally" -clause executes a "return", "break" or "continue" statement, the saved -exception is discarded: - - >>> def f(): - ... try: - ... 1/0 - ... finally: - ... return 42 - ... - >>> f() - 42 - -The exception information is not available to the program during -execution of the "finally" clause. - -When a "return", "break" or "continue" statement is executed in the -"try" suite of a "try"…"finally" statement, the "finally" clause is -also executed ‘on the way out.’ - -The return value of a function is determined by the last "return" -statement executed. Since the "finally" clause always executes, a -"return" statement executed in the "finally" clause will always be the -last one executed: - - >>> def foo(): - ... try: - ... return 'try' - ... finally: - ... return 'finally' - ... - >>> foo() - 'finally' - -Changed in version 3.8: Prior to Python 3.8, a "continue" statement -was illegal in the "finally" clause due to a problem with the -implementation. -''', - 'types': r'''The standard type hierarchy -*************************** - -Below is a list of the types that are built into Python. 
Extension -modules (written in C, Java, or other languages, depending on the -implementation) can define additional types. Future versions of -Python may add types to the type hierarchy (e.g., rational numbers, -efficiently stored arrays of integers, etc.), although such additions -will often be provided via the standard library instead. - -Some of the type descriptions below contain a paragraph listing -‘special attributes.’ These are attributes that provide access to the -implementation and are not intended for general use. Their definition -may change in the future. - - -None -==== - -This type has a single value. There is a single object with this -value. This object is accessed through the built-in name "None". It is -used to signify the absence of a value in many situations, e.g., it is -returned from functions that don’t explicitly return anything. Its -truth value is false. - - -NotImplemented -============== - -This type has a single value. There is a single object with this -value. This object is accessed through the built-in name -"NotImplemented". Numeric methods and rich comparison methods should -return this value if they do not implement the operation for the -operands provided. (The interpreter will then try the reflected -operation, or some other fallback, depending on the operator.) It -should not be evaluated in a boolean context. - -See Implementing the arithmetic operations for more details. - -Changed in version 3.9: Evaluating "NotImplemented" in a boolean -context is deprecated. While it currently evaluates as true, it will -emit a "DeprecationWarning". It will raise a "TypeError" in a future -version of Python. - - -Ellipsis -======== - -This type has a single value. There is a single object with this -value. This object is accessed through the literal "..." or the built- -in name "Ellipsis". Its truth value is true. - - -"numbers.Number" -================ - -These are created by numeric literals and returned as results by -arithmetic operators and arithmetic built-in functions. Numeric -objects are immutable; once created their value never changes. Python -numbers are of course strongly related to mathematical numbers, but -subject to the limitations of numerical representation in computers. - -The string representations of the numeric classes, computed by -"__repr__()" and "__str__()", have the following properties: - -* They are valid numeric literals which, when passed to their class - constructor, produce an object having the value of the original - numeric. - -* The representation is in base 10, when possible. - -* Leading zeros, possibly excepting a single zero before a decimal - point, are not shown. - -* Trailing zeros, possibly excepting a single zero after a decimal - point, are not shown. - -* A sign is shown only when the number is negative. - -Python distinguishes between integers, floating-point numbers, and -complex numbers: - - -"numbers.Integral" ------------------- - -These represent elements from the mathematical set of integers -(positive and negative). - -Note: - - The rules for integer representation are intended to give the most - meaningful interpretation of shift and mask operations involving - negative integers. - -There are two types of integers: - -Integers ("int") - These represent numbers in an unlimited range, subject to available - (virtual) memory only. 
For the purpose of shift and mask - operations, a binary representation is assumed, and negative - numbers are represented in a variant of 2’s complement which gives - the illusion of an infinite string of sign bits extending to the - left. - -Booleans ("bool") - These represent the truth values False and True. The two objects - representing the values "False" and "True" are the only Boolean - objects. The Boolean type is a subtype of the integer type, and - Boolean values behave like the values 0 and 1, respectively, in - almost all contexts, the exception being that when converted to a - string, the strings ""False"" or ""True"" are returned, - respectively. - - -"numbers.Real" ("float") ------------------------- - -These represent machine-level double precision floating-point numbers. -You are at the mercy of the underlying machine architecture (and C or -Java implementation) for the accepted range and handling of overflow. -Python does not support single-precision floating-point numbers; the -savings in processor and memory usage that are usually the reason for -using these are dwarfed by the overhead of using objects in Python, so -there is no reason to complicate the language with two kinds of -floating-point numbers. - - -"numbers.Complex" ("complex") ------------------------------ - -These represent complex numbers as a pair of machine-level double -precision floating-point numbers. The same caveats apply as for -floating-point numbers. The real and imaginary parts of a complex -number "z" can be retrieved through the read-only attributes "z.real" -and "z.imag". - - -Sequences -========= - -These represent finite ordered sets indexed by non-negative numbers. -The built-in function "len()" returns the number of items of a -sequence. When the length of a sequence is *n*, the index set contains -the numbers 0, 1, …, *n*-1. Item *i* of sequence *a* is selected by -"a[i]". Some sequences, including built-in sequences, interpret -negative subscripts by adding the sequence length. For example, -"a[-2]" equals "a[n-2]", the second to last item of sequence a with -length "n". - -Sequences also support slicing: "a[i:j]" selects all items with index -*k* such that *i* "<=" *k* "<" *j*. When used as an expression, a -slice is a sequence of the same type. The comment above about negative -indexes also applies to negative slice positions. - -Some sequences also support “extended slicing” with a third “step” -parameter: "a[i:j:k]" selects all items of *a* with index *x* where "x -= i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*. - -Sequences are distinguished according to their mutability: - - -Immutable sequences -------------------- - -An object of an immutable sequence type cannot change once it is -created. (If the object contains references to other objects, these -other objects may be mutable and may be changed; however, the -collection of objects directly referenced by an immutable object -cannot change.) - -The following types are immutable sequences: - -Strings - A string is a sequence of values that represent Unicode code - points. All the code points in the range "U+0000 - U+10FFFF" can be - represented in a string. Python doesn’t have a char type; instead, - every code point in the string is represented as a string object - with length "1". The built-in function "ord()" converts a code - point from its string form to an integer in the range "0 - 10FFFF"; - "chr()" converts an integer in the range "0 - 10FFFF" to the - corresponding length "1" string object. 
"str.encode()" can be used - to convert a "str" to "bytes" using the given text encoding, and - "bytes.decode()" can be used to achieve the opposite. - -Tuples - The items of a tuple are arbitrary Python objects. Tuples of two or - more items are formed by comma-separated lists of expressions. A - tuple of one item (a ‘singleton’) can be formed by affixing a comma - to an expression (an expression by itself does not create a tuple, - since parentheses must be usable for grouping of expressions). An - empty tuple can be formed by an empty pair of parentheses. - -Bytes - A bytes object is an immutable array. The items are 8-bit bytes, - represented by integers in the range 0 <= x < 256. Bytes literals - (like "b'abc'") and the built-in "bytes()" constructor can be used - to create bytes objects. Also, bytes objects can be decoded to - strings via the "decode()" method. - - -Mutable sequences ------------------ - -Mutable sequences can be changed after they are created. The -subscription and slicing notations can be used as the target of -assignment and "del" (delete) statements. - -Note: - - The "collections" and "array" module provide additional examples of - mutable sequence types. - -There are currently two intrinsic mutable sequence types: - -Lists - The items of a list are arbitrary Python objects. Lists are formed - by placing a comma-separated list of expressions in square - brackets. (Note that there are no special cases needed to form - lists of length 0 or 1.) - -Byte Arrays - A bytearray object is a mutable array. They are created by the - built-in "bytearray()" constructor. Aside from being mutable (and - hence unhashable), byte arrays otherwise provide the same interface - and functionality as immutable "bytes" objects. - - -Set types -========= - -These represent unordered, finite sets of unique, immutable objects. -As such, they cannot be indexed by any subscript. However, they can be -iterated over, and the built-in function "len()" returns the number of -items in a set. Common uses for sets are fast membership testing, -removing duplicates from a sequence, and computing mathematical -operations such as intersection, union, difference, and symmetric -difference. - -For set elements, the same immutability rules apply as for dictionary -keys. Note that numeric types obey the normal rules for numeric -comparison: if two numbers compare equal (e.g., "1" and "1.0"), only -one of them can be contained in a set. - -There are currently two intrinsic set types: - -Sets - These represent a mutable set. They are created by the built-in - "set()" constructor and can be modified afterwards by several - methods, such as "add()". - -Frozen sets - These represent an immutable set. They are created by the built-in - "frozenset()" constructor. As a frozenset is immutable and - *hashable*, it can be used again as an element of another set, or - as a dictionary key. - - -Mappings -======== - -These represent finite sets of objects indexed by arbitrary index -sets. The subscript notation "a[k]" selects the item indexed by "k" -from the mapping "a"; this can be used in expressions and as the -target of assignments or "del" statements. The built-in function -"len()" returns the number of items in a mapping. - -There is currently a single intrinsic mapping type: - - -Dictionaries ------------- - -These represent finite sets of objects indexed by nearly arbitrary -values. 
The only types of values not acceptable as keys are values -containing lists or dictionaries or other mutable types that are -compared by value rather than by object identity, the reason being -that the efficient implementation of dictionaries requires a key’s -hash value to remain constant. Numeric types used for keys obey the -normal rules for numeric comparison: if two numbers compare equal -(e.g., "1" and "1.0") then they can be used interchangeably to index -the same dictionary entry. - -Dictionaries preserve insertion order, meaning that keys will be -produced in the same order they were added sequentially over the -dictionary. Replacing an existing key does not change the order, -however removing a key and re-inserting it will add it to the end -instead of keeping its old place. - -Dictionaries are mutable; they can be created by the "{}" notation -(see section Dictionary displays). - -The extension modules "dbm.ndbm" and "dbm.gnu" provide additional -examples of mapping types, as does the "collections" module. - -Changed in version 3.7: Dictionaries did not preserve insertion order -in versions of Python before 3.6. In CPython 3.6, insertion order was -preserved, but it was considered an implementation detail at that time -rather than a language guarantee. - - -Callable types -============== - -These are the types to which the function call operation (see section -Calls) can be applied: - - -User-defined functions ----------------------- - -A user-defined function object is created by a function definition -(see section Function definitions). It should be called with an -argument list containing the same number of items as the function’s -formal parameter list. - - -Special read-only attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -+----------------------------------------------------+----------------------------------------------------+ -| Attribute | Meaning | -|====================================================|====================================================| -| function.__builtins__ | A reference to the "dictionary" that holds the | -| | function’s builtins namespace. Added in version | -| | 3.10. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__globals__ | A reference to the "dictionary" that holds the | -| | function’s global variables – the global namespace | -| | of the module in which the function was defined. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__closure__ | "None" or a "tuple" of cells that contain bindings | -| | for the names specified in the "co_freevars" | -| | attribute of the function’s "code object". A cell | -| | object has the attribute "cell_contents". This can | -| | be used to get the value of the cell, as well as | -| | set the value. | -+----------------------------------------------------+----------------------------------------------------+ - - -Special writable attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Most of these attributes check the type of the assigned value: - -+----------------------------------------------------+----------------------------------------------------+ -| Attribute | Meaning | -|====================================================|====================================================| -| function.__doc__ | The function’s documentation string, or "None" if | -| | unavailable. 
| -+----------------------------------------------------+----------------------------------------------------+ -| function.__name__ | The function’s name. See also: "__name__ | -| | attributes". | -+----------------------------------------------------+----------------------------------------------------+ -| function.__qualname__ | The function’s *qualified name*. See also: | -| | "__qualname__ attributes". Added in version 3.3. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__module__ | The name of the module the function was defined | -| | in, or "None" if unavailable. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__defaults__ | A "tuple" containing default *parameter* values | -| | for those parameters that have defaults, or "None" | -| | if no parameters have a default value. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__code__ | The code object representing the compiled function | -| | body. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__dict__ | The namespace supporting arbitrary function | -| | attributes. See also: "__dict__ attributes". | -+----------------------------------------------------+----------------------------------------------------+ -| function.__annotations__ | A "dictionary" containing annotations of | -| | *parameters*. The keys of the dictionary are the | -| | parameter names, and "'return'" for the return | -| | annotation, if provided. See also: Annotations | -| | Best Practices. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__kwdefaults__ | A "dictionary" containing defaults for keyword- | -| | only *parameters*. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__type_params__ | A "tuple" containing the type parameters of a | -| | generic function. Added in version 3.12. | -+----------------------------------------------------+----------------------------------------------------+ - -Function objects also support getting and setting arbitrary -attributes, which can be used, for example, to attach metadata to -functions. Regular attribute dot-notation is used to get and set such -attributes. - -**CPython implementation detail:** CPython’s current implementation -only supports function attributes on user-defined functions. Function -attributes on built-in functions may be supported in the future. - -Additional information about a function’s definition can be retrieved -from its code object (accessible via the "__code__" attribute). - - -Instance methods ----------------- - -An instance method object combines a class, a class instance and any -callable object (normally a user-defined function). 
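- - A minimal sketch of this combination, using a hypothetical class "C" and instance "x" (the attributes involved are listed below): - - >>> class C: - ... def f(self): pass - ... - >>> x = C() - >>> x.f.__self__ is x - True - >>> x.f.__func__ is C.__dict__['f'] - True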
- -Special read-only attributes: - -+----------------------------------------------------+----------------------------------------------------+ -| method.__self__ | Refers to the class instance object to which the | -| | method is bound | -+----------------------------------------------------+----------------------------------------------------+ -| method.__func__ | Refers to the original function object | -+----------------------------------------------------+----------------------------------------------------+ -| method.__doc__ | The method’s documentation (same as | -| | "method.__func__.__doc__"). A "string" if the | -| | original function had a docstring, else "None". | -+----------------------------------------------------+----------------------------------------------------+ -| method.__name__ | The name of the method (same as | -| | "method.__func__.__name__") | -+----------------------------------------------------+----------------------------------------------------+ -| method.__module__ | The name of the module the method was defined in, | -| | or "None" if unavailable. | -+----------------------------------------------------+----------------------------------------------------+ - -Methods also support accessing (but not setting) the arbitrary -function attributes on the underlying function object. - -User-defined method objects may be created when getting an attribute -of a class (perhaps via an instance of that class), if that attribute -is a user-defined function object or a "classmethod" object. - -When an instance method object is created by retrieving a user-defined -function object from a class via one of its instances, its "__self__" -attribute is the instance, and the method object is said to be -*bound*. The new method’s "__func__" attribute is the original -function object. - -When an instance method object is created by retrieving a -"classmethod" object from a class or instance, its "__self__" -attribute is the class itself, and its "__func__" attribute is the -function object underlying the class method. - -When an instance method object is called, the underlying function -("__func__") is called, inserting the class instance ("__self__") in -front of the argument list. For instance, when "C" is a class which -contains a definition for a function "f()", and "x" is an instance of -"C", calling "x.f(1)" is equivalent to calling "C.f(x, 1)". - -When an instance method object is derived from a "classmethod" object, -the “class instance” stored in "__self__" will actually be the class -itself, so that calling either "x.f(1)" or "C.f(1)" is equivalent to -calling "f(C,1)" where "f" is the underlying function. - -It is important to note that user-defined functions which are -attributes of a class instance are not converted to bound methods; -this *only* happens when the function is an attribute of the class. - - -Generator functions -------------------- - -A function or method which uses the "yield" statement (see section The -yield statement) is called a *generator function*. Such a function, -when called, always returns an *iterator* object which can be used to -execute the body of the function: calling the iterator’s -"iterator.__next__()" method will cause the function to execute until -it provides a value using the "yield" statement. When the function -executes a "return" statement or falls off the end, a "StopIteration" -exception is raised and the iterator will have reached the end of the -set of values to be returned. 
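- - A minimal sketch (the generator function "gen" is hypothetical, introduced only for illustration): - - >>> def gen(): - ... yield 1 - ... yield 2 - ... - >>> it = gen() - >>> next(it), next(it) - (1, 2) - >>> next(it) - Traceback (most recent call last): - ... - StopIteration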
- - -Coroutine functions ------------------- - -A function or method which is defined using "async def" is called a -*coroutine function*. Such a function, when called, returns a -*coroutine* object. It may contain "await" expressions, as well as -"async with" and "async for" statements. See also the Coroutine -Objects section. - - -Asynchronous generator functions --------------------------------- - -A function or method which is defined using "async def" and which uses -the "yield" statement is called an *asynchronous generator function*. -Such a function, when called, returns an *asynchronous iterator* -object which can be used in an "async for" statement to execute the -body of the function. - -Calling the asynchronous iterator’s "aiterator.__anext__" method will -return an *awaitable* which when awaited will execute until it -provides a value using the "yield" expression. When the function -executes an empty "return" statement or falls off the end, a -"StopAsyncIteration" exception is raised and the asynchronous iterator -will have reached the end of the set of values to be yielded. - - -Built-in functions ------------------- - -A built-in function object is a wrapper around a C function. Examples -of built-in functions are "len()" and "math.sin()" ("math" is a -standard built-in module). The number and type of the arguments are -determined by the C function. Special read-only attributes: - -* "__doc__" is the function’s documentation string, or "None" if - unavailable. See "function.__doc__". - -* "__name__" is the function’s name. See "function.__name__". - -* "__self__" is set to "None" (but see the next item). - -* "__module__" is the name of the module the function was defined in - or "None" if unavailable. See "function.__module__". - - -Built-in methods ----------------- - -This is really a different disguise of a built-in function, this time -containing an object passed to the C function as an implicit extra -argument. An example of a built-in method is "alist.append()", -assuming *alist* is a list object. In this case, the special read-only -attribute "__self__" is set to the object denoted by *alist*. (The -attribute has the same semantics as it does with "other instance -methods".) - - -Classes ------- - -Classes are callable. These objects normally act as factories for new -instances of themselves, but variations are possible for class types -that override "__new__()". The arguments of the call are passed to -"__new__()" and, in the typical case, to "__init__()" to initialize -the new instance. - - -Class Instances ---------------- - -Instances of arbitrary classes can be made callable by defining a -"__call__()" method in their class. - - -Modules -======= - -Modules are a basic organizational unit of Python code, and are -created by the import system as invoked either by the "import" -statement, or by calling functions such as "importlib.import_module()" -and built-in "__import__()". A module object has a namespace -implemented by a "dictionary" object (this is the dictionary -referenced by the "__globals__" attribute of functions defined in the -module). Attribute references are translated to lookups in this -dictionary, e.g., "m.x" is equivalent to "m.__dict__["x"]". A module -object does not contain the code object used to initialize the module -(since it isn’t needed once the initialization is done). - -Attribute assignment updates the module’s namespace dictionary, e.g., -"m.x = 1" is equivalent to "m.__dict__["x"] = 1". 
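- - For example, a short sketch using the standard "types" module (the module name 'demo' is arbitrary): - - >>> import types - >>> m = types.ModuleType('demo') - >>> m.x = 1 - >>> m.__dict__['x'] - 1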
- - -Import-related attributes on module objects -------------------------------------------- - -Module objects have the following attributes that relate to the import -system. When a module is created using the machinery associated with -the import system, these attributes are filled in based on the -module’s *spec*, before the *loader* executes and loads the module. - -To create a module dynamically rather than using the import system, -it’s recommended to use "importlib.util.module_from_spec()", which -will set the various import-controlled attributes to appropriate -values. It’s also possible to use the "types.ModuleType" constructor -to create modules directly, but this technique is more error-prone, as -most attributes must be manually set on the module object after it has -been created when using this approach. - -Caution: - - With the exception of "__name__", it is **strongly** recommended - that you rely on "__spec__" and its attributes instead of any of the - other individual attributes listed in this subsection. Note that - updating an attribute on "__spec__" will not update the - corresponding attribute on the module itself: - - >>> import typing - >>> typing.__name__, typing.__spec__.name - ('typing', 'typing') - >>> typing.__spec__.name = 'spelling' - >>> typing.__name__, typing.__spec__.name - ('typing', 'spelling') - >>> typing.__name__ = 'keyboard_smashing' - >>> typing.__name__, typing.__spec__.name - ('keyboard_smashing', 'spelling') - -module.__name__ - - The name used to uniquely identify the module in the import system. - For a directly executed module, this will be set to ""__main__"". - - This attribute must be set to the fully qualified name of the - module. It is expected to match the value of - "module.__spec__.name". - -module.__spec__ - - A record of the module’s import-system-related state. - - Set to the "module spec" that was used when importing the module. - See Module specs for more details. - - Added in version 3.4. - -module.__package__ - - The *package* a module belongs to. - - If the module is top-level (that is, not a part of any specific - package) then the attribute should be set to "''" (the empty - string). Otherwise, it should be set to the name of the module’s - package (which can be equal to "module.__name__" if the module - itself is a package). See **PEP 366** for further details. - - This attribute is used instead of "__name__" to calculate explicit - relative imports for main modules. It defaults to "None" for - modules created dynamically using the "types.ModuleType" - constructor; use "importlib.util.module_from_spec()" instead to - ensure the attribute is set to a "str". - - It is **strongly** recommended that you use - "module.__spec__.parent" instead of "module.__package__". - "__package__" is now only used as a fallback if "__spec__.parent" - is not set, and this fallback path is deprecated. - - Changed in version 3.4: This attribute now defaults to "None" for - modules created dynamically using the "types.ModuleType" - constructor. Previously the attribute was optional. - - Changed in version 3.6: The value of "__package__" is expected to - be the same as "__spec__.parent". "__package__" is now only used as - a fallback during import resolution if "__spec__.parent" is not - defined. - - Changed in version 3.10: "ImportWarning" is raised if an import - resolution falls back to "__package__" instead of - "__spec__.parent". 
- - Changed in version 3.12: Raise "DeprecationWarning" instead of - "ImportWarning" when falling back to "__package__" during import - resolution. - - Deprecated since version 3.13, will be removed in version 3.15: - "__package__" will cease to be set or taken into consideration by - the import system or standard library. - -module.__loader__ - - The *loader* object that the import machinery used to load the - module. - - This attribute is mostly useful for introspection, but can be used - for additional loader-specific functionality, for example getting - data associated with a loader. - - "__loader__" defaults to "None" for modules created dynamically - using the "types.ModuleType" constructor; use - "importlib.util.module_from_spec()" instead to ensure the attribute - is set to a *loader* object. - - It is **strongly** recommended that you use - "module.__spec__.loader" instead of "module.__loader__". - - Changed in version 3.4: This attribute now defaults to "None" for - modules created dynamically using the "types.ModuleType" - constructor. Previously the attribute was optional. - - Deprecated since version 3.12, will be removed in version 3.16: - Setting "__loader__" on a module while failing to set - "__spec__.loader" is deprecated. In Python 3.16, "__loader__" will - cease to be set or taken into consideration by the import system or - the standard library. - -module.__path__ - - A (possibly empty) *sequence* of strings enumerating the locations - where the package’s submodules will be found. Non-package modules - should not have a "__path__" attribute. See __path__ attributes on - modules for more details. - - It is **strongly** recommended that you use - "module.__spec__.submodule_search_locations" instead of - "module.__path__". - -module.__file__ - -module.__cached__ - - "__file__" and "__cached__" are both optional attributes that may - or may not be set. Both attributes should be a "str" when they are - available. - - "__file__" indicates the pathname of the file from which the module - was loaded (if loaded from a file), or the pathname of the shared - library file for extension modules loaded dynamically from a shared - library. It might be missing for certain types of modules, such as - C modules that are statically linked into the interpreter, and the - import system may opt to leave it unset if it has no semantic - meaning (for example, a module loaded from a database). - - If "__file__" is set then the "__cached__" attribute might also be - set, which is the path to any compiled version of the code (for - example, a byte-compiled file). The file does not need to exist to - set this attribute; the path can simply point to where the compiled - file *would* exist (see **PEP 3147**). - - Note that "__cached__" may be set even if "__file__" is not set. - However, that scenario is quite atypical. Ultimately, the *loader* - is what makes use of the module spec provided by the *finder* (from - which "__file__" and "__cached__" are derived). So if a loader can - load from a cached module but otherwise does not load from a file, - that atypical scenario may be appropriate. - - It is **strongly** recommended that you use - "module.__spec__.cached" instead of "module.__cached__". - - Deprecated since version 3.13, will be removed in version 3.15: - Setting "__cached__" on a module while failing to set - "__spec__.cached" is deprecated. In Python 3.15, "__cached__" will - cease to be set or taken into consideration by the import system or - standard library. 
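- - For example, a sketch of the recommended "__spec__"-based access, using the standard "json" package: - - >>> import json - >>> json.__spec__.name - 'json' - >>> json.__spec__.parent - 'json'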
- - -Other writable attributes on module objects -------------------------------------------- - -As well as the import-related attributes listed above, module objects -also have the following writable attributes: - -module.__doc__ - - The module’s documentation string, or "None" if unavailable. See - also: "__doc__ attributes". - -module.__annotations__ - - A dictionary containing *variable annotations* collected during - module body execution. For best practices on working with - "__annotations__", please see Annotations Best Practices. - - -Module dictionaries -------------------- - -Module objects also have the following special read-only attribute: - -module.__dict__ - - The module’s namespace as a dictionary object. Uniquely among the - attributes listed here, "__dict__" cannot be accessed as a global - variable from within a module; it can only be accessed as an - attribute on module objects. - - **CPython implementation detail:** Because of the way CPython - clears module dictionaries, the module dictionary will be cleared - when the module falls out of scope even if the dictionary still has - live references. To avoid this, copy the dictionary or keep the - module around while using its dictionary directly. - - -Custom classes -============== - -Custom class types are typically created by class definitions (see -section Class definitions). A class has a namespace implemented by a -dictionary object. Class attribute references are translated to -lookups in this dictionary, e.g., "C.x" is translated to -"C.__dict__["x"]" (although there are a number of hooks which allow -for other means of locating attributes). When the attribute name is -not found there, the attribute search continues in the base classes. -This search of the base classes uses the C3 method resolution order -which behaves correctly even in the presence of ‘diamond’ inheritance -structures where there are multiple inheritance paths leading back to -a common ancestor. Additional details on the C3 MRO used by Python can -be found at The Python 2.3 Method Resolution Order. - -When a class attribute reference (for class "C", say) would yield a -class method object, it is transformed into an instance method object -whose "__self__" attribute is "C". When it would yield a -"staticmethod" object, it is transformed into the object wrapped by -the static method object. See section Implementing Descriptors for -another way in which attributes retrieved from a class may differ from -those actually contained in its "__dict__". - -Class attribute assignments update the class’s dictionary, never the -dictionary of a base class. - -A class object can be called (see above) to yield a class instance -(see below). - - -Special attributes ------------------- - -+----------------------------------------------------+----------------------------------------------------+ -| Attribute | Meaning | -|====================================================|====================================================| -| type.__name__ | The class’s name. See also: "__name__ attributes". | -+----------------------------------------------------+----------------------------------------------------+ -| type.__qualname__ | The class’s *qualified name*. See also: | -| | "__qualname__ attributes". | -+----------------------------------------------------+----------------------------------------------------+ -| type.__module__ | The name of the module in which the class was | -| | defined. 
| -+----------------------------------------------------+----------------------------------------------------+ -| type.__dict__ | A "mapping proxy" providing a read-only view of | -| | the class’s namespace. See also: "__dict__ | -| | attributes". | -+----------------------------------------------------+----------------------------------------------------+ -| type.__bases__ | A "tuple" containing the class’s bases. In most | -| | cases, for a class defined as "class X(A, B, C)", | -| | "X.__bases__" will be exactly equal to "(A, B, | -| | C)". | -+----------------------------------------------------+----------------------------------------------------+ -| type.__base__ | **CPython implementation detail:** The single base | -| | class in the inheritance chain that is responsible | -| | for the memory layout of instances. This attribute | -| | corresponds to "tp_base" at the C level. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__doc__ | The class’s documentation string, or "None" if | -| | undefined. Not inherited by subclasses. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__annotations__ | A dictionary containing *variable annotations* | -| | collected during class body execution. For best | -| | practices on working with "__annotations__", | -| | please see Annotations Best Practices. Caution: | -| | Accessing the "__annotations__" attribute of a | -| | class object directly may yield incorrect results | -| | in the presence of metaclasses. In addition, the | -| | attribute may not exist for some classes. Use | -| | "inspect.get_annotations()" to retrieve class | -| | annotations safely. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__type_params__ | A "tuple" containing the type parameters of a | -| | generic class. Added in version 3.12. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__static_attributes__ | A "tuple" containing names of attributes of this | -| | class which are assigned through "self.X" from any | -| | function in its body. Added in version 3.13. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__firstlineno__ | The line number of the first line of the class | -| | definition, including decorators. Setting the | -| | "__module__" attribute removes the | -| | "__firstlineno__" item from the type’s dictionary. | -| | Added in version 3.13. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__mro__ | The "tuple" of classes that are considered when | -| | looking for base classes during method resolution. | -+----------------------------------------------------+----------------------------------------------------+ - - -Special methods ---------------- - -In addition to the special attributes described above, all Python -classes also have the following two methods available: - -type.mro() - - This method can be overridden by a metaclass to customize the - method resolution order for its instances. It is called at class - instantiation, and its result is stored in "__mro__". - -type.__subclasses__() - - Each class keeps a list of weak references to its immediate - subclasses. This method returns a list of all those references - still alive. 
The list is in definition order. Example: - - >>> class A: pass - >>> class B(A): pass - >>> A.__subclasses__() - [<class 'B'>] - - -Class instances -=============== - -A class instance is created by calling a class object (see above). A -class instance has a namespace implemented as a dictionary which is -the first place in which attribute references are searched. When an -attribute is not found there, and the instance’s class has an -attribute by that name, the search continues with the class -attributes. If a class attribute is found that is a user-defined -function object, it is transformed into an instance method object -whose "__self__" attribute is the instance. Static method and class -method objects are also transformed; see above under “Classes”. See -section Implementing Descriptors for another way in which attributes -of a class retrieved via its instances may differ from the objects -actually stored in the class’s "__dict__". If no class attribute is -found, and the object’s class has a "__getattr__()" method, that is -called to satisfy the lookup. - -Attribute assignments and deletions update the instance’s dictionary, -never a class’s dictionary. If the class has a "__setattr__()" or -"__delattr__()" method, this is called instead of updating the -instance dictionary directly. - -Class instances can pretend to be numbers, sequences, or mappings if -they have methods with certain special names. See section Special -method names. - - -Special attributes ------------------- - -object.__class__ - - The class to which a class instance belongs. - -object.__dict__ - - A dictionary or other mapping object used to store an object’s - (writable) attributes. Not all instances have a "__dict__" - attribute; see the section on __slots__ for more details. - - -I/O objects (also known as file objects) -======================================== - -A *file object* represents an open file. Various shortcuts are -available to create file objects: the "open()" built-in function, and -also "os.popen()", "os.fdopen()", and the "makefile()" method of -socket objects (and perhaps by other functions or methods provided by -extension modules). - -File objects implement common methods, listed below, to simplify usage -in generic code. They are expected to be With Statement Context -Managers. - -The objects "sys.stdin", "sys.stdout" and "sys.stderr" are initialized -to file objects corresponding to the interpreter’s standard input, -output and error streams; they are all open in text mode and therefore -follow the interface defined by the "io.TextIOBase" abstract class. - -file.read(size=-1, /) - - Retrieve up to *size* data from the file. As a convenience, if - *size* is unspecified or -1, all available data is retrieved. - -file.write(data, /) - - Store *data* to the file. - -file.close() - - Flush any buffers and close the underlying file. - - -Internal types -============== - -A few types used internally by the interpreter are exposed to the -user. Their definitions may change with future versions of the -interpreter, but they are mentioned here for completeness. - - -Code objects ------------- - -Code objects represent *byte-compiled* executable Python code, or -*bytecode*. 
The difference between a code object and a function object -is that the function object contains an explicit reference to the -function’s globals (the module in which it was defined), while a code -object contains no context; also the default argument values are -stored in the function object, not in the code object (because they -represent values calculated at run-time). Unlike function objects, -code objects are immutable and contain no references (directly or -indirectly) to mutable objects. - - -Special read-only attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_name | The function name | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_qualname | The fully qualified function name Added in | -| | version 3.11. | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_argcount | The total number of positional *parameters* | -| | (including positional-only parameters and | -| | parameters with default values) that the function | -| | has | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_posonlyargcount | The number of positional-only *parameters* | -| | (including arguments with default values) that the | -| | function has | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_kwonlyargcount | The number of keyword-only *parameters* (including | -| | arguments with default values) that the function | -| | has | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_nlocals | The number of local variables used by the function | -| | (including parameters) | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_varnames | A "tuple" containing the names of the local | -| | variables in the function (starting with the | -| | parameter names) | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_cellvars | A "tuple" containing the names of local variables | -| | that are referenced from at least one *nested | -| | scope* inside the function | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_freevars | A "tuple" containing the names of *free (closure) | -| | variables* that a *nested scope* references in an | -| | outer scope. See also "function.__closure__". | -| | Note: references to global and builtin names are | -| | *not* included. 
|
-+----------------------------------------------------+----------------------------------------------------+
-| codeobject.co_code                                 | A string representing the sequence of *bytecode*   |
-|                                                    | instructions in the function                       |
-+----------------------------------------------------+----------------------------------------------------+
-| codeobject.co_consts                               | A "tuple" containing the literals used by the      |
-|                                                    | *bytecode* in the function                         |
-+----------------------------------------------------+----------------------------------------------------+
-| codeobject.co_names                                | A "tuple" containing the names used by the         |
-|                                                    | *bytecode* in the function                         |
-+----------------------------------------------------+----------------------------------------------------+
-| codeobject.co_filename                             | The name of the file from which the code was       |
-|                                                    | compiled                                           |
-+----------------------------------------------------+----------------------------------------------------+
-| codeobject.co_firstlineno                          | The line number of the first line of the function  |
-+----------------------------------------------------+----------------------------------------------------+
-| codeobject.co_lnotab                               | A string encoding the mapping from *bytecode*      |
-|                                                    | offsets to line numbers. For details, see the      |
-|                                                    | source code of the interpreter. Deprecated since   |
-|                                                    | version 3.12: This attribute of code objects is    |
-|                                                    | deprecated, and may be removed in Python 3.15.     |
-+----------------------------------------------------+----------------------------------------------------+
-| codeobject.co_stacksize                            | The required stack size of the code object         |
-+----------------------------------------------------+----------------------------------------------------+
-| codeobject.co_flags                                | An "integer" encoding a number of flags for the    |
-|                                                    | interpreter.                                       |
-+----------------------------------------------------+----------------------------------------------------+
-
-The following flag bits are defined for "co_flags": bit "0x04" is set
-if the function uses the "*arguments" syntax to accept an arbitrary
-number of positional arguments; bit "0x08" is set if the function uses
-the "**keywords" syntax to accept arbitrary keyword arguments; bit
-"0x20" is set if the function is a generator. See Code Objects Bit
-Flags for details on the semantics of each flag that might be
-present.
-
-Future feature declarations (for example, "from __future__ import
-division") also use bits in "co_flags" to indicate whether a code
-object was compiled with a particular feature enabled. See
-"compiler_flag".
-
-Other bits in "co_flags" are reserved for internal use.
-
-If a code object represents a function, the first item in "co_consts"
-is the documentation string of the function, or "None" if undefined.
-
-
-Methods on code objects
-~~~~~~~~~~~~~~~~~~~~~~~
-
-codeobject.co_positions()
-
-   Returns an iterable over the source code positions of each
-   *bytecode* instruction in the code object.
-
-   The iterator returns "tuple"s containing the "(start_line,
-   end_line, start_column, end_column)". The *i-th* tuple corresponds
-   to the position of the source code that compiled to the *i-th* code
-   unit. Column information is 0-indexed utf-8 byte offsets on the
-   given source line.
-
-   This positional information can be missing. A non-exhaustive list
-   of cases where this may happen:
-
-   * Running the interpreter with "-X" "no_debug_ranges".
-
-   * Loading a pyc file compiled while using "-X" "no_debug_ranges". 
- - * Position tuples corresponding to artificial instructions. - - * Line and column numbers that can’t be represented due to - implementation specific limitations. - - When this occurs, some or all of the tuple elements can be "None". - - Added in version 3.11. - - Note: - - This feature requires storing column positions in code objects - which may result in a small increase of disk usage of compiled - Python files or interpreter memory usage. To avoid storing the - extra information and/or deactivate printing the extra traceback - information, the "-X" "no_debug_ranges" command line flag or the - "PYTHONNODEBUGRANGES" environment variable can be used. - -codeobject.co_lines() - - Returns an iterator that yields information about successive ranges - of *bytecode*s. Each item yielded is a "(start, end, lineno)" - "tuple": - - * "start" (an "int") represents the offset (inclusive) of the start - of the *bytecode* range - - * "end" (an "int") represents the offset (exclusive) of the end of - the *bytecode* range - - * "lineno" is an "int" representing the line number of the - *bytecode* range, or "None" if the bytecodes in the given range - have no line number - - The items yielded will have the following properties: - - * The first range yielded will have a "start" of 0. - - * The "(start, end)" ranges will be non-decreasing and consecutive. - That is, for any pair of "tuple"s, the "start" of the second will - be equal to the "end" of the first. - - * No range will be backwards: "end >= start" for all triples. - - * The last "tuple" yielded will have "end" equal to the size of the - *bytecode*. - - Zero-width ranges, where "start == end", are allowed. Zero-width - ranges are used for lines that are present in the source code, but - have been eliminated by the *bytecode* compiler. - - Added in version 3.10. - - See also: - - **PEP 626** - Precise line numbers for debugging and other tools. - The PEP that introduced the "co_lines()" method. - -codeobject.replace(**kwargs) - - Return a copy of the code object with new values for the specified - fields. - - Code objects are also supported by the generic function - "copy.replace()". - - Added in version 3.8. - - -Frame objects -------------- - -Frame objects represent execution frames. They may occur in traceback -objects, and are also passed to registered trace functions. - - -Special read-only attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_back | Points to the previous stack frame (towards the | -| | caller), or "None" if this is the bottom stack | -| | frame | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_code | The code object being executed in this frame. | -| | Accessing this attribute raises an auditing event | -| | "object.__getattr__" with arguments "obj" and | -| | ""f_code"". | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_locals | The mapping used by the frame to look up local | -| | variables. If the frame refers to an *optimized | -| | scope*, this may return a write-through proxy | -| | object. Changed in version 3.13: Return a proxy | -| | for optimized scopes. 
| -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_globals | The dictionary used by the frame to look up global | -| | variables | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_builtins | The dictionary used by the frame to look up built- | -| | in (intrinsic) names | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_lasti | The “precise instruction” of the frame object | -| | (this is an index into the *bytecode* string of | -| | the code object) | -+----------------------------------------------------+----------------------------------------------------+ - - -Special writable attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_trace | If not "None", this is a function called for | -| | various events during code execution (this is used | -| | by debuggers). Normally an event is triggered for | -| | each new source line (see "f_trace_lines"). | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_trace_lines | Set this attribute to "False" to disable | -| | triggering a tracing event for each source line. | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_trace_opcodes | Set this attribute to "True" to allow per-opcode | -| | events to be requested. Note that this may lead to | -| | undefined interpreter behaviour if exceptions | -| | raised by the trace function escape to the | -| | function being traced. | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_lineno | The current line number of the frame – writing to | -| | this from within a trace function jumps to the | -| | given line (only for the bottom-most frame). A | -| | debugger can implement a Jump command (aka Set | -| | Next Statement) by writing to this attribute. | -+----------------------------------------------------+----------------------------------------------------+ - - -Frame object methods -~~~~~~~~~~~~~~~~~~~~ - -Frame objects support one method: - -frame.clear() - - This method clears all references to local variables held by the - frame. Also, if the frame belonged to a *generator*, the generator - is finalized. This helps break reference cycles involving frame - objects (for example when catching an exception and storing its - traceback for later use). - - "RuntimeError" is raised if the frame is currently executing or - suspended. - - Added in version 3.4. - - Changed in version 3.13: Attempting to clear a suspended frame - raises "RuntimeError" (as has always been the case for executing - frames). - - -Traceback objects ------------------ - -Traceback objects represent the stack trace of an exception. A -traceback object is implicitly created when an exception occurs, and -may also be explicitly created by calling "types.TracebackType". - -Changed in version 3.7: Traceback objects can now be explicitly -instantiated from Python code. - -For implicitly created tracebacks, when the search for an exception -handler unwinds the execution stack, at each unwound level a traceback -object is inserted in front of the current traceback. 
When an -exception handler is entered, the stack trace is made available to the -program. (See section The try statement.) It is accessible as the -third item of the tuple returned by "sys.exc_info()", and as the -"__traceback__" attribute of the caught exception. - -When the program contains no suitable handler, the stack trace is -written (nicely formatted) to the standard error stream; if the -interpreter is interactive, it is also made available to the user as -"sys.last_traceback". - -For explicitly created tracebacks, it is up to the creator of the -traceback to determine how the "tb_next" attributes should be linked -to form a full stack trace. - -Special read-only attributes: - -+----------------------------------------------------+----------------------------------------------------+ -| traceback.tb_frame | Points to the execution frame of the current | -| | level. Accessing this attribute raises an | -| | auditing event "object.__getattr__" with arguments | -| | "obj" and ""tb_frame"". | -+----------------------------------------------------+----------------------------------------------------+ -| traceback.tb_lineno | Gives the line number where the exception occurred | -+----------------------------------------------------+----------------------------------------------------+ -| traceback.tb_lasti | Indicates the “precise instruction”. | -+----------------------------------------------------+----------------------------------------------------+ - -The line number and last instruction in the traceback may differ from -the line number of its frame object if the exception occurred in a -"try" statement with no matching except clause or with a "finally" -clause. - -traceback.tb_next - - The special writable attribute "tb_next" is the next level in the - stack trace (towards the frame where the exception occurred), or - "None" if there is no next level. - - Changed in version 3.7: This attribute is now writable - - -Slice objects -------------- - -Slice objects are used to represent slices for "__getitem__()" -methods. They are also created by the built-in "slice()" function. - -Special read-only attributes: "start" is the lower bound; "stop" is -the upper bound; "step" is the step value; each is "None" if omitted. -These attributes can have any type. - -Slice objects support one method: - -slice.indices(self, length) - - This method takes a single integer argument *length* and computes - information about the slice that the slice object would describe if - applied to a sequence of *length* items. It returns a tuple of - three integers; respectively these are the *start* and *stop* - indices and the *step* or stride length of the slice. Missing or - out-of-bounds indices are handled in a manner consistent with - regular slices. - - -Static method objects ---------------------- - -Static method objects provide a way of defeating the transformation of -function objects to method objects described above. A static method -object is a wrapper around any other object, usually a user-defined -method object. When a static method object is retrieved from a class -or a class instance, the object actually returned is the wrapped -object, which is not subject to any further transformation. Static -method objects are also callable. Static method objects are created by -the built-in "staticmethod()" constructor. 
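-
-For example, a minimal sketch of this behaviour (the class and
-function names here are illustrative):
-
-   >>> class C:
-   ...     @staticmethod
-   ...     def f(x):
-   ...         return x + 1
-   ...
-   >>> C.f(1)     # retrieved through the class; the plain function is called
-   2
-   >>> C().f(1)   # retrieved through an instance; no "self" is inserted
-   2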
- - -Class method objects --------------------- - -A class method object, like a static method object, is a wrapper -around another object that alters the way in which that object is -retrieved from classes and class instances. The behaviour of class -method objects upon such retrieval is described above, under “instance -methods”. Class method objects are created by the built-in -"classmethod()" constructor. -''', - 'typesfunctions': r'''Functions -********* - -Function objects are created by function definitions. The only -operation on a function object is to call it: "func(argument-list)". - -There are really two flavors of function objects: built-in functions -and user-defined functions. Both support the same operation (to call -the function), but the implementation is different, hence the -different object types. - -See Function definitions for more information. -''', - 'typesmapping': r'''Mapping Types — "dict" -********************** - -A *mapping* object maps *hashable* values to arbitrary objects. -Mappings are mutable objects. There is currently only one standard -mapping type, the *dictionary*. (For other containers see the built- -in "list", "set", and "tuple" classes, and the "collections" module.) - -A dictionary’s keys are *almost* arbitrary values. Values that are -not *hashable*, that is, values containing lists, dictionaries or -other mutable types (that are compared by value rather than by object -identity) may not be used as keys. Values that compare equal (such as -"1", "1.0", and "True") can be used interchangeably to index the same -dictionary entry. - -class dict(**kwargs) -class dict(mapping, /, **kwargs) -class dict(iterable, /, **kwargs) - - Return a new dictionary initialized from an optional positional - argument and a possibly empty set of keyword arguments. - - Dictionaries can be created by several means: - - * Use a comma-separated list of "key: value" pairs within braces: - "{'jack': 4098, 'sjoerd': 4127}" or "{4098: 'jack', 4127: - 'sjoerd'}" - - * Use a dict comprehension: "{}", "{x: x ** 2 for x in range(10)}" - - * Use the type constructor: "dict()", "dict([('foo', 100), ('bar', - 200)])", "dict(foo=100, bar=200)" - - If no positional argument is given, an empty dictionary is created. - If a positional argument is given and it defines a "keys()" method, - a dictionary is created by calling "__getitem__()" on the argument - with each returned key from the method. Otherwise, the positional - argument must be an *iterable* object. Each item in the iterable - must itself be an iterable with exactly two elements. The first - element of each item becomes a key in the new dictionary, and the - second element the corresponding value. If a key occurs more than - once, the last value for that key becomes the corresponding value - in the new dictionary. - - If keyword arguments are given, the keyword arguments and their - values are added to the dictionary created from the positional - argument. If a key being added is already present, the value from - the keyword argument replaces the value from the positional - argument. - - Dictionaries compare equal if and only if they have the same "(key, - value)" pairs (regardless of ordering). Order comparisons (‘<’, - ‘<=’, ‘>=’, ‘>’) raise "TypeError". 
To illustrate dictionary - creation and equality, the following examples all return a - dictionary equal to "{"one": 1, "two": 2, "three": 3}": - - >>> a = dict(one=1, two=2, three=3) - >>> b = {'one': 1, 'two': 2, 'three': 3} - >>> c = dict(zip(['one', 'two', 'three'], [1, 2, 3])) - >>> d = dict([('two', 2), ('one', 1), ('three', 3)]) - >>> e = dict({'three': 3, 'one': 1, 'two': 2}) - >>> f = dict({'one': 1, 'three': 3}, two=2) - >>> a == b == c == d == e == f - True - - Providing keyword arguments as in the first example only works for - keys that are valid Python identifiers. Otherwise, any valid keys - can be used. - - Dictionaries preserve insertion order. Note that updating a key - does not affect the order. Keys added after deletion are inserted - at the end. - - >>> d = {"one": 1, "two": 2, "three": 3, "four": 4} - >>> d - {'one': 1, 'two': 2, 'three': 3, 'four': 4} - >>> list(d) - ['one', 'two', 'three', 'four'] - >>> list(d.values()) - [1, 2, 3, 4] - >>> d["one"] = 42 - >>> d - {'one': 42, 'two': 2, 'three': 3, 'four': 4} - >>> del d["two"] - >>> d["two"] = None - >>> d - {'one': 42, 'three': 3, 'four': 4, 'two': None} - - Changed in version 3.7: Dictionary order is guaranteed to be - insertion order. This behavior was an implementation detail of - CPython from 3.6. - - These are the operations that dictionaries support (and therefore, - custom mapping types should support too): - - list(d) - - Return a list of all the keys used in the dictionary *d*. - - len(d) - - Return the number of items in the dictionary *d*. - - d[key] - - Return the item of *d* with key *key*. Raises a "KeyError" if - *key* is not in the map. - - If a subclass of dict defines a method "__missing__()" and *key* - is not present, the "d[key]" operation calls that method with - the key *key* as argument. The "d[key]" operation then returns - or raises whatever is returned or raised by the - "__missing__(key)" call. No other operations or methods invoke - "__missing__()". If "__missing__()" is not defined, "KeyError" - is raised. "__missing__()" must be a method; it cannot be an - instance variable: - - >>> class Counter(dict): - ... def __missing__(self, key): - ... return 0 - ... - >>> c = Counter() - >>> c['red'] - 0 - >>> c['red'] += 1 - >>> c['red'] - 1 - - The example above shows part of the implementation of - "collections.Counter". A different "__missing__()" method is - used by "collections.defaultdict". - - d[key] = value - - Set "d[key]" to *value*. - - del d[key] - - Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not - in the map. - - key in d - - Return "True" if *d* has a key *key*, else "False". - - key not in d - - Equivalent to "not key in d". - - iter(d) - - Return an iterator over the keys of the dictionary. This is a - shortcut for "iter(d.keys())". - - clear() - - Remove all items from the dictionary. - - copy() - - Return a shallow copy of the dictionary. - - classmethod fromkeys(iterable, value=None, /) - - Create a new dictionary with keys from *iterable* and values set - to *value*. - - "fromkeys()" is a class method that returns a new dictionary. - *value* defaults to "None". All of the values refer to just a - single instance, so it generally doesn’t make sense for *value* - to be a mutable object such as an empty list. To get distinct - values, use a dict comprehension instead. - - get(key, default=None, /) - - Return the value for *key* if *key* is in the dictionary, else - *default*. 
If *default* is not given, it defaults to "None", so - that this method never raises a "KeyError". - - items() - - Return a new view of the dictionary’s items ("(key, value)" - pairs). See the documentation of view objects. - - keys() - - Return a new view of the dictionary’s keys. See the - documentation of view objects. - - pop(key, /) - pop(key, default, /) - - If *key* is in the dictionary, remove it and return its value, - else return *default*. If *default* is not given and *key* is - not in the dictionary, a "KeyError" is raised. - - popitem() - - Remove and return a "(key, value)" pair from the dictionary. - Pairs are returned in LIFO (last-in, first-out) order. - - "popitem()" is useful to destructively iterate over a - dictionary, as often used in set algorithms. If the dictionary - is empty, calling "popitem()" raises a "KeyError". - - Changed in version 3.7: LIFO order is now guaranteed. In prior - versions, "popitem()" would return an arbitrary key/value pair. - - reversed(d) - - Return a reverse iterator over the keys of the dictionary. This - is a shortcut for "reversed(d.keys())". - - Added in version 3.8. - - setdefault(key, default=None, /) - - If *key* is in the dictionary, return its value. If not, insert - *key* with a value of *default* and return *default*. *default* - defaults to "None". - - update(**kwargs) - update(mapping, /, **kwargs) - update(iterable, /, **kwargs) - - Update the dictionary with the key/value pairs from *mapping* or - *iterable* and *kwargs*, overwriting existing keys. Return - "None". - - "update()" accepts either another object with a "keys()" method - (in which case "__getitem__()" is called with every key returned - from the method) or an iterable of key/value pairs (as tuples or - other iterables of length two). If keyword arguments are - specified, the dictionary is then updated with those key/value - pairs: "d.update(red=1, blue=2)". - - values() - - Return a new view of the dictionary’s values. See the - documentation of view objects. - - An equality comparison between one "dict.values()" view and - another will always return "False". This also applies when - comparing "dict.values()" to itself: - - >>> d = {'a': 1} - >>> d.values() == d.values() - False - - d | other - - Create a new dictionary with the merged keys and values of *d* - and *other*, which must both be dictionaries. The values of - *other* take priority when *d* and *other* share keys. - - Added in version 3.9. - - d |= other - - Update the dictionary *d* with keys and values from *other*, - which may be either a *mapping* or an *iterable* of key/value - pairs. The values of *other* take priority when *d* and *other* - share keys. - - Added in version 3.9. - - Dictionaries and dictionary views are reversible. - - >>> d = {"one": 1, "two": 2, "three": 3, "four": 4} - >>> d - {'one': 1, 'two': 2, 'three': 3, 'four': 4} - >>> list(reversed(d)) - ['four', 'three', 'two', 'one'] - >>> list(reversed(d.values())) - [4, 3, 2, 1] - >>> list(reversed(d.items())) - [('four', 4), ('three', 3), ('two', 2), ('one', 1)] - - Changed in version 3.8: Dictionaries are now reversible. - -See also: - - "types.MappingProxyType" can be used to create a read-only view of a - "dict". - - -Dictionary view objects -======================= - -The objects returned by "dict.keys()", "dict.values()" and -"dict.items()" are *view objects*. They provide a dynamic view on the -dictionary’s entries, which means that when the dictionary changes, -the view reflects these changes. 
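-
-For instance, a small sketch of this dynamic behaviour (the keys and
-values are illustrative):
-
-   >>> d = {'a': 1}
-   >>> ks = d.keys()
-   >>> d['b'] = 2
-   >>> list(ks)
-   ['a', 'b']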
- -Dictionary views can be iterated over to yield their respective data, -and support membership tests: - -len(dictview) - - Return the number of entries in the dictionary. - -iter(dictview) - - Return an iterator over the keys, values or items (represented as - tuples of "(key, value)") in the dictionary. - - Keys and values are iterated over in insertion order. This allows - the creation of "(value, key)" pairs using "zip()": "pairs = - zip(d.values(), d.keys())". Another way to create the same list is - "pairs = [(v, k) for (k, v) in d.items()]". - - Iterating views while adding or deleting entries in the dictionary - may raise a "RuntimeError" or fail to iterate over all entries. - - Changed in version 3.7: Dictionary order is guaranteed to be - insertion order. - -x in dictview - - Return "True" if *x* is in the underlying dictionary’s keys, values - or items (in the latter case, *x* should be a "(key, value)" - tuple). - -reversed(dictview) - - Return a reverse iterator over the keys, values or items of the - dictionary. The view will be iterated in reverse order of the - insertion. - - Changed in version 3.8: Dictionary views are now reversible. - -dictview.mapping - - Return a "types.MappingProxyType" that wraps the original - dictionary to which the view refers. - - Added in version 3.10. - -Keys views are set-like since their entries are unique and *hashable*. -Items views also have set-like operations since the (key, value) pairs -are unique and the keys are hashable. If all values in an items view -are hashable as well, then the items view can interoperate with other -sets. (Values views are not treated as set-like since the entries are -generally not unique.) For set-like views, all of the operations -defined for the abstract base class "collections.abc.Set" are -available (for example, "==", "<", or "^"). While using set -operators, set-like views accept any iterable as the other operand, -unlike sets which only accept sets as the input. - -An example of dictionary view usage: - - >>> dishes = {'eggs': 2, 'sausage': 1, 'bacon': 1, 'spam': 500} - >>> keys = dishes.keys() - >>> values = dishes.values() - - >>> # iteration - >>> n = 0 - >>> for val in values: - ... n += val - ... - >>> print(n) - 504 - - >>> # keys and values are iterated over in the same order (insertion order) - >>> list(keys) - ['eggs', 'sausage', 'bacon', 'spam'] - >>> list(values) - [2, 1, 1, 500] - - >>> # view objects are dynamic and reflect dict changes - >>> del dishes['eggs'] - >>> del dishes['sausage'] - >>> list(keys) - ['bacon', 'spam'] - - >>> # set operations - >>> keys & {'eggs', 'bacon', 'salad'} - {'bacon'} - >>> keys ^ {'sausage', 'juice'} == {'juice', 'sausage', 'bacon', 'spam'} - True - >>> keys | ['juice', 'juice', 'juice'] == {'bacon', 'spam', 'juice'} - True - - >>> # get back a read-only proxy for the original dictionary - >>> values.mapping - mappingproxy({'bacon': 1, 'spam': 500}) - >>> values.mapping['spam'] - 500 -''', - 'typesmethods': r'''Methods -******* - -Methods are functions that are called using the attribute notation. -There are two flavors: built-in methods (such as "append()" on lists) -and class instance method. Built-in methods are described with the -types that support them. - -If you access a method (a function defined in a class namespace) -through an instance, you get a special object: a *bound method* (also -called instance method) object. When called, it will add the "self" -argument to the argument list. 
Bound methods have two special read-
-only attributes: "m.__self__" is the object on which the method
-operates, and "m.__func__" is the function implementing the method.
-Calling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to
-calling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".
-
-Like function objects, bound method objects support getting arbitrary
-attributes. However, since method attributes are actually stored on
-the underlying function object ("method.__func__"), setting method
-attributes on bound methods is disallowed. Attempting to set an
-attribute on a method results in an "AttributeError" being raised. In
-order to set a method attribute, you need to explicitly set it on the
-underlying function object:
-
-   >>> class C:
-   ...     def method(self):
-   ...         pass
-   ...
-   >>> c = C()
-   >>> c.method.whoami = 'my name is method'  # can't set on the method
-   Traceback (most recent call last):
-     File "<stdin>", line 1, in <module>
-   AttributeError: 'method' object has no attribute 'whoami'
-   >>> c.method.__func__.whoami = 'my name is method'
-   >>> c.method.whoami
-   'my name is method'
-
-See Instance methods for more information.
-''',
- 'typesmodules': r'''Modules
-*******
-
-The only special operation on a module is attribute access: "m.name",
-where *m* is a module and *name* accesses a name defined in *m*’s
-symbol table. Module attributes can be assigned to. (Note that the
-"import" statement is not, strictly speaking, an operation on a module
-object; "import foo" does not require a module object named *foo* to
-exist, rather it requires an (external) *definition* for a module
-named *foo* somewhere.)
-
-A special attribute of every module is "__dict__". This is the
-dictionary containing the module’s symbol table. Modifying this
-dictionary will actually change the module’s symbol table, but direct
-assignment to the "__dict__" attribute is not possible (you can write
-"m.__dict__['a'] = 1", which defines "m.a" to be "1", but you can’t
-write "m.__dict__ = {}"). Modifying "__dict__" directly is not
-recommended.
-
-Modules built into the interpreter are written like this: "<module
-'sys' (built-in)>". If loaded from a file, they are written as
-"<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>".
-''',
- 'typesseq': r'''Sequence Types — "list", "tuple", "range"
-*****************************************
-
-There are three basic sequence types: lists, tuples, and range
-objects. Additional sequence types tailored for processing of binary
-data and text strings are described in dedicated sections.
-
-
-Common Sequence Operations
-==========================
-
-The operations in the following table are supported by most sequence
-types, both mutable and immutable. The "collections.abc.Sequence" ABC
-is provided to make it easier to correctly implement these operations
-on custom sequence types.
-
-This table lists the sequence operations sorted in ascending priority.
-In the table, *s* and *t* are sequences of the same type, *n*, *i*,
-*j* and *k* are integers and *x* is an arbitrary object that meets any
-type and value restrictions imposed by *s*.
-
-The "in" and "not in" operations have the same priorities as the
-comparison operations. The "+" (concatenation) and "*" (repetition)
-operations have the same priority as the corresponding numeric
-operations. 
[3] - -+----------------------------+----------------------------------+------------+ -| Operation | Result | Notes | -|============================|==================================|============| -| "x in s" | "True" if an item of *s* is | (1) | -| | equal to *x*, else "False" | | -+----------------------------+----------------------------------+------------+ -| "x not in s" | "False" if an item of *s* is | (1) | -| | equal to *x*, else "True" | | -+----------------------------+----------------------------------+------------+ -| "s + t" | the concatenation of *s* and *t* | (6)(7) | -+----------------------------+----------------------------------+------------+ -| "s * n" or "n * s" | equivalent to adding *s* to | (2)(7) | -| | itself *n* times | | -+----------------------------+----------------------------------+------------+ -| "s[i]" | *i*th item of *s*, origin 0 | (3)(8) | -+----------------------------+----------------------------------+------------+ -| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) | -+----------------------------+----------------------------------+------------+ -| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) | -| | with step *k* | | -+----------------------------+----------------------------------+------------+ -| "len(s)" | length of *s* | | -+----------------------------+----------------------------------+------------+ -| "min(s)" | smallest item of *s* | | -+----------------------------+----------------------------------+------------+ -| "max(s)" | largest item of *s* | | -+----------------------------+----------------------------------+------------+ - -Sequences of the same type also support comparisons. In particular, -tuples and lists are compared lexicographically by comparing -corresponding elements. This means that to compare equal, every -element must compare equal and the two sequences must be of the same -type and have the same length. (For full details see Comparisons in -the language reference.) - -Forward and reversed iterators over mutable sequences access values -using an index. That index will continue to march forward (or -backward) even if the underlying sequence is mutated. The iterator -terminates only when an "IndexError" or a "StopIteration" is -encountered (or when the index drops below zero). - -Notes: - -1. While the "in" and "not in" operations are used only for simple - containment testing in the general case, some specialised sequences - (such as "str", "bytes" and "bytearray") also use them for - subsequence testing: - - >>> "gg" in "eggs" - True - -2. Values of *n* less than "0" are treated as "0" (which yields an - empty sequence of the same type as *s*). Note that items in the - sequence *s* are not copied; they are referenced multiple times. - This often haunts new Python programmers; consider: - - >>> lists = [[]] * 3 - >>> lists - [[], [], []] - >>> lists[0].append(3) - >>> lists - [[3], [3], [3]] - - What has happened is that "[[]]" is a one-element list containing - an empty list, so all three elements of "[[]] * 3" are references - to this single empty list. Modifying any of the elements of - "lists" modifies this single list. You can create a list of - different lists this way: - - >>> lists = [[] for i in range(3)] - >>> lists[0].append(3) - >>> lists[1].append(5) - >>> lists[2].append(7) - >>> lists - [[3], [5], [7]] - - Further explanation is available in the FAQ entry How do I create a - multidimensional list?. - -3. 
If *i* or *j* is negative, the index is relative to the end of - sequence *s*: "len(s) + i" or "len(s) + j" is substituted. But - note that "-0" is still "0". - -4. The slice of *s* from *i* to *j* is defined as the sequence of - items with index *k* such that "i <= k < j". - - * If *i* is omitted or "None", use "0". - - * If *j* is omitted or "None", use "len(s)". - - * If *i* or *j* is less than "-len(s)", use "0". - - * If *i* or *j* is greater than "len(s)", use "len(s)". - - * If *i* is greater than or equal to *j*, the slice is empty. - -5. The slice of *s* from *i* to *j* with step *k* is defined as the - sequence of items with index "x = i + n*k" such that "0 <= n < - (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k", - "i+3*k" and so on, stopping when *j* is reached (but never - including *j*). When *k* is positive, *i* and *j* are reduced to - "len(s)" if they are greater. When *k* is negative, *i* and *j* are - reduced to "len(s) - 1" if they are greater. If *i* or *j* are - omitted or "None", they become “end” values (which end depends on - the sign of *k*). Note, *k* cannot be zero. If *k* is "None", it - is treated like "1". - -6. Concatenating immutable sequences always results in a new object. - This means that building up a sequence by repeated concatenation - will have a quadratic runtime cost in the total sequence length. - To get a linear runtime cost, you must switch to one of the - alternatives below: - - * if concatenating "str" objects, you can build a list and use - "str.join()" at the end or else write to an "io.StringIO" - instance and retrieve its value when complete - - * if concatenating "bytes" objects, you can similarly use - "bytes.join()" or "io.BytesIO", or you can do in-place - concatenation with a "bytearray" object. "bytearray" objects are - mutable and have an efficient overallocation mechanism - - * if concatenating "tuple" objects, extend a "list" instead - - * for other types, investigate the relevant class documentation - -7. Some sequence types (such as "range") only support item sequences - that follow specific patterns, and hence don’t support sequence - concatenation or repetition. - -8. An "IndexError" is raised if *i* is outside the sequence range. - --[ Sequence Methods ]- - -Sequence types also support the following methods: - -sequence.count(value, /) - - Return the total number of occurrences of *value* in *sequence*. - -sequence.index(value[, start[, stop]]) - - Return the index of the first occurrence of *value* in *sequence*. - - Raises "ValueError" if *value* is not found in *sequence*. - - The *start* or *stop* arguments allow for efficient searching of - subsections of the sequence, beginning at *start* and ending at - *stop*. This is roughly equivalent to "start + - sequence[start:stop].index(value)", only without copying any data. - - Caution: - - Not all sequence types support passing the *start* and *stop* - arguments. - - -Immutable Sequence Types -======================== - -The only operation that immutable sequence types generally implement -that is not also implemented by mutable sequence types is support for -the "hash()" built-in. - -This support allows immutable sequences, such as "tuple" instances, to -be used as "dict" keys and stored in "set" and "frozenset" instances. - -Attempting to hash an immutable sequence that contains unhashable -values will result in "TypeError". - - -Mutable Sequence Types -====================== - -The operations in the following table are defined on mutable sequence -types. 
The "collections.abc.MutableSequence" ABC is provided to make -it easier to correctly implement these operations on custom sequence -types. - -In the table *s* is an instance of a mutable sequence type, *t* is any -iterable object and *x* is an arbitrary object that meets any type and -value restrictions imposed by *s* (for example, "bytearray" only -accepts integers that meet the value restriction "0 <= x <= 255"). - -+--------------------------------+----------------------------------+-----------------------+ -| Operation | Result | Notes | -|================================|==================================|=======================| -| "s[i] = x" | item *i* of *s* is replaced by | | -| | *x* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i]" | removes item *i* of *s* | | -+--------------------------------+----------------------------------+-----------------------+ -| "s[i:j] = t" | slice of *s* from *i* to *j* is | | -| | replaced by the contents of the | | -| | iterable *t* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i:j]" | removes the elements of "s[i:j]" | | -| | from the list (same as "s[i:j] = | | -| | []") | | -+--------------------------------+----------------------------------+-----------------------+ -| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) | -| | replaced by those of *t* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i:j:k]" | removes the elements of | | -| | "s[i:j:k]" from the list | | -+--------------------------------+----------------------------------+-----------------------+ -| "s += t" | extends *s* with the contents of | | -| | *t* (for the most part the same | | -| | as "s[len(s):len(s)] = t") | | -+--------------------------------+----------------------------------+-----------------------+ -| "s *= n" | updates *s* with its contents | (2) | -| | repeated *n* times | | -+--------------------------------+----------------------------------+-----------------------+ - -Notes: - -1. If *k* is not equal to "1", *t* must have the same length as the - slice it is replacing. - -2. The value *n* is an integer, or an object implementing - "__index__()". Zero and negative values of *n* clear the sequence. - Items in the sequence are not copied; they are referenced multiple - times, as explained for "s * n" under Common Sequence Operations. - --[ Mutable Sequence Methods ]- - -Mutable sequence types also support the following methods: - -sequence.append(value, /) - - Append *value* to the end of the sequence. This is equivalent to - writing "seq[len(seq):len(seq)] = [value]". - -sequence.clear() - - Added in version 3.3. - - Remove all items from *sequence*. This is equivalent to writing - "del sequence[:]". - -sequence.copy() - - Added in version 3.3. - - Create a shallow copy of *sequence*. This is equivalent to writing - "sequence[:]". - - Hint: - - The "copy()" method is not part of the "MutableSequence" "ABC", - but most concrete mutable sequence types provide it. - -sequence.extend(iterable, /) - - Extend *sequence* with the contents of *iterable*. For the most - part, this is the same as writing "seq[len(seq):len(seq)] = - iterable". - -sequence.insert(index, value, /) - - Insert *value* into *sequence* at the given *index*. This is - equivalent to writing "sequence[index:index] = [value]". 
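-
-   A brief sketch of several of the methods above, using a plain
-   "list" (the values are illustrative):
-
-      >>> seq = [1, 2]
-      >>> seq.append(3)
-      >>> seq.extend([4, 5])
-      >>> seq.insert(0, 0)
-      >>> seq
-      [0, 1, 2, 3, 4, 5]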
- -sequence.pop(index=-1, /) - - Retrieve the item at *index* and also removes it from *sequence*. - By default, the last item in *sequence* is removed and returned. - -sequence.remove(value, /) - - Remove the first item from *sequence* where "sequence[i] == value". - - Raises "ValueError" if *value* is not found in *sequence*. - -sequence.reverse() - - Reverse the items of *sequence* in place. This method maintains - economy of space when reversing a large sequence. To remind users - that it operates by side-effect, it returns "None". - - -Lists -===== - -Lists are mutable sequences, typically used to store collections of -homogeneous items (where the precise degree of similarity will vary by -application). - -class list(iterable=(), /) - - Lists may be constructed in several ways: - - * Using a pair of square brackets to denote the empty list: "[]" - - * Using square brackets, separating items with commas: "[a]", "[a, - b, c]" - - * Using a list comprehension: "[x for x in iterable]" - - * Using the type constructor: "list()" or "list(iterable)" - - The constructor builds a list whose items are the same and in the - same order as *iterable*’s items. *iterable* may be either a - sequence, a container that supports iteration, or an iterator - object. If *iterable* is already a list, a copy is made and - returned, similar to "iterable[:]". For example, "list('abc')" - returns "['a', 'b', 'c']" and "list( (1, 2, 3) )" returns "[1, 2, - 3]". If no argument is given, the constructor creates a new empty - list, "[]". - - Many other operations also produce lists, including the "sorted()" - built-in. - - Lists implement all of the common and mutable sequence operations. - Lists also provide the following additional method: - - sort(*, key=None, reverse=False) - - This method sorts the list in place, using only "<" comparisons - between items. Exceptions are not suppressed - if any comparison - operations fail, the entire sort operation will fail (and the - list will likely be left in a partially modified state). - - "sort()" accepts two arguments that can only be passed by - keyword (keyword-only arguments): - - *key* specifies a function of one argument that is used to - extract a comparison key from each list element (for example, - "key=str.lower"). The key corresponding to each item in the list - is calculated once and then used for the entire sorting process. - The default value of "None" means that list items are sorted - directly without calculating a separate key value. - - The "functools.cmp_to_key()" utility is available to convert a - 2.x style *cmp* function to a *key* function. - - *reverse* is a boolean value. If set to "True", then the list - elements are sorted as if each comparison were reversed. - - This method modifies the sequence in place for economy of space - when sorting a large sequence. To remind users that it operates - by side effect, it does not return the sorted sequence (use - "sorted()" to explicitly request a new sorted list instance). - - The "sort()" method is guaranteed to be stable. A sort is - stable if it guarantees not to change the relative order of - elements that compare equal — this is helpful for sorting in - multiple passes (for example, sort by department, then by salary - grade). - - For sorting examples and a brief sorting tutorial, see Sorting - Techniques. - - **CPython implementation detail:** While a list is being sorted, - the effect of attempting to mutate, or even inspect, the list is - undefined. 
The C implementation of Python makes the list appear - empty for the duration, and raises "ValueError" if it can detect - that the list has been mutated during a sort. - - -Tuples -====== - -Tuples are immutable sequences, typically used to store collections of -heterogeneous data (such as the 2-tuples produced by the "enumerate()" -built-in). Tuples are also used for cases where an immutable sequence -of homogeneous data is needed (such as allowing storage in a "set" or -"dict" instance). - -class tuple(iterable=(), /) - - Tuples may be constructed in a number of ways: - - * Using a pair of parentheses to denote the empty tuple: "()" - - * Using a trailing comma for a singleton tuple: "a," or "(a,)" - - * Separating items with commas: "a, b, c" or "(a, b, c)" - - * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)" - - The constructor builds a tuple whose items are the same and in the - same order as *iterable*’s items. *iterable* may be either a - sequence, a container that supports iteration, or an iterator - object. If *iterable* is already a tuple, it is returned - unchanged. For example, "tuple('abc')" returns "('a', 'b', 'c')" - and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is - given, the constructor creates a new empty tuple, "()". - - Note that it is actually the comma which makes a tuple, not the - parentheses. The parentheses are optional, except in the empty - tuple case, or when they are needed to avoid syntactic ambiguity. - For example, "f(a, b, c)" is a function call with three arguments, - while "f((a, b, c))" is a function call with a 3-tuple as the sole - argument. - - Tuples implement all of the common sequence operations. - -For heterogeneous collections of data where access by name is clearer -than access by index, "collections.namedtuple()" may be a more -appropriate choice than a simple tuple object. - - -Ranges -====== - -The "range" type represents an immutable sequence of numbers and is -commonly used for looping a specific number of times in "for" loops. - -class range(stop, /) -class range(start, stop, step=1, /) - - The arguments to the range constructor must be integers (either - built-in "int" or any object that implements the "__index__()" - special method). If the *step* argument is omitted, it defaults to - "1". If the *start* argument is omitted, it defaults to "0". If - *step* is zero, "ValueError" is raised. - - For a positive *step*, the contents of a range "r" are determined - by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] < - stop". - - For a negative *step*, the contents of the range are still - determined by the formula "r[i] = start + step*i", but the - constraints are "i >= 0" and "r[i] > stop". - - A range object will be empty if "r[0]" does not meet the value - constraint. Ranges do support negative indices, but these are - interpreted as indexing from the end of the sequence determined by - the positive indices. - - Ranges containing absolute values larger than "sys.maxsize" are - permitted but some features (such as "len()") may raise - "OverflowError". 
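-
-   A short sketch of this limit (assuming a 64-bit CPython build; the
-   exact error message may vary):
-
-      >>> import sys
-      >>> r = range(2 * sys.maxsize)   # creating the range is permitted
-      >>> len(r)
-      Traceback (most recent call last):
-        ...
-      OverflowError: Python int too large to convert to C ssize_t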
- - Range examples: - - >>> list(range(10)) - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] - >>> list(range(1, 11)) - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - >>> list(range(0, 30, 5)) - [0, 5, 10, 15, 20, 25] - >>> list(range(0, 10, 3)) - [0, 3, 6, 9] - >>> list(range(0, -10, -1)) - [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] - >>> list(range(0)) - [] - >>> list(range(1, 0)) - [] - - Ranges implement all of the common sequence operations except - concatenation and repetition (due to the fact that range objects - can only represent sequences that follow a strict pattern and - repetition and concatenation will usually violate that pattern). - - start - - The value of the *start* parameter (or "0" if the parameter was - not supplied) - - stop - - The value of the *stop* parameter - - step - - The value of the *step* parameter (or "1" if the parameter was - not supplied) - -The advantage of the "range" type over a regular "list" or "tuple" is -that a "range" object will always take the same (small) amount of -memory, no matter the size of the range it represents (as it only -stores the "start", "stop" and "step" values, calculating individual -items and subranges as needed). - -Range objects implement the "collections.abc.Sequence" ABC, and -provide features such as containment tests, element index lookup, -slicing and support for negative indices (see Sequence Types — list, -tuple, range): - ->>> r = range(0, 20, 2) ->>> r -range(0, 20, 2) ->>> 11 in r -False ->>> 10 in r -True ->>> r.index(10) -5 ->>> r[5] -10 ->>> r[:5] -range(0, 10, 2) ->>> r[-1] -18 - -Testing range objects for equality with "==" and "!=" compares them as -sequences. That is, two range objects are considered equal if they -represent the same sequence of values. (Note that two range objects -that compare equal might have different "start", "stop" and "step" -attributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3, -2) == range(0, 4, 2)".) - -Changed in version 3.2: Implement the Sequence ABC. Support slicing -and negative indices. Test "int" objects for membership in constant -time instead of iterating through all items. - -Changed in version 3.3: Define ‘==’ and ‘!=’ to compare range objects -based on the sequence of values they define (instead of comparing -based on object identity).Added the "start", "stop" and "step" -attributes. - -See also: - - * The linspace recipe shows how to implement a lazy version of range - suitable for floating-point applications. -''', - 'typesseq-mutable': r'''Mutable Sequence Types -********************** - -The operations in the following table are defined on mutable sequence -types. The "collections.abc.MutableSequence" ABC is provided to make -it easier to correctly implement these operations on custom sequence -types. - -In the table *s* is an instance of a mutable sequence type, *t* is any -iterable object and *x* is an arbitrary object that meets any type and -value restrictions imposed by *s* (for example, "bytearray" only -accepts integers that meet the value restriction "0 <= x <= 255"). 
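-
-For instance, with a "bytearray" (the values are illustrative):
-
-   >>> ba = bytearray(b'abc')
-   >>> ba[0] = 65          # within the 0 <= x <= 255 restriction
-   >>> ba
-   bytearray(b'Abc')
-   >>> ba[0] = 999         # out of range
-   Traceback (most recent call last):
-     ...
-   ValueError: byte must be in range(0, 256)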
- -+--------------------------------+----------------------------------+-----------------------+ -| Operation | Result | Notes | -|================================|==================================|=======================| -| "s[i] = x" | item *i* of *s* is replaced by | | -| | *x* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i]" | removes item *i* of *s* | | -+--------------------------------+----------------------------------+-----------------------+ -| "s[i:j] = t" | slice of *s* from *i* to *j* is | | -| | replaced by the contents of the | | -| | iterable *t* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i:j]" | removes the elements of "s[i:j]" | | -| | from the list (same as "s[i:j] = | | -| | []") | | -+--------------------------------+----------------------------------+-----------------------+ -| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) | -| | replaced by those of *t* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i:j:k]" | removes the elements of | | -| | "s[i:j:k]" from the list | | -+--------------------------------+----------------------------------+-----------------------+ -| "s += t" | extends *s* with the contents of | | -| | *t* (for the most part the same | | -| | as "s[len(s):len(s)] = t") | | -+--------------------------------+----------------------------------+-----------------------+ -| "s *= n" | updates *s* with its contents | (2) | -| | repeated *n* times | | -+--------------------------------+----------------------------------+-----------------------+ - -Notes: - -1. If *k* is not equal to "1", *t* must have the same length as the - slice it is replacing. - -2. The value *n* is an integer, or an object implementing - "__index__()". Zero and negative values of *n* clear the sequence. - Items in the sequence are not copied; they are referenced multiple - times, as explained for "s * n" under Common Sequence Operations. - --[ Mutable Sequence Methods ]- - -Mutable sequence types also support the following methods: - -sequence.append(value, /) - - Append *value* to the end of the sequence. This is equivalent to - writing "seq[len(seq):len(seq)] = [value]". - -sequence.clear() - - Added in version 3.3. - - Remove all items from *sequence*. This is equivalent to writing - "del sequence[:]". - -sequence.copy() - - Added in version 3.3. - - Create a shallow copy of *sequence*. This is equivalent to writing - "sequence[:]". - - Hint: - - The "copy()" method is not part of the "MutableSequence" "ABC", - but most concrete mutable sequence types provide it. - -sequence.extend(iterable, /) - - Extend *sequence* with the contents of *iterable*. For the most - part, this is the same as writing "seq[len(seq):len(seq)] = - iterable". - -sequence.insert(index, value, /) - - Insert *value* into *sequence* at the given *index*. This is - equivalent to writing "sequence[index:index] = [value]". - -sequence.pop(index=-1, /) - - Retrieve the item at *index* and also removes it from *sequence*. - By default, the last item in *sequence* is removed and returned. - -sequence.remove(value, /) - - Remove the first item from *sequence* where "sequence[i] == value". - - Raises "ValueError" if *value* is not found in *sequence*. - -sequence.reverse() - - Reverse the items of *sequence* in place. This method maintains - economy of space when reversing a large sequence. 
To remind users - that it operates by side-effect, it returns "None". -''', - 'unary': r'''Unary arithmetic and bitwise operations -*************************************** - -All unary arithmetic and bitwise operations have the same priority: - - u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr - -The unary "-" (minus) operator yields the negation of its numeric -argument; the operation can be overridden with the "__neg__()" special -method. - -The unary "+" (plus) operator yields its numeric argument unchanged; -the operation can be overridden with the "__pos__()" special method. - -The unary "~" (invert) operator yields the bitwise inversion of its -integer argument. The bitwise inversion of "x" is defined as -"-(x+1)". It only applies to integral numbers or to custom objects -that override the "__invert__()" special method. - -In all three cases, if the argument does not have the proper type, a -"TypeError" exception is raised. -''', - 'while': r'''The "while" statement -********************* - -The "while" statement is used for repeated execution as long as an -expression is true: - - while_stmt ::= "while" assignment_expression ":" suite - ["else" ":" suite] - -This repeatedly tests the expression and, if it is true, executes the -first suite; if the expression is false (which may be the first time -it is tested) the suite of the "else" clause, if present, is executed -and the loop terminates. - -A "break" statement executed in the first suite terminates the loop -without executing the "else" clause’s suite. A "continue" statement -executed in the first suite skips the rest of the suite and goes back -to testing the expression. -''', - 'with': r'''The "with" statement -******************** - -The "with" statement is used to wrap the execution of a block with -methods defined by a context manager (see section With Statement -Context Managers). This allows common "try"…"except"…"finally" usage -patterns to be encapsulated for convenient reuse. - - with_stmt ::= "with" ( "(" with_stmt_contents ","? ")" | with_stmt_contents ) ":" suite - with_stmt_contents ::= with_item ("," with_item)* - with_item ::= expression ["as" target] - -The execution of the "with" statement with one “item” proceeds as -follows: - -1. The context expression (the expression given in the "with_item") is - evaluated to obtain a context manager. - -2. The context manager’s "__enter__()" is loaded for later use. - -3. The context manager’s "__exit__()" is loaded for later use. - -4. The context manager’s "__enter__()" method is invoked. - -5. If a target was included in the "with" statement, the return value - from "__enter__()" is assigned to it. - - Note: - - The "with" statement guarantees that if the "__enter__()" method - returns without an error, then "__exit__()" will always be - called. Thus, if an error occurs during the assignment to the - target list, it will be treated the same as an error occurring - within the suite would be. See step 7 below. - -6. The suite is executed. - -7. The context manager’s "__exit__()" method is invoked. If an - exception caused the suite to be exited, its type, value, and - traceback are passed as arguments to "__exit__()". Otherwise, three - "None" arguments are supplied. - - If the suite was exited due to an exception, and the return value - from the "__exit__()" method was false, the exception is reraised. - If the return value was true, the exception is suppressed, and - execution continues with the statement following the "with" - statement. 
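   A minimal sketch of the suppression rule just described (the class name
   here is illustrative only, not part of the language specification):

      class Suppress:
          def __enter__(self):
              return self
          def __exit__(self, exc_type, exc_value, traceback):
              return True   # a true return value suppresses the exception

      with Suppress():
          raise ValueError("swallowed by __exit__()")
      print("execution continues after the with statement")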
-
-   If the suite was exited for any reason other than an exception, the
-   return value from "__exit__()" is ignored, and execution proceeds
-   at the normal location for the kind of exit that was taken.
-
-The following code:
-
-   with EXPRESSION as TARGET:
-       SUITE
-
-is semantically equivalent to:
-
-   manager = (EXPRESSION)
-   enter = manager.__enter__
-   exit = manager.__exit__
-   value = enter()
-   hit_except = False
-
-   try:
-       TARGET = value
-       SUITE
-   except:
-       hit_except = True
-       if not exit(*sys.exc_info()):
-           raise
-   finally:
-       if not hit_except:
-           exit(None, None, None)
-
-except that implicit special method lookup is used for "__enter__()"
-and "__exit__()".
-
-With more than one item, the context managers are processed as if
-multiple "with" statements were nested:
-
-   with A() as a, B() as b:
-       SUITE
-
-is semantically equivalent to:
-
-   with A() as a:
-       with B() as b:
-           SUITE
-
-You can also write multi-item context managers in multiple lines if
-the items are surrounded by parentheses. For example:
-
-   with (
-       A() as a,
-       B() as b,
-   ):
-       SUITE
-
-Changed in version 3.1: Support for multiple context expressions.
-
-Changed in version 3.10: Support for using grouping parentheses to
-break the statement in multiple lines.
-
-See also:
-
-  **PEP 343** - The “with” statement
-     The specification, background, and examples for the Python "with"
-     statement.
-''',
- 'yield': r'''The "yield" statement
-*********************
-
-   yield_stmt ::= yield_expression
-
-A "yield" statement is semantically equivalent to a yield expression.
-The "yield" statement can be used to omit the parentheses that would
-otherwise be required in the equivalent yield expression statement.
-For example, the yield statements
-
-   yield <expr>
-   yield from <expr>
-
-are equivalent to the yield expression statements
-
-   (yield <expr>)
-   (yield from <expr>)
-
-Yield expressions and statements are only used when defining a
-*generator* function, and are only used in the body of the generator
-function. Using "yield" in a function definition is sufficient to
-cause that definition to create a generator function instead of a
-normal function.
-
-For full details of "yield" semantics, refer to the Yield expressions
-section.
-''',
-}
diff --git a/Python313_13_x64_Template/Lib/quopri.py b/Python313_13_x64_Template/Lib/quopri.py
deleted file mode 100644
index f36cf7b3..00000000
--- a/Python313_13_x64_Template/Lib/quopri.py
+++ /dev/null
@@ -1,237 +0,0 @@
-#! /usr/bin/env python3
-
-"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
-
-# (Dec 1991 version).
-
-__all__ = ["encode", "decode", "encodestring", "decodestring"]
-
-ESCAPE = b'='
-MAXLINESIZE = 76
-HEX = b'0123456789ABCDEF'
-EMPTYSTRING = b''
-
-try:
-    from binascii import a2b_qp, b2a_qp
-except ImportError:
-    a2b_qp = None
-    b2a_qp = None
-
-
-def needsquoting(c, quotetabs, header):
-    """Decide whether a particular byte ordinal needs to be quoted.
-
-    The 'quotetabs' flag indicates whether embedded tabs and spaces should be
-    quoted. Note that line-ending tabs and spaces are always encoded, as per
-    RFC 1521.
- """ - assert isinstance(c, bytes) - if c in b' \t': - return quotetabs - # if header, we have to escape _ because _ is used to escape space - if c == b'_': - return header - return c == ESCAPE or not (b' ' <= c <= b'~') - -def quote(c): - """Quote a single character.""" - assert isinstance(c, bytes) and len(c)==1 - c = ord(c) - return ESCAPE + bytes((HEX[c//16], HEX[c%16])) - - - -def encode(input, output, quotetabs, header=False): - """Read 'input', apply quoted-printable encoding, and write to 'output'. - - 'input' and 'output' are binary file objects. The 'quotetabs' flag - indicates whether embedded tabs and spaces should be quoted. Note that - line-ending tabs and spaces are always encoded, as per RFC 1521. - The 'header' flag indicates whether we are encoding spaces as _ as per RFC - 1522.""" - - if b2a_qp is not None: - data = input.read() - odata = b2a_qp(data, quotetabs=quotetabs, header=header) - output.write(odata) - return - - def write(s, output=output, lineEnd=b'\n'): - # RFC 1521 requires that the line ending in a space or tab must have - # that trailing character encoded. - if s and s[-1:] in b' \t': - output.write(s[:-1] + quote(s[-1:]) + lineEnd) - elif s == b'.': - output.write(quote(s) + lineEnd) - else: - output.write(s + lineEnd) - - prevline = None - while line := input.readline(): - outline = [] - # Strip off any readline induced trailing newline - stripped = b'' - if line[-1:] == b'\n': - line = line[:-1] - stripped = b'\n' - # Calculate the un-length-limited encoded line - for c in line: - c = bytes((c,)) - if needsquoting(c, quotetabs, header): - c = quote(c) - if header and c == b' ': - outline.append(b'_') - else: - outline.append(c) - # First, write out the previous line - if prevline is not None: - write(prevline) - # Now see if we need any soft line breaks because of RFC-imposed - # length limitations. Then do the thisline->prevline dance. - thisline = EMPTYSTRING.join(outline) - while len(thisline) > MAXLINESIZE: - # Don't forget to include the soft line break `=' sign in the - # length calculation! - write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n') - thisline = thisline[MAXLINESIZE-1:] - # Write out the current line - prevline = thisline - # Write out the last line, without a trailing newline - if prevline is not None: - write(prevline, lineEnd=stripped) - -def encodestring(s, quotetabs=False, header=False): - if b2a_qp is not None: - return b2a_qp(s, quotetabs=quotetabs, header=header) - from io import BytesIO - infp = BytesIO(s) - outfp = BytesIO() - encode(infp, outfp, quotetabs, header) - return outfp.getvalue() - - - -def decode(input, output, header=False): - """Read 'input', apply quoted-printable decoding, and write to 'output'. - 'input' and 'output' are binary file objects. 
- If 'header' is true, decode underscore as space (per RFC 1522).""" - - if a2b_qp is not None: - data = input.read() - odata = a2b_qp(data, header=header) - output.write(odata) - return - - new = b'' - while line := input.readline(): - i, n = 0, len(line) - if n > 0 and line[n-1:n] == b'\n': - partial = 0; n = n-1 - # Strip trailing whitespace - while n > 0 and line[n-1:n] in b" \t\r": - n = n-1 - else: - partial = 1 - while i < n: - c = line[i:i+1] - if c == b'_' and header: - new = new + b' '; i = i+1 - elif c != ESCAPE: - new = new + c; i = i+1 - elif i+1 == n and not partial: - partial = 1; break - elif i+1 < n and line[i+1:i+2] == ESCAPE: - new = new + ESCAPE; i = i+2 - elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]): - new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3 - else: # Bad escape sequence -- leave it in - new = new + c; i = i+1 - if not partial: - output.write(new + b'\n') - new = b'' - if new: - output.write(new) - -def decodestring(s, header=False): - if a2b_qp is not None: - return a2b_qp(s, header=header) - from io import BytesIO - infp = BytesIO(s) - outfp = BytesIO() - decode(infp, outfp, header=header) - return outfp.getvalue() - - - -# Other helper functions -def ishex(c): - """Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII.""" - assert isinstance(c, bytes) - return b'0' <= c <= b'9' or b'a' <= c <= b'f' or b'A' <= c <= b'F' - -def unhex(s): - """Get the integer value of a hexadecimal number.""" - bits = 0 - for c in s: - c = bytes((c,)) - if b'0' <= c <= b'9': - i = ord('0') - elif b'a' <= c <= b'f': - i = ord('a')-10 - elif b'A' <= c <= b'F': - i = ord(b'A')-10 - else: - assert False, "non-hex digit "+repr(c) - bits = bits*16 + (ord(c) - i) - return bits - - - -def main(): - import sys - import getopt - try: - opts, args = getopt.getopt(sys.argv[1:], 'td') - except getopt.error as msg: - sys.stdout = sys.stderr - print(msg) - print("usage: quopri [-t | -d] [file] ...") - print("-t: quote tabs") - print("-d: decode; default encode") - sys.exit(2) - deco = False - tabs = False - for o, a in opts: - if o == '-t': tabs = True - if o == '-d': deco = True - if tabs and deco: - sys.stdout = sys.stderr - print("-t and -d are mutually exclusive") - sys.exit(2) - if not args: args = ['-'] - sts = 0 - for file in args: - if file == '-': - fp = sys.stdin.buffer - else: - try: - fp = open(file, "rb") - except OSError as msg: - sys.stderr.write("%s: can't open (%s)\n" % (file, msg)) - sts = 1 - continue - try: - if deco: - decode(fp, sys.stdout.buffer) - else: - encode(fp, sys.stdout.buffer, tabs) - finally: - if file != '-': - fp.close() - if sts: - sys.exit(sts) - - - -if __name__ == '__main__': - main() diff --git a/Python313_13_x64_Template/Lib/random.py b/Python313_13_x64_Template/Lib/random.py deleted file mode 100644 index 1abcae77..00000000 --- a/Python313_13_x64_Template/Lib/random.py +++ /dev/null @@ -1,1070 +0,0 @@ -"""Random variable generators. 
- - bytes - ----- - uniform bytes (values between 0 and 255) - - integers - -------- - uniform within range - - sequences - --------- - pick random element - pick random sample - pick weighted random sample - generate random permutation - - distributions on the real line: - ------------------------------ - uniform - triangular - normal (Gaussian) - lognormal - negative exponential - gamma - beta - pareto - Weibull - - distributions on the circle (angles 0 to 2pi) - --------------------------------------------- - circular uniform - von Mises - - discrete distributions - ---------------------- - binomial - - -General notes on the underlying Mersenne Twister core generator: - -* The period is 2**19937-1. -* It is one of the most extensively tested generators in existence. -* The random() method is implemented in C, executes in a single Python step, - and is, therefore, threadsafe. - -""" - -# Translated by Guido van Rossum from C source provided by -# Adrian Baddeley. Adapted by Raymond Hettinger for use with -# the Mersenne Twister and os.urandom() core generators. - -from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil -from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin -from math import tau as TWOPI, floor as _floor, isfinite as _isfinite -from math import lgamma as _lgamma, fabs as _fabs, log2 as _log2 -from os import urandom as _urandom -from _collections_abc import Sequence as _Sequence -from operator import index as _index -from itertools import accumulate as _accumulate, repeat as _repeat -from bisect import bisect as _bisect -import os as _os -import _random - -__all__ = [ - "Random", - "SystemRandom", - "betavariate", - "binomialvariate", - "choice", - "choices", - "expovariate", - "gammavariate", - "gauss", - "getrandbits", - "getstate", - "lognormvariate", - "normalvariate", - "paretovariate", - "randbytes", - "randint", - "random", - "randrange", - "sample", - "seed", - "setstate", - "shuffle", - "triangular", - "uniform", - "vonmisesvariate", - "weibullvariate", -] - -NV_MAGICCONST = 4 * _exp(-0.5) / _sqrt(2.0) -LOG4 = _log(4.0) -SG_MAGICCONST = 1.0 + _log(4.5) -BPF = 53 # Number of bits in a float -RECIP_BPF = 2 ** -BPF -_ONE = 1 -_sha512 = None - - -class Random(_random.Random): - """Random number generator base class used by bound module functions. - - Used to instantiate instances of Random to get generators that don't - share state. - - Class Random can also be subclassed if you want to use a different basic - generator of your own devising: in that case, override the following - methods: random(), seed(), getstate(), and setstate(). - Optionally, implement a getrandbits() method so that randrange() - can cover arbitrarily large ranges. - - """ - - VERSION = 3 # used by getstate/setstate - - def __init__(self, x=None): - """Initialize an instance. - - Optional argument x controls seeding, as for Random.seed(). - """ - - self.seed(x) - self.gauss_next = None - - def seed(self, a=None, version=2): - """Initialize internal state from a seed. - - The only supported seed types are None, int, float, - str, bytes, and bytearray. - - None or no argument seeds from current time or from an operating - system specific randomness source if available. - - If *a* is an int, all bits are used. - - For version 2 (the default), all of the bits are used if *a* is a str, - bytes, or bytearray. 
For version 1 (provided for reproducing random - sequences from older versions of Python), the algorithm for str and - bytes generates a narrower range of seeds. - - """ - - if version == 1 and isinstance(a, (str, bytes)): - a = a.decode('latin-1') if isinstance(a, bytes) else a - x = ord(a[0]) << 7 if a else 0 - for c in map(ord, a): - x = ((1000003 * x) ^ c) & 0xFFFFFFFFFFFFFFFF - x ^= len(a) - a = -2 if x == -1 else x - - elif version == 2 and isinstance(a, (str, bytes, bytearray)): - global _sha512 - if _sha512 is None: - try: - # hashlib is pretty heavy to load, try lean internal - # module first - from _sha2 import sha512 as _sha512 - except ImportError: - # fallback to official implementation - from hashlib import sha512 as _sha512 - - if isinstance(a, str): - a = a.encode() - a = int.from_bytes(a + _sha512(a).digest()) - - elif not isinstance(a, (type(None), int, float, str, bytes, bytearray)): - raise TypeError('The only supported seed types are:\n' - 'None, int, float, str, bytes, and bytearray.') - - super().seed(a) - self.gauss_next = None - - def getstate(self): - """Return internal state; can be passed to setstate() later.""" - return self.VERSION, super().getstate(), self.gauss_next - - def setstate(self, state): - """Restore internal state from object returned by getstate().""" - version = state[0] - if version == 3: - version, internalstate, self.gauss_next = state - super().setstate(internalstate) - elif version == 2: - version, internalstate, self.gauss_next = state - # In version 2, the state was saved as signed ints, which causes - # inconsistencies between 32/64-bit systems. The state is - # really unsigned 32-bit ints, so we convert negative ints from - # version 2 to positive longs for version 3. - try: - internalstate = tuple(x % (2 ** 32) for x in internalstate) - except ValueError as e: - raise TypeError from e - super().setstate(internalstate) - else: - raise ValueError("state with version %s passed to " - "Random.setstate() of version %s" % - (version, self.VERSION)) - - - ## ------------------------------------------------------- - ## ---- Methods below this point do not need to be overridden or extended - ## ---- when subclassing for the purpose of using a different core generator. - - - ## -------------------- pickle support ------------------- - - # Issue 17489: Since __reduce__ was defined to fix #759889 this is no - # longer called; we leave it here because it has been here since random was - # rewritten back in 2001 and why risk breaking something. - def __getstate__(self): # for pickle - return self.getstate() - - def __setstate__(self, state): # for pickle - self.setstate(state) - - def __reduce__(self): - return self.__class__, (), self.getstate() - - - ## ---- internal support method for evenly distributed integers ---- - - def __init_subclass__(cls, /, **kwargs): - """Control how subclasses generate random integers. - - The algorithm a subclass can use depends on the random() and/or - getrandbits() implementation available to it and determines - whether it can generate random integers from arbitrarily large - ranges. - """ - - for c in cls.__mro__: - if '_randbelow' in c.__dict__: - # just inherit it - break - if 'getrandbits' in c.__dict__: - cls._randbelow = cls._randbelow_with_getrandbits - break - if 'random' in c.__dict__: - cls._randbelow = cls._randbelow_without_getrandbits - break - - def _randbelow_with_getrandbits(self, n): - "Return a random int in the range [0,n). Defined for n > 0." 
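        # (Editorial note, not part of the original file.) This is plain
        # rejection sampling: k = n.bit_length() gives 2**(k-1) <= n < 2**k,
        # so each getrandbits(k) draw falls in [0, n) with probability
        # n / 2**k >= 1/2 and the expected number of draws is at most 2.
        # Rejecting r >= n, rather than folding with r % n, is what keeps
        # the result exactly uniform over range(n).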
-
-        getrandbits = self.getrandbits
-        k = n.bit_length()
-        r = getrandbits(k)  # 0 <= r < 2**k
-        while r >= n:
-            r = getrandbits(k)
-        return r
-
-    def _randbelow_without_getrandbits(self, n, maxsize=1<<BPF):
-        """Return a random int in the range [0,n). Defined for n > 0.
-
-        The implementation does not use getrandbits, but only random.
-        """
-
-        random = self.random
-        if n >= maxsize:
-            from warnings import warn
-            warn("Underlying random() generator does not supply \n"
-                 "enough bits to choose from a population range this large.\n"
-                 "To remove the range limitation, add a getrandbits() method.")
-            return _floor(random() * n)
-        rem = maxsize % n
-        limit = (maxsize - rem) / maxsize  # int(limit * maxsize) % n == 0
-        r = random()
-        while r >= limit:
-            r = random()
-        return _floor(r * maxsize) % n
-
-    _randbelow = _randbelow_with_getrandbits
-
-
-    ## --------------------------------------------------------
-    ## ---- Methods below this point generate custom distributions
-    ## ---- based on the methods defined above. They do not
-    ## ---- directly touch the underlying generator and only
-    ## ---- access randomness through the methods: random(),
-    ## ---- getrandbits(), or _randbelow().
-
-
-    ## -------------------- bytes methods ---------------------
-
-    def randbytes(self, n):
-        """Generate n random bytes."""
-        return self.getrandbits(n * 8).to_bytes(n, 'little')
-
-
-    ## -------------------- integer methods -------------------
-
-    def randrange(self, start, stop=None, step=_ONE):
-        """Choose a random item from range(stop) or range(start, stop[, step]).
-
-        Roughly equivalent to ``choice(range(start, stop, step))`` but
-        supports arbitrarily large ranges and is optimized for common cases.
-
-        """
-
-        # This code is a bit messy to make it fast for the
-        # common case while still doing adequate error checking.
-        istart = _index(start)
-        if stop is None:
-            # We don't check for "step != 1" because it hasn't been
-            # type checked and converted to an integer yet.
-            if step is not _ONE:
-                raise TypeError("Missing a non-None stop argument")
-            if istart > 0:
-                return self._randbelow(istart)
-            raise ValueError("empty range for randrange()")
-
-        # Stop argument supplied.
-        istop = _index(stop)
-        width = istop - istart
-        istep = _index(step)
-        # Fast path.
-        if istep == 1:
-            if width > 0:
-                return istart + self._randbelow(width)
-            raise ValueError(f"empty range in randrange({start}, {stop})")
-
-        # Non-unit step argument supplied.
-        if istep > 0:
-            n = (width + istep - 1) // istep
-        elif istep < 0:
-            n = (width + istep + 1) // istep
-        else:
-            raise ValueError("zero step for randrange()")
-        if n <= 0:
-            raise ValueError(f"empty range in randrange({start}, {stop}, {step})")
-        return istart + istep * self._randbelow(n)
-
-    def randint(self, a, b):
-        """Return random integer in range [a, b], including both end points.
-        """
-
-        return self.randrange(a, b+1)
-
-
-    ## -------------------- sequence methods -------------------
-
-    def choice(self, seq):
-        """Choose a random element from a non-empty sequence."""
-
-        # As an accommodation for NumPy, we don't use "if not seq"
-        # because bool(numpy.array()) raises a ValueError.
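        # (Editorial note, not part of the original file.) bool() of a
        # NumPy array with more than one element raises ValueError ("The
        # truth value of an array ... is ambiguous"), whereas len() is
        # well defined for arrays of any size, so the length test keeps
        # such sequence-like types usable with choice().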
- if not len(seq): - raise IndexError('Cannot choose from an empty sequence') - return seq[self._randbelow(len(seq))] - - def shuffle(self, x): - """Shuffle list x in place, and return None.""" - - randbelow = self._randbelow - for i in reversed(range(1, len(x))): - # pick an element in x[:i+1] with which to exchange x[i] - j = randbelow(i + 1) - x[i], x[j] = x[j], x[i] - - def sample(self, population, k, *, counts=None): - """Chooses k unique random elements from a population sequence. - - Returns a new list containing elements from the population while - leaving the original population unchanged. The resulting list is - in selection order so that all sub-slices will also be valid random - samples. This allows raffle winners (the sample) to be partitioned - into grand prize and second place winners (the subslices). - - Members of the population need not be hashable or unique. If the - population contains repeats, then each occurrence is a possible - selection in the sample. - - Repeated elements can be specified one at a time or with the optional - counts parameter. For example: - - sample(['red', 'blue'], counts=[4, 2], k=5) - - is equivalent to: - - sample(['red', 'red', 'red', 'red', 'blue', 'blue'], k=5) - - To choose a sample from a range of integers, use range() for the - population argument. This is especially fast and space efficient - for sampling from a large population: - - sample(range(10000000), 60) - - """ - - # Sampling without replacement entails tracking either potential - # selections (the pool) in a list or previous selections in a set. - - # When the number of selections is small compared to the - # population, then tracking selections is efficient, requiring - # only a small set and an occasional reselection. For - # a larger number of selections, the pool tracking method is - # preferred since the list takes less space than the - # set and it doesn't suffer from frequent reselections. - - # The number of calls to _randbelow() is kept at or near k, the - # theoretical minimum. This is important because running time - # is dominated by _randbelow() and because it extracts the - # least entropy from the underlying random number generators. - - # Memory requirements are kept to the smaller of a k-length - # set or an n-length list. - - # There are other sampling algorithms that do not require - # auxiliary memory, but they were rejected because they made - # too many calls to _randbelow(), making them slower and - # causing them to eat more entropy than necessary. - - if not isinstance(population, _Sequence): - raise TypeError("Population must be a sequence. " - "For dicts or sets, use sorted(d).") - n = len(population) - if counts is not None: - cum_counts = list(_accumulate(counts)) - if len(cum_counts) != n: - raise ValueError('The number of counts does not match the population') - total = cum_counts.pop() if cum_counts else 0 - if not isinstance(total, int): - raise TypeError('Counts must be integers') - if total < 0: - raise ValueError('Counts must be non-negative') - selections = self.sample(range(total), k=k) - bisect = _bisect - return [population[bisect(cum_counts, s)] for s in selections] - randbelow = self._randbelow - if not 0 <= k <= n: - raise ValueError("Sample larger than population or is negative") - result = [None] * k - setsize = 21 # size of a small set minus size of an empty list - if k > 5: - setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets - if n <= setsize: - # An n-length list is smaller than a k-length set. 
- # Invariant: non-selected at pool[0 : n-i] - pool = list(population) - for i in range(k): - j = randbelow(n - i) - result[i] = pool[j] - pool[j] = pool[n - i - 1] # move non-selected item into vacancy - else: - selected = set() - selected_add = selected.add - for i in range(k): - j = randbelow(n) - while j in selected: - j = randbelow(n) - selected_add(j) - result[i] = population[j] - return result - - def choices(self, population, weights=None, *, cum_weights=None, k=1): - """Return a k sized list of population elements chosen with replacement. - - If the relative weights or cumulative weights are not specified, - the selections are made with equal probability. - - """ - random = self.random - n = len(population) - if cum_weights is None: - if weights is None: - floor = _floor - n += 0.0 # convert to float for a small speed improvement - return [population[floor(random() * n)] for i in _repeat(None, k)] - try: - cum_weights = list(_accumulate(weights)) - except TypeError: - if not isinstance(weights, int): - raise - k = weights - raise TypeError( - f'The number of choices must be a keyword argument: {k=}' - ) from None - elif weights is not None: - raise TypeError('Cannot specify both weights and cumulative weights') - if len(cum_weights) != n: - raise ValueError('The number of weights does not match the population') - total = cum_weights[-1] + 0.0 # convert to float - if total <= 0.0: - raise ValueError('Total of weights must be greater than zero') - if not _isfinite(total): - raise ValueError('Total of weights must be finite') - bisect = _bisect - hi = n - 1 - return [population[bisect(cum_weights, random() * total, 0, hi)] - for i in _repeat(None, k)] - - - ## -------------------- real-valued distributions ------------------- - - def uniform(self, a, b): - """Get a random number in the range [a, b) or [a, b] depending on rounding. - - The mean (expected value) and variance of the random variable are: - - E[X] = (a + b) / 2 - Var[X] = (b - a) ** 2 / 12 - - """ - return a + (b - a) * self.random() - - def triangular(self, low=0.0, high=1.0, mode=None): - """Triangular distribution. - - Continuous distribution bounded by given lower and upper limits, - and having a given mode value in-between. - - http://en.wikipedia.org/wiki/Triangular_distribution - - The mean (expected value) and variance of the random variable are: - - E[X] = (low + high + mode) / 3 - Var[X] = (low**2 + high**2 + mode**2 - low*high - low*mode - high*mode) / 18 - - """ - u = self.random() - try: - c = 0.5 if mode is None else (mode - low) / (high - low) - except ZeroDivisionError: - return low - if u > c: - u = 1.0 - u - c = 1.0 - c - low, high = high, low - return low + (high - low) * _sqrt(u * c) - - def normalvariate(self, mu=0.0, sigma=1.0): - """Normal distribution. - - mu is the mean, and sigma is the standard deviation. - - """ - # Uses Kinderman and Monahan method. Reference: Kinderman, - # A.J. and Monahan, J.F., "Computer generation of random - # variables using the ratio of uniform deviates", ACM Trans - # Math Software, 3, (1977), pp257-260. - - random = self.random - while True: - u1 = random() - u2 = 1.0 - random() - z = NV_MAGICCONST * (u1 - 0.5) / u2 - zz = z * z / 4.0 - if zz <= -_log(u2): - break - return mu + z * sigma - - def gauss(self, mu=0.0, sigma=1.0): - """Gaussian distribution. - - mu is the mean, and sigma is the standard deviation. This is - slightly faster than the normalvariate() function. - - Not thread-safe without a lock around calls. 
- - """ - # When x and y are two variables from [0, 1), uniformly - # distributed, then - # - # cos(2*pi*x)*sqrt(-2*log(1-y)) - # sin(2*pi*x)*sqrt(-2*log(1-y)) - # - # are two *independent* variables with normal distribution - # (mu = 0, sigma = 1). - # (Lambert Meertens) - # (corrected version; bug discovered by Mike Miller, fixed by LM) - - # Multithreading note: When two threads call this function - # simultaneously, it is possible that they will receive the - # same return value. The window is very small though. To - # avoid this, you have to use a lock around all calls. (I - # didn't want to slow this down in the serial case by using a - # lock here.) - - random = self.random - z = self.gauss_next - self.gauss_next = None - if z is None: - x2pi = random() * TWOPI - g2rad = _sqrt(-2.0 * _log(1.0 - random())) - z = _cos(x2pi) * g2rad - self.gauss_next = _sin(x2pi) * g2rad - - return mu + z * sigma - - def lognormvariate(self, mu, sigma): - """Log normal distribution. - - If you take the natural logarithm of this distribution, you'll get a - normal distribution with mean mu and standard deviation sigma. - mu can have any value, and sigma must be greater than zero. - - """ - return _exp(self.normalvariate(mu, sigma)) - - def expovariate(self, lambd=1.0): - """Exponential distribution. - - lambd is 1.0 divided by the desired mean. It should be - nonzero. (The parameter would be called "lambda", but that is - a reserved word in Python.) Returned values range from 0 to - positive infinity if lambd is positive, and from negative - infinity to 0 if lambd is negative. - - The mean (expected value) and variance of the random variable are: - - E[X] = 1 / lambd - Var[X] = 1 / lambd ** 2 - - """ - # we use 1-random() instead of random() to preclude the - # possibility of taking the log of zero. - - return -_log(1.0 - self.random()) / lambd - - def vonmisesvariate(self, mu, kappa): - """Circular data distribution. - - mu is the mean angle, expressed in radians between 0 and 2*pi, and - kappa is the concentration parameter, which must be greater than or - equal to zero. If kappa is equal to zero, this distribution reduces - to a uniform random angle over the range 0 to 2*pi. - - """ - # Based upon an algorithm published in: Fisher, N.I., - # "Statistical Analysis of Circular Data", Cambridge - # University Press, 1993. - - # Thanks to Magnus Kessler for a correction to the - # implementation of step 4. - - random = self.random - if kappa <= 1e-6: - return TWOPI * random() - - s = 0.5 / kappa - r = s + _sqrt(1.0 + s * s) - - while True: - u1 = random() - z = _cos(_pi * u1) - - d = z / (r + z) - u2 = random() - if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): - break - - q = 1.0 / r - f = (q + z) / (1.0 + q * z) - u3 = random() - if u3 > 0.5: - theta = (mu + _acos(f)) % TWOPI - else: - theta = (mu - _acos(f)) % TWOPI - - return theta - - def gammavariate(self, alpha, beta): - """Gamma distribution. Not the gamma function! - - Conditions on the parameters are alpha > 0 and beta > 0. 
- - The probability distribution function is: - - x ** (alpha - 1) * math.exp(-x / beta) - pdf(x) = -------------------------------------- - math.gamma(alpha) * beta ** alpha - - The mean (expected value) and variance of the random variable are: - - E[X] = alpha * beta - Var[X] = alpha * beta ** 2 - - """ - - # Warning: a few older sources define the gamma distribution in terms - # of alpha > -1.0 - if alpha <= 0.0 or beta <= 0.0: - raise ValueError('gammavariate: alpha and beta must be > 0.0') - - random = self.random - if alpha > 1.0: - - # Uses R.C.H. Cheng, "The generation of Gamma - # variables with non-integral shape parameters", - # Applied Statistics, (1977), 26, No. 1, p71-74 - - ainv = _sqrt(2.0 * alpha - 1.0) - bbb = alpha - LOG4 - ccc = alpha + ainv - - while True: - u1 = random() - if not 1e-7 < u1 < 0.9999999: - continue - u2 = 1.0 - random() - v = _log(u1 / (1.0 - u1)) / ainv - x = alpha * _exp(v) - z = u1 * u1 * u2 - r = bbb + ccc * v - x - if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z): - return x * beta - - elif alpha == 1.0: - # expovariate(1/beta) - return -_log(1.0 - random()) * beta - - else: - # alpha is between 0 and 1 (exclusive) - # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle - while True: - u = random() - b = (_e + alpha) / _e - p = b * u - if p <= 1.0: - x = p ** (1.0 / alpha) - else: - x = -_log((b - p) / alpha) - u1 = random() - if p > 1.0: - if u1 <= x ** (alpha - 1.0): - break - elif u1 <= _exp(-x): - break - return x * beta - - def betavariate(self, alpha, beta): - """Beta distribution. - - Conditions on the parameters are alpha > 0 and beta > 0. - Returned values range between 0 and 1. - - The mean (expected value) and variance of the random variable are: - - E[X] = alpha / (alpha + beta) - Var[X] = alpha * beta / ((alpha + beta)**2 * (alpha + beta + 1)) - - """ - ## See - ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html - ## for Ivan Frohne's insightful analysis of why the original implementation: - ## - ## def betavariate(self, alpha, beta): - ## # Discrete Event Simulation in C, pp 87-88. - ## - ## y = self.expovariate(alpha) - ## z = self.expovariate(1.0/beta) - ## return z/(y+z) - ## - ## was dead wrong, and how it probably got that way. - - # This version due to Janne Sinkkonen, and matches all the std - # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). - y = self.gammavariate(alpha, 1.0) - if y: - return y / (y + self.gammavariate(beta, 1.0)) - return 0.0 - - def paretovariate(self, alpha): - """Pareto distribution. alpha is the shape parameter.""" - # Jain, pg. 495 - - u = 1.0 - self.random() - return u ** (-1.0 / alpha) - - def weibullvariate(self, alpha, beta): - """Weibull distribution. - - alpha is the scale parameter and beta is the shape parameter. - - """ - # Jain, pg. 499; bug fix courtesy Bill Arms - - u = 1.0 - self.random() - return alpha * (-_log(u)) ** (1.0 / beta) - - - ## -------------------- discrete distributions --------------------- - - def binomialvariate(self, n=1, p=0.5): - """Binomial random variable. 
- - Gives the number of successes for *n* independent trials - with the probability of success in each trial being *p*: - - sum(random() < p for i in range(n)) - - Returns an integer in the range: 0 <= X <= n - - The mean (expected value) and variance of the random variable are: - - E[X] = n * p - Var[x] = n * p * (1 - p) - - """ - # Error check inputs and handle edge cases - if n < 0: - raise ValueError("n must be non-negative") - if p <= 0.0 or p >= 1.0: - if p == 0.0: - return 0 - if p == 1.0: - return n - raise ValueError("p must be in the range 0.0 <= p <= 1.0") - - random = self.random - - # Fast path for a common case - if n == 1: - return _index(random() < p) - - # Exploit symmetry to establish: p <= 0.5 - if p > 0.5: - return n - self.binomialvariate(n, 1.0 - p) - - if n * p < 10.0: - # BG: Geometric method by Devroye with running time of O(np). - # https://dl.acm.org/doi/pdf/10.1145/42372.42381 - x = y = 0 - c = _log2(1.0 - p) - if not c: - return x - while True: - y += _floor(_log2(random()) / c) + 1 - if y > n: - return x - x += 1 - - # BTRS: Transformed rejection with squeeze method by Wolfgang Hörmann - # https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.47.8407&rep=rep1&type=pdf - assert n*p >= 10.0 and p <= 0.5 - setup_complete = False - - spq = _sqrt(n * p * (1.0 - p)) # Standard deviation of the distribution - b = 1.15 + 2.53 * spq - a = -0.0873 + 0.0248 * b + 0.01 * p - c = n * p + 0.5 - vr = 0.92 - 4.2 / b - - while True: - - u = random() - u -= 0.5 - us = 0.5 - _fabs(u) - k = _floor((2.0 * a / us + b) * u + c) - if k < 0 or k > n: - continue - - # The early-out "squeeze" test substantially reduces - # the number of acceptance condition evaluations. - v = random() - if us >= 0.07 and v <= vr: - return k - - # Acceptance-rejection test. - # Note, the original paper erroneously omits the call to log(v) - # when comparing to the log of the rescaled binomial distribution. - if not setup_complete: - alpha = (2.83 + 5.1 / b) * spq - lpq = _log(p / (1.0 - p)) - m = _floor((n + 1) * p) # Mode of the distribution - h = _lgamma(m + 1) + _lgamma(n - m + 1) - setup_complete = True # Only needs to be done once - v *= alpha / (a / (us * us) + b) - if _log(v) <= h - _lgamma(k + 1) - _lgamma(n - k + 1) + (k - m) * lpq: - return k - - -## ------------------------------------------------------------------ -## --------------- Operating System Random Source ------------------ - - -class SystemRandom(Random): - """Alternate random number generator using sources provided - by the operating system (such as /dev/urandom on Unix or - CryptGenRandom on Windows). - - Not available on all systems (see os.urandom() for details). - - """ - - def random(self): - """Get the next random number in the range 0.0 <= X < 1.0.""" - return (int.from_bytes(_urandom(7)) >> 3) * RECIP_BPF - - def getrandbits(self, k): - """getrandbits(k) -> x. Generates an int with k random bits.""" - if k < 0: - raise ValueError('number of bits must be non-negative') - numbytes = (k + 7) // 8 # bits / 8 and rounded up - x = int.from_bytes(_urandom(numbytes)) - return x >> (numbytes * 8 - k) # trim excess bits - - def randbytes(self, n): - """Generate n random bytes.""" - # os.urandom(n) fails with ValueError for n < 0 - # and returns an empty bytes string for n == 0. - return _urandom(n) - - def seed(self, *args, **kwds): - "Stub method. Not used for a system random number generator." - return None - - def _notimplemented(self, *args, **kwds): - "Method should not be called for a system random number generator." 
- raise NotImplementedError('System entropy source does not have state.') - getstate = setstate = _notimplemented - - -# ---------------------------------------------------------------------- -# Create one instance, seeded from current time, and export its methods -# as module-level functions. The functions share state across all uses -# (both in the user's code and in the Python libraries), but that's fine -# for most programs and is easier for the casual user than making them -# instantiate their own Random() instance. - -_inst = Random() -seed = _inst.seed -random = _inst.random -uniform = _inst.uniform -triangular = _inst.triangular -randint = _inst.randint -choice = _inst.choice -randrange = _inst.randrange -sample = _inst.sample -shuffle = _inst.shuffle -choices = _inst.choices -normalvariate = _inst.normalvariate -lognormvariate = _inst.lognormvariate -expovariate = _inst.expovariate -vonmisesvariate = _inst.vonmisesvariate -gammavariate = _inst.gammavariate -gauss = _inst.gauss -betavariate = _inst.betavariate -binomialvariate = _inst.binomialvariate -paretovariate = _inst.paretovariate -weibullvariate = _inst.weibullvariate -getstate = _inst.getstate -setstate = _inst.setstate -getrandbits = _inst.getrandbits -randbytes = _inst.randbytes - - -## ------------------------------------------------------ -## ----------------- test program ----------------------- - -def _test_generator(n, func, args): - from statistics import stdev, fmean as mean - from time import perf_counter - - t0 = perf_counter() - data = [func(*args) for i in _repeat(None, n)] - t1 = perf_counter() - - xbar = mean(data) - sigma = stdev(data, xbar) - low = min(data) - high = max(data) - - print(f'{t1 - t0:.3f} sec, {n} times {func.__name__}{args!r}') - print('avg %g, stddev %g, min %g, max %g\n' % (xbar, sigma, low, high)) - - -def _test(N=10_000): - _test_generator(N, random, ()) - _test_generator(N, normalvariate, (0.0, 1.0)) - _test_generator(N, lognormvariate, (0.0, 1.0)) - _test_generator(N, vonmisesvariate, (0.0, 1.0)) - _test_generator(N, binomialvariate, (15, 0.60)) - _test_generator(N, binomialvariate, (100, 0.75)) - _test_generator(N, gammavariate, (0.01, 1.0)) - _test_generator(N, gammavariate, (0.1, 1.0)) - _test_generator(N, gammavariate, (0.1, 2.0)) - _test_generator(N, gammavariate, (0.5, 1.0)) - _test_generator(N, gammavariate, (0.9, 1.0)) - _test_generator(N, gammavariate, (1.0, 1.0)) - _test_generator(N, gammavariate, (2.0, 1.0)) - _test_generator(N, gammavariate, (20.0, 1.0)) - _test_generator(N, gammavariate, (200.0, 1.0)) - _test_generator(N, gauss, (0.0, 1.0)) - _test_generator(N, betavariate, (3.0, 3.0)) - _test_generator(N, triangular, (0.0, 1.0, 1.0 / 3.0)) - - -## ------------------------------------------------------ -## ------------------ fork support --------------------- - -if hasattr(_os, "fork"): - _os.register_at_fork(after_in_child=_inst.seed) - - -# ------------------------------------------------------ -# -------------- command-line interface ---------------- - - -def _parse_args(arg_list: list[str] | None): - import argparse - parser = argparse.ArgumentParser( - formatter_class=argparse.RawTextHelpFormatter) - group = parser.add_mutually_exclusive_group() - group.add_argument( - "-c", "--choice", nargs="+", - help="print a random choice") - group.add_argument( - "-i", "--integer", type=int, metavar="N", - help="print a random integer between 1 and N inclusive") - group.add_argument( - "-f", "--float", type=float, metavar="N", - help="print a random floating-point number between 
0 and N inclusive") - group.add_argument( - "--test", type=int, const=10_000, nargs="?", - help=argparse.SUPPRESS) - parser.add_argument("input", nargs="*", - help="""\ -if no options given, output depends on the input - string or multiple: same as --choice - integer: same as --integer - float: same as --float""") - args = parser.parse_args(arg_list) - return args, parser.format_help() - - -def main(arg_list: list[str] | None = None) -> int | str: - args, help_text = _parse_args(arg_list) - - # Explicit arguments - if args.choice: - return choice(args.choice) - - if args.integer is not None: - return randint(1, args.integer) - - if args.float is not None: - return uniform(0, args.float) - - if args.test: - _test(args.test) - return "" - - # No explicit argument, select based on input - if len(args.input) == 1: - val = args.input[0] - try: - # Is it an integer? - val = int(val) - return randint(1, val) - except ValueError: - try: - # Is it a float? - val = float(val) - return uniform(0, val) - except ValueError: - # Split in case of space-separated string: "a b c" - return choice(val.split()) - - if len(args.input) >= 2: - return choice(args.input) - - return help_text - - -if __name__ == '__main__': - print(main()) diff --git a/Python313_13_x64_Template/Lib/re/__init__.py b/Python313_13_x64_Template/Lib/re/__init__.py deleted file mode 100644 index 7e8abbf6..00000000 --- a/Python313_13_x64_Template/Lib/re/__init__.py +++ /dev/null @@ -1,428 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# re-compatible interface for the sre matching engine -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# This version of the SRE library can be redistributed under CNRI's -# Python 1.6 license. For any other use, please contact Secret Labs -# AB (info@pythonware.com). -# -# Portions of this engine have been developed in cooperation with -# CNRI. Hewlett-Packard provided funding for 1.6 integration and -# other compatibility work. -# - -r"""Support for regular expressions (RE). - -This module provides regular expression matching operations similar to -those found in Perl. It supports both 8-bit and Unicode strings; both -the pattern and the strings being processed can contain null bytes and -characters outside the US ASCII range. - -Regular expressions can contain both special and ordinary characters. -Most ordinary characters, like "A", "a", or "0", are the simplest -regular expressions; they simply match themselves. You can -concatenate ordinary characters, so last matches the string 'last'. - -The special characters are: - "." Matches any character except a newline. - "^" Matches the start of the string. - "$" Matches the end of the string or just before the newline at - the end of the string. - "*" Matches 0 or more (greedy) repetitions of the preceding RE. - Greedy means that it will match as many repetitions as possible. - "+" Matches 1 or more (greedy) repetitions of the preceding RE. - "?" Matches 0 or 1 (greedy) of the preceding RE. - *?,+?,?? Non-greedy versions of the previous three special characters. - {m,n} Matches from m to n repetitions of the preceding RE. - {m,n}? Non-greedy version of the above. - "\\" Either escapes special characters or signals a special sequence. - [] Indicates a set of characters. - A "^" as the first character indicates a complementing set. - "|" A|B, creates an RE that will match either A or B. - (...) Matches the RE inside the parentheses. - The contents can be retrieved or matched later in the string. 
- (?aiLmsux) The letters set the corresponding flags defined below. - (?:...) Non-grouping version of regular parentheses. - (?P...) The substring matched by the group is accessible by name. - (?P=name) Matches the text matched earlier by the group named name. - (?#...) A comment; ignored. - (?=...) Matches if ... matches next, but doesn't consume the string. - (?!...) Matches if ... doesn't match next. - (?<=...) Matches if preceded by ... (must be fixed length). - (?= _MAXCACHE: - # Drop the least recently used item. - # next(iter(_cache)) is known to have linear amortized time, - # but it is used here to avoid a dependency from using OrderedDict. - # For the small _MAXCACHE value it doesn't make much of a difference. - try: - del _cache[next(iter(_cache))] - except (StopIteration, RuntimeError, KeyError): - pass - # Append to the end. - _cache[key] = p - - if len(_cache2) >= _MAXCACHE2: - # Drop the oldest item. - try: - del _cache2[next(iter(_cache2))] - except (StopIteration, RuntimeError, KeyError): - pass - _cache2[key] = p - return p - -@functools.lru_cache(_MAXCACHE) -def _compile_template(pattern, repl): - # internal: compile replacement pattern - return _sre.template(pattern, _parser.parse_template(repl, pattern)) - -# register myself for pickling - -import copyreg - -def _pickle(p): - return _compile, (p.pattern, p.flags) - -copyreg.pickle(Pattern, _pickle, _compile) - -# -------------------------------------------------------------------- -# experimental stuff (see python-dev discussions for details) - -class Scanner: - def __init__(self, lexicon, flags=0): - from ._constants import BRANCH, SUBPATTERN - if isinstance(flags, RegexFlag): - flags = flags.value - self.lexicon = lexicon - # combine phrases into a compound pattern - p = [] - s = _parser.State() - s.flags = flags - for phrase, action in lexicon: - gid = s.opengroup() - p.append(_parser.SubPattern(s, [ - (SUBPATTERN, (gid, 0, 0, _parser.parse(phrase, flags))), - ])) - s.closegroup(gid, p[-1]) - p = _parser.SubPattern(s, [(BRANCH, (None, p))]) - self.scanner = _compiler.compile(p) - def scan(self, string): - result = [] - append = result.append - match = self.scanner.scanner(string).match - i = 0 - while True: - m = match() - if not m: - break - j = m.end() - if i == j: - break - action = self.lexicon[m.lastindex-1][1] - if callable(action): - self.match = m - action = action(self, m.group()) - if action is not None: - append(action) - i = j - return result, string[i:] diff --git a/Python313_13_x64_Template/Lib/re/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/re/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 4ca50469..00000000 Binary files a/Python313_13_x64_Template/Lib/re/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/re/__pycache__/_casefix.cpython-313.pyc b/Python313_13_x64_Template/Lib/re/__pycache__/_casefix.cpython-313.pyc deleted file mode 100644 index d6b92328..00000000 Binary files a/Python313_13_x64_Template/Lib/re/__pycache__/_casefix.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/re/__pycache__/_compiler.cpython-313.pyc b/Python313_13_x64_Template/Lib/re/__pycache__/_compiler.cpython-313.pyc deleted file mode 100644 index c9008307..00000000 Binary files a/Python313_13_x64_Template/Lib/re/__pycache__/_compiler.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/re/__pycache__/_constants.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/re/__pycache__/_constants.cpython-313.pyc deleted file mode 100644 index f7a439f8..00000000 Binary files a/Python313_13_x64_Template/Lib/re/__pycache__/_constants.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/re/__pycache__/_parser.cpython-313.pyc b/Python313_13_x64_Template/Lib/re/__pycache__/_parser.cpython-313.pyc deleted file mode 100644 index 33ab59a1..00000000 Binary files a/Python313_13_x64_Template/Lib/re/__pycache__/_parser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/re/_compiler.py b/Python313_13_x64_Template/Lib/re/_compiler.py deleted file mode 100644 index 1b1aaa77..00000000 --- a/Python313_13_x64_Template/Lib/re/_compiler.py +++ /dev/null @@ -1,768 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# convert template to internal format -# -# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. -# -# See the __init__.py file for information on usage and redistribution. -# - -"""Internal support module for sre""" - -import _sre -from . import _parser -from ._constants import * -from ._casefix import _EXTRA_CASES - -assert _sre.MAGIC == MAGIC, "SRE module mismatch" - -_LITERAL_CODES = {LITERAL, NOT_LITERAL} -_SUCCESS_CODES = {SUCCESS, FAILURE} -_ASSERT_CODES = {ASSERT, ASSERT_NOT} -_UNIT_CODES = _LITERAL_CODES | {ANY, IN} - -_REPEATING_CODES = { - MIN_REPEAT: (REPEAT, MIN_UNTIL, MIN_REPEAT_ONE), - MAX_REPEAT: (REPEAT, MAX_UNTIL, REPEAT_ONE), - POSSESSIVE_REPEAT: (POSSESSIVE_REPEAT, SUCCESS, POSSESSIVE_REPEAT_ONE), -} - -def _combine_flags(flags, add_flags, del_flags, - TYPE_FLAGS=_parser.TYPE_FLAGS): - if add_flags & TYPE_FLAGS: - flags &= ~TYPE_FLAGS - return (flags | add_flags) & ~del_flags - -def _compile(code, pattern, flags): - # internal: compile a (sub)pattern - emit = code.append - _len = len - LITERAL_CODES = _LITERAL_CODES - REPEATING_CODES = _REPEATING_CODES - SUCCESS_CODES = _SUCCESS_CODES - ASSERT_CODES = _ASSERT_CODES - iscased = None - tolower = None - fixes = None - if flags & SRE_FLAG_IGNORECASE and not flags & SRE_FLAG_LOCALE: - if flags & SRE_FLAG_UNICODE: - iscased = _sre.unicode_iscased - tolower = _sre.unicode_tolower - fixes = _EXTRA_CASES - else: - iscased = _sre.ascii_iscased - tolower = _sre.ascii_tolower - for op, av in pattern: - if op in LITERAL_CODES: - if not flags & SRE_FLAG_IGNORECASE: - emit(op) - emit(av) - elif flags & SRE_FLAG_LOCALE: - emit(OP_LOCALE_IGNORE[op]) - emit(av) - elif not iscased(av): - emit(op) - emit(av) - else: - lo = tolower(av) - if not fixes: # ascii - emit(OP_IGNORE[op]) - emit(lo) - elif lo not in fixes: - emit(OP_UNICODE_IGNORE[op]) - emit(lo) - else: - emit(IN_UNI_IGNORE) - skip = _len(code); emit(0) - if op is NOT_LITERAL: - emit(NEGATE) - for k in (lo,) + fixes[lo]: - emit(LITERAL) - emit(k) - emit(FAILURE) - code[skip] = _len(code) - skip - elif op is IN: - charset, hascased = _optimize_charset(av, iscased, tolower, fixes) - if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE: - emit(IN_LOC_IGNORE) - elif not hascased: - emit(IN) - elif not fixes: # ascii - emit(IN_IGNORE) - else: - emit(IN_UNI_IGNORE) - skip = _len(code); emit(0) - _compile_charset(charset, flags, code) - code[skip] = _len(code) - skip - elif op is ANY: - if flags & SRE_FLAG_DOTALL: - emit(ANY_ALL) - else: - emit(ANY) - elif op in REPEATING_CODES: - if _simple(av[2]): - emit(REPEATING_CODES[op][2]) - skip = _len(code); emit(0) - emit(av[0]) - emit(av[1]) - _compile(code, av[2], flags) - emit(SUCCESS) - code[skip] = _len(code) 
- skip - else: - emit(REPEATING_CODES[op][0]) - skip = _len(code); emit(0) - emit(av[0]) - emit(av[1]) - _compile(code, av[2], flags) - code[skip] = _len(code) - skip - emit(REPEATING_CODES[op][1]) - elif op is SUBPATTERN: - group, add_flags, del_flags, p = av - if group: - emit(MARK) - emit((group-1)*2) - # _compile_info(code, p, _combine_flags(flags, add_flags, del_flags)) - _compile(code, p, _combine_flags(flags, add_flags, del_flags)) - if group: - emit(MARK) - emit((group-1)*2+1) - elif op is ATOMIC_GROUP: - # Atomic Groups are handled by starting with an Atomic - # Group op code, then putting in the atomic group pattern - # and finally a success op code to tell any repeat - # operations within the Atomic Group to stop eating and - # pop their stack if they reach it - emit(ATOMIC_GROUP) - skip = _len(code); emit(0) - _compile(code, av, flags) - emit(SUCCESS) - code[skip] = _len(code) - skip - elif op in SUCCESS_CODES: - emit(op) - elif op in ASSERT_CODES: - emit(op) - skip = _len(code); emit(0) - if av[0] >= 0: - emit(0) # look ahead - else: - lo, hi = av[1].getwidth() - if lo > MAXCODE: - raise error("looks too much behind") - if lo != hi: - raise PatternError("look-behind requires fixed-width pattern") - emit(lo) # look behind - _compile(code, av[1], flags) - emit(SUCCESS) - code[skip] = _len(code) - skip - elif op is AT: - emit(op) - if flags & SRE_FLAG_MULTILINE: - av = AT_MULTILINE.get(av, av) - if flags & SRE_FLAG_LOCALE: - av = AT_LOCALE.get(av, av) - elif flags & SRE_FLAG_UNICODE: - av = AT_UNICODE.get(av, av) - emit(av) - elif op is BRANCH: - emit(op) - tail = [] - tailappend = tail.append - for av in av[1]: - skip = _len(code); emit(0) - # _compile_info(code, av, flags) - _compile(code, av, flags) - emit(JUMP) - tailappend(_len(code)); emit(0) - code[skip] = _len(code) - skip - emit(FAILURE) # end of branch - for tail in tail: - code[tail] = _len(code) - tail - elif op is CATEGORY: - emit(op) - if flags & SRE_FLAG_LOCALE: - av = CH_LOCALE[av] - elif flags & SRE_FLAG_UNICODE: - av = CH_UNICODE[av] - emit(av) - elif op is GROUPREF: - if not flags & SRE_FLAG_IGNORECASE: - emit(op) - elif flags & SRE_FLAG_LOCALE: - emit(GROUPREF_LOC_IGNORE) - elif not fixes: # ascii - emit(GROUPREF_IGNORE) - else: - emit(GROUPREF_UNI_IGNORE) - emit(av-1) - elif op is GROUPREF_EXISTS: - emit(op) - emit(av[0]-1) - skipyes = _len(code); emit(0) - _compile(code, av[1], flags) - if av[2]: - emit(JUMP) - skipno = _len(code); emit(0) - code[skipyes] = _len(code) - skipyes + 1 - _compile(code, av[2], flags) - code[skipno] = _len(code) - skipno - else: - code[skipyes] = _len(code) - skipyes + 1 - else: - raise PatternError(f"internal: unsupported operand type {op!r}") - -def _compile_charset(charset, flags, code): - # compile charset subprogram - emit = code.append - for op, av in charset: - emit(op) - if op is NEGATE: - pass - elif op is LITERAL: - emit(av) - elif op is RANGE or op is RANGE_UNI_IGNORE: - emit(av[0]) - emit(av[1]) - elif op is CHARSET: - code.extend(av) - elif op is BIGCHARSET: - code.extend(av) - elif op is CATEGORY: - if flags & SRE_FLAG_LOCALE: - emit(CH_LOCALE[av]) - elif flags & SRE_FLAG_UNICODE: - emit(CH_UNICODE[av]) - else: - emit(av) - else: - raise PatternError(f"internal: unsupported set operator {op!r}") - emit(FAILURE) - -def _optimize_charset(charset, iscased=None, fixup=None, fixes=None): - # internal: optimize character set - out = [] - tail = [] - charmap = bytearray(256) - hascased = False - for op, av in charset: - while True: - try: - if op is LITERAL: - if fixup: # 
IGNORECASE and not LOCALE - av = fixup(av) - charmap[av] = 1 - if fixes and av in fixes: - for k in fixes[av]: - charmap[k] = 1 - if not hascased and iscased(av): - hascased = True - else: - charmap[av] = 1 - elif op is RANGE: - r = range(av[0], av[1]+1) - if fixup: # IGNORECASE and not LOCALE - if fixes: - for i in map(fixup, r): - charmap[i] = 1 - if i in fixes: - for k in fixes[i]: - charmap[k] = 1 - else: - for i in map(fixup, r): - charmap[i] = 1 - if not hascased: - hascased = any(map(iscased, r)) - else: - for i in r: - charmap[i] = 1 - elif op is NEGATE: - out.append((op, av)) - else: - tail.append((op, av)) - except IndexError: - if len(charmap) == 256: - # character set contains non-UCS1 character codes - charmap += b'\0' * 0xff00 - continue - # Character set contains non-BMP character codes. - # For range, all BMP characters in the range are already - # proceeded. - if fixup: # IGNORECASE and not LOCALE - # For now, IN_UNI_IGNORE+LITERAL and - # IN_UNI_IGNORE+RANGE_UNI_IGNORE work for all non-BMP - # characters, because two characters (at least one of - # which is not in the BMP) match case-insensitively - # if and only if: - # 1) c1.lower() == c2.lower() - # 2) c1.lower() == c2 or c1.lower().upper() == c2 - # Also, both c.lower() and c.lower().upper() are single - # characters for every non-BMP character. - if op is RANGE: - if fixes: # not ASCII - op = RANGE_UNI_IGNORE - hascased = True - else: - assert op is LITERAL - if not hascased and iscased(av): - hascased = True - tail.append((op, av)) - break - - # compress character map - runs = [] - q = 0 - while True: - p = charmap.find(1, q) - if p < 0: - break - if len(runs) >= 2: - runs = None - break - q = charmap.find(0, p) - if q < 0: - runs.append((p, len(charmap))) - break - runs.append((p, q)) - if runs is not None: - # use literal/range - for p, q in runs: - if q - p == 1: - out.append((LITERAL, p)) - else: - out.append((RANGE, (p, q - 1))) - out += tail - # if the case was changed or new representation is more compact - if hascased or len(out) < len(charset): - return out, hascased - # else original character set is good enough - return charset, hascased - - # use bitmap - if len(charmap) == 256: - data = _mk_bitmap(charmap) - out.append((CHARSET, data)) - out += tail - return out, hascased - - # To represent a big charset, first a bitmap of all characters in the - # set is constructed. Then, this bitmap is sliced into chunks of 256 - # characters, duplicate chunks are eliminated, and each chunk is - # given a number. In the compiled expression, the charset is - # represented by a 32-bit word sequence, consisting of one word for - # the number of different chunks, a sequence of 256 bytes (64 words) - # of chunk numbers indexed by their original chunk position, and a - # sequence of 256-bit chunks (8 words each). - - # Compression is normally good: in a typical charset, large ranges of - # Unicode will be either completely excluded (e.g. if only cyrillic - # letters are to be matched), or completely included (e.g. if large - # subranges of Kanji match). These ranges will be represented by - # chunks of all one-bits or all zero-bits. - - # Matching can be also done efficiently: the more significant byte of - # the Unicode character is an index into the chunk number, and the - # less significant byte is a bit index in the chunk (just like the - # CHARSET matching). 
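    # (Editorial worked example, not part of the original file.) At match
    # time a character ch is resolved as chunk = mapping[ord(ch) >> 8],
    # then bit ord(ch) & 0xFF is tested within that 256-bit chunk; e.g.
    # for ch = '\u0431' the chunk index is 0x04 and the bit index 0x31.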
- - charmap = bytes(charmap) # should be hashable - comps = {} - mapping = bytearray(256) - block = 0 - data = bytearray() - for i in range(0, 65536, 256): - chunk = charmap[i: i + 256] - if chunk in comps: - mapping[i // 256] = comps[chunk] - else: - mapping[i // 256] = comps[chunk] = block - block += 1 - data += chunk - data = _mk_bitmap(data) - data[0:0] = [block] + _bytes_to_codes(mapping) - out.append((BIGCHARSET, data)) - out += tail - return out, hascased - -_CODEBITS = _sre.CODESIZE * 8 -MAXCODE = (1 << _CODEBITS) - 1 -_BITS_TRANS = b'0' + b'1' * 255 -def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int): - s = bits.translate(_BITS_TRANS)[::-1] - return [_int(s[i - _CODEBITS: i], 2) - for i in range(len(s), 0, -_CODEBITS)] - -def _bytes_to_codes(b): - # Convert block indices to word array - a = memoryview(b).cast('I') - assert a.itemsize == _sre.CODESIZE - assert len(a) * a.itemsize == len(b) - return a.tolist() - -def _simple(p): - # check if this subpattern is a "simple" operator - if len(p) != 1: - return False - op, av = p[0] - if op is SUBPATTERN: - return av[0] is None and _simple(av[-1]) - return op in _UNIT_CODES - -def _generate_overlap_table(prefix): - """ - Generate an overlap table for the following prefix. - An overlap table is a table of the same size as the prefix which - informs about the potential self-overlap for each index in the prefix: - - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...] - - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with - prefix[0:k] - """ - table = [0] * len(prefix) - for i in range(1, len(prefix)): - idx = table[i - 1] - while prefix[i] != prefix[idx]: - if idx == 0: - table[i] = 0 - break - idx = table[idx - 1] - else: - table[i] = idx + 1 - return table - -def _get_iscased(flags): - if not flags & SRE_FLAG_IGNORECASE: - return None - elif flags & SRE_FLAG_UNICODE: - return _sre.unicode_iscased - else: - return _sre.ascii_iscased - -def _get_literal_prefix(pattern, flags): - # look for literal prefix - prefix = [] - prefixappend = prefix.append - prefix_skip = None - iscased = _get_iscased(flags) - for op, av in pattern.data: - if op is LITERAL: - if iscased and iscased(av): - break - prefixappend(av) - elif op is SUBPATTERN: - group, add_flags, del_flags, p = av - flags1 = _combine_flags(flags, add_flags, del_flags) - if flags1 & SRE_FLAG_IGNORECASE and flags1 & SRE_FLAG_LOCALE: - break - prefix1, prefix_skip1, got_all = _get_literal_prefix(p, flags1) - if prefix_skip is None: - if group is not None: - prefix_skip = len(prefix) - elif prefix_skip1 is not None: - prefix_skip = len(prefix) + prefix_skip1 - prefix.extend(prefix1) - if not got_all: - break - else: - break - else: - return prefix, prefix_skip, True - return prefix, prefix_skip, False - -def _get_charset_prefix(pattern, flags): - while True: - if not pattern.data: - return None - op, av = pattern.data[0] - if op is not SUBPATTERN: - break - group, add_flags, del_flags, pattern = av - flags = _combine_flags(flags, add_flags, del_flags) - if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE: - return None - - iscased = _get_iscased(flags) - if op is LITERAL: - if iscased and iscased(av): - return None - return [(op, av)] - elif op is BRANCH: - charset = [] - charsetappend = charset.append - for p in av[1]: - if not p: - return None - op, av = p[0] - if op is LITERAL and not (iscased and iscased(av)): - charsetappend((op, av)) - else: - return None - return charset - elif op is IN: - charset = av - if iscased: - for op, av in charset: - if op is 
LITERAL: - if iscased(av): - return None - elif op is RANGE: - if av[1] > 0xffff: - return None - if any(map(iscased, range(av[0], av[1]+1))): - return None - return charset - return None - -def _compile_info(code, pattern, flags): - # internal: compile an info block. in the current version, - # this contains min/max pattern width, and an optional literal - # prefix or a character map - lo, hi = pattern.getwidth() - if hi > MAXCODE: - hi = MAXCODE - if lo == 0: - code.extend([INFO, 4, 0, lo, hi]) - return - # look for a literal prefix - prefix = [] - prefix_skip = 0 - charset = [] # not used - if not (flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE): - # look for literal prefix - prefix, prefix_skip, got_all = _get_literal_prefix(pattern, flags) - # if no prefix, look for charset prefix - if not prefix: - charset = _get_charset_prefix(pattern, flags) -## if prefix: -## print("*** PREFIX", prefix, prefix_skip) -## if charset: -## print("*** CHARSET", charset) - # add an info block - emit = code.append - emit(INFO) - skip = len(code); emit(0) - # literal flag - mask = 0 - if prefix: - mask = SRE_INFO_PREFIX - if prefix_skip is None and got_all: - mask = mask | SRE_INFO_LITERAL - elif charset: - mask = mask | SRE_INFO_CHARSET - emit(mask) - # pattern length - if lo < MAXCODE: - emit(lo) - else: - emit(MAXCODE) - prefix = prefix[:MAXCODE] - emit(hi) - # add literal prefix - if prefix: - emit(len(prefix)) # length - if prefix_skip is None: - prefix_skip = len(prefix) - emit(prefix_skip) # skip - code.extend(prefix) - # generate overlap table - code.extend(_generate_overlap_table(prefix)) - elif charset: - charset, hascased = _optimize_charset(charset) - assert not hascased - _compile_charset(charset, flags, code) - code[skip] = len(code) - skip - -def isstring(obj): - return isinstance(obj, (str, bytes)) - -def _code(p, flags): - - flags = p.state.flags | flags - code = [] - - # compile info block - _compile_info(code, p, flags) - - # compile the pattern - _compile(code, p.data, flags) - - code.append(SUCCESS) - - return code - -def _hex_code(code): - return '[%s]' % ', '.join('%#0*x' % (_sre.CODESIZE*2+2, x) for x in code) - -def dis(code): - import sys - - labels = set() - level = 0 - offset_width = len(str(len(code) - 1)) - - def dis_(start, end): - def print_(*args, to=None): - if to is not None: - labels.add(to) - args += ('(to %d)' % (to,),) - print('%*d%s ' % (offset_width, start, ':' if start in labels else '.'), - end=' '*(level-1)) - print(*args) - - def print_2(*args): - print(end=' '*(offset_width + 2*level)) - print(*args) - - nonlocal level - level += 1 - i = start - while i < end: - start = i - op = code[i] - i += 1 - op = OPCODES[op] - if op in (SUCCESS, FAILURE, ANY, ANY_ALL, - MAX_UNTIL, MIN_UNTIL, NEGATE): - print_(op) - elif op in (LITERAL, NOT_LITERAL, - LITERAL_IGNORE, NOT_LITERAL_IGNORE, - LITERAL_UNI_IGNORE, NOT_LITERAL_UNI_IGNORE, - LITERAL_LOC_IGNORE, NOT_LITERAL_LOC_IGNORE): - arg = code[i] - i += 1 - print_(op, '%#02x (%r)' % (arg, chr(arg))) - elif op is AT: - arg = code[i] - i += 1 - arg = str(ATCODES[arg]) - assert arg[:3] == 'AT_' - print_(op, arg[3:]) - elif op is CATEGORY: - arg = code[i] - i += 1 - arg = str(CHCODES[arg]) - assert arg[:9] == 'CATEGORY_' - print_(op, arg[9:]) - elif op in (IN, IN_IGNORE, IN_UNI_IGNORE, IN_LOC_IGNORE): - skip = code[i] - print_(op, skip, to=i+skip) - dis_(i+1, i+skip) - i += skip - elif op in (RANGE, RANGE_UNI_IGNORE): - lo, hi = code[i: i+2] - i += 2 - print_(op, '%#02x %#02x (%r-%r)' % (lo, hi, chr(lo), chr(hi))) - 
elif op is CHARSET: - print_(op, _hex_code(code[i: i + 256//_CODEBITS])) - i += 256//_CODEBITS - elif op is BIGCHARSET: - arg = code[i] - i += 1 - mapping = list(b''.join(x.to_bytes(_sre.CODESIZE, sys.byteorder) - for x in code[i: i + 256//_sre.CODESIZE])) - print_(op, arg, mapping) - i += 256//_sre.CODESIZE - level += 1 - for j in range(arg): - print_2(_hex_code(code[i: i + 256//_CODEBITS])) - i += 256//_CODEBITS - level -= 1 - elif op in (MARK, GROUPREF, GROUPREF_IGNORE, GROUPREF_UNI_IGNORE, - GROUPREF_LOC_IGNORE): - arg = code[i] - i += 1 - print_(op, arg) - elif op is JUMP: - skip = code[i] - print_(op, skip, to=i+skip) - i += 1 - elif op is BRANCH: - skip = code[i] - print_(op, skip, to=i+skip) - while skip: - dis_(i+1, i+skip) - i += skip - start = i - skip = code[i] - if skip: - print_('branch', skip, to=i+skip) - else: - print_(FAILURE) - i += 1 - elif op in (REPEAT, REPEAT_ONE, MIN_REPEAT_ONE, - POSSESSIVE_REPEAT, POSSESSIVE_REPEAT_ONE): - skip, min, max = code[i: i+3] - if max == MAXREPEAT: - max = 'MAXREPEAT' - print_(op, skip, min, max, to=i+skip) - dis_(i+3, i+skip) - i += skip - elif op is GROUPREF_EXISTS: - arg, skip = code[i: i+2] - print_(op, arg, skip, to=i+skip) - i += 2 - elif op in (ASSERT, ASSERT_NOT): - skip, arg = code[i: i+2] - print_(op, skip, arg, to=i+skip) - dis_(i+2, i+skip) - i += skip - elif op is ATOMIC_GROUP: - skip = code[i] - print_(op, skip, to=i+skip) - dis_(i+1, i+skip) - i += skip - elif op is INFO: - skip, flags, min, max = code[i: i+4] - if max == MAXREPEAT: - max = 'MAXREPEAT' - print_(op, skip, bin(flags), min, max, to=i+skip) - start = i+4 - if flags & SRE_INFO_PREFIX: - prefix_len, prefix_skip = code[i+4: i+6] - print_2(' prefix_skip', prefix_skip) - start = i + 6 - prefix = code[start: start+prefix_len] - print_2(' prefix', - '[%s]' % ', '.join('%#02x' % x for x in prefix), - '(%r)' % ''.join(map(chr, prefix))) - start += prefix_len - print_2(' overlap', code[start: start+prefix_len]) - start += prefix_len - if flags & SRE_INFO_CHARSET: - level += 1 - print_2('in') - dis_(start, i+skip) - level -= 1 - i += skip - else: - raise ValueError(op) - - level -= 1 - - dis_(0, len(code)) - - -def compile(p, flags=0): - # internal: convert pattern list to internal format - - if isstring(p): - pattern = p - p = _parser.parse(p, flags) - else: - pattern = None - - code = _code(p, flags) - - if flags & SRE_FLAG_DEBUG: - print() - dis(code) - - # map in either direction - groupindex = p.state.groupdict - indexgroup = [None] * p.state.groups - for k, i in groupindex.items(): - indexgroup[i] = k - - return _sre.compile( - pattern, flags | p.state.flags, code, - p.state.groups-1, - groupindex, tuple(indexgroup) - ) diff --git a/Python313_13_x64_Template/Lib/re/_constants.py b/Python313_13_x64_Template/Lib/re/_constants.py deleted file mode 100644 index 9c3c294b..00000000 --- a/Python313_13_x64_Template/Lib/re/_constants.py +++ /dev/null @@ -1,222 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# various symbols used by the regular expression engine. -# run this script to update the _sre include files! -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# See the __init__.py file for information on usage and redistribution. -# - -"""Internal support module for sre""" - -# update when constants are added or removed - -MAGIC = 20230612 - -from _sre import MAXREPEAT, MAXGROUPS - -# SRE standard exception (access as sre.error) -# should this really be here? 
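As a hypothetical illustration of how the exception defined just below surfaces through the public API (assuming Python 3.13+, where the PatternError name exists; re.error remains an alias):

    import re

    try:
        re.compile("(unbalanced")
    except re.PatternError as exc:   # the same class as the legacy re.error
        # msg is the bare message; pos/lineno/colno locate the failure point
        print(exc.msg, exc.pos, exc.lineno, exc.colno)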
- -class PatternError(Exception): - """Exception raised for invalid regular expressions. - - Attributes: - - msg: The unformatted error message - pattern: The regular expression pattern - pos: The index in the pattern where compilation failed (may be None) - lineno: The line corresponding to pos (may be None) - colno: The column corresponding to pos (may be None) - """ - - __module__ = 're' - - def __init__(self, msg, pattern=None, pos=None): - self.msg = msg - self.pattern = pattern - self.pos = pos - if pattern is not None and pos is not None: - msg = '%s at position %d' % (msg, pos) - if isinstance(pattern, str): - newline = '\n' - else: - newline = b'\n' - self.lineno = pattern.count(newline, 0, pos) + 1 - self.colno = pos - pattern.rfind(newline, 0, pos) - if newline in pattern: - msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno) - else: - self.lineno = self.colno = None - super().__init__(msg) - - -# Backward compatibility after renaming in 3.13 -error = PatternError - -class _NamedIntConstant(int): - def __new__(cls, value, name): - self = super(_NamedIntConstant, cls).__new__(cls, value) - self.name = name - return self - - def __repr__(self): - return self.name - - __reduce__ = None - -MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT') - -def _makecodes(*names): - items = [_NamedIntConstant(i, name) for i, name in enumerate(names)] - globals().update({item.name: item for item in items}) - return items - -# operators -OPCODES = _makecodes( - # failure=0 success=1 (just because it looks better that way :-) - 'FAILURE', 'SUCCESS', - - 'ANY', 'ANY_ALL', - 'ASSERT', 'ASSERT_NOT', - 'AT', - 'BRANCH', - 'CATEGORY', - 'CHARSET', 'BIGCHARSET', - 'GROUPREF', 'GROUPREF_EXISTS', - 'IN', - 'INFO', - 'JUMP', - 'LITERAL', - 'MARK', - 'MAX_UNTIL', - 'MIN_UNTIL', - 'NOT_LITERAL', - 'NEGATE', - 'RANGE', - 'REPEAT', - 'REPEAT_ONE', - 'SUBPATTERN', - 'MIN_REPEAT_ONE', - 'ATOMIC_GROUP', - 'POSSESSIVE_REPEAT', - 'POSSESSIVE_REPEAT_ONE', - - 'GROUPREF_IGNORE', - 'IN_IGNORE', - 'LITERAL_IGNORE', - 'NOT_LITERAL_IGNORE', - - 'GROUPREF_LOC_IGNORE', - 'IN_LOC_IGNORE', - 'LITERAL_LOC_IGNORE', - 'NOT_LITERAL_LOC_IGNORE', - - 'GROUPREF_UNI_IGNORE', - 'IN_UNI_IGNORE', - 'LITERAL_UNI_IGNORE', - 'NOT_LITERAL_UNI_IGNORE', - 'RANGE_UNI_IGNORE', - - # The following opcodes are only occurred in the parser output, - # but not in the compiled code. 
- 'MIN_REPEAT', 'MAX_REPEAT', -) -del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT - -# positions -ATCODES = _makecodes( - 'AT_BEGINNING', 'AT_BEGINNING_LINE', 'AT_BEGINNING_STRING', - 'AT_BOUNDARY', 'AT_NON_BOUNDARY', - 'AT_END', 'AT_END_LINE', 'AT_END_STRING', - - 'AT_LOC_BOUNDARY', 'AT_LOC_NON_BOUNDARY', - - 'AT_UNI_BOUNDARY', 'AT_UNI_NON_BOUNDARY', -) - -# categories -CHCODES = _makecodes( - 'CATEGORY_DIGIT', 'CATEGORY_NOT_DIGIT', - 'CATEGORY_SPACE', 'CATEGORY_NOT_SPACE', - 'CATEGORY_WORD', 'CATEGORY_NOT_WORD', - 'CATEGORY_LINEBREAK', 'CATEGORY_NOT_LINEBREAK', - - 'CATEGORY_LOC_WORD', 'CATEGORY_LOC_NOT_WORD', - - 'CATEGORY_UNI_DIGIT', 'CATEGORY_UNI_NOT_DIGIT', - 'CATEGORY_UNI_SPACE', 'CATEGORY_UNI_NOT_SPACE', - 'CATEGORY_UNI_WORD', 'CATEGORY_UNI_NOT_WORD', - 'CATEGORY_UNI_LINEBREAK', 'CATEGORY_UNI_NOT_LINEBREAK', -) - - -# replacement operations for "ignore case" mode -OP_IGNORE = { - LITERAL: LITERAL_IGNORE, - NOT_LITERAL: NOT_LITERAL_IGNORE, -} - -OP_LOCALE_IGNORE = { - LITERAL: LITERAL_LOC_IGNORE, - NOT_LITERAL: NOT_LITERAL_LOC_IGNORE, -} - -OP_UNICODE_IGNORE = { - LITERAL: LITERAL_UNI_IGNORE, - NOT_LITERAL: NOT_LITERAL_UNI_IGNORE, -} - -AT_MULTILINE = { - AT_BEGINNING: AT_BEGINNING_LINE, - AT_END: AT_END_LINE -} - -AT_LOCALE = { - AT_BOUNDARY: AT_LOC_BOUNDARY, - AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY -} - -AT_UNICODE = { - AT_BOUNDARY: AT_UNI_BOUNDARY, - AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY -} - -CH_LOCALE = { - CATEGORY_DIGIT: CATEGORY_DIGIT, - CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, - CATEGORY_SPACE: CATEGORY_SPACE, - CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, - CATEGORY_WORD: CATEGORY_LOC_WORD, - CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, - CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, - CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK -} - -CH_UNICODE = { - CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, - CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT, - CATEGORY_SPACE: CATEGORY_UNI_SPACE, - CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, - CATEGORY_WORD: CATEGORY_UNI_WORD, - CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, - CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, - CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK -} - -# flags -SRE_FLAG_IGNORECASE = 2 # case insensitive -SRE_FLAG_LOCALE = 4 # honour system locale -SRE_FLAG_MULTILINE = 8 # treat target as multiline string -SRE_FLAG_DOTALL = 16 # treat target as a single string -SRE_FLAG_UNICODE = 32 # use unicode "locale" -SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments -SRE_FLAG_DEBUG = 128 # debugging -SRE_FLAG_ASCII = 256 # use ascii "locale" - -# flags for INFO primitive -SRE_INFO_PREFIX = 1 # has prefix -SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) -SRE_INFO_CHARSET = 4 # pattern starts with character from given set diff --git a/Python313_13_x64_Template/Lib/re/_parser.py b/Python313_13_x64_Template/Lib/re/_parser.py deleted file mode 100644 index f3c77934..00000000 --- a/Python313_13_x64_Template/Lib/re/_parser.py +++ /dev/null @@ -1,1081 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# convert re-style regular expression to sre pattern -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# See the __init__.py file for information on usage and redistribution. 
-# - -"""Internal support module for sre""" - -# XXX: show string offset and offending character for all errors - -from ._constants import * - -SPECIAL_CHARS = ".\\[{()*+?^$|" -REPEAT_CHARS = "*+?{" - -DIGITS = frozenset("0123456789") - -OCTDIGITS = frozenset("01234567") -HEXDIGITS = frozenset("0123456789abcdefABCDEF") -ASCIILETTERS = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -WHITESPACE = frozenset(" \t\n\r\v\f") - -_REPEATCODES = frozenset({MIN_REPEAT, MAX_REPEAT, POSSESSIVE_REPEAT}) -_UNITCODES = frozenset({ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY}) - -ESCAPES = { - r"\a": (LITERAL, ord("\a")), - r"\b": (LITERAL, ord("\b")), - r"\f": (LITERAL, ord("\f")), - r"\n": (LITERAL, ord("\n")), - r"\r": (LITERAL, ord("\r")), - r"\t": (LITERAL, ord("\t")), - r"\v": (LITERAL, ord("\v")), - r"\\": (LITERAL, ord("\\")) -} - -CATEGORIES = { - r"\A": (AT, AT_BEGINNING_STRING), # start of string - r"\b": (AT, AT_BOUNDARY), - r"\B": (AT, AT_NON_BOUNDARY), - r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]), - r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]), - r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]), - r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]), - r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]), - r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]), - r"\Z": (AT, AT_END_STRING), # end of string -} - -FLAGS = { - # standard flags - "i": SRE_FLAG_IGNORECASE, - "L": SRE_FLAG_LOCALE, - "m": SRE_FLAG_MULTILINE, - "s": SRE_FLAG_DOTALL, - "x": SRE_FLAG_VERBOSE, - # extensions - "a": SRE_FLAG_ASCII, - "u": SRE_FLAG_UNICODE, -} - -TYPE_FLAGS = SRE_FLAG_ASCII | SRE_FLAG_LOCALE | SRE_FLAG_UNICODE -GLOBAL_FLAGS = SRE_FLAG_DEBUG - -# Maximal value returned by SubPattern.getwidth(). -# Must be larger than MAXREPEAT, MAXCODE and sys.maxsize. -MAXWIDTH = 1 << 64 - -class State: - # keeps track of state for parsing - def __init__(self): - self.flags = 0 - self.groupdict = {} - self.groupwidths = [None] # group 0 - self.lookbehindgroups = None - self.grouprefpos = {} - @property - def groups(self): - return len(self.groupwidths) - def opengroup(self, name=None): - gid = self.groups - self.groupwidths.append(None) - if self.groups > MAXGROUPS: - raise error("too many groups") - if name is not None: - ogid = self.groupdict.get(name, None) - if ogid is not None: - raise error("redefinition of group name %r as group %d; " - "was group %d" % (name, gid, ogid)) - self.groupdict[name] = gid - return gid - def closegroup(self, gid, p): - self.groupwidths[gid] = p.getwidth() - def checkgroup(self, gid): - return gid < self.groups and self.groupwidths[gid] is not None - - def checklookbehindgroup(self, gid, source): - if self.lookbehindgroups is not None: - if not self.checkgroup(gid): - raise source.error('cannot refer to an open group') - if gid >= self.lookbehindgroups: - raise source.error('cannot refer to group defined in the same ' - 'lookbehind subpattern') - -class SubPattern: - # a subpattern, in intermediate form - def __init__(self, state, data=None): - self.state = state - if data is None: - data = [] - self.data = data - self.width = None - - def dump(self, level=0): - seqtypes = (tuple, list) - for op, av in self.data: - print(level*" " + str(op), end='') - if op is IN: - # member sublanguage - print() - for op, a in av: - print((level+1)*" " + str(op), a) - elif op is BRANCH: - print() - for i, a in enumerate(av[1]): - if i: - print(level*" " + "OR") - a.dump(level+1) - elif op is GROUPREF_EXISTS: - condgroup, item_yes, item_no = av - print('', condgroup) - item_yes.dump(level+1) - if item_no: - 
print(level*" " + "ELSE") - item_no.dump(level+1) - elif isinstance(av, SubPattern): - print() - av.dump(level+1) - elif isinstance(av, seqtypes): - nl = False - for a in av: - if isinstance(a, SubPattern): - if not nl: - print() - a.dump(level+1) - nl = True - else: - if not nl: - print(' ', end='') - print(a, end='') - nl = False - if not nl: - print() - else: - print('', av) - def __repr__(self): - return repr(self.data) - def __len__(self): - return len(self.data) - def __delitem__(self, index): - del self.data[index] - def __getitem__(self, index): - if isinstance(index, slice): - return SubPattern(self.state, self.data[index]) - return self.data[index] - def __setitem__(self, index, code): - self.data[index] = code - def insert(self, index, code): - self.data.insert(index, code) - def append(self, code): - self.data.append(code) - def getwidth(self): - # determine the width (min, max) for this subpattern - if self.width is not None: - return self.width - lo = hi = 0 - for op, av in self.data: - if op is BRANCH: - i = MAXWIDTH - j = 0 - for av in av[1]: - l, h = av.getwidth() - i = min(i, l) - j = max(j, h) - lo = lo + i - hi = hi + j - elif op is ATOMIC_GROUP: - i, j = av.getwidth() - lo = lo + i - hi = hi + j - elif op is SUBPATTERN: - i, j = av[-1].getwidth() - lo = lo + i - hi = hi + j - elif op in _REPEATCODES: - i, j = av[2].getwidth() - lo = lo + i * av[0] - if av[1] == MAXREPEAT and j: - hi = MAXWIDTH - else: - hi = hi + j * av[1] - elif op in _UNITCODES: - lo = lo + 1 - hi = hi + 1 - elif op is GROUPREF: - i, j = self.state.groupwidths[av] - lo = lo + i - hi = hi + j - elif op is GROUPREF_EXISTS: - i, j = av[1].getwidth() - if av[2] is not None: - l, h = av[2].getwidth() - i = min(i, l) - j = max(j, h) - else: - i = 0 - lo = lo + i - hi = hi + j - elif op is SUCCESS: - break - self.width = min(lo, MAXWIDTH), min(hi, MAXWIDTH) - return self.width - -class Tokenizer: - def __init__(self, string): - self.istext = isinstance(string, str) - self.string = string - if not self.istext: - string = str(string, 'latin1') - self.decoded_string = string - self.index = 0 - self.next = None - self.__next() - def __next(self): - index = self.index - try: - char = self.decoded_string[index] - except IndexError: - self.next = None - return - if char == "\\": - index += 1 - try: - char += self.decoded_string[index] - except IndexError: - raise error("bad escape (end of pattern)", - self.string, len(self.string) - 1) from None - self.index = index + 1 - self.next = char - def match(self, char): - if char == self.next: - self.__next() - return True - return False - def get(self): - this = self.next - self.__next() - return this - def getwhile(self, n, charset): - result = '' - for _ in range(n): - c = self.next - if c not in charset: - break - result += c - self.__next() - return result - def getuntil(self, terminator, name): - result = '' - while True: - c = self.next - self.__next() - if c is None: - if not result: - raise self.error("missing " + name) - raise self.error("missing %s, unterminated name" % terminator, - len(result)) - if c == terminator: - if not result: - raise self.error("missing " + name, 1) - break - result += c - return result - @property - def pos(self): - return self.index - len(self.next or '') - def tell(self): - return self.index - len(self.next or '') - def seek(self, index): - self.index = index - self.__next() - - def error(self, msg, offset=0): - if not self.istext: - msg = msg.encode('ascii', 'backslashreplace').decode('ascii') - return error(msg, self.string, 
self.tell() - offset) - - def checkgroupname(self, name, offset): - if not (self.istext or name.isascii()): - msg = "bad character in group name %a" % name - raise self.error(msg, len(name) + offset) - if not name.isidentifier(): - msg = "bad character in group name %r" % name - raise self.error(msg, len(name) + offset) - -def _class_escape(source, escape): - # handle escape code inside character class - code = ESCAPES.get(escape) - if code: - return code - code = CATEGORIES.get(escape) - if code and code[0] is IN: - return code - try: - c = escape[1:2] - if c == "x": - # hexadecimal escape (exactly two digits) - escape += source.getwhile(2, HEXDIGITS) - if len(escape) != 4: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "u" and source.istext: - # unicode escape (exactly four digits) - escape += source.getwhile(4, HEXDIGITS) - if len(escape) != 6: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "U" and source.istext: - # unicode escape (exactly eight digits) - escape += source.getwhile(8, HEXDIGITS) - if len(escape) != 10: - raise source.error("incomplete escape %s" % escape, len(escape)) - c = int(escape[2:], 16) - chr(c) # raise ValueError for invalid code - return LITERAL, c - elif c == "N" and source.istext: - import unicodedata - # named unicode escape e.g. \N{EM DASH} - if not source.match('{'): - raise source.error("missing {") - charname = source.getuntil('}', 'character name') - try: - c = ord(unicodedata.lookup(charname)) - except (KeyError, TypeError): - raise source.error("undefined character name %r" % charname, - len(charname) + len(r'\N{}')) from None - return LITERAL, c - elif c in OCTDIGITS: - # octal escape (up to three digits) - escape += source.getwhile(2, OCTDIGITS) - c = int(escape[1:], 8) - if c > 0o377: - raise source.error('octal escape value %s outside of ' - 'range 0-0o377' % escape, len(escape)) - return LITERAL, c - elif c in DIGITS: - raise ValueError - if len(escape) == 2: - if c in ASCIILETTERS: - raise source.error('bad escape %s' % escape, len(escape)) - return LITERAL, ord(escape[1]) - except ValueError: - pass - raise source.error("bad escape %s" % escape, len(escape)) - -def _escape(source, escape, state): - # handle escape code in expression - code = CATEGORIES.get(escape) - if code: - return code - code = ESCAPES.get(escape) - if code: - return code - try: - c = escape[1:2] - if c == "x": - # hexadecimal escape - escape += source.getwhile(2, HEXDIGITS) - if len(escape) != 4: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "u" and source.istext: - # unicode escape (exactly four digits) - escape += source.getwhile(4, HEXDIGITS) - if len(escape) != 6: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "U" and source.istext: - # unicode escape (exactly eight digits) - escape += source.getwhile(8, HEXDIGITS) - if len(escape) != 10: - raise source.error("incomplete escape %s" % escape, len(escape)) - c = int(escape[2:], 16) - chr(c) # raise ValueError for invalid code - return LITERAL, c - elif c == "N" and source.istext: - import unicodedata - # named unicode escape e.g. 
\N{EM DASH} - if not source.match('{'): - raise source.error("missing {") - charname = source.getuntil('}', 'character name') - try: - c = ord(unicodedata.lookup(charname)) - except (KeyError, TypeError): - raise source.error("undefined character name %r" % charname, - len(charname) + len(r'\N{}')) from None - return LITERAL, c - elif c == "0": - # octal escape - escape += source.getwhile(2, OCTDIGITS) - return LITERAL, int(escape[1:], 8) - elif c in DIGITS: - # octal escape *or* decimal group reference (sigh) - if source.next in DIGITS: - escape += source.get() - if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and - source.next in OCTDIGITS): - # got three octal digits; this is an octal escape - escape += source.get() - c = int(escape[1:], 8) - if c > 0o377: - raise source.error('octal escape value %s outside of ' - 'range 0-0o377' % escape, - len(escape)) - return LITERAL, c - # not an octal escape, so this is a group reference - group = int(escape[1:]) - if group < state.groups: - if not state.checkgroup(group): - raise source.error("cannot refer to an open group", - len(escape)) - state.checklookbehindgroup(group, source) - return GROUPREF, group - raise source.error("invalid group reference %d" % group, len(escape) - 1) - if len(escape) == 2: - if c in ASCIILETTERS: - raise source.error("bad escape %s" % escape, len(escape)) - return LITERAL, ord(escape[1]) - except ValueError: - pass - raise source.error("bad escape %s" % escape, len(escape)) - -def _uniq(items): - return list(dict.fromkeys(items)) - -def _parse_sub(source, state, verbose, nested): - # parse an alternation: a|b|c - - items = [] - itemsappend = items.append - sourcematch = source.match - start = source.tell() - while True: - itemsappend(_parse(source, state, verbose, nested + 1, - not nested and not items)) - if not sourcematch("|"): - break - if not nested: - verbose = state.flags & SRE_FLAG_VERBOSE - - if len(items) == 1: - return items[0] - - subpattern = SubPattern(state) - - # check if all items share a common prefix - while True: - prefix = None - for item in items: - if not item: - break - if prefix is None: - prefix = item[0] - elif item[0] != prefix: - break - else: - # all subitems start with a common "prefix". 
- # move it out of the branch - for item in items: - del item[0] - subpattern.append(prefix) - continue # check next one - break - - # check if the branch can be replaced by a character set - set = [] - for item in items: - if len(item) != 1: - break - op, av = item[0] - if op is LITERAL: - set.append((op, av)) - elif op is IN and av[0][0] is not NEGATE: - set.extend(av) - else: - break - else: - # we can store this as a character set instead of a - # branch (the compiler may optimize this even more) - subpattern.append((IN, _uniq(set))) - return subpattern - - subpattern.append((BRANCH, (None, items))) - return subpattern - -def _parse(source, state, verbose, nested, first=False): - # parse a simple pattern - subpattern = SubPattern(state) - - # precompute constants into local variables - subpatternappend = subpattern.append - sourceget = source.get - sourcematch = source.match - _len = len - _ord = ord - - while True: - - this = source.next - if this is None: - break # end of pattern - if this in "|)": - break # end of subpattern - sourceget() - - if verbose: - # skip whitespace and comments - if this in WHITESPACE: - continue - if this == "#": - while True: - this = sourceget() - if this is None or this == "\n": - break - continue - - if this[0] == "\\": - code = _escape(source, this, state) - subpatternappend(code) - - elif this not in SPECIAL_CHARS: - subpatternappend((LITERAL, _ord(this))) - - elif this == "[": - here = source.tell() - 1 - # character set - set = [] - setappend = set.append -## if sourcematch(":"): -## pass # handle character classes - if source.next == '[': - import warnings - warnings.warn( - 'Possible nested set at position %d' % source.tell(), - FutureWarning, stacklevel=nested + 6 - ) - negate = sourcematch("^") - # check remaining characters - while True: - this = sourceget() - if this is None: - raise source.error("unterminated character set", - source.tell() - here) - if this == "]" and set: - break - elif this[0] == "\\": - code1 = _class_escape(source, this) - else: - if set and this in '-&~|' and source.next == this: - import warnings - warnings.warn( - 'Possible set %s at position %d' % ( - 'difference' if this == '-' else - 'intersection' if this == '&' else - 'symmetric difference' if this == '~' else - 'union', - source.tell() - 1), - FutureWarning, stacklevel=nested + 6 - ) - code1 = LITERAL, _ord(this) - if sourcematch("-"): - # potential range - that = sourceget() - if that is None: - raise source.error("unterminated character set", - source.tell() - here) - if that == "]": - if code1[0] is IN: - code1 = code1[1][0] - setappend(code1) - setappend((LITERAL, _ord("-"))) - break - if that[0] == "\\": - code2 = _class_escape(source, that) - else: - if that == '-': - import warnings - warnings.warn( - 'Possible set difference at position %d' % ( - source.tell() - 2), - FutureWarning, stacklevel=nested + 6 - ) - code2 = LITERAL, _ord(that) - if code1[0] != LITERAL or code2[0] != LITERAL: - msg = "bad character range %s-%s" % (this, that) - raise source.error(msg, len(this) + 1 + len(that)) - lo = code1[1] - hi = code2[1] - if hi < lo: - msg = "bad character range %s-%s" % (this, that) - raise source.error(msg, len(this) + 1 + len(that)) - setappend((RANGE, (lo, hi))) - else: - if code1[0] is IN: - code1 = code1[1][0] - setappend(code1) - - set = _uniq(set) - # XXX: should move set optimization to compiler! 
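The single-literal optimization that follows can be observed from the outside via re.DEBUG; a hypothetical demo (exact debug output may vary between versions):

    import re

    re.compile("[a]", re.DEBUG)    # collapses to: LITERAL 97
    re.compile("[^a]", re.DEBUG)   # collapses to: NOT_LITERAL 97
    re.compile("[ab]", re.DEBUG)   # kept as a set: IN with LITERAL 97, LITERAL 98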
- if _len(set) == 1 and set[0][0] is LITERAL: - # optimization - if negate: - subpatternappend((NOT_LITERAL, set[0][1])) - else: - subpatternappend(set[0]) - else: - if negate: - set.insert(0, (NEGATE, None)) - # charmap optimization can't be added here because - # global flags still are not known - subpatternappend((IN, set)) - - elif this in REPEAT_CHARS: - # repeat previous item - here = source.tell() - if this == "?": - min, max = 0, 1 - elif this == "*": - min, max = 0, MAXREPEAT - - elif this == "+": - min, max = 1, MAXREPEAT - elif this == "{": - if source.next == "}": - subpatternappend((LITERAL, _ord(this))) - continue - - min, max = 0, MAXREPEAT - lo = hi = "" - while source.next in DIGITS: - lo += sourceget() - if sourcematch(","): - while source.next in DIGITS: - hi += sourceget() - else: - hi = lo - if not sourcematch("}"): - subpatternappend((LITERAL, _ord(this))) - source.seek(here) - continue - - if lo: - min = int(lo) - if min >= MAXREPEAT: - raise OverflowError("the repetition number is too large") - if hi: - max = int(hi) - if max >= MAXREPEAT: - raise OverflowError("the repetition number is too large") - if max < min: - raise source.error("min repeat greater than max repeat", - source.tell() - here) - else: - raise AssertionError("unsupported quantifier %r" % (char,)) - # figure out which item to repeat - if subpattern: - item = subpattern[-1:] - else: - item = None - if not item or item[0][0] is AT: - raise source.error("nothing to repeat", - source.tell() - here + len(this)) - if item[0][0] in _REPEATCODES: - raise source.error("multiple repeat", - source.tell() - here + len(this)) - if item[0][0] is SUBPATTERN: - group, add_flags, del_flags, p = item[0][1] - if group is None and not add_flags and not del_flags: - item = p - if sourcematch("?"): - # Non-Greedy Match - subpattern[-1] = (MIN_REPEAT, (min, max, item)) - elif sourcematch("+"): - # Possessive Match (Always Greedy) - subpattern[-1] = (POSSESSIVE_REPEAT, (min, max, item)) - else: - # Greedy Match - subpattern[-1] = (MAX_REPEAT, (min, max, item)) - - elif this == ".": - subpatternappend((ANY, None)) - - elif this == "(": - start = source.tell() - 1 - capture = True - atomic = False - name = None - add_flags = 0 - del_flags = 0 - if sourcematch("?"): - # options - char = sourceget() - if char is None: - raise source.error("unexpected end of pattern") - if char == "P": - # python extensions - if sourcematch("<"): - # named group: skip forward to end of name - name = source.getuntil(">", "group name") - source.checkgroupname(name, 1) - elif sourcematch("="): - # named backreference - name = source.getuntil(")", "group name") - source.checkgroupname(name, 1) - gid = state.groupdict.get(name) - if gid is None: - msg = "unknown group name %r" % name - raise source.error(msg, len(name) + 1) - if not state.checkgroup(gid): - raise source.error("cannot refer to an open group", - len(name) + 1) - state.checklookbehindgroup(gid, source) - subpatternappend((GROUPREF, gid)) - continue - - else: - char = sourceget() - if char is None: - raise source.error("unexpected end of pattern") - raise source.error("unknown extension ?P" + char, - len(char) + 2) - elif char == ":": - # non-capturing group - capture = False - elif char == "#": - # comment - while True: - if source.next is None: - raise source.error("missing ), unterminated comment", - source.tell() - start) - if sourceget() == ")": - break - continue - - elif char in "=!<": - # lookahead assertions - dir = 1 - if char == "<": - char = sourceget() - if char is None: - 
raise source.error("unexpected end of pattern") - if char not in "=!": - raise source.error("unknown extension ?<" + char, - len(char) + 2) - dir = -1 # lookbehind - lookbehindgroups = state.lookbehindgroups - if lookbehindgroups is None: - state.lookbehindgroups = state.groups - p = _parse_sub(source, state, verbose, nested + 1) - if dir < 0: - if lookbehindgroups is None: - state.lookbehindgroups = None - if not sourcematch(")"): - raise source.error("missing ), unterminated subpattern", - source.tell() - start) - if char == "=": - subpatternappend((ASSERT, (dir, p))) - elif p: - subpatternappend((ASSERT_NOT, (dir, p))) - else: - subpatternappend((FAILURE, ())) - continue - - elif char == "(": - # conditional backreference group - condname = source.getuntil(")", "group name") - if not (condname.isdecimal() and condname.isascii()): - source.checkgroupname(condname, 1) - condgroup = state.groupdict.get(condname) - if condgroup is None: - msg = "unknown group name %r" % condname - raise source.error(msg, len(condname) + 1) - else: - condgroup = int(condname) - if not condgroup: - raise source.error("bad group number", - len(condname) + 1) - if condgroup >= MAXGROUPS: - msg = "invalid group reference %d" % condgroup - raise source.error(msg, len(condname) + 1) - if condgroup not in state.grouprefpos: - state.grouprefpos[condgroup] = ( - source.tell() - len(condname) - 1 - ) - if not (condname.isdecimal() and condname.isascii()): - import warnings - warnings.warn( - "bad character in group name %s at position %d" % - (repr(condname) if source.istext else ascii(condname), - source.tell() - len(condname) - 1), - DeprecationWarning, stacklevel=nested + 6 - ) - state.checklookbehindgroup(condgroup, source) - item_yes = _parse(source, state, verbose, nested + 1) - if source.match("|"): - item_no = _parse(source, state, verbose, nested + 1) - if source.next == "|": - raise source.error("conditional backref with more than two branches") - else: - item_no = None - if not source.match(")"): - raise source.error("missing ), unterminated subpattern", - source.tell() - start) - subpatternappend((GROUPREF_EXISTS, (condgroup, item_yes, item_no))) - continue - - elif char == ">": - # non-capturing, atomic group - capture = False - atomic = True - elif char in FLAGS or char == "-": - # flags - flags = _parse_flags(source, state, char) - if flags is None: # global flags - if not first or subpattern: - raise source.error('global flags not at the start ' - 'of the expression', - source.tell() - start) - verbose = state.flags & SRE_FLAG_VERBOSE - continue - - add_flags, del_flags = flags - capture = False - else: - raise source.error("unknown extension ?" 
+ char, - len(char) + 1) - - # parse group contents - if capture: - try: - group = state.opengroup(name) - except error as err: - raise source.error(err.msg, len(name) + 1) from None - else: - group = None - sub_verbose = ((verbose or (add_flags & SRE_FLAG_VERBOSE)) and - not (del_flags & SRE_FLAG_VERBOSE)) - p = _parse_sub(source, state, sub_verbose, nested + 1) - if not source.match(")"): - raise source.error("missing ), unterminated subpattern", - source.tell() - start) - if group is not None: - state.closegroup(group, p) - if atomic: - assert group is None - subpatternappend((ATOMIC_GROUP, p)) - else: - subpatternappend((SUBPATTERN, (group, add_flags, del_flags, p))) - - elif this == "^": - subpatternappend((AT, AT_BEGINNING)) - - elif this == "$": - subpatternappend((AT, AT_END)) - - else: - raise AssertionError("unsupported special character %r" % (char,)) - - # unpack non-capturing groups - for i in range(len(subpattern))[::-1]: - op, av = subpattern[i] - if op is SUBPATTERN: - group, add_flags, del_flags, p = av - if group is None and not add_flags and not del_flags: - subpattern[i: i+1] = p - - return subpattern - -def _parse_flags(source, state, char): - sourceget = source.get - add_flags = 0 - del_flags = 0 - if char != "-": - while True: - flag = FLAGS[char] - if source.istext: - if char == 'L': - msg = "bad inline flags: cannot use 'L' flag with a str pattern" - raise source.error(msg) - else: - if char == 'u': - msg = "bad inline flags: cannot use 'u' flag with a bytes pattern" - raise source.error(msg) - add_flags |= flag - if (flag & TYPE_FLAGS) and (add_flags & TYPE_FLAGS) != flag: - msg = "bad inline flags: flags 'a', 'u' and 'L' are incompatible" - raise source.error(msg) - char = sourceget() - if char is None: - raise source.error("missing -, : or )") - if char in ")-:": - break - if char not in FLAGS: - msg = "unknown flag" if char.isalpha() else "missing -, : or )" - raise source.error(msg, len(char)) - if char == ")": - state.flags |= add_flags - return None - if add_flags & GLOBAL_FLAGS: - raise source.error("bad inline flags: cannot turn on global flag", 1) - if char == "-": - char = sourceget() - if char is None: - raise source.error("missing flag") - if char not in FLAGS: - msg = "unknown flag" if char.isalpha() else "missing flag" - raise source.error(msg, len(char)) - while True: - flag = FLAGS[char] - if flag & TYPE_FLAGS: - msg = "bad inline flags: cannot turn off flags 'a', 'u' and 'L'" - raise source.error(msg) - del_flags |= flag - char = sourceget() - if char is None: - raise source.error("missing :") - if char == ":": - break - if char not in FLAGS: - msg = "unknown flag" if char.isalpha() else "missing :" - raise source.error(msg, len(char)) - assert char == ":" - if del_flags & GLOBAL_FLAGS: - raise source.error("bad inline flags: cannot turn off global flag", 1) - if add_flags & del_flags: - raise source.error("bad inline flags: flag turned on and off", 1) - return add_flags, del_flags - -def fix_flags(src, flags): - # Check and fix flags according to the type of pattern (str or bytes) - if isinstance(src, str): - if flags & SRE_FLAG_LOCALE: - raise ValueError("cannot use LOCALE flag with a str pattern") - if not flags & SRE_FLAG_ASCII: - flags |= SRE_FLAG_UNICODE - elif flags & SRE_FLAG_UNICODE: - raise ValueError("ASCII and UNICODE flags are incompatible") - else: - if flags & SRE_FLAG_UNICODE: - raise ValueError("cannot use UNICODE flag with a bytes pattern") - if flags & SRE_FLAG_LOCALE and flags & SRE_FLAG_ASCII: - raise ValueError("ASCII and 
LOCALE flags are incompatible") - return flags - -def parse(str, flags=0, state=None): - # parse 're' pattern into list of (opcode, argument) tuples - - source = Tokenizer(str) - - if state is None: - state = State() - state.flags = flags - state.str = str - - p = _parse_sub(source, state, flags & SRE_FLAG_VERBOSE, 0) - p.state.flags = fix_flags(str, p.state.flags) - - if source.next is not None: - assert source.next == ")" - raise source.error("unbalanced parenthesis") - - for g in p.state.grouprefpos: - if g >= p.state.groups: - msg = "invalid group reference %d" % g - raise error(msg, str, p.state.grouprefpos[g]) - - if flags & SRE_FLAG_DEBUG: - p.dump() - - return p - -def parse_template(source, pattern): - # parse 're' replacement string into list of literals and - # group references - s = Tokenizer(source) - sget = s.get - result = [] - literal = [] - lappend = literal.append - def addliteral(): - if s.istext: - result.append(''.join(literal)) - else: - # The tokenizer implicitly decodes bytes objects as latin-1, we must - # therefore re-encode the final representation. - result.append(''.join(literal).encode('latin-1')) - del literal[:] - def addgroup(index, pos): - if index > pattern.groups: - raise s.error("invalid group reference %d" % index, pos) - addliteral() - result.append(index) - groupindex = pattern.groupindex - while True: - this = sget() - if this is None: - break # end of replacement string - if this[0] == "\\": - # group - c = this[1] - if c == "g": - if not s.match("<"): - raise s.error("missing <") - name = s.getuntil(">", "group name") - if not (name.isdecimal() and name.isascii()): - s.checkgroupname(name, 1) - try: - index = groupindex[name] - except KeyError: - raise IndexError("unknown group name %r" % name) from None - else: - index = int(name) - if index >= MAXGROUPS: - raise s.error("invalid group reference %d" % index, - len(name) + 1) - if not (name.isdecimal() and name.isascii()): - import warnings - warnings.warn( - "bad character in group name %s at position %d" % - (repr(name) if s.istext else ascii(name), - s.tell() - len(name) - 1), - DeprecationWarning, stacklevel=5 - ) - addgroup(index, len(name) + 1) - elif c == "0": - if s.next in OCTDIGITS: - this += sget() - if s.next in OCTDIGITS: - this += sget() - lappend(chr(int(this[1:], 8) & 0xff)) - elif c in DIGITS: - isoctal = False - if s.next in DIGITS: - this += sget() - if (c in OCTDIGITS and this[2] in OCTDIGITS and - s.next in OCTDIGITS): - this += sget() - isoctal = True - c = int(this[1:], 8) - if c > 0o377: - raise s.error('octal escape value %s outside of ' - 'range 0-0o377' % this, len(this)) - lappend(chr(c)) - if not isoctal: - addgroup(int(this[1:]), len(this) - 1) - else: - try: - this = chr(ESCAPES[this][1]) - except KeyError: - if c in ASCIILETTERS: - raise s.error('bad escape %s' % this, len(this)) from None - lappend(this) - else: - lappend(this) - addliteral() - return result diff --git a/Python313_13_x64_Template/Lib/reprlib.py b/Python313_13_x64_Template/Lib/reprlib.py deleted file mode 100644 index f6831850..00000000 --- a/Python313_13_x64_Template/Lib/reprlib.py +++ /dev/null @@ -1,230 +0,0 @@ -"""Redo the builtin repr() (representation) but with limits on most sizes.""" - -__all__ = ["Repr", "repr", "recursive_repr"] - -import builtins -from itertools import islice -from _thread import get_ident - -def recursive_repr(fillvalue='...'): - 'Decorator to make a repr function return fillvalue for a recursive call' - - def decorating_function(user_function): - repr_running = set() - 
- def wrapper(self): - key = id(self), get_ident() - if key in repr_running: - return fillvalue - repr_running.add(key) - try: - result = user_function(self) - finally: - repr_running.discard(key) - return result - - # Can't use functools.wraps() here because of bootstrap issues - wrapper.__module__ = getattr(user_function, '__module__') - wrapper.__doc__ = getattr(user_function, '__doc__') - wrapper.__name__ = getattr(user_function, '__name__') - wrapper.__qualname__ = getattr(user_function, '__qualname__') - wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) - wrapper.__type_params__ = getattr(user_function, '__type_params__', ()) - wrapper.__wrapped__ = user_function - return wrapper - - return decorating_function - -class Repr: - _lookup = { - 'tuple': 'builtins', - 'list': 'builtins', - 'array': 'array', - 'set': 'builtins', - 'frozenset': 'builtins', - 'deque': 'collections', - 'dict': 'builtins', - 'str': 'builtins', - 'int': 'builtins' - } - - def __init__( - self, *, maxlevel=6, maxtuple=6, maxlist=6, maxarray=5, maxdict=4, - maxset=6, maxfrozenset=6, maxdeque=6, maxstring=30, maxlong=40, - maxother=30, fillvalue='...', indent=None, - ): - self.maxlevel = maxlevel - self.maxtuple = maxtuple - self.maxlist = maxlist - self.maxarray = maxarray - self.maxdict = maxdict - self.maxset = maxset - self.maxfrozenset = maxfrozenset - self.maxdeque = maxdeque - self.maxstring = maxstring - self.maxlong = maxlong - self.maxother = maxother - self.fillvalue = fillvalue - self.indent = indent - - def repr(self, x): - return self.repr1(x, self.maxlevel) - - def repr1(self, x, level): - cls = type(x) - typename = cls.__name__ - - if ' ' in typename: - parts = typename.split() - typename = '_'.join(parts) - - method = getattr(self, 'repr_' + typename, None) - if method: - # not defined in this class - if typename not in self._lookup: - return method(x, level) - module = getattr(cls, '__module__', None) - # defined in this class and is the module intended - if module == self._lookup[typename]: - return method(x, level) - - return self.repr_instance(x, level) - - def _join(self, pieces, level): - if self.indent is None: - return ', '.join(pieces) - if not pieces: - return '' - indent = self.indent - if isinstance(indent, int): - if indent < 0: - raise ValueError( - f'Repr.indent cannot be negative int (was {indent!r})' - ) - indent *= ' ' - try: - sep = ',\n' + (self.maxlevel - level + 1) * indent - except TypeError as error: - raise TypeError( - f'Repr.indent must be a str, int or None, not {type(indent)}' - ) from error - return sep.join(('', *pieces, ''))[1:-len(indent) or None] - - def _repr_iterable(self, x, level, left, right, maxiter, trail=''): - n = len(x) - if level <= 0 and n: - s = self.fillvalue - else: - newlevel = level - 1 - repr1 = self.repr1 - pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)] - if n > maxiter: - pieces.append(self.fillvalue) - s = self._join(pieces, level) - if n == 1 and trail and self.indent is None: - right = trail + right - return '%s%s%s' % (left, s, right) - - def repr_tuple(self, x, level): - return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',') - - def repr_list(self, x, level): - return self._repr_iterable(x, level, '[', ']', self.maxlist) - - def repr_array(self, x, level): - if not x: - return "array('%s')" % x.typecode - header = "array('%s', [" % x.typecode - return self._repr_iterable(x, level, header, '])', self.maxarray) - - def repr_set(self, x, level): - if not x: - return 'set()' - x = 
_possibly_sorted(x) - return self._repr_iterable(x, level, '{', '}', self.maxset) - - def repr_frozenset(self, x, level): - if not x: - return 'frozenset()' - x = _possibly_sorted(x) - return self._repr_iterable(x, level, 'frozenset({', '})', - self.maxfrozenset) - - def repr_deque(self, x, level): - return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque) - - def repr_dict(self, x, level): - n = len(x) - if n == 0: - return '{}' - if level <= 0: - return '{' + self.fillvalue + '}' - newlevel = level - 1 - repr1 = self.repr1 - pieces = [] - for key in islice(_possibly_sorted(x), self.maxdict): - keyrepr = repr1(key, newlevel) - valrepr = repr1(x[key], newlevel) - pieces.append('%s: %s' % (keyrepr, valrepr)) - if n > self.maxdict: - pieces.append(self.fillvalue) - s = self._join(pieces, level) - return '{%s}' % (s,) - - def repr_str(self, x, level): - s = builtins.repr(x[:self.maxstring]) - if len(s) > self.maxstring: - i = max(0, (self.maxstring-3)//2) - j = max(0, self.maxstring-3-i) - s = builtins.repr(x[:i] + x[len(x)-j:]) - s = s[:i] + self.fillvalue + s[len(s)-j:] - return s - - def repr_int(self, x, level): - try: - s = builtins.repr(x) - except ValueError as exc: - assert 'sys.set_int_max_str_digits()' in str(exc) - # Those imports must be deferred due to Python's build system - # where the reprlib module is imported before the math module. - import math, sys - # Integers with more than sys.get_int_max_str_digits() digits - # are rendered differently as their repr() raises a ValueError. - # See https://github.com/python/cpython/issues/135487. - k = 1 + int(math.log10(abs(x))) - # Note: math.log10(abs(x)) may be overestimated or underestimated, - # but for simplicity, we do not compute the exact number of digits. - max_digits = sys.get_int_max_str_digits() - return (f'<{x.__class__.__name__} instance with roughly {k} ' - f'digits (limit at {max_digits}) at 0x{id(x):x}>') - if len(s) > self.maxlong: - i = max(0, (self.maxlong-3)//2) - j = max(0, self.maxlong-3-i) - s = s[:i] + self.fillvalue + s[len(s)-j:] - return s - - def repr_instance(self, x, level): - try: - s = builtins.repr(x) - # Bugs in x.__repr__() can cause arbitrary - # exceptions -- then make up something - except Exception: - return '<%s instance at %#x>' % (x.__class__.__name__, id(x)) - if len(s) > self.maxother: - i = max(0, (self.maxother-3)//2) - j = max(0, self.maxother-3-i) - s = s[:i] + self.fillvalue + s[len(s)-j:] - return s - - -def _possibly_sorted(x): - # Since not all sequences of items can be sorted and comparison - # functions may raise arbitrary exceptions, return an unsorted - # sequence in that case. - try: - return sorted(x) - except Exception: - return list(x) - -aRepr = Repr() -repr = aRepr.repr diff --git a/Python313_13_x64_Template/Lib/shlex.py b/Python313_13_x64_Template/Lib/shlex.py deleted file mode 100644 index f4821616..00000000 --- a/Python313_13_x64_Template/Lib/shlex.py +++ /dev/null @@ -1,345 +0,0 @@ -"""A lexical analyzer class for simple shell-like syntaxes.""" - -# Module and documentation by Eric S. Raymond, 21 Dec 1998 -# Input stacking and error message cleanup added by ESR, March 2000 -# push_source() and pop_source() made explicit by ESR, January 2001. -# Posix compliance, split(), string arguments, and -# iterator interface by Gustavo Niemeyer, April 2003. -# changes to tokenize more like Posix shells by Vinay Sajip, July 2016. 
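For context, a minimal usage sketch of the helpers this module exposes (hypothetical example, not part of the deleted file):

    import shlex

    shlex.split("ls -l 'My Documents'")   # ['ls', '-l', 'My Documents']
    shlex.quote("it's here")              # returns a POSIX-shell-safe quoting
    shlex.join(["echo", "one two"])       # "echo 'one two'"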
- -import os -import re -import sys -from collections import deque - -from io import StringIO - -__all__ = ["shlex", "split", "quote", "join"] - -class shlex: - "A lexical analyzer class for simple shell-like syntaxes." - def __init__(self, instream=None, infile=None, posix=False, - punctuation_chars=False): - if isinstance(instream, str): - instream = StringIO(instream) - if instream is not None: - self.instream = instream - self.infile = infile - else: - self.instream = sys.stdin - self.infile = None - self.posix = posix - if posix: - self.eof = None - else: - self.eof = '' - self.commenters = '#' - self.wordchars = ('abcdfeghijklmnopqrstuvwxyz' - 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_') - if self.posix: - self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ' - 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ') - self.whitespace = ' \t\r\n' - self.whitespace_split = False - self.quotes = '\'"' - self.escape = '\\' - self.escapedquotes = '"' - self.state = ' ' - self.pushback = deque() - self.lineno = 1 - self.debug = 0 - self.token = '' - self.filestack = deque() - self.source = None - if not punctuation_chars: - punctuation_chars = '' - elif punctuation_chars is True: - punctuation_chars = '();<>|&' - self._punctuation_chars = punctuation_chars - if punctuation_chars: - # _pushback_chars is a push back queue used by lookahead logic - self._pushback_chars = deque() - # these chars added because allowed in file names, args, wildcards - self.wordchars += '~-./*?=' - #remove any punctuation chars from wordchars - t = self.wordchars.maketrans(dict.fromkeys(punctuation_chars)) - self.wordchars = self.wordchars.translate(t) - - @property - def punctuation_chars(self): - return self._punctuation_chars - - def push_token(self, tok): - "Push a token onto the stack popped by the get_token method" - if self.debug >= 1: - print("shlex: pushing token " + repr(tok)) - self.pushback.appendleft(tok) - - def push_source(self, newstream, newfile=None): - "Push an input source onto the lexer's input source stack." - if isinstance(newstream, str): - newstream = StringIO(newstream) - self.filestack.appendleft((self.infile, self.instream, self.lineno)) - self.infile = newfile - self.instream = newstream - self.lineno = 1 - if self.debug: - if newfile is not None: - print('shlex: pushing to file %s' % (self.infile,)) - else: - print('shlex: pushing to stream %s' % (self.instream,)) - - def pop_source(self): - "Pop the input source stack." - self.instream.close() - (self.infile, self.instream, self.lineno) = self.filestack.popleft() - if self.debug: - print('shlex: popping to %s, line %d' \ - % (self.instream, self.lineno)) - self.state = ' ' - - def get_token(self): - "Get a token from the input stream (or from stack if it's nonempty)" - if self.pushback: - tok = self.pushback.popleft() - if self.debug >= 1: - print("shlex: popping token " + repr(tok)) - return tok - # No pushback. Get a token. - raw = self.read_token() - # Handle inclusions - if self.source is not None: - while raw == self.source: - spec = self.sourcehook(self.read_token()) - if spec: - (newfile, newstream) = spec - self.push_source(newstream, newfile) - raw = self.get_token() - # Maybe we got EOF instead? 
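The pushback deque consulted at the top of get_token() above can be exercised directly; a hypothetical sketch:

    import shlex

    lex = shlex.shlex("alpha beta")
    lex.get_token()           # 'alpha'
    lex.push_token("gamma")   # pushed tokens are returned before the stream
    lex.get_token()           # 'gamma'
    lex.get_token()           # 'beta'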
- while raw == self.eof: - if not self.filestack: - return self.eof - else: - self.pop_source() - raw = self.get_token() - # Neither inclusion nor EOF - if self.debug >= 1: - if raw != self.eof: - print("shlex: token=" + repr(raw)) - else: - print("shlex: token=EOF") - return raw - - def read_token(self): - quoted = False - escapedstate = ' ' - while True: - if self.punctuation_chars and self._pushback_chars: - nextchar = self._pushback_chars.pop() - else: - nextchar = self.instream.read(1) - if nextchar == '\n': - self.lineno += 1 - if self.debug >= 3: - print("shlex: in state %r I see character: %r" % (self.state, - nextchar)) - if self.state is None: - self.token = '' # past end of file - break - elif self.state == ' ': - if not nextchar: - self.state = None # end of file - break - elif nextchar in self.whitespace: - if self.debug >= 2: - print("shlex: I see whitespace in whitespace state") - if self.token or (self.posix and quoted): - break # emit current token - else: - continue - elif nextchar in self.commenters: - self.instream.readline() - self.lineno += 1 - elif self.posix and nextchar in self.escape: - escapedstate = 'a' - self.state = nextchar - elif nextchar in self.wordchars: - self.token = nextchar - self.state = 'a' - elif nextchar in self.punctuation_chars: - self.token = nextchar - self.state = 'c' - elif nextchar in self.quotes: - if not self.posix: - self.token = nextchar - self.state = nextchar - elif self.whitespace_split: - self.token = nextchar - self.state = 'a' - else: - self.token = nextchar - if self.token or (self.posix and quoted): - break # emit current token - else: - continue - elif self.state in self.quotes: - quoted = True - if not nextchar: # end of file - if self.debug >= 2: - print("shlex: I see EOF in quotes state") - # XXX what error should be raised here? - raise ValueError("No closing quotation") - if nextchar == self.state: - if not self.posix: - self.token += nextchar - self.state = ' ' - break - else: - self.state = 'a' - elif (self.posix and nextchar in self.escape and self.state - in self.escapedquotes): - escapedstate = self.state - self.state = nextchar - else: - self.token += nextchar - elif self.state in self.escape: - if not nextchar: # end of file - if self.debug >= 2: - print("shlex: I see EOF in escape state") - # XXX what error should be raised here? - raise ValueError("No escaped character") - # In posix shells, only the quote itself or the escape - # character may be escaped within quotes. 
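A hypothetical illustration of that rule in POSIX mode: inside double quotes a backslash escapes only the quote character or another backslash, and is otherwise kept literally:

    import shlex

    shlex.split(r'"a \" b"')   # ['a " b']    backslash escapes the closing quote
    shlex.split(r'"a \n b"')   # ['a \\n b']  backslash before 'n' is kept as-is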
- if (escapedstate in self.quotes and - nextchar != self.state and nextchar != escapedstate): - self.token += self.state - self.token += nextchar - self.state = escapedstate - elif self.state in ('a', 'c'): - if not nextchar: - self.state = None # end of file - break - elif nextchar in self.whitespace: - if self.debug >= 2: - print("shlex: I see whitespace in word state") - self.state = ' ' - if self.token or (self.posix and quoted): - break # emit current token - else: - continue - elif nextchar in self.commenters: - self.instream.readline() - self.lineno += 1 - if self.posix: - self.state = ' ' - if self.token or (self.posix and quoted): - break # emit current token - else: - continue - elif self.state == 'c': - if nextchar in self.punctuation_chars: - self.token += nextchar - else: - if nextchar not in self.whitespace: - self._pushback_chars.append(nextchar) - self.state = ' ' - break - elif self.posix and nextchar in self.quotes: - self.state = nextchar - elif self.posix and nextchar in self.escape: - escapedstate = 'a' - self.state = nextchar - elif (nextchar in self.wordchars or nextchar in self.quotes - or (self.whitespace_split and - nextchar not in self.punctuation_chars)): - self.token += nextchar - else: - if self.punctuation_chars: - self._pushback_chars.append(nextchar) - else: - self.pushback.appendleft(nextchar) - if self.debug >= 2: - print("shlex: I see punctuation in word state") - self.state = ' ' - if self.token or (self.posix and quoted): - break # emit current token - else: - continue - result = self.token - self.token = '' - if self.posix and not quoted and result == '': - result = None - if self.debug > 1: - if result: - print("shlex: raw token=" + repr(result)) - else: - print("shlex: raw token=EOF") - return result - - def sourcehook(self, newfile): - "Hook called on a filename to be sourced." - if newfile[0] == '"': - newfile = newfile[1:-1] - # This implements cpp-like semantics for relative-path inclusion. - if isinstance(self.infile, str) and not os.path.isabs(newfile): - newfile = os.path.join(os.path.dirname(self.infile), newfile) - return (newfile, open(newfile, "r")) - - def error_leader(self, infile=None, lineno=None): - "Emit a C-compiler-like, Emacs-friendly error-message leader." 
- if infile is None: - infile = self.infile - if lineno is None: - lineno = self.lineno - return "\"%s\", line %d: " % (infile, lineno) - - def __iter__(self): - return self - - def __next__(self): - token = self.get_token() - if token == self.eof: - raise StopIteration - return token - -def split(s, comments=False, posix=True): - """Split the string *s* using shell-like syntax.""" - if s is None: - raise ValueError("s argument must not be None") - lex = shlex(s, posix=posix) - lex.whitespace_split = True - if not comments: - lex.commenters = '' - return list(lex) - - -def join(split_command): - """Return a shell-escaped string from *split_command*.""" - return ' '.join(quote(arg) for arg in split_command) - - -_find_unsafe = re.compile(r'[^\w@%+=:,./-]', re.ASCII).search - -def quote(s): - """Return a shell-escaped version of the string *s*.""" - if not s: - return "''" - if _find_unsafe(s) is None: - return s - - # use single quotes, and put single quotes into double quotes - # the string $'b is then quoted as '$'"'"'b' - return "'" + s.replace("'", "'\"'\"'") + "'" - - -def _print_tokens(lexer): - while tt := lexer.get_token(): - print("Token: " + repr(tt)) - -if __name__ == '__main__': - if len(sys.argv) == 1: - _print_tokens(shlex()) - else: - fn = sys.argv[1] - with open(fn) as f: - _print_tokens(shlex(f, fn)) diff --git a/Python313_13_x64_Template/Lib/shutil.py b/Python313_13_x64_Template/Lib/shutil.py deleted file mode 100644 index 7df97201..00000000 --- a/Python313_13_x64_Template/Lib/shutil.py +++ /dev/null @@ -1,1583 +0,0 @@ -"""Utility functions for copying and archiving files and directory trees. - -XXX The functions here don't copy the resource fork or other metadata on Mac. - -""" - -import os -import sys -import stat -import fnmatch -import collections -import errno - -try: - import zlib - del zlib - _ZLIB_SUPPORTED = True -except ImportError: - _ZLIB_SUPPORTED = False - -try: - import bz2 - del bz2 - _BZ2_SUPPORTED = True -except ImportError: - _BZ2_SUPPORTED = False - -try: - import lzma - del lzma - _LZMA_SUPPORTED = True -except ImportError: - _LZMA_SUPPORTED = False - -_WINDOWS = os.name == 'nt' -posix = nt = None -if os.name == 'posix': - import posix -elif _WINDOWS: - import nt - -if sys.platform == 'win32': - import _winapi -else: - _winapi = None - -COPY_BUFSIZE = 1024 * 1024 if _WINDOWS else 64 * 1024 -# This should never be removed, see rationale in: -# https://bugs.python.org/issue43743#msg393429 -_USE_CP_SENDFILE = (hasattr(os, "sendfile") - and sys.platform.startswith(("linux", "android"))) -_HAS_FCOPYFILE = posix and hasattr(posix, "_fcopyfile") # macOS - -# CMD defaults in Windows 10 -_WIN_DEFAULT_PATHEXT = ".COM;.EXE;.BAT;.CMD;.VBS;.JS;.WS;.MSC" - -__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", - "copytree", "move", "rmtree", "Error", "SpecialFileError", - "ExecError", "make_archive", "get_archive_formats", - "register_archive_format", "unregister_archive_format", - "get_unpack_formats", "register_unpack_format", - "unregister_unpack_format", "unpack_archive", - "ignore_patterns", "chown", "which", "get_terminal_size", - "SameFileError"] - # disk_usage is added later, if available on the platform - -class Error(OSError): - pass - -class SameFileError(Error): - """Raised when source and destination are the same file.""" - -class SpecialFileError(OSError): - """Raised when trying to do a kind of operation (e.g. copying) which is - not supported on a special file (e.g. 
a named pipe)""" - -class ExecError(OSError): - """Raised when a command could not be executed""" - -class ReadError(OSError): - """Raised when an archive cannot be read""" - -class RegistryError(Exception): - """Raised when a registry operation with the archiving - and unpacking registries fails""" - -class _GiveupOnFastCopy(Exception): - """Raised as a signal to fallback on using raw read()/write() - file copy when fast-copy functions fail to do so. - """ - -def _fastcopy_fcopyfile(fsrc, fdst, flags): - """Copy a regular file content or metadata by using high-performance - fcopyfile(3) syscall (macOS). - """ - try: - infd = fsrc.fileno() - outfd = fdst.fileno() - except Exception as err: - raise _GiveupOnFastCopy(err) # not a regular file - - try: - posix._fcopyfile(infd, outfd, flags) - except OSError as err: - err.filename = fsrc.name - err.filename2 = fdst.name - if err.errno in {errno.EINVAL, errno.ENOTSUP}: - raise _GiveupOnFastCopy(err) - else: - raise err from None - -def _fastcopy_sendfile(fsrc, fdst): - """Copy data from one regular mmap-like fd to another by using - high-performance sendfile(2) syscall. - This should work on Linux >= 2.6.33 only. - """ - # Note: copyfileobj() is left alone in order to not introduce any - # unexpected breakage. Possible risks by using zero-copy calls - # in copyfileobj() are: - # - fdst cannot be open in "a"(ppend) mode - # - fsrc and fdst may be open in "t"(ext) mode - # - fsrc may be a BufferedReader (which hides unread data in a buffer), - # GzipFile (which decompresses data), HTTPResponse (which decodes - # chunks). - # - possibly others (e.g. encrypted fs/partition?) - global _USE_CP_SENDFILE - try: - infd = fsrc.fileno() - outfd = fdst.fileno() - except Exception as err: - raise _GiveupOnFastCopy(err) # not a regular file - - # Hopefully the whole file will be copied in a single call. - # sendfile() is called in a loop 'till EOF is reached (0 return) - # so a bufsize smaller or bigger than the actual file size - # should not make any difference, also in case the file content - # changes while being copied. - try: - blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8MiB - except OSError: - blocksize = 2 ** 27 # 128MiB - # On 32-bit architectures truncate to 1GiB to avoid OverflowError, - # see bpo-38319. - if sys.maxsize < 2 ** 32: - blocksize = min(blocksize, 2 ** 30) - - offset = 0 - while True: - try: - sent = os.sendfile(outfd, infd, offset, blocksize) - except OSError as err: - # ...in oder to have a more informative exception. - err.filename = fsrc.name - err.filename2 = fdst.name - - if err.errno == errno.ENOTSOCK: - # sendfile() on this platform (probably Linux < 2.6.33) - # does not support copies between regular files (only - # sockets). - _USE_CP_SENDFILE = False - raise _GiveupOnFastCopy(err) - - if err.errno == errno.ENOSPC: # filesystem is full - raise err from None - - # Give up on first call and if no data was copied. - if offset == 0 and os.lseek(outfd, 0, os.SEEK_CUR) == 0: - raise _GiveupOnFastCopy(err) - - raise err - else: - if sent == 0: - break # EOF - offset += sent - -def _copyfileobj_readinto(fsrc, fdst, length=COPY_BUFSIZE): - """readinto()/memoryview() based variant of copyfileobj(). - *fsrc* must support readinto() method and both files must be - open in binary mode. - """ - # Localize variable access to minimize overhead. 
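Stripped of the EOF, errno, and blocksize handling above, the sendfile() fast path boils down to this loop (a Linux-only sketch, not the exact code):

import os

def sendfile_copy(src, dst, blocksize=2 ** 20):
    # os.sendfile() copies inside the kernel; data never enters user space.
    with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
        offset = 0
        while True:
            sent = os.sendfile(fdst.fileno(), fsrc.fileno(), offset, blocksize)
            if sent == 0:   # EOF
                break
            offset += sent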
- fsrc_readinto = fsrc.readinto - fdst_write = fdst.write - with memoryview(bytearray(length)) as mv: - while True: - n = fsrc_readinto(mv) - if not n: - break - elif n < length: - with mv[:n] as smv: - fdst_write(smv) - break - else: - fdst_write(mv) - -def copyfileobj(fsrc, fdst, length=0): - """copy data from file-like object fsrc to file-like object fdst""" - if not length: - length = COPY_BUFSIZE - # Localize variable access to minimize overhead. - fsrc_read = fsrc.read - fdst_write = fdst.write - while buf := fsrc_read(length): - fdst_write(buf) - -def _samefile(src, dst): - # Macintosh, Unix. - if isinstance(src, os.DirEntry) and hasattr(os.path, 'samestat'): - try: - return os.path.samestat(src.stat(), os.stat(dst)) - except OSError: - return False - - if hasattr(os.path, 'samefile'): - try: - return os.path.samefile(src, dst) - except OSError: - return False - - # All other platforms: check for same pathname. - return (os.path.normcase(os.path.abspath(src)) == - os.path.normcase(os.path.abspath(dst))) - -def _stat(fn): - return fn.stat() if isinstance(fn, os.DirEntry) else os.stat(fn) - -def _islink(fn): - return fn.is_symlink() if isinstance(fn, os.DirEntry) else os.path.islink(fn) - -def copyfile(src, dst, *, follow_symlinks=True): - """Copy data from src to dst in the most efficient way possible. - - If follow_symlinks is not set and src is a symbolic link, a new - symlink will be created instead of copying the file it points to. - - """ - sys.audit("shutil.copyfile", src, dst) - - if _samefile(src, dst): - raise SameFileError("{!r} and {!r} are the same file".format(src, dst)) - - file_size = 0 - for i, fn in enumerate([src, dst]): - try: - st = _stat(fn) - except OSError: - # File most likely does not exist - pass - else: - # XXX What about other special files? (sockets, devices...) - if stat.S_ISFIFO(st.st_mode): - fn = fn.path if isinstance(fn, os.DirEntry) else fn - raise SpecialFileError("`%s` is a named pipe" % fn) - if _WINDOWS and i == 0: - file_size = st.st_size - - if not follow_symlinks and _islink(src): - os.symlink(os.readlink(src), dst) - else: - with open(src, 'rb') as fsrc: - try: - with open(dst, 'wb') as fdst: - # macOS - if _HAS_FCOPYFILE: - try: - _fastcopy_fcopyfile(fsrc, fdst, posix._COPYFILE_DATA) - return dst - except _GiveupOnFastCopy: - pass - # Linux - elif _USE_CP_SENDFILE: - try: - _fastcopy_sendfile(fsrc, fdst) - return dst - except _GiveupOnFastCopy: - pass - # Windows, see: - # https://github.com/python/cpython/pull/7160#discussion_r195405230 - elif _WINDOWS and file_size > 0: - _copyfileobj_readinto(fsrc, fdst, min(file_size, COPY_BUFSIZE)) - return dst - - copyfileobj(fsrc, fdst) - - # Issue 43219, raise a less confusing exception - except IsADirectoryError as e: - if not os.path.exists(dst): - raise FileNotFoundError(f'Directory does not exist: {dst}') from e - else: - raise - - return dst - -def copymode(src, dst, *, follow_symlinks=True): - """Copy mode bits from src to dst. - - If follow_symlinks is not set, symlinks aren't followed if and only - if both `src` and `dst` are symlinks. If `lchmod` isn't available - (e.g. Linux) this method does nothing. 
- - """ - sys.audit("shutil.copymode", src, dst) - - if not follow_symlinks and _islink(src) and os.path.islink(dst): - if hasattr(os, 'lchmod'): - stat_func, chmod_func = os.lstat, os.lchmod - else: - return - else: - stat_func = _stat - if os.name == 'nt' and os.path.islink(dst): - def chmod_func(*args): - os.chmod(*args, follow_symlinks=True) - else: - chmod_func = os.chmod - - st = stat_func(src) - chmod_func(dst, stat.S_IMODE(st.st_mode)) - -if hasattr(os, 'listxattr'): - def _copyxattr(src, dst, *, follow_symlinks=True): - """Copy extended filesystem attributes from `src` to `dst`. - - Overwrite existing attributes. - - If `follow_symlinks` is false, symlinks won't be followed. - - """ - - try: - names = os.listxattr(src, follow_symlinks=follow_symlinks) - except OSError as e: - if e.errno not in (errno.ENOTSUP, errno.ENODATA, errno.EINVAL): - raise - return - for name in names: - try: - value = os.getxattr(src, name, follow_symlinks=follow_symlinks) - os.setxattr(dst, name, value, follow_symlinks=follow_symlinks) - except OSError as e: - if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA, - errno.EINVAL, errno.EACCES): - raise -else: - def _copyxattr(*args, **kwargs): - pass - -def copystat(src, dst, *, follow_symlinks=True): - """Copy file metadata - - Copy the permission bits, last access time, last modification time, and - flags from `src` to `dst`. On Linux, copystat() also copies the "extended - attributes" where possible. The file contents, owner, and group are - unaffected. `src` and `dst` are path-like objects or path names given as - strings. - - If the optional flag `follow_symlinks` is not set, symlinks aren't - followed if and only if both `src` and `dst` are symlinks. - """ - sys.audit("shutil.copystat", src, dst) - - def _nop(*args, ns=None, follow_symlinks=None): - pass - - # follow symlinks (aka don't not follow symlinks) - follow = follow_symlinks or not (_islink(src) and os.path.islink(dst)) - if follow: - # use the real function if it exists - def lookup(name): - return getattr(os, name, _nop) - else: - # use the real function only if it exists - # *and* it supports follow_symlinks - def lookup(name): - fn = getattr(os, name, _nop) - if fn in os.supports_follow_symlinks: - return fn - return _nop - - if isinstance(src, os.DirEntry): - st = src.stat(follow_symlinks=follow) - else: - st = lookup("stat")(src, follow_symlinks=follow) - mode = stat.S_IMODE(st.st_mode) - lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns), - follow_symlinks=follow) - # We must copy extended attributes before the file is (potentially) - # chmod()'ed read-only, otherwise setxattr() will error with -EACCES. - _copyxattr(src, dst, follow_symlinks=follow) - try: - lookup("chmod")(dst, mode, follow_symlinks=follow) - except NotImplementedError: - # if we got a NotImplementedError, it's because - # * follow_symlinks=False, - # * lchown() is unavailable, and - # * either - # * fchownat() is unavailable or - # * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW. - # (it returned ENOSUP.) - # therefore we're out of options--we simply cannot chown the - # symlink. give up, suppress the error. - # (which is what shutil always did in this circumstance.) 
- pass - if hasattr(st, 'st_flags'): - try: - lookup("chflags")(dst, st.st_flags, follow_symlinks=follow) - except OSError as why: - for err in 'EOPNOTSUPP', 'ENOTSUP': - if hasattr(errno, err) and why.errno == getattr(errno, err): - break - else: - raise - -def copy(src, dst, *, follow_symlinks=True): - """Copy data and mode bits ("cp src dst"). Return the file's destination. - - The destination may be a directory. - - If follow_symlinks is false, symlinks won't be followed. This - resembles GNU's "cp -P src dst". - - If source and destination are the same file, a SameFileError will be - raised. - - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - copyfile(src, dst, follow_symlinks=follow_symlinks) - copymode(src, dst, follow_symlinks=follow_symlinks) - return dst - -def copy2(src, dst, *, follow_symlinks=True): - """Copy data and metadata. Return the file's destination. - - Metadata is copied with copystat(). Please see the copystat function - for more information. - - The destination may be a directory. - - If follow_symlinks is false, symlinks won't be followed. This - resembles GNU's "cp -P src dst". - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - - if hasattr(_winapi, "CopyFile2"): - src_ = os.fsdecode(src) - dst_ = os.fsdecode(dst) - flags = _winapi.COPY_FILE_ALLOW_DECRYPTED_DESTINATION # for compat - if not follow_symlinks: - flags |= _winapi.COPY_FILE_COPY_SYMLINK - try: - _winapi.CopyFile2(src_, dst_, flags) - return dst - except OSError as exc: - if (exc.winerror == _winapi.ERROR_PRIVILEGE_NOT_HELD - and not follow_symlinks): - # Likely encountered a symlink we aren't allowed to create. - # Fall back on the old code - pass - elif exc.winerror == _winapi.ERROR_ACCESS_DENIED: - # Possibly encountered a hidden or readonly file we can't - # overwrite. Fall back on old code - pass - else: - raise - - copyfile(src, dst, follow_symlinks=follow_symlinks) - copystat(src, dst, follow_symlinks=follow_symlinks) - return dst - -def ignore_patterns(*patterns): - """Function that can be used as copytree() ignore parameter. - - Patterns is a sequence of glob-style patterns - that are used to exclude files""" - def _ignore_patterns(path, names): - ignored_names = [] - for pattern in patterns: - ignored_names.extend(fnmatch.filter(names, pattern)) - return set(ignored_names) - return _ignore_patterns - -def _copytree(entries, src, dst, symlinks, ignore, copy_function, - ignore_dangling_symlinks, dirs_exist_ok=False): - if ignore is not None: - ignored_names = ignore(os.fspath(src), [x.name for x in entries]) - else: - ignored_names = () - - os.makedirs(dst, exist_ok=dirs_exist_ok) - errors = [] - use_srcentry = copy_function is copy2 or copy_function is copy - - for srcentry in entries: - if srcentry.name in ignored_names: - continue - srcname = os.path.join(src, srcentry.name) - dstname = os.path.join(dst, srcentry.name) - srcobj = srcentry if use_srcentry else srcname - try: - is_symlink = srcentry.is_symlink() - if is_symlink and os.name == 'nt': - # Special check for directory junctions, which appear as - # symlinks but we want to recurse. - lstat = srcentry.stat(follow_symlinks=False) - if lstat.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT: - is_symlink = False - if is_symlink: - linkto = os.readlink(srcname) - if symlinks: - # We can't just leave it to `copy_function` because legacy - # code with a custom `copy_function` may rely on copytree - # doing the right thing. 
- os.symlink(linkto, dstname) - copystat(srcobj, dstname, follow_symlinks=not symlinks) - else: - # ignore dangling symlink if the flag is on - if not os.path.exists(linkto) and ignore_dangling_symlinks: - continue - # otherwise let the copy occur. copy2 will raise an error - if srcentry.is_dir(): - copytree(srcobj, dstname, symlinks, ignore, - copy_function, ignore_dangling_symlinks, - dirs_exist_ok) - else: - copy_function(srcobj, dstname) - elif srcentry.is_dir(): - copytree(srcobj, dstname, symlinks, ignore, copy_function, - ignore_dangling_symlinks, dirs_exist_ok) - else: - # Will raise a SpecialFileError for unsupported file types - copy_function(srcobj, dstname) - # catch the Error from the recursive copytree so that we can - # continue with other files - except Error as err: - errors.extend(err.args[0]) - except OSError as why: - errors.append((srcname, dstname, str(why))) - try: - copystat(src, dst) - except OSError as why: - # Copying file access times may fail on Windows - if getattr(why, 'winerror', None) is None: - errors.append((src, dst, str(why))) - if errors: - raise Error(errors) - return dst - -def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, - ignore_dangling_symlinks=False, dirs_exist_ok=False): - """Recursively copy a directory tree and return the destination directory. - - If exception(s) occur, an Error is raised with a list of reasons. - - If the optional symlinks flag is true, symbolic links in the - source tree result in symbolic links in the destination tree; if - it is false, the contents of the files pointed to by symbolic - links are copied. If the file pointed to by the symlink doesn't - exist, an exception will be added in the list of errors raised in - an Error exception at the end of the copy process. - - You can set the optional ignore_dangling_symlinks flag to true if you - want to silence this exception. Notice that this has no effect on - platforms that don't support os.symlink. - - The optional ignore argument is a callable. If given, it - is called with the `src` parameter, which is the directory - being visited by copytree(), and `names` which is the list of - `src` contents, as returned by os.listdir(): - - callable(src, names) -> ignored_names - - Since copytree() is called recursively, the callable will be - called once for each directory that is copied. It returns a - list of names relative to the `src` directory that should - not be copied. - - The optional copy_function argument is a callable that will be used - to copy each file. It will be called with the source path and the - destination path as arguments. By default, copy2() is used, but any - function that supports the same signature (like copy()) can be used. - - If dirs_exist_ok is false (the default) and `dst` already exists, a - `FileExistsError` is raised. If `dirs_exist_ok` is true, the copying - operation will continue if it encounters existing directories, and files - within the `dst` tree will be overwritten by corresponding files from the - `src` tree. 
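Combined with ignore_patterns() above, copytree() covers the common "copy everything except build junk" case from the docstring (directory names are placeholders):

import shutil

shutil.copytree(
    'project', 'project.bak',
    ignore=shutil.ignore_patterns('*.pyc', '__pycache__', '.git'),
    dirs_exist_ok=True,   # merge into an existing destination tree
)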
- """ - sys.audit("shutil.copytree", src, dst) - with os.scandir(src) as itr: - entries = list(itr) - return _copytree(entries=entries, src=src, dst=dst, symlinks=symlinks, - ignore=ignore, copy_function=copy_function, - ignore_dangling_symlinks=ignore_dangling_symlinks, - dirs_exist_ok=dirs_exist_ok) - -if hasattr(os.stat_result, 'st_file_attributes'): - def _rmtree_islink(st): - return (stat.S_ISLNK(st.st_mode) or - (st.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT - and st.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT)) -else: - def _rmtree_islink(st): - return stat.S_ISLNK(st.st_mode) - -# version vulnerable to race conditions -def _rmtree_unsafe(path, onexc): - def onerror(err): - if not isinstance(err, FileNotFoundError): - onexc(os.scandir, err.filename, err) - results = os.walk(path, topdown=False, onerror=onerror, followlinks=os._walk_symlinks_as_files) - for dirpath, dirnames, filenames in results: - for name in dirnames: - fullname = os.path.join(dirpath, name) - try: - os.rmdir(fullname) - except FileNotFoundError: - continue - except OSError as err: - onexc(os.rmdir, fullname, err) - for name in filenames: - fullname = os.path.join(dirpath, name) - try: - os.unlink(fullname) - except FileNotFoundError: - continue - except OSError as err: - onexc(os.unlink, fullname, err) - try: - os.rmdir(path) - except FileNotFoundError: - pass - except OSError as err: - onexc(os.rmdir, path, err) - -# Version using fd-based APIs to protect against races -def _rmtree_safe_fd(stack, onexc): - # Each stack item has four elements: - # * func: The first operation to perform: os.lstat, os.close or os.rmdir. - # Walking a directory starts with an os.lstat() to detect symlinks; in - # this case, func is updated before subsequent operations and passed to - # onexc() if an error occurs. - # * dirfd: Open file descriptor, or None if we're processing the top-level - # directory given to rmtree() and the user didn't supply dir_fd. - # * path: Path of file to operate upon. This is passed to onexc() if an - # error occurs. - # * orig_entry: os.DirEntry, or None if we're processing the top-level - # directory given to rmtree(). We used the cached stat() of the entry to - # save a call to os.lstat() when walking subdirectories. - func, dirfd, path, orig_entry = stack.pop() - name = path if orig_entry is None else orig_entry.name - try: - if func is os.close: - os.close(dirfd) - return - if func is os.rmdir: - os.rmdir(name, dir_fd=dirfd) - return - - # Note: To guard against symlink races, we use the standard - # lstat()/open()/fstat() trick. - assert func is os.lstat - if orig_entry is None: - orig_st = os.lstat(name, dir_fd=dirfd) - else: - orig_st = orig_entry.stat(follow_symlinks=False) - - func = os.open # For error reporting. - topfd = os.open(name, os.O_RDONLY | os.O_NONBLOCK, dir_fd=dirfd) - - func = os.path.islink # For error reporting. - try: - if not os.path.samestat(orig_st, os.fstat(topfd)): - # Symlinks to directories are forbidden, see GH-46010. - raise OSError("Cannot call rmtree on a symbolic link") - stack.append((os.rmdir, dirfd, path, orig_entry)) - finally: - stack.append((os.close, topfd, path, orig_entry)) - - func = os.scandir # For error reporting. - with os.scandir(topfd) as scandir_it: - entries = list(scandir_it) - for entry in entries: - fullname = os.path.join(path, entry.name) - try: - if entry.is_dir(follow_symlinks=False): - # Traverse into sub-directory. 
- stack.append((os.lstat, topfd, fullname, entry)) - continue - except FileNotFoundError: - continue - except OSError: - pass - try: - os.unlink(entry.name, dir_fd=topfd) - except FileNotFoundError: - continue - except OSError as err: - onexc(os.unlink, fullname, err) - except FileNotFoundError as err: - if orig_entry is None or func is os.close: - err.filename = path - onexc(func, path, err) - except OSError as err: - err.filename = path - onexc(func, path, err) - -_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <= - os.supports_dir_fd and - os.scandir in os.supports_fd and - os.stat in os.supports_follow_symlinks) - -def rmtree(path, ignore_errors=False, onerror=None, *, onexc=None, dir_fd=None): - """Recursively delete a directory tree. - - If dir_fd is not None, it should be a file descriptor open to a directory; - path will then be relative to that directory. - dir_fd may not be implemented on your platform. - If it is unavailable, using it will raise a NotImplementedError. - - If ignore_errors is set, errors are ignored; otherwise, if onexc or - onerror is set, it is called to handle the error with arguments (func, - path, exc_info) where func is platform and implementation dependent; - path is the argument to that function that caused it to fail; and - the value of exc_info describes the exception. For onexc it is the - exception instance, and for onerror it is a tuple as returned by - sys.exc_info(). If ignore_errors is false and both onexc and - onerror are None, the exception is reraised. - - onerror is deprecated and only remains for backwards compatibility. - If both onerror and onexc are set, onerror is ignored and onexc is used. - """ - - sys.audit("shutil.rmtree", path, dir_fd) - if ignore_errors: - def onexc(*args): - pass - elif onerror is None and onexc is None: - def onexc(*args): - raise - elif onexc is None: - if onerror is None: - def onexc(*args): - raise - else: - # delegate to onerror - def onexc(*args): - func, path, exc = args - if exc is None: - exc_info = None, None, None - else: - exc_info = type(exc), exc, exc.__traceback__ - return onerror(func, path, exc_info) - - if _use_fd_functions: - # While the unsafe rmtree works fine on bytes, the fd based does not. - if isinstance(path, bytes): - path = os.fsdecode(path) - stack = [(os.lstat, dir_fd, path, None)] - try: - while stack: - _rmtree_safe_fd(stack, onexc) - finally: - # Close any file descriptors still on the stack. - while stack: - func, fd, path, entry = stack.pop() - if func is not os.close: - continue - try: - os.close(fd) - except OSError as err: - onexc(os.close, path, err) - else: - if dir_fd is not None: - raise NotImplementedError("dir_fd unavailable on this platform") - try: - st = os.lstat(path) - except OSError as err: - onexc(os.lstat, path, err) - return - try: - if _rmtree_islink(st): - # symlinks to directories are forbidden, see bug #1669 - raise OSError("Cannot call rmtree on a symbolic link") - except OSError as err: - onexc(os.path.islink, path, err) - # can't continue even if onexc hook returns - return - return _rmtree_unsafe(path, onexc) - -# Allow introspection of whether or not the hardening against symlink -# attacks is supported on the current platform -rmtree.avoids_symlink_attacks = _use_fd_functions - -def _basename(path): - """A basename() variant which first strips the trailing slash, if present. - Thus we always get the last component of the path, even for directories. - - path: Union[PathLike, str] - - e.g. 
- >>> os.path.basename('/bar/foo') - 'foo' - >>> os.path.basename('/bar/foo/') - '' - >>> _basename('/bar/foo/') - 'foo' - """ - path = os.fspath(path) - sep = os.path.sep + (os.path.altsep or '') - return os.path.basename(path.rstrip(sep)) - -def move(src, dst, copy_function=copy2): - """Recursively move a file or directory to another location. This is - similar to the Unix "mv" command. Return the file or directory's - destination. - - If dst is an existing directory or a symlink to a directory, then src is - moved inside that directory. The destination path in that directory must - not already exist. - - If dst already exists but is not a directory, it may be overwritten - depending on os.rename() semantics. - - If the destination is on our current filesystem, then rename() is used. - Otherwise, src is copied to the destination and then removed. Symlinks are - recreated under the new name if os.rename() fails because of cross - filesystem renames. - - The optional `copy_function` argument is a callable that will be used - to copy the source or it will be delegated to `copytree`. - By default, copy2() is used, but any function that supports the same - signature (like copy()) can be used. - - A lot more could be done here... A look at a mv.c shows a lot of - the issues this implementation glosses over. - - """ - sys.audit("shutil.move", src, dst) - real_dst = dst - if os.path.isdir(dst): - if _samefile(src, dst) and not os.path.islink(src): - # We might be on a case insensitive filesystem, - # perform the rename anyway. - os.rename(src, dst) - return - - # Using _basename instead of os.path.basename is important, as we must - # ignore any trailing slash to avoid the basename returning '' - real_dst = os.path.join(dst, _basename(src)) - - if os.path.exists(real_dst): - raise Error("Destination path '%s' already exists" % real_dst) - try: - os.rename(src, real_dst) - except OSError: - if os.path.islink(src): - linkto = os.readlink(src) - os.symlink(linkto, real_dst) - os.unlink(src) - elif os.path.isdir(src): - if _destinsrc(src, dst): - raise Error("Cannot move a directory '%s' into itself" - " '%s'." % (src, dst)) - if (_is_immutable(src) - or (not os.access(src, os.W_OK) and os.listdir(src) - and sys.platform == 'darwin')): - raise PermissionError("Cannot move the non-empty directory " - "'%s': Lacking write permission to '%s'." 
- % (src, src)) - copytree(src, real_dst, copy_function=copy_function, - symlinks=True) - rmtree(src) - else: - copy_function(src, real_dst) - os.unlink(src) - return real_dst - -def _destinsrc(src, dst): - src = os.path.abspath(src) - dst = os.path.abspath(dst) - if not src.endswith(os.path.sep): - src += os.path.sep - if not dst.endswith(os.path.sep): - dst += os.path.sep - return dst.startswith(src) - -def _is_immutable(src): - st = _stat(src) - immutable_states = [stat.UF_IMMUTABLE, stat.SF_IMMUTABLE] - return hasattr(st, 'st_flags') and st.st_flags in immutable_states - -def _get_gid(name): - """Returns a gid, given a group name.""" - if name is None: - return None - - try: - from grp import getgrnam - except ImportError: - return None - - try: - result = getgrnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _get_uid(name): - """Returns an uid, given a user name.""" - if name is None: - return None - - try: - from pwd import getpwnam - except ImportError: - return None - - try: - result = getpwnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, - owner=None, group=None, logger=None, root_dir=None): - """Create a (possibly compressed) tar file from all the files under - 'base_dir'. - - 'compress' must be "gzip" (the default), "bzip2", "xz", or None. - - 'owner' and 'group' can be used to define an owner and a group for the - archive that is being built. If not provided, the current owner and group - will be used. - - The output tar file will be named 'base_name' + ".tar", possibly plus - the appropriate compression extension (".gz", ".bz2", or ".xz"). - - Returns the output filename. - """ - if compress is None: - tar_compression = '' - elif _ZLIB_SUPPORTED and compress == 'gzip': - tar_compression = 'gz' - elif _BZ2_SUPPORTED and compress == 'bzip2': - tar_compression = 'bz2' - elif _LZMA_SUPPORTED and compress == 'xz': - tar_compression = 'xz' - else: - raise ValueError("bad value for 'compress', or compression format not " - "supported : {0}".format(compress)) - - import tarfile # late import for breaking circular dependency - - compress_ext = '.' + tar_compression if compress else '' - archive_name = base_name + '.tar' + compress_ext - archive_dir = os.path.dirname(archive_name) - - if archive_dir and not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - # creating the tarball - if logger is not None: - logger.info('Creating tar archive') - - uid = _get_uid(owner) - gid = _get_gid(group) - - def _set_uid_gid(tarinfo): - if gid is not None: - tarinfo.gid = gid - tarinfo.gname = group - if uid is not None: - tarinfo.uid = uid - tarinfo.uname = owner - return tarinfo - - if not dry_run: - tar = tarfile.open(archive_name, 'w|%s' % tar_compression) - arcname = base_dir - if root_dir is not None: - base_dir = os.path.join(root_dir, base_dir) - try: - tar.add(base_dir, arcname, filter=_set_uid_gid) - finally: - tar.close() - - if root_dir is not None: - archive_name = os.path.abspath(archive_name) - return archive_name - -def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, - logger=None, owner=None, group=None, root_dir=None): - """Create a zip file from all the files under 'base_dir'. - - The output zip file will be named 'base_name' + ".zip". Returns the - name of the output zip file. 
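Both archive builders are normally reached through make_archive(), which resolves the format name and the root_dir/base_dir pair, e.g. (paths are placeholders):

import shutil

# Creates ./myapp-1.0.tar.gz with 'myapp' as the top-level directory inside it.
shutil.make_archive('myapp-1.0', 'gztar', root_dir='dist', base_dir='myapp')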
- """ - import zipfile # late import for breaking circular dependency - - zip_filename = base_name + ".zip" - archive_dir = os.path.dirname(base_name) - - if archive_dir and not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - if logger is not None: - logger.info("creating '%s' and adding '%s' to it", - zip_filename, base_dir) - - if not dry_run: - with zipfile.ZipFile(zip_filename, "w", - compression=zipfile.ZIP_DEFLATED) as zf: - arcname = os.path.normpath(base_dir) - if root_dir is not None: - base_dir = os.path.join(root_dir, base_dir) - base_dir = os.path.normpath(base_dir) - if arcname != os.curdir: - zf.write(base_dir, arcname) - if logger is not None: - logger.info("adding '%s'", base_dir) - for dirpath, dirnames, filenames in os.walk(base_dir): - arcdirpath = dirpath - if root_dir is not None: - arcdirpath = os.path.relpath(arcdirpath, root_dir) - arcdirpath = os.path.normpath(arcdirpath) - for name in sorted(dirnames): - path = os.path.join(dirpath, name) - arcname = os.path.join(arcdirpath, name) - zf.write(path, arcname) - if logger is not None: - logger.info("adding '%s'", path) - for name in filenames: - path = os.path.join(dirpath, name) - path = os.path.normpath(path) - if os.path.isfile(path): - arcname = os.path.join(arcdirpath, name) - zf.write(path, arcname) - if logger is not None: - logger.info("adding '%s'", path) - - if root_dir is not None: - zip_filename = os.path.abspath(zip_filename) - return zip_filename - -_make_tarball.supports_root_dir = True -_make_zipfile.supports_root_dir = True - -# Maps the name of the archive format to a tuple containing: -# * the archiving function -# * extra keyword arguments -# * description -_ARCHIVE_FORMATS = { - 'tar': (_make_tarball, [('compress', None)], - "uncompressed tar file"), -} - -if _ZLIB_SUPPORTED: - _ARCHIVE_FORMATS['gztar'] = (_make_tarball, [('compress', 'gzip')], - "gzip'ed tar-file") - _ARCHIVE_FORMATS['zip'] = (_make_zipfile, [], "ZIP file") - -if _BZ2_SUPPORTED: - _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], - "bzip2'ed tar-file") - -if _LZMA_SUPPORTED: - _ARCHIVE_FORMATS['xztar'] = (_make_tarball, [('compress', 'xz')], - "xz'ed tar-file") - -def get_archive_formats(): - """Returns a list of supported formats for archiving and unarchiving. - - Each element of the returned sequence is a tuple (name, description) - """ - formats = [(name, registry[2]) for name, registry in - _ARCHIVE_FORMATS.items()] - formats.sort() - return formats - -def register_archive_format(name, function, extra_args=None, description=''): - """Registers an archive format. - - name is the name of the format. function is the callable that will be - used to create archives. If provided, extra_args is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_archive_formats() function. 
- """ - if extra_args is None: - extra_args = [] - if not callable(function): - raise TypeError('The %s object is not callable' % function) - if not isinstance(extra_args, (tuple, list)): - raise TypeError('extra_args needs to be a sequence') - for element in extra_args: - if not isinstance(element, (tuple, list)) or len(element) !=2: - raise TypeError('extra_args elements are : (arg_name, value)') - - _ARCHIVE_FORMATS[name] = (function, extra_args, description) - -def unregister_archive_format(name): - del _ARCHIVE_FORMATS[name] - -def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, - dry_run=0, owner=None, group=None, logger=None): - """Create an archive file (eg. zip or tar). - - 'base_name' is the name of the file to create, minus any format-specific - extension; 'format' is the archive format: one of "zip", "tar", "gztar", - "bztar", or "xztar". Or any other registered format. - - 'root_dir' is a directory that will be the root directory of the - archive; ie. we typically chdir into 'root_dir' before creating the - archive. 'base_dir' is the directory where we start archiving from; - ie. 'base_dir' will be the common prefix of all files and - directories in the archive. 'root_dir' and 'base_dir' both default - to the current directory. Returns the name of the archive file. - - 'owner' and 'group' are used when creating a tar archive. By default, - uses the current owner and group. - """ - sys.audit("shutil.make_archive", base_name, format, root_dir, base_dir) - try: - format_info = _ARCHIVE_FORMATS[format] - except KeyError: - raise ValueError("unknown archive format '%s'" % format) from None - - kwargs = {'dry_run': dry_run, 'logger': logger, - 'owner': owner, 'group': group} - - func = format_info[0] - for arg, val in format_info[1]: - kwargs[arg] = val - - if base_dir is None: - base_dir = os.curdir - - supports_root_dir = getattr(func, 'supports_root_dir', False) - save_cwd = None - if root_dir is not None: - stmd = os.stat(root_dir).st_mode - if not stat.S_ISDIR(stmd): - raise NotADirectoryError(errno.ENOTDIR, 'Not a directory', root_dir) - - if supports_root_dir: - # Support path-like base_name here for backwards-compatibility. - base_name = os.fspath(base_name) - kwargs['root_dir'] = root_dir - else: - save_cwd = os.getcwd() - if logger is not None: - logger.debug("changing into '%s'", root_dir) - base_name = os.path.abspath(base_name) - if not dry_run: - os.chdir(root_dir) - - try: - filename = func(base_name, base_dir, **kwargs) - finally: - if save_cwd is not None: - if logger is not None: - logger.debug("changing back to '%s'", save_cwd) - os.chdir(save_cwd) - - return filename - - -def get_unpack_formats(): - """Returns a list of supported formats for unpacking. 
- - Each element of the returned sequence is a tuple - (name, extensions, description) - """ - formats = [(name, info[0], info[3]) for name, info in - _UNPACK_FORMATS.items()] - formats.sort() - return formats - -def _check_unpack_options(extensions, function, extra_args): - """Checks what gets registered as an unpacker.""" - # first make sure no other unpacker is registered for this extension - existing_extensions = {} - for name, info in _UNPACK_FORMATS.items(): - for ext in info[0]: - existing_extensions[ext] = name - - for extension in extensions: - if extension in existing_extensions: - msg = '%s is already registered for "%s"' - raise RegistryError(msg % (extension, - existing_extensions[extension])) - - if not callable(function): - raise TypeError('The registered function must be a callable') - - -def register_unpack_format(name, extensions, function, extra_args=None, - description=''): - """Registers an unpack format. - - `name` is the name of the format. `extensions` is a list of extensions - corresponding to the format. - - `function` is the callable that will be - used to unpack archives. The callable will receive archives to unpack. - If it's unable to handle an archive, it needs to raise a ReadError - exception. - - If provided, `extra_args` is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_unpack_formats() function. - """ - if extra_args is None: - extra_args = [] - _check_unpack_options(extensions, function, extra_args) - _UNPACK_FORMATS[name] = extensions, function, extra_args, description - -def unregister_unpack_format(name): - """Removes the pack format from the registry.""" - del _UNPACK_FORMATS[name] - -def _ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - if not os.path.isdir(dirname): - os.makedirs(dirname) - -def _unpack_zipfile(filename, extract_dir): - """Unpack zip `filename` to `extract_dir` - """ - import zipfile # late import for breaking circular dependency - - if not zipfile.is_zipfile(filename): - raise ReadError("%s is not a zip file" % filename) - - zip = zipfile.ZipFile(filename) - try: - for info in zip.infolist(): - name = info.filename - - # don't extract absolute paths or ones with .. in them - if name.startswith('/') or '..' 
in name: - continue - - targetpath = os.path.join(extract_dir, *name.split('/')) - if not targetpath: - continue - - _ensure_directory(targetpath) - if not name.endswith('/'): - # file - with zip.open(name, 'r') as source, \ - open(targetpath, 'wb') as target: - copyfileobj(source, target) - finally: - zip.close() - -def _unpack_tarfile(filename, extract_dir, *, filter=None): - """Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir` - """ - import tarfile # late import for breaking circular dependency - try: - tarobj = tarfile.open(filename) - except tarfile.TarError: - raise ReadError( - "%s is not a compressed or uncompressed tar file" % filename) - try: - tarobj.extractall(extract_dir, filter=filter) - finally: - tarobj.close() - -# Maps the name of the unpack format to a tuple containing: -# * extensions -# * the unpacking function -# * extra keyword arguments -# * description -_UNPACK_FORMATS = { - 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), - 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file"), -} - -if _ZLIB_SUPPORTED: - _UNPACK_FORMATS['gztar'] = (['.tar.gz', '.tgz'], _unpack_tarfile, [], - "gzip'ed tar-file") - -if _BZ2_SUPPORTED: - _UNPACK_FORMATS['bztar'] = (['.tar.bz2', '.tbz2'], _unpack_tarfile, [], - "bzip2'ed tar-file") - -if _LZMA_SUPPORTED: - _UNPACK_FORMATS['xztar'] = (['.tar.xz', '.txz'], _unpack_tarfile, [], - "xz'ed tar-file") - -def _find_unpack_format(filename): - for name, info in _UNPACK_FORMATS.items(): - for extension in info[0]: - if filename.endswith(extension): - return name - return None - -def unpack_archive(filename, extract_dir=None, format=None, *, filter=None): - """Unpack an archive. - - `filename` is the name of the archive. - - `extract_dir` is the name of the target directory, where the archive - is unpacked. If not provided, the current working directory is used. - - `format` is the archive format: one of "zip", "tar", "gztar", "bztar", - or "xztar". Or any other registered format. If not provided, - unpack_archive will use the filename extension and see if an unpacker - was registered for that extension. - - In case none is found, a ValueError is raised. - - If `filter` is given, it is passed to the underlying - extraction function. - """ - sys.audit("shutil.unpack_archive", filename, extract_dir, format) - - if extract_dir is None: - extract_dir = os.getcwd() - - extract_dir = os.fspath(extract_dir) - filename = os.fspath(filename) - - if filter is None: - filter_kwargs = {} - else: - filter_kwargs = {'filter': filter} - if format is not None: - try: - format_info = _UNPACK_FORMATS[format] - except KeyError: - raise ValueError("Unknown unpack format '{0}'".format(format)) from None - - func = format_info[1] - func(filename, extract_dir, **dict(format_info[2]), **filter_kwargs) - else: - # we need to look at the registered unpackers supported extensions - format = _find_unpack_format(filename) - if format is None: - raise ReadError("Unknown archive format '{0}'".format(filename)) - - func = _UNPACK_FORMATS[format][1] - kwargs = dict(_UNPACK_FORMATS[format][2]) | filter_kwargs - func(filename, extract_dir, **kwargs) - - -if hasattr(os, 'statvfs'): - - __all__.append('disk_usage') - _ntuple_diskusage = collections.namedtuple('usage', 'total used free') - _ntuple_diskusage.total.__doc__ = 'Total space in bytes' - _ntuple_diskusage.used.__doc__ = 'Used space in bytes' - _ntuple_diskusage.free.__doc__ = 'Free space in bytes' - - def disk_usage(path): - """Return disk usage statistics about the given path. 
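unpack_archive() picks the unpacker either from the explicit format or from the filename extension; for tar archives the filter argument is forwarded to tarfile.extractall() (Python 3.12+), e.g. with a placeholder archive name:

import shutil

shutil.unpack_archive('myapp-1.0.tar.gz', extract_dir='unpacked', filter='data')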
- - Returned value is a named tuple with attributes 'total', 'used' and - 'free', which are the amount of total, used and free space, in bytes. - """ - st = os.statvfs(path) - free = st.f_bavail * st.f_frsize - total = st.f_blocks * st.f_frsize - used = (st.f_blocks - st.f_bfree) * st.f_frsize - return _ntuple_diskusage(total, used, free) - -elif _WINDOWS: - - __all__.append('disk_usage') - _ntuple_diskusage = collections.namedtuple('usage', 'total used free') - - def disk_usage(path): - """Return disk usage statistics about the given path. - - Returned values is a named tuple with attributes 'total', 'used' and - 'free', which are the amount of total, used and free space, in bytes. - """ - total, free = nt._getdiskusage(path) - used = total - free - return _ntuple_diskusage(total, used, free) - - -def chown(path, user=None, group=None, *, dir_fd=None, follow_symlinks=True): - """Change owner user and group of the given path. - - user and group can be the uid/gid or the user/group names, and in that case, - they are converted to their respective uid/gid. - - If dir_fd is set, it should be an open file descriptor to the directory to - be used as the root of *path* if it is relative. - - If follow_symlinks is set to False and the last element of the path is a - symbolic link, chown will modify the link itself and not the file being - referenced by the link. - """ - sys.audit('shutil.chown', path, user, group) - - if user is None and group is None: - raise ValueError("user and/or group must be set") - - _user = user - _group = group - - # -1 means don't change it - if user is None: - _user = -1 - # user can either be an int (the uid) or a string (the system username) - elif isinstance(user, str): - _user = _get_uid(user) - if _user is None: - raise LookupError("no such user: {!r}".format(user)) - - if group is None: - _group = -1 - elif not isinstance(group, int): - _group = _get_gid(group) - if _group is None: - raise LookupError("no such group: {!r}".format(group)) - - os.chown(path, _user, _group, dir_fd=dir_fd, - follow_symlinks=follow_symlinks) - -def get_terminal_size(fallback=(80, 24)): - """Get the size of the terminal window. - - For each of the two dimensions, the environment variable, COLUMNS - and LINES respectively, is checked. If the variable is defined and - the value is a positive integer, it is used. - - When COLUMNS or LINES is not defined, which is the common case, - the terminal connected to sys.__stdout__ is queried - by invoking os.get_terminal_size. - - If the terminal size cannot be successfully queried, either because - the system doesn't support querying, or because we are not - connected to a terminal, the value given in fallback parameter - is used. Fallback defaults to (80, 24) which is the default - size used by many terminal emulators. - - The value returned is a named tuple of type os.terminal_size. 
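Typical use of the two helpers above (chown is POSIX-only; the path and names are placeholders):

import shutil

cols, rows = shutil.get_terminal_size(fallback=(100, 30))
shutil.chown('/var/log/myapp', user='myapp', group='adm')  # numeric uid/gid work too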
- """ - # columns, lines are the working values - try: - columns = int(os.environ['COLUMNS']) - except (KeyError, ValueError): - columns = 0 - - try: - lines = int(os.environ['LINES']) - except (KeyError, ValueError): - lines = 0 - - # only query if necessary - if columns <= 0 or lines <= 0: - try: - size = os.get_terminal_size(sys.__stdout__.fileno()) - except (AttributeError, ValueError, OSError): - # stdout is None, closed, detached, or not a terminal, or - # os.get_terminal_size() is unsupported - size = os.terminal_size(fallback) - if columns <= 0: - columns = size.columns or fallback[0] - if lines <= 0: - lines = size.lines or fallback[1] - - return os.terminal_size((columns, lines)) - - -# Check that a given file can be accessed with the correct mode. -# Additionally check that `file` is not a directory, as on Windows -# directories pass the os.access check. -def _access_check(fn, mode): - return (os.path.exists(fn) and os.access(fn, mode) - and not os.path.isdir(fn)) - - -def _win_path_needs_curdir(cmd, mode): - """ - On Windows, we can use NeedCurrentDirectoryForExePath to figure out - if we should add the cwd to PATH when searching for executables if - the mode is executable. - """ - return (not (mode & os.X_OK)) or _winapi.NeedCurrentDirectoryForExePath( - os.fsdecode(cmd)) - - -def which(cmd, mode=os.F_OK | os.X_OK, path=None): - """Given a command, mode, and a PATH string, return the path which - conforms to the given mode on the PATH, or None if there is no such - file. - - `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result - of os.environ.get("PATH"), or can be overridden with a custom search - path. - - """ - use_bytes = isinstance(cmd, bytes) - - # If we're given a path with a directory part, look it up directly rather - # than referring to PATH directories. This includes checking relative to - # the current directory, e.g. ./script - dirname, cmd = os.path.split(cmd) - if dirname: - path = [dirname] - else: - if path is None: - path = os.environ.get("PATH", None) - if path is None: - try: - path = os.confstr("CS_PATH") - except (AttributeError, ValueError): - # os.confstr() or CS_PATH is not available - path = os.defpath - # bpo-35755: Don't use os.defpath if the PATH environment variable - # is set to an empty string - - # PATH='' doesn't match, whereas PATH=':' looks in the current - # directory - if not path: - return None - - if use_bytes: - path = os.fsencode(path) - path = path.split(os.fsencode(os.pathsep)) - else: - path = os.fsdecode(path) - path = path.split(os.pathsep) - - if sys.platform == "win32" and _win_path_needs_curdir(cmd, mode): - curdir = os.curdir - if use_bytes: - curdir = os.fsencode(curdir) - path.insert(0, curdir) - - if sys.platform == "win32": - # PATHEXT is necessary to check on Windows. - pathext_source = os.getenv("PATHEXT") or _WIN_DEFAULT_PATHEXT - pathext = pathext_source.split(os.pathsep) - pathext = [ext.rstrip('.') for ext in pathext if ext] - - if use_bytes: - pathext = [os.fsencode(ext) for ext in pathext] - - files = [cmd + ext for ext in pathext] - - # If X_OK in mode, simulate the cmd.exe behavior: look at direct - # match if and only if the extension is in PATHEXT. - # If X_OK not in mode, simulate the first result of where.exe: - # always look at direct match before a PATHEXT match. 
- normcmd = cmd.upper() - if not (mode & os.X_OK) or any(normcmd.endswith(ext.upper()) for ext in pathext): - files.insert(0, cmd) - else: - # On other platforms you don't have things like PATHEXT to tell you - # what file suffixes are executable, so just pass on cmd as-is. - files = [cmd] - - seen = set() - for dir in path: - normdir = os.path.normcase(dir) - if normdir not in seen: - seen.add(normdir) - for thefile in files: - name = os.path.join(dir, thefile) - if _access_check(name, mode): - return name - return None diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD b/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD deleted file mode 100644 index c9e29f25..00000000 --- a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD +++ /dev/null @@ -1,878 +0,0 @@ -../../Scripts/pip.exe,sha256=DtNbXKx9bv_yOpRqtGHrJRruaE2YN145nzYE9ZxGdCc,108328 -../../Scripts/pip3.13.exe,sha256=DtNbXKx9bv_yOpRqtGHrJRruaE2YN145nzYE9ZxGdCc,108328 -../../Scripts/pip3.exe,sha256=DtNbXKx9bv_yOpRqtGHrJRruaE2YN145nzYE9ZxGdCc,108328 -pip-26.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -pip-26.0.1.dist-info/METADATA,sha256=ZqIZuNGsG6l2gHiKlQjVQghFQhgSWfhEDHuCVPW3aN8,4675 -pip-26.0.1.dist-info/RECORD,, -pip-26.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip-26.0.1.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 -pip-26.0.1.dist-info/entry_points.txt,sha256=Vhf8s0IYgX37mtd4vGL73BPcxdKnqeCFPzB5-d30x8o,84 -pip-26.0.1.dist-info/licenses/AUTHORS.txt,sha256=grSl9YDNOpOFFJTX8ZYKSdgfouXi_DzlRyYGE2-u5aI,11731 -pip-26.0.1.dist-info/licenses/LICENSE.txt,sha256=Y0MApmnUmurmWxLGxIySTFGkzfPR_whtw0VtyLyqIQQ,1093 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt,sha256=hu7uh74qQ_P_H1ZJb0UfaSQ5JvAl_tuwM2ZsMExMFhs,558 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt,sha256=GrNuPipLqGMWJThPh-ngkdsfrtA0xbIzJbMjmr8sxSU,1099 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt,sha256=gI4QyKarjesUn_mz-xn0R6gICUYG1xKpylf-rTVSWZ0,14531 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md,sha256=t6M2q_OwThgOwGXN0W5wXQeeHMehT5EKpukYfza5zYc,1541 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING,sha256=SS3tuoXaWHL3jmCRvNH-pHTWYNNay03ulkuKqz8AdCc,614 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE,sha256=qdZvHVJt8C4p3Oc0NtNOVuhjL0bCdbvf_HBWnogvnxc,1331 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE,sha256=GyKwSbUmfW38I6Z79KhNjsBLn9-xpR02DkK0NCyLQVQ,1081 
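Each RECORD row has the form path,sha256=<digest>,size, where the digest is an unpadded urlsafe-base64 SHA-256 of the file (the wheel RECORD convention; the zero-byte REQUESTED file above accordingly carries the empty-input digest 47DEQpj8...). It can be recomputed like this:

import base64, hashlib

def record_digest(path):
    # Reproduce the 'sha256=...' field of a RECORD entry for a given file.
    with open(path, 'rb') as f:
        digest = hashlib.sha256(f.read()).digest()
    return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')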
-pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE,sha256=84j9OMrRMRLB3A9mm76A5_hFQe26-3LzAw0sp2QsPJ0,751 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE,sha256=3u18F6QxgVgZCj6iOcyHmlpQJxzruYrnAl9I--WNyhU,1056 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE,sha256=M757fo-k_Rmxdg4ajtimaL2rhSyRtpLdQUJLy3Jan8o,1086 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115 -pip/__init__.py,sha256=3EhKF2588Ab15tmBszgD3Bp0N26sJx7VhS2Akn_qY38,355 -pip/__main__.py,sha256=WzbhHXTbSE6gBY19mNN9m4s5o_365LOvTYSgqgbdBhE,854 -pip/__pip-runner__.py,sha256=JOoEZTwrtv7jRaXBkgSQKAE04yNyfFmGHxqpHiGHvL0,1450 -pip/__pycache__/__init__.cpython-313.pyc,, -pip/__pycache__/__main__.cpython-313.pyc,, -pip/__pycache__/__pip-runner__.cpython-313.pyc,, -pip/_internal/__init__.py,sha256=S7i9Dn9aSZS0MG-2Wrve3dV9TImPzvQn5jjhp9t_uf0,511 -pip/_internal/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/__pycache__/build_env.cpython-313.pyc,, -pip/_internal/__pycache__/cache.cpython-313.pyc,, -pip/_internal/__pycache__/configuration.cpython-313.pyc,, -pip/_internal/__pycache__/exceptions.cpython-313.pyc,, -pip/_internal/__pycache__/main.cpython-313.pyc,, -pip/_internal/__pycache__/pyproject.cpython-313.pyc,, -pip/_internal/__pycache__/self_outdated_check.cpython-313.pyc,, -pip/_internal/__pycache__/wheel_builder.cpython-313.pyc,, -pip/_internal/build_env.py,sha256=XpgOIlTQLgz3PvDT2n7j2NzX_rVFZLCIG7t7b2ddhcM,21911 -pip/_internal/cache.py,sha256=nMh48Yv3yu1HS1yCdscouu6B6B5zYBWdV6bhqs7gL-E,10345 -pip/_internal/cli/__init__.py,sha256=Iqg_tKA771XuMO1P4t_sDHnSKPzkUb9D0DqunAmw_ko,131 -pip/_internal/cli/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/cli/__pycache__/autocompletion.cpython-313.pyc,, -pip/_internal/cli/__pycache__/base_command.cpython-313.pyc,, -pip/_internal/cli/__pycache__/cmdoptions.cpython-313.pyc,, -pip/_internal/cli/__pycache__/command_context.cpython-313.pyc,, -pip/_internal/cli/__pycache__/index_command.cpython-313.pyc,, -pip/_internal/cli/__pycache__/main.cpython-313.pyc,, -pip/_internal/cli/__pycache__/main_parser.cpython-313.pyc,, -pip/_internal/cli/__pycache__/parser.cpython-313.pyc,, -pip/_internal/cli/__pycache__/progress_bars.cpython-313.pyc,, -pip/_internal/cli/__pycache__/req_command.cpython-313.pyc,, -pip/_internal/cli/__pycache__/spinners.cpython-313.pyc,, -pip/_internal/cli/__pycache__/status_codes.cpython-313.pyc,, -pip/_internal/cli/autocompletion.py,sha256=ZG2cM03nlcNrs-WG_SFTW46isx9s2Go5lUD_8-iv70o,7193 -pip/_internal/cli/base_command.py,sha256=6OW75PSGzkH8Fz761WZ3OSz1TsuO3-suc6iap-sQjTM,9168 -pip/_internal/cli/cmdoptions.py,sha256=hfA9B29Nnq2vYMWhFVg7EcWjdlfdPBPU4WwWT2Lkq4A,36164 -pip/_internal/cli/command_context.py,sha256=kmu3EWZbfBega1oDamnGJTA_UaejhIQNuMj2CVmMXu0,817 -pip/_internal/cli/index_command.py,sha256=s3x75lpDXWJtCkBacTQ3qAAprldHMJCniEQ5qkQ0FiI,6484 -pip/_internal/cli/main.py,sha256=ljDQBkvBtC8xTjOdb6rDJzJUNi1s-PnVR_W5C-Mq0Dk,3137 -pip/_internal/cli/main_parser.py,sha256=YjzJAjqf78ARNsLlnJT9l6fNbpyDPJA-arOIXYsK5Ik,4403 -pip/_internal/cli/parser.py,sha256=EIFExrWX_1nrl1Ib--GOor70WYqLtduHByenb1u9xH4,13827 
-pip/_internal/cli/progress_bars.py,sha256=IW1PH5n2FPqUBTP7ULQ5Yu-wyNNO9XGY3g1PT4RMu44,4706 -pip/_internal/cli/req_command.py,sha256=QjDXId0hFdopwE8hNx2eustumxUNbnOCvG_ORmUC7vM,16482 -pip/_internal/cli/spinners.py,sha256=EJzZIZNyUtJljp3-WjcsyIrqxW-HUsfWzhuW84n_Tqw,7362 -pip/_internal/cli/status_codes.py,sha256=sEFHUaUJbqv8iArL3HAtcztWZmGOFX01hTesSytDEh0,116 -pip/_internal/commands/__init__.py,sha256=aNeCbQurGWihfhQq7BqaLXHqWDQ0i3I04OS7kxK6plQ,4026 -pip/_internal/commands/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/commands/__pycache__/cache.cpython-313.pyc,, -pip/_internal/commands/__pycache__/check.cpython-313.pyc,, -pip/_internal/commands/__pycache__/completion.cpython-313.pyc,, -pip/_internal/commands/__pycache__/configuration.cpython-313.pyc,, -pip/_internal/commands/__pycache__/debug.cpython-313.pyc,, -pip/_internal/commands/__pycache__/download.cpython-313.pyc,, -pip/_internal/commands/__pycache__/freeze.cpython-313.pyc,, -pip/_internal/commands/__pycache__/hash.cpython-313.pyc,, -pip/_internal/commands/__pycache__/help.cpython-313.pyc,, -pip/_internal/commands/__pycache__/index.cpython-313.pyc,, -pip/_internal/commands/__pycache__/inspect.cpython-313.pyc,, -pip/_internal/commands/__pycache__/install.cpython-313.pyc,, -pip/_internal/commands/__pycache__/list.cpython-313.pyc,, -pip/_internal/commands/__pycache__/lock.cpython-313.pyc,, -pip/_internal/commands/__pycache__/search.cpython-313.pyc,, -pip/_internal/commands/__pycache__/show.cpython-313.pyc,, -pip/_internal/commands/__pycache__/uninstall.cpython-313.pyc,, -pip/_internal/commands/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/commands/cache.py,sha256=XjT7kjY8GSISMksFHsLvjS9Ogfi5extNlUUv-dUoWCM,9142 -pip/_internal/commands/check.py,sha256=hVFBQezQ3zj4EydoWbFQj_afPUppMt7r9JPAlY22U6Y,2244 -pip/_internal/commands/completion.py,sha256=LjvRIZ6QUiDXJL3IOMFeD-_J97HfjMGgEk0j2tWGu1U,4565 -pip/_internal/commands/configuration.py,sha256=6gNOGrVWnOLU15zUnAiNuOMhf76RRIZvCdVD0degPRk,10105 -pip/_internal/commands/debug.py,sha256=_8IqM8Fx1_lY2STu_qspr63tufF7zyFJCyYAXtxz0N4,6805 -pip/_internal/commands/download.py,sha256=LUNVobuvCdagjLBuPBaxHeBiHEiIe03fTO2m6ahC8qw,5178 -pip/_internal/commands/freeze.py,sha256=fxoW8AAc-bAqB_fXdNq2VnZ3JfWkFMg-bR6LcdDVO7A,3099 -pip/_internal/commands/hash.py,sha256=GO9pRN3wXC2kQaovK57TaLYBMc3IltOH92O6QEw6YE0,1679 -pip/_internal/commands/help.py,sha256=Bz3LcjNQXkz4Cu__pL4CZ86o4-HNLZj1NZWdlJhjuu0,1108 -pip/_internal/commands/index.py,sha256=kDpx2MO6ZxTt5PpeY4jqcssVbYhzxpkpreDe_6PPhks,5520 -pip/_internal/commands/inspect.py,sha256=ogm4UT7LRo8bIQcWUS1IiA25QdD4VHLa7JaPAodDttM,3177 -pip/_internal/commands/install.py,sha256=L6X1qi49ROVTGABhwwxDgBBTijlOpVn6XSDVZ7QW1Kc,30588 -pip/_internal/commands/list.py,sha256=L5nWuwawqSrBNsuxfyHLAagfz7XJP86tC9nK3L9YiI8,13497 -pip/_internal/commands/lock.py,sha256=145ihjUK_-7gP8O65XPDi_xMhlh5hne1ptkHdfnbAnQ,6027 -pip/_internal/commands/search.py,sha256=zbMsX_YASj6kXA6XIBgTDv0bGK51xG-CV3IynZJcE-c,5782 -pip/_internal/commands/show.py,sha256=oLVJIfKWmDKm0SsQGEi3pozNiqrXjTras_fbBSYKpBA,8066 -pip/_internal/commands/uninstall.py,sha256=CsOihqvb6ZA6O67L70oXeoLHeOfNzMM88H9g-9aocgw,3868 -pip/_internal/commands/wheel.py,sha256=L9vEzJ_E42scF_Hgh5X4Hk39nqJDKxGg4u7glDYbNWc,5880 -pip/_internal/configuration.py,sha256=WxwwSwY_Bm6QzDgf32BsujEyO8dgRedegCpgbUfDvM8,14568 -pip/_internal/distributions/__init__.py,sha256=Hq6kt6gXBgjNit5hTTWLAzeCNOKoB-N0pGYSqehrli8,858 -pip/_internal/distributions/__pycache__/__init__.cpython-313.pyc,, 
-pip/_internal/distributions/__pycache__/base.cpython-313.pyc,, -pip/_internal/distributions/__pycache__/installed.cpython-313.pyc,, -pip/_internal/distributions/__pycache__/sdist.cpython-313.pyc,, -pip/_internal/distributions/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/distributions/base.py,sha256=l-OTCAIs25lsapejA6IYpPZxSM5-BET4sdZDkql8jiY,1830 -pip/_internal/distributions/installed.py,sha256=kgIEE_1NzjZxLBSC-v5s64uOFZlVEt3aPrjTtL6x2XY,929 -pip/_internal/distributions/sdist.py,sha256=RYwQIbuxpKy6OjlBZCAefxpMDaoocUQ4dFtheGsiTOQ,6627 -pip/_internal/distributions/wheel.py,sha256=_HbG0OehF8dwj4UX-xV__tXLwgPus9OjMEf2NTRqBbE,1364 -pip/_internal/exceptions.py,sha256=JdPCrQ9iTLvE-GBebzBEeGP3hoTffWEKqbYEsa6cEZc,32165 -pip/_internal/index/__init__.py,sha256=tzwMH_fhQeubwMqHdSivasg1cRgTSbNg2CiMVnzMmyU,29 -pip/_internal/index/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/index/__pycache__/collector.cpython-313.pyc,, -pip/_internal/index/__pycache__/package_finder.cpython-313.pyc,, -pip/_internal/index/__pycache__/sources.cpython-313.pyc,, -pip/_internal/index/collector.py,sha256=R7Gcx_4GEoSEI-iazfAZVEPG3Lp6mbZT4lbAD6NjAc0,16144 -pip/_internal/index/package_finder.py,sha256=a3_L4FDNsuDf3y8Af9J7sfsHR1ahs8o13Ths-WYwFh0,41776 -pip/_internal/index/sources.py,sha256=nXJkOjhLy-O2FsrKU9RIqCOqgY2PsoKWybtZjjRgqU0,8639 -pip/_internal/locations/__init__.py,sha256=Sd67ap1LIemvXArUDFqm8U-HuZvj9i3ApEuiIwUc9UE,14157 -pip/_internal/locations/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/locations/__pycache__/_distutils.cpython-313.pyc,, -pip/_internal/locations/__pycache__/_sysconfig.cpython-313.pyc,, -pip/_internal/locations/__pycache__/base.cpython-313.pyc,, -pip/_internal/locations/_distutils.py,sha256=jpFj4V00rD9IR3vA9TqrGkwcdNVFc58LsChZavge9JY,5975 -pip/_internal/locations/_sysconfig.py,sha256=8CpTjtxaCzHSCrKpaxWnHE7aKcJrRJRmntR1ZLVysLk,7779 -pip/_internal/locations/base.py,sha256=AImjYJWxOtDkc0KKc6Y4Gz677cg91caMA4L94B9FZEg,2550 -pip/_internal/main.py,sha256=1cHqjsfFCrMFf3B5twzocxTJUdHMLoXUpy5lJoFqUi8,338 -pip/_internal/metadata/__init__.py,sha256=vp-JAxiWg_-l5F8AT0Jcey72uUnh8CDwwol9-KktHZ8,5824 -pip/_internal/metadata/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/metadata/__pycache__/_json.cpython-313.pyc,, -pip/_internal/metadata/__pycache__/base.cpython-313.pyc,, -pip/_internal/metadata/__pycache__/pkg_resources.cpython-313.pyc,, -pip/_internal/metadata/_json.py,sha256=hNvnMHOXLAyNlzirWhPL9Nx2CvCqa1iRma6Osq1YfV8,2711 -pip/_internal/metadata/base.py,sha256=BGuMenlcQT8i7j9iclrfdC3vSwgvhr8gjn955cCy16s,25420 -pip/_internal/metadata/importlib/__init__.py,sha256=jUUidoxnHcfITHHaAWG1G2i5fdBYklv_uJcjo2x7VYE,135 -pip/_internal/metadata/importlib/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/metadata/importlib/__pycache__/_compat.cpython-313.pyc,, -pip/_internal/metadata/importlib/__pycache__/_dists.cpython-313.pyc,, -pip/_internal/metadata/importlib/__pycache__/_envs.cpython-313.pyc,, -pip/_internal/metadata/importlib/_compat.py,sha256=sneVh4_6WxQZK4ljdl3ylVuP-q0ttSqbgl9mWt0HnOg,2804 -pip/_internal/metadata/importlib/_dists.py,sha256=znZD7MN4RC73-87KXAn6tKZv9lAQRI0AxxK2bubDvPw,8420 -pip/_internal/metadata/importlib/_envs.py,sha256=H3qVLXVh4LWvrPvu_ekXf3dfbtwnlhNJQP2pxXpccfU,5333 -pip/_internal/metadata/pkg_resources.py,sha256=NO76ZrfR2-LKJTyaXrmQoGhmJMArALvacrlZHViSDT8,10544 -pip/_internal/models/__init__.py,sha256=AjmCEBxX_MH9f_jVjIGNCFJKYCYeSEe18yyvNx4uRKQ,62 -pip/_internal/models/__pycache__/__init__.cpython-313.pyc,, 
-pip/_internal/models/__pycache__/candidate.cpython-313.pyc,, -pip/_internal/models/__pycache__/direct_url.cpython-313.pyc,, -pip/_internal/models/__pycache__/format_control.cpython-313.pyc,, -pip/_internal/models/__pycache__/index.cpython-313.pyc,, -pip/_internal/models/__pycache__/installation_report.cpython-313.pyc,, -pip/_internal/models/__pycache__/link.cpython-313.pyc,, -pip/_internal/models/__pycache__/release_control.cpython-313.pyc,, -pip/_internal/models/__pycache__/scheme.cpython-313.pyc,, -pip/_internal/models/__pycache__/search_scope.cpython-313.pyc,, -pip/_internal/models/__pycache__/selection_prefs.cpython-313.pyc,, -pip/_internal/models/__pycache__/target_python.cpython-313.pyc,, -pip/_internal/models/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/models/candidate.py,sha256=zzgFRuw_kWPjKpGw7LC0ZUMD2CQ2EberUIYs8izjdCA,753 -pip/_internal/models/direct_url.py,sha256=4NMWacu_QzPPWREC1te7v6Wfv-2HkI4tvSJF-CBgLh4,6555 -pip/_internal/models/format_control.py,sha256=PwemYG1L27BM0f1KP61rm24wShENFyxqlD1TWu34alc,2471 -pip/_internal/models/index.py,sha256=tYnL8oxGi4aSNWur0mG8DAP7rC6yuha_MwJO8xw0crI,1030 -pip/_internal/models/installation_report.py,sha256=cqfWJ93ThCxjcacqSWryOCD2XtIn1CZrgzZxAv5FQZ0,2839 -pip/_internal/models/link.py,sha256=zti5UCx1hT03etYqm6MCqFd714clmTgX8rTZT9CKZDQ,21992 -pip/_internal/models/release_control.py,sha256=XD14Hy_XLh9xWR1p7JHqPZPEv3Nnb1BZGMpClk76sLs,3403 -pip/_internal/models/scheme.py,sha256=PakmHJM3e8OOWSZFtfz1Az7f1meONJnkGuQxFlt3wBE,575 -pip/_internal/models/search_scope.py,sha256=1hxU2IVsAaLZVjp0CbzJbYaYzCxv72_Qbg3JL0qhXo0,4507 -pip/_internal/models/selection_prefs.py,sha256=IDOA3euRtyqWUyIK7lX2bzIZasYiEvunKA6H3Mngk-M,2221 -pip/_internal/models/target_python.py,sha256=I0eFS-eia3kwhrOvgsphFZtNAB2IwXZ9Sr9fp6IjBP4,4243 -pip/_internal/models/wheel.py,sha256=1SdfDvN7ALTsbyZ9EOsNy1GPirP1n6EjHyzPrZyLSh8,2920 -pip/_internal/network/__init__.py,sha256=FMy06P__y6jMjUc8z3ZcQdKF-pmZ2zM14_vBeHPGhUI,49 -pip/_internal/network/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/network/__pycache__/auth.cpython-313.pyc,, -pip/_internal/network/__pycache__/cache.cpython-313.pyc,, -pip/_internal/network/__pycache__/download.cpython-313.pyc,, -pip/_internal/network/__pycache__/lazy_wheel.cpython-313.pyc,, -pip/_internal/network/__pycache__/session.cpython-313.pyc,, -pip/_internal/network/__pycache__/utils.cpython-313.pyc,, -pip/_internal/network/__pycache__/xmlrpc.cpython-313.pyc,, -pip/_internal/network/auth.py,sha256=azFp14I9cyWAAzkxF2VM0Q_xtHnbNz3_NQXszy87KQo,20806 -pip/_internal/network/cache.py,sha256=kmRXKQrG9E26xQRj211LHeEGpDg_SlYU9Dn1fJ-AMeI,4862 -pip/_internal/network/download.py,sha256=8sVwIc9MWwpGlMPYCkO1S9U-FD8TA2utw42tj00skjM,12667 -pip/_internal/network/lazy_wheel.py,sha256=y9gVksdJCSjnLfYzs_m3DYUAtl3hc_k-xFPDBd9DgOs,7646 -pip/_internal/network/session.py,sha256=7zK7EeQCSRFipu4ZzcWl1V3AMKkiXdtGqFr7GvU2LrY,19555 -pip/_internal/network/utils.py,sha256=ACsXd1msqNCidHVXsu7LHUSr8NgaypcOKQ4KG-Z_wJM,4091 -pip/_internal/network/xmlrpc.py,sha256=_-Rnk3vOff8uF9hAGmT6SLALflY1gMBcbGwS12fb_Y4,1830 -pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_internal/operations/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/operations/__pycache__/check.cpython-313.pyc,, -pip/_internal/operations/__pycache__/freeze.cpython-313.pyc,, -pip/_internal/operations/__pycache__/prepare.cpython-313.pyc,, -pip/_internal/operations/build/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
-pip/_internal/operations/build/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/operations/build/__pycache__/build_tracker.cpython-313.pyc,, -pip/_internal/operations/build/__pycache__/metadata.cpython-313.pyc,, -pip/_internal/operations/build/__pycache__/metadata_editable.cpython-313.pyc,, -pip/_internal/operations/build/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/operations/build/__pycache__/wheel_editable.cpython-313.pyc,, -pip/_internal/operations/build/build_tracker.py,sha256=W3b5cmkMWPaE6QIwfzsTayJo7-OlxFHWDxfPuax1KcE,4771 -pip/_internal/operations/build/metadata.py,sha256=INHaeiRfOiLYCXApfDNRo9Cw2xI4VwTc0KItvfdfOjk,1421 -pip/_internal/operations/build/metadata_editable.py,sha256=oWudMsnjy4loO_Jy7g4N9nxsnaEX_iDlVRgCy7pu1rs,1509 -pip/_internal/operations/build/wheel.py,sha256=3bP-nNiJ4S8JvMaBnyessXQUBhxTqt1GBx6DQ1iPJDY,1136 -pip/_internal/operations/build/wheel_editable.py,sha256=q3kfElclM6FutVbFwE87JOTpVWt5ixDf3_UkHAIVfz4,1478 -pip/_internal/operations/check.py,sha256=yC2XWth6iehGGE_fj7XRJLjVKBsTIG3ZoWRkFi3rOwc,5894 -pip/_internal/operations/freeze.py,sha256=PDdY-y_ZtZZJLAKcaWPIGRKAGW7DXR48f0aMRU0j7BA,9854 -pip/_internal/operations/install/__init__.py,sha256=ak-UETcQPKlFZaWoYKWu5QVXbpFBvg0sXc3i0O4vSYY,50 -pip/_internal/operations/install/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/operations/install/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/operations/install/wheel.py,sha256=FQIl2AnNadHV5YGGOVEmOHtUUNO8lpzj3Icoo4S2xis,27923 -pip/_internal/operations/prepare.py,sha256=ptVsmQf0Mo6jirk1Q5Djdse_wJw5Zdh1Fla2iL9HAJM,28830 -pip/_internal/pyproject.py,sha256=J-sTWqC-XfsKQgz9m1bypMWZPHItsSHzIN_NWeIRmhM,4555 -pip/_internal/req/__init__.py,sha256=WcY9z7D3rlIKX1QY8_tRnAsS_poebiGGdtQ7EJ5JQQo,3041 -pip/_internal/req/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/req/__pycache__/constructors.cpython-313.pyc,, -pip/_internal/req/__pycache__/pep723.cpython-313.pyc,, -pip/_internal/req/__pycache__/req_dependency_group.cpython-313.pyc,, -pip/_internal/req/__pycache__/req_file.cpython-313.pyc,, -pip/_internal/req/__pycache__/req_install.cpython-313.pyc,, -pip/_internal/req/__pycache__/req_set.cpython-313.pyc,, -pip/_internal/req/__pycache__/req_uninstall.cpython-313.pyc,, -pip/_internal/req/constructors.py,sha256=R-6n8irjnaa2DMMXlR4YMouXzykFBlzUFjhOZ1NcUUg,18688 -pip/_internal/req/pep723.py,sha256=olZL3tLmHWJhyLNfbD6U9UuikuzTcLDB06qd9WavTjs,1225 -pip/_internal/req/req_dependency_group.py,sha256=0yEQCUaO5Bza66Y3D5o9JRf0qII5QgCRugn1x5aRivA,2618 -pip/_internal/req/req_file.py,sha256=e32ZQ3kJaL_Sdtf32twGKqIau_AqR43MeSycl0iS2Mw,20685 -pip/_internal/req/req_install.py,sha256=vv5cbs3P5gf43e_1v72gwSQ2N_D_qpsfuXOyerMhDuI,31273 -pip/_internal/req/req_set.py,sha256=awkqIXnYA4Prmsj0Qb3zhqdbYUmXd-1o0P-KZ3mvRQs,2828 -pip/_internal/req/req_uninstall.py,sha256=dCmOHt-9RaJBq921L4tMH3PmIBDetGplnbjRKXmGt00,24099 -pip/_internal/resolution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_internal/resolution/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/resolution/__pycache__/base.cpython-313.pyc,, -pip/_internal/resolution/base.py,sha256=RIsqSP79olPdOgtPKW-oOQ364ICVopehA6RfGkRfe2s,577 -pip/_internal/resolution/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_internal/resolution/legacy/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/resolution/legacy/__pycache__/resolver.cpython-313.pyc,, -pip/_internal/resolution/legacy/resolver.py,sha256=bwUqE66etz2bcPabqxed18-iyqqb-kx3Er2aT6GeUJY,24060 
-pip/_internal/resolution/resolvelib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/base.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/base.py,sha256=_AoP0ZWlaSct8CRDn2ol3CbNn4zDtnh_0zQGjXASDKI,5047 -pip/_internal/resolution/resolvelib/candidates.py,sha256=50AN7BfB-pCfEmbKNlFZSXtdC0C8ms1waJrF2arknQE,20454 -pip/_internal/resolution/resolvelib/factory.py,sha256=82mLwnPlig37mMrDwcgKHJTE9mPczVuJIxeaUb7CQ0Y,34028 -pip/_internal/resolution/resolvelib/found_candidates.py,sha256=8bZYDCZLXSdLHy_s1o5f4r15HmKvqFUhzBUQOF21Lr4,6018 -pip/_internal/resolution/resolvelib/provider.py,sha256=tbVPfFv4Vg780yZ2_XGoGFP5LVo0U2bFnZov3jpSAIk,11441 -pip/_internal/resolution/resolvelib/reporter.py,sha256=faSgjqme0k_uzv1fvM5T0ZatPQ2eEktNvKBqfvXeGjc,3909 -pip/_internal/resolution/resolvelib/requirements.py,sha256=Izl9n8nc188lA1BSPS8QxfudfDQPHgngw-ij6hXt0nQ,8239 -pip/_internal/resolution/resolvelib/resolver.py,sha256=wQ94Hkep-7kWEHAc-NbMJhmzeEzgEAtxeBxyKVzZoeo,13437 -pip/_internal/self_outdated_check.py,sha256=zDKsyLMufFHuEZY16WRu129FBbBp-ADuxyWMIN4ihPE,8284 -pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_internal/utils/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/utils/__pycache__/_jaraco_text.cpython-313.pyc,, -pip/_internal/utils/__pycache__/_log.cpython-313.pyc,, -pip/_internal/utils/__pycache__/appdirs.cpython-313.pyc,, -pip/_internal/utils/__pycache__/compat.cpython-313.pyc,, -pip/_internal/utils/__pycache__/compatibility_tags.cpython-313.pyc,, -pip/_internal/utils/__pycache__/datetime.cpython-313.pyc,, -pip/_internal/utils/__pycache__/deprecation.cpython-313.pyc,, -pip/_internal/utils/__pycache__/direct_url_helpers.cpython-313.pyc,, -pip/_internal/utils/__pycache__/egg_link.cpython-313.pyc,, -pip/_internal/utils/__pycache__/entrypoints.cpython-313.pyc,, -pip/_internal/utils/__pycache__/filesystem.cpython-313.pyc,, -pip/_internal/utils/__pycache__/filetypes.cpython-313.pyc,, -pip/_internal/utils/__pycache__/glibc.cpython-313.pyc,, -pip/_internal/utils/__pycache__/hashes.cpython-313.pyc,, -pip/_internal/utils/__pycache__/logging.cpython-313.pyc,, -pip/_internal/utils/__pycache__/misc.cpython-313.pyc,, -pip/_internal/utils/__pycache__/packaging.cpython-313.pyc,, -pip/_internal/utils/__pycache__/pylock.cpython-313.pyc,, -pip/_internal/utils/__pycache__/retry.cpython-313.pyc,, -pip/_internal/utils/__pycache__/subprocess.cpython-313.pyc,, -pip/_internal/utils/__pycache__/temp_dir.cpython-313.pyc,, -pip/_internal/utils/__pycache__/unpacking.cpython-313.pyc,, -pip/_internal/utils/__pycache__/urls.cpython-313.pyc,, -pip/_internal/utils/__pycache__/virtualenv.cpython-313.pyc,, -pip/_internal/utils/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/utils/_jaraco_text.py,sha256=M15uUPIh5NpP1tdUGBxRau6q1ZAEtI8-XyLEETscFfE,3350 
-pip/_internal/utils/_log.py,sha256=-jHLOE_THaZz5BFcCnoSL9EYAtJ0nXem49s9of4jvKw,1015 -pip/_internal/utils/appdirs.py,sha256=LrzDPZMKVh0rubtCx9vu3XlZbLCSug6VSj4Qsvt66BA,1681 -pip/_internal/utils/compat.py,sha256=C9LHXJAKkwAH8Hn3nPkz9EYK3rqPBeO_IXkOG2zzsdQ,2514 -pip/_internal/utils/compatibility_tags.py,sha256=DiNSLqpuruXUamGQwOJ2WZByDGLTGaXi9O-Xf8fOi34,6630 -pip/_internal/utils/datetime.py,sha256=kuJOf1mW8G5tRFN6jWardddS-9qSaR53lK1jmx3NTZY,868 -pip/_internal/utils/deprecation.py,sha256=HVhvyO5qiRFcG88PhZlp_87qdKQNwPTUIIHWtsTR2yI,3696 -pip/_internal/utils/direct_url_helpers.py,sha256=ttKv4GMUqlRwPPog9_CUopy6SDgoxVILzeBJzgfn2tg,3200 -pip/_internal/utils/egg_link.py,sha256=YWfsrbmfcrfWgqQYy6OuIjsyb9IfL1q_2v4zsms1WjI,2459 -pip/_internal/utils/entrypoints.py,sha256=uPjAyShKObdotjQjJUzprQ6r3xQvDIZwUYfHHqZ7Dok,3324 -pip/_internal/utils/filesystem.py,sha256=mJ_PP8z1V1x4HMhydWIWDyEmWikLX0f-NXPCXEcjiLo,6892 -pip/_internal/utils/filetypes.py,sha256=sEMa38qaqjvx1Zid3OCAUja31BOBU-USuSMPBvU3yjo,689 -pip/_internal/utils/glibc.py,sha256=sEh8RJJLYSdRvTqAO4THVPPA-YSDVLD4SI9So-bxX1U,3726 -pip/_internal/utils/hashes.py,sha256=d32UI1en8nyqZzdZQvxUVdfeBoe4ADWx7HtrIM4-XQ4,4998 -pip/_internal/utils/logging.py,sha256=6lJWMC6c7_aD_i4sdgaaeb-Tm3kWpYg0hba_V1-OLnE,13414 -pip/_internal/utils/misc.py,sha256=phFIbHm2kmliHDXJ0eNPxgGP423ZpvZoMKKtJ1_Zvjs,23722 -pip/_internal/utils/packaging.py,sha256=s5tpUmFumwV0H9JSTzryrIY4JwQM8paGt7Sm7eNwt2Y,1601 -pip/_internal/utils/pylock.py,sha256=nKQknZgyswWgzi--hRQX_DLUYQ3g5wGTCwVNQNdoJ54,3817 -pip/_internal/utils/retry.py,sha256=83wReEB2rcntMZ5VLd7ascaYSjn_kLdlQCqxILxWkPM,1461 -pip/_internal/utils/subprocess.py,sha256=r4-Ba_Yc3uZXQpi0K4pZFsCT_QqdSvtF3XJ-204QWaA,8983 -pip/_internal/utils/temp_dir.py,sha256=D9c8D7WOProOO8GGDqpBeVSj10NGFmunG0o2TodjjIU,9307 -pip/_internal/utils/unpacking.py,sha256=4hNg6dqHOn_KzGCzSC76nChG97d_UjtF9AnLSof672o,12972 -pip/_internal/utils/urls.py,sha256=aF_eg9ul5d8bMCxfSSSxQcfs-OpJdbStYqZHoy2K1RE,1601 -pip/_internal/utils/virtualenv.py,sha256=mX-UPyw1MPxhwUxKhbqWWX70J6PHXAJjVVrRnG0h9mc,3455 -pip/_internal/utils/wheel.py,sha256=YdRuj6MicG-Q9Mg03FbUv1WTLam6Lc7AgijY4voVyis,4468 -pip/_internal/vcs/__init__.py,sha256=UAqvzpbi0VbZo3Ub6skEeZAw-ooIZR-zX_WpCbxyCoU,596 -pip/_internal/vcs/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/vcs/__pycache__/bazaar.cpython-313.pyc,, -pip/_internal/vcs/__pycache__/git.cpython-313.pyc,, -pip/_internal/vcs/__pycache__/mercurial.cpython-313.pyc,, -pip/_internal/vcs/__pycache__/subversion.cpython-313.pyc,, -pip/_internal/vcs/__pycache__/versioncontrol.cpython-313.pyc,, -pip/_internal/vcs/bazaar.py,sha256=3W1eHjkYx2vc6boeb2NBh4I_rlGAXM-vrzfNhLm1Rxg,3734 -pip/_internal/vcs/git.py,sha256=TTeqDuzS-_BFSNuUStVWmE2nGDpKuvUhBBJk_CCQXV0,19144 -pip/_internal/vcs/mercurial.py,sha256=w1ZJWLKqNP1onEjkfjlwBVnMqPZNSIER8ayjQcnTq4w,5575 -pip/_internal/vcs/subversion.py,sha256=uUgdPvxmvEB8Qwtjr0Hc0XgFjbiNi5cbvI4vARLOJXo,11787 -pip/_internal/vcs/versioncontrol.py,sha256=Ma_HMZBVveSkeYvxacvqeujnkSIaF1XjxTsS3BwcJ8E,22599 -pip/_internal/wheel_builder.py,sha256=yvEULStZtty9Kplp89tDis3hGdyKQ-2BUbFLmJ_5ink,9010 -pip/_vendor/README.rst,sha256=pKKBwCWhu3M3qQ9dDnsmxb3KdsRr-nWmMq2srbH_Bi0,9394 -pip/_vendor/__init__.py,sha256=WzusPTGWIMeQQWSVJ0h2rafGkVTa9WKJ2HT-2-EoZrU,4907 -pip/_vendor/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/cachecontrol/LICENSE.txt,sha256=hu7uh74qQ_P_H1ZJb0UfaSQ5JvAl_tuwM2ZsMExMFhs,558 -pip/_vendor/cachecontrol/__init__.py,sha256=GxwRkm_TQBtPZpfpVK9r6S9dAy2DVnVgDVHJKTiPZ1k,820 
-pip/_vendor/cachecontrol/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/adapter.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/cache.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/controller.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/serialize.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-313.pyc,, -pip/_vendor/cachecontrol/_cmd.py,sha256=iist2EpzJvDVIhMAxXq8iFnTBsiZAd6iplxfmNboNyk,1737 -pip/_vendor/cachecontrol/adapter.py,sha256=W-HW-l01gyCsnxkOyCbqx7sxrWYoBbKrDsKkVVQN6NE,6586 -pip/_vendor/cachecontrol/cache.py,sha256=OXwv7Fn2AwnKNiahJHnjtvaKLndvVLv_-zO-ltlV9qI,1953 -pip/_vendor/cachecontrol/caches/__init__.py,sha256=dtrrroK5BnADR1GWjCZ19aZ0tFsMfvFBtLQQU1sp_ag,303 -pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-313.pyc,, -pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-313.pyc,, -pip/_vendor/cachecontrol/caches/file_cache.py,sha256=d8upFmy_zwaCmlbWEVBlLXFddt8Zw8c5SFpxeOZsdfw,4117 -pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=9rmqwtYu_ljVkW6_oLqbC7EaX_a8YT_yLuna-eS0dgo,1386 -pip/_vendor/cachecontrol/controller.py,sha256=xBauC-vUSu5GsJsxD4-W-JaKqqbBz0MN6Zv8PA2N8hI,19102 -pip/_vendor/cachecontrol/filewrapper.py,sha256=DhxC_rSk-beKdbsYhfvBUDovQHX9r3gHH_jP9-q_mKk,4354 -pip/_vendor/cachecontrol/heuristics.py,sha256=gqMXU8w0gQuEQiSdu3Yg-0vd9kW7nrWKbLca75rheGE,4881 -pip/_vendor/cachecontrol/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/cachecontrol/serialize.py,sha256=HQd2IllQ05HzPkVLMXTF2uX5mjEQjDBkxCqUJUODpZk,5163 -pip/_vendor/cachecontrol/wrapper.py,sha256=hsGc7g8QGQTT-4f8tgz3AM5qwScg6FO0BSdLSRdEvpU,1417 -pip/_vendor/certifi/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989 -pip/_vendor/certifi/__init__.py,sha256=969deMMS7Uchipr0oO4dbRBUvRi0uNYCn07VmG1aTrg,94 -pip/_vendor/certifi/__main__.py,sha256=1k3Cr95vCxxGRGDljrW3wMdpZdL3Nhf0u1n-k2qdsCY,255 -pip/_vendor/certifi/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/certifi/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/certifi/__pycache__/core.cpython-313.pyc,, -pip/_vendor/certifi/cacert.pem,sha256=Tzl1_zCrvzVEO0hgZK6Ly0Hf9wf_31dsdtKS-0WKoKk,270954 -pip/_vendor/certifi/core.py,sha256=gu_ECVI1m3Rq0ytpsNE61hgQGcKaOAt9Rs9G8KsTCOI,3442 -pip/_vendor/certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/dependency_groups/LICENSE.txt,sha256=GrNuPipLqGMWJThPh-ngkdsfrtA0xbIzJbMjmr8sxSU,1099 -pip/_vendor/dependency_groups/__init__.py,sha256=C3OFu0NGwDzQ4LOmmSOFPsRSvkbBn-mdd4j_5YqJw-s,250 -pip/_vendor/dependency_groups/__main__.py,sha256=UNTM7P5mfVtT7wDi9kOTXWgV3fu3e8bTrt1Qp1jvjKo,1709 -pip/_vendor/dependency_groups/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/dependency_groups/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-313.pyc,, -pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-313.pyc,, -pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-313.pyc,, -pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-313.pyc,, -pip/_vendor/dependency_groups/_implementation.py,sha256=Gqb2DlQELRakeHlKf6QtQSW0M-bcEomxHw4JsvID1ls,8041 
-pip/_vendor/dependency_groups/_lint_dependency_groups.py,sha256=yp-DDqKXtbkDTNa0ifa-FmOA8ra24lPZEXftW-R5AuI,1710 -pip/_vendor/dependency_groups/_pip_wrapper.py,sha256=nuVW_w_ntVxpE26ELEvngMY0N04sFLsijXRyZZROFG8,1865 -pip/_vendor/dependency_groups/_toml_compat.py,sha256=BHnXnFacm3DeolsA35GjI6qkDApvua-1F20kv3BfZWE,285 -pip/_vendor/dependency_groups/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/distlib/LICENSE.txt,sha256=gI4QyKarjesUn_mz-xn0R6gICUYG1xKpylf-rTVSWZ0,14531 -pip/_vendor/distlib/__init__.py,sha256=Deo3uo98aUyIfdKJNqofeSEFWwDzrV2QeGLXLsgq0Ag,625 -pip/_vendor/distlib/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/distlib/__pycache__/compat.cpython-313.pyc,, -pip/_vendor/distlib/__pycache__/resources.cpython-313.pyc,, -pip/_vendor/distlib/__pycache__/scripts.cpython-313.pyc,, -pip/_vendor/distlib/__pycache__/util.cpython-313.pyc,, -pip/_vendor/distlib/compat.py,sha256=2jRSjRI4o-vlXeTK2BCGIUhkc6e9ZGhSsacRM5oseTw,41467 -pip/_vendor/distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820 -pip/_vendor/distlib/scripts.py,sha256=Qvp76E9Jc3IgyYubnpqI9fS7eseGOe4FjpeVKqKt9Iw,18612 -pip/_vendor/distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792 -pip/_vendor/distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784 -pip/_vendor/distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032 -pip/_vendor/distlib/util.py,sha256=vMPGvsS4j9hF6Y9k3Tyom1aaHLb0rFmZAEyzeAdel9w,66682 -pip/_vendor/distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648 -pip/_vendor/distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448 -pip/_vendor/distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888 -pip/_vendor/distro/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325 -pip/_vendor/distro/__init__.py,sha256=2fHjF-SfgPvjyNZ1iHh_wjqWdR_Yo5ODHwZC0jLBPhc,981 -pip/_vendor/distro/__main__.py,sha256=bu9d3TifoKciZFcqRBuygV3GSuThnVD_m2IK4cz96Vs,64 -pip/_vendor/distro/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/distro/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/distro/__pycache__/distro.cpython-313.pyc,, -pip/_vendor/distro/distro.py,sha256=XqbefacAhDT4zr_trnbA15eY8vdK4GTghgmvUGrEM_4,49430 -pip/_vendor/distro/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/idna/LICENSE.md,sha256=t6M2q_OwThgOwGXN0W5wXQeeHMehT5EKpukYfza5zYc,1541 -pip/_vendor/idna/__init__.py,sha256=MPqNDLZbXqGaNdXxAFhiqFPKEQXju2jNQhCey6-5eJM,868 -pip/_vendor/idna/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/codec.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/compat.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/core.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/idnadata.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/intranges.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/package_data.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/uts46data.cpython-313.pyc,, -pip/_vendor/idna/codec.py,sha256=M2SGWN7cs_6B32QmKTyTN6xQGZeYQgQ2wiX3_DR6loE,3438 -pip/_vendor/idna/compat.py,sha256=RzLy6QQCdl9784aFhb2EX9EKGCJjg0P3PilGdeXXcx8,316 -pip/_vendor/idna/core.py,sha256=P26_XVycuMTZ1R2mNK1ZREVzM5mvTzdabBXfyZVU1Lc,13246 -pip/_vendor/idna/idnadata.py,sha256=SG8jhaGE53iiD6B49pt2pwTv_UvClciWE-N54oR2p4U,79623 -pip/_vendor/idna/intranges.py,sha256=amUtkdhYcQG8Zr-CoMM_kVRacxkivC1WgxN1b63KKdU,1898 -pip/_vendor/idna/package_data.py,sha256=_CUavOxobnbyNG2FLyHoN8QHP3QM9W1tKuw7eq9QwBk,21 
-pip/_vendor/idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/idna/uts46data.py,sha256=H9J35VkD0F9L9mKOqjeNGd2A-Va6FlPoz6Jz4K7h-ps,243725 -pip/_vendor/msgpack/COPYING,sha256=SS3tuoXaWHL3jmCRvNH-pHTWYNNay03ulkuKqz8AdCc,614 -pip/_vendor/msgpack/__init__.py,sha256=RA8gcqK17YpkxBnNwXJVa1oa2LygWDgfF1nA1NPw3mo,1109 -pip/_vendor/msgpack/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/msgpack/__pycache__/exceptions.cpython-313.pyc,, -pip/_vendor/msgpack/__pycache__/ext.cpython-313.pyc,, -pip/_vendor/msgpack/__pycache__/fallback.cpython-313.pyc,, -pip/_vendor/msgpack/exceptions.py,sha256=dCTWei8dpkrMsQDcjQk74ATl9HsIBH0ybt8zOPNqMYc,1081 -pip/_vendor/msgpack/ext.py,sha256=kteJv03n9tYzd5oo3xYopVTo4vRaAxonBQQJhXohZZo,5726 -pip/_vendor/msgpack/fallback.py,sha256=0g1Pzp0vtmBEmJ5w9F3s_-JMVURP8RS4G1cc5TRaAsI,32390 -pip/_vendor/packaging/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 -pip/_vendor/packaging/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 -pip/_vendor/packaging/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 -pip/_vendor/packaging/__init__.py,sha256=y4lVbpeBzCGk-IPDw5BGBZ_b0P3ukEEJZAbGYc6Ey8c,494 -pip/_vendor/packaging/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_elffile.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_manylinux.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_musllinux.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_parser.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_structures.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_tokenizer.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/markers.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/metadata.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/pylock.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/requirements.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/specifiers.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/tags.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/utils.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/version.cpython-313.pyc,, -pip/_vendor/packaging/_elffile.py,sha256=-sKkptYqzYw2-x3QByJa5mB4rfPWu1pxkZHRx1WAFCY,3211 -pip/_vendor/packaging/_manylinux.py,sha256=Hf6nB0cOrayEs96-p3oIXAgGnFquv20DO5l-o2_Xnv0,9559 -pip/_vendor/packaging/_musllinux.py,sha256=Z6swjH3MA7XS3qXnmMN7QPhqP3fnoYI0eQ18e9-HgAE,2707 -pip/_vendor/packaging/_parser.py,sha256=U_DajsEx2VoC_F46fSVV3hDKNCWoQYkPkasO3dld0ig,10518 -pip/_vendor/packaging/_structures.py,sha256=Hn49Ta8zV9Wo8GiCL8Nl2ARZY983Un3pruZGVNldPwE,1514 -pip/_vendor/packaging/_tokenizer.py,sha256=M8EwNIdXeL9NMFuFrQtiOKwjka_xFx8KjRQnfE8O_z8,5421 -pip/_vendor/packaging/licenses/__init__.py,sha256=TwXLHZCXwSgdFwRLPxW602T6mSieunSFHM6fp8pgW78,5819 -pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-313.pyc,, -pip/_vendor/packaging/licenses/_spdx.py,sha256=WW7DXiyg68up_YND_wpRYlr1SHhiV4FfJLQffghhMxQ,51122 -pip/_vendor/packaging/markers.py,sha256=ZX-cLvW1S3cZcEc0fHI4z7zSx5U2T19yMpDP_mE-CYw,12771 -pip/_vendor/packaging/metadata.py,sha256=CWVZpN_HfoYMSSDuCP7igOvGgqA9AOmpW8f3qTisfnc,39360 -pip/_vendor/packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/packaging/pylock.py,sha256=-R1uNfJ4PaLto7Mg62YsGOHgvskuiIEqPwxOywl42Jk,22537 -pip/_vendor/packaging/requirements.py,sha256=PMCAWD8aNMnVD-6uZMedhBuAVX2573eZ4yPBLXmz04I,2870 
-pip/_vendor/packaging/specifiers.py,sha256=tF2nC-jwW94FYe6So9dNGenQx1Hdif7ErmWlVp1QiXE,40821 -pip/_vendor/packaging/tags.py,sha256=cXLV1pJD3UtJlDg7Wz3zrfdQhRZqr8jumSAKKAAd2xE,22856 -pip/_vendor/packaging/utils.py,sha256=N4c6oZzFJy6klTZ3AnkNz7sSkJesuFWPp68LA3B5dAo,5040 -pip/_vendor/packaging/version.py,sha256=RVRKq8_GD5Bcak6E1kGG8K7siNZYW9n_XK8M2ZLl0H8,23284 -pip/_vendor/pkg_resources/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 -pip/_vendor/pkg_resources/__init__.py,sha256=vbTJ0_ruUgGxQjlEqsruFmiNPVyh2t9q-zyTDT053xI,124451 -pip/_vendor/pkg_resources/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/platformdirs/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089 -pip/_vendor/platformdirs/__init__.py,sha256=UfeSHWl8AeTtbOBOoHAxK4dODOWkZtfy-m_i7cWdJ8c,22344 -pip/_vendor/platformdirs/__main__.py,sha256=jBJ8zb7Mpx5ebcqF83xrpO94MaeCpNGHVf9cvDN2JLg,1505 -pip/_vendor/platformdirs/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/android.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/api.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/macos.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/unix.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/version.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/windows.cpython-313.pyc,, -pip/_vendor/platformdirs/android.py,sha256=r0DshVBf-RO1jXJGX8C4Til7F1XWt-bkdWMgmvEiaYg,9013 -pip/_vendor/platformdirs/api.py,sha256=wPHOlwOsfz2oqQZ6A2FcCu5kEAj-JondzoNOHYFQ0h8,9281 -pip/_vendor/platformdirs/macos.py,sha256=0XoOgin1NK7Qki7iskD-oS8xKxw6bXgoKEgdqpCRAFQ,6322 -pip/_vendor/platformdirs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/platformdirs/unix.py,sha256=WZmkUA--L3JNRGmz32s35YfoD3ica6xKIPdCV_HhLcs,10458 -pip/_vendor/platformdirs/version.py,sha256=BI_dKLSMwlkl57vlxZnT8oVjPiUC2W_sdx_8_h99HeQ,704 -pip/_vendor/platformdirs/windows.py,sha256=XvCfklGUMVxJbXit51jpYMN-lNeScPB82qS1CAeplL0,10362 -pip/_vendor/pygments/LICENSE,sha256=qdZvHVJt8C4p3Oc0NtNOVuhjL0bCdbvf_HBWnogvnxc,1331 -pip/_vendor/pygments/__init__.py,sha256=8uNqJCCwXqbEx5aSsBr0FykUQOBDKBihO5mPqiw1aqo,2983 -pip/_vendor/pygments/__main__.py,sha256=WrndpSe6i1ckX_SQ1KaxD9CTKGzD0EuCOFxcbwFpoLU,353 -pip/_vendor/pygments/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/console.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/filter.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/formatter.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/lexer.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/modeline.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/plugin.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/regexopt.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/scanner.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/sphinxext.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/style.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/token.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/unistring.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/util.cpython-313.pyc,, -pip/_vendor/pygments/console.py,sha256=AagDWqwea2yBWf10KC9ptBgMpMjxKp8yABAmh-NQOVk,1718 -pip/_vendor/pygments/filter.py,sha256=YLtpTnZiu07nY3oK9nfR6E9Y1FBHhP5PX8gvkJWcfag,1910 -pip/_vendor/pygments/filters/__init__.py,sha256=4U4jtA0X3iP83uQnB9-TI-HDSw8E8y8zMYHa0UjbbaI,40392 -pip/_vendor/pygments/filters/__pycache__/__init__.cpython-313.pyc,, 
-pip/_vendor/pygments/formatter.py,sha256=KZQMmyo_xkOIkQG8g66LYEkBh1bx7a0HyGCBcvhI9Ew,4390 -pip/_vendor/pygments/formatters/__init__.py,sha256=KTwBmnXlaopJhQDOemVHYHskiDghuq-08YtP6xPNJPg,5385 -pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-313.pyc,, -pip/_vendor/pygments/formatters/_mapping.py,sha256=1Cw37FuQlNacnxRKmtlPX4nyLoX9_ttko5ZwscNUZZ4,4176 -pip/_vendor/pygments/lexer.py,sha256=_kBrOJ_NT5Tl0IVM0rA9c8eysP6_yrlGzEQI0eVYB-A,35349 -pip/_vendor/pygments/lexers/__init__.py,sha256=wbIME35GH7bI1B9rNPJFqWT-ij_RApZDYPUlZycaLzA,12115 -pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-313.pyc,, -pip/_vendor/pygments/lexers/__pycache__/python.cpython-313.pyc,, -pip/_vendor/pygments/lexers/_mapping.py,sha256=l4tCXM8e9aPC2BD6sjIr0deT-J-z5tHgCwL-p1fS0PE,77602 -pip/_vendor/pygments/lexers/python.py,sha256=vxjn1cOHclIKJKxoyiBsQTY65GHbkZtZRuKQ2AVCKaw,53853 -pip/_vendor/pygments/modeline.py,sha256=K5eSkR8GS1r5OkXXTHOcV0aM_6xpk9eWNEIAW-OOJ2g,1005 -pip/_vendor/pygments/plugin.py,sha256=tPx0rJCTIZ9ioRgLNYG4pifCbAwTRUZddvLw-NfAk2w,1891 -pip/_vendor/pygments/regexopt.py,sha256=wXaP9Gjp_hKAdnICqoDkRxAOQJSc4v3X6mcxx3z-TNs,3072 -pip/_vendor/pygments/scanner.py,sha256=nNcETRR1tRuiTaHmHSTTECVYFPcLf6mDZu1e4u91A9E,3092 -pip/_vendor/pygments/sphinxext.py,sha256=5x7Zh9YlU6ISJ31dMwduiaanb5dWZnKg3MyEQsseNnQ,7981 -pip/_vendor/pygments/style.py,sha256=PlOZqlsnTVd58RGy50vkA2cXQ_lP5bF5EGMEBTno6DA,6420 -pip/_vendor/pygments/styles/__init__.py,sha256=x9ebctfyvCAFpMTlMJ5YxwcNYBzjgq6zJaKkNm78r4M,2042 -pip/_vendor/pygments/styles/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-313.pyc,, -pip/_vendor/pygments/styles/_mapping.py,sha256=6lovFUE29tz6EsV3XYY4hgozJ7q1JL7cfO3UOlgnS8w,3312 -pip/_vendor/pygments/token.py,sha256=WbdWGhYm_Vosb0DDxW9lHNPgITXfWTsQmHt6cy9RbcM,6226 -pip/_vendor/pygments/unistring.py,sha256=al-_rBemRuGvinsrM6atNsHTmJ6DUbw24q2O2Ru1cBc,63208 -pip/_vendor/pygments/util.py,sha256=oRtSpiAo5jM9ulntkvVbgXUdiAW57jnuYGB7t9fYuhc,10031 -pip/_vendor/pyproject_hooks/LICENSE,sha256=GyKwSbUmfW38I6Z79KhNjsBLn9-xpR02DkK0NCyLQVQ,1081 -pip/_vendor/pyproject_hooks/__init__.py,sha256=cPB_a9LXz5xvsRbX1o2qyAdjLatZJdQ_Lc5McNX-X7Y,691 -pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-313.pyc,, -pip/_vendor/pyproject_hooks/_impl.py,sha256=jY-raxnmyRyB57ruAitrJRUzEexuAhGTpgMygqx67Z4,14936 -pip/_vendor/pyproject_hooks/_in_process/__init__.py,sha256=MJNPpfIxcO-FghxpBbxkG1rFiQf6HOUbV4U5mq0HFns,557 -pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-313.pyc,, -pip/_vendor/pyproject_hooks/_in_process/_in_process.py,sha256=qcXMhmx__MIJq10gGHW3mA4Tl8dy8YzHMccwnNoKlw0,12216 -pip/_vendor/pyproject_hooks/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/requests/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 -pip/_vendor/requests/__init__.py,sha256=HlB_HzhrzGtfD_aaYUwUh1zWXLZ75_YCLyit75d0Vz8,5057 -pip/_vendor/requests/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/__version__.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/_internal_utils.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/adapters.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/api.cpython-313.pyc,, 
-pip/_vendor/requests/__pycache__/auth.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/certs.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/compat.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/cookies.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/exceptions.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/help.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/hooks.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/models.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/packages.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/sessions.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/status_codes.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/structures.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/utils.cpython-313.pyc,, -pip/_vendor/requests/__version__.py,sha256=QKDceK8K_ujqwDDc3oYrR0odOBYgKVOQQ5vFap_G_cg,435 -pip/_vendor/requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495 -pip/_vendor/requests/adapters.py,sha256=2MLFOK9GpYNhiTd6zLDUrAgSkIB-76i6pmSuUJjHC2w,26429 -pip/_vendor/requests/api.py,sha256=_Zb9Oa7tzVIizTKwFrPjDEY9ejtm_OnSRERnADxGsQs,6449 -pip/_vendor/requests/auth.py,sha256=kF75tqnLctZ9Mf_hm9TZIj4cQWnN5uxRz8oWsx5wmR0,10186 -pip/_vendor/requests/certs.py,sha256=kHDlkK_beuHXeMPc5jta2wgl8gdKeUWt5f2nTDVrvt8,441 -pip/_vendor/requests/compat.py,sha256=QfbmdTFiZzjSHMXiMrd4joCRU6RabtQ9zIcPoVaHIus,1822 -pip/_vendor/requests/cookies.py,sha256=bNi-iqEj4NPZ00-ob-rHvzkvObzN3lEpgw3g6paS3Xw,18590 -pip/_vendor/requests/exceptions.py,sha256=D1wqzYWne1mS2rU43tP9CeN1G7QAy7eqL9o1god6Ejw,4272 -pip/_vendor/requests/help.py,sha256=hRKaf9u0G7fdwrqMHtF3oG16RKktRf6KiwtSq2Fo1_0,3813 -pip/_vendor/requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733 -pip/_vendor/requests/models.py,sha256=taljlg6vJ4b-xMu2TaMNFFkaiwMex_VsEQ6qUTN3wzY,35575 -pip/_vendor/requests/packages.py,sha256=_ZQDCJTJ8SP3kVWunSqBsRZNPzj2c1WFVqbdr08pz3U,1057 -pip/_vendor/requests/sessions.py,sha256=Cl1dpEnOfwrzzPbku-emepNeN4Rt_0_58Iy2x-JGTm8,30503 -pip/_vendor/requests/status_codes.py,sha256=iJUAeA25baTdw-6PfD0eF4qhpINDJRJI-yaMqxs4LEI,4322 -pip/_vendor/requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912 -pip/_vendor/requests/utils.py,sha256=WS3wHSQaaEfceu1syiFo5jf4e_CWKUTep_IabOVI_J0,33225 -pip/_vendor/resolvelib/LICENSE,sha256=84j9OMrRMRLB3A9mm76A5_hFQe26-3LzAw0sp2QsPJ0,751 -pip/_vendor/resolvelib/__init__.py,sha256=yoX-d4STvwGGCiQRE5cJC9Cter69SgVgqClxOCvSP7M,541 -pip/_vendor/resolvelib/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/resolvelib/__pycache__/providers.cpython-313.pyc,, -pip/_vendor/resolvelib/__pycache__/reporters.cpython-313.pyc,, -pip/_vendor/resolvelib/__pycache__/structs.cpython-313.pyc,, -pip/_vendor/resolvelib/providers.py,sha256=pIWJbIdJJ9GFtNbtwTH0Ia43Vj6hYCEJj2DOLue15FM,8914 -pip/_vendor/resolvelib/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/resolvelib/reporters.py,sha256=pNJf4nFxLpAeKxlBUi2GEj0a2Ij1nikY0UabTKXesT4,2037 -pip/_vendor/resolvelib/resolvers/__init__.py,sha256=728M3EvmnPbVXS7ExXlv2kMu6b7wEsoPutEfl-uVk_I,640 -pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-313.pyc,, -pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-313.pyc,, -pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-313.pyc,, -pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-313.pyc,, 
-pip/_vendor/resolvelib/resolvers/abstract.py,sha256=CNeQPnpAudY77nmzOkONSmAgRlzIf06X-X9mvRYODms,1543 -pip/_vendor/resolvelib/resolvers/criterion.py,sha256=lcmZGv5sKHOnFD_RzZwvlGSj19MeA-5rCMpdf2Sgw7Y,1768 -pip/_vendor/resolvelib/resolvers/exceptions.py,sha256=ln_jaQtgLlRUSFY627yiHG2gD7AgaXzRKaElFVh7fDQ,1768 -pip/_vendor/resolvelib/resolvers/resolution.py,sha256=3J_zkW-sD3EY-BlNXjyln__njpyH5n0UZJT6uV7CheA,24212 -pip/_vendor/resolvelib/structs.py,sha256=pu-EJiR2IBITr2SQeNPRa0rXhjlStfmO_GEgAhr3004,6420 -pip/_vendor/rich/LICENSE,sha256=3u18F6QxgVgZCj6iOcyHmlpQJxzruYrnAl9I--WNyhU,1056 -pip/_vendor/rich/__init__.py,sha256=dRxjIL-SbFVY0q3IjSMrfgBTHrm1LZDgLOygVBwiYZc,6090 -pip/_vendor/rich/__main__.py,sha256=e_aVC-tDzarWQW9SuZMuCgBr6ODV_iDNV2Wh2xkxOlw,7896 -pip/_vendor/rich/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_cell_widths.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_emoji_codes.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_emoji_replace.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_export_format.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_extension.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_fileno.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_inspect.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_log_render.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_loop.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_null_file.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_palettes.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_pick.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_ratio.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_spinners.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_stack.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_timer.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_win32_console.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_windows.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_windows_renderer.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_wrap.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/abc.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/align.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/ansi.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/bar.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/box.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/cells.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/color.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/color_triplet.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/columns.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/console.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/constrain.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/containers.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/control.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/default_styles.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/diagnose.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/emoji.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/errors.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/file_proxy.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/filesize.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/highlighter.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/json.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/jupyter.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/layout.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/live.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/live_render.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/logging.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/markup.cpython-313.pyc,, 
-pip/_vendor/rich/__pycache__/measure.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/padding.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/pager.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/palette.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/panel.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/pretty.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/progress.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/progress_bar.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/prompt.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/protocol.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/region.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/repr.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/rule.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/scope.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/screen.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/segment.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/spinner.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/status.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/style.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/styled.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/syntax.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/table.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/terminal_theme.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/text.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/theme.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/themes.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/traceback.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/tree.cpython-313.pyc,, -pip/_vendor/rich/_cell_widths.py,sha256=fbmeyetEdHjzE_Vx2l1uK7tnPOhMs2X1lJfO3vsKDpA,10209 -pip/_vendor/rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235 -pip/_vendor/rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064 -pip/_vendor/rich/_export_format.py,sha256=RI08pSrm5tBSzPMvnbTqbD9WIalaOoN5d4M1RTmLq1Y,2128 -pip/_vendor/rich/_extension.py,sha256=Xt47QacCKwYruzjDi-gOBq724JReDj9Cm9xUi5fr-34,265 -pip/_vendor/rich/_fileno.py,sha256=HWZxP5C2ajMbHryvAQZseflVfQoGzsKOHzKGsLD8ynQ,799 -pip/_vendor/rich/_inspect.py,sha256=ROT0PLC2GMWialWZkqJIjmYq7INRijQQkoSokWTaAiI,9656 -pip/_vendor/rich/_log_render.py,sha256=1ByI0PA1ZpxZY3CGJOK54hjlq4X-Bz_boIjIqCd8Kns,3225 -pip/_vendor/rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236 -pip/_vendor/rich/_null_file.py,sha256=ADGKp1yt-k70FMKV6tnqCqecB-rSJzp-WQsD7LPL-kg,1394 -pip/_vendor/rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063 -pip/_vendor/rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423 -pip/_vendor/rich/_ratio.py,sha256=IOtl78sQCYZsmHyxhe45krkb68u9xVz7zFsXVJD-b2Y,5325 -pip/_vendor/rich/_spinners.py,sha256=U2r1_g_1zSjsjiUdAESc2iAMc3i4ri_S8PYP6kQ5z1I,19919 -pip/_vendor/rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351 -pip/_vendor/rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417 -pip/_vendor/rich/_win32_console.py,sha256=BSaDRIMwBLITn_m0mTRLPqME5q-quGdSMuYMpYeYJwc,22755 -pip/_vendor/rich/_windows.py,sha256=aBwaD_S56SbgopIvayVmpk0Y28uwY2C5Bab1wl3Bp-I,1925 -pip/_vendor/rich/_windows_renderer.py,sha256=t74ZL3xuDCP3nmTp9pH1L5LiI2cakJuQRQleHCJerlk,2783 -pip/_vendor/rich/_wrap.py,sha256=FlSsom5EX0LVkA3KWy34yHnCfLtqX-ZIepXKh-70rpc,3404 -pip/_vendor/rich/abc.py,sha256=ON-E-ZqSSheZ88VrKX2M3PXpFbGEUUZPMa_Af0l-4f0,890 -pip/_vendor/rich/align.py,sha256=dg-7uY0ukMLLlUEsBDRLva22_sQgIJD4BK0dmZHFHug,10324 -pip/_vendor/rich/ansi.py,sha256=Avs1LHbSdcyOvDOdpELZUoULcBiYewY76eNBp6uFBhs,6921 
-pip/_vendor/rich/bar.py,sha256=ldbVHOzKJOnflVNuv1xS7g6dLX2E3wMnXkdPbpzJTcs,3263 -pip/_vendor/rich/box.py,sha256=kmavBc_dn73L_g_8vxWSwYJD2uzBXOUFTtJOfpbczcM,10686 -pip/_vendor/rich/cells.py,sha256=KrQkj5-LghCCpJLSNQIyAZjndc4bnEqOEmi5YuZ9UCY,5130 -pip/_vendor/rich/color.py,sha256=3HSULVDj7qQkXUdFWv78JOiSZzfy5y1nkcYhna296V0,18211 -pip/_vendor/rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054 -pip/_vendor/rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131 -pip/_vendor/rich/console.py,sha256=t9azZpmRMVU5cphVBZSShNsmBxd2-IAWcTTlhor-E1s,100849 -pip/_vendor/rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288 -pip/_vendor/rich/containers.py,sha256=c_56TxcedGYqDepHBMTuZdUIijitAQgnox-Qde0Z1qo,5502 -pip/_vendor/rich/control.py,sha256=EUTSUFLQbxY6Zmo_sdM-5Ls323vIHTBfN8TPulqeHUY,6487 -pip/_vendor/rich/default_styles.py,sha256=khQFqqaoDs3bprMqWpHw8nO5UpG2DN6QtuTd6LzZwYc,8257 -pip/_vendor/rich/diagnose.py,sha256=fJl1TItRn19gGwouqTg-8zPUW3YqQBqGltrfPQs1H9w,1025 -pip/_vendor/rich/emoji.py,sha256=Wd4bQubZdSy6-PyrRQNuMHtn2VkljK9uPZPVlu2cmx0,2367 -pip/_vendor/rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642 -pip/_vendor/rich/file_proxy.py,sha256=Tl9THMDZ-Pk5Wm8sI1gGg_U5DhusmxD-FZ0fUbcU0W0,1683 -pip/_vendor/rich/filesize.py,sha256=_iz9lIpRgvW7MNSeCZnLg-HwzbP4GETg543WqD8SFs0,2484 -pip/_vendor/rich/highlighter.py,sha256=G_sn-8DKjM1sEjLG_oc4ovkWmiUpWvj8bXi0yed2LnY,9586 -pip/_vendor/rich/json.py,sha256=vVEoKdawoJRjAFayPwXkMBPLy7RSTs-f44wSQDR2nJ0,5031 -pip/_vendor/rich/jupyter.py,sha256=QyoKoE_8IdCbrtiSHp9TsTSNyTHY0FO5whE7jOTd9UE,3252 -pip/_vendor/rich/layout.py,sha256=ajkSFAtEVv9EFTcFs-w4uZfft7nEXhNzL7ZVdgrT5rI,14004 -pip/_vendor/rich/live.py,sha256=tF3ukAAJZ_N2ZbGclqZ-iwLoIoZ8f0HHUz79jAyJqj8,15180 -pip/_vendor/rich/live_render.py,sha256=It_39YdzrBm8o3LL0kaGorPFg-BfZWAcrBjLjFokbx4,3521 -pip/_vendor/rich/logging.py,sha256=5KaPPSMP9FxcXPBcKM4cGd_zW78PMgf-YbMVnvfSw0o,12468 -pip/_vendor/rich/markup.py,sha256=3euGKP5s41NCQwaSjTnJxus5iZMHjxpIM0W6fCxra38,8451 -pip/_vendor/rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305 -pip/_vendor/rich/padding.py,sha256=KVEI3tOwo9sgK1YNSuH__M1_jUWmLZwRVV_KmOtVzyM,4908 -pip/_vendor/rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828 -pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396 -pip/_vendor/rich/panel.py,sha256=9sQl00hPIqH5G2gALQo4NepFwpP0k9wT-s_gOms5pIc,11157 -pip/_vendor/rich/pretty.py,sha256=gy3S72u4FRg2ytoo7N1ZDWDIvB4unbzd5iUGdgm-8fc,36391 -pip/_vendor/rich/progress.py,sha256=CUc2lkU-X59mVdGfjMCBkZeiGPL3uxdONjhNJF2T7wY,60408 -pip/_vendor/rich/progress_bar.py,sha256=mZTPpJUwcfcdgQCTTz3kyY-fc79ddLwtx6Ghhxfo064,8162 -pip/_vendor/rich/prompt.py,sha256=l0RhQU-0UVTV9e08xW1BbIj0Jq2IXyChX4lC0lFNzt4,12447 -pip/_vendor/rich/protocol.py,sha256=5hHHDDNHckdk8iWH5zEbi-zuIVSF5hbU2jIo47R7lTE,1391 -pip/_vendor/rich/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166 -pip/_vendor/rich/repr.py,sha256=5MZJZmONgC6kud-QW-_m1okXwL2aR6u6y-pUcUCJz28,4431 -pip/_vendor/rich/rule.py,sha256=0fNaS_aERa3UMRc3T5WMpN_sumtDxfaor2y3of1ftBk,4602 -pip/_vendor/rich/scope.py,sha256=TMUU8qo17thyqQCPqjDLYpg_UU1k5qVd-WwiJvnJVas,2843 -pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591 -pip/_vendor/rich/segment.py,sha256=otnKeKGEV-WRlQVosfJVeFDcDxAKHpvJ_hLzSu5lumM,24743 
-pip/_vendor/rich/spinner.py,sha256=onIhpKlljRHppTZasxO8kXgtYyCHUkpSgKglRJ3o51g,4214 -pip/_vendor/rich/status.py,sha256=kkPph3YeAZBo-X-4wPp8gTqZyU466NLwZBA4PZTTewo,4424 -pip/_vendor/rich/style.py,sha256=W9Ccy8Py8lNICtlfcp-ryzMTuQaGxAU3av7-g5fHu0s,26990 -pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258 -pip/_vendor/rich/syntax.py,sha256=eDKIRwl--eZ0Lwo2da2RRtfutXGavrJO61Cl5OkS59U,36371 -pip/_vendor/rich/table.py,sha256=ZmT7V7MMCOYKw7TGY9SZLyYDf6JdM-WVf07FdVuVhTI,40049 -pip/_vendor/rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370 -pip/_vendor/rich/text.py,sha256=AO7JPCz6-gaN1thVLXMBntEmDPVYFgFNG1oM61_sanU,47552 -pip/_vendor/rich/theme.py,sha256=oNyhXhGagtDlbDye3tVu3esWOWk0vNkuxFw-_unlaK0,3771 -pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102 -pip/_vendor/rich/traceback.py,sha256=c0WmB_L04_UfZbLaoH982_U_s7eosxKMUiAVmDPdRYU,35861 -pip/_vendor/rich/tree.py,sha256=yWnQ6rAvRGJ3qZGqBrxS2SW2TKBTNrP0SdY8QxOFPuw,9451 -pip/_vendor/tomli/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 -pip/_vendor/tomli/__init__.py,sha256=qzEGl8QHhqgQPCuLzfKyPIuH3KKPspf-UVPbZ0ppBD4,314 -pip/_vendor/tomli/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/tomli/__pycache__/_parser.cpython-313.pyc,, -pip/_vendor/tomli/__pycache__/_re.cpython-313.pyc,, -pip/_vendor/tomli/__pycache__/_types.cpython-313.pyc,, -pip/_vendor/tomli/_parser.py,sha256=bO8tUYmnyA2K6m4TnbQbfUqmIFcDv7mG1KuC9gqRVmA,25778 -pip/_vendor/tomli/_re.py,sha256=n8-Io8ZK1U-F6jzlg7Pabc40hLFJsawE2uNLKH9w7iU,3235 -pip/_vendor/tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254 -pip/_vendor/tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 -pip/_vendor/tomli_w/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 -pip/_vendor/tomli_w/__init__.py,sha256=0F8yDtXx3Uunhm874KrAcP76srsM98y7WyHQwCulZbo,169 -pip/_vendor/tomli_w/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/tomli_w/__pycache__/_writer.cpython-313.pyc,, -pip/_vendor/tomli_w/_writer.py,sha256=dsifFS2xYf1i76mmRyfz9y125xC7Z_HQ845ZKhJsYXs,6961 -pip/_vendor/tomli_w/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 -pip/_vendor/truststore/LICENSE,sha256=M757fo-k_Rmxdg4ajtimaL2rhSyRtpLdQUJLy3Jan8o,1086 -pip/_vendor/truststore/__init__.py,sha256=Bu7kqkmpunhLsj5xCu8gT_25ktoPXcSnwe8VHk1GmJo,1320 -pip/_vendor/truststore/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/truststore/__pycache__/_api.cpython-313.pyc,, -pip/_vendor/truststore/__pycache__/_macos.cpython-313.pyc,, -pip/_vendor/truststore/__pycache__/_openssl.cpython-313.pyc,, -pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-313.pyc,, -pip/_vendor/truststore/__pycache__/_windows.cpython-313.pyc,, -pip/_vendor/truststore/_api.py,sha256=CYJCV5BTfttZYfqY3movdMBE-8az7uhET_LYbKT2Nn4,11413 -pip/_vendor/truststore/_macos.py,sha256=nZlLkOmszUE0g6ryRwBVGY5COzPyudcsiJtDWarM5LQ,20503 -pip/_vendor/truststore/_openssl.py,sha256=zB-SQvJydks7tQ0yIwrP6GD3fQNSSaPiq7zw4yF5T40,2412 -pip/_vendor/truststore/_ssl_constants.py,sha256=NUD4fVKdSD02ri7-db0tnO0VqLP9aHuzmStcW7tAl08,1130 -pip/_vendor/truststore/_windows.py,sha256=rAHyKYD8M7t-bXfG8VgOVa3TpfhVhbt4rZQlO45YuP8,17993 -pip/_vendor/truststore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/urllib3/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115 -pip/_vendor/urllib3/__init__.py,sha256=iXLcYiJySn0GNbWOOZDDApgBL1JgP44EZ8i1760S8Mc,3333 -pip/_vendor/urllib3/__pycache__/__init__.cpython-313.pyc,, 
-pip/_vendor/urllib3/__pycache__/_collections.cpython-313.pyc,, -pip/_vendor/urllib3/__pycache__/_version.cpython-313.pyc,, -pip/_vendor/urllib3/__pycache__/connection.cpython-313.pyc,, -pip/_vendor/urllib3/__pycache__/connectionpool.cpython-313.pyc,, -pip/_vendor/urllib3/__pycache__/exceptions.cpython-313.pyc,, -pip/_vendor/urllib3/__pycache__/fields.cpython-313.pyc,, -pip/_vendor/urllib3/__pycache__/filepost.cpython-313.pyc,, -pip/_vendor/urllib3/__pycache__/poolmanager.cpython-313.pyc,, -pip/_vendor/urllib3/__pycache__/request.cpython-313.pyc,, -pip/_vendor/urllib3/__pycache__/response.cpython-313.pyc,, -pip/_vendor/urllib3/_collections.py,sha256=pyASJJhW7wdOpqJj9QJA8FyGRfr8E8uUUhqUvhF0728,11372 -pip/_vendor/urllib3/_version.py,sha256=t9wGB6ooOTXXgiY66K1m6BZS1CJyXHAU8EoWDTe6Shk,64 -pip/_vendor/urllib3/connection.py,sha256=ttIA909BrbTUzwkqEe_TzZVh4JOOj7g61Ysei2mrwGg,20314 -pip/_vendor/urllib3/connectionpool.py,sha256=e2eiAwNbFNCKxj4bwDKNK-w7HIdSz3OmMxU_TIt-evQ,40408 -pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-313.pyc,, -pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-313.pyc,, -pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-313.pyc,, -pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-313.pyc,, -pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-313.pyc,, -pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-313.pyc,, -pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957 -pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-313.pyc,, -pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-313.pyc,, -pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632 -pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922 -pip/_vendor/urllib3/contrib/appengine.py,sha256=VR68eAVE137lxTgjBDwCna5UiBZTOKa01Aj_-5BaCz4,11036 -pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=NlfkW7WMdW8ziqudopjHoW299og1BTWi0IeIibquFwk,4528 -pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=hDJh4MhyY_p-oKlFcYcQaVQRDv6GMmBGuW9yjxyeejM,17081 -pip/_vendor/urllib3/contrib/securetransport.py,sha256=Fef1IIUUFHqpevzXiDPbIGkDKchY2FVKeVeLGR1Qq3g,34446 -pip/_vendor/urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097 -pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217 -pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579 -pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440 -pip/_vendor/urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/urllib3/packages/__pycache__/six.cpython-313.pyc,, -pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-313.pyc,, 
-pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-313.pyc,, -pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417 -pip/_vendor/urllib3/packages/backports/weakref_finalize.py,sha256=tRCal5OAhNSRyb0DhHp-38AtIlCsRP8BxF3NX-6rqIA,5343 -pip/_vendor/urllib3/packages/six.py,sha256=b9LM0wBXv7E7SrbCjAm4wwN-hrH-iNxv18LgWNMMKPo,34665 -pip/_vendor/urllib3/poolmanager.py,sha256=aWyhXRtNO4JUnCSVVqKTKQd8EXTvUm1VN9pgs2bcONo,19990 -pip/_vendor/urllib3/request.py,sha256=YTWFNr7QIwh7E1W9dde9LM77v2VWTJ5V78XuTTw7D1A,6691 -pip/_vendor/urllib3/response.py,sha256=fmDJAFkG71uFTn-sVSTh2Iw0WmcXQYqkbRjihvwBjU8,30641 -pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155 -pip/_vendor/urllib3/util/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/connection.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/proxy.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/queue.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/request.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/response.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/retry.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/timeout.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/url.cpython-313.pyc,, -pip/_vendor/urllib3/util/__pycache__/wait.cpython-313.pyc,, -pip/_vendor/urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901 -pip/_vendor/urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605 -pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498 -pip/_vendor/urllib3/util/request.py,sha256=C0OUt2tcU6LRiQJ7YYNP9GvPrSvl7ziIBekQ-5nlBZk,3997 -pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510 -pip/_vendor/urllib3/util/retry.py,sha256=6ENvOZ8PBDzh8kgixpql9lIrb2dxH-k7ZmBanJF2Ng4,22050 -pip/_vendor/urllib3/util/ssl_.py,sha256=QDuuTxPSCj1rYtZ4xpD7Ux-r20TD50aHyqKyhQ7Bq4A,17460 -pip/_vendor/urllib3/util/ssl_match_hostname.py,sha256=Ir4cZVEjmAk8gUAIHWSi7wtOO83UCYABY2xFD1Ql_WA,5758 -pip/_vendor/urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895 -pip/_vendor/urllib3/util/timeout.py,sha256=cwq4dMk87mJHSBktK1miYJ-85G-3T3RmT20v7SFCpno,10168 -pip/_vendor/urllib3/util/url.py,sha256=lCAE7M5myA8EDdW0sJuyyZhVB9K_j38ljWhHAnFaWoE,14296 -pip/_vendor/urllib3/util/wait.py,sha256=fOX0_faozG2P7iVojQoE1mbydweNyTcm-hXEfFrTtLI,5403 -pip/_vendor/vendor.txt,sha256=f2msFLZ-chXWIZSKW31NLGyMWmt_-Vfy7sY5dHYgmnw,342 -pip/py.typed,sha256=EBVvvPRTn_eIpz5e5QztSCdrMX7Qwd7VP93RSoIlZ2I,286 diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index afbe95d0..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-313.pyc deleted file mode 100644 index d2031eb1..00000000 Binary files 
a/Python313_13_x64_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-313.pyc deleted file mode 100644 index 803eec91..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 66acefd5..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-313.pyc deleted file mode 100644 index 52477dfd..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-313.pyc deleted file mode 100644 index 25da598c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-313.pyc deleted file mode 100644 index 5d44dcfd..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-313.pyc deleted file mode 100644 index 71912379..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-313.pyc deleted file mode 100644 index 76669041..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-313.pyc deleted file mode 100644 index 7870f63f..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-313.pyc deleted file mode 100644 index c5706505..00000000 Binary files 
a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-313.pyc deleted file mode 100644 index d4d4cc79..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 8d8cc9a1..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-313.pyc deleted file mode 100644 index e3f8d406..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-313.pyc deleted file mode 100644 index 677cb898..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-313.pyc deleted file mode 100644 index dcef927c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-313.pyc deleted file mode 100644 index b47bdecd..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-313.pyc deleted file mode 100644 index 543a030f..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-313.pyc deleted file mode 100644 index 5aef722c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-313.pyc deleted file mode 100644 index 61a900a5..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-313.pyc deleted file mode 100644 index 815dc5b3..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-313.pyc deleted file mode 100644 index d69a8919..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-313.pyc deleted file mode 100644 index 15b8c92e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-313.pyc deleted file mode 100644 index 8627fe73..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-313.pyc deleted file mode 100644 index 0c35826b..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 19249046..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-313.pyc deleted file mode 100644 index b6a91b44..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-313.pyc deleted file mode 100644 index a814a848..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-313.pyc and /dev/null differ diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-313.pyc deleted file mode 100644 index b980b174..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-313.pyc deleted file mode 100644 index ab820b3d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-313.pyc deleted file mode 100644 index 9638fc63..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-313.pyc deleted file mode 100644 index 4ca127b8..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-313.pyc deleted file mode 100644 index f21f0fdb..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-313.pyc deleted file mode 100644 index 47767eda..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-313.pyc deleted file mode 100644 index 135fbb17..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-313.pyc deleted file mode 100644 index 6b1336e9..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-313.pyc deleted file mode 100644 index 8097f1bd..00000000 Binary files 
a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-313.pyc deleted file mode 100644 index 0793df1d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-313.pyc deleted file mode 100644 index 05667ed3..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-313.pyc deleted file mode 100644 index 8e2f4c34..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-313.pyc deleted file mode 100644 index f5fe518c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-313.pyc deleted file mode 100644 index 5b3dd6dd..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-313.pyc deleted file mode 100644 index af84f33e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-313.pyc deleted file mode 100644 index d1b6b085..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 8572cd02..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-313.pyc deleted file mode 100644 index 96e4ac4c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-313.pyc deleted file mode 100644 index 8c356aac..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-313.pyc deleted file mode 100644 index 63037d4d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-313.pyc deleted file mode 100644 index 94e003c2..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 0259a2ab..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-313.pyc deleted file mode 100644 index 8b268cd1..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-313.pyc deleted file mode 100644 index fe01da1c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-313.pyc deleted file mode 100644 index c630e21f..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 8f3c93b6..00000000 Binary files 
a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-313.pyc deleted file mode 100644 index b3ae457d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-313.pyc deleted file mode 100644 index f5eafb33..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-313.pyc deleted file mode 100644 index 12ede3ee..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 1354ce90..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-313.pyc deleted file mode 100644 index 290f857c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-313.pyc deleted file mode 100644 index 71c3d029..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-313.pyc deleted file mode 100644 index ea8878d1..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index c4e5372f..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-313.pyc deleted file mode 100644 index 38eb38b5..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-313.pyc deleted file mode 100644 index b29ab228..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-313.pyc deleted file mode 100644 index 7b00ac64..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 491dd945..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-313.pyc deleted file mode 100644 index f6e1736c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-313.pyc deleted file mode 100644 index c88955a3..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-313.pyc deleted file mode 100644 index 4001a35c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-313.pyc deleted file mode 100644 index eb8f17ac..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-313.pyc deleted file mode 100644 index 6044fdec..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-313.pyc deleted file mode 100644 index 50f6c5f4..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-313.pyc deleted file mode 100644 index 80dae106..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-313.pyc deleted file mode 100644 index 34861b61..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-313.pyc deleted file mode 100644 index 4ddaaee6..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-313.pyc deleted file mode 100644 index fd99a948..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-313.pyc deleted file mode 100644 index 98ab1b49..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-313.pyc deleted file mode 100644 index aafba18e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index cf002252..00000000 Binary files 
a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-313.pyc deleted file mode 100644 index fb83c171..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-313.pyc deleted file mode 100644 index 7f8bf36c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-313.pyc deleted file mode 100644 index cc5113e1..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-313.pyc deleted file mode 100644 index a9e62436..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-313.pyc deleted file mode 100644 index bfdcb126..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-313.pyc deleted file mode 100644 index 9509cd8e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-313.pyc deleted file mode 100644 index c6a680e3..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 06aa0e01..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-313.pyc deleted file mode 100644 index 557f8739..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-313.pyc deleted file mode 100644 index 87b37729..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-313.pyc deleted file mode 100644 index fde535f9..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 83e4faaa..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-313.pyc deleted file mode 100644 index b9647ba5..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-313.pyc deleted file mode 100644 index 994167f4..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-313.pyc deleted file mode 100644 index 2a2e1401..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-313.pyc deleted file mode 100644 index b612a4a0..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-313.pyc deleted file mode 100644 index f515416f..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 056653b2..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-313.pyc deleted file mode 100644 index 0a33125c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index e35e6013..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-313.pyc deleted file mode 100644 index d05dd4b3..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-313.pyc deleted file mode 100644 index 5ad7fd2e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-313.pyc deleted file mode 100644 index 6579edd3..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-313.pyc deleted file mode 100644 index 99760bda..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-313.pyc deleted file mode 100644 index 465a3a0d..00000000 Binary files 
a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-313.pyc deleted file mode 100644 index 4ca27be0..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-313.pyc deleted file mode 100644 index e806712e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index de18c5b3..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-313.pyc deleted file mode 100644 index 9b367701..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index a8a96627..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-313.pyc deleted file mode 100644 index 2dc0765d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index c41e2523..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-313.pyc deleted file mode 100644 index f7a5500d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-313.pyc and /dev/null differ diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-313.pyc deleted file mode 100644 index da154f13..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-313.pyc deleted file mode 100644 index 50ea7a61..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-313.pyc deleted file mode 100644 index dffc5755..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-313.pyc deleted file mode 100644 index a8835311..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-313.pyc deleted file mode 100644 index d5126600..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-313.pyc deleted file mode 100644 index 27686108..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-313.pyc deleted file mode 100644 index da4235f6..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 3d345aa4..00000000 Binary files 
[Binary deletions condensed for readability: this hunk continues with several hundred near-identical records, each removing one compiled bytecode cache file from the old Python313_13_x64_Template tree. Every record follows the same pattern:

diff --git a/Python313_13_x64_Template/<path>/__pycache__/<module>.cpython-313.pyc b/Python313_13_x64_Template/<path>/__pycache__/<module>.cpython-313.pyc
deleted file mode 100644
index <old-hash>..00000000
Binary files a/Python313_13_x64_Template/<path>/__pycache__/<module>.cpython-313.pyc and /dev/null differ

The __pycache__ directories affected in this span, all under Python313_13_x64_Template/Lib/site-packages, are:

- pip/_internal/utils/
- pip/_internal/vcs/
- pip/_vendor/
- pip/_vendor/cachecontrol/ (including caches/)
- pip/_vendor/certifi/
- pip/_vendor/dependency_groups/
- pip/_vendor/distlib/
- pip/_vendor/distro/
- pip/_vendor/idna/
- pip/_vendor/msgpack/
- pip/_vendor/packaging/ (including licenses/)
- pip/_vendor/pkg_resources/
- pip/_vendor/platformdirs/
- pip/_vendor/pygments/ (including filters/, formatters/, lexers/, styles/)
- pip/_vendor/pyproject_hooks/ (including _in_process/)
- pip/_vendor/requests/
- pip/_vendor/resolvelib/ (including resolvers/)
- pip/_vendor/rich/

These *.pyc files are interpreter-generated caches with no source content of their own; they are deleted here along with the rest of the retired 3.13 template.]
b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/box.cpython-313.pyc deleted file mode 100644 index 83c5fc56..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/box.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-313.pyc deleted file mode 100644 index 7b7b0133..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color.cpython-313.pyc deleted file mode 100644 index dd03b529..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color_triplet.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color_triplet.cpython-313.pyc deleted file mode 100644 index 339ecbab..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color_triplet.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/columns.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/columns.cpython-313.pyc deleted file mode 100644 index df4cad42..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/columns.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/console.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/console.cpython-313.pyc deleted file mode 100644 index 7d0b7269..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/console.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/constrain.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/constrain.cpython-313.pyc deleted file mode 100644 index b2643830..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/constrain.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/containers.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/containers.cpython-313.pyc deleted file mode 100644 index b42fb7c1..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/containers.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/control.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/control.cpython-313.pyc deleted file mode 100644 index a2274ce4..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/control.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/default_styles.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/default_styles.cpython-313.pyc deleted file mode 100644 index aba63270..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/default_styles.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/diagnose.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/diagnose.cpython-313.pyc deleted file mode 100644 index 9ecd861e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/diagnose.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/emoji.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/emoji.cpython-313.pyc deleted file mode 100644 index 9b6f5644..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/emoji.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/errors.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/errors.cpython-313.pyc deleted file mode 100644 index 8a937190..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/errors.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/file_proxy.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/file_proxy.cpython-313.pyc deleted file mode 100644 index be2e765c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/file_proxy.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/filesize.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/filesize.cpython-313.pyc deleted file mode 100644 index 5ac298f2..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/filesize.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/highlighter.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/highlighter.cpython-313.pyc deleted file mode 100644 index 21e61352..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/highlighter.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/json.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/json.cpython-313.pyc deleted file mode 100644 index e19f3016..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/json.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/jupyter.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/jupyter.cpython-313.pyc deleted file mode 100644 index 3b9a51fd..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/jupyter.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/layout.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/layout.cpython-313.pyc deleted file mode 100644 index d1d2c08e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/layout.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live.cpython-313.pyc deleted file mode 100644 index 324418cf..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live_render.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live_render.cpython-313.pyc deleted file mode 100644 index 75e5c96d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live_render.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/logging.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/logging.cpython-313.pyc deleted file mode 100644 index 3a01fc84..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/logging.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/markup.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/markup.cpython-313.pyc deleted file mode 100644 index 2a1616d2..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/markup.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/measure.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/measure.cpython-313.pyc deleted file mode 100644 index 4b1a7885..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/measure.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/padding.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/padding.cpython-313.pyc deleted file mode 100644 index 4818eb2d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/padding.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pager.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pager.cpython-313.pyc deleted file mode 100644 index 09e93f13..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pager.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/palette.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/palette.cpython-313.pyc deleted file mode 100644 index 4d5c40bb..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/palette.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/panel.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/panel.cpython-313.pyc deleted file mode 100644 index 23941da1..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/panel.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pretty.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pretty.cpython-313.pyc deleted file mode 100644 index 9e9f9970..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pretty.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress.cpython-313.pyc deleted file mode 100644 index 2f8c7458..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress_bar.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress_bar.cpython-313.pyc deleted file mode 100644 index fe754266..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress_bar.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/prompt.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/prompt.cpython-313.pyc deleted file mode 100644 index 2eb8aad3..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/prompt.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/protocol.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/protocol.cpython-313.pyc deleted file mode 100644 index b67b20b9..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/protocol.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/region.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/region.cpython-313.pyc deleted file mode 100644 index 931b4d56..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/region.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/repr.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/repr.cpython-313.pyc deleted file mode 100644 index 70f6fe36..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/repr.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/rule.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/rule.cpython-313.pyc deleted file mode 100644 index df7c537d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/rule.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/scope.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/scope.cpython-313.pyc deleted file mode 100644 index d4c42eae..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/scope.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/screen.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/screen.cpython-313.pyc deleted file mode 100644 index a6c302ed..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/screen.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/segment.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/segment.cpython-313.pyc deleted file mode 100644 index 4d34d77e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/segment.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/spinner.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/spinner.cpython-313.pyc deleted file mode 100644 index eaa03e29..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/spinner.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/status.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/status.cpython-313.pyc deleted file mode 100644 index cc9297a5..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/status.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/style.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/style.cpython-313.pyc deleted file mode 100644 index 1429cd1d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/style.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/styled.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/styled.cpython-313.pyc deleted file mode 100644 index b3e49065..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/styled.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/syntax.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/syntax.cpython-313.pyc deleted file mode 100644 index c6e5d78c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/syntax.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/table.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/table.cpython-313.pyc deleted file mode 100644 index 736b2d1e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/table.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/terminal_theme.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/terminal_theme.cpython-313.pyc deleted file mode 100644 index 89de877c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/terminal_theme.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/text.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/text.cpython-313.pyc deleted file mode 100644 index f0f7d606..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/text.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/theme.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/theme.cpython-313.pyc deleted file mode 100644 index 60161179..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/theme.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/themes.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/themes.cpython-313.pyc deleted file mode 100644 index 563d8fed..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/themes.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/traceback.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/traceback.cpython-313.pyc deleted file mode 100644 index 0a430c0c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/traceback.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/tree.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/tree.cpython-313.pyc deleted file mode 100644 index bb0baef8..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/tree.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 2745ae93..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_parser.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_parser.cpython-313.pyc deleted file mode 100644 index 35d47ad2..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_parser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_re.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_re.cpython-313.pyc deleted file mode 100644 index 2979fa06..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_re.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_types.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_types.cpython-313.pyc deleted file mode 100644 index 3c59cba8..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_types.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index ab2fbc20..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/_writer.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/_writer.cpython-313.pyc deleted file mode 100644 index fb28f5f8..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/_writer.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 9006bb03..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_api.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_api.cpython-313.pyc deleted file mode 100644 index b0aa55e8..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_api.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_macos.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_macos.cpython-313.pyc deleted file mode 100644 index cac09454..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_macos.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_openssl.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_openssl.cpython-313.pyc deleted file mode 100644 index d7f1084d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_openssl.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-313.pyc deleted file mode 100644 index b9b1b125..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_windows.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_windows.cpython-313.pyc deleted file mode 100644 index 7a8a79d7..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_windows.cpython-313.pyc and 
/dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 2ca3d4db..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-313.pyc deleted file mode 100644 index ae1162a4..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-313.pyc deleted file mode 100644 index 4efcd69c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-313.pyc deleted file mode 100644 index aa555eae..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-313.pyc deleted file mode 100644 index fba5cdfb..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-313.pyc deleted file mode 100644 index dab94f5d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-313.pyc deleted file mode 100644 index 22e85dd4..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-313.pyc deleted file mode 100644 index 61d59e9e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-313.pyc deleted file mode 100644 index 
b6f68496..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-313.pyc deleted file mode 100644 index 78f56b88..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-313.pyc deleted file mode 100644 index cf73967d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 3bb0f117..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-313.pyc deleted file mode 100644 index e415f905..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-313.pyc deleted file mode 100644 index ff820c9f..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-313.pyc deleted file mode 100644 index 6a1b1ff6..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-313.pyc deleted file mode 100644 index a6eb0fb6..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-313.pyc deleted file mode 100644 index d83b5b16..00000000 Binary files 
a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-313.pyc deleted file mode 100644 index b0e7f706..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 64a8e49f..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-313.pyc deleted file mode 100644 index dd4f3561..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-313.pyc deleted file mode 100644 index 66bc7736..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 0c57a7e6..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-313.pyc deleted file mode 100644 index a3a6e40a..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index e31d55c1..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-313.pyc 
b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-313.pyc deleted file mode 100644 index 67574342..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-313.pyc deleted file mode 100644 index 4774b1c8..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 0542fec7..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-313.pyc deleted file mode 100644 index 702b85fd..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-313.pyc deleted file mode 100644 index 80f430a0..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-313.pyc deleted file mode 100644 index 0a626113..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-313.pyc deleted file mode 100644 index 57a60ad3..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-313.pyc deleted file mode 100644 index b4595aa8..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-313.pyc deleted file mode 
100644 index d8c65efe..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-313.pyc deleted file mode 100644 index 693b8953..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-313.pyc deleted file mode 100644 index c70021b0..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-313.pyc deleted file mode 100644 index e26811ce..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-313.pyc deleted file mode 100644 index b76e518d..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-313.pyc deleted file mode 100644 index f1e6528e..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-313.pyc b/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-313.pyc deleted file mode 100644 index 07edbd6c..00000000 Binary files a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/site.py b/Python313_13_x64_Template/Lib/site.py deleted file mode 100644 index 041dca11..00000000 --- a/Python313_13_x64_Template/Lib/site.py +++ /dev/null @@ -1,773 +0,0 @@ -"""Append module search paths for third-party packages to sys.path. - -**************************************************************** -* This module is automatically imported during initialization. * -**************************************************************** - -This will append site-specific paths to the module search path. On -Unix (including Mac OSX), it starts with sys.prefix and -sys.exec_prefix (if different) and appends -lib/python<version>/site-packages. -On other platforms (such as Windows), it tries each of the -prefixes directly, as well as with lib/site-packages appended.
The -resulting directories, if they exist, are appended to sys.path, and -also inspected for path configuration files. - -If a file named "pyvenv.cfg" exists one directory above sys.executable, -sys.prefix and sys.exec_prefix are set to that directory and -it is also checked for site-packages (sys.base_prefix and -sys.base_exec_prefix will always be the "real" prefixes of the Python -installation). If "pyvenv.cfg" (a bootstrap configuration file) contains -the key "include-system-site-packages" set to anything other than "false" -(case-insensitive), the system-level prefixes will still also be -searched for site-packages; otherwise they won't. - -All of the resulting site-specific directories, if they exist, are -appended to sys.path, and also inspected for path configuration -files. - -A path configuration file is a file whose name has the form -<package>.pth; its contents are additional directories (one per line) -to be added to sys.path. Non-existing directories (or -non-directories) are never added to sys.path; no directory is added to -sys.path more than once. Blank lines and lines beginning with -'#' are skipped. Lines starting with 'import' are executed. - -For example, suppose sys.prefix and sys.exec_prefix are set to -/usr/local and there is a directory /usr/local/lib/python2.5/site-packages -with three subdirectories, foo, bar and spam, and two path -configuration files, foo.pth and bar.pth. Assume foo.pth contains the -following: - - # foo package configuration - foo - bar - bletch - -and bar.pth contains: - - # bar package configuration - bar - -Then the following directories are added to sys.path, in this order: - - /usr/local/lib/python2.5/site-packages/bar - /usr/local/lib/python2.5/site-packages/foo - -Note that bletch is omitted because it doesn't exist; bar precedes foo -because bar.pth comes alphabetically before foo.pth; and spam is -omitted because it is not mentioned in either path configuration file. - -The readline module is also automatically configured to enable -completion for systems that support it. This can be overridden in -sitecustomize, usercustomize or PYTHONSTARTUP. Starting Python in -isolated mode (-I) disables automatic readline configuration. - -After these operations, an attempt is made to import a module -named sitecustomize, which can perform arbitrary additional -site-specific customizations. If this import fails with an -ImportError exception, it is silently ignored. -""" - -import sys -import os -import builtins -import _sitebuiltins -import io -import stat -import errno - -# Prefixes for site-packages; add additional prefixes like /usr/local here -PREFIXES = [sys.prefix, sys.exec_prefix] -# Enable per user site-packages directory -# set it to False to disable the feature or True to force the feature -ENABLE_USER_SITE = None - -# for distutils.commands.install -# These values are initialized by the getuserbase() and getusersitepackages() -# functions, through the main() function when Python starts.
-USER_SITE = None -USER_BASE = None - - -def _trace(message): - if sys.flags.verbose: - print(message, file=sys.stderr) - - -def makepath(*paths): - dir = os.path.join(*paths) - try: - dir = os.path.abspath(dir) - except OSError: - pass - return dir, os.path.normcase(dir) - - -def abs_paths(): - """Set all module __file__ and __cached__ attributes to an absolute path""" - for m in set(sys.modules.values()): - loader_module = None - try: - loader_module = m.__loader__.__module__ - except AttributeError: - try: - loader_module = m.__spec__.loader.__module__ - except AttributeError: - pass - if loader_module not in {'_frozen_importlib', '_frozen_importlib_external'}: - continue # don't mess with a PEP 302-supplied __file__ - try: - m.__file__ = os.path.abspath(m.__file__) - except (AttributeError, OSError, TypeError): - pass - try: - m.__cached__ = os.path.abspath(m.__cached__) - except (AttributeError, OSError, TypeError): - pass - - -def removeduppaths(): - """ Remove duplicate entries from sys.path along with making them - absolute""" - # This ensures that the initial path provided by the interpreter contains - # only absolute pathnames, even if we're running from the build directory. - L = [] - known_paths = set() - for dir in sys.path: - # Filter out duplicate paths (on case-insensitive file systems also - # if they only differ in case); turn relative paths into absolute - # paths. - dir, dircase = makepath(dir) - if dircase not in known_paths: - L.append(dir) - known_paths.add(dircase) - sys.path[:] = L - return known_paths - - -def _init_pathinfo(): - """Return a set containing all existing file system items from sys.path.""" - d = set() - for item in sys.path: - try: - if os.path.exists(item): - _, itemcase = makepath(item) - d.add(itemcase) - except TypeError: - continue - return d - - -def addpackage(sitedir, name, known_paths): - """Process a .pth file within the site-packages directory: - For each line in the file, either combine it with sitedir to a path - and add that to known_paths, or execute it if it starts with 'import '. - """ - if known_paths is None: - known_paths = _init_pathinfo() - reset = True - else: - reset = False - fullname = os.path.join(sitedir, name) - try: - st = os.lstat(fullname) - except OSError: - return - if ((getattr(st, 'st_flags', 0) & stat.UF_HIDDEN) or - (getattr(st, 'st_file_attributes', 0) & stat.FILE_ATTRIBUTE_HIDDEN)): - _trace(f"Skipping hidden .pth file: {fullname!r}") - return - _trace(f"Processing .pth file: {fullname!r}") - try: - with io.open_code(fullname) as f: - pth_content = f.read() - except OSError: - return - - try: - # Accept BOM markers in .pth files as we do in source files - # (Windows PowerShell 5.1 makes it hard to emit UTF-8 files without a BOM) - pth_content = pth_content.decode("utf-8-sig") - except UnicodeDecodeError: - # Fallback to locale encoding for backward compatibility. - # We will deprecate this fallback in the future. - import locale - pth_content = pth_content.decode(locale.getencoding()) - _trace(f"Cannot read {fullname!r} as UTF-8. 
" - f"Using fallback encoding {locale.getencoding()!r}") - - for n, line in enumerate(pth_content.splitlines(), 1): - if line.startswith("#"): - continue - if line.strip() == "": - continue - try: - if line.startswith(("import ", "import\t")): - exec(line) - continue - line = line.rstrip() - dir, dircase = makepath(sitedir, line) - if dircase not in known_paths and os.path.exists(dir): - sys.path.append(dir) - known_paths.add(dircase) - except Exception as exc: - print(f"Error processing line {n:d} of {fullname}:\n", - file=sys.stderr) - import traceback - for record in traceback.format_exception(exc): - for line in record.splitlines(): - print(' '+line, file=sys.stderr) - print("\nRemainder of file ignored", file=sys.stderr) - break - if reset: - known_paths = None - return known_paths - - -def addsitedir(sitedir, known_paths=None): - """Add 'sitedir' argument to sys.path if missing and handle .pth files in - 'sitedir'""" - _trace(f"Adding directory: {sitedir!r}") - if known_paths is None: - known_paths = _init_pathinfo() - reset = True - else: - reset = False - sitedir, sitedircase = makepath(sitedir) - if not sitedircase in known_paths: - sys.path.append(sitedir) # Add path component - known_paths.add(sitedircase) - try: - names = os.listdir(sitedir) - except OSError: - return - names = [name for name in names - if name.endswith(".pth") and not name.startswith(".")] - for name in sorted(names): - addpackage(sitedir, name, known_paths) - if reset: - known_paths = None - return known_paths - - -def check_enableusersite(): - """Check if user site directory is safe for inclusion - - The function tests for the command line flag (including environment var), - process uid/gid equal to effective uid/gid. - - None: Disabled for security reasons - False: Disabled by user (command line option) - True: Safe and enabled - """ - if sys.flags.no_user_site: - return False - - if hasattr(os, "getuid") and hasattr(os, "geteuid"): - # check process uid == effective uid - if os.geteuid() != os.getuid(): - return None - if hasattr(os, "getgid") and hasattr(os, "getegid"): - # check process gid == effective gid - if os.getegid() != os.getgid(): - return None - - return True - - -# NOTE: sysconfig and it's dependencies are relatively large but site module -# needs very limited part of them. -# To speedup startup time, we have copy of them. 
-# -# See https://bugs.python.org/issue29585 - -# Copy of sysconfig._get_implementation() -def _get_implementation(): - return 'Python' - -# Copy of sysconfig._getuserbase() -def _getuserbase(): - env_base = os.environ.get("PYTHONUSERBASE", None) - if env_base: - return env_base - - # Emscripten, iOS, tvOS, VxWorks, WASI, and watchOS have no home directories - if sys.platform in {"emscripten", "ios", "tvos", "vxworks", "wasi", "watchos"}: - return None - - def joinuser(*args): - return os.path.expanduser(os.path.join(*args)) - - if os.name == "nt": - base = os.environ.get("APPDATA") or "~" - return joinuser(base, _get_implementation()) - - if sys.platform == "darwin" and sys._framework: - return joinuser("~", "Library", sys._framework, - "%d.%d" % sys.version_info[:2]) - - return joinuser("~", ".local") - - -# Same to sysconfig.get_path('purelib', os.name+'_user') -def _get_path(userbase): - version = sys.version_info - if hasattr(sys, 'abiflags') and 't' in sys.abiflags: - abi_thread = 't' - else: - abi_thread = '' - - implementation = _get_implementation() - implementation_lower = implementation.lower() - if os.name == 'nt': - ver_nodot = sys.winver.replace('.', '') - return f'{userbase}\\{implementation}{ver_nodot}\\site-packages' - - if sys.platform == 'darwin' and sys._framework: - return f'{userbase}/lib/{implementation_lower}/site-packages' - - return f'{userbase}/lib/python{version[0]}.{version[1]}{abi_thread}/site-packages' - - -def getuserbase(): - """Returns the `user base` directory path. - - The `user base` directory can be used to store data. If the global - variable ``USER_BASE`` is not initialized yet, this function will also set - it. - """ - global USER_BASE - if USER_BASE is None: - USER_BASE = _getuserbase() - return USER_BASE - - -def getusersitepackages(): - """Returns the user-specific site-packages directory path. - - If the global variable ``USER_SITE`` is not initialized yet, this - function will also set it. - """ - global USER_SITE, ENABLE_USER_SITE - userbase = getuserbase() # this will also set USER_BASE - - if USER_SITE is None: - if userbase is None: - ENABLE_USER_SITE = False # disable user site and return None - else: - USER_SITE = _get_path(userbase) - - return USER_SITE - -def addusersitepackages(known_paths): - """Add a per user site-package to sys.path - - Each user has its own python directory with site-packages in the - home directory. - """ - # get the per user site-package path - # this call will also make sure USER_BASE and USER_SITE are set - _trace("Processing user site-packages") - user_site = getusersitepackages() - - if ENABLE_USER_SITE and os.path.isdir(user_site): - addsitedir(user_site, known_paths) - return known_paths - -def getsitepackages(prefixes=None): - """Returns a list containing all global site-packages directories. - - For each directory present in ``prefixes`` (or the global ``PREFIXES``), - this function will find its `site-packages` subdirectory depending on the - system environment, and will return a list of full paths. 
- """ - sitepackages = [] - seen = set() - - if prefixes is None: - prefixes = PREFIXES - - for prefix in prefixes: - if not prefix or prefix in seen: - continue - seen.add(prefix) - - implementation = _get_implementation().lower() - ver = sys.version_info - if hasattr(sys, 'abiflags') and 't' in sys.abiflags: - abi_thread = 't' - else: - abi_thread = '' - if os.sep == '/': - libdirs = [sys.platlibdir] - if sys.platlibdir != "lib": - libdirs.append("lib") - - for libdir in libdirs: - path = os.path.join(prefix, libdir, - f"{implementation}{ver[0]}.{ver[1]}{abi_thread}", - "site-packages") - sitepackages.append(path) - else: - sitepackages.append(prefix) - sitepackages.append(os.path.join(prefix, "Lib", "site-packages")) - return sitepackages - -def addsitepackages(known_paths, prefixes=None): - """Add site-packages to sys.path""" - _trace("Processing global site-packages") - for sitedir in getsitepackages(prefixes): - if os.path.isdir(sitedir): - addsitedir(sitedir, known_paths) - - return known_paths - -def setquit(): - """Define new builtins 'quit' and 'exit'. - - These are objects which make the interpreter exit when called. - The repr of each object contains a hint at how it works. - - """ - if os.sep == '\\': - eof = 'Ctrl-Z plus Return' - else: - eof = 'Ctrl-D (i.e. EOF)' - - builtins.quit = _sitebuiltins.Quitter('quit', eof) - builtins.exit = _sitebuiltins.Quitter('exit', eof) - - -def setcopyright(): - """Set 'copyright' and 'credits' in builtins""" - builtins.copyright = _sitebuiltins._Printer("copyright", sys.copyright) - builtins.credits = _sitebuiltins._Printer("credits", """\ -Thanks to CWI, CNRI, BeOpen, Zope Corporation, the Python Software -Foundation, and a cast of thousands for supporting Python -development. See www.python.org for more information.""") - files, dirs = [], [] - # Not all modules are required to have a __file__ attribute. See - # PEP 420 for more details. - here = getattr(sys, '_stdlib_dir', None) - if not here and hasattr(os, '__file__'): - here = os.path.dirname(os.__file__) - if here: - files.extend(["LICENSE.txt", "LICENSE"]) - dirs.extend([os.path.join(here, os.pardir), here, os.curdir]) - builtins.license = _sitebuiltins._Printer( - "license", - "See https://www.python.org/psf/license/", - files, dirs) - - -def sethelper(): - builtins.help = _sitebuiltins._Helper() - - -def gethistoryfile(): - """Check if the PYTHON_HISTORY environment variable is set and define - it as the .python_history file. If PYTHON_HISTORY is not set, use the - default .python_history file. - """ - if not sys.flags.ignore_environment: - history = os.environ.get("PYTHON_HISTORY") - if history: - return history - return os.path.join(os.path.expanduser('~'), - '.python_history') - - -def enablerlcompleter(): - """Enable default readline configuration on interactive prompts, by - registering a sys.__interactivehook__. - """ - sys.__interactivehook__ = register_readline - - -def register_readline(): - """Configure readline completion on interactive prompts. - - If the readline module can be imported, the hook will set the Tab key - as completion key and register ~/.python_history as history file. - This can be overridden in the sitecustomize or usercustomize module, - or in a PYTHONSTARTUP file. 
- """ - if not sys.flags.ignore_environment: - PYTHON_BASIC_REPL = os.getenv("PYTHON_BASIC_REPL") - else: - PYTHON_BASIC_REPL = False - - import atexit - - try: - try: - import readline - except ImportError: - readline = None - else: - import rlcompleter # noqa: F401 - except ImportError: - return - - try: - if PYTHON_BASIC_REPL: - CAN_USE_PYREPL = False - else: - original_path = sys.path - sys.path = [p for p in original_path if p != ''] - try: - import _pyrepl.readline - if os.name == "nt": - import _pyrepl.windows_console - console_errors = (_pyrepl.windows_console._error,) - else: - import _pyrepl.unix_console - console_errors = _pyrepl.unix_console._error - from _pyrepl.main import CAN_USE_PYREPL - finally: - sys.path = original_path - except ImportError: - return - - if readline is not None: - # Reading the initialization (config) file may not be enough to set a - # completion key, so we set one first and then read the file. - if readline.backend == 'editline': - readline.parse_and_bind('bind ^I rl_complete') - else: - readline.parse_and_bind('tab: complete') - - try: - readline.read_init_file() - except OSError: - # An OSError here could have many causes, but the most likely one - # is that there's no .inputrc file (or .editrc file in the case of - # Mac OS X + libedit) in the expected location. In that case, we - # want to ignore the exception. - pass - - if readline is None or readline.get_current_history_length() == 0: - # If no history was loaded, default to .python_history, - # or PYTHON_HISTORY. - # The guard is necessary to avoid doubling history size at - # each interpreter exit when readline was already configured - # through a PYTHONSTARTUP hook, see: - # http://bugs.python.org/issue5845#msg198636 - history = gethistoryfile() - - if CAN_USE_PYREPL: - readline_module = _pyrepl.readline - exceptions = (OSError, *console_errors) - else: - if readline is None: - return - readline_module = readline - exceptions = OSError - - try: - readline_module.read_history_file(history) - except exceptions: - pass - - def write_history(): - try: - readline_module.write_history_file(history) - except (FileNotFoundError, PermissionError): - # home directory does not exist or is not writable - # https://bugs.python.org/issue19891 - pass - except OSError as err: - if err.errno == errno.EROFS: - pass # gh-128066: read-only file system - else: - raise - - atexit.register(write_history) - - -def venv(known_paths): - global PREFIXES, ENABLE_USER_SITE - - env = os.environ - if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env: - executable = sys._base_executable = os.environ['__PYVENV_LAUNCHER__'] - else: - executable = sys.executable - exe_dir = os.path.dirname(os.path.abspath(executable)) - site_prefix = os.path.dirname(exe_dir) - sys._home = None - conf_basename = 'pyvenv.cfg' - candidate_conf = next( - ( - conffile for conffile in ( - os.path.join(exe_dir, conf_basename), - os.path.join(site_prefix, conf_basename) - ) - if os.path.isfile(conffile) - ), - None - ) - - if candidate_conf: - virtual_conf = candidate_conf - system_site = "true" - # Issue 25185: Use UTF-8, as that's what the venv module uses when - # writing the file. 
- with open(virtual_conf, encoding='utf-8') as f: - for line in f: - if '=' in line: - key, _, value = line.partition('=') - key = key.strip().lower() - value = value.strip() - if key == 'include-system-site-packages': - system_site = value.lower() - elif key == 'home': - sys._home = value - - sys.prefix = sys.exec_prefix = site_prefix - - # Doing this here ensures venv takes precedence over user-site - addsitepackages(known_paths, [sys.prefix]) - - # addsitepackages will process site_prefix again if its in PREFIXES, - # but that's ok; known_paths will prevent anything being added twice - if system_site == "true": - PREFIXES.insert(0, sys.prefix) - else: - PREFIXES = [sys.prefix] - ENABLE_USER_SITE = False - - return known_paths - - -def execsitecustomize(): - """Run custom site specific code, if available.""" - try: - try: - import sitecustomize - except ImportError as exc: - if exc.name == 'sitecustomize': - pass - else: - raise - except Exception as err: - if sys.flags.verbose: - sys.excepthook(*sys.exc_info()) - else: - sys.stderr.write( - "Error in sitecustomize; set PYTHONVERBOSE for traceback:\n" - "%s: %s\n" % - (err.__class__.__name__, err)) - - -def execusercustomize(): - """Run custom user specific code, if available.""" - try: - try: - import usercustomize - except ImportError as exc: - if exc.name == 'usercustomize': - pass - else: - raise - except Exception as err: - if sys.flags.verbose: - sys.excepthook(*sys.exc_info()) - else: - sys.stderr.write( - "Error in usercustomize; set PYTHONVERBOSE for traceback:\n" - "%s: %s\n" % - (err.__class__.__name__, err)) - - -def main(): - """Add standard site-specific directories to the module search path. - - This function is called automatically when this module is imported, - unless the python interpreter was started with the -S flag. - """ - global ENABLE_USER_SITE - - orig_path = sys.path[:] - known_paths = removeduppaths() - if orig_path != sys.path: - # removeduppaths() might make sys.path absolute. - # fix __file__ and __cached__ of already imported modules too. - abs_paths() - - known_paths = venv(known_paths) - if ENABLE_USER_SITE is None: - ENABLE_USER_SITE = check_enableusersite() - known_paths = addusersitepackages(known_paths) - known_paths = addsitepackages(known_paths) - setquit() - setcopyright() - sethelper() - if not sys.flags.isolated: - enablerlcompleter() - execsitecustomize() - if ENABLE_USER_SITE: - execusercustomize() - -# Prevent extending of sys.path when python was started with -S and -# site is imported later. -if not sys.flags.no_site: - main() - -def _script(): - help = """\ - %s [--user-base] [--user-site] - - Without arguments print some useful information - With arguments print the value of USER_BASE and/or USER_SITE separated - by '%s'. 
- - Exit codes with --user-base or --user-site: - 0 - user site directory is enabled - 1 - user site directory is disabled by user - 2 - user site directory is disabled by super user - or for security reasons - >2 - unknown error - """ - args = sys.argv[1:] - if not args: - user_base = getuserbase() - user_site = getusersitepackages() - print("sys.path = [") - for dir in sys.path: - print(" %r," % (dir,)) - print("]") - def exists(path): - if path is not None and os.path.isdir(path): - return "exists" - else: - return "doesn't exist" - print(f"USER_BASE: {user_base!r} ({exists(user_base)})") - print(f"USER_SITE: {user_site!r} ({exists(user_site)})") - print(f"ENABLE_USER_SITE: {ENABLE_USER_SITE!r}") - sys.exit(0) - - buffer = [] - if '--user-base' in args: - buffer.append(USER_BASE) - if '--user-site' in args: - buffer.append(USER_SITE) - - if buffer: - print(os.pathsep.join(buffer)) - if ENABLE_USER_SITE: - sys.exit(0) - elif ENABLE_USER_SITE is False: - sys.exit(1) - elif ENABLE_USER_SITE is None: - sys.exit(2) - else: - sys.exit(3) - else: - import textwrap - print(textwrap.dedent(help % (sys.argv[0], os.pathsep))) - sys.exit(10) - -if __name__ == '__main__': - _script() diff --git a/Python313_13_x64_Template/Lib/smtplib.py b/Python313_13_x64_Template/Lib/smtplib.py deleted file mode 100644 index 9bedcc5f..00000000 --- a/Python313_13_x64_Template/Lib/smtplib.py +++ /dev/null @@ -1,1123 +0,0 @@ -#! /usr/bin/env python3 - -'''SMTP/ESMTP client class. - -This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP -Authentication) and RFC 2487 (Secure SMTP over TLS). - -Notes: - -Please remember, when doing ESMTP, that the names of the SMTP service -extensions are NOT the same thing as the option keywords for the RCPT -and MAIL commands! - -Example: - - >>> import smtplib - >>> s=smtplib.SMTP("localhost") - >>> print(s.help()) - This is Sendmail version 8.8.4 - Topics: - HELO EHLO MAIL RCPT DATA - RSET NOOP QUIT HELP VRFY - EXPN VERB ETRN DSN - For more info use "HELP <topic>". - To report bugs in the implementation send email to - sendmail-bugs@sendmail.org. - For local information send email to Postmaster at your site. - End of HELP info - >>> s.putcmd("vrfy","someone@here") - >>> s.getreply() - (250, "Somebody OverHere <somebody@here.my.org>") - >>> s.quit() -''' - -# Author: The Dragon De Monsyne <dragondm@integral.org> -# ESMTP support, test code and doc fixes added by -# Eric S. Raymond <esr@thyrsus.com> -# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data) -# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers. -# RFC 2554 (authentication) support by Gerhard Haering <gh@ghaering.de>. -# -# This was modified from the Python 1.5 library HTTP lib. - -import socket -import io -import re -import email.utils -import email.message -import email.generator -import base64 -import hmac -import copy -import datetime -import sys -from email.base64mime import body_encode as encode_base64 - -__all__ = ["SMTPException", "SMTPNotSupportedError", "SMTPServerDisconnected", "SMTPResponseException", - "SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError", - "SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError", - "quoteaddr", "quotedata", "SMTP"] - -SMTP_PORT = 25 -SMTP_SSL_PORT = 465 -CRLF = "\r\n" -bCRLF = b"\r\n" -_MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3 -_MAXCHALLENGE = 5 # Maximum number of AUTH challenges sent - -OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I) - -# Exception classes used by this module. 
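The exception hierarchy defined next is what calling code is expected to catch. A minimal sketch of the usual pattern (host and addresses are placeholders):

import smtplib

try:
    with smtplib.SMTP("localhost") as s:
        s.sendmail("me@example.org", ["you@example.org"],
                   "Subject: test\r\n\r\nhello\r\n")
except smtplib.SMTPRecipientsRefused as exc:
    print(exc.recipients)          # per-recipient (code, message) pairs
except smtplib.SMTPResponseException as exc:
    print(exc.smtp_code, exc.smtp_error)  # any error carrying an SMTP status code
except smtplib.SMTPException as exc:
    print(exc)                     # base class for everything this module raises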
-class SMTPException(OSError): - """Base class for all exceptions raised by this module.""" - -class SMTPNotSupportedError(SMTPException): - """The command or option is not supported by the SMTP server. - - This exception is raised when an attempt is made to run a command or a - command with an option which is not supported by the server. - """ - -class SMTPServerDisconnected(SMTPException): - """Not connected to any SMTP server. - - This exception is raised when the server unexpectedly disconnects, - or when an attempt is made to use the SMTP instance before - connecting it to a server. - """ - -class SMTPResponseException(SMTPException): - """Base class for all exceptions that include an SMTP error code. - - These exceptions are generated in some instances when the SMTP - server returns an error code. The error code is stored in the - `smtp_code' attribute of the error, and the `smtp_error' attribute - is set to the error message. - """ - - def __init__(self, code, msg): - self.smtp_code = code - self.smtp_error = msg - self.args = (code, msg) - -class SMTPSenderRefused(SMTPResponseException): - """Sender address refused. - - In addition to the attributes set by on all SMTPResponseException - exceptions, this sets `sender' to the string that the SMTP refused. - """ - - def __init__(self, code, msg, sender): - self.smtp_code = code - self.smtp_error = msg - self.sender = sender - self.args = (code, msg, sender) - -class SMTPRecipientsRefused(SMTPException): - """All recipient addresses refused. - - The errors for each recipient are accessible through the attribute - 'recipients', which is a dictionary of exactly the same sort as - SMTP.sendmail() returns. - """ - - def __init__(self, recipients): - self.recipients = recipients - self.args = (recipients,) - - -class SMTPDataError(SMTPResponseException): - """The SMTP server didn't accept the data.""" - -class SMTPConnectError(SMTPResponseException): - """Error during connection establishment.""" - -class SMTPHeloError(SMTPResponseException): - """The server refused our HELO reply.""" - -class SMTPAuthenticationError(SMTPResponseException): - """Authentication error. - - Most probably the server didn't accept the username/password - combination provided. - """ - -def quoteaddr(addrstring): - """Quote a subset of the email addresses defined by RFC 821. - - Should be able to handle anything email.utils.parseaddr can handle. - """ - displayname, addr = email.utils.parseaddr(addrstring) - if (displayname, addr) == ('', ''): - # parseaddr couldn't parse it, use it as is and hope for the best. - if addrstring.strip().startswith('<'): - return addrstring - return "<%s>" % addrstring - return "<%s>" % addr - -def _addr_only(addrstring): - displayname, addr = email.utils.parseaddr(addrstring) - if (displayname, addr) == ('', ''): - # parseaddr couldn't parse it, so use it as is. - return addrstring - return addr - -# Legacy method kept for backward compatibility. -def quotedata(data): - """Quote data for email. - - Double leading '.', and change Unix newline '\\n', or Mac '\\r' into - internet CRLF end-of-line. 
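Concretely, the transformation described here behaves as follows (an illustrative value, not taken from the module's tests):

>>> quotedata("line1\nline2\n.leading-dot")
'line1\r\nline2\r\n..leading-dot'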
- """ - return re.sub(r'(?m)^\.', '..', - re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data)) - -def _quote_periods(bindata): - return re.sub(br'(?m)^\.', b'..', bindata) - -def _fix_eols(data): - return re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data) - - -try: - hmac.digest(b'', b'', 'md5') -except ValueError: - _have_cram_md5_support = False -else: - _have_cram_md5_support = True - - -try: - import ssl -except ImportError: - _have_ssl = False -else: - _have_ssl = True - - -class SMTP: - """This class manages a connection to an SMTP or ESMTP server. - SMTP Objects: - SMTP objects have the following attributes: - helo_resp - This is the message given by the server in response to the - most recent HELO command. - - ehlo_resp - This is the message given by the server in response to the - most recent EHLO command. This is usually multiline. - - does_esmtp - This is a True value _after you do an EHLO command_, if the - server supports ESMTP. - - esmtp_features - This is a dictionary, which, if the server supports ESMTP, - will _after you do an EHLO command_, contain the names of the - SMTP service extensions this server supports, and their - parameters (if any). - - Note, all extension names are mapped to lower case in the - dictionary. - - See each method's docstrings for details. In general, there is a - method of the same name to perform each SMTP command. There is also a - method called 'sendmail' that will do an entire mail transaction. - """ - debuglevel = 0 - - sock = None - file = None - helo_resp = None - ehlo_msg = "ehlo" - ehlo_resp = None - does_esmtp = False - default_port = SMTP_PORT - - def __init__(self, host='', port=0, local_hostname=None, - timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None): - """Initialize a new instance. - - If specified, `host` is the name of the remote host to which to - connect. If specified, `port` specifies the port to which to connect. - By default, smtplib.SMTP_PORT is used. If a host is specified the - connect method is called, and if it returns anything other than a - success code an SMTPConnectError is raised. If specified, - `local_hostname` is used as the FQDN of the local host in the HELO/EHLO - command. Otherwise, the local hostname is found using - socket.getfqdn(). The `source_address` parameter takes a 2-tuple (host, - port) for the socket to bind to as its source address before - connecting. If the host is '' and port is 0, the OS default behavior - will be used. - - """ - self._host = host - self.timeout = timeout - self.esmtp_features = {} - self.command_encoding = 'ascii' - self.source_address = source_address - self._auth_challenge_count = 0 - - if host: - (code, msg) = self.connect(host, port) - if code != 220: - self.close() - raise SMTPConnectError(code, msg) - if local_hostname is not None: - self.local_hostname = local_hostname - else: - # RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and - # if that can't be calculated, that we should use a domain literal - # instead (essentially an encoded IP address like [A.B.C.D]). - fqdn = socket.getfqdn() - if '.' 
in fqdn: - self.local_hostname = fqdn - else: - # We can't find an fqdn hostname, so use a domain literal - addr = '127.0.0.1' - try: - addr = socket.gethostbyname(socket.gethostname()) - except socket.gaierror: - pass - self.local_hostname = '[%s]' % addr - - def __enter__(self): - return self - - def __exit__(self, *args): - try: - code, message = self.docmd("QUIT") - if code != 221: - raise SMTPResponseException(code, message) - except SMTPServerDisconnected: - pass - finally: - self.close() - - def set_debuglevel(self, debuglevel): - """Set the debug output level. - - A non-false value results in debug messages for connection and for all - messages sent to and received from the server. - - """ - self.debuglevel = debuglevel - - def _print_debug(self, *args): - if self.debuglevel > 1: - print(datetime.datetime.now().time(), *args, file=sys.stderr) - else: - print(*args, file=sys.stderr) - - def _get_socket(self, host, port, timeout): - # This makes it simpler for SMTP_SSL to use the SMTP connect code - # and just alter the socket connection bit. - if timeout is not None and not timeout: - raise ValueError('Non-blocking socket (timeout=0) is not supported') - if self.debuglevel > 0: - self._print_debug('connect: to', (host, port), self.source_address) - return socket.create_connection((host, port), timeout, - self.source_address) - - def connect(self, host='localhost', port=0, source_address=None): - """Connect to a host on a given port. - - If the hostname ends with a colon (`:') followed by a number, and - there is no port specified, that suffix will be stripped off and the - number interpreted as the port number to use. - - Note: This method is automatically invoked by __init__, if a host is - specified during instantiation. - - """ - - if source_address: - self.source_address = source_address - - if not port and (host.find(':') == host.rfind(':')): - i = host.rfind(':') - if i >= 0: - host, port = host[:i], host[i + 1:] - try: - port = int(port) - except ValueError: - raise OSError("nonnumeric port") - if not port: - port = self.default_port - sys.audit("smtplib.connect", self, host, port) - self.sock = self._get_socket(host, port, self.timeout) - self.file = None - (code, msg) = self.getreply() - if self.debuglevel > 0: - self._print_debug('connect:', repr(msg)) - return (code, msg) - - def send(self, s): - """Send `s' to the server.""" - if self.debuglevel > 0: - self._print_debug('send:', repr(s)) - if self.sock: - if isinstance(s, str): - # send is used by the 'data' command, where command_encoding - # should not be used, but 'data' needs to convert the string to - # binary itself anyway, so that's not a problem. - s = s.encode(self.command_encoding) - sys.audit("smtplib.send", self, s) - try: - self.sock.sendall(s) - except OSError: - self.close() - raise SMTPServerDisconnected('Server not connected') - else: - raise SMTPServerDisconnected('please run connect() first') - - def putcmd(self, cmd, args=""): - """Send a command to the server.""" - if args == "": - s = cmd - else: - s = f'{cmd} {args}' - if '\r' in s or '\n' in s: - s = s.replace('\n', '\\n').replace('\r', '\\r') - raise ValueError( - f'command and arguments contain prohibited newline characters: {s}' - ) - self.send(f'{s}{CRLF}') - - def getreply(self): - """Get a reply from the server. - - Returns a tuple consisting of: - - - server response code (e.g. '250', or such, if all goes well) - Note: returns -1 if it can't read response code. 
- - - server response string corresponding to response code (multiline - responses are converted to a single, multiline string). - - Raises SMTPServerDisconnected if end-of-file is reached. - """ - resp = [] - if self.file is None: - self.file = self.sock.makefile('rb') - while 1: - try: - line = self.file.readline(_MAXLINE + 1) - except OSError as e: - self.close() - raise SMTPServerDisconnected("Connection unexpectedly closed: " - + str(e)) - if not line: - self.close() - raise SMTPServerDisconnected("Connection unexpectedly closed") - if self.debuglevel > 0: - self._print_debug('reply:', repr(line)) - if len(line) > _MAXLINE: - self.close() - raise SMTPResponseException(500, "Line too long.") - resp.append(line[4:].strip(b' \t\r\n')) - code = line[:3] - # Check that the error code is syntactically correct. - # Don't attempt to read a continuation line if it is broken. - try: - errcode = int(code) - except ValueError: - errcode = -1 - break - # Check if multiline response. - if line[3:4] != b"-": - break - - errmsg = b"\n".join(resp) - if self.debuglevel > 0: - self._print_debug('reply: retcode (%s); Msg: %a' % (errcode, errmsg)) - return errcode, errmsg - - def docmd(self, cmd, args=""): - """Send a command, and return its response code.""" - self.putcmd(cmd, args) - return self.getreply() - - # std smtp commands - def helo(self, name=''): - """SMTP 'helo' command. - Hostname to send for this command defaults to the FQDN of the local - host. - """ - self.putcmd("helo", name or self.local_hostname) - (code, msg) = self.getreply() - self.helo_resp = msg - return (code, msg) - - def ehlo(self, name=''): - """ SMTP 'ehlo' command. - Hostname to send for this command defaults to the FQDN of the local - host. - """ - self.esmtp_features = {} - self.putcmd(self.ehlo_msg, name or self.local_hostname) - (code, msg) = self.getreply() - # According to RFC1869 some (badly written) - # MTA's will disconnect on an ehlo. Toss an exception if - # that happens -ddm - if code == -1 and len(msg) == 0: - self.close() - raise SMTPServerDisconnected("Server not connected") - self.ehlo_resp = msg - if code != 250: - return (code, msg) - self.does_esmtp = True - #parse the ehlo response -ddm - assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp) - resp = self.ehlo_resp.decode("latin-1").split('\n') - del resp[0] - for each in resp: - # To be able to communicate with as many SMTP servers as possible, - # we have to take the old-style auth advertisement into account, - # because: - # 1) Else our SMTP feature parser gets confused. - # 2) There are some servers that only advertise the auth methods we - # support using the old style. - auth_match = OLDSTYLE_AUTH.match(each) - if auth_match: - # This doesn't remove duplicates, but that's no problem - self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \ - + " " + auth_match.groups(0)[0] - continue - - # RFC 1869 requires a space between ehlo keyword and parameters. - # It's actually stricter, in that only spaces are allowed between - # parameters, but were not going to check for that here. Note - # that the space isn't present if there are no parameters. 
- m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each) - if m: - feature = m.group("feature").lower() - params = m.string[m.end("feature"):].strip() - if feature == "auth": - self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \ - + " " + params - else: - self.esmtp_features[feature] = params - return (code, msg) - - def has_extn(self, opt): - """Does the server support a given SMTP service extension?""" - return opt.lower() in self.esmtp_features - - def help(self, args=''): - """SMTP 'help' command. - Returns help text from server.""" - self.putcmd("help", args) - return self.getreply()[1] - - def rset(self): - """SMTP 'rset' command -- resets session.""" - self.command_encoding = 'ascii' - return self.docmd("rset") - - def _rset(self): - """Internal 'rset' command which ignores any SMTPServerDisconnected error. - - Used internally in the library, since the server disconnected error - should appear to the application when the *next* command is issued, if - we are doing an internal "safety" reset. - """ - try: - self.rset() - except SMTPServerDisconnected: - pass - - def noop(self): - """SMTP 'noop' command -- doesn't do anything :>""" - return self.docmd("noop") - - def mail(self, sender, options=()): - """SMTP 'mail' command -- begins mail xfer session. - - This method may raise the following exceptions: - - SMTPNotSupportedError The options parameter includes 'SMTPUTF8' - but the SMTPUTF8 extension is not supported by - the server. - """ - optionlist = '' - if options and self.does_esmtp: - if any(x.lower()=='smtputf8' for x in options): - if self.has_extn('smtputf8'): - self.command_encoding = 'utf-8' - else: - raise SMTPNotSupportedError( - 'SMTPUTF8 not supported by server') - optionlist = ' ' + ' '.join(options) - self.putcmd("mail", "from:%s%s" % (quoteaddr(sender), optionlist)) - return self.getreply() - - def rcpt(self, recip, options=()): - """SMTP 'rcpt' command -- indicates 1 recipient for this mail.""" - optionlist = '' - if options and self.does_esmtp: - optionlist = ' ' + ' '.join(options) - self.putcmd("rcpt", "to:%s%s" % (quoteaddr(recip), optionlist)) - return self.getreply() - - def data(self, msg): - """SMTP 'DATA' command -- sends message data to server. - - Automatically quotes lines beginning with a period per rfc821. - Raises SMTPDataError if there is an unexpected reply to the - DATA command; the return value from this method is the final - response code received when all the data is sent. If msg - is a string, lone '\\r' and '\\n' characters are converted to - '\\r\\n' characters. If msg is bytes, it is transmitted as is. - """ - self.putcmd("data") - (code, repl) = self.getreply() - if self.debuglevel > 0: - self._print_debug('data:', (code, repl)) - if code != 354: - raise SMTPDataError(code, repl) - else: - if isinstance(msg, str): - msg = _fix_eols(msg).encode('ascii') - q = _quote_periods(msg) - if q[-2:] != bCRLF: - q = q + bCRLF - q = q + b"." + bCRLF - self.send(q) - (code, msg) = self.getreply() - if self.debuglevel > 0: - self._print_debug('data:', (code, msg)) - return (code, msg) - - def verify(self, address): - """SMTP 'verify' command -- checks for address validity.""" - self.putcmd("vrfy", _addr_only(address)) - return self.getreply() - # a.k.a. - vrfy = verify - - def expn(self, address): - """SMTP 'expn' command -- expands a mailing list.""" - self.putcmd("expn", _addr_only(address)) - return self.getreply() - - # some useful methods - - def ehlo_or_helo_if_needed(self): - """Call self.ehlo() and/or self.helo() if needed. 
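Once ehlo() has populated esmtp_features as parsed above, extension checks look like this. A hedged sketch (the host is a placeholder):

import smtplib

s = smtplib.SMTP("mail.example.org")
s.ehlo()
if s.has_extn("size"):
    print("max message size:", s.esmtp_features["size"])
print("advertised auth methods:", s.esmtp_features.get("auth", "").split())
s.quit()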
- - If there has been no previous EHLO or HELO command this session, this - method tries ESMTP EHLO first. - - This method may raise the following exceptions: - - SMTPHeloError The server didn't reply properly to - the helo greeting. - """ - if self.helo_resp is None and self.ehlo_resp is None: - if not (200 <= self.ehlo()[0] <= 299): - (code, resp) = self.helo() - if not (200 <= code <= 299): - raise SMTPHeloError(code, resp) - - def auth(self, mechanism, authobject, *, initial_response_ok=True): - """Authentication command - requires response processing. - - 'mechanism' specifies which authentication mechanism is to - be used - the valid values are those listed in the 'auth' - element of 'esmtp_features'. - - 'authobject' must be a callable object taking a single argument: - - data = authobject(challenge) - - It will be called to process the server's challenge response; the - challenge argument it is passed will be a bytes. It should return - an ASCII string that will be base64 encoded and sent to the server. - - Keyword arguments: - - initial_response_ok: Allow sending the RFC 4954 initial-response - to the AUTH command, if the authentication methods supports it. - """ - # RFC 4954 allows auth methods to provide an initial response. Not all - # methods support it. By definition, if they return something other - # than None when challenge is None, then they do. See issue #15014. - mechanism = mechanism.upper() - initial_response = (authobject() if initial_response_ok else None) - if initial_response is not None: - response = encode_base64(initial_response.encode('ascii'), eol='') - (code, resp) = self.docmd("AUTH", mechanism + " " + response) - self._auth_challenge_count = 1 - else: - (code, resp) = self.docmd("AUTH", mechanism) - self._auth_challenge_count = 0 - # If server responds with a challenge, send the response. - while code == 334: - self._auth_challenge_count += 1 - challenge = base64.decodebytes(resp) - response = encode_base64( - authobject(challenge).encode('ascii'), eol='') - (code, resp) = self.docmd(response) - # If server keeps sending challenges, something is wrong. - if self._auth_challenge_count > _MAXCHALLENGE: - raise SMTPException( - "Server AUTH mechanism infinite loop. Last response: " - + repr((code, resp)) - ) - if code in (235, 503): - return (code, resp) - raise SMTPAuthenticationError(code, resp) - - def auth_cram_md5(self, challenge=None): - """ Authobject to use with CRAM-MD5 authentication. Requires self.user - and self.password to be set.""" - # CRAM-MD5 does not support initial-response. - if challenge is None: - return None - if not _have_cram_md5_support: - raise SMTPException("CRAM-MD5 is not supported") - password = self.password.encode('ascii') - authcode = hmac.HMAC(password, challenge, 'md5') - return f"{self.user} {authcode.hexdigest()}" - - def auth_plain(self, challenge=None): - """ Authobject to use with PLAIN authentication. Requires self.user and - self.password to be set.""" - return "\0%s\0%s" % (self.user, self.password) - - def auth_login(self, challenge=None): - """ Authobject to use with LOGIN authentication. Requires self.user and - self.password to be set.""" - if challenge is None or self._auth_challenge_count < 2: - return self.user - else: - return self.password - - def login(self, user, password, *, initial_response_ok=True): - """Log in on an SMTP server that requires authentication. - - The arguments are: - - user: The user name to authenticate with. - - password: The password for the authentication. 
- - Keyword arguments: - - initial_response_ok: Allow sending the RFC 4954 initial-response - to the AUTH command, if the authentication methods supports it. - - If there has been no previous EHLO or HELO command this session, this - method tries ESMTP EHLO first. - - This method will return normally if the authentication was successful. - - This method may raise the following exceptions: - - SMTPHeloError The server didn't reply properly to - the helo greeting. - SMTPAuthenticationError The server didn't accept the username/ - password combination. - SMTPNotSupportedError The AUTH command is not supported by the - server. - SMTPException No suitable authentication method was - found. - """ - - self.ehlo_or_helo_if_needed() - if not self.has_extn("auth"): - raise SMTPNotSupportedError( - "SMTP AUTH extension not supported by server.") - - # Authentication methods the server claims to support - advertised_authlist = self.esmtp_features["auth"].split() - - # Authentication methods we can handle in our preferred order: - if _have_cram_md5_support: - preferred_auths = ['CRAM-MD5', 'PLAIN', 'LOGIN'] - else: - preferred_auths = ['PLAIN', 'LOGIN'] - # We try the supported authentications in our preferred order, if - # the server supports them. - authlist = [auth for auth in preferred_auths - if auth in advertised_authlist] - if not authlist: - raise SMTPException("No suitable authentication method found.") - - # Some servers advertise authentication methods they don't really - # support, so if authentication fails, we continue until we've tried - # all methods. - self.user, self.password = user, password - for authmethod in authlist: - method_name = 'auth_' + authmethod.lower().replace('-', '_') - try: - (code, resp) = self.auth( - authmethod, getattr(self, method_name), - initial_response_ok=initial_response_ok) - # 235 == 'Authentication successful' - # 503 == 'Error: already authenticated' - if code in (235, 503): - return (code, resp) - except SMTPAuthenticationError as e: - last_exception = e - - # We could not login successfully. Return result of last attempt. - raise last_exception - - def starttls(self, *, context=None): - """Puts the connection to the SMTP server into TLS mode. - - If there has been no previous EHLO or HELO command this session, this - method tries ESMTP EHLO first. - - If the server supports TLS, this will encrypt the rest of the SMTP - session. If you provide the context parameter, - the identity of the SMTP server and client can be checked. This, - however, depends on whether the socket module really checks the - certificates. - - This method may raise the following exceptions: - - SMTPHeloError The server didn't reply properly to - the helo greeting. - """ - self.ehlo_or_helo_if_needed() - if not self.has_extn("starttls"): - raise SMTPNotSupportedError( - "STARTTLS extension not supported by server.") - (resp, reply) = self.docmd("STARTTLS") - if resp == 220: - if not _have_ssl: - raise RuntimeError("No SSL support included in this Python") - if context is None: - context = ssl._create_stdlib_context() - self.sock = context.wrap_socket(self.sock, - server_hostname=self._host) - self.file = None - # RFC 3207: - # The client MUST discard any knowledge obtained from - # the server, such as the list of SMTP service extensions, - # which was not obtained from the TLS negotiation itself. 
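In application code this requirement is why EHLO is re-issued after a successful starttls(); the attribute resets just below are the library's side of the same rule. A minimal sketch (host and port are placeholders):

import smtplib, ssl

s = smtplib.SMTP("mail.example.org", 587)
s.ehlo()
if s.has_extn("starttls"):
    s.starttls(context=ssl.create_default_context())
    s.ehlo()  # the pre-TLS feature list must be discarded and re-learned
s.quit()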
- self.helo_resp = None - self.ehlo_resp = None - self.esmtp_features = {} - self.does_esmtp = False - else: - # RFC 3207: - # 501 Syntax error (no parameters allowed) - # 454 TLS not available due to temporary reason - raise SMTPResponseException(resp, reply) - return (resp, reply) - - def sendmail(self, from_addr, to_addrs, msg, mail_options=(), - rcpt_options=()): - """This command performs an entire mail transaction. - - The arguments are: - - from_addr : The address sending this mail. - - to_addrs : A list of addresses to send this mail to. A bare - string will be treated as a list with 1 address. - - msg : The message to send. - - mail_options : List of ESMTP options (such as 8bitmime) for the - mail command. - - rcpt_options : List of ESMTP options (such as DSN commands) for - all the rcpt commands. - - msg may be a string containing characters in the ASCII range, or a byte - string. A string is encoded to bytes using the ascii codec, and lone - \\r and \\n characters are converted to \\r\\n characters. - - If there has been no previous EHLO or HELO command this session, this - method tries ESMTP EHLO first. If the server does ESMTP, message size - and each of the specified options will be passed to it. If EHLO - fails, HELO will be tried and ESMTP options suppressed. - - This method will return normally if the mail is accepted for at least - one recipient. It returns a dictionary, with one entry for each - recipient that was refused. Each entry contains a tuple of the SMTP - error code and the accompanying error message sent by the server. - - This method may raise the following exceptions: - - SMTPHeloError The server didn't reply properly to - the helo greeting. - SMTPRecipientsRefused The server rejected ALL recipients - (no mail was sent). - SMTPSenderRefused The server didn't accept the from_addr. - SMTPDataError The server replied with an unexpected - error code (other than a refusal of - a recipient). - SMTPNotSupportedError The mail_options parameter includes 'SMTPUTF8' - but the SMTPUTF8 extension is not supported by - the server. - - Note: the connection will be open even after an exception is raised. - - Example: - - >>> import smtplib - >>> s=smtplib.SMTP("localhost") - >>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"] - >>> msg = '''\\ - ... From: Me@my.org - ... Subject: testin'... - ... - ... This is a test ''' - >>> s.sendmail("me@my.org",tolist,msg) - { "three@three.org" : ( 550 ,"User unknown" ) } - >>> s.quit() - - In the above example, the message was accepted for delivery to three - of the four addresses, and one was rejected, with the error code - 550. If all addresses are accepted, then the method will return an - empty dictionary. 
- - """ - self.ehlo_or_helo_if_needed() - esmtp_opts = [] - if isinstance(msg, str): - msg = _fix_eols(msg).encode('ascii') - if self.does_esmtp: - if self.has_extn('size'): - esmtp_opts.append("size=%d" % len(msg)) - for option in mail_options: - esmtp_opts.append(option) - (code, resp) = self.mail(from_addr, esmtp_opts) - if code != 250: - if code == 421: - self.close() - else: - self._rset() - raise SMTPSenderRefused(code, resp, from_addr) - senderrs = {} - if isinstance(to_addrs, str): - to_addrs = [to_addrs] - for each in to_addrs: - (code, resp) = self.rcpt(each, rcpt_options) - if (code != 250) and (code != 251): - senderrs[each] = (code, resp) - if code == 421: - self.close() - raise SMTPRecipientsRefused(senderrs) - if len(senderrs) == len(to_addrs): - # the server refused all our recipients - self._rset() - raise SMTPRecipientsRefused(senderrs) - (code, resp) = self.data(msg) - if code != 250: - if code == 421: - self.close() - else: - self._rset() - raise SMTPDataError(code, resp) - #if we got here then somebody got our mail - return senderrs - - def send_message(self, msg, from_addr=None, to_addrs=None, - mail_options=(), rcpt_options=()): - """Converts message to a bytestring and passes it to sendmail. - - The arguments are as for sendmail, except that msg is an - email.message.Message object. If from_addr is None or to_addrs is - None, these arguments are taken from the headers of the Message as - described in RFC 5322 (a ValueError is raised if there is more than - one set of 'Resent-' headers). Regardless of the values of from_addr and - to_addr, any Bcc field (or Resent-Bcc field, when the Message is a - resent) of the Message object won't be transmitted. The Message - object is then serialized using email.generator.BytesGenerator and - sendmail is called to transmit the message. If the sender or any of - the recipient addresses contain non-ASCII and the server advertises the - SMTPUTF8 capability, the policy is cloned with utf8 set to True for the - serialization, and SMTPUTF8 and BODY=8BITMIME are asserted on the send. - If the server does not support SMTPUTF8, an SMTPNotSupported error is - raised. Otherwise the generator is called without modifying the - policy. - - """ - # 'Resent-Date' is a mandatory field if the Message is resent (RFC 5322 - # Section 3.6.6). In such a case, we use the 'Resent-*' fields. However, - # if there is more than one 'Resent-' block there's no way to - # unambiguously determine which one is the most recent in all cases, - # so rather than guess we raise a ValueError in that case. - # - # TODO implement heuristics to guess the correct Resent-* block with an - # option allowing the user to enable the heuristics. (It should be - # possible to guess correctly almost all of the time.) - - self.ehlo_or_helo_if_needed() - resent = msg.get_all('Resent-Date') - if resent is None: - header_prefix = '' - elif len(resent) == 1: - header_prefix = 'Resent-' - else: - raise ValueError("message has more than one 'Resent-' header block") - if from_addr is None: - # Prefer the sender field per RFC 5322 section 3.6.2. 
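From the caller's side, the header-driven addressing continues just below; putting it together, a minimal send_message() sketch (addresses and host are placeholders):

import smtplib
from email.message import EmailMessage

msg = EmailMessage()
msg["From"] = "me@example.org"
msg["To"] = "you@example.org"
msg["Subject"] = "hello"
msg.set_content("body text\n")

with smtplib.SMTP("localhost") as s:
    s.send_message(msg)  # from_addr and to_addrs default to the headers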
- from_addr = (msg[header_prefix + 'Sender'] - if (header_prefix + 'Sender') in msg - else msg[header_prefix + 'From']) - from_addr = email.utils.getaddresses([from_addr])[0][1] - if to_addrs is None: - addr_fields = [f for f in (msg[header_prefix + 'To'], - msg[header_prefix + 'Bcc'], - msg[header_prefix + 'Cc']) - if f is not None] - to_addrs = [a[1] for a in email.utils.getaddresses(addr_fields)] - # Make a local copy so we can delete the bcc headers. - msg_copy = copy.copy(msg) - del msg_copy['Bcc'] - del msg_copy['Resent-Bcc'] - international = False - try: - ''.join([from_addr, *to_addrs]).encode('ascii') - except UnicodeEncodeError: - if not self.has_extn('smtputf8'): - raise SMTPNotSupportedError( - "One or more source or delivery addresses require" - " internationalized email support, but the server" - " does not advertise the required SMTPUTF8 capability") - international = True - with io.BytesIO() as bytesmsg: - if international: - g = email.generator.BytesGenerator( - bytesmsg, policy=msg.policy.clone(utf8=True)) - mail_options = (*mail_options, 'SMTPUTF8', 'BODY=8BITMIME') - else: - g = email.generator.BytesGenerator(bytesmsg) - g.flatten(msg_copy, linesep='\r\n') - flatmsg = bytesmsg.getvalue() - return self.sendmail(from_addr, to_addrs, flatmsg, mail_options, - rcpt_options) - - def close(self): - """Close the connection to the SMTP server.""" - try: - file = self.file - self.file = None - if file: - file.close() - finally: - sock = self.sock - self.sock = None - if sock: - sock.close() - - def quit(self): - """Terminate the SMTP session.""" - res = self.docmd("quit") - # A new EHLO is required after reconnecting with connect() - self.ehlo_resp = self.helo_resp = None - self.esmtp_features = {} - self.does_esmtp = False - self.close() - return res - -if _have_ssl: - - class SMTP_SSL(SMTP): - """ This is a subclass derived from SMTP that connects over an SSL - encrypted socket (to use this class you need a socket module that was - compiled with SSL support). If host is not specified, '' (the local - host) is used. If port is omitted, the standard SMTP-over-SSL port - (465) is used. local_hostname and source_address have the same meaning - as they do in the SMTP class. context also optional, can contain a - SSLContext. - - """ - - default_port = SMTP_SSL_PORT - - def __init__(self, host='', port=0, local_hostname=None, - *, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None, context=None): - if context is None: - context = ssl._create_stdlib_context() - self.context = context - SMTP.__init__(self, host, port, local_hostname, timeout, - source_address) - - def _get_socket(self, host, port, timeout): - if self.debuglevel > 0: - self._print_debug('connect:', (host, port)) - new_socket = super()._get_socket(host, port, timeout) - new_socket = self.context.wrap_socket(new_socket, - server_hostname=self._host) - return new_socket - - __all__.append("SMTP_SSL") - -# -# LMTP extension -# -LMTP_PORT = 2003 - -class LMTP(SMTP): - """LMTP - Local Mail Transfer Protocol - - The LMTP protocol, which is very similar to ESMTP, is heavily based - on the standard SMTP client. It's common to use Unix sockets for - LMTP, so our connect() method must support that as well as a regular - host:port server. local_hostname and source_address have the same - meaning as they do in the SMTP class. To specify a Unix socket, - you must use an absolute path as the host, starting with a '/'. - - Authentication is supported, using the regular SMTP mechanism. 
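The SMTP_SSL subclass above wraps the socket in TLS before the banner is read, so no starttls() step is involved. A minimal usage sketch (host and credentials are placeholders):

import smtplib, ssl

ctx = ssl.create_default_context()
with smtplib.SMTP_SSL("mail.example.org", 465, context=ctx) as s:
    s.login("user", "password")
    s.noop()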
When - using a Unix socket, LMTP servers generally don't support or require any - authentication, but your mileage might vary.""" - - ehlo_msg = "lhlo" - - def __init__(self, host='', port=LMTP_PORT, local_hostname=None, - source_address=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): - """Initialize a new instance.""" - super().__init__(host, port, local_hostname=local_hostname, - source_address=source_address, timeout=timeout) - - def connect(self, host='localhost', port=0, source_address=None): - """Connect to the LMTP daemon, on either a Unix or a TCP socket.""" - if host[0] != '/': - return super().connect(host, port, source_address=source_address) - - if self.timeout is not None and not self.timeout: - raise ValueError('Non-blocking socket (timeout=0) is not supported') - - # Handle Unix-domain sockets. - try: - self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: - self.sock.settimeout(self.timeout) - self.file = None - self.sock.connect(host) - except OSError: - if self.debuglevel > 0: - self._print_debug('connect fail:', host) - if self.sock: - self.sock.close() - self.sock = None - raise - (code, msg) = self.getreply() - if self.debuglevel > 0: - self._print_debug('connect:', msg) - return (code, msg) - - -# Test the sendmail method, which tests most of the others. -# Note: This always sends to localhost. -if __name__ == '__main__': - def prompt(prompt): - sys.stdout.write(prompt + ": ") - sys.stdout.flush() - return sys.stdin.readline().strip() - - fromaddr = prompt("From") - toaddrs = prompt("To").split(',') - print("Enter message, end with ^D:") - msg = '' - while line := sys.stdin.readline(): - msg = msg + line - print("Message length is %d" % len(msg)) - - server = SMTP('localhost') - server.set_debuglevel(1) - server.sendmail(fromaddr, toaddrs, msg) - server.quit() diff --git a/Python313_13_x64_Template/Lib/socket.py b/Python313_13_x64_Template/Lib/socket.py deleted file mode 100644 index 35d87eff..00000000 --- a/Python313_13_x64_Template/Lib/socket.py +++ /dev/null @@ -1,982 +0,0 @@ -# Wrapper module for _socket, providing some additional facilities -# implemented in Python. - -"""\ -This module provides socket operations and some related functions. -On Unix, it supports IP (Internet Protocol) and Unix domain sockets. -On other systems, it only supports IP. Functions specific for a -socket are available as methods of the socket object. - -Functions: - -socket() -- create a new socket object -socketpair() -- create a pair of new socket objects [*] -fromfd() -- create a socket object from an open file descriptor [*] -send_fds() -- Send file descriptor to the socket. -recv_fds() -- Receive file descriptors from the socket. -fromshare() -- create a socket object from data received from socket.share() [*] -gethostname() -- return the current hostname -gethostbyname() -- map a hostname to its IP number -gethostbyaddr() -- map an IP number or hostname to DNS info -getservbyname() -- map a service name and a protocol name to a port number -getprotobyname() -- map a protocol name (e.g. 
'tcp') to a number -ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order -htons(), htonl() -- convert 16, 32 bit int from host to network byte order -inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format -inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) -socket.getdefaulttimeout() -- get the default timeout value -socket.setdefaulttimeout() -- set the default timeout value -create_connection() -- connects to an address, with an optional timeout and - optional source address. -create_server() -- create a TCP socket and bind it to a specified address. - - [*] not available on all platforms! - -Special objects: - -SocketType -- type object for socket objects -error -- exception raised for I/O errors -has_ipv6 -- boolean value indicating if IPv6 is supported - -IntEnum constants: - -AF_INET, AF_UNIX -- socket domains (first argument to socket() call) -SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) - -Integer constants: - -Many other constants may be defined; these may be used in calls to -the setsockopt() and getsockopt() methods. -""" - -import _socket -from _socket import * - -import os, sys, io, selectors -from enum import IntEnum, IntFlag - -try: - import errno -except ImportError: - errno = None -EBADF = getattr(errno, 'EBADF', 9) -EAGAIN = getattr(errno, 'EAGAIN', 11) -EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11) - -__all__ = ["fromfd", "getfqdn", "create_connection", "create_server", - "has_dualstack_ipv6", "AddressFamily", "SocketKind"] -__all__.extend(os._get_exports_list(_socket)) - -# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for -# nicer string representations. -# Note that _socket only knows about the integer values. The public interface -# in this module understands the enums and translates them back from integers -# where needed (e.g. .family property of a socket object). - -IntEnum._convert_( - 'AddressFamily', - __name__, - lambda C: C.isupper() and C.startswith('AF_')) - -IntEnum._convert_( - 'SocketKind', - __name__, - lambda C: C.isupper() and C.startswith('SOCK_')) - -IntFlag._convert_( - 'MsgFlag', - __name__, - lambda C: C.isupper() and C.startswith('MSG_')) - -IntFlag._convert_( - 'AddressInfo', - __name__, - lambda C: C.isupper() and C.startswith('AI_')) - -_LOCALHOST = '127.0.0.1' -_LOCALHOST_V6 = '::1' - - -def _intenum_converter(value, enum_klass): - """Convert a numeric family value to an IntEnum member. - - If it's not a known member, return the numeric value itself. - """ - try: - return enum_klass(value) - except ValueError: - return value - - -# WSA error codes -if sys.platform.lower().startswith("win"): - errorTab = {} - errorTab[6] = "Specified event object handle is invalid." - errorTab[8] = "Insufficient memory available." - errorTab[87] = "One or more parameters are invalid." - errorTab[995] = "Overlapped operation aborted." - errorTab[996] = "Overlapped I/O event object not in signaled state." - errorTab[997] = "Overlapped operation will complete later." - errorTab[10004] = "The operation was interrupted." - errorTab[10009] = "A bad file handle was passed." - errorTab[10013] = "Permission denied." - errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT - errorTab[10022] = "An invalid operation was attempted." - errorTab[10024] = "Too many open files." - errorTab[10035] = "The socket operation would block." - errorTab[10036] = "A blocking operation is already in progress." - errorTab[10037] = "Operation already in progress." 
- errorTab[10038] = "Socket operation on nonsocket." - errorTab[10039] = "Destination address required." - errorTab[10040] = "Message too long." - errorTab[10041] = "Protocol wrong type for socket." - errorTab[10042] = "Bad protocol option." - errorTab[10043] = "Protocol not supported." - errorTab[10044] = "Socket type not supported." - errorTab[10045] = "Operation not supported." - errorTab[10046] = "Protocol family not supported." - errorTab[10047] = "Address family not supported by protocol family." - errorTab[10048] = "The network address is in use." - errorTab[10049] = "Cannot assign requested address." - errorTab[10050] = "Network is down." - errorTab[10051] = "Network is unreachable." - errorTab[10052] = "Network dropped connection on reset." - errorTab[10053] = "Software caused connection abort." - errorTab[10054] = "The connection has been reset." - errorTab[10055] = "No buffer space available." - errorTab[10056] = "Socket is already connected." - errorTab[10057] = "Socket is not connected." - errorTab[10058] = "The network has been shut down." - errorTab[10059] = "Too many references." - errorTab[10060] = "The operation timed out." - errorTab[10061] = "Connection refused." - errorTab[10062] = "Cannot translate name." - errorTab[10063] = "The name is too long." - errorTab[10064] = "The host is down." - errorTab[10065] = "The host is unreachable." - errorTab[10066] = "Directory not empty." - errorTab[10067] = "Too many processes." - errorTab[10068] = "User quota exceeded." - errorTab[10069] = "Disk quota exceeded." - errorTab[10070] = "Stale file handle reference." - errorTab[10071] = "Item is remote." - errorTab[10091] = "Network subsystem is unavailable." - errorTab[10092] = "Winsock.dll version out of range." - errorTab[10093] = "Successful WSAStartup not yet performed." - errorTab[10101] = "Graceful shutdown in progress." - errorTab[10102] = "No more results from WSALookupServiceNext." - errorTab[10103] = "Call has been canceled." - errorTab[10104] = "Procedure call table is invalid." - errorTab[10105] = "Service provider is invalid." - errorTab[10106] = "Service provider failed to initialize." - errorTab[10107] = "System call failure." - errorTab[10108] = "Service not found." - errorTab[10109] = "Class type not found." - errorTab[10110] = "No more results from WSALookupServiceNext." - errorTab[10111] = "Call was canceled." - errorTab[10112] = "Database query was refused." - errorTab[11001] = "Host not found." - errorTab[11002] = "Nonauthoritative host not found." - errorTab[11003] = "This is a nonrecoverable error." - errorTab[11004] = "Valid name, no data record requested type." - errorTab[11005] = "QoS receivers." - errorTab[11006] = "QoS senders." - errorTab[11007] = "No QoS senders." - errorTab[11008] = "QoS no receivers." - errorTab[11009] = "QoS request confirmed." - errorTab[11010] = "QoS admission error." - errorTab[11011] = "QoS policy failure." - errorTab[11012] = "QoS bad style." - errorTab[11013] = "QoS bad object." - errorTab[11014] = "QoS traffic control error." - errorTab[11015] = "QoS generic error." - errorTab[11016] = "QoS service type error." - errorTab[11017] = "QoS flowspec error." - errorTab[11018] = "Invalid QoS provider buffer." - errorTab[11019] = "Invalid QoS filter style." - errorTab[11020] = "Invalid QoS filter style." - errorTab[11021] = "Incorrect QoS filter count." - errorTab[11022] = "Invalid QoS object length." - errorTab[11023] = "Incorrect QoS flow count." - errorTab[11024] = "Unrecognized QoS object." 
- errorTab[11025] = "Invalid QoS policy object." - errorTab[11026] = "Invalid QoS flow descriptor." - errorTab[11027] = "Invalid QoS provider-specific flowspec." - errorTab[11028] = "Invalid QoS provider-specific filterspec." - errorTab[11029] = "Invalid QoS shape discard mode object." - errorTab[11030] = "Invalid QoS shaping rate object." - errorTab[11031] = "Reserved policy QoS element type." - __all__.append("errorTab") - - -class _GiveupOnSendfile(Exception): pass - - -class socket(_socket.socket): - - """A subclass of _socket.socket adding the makefile() method.""" - - __slots__ = ["__weakref__", "_io_refs", "_closed"] - - def __init__(self, family=-1, type=-1, proto=-1, fileno=None): - # For user code address family and type values are IntEnum members, but - # for the underlying _socket.socket they're just integers. The - # constructor of _socket.socket converts the given argument to an - # integer automatically. - if fileno is None: - if family == -1: - family = AF_INET - if type == -1: - type = SOCK_STREAM - if proto == -1: - proto = 0 - _socket.socket.__init__(self, family, type, proto, fileno) - self._io_refs = 0 - self._closed = False - - def __enter__(self): - return self - - def __exit__(self, *args): - if not self._closed: - self.close() - - def __repr__(self): - """Wrap __repr__() to reveal the real class name and socket - address(es). - """ - closed = getattr(self, '_closed', False) - s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \ - % (self.__class__.__module__, - self.__class__.__qualname__, - " [closed]" if closed else "", - self.fileno(), - self.family, - self.type, - self.proto) - if not closed: - # getsockname and getpeername may not be available on WASI. - try: - laddr = self.getsockname() - if laddr: - s += ", laddr=%s" % str(laddr) - except (error, AttributeError): - pass - try: - raddr = self.getpeername() - if raddr: - s += ", raddr=%s" % str(raddr) - except (error, AttributeError): - pass - s += '>' - return s - - def __getstate__(self): - raise TypeError(f"cannot pickle {self.__class__.__name__!r} object") - - def dup(self): - """dup() -> socket object - - Duplicate the socket. Return a new socket object connected to the same - system resource. The new socket is non-inheritable. - """ - fd = dup(self.fileno()) - sock = self.__class__(self.family, self.type, self.proto, fileno=fd) - sock.settimeout(self.gettimeout()) - return sock - - def accept(self): - """accept() -> (socket object, address info) - - Wait for an incoming connection. Return a new socket - representing the connection, and the address of the client. - For IP sockets, the address info is a pair (hostaddr, port). - """ - fd, addr = self._accept() - sock = socket(self.family, self.type, self.proto, fileno=fd) - # Issue #7995: if no default timeout is set and the listening - # socket had a (non-zero) timeout, force the new socket in blocking - # mode to override platform-specific socket flags inheritance. - if getdefaulttimeout() is None and self.gettimeout(): - sock.setblocking(True) - return sock, addr - - def makefile(self, mode="r", buffering=None, *, - encoding=None, errors=None, newline=None): - """makefile(...) -> an I/O stream connected to the socket - - The arguments are as for io.open() after the filename, except the only - supported mode values are 'r' (default), 'w', 'b', or a combination of - those. - """ - # XXX refactor to share code? 
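(Illustrative aside, not part of the deleted file: __enter__/__exit__ above are what make sockets usable as context managers, and accept() re-wraps the accepted descriptor in this same subclass so it also gains makefile() and friends. A small sketch:)

    import socket
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind(("127.0.0.1", 0))   # port 0: let the OS pick a free port
        srv.listen()
        print(srv)                   # repr shows fd, family, type and laddr
    # srv.close() has been called here by __exit__()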
- if not set(mode) <= {"r", "w", "b"}: - raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,)) - writing = "w" in mode - reading = "r" in mode or not writing - assert reading or writing - binary = "b" in mode - rawmode = "" - if reading: - rawmode += "r" - if writing: - rawmode += "w" - raw = SocketIO(self, rawmode) - self._io_refs += 1 - if buffering is None: - buffering = -1 - if buffering < 0: - buffering = io.DEFAULT_BUFFER_SIZE - if buffering == 0: - if not binary: - raise ValueError("unbuffered streams must be binary") - return raw - if reading and writing: - buffer = io.BufferedRWPair(raw, raw, buffering) - elif reading: - buffer = io.BufferedReader(raw, buffering) - else: - assert writing - buffer = io.BufferedWriter(raw, buffering) - if binary: - return buffer - encoding = io.text_encoding(encoding) - text = io.TextIOWrapper(buffer, encoding, errors, newline) - text.mode = mode - return text - - if hasattr(os, 'sendfile'): - - def _sendfile_use_sendfile(self, file, offset=0, count=None): - self._check_sendfile_params(file, offset, count) - sockno = self.fileno() - try: - fileno = file.fileno() - except (AttributeError, io.UnsupportedOperation) as err: - raise _GiveupOnSendfile(err) # not a regular file - try: - fsize = os.fstat(fileno).st_size - except OSError as err: - raise _GiveupOnSendfile(err) # not a regular file - if not fsize: - return 0 # empty file - # Truncate to 1GiB to avoid OverflowError, see bpo-38319. - blocksize = min(count or fsize, 2 ** 30) - timeout = self.gettimeout() - if timeout == 0: - raise ValueError("non-blocking sockets are not supported") - # poll/select have the advantage of not requiring any - # extra file descriptor, contrarily to epoll/kqueue - # (also, they require a single syscall). - if hasattr(selectors, 'PollSelector'): - selector = selectors.PollSelector() - else: - selector = selectors.SelectSelector() - selector.register(sockno, selectors.EVENT_WRITE) - - total_sent = 0 - # localize variable access to minimize overhead - selector_select = selector.select - os_sendfile = os.sendfile - try: - while True: - if timeout and not selector_select(timeout): - raise TimeoutError('timed out') - if count: - blocksize = min(count - total_sent, blocksize) - if blocksize <= 0: - break - try: - sent = os_sendfile(sockno, fileno, offset, blocksize) - except BlockingIOError: - if not timeout: - # Block until the socket is ready to send some - # data; avoids hogging CPU resources. - selector_select() - continue - except OSError as err: - if total_sent == 0: - # We can get here for different reasons, the main - # one being 'file' is not a regular mmap(2)-like - # file, in which case we'll fall back on using - # plain send(). 
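(Illustrative aside, not part of the deleted file: the mode juggling above means "rb" yields an io.BufferedReader, "wb" an io.BufferedWriter, "rwb" an io.BufferedRWPair, and text modes add an io.TextIOWrapper on top; buffering=0 is only legal for binary modes and returns the raw SocketIO. For example:)

    import socket
    a, b = socket.socketpair()       # two connected sockets, see further below
    f = a.makefile("rb")             # BufferedReader over a SocketIO
    b.sendall(b"ping\n")
    print(f.readline())              # b'ping\n'
    f.close(); a.close(); b.close()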
- raise _GiveupOnSendfile(err) - raise err from None - else: - if sent == 0: - break # EOF - offset += sent - total_sent += sent - return total_sent - finally: - if total_sent > 0 and hasattr(file, 'seek'): - file.seek(offset) - else: - def _sendfile_use_sendfile(self, file, offset=0, count=None): - raise _GiveupOnSendfile( - "os.sendfile() not available on this platform") - - def _sendfile_use_send(self, file, offset=0, count=None): - self._check_sendfile_params(file, offset, count) - if self.gettimeout() == 0: - raise ValueError("non-blocking sockets are not supported") - if offset: - file.seek(offset) - blocksize = min(count, 8192) if count else 8192 - total_sent = 0 - # localize variable access to minimize overhead - file_read = file.read - sock_send = self.send - try: - while True: - if count: - blocksize = min(count - total_sent, blocksize) - if blocksize <= 0: - break - data = memoryview(file_read(blocksize)) - if not data: - break # EOF - while True: - try: - sent = sock_send(data) - except BlockingIOError: - continue - else: - total_sent += sent - if sent < len(data): - data = data[sent:] - else: - break - return total_sent - finally: - if total_sent > 0 and hasattr(file, 'seek'): - file.seek(offset + total_sent) - - def _check_sendfile_params(self, file, offset, count): - if 'b' not in getattr(file, 'mode', 'b'): - raise ValueError("file should be opened in binary mode") - if not self.type & SOCK_STREAM: - raise ValueError("only SOCK_STREAM type sockets are supported") - if count is not None: - if not isinstance(count, int): - raise TypeError( - "count must be a positive integer (got {!r})".format(count)) - if count <= 0: - raise ValueError( - "count must be a positive integer (got {!r})".format(count)) - - def sendfile(self, file, offset=0, count=None): - """sendfile(file[, offset[, count]]) -> sent - - Send a file until EOF is reached by using high-performance - os.sendfile() and return the total number of bytes which - were sent. - *file* must be a regular file object opened in binary mode. - If os.sendfile() is not available (e.g. Windows) or file is - not a regular file socket.send() will be used instead. - *offset* tells from where to start reading the file. - If specified, *count* is the total number of bytes to transmit - as opposed to sending the file until EOF is reached. - File position is updated on return or also in case of error in - which case file.tell() can be used to figure out the number of - bytes which were sent. - The socket must be of SOCK_STREAM type. - Non-blocking sockets are not supported. - """ - try: - return self._sendfile_use_sendfile(file, offset, count) - except _GiveupOnSendfile: - return self._sendfile_use_send(file, offset, count) - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self, _ss=_socket.socket): - # This function should not reference any globals. See issue #808164. - _ss.close(self) - - def close(self): - # This function should not reference any globals. See issue #808164. - self._closed = True - if self._io_refs <= 0: - self._real_close() - - def detach(self): - """detach() -> file descriptor - - Close the socket object without closing the underlying file descriptor. - The object cannot be used after this call, but the file descriptor - can be reused for other purposes. The file descriptor is returned. - """ - self._closed = True - return super().detach() - - @property - def family(self): - """Read-only access to the address family for this socket. 
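(Illustrative aside, not part of the deleted file: sendfile() above first tries the zero-copy os.sendfile() path and transparently falls back to the plain send() loop, e.g. on Windows or for non-regular files; the file object must be binary. A rough sketch:)

    import socket, tempfile
    src, dst = socket.socketpair()
    with tempfile.TemporaryFile() as f:      # a real, binary, seekable file
        f.write(b"x" * 1000)
        f.seek(0)
        sent = src.sendfile(f)               # offset=0, count=None: send to EOF
    data = b""
    while len(data) < sent:                  # recv() may return partial chunks
        data += dst.recv(4096)
    print(sent, len(data))                   # 1000 1000
    src.close(); dst.close()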
- """ - return _intenum_converter(super().family, AddressFamily) - - @property - def type(self): - """Read-only access to the socket type. - """ - return _intenum_converter(super().type, SocketKind) - - if os.name == 'nt': - def get_inheritable(self): - return os.get_handle_inheritable(self.fileno()) - def set_inheritable(self, inheritable): - os.set_handle_inheritable(self.fileno(), inheritable) - else: - def get_inheritable(self): - return os.get_inheritable(self.fileno()) - def set_inheritable(self, inheritable): - os.set_inheritable(self.fileno(), inheritable) - get_inheritable.__doc__ = "Get the inheritable flag of the socket" - set_inheritable.__doc__ = "Set the inheritable flag of the socket" - -def fromfd(fd, family, type, proto=0): - """ fromfd(fd, family, type[, proto]) -> socket object - - Create a socket object from a duplicate of the given file - descriptor. The remaining arguments are the same as for socket(). - """ - nfd = dup(fd) - return socket(family, type, proto, nfd) - -if hasattr(_socket.socket, "sendmsg"): - import array - - def send_fds(sock, buffers, fds, flags=0, address=None): - """ send_fds(sock, buffers, fds[, flags[, address]]) -> integer - - Send the list of file descriptors fds over an AF_UNIX socket. - """ - return sock.sendmsg(buffers, [(_socket.SOL_SOCKET, - _socket.SCM_RIGHTS, array.array("i", fds))]) - __all__.append("send_fds") - -if hasattr(_socket.socket, "recvmsg"): - import array - - def recv_fds(sock, bufsize, maxfds, flags=0): - """ recv_fds(sock, bufsize, maxfds[, flags]) -> (data, list of file - descriptors, msg_flags, address) - - Receive up to maxfds file descriptors returning the message - data and a list containing the descriptors. - """ - # Array of ints - fds = array.array("i") - msg, ancdata, flags, addr = sock.recvmsg(bufsize, - _socket.CMSG_LEN(maxfds * fds.itemsize)) - for cmsg_level, cmsg_type, cmsg_data in ancdata: - if (cmsg_level == _socket.SOL_SOCKET and cmsg_type == _socket.SCM_RIGHTS): - fds.frombytes(cmsg_data[: - len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) - - return msg, list(fds), flags, addr - __all__.append("recv_fds") - -if hasattr(_socket.socket, "share"): - def fromshare(info): - """ fromshare(info) -> socket object - - Create a socket object from the bytes object returned by - socket.share(pid). - """ - return socket(0, 0, 0, info) - __all__.append("fromshare") - -# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. -# This is used if _socket doesn't natively provide socketpair. It's -# always defined so that it can be patched in for testing purposes. -def _fallback_socketpair(family=AF_INET, type=SOCK_STREAM, proto=0): - if family == AF_INET: - host = _LOCALHOST - elif family == AF_INET6: - host = _LOCALHOST_V6 - else: - raise ValueError("Only AF_INET and AF_INET6 socket address families " - "are supported") - if type != SOCK_STREAM: - raise ValueError("Only SOCK_STREAM socket type is supported") - if proto != 0: - raise ValueError("Only protocol zero is supported") - - # We create a connected TCP socket. Note the trick with - # setblocking(False) that prevents us from having to create a thread. 
- lsock = socket(family, type, proto) - try: - lsock.bind((host, 0)) - lsock.listen() - # On IPv6, ignore flow_info and scope_id - addr, port = lsock.getsockname()[:2] - csock = socket(family, type, proto) - try: - csock.setblocking(False) - try: - csock.connect((addr, port)) - except (BlockingIOError, InterruptedError): - pass - csock.setblocking(True) - ssock, _ = lsock.accept() - except: - csock.close() - raise - finally: - lsock.close() - - # Authenticating avoids using a connection from something else - # able to connect to {host}:{port} instead of us. - # We expect only AF_INET and AF_INET6 families. - try: - if ( - ssock.getsockname() != csock.getpeername() - or csock.getsockname() != ssock.getpeername() - ): - raise ConnectionError("Unexpected peer connection") - except: - # getsockname() and getpeername() can fail - # if either socket isn't connected. - ssock.close() - csock.close() - raise - - return (ssock, csock) - -if hasattr(_socket, "socketpair"): - def socketpair(family=None, type=SOCK_STREAM, proto=0): - if family is None: - try: - family = AF_UNIX - except NameError: - family = AF_INET - a, b = _socket.socketpair(family, type, proto) - a = socket(family, type, proto, a.detach()) - b = socket(family, type, proto, b.detach()) - return a, b - -else: - socketpair = _fallback_socketpair - __all__.append("socketpair") - -socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object) -Create a pair of socket objects from the sockets returned by the platform -socketpair() function. -The arguments are the same as for socket() except the default family is AF_UNIX -if defined on the platform; otherwise, the default is AF_INET. -""" - -_blocking_errnos = { EAGAIN, EWOULDBLOCK } - -class SocketIO(io.RawIOBase): - - """Raw I/O implementation for stream sockets. - - This class supports the makefile() method on sockets. It provides - the raw I/O interface on top of a socket object. - """ - - # One might wonder why not let FileIO do the job instead. There are two - # main reasons why FileIO is not adapted: - # - it wouldn't work under Windows (where you can't used read() and - # write() on a socket handle) - # - it wouldn't work with socket timeouts (FileIO would ignore the - # timeout and consider the socket non-blocking) - - # XXX More docs - - def __init__(self, sock, mode): - if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): - raise ValueError("invalid mode: %r" % mode) - io.RawIOBase.__init__(self) - self._sock = sock - if "b" not in mode: - mode += "b" - self._mode = mode - self._reading = "r" in mode - self._writing = "w" in mode - self._timeout_occurred = False - - def readinto(self, b): - """Read up to len(b) bytes into the writable buffer *b* and return - the number of bytes read. If the socket is non-blocking and no bytes - are available, None is returned. - - If *b* is non-empty, a 0 return value indicates that the connection - was shutdown at the other end. - """ - self._checkClosed() - self._checkReadable() - if self._timeout_occurred: - raise OSError("cannot read from timed out object") - try: - return self._sock.recv_into(b) - except timeout: - self._timeout_occurred = True - raise - except error as e: - if e.errno in _blocking_errnos: - return None - raise - - def write(self, b): - """Write the given bytes or bytearray object *b* to the socket - and return the number of bytes written. This can be less than - len(b) if not all data could be written. If the socket is - non-blocking and no bytes could be written None is returned. 
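(Illustrative aside, not part of the deleted file: the None-return convention documented above for non-blocking sockets is easiest to observe through makefile(..., buffering=0), which hands back the raw SocketIO directly. A sketch:)

    import socket, time
    a, b = socket.socketpair()
    b.setblocking(False)
    f = b.makefile("rb", buffering=0)    # raw SocketIO, no buffering layer
    print(f.read(4))                     # None: non-blocking and no data yet
    a.sendall(b"data")
    time.sleep(0.1)                      # give the bytes time to arrive
    print(f.read(4))                     # b'data'
    f.close(); a.close(); b.close()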
- """ - self._checkClosed() - self._checkWritable() - try: - return self._sock.send(b) - except error as e: - # XXX what about EINTR? - if e.errno in _blocking_errnos: - return None - raise - - def readable(self): - """True if the SocketIO is open for reading. - """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return self._reading - - def writable(self): - """True if the SocketIO is open for writing. - """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return self._writing - - def seekable(self): - """True if the SocketIO is open for seeking. - """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return super().seekable() - - def fileno(self): - """Return the file descriptor of the underlying socket. - """ - self._checkClosed() - return self._sock.fileno() - - @property - def name(self): - if not self.closed: - return self.fileno() - else: - return -1 - - @property - def mode(self): - return self._mode - - def close(self): - """Close the SocketIO object. This doesn't close the underlying - socket, except if all references to it have disappeared. - """ - if self.closed: - return - io.RawIOBase.close(self) - self._sock._decref_socketios() - self._sock = None - - -def getfqdn(name=''): - """Get fully qualified domain name from name. - - An empty argument is interpreted as meaning the local host. - - First the hostname returned by gethostbyaddr() is checked, then - possibly existing aliases. In case no FQDN is available and `name` - was given, it is returned unchanged. If `name` was empty, '0.0.0.0' or '::', - hostname from gethostname() is returned. - """ - name = name.strip() - if not name or name in ('0.0.0.0', '::'): - name = gethostname() - try: - hostname, aliases, ipaddrs = gethostbyaddr(name) - except error: - pass - else: - aliases.insert(0, hostname) - for name in aliases: - if '.' in name: - break - else: - name = hostname - return name - - -_GLOBAL_DEFAULT_TIMEOUT = object() - -def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, - source_address=None, *, all_errors=False): - """Connect to *address* and return the socket object. - - Convenience function. Connect to *address* (a 2-tuple ``(host, - port)``) and return the socket object. Passing the optional - *timeout* parameter will set the timeout on the socket instance - before attempting to connect. If no *timeout* is supplied, the - global default timeout setting returned by :func:`getdefaulttimeout` - is used. If *source_address* is set it must be a tuple of (host, port) - for the socket to bind as a source address before making the connection. - A host of '' or port 0 tells the OS to use the default. When a connection - cannot be created, raises the last error if *all_errors* is False, - and an ExceptionGroup of all errors if *all_errors* is True. 
- """ - - host, port = address - exceptions = [] - for res in getaddrinfo(host, port, 0, SOCK_STREAM): - af, socktype, proto, canonname, sa = res - sock = None - try: - sock = socket(af, socktype, proto) - if timeout is not _GLOBAL_DEFAULT_TIMEOUT: - sock.settimeout(timeout) - if source_address: - sock.bind(source_address) - sock.connect(sa) - # Break explicitly a reference cycle - exceptions.clear() - return sock - - except error as exc: - if not all_errors: - exceptions.clear() # raise only the last error - exceptions.append(exc) - if sock is not None: - sock.close() - - if len(exceptions): - try: - if not all_errors: - raise exceptions[0] - raise ExceptionGroup("create_connection failed", exceptions) - finally: - # Break explicitly a reference cycle - exceptions.clear() - else: - raise error("getaddrinfo returns an empty list") - - -def has_dualstack_ipv6(): - """Return True if the platform supports creating a SOCK_STREAM socket - which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections. - """ - if not has_ipv6 \ - or not hasattr(_socket, 'IPPROTO_IPV6') \ - or not hasattr(_socket, 'IPV6_V6ONLY'): - return False - try: - with socket(AF_INET6, SOCK_STREAM) as sock: - sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) - return True - except error: - return False - - -def create_server(address, *, family=AF_INET, backlog=None, reuse_port=False, - dualstack_ipv6=False): - """Convenience function which creates a SOCK_STREAM type socket - bound to *address* (a 2-tuple (host, port)) and return the socket - object. - - *family* should be either AF_INET or AF_INET6. - *backlog* is the queue size passed to socket.listen(). - *reuse_port* dictates whether to use the SO_REUSEPORT socket option. - *dualstack_ipv6*: if true and the platform supports it, it will - create an AF_INET6 socket able to accept both IPv4 or IPv6 - connections. When false it will explicitly disable this option on - platforms that enable it by default (e.g. Linux). - - >>> with create_server(('', 8000)) as server: - ... while True: - ... conn, addr = server.accept() - ... # handle new connection - """ - if reuse_port and not hasattr(_socket, "SO_REUSEPORT"): - raise ValueError("SO_REUSEPORT not supported on this platform") - if dualstack_ipv6: - if not has_dualstack_ipv6(): - raise ValueError("dualstack_ipv6 not supported on this platform") - if family != AF_INET6: - raise ValueError("dualstack_ipv6 requires AF_INET6 family") - sock = socket(family, SOCK_STREAM) - try: - # Note about Windows. We don't set SO_REUSEADDR because: - # 1) It's unnecessary: bind() will succeed even in case of a - # previous closed socket on the same address and still in - # TIME_WAIT state. - # 2) If set, another socket is free to bind() on the same - # address, effectively preventing this one from accepting - # connections. Also, it may set the process in a state where - # it'll no longer respond to any signals or graceful kills. - # See: https://learn.microsoft.com/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse - if os.name not in ('nt', 'cygwin') and \ - hasattr(_socket, 'SO_REUSEADDR'): - try: - sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) - except error: - # Fail later on bind(), for platforms which may not - # support this option. - pass - # Since Linux 6.12.9, SO_REUSEPORT is not allowed - # on other address families than AF_INET/AF_INET6. 
- if reuse_port and family in (AF_INET, AF_INET6): - sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1) - if has_ipv6 and family == AF_INET6: - if dualstack_ipv6: - sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) - elif hasattr(_socket, "IPV6_V6ONLY") and \ - hasattr(_socket, "IPPROTO_IPV6"): - sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1) - try: - sock.bind(address) - except error as err: - msg = '%s (while attempting to bind on address %r)' % \ - (err.strerror, address) - raise error(err.errno, msg) from None - if backlog is None: - sock.listen() - else: - sock.listen(backlog) - return sock - except error: - sock.close() - raise - - -def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): - """Resolve host and port into list of address info entries. - - Translate the host/port argument into a sequence of 5-tuples that contain - all the necessary arguments for creating a socket connected to that service. - host is a domain name, a string representation of an IPv4/v6 address or - None. port is a string service name such as 'http', a numeric port number or - None. By passing None as the value of host and port, you can pass NULL to - the underlying C API. - - The family, type and proto arguments can be optionally specified in order to - narrow the list of addresses returned. Passing zero as a value for each of - these arguments selects the full range of results. - """ - # We override this function since we want to translate the numeric family - # and socket type values to enum constants. - addrlist = [] - for res in _socket.getaddrinfo(host, port, family, type, proto, flags): - af, socktype, proto, canonname, sa = res - addrlist.append((_intenum_converter(af, AddressFamily), - _intenum_converter(socktype, SocketKind), - proto, canonname, sa)) - return addrlist diff --git a/Python313_13_x64_Template/Lib/sqlite3/__init__.py b/Python313_13_x64_Template/Lib/sqlite3/__init__.py deleted file mode 100644 index e3c81ffc..00000000 --- a/Python313_13_x64_Template/Lib/sqlite3/__init__.py +++ /dev/null @@ -1,70 +0,0 @@ -# pysqlite2/__init__.py: the pysqlite2 package. -# -# Copyright (C) 2005 Gerhard Häring -# -# This file is part of pysqlite. -# -# This software is provided 'as-is', without any express or implied -# warranty. In no event will the authors be held liable for any damages -# arising from the use of this software. -# -# Permission is granted to anyone to use this software for any purpose, -# including commercial applications, and to alter it and redistribute it -# freely, subject to the following restrictions: -# -# 1. The origin of this software must not be misrepresented; you must not -# claim that you wrote the original software. If you use this software -# in a product, an acknowledgment in the product documentation would be -# appreciated but is not required. -# 2. Altered source versions must be plainly marked as such, and must not be -# misrepresented as being the original software. -# 3. This notice may not be removed or altered from any source distribution. - -""" -The sqlite3 extension module provides a DB-API 2.0 (PEP 249) compliant -interface to the SQLite library, and requires SQLite 3.15.2 or newer. 
- -To use the module, start by creating a database Connection object: - - import sqlite3 - cx = sqlite3.connect("test.db") # test.db will be created or opened - -The special path name ":memory:" can be provided to connect to a transient -in-memory database: - - cx = sqlite3.connect(":memory:") # connect to a database in RAM - -Once a connection has been established, create a Cursor object and call -its execute() method to perform SQL queries: - - cu = cx.cursor() - - # create a table - cu.execute("create table lang(name, first_appeared)") - - # insert values into a table - cu.execute("insert into lang values (?, ?)", ("C", 1972)) - - # execute a query and iterate over the result - for row in cu.execute("select * from lang"): - print(row) - - cx.close() - -The sqlite3 module is written by Gerhard Häring . -""" - -from sqlite3.dbapi2 import * -from sqlite3.dbapi2 import (_deprecated_names, - _deprecated_version_info, - _deprecated_version) - - -def __getattr__(name): - if name in _deprecated_names: - from warnings import warn - - warn(f"{name} is deprecated and will be removed in Python 3.14", - DeprecationWarning, stacklevel=2) - return globals()[f"_deprecated_{name}"] - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/Python313_13_x64_Template/Lib/sqlite3/__main__.py b/Python313_13_x64_Template/Lib/sqlite3/__main__.py deleted file mode 100644 index 87a80a6f..00000000 --- a/Python313_13_x64_Template/Lib/sqlite3/__main__.py +++ /dev/null @@ -1,139 +0,0 @@ -"""A simple SQLite CLI for the sqlite3 module. - -Apart from using 'argparse' for the command-line interface, -this module implements the REPL as a thin wrapper around -the InteractiveConsole class from the 'code' stdlib module. -""" -import sqlite3 -import sys - -from argparse import ArgumentParser -from code import InteractiveConsole -from textwrap import dedent - - -def execute(c, sql, suppress_errors=True): - """Helper that wraps execution of SQL code. - - This is used both by the REPL and by direct execution from the CLI. - - 'c' may be a cursor or a connection. - 'sql' is the SQL string to execute. - """ - - try: - for row in c.execute(sql): - print(row) - except sqlite3.Error as e: - tp = type(e).__name__ - try: - print(f"{tp} ({e.sqlite_errorname}): {e}", file=sys.stderr) - except AttributeError: - print(f"{tp}: {e}", file=sys.stderr) - if not suppress_errors: - sys.exit(1) - - -class SqliteInteractiveConsole(InteractiveConsole): - """A simple SQLite REPL.""" - - def __init__(self, connection): - super().__init__() - self._con = connection - self._cur = connection.cursor() - - def runsource(self, source, filename="", symbol="single"): - """Override runsource, the core of the InteractiveConsole REPL. - - Return True if more input is needed; buffering is done automatically. - Return False is input is a complete statement ready for execution. 
- """ - if not source or source.isspace(): - return False - if source[0] == ".": - match source[1:].strip(): - case "version": - print(f"{sqlite3.sqlite_version}") - case "help": - print("Enter SQL code and press enter.") - case "quit": - sys.exit(0) - case "": - pass - case _ as unknown: - self.write("Error: unknown command or invalid arguments:" - f' "{unknown}".\n') - else: - if not sqlite3.complete_statement(source): - return True - execute(self._cur, source) - return False - - -def main(*args): - parser = ArgumentParser( - description="Python sqlite3 CLI", - prog="python -m sqlite3", - ) - parser.add_argument( - "filename", type=str, default=":memory:", nargs="?", - help=( - "SQLite database to open (defaults to ':memory:'). " - "A new database is created if the file does not previously exist." - ), - ) - parser.add_argument( - "sql", type=str, nargs="?", - help=( - "An SQL query to execute. " - "Any returned rows are printed to stdout." - ), - ) - parser.add_argument( - "-v", "--version", action="version", - version=f"SQLite version {sqlite3.sqlite_version}", - help="Print underlying SQLite library version", - ) - args = parser.parse_args(*args) - - if args.filename == ":memory:": - db_name = "a transient in-memory database" - else: - db_name = repr(args.filename) - - # Prepare REPL banner and prompts. - if sys.platform == "win32" and "idlelib.run" not in sys.modules: - eofkey = "CTRL-Z" - else: - eofkey = "CTRL-D" - banner = dedent(f""" - sqlite3 shell, running on SQLite version {sqlite3.sqlite_version} - Connected to {db_name} - - Each command will be run using execute() on the cursor. - Type ".help" for more information; type ".quit" or {eofkey} to quit. - """).strip() - sys.ps1 = "sqlite> " - sys.ps2 = " ... " - - con = sqlite3.connect(args.filename, isolation_level=None) - try: - if args.sql: - # SQL statement provided on the command-line; execute it directly. - execute(con, args.sql, suppress_errors=False) - else: - # No SQL provided; start the REPL. - console = SqliteInteractiveConsole(con) - try: - import readline - except ImportError: - pass - console.interact(banner, exitmsg="") - finally: - con.close() - - sys.exit(0) - - -if __name__ == "__main__": - main(sys.argv[1:]) diff --git a/Python313_13_x64_Template/Lib/sqlite3/dbapi2.py b/Python313_13_x64_Template/Lib/sqlite3/dbapi2.py deleted file mode 100644 index 56fc0461..00000000 --- a/Python313_13_x64_Template/Lib/sqlite3/dbapi2.py +++ /dev/null @@ -1,108 +0,0 @@ -# pysqlite2/dbapi2.py: the DB-API 2.0 interface -# -# Copyright (C) 2004-2005 Gerhard Häring -# -# This file is part of pysqlite. -# -# This software is provided 'as-is', without any express or implied -# warranty. In no event will the authors be held liable for any damages -# arising from the use of this software. -# -# Permission is granted to anyone to use this software for any purpose, -# including commercial applications, and to alter it and redistribute it -# freely, subject to the following restrictions: -# -# 1. The origin of this software must not be misrepresented; you must not -# claim that you wrote the original software. If you use this software -# in a product, an acknowledgment in the product documentation would be -# appreciated but is not required. -# 2. Altered source versions must be plainly marked as such, and must not be -# misrepresented as being the original software. -# 3. This notice may not be removed or altered from any source distribution. 
- -import datetime -import time -import collections.abc - -from _sqlite3 import * -from _sqlite3 import _deprecated_version - -_deprecated_names = frozenset({"version", "version_info"}) - -paramstyle = "qmark" - -apilevel = "2.0" - -Date = datetime.date - -Time = datetime.time - -Timestamp = datetime.datetime - -def DateFromTicks(ticks): - return Date(*time.localtime(ticks)[:3]) - -def TimeFromTicks(ticks): - return Time(*time.localtime(ticks)[3:6]) - -def TimestampFromTicks(ticks): - return Timestamp(*time.localtime(ticks)[:6]) - -_deprecated_version_info = tuple(map(int, _deprecated_version.split("."))) -sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")]) - -Binary = memoryview -collections.abc.Sequence.register(Row) - -def register_adapters_and_converters(): - from warnings import warn - - msg = ("The default {what} is deprecated as of Python 3.12; " - "see the sqlite3 documentation for suggested replacement recipes") - - def adapt_date(val): - warn(msg.format(what="date adapter"), DeprecationWarning, stacklevel=2) - return val.isoformat() - - def adapt_datetime(val): - warn(msg.format(what="datetime adapter"), DeprecationWarning, stacklevel=2) - return val.isoformat(" ") - - def convert_date(val): - warn(msg.format(what="date converter"), DeprecationWarning, stacklevel=2) - return datetime.date(*map(int, val.split(b"-"))) - - def convert_timestamp(val): - warn(msg.format(what="timestamp converter"), DeprecationWarning, stacklevel=2) - datepart, timepart = val.split(b" ") - year, month, day = map(int, datepart.split(b"-")) - timepart_full = timepart.split(b".") - hours, minutes, seconds = map(int, timepart_full[0].split(b":")) - if len(timepart_full) == 2: - microseconds = int('{:0<6.6}'.format(timepart_full[1].decode())) - else: - microseconds = 0 - - val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds) - return val - - - register_adapter(datetime.date, adapt_date) - register_adapter(datetime.datetime, adapt_datetime) - register_converter("date", convert_date) - register_converter("timestamp", convert_timestamp) - -register_adapters_and_converters() - -# Clean up namespace - -del(register_adapters_and_converters) - -def __getattr__(name): - if name in _deprecated_names: - from warnings import warn - - warn(f"{name} is deprecated and will be removed in Python 3.14", - DeprecationWarning, stacklevel=2) - return globals()[f"_deprecated_{name}"] - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/Python313_13_x64_Template/Lib/ssl.py b/Python313_13_x64_Template/Lib/ssl.py deleted file mode 100644 index 7508e4f2..00000000 --- a/Python313_13_x64_Template/Lib/ssl.py +++ /dev/null @@ -1,1529 +0,0 @@ -# Wrapper module for _ssl, providing some additional facilities -# implemented in Python. Written by Bill Janssen. - -"""This module provides some more Pythonic support for SSL. 
- -Object types: - - SSLSocket -- subtype of socket.socket which does SSL over the socket - -Exceptions: - - SSLError -- exception raised for I/O errors - -Functions: - - cert_time_to_seconds -- convert time string used for certificate - notBefore and notAfter functions to integer - seconds past the Epoch (the time values - returned from time.time()) - - get_server_certificate (addr, ssl_version, ca_certs, timeout) -- Retrieve the - certificate from the server at the specified - address and return it as a PEM-encoded string - - -Integer constants: - -SSL_ERROR_ZERO_RETURN -SSL_ERROR_WANT_READ -SSL_ERROR_WANT_WRITE -SSL_ERROR_WANT_X509_LOOKUP -SSL_ERROR_SYSCALL -SSL_ERROR_SSL -SSL_ERROR_WANT_CONNECT - -SSL_ERROR_EOF -SSL_ERROR_INVALID_ERROR_CODE - -The following group define certificate requirements that one side is -allowing/requiring from the other side: - -CERT_NONE - no certificates from the other side are required (or will - be looked at if provided) -CERT_OPTIONAL - certificates are not required, but if provided will be - validated, and if validation fails, the connection will - also fail -CERT_REQUIRED - certificates are required, and will be validated, and - if validation fails, the connection will also fail - -The following constants identify various SSL protocol variants: - -PROTOCOL_SSLv2 -PROTOCOL_SSLv3 -PROTOCOL_SSLv23 -PROTOCOL_TLS -PROTOCOL_TLS_CLIENT -PROTOCOL_TLS_SERVER -PROTOCOL_TLSv1 -PROTOCOL_TLSv1_1 -PROTOCOL_TLSv1_2 - -The following constants identify various SSL alert message descriptions as per -http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-6 - -ALERT_DESCRIPTION_CLOSE_NOTIFY -ALERT_DESCRIPTION_UNEXPECTED_MESSAGE -ALERT_DESCRIPTION_BAD_RECORD_MAC -ALERT_DESCRIPTION_RECORD_OVERFLOW -ALERT_DESCRIPTION_DECOMPRESSION_FAILURE -ALERT_DESCRIPTION_HANDSHAKE_FAILURE -ALERT_DESCRIPTION_BAD_CERTIFICATE -ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE -ALERT_DESCRIPTION_CERTIFICATE_REVOKED -ALERT_DESCRIPTION_CERTIFICATE_EXPIRED -ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN -ALERT_DESCRIPTION_ILLEGAL_PARAMETER -ALERT_DESCRIPTION_UNKNOWN_CA -ALERT_DESCRIPTION_ACCESS_DENIED -ALERT_DESCRIPTION_DECODE_ERROR -ALERT_DESCRIPTION_DECRYPT_ERROR -ALERT_DESCRIPTION_PROTOCOL_VERSION -ALERT_DESCRIPTION_INSUFFICIENT_SECURITY -ALERT_DESCRIPTION_INTERNAL_ERROR -ALERT_DESCRIPTION_USER_CANCELLED -ALERT_DESCRIPTION_NO_RENEGOTIATION -ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION -ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE -ALERT_DESCRIPTION_UNRECOGNIZED_NAME -ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE -ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE -ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY -""" - -import sys -import os -from collections import namedtuple -from enum import Enum as _Enum, IntEnum as _IntEnum, IntFlag as _IntFlag -from enum import _simple_enum - -import _ssl # if we can't import it, let the error propagate - -from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION -from _ssl import _SSLContext, MemoryBIO, SSLSession -from _ssl import ( - SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError, - SSLSyscallError, SSLEOFError, SSLCertVerificationError - ) -from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj -from _ssl import RAND_status, RAND_add, RAND_bytes -try: - from _ssl import RAND_egd -except ImportError: - # RAND_egd is not supported on some platforms - pass - - -from _ssl import ( - HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN, HAS_SSLv2, HAS_SSLv3, HAS_TLSv1, - HAS_TLSv1_1, HAS_TLSv1_2, HAS_TLSv1_3, HAS_PSK -) -from _ssl import 
_DEFAULT_CIPHERS, _OPENSSL_API_VERSION - -_IntEnum._convert_( - '_SSLMethod', __name__, - lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23', - source=_ssl) - -_IntFlag._convert_( - 'Options', __name__, - lambda name: name.startswith('OP_'), - source=_ssl) - -_IntEnum._convert_( - 'AlertDescription', __name__, - lambda name: name.startswith('ALERT_DESCRIPTION_'), - source=_ssl) - -_IntEnum._convert_( - 'SSLErrorNumber', __name__, - lambda name: name.startswith('SSL_ERROR_'), - source=_ssl) - -_IntFlag._convert_( - 'VerifyFlags', __name__, - lambda name: name.startswith('VERIFY_'), - source=_ssl) - -_IntEnum._convert_( - 'VerifyMode', __name__, - lambda name: name.startswith('CERT_'), - source=_ssl) - -PROTOCOL_SSLv23 = _SSLMethod.PROTOCOL_SSLv23 = _SSLMethod.PROTOCOL_TLS -_PROTOCOL_NAMES = {value: name for name, value in _SSLMethod.__members__.items()} - -_SSLv2_IF_EXISTS = getattr(_SSLMethod, 'PROTOCOL_SSLv2', None) - - -@_simple_enum(_IntEnum) -class TLSVersion: - MINIMUM_SUPPORTED = _ssl.PROTO_MINIMUM_SUPPORTED - SSLv3 = _ssl.PROTO_SSLv3 - TLSv1 = _ssl.PROTO_TLSv1 - TLSv1_1 = _ssl.PROTO_TLSv1_1 - TLSv1_2 = _ssl.PROTO_TLSv1_2 - TLSv1_3 = _ssl.PROTO_TLSv1_3 - MAXIMUM_SUPPORTED = _ssl.PROTO_MAXIMUM_SUPPORTED - - -@_simple_enum(_IntEnum) -class _TLSContentType: - """Content types (record layer) - - See RFC 8446, section B.1 - """ - CHANGE_CIPHER_SPEC = 20 - ALERT = 21 - HANDSHAKE = 22 - APPLICATION_DATA = 23 - # pseudo content types - HEADER = 0x100 - INNER_CONTENT_TYPE = 0x101 - - -@_simple_enum(_IntEnum) -class _TLSAlertType: - """Alert types for TLSContentType.ALERT messages - - See RFC 8446, section B.2 - """ - CLOSE_NOTIFY = 0 - UNEXPECTED_MESSAGE = 10 - BAD_RECORD_MAC = 20 - DECRYPTION_FAILED = 21 - RECORD_OVERFLOW = 22 - DECOMPRESSION_FAILURE = 30 - HANDSHAKE_FAILURE = 40 - NO_CERTIFICATE = 41 - BAD_CERTIFICATE = 42 - UNSUPPORTED_CERTIFICATE = 43 - CERTIFICATE_REVOKED = 44 - CERTIFICATE_EXPIRED = 45 - CERTIFICATE_UNKNOWN = 46 - ILLEGAL_PARAMETER = 47 - UNKNOWN_CA = 48 - ACCESS_DENIED = 49 - DECODE_ERROR = 50 - DECRYPT_ERROR = 51 - EXPORT_RESTRICTION = 60 - PROTOCOL_VERSION = 70 - INSUFFICIENT_SECURITY = 71 - INTERNAL_ERROR = 80 - INAPPROPRIATE_FALLBACK = 86 - USER_CANCELED = 90 - NO_RENEGOTIATION = 100 - MISSING_EXTENSION = 109 - UNSUPPORTED_EXTENSION = 110 - CERTIFICATE_UNOBTAINABLE = 111 - UNRECOGNIZED_NAME = 112 - BAD_CERTIFICATE_STATUS_RESPONSE = 113 - BAD_CERTIFICATE_HASH_VALUE = 114 - UNKNOWN_PSK_IDENTITY = 115 - CERTIFICATE_REQUIRED = 116 - NO_APPLICATION_PROTOCOL = 120 - - -@_simple_enum(_IntEnum) -class _TLSMessageType: - """Message types (handshake protocol) - - See RFC 8446, section B.3 - """ - HELLO_REQUEST = 0 - CLIENT_HELLO = 1 - SERVER_HELLO = 2 - HELLO_VERIFY_REQUEST = 3 - NEWSESSION_TICKET = 4 - END_OF_EARLY_DATA = 5 - HELLO_RETRY_REQUEST = 6 - ENCRYPTED_EXTENSIONS = 8 - CERTIFICATE = 11 - SERVER_KEY_EXCHANGE = 12 - CERTIFICATE_REQUEST = 13 - SERVER_DONE = 14 - CERTIFICATE_VERIFY = 15 - CLIENT_KEY_EXCHANGE = 16 - FINISHED = 20 - CERTIFICATE_URL = 21 - CERTIFICATE_STATUS = 22 - SUPPLEMENTAL_DATA = 23 - KEY_UPDATE = 24 - NEXT_PROTO = 67 - MESSAGE_HASH = 254 - CHANGE_CIPHER_SPEC = 0x0101 - - -if sys.platform == "win32": - from _ssl import enum_certificates, enum_crls - -from socket import socket, SOCK_STREAM, create_connection -from socket import SOL_SOCKET, SO_TYPE, _GLOBAL_DEFAULT_TIMEOUT -import socket as _socket -import base64 # for DER-to-PEM translation -import errno -import warnings - - -socket_error = OSError # keep that public name in 
module namespace - -CHANNEL_BINDING_TYPES = ['tls-unique'] - -HAS_NEVER_CHECK_COMMON_NAME = hasattr(_ssl, 'HOSTFLAG_NEVER_CHECK_SUBJECT') - - -_RESTRICTED_SERVER_CIPHERS = _DEFAULT_CIPHERS - -CertificateError = SSLCertVerificationError - - -def _dnsname_match(dn, hostname): - """Matching according to RFC 6125, section 6.4.3 - - - Hostnames are compared lower-case. - - For IDNA, both dn and hostname must be encoded as IDN A-label (ACE). - - Partial wildcards like 'www*.example.org', multiple wildcards, sole - wildcard or wildcards in labels other then the left-most label are not - supported and a CertificateError is raised. - - A wildcard must match at least one character. - """ - if not dn: - return False - - wildcards = dn.count('*') - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - if wildcards > 1: - raise CertificateError( - "too many wildcards in certificate DNS name: {!r}.".format(dn)) - - dn_leftmost, sep, dn_remainder = dn.partition('.') - - if '*' in dn_remainder: - # Only match wildcard in leftmost segment. - raise CertificateError( - "wildcard can only be present in the leftmost label: " - "{!r}.".format(dn)) - - if not sep: - # no right side - raise CertificateError( - "sole wildcard without additional labels are not support: " - "{!r}.".format(dn)) - - if dn_leftmost != '*': - # no partial wildcard matching - raise CertificateError( - "partial wildcards in leftmost label are not supported: " - "{!r}.".format(dn)) - - hostname_leftmost, sep, hostname_remainder = hostname.partition('.') - if not hostname_leftmost or not sep: - # wildcard must match at least one char - return False - return dn_remainder.lower() == hostname_remainder.lower() - - -def _inet_paton(ipname): - """Try to convert an IP address to packed binary form - - Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6 - support. - """ - # inet_aton() also accepts strings like '1', '127.1', some also trailing - # data like '127.0.0.1 whatever'. - try: - addr = _socket.inet_aton(ipname) - except OSError: - # not an IPv4 address - pass - else: - if _socket.inet_ntoa(addr) == ipname: - # only accept injective ipnames - return addr - else: - # refuse for short IPv4 notation and additional trailing data - raise ValueError( - "{!r} is not a quad-dotted IPv4 address.".format(ipname) - ) - - try: - return _socket.inet_pton(_socket.AF_INET6, ipname) - except OSError: - raise ValueError("{!r} is neither an IPv4 nor an IP6 " - "address.".format(ipname)) - except AttributeError: - # AF_INET6 not available - pass - - raise ValueError("{!r} is not an IPv4 address.".format(ipname)) - - -def _ipaddress_match(cert_ipaddress, host_ip): - """Exact matching of IP addresses. - - RFC 6125 explicitly doesn't define an algorithm for this - (section 1.7.2 - "Out of Scope"). - """ - # OpenSSL may add a trailing newline to a subjectAltName's IP address, - # commonly with IPv6 addresses. Strip off trailing \n. - ip = _inet_paton(cert_ipaddress.rstrip()) - return ip == host_ip - - -DefaultVerifyPaths = namedtuple("DefaultVerifyPaths", - "cafile capath openssl_cafile_env openssl_cafile openssl_capath_env " - "openssl_capath") - -def get_default_verify_paths(): - """Return paths to default cafile and capath. 
- """ - parts = _ssl.get_default_verify_paths() - - # environment vars shadow paths - cafile = os.environ.get(parts[0], parts[1]) - capath = os.environ.get(parts[2], parts[3]) - - return DefaultVerifyPaths(cafile if os.path.isfile(cafile) else None, - capath if os.path.isdir(capath) else None, - *parts) - - -class _ASN1Object(namedtuple("_ASN1Object", "nid shortname longname oid")): - """ASN.1 object identifier lookup - """ - __slots__ = () - - def __new__(cls, oid): - return super().__new__(cls, *_txt2obj(oid, name=False)) - - @classmethod - def fromnid(cls, nid): - """Create _ASN1Object from OpenSSL numeric ID - """ - return super().__new__(cls, *_nid2obj(nid)) - - @classmethod - def fromname(cls, name): - """Create _ASN1Object from short name, long name or OID - """ - return super().__new__(cls, *_txt2obj(name, name=True)) - - -class Purpose(_ASN1Object, _Enum): - """SSLContext purpose flags with X509v3 Extended Key Usage objects - """ - SERVER_AUTH = '1.3.6.1.5.5.7.3.1' - CLIENT_AUTH = '1.3.6.1.5.5.7.3.2' - - -class SSLContext(_SSLContext): - """An SSLContext holds various SSL-related configuration options and - data, such as certificates and possibly a private key.""" - _windows_cert_stores = ("CA", "ROOT") - - sslsocket_class = None # SSLSocket is assigned later. - sslobject_class = None # SSLObject is assigned later. - - def __new__(cls, protocol=None, *args, **kwargs): - if protocol is None: - warnings.warn( - "ssl.SSLContext() without protocol argument is deprecated.", - category=DeprecationWarning, - stacklevel=2 - ) - protocol = PROTOCOL_TLS - self = _SSLContext.__new__(cls, protocol) - return self - - def _encode_hostname(self, hostname): - if hostname is None: - return None - elif isinstance(hostname, str): - return hostname.encode('idna').decode('ascii') - else: - return hostname.decode('ascii') - - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, - suppress_ragged_eofs=True, - server_hostname=None, session=None): - # SSLSocket class handles server_hostname encoding before it calls - # ctx._wrap_socket() - return self.sslsocket_class._create( - sock=sock, - server_side=server_side, - do_handshake_on_connect=do_handshake_on_connect, - suppress_ragged_eofs=suppress_ragged_eofs, - server_hostname=server_hostname, - context=self, - session=session - ) - - def wrap_bio(self, incoming, outgoing, server_side=False, - server_hostname=None, session=None): - # Need to encode server_hostname here because _wrap_bio() can only - # handle ASCII str. 
- return self.sslobject_class._create( - incoming, outgoing, server_side=server_side, - server_hostname=self._encode_hostname(server_hostname), - session=session, context=self, - ) - - def set_npn_protocols(self, npn_protocols): - warnings.warn( - "ssl NPN is deprecated, use ALPN instead", - DeprecationWarning, - stacklevel=2 - ) - protos = bytearray() - for protocol in npn_protocols: - b = bytes(protocol, 'ascii') - if len(b) == 0 or len(b) > 255: - raise SSLError('NPN protocols must be 1 to 255 in length') - protos.append(len(b)) - protos.extend(b) - - self._set_npn_protocols(protos) - - def set_servername_callback(self, server_name_callback): - if server_name_callback is None: - self.sni_callback = None - else: - if not callable(server_name_callback): - raise TypeError("not a callable object") - - def shim_cb(sslobj, servername, sslctx): - servername = self._encode_hostname(servername) - return server_name_callback(sslobj, servername, sslctx) - - self.sni_callback = shim_cb - - def set_alpn_protocols(self, alpn_protocols): - protos = bytearray() - for protocol in alpn_protocols: - b = bytes(protocol, 'ascii') - if len(b) == 0 or len(b) > 255: - raise SSLError('ALPN protocols must be 1 to 255 in length') - protos.append(len(b)) - protos.extend(b) - - self._set_alpn_protocols(protos) - - def _load_windows_store_certs(self, storename, purpose): - try: - for cert, encoding, trust in enum_certificates(storename): - # CA certs are never PKCS#7 encoded - if encoding == "x509_asn": - if trust is True or purpose.oid in trust: - try: - self.load_verify_locations(cadata=cert) - except SSLError as exc: - warnings.warn(f"Bad certificate in Windows certificate store: {exc!s}") - except PermissionError: - warnings.warn("unable to enumerate Windows certificate store") - - def load_default_certs(self, purpose=Purpose.SERVER_AUTH): - if not isinstance(purpose, _ASN1Object): - raise TypeError(purpose) - if sys.platform == "win32": - for storename in self._windows_cert_stores: - self._load_windows_store_certs(storename, purpose) - self.set_default_verify_paths() - - if hasattr(_SSLContext, 'minimum_version'): - @property - def minimum_version(self): - return TLSVersion(super().minimum_version) - - @minimum_version.setter - def minimum_version(self, value): - if value == TLSVersion.SSLv3: - self.options &= ~Options.OP_NO_SSLv3 - super(SSLContext, SSLContext).minimum_version.__set__(self, value) - - @property - def maximum_version(self): - return TLSVersion(super().maximum_version) - - @maximum_version.setter - def maximum_version(self, value): - super(SSLContext, SSLContext).maximum_version.__set__(self, value) - - @property - def options(self): - return Options(super().options) - - @options.setter - def options(self, value): - super(SSLContext, SSLContext).options.__set__(self, value) - - if hasattr(_ssl, 'HOSTFLAG_NEVER_CHECK_SUBJECT'): - @property - def hostname_checks_common_name(self): - ncs = self._host_flags & _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT - return ncs != _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT - - @hostname_checks_common_name.setter - def hostname_checks_common_name(self, value): - if value: - self._host_flags &= ~_ssl.HOSTFLAG_NEVER_CHECK_SUBJECT - else: - self._host_flags |= _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT - else: - @property - def hostname_checks_common_name(self): - return True - - @property - def _msg_callback(self): - """TLS message callback - - The message callback provides a debugging hook to analyze TLS - connections. 
The callback is called for any TLS protocol message - (header, handshake, alert, and more), but not for application data. - Due to technical limitations, the callback can't be used to filter - traffic or to abort a connection. Any exception raised in the - callback is delayed until the handshake, read, or write operation - has been performed. - - def msg_cb(conn, direction, version, content_type, msg_type, data): - pass - - conn - :class:`SSLSocket` or :class:`SSLObject` instance - direction - ``read`` or ``write`` - version - :class:`TLSVersion` enum member or int for unknown version. For a - frame header, it's the header version. - content_type - :class:`_TLSContentType` enum member or int for unsupported - content type. - msg_type - Either a :class:`_TLSContentType` enum number for a header - message, a :class:`_TLSAlertType` enum member for an alert - message, a :class:`_TLSMessageType` enum member for other - messages, or int for unsupported message types. - data - Raw, decrypted message content as bytes - """ - inner = super()._msg_callback - if inner is not None: - return inner.user_function - else: - return None - - @_msg_callback.setter - def _msg_callback(self, callback): - if callback is None: - super(SSLContext, SSLContext)._msg_callback.__set__(self, None) - return - - if not hasattr(callback, '__call__'): - raise TypeError(f"{callback} is not callable.") - - def inner(conn, direction, version, content_type, msg_type, data): - try: - version = TLSVersion(version) - except ValueError: - pass - - try: - content_type = _TLSContentType(content_type) - except ValueError: - pass - - if content_type == _TLSContentType.HEADER: - msg_enum = _TLSContentType - elif content_type == _TLSContentType.ALERT: - msg_enum = _TLSAlertType - else: - msg_enum = _TLSMessageType - try: - msg_type = msg_enum(msg_type) - except ValueError: - pass - - return callback(conn, direction, version, - content_type, msg_type, data) - - inner.user_function = callback - - super(SSLContext, SSLContext)._msg_callback.__set__(self, inner) - - @property - def protocol(self): - return _SSLMethod(super().protocol) - - @property - def verify_flags(self): - return VerifyFlags(super().verify_flags) - - @verify_flags.setter - def verify_flags(self, value): - super(SSLContext, SSLContext).verify_flags.__set__(self, value) - - @property - def verify_mode(self): - value = super().verify_mode - try: - return VerifyMode(value) - except ValueError: - return value - - @verify_mode.setter - def verify_mode(self, value): - super(SSLContext, SSLContext).verify_mode.__set__(self, value) - - -def create_default_context(purpose=Purpose.SERVER_AUTH, *, cafile=None, - capath=None, cadata=None): - """Create a SSLContext object with default settings. - - NOTE: The protocol and settings may change anytime without prior - deprecation. The values represent a fair balance between maximum - compatibility and security. - """ - if not isinstance(purpose, _ASN1Object): - raise TypeError(purpose) - - # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION, - # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE - # by default. 
- if purpose == Purpose.SERVER_AUTH: - # verify certs and host name in client mode - context = SSLContext(PROTOCOL_TLS_CLIENT) - context.verify_mode = CERT_REQUIRED - context.check_hostname = True - elif purpose == Purpose.CLIENT_AUTH: - context = SSLContext(PROTOCOL_TLS_SERVER) - else: - raise ValueError(purpose) - - # `VERIFY_X509_PARTIAL_CHAIN` makes OpenSSL's chain building behave more - # like RFC 3280 and 5280, which specify that chain building stops with the - # first trust anchor, even if that anchor is not self-signed. - # - # `VERIFY_X509_STRICT` makes OpenSSL more conservative about the - # certificates it accepts, including "disabling workarounds for - # some broken certificates." - context.verify_flags |= (_ssl.VERIFY_X509_PARTIAL_CHAIN | - _ssl.VERIFY_X509_STRICT) - - if cafile or capath or cadata: - context.load_verify_locations(cafile, capath, cadata) - elif context.verify_mode != CERT_NONE: - # no explicit cafile, capath or cadata but the verify mode is - # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system - # root CA certificates for the given purpose. This may fail silently. - context.load_default_certs(purpose) - # OpenSSL 1.1.1 keylog file - if hasattr(context, 'keylog_filename'): - keylogfile = os.environ.get('SSLKEYLOGFILE') - if keylogfile and not sys.flags.ignore_environment: - context.keylog_filename = keylogfile - return context - -def _create_unverified_context(protocol=None, *, cert_reqs=CERT_NONE, - check_hostname=False, purpose=Purpose.SERVER_AUTH, - certfile=None, keyfile=None, - cafile=None, capath=None, cadata=None): - """Create a SSLContext object for Python stdlib modules - - All Python stdlib modules shall use this function to create SSLContext - objects in order to keep common settings in one place. The configuration - is less restrict than create_default_context()'s to increase backward - compatibility. - """ - if not isinstance(purpose, _ASN1Object): - raise TypeError(purpose) - - # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION, - # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE - # by default. - if purpose == Purpose.SERVER_AUTH: - # verify certs and host name in client mode - if protocol is None: - protocol = PROTOCOL_TLS_CLIENT - elif purpose == Purpose.CLIENT_AUTH: - if protocol is None: - protocol = PROTOCOL_TLS_SERVER - else: - raise ValueError(purpose) - - context = SSLContext(protocol) - context.check_hostname = check_hostname - if cert_reqs is not None: - context.verify_mode = cert_reqs - if check_hostname: - context.check_hostname = True - - if keyfile and not certfile: - raise ValueError("certfile must be specified") - if certfile or keyfile: - context.load_cert_chain(certfile, keyfile) - - # load CA root certs - if cafile or capath or cadata: - context.load_verify_locations(cafile, capath, cadata) - elif context.verify_mode != CERT_NONE: - # no explicit cafile, capath or cadata but the verify mode is - # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system - # root CA certificates for the given purpose. This may fail silently. - context.load_default_certs(purpose) - # OpenSSL 1.1.1 keylog file - if hasattr(context, 'keylog_filename'): - keylogfile = os.environ.get('SSLKEYLOGFILE') - if keylogfile and not sys.flags.ignore_environment: - context.keylog_filename = keylogfile - return context - -# Used by http.client if no context is explicitly passed. -_create_default_https_context = create_default_context - - -# Backwards compatibility alias, even though it's not a public name. 
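(Illustrative aside, not part of the deleted file: create_default_context() above is the hardened public factory, while _create_unverified_context() keeps the laxer defaults stdlib modules rely on for backward compatibility. example.com is a placeholder host and the snippet needs outbound network access:)

    import socket, ssl
    ctx = ssl.create_default_context()
    assert ctx.verify_mode is ssl.CERT_REQUIRED      # verified client mode
    assert ctx.check_hostname                        # hostname matching is on
    try:
        with socket.create_connection(("example.com", 443), timeout=5) as raw:
            with ctx.wrap_socket(raw, server_hostname="example.com") as tls:
                print(tls.version(), tls.cipher()[0])
    except OSError as exc:
        print("TLS connection failed:", exc)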
-_create_stdlib_context = _create_unverified_context - - -class SSLObject: - """This class implements an interface on top of a low-level SSL object as - implemented by OpenSSL. This object captures the state of an SSL connection - but does not provide any network IO itself. IO needs to be performed - through separate "BIO" objects which are OpenSSL's IO abstraction layer. - - This class does not have a public constructor. Instances are returned by - ``SSLContext.wrap_bio``. This class is typically used by framework authors - that want to implement asynchronous IO for SSL through memory buffers. - - When compared to ``SSLSocket``, this object lacks the following features: - - * Any form of network IO, including methods such as ``recv`` and ``send``. - * The ``do_handshake_on_connect`` and ``suppress_ragged_eofs`` machinery. - """ - def __init__(self, *args, **kwargs): - raise TypeError( - f"{self.__class__.__name__} does not have a public " - f"constructor. Instances are returned by SSLContext.wrap_bio()." - ) - - @classmethod - def _create(cls, incoming, outgoing, server_side=False, - server_hostname=None, session=None, context=None): - self = cls.__new__(cls) - sslobj = context._wrap_bio( - incoming, outgoing, server_side=server_side, - server_hostname=server_hostname, - owner=self, session=session - ) - self._sslobj = sslobj - return self - - @property - def context(self): - """The SSLContext that is currently in use.""" - return self._sslobj.context - - @context.setter - def context(self, ctx): - self._sslobj.context = ctx - - @property - def session(self): - """The SSLSession for client socket.""" - return self._sslobj.session - - @session.setter - def session(self, session): - self._sslobj.session = session - - @property - def session_reused(self): - """Was the client session reused during handshake""" - return self._sslobj.session_reused - - @property - def server_side(self): - """Whether this is a server-side socket.""" - return self._sslobj.server_side - - @property - def server_hostname(self): - """The currently set server hostname (for SNI), or ``None`` if no - server hostname is set.""" - return self._sslobj.server_hostname - - def read(self, len=1024, buffer=None): - """Read up to 'len' bytes from the SSL object and return them. - - If 'buffer' is provided, read into this buffer and return the number of - bytes read. - """ - if buffer is not None: - v = self._sslobj.read(len, buffer) - else: - v = self._sslobj.read(len) - return v - - def write(self, data): - """Write 'data' to the SSL object and return the number of bytes - written. - - The 'data' argument must support the buffer interface. - """ - return self._sslobj.write(data) - - def getpeercert(self, binary_form=False): - """Returns a formatted version of the data in the certificate provided - by the other end of the SSL channel. - - Return None if no certificate was provided, {} if a certificate was - provided, but not validated. - """ - return self._sslobj.getpeercert(binary_form) - - def get_verified_chain(self): - """Returns verified certificate chain provided by the other - end of the SSL channel as a list of DER-encoded bytes. - - If certificate verification was disabled method acts the same as - ``SSLSocket.get_unverified_chain``. 
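# A minimal illustrative sketch (not part of the deleted source above) of the
# memory-BIO pattern SSLObject is designed for: the caller shuttles ciphertext
# between two MemoryBIO buffers and whatever transport it owns.
# "example.org" is a placeholder peer name.
import ssl

ctx = ssl.create_default_context()
incoming = ssl.MemoryBIO()   # ciphertext received from the peer, fed in by the caller
outgoing = ssl.MemoryBIO()   # ciphertext that the caller must transmit to the peer
tls = ctx.wrap_bio(incoming, outgoing, server_hostname="example.org")
try:
    tls.do_handshake()       # performs no network IO itself
except ssl.SSLWantReadError:
    client_hello = outgoing.read()   # handshake bytes for the caller to send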
- """ - chain = self._sslobj.get_verified_chain() - - if chain is None: - return [] - - return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] - - def get_unverified_chain(self): - """Returns raw certificate chain provided by the other - end of the SSL channel as a list of DER-encoded bytes. - """ - chain = self._sslobj.get_unverified_chain() - - if chain is None: - return [] - - return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] - - def selected_npn_protocol(self): - """Return the currently selected NPN protocol as a string, or ``None`` - if a next protocol was not negotiated or if NPN is not supported by one - of the peers.""" - warnings.warn( - "ssl NPN is deprecated, use ALPN instead", - DeprecationWarning, - stacklevel=2 - ) - - def selected_alpn_protocol(self): - """Return the currently selected ALPN protocol as a string, or ``None`` - if a next protocol was not negotiated or if ALPN is not supported by one - of the peers.""" - return self._sslobj.selected_alpn_protocol() - - def cipher(self): - """Return the currently selected cipher as a 3-tuple ``(name, - ssl_version, secret_bits)``.""" - return self._sslobj.cipher() - - def shared_ciphers(self): - """Return a list of ciphers shared by the client during the handshake or - None if this is not a valid server connection. - """ - return self._sslobj.shared_ciphers() - - def compression(self): - """Return the current compression algorithm in use, or ``None`` if - compression was not negotiated or not supported by one of the peers.""" - return self._sslobj.compression() - - def pending(self): - """Return the number of bytes that can be read immediately.""" - return self._sslobj.pending() - - def do_handshake(self): - """Start the SSL/TLS handshake.""" - self._sslobj.do_handshake() - - def unwrap(self): - """Start the SSL shutdown handshake.""" - return self._sslobj.shutdown() - - def get_channel_binding(self, cb_type="tls-unique"): - """Get channel binding data for current connection. Raise ValueError - if the requested `cb_type` is not supported. Return bytes of the data - or None if the data is not available (e.g. before the handshake).""" - return self._sslobj.get_channel_binding(cb_type) - - def version(self): - """Return a string identifying the protocol version used by the - current SSL channel. """ - return self._sslobj.version() - - def verify_client_post_handshake(self): - return self._sslobj.verify_client_post_handshake() - - -def _sslcopydoc(func): - """Copy docstring from SSLObject to SSLSocket""" - func.__doc__ = getattr(SSLObject, func.__name__).__doc__ - return func - - -class SSLSocket(socket): - """This class implements a subtype of socket.socket that wraps - the underlying OS socket in an SSL context when necessary, and - provides read and write methods over that channel. """ - - def __init__(self, *args, **kwargs): - raise TypeError( - f"{self.__class__.__name__} does not have a public " - f"constructor. Instances are returned by " - f"SSLContext.wrap_socket()." 
- ) - - @classmethod - def _create(cls, sock, server_side=False, do_handshake_on_connect=True, - suppress_ragged_eofs=True, server_hostname=None, - context=None, session=None): - if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM: - raise NotImplementedError("only stream sockets are supported") - if server_side: - if server_hostname: - raise ValueError("server_hostname can only be specified " - "in client mode") - if session is not None: - raise ValueError("session can only be specified in " - "client mode") - if context.check_hostname and not server_hostname: - raise ValueError("check_hostname requires server_hostname") - - sock_timeout = sock.gettimeout() - kwargs = dict( - family=sock.family, type=sock.type, proto=sock.proto, - fileno=sock.fileno() - ) - self = cls.__new__(cls, **kwargs) - super(SSLSocket, self).__init__(**kwargs) - sock.detach() - # Now SSLSocket is responsible for closing the file descriptor. - try: - self._context = context - self._session = session - self._closed = False - self._sslobj = None - self.server_side = server_side - self.server_hostname = context._encode_hostname(server_hostname) - self.do_handshake_on_connect = do_handshake_on_connect - self.suppress_ragged_eofs = suppress_ragged_eofs - - # See if we are connected - try: - self.getpeername() - except OSError as e: - if e.errno != errno.ENOTCONN: - raise - connected = False - blocking = self.getblocking() - self.setblocking(False) - try: - # We are not connected so this is not supposed to block, but - # testing revealed otherwise on macOS and Windows so we do - # the non-blocking dance regardless. Our raise when any data - # is found means consuming the data is harmless. - notconn_pre_handshake_data = self.recv(1) - except OSError as e: - # EINVAL occurs for recv(1) on non-connected on unix sockets. - if e.errno not in (errno.ENOTCONN, errno.EINVAL): - raise - notconn_pre_handshake_data = b'' - self.setblocking(blocking) - if notconn_pre_handshake_data: - # This prevents pending data sent to the socket before it was - # closed from escaping to the caller who could otherwise - # presume it came through a successful TLS connection. - reason = "Closed before TLS handshake with data in recv buffer." - notconn_pre_handshake_data_error = SSLError(e.errno, reason) - # Add the SSLError attributes that _ssl.c always adds. - notconn_pre_handshake_data_error.reason = reason - notconn_pre_handshake_data_error.library = None - try: - raise notconn_pre_handshake_data_error - finally: - # Explicitly break the reference cycle. - notconn_pre_handshake_data_error = None - else: - connected = True - - self.settimeout(sock_timeout) # Must come after setblocking() calls. 
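# Illustrative sketch of the common path through _create() above: wrapping an
# already-connected, blocking socket builds the SSL object immediately, and
# because do_handshake_on_connect defaults to True, the handshake completes
# before wrap_socket() returns. "example.org" is a placeholder peer.
import socket
import ssl

ctx = ssl.create_default_context()
with socket.create_connection(("example.org", 443), timeout=10) as raw:
    with ctx.wrap_socket(raw, server_hostname="example.org") as tls:
        print(tls.version(), tls.cipher())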
- self._connected = connected - if connected: - # create the SSL object - self._sslobj = self._context._wrap_socket( - self, server_side, self.server_hostname, - owner=self, session=self._session, - ) - if do_handshake_on_connect: - timeout = self.gettimeout() - if timeout == 0.0: - # non-blocking - raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets") - self.do_handshake() - except: - try: - self.close() - except OSError: - pass - raise - return self - - @property - @_sslcopydoc - def context(self): - return self._context - - @context.setter - def context(self, ctx): - self._context = ctx - self._sslobj.context = ctx - - @property - @_sslcopydoc - def session(self): - if self._sslobj is not None: - return self._sslobj.session - - @session.setter - def session(self, session): - self._session = session - if self._sslobj is not None: - self._sslobj.session = session - - @property - @_sslcopydoc - def session_reused(self): - if self._sslobj is not None: - return self._sslobj.session_reused - - def dup(self): - raise NotImplementedError("Can't dup() %s instances" % - self.__class__.__name__) - - def _checkClosed(self, msg=None): - # raise an exception here if you wish to check for spurious closes - pass - - def _check_connected(self): - if not self._connected: - # getpeername() will raise ENOTCONN if the socket is really - # not connected; note that we can be connected even without - # _connected being set, e.g. if connect() first returned - # EAGAIN. - self.getpeername() - - def read(self, len=1024, buffer=None): - """Read up to LEN bytes and return them. - Return zero-length string on EOF.""" - - self._checkClosed() - if self._sslobj is None: - raise ValueError("Read on closed or unwrapped SSL socket.") - try: - if buffer is not None: - return self._sslobj.read(len, buffer) - else: - return self._sslobj.read(len) - except SSLError as x: - if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs: - if buffer is not None: - return 0 - else: - return b'' - else: - raise - - def write(self, data): - """Write DATA to the underlying SSL channel. 
Returns - number of bytes of DATA actually transmitted.""" - - self._checkClosed() - if self._sslobj is None: - raise ValueError("Write on closed or unwrapped SSL socket.") - return self._sslobj.write(data) - - @_sslcopydoc - def getpeercert(self, binary_form=False): - self._checkClosed() - self._check_connected() - return self._sslobj.getpeercert(binary_form) - - @_sslcopydoc - def get_verified_chain(self): - chain = self._sslobj.get_verified_chain() - - if chain is None: - return [] - - return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] - - @_sslcopydoc - def get_unverified_chain(self): - chain = self._sslobj.get_unverified_chain() - - if chain is None: - return [] - - return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] - - @_sslcopydoc - def selected_npn_protocol(self): - self._checkClosed() - warnings.warn( - "ssl NPN is deprecated, use ALPN instead", - DeprecationWarning, - stacklevel=2 - ) - return None - - @_sslcopydoc - def selected_alpn_protocol(self): - self._checkClosed() - if self._sslobj is None or not _ssl.HAS_ALPN: - return None - else: - return self._sslobj.selected_alpn_protocol() - - @_sslcopydoc - def cipher(self): - self._checkClosed() - if self._sslobj is None: - return None - else: - return self._sslobj.cipher() - - @_sslcopydoc - def shared_ciphers(self): - self._checkClosed() - if self._sslobj is None: - return None - else: - return self._sslobj.shared_ciphers() - - @_sslcopydoc - def compression(self): - self._checkClosed() - if self._sslobj is None: - return None - else: - return self._sslobj.compression() - - def send(self, data, flags=0): - self._checkClosed() - if self._sslobj is not None: - if flags != 0: - raise ValueError( - "non-zero flags not allowed in calls to send() on %s" % - self.__class__) - return self._sslobj.write(data) - else: - return super().send(data, flags) - - def sendto(self, data, flags_or_addr, addr=None): - self._checkClosed() - if self._sslobj is not None: - raise ValueError("sendto not allowed on instances of %s" % - self.__class__) - elif addr is None: - return super().sendto(data, flags_or_addr) - else: - return super().sendto(data, flags_or_addr, addr) - - def sendmsg(self, *args, **kwargs): - # Ensure programs don't send data unencrypted if they try to - # use this method. - raise NotImplementedError("sendmsg not allowed on instances of %s" % - self.__class__) - - def sendall(self, data, flags=0): - self._checkClosed() - if self._sslobj is not None: - if flags != 0: - raise ValueError( - "non-zero flags not allowed in calls to sendall() on %s" % - self.__class__) - count = 0 - with memoryview(data) as view, view.cast("B") as byte_view: - amount = len(byte_view) - while count < amount: - v = self.send(byte_view[count:]) - count += v - else: - return super().sendall(data, flags) - - def sendfile(self, file, offset=0, count=None): - """Send a file, possibly by using os.sendfile() if this is a - clear-text socket. Return the total number of bytes sent. 
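# The sendall() loop above resumes partial sends by slicing a memoryview
# rather than copying the buffer. A standalone sketch of that pattern, using a
# hypothetical transport that accepts at most 4 bytes per call:
def sendall_like(send, data):
    count = 0
    with memoryview(data) as view, view.cast("B") as byte_view:
        amount = len(byte_view)
        while count < amount:
            count += send(byte_view[count:])   # send() may consume fewer bytes

sent = bytearray()

def tiny_send(buf):
    chunk = bytes(buf[:4])   # pretend only 4 bytes were accepted
    sent.extend(chunk)
    return len(chunk)

sendall_like(tiny_send, b"hello world")
assert bytes(sent) == b"hello world"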
- """ - if self._sslobj is not None: - return self._sendfile_use_send(file, offset, count) - else: - # os.sendfile() works with plain sockets only - return super().sendfile(file, offset, count) - - def recv(self, buflen=1024, flags=0): - self._checkClosed() - if self._sslobj is not None: - if flags != 0: - raise ValueError( - "non-zero flags not allowed in calls to recv() on %s" % - self.__class__) - return self.read(buflen) - else: - return super().recv(buflen, flags) - - def recv_into(self, buffer, nbytes=None, flags=0): - self._checkClosed() - if nbytes is None: - if buffer is not None: - with memoryview(buffer) as view: - nbytes = view.nbytes - if not nbytes: - nbytes = 1024 - else: - nbytes = 1024 - if self._sslobj is not None: - if flags != 0: - raise ValueError( - "non-zero flags not allowed in calls to recv_into() on %s" % - self.__class__) - return self.read(nbytes, buffer) - else: - return super().recv_into(buffer, nbytes, flags) - - def recvfrom(self, buflen=1024, flags=0): - self._checkClosed() - if self._sslobj is not None: - raise ValueError("recvfrom not allowed on instances of %s" % - self.__class__) - else: - return super().recvfrom(buflen, flags) - - def recvfrom_into(self, buffer, nbytes=None, flags=0): - self._checkClosed() - if self._sslobj is not None: - raise ValueError("recvfrom_into not allowed on instances of %s" % - self.__class__) - else: - return super().recvfrom_into(buffer, nbytes, flags) - - def recvmsg(self, *args, **kwargs): - raise NotImplementedError("recvmsg not allowed on instances of %s" % - self.__class__) - - def recvmsg_into(self, *args, **kwargs): - raise NotImplementedError("recvmsg_into not allowed on instances of " - "%s" % self.__class__) - - @_sslcopydoc - def pending(self): - self._checkClosed() - if self._sslobj is not None: - return self._sslobj.pending() - else: - return 0 - - def shutdown(self, how): - self._checkClosed() - self._sslobj = None - super().shutdown(how) - - @_sslcopydoc - def unwrap(self): - if self._sslobj: - s = self._sslobj.shutdown() - self._sslobj = None - return s - else: - raise ValueError("No SSL wrapper around " + str(self)) - - @_sslcopydoc - def verify_client_post_handshake(self): - if self._sslobj: - return self._sslobj.verify_client_post_handshake() - else: - raise ValueError("No SSL wrapper around " + str(self)) - - def _real_close(self): - self._sslobj = None - super()._real_close() - - @_sslcopydoc - def do_handshake(self, block=False): - self._check_connected() - timeout = self.gettimeout() - try: - if timeout == 0.0 and block: - self.settimeout(None) - self._sslobj.do_handshake() - finally: - self.settimeout(timeout) - - def _real_connect(self, addr, connect_ex): - if self.server_side: - raise ValueError("can't connect in server-side mode") - # Here we assume that the socket is client-side, and not - # connected at the time of the call. We connect it, then wrap it. 
- if self._connected or self._sslobj is not None: - raise ValueError("attempt to connect already-connected SSLSocket!") - self._sslobj = self.context._wrap_socket( - self, False, self.server_hostname, - owner=self, session=self._session - ) - try: - if connect_ex: - rc = super().connect_ex(addr) - else: - rc = None - super().connect(addr) - if not rc: - self._connected = True - if self.do_handshake_on_connect: - self.do_handshake() - return rc - except (OSError, ValueError): - self._sslobj = None - raise - - def connect(self, addr): - """Connects to remote ADDR, and then wraps the connection in - an SSL channel.""" - self._real_connect(addr, False) - - def connect_ex(self, addr): - """Connects to remote ADDR, and then wraps the connection in - an SSL channel.""" - return self._real_connect(addr, True) - - def accept(self): - """Accepts a new connection from a remote client, and returns - a tuple containing that new connection wrapped with a server-side - SSL channel, and the address of the remote client.""" - - newsock, addr = super().accept() - newsock = self.context.wrap_socket(newsock, - do_handshake_on_connect=self.do_handshake_on_connect, - suppress_ragged_eofs=self.suppress_ragged_eofs, - server_side=True) - return newsock, addr - - @_sslcopydoc - def get_channel_binding(self, cb_type="tls-unique"): - if self._sslobj is not None: - return self._sslobj.get_channel_binding(cb_type) - else: - if cb_type not in CHANNEL_BINDING_TYPES: - raise ValueError( - "{0} channel binding type not implemented".format(cb_type) - ) - return None - - @_sslcopydoc - def version(self): - if self._sslobj is not None: - return self._sslobj.version() - else: - return None - - -# Python does not support forward declaration of types. -SSLContext.sslsocket_class = SSLSocket -SSLContext.sslobject_class = SSLObject - - -# some utility functions - -def cert_time_to_seconds(cert_time): - """Return the time in seconds since the Epoch, given the timestring - representing the "notBefore" or "notAfter" date from a certificate - in ``"%b %d %H:%M:%S %Y %Z"`` strptime format (C locale). - - "notBefore" or "notAfter" dates must use UTC (RFC 5280). - - Month is one of: Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec - UTC should be specified as GMT (see ASN1_TIME_print()) - """ - from time import strptime - from calendar import timegm - - months = ( - "Jan","Feb","Mar","Apr","May","Jun", - "Jul","Aug","Sep","Oct","Nov","Dec" - ) - time_format = ' %d %H:%M:%S %Y GMT' # NOTE: no month, fixed GMT - try: - month_number = months.index(cert_time[:3].title()) + 1 - except ValueError: - raise ValueError('time data %r does not match ' - 'format "%%b%s"' % (cert_time, time_format)) - else: - # found valid month - tt = strptime(cert_time[3:], time_format) - # return an integer, the previous mktime()-based implementation - # returned a float (fractional seconds are always zero here). 
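# Worked example for the conversion implemented here: for
# "May  9 00:00:00 2007 GMT", month_number is 5, strptime() parses the
# remaining fields, and timegm() yields the UTC epoch seconds:
#
#     >>> cert_time_to_seconds("May  9 00:00:00 2007 GMT")
#     1178668800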
- return timegm((tt[0], month_number) + tt[2:6]) - -PEM_HEADER = "-----BEGIN CERTIFICATE-----" -PEM_FOOTER = "-----END CERTIFICATE-----" - -def DER_cert_to_PEM_cert(der_cert_bytes): - """Takes a certificate in binary DER format and returns the - PEM version of it as a string.""" - - f = str(base64.standard_b64encode(der_cert_bytes), 'ASCII', 'strict') - ss = [PEM_HEADER] - ss += [f[i:i+64] for i in range(0, len(f), 64)] - ss.append(PEM_FOOTER + '\n') - return '\n'.join(ss) - -def PEM_cert_to_DER_cert(pem_cert_string): - """Takes a certificate in ASCII PEM format and returns the - DER-encoded version of it as a byte sequence""" - - if not pem_cert_string.startswith(PEM_HEADER): - raise ValueError("Invalid PEM encoding; must start with %s" - % PEM_HEADER) - if not pem_cert_string.strip().endswith(PEM_FOOTER): - raise ValueError("Invalid PEM encoding; must end with %s" - % PEM_FOOTER) - d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)] - return base64.decodebytes(d.encode('ASCII', 'strict')) - -def get_server_certificate(addr, ssl_version=PROTOCOL_TLS_CLIENT, - ca_certs=None, timeout=_GLOBAL_DEFAULT_TIMEOUT): - """Retrieve the certificate from the server at the specified address, - and return it as a PEM-encoded string. - If 'ca_certs' is specified, validate the server cert against it. - If 'ssl_version' is specified, use it in the connection attempt. - If 'timeout' is specified, use it in the connection attempt. - """ - - host, port = addr - if ca_certs is not None: - cert_reqs = CERT_REQUIRED - else: - cert_reqs = CERT_NONE - context = _create_stdlib_context(ssl_version, - cert_reqs=cert_reqs, - cafile=ca_certs) - with create_connection(addr, timeout=timeout) as sock: - with context.wrap_socket(sock, server_hostname=host) as sslsock: - dercert = sslsock.getpeercert(True) - return DER_cert_to_PEM_cert(dercert) - -def get_protocol_name(protocol_code): - return _PROTOCOL_NAMES.get(protocol_code, '') diff --git a/Python313_13_x64_Template/Lib/statistics.py b/Python313_13_x64_Template/Lib/statistics.py deleted file mode 100644 index c71e83aa..00000000 --- a/Python313_13_x64_Template/Lib/statistics.py +++ /dev/null @@ -1,1817 +0,0 @@ -""" -Basic statistics module. - -This module provides functions for calculating statistics of data, including -averages, variance, and standard deviation. - -Calculating averages --------------------- - -================== ================================================== -Function Description -================== ================================================== -mean Arithmetic mean (average) of data. -fmean Fast, floating-point arithmetic mean. -geometric_mean Geometric mean of data. -harmonic_mean Harmonic mean of data. -median Median (middle value) of data. -median_low Low median of data. -median_high High median of data. -median_grouped Median, or 50th percentile, of grouped data. -mode Mode (most common value) of data. -multimode List of modes (most common values of data). -quantiles Divide data into intervals with equal probability. -================== ================================================== - -Calculate the arithmetic mean ("the average") of data: - ->>> mean([-1.0, 2.5, 3.25, 5.75]) -2.625 - - -Calculate the standard median of discrete data: - ->>> median([2, 3, 4, 5]) -3.5 - - -Calculate the median, or 50th percentile, of data grouped into class intervals -centred on the data values provided. E.g. 
if your data points are rounded to -the nearest whole number: - ->>> median_grouped([2, 2, 3, 3, 3, 4]) #doctest: +ELLIPSIS -2.8333333333... - -This should be interpreted in this way: you have two data points in the class -interval 1.5-2.5, three data points in the class interval 2.5-3.5, and one in -the class interval 3.5-4.5. The median of these data points is 2.8333... - - -Calculating variability or spread ---------------------------------- - -================== ============================================= -Function Description -================== ============================================= -pvariance Population variance of data. -variance Sample variance of data. -pstdev Population standard deviation of data. -stdev Sample standard deviation of data. -================== ============================================= - -Calculate the standard deviation of sample data: - ->>> stdev([2.5, 3.25, 5.5, 11.25, 11.75]) #doctest: +ELLIPSIS -4.38961843444... - -If you have previously calculated the mean, you can pass it as the optional -second argument to the four "spread" functions to avoid recalculating it: - ->>> data = [1, 2, 2, 4, 4, 4, 5, 6] ->>> mu = mean(data) ->>> pvariance(data, mu) -2.5 - - -Statistics for relations between two inputs -------------------------------------------- - -================== ==================================================== -Function Description -================== ==================================================== -covariance Sample covariance for two variables. -correlation Pearson's correlation coefficient for two variables. -linear_regression Intercept and slope for simple linear regression. -================== ==================================================== - -Calculate covariance, Pearson's correlation, and simple linear regression -for two inputs: - ->>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9] ->>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3] ->>> covariance(x, y) -0.75 ->>> correlation(x, y) #doctest: +ELLIPSIS -0.31622776601... ->>> linear_regression(x, y) -LinearRegression(slope=0.1, intercept=1.5) - - -Exceptions ----------- - -A single exception is defined: StatisticsError is a subclass of ValueError. - -""" - -__all__ = [ - 'NormalDist', - 'StatisticsError', - 'correlation', - 'covariance', - 'fmean', - 'geometric_mean', - 'harmonic_mean', - 'kde', - 'kde_random', - 'linear_regression', - 'mean', - 'median', - 'median_grouped', - 'median_high', - 'median_low', - 'mode', - 'multimode', - 'pstdev', - 'pvariance', - 'quantiles', - 'stdev', - 'variance', -] - -import math -import numbers -import random -import sys - -from fractions import Fraction -from decimal import Decimal -from itertools import count, groupby, repeat -from bisect import bisect_left, bisect_right -from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum, sumprod -from math import isfinite, isinf, pi, cos, sin, tan, cosh, asin, atan, acos -from functools import reduce -from operator import itemgetter -from collections import Counter, namedtuple, defaultdict - -_SQRT2 = sqrt(2.0) -_random = random - -# === Exceptions === - -class StatisticsError(ValueError): - pass - - -# === Private utilities === - -def _sum(data): - """_sum(data) -> (type, sum, count) - - Return a high-precision sum of the given numeric data as a fraction, - together with the type to be converted to and the count of items. - - Examples - -------- - - >>> _sum([3, 2.25, 4.5, -0.5, 0.25]) - (<class 'float'>, Fraction(19, 2), 5) - - Some sources of round-off error will be avoided: - - # Built-in sum returns zero.
- >>> _sum([1e50, 1, -1e50] * 1000) - (<class 'float'>, Fraction(1000, 1), 3000) - - Fractions and Decimals are also supported: - - >>> from fractions import Fraction as F - >>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)]) - (<class 'fractions.Fraction'>, Fraction(63, 20), 4) - - >>> from decimal import Decimal as D - >>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")] - >>> _sum(data) - (<class 'decimal.Decimal'>, Fraction(6963, 10000), 4) - - Mixed types are currently treated as an error, except that int is - allowed. - """ - count = 0 - types = set() - types_add = types.add - partials = {} - partials_get = partials.get - for typ, values in groupby(data, type): - types_add(typ) - for n, d in map(_exact_ratio, values): - count += 1 - partials[d] = partials_get(d, 0) + n - if None in partials: - # The sum will be a NAN or INF. We can ignore all the finite - # partials, and just look at this special one. - total = partials[None] - assert not _isfinite(total) - else: - # Sum all the partial sums using builtin sum. - total = sum(Fraction(n, d) for d, n in partials.items()) - T = reduce(_coerce, types, int) # or raise TypeError - return (T, total, count) - - -def _ss(data, c=None): - """Return the exact mean and sum of square deviations of sequence data. - - Calculations are done in a single pass, allowing the input to be an iterator. - - If given, *c* is used as the mean; otherwise, it is calculated from the data. - Use the *c* argument with care, as it can lead to garbage results. - - """ - if c is not None: - T, ssd, count = _sum((d := x - c) * d for x in data) - return (T, ssd, c, count) - count = 0 - types = set() - types_add = types.add - sx_partials = defaultdict(int) - sxx_partials = defaultdict(int) - for typ, values in groupby(data, type): - types_add(typ) - for n, d in map(_exact_ratio, values): - count += 1 - sx_partials[d] += n - sxx_partials[d] += n * n - if not count: - ssd = c = Fraction(0) - elif None in sx_partials: - # The sum will be a NAN or INF. We can ignore all the finite - # partials, and just look at this special one. - ssd = c = sx_partials[None] - assert not _isfinite(ssd) - else: - sx = sum(Fraction(n, d) for d, n in sx_partials.items()) - sxx = sum(Fraction(n, d*d) for d, n in sxx_partials.items()) - # This formula has poor numeric properties for floats, - # but with fractions it is exact. - ssd = (count * sxx - sx * sx) / count - c = sx / count - T = reduce(_coerce, types, int) # or raise TypeError - return (T, ssd, c, count) - - -def _isfinite(x): - try: - return x.is_finite() # Likely a Decimal. - except AttributeError: - return math.isfinite(x) # Coerces to float first. - - -def _coerce(T, S): - """Coerce types T and S to a common type, or raise TypeError. - - Coercion rules are currently an implementation detail. See the CoerceTest - test class in test_statistics for details. - """ - # See http://bugs.python.org/issue24068. - assert T is not bool, "initial type T is bool" - # If the types are the same, no need to coerce anything. Put this - # first, so that the usual case (no coercion needed) happens as soon - # as possible. - if T is S: return T - # Mixed int & other coerce to the other type. - if S is int or S is bool: return T - if T is int: return S - # If one is a (strict) subclass of the other, coerce to the subclass. - if issubclass(S, T): return S - if issubclass(T, S): return T - # Ints coerce to the other type. - if issubclass(T, int): return S - if issubclass(S, int): return T - # Mixed fraction & float coerces to float (or float subclass).
- if issubclass(T, Fraction) and issubclass(S, float): - return S - if issubclass(T, float) and issubclass(S, Fraction): - return T - # Any other combination is disallowed. - msg = "don't know how to coerce %s and %s" - raise TypeError(msg % (T.__name__, S.__name__)) - - -def _exact_ratio(x): - """Return Real number x as an exact (numerator, denominator) pair. - - >>> _exact_ratio(0.25) - (1, 4) - - x is expected to be an int, Fraction, Decimal or float. - """ - - # XXX We should revisit whether using fractions to accumulate exact - # ratios is the right way to go. - - # The integer ratios for binary floats can have numerators or - # denominators with over 300 decimal digits. The problem is more - # acute with decimal floats where the default decimal context - # supports a huge range of exponents from Emin=-999999 to - # Emax=999999. When expanded with as_integer_ratio(), numbers like - # Decimal('3.14E+5000') and Decimal('3.14E-5000') have large - # numerators or denominators that will slow computation. - - # When the integer ratios are accumulated as fractions, the size - # grows to cover the full range from the smallest magnitude to the - # largest. For example, Fraction(3.14E+300) + Fraction(3.14E-300), - # has a 616 digit numerator. Likewise, - # Fraction(Decimal('3.14E+5000')) + Fraction(Decimal('3.14E-5000')) - # has a 10,003 digit numerator. - - # This doesn't seem to have been a problem in practice, but it is a - # potential pitfall. - - try: - return x.as_integer_ratio() - except AttributeError: - pass - except (OverflowError, ValueError): - # float NAN or INF. - assert not _isfinite(x) - return (x, None) - try: - # x may be an Integral ABC. - return (x.numerator, x.denominator) - except AttributeError: - msg = f"can't convert type '{type(x).__name__}' to numerator/denominator" - raise TypeError(msg) - - -def _convert(value, T): - """Convert value to given numeric type T.""" - if type(value) is T: - # This covers the cases where T is Fraction, or where value is - # a NAN or INF (Decimal or float). - return value - if issubclass(T, int) and value.denominator != 1: - T = float - try: - # FIXME: what do we do if this overflows? - return T(value) - except TypeError: - if issubclass(T, Decimal): - return T(value.numerator) / T(value.denominator) - else: - raise - - -def _fail_neg(values, errmsg='negative value'): - """Iterate over values, failing if any are less than zero.""" - for x in values: - if x < 0: - raise StatisticsError(errmsg) - yield x - - -def _rank(data, /, *, key=None, reverse=False, ties='average', start=1) -> list[float]: - """Rank order a dataset. The lowest value has rank 1. - - Ties are averaged so that equal values receive the same rank: - - >>> data = [31, 56, 31, 25, 75, 18] - >>> _rank(data) - [3.5, 5.0, 3.5, 2.0, 6.0, 1.0] - - The operation is idempotent: - - >>> _rank([3.5, 5.0, 3.5, 2.0, 6.0, 1.0]) - [3.5, 5.0, 3.5, 2.0, 6.0, 1.0] - - It is possible to rank the data in reverse order so that the - highest value has rank 1.
Also, a key-function can extract - the field to be ranked: - - >>> goals = [('eagles', 45), ('bears', 48), ('lions', 44)] - >>> _rank(goals, key=itemgetter(1), reverse=True) - [2.0, 1.0, 3.0] - - Ranks are conventionally numbered starting from one; however, - setting *start* to zero allows the ranks to be used as array indices: - - >>> prize = ['Gold', 'Silver', 'Bronze', 'Certificate'] - >>> scores = [8.1, 7.3, 9.4, 8.3] - >>> [prize[int(i)] for i in _rank(scores, start=0, reverse=True)] - ['Bronze', 'Certificate', 'Gold', 'Silver'] - - """ - # If this function becomes public at some point, more thought - # needs to be given to the signature. A list of ints is - # plausible when ties is "min" or "max". When ties is "average", - # either list[float] or list[Fraction] is plausible. - - # Default handling of ties matches scipy.stats.mstats.spearmanr. - if ties != 'average': - raise ValueError(f'Unknown tie resolution method: {ties!r}') - if key is not None: - data = map(key, data) - val_pos = sorted(zip(data, count()), reverse=reverse) - i = start - 1 - result = [0] * len(val_pos) - for _, g in groupby(val_pos, key=itemgetter(0)): - group = list(g) - size = len(group) - rank = i + (size + 1) / 2 - for value, orig_pos in group: - result[orig_pos] = rank - i += size - return result - - -def _integer_sqrt_of_frac_rto(n: int, m: int) -> int: - """Square root of n/m, rounded to the nearest integer using round-to-odd.""" - # Reference: https://www.lri.fr/~melquion/doc/05-imacs17_1-expose.pdf - a = math.isqrt(n // m) - return a | (a*a*m != n) - - -# For 53 bit precision floats, the bit width used in -# _float_sqrt_of_frac() is 109. -_sqrt_bit_width: int = 2 * sys.float_info.mant_dig + 3 - - -def _float_sqrt_of_frac(n: int, m: int) -> float: - """Square root of n/m as a float, correctly rounded.""" - # See principle and proof sketch at: https://bugs.python.org/msg407078 - q = (n.bit_length() - m.bit_length() - _sqrt_bit_width) // 2 - if q >= 0: - numerator = _integer_sqrt_of_frac_rto(n, m << 2 * q) << q - denominator = 1 - else: - numerator = _integer_sqrt_of_frac_rto(n << -2 * q, m) - denominator = 1 << -q - return numerator / denominator # Convert to float - - -def _decimal_sqrt_of_frac(n: int, m: int) -> Decimal: - """Square root of n/m as a Decimal, correctly rounded.""" - # Premise: For decimal, computing (n/m).sqrt() can be off - # by 1 ulp from the correctly rounded result. - # Method: Check the result, moving up or down a step if needed. - if n <= 0: - if not n: - return Decimal('0.0') - n, m = -n, -m - - root = (Decimal(n) / Decimal(m)).sqrt() - nr, dr = root.as_integer_ratio() - - plus = root.next_plus() - np, dp = plus.as_integer_ratio() - # test: n / m > ((root + plus) / 2) ** 2 - if 4 * n * (dr*dp)**2 > m * (dr*np + dp*nr)**2: - return plus - - minus = root.next_minus() - nm, dm = minus.as_integer_ratio() - # test: n / m < ((root + minus) / 2) ** 2 - if 4 * n * (dr*dm)**2 < m * (dr*nm + dm*nr)**2: - return minus - - return root - - -# === Measures of central tendency (averages) === - -def mean(data): - """Return the sample arithmetic mean of data. - - >>> mean([1, 2, 3, 4, 4]) - 2.8 - - >>> from fractions import Fraction as F - >>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)]) - Fraction(13, 21) - - >>> from decimal import Decimal as D - >>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")]) - Decimal('0.5625') - - If ``data`` is empty, StatisticsError will be raised. 
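# Because mean() accumulates exact Fractions via _sum() above, it avoids the
# round-off that a naive float sum exhibits; a quick check:
from statistics import mean

data = [1e50, 1, -1e50]
print(sum(data) / len(data))   # 0.0 -- the 1 is absorbed by 1e50 and then cancelled
print(mean(data))              # 0.3333333333333333 -- exact Fraction 1/3, converted to float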
- """ - T, total, n = _sum(data) - if n < 1: - raise StatisticsError('mean requires at least one data point') - return _convert(total / n, T) - - -def fmean(data, weights=None): - """Convert data to floats and compute the arithmetic mean. - - This runs faster than the mean() function and it always returns a float. - If the input dataset is empty, it raises a StatisticsError. - - >>> fmean([3.5, 4.0, 5.25]) - 4.25 - """ - if weights is None: - try: - n = len(data) - except TypeError: - # Handle iterators that do not define __len__(). - n = 0 - def count(iterable): - nonlocal n - for n, x in enumerate(iterable, start=1): - yield x - data = count(data) - total = fsum(data) - if not n: - raise StatisticsError('fmean requires at least one data point') - return total / n - if not isinstance(weights, (list, tuple)): - weights = list(weights) - try: - num = sumprod(data, weights) - except ValueError: - raise StatisticsError('data and weights must be the same length') - den = fsum(weights) - if not den: - raise StatisticsError('sum of weights must be non-zero') - return num / den - - -def geometric_mean(data): - """Convert data to floats and compute the geometric mean. - - Raises a StatisticsError if the input dataset is empty - or if it contains a negative value. - - Returns zero if the product of inputs is zero. - - No special efforts are made to achieve exact results. - (However, this may change in the future.) - - >>> round(geometric_mean([54, 24, 36]), 9) - 36.0 - """ - n = 0 - found_zero = False - def count_positive(iterable): - nonlocal n, found_zero - for n, x in enumerate(iterable, start=1): - if x > 0.0 or math.isnan(x): - yield x - elif x == 0.0: - found_zero = True - else: - raise StatisticsError('No negative inputs allowed', x) - total = fsum(map(log, count_positive(data))) - if not n: - raise StatisticsError('Must have a non-empty dataset') - if math.isnan(total): - return math.nan - if found_zero: - return math.nan if total == math.inf else 0.0 - return exp(total / n) - - -def harmonic_mean(data, weights=None): - """Return the harmonic mean of data. - - The harmonic mean is the reciprocal of the arithmetic mean of the - reciprocals of the data. It can be used for averaging ratios or - rates, for example speeds. - - Suppose a car travels 40 km/hr for 5 km and then speeds-up to - 60 km/hr for another 5 km. What is the average speed? - - >>> harmonic_mean([40, 60]) - 48.0 - - Suppose a car travels 40 km/hr for 5 km, and when traffic clears, - speeds-up to 60 km/hr for the remaining 30 km of the journey. What - is the average speed? - - >>> harmonic_mean([40, 60], weights=[5, 30]) - 56.0 - - If ``data`` is empty, or any element is less than zero, - ``harmonic_mean`` will raise ``StatisticsError``. 
- """ - if iter(data) is data: - data = list(data) - errmsg = 'harmonic mean does not support negative values' - n = len(data) - if n < 1: - raise StatisticsError('harmonic_mean requires at least one data point') - elif n == 1 and weights is None: - x = data[0] - if isinstance(x, (numbers.Real, Decimal)): - if x < 0: - raise StatisticsError(errmsg) - return x - else: - raise TypeError('unsupported type') - if weights is None: - weights = repeat(1, n) - sum_weights = n - else: - if iter(weights) is weights: - weights = list(weights) - if len(weights) != n: - raise StatisticsError('Number of weights does not match data size') - _, sum_weights, _ = _sum(w for w in _fail_neg(weights, errmsg)) - try: - data = _fail_neg(data, errmsg) - T, total, count = _sum(w / x if w else 0 for w, x in zip(weights, data)) - except ZeroDivisionError: - return 0 - if total <= 0: - raise StatisticsError('Weighted sum must be positive') - return _convert(sum_weights / total, T) - -# FIXME: investigate ways to calculate medians without sorting? Quickselect? -def median(data): - """Return the median (middle value) of numeric data. - - When the number of data points is odd, return the middle data point. - When the number of data points is even, the median is interpolated by - taking the average of the two middle values: - - >>> median([1, 3, 5]) - 3 - >>> median([1, 3, 5, 7]) - 4.0 - - """ - data = sorted(data) - n = len(data) - if n == 0: - raise StatisticsError("no median for empty data") - if n % 2 == 1: - return data[n // 2] - else: - i = n // 2 - return (data[i - 1] + data[i]) / 2 - - -def median_low(data): - """Return the low median of numeric data. - - When the number of data points is odd, the middle value is returned. - When it is even, the smaller of the two middle values is returned. - - >>> median_low([1, 3, 5]) - 3 - >>> median_low([1, 3, 5, 7]) - 3 - - """ - data = sorted(data) - n = len(data) - if n == 0: - raise StatisticsError("no median for empty data") - if n % 2 == 1: - return data[n // 2] - else: - return data[n // 2 - 1] - - -def median_high(data): - """Return the high median of data. - - When the number of data points is odd, the middle value is returned. - When it is even, the larger of the two middle values is returned. - - >>> median_high([1, 3, 5]) - 3 - >>> median_high([1, 3, 5, 7]) - 5 - - """ - data = sorted(data) - n = len(data) - if n == 0: - raise StatisticsError("no median for empty data") - return data[n // 2] - - -def median_grouped(data, interval=1.0): - """Estimates the median for numeric data binned around the midpoints - of consecutive, fixed-width intervals. - - The *data* can be any iterable of numeric data with each value being - exactly the midpoint of a bin. At least one value must be present. - - The *interval* is width of each bin. - - For example, demographic information may have been summarized into - consecutive ten-year age groups with each group being represented - by the 5-year midpoints of the intervals: - - >>> demographics = Counter({ - ... 25: 172, # 20 to 30 years old - ... 35: 484, # 30 to 40 years old - ... 45: 387, # 40 to 50 years old - ... 55: 22, # 50 to 60 years old - ... 65: 6, # 60 to 70 years old - ... }) - - The 50th percentile (median) is the 536th person out of the 1071 - member cohort. That person is in the 30 to 40 year old age group. - - The regular median() function would assume that everyone in the - tricenarian age group was exactly 35 years old. 
A more tenable - assumption is that the 484 members of that age group are evenly - distributed between 30 and 40. For that, we use median_grouped(). - - >>> data = list(demographics.elements()) - >>> median(data) - 35 - >>> round(median_grouped(data, interval=10), 1) - 37.5 - - The caller is responsible for making sure the data points are separated - by exact multiples of *interval*. This is essential for getting a - correct result. The function does not check this precondition. - - Inputs may be any numeric type that can be coerced to a float during - the interpolation step. - - """ - data = sorted(data) - n = len(data) - if not n: - raise StatisticsError("no median for empty data") - - # Find the value at the midpoint. Remember this corresponds to the - # midpoint of the class interval. - x = data[n // 2] - - # Using O(log n) bisection, find where all the x values occur in the data. - # All x will lie within data[i:j]. - i = bisect_left(data, x) - j = bisect_right(data, x, lo=i) - - # Coerce to floats, raising a TypeError if not possible - try: - interval = float(interval) - x = float(x) - except ValueError: - raise TypeError('Value cannot be converted to a float') - - # Interpolate the median using the formula found at: - # https://www.cuemath.com/data/median-of-grouped-data/ - L = x - interval / 2.0 # Lower limit of the median interval - cf = i # Cumulative frequency of the preceding interval - f = j - i # Number of elements in the median interval - return L + interval * (n / 2 - cf) / f - - -def mode(data): - """Return the most common data point from discrete or nominal data. - - ``mode`` assumes discrete data, and returns a single value. This is the - standard treatment of the mode as commonly taught in schools: - - >>> mode([1, 1, 2, 3, 3, 3, 3, 4]) - 3 - - This also works with nominal (non-numeric) data: - - >>> mode(["red", "blue", "blue", "red", "green", "red", "red"]) - 'red' - - If there are multiple modes with the same frequency, return the first one - encountered: - - >>> mode(['red', 'red', 'green', 'blue', 'blue']) - 'red' - - If *data* is empty, ``mode`` raises StatisticsError. - - """ - pairs = Counter(iter(data)).most_common(1) - try: - return pairs[0][0] - except IndexError: - raise StatisticsError('no mode for empty data') from None - - -def multimode(data): - """Return a list of the most frequently occurring values. - - Will return more than one result if there are multiple modes - or an empty list if *data* is empty. - - >>> multimode('aabbbbbbbbcc') - ['b'] - >>> multimode('aabbbbccddddeeffffgg') - ['b', 'd', 'f'] - >>> multimode('') - [] - """ - counts = Counter(iter(data)) - if not counts: - return [] - maxcount = max(counts.values()) - return [value for value, count in counts.items() if count == maxcount] - - -def kde(data, h, kernel='normal', *, cumulative=False): - """Kernel Density Estimation: Create a continuous probability density - function or cumulative distribution function from discrete samples. - - The basic idea is to smooth the data using a kernel function - to help draw inferences about a population from a sample. - - The degree of smoothing is controlled by the scaling parameter h - which is called the bandwidth. Smaller values emphasize local - features while larger values give smoother results. - - The kernel determines the relative weights of the sample data - points. Generally, the choice of kernel shape does not matter - as much as the more influential bandwidth smoothing parameter.
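# The estimator built below is the textbook form
# f_hat(x) = sum(K((x - x_i) / h) for x_i in data) / (n * h). A hand-rolled
# check against kde() with the normal kernel, using the sample from the
# docstring (kde() is only available where this module provides it, 3.13+):
from math import exp, pi, sqrt
from statistics import kde

sample = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2]
h = 1.5
K = lambda t: exp(-t * t / 2) / sqrt(2 * pi)
by_hand = sum(K((0.0 - x_i) / h) for x_i in sample) / (len(sample) * h)
assert abs(by_hand - kde(sample, h=h)(0.0)) < 1e-12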
- - Kernels that give some weight to every sample point: - - normal (gauss) - logistic - sigmoid - - Kernels that only give weight to sample points within - the bandwidth: - - rectangular (uniform) - triangular - parabolic (epanechnikov) - quartic (biweight) - triweight - cosine - - If *cumulative* is true, will return a cumulative distribution function. - - A StatisticsError will be raised if the data sequence is empty. - - Example - ------- - - Given a sample of six data points, construct a continuous - function that estimates the underlying probability density: - - >>> sample = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2] - >>> f_hat = kde(sample, h=1.5) - - Compute the area under the curve: - - >>> area = sum(f_hat(x) for x in range(-20, 20)) - >>> round(area, 4) - 1.0 - - Plot the estimated probability density function at - evenly spaced points from -6 to 10: - - >>> for x in range(-6, 11): - ... density = f_hat(x) - ... plot = ' ' * int(density * 400) + 'x' - ... print(f'{x:2}: {density:.3f} {plot}') - ... - -6: 0.002 x - -5: 0.009 x - -4: 0.031 x - -3: 0.070 x - -2: 0.111 x - -1: 0.125 x - 0: 0.110 x - 1: 0.086 x - 2: 0.068 x - 3: 0.059 x - 4: 0.066 x - 5: 0.082 x - 6: 0.082 x - 7: 0.058 x - 8: 0.028 x - 9: 0.009 x - 10: 0.002 x - - Estimate P(4.5 < X <= 7.5), the probability that a new sample value - will be between 4.5 and 7.5: - - >>> cdf = kde(sample, h=1.5, cumulative=True) - >>> round(cdf(7.5) - cdf(4.5), 2) - 0.22 - - References - ---------- - - Kernel density estimation and its application: - https://www.itm-conferences.org/articles/itmconf/pdf/2018/08/itmconf_sam2018_00037.pdf - - Kernel functions in common use: - https://en.wikipedia.org/wiki/Kernel_(statistics)#kernel_functions_in_common_use - - Interactive graphical demonstration and exploration: - https://demonstrations.wolfram.com/KernelDensityEstimation/ - - Kernel estimation of cumulative distribution function of a random variable with bounded support - https://www.econstor.eu/bitstream/10419/207829/1/10.21307_stattrans-2016-037.pdf - - """ - - n = len(data) - if not n: - raise StatisticsError('Empty data sequence') - - if not isinstance(data[0], (int, float)): - raise TypeError('Data sequence must contain ints or floats') - - if h <= 0.0: - raise StatisticsError(f'Bandwidth h must be positive, not {h=!r}') - - match kernel: - - case 'normal' | 'gauss': - sqrt2pi = sqrt(2 * pi) - sqrt2 = sqrt(2) - K = lambda t: exp(-1/2 * t * t) / sqrt2pi - W = lambda t: 1/2 * (1.0 + erf(t / sqrt2)) - support = None - - case 'logistic': - # 1.0 / (exp(t) + 2.0 + exp(-t)) - K = lambda t: 1/2 / (1.0 + cosh(t)) - W = lambda t: 1.0 - 1.0 / (exp(t) + 1.0) - support = None - - case 'sigmoid': - # (2/pi) / (exp(t) + exp(-t)) - c1 = 1 / pi - c2 = 2 / pi - K = lambda t: c1 / cosh(t) - W = lambda t: c2 * atan(exp(t)) - support = None - - case 'rectangular' | 'uniform': - K = lambda t: 1/2 - W = lambda t: 1/2 * t + 1/2 - support = 1.0 - - case 'triangular': - K = lambda t: 1.0 - abs(t) - W = lambda t: t*t * (1/2 if t < 0.0 else -1/2) + t + 1/2 - support = 1.0 - - case 'parabolic' | 'epanechnikov': - K = lambda t: 3/4 * (1.0 - t * t) - W = lambda t: -1/4 * t**3 + 3/4 * t + 1/2 - support = 1.0 - - case 'quartic' | 'biweight': - K = lambda t: 15/16 * (1.0 - t * t) ** 2 - W = lambda t: 3/16 * t**5 - 5/8 * t**3 + 15/16 * t + 1/2 - support = 1.0 - - case 'triweight': - K = lambda t: 35/32 * (1.0 - t * t) ** 3 - W = lambda t: 35/32 * (-1/7*t**7 + 3/5*t**5 - t**3 + t) + 1/2 - support = 1.0 - - case 'cosine': - c1 = pi / 4 - c2 = pi / 2 - K = lambda t: c1 * cos(c2 
* t) - W = lambda t: 1/2 * sin(c2 * t) + 1/2 - support = 1.0 - - case _: - raise StatisticsError(f'Unknown kernel name: {kernel!r}') - - if support is None: - - def pdf(x): - n = len(data) - return sum(K((x - x_i) / h) for x_i in data) / (n * h) - - def cdf(x): - n = len(data) - return sum(W((x - x_i) / h) for x_i in data) / n - - else: - - sample = sorted(data) - bandwidth = h * support - - def pdf(x): - nonlocal n, sample - if len(data) != n: - sample = sorted(data) - n = len(data) - i = bisect_left(sample, x - bandwidth) - j = bisect_right(sample, x + bandwidth) - supported = sample[i : j] - return sum(K((x - x_i) / h) for x_i in supported) / (n * h) - - def cdf(x): - nonlocal n, sample - if len(data) != n: - sample = sorted(data) - n = len(data) - i = bisect_left(sample, x - bandwidth) - j = bisect_right(sample, x + bandwidth) - supported = sample[i : j] - return sum((W((x - x_i) / h) for x_i in supported), i) / n - - if cumulative: - cdf.__doc__ = f'CDF estimate with {h=!r} and {kernel=!r}' - return cdf - - else: - pdf.__doc__ = f'PDF estimate with {h=!r} and {kernel=!r}' - return pdf - - -# Notes on methods for computing quantiles -# ---------------------------------------- -# -# There is no one perfect way to compute quantiles. Here we offer -# two methods that serve common needs. Most other packages -# surveyed offered at least one or both of these two, making them -# "standard" in the sense of "widely-adopted and reproducible". -# They are also easy to explain, easy to compute manually, and have -# straight-forward interpretations that aren't surprising. - -# The default method is known as "R6", "PERCENTILE.EXC", or "expected -# value of rank order statistics". The alternative method is known as -# "R7", "PERCENTILE.INC", or "mode of rank order statistics". - -# For sample data where there is a positive probability for values -# beyond the range of the data, the R6 exclusive method is a -# reasonable choice. Consider a random sample of nine values from a -# population with a uniform distribution from 0.0 to 1.0. The -# distribution of the third ranked sample point is described by -# betavariate(alpha=3, beta=7) which has mode=0.250, median=0.286, and -# mean=0.300. Only the latter (which corresponds with R6) gives the -# desired cut point with 30% of the population falling below that -# value, making it comparable to a result from an inv_cdf() function. -# The R6 exclusive method is also idempotent. - -# For describing population data where the end points are known to -# be included in the data, the R7 inclusive method is a reasonable -# choice. Instead of the mean, it uses the mode of the beta -# distribution for the interior points. Per Hyndman & Fan, "One nice -# property is that the vertices of Q7(p) divide the range into n - 1 -# intervals, and exactly 100p% of the intervals lie to the left of -# Q7(p) and 100(1 - p)% of the intervals lie to the right of Q7(p)." - -# If needed, other methods could be added. However, for now, the -# position is that fewer options make for easier choices and that -# external packages can be used for anything more advanced. - -def quantiles(data, *, n=4, method='exclusive'): - """Divide *data* into *n* continuous intervals with equal probability. - - Returns a list of (n - 1) cut points separating the intervals. - - Set *n* to 4 for quartiles (the default). Set *n* to 10 for deciles. - Set *n* to 100 for percentiles which gives the 99 cut points that - separate *data* into 100 equal-sized groups.
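# Quick usage check of the two methods documented above: with the default
# method='exclusive' ("R6") the sample extremes are not pinned to the cut
# points, while method='inclusive' ("R7") treats the minimum and maximum as
# the 0th and 100th percentiles. The data here is an arbitrary illustrative
# sample.
from statistics import quantiles

data = [105, 129, 87, 86, 111, 111, 89, 81, 108, 92]
print(quantiles(data, n=4))                       # exclusive quartiles
print(quantiles(data, n=4, method='inclusive'))   # inclusive quartiles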
- - The *data* can be any iterable containing sample data. - The cut points are linearly interpolated between data points. - - If *method* is set to *inclusive*, *data* is treated as population - data. The minimum value is treated as the 0th percentile and the - maximum value is treated as the 100th percentile. - """ - if n < 1: - raise StatisticsError('n must be at least 1') - data = sorted(data) - ld = len(data) - if ld < 2: - if ld == 1: - return data * (n - 1) - raise StatisticsError('must have at least one data point') - - if method == 'inclusive': - m = ld - 1 - result = [] - for i in range(1, n): - j, delta = divmod(i * m, n) - interpolated = (data[j] * (n - delta) + data[j + 1] * delta) / n - result.append(interpolated) - return result - - if method == 'exclusive': - m = ld + 1 - result = [] - for i in range(1, n): - j = i * m // n # rescale i to m/n - j = 1 if j < 1 else ld-1 if j > ld-1 else j # clamp to 1 .. ld-1 - delta = i*m - j*n # exact integer math - interpolated = (data[j - 1] * (n - delta) + data[j] * delta) / n - result.append(interpolated) - return result - - raise ValueError(f'Unknown method: {method!r}') - - -# === Measures of spread === - -# See http://mathworld.wolfram.com/Variance.html -# http://mathworld.wolfram.com/SampleVariance.html - - -def variance(data, xbar=None): - """Return the sample variance of data. - - data should be an iterable of Real-valued numbers, with at least two - values. The optional argument xbar, if given, should be the mean of - the data. If it is missing or None, the mean is automatically calculated. - - Use this function when your data is a sample from a population. To - calculate the variance from the entire population, see ``pvariance``. - - Examples: - - >>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5] - >>> variance(data) - 1.3720238095238095 - - If you have already calculated the mean of your data, you can pass it as - the optional second argument ``xbar`` to avoid recalculating it: - - >>> m = mean(data) - >>> variance(data, m) - 1.3720238095238095 - - This function does not check that ``xbar`` is actually the mean of - ``data``. Giving arbitrary values for ``xbar`` may lead to invalid or - impossible results. - - Decimals and Fractions are supported: - - >>> from decimal import Decimal as D - >>> variance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")]) - Decimal('31.01875') - - >>> from fractions import Fraction as F - >>> variance([F(1, 6), F(1, 2), F(5, 3)]) - Fraction(67, 108) - - """ - T, ss, c, n = _ss(data, xbar) - if n < 2: - raise StatisticsError('variance requires at least two data points') - return _convert(ss / (n - 1), T) - - -def pvariance(data, mu=None): - """Return the population variance of ``data``. - - data should be a sequence or iterable of Real-valued numbers, with at least one - value. The optional argument mu, if given, should be the mean of - the data. If it is missing or None, the mean is automatically calculated. - - Use this function to calculate the variance from the entire population. - To estimate the variance from a sample, the ``variance`` function is - usually a better choice.
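# The sample/population split described above is Bessel's correction:
# variance() divides the sum of squared deviations by (n - 1), pvariance()
# divides by n. A small numeric check:
from statistics import mean, pvariance, variance

data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
m = mean(data)
ss = sum((x - m) ** 2 for x in data)
assert abs(variance(data) - ss / (len(data) - 1)) < 1e-12
assert abs(pvariance(data) - ss / len(data)) < 1e-12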
- - Examples: - - >>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25] - >>> pvariance(data) - 1.25 - - If you have already calculated the mean of the data, you can pass it as - the optional second argument to avoid recalculating it: - - >>> mu = mean(data) - >>> pvariance(data, mu) - 1.25 - - Decimals and Fractions are supported: - - >>> from decimal import Decimal as D - >>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")]) - Decimal('24.815') - - >>> from fractions import Fraction as F - >>> pvariance([F(1, 4), F(5, 4), F(1, 2)]) - Fraction(13, 72) - - """ - T, ss, c, n = _ss(data, mu) - if n < 1: - raise StatisticsError('pvariance requires at least one data point') - return _convert(ss / n, T) - - -def stdev(data, xbar=None): - """Return the square root of the sample variance. - - See ``variance`` for arguments and other details. - - >>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75]) - 1.0810874155219827 - - """ - T, ss, c, n = _ss(data, xbar) - if n < 2: - raise StatisticsError('stdev requires at least two data points') - mss = ss / (n - 1) - try: - mss_numerator = mss.numerator - mss_denominator = mss.denominator - except AttributeError: - raise ValueError('inf or nan encountered in data') - if issubclass(T, Decimal): - return _decimal_sqrt_of_frac(mss_numerator, mss_denominator) - return _float_sqrt_of_frac(mss_numerator, mss_denominator) - - -def pstdev(data, mu=None): - """Return the square root of the population variance. - - See ``pvariance`` for arguments and other details. - - >>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75]) - 0.986893273527251 - - """ - T, ss, c, n = _ss(data, mu) - if n < 1: - raise StatisticsError('pstdev requires at least one data point') - mss = ss / n - try: - mss_numerator = mss.numerator - mss_denominator = mss.denominator - except AttributeError: - raise ValueError('inf or nan encountered in data') - if issubclass(T, Decimal): - return _decimal_sqrt_of_frac(mss_numerator, mss_denominator) - return _float_sqrt_of_frac(mss_numerator, mss_denominator) - - -def _mean_stdev(data): - """In one pass, compute the mean and sample standard deviation as floats.""" - T, ss, xbar, n = _ss(data) - if n < 2: - raise StatisticsError('stdev requires at least two data points') - mss = ss / (n - 1) - try: - return float(xbar), _float_sqrt_of_frac(mss.numerator, mss.denominator) - except AttributeError: - # Handle Nans and Infs gracefully - return float(xbar), float(xbar) / float(ss) - -def _sqrtprod(x: float, y: float) -> float: - "Return sqrt(x * y) computed with improved accuracy and without overflow/underflow." - h = sqrt(x * y) - if not isfinite(h): - if isinf(h) and not isinf(x) and not isinf(y): - # Finite inputs overflowed, so scale down, and recompute. - scale = 2.0 ** -512 # sqrt(1 / sys.float_info.max) - return _sqrtprod(scale * x, scale * y) / scale - return h - if not h: - if x and y: - # Non-zero inputs underflowed, so scale up, and recompute. - # Scale: 1 / sqrt(sys.float_info.min * sys.float_info.epsilon) - scale = 2.0 ** 537 - return _sqrtprod(scale * x, scale * y) / scale - return h - # Improve accuracy with a differential correction. 
- # https://www.wolframalpha.com/input/?i=Maclaurin+series+sqrt%28h**2+%2B+x%29+at+x%3D0 - d = sumprod((x, h), (y, -h)) - return h + d / (2.0 * h) - - -# === Statistics for relations between two inputs === - -# See https://en.wikipedia.org/wiki/Covariance -# https://en.wikipedia.org/wiki/Pearson_correlation_coefficient -# https://en.wikipedia.org/wiki/Simple_linear_regression - - -def covariance(x, y, /): - """Covariance - - Return the sample covariance of two inputs *x* and *y*. Covariance - is a measure of the joint variability of two inputs. - - >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9] - >>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3] - >>> covariance(x, y) - 0.75 - >>> z = [9, 8, 7, 6, 5, 4, 3, 2, 1] - >>> covariance(x, z) - -7.5 - >>> covariance(z, x) - -7.5 - - """ - n = len(x) - if len(y) != n: - raise StatisticsError('covariance requires that both inputs have same number of data points') - if n < 2: - raise StatisticsError('covariance requires at least two data points') - xbar = fsum(x) / n - ybar = fsum(y) / n - sxy = sumprod((xi - xbar for xi in x), (yi - ybar for yi in y)) - return sxy / (n - 1) - - -def correlation(x, y, /, *, method='linear'): - """Pearson's correlation coefficient - - Return the Pearson's correlation coefficient for two inputs. Pearson's - correlation coefficient *r* takes values between -1 and +1. It measures - the strength and direction of a linear relationship. - - >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9] - >>> y = [9, 8, 7, 6, 5, 4, 3, 2, 1] - >>> correlation(x, x) - 1.0 - >>> correlation(x, y) - -1.0 - - If *method* is "ranked", computes Spearman's rank correlation coefficient - for two inputs. The data is replaced by ranks. Ties are averaged - so that equal values receive the same rank. The resulting coefficient - measures the strength of a monotonic relationship. - - Spearman's rank correlation coefficient is appropriate for ordinal - data or for continuous data that doesn't meet the linear proportion - requirement for Pearson's correlation coefficient. - """ - n = len(x) - if len(y) != n: - raise StatisticsError('correlation requires that both inputs have same number of data points') - if n < 2: - raise StatisticsError('correlation requires at least two data points') - if method not in {'linear', 'ranked'}: - raise ValueError(f'Unknown method: {method!r}') - if method == 'ranked': - start = (n - 1) / -2 # Center rankings around zero - x = _rank(x, start=start) - y = _rank(y, start=start) - else: - xbar = fsum(x) / n - ybar = fsum(y) / n - x = [xi - xbar for xi in x] - y = [yi - ybar for yi in y] - sxy = sumprod(x, y) - sxx = sumprod(x, x) - syy = sumprod(y, y) - try: - return sxy / _sqrtprod(sxx, syy) - except ZeroDivisionError: - raise StatisticsError('at least one of the inputs is constant') - - -LinearRegression = namedtuple('LinearRegression', ('slope', 'intercept')) - - -def linear_regression(x, y, /, *, proportional=False): - """Slope and intercept for simple linear regression. - - Return the slope and intercept of simple linear regression - parameters estimated using ordinary least squares. Simple linear - regression describes relationship between an independent variable - *x* and a dependent variable *y* in terms of a linear function: - - y = slope * x + intercept + noise - - where *slope* and *intercept* are the regression parameters that are - estimated, and noise represents the variability of the data that was - not explained by the linear regression (it is equal to the - difference between predicted and actual values of the dependent - variable). 
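To make the 'linear' versus 'ranked' distinction concrete, here is a minimal usage sketch (assuming the stdlib statistics module): Pearson's r penalizes curvature, while Spearman's rho only requires monotonicity.

from statistics import correlation

x = [1, 2, 3, 4, 5]
y = [1, 4, 9, 16, 25]    # monotonic in x, but not linear

r = correlation(x, y)                     # Pearson: positive, but below 1.0
rho = correlation(x, y, method='ranked')  # Spearman: exactly 1.0
assert rho == 1.0 and r < 1.0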
- - The parameters are returned as a named tuple. - - >>> x = [1, 2, 3, 4, 5] - >>> noise = NormalDist().samples(5, seed=42) - >>> y = [3 * x[i] + 2 + noise[i] for i in range(5)] - >>> linear_regression(x, y) #doctest: +ELLIPSIS - LinearRegression(slope=3.17495..., intercept=1.00925...) - - If *proportional* is true, the independent variable *x* and the - dependent variable *y* are assumed to be directly proportional. - The data is fit to a line passing through the origin. - - Since the *intercept* will always be 0.0, the underlying linear - function simplifies to: - - y = slope * x + noise - - >>> y = [3 * x[i] + noise[i] for i in range(5)] - >>> linear_regression(x, y, proportional=True) #doctest: +ELLIPSIS - LinearRegression(slope=2.90475..., intercept=0.0) - - """ - n = len(x) - if len(y) != n: - raise StatisticsError('linear regression requires that both inputs have same number of data points') - if n < 2: - raise StatisticsError('linear regression requires at least two data points') - if not proportional: - xbar = fsum(x) / n - ybar = fsum(y) / n - x = [xi - xbar for xi in x] # List because used three times below - y = (yi - ybar for yi in y) # Generator because only used once below - sxy = sumprod(x, y) + 0.0 # Add zero to coerce result to a float - sxx = sumprod(x, x) - try: - slope = sxy / sxx # equivalent to: covariance(x, y) / variance(x) - except ZeroDivisionError: - raise StatisticsError('x is constant') - intercept = 0.0 if proportional else ybar - slope * xbar - return LinearRegression(slope=slope, intercept=intercept) - - -## Normal Distribution ##################################################### - - -def _normal_dist_inv_cdf(p, mu, sigma): - # There is no closed-form solution to the inverse CDF for the normal - # distribution, so we use a rational approximation instead: - # Wichura, M.J. (1988). "Algorithm AS241: The Percentage Points of the - # Normal Distribution". Applied Statistics. Blackwell Publishing. 37 - # (3): 477–484. doi:10.2307/2347330. JSTOR 2347330. 
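A small worked example of the estimator described above, as a sketch assuming the stdlib statistics module. For the exact data y = 2x, the centered cross-product sxy is 4 and sxx is 2, so slope = 2.0 and intercept = ybar - slope * xbar = 0.0.

from statistics import linear_regression

x = [1, 2, 3]
y = [2, 4, 6]                      # exactly y = 2 * x

print(linear_regression(x, y))     # LinearRegression(slope=2.0, intercept=0.0)

# proportional=True forces the fit through the origin, estimating only the slope
print(linear_regression(x, y, proportional=True))
# LinearRegression(slope=2.0, intercept=0.0)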
- q = p - 0.5 - if fabs(q) <= 0.425: - r = 0.180625 - q * q - # Hash sum: 55.88319_28806_14901_4439 - num = (((((((2.50908_09287_30122_6727e+3 * r + - 3.34305_75583_58812_8105e+4) * r + - 6.72657_70927_00870_0853e+4) * r + - 4.59219_53931_54987_1457e+4) * r + - 1.37316_93765_50946_1125e+4) * r + - 1.97159_09503_06551_4427e+3) * r + - 1.33141_66789_17843_7745e+2) * r + - 3.38713_28727_96366_6080e+0) * q - den = (((((((5.22649_52788_52854_5610e+3 * r + - 2.87290_85735_72194_2674e+4) * r + - 3.93078_95800_09271_0610e+4) * r + - 2.12137_94301_58659_5867e+4) * r + - 5.39419_60214_24751_1077e+3) * r + - 6.87187_00749_20579_0830e+2) * r + - 4.23133_30701_60091_1252e+1) * r + - 1.0) - x = num / den - return mu + (x * sigma) - r = p if q <= 0.0 else 1.0 - p - r = sqrt(-log(r)) - if r <= 5.0: - r = r - 1.6 - # Hash sum: 49.33206_50330_16102_89036 - num = (((((((7.74545_01427_83414_07640e-4 * r + - 2.27238_44989_26918_45833e-2) * r + - 2.41780_72517_74506_11770e-1) * r + - 1.27045_82524_52368_38258e+0) * r + - 3.64784_83247_63204_60504e+0) * r + - 5.76949_72214_60691_40550e+0) * r + - 4.63033_78461_56545_29590e+0) * r + - 1.42343_71107_49683_57734e+0) - den = (((((((1.05075_00716_44416_84324e-9 * r + - 5.47593_80849_95344_94600e-4) * r + - 1.51986_66563_61645_71966e-2) * r + - 1.48103_97642_74800_74590e-1) * r + - 6.89767_33498_51000_04550e-1) * r + - 1.67638_48301_83803_84940e+0) * r + - 2.05319_16266_37758_82187e+0) * r + - 1.0) - else: - r = r - 5.0 - # Hash sum: 47.52583_31754_92896_71629 - num = (((((((2.01033_43992_92288_13265e-7 * r + - 2.71155_55687_43487_57815e-5) * r + - 1.24266_09473_88078_43860e-3) * r + - 2.65321_89526_57612_30930e-2) * r + - 2.96560_57182_85048_91230e-1) * r + - 1.78482_65399_17291_33580e+0) * r + - 5.46378_49111_64114_36990e+0) * r + - 6.65790_46435_01103_77720e+0) - den = (((((((2.04426_31033_89939_78564e-15 * r + - 1.42151_17583_16445_88870e-7) * r + - 1.84631_83175_10054_68180e-5) * r + - 7.86869_13114_56132_59100e-4) * r + - 1.48753_61290_85061_48525e-2) * r + - 1.36929_88092_27358_05310e-1) * r + - 5.99832_20655_58879_37690e-1) * r + - 1.0) - x = num / den - if q < 0.0: - x = -x - return mu + (x * sigma) - - -# If available, use C implementation -try: - from _statistics import _normal_dist_inv_cdf -except ImportError: - pass - - -class NormalDist: - "Normal distribution of a random variable" - # https://en.wikipedia.org/wiki/Normal_distribution - # https://en.wikipedia.org/wiki/Variance#Properties - - __slots__ = { - '_mu': 'Arithmetic mean of a normal distribution', - '_sigma': 'Standard deviation of a normal distribution', - } - - def __init__(self, mu=0.0, sigma=1.0): - "NormalDist where mu is the mean and sigma is the standard deviation." - if sigma < 0.0: - raise StatisticsError('sigma must be non-negative') - self._mu = float(mu) - self._sigma = float(sigma) - - @classmethod - def from_samples(cls, data): - "Make a normal distribution instance from sample data." - return cls(*_mean_stdev(data)) - - def samples(self, n, *, seed=None): - "Generate *n* samples for a given mean and standard deviation." - rnd = random.random if seed is None else random.Random(seed).random - inv_cdf = _normal_dist_inv_cdf - mu = self._mu - sigma = self._sigma - return [inv_cdf(rnd(), mu, sigma) for _ in repeat(None, n)] - - def pdf(self, x): - "Probability density function. 
P(x <= X < x+dx) / dx" - variance = self._sigma * self._sigma - if not variance: - raise StatisticsError('pdf() not defined when sigma is zero') - diff = x - self._mu - return exp(diff * diff / (-2.0 * variance)) / sqrt(tau * variance) - - def cdf(self, x): - "Cumulative distribution function. P(X <= x)" - if not self._sigma: - raise StatisticsError('cdf() not defined when sigma is zero') - return 0.5 * (1.0 + erf((x - self._mu) / (self._sigma * _SQRT2))) - - def inv_cdf(self, p): - """Inverse cumulative distribution function. x : P(X <= x) = p - - Finds the value of the random variable such that the probability of - the variable being less than or equal to that value equals the given - probability. - - This function is also called the percent point function or quantile - function. - """ - if p <= 0.0 or p >= 1.0: - raise StatisticsError('p must be in the range 0.0 < p < 1.0') - return _normal_dist_inv_cdf(p, self._mu, self._sigma) - - def quantiles(self, n=4): - """Divide into *n* continuous intervals with equal probability. - - Returns a list of (n - 1) cut points separating the intervals. - - Set *n* to 4 for quartiles (the default). Set *n* to 10 for deciles. - Set *n* to 100 for percentiles which gives the 99 cut points that - separate the normal distribution into 100 equal-sized groups. - """ - return [self.inv_cdf(i / n) for i in range(1, n)] - - def overlap(self, other): - """Compute the overlapping coefficient (OVL) between two normal distributions. - - Measures the agreement between two normal probability distributions. - Returns a value between 0.0 and 1.0 giving the overlapping area in - the two underlying probability density functions. - - >>> N1 = NormalDist(2.4, 1.6) - >>> N2 = NormalDist(3.2, 2.0) - >>> N1.overlap(N2) - 0.8035050657330205 - """ - # See: "The overlapping coefficient as a measure of agreement between - # probability distributions and point estimation of the overlap of two - # normal densities" -- Henry F. Inman and Edwin L. Bradley Jr - # http://dx.doi.org/10.1080/03610928908830127 - if not isinstance(other, NormalDist): - raise TypeError('Expected another NormalDist instance') - X, Y = self, other - if (Y._sigma, Y._mu) < (X._sigma, X._mu): # sort to assure commutativity - X, Y = Y, X - X_var, Y_var = X.variance, Y.variance - if not X_var or not Y_var: - raise StatisticsError('overlap() not defined when sigma is zero') - dv = Y_var - X_var - dm = fabs(Y._mu - X._mu) - if not dv: - return 1.0 - erf(dm / (2.0 * X._sigma * _SQRT2)) - a = X._mu * Y_var - Y._mu * X_var - b = X._sigma * Y._sigma * sqrt(dm * dm + dv * log(Y_var / X_var)) - x1 = (a + b) / dv - x2 = (a - b) / dv - return 1.0 - (fabs(Y.cdf(x1) - X.cdf(x1)) + fabs(Y.cdf(x2) - X.cdf(x2))) - - def zscore(self, x): - """Compute the Standard Score. (x - mean) / stdev - - Describes *x* in terms of the number of standard deviations - above or below the mean of the normal distribution. - """ - # https://www.statisticshowto.com/probability-and-statistics/z-score/ - if not self._sigma: - raise StatisticsError('zscore() not defined when sigma is zero') - return (x - self._mu) / self._sigma - - @property - def mean(self): - "Arithmetic mean of the normal distribution." - return self._mu - - @property - def median(self): - "Return the median of the normal distribution." - return self._mu - - @property - def mode(self): - """Return the mode of the normal distribution. - - The mode is the value x at which the probability density - function (pdf) takes its maximum value.
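The NormalDist API above composes naturally; a brief usage sketch (assuming the stdlib statistics module), with outputs rounded for readability:

from statistics import NormalDist

iq = NormalDist(100, 15)
print(iq.zscore(130))               # 2.0
print(round(iq.cdf(130), 4))        # 0.9772, i.e. P(X <= 130)
print(round(iq.inv_cdf(0.95), 1))   # 124.7, the 95th percentile

# Sums of independent normals add means and variances (see __add__ below):
print(NormalDist(5, 1) + NormalDist(3, 2))
# NormalDist(mu=8.0, sigma=2.23606797749979), since sqrt(1**2 + 2**2) = sqrt(5)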
- """ - return self._mu - - @property - def stdev(self): - "Standard deviation of the normal distribution." - return self._sigma - - @property - def variance(self): - "Square of the standard deviation." - return self._sigma * self._sigma - - def __add__(x1, x2): - """Add a constant or another NormalDist instance. - - If *other* is a constant, translate mu by the constant, - leaving sigma unchanged. - - If *other* is a NormalDist, add both the means and the variances. - Mathematically, this works only if the two distributions are - independent or if they are jointly normally distributed. - """ - if isinstance(x2, NormalDist): - return NormalDist(x1._mu + x2._mu, hypot(x1._sigma, x2._sigma)) - return NormalDist(x1._mu + x2, x1._sigma) - - def __sub__(x1, x2): - """Subtract a constant or another NormalDist instance. - - If *other* is a constant, translate by the constant mu, - leaving sigma unchanged. - - If *other* is a NormalDist, subtract the means and add the variances. - Mathematically, this works only if the two distributions are - independent or if they are jointly normally distributed. - """ - if isinstance(x2, NormalDist): - return NormalDist(x1._mu - x2._mu, hypot(x1._sigma, x2._sigma)) - return NormalDist(x1._mu - x2, x1._sigma) - - def __mul__(x1, x2): - """Multiply both mu and sigma by a constant. - - Used for rescaling, perhaps to change measurement units. - Sigma is scaled with the absolute value of the constant. - """ - return NormalDist(x1._mu * x2, x1._sigma * fabs(x2)) - - def __truediv__(x1, x2): - """Divide both mu and sigma by a constant. - - Used for rescaling, perhaps to change measurement units. - Sigma is scaled with the absolute value of the constant. - """ - return NormalDist(x1._mu / x2, x1._sigma / fabs(x2)) - - def __pos__(x1): - "Return a copy of the instance." - return NormalDist(x1._mu, x1._sigma) - - def __neg__(x1): - "Negates mu while keeping sigma the same." - return NormalDist(-x1._mu, x1._sigma) - - __radd__ = __add__ - - def __rsub__(x1, x2): - "Subtract a NormalDist from a constant or another NormalDist." - return -(x1 - x2) - - __rmul__ = __mul__ - - def __eq__(x1, x2): - "Two NormalDist objects are equal if their mu and sigma are both equal." - if not isinstance(x2, NormalDist): - return NotImplemented - return x1._mu == x2._mu and x1._sigma == x2._sigma - - def __hash__(self): - "NormalDist objects hash equal if their mu and sigma are both equal." - return hash((self._mu, self._sigma)) - - def __repr__(self): - return f'{type(self).__name__}(mu={self._mu!r}, sigma={self._sigma!r})' - - def __getstate__(self): - return self._mu, self._sigma - - def __setstate__(self, state): - self._mu, self._sigma = state - - -## kde_random() ############################################################## - -def _newton_raphson(f_inv_estimate, f, f_prime, tolerance=1e-12): - def f_inv(y): - "Return x such that f(x) ≈ y within the specified tolerance." 
- x = f_inv_estimate(y) - while abs(diff := f(x) - y) > tolerance: - x -= diff / f_prime(x) - return x - return f_inv - -def _quartic_invcdf_estimate(p): - sign, p = (1.0, p) if p <= 1/2 else (-1.0, 1.0 - p) - x = (2.0 * p) ** 0.4258865685331 - 1.0 - if p >= 0.004 < 0.499: - x += 0.026818732 * sin(7.101753784 * p + 2.73230839482953) - return x * sign - -_quartic_invcdf = _newton_raphson( - f_inv_estimate = _quartic_invcdf_estimate, - f = lambda t: 3/16 * t**5 - 5/8 * t**3 + 15/16 * t + 1/2, - f_prime = lambda t: 15/16 * (1.0 - t * t) ** 2) - -def _triweight_invcdf_estimate(p): - sign, p = (1.0, p) if p <= 1/2 else (-1.0, 1.0 - p) - x = (2.0 * p) ** 0.3400218741872791 - 1.0 - return x * sign - -_triweight_invcdf = _newton_raphson( - f_inv_estimate = _triweight_invcdf_estimate, - f = lambda t: 35/32 * (-1/7*t**7 + 3/5*t**5 - t**3 + t) + 1/2, - f_prime = lambda t: 35/32 * (1.0 - t * t) ** 3) - -_kernel_invcdfs = { - 'normal': NormalDist().inv_cdf, - 'logistic': lambda p: log(p / (1 - p)), - 'sigmoid': lambda p: log(tan(p * pi/2)), - 'rectangular': lambda p: 2*p - 1, - 'parabolic': lambda p: 2 * cos((acos(2*p-1) + pi) / 3), - 'quartic': _quartic_invcdf, - 'triweight': _triweight_invcdf, - 'triangular': lambda p: sqrt(2*p) - 1 if p < 1/2 else 1 - sqrt(2 - 2*p), - 'cosine': lambda p: 2 * asin(2*p - 1) / pi, -} -_kernel_invcdfs['gauss'] = _kernel_invcdfs['normal'] -_kernel_invcdfs['uniform'] = _kernel_invcdfs['rectangular'] -_kernel_invcdfs['epanechnikov'] = _kernel_invcdfs['parabolic'] -_kernel_invcdfs['biweight'] = _kernel_invcdfs['quartic'] - -def kde_random(data, h, kernel='normal', *, seed=None): - """Return a function that makes a random selection from the estimated - probability density function created by kde(data, h, kernel). - - Providing a *seed* allows reproducible selections within a single - thread. The seed may be an integer, float, str, or bytes. - - A StatisticsError will be raised if the *data* sequence is empty. - - Example: - - >>> data = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2] - >>> rand = kde_random(data, h=1.5, seed=8675309) - >>> new_selections = [rand() for i in range(10)] - >>> [round(x, 1) for x in new_selections] - [0.7, 6.2, 1.2, 6.9, 7.0, 1.8, 2.5, -0.5, -1.8, 5.6] - - """ - n = len(data) - if not n: - raise StatisticsError('Empty data sequence') - - if not isinstance(data[0], (int, float)): - raise TypeError('Data sequence must contain ints or floats') - - if h <= 0.0: - raise StatisticsError(f'Bandwidth h must be positive, not {h=!r}') - - kernel_invcdf = _kernel_invcdfs.get(kernel) - if kernel_invcdf is None: - raise StatisticsError(f'Unknown kernel name: {kernel!r}') - - prng = _random.Random(seed) - random = prng.random - choice = prng.choice - - def rand(): - return choice(data) + h * kernel_invcdf(random()) - - rand.__doc__ = f'Random KDE selection with {h=!r} and {kernel=!r}' - - return rand diff --git a/Python313_13_x64_Template/Lib/string.py b/Python313_13_x64_Template/Lib/string.py deleted file mode 100644 index 2eab6d4f..00000000 --- a/Python313_13_x64_Template/Lib/string.py +++ /dev/null @@ -1,309 +0,0 @@ -"""A collection of string constants. 
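kde_random() above is inverse-transform sampling: draw a uniform p in (0, 1), map it through a kernel's inverse CDF, scale by the bandwidth h, and center on a randomly chosen data point. A self-contained sketch using the triangular kernel's closed form from the _kernel_invcdfs table (the data and seed mirror the doctest above, but the kernel differs, so the sampled value will differ):

import random
from math import sqrt

def tri_invcdf(p):
    # triangular kernel inverse CDF, as in the table above
    return sqrt(2 * p) - 1 if p < 1/2 else 1 - sqrt(2 - 2 * p)

prng = random.Random(8675309)
data = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2]
h = 1.5
sample = prng.choice(data) + h * tri_invcdf(prng.random())
print(sample)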
- -Public module variables: - -whitespace -- a string containing all ASCII whitespace -ascii_lowercase -- a string containing all ASCII lowercase letters -ascii_uppercase -- a string containing all ASCII uppercase letters -ascii_letters -- a string containing all ASCII letters -digits -- a string containing all ASCII decimal digits -hexdigits -- a string containing all ASCII hexadecimal digits -octdigits -- a string containing all ASCII octal digits -punctuation -- a string containing all ASCII punctuation characters -printable -- a string containing all ASCII characters considered printable - -""" - -__all__ = ["ascii_letters", "ascii_lowercase", "ascii_uppercase", "capwords", - "digits", "hexdigits", "octdigits", "printable", "punctuation", - "whitespace", "Formatter", "Template"] - -import _string - -# Some strings for ctype-style character classification -whitespace = ' \t\n\r\v\f' -ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz' -ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' -ascii_letters = ascii_lowercase + ascii_uppercase -digits = '0123456789' -hexdigits = digits + 'abcdef' + 'ABCDEF' -octdigits = '01234567' -punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" -printable = digits + ascii_letters + punctuation + whitespace - -# Functions which aren't available as string methods. - -# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def". -def capwords(s, sep=None): - """capwords(s [,sep]) -> string - - Split the argument into words using split, capitalize each - word using capitalize, and join the capitalized words using - join. If the optional second argument sep is absent or None, - runs of whitespace characters are replaced by a single space - and leading and trailing whitespace are removed, otherwise - sep is used to split and join the words. - - """ - return (sep or ' ').join(map(str.capitalize, s.split(sep))) - - -#################################################################### -import re as _re -from collections import ChainMap as _ChainMap - -_sentinel_dict = {} - -class Template: - """A string class for supporting $-substitutions.""" - - delimiter = '$' - # r'[a-z]' matches to non-ASCII letters when used with IGNORECASE, but - # without the ASCII flag. We can't add re.ASCII to flags because of - # backward compatibility. So we use the ?a local flag and [a-z] pattern. 
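Both helpers described above are easy to exercise; a minimal sketch assuming the stdlib string module (the Template example is the classic one from the Python docs):

from string import capwords, Template

print(capwords('  the  quick brown fox '))
# 'The Quick Brown Fox' -- runs of whitespace collapse when sep is None

print(Template('$who likes $what').substitute(who='tim', what='kung pao'))
# 'tim likes kung pao'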
- # See https://bugs.python.org/issue31672 - idpattern = r'(?a:[_a-z][_a-z0-9]*)' - braceidpattern = None - flags = _re.IGNORECASE - - def __init_subclass__(cls): - super().__init_subclass__() - if 'pattern' in cls.__dict__: - pattern = cls.pattern - else: - delim = _re.escape(cls.delimiter) - id = cls.idpattern - bid = cls.braceidpattern or cls.idpattern - pattern = fr""" - {delim}(?: - (?P<escaped>{delim}) | # Escape sequence of two delimiters - (?P<named>{id}) | # delimiter and a Python identifier - {{(?P<braced>{bid})}} | # delimiter and a braced identifier - (?P<invalid>) # Other ill-formed delimiter exprs - ) - """ - cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE) - - def __init__(self, template): - self.template = template - - # Search for $$, $identifier, ${identifier}, and any bare $'s - - def _invalid(self, mo): - i = mo.start('invalid') - lines = self.template[:i].splitlines(keepends=True) - if not lines: - colno = 1 - lineno = 1 - else: - colno = i - len(''.join(lines[:-1])) - lineno = len(lines) - raise ValueError('Invalid placeholder in string: line %d, col %d' % - (lineno, colno)) - - def substitute(self, mapping=_sentinel_dict, /, **kws): - if mapping is _sentinel_dict: - mapping = kws - elif kws: - mapping = _ChainMap(kws, mapping) - # Helper function for .sub() - def convert(mo): - # Check the most common path first. - named = mo.group('named') or mo.group('braced') - if named is not None: - return str(mapping[named]) - if mo.group('escaped') is not None: - return self.delimiter - if mo.group('invalid') is not None: - self._invalid(mo) - raise ValueError('Unrecognized named group in pattern', - self.pattern) - return self.pattern.sub(convert, self.template) - - def safe_substitute(self, mapping=_sentinel_dict, /, **kws): - if mapping is _sentinel_dict: - mapping = kws - elif kws: - mapping = _ChainMap(kws, mapping) - # Helper function for .sub() - def convert(mo): - named = mo.group('named') or mo.group('braced') - if named is not None: - try: - return str(mapping[named]) - except KeyError: - return mo.group() - if mo.group('escaped') is not None: - return self.delimiter - if mo.group('invalid') is not None: - return mo.group() - raise ValueError('Unrecognized named group in pattern', - self.pattern) - return self.pattern.sub(convert, self.template) - - def is_valid(self): - for mo in self.pattern.finditer(self.template): - if mo.group('invalid') is not None: - return False - if (mo.group('named') is None - and mo.group('braced') is None - and mo.group('escaped') is None): - # If all the groups are None, there must be - # another group we're not expecting - raise ValueError('Unrecognized named group in pattern', - self.pattern) - return True - - def get_identifiers(self): - ids = [] - for mo in self.pattern.finditer(self.template): - named = mo.group('named') or mo.group('braced') - if named is not None and named not in ids: - # add a named group only the first time it appears - ids.append(named) - elif (named is None - and mo.group('invalid') is None - and mo.group('escaped') is None): - # If all the groups are None, there must be - # another group we're not expecting - raise ValueError('Unrecognized named group in pattern', - self.pattern) - return ids - -# Initialize Template.pattern. __init_subclass__() is automatically called -# only for subclasses, not for the Template class itself.
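The difference between the strict and forgiving substitution paths above, plus the two introspection helpers; a usage sketch assuming the stdlib string module:

from string import Template

t = Template('$name owes me $amount')
print(t.safe_substitute(name='Bob'))   # 'Bob owes me $amount' -- no KeyError
print(t.get_identifiers())             # ['name', 'amount']
print(t.is_valid())                    # True
try:
    t.substitute(name='Bob')           # strict mode raises on the missing key
except KeyError as exc:
    print('missing:', exc)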
-Template.__init_subclass__() - - -######################################################################## -# the Formatter class -# see PEP 3101 for details and purpose of this class - -# The hard parts are reused from the C implementation. They're exposed as "_" -# prefixed methods of str. - -# The overall parser is implemented in _string.formatter_parser. -# The field name parser is implemented in _string.formatter_field_name_split - -class Formatter: - def format(self, format_string, /, *args, **kwargs): - return self.vformat(format_string, args, kwargs) - - def vformat(self, format_string, args, kwargs): - used_args = set() - result, _ = self._vformat(format_string, args, kwargs, used_args, 2) - self.check_unused_args(used_args, args, kwargs) - return result - - def _vformat(self, format_string, args, kwargs, used_args, recursion_depth, - auto_arg_index=0): - if recursion_depth < 0: - raise ValueError('Max string recursion exceeded') - result = [] - for literal_text, field_name, format_spec, conversion in \ - self.parse(format_string): - - # output the literal text - if literal_text: - result.append(literal_text) - - # if there's a field, output it - if field_name is not None: - # this is some markup, find the object and do - # the formatting - - # handle arg indexing when empty field_names are given. - if field_name == '': - if auto_arg_index is False: - raise ValueError('cannot switch from manual field ' - 'specification to automatic field ' - 'numbering') - field_name = str(auto_arg_index) - auto_arg_index += 1 - elif field_name.isdigit(): - if auto_arg_index: - raise ValueError('cannot switch from manual field ' - 'specification to automatic field ' - 'numbering') - # disable auto arg incrementing, if it gets - # used later on, then an exception will be raised - auto_arg_index = False - - # given the field_name, find the object it references - # and the argument it came from - obj, arg_used = self.get_field(field_name, args, kwargs) - used_args.add(arg_used) - - # do any conversion on the resulting object - obj = self.convert_field(obj, conversion) - - # expand the format spec, if needed - format_spec, auto_arg_index = self._vformat( - format_spec, args, kwargs, - used_args, recursion_depth-1, - auto_arg_index=auto_arg_index) - - # format the object and append to the result - result.append(self.format_field(obj, format_spec)) - - return ''.join(result), auto_arg_index - - - def get_value(self, key, args, kwargs): - if isinstance(key, int): - return args[key] - else: - return kwargs[key] - - - def check_unused_args(self, used_args, args, kwargs): - pass - - - def format_field(self, value, format_spec): - return format(value, format_spec) - - - def convert_field(self, value, conversion): - # do any conversion on the resulting object - if conversion is None: - return value - elif conversion == 's': - return str(value) - elif conversion == 'r': - return repr(value) - elif conversion == 'a': - return ascii(value) - raise ValueError("Unknown conversion specifier {0!s}".format(conversion)) - - - # returns an iterable that contains tuples of the form: - # (literal_text, field_name, format_spec, conversion) - # literal_text can be zero length - # field_name can be None, in which case there's no - # object to format and output - # if field_name is not None, it is looked up, formatted - # with format_spec and conversion and then used - def parse(self, format_string): - return _string.formatter_parser(format_string) - - - # given a field_name, find the object it references. 
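Formatter is designed for subclassing: get_value() resolves each field name, so one small override changes the lookup policy. A sketch assuming the stdlib string module; DefaultingFormatter and its '<missing>' marker are hypothetical names used only for illustration:

from string import Formatter

print(Formatter().format('{0} + {0} = {1}', 2, 4))   # '2 + 2 = 4'

class DefaultingFormatter(Formatter):
    def get_value(self, key, args, kwargs):
        if isinstance(key, str):
            return kwargs.get(key, '<missing>')      # default instead of KeyError
        return super().get_value(key, args, kwargs)

print(DefaultingFormatter().format('{name} / {other}', name='x'))
# 'x / <missing>'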
- # field_name: the field being looked up, e.g. "0.name" - # or "lookup[3]" - # used_args: a set of which args have been used - # args, kwargs: as passed in to vformat - def get_field(self, field_name, args, kwargs): - first, rest = _string.formatter_field_name_split(field_name) - - obj = self.get_value(first, args, kwargs) - - # loop through the rest of the field_name, doing - # getattr or getitem as needed - for is_attr, i in rest: - if is_attr: - obj = getattr(obj, i) - else: - obj = obj[i] - - return obj, first diff --git a/Python313_13_x64_Template/Lib/struct.py b/Python313_13_x64_Template/Lib/struct.py deleted file mode 100644 index d6bba588..00000000 --- a/Python313_13_x64_Template/Lib/struct.py +++ /dev/null @@ -1,15 +0,0 @@ -__all__ = [ - # Functions - 'calcsize', 'pack', 'pack_into', 'unpack', 'unpack_from', - 'iter_unpack', - - # Classes - 'Struct', - - # Exceptions - 'error' - ] - -from _struct import * -from _struct import _clearcache -from _struct import __doc__ diff --git a/Python313_13_x64_Template/Lib/subprocess.py b/Python313_13_x64_Template/Lib/subprocess.py deleted file mode 100644 index 3a8c7434..00000000 --- a/Python313_13_x64_Template/Lib/subprocess.py +++ /dev/null @@ -1,2258 +0,0 @@ -# subprocess - Subprocesses with accessible I/O streams -# -# For more information about this module, see PEP 324. -# -# Copyright (c) 2003-2005 by Peter Astrand -# -# Licensed to PSF under a Contributor Agreement. - -r"""Subprocesses with accessible I/O streams - -This module allows you to spawn processes, connect to their -input/output/error pipes, and obtain their return codes. - -For a complete description of this module see the Python documentation. - -Main API -======== -run(...): Runs a command, waits for it to complete, then returns a - CompletedProcess instance. -Popen(...): A class for flexibly executing a command in a new process - -Constants ---------- -DEVNULL: Special value that indicates that os.devnull should be used -PIPE: Special value that indicates a pipe should be created -STDOUT: Special value that indicates that stderr should go to stdout - - -Older API -========= -call(...): Runs a command, waits for it to complete, then returns - the return code. -check_call(...): Same as call() but raises CalledProcessError() - if return code is not 0 -check_output(...): Same as check_call() but returns the contents of - stdout instead of a return code -getoutput(...): Runs a command in the shell, waits for it to complete, - then returns the output -getstatusoutput(...): Runs a command in the shell, waits for it to complete, - then returns a (exitcode, output) tuple -""" - -import builtins -import errno -import io -import locale -import os -import time -import signal -import sys -import threading -import warnings -import contextlib -from time import monotonic as _time -import types - -try: - import fcntl -except ImportError: - fcntl = None - - -__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput", - "getoutput", "check_output", "run", "CalledProcessError", "DEVNULL", - "SubprocessError", "TimeoutExpired", "CompletedProcess"] - # NOTE: We intentionally exclude list2cmdline as it is - # considered an internal implementation detail. issue10838. 
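struct.py above is a thin re-export of the C accelerator _struct; the API round-trips packed binary data. A quick sketch assuming the stdlib struct module:

import struct

packed = struct.pack('<hhl', 1, 2, 3)   # little-endian: short, short, long
print(packed)                           # b'\x01\x00\x02\x00\x03\x00\x00\x00'
print(struct.unpack('<hhl', packed))    # (1, 2, 3)
print(struct.calcsize('<hhl'))          # 8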
- -# use presence of msvcrt to detect Windows-like platforms (see bpo-8110) -try: - import msvcrt -except ModuleNotFoundError: - _mswindows = False -else: - _mswindows = True - -# some platforms do not support subprocesses -_can_fork_exec = sys.platform not in {"emscripten", "wasi", "ios", "tvos", "watchos"} - -if _mswindows: - import _winapi - from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP, - STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, - STD_ERROR_HANDLE, SW_HIDE, - STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW, - STARTF_FORCEONFEEDBACK, STARTF_FORCEOFFFEEDBACK, - ABOVE_NORMAL_PRIORITY_CLASS, BELOW_NORMAL_PRIORITY_CLASS, - HIGH_PRIORITY_CLASS, IDLE_PRIORITY_CLASS, - NORMAL_PRIORITY_CLASS, REALTIME_PRIORITY_CLASS, - CREATE_NO_WINDOW, DETACHED_PROCESS, - CREATE_DEFAULT_ERROR_MODE, CREATE_BREAKAWAY_FROM_JOB) - - __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP", - "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE", - "STD_ERROR_HANDLE", "SW_HIDE", - "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW", - "STARTF_FORCEONFEEDBACK", "STARTF_FORCEOFFFEEDBACK", - "STARTUPINFO", - "ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS", - "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS", - "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS", - "CREATE_NO_WINDOW", "DETACHED_PROCESS", - "CREATE_DEFAULT_ERROR_MODE", "CREATE_BREAKAWAY_FROM_JOB"]) -else: - if _can_fork_exec: - from _posixsubprocess import fork_exec as _fork_exec - # used in methods that are called by __del__ - class _del_safe: - waitpid = os.waitpid - waitstatus_to_exitcode = os.waitstatus_to_exitcode - WIFSTOPPED = os.WIFSTOPPED - WSTOPSIG = os.WSTOPSIG - WNOHANG = os.WNOHANG - ECHILD = errno.ECHILD - else: - class _del_safe: - waitpid = None - waitstatus_to_exitcode = None - WIFSTOPPED = None - WSTOPSIG = None - WNOHANG = None - ECHILD = errno.ECHILD - - import select - import selectors - - -# Exception classes used by this module. -class SubprocessError(Exception): pass - - -class CalledProcessError(SubprocessError): - """Raised when run() is called with check=True and the process - returns a non-zero exit status. - - Attributes: - cmd, returncode, stdout, stderr, output - """ - def __init__(self, returncode, cmd, output=None, stderr=None): - self.returncode = returncode - self.cmd = cmd - self.output = output - self.stderr = stderr - - def __str__(self): - if self.returncode and self.returncode < 0: - try: - return "Command '%s' died with %r." % ( - self.cmd, signal.Signals(-self.returncode)) - except ValueError: - return "Command '%s' died with unknown signal %d." % ( - self.cmd, -self.returncode) - else: - return "Command '%s' returned non-zero exit status %d." % ( - self.cmd, self.returncode) - - @property - def stdout(self): - """Alias for output attribute, to match stderr""" - return self.output - - @stdout.setter - def stdout(self, value): - # There's no obvious reason to set this, but allow it anyway so - # .stdout is a transparent alias for .output - self.output = value - - -class TimeoutExpired(SubprocessError): - """This exception is raised when the timeout expires while waiting for a - child process. 
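CalledProcessError carries the exit status and any captured output, and its __str__ distinguishes signals (negative return codes) from ordinary failures, as shown above. A minimal cross-platform sketch assuming the stdlib subprocess module:

import subprocess
import sys

try:
    subprocess.run([sys.executable, '-c', 'raise SystemExit(3)'], check=True)
except subprocess.CalledProcessError as exc:
    print(exc.returncode)   # 3
    print(exc)              # Command '...' returned non-zero exit status 3.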
- - Attributes: - cmd, output, stdout, stderr, timeout - """ - def __init__(self, cmd, timeout, output=None, stderr=None): - self.cmd = cmd - self.timeout = timeout - self.output = output - self.stderr = stderr - - def __str__(self): - return ("Command '%s' timed out after %s seconds" % - (self.cmd, self.timeout)) - - @property - def stdout(self): - return self.output - - @stdout.setter - def stdout(self, value): - # There's no obvious reason to set this, but allow it anyway so - # .stdout is a transparent alias for .output - self.output = value - - -if _mswindows: - class STARTUPINFO: - def __init__(self, *, dwFlags=0, hStdInput=None, hStdOutput=None, - hStdError=None, wShowWindow=0, lpAttributeList=None): - self.dwFlags = dwFlags - self.hStdInput = hStdInput - self.hStdOutput = hStdOutput - self.hStdError = hStdError - self.wShowWindow = wShowWindow - self.lpAttributeList = lpAttributeList or {"handle_list": []} - - def copy(self): - attr_list = self.lpAttributeList.copy() - if 'handle_list' in attr_list: - attr_list['handle_list'] = list(attr_list['handle_list']) - - return STARTUPINFO(dwFlags=self.dwFlags, - hStdInput=self.hStdInput, - hStdOutput=self.hStdOutput, - hStdError=self.hStdError, - wShowWindow=self.wShowWindow, - lpAttributeList=attr_list) - - - class Handle(int): - closed = False - - def Close(self, CloseHandle=_winapi.CloseHandle): - if not self.closed: - self.closed = True - CloseHandle(self) - - def Detach(self): - if not self.closed: - self.closed = True - return int(self) - raise ValueError("already closed") - - def __repr__(self): - return "%s(%d)" % (self.__class__.__name__, int(self)) - - __del__ = Close -else: - # When select or poll has indicated that the file is writable, - # we can write up to _PIPE_BUF bytes without risk of blocking. - # POSIX defines PIPE_BUF as >= 512. - _PIPE_BUF = getattr(select, 'PIPE_BUF', 512) - - # poll/select have the advantage of not requiring any extra file - # descriptor, contrarily to epoll/kqueue (also, they require a single - # syscall). - if hasattr(selectors, 'PollSelector'): - _PopenSelector = selectors.PollSelector - else: - _PopenSelector = selectors.SelectSelector - - -if _mswindows: - # On Windows we just need to close `Popen._handle` when we no longer need - # it, so that the kernel can free it. `Popen._handle` gets closed - # implicitly when the `Popen` instance is finalized (see `Handle.__del__`, - # which is calling `CloseHandle` as requested in [1]), so there is nothing - # for `_cleanup` to do. - # - # [1] https://docs.microsoft.com/en-us/windows/desktop/ProcThread/ - # creating-processes - _active = None - - def _cleanup(): - pass -else: - # This lists holds Popen instances for which the underlying process had not - # exited at the time its __del__ method got called: those processes are - # wait()ed for synchronously from _cleanup() when a new Popen object is - # created, to avoid zombie processes. - _active = [] - - def _cleanup(): - if _active is None: - return - for inst in _active[:]: - res = inst._internal_poll(_deadstate=sys.maxsize) - if res is not None: - try: - _active.remove(inst) - except ValueError: - # This can happen if two threads create a new Popen instance. - # It's harmless that it was already removed, so ignore. - pass - -PIPE = -1 -STDOUT = -2 -DEVNULL = -3 - - -# XXX This function is only used by multiprocessing and the test suite, -# but it's here so that it can be imported when Python is compiled without -# threads. 
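The Windows-only STARTUPINFO wrapper above is what callers use to control window visibility for spawned processes, which is relevant to the Windows services this installer targets. A Windows-only sketch assuming the stdlib subprocess module:

import subprocess
import sys

if sys.platform == 'win32':
    si = subprocess.STARTUPINFO()
    si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    si.wShowWindow = subprocess.SW_HIDE   # launch without a visible window
    subprocess.run([sys.executable, '-c', 'pass'], startupinfo=si)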
- -def _optim_args_from_interpreter_flags(): - """Return a list of command-line arguments reproducing the current - optimization settings in sys.flags.""" - args = [] - value = sys.flags.optimize - if value > 0: - args.append('-' + 'O' * value) - return args - - -def _args_from_interpreter_flags(): - """Return a list of command-line arguments reproducing the current - settings in sys.flags, sys.warnoptions and sys._xoptions.""" - flag_opt_map = { - 'debug': 'd', - # 'inspect': 'i', - # 'interactive': 'i', - 'dont_write_bytecode': 'B', - 'no_site': 'S', - 'verbose': 'v', - 'bytes_warning': 'b', - 'quiet': 'q', - # -O is handled in _optim_args_from_interpreter_flags() - } - args = _optim_args_from_interpreter_flags() - for flag, opt in flag_opt_map.items(): - v = getattr(sys.flags, flag) - if v > 0: - args.append('-' + opt * v) - - if sys.flags.isolated: - args.append('-I') - else: - if sys.flags.ignore_environment: - args.append('-E') - if sys.flags.no_user_site: - args.append('-s') - if sys.flags.safe_path: - args.append('-P') - - # -W options - warnopts = sys.warnoptions[:] - xoptions = getattr(sys, '_xoptions', {}) - bytes_warning = sys.flags.bytes_warning - dev_mode = sys.flags.dev_mode - - if bytes_warning > 1: - warnopts.remove("error::BytesWarning") - elif bytes_warning: - warnopts.remove("default::BytesWarning") - if dev_mode: - warnopts.remove('default') - for opt in warnopts: - args.append('-W' + opt) - - # -X options - if dev_mode: - args.extend(('-X', 'dev')) - for opt in ('faulthandler', 'tracemalloc', 'importtime', - 'frozen_modules', 'showrefcount', 'utf8', 'gil'): - if opt in xoptions: - value = xoptions[opt] - if value is True: - arg = opt - else: - arg = '%s=%s' % (opt, value) - args.extend(('-X', arg)) - - return args - - -def _text_encoding(): - # Return default text encoding and emit EncodingWarning if - # sys.flags.warn_default_encoding is true. - if sys.flags.warn_default_encoding: - f = sys._getframe() - filename = f.f_code.co_filename - stacklevel = 2 - while f := f.f_back: - if f.f_code.co_filename != filename: - break - stacklevel += 1 - warnings.warn("'encoding' argument not specified.", - EncodingWarning, stacklevel) - - if sys.flags.utf8_mode: - return "utf-8" - else: - return locale.getencoding() - - -def call(*popenargs, timeout=None, **kwargs): - """Run command with arguments. Wait for command to complete or - for timeout seconds, then return the returncode attribute. - - The arguments are the same as for the Popen constructor. Example: - - retcode = call(["ls", "-l"]) - """ - with Popen(*popenargs, **kwargs) as p: - try: - return p.wait(timeout=timeout) - except: # Including KeyboardInterrupt, wait handled that. - p.kill() - # We don't call p.wait() again as p.__exit__ does that for us. - raise - - -def check_call(*popenargs, **kwargs): - """Run command with arguments. Wait for command to complete. If - the exit code was zero then return, otherwise raise - CalledProcessError. The CalledProcessError object will have the - return code in the returncode attribute. - - The arguments are the same as for the call function. Example: - - check_call(["ls", "-l"]) - """ - retcode = call(*popenargs, **kwargs) - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise CalledProcessError(retcode, cmd) - return 0 - - -def check_output(*popenargs, timeout=None, **kwargs): - r"""Run command with arguments and return its output. - - If the exit code was non-zero it raises a CalledProcessError. 
The - CalledProcessError object will have the return code in the returncode - attribute and output in the output attribute. - - The arguments are the same as for the Popen constructor. Example: - - >>> check_output(["ls", "-l", "/dev/null"]) - b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' - - The stdout argument is not allowed as it is used internally. - To capture standard error in the result, use stderr=STDOUT. - - >>> check_output(["/bin/sh", "-c", - ... "ls -l non_existent_file ; exit 0"], - ... stderr=STDOUT) - b'ls: non_existent_file: No such file or directory\n' - - There is an additional optional argument, "input", allowing you to - pass a string to the subprocess's stdin. If you use this argument - you may not also use the Popen constructor's "stdin" argument, as - it too will be used internally. Example: - - >>> check_output(["sed", "-e", "s/foo/bar/"], - ... input=b"when in the course of fooman events\n") - b'when in the course of barman events\n' - - By default, all communication is in bytes, and therefore any "input" - should be bytes, and the return value will be bytes. If in text mode, - any "input" should be a string, and the return value will be a string - decoded according to locale encoding, or by "encoding" if set. Text mode - is triggered by setting any of text, encoding, errors or universal_newlines. - """ - for kw in ('stdout', 'check'): - if kw in kwargs: - raise ValueError(f'{kw} argument not allowed, it will be overridden.') - - if 'input' in kwargs and kwargs['input'] is None: - # Explicitly passing input=None was previously equivalent to passing an - # empty string. That is maintained here for backwards compatibility. - if kwargs.get('universal_newlines') or kwargs.get('text') or kwargs.get('encoding') \ - or kwargs.get('errors'): - empty = '' - else: - empty = b'' - kwargs['input'] = empty - - return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, - **kwargs).stdout - - -class CompletedProcess(object): - """A process that has finished running. - - This is returned by run(). - - Attributes: - args: The list or str args passed to run(). - returncode: The exit code of the process, negative for signals. - stdout: The standard output (None if not captured). - stderr: The standard error (None if not captured). - """ - def __init__(self, args, returncode, stdout=None, stderr=None): - self.args = args - self.returncode = returncode - self.stdout = stdout - self.stderr = stderr - - def __repr__(self): - args = ['args={!r}'.format(self.args), - 'returncode={!r}'.format(self.returncode)] - if self.stdout is not None: - args.append('stdout={!r}'.format(self.stdout)) - if self.stderr is not None: - args.append('stderr={!r}'.format(self.stderr)) - return "{}({})".format(type(self).__name__, ', '.join(args)) - - __class_getitem__ = classmethod(types.GenericAlias) - - - def check_returncode(self): - """Raise CalledProcessError if the exit code is non-zero.""" - if self.returncode: - raise CalledProcessError(self.returncode, self.args, self.stdout, - self.stderr) - - -def run(*popenargs, - input=None, capture_output=False, timeout=None, check=False, **kwargs): - """Run command with arguments and return a CompletedProcess instance. - - The returned instance will have attributes args, returncode, stdout and - stderr. By default, stdout and stderr are not captured, and those attributes - will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them, - or pass capture_output=True to capture both. 
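The input/text plumbing described above in one call: stdin is fed the string and stdout comes back decoded. A sketch assuming the stdlib subprocess module:

import subprocess
import sys

out = subprocess.check_output(
    [sys.executable, '-c',
     'import sys; sys.stdout.write(sys.stdin.read().upper())'],
    input='hello', text=True)
print(out)   # 'HELLO'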
- - If check is True and the exit code was non-zero, it raises a - CalledProcessError. The CalledProcessError object will have the return code - in the returncode attribute, and output & stderr attributes if those streams - were captured. - - If timeout (seconds) is given and the process takes too long, - a TimeoutExpired exception will be raised. - - There is an optional argument "input", allowing you to - pass bytes or a string to the subprocess's stdin. If you use this argument - you may not also use the Popen constructor's "stdin" argument, as - it will be used internally. - - By default, all communication is in bytes, and therefore any "input" should - be bytes, and the stdout and stderr will be bytes. If in text mode, any - "input" should be a string, and stdout and stderr will be strings decoded - according to locale encoding, or by "encoding" if set. Text mode is - triggered by setting any of text, encoding, errors or universal_newlines. - - The other arguments are the same as for the Popen constructor. - """ - if input is not None: - if kwargs.get('stdin') is not None: - raise ValueError('stdin and input arguments may not both be used.') - kwargs['stdin'] = PIPE - - if capture_output: - if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None: - raise ValueError('stdout and stderr arguments may not be used ' - 'with capture_output.') - kwargs['stdout'] = PIPE - kwargs['stderr'] = PIPE - - with Popen(*popenargs, **kwargs) as process: - try: - stdout, stderr = process.communicate(input, timeout=timeout) - except TimeoutExpired as exc: - process.kill() - if _mswindows: - # Windows accumulates the output in a single blocking - # read() call run on child threads, with the timeout - # being done in a join() on those threads. communicate() - # _after_ kill() is required to collect that and add it - # to the exception. - exc.stdout, exc.stderr = process.communicate() - else: - # POSIX _communicate already populated the output so - # far into the TimeoutExpired exception. - process.wait() - raise - except: # Including KeyboardInterrupt, communicate handled that. - process.kill() - # We don't call process.wait() as .__exit__ does that for us. - raise - retcode = process.poll() - if check and retcode: - raise CalledProcessError(retcode, process.args, - output=stdout, stderr=stderr) - return CompletedProcess(process.args, retcode, stdout, stderr) - - -def list2cmdline(seq): - """ - Translate a sequence of arguments into a command line - string, using the same rules as the MS C runtime: - - 1) Arguments are delimited by white space, which is either a - space or a tab. - - 2) A string surrounded by double quotation marks is - interpreted as a single argument, regardless of white space - contained within. A quoted string can be embedded in an - argument. - - 3) A double quotation mark preceded by a backslash is - interpreted as a literal double quotation mark. - - 4) Backslashes are interpreted literally, unless they - immediately precede a double quotation mark. - - 5) If backslashes immediately precede a double quotation mark, - every pair of backslashes is interpreted as a literal - backslash. If the number of backslashes is odd, the last - backslash escapes the next double quotation mark as - described in rule 3. 
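run() is the high-level entry point tying the pieces above together: capture_output wires both pipes, timeout raises TimeoutExpired, and check raises CalledProcessError. A sketch assuming the stdlib subprocess module:

import subprocess
import sys

cp = subprocess.run([sys.executable, '-c', 'print(6 * 7)'],
                    capture_output=True, text=True, timeout=30, check=True)
print(cp.returncode)       # 0
print(cp.stdout.strip())   # '42'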
- """ - - # See - # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx - # or search http://msdn.microsoft.com for - # "Parsing C++ Command-Line Arguments" - result = [] - needquote = False - for arg in map(os.fsdecode, seq): - bs_buf = [] - - # Add a space to separate this argument from the others - if result: - result.append(' ') - - needquote = (" " in arg) or ("\t" in arg) or not arg - if needquote: - result.append('"') - - for c in arg: - if c == '\\': - # Don't know if we need to double yet. - bs_buf.append(c) - elif c == '"': - # Double backslashes. - result.append('\\' * len(bs_buf)*2) - bs_buf = [] - result.append('\\"') - else: - # Normal char - if bs_buf: - result.extend(bs_buf) - bs_buf = [] - result.append(c) - - # Add remaining backslashes, if any. - if bs_buf: - result.extend(bs_buf) - - if needquote: - result.extend(bs_buf) - result.append('"') - - return ''.join(result) - - -# Various tools for executing commands and looking at their output and status. -# - -def getstatusoutput(cmd, *, encoding=None, errors=None): - """Return (exitcode, output) of executing cmd in a shell. - - Execute the string 'cmd' in a shell with 'check_output' and - return a 2-tuple (status, output). The locale encoding is used - to decode the output and process newlines. - - A trailing newline is stripped from the output. - The exit status for the command can be interpreted - according to the rules for the function 'wait'. Example: - - >>> import subprocess - >>> subprocess.getstatusoutput('ls /bin/ls') - (0, '/bin/ls') - >>> subprocess.getstatusoutput('cat /bin/junk') - (1, 'cat: /bin/junk: No such file or directory') - >>> subprocess.getstatusoutput('/bin/junk') - (127, 'sh: /bin/junk: not found') - >>> subprocess.getstatusoutput('/bin/kill $$') - (-15, '') - """ - try: - data = check_output(cmd, shell=True, text=True, stderr=STDOUT, - encoding=encoding, errors=errors) - exitcode = 0 - except CalledProcessError as ex: - data = ex.output - exitcode = ex.returncode - if data[-1:] == '\n': - data = data[:-1] - return exitcode, data - -def getoutput(cmd, *, encoding=None, errors=None): - """Return output (stdout or stderr) of executing cmd in a shell. - - Like getstatusoutput(), except the exit status is ignored and the return - value is a string containing the command's output. Example: - - >>> import subprocess - >>> subprocess.getoutput('ls /bin/ls') - '/bin/ls' - """ - return getstatusoutput(cmd, encoding=encoding, errors=errors)[1] - - - -def _use_posix_spawn(): - """Check if posix_spawn() can be used for subprocess. - - subprocess requires a posix_spawn() implementation that properly reports - errors to the parent process, & sets errno on the following failures: - - * Process attribute actions failed. - * File actions failed. - * exec() failed. - - Prefer an implementation which can use vfork() in some cases for best - performance. 
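The five MS C runtime quoting rules above can be checked directly. Note that the module deliberately leaves list2cmdline out of __all__ (it is an internal detail, per the note earlier), so this sketch leans on a private-by-convention helper:

import subprocess

print(subprocess.list2cmdline(['a b', 'c"d', '']))
# "a b" c\"d ""   -- spaces force quotes, a quote gets backslash-escaped,
#                    and an empty argument becomes ""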
- """ - if _mswindows or not hasattr(os, 'posix_spawn'): - # os.posix_spawn() is not available - return False - - if ((_env := os.environ.get('_PYTHON_SUBPROCESS_USE_POSIX_SPAWN')) in ('0', '1')): - return bool(int(_env)) - - if sys.platform in ('darwin', 'sunos5'): - # posix_spawn() is a syscall on both macOS and Solaris, - # and properly reports errors - return True - - # Check libc name and runtime libc version - try: - ver = os.confstr('CS_GNU_LIBC_VERSION') - # parse 'glibc 2.28' as ('glibc', (2, 28)) - parts = ver.split(maxsplit=1) - if len(parts) != 2: - # reject unknown format - raise ValueError - libc = parts[0] - version = tuple(map(int, parts[1].split('.'))) - - if sys.platform == 'linux' and libc == 'glibc' and version >= (2, 24): - # glibc 2.24 has a new Linux posix_spawn implementation using vfork - # which properly reports errors to the parent process. - return True - # Note: Don't use the implementation in earlier glibc because it doesn't - # use vfork (even if glibc 2.26 added a pipe to properly report errors - # to the parent process). - except (AttributeError, ValueError, OSError): - # os.confstr() or CS_GNU_LIBC_VERSION value not available - pass - - # By default, assume that posix_spawn() does not properly report errors. - return False - - -# These are primarily fail-safe knobs for negatives. A True value does not -# guarantee the given libc/syscall API will be used. -_USE_POSIX_SPAWN = _use_posix_spawn() -_USE_VFORK = True -_HAVE_POSIX_SPAWN_CLOSEFROM = hasattr(os, 'POSIX_SPAWN_CLOSEFROM') - - -class Popen: - """ Execute a child program in a new process. - - For a complete description of the arguments see the Python documentation. - - Arguments: - args: A string, or a sequence of program arguments. - - bufsize: supplied as the buffering argument to the open() function when - creating the stdin/stdout/stderr pipe file objects - - executable: A replacement program to execute. - - stdin, stdout and stderr: These specify the executed programs' standard - input, standard output and standard error file handles, respectively. - - preexec_fn: (POSIX only) An object to be called in the child process - just before the child is executed. - - close_fds: Controls closing or inheriting of file descriptors. - - shell: If true, the command will be executed through the shell. - - cwd: Sets the current directory before the child is executed. - - env: Defines the environment variables for the new process. - - text: If true, decode stdin, stdout and stderr using the given encoding - (if set) or the system default otherwise. - - universal_newlines: Alias of text, provided for backwards compatibility. - - startupinfo and creationflags (Windows only) - - restore_signals (POSIX only) - - start_new_session (POSIX only) - - process_group (POSIX only) - - group (POSIX only) - - extra_groups (POSIX only) - - user (POSIX only) - - umask (POSIX only) - - pass_fds (POSIX only) - - encoding and errors: Text mode encoding and error handling to use for - file objects stdin, stdout and stderr. 
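Most of the Popen parameters listed above have sensible defaults; the common case is just args plus pipe wiring, with the context manager handling cleanup. A sketch assuming the stdlib subprocess module:

import subprocess
import sys

with subprocess.Popen([sys.executable, '-c', 'print("hi")'],
                      stdout=subprocess.PIPE, text=True) as proc:
    out, _ = proc.communicate(timeout=30)
print(out.strip(), proc.returncode)   # hi 0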
- - Attributes: - stdin, stdout, stderr, pid, returncode - """ - _child_created = False # Set here since __del__ checks it - - def __init__(self, args, bufsize=-1, executable=None, - stdin=None, stdout=None, stderr=None, - preexec_fn=None, close_fds=True, - shell=False, cwd=None, env=None, universal_newlines=None, - startupinfo=None, creationflags=0, - restore_signals=True, start_new_session=False, - pass_fds=(), *, user=None, group=None, extra_groups=None, - encoding=None, errors=None, text=None, umask=-1, pipesize=-1, - process_group=None): - """Create new Popen instance.""" - if not _can_fork_exec: - raise OSError( - errno.ENOTSUP, f"{sys.platform} does not support processes." - ) - - _cleanup() - # Held while anything is calling waitpid before returncode has been - # updated to prevent clobbering returncode if wait() or poll() are - # called from multiple threads at once. After acquiring the lock, - # code must re-check self.returncode to see if another thread just - # finished a waitpid() call. - self._waitpid_lock = threading.Lock() - - self._input = None - self._communication_started = False - if bufsize is None: - bufsize = -1 # Restore default - if not isinstance(bufsize, int): - raise TypeError("bufsize must be an integer") - - if stdout is STDOUT: - raise ValueError("STDOUT can only be used for stderr") - - if pipesize is None: - pipesize = -1 # Restore default - if not isinstance(pipesize, int): - raise TypeError("pipesize must be an integer") - - if _mswindows: - if preexec_fn is not None: - raise ValueError("preexec_fn is not supported on Windows " - "platforms") - else: - # POSIX - if pass_fds and not close_fds: - warnings.warn("pass_fds overriding close_fds.", RuntimeWarning) - close_fds = True - if startupinfo is not None: - raise ValueError("startupinfo is only supported on Windows " - "platforms") - if creationflags != 0: - raise ValueError("creationflags is only supported on Windows " - "platforms") - - self.args = args - self.stdin = None - self.stdout = None - self.stderr = None - self.pid = None - self.returncode = None - self.encoding = encoding - self.errors = errors - self.pipesize = pipesize - - # Validate the combinations of text and universal_newlines - if (text is not None and universal_newlines is not None - and bool(universal_newlines) != bool(text)): - raise SubprocessError('Cannot disambiguate when both text ' - 'and universal_newlines are supplied but ' - 'different. Pass one or the other.') - - self.text_mode = encoding or errors or text or universal_newlines - if self.text_mode and encoding is None: - self.encoding = encoding = _text_encoding() - - # How long to resume waiting on a child after the first ^C. - # There is no right value for this. The purpose is to be polite - # yet remain good for interactive users trying to exit a tool. - self._sigint_wait_secs = 0.25 # 1/xkcd221.getRandomNumber() - - self._closed_child_pipe_fds = False - - if self.text_mode: - if bufsize == 1: - line_buffering = True - # Use the default buffer size for the underlying binary streams - # since they don't support line buffering. 
- bufsize = -1 - else: - line_buffering = False - - if process_group is None: - process_group = -1 # The internal APIs are int-only - - gid = None - if group is not None: - if not hasattr(os, 'setregid'): - raise ValueError("The 'group' parameter is not supported on the " - "current platform") - - elif isinstance(group, str): - try: - import grp - except ImportError: - raise ValueError("The group parameter cannot be a string " - "on systems without the grp module") - - gid = grp.getgrnam(group).gr_gid - elif isinstance(group, int): - gid = group - else: - raise TypeError("Group must be a string or an integer, not {}" - .format(type(group))) - - if gid < 0: - raise ValueError(f"Group ID cannot be negative, got {gid}") - - gids = None - if extra_groups is not None: - if not hasattr(os, 'setgroups'): - raise ValueError("The 'extra_groups' parameter is not " - "supported on the current platform") - - elif isinstance(extra_groups, str): - raise ValueError("Groups must be a list, not a string") - - gids = [] - for extra_group in extra_groups: - if isinstance(extra_group, str): - try: - import grp - except ImportError: - raise ValueError("Items in extra_groups cannot be " - "strings on systems without the " - "grp module") - - gids.append(grp.getgrnam(extra_group).gr_gid) - elif isinstance(extra_group, int): - gids.append(extra_group) - else: - raise TypeError("Items in extra_groups must be a string " - "or integer, not {}" - .format(type(extra_group))) - - # make sure that the gids are all positive here so we can do less - # checking in the C code - for gid_check in gids: - if gid_check < 0: - raise ValueError(f"Group ID cannot be negative, got {gid_check}") - - uid = None - if user is not None: - if not hasattr(os, 'setreuid'): - raise ValueError("The 'user' parameter is not supported on " - "the current platform") - - elif isinstance(user, str): - try: - import pwd - except ImportError: - raise ValueError("The user parameter cannot be a string " - "on systems without the pwd module") - uid = pwd.getpwnam(user).pw_uid - elif isinstance(user, int): - uid = user - else: - raise TypeError("User must be a string or an integer") - - if uid < 0: - raise ValueError(f"User ID cannot be negative, got {uid}") - - # Input and output objects. The general principle is like - # this: - # - # Parent Child - # ------ ----- - # p2cwrite ---stdin---> p2cread - # c2pread <--stdout--- c2pwrite - # errread <--stderr--- errwrite - # - # On POSIX, the child objects are file descriptors. On - # Windows, these are Windows file handles. The parent objects - # are file descriptors on both platforms. The parent objects - # are -1 when not using PIPEs. The child objects are -1 - # when not redirecting. - - (p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) = self._get_handles(stdin, stdout, stderr) - - # From here on, raising exceptions may cause file descriptor leakage - - # We wrap OS handles *before* launching the child, otherwise a - # quickly terminating child could make our fds unwrappable - # (see #8458). 
- - if _mswindows: - if p2cwrite != -1: - p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0) - if c2pread != -1: - c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0) - if errread != -1: - errread = msvcrt.open_osfhandle(errread.Detach(), 0) - - try: - if p2cwrite != -1: - self.stdin = io.open(p2cwrite, 'wb', bufsize) - if self.text_mode: - self.stdin = io.TextIOWrapper(self.stdin, write_through=True, - line_buffering=line_buffering, - encoding=encoding, errors=errors) - if c2pread != -1: - self.stdout = io.open(c2pread, 'rb', bufsize) - if self.text_mode: - self.stdout = io.TextIOWrapper(self.stdout, - encoding=encoding, errors=errors) - if errread != -1: - self.stderr = io.open(errread, 'rb', bufsize) - if self.text_mode: - self.stderr = io.TextIOWrapper(self.stderr, - encoding=encoding, errors=errors) - - self._execute_child(args, executable, preexec_fn, close_fds, - pass_fds, cwd, env, - startupinfo, creationflags, shell, - p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite, - restore_signals, - gid, gids, uid, umask, - start_new_session, process_group) - except: - # Cleanup if the child failed starting. - for f in filter(None, (self.stdin, self.stdout, self.stderr)): - try: - f.close() - except OSError: - pass # Ignore EBADF or other errors. - - if not self._closed_child_pipe_fds: - to_close = [] - if stdin == PIPE: - to_close.append(p2cread) - if stdout == PIPE: - to_close.append(c2pwrite) - if stderr == PIPE: - to_close.append(errwrite) - if hasattr(self, '_devnull'): - to_close.append(self._devnull) - for fd in to_close: - try: - if _mswindows and isinstance(fd, Handle): - fd.Close() - else: - os.close(fd) - except OSError: - pass - - raise - - def __repr__(self): - obj_repr = ( - f"<{self.__class__.__name__}: " - f"returncode: {self.returncode} args: {self.args!r}>" - ) - if len(obj_repr) > 80: - obj_repr = obj_repr[:76] + "...>" - return obj_repr - - __class_getitem__ = classmethod(types.GenericAlias) - - @property - def universal_newlines(self): - # universal_newlines as retained as an alias of text_mode for API - # compatibility. bpo-31756 - return self.text_mode - - @universal_newlines.setter - def universal_newlines(self, universal_newlines): - self.text_mode = bool(universal_newlines) - - def _translate_newlines(self, data, encoding, errors): - data = data.decode(encoding, errors) - return data.replace("\r\n", "\n").replace("\r", "\n") - - def __enter__(self): - return self - - def __exit__(self, exc_type, value, traceback): - if self.stdout: - self.stdout.close() - if self.stderr: - self.stderr.close() - try: # Flushing a BufferedWriter may raise an error - if self.stdin: - self.stdin.close() - finally: - if exc_type == KeyboardInterrupt: - # https://bugs.python.org/issue25942 - # In the case of a KeyboardInterrupt we assume the SIGINT - # was also already sent to our child processes. We can't - # block indefinitely as that is not user friendly. - # If we have not already waited a brief amount of time in - # an interrupted .wait() or .communicate() call, do so here - # for consistency. - if self._sigint_wait_secs > 0: - try: - self._wait(timeout=self._sigint_wait_secs) - except TimeoutExpired: - pass - self._sigint_wait_secs = 0 # Note that this has been done. - return # resume the KeyboardInterrupt - - # Wait for the process to terminate, to avoid zombies. - self.wait() - - def __del__(self, _maxsize=sys.maxsize, _warn=warnings.warn): - if not self._child_created: - # We didn't get to successfully create a child process. 
- return - if self.returncode is None: - # Not reading subprocess exit status creates a zombie process which - # is only destroyed at the parent python process exit - _warn("subprocess %s is still running" % self.pid, - ResourceWarning, source=self) - # In case the child hasn't been waited on, check if it's done. - self._internal_poll(_deadstate=_maxsize) - if self.returncode is None and _active is not None: - # Child is still running, keep us alive until we can wait on it. - _active.append(self) - - def _get_devnull(self): - if not hasattr(self, '_devnull'): - self._devnull = os.open(os.devnull, os.O_RDWR) - return self._devnull - - def _stdin_write(self, input): - if input: - try: - self.stdin.write(input) - except BrokenPipeError: - pass # communicate() must ignore broken pipe errors. - except OSError as exc: - if exc.errno == errno.EINVAL: - # bpo-19612, bpo-30418: On Windows, stdin.write() fails - # with EINVAL if the child process exited or if the child - # process is still running but closed the pipe. - pass - else: - raise - - try: - self.stdin.close() - except BrokenPipeError: - pass # communicate() must ignore broken pipe errors. - except OSError as exc: - if exc.errno == errno.EINVAL: - pass - else: - raise - - def communicate(self, input=None, timeout=None): - """Interact with process: Send data to stdin and close it. - Read data from stdout and stderr, until end-of-file is - reached. Wait for process to terminate. - - The optional "input" argument should be data to be sent to the - child process, or None, if no data should be sent to the child. - communicate() returns a tuple (stdout, stderr). - - By default, all communication is in bytes, and therefore any - "input" should be bytes, and the (stdout, stderr) will be bytes. - If in text mode (indicated by self.text_mode), any "input" should - be a string, and (stdout, stderr) will be strings decoded - according to locale encoding, or by "encoding" if set. Text mode - is triggered by setting any of text, encoding, errors or - universal_newlines. - """ - - if self._communication_started and input: - raise ValueError("Cannot send input after starting communication") - - # Optimization: If we are not worried about timeouts, we haven't - # started communicating, and we have one or zero pipes, using select() - # or threads is unnecessary. - if (timeout is None and not self._communication_started and - [self.stdin, self.stdout, self.stderr].count(None) >= 2): - stdout = None - stderr = None - if self.stdin: - self._stdin_write(input) - elif self.stdout: - stdout = self.stdout.read() - self.stdout.close() - elif self.stderr: - stderr = self.stderr.read() - self.stderr.close() - self.wait() - else: - if timeout is not None: - endtime = _time() + timeout - else: - endtime = None - - try: - stdout, stderr = self._communicate(input, endtime, timeout) - except KeyboardInterrupt: - # https://bugs.python.org/issue25942 - # See the detailed comment in .wait(). - if timeout is not None: - sigint_timeout = min(self._sigint_wait_secs, - self._remaining_time(endtime)) - else: - sigint_timeout = self._sigint_wait_secs - self._sigint_wait_secs = 0 # nothing else should wait. 
- try: - self._wait(timeout=sigint_timeout) - except TimeoutExpired: - pass - raise # resume the KeyboardInterrupt - - finally: - self._communication_started = True - try: - sts = self.wait(timeout=self._remaining_time(endtime)) - except TimeoutExpired as exc: - exc.timeout = timeout - raise - - return (stdout, stderr) - - - def poll(self): - """Check if child process has terminated. Set and return returncode - attribute.""" - return self._internal_poll() - - - def _remaining_time(self, endtime): - """Convenience for _communicate when computing timeouts.""" - if endtime is None: - return None - else: - return endtime - _time() - - - def _check_timeout(self, endtime, orig_timeout, stdout_seq, stderr_seq, - skip_check_and_raise=False): - """Convenience for checking if a timeout has expired.""" - if endtime is None: - return - if skip_check_and_raise or _time() > endtime: - raise TimeoutExpired( - self.args, orig_timeout, - output=b''.join(stdout_seq) if stdout_seq else None, - stderr=b''.join(stderr_seq) if stderr_seq else None) - - - def wait(self, timeout=None): - """Wait for child process to terminate; returns self.returncode.""" - if timeout is not None: - endtime = _time() + timeout - try: - return self._wait(timeout=timeout) - except KeyboardInterrupt: - # https://bugs.python.org/issue25942 - # The first keyboard interrupt waits briefly for the child to - # exit under the common assumption that it also received the ^C - # generated SIGINT and will exit rapidly. - if timeout is not None: - sigint_timeout = min(self._sigint_wait_secs, - self._remaining_time(endtime)) - else: - sigint_timeout = self._sigint_wait_secs - self._sigint_wait_secs = 0 # nothing else should wait. - try: - self._wait(timeout=sigint_timeout) - except TimeoutExpired: - pass - raise # resume the KeyboardInterrupt - - def _close_pipe_fds(self, - p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite): - # self._devnull is not always defined. - devnull_fd = getattr(self, '_devnull', None) - - with contextlib.ExitStack() as stack: - if _mswindows: - if p2cread != -1: - stack.callback(p2cread.Close) - if c2pwrite != -1: - stack.callback(c2pwrite.Close) - if errwrite != -1: - stack.callback(errwrite.Close) - else: - if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd: - stack.callback(os.close, p2cread) - if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd: - stack.callback(os.close, c2pwrite) - if errwrite != -1 and errread != -1 and errwrite != devnull_fd: - stack.callback(os.close, errwrite) - - if devnull_fd is not None: - stack.callback(os.close, devnull_fd) - - # Prevent a double close of these handles/fds from __init__ on error. 
- self._closed_child_pipe_fds = True - - @contextlib.contextmanager - def _on_error_fd_closer(self): - """Helper to ensure file descriptors opened in _get_handles are closed""" - to_close = [] - try: - yield to_close - except: - if hasattr(self, '_devnull'): - to_close.append(self._devnull) - del self._devnull - for fd in to_close: - try: - if _mswindows and isinstance(fd, Handle): - fd.Close() - else: - os.close(fd) - except OSError: - pass - raise - - if _mswindows: - # - # Windows methods - # - def _get_handles(self, stdin, stdout, stderr): - """Construct and return tuple with IO objects: - p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite - """ - if stdin is None and stdout is None and stderr is None: - return (-1, -1, -1, -1, -1, -1) - - p2cread, p2cwrite = -1, -1 - c2pread, c2pwrite = -1, -1 - errread, errwrite = -1, -1 - - with self._on_error_fd_closer() as err_close_fds: - if stdin is None: - p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE) - if p2cread is None: - p2cread, _ = _winapi.CreatePipe(None, 0) - p2cread = Handle(p2cread) - err_close_fds.append(p2cread) - _winapi.CloseHandle(_) - elif stdin == PIPE: - p2cread, p2cwrite = _winapi.CreatePipe(None, 0) - p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite) - err_close_fds.extend((p2cread, p2cwrite)) - elif stdin == DEVNULL: - p2cread = msvcrt.get_osfhandle(self._get_devnull()) - elif isinstance(stdin, int): - p2cread = msvcrt.get_osfhandle(stdin) - else: - # Assuming file-like object - p2cread = msvcrt.get_osfhandle(stdin.fileno()) - p2cread = self._make_inheritable(p2cread) - - if stdout is None: - c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE) - if c2pwrite is None: - _, c2pwrite = _winapi.CreatePipe(None, 0) - c2pwrite = Handle(c2pwrite) - err_close_fds.append(c2pwrite) - _winapi.CloseHandle(_) - elif stdout == PIPE: - c2pread, c2pwrite = _winapi.CreatePipe(None, 0) - c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite) - err_close_fds.extend((c2pread, c2pwrite)) - elif stdout == DEVNULL: - c2pwrite = msvcrt.get_osfhandle(self._get_devnull()) - elif isinstance(stdout, int): - c2pwrite = msvcrt.get_osfhandle(stdout) - else: - # Assuming file-like object - c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) - c2pwrite = self._make_inheritable(c2pwrite) - - if stderr is None: - errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE) - if errwrite is None: - _, errwrite = _winapi.CreatePipe(None, 0) - errwrite = Handle(errwrite) - err_close_fds.append(errwrite) - _winapi.CloseHandle(_) - elif stderr == PIPE: - errread, errwrite = _winapi.CreatePipe(None, 0) - errread, errwrite = Handle(errread), Handle(errwrite) - err_close_fds.extend((errread, errwrite)) - elif stderr == STDOUT: - errwrite = c2pwrite - elif stderr == DEVNULL: - errwrite = msvcrt.get_osfhandle(self._get_devnull()) - elif isinstance(stderr, int): - errwrite = msvcrt.get_osfhandle(stderr) - else: - # Assuming file-like object - errwrite = msvcrt.get_osfhandle(stderr.fileno()) - errwrite = self._make_inheritable(errwrite) - - return (p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - - - def _make_inheritable(self, handle): - """Return a duplicate of handle, which is inheritable""" - h = _winapi.DuplicateHandle( - _winapi.GetCurrentProcess(), handle, - _winapi.GetCurrentProcess(), 0, 1, - _winapi.DUPLICATE_SAME_ACCESS) - return Handle(h) - - - def _filter_handle_list(self, handle_list): - """Filter out console handles that can't be used - in lpAttributeList["handle_list"] and make sure the list - isn't empty. 
This also removes duplicate handles.""" - # An handle with it's lowest two bits set might be a special console - # handle that if passed in lpAttributeList["handle_list"], will - # cause it to fail. - return list({handle for handle in handle_list - if handle & 0x3 != 0x3 - or _winapi.GetFileType(handle) != - _winapi.FILE_TYPE_CHAR}) - - - def _execute_child(self, args, executable, preexec_fn, close_fds, - pass_fds, cwd, env, - startupinfo, creationflags, shell, - p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite, - unused_restore_signals, - unused_gid, unused_gids, unused_uid, - unused_umask, - unused_start_new_session, unused_process_group): - """Execute program (MS Windows version)""" - - assert not pass_fds, "pass_fds not supported on Windows." - - if isinstance(args, str): - pass - elif isinstance(args, bytes): - if shell: - raise TypeError('bytes args is not allowed on Windows') - args = list2cmdline([args]) - elif isinstance(args, os.PathLike): - if shell: - raise TypeError('path-like args is not allowed when ' - 'shell is true') - args = list2cmdline([args]) - else: - args = list2cmdline(args) - - if executable is not None: - executable = os.fsdecode(executable) - - # Process startup details - if startupinfo is None: - startupinfo = STARTUPINFO() - else: - # bpo-34044: Copy STARTUPINFO since it is modified above, - # so the caller can reuse it multiple times. - startupinfo = startupinfo.copy() - - use_std_handles = -1 not in (p2cread, c2pwrite, errwrite) - if use_std_handles: - startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES - startupinfo.hStdInput = p2cread - startupinfo.hStdOutput = c2pwrite - startupinfo.hStdError = errwrite - - attribute_list = startupinfo.lpAttributeList - have_handle_list = bool(attribute_list and - "handle_list" in attribute_list and - attribute_list["handle_list"]) - - # If we were given an handle_list or need to create one - if have_handle_list or (use_std_handles and close_fds): - if attribute_list is None: - attribute_list = startupinfo.lpAttributeList = {} - handle_list = attribute_list["handle_list"] = \ - list(attribute_list.get("handle_list", [])) - - if use_std_handles: - handle_list += [int(p2cread), int(c2pwrite), int(errwrite)] - - handle_list[:] = self._filter_handle_list(handle_list) - - if handle_list: - if not close_fds: - warnings.warn("startupinfo.lpAttributeList['handle_list'] " - "overriding close_fds", RuntimeWarning) - - # When using the handle_list we always request to inherit - # handles but the only handles that will be inherited are - # the ones in the handle_list - close_fds = False - - if shell: - startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW - startupinfo.wShowWindow = _winapi.SW_HIDE - if not executable: - # gh-101283: without a fully-qualified path, before Windows - # checks the system directories, it first looks in the - # application directory, and also the current directory if - # NeedCurrentDirectoryForExePathW(ExeName) is true, so try - # to avoid executing unqualified "cmd.exe". 
- comspec = os.environ.get('ComSpec') - if not comspec: - system_root = os.environ.get('SystemRoot', '') - comspec = os.path.join(system_root, 'System32', 'cmd.exe') - if not os.path.isabs(comspec): - raise FileNotFoundError('shell not found: neither %ComSpec% nor %SystemRoot% is set') - if os.path.isabs(comspec): - executable = comspec - else: - comspec = executable - - args = '{} /c "{}"'.format (comspec, args) - - if cwd is not None: - cwd = os.fsdecode(cwd) - - sys.audit("subprocess.Popen", executable, args, cwd, env) - - # Start the process - try: - hp, ht, pid, tid = _winapi.CreateProcess(executable, args, - # no special security - None, None, - int(not close_fds), - creationflags, - env, - cwd, - startupinfo) - finally: - # Child is launched. Close the parent's copy of those pipe - # handles that only the child should have open. You need - # to make sure that no handles to the write end of the - # output pipe are maintained in this process or else the - # pipe will not close when the child process exits and the - # ReadFile will hang. - self._close_pipe_fds(p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - - # Retain the process handle, but close the thread handle - self._child_created = True - self._handle = Handle(hp) - self.pid = pid - _winapi.CloseHandle(ht) - - def _internal_poll(self, _deadstate=None, - _WaitForSingleObject=_winapi.WaitForSingleObject, - _WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0, - _GetExitCodeProcess=_winapi.GetExitCodeProcess): - """Check if child process has terminated. Returns returncode - attribute. - - This method is called by __del__, so it can only refer to objects - in its local scope. - - """ - if self.returncode is None: - if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0: - self.returncode = _GetExitCodeProcess(self._handle) - return self.returncode - - - def _wait(self, timeout): - """Internal implementation of wait() on Windows.""" - if timeout is None: - timeout_millis = _winapi.INFINITE - elif timeout <= 0: - timeout_millis = 0 - else: - timeout_millis = int(timeout * 1000) - if self.returncode is None: - # API note: Returns immediately if timeout_millis == 0. - result = _winapi.WaitForSingleObject(self._handle, - timeout_millis) - if result == _winapi.WAIT_TIMEOUT: - raise TimeoutExpired(self.args, timeout) - self.returncode = _winapi.GetExitCodeProcess(self._handle) - return self.returncode - - - def _readerthread(self, fh, buffer): - buffer.append(fh.read()) - fh.close() - - - def _writerthread(self, input): - self._stdin_write(input) - - - def _communicate(self, input, endtime, orig_timeout): - # Start reader threads feeding into a list hanging off of this - # object, unless they've already been started. - if self.stdout and not hasattr(self, "_stdout_buff"): - self._stdout_buff = [] - self.stdout_thread = \ - threading.Thread(target=self._readerthread, - args=(self.stdout, self._stdout_buff)) - self.stdout_thread.daemon = True - self.stdout_thread.start() - if self.stderr and not hasattr(self, "_stderr_buff"): - self._stderr_buff = [] - self.stderr_thread = \ - threading.Thread(target=self._readerthread, - args=(self.stderr, self._stderr_buff)) - self.stderr_thread.daemon = True - self.stderr_thread.start() - - # Start writer thread to send input to stdin, unless already - # started. The thread writes input and closes stdin when done, - # or continues in the background on timeout. 
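
The comment above is why a `TimeoutExpired` raised from `communicate()` is recoverable on Windows: the writer and reader threads keep running, so the call can simply be retried. This is the pattern the `subprocess` documentation recommends (editor's illustration; `["some_cmd"]` is a placeholder):

```python
from subprocess import Popen, PIPE, TimeoutExpired

proc = Popen(["some_cmd"], stdout=PIPE)
try:
    outs, errs = proc.communicate(timeout=15)
except TimeoutExpired:
    proc.kill()
    outs, errs = proc.communicate()  # threads resume; drains remaining output
```
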
- if self.stdin and not hasattr(self, "_stdin_thread"): - self._stdin_thread = \ - threading.Thread(target=self._writerthread, - args=(input,)) - self._stdin_thread.daemon = True - self._stdin_thread.start() - - # Wait for the writer thread, or time out. If we time out, the - # thread remains writing and the fd left open in case the user - # calls communicate again. - if hasattr(self, "_stdin_thread"): - self._stdin_thread.join(self._remaining_time(endtime)) - if self._stdin_thread.is_alive(): - raise TimeoutExpired(self.args, orig_timeout) - - # Wait for the reader threads, or time out. If we time out, the - # threads remain reading and the fds left open in case the user - # calls communicate again. - if self.stdout is not None: - self.stdout_thread.join(self._remaining_time(endtime)) - if self.stdout_thread.is_alive(): - raise TimeoutExpired(self.args, orig_timeout) - if self.stderr is not None: - self.stderr_thread.join(self._remaining_time(endtime)) - if self.stderr_thread.is_alive(): - raise TimeoutExpired(self.args, orig_timeout) - - # Collect the output from and close both pipes, now that we know - # both have been read successfully. - stdout = None - stderr = None - if self.stdout: - stdout = self._stdout_buff - self.stdout.close() - if self.stderr: - stderr = self._stderr_buff - self.stderr.close() - - # All data exchanged. Translate lists into strings. - stdout = stdout[0] if stdout else None - stderr = stderr[0] if stderr else None - - return (stdout, stderr) - - def send_signal(self, sig): - """Send a signal to the process.""" - # Don't signal a process that we know has already died. - if self.returncode is not None: - return - if sig == signal.SIGTERM: - self.terminate() - elif sig == signal.CTRL_C_EVENT: - os.kill(self.pid, signal.CTRL_C_EVENT) - elif sig == signal.CTRL_BREAK_EVENT: - os.kill(self.pid, signal.CTRL_BREAK_EVENT) - else: - raise ValueError("Unsupported signal: {}".format(sig)) - - def terminate(self): - """Terminates the process.""" - # Don't terminate a process that we know has already died. - if self.returncode is not None: - return - try: - _winapi.TerminateProcess(self._handle, 1) - except PermissionError: - # ERROR_ACCESS_DENIED (winerror 5) is received when the - # process already died. 
- rc = _winapi.GetExitCodeProcess(self._handle) - if rc == _winapi.STILL_ACTIVE: - raise - self.returncode = rc - - kill = terminate - - else: - # - # POSIX methods - # - def _get_handles(self, stdin, stdout, stderr): - """Construct and return tuple with IO objects: - p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite - """ - p2cread, p2cwrite = -1, -1 - c2pread, c2pwrite = -1, -1 - errread, errwrite = -1, -1 - - with self._on_error_fd_closer() as err_close_fds: - if stdin is None: - pass - elif stdin == PIPE: - p2cread, p2cwrite = os.pipe() - err_close_fds.extend((p2cread, p2cwrite)) - if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): - fcntl.fcntl(p2cwrite, fcntl.F_SETPIPE_SZ, self.pipesize) - elif stdin == DEVNULL: - p2cread = self._get_devnull() - elif isinstance(stdin, int): - p2cread = stdin - else: - # Assuming file-like object - p2cread = stdin.fileno() - - if stdout is None: - pass - elif stdout == PIPE: - c2pread, c2pwrite = os.pipe() - err_close_fds.extend((c2pread, c2pwrite)) - if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): - fcntl.fcntl(c2pwrite, fcntl.F_SETPIPE_SZ, self.pipesize) - elif stdout == DEVNULL: - c2pwrite = self._get_devnull() - elif isinstance(stdout, int): - c2pwrite = stdout - else: - # Assuming file-like object - c2pwrite = stdout.fileno() - - if stderr is None: - pass - elif stderr == PIPE: - errread, errwrite = os.pipe() - err_close_fds.extend((errread, errwrite)) - if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): - fcntl.fcntl(errwrite, fcntl.F_SETPIPE_SZ, self.pipesize) - elif stderr == STDOUT: - if c2pwrite != -1: - errwrite = c2pwrite - else: # child's stdout is not set, use parent's stdout - errwrite = sys.__stdout__.fileno() - elif stderr == DEVNULL: - errwrite = self._get_devnull() - elif isinstance(stderr, int): - errwrite = stderr - else: - # Assuming file-like object - errwrite = stderr.fileno() - - return (p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - - - def _posix_spawn(self, args, executable, env, restore_signals, close_fds, - p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite): - """Execute program using os.posix_spawn().""" - kwargs = {} - if restore_signals: - # See _Py_RestoreSignals() in Python/pylifecycle.c - sigset = [] - for signame in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'): - signum = getattr(signal, signame, None) - if signum is not None: - sigset.append(signum) - kwargs['setsigdef'] = sigset - - file_actions = [] - for fd in (p2cwrite, c2pread, errread): - if fd != -1: - file_actions.append((os.POSIX_SPAWN_CLOSE, fd)) - for fd, fd2 in ( - (p2cread, 0), - (c2pwrite, 1), - (errwrite, 2), - ): - if fd != -1: - file_actions.append((os.POSIX_SPAWN_DUP2, fd, fd2)) - - if close_fds: - file_actions.append((os.POSIX_SPAWN_CLOSEFROM, 3)) - - if file_actions: - kwargs['file_actions'] = file_actions - - self.pid = os.posix_spawn(executable, args, env, **kwargs) - self._child_created = True - - self._close_pipe_fds(p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - - def _execute_child(self, args, executable, preexec_fn, close_fds, - pass_fds, cwd, env, - startupinfo, creationflags, shell, - p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite, - restore_signals, - gid, gids, uid, umask, - start_new_session, process_group): - """Execute program (POSIX version)""" - - if isinstance(args, (str, bytes)): - args = [args] - elif isinstance(args, os.PathLike): - if shell: - raise TypeError('path-like args is not allowed when ' - 'shell is true') - args = [args] - else: - args = 
list(args) - - if shell: - # On Android the default shell is at '/system/bin/sh'. - unix_shell = ('/system/bin/sh' if - hasattr(sys, 'getandroidapilevel') else '/bin/sh') - args = [unix_shell, "-c"] + args - if executable: - args[0] = executable - - if executable is None: - executable = args[0] - - sys.audit("subprocess.Popen", executable, args, cwd, env) - - if (_USE_POSIX_SPAWN - and os.path.dirname(executable) - and preexec_fn is None - and (not close_fds or _HAVE_POSIX_SPAWN_CLOSEFROM) - and not pass_fds - and cwd is None - and (p2cread == -1 or p2cread > 2) - and (c2pwrite == -1 or c2pwrite > 2) - and (errwrite == -1 or errwrite > 2) - and not start_new_session - and process_group == -1 - and gid is None - and gids is None - and uid is None - and umask < 0): - self._posix_spawn(args, executable, env, restore_signals, close_fds, - p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - return - - orig_executable = executable - - # For transferring possible exec failure from child to parent. - # Data format: "exception name:hex errno:description" - # Pickle is not used; it is complex and involves memory allocation. - errpipe_read, errpipe_write = os.pipe() - # errpipe_write must not be in the standard io 0, 1, or 2 fd range. - low_fds_to_close = [] - while errpipe_write < 3: - low_fds_to_close.append(errpipe_write) - errpipe_write = os.dup(errpipe_write) - for low_fd in low_fds_to_close: - os.close(low_fd) - try: - try: - # We must avoid complex work that could involve - # malloc or free in the child process to avoid - # potential deadlocks, thus we do all this here. - # and pass it to fork_exec() - - if env is not None: - env_list = [] - for k, v in env.items(): - k = os.fsencode(k) - if b'=' in k: - raise ValueError("illegal environment variable name") - env_list.append(k + b'=' + os.fsencode(v)) - else: - env_list = None # Use execv instead of execve. - executable = os.fsencode(executable) - if os.path.dirname(executable): - executable_list = (executable,) - else: - # This matches the behavior of os._execvpe(). 
- executable_list = tuple( - os.path.join(os.fsencode(dir), executable) - for dir in os.get_exec_path(env)) - fds_to_keep = set(pass_fds) - fds_to_keep.add(errpipe_write) - self.pid = _fork_exec( - args, executable_list, - close_fds, tuple(sorted(map(int, fds_to_keep))), - cwd, env_list, - p2cread, p2cwrite, c2pread, c2pwrite, - errread, errwrite, - errpipe_read, errpipe_write, - restore_signals, start_new_session, - process_group, gid, gids, uid, umask, - preexec_fn, _USE_VFORK) - self._child_created = True - finally: - # be sure the FD is closed no matter what - os.close(errpipe_write) - - self._close_pipe_fds(p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - - # Wait for exec to fail or succeed; possibly raising an - # exception (limited in size) - errpipe_data = bytearray() - while True: - part = os.read(errpipe_read, 50000) - errpipe_data += part - if not part or len(errpipe_data) > 50000: - break - finally: - # be sure the FD is closed no matter what - os.close(errpipe_read) - - if errpipe_data: - try: - pid, sts = os.waitpid(self.pid, 0) - if pid == self.pid: - self._handle_exitstatus(sts) - else: - self.returncode = sys.maxsize - except ChildProcessError: - pass - - try: - exception_name, hex_errno, err_msg = ( - errpipe_data.split(b':', 2)) - # The encoding here should match the encoding - # written in by the subprocess implementations - # like _posixsubprocess - err_msg = err_msg.decode() - except ValueError: - exception_name = b'SubprocessError' - hex_errno = b'0' - err_msg = 'Bad exception data from child: {!r}'.format( - bytes(errpipe_data)) - child_exception_type = getattr( - builtins, exception_name.decode('ascii'), - SubprocessError) - if issubclass(child_exception_type, OSError) and hex_errno: - errno_num = int(hex_errno, 16) - if err_msg == "noexec:chdir": - err_msg = "" - # The error must be from chdir(cwd). - err_filename = cwd - elif err_msg == "noexec": - err_msg = "" - err_filename = None - else: - err_filename = orig_executable - if errno_num != 0: - err_msg = os.strerror(errno_num) - if err_filename is not None: - raise child_exception_type(errno_num, err_msg, err_filename) - else: - raise child_exception_type(errno_num, err_msg) - raise child_exception_type(err_msg) - - - def _handle_exitstatus(self, sts, _del_safe=_del_safe): - """All callers to this function MUST hold self._waitpid_lock.""" - # This method is called (indirectly) by __del__, so it cannot - # refer to anything outside of its local scope. - if _del_safe.WIFSTOPPED(sts): - self.returncode = -_del_safe.WSTOPSIG(sts) - else: - self.returncode = _del_safe.waitstatus_to_exitcode(sts) - - def _internal_poll(self, _deadstate=None, _del_safe=_del_safe): - """Check if child process has terminated. Returns returncode - attribute. - - This method is called by __del__, so it cannot reference anything - outside of the local scope (nor can any methods it calls). - - """ - if self.returncode is None: - if not self._waitpid_lock.acquire(False): - # Something else is busy calling waitpid. Don't allow two - # at once. We know nothing yet. - return None - try: - if self.returncode is not None: - return self.returncode # Another thread waited. - pid, sts = _del_safe.waitpid(self.pid, _del_safe.WNOHANG) - if pid == self.pid: - self._handle_exitstatus(sts) - except OSError as e: - if _deadstate is not None: - self.returncode = _deadstate - elif e.errno == _del_safe.ECHILD: - # This happens if SIGCLD is set to be ignored or - # waiting for child processes has otherwise been - # disabled for our process. 
This child is dead, we - # can't get the status. - # http://bugs.python.org/issue15756 - self.returncode = 0 - finally: - self._waitpid_lock.release() - return self.returncode - - - def _try_wait(self, wait_flags): - """All callers to this function MUST hold self._waitpid_lock.""" - try: - (pid, sts) = os.waitpid(self.pid, wait_flags) - except ChildProcessError: - # This happens if SIGCLD is set to be ignored or waiting - # for child processes has otherwise been disabled for our - # process. This child is dead, we can't get the status. - pid = self.pid - sts = 0 - return (pid, sts) - - - def _wait(self, timeout): - """Internal implementation of wait() on POSIX.""" - if self.returncode is not None: - return self.returncode - - if timeout is not None: - endtime = _time() + timeout - # Enter a busy loop if we have a timeout. This busy loop was - # cribbed from Lib/threading.py in Thread.wait() at r71065. - delay = 0.0005 # 500 us -> initial delay of 1 ms - while True: - if self._waitpid_lock.acquire(False): - try: - if self.returncode is not None: - break # Another thread waited. - (pid, sts) = self._try_wait(os.WNOHANG) - assert pid == self.pid or pid == 0 - if pid == self.pid: - self._handle_exitstatus(sts) - break - finally: - self._waitpid_lock.release() - remaining = self._remaining_time(endtime) - if remaining <= 0: - raise TimeoutExpired(self.args, timeout) - delay = min(delay * 2, remaining, .05) - time.sleep(delay) - else: - while self.returncode is None: - with self._waitpid_lock: - if self.returncode is not None: - break # Another thread waited. - (pid, sts) = self._try_wait(0) - # Check the pid and loop as waitpid has been known to - # return 0 even without WNOHANG in odd situations. - # http://bugs.python.org/issue14396. - if pid == self.pid: - self._handle_exitstatus(sts) - return self.returncode - - - def _communicate(self, input, endtime, orig_timeout): - if self.stdin and not self._communication_started: - # Flush stdio buffer. This might block, if the user has - # been writing to .stdin in an uncontrolled fashion. - try: - self.stdin.flush() - except BrokenPipeError: - pass # communicate() must ignore BrokenPipeError. - except ValueError: - # ignore ValueError: I/O operation on closed file. - if not self.stdin.closed: - raise - if not input: - try: - self.stdin.close() - except BrokenPipeError: - pass # communicate() must ignore BrokenPipeError. - - stdout = None - stderr = None - - # Only create this mapping if we haven't already. 
- if not self._communication_started: - self._fileobj2output = {} - if self.stdout: - self._fileobj2output[self.stdout] = [] - if self.stderr: - self._fileobj2output[self.stderr] = [] - - if self.stdout: - stdout = self._fileobj2output[self.stdout] - if self.stderr: - stderr = self._fileobj2output[self.stderr] - - self._save_input(input) - - if self._input: - if not isinstance(self._input, memoryview): - input_view = memoryview(self._input) - else: - input_view = self._input.cast("b") # byte input required - - with _PopenSelector() as selector: - if self.stdin and not self.stdin.closed and self._input: - selector.register(self.stdin, selectors.EVENT_WRITE) - if self.stdout and not self.stdout.closed: - selector.register(self.stdout, selectors.EVENT_READ) - if self.stderr and not self.stderr.closed: - selector.register(self.stderr, selectors.EVENT_READ) - - while selector.get_map(): - timeout = self._remaining_time(endtime) - if timeout is not None and timeout <= 0: - self._check_timeout(endtime, orig_timeout, - stdout, stderr, - skip_check_and_raise=True) - raise RuntimeError( # Impossible :) - '_check_timeout(..., skip_check_and_raise=True) ' - 'failed to raise TimeoutExpired.') - - ready = selector.select(timeout) - self._check_timeout(endtime, orig_timeout, stdout, stderr) - - # XXX Rewrite these to use non-blocking I/O on the file - # objects; they are no longer using C stdio! - - for key, events in ready: - if key.fileobj is self.stdin: - chunk = input_view[self._input_offset : - self._input_offset + _PIPE_BUF] - try: - self._input_offset += os.write(key.fd, chunk) - except BrokenPipeError: - selector.unregister(key.fileobj) - key.fileobj.close() - else: - if self._input_offset >= len(input_view): - selector.unregister(key.fileobj) - key.fileobj.close() - elif key.fileobj in (self.stdout, self.stderr): - data = os.read(key.fd, 32768) - if not data: - selector.unregister(key.fileobj) - key.fileobj.close() - self._fileobj2output[key.fileobj].append(data) - try: - self.wait(timeout=self._remaining_time(endtime)) - except TimeoutExpired as exc: - exc.timeout = orig_timeout - raise - - # All data exchanged. Translate lists into strings. - if stdout is not None: - stdout = b''.join(stdout) - if stderr is not None: - stderr = b''.join(stderr) - - # Translate newlines, if requested. - # This also turns bytes into strings. - if self.text_mode: - if stdout is not None: - stdout = self._translate_newlines(stdout, - self.stdout.encoding, - self.stdout.errors) - if stderr is not None: - stderr = self._translate_newlines(stderr, - self.stderr.encoding, - self.stderr.errors) - - return (stdout, stderr) - - - def _save_input(self, input): - # This method is called from the _communicate_with_*() methods - # so that if we time out while communicating, we can continue - # sending input if we retry. - if self.stdin and self._input is None: - self._input_offset = 0 - self._input = input - if input is not None and self.text_mode: - self._input = self._input.encode(self.stdin.encoding, - self.stdin.errors) - - - def send_signal(self, sig): - """Send a signal to the process.""" - # bpo-38630: Polling reduces the risk of sending a signal to the - # wrong process if the process completed, the Popen.returncode - # attribute is still None, and the pid has been reassigned - # (recycled) to a new different process. This race condition can - # happens in two cases. - # - # Case 1. Thread A calls Popen.poll(), thread B calls - # Popen.send_signal(). In thread A, waitpid() succeed and returns - # the exit status. 
Thread B calls kill() because poll() in thread A - # did not set returncode yet. Calling poll() in thread B prevents - # the race condition thanks to Popen._waitpid_lock. - # - # Case 2. waitpid(pid, 0) has been called directly, without - # using Popen methods: returncode is still None is this case. - # Calling Popen.poll() will set returncode to a default value, - # since waitpid() fails with ProcessLookupError. - self.poll() - if self.returncode is not None: - # Skip signalling a process that we know has already died. - return - - # The race condition can still happen if the race condition - # described above happens between the returncode test - # and the kill() call. - try: - os.kill(self.pid, sig) - except ProcessLookupError: - # Suppress the race condition error; bpo-40550. - pass - - def terminate(self): - """Terminate the process with SIGTERM - """ - self.send_signal(signal.SIGTERM) - - def kill(self): - """Kill the process with SIGKILL - """ - self.send_signal(signal.SIGKILL) diff --git a/Python313_13_x64_Template/Lib/symtable.py b/Python313_13_x64_Template/Lib/symtable.py deleted file mode 100644 index 672ec0ce..00000000 --- a/Python313_13_x64_Template/Lib/symtable.py +++ /dev/null @@ -1,414 +0,0 @@ -"""Interface to the compiler's internal symbol tables""" - -import _symtable -from _symtable import (USE, DEF_GLOBAL, DEF_NONLOCAL, DEF_LOCAL, DEF_PARAM, - DEF_IMPORT, DEF_BOUND, DEF_ANNOT, SCOPE_OFF, SCOPE_MASK, FREE, - LOCAL, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL) - -import weakref -from enum import StrEnum - -__all__ = ["symtable", "SymbolTableType", "SymbolTable", "Class", "Function", "Symbol"] - -def symtable(code, filename, compile_type): - """ Return the toplevel *SymbolTable* for the source code. - - *filename* is the name of the file with the code - and *compile_type* is the *compile()* mode argument. - """ - top = _symtable.symtable(code, filename, compile_type) - return _newSymbolTable(top, filename) - -class SymbolTableFactory: - def __init__(self): - self.__memo = weakref.WeakValueDictionary() - - def new(self, table, filename): - if table.type == _symtable.TYPE_FUNCTION: - return Function(table, filename) - if table.type == _symtable.TYPE_CLASS: - return Class(table, filename) - return SymbolTable(table, filename) - - def __call__(self, table, filename): - key = table, filename - obj = self.__memo.get(key, None) - if obj is None: - obj = self.__memo[key] = self.new(table, filename) - return obj - -_newSymbolTable = SymbolTableFactory() - - -class SymbolTableType(StrEnum): - MODULE = "module" - FUNCTION = "function" - CLASS = "class" - ANNOTATION = "annotation" - TYPE_ALIAS = "type alias" - TYPE_PARAMETERS = "type parameters" - TYPE_VARIABLE = "type variable" - - -class SymbolTable: - - def __init__(self, raw_table, filename): - self._table = raw_table - self._filename = filename - self._symbols = {} - - def __repr__(self): - if self.__class__ == SymbolTable: - kind = "" - else: - kind = "%s " % self.__class__.__name__ - - if self._table.name == "top": - return "<{0}SymbolTable for module {1}>".format(kind, self._filename) - else: - return "<{0}SymbolTable for {1} in {2}>".format(kind, - self._table.name, - self._filename) - - def get_type(self): - """Return the type of the symbol table. - - The value returned is one of the values in - the ``SymbolTableType`` enumeration. 
- """ - if self._table.type == _symtable.TYPE_MODULE: - return SymbolTableType.MODULE - if self._table.type == _symtable.TYPE_FUNCTION: - return SymbolTableType.FUNCTION - if self._table.type == _symtable.TYPE_CLASS: - return SymbolTableType.CLASS - if self._table.type == _symtable.TYPE_ANNOTATION: - return SymbolTableType.ANNOTATION - if self._table.type == _symtable.TYPE_TYPE_ALIAS: - return SymbolTableType.TYPE_ALIAS - if self._table.type == _symtable.TYPE_TYPE_PARAMETERS: - return SymbolTableType.TYPE_PARAMETERS - if self._table.type == _symtable.TYPE_TYPE_VARIABLE: - return SymbolTableType.TYPE_VARIABLE - assert False, f"unexpected type: {self._table.type}" - - def get_id(self): - """Return an identifier for the table. - """ - return self._table.id - - def get_name(self): - """Return the table's name. - - This corresponds to the name of the class, function - or 'top' if the table is for a class, function or - global respectively. - """ - return self._table.name - - def get_lineno(self): - """Return the number of the first line in the - block for the table. - """ - return self._table.lineno - - def is_optimized(self): - """Return *True* if the locals in the table - are optimizable. - """ - return bool(self._table.type == _symtable.TYPE_FUNCTION) - - def is_nested(self): - """Return *True* if the block is a nested class - or function.""" - return bool(self._table.nested) - - def has_children(self): - """Return *True* if the block has nested namespaces. - """ - return bool(self._table.children) - - def get_identifiers(self): - """Return a view object containing the names of symbols in the table. - """ - return self._table.symbols.keys() - - def lookup(self, name): - """Lookup a *name* in the table. - - Returns a *Symbol* instance. - """ - sym = self._symbols.get(name) - if sym is None: - flags = self._table.symbols[name] - namespaces = self.__check_children(name) - module_scope = (self._table.name == "top") - sym = self._symbols[name] = Symbol(name, flags, namespaces, - module_scope=module_scope) - return sym - - def get_symbols(self): - """Return a list of *Symbol* instances for - names in the table. - """ - return [self.lookup(ident) for ident in self.get_identifiers()] - - def __check_children(self, name): - return [_newSymbolTable(st, self._filename) - for st in self._table.children - if st.name == name] - - def get_children(self): - """Return a list of the nested symbol tables. - """ - return [_newSymbolTable(st, self._filename) - for st in self._table.children] - - -class Function(SymbolTable): - - # Default values for instance variables - __params = None - __locals = None - __frees = None - __globals = None - __nonlocals = None - - def __idents_matching(self, test_func): - return tuple(ident for ident in self.get_identifiers() - if test_func(self._table.symbols[ident])) - - def get_parameters(self): - """Return a tuple of parameters to the function. - """ - if self.__params is None: - self.__params = self.__idents_matching(lambda x:x & DEF_PARAM) - return self.__params - - def get_locals(self): - """Return a tuple of locals in the function. - """ - if self.__locals is None: - locs = (LOCAL, CELL) - test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs - self.__locals = self.__idents_matching(test) - return self.__locals - - def get_globals(self): - """Return a tuple of globals in the function. 
- """ - if self.__globals is None: - glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT) - test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob - self.__globals = self.__idents_matching(test) - return self.__globals - - def get_nonlocals(self): - """Return a tuple of nonlocals in the function. - """ - if self.__nonlocals is None: - self.__nonlocals = self.__idents_matching(lambda x:x & DEF_NONLOCAL) - return self.__nonlocals - - def get_frees(self): - """Return a tuple of free variables in the function. - """ - if self.__frees is None: - is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE - self.__frees = self.__idents_matching(is_free) - return self.__frees - - -class Class(SymbolTable): - - __methods = None - - def get_methods(self): - """Return a tuple of methods declared in the class. - """ - if self.__methods is None: - d = {} - - def is_local_symbol(ident): - flags = self._table.symbols.get(ident, 0) - return ((flags >> SCOPE_OFF) & SCOPE_MASK) == LOCAL - - for st in self._table.children: - # pick the function-like symbols that are local identifiers - if is_local_symbol(st.name): - match st.type: - case _symtable.TYPE_FUNCTION: - # generators are of type TYPE_FUNCTION with a ".0" - # parameter as a first parameter (which makes them - # distinguishable from a function named 'genexpr') - if st.name == 'genexpr' and '.0' in st.varnames: - continue - d[st.name] = 1 - case _symtable.TYPE_TYPE_PARAMETERS: - # Get the function-def block in the annotation - # scope 'st' with the same identifier, if any. - scope_name = st.name - for c in st.children: - if c.name == scope_name and c.type == _symtable.TYPE_FUNCTION: - # A generic generator of type TYPE_FUNCTION - # cannot be a direct child of 'st' (but it - # can be a descendant), e.g.: - # - # class A: - # type genexpr[genexpr] = (x for x in []) - assert scope_name != 'genexpr' or '.0' not in c.varnames - d[scope_name] = 1 - break - self.__methods = tuple(d) - return self.__methods - - -class Symbol: - - def __init__(self, name, flags, namespaces=None, *, module_scope=False): - self.__name = name - self.__flags = flags - self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK # like PyST_GetScope() - self.__namespaces = namespaces or () - self.__module_scope = module_scope - - def __repr__(self): - flags_str = '|'.join(self._flags_str()) - return f'' - - def _scope_str(self): - return _scopes_value_to_name.get(self.__scope) or str(self.__scope) - - def _flags_str(self): - for flagname, flagvalue in _flags: - if self.__flags & flagvalue == flagvalue: - yield flagname - - def get_name(self): - """Return a name of a symbol. - """ - return self.__name - - def is_referenced(self): - """Return *True* if the symbol is used in - its block. - """ - return bool(self.__flags & _symtable.USE) - - def is_parameter(self): - """Return *True* if the symbol is a parameter. - """ - return bool(self.__flags & DEF_PARAM) - - def is_global(self): - """Return *True* if the symbol is global. - """ - return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT) - or (self.__module_scope and self.__flags & DEF_BOUND)) - - def is_nonlocal(self): - """Return *True* if the symbol is nonlocal.""" - return bool(self.__flags & DEF_NONLOCAL) - - def is_declared_global(self): - """Return *True* if the symbol is declared global - with a global statement.""" - return bool(self.__scope == GLOBAL_EXPLICIT) - - def is_local(self): - """Return *True* if the symbol is local. 
- """ - return bool(self.__scope in (LOCAL, CELL) - or (self.__module_scope and self.__flags & DEF_BOUND)) - - def is_annotated(self): - """Return *True* if the symbol is annotated. - """ - return bool(self.__flags & DEF_ANNOT) - - def is_free(self): - """Return *True* if a referenced symbol is - not assigned to. - """ - return bool(self.__scope == FREE) - - def is_imported(self): - """Return *True* if the symbol is created from - an import statement. - """ - return bool(self.__flags & DEF_IMPORT) - - def is_assigned(self): - """Return *True* if a symbol is assigned to.""" - return bool(self.__flags & DEF_LOCAL) - - def is_namespace(self): - """Returns *True* if name binding introduces new namespace. - - If the name is used as the target of a function or class - statement, this will be true. - - Note that a single name can be bound to multiple objects. If - is_namespace() is true, the name may also be bound to other - objects, like an int or list, that does not introduce a new - namespace. - """ - return bool(self.__namespaces) - - def get_namespaces(self): - """Return a list of namespaces bound to this name""" - return self.__namespaces - - def get_namespace(self): - """Return the single namespace bound to this name. - - Raises ValueError if the name is bound to multiple namespaces - or no namespace. - """ - if len(self.__namespaces) == 0: - raise ValueError("name is not bound to any namespaces") - elif len(self.__namespaces) > 1: - raise ValueError("name is bound to multiple namespaces") - else: - return self.__namespaces[0] - - -_flags = [('USE', USE)] -_flags.extend(kv for kv in globals().items() if kv[0].startswith('DEF_')) -_scopes_names = ('FREE', 'LOCAL', 'GLOBAL_IMPLICIT', 'GLOBAL_EXPLICIT', 'CELL') -_scopes_value_to_name = {globals()[n]: n for n in _scopes_names} - - -def main(args): - import sys - def print_symbols(table, level=0): - indent = ' ' * level - nested = "nested " if table.is_nested() else "" - if table.get_type() == 'module': - what = f'from file {table._filename!r}' - else: - what = f'{table.get_name()!r}' - print(f'{indent}symbol table for {nested}{table.get_type()} {what}:') - for ident in table.get_identifiers(): - symbol = table.lookup(ident) - flags = ', '.join(symbol._flags_str()).lower() - print(f' {indent}{symbol._scope_str().lower()} symbol {symbol.get_name()!r}: {flags}') - print() - - for table2 in table.get_children(): - print_symbols(table2, level + 1) - - for filename in args or ['-']: - if filename == '-': - src = sys.stdin.read() - filename = '' - else: - with open(filename, 'rb') as f: - src = f.read() - mod = symtable(src, filename, 'exec') - print_symbols(mod) - - -if __name__ == "__main__": - import sys - main(sys.argv[1:]) diff --git a/Python313_13_x64_Template/Lib/sysconfig/__init__.py b/Python313_13_x64_Template/Lib/sysconfig/__init__.py deleted file mode 100644 index 43edebce..00000000 --- a/Python313_13_x64_Template/Lib/sysconfig/__init__.py +++ /dev/null @@ -1,734 +0,0 @@ -"""Access to Python's configuration information.""" - -import os -import sys -import threading -from os.path import realpath - -__all__ = [ - 'get_config_h_filename', - 'get_config_var', - 'get_config_vars', - 'get_makefile_filename', - 'get_path', - 'get_path_names', - 'get_paths', - 'get_platform', - 'get_python_version', - 'get_scheme_names', - 'parse_config_h', -] - -# Keys for get_config_var() that are never converted to Python integers. 
-_ALWAYS_STR = { - 'IPHONEOS_DEPLOYMENT_TARGET', - 'MACOSX_DEPLOYMENT_TARGET', -} - -_INSTALL_SCHEMES = { - 'posix_prefix': { - 'stdlib': '{installed_base}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'platstdlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'purelib': '{base}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'include': - '{installed_base}/include/{implementation_lower}{py_version_short}{abiflags}', - 'platinclude': - '{installed_platbase}/include/{implementation_lower}{py_version_short}{abiflags}', - 'scripts': '{base}/bin', - 'data': '{base}', - }, - 'posix_home': { - 'stdlib': '{installed_base}/lib/{implementation_lower}', - 'platstdlib': '{base}/lib/{implementation_lower}', - 'purelib': '{base}/lib/{implementation_lower}', - 'platlib': '{base}/lib/{implementation_lower}', - 'include': '{installed_base}/include/{implementation_lower}', - 'platinclude': '{installed_base}/include/{implementation_lower}', - 'scripts': '{base}/bin', - 'data': '{base}', - }, - 'nt': { - 'stdlib': '{installed_base}/Lib', - 'platstdlib': '{base}/Lib', - 'purelib': '{base}/Lib/site-packages', - 'platlib': '{base}/Lib/site-packages', - 'include': '{installed_base}/Include', - 'platinclude': '{installed_base}/Include', - 'scripts': '{base}/Scripts', - 'data': '{base}', - }, - - # Downstream distributors can overwrite the default install scheme. - # This is done to support downstream modifications where distributors change - # the installation layout (eg. different site-packages directory). - # So, distributors will change the default scheme to one that correctly - # represents their layout. - # This presents an issue for projects/people that need to bootstrap virtual - # environments, like virtualenv. As distributors might now be customizing - # the default install scheme, there is no guarantee that the information - # returned by sysconfig.get_default_scheme/get_paths is correct for - # a virtual environment, the only guarantee we have is that it is correct - # for the *current* environment. When bootstrapping a virtual environment, - # we need to know its layout, so that we can place the files in the - # correct locations. - # The "*_venv" install scheme is a scheme to bootstrap virtual environments, - # essentially identical to the default posix_prefix/nt schemes. 
- # Downstream distributors who patch posix_prefix/nt scheme are encouraged to - # leave the following schemes unchanged - 'posix_venv': { - 'stdlib': '{installed_base}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'platstdlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'purelib': '{base}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'include': - '{installed_base}/include/{implementation_lower}{py_version_short}{abiflags}', - 'platinclude': - '{installed_platbase}/include/{implementation_lower}{py_version_short}{abiflags}', - 'scripts': '{base}/bin', - 'data': '{base}', - }, - 'nt_venv': { - 'stdlib': '{installed_base}/Lib', - 'platstdlib': '{base}/Lib', - 'purelib': '{base}/Lib/site-packages', - 'platlib': '{base}/Lib/site-packages', - 'include': '{installed_base}/Include', - 'platinclude': '{installed_base}/Include', - 'scripts': '{base}/Scripts', - 'data': '{base}', - }, - } - -# For the OS-native venv scheme, we essentially provide an alias: -if os.name == 'nt': - _INSTALL_SCHEMES['venv'] = _INSTALL_SCHEMES['nt_venv'] -else: - _INSTALL_SCHEMES['venv'] = _INSTALL_SCHEMES['posix_venv'] - -def _get_implementation(): - return 'Python' - -# NOTE: site.py has copy of this function. -# Sync it when modify this function. -def _getuserbase(): - env_base = os.environ.get("PYTHONUSERBASE", None) - if env_base: - return env_base - - # Emscripten, iOS, tvOS, VxWorks, WASI, and watchOS have no home directories - if sys.platform in {"emscripten", "ios", "tvos", "vxworks", "wasi", "watchos"}: - return None - - def joinuser(*args): - return os.path.expanduser(os.path.join(*args)) - - if os.name == "nt": - base = os.environ.get("APPDATA") or "~" - return joinuser(base, _get_implementation()) - - if sys.platform == "darwin" and sys._framework: - return joinuser("~", "Library", sys._framework, - f"{sys.version_info[0]}.{sys.version_info[1]}") - - return joinuser("~", ".local") - -_HAS_USER_BASE = (_getuserbase() is not None) - -if _HAS_USER_BASE: - _INSTALL_SCHEMES |= { - # NOTE: When modifying "purelib" scheme, update site._get_path() too. 
- 'nt_user': { - 'stdlib': '{userbase}/{implementation}{py_version_nodot_plat}', - 'platstdlib': '{userbase}/{implementation}{py_version_nodot_plat}', - 'purelib': '{userbase}/{implementation}{py_version_nodot_plat}/site-packages', - 'platlib': '{userbase}/{implementation}{py_version_nodot_plat}/site-packages', - 'include': '{userbase}/{implementation}{py_version_nodot_plat}/Include', - 'scripts': '{userbase}/{implementation}{py_version_nodot_plat}/Scripts', - 'data': '{userbase}', - }, - 'posix_user': { - 'stdlib': '{userbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'platstdlib': '{userbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'purelib': '{userbase}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'platlib': '{userbase}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'include': '{userbase}/include/{implementation_lower}{py_version_short}{abi_thread}', - 'scripts': '{userbase}/bin', - 'data': '{userbase}', - }, - 'osx_framework_user': { - 'stdlib': '{userbase}/lib/{implementation_lower}', - 'platstdlib': '{userbase}/lib/{implementation_lower}', - 'purelib': '{userbase}/lib/{implementation_lower}/site-packages', - 'platlib': '{userbase}/lib/{implementation_lower}/site-packages', - 'include': '{userbase}/include/{implementation_lower}{py_version_short}', - 'scripts': '{userbase}/bin', - 'data': '{userbase}', - }, - } - -_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include', - 'scripts', 'data') - -_PY_VERSION = sys.version.split()[0] -_PY_VERSION_SHORT = f'{sys.version_info[0]}.{sys.version_info[1]}' -_PY_VERSION_SHORT_NO_DOT = f'{sys.version_info[0]}{sys.version_info[1]}' -_BASE_PREFIX = os.path.normpath(sys.base_prefix) -_BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) -# Mutex guarding initialization of _CONFIG_VARS. -_CONFIG_VARS_LOCK = threading.RLock() -_CONFIG_VARS = None -# True iff _CONFIG_VARS has been fully initialized. -_CONFIG_VARS_INITIALIZED = False -_USER_BASE = None - - -def _safe_realpath(path): - try: - return realpath(path) - except OSError: - return path - -if sys.executable: - _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) -else: - # sys.executable can be empty if argv[0] has been changed and Python is - # unable to retrieve the real program name - _PROJECT_BASE = _safe_realpath(os.getcwd()) - -# In a virtual environment, `sys._home` gives us the target directory -# `_PROJECT_BASE` for the executable that created it when the virtual -# python is an actual executable ('venv --copies' or Windows). -_sys_home = getattr(sys, '_home', None) -if _sys_home: - _PROJECT_BASE = _sys_home - -if os.name == 'nt': - # In a source build, the executable is in a subdirectory of the root - # that we want (\PCbuild\). - # `_BASE_PREFIX` is used as the base installation is where the source - # will be. The realpath is needed to prevent mount point confusion - # that can occur with just string comparisons. - if _safe_realpath(_PROJECT_BASE).startswith( - _safe_realpath(f'{_BASE_PREFIX}\\PCbuild')): - _PROJECT_BASE = _BASE_PREFIX - -# set for cross builds -if "_PYTHON_PROJECT_BASE" in os.environ: - _PROJECT_BASE = _safe_realpath(os.environ["_PYTHON_PROJECT_BASE"]) - -def is_python_build(check_home=None): - if check_home is not None: - import warnings - warnings.warn( - ( - 'The check_home argument of sysconfig.is_python_build is ' - 'deprecated and its value is ignored. ' - 'It will be removed in Python 3.15.' 
- ), - DeprecationWarning, - stacklevel=2, - ) - for fn in ("Setup", "Setup.local"): - if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): - return True - return False - -_PYTHON_BUILD = is_python_build() - -if _PYTHON_BUILD: - for scheme in ('posix_prefix', 'posix_home'): - # On POSIX-y platforms, Python will: - # - Build from .h files in 'headers' (which is only added to the - # scheme when building CPython) - # - Install .h files to 'include' - scheme = _INSTALL_SCHEMES[scheme] - scheme['headers'] = scheme['include'] - scheme['include'] = '{srcdir}/Include' - scheme['platinclude'] = '{projectbase}/.' - del scheme - - -def _subst_vars(s, local_vars): - try: - return s.format(**local_vars) - except KeyError as var: - try: - return s.format(**os.environ) - except KeyError: - raise AttributeError(f'{var}') from None - -def _extend_dict(target_dict, other_dict): - target_keys = target_dict.keys() - for key, value in other_dict.items(): - if key in target_keys: - continue - target_dict[key] = value - - -def _expand_vars(scheme, vars): - res = {} - if vars is None: - vars = {} - _extend_dict(vars, get_config_vars()) - if os.name == 'nt': - # On Windows we want to substitute 'lib' for schemes rather - # than the native value (without modifying vars, in case it - # was passed in) - vars = vars | {'platlibdir': 'lib'} - - for key, value in _INSTALL_SCHEMES[scheme].items(): - if os.name in ('posix', 'nt'): - value = os.path.expanduser(value) - res[key] = os.path.normpath(_subst_vars(value, vars)) - return res - - -def _get_preferred_schemes(): - if os.name == 'nt': - return { - 'prefix': 'nt', - 'home': 'posix_home', - 'user': 'nt_user', - } - if sys.platform == 'darwin' and sys._framework: - return { - 'prefix': 'posix_prefix', - 'home': 'posix_home', - 'user': 'osx_framework_user', - } - - return { - 'prefix': 'posix_prefix', - 'home': 'posix_home', - 'user': 'posix_user', - } - - -def get_preferred_scheme(key): - if key == 'prefix' and sys.prefix != sys.base_prefix: - return 'venv' - scheme = _get_preferred_schemes()[key] - if scheme not in _INSTALL_SCHEMES: - raise ValueError( - f"{key!r} returned {scheme!r}, which is not a valid scheme " - f"on this platform" - ) - return scheme - - -def get_default_scheme(): - return get_preferred_scheme('prefix') - - -def get_makefile_filename(): - """Return the path of the Makefile.""" - if _PYTHON_BUILD: - return os.path.join(_PROJECT_BASE, "Makefile") - if hasattr(sys, 'abiflags'): - config_dir_name = f'config-{_PY_VERSION_SHORT}{sys.abiflags}' - else: - config_dir_name = 'config' - if hasattr(sys.implementation, '_multiarch'): - config_dir_name += f'-{sys.implementation._multiarch}' - return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') - - -def _get_sysconfigdata_name(): - multiarch = getattr(sys.implementation, '_multiarch', '') - return os.environ.get( - '_PYTHON_SYSCONFIGDATA_NAME', - f'_sysconfigdata_{sys.abiflags}_{sys.platform}_{multiarch}', - ) - -def _init_posix(vars): - """Initialize the module as appropriate for POSIX systems.""" - # _sysconfigdata is generated at build time, see _generate_posix_vars() - name = _get_sysconfigdata_name() - - # For cross builds, the path to the target's sysconfigdata must be specified - # so it can be imported. It cannot be in PYTHONPATH, as foreign modules in - # sys.path can cause crashes when loaded by the host interpreter. - # Rely on truthiness as a valueless env variable is still an empty string. - # See OS X note in _generate_posix_vars re _sysconfigdata. 
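The scheme selection and path expansion machinery above is reachable through sysconfig's public helpers; a minimal sketch of inspecting the active scheme (output varies by platform and by whether a virtual environment is active):

    import sysconfig

    # 'posix_prefix', 'nt', or 'venv' inside an active virtual environment.
    print(sysconfig.get_default_scheme())

    # Expanded install paths for the current interpreter; the keys mirror
    # _SCHEME_KEYS ('stdlib', 'platstdlib', 'purelib', 'platlib', ...).
    for key, path in sysconfig.get_paths().items():
        print(f"{key:12} {path}")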
- if (path := os.environ.get('_PYTHON_SYSCONFIGDATA_PATH')): - from importlib.machinery import FileFinder, SourceFileLoader, SOURCE_SUFFIXES - from importlib.util import module_from_spec - spec = FileFinder(path, (SourceFileLoader, SOURCE_SUFFIXES)).find_spec(name) - _temp = module_from_spec(spec) - spec.loader.exec_module(_temp) - else: - _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0) - build_time_vars = _temp.build_time_vars - vars.update(build_time_vars) - -def _init_non_posix(vars): - """Initialize the module as appropriate for NT""" - # set basic install directories - import _winapi - import _sysconfig - vars['LIBDEST'] = get_path('stdlib') - vars['BINLIBDEST'] = get_path('platstdlib') - vars['INCLUDEPY'] = get_path('include') - - # Add EXT_SUFFIX, SOABI, and Py_GIL_DISABLED - vars.update(_sysconfig.config_vars()) - - vars['LIBDIR'] = _safe_realpath(os.path.join(get_config_var('installed_base'), 'libs')) - if hasattr(sys, 'dllhandle'): - dllhandle = _winapi.GetModuleFileName(sys.dllhandle) - vars['LIBRARY'] = os.path.basename(_safe_realpath(dllhandle)) - vars['LDLIBRARY'] = vars['LIBRARY'] - vars['EXE'] = '.exe' - vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT - vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) - vars['TZPATH'] = '' - -# -# public APIs -# - - -def parse_config_h(fp, vars=None): - """Parse a config.h-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. - """ - if vars is None: - vars = {} - import re - define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") - undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") - - while True: - line = fp.readline() - if not line: - break - m = define_rx.match(line) - if m: - n, v = m.group(1, 2) - try: - if n in _ALWAYS_STR: - raise ValueError - v = int(v) - except ValueError: - pass - vars[n] = v - else: - m = undef_rx.match(line) - if m: - vars[m.group(1)] = 0 - return vars - - -def get_config_h_filename(): - """Return the path of pyconfig.h.""" - if _PYTHON_BUILD: - if os.name == "nt": - inc_dir = os.path.dirname(sys._base_executable) - else: - inc_dir = _PROJECT_BASE - else: - inc_dir = get_path('platinclude') - return os.path.join(inc_dir, 'pyconfig.h') - - -def get_scheme_names(): - """Return a tuple containing the schemes names.""" - return tuple(sorted(_INSTALL_SCHEMES)) - - -def get_path_names(): - """Return a tuple containing the paths names.""" - return _SCHEME_KEYS - - -def get_paths(scheme=get_default_scheme(), vars=None, expand=True): - """Return a mapping containing an install scheme. - - ``scheme`` is the install scheme name. If not provided, it will - return the default scheme for the current platform. - """ - if expand: - return _expand_vars(scheme, vars) - else: - return _INSTALL_SCHEMES[scheme] - - -def get_path(name, scheme=get_default_scheme(), vars=None, expand=True): - """Return a path corresponding to the scheme. - - ``scheme`` is the install scheme name. - """ - return get_paths(scheme, vars, expand)[name] - - -def _init_config_vars(): - global _CONFIG_VARS - _CONFIG_VARS = {} - # Normalized versions of prefix and exec_prefix are handy to have; - # in fact, these are the standard versions used most places in the - # Distutils. - _PREFIX = os.path.normpath(sys.prefix) - _EXEC_PREFIX = os.path.normpath(sys.exec_prefix) - _CONFIG_VARS['prefix'] = _PREFIX # FIXME: This gets overwriten by _init_posix. 
- _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX # FIXME: This gets overwriten by _init_posix. - _CONFIG_VARS['py_version'] = _PY_VERSION - _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT - _CONFIG_VARS['py_version_nodot'] = _PY_VERSION_SHORT_NO_DOT - _CONFIG_VARS['installed_base'] = _BASE_PREFIX - _CONFIG_VARS['base'] = _PREFIX - _CONFIG_VARS['installed_platbase'] = _BASE_EXEC_PREFIX - _CONFIG_VARS['platbase'] = _EXEC_PREFIX - _CONFIG_VARS['projectbase'] = _PROJECT_BASE - _CONFIG_VARS['platlibdir'] = sys.platlibdir - _CONFIG_VARS['implementation'] = _get_implementation() - _CONFIG_VARS['implementation_lower'] = _get_implementation().lower() - try: - _CONFIG_VARS['abiflags'] = sys.abiflags - except AttributeError: - # sys.abiflags may not be defined on all platforms. - _CONFIG_VARS['abiflags'] = '' - try: - _CONFIG_VARS['py_version_nodot_plat'] = sys.winver.replace('.', '') - except AttributeError: - _CONFIG_VARS['py_version_nodot_plat'] = '' - - if os.name == 'nt': - _init_non_posix(_CONFIG_VARS) - _CONFIG_VARS['VPATH'] = sys._vpath - if os.name == 'posix': - _init_posix(_CONFIG_VARS) - if _HAS_USER_BASE: - # Setting 'userbase' is done below the call to the - # init function to enable using 'get_config_var' in - # the init-function. - _CONFIG_VARS['userbase'] = _getuserbase() - - # e.g., 't' for free-threaded or '' for default build - _CONFIG_VARS['abi_thread'] = 't' if _CONFIG_VARS.get('Py_GIL_DISABLED') else '' - - # Always convert srcdir to an absolute path - srcdir = _CONFIG_VARS.get('srcdir', _PROJECT_BASE) - if os.name == 'posix': - if _PYTHON_BUILD: - # If srcdir is a relative path (typically '.' or '..') - # then it should be interpreted relative to the directory - # containing Makefile. - base = os.path.dirname(get_makefile_filename()) - srcdir = os.path.join(base, srcdir) - else: - # srcdir is not meaningful since the installation is - # spread about the filesystem. We choose the - # directory containing the Makefile since we know it - # exists. - srcdir = os.path.dirname(get_makefile_filename()) - _CONFIG_VARS['srcdir'] = _safe_realpath(srcdir) - - # OS X platforms require special customization to handle - # multi-architecture, multi-os-version installers - if sys.platform == 'darwin': - import _osx_support - _osx_support.customize_config_vars(_CONFIG_VARS) - - global _CONFIG_VARS_INITIALIZED - _CONFIG_VARS_INITIALIZED = True - - -def get_config_vars(*args): - """With no arguments, return a dictionary of all configuration - variables relevant for the current platform. - - On Unix, this means every variable defined in Python's installed Makefile; - On Windows it's a much smaller set. - - With arguments, return a list of values that result from looking up - each argument in the configuration variable dictionary. - """ - global _CONFIG_VARS_INITIALIZED - - # Avoid claiming the lock once initialization is complete. - if not _CONFIG_VARS_INITIALIZED: - with _CONFIG_VARS_LOCK: - # Test again with the lock held to avoid races. Note that - # we test _CONFIG_VARS here, not _CONFIG_VARS_INITIALIZED, - # to ensure that recursive calls to get_config_vars() - # don't re-enter init_config_vars(). - if _CONFIG_VARS is None: - _init_config_vars() - else: - # If the site module initialization happened after _CONFIG_VARS was - # initialized, a virtual environment might have been activated, resulting in - # variables like sys.prefix changing their value, so we need to re-init the - # config vars (see GH-126789). 
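The locking discipline described in the comments above is a standard double-checked initialization; a generic, self-contained sketch with illustrative names (not the module's own):

    import threading

    _lock = threading.RLock()
    _cache = None  # stands in for the lazily built configuration dict

    def get_cache():
        global _cache
        if _cache is None:           # fast path: no lock once initialized
            with _lock:
                if _cache is None:   # re-test with the lock held to avoid races
                    _cache = {"initialized": True}
        return _cache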
- if _CONFIG_VARS['base'] != os.path.normpath(sys.prefix): - with _CONFIG_VARS_LOCK: - _CONFIG_VARS_INITIALIZED = False - _init_config_vars() - - if args: - vals = [] - for name in args: - vals.append(_CONFIG_VARS.get(name)) - return vals - else: - return _CONFIG_VARS - - -def get_config_var(name): - """Return the value of a single variable using the dictionary returned by - 'get_config_vars()'. - - Equivalent to get_config_vars().get(name) - """ - return get_config_vars().get(name) - - -def get_platform(): - """Return a string that identifies the current platform. - - This is used mainly to distinguish platform-specific build directories and - platform-specific built distributions. Typically includes the OS name and - version and the architecture (as supplied by 'os.uname()'), although the - exact information included depends on the OS; on Linux, the kernel version - isn't particularly important. - - Examples of returned values: - - - Windows: - - - win-amd64 (64-bit Windows on AMD64, aka x86_64, Intel64, and EM64T) - - win-arm64 (64-bit Windows on ARM64, aka AArch64) - - win32 (all others - specifically, sys.platform is returned) - - POSIX based OS: - - - linux-x86_64 - - macosx-15.5-arm64 - - macosx-26.0-universal2 (macOS on Apple Silicon or Intel) - - android-24-arm64_v8a - - For other non-POSIX platforms, currently just returns :data:`sys.platform`.""" - if os.name == 'nt': - if 'amd64' in sys.version.lower(): - return 'win-amd64' - if '(arm)' in sys.version.lower(): - return 'win-arm32' - if '(arm64)' in sys.version.lower(): - return 'win-arm64' - return sys.platform - - if os.name != "posix" or not hasattr(os, 'uname'): - # XXX what about the architecture? NT is Intel or Alpha - return sys.platform - - # Set for cross builds explicitly - if "_PYTHON_HOST_PLATFORM" in os.environ: - return os.environ["_PYTHON_HOST_PLATFORM"] - - # Try to distinguish various flavours of Unix - osname, host, release, version, machine = os.uname() - - # Convert the OS name to lowercase, remove '/' characters, and translate - # spaces (for "Power Macintosh") - osname = osname.lower().replace('/', '') - machine = machine.replace(' ', '_') - machine = machine.replace('/', '-') - - if osname[:5] == "linux": - if sys.platform == "android": - osname = "android" - release = get_config_var("ANDROID_API_LEVEL") - - # Wheel tags use the ABI names from Android's own tools. - # When Python is running on 32-bit ARM Android on a 64-bit ARM kernel, - # 'os.uname().machine' is 'armv8l'. Such devices run the same userspace - # code as 'armv7l' devices. - # During the build process of the Android testbed when targeting 32-bit ARM, - # '_PYTHON_HOST_PLATFORM' is 'arm-linux-androideabi', so 'machine' becomes - # 'arm'. - machine = { - "aarch64": "arm64_v8a", - "arm": "armeabi_v7a", - "armv7l": "armeabi_v7a", - "armv8l": "armeabi_v7a", - "i686": "x86", - "x86_64": "x86_64", - }[machine] - else: - # At least on Linux/Intel, 'machine' is the processor -- - # i386, etc. - # XXX what about Alpha, SPARC, etc? - return f"{osname}-{machine}" - elif osname[:5] == "sunos": - if release[0] >= "5": # SunOS 5 == Solaris 2 - osname = "solaris" - release = f"{int(release[0]) - 3}.{release[2:]}" - # We can't use "platform.architecture()[0]" because a - # bootstrap problem. We use a dict to get an error - # if some suspicious happens. 
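The platform strings listed in the docstring above can be checked directly from the public API; a one-line sketch:

    import sysconfig

    # Typical values: 'linux-x86_64', 'win-amd64', 'macosx-15.5-arm64'.
    print(sysconfig.get_platform())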
- bitness = {2147483647:"32bit", 9223372036854775807:"64bit"} - machine += f".{bitness[sys.maxsize]}" - # fall through to standard osname-release-machine representation - elif osname[:3] == "aix": - from _aix_support import aix_platform - return aix_platform() - elif osname[:6] == "cygwin": - osname = "cygwin" - import re - rel_re = re.compile(r'[\d.]+') - m = rel_re.match(release) - if m: - release = m.group() - elif osname[:6] == "darwin": - if sys.platform == "ios": - release = get_config_vars().get("IPHONEOS_DEPLOYMENT_TARGET", "13.0") - osname = sys.platform - machine = sys.implementation._multiarch - else: - import _osx_support - osname, release, machine = _osx_support.get_platform_osx( - get_config_vars(), - osname, release, machine) - - return f"{osname}-{release}-{machine}" - - -def get_python_version(): - return _PY_VERSION_SHORT - - -def _get_python_version_abi(): - return _PY_VERSION_SHORT + get_config_var("abi_thread") - - -def expand_makefile_vars(s, vars): - """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in - 'string' according to 'vars' (a dictionary mapping variable names to - values). Variables not present in 'vars' are silently expanded to the - empty string. The variable values in 'vars' should not contain further - variable expansions; if 'vars' is the output of 'parse_makefile()', - you're fine. Returns a variable-expanded version of 's'. - """ - import re - - _findvar1_rx = r"\$\(([A-Za-z][A-Za-z0-9_]*)\)" - _findvar2_rx = r"\${([A-Za-z][A-Za-z0-9_]*)}" - - # This algorithm does multiple expansion, so if vars['foo'] contains - # "${bar}", it will expand ${foo} to ${bar}, and then expand - # ${bar}... and so forth. This is fine as long as 'vars' comes from - # 'parse_makefile()', which takes care of such expansions eagerly, - # according to make's variable expansion semantics. - - while True: - m = re.search(_findvar1_rx, s) or re.search(_findvar2_rx, s) - if m: - (beg, end) = m.span() - s = s[0:beg] + vars.get(m.group(1)) + s[end:] - else: - break - return s diff --git a/Python313_13_x64_Template/Lib/sysconfig/__main__.py b/Python313_13_x64_Template/Lib/sysconfig/__main__.py deleted file mode 100644 index d7257b9d..00000000 --- a/Python313_13_x64_Template/Lib/sysconfig/__main__.py +++ /dev/null @@ -1,248 +0,0 @@ -import os -import sys -from sysconfig import ( - _ALWAYS_STR, - _PYTHON_BUILD, - _get_sysconfigdata_name, - get_config_h_filename, - get_config_vars, - get_default_scheme, - get_makefile_filename, - get_paths, - get_platform, - get_python_version, - parse_config_h, -) - - -# Regexes needed for parsing Makefile (and similar syntaxes, -# like old-style Setup files). -_variable_rx = r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)" -_findvar1_rx = r"\$\(([A-Za-z][A-Za-z0-9_]*)\)" -_findvar2_rx = r"\${([A-Za-z][A-Za-z0-9_]*)}" - - -def _parse_makefile(filename, vars=None, keep_unresolved=True): - """Parse a Makefile-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. 
- """ - import re - - if vars is None: - vars = {} - done = {} - notdone = {} - - with open(filename, encoding=sys.getfilesystemencoding(), - errors="surrogateescape") as f: - lines = f.readlines() - - for line in lines: - if line.startswith('#') or line.strip() == '': - continue - m = re.match(_variable_rx, line) - if m: - n, v = m.group(1, 2) - v = v.strip() - # `$$' is a literal `$' in make - tmpv = v.replace('$$', '') - - if "$" in tmpv: - notdone[n] = v - else: - try: - if n in _ALWAYS_STR: - raise ValueError - - v = int(v) - except ValueError: - # insert literal `$' - done[n] = v.replace('$$', '$') - else: - done[n] = v - - # do variable interpolation here - variables = list(notdone.keys()) - - # Variables with a 'PY_' prefix in the makefile. These need to - # be made available without that prefix through sysconfig. - # Special care is needed to ensure that variable expansion works, even - # if the expansion uses the name without a prefix. - renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') - - while len(variables) > 0: - for name in tuple(variables): - value = notdone[name] - m1 = re.search(_findvar1_rx, value) - m2 = re.search(_findvar2_rx, value) - if m1 and m2: - m = m1 if m1.start() < m2.start() else m2 - else: - m = m1 if m1 else m2 - if m is not None: - n = m.group(1) - found = True - if n in done: - item = str(done[n]) - elif n in notdone: - # get it on a subsequent round - found = False - elif n in os.environ: - # do it like make: fall back to environment - item = os.environ[n] - - elif n in renamed_variables: - if (name.startswith('PY_') and - name[3:] in renamed_variables): - item = "" - - elif 'PY_' + n in notdone: - found = False - - else: - item = str(done['PY_' + n]) - - else: - done[n] = item = "" - - if found: - after = value[m.end():] - value = value[:m.start()] + item + after - if "$" in after: - notdone[name] = value - else: - try: - if name in _ALWAYS_STR: - raise ValueError - value = int(value) - except ValueError: - done[name] = value.strip() - else: - done[name] = value - variables.remove(name) - - if name.startswith('PY_') \ - and name[3:] in renamed_variables: - - name = name[3:] - if name not in done: - done[name] = value - - else: - # Adds unresolved variables to the done dict. - # This is disabled when called from distutils.sysconfig - if keep_unresolved: - done[name] = value - # bogus variable reference (e.g. 
"prefix=$/opt/python"); - # just drop it since we can't deal - variables.remove(name) - - # strip spurious spaces - for k, v in done.items(): - if isinstance(v, str): - done[k] = v.strip() - - # save the results in the global dictionary - vars.update(done) - return vars - - -def _print_config_dict(d, stream): - print ("{", file=stream) - for k, v in sorted(d.items()): - print(f" {k!r}: {v!r},", file=stream) - print ("}", file=stream) - - -def _generate_posix_vars(): - """Generate the Python module containing build-time variables.""" - vars = {} - # load the installed Makefile: - makefile = get_makefile_filename() - try: - _parse_makefile(makefile, vars) - except OSError as e: - msg = f"invalid Python installation: unable to open {makefile}" - if hasattr(e, "strerror"): - msg = f"{msg} ({e.strerror})" - raise OSError(msg) - # load the installed pyconfig.h: - config_h = get_config_h_filename() - try: - with open(config_h, encoding="utf-8") as f: - parse_config_h(f, vars) - except OSError as e: - msg = f"invalid Python installation: unable to open {config_h}" - if hasattr(e, "strerror"): - msg = f"{msg} ({e.strerror})" - raise OSError(msg) - # On AIX, there are wrong paths to the linker scripts in the Makefile - # -- these paths are relative to the Python source, but when installed - # the scripts are in another directory. - if _PYTHON_BUILD: - vars['BLDSHARED'] = vars['LDSHARED'] - - # There's a chicken-and-egg situation on OS X with regards to the - # _sysconfigdata module after the changes introduced by #15298: - # get_config_vars() is called by get_platform() as part of the - # `make pybuilddir.txt` target -- which is a precursor to the - # _sysconfigdata.py module being constructed. Unfortunately, - # get_config_vars() eventually calls _init_posix(), which attempts - # to import _sysconfigdata, which we won't have built yet. In order - # for _init_posix() to work, if we're on Darwin, just mock up the - # _sysconfigdata module manually and populate it with the build vars. - # This is more than sufficient for ensuring the subsequent call to - # get_platform() succeeds. 
- name = _get_sysconfigdata_name() - if 'darwin' in sys.platform: - import types - module = types.ModuleType(name) - module.build_time_vars = vars - sys.modules[name] = module - - pybuilddir = f'build/lib.{get_platform()}-{get_python_version()}' - if hasattr(sys, "gettotalrefcount"): - pybuilddir += '-pydebug' - os.makedirs(pybuilddir, exist_ok=True) - destfile = os.path.join(pybuilddir, name + '.py') - - with open(destfile, 'w', encoding='utf8') as f: - f.write('# system configuration generated and used by' - ' the sysconfig module\n') - f.write('build_time_vars = ') - _print_config_dict(vars, stream=f) - - # Create file used for sys.path fixup -- see Modules/getpath.c - with open('pybuilddir.txt', 'w', encoding='utf8') as f: - f.write(pybuilddir) - - -def _print_dict(title, data): - for index, (key, value) in enumerate(sorted(data.items())): - if index == 0: - print(f'{title}: ') - print(f'\t{key} = "{value}"') - - -def _main(): - """Display all information sysconfig detains.""" - if '--generate-posix-vars' in sys.argv: - _generate_posix_vars() - return - print(f'Platform: "{get_platform()}"') - print(f'Python version: "{get_python_version()}"') - print(f'Current installation scheme: "{get_default_scheme()}"') - print() - _print_dict('Paths', get_paths()) - print() - _print_dict('Variables', get_config_vars()) - - -if __name__ == '__main__': - try: - _main() - except BrokenPipeError: - pass diff --git a/Python313_13_x64_Template/Lib/sysconfig/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/sysconfig/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 1855cff1..00000000 Binary files a/Python313_13_x64_Template/Lib/sysconfig/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/tabnanny.py b/Python313_13_x64_Template/Lib/tabnanny.py deleted file mode 100644 index d06c4c22..00000000 --- a/Python313_13_x64_Template/Lib/tabnanny.py +++ /dev/null @@ -1,340 +0,0 @@ -#! /usr/bin/env python3 - -"""The Tab Nanny despises ambiguous indentation. She knows no mercy. - -tabnanny -- Detection of ambiguous indentation - -For the time being this module is intended to be called as a script. -However it is possible to import it into an IDE and use the function -check() described below. - -Warning: The API provided by this module is likely to change in future -releases; such changes may not be backward compatible. -""" - -# Released to the public domain, by Tim Peters, 15 April 1998. - -# XXX Note: this is now a standard library module. -# XXX The API needs to undergo changes however; the current code is too -# XXX script-like. This will be addressed later. - -__version__ = "6" - -import os -import sys -import tokenize - -__all__ = ["check", "NannyNag", "process_tokens"] - -verbose = 0 -filename_only = 0 - -def errprint(*args): - sep = "" - for arg in args: - sys.stderr.write(sep + str(arg)) - sep = " " - sys.stderr.write("\n") - sys.exit(1) - -def main(): - import getopt - - global verbose, filename_only - try: - opts, args = getopt.getopt(sys.argv[1:], "qv") - except getopt.error as msg: - errprint(msg) - for o, a in opts: - if o == '-q': - filename_only = filename_only + 1 - if o == '-v': - verbose = verbose + 1 - if not args: - errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...") - for arg in args: - check(arg) - -class NannyNag(Exception): - """ - Raised by process_tokens() if detecting an ambiguous indent. - Captured and handled in check(). 
- """ - def __init__(self, lineno, msg, line): - self.lineno, self.msg, self.line = lineno, msg, line - def get_lineno(self): - return self.lineno - def get_msg(self): - return self.msg - def get_line(self): - return self.line - -def check(file): - """check(file_or_dir) - - If file_or_dir is a directory and not a symbolic link, then recursively - descend the directory tree named by file_or_dir, checking all .py files - along the way. If file_or_dir is an ordinary Python source file, it is - checked for whitespace related problems. The diagnostic messages are - written to standard output using the print statement. - """ - - if os.path.isdir(file) and not os.path.islink(file): - if verbose: - print("%r: listing directory" % (file,)) - names = os.listdir(file) - for name in names: - fullname = os.path.join(file, name) - if (os.path.isdir(fullname) and - not os.path.islink(fullname) or - os.path.normcase(name[-3:]) == ".py"): - check(fullname) - return - - try: - f = tokenize.open(file) - except OSError as msg: - errprint("%r: I/O Error: %s" % (file, msg)) - return - - if verbose > 1: - print("checking %r ..." % file) - - try: - process_tokens(tokenize.generate_tokens(f.readline)) - - except tokenize.TokenError as msg: - errprint("%r: Token Error: %s" % (file, msg)) - return - - except IndentationError as msg: - errprint("%r: Indentation Error: %s" % (file, msg)) - return - - except SyntaxError as msg: - errprint("%r: Syntax Error: %s" % (file, msg)) - return - - except NannyNag as nag: - badline = nag.get_lineno() - line = nag.get_line() - if verbose: - print("%r: *** Line %d: trouble in tab city! ***" % (file, badline)) - print("offending line: %r" % (line,)) - print(nag.get_msg()) - else: - if ' ' in file: file = '"' + file + '"' - if filename_only: print(file) - else: print(file, badline, repr(line)) - return - - finally: - f.close() - - if verbose: - print("%r: Clean bill of health." % (file,)) - -class Whitespace: - # the characters used for space and tab - S, T = ' \t' - - # members: - # raw - # the original string - # n - # the number of leading whitespace characters in raw - # nt - # the number of tabs in raw[:n] - # norm - # the normal form as a pair (count, trailing), where: - # count - # a tuple such that raw[:n] contains count[i] - # instances of S * i + T - # trailing - # the number of trailing spaces in raw[:n] - # It's A Theorem that m.indent_level(t) == - # n.indent_level(t) for all t >= 1 iff m.norm == n.norm. 
- # is_simple - # true iff raw[:n] is of the form (T*)(S*) - - def __init__(self, ws): - self.raw = ws - S, T = Whitespace.S, Whitespace.T - count = [] - b = n = nt = 0 - for ch in self.raw: - if ch == S: - n = n + 1 - b = b + 1 - elif ch == T: - n = n + 1 - nt = nt + 1 - if b >= len(count): - count = count + [0] * (b - len(count) + 1) - count[b] = count[b] + 1 - b = 0 - else: - break - self.n = n - self.nt = nt - self.norm = tuple(count), b - self.is_simple = len(count) <= 1 - - # return length of longest contiguous run of spaces (whether or not - # preceding a tab) - def longest_run_of_spaces(self): - count, trailing = self.norm - return max(len(count)-1, trailing) - - def indent_level(self, tabsize): - # count, il = self.norm - # for i in range(len(count)): - # if count[i]: - # il = il + (i//tabsize + 1)*tabsize * count[i] - # return il - - # quicker: - # il = trailing + sum (i//ts + 1)*ts*count[i] = - # trailing + ts * sum (i//ts + 1)*count[i] = - # trailing + ts * sum i//ts*count[i] + count[i] = - # trailing + ts * [(sum i//ts*count[i]) + (sum count[i])] = - # trailing + ts * [(sum i//ts*count[i]) + num_tabs] - # and note that i//ts*count[i] is 0 when i < ts - - count, trailing = self.norm - il = 0 - for i in range(tabsize, len(count)): - il = il + i//tabsize * count[i] - return trailing + tabsize * (il + self.nt) - - # return true iff self.indent_level(t) == other.indent_level(t) - # for all t >= 1 - def equal(self, other): - return self.norm == other.norm - - # return a list of tuples (ts, i1, i2) such that - # i1 == self.indent_level(ts) != other.indent_level(ts) == i2. - # Intended to be used after not self.equal(other) is known, in which - # case it will return at least one witnessing tab size. - def not_equal_witness(self, other): - n = max(self.longest_run_of_spaces(), - other.longest_run_of_spaces()) + 1 - a = [] - for ts in range(1, n+1): - if self.indent_level(ts) != other.indent_level(ts): - a.append( (ts, - self.indent_level(ts), - other.indent_level(ts)) ) - return a - - # Return True iff self.indent_level(t) < other.indent_level(t) - # for all t >= 1. - # The algorithm is due to Vincent Broman. - # Easy to prove it's correct. - # XXXpost that. - # Trivial to prove n is sharp (consider T vs ST). - # Unknown whether there's a faster general way. I suspected so at - # first, but no longer. - # For the special (but common!) case where M and N are both of the - # form (T*)(S*), M.less(N) iff M.len() < N.len() and - # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded. - # XXXwrite that up. - # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1. - def less(self, other): - if self.n >= other.n: - return False - if self.is_simple and other.is_simple: - return self.nt <= other.nt - n = max(self.longest_run_of_spaces(), - other.longest_run_of_spaces()) + 1 - # the self.n >= other.n test already did it for ts=1 - for ts in range(2, n+1): - if self.indent_level(ts) >= other.indent_level(ts): - return False - return True - - # return a list of tuples (ts, i1, i2) such that - # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2. - # Intended to be used after not self.less(other) is known, in which - # case it will return at least one witnessing tab size. 
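The indent-level comparisons sketched in the comments above surface through the module's public entry point; a minimal sketch that writes an ambiguously indented file and checks it (the file name is illustrative):

    import tabnanny

    # One line indented with a tab, the next with eight spaces: the two
    # agree at tab size 8 but disagree at other tab sizes.
    with open("ambiguous_demo.py", "w") as f:
        f.write("if True:\n\tx = 1\n        y = 2\n")

    tabnanny.check("ambiguous_demo.py")  # prints a report for the bad line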
- def not_less_witness(self, other): - n = max(self.longest_run_of_spaces(), - other.longest_run_of_spaces()) + 1 - a = [] - for ts in range(1, n+1): - if self.indent_level(ts) >= other.indent_level(ts): - a.append( (ts, - self.indent_level(ts), - other.indent_level(ts)) ) - return a - -def format_witnesses(w): - firsts = (str(tup[0]) for tup in w) - prefix = "at tab size" - if len(w) > 1: - prefix = prefix + "s" - return prefix + " " + ', '.join(firsts) - -def process_tokens(tokens): - try: - _process_tokens(tokens) - except TabError as e: - raise NannyNag(e.lineno, e.msg, e.text) - -def _process_tokens(tokens): - INDENT = tokenize.INDENT - DEDENT = tokenize.DEDENT - NEWLINE = tokenize.NEWLINE - JUNK = tokenize.COMMENT, tokenize.NL - indents = [Whitespace("")] - check_equal = 0 - - for (type, token, start, end, line) in tokens: - if type == NEWLINE: - # a program statement, or ENDMARKER, will eventually follow, - # after some (possibly empty) run of tokens of the form - # (NL | COMMENT)* (INDENT | DEDENT+)? - # If an INDENT appears, setting check_equal is wrong, and will - # be undone when we see the INDENT. - check_equal = 1 - - elif type == INDENT: - check_equal = 0 - thisguy = Whitespace(token) - if not indents[-1].less(thisguy): - witness = indents[-1].not_less_witness(thisguy) - msg = "indent not greater e.g. " + format_witnesses(witness) - raise NannyNag(start[0], msg, line) - indents.append(thisguy) - - elif type == DEDENT: - # there's nothing we need to check here! what's important is - # that when the run of DEDENTs ends, the indentation of the - # program statement (or ENDMARKER) that triggered the run is - # equal to what's left at the top of the indents stack - - # Ouch! This assert triggers if the last line of the source - # is indented *and* lacks a newline -- then DEDENTs pop out - # of thin air. - # assert check_equal # else no earlier NEWLINE, or an earlier INDENT - check_equal = 1 - - del indents[-1] - - elif check_equal and type not in JUNK: - # this is the first "real token" following a NEWLINE, so it - # must be the first token of the next program statement, or an - # ENDMARKER; the "line" argument exposes the leading whitespace - # for this statement; in the case of ENDMARKER, line is an empty - # string, so will properly match the empty string with which the - # "indents" stack was seeded - check_equal = 0 - thisguy = Whitespace(line) - if not indents[-1].equal(thisguy): - witness = indents[-1].not_equal_witness(thisguy) - msg = "indent not equal e.g. " + format_witnesses(witness) - raise NannyNag(start[0], msg, line) - - -if __name__ == '__main__': - main() diff --git a/Python313_13_x64_Template/Lib/tarfile.py b/Python313_13_x64_Template/Lib/tarfile.py deleted file mode 100644 index 533c0cc8..00000000 --- a/Python313_13_x64_Template/Lib/tarfile.py +++ /dev/null @@ -1,3091 +0,0 @@ -#!/usr/bin/env python3 -#------------------------------------------------------------------- -# tarfile.py -#------------------------------------------------------------------- -# Copyright (C) 2002 Lars Gustaebel -# All rights reserved. 
-# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation -# files (the "Software"), to deal in the Software without -# restriction, including without limitation the rights to use, -# copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following -# conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -"""Read from and write to tar format archives. -""" - -version = "0.9.0" -__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" -__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." - -#--------- -# Imports -#--------- -from builtins import open as bltn_open -import sys -import os -import io -import shutil -import stat -import time -import struct -import copy -import re - -try: - import pwd -except ImportError: - pwd = None -try: - import grp -except ImportError: - grp = None - -# os.symlink on Windows prior to 6.0 raises NotImplementedError -# OSError (winerror=1314) will be raised if the caller does not hold the -# SeCreateSymbolicLinkPrivilege privilege -symlink_exception = (AttributeError, NotImplementedError, OSError) - -# from tarfile import * -__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError", "ReadError", - "CompressionError", "StreamError", "ExtractError", "HeaderError", - "ENCODING", "USTAR_FORMAT", "GNU_FORMAT", "PAX_FORMAT", - "DEFAULT_FORMAT", "open","fully_trusted_filter", "data_filter", - "tar_filter", "FilterError", "AbsoluteLinkError", - "OutsideDestinationError", "SpecialFileError", "AbsolutePathError", - "LinkOutsideDestinationError", "LinkFallbackError"] - - -#--------------------------------------------------------- -# tar constants -#--------------------------------------------------------- -NUL = b"\0" # the null character -BLOCKSIZE = 512 # length of processing blocks -RECORDSIZE = BLOCKSIZE * 20 # length of records -GNU_MAGIC = b"ustar \0" # magic gnu tar string -POSIX_MAGIC = b"ustar\x0000" # magic posix tar string - -LENGTH_NAME = 100 # maximum length of a filename -LENGTH_LINK = 100 # maximum length of a linkname -LENGTH_PREFIX = 155 # maximum length of the prefix field - -REGTYPE = b"0" # regular file -AREGTYPE = b"\0" # regular file -LNKTYPE = b"1" # link (inside tarfile) -SYMTYPE = b"2" # symbolic link -CHRTYPE = b"3" # character special device -BLKTYPE = b"4" # block special device -DIRTYPE = b"5" # directory -FIFOTYPE = b"6" # fifo special device -CONTTYPE = b"7" # contiguous file - -GNUTYPE_LONGNAME = b"L" # GNU tar longname -GNUTYPE_LONGLINK = b"K" # GNU tar longlink -GNUTYPE_SPARSE = b"S" # GNU tar sparse file - -XHDTYPE = b"x" # POSIX.1-2001 extended header -XGLTYPE = b"g" # POSIX.1-2001 global header -SOLARIS_XHDTYPE = b"X" # Solaris extended header - -USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format -GNU_FORMAT = 1 # GNU tar format -PAX_FORMAT = 2 # 
POSIX.1-2001 (pax) format -DEFAULT_FORMAT = PAX_FORMAT - -#--------------------------------------------------------- -# tarfile constants -#--------------------------------------------------------- -# File types that tarfile supports: -SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, - SYMTYPE, DIRTYPE, FIFOTYPE, - CONTTYPE, CHRTYPE, BLKTYPE, - GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# File types that will be treated as a regular file. -REGULAR_TYPES = (REGTYPE, AREGTYPE, - CONTTYPE, GNUTYPE_SPARSE) - -# File types that are part of the GNU tar format. -GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# Fields from a pax header that override a TarInfo attribute. -PAX_FIELDS = ("path", "linkpath", "size", "mtime", - "uid", "gid", "uname", "gname") - -# Fields from a pax header that are affected by hdrcharset. -PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"} - -# Fields in a pax header that are numbers, all other fields -# are treated as strings. -PAX_NUMBER_FIELDS = { - "atime": float, - "ctime": float, - "mtime": float, - "uid": int, - "gid": int, - "size": int -} - -#--------------------------------------------------------- -# initialization -#--------------------------------------------------------- -if os.name == "nt": - ENCODING = "utf-8" -else: - ENCODING = sys.getfilesystemencoding() - -#--------------------------------------------------------- -# Some useful functions -#--------------------------------------------------------- - -def stn(s, length, encoding, errors): - """Convert a string to a null-terminated bytes object. - """ - if s is None: - raise ValueError("metadata cannot contain None") - s = s.encode(encoding, errors) - return s[:length] + (length - len(s)) * NUL - -def nts(s, encoding, errors): - """Convert a null-terminated bytes object to a string. - """ - p = s.find(b"\0") - if p != -1: - s = s[:p] - return s.decode(encoding, errors) - -def nti(s): - """Convert a number field to a python number. - """ - # There are two possible encodings for a number field, see - # itn() below. - if s[0] in (0o200, 0o377): - n = 0 - for i in range(len(s) - 1): - n <<= 8 - n += s[i + 1] - if s[0] == 0o377: - n = -(256 ** (len(s) - 1) - n) - else: - try: - s = nts(s, "ascii", "strict") - n = int(s.strip() or "0", 8) - except ValueError: - raise InvalidHeaderError("invalid header") - return n - -def itn(n, digits=8, format=DEFAULT_FORMAT): - """Convert a python number to a number field. - """ - # POSIX 1003.1-1988 requires numbers to be encoded as a string of - # octal digits followed by a null-byte, this allows values up to - # (8**(digits-1))-1. GNU tar allows storing numbers greater than - # that if necessary. A leading 0o200 or 0o377 byte indicate this - # particular encoding, the following digits-1 bytes are a big-endian - # base-256 representation. This allows values up to (256**(digits-1))-1. - # A 0o200 byte indicates a positive number, a 0o377 byte a negative - # number. 
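A self-contained sketch of the two number-field encodings described above; the helper names are illustrative rather than the module's own:

    NUL = b"\0"

    def itn_octal(n, digits=8):
        # POSIX form: digits-1 octal characters plus a terminating NUL;
        # representable range is 0 .. 8**(digits-1) - 1.
        return ("%0*o" % (digits - 1, n)).encode("ascii") + NUL

    def itn_base256(n, digits=8):
        # GNU form for positive n: a 0o200 marker byte followed by
        # digits-1 big-endian base-256 bytes.
        return bytes([0o200]) + n.to_bytes(digits - 1, "big")

    assert itn_octal(511) == b"0000777\x00"
    assert itn_base256(2097152) == b"\x80\x00\x00\x00\x00\x20\x00\x00"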
- original_n = n - n = int(n) - if 0 <= n < 8 ** (digits - 1): - s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL - elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1): - if n >= 0: - s = bytearray([0o200]) - else: - s = bytearray([0o377]) - n = 256 ** digits + n - - for i in range(digits - 1): - s.insert(1, n & 0o377) - n >>= 8 - else: - raise ValueError("overflow in number field") - - return s - -def calc_chksums(buf): - """Calculate the checksum for a member's header by summing up all - characters except for the chksum field which is treated as if - it was filled with spaces. According to the GNU tar sources, - some tars (Sun and NeXT) calculate chksum with signed char, - which will be different if there are chars in the buffer with - the high bit set. So we calculate two checksums, unsigned and - signed. - """ - unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf)) - signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf)) - return unsigned_chksum, signed_chksum - -def copyfileobj(src, dst, length=None, exception=OSError, bufsize=None): - """Copy length bytes from fileobj src to fileobj dst. - If length is None, copy the entire content. - """ - bufsize = bufsize or 16 * 1024 - if length == 0: - return - if length is None: - shutil.copyfileobj(src, dst, bufsize) - return - - blocks, remainder = divmod(length, bufsize) - for b in range(blocks): - buf = src.read(bufsize) - if len(buf) < bufsize: - raise exception("unexpected end of data") - dst.write(buf) - - if remainder != 0: - buf = src.read(remainder) - if len(buf) < remainder: - raise exception("unexpected end of data") - dst.write(buf) - return - -def _safe_print(s): - encoding = getattr(sys.stdout, 'encoding', None) - if encoding is not None: - s = s.encode(encoding, 'backslashreplace').decode(encoding) - print(s, end=' ') - - -class TarError(Exception): - """Base exception.""" - pass -class ExtractError(TarError): - """General exception for extract errors.""" - pass -class ReadError(TarError): - """Exception for unreadable tar archives.""" - pass -class CompressionError(TarError): - """Exception for unavailable compression methods.""" - pass -class StreamError(TarError): - """Exception for unsupported operations on stream-like TarFiles.""" - pass -class HeaderError(TarError): - """Base exception for header errors.""" - pass -class EmptyHeaderError(HeaderError): - """Exception for empty headers.""" - pass -class TruncatedHeaderError(HeaderError): - """Exception for truncated headers.""" - pass -class EOFHeaderError(HeaderError): - """Exception for end of file headers.""" - pass -class InvalidHeaderError(HeaderError): - """Exception for invalid headers.""" - pass -class SubsequentHeaderError(HeaderError): - """Exception for missing and invalid extended headers.""" - pass - -#--------------------------- -# internal stream interface -#--------------------------- -class _LowLevelFile: - """Low-level file object. Supports reading and writing. - It is used instead of a regular file object for streaming - access. - """ - - def __init__(self, name, mode): - mode = { - "r": os.O_RDONLY, - "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, - }[mode] - if hasattr(os, "O_BINARY"): - mode |= os.O_BINARY - self.fd = os.open(name, mode, 0o666) - - def close(self): - os.close(self.fd) - - def read(self, size): - return os.read(self.fd, size) - - def write(self, s): - os.write(self.fd, s) - -class _Stream: - """Class that serves as an adapter between TarFile and - a stream-like object. 
The stream-like object only - needs to have a read() or write() method that works with bytes, - and the method is accessed blockwise. - Use of gzip or bzip2 compression is possible. - A stream-like object could be for example: sys.stdin.buffer, - sys.stdout.buffer, a socket, a tape device etc. - - _Stream is intended to be used only internally. - """ - - def __init__(self, name, mode, comptype, fileobj, bufsize, - compresslevel): - """Construct a _Stream object. - """ - self._extfileobj = True - if fileobj is None: - fileobj = _LowLevelFile(name, mode) - self._extfileobj = False - - if comptype == '*': - # Enable transparent compression detection for the - # stream interface - fileobj = _StreamProxy(fileobj) - comptype = fileobj.getcomptype() - - self.name = os.fspath(name) if name is not None else "" - self.mode = mode - self.comptype = comptype - self.fileobj = fileobj - self.bufsize = bufsize - self.buf = b"" - self.pos = 0 - self.closed = False - - try: - if comptype == "gz": - try: - import zlib - except ImportError: - raise CompressionError("zlib module is not available") from None - self.zlib = zlib - self.crc = zlib.crc32(b"") - if mode == "r": - self.exception = zlib.error - self._init_read_gz() - else: - self._init_write_gz(compresslevel) - - elif comptype == "bz2": - try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") from None - if mode == "r": - self.dbuf = b"" - self.cmp = bz2.BZ2Decompressor() - self.exception = OSError - else: - self.cmp = bz2.BZ2Compressor(compresslevel) - - elif comptype == "xz": - try: - import lzma - except ImportError: - raise CompressionError("lzma module is not available") from None - if mode == "r": - self.dbuf = b"" - self.cmp = lzma.LZMADecompressor() - self.exception = lzma.LZMAError - else: - self.cmp = lzma.LZMACompressor() - - elif comptype != "tar": - raise CompressionError("unknown compression type %r" % comptype) - - except: - if not self._extfileobj: - self.fileobj.close() - self.closed = True - raise - - def __del__(self): - if hasattr(self, "closed") and not self.closed: - self.close() - - def _init_write_gz(self, compresslevel): - """Initialize for writing with gzip compression. - """ - self.cmp = self.zlib.compressobj(compresslevel, - self.zlib.DEFLATED, - -self.zlib.MAX_WBITS, - self.zlib.DEF_MEM_LEVEL, - 0) - timestamp = struct.pack("<L", int(time.time())) - self.__write(b"\037\213\010\010" + timestamp + b"\002\377") - if self.name.endswith(".gz"): - self.name = self.name[:-3] - # Honor "directory components removed" from RFC 1952 - self.name = os.path.basename(self.name) - # RFC 1952 says we must use ISO-8859-1 for the FNAME field. - self.__write(self.name.encode("iso-8859-1", "replace") + NUL) - - def write(self, s): - """Write string s to the stream. - """ - if self.comptype == "gz": - self.crc = self.zlib.crc32(s, self.crc) - self.pos += len(s) - if self.comptype != "tar": - s = self.cmp.compress(s) - self.__write(s) - - def __write(self, s): - """Write string s to the stream if a whole new block - is ready to be written. - """ - self.buf += s - while len(self.buf) > self.bufsize: - self.fileobj.write(self.buf[:self.bufsize]) - self.buf = self.buf[self.bufsize:] - - def close(self): - """Close the _Stream object. No operation should be - done on it afterwards. - """ - if self.closed: - return - - self.closed = True - try: - if self.mode == "w" and self.comptype != "tar": - self.buf += self.cmp.flush() - - if self.mode == "w" and self.buf: - self.fileobj.write(self.buf) - self.buf = b"" - if self.comptype == "gz": - self.fileobj.write(struct.pack("<L", self.crc)) - self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF)) - finally: - if not self._extfileobj: - self.fileobj.close() - - def _init_read_gz(self): - """Initialize for reading a gzip compressed fileobj. - """ - self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS) - self.dbuf = b"" - - # taken from gzip.GzipFile with some alterations - if self.__read(2) != b"\037\213": - raise ReadError("not a gzip file") - if self.__read(1) != b"\010": - raise CompressionError("unsupported compression method") - - flag = ord(self.__read(1)) - self.__read(6) - - if flag & 4: - xlen = ord(self.__read(1)) + 256 * ord(self.__read(1)) - self.read(xlen) - if flag & 8: - while True: - s = self.__read(1) - if not s or s == NUL: - break - if flag & 16: - while True: - s = self.__read(1) - if not s or s == NUL: - break - if flag & 2: - self.__read(2) - - def tell(self): - """Return the stream's file pointer position. - """ - return self.pos - - def seek(self, pos=0): - """Set the stream's file pointer to pos. Negative seeking - is forbidden. - """ - if pos - self.pos >= 0: - blocks, remainder = divmod(pos - self.pos, self.bufsize) - for i in range(blocks): - self.read(self.bufsize) - self.read(remainder) - else: - raise StreamError("seeking backwards is not allowed") - return self.pos - - def read(self, size): - """Return the next size number of bytes from the stream.""" - assert size is not None - buf = self._read(size) - self.pos += len(buf) - return buf - - def _read(self, size): - """Return size bytes from the stream. - """ - if self.comptype == "tar": - return self.__read(size) - - c = len(self.dbuf) - t = [self.dbuf] - while c < size: - # Skip underlying buffer to avoid unaligned double buffering.
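At the public level this machinery is reached through tarfile's streaming modes; a minimal sketch (the archive name is illustrative):

    import tarfile

    # "r|*" requests blockwise streaming with transparent compression
    # detection; the proxy sniffs the gzip/bzip2/xz magic bytes.
    with tarfile.open("backup.tar.gz", mode="r|*") as tf:
        for member in tf:
            print(member.name, member.size)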
- if self.buf: - buf = self.buf - self.buf = b"" - else: - buf = self.fileobj.read(self.bufsize) - if not buf: - break - try: - buf = self.cmp.decompress(buf) - except self.exception as e: - raise ReadError("invalid compressed data") from e - t.append(buf) - c += len(buf) - t = b"".join(t) - self.dbuf = t[size:] - return t[:size] - - def __read(self, size): - """Return size bytes from stream. If internal buffer is empty, - read another block from the stream. - """ - c = len(self.buf) - t = [self.buf] - while c < size: - buf = self.fileobj.read(self.bufsize) - if not buf: - break - t.append(buf) - c += len(buf) - t = b"".join(t) - self.buf = t[size:] - return t[:size] -# class _Stream - -class _StreamProxy(object): - """Small proxy class that enables transparent compression - detection for the Stream interface (mode 'r|*'). - """ - - def __init__(self, fileobj): - self.fileobj = fileobj - self.buf = self.fileobj.read(BLOCKSIZE) - - def read(self, size): - self.read = self.fileobj.read - return self.buf - - def getcomptype(self): - if self.buf.startswith(b"\x1f\x8b\x08"): - return "gz" - elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY": - return "bz2" - elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")): - return "xz" - else: - return "tar" - - def close(self): - self.fileobj.close() -# class StreamProxy - -#------------------------ -# Extraction file object -#------------------------ -class _FileInFile(object): - """A thin wrapper around an existing file object that - provides a part of its data as an individual file - object. - """ - - def __init__(self, fileobj, offset, size, name, blockinfo=None): - self.fileobj = fileobj - self.offset = offset - self.size = size - self.position = 0 - self.name = name - self.closed = False - - if blockinfo is None: - blockinfo = [(0, size)] - - # Construct a map with data and zero blocks. - self.map_index = 0 - self.map = [] - lastpos = 0 - realpos = self.offset - for offset, size in blockinfo: - if offset > lastpos: - self.map.append((False, lastpos, offset, None)) - self.map.append((True, offset, offset + size, realpos)) - realpos += size - lastpos = offset + size - if lastpos < self.size: - self.map.append((False, lastpos, self.size, None)) - - def flush(self): - pass - - @property - def mode(self): - return 'rb' - - def readable(self): - return True - - def writable(self): - return False - - def seekable(self): - return self.fileobj.seekable() - - def tell(self): - """Return the current file position. - """ - return self.position - - def seek(self, position, whence=io.SEEK_SET): - """Seek to a position in the file. - """ - if whence == io.SEEK_SET: - self.position = min(max(position, 0), self.size) - elif whence == io.SEEK_CUR: - if position < 0: - self.position = max(self.position + position, 0) - else: - self.position = min(self.position + position, self.size) - elif whence == io.SEEK_END: - self.position = max(min(self.size + position, self.size), 0) - else: - raise ValueError("Invalid argument") - return self.position - - def read(self, size=None): - """Read data from the file. 
- """ - if size is None: - size = self.size - self.position - else: - size = min(size, self.size - self.position) - - buf = b"" - while size > 0: - while True: - data, start, stop, offset = self.map[self.map_index] - if start <= self.position < stop: - break - else: - self.map_index += 1 - if self.map_index == len(self.map): - self.map_index = 0 - length = min(size, stop - self.position) - if data: - self.fileobj.seek(offset + (self.position - start)) - b = self.fileobj.read(length) - if len(b) != length: - raise ReadError("unexpected end of data") - buf += b - else: - buf += NUL * length - size -= length - self.position += length - return buf - - def readinto(self, b): - buf = self.read(len(b)) - b[:len(buf)] = buf - return len(buf) - - def close(self): - self.closed = True -#class _FileInFile - -class ExFileObject(io.BufferedReader): - - def __init__(self, tarfile, tarinfo): - fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, - tarinfo.size, tarinfo.name, tarinfo.sparse) - super().__init__(fileobj) -#class ExFileObject - - -#----------------------------- -# extraction filters (PEP 706) -#----------------------------- - -class FilterError(TarError): - pass - -class AbsolutePathError(FilterError): - def __init__(self, tarinfo): - self.tarinfo = tarinfo - super().__init__(f'member {tarinfo.name!r} has an absolute path') - -class OutsideDestinationError(FilterError): - def __init__(self, tarinfo, path): - self.tarinfo = tarinfo - self._path = path - super().__init__(f'{tarinfo.name!r} would be extracted to {path!r}, ' - + 'which is outside the destination') - -class SpecialFileError(FilterError): - def __init__(self, tarinfo): - self.tarinfo = tarinfo - super().__init__(f'{tarinfo.name!r} is a special file') - -class AbsoluteLinkError(FilterError): - def __init__(self, tarinfo): - self.tarinfo = tarinfo - super().__init__(f'{tarinfo.name!r} is a link to an absolute path') - -class LinkOutsideDestinationError(FilterError): - def __init__(self, tarinfo, path): - self.tarinfo = tarinfo - self._path = path - super().__init__(f'{tarinfo.name!r} would link to {path!r}, ' - + 'which is outside the destination') - -class LinkFallbackError(FilterError): - def __init__(self, tarinfo, path): - self.tarinfo = tarinfo - self._path = path - super().__init__(f'link {tarinfo.name!r} would be extracted as a ' - + f'copy of {path!r}, which was rejected') - -# Errors caused by filters -- both "fatal" and "non-fatal" -- that -# we consider to be issues with the argument, rather than a bug in the -# filter function -_FILTER_ERRORS = (FilterError, OSError, ExtractError) - -def _get_filtered_attrs(member, dest_path, for_data=True): - new_attrs = {} - name = member.name - dest_path = os.path.realpath(dest_path, strict=os.path.ALLOW_MISSING) - # Strip leading / (tar's directory separator) from filenames. - # Include os.sep (target OS directory separator) as well. - if name.startswith(('/', os.sep)): - name = new_attrs['name'] = member.path.lstrip('/' + os.sep) - if os.path.isabs(name): - # Path is absolute even after stripping. - # For example, 'C:/foo' on Windows. 
- raise AbsolutePathError(member) - # Ensure we stay in the destination - target_path = os.path.realpath(os.path.join(dest_path, name), - strict=os.path.ALLOW_MISSING) - if os.path.commonpath([target_path, dest_path]) != dest_path: - raise OutsideDestinationError(member, target_path) - # Limit permissions (no high bits, and go-w) - mode = member.mode - if mode is not None: - # Strip high bits & group/other write bits - mode = mode & 0o755 - if for_data: - # For data, handle permissions & file types - if member.isreg() or member.islnk(): - if not mode & 0o100: - # Clear executable bits if not executable by user - mode &= ~0o111 - # Ensure owner can read & write - mode |= 0o600 - elif member.isdir() or member.issym(): - # Ignore mode for directories & symlinks - mode = None - else: - # Reject special files - raise SpecialFileError(member) - if mode != member.mode: - new_attrs['mode'] = mode - if for_data: - # Ignore ownership for 'data' - if member.uid is not None: - new_attrs['uid'] = None - if member.gid is not None: - new_attrs['gid'] = None - if member.uname is not None: - new_attrs['uname'] = None - if member.gname is not None: - new_attrs['gname'] = None - # Check link destination for 'data' - if member.islnk() or member.issym(): - if os.path.isabs(member.linkname): - raise AbsoluteLinkError(member) - normalized = os.path.normpath(member.linkname) - if normalized != member.linkname: - new_attrs['linkname'] = normalized - if member.issym(): - target_path = os.path.join(dest_path, - os.path.dirname(name), - member.linkname) - else: - target_path = os.path.join(dest_path, - member.linkname) - target_path = os.path.realpath(target_path, - strict=os.path.ALLOW_MISSING) - if os.path.commonpath([target_path, dest_path]) != dest_path: - raise LinkOutsideDestinationError(member, target_path) - return new_attrs - -def fully_trusted_filter(member, dest_path): - return member - -def tar_filter(member, dest_path): - new_attrs = _get_filtered_attrs(member, dest_path, False) - if new_attrs: - return member.replace(**new_attrs, deep=False) - return member - -def data_filter(member, dest_path): - new_attrs = _get_filtered_attrs(member, dest_path, True) - if new_attrs: - return member.replace(**new_attrs, deep=False) - return member - -_NAMED_FILTERS = { - "fully_trusted": fully_trusted_filter, - "tar": tar_filter, - "data": data_filter, -} - -#------------------ -# Exported Classes -#------------------ - -# Sentinel for replace() defaults, meaning "don't change the attribute" -_KEEP = object() - -# Header length is digits followed by a space. -_header_length_prefix_re = re.compile(br"([0-9]{1,20}) ") - -class TarInfo(object): - """Informational class which holds the details about an - archive member given by a tar header block. - TarInfo objects are returned by TarFile.getmember(), - TarFile.getmembers() and TarFile.gettarinfo() and are - usually created internally. - """ - - __slots__ = dict( - name = 'Name of the archive member.', - mode = 'Permission bits.', - uid = 'User ID of the user who originally stored this member.', - gid = 'Group ID of the user who originally stored this member.', - size = 'Size in bytes.', - mtime = 'Time of last modification.', - chksum = 'Header checksum.', - type = ('File type. 
type is usually one of these constants: ' - 'REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, ' - 'CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_SPARSE.'), - linkname = ('Name of the target file name, which is only present ' - 'in TarInfo objects of type LNKTYPE and SYMTYPE.'), - uname = 'User name.', - gname = 'Group name.', - devmajor = 'Device major number.', - devminor = 'Device minor number.', - offset = 'The tar header starts here.', - offset_data = "The file's data starts here.", - pax_headers = ('A dictionary containing key-value pairs of an ' - 'associated pax extended header.'), - sparse = 'Sparse member information.', - _tarfile = None, - _sparse_structs = None, - _link_target = None, - ) - - def __init__(self, name=""): - """Construct a TarInfo object. name is the optional name - of the member. - """ - self.name = name # member name - self.mode = 0o644 # file permissions - self.uid = 0 # user id - self.gid = 0 # group id - self.size = 0 # file size - self.mtime = 0 # modification time - self.chksum = 0 # header checksum - self.type = REGTYPE # member type - self.linkname = "" # link name - self.uname = "" # user name - self.gname = "" # group name - self.devmajor = 0 # device major number - self.devminor = 0 # device minor number - - self.offset = 0 # the tar header starts here - self.offset_data = 0 # the file's data starts here - - self.sparse = None # sparse member information - self.pax_headers = {} # pax header information - - @property - def tarfile(self): - import warnings - warnings.warn( - 'The undocumented "tarfile" attribute of TarInfo objects ' - + 'is deprecated and will be removed in Python 3.16', - DeprecationWarning, stacklevel=2) - return self._tarfile - - @tarfile.setter - def tarfile(self, tarfile): - import warnings - warnings.warn( - 'The undocumented "tarfile" attribute of TarInfo objects ' - + 'is deprecated and will be removed in Python 3.16', - DeprecationWarning, stacklevel=2) - self._tarfile = tarfile - - @property - def path(self): - 'In pax headers, "name" is called "path".' - return self.name - - @path.setter - def path(self, name): - self.name = name - - @property - def linkpath(self): - 'In pax headers, "linkname" is called "linkpath".' - return self.linkname - - @linkpath.setter - def linkpath(self, linkname): - self.linkname = linkname - - def __repr__(self): - return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) - - def replace(self, *, - name=_KEEP, mtime=_KEEP, mode=_KEEP, linkname=_KEEP, - uid=_KEEP, gid=_KEEP, uname=_KEEP, gname=_KEEP, - deep=True, _KEEP=_KEEP): - """Return a deep copy of self with the given attributes replaced. - """ - if deep: - result = copy.deepcopy(self) - else: - result = copy.copy(self) - if name is not _KEEP: - result.name = name - if mtime is not _KEEP: - result.mtime = mtime - if mode is not _KEEP: - result.mode = mode - if linkname is not _KEEP: - result.linkname = linkname - if uid is not _KEEP: - result.uid = uid - if gid is not _KEEP: - result.gid = gid - if uname is not _KEEP: - result.uname = uname - if gname is not _KEEP: - result.gname = gname - return result - - def get_info(self): - """Return the TarInfo's attributes as a dictionary. 
- """ - if self.mode is None: - mode = None - else: - mode = self.mode & 0o7777 - info = { - "name": self.name, - "mode": mode, - "uid": self.uid, - "gid": self.gid, - "size": self.size, - "mtime": self.mtime, - "chksum": self.chksum, - "type": self.type, - "linkname": self.linkname, - "uname": self.uname, - "gname": self.gname, - "devmajor": self.devmajor, - "devminor": self.devminor - } - - if info["type"] == DIRTYPE and not info["name"].endswith("/"): - info["name"] += "/" - - return info - - def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): - """Return a tar header as a string of 512 byte blocks. - """ - info = self.get_info() - for name, value in info.items(): - if value is None: - raise ValueError("%s may not be None" % name) - - if format == USTAR_FORMAT: - return self.create_ustar_header(info, encoding, errors) - elif format == GNU_FORMAT: - return self.create_gnu_header(info, encoding, errors) - elif format == PAX_FORMAT: - return self.create_pax_header(info, encoding) - else: - raise ValueError("invalid format") - - def create_ustar_header(self, info, encoding, errors): - """Return the object as a ustar header block. - """ - info["magic"] = POSIX_MAGIC - - if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK: - raise ValueError("linkname is too long") - - if len(info["name"].encode(encoding, errors)) > LENGTH_NAME: - info["prefix"], info["name"] = self._posix_split_name(info["name"], encoding, errors) - - return self._create_header(info, USTAR_FORMAT, encoding, errors) - - def create_gnu_header(self, info, encoding, errors): - """Return the object as a GNU header block sequence. - """ - info["magic"] = GNU_MAGIC - - buf = b"" - if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK: - buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) - - if len(info["name"].encode(encoding, errors)) > LENGTH_NAME: - buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) - - return buf + self._create_header(info, GNU_FORMAT, encoding, errors) - - def create_pax_header(self, info, encoding): - """Return the object as a ustar header block. If it cannot be - represented this way, prepend a pax extended header sequence - with supplement information. - """ - info["magic"] = POSIX_MAGIC - pax_headers = self.pax_headers.copy() - - # Test string fields for values that exceed the field length or cannot - # be represented in ASCII encoding. - for name, hname, length in ( - ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), - ("uname", "uname", 32), ("gname", "gname", 32)): - - if hname in pax_headers: - # The pax header has priority. - continue - - # Try to encode the string as ASCII. - try: - info[name].encode("ascii", "strict") - except UnicodeEncodeError: - pax_headers[hname] = info[name] - continue - - if len(info[name]) > length: - pax_headers[hname] = info[name] - - # Test number fields for values that exceed the field limit or values - # that like to be stored as float. - for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): - needs_pax = False - - val = info[name] - val_is_float = isinstance(val, float) - val_int = round(val) if val_is_float else val - if not 0 <= val_int < 8 ** (digits - 1): - # Avoid overflow. - info[name] = 0 - needs_pax = True - elif val_is_float: - # Put rounded value in ustar header, and full - # precision value in pax header. - info[name] = val_int - needs_pax = True - - # The existing pax header has priority. 
- if needs_pax and name not in pax_headers: - pax_headers[name] = str(val) - - # Create a pax extended header if necessary. - if pax_headers: - buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) - else: - buf = b"" - - return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") - - @classmethod - def create_pax_global_header(cls, pax_headers): - """Return the object as a pax global header block sequence. - """ - return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8") - - def _posix_split_name(self, name, encoding, errors): - """Split a name longer than 100 chars into a prefix - and a name part. - """ - components = name.split("/") - for i in range(1, len(components)): - prefix = "/".join(components[:i]) - name = "/".join(components[i:]) - if len(prefix.encode(encoding, errors)) <= LENGTH_PREFIX and \ - len(name.encode(encoding, errors)) <= LENGTH_NAME: - break - else: - raise ValueError("name is too long") - - return prefix, name - - @staticmethod - def _create_header(info, format, encoding, errors): - """Return a header block. info is a dictionary with file - information, format must be one of the *_FORMAT constants. - """ - has_device_fields = info.get("type") in (CHRTYPE, BLKTYPE) - if has_device_fields: - devmajor = itn(info.get("devmajor", 0), 8, format) - devminor = itn(info.get("devminor", 0), 8, format) - else: - devmajor = stn("", 8, encoding, errors) - devminor = stn("", 8, encoding, errors) - - # None values in metadata should cause ValueError. - # itn()/stn() do this for all fields except type. - filetype = info.get("type", REGTYPE) - if filetype is None: - raise ValueError("TarInfo.type must not be None") - - parts = [ - stn(info.get("name", ""), 100, encoding, errors), - itn(info.get("mode", 0) & 0o7777, 8, format), - itn(info.get("uid", 0), 8, format), - itn(info.get("gid", 0), 8, format), - itn(info.get("size", 0), 12, format), - itn(info.get("mtime", 0), 12, format), - b" ", # checksum field - filetype, - stn(info.get("linkname", ""), 100, encoding, errors), - info.get("magic", POSIX_MAGIC), - stn(info.get("uname", ""), 32, encoding, errors), - stn(info.get("gname", ""), 32, encoding, errors), - devmajor, - devminor, - stn(info.get("prefix", ""), 155, encoding, errors) - ] - - buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) - chksum = calc_chksums(buf[-BLOCKSIZE:])[0] - buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:] - return buf - - @staticmethod - def _create_payload(payload): - """Return the string payload filled with zero bytes - up to the next 512 byte border. - """ - blocks, remainder = divmod(len(payload), BLOCKSIZE) - if remainder > 0: - payload += (BLOCKSIZE - remainder) * NUL - return payload - - @classmethod - def _create_gnu_long_header(cls, name, type, encoding, errors): - """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence - for name. - """ - name = name.encode(encoding, errors) + NUL - - info = {} - info["name"] = "././@LongLink" - info["type"] = type - info["size"] = len(name) - info["magic"] = GNU_MAGIC - - # create extended header + name blocks. - return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ - cls._create_payload(name) - - @classmethod - def _create_pax_generic_header(cls, pax_headers, type, encoding): - """Return a POSIX.1-2008 extended or global header sequence - that contains a list of keyword, value pairs. The values - must be strings. 
- """ - # Check if one of the fields contains surrogate characters and thereby - # forces hdrcharset=BINARY, see _proc_pax() for more information. - binary = False - for keyword, value in pax_headers.items(): - try: - value.encode("utf-8", "strict") - except UnicodeEncodeError: - binary = True - break - - records = b"" - if binary: - # Put the hdrcharset field at the beginning of the header. - records += b"21 hdrcharset=BINARY\n" - - for keyword, value in pax_headers.items(): - keyword = keyword.encode("utf-8") - if binary: - # Try to restore the original byte representation of `value'. - # Needless to say, that the encoding must match the string. - value = value.encode(encoding, "surrogateescape") - else: - value = value.encode("utf-8") - - l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' - n = p = 0 - while True: - n = l + len(str(p)) - if n == p: - break - p = n - records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" - - # We use a hardcoded "././@PaxHeader" name like star does - # instead of the one that POSIX recommends. - info = {} - info["name"] = "././@PaxHeader" - info["type"] = type - info["size"] = len(records) - info["magic"] = POSIX_MAGIC - - # Create pax header + record blocks. - return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ - cls._create_payload(records) - - @classmethod - def frombuf(cls, buf, encoding, errors): - """Construct a TarInfo object from a 512 byte bytes object. - - To support the old v7 tar format AREGTYPE headers are - transformed to DIRTYPE headers if their name ends in '/'. - """ - return cls._frombuf(buf, encoding, errors) - - @classmethod - def _frombuf(cls, buf, encoding, errors, *, dircheck=True): - """Construct a TarInfo object from a 512 byte bytes object. - - If ``dircheck`` is set to ``True`` then ``AREGTYPE`` headers will - be normalized to ``DIRTYPE`` if the name ends in a trailing slash. - ``dircheck`` must be set to ``False`` if this function is called - on a follow-up header such as ``GNUTYPE_LONGNAME``. - """ - if len(buf) == 0: - raise EmptyHeaderError("empty header") - if len(buf) != BLOCKSIZE: - raise TruncatedHeaderError("truncated header") - if buf.count(NUL) == BLOCKSIZE: - raise EOFHeaderError("end of file header") - - chksum = nti(buf[148:156]) - if chksum not in calc_chksums(buf): - raise InvalidHeaderError("bad checksum") - - obj = cls() - obj.name = nts(buf[0:100], encoding, errors) - obj.mode = nti(buf[100:108]) - obj.uid = nti(buf[108:116]) - obj.gid = nti(buf[116:124]) - obj.size = nti(buf[124:136]) - obj.mtime = nti(buf[136:148]) - obj.chksum = chksum - obj.type = buf[156:157] - obj.linkname = nts(buf[157:257], encoding, errors) - obj.uname = nts(buf[265:297], encoding, errors) - obj.gname = nts(buf[297:329], encoding, errors) - obj.devmajor = nti(buf[329:337]) - obj.devminor = nti(buf[337:345]) - prefix = nts(buf[345:500], encoding, errors) - - # Old V7 tar format represents a directory as a regular - # file with a trailing slash. - if dircheck and obj.type == AREGTYPE and obj.name.endswith("/"): - obj.type = DIRTYPE - - # The old GNU sparse format occupies some of the unused - # space in the buffer for up to 4 sparse structures. - # Save them for later processing in _proc_sparse(). 
- if obj.type == GNUTYPE_SPARSE: - pos = 386 - structs = [] - for i in range(4): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[482]) - origsize = nti(buf[483:495]) - obj._sparse_structs = (structs, isextended, origsize) - - # Remove redundant slashes from directories. - if obj.isdir(): - obj.name = obj.name.rstrip("/") - - # Reconstruct a ustar longname. - if prefix and obj.type not in GNU_TYPES: - obj.name = prefix + "/" + obj.name - return obj - - @classmethod - def fromtarfile(cls, tarfile): - """Return the next TarInfo object from TarFile object - tarfile. - """ - return cls._fromtarfile(tarfile) - - @classmethod - def _fromtarfile(cls, tarfile, *, dircheck=True): - """ - See dircheck documentation in _frombuf(). - """ - buf = tarfile.fileobj.read(BLOCKSIZE) - obj = cls._frombuf(buf, tarfile.encoding, tarfile.errors, dircheck=dircheck) - obj.offset = tarfile.fileobj.tell() - BLOCKSIZE - return obj._proc_member(tarfile) - - #-------------------------------------------------------------------------- - # The following are methods that are called depending on the type of a - # member. The entry point is _proc_member() which can be overridden in a - # subclass to add custom _proc_*() methods. A _proc_*() method MUST - # implement the following - # operations: - # 1. Set self.offset_data to the position where the data blocks begin, - # if there is data that follows. - # 2. Set tarfile.offset to the position where the next member's header will - # begin. - # 3. Return self or another valid TarInfo object. - def _proc_member(self, tarfile): - """Choose the right processing method depending on - the type and call it. - """ - if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): - return self._proc_gnulong(tarfile) - elif self.type == GNUTYPE_SPARSE: - return self._proc_sparse(tarfile) - elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): - return self._proc_pax(tarfile) - else: - return self._proc_builtin(tarfile) - - def _proc_builtin(self, tarfile): - """Process a builtin type or an unknown type which - will be treated as a regular file. - """ - self.offset_data = tarfile.fileobj.tell() - offset = self.offset_data - if self.isreg() or self.type not in SUPPORTED_TYPES: - # Skip the following data blocks. - offset += self._block(self.size) - tarfile.offset = offset - - # Patch the TarInfo object with saved global - # header information. - self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) - - # Remove redundant slashes from directories. This is to be consistent - # with frombuf(). - if self.isdir(): - self.name = self.name.rstrip("/") - - return self - - def _proc_gnulong(self, tarfile): - """Process the blocks that hold a GNU longname - or longlink member. - """ - buf = tarfile.fileobj.read(self._block(self.size)) - - # Fetch the next header and process it. - try: - next = self._fromtarfile(tarfile, dircheck=False) - except HeaderError as e: - raise SubsequentHeaderError(str(e)) from None - - # Patch the TarInfo object from the next header with - # the longname information. - next.offset = self.offset - if self.type == GNUTYPE_LONGNAME: - next.name = nts(buf, tarfile.encoding, tarfile.errors) - elif self.type == GNUTYPE_LONGLINK: - next.linkname = nts(buf, tarfile.encoding, tarfile.errors) - - # Remove redundant slashes from directories. This is to be consistent - # with frombuf(). 
- if next.isdir(): - next.name = next.name.removesuffix("/") - - return next - - def _proc_sparse(self, tarfile): - """Process a GNU sparse header plus extra headers. - """ - # We already collected some sparse structures in frombuf(). - structs, isextended, origsize = self._sparse_structs - del self._sparse_structs - - # Collect sparse structures from extended header blocks. - while isextended: - buf = tarfile.fileobj.read(BLOCKSIZE) - pos = 0 - for i in range(21): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - if offset and numbytes: - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[504]) - self.sparse = structs - - self.offset_data = tarfile.fileobj.tell() - tarfile.offset = self.offset_data + self._block(self.size) - self.size = origsize - return self - - def _proc_pax(self, tarfile): - """Process an extended or global header as described in - POSIX.1-2008. - """ - # Read the header information. - buf = tarfile.fileobj.read(self._block(self.size)) - - # A pax header stores supplemental information for either - # the following file (extended) or all following files - # (global). - if self.type == XGLTYPE: - pax_headers = tarfile.pax_headers - else: - pax_headers = tarfile.pax_headers.copy() - - # Parse pax header information. A record looks like that: - # "%d %s=%s\n" % (length, keyword, value). length is the size - # of the complete record including the length field itself and - # the newline. - pos = 0 - encoding = None - raw_headers = [] - while len(buf) > pos and buf[pos] != 0x00: - if not (match := _header_length_prefix_re.match(buf, pos)): - raise InvalidHeaderError("invalid header") - try: - length = int(match.group(1)) - except ValueError: - raise InvalidHeaderError("invalid header") - # Headers must be at least 5 bytes, shortest being '5 x=\n'. - # Value is allowed to be empty. - if length < 5: - raise InvalidHeaderError("invalid header") - if pos + length > len(buf): - raise InvalidHeaderError("invalid header") - - header_value_end_offset = match.start(1) + length - 1 # Last byte of the header - keyword_and_value = buf[match.end(1) + 1:header_value_end_offset] - raw_keyword, equals, raw_value = keyword_and_value.partition(b"=") - - # Check the framing of the header. The last character must be '\n' (0x0A) - if not raw_keyword or equals != b"=" or buf[header_value_end_offset] != 0x0A: - raise InvalidHeaderError("invalid header") - raw_headers.append((length, raw_keyword, raw_value)) - - # Check if the pax header contains a hdrcharset field. This tells us - # the encoding of the path, linkpath, uname and gname fields. Normally, - # these fields are UTF-8 encoded but since POSIX.1-2008 tar - # implementations are allowed to store them as raw binary strings if - # the translation to UTF-8 fails. For the time being, we don't care about - # anything other than "BINARY". The only other value that is currently - # allowed by the standard is "ISO-IR 10646 2000 UTF-8" in other words UTF-8. - # Note that we only follow the initial 'hdrcharset' setting to preserve - # the initial behavior of the 'tarfile' module. - if raw_keyword == b"hdrcharset" and encoding is None: - if raw_value == b"BINARY": - encoding = tarfile.encoding - else: # This branch ensures only the first 'hdrcharset' header is used. - encoding = "utf-8" - - pos += length - - # If no explicit hdrcharset is set, we use UTF-8 as a default. 
- if encoding is None: - encoding = "utf-8" - - # After parsing the raw headers we can decode them to text. - for length, raw_keyword, raw_value in raw_headers: - # Normally, we could just use "utf-8" as the encoding and "strict" - # as the error handler, but we better not take the risk. For - # example, GNU tar <= 1.23 is known to store filenames it cannot - # translate to UTF-8 as raw strings (unfortunately without a - # hdrcharset=BINARY header). - # We first try the strict standard encoding, and if that fails we - # fall back on the user's encoding and error handler. - keyword = self._decode_pax_field(raw_keyword, "utf-8", "utf-8", - tarfile.errors) - if keyword in PAX_NAME_FIELDS: - value = self._decode_pax_field(raw_value, encoding, tarfile.encoding, - tarfile.errors) - else: - value = self._decode_pax_field(raw_value, "utf-8", "utf-8", - tarfile.errors) - - pax_headers[keyword] = value - - # Fetch the next header. - try: - next = self._fromtarfile(tarfile, dircheck=False) - except HeaderError as e: - raise SubsequentHeaderError(str(e)) from None - - # Process GNU sparse information. - if "GNU.sparse.map" in pax_headers: - # GNU extended sparse format version 0.1. - self._proc_gnusparse_01(next, pax_headers) - - elif "GNU.sparse.size" in pax_headers: - # GNU extended sparse format version 0.0. - self._proc_gnusparse_00(next, raw_headers) - - elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": - # GNU extended sparse format version 1.0. - self._proc_gnusparse_10(next, pax_headers, tarfile) - - if self.type in (XHDTYPE, SOLARIS_XHDTYPE): - # Patch the TarInfo object with the extended header info. - next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) - next.offset = self.offset - - if "size" in pax_headers: - # If the extended header replaces the size field, - # we need to recalculate the offset where the next - # header starts. - offset = next.offset_data - if next.isreg() or next.type not in SUPPORTED_TYPES: - offset += next._block(next.size) - tarfile.offset = offset - - return next - - def _proc_gnusparse_00(self, next, raw_headers): - """Process a GNU tar extended sparse header, version 0.0. - """ - offsets = [] - numbytes = [] - for _, keyword, value in raw_headers: - if keyword == b"GNU.sparse.offset": - try: - offsets.append(int(value.decode())) - except ValueError: - raise InvalidHeaderError("invalid header") - - elif keyword == b"GNU.sparse.numbytes": - try: - numbytes.append(int(value.decode())) - except ValueError: - raise InvalidHeaderError("invalid header") - - next.sparse = list(zip(offsets, numbytes)) - - def _proc_gnusparse_01(self, next, pax_headers): - """Process a GNU tar extended sparse header, version 0.1. - """ - sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _proc_gnusparse_10(self, next, pax_headers, tarfile): - """Process a GNU tar extended sparse header, version 1.0. - """ - fields = None - sparse = [] - buf = tarfile.fileobj.read(BLOCKSIZE) - fields, buf = buf.split(b"\n", 1) - fields = int(fields) - while len(sparse) < fields * 2: - if b"\n" not in buf: - buf += tarfile.fileobj.read(BLOCKSIZE) - number, buf = buf.split(b"\n", 1) - sparse.append(int(number)) - next.offset_data = tarfile.fileobj.tell() - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _apply_pax_info(self, pax_headers, encoding, errors): - """Replace fields with supplemental information from a previous - pax extended or global header. 
- """ - for keyword, value in pax_headers.items(): - if keyword == "GNU.sparse.name": - setattr(self, "path", value) - elif keyword == "GNU.sparse.size": - setattr(self, "size", int(value)) - elif keyword == "GNU.sparse.realsize": - setattr(self, "size", int(value)) - elif keyword in PAX_FIELDS: - if keyword in PAX_NUMBER_FIELDS: - try: - value = PAX_NUMBER_FIELDS[keyword](value) - except ValueError: - value = 0 - if keyword == "path": - value = value.rstrip("/") - setattr(self, keyword, value) - - self.pax_headers = pax_headers.copy() - - def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): - """Decode a single field from a pax record. - """ - try: - return value.decode(encoding, "strict") - except UnicodeDecodeError: - return value.decode(fallback_encoding, fallback_errors) - - def _block(self, count): - """Round up a byte count by BLOCKSIZE and return it, - e.g. _block(834) => 1024. - """ - # Only non-negative offsets are allowed - if count < 0: - raise InvalidHeaderError("invalid offset") - blocks, remainder = divmod(count, BLOCKSIZE) - if remainder: - blocks += 1 - return blocks * BLOCKSIZE - - def isreg(self): - 'Return True if the Tarinfo object is a regular file.' - return self.type in REGULAR_TYPES - - def isfile(self): - 'Return True if the Tarinfo object is a regular file.' - return self.isreg() - - def isdir(self): - 'Return True if it is a directory.' - return self.type == DIRTYPE - - def issym(self): - 'Return True if it is a symbolic link.' - return self.type == SYMTYPE - - def islnk(self): - 'Return True if it is a hard link.' - return self.type == LNKTYPE - - def ischr(self): - 'Return True if it is a character device.' - return self.type == CHRTYPE - - def isblk(self): - 'Return True if it is a block device.' - return self.type == BLKTYPE - - def isfifo(self): - 'Return True if it is a FIFO.' - return self.type == FIFOTYPE - - def issparse(self): - return self.sparse is not None - - def isdev(self): - 'Return True if it is one of character device, block device or FIFO.' - return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) -# class TarInfo - -class TarFile(object): - """The TarFile Class provides an interface to tar archives. - """ - - debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) - - dereference = False # If true, add content of linked file to the - # tar file, else the link. - - ignore_zeros = False # If true, skips empty or invalid blocks and - # continues processing. - - errorlevel = 1 # If 0, fatal errors only appear in debug - # messages (if debug >= 0). If > 0, errors - # are passed to the caller as exceptions. - - format = DEFAULT_FORMAT # The format to use when creating an archive. - - encoding = ENCODING # Encoding for 8-bit character strings. - - errors = None # Error handler for unicode conversion. - - tarinfo = TarInfo # The default TarInfo class to use. - - fileobject = ExFileObject # The file-object for extractfile(). - - extraction_filter = None # The default filter for extraction. - - def __init__(self, name=None, mode="r", fileobj=None, format=None, - tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, - errors="surrogateescape", pax_headers=None, debug=None, - errorlevel=None, copybufsize=None, stream=False): - """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to - read from an existing archive, 'a' to append data to an existing - file or 'w' to create a new file overwriting an existing one. `mode' - defaults to 'r'. - If `fileobj' is given, it is used for reading or writing data. 
If it - can be determined, `mode' is overridden by `fileobj's mode. - `fileobj' is not closed, when TarFile is closed. - """ - modes = {"r": "rb", "a": "r+b", "w": "wb", "x": "xb"} - if mode not in modes: - raise ValueError("mode must be 'r', 'a', 'w' or 'x'") - self.mode = mode - self._mode = modes[mode] - - if not fileobj: - if self.mode == "a" and not os.path.exists(name): - # Create nonexistent files in append mode. - self.mode = "w" - self._mode = "wb" - fileobj = bltn_open(name, self._mode) - self._extfileobj = False - else: - if (name is None and hasattr(fileobj, "name") and - isinstance(fileobj.name, (str, bytes))): - name = fileobj.name - if hasattr(fileobj, "mode"): - self._mode = fileobj.mode - self._extfileobj = True - self.name = os.path.abspath(name) if name else None - self.fileobj = fileobj - - self.stream = stream - - # Init attributes. - if format is not None: - self.format = format - if tarinfo is not None: - self.tarinfo = tarinfo - if dereference is not None: - self.dereference = dereference - if ignore_zeros is not None: - self.ignore_zeros = ignore_zeros - if encoding is not None: - self.encoding = encoding - self.errors = errors - - if pax_headers is not None and self.format == PAX_FORMAT: - self.pax_headers = pax_headers - else: - self.pax_headers = {} - - if debug is not None: - self.debug = debug - if errorlevel is not None: - self.errorlevel = errorlevel - - # Init datastructures. - self.copybufsize = copybufsize - self.closed = False - self.members = [] # list of members as TarInfo objects - self._loaded = False # flag if all members have been read - self.offset = self.fileobj.tell() - # current position in the archive file - self.inodes = {} # dictionary caching the inodes of - # archive members already added - - try: - if self.mode == "r": - self.firstmember = None - self.firstmember = self.next() - - if self.mode == "a": - # Move to the end of the archive, - # before the first empty block. - while True: - self.fileobj.seek(self.offset) - try: - tarinfo = self.tarinfo.fromtarfile(self) - self.members.append(tarinfo) - except EOFHeaderError: - self.fileobj.seek(self.offset) - break - except HeaderError as e: - raise ReadError(str(e)) from None - - if self.mode in ("a", "w", "x"): - self._loaded = True - - if self.pax_headers: - buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) - self.fileobj.write(buf) - self.offset += len(buf) - except: - if not self._extfileobj: - self.fileobj.close() - self.closed = True - raise - - #-------------------------------------------------------------------------- - # Below are the classmethods which act as alternate constructors to the - # TarFile class. The open() method is the only one that is needed for - # public use; it is the "super"-constructor and is able to select an - # adequate "sub"-constructor for a particular compression using the mapping - # from OPEN_METH. - # - # This concept allows one to subclass TarFile without losing the comfort of - # the super-constructor. A sub-constructor is registered and made available - # by adding it to the mapping in OPEN_METH. - - @classmethod - def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): - """Open a tar archive for reading, writing or appending. Return - an appropriate TarFile class. 
- - mode: - 'r' or 'r:*' open for reading with transparent compression - 'r:' open for reading exclusively uncompressed - 'r:gz' open for reading with gzip compression - 'r:bz2' open for reading with bzip2 compression - 'r:xz' open for reading with lzma compression - 'a' or 'a:' open for appending, creating the file if necessary - 'w' or 'w:' open for writing without compression - 'w:gz' open for writing with gzip compression - 'w:bz2' open for writing with bzip2 compression - 'w:xz' open for writing with lzma compression - - 'x' or 'x:' create a tarfile exclusively without compression, raise - an exception if the file is already created - 'x:gz' create a gzip compressed tarfile, raise an exception - if the file is already created - 'x:bz2' create a bzip2 compressed tarfile, raise an exception - if the file is already created - 'x:xz' create an lzma compressed tarfile, raise an exception - if the file is already created - - 'r|*' open a stream of tar blocks with transparent compression - 'r|' open an uncompressed stream of tar blocks for reading - 'r|gz' open a gzip compressed stream of tar blocks - 'r|bz2' open a bzip2 compressed stream of tar blocks - 'r|xz' open an lzma compressed stream of tar blocks - 'w|' open an uncompressed stream for writing - 'w|gz' open a gzip compressed stream for writing - 'w|bz2' open a bzip2 compressed stream for writing - 'w|xz' open an lzma compressed stream for writing - """ - - if not name and not fileobj: - raise ValueError("nothing to open") - - if mode in ("r", "r:*"): - # Find out which *open() is appropriate for opening the file. - def not_compressed(comptype): - return cls.OPEN_METH[comptype] == 'taropen' - error_msgs = [] - for comptype in sorted(cls.OPEN_METH, key=not_compressed): - func = getattr(cls, cls.OPEN_METH[comptype]) - if fileobj is not None: - saved_pos = fileobj.tell() - try: - return func(name, "r", fileobj, **kwargs) - except (ReadError, CompressionError) as e: - error_msgs.append(f'- method {comptype}: {e!r}') - if fileobj is not None: - fileobj.seek(saved_pos) - continue - error_msgs_summary = '\n'.join(error_msgs) - raise ReadError(f"file could not be opened successfully:\n{error_msgs_summary}") - - elif ":" in mode: - filemode, comptype = mode.split(":", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - # Select the *open() function according to - # given compression. - if comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - else: - raise CompressionError("unknown compression type %r" % comptype) - return func(name, filemode, fileobj, **kwargs) - - elif "|" in mode: - filemode, comptype = mode.split("|", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - if filemode not in ("r", "w"): - raise ValueError("mode must be 'r' or 'w'") - - compresslevel = kwargs.pop("compresslevel", 9) - stream = _Stream(name, filemode, comptype, fileobj, bufsize, - compresslevel) - try: - t = cls(name, filemode, stream, **kwargs) - except: - stream.close() - raise - t._extfileobj = False - return t - - elif mode in ("a", "w", "x"): - return cls.taropen(name, mode, fileobj, **kwargs) - - raise ValueError("undiscernible mode") - - @classmethod - def taropen(cls, name, mode="r", fileobj=None, **kwargs): - """Open uncompressed tar archive name for reading or writing. 
- """ - if mode not in ("r", "a", "w", "x"): - raise ValueError("mode must be 'r', 'a', 'w' or 'x'") - return cls(name, mode, fileobj, **kwargs) - - @classmethod - def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open gzip compressed tar archive name for reading or writing. - Appending is not allowed. - """ - if mode not in ("r", "w", "x"): - raise ValueError("mode must be 'r', 'w' or 'x'") - - try: - from gzip import GzipFile - except ImportError: - raise CompressionError("gzip module is not available") from None - - try: - fileobj = GzipFile(name, mode + "b", compresslevel, fileobj) - except OSError as e: - if fileobj is not None and mode == 'r': - raise ReadError("not a gzip file") from e - raise - - try: - t = cls.taropen(name, mode, fileobj, **kwargs) - except OSError as e: - fileobj.close() - if mode == 'r': - raise ReadError("not a gzip file") from e - raise - except: - fileobj.close() - raise - t._extfileobj = False - return t - - @classmethod - def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open bzip2 compressed tar archive name for reading or writing. - Appending is not allowed. - """ - if mode not in ("r", "w", "x"): - raise ValueError("mode must be 'r', 'w' or 'x'") - - try: - from bz2 import BZ2File - except ImportError: - raise CompressionError("bz2 module is not available") from None - - fileobj = BZ2File(fileobj or name, mode, compresslevel=compresslevel) - - try: - t = cls.taropen(name, mode, fileobj, **kwargs) - except (OSError, EOFError) as e: - fileobj.close() - if mode == 'r': - raise ReadError("not a bzip2 file") from e - raise - except: - fileobj.close() - raise - t._extfileobj = False - return t - - @classmethod - def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs): - """Open lzma compressed tar archive name for reading or writing. - Appending is not allowed. - """ - if mode not in ("r", "w", "x"): - raise ValueError("mode must be 'r', 'w' or 'x'") - - try: - from lzma import LZMAFile, LZMAError - except ImportError: - raise CompressionError("lzma module is not available") from None - - fileobj = LZMAFile(fileobj or name, mode, preset=preset) - - try: - t = cls.taropen(name, mode, fileobj, **kwargs) - except (LZMAError, EOFError) as e: - fileobj.close() - if mode == 'r': - raise ReadError("not an lzma file") from e - raise - except: - fileobj.close() - raise - t._extfileobj = False - return t - - # All *open() methods are registered here. - OPEN_METH = { - "tar": "taropen", # uncompressed tar - "gz": "gzopen", # gzip compressed tar - "bz2": "bz2open", # bzip2 compressed tar - "xz": "xzopen" # lzma compressed tar - } - - #-------------------------------------------------------------------------- - # The public methods which TarFile provides: - - def close(self): - """Close the TarFile. In write-mode, two finishing zero blocks are - appended to the archive. - """ - if self.closed: - return - - self.closed = True - try: - if self.mode in ("a", "w", "x"): - self.fileobj.write(NUL * (BLOCKSIZE * 2)) - self.offset += (BLOCKSIZE * 2) - # fill up the end with zero-blocks - # (like option -b20 for tar does) - blocks, remainder = divmod(self.offset, RECORDSIZE) - if remainder > 0: - self.fileobj.write(NUL * (RECORDSIZE - remainder)) - finally: - if not self._extfileobj: - self.fileobj.close() - - def getmember(self, name): - """Return a TarInfo object for member `name'. If `name' can not be - found in the archive, KeyError is raised. 
If a member occurs more - than once in the archive, its last occurrence is assumed to be the - most up-to-date version. - """ - tarinfo = self._getmember(name.rstrip('/')) - if tarinfo is None: - raise KeyError("filename %r not found" % name) - return tarinfo - - def getmembers(self): - """Return the members of the archive as a list of TarInfo objects. The - list has the same order as the members in the archive. - """ - self._check() - if not self._loaded: # if we want to obtain a list of - self._load() # all members, we first have to - # scan the whole archive. - return self.members - - def getnames(self): - """Return the members of the archive as a list of their names. It has - the same order as the list returned by getmembers(). - """ - return [tarinfo.name for tarinfo in self.getmembers()] - - def gettarinfo(self, name=None, arcname=None, fileobj=None): - """Create a TarInfo object from the result of os.stat or equivalent - on an existing file. The file is either named by `name', or - specified as a file object `fileobj' with a file descriptor. If - given, `arcname' specifies an alternative name for the file in the - archive, otherwise, the name is taken from the 'name' attribute of - 'fileobj', or the 'name' argument. The name should be a text - string. - """ - self._check("awx") - - # When fileobj is given, replace name by - # fileobj's real name. - if fileobj is not None: - name = fileobj.name - - # Building the name of the member in the archive. - # Backward slashes are converted to forward slashes, - # Absolute paths are turned to relative paths. - if arcname is None: - arcname = name - drv, arcname = os.path.splitdrive(arcname) - arcname = arcname.replace(os.sep, "/") - arcname = arcname.lstrip("/") - - # Now, fill the TarInfo object with - # information specific for the file. - tarinfo = self.tarinfo() - tarinfo._tarfile = self # To be removed in 3.16. - - # Use os.stat or os.lstat, depending on if symlinks shall be resolved. - if fileobj is None: - if not self.dereference: - statres = os.lstat(name) - else: - statres = os.stat(name) - else: - statres = os.fstat(fileobj.fileno()) - linkname = "" - - stmd = statres.st_mode - if stat.S_ISREG(stmd): - inode = (statres.st_ino, statres.st_dev) - if not self.dereference and statres.st_nlink > 1 and \ - inode in self.inodes and arcname != self.inodes[inode]: - # Is it a hardlink to an already - # archived file? - type = LNKTYPE - linkname = self.inodes[inode] - else: - # The inode is added only if its valid. - # For win32 it is always 0. - type = REGTYPE - if inode[0]: - self.inodes[inode] = arcname - elif stat.S_ISDIR(stmd): - type = DIRTYPE - elif stat.S_ISFIFO(stmd): - type = FIFOTYPE - elif stat.S_ISLNK(stmd): - type = SYMTYPE - linkname = os.readlink(name) - elif stat.S_ISCHR(stmd): - type = CHRTYPE - elif stat.S_ISBLK(stmd): - type = BLKTYPE - else: - return None - - # Fill the TarInfo object with all - # information we can get. 
- tarinfo.name = arcname - tarinfo.mode = stmd - tarinfo.uid = statres.st_uid - tarinfo.gid = statres.st_gid - if type == REGTYPE: - tarinfo.size = statres.st_size - else: - tarinfo.size = 0 - tarinfo.mtime = statres.st_mtime - tarinfo.type = type - tarinfo.linkname = linkname - if pwd: - try: - tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] - except KeyError: - pass - if grp: - try: - tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] - except KeyError: - pass - - if type in (CHRTYPE, BLKTYPE): - if hasattr(os, "major") and hasattr(os, "minor"): - tarinfo.devmajor = os.major(statres.st_rdev) - tarinfo.devminor = os.minor(statres.st_rdev) - return tarinfo - - def list(self, verbose=True, *, members=None): - """Print a table of contents to sys.stdout. If `verbose' is False, only - the names of the members are printed. If it is True, an `ls -l'-like - output is produced. `members' is optional and must be a subset of the - list returned by getmembers(). - """ - # Convert tarinfo type to stat type. - type2mode = {REGTYPE: stat.S_IFREG, SYMTYPE: stat.S_IFLNK, - FIFOTYPE: stat.S_IFIFO, CHRTYPE: stat.S_IFCHR, - DIRTYPE: stat.S_IFDIR, BLKTYPE: stat.S_IFBLK} - self._check() - - if members is None: - members = self - for tarinfo in members: - if verbose: - if tarinfo.mode is None: - _safe_print("??????????") - else: - modetype = type2mode.get(tarinfo.type, 0) - _safe_print(stat.filemode(modetype | tarinfo.mode)) - _safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid, - tarinfo.gname or tarinfo.gid)) - if tarinfo.ischr() or tarinfo.isblk(): - _safe_print("%10s" % - ("%d,%d" % (tarinfo.devmajor, tarinfo.devminor))) - else: - _safe_print("%10d" % tarinfo.size) - if tarinfo.mtime is None: - _safe_print("????-??-?? ??:??:??") - else: - _safe_print("%d-%02d-%02d %02d:%02d:%02d" \ - % time.localtime(tarinfo.mtime)[:6]) - - _safe_print(tarinfo.name + ("/" if tarinfo.isdir() else "")) - - if verbose: - if tarinfo.issym(): - _safe_print("-> " + tarinfo.linkname) - if tarinfo.islnk(): - _safe_print("link to " + tarinfo.linkname) - print() - - def add(self, name, arcname=None, recursive=True, *, filter=None): - """Add the file `name' to the archive. `name' may be any type of file - (directory, fifo, symbolic link, etc.). If given, `arcname' - specifies an alternative name for the file in the archive. - Directories are added recursively by default. This can be avoided by - setting `recursive' to False. `filter' is a function - that expects a TarInfo object argument and returns the changed - TarInfo object, if it returns None the TarInfo object will be - excluded from the archive. - """ - self._check("awx") - - if arcname is None: - arcname = name - - # Skip if somebody tries to archive the archive... - if self.name is not None and os.path.abspath(name) == self.name: - self._dbg(2, "tarfile: Skipped %r" % name) - return - - self._dbg(1, name) - - # Create a TarInfo object from the file. - tarinfo = self.gettarinfo(name, arcname) - - if tarinfo is None: - self._dbg(1, "tarfile: Unsupported type %r" % name) - return - - # Change or exclude the TarInfo object. - if filter is not None: - tarinfo = filter(tarinfo) - if tarinfo is None: - self._dbg(2, "tarfile: Excluded %r" % name) - return - - # Append the tar header and data to the archive. 
- if tarinfo.isreg(): - with bltn_open(name, "rb") as f: - self.addfile(tarinfo, f) - - elif tarinfo.isdir(): - self.addfile(tarinfo) - if recursive: - for f in sorted(os.listdir(name)): - self.add(os.path.join(name, f), os.path.join(arcname, f), - recursive, filter=filter) - - else: - self.addfile(tarinfo) - - def addfile(self, tarinfo, fileobj=None): - """Add the TarInfo object `tarinfo' to the archive. If `tarinfo' represents - a non zero-size regular file, the `fileobj' argument should be a binary file, - and tarinfo.size bytes are read from it and added to the archive. - You can create TarInfo objects directly, or by using gettarinfo(). - """ - self._check("awx") - - if fileobj is None and tarinfo.isreg() and tarinfo.size != 0: - raise ValueError("fileobj not provided for non zero-size regular file") - - tarinfo = copy.copy(tarinfo) - - buf = tarinfo.tobuf(self.format, self.encoding, self.errors) - self.fileobj.write(buf) - self.offset += len(buf) - bufsize=self.copybufsize - # If there's data to follow, append it. - if fileobj is not None: - copyfileobj(fileobj, self.fileobj, tarinfo.size, bufsize=bufsize) - blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) - if remainder > 0: - self.fileobj.write(NUL * (BLOCKSIZE - remainder)) - blocks += 1 - self.offset += blocks * BLOCKSIZE - - self.members.append(tarinfo) - - def _get_filter_function(self, filter): - if filter is None: - filter = self.extraction_filter - if filter is None: - import warnings - warnings.warn( - 'Python 3.14 will, by default, filter extracted tar ' - + 'archives and reject files or modify their metadata. ' - + 'Use the filter argument to control this behavior.', - DeprecationWarning, stacklevel=3) - return fully_trusted_filter - if isinstance(filter, str): - raise TypeError( - 'String names are not supported for ' - + 'TarFile.extraction_filter. Use a function such as ' - + 'tarfile.data_filter directly.') - return filter - if callable(filter): - return filter - try: - return _NAMED_FILTERS[filter] - except KeyError: - raise ValueError(f"filter {filter!r} not found") from None - - def extractall(self, path=".", members=None, *, numeric_owner=False, - filter=None): - """Extract all members from the archive to the current working - directory and set owner, modification time and permissions on - directories afterwards. `path' specifies a different directory - to extract to. `members' is optional and must be a subset of the - list returned by getmembers(). If `numeric_owner` is True, only - the numbers for user/group names are used and not the names. - - The `filter` function will be called on each member just - before extraction. - It can return a changed TarInfo or None to skip the member. - String names of common filters are accepted. - """ - directories = [] - - filter_function = self._get_filter_function(filter) - if members is None: - members = self - - for member in members: - tarinfo, unfiltered = self._get_extract_tarinfo( - member, filter_function, path) - if tarinfo is None: - continue - if tarinfo.isdir(): - # For directories, delay setting attributes until later, - # since permissions can interfere with extraction and - # extracting contents can reset mtime. - directories.append(unfiltered) - self._extract_one(tarinfo, path, set_attrs=not tarinfo.isdir(), - numeric_owner=numeric_owner, - filter_function=filter_function) - - # Reverse sort directories. - directories.sort(key=lambda a: a.name, reverse=True) - - - # Set correct owner, mtime and filemode on directories. 
- for unfiltered in directories: - try: - # Need to re-apply any filter, to take the *current* filesystem - # state into account. - try: - tarinfo = filter_function(unfiltered, path) - except _FILTER_ERRORS as exc: - self._log_no_directory_fixup(unfiltered, repr(exc)) - continue - if tarinfo is None: - self._log_no_directory_fixup(unfiltered, - 'excluded by filter') - continue - dirpath = os.path.join(path, tarinfo.name) - try: - lstat = os.lstat(dirpath) - except FileNotFoundError: - self._log_no_directory_fixup(tarinfo, 'missing') - continue - if not stat.S_ISDIR(lstat.st_mode): - # This is no longer a directory; presumably a later - # member overwrote the entry. - self._log_no_directory_fixup(tarinfo, 'not a directory') - continue - self.chown(tarinfo, dirpath, numeric_owner=numeric_owner) - self.utime(tarinfo, dirpath) - self.chmod(tarinfo, dirpath) - except ExtractError as e: - self._handle_nonfatal_error(e) - - def _log_no_directory_fixup(self, member, reason): - self._dbg(2, "tarfile: Not fixing up directory %r (%s)" % - (member.name, reason)) - - def extract(self, member, path="", set_attrs=True, *, numeric_owner=False, - filter=None): - """Extract a member from the archive to the current working directory, - using its full name. Its file information is extracted as accurately - as possible. `member' may be a filename or a TarInfo object. You can - specify a different directory using `path'. File attributes (owner, - mtime, mode) are set unless `set_attrs' is False. If `numeric_owner` - is True, only the numbers for user/group names are used and not - the names. - - The `filter` function will be called before extraction. - It can return a changed TarInfo or None to skip the member. - String names of common filters are accepted. - """ - filter_function = self._get_filter_function(filter) - tarinfo, unfiltered = self._get_extract_tarinfo( - member, filter_function, path) - if tarinfo is not None: - self._extract_one(tarinfo, path, set_attrs, numeric_owner) - - def _get_extract_tarinfo(self, member, filter_function, path): - """Get (filtered, unfiltered) TarInfos from *member* - - *member* might be a string. - - Return (None, None) if not found. - """ - - if isinstance(member, str): - unfiltered = self.getmember(member) - else: - unfiltered = member - - filtered = None - try: - filtered = filter_function(unfiltered, path) - except (OSError, UnicodeEncodeError, FilterError) as e: - self._handle_fatal_error(e) - except ExtractError as e: - self._handle_nonfatal_error(e) - if filtered is None: - self._dbg(2, "tarfile: Excluded %r" % unfiltered.name) - return None, None - - # Prepare the link target for makelink(). - if filtered.islnk(): - filtered = copy.copy(filtered) - filtered._link_target = os.path.join(path, filtered.linkname) - return filtered, unfiltered - - def _extract_one(self, tarinfo, path, set_attrs, numeric_owner, - filter_function=None): - """Extract from filtered tarinfo to disk. - - filter_function is only used when extracting a *different* - member (e.g. 
as fallback to creating a symlink) - """ - self._check("r") - - try: - self._extract_member(tarinfo, os.path.join(path, tarinfo.name), - set_attrs=set_attrs, - numeric_owner=numeric_owner, - filter_function=filter_function, - extraction_root=path) - except (OSError, UnicodeEncodeError) as e: - self._handle_fatal_error(e) - except ExtractError as e: - self._handle_nonfatal_error(e) - - def _handle_nonfatal_error(self, e): - """Handle non-fatal error (ExtractError) according to errorlevel""" - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - def _handle_fatal_error(self, e): - """Handle "fatal" error according to self.errorlevel""" - if self.errorlevel > 0: - raise - elif isinstance(e, OSError): - if e.filename is None: - self._dbg(1, "tarfile: %s" % e.strerror) - else: - self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) - else: - self._dbg(1, "tarfile: %s %s" % (type(e).__name__, e)) - - def extractfile(self, member): - """Extract a member from the archive as a file object. `member' may be - a filename or a TarInfo object. If `member' is a regular file or - a link, an io.BufferedReader object is returned. For all other - existing members, None is returned. If `member' does not appear - in the archive, KeyError is raised. - """ - self._check("r") - - if isinstance(member, str): - tarinfo = self.getmember(member) - else: - tarinfo = member - - if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES: - # Members with unknown types are treated as regular files. - return self.fileobject(self, tarinfo) - - elif tarinfo.islnk() or tarinfo.issym(): - if isinstance(self.fileobj, _Stream): - # A small but ugly workaround for the case that someone tries - # to extract a (sym)link as a file-object from a non-seekable - # stream of tar blocks. - raise StreamError("cannot extract (sym)link as file object") - else: - # A (sym)link's file object is its target's file object. - return self.extractfile(self._find_link_target(tarinfo)) - else: - # If there's no data associated with the member (directory, chrdev, - # blkdev, etc.), return None instead of a file object. - return None - - def _extract_member(self, tarinfo, targetpath, set_attrs=True, - numeric_owner=False, *, filter_function=None, - extraction_root=None): - """Extract the filtered TarInfo object tarinfo to a physical - file called targetpath. - - filter_function is only used when extracting a *different* - member (e.g. as fallback to creating a symlink) - """ - # Fetch the TarInfo object for the given name - # and build the destination pathname, replacing - # forward slashes to platform specific separators. - targetpath = targetpath.rstrip("/") - targetpath = targetpath.replace("/", os.sep) - - # Create all upper directories. - upperdirs = os.path.dirname(targetpath) - if upperdirs and not os.path.exists(upperdirs): - # Create directories that are not part of the archive with - # default permissions. 
- os.makedirs(upperdirs, exist_ok=True) - - if tarinfo.islnk() or tarinfo.issym(): - self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) - else: - self._dbg(1, tarinfo.name) - - if tarinfo.isreg(): - self.makefile(tarinfo, targetpath) - elif tarinfo.isdir(): - self.makedir(tarinfo, targetpath) - elif tarinfo.isfifo(): - self.makefifo(tarinfo, targetpath) - elif tarinfo.ischr() or tarinfo.isblk(): - self.makedev(tarinfo, targetpath) - elif tarinfo.islnk() or tarinfo.issym(): - self.makelink_with_filter( - tarinfo, targetpath, - filter_function=filter_function, - extraction_root=extraction_root) - elif tarinfo.type not in SUPPORTED_TYPES: - self.makeunknown(tarinfo, targetpath) - else: - self.makefile(tarinfo, targetpath) - - if set_attrs: - self.chown(tarinfo, targetpath, numeric_owner) - if not tarinfo.issym(): - self.chmod(tarinfo, targetpath) - self.utime(tarinfo, targetpath) - - #-------------------------------------------------------------------------- - # Below are the different file methods. They are called via - # _extract_member() when extract() is called. They can be replaced in a - # subclass to implement other functionality. - - def makedir(self, tarinfo, targetpath): - """Make a directory called targetpath. - """ - try: - if tarinfo.mode is None: - # Use the system's default mode - os.mkdir(targetpath) - else: - # Use a safe mode for the directory, the real mode is set - # later in _extract_member(). - os.mkdir(targetpath, 0o700) - except FileExistsError: - if not os.path.isdir(targetpath): - raise - - def makefile(self, tarinfo, targetpath): - """Make a file called targetpath. - """ - source = self.fileobj - source.seek(tarinfo.offset_data) - bufsize = self.copybufsize - with bltn_open(targetpath, "wb") as target: - if tarinfo.sparse is not None: - for offset, size in tarinfo.sparse: - target.seek(offset) - copyfileobj(source, target, size, ReadError, bufsize) - target.seek(tarinfo.size) - target.truncate() - else: - copyfileobj(source, target, tarinfo.size, ReadError, bufsize) - - def makeunknown(self, tarinfo, targetpath): - """Make a file from a TarInfo object with an unknown type - at targetpath. - """ - self.makefile(tarinfo, targetpath) - self._dbg(1, "tarfile: Unknown file type %r, " \ - "extracted as regular file." % tarinfo.type) - - def makefifo(self, tarinfo, targetpath): - """Make a fifo called targetpath. - """ - if hasattr(os, "mkfifo"): - os.mkfifo(targetpath) - else: - raise ExtractError("fifo not supported by system") - - def makedev(self, tarinfo, targetpath): - """Make a character or block device called targetpath. - """ - if not hasattr(os, "mknod") or not hasattr(os, "makedev"): - raise ExtractError("special devices not supported by system") - - mode = tarinfo.mode - if mode is None: - # Use mknod's default - mode = 0o600 - if tarinfo.isblk(): - mode |= stat.S_IFBLK - else: - mode |= stat.S_IFCHR - - os.mknod(targetpath, mode, - os.makedev(tarinfo.devmajor, tarinfo.devminor)) - - def makelink(self, tarinfo, targetpath): - return self.makelink_with_filter(tarinfo, targetpath, None, None) - - def makelink_with_filter(self, tarinfo, targetpath, - filter_function, extraction_root): - """Make a (symbolic) link called targetpath. If it cannot be created - (platform limitation), we try to make a copy of the referenced file - instead of a link. - - filter_function is only used when extracting a *different* - member (e.g. as fallback to creating a link). - """ - keyerror_to_extracterror = False - try: - # For systems that support symbolic and hard links. 
- if tarinfo.issym(): - if os.path.lexists(targetpath): - # Avoid FileExistsError on following os.symlink. - os.unlink(targetpath) - os.symlink(tarinfo.linkname, targetpath) - return - else: - if os.path.exists(tarinfo._link_target): - if os.path.lexists(targetpath): - # Avoid FileExistsError on following os.link. - os.unlink(targetpath) - os.link(tarinfo._link_target, targetpath) - return - except symlink_exception: - keyerror_to_extracterror = True - - try: - unfiltered = self._find_link_target(tarinfo) - except KeyError: - if keyerror_to_extracterror: - raise ExtractError( - "unable to resolve link inside archive") from None - else: - raise - - if filter_function is None: - filtered = unfiltered - else: - if extraction_root is None: - raise ExtractError( - "makelink_with_filter: if filter_function is not None, " - + "extraction_root must also not be None") - try: - filtered = filter_function(unfiltered, extraction_root) - except _FILTER_ERRORS as cause: - raise LinkFallbackError(tarinfo, unfiltered.name) from cause - if filtered is not None: - self._extract_member(filtered, targetpath, - filter_function=filter_function, - extraction_root=extraction_root) - - def chown(self, tarinfo, targetpath, numeric_owner): - """Set owner of targetpath according to tarinfo. If numeric_owner - is True, use .gid/.uid instead of .gname/.uname. If numeric_owner - is False, fall back to .gid/.uid when the search based on name - fails. - """ - if hasattr(os, "geteuid") and os.geteuid() == 0: - # We have to be root to do so. - g = tarinfo.gid - u = tarinfo.uid - if not numeric_owner: - try: - if grp and tarinfo.gname: - g = grp.getgrnam(tarinfo.gname)[2] - except KeyError: - pass - try: - if pwd and tarinfo.uname: - u = pwd.getpwnam(tarinfo.uname)[2] - except KeyError: - pass - if g is None: - g = -1 - if u is None: - u = -1 - try: - if tarinfo.issym() and hasattr(os, "lchown"): - os.lchown(targetpath, u, g) - else: - os.chown(targetpath, u, g) - except (OSError, OverflowError) as e: - # OverflowError can be raised if an ID doesn't fit in `id_t` - raise ExtractError("could not change owner") from e - - def chmod(self, tarinfo, targetpath): - """Set file permissions of targetpath according to tarinfo. - """ - if tarinfo.mode is None: - return - try: - os.chmod(targetpath, tarinfo.mode) - except OSError as e: - raise ExtractError("could not change mode") from e - - def utime(self, tarinfo, targetpath): - """Set modification time of targetpath according to tarinfo. - """ - mtime = tarinfo.mtime - if mtime is None: - return - if not hasattr(os, 'utime'): - return - try: - os.utime(targetpath, (mtime, mtime)) - except OSError as e: - raise ExtractError("could not change modification time") from e - - #-------------------------------------------------------------------------- - def next(self): - """Return the next member of the archive as a TarInfo object, when - TarFile is opened for reading. Return None if there is no more - available. - """ - self._check("ra") - if self.firstmember is not None: - m = self.firstmember - self.firstmember = None - return m - - # Advance the file pointer. - if self.offset != self.fileobj.tell(): - if self.offset == 0: - return None - self.fileobj.seek(self.offset - 1) - if not self.fileobj.read(1): - raise ReadError("unexpected end of data") - - # Read the next block. 
- tarinfo = None - while True: - try: - tarinfo = self.tarinfo.fromtarfile(self) - except EOFHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - except InvalidHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - elif self.offset == 0: - raise ReadError(str(e)) from None - except EmptyHeaderError: - if self.offset == 0: - raise ReadError("empty file") from None - except TruncatedHeaderError as e: - if self.offset == 0: - raise ReadError(str(e)) from None - except SubsequentHeaderError as e: - raise ReadError(str(e)) from None - except Exception as e: - try: - import zlib - if isinstance(e, zlib.error): - raise ReadError(f'zlib error: {e}') from None - else: - raise e - except ImportError: - raise e - break - - if tarinfo is not None: - # if streaming the file we do not want to cache the tarinfo - if not self.stream: - self.members.append(tarinfo) - else: - self._loaded = True - - return tarinfo - - #-------------------------------------------------------------------------- - # Little helper methods: - - def _getmember(self, name, tarinfo=None, normalize=False): - """Find an archive member by name from bottom to top. - If tarinfo is given, it is used as the starting point. - """ - # Ensure that all members have been loaded. - members = self.getmembers() - - # Limit the member search list up to tarinfo. - skipping = False - if tarinfo is not None: - try: - index = members.index(tarinfo) - except ValueError: - # The given starting point might be a (modified) copy. - # We'll later skip members until we find an equivalent. - skipping = True - else: - # Happy fast path - members = members[:index] - - if normalize: - name = os.path.normpath(name) - - for member in reversed(members): - if skipping: - if tarinfo.offset == member.offset: - skipping = False - continue - if normalize: - member_name = os.path.normpath(member.name) - else: - member_name = member.name - - if name == member_name: - return member - - if skipping: - # Starting point was not found - raise ValueError(tarinfo) - - def _load(self): - """Read through the entire archive file and look for readable - members. This should not run if the file is set to stream. - """ - if not self.stream: - while self.next() is not None: - pass - self._loaded = True - - def _check(self, mode=None): - """Check if TarFile is still open, and if the operation's mode - corresponds to TarFile's mode. - """ - if self.closed: - raise OSError("%s is closed" % self.__class__.__name__) - if mode is not None and self.mode not in mode: - raise OSError("bad operation for mode %r" % self.mode) - - def _find_link_target(self, tarinfo): - """Find the target member of a symlink or hardlink member in the - archive. - """ - if tarinfo.issym(): - # Always search the entire archive. - linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname))) - limit = None - else: - # Search the archive before the link, because a hard link is - # just a reference to an already archived file. - linkname = tarinfo.linkname - limit = tarinfo - - member = self._getmember(linkname, tarinfo=limit, normalize=True) - if member is None: - raise KeyError("linkname %r not found" % linkname) - return member - - def __iter__(self): - """Provide an iterator object. - """ - if self._loaded: - yield from self.members - return - - # Yield items using TarFile's next() method. 
- # When all members have been read, set TarFile as _loaded.
- index = 0
- # Fix for SF #1100429: Under rare circumstances it can
- # happen that getmembers() is called during iteration,
- # which will have already exhausted the next() method.
- if self.firstmember is not None:
- tarinfo = self.next()
- index += 1
- yield tarinfo
-
- while True:
- if index < len(self.members):
- tarinfo = self.members[index]
- elif not self._loaded:
- tarinfo = self.next()
- if not tarinfo:
- self._loaded = True
- return
- else:
- return
- index += 1
- yield tarinfo
-
- def _dbg(self, level, msg):
- """Write debugging output to sys.stderr.
- """
- if level <= self.debug:
- print(msg, file=sys.stderr)
-
- def __enter__(self):
- self._check()
- return self
-
- def __exit__(self, type, value, traceback):
- if type is None:
- self.close()
- else:
- # An exception occurred. We must not call close() because
- # it would try to write end-of-archive blocks and padding.
- if not self._extfileobj:
- self.fileobj.close()
- self.closed = True
-
-#--------------------
-# exported functions
-#--------------------
-
-def is_tarfile(name):
- """Return True if name points to a tar archive that we
- are able to handle, else return False.
-
- 'name' should be a string, file, or file-like object.
- """
- try:
- if hasattr(name, "read"):
- pos = name.tell()
- t = open(fileobj=name)
- name.seek(pos)
- else:
- t = open(name)
- t.close()
- return True
- except TarError:
- return False
-
-open = TarFile.open
-
-
-def main():
- import argparse
-
- description = 'A simple command-line interface for tarfile module.'
- parser = argparse.ArgumentParser(description=description)
- parser.add_argument('-v', '--verbose', action='store_true', default=False,
- help='Verbose output')
- parser.add_argument('--filter', metavar='<filtername>',
- choices=_NAMED_FILTERS,
- help='Filter for extraction')
-
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('-l', '--list', metavar='<tarfile>',
- help='Show listing of a tarfile')
- group.add_argument('-e', '--extract', nargs='+',
- metavar=('<tarfile>', '<output_dir>'),
- help='Extract tarfile into target dir')
- group.add_argument('-c', '--create', nargs='+',
- metavar=('<name>', '<file>'),
- help='Create tarfile from sources')
- group.add_argument('-t', '--test', metavar='<tarfile>',
- help='Test if a tarfile is valid')
-
- args = parser.parse_args()
-
- if args.filter and args.extract is None:
- parser.exit(1, '--filter is only valid for extraction\n')
-
- if args.test is not None:
- src = args.test
- if is_tarfile(src):
- with open(src, 'r') as tar:
- tar.getmembers()
- print(tar.getmembers(), file=sys.stderr)
- if args.verbose:
- print('{!r} is a tar archive.'.format(src))
- else:
- parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
-
- elif args.list is not None:
- src = args.list
- if is_tarfile(src):
- with TarFile.open(src, 'r:*') as tf:
- tf.list(verbose=args.verbose)
- else:
- parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
-
- elif args.extract is not None:
- if len(args.extract) == 1:
- src = args.extract[0]
- curdir = os.curdir
- elif len(args.extract) == 2:
- src, curdir = args.extract
- else:
- parser.exit(1, parser.format_help())
-
- if is_tarfile(src):
- with TarFile.open(src, 'r:*') as tf:
- tf.extractall(path=curdir, filter=args.filter)
- if args.verbose:
- if curdir == '.':
- msg = '{!r} file is extracted.'.format(src)
- else:
- msg = ('{!r} file is extracted '
- 'into {!r} directory.').format(src, curdir)
- print(msg)
- else:
- parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
-
-
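The --filter handling above maps one-to-one onto the library call. A minimal sketch of the equivalent API usage, assuming a hypothetical archive name and output directory:

import tarfile

# 'data' is one of the named extraction filters accepted by --filter; it
# rejects absolute names, parent-directory traversal and links that would
# escape the destination tree.
with tarfile.open("release.tar.gz", "r:*") as tf:
    tf.extractall(path="unpacked", filter="data")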
elif args.create is not None: - tar_name = args.create.pop(0) - _, ext = os.path.splitext(tar_name) - compressions = { - # gz - '.gz': 'gz', - '.tgz': 'gz', - # xz - '.xz': 'xz', - '.txz': 'xz', - # bz2 - '.bz2': 'bz2', - '.tbz': 'bz2', - '.tbz2': 'bz2', - '.tb2': 'bz2', - } - tar_mode = 'w:' + compressions[ext] if ext in compressions else 'w' - tar_files = args.create - - with TarFile.open(tar_name, tar_mode) as tf: - for file_name in tar_files: - tf.add(file_name) - - if args.verbose: - print('{!r} file created.'.format(tar_name)) - -if __name__ == '__main__': - main() diff --git a/Python313_13_x64_Template/Lib/tempfile.py b/Python313_13_x64_Template/Lib/tempfile.py deleted file mode 100644 index 609ef487..00000000 --- a/Python313_13_x64_Template/Lib/tempfile.py +++ /dev/null @@ -1,957 +0,0 @@ -"""Temporary files. - -This module provides generic, low- and high-level interfaces for -creating temporary files and directories. All of the interfaces -provided by this module can be used without fear of race conditions -except for 'mktemp'. 'mktemp' is subject to race conditions and -should not be used; it is provided for backward compatibility only. - -The default path names are returned as str. If you supply bytes as -input, all return values will be in bytes. Ex: - - >>> tempfile.mkstemp() - (4, '/tmp/tmptpu9nin8') - >>> tempfile.mkdtemp(suffix=b'') - b'/tmp/tmppbi8f0hy' - -This module also provides some data items to the user: - - TMP_MAX - maximum number of names that will be tried before - giving up. - tempdir - If this is set to a string before the first use of - any routine from this module, it will be considered as - another candidate location to store temporary files. -""" - -__all__ = [ - "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces - "SpooledTemporaryFile", "TemporaryDirectory", - "mkstemp", "mkdtemp", # low level safe interfaces - "mktemp", # deprecated unsafe interface - "TMP_MAX", "gettempprefix", # constants - "tempdir", "gettempdir", - "gettempprefixb", "gettempdirb", - ] - - -# Imports. - -import functools as _functools -import warnings as _warnings -import io as _io -import os as _os -import shutil as _shutil -import errno as _errno -from random import Random as _Random -import sys as _sys -import types as _types -import weakref as _weakref -import _thread -_allocate_lock = _thread.allocate_lock - -_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL -if hasattr(_os, 'O_NOFOLLOW'): - _text_openflags |= _os.O_NOFOLLOW - -_bin_openflags = _text_openflags -if hasattr(_os, 'O_BINARY'): - _bin_openflags |= _os.O_BINARY - -# This is more than enough. -# Each name contains over 40 random bits. Even with a million temporary -# files, the chance of a conflict is less than 1 in a million, and with -# 20 attempts, it is less than 1e-120. -TMP_MAX = 20 - -# This variable _was_ unused for legacy reasons, see issue 10354. -# But as of 3.5 we actually use it at runtime so changing it would -# have a possibly desirable side effect... But we do not want to support -# that as an API. It is undocumented on purpose. Do not depend on this. -template = "tmp" - -# Internal routines. 
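As the module docstring above spells out, the return type of these APIs tracks the argument types. A minimal sketch (the cleanup calls are just hygiene):

import os
import tempfile

fd, path = tempfile.mkstemp()        # no bytes arguments -> str path
os.close(fd)
os.unlink(path)

bdir = tempfile.mkdtemp(suffix=b"")  # any bytes argument -> bytes path
os.rmdir(bdir)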
- -_once_lock = _allocate_lock() - - -def _exists(fn): - try: - _os.lstat(fn) - except OSError: - return False - else: - return True - - -def _infer_return_type(*args): - """Look at the type of all args and divine their implied return type.""" - return_type = None - for arg in args: - if arg is None: - continue - - if isinstance(arg, _os.PathLike): - arg = _os.fspath(arg) - - if isinstance(arg, bytes): - if return_type is str: - raise TypeError("Can't mix bytes and non-bytes in " - "path components.") - return_type = bytes - else: - if return_type is bytes: - raise TypeError("Can't mix bytes and non-bytes in " - "path components.") - return_type = str - if return_type is None: - if tempdir is None or isinstance(tempdir, str): - return str # tempfile APIs return a str by default. - else: - # we could check for bytes but it'll fail later on anyway - return bytes - return return_type - - -def _sanitize_params(prefix, suffix, dir): - """Common parameter processing for most APIs in this module.""" - output_type = _infer_return_type(prefix, suffix, dir) - if suffix is None: - suffix = output_type() - if prefix is None: - if output_type is str: - prefix = template - else: - prefix = _os.fsencode(template) - if dir is None: - if output_type is str: - dir = gettempdir() - else: - dir = gettempdirb() - return prefix, suffix, dir, output_type - - -class _RandomNameSequence: - """An instance of _RandomNameSequence generates an endless - sequence of unpredictable strings which can safely be incorporated - into file names. Each string is eight characters long. Multiple - threads can safely use the same instance at the same time. - - _RandomNameSequence is an iterator.""" - - characters = "abcdefghijklmnopqrstuvwxyz0123456789_" - - @property - def rng(self): - cur_pid = _os.getpid() - if cur_pid != getattr(self, '_rng_pid', None): - self._rng = _Random() - self._rng_pid = cur_pid - return self._rng - - def __iter__(self): - return self - - def __next__(self): - return ''.join(self.rng.choices(self.characters, k=8)) - -def _candidate_tempdir_list(): - """Generate a list of candidate temporary directories which - _get_default_tempdir will try.""" - - dirlist = [] - - # First, try the environment. - for envname in 'TMPDIR', 'TEMP', 'TMP': - dirname = _os.getenv(envname) - if dirname: dirlist.append(dirname) - - # Failing that, try OS-specific locations. - if _os.name == 'nt': - dirlist.extend([ _os.path.expanduser(r'~\AppData\Local\Temp'), - _os.path.expandvars(r'%SYSTEMROOT%\Temp'), - r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ]) - else: - dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ]) - - # As a last resort, the current directory. - try: - dirlist.append(_os.getcwd()) - except (AttributeError, OSError): - dirlist.append(_os.curdir) - - return dirlist - -def _get_default_tempdir(dirlist=None): - """Calculate the default directory to use for temporary files. - This routine should be called exactly once. - - We determine whether or not a candidate temp dir is usable by - trying to create and write to a file in that directory. If this - is successful, the test file is deleted. 
To prevent denial of - service, the name of the test file must be randomized.""" - - namer = _RandomNameSequence() - if dirlist is None: - dirlist = _candidate_tempdir_list() - - for dir in dirlist: - if dir != _os.curdir: - dir = _os.path.abspath(dir) - for seq in range(TMP_MAX): - name = next(namer) - filename = _os.path.join(dir, name) - try: - fd = _os.open(filename, _bin_openflags, 0o600) - try: - try: - _os.write(fd, b'blat') - finally: - _os.close(fd) - finally: - _os.unlink(filename) - return dir - except FileExistsError: - pass - except PermissionError: - # See the comment in mkdtemp(). - if _os.name == 'nt' and _os.path.isdir(dir): - continue - break # no point trying more names in this directory - except OSError: - break # no point trying more names in this directory - raise FileNotFoundError(_errno.ENOENT, - "No usable temporary directory found in %s" % - dirlist) - -_name_sequence = None - -def _get_candidate_names(): - """Common setup sequence for all user-callable interfaces.""" - - global _name_sequence - if _name_sequence is None: - _once_lock.acquire() - try: - if _name_sequence is None: - _name_sequence = _RandomNameSequence() - finally: - _once_lock.release() - return _name_sequence - - -def _mkstemp_inner(dir, pre, suf, flags, output_type): - """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.""" - - dir = _os.path.abspath(dir) - names = _get_candidate_names() - if output_type is bytes: - names = map(_os.fsencode, names) - - for seq in range(TMP_MAX): - name = next(names) - file = _os.path.join(dir, pre + name + suf) - _sys.audit("tempfile.mkstemp", file) - try: - fd = _os.open(file, flags, 0o600) - except FileExistsError: - continue # try again - except PermissionError: - # See the comment in mkdtemp(). - if _os.name == 'nt' and _os.path.isdir(dir) and seq < TMP_MAX - 1: - continue - else: - raise - return fd, file - - raise FileExistsError(_errno.EEXIST, - "No usable temporary file name found") - -def _dont_follow_symlinks(func, path, *args): - # Pass follow_symlinks=False, unless not supported on this platform. - if func in _os.supports_follow_symlinks: - func(path, *args, follow_symlinks=False) - elif not _os.path.islink(path): - func(path, *args) - -def _resetperms(path): - try: - chflags = _os.chflags - except AttributeError: - pass - else: - _dont_follow_symlinks(chflags, path, 0) - _dont_follow_symlinks(_os.chmod, path, 0o700) - - -# User visible interfaces. - -def gettempprefix(): - """The default prefix for temporary directories as string.""" - return _os.fsdecode(template) - -def gettempprefixb(): - """The default prefix for temporary directories as bytes.""" - return _os.fsencode(template) - -tempdir = None - -def _gettempdir(): - """Private accessor for tempfile.tempdir.""" - global tempdir - if tempdir is None: - _once_lock.acquire() - try: - if tempdir is None: - tempdir = _get_default_tempdir() - finally: - _once_lock.release() - return tempdir - -def gettempdir(): - """Returns tempfile.tempdir as str.""" - return _os.fsdecode(_gettempdir()) - -def gettempdirb(): - """Returns tempfile.tempdir as bytes.""" - return _os.fsencode(_gettempdir()) - -def mkstemp(suffix=None, prefix=None, dir=None, text=False): - """User-callable function to create and return a unique temporary - file. The return value is a pair (fd, name) where fd is the - file descriptor returned by os.open, and name is the filename. - - If 'suffix' is not None, the file name will end with that suffix, - otherwise there will be no suffix. 
- - If 'prefix' is not None, the file name will begin with that prefix, - otherwise a default prefix is used. - - If 'dir' is not None, the file will be created in that directory, - otherwise a default directory is used. - - If 'text' is specified and true, the file is opened in text - mode. Else (the default) the file is opened in binary mode. - - If any of 'suffix', 'prefix' and 'dir' are not None, they must be the - same type. If they are bytes, the returned name will be bytes; str - otherwise. - - The file is readable and writable only by the creating user ID. - If the operating system uses permission bits to indicate whether a - file is executable, the file is executable by no one. The file - descriptor is not inherited by children of this process. - - Caller is responsible for deleting the file when done with it. - """ - - prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) - - if text: - flags = _text_openflags - else: - flags = _bin_openflags - - return _mkstemp_inner(dir, prefix, suffix, flags, output_type) - - -def mkdtemp(suffix=None, prefix=None, dir=None): - """User-callable function to create and return a unique temporary - directory. The return value is the pathname of the directory. - - Arguments are as for mkstemp, except that the 'text' argument is - not accepted. - - The directory is readable, writable, and searchable only by the - creating user. - - Caller is responsible for deleting the directory when done with it. - """ - - prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) - - names = _get_candidate_names() - if output_type is bytes: - names = map(_os.fsencode, names) - - for seq in range(TMP_MAX): - name = next(names) - file = _os.path.join(dir, prefix + name + suffix) - _sys.audit("tempfile.mkdtemp", file) - try: - _os.mkdir(file, 0o700) - except FileExistsError: - continue # try again - except PermissionError: - # On Posix, this exception is raised when the user has no - # write access to the parent directory. - # On Windows, it is also raised when a directory with - # the chosen name already exists, or if the parent directory - # is not a directory. - # We cannot distinguish between "directory-exists-error" and - # "access-denied-error". - if _os.name == 'nt' and _os.path.isdir(dir) and seq < TMP_MAX - 1: - continue - else: - raise - return _os.path.abspath(file) - - raise FileExistsError(_errno.EEXIST, - "No usable temporary directory name found") - -def mktemp(suffix="", prefix=template, dir=None): - """User-callable function to return a unique temporary file name. The - file is not created. - - Arguments are similar to mkstemp, except that the 'text' argument is - not accepted, and suffix=None, prefix=None and bytes file names are not - supported. - - THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may - refer to a file that did not exist at some point, but by the time - you get around to creating it, someone else may have beaten you to - the punch. 
- """ - -## from warnings import warn as _warn -## _warn("mktemp is a potential security risk to your program", -## RuntimeWarning, stacklevel=2) - - if dir is None: - dir = gettempdir() - - names = _get_candidate_names() - for seq in range(TMP_MAX): - name = next(names) - file = _os.path.join(dir, prefix + name + suffix) - if not _exists(file): - return file - - raise FileExistsError(_errno.EEXIST, - "No usable temporary filename found") - - -class _TemporaryFileCloser: - """A separate object allowing proper closing of a temporary file's - underlying file object, without adding a __del__ method to the - temporary file.""" - - cleanup_called = False - close_called = False - - def __init__(self, file, name, delete=True, delete_on_close=True): - self.file = file - self.name = name - self.delete = delete - self.delete_on_close = delete_on_close - - def cleanup(self, windows=(_os.name == 'nt'), unlink=_os.unlink): - if not self.cleanup_called: - self.cleanup_called = True - try: - if not self.close_called: - self.close_called = True - self.file.close() - finally: - # Windows provides delete-on-close as a primitive, in which - # case the file was deleted by self.file.close(). - if self.delete and not (windows and self.delete_on_close): - try: - unlink(self.name) - except FileNotFoundError: - pass - - def close(self): - if not self.close_called: - self.close_called = True - try: - self.file.close() - finally: - if self.delete and self.delete_on_close: - self.cleanup() - - def __del__(self): - self.cleanup() - - -class _TemporaryFileWrapper: - """Temporary file wrapper - - This class provides a wrapper around files opened for - temporary use. In particular, it seeks to automatically - remove the file when it is no longer needed. - """ - - def __init__(self, file, name, delete=True, delete_on_close=True): - self.file = file - self.name = name - self._closer = _TemporaryFileCloser(file, name, delete, - delete_on_close) - - def __getattr__(self, name): - # Attribute lookups are delegated to the underlying file - # and cached for non-numeric results - # (i.e. methods are cached, closed and friends are not) - file = self.__dict__['file'] - a = getattr(file, name) - if hasattr(a, '__call__'): - func = a - @_functools.wraps(func) - def func_wrapper(*args, **kwargs): - return func(*args, **kwargs) - # Avoid closing the file as long as the wrapper is alive, - # see issue #18879. - func_wrapper._closer = self._closer - a = func_wrapper - if not isinstance(a, int): - setattr(self, name, a) - return a - - # The underlying __enter__ method returns the wrong object - # (self.file) so override it to return the wrapper - def __enter__(self): - self.file.__enter__() - return self - - # Need to trap __exit__ as well to ensure the file gets - # deleted when used in a with statement - def __exit__(self, exc, value, tb): - result = self.file.__exit__(exc, value, tb) - self._closer.cleanup() - return result - - def close(self): - """ - Close the temporary file, possibly deleting it. - """ - self._closer.close() - - # iter() doesn't use __getattr__ to find the __iter__ method - def __iter__(self): - # Don't return iter(self.file), but yield from it to avoid closing - # file as long as it's being used as iterator (see issue #23700). We - # can't use 'yield from' here because iter(file) returns the file - # object itself, which has a close method, and thus the file would get - # closed when the generator is finalized, due to PEP380 semantics. 
- for line in self.file: - yield line - -def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None, - newline=None, suffix=None, prefix=None, - dir=None, delete=True, *, errors=None, - delete_on_close=True): - """Create and return a temporary file. - Arguments: - 'prefix', 'suffix', 'dir' -- as for mkstemp. - 'mode' -- the mode argument to io.open (default "w+b"). - 'buffering' -- the buffer size argument to io.open (default -1). - 'encoding' -- the encoding argument to io.open (default None) - 'newline' -- the newline argument to io.open (default None) - 'delete' -- whether the file is automatically deleted (default True). - 'delete_on_close' -- if 'delete', whether the file is deleted on close - (default True) or otherwise either on context manager exit - (if context manager was used) or on object finalization. . - 'errors' -- the errors argument to io.open (default None) - The file is created as mkstemp() would do it. - - Returns an object with a file-like interface; the name of the file - is accessible as its 'name' attribute. The file will be automatically - deleted when it is closed unless the 'delete' argument is set to False. - - On POSIX, NamedTemporaryFiles cannot be automatically deleted if - the creating process is terminated abruptly with a SIGKILL signal. - Windows can delete the file even in this case. - """ - - prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) - - flags = _bin_openflags - - # Setting O_TEMPORARY in the flags causes the OS to delete - # the file when it is closed. This is only supported by Windows. - if _os.name == 'nt' and delete and delete_on_close: - flags |= _os.O_TEMPORARY - - if "b" not in mode: - encoding = _io.text_encoding(encoding) - - name = None - def opener(*args): - nonlocal name - fd, name = _mkstemp_inner(dir, prefix, suffix, flags, output_type) - return fd - try: - file = _io.open(dir, mode, buffering=buffering, - newline=newline, encoding=encoding, errors=errors, - opener=opener) - try: - raw = getattr(file, 'buffer', file) - raw = getattr(raw, 'raw', raw) - raw.name = name - return _TemporaryFileWrapper(file, name, delete, delete_on_close) - except: - file.close() - raise - except: - if name is not None and not ( - _os.name == 'nt' and delete and delete_on_close): - _os.unlink(name) - raise - -if _os.name != 'posix' or _sys.platform == 'cygwin': - # On non-POSIX and Cygwin systems, assume that we cannot unlink a file - # while it is open. - TemporaryFile = NamedTemporaryFile - -else: - # Is the O_TMPFILE flag available and does it work? - # The flag is set to False if os.open(dir, os.O_TMPFILE) raises an - # IsADirectoryError exception - _O_TMPFILE_WORKS = hasattr(_os, 'O_TMPFILE') - - def TemporaryFile(mode='w+b', buffering=-1, encoding=None, - newline=None, suffix=None, prefix=None, - dir=None, *, errors=None): - """Create and return a temporary file. - Arguments: - 'prefix', 'suffix', 'dir' -- as for mkstemp. - 'mode' -- the mode argument to io.open (default "w+b"). - 'buffering' -- the buffer size argument to io.open (default -1). - 'encoding' -- the encoding argument to io.open (default None) - 'newline' -- the newline argument to io.open (default None) - 'errors' -- the errors argument to io.open (default None) - The file is created as mkstemp() would do it. - - Returns an object with a file-like interface. The file has no - name, and will cease to exist when it is closed. 
- """ - global _O_TMPFILE_WORKS - - if "b" not in mode: - encoding = _io.text_encoding(encoding) - - prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) - - flags = _bin_openflags - if _O_TMPFILE_WORKS: - fd = None - def opener(*args): - nonlocal fd - flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT - fd = _os.open(dir, flags2, 0o600) - return fd - try: - file = _io.open(dir, mode, buffering=buffering, - newline=newline, encoding=encoding, - errors=errors, opener=opener) - raw = getattr(file, 'buffer', file) - raw = getattr(raw, 'raw', raw) - raw.name = fd - return file - except IsADirectoryError: - # Linux kernel older than 3.11 ignores the O_TMPFILE flag: - # O_TMPFILE is read as O_DIRECTORY. Trying to open a directory - # with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a - # directory cannot be open to write. Set flag to False to not - # try again. - _O_TMPFILE_WORKS = False - except OSError: - # The filesystem of the directory does not support O_TMPFILE. - # For example, OSError(95, 'Operation not supported'). - # - # On Linux kernel older than 3.11, trying to open a regular - # file (or a symbolic link to a regular file) with O_TMPFILE - # fails with NotADirectoryError, because O_TMPFILE is read as - # O_DIRECTORY. - pass - # Fallback to _mkstemp_inner(). - - fd = None - def opener(*args): - nonlocal fd - fd, name = _mkstemp_inner(dir, prefix, suffix, flags, output_type) - try: - _os.unlink(name) - except BaseException as e: - _os.close(fd) - raise - return fd - file = _io.open(dir, mode, buffering=buffering, - newline=newline, encoding=encoding, errors=errors, - opener=opener) - raw = getattr(file, 'buffer', file) - raw = getattr(raw, 'raw', raw) - raw.name = fd - return file - -class SpooledTemporaryFile(_io.IOBase): - """Temporary file wrapper, specialized to switch from BytesIO - or StringIO to a real file when it exceeds a certain size or - when a fileno is needed. - """ - _rolled = False - - def __init__(self, max_size=0, mode='w+b', buffering=-1, - encoding=None, newline=None, - suffix=None, prefix=None, dir=None, *, errors=None): - if 'b' in mode: - self._file = _io.BytesIO() - else: - encoding = _io.text_encoding(encoding) - self._file = _io.TextIOWrapper(_io.BytesIO(), - encoding=encoding, errors=errors, - newline=newline) - self._max_size = max_size - self._rolled = False - self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering, - 'suffix': suffix, 'prefix': prefix, - 'encoding': encoding, 'newline': newline, - 'dir': dir, 'errors': errors} - - __class_getitem__ = classmethod(_types.GenericAlias) - - def _check(self, file): - if self._rolled: return - max_size = self._max_size - if max_size and file.tell() > max_size: - self.rollover() - - def rollover(self): - if self._rolled: return - file = self._file - newfile = self._file = TemporaryFile(**self._TemporaryFileArgs) - del self._TemporaryFileArgs - - pos = file.tell() - if hasattr(newfile, 'buffer'): - newfile.buffer.write(file.detach().getvalue()) - else: - newfile.write(file.getvalue()) - newfile.seek(pos, 0) - - self._rolled = True - - # The method caching trick from NamedTemporaryFile - # won't work here, because _file may change from a - # BytesIO/StringIO instance to a real file. So we list - # all the methods directly. 
- - # Context management protocol - def __enter__(self): - if self._file.closed: - raise ValueError("Cannot enter context with closed file") - return self - - def __exit__(self, exc, value, tb): - self._file.close() - - # file protocol - def __iter__(self): - return self._file.__iter__() - - def __del__(self): - if not self.closed: - _warnings.warn( - "Unclosed file {!r}".format(self), - ResourceWarning, - stacklevel=2, - source=self - ) - self.close() - - def close(self): - self._file.close() - - @property - def closed(self): - return self._file.closed - - @property - def encoding(self): - return self._file.encoding - - @property - def errors(self): - return self._file.errors - - def fileno(self): - self.rollover() - return self._file.fileno() - - def flush(self): - self._file.flush() - - def isatty(self): - return self._file.isatty() - - @property - def mode(self): - try: - return self._file.mode - except AttributeError: - return self._TemporaryFileArgs['mode'] - - @property - def name(self): - try: - return self._file.name - except AttributeError: - return None - - @property - def newlines(self): - return self._file.newlines - - def readable(self): - return self._file.readable() - - def read(self, *args): - return self._file.read(*args) - - def read1(self, *args): - return self._file.read1(*args) - - def readinto(self, b): - return self._file.readinto(b) - - def readinto1(self, b): - return self._file.readinto1(b) - - def readline(self, *args): - return self._file.readline(*args) - - def readlines(self, *args): - return self._file.readlines(*args) - - def seekable(self): - return self._file.seekable() - - def seek(self, *args): - return self._file.seek(*args) - - def tell(self): - return self._file.tell() - - def truncate(self, size=None): - if size is None: - return self._file.truncate() - else: - if size > self._max_size: - self.rollover() - return self._file.truncate(size) - - def writable(self): - return self._file.writable() - - def write(self, s): - file = self._file - rv = file.write(s) - self._check(file) - return rv - - def writelines(self, iterable): - if self._max_size == 0 or self._rolled: - return self._file.writelines(iterable) - - it = iter(iterable) - for line in it: - self.write(line) - if self._rolled: - return self._file.writelines(it) - - def detach(self): - return self._file.detach() - - -class TemporaryDirectory: - """Create and return a temporary directory. This has the same - behavior as mkdtemp but can be used as a context manager. For - example: - - with TemporaryDirectory() as tmpdir: - ... - - Upon exiting the context, the directory and everything contained - in it are removed (unless delete=False is passed or an exception - is raised during cleanup and ignore_cleanup_errors is not True). - - Optional Arguments: - suffix - A str suffix for the directory name. (see mkdtemp) - prefix - A str prefix for the directory name. (see mkdtemp) - dir - A directory to create this temp dir in. (see mkdtemp) - ignore_cleanup_errors - False; ignore exceptions during cleanup? - delete - True; whether the directory is automatically deleted. 
- """ - - def __init__(self, suffix=None, prefix=None, dir=None, - ignore_cleanup_errors=False, *, delete=True): - self.name = mkdtemp(suffix, prefix, dir) - self._ignore_cleanup_errors = ignore_cleanup_errors - self._delete = delete - self._finalizer = _weakref.finalize( - self, self._cleanup, self.name, - warn_message="Implicitly cleaning up {!r}".format(self), - ignore_errors=self._ignore_cleanup_errors, delete=self._delete) - - @classmethod - def _rmtree(cls, name, ignore_errors=False, repeated=False): - def onexc(func, path, exc): - if isinstance(exc, PermissionError): - if repeated and path == name: - if ignore_errors: - return - raise - - try: - if path != name: - _resetperms(_os.path.dirname(path)) - _resetperms(path) - - try: - _os.unlink(path) - except IsADirectoryError: - cls._rmtree(path, ignore_errors=ignore_errors) - except PermissionError: - # The PermissionError handler was originally added for - # FreeBSD in directories, but it seems that it is raised - # on Windows too. - # bpo-43153: Calling _rmtree again may - # raise NotADirectoryError and mask the PermissionError. - # So we must re-raise the current PermissionError if - # path is not a directory. - if not _os.path.isdir(path) or _os.path.isjunction(path): - if ignore_errors: - return - raise - cls._rmtree(path, ignore_errors=ignore_errors, - repeated=(path == name)) - except FileNotFoundError: - pass - elif isinstance(exc, FileNotFoundError): - pass - else: - if not ignore_errors: - raise - - _shutil.rmtree(name, onexc=onexc) - - @classmethod - def _cleanup(cls, name, warn_message, ignore_errors=False, delete=True): - if delete: - cls._rmtree(name, ignore_errors=ignore_errors) - _warnings.warn(warn_message, ResourceWarning) - - def __repr__(self): - return "<{} {!r}>".format(self.__class__.__name__, self.name) - - def __enter__(self): - return self.name - - def __exit__(self, exc, value, tb): - if self._delete: - self.cleanup() - - def cleanup(self): - if self._finalizer.detach() or _os.path.exists(self.name): - self._rmtree(self.name, ignore_errors=self._ignore_cleanup_errors) - - __class_getitem__ = classmethod(_types.GenericAlias) diff --git a/Python313_13_x64_Template/Lib/textwrap.py b/Python313_13_x64_Template/Lib/textwrap.py deleted file mode 100644 index 686c9eb8..00000000 --- a/Python313_13_x64_Template/Lib/textwrap.py +++ /dev/null @@ -1,497 +0,0 @@ -"""Text wrapping and filling. -""" - -# Copyright (C) 1999-2001 Gregory P. Ward. -# Copyright (C) 2002, 2003 Python Software Foundation. -# Written by Greg Ward - -import re - -__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent', 'shorten'] - -# Hardcode the recognized whitespace characters to the US-ASCII -# whitespace characters. The main reason for doing this is that -# some Unicode spaces (like \u00a0) are non-breaking whitespaces. -_whitespace = '\t\n\x0b\x0c\r ' - -class TextWrapper: - """ - Object for wrapping/filling text. The public interface consists of - the wrap() and fill() methods; the other methods are just there for - subclasses to override in order to tweak the default behaviour. - If you want to completely replace the main wrapping algorithm, - you'll probably have to override _wrap_chunks(). - - Several instance attributes control various aspects of wrapping: - width (default: 70) - the maximum width of wrapped lines (unless break_long_words - is false) - initial_indent (default: "") - string that will be prepended to the first line of wrapped - output. Counts towards the line's width. 
- subsequent_indent (default: "") - string that will be prepended to all lines save the first - of wrapped output; also counts towards each line's width. - expand_tabs (default: true) - Expand tabs in input text to spaces before further processing. - Each tab will become 0 .. 'tabsize' spaces, depending on its position - in its line. If false, each tab is treated as a single character. - tabsize (default: 8) - Expand tabs in input text to 0 .. 'tabsize' spaces, unless - 'expand_tabs' is false. - replace_whitespace (default: true) - Replace all whitespace characters in the input text by spaces - after tab expansion. Note that if expand_tabs is false and - replace_whitespace is true, every tab will be converted to a - single space! - fix_sentence_endings (default: false) - Ensure that sentence-ending punctuation is always followed - by two spaces. Off by default because the algorithm is - (unavoidably) imperfect. - break_long_words (default: true) - Break words longer than 'width'. If false, those words will not - be broken, and some lines might be longer than 'width'. - break_on_hyphens (default: true) - Allow breaking hyphenated words. If true, wrapping will occur - preferably on whitespaces and right after hyphens part of - compound words. - drop_whitespace (default: true) - Drop leading and trailing whitespace from lines. - max_lines (default: None) - Truncate wrapped lines. - placeholder (default: ' [...]') - Append to the last line of truncated text. - """ - - unicode_whitespace_trans = dict.fromkeys(map(ord, _whitespace), ord(' ')) - - # This funky little regex is just the trick for splitting - # text up into word-wrappable chunks. E.g. - # "Hello there -- you goof-ball, use the -b option!" - # splits into - # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! - # (after stripping out empty strings). - word_punct = r'[\w!"\'&.,?]' - letter = r'[^\d\W]' - whitespace = r'[%s]' % re.escape(_whitespace) - nowhitespace = '[^' + whitespace[1:] - wordsep_re = re.compile(r''' - ( # any whitespace - %(ws)s+ - | # em-dash between words - (?<=%(wp)s) -{2,} (?=\w) - | # word, possibly hyphenated - %(nws)s+? (?: - # hyphenated word - -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-)) - (?= %(lt)s -? %(lt)s) - | # end of word - (?=%(ws)s|\Z) - | # em-dash - (?<=%(wp)s) (?=-{2,}\w) - ) - )''' % {'wp': word_punct, 'lt': letter, - 'ws': whitespace, 'nws': nowhitespace}, - re.VERBOSE) - del word_punct, letter, nowhitespace - - # This less funky little regex just split on recognized spaces. E.g. - # "Hello there -- you goof-ball, use the -b option!" - # splits into - # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/ - wordsep_simple_re = re.compile(r'(%s+)' % whitespace) - del whitespace - - # XXX this is not locale- or charset-aware -- string.lowercase - # is US-ASCII only (and therefore English-only) - sentence_end_re = re.compile(r'[a-z]' # lowercase letter - r'[\.\!\?]' # sentence-ending punct. - r'[\"\']?' 
# optional end-of-quote - r'\Z') # end of chunk - - def __init__(self, - width=70, - initial_indent="", - subsequent_indent="", - expand_tabs=True, - replace_whitespace=True, - fix_sentence_endings=False, - break_long_words=True, - drop_whitespace=True, - break_on_hyphens=True, - tabsize=8, - *, - max_lines=None, - placeholder=' [...]'): - self.width = width - self.initial_indent = initial_indent - self.subsequent_indent = subsequent_indent - self.expand_tabs = expand_tabs - self.replace_whitespace = replace_whitespace - self.fix_sentence_endings = fix_sentence_endings - self.break_long_words = break_long_words - self.drop_whitespace = drop_whitespace - self.break_on_hyphens = break_on_hyphens - self.tabsize = tabsize - self.max_lines = max_lines - self.placeholder = placeholder - - - # -- Private methods ----------------------------------------------- - # (possibly useful for subclasses to override) - - def _munge_whitespace(self, text): - """_munge_whitespace(text : string) -> string - - Munge whitespace in text: expand tabs and convert all other - whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz" - becomes " foo bar baz". - """ - if self.expand_tabs: - text = text.expandtabs(self.tabsize) - if self.replace_whitespace: - text = text.translate(self.unicode_whitespace_trans) - return text - - - def _split(self, text): - """_split(text : string) -> [string] - - Split the text to wrap into indivisible chunks. Chunks are - not quite the same as words; see _wrap_chunks() for full - details. As an example, the text - Look, goof-ball -- use the -b option! - breaks into the following chunks: - 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', - 'use', ' ', 'the', ' ', '-b', ' ', 'option!' - if break_on_hyphens is True, or in: - 'Look,', ' ', 'goof-ball', ' ', '--', ' ', - 'use', ' ', 'the', ' ', '-b', ' ', option!' - otherwise. - """ - if self.break_on_hyphens is True: - chunks = self.wordsep_re.split(text) - else: - chunks = self.wordsep_simple_re.split(text) - chunks = [c for c in chunks if c] - return chunks - - def _fix_sentence_endings(self, chunks): - """_fix_sentence_endings(chunks : [string]) - - Correct for sentence endings buried in 'chunks'. Eg. when the - original text contains "... foo.\\nBar ...", munge_whitespace() - and split() will convert that to [..., "foo.", " ", "Bar", ...] - which has one too few spaces; this method simply changes the one - space to two. - """ - i = 0 - patsearch = self.sentence_end_re.search - while i < len(chunks)-1: - if chunks[i+1] == " " and patsearch(chunks[i]): - chunks[i+1] = " " - i += 2 - else: - i += 1 - - def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): - """_handle_long_word(chunks : [string], - cur_line : [string], - cur_len : int, width : int) - - Handle a chunk of text (most likely a word, not whitespace) that - is too long to fit in any line. - """ - # Figure out when indent is larger than the specified width, and make - # sure at least one character is stripped off on every pass - if width < 1: - space_left = 1 - else: - space_left = width - cur_len - - # If we're allowed to break long words, then do so: put as much - # of the next chunk onto the current line as will fit. 
- if self.break_long_words and space_left > 0: - end = space_left - chunk = reversed_chunks[-1] - if self.break_on_hyphens and len(chunk) > space_left: - # break after last hyphen, but only if there are - # non-hyphens before it - hyphen = chunk.rfind('-', 0, space_left) - if hyphen > 0 and any(c != '-' for c in chunk[:hyphen]): - end = hyphen + 1 - cur_line.append(chunk[:end]) - reversed_chunks[-1] = chunk[end:] - - # Otherwise, we have to preserve the long word intact. Only add - # it to the current line if there's nothing already there -- - # that minimizes how much we violate the width constraint. - elif not cur_line: - cur_line.append(reversed_chunks.pop()) - - # If we're not allowed to break long words, and there's already - # text on the current line, do nothing. Next time through the - # main loop of _wrap_chunks(), we'll wind up here again, but - # cur_len will be zero, so the next line will be entirely - # devoted to the long word that we can't handle right now. - - def _wrap_chunks(self, chunks): - """_wrap_chunks(chunks : [string]) -> [string] - - Wrap a sequence of text chunks and return a list of lines of - length 'self.width' or less. (If 'break_long_words' is false, - some lines may be longer than this.) Chunks correspond roughly - to words and the whitespace between them: each chunk is - indivisible (modulo 'break_long_words'), but a line break can - come between any two chunks. Chunks should not have internal - whitespace; ie. a chunk is either all whitespace or a "word". - Whitespace chunks will be removed from the beginning and end of - lines, but apart from that whitespace is preserved. - """ - lines = [] - if self.width <= 0: - raise ValueError("invalid width %r (must be > 0)" % self.width) - if self.max_lines is not None: - if self.max_lines > 1: - indent = self.subsequent_indent - else: - indent = self.initial_indent - if len(indent) + len(self.placeholder.lstrip()) > self.width: - raise ValueError("placeholder too large for max width") - - # Arrange in reverse order so items can be efficiently popped - # from a stack of chucks. - chunks.reverse() - - while chunks: - - # Start the list of chunks that will make up the current line. - # cur_len is just the length of all the chunks in cur_line. - cur_line = [] - cur_len = 0 - - # Figure out which static string will prefix this line. - if lines: - indent = self.subsequent_indent - else: - indent = self.initial_indent - - # Maximum width for this line. - width = self.width - len(indent) - - # First chunk on line is whitespace -- drop it, unless this - # is the very beginning of the text (ie. no lines started yet). - if self.drop_whitespace and chunks[-1].strip() == '' and lines: - del chunks[-1] - - while chunks: - l = len(chunks[-1]) - - # Can at least squeeze this chunk onto the current line. - if cur_len + l <= width: - cur_line.append(chunks.pop()) - cur_len += l - - # Nope, this line is full. - else: - break - - # The current line is full, and the next chunk is too big to - # fit on *any* line (not just this one). - if chunks and len(chunks[-1]) > width: - self._handle_long_word(chunks, cur_line, cur_len, width) - cur_len = sum(map(len, cur_line)) - - # If the last chunk on this line is all whitespace, drop it. 
- if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': - cur_len -= len(cur_line[-1]) - del cur_line[-1] - - if cur_line: - if (self.max_lines is None or - len(lines) + 1 < self.max_lines or - (not chunks or - self.drop_whitespace and - len(chunks) == 1 and - not chunks[0].strip()) and cur_len <= width): - # Convert current line back to a string and store it in - # list of all lines (return value). - lines.append(indent + ''.join(cur_line)) - else: - while cur_line: - if (cur_line[-1].strip() and - cur_len + len(self.placeholder) <= width): - cur_line.append(self.placeholder) - lines.append(indent + ''.join(cur_line)) - break - cur_len -= len(cur_line[-1]) - del cur_line[-1] - else: - if lines: - prev_line = lines[-1].rstrip() - if (len(prev_line) + len(self.placeholder) <= - self.width): - lines[-1] = prev_line + self.placeholder - break - lines.append(indent + self.placeholder.lstrip()) - break - - return lines - - def _split_chunks(self, text): - text = self._munge_whitespace(text) - return self._split(text) - - # -- Public interface ---------------------------------------------- - - def wrap(self, text): - """wrap(text : string) -> [string] - - Reformat the single paragraph in 'text' so it fits in lines of - no more than 'self.width' columns, and return a list of wrapped - lines. Tabs in 'text' are expanded with string.expandtabs(), - and all other whitespace characters (including newline) are - converted to space. - """ - chunks = self._split_chunks(text) - if self.fix_sentence_endings: - self._fix_sentence_endings(chunks) - return self._wrap_chunks(chunks) - - def fill(self, text): - """fill(text : string) -> string - - Reformat the single paragraph in 'text' to fit in lines of no - more than 'self.width' columns, and return a new string - containing the entire wrapped paragraph. - """ - return "\n".join(self.wrap(text)) - - -# -- Convenience interface --------------------------------------------- - -def wrap(text, width=70, **kwargs): - """Wrap a single paragraph of text, returning a list of wrapped lines. - - Reformat the single paragraph in 'text' so it fits in lines of no - more than 'width' columns, and return a list of wrapped lines. By - default, tabs in 'text' are expanded with string.expandtabs(), and - all other whitespace characters (including newline) are converted to - space. See TextWrapper class for available keyword args to customize - wrapping behaviour. - """ - w = TextWrapper(width=width, **kwargs) - return w.wrap(text) - -def fill(text, width=70, **kwargs): - """Fill a single paragraph of text, returning a new string. - - Reformat the single paragraph in 'text' to fit in lines of no more - than 'width' columns, and return a new string containing the entire - wrapped paragraph. As with wrap(), tabs are expanded and other - whitespace characters converted to space. See TextWrapper class for - available keyword args to customize wrapping behaviour. - """ - w = TextWrapper(width=width, **kwargs) - return w.fill(text) - -def shorten(text, width, **kwargs): - """Collapse and truncate the given text to fit in the given width. - - The text first has its whitespace collapsed. If it then fits in - the *width*, it is returned as is. Otherwise, as many words - as possible are joined and then the placeholder is appended:: - - >>> textwrap.shorten("Hello world!", width=12) - 'Hello world!' 
- >>> textwrap.shorten("Hello world!", width=11) - 'Hello [...]' - """ - w = TextWrapper(width=width, max_lines=1, **kwargs) - return w.fill(' '.join(text.strip().split())) - - -# -- Loosely related functionality ------------------------------------- - -_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE) -_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE) - -def dedent(text): - """Remove any common leading whitespace from every line in `text`. - - This can be used to make triple-quoted strings line up with the left - edge of the display, while still presenting them in the source code - in indented form. - - Note that tabs and spaces are both treated as whitespace, but they - are not equal: the lines " hello" and "\\thello" are - considered to have no common leading whitespace. - - Entirely blank lines are normalized to a newline character. - """ - # Look for the longest leading string of spaces and tabs common to - # all lines. - margin = None - text = _whitespace_only_re.sub('', text) - indents = _leading_whitespace_re.findall(text) - for indent in indents: - if margin is None: - margin = indent - - # Current line more deeply indented than previous winner: - # no change (previous winner is still on top). - elif indent.startswith(margin): - pass - - # Current line consistent with and no deeper than previous winner: - # it's the new winner. - elif margin.startswith(indent): - margin = indent - - # Find the largest common whitespace between current line and previous - # winner. - else: - for i, (x, y) in enumerate(zip(margin, indent)): - if x != y: - margin = margin[:i] - break - - # sanity check (testing/debugging only) - if 0 and margin: - for line in text.split("\n"): - assert not line or line.startswith(margin), \ - "line = %r, margin = %r" % (line, margin) - - if margin: - text = re.sub(r'(?m)^' + margin, '', text) - return text - - -def indent(text, prefix, predicate=None): - """Adds 'prefix' to the beginning of selected lines in 'text'. - - If 'predicate' is provided, 'prefix' will only be added to the lines - where 'predicate(line)' is True. If 'predicate' is not provided, - it will default to adding 'prefix' to all non-empty lines that do not - consist solely of whitespace characters. - """ - if predicate is None: - # str.splitlines(True) doesn't produce empty string. - # ''.splitlines(True) => [] - # 'foo\n'.splitlines(True) => ['foo\n'] - # So we can use just `not s.isspace()` here. 
- predicate = lambda s: not s.isspace() - - prefixed_lines = [] - for line in text.splitlines(True): - if predicate(line): - prefixed_lines.append(prefix) - prefixed_lines.append(line) - - return ''.join(prefixed_lines) - - -if __name__ == "__main__": - #print dedent("\tfoo\n\tbar") - #print dedent(" \thello there\n \t how are you?") - print(dedent("Hello there.\n This is indented.")) diff --git a/Python313_13_x64_Template/Lib/threading.py b/Python313_13_x64_Template/Lib/threading.py deleted file mode 100644 index 15bf786a..00000000 --- a/Python313_13_x64_Template/Lib/threading.py +++ /dev/null @@ -1,1602 +0,0 @@ -"""Thread module emulating a subset of Java's threading model.""" - -import os as _os -import sys as _sys -import _thread -import warnings - -from time import monotonic as _time -from _weakrefset import WeakSet -from itertools import count as _count -try: - from _collections import deque as _deque -except ImportError: - from collections import deque as _deque - -# Note regarding PEP 8 compliant names -# This threading model was originally inspired by Java, and inherited -# the convention of camelCase function and method names from that -# language. Those original names are not in any imminent danger of -# being deprecated (even for Py3k),so this module provides them as an -# alias for the PEP 8 compliant names -# Note that using the new PEP 8 compliant names facilitates substitution -# with the multiprocessing module, which doesn't provide the old -# Java inspired names. - -__all__ = ['get_ident', 'active_count', 'Condition', 'current_thread', - 'enumerate', 'main_thread', 'TIMEOUT_MAX', - 'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', - 'Barrier', 'BrokenBarrierError', 'Timer', 'ThreadError', - 'setprofile', 'settrace', 'local', 'stack_size', - 'excepthook', 'ExceptHookArgs', 'gettrace', 'getprofile', - 'setprofile_all_threads','settrace_all_threads'] - -# Rename some stuff so "from threading import *" is safe -_start_joinable_thread = _thread.start_joinable_thread -_daemon_threads_allowed = _thread.daemon_threads_allowed -_allocate_lock = _thread.allocate_lock -_LockType = _thread.LockType -_thread_shutdown = _thread._shutdown -_make_thread_handle = _thread._make_thread_handle -_ThreadHandle = _thread._ThreadHandle -get_ident = _thread.get_ident -_get_main_thread_ident = _thread._get_main_thread_ident -_is_main_interpreter = _thread._is_main_interpreter -try: - get_native_id = _thread.get_native_id - _HAVE_THREAD_NATIVE_ID = True - __all__.append('get_native_id') -except AttributeError: - _HAVE_THREAD_NATIVE_ID = False -ThreadError = _thread.error -try: - _CRLock = _thread.RLock -except AttributeError: - _CRLock = None -TIMEOUT_MAX = _thread.TIMEOUT_MAX -del _thread - -# get thread-local implementation, either from the thread -# module, or from the python fallback - -try: - from _thread import _local as local -except ImportError: - from _threading_local import local - -# Support for profile and trace hooks - -_profile_hook = None -_trace_hook = None - -def setprofile(func): - """Set a profile function for all threads started from the threading module. - - The func will be passed to sys.setprofile() for each thread, before its - run() method is called. - """ - global _profile_hook - _profile_hook = func - -def setprofile_all_threads(func): - """Set a profile function for all threads started from the threading module - and all Python threads that are currently executing. 
- - The func will be passed to sys.setprofile() for each thread, before its - run() method is called. - """ - setprofile(func) - _sys._setprofileallthreads(func) - -def getprofile(): - """Get the profiler function as set by threading.setprofile().""" - return _profile_hook - -def settrace(func): - """Set a trace function for all threads started from the threading module. - - The func will be passed to sys.settrace() for each thread, before its run() - method is called. - """ - global _trace_hook - _trace_hook = func - -def settrace_all_threads(func): - """Set a trace function for all threads started from the threading module - and all Python threads that are currently executing. - - The func will be passed to sys.settrace() for each thread, before its run() - method is called. - """ - settrace(func) - _sys._settraceallthreads(func) - -def gettrace(): - """Get the trace function as set by threading.settrace().""" - return _trace_hook - -# Synchronization classes - -Lock = _LockType - -def RLock(*args, **kwargs): - """Factory function that returns a new reentrant lock. - - A reentrant lock must be released by the thread that acquired it. Once a - thread has acquired a reentrant lock, the same thread may acquire it again - without blocking; the thread must release it once for each time it has - acquired it. - - """ - if args or kwargs: - warnings.warn( - 'Passing arguments to RLock is deprecated and will be removed in 3.15', - DeprecationWarning, - stacklevel=2, - ) - if _CRLock is None: - return _PyRLock(*args, **kwargs) - return _CRLock(*args, **kwargs) - -class _RLock: - """This class implements reentrant lock objects. - - A reentrant lock must be released by the thread that acquired it. Once a - thread has acquired a reentrant lock, the same thread may acquire it - again without blocking; the thread must release it once for each time it - has acquired it. - - """ - - def __init__(self): - self._block = _allocate_lock() - self._owner = None - self._count = 0 - - def __repr__(self): - owner = self._owner - try: - owner = _active[owner].name - except KeyError: - pass - return "<%s %s.%s object owner=%r count=%d at %s>" % ( - "locked" if self._block.locked() else "unlocked", - self.__class__.__module__, - self.__class__.__qualname__, - owner, - self._count, - hex(id(self)) - ) - - def _at_fork_reinit(self): - self._block._at_fork_reinit() - self._owner = None - self._count = 0 - - def acquire(self, blocking=True, timeout=-1): - """Acquire a lock, blocking or non-blocking. - - When invoked without arguments: if this thread already owns the lock, - increment the recursion level by one, and return immediately. Otherwise, - if another thread owns the lock, block until the lock is unlocked. Once - the lock is unlocked (not owned by any thread), then grab ownership, set - the recursion level to one, and return. If more than one thread is - blocked waiting until the lock is unlocked, only one at a time will be - able to grab ownership of the lock. There is no return value in this - case. - - When invoked with the blocking argument set to true, do the same thing - as when called without arguments, and return true. - - When invoked with the blocking argument set to false, do not block. If a - call without an argument would block, return false immediately; - otherwise, do the same thing as when called without arguments, and - return true. 
- - When invoked with the floating-point timeout argument set to a positive - value, block for at most the number of seconds specified by timeout - and as long as the lock cannot be acquired. Return true if the lock has - been acquired, false if the timeout has elapsed. - - """ - me = get_ident() - if self._owner == me: - self._count += 1 - return 1 - rc = self._block.acquire(blocking, timeout) - if rc: - self._owner = me - self._count = 1 - return rc - - __enter__ = acquire - - def release(self): - """Release a lock, decrementing the recursion level. - - If after the decrement it is zero, reset the lock to unlocked (not owned - by any thread), and if any other threads are blocked waiting for the - lock to become unlocked, allow exactly one of them to proceed. If after - the decrement the recursion level is still nonzero, the lock remains - locked and owned by the calling thread. - - Only call this method when the calling thread owns the lock. A - RuntimeError is raised if this method is called when the lock is - unlocked. - - There is no return value. - - """ - if self._owner != get_ident(): - raise RuntimeError("cannot release un-acquired lock") - self._count = count = self._count - 1 - if not count: - self._owner = None - self._block.release() - - def __exit__(self, t, v, tb): - self.release() - - # Internal methods used by condition variables - - def _acquire_restore(self, state): - self._block.acquire() - self._count, self._owner = state - - def _release_save(self): - if self._count == 0: - raise RuntimeError("cannot release un-acquired lock") - count = self._count - self._count = 0 - owner = self._owner - self._owner = None - self._block.release() - return (count, owner) - - def _is_owned(self): - return self._owner == get_ident() - - # Internal method used for reentrancy checks - - def _recursion_count(self): - if self._owner != get_ident(): - return 0 - return self._count - -_PyRLock = _RLock - - -class Condition: - """Class that implements a condition variable. - - A condition variable allows one or more threads to wait until they are - notified by another thread. - - If the lock argument is given and not None, it must be a Lock or RLock - object, and it is used as the underlying lock. Otherwise, a new RLock object - is created and used as the underlying lock. - - """ - - def __init__(self, lock=None): - if lock is None: - lock = RLock() - self._lock = lock - # Export the lock's acquire() and release() methods - self.acquire = lock.acquire - self.release = lock.release - # If the lock defines _release_save() and/or _acquire_restore(), - # these override the default implementations (which just call - # release() and acquire() on the lock). Ditto for _is_owned(). - if hasattr(lock, '_release_save'): - self._release_save = lock._release_save - if hasattr(lock, '_acquire_restore'): - self._acquire_restore = lock._acquire_restore - if hasattr(lock, '_is_owned'): - self._is_owned = lock._is_owned - self._waiters = _deque() - - def _at_fork_reinit(self): - self._lock._at_fork_reinit() - self._waiters.clear() - - def __enter__(self): - return self._lock.__enter__() - - def __exit__(self, *args): - return self._lock.__exit__(*args) - - def __repr__(self): - return "<Condition(%s, %d)>" % (self._lock, len(self._waiters)) - - def _release_save(self): - self._lock.release() # No state to save - - def _acquire_restore(self, x): - self._lock.acquire() # Ignore saved state - - def _is_owned(self): - # Return True if lock is owned by current_thread. - # This method is called only if _lock doesn't have _is_owned().
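For reference, the wait()/notify() contract documented in the RLock/Condition code deleted above is plain upstream stdlib behavior; a minimal sketch of it, assuming only a stock Python 3 interpreter (the producer/consumer names are illustrative, not part of this repo):

    import threading

    items = []
    cond = threading.Condition()   # wraps a fresh RLock by default

    def producer():
        with cond:                 # acquire the underlying lock
            items.append("job")
            cond.notify()          # wake at most one waiter

    def consumer():
        with cond:
            # wait_for() re-checks the predicate after every wakeup,
            # which guards against spurious or stale notifications
            cond.wait_for(lambda: items, timeout=5)
            return items.pop() if items else None

    threading.Thread(target=producer).start()
    print(consumer())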
- if self._lock.acquire(False): - self._lock.release() - return False - else: - return True - - def wait(self, timeout=None): - """Wait until notified or until a timeout occurs. - - If the calling thread has not acquired the lock when this method is - called, a RuntimeError is raised. - - This method releases the underlying lock, and then blocks until it is - awakened by a notify() or notify_all() call for the same condition - variable in another thread, or until the optional timeout occurs. Once - awakened or timed out, it re-acquires the lock and returns. - - When the timeout argument is present and not None, it should be a - floating-point number specifying a timeout for the operation in seconds - (or fractions thereof). - - When the underlying lock is an RLock, it is not released using its - release() method, since this may not actually unlock the lock when it - was acquired multiple times recursively. Instead, an internal interface - of the RLock class is used, which really unlocks it even when it has - been recursively acquired several times. Another internal interface is - then used to restore the recursion level when the lock is reacquired. - - """ - if not self._is_owned(): - raise RuntimeError("cannot wait on un-acquired lock") - waiter = _allocate_lock() - waiter.acquire() - self._waiters.append(waiter) - saved_state = self._release_save() - gotit = False - try: # restore state no matter what (e.g., KeyboardInterrupt) - if timeout is None: - waiter.acquire() - gotit = True - else: - if timeout > 0: - gotit = waiter.acquire(True, timeout) - else: - gotit = waiter.acquire(False) - return gotit - finally: - self._acquire_restore(saved_state) - if not gotit: - try: - self._waiters.remove(waiter) - except ValueError: - pass - - def wait_for(self, predicate, timeout=None): - """Wait until a condition evaluates to True. - - predicate should be a callable which result will be interpreted as a - boolean value. A timeout may be provided giving the maximum time to - wait. - - """ - endtime = None - waittime = timeout - result = predicate() - while not result: - if waittime is not None: - if endtime is None: - endtime = _time() + waittime - else: - waittime = endtime - _time() - if waittime <= 0: - break - self.wait(waittime) - result = predicate() - return result - - def notify(self, n=1): - """Wake up one or more threads waiting on this condition, if any. - - If the calling thread has not acquired the lock when this method is - called, a RuntimeError is raised. - - This method wakes up at most n of the threads waiting for the condition - variable; it is a no-op if no threads are waiting. - - """ - if not self._is_owned(): - raise RuntimeError("cannot notify on un-acquired lock") - waiters = self._waiters - while waiters and n > 0: - waiter = waiters[0] - try: - waiter.release() - except RuntimeError: - # gh-92530: The previous call of notify() released the lock, - # but was interrupted before removing it from the queue. - # It can happen if a signal handler raises an exception, - # like CTRL+C which raises KeyboardInterrupt. - pass - else: - n -= 1 - try: - waiters.remove(waiter) - except ValueError: - pass - - def notify_all(self): - """Wake up all threads waiting on this condition. - - If the calling thread has not acquired the lock when this method - is called, a RuntimeError is raised. - - """ - self.notify(len(self._waiters)) - - def notifyAll(self): - """Wake up all threads waiting on this condition. - - This method is deprecated, use notify_all() instead. 
- - """ - import warnings - warnings.warn('notifyAll() is deprecated, use notify_all() instead', - DeprecationWarning, stacklevel=2) - self.notify_all() - - -class Semaphore: - """This class implements semaphore objects. - - Semaphores manage a counter representing the number of release() calls minus - the number of acquire() calls, plus an initial value. The acquire() method - blocks if necessary until it can return without making the counter - negative. If not given, value defaults to 1. - - """ - - # After Tim Peters' semaphore class, but not quite the same (no maximum) - - def __init__(self, value=1): - if value < 0: - raise ValueError("semaphore initial value must be >= 0") - self._cond = Condition(Lock()) - self._value = value - - def __repr__(self): - cls = self.__class__ - return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" - f" value={self._value}>") - - def acquire(self, blocking=True, timeout=None): - """Acquire a semaphore, decrementing the internal counter by one. - - When invoked without arguments: if the internal counter is larger than - zero on entry, decrement it by one and return immediately. If it is zero - on entry, block, waiting until some other thread has called release() to - make it larger than zero. This is done with proper interlocking so that - if multiple acquire() calls are blocked, release() will wake exactly one - of them up. The implementation may pick one at random, so the order in - which blocked threads are awakened should not be relied on. There is no - return value in this case. - - When invoked with blocking set to true, do the same thing as when called - without arguments, and return true. - - When invoked with blocking set to false, do not block. If a call without - an argument would block, return false immediately; otherwise, do the - same thing as when called without arguments, and return true. - - When invoked with a timeout other than None, it will block for at - most timeout seconds. If acquire does not complete successfully in - that interval, return false. Return true otherwise. - - """ - if not blocking and timeout is not None: - raise ValueError("can't specify timeout for non-blocking acquire") - rc = False - endtime = None - with self._cond: - while self._value == 0: - if not blocking: - break - if timeout is not None: - if endtime is None: - endtime = _time() + timeout - else: - timeout = endtime - _time() - if timeout <= 0: - break - self._cond.wait(timeout) - else: - self._value -= 1 - rc = True - return rc - - __enter__ = acquire - - def release(self, n=1): - """Release a semaphore, incrementing the internal counter by one or more. - - When the counter is zero on entry and another thread is waiting for it - to become larger than zero again, wake up that thread. - - """ - if n < 1: - raise ValueError('n must be one or more') - with self._cond: - self._value += n - self._cond.notify(n) - - def __exit__(self, t, v, tb): - self.release() - - -class BoundedSemaphore(Semaphore): - """Implements a bounded semaphore. - - A bounded semaphore checks to make sure its current value doesn't exceed its - initial value. If it does, ValueError is raised. In most situations - semaphores are used to guard resources with limited capacity. - - If the semaphore is released too many times it's a sign of a bug. If not - given, value defaults to 1. - - Like regular semaphores, bounded semaphores manage a counter representing - the number of release() calls minus the number of acquire() calls, plus an - initial value. 
The acquire() method blocks if necessary until it can return - without making the counter negative. If not given, value defaults to 1. - - """ - - def __init__(self, value=1): - super().__init__(value) - self._initial_value = value - - def __repr__(self): - cls = self.__class__ - return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" - f" value={self._value}/{self._initial_value}>") - - def release(self, n=1): - """Release a semaphore, incrementing the internal counter by one or more. - - When the counter is zero on entry and another thread is waiting for it - to become larger than zero again, wake up that thread. - - If the number of releases exceeds the number of acquires, - raise a ValueError. - - """ - if n < 1: - raise ValueError('n must be one or more') - with self._cond: - if self._value + n > self._initial_value: - raise ValueError("Semaphore released too many times") - self._value += n - self._cond.notify(n) - - -class Event: - """Class implementing event objects. - - Events manage a flag that can be set to true with the set() method and reset - to false with the clear() method. The wait() method blocks until the flag is - true. The flag is initially false. - - """ - - # After Tim Peters' event class (without is_posted()) - - def __init__(self): - self._cond = Condition(Lock()) - self._flag = False - - def __repr__(self): - cls = self.__class__ - status = 'set' if self._flag else 'unset' - return f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}: {status}>" - - def _at_fork_reinit(self): - # Private method called by Thread._after_fork() - self._cond._at_fork_reinit() - - def is_set(self): - """Return true if and only if the internal flag is true.""" - return self._flag - - def isSet(self): - """Return true if and only if the internal flag is true. - - This method is deprecated, use is_set() instead. - - """ - import warnings - warnings.warn('isSet() is deprecated, use is_set() instead', - DeprecationWarning, stacklevel=2) - return self.is_set() - - def set(self): - """Set the internal flag to true. - - All threads waiting for it to become true are awakened. Threads - that call wait() once the flag is true will not block at all. - - """ - with self._cond: - self._flag = True - self._cond.notify_all() - - def clear(self): - """Reset the internal flag to false. - - Subsequently, threads calling wait() will block until set() is called to - set the internal flag to true again. - - """ - with self._cond: - self._flag = False - - def wait(self, timeout=None): - """Block until the internal flag is true. - - If the internal flag is true on entry, return immediately. Otherwise, - block until another thread calls set() to set the flag to true, or until - the optional timeout occurs. - - When the timeout argument is present and not None, it should be a - floating-point number specifying a timeout for the operation in seconds - (or fractions thereof). - - This method returns the internal flag on exit, so it will always return - ``True`` except if a timeout is given and the operation times out, when - it will return ``False``. - - """ - with self._cond: - signaled = self._flag - if not signaled: - signaled = self._cond.wait(timeout) - return signaled - - -# A barrier class. Inspired in part by the pthread_barrier_* api and -# the CyclicBarrier class from Java. See -# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and -# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/ -# CyclicBarrier.html -# for information. 
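The Semaphore, BoundedSemaphore, and Event classes deleted in this hunk are likewise unmodified upstream code; their documented behavior reduces to a short sketch (stock Python 3 assumed, all names illustrative):

    import threading

    ready = threading.Event()
    slots = threading.BoundedSemaphore(2)   # at most two concurrent holders

    def worker(n):
        ready.wait()                        # block until the flag is set
        with slots:                         # take one slot, release on exit
            print(f"worker {n} running")

    threads = [threading.Thread(target=worker, args=(i,)) for i in range(4)]
    for t in threads:
        t.start()
    ready.set()                             # wakes all waiters at once
    for t in threads:
        t.join()

    try:
        slots.release()                     # one release too many...
    except ValueError as exc:
        print(exc)                          # ...raises, as documented above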
-# We maintain two main states, 'filling' and 'draining' enabling the barrier -# to be cyclic. Threads are not allowed into it until it has fully drained -# since the previous cycle. In addition, a 'resetting' state exists which is -# similar to 'draining' except that threads leave with a BrokenBarrierError, -# and a 'broken' state in which all threads get the exception. -class Barrier: - """Implements a Barrier. - - Useful for synchronizing a fixed number of threads at known synchronization - points. Threads block on 'wait()' and are simultaneously awoken once they - have all made that call. - - """ - - def __init__(self, parties, action=None, timeout=None): - """Create a barrier, initialised to 'parties' threads. - - 'action' is a callable which, when supplied, will be called by one of - the threads after they have all entered the barrier and just prior to - releasing them all. If a 'timeout' is provided, it is used as the - default for all subsequent 'wait()' calls. - - """ - if parties < 1: - raise ValueError("parties must be >= 1") - self._cond = Condition(Lock()) - self._action = action - self._timeout = timeout - self._parties = parties - self._state = 0 # 0 filling, 1 draining, -1 resetting, -2 broken - self._count = 0 - - def __repr__(self): - cls = self.__class__ - if self.broken: - return f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}: broken>" - return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" - f" waiters={self.n_waiting}/{self.parties}>") - - def wait(self, timeout=None): - """Wait for the barrier. - - When the specified number of threads have started waiting, they are all - simultaneously awoken. If an 'action' was provided for the barrier, one - of the threads will have executed that callback prior to returning. - Returns an individual index number from 0 to 'parties-1'. - - """ - if timeout is None: - timeout = self._timeout - with self._cond: - self._enter() # Block while the barrier drains. - index = self._count - self._count += 1 - try: - if index + 1 == self._parties: - # We release the barrier - self._release() - else: - # We wait until someone releases us - self._wait(timeout) - return index - finally: - self._count -= 1 - # Wake up any threads waiting for barrier to drain. - self._exit() - - # Block until the barrier is ready for us, or raise an exception - # if it is broken. - def _enter(self): - while self._state in (-1, 1): - # It is draining or resetting, wait until done - self._cond.wait() - #see if the barrier is in a broken state - if self._state < 0: - raise BrokenBarrierError - assert self._state == 0 - - # Optionally run the 'action' and release the threads waiting - # in the barrier. - def _release(self): - try: - if self._action: - self._action() - # enter draining state - self._state = 1 - self._cond.notify_all() - except: - #an exception during the _action handler. Break and reraise - self._break() - raise - - # Wait in the barrier until we are released. Raise an exception - # if the barrier is reset or broken. - def _wait(self, timeout): - if not self._cond.wait_for(lambda : self._state != 0, timeout): - #timed out. Break the barrier - self._break() - raise BrokenBarrierError - if self._state < 0: - raise BrokenBarrierError - assert self._state == 1 - - # If we are the last thread to exit the barrier, signal any threads - # waiting for the barrier to drain. 
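A corresponding sketch of the Barrier semantics described above: parties block in wait() and are released together, and the optional action callback runs in exactly one of them just before release (stock Python 3, names illustrative):

    import threading

    barrier = threading.Barrier(3, action=lambda: print("all arrived"))

    def phase(n):
        index = barrier.wait()   # returns a distinct index in range(parties)
        print(f"thread {n} released with index {index}")

    threads = [threading.Thread(target=phase, args=(i,)) for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()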
- def _exit(self): - if self._count == 0: - if self._state in (-1, 1): - #resetting or draining - self._state = 0 - self._cond.notify_all() - - def reset(self): - """Reset the barrier to the initial state. - - Any threads currently waiting will get the BrokenBarrier exception - raised. - - """ - with self._cond: - if self._count > 0: - if self._state == 0: - #reset the barrier, waking up threads - self._state = -1 - elif self._state == -2: - #was broken, set it to reset state - #which clears when the last thread exits - self._state = -1 - else: - self._state = 0 - self._cond.notify_all() - - def abort(self): - """Place the barrier into a 'broken' state. - - Useful in case of error. Any currently waiting threads and threads - attempting to 'wait()' will have BrokenBarrierError raised. - - """ - with self._cond: - self._break() - - def _break(self): - # An internal error was detected. The barrier is set to - # a broken state all parties awakened. - self._state = -2 - self._cond.notify_all() - - @property - def parties(self): - """Return the number of threads required to trip the barrier.""" - return self._parties - - @property - def n_waiting(self): - """Return the number of threads currently waiting at the barrier.""" - # We don't need synchronization here since this is an ephemeral result - # anyway. It returns the correct value in the steady state. - if self._state == 0: - return self._count - return 0 - - @property - def broken(self): - """Return True if the barrier is in a broken state.""" - return self._state == -2 - -# exception raised by the Barrier class -class BrokenBarrierError(RuntimeError): - pass - - -# Helper to generate new thread names -_counter = _count(1).__next__ -def _newname(name_template): - return name_template % _counter() - -# Active thread administration. -# -# bpo-44422: Use a reentrant lock to allow reentrant calls to functions like -# threading.enumerate(). -_active_limbo_lock = RLock() -_active = {} # maps thread id to Thread object -_limbo = {} -_dangling = WeakSet() - - -# Main class for threads - -class Thread: - """A class that represents a thread of control. - - This class can be safely subclassed in a limited fashion. There are two ways - to specify the activity: by passing a callable object to the constructor, or - by overriding the run() method in a subclass. - - """ - - _initialized = False - - def __init__(self, group=None, target=None, name=None, - args=(), kwargs=None, *, daemon=None): - """This constructor should always be called with keyword arguments. Arguments are: - - *group* should be None; reserved for future extension when a ThreadGroup - class is implemented. - - *target* is the callable object to be invoked by the run() - method. Defaults to None, meaning nothing is called. - - *name* is the thread name. By default, a unique name is constructed of - the form "Thread-N" where N is a small decimal number. - - *args* is a list or tuple of arguments for the target invocation. Defaults to (). - - *kwargs* is a dictionary of keyword arguments for the target - invocation. Defaults to {}. - - If a subclass overrides the constructor, it must make sure to invoke - the base class constructor (Thread.__init__()) before doing anything - else to the thread. 
- - """ - assert group is None, "group argument must be None for now" - if kwargs is None: - kwargs = {} - if name: - name = str(name) - else: - name = _newname("Thread-%d") - if target is not None: - try: - target_name = target.__name__ - name += f" ({target_name})" - except AttributeError: - pass - - self._target = target - self._name = name - self._args = args - self._kwargs = kwargs - if daemon is not None: - if daemon and not _daemon_threads_allowed(): - raise RuntimeError('daemon threads are disabled in this (sub)interpreter') - self._daemonic = daemon - else: - self._daemonic = current_thread().daemon - self._ident = None - if _HAVE_THREAD_NATIVE_ID: - self._native_id = None - self._handle = _ThreadHandle() - self._started = Event() - self._initialized = True - # Copy of sys.stderr used by self._invoke_excepthook() - self._stderr = _sys.stderr - self._invoke_excepthook = _make_invoke_excepthook() - # For debugging and _after_fork() - _dangling.add(self) - - def _after_fork(self, new_ident=None): - # Private! Called by threading._after_fork(). - self._started._at_fork_reinit() - if new_ident is not None: - # This thread is alive. - self._ident = new_ident - assert self._handle.ident == new_ident - if _HAVE_THREAD_NATIVE_ID: - self._set_native_id() - else: - # Otherwise, the thread is dead, Jim. _PyThread_AfterFork() - # already marked our handle done. - pass - - def __repr__(self): - assert self._initialized, "Thread.__init__() was not called" - status = "initial" - if self._started.is_set(): - status = "started" - if self._handle.is_done(): - status = "stopped" - if self._daemonic: - status += " daemon" - if self._ident is not None: - status += " %s" % self._ident - return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status) - - def start(self): - """Start the thread's activity. - - It must be called at most once per thread object. It arranges for the - object's run() method to be invoked in a separate thread of control. - - This method will raise a RuntimeError if called more than once on the - same thread object. - - """ - if not self._initialized: - raise RuntimeError("thread.__init__() not called") - - if self._started.is_set(): - raise RuntimeError("threads can only be started once") - - with _active_limbo_lock: - _limbo[self] = self - try: - # Start joinable thread - _start_joinable_thread(self._bootstrap, handle=self._handle, - daemon=self.daemon) - except Exception: - with _active_limbo_lock: - del _limbo[self] - raise - self._started.wait() # Will set ident and native_id - - def run(self): - """Method representing the thread's activity. - - You may override this method in a subclass. The standard run() method - invokes the callable object passed to the object's constructor as the - target argument, if any, with sequential and keyword arguments taken - from the args and kwargs arguments, respectively. - - """ - try: - if self._target is not None: - self._target(*self._args, **self._kwargs) - finally: - # Avoid a refcycle if the thread is running a function with - # an argument that has a member that points to the thread. - del self._target, self._args, self._kwargs - - def _bootstrap(self): - # Wrapper around the real bootstrap code that ignores - # exceptions during interpreter cleanup. Those typically - # happen when a daemon thread wakes up at an unfortunate - # moment, finds the world around it destroyed, and raises some - # random exception *** while trying to report the exception in - # _bootstrap_inner() below ***. 
Those random exceptions - # don't help anybody, and they confuse users, so we suppress - # them. We suppress them only when it appears that the world - # indeed has already been destroyed, so that exceptions in - # _bootstrap_inner() during normal business hours are properly - # reported. Also, we only suppress them for daemonic threads; - # if a non-daemonic encounters this, something else is wrong. - try: - self._bootstrap_inner() - except: - if self._daemonic and _sys is None: - return - raise - - def _set_ident(self): - self._ident = get_ident() - - if _HAVE_THREAD_NATIVE_ID: - def _set_native_id(self): - self._native_id = get_native_id() - - def _bootstrap_inner(self): - try: - self._set_ident() - if _HAVE_THREAD_NATIVE_ID: - self._set_native_id() - self._started.set() - with _active_limbo_lock: - _active[self._ident] = self - del _limbo[self] - - if _trace_hook: - _sys.settrace(_trace_hook) - if _profile_hook: - _sys.setprofile(_profile_hook) - - try: - self.run() - except: - self._invoke_excepthook(self) - finally: - self._delete() - - def _delete(self): - "Remove current thread from the dict of currently running threads." - with _active_limbo_lock: - del _active[get_ident()] - # There must not be any python code between the previous line - # and after the lock is released. Otherwise a tracing function - # could try to acquire the lock again in the same thread, (in - # current_thread()), and would block. - - def join(self, timeout=None): - """Wait until the thread terminates. - - This blocks the calling thread until the thread whose join() method is - called terminates -- either normally or through an unhandled exception - or until the optional timeout occurs. - - When the timeout argument is present and not None, it should be a - floating-point number specifying a timeout for the operation in seconds - (or fractions thereof). As join() always returns None, you must call - is_alive() after join() to decide whether a timeout happened -- if the - thread is still alive, the join() call timed out. - - When the timeout argument is not present or None, the operation will - block until the thread terminates. - - A thread can be join()ed many times. - - join() raises a RuntimeError if an attempt is made to join the current - thread as that would cause a deadlock. It is also an error to join() a - thread before it has been started and attempts to do so raises the same - exception. - - """ - if not self._initialized: - raise RuntimeError("Thread.__init__() not called") - if not self._started.is_set(): - raise RuntimeError("cannot join thread before it is started") - if self is current_thread(): - raise RuntimeError("cannot join current thread") - - # the behavior of a negative timeout isn't documented, but - # historically .join(timeout=x) for x<0 has acted as if timeout=0 - if timeout is not None: - timeout = max(timeout, 0) - - self._handle.join(timeout) - - @property - def name(self): - """A string used for identification purposes only. - - It has no semantics. Multiple threads may be given the same name. The - initial name is set by the constructor. - - """ - assert self._initialized, "Thread.__init__() not called" - return self._name - - @name.setter - def name(self, name): - assert self._initialized, "Thread.__init__() not called" - self._name = str(name) - - @property - def ident(self): - """Thread identifier of this thread or None if it has not been started. - - This is a nonzero integer. See the get_ident() function. 
Thread - identifiers may be recycled when a thread exits and another thread is - created. The identifier is available even after the thread has exited. - - """ - assert self._initialized, "Thread.__init__() not called" - return self._ident - - if _HAVE_THREAD_NATIVE_ID: - @property - def native_id(self): - """Native integral thread ID of this thread, or None if it has not been started. - - This is a non-negative integer. See the get_native_id() function. - This represents the Thread ID as reported by the kernel. - - """ - assert self._initialized, "Thread.__init__() not called" - return self._native_id - - def is_alive(self): - """Return whether the thread is alive. - - This method returns True just before the run() method starts until just - after the run() method terminates. See also the module function - enumerate(). - - """ - assert self._initialized, "Thread.__init__() not called" - return self._started.is_set() and not self._handle.is_done() - - @property - def daemon(self): - """A boolean value indicating whether this thread is a daemon thread. - - This must be set before start() is called, otherwise RuntimeError is - raised. Its initial value is inherited from the creating thread; the - main thread is not a daemon thread and therefore all threads created in - the main thread default to daemon = False. - - The entire Python program exits when only daemon threads are left. - - """ - assert self._initialized, "Thread.__init__() not called" - return self._daemonic - - @daemon.setter - def daemon(self, daemonic): - if not self._initialized: - raise RuntimeError("Thread.__init__() not called") - if daemonic and not _daemon_threads_allowed(): - raise RuntimeError('daemon threads are disabled in this interpreter') - if self._started.is_set(): - raise RuntimeError("cannot set daemon status of active thread") - self._daemonic = daemonic - - def isDaemon(self): - """Return whether this thread is a daemon. - - This method is deprecated, use the daemon attribute instead. - - """ - import warnings - warnings.warn('isDaemon() is deprecated, get the daemon attribute instead', - DeprecationWarning, stacklevel=2) - return self.daemon - - def setDaemon(self, daemonic): - """Set whether this thread is a daemon. - - This method is deprecated, use the .daemon property instead. - - """ - import warnings - warnings.warn('setDaemon() is deprecated, set the daemon attribute instead', - DeprecationWarning, stacklevel=2) - self.daemon = daemonic - - def getName(self): - """Return a string used for identification purposes only. - - This method is deprecated, use the name attribute instead. - - """ - import warnings - warnings.warn('getName() is deprecated, get the name attribute instead', - DeprecationWarning, stacklevel=2) - return self.name - - def setName(self, name): - """Set the name string for this thread. - - This method is deprecated, use the name attribute instead. 
- - """ - import warnings - warnings.warn('setName() is deprecated, set the name attribute instead', - DeprecationWarning, stacklevel=2) - self.name = name - - -try: - from _thread import (_excepthook as excepthook, - _ExceptHookArgs as ExceptHookArgs) -except ImportError: - # Simple Python implementation if _thread._excepthook() is not available - from traceback import print_exception as _print_exception - from collections import namedtuple - - _ExceptHookArgs = namedtuple( - 'ExceptHookArgs', - 'exc_type exc_value exc_traceback thread') - - def ExceptHookArgs(args): - return _ExceptHookArgs(*args) - - def excepthook(args, /): - """ - Handle uncaught Thread.run() exception. - """ - if args.exc_type == SystemExit: - # silently ignore SystemExit - return - - if _sys is not None and _sys.stderr is not None: - stderr = _sys.stderr - elif args.thread is not None: - stderr = args.thread._stderr - if stderr is None: - # do nothing if sys.stderr is None and sys.stderr was None - # when the thread was created - return - else: - # do nothing if sys.stderr is None and args.thread is None - return - - if args.thread is not None: - name = args.thread.name - else: - name = get_ident() - print(f"Exception in thread {name}:", - file=stderr, flush=True) - _print_exception(args.exc_type, args.exc_value, args.exc_traceback, - file=stderr) - stderr.flush() - - -# Original value of threading.excepthook -__excepthook__ = excepthook - - -def _make_invoke_excepthook(): - # Create a local namespace to ensure that variables remain alive - # when _invoke_excepthook() is called, even if it is called late during - # Python shutdown. It is mostly needed for daemon threads. - - old_excepthook = excepthook - old_sys_excepthook = _sys.excepthook - if old_excepthook is None: - raise RuntimeError("threading.excepthook is None") - if old_sys_excepthook is None: - raise RuntimeError("sys.excepthook is None") - - sys_exc_info = _sys.exc_info - local_print = print - local_sys = _sys - - def invoke_excepthook(thread): - global excepthook - try: - hook = excepthook - if hook is None: - hook = old_excepthook - - args = ExceptHookArgs([*sys_exc_info(), thread]) - - hook(args) - except Exception as exc: - exc.__suppress_context__ = True - del exc - - if local_sys is not None and local_sys.stderr is not None: - stderr = local_sys.stderr - else: - stderr = thread._stderr - - local_print("Exception in threading.excepthook:", - file=stderr, flush=True) - - if local_sys is not None and local_sys.excepthook is not None: - sys_excepthook = local_sys.excepthook - else: - sys_excepthook = old_sys_excepthook - - sys_excepthook(*sys_exc_info()) - finally: - # Break reference cycle (exception stored in a variable) - args = None - - return invoke_excepthook - - -# The timer class was contributed by Itamar Shtull-Trauring - -class Timer(Thread): - """Call a function after a specified number of seconds: - - t = Timer(30.0, f, args=None, kwargs=None) - t.start() - t.cancel() # stop the timer's action if it's still waiting - - """ - - def __init__(self, interval, function, args=None, kwargs=None): - Thread.__init__(self) - self.interval = interval - self.function = function - self.args = args if args is not None else [] - self.kwargs = kwargs if kwargs is not None else {} - self.finished = Event() - - def cancel(self): - """Stop the timer if it hasn't finished yet.""" - self.finished.set() - - def run(self): - self.finished.wait(self.interval) - if not self.finished.is_set(): - self.function(*self.args, **self.kwargs) - self.finished.set() - - -# 
Special thread class to represent the main thread - -class _MainThread(Thread): - - def __init__(self): - Thread.__init__(self, name="MainThread", daemon=False) - self._started.set() - self._ident = _get_main_thread_ident() - self._handle = _make_thread_handle(self._ident) - if _HAVE_THREAD_NATIVE_ID: - self._set_native_id() - with _active_limbo_lock: - _active[self._ident] = self - - -# Helper thread-local instance to detect when a _DummyThread -# is collected. Not a part of the public API. -_thread_local_info = local() - - -class _DeleteDummyThreadOnDel: - ''' - Helper class to remove a dummy thread from threading._active on __del__. - ''' - - def __init__(self, dummy_thread): - self._dummy_thread = dummy_thread - self._tident = dummy_thread.ident - # Put the thread on a thread local variable so that when - # the related thread finishes this instance is collected. - # - # Note: no other references to this instance may be created. - # If any client code creates a reference to this instance, - # the related _DummyThread will be kept forever! - _thread_local_info._track_dummy_thread_ref = self - - def __del__(self, _active_limbo_lock=_active_limbo_lock, _active=_active): - with _active_limbo_lock: - if _active.get(self._tident) is self._dummy_thread: - _active.pop(self._tident, None) - - -# Dummy thread class to represent threads not started here. -# These should be added to `_active` and removed automatically -# when they die, although they can't be waited for. -# Their purpose is to return *something* from current_thread(). -# They are marked as daemon threads so we won't wait for them -# when we exit (conform previous semantics). - -class _DummyThread(Thread): - - def __init__(self): - Thread.__init__(self, name=_newname("Dummy-%d"), - daemon=_daemon_threads_allowed()) - self._started.set() - self._set_ident() - self._handle = _make_thread_handle(self._ident) - if _HAVE_THREAD_NATIVE_ID: - self._set_native_id() - with _active_limbo_lock: - _active[self._ident] = self - _DeleteDummyThreadOnDel(self) - - def is_alive(self): - if not self._handle.is_done() and self._started.is_set(): - return True - raise RuntimeError("thread is not alive") - - def join(self, timeout=None): - raise RuntimeError("cannot join a dummy thread") - - def _after_fork(self, new_ident=None): - if new_ident is not None: - self.__class__ = _MainThread - self._name = 'MainThread' - self._daemonic = False - Thread._after_fork(self, new_ident=new_ident) - - -# Global API functions - -def current_thread(): - """Return the current Thread object, corresponding to the caller's thread of control. - - If the caller's thread of control was not created through the threading - module, a dummy thread object with limited functionality is returned. - - """ - try: - return _active[get_ident()] - except KeyError: - return _DummyThread() - -def currentThread(): - """Return the current Thread object, corresponding to the caller's thread of control. - - This function is deprecated, use current_thread() instead. - - """ - import warnings - warnings.warn('currentThread() is deprecated, use current_thread() instead', - DeprecationWarning, stacklevel=2) - return current_thread() - -def active_count(): - """Return the number of Thread objects currently alive. - - The returned count is equal to the length of the list returned by - enumerate(). - - """ - # NOTE: if the logic in here ever changes, update Modules/posixmodule.c - # warn_about_fork_with_threads() to match. 
- with _active_limbo_lock: - return len(_active) + len(_limbo) - -def activeCount(): - """Return the number of Thread objects currently alive. - - This function is deprecated, use active_count() instead. - - """ - import warnings - warnings.warn('activeCount() is deprecated, use active_count() instead', - DeprecationWarning, stacklevel=2) - return active_count() - -def _enumerate(): - # Same as enumerate(), but without the lock. Internal use only. - return list(_active.values()) + list(_limbo.values()) - -def enumerate(): - """Return a list of all Thread objects currently alive. - - The list includes daemonic threads, dummy thread objects created by - current_thread(), and the main thread. It excludes terminated threads and - threads that have not yet been started. - - """ - with _active_limbo_lock: - return list(_active.values()) + list(_limbo.values()) - - -_threading_atexits = [] -_SHUTTING_DOWN = False - -def _register_atexit(func, *arg, **kwargs): - """CPython internal: register *func* to be called before joining threads. - - The registered *func* is called with its arguments just before all - non-daemon threads are joined in `_shutdown()`. It provides a similar - purpose to `atexit.register()`, but its functions are called prior to - threading shutdown instead of interpreter shutdown. - - For similarity to atexit, the registered functions are called in reverse. - """ - if _SHUTTING_DOWN: - raise RuntimeError("can't register atexit after shutdown") - - _threading_atexits.append(lambda: func(*arg, **kwargs)) - - -from _thread import stack_size - -# Create the main thread object, -# and make it available for the interpreter -# (Py_Main) as threading._shutdown. - -_main_thread = _MainThread() - -def _shutdown(): - """ - Wait until the Python thread state of all non-daemon threads get deleted. - """ - # Obscure: other threads may be waiting to join _main_thread. That's - # dubious, but some code does it. We can't wait for it to be marked as done - # normally - that won't happen until the interpreter is nearly dead. So - # mark it done here. - if _main_thread._handle.is_done() and _is_main_interpreter(): - # _shutdown() was already called - return - - global _SHUTTING_DOWN - _SHUTTING_DOWN = True - - # Call registered threading atexit functions before threads are joined. - # Order is reversed, similar to atexit. - for atexit_call in reversed(_threading_atexits): - atexit_call() - - if _is_main_interpreter(): - _main_thread._handle._set_done() - - # Wait for all non-daemon threads to exit. - _thread_shutdown() - - -def main_thread(): - """Return the main thread object. - - In normal conditions, the main thread is the thread from which the - Python interpreter was started. - """ - # XXX Figure this out for subinterpreters. (See gh-75698.) - return _main_thread - - -def _after_fork(): - """ - Cleanup threading module state that should not exist after a fork. - """ - # Reset _active_limbo_lock, in case we forked while the lock was held - # by another (non-forked) thread. http://bugs.python.org/issue874900 - global _active_limbo_lock, _main_thread - _active_limbo_lock = RLock() - - # fork() only copied the current thread; clear references to others. - new_active = {} - - try: - current = _active[get_ident()] - except KeyError: - # fork() was called in a thread which was not spawned - # by threading.Thread. For example, a thread spawned - # by thread.start_new_thread(). 
- current = _MainThread() - - _main_thread = current - - with _active_limbo_lock: - # Dangling thread instances must still have their locks reset, - # because someone may join() them. - threads = set(_enumerate()) - threads.update(_dangling) - for thread in threads: - # Any lock/condition variable may be currently locked or in an - # invalid state, so we reinitialize them. - if thread is current: - # This is the one and only active thread. - ident = get_ident() - thread._after_fork(new_ident=ident) - new_active[ident] = thread - else: - # All the others are already stopped. - thread._after_fork() - - _limbo.clear() - _active.clear() - _active.update(new_active) - assert len(_active) == 1 - - -if hasattr(_os, "register_at_fork"): - _os.register_at_fork(after_in_child=_after_fork) diff --git a/Python313_13_x64_Template/Lib/timeit.py b/Python313_13_x64_Template/Lib/timeit.py deleted file mode 100644 index 02cfafaf..00000000 --- a/Python313_13_x64_Template/Lib/timeit.py +++ /dev/null @@ -1,381 +0,0 @@ -#! /usr/bin/env python3 - -"""Tool for measuring execution time of small code snippets. - -This module avoids a number of common traps for measuring execution -times. See also Tim Peters' introduction to the Algorithms chapter in -the Python Cookbook, published by O'Reilly. - -Library usage: see the Timer class. - -Command line usage: - python timeit.py [-n N] [-r N] [-s S] [-p] [-h] [--] [statement] - -Options: - -n/--number N: how many times to execute 'statement' (default: see below) - -r/--repeat N: how many times to repeat the timer (default 5) - -s/--setup S: statement to be executed once initially (default 'pass'). - Execution time of this setup statement is NOT timed. - -p/--process: use time.process_time() (default is time.perf_counter()) - -v/--verbose: print raw timing results; repeat for more digits precision - -u/--unit: set the output time unit (nsec, usec, msec, or sec) - -h/--help: print this usage message and exit - --: separate options from statement, use when statement starts with - - statement: statement to be timed (default 'pass') - -A multi-line statement may be given by specifying each line as a -separate argument; indented lines are possible by enclosing an -argument in quotes and using leading spaces. Multiple -s options are -treated similarly. - -If -n is not given, a suitable number of loops is calculated by trying -increasing numbers from the sequence 1, 2, 5, 10, 20, 50, ... until the -total time is at least 0.2 seconds. - -Note: there is a certain baseline overhead associated with executing a -pass statement. It differs between versions. The code here doesn't try -to hide it, but you should be aware of it. The baseline overhead can be -measured by invoking the program without arguments. - -Classes: - - Timer - -Functions: - - timeit(string, string) -> float - repeat(string, string) -> list - default_timer() -> float - -""" - -import gc -import itertools -import sys -import time - -__all__ = ["Timer", "timeit", "repeat", "default_timer"] - -dummy_src_name = "<timeit-src>" -default_number = 1000000 -default_repeat = 5 -default_timer = time.perf_counter - -_globals = globals - -# Don't change the indentation of the template; the reindent() calls -# in Timer.__init__() depend on setup being indented 4 spaces and stmt -# being indented 8 spaces.
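The library-style API described in the timeit docstring above amounts to the following minimal sketch (stock Python 3; the statement and setup strings are illustrative, not from this repo):

    import timeit

    t = timeit.Timer(stmt="sorted(data)", setup="data = list(range(1000))")
    loops, _ = t.autorange()                 # grows loops until total >= 0.2s
    best = min(t.repeat(repeat=5, number=loops))
    print(f"{best / loops:.3e} s per loop over {loops} loops")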
-template = """ -def inner(_it, _timer{init}): - {setup} - _t0 = _timer() - for _i in _it: - {stmt} - pass - _t1 = _timer() - return _t1 - _t0 -""" - - -def reindent(src, indent): - """Helper to reindent a multi-line statement.""" - return src.replace("\n", "\n" + " " * indent) - - -class Timer: - """Class for timing execution speed of small code snippets. - - The constructor takes a statement to be timed, an additional - statement used for setup, and a timer function. Both statements - default to 'pass'; the timer function is platform-dependent (see - module doc string). If 'globals' is specified, the code will be - executed within that namespace (as opposed to inside timeit's - namespace). - - To measure the execution time of the first statement, use the - timeit() method. The repeat() method is a convenience to call - timeit() multiple times and return a list of results. - - The statements may contain newlines, as long as they don't contain - multi-line string literals. - """ - - def __init__(self, stmt="pass", setup="pass", timer=default_timer, - globals=None): - """Constructor. See class doc string.""" - self.timer = timer - local_ns = {} - global_ns = _globals() if globals is None else globals - init = '' - if isinstance(setup, str): - # Check that the code can be compiled outside a function - compile(setup, dummy_src_name, "exec") - stmtprefix = setup + '\n' - setup = reindent(setup, 4) - elif callable(setup): - local_ns['_setup'] = setup - init += ', _setup=_setup' - stmtprefix = '' - setup = '_setup()' - else: - raise ValueError("setup is neither a string nor callable") - if isinstance(stmt, str): - # Check that the code can be compiled outside a function - compile(stmtprefix + stmt, dummy_src_name, "exec") - stmt = reindent(stmt, 8) - elif callable(stmt): - local_ns['_stmt'] = stmt - init += ', _stmt=_stmt' - stmt = '_stmt()' - else: - raise ValueError("stmt is neither a string nor callable") - src = template.format(stmt=stmt, setup=setup, init=init) - self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec(code, global_ns, local_ns) - self.inner = local_ns["inner"] - - def print_exc(self, file=None): - """Helper to print a traceback from the timed code. - - Typical use: - - t = Timer(...) # outside the try/except - try: - t.timeit(...) # or t.repeat(...) - except: - t.print_exc() - - The advantage over the standard traceback is that source lines - in the compiled template will be displayed. - - The optional file argument directs where the traceback is - sent; it defaults to sys.stderr. - """ - import linecache, traceback - if self.src is not None: - linecache.cache[dummy_src_name] = (len(self.src), - None, - self.src.split("\n"), - dummy_src_name) - # else the source is already stored somewhere else - - traceback.print_exc(file=file) - - def timeit(self, number=default_number): - """Time 'number' executions of the main statement. - - To be precise, this executes the setup statement once, and - then returns the time it takes to execute the main statement - a number of times, as float seconds if using the default timer. The - argument is the number of times through the loop, defaulting - to one million. The main statement, the setup statement and - the timer function to be used are passed to the constructor. 
- """ - it = itertools.repeat(None, number) - gcold = gc.isenabled() - gc.disable() - try: - timing = self.inner(it, self.timer) - finally: - if gcold: - gc.enable() - return timing - - def repeat(self, repeat=default_repeat, number=default_number): - """Call timeit() a few times. - - This is a convenience function that calls the timeit() - repeatedly, returning a list of results. The first argument - specifies how many times to call timeit(), defaulting to 5; - the second argument specifies the timer argument, defaulting - to one million. - - Note: it's tempting to calculate mean and standard deviation - from the result vector and report these. However, this is not - very useful. In a typical case, the lowest value gives a - lower bound for how fast your machine can run the given code - snippet; higher values in the result vector are typically not - caused by variability in Python's speed, but by other - processes interfering with your timing accuracy. So the min() - of the result is probably the only number you should be - interested in. After that, you should look at the entire - vector and apply common sense rather than statistics. - """ - r = [] - for i in range(repeat): - t = self.timeit(number) - r.append(t) - return r - - def autorange(self, callback=None): - """Return the number of loops and time taken so that total time >= 0.2. - - Calls the timeit method with increasing numbers from the sequence - 1, 2, 5, 10, 20, 50, ... until the time taken is at least 0.2 - second. Returns (number, time_taken). - - If *callback* is given and is not None, it will be called after - each trial with two arguments: ``callback(number, time_taken)``. - """ - i = 1 - while True: - for j in 1, 2, 5: - number = i * j - time_taken = self.timeit(number) - if callback: - callback(number, time_taken) - if time_taken >= 0.2: - return (number, time_taken) - i *= 10 - - -def timeit(stmt="pass", setup="pass", timer=default_timer, - number=default_number, globals=None): - """Convenience function to create Timer object and call timeit method.""" - return Timer(stmt, setup, timer, globals).timeit(number) - - -def repeat(stmt="pass", setup="pass", timer=default_timer, - repeat=default_repeat, number=default_number, globals=None): - """Convenience function to create Timer object and call repeat method.""" - return Timer(stmt, setup, timer, globals).repeat(repeat, number) - - -def main(args=None, *, _wrap_timer=None): - """Main program, used when run as a script. - - The optional 'args' argument specifies the command line to be parsed, - defaulting to sys.argv[1:]. - - The return value is an exit code to be passed to sys.exit(); it - may be None to indicate success. - - When an exception happens during timing, a traceback is printed to - stderr and the return value is 1. Exceptions at other times - (including the template compilation) are not caught. - - '_wrap_timer' is an internal interface used for unit testing. If it - is not None, it must be a callable that accepts a timer function - and returns another timer function (used for unit testing). 
- """ - if args is None: - args = sys.argv[1:] - import getopt - try: - opts, args = getopt.getopt(args, "n:u:s:r:pvh", - ["number=", "setup=", "repeat=", - "process", "verbose", "unit=", "help"]) - except getopt.error as err: - print(err) - print("use -h/--help for command line help") - return 2 - - timer = default_timer - stmt = "\n".join(args) or "pass" - number = 0 # auto-determine - setup = [] - repeat = default_repeat - verbose = 0 - time_unit = None - units = {"nsec": 1e-9, "usec": 1e-6, "msec": 1e-3, "sec": 1.0} - precision = 3 - for o, a in opts: - if o in ("-n", "--number"): - number = int(a) - if o in ("-s", "--setup"): - setup.append(a) - if o in ("-u", "--unit"): - if a in units: - time_unit = a - else: - print("Unrecognized unit. Please select nsec, usec, msec, or sec.", - file=sys.stderr) - return 2 - if o in ("-r", "--repeat"): - repeat = int(a) - if repeat <= 0: - repeat = 1 - if o in ("-p", "--process"): - timer = time.process_time - if o in ("-v", "--verbose"): - if verbose: - precision += 1 - verbose += 1 - if o in ("-h", "--help"): - print(__doc__, end=' ') - return 0 - setup = "\n".join(setup) or "pass" - - # Include the current directory, so that local imports work (sys.path - # contains the directory of this script, rather than the current - # directory) - import os - sys.path.insert(0, os.curdir) - if _wrap_timer is not None: - timer = _wrap_timer(timer) - - t = Timer(stmt, setup, timer) - if number == 0: - # determine number so that 0.2 <= total time < 2.0 - callback = None - if verbose: - def callback(number, time_taken): - msg = "{num} loop{s} -> {secs:.{prec}g} secs" - plural = (number != 1) - print(msg.format(num=number, s='s' if plural else '', - secs=time_taken, prec=precision)) - try: - number, _ = t.autorange(callback) - except: - t.print_exc() - return 1 - - if verbose: - print() - - try: - raw_timings = t.repeat(repeat, number) - except: - t.print_exc() - return 1 - - def format_time(dt): - unit = time_unit - - if unit is not None: - scale = units[unit] - else: - scales = [(scale, unit) for unit, scale in units.items()] - scales.sort(reverse=True) - for scale, unit in scales: - if dt >= scale: - break - - return "%.*g %s" % (precision, dt / scale, unit) - - if verbose: - print("raw times: %s" % ", ".join(map(format_time, raw_timings))) - print() - timings = [dt / number for dt in raw_timings] - - best = min(timings) - print("%d loop%s, best of %d: %s per loop" - % (number, 's' if number != 1 else '', - repeat, format_time(best))) - - best = min(timings) - worst = max(timings) - if worst >= best * 4: - import warnings - warnings.warn_explicit("The test results are likely unreliable. " - "The worst time (%s) was more than four times " - "slower than the best time (%s)." 
- % (format_time(worst), format_time(best)), - UserWarning, '', 0) - return None - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/Python313_13_x64_Template/Lib/token.py b/Python313_13_x64_Template/Lib/token.py deleted file mode 100644 index 54d7cdcc..00000000 --- a/Python313_13_x64_Template/Lib/token.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Token constants.""" -# Auto-generated by Tools/build/generate_token.py - -__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF', - 'EXACT_TOKEN_TYPES'] - -ENDMARKER = 0 -NAME = 1 -NUMBER = 2 -STRING = 3 -NEWLINE = 4 -INDENT = 5 -DEDENT = 6 -LPAR = 7 -RPAR = 8 -LSQB = 9 -RSQB = 10 -COLON = 11 -COMMA = 12 -SEMI = 13 -PLUS = 14 -MINUS = 15 -STAR = 16 -SLASH = 17 -VBAR = 18 -AMPER = 19 -LESS = 20 -GREATER = 21 -EQUAL = 22 -DOT = 23 -PERCENT = 24 -LBRACE = 25 -RBRACE = 26 -EQEQUAL = 27 -NOTEQUAL = 28 -LESSEQUAL = 29 -GREATEREQUAL = 30 -TILDE = 31 -CIRCUMFLEX = 32 -LEFTSHIFT = 33 -RIGHTSHIFT = 34 -DOUBLESTAR = 35 -PLUSEQUAL = 36 -MINEQUAL = 37 -STAREQUAL = 38 -SLASHEQUAL = 39 -PERCENTEQUAL = 40 -AMPEREQUAL = 41 -VBAREQUAL = 42 -CIRCUMFLEXEQUAL = 43 -LEFTSHIFTEQUAL = 44 -RIGHTSHIFTEQUAL = 45 -DOUBLESTAREQUAL = 46 -DOUBLESLASH = 47 -DOUBLESLASHEQUAL = 48 -AT = 49 -ATEQUAL = 50 -RARROW = 51 -ELLIPSIS = 52 -COLONEQUAL = 53 -EXCLAMATION = 54 -OP = 55 -TYPE_IGNORE = 56 -TYPE_COMMENT = 57 -SOFT_KEYWORD = 58 -FSTRING_START = 59 -FSTRING_MIDDLE = 60 -FSTRING_END = 61 -COMMENT = 62 -NL = 63 -# These aren't used by the C tokenizer but are needed for tokenize.py -ERRORTOKEN = 64 -ENCODING = 65 -N_TOKENS = 66 -# Special definitions for cooperation with parser -NT_OFFSET = 256 - -tok_name = {value: name - for name, value in globals().items() - if isinstance(value, int) and not name.startswith('_')} -__all__.extend(tok_name.values()) - -EXACT_TOKEN_TYPES = { - '!': EXCLAMATION, - '!=': NOTEQUAL, - '%': PERCENT, - '%=': PERCENTEQUAL, - '&': AMPER, - '&=': AMPEREQUAL, - '(': LPAR, - ')': RPAR, - '*': STAR, - '**': DOUBLESTAR, - '**=': DOUBLESTAREQUAL, - '*=': STAREQUAL, - '+': PLUS, - '+=': PLUSEQUAL, - ',': COMMA, - '-': MINUS, - '-=': MINEQUAL, - '->': RARROW, - '.': DOT, - '...': ELLIPSIS, - '/': SLASH, - '//': DOUBLESLASH, - '//=': DOUBLESLASHEQUAL, - '/=': SLASHEQUAL, - ':': COLON, - ':=': COLONEQUAL, - ';': SEMI, - '<': LESS, - '<<': LEFTSHIFT, - '<<=': LEFTSHIFTEQUAL, - '<=': LESSEQUAL, - '=': EQUAL, - '==': EQEQUAL, - '>': GREATER, - '>=': GREATEREQUAL, - '>>': RIGHTSHIFT, - '>>=': RIGHTSHIFTEQUAL, - '@': AT, - '@=': ATEQUAL, - '[': LSQB, - ']': RSQB, - '^': CIRCUMFLEX, - '^=': CIRCUMFLEXEQUAL, - '{': LBRACE, - '|': VBAR, - '|=': VBAREQUAL, - '}': RBRACE, - '~': TILDE, -} - -def ISTERMINAL(x): - return x < NT_OFFSET - -def ISNONTERMINAL(x): - return x >= NT_OFFSET - -def ISEOF(x): - return x == ENDMARKER diff --git a/Python313_13_x64_Template/Lib/tokenize.py b/Python313_13_x64_Template/Lib/tokenize.py deleted file mode 100644 index 7ca552c4..00000000 --- a/Python313_13_x64_Template/Lib/tokenize.py +++ /dev/null @@ -1,592 +0,0 @@ -"""Tokenization help for Python programs. - -tokenize(readline) is a generator that breaks a stream of bytes into -Python tokens. It decodes the bytes according to PEP-0263 for -determining source file encoding. - -It accepts a readline-like method which is called repeatedly to get the -next line of input (or b"" for EOF). 
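The token module removed in the hunk above is a generated table of constants; its lookup helpers can be sketched like this on a stock Python 3 interpreter:

    import token

    # tok_name maps numeric token types back to symbolic names, and
    # EXACT_TOKEN_TYPES maps operator strings to those numeric types
    print(token.tok_name[token.EXACT_TOKEN_TYPES['**=']])   # DOUBLESTAREQUAL
    print(token.ISTERMINAL(token.NAME))                     # True
    print(token.ISEOF(token.ENDMARKER))                     # True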
It generates 5-tuples with these -members: - - the token type (see token.py) - the token (a string) - the starting (row, column) indices of the token (a 2-tuple of ints) - the ending (row, column) indices of the token (a 2-tuple of ints) - the original line (string) - -It is designed to match the working of the Python tokenizer exactly, except -that it produces COMMENT tokens for comments and gives type OP for all -operators. Additionally, all token lists start with an ENCODING token -which tells you which encoding was used to decode the bytes stream. -""" - -__author__ = 'Ka-Ping Yee ' -__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' - 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' - 'Michael Foord') -from builtins import open as _builtin_open -from codecs import lookup, BOM_UTF8 -import collections -import functools -from io import TextIOWrapper -import itertools as _itertools -import re -import sys -from token import * -from token import EXACT_TOKEN_TYPES -import _tokenize - -cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII) -blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII) - -import token -__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding", - "untokenize", "TokenInfo", "open", "TokenError"] -del token - -class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): - def __repr__(self): - annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) - return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % - self._replace(type=annotated_type)) - - @property - def exact_type(self): - if self.type == OP and self.string in EXACT_TOKEN_TYPES: - return EXACT_TOKEN_TYPES[self.string] - else: - return self.type - -def group(*choices): return '(' + '|'.join(choices) + ')' -def any(*choices): return group(*choices) + '*' -def maybe(*choices): return group(*choices) + '?' - -# Note: we use unicode matching for names ("\w") but ascii matching for -# number literals. -Whitespace = r'[ \f\t]*' -Comment = r'#[^\r\n]*' -Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) -Name = r'\w+' - -Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+' -Binnumber = r'0[bB](?:_?[01])+' -Octnumber = r'0[oO](?:_?[0-7])+' -Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)' -Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) -Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*' -Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?', - r'\.[0-9](?:_?[0-9])*') + maybe(Exponent) -Expfloat = r'[0-9](?:_?[0-9])*' + Exponent -Floatnumber = group(Pointfloat, Expfloat) -Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]') -Number = group(Imagnumber, Floatnumber, Intnumber) - -# Return the empty string, plus all of the valid string prefixes. -def _all_string_prefixes(): - # The valid string prefixes. Only contain the lower case versions, - # and don't contain any permutations (include 'fr', but not - # 'rf'). The various permutations will be generated. 
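The comment above promises that case and order permutations are generated from the short lowercase list; a worked expansion for a single prefix, mirroring the itertools logic in _all_string_prefixes below:

    import itertools

    expansions = set()
    for order in itertools.permutations('fr'):                  # 'fr', 'rf'
        for casing in itertools.product(*[(c, c.upper()) for c in order]):
            expansions.add(''.join(casing))
    print(sorted(expansions))
    # ['FR', 'Fr', 'RF', 'Rf', 'fR', 'fr', 'rF', 'rf'] -- 2 orders x 4 casings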
- _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr'] - # if we add binary f-strings, add: ['fb', 'fbr'] - result = {''} - for prefix in _valid_string_prefixes: - for t in _itertools.permutations(prefix): - # create a list with upper and lower versions of each - # character - for u in _itertools.product(*[(c, c.upper()) for c in t]): - result.add(''.join(u)) - return result - -@functools.lru_cache -def _compile(expr): - return re.compile(expr, re.UNICODE) - -# Note that since _all_string_prefixes includes the empty string, -# StringPrefix can be the empty string (making it optional). -StringPrefix = group(*_all_string_prefixes()) - -# Tail end of ' string. -Single = r"[^'\\]*(?:\\.[^'\\]*)*'" -# Tail end of " string. -Double = r'[^"\\]*(?:\\.[^"\\]*)*"' -# Tail end of ''' string. -Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" -# Tail end of """ string. -Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' -Triple = group(StringPrefix + "'''", StringPrefix + '"""') -# Single-line ' or " string. -String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", - StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') - -# Sorting in reverse order puts the long operators before their prefixes. -# Otherwise if = came before ==, == would get recognized as two instances -# of =. -Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True))) -Funny = group(r'\r?\n', Special) - -PlainToken = group(Number, Funny, String, Name) -Token = Ignore + PlainToken - -# First (or only) line of ' or " string. -ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + - group("'", r'\\\r?\n'), - StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + - group('"', r'\\\r?\n')) -PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple) -PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) - -# For a given string prefix plus quotes, endpats maps it to a regex -# to match the remainder of that string. _prefix can be empty, for -# a normal single or triple quoted string (with no prefix). -endpats = {} -for _prefix in _all_string_prefixes(): - endpats[_prefix + "'"] = Single - endpats[_prefix + '"'] = Double - endpats[_prefix + "'''"] = Single3 - endpats[_prefix + '"""'] = Double3 -del _prefix - -# A set of all of the single and triple quoted string prefixes, -# including the opening quotes. -single_quoted = set() -triple_quoted = set() -for t in _all_string_prefixes(): - for u in (t + '"', t + "'"): - single_quoted.add(u) - for u in (t + '"""', t + "'''"): - triple_quoted.add(u) -del t, u - -tabsize = 8 - -class TokenError(Exception): pass - - -class Untokenizer: - - def __init__(self): - self.tokens = [] - self.prev_row = 1 - self.prev_col = 0 - self.prev_type = None - self.prev_line = "" - self.encoding = None - - def add_whitespace(self, start): - row, col = start - if row < self.prev_row or row == self.prev_row and col < self.prev_col: - raise ValueError("start ({},{}) precedes previous end ({},{})" - .format(row, col, self.prev_row, self.prev_col)) - self.add_backslash_continuation(start) - col_offset = col - self.prev_col - if col_offset: - self.tokens.append(" " * col_offset) - - def add_backslash_continuation(self, start): - """Add backslash continuation characters if the row has increased - without encountering a newline token. - - This also inserts the correct amount of whitespace before the backslash. 
- """ - row = start[0] - row_offset = row - self.prev_row - if row_offset == 0: - return - - newline = '\r\n' if self.prev_line.endswith('\r\n') else '\n' - line = self.prev_line.rstrip('\\\r\n') - ws = ''.join(_itertools.takewhile(str.isspace, reversed(line))) - self.tokens.append(ws + f"\\{newline}" * row_offset) - self.prev_col = 0 - - def escape_brackets(self, token): - characters = [] - consume_until_next_bracket = False - for character in token: - if character == "}": - if consume_until_next_bracket: - consume_until_next_bracket = False - else: - characters.append(character) - if character == "{": - n_backslashes = sum( - 1 for char in _itertools.takewhile( - "\\".__eq__, - characters[-2::-1] - ) - ) - if n_backslashes % 2 == 0 or characters[-1] != "N": - characters.append(character) - else: - consume_until_next_bracket = True - characters.append(character) - return "".join(characters) - - def untokenize(self, iterable): - it = iter(iterable) - indents = [] - startline = False - for t in it: - if len(t) == 2: - self.compat(t, it) - break - tok_type, token, start, end, line = t - if tok_type == ENCODING: - self.encoding = token - continue - if tok_type == ENDMARKER: - break - if tok_type == INDENT: - indents.append(token) - continue - elif tok_type == DEDENT: - indents.pop() - self.prev_row, self.prev_col = end - continue - elif tok_type in (NEWLINE, NL): - startline = True - elif startline and indents: - indent = indents[-1] - if start[1] >= len(indent): - self.tokens.append(indent) - self.prev_col = len(indent) - startline = False - elif tok_type == FSTRING_MIDDLE: - if '{' in token or '}' in token: - token = self.escape_brackets(token) - last_line = token.splitlines()[-1] - end_line, end_col = end - extra_chars = last_line.count("{{") + last_line.count("}}") - end = (end_line, end_col + extra_chars) - - self.add_whitespace(start) - self.tokens.append(token) - self.prev_row, self.prev_col = end - if tok_type in (NEWLINE, NL): - self.prev_row += 1 - self.prev_col = 0 - self.prev_type = tok_type - self.prev_line = line - return "".join(self.tokens) - - def compat(self, token, iterable): - indents = [] - toks_append = self.tokens.append - startline = token[0] in (NEWLINE, NL) - prevstring = False - in_fstring = 0 - - for tok in _itertools.chain([token], iterable): - toknum, tokval = tok[:2] - if toknum == ENCODING: - self.encoding = tokval - continue - - if toknum in (NAME, NUMBER): - tokval += ' ' - - # Insert a space between two consecutive strings - if toknum == STRING: - if prevstring: - tokval = ' ' + tokval - prevstring = True - else: - prevstring = False - - if toknum == FSTRING_START: - in_fstring += 1 - elif toknum == FSTRING_END: - in_fstring -= 1 - if toknum == INDENT: - indents.append(tokval) - continue - elif toknum == DEDENT: - indents.pop() - continue - elif toknum in (NEWLINE, NL): - startline = True - elif startline and indents: - toks_append(indents[-1]) - startline = False - elif toknum == FSTRING_MIDDLE: - tokval = self.escape_brackets(tokval) - - # Insert a space between two consecutive brackets if we are in an f-string - if tokval in {"{", "}"} and self.tokens and self.tokens[-1] == tokval and in_fstring: - tokval = ' ' + tokval - - # Insert a space between two consecutive f-strings - if toknum in (STRING, FSTRING_START) and self.prev_type in (STRING, FSTRING_END): - self.tokens.append(" ") - - toks_append(tokval) - self.prev_type = toknum - - -def untokenize(iterable): - """Transform tokens back into Python source code. 
- It returns a bytes object, encoded using the ENCODING - token, which is the first token sequence output by tokenize. - - Each element returned by the iterable must be a token sequence - with at least two elements, a token number and token value. If - only two tokens are passed, the resulting output is poor. - - The result is guaranteed to tokenize back to match the input so - that the conversion is lossless and round-trips are assured. - The guarantee applies only to the token type and token string as - the spacing between tokens (column positions) may change. - """ - ut = Untokenizer() - out = ut.untokenize(iterable) - if ut.encoding is not None: - out = out.encode(ut.encoding) - return out - - -def _get_normal_name(orig_enc): - """Imitates get_normal_name in Parser/tokenizer/helpers.c.""" - # Only care about the first 12 characters. - enc = orig_enc[:12].lower().replace("_", "-") - if enc == "utf-8" or enc.startswith("utf-8-"): - return "utf-8" - if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ - enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): - return "iso-8859-1" - return orig_enc - -def detect_encoding(readline): - """ - The detect_encoding() function is used to detect the encoding that should - be used to decode a Python source file. It requires one argument, readline, - in the same way as the tokenize() generator. - - It will call readline a maximum of twice, and return the encoding used - (as a string) and a list of any lines (left as bytes) it has read in. - - It detects the encoding from the presence of a utf-8 bom or an encoding - cookie as specified in pep-0263. If both a bom and a cookie are present, - but disagree, a SyntaxError will be raised. If the encoding cookie is an - invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, - 'utf-8-sig' is returned. - - If no encoding is specified, then the default of 'utf-8' will be returned. - """ - try: - filename = readline.__self__.name - except AttributeError: - filename = None - bom_found = False - encoding = None - default = 'utf-8' - def read_or_stop(): - try: - return readline() - except StopIteration: - return b'' - - def find_cookie(line): - try: - # Decode as UTF-8. Either the line is an encoding declaration, - # in which case it should be pure ASCII, or it must be UTF-8 - # per default encoding. 
- line_string = line.decode('utf-8') - except UnicodeDecodeError: - msg = "invalid or missing encoding declaration" - if filename is not None: - msg = '{} for {!r}'.format(msg, filename) - raise SyntaxError(msg) - - match = cookie_re.match(line_string) - if not match: - return None - encoding = _get_normal_name(match.group(1)) - try: - codec = lookup(encoding) - except LookupError: - # This behaviour mimics the Python interpreter - if filename is None: - msg = "unknown encoding: " + encoding - else: - msg = "unknown encoding for {!r}: {}".format(filename, - encoding) - raise SyntaxError(msg) - - if bom_found: - if encoding != 'utf-8': - # This behaviour mimics the Python interpreter - if filename is None: - msg = 'encoding problem: utf-8' - else: - msg = 'encoding problem for {!r}: utf-8'.format(filename) - raise SyntaxError(msg) - encoding += '-sig' - return encoding - - first = read_or_stop() - if first.startswith(BOM_UTF8): - bom_found = True - first = first[3:] - default = 'utf-8-sig' - if not first: - return default, [] - - encoding = find_cookie(first) - if encoding: - return encoding, [first] - if not blank_re.match(first): - return default, [first] - - second = read_or_stop() - if not second: - return default, [first] - - encoding = find_cookie(second) - if encoding: - return encoding, [first, second] - - return default, [first, second] - - -def open(filename): - """Open a file in read only mode using the encoding detected by - detect_encoding(). - """ - buffer = _builtin_open(filename, 'rb') - try: - encoding, lines = detect_encoding(buffer.readline) - buffer.seek(0) - text = TextIOWrapper(buffer, encoding, line_buffering=True) - text.mode = 'r' - return text - except: - buffer.close() - raise - -def tokenize(readline): - """ - The tokenize() generator requires one argument, readline, which - must be a callable object which provides the same interface as the - readline() method of built-in file objects. Each call to the function - should return one line of input as bytes. Alternatively, readline - can be a callable function terminating with StopIteration: - readline = open(myfile, 'rb').__next__ # Example of alternate readline - - The generator produces 5-tuples with these members: the token type; the - token string; a 2-tuple (srow, scol) of ints specifying the row and - column where the token begins in the source; a 2-tuple (erow, ecol) of - ints specifying the row and column where the token ends in the source; - and the line on which the token was found. The line passed is the - physical line. - - The first token sequence will always be an ENCODING token - which tells you which encoding was used to decode the bytes stream. - """ - encoding, consumed = detect_encoding(readline) - rl_gen = _itertools.chain(consumed, iter(readline, b"")) - if encoding is not None: - if encoding == "utf-8-sig": - # BOM will already have been stripped. - encoding = "utf-8" - yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') - yield from _generate_tokens_from_c_tokenizer(rl_gen.__next__, encoding, extra_tokens=True) - -def generate_tokens(readline): - """Tokenize a source reading Python code as unicode strings. - - This has the same API as tokenize(), except that it expects the *readline* - callable to return str objects instead of bytes. 
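A short sketch of the byte-level entry points defined above: detect_encoding() consumes at most two lines, and tokenize() emits the detected encoding as its first token:

    import io
    import tokenize

    data = b"# -*- coding: latin-1 -*-\nx = 1\n"
    enc, consumed = tokenize.detect_encoding(io.BytesIO(data).readline)
    print(enc, len(consumed))   # iso-8859-1 1  (cookie found on the first line)

    first = next(tokenize.tokenize(io.BytesIO(data).readline))
    print(first.type == tokenize.ENCODING, first.string)   # True iso-8859-1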
- """ - return _generate_tokens_from_c_tokenizer(readline, extra_tokens=True) - -def main(): - import argparse - - # Helper error handling routines - def perror(message): - sys.stderr.write(message) - sys.stderr.write('\n') - - def error(message, filename=None, location=None): - if location: - args = (filename,) + location + (message,) - perror("%s:%d:%d: error: %s" % args) - elif filename: - perror("%s: error: %s" % (filename, message)) - else: - perror("error: %s" % message) - sys.exit(1) - - # Parse the arguments and options - parser = argparse.ArgumentParser(prog='python -m tokenize') - parser.add_argument(dest='filename', nargs='?', - metavar='filename.py', - help='the file to tokenize; defaults to stdin') - parser.add_argument('-e', '--exact', dest='exact', action='store_true', - help='display token names using the exact type') - args = parser.parse_args() - - try: - # Tokenize the input - if args.filename: - filename = args.filename - with _builtin_open(filename, 'rb') as f: - tokens = list(tokenize(f.readline)) - else: - filename = "" - tokens = _generate_tokens_from_c_tokenizer( - sys.stdin.readline, extra_tokens=True) - - - # Output the tokenization - for token in tokens: - token_type = token.type - if args.exact: - token_type = token.exact_type - token_range = "%d,%d-%d,%d:" % (token.start + token.end) - print("%-20s%-15s%-15r" % - (token_range, tok_name[token_type], token.string)) - except IndentationError as err: - line, column = err.args[1][1:3] - error(err.args[0], filename, (line, column)) - except TokenError as err: - line, column = err.args[1] - error(err.args[0], filename, (line, column)) - except SyntaxError as err: - error(err, filename) - except OSError as err: - error(err) - except KeyboardInterrupt: - print("interrupted\n") - except Exception as err: - perror("unexpected error: %s" % err) - raise - -def _transform_msg(msg): - """Transform error messages from the C tokenizer into the Python tokenize - - The C tokenizer is more picky than the Python one, so we need to massage - the error messages a bit for backwards compatibility. 
- """ - if "unterminated triple-quoted string literal" in msg: - return "EOF in multi-line string" - return msg - -def _generate_tokens_from_c_tokenizer(source, encoding=None, extra_tokens=False): - """Tokenize a source reading Python code as unicode strings using the internal C tokenizer""" - if encoding is None: - it = _tokenize.TokenizerIter(source, extra_tokens=extra_tokens) - else: - it = _tokenize.TokenizerIter(source, encoding=encoding, extra_tokens=extra_tokens) - try: - for info in it: - yield TokenInfo._make(info) - except SyntaxError as e: - if type(e) != SyntaxError: - raise e from None - msg = _transform_msg(e.msg) - raise TokenError(msg, (e.lineno, e.offset)) from None - - -if __name__ == "__main__": - main() diff --git a/Python313_13_x64_Template/Lib/tomllib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/tomllib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 0124a69e..00000000 Binary files a/Python313_13_x64_Template/Lib/tomllib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/tomllib/__pycache__/_parser.cpython-313.pyc b/Python313_13_x64_Template/Lib/tomllib/__pycache__/_parser.cpython-313.pyc deleted file mode 100644 index c2a2b285..00000000 Binary files a/Python313_13_x64_Template/Lib/tomllib/__pycache__/_parser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/tomllib/__pycache__/_re.cpython-313.pyc b/Python313_13_x64_Template/Lib/tomllib/__pycache__/_re.cpython-313.pyc deleted file mode 100644 index 9537e096..00000000 Binary files a/Python313_13_x64_Template/Lib/tomllib/__pycache__/_re.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/tomllib/__pycache__/_types.cpython-313.pyc b/Python313_13_x64_Template/Lib/tomllib/__pycache__/_types.cpython-313.pyc deleted file mode 100644 index 9602c8af..00000000 Binary files a/Python313_13_x64_Template/Lib/tomllib/__pycache__/_types.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/tomllib/_parser.py b/Python313_13_x64_Template/Lib/tomllib/_parser.py deleted file mode 100644 index 9c80a6a5..00000000 --- a/Python313_13_x64_Template/Lib/tomllib/_parser.py +++ /dev/null @@ -1,691 +0,0 @@ -# SPDX-License-Identifier: MIT -# SPDX-FileCopyrightText: 2021 Taneli Hukkinen -# Licensed to PSF under a Contributor Agreement. - -from __future__ import annotations - -from collections.abc import Iterable -import string -from types import MappingProxyType -from typing import Any, BinaryIO, NamedTuple - -from ._re import ( - RE_DATETIME, - RE_LOCALTIME, - RE_NUMBER, - match_to_datetime, - match_to_localtime, - match_to_number, -) -from ._types import Key, ParseFloat, Pos - -ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) - -# Neither of these sets include quotation mark or backslash. They are -# currently handled as separate cases in the parser functions. 
-ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t") -ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n") - -ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS -ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS - -ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS - -TOML_WS = frozenset(" \t") -TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n") -BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_") -KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'") -HEXDIGIT_CHARS = frozenset(string.hexdigits) - -BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( - { - "\\b": "\u0008", # backspace - "\\t": "\u0009", # tab - "\\n": "\u000A", # linefeed - "\\f": "\u000C", # form feed - "\\r": "\u000D", # carriage return - '\\"': "\u0022", # quote - "\\\\": "\u005C", # backslash - } -) - - -class TOMLDecodeError(ValueError): - """An error raised if a document is not valid TOML.""" - - -def load(fp: BinaryIO, /, *, parse_float: ParseFloat = float) -> dict[str, Any]: - """Parse TOML from a binary file object.""" - b = fp.read() - try: - s = b.decode() - except AttributeError: - raise TypeError( - "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`" - ) from None - return loads(s, parse_float=parse_float) - - -def loads(s: str, /, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901 - """Parse TOML from a string.""" - - # The spec allows converting "\r\n" to "\n", even in string - # literals. Let's do so to simplify parsing. - src = s.replace("\r\n", "\n") - pos = 0 - out = Output(NestedDict(), Flags()) - header: Key = () - parse_float = make_safe_parse_float(parse_float) - - # Parse one statement at a time - # (typically means one line in TOML source) - while True: - # 1. Skip line leading whitespace - pos = skip_chars(src, pos, TOML_WS) - - # 2. Parse rules. Expect one of the following: - # - end of file - # - end of line - # - comment - # - key/value pair - # - append dict to list (and move to its namespace) - # - create dict (and move to its namespace) - # Skip trailing whitespace when applicable. - try: - char = src[pos] - except IndexError: - break - if char == "\n": - pos += 1 - continue - if char in KEY_INITIAL_CHARS: - pos = key_value_rule(src, pos, out, header, parse_float) - pos = skip_chars(src, pos, TOML_WS) - elif char == "[": - try: - second_char: str | None = src[pos + 1] - except IndexError: - second_char = None - out.flags.finalize_pending() - if second_char == "[": - pos, header = create_list_rule(src, pos, out) - else: - pos, header = create_dict_rule(src, pos, out) - pos = skip_chars(src, pos, TOML_WS) - elif char != "#": - raise suffixed_err(src, pos, "Invalid statement") - - # 3. Skip comment - pos = skip_comment(src, pos) - - # 4. Expect end of line or end of file - try: - char = src[pos] - except IndexError: - break - if char != "\n": - raise suffixed_err( - src, pos, "Expected newline or end of document after a statement" - ) - pos += 1 - - return out.data.dict - - -class Flags: - """Flags that map to parsed keys/namespaces.""" - - # Marks an immutable namespace (inline array or inline table). - FROZEN = 0 - # Marks a nest that has been explicitly created and can no longer - # be opened using the "[table]" syntax. 
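Concretely, EXPLICIT_NEST is what turns a repeated [table] header into an error; the message text comes from create_dict_rule further down:

    import tomllib

    doc = "[tool]\nname = 'a'\n\n[tool]\nname = 'b'\n"
    try:
        tomllib.loads(doc)
    except tomllib.TOMLDecodeError as exc:
        print(exc)   # Cannot declare ('tool',) twice (at line 4, column 6)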
- EXPLICIT_NEST = 1 - - def __init__(self) -> None: - self._flags: dict[str, dict[Any, Any]] = {} - self._pending_flags: set[tuple[Key, int]] = set() - - def add_pending(self, key: Key, flag: int) -> None: - self._pending_flags.add((key, flag)) - - def finalize_pending(self) -> None: - for key, flag in self._pending_flags: - self.set(key, flag, recursive=False) - self._pending_flags.clear() - - def unset_all(self, key: Key) -> None: - cont = self._flags - for k in key[:-1]: - if k not in cont: - return - cont = cont[k]["nested"] - cont.pop(key[-1], None) - - def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 - cont = self._flags - key_parent, key_stem = key[:-1], key[-1] - for k in key_parent: - if k not in cont: - cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} - cont = cont[k]["nested"] - if key_stem not in cont: - cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}} - cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag) - - def is_(self, key: Key, flag: int) -> bool: - if not key: - return False # document root has no flags - cont = self._flags - for k in key[:-1]: - if k not in cont: - return False - inner_cont = cont[k] - if flag in inner_cont["recursive_flags"]: - return True - cont = inner_cont["nested"] - key_stem = key[-1] - if key_stem in cont: - cont = cont[key_stem] - return flag in cont["flags"] or flag in cont["recursive_flags"] - return False - - -class NestedDict: - def __init__(self) -> None: - # The parsed content of the TOML document - self.dict: dict[str, Any] = {} - - def get_or_create_nest( - self, - key: Key, - *, - access_lists: bool = True, - ) -> dict[str, Any]: - cont: Any = self.dict - for k in key: - if k not in cont: - cont[k] = {} - cont = cont[k] - if access_lists and isinstance(cont, list): - cont = cont[-1] - if not isinstance(cont, dict): - raise KeyError("There is no nest behind this key") - return cont # type: ignore[no-any-return] - - def append_nest_to_list(self, key: Key) -> None: - cont = self.get_or_create_nest(key[:-1]) - last_key = key[-1] - if last_key in cont: - list_ = cont[last_key] - if not isinstance(list_, list): - raise KeyError("An object other than list found behind this key") - list_.append({}) - else: - cont[last_key] = [{}] - - -class Output(NamedTuple): - data: NestedDict - flags: Flags - - -def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: - try: - while src[pos] in chars: - pos += 1 - except IndexError: - pass - return pos - - -def skip_until( - src: str, - pos: Pos, - expect: str, - *, - error_on: frozenset[str], - error_on_eof: bool, -) -> Pos: - try: - new_pos = src.index(expect, pos) - except ValueError: - new_pos = len(src) - if error_on_eof: - raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None - - if not error_on.isdisjoint(src[pos:new_pos]): - while src[pos] not in error_on: - pos += 1 - raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}") - return new_pos - - -def skip_comment(src: str, pos: Pos) -> Pos: - try: - char: str | None = src[pos] - except IndexError: - char = None - if char == "#": - return skip_until( - src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False - ) - return pos - - -def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: - while True: - pos_before_skip = pos - pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) - pos = skip_comment(src, pos) - if pos == pos_before_skip: - return pos - - -def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, 
Key]: - pos += 1 # Skip "[" - pos = skip_chars(src, pos, TOML_WS) - pos, key = parse_key(src, pos) - - if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot declare {key} twice") - out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) - try: - out.data.get_or_create_nest(key) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - - if not src.startswith("]", pos): - raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration") - return pos + 1, key - - -def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: - pos += 2 # Skip "[[" - pos = skip_chars(src, pos, TOML_WS) - pos, key = parse_key(src, pos) - - if out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") - # Free the namespace now that it points to another empty list item... - out.flags.unset_all(key) - # ...but this key precisely is still prohibited from table declaration - out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) - try: - out.data.append_nest_to_list(key) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - - if not src.startswith("]]", pos): - raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration") - return pos + 2, key - - -def key_value_rule( - src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat -) -> Pos: - pos, key, value = parse_key_value_pair(src, pos, parse_float) - key_parent, key_stem = key[:-1], key[-1] - abs_key_parent = header + key_parent - - relative_path_cont_keys = (header + key[:i] for i in range(1, len(key))) - for cont_key in relative_path_cont_keys: - # Check that dotted key syntax does not redefine an existing table - if out.flags.is_(cont_key, Flags.EXPLICIT_NEST): - raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}") - # Containers in the relative path can't be opened with the table syntax or - # dotted key/value syntax in following table sections. 
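The pending flags added here cover cases like the following hypothetical document, where a later section's dotted key tries to reopen a table that [a.b] created explicitly:

    import tomllib

    doc = "[a.b]\nx = 1\n\n[a]\nb.y = 2\n"
    try:
        tomllib.loads(doc)
    except tomllib.TOMLDecodeError as exc:
        print(exc)   # Cannot redefine namespace ('a', 'b') (at line 5, column 8)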
- out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST) - - if out.flags.is_(abs_key_parent, Flags.FROZEN): - raise suffixed_err( - src, pos, f"Cannot mutate immutable namespace {abs_key_parent}" - ) - - try: - nest = out.data.get_or_create_nest(abs_key_parent) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - if key_stem in nest: - raise suffixed_err(src, pos, "Cannot overwrite a value") - # Mark inline table and array namespaces recursively immutable - if isinstance(value, (dict, list)): - out.flags.set(header + key, Flags.FROZEN, recursive=True) - nest[key_stem] = value - return pos - - -def parse_key_value_pair( - src: str, pos: Pos, parse_float: ParseFloat -) -> tuple[Pos, Key, Any]: - pos, key = parse_key(src, pos) - try: - char: str | None = src[pos] - except IndexError: - char = None - if char != "=": - raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair") - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - pos, value = parse_value(src, pos, parse_float) - return pos, key, value - - -def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]: - pos, key_part = parse_key_part(src, pos) - key: Key = (key_part,) - pos = skip_chars(src, pos, TOML_WS) - while True: - try: - char: str | None = src[pos] - except IndexError: - char = None - if char != ".": - return pos, key - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - pos, key_part = parse_key_part(src, pos) - key += (key_part,) - pos = skip_chars(src, pos, TOML_WS) - - -def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: - try: - char: str | None = src[pos] - except IndexError: - char = None - if char in BARE_KEY_CHARS: - start_pos = pos - pos = skip_chars(src, pos, BARE_KEY_CHARS) - return pos, src[start_pos:pos] - if char == "'": - return parse_literal_str(src, pos) - if char == '"': - return parse_one_line_basic_str(src, pos) - raise suffixed_err(src, pos, "Invalid initial character for a key part") - - -def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]: - pos += 1 - return parse_basic_str(src, pos, multiline=False) - - -def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list[Any]]: - pos += 1 - array: list[Any] = [] - - pos = skip_comments_and_array_ws(src, pos) - if src.startswith("]", pos): - return pos + 1, array - while True: - pos, val = parse_value(src, pos, parse_float) - array.append(val) - pos = skip_comments_and_array_ws(src, pos) - - c = src[pos : pos + 1] - if c == "]": - return pos + 1, array - if c != ",": - raise suffixed_err(src, pos, "Unclosed array") - pos += 1 - - pos = skip_comments_and_array_ws(src, pos) - if src.startswith("]", pos): - return pos + 1, array - - -def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict[str, Any]]: - pos += 1 - nested_dict = NestedDict() - flags = Flags() - - pos = skip_chars(src, pos, TOML_WS) - if src.startswith("}", pos): - return pos + 1, nested_dict.dict - while True: - pos, key, value = parse_key_value_pair(src, pos, parse_float) - key_parent, key_stem = key[:-1], key[-1] - if flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") - try: - nest = nested_dict.get_or_create_nest(key_parent, access_lists=False) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - if key_stem in nest: - raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}") - nest[key_stem] = value - pos = skip_chars(src, pos, TOML_WS) - c = src[pos : pos + 1] - if c == 
"}": - return pos + 1, nested_dict.dict - if c != ",": - raise suffixed_err(src, pos, "Unclosed inline table") - if isinstance(value, (dict, list)): - flags.set(key, Flags.FROZEN, recursive=True) - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - - -def parse_basic_str_escape( - src: str, pos: Pos, *, multiline: bool = False -) -> tuple[Pos, str]: - escape_id = src[pos : pos + 2] - pos += 2 - if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: - # Skip whitespace until next non-whitespace character or end of - # the doc. Error if non-whitespace is found before newline. - if escape_id != "\\\n": - pos = skip_chars(src, pos, TOML_WS) - try: - char = src[pos] - except IndexError: - return pos, "" - if char != "\n": - raise suffixed_err(src, pos, "Unescaped '\\' in a string") - pos += 1 - pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) - return pos, "" - if escape_id == "\\u": - return parse_hex_char(src, pos, 4) - if escape_id == "\\U": - return parse_hex_char(src, pos, 8) - try: - return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] - except KeyError: - raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None - - -def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]: - return parse_basic_str_escape(src, pos, multiline=True) - - -def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: - hex_str = src[pos : pos + hex_len] - if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): - raise suffixed_err(src, pos, "Invalid hex value") - pos += hex_len - hex_int = int(hex_str, 16) - if not is_unicode_scalar_value(hex_int): - raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value") - return pos, chr(hex_int) - - -def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]: - pos += 1 # Skip starting apostrophe - start_pos = pos - pos = skip_until( - src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True - ) - return pos + 1, src[start_pos:pos] # Skip ending apostrophe - - -def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]: - pos += 3 - if src.startswith("\n", pos): - pos += 1 - - if literal: - delim = "'" - end_pos = skip_until( - src, - pos, - "'''", - error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS, - error_on_eof=True, - ) - result = src[pos:end_pos] - pos = end_pos + 3 - else: - delim = '"' - pos, result = parse_basic_str(src, pos, multiline=True) - - # Add at maximum two extra apostrophes/quotes if the end sequence - # is 4 or 5 chars long instead of just 3. 
- if not src.startswith(delim, pos): - return pos, result - pos += 1 - if not src.startswith(delim, pos): - return pos, result + delim - pos += 1 - return pos, result + (delim * 2) - - -def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: - if multiline: - error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS - parse_escapes = parse_basic_str_escape_multiline - else: - error_on = ILLEGAL_BASIC_STR_CHARS - parse_escapes = parse_basic_str_escape - result = "" - start_pos = pos - while True: - try: - char = src[pos] - except IndexError: - raise suffixed_err(src, pos, "Unterminated string") from None - if char == '"': - if not multiline: - return pos + 1, result + src[start_pos:pos] - if src.startswith('"""', pos): - return pos + 3, result + src[start_pos:pos] - pos += 1 - continue - if char == "\\": - result += src[start_pos:pos] - pos, parsed_escape = parse_escapes(src, pos) - result += parsed_escape - start_pos = pos - continue - if char in error_on: - raise suffixed_err(src, pos, f"Illegal character {char!r}") - pos += 1 - - -def parse_value( # noqa: C901 - src: str, pos: Pos, parse_float: ParseFloat -) -> tuple[Pos, Any]: - try: - char: str | None = src[pos] - except IndexError: - char = None - - # IMPORTANT: order conditions based on speed of checking and likelihood - - # Basic strings - if char == '"': - if src.startswith('"""', pos): - return parse_multiline_str(src, pos, literal=False) - return parse_one_line_basic_str(src, pos) - - # Literal strings - if char == "'": - if src.startswith("'''", pos): - return parse_multiline_str(src, pos, literal=True) - return parse_literal_str(src, pos) - - # Booleans - if char == "t": - if src.startswith("true", pos): - return pos + 4, True - if char == "f": - if src.startswith("false", pos): - return pos + 5, False - - # Arrays - if char == "[": - return parse_array(src, pos, parse_float) - - # Inline tables - if char == "{": - return parse_inline_table(src, pos, parse_float) - - # Dates and times - datetime_match = RE_DATETIME.match(src, pos) - if datetime_match: - try: - datetime_obj = match_to_datetime(datetime_match) - except ValueError as e: - raise suffixed_err(src, pos, "Invalid date or datetime") from e - return datetime_match.end(), datetime_obj - localtime_match = RE_LOCALTIME.match(src, pos) - if localtime_match: - return localtime_match.end(), match_to_localtime(localtime_match) - - # Integers and "normal" floats. - # The regex will greedily match any type starting with a decimal - # char, so needs to be located after handling of dates and times. 
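Why that ordering matters: a date also begins with decimal digits, so RE_NUMBER would otherwise claim the year and strand the rest of the value. With dates and times tried first, both of these parse as intended:

    import tomllib

    print(tomllib.loads("when = 1979-05-27")["when"])   # 1979-05-27 (a datetime.date)
    print(tomllib.loads("n = 1979")["n"])               # 1979 (an int)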
- number_match = RE_NUMBER.match(src, pos) - if number_match: - return number_match.end(), match_to_number(number_match, parse_float) - - # Special floats - first_three = src[pos : pos + 3] - if first_three in {"inf", "nan"}: - return pos + 3, parse_float(first_three) - first_four = src[pos : pos + 4] - if first_four in {"-inf", "+inf", "-nan", "+nan"}: - return pos + 4, parse_float(first_four) - - raise suffixed_err(src, pos, "Invalid value") - - -def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError: - """Return a `TOMLDecodeError` where error message is suffixed with - coordinates in source.""" - - def coord_repr(src: str, pos: Pos) -> str: - if pos >= len(src): - return "end of document" - line = src.count("\n", 0, pos) + 1 - if line == 1: - column = pos + 1 - else: - column = pos - src.rindex("\n", 0, pos) - return f"line {line}, column {column}" - - return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})") - - -def is_unicode_scalar_value(codepoint: int) -> bool: - return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111) - - -def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat: - """A decorator to make `parse_float` safe. - - `parse_float` must not return dicts or lists, because these types - would be mixed with parsed TOML tables and arrays, thus confusing - the parser. The returned decorated callable raises `ValueError` - instead of returning illegal types. - """ - # The default `float` callable never returns illegal types. Optimize it. - if parse_float is float: - return float - - def safe_parse_float(float_str: str) -> Any: - float_value = parse_float(float_str) - if isinstance(float_value, (dict, list)): - raise ValueError("parse_float must not return dicts or lists") - return float_value - - return safe_parse_float diff --git a/Python313_13_x64_Template/Lib/tomllib/_re.py b/Python313_13_x64_Template/Lib/tomllib/_re.py deleted file mode 100644 index a97cab2f..00000000 --- a/Python313_13_x64_Template/Lib/tomllib/_re.py +++ /dev/null @@ -1,107 +0,0 @@ -# SPDX-License-Identifier: MIT -# SPDX-FileCopyrightText: 2021 Taneli Hukkinen -# Licensed to PSF under a Contributor Agreement. - -from __future__ import annotations - -from datetime import date, datetime, time, timedelta, timezone, tzinfo -from functools import lru_cache -import re -from typing import Any - -from ._types import ParseFloat - -# E.g. -# - 00:32:00.999999 -# - 00:32:00 -_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?" - -RE_NUMBER = re.compile( - r""" -0 -(?: - x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex - | - b[01](?:_?[01])* # bin - | - o[0-7](?:_?[0-7])* # oct -) -| -[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part -(?P - (?:\.[0-9](?:_?[0-9])*)? # optional fractional part - (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part -) -""", - flags=re.VERBOSE, -) -RE_LOCALTIME = re.compile(_TIME_RE_STR) -RE_DATETIME = re.compile( - rf""" -([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27 -(?: - [Tt ] - {_TIME_RE_STR} - (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset -)? -""", - flags=re.VERBOSE, -) - - -def match_to_datetime(match: re.Match[str]) -> datetime | date: - """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`. - - Raises ValueError if the match does not correspond to a valid date - or datetime. 
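The regex validates shape only; the calendar check happens inside the datetime constructor, and the resulting ValueError is re-raised with source coordinates:

    import tomllib

    try:
        tomllib.loads("d = 2024-02-30")   # matches RE_DATETIME, impossible date
    except tomllib.TOMLDecodeError as exc:
        print(exc)   # Invalid date or datetime (at line 1, column 5)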
- """ - ( - year_str, - month_str, - day_str, - hour_str, - minute_str, - sec_str, - micros_str, - zulu_time, - offset_sign_str, - offset_hour_str, - offset_minute_str, - ) = match.groups() - year, month, day = int(year_str), int(month_str), int(day_str) - if hour_str is None: - return date(year, month, day) - hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) - micros = int(micros_str.ljust(6, "0")) if micros_str else 0 - if offset_sign_str: - tz: tzinfo | None = cached_tz( - offset_hour_str, offset_minute_str, offset_sign_str - ) - elif zulu_time: - tz = timezone.utc - else: # local date-time - tz = None - return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) - - -@lru_cache(maxsize=None) -def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: - sign = 1 if sign_str == "+" else -1 - return timezone( - timedelta( - hours=sign * int(hour_str), - minutes=sign * int(minute_str), - ) - ) - - -def match_to_localtime(match: re.Match[str]) -> time: - hour_str, minute_str, sec_str, micros_str = match.groups() - micros = int(micros_str.ljust(6, "0")) if micros_str else 0 - return time(int(hour_str), int(minute_str), int(sec_str), micros) - - -def match_to_number(match: re.Match[str], parse_float: ParseFloat) -> Any: - if match.group("floatpart"): - return parse_float(match.group()) - return int(match.group(), 0) diff --git a/Python313_13_x64_Template/Lib/trace.py b/Python313_13_x64_Template/Lib/trace.py deleted file mode 100644 index 64fc8037..00000000 --- a/Python313_13_x64_Template/Lib/trace.py +++ /dev/null @@ -1,754 +0,0 @@ -#!/usr/bin/env python3 - -# portions copyright 2001, Autonomous Zones Industries, Inc., all rights... -# err... reserved and offered to the public under the terms of the -# Python 2.2 license. -# Author: Zooko O'Whielacronx -# http://zooko.com/ -# mailto:zooko@zooko.com -# -# Copyright 2000, Mojam Media, Inc., all rights reserved. -# Author: Skip Montanaro -# -# Copyright 1999, Bioreason, Inc., all rights reserved. -# Author: Andrew Dalke -# -# Copyright 1995-1997, Automatrix, Inc., all rights reserved. -# Author: Skip Montanaro -# -# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved. -# -# -# Permission to use, copy, modify, and distribute this Python software and -# its associated documentation for any purpose without fee is hereby -# granted, provided that the above copyright notice appears in all copies, -# and that both that copyright notice and this permission notice appear in -# supporting documentation, and that the name of neither Automatrix, -# Bioreason or Mojam Media be used in advertising or publicity pertaining to -# distribution of the software without specific, written prior permission. -# -"""program/module to trace Python program or function execution - -Sample use, command line: - trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs - trace.py -t --ignore-dir '$prefix' spam.py eggs - trace.py --trackcalls spam.py eggs - -Sample use, programmatically - import sys - - # create a Trace object, telling it what to ignore, and whether to - # do tracing or line-counting or both. 
- tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,], - trace=0, count=1) - # run the new command using the given tracer - tracer.run('main()') - # make a report, placing output in /tmp - r = tracer.results() - r.write_results(show_missing=True, coverdir="/tmp") -""" -__all__ = ['Trace', 'CoverageResults'] - -import io -import linecache -import os -import sys -import sysconfig -import token -import tokenize -import inspect -import gc -import dis -import pickle -from time import monotonic as _time - -import threading - -PRAGMA_NOCOVER = "#pragma NO COVER" - -class _Ignore: - def __init__(self, modules=None, dirs=None): - self._mods = set() if not modules else set(modules) - self._dirs = [] if not dirs else [os.path.normpath(d) - for d in dirs] - self._ignore = { '': 1 } - - def names(self, filename, modulename): - if modulename in self._ignore: - return self._ignore[modulename] - - # haven't seen this one before, so see if the module name is - # on the ignore list. - if modulename in self._mods: # Identical names, so ignore - self._ignore[modulename] = 1 - return 1 - - # check if the module is a proper submodule of something on - # the ignore list - for mod in self._mods: - # Need to take some care since ignoring - # "cmp" mustn't mean ignoring "cmpcache" but ignoring - # "Spam" must also mean ignoring "Spam.Eggs". - if modulename.startswith(mod + '.'): - self._ignore[modulename] = 1 - return 1 - - # Now check that filename isn't in one of the directories - if filename is None: - # must be a built-in, so we must ignore - self._ignore[modulename] = 1 - return 1 - - # Ignore a file when it contains one of the ignorable paths - for d in self._dirs: - # The '+ os.sep' is to ensure that d is a parent directory, - # as compared to cases like: - # d = "/usr/local" - # filename = "/usr/local.py" - # or - # d = "/usr/local.py" - # filename = "/usr/local.py" - if filename.startswith(d + os.sep): - self._ignore[modulename] = 1 - return 1 - - # Tried the different ways, so we don't ignore this module - self._ignore[modulename] = 0 - return 0 - -def _modname(path): - """Return a plausible module name for the path.""" - - base = os.path.basename(path) - filename, ext = os.path.splitext(base) - return filename - -def _fullmodname(path): - """Return a plausible module name for the path.""" - - # If the file 'path' is part of a package, then the filename isn't - # enough to uniquely identify it. Try to do the right thing by - # looking in sys.path for the longest matching prefix. We'll - # assume that the rest is the package name. 
- - comparepath = os.path.normcase(path) - longest = "" - for dir in sys.path: - dir = os.path.normcase(dir) - if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep: - if len(dir) > len(longest): - longest = dir - - if longest: - base = path[len(longest) + 1:] - else: - base = path - # the drive letter is never part of the module name - drive, base = os.path.splitdrive(base) - base = base.replace(os.sep, ".") - if os.altsep: - base = base.replace(os.altsep, ".") - filename, ext = os.path.splitext(base) - return filename.lstrip(".") - -class CoverageResults: - def __init__(self, counts=None, calledfuncs=None, infile=None, - callers=None, outfile=None): - self.counts = counts - if self.counts is None: - self.counts = {} - self.counter = self.counts.copy() # map (filename, lineno) to count - self.calledfuncs = calledfuncs - if self.calledfuncs is None: - self.calledfuncs = {} - self.calledfuncs = self.calledfuncs.copy() - self.callers = callers - if self.callers is None: - self.callers = {} - self.callers = self.callers.copy() - self.infile = infile - self.outfile = outfile - if self.infile: - # Try to merge existing counts file. - try: - with open(self.infile, 'rb') as f: - counts, calledfuncs, callers = pickle.load(f) - self.update(self.__class__(counts, calledfuncs, callers=callers)) - except (OSError, EOFError, ValueError) as err: - print(("Skipping counts file %r: %s" - % (self.infile, err)), file=sys.stderr) - - def is_ignored_filename(self, filename): - """Return True if the filename does not refer to a file - we want to have reported. - """ - return filename.startswith('<') and filename.endswith('>') - - def update(self, other): - """Merge in the data from another CoverageResults""" - counts = self.counts - calledfuncs = self.calledfuncs - callers = self.callers - other_counts = other.counts - other_calledfuncs = other.calledfuncs - other_callers = other.callers - - for key in other_counts: - counts[key] = counts.get(key, 0) + other_counts[key] - - for key in other_calledfuncs: - calledfuncs[key] = 1 - - for key in other_callers: - callers[key] = 1 - - def write_results(self, show_missing=True, summary=False, coverdir=None, *, - ignore_missing_files=False): - """ - Write the coverage results. - - :param show_missing: Show lines that had no hits. - :param summary: Include coverage summary per module. - :param coverdir: If None, the results of each module are placed in its - directory, otherwise it is included in the directory - specified. - :param ignore_missing_files: If True, counts for files that no longer - exist are silently ignored. Otherwise, a missing file - will raise a FileNotFoundError. 
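An end-to-end sketch of the programmatic API documented above; the coverdir path is illustrative:

    import trace

    def work(n):
        total = 0
        for i in range(n):
            total += i
        return total

    tracer = trace.Trace(trace=0, count=1)
    tracer.runfunc(work, 10)
    results = tracer.results()
    # Writes a .cover file per traced module under /tmp.
    results.write_results(show_missing=True, summary=True, coverdir="/tmp")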
- """ - if self.calledfuncs: - print() - print("functions called:") - calls = self.calledfuncs - for filename, modulename, funcname in sorted(calls): - print(("filename: %s, modulename: %s, funcname: %s" - % (filename, modulename, funcname))) - - if self.callers: - print() - print("calling relationships:") - lastfile = lastcfile = "" - for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) \ - in sorted(self.callers): - if pfile != lastfile: - print() - print("***", pfile, "***") - lastfile = pfile - lastcfile = "" - if cfile != pfile and lastcfile != cfile: - print(" -->", cfile) - lastcfile = cfile - print(" %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)) - - # turn the counts data ("(filename, lineno) = count") into something - # accessible on a per-file basis - per_file = {} - for filename, lineno in self.counts: - lines_hit = per_file[filename] = per_file.get(filename, {}) - lines_hit[lineno] = self.counts[(filename, lineno)] - - # accumulate summary info, if needed - sums = {} - - for filename, count in per_file.items(): - if self.is_ignored_filename(filename): - continue - - if filename.endswith(".pyc"): - filename = filename[:-1] - - if ignore_missing_files and not os.path.isfile(filename): - continue - - if coverdir is None: - dir = os.path.dirname(os.path.abspath(filename)) - modulename = _modname(filename) - else: - dir = coverdir - os.makedirs(dir, exist_ok=True) - modulename = _fullmodname(filename) - - # If desired, get a list of the line numbers which represent - # executable content (returned as a dict for better lookup speed) - if show_missing: - lnotab = _find_executable_linenos(filename) - else: - lnotab = {} - source = linecache.getlines(filename) - coverpath = os.path.join(dir, modulename + ".cover") - with open(filename, 'rb') as fp: - encoding, _ = tokenize.detect_encoding(fp.readline) - n_hits, n_lines = self.write_results_file(coverpath, source, - lnotab, count, encoding) - if summary and n_lines: - percent = int(100 * n_hits / n_lines) - sums[modulename] = n_lines, percent, modulename, filename - - if summary and sums: - print("lines cov% module (path)") - for m in sorted(sums): - n_lines, percent, modulename, filename = sums[m] - print("%5d %3d%% %s (%s)" % sums[m]) - - if self.outfile: - # try and store counts and module info into self.outfile - try: - with open(self.outfile, 'wb') as f: - pickle.dump((self.counts, self.calledfuncs, self.callers), - f, 1) - except OSError as err: - print("Can't save counts files because %s" % err, file=sys.stderr) - - def write_results_file(self, path, lines, lnotab, lines_hit, encoding=None): - """Return a coverage results file in path.""" - # ``lnotab`` is a dict of executable lines, or a line number "table" - - try: - outfile = open(path, "w", encoding=encoding) - except OSError as err: - print(("trace: Could not open %r for writing: %s " - "- skipping" % (path, err)), file=sys.stderr) - return 0, 0 - - n_lines = 0 - n_hits = 0 - with outfile: - for lineno, line in enumerate(lines, 1): - # do the blank/comment match to try to mark more lines - # (help the reader find stuff that hasn't been covered) - if lineno in lines_hit: - outfile.write("%5d: " % lines_hit[lineno]) - n_hits += 1 - n_lines += 1 - elif lineno in lnotab and not PRAGMA_NOCOVER in line: - # Highlight never-executed lines, unless the line contains - # #pragma: NO COVER - outfile.write(">>>>>> ") - n_lines += 1 - else: - outfile.write(" ") - outfile.write(line.expandtabs(8)) - - return n_hits, n_lines - -def _find_lines_from_code(code, strs): - """Return dict where keys 
are lines in the line number table.""" - linenos = {} - - for _, lineno in dis.findlinestarts(code): - if lineno not in strs: - linenos[lineno] = 1 - - return linenos - -def _find_lines(code, strs): - """Return lineno dict for all code objects reachable from code.""" - # get all of the lineno information from the code of this scope level - linenos = _find_lines_from_code(code, strs) - - # and check the constants for references to other code objects - for c in code.co_consts: - if inspect.iscode(c): - # find another code object, so recurse into it - linenos.update(_find_lines(c, strs)) - return linenos - -def _find_strings(filename, encoding=None): - """Return a dict of possible docstring positions. - - The dict maps line numbers to strings. There is an entry for - line that contains only a string or a part of a triple-quoted - string. - """ - d = {} - # If the first token is a string, then it's the module docstring. - # Add this special case so that the test in the loop passes. - prev_ttype = token.INDENT - with open(filename, encoding=encoding) as f: - tok = tokenize.generate_tokens(f.readline) - for ttype, tstr, start, end, line in tok: - if ttype == token.STRING: - if prev_ttype == token.INDENT: - sline, scol = start - eline, ecol = end - for i in range(sline, eline + 1): - d[i] = 1 - prev_ttype = ttype - return d - -def _find_executable_linenos(filename): - """Return dict where keys are line numbers in the line number table.""" - try: - with tokenize.open(filename) as f: - prog = f.read() - encoding = f.encoding - except OSError as err: - print(("Not printing coverage data for %r: %s" - % (filename, err)), file=sys.stderr) - return {} - code = compile(prog, filename, "exec") - strs = _find_strings(filename, encoding) - return _find_lines(code, strs) - -class Trace: - def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0, - ignoremods=(), ignoredirs=(), infile=None, outfile=None, - timing=False): - """ - @param count true iff it should count number of times each - line is executed - @param trace true iff it should print out each line that is - being counted - @param countfuncs true iff it should just output a list of - (filename, modulename, funcname,) for functions - that were called at least once; This overrides - `count' and `trace' - @param ignoremods a list of the names of modules to ignore - @param ignoredirs a list of the names of directories to ignore - all of the (recursive) contents of - @param infile file from which to read stored counts to be - added into the results - @param outfile file in which to write the results - @param timing true iff timing information be displayed - """ - self.infile = infile - self.outfile = outfile - self.ignore = _Ignore(ignoremods, ignoredirs) - self.counts = {} # keys are (filename, linenumber) - self.pathtobasename = {} # for memoizing os.path.basename - self.donothing = 0 - self.trace = trace - self._calledfuncs = {} - self._callers = {} - self._caller_cache = {} - self.start_time = None - if timing: - self.start_time = _time() - if countcallers: - self.globaltrace = self.globaltrace_trackcallers - elif countfuncs: - self.globaltrace = self.globaltrace_countfuncs - elif trace and count: - self.globaltrace = self.globaltrace_lt - self.localtrace = self.localtrace_trace_and_count - elif trace: - self.globaltrace = self.globaltrace_lt - self.localtrace = self.localtrace_trace - elif count: - self.globaltrace = self.globaltrace_lt - self.localtrace = self.localtrace_count - else: - # Ahem -- do nothing? Okay. 
- self.donothing = 1 - - def run(self, cmd): - import __main__ - dict = __main__.__dict__ - self.runctx(cmd, dict, dict) - - def runctx(self, cmd, globals=None, locals=None): - if globals is None: globals = {} - if locals is None: locals = {} - if not self.donothing: - threading.settrace(self.globaltrace) - sys.settrace(self.globaltrace) - try: - exec(cmd, globals, locals) - finally: - if not self.donothing: - sys.settrace(None) - threading.settrace(None) - - def runfunc(self, func, /, *args, **kw): - result = None - if not self.donothing: - sys.settrace(self.globaltrace) - try: - result = func(*args, **kw) - finally: - if not self.donothing: - sys.settrace(None) - return result - - def file_module_function_of(self, frame): - code = frame.f_code - filename = code.co_filename - if filename: - modulename = _modname(filename) - else: - modulename = None - - funcname = code.co_name - clsname = None - if code in self._caller_cache: - if self._caller_cache[code] is not None: - clsname = self._caller_cache[code] - else: - self._caller_cache[code] = None - ## use of gc.get_referrers() was suggested by Michael Hudson - # all functions which refer to this code object - funcs = [f for f in gc.get_referrers(code) - if inspect.isfunction(f)] - # require len(func) == 1 to avoid ambiguity caused by calls to - # new.function(): "In the face of ambiguity, refuse the - # temptation to guess." - if len(funcs) == 1: - dicts = [d for d in gc.get_referrers(funcs[0]) - if isinstance(d, dict)] - if len(dicts) == 1: - classes = [c for c in gc.get_referrers(dicts[0]) - if hasattr(c, "__bases__")] - if len(classes) == 1: - # ditto for new.classobj() - clsname = classes[0].__name__ - # cache the result - assumption is that new.* is - # not called later to disturb this relationship - # _caller_cache could be flushed if functions in - # the new module get called. - self._caller_cache[code] = clsname - if clsname is not None: - funcname = "%s.%s" % (clsname, funcname) - - return filename, modulename, funcname - - def globaltrace_trackcallers(self, frame, why, arg): - """Handler for call events. - - Adds information about who called who to the self._callers dict. - """ - if why == 'call': - # XXX Should do a better job of identifying methods - this_func = self.file_module_function_of(frame) - parent_func = self.file_module_function_of(frame.f_back) - self._callers[(parent_func, this_func)] = 1 - - def globaltrace_countfuncs(self, frame, why, arg): - """Handler for call events. - - Adds (filename, modulename, funcname) to the self._calledfuncs dict. - """ - if why == 'call': - this_func = self.file_module_function_of(frame) - self._calledfuncs[this_func] = 1 - - def globaltrace_lt(self, frame, why, arg): - """Handler for call events. - - If the code block being entered is to be ignored, returns `None', - else returns self.localtrace. 
- """ - if why == 'call': - code = frame.f_code - filename = frame.f_globals.get('__file__', None) - if filename: - # XXX _modname() doesn't work right for packages, so - # the ignore support won't work right for packages - modulename = _modname(filename) - if modulename is not None: - ignore_it = self.ignore.names(filename, modulename) - if not ignore_it: - if self.trace: - print((" --- modulename: %s, funcname: %s" - % (modulename, code.co_name))) - return self.localtrace - else: - return None - - def localtrace_trace_and_count(self, frame, why, arg): - if why == "line": - # record the file name and line number of every trace - filename = frame.f_code.co_filename - lineno = frame.f_lineno - key = filename, lineno - self.counts[key] = self.counts.get(key, 0) + 1 - - if self.start_time: - print('%.2f' % (_time() - self.start_time), end=' ') - bname = os.path.basename(filename) - line = linecache.getline(filename, lineno) - print("%s(%d)" % (bname, lineno), end='') - if line: - print(": ", line, end='') - else: - print() - return self.localtrace - - def localtrace_trace(self, frame, why, arg): - if why == "line": - # record the file name and line number of every trace - filename = frame.f_code.co_filename - lineno = frame.f_lineno - - if self.start_time: - print('%.2f' % (_time() - self.start_time), end=' ') - bname = os.path.basename(filename) - line = linecache.getline(filename, lineno) - print("%s(%d)" % (bname, lineno), end='') - if line: - print(": ", line, end='') - else: - print() - return self.localtrace - - def localtrace_count(self, frame, why, arg): - if why == "line": - filename = frame.f_code.co_filename - lineno = frame.f_lineno - key = filename, lineno - self.counts[key] = self.counts.get(key, 0) + 1 - return self.localtrace - - def results(self): - return CoverageResults(self.counts, infile=self.infile, - outfile=self.outfile, - calledfuncs=self._calledfuncs, - callers=self._callers) - -def main(): - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument('--version', action='version', version='trace 2.0') - - grp = parser.add_argument_group('Main options', - 'One of these (or --report) must be given') - - grp.add_argument('-c', '--count', action='store_true', - help='Count the number of times each line is executed and write ' - 'the counts to .cover for each module executed, in ' - 'the module\'s directory. See also --coverdir, --file, ' - '--no-report below.') - grp.add_argument('-t', '--trace', action='store_true', - help='Print each line to sys.stdout before it is executed') - grp.add_argument('-l', '--listfuncs', action='store_true', - help='Keep track of which functions are executed at least once ' - 'and write the results to sys.stdout after the program exits. ' - 'Cannot be specified alongside --trace or --count.') - grp.add_argument('-T', '--trackcalls', action='store_true', - help='Keep track of caller/called pairs and write the results to ' - 'sys.stdout after the program exits.') - - grp = parser.add_argument_group('Modifiers') - - _grp = grp.add_mutually_exclusive_group() - _grp.add_argument('-r', '--report', action='store_true', - help='Generate a report from a counts file; does not execute any ' - 'code. --file must specify the results file to read, which ' - 'must have been created in a previous run with --count ' - '--file=FILE') - _grp.add_argument('-R', '--no-report', action='store_true', - help='Do not generate the coverage report files. 
' - 'Useful if you want to accumulate over several runs.') - - grp.add_argument('-f', '--file', - help='File to accumulate counts over several runs') - grp.add_argument('-C', '--coverdir', - help='Directory where the report files go. The coverage report ' - 'for <package>.<module> will be written to file ' - '<dir>/<package>/<module>.cover') - grp.add_argument('-m', '--missing', action='store_true', - help='Annotate executable lines that were not executed with ' - '">>>>>> "') - grp.add_argument('-s', '--summary', action='store_true', - help='Write a brief summary for each file to sys.stdout. ' - 'Can only be used with --count or --report') - grp.add_argument('-g', '--timing', action='store_true', - help='Prefix each line with the time since the program started. ' - 'Only used while tracing') - - grp = parser.add_argument_group('Filters', - 'Can be specified multiple times') - grp.add_argument('--ignore-module', action='append', default=[], - help='Ignore the given module(s) and its submodules ' - '(if it is a package). Accepts comma separated list of ' - 'module names.') - grp.add_argument('--ignore-dir', action='append', default=[], - help='Ignore files in the given directory ' - '(multiple directories can be joined by os.pathsep).') - - parser.add_argument('--module', action='store_true', default=False, - help='Trace a module. ') - parser.add_argument('progname', nargs='?', - help='file to run as main program') - parser.add_argument('arguments', nargs=argparse.REMAINDER, - help='arguments to the program') - - opts = parser.parse_args() - - if opts.ignore_dir: - _prefix = sysconfig.get_path("stdlib") - _exec_prefix = sysconfig.get_path("platstdlib") - - def parse_ignore_dir(s): - s = os.path.expanduser(os.path.expandvars(s)) - s = s.replace('$prefix', _prefix).replace('$exec_prefix', _exec_prefix) - return os.path.normpath(s) - - opts.ignore_module = [mod.strip() - for i in opts.ignore_module for mod in i.split(',')] - opts.ignore_dir = [parse_ignore_dir(s) - for i in opts.ignore_dir for s in i.split(os.pathsep)] - - if opts.report: - if not opts.file: - parser.error('-r/--report requires -f/--file') - results = CoverageResults(infile=opts.file, outfile=opts.file) - return results.write_results(opts.missing, opts.summary, opts.coverdir) - - if not any([opts.trace, opts.count, opts.listfuncs, opts.trackcalls]): - parser.error('must specify one of --trace, --count, --report, ' - '--listfuncs, or --trackcalls') - - if opts.listfuncs and (opts.count or opts.trace): - parser.error('cannot specify both --listfuncs and (--trace or --count)') - - if opts.summary and not opts.count: - parser.error('--summary can only be used with --count or --report') - - if opts.progname is None: - parser.error('progname is missing: required with the main options') - - t = Trace(opts.count, opts.trace, countfuncs=opts.listfuncs, - countcallers=opts.trackcalls, ignoremods=opts.ignore_module, - ignoredirs=opts.ignore_dir, infile=opts.file, - outfile=opts.file, timing=opts.timing) - try: - if opts.module: - import runpy - module_name = opts.progname - mod_name, mod_spec, code = runpy._get_module_details(module_name) - sys.argv = [code.co_filename, *opts.arguments] - globs = { - '__name__': '__main__', - '__file__': code.co_filename, - '__package__': mod_spec.parent, - '__loader__': mod_spec.loader, - '__spec__': mod_spec, - '__cached__': None, - } - else: - sys.argv = [opts.progname, *opts.arguments] - sys.path[0] = os.path.dirname(opts.progname) - - with io.open_code(opts.progname) as fp: - code = compile(fp.read(), opts.progname, 'exec') - # try to
emulate __main__ namespace as much as possible - globs = { - '__file__': opts.progname, - '__name__': '__main__', - '__package__': None, - '__cached__': None, - } - t.runctx(code, globs, globs) - except OSError as err: - sys.exit("Cannot run file %r because: %s" % (sys.argv[0], err)) - except SystemExit: - pass - - results = t.results() - - if not opts.no_report: - results.write_results(opts.missing, opts.summary, opts.coverdir) - -if __name__=='__main__': - main() diff --git a/Python313_13_x64_Template/Lib/traceback.py b/Python313_13_x64_Template/Lib/traceback.py deleted file mode 100644 index b412954b..00000000 --- a/Python313_13_x64_Template/Lib/traceback.py +++ /dev/null @@ -1,1640 +0,0 @@ -"""Extract, format and print information about Python stack traces.""" - -import collections.abc -import itertools -import linecache -import sys -import textwrap -import warnings -from contextlib import suppress -import _colorize -from _colorize import ANSIColors - -__all__ = ['extract_stack', 'extract_tb', 'format_exception', - 'format_exception_only', 'format_list', 'format_stack', - 'format_tb', 'print_exc', 'format_exc', 'print_exception', - 'print_last', 'print_stack', 'print_tb', 'clear_frames', - 'FrameSummary', 'StackSummary', 'TracebackException', - 'walk_stack', 'walk_tb'] - -# -# Formatting and printing lists of traceback lines. -# - - -def print_list(extracted_list, file=None): - """Print the list of tuples as returned by extract_tb() or - extract_stack() as a formatted stack trace to the given file.""" - if file is None: - file = sys.stderr - for item in StackSummary.from_list(extracted_list).format(): - print(item, file=file, end="") - -def format_list(extracted_list): - """Format a list of tuples or FrameSummary objects for printing. - - Given a list of tuples or FrameSummary objects as returned by - extract_tb() or extract_stack(), return a list of strings ready - for printing. - - Each string in the resulting list corresponds to the item with the - same index in the argument list. Each string ends in a newline; - the strings may contain internal newlines as well, for those items - whose source text line is not None. - """ - return StackSummary.from_list(extracted_list).format() - -# -# Printing and Extracting Tracebacks. -# - -def print_tb(tb, limit=None, file=None): - """Print up to 'limit' stack trace entries from the traceback 'tb'. - - If 'limit' is omitted or None, all entries are printed. If 'file' - is omitted or None, the output goes to sys.stderr; otherwise - 'file' should be an open file or file-like object with a write() - method. - """ - print_list(extract_tb(tb, limit=limit), file=file) - -def format_tb(tb, limit=None): - """A shorthand for 'format_list(extract_tb(tb, limit))'.""" - return extract_tb(tb, limit=limit).format() - -def extract_tb(tb, limit=None): - """ - Return a StackSummary object representing a list of - pre-processed entries from traceback. - - This is useful for alternate formatting of stack traces. If - 'limit' is omitted or None, all entries are extracted. A - pre-processed stack trace entry is a FrameSummary object - containing attributes filename, lineno, name, and line - representing the information that is usually printed for a stack - trace. The line is a string with leading and trailing - whitespace stripped; if the source is not available it is None. - """ - return StackSummary._extract_from_extended_frame_gen( - _walk_tb_with_full_positions(tb), limit=limit) - -# -# Exception formatting and output. 
-# - -_cause_message = ( - "\nThe above exception was the direct cause " - "of the following exception:\n\n") - -_context_message = ( - "\nDuring handling of the above exception, " - "another exception occurred:\n\n") - - -class _Sentinel: - def __repr__(self): - return "<implicit>" - -_sentinel = _Sentinel() - -def _parse_value_tb(exc, value, tb): - if (value is _sentinel) != (tb is _sentinel): - raise ValueError("Both or neither of value and tb must be given") - if value is tb is _sentinel: - if exc is not None: - if isinstance(exc, BaseException): - return exc, exc.__traceback__ - - raise TypeError(f'Exception expected for value, ' - f'{type(exc).__name__} found') - else: - return None, None - return value, tb - - -def print_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \ - file=None, chain=True, **kwargs): - """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. - - This differs from print_tb() in the following ways: (1) if - traceback is not None, it prints a header "Traceback (most recent - call last):"; (2) it prints the exception type and value after the - stack trace; (3) if type is SyntaxError and value has the - appropriate format, it prints the line where the syntax error - occurred with a caret on the next line indicating the approximate - position of the error. - """ - colorize = kwargs.get("colorize", False) - value, tb = _parse_value_tb(exc, value, tb) - te = TracebackException(type(value), value, tb, limit=limit, compact=True) - te.print(file=file, chain=chain, colorize=colorize) - - -BUILTIN_EXCEPTION_LIMIT = object() - - -def _print_exception_bltin(exc, /): - file = sys.stderr if sys.stderr is not None else sys.__stderr__ - colorize = _colorize.can_colorize(file=file) - return print_exception(exc, limit=BUILTIN_EXCEPTION_LIMIT, file=file, colorize=colorize) - - -def format_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \ - chain=True, **kwargs): - """Format a stack trace and the exception information. - - The arguments have the same meaning as the corresponding arguments - to print_exception(). The return value is a list of strings, each - ending in a newline and some containing internal newlines. When - these lines are concatenated and printed, exactly the same text is - printed as does print_exception(). - """ - colorize = kwargs.get("colorize", False) - value, tb = _parse_value_tb(exc, value, tb) - te = TracebackException(type(value), value, tb, limit=limit, compact=True) - return list(te.format(chain=chain, colorize=colorize)) - - -def format_exception_only(exc, /, value=_sentinel, *, show_group=False, **kwargs): - """Format the exception part of a traceback. - - The return value is a list of strings, each ending in a newline. - - The list contains the exception's message, which is - normally a single string; however, for :exc:`SyntaxError` exceptions, it - contains several lines that (when printed) display detailed information - about where the syntax error occurred. Following the message, the list - contains the exception's ``__notes__``. - - When *show_group* is ``True``, and the exception is an instance of - :exc:`BaseExceptionGroup`, the nested exceptions are included as - well, recursively, with indentation relative to their nesting depth.
- """ - colorize = kwargs.get("colorize", False) - if value is _sentinel: - value = exc - te = TracebackException(type(value), value, None, compact=True) - return list(te.format_exception_only(show_group=show_group, colorize=colorize)) - - -# -- not official API but folk probably use these two functions. - -def _format_final_exc_line(etype, value, *, insert_final_newline=True, colorize=False): - valuestr = _safe_string(value, 'exception') - end_char = "\n" if insert_final_newline else "" - if colorize: - if value is None or not valuestr: - line = f"{ANSIColors.BOLD_MAGENTA}{etype}{ANSIColors.RESET}{end_char}" - else: - line = f"{ANSIColors.BOLD_MAGENTA}{etype}{ANSIColors.RESET}: {ANSIColors.MAGENTA}{valuestr}{ANSIColors.RESET}{end_char}" - else: - if value is None or not valuestr: - line = f"{etype}{end_char}" - else: - line = f"{etype}: {valuestr}{end_char}" - return line - - -def _safe_string(value, what, func=str): - try: - return func(value) - except: - return f'<{what} {func.__name__}() failed>' - -# -- - -def print_exc(limit=None, file=None, chain=True): - """Shorthand for 'print_exception(sys.exception(), limit=limit, file=file, chain=chain)'.""" - print_exception(sys.exception(), limit=limit, file=file, chain=chain) - -def format_exc(limit=None, chain=True): - """Like print_exc() but return a string.""" - return "".join(format_exception(sys.exception(), limit=limit, chain=chain)) - -def print_last(limit=None, file=None, chain=True): - """This is a shorthand for 'print_exception(sys.last_exc, limit=limit, file=file, chain=chain)'.""" - if not hasattr(sys, "last_exc") and not hasattr(sys, "last_type"): - raise ValueError("no last exception") - - if hasattr(sys, "last_exc"): - print_exception(sys.last_exc, limit=limit, file=file, chain=chain) - else: - print_exception(sys.last_type, sys.last_value, sys.last_traceback, - limit=limit, file=file, chain=chain) - - -# -# Printing and Extracting Stacks. -# - -def print_stack(f=None, limit=None, file=None): - """Print a stack trace from its invocation point. - - The optional 'f' argument can be used to specify an alternate - stack frame at which to start. The optional 'limit' and 'file' - arguments have the same meaning as for print_exception(). - """ - if f is None: - f = sys._getframe().f_back - print_list(extract_stack(f, limit=limit), file=file) - - -def format_stack(f=None, limit=None): - """Shorthand for 'format_list(extract_stack(f, limit))'.""" - if f is None: - f = sys._getframe().f_back - return format_list(extract_stack(f, limit=limit)) - - -def extract_stack(f=None, limit=None): - """Extract the raw traceback from the current stack frame. - - The return value has the same format as for extract_tb(). The - optional 'f' and 'limit' arguments have the same meaning as for - print_stack(). Each item in the list is a quadruple (filename, - line number, function name, text), and the entries are in order - from oldest to newest stack frame. - """ - if f is None: - f = sys._getframe().f_back - stack = StackSummary.extract(walk_stack(f), limit=limit) - stack.reverse() - return stack - - -def clear_frames(tb): - "Clear all references to local variables in the frames of a traceback." - while tb is not None: - try: - tb.tb_frame.clear() - except RuntimeError: - # Ignore the exception raised if the frame is still executing. - pass - tb = tb.tb_next - - -class FrameSummary: - """Information about a single frame from a traceback. - - - :attr:`filename` The filename for the frame. 
- - :attr:`lineno` The line within filename for the frame that was - active when the frame was captured. - - :attr:`name` The name of the function or method that was executing - when the frame was captured. - - :attr:`line` The text from the linecache module for the - of code that was running when the frame was captured. - - :attr:`locals` Either None if locals were not supplied, or a dict - mapping the name to the repr() of the variable. - """ - - __slots__ = ('filename', 'lineno', 'end_lineno', 'colno', 'end_colno', - 'name', '_lines', '_lines_dedented', 'locals', '_code') - - def __init__(self, filename, lineno, name, *, lookup_line=True, - locals=None, line=None, - end_lineno=None, colno=None, end_colno=None, **kwargs): - """Construct a FrameSummary. - - :param lookup_line: If True, `linecache` is consulted for the source - code line. Otherwise, the line will be looked up when first needed. - :param locals: If supplied the frame locals, which will be captured as - object representations. - :param line: If provided, use this instead of looking up the line in - the linecache. - """ - self.filename = filename - self.lineno = lineno - self.end_lineno = lineno if end_lineno is None else end_lineno - self.colno = colno - self.end_colno = end_colno - self.name = name - self._code = kwargs.get("_code") - self._lines = line - self._lines_dedented = None - if lookup_line: - self.line - self.locals = {k: _safe_string(v, 'local', func=repr) - for k, v in locals.items()} if locals else None - - def __eq__(self, other): - if isinstance(other, FrameSummary): - return (self.filename == other.filename and - self.lineno == other.lineno and - self.name == other.name and - self.locals == other.locals) - if isinstance(other, tuple): - return (self.filename, self.lineno, self.name, self.line) == other - return NotImplemented - - def __getitem__(self, pos): - return (self.filename, self.lineno, self.name, self.line)[pos] - - def __iter__(self): - return iter([self.filename, self.lineno, self.name, self.line]) - - def __repr__(self): - return "<FrameSummary file {filename}, line {lineno} in {name}>".format( - filename=self.filename, lineno=self.lineno, name=self.name) - - def __len__(self): - return 4 - - def _set_lines(self): - if ( - self._lines is None - and self.lineno is not None - and self.end_lineno is not None - ): - lines = [] - for lineno in range(self.lineno, self.end_lineno + 1): - # treat errors (empty string) and empty lines (newline) as the same - line = linecache.getline(self.filename, lineno).rstrip() - if not line and self._code is not None and self.filename.startswith("<"): - line = linecache._getline_from_code(self._code, lineno).rstrip() - lines.append(line) - self._lines = "\n".join(lines) + "\n" - - @property - def _original_lines(self): - # Returns the line as-is from the source, without modifying whitespace. - self._set_lines() - return self._lines - - @property - def _dedented_lines(self): - # Returns _original_lines, but dedented - self._set_lines() - if self._lines_dedented is None and self._lines is not None: - self._lines_dedented = textwrap.dedent(self._lines) - return self._lines_dedented - - @property - def line(self): - self._set_lines() - if self._lines is None: - return None - # return only the first line, stripped - return self._lines.partition("\n")[0].strip() - - -def walk_stack(f): - """Walk a stack yielding the frame and line number for each frame. - - This will follow f.f_back from the given frame. If no frame is given, the - current stack is used. Usually used with StackSummary.extract.
- """ - if f is None: - f = sys._getframe().f_back.f_back.f_back.f_back - while f is not None: - yield f, f.f_lineno - f = f.f_back - - -def walk_tb(tb): - """Walk a traceback yielding the frame and line number for each frame. - - This will follow tb.tb_next (and thus is in the opposite order to - walk_stack). Usually used with StackSummary.extract. - """ - while tb is not None: - yield tb.tb_frame, tb.tb_lineno - tb = tb.tb_next - - -def _walk_tb_with_full_positions(tb): - # Internal version of walk_tb that yields full code positions including - # end line and column information. - while tb is not None: - positions = _get_code_position(tb.tb_frame.f_code, tb.tb_lasti) - # Yield tb_lineno when co_positions does not have a line number to - # maintain behavior with walk_tb. - if positions[0] is None: - yield tb.tb_frame, (tb.tb_lineno, ) + positions[1:] - else: - yield tb.tb_frame, positions - tb = tb.tb_next - - -def _get_code_position(code, instruction_index): - if instruction_index < 0: - return (None, None, None, None) - positions_gen = code.co_positions() - return next(itertools.islice(positions_gen, instruction_index // 2, None)) - - -_RECURSIVE_CUTOFF = 3 # Also hardcoded in traceback.c. - - -class StackSummary(list): - """A list of FrameSummary objects, representing a stack of frames.""" - - @classmethod - def extract(klass, frame_gen, *, limit=None, lookup_lines=True, - capture_locals=False): - """Create a StackSummary from a traceback or stack object. - - :param frame_gen: A generator that yields (frame, lineno) tuples - whose summaries are to be included in the stack. - :param limit: None to include all frames or the number of frames to - include. - :param lookup_lines: If True, lookup lines for each frame immediately, - otherwise lookup is deferred until the frame is rendered. - :param capture_locals: If True, the local variables from each frame will - be captured as object representations into the FrameSummary. - """ - def extended_frame_gen(): - for f, lineno in frame_gen: - yield f, (lineno, None, None, None) - - return klass._extract_from_extended_frame_gen( - extended_frame_gen(), limit=limit, lookup_lines=lookup_lines, - capture_locals=capture_locals) - - @classmethod - def _extract_from_extended_frame_gen(klass, frame_gen, *, limit=None, - lookup_lines=True, capture_locals=False): - # Same as extract but operates on a frame generator that yields - # (frame, (lineno, end_lineno, colno, end_colno)) in the stack. - # Only lineno is required, the remaining fields can be None if the - # information is not available. - builtin_limit = limit is BUILTIN_EXCEPTION_LIMIT - if limit is None or builtin_limit: - limit = getattr(sys, 'tracebacklimit', None) - if limit is not None and limit < 0: - limit = 0 - if limit is not None: - if builtin_limit: - frame_gen = tuple(frame_gen) - frame_gen = frame_gen[len(frame_gen) - limit:] - elif limit >= 0: - frame_gen = itertools.islice(frame_gen, limit) - else: - frame_gen = collections.deque(frame_gen, maxlen=-limit) - - result = klass() - fnames = set() - for f, (lineno, end_lineno, colno, end_colno) in frame_gen: - co = f.f_code - filename = co.co_filename - name = co.co_name - fnames.add(filename) - linecache.lazycache(filename, f.f_globals) - # Must defer line lookups until we have called checkcache. 
- if capture_locals: - f_locals = f.f_locals - else: - f_locals = None - result.append( - FrameSummary(filename, lineno, name, - lookup_line=False, locals=f_locals, - end_lineno=end_lineno, colno=colno, end_colno=end_colno, - _code=f.f_code, - ) - ) - for filename in fnames: - linecache.checkcache(filename) - - # If immediate lookup was desired, trigger lookups now. - if lookup_lines: - for f in result: - f.line - return result - - @classmethod - def from_list(klass, a_list): - """ - Create a StackSummary object from a supplied list of - FrameSummary objects or old-style list of tuples. - """ - # While doing a fast-path check for isinstance(a_list, StackSummary) is - # appealing, idlelib.run.cleanup_traceback and other similar code may - # break this by making arbitrary frames plain tuples, so we need to - # check on a frame by frame basis. - result = StackSummary() - for frame in a_list: - if isinstance(frame, FrameSummary): - result.append(frame) - else: - filename, lineno, name, line = frame - result.append(FrameSummary(filename, lineno, name, line=line)) - return result - - def format_frame_summary(self, frame_summary, **kwargs): - """Format the lines for a single FrameSummary. - - Returns a string representing one frame involved in the stack. This - gets called for every frame to be printed in the stack summary. - """ - colorize = kwargs.get("colorize", False) - row = [] - filename = frame_summary.filename - if frame_summary.filename.startswith("<stdin>-"): - filename = "<stdin>" - if colorize: - row.append(' File {}"{}"{}, line {}{}{}, in {}{}{}\n'.format( - ANSIColors.MAGENTA, - filename, - ANSIColors.RESET, - ANSIColors.MAGENTA, - frame_summary.lineno, - ANSIColors.RESET, - ANSIColors.MAGENTA, - frame_summary.name, - ANSIColors.RESET, - ) - ) - else: - row.append(' File "{}", line {}, in {}\n'.format( - filename, frame_summary.lineno, frame_summary.name)) - if frame_summary._dedented_lines and frame_summary._dedented_lines.strip(): - if ( - frame_summary.colno is None or - frame_summary.end_colno is None - ): - # only output first line if column information is missing - row.append(textwrap.indent(frame_summary.line, ' ') + "\n") - else: - # get first and last line - all_lines_original = frame_summary._original_lines.splitlines() - first_line = all_lines_original[0] - # assume all_lines_original has enough lines (since we constructed it) - last_line = all_lines_original[frame_summary.end_lineno - frame_summary.lineno] - - # character index of the start/end of the instruction - start_offset = _byte_offset_to_character_offset(first_line, frame_summary.colno) - end_offset = _byte_offset_to_character_offset(last_line, frame_summary.end_colno) - - all_lines = frame_summary._dedented_lines.splitlines()[ - :frame_summary.end_lineno - frame_summary.lineno + 1 - ] - - # adjust start/end offset based on dedent - dedent_characters = len(first_line) - len(all_lines[0]) - start_offset = max(0, start_offset - dedent_characters) - end_offset = max(0, end_offset - dedent_characters) - - # When showing this on a terminal, some of the non-ASCII characters - # might be rendered as double-width characters, so we need to take - # that into account when calculating the length of the line.
- dp_start_offset = _display_width(all_lines[0], offset=start_offset) - dp_end_offset = _display_width(all_lines[-1], offset=end_offset) - - # get exact code segment corresponding to the instruction - segment = "\n".join(all_lines) - segment = segment[start_offset:len(segment) - (len(all_lines[-1]) - end_offset)] - - # attempt to parse for anchors - anchors = None - show_carets = False - with suppress(Exception): - anchors = _extract_caret_anchors_from_line_segment(segment) - show_carets = self._should_show_carets(start_offset, end_offset, all_lines, anchors) - - result = [] - - # only display first line, last line, and lines around anchor start/end - significant_lines = {0, len(all_lines) - 1} - - anchors_left_end_offset = 0 - anchors_right_start_offset = 0 - primary_char = "^" - secondary_char = "^" - if anchors: - anchors_left_end_offset = anchors.left_end_offset - anchors_right_start_offset = anchors.right_start_offset - # computed anchor positions do not take start_offset into account, - # so account for it here - if anchors.left_end_lineno == 0: - anchors_left_end_offset += start_offset - if anchors.right_start_lineno == 0: - anchors_right_start_offset += start_offset - - # account for display width - anchors_left_end_offset = _display_width( - all_lines[anchors.left_end_lineno], offset=anchors_left_end_offset - ) - anchors_right_start_offset = _display_width( - all_lines[anchors.right_start_lineno], offset=anchors_right_start_offset - ) - - primary_char = anchors.primary_char - secondary_char = anchors.secondary_char - significant_lines.update( - range(anchors.left_end_lineno - 1, anchors.left_end_lineno + 2) - ) - significant_lines.update( - range(anchors.right_start_lineno - 1, anchors.right_start_lineno + 2) - ) - - # remove bad line numbers - significant_lines.discard(-1) - significant_lines.discard(len(all_lines)) - - def output_line(lineno): - """output all_lines[lineno] along with carets""" - result.append(all_lines[lineno] + "\n") - if not show_carets: - return - num_spaces = len(all_lines[lineno]) - len(all_lines[lineno].lstrip()) - carets = [] - num_carets = dp_end_offset if lineno == len(all_lines) - 1 else _display_width(all_lines[lineno]) - # compute caret character for each position - for col in range(num_carets): - if col < num_spaces or (lineno == 0 and col < dp_start_offset): - # before first non-ws char of the line, or before start of instruction - carets.append(' ') - elif anchors and ( - lineno > anchors.left_end_lineno or - (lineno == anchors.left_end_lineno and col >= anchors_left_end_offset) - ) and ( - lineno < anchors.right_start_lineno or - (lineno == anchors.right_start_lineno and col < anchors_right_start_offset) - ): - # within anchors - carets.append(secondary_char) - else: - carets.append(primary_char) - if colorize: - # Replace the previous line with a red version of it only in the parts covered - # by the carets. 
- line = result[-1] - colorized_line_parts = [] - colorized_carets_parts = [] - - for color, group in itertools.groupby(itertools.zip_longest(line, carets, fillvalue=""), key=lambda x: x[1]): - caret_group = list(group) - if color == "^": - colorized_line_parts.append(ANSIColors.BOLD_RED + "".join(char for char, _ in caret_group) + ANSIColors.RESET) - colorized_carets_parts.append(ANSIColors.BOLD_RED + "".join(caret for _, caret in caret_group) + ANSIColors.RESET) - elif color == "~": - colorized_line_parts.append(ANSIColors.RED + "".join(char for char, _ in caret_group) + ANSIColors.RESET) - colorized_carets_parts.append(ANSIColors.RED + "".join(caret for _, caret in caret_group) + ANSIColors.RESET) - else: - colorized_line_parts.append("".join(char for char, _ in caret_group)) - colorized_carets_parts.append("".join(caret for _, caret in caret_group)) - - colorized_line = "".join(colorized_line_parts) - colorized_carets = "".join(colorized_carets_parts) - result[-1] = colorized_line - result.append(colorized_carets + "\n") - else: - result.append("".join(carets) + "\n") - - # display significant lines - sig_lines_list = sorted(significant_lines) - for i, lineno in enumerate(sig_lines_list): - if i: - linediff = lineno - sig_lines_list[i - 1] - if linediff == 2: - # 1 line in between - just output it - output_line(lineno - 1) - elif linediff > 2: - # > 1 line in between - abbreviate - result.append(f"...<{linediff - 1} lines>...\n") - output_line(lineno) - - row.append( - textwrap.indent(textwrap.dedent("".join(result)), ' ', lambda line: True) - ) - if frame_summary.locals: - for name, value in sorted(frame_summary.locals.items()): - row.append(' {name} = {value}\n'.format(name=name, value=value)) - - return ''.join(row) - - def _should_show_carets(self, start_offset, end_offset, all_lines, anchors): - with suppress(SyntaxError, ImportError): - import ast - tree = ast.parse('\n'.join(all_lines)) - if not tree.body: - return False - statement = tree.body[0] - value = None - def _spawns_full_line(value): - return ( - value.lineno == 1 - and value.end_lineno == len(all_lines) - and value.col_offset == start_offset - and value.end_col_offset == end_offset - ) - match statement: - case ast.Return(value=ast.Call()): - if isinstance(statement.value.func, ast.Name): - value = statement.value - case ast.Assign(value=ast.Call()): - if ( - len(statement.targets) == 1 and - isinstance(statement.targets[0], ast.Name) - ): - value = statement.value - if value is not None and _spawns_full_line(value): - return False - if anchors: - return True - if all_lines[0][:start_offset].lstrip() or all_lines[-1][end_offset:].rstrip(): - return True - return False - - def format(self, **kwargs): - """Format the stack ready for printing. - - Returns a list of strings ready for printing. Each string in the - resulting list corresponds to a single frame from the stack. - Each string ends in a newline; the strings may contain internal - newlines as well, for those items with source text lines. - - For long sequences of the same frame and line, the first few - repetitions are shown, followed by a summary line stating the exact - number of further repetitions. 
- """ - colorize = kwargs.get("colorize", False) - result = [] - last_file = None - last_line = None - last_name = None - count = 0 - for frame_summary in self: - formatted_frame = self.format_frame_summary(frame_summary, colorize=colorize) - if formatted_frame is None: - continue - if (last_file is None or last_file != frame_summary.filename or - last_line is None or last_line != frame_summary.lineno or - last_name is None or last_name != frame_summary.name): - if count > _RECURSIVE_CUTOFF: - count -= _RECURSIVE_CUTOFF - result.append( - f' [Previous line repeated {count} more ' - f'time{"s" if count > 1 else ""}]\n' - ) - last_file = frame_summary.filename - last_line = frame_summary.lineno - last_name = frame_summary.name - count = 0 - count += 1 - if count > _RECURSIVE_CUTOFF: - continue - result.append(formatted_frame) - - if count > _RECURSIVE_CUTOFF: - count -= _RECURSIVE_CUTOFF - result.append( - f' [Previous line repeated {count} more ' - f'time{"s" if count > 1 else ""}]\n' - ) - return result - - -def _byte_offset_to_character_offset(str, offset): - as_utf8 = str.encode('utf-8') - return len(as_utf8[:offset].decode("utf-8", errors="replace")) - - -_Anchors = collections.namedtuple( - "_Anchors", - [ - "left_end_lineno", - "left_end_offset", - "right_start_lineno", - "right_start_offset", - "primary_char", - "secondary_char", - ], - defaults=["~", "^"] -) - -def _extract_caret_anchors_from_line_segment(segment): - """ - Given source code `segment` corresponding to a FrameSummary, determine: - - for binary ops, the location of the binary op - - for indexing and function calls, the location of the brackets. - `segment` is expected to be a valid Python expression. - """ - import ast - - try: - # Without parentheses, `segment` is parsed as a statement. - # Binary ops, subscripts, and calls are expressions, so - # we can wrap them with parentheses to parse them as - # (possibly multi-line) expressions. - # e.g. if we try to highlight the addition in - # x = ( - # a + - # b - # ) - # then we would ast.parse - # a + - # b - # which is not a valid statement because of the newline. - # Adding brackets makes it a valid expression. - # ( - # a + - # b - # ) - # Line locations will be different than the original, - # which is taken into account later on. - tree = ast.parse(f"(\n{segment}\n)") - except SyntaxError: - return None - - if len(tree.body) != 1: - return None - - lines = segment.splitlines() - - def normalize(lineno, offset): - """Get character index given byte offset""" - return _byte_offset_to_character_offset(lines[lineno], offset) - - def next_valid_char(lineno, col): - """Gets the next valid character index in `lines`, if - the current location is not valid. Handles empty lines. 
- """ - while lineno < len(lines) and col >= len(lines[lineno]): - col = 0 - lineno += 1 - assert lineno < len(lines) and col < len(lines[lineno]) - return lineno, col - - def increment(lineno, col): - """Get the next valid character index in `lines`.""" - col += 1 - lineno, col = next_valid_char(lineno, col) - return lineno, col - - def nextline(lineno, col): - """Get the next valid character at least on the next line""" - col = 0 - lineno += 1 - lineno, col = next_valid_char(lineno, col) - return lineno, col - - def increment_until(lineno, col, stop): - """Get the next valid non-"\\#" character that satisfies the `stop` predicate""" - while True: - ch = lines[lineno][col] - if ch in "\\#": - lineno, col = nextline(lineno, col) - elif not stop(ch): - lineno, col = increment(lineno, col) - else: - break - return lineno, col - - def setup_positions(expr, force_valid=True): - """Get the lineno/col position of the end of `expr`. If `force_valid` is True, - forces the position to be a valid character (e.g. if the position is beyond the - end of the line, move to the next line) - """ - # -2 since end_lineno is 1-indexed and because we added an extra - # bracket + newline to `segment` when calling ast.parse - lineno = expr.end_lineno - 2 - col = normalize(lineno, expr.end_col_offset) - return next_valid_char(lineno, col) if force_valid else (lineno, col) - - statement = tree.body[0] - match statement: - case ast.Expr(expr): - match expr: - case ast.BinOp(): - # ast gives these locations for BinOp subexpressions - # ( left_expr ) + ( right_expr ) - # left^^^^^ right^^^^^ - lineno, col = setup_positions(expr.left) - - # First operator character is the first non-space/')' character - lineno, col = increment_until(lineno, col, lambda x: not x.isspace() and x != ')') - - # binary op is 1 or 2 characters long, on the same line, - # before the right subexpression - right_col = col + 1 - if ( - right_col < len(lines[lineno]) - and ( - # operator char should not be in the right subexpression - expr.right.lineno - 2 > lineno or - right_col < normalize(expr.right.lineno - 2, expr.right.col_offset) - ) - and not (ch := lines[lineno][right_col]).isspace() - and ch not in "\\#" - ): - right_col += 1 - - # right_col can be invalid since it is exclusive - return _Anchors(lineno, col, lineno, right_col) - case ast.Subscript(): - # ast gives these locations for value and slice subexpressions - # ( value_expr ) [ slice_expr ] - # value^^^^^ slice^^^^^ - # subscript^^^^^^^^^^^^^^^^^^^^ - - # find left bracket - left_lineno, left_col = setup_positions(expr.value) - left_lineno, left_col = increment_until(left_lineno, left_col, lambda x: x == '[') - # find right bracket (final character of expression) - right_lineno, right_col = setup_positions(expr, force_valid=False) - return _Anchors(left_lineno, left_col, right_lineno, right_col) - case ast.Call(): - # ast gives these locations for function call expressions - # ( func_expr ) (args, kwargs) - # func^^^^^ - # call^^^^^^^^^^^^^^^^^^^^^^^^ - - # find left bracket - left_lineno, left_col = setup_positions(expr.func) - left_lineno, left_col = increment_until(left_lineno, left_col, lambda x: x == '(') - # find right bracket (final character of expression) - right_lineno, right_col = setup_positions(expr, force_valid=False) - return _Anchors(left_lineno, left_col, right_lineno, right_col) - - return None - -_WIDE_CHAR_SPECIFIERS = "WF" - -def _display_width(line, offset=None): - """Calculate the extra amount of width space the given source - code segment might take if it 
were to be displayed on a fixed - width output device. Supports wide unicode characters and emojis.""" - - if offset is None: - offset = len(line) - - # Fast track for ASCII-only strings - if line.isascii(): - return offset - - import unicodedata - - return sum( - 2 if unicodedata.east_asian_width(char) in _WIDE_CHAR_SPECIFIERS else 1 - for char in line[:offset] - ) - - - -class _ExceptionPrintContext: - def __init__(self): - self.seen = set() - self.exception_group_depth = 0 - self.need_close = False - - def indent(self): - return ' ' * (2 * self.exception_group_depth) - - def emit(self, text_gen, margin_char=None): - if margin_char is None: - margin_char = '|' - indent_str = self.indent() - if self.exception_group_depth: - indent_str += margin_char + ' ' - - if isinstance(text_gen, str): - yield textwrap.indent(text_gen, indent_str, lambda line: True) - else: - for text in text_gen: - yield textwrap.indent(text, indent_str, lambda line: True) - - -class TracebackException: - """An exception ready for rendering. - - The traceback module captures enough attributes from the original exception - to this intermediary form to ensure that no references are held, while - still being able to fully print or format it. - - max_group_width and max_group_depth control the formatting of exception - groups. The depth refers to the nesting level of the group, and the width - refers to the size of a single exception group's exceptions array. The - formatted output is truncated when either limit is exceeded. - - Use `from_exception` to create TracebackException instances from exception - objects, or the constructor to create TracebackException instances from - individual components. - - - :attr:`__cause__` A TracebackException of the original *__cause__*. - - :attr:`__context__` A TracebackException of the original *__context__*. - - :attr:`exceptions` For exception groups - a list of TracebackException - instances for the nested *exceptions*. ``None`` for other exceptions. - - :attr:`__suppress_context__` The *__suppress_context__* value from the - original exception. - - :attr:`stack` A `StackSummary` representing the traceback. - - :attr:`exc_type` (deprecated) The class of the original traceback. - - :attr:`exc_type_str` String display of exc_type - - :attr:`filename` For syntax errors - the filename where the error - occurred. - - :attr:`lineno` For syntax errors - the linenumber where the error - occurred. - - :attr:`end_lineno` For syntax errors - the end linenumber where the error - occurred. Can be `None` if not present. - - :attr:`text` For syntax errors - the text where the error - occurred. - - :attr:`offset` For syntax errors - the offset into the text where the - error occurred. - - :attr:`end_offset` For syntax errors - the end offset into the text where - the error occurred. Can be `None` if not present. - - :attr:`msg` For syntax errors - the compiler error message. - """ - - def __init__(self, exc_type, exc_value, exc_traceback, *, limit=None, - lookup_lines=True, capture_locals=False, compact=False, - max_group_width=15, max_group_depth=10, save_exc_type=True, _seen=None): - # NB: we need to accept exc_traceback, exc_value, exc_traceback to - # permit backwards compat with the existing API, otherwise we - # need stub thunk objects just to glue it together. - # Handle loops in __cause__ or __context__. 
- is_recursive_call = _seen is not None - if _seen is None: - _seen = set() - _seen.add(id(exc_value)) - - self.max_group_width = max_group_width - self.max_group_depth = max_group_depth - - self.stack = StackSummary._extract_from_extended_frame_gen( - _walk_tb_with_full_positions(exc_traceback), - limit=limit, lookup_lines=lookup_lines, - capture_locals=capture_locals) - - self._exc_type = exc_type if save_exc_type else None - - # Capture now to permit freeing resources: only complication is in the - # unofficial API _format_final_exc_line - self._str = _safe_string(exc_value, 'exception') - try: - self.__notes__ = getattr(exc_value, '__notes__', None) - except Exception as e: - self.__notes__ = [ - f'Ignored error getting __notes__: {_safe_string(e, '__notes__', repr)}'] - - self._is_syntax_error = False - self._have_exc_type = exc_type is not None - if exc_type is not None: - self.exc_type_qualname = exc_type.__qualname__ - self.exc_type_module = exc_type.__module__ - else: - self.exc_type_qualname = None - self.exc_type_module = None - - if exc_type and issubclass(exc_type, SyntaxError): - # Handle SyntaxError's specially - self.filename = exc_value.filename - lno = exc_value.lineno - self.lineno = str(lno) if lno is not None else None - end_lno = exc_value.end_lineno - self.end_lineno = str(end_lno) if end_lno is not None else None - self.text = exc_value.text - self.offset = exc_value.offset - self.end_offset = exc_value.end_offset - self.msg = exc_value.msg - self._is_syntax_error = True - elif exc_type and issubclass(exc_type, ImportError) and \ - getattr(exc_value, "name_from", None) is not None: - wrong_name = getattr(exc_value, "name_from", None) - suggestion = _compute_suggestion_error(exc_value, exc_traceback, wrong_name) - if suggestion: - self._str += f". Did you mean: '{suggestion}'?" - elif exc_type and issubclass(exc_type, (NameError, AttributeError)) and \ - getattr(exc_value, "name", None) is not None: - wrong_name = getattr(exc_value, "name", None) - suggestion = _compute_suggestion_error(exc_value, exc_traceback, wrong_name) - if suggestion: - self._str += f". Did you mean: '{suggestion}'?" - if issubclass(exc_type, NameError): - wrong_name = getattr(exc_value, "name", None) - if wrong_name is not None and wrong_name in sys.stdlib_module_names: - if suggestion: - self._str += f" Or did you forget to import '{wrong_name}'?" - else: - self._str += f". Did you forget to import '{wrong_name}'?" 
- if lookup_lines: - self._load_lines() - self.__suppress_context__ = \ - exc_value.__suppress_context__ if exc_value is not None else False - - # Convert __cause__ and __context__ to `TracebackExceptions`s, use a - # queue to avoid recursion (only the top-level call gets _seen == None) - if not is_recursive_call: - queue = [(self, exc_value)] - while queue: - te, e = queue.pop() - if (e is not None and e.__cause__ is not None - and id(e.__cause__) not in _seen): - cause = TracebackException( - type(e.__cause__), - e.__cause__, - e.__cause__.__traceback__, - limit=limit, - lookup_lines=lookup_lines, - capture_locals=capture_locals, - max_group_width=max_group_width, - max_group_depth=max_group_depth, - _seen=_seen) - else: - cause = None - - if compact: - need_context = (cause is None and - e is not None and - not e.__suppress_context__) - else: - need_context = True - if (e is not None and e.__context__ is not None - and need_context and id(e.__context__) not in _seen): - context = TracebackException( - type(e.__context__), - e.__context__, - e.__context__.__traceback__, - limit=limit, - lookup_lines=lookup_lines, - capture_locals=capture_locals, - max_group_width=max_group_width, - max_group_depth=max_group_depth, - _seen=_seen) - else: - context = None - - if e is not None and isinstance(e, BaseExceptionGroup): - exceptions = [] - for exc in e.exceptions: - texc = TracebackException( - type(exc), - exc, - exc.__traceback__, - limit=limit, - lookup_lines=lookup_lines, - capture_locals=capture_locals, - max_group_width=max_group_width, - max_group_depth=max_group_depth, - _seen=_seen) - exceptions.append(texc) - else: - exceptions = None - - te.__cause__ = cause - te.__context__ = context - te.exceptions = exceptions - if cause: - queue.append((te.__cause__, e.__cause__)) - if context: - queue.append((te.__context__, e.__context__)) - if exceptions: - queue.extend(zip(te.exceptions, e.exceptions)) - - @classmethod - def from_exception(cls, exc, *args, **kwargs): - """Create a TracebackException from an exception.""" - return cls(type(exc), exc, exc.__traceback__, *args, **kwargs) - - @property - def exc_type(self): - warnings.warn('Deprecated in 3.13. Use exc_type_str instead.', - DeprecationWarning, stacklevel=2) - return self._exc_type - - @property - def exc_type_str(self): - if not self._have_exc_type: - return None - stype = self.exc_type_qualname - smod = self.exc_type_module - if smod not in ("__main__", "builtins"): - if not isinstance(smod, str): - smod = "" - stype = smod + '.' + stype - return stype - - def _load_lines(self): - """Private API. force all lines in the stack to be loaded.""" - for frame in self.stack: - frame.line - - def __eq__(self, other): - if isinstance(other, TracebackException): - return self.__dict__ == other.__dict__ - return NotImplemented - - def __str__(self): - return self._str - - def format_exception_only(self, *, show_group=False, _depth=0, **kwargs): - """Format the exception part of the traceback. - - The return value is a generator of strings, each ending in a newline. - - Generator yields the exception message. - For :exc:`SyntaxError` exceptions, it - also yields (before the exception message) - several lines that (when printed) - display detailed information about where the syntax error occurred. - Following the message, generator also yields - all the exception's ``__notes__``. 
- - When *show_group* is ``True``, and the exception is an instance of - :exc:`BaseExceptionGroup`, the nested exceptions are included as - well, recursively, with indentation relative to their nesting depth. - """ - colorize = kwargs.get("colorize", False) - - indent = 3 * _depth * ' ' - if not self._have_exc_type: - yield indent + _format_final_exc_line(None, self._str, colorize=colorize) - return - - stype = self.exc_type_str - if not self._is_syntax_error: - if _depth > 0: - # Nested exceptions needs correct handling of multiline messages. - formatted = _format_final_exc_line( - stype, self._str, insert_final_newline=False, colorize=colorize - ).split('\n') - yield from [ - indent + l + '\n' - for l in formatted - ] - else: - yield _format_final_exc_line(stype, self._str, colorize=colorize) - else: - yield from [indent + l for l in self._format_syntax_error(stype, colorize=colorize)] - - if ( - isinstance(self.__notes__, collections.abc.Sequence) - and not isinstance(self.__notes__, (str, bytes)) - ): - for note in self.__notes__: - note = _safe_string(note, 'note') - yield from [indent + l + '\n' for l in note.split('\n')] - elif self.__notes__ is not None: - yield indent + "{}\n".format(_safe_string(self.__notes__, '__notes__', func=repr)) - - if self.exceptions and show_group: - for ex in self.exceptions: - yield from ex.format_exception_only(show_group=show_group, _depth=_depth+1, colorize=colorize) - - def _format_syntax_error(self, stype, **kwargs): - """Format SyntaxError exceptions (internal helper).""" - # Show exactly where the problem was found. - colorize = kwargs.get("colorize", False) - filename_suffix = '' - if self.lineno is not None: - if colorize: - yield ' File {}"{}"{}, line {}{}{}\n'.format( - ANSIColors.MAGENTA, - self.filename or "", - ANSIColors.RESET, - ANSIColors.MAGENTA, - self.lineno, - ANSIColors.RESET, - ) - else: - yield ' File "{}", line {}\n'.format( - self.filename or "", self.lineno) - elif self.filename is not None: - filename_suffix = ' ({})'.format(self.filename) - - text = self.text - if isinstance(text, str): - # text = " foo\n" - # rtext = " foo" - # ltext = "foo" - rtext = text.rstrip('\n') - ltext = rtext.lstrip(' \n\f') - spaces = len(rtext) - len(ltext) - if self.offset is None: - yield ' {}\n'.format(ltext) - elif isinstance(self.offset, int): - offset = self.offset - if self.lineno == self.end_lineno: - end_offset = ( - self.end_offset - if ( - isinstance(self.end_offset, int) - and self.end_offset != 0 - ) - else offset - ) - else: - end_offset = len(rtext) + 1 - - if self.text and offset > len(self.text): - offset = len(rtext) + 1 - if self.text and end_offset > len(self.text): - end_offset = len(rtext) + 1 - if offset >= end_offset or end_offset < 0: - end_offset = offset + 1 - - # Convert 1-based column offset to 0-based index into stripped text - colno = offset - 1 - spaces - end_colno = end_offset - 1 - spaces - caretspace = ' ' - if colno >= 0: - # non-space whitespace (likes tabs) must be kept for alignment - caretspace = ((c if c.isspace() else ' ') for c in ltext[:colno]) - start_color = end_color = "" - if colorize: - # colorize from colno to end_colno - ltext = ( - ltext[:colno] + - ANSIColors.BOLD_RED + ltext[colno:end_colno] + ANSIColors.RESET + - ltext[end_colno:] - ) - start_color = ANSIColors.BOLD_RED - end_color = ANSIColors.RESET - yield ' {}\n'.format(ltext) - yield ' {}{}{}{}\n'.format( - "".join(caretspace), - start_color, - ('^' * (end_colno - colno)), - end_color, - ) - else: - yield ' {}\n'.format(ltext) - msg = 
self.msg or "" - if colorize: - yield "{}{}{}: {}{}{}{}\n".format( - ANSIColors.BOLD_MAGENTA, - stype, - ANSIColors.RESET, - ANSIColors.MAGENTA, - msg, - ANSIColors.RESET, - filename_suffix) - else: - yield "{}: {}{}\n".format(stype, msg, filename_suffix) - - def format(self, *, chain=True, _ctx=None, **kwargs): - """Format the exception. - - If chain is not *True*, *__cause__* and *__context__* will not be formatted. - - The return value is a generator of strings, each ending in a newline and - some containing internal newlines. `print_exception` is a wrapper around - this method which just prints the lines to a file. - - The message indicating which exception occurred is always the last - string in the output. - """ - colorize = kwargs.get("colorize", False) - if _ctx is None: - _ctx = _ExceptionPrintContext() - - output = [] - exc = self - if chain: - while exc: - if exc.__cause__ is not None: - chained_msg = _cause_message - chained_exc = exc.__cause__ - elif (exc.__context__ is not None and - not exc.__suppress_context__): - chained_msg = _context_message - chained_exc = exc.__context__ - else: - chained_msg = None - chained_exc = None - - output.append((chained_msg, exc)) - exc = chained_exc - else: - output.append((None, exc)) - - for msg, exc in reversed(output): - if msg is not None: - yield from _ctx.emit(msg) - if exc.exceptions is None: - if exc.stack: - yield from _ctx.emit('Traceback (most recent call last):\n') - yield from _ctx.emit(exc.stack.format(colorize=colorize)) - yield from _ctx.emit(exc.format_exception_only(colorize=colorize)) - elif _ctx.exception_group_depth > self.max_group_depth: - # exception group, but depth exceeds limit - yield from _ctx.emit( - f"... (max_group_depth is {self.max_group_depth})\n") - else: - # format exception group - is_toplevel = (_ctx.exception_group_depth == 0) - if is_toplevel: - _ctx.exception_group_depth += 1 - - if exc.stack: - yield from _ctx.emit( - 'Exception Group Traceback (most recent call last):\n', - margin_char = '+' if is_toplevel else None) - yield from _ctx.emit(exc.stack.format(colorize=colorize)) - - yield from _ctx.emit(exc.format_exception_only(colorize=colorize)) - num_excs = len(exc.exceptions) - if num_excs <= self.max_group_width: - n = num_excs - else: - n = self.max_group_width + 1 - _ctx.need_close = False - for i in range(n): - last_exc = (i == n-1) - if last_exc: - # The closing frame may be added by a recursive call - _ctx.need_close = True - - if self.max_group_width is not None: - truncated = (i >= self.max_group_width) - else: - truncated = False - title = f'{i+1}' if not truncated else '...' 
- yield (_ctx.indent() + - ('+-' if i==0 else ' ') + - f'+---------------- {title} ----------------\n') - _ctx.exception_group_depth += 1 - if not truncated: - yield from exc.exceptions[i].format(chain=chain, _ctx=_ctx, colorize=colorize) - else: - remaining = num_excs - self.max_group_width - plural = 's' if remaining > 1 else '' - yield from _ctx.emit( - f"and {remaining} more exception{plural}\n") - - if last_exc and _ctx.need_close: - yield (_ctx.indent() + - "+------------------------------------\n") - _ctx.need_close = False - _ctx.exception_group_depth -= 1 - - if is_toplevel: - assert _ctx.exception_group_depth == 1 - _ctx.exception_group_depth = 0 - - - def print(self, *, file=None, chain=True, **kwargs): - """Print the result of self.format(chain=chain) to 'file'.""" - colorize = kwargs.get("colorize", False) - if file is None: - file = sys.stderr - for line in self.format(chain=chain, colorize=colorize): - print(line, file=file, end="") - - -_MAX_CANDIDATE_ITEMS = 750 -_MAX_STRING_SIZE = 40 -_MOVE_COST = 2 -_CASE_COST = 1 - - -def _substitution_cost(ch_a, ch_b): - if ch_a == ch_b: - return 0 - if ch_a.lower() == ch_b.lower(): - return _CASE_COST - return _MOVE_COST - - -def _get_safe___dir__(obj): - # Use obj.__dir__() to avoid a TypeError when calling dir(obj). - # See gh-131001 and gh-139933. - try: - d = obj.__dir__() - except TypeError: # when obj is a class - d = type(obj).__dir__(obj) - return sorted(x for x in d if isinstance(x, str)) - - -def _compute_suggestion_error(exc_value, tb, wrong_name): - if wrong_name is None or not isinstance(wrong_name, str): - return None - if isinstance(exc_value, AttributeError): - obj = exc_value.obj - try: - d = _get_safe___dir__(obj) - hide_underscored = (wrong_name[:1] != '_') - if hide_underscored and tb is not None: - while tb.tb_next is not None: - tb = tb.tb_next - frame = tb.tb_frame - if 'self' in frame.f_locals and frame.f_locals['self'] is obj: - hide_underscored = False - if hide_underscored: - d = [x for x in d if x[:1] != '_'] - except Exception: - return None - elif isinstance(exc_value, ImportError): - try: - mod = __import__(exc_value.name) - d = _get_safe___dir__(mod) - if wrong_name[:1] != '_': - d = [x for x in d if x[:1] != '_'] - except Exception: - return None - else: - assert isinstance(exc_value, NameError) - # find most recent frame - if tb is None: - return None - while tb.tb_next is not None: - tb = tb.tb_next - frame = tb.tb_frame - d = ( - list(frame.f_locals) - + list(frame.f_globals) - + list(frame.f_builtins) - ) - d = [x for x in d if isinstance(x, str)] - - # Check first if we are in a method and the instance - # has the wrong name as attribute - if 'self' in frame.f_locals: - self = frame.f_locals['self'] - try: - has_wrong_name = hasattr(self, wrong_name) - except Exception: - has_wrong_name = False - if has_wrong_name: - return f"self.{wrong_name}" - - try: - import _suggestions - except ImportError: - pass - else: - return _suggestions._generate_suggestions(d, wrong_name) - - # Compute closest match - - if len(d) > _MAX_CANDIDATE_ITEMS: - return None - wrong_name_len = len(wrong_name) - if wrong_name_len > _MAX_STRING_SIZE: - return None - best_distance = wrong_name_len - suggestion = None - for possible_name in d: - if possible_name == wrong_name: - # A missing attribute is "found". Don't suggest it (see GH-88821). - continue - # No more than 1/3 of the involved characters should need changed. 
- max_distance = (len(possible_name) + wrong_name_len + 3) * _MOVE_COST // 6 - # Don't take matches we've already beaten. - max_distance = min(max_distance, best_distance - 1) - current_distance = _levenshtein_distance(wrong_name, possible_name, max_distance) - if current_distance > max_distance: - continue - if not suggestion or current_distance < best_distance: - suggestion = possible_name - best_distance = current_distance - return suggestion - - -def _levenshtein_distance(a, b, max_cost): - # A Python implementation of Python/suggestions.c:levenshtein_distance. - - # Both strings are the same - if a == b: - return 0 - - # Trim away common affixes - pre = 0 - while a[pre:] and b[pre:] and a[pre] == b[pre]: - pre += 1 - a = a[pre:] - b = b[pre:] - post = 0 - while a[:post or None] and b[:post or None] and a[post-1] == b[post-1]: - post -= 1 - a = a[:post or None] - b = b[:post or None] - if not a or not b: - return _MOVE_COST * (len(a) + len(b)) - if len(a) > _MAX_STRING_SIZE or len(b) > _MAX_STRING_SIZE: - return max_cost + 1 - - # Prefer shorter buffer - if len(b) < len(a): - a, b = b, a - - # Quick fail when a match is impossible - if (len(b) - len(a)) * _MOVE_COST > max_cost: - return max_cost + 1 - - # Instead of producing the whole traditional len(a)-by-len(b) - # matrix, we can update just one row in place. - # Initialize the buffer row - row = list(range(_MOVE_COST, _MOVE_COST * (len(a) + 1), _MOVE_COST)) - - result = 0 - for bindex in range(len(b)): - bchar = b[bindex] - distance = result = bindex * _MOVE_COST - minimum = sys.maxsize - for index in range(len(a)): - # 1) Previous distance in this row is cost(b[:b_index], a[:index]) - substitute = distance + _substitution_cost(bchar, a[index]) - # 2) cost(b[:b_index], a[:index+1]) from previous row - distance = row[index] - # 3) existing result is cost(b[:b_index+1], a[index]) - - insert_delete = min(result, distance) + _MOVE_COST - result = min(insert_delete, substitute) - - # cost(b[:b_index+1], a[:index+1]) - row[index] = result - if result < minimum: - minimum = result - if minimum > max_cost: - # Everything in this row is too big, so bail early. - return max_cost + 1 - return result diff --git a/Python313_13_x64_Template/Lib/turtle.py b/Python313_13_x64_Template/Lib/turtle.py deleted file mode 100644 index ff2002cc..00000000 --- a/Python313_13_x64_Template/Lib/turtle.py +++ /dev/null @@ -1,4199 +0,0 @@ -# -# turtle.py: a Tkinter based turtle graphics module for Python -# Version 1.1b - 4. 5. 2009 -# -# Copyright (C) 2006 - 2010 Gregor Lingl -# email: glingl@aon.at -# -# This software is provided 'as-is', without any express or implied -# warranty. In no event will the authors be held liable for any damages -# arising from the use of this software. -# -# Permission is granted to anyone to use this software for any purpose, -# including commercial applications, and to alter it and redistribute it -# freely, subject to the following restrictions: -# -# 1. The origin of this software must not be misrepresented; you must not -# claim that you wrote the original software. If you use this software -# in a product, an acknowledgment in the product documentation would be -# appreciated but is not required. -# 2. Altered source versions must be plainly marked as such, and must not be -# misrepresented as being the original software. -# 3. This notice may not be removed or altered from any source distribution. - -""" -Turtle graphics is a popular way for introducing programming to -kids. 
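For review context, the `_substitution_cost` helper in the traceback.py hunk above is what makes the suggestion machinery prefer case-only mismatches over genuinely different characters. A standalone check of that cost model (constants copied from the deleted file):

```python
# Cost model used by _levenshtein_distance in the hunk above: a plain
# substitution costs 2 (_MOVE_COST), a case-only change costs 1 (_CASE_COST),
# so 'foo' vs 'Foo' ranks as a nearer miss than 'foo' vs 'bar'.
_MOVE_COST, _CASE_COST = 2, 1

def substitution_cost(ch_a, ch_b):
    if ch_a == ch_b:
        return 0
    if ch_a.lower() == ch_b.lower():
        return _CASE_COST
    return _MOVE_COST

print(substitution_cost("a", "a"))   # 0
print(substitution_cost("a", "A"))   # 1
print(substitution_cost("a", "b"))   # 2
```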
It was part of the original Logo programming language developed -by Wally Feurzig and Seymour Papert in 1966. - -Imagine a robotic turtle starting at (0, 0) in the x-y plane. After an ``import turtle``, give it -the command turtle.forward(15), and it moves (on-screen!) 15 pixels in -the direction it is facing, drawing a line as it moves. Give it the -command turtle.right(25), and it rotates in-place 25 degrees clockwise. - -By combining together these and similar commands, intricate shapes and -pictures can easily be drawn. - ------ turtle.py - -This module is an extended reimplementation of turtle.py from the -Python standard distribution up to Python 2.5. (See: https://www.python.org) - -It tries to keep the merits of turtle.py and to be (nearly) 100% -compatible with it. This means in the first place to enable the -learning programmer to use all the commands, classes and methods -interactively when using the module from within IDLE run with -the -n switch. - -Roughly it has the following features added: - -- Better animation of the turtle movements, especially of turning the - turtle. So the turtles can more easily be used as a visual feedback - instrument by the (beginning) programmer. - -- Different turtle shapes, gif-images as turtle shapes, user defined - and user controllable turtle shapes, among them compound - (multicolored) shapes. Turtle shapes can be stretched and tilted, which - makes turtles very versatile geometrical objects. - -- Fine control over turtle movement and screen updates via delay(), - and enhanced tracer() and speed() methods. - -- Aliases for the most commonly used commands, like fd for forward etc., - following the early Logo traditions. This reduces the boring work of - typing long sequences of commands, which often occur in a natural way - when kids try to program fancy pictures on their first encounter with - turtle graphics. - -- Turtles now have an undo()-method with configurable undo-buffer. - -- Some simple commands/methods for creating event driven programs - (mouse-, key-, timer-events). Especially useful for programming games. - -- A scrollable Canvas class. The default scrollable Canvas can be - extended interactively as needed while playing around with the turtle(s). - -- A TurtleScreen class with methods controlling background color or - background image, window and canvas size and other properties of the - TurtleScreen. - -- There is a method, setworldcoordinates(), to install a user defined - coordinate-system for the TurtleScreen. - -- The implementation uses a 2-vector class named Vec2D, derived from tuple. - This class is public, so it can be imported by the application programmer, - which makes certain types of computations very natural and compact. - -- Appearance of the TurtleScreen and the Turtles at startup/import can be - configured by means of a turtle.cfg configuration file. - The default configuration mimics the appearance of the old turtle module. - -- If configured appropriately the module reads in docstrings from a docstring - dictionary in some different language, supplied separately and replaces - the English ones by those read in. There is a utility function - write_docstringdict() to write a dictionary with the original (English) - docstrings to disc, so it can serve as a template for translations. - -Behind the scenes there are some features included with possible -extensions in mind. These will be commented and documented elsewhere. 
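As quick orientation for reviewers, a minimal sketch of the command style the module docstring above describes (assumes a Tk display is available; `turtle.done()` enters the mainloop):

```python
import turtle

t = turtle.Turtle()
for _ in range(4):          # draw a square
    t.forward(100)          # fd/forward: move in the heading direction
    t.right(90)             # rt/right: rotate clockwise by 90 degrees
t.undo()                    # undo the last action (configurable buffer)
turtle.done()               # enter the Tk mainloop
```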
-""" - -import tkinter as TK -import types -import math -import time -import inspect -import sys - -from os.path import isfile, split, join -from copy import deepcopy -from tkinter import simpledialog - -_tg_classes = ['ScrolledCanvas', 'TurtleScreen', 'Screen', - 'RawTurtle', 'Turtle', 'RawPen', 'Pen', 'Shape', 'Vec2D'] -_tg_screen_functions = ['addshape', 'bgcolor', 'bgpic', 'bye', - 'clearscreen', 'colormode', 'delay', 'exitonclick', 'getcanvas', - 'getshapes', 'listen', 'mainloop', 'mode', 'numinput', - 'onkey', 'onkeypress', 'onkeyrelease', 'onscreenclick', 'ontimer', - 'register_shape', 'resetscreen', 'screensize', 'setup', - 'setworldcoordinates', 'textinput', 'title', 'tracer', 'turtles', 'update', - 'window_height', 'window_width'] -_tg_turtle_functions = ['back', 'backward', 'begin_fill', 'begin_poly', 'bk', - 'circle', 'clear', 'clearstamp', 'clearstamps', 'clone', 'color', - 'degrees', 'distance', 'dot', 'down', 'end_fill', 'end_poly', 'fd', - 'fillcolor', 'filling', 'forward', 'get_poly', 'getpen', 'getscreen', 'get_shapepoly', - 'getturtle', 'goto', 'heading', 'hideturtle', 'home', 'ht', 'isdown', - 'isvisible', 'left', 'lt', 'onclick', 'ondrag', 'onrelease', 'pd', - 'pen', 'pencolor', 'pendown', 'pensize', 'penup', 'pos', 'position', - 'pu', 'radians', 'right', 'reset', 'resizemode', 'rt', - 'seth', 'setheading', 'setpos', 'setposition', - 'setundobuffer', 'setx', 'sety', 'shape', 'shapesize', 'shapetransform', 'shearfactor', 'showturtle', - 'speed', 'st', 'stamp', 'teleport', 'tilt', 'tiltangle', 'towards', - 'turtlesize', 'undo', 'undobufferentries', 'up', 'width', - 'write', 'xcor', 'ycor'] -_tg_utilities = ['write_docstringdict', 'done'] - -__all__ = (_tg_classes + _tg_screen_functions + _tg_turtle_functions + - _tg_utilities + ['Terminator']) - -_alias_list = ['addshape', 'backward', 'bk', 'fd', 'ht', 'lt', 'pd', 'pos', - 'pu', 'rt', 'seth', 'setpos', 'setposition', 'st', - 'turtlesize', 'up', 'width'] - -_CFG = {"width" : 0.5, # Screen - "height" : 0.75, - "canvwidth" : 400, - "canvheight": 300, - "leftright": None, - "topbottom": None, - "mode": "standard", # TurtleScreen - "colormode": 1.0, - "delay": 10, - "undobuffersize": 1000, # RawTurtle - "shape": "classic", - "pencolor" : "black", - "fillcolor" : "black", - "resizemode" : "noresize", - "visible" : True, - "language": "english", # docstrings - "exampleturtle": "turtle", - "examplescreen": "screen", - "title": "Python Turtle Graphics", - "using_IDLE": False - } - -def config_dict(filename): - """Convert content of config-file into dictionary.""" - with open(filename, "r") as f: - cfglines = f.readlines() - cfgdict = {} - for line in cfglines: - line = line.strip() - if not line or line.startswith("#"): - continue - try: - key, value = line.split("=") - except ValueError: - print("Bad line in config-file %s:\n%s" % (filename,line)) - continue - key = key.strip() - value = value.strip() - if value in ["True", "False", "None", "''", '""']: - value = eval(value) - else: - try: - if "." in value: - value = float(value) - else: - value = int(value) - except ValueError: - pass # value need not be converted - cfgdict[key] = value - return cfgdict - -def readconfig(cfgdict): - """Read config-files, change configuration-dict accordingly. - - If there is a turtle.cfg file in the current working directory, - read it from there. If this contains an importconfig-value, - say 'myway', construct filename turtle_mayway.cfg else use - turtle.cfg and read it from the import-directory, where - turtle.py is located. 
- Update configuration dictionary first according to config-file, - in the import directory, then according to config-file in the - current working directory. - If no config-file is found, the default configuration is used. - """ - default_cfg = "turtle.cfg" - cfgdict1 = {} - cfgdict2 = {} - if isfile(default_cfg): - cfgdict1 = config_dict(default_cfg) - if "importconfig" in cfgdict1: - default_cfg = "turtle_%s.cfg" % cfgdict1["importconfig"] - try: - head, tail = split(__file__) - cfg_file2 = join(head, default_cfg) - except Exception: - cfg_file2 = "" - if isfile(cfg_file2): - cfgdict2 = config_dict(cfg_file2) - _CFG.update(cfgdict2) - _CFG.update(cfgdict1) - -try: - readconfig(_CFG) -except Exception: - print ("No configfile read, reason unknown") - - -class Vec2D(tuple): - """A 2 dimensional vector class, used as a helper class - for implementing turtle graphics. - May be useful for turtle graphics programs also. - Derived from tuple, so a vector is a tuple! - - Provides (for a, b vectors, k number): - a+b vector addition - a-b vector subtraction - a*b inner product - k*a and a*k multiplication with scalar - |a| absolute value of a - a.rotate(angle) rotation - """ - def __new__(cls, x, y): - return tuple.__new__(cls, (x, y)) - def __add__(self, other): - return Vec2D(self[0]+other[0], self[1]+other[1]) - def __mul__(self, other): - if isinstance(other, Vec2D): - return self[0]*other[0]+self[1]*other[1] - return Vec2D(self[0]*other, self[1]*other) - def __rmul__(self, other): - if isinstance(other, int) or isinstance(other, float): - return Vec2D(self[0]*other, self[1]*other) - return NotImplemented - def __sub__(self, other): - return Vec2D(self[0]-other[0], self[1]-other[1]) - def __neg__(self): - return Vec2D(-self[0], -self[1]) - def __abs__(self): - return math.hypot(*self) - def rotate(self, angle): - """rotate self counterclockwise by angle - """ - perp = Vec2D(-self[1], self[0]) - angle = math.radians(angle) - c, s = math.cos(angle), math.sin(angle) - return Vec2D(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s) - def __getnewargs__(self): - return (self[0], self[1]) - def __repr__(self): - return "(%.2f,%.2f)" % self - - -############################################################################## -### From here up to line : Tkinter - Interface for turtle.py ### -### May be replaced by an interface to some different graphics toolkit ### -############################################################################## - -## helper functions for Scrolled Canvas, to forward Canvas-methods -## to ScrolledCanvas class - -def __methodDict(cls, _dict): - """helper function for Scrolled Canvas""" - baseList = list(cls.__bases__) - baseList.reverse() - for _super in baseList: - __methodDict(_super, _dict) - for key, value in cls.__dict__.items(): - if type(value) == types.FunctionType: - _dict[key] = value - -def __methods(cls): - """helper function for Scrolled Canvas""" - _dict = {} - __methodDict(cls, _dict) - return _dict.keys() - -__stringBody = ( - 'def %(method)s(self, *args, **kw): return ' + - 'self.%(attribute)s.%(method)s(*args, **kw)') - -def __forwardmethods(fromClass, toClass, toPart, exclude = ()): - ### MANY CHANGES ### - _dict_1 = {} - __methodDict(toClass, _dict_1) - _dict = {} - mfc = __methods(fromClass) - for ex in _dict_1.keys(): - if ex[:1] == '_' or ex[-1:] == '_' or ex in exclude or ex in mfc: - pass - else: - _dict[ex] = _dict_1[ex] - - for method, func in _dict.items(): - d = {'method': method, 'func': func} - if isinstance(toPart, str): - execString = \ - 
                __stringBody % {'method' : method, 'attribute' : toPart}
-            exec(execString, d)
-        setattr(fromClass, method, d[method])   ### NEWU!
-
-
-class ScrolledCanvas(TK.Frame):
-    """Modeled after the scrolled canvas class from Grayson's Tkinter book.
-
-    Used as the default canvas, which pops up automatically when
-    using turtle graphics functions or the Turtle class.
-    """
-    def __init__(self, master, width=500, height=350,
-                 canvwidth=600, canvheight=500):
-        TK.Frame.__init__(self, master, width=width, height=height)
-        self._rootwindow = self.winfo_toplevel()
-        self.width, self.height = width, height
-        self.canvwidth, self.canvheight = canvwidth, canvheight
-        self.bg = "white"
-        self._canvas = TK.Canvas(master, width=width, height=height,
-                                 bg=self.bg, relief=TK.SUNKEN, borderwidth=2)
-        self.hscroll = TK.Scrollbar(master, command=self._canvas.xview,
-                                    orient=TK.HORIZONTAL)
-        self.vscroll = TK.Scrollbar(master, command=self._canvas.yview)
-        self._canvas.configure(xscrollcommand=self.hscroll.set,
-                               yscrollcommand=self.vscroll.set)
-        self.rowconfigure(0, weight=1, minsize=0)
-        self.columnconfigure(0, weight=1, minsize=0)
-        self._canvas.grid(padx=1, in_ = self, pady=1, row=0,
-                column=0, rowspan=1, columnspan=1, sticky='news')
-        self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
-                column=1, rowspan=1, columnspan=1, sticky='news')
-        self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
-                column=0, rowspan=1, columnspan=1, sticky='news')
-        self.reset()
-        self._rootwindow.bind('<Configure>', self.onResize)
-
-    def reset(self, canvwidth=None, canvheight=None, bg = None):
-        """Adjust canvas and scrollbars according to given canvas size."""
-        if canvwidth:
-            self.canvwidth = canvwidth
-        if canvheight:
-            self.canvheight = canvheight
-        if bg:
-            self.bg = bg
-        self._canvas.config(bg=bg,
-                        scrollregion=(-self.canvwidth//2, -self.canvheight//2,
-                                       self.canvwidth//2, self.canvheight//2))
-        self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) /
-                                                               self.canvwidth)
-        self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) /
-                                                              self.canvheight)
-        self.adjustScrolls()
-
-
-    def adjustScrolls(self):
-        """ Adjust scrollbars according to window- and canvas-size.
-        """
-        cwidth = self._canvas.winfo_width()
-        cheight = self._canvas.winfo_height()
-        self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)
-        self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)
-        if cwidth < self.canvwidth or cheight < self.canvheight:
-            self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
-                              column=0, rowspan=1, columnspan=1, sticky='news')
-            self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
-                              column=1, rowspan=1, columnspan=1, sticky='news')
-        else:
-            self.hscroll.grid_forget()
-            self.vscroll.grid_forget()
-
-    def onResize(self, event):
-        """self-explanatory"""
-        self.adjustScrolls()
-
-    def bbox(self, *args):
-        """ 'forward' method, which canvas itself has inherited...
-        """
-        return self._canvas.bbox(*args)
-
-    def cget(self, *args, **kwargs):
-        """ 'forward' method, which canvas itself has inherited...
-        """
-        return self._canvas.cget(*args, **kwargs)
-
-    def config(self, *args, **kwargs):
-        """ 'forward' method, which canvas itself has inherited...
-        """
-        self._canvas.config(*args, **kwargs)
-
-    def bind(self, *args, **kwargs):
-        """ 'forward' method, which canvas itself has inherited...
-        """
-        self._canvas.bind(*args, **kwargs)
-
-    def unbind(self, *args, **kwargs):
-        """ 'forward' method, which canvas itself has inherited...
- """ - self._canvas.unbind(*args, **kwargs) - - def focus_force(self): - """ 'forward' method, which canvas itself has inherited... - """ - self._canvas.focus_force() - -__forwardmethods(ScrolledCanvas, TK.Canvas, '_canvas') - - -class _Root(TK.Tk): - """Root class for Screen based on Tkinter.""" - def __init__(self): - TK.Tk.__init__(self) - - def setupcanvas(self, width, height, cwidth, cheight): - self._canvas = ScrolledCanvas(self, width, height, cwidth, cheight) - self._canvas.pack(expand=1, fill="both") - - def _getcanvas(self): - return self._canvas - - def set_geometry(self, width, height, startx, starty): - self.geometry("%dx%d%+d%+d"%(width, height, startx, starty)) - - def ondestroy(self, destroy): - self.wm_protocol("WM_DELETE_WINDOW", destroy) - - def win_width(self): - return self.winfo_screenwidth() - - def win_height(self): - return self.winfo_screenheight() - -Canvas = TK.Canvas - - -class TurtleScreenBase(object): - """Provide the basic graphics functionality. - Interface between Tkinter and turtle.py. - - To port turtle.py to some different graphics toolkit - a corresponding TurtleScreenBase class has to be implemented. - """ - - def _blankimage(self): - """return a blank image object - """ - img = TK.PhotoImage(width=1, height=1, master=self.cv) - img.blank() - return img - - def _image(self, filename): - """return an image object containing the - imagedata from a gif-file named filename. - """ - return TK.PhotoImage(file=filename, master=self.cv) - - def __init__(self, cv): - self.cv = cv - if isinstance(cv, ScrolledCanvas): - w = self.cv.canvwidth - h = self.cv.canvheight - else: # expected: ordinary TK.Canvas - w = int(self.cv.cget("width")) - h = int(self.cv.cget("height")) - self.cv.config(scrollregion = (-w//2, -h//2, w//2, h//2 )) - self.canvwidth = w - self.canvheight = h - self.xscale = self.yscale = 1.0 - - def _createpoly(self): - """Create an invisible polygon item on canvas self.cv) - """ - return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="") - - def _drawpoly(self, polyitem, coordlist, fill=None, - outline=None, width=None, top=False): - """Configure polygonitem polyitem according to provided - arguments: - coordlist is sequence of coordinates - fill is filling color - outline is outline color - top is a boolean value, which specifies if polyitem - will be put on top of the canvas' displaylist so it - will not be covered by other items. - """ - cl = [] - for x, y in coordlist: - cl.append(x * self.xscale) - cl.append(-y * self.yscale) - self.cv.coords(polyitem, *cl) - if fill is not None: - self.cv.itemconfigure(polyitem, fill=fill) - if outline is not None: - self.cv.itemconfigure(polyitem, outline=outline) - if width is not None: - self.cv.itemconfigure(polyitem, width=width) - if top: - self.cv.tag_raise(polyitem) - - def _createline(self): - """Create an invisible line item on canvas self.cv) - """ - return self.cv.create_line(0, 0, 0, 0, fill="", width=2, - capstyle = TK.ROUND) - - def _drawline(self, lineitem, coordlist=None, - fill=None, width=None, top=False): - """Configure lineitem according to provided arguments: - coordlist is sequence of coordinates - fill is drawing color - width is width of drawn line. - top is a boolean value, which specifies if polyitem - will be put on top of the canvas' displaylist so it - will not be covered by other items. 
-        """
-        if coordlist is not None:
-            cl = []
-            for x, y in coordlist:
-                cl.append(x * self.xscale)
-                cl.append(-y * self.yscale)
-            self.cv.coords(lineitem, *cl)
-        if fill is not None:
-            self.cv.itemconfigure(lineitem, fill=fill)
-        if width is not None:
-            self.cv.itemconfigure(lineitem, width=width)
-        if top:
-            self.cv.tag_raise(lineitem)
-
-    def _delete(self, item):
-        """Delete graphics item from canvas.
-        If item is "all" delete all graphics items.
-        """
-        self.cv.delete(item)
-
-    def _update(self):
-        """Redraw graphics items on canvas
-        """
-        self.cv.update()
-
-    def _delay(self, delay):
-        """Delay subsequent canvas actions for delay ms."""
-        self.cv.after(delay)
-
-    def _iscolorstring(self, color):
-        """Check if the string color is a legal Tkinter color string.
-        """
-        try:
-            rgb = self.cv.winfo_rgb(color)
-            ok = True
-        except TK.TclError:
-            ok = False
-        return ok
-
-    def _bgcolor(self, color=None):
-        """Set canvas' backgroundcolor if color is not None,
-        else return backgroundcolor."""
-        if color is not None:
-            self.cv.config(bg = color)
-            self._update()
-        else:
-            return self.cv.cget("bg")
-
-    def _write(self, pos, txt, align, font, pencolor):
-        """Write txt at pos in canvas with specified font
-        and color.
-        Return text item and x-coord of right bottom corner
-        of text's bounding box."""
-        x, y = pos
-        x = x * self.xscale
-        y = y * self.yscale
-        anchor = {"left":"sw", "center":"s", "right":"se" }
-        item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align],
-                                        fill = pencolor, font = font)
-        x0, y0, x1, y1 = self.cv.bbox(item)
-        return item, x1-1
-
-    def _onclick(self, item, fun, num=1, add=None):
-        """Bind fun to mouse-click event on turtle.
-        fun must be a function with two arguments, the coordinates
-        of the clicked point on the canvas.
-        num, the number of the mouse-button defaults to 1
-        """
-        if fun is None:
-            self.cv.tag_unbind(item, "<Button-%s>" % num)
-        else:
-            def eventfun(event):
-                x, y = (self.cv.canvasx(event.x)/self.xscale,
-                        -self.cv.canvasy(event.y)/self.yscale)
-                fun(x, y)
-            self.cv.tag_bind(item, "<Button-%s>" % num, eventfun, add)
-
-    def _onrelease(self, item, fun, num=1, add=None):
-        """Bind fun to mouse-button-release event on turtle.
-        fun must be a function with two arguments, the coordinates
-        of the point on the canvas where mouse button is released.
-        num, the number of the mouse-button defaults to 1
-
-        If a turtle is clicked, first _onclick-event will be performed,
-        then _onscreenclick-event.
-        """
-        if fun is None:
-            self.cv.tag_unbind(item, "<Button%s-ButtonRelease>" % num)
-        else:
-            def eventfun(event):
-                x, y = (self.cv.canvasx(event.x)/self.xscale,
-                        -self.cv.canvasy(event.y)/self.yscale)
-                fun(x, y)
-            self.cv.tag_bind(item, "<Button%s-ButtonRelease>" % num,
-                             eventfun, add)
-
-    def _ondrag(self, item, fun, num=1, add=None):
-        """Bind fun to mouse-move-event (with pressed mouse button) on turtle.
-        fun must be a function with two arguments, the coordinates of the
-        actual mouse position on the canvas.
-        num, the number of the mouse-button defaults to 1
-
-        Every sequence of mouse-move-events on a turtle is preceded by a
-        mouse-click event on that turtle.
-        """
-        if fun is None:
-            self.cv.tag_unbind(item, "<Button%s-Motion>" % num)
-        else:
-            def eventfun(event):
-                try:
-                    x, y = (self.cv.canvasx(event.x)/self.xscale,
-                            -self.cv.canvasy(event.y)/self.yscale)
-                    fun(x, y)
-                except Exception:
-                    pass
-            self.cv.tag_bind(item, "<Button%s-Motion>" % num, eventfun, add)
-
-    def _onscreenclick(self, fun, num=1, add=None):
-        """Bind fun to mouse-click event on canvas.
-        fun must be a function with two arguments, the coordinates
-        of the clicked point on the canvas.
-        num, the number of the mouse-button defaults to 1
-
-        If a turtle is clicked, first _onclick-event will be performed,
-        then _onscreenclick-event.
-        """
-        if fun is None:
-            self.cv.unbind("<Button-%s>" % num)
-        else:
-            def eventfun(event):
-                x, y = (self.cv.canvasx(event.x)/self.xscale,
-                        -self.cv.canvasy(event.y)/self.yscale)
-                fun(x, y)
-            self.cv.bind("<Button-%s>" % num, eventfun, add)
-
-    def _onkeyrelease(self, fun, key):
-        """Bind fun to key-release event of key.
-        Canvas must have focus. See method listen.
-        """
-        if fun is None:
-            self.cv.unbind("<KeyRelease-%s>" % key, None)
-        else:
-            def eventfun(event):
-                fun()
-            self.cv.bind("<KeyRelease-%s>" % key, eventfun)
-
-    def _onkeypress(self, fun, key=None):
-        """If key is given, bind fun to key-press event of key.
-        Otherwise bind fun to any key-press.
-        Canvas must have focus. See method listen.
-        """
-        if fun is None:
-            if key is None:
-                self.cv.unbind("<KeyPress>", None)
-            else:
-                self.cv.unbind("<KeyPress-%s>" % key, None)
-        else:
-            def eventfun(event):
-                fun()
-            if key is None:
-                self.cv.bind("<KeyPress>", eventfun)
-            else:
-                self.cv.bind("<KeyPress-%s>" % key, eventfun)
-
-    def _listen(self):
-        """Set focus on canvas (in order to collect key-events)
-        """
-        self.cv.focus_force()
-
-    def _ontimer(self, fun, t):
-        """Install a timer, which calls fun after t milliseconds.
-        """
-        if t == 0:
-            self.cv.after_idle(fun)
-        else:
-            self.cv.after(t, fun)
-
-    def _createimage(self, image):
-        """Create and return image item on canvas.
-        """
-        return self.cv.create_image(0, 0, image=image)
-
-    def _drawimage(self, item, pos, image):
-        """Configure image item as to draw image object
-        at position (x,y) on canvas)
-        """
-        x, y = pos
-        self.cv.coords(item, (x * self.xscale, -y * self.yscale))
-        self.cv.itemconfig(item, image=image)
-
-    def _setbgpic(self, item, image):
-        """Configure image item as to draw image object
-        at center of canvas. Set item to the first item
-        in the displaylist, so it will be drawn below
-        any other item."""
-        self.cv.itemconfig(item, image=image)
-        self.cv.tag_lower(item)
-
-    def _type(self, item):
-        """Return 'line' or 'polygon' or 'image' depending on
-        type of item.
-        """
-        return self.cv.type(item)
-
-    def _pointlist(self, item):
-        """returns list of coordinate-pairs of points of item
-        Example (for insiders):
-        >>> from turtle import *
-        >>> getscreen()._pointlist(getturtle().turtle._item)
-        [(0.0, 9.9999999999999982), (0.0, -9.9999999999999982),
-        (9.9999999999999982, 0.0)]
-        >>> """
-        cl = self.cv.coords(item)
-        pl = [(cl[i], -cl[i+1]) for i in range(0, len(cl), 2)]
-        return pl
-
-    def _setscrollregion(self, srx1, sry1, srx2, sry2):
-        self.cv.config(scrollregion=(srx1, sry1, srx2, sry2))
-
-    def _rescale(self, xscalefactor, yscalefactor):
-        items = self.cv.find_all()
-        for item in items:
-            coordinates = list(self.cv.coords(item))
-            newcoordlist = []
-            while coordinates:
-                x, y = coordinates[:2]
-                newcoordlist.append(x * xscalefactor)
-                newcoordlist.append(y * yscalefactor)
-                coordinates = coordinates[2:]
-            self.cv.coords(item, *newcoordlist)
-
-    def _resize(self, canvwidth=None, canvheight=None, bg=None):
-        """Resize the canvas the turtles are drawing on. Does
-        not alter the drawing window.
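A note on the `_drawpoly`/`_drawline` helpers above: turtle coordinates are mapped to Tk canvas coordinates by scaling x and negating y, because Tk's canvas y-axis grows downward. A dependency-free sketch of that transform (the `to_canvas` name is illustrative, not part of the module):

```python
def to_canvas(points, xscale=1.0, yscale=1.0):
    """Flatten (x, y) turtle points into Tk canvas coordinates."""
    flat = []
    for x, y in points:
        flat.append(x * xscale)
        flat.append(-y * yscale)   # flip the y-axis for Tk
    return flat

print(to_canvas([(0, 10), (10, 0)]))   # [0.0, -10.0, 10.0, 0.0]
```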
- """ - # needs amendment - if not isinstance(self.cv, ScrolledCanvas): - return self.canvwidth, self.canvheight - if canvwidth is canvheight is bg is None: - return self.cv.canvwidth, self.cv.canvheight - if canvwidth is not None: - self.canvwidth = canvwidth - if canvheight is not None: - self.canvheight = canvheight - self.cv.reset(canvwidth, canvheight, bg) - - def _window_size(self): - """ Return the width and height of the turtle window. - """ - width = self.cv.winfo_width() - if width <= 1: # the window isn't managed by a geometry manager - width = self.cv['width'] - height = self.cv.winfo_height() - if height <= 1: # the window isn't managed by a geometry manager - height = self.cv['height'] - return width, height - - def mainloop(self): - """Starts event loop - calling Tkinter's mainloop function. - - No argument. - - Must be last statement in a turtle graphics program. - Must NOT be used if a script is run from within IDLE in -n mode - (No subprocess) - for interactive use of turtle graphics. - - Example (for a TurtleScreen instance named screen): - >>> screen.mainloop() - - """ - self.cv.tk.mainloop() - - def textinput(self, title, prompt): - """Pop up a dialog window for input of a string. - - Arguments: title is the title of the dialog window, - prompt is a text mostly describing what information to input. - - Return the string input - If the dialog is canceled, return None. - - Example (for a TurtleScreen instance named screen): - >>> screen.textinput("NIM", "Name of first player:") - - """ - return simpledialog.askstring(title, prompt, parent=self.cv) - - def numinput(self, title, prompt, default=None, minval=None, maxval=None): - """Pop up a dialog window for input of a number. - - Arguments: title is the title of the dialog window, - prompt is a text mostly describing what numerical information to input. - default: default value - minval: minimum value for input - maxval: maximum value for input - - The number input must be in the range minval .. maxval if these are - given. If not, a hint is issued and the dialog remains open for - correction. Return the number input. - If the dialog is canceled, return None. - - Example (for a TurtleScreen instance named screen): - >>> screen.numinput("Poker", "Your stakes:", 1000, minval=10, maxval=10000) - - """ - return simpledialog.askfloat(title, prompt, initialvalue=default, - minvalue=minval, maxvalue=maxval, - parent=self.cv) - - -############################################################################## -### End of Tkinter - interface ### -############################################################################## - - -class Terminator (Exception): - """Will be raised in TurtleScreen.update, if _RUNNING becomes False. - - This stops execution of a turtle graphics script. - Main purpose: use in the Demo-Viewer turtle.Demo.py. - """ - pass - - -class TurtleGraphicsError(Exception): - """Some TurtleGraphics Error - """ - - -class Shape(object): - """Data structure modeling shapes. - - attribute _type is one of "polygon", "image", "compound" - attribute _data is - depending on _type a poygon-tuple, - an image or a list constructed using the addcomponent method. 
- """ - def __init__(self, type_, data=None): - self._type = type_ - if type_ == "polygon": - if isinstance(data, list): - data = tuple(data) - elif type_ == "image": - if isinstance(data, str): - if data.lower().endswith(".gif") and isfile(data): - data = TurtleScreen._image(data) - # else data assumed to be PhotoImage - elif type_ == "compound": - data = [] - else: - raise TurtleGraphicsError("There is no shape type %s" % type_) - self._data = data - - def addcomponent(self, poly, fill, outline=None): - """Add component to a shape of type compound. - - Arguments: poly is a polygon, i. e. a tuple of number pairs. - fill is the fillcolor of the component, - outline is the outline color of the component. - - call (for a Shapeobject namend s): - -- s.addcomponent(((0,0), (10,10), (-10,10)), "red", "blue") - - Example: - >>> poly = ((0,0),(10,-5),(0,10),(-10,-5)) - >>> s = Shape("compound") - >>> s.addcomponent(poly, "red", "blue") - >>> # .. add more components and then use register_shape() - """ - if self._type != "compound": - raise TurtleGraphicsError("Cannot add component to %s Shape" - % self._type) - if outline is None: - outline = fill - self._data.append([poly, fill, outline]) - - -class Tbuffer(object): - """Ring buffer used as undobuffer for RawTurtle objects.""" - def __init__(self, bufsize=10): - self.bufsize = bufsize - self.buffer = [[None]] * bufsize - self.ptr = -1 - self.cumulate = False - def reset(self, bufsize=None): - if bufsize is None: - for i in range(self.bufsize): - self.buffer[i] = [None] - else: - self.bufsize = bufsize - self.buffer = [[None]] * bufsize - self.ptr = -1 - def push(self, item): - if self.bufsize > 0: - if not self.cumulate: - self.ptr = (self.ptr + 1) % self.bufsize - self.buffer[self.ptr] = item - else: - self.buffer[self.ptr].append(item) - def pop(self): - if self.bufsize > 0: - item = self.buffer[self.ptr] - if item is None: - return None - else: - self.buffer[self.ptr] = [None] - self.ptr = (self.ptr - 1) % self.bufsize - return (item) - def nr_of_items(self): - return self.bufsize - self.buffer.count([None]) - def __repr__(self): - return str(self.buffer) + " " + str(self.ptr) - - - -class TurtleScreen(TurtleScreenBase): - """Provides screen oriented methods like bgcolor etc. - - Only relies upon the methods of TurtleScreenBase and NOT - upon components of the underlying graphics toolkit - - which is Tkinter in this case. 
- """ - _RUNNING = True - - def __init__(self, cv, mode=_CFG["mode"], - colormode=_CFG["colormode"], delay=_CFG["delay"]): - TurtleScreenBase.__init__(self, cv) - - self._shapes = { - "arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))), - "turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7), - (-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6), - (-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6), - (5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10), - (2,14))), - "circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88), - (5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51), - (-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0), - (-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09), - (-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51), - (5.88,-8.09), (8.09,-5.88), (9.51,-3.09))), - "square" : Shape("polygon", ((10,-10), (10,10), (-10,10), - (-10,-10))), - "triangle" : Shape("polygon", ((10,-5.77), (0,11.55), - (-10,-5.77))), - "classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))), - "blank" : Shape("image", self._blankimage()) - } - - self._bgpics = {"nopic" : ""} - - self._mode = mode - self._delayvalue = delay - self._colormode = _CFG["colormode"] - self._keys = [] - self.clear() - if sys.platform == 'darwin': - # Force Turtle window to the front on OS X. This is needed because - # the Turtle window will show behind the Terminal window when you - # start the demo from the command line. - rootwindow = cv.winfo_toplevel() - rootwindow.call('wm', 'attributes', '.', '-topmost', '1') - rootwindow.call('wm', 'attributes', '.', '-topmost', '0') - - def clear(self): - """Delete all drawings and all turtles from the TurtleScreen. - - No argument. - - Reset empty TurtleScreen to its initial state: white background, - no backgroundimage, no eventbindings and tracing on. - - Example (for a TurtleScreen instance named screen): - >>> screen.clear() - - Note: this method is not available as function. - """ - self._delayvalue = _CFG["delay"] - self._colormode = _CFG["colormode"] - self._delete("all") - self._bgpic = self._createimage("") - self._bgpicname = "nopic" - self._tracing = 1 - self._updatecounter = 0 - self._turtles = [] - self.bgcolor("white") - for btn in 1, 2, 3: - self.onclick(None, btn) - self.onkeypress(None) - for key in self._keys[:]: - self.onkey(None, key) - self.onkeypress(None, key) - Turtle._pen = None - - def mode(self, mode=None): - """Set turtle-mode ('standard', 'logo' or 'world') and perform reset. - - Optional argument: - mode -- one of the strings 'standard', 'logo' or 'world' - - Mode 'standard' is compatible with turtle.py. - Mode 'logo' is compatible with most Logo-Turtle-Graphics. - Mode 'world' uses userdefined 'worldcoordinates'. *Attention*: in - this mode angles appear distorted if x/y unit-ratio doesn't equal 1. - If mode is not given, return the current mode. 
- - Mode Initial turtle heading positive angles - ------------|-------------------------|------------------- - 'standard' to the right (east) counterclockwise - 'logo' upward (north) clockwise - - Examples: - >>> mode('logo') # resets turtle heading to north - >>> mode() - 'logo' - """ - if mode is None: - return self._mode - mode = mode.lower() - if mode not in ["standard", "logo", "world"]: - raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode) - self._mode = mode - if mode in ["standard", "logo"]: - self._setscrollregion(-self.canvwidth//2, -self.canvheight//2, - self.canvwidth//2, self.canvheight//2) - self.xscale = self.yscale = 1.0 - self.reset() - - def setworldcoordinates(self, llx, lly, urx, ury): - """Set up a user defined coordinate-system. - - Arguments: - llx -- a number, x-coordinate of lower left corner of canvas - lly -- a number, y-coordinate of lower left corner of canvas - urx -- a number, x-coordinate of upper right corner of canvas - ury -- a number, y-coordinate of upper right corner of canvas - - Set up user coodinat-system and switch to mode 'world' if necessary. - This performs a screen.reset. If mode 'world' is already active, - all drawings are redrawn according to the new coordinates. - - But ATTENTION: in user-defined coordinatesystems angles may appear - distorted. (see Screen.mode()) - - Example (for a TurtleScreen instance named screen): - >>> screen.setworldcoordinates(-10,-0.5,50,1.5) - >>> for _ in range(36): - ... left(10) - ... forward(0.5) - """ - if self.mode() != "world": - self.mode("world") - xspan = float(urx - llx) - yspan = float(ury - lly) - wx, wy = self._window_size() - self.screensize(wx-20, wy-20) - oldxscale, oldyscale = self.xscale, self.yscale - self.xscale = self.canvwidth / xspan - self.yscale = self.canvheight / yspan - srx1 = llx * self.xscale - sry1 = -ury * self.yscale - srx2 = self.canvwidth + srx1 - sry2 = self.canvheight + sry1 - self._setscrollregion(srx1, sry1, srx2, sry2) - self._rescale(self.xscale/oldxscale, self.yscale/oldyscale) - self.update() - - def register_shape(self, name, shape=None): - """Adds a turtle shape to TurtleScreen's shapelist. - - Arguments: - (1) name is the name of a gif-file and shape is None. - Installs the corresponding image shape. - !! Image-shapes DO NOT rotate when turning the turtle, - !! so they do not display the heading of the turtle! - (2) name is an arbitrary string and shape is a tuple - of pairs of coordinates. Installs the corresponding - polygon shape - (3) name is an arbitrary string and shape is a - (compound) Shape object. Installs the corresponding - compound shape. - To use a shape, you have to issue the command shape(shapename). - - call: register_shape("turtle.gif") - --or: register_shape("tri", ((0,0), (10,10), (-10,10))) - - Example (for a TurtleScreen instance named screen): - >>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3))) - - """ - if shape is None: - # image - if name.lower().endswith(".gif"): - shape = Shape("image", self._image(name)) - else: - raise TurtleGraphicsError("Bad arguments for register_shape.\n" - + "Use help(register_shape)" ) - elif isinstance(shape, tuple): - shape = Shape("polygon", shape) - ## else shape assumed to be Shape-instance - self._shapes[name] = shape - - def _colorstr(self, color): - """Return color string corresponding to args. - - Argument may be a string or a tuple of three - numbers corresponding to actual colormode, - i.e. in the range 0<=n<=colormode. 
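Worked numbers for the `setworldcoordinates` scaling above, using the docstring's example window on an assumed 400x300 canvas:

```python
# xscale/yscale follow the formulas in setworldcoordinates above.
llx, lly, urx, ury = -10, -0.5, 50, 1.5
canvwidth, canvheight = 400, 300
xscale = canvwidth / (urx - llx)    # 400 / 60 -> ~6.67 pixels per unit
yscale = canvheight / (ury - lly)   # 300 / 2  -> 150 pixels per unit
print(round(xscale, 2), yscale)     # 6.67 150.0
# Angles drawn in this mode appear distorted because xscale != yscale.
```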
- - If the argument doesn't represent a color, - an error is raised. - """ - if len(color) == 1: - color = color[0] - if isinstance(color, str): - if self._iscolorstring(color) or color == "": - return color - else: - raise TurtleGraphicsError("bad color string: %s" % str(color)) - try: - r, g, b = color - except (TypeError, ValueError): - raise TurtleGraphicsError("bad color arguments: %s" % str(color)) - if self._colormode == 1.0: - r, g, b = [round(255.0*x) for x in (r, g, b)] - if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)): - raise TurtleGraphicsError("bad color sequence: %s" % str(color)) - return "#%02x%02x%02x" % (r, g, b) - - def _color(self, cstr): - if not cstr.startswith("#"): - return cstr - if len(cstr) == 7: - cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)] - elif len(cstr) == 4: - cl = [16*int(cstr[h], 16) for h in cstr[1:]] - else: - raise TurtleGraphicsError("bad colorstring: %s" % cstr) - return tuple(c * self._colormode/255 for c in cl) - - def colormode(self, cmode=None): - """Return the colormode or set it to 1.0 or 255. - - Optional argument: - cmode -- one of the values 1.0 or 255 - - r, g, b values of colortriples have to be in range 0..cmode. - - Example (for a TurtleScreen instance named screen): - >>> screen.colormode() - 1.0 - >>> screen.colormode(255) - >>> pencolor(240,160,80) - """ - if cmode is None: - return self._colormode - if cmode == 1.0: - self._colormode = float(cmode) - elif cmode == 255: - self._colormode = int(cmode) - - def reset(self): - """Reset all Turtles on the Screen to their initial state. - - No argument. - - Example (for a TurtleScreen instance named screen): - >>> screen.reset() - """ - for turtle in self._turtles: - turtle._setmode(self._mode) - turtle.reset() - - def turtles(self): - """Return the list of turtles on the screen. - - Example (for a TurtleScreen instance named screen): - >>> screen.turtles() - [] - """ - return self._turtles - - def bgcolor(self, *args): - """Set or return backgroundcolor of the TurtleScreen. - - Four input formats are allowed: - - bgcolor() - Return the current background color as color specification - string or as a tuple (see example). May be used as input - to another color/pencolor/fillcolor/bgcolor call. - - bgcolor(colorstring) - Set the background color to colorstring, which is a Tk color - specification string, such as "red", "yellow", or "#33cc8c". - - bgcolor((r, g, b)) - Set the background color to the RGB color represented by - the tuple of r, g, and b. Each of r, g, and b must be in - the range 0..colormode, where colormode is either 1.0 or 255 - (see colormode()). - - bgcolor(r, g, b) - Set the background color to the RGB color represented by - r, g, and b. Each of r, g, and b must be in the range - 0..colormode. - - Example (for a TurtleScreen instance named screen): - >>> screen.bgcolor("orange") - >>> screen.bgcolor() - 'orange' - >>> colormode(255) - >>> screen.bgcolor('#800080') - >>> screen.bgcolor() - (128.0, 0.0, 128.0) - """ - if args: - color = self._colorstr(args) - else: - color = None - color = self._bgcolor(color) - if color is not None: - color = self._color(color) - return color - - def tracer(self, n=None, delay=None): - """Turns turtle animation on/off and set delay for update drawings. - - Optional arguments: - n -- nonnegative integer - delay -- nonnegative integer - - If n is given, only each n-th regular screen update is really performed. - (Can be used to accelerate the drawing of complex graphics.) 
- Second arguments sets delay value (see RawTurtle.delay()) - - Example (for a TurtleScreen instance named screen): - >>> screen.tracer(8, 25) - >>> dist = 2 - >>> for i in range(200): - ... fd(dist) - ... rt(90) - ... dist += 2 - """ - if n is None: - return self._tracing - self._tracing = int(n) - self._updatecounter = 0 - if delay is not None: - self._delayvalue = int(delay) - if self._tracing: - self.update() - - def delay(self, delay=None): - """ Return or set the drawing delay in milliseconds. - - Optional argument: - delay -- positive integer - - Example (for a TurtleScreen instance named screen): - >>> screen.delay(15) - >>> screen.delay() - 15 - """ - if delay is None: - return self._delayvalue - self._delayvalue = int(delay) - - def _incrementudc(self): - """Increment update counter.""" - if not TurtleScreen._RUNNING: - TurtleScreen._RUNNING = True - raise Terminator - if self._tracing > 0: - self._updatecounter += 1 - self._updatecounter %= self._tracing - - def update(self): - """Perform a TurtleScreen update. - """ - tracing = self._tracing - self._tracing = True - for t in self.turtles(): - t._update_data() - t._drawturtle() - self._tracing = tracing - self._update() - - def window_width(self): - """ Return the width of the turtle window. - - Example (for a TurtleScreen instance named screen): - >>> screen.window_width() - 640 - """ - return self._window_size()[0] - - def window_height(self): - """ Return the height of the turtle window. - - Example (for a TurtleScreen instance named screen): - >>> screen.window_height() - 480 - """ - return self._window_size()[1] - - def getcanvas(self): - """Return the Canvas of this TurtleScreen. - - No argument. - - Example (for a Screen instance named screen): - >>> cv = screen.getcanvas() - >>> cv - - """ - return self.cv - - def getshapes(self): - """Return a list of names of all currently available turtle shapes. - - No argument. - - Example (for a TurtleScreen instance named screen): - >>> screen.getshapes() - ['arrow', 'blank', 'circle', ... , 'turtle'] - """ - return sorted(self._shapes.keys()) - - def onclick(self, fun, btn=1, add=None): - """Bind fun to mouse-click event on canvas. - - Arguments: - fun -- a function with two arguments, the coordinates of the - clicked point on the canvas. - btn -- the number of the mouse-button, defaults to 1 - - Example (for a TurtleScreen instance named screen) - - >>> screen.onclick(goto) - >>> # Subsequently clicking into the TurtleScreen will - >>> # make the turtle move to the clicked point. - >>> screen.onclick(None) - """ - self._onscreenclick(fun, btn, add) - - def onkey(self, fun, key): - """Bind fun to key-release event of key. - - Arguments: - fun -- a function with no arguments - key -- a string: key (e.g. "a") or key-symbol (e.g. "space") - - In order to be able to register key-events, TurtleScreen - must have focus. (See method listen.) - - Example (for a TurtleScreen instance named screen): - - >>> def f(): - ... fd(50) - ... lt(60) - ... - >>> screen.onkey(f, "Up") - >>> screen.listen() - - Subsequently the turtle can be moved by repeatedly pressing - the up-arrow key, consequently drawing a hexagon - - """ - if fun is None: - if key in self._keys: - self._keys.remove(key) - elif key not in self._keys: - self._keys.append(key) - self._onkeyrelease(fun, key) - - def onkeypress(self, fun, key=None): - """Bind fun to key-press event of key if key is given, - or to any key-press-event if no key is given. 
- - Arguments: - fun -- a function with no arguments - key -- a string: key (e.g. "a") or key-symbol (e.g. "space") - - In order to be able to register key-events, TurtleScreen - must have focus. (See method listen.) - - Example (for a TurtleScreen instance named screen - and a Turtle instance named turtle): - - >>> def f(): - ... fd(50) - ... lt(60) - ... - >>> screen.onkeypress(f, "Up") - >>> screen.listen() - - Subsequently the turtle can be moved by repeatedly pressing - the up-arrow key, or by keeping pressed the up-arrow key. - consequently drawing a hexagon. - """ - if fun is None: - if key in self._keys: - self._keys.remove(key) - elif key is not None and key not in self._keys: - self._keys.append(key) - self._onkeypress(fun, key) - - def listen(self, xdummy=None, ydummy=None): - """Set focus on TurtleScreen (in order to collect key-events) - - No arguments. - Dummy arguments are provided in order - to be able to pass listen to the onclick method. - - Example (for a TurtleScreen instance named screen): - >>> screen.listen() - """ - self._listen() - - def ontimer(self, fun, t=0): - """Install a timer, which calls fun after t milliseconds. - - Arguments: - fun -- a function with no arguments. - t -- a number >= 0 - - Example (for a TurtleScreen instance named screen): - - >>> running = True - >>> def f(): - ... if running: - ... fd(50) - ... lt(60) - ... screen.ontimer(f, 250) - ... - >>> f() # makes the turtle marching around - >>> running = False - """ - self._ontimer(fun, t) - - def bgpic(self, picname=None): - """Set background image or return name of current backgroundimage. - - Optional argument: - picname -- a string, name of a gif-file or "nopic". - - If picname is a filename, set the corresponding image as background. - If picname is "nopic", delete backgroundimage, if present. - If picname is None, return the filename of the current backgroundimage. - - Example (for a TurtleScreen instance named screen): - >>> screen.bgpic() - 'nopic' - >>> screen.bgpic("landscape.gif") - >>> screen.bgpic() - 'landscape.gif' - """ - if picname is None: - return self._bgpicname - if picname not in self._bgpics: - self._bgpics[picname] = self._image(picname) - self._setbgpic(self._bgpic, self._bgpics[picname]) - self._bgpicname = picname - - def screensize(self, canvwidth=None, canvheight=None, bg=None): - """Resize the canvas the turtles are drawing on. - - Optional arguments: - canvwidth -- positive integer, new width of canvas in pixels - canvheight -- positive integer, new height of canvas in pixels - bg -- colorstring or color-tuple, new backgroundcolor - If no arguments are given, return current (canvaswidth, canvasheight) - - Do not alter the drawing window. To observe hidden parts of - the canvas use the scrollbars. (Can make visible those parts - of a drawing, which were outside the canvas before!) - - Example (for a Turtle instance named turtle): - >>> turtle.screensize(2000,1500) - >>> # e.g. to search for an erroneously escaped turtle ;-) - """ - return self._resize(canvwidth, canvheight, bg) - - onscreenclick = onclick - resetscreen = reset - clearscreen = clear - addshape = register_shape - onkeyrelease = onkey - -class TNavigator(object): - """Navigation part of the RawTurtle. - Implements methods for turtle movement. 
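A hedged usage sketch for the `listen`/`onkeypress` pattern documented above (requires a display; the handler name is illustrative):

```python
import turtle

screen = turtle.Screen()
t = turtle.Turtle()

def step():
    t.forward(10)

screen.onkeypress(step, "Up")   # bind the Up arrow key
screen.listen()                 # canvas needs focus to receive key events
screen.mainloop()
```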
- """ - START_ORIENTATION = { - "standard": Vec2D(1.0, 0.0), - "world" : Vec2D(1.0, 0.0), - "logo" : Vec2D(0.0, 1.0) } - DEFAULT_MODE = "standard" - DEFAULT_ANGLEOFFSET = 0 - DEFAULT_ANGLEORIENT = 1 - - def __init__(self, mode=DEFAULT_MODE): - self._angleOffset = self.DEFAULT_ANGLEOFFSET - self._angleOrient = self.DEFAULT_ANGLEORIENT - self._mode = mode - self.undobuffer = None - self.degrees() - self._mode = None - self._setmode(mode) - TNavigator.reset(self) - - def reset(self): - """reset turtle to its initial values - - Will be overwritten by parent class - """ - self._position = Vec2D(0.0, 0.0) - self._orient = TNavigator.START_ORIENTATION[self._mode] - - def _setmode(self, mode=None): - """Set turtle-mode to 'standard', 'world' or 'logo'. - """ - if mode is None: - return self._mode - if mode not in ["standard", "logo", "world"]: - return - self._mode = mode - if mode in ["standard", "world"]: - self._angleOffset = 0 - self._angleOrient = 1 - else: # mode == "logo": - self._angleOffset = self._fullcircle/4. - self._angleOrient = -1 - - def _setDegreesPerAU(self, fullcircle): - """Helper function for degrees() and radians()""" - self._fullcircle = fullcircle - self._degreesPerAU = 360/fullcircle - if self._mode == "standard": - self._angleOffset = 0 - else: - self._angleOffset = fullcircle/4. - - def degrees(self, fullcircle=360.0): - """ Set angle measurement units to degrees. - - Optional argument: - fullcircle - a number - - Set angle measurement units, i. e. set number - of 'degrees' for a full circle. Default value is - 360 degrees. - - Example (for a Turtle instance named turtle): - >>> turtle.left(90) - >>> turtle.heading() - 90 - - Change angle measurement unit to grad (also known as gon, - grade, or gradian and equals 1/100-th of the right angle.) - >>> turtle.degrees(400.0) - >>> turtle.heading() - 100 - - """ - self._setDegreesPerAU(fullcircle) - - def radians(self): - """ Set the angle measurement units to radians. - - No arguments. - - Example (for a Turtle instance named turtle): - >>> turtle.heading() - 90 - >>> turtle.radians() - >>> turtle.heading() - 1.5707963267948966 - """ - self._setDegreesPerAU(math.tau) - - def _go(self, distance): - """move turtle forward by specified distance""" - ende = self._position + self._orient * distance - self._goto(ende) - - def _rotate(self, angle): - """Turn turtle counterclockwise by specified angle if angle > 0.""" - angle *= self._degreesPerAU - self._orient = self._orient.rotate(angle) - - def _goto(self, end): - """move turtle to position end.""" - self._position = end - - def teleport(self, x=None, y=None, *, fill_gap: bool = False) -> None: - """To be overwritten by child class RawTurtle. - Includes no TPen references.""" - new_x = x if x is not None else self._position[0] - new_y = y if y is not None else self._position[1] - self._position = Vec2D(new_x, new_y) - - def forward(self, distance): - """Move the turtle forward by the specified distance. - - Aliases: forward | fd - - Argument: - distance -- a number (integer or float) - - Move the turtle forward by the specified distance, in the direction - the turtle is headed. - - Example (for a Turtle instance named turtle): - >>> turtle.position() - (0.00,0.00) - >>> turtle.forward(25) - >>> turtle.position() - (25.00,0.00) - >>> turtle.forward(-75) - >>> turtle.position() - (-50.00,0.00) - """ - self._go(distance) - - def back(self, distance): - """Move the turtle backward by distance. 
- - Aliases: back | backward | bk - - Argument: - distance -- a number - - Move the turtle backward by distance, opposite to the direction the - turtle is headed. Do not change the turtle's heading. - - Example (for a Turtle instance named turtle): - >>> turtle.position() - (0.00,0.00) - >>> turtle.backward(30) - >>> turtle.position() - (-30.00,0.00) - """ - self._go(-distance) - - def right(self, angle): - """Turn turtle right by angle units. - - Aliases: right | rt - - Argument: - angle -- a number (integer or float) - - Turn turtle right by angle units. (Units are by default degrees, - but can be set via the degrees() and radians() functions.) - Angle orientation depends on mode. (See this.) - - Example (for a Turtle instance named turtle): - >>> turtle.heading() - 22.0 - >>> turtle.right(45) - >>> turtle.heading() - 337.0 - """ - self._rotate(-angle) - - def left(self, angle): - """Turn turtle left by angle units. - - Aliases: left | lt - - Argument: - angle -- a number (integer or float) - - Turn turtle left by angle units. (Units are by default degrees, - but can be set via the degrees() and radians() functions.) - Angle orientation depends on mode. (See this.) - - Example (for a Turtle instance named turtle): - >>> turtle.heading() - 22.0 - >>> turtle.left(45) - >>> turtle.heading() - 67.0 - """ - self._rotate(angle) - - def pos(self): - """Return the turtle's current location (x,y), as a Vec2D-vector. - - Aliases: pos | position - - No arguments. - - Example (for a Turtle instance named turtle): - >>> turtle.pos() - (0.00, 240.00) - """ - return self._position - - def xcor(self): - """ Return the turtle's x coordinate. - - No arguments. - - Example (for a Turtle instance named turtle): - >>> reset() - >>> turtle.left(60) - >>> turtle.forward(100) - >>> print(turtle.xcor()) - 50.0 - """ - return self._position[0] - - def ycor(self): - """ Return the turtle's y coordinate - --- - No arguments. - - Example (for a Turtle instance named turtle): - >>> reset() - >>> turtle.left(60) - >>> turtle.forward(100) - >>> print(turtle.ycor()) - 86.6025403784 - """ - return self._position[1] - - - def goto(self, x, y=None): - """Move turtle to an absolute position. - - Aliases: setpos | setposition | goto: - - Arguments: - x -- a number or a pair/vector of numbers - y -- a number None - - call: goto(x, y) # two coordinates - --or: goto((x, y)) # a pair (tuple) of coordinates - --or: goto(vec) # e.g. as returned by pos() - - Move turtle to an absolute position. If the pen is down, - a line will be drawn. The turtle's orientation does not change. - - Example (for a Turtle instance named turtle): - >>> tp = turtle.pos() - >>> tp - (0.00,0.00) - >>> turtle.setpos(60,30) - >>> turtle.pos() - (60.00,30.00) - >>> turtle.setpos((20,80)) - >>> turtle.pos() - (20.00,80.00) - >>> turtle.setpos(tp) - >>> turtle.pos() - (0.00,0.00) - """ - if y is None: - self._goto(Vec2D(*x)) - else: - self._goto(Vec2D(x, y)) - - def home(self): - """Move turtle to the origin - coordinates (0,0). - - No arguments. - - Move turtle to the origin - coordinates (0,0) and set its - heading to its start-orientation (which depends on mode). - - Example (for a Turtle instance named turtle): - >>> turtle.home() - """ - self.goto(0, 0) - self.setheading(0) - - def setx(self, x): - """Set the turtle's first coordinate to x - - Argument: - x -- a number (integer or float) - - Set the turtle's first coordinate to x, leave second coordinate - unchanged. 
- 
- Example (for a Turtle instance named turtle):
- >>> turtle.position()
- (0.00, 240.00)
- >>> turtle.setx(10)
- >>> turtle.position()
- (10.00, 240.00)
- """
- self._goto(Vec2D(x, self._position[1]))
- 
- def sety(self, y):
- """Set the turtle's second coordinate to y
- 
- Argument:
- y -- a number (integer or float)
- 
- Set the turtle's second coordinate to y, first coordinate remains
- unchanged.
- 
- Example (for a Turtle instance named turtle):
- >>> turtle.position()
- (0.00, 40.00)
- >>> turtle.sety(-10)
- >>> turtle.position()
- (0.00, -10.00)
- """
- self._goto(Vec2D(self._position[0], y))
- 
- def distance(self, x, y=None):
- """Return the distance from the turtle to (x,y) in turtle step units.
- 
- Arguments:
- x -- a number or a pair/vector of numbers or a turtle instance
- y -- a number, or None if x is a pair/vector or a turtle instance
- 
- call: distance(x, y) # two coordinates
- --or: distance((x, y)) # a pair (tuple) of coordinates
- --or: distance(vec) # e.g. as returned by pos()
- --or: distance(mypen) # where mypen is another turtle
- 
- Example (for a Turtle instance named turtle):
- >>> turtle.pos()
- (0.00,0.00)
- >>> turtle.distance(30,40)
- 50.0
- >>> pen = Turtle()
- >>> pen.forward(77)
- >>> turtle.distance(pen)
- 77.0
- """
- if y is not None:
- pos = Vec2D(x, y)
- if isinstance(x, Vec2D):
- pos = x
- elif isinstance(x, tuple):
- pos = Vec2D(*x)
- elif isinstance(x, TNavigator):
- pos = x._position
- return abs(pos - self._position)
- 
- def towards(self, x, y=None):
- """Return the angle of the line from the turtle's position to (x, y).
- 
- Arguments:
- x -- a number or a pair/vector of numbers or a turtle instance
- y -- a number, or None if x is a pair/vector or a turtle instance
- 
- call: towards(x, y) # two coordinates
- --or: towards((x, y)) # a pair (tuple) of coordinates
- --or: towards(vec) # e.g. as returned by pos()
- --or: towards(mypen) # where mypen is another turtle
- 
- Return the angle between the line from turtle-position to the position
- specified by x, y and the turtle's start orientation. (Depends on
- mode - "standard" or "logo".)
- 
- Example (for a Turtle instance named turtle):
- >>> turtle.pos()
- (10.00, 10.00)
- >>> turtle.towards(0,0)
- 225.0
- """
- if y is not None:
- pos = Vec2D(x, y)
- if isinstance(x, Vec2D):
- pos = x
- elif isinstance(x, tuple):
- pos = Vec2D(*x)
- elif isinstance(x, TNavigator):
- pos = x._position
- x, y = pos - self._position
- result = round(math.degrees(math.atan2(y, x)), 10) % 360.0
- result /= self._degreesPerAU
- return (self._angleOffset + self._angleOrient*result) % self._fullcircle
- 
- def heading(self):
- """ Return the turtle's current heading.
- 
- No arguments.
- 
- Example (for a Turtle instance named turtle):
- >>> turtle.left(67)
- >>> turtle.heading()
- 67.0
- """
- x, y = self._orient
- result = round(math.degrees(math.atan2(y, x)), 10) % 360.0
- result /= self._degreesPerAU
- return (self._angleOffset + self._angleOrient*result) % self._fullcircle
- 
- def setheading(self, to_angle):
- """Set the orientation of the turtle to to_angle.
- 
- Aliases: setheading | seth
- 
- Argument:
- to_angle -- a number (integer or float)
- 
- Set the orientation of the turtle to to_angle.
- Here are some common directions in degrees: - - standard - mode: logo-mode: - -------------------|-------------------- - 0 - east 0 - north - 90 - north 90 - east - 180 - west 180 - south - 270 - south 270 - west - - Example (for a Turtle instance named turtle): - >>> turtle.setheading(90) - >>> turtle.heading() - 90 - """ - angle = (to_angle - self.heading())*self._angleOrient - full = self._fullcircle - angle = (angle+full/2.)%full - full/2. - self._rotate(angle) - - def circle(self, radius, extent = None, steps = None): - """ Draw a circle with given radius. - - Arguments: - radius -- a number - extent (optional) -- a number - steps (optional) -- an integer - - Draw a circle with given radius. The center is radius units left - of the turtle; extent - an angle - determines which part of the - circle is drawn. If extent is not given, draw the entire circle. - If extent is not a full circle, one endpoint of the arc is the - current pen position. Draw the arc in counterclockwise direction - if radius is positive, otherwise in clockwise direction. Finally - the direction of the turtle is changed by the amount of extent. - - As the circle is approximated by an inscribed regular polygon, - steps determines the number of steps to use. If not given, - it will be calculated automatically. Maybe used to draw regular - polygons. - - call: circle(radius) # full circle - --or: circle(radius, extent) # arc - --or: circle(radius, extent, steps) - --or: circle(radius, steps=6) # 6-sided polygon - - Example (for a Turtle instance named turtle): - >>> turtle.circle(50) - >>> turtle.circle(120, 180) # semicircle - """ - if self.undobuffer: - self.undobuffer.push(["seq"]) - self.undobuffer.cumulate = True - speed = self.speed() - if extent is None: - extent = self._fullcircle - if steps is None: - frac = abs(extent)/self._fullcircle - steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac) - w = 1.0 * extent / steps - w2 = 0.5 * w - l = 2.0 * radius * math.sin(math.radians(w2)*self._degreesPerAU) - if radius < 0: - l, w, w2 = -l, -w, -w2 - tr = self._tracer() - dl = self._delay() - if speed == 0: - self._tracer(0, 0) - else: - self.speed(0) - self._rotate(w2) - for i in range(steps): - self.speed(speed) - self._go(l) - self.speed(0) - self._rotate(w) - self._rotate(-w2) - if speed == 0: - self._tracer(tr, dl) - self.speed(speed) - if self.undobuffer: - self.undobuffer.cumulate = False - -## three dummy methods to be implemented by child class: - - def speed(self, s=0): - """dummy method - to be overwritten by child class""" - def _tracer(self, a=None, b=None): - """dummy method - to be overwritten by child class""" - def _delay(self, n=None): - """dummy method - to be overwritten by child class""" - - fd = forward - bk = back - backward = back - rt = right - lt = left - position = pos - setpos = goto - setposition = goto - seth = setheading - - -class TPen(object): - """Drawing part of the RawTurtle. - Implements drawing properties. - """ - def __init__(self, resizemode=_CFG["resizemode"]): - self._resizemode = resizemode # or "user" or "noresize" - self.undobuffer = None - TPen._reset(self) - - def _reset(self, pencolor=_CFG["pencolor"], - fillcolor=_CFG["fillcolor"]): - self._pensize = 1 - self._shown = True - self._pencolor = pencolor - self._fillcolor = fillcolor - self._drawing = True - self._speed = 3 - self._stretchfactor = (1., 1.) - self._shearfactor = 0. - self._tilt = 0. - self._shapetrafo = (1., 0., 0., 1.) 
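- # _shapetrafo holds the 2x2 shape transformation matrix in row-major
- # order (t11, t12, t21, t22); (1., 0., 0., 1.) is the identity transform.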
- self._outlinewidth = 1 - - def resizemode(self, rmode=None): - """Set resizemode to one of the values: "auto", "user", "noresize". - - (Optional) Argument: - rmode -- one of the strings "auto", "user", "noresize" - - Different resizemodes have the following effects: - - "auto" adapts the appearance of the turtle - corresponding to the value of pensize. - - "user" adapts the appearance of the turtle according to the - values of stretchfactor and outlinewidth (outline), - which are set by shapesize() - - "noresize" no adaption of the turtle's appearance takes place. - If no argument is given, return current resizemode. - resizemode("user") is called by a call of shapesize with arguments. - - - Examples (for a Turtle instance named turtle): - >>> turtle.resizemode("noresize") - >>> turtle.resizemode() - 'noresize' - """ - if rmode is None: - return self._resizemode - rmode = rmode.lower() - if rmode in ["auto", "user", "noresize"]: - self.pen(resizemode=rmode) - - def pensize(self, width=None): - """Set or return the line thickness. - - Aliases: pensize | width - - Argument: - width -- positive number - - Set the line thickness to width or return it. If resizemode is set - to "auto" and turtleshape is a polygon, that polygon is drawn with - the same line thickness. If no argument is given, current pensize - is returned. - - Example (for a Turtle instance named turtle): - >>> turtle.pensize() - 1 - >>> turtle.pensize(10) # from here on lines of width 10 are drawn - """ - if width is None: - return self._pensize - self.pen(pensize=width) - - - def penup(self): - """Pull the pen up -- no drawing when moving. - - Aliases: penup | pu | up - - No argument - - Example (for a Turtle instance named turtle): - >>> turtle.penup() - """ - if not self._drawing: - return - self.pen(pendown=False) - - def pendown(self): - """Pull the pen down -- drawing when moving. - - Aliases: pendown | pd | down - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.pendown() - """ - if self._drawing: - return - self.pen(pendown=True) - - def isdown(self): - """Return True if pen is down, False if it's up. - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.penup() - >>> turtle.isdown() - False - >>> turtle.pendown() - >>> turtle.isdown() - True - """ - return self._drawing - - def speed(self, speed=None): - """ Return or set the turtle's speed. - - Optional argument: - speed -- an integer in the range 0..10 or a speedstring (see below) - - Set the turtle's speed to an integer value in the range 0 .. 10. - If no argument is given: return current speed. - - If input is a number greater than 10 or smaller than 0.5, - speed is set to 0. - Speedstrings are mapped to speedvalues in the following way: - 'fastest' : 0 - 'fast' : 10 - 'normal' : 6 - 'slow' : 3 - 'slowest' : 1 - speeds from 1 to 10 enforce increasingly faster animation of - line drawing and turtle turning. - - Attention: - speed = 0 : *no* animation takes place. forward/back makes turtle jump - and likewise left/right make the turtle turn instantly. - - Example (for a Turtle instance named turtle): - >>> turtle.speed(3) - """ - speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 } - if speed is None: - return self._speed - if speed in speeds: - speed = speeds[speed] - elif 0.5 < speed < 10.5: - speed = int(round(speed)) - else: - speed = 0 - self.pen(speed=speed) - - def color(self, *args): - """Return or set the pencolor and fillcolor. - - Arguments: - Several input formats are allowed. 
- They use 0 to 3 arguments as follows: - - color() - Return the current pencolor and the current fillcolor as - a pair of color specification strings or tuples as returned - by pencolor() and fillcolor(). - - color(colorstring), color((r,g,b)), color(r,g,b) - Inputs as in pencolor(), set both, fillcolor and pencolor, - to the given value. - - color(colorstring1, colorstring2), color((r1,g1,b1), (r2,g2,b2)) - Equivalent to pencolor(colorstring1) and fillcolor(colorstring2) - and analogously if the other input format is used. - - If turtleshape is a polygon, outline and interior of that polygon - is drawn with the newly set colors. - For more info see: pencolor, fillcolor - - Example (for a Turtle instance named turtle): - >>> turtle.color('red', 'green') - >>> turtle.color() - ('red', 'green') - >>> colormode(255) - >>> color(('#285078', '#a0c8f0')) - >>> color() - ((40.0, 80.0, 120.0), (160.0, 200.0, 240.0)) - """ - if args: - l = len(args) - if l == 1: - pcolor = fcolor = args[0] - elif l == 2: - pcolor, fcolor = args - elif l == 3: - pcolor = fcolor = args - pcolor = self._colorstr(pcolor) - fcolor = self._colorstr(fcolor) - self.pen(pencolor=pcolor, fillcolor=fcolor) - else: - return self._color(self._pencolor), self._color(self._fillcolor) - - def pencolor(self, *args): - """ Return or set the pencolor. - - Arguments: - Four input formats are allowed: - - pencolor() - Return the current pencolor as color specification string or - as a tuple (see example). May be used as input to another - color/pencolor/fillcolor/bgcolor call. - - pencolor(colorstring) - Set pencolor to colorstring, which is a Tk color - specification string, such as "red", "yellow", or "#33cc8c". - - pencolor((r, g, b)) - Set pencolor to the RGB color represented by the tuple of - r, g, and b. Each of r, g, and b must be in the range - 0..colormode, where colormode is either 1.0 or 255 (see - colormode()). - - pencolor(r, g, b) - Set pencolor to the RGB color represented by r, g, and b. - Each of r, g, and b must be in the range 0..colormode. - - If turtleshape is a polygon, the outline of that polygon is drawn - with the newly set pencolor. - - Example (for a Turtle instance named turtle): - >>> turtle.pencolor('brown') - >>> turtle.pencolor() - 'brown' - >>> colormode(255) - >>> turtle.pencolor('#32c18f') - >>> turtle.pencolor() - (50.0, 193.0, 143.0) - """ - if args: - color = self._colorstr(args) - if color == self._pencolor: - return - self.pen(pencolor=color) - else: - return self._color(self._pencolor) - - def fillcolor(self, *args): - """ Return or set the fillcolor. - - Arguments: - Four input formats are allowed: - - fillcolor() - Return the current fillcolor as color specification string, - possibly in tuple format (see example). May be used as - input to another color/pencolor/fillcolor/bgcolor call. - - fillcolor(colorstring) - Set fillcolor to colorstring, which is a Tk color - specification string, such as "red", "yellow", or "#33cc8c". - - fillcolor((r, g, b)) - Set fillcolor to the RGB color represented by the tuple of - r, g, and b. Each of r, g, and b must be in the range - 0..colormode, where colormode is either 1.0 or 255 (see - colormode()). - - fillcolor(r, g, b) - Set fillcolor to the RGB color represented by r, g, and b. - Each of r, g, and b must be in the range 0..colormode. - - If turtleshape is a polygon, the interior of that polygon is drawn - with the newly set fillcolor. 
- - Example (for a Turtle instance named turtle): - >>> turtle.fillcolor('violet') - >>> turtle.fillcolor() - 'violet' - >>> colormode(255) - >>> turtle.fillcolor('#ffffff') - >>> turtle.fillcolor() - (255.0, 255.0, 255.0) - """ - if args: - color = self._colorstr(args) - if color == self._fillcolor: - return - self.pen(fillcolor=color) - else: - return self._color(self._fillcolor) - - def teleport(self, x=None, y=None, *, fill_gap: bool = False) -> None: - """To be overwritten by child class RawTurtle. - Includes no TNavigator references. - """ - pendown = self.isdown() - if pendown: - self.pen(pendown=False) - self.pen(pendown=pendown) - - def showturtle(self): - """Makes the turtle visible. - - Aliases: showturtle | st - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.hideturtle() - >>> turtle.showturtle() - """ - self.pen(shown=True) - - def hideturtle(self): - """Makes the turtle invisible. - - Aliases: hideturtle | ht - - No argument. - - It's a good idea to do this while you're in the - middle of a complicated drawing, because hiding - the turtle speeds up the drawing observably. - - Example (for a Turtle instance named turtle): - >>> turtle.hideturtle() - """ - self.pen(shown=False) - - def isvisible(self): - """Return True if the Turtle is shown, False if it's hidden. - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.hideturtle() - >>> print(turtle.isvisible()) - False - """ - return self._shown - - def pen(self, pen=None, **pendict): - """Return or set the pen's attributes. - - Arguments: - pen -- a dictionary with some or all of the below listed keys. - **pendict -- one or more keyword-arguments with the below - listed keys as keywords. - - Return or set the pen's attributes in a 'pen-dictionary' - with the following key/value pairs: - "shown" : True/False - "pendown" : True/False - "pencolor" : color-string or color-tuple - "fillcolor" : color-string or color-tuple - "pensize" : positive number - "speed" : number in range 0..10 - "resizemode" : "auto" or "user" or "noresize" - "stretchfactor": (positive number, positive number) - "shearfactor": number - "outline" : positive number - "tilt" : number - - This dictionary can be used as argument for a subsequent - pen()-call to restore the former pen-state. Moreover one - or more of these attributes can be provided as keyword-arguments. - This can be used to set several pen attributes in one statement. 
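- Every key used must be one of the keys listed above; an unknown
- key raises a KeyError.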
- - - Examples (for a Turtle instance named turtle): - >>> turtle.pen(fillcolor="black", pencolor="red", pensize=10) - >>> turtle.pen() - {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, - 'pencolor': 'red', 'pendown': True, 'fillcolor': 'black', - 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} - >>> penstate=turtle.pen() - >>> turtle.color("yellow","") - >>> turtle.penup() - >>> turtle.pen() - {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, - 'pencolor': 'yellow', 'pendown': False, 'fillcolor': '', - 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} - >>> p.pen(penstate, fillcolor="green") - >>> p.pen() - {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, - 'pencolor': 'red', 'pendown': True, 'fillcolor': 'green', - 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} - """ - _pd = {"shown" : self._shown, - "pendown" : self._drawing, - "pencolor" : self._pencolor, - "fillcolor" : self._fillcolor, - "pensize" : self._pensize, - "speed" : self._speed, - "resizemode" : self._resizemode, - "stretchfactor" : self._stretchfactor, - "shearfactor" : self._shearfactor, - "outline" : self._outlinewidth, - "tilt" : self._tilt - } - - if not (pen or pendict): - return _pd - - if isinstance(pen, dict): - p = pen - else: - p = {} - p.update(pendict) - - _p_buf = {} - for key in p: - _p_buf[key] = _pd[key] - - if self.undobuffer: - self.undobuffer.push(("pen", _p_buf)) - - newLine = False - if "pendown" in p: - if self._drawing != p["pendown"]: - newLine = True - if "pencolor" in p: - if isinstance(p["pencolor"], tuple): - p["pencolor"] = self._colorstr((p["pencolor"],)) - if self._pencolor != p["pencolor"]: - newLine = True - if "pensize" in p: - if self._pensize != p["pensize"]: - newLine = True - if newLine: - self._newLine() - if "pendown" in p: - self._drawing = p["pendown"] - if "pencolor" in p: - self._pencolor = p["pencolor"] - if "pensize" in p: - self._pensize = p["pensize"] - if "fillcolor" in p: - if isinstance(p["fillcolor"], tuple): - p["fillcolor"] = self._colorstr((p["fillcolor"],)) - self._fillcolor = p["fillcolor"] - if "speed" in p: - self._speed = p["speed"] - if "resizemode" in p: - self._resizemode = p["resizemode"] - if "stretchfactor" in p: - sf = p["stretchfactor"] - if isinstance(sf, (int, float)): - sf = (sf, sf) - self._stretchfactor = sf - if "shearfactor" in p: - self._shearfactor = p["shearfactor"] - if "outline" in p: - self._outlinewidth = p["outline"] - if "shown" in p: - self._shown = p["shown"] - if "tilt" in p: - self._tilt = p["tilt"] - if "stretchfactor" in p or "tilt" in p or "shearfactor" in p: - scx, scy = self._stretchfactor - shf = self._shearfactor - sa, ca = math.sin(self._tilt), math.cos(self._tilt) - self._shapetrafo = ( scx*ca, scy*(shf*ca + sa), - -scx*sa, scy*(ca - shf*sa)) - self._update() - -## three dummy methods to be implemented by child class: - - def _newLine(self, usePos = True): - """dummy method - to be overwritten by child class""" - def _update(self, count=True, forced=False): - """dummy method - to be overwritten by child class""" - def _color(self, args): - """dummy method - to be overwritten by child class""" - def _colorstr(self, args): - """dummy method - to be overwritten by child class""" - - width = pensize - up = penup - pu = penup - pd = pendown - down = pendown - st = showturtle - ht = hideturtle - - -class _TurtleImage(object): - """Helper class: Datatype to store Turtle attributes - """ - - def __init__(self, screen, shapeIndex): - self.screen = screen - 
self._type = None - self._setshape(shapeIndex) - - def _setshape(self, shapeIndex): - screen = self.screen - self.shapeIndex = shapeIndex - if self._type == "polygon" == screen._shapes[shapeIndex]._type: - return - if self._type == "image" == screen._shapes[shapeIndex]._type: - return - if self._type in ["image", "polygon"]: - screen._delete(self._item) - elif self._type == "compound": - for item in self._item: - screen._delete(item) - self._type = screen._shapes[shapeIndex]._type - if self._type == "polygon": - self._item = screen._createpoly() - elif self._type == "image": - self._item = screen._createimage(screen._shapes["blank"]._data) - elif self._type == "compound": - self._item = [screen._createpoly() for item in - screen._shapes[shapeIndex]._data] - - -class RawTurtle(TPen, TNavigator): - """Animation part of the RawTurtle. - Puts RawTurtle upon a TurtleScreen and provides tools for - its animation. - """ - screens = [] - - def __init__(self, canvas=None, - shape=_CFG["shape"], - undobuffersize=_CFG["undobuffersize"], - visible=_CFG["visible"]): - if isinstance(canvas, _Screen): - self.screen = canvas - elif isinstance(canvas, TurtleScreen): - if canvas not in RawTurtle.screens: - RawTurtle.screens.append(canvas) - self.screen = canvas - elif isinstance(canvas, (ScrolledCanvas, Canvas)): - for screen in RawTurtle.screens: - if screen.cv == canvas: - self.screen = screen - break - else: - self.screen = TurtleScreen(canvas) - RawTurtle.screens.append(self.screen) - else: - raise TurtleGraphicsError("bad canvas argument %s" % canvas) - - screen = self.screen - TNavigator.__init__(self, screen.mode()) - TPen.__init__(self) - screen._turtles.append(self) - self.drawingLineItem = screen._createline() - self.turtle = _TurtleImage(screen, shape) - self._poly = None - self._creatingPoly = False - self._fillitem = self._fillpath = None - self._shown = visible - self._hidden_from_screen = False - self.currentLineItem = screen._createline() - self.currentLine = [self._position] - self.items = [self.currentLineItem] - self.stampItems = [] - self._undobuffersize = undobuffersize - self.undobuffer = Tbuffer(undobuffersize) - self._update() - - def reset(self): - """Delete the turtle's drawings and restore its default values. - - No argument. - - Delete the turtle's drawings from the screen, re-center the turtle - and set variables to the default values. - - Example (for a Turtle instance named turtle): - >>> turtle.position() - (0.00,-22.00) - >>> turtle.heading() - 100.0 - >>> turtle.reset() - >>> turtle.position() - (0.00,0.00) - >>> turtle.heading() - 0.0 - """ - TNavigator.reset(self) - TPen._reset(self) - self._clear() - self._drawturtle() - self._update() - - def setundobuffer(self, size): - """Set or disable undobuffer. - - Argument: - size -- an integer or None - - If size is an integer an empty undobuffer of given size is installed. - Size gives the maximum number of turtle-actions that can be undone - by the undo() function. - If size is None, no undobuffer is present. - - Example (for a Turtle instance named turtle): - >>> turtle.setundobuffer(42) - """ - if size is None or size <= 0: - self.undobuffer = None - else: - self.undobuffer = Tbuffer(size) - - def undobufferentries(self): - """Return count of entries in the undobuffer. - - No argument. - - Example (for a Turtle instance named turtle): - >>> while undobufferentries(): - ... 
undo()
- """
- if self.undobuffer is None:
- return 0
- return self.undobuffer.nr_of_items()
- 
- def _clear(self):
- """Delete all of pen's drawings"""
- self._fillitem = self._fillpath = None
- for item in self.items:
- self.screen._delete(item)
- self.currentLineItem = self.screen._createline()
- self.currentLine = []
- if self._drawing:
- self.currentLine.append(self._position)
- self.items = [self.currentLineItem]
- self.clearstamps()
- self.setundobuffer(self._undobuffersize)
- 
- 
- def clear(self):
- """Delete the turtle's drawings from the screen. Do not move turtle.
- 
- No arguments.
- 
- Delete the turtle's drawings from the screen. Do not move turtle.
- State and position of the turtle as well as drawings of other
- turtles are not affected.
- 
- Examples (for a Turtle instance named turtle):
- >>> turtle.clear()
- """
- self._clear()
- self._update()
- 
- def _update_data(self):
- self.screen._incrementudc()
- if self.screen._updatecounter != 0:
- return
- if len(self.currentLine)>1:
- self.screen._drawline(self.currentLineItem, self.currentLine,
- self._pencolor, self._pensize)
- 
- def _update(self):
- """Perform a Turtle-data update.
- """
- screen = self.screen
- if screen._tracing == 0:
- return
- elif screen._tracing == 1:
- self._update_data()
- self._drawturtle()
- screen._update() # TurtleScreenBase
- screen._delay(screen._delayvalue) # TurtleScreenBase
- else:
- self._update_data()
- if screen._updatecounter == 0:
- for t in screen.turtles():
- t._drawturtle()
- screen._update()
- 
- def _tracer(self, flag=None, delay=None):
- """Turns turtle animation on/off and sets delay for update drawings.
- 
- Optional arguments:
- flag -- nonnegative integer
- delay -- nonnegative integer
- 
- If flag is given, only each flag-th regular screen update is really
- performed. (Can be used to accelerate the drawing of complex graphics.)
- The second argument sets the delay value (see RawTurtle.delay())
- 
- Example (for a Turtle instance named turtle):
- >>> turtle.tracer(8, 25)
- >>> dist = 2
- >>> for i in range(200):
- ... turtle.fd(dist)
- ... turtle.rt(90)
- ... dist += 2
- """
- return self.screen.tracer(flag, delay)
- 
- def _color(self, args):
- return self.screen._color(args)
- 
- def _colorstr(self, args):
- return self.screen._colorstr(args)
- 
- def _cc(self, args):
- """Convert colortriples to hexstrings.
- """
- if isinstance(args, str):
- return args
- try:
- r, g, b = args
- except (TypeError, ValueError):
- raise TurtleGraphicsError("bad color arguments: %s" % str(args))
- if self.screen._colormode == 1.0:
- r, g, b = [round(255.0*x) for x in (r, g, b)]
- if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
- raise TurtleGraphicsError("bad color sequence: %s" % str(args))
- return "#%02x%02x%02x" % (r, g, b)
- 
- def teleport(self, x=None, y=None, *, fill_gap: bool = False) -> None:
- """Instantly move turtle to an absolute position.
- 
- Arguments:
- x -- a number or None
- y -- a number or None
- fill_gap -- a boolean; this argument must be specified by name
- 
- call: teleport(x, y) # two coordinates
- --or: teleport(x) # teleport to x position, keeping y as is
- --or: teleport(y=y) # teleport to y position, keeping x as is
- --or: teleport(x, y, fill_gap=True)
- # teleport but fill the gap in between
- 
- Move turtle to an absolute position. Unlike goto(x, y), a line will not
- be drawn. The turtle's orientation does not change. If currently
- filling, the polygon(s) teleported from will be filled after leaving,
- and filling will begin again after teleporting.
This can be disabled - with fill_gap=True, which makes the imaginary line traveled during - teleporting act as a fill barrier like in goto(x, y). - - Example (for a Turtle instance named turtle): - >>> tp = turtle.pos() - >>> tp - (0.00,0.00) - >>> turtle.teleport(60) - >>> turtle.pos() - (60.00,0.00) - >>> turtle.teleport(y=10) - >>> turtle.pos() - (60.00,10.00) - >>> turtle.teleport(20, 30) - >>> turtle.pos() - (20.00,30.00) - """ - pendown = self.isdown() - was_filling = self.filling() - if pendown: - self.pen(pendown=False) - if was_filling and not fill_gap: - self.end_fill() - new_x = x if x is not None else self._position[0] - new_y = y if y is not None else self._position[1] - self._position = Vec2D(new_x, new_y) - self.pen(pendown=pendown) - if was_filling and not fill_gap: - self.begin_fill() - - def clone(self): - """Create and return a clone of the turtle. - - No argument. - - Create and return a clone of the turtle with same position, heading - and turtle properties. - - Example (for a Turtle instance named mick): - mick = Turtle() - joe = mick.clone() - """ - screen = self.screen - self._newLine(self._drawing) - - turtle = self.turtle - self.screen = None - self.turtle = None # too make self deepcopy-able - - q = deepcopy(self) - - self.screen = screen - self.turtle = turtle - - q.screen = screen - q.turtle = _TurtleImage(screen, self.turtle.shapeIndex) - - screen._turtles.append(q) - ttype = screen._shapes[self.turtle.shapeIndex]._type - if ttype == "polygon": - q.turtle._item = screen._createpoly() - elif ttype == "image": - q.turtle._item = screen._createimage(screen._shapes["blank"]._data) - elif ttype == "compound": - q.turtle._item = [screen._createpoly() for item in - screen._shapes[self.turtle.shapeIndex]._data] - q.currentLineItem = screen._createline() - q._update() - return q - - def shape(self, name=None): - """Set turtle shape to shape with given name / return current shapename. - - Optional argument: - name -- a string, which is a valid shapename - - Set turtle shape to shape with given name or, if name is not given, - return name of current shape. - Shape with name must exist in the TurtleScreen's shape dictionary. - Initially there are the following polygon shapes: - 'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'. - To learn about how to deal with shapes see Screen-method register_shape. - - Example (for a Turtle instance named turtle): - >>> turtle.shape() - 'arrow' - >>> turtle.shape("turtle") - >>> turtle.shape() - 'turtle' - """ - if name is None: - return self.turtle.shapeIndex - if not name in self.screen.getshapes(): - raise TurtleGraphicsError("There is no shape named %s" % name) - self.turtle._setshape(name) - self._update() - - def shapesize(self, stretch_wid=None, stretch_len=None, outline=None): - """Set/return turtle's stretchfactors/outline. Set resizemode to "user". - - Optional arguments: - stretch_wid : positive number - stretch_len : positive number - outline : positive number - - Return or set the pen's attributes x/y-stretchfactors and/or outline. - Set resizemode to "user". - If and only if resizemode is set to "user", the turtle will be displayed - stretched according to its stretchfactors: - stretch_wid is stretchfactor perpendicular to orientation - stretch_len is stretchfactor in direction of turtles orientation. - outline determines the width of the shapes's outline. 
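- A stretch_wid or stretch_len of zero is rejected with a
- TurtleGraphicsError.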
- - Examples (for a Turtle instance named turtle): - >>> turtle.resizemode("user") - >>> turtle.shapesize(5, 5, 12) - >>> turtle.shapesize(outline=8) - """ - if stretch_wid is stretch_len is outline is None: - stretch_wid, stretch_len = self._stretchfactor - return stretch_wid, stretch_len, self._outlinewidth - if stretch_wid == 0 or stretch_len == 0: - raise TurtleGraphicsError("stretch_wid/stretch_len must not be zero") - if stretch_wid is not None: - if stretch_len is None: - stretchfactor = stretch_wid, stretch_wid - else: - stretchfactor = stretch_wid, stretch_len - elif stretch_len is not None: - stretchfactor = self._stretchfactor[0], stretch_len - else: - stretchfactor = self._stretchfactor - if outline is None: - outline = self._outlinewidth - self.pen(resizemode="user", - stretchfactor=stretchfactor, outline=outline) - - def shearfactor(self, shear=None): - """Set or return the current shearfactor. - - Optional argument: shear -- number, tangent of the shear angle - - Shear the turtleshape according to the given shearfactor shear, - which is the tangent of the shear angle. DO NOT change the - turtle's heading (direction of movement). - If shear is not given: return the current shearfactor, i. e. the - tangent of the shear angle, by which lines parallel to the - heading of the turtle are sheared. - - Examples (for a Turtle instance named turtle): - >>> turtle.shape("circle") - >>> turtle.shapesize(5,2) - >>> turtle.shearfactor(0.5) - >>> turtle.shearfactor() - >>> 0.5 - """ - if shear is None: - return self._shearfactor - self.pen(resizemode="user", shearfactor=shear) - - def tiltangle(self, angle=None): - """Set or return the current tilt-angle. - - Optional argument: angle -- number - - Rotate the turtleshape to point in the direction specified by angle, - regardless of its current tilt-angle. DO NOT change the turtle's - heading (direction of movement). - If angle is not given: return the current tilt-angle, i. e. the angle - between the orientation of the turtleshape and the heading of the - turtle (its direction of movement). - - Examples (for a Turtle instance named turtle): - >>> turtle.shape("circle") - >>> turtle.shapesize(5, 2) - >>> turtle.tiltangle() - 0.0 - >>> turtle.tiltangle(45) - >>> turtle.tiltangle() - 45.0 - >>> turtle.stamp() - >>> turtle.fd(50) - >>> turtle.tiltangle(-45) - >>> turtle.tiltangle() - 315.0 - >>> turtle.stamp() - >>> turtle.fd(50) - """ - if angle is None: - tilt = -math.degrees(self._tilt) * self._angleOrient - return (tilt / self._degreesPerAU) % self._fullcircle - else: - tilt = -angle * self._degreesPerAU * self._angleOrient - tilt = math.radians(tilt) % math.tau - self.pen(resizemode="user", tilt=tilt) - - def tilt(self, angle): - """Rotate the turtleshape by angle. - - Argument: - angle - a number - - Rotate the turtleshape by angle from its current tilt-angle, - but do NOT change the turtle's heading (direction of movement). - - Examples (for a Turtle instance named turtle): - >>> turtle.shape("circle") - >>> turtle.shapesize(5,2) - >>> turtle.tilt(30) - >>> turtle.fd(50) - >>> turtle.tilt(30) - >>> turtle.fd(50) - """ - self.tiltangle(angle + self.tiltangle()) - - def shapetransform(self, t11=None, t12=None, t21=None, t22=None): - """Set or return the current transformation matrix of the turtle shape. - - Optional arguments: t11, t12, t21, t22 -- numbers. - - If none of the matrix elements are given, return the transformation - matrix. 
- 
- Otherwise set the given elements and transform the turtleshape
- according to the matrix consisting of first row t11, t12 and
- second row t21, t22.
- Modify stretchfactor, shearfactor and tiltangle according to the
- given matrix.
- 
- Examples (for a Turtle instance named turtle):
- >>> turtle.shape("square")
- >>> turtle.shapesize(4,2)
- >>> turtle.shearfactor(-0.5)
- >>> turtle.shapetransform()
- (4.0, -1.0, -0.0, 2.0)
- """
- if t11 is t12 is t21 is t22 is None:
- return self._shapetrafo
- m11, m12, m21, m22 = self._shapetrafo
- if t11 is not None: m11 = t11
- if t12 is not None: m12 = t12
- if t21 is not None: m21 = t21
- if t22 is not None: m22 = t22
- # check the merged matrix, since any of t11 .. t22 may still be None
- if m11 * m22 - m12 * m21 == 0:
- raise TurtleGraphicsError("Bad shape transform matrix: must not be singular")
- self._shapetrafo = (m11, m12, m21, m22)
- alfa = math.atan2(-m21, m11) % math.tau
- sa, ca = math.sin(alfa), math.cos(alfa)
- a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22,
- sa*m11 + ca*m21, sa*m12 + ca*m22)
- self._stretchfactor = a11, a22
- self._shearfactor = a12/a22
- self._tilt = alfa
- self.pen(resizemode="user")
- 
- 
- def _polytrafo(self, poly):
- """Computes transformed polygon shapes from a shape
- according to current position and heading.
- """
- screen = self.screen
- p0, p1 = self._position
- e0, e1 = self._orient
- e = Vec2D(e0, e1 * screen.yscale / screen.xscale)
- e0, e1 = (1.0 / abs(e)) * e
- return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale)
- for (x, y) in poly]
- 
- def get_shapepoly(self):
- """Return the current shape polygon as tuple of coordinate pairs.
- 
- No argument.
- 
- Examples (for a Turtle instance named turtle):
- >>> turtle.shape("square")
- >>> turtle.shapetransform(4, -1, 0, 2)
- >>> turtle.get_shapepoly()
- ((50, -20), (30, 20), (-50, 20), (-30, -20))
- 
- """
- shape = self.screen._shapes[self.turtle.shapeIndex]
- if shape._type == "polygon":
- return self._getshapepoly(shape._data, shape._type == "compound")
- # else return None
- 
- def _getshapepoly(self, polygon, compound=False):
- """Calculate transformed shape polygon according to resizemode
- and shapetransform.
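- The mapping applied to each vertex is
- (x, y) -> (t11*x + t12*y, t21*x + t22*y).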
- """ - if self._resizemode == "user" or compound: - t11, t12, t21, t22 = self._shapetrafo - elif self._resizemode == "auto": - l = max(1, self._pensize/5.0) - t11, t12, t21, t22 = l, 0, 0, l - elif self._resizemode == "noresize": - return polygon - return tuple((t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon) - - def _drawturtle(self): - """Manages the correct rendering of the turtle with respect to - its shape, resizemode, stretch and tilt etc.""" - screen = self.screen - shape = screen._shapes[self.turtle.shapeIndex] - ttype = shape._type - titem = self.turtle._item - if self._shown and screen._updatecounter == 0 and screen._tracing > 0: - self._hidden_from_screen = False - tshape = shape._data - if ttype == "polygon": - if self._resizemode == "noresize": w = 1 - elif self._resizemode == "auto": w = self._pensize - else: w =self._outlinewidth - shape = self._polytrafo(self._getshapepoly(tshape)) - fc, oc = self._fillcolor, self._pencolor - screen._drawpoly(titem, shape, fill=fc, outline=oc, - width=w, top=True) - elif ttype == "image": - screen._drawimage(titem, self._position, tshape) - elif ttype == "compound": - for item, (poly, fc, oc) in zip(titem, tshape): - poly = self._polytrafo(self._getshapepoly(poly, True)) - screen._drawpoly(item, poly, fill=self._cc(fc), - outline=self._cc(oc), width=self._outlinewidth, top=True) - else: - if self._hidden_from_screen: - return - if ttype == "polygon": - screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), "", "") - elif ttype == "image": - screen._drawimage(titem, self._position, - screen._shapes["blank"]._data) - elif ttype == "compound": - for item in titem: - screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), "", "") - self._hidden_from_screen = True - -############################## stamp stuff ############################### - - def stamp(self): - """Stamp a copy of the turtleshape onto the canvas and return its id. - - No argument. - - Stamp a copy of the turtle shape onto the canvas at the current - turtle position. Return a stamp_id for that stamp, which can be - used to delete it by calling clearstamp(stamp_id). 
- - Example (for a Turtle instance named turtle): - >>> turtle.color("blue") - >>> turtle.stamp() - 13 - >>> turtle.fd(50) - """ - screen = self.screen - shape = screen._shapes[self.turtle.shapeIndex] - ttype = shape._type - tshape = shape._data - if ttype == "polygon": - stitem = screen._createpoly() - if self._resizemode == "noresize": w = 1 - elif self._resizemode == "auto": w = self._pensize - else: w =self._outlinewidth - shape = self._polytrafo(self._getshapepoly(tshape)) - fc, oc = self._fillcolor, self._pencolor - screen._drawpoly(stitem, shape, fill=fc, outline=oc, - width=w, top=True) - elif ttype == "image": - stitem = screen._createimage("") - screen._drawimage(stitem, self._position, tshape) - elif ttype == "compound": - stitem = [] - for element in tshape: - item = screen._createpoly() - stitem.append(item) - stitem = tuple(stitem) - for item, (poly, fc, oc) in zip(stitem, tshape): - poly = self._polytrafo(self._getshapepoly(poly, True)) - screen._drawpoly(item, poly, fill=self._cc(fc), - outline=self._cc(oc), width=self._outlinewidth, top=True) - self.stampItems.append(stitem) - self.undobuffer.push(("stamp", stitem)) - return stitem - - def _clearstamp(self, stampid): - """does the work for clearstamp() and clearstamps() - """ - if stampid in self.stampItems: - if isinstance(stampid, tuple): - for subitem in stampid: - self.screen._delete(subitem) - else: - self.screen._delete(stampid) - self.stampItems.remove(stampid) - # Delete stampitem from undobuffer if necessary - # if clearstamp is called directly. - item = ("stamp", stampid) - buf = self.undobuffer - if item not in buf.buffer: - return - index = buf.buffer.index(item) - buf.buffer.remove(item) - if index <= buf.ptr: - buf.ptr = (buf.ptr - 1) % buf.bufsize - buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None]) - - def clearstamp(self, stampid): - """Delete stamp with given stampid - - Argument: - stampid - an integer, must be return value of previous stamp() call. - - Example (for a Turtle instance named turtle): - >>> turtle.color("blue") - >>> astamp = turtle.stamp() - >>> turtle.fd(50) - >>> turtle.clearstamp(astamp) - """ - self._clearstamp(stampid) - self._update() - - def clearstamps(self, n=None): - """Delete all or first/last n of turtle's stamps. - - Optional argument: - n -- an integer - - If n is None, delete all of pen's stamps, - else if n > 0 delete first n stamps - else if n < 0 delete last n stamps. - - Example (for a Turtle instance named turtle): - >>> for i in range(8): - ... turtle.stamp(); turtle.fd(30) - ... - >>> turtle.clearstamps(2) - >>> turtle.clearstamps(-2) - >>> turtle.clearstamps() - """ - if n is None: - toDelete = self.stampItems[:] - elif n >= 0: - toDelete = self.stampItems[:n] - else: - toDelete = self.stampItems[n:] - for item in toDelete: - self._clearstamp(item) - self._update() - - def _goto(self, end): - """Move the pen to the point end, thereby drawing a line - if pen is down. All other methods for turtle movement depend - on this one. 
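- When tracing is active, the motion is animated as a sequence of
- small hops; their number grows with the distance covered and
- shrinks as the speed setting increases.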
- """ - ## Version with undo-stuff - go_modes = ( self._drawing, - self._pencolor, - self._pensize, - isinstance(self._fillpath, list)) - screen = self.screen - undo_entry = ("go", self._position, end, go_modes, - (self.currentLineItem, - self.currentLine[:], - screen._pointlist(self.currentLineItem), - self.items[:]) - ) - if self.undobuffer: - self.undobuffer.push(undo_entry) - start = self._position - if self._speed and screen._tracing == 1: - diff = (end-start) - diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2 - nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed)) - delta = diff * (1.0/nhops) - for n in range(1, nhops): - if n == 1: - top = True - else: - top = False - self._position = start + delta * n - if self._drawing: - screen._drawline(self.drawingLineItem, - (start, self._position), - self._pencolor, self._pensize, top) - self._update() - if self._drawing: - screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)), - fill="", width=self._pensize) - # Turtle now at end, - if self._drawing: # now update currentLine - self.currentLine.append(end) - if isinstance(self._fillpath, list): - self._fillpath.append(end) - ###### vererbung!!!!!!!!!!!!!!!!!!!!!! - self._position = end - if self._creatingPoly: - self._poly.append(end) - if len(self.currentLine) > 42: # 42! answer to the ultimate question - # of life, the universe and everything - self._newLine() - self._update() #count=True) - - def _undogoto(self, entry): - """Reverse a _goto. Used for undo() - """ - old, new, go_modes, coodata = entry - drawing, pc, ps, filling = go_modes - cLI, cL, pl, items = coodata - screen = self.screen - if abs(self._position - new) > 0.5: - print ("undogoto: HALLO-DA-STIMMT-WAS-NICHT!") - # restore former situation - self.currentLineItem = cLI - self.currentLine = cL - - if pl == [(0, 0), (0, 0)]: - usepc = "" - else: - usepc = pc - screen._drawline(cLI, pl, fill=usepc, width=ps) - - todelete = [i for i in self.items if (i not in items) and - (screen._type(i) == "line")] - for i in todelete: - screen._delete(i) - self.items.remove(i) - - start = old - if self._speed and screen._tracing == 1: - diff = old - new - diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2 - nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed)) - delta = diff * (1.0/nhops) - for n in range(1, nhops): - if n == 1: - top = True - else: - top = False - self._position = new + delta * n - if drawing: - screen._drawline(self.drawingLineItem, - (start, self._position), - pc, ps, top) - self._update() - if drawing: - screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)), - fill="", width=ps) - # Turtle now at position old, - self._position = old - ## if undo is done during creating a polygon, the last vertex - ## will be deleted. if the polygon is entirely deleted, - ## creatingPoly will be set to False. - ## Polygons created before the last one will not be affected by undo() - if self._creatingPoly: - if len(self._poly) > 0: - self._poly.pop() - if self._poly == []: - self._creatingPoly = False - self._poly = None - if filling: - if self._fillpath == []: - self._fillpath = None - print("Unwahrscheinlich in _undogoto!") - elif self._fillpath is not None: - self._fillpath.pop() - self._update() #count=True) - - def _rotate(self, angle): - """Turns pen clockwise by angle. 
- """ - if self.undobuffer: - self.undobuffer.push(("rot", angle, self._degreesPerAU)) - angle *= self._degreesPerAU - neworient = self._orient.rotate(angle) - tracing = self.screen._tracing - if tracing == 1 and self._speed > 0: - anglevel = 3.0 * self._speed - steps = 1 + int(abs(angle)/anglevel) - delta = 1.0*angle/steps - for _ in range(steps): - self._orient = self._orient.rotate(delta) - self._update() - self._orient = neworient - self._update() - - def _newLine(self, usePos=True): - """Closes current line item and starts a new one. - Remark: if current line became too long, animation - performance (via _drawline) slowed down considerably. - """ - if len(self.currentLine) > 1: - self.screen._drawline(self.currentLineItem, self.currentLine, - self._pencolor, self._pensize) - self.currentLineItem = self.screen._createline() - self.items.append(self.currentLineItem) - else: - self.screen._drawline(self.currentLineItem, top=True) - self.currentLine = [] - if usePos: - self.currentLine = [self._position] - - def filling(self): - """Return fillstate (True if filling, False else). - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.begin_fill() - >>> if turtle.filling(): - ... turtle.pensize(5) - ... else: - ... turtle.pensize(3) - """ - return isinstance(self._fillpath, list) - - def begin_fill(self): - """Called just before drawing a shape to be filled. - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.color("black", "red") - >>> turtle.begin_fill() - >>> turtle.circle(60) - >>> turtle.end_fill() - """ - if not self.filling(): - self._fillitem = self.screen._createpoly() - self.items.append(self._fillitem) - self._fillpath = [self._position] - self._newLine() - if self.undobuffer: - self.undobuffer.push(("beginfill", self._fillitem)) - self._update() - - - def end_fill(self): - """Fill the shape drawn after the call begin_fill(). - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.color("black", "red") - >>> turtle.begin_fill() - >>> turtle.circle(60) - >>> turtle.end_fill() - """ - if self.filling(): - if len(self._fillpath) > 2: - self.screen._drawpoly(self._fillitem, self._fillpath, - fill=self._fillcolor) - if self.undobuffer: - self.undobuffer.push(("dofill", self._fillitem)) - self._fillitem = self._fillpath = None - self._update() - - def dot(self, size=None, *color): - """Draw a dot with diameter size, using color. - - Optional arguments: - size -- an integer >= 1 (if given) - color -- a colorstring or a numeric color tuple - - Draw a circular dot with diameter size, using color. - If size is not given, the maximum of pensize+4 and 2*pensize is used. - - Example (for a Turtle instance named turtle): - >>> turtle.dot() - >>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50) - """ - if not color: - if isinstance(size, (str, tuple)): - color = self._colorstr(size) - size = self._pensize + max(self._pensize, 4) - else: - color = self._pencolor - if not size: - size = self._pensize + max(self._pensize, 4) - else: - if size is None: - size = self._pensize + max(self._pensize, 4) - color = self._colorstr(color) - # If screen were to gain a dot function, see GH #104218. 
- pen = self.pen()
- if self.undobuffer:
- self.undobuffer.push(["seq"])
- self.undobuffer.cumulate = True
- try:
- if self.resizemode() == 'auto':
- self.ht()
- self.pendown()
- self.pensize(size)
- self.pencolor(color)
- self.forward(0)
- finally:
- self.pen(pen)
- if self.undobuffer:
- self.undobuffer.cumulate = False
- 
- def _write(self, txt, align, font):
- """Performs the writing for write()
- """
- item, end = self.screen._write(self._position, txt, align, font,
- self._pencolor)
- self._update()
- self.items.append(item)
- if self.undobuffer:
- self.undobuffer.push(("wri", item))
- return end
- 
- def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")):
- """Write text at the current turtle position.
- 
- Arguments:
- arg -- info, which is to be written to the TurtleScreen
- move (optional) -- True/False
- align (optional) -- one of the strings "left", "center" or "right"
- font (optional) -- a triple (fontname, fontsize, fonttype)
- 
- Write text - the string representation of arg - at the current
- turtle position according to align ("left", "center" or "right")
- and with the given font.
- If move is True, the pen is moved to the bottom-right corner
- of the text. By default, move is False.
- 
- Example (for a Turtle instance named turtle):
- >>> turtle.write('Home = ', True, align="center")
- >>> turtle.write((0,0), True)
- """
- if self.undobuffer:
- self.undobuffer.push(["seq"])
- self.undobuffer.cumulate = True
- end = self._write(str(arg), align.lower(), font)
- if move:
- x, y = self.pos()
- self.setpos(end, y)
- if self.undobuffer:
- self.undobuffer.cumulate = False
- 
- def begin_poly(self):
- """Start recording the vertices of a polygon.
- 
- No argument.
- 
- Start recording the vertices of a polygon. Current turtle position
- is first point of polygon.
- 
- Example (for a Turtle instance named turtle):
- >>> turtle.begin_poly()
- """
- self._poly = [self._position]
- self._creatingPoly = True
- 
- def end_poly(self):
- """Stop recording the vertices of a polygon.
- 
- No argument.
- 
- Stop recording the vertices of a polygon. Current turtle position is
- last point of polygon. This will be connected with the first point.
- 
- Example (for a Turtle instance named turtle):
- >>> turtle.end_poly()
- """
- self._creatingPoly = False
- 
- def get_poly(self):
- """Return the lastly recorded polygon.
- 
- No argument.
- 
- Example (for a Turtle instance named turtle):
- >>> p = turtle.get_poly()
- >>> turtle.register_shape("myFavouriteShape", p)
- """
- ## check if there is any poly?
- if self._poly is not None:
- return tuple(self._poly)
- 
- def getscreen(self):
- """Return the TurtleScreen object the turtle is drawing on.
- 
- No argument.
- 
- Return the TurtleScreen object the turtle is drawing on.
- So TurtleScreen-methods can be called for that object.
- 
- Example (for a Turtle instance named turtle):
- >>> ts = turtle.getscreen()
- >>> ts
- <turtle._Screen object at 0x...>
- >>> ts.bgcolor("pink")
- """
- return self.screen
- 
- def getturtle(self):
- """Return the Turtle object itself.
- 
- No argument.
- 
- Only reasonable use: as a function to return the 'anonymous turtle':
- 
- Example:
- >>> pet = getturtle()
- >>> pet.fd(50)
- >>> pet
- <turtle.Turtle object at 0x...>
- >>> turtles()
- [<turtle.Turtle object at 0x...>]
- """
- return self
- 
- getpen = getturtle
- 
- 
- ################################################################
- ### screen oriented methods recurring to methods of TurtleScreen
- ################################################################
- 
- def _delay(self, delay=None):
- """Set delay value which determines speed of turtle animation.
- """
- return self.screen.delay(delay)
- 
- def onclick(self, fun, btn=1, add=None):
- """Bind fun to mouse-click event on this turtle on canvas.
- 
- Arguments:
- fun -- a function with two arguments, to which will be assigned
- the coordinates of the clicked point on the canvas.
- btn -- number of the mouse-button, defaults to 1 (left mouse button).
- add -- True or False. If True, a new binding will be added, otherwise
- it will replace a former binding.
- 
- Example for the anonymous turtle, i. e. the procedural way:
- 
- >>> def turn(x, y):
- ... left(360)
- ...
- >>> onclick(turn) # Now clicking into the turtle will turn it.
- >>> onclick(None) # event-binding will be removed
- """
- self.screen._onclick(self.turtle._item, fun, btn, add)
- self._update()
- 
- def onrelease(self, fun, btn=1, add=None):
- """Bind fun to mouse-button-release event on this turtle on canvas.
- 
- Arguments:
- fun -- a function with two arguments, to which will be assigned
- the coordinates of the clicked point on the canvas.
- btn -- number of the mouse-button, defaults to 1 (left mouse button).
- 
- Example (for a MyTurtle instance named joe):
- >>> class MyTurtle(Turtle):
- ... def glow(self,x,y):
- ... self.fillcolor("red")
- ... def unglow(self,x,y):
- ... self.fillcolor("")
- ...
- >>> joe = MyTurtle()
- >>> joe.onclick(joe.glow)
- >>> joe.onrelease(joe.unglow)
- 
- Clicking on joe turns fillcolor red, unclicking turns it to
- transparent.
- """
- self.screen._onrelease(self.turtle._item, fun, btn, add)
- self._update()
- 
- def ondrag(self, fun, btn=1, add=None):
- """Bind fun to mouse-move event on this turtle on canvas.
- 
- Arguments:
- fun -- a function with two arguments, to which will be assigned
- the coordinates of the clicked point on the canvas.
- btn -- number of the mouse-button, defaults to 1 (left mouse button).
- 
- Every sequence of mouse-move-events on a turtle is preceded by a
- mouse-click event on that turtle.
- 
- Example (for a Turtle instance named turtle):
- >>> turtle.ondrag(turtle.goto)
- 
- Subsequently clicking and dragging a Turtle will move it
- across the screen thereby producing handdrawings (if pen is
- down).
- """ - self.screen._ondrag(self.turtle._item, fun, btn, add) - - - def _undo(self, action, data): - """Does the main part of the work for undo() - """ - if self.undobuffer is None: - return - if action == "rot": - angle, degPAU = data - self._rotate(-angle*degPAU/self._degreesPerAU) - dummy = self.undobuffer.pop() - elif action == "stamp": - stitem = data[0] - self.clearstamp(stitem) - elif action == "go": - self._undogoto(data) - elif action in ["wri", "dot"]: - item = data[0] - self.screen._delete(item) - self.items.remove(item) - elif action == "dofill": - item = data[0] - self.screen._drawpoly(item, ((0, 0),(0, 0),(0, 0)), - fill="", outline="") - elif action == "beginfill": - item = data[0] - self._fillitem = self._fillpath = None - if item in self.items: - self.screen._delete(item) - self.items.remove(item) - elif action == "pen": - TPen.pen(self, data[0]) - self.undobuffer.pop() - - def undo(self): - """undo (repeatedly) the last turtle action. - - No argument. - - undo (repeatedly) the last turtle action. - Number of available undo actions is determined by the size of - the undobuffer. - - Example (for a Turtle instance named turtle): - >>> for i in range(4): - ... turtle.fd(50); turtle.lt(80) - ... - >>> for i in range(8): - ... turtle.undo() - ... - """ - if self.undobuffer is None: - return - item = self.undobuffer.pop() - action = item[0] - data = item[1:] - if action == "seq": - while data: - item = data.pop() - self._undo(item[0], item[1:]) - else: - self._undo(action, data) - - turtlesize = shapesize - -RawPen = RawTurtle - -### Screen - Singleton ######################## - -def Screen(): - """Return the singleton screen object. - If none exists at the moment, create a new one and return it, - else return the existing one.""" - if Turtle._screen is None: - Turtle._screen = _Screen() - return Turtle._screen - -class _Screen(TurtleScreen): - - _root = None - _canvas = None - _title = _CFG["title"] - - def __init__(self): - if _Screen._root is None: - _Screen._root = self._root = _Root() - self._root.title(_Screen._title) - self._root.ondestroy(self._destroy) - if _Screen._canvas is None: - width = _CFG["width"] - height = _CFG["height"] - canvwidth = _CFG["canvwidth"] - canvheight = _CFG["canvheight"] - leftright = _CFG["leftright"] - topbottom = _CFG["topbottom"] - self._root.setupcanvas(width, height, canvwidth, canvheight) - _Screen._canvas = self._root._getcanvas() - TurtleScreen.__init__(self, _Screen._canvas) - self.setup(width, height, leftright, topbottom) - - def setup(self, width=_CFG["width"], height=_CFG["height"], - startx=_CFG["leftright"], starty=_CFG["topbottom"]): - """ Set the size and position of the main window. - - Arguments: - width: as integer a size in pixels, as float a fraction of the screen. - Default is 50% of screen. - height: as integer the height in pixels, as float a fraction of the - screen. Default is 75% of screen. - startx: if positive, starting position in pixels from the left - edge of the screen, if negative from the right edge - Default, startx=None is to center window horizontally. - starty: if positive, starting position in pixels from the top - edge of the screen, if negative from the bottom edge - Default, starty=None is to center window vertically. 
- - Examples (for a Screen instance named screen): - >>> screen.setup (width=200, height=200, startx=0, starty=0) - - sets window to 200x200 pixels, in upper left of screen - - >>> screen.setup(width=.75, height=0.5, startx=None, starty=None) - - sets window to 75% of screen by 50% of screen and centers - """ - if not hasattr(self._root, "set_geometry"): - return - sw = self._root.win_width() - sh = self._root.win_height() - if isinstance(width, float) and 0 <= width <= 1: - width = sw*width - if startx is None: - startx = (sw - width) / 2 - if isinstance(height, float) and 0 <= height <= 1: - height = sh*height - if starty is None: - starty = (sh - height) / 2 - self._root.set_geometry(width, height, startx, starty) - self.update() - - def title(self, titlestring): - """Set title of turtle-window - - Argument: - titlestring -- a string, to appear in the titlebar of the - turtle graphics window. - - This is a method of Screen-class. Not available for TurtleScreen- - objects. - - Example (for a Screen instance named screen): - >>> screen.title("Welcome to the turtle-zoo!") - """ - if _Screen._root is not None: - _Screen._root.title(titlestring) - _Screen._title = titlestring - - def _destroy(self): - root = self._root - if root is _Screen._root: - Turtle._pen = None - Turtle._screen = None - _Screen._root = None - _Screen._canvas = None - TurtleScreen._RUNNING = False - root.destroy() - - def bye(self): - """Shut the turtlegraphics window. - - Example (for a TurtleScreen instance named screen): - >>> screen.bye() - """ - self._destroy() - - def exitonclick(self): - """Go into mainloop until the mouse is clicked. - - No arguments. - - Bind bye() method to mouseclick on TurtleScreen. - If "using_IDLE" - value in configuration dictionary is False - (default value), enter mainloop. - If IDLE with -n switch (no subprocess) is used, this value should be - set to True in turtle.cfg. In this case IDLE's mainloop - is active also for the client script. - - This is a method of the Screen-class and not available for - TurtleScreen instances. - - Example (for a Screen instance named screen): - >>> screen.exitonclick() - - """ - def exitGracefully(x, y): - """Screen.bye() with two dummy-parameters""" - self.bye() - self.onclick(exitGracefully) - if _CFG["using_IDLE"]: - return - try: - mainloop() - except AttributeError: - exit(0) - -class Turtle(RawTurtle): - """RawTurtle auto-creating (scrolled) canvas. - - When a Turtle object is created or a function derived from some - Turtle method is called a TurtleScreen object is automatically created. - """ - _pen = None - _screen = None - - def __init__(self, - shape=_CFG["shape"], - undobuffersize=_CFG["undobuffersize"], - visible=_CFG["visible"]): - if Turtle._screen is None: - Turtle._screen = Screen() - RawTurtle.__init__(self, Turtle._screen, - shape=shape, - undobuffersize=undobuffersize, - visible=visible) - -Pen = Turtle - -def write_docstringdict(filename="turtle_docstringdict"): - """Create and write docstring-dictionary to file. - - Optional argument: - filename -- a string, used as filename - default value is turtle_docstringdict - - Has to be called explicitly, (not used by the turtle-graphics classes) - The docstring dictionary will be written to the Python script .py - It is intended to serve as a template for translation of the docstrings - into different languages. 
- """ - docsdict = {} - - for methodname in _tg_screen_functions: - key = "_Screen."+methodname - docsdict[key] = eval(key).__doc__ - for methodname in _tg_turtle_functions: - key = "Turtle."+methodname - docsdict[key] = eval(key).__doc__ - - with open("%s.py" % filename,"w") as f: - keys = sorted(x for x in docsdict - if x.split('.')[1] not in _alias_list) - f.write('docsdict = {\n\n') - for key in keys[:-1]: - f.write('%s :\n' % repr(key)) - f.write(' """%s\n""",\n\n' % docsdict[key]) - key = keys[-1] - f.write('%s :\n' % repr(key)) - f.write(' """%s\n"""\n\n' % docsdict[key]) - f.write("}\n") - f.close() - -def read_docstrings(lang): - """Read in docstrings from lang-specific docstring dictionary. - - Transfer docstrings, translated to lang, from a dictionary-file - to the methods of classes Screen and Turtle and - in revised form - - to the corresponding functions. - """ - modname = "turtle_docstringdict_%(language)s" % {'language':lang.lower()} - module = __import__(modname) - docsdict = module.docsdict - for key in docsdict: - try: -# eval(key).im_func.__doc__ = docsdict[key] - eval(key).__doc__ = docsdict[key] - except Exception: - print("Bad docstring-entry: %s" % key) - -_LANGUAGE = _CFG["language"] - -try: - if _LANGUAGE != "english": - read_docstrings(_LANGUAGE) -except ImportError: - print("Cannot find docsdict for", _LANGUAGE) -except Exception: - print ("Unknown Error when trying to import %s-docstring-dictionary" % - _LANGUAGE) - - -def getmethparlist(ob): - """Get strings describing the arguments for the given object - - Returns a pair of strings representing function parameter lists - including parenthesis. The first string is suitable for use in - function definition and the second is suitable for use in function - call. The "self" parameter is not included. - """ - orig_sig = inspect.signature(ob) - # bit of a hack for methods - turn it into a function - # but we drop the "self" param. - # Try and build one for Python defined functions - func_sig = orig_sig.replace( - parameters=list(orig_sig.parameters.values())[1:], - ) - - call_args = [] - for param in func_sig.parameters.values(): - match param.kind: - case ( - inspect.Parameter.POSITIONAL_ONLY - | inspect.Parameter.POSITIONAL_OR_KEYWORD - ): - call_args.append(param.name) - case inspect.Parameter.VAR_POSITIONAL: - call_args.append(f'*{param.name}') - case inspect.Parameter.KEYWORD_ONLY: - call_args.append(f'{param.name}={param.name}') - case inspect.Parameter.VAR_KEYWORD: - call_args.append(f'**{param.name}') - case _: - raise RuntimeError('Unsupported parameter kind', param.kind) - call_text = f'({', '.join(call_args)})' - - return str(func_sig), call_text - -def _turtle_docrevise(docstr): - """To reduce docstrings from RawTurtle class for functions - """ - import re - if docstr is None: - return None - turtlename = _CFG["exampleturtle"] - newdocstr = docstr.replace("%s." % turtlename,"") - parexp = re.compile(r' \(.+ %s\):' % turtlename) - newdocstr = parexp.sub(":", newdocstr) - return newdocstr - -def _screen_docrevise(docstr): - """To reduce docstrings from TurtleScreen class for functions - """ - import re - if docstr is None: - return None - screenname = _CFG["examplescreen"] - newdocstr = docstr.replace("%s." % screenname,"") - parexp = re.compile(r' \(.+ %s\):' % screenname) - newdocstr = parexp.sub(":", newdocstr) - return newdocstr - -## The following mechanism makes all methods of RawTurtle and Turtle available -## as functions. 
So we can enhance, change, add, delete methods to these -## classes and do not need to change anything here. - -__func_body = """\ -def {name}{paramslist}: - if {obj} is None: - if not TurtleScreen._RUNNING: - TurtleScreen._RUNNING = True - raise Terminator - {obj} = {init} - try: - return {obj}.{name}{argslist} - except TK.TclError: - if not TurtleScreen._RUNNING: - TurtleScreen._RUNNING = True - raise Terminator - raise -""" - -def _make_global_funcs(functions, cls, obj, init, docrevise): - for methodname in functions: - method = getattr(cls, methodname) - pl1, pl2 = getmethparlist(method) - if pl1 == "": - print(">>>>>>", pl1, pl2) - continue - defstr = __func_body.format(obj=obj, init=init, name=methodname, - paramslist=pl1, argslist=pl2) - exec(defstr, globals()) - globals()[methodname].__doc__ = docrevise(method.__doc__) - -_make_global_funcs(_tg_screen_functions, _Screen, - 'Turtle._screen', 'Screen()', _screen_docrevise) -_make_global_funcs(_tg_turtle_functions, Turtle, - 'Turtle._pen', 'Turtle()', _turtle_docrevise) - - -done = mainloop - -if __name__ == "__main__": - def switchpen(): - if isdown(): - pu() - else: - pd() - - def demo1(): - """Demo of old turtle.py - module""" - reset() - tracer(True) - up() - backward(100) - down() - # draw 3 squares; the last filled - width(3) - for i in range(3): - if i == 2: - begin_fill() - for _ in range(4): - forward(20) - left(90) - if i == 2: - color("maroon") - end_fill() - up() - forward(30) - down() - width(1) - color("black") - # move out of the way - tracer(False) - up() - right(90) - forward(100) - right(90) - forward(100) - right(180) - down() - # some text - write("startstart", 1) - write("start", 1) - color("red") - # staircase - for i in range(5): - forward(20) - left(90) - forward(20) - right(90) - # filled staircase - tracer(True) - begin_fill() - for i in range(5): - forward(20) - left(90) - forward(20) - right(90) - end_fill() - # more text - - def demo2(): - """Demo of some new features.""" - speed(1) - st() - pensize(3) - setheading(towards(0, 0)) - radius = distance(0, 0)/2.0 - rt(90) - for _ in range(18): - switchpen() - circle(radius, 10) - write("wait a moment...") - while undobufferentries(): - undo() - reset() - lt(90) - colormode(255) - laenge = 10 - pencolor("green") - pensize(3) - lt(180) - for i in range(-2, 16): - if i > 0: - begin_fill() - fillcolor(255-15*i, 0, 15*i) - for _ in range(3): - fd(laenge) - lt(120) - end_fill() - laenge += 10 - lt(15) - speed((speed()+1)%12) - #end_fill() - - lt(120) - pu() - fd(70) - rt(30) - pd() - color("red","yellow") - speed(0) - begin_fill() - for _ in range(4): - circle(50, 90) - rt(90) - fd(30) - rt(90) - end_fill() - lt(90) - pu() - fd(30) - pd() - shape("turtle") - - tri = getturtle() - tri.resizemode("auto") - turtle = Turtle() - turtle.resizemode("auto") - turtle.shape("turtle") - turtle.reset() - turtle.left(90) - turtle.speed(0) - turtle.up() - turtle.goto(280, 40) - turtle.lt(30) - turtle.down() - turtle.speed(6) - turtle.color("blue","orange") - turtle.pensize(2) - tri.speed(6) - setheading(towards(turtle)) - count = 1 - while tri.distance(turtle) > 4: - turtle.fd(3.5) - turtle.lt(0.6) - tri.setheading(tri.towards(turtle)) - tri.fd(4) - if count % 20 == 0: - turtle.stamp() - tri.stamp() - switchpen() - count += 1 - tri.write("CAUGHT! 
", font=("Arial", 16, "bold"), align="right") - tri.pencolor("black") - tri.pencolor("red") - - def baba(xdummy, ydummy): - clearscreen() - bye() - - time.sleep(2) - - while undobufferentries(): - tri.undo() - turtle.undo() - tri.fd(50) - tri.write(" Click me!", font = ("Courier", 12, "bold") ) - tri.onclick(baba, 1) - - demo1() - demo2() - exitonclick() diff --git a/Python313_13_x64_Template/Lib/types.py b/Python313_13_x64_Template/Lib/types.py deleted file mode 100644 index ff474c14..00000000 --- a/Python313_13_x64_Template/Lib/types.py +++ /dev/null @@ -1,345 +0,0 @@ -""" -Define names for built-in types that aren't directly accessible as a builtin. -""" - -import sys - -# Iterators in Python aren't a matter of type but of protocol. A large -# and changing number of builtin types implement *some* flavor of -# iterator. Don't check the type! Use hasattr to check for both -# "__iter__" and "__next__" attributes instead. - -def _f(): pass -FunctionType = type(_f) -LambdaType = type(lambda: None) # Same as FunctionType -CodeType = type(_f.__code__) -MappingProxyType = type(type.__dict__) -SimpleNamespace = type(sys.implementation) - -def _cell_factory(): - a = 1 - def f(): - nonlocal a - return f.__closure__[0] -CellType = type(_cell_factory()) - -def _g(): - yield 1 -GeneratorType = type(_g()) - -async def _c(): pass -_c = _c() -CoroutineType = type(_c) -_c.close() # Prevent ResourceWarning - -async def _ag(): - yield -_ag = _ag() -AsyncGeneratorType = type(_ag) - -class _C: - def _m(self): pass -MethodType = type(_C()._m) - -BuiltinFunctionType = type(len) -BuiltinMethodType = type([].append) # Same as BuiltinFunctionType - -WrapperDescriptorType = type(object.__init__) -MethodWrapperType = type(object().__str__) -MethodDescriptorType = type(str.join) -ClassMethodDescriptorType = type(dict.__dict__['fromkeys']) - -ModuleType = type(sys) - -try: - raise TypeError -except TypeError as exc: - TracebackType = type(exc.__traceback__) - FrameType = type(exc.__traceback__.tb_frame) - -GetSetDescriptorType = type(FunctionType.__code__) -MemberDescriptorType = type(FunctionType.__globals__) - -del sys, _f, _g, _C, _c, _ag, _cell_factory # Not for export - - -# Provide a PEP 3115 compliant mechanism for class creation -def new_class(name, bases=(), kwds=None, exec_body=None): - """Create a class object dynamically using the appropriate metaclass.""" - resolved_bases = resolve_bases(bases) - meta, ns, kwds = prepare_class(name, resolved_bases, kwds) - if exec_body is not None: - exec_body(ns) - if resolved_bases is not bases: - ns['__orig_bases__'] = bases - return meta(name, resolved_bases, ns, **kwds) - -def resolve_bases(bases): - """Resolve MRO entries dynamically as specified by PEP 560.""" - new_bases = list(bases) - updated = False - shift = 0 - for i, base in enumerate(bases): - if isinstance(base, type): - continue - if not hasattr(base, "__mro_entries__"): - continue - new_base = base.__mro_entries__(bases) - updated = True - if not isinstance(new_base, tuple): - raise TypeError("__mro_entries__ must return a tuple") - else: - new_bases[i+shift:i+shift+1] = new_base - shift += len(new_base) - 1 - if not updated: - return bases - return tuple(new_bases) - -def prepare_class(name, bases=(), kwds=None): - """Call the __prepare__ method of the appropriate metaclass. 
- - Returns (metaclass, namespace, kwds) as a 3-tuple - - *metaclass* is the appropriate metaclass - *namespace* is the prepared class namespace - *kwds* is an updated copy of the passed in kwds argument with any - 'metaclass' entry removed. If no kwds argument is passed in, this will - be an empty dict. - """ - if kwds is None: - kwds = {} - else: - kwds = dict(kwds) # Don't alter the provided mapping - if 'metaclass' in kwds: - meta = kwds.pop('metaclass') - else: - if bases: - meta = type(bases[0]) - else: - meta = type - if isinstance(meta, type): - # when meta is a type, we first determine the most-derived metaclass - # instead of invoking the initial candidate directly - meta = _calculate_meta(meta, bases) - if hasattr(meta, '__prepare__'): - ns = meta.__prepare__(name, bases, **kwds) - else: - ns = {} - return meta, ns, kwds - -def _calculate_meta(meta, bases): - """Calculate the most derived metaclass.""" - winner = meta - for base in bases: - base_meta = type(base) - if issubclass(winner, base_meta): - continue - if issubclass(base_meta, winner): - winner = base_meta - continue - # else: - raise TypeError("metaclass conflict: " - "the metaclass of a derived class " - "must be a (non-strict) subclass " - "of the metaclasses of all its bases") - return winner - - -def get_original_bases(cls, /): - """Return the class's "original" bases prior to modification by `__mro_entries__`. - - Examples:: - - from typing import TypeVar, Generic, NamedTuple, TypedDict - - T = TypeVar("T") - class Foo(Generic[T]): ... - class Bar(Foo[int], float): ... - class Baz(list[str]): ... - Eggs = NamedTuple("Eggs", [("a", int), ("b", str)]) - Spam = TypedDict("Spam", {"a": int, "b": str}) - - assert get_original_bases(Bar) == (Foo[int], float) - assert get_original_bases(Baz) == (list[str],) - assert get_original_bases(Eggs) == (NamedTuple,) - assert get_original_bases(Spam) == (TypedDict,) - assert get_original_bases(int) == (object,) - """ - try: - return cls.__dict__.get("__orig_bases__", cls.__bases__) - except AttributeError: - raise TypeError( - f"Expected an instance of type, not {type(cls).__name__!r}" - ) from None - - -class DynamicClassAttribute: - """Route attribute access on a class to __getattr__. - - This is a descriptor, used to define attributes that act differently when - accessed through an instance and through a class. Instance access remains - normal, but access to an attribute through a class will be routed to the - class's __getattr__ method; this is done by raising AttributeError. - - This allows one to have properties active on an instance, and have virtual - attributes on the class with the same name. (Enum used this between Python - versions 3.4 - 3.9 .) - - Subclass from this to use a different method of accessing virtual attributes - and still be treated properly by the inspect module. (Enum uses this since - Python 3.10 .) 
- - """ - def __init__(self, fget=None, fset=None, fdel=None, doc=None): - self.fget = fget - self.fset = fset - self.fdel = fdel - # next two lines make DynamicClassAttribute act the same as property - self.__doc__ = doc or fget.__doc__ - self.overwrite_doc = doc is None - # support for abstract methods - self.__isabstractmethod__ = bool(getattr(fget, '__isabstractmethod__', False)) - - def __get__(self, instance, ownerclass=None): - if instance is None: - if self.__isabstractmethod__: - return self - raise AttributeError() - elif self.fget is None: - raise AttributeError("unreadable attribute") - return self.fget(instance) - - def __set__(self, instance, value): - if self.fset is None: - raise AttributeError("can't set attribute") - self.fset(instance, value) - - def __delete__(self, instance): - if self.fdel is None: - raise AttributeError("can't delete attribute") - self.fdel(instance) - - def getter(self, fget): - fdoc = fget.__doc__ if self.overwrite_doc else None - result = type(self)(fget, self.fset, self.fdel, fdoc or self.__doc__) - result.overwrite_doc = self.overwrite_doc - return result - - def setter(self, fset): - result = type(self)(self.fget, fset, self.fdel, self.__doc__) - result.overwrite_doc = self.overwrite_doc - return result - - def deleter(self, fdel): - result = type(self)(self.fget, self.fset, fdel, self.__doc__) - result.overwrite_doc = self.overwrite_doc - return result - - -class _GeneratorWrapper: - # TODO: Implement this in C. - def __init__(self, gen): - self.__wrapped = gen - self.__isgen = gen.__class__ is GeneratorType - self.__name__ = getattr(gen, '__name__', None) - self.__qualname__ = getattr(gen, '__qualname__', None) - def send(self, val): - return self.__wrapped.send(val) - def throw(self, tp, *rest): - return self.__wrapped.throw(tp, *rest) - def close(self): - return self.__wrapped.close() - @property - def gi_code(self): - return self.__wrapped.gi_code - @property - def gi_frame(self): - return self.__wrapped.gi_frame - @property - def gi_running(self): - return self.__wrapped.gi_running - @property - def gi_yieldfrom(self): - return self.__wrapped.gi_yieldfrom - @property - def gi_suspended(self): - return self.__wrapped.gi_suspended - cr_code = gi_code - cr_frame = gi_frame - cr_running = gi_running - cr_await = gi_yieldfrom - cr_suspended = gi_suspended - def __next__(self): - return next(self.__wrapped) - def __iter__(self): - if self.__isgen: - return self.__wrapped - return self - __await__ = __iter__ - -def coroutine(func): - """Convert regular generator function to a coroutine.""" - - if not callable(func): - raise TypeError('types.coroutine() expects a callable') - - if (func.__class__ is FunctionType and - getattr(func, '__code__', None).__class__ is CodeType): - - co_flags = func.__code__.co_flags - - # Check if 'func' is a coroutine function. - # (0x180 == CO_COROUTINE | CO_ITERABLE_COROUTINE) - if co_flags & 0x180: - return func - - # Check if 'func' is a generator function. - # (0x20 == CO_GENERATOR) - if co_flags & 0x20: - # TODO: Implement this in C. - co = func.__code__ - # 0x100 == CO_ITERABLE_COROUTINE - func.__code__ = co.replace(co_flags=co.co_flags | 0x100) - return func - - # The following code is primarily to support functions that - # return generator-like objects (for instance generators - # compiled with Cython). - - # Delay functools and _collections_abc import for speeding up types import. 
- import functools - import _collections_abc - @functools.wraps(func) - def wrapped(*args, **kwargs): - coro = func(*args, **kwargs) - if (coro.__class__ is CoroutineType or - coro.__class__ is GeneratorType and coro.gi_code.co_flags & 0x100): - # 'coro' is a native coroutine object or an iterable coroutine - return coro - if (isinstance(coro, _collections_abc.Generator) and - not isinstance(coro, _collections_abc.Coroutine)): - # 'coro' is either a pure Python generator iterator, or it - # implements collections.abc.Generator (and does not implement - # collections.abc.Coroutine). - return _GeneratorWrapper(coro) - # 'coro' is either an instance of collections.abc.Coroutine or - # some other object -- pass it through. - return coro - - return wrapped - -GenericAlias = type(list[int]) -UnionType = type(int | str) - -EllipsisType = type(Ellipsis) -NoneType = type(None) -NotImplementedType = type(NotImplemented) - -def __getattr__(name): - if name == 'CapsuleType': - import _socket - return type(_socket.CAPI) - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") - -__all__ = [n for n in globals() if n[:1] != '_'] -__all__ += ['CapsuleType'] diff --git a/Python313_13_x64_Template/Lib/typing.py b/Python313_13_x64_Template/Lib/typing.py deleted file mode 100644 index cbc6d90e..00000000 --- a/Python313_13_x64_Template/Lib/typing.py +++ /dev/null @@ -1,3834 +0,0 @@ -""" -The typing module: Support for gradual typing as defined by PEP 484 and subsequent PEPs. - -Among other things, the module includes the following: -* Generic, Protocol, and internal machinery to support generic aliases. - All subscripted types like X[int], Union[int, str] are generic aliases. -* Various "special forms" that have unique meanings in type annotations: - NoReturn, Never, ClassVar, Self, Concatenate, Unpack, and others. -* Classes whose instances can be type arguments to generic classes and functions: - TypeVar, ParamSpec, TypeVarTuple. -* Public helper functions: get_type_hints, overload, cast, final, and others. -* Several protocols to support duck-typing: - SupportsFloat, SupportsIndex, SupportsAbs, and others. -* Special types: NewType, NamedTuple, TypedDict. -* Deprecated aliases for builtin types and collections.abc ABCs. - -Any name not present in __all__ is an implementation detail -that may be changed without notice. Use at your own risk! -""" - -from abc import abstractmethod, ABCMeta -import collections -from collections import defaultdict -import collections.abc -import copyreg -import functools -import operator -import sys -import types -from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias - -from _typing import ( - _idfunc, - TypeVar, - ParamSpec, - TypeVarTuple, - ParamSpecArgs, - ParamSpecKwargs, - TypeAliasType, - Generic, - NoDefault, -) - -# Please keep __all__ alphabetized within each category. -__all__ = [ - # Super-special typing primitives. - 'Annotated', - 'Any', - 'Callable', - 'ClassVar', - 'Concatenate', - 'Final', - 'ForwardRef', - 'Generic', - 'Literal', - 'Optional', - 'ParamSpec', - 'Protocol', - 'Tuple', - 'Type', - 'TypeVar', - 'TypeVarTuple', - 'Union', - - # ABCs (from collections.abc). - 'AbstractSet', # collections.abc.Set. 
- 'ByteString', - 'Container', - 'ContextManager', - 'Hashable', - 'ItemsView', - 'Iterable', - 'Iterator', - 'KeysView', - 'Mapping', - 'MappingView', - 'MutableMapping', - 'MutableSequence', - 'MutableSet', - 'Sequence', - 'Sized', - 'ValuesView', - 'Awaitable', - 'AsyncIterator', - 'AsyncIterable', - 'Coroutine', - 'Collection', - 'AsyncGenerator', - 'AsyncContextManager', - - # Structural checks, a.k.a. protocols. - 'Reversible', - 'SupportsAbs', - 'SupportsBytes', - 'SupportsComplex', - 'SupportsFloat', - 'SupportsIndex', - 'SupportsInt', - 'SupportsRound', - - # Concrete collection types. - 'ChainMap', - 'Counter', - 'Deque', - 'Dict', - 'DefaultDict', - 'List', - 'OrderedDict', - 'Set', - 'FrozenSet', - 'NamedTuple', # Not really a type. - 'TypedDict', # Not really a type. - 'Generator', - - # Other concrete types. - 'BinaryIO', - 'IO', - 'Match', - 'Pattern', - 'TextIO', - - # One-off things. - 'AnyStr', - 'assert_type', - 'assert_never', - 'cast', - 'clear_overloads', - 'dataclass_transform', - 'final', - 'get_args', - 'get_origin', - 'get_overloads', - 'get_protocol_members', - 'get_type_hints', - 'is_protocol', - 'is_typeddict', - 'LiteralString', - 'Never', - 'NewType', - 'no_type_check', - 'no_type_check_decorator', - 'NoDefault', - 'NoReturn', - 'NotRequired', - 'overload', - 'override', - 'ParamSpecArgs', - 'ParamSpecKwargs', - 'ReadOnly', - 'Required', - 'reveal_type', - 'runtime_checkable', - 'Self', - 'Text', - 'TYPE_CHECKING', - 'TypeAlias', - 'TypeGuard', - 'TypeIs', - 'TypeAliasType', - 'Unpack', -] - - -def _type_convert(arg, module=None, *, allow_special_forms=False): - """For converting None to type(None), and strings to ForwardRef.""" - if arg is None: - return type(None) - if isinstance(arg, str): - return ForwardRef(arg, module=module, is_class=allow_special_forms) - return arg - - -def _type_check(arg, msg, is_argument=True, module=None, *, allow_special_forms=False): - """Check that the argument is a type, and return it (internal helper). - - As a special case, accept None and return type(None) instead. Also wrap strings - into ForwardRef instances. Consider several corner cases, for example plain - special forms like Union are not valid, while Union[int, str] is OK, etc. - The msg argument is a human-readable error message, e.g.:: - - "Union[arg, ...]: arg should be a type." - - We append the repr() of the actual value (truncated to 100 chars). - """ - invalid_generic_forms = (Generic, Protocol) - if not allow_special_forms: - invalid_generic_forms += (ClassVar,) - if is_argument: - invalid_generic_forms += (Final,) - - arg = _type_convert(arg, module=module, allow_special_forms=allow_special_forms) - if (isinstance(arg, _GenericAlias) and - arg.__origin__ in invalid_generic_forms): - raise TypeError(f"{arg} is not valid as type argument") - if arg in (Any, LiteralString, NoReturn, Never, Self, TypeAlias): - return arg - if allow_special_forms and arg in (ClassVar, Final): - return arg - if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol): - raise TypeError(f"Plain {arg} is not valid as type argument") - if type(arg) is tuple: - raise TypeError(f"{msg} Got {arg!r:.100}.") - return arg - - -def _is_param_expr(arg): - return arg is ... or isinstance(arg, - (tuple, list, ParamSpec, _ConcatenateGenericAlias)) - - -def _should_unflatten_callable_args(typ, args): - """Internal helper for munging collections.abc.Callable's __args__. 
- - The canonical representation for a Callable's __args__ flattens the - argument types, see https://github.com/python/cpython/issues/86361. - - For example:: - - >>> import collections.abc - >>> P = ParamSpec('P') - >>> collections.abc.Callable[[int, int], str].__args__ == (int, int, str) - True - >>> collections.abc.Callable[P, str].__args__ == (P, str) - True - - As a result, if we need to reconstruct the Callable from its __args__, - we need to unflatten it. - """ - return ( - typ.__origin__ is collections.abc.Callable - and not (len(args) == 2 and _is_param_expr(args[0])) - ) - - -def _type_repr(obj): - """Return the repr() of an object, special-casing types (internal helper). - - If obj is a type, we return a shorter version than the default - type.__repr__, based on the module and qualified name, which is - typically enough to uniquely identify a type. For everything - else, we fall back on repr(obj). - """ - # When changing this function, don't forget about - # `_collections_abc._type_repr`, which does the same thing - # and must be consistent with this one. - if isinstance(obj, type): - if obj.__module__ == 'builtins': - return obj.__qualname__ - return f'{obj.__module__}.{obj.__qualname__}' - if obj is ...: - return '...' - if isinstance(obj, types.FunctionType): - return obj.__name__ - if isinstance(obj, tuple): - # Special case for `repr` of types with `ParamSpec`: - return '[' + ', '.join(_type_repr(t) for t in obj) + ']' - return repr(obj) - - -def _collect_type_parameters(args, *, enforce_default_ordering: bool = True): - """Collect all type parameters in args - in order of first appearance (lexicographic order). - - For example:: - - >>> P = ParamSpec('P') - >>> T = TypeVar('T') - >>> _collect_type_parameters((T, Callable[P, T])) - (~T, ~P) - """ - # required type parameter cannot appear after parameter with default - default_encountered = False - # or after TypeVarTuple - type_var_tuple_encountered = False - parameters = [] - for t in args: - if isinstance(t, type): - # We don't want __parameters__ descriptor of a bare Python class. - pass - elif isinstance(t, tuple): - # `t` might be a tuple, when `ParamSpec` is substituted with - # `[T, int]`, or `[int, *Ts]`, etc. - for x in t: - for collected in _collect_type_parameters([x]): - if collected not in parameters: - parameters.append(collected) - elif hasattr(t, '__typing_subst__'): - if t not in parameters: - if enforce_default_ordering: - if type_var_tuple_encountered and t.has_default(): - raise TypeError('Type parameter with a default' - ' follows TypeVarTuple') - - if t.has_default(): - default_encountered = True - elif default_encountered: - raise TypeError(f'Type parameter {t!r} without a default' - ' follows type parameter with a default') - - parameters.append(t) - else: - if _is_unpacked_typevartuple(t): - type_var_tuple_encountered = True - for x in getattr(t, '__parameters__', ()): - if x not in parameters: - parameters.append(x) - return tuple(parameters) - - -def _check_generic_specialization(cls, arguments): - """Check correct count for parameters of a generic cls (internal helper). - - This gives a nice error message in case of count mismatch. 
- """ - expected_len = len(cls.__parameters__) - if not expected_len: - raise TypeError(f"{cls} is not a generic class") - actual_len = len(arguments) - if actual_len != expected_len: - # deal with defaults - if actual_len < expected_len: - # If the parameter at index `actual_len` in the parameters list - # has a default, then all parameters after it must also have - # one, because we validated as much in _collect_type_parameters(). - # That means that no error needs to be raised here, despite - # the number of arguments being passed not matching the number - # of parameters: all parameters that aren't explicitly - # specialized in this call are parameters with default values. - if cls.__parameters__[actual_len].has_default(): - return - - expected_len -= sum(p.has_default() for p in cls.__parameters__) - expect_val = f"at least {expected_len}" - else: - expect_val = expected_len - - raise TypeError(f"Too {'many' if actual_len > expected_len else 'few'} arguments" - f" for {cls}; actual {actual_len}, expected {expect_val}") - - -def _unpack_args(*args): - newargs = [] - for arg in args: - subargs = getattr(arg, '__typing_unpacked_tuple_args__', None) - if subargs is not None and not (subargs and subargs[-1] is ...): - newargs.extend(subargs) - else: - newargs.append(arg) - return newargs - -def _deduplicate(params, *, unhashable_fallback=False): - # Weed out strict duplicates, preserving the first of each occurrence. - try: - return dict.fromkeys(params) - except TypeError: - if not unhashable_fallback: - raise - # Happens for cases like `Annotated[dict, {'x': IntValidator()}]` - return _deduplicate_unhashable(params) - -def _deduplicate_unhashable(unhashable_params): - new_unhashable = [] - for t in unhashable_params: - if t not in new_unhashable: - new_unhashable.append(t) - return new_unhashable - -def _compare_args_orderless(first_args, second_args): - first_unhashable = _deduplicate_unhashable(first_args) - second_unhashable = _deduplicate_unhashable(second_args) - t = list(second_unhashable) - try: - for elem in first_unhashable: - t.remove(elem) - except ValueError: - return False - return not t - -def _remove_dups_flatten(parameters): - """Internal helper for Union creation and substitution. - - Flatten Unions among parameters, then remove duplicates. - """ - # Flatten out Union[Union[...], ...]. - params = [] - for p in parameters: - if isinstance(p, (_UnionGenericAlias, types.UnionType)): - params.extend(p.__args__) - else: - params.append(p) - - return tuple(_deduplicate(params, unhashable_fallback=True)) - - -def _flatten_literal_params(parameters): - """Internal helper for Literal creation: flatten Literals among parameters.""" - params = [] - for p in parameters: - if isinstance(p, _LiteralGenericAlias): - params.extend(p.__args__) - else: - params.append(p) - return tuple(params) - - -_cleanups = [] -_caches = {} - - -def _tp_cache(func=None, /, *, typed=False): - """Internal wrapper caching __getitem__ of generic types. - - For non-hashable arguments, the original function is used as a fallback. - """ - def decorator(func): - # The callback 'inner' references the newly created lru_cache - # indirectly by performing a lookup in the global '_caches' dictionary. - # This breaks a reference that can be problematic when combined with - # C API extensions that leak references to types. See GH-98253. 
- - cache = functools.lru_cache(typed=typed)(func) - _caches[func] = cache - _cleanups.append(cache.cache_clear) - del cache - - @functools.wraps(func) - def inner(*args, **kwds): - try: - return _caches[func](*args, **kwds) - except TypeError: - pass # All real errors (not unhashable args) are raised below. - return func(*args, **kwds) - return inner - - if func is not None: - return decorator(func) - - return decorator - - -def _deprecation_warning_for_no_type_params_passed(funcname: str) -> None: - import warnings - - depr_message = ( - f"Failing to pass a value to the 'type_params' parameter " - f"of {funcname!r} is deprecated, as it leads to incorrect behaviour " - f"when calling {funcname} on a stringified annotation " - f"that references a PEP 695 type parameter. " - f"It will be disallowed in Python 3.15." - ) - warnings.warn(depr_message, category=DeprecationWarning, stacklevel=3) - - -class _Sentinel: - __slots__ = () - def __repr__(self): - return '' - - -_sentinel = _Sentinel() - - -def _eval_type(t, globalns, localns, type_params=_sentinel, *, recursive_guard=frozenset()): - """Evaluate all forward references in the given type t. - - For use of globalns and localns see the docstring for get_type_hints(). - recursive_guard is used to prevent infinite recursion with a recursive - ForwardRef. - """ - if type_params is _sentinel: - _deprecation_warning_for_no_type_params_passed("typing._eval_type") - type_params = () - if isinstance(t, ForwardRef): - return t._evaluate(globalns, localns, type_params, recursive_guard=recursive_guard) - if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)): - if isinstance(t, GenericAlias): - args = tuple( - ForwardRef(arg) if isinstance(arg, str) else arg - for arg in t.__args__ - ) - is_unpacked = t.__unpacked__ - if _should_unflatten_callable_args(t, args): - t = t.__origin__[(args[:-1], args[-1])] - else: - t = t.__origin__[args] - if is_unpacked: - t = Unpack[t] - - ev_args = tuple( - _eval_type( - a, globalns, localns, type_params, recursive_guard=recursive_guard - ) - for a in t.__args__ - ) - if ev_args == t.__args__: - return t - if isinstance(t, GenericAlias): - return GenericAlias(t.__origin__, ev_args) - if isinstance(t, types.UnionType): - return functools.reduce(operator.or_, ev_args) - else: - return t.copy_with(ev_args) - return t - - -class _Final: - """Mixin to prohibit subclassing.""" - - __slots__ = ('__weakref__',) - - def __init_subclass__(cls, /, *args, **kwds): - if '_root' not in kwds: - raise TypeError("Cannot subclass special typing classes") - - -class _NotIterable: - """Mixin to prevent iteration, without being compatible with Iterable. - - That is, we could do:: - - def __iter__(self): raise TypeError() - - But this would make users of this mixin duck type-compatible with - collections.abc.Iterable - isinstance(foo, Iterable) would be True. - - Luckily, we can instead prevent iteration by setting __iter__ to None, which - is treated specially. - """ - - __slots__ = () - __iter__ = None - - -# Internal indicator of special typing constructs. -# See __doc__ instance attribute for specific docs. 
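# Editor's note, a hedged sketch of the class defined next: special forms
# such as Union and Optional are *instances* of _SpecialForm, not classes,
# and subscription dispatches to the decorated handler function. Observable
# with public names only:
#
#     from typing import Optional, Union
#
#     # Optional[X] is defined further below as Union[X, None].
#     assert Optional[int] == Union[int, None]
#
#     # Plain special forms reject runtime checks by design:
#     try:
#         isinstance(3, Optional)
#     except TypeError:
#         pass    # __instancecheck__ raises for special forms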
-class _SpecialForm(_Final, _NotIterable, _root=True): - __slots__ = ('_name', '__doc__', '_getitem') - - def __init__(self, getitem): - self._getitem = getitem - self._name = getitem.__name__ - self.__doc__ = getitem.__doc__ - - def __getattr__(self, item): - if item in {'__name__', '__qualname__'}: - return self._name - - raise AttributeError(item) - - def __mro_entries__(self, bases): - raise TypeError(f"Cannot subclass {self!r}") - - def __repr__(self): - return 'typing.' + self._name - - def __reduce__(self): - return self._name - - def __call__(self, *args, **kwds): - raise TypeError(f"Cannot instantiate {self!r}") - - def __or__(self, other): - return Union[self, other] - - def __ror__(self, other): - return Union[other, self] - - def __instancecheck__(self, obj): - raise TypeError(f"{self} cannot be used with isinstance()") - - def __subclasscheck__(self, cls): - raise TypeError(f"{self} cannot be used with issubclass()") - - @_tp_cache - def __getitem__(self, parameters): - return self._getitem(self, parameters) - - -class _TypedCacheSpecialForm(_SpecialForm, _root=True): - def __getitem__(self, parameters): - if not isinstance(parameters, tuple): - parameters = (parameters,) - return self._getitem(self, *parameters) - - -class _AnyMeta(type): - def __instancecheck__(self, obj): - if self is Any: - raise TypeError("typing.Any cannot be used with isinstance()") - return super().__instancecheck__(obj) - - def __repr__(self): - if self is Any: - return "typing.Any" - return super().__repr__() # respect to subclasses - - -class Any(metaclass=_AnyMeta): - """Special type indicating an unconstrained type. - - - Any is compatible with every type. - - Any assumed to have all methods. - - All values assumed to be instances of Any. - - Note that all the above statements are true from the point of view of - static type checkers. At runtime, Any should not be used with instance - checks. - """ - - def __new__(cls, *args, **kwargs): - if cls is Any: - raise TypeError("Any cannot be instantiated") - return super().__new__(cls) - - -@_SpecialForm -def NoReturn(self, parameters): - """Special type indicating functions that never return. - - Example:: - - from typing import NoReturn - - def stop() -> NoReturn: - raise Exception('no way') - - NoReturn can also be used as a bottom type, a type that - has no values. Starting in Python 3.11, the Never type should - be used for this concept instead. Type checkers should treat the two - equivalently. - """ - raise TypeError(f"{self} is not subscriptable") - -# This is semantically identical to NoReturn, but it is implemented -# separately so that type checkers can distinguish between the two -# if they want. -@_SpecialForm -def Never(self, parameters): - """The bottom type, a type that has no members. - - This can be used to define a function that should never be - called, or a function that never returns:: - - from typing import Never - - def never_call_me(arg: Never) -> None: - pass - - def int_or_str(arg: int | str) -> None: - never_call_me(arg) # type checker error - match arg: - case int(): - print("It's an int") - case str(): - print("It's a str") - case _: - never_call_me(arg) # OK, arg is of type Never - """ - raise TypeError(f"{self} is not subscriptable") - - -@_SpecialForm -def Self(self, parameters): - """Used to spell the type of "self" in classes. - - Example:: - - from typing import Self - - class Foo: - def return_self(self) -> Self: - ... 
- return self - - This is especially useful for: - - classmethods that are used as alternative constructors - - annotating an `__enter__` method which returns self - """ - raise TypeError(f"{self} is not subscriptable") - - -@_SpecialForm -def LiteralString(self, parameters): - """Represents an arbitrary literal string. - - Example:: - - from typing import LiteralString - - def run_query(sql: LiteralString) -> None: - ... - - def caller(arbitrary_string: str, literal_string: LiteralString) -> None: - run_query("SELECT * FROM students") # OK - run_query(literal_string) # OK - run_query("SELECT * FROM " + literal_string) # OK - run_query(arbitrary_string) # type checker error - run_query( # type checker error - f"SELECT * FROM students WHERE name = {arbitrary_string}" - ) - - Only string literals and other LiteralStrings are compatible - with LiteralString. This provides a tool to help prevent - security issues such as SQL injection. - """ - raise TypeError(f"{self} is not subscriptable") - - -@_SpecialForm -def ClassVar(self, parameters): - """Special type construct to mark class variables. - - An annotation wrapped in ClassVar indicates that a given - attribute is intended to be used as a class variable and - should not be set on instances of that class. - - Usage:: - - class Starship: - stats: ClassVar[dict[str, int]] = {} # class variable - damage: int = 10 # instance variable - - ClassVar accepts only types and cannot be further subscribed. - - Note that ClassVar is not a class itself, and should not - be used with isinstance() or issubclass(). - """ - item = _type_check(parameters, f'{self} accepts only single type.', allow_special_forms=True) - return _GenericAlias(self, (item,)) - -@_SpecialForm -def Final(self, parameters): - """Special typing construct to indicate final names to type checkers. - - A final name cannot be re-assigned or overridden in a subclass. - - For example:: - - MAX_SIZE: Final = 9000 - MAX_SIZE += 1 # Error reported by type checker - - class Connection: - TIMEOUT: Final[int] = 10 - - class FastConnector(Connection): - TIMEOUT = 1 # Error reported by type checker - - There is no runtime checking of these properties. - """ - item = _type_check(parameters, f'{self} accepts only single type.', allow_special_forms=True) - return _GenericAlias(self, (item,)) - -@_SpecialForm -def Union(self, parameters): - """Union type; Union[X, Y] means either X or Y. - - On Python 3.10 and higher, the | operator - can also be used to denote unions; - X | Y means the same thing to the type checker as Union[X, Y]. - - To define a union, use e.g. Union[int, str]. Details: - - The arguments must be types and there must be at least one. - - None as an argument is a special case and is replaced by - type(None). - - Unions of unions are flattened, e.g.:: - - assert Union[Union[int, str], float] == Union[int, str, float] - - - Unions of a single argument vanish, e.g.:: - - assert Union[int] == int # The constructor actually returns int - - - Redundant arguments are skipped, e.g.:: - - assert Union[int, str, int] == Union[int, str] - - - When comparing unions, the argument order is ignored, e.g.:: - - assert Union[int, str] == Union[str, int] - - - You cannot subclass or instantiate a union. - - You can use Optional[X] as a shorthand for Union[X, None]. - """ - if parameters == (): - raise TypeError("Cannot take a Union of no types.") - if not isinstance(parameters, tuple): - parameters = (parameters,) - msg = "Union[arg, ...]: each arg must be a type." 
- parameters = tuple(_type_check(p, msg) for p in parameters) - parameters = _remove_dups_flatten(parameters) - if len(parameters) == 1: - return parameters[0] - if len(parameters) == 2 and type(None) in parameters: - return _UnionGenericAlias(self, parameters, name="Optional") - return _UnionGenericAlias(self, parameters) - -def _make_union(left, right): - """Used from the C implementation of TypeVar. - - TypeVar.__or__ calls this instead of returning types.UnionType - because we want to allow unions between TypeVars and strings - (forward references). - """ - return Union[left, right] - -@_SpecialForm -def Optional(self, parameters): - """Optional[X] is equivalent to Union[X, None].""" - arg = _type_check(parameters, f"{self} requires a single type.") - return Union[arg, type(None)] - -@_TypedCacheSpecialForm -@_tp_cache(typed=True) -def Literal(self, *parameters): - """Special typing form to define literal types (a.k.a. value types). - - This form can be used to indicate to type checkers that the corresponding - variable or function parameter has a value equivalent to the provided - literal (or one of several literals):: - - def validate_simple(data: Any) -> Literal[True]: # always returns True - ... - - MODE = Literal['r', 'rb', 'w', 'wb'] - def open_helper(file: str, mode: MODE) -> str: - ... - - open_helper('/some/path', 'r') # Passes type check - open_helper('/other/path', 'typo') # Error in type checker - - Literal[...] cannot be subclassed. At runtime, an arbitrary value - is allowed as type argument to Literal[...], but type checkers may - impose restrictions. - """ - # There is no '_type_check' call because arguments to Literal[...] are - # values, not types. - parameters = _flatten_literal_params(parameters) - - try: - parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters)))) - except TypeError: # unhashable parameters - pass - - return _LiteralGenericAlias(self, parameters) - - -@_SpecialForm -def TypeAlias(self, parameters): - """Special form for marking type aliases. - - Use TypeAlias to indicate that an assignment should - be recognized as a proper type alias definition by type - checkers. - - For example:: - - Predicate: TypeAlias = Callable[..., bool] - - It's invalid when used anywhere except as in the example above. - """ - raise TypeError(f"{self} is not subscriptable") - - -@_SpecialForm -def Concatenate(self, parameters): - """Special form for annotating higher-order functions. - - ``Concatenate`` can be used in conjunction with ``ParamSpec`` and - ``Callable`` to represent a higher-order function which adds, removes or - transforms the parameters of a callable. - - For example:: - - Callable[Concatenate[int, P], int] - - See PEP 612 for detailed information. - """ - if parameters == (): - raise TypeError("Cannot take a Concatenate of no types.") - if not isinstance(parameters, tuple): - parameters = (parameters,) - if not (parameters[-1] is ... or isinstance(parameters[-1], ParamSpec)): - raise TypeError("The last parameter to Concatenate should be a " - "ParamSpec variable or ellipsis.") - msg = "Concatenate[arg, ...]: each arg must be a type." - parameters = (*(_type_check(p, msg) for p in parameters[:-1]), parameters[-1]) - return _ConcatenateGenericAlias(self, parameters) - - -@_SpecialForm -def TypeGuard(self, parameters): - """Special typing construct for marking user-defined type predicate functions. - - ``TypeGuard`` can be used to annotate the return type of a user-defined - type predicate function. 
``TypeGuard`` only accepts a single type argument. - At runtime, functions marked this way should return a boolean. - - ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static - type checkers to determine a more precise type of an expression within a - program's code flow. Usually type narrowing is done by analyzing - conditional code flow and applying the narrowing to a block of code. The - conditional expression here is sometimes referred to as a "type predicate". - - Sometimes it would be convenient to use a user-defined boolean function - as a type predicate. Such a function should use ``TypeGuard[...]`` or - ``TypeIs[...]`` as its return type to alert static type checkers to - this intention. ``TypeGuard`` should be used over ``TypeIs`` when narrowing - from an incompatible type (e.g., ``list[object]`` to ``list[int]``) or when - the function does not return ``True`` for all instances of the narrowed type. - - Using ``-> TypeGuard[NarrowedType]`` tells the static type checker that - for a given function: - - 1. The return value is a boolean. - 2. If the return value is ``True``, the type of its argument - is ``NarrowedType``. - - For example:: - - def is_str_list(val: list[object]) -> TypeGuard[list[str]]: - '''Determines whether all objects in the list are strings''' - return all(isinstance(x, str) for x in val) - - def func1(val: list[object]): - if is_str_list(val): - # Type of ``val`` is narrowed to ``list[str]``. - print(" ".join(val)) - else: - # Type of ``val`` remains as ``list[object]``. - print("Not a list of strings!") - - Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower - form of ``TypeA`` (it can even be a wider form) and this may lead to - type-unsafe results. The main reason is to allow for things like - narrowing ``list[object]`` to ``list[str]`` even though the latter is not - a subtype of the former, since ``list`` is invariant. The responsibility of - writing type-safe type predicates is left to the user. - - ``TypeGuard`` also works with type variables. For more information, see - PEP 647 (User-Defined Type Guards). - """ - item = _type_check(parameters, f'{self} accepts only single type.') - return _GenericAlias(self, (item,)) - - -@_SpecialForm -def TypeIs(self, parameters): - """Special typing construct for marking user-defined type predicate functions. - - ``TypeIs`` can be used to annotate the return type of a user-defined - type predicate function. ``TypeIs`` only accepts a single type argument. - At runtime, functions marked this way should return a boolean and accept - at least one argument. - - ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static - type checkers to determine a more precise type of an expression within a - program's code flow. Usually type narrowing is done by analyzing - conditional code flow and applying the narrowing to a block of code. The - conditional expression here is sometimes referred to as a "type predicate". - - Sometimes it would be convenient to use a user-defined boolean function - as a type predicate. Such a function should use ``TypeIs[...]`` or - ``TypeGuard[...]`` as its return type to alert static type checkers to - this intention. ``TypeIs`` usually has more intuitive behavior than - ``TypeGuard``, but it cannot be used when the input and output types - are incompatible (e.g., ``list[object]`` to ``list[int]``) or when the - function does not return ``True`` for all instances of the narrowed type. 
- - Using ``-> TypeIs[NarrowedType]`` tells the static type checker that for - a given function: - - 1. The return value is a boolean. - 2. If the return value is ``True``, the type of its argument - is the intersection of the argument's original type and - ``NarrowedType``. - 3. If the return value is ``False``, the type of its argument - is narrowed to exclude ``NarrowedType``. - - For example:: - - from typing import assert_type, final, TypeIs - - class Parent: pass - class Child(Parent): pass - @final - class Unrelated: pass - - def is_parent(val: object) -> TypeIs[Parent]: - return isinstance(val, Parent) - - def run(arg: Child | Unrelated): - if is_parent(arg): - # Type of ``arg`` is narrowed to the intersection - # of ``Parent`` and ``Child``, which is equivalent to - # ``Child``. - assert_type(arg, Child) - else: - # Type of ``arg`` is narrowed to exclude ``Parent``, - # so only ``Unrelated`` is left. - assert_type(arg, Unrelated) - - The type inside ``TypeIs`` must be consistent with the type of the - function's argument; if it is not, static type checkers will raise - an error. An incorrectly written ``TypeIs`` function can lead to - unsound behavior in the type system; it is the user's responsibility - to write such functions in a type-safe manner. - - ``TypeIs`` also works with type variables. For more information, see - PEP 742 (Narrowing types with ``TypeIs``). - """ - item = _type_check(parameters, f'{self} accepts only single type.') - return _GenericAlias(self, (item,)) - - -class ForwardRef(_Final, _root=True): - """Internal wrapper to hold a forward reference.""" - - __slots__ = ('__forward_arg__', '__forward_code__', - '__forward_evaluated__', '__forward_value__', - '__forward_is_argument__', '__forward_is_class__', - '__forward_module__') - - def __init__(self, arg, is_argument=True, module=None, *, is_class=False): - if not isinstance(arg, str): - raise TypeError(f"Forward reference must be a string -- got {arg!r}") - - try: - code = compile(_rewrite_star_unpack(arg), '', 'eval') - except SyntaxError: - raise SyntaxError(f"Forward reference must be an expression -- got {arg!r}") - - self.__forward_arg__ = arg - self.__forward_code__ = code - self.__forward_evaluated__ = False - self.__forward_value__ = None - self.__forward_is_argument__ = is_argument - self.__forward_is_class__ = is_class - self.__forward_module__ = module - - def _evaluate(self, globalns, localns, type_params=_sentinel, *, recursive_guard): - if type_params is _sentinel: - _deprecation_warning_for_no_type_params_passed("typing.ForwardRef._evaluate") - type_params = () - if self.__forward_arg__ in recursive_guard: - return self - if not self.__forward_evaluated__ or localns is not globalns: - if globalns is None and localns is None: - globalns = localns = {} - elif globalns is None: - globalns = localns - elif localns is None: - localns = globalns - if self.__forward_module__ is not None: - globalns = getattr( - sys.modules.get(self.__forward_module__, None), '__dict__', globalns - ) - - # type parameters require some special handling, - # as they exist in their own scope - # but `eval()` does not have a dedicated parameter for that scope. - # For classes, names in type parameter scopes should override - # names in the global scope (which here are called `localns`!), - # but should in turn be overridden by names in the class scope - # (which here are called `globalns`!) 
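# Editor's note, a hedged illustration of why this layering matters: with
# PEP 695 syntax a stringified annotation in a class body can reference a
# type parameter, and get_type_hints() resolves it through this code path:
#
#     import typing
#
#     class Box[T]:
#         content: "list[T]"
#
#     typing.get_type_hints(Box)   # {'content': list[T]}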
- if type_params: - globalns, localns = dict(globalns), dict(localns) - for param in type_params: - param_name = param.__name__ - if not self.__forward_is_class__ or param_name not in globalns: - globalns[param_name] = param - localns.pop(param_name, None) - - type_ = _type_check( - eval(self.__forward_code__, globalns, localns), - "Forward references must evaluate to types.", - is_argument=self.__forward_is_argument__, - allow_special_forms=self.__forward_is_class__, - ) - self.__forward_value__ = _eval_type( - type_, - globalns, - localns, - type_params, - recursive_guard=(recursive_guard | {self.__forward_arg__}), - ) - self.__forward_evaluated__ = True - return self.__forward_value__ - - def __eq__(self, other): - if not isinstance(other, ForwardRef): - return NotImplemented - if self.__forward_evaluated__ and other.__forward_evaluated__: - return (self.__forward_arg__ == other.__forward_arg__ and - self.__forward_value__ == other.__forward_value__) - return (self.__forward_arg__ == other.__forward_arg__ and - self.__forward_module__ == other.__forward_module__) - - def __hash__(self): - return hash((self.__forward_arg__, self.__forward_module__)) - - def __or__(self, other): - return Union[self, other] - - def __ror__(self, other): - return Union[other, self] - - def __repr__(self): - if self.__forward_module__ is None: - module_repr = '' - else: - module_repr = f', module={self.__forward_module__!r}' - return f'ForwardRef({self.__forward_arg__!r}{module_repr})' - - -def _rewrite_star_unpack(arg): - """If the given argument annotation expression is a star unpack e.g. `'*Ts'` - rewrite it to a valid expression. - """ - if arg.startswith("*"): - return f"({arg},)[0]" # E.g. (*Ts,)[0] or (*tuple[int, int],)[0] - else: - return arg - - -def _is_unpacked_typevartuple(x: Any) -> bool: - return ((not isinstance(x, type)) and - getattr(x, '__typing_is_unpacked_typevartuple__', False)) - - -def _is_typevar_like(x: Any) -> bool: - return isinstance(x, (TypeVar, ParamSpec)) or _is_unpacked_typevartuple(x) - - -def _typevar_subst(self, arg): - msg = "Parameters to generic types must be types." 
- arg = _type_check(arg, msg, is_argument=True) - if ((isinstance(arg, _GenericAlias) and arg.__origin__ is Unpack) or - (isinstance(arg, GenericAlias) and getattr(arg, '__unpacked__', False))): - raise TypeError(f"{arg} is not valid as type argument") - return arg - - -def _typevartuple_prepare_subst(self, alias, args): - params = alias.__parameters__ - typevartuple_index = params.index(self) - for param in params[typevartuple_index + 1:]: - if isinstance(param, TypeVarTuple): - raise TypeError(f"More than one TypeVarTuple parameter in {alias}") - - alen = len(args) - plen = len(params) - left = typevartuple_index - right = plen - typevartuple_index - 1 - var_tuple_index = None - fillarg = None - for k, arg in enumerate(args): - if not isinstance(arg, type): - subargs = getattr(arg, '__typing_unpacked_tuple_args__', None) - if subargs and len(subargs) == 2 and subargs[-1] is ...: - if var_tuple_index is not None: - raise TypeError("More than one unpacked arbitrary-length tuple argument") - var_tuple_index = k - fillarg = subargs[0] - if var_tuple_index is not None: - left = min(left, var_tuple_index) - right = min(right, alen - var_tuple_index - 1) - elif left + right > alen: - raise TypeError(f"Too few arguments for {alias};" - f" actual {alen}, expected at least {plen-1}") - if left == alen - right and self.has_default(): - replacement = _unpack_args(self.__default__) - else: - replacement = args[left: alen - right] - - return ( - *args[:left], - *([fillarg]*(typevartuple_index - left)), - replacement, - *([fillarg]*(plen - right - left - typevartuple_index - 1)), - *args[alen - right:], - ) - - -def _paramspec_subst(self, arg): - if isinstance(arg, (list, tuple)): - arg = tuple(_type_check(a, "Expected a type.") for a in arg) - elif not _is_param_expr(arg): - raise TypeError(f"Expected a list of types, an ellipsis, " - f"ParamSpec, or Concatenate. Got {arg}") - return arg - - -def _paramspec_prepare_subst(self, alias, args): - params = alias.__parameters__ - i = params.index(self) - if i == len(args) and self.has_default(): - args = (*args, self.__default__) - if i >= len(args): - raise TypeError(f"Too few arguments for {alias}") - # Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612. - if len(params) == 1 and not _is_param_expr(args[0]): - assert i == 0 - args = (args,) - # Convert lists to tuples to help other libraries cache the results. - elif isinstance(args[i], list): - args = (*args[:i], tuple(args[i]), *args[i+1:]) - return args - - -@_tp_cache -def _generic_class_getitem(cls, args): - """Parameterizes a generic class. - - At least, parameterizing a generic class is the *main* thing this method - does. For example, for some generic class `Foo`, this is called when we - do `Foo[int]` - there, with `cls=Foo` and `args=int`. - - However, note that this method is also called when defining generic - classes in the first place with `class Foo(Generic[T]): ...`. - """ - if not isinstance(args, tuple): - args = (args,) - - args = tuple(_type_convert(p) for p in args) - is_generic_or_protocol = cls in (Generic, Protocol) - - if is_generic_or_protocol: - # Generic and Protocol can only be subscripted with unique type variables. - if not args: - raise TypeError( - f"Parameter list to {cls.__qualname__}[...] cannot be empty" - ) - if not all(_is_typevar_like(p) for p in args): - raise TypeError( - f"Parameters to {cls.__name__}[...] 
must all be type variables " - f"or parameter specification variables.") - if len(set(args)) != len(args): - raise TypeError( - f"Parameters to {cls.__name__}[...] must all be unique") - else: - # Subscripting a regular Generic subclass. - try: - parameters = cls.__parameters__ - except AttributeError as e: - init_subclass = getattr(cls, '__init_subclass__', None) - if init_subclass not in {None, Generic.__init_subclass__}: - e.add_note( - f"Note: this exception may have been caused by " - f"{init_subclass.__qualname__!r} (or the " - f"'__init_subclass__' method on a superclass) not " - f"calling 'super().__init_subclass__()'" - ) - raise - for param in parameters: - prepare = getattr(param, '__typing_prepare_subst__', None) - if prepare is not None: - args = prepare(cls, args) - _check_generic_specialization(cls, args) - - new_args = [] - for param, new_arg in zip(parameters, args): - if isinstance(param, TypeVarTuple): - new_args.extend(new_arg) - else: - new_args.append(new_arg) - args = tuple(new_args) - - return _GenericAlias(cls, args) - - -def _generic_init_subclass(cls, *args, **kwargs): - super(Generic, cls).__init_subclass__(*args, **kwargs) - tvars = [] - if '__orig_bases__' in cls.__dict__: - error = Generic in cls.__orig_bases__ - else: - error = (Generic in cls.__bases__ and - cls.__name__ != 'Protocol' and - type(cls) != _TypedDictMeta) - if error: - raise TypeError("Cannot inherit from plain Generic") - if '__orig_bases__' in cls.__dict__: - tvars = _collect_type_parameters(cls.__orig_bases__) - # Look for Generic[T1, ..., Tn]. - # If found, tvars must be a subset of it. - # If not found, tvars is it. - # Also check for and reject plain Generic, - # and reject multiple Generic[...]. - gvars = None - for base in cls.__orig_bases__: - if (isinstance(base, _GenericAlias) and - base.__origin__ is Generic): - if gvars is not None: - raise TypeError( - "Cannot inherit from Generic[...] multiple times.") - gvars = base.__parameters__ - if gvars is not None: - tvarset = set(tvars) - gvarset = set(gvars) - if not tvarset <= gvarset: - s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) - s_args = ', '.join(str(g) for g in gvars) - raise TypeError(f"Some type variables ({s_vars}) are" - f" not listed in Generic[{s_args}]") - tvars = gvars - cls.__parameters__ = tuple(tvars) - - -def _is_dunder(attr): - return attr.startswith('__') and attr.endswith('__') - -class _BaseGenericAlias(_Final, _root=True): - """The central part of the internal API. - - This represents a generic version of type 'origin' with type arguments 'params'. - There are two kind of these aliases: user defined and special. The special ones - are wrappers around builtin collections and ABCs in collections.abc. These must - have 'name' always set. If 'inst' is False, then the alias can't be instantiated; - this is used by e.g. typing.List and typing.Dict. - """ - - def __init__(self, origin, *, inst=True, name=None): - self._inst = inst - self._name = name - self.__origin__ = origin - self.__slots__ = None # This is not documented. 
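# Editor's note, a hedged sketch of the __instancecheck__/__subclasscheck__
# contract implemented further down in this class, shown with a public alias:
#
#     from typing import List
#
#     isinstance([], List)         # True: the unsubscripted alias defers to list
#     try:
#         isinstance([], List[int])
#     except TypeError:
#         pass   # subscripted generics cannot be used with instance checks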
- - def __call__(self, *args, **kwargs): - if not self._inst: - raise TypeError(f"Type {self._name} cannot be instantiated; " - f"use {self.__origin__.__name__}() instead") - result = self.__origin__(*args, **kwargs) - try: - result.__orig_class__ = self - # Some objects raise TypeError (or something even more exotic) - # if you try to set attributes on them; we guard against that here - except Exception: - pass - return result - - def __mro_entries__(self, bases): - res = [] - if self.__origin__ not in bases: - res.append(self.__origin__) - - # Check if any base that occurs after us in `bases` is either itself a - # subclass of Generic, or something which will add a subclass of Generic - # to `__bases__` via its `__mro_entries__`. If not, add Generic - # ourselves. The goal is to ensure that Generic (or a subclass) will - # appear exactly once in the final bases tuple. If we let it appear - # multiple times, we risk "can't form a consistent MRO" errors. - i = bases.index(self) - for b in bases[i+1:]: - if isinstance(b, _BaseGenericAlias): - break - if not isinstance(b, type): - meth = getattr(b, "__mro_entries__", None) - new_bases = meth(bases) if meth else None - if ( - isinstance(new_bases, tuple) and - any( - isinstance(b2, type) and issubclass(b2, Generic) - for b2 in new_bases - ) - ): - break - elif issubclass(b, Generic): - break - else: - res.append(Generic) - return tuple(res) - - def __getattr__(self, attr): - if attr in {'__name__', '__qualname__'}: - return self._name or self.__origin__.__name__ - - # We are careful for copy and pickle. - # Also for simplicity we don't relay any dunder names - if '__origin__' in self.__dict__ and not _is_dunder(attr): - return getattr(self.__origin__, attr) - raise AttributeError(attr) - - def __setattr__(self, attr, val): - if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams', '_defaults'}: - super().__setattr__(attr, val) - else: - setattr(self.__origin__, attr, val) - - def __instancecheck__(self, obj): - return self.__subclasscheck__(type(obj)) - - def __subclasscheck__(self, cls): - raise TypeError("Subscripted generics cannot be used with" - " class and instance checks") - - def __dir__(self): - return list(set(super().__dir__() - + [attr for attr in dir(self.__origin__) if not _is_dunder(attr)])) - - -# Special typing constructs Union, Optional, Generic, Callable and Tuple -# use three special attributes for internal bookkeeping of generic types: -# * __parameters__ is a tuple of unique free type parameters of a generic -# type, for example, Dict[T, T].__parameters__ == (T,); -# * __origin__ keeps a reference to a type that was subscripted, -# e.g., Union[T, int].__origin__ == Union, or the non-generic version of -# the type. -# * __args__ is a tuple of all arguments used in subscripting, -# e.g., Dict[T, int].__args__ == (T, int). - - -class _GenericAlias(_BaseGenericAlias, _root=True): - # The type of parameterized generics. - # - # That is, for example, `type(List[int])` is `_GenericAlias`. - # - # Objects which are instances of this class include: - # * Parameterized container types, e.g. `Tuple[int]`, `List[int]`. - # * Note that native container types, e.g. `tuple`, `list`, use - # `types.GenericAlias` instead. - # * Parameterized classes: - # class C[T]: pass - # # C[int] is a _GenericAlias - # * `Callable` aliases, generic `Callable` aliases, and - # parameterized `Callable` aliases: - # T = TypeVar('T') - # # _CallableGenericAlias inherits from _GenericAlias. 
- # A = Callable[[], None] # _CallableGenericAlias - # B = Callable[[T], None] # _CallableGenericAlias - # C = B[int] # _CallableGenericAlias - # * Parameterized `Final`, `ClassVar`, `TypeGuard`, and `TypeIs`: - # # All _GenericAlias - # Final[int] - # ClassVar[float] - # TypeGuard[bool] - # TypeIs[range] - - def __init__(self, origin, args, *, inst=True, name=None): - super().__init__(origin, inst=inst, name=name) - if not isinstance(args, tuple): - args = (args,) - self.__args__ = tuple(... if a is _TypingEllipsis else - a for a in args) - enforce_default_ordering = origin in (Generic, Protocol) - self.__parameters__ = _collect_type_parameters( - args, - enforce_default_ordering=enforce_default_ordering, - ) - if not name: - self.__module__ = origin.__module__ - - def __eq__(self, other): - if not isinstance(other, _GenericAlias): - return NotImplemented - return (self.__origin__ == other.__origin__ - and self.__args__ == other.__args__) - - def __hash__(self): - return hash((self.__origin__, self.__args__)) - - def __or__(self, right): - return Union[self, right] - - def __ror__(self, left): - return Union[left, self] - - @_tp_cache - def __getitem__(self, args): - # Parameterizes an already-parameterized object. - # - # For example, we arrive here doing something like: - # T1 = TypeVar('T1') - # T2 = TypeVar('T2') - # T3 = TypeVar('T3') - # class A(Generic[T1]): pass - # B = A[T2] # B is a _GenericAlias - # C = B[T3] # Invokes _GenericAlias.__getitem__ - # - # We also arrive here when parameterizing a generic `Callable` alias: - # T = TypeVar('T') - # C = Callable[[T], None] - # C[int] # Invokes _GenericAlias.__getitem__ - - if self.__origin__ in (Generic, Protocol): - # Can't subscript Generic[...] or Protocol[...]. - raise TypeError(f"Cannot subscript already-subscripted {self}") - if not self.__parameters__: - raise TypeError(f"{self} is not a generic class") - - # Preprocess `args`. - if not isinstance(args, tuple): - args = (args,) - args = _unpack_args(*(_type_convert(p) for p in args)) - new_args = self._determine_new_args(args) - r = self.copy_with(new_args) - return r - - def _determine_new_args(self, args): - # Determines new __args__ for __getitem__. - # - # For example, suppose we had: - # T1 = TypeVar('T1') - # T2 = TypeVar('T2') - # class A(Generic[T1, T2]): pass - # T3 = TypeVar('T3') - # B = A[int, T3] - # C = B[str] - # `B.__args__` is `(int, T3)`, so `C.__args__` should be `(int, str)`. - # Unfortunately, this is harder than it looks, because if `T3` is - # anything more exotic than a plain `TypeVar`, we need to consider - # edge cases. 
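        # Editor's illustration (not part of typing.py): the example above,
        # observed through public APIs:
        #
        #   >>> from typing import Generic, TypeVar
        #   >>> T1 = TypeVar('T1'); T2 = TypeVar('T2'); T3 = TypeVar('T3')
        #   >>> class A(Generic[T1, T2]): ...
        #   >>> B = A[int, T3]
        #   >>> B[str].__args__
        #   (int, str)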
- - params = self.__parameters__ - # In the example above, this would be {T3: str} - for param in params: - prepare = getattr(param, '__typing_prepare_subst__', None) - if prepare is not None: - args = prepare(self, args) - alen = len(args) - plen = len(params) - if alen != plen: - raise TypeError(f"Too {'many' if alen > plen else 'few'} arguments for {self};" - f" actual {alen}, expected {plen}") - new_arg_by_param = dict(zip(params, args)) - return tuple(self._make_substitution(self.__args__, new_arg_by_param)) - - def _make_substitution(self, args, new_arg_by_param): - """Create a list of new type arguments.""" - new_args = [] - for old_arg in args: - if isinstance(old_arg, type): - new_args.append(old_arg) - continue - - substfunc = getattr(old_arg, '__typing_subst__', None) - if substfunc: - new_arg = substfunc(new_arg_by_param[old_arg]) - else: - subparams = getattr(old_arg, '__parameters__', ()) - if not subparams: - new_arg = old_arg - else: - subargs = [] - for x in subparams: - if isinstance(x, TypeVarTuple): - subargs.extend(new_arg_by_param[x]) - else: - subargs.append(new_arg_by_param[x]) - new_arg = old_arg[tuple(subargs)] - - if self.__origin__ == collections.abc.Callable and isinstance(new_arg, tuple): - # Consider the following `Callable`. - # C = Callable[[int], str] - # Here, `C.__args__` should be (int, str) - NOT ([int], str). - # That means that if we had something like... - # P = ParamSpec('P') - # T = TypeVar('T') - # C = Callable[P, T] - # D = C[[int, str], float] - # ...we need to be careful; `new_args` should end up as - # `(int, str, float)` rather than `([int, str], float)`. - new_args.extend(new_arg) - elif _is_unpacked_typevartuple(old_arg): - # Consider the following `_GenericAlias`, `B`: - # class A(Generic[*Ts]): ... - # B = A[T, *Ts] - # If we then do: - # B[float, int, str] - # The `new_arg` corresponding to `T` will be `float`, and the - # `new_arg` corresponding to `*Ts` will be `(int, str)`. We - # should join all these types together in a flat list - # `(float, int, str)` - so again, we should `extend`. - new_args.extend(new_arg) - elif isinstance(old_arg, tuple): - # Corner case: - # P = ParamSpec('P') - # T = TypeVar('T') - # class Base(Generic[P]): ... - # Can be substituted like this: - # X = Base[[int, T]] - # In this case, `old_arg` will be a tuple: - new_args.append( - tuple(self._make_substitution(old_arg, new_arg_by_param)), - ) - else: - new_args.append(new_arg) - return new_args - - def copy_with(self, args): - return self.__class__(self.__origin__, args, name=self._name, inst=self._inst) - - def __repr__(self): - if self._name: - name = 'typing.' + self._name - else: - name = _type_repr(self.__origin__) - if self.__args__: - args = ", ".join([_type_repr(a) for a in self.__args__]) - else: - # To ensure the repr is eval-able. 
- args = "()" - return f'{name}[{args}]' - - def __reduce__(self): - if self._name: - origin = globals()[self._name] - else: - origin = self.__origin__ - args = tuple(self.__args__) - if len(args) == 1 and not isinstance(args[0], tuple): - args, = args - return operator.getitem, (origin, args) - - def __mro_entries__(self, bases): - if isinstance(self.__origin__, _SpecialForm): - raise TypeError(f"Cannot subclass {self!r}") - - if self._name: # generic version of an ABC or built-in class - return super().__mro_entries__(bases) - if self.__origin__ is Generic: - if Protocol in bases: - return () - i = bases.index(self) - for b in bases[i+1:]: - if isinstance(b, _BaseGenericAlias) and b is not self: - return () - return (self.__origin__,) - - def __iter__(self): - yield Unpack[self] - - -# _nparams is the number of accepted parameters, e.g. 0 for Hashable, -# 1 for List and 2 for Dict. It may be -1 if variable number of -# parameters are accepted (needs custom __getitem__). - -class _SpecialGenericAlias(_NotIterable, _BaseGenericAlias, _root=True): - def __init__(self, origin, nparams, *, inst=True, name=None, defaults=()): - if name is None: - name = origin.__name__ - super().__init__(origin, inst=inst, name=name) - self._nparams = nparams - self._defaults = defaults - if origin.__module__ == 'builtins': - self.__doc__ = f'Deprecated alias to {origin.__qualname__}.' - else: - self.__doc__ = f'Deprecated alias to {origin.__module__}.{origin.__qualname__}.' - - @_tp_cache - def __getitem__(self, params): - if not isinstance(params, tuple): - params = (params,) - msg = "Parameters to generic types must be types." - params = tuple(_type_check(p, msg) for p in params) - if (self._defaults - and len(params) < self._nparams - and len(params) + len(self._defaults) >= self._nparams - ): - params = (*params, *self._defaults[len(params) - self._nparams:]) - actual_len = len(params) - - if actual_len != self._nparams: - if self._defaults: - expected = f"at least {self._nparams - len(self._defaults)}" - else: - expected = str(self._nparams) - if not self._nparams: - raise TypeError(f"{self} is not a generic class") - raise TypeError(f"Too {'many' if actual_len > self._nparams else 'few'} arguments for {self};" - f" actual {actual_len}, expected {expected}") - return self.copy_with(params) - - def copy_with(self, params): - return _GenericAlias(self.__origin__, params, - name=self._name, inst=self._inst) - - def __repr__(self): - return 'typing.' 
+ self._name - - def __subclasscheck__(self, cls): - if isinstance(cls, _SpecialGenericAlias): - return issubclass(cls.__origin__, self.__origin__) - if not isinstance(cls, _GenericAlias): - return issubclass(cls, self.__origin__) - return super().__subclasscheck__(cls) - - def __reduce__(self): - return self._name - - def __or__(self, right): - return Union[self, right] - - def __ror__(self, left): - return Union[left, self] - - -class _DeprecatedGenericAlias(_SpecialGenericAlias, _root=True): - def __init__( - self, origin, nparams, *, removal_version, inst=True, name=None - ): - super().__init__(origin, nparams, inst=inst, name=name) - self._removal_version = removal_version - - def __instancecheck__(self, inst): - import warnings - warnings._deprecated( - f"{self.__module__}.{self._name}", remove=self._removal_version - ) - return super().__instancecheck__(inst) - - -class _CallableGenericAlias(_NotIterable, _GenericAlias, _root=True): - def __repr__(self): - assert self._name == 'Callable' - args = self.__args__ - if len(args) == 2 and _is_param_expr(args[0]): - return super().__repr__() - return (f'typing.Callable' - f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], ' - f'{_type_repr(args[-1])}]') - - def __reduce__(self): - args = self.__args__ - if not (len(args) == 2 and _is_param_expr(args[0])): - args = list(args[:-1]), args[-1] - return operator.getitem, (Callable, args) - - -class _CallableType(_SpecialGenericAlias, _root=True): - def copy_with(self, params): - return _CallableGenericAlias(self.__origin__, params, - name=self._name, inst=self._inst) - - def __getitem__(self, params): - if not isinstance(params, tuple) or len(params) != 2: - raise TypeError("Callable must be used as " - "Callable[[arg, ...], result].") - args, result = params - # This relaxes what args can be on purpose to allow things like - # PEP 612 ParamSpec. Responsibility for whether a user is using - # Callable[...] properly is deferred to static type checkers. - if isinstance(args, list): - params = (tuple(args), result) - else: - params = (args, result) - return self.__getitem_inner__(params) - - @_tp_cache - def __getitem_inner__(self, params): - args, result = params - msg = "Callable[args, result]: result must be a type." - result = _type_check(result, msg) - if args is Ellipsis: - return self.copy_with((_TypingEllipsis, result)) - if not isinstance(args, tuple): - args = (args,) - args = tuple(_type_convert(arg) for arg in args) - params = args + (result,) - return self.copy_with(params) - - -class _TupleType(_SpecialGenericAlias, _root=True): - @_tp_cache - def __getitem__(self, params): - if not isinstance(params, tuple): - params = (params,) - if len(params) >= 2 and params[-1] is ...: - msg = "Tuple[t, ...]: t must be a type." - params = tuple(_type_check(p, msg) for p in params[:-1]) - return self.copy_with((*params, _TypingEllipsis)) - msg = "Tuple[t0, t1, ...]: each t must be a type." 
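        # Editor's note (illustration, not part of typing.py): the
        # homogeneous form was handled above, e.g.
        #   Tuple[int, ...]  ->  copy_with((int, _TypingEllipsis))
        # and what remains is the fixed-length form, e.g.
        #   Tuple[int, str]  ->  copy_with((int, str))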
- params = tuple(_type_check(p, msg) for p in params) - return self.copy_with(params) - - -class _UnionGenericAlias(_NotIterable, _GenericAlias, _root=True): - def copy_with(self, params): - return Union[params] - - def __eq__(self, other): - if not isinstance(other, (_UnionGenericAlias, types.UnionType)): - return NotImplemented - try: # fast path - return set(self.__args__) == set(other.__args__) - except TypeError: # not hashable, slow path - return _compare_args_orderless(self.__args__, other.__args__) - - def __hash__(self): - return hash(frozenset(self.__args__)) - - def __repr__(self): - args = self.__args__ - if len(args) == 2: - if args[0] is type(None): - return f'typing.Optional[{_type_repr(args[1])}]' - elif args[1] is type(None): - return f'typing.Optional[{_type_repr(args[0])}]' - return super().__repr__() - - def __instancecheck__(self, obj): - for arg in self.__args__: - if isinstance(obj, arg): - return True - return False - - def __subclasscheck__(self, cls): - for arg in self.__args__: - if issubclass(cls, arg): - return True - return False - - def __reduce__(self): - func, (origin, args) = super().__reduce__() - return func, (Union, args) - - -def _value_and_type_iter(parameters): - return ((p, type(p)) for p in parameters) - - -class _LiteralGenericAlias(_GenericAlias, _root=True): - def __eq__(self, other): - if not isinstance(other, _LiteralGenericAlias): - return NotImplemented - - return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__)) - - def __hash__(self): - return hash(frozenset(_value_and_type_iter(self.__args__))) - - -class _ConcatenateGenericAlias(_GenericAlias, _root=True): - def copy_with(self, params): - if isinstance(params[-1], (list, tuple)): - return (*params[:-1], *params[-1]) - if isinstance(params[-1], _ConcatenateGenericAlias): - params = (*params[:-1], *params[-1].__args__) - return super().copy_with(params) - - -@_SpecialForm -def Unpack(self, parameters): - """Type unpack operator. - - The type unpack operator takes the child types from some container type, - such as `tuple[int, str]` or a `TypeVarTuple`, and 'pulls them out'. - - For example:: - - # For some generic class `Foo`: - Foo[Unpack[tuple[int, str]]] # Equivalent to Foo[int, str] - - Ts = TypeVarTuple('Ts') - # Specifies that `Bar` is generic in an arbitrary number of types. - # (Think of `Ts` as a tuple of an arbitrary number of individual - # `TypeVar`s, which the `Unpack` is 'pulling out' directly into the - # `Generic[]`.) - class Bar(Generic[Unpack[Ts]]): ... - Bar[int] # Valid - Bar[int, str] # Also valid - - From Python 3.11, this can also be done using the `*` operator:: - - Foo[*tuple[int, str]] - class Bar(Generic[*Ts]): ... - - And from Python 3.12, it can be done using built-in syntax for generics:: - - Foo[*tuple[int, str]] - class Bar[*Ts]: ... - - The operator can also be used along with a `TypedDict` to annotate - `**kwargs` in a function signature:: - - class Movie(TypedDict): - name: str - year: int - - # This function expects two keyword arguments - *name* of type `str` and - # *year* of type `int`. - def foo(**kwargs: Unpack[Movie]): ... - - Note that there is only some runtime checking of this operator. Not - everything the runtime allows may be accepted by static type checkers. - - For more information, see PEPs 646 and 692. 
- """ - item = _type_check(parameters, f'{self} accepts only single type.') - return _UnpackGenericAlias(origin=self, args=(item,)) - - -class _UnpackGenericAlias(_GenericAlias, _root=True): - def __repr__(self): - # `Unpack` only takes one argument, so __args__ should contain only - # a single item. - return f'typing.Unpack[{_type_repr(self.__args__[0])}]' - - def __getitem__(self, args): - if self.__typing_is_unpacked_typevartuple__: - return args - return super().__getitem__(args) - - @property - def __typing_unpacked_tuple_args__(self): - assert self.__origin__ is Unpack - assert len(self.__args__) == 1 - arg, = self.__args__ - if isinstance(arg, (_GenericAlias, types.GenericAlias)): - if arg.__origin__ is not tuple: - raise TypeError("Unpack[...] must be used with a tuple type") - return arg.__args__ - return None - - @property - def __typing_is_unpacked_typevartuple__(self): - assert self.__origin__ is Unpack - assert len(self.__args__) == 1 - return isinstance(self.__args__[0], TypeVarTuple) - - -class _TypingEllipsis: - """Internal placeholder for ... (ellipsis).""" - - -_TYPING_INTERNALS = frozenset({ - '__parameters__', '__orig_bases__', '__orig_class__', - '_is_protocol', '_is_runtime_protocol', '__protocol_attrs__', - '__non_callable_proto_members__', '__type_params__', -}) - -_SPECIAL_NAMES = frozenset({ - '__abstractmethods__', '__annotations__', '__dict__', '__doc__', - '__init__', '__module__', '__new__', '__slots__', - '__subclasshook__', '__weakref__', '__class_getitem__', - '__match_args__', '__static_attributes__', '__firstlineno__', -}) - -# These special attributes will be not collected as protocol members. -EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS | _SPECIAL_NAMES | {'_MutableMapping__marker'} - - -def _get_protocol_attrs(cls): - """Collect protocol members from a protocol class objects. - - This includes names actually defined in the class dictionary, as well - as names that appear in annotations. Special names (above) are skipped. - """ - attrs = set() - for base in cls.__mro__[:-1]: # without object - if base.__name__ in {'Protocol', 'Generic'}: - continue - annotations = getattr(base, '__annotations__', {}) - for attr in (*base.__dict__, *annotations): - if not attr.startswith('_abc_') and attr not in EXCLUDED_ATTRIBUTES: - attrs.add(attr) - return attrs - - -def _no_init_or_replace_init(self, *args, **kwargs): - cls = type(self) - - if cls._is_protocol: - raise TypeError('Protocols cannot be instantiated') - - # Already using a custom `__init__`. No need to calculate correct - # `__init__` to call. This can lead to RecursionError. See bpo-45121. - if cls.__init__ is not _no_init_or_replace_init: - return - - # Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`. - # The first instantiation of the subclass will call `_no_init_or_replace_init` which - # searches for a proper new `__init__` in the MRO. The new `__init__` - # replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent - # instantiation of the protocol subclass will thus use the new - # `__init__` and no longer call `_no_init_or_replace_init`. 
-    for base in cls.__mro__:
-        init = base.__dict__.get('__init__', _no_init_or_replace_init)
-        if init is not _no_init_or_replace_init:
-            cls.__init__ = init
-            break
-    else:
-        # should not happen
-        cls.__init__ = object.__init__
-
-    cls.__init__(self, *args, **kwargs)
-
-
-def _caller(depth=1, default='__main__'):
-    try:
-        return sys._getframemodulename(depth + 1) or default
-    except AttributeError:  # For platforms without _getframemodulename()
-        pass
-    try:
-        return sys._getframe(depth + 1).f_globals.get('__name__', default)
-    except (AttributeError, ValueError):  # For platforms without _getframe()
-        pass
-    return None
-
-def _allow_reckless_class_checks(depth=2):
-    """Allow instance and class checks for special stdlib modules.
-
-    The abc and functools modules indiscriminately call isinstance() and
-    issubclass() on the whole MRO of a user class, which may contain protocols.
-    """
-    return _caller(depth) in {'abc', 'functools', None}
-
-
-_PROTO_ALLOWLIST = {
-    'collections.abc': [
-        'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
-        'AsyncIterator', 'Hashable', 'Sized', 'Container', 'Collection',
-        'Reversible', 'Buffer',
-    ],
-    'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
-}
-
-
-@functools.cache
-def _lazy_load_getattr_static():
-    # Import getattr_static lazily so as not to slow down the import of typing.py
-    # Cache the result so we don't slow down _ProtocolMeta.__instancecheck__ unnecessarily
-    from inspect import getattr_static
-    return getattr_static
-
-
-_cleanups.append(_lazy_load_getattr_static.cache_clear)
-
-def _pickle_psargs(psargs):
-    return ParamSpecArgs, (psargs.__origin__,)
-
-copyreg.pickle(ParamSpecArgs, _pickle_psargs)
-
-def _pickle_pskwargs(pskwargs):
-    return ParamSpecKwargs, (pskwargs.__origin__,)
-
-copyreg.pickle(ParamSpecKwargs, _pickle_pskwargs)
-
-del _pickle_psargs, _pickle_pskwargs
-
-
-# Preload these once, as globals, as a micro-optimisation.
-# This makes a significant difference to the time it takes
-# to do `isinstance()`/`issubclass()` checks
-# against runtime-checkable protocols with only one callable member.
-_abc_instancecheck = ABCMeta.__instancecheck__
-_abc_subclasscheck = ABCMeta.__subclasscheck__
-
-
-def _type_check_issubclass_arg_1(arg):
-    """Raise TypeError if `arg` is not an instance of `type`
-    in `issubclass(arg, <protocol>)`.
-
-    In most cases, this is verified by type.__subclasscheck__.
-    Checking it again unnecessarily would slow down issubclass() checks,
-    so, we don't perform this check unless we absolutely have to.
-
-    For various error paths, however,
-    we want to ensure that *this* error message is shown to the user
-    where relevant, rather than a typing.py-specific error message.
-    """
-    if not isinstance(arg, type):
-        # Same error message as for issubclass(1, int).
-        raise TypeError('issubclass() arg 1 must be a class')
-
-
-class _ProtocolMeta(ABCMeta):
-    # This metaclass is somewhat unfortunate,
-    # but is necessary for several reasons...
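    # Editor's illustration (not part of typing.py): the runtime behavior
    # this metaclass provides, via the public typing API:
    #
    #   >>> from typing import Protocol, runtime_checkable
    #   >>> @runtime_checkable
    #   ... class Closeable(Protocol):
    #   ...     def close(self) -> None: ...
    #   >>> class File:
    #   ...     def close(self): pass
    #   >>> isinstance(File(), Closeable)   # structural, no inheritance
    #   True
    #   >>> issubclass(int, Closeable)
    #   False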
- def __new__(mcls, name, bases, namespace, /, **kwargs): - if name == "Protocol" and bases == (Generic,): - pass - elif Protocol in bases: - for base in bases: - if not ( - base in {object, Generic} - or base.__name__ in _PROTO_ALLOWLIST.get(base.__module__, []) - or ( - issubclass(base, Generic) - and getattr(base, "_is_protocol", False) - ) - ): - raise TypeError( - f"Protocols can only inherit from other protocols, " - f"got {base!r}" - ) - return super().__new__(mcls, name, bases, namespace, **kwargs) - - def __init__(cls, *args, **kwargs): - super().__init__(*args, **kwargs) - if getattr(cls, "_is_protocol", False): - cls.__protocol_attrs__ = _get_protocol_attrs(cls) - - def __subclasscheck__(cls, other): - if cls is Protocol: - return type.__subclasscheck__(cls, other) - if ( - getattr(cls, '_is_protocol', False) - and not _allow_reckless_class_checks() - ): - if not getattr(cls, '_is_runtime_protocol', False): - _type_check_issubclass_arg_1(other) - raise TypeError( - "Instance and class checks can only be used with " - "@runtime_checkable protocols" - ) - if ( - # this attribute is set by @runtime_checkable: - cls.__non_callable_proto_members__ - and cls.__dict__.get("__subclasshook__") is _proto_hook - ): - _type_check_issubclass_arg_1(other) - non_method_attrs = sorted(cls.__non_callable_proto_members__) - raise TypeError( - "Protocols with non-method members don't support issubclass()." - f" Non-method members: {str(non_method_attrs)[1:-1]}." - ) - return _abc_subclasscheck(cls, other) - - def __instancecheck__(cls, instance): - # We need this method for situations where attributes are - # assigned in __init__. - if cls is Protocol: - return type.__instancecheck__(cls, instance) - if not getattr(cls, "_is_protocol", False): - # i.e., it's a concrete subclass of a protocol - return _abc_instancecheck(cls, instance) - - if ( - not getattr(cls, '_is_runtime_protocol', False) and - not _allow_reckless_class_checks() - ): - raise TypeError("Instance and class checks can only be used with" - " @runtime_checkable protocols") - - if _abc_instancecheck(cls, instance): - return True - - getattr_static = _lazy_load_getattr_static() - for attr in cls.__protocol_attrs__: - try: - val = getattr_static(instance, attr) - except AttributeError: - break - # this attribute is set by @runtime_checkable: - if val is None and attr not in cls.__non_callable_proto_members__: - break - else: - return True - - return False - - -@classmethod -def _proto_hook(cls, other): - if not cls.__dict__.get('_is_protocol', False): - return NotImplemented - - for attr in cls.__protocol_attrs__: - for base in other.__mro__: - # Check if the members appears in the class dictionary... - if attr in base.__dict__: - if base.__dict__[attr] is None: - return NotImplemented - break - - # ...or in annotations, if it is a sub-protocol. - annotations = getattr(base, '__annotations__', {}) - if (isinstance(annotations, collections.abc.Mapping) and - attr in annotations and - issubclass(other, Generic) and getattr(other, '_is_protocol', False)): - break - else: - return NotImplemented - return True - - -class Protocol(Generic, metaclass=_ProtocolMeta): - """Base class for protocol classes. - - Protocol classes are defined as:: - - class Proto(Protocol): - def meth(self) -> int: - ... - - Such classes are primarily used with static type checkers that recognize - structural subtyping (static duck-typing). 
- - For example:: - - class C: - def meth(self) -> int: - return 0 - - def func(x: Proto) -> int: - return x.meth() - - func(C()) # Passes static type check - - See PEP 544 for details. Protocol classes decorated with - @typing.runtime_checkable act as simple-minded runtime protocols that check - only the presence of given attributes, ignoring their type signatures. - Protocol classes can be generic, they are defined as:: - - class GenProto[T](Protocol): - def meth(self) -> T: - ... - """ - - __slots__ = () - _is_protocol = True - _is_runtime_protocol = False - - def __init_subclass__(cls, *args, **kwargs): - super().__init_subclass__(*args, **kwargs) - - # Determine if this is a protocol or a concrete subclass. - if not cls.__dict__.get('_is_protocol', False): - cls._is_protocol = any(b is Protocol for b in cls.__bases__) - - # Set (or override) the protocol subclass hook. - if '__subclasshook__' not in cls.__dict__: - cls.__subclasshook__ = _proto_hook - - # Prohibit instantiation for protocol classes - if cls._is_protocol and cls.__init__ is Protocol.__init__: - cls.__init__ = _no_init_or_replace_init - - -class _AnnotatedAlias(_NotIterable, _GenericAlias, _root=True): - """Runtime representation of an annotated type. - - At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' - with extra annotations. The alias behaves like a normal typing alias. - Instantiating is the same as instantiating the underlying type; binding - it to types is also the same. - - The metadata itself is stored in a '__metadata__' attribute as a tuple. - """ - - def __init__(self, origin, metadata): - if isinstance(origin, _AnnotatedAlias): - metadata = origin.__metadata__ + metadata - origin = origin.__origin__ - super().__init__(origin, origin, name='Annotated') - self.__metadata__ = metadata - - def copy_with(self, params): - assert len(params) == 1 - new_type = params[0] - return _AnnotatedAlias(new_type, self.__metadata__) - - def __repr__(self): - return "typing.Annotated[{}, {}]".format( - _type_repr(self.__origin__), - ", ".join(repr(a) for a in self.__metadata__) - ) - - def __reduce__(self): - return operator.getitem, ( - Annotated, (self.__origin__,) + self.__metadata__ - ) - - def __eq__(self, other): - if not isinstance(other, _AnnotatedAlias): - return NotImplemented - return (self.__origin__ == other.__origin__ - and self.__metadata__ == other.__metadata__) - - def __hash__(self): - return hash((self.__origin__, self.__metadata__)) - - def __getattr__(self, attr): - if attr in {'__name__', '__qualname__'}: - return 'Annotated' - return super().__getattr__(attr) - - def __mro_entries__(self, bases): - return (self.__origin__,) - - -@_TypedCacheSpecialForm -@_tp_cache(typed=True) -def Annotated(self, *params): - """Add context-specific metadata to a type. - - Example: Annotated[int, runtime_check.Unsigned] indicates to the - hypothetical runtime_check module that this type is an unsigned int. - Every other consumer of this type can ignore this metadata and treat - this type as int. - - The first argument to Annotated must be a valid type. - - Details: - - - It's an error to call `Annotated` with less than two arguments. 
- - Access the metadata via the ``__metadata__`` attribute:: - - assert Annotated[int, '$'].__metadata__ == ('$',) - - - Nested Annotated types are flattened:: - - assert Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] - - - Instantiating an annotated type is equivalent to instantiating the - underlying type:: - - assert Annotated[C, Ann1](5) == C(5) - - - Annotated can be used as a generic type alias:: - - type Optimized[T] = Annotated[T, runtime.Optimize()] - # type checker will treat Optimized[int] - # as equivalent to Annotated[int, runtime.Optimize()] - - type OptimizedList[T] = Annotated[list[T], runtime.Optimize()] - # type checker will treat OptimizedList[int] - # as equivalent to Annotated[list[int], runtime.Optimize()] - - - Annotated cannot be used with an unpacked TypeVarTuple:: - - type Variadic[*Ts] = Annotated[*Ts, Ann1] # NOT valid - - This would be equivalent to:: - - Annotated[T1, T2, T3, ..., Ann1] - - where T1, T2 etc. are TypeVars, which would be invalid, because - only one type should be passed to Annotated. - """ - if len(params) < 2: - raise TypeError("Annotated[...] should be used " - "with at least two arguments (a type and an " - "annotation).") - if _is_unpacked_typevartuple(params[0]): - raise TypeError("Annotated[...] should not be used with an " - "unpacked TypeVarTuple") - msg = "Annotated[t, ...]: t must be a type." - origin = _type_check(params[0], msg, allow_special_forms=True) - metadata = tuple(params[1:]) - return _AnnotatedAlias(origin, metadata) - - -def runtime_checkable(cls): - """Mark a protocol class as a runtime protocol. - - Such protocol can be used with isinstance() and issubclass(). - Raise TypeError if applied to a non-protocol class. - This allows a simple-minded structural check very similar to - one trick ponies in collections.abc such as Iterable. - - For example:: - - @runtime_checkable - class Closable(Protocol): - def close(self): ... - - assert isinstance(open('/some/file'), Closable) - - Warning: this will check only the presence of the required methods, - not their type signatures! - """ - if not issubclass(cls, Generic) or not getattr(cls, '_is_protocol', False): - raise TypeError('@runtime_checkable can be only applied to protocol classes,' - ' got %r' % cls) - cls._is_runtime_protocol = True - # PEP 544 prohibits using issubclass() - # with protocols that have non-method members. - # See gh-113320 for why we compute this attribute here, - # rather than in `_ProtocolMeta.__init__` - cls.__non_callable_proto_members__ = set() - for attr in cls.__protocol_attrs__: - try: - is_callable = callable(getattr(cls, attr, None)) - except Exception as e: - raise TypeError( - f"Failed to determine whether protocol member {attr!r} " - "is a method member" - ) from e - else: - if not is_callable: - cls.__non_callable_proto_members__.add(attr) - return cls - - -def cast(typ, val): - """Cast a value to a type. - - This returns the value unchanged. To the type checker this - signals that the return value has the designated type, but at - runtime we intentionally don't check anything (we want this - to be as fast as possible). - """ - return val - - -def assert_type(val, typ, /): - """Ask a static type checker to confirm that the value is of the given type. - - At runtime this does nothing: it returns the first argument unchanged with no - checks or side effects, no matter the actual type of the argument. 
- - When a static type checker encounters a call to assert_type(), it - emits an error if the value is not of the specified type:: - - def greet(name: str) -> None: - assert_type(name, str) # OK - assert_type(name, int) # type checker error - """ - return val - - -_allowed_types = (types.FunctionType, types.BuiltinFunctionType, - types.MethodType, types.ModuleType, - WrapperDescriptorType, MethodWrapperType, MethodDescriptorType) - - -def get_type_hints(obj, globalns=None, localns=None, include_extras=False): - """Return type hints for an object. - - This is often the same as obj.__annotations__, but it handles - forward references encoded as string literals and recursively replaces all - 'Annotated[T, ...]' with 'T' (unless 'include_extras=True'). - - The argument may be a module, class, method, or function. The annotations - are returned as a dictionary. For classes, annotations include also - inherited members. - - TypeError is raised if the argument is not of a type that can contain - annotations, and an empty dictionary is returned if no annotations are - present. - - BEWARE -- the behavior of globalns and localns is counterintuitive - (unless you are familiar with how eval() and exec() work). The - search order is locals first, then globals. - - - If no dict arguments are passed, an attempt is made to use the - globals from obj (or the respective module's globals for classes), - and these are also used as the locals. If the object does not appear - to have globals, an empty dictionary is used. For classes, the search - order is globals first then locals. - - - If one dict argument is passed, it is used for both globals and - locals. - - - If two dict arguments are passed, they specify globals and - locals, respectively. - """ - if getattr(obj, '__no_type_check__', None): - return {} - # Classes require a special treatment. - if isinstance(obj, type): - hints = {} - for base in reversed(obj.__mro__): - if globalns is None: - base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {}) - else: - base_globals = globalns - ann = base.__dict__.get('__annotations__', {}) - if isinstance(ann, types.GetSetDescriptorType): - ann = {} - base_locals = dict(vars(base)) if localns is None else localns - if localns is None and globalns is None: - # This is surprising, but required. Before Python 3.10, - # get_type_hints only evaluated the globalns of - # a class. To maintain backwards compatibility, we reverse - # the globalns and localns order so that eval() looks into - # *base_globals* first rather than *base_locals*. - # This only affects ForwardRefs. - base_globals, base_locals = base_locals, base_globals - for name, value in ann.items(): - if value is None: - value = type(None) - if isinstance(value, str): - value = ForwardRef(value, is_argument=False, is_class=True) - value = _eval_type(value, base_globals, base_locals, base.__type_params__) - hints[name] = value - return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()} - - if globalns is None: - if isinstance(obj, types.ModuleType): - globalns = obj.__dict__ - else: - nsobj = obj - # Find globalns for the unwrapped object. - while hasattr(nsobj, '__wrapped__'): - nsobj = nsobj.__wrapped__ - globalns = getattr(nsobj, '__globals__', {}) - if localns is None: - localns = globalns - elif localns is None: - localns = globalns - hints = getattr(obj, '__annotations__', None) - if hints is None: - # Return empty annotations for something that _could_ have them. 
- if isinstance(obj, _allowed_types): - return {} - else: - raise TypeError('{!r} is not a module, class, method, ' - 'or function.'.format(obj)) - hints = dict(hints) - type_params = getattr(obj, "__type_params__", ()) - for name, value in hints.items(): - if value is None: - value = type(None) - if isinstance(value, str): - # class-level forward refs were handled above, this must be either - # a module-level annotation or a function argument annotation - value = ForwardRef( - value, - is_argument=not isinstance(obj, types.ModuleType), - is_class=False, - ) - hints[name] = _eval_type(value, globalns, localns, type_params) - return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()} - - -def _strip_annotations(t): - """Strip the annotations from a given type.""" - if isinstance(t, _AnnotatedAlias): - return _strip_annotations(t.__origin__) - if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired, ReadOnly): - return _strip_annotations(t.__args__[0]) - if isinstance(t, _GenericAlias): - stripped_args = tuple(_strip_annotations(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - return t.copy_with(stripped_args) - if isinstance(t, GenericAlias): - stripped_args = tuple(_strip_annotations(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - return GenericAlias(t.__origin__, stripped_args) - if isinstance(t, types.UnionType): - stripped_args = tuple(_strip_annotations(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - return functools.reduce(operator.or_, stripped_args) - - return t - - -def get_origin(tp): - """Get the unsubscripted version of a type. - - This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar, - Annotated, and others. Return None for unsupported types. - - Examples:: - - >>> P = ParamSpec('P') - >>> assert get_origin(Literal[42]) is Literal - >>> assert get_origin(int) is None - >>> assert get_origin(ClassVar[int]) is ClassVar - >>> assert get_origin(Generic) is Generic - >>> assert get_origin(Generic[T]) is Generic - >>> assert get_origin(Union[T, int]) is Union - >>> assert get_origin(List[Tuple[T, T]][int]) is list - >>> assert get_origin(P.args) is P - """ - if isinstance(tp, _AnnotatedAlias): - return Annotated - if isinstance(tp, (_BaseGenericAlias, GenericAlias, - ParamSpecArgs, ParamSpecKwargs)): - return tp.__origin__ - if tp is Generic: - return Generic - if isinstance(tp, types.UnionType): - return types.UnionType - return None - - -def get_args(tp): - """Get type arguments with all substitutions performed. - - For unions, basic simplifications used by Union constructor are performed. - - Examples:: - - >>> T = TypeVar('T') - >>> assert get_args(Dict[str, int]) == (str, int) - >>> assert get_args(int) == () - >>> assert get_args(Union[int, Union[T, int], str][int]) == (int, str) - >>> assert get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) - >>> assert get_args(Callable[[], T][int]) == ([], int) - """ - if isinstance(tp, _AnnotatedAlias): - return (tp.__origin__,) + tp.__metadata__ - if isinstance(tp, (_GenericAlias, GenericAlias)): - res = tp.__args__ - if _should_unflatten_callable_args(tp, res): - res = (list(res[:-1]), res[-1]) - return res - if isinstance(tp, types.UnionType): - return tp.__args__ - return () - - -def is_typeddict(tp): - """Check if an annotation is a TypedDict class. - - For example:: - - >>> from typing import TypedDict - >>> class Film(TypedDict): - ... title: str - ... year: int - ... 
- >>> is_typeddict(Film) - True - >>> is_typeddict(dict) - False - """ - return isinstance(tp, _TypedDictMeta) - - -_ASSERT_NEVER_REPR_MAX_LENGTH = 100 - - -def assert_never(arg: Never, /) -> Never: - """Statically assert that a line of code is unreachable. - - Example:: - - def int_or_str(arg: int | str) -> None: - match arg: - case int(): - print("It's an int") - case str(): - print("It's a str") - case _: - assert_never(arg) - - If a type checker finds that a call to assert_never() is - reachable, it will emit an error. - - At runtime, this throws an exception when called. - """ - value = repr(arg) - if len(value) > _ASSERT_NEVER_REPR_MAX_LENGTH: - value = value[:_ASSERT_NEVER_REPR_MAX_LENGTH] + '...' - raise AssertionError(f"Expected code to be unreachable, but got: {value}") - - -def no_type_check(arg): - """Decorator to indicate that annotations are not type hints. - - The argument must be a class or function; if it is a class, it - applies recursively to all methods and classes defined in that class - (but not to methods defined in its superclasses or subclasses). - - This mutates the function(s) or class(es) in place. - """ - if isinstance(arg, type): - for key in dir(arg): - obj = getattr(arg, key) - if ( - not hasattr(obj, '__qualname__') - or obj.__qualname__ != f'{arg.__qualname__}.{obj.__name__}' - or getattr(obj, '__module__', None) != arg.__module__ - ): - # We only modify objects that are defined in this type directly. - # If classes / methods are nested in multiple layers, - # we will modify them when processing their direct holders. - continue - # Instance, class, and static methods: - if isinstance(obj, types.FunctionType): - obj.__no_type_check__ = True - if isinstance(obj, types.MethodType): - obj.__func__.__no_type_check__ = True - # Nested types: - if isinstance(obj, type): - no_type_check(obj) - try: - arg.__no_type_check__ = True - except TypeError: # built-in classes - pass - return arg - - -def no_type_check_decorator(decorator): - """Decorator to give another decorator the @no_type_check effect. - - This wraps the decorator with something that wraps the decorated - function in @no_type_check. - """ - import warnings - warnings._deprecated("typing.no_type_check_decorator", remove=(3, 15)) - @functools.wraps(decorator) - def wrapped_decorator(*args, **kwds): - func = decorator(*args, **kwds) - func = no_type_check(func) - return func - - return wrapped_decorator - - -def _overload_dummy(*args, **kwds): - """Helper for @overload to raise when called.""" - raise NotImplementedError( - "You should not call an overloaded function. " - "A series of @overload-decorated functions " - "outside a stub module should always be followed " - "by an implementation that is not @overload-ed.") - - -# {module: {qualname: {firstlineno: func}}} -_overload_registry = defaultdict(functools.partial(defaultdict, dict)) - - -def overload(func): - """Decorator for overloaded functions/methods. - - In a stub file, place two or more stub definitions for the same - function in a row, each decorated with @overload. - - For example:: - - @overload - def utf8(value: None) -> None: ... - @overload - def utf8(value: bytes) -> bytes: ... - @overload - def utf8(value: str) -> bytes: ... - - In a non-stub file (i.e. a regular .py file), do the same but - follow it with an implementation. The implementation should *not* - be decorated with @overload:: - - @overload - def utf8(value: None) -> None: ... - @overload - def utf8(value: bytes) -> bytes: ... - @overload - def utf8(value: str) -> bytes: ... 
- def utf8(value): - ... # implementation goes here - - The overloads for a function can be retrieved at runtime using the - get_overloads() function. - """ - # classmethod and staticmethod - f = getattr(func, "__func__", func) - try: - _overload_registry[f.__module__][f.__qualname__][f.__code__.co_firstlineno] = func - except AttributeError: - # Not a normal function; ignore. - pass - return _overload_dummy - - -def get_overloads(func): - """Return all defined overloads for *func* as a sequence.""" - # classmethod and staticmethod - f = getattr(func, "__func__", func) - if f.__module__ not in _overload_registry: - return [] - mod_dict = _overload_registry[f.__module__] - if f.__qualname__ not in mod_dict: - return [] - return list(mod_dict[f.__qualname__].values()) - - -def clear_overloads(): - """Clear all overloads in the registry.""" - _overload_registry.clear() - - -def final(f): - """Decorator to indicate final methods and final classes. - - Use this decorator to indicate to type checkers that the decorated - method cannot be overridden, and decorated class cannot be subclassed. - - For example:: - - class Base: - @final - def done(self) -> None: - ... - class Sub(Base): - def done(self) -> None: # Error reported by type checker - ... - - @final - class Leaf: - ... - class Other(Leaf): # Error reported by type checker - ... - - There is no runtime checking of these properties. The decorator - attempts to set the ``__final__`` attribute to ``True`` on the decorated - object to allow runtime introspection. - """ - try: - f.__final__ = True - except (AttributeError, TypeError): - # Skip the attribute silently if it is not writable. - # AttributeError happens if the object has __slots__ or a - # read-only property, TypeError if it's a builtin class. - pass - return f - - -# Some unconstrained type variables. These were initially used by the container types. -# They were never meant for export and are now unused, but we keep them around to -# avoid breaking compatibility with users who import them. -T = TypeVar('T') # Any type. -KT = TypeVar('KT') # Key type. -VT = TypeVar('VT') # Value type. -T_co = TypeVar('T_co', covariant=True) # Any type covariant containers. -V_co = TypeVar('V_co', covariant=True) # Any type covariant containers. -VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers. -T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant. -# Internal type variable used for Type[]. -CT_co = TypeVar('CT_co', covariant=True, bound=type) - - -# A useful type variable with constraints. This represents string types. -# (This one *is* for export!) -AnyStr = TypeVar('AnyStr', bytes, str) - - -# Various ABCs mimicking those in collections.abc. -_alias = _SpecialGenericAlias - -Hashable = _alias(collections.abc.Hashable, 0) # Not generic. -Awaitable = _alias(collections.abc.Awaitable, 1) -Coroutine = _alias(collections.abc.Coroutine, 3) -AsyncIterable = _alias(collections.abc.AsyncIterable, 1) -AsyncIterator = _alias(collections.abc.AsyncIterator, 1) -Iterable = _alias(collections.abc.Iterable, 1) -Iterator = _alias(collections.abc.Iterator, 1) -Reversible = _alias(collections.abc.Reversible, 1) -Sized = _alias(collections.abc.Sized, 0) # Not generic. -Container = _alias(collections.abc.Container, 1) -Collection = _alias(collections.abc.Collection, 1) -Callable = _CallableType(collections.abc.Callable, 2) -Callable.__doc__ = \ - """Deprecated alias to collections.abc.Callable. 
- - Callable[[int], str] signifies a function that takes a single - parameter of type int and returns a str. - - The subscription syntax must always be used with exactly two - values: the argument list and the return type. - The argument list must be a list of types, a ParamSpec, - Concatenate or ellipsis. The return type must be a single type. - - There is no syntax to indicate optional or keyword arguments; - such function types are rarely used as callback types. - """ -AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet') -MutableSet = _alias(collections.abc.MutableSet, 1) -# NOTE: Mapping is only covariant in the value type. -Mapping = _alias(collections.abc.Mapping, 2) -MutableMapping = _alias(collections.abc.MutableMapping, 2) -Sequence = _alias(collections.abc.Sequence, 1) -MutableSequence = _alias(collections.abc.MutableSequence, 1) -ByteString = _DeprecatedGenericAlias( - collections.abc.ByteString, 0, removal_version=(3, 17) # Not generic. -) -# Tuple accepts variable number of parameters. -Tuple = _TupleType(tuple, -1, inst=False, name='Tuple') -Tuple.__doc__ = \ - """Deprecated alias to builtins.tuple. - - Tuple[X, Y] is the cross-product type of X and Y. - - Example: Tuple[T1, T2] is a tuple of two elements corresponding - to type variables T1 and T2. Tuple[int, float, str] is a tuple - of an int, a float and a string. - - To specify a variable-length tuple of homogeneous type, use Tuple[T, ...]. - """ -List = _alias(list, 1, inst=False, name='List') -Deque = _alias(collections.deque, 1, name='Deque') -Set = _alias(set, 1, inst=False, name='Set') -FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet') -MappingView = _alias(collections.abc.MappingView, 1) -KeysView = _alias(collections.abc.KeysView, 1) -ItemsView = _alias(collections.abc.ItemsView, 2) -ValuesView = _alias(collections.abc.ValuesView, 1) -Dict = _alias(dict, 2, inst=False, name='Dict') -DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict') -OrderedDict = _alias(collections.OrderedDict, 2) -Counter = _alias(collections.Counter, 1) -ChainMap = _alias(collections.ChainMap, 2) -Generator = _alias(collections.abc.Generator, 3, defaults=(types.NoneType, types.NoneType)) -AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2, defaults=(types.NoneType,)) -Type = _alias(type, 1, inst=False, name='Type') -Type.__doc__ = \ - """Deprecated alias to builtins.type. - - builtins.type or typing.Type can be used to annotate class objects. - For example, suppose we have the following classes:: - - class User: ... # Abstract base for User classes - class BasicUser(User): ... - class ProUser(User): ... - class TeamUser(User): ... - - And a function that takes a class argument that's a subclass of - User and returns an instance of the corresponding class:: - - def new_user[U](user_class: Type[U]) -> U: - user = user_class() - # (Here we could write the user object to a database) - return user - - joe = new_user(BasicUser) - - At this point the type checker knows that joe has type BasicUser. 
- """ - - -@runtime_checkable -class SupportsInt(Protocol): - """An ABC with one abstract method __int__.""" - - __slots__ = () - - @abstractmethod - def __int__(self) -> int: - pass - - -@runtime_checkable -class SupportsFloat(Protocol): - """An ABC with one abstract method __float__.""" - - __slots__ = () - - @abstractmethod - def __float__(self) -> float: - pass - - -@runtime_checkable -class SupportsComplex(Protocol): - """An ABC with one abstract method __complex__.""" - - __slots__ = () - - @abstractmethod - def __complex__(self) -> complex: - pass - - -@runtime_checkable -class SupportsBytes(Protocol): - """An ABC with one abstract method __bytes__.""" - - __slots__ = () - - @abstractmethod - def __bytes__(self) -> bytes: - pass - - -@runtime_checkable -class SupportsIndex(Protocol): - """An ABC with one abstract method __index__.""" - - __slots__ = () - - @abstractmethod - def __index__(self) -> int: - pass - - -@runtime_checkable -class SupportsAbs[T](Protocol): - """An ABC with one abstract method __abs__ that is covariant in its return type.""" - - __slots__ = () - - @abstractmethod - def __abs__(self) -> T: - pass - - -@runtime_checkable -class SupportsRound[T](Protocol): - """An ABC with one abstract method __round__ that is covariant in its return type.""" - - __slots__ = () - - @abstractmethod - def __round__(self, ndigits: int = 0) -> T: - pass - - -def _make_nmtuple(name, types, module, defaults = ()): - fields = [n for n, t in types] - types = {n: _type_check(t, f"field {n} annotation must be a type") - for n, t in types} - nm_tpl = collections.namedtuple(name, fields, - defaults=defaults, module=module) - nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = types - return nm_tpl - - -# attributes prohibited to set in NamedTuple class syntax -_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__', - '_fields', '_field_defaults', - '_make', '_replace', '_asdict', '_source'}) - -_special = frozenset({'__module__', '__name__', '__annotations__'}) - - -class NamedTupleMeta(type): - def __new__(cls, typename, bases, ns): - assert _NamedTuple in bases - for base in bases: - if base is not _NamedTuple and base is not Generic: - raise TypeError( - 'can only inherit from a NamedTuple type and Generic') - bases = tuple(tuple if base is _NamedTuple else base for base in bases) - types = ns.get('__annotations__', {}) - default_names = [] - for field_name in types: - if field_name in ns: - default_names.append(field_name) - elif default_names: - raise TypeError(f"Non-default namedtuple field {field_name} " - f"cannot follow default field" - f"{'s' if len(default_names) > 1 else ''} " - f"{', '.join(default_names)}") - nm_tpl = _make_nmtuple(typename, types.items(), - defaults=[ns[n] for n in default_names], - module=ns['__module__']) - nm_tpl.__bases__ = bases - if Generic in bases: - class_getitem = _generic_class_getitem - nm_tpl.__class_getitem__ = classmethod(class_getitem) - # update from user namespace without overriding special namedtuple attributes - for key, val in ns.items(): - if key in _prohibited: - raise AttributeError("Cannot overwrite NamedTuple attribute " + key) - elif key not in _special: - if key not in nm_tpl._fields: - setattr(nm_tpl, key, val) - try: - set_name = type(val).__set_name__ - except AttributeError: - pass - else: - try: - set_name(val, nm_tpl, key) - except BaseException as e: - e.add_note( - f"Error calling __set_name__ on {type(val).__name__!r} " - f"instance {key!r} in {typename!r}" - ) - raise - - if Generic in bases: 
- nm_tpl.__init_subclass__() - return nm_tpl - - -def NamedTuple(typename, fields=_sentinel, /, **kwargs): - """Typed version of namedtuple. - - Usage:: - - class Employee(NamedTuple): - name: str - id: int - - This is equivalent to:: - - Employee = collections.namedtuple('Employee', ['name', 'id']) - - The resulting class has an extra __annotations__ attribute, giving a - dict that maps field names to types. (The field names are also in - the _fields attribute, which is part of the namedtuple API.) - An alternative equivalent functional syntax is also accepted:: - - Employee = NamedTuple('Employee', [('name', str), ('id', int)]) - """ - if fields is _sentinel: - if kwargs: - deprecated_thing = "Creating NamedTuple classes using keyword arguments" - deprecation_msg = ( - "{name} is deprecated and will be disallowed in Python {remove}. " - "Use the class-based or functional syntax instead." - ) - else: - deprecated_thing = "Failing to pass a value for the 'fields' parameter" - example = f"`{typename} = NamedTuple({typename!r}, [])`" - deprecation_msg = ( - "{name} is deprecated and will be disallowed in Python {remove}. " - "To create a NamedTuple class with 0 fields " - "using the functional syntax, " - "pass an empty list, e.g. " - ) + example + "." - elif fields is None: - if kwargs: - raise TypeError( - "Cannot pass `None` as the 'fields' parameter " - "and also specify fields using keyword arguments" - ) - else: - deprecated_thing = "Passing `None` as the 'fields' parameter" - example = f"`{typename} = NamedTuple({typename!r}, [])`" - deprecation_msg = ( - "{name} is deprecated and will be disallowed in Python {remove}. " - "To create a NamedTuple class with 0 fields " - "using the functional syntax, " - "pass an empty list, e.g. " - ) + example + "." - elif kwargs: - raise TypeError("Either list of fields or keywords" - " can be provided to NamedTuple, not both") - if fields is _sentinel or fields is None: - import warnings - warnings._deprecated(deprecated_thing, message=deprecation_msg, remove=(3, 15)) - fields = kwargs.items() - nt = _make_nmtuple(typename, fields, module=_caller()) - nt.__orig_bases__ = (NamedTuple,) - return nt - -_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {}) - -def _namedtuple_mro_entries(bases): - assert NamedTuple in bases - return (_NamedTuple,) - -NamedTuple.__mro_entries__ = _namedtuple_mro_entries - - -def _get_typeddict_qualifiers(annotation_type): - while True: - annotation_origin = get_origin(annotation_type) - if annotation_origin is Annotated: - annotation_args = get_args(annotation_type) - if annotation_args: - annotation_type = annotation_args[0] - else: - break - elif annotation_origin is Required: - yield Required - (annotation_type,) = get_args(annotation_type) - elif annotation_origin is NotRequired: - yield NotRequired - (annotation_type,) = get_args(annotation_type) - elif annotation_origin is ReadOnly: - yield ReadOnly - (annotation_type,) = get_args(annotation_type) - else: - break - - -class _TypedDictMeta(type): - def __new__(cls, name, bases, ns, total=True): - """Create a new typed dict class object. - - This method is called when TypedDict is subclassed, - or when TypedDict is instantiated. This way - TypedDict supports all three syntax forms described in its docstring. - Subclasses and instances of TypedDict return actual dictionaries. 
- """ - for base in bases: - if type(base) is not _TypedDictMeta and base is not Generic: - raise TypeError('cannot inherit from both a TypedDict type ' - 'and a non-TypedDict base class') - - if any(issubclass(b, Generic) for b in bases): - generic_base = (Generic,) - else: - generic_base = () - - tp_dict = type.__new__(_TypedDictMeta, name, (*generic_base, dict), ns) - - if not hasattr(tp_dict, '__orig_bases__'): - tp_dict.__orig_bases__ = bases - - annotations = {} - own_annotations = ns.get('__annotations__', {}) - msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" - own_annotations = { - n: _type_check(tp, msg, module=tp_dict.__module__) - for n, tp in own_annotations.items() - } - required_keys = set() - optional_keys = set() - readonly_keys = set() - mutable_keys = set() - - for base in bases: - annotations.update(base.__dict__.get('__annotations__', {})) - - base_required = base.__dict__.get('__required_keys__', set()) - required_keys |= base_required - optional_keys -= base_required - - base_optional = base.__dict__.get('__optional_keys__', set()) - required_keys -= base_optional - optional_keys |= base_optional - - readonly_keys.update(base.__dict__.get('__readonly_keys__', ())) - mutable_keys.update(base.__dict__.get('__mutable_keys__', ())) - - annotations.update(own_annotations) - for annotation_key, annotation_type in own_annotations.items(): - qualifiers = set(_get_typeddict_qualifiers(annotation_type)) - if Required in qualifiers: - is_required = True - elif NotRequired in qualifiers: - is_required = False - else: - is_required = total - - if is_required: - required_keys.add(annotation_key) - optional_keys.discard(annotation_key) - else: - optional_keys.add(annotation_key) - required_keys.discard(annotation_key) - - if ReadOnly in qualifiers: - if annotation_key in mutable_keys: - raise TypeError( - f"Cannot override mutable key {annotation_key!r}" - " with read-only key" - ) - readonly_keys.add(annotation_key) - else: - mutable_keys.add(annotation_key) - readonly_keys.discard(annotation_key) - - assert required_keys.isdisjoint(optional_keys), ( - f"Required keys overlap with optional keys in {name}:" - f" {required_keys=}, {optional_keys=}" - ) - tp_dict.__annotations__ = annotations - tp_dict.__required_keys__ = frozenset(required_keys) - tp_dict.__optional_keys__ = frozenset(optional_keys) - tp_dict.__readonly_keys__ = frozenset(readonly_keys) - tp_dict.__mutable_keys__ = frozenset(mutable_keys) - tp_dict.__total__ = total - return tp_dict - - __call__ = dict # static method - - def __subclasscheck__(cls, other): - # Typed dicts are only for static structural subtyping. - raise TypeError('TypedDict does not support instance and class checks') - - __instancecheck__ = __subclasscheck__ - - -def TypedDict(typename, fields=_sentinel, /, *, total=True): - """A simple typed namespace. At runtime it is equivalent to a plain dict. - - TypedDict creates a dictionary type such that a type checker will expect all - instances to have a certain set of keys, where each key is - associated with a value of a consistent type. This expectation - is not checked at runtime. - - Usage:: - - >>> class Point2D(TypedDict): - ... x: int - ... y: int - ... label: str - ... 
- >>> a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK - >>> b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check - >>> Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') - True - - The type info can be accessed via the Point2D.__annotations__ dict, and - the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. - TypedDict supports an additional equivalent form:: - - Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) - - By default, all keys must be present in a TypedDict. It is possible - to override this by specifying totality:: - - class Point2D(TypedDict, total=False): - x: int - y: int - - This means that a Point2D TypedDict can have any of the keys omitted. A type - checker is only expected to support a literal False or True as the value of - the total argument. True is the default, and makes all items defined in the - class body be required. - - The Required and NotRequired special forms can also be used to mark - individual keys as being required or not required:: - - class Point2D(TypedDict): - x: int # the "x" key must always be present (Required is the default) - y: NotRequired[int] # the "y" key can be omitted - - See PEP 655 for more details on Required and NotRequired. - - The ReadOnly special form can be used - to mark individual keys as immutable for type checkers:: - - class DatabaseUser(TypedDict): - id: ReadOnly[int] # the "id" key must not be modified - username: str # the "username" key can be changed - - """ - if fields is _sentinel or fields is None: - import warnings - - if fields is _sentinel: - deprecated_thing = "Failing to pass a value for the 'fields' parameter" - else: - deprecated_thing = "Passing `None` as the 'fields' parameter" - - example = f"`{typename} = TypedDict({typename!r}, {{{{}}}})`" - deprecation_msg = ( - "{name} is deprecated and will be disallowed in Python {remove}. " - "To create a TypedDict class with 0 fields " - "using the functional syntax, " - "pass an empty dictionary, e.g. " - ) + example + "." - warnings._deprecated(deprecated_thing, message=deprecation_msg, remove=(3, 15)) - fields = {} - - ns = {'__annotations__': dict(fields)} - module = _caller() - if module is not None: - # Setting correct module is necessary to make typed dict classes pickleable. - ns['__module__'] = module - - td = _TypedDictMeta(typename, (), ns, total=total) - td.__orig_bases__ = (TypedDict,) - return td - -_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {}) -TypedDict.__mro_entries__ = lambda bases: (_TypedDict,) - - -@_SpecialForm -def Required(self, parameters): - """Special typing construct to mark a TypedDict key as required. - - This is mainly useful for total=False TypedDicts. - - For example:: - - class Movie(TypedDict, total=False): - title: Required[str] - year: int - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - - There is no runtime checking that a required key is actually provided - when instantiating a related TypedDict. - """ - item = _type_check(parameters, f'{self._name} accepts only a single type.') - return _GenericAlias(self, (item,)) - - -@_SpecialForm -def NotRequired(self, parameters): - """Special typing construct to mark a TypedDict key as potentially missing. 
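# --- Editor's note: PEP 655 in practice; illustrative, not part of the module. ---
from typing import Required, TypedDict

class Options(TypedDict, total=False):
    name: Required[str]     # required even though total=False
    verbose: bool           # may be omitted

assert Options.__required_keys__ == frozenset({'name'})
assert Options.__optional_keys__ == frozenset({'verbose'})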
- - For example:: - - class Movie(TypedDict): - title: str - year: NotRequired[int] - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - """ - item = _type_check(parameters, f'{self._name} accepts only a single type.') - return _GenericAlias(self, (item,)) - - -@_SpecialForm -def ReadOnly(self, parameters): - """A special typing construct to mark an item of a TypedDict as read-only. - - For example:: - - class Movie(TypedDict): - title: ReadOnly[str] - year: int - - def mutate_movie(m: Movie) -> None: - m["year"] = 1992 # allowed - m["title"] = "The Matrix" # typechecker error - - There is no runtime checking for this property. - """ - item = _type_check(parameters, f'{self._name} accepts only a single type.') - return _GenericAlias(self, (item,)) - - -class NewType: - """NewType creates simple unique types with almost zero runtime overhead. - - NewType(name, tp) is considered a subtype of tp - by static type checkers. At runtime, NewType(name, tp) returns - a dummy callable that simply returns its argument. - - Usage:: - - UserId = NewType('UserId', int) - - def name_by_id(user_id: UserId) -> str: - ... - - UserId('user') # Fails type check - - name_by_id(42) # Fails type check - name_by_id(UserId(42)) # OK - - num = UserId(5) + 1 # type: int - """ - - __call__ = _idfunc - - def __init__(self, name, tp): - self.__qualname__ = name - if '.' in name: - name = name.rpartition('.')[-1] - self.__name__ = name - self.__supertype__ = tp - def_mod = _caller() - if def_mod != 'typing': - self.__module__ = def_mod - - def __mro_entries__(self, bases): - # We defined __mro_entries__ to get a better error message - # if a user attempts to subclass a NewType instance. bpo-46170 - superclass_name = self.__name__ - - class Dummy: - def __init_subclass__(cls): - subclass_name = cls.__name__ - raise TypeError( - f"Cannot subclass an instance of NewType. Perhaps you were looking for: " - f"`{subclass_name} = NewType({subclass_name!r}, {superclass_name})`" - ) - - return (Dummy,) - - def __repr__(self): - return f'{self.__module__}.{self.__qualname__}' - - def __reduce__(self): - return self.__qualname__ - - def __or__(self, other): - return Union[self, other] - - def __ror__(self, other): - return Union[other, self] - - -# Python-version-specific alias (Python 2: unicode; Python 3: str) -Text = str - - -# Constant that's True when type checking, but False here. -TYPE_CHECKING = False - - -class IO(Generic[AnyStr]): - """Generic base class for TextIO and BinaryIO. - - This is an abstract, generic version of the return of open(). - - NOTE: This does not distinguish between the different possible - classes (text vs. binary, read vs. write vs. read/write, - append-only, unbuffered). The TextIO and BinaryIO subclasses - below capture the distinctions between text vs. binary, which is - pervasive in the interface; however we currently do not offer a - way to track the other distinctions in the type system. 
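# --- Editor's note: NewType at runtime is the identity function, as the class
# above implements; illustrative sketch. ---
from typing import NewType

UserId = NewType('UserId', int)

uid = UserId(42)
assert uid == 42 and type(uid) is int   # no wrapper object is created
assert UserId.__supertype__ is int
MaybeId = UserId | None                 # __or__/__ror__ delegate to Union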
- """ - - __slots__ = () - - @property - @abstractmethod - def mode(self) -> str: - pass - - @property - @abstractmethod - def name(self) -> str: - pass - - @abstractmethod - def close(self) -> None: - pass - - @property - @abstractmethod - def closed(self) -> bool: - pass - - @abstractmethod - def fileno(self) -> int: - pass - - @abstractmethod - def flush(self) -> None: - pass - - @abstractmethod - def isatty(self) -> bool: - pass - - @abstractmethod - def read(self, n: int = -1) -> AnyStr: - pass - - @abstractmethod - def readable(self) -> bool: - pass - - @abstractmethod - def readline(self, limit: int = -1) -> AnyStr: - pass - - @abstractmethod - def readlines(self, hint: int = -1) -> List[AnyStr]: - pass - - @abstractmethod - def seek(self, offset: int, whence: int = 0) -> int: - pass - - @abstractmethod - def seekable(self) -> bool: - pass - - @abstractmethod - def tell(self) -> int: - pass - - @abstractmethod - def truncate(self, size: int = None) -> int: - pass - - @abstractmethod - def writable(self) -> bool: - pass - - @abstractmethod - def write(self, s: AnyStr) -> int: - pass - - @abstractmethod - def writelines(self, lines: List[AnyStr]) -> None: - pass - - @abstractmethod - def __enter__(self) -> 'IO[AnyStr]': - pass - - @abstractmethod - def __exit__(self, type, value, traceback) -> None: - pass - - -class BinaryIO(IO[bytes]): - """Typed version of the return of open() in binary mode.""" - - __slots__ = () - - @abstractmethod - def write(self, s: Union[bytes, bytearray]) -> int: - pass - - @abstractmethod - def __enter__(self) -> 'BinaryIO': - pass - - -class TextIO(IO[str]): - """Typed version of the return of open() in text mode.""" - - __slots__ = () - - @property - @abstractmethod - def buffer(self) -> BinaryIO: - pass - - @property - @abstractmethod - def encoding(self) -> str: - pass - - @property - @abstractmethod - def errors(self) -> Optional[str]: - pass - - @property - @abstractmethod - def line_buffering(self) -> bool: - pass - - @property - @abstractmethod - def newlines(self) -> Any: - pass - - @abstractmethod - def __enter__(self) -> 'TextIO': - pass - - -def reveal_type[T](obj: T, /) -> T: - """Ask a static type checker to reveal the inferred type of an expression. - - When a static type checker encounters a call to ``reveal_type()``, - it will emit the inferred type of the argument:: - - x: int = 1 - reveal_type(x) - - Running a static type checker (e.g., mypy) on this example - will produce output similar to 'Revealed type is "builtins.int"'. - - At runtime, the function prints the runtime type of the - argument and returns the argument unchanged. - """ - print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr) - return obj - - -class _IdentityCallable(Protocol): - def __call__[T](self, arg: T, /) -> T: - ... - - -def dataclass_transform( - *, - eq_default: bool = True, - order_default: bool = False, - kw_only_default: bool = False, - frozen_default: bool = False, - field_specifiers: tuple[type[Any] | Callable[..., Any], ...] = (), - **kwargs: Any, -) -> _IdentityCallable: - """Decorator to mark an object as providing dataclass-like behaviour. - - The decorator can be applied to a function, class, or metaclass. - - Example usage with a decorator function:: - - @dataclass_transform() - def create_model[T](cls: type[T]) -> type[T]: - ... - return cls - - @create_model - class CustomerModel: - id: int - name: str - - On a base class:: - - @dataclass_transform() - class ModelBase: ... 
- - class CustomerModel(ModelBase): - id: int - name: str - - On a metaclass:: - - @dataclass_transform() - class ModelMeta(type): ... - - class ModelBase(metaclass=ModelMeta): ... - - class CustomerModel(ModelBase): - id: int - name: str - - The ``CustomerModel`` classes defined above will - be treated by type checkers similarly to classes created with - ``@dataclasses.dataclass``. - For example, type checkers will assume these classes have - ``__init__`` methods that accept ``id`` and ``name``. - - The arguments to this decorator can be used to customize this behavior: - - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be - ``True`` or ``False`` if it is omitted by the caller. - - ``order_default`` indicates whether the ``order`` parameter is - assumed to be True or False if it is omitted by the caller. - - ``kw_only_default`` indicates whether the ``kw_only`` parameter is - assumed to be True or False if it is omitted by the caller. - - ``frozen_default`` indicates whether the ``frozen`` parameter is - assumed to be True or False if it is omitted by the caller. - - ``field_specifiers`` specifies a static list of supported classes - or functions that describe fields, similar to ``dataclasses.field()``. - - Arbitrary other keyword arguments are accepted in order to allow for - possible future extensions. - - At runtime, this decorator records its arguments in the - ``__dataclass_transform__`` attribute on the decorated object. - It has no other runtime effect. - - See PEP 681 for more details. - """ - def decorator(cls_or_fn): - cls_or_fn.__dataclass_transform__ = { - "eq_default": eq_default, - "order_default": order_default, - "kw_only_default": kw_only_default, - "frozen_default": frozen_default, - "field_specifiers": field_specifiers, - "kwargs": kwargs, - } - return cls_or_fn - return decorator - - -type _Func = Callable[..., Any] - - -def override[F: _Func](method: F, /) -> F: - """Indicate that a method is intended to override a method in a base class. - - Usage:: - - class Base: - def method(self) -> None: - pass - - class Child(Base): - @override - def method(self) -> None: - super().method() - - When this decorator is applied to a method, the type checker will - validate that it overrides a method or attribute with the same name on a - base class. This helps prevent bugs that may occur when a base class is - changed without an equivalent change to a child class. - - There is no runtime checking of this property. The decorator attempts to - set the ``__override__`` attribute to ``True`` on the decorated object to - allow runtime introspection. - - See PEP 698 for details. - """ - try: - method.__override__ = True - except (AttributeError, TypeError): - # Skip the attribute silently if it is not writable. - # AttributeError happens if the object has __slots__ or a - # read-only property, TypeError if it's a builtin class. - pass - return method - - -def is_protocol(tp: type, /) -> bool: - """Return True if the given type is a Protocol. - - Example:: - - >>> from typing import Protocol, is_protocol - >>> class P(Protocol): - ... def a(self) -> str: ... - ... b: int - >>> is_protocol(P) - True - >>> is_protocol(int) - False - """ - return ( - isinstance(tp, type) - and getattr(tp, '_is_protocol', False) - and tp != Protocol - ) - - -def get_protocol_members(tp: type, /) -> frozenset[str]: - """Return the set of members defined in a Protocol. - - Example:: - - >>> from typing import Protocol, get_protocol_members - >>> class P(Protocol): - ... 
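# --- Editor's note: the runtime half of @override above; illustrative. ---
from typing import override

class Base:
    def method(self) -> None: ...

class Child(Base):
    @override                    # a checker verifies Base defines 'method'
    def method(self) -> None:
        super().method()

assert Child.method.__override__ is True   # set for runtime introspection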
def a(self) -> str: ... - ... b: int - >>> get_protocol_members(P) == frozenset({'a', 'b'}) - True - - Raise a TypeError for arguments that are not Protocols. - """ - if not is_protocol(tp): - raise TypeError(f'{tp!r} is not a Protocol') - return frozenset(tp.__protocol_attrs__) - - -def __getattr__(attr): - """Improve the import time of the typing module. - - Soft-deprecated objects which are costly to create - are only created on-demand here. - """ - if attr in {"Pattern", "Match"}: - import re - obj = _alias(getattr(re, attr), 1) - elif attr in {"ContextManager", "AsyncContextManager"}: - import contextlib - obj = _alias(getattr(contextlib, f"Abstract{attr}"), 2, name=attr, defaults=(bool | None,)) - elif attr == "_collect_parameters": - import warnings - - depr_message = ( - "The private _collect_parameters function is deprecated and will be" - " removed in a future version of Python. Any use of private functions" - " is discouraged and may break in the future." - ) - warnings.warn(depr_message, category=DeprecationWarning, stacklevel=2) - obj = _collect_type_parameters - else: - raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") - globals()[attr] = obj - return obj diff --git a/Python313_13_x64_Template/Lib/unittest/__init__.py b/Python313_13_x64_Template/Lib/unittest/__init__.py deleted file mode 100644 index f1f6c911..00000000 --- a/Python313_13_x64_Template/Lib/unittest/__init__.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's -Smalltalk testing framework (used with permission). - -This module contains the core framework classes that form the basis of -specific test cases and suites (TestCase, TestSuite etc.), and also a -text-based utility class for running the tests and reporting the results - (TextTestRunner). - -Simple usage: - - import unittest - - class IntegerArithmeticTestCase(unittest.TestCase): - def testAdd(self): # test method names begin with 'test' - self.assertEqual((1 + 2), 3) - self.assertEqual(0 + 1, 1) - def testMultiply(self): - self.assertEqual((0 * 10), 0) - self.assertEqual((5 * 8), 40) - - if __name__ == '__main__': - unittest.main() - -Further information is available in the bundled documentation, and from - - http://docs.python.org/library/unittest.html - -Copyright (c) 1999-2003 Steve Purcell -Copyright (c) 2003-2010 Python Software Foundation -This module is free software, and you may redistribute it and/or modify -it under the same terms as Python itself, so long as this copyright message -and disclaimer are retained in their original form. - -IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, -SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF -THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. - -THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, -AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, -SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
-""" - -__all__ = ['TestResult', 'TestCase', 'IsolatedAsyncioTestCase', 'TestSuite', - 'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main', - 'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless', - 'expectedFailure', 'TextTestResult', 'installHandler', - 'registerResult', 'removeResult', 'removeHandler', - 'addModuleCleanup', 'doModuleCleanups', 'enterModuleContext'] - -__unittest = True - -from .result import TestResult -from .case import (addModuleCleanup, TestCase, FunctionTestCase, SkipTest, skip, - skipIf, skipUnless, expectedFailure, doModuleCleanups, - enterModuleContext) -from .suite import BaseTestSuite, TestSuite -from .loader import TestLoader, defaultTestLoader -from .main import TestProgram, main -from .runner import TextTestRunner, TextTestResult -from .signals import installHandler, registerResult, removeResult, removeHandler -# IsolatedAsyncioTestCase will be imported lazily. - - -# Lazy import of IsolatedAsyncioTestCase from .async_case -# It imports asyncio, which is relatively heavy, but most tests -# do not need it. - -def __dir__(): - return globals().keys() | {'IsolatedAsyncioTestCase'} - -def __getattr__(name): - if name == 'IsolatedAsyncioTestCase': - global IsolatedAsyncioTestCase - from .async_case import IsolatedAsyncioTestCase - return IsolatedAsyncioTestCase - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/Python313_13_x64_Template/Lib/unittest/async_case.py b/Python313_13_x64_Template/Lib/unittest/async_case.py deleted file mode 100644 index e761ba7e..00000000 --- a/Python313_13_x64_Template/Lib/unittest/async_case.py +++ /dev/null @@ -1,146 +0,0 @@ -import asyncio -import contextvars -import inspect -import warnings - -from .case import TestCase - -__unittest = True - -class IsolatedAsyncioTestCase(TestCase): - # Names intentionally have a long prefix - # to reduce a chance of clashing with user-defined attributes - # from inherited test case - # - # The class doesn't call loop.run_until_complete(self.setUp()) and family - # but uses a different approach: - # 1. create a long-running task that reads self.setUp() - # awaitable from queue along with a future - # 2. await the awaitable object passing in and set the result - # into the future object - # 3. Outer code puts the awaitable and the future object into a queue - # with waiting for the future - # The trick is necessary because every run_until_complete() call - # creates a new task with embedded ContextVar context. - # To share contextvars between setUp(), test and tearDown() we need to execute - # them inside the same task. - - # Note: the test case modifies event loop policy if the policy was not instantiated - # yet, unless loop_factory=asyncio.EventLoop is set. 
- # asyncio.get_event_loop_policy() creates a default policy on demand but never - # returns None - # I believe this is not an issue in user level tests but python itself for testing - # should reset a policy in every test module - # by calling asyncio.set_event_loop_policy(None) in tearDownModule() - # or set loop_factory=asyncio.EventLoop - - loop_factory = None - - def __init__(self, methodName='runTest'): - super().__init__(methodName) - self._asyncioRunner = None - self._asyncioTestContext = contextvars.copy_context() - - async def asyncSetUp(self): - pass - - async def asyncTearDown(self): - pass - - def addAsyncCleanup(self, func, /, *args, **kwargs): - # A trivial trampoline to addCleanup() - # the function exists because it has a different semantics - # and signature: - # addCleanup() accepts regular functions - # but addAsyncCleanup() accepts coroutines - # - # We intentionally don't add inspect.iscoroutinefunction() check - # for func argument because there is no way - # to check for async function reliably: - # 1. It can be "async def func()" itself - # 2. Class can implement "async def __call__()" method - # 3. Regular "def func()" that returns awaitable object - self.addCleanup(*(func, *args), **kwargs) - - async def enterAsyncContext(self, cm): - """Enters the supplied asynchronous context manager. - - If successful, also adds its __aexit__ method as a cleanup - function and returns the result of the __aenter__ method. - """ - # We look up the special methods on the type to match the with - # statement. - cls = type(cm) - try: - enter = cls.__aenter__ - exit = cls.__aexit__ - except AttributeError: - raise TypeError(f"'{cls.__module__}.{cls.__qualname__}' object does " - f"not support the asynchronous context manager protocol" - ) from None - result = await enter(cm) - self.addAsyncCleanup(exit, cm, None, None, None) - return result - - def _callSetUp(self): - # Force loop to be initialized and set as the current loop - # so that setUp functions can use get_event_loop() and get the - # correct loop instance. 
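# --- Editor's note: illustrative use of the class defined above; asyncSetUp,
# the test method, and asyncTearDown all run in one task, sharing contextvars. ---
import asyncio
import unittest

class TestAsync(unittest.IsolatedAsyncioTestCase):
    async def asyncSetUp(self):
        self.started = True

    async def test_runs_in_loop(self):
        self.assertTrue(self.started)
        await asyncio.sleep(0)

    async def asyncTearDown(self):
        self.started = False

if __name__ == '__main__':
    unittest.main()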
- self._asyncioRunner.get_loop() - self._asyncioTestContext.run(self.setUp) - self._callAsync(self.asyncSetUp) - - def _callTestMethod(self, method): - if self._callMaybeAsync(method) is not None: - warnings.warn(f'It is deprecated to return a value that is not None from a ' - f'test case ({method})', DeprecationWarning, stacklevel=4) - - def _callTearDown(self): - self._callAsync(self.asyncTearDown) - self._asyncioTestContext.run(self.tearDown) - - def _callCleanup(self, function, *args, **kwargs): - self._callMaybeAsync(function, *args, **kwargs) - - def _callAsync(self, func, /, *args, **kwargs): - assert self._asyncioRunner is not None, 'asyncio runner is not initialized' - assert inspect.iscoroutinefunction(func), f'{func!r} is not an async function' - return self._asyncioRunner.run( - func(*args, **kwargs), - context=self._asyncioTestContext - ) - - def _callMaybeAsync(self, func, /, *args, **kwargs): - assert self._asyncioRunner is not None, 'asyncio runner is not initialized' - if inspect.iscoroutinefunction(func): - return self._asyncioRunner.run( - func(*args, **kwargs), - context=self._asyncioTestContext, - ) - else: - return self._asyncioTestContext.run(func, *args, **kwargs) - - def _setupAsyncioRunner(self): - assert self._asyncioRunner is None, 'asyncio runner is already initialized' - runner = asyncio.Runner(debug=True, loop_factory=self.loop_factory) - self._asyncioRunner = runner - - def _tearDownAsyncioRunner(self): - runner = self._asyncioRunner - runner.close() - - def run(self, result=None): - self._setupAsyncioRunner() - try: - return super().run(result) - finally: - self._tearDownAsyncioRunner() - - def debug(self): - self._setupAsyncioRunner() - super().debug() - self._tearDownAsyncioRunner() - - def __del__(self): - if self._asyncioRunner is not None: - self._tearDownAsyncioRunner() diff --git a/Python313_13_x64_Template/Lib/unittest/case.py b/Python313_13_x64_Template/Lib/unittest/case.py deleted file mode 100644 index 36daa61f..00000000 --- a/Python313_13_x64_Template/Lib/unittest/case.py +++ /dev/null @@ -1,1478 +0,0 @@ -"""Test case implementation""" - -import sys -import functools -import difflib -import pprint -import re -import warnings -import collections -import contextlib -import traceback -import time -import types - -from . import result -from .util import (strclass, safe_repr, _count_diff_all_purpose, - _count_diff_hashable, _common_shorten_repr) - -__unittest = True - -_subtest_msg_sentinel = object() - -DIFF_OMITTED = ('\nDiff is %s characters long. ' - 'Set self.maxDiff to None to see it.') - -class SkipTest(Exception): - """ - Raise this exception in a test to skip it. - - Usually you can use TestCase.skipTest() or one of the skipping decorators - instead of raising this directly. - """ - -class _ShouldStop(Exception): - """ - The test should stop. - """ - -class _UnexpectedSuccess(Exception): - """ - The test was supposed to fail, but it didn't! 
- """ - - -class _Outcome(object): - def __init__(self, result=None): - self.expecting_failure = False - self.result = result - self.result_supports_subtests = hasattr(result, "addSubTest") - self.success = True - self.expectedFailure = None - - @contextlib.contextmanager - def testPartExecutor(self, test_case, subTest=False): - old_success = self.success - self.success = True - try: - yield - except KeyboardInterrupt: - raise - except SkipTest as e: - self.success = False - _addSkip(self.result, test_case, str(e)) - except _ShouldStop: - pass - except: - exc_info = sys.exc_info() - if self.expecting_failure: - self.expectedFailure = exc_info - else: - self.success = False - if subTest: - self.result.addSubTest(test_case.test_case, test_case, exc_info) - else: - _addError(self.result, test_case, exc_info) - # explicitly break a reference cycle: - # exc_info -> frame -> exc_info - exc_info = None - else: - if subTest and self.success: - self.result.addSubTest(test_case.test_case, test_case, None) - finally: - self.success = self.success and old_success - - -def _addSkip(result, test_case, reason): - addSkip = getattr(result, 'addSkip', None) - if addSkip is not None: - addSkip(test_case, reason) - else: - warnings.warn("TestResult has no addSkip method, skips not reported", - RuntimeWarning, 2) - result.addSuccess(test_case) - -def _addError(result, test, exc_info): - if result is not None and exc_info is not None: - if issubclass(exc_info[0], test.failureException): - result.addFailure(test, exc_info) - else: - result.addError(test, exc_info) - -def _id(obj): - return obj - - -def _enter_context(cm, addcleanup): - # We look up the special methods on the type to match the with - # statement. - cls = type(cm) - try: - enter = cls.__enter__ - exit = cls.__exit__ - except AttributeError: - raise TypeError(f"'{cls.__module__}.{cls.__qualname__}' object does " - f"not support the context manager protocol") from None - result = enter(cm) - addcleanup(exit, cm, None, None, None) - return result - - -_module_cleanups = [] -def addModuleCleanup(function, /, *args, **kwargs): - """Same as addCleanup, except the cleanup items are called even if - setUpModule fails (unlike tearDownModule).""" - _module_cleanups.append((function, args, kwargs)) - -def enterModuleContext(cm): - """Same as enterContext, but module-wide.""" - return _enter_context(cm, addModuleCleanup) - - -def doModuleCleanups(): - """Execute all module cleanup functions. Normally called for you after - tearDownModule.""" - exceptions = [] - while _module_cleanups: - function, args, kwargs = _module_cleanups.pop() - try: - function(*args, **kwargs) - except Exception as exc: - exceptions.append(exc) - if exceptions: - # Swallows all but first exception. If a multi-exception handler - # gets written we should use that here instead. - raise exceptions[0] - - -def skip(reason): - """ - Unconditionally skip a test. - """ - def decorator(test_item): - if not isinstance(test_item, type): - @functools.wraps(test_item) - def skip_wrapper(*args, **kwargs): - raise SkipTest(reason) - test_item = skip_wrapper - - test_item.__unittest_skip__ = True - test_item.__unittest_skip_why__ = reason - return test_item - if isinstance(reason, types.FunctionType): - test_item = reason - reason = '' - return decorator(test_item) - return decorator - -def skipIf(condition, reason): - """ - Skip a test if the condition is true. 
- """ - if condition: - return skip(reason) - return _id - -def skipUnless(condition, reason): - """ - Skip a test unless the condition is true. - """ - if not condition: - return skip(reason) - return _id - -def expectedFailure(test_item): - test_item.__unittest_expecting_failure__ = True - return test_item - -def _is_subtype(expected, basetype): - if isinstance(expected, tuple): - return all(_is_subtype(e, basetype) for e in expected) - return isinstance(expected, type) and issubclass(expected, basetype) - -class _BaseTestCaseContext: - - def __init__(self, test_case): - self.test_case = test_case - - def _raiseFailure(self, standardMsg): - msg = self.test_case._formatMessage(self.msg, standardMsg) - raise self.test_case.failureException(msg) - -class _AssertRaisesBaseContext(_BaseTestCaseContext): - - def __init__(self, expected, test_case, expected_regex=None): - _BaseTestCaseContext.__init__(self, test_case) - self.expected = expected - self.test_case = test_case - if expected_regex is not None: - expected_regex = re.compile(expected_regex) - self.expected_regex = expected_regex - self.obj_name = None - self.msg = None - - def handle(self, name, args, kwargs): - """ - If args is empty, assertRaises/Warns is being used as a - context manager, so check for a 'msg' kwarg and return self. - If args is not empty, call a callable passing positional and keyword - arguments. - """ - try: - if not _is_subtype(self.expected, self._base_type): - raise TypeError('%s() arg 1 must be %s' % - (name, self._base_type_str)) - if not args: - self.msg = kwargs.pop('msg', None) - if kwargs: - raise TypeError('%r is an invalid keyword argument for ' - 'this function' % (next(iter(kwargs)),)) - return self - - callable_obj, *args = args - try: - self.obj_name = callable_obj.__name__ - except AttributeError: - self.obj_name = str(callable_obj) - with self: - callable_obj(*args, **kwargs) - finally: - # bpo-23890: manually break a reference cycle - self = None - - -class _AssertRaisesContext(_AssertRaisesBaseContext): - """A context manager used to implement TestCase.assertRaises* methods.""" - - _base_type = BaseException - _base_type_str = 'an exception type or tuple of exception types' - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - if exc_type is None: - try: - exc_name = self.expected.__name__ - except AttributeError: - exc_name = str(self.expected) - if self.obj_name: - self._raiseFailure("{} not raised by {}".format(exc_name, - self.obj_name)) - else: - self._raiseFailure("{} not raised".format(exc_name)) - else: - traceback.clear_frames(tb) - if not issubclass(exc_type, self.expected): - # let unexpected exceptions pass through - return False - # store exception, without traceback, for later retrieval - self.exception = exc_value.with_traceback(None) - if self.expected_regex is None: - return True - - expected_regex = self.expected_regex - if not expected_regex.search(str(exc_value)): - self._raiseFailure('"{}" does not match "{}"'.format( - expected_regex.pattern, str(exc_value))) - return True - - __class_getitem__ = classmethod(types.GenericAlias) - - -class _AssertWarnsContext(_AssertRaisesBaseContext): - """A context manager used to implement TestCase.assertWarns* methods.""" - - _base_type = Warning - _base_type_str = 'a warning type or tuple of warning types' - - def __enter__(self): - # The __warningregistry__'s need to be in a pristine state for tests - # to work properly. 
- for v in list(sys.modules.values()): - if getattr(v, '__warningregistry__', None): - v.__warningregistry__ = {} - self.warnings_manager = warnings.catch_warnings(record=True) - self.warnings = self.warnings_manager.__enter__() - warnings.simplefilter("always", self.expected) - return self - - def __exit__(self, exc_type, exc_value, tb): - self.warnings_manager.__exit__(exc_type, exc_value, tb) - if exc_type is not None: - # let unexpected exceptions pass through - return - try: - exc_name = self.expected.__name__ - except AttributeError: - exc_name = str(self.expected) - first_matching = None - for m in self.warnings: - w = m.message - if not isinstance(w, self.expected): - continue - if first_matching is None: - first_matching = w - if (self.expected_regex is not None and - not self.expected_regex.search(str(w))): - continue - # store warning for later retrieval - self.warning = w - self.filename = m.filename - self.lineno = m.lineno - return - # Now we simply try to choose a helpful failure message - if first_matching is not None: - self._raiseFailure('"{}" does not match "{}"'.format( - self.expected_regex.pattern, str(first_matching))) - if self.obj_name: - self._raiseFailure("{} not triggered by {}".format(exc_name, - self.obj_name)) - else: - self._raiseFailure("{} not triggered".format(exc_name)) - - -class _AssertNotWarnsContext(_AssertWarnsContext): - - def __exit__(self, exc_type, exc_value, tb): - self.warnings_manager.__exit__(exc_type, exc_value, tb) - if exc_type is not None: - # let unexpected exceptions pass through - return - try: - exc_name = self.expected.__name__ - except AttributeError: - exc_name = str(self.expected) - for m in self.warnings: - w = m.message - if isinstance(w, self.expected): - self._raiseFailure(f"{exc_name} triggered") - - -class _OrderedChainMap(collections.ChainMap): - def __iter__(self): - seen = set() - for mapping in self.maps: - for k in mapping: - if k not in seen: - seen.add(k) - yield k - - -class TestCase(object): - """A class whose instances are single test cases. - - By default, the test code itself should be placed in a method named - 'runTest'. - - If the fixture may be used for many test cases, create as - many test methods as are needed. When instantiating such a TestCase - subclass, specify in the constructor arguments the name of the test method - that the instance is to execute. - - Test authors should subclass TestCase for their own tests. Construction - and deconstruction of the test's environment ('fixture') can be - implemented by overriding the 'setUp' and 'tearDown' methods respectively. - - If it is necessary to override the __init__ method, the base class - __init__ method must always be called. It is important that subclasses - should not change the signature of their __init__ method, since instances - of the classes are instantiated automatically by parts of the framework - in order to be run. - - When subclassing TestCase, you can set these attributes: - * failureException: determines which exception will be raised when - the instance's assertion methods fail; test methods raising this - exception will be deemed to have 'failed' rather than 'errored'. - * longMessage: determines whether long messages (including repr of - objects used in assert methods) will be printed on failure in *addition* - to any explicit message passed. - * maxDiff: sets the maximum length of a diff in failure messages - by assert methods using difflib. 
It is looked up as an instance - attribute so can be configured by individual tests if required. - """ - - failureException = AssertionError - - longMessage = True - - maxDiff = 80*8 - - # If a string is longer than _diffThreshold, use normal comparison instead - # of difflib. See #11763. - _diffThreshold = 2**16 - - def __init_subclass__(cls, *args, **kwargs): - # Attribute used by TestSuite for classSetUp - cls._classSetupFailed = False - cls._class_cleanups = [] - super().__init_subclass__(*args, **kwargs) - - def __init__(self, methodName='runTest'): - """Create an instance of the class that will use the named test - method when executed. Raises a ValueError if the instance does - not have a method with the specified name. - """ - self._testMethodName = methodName - self._outcome = None - self._testMethodDoc = 'No test' - try: - testMethod = getattr(self, methodName) - except AttributeError: - if methodName != 'runTest': - # we allow instantiation with no explicit method name - # but not an *incorrect* or missing method name - raise ValueError("no such test method in %s: %s" % - (self.__class__, methodName)) - else: - self._testMethodDoc = testMethod.__doc__ - self._cleanups = [] - self._subtest = None - - # Map types to custom assertEqual functions that will compare - # instances of said type in more detail to generate a more useful - # error message. - self._type_equality_funcs = {} - self.addTypeEqualityFunc(dict, 'assertDictEqual') - self.addTypeEqualityFunc(list, 'assertListEqual') - self.addTypeEqualityFunc(tuple, 'assertTupleEqual') - self.addTypeEqualityFunc(set, 'assertSetEqual') - self.addTypeEqualityFunc(frozenset, 'assertSetEqual') - self.addTypeEqualityFunc(str, 'assertMultiLineEqual') - - def addTypeEqualityFunc(self, typeobj, function): - """Add a type specific assertEqual style function to compare a type. - - This method is for use by TestCase subclasses that need to register - their own type equality functions to provide nicer error messages. - - Args: - typeobj: The data type to call this function on when both values - are of the same type in assertEqual(). - function: The callable taking two arguments and an optional - msg= argument that raises self.failureException with a - useful error message when the two arguments are not equal. - """ - self._type_equality_funcs[typeobj] = function - - def addCleanup(self, function, /, *args, **kwargs): - """Add a function, with arguments, to be called when the test is - completed. Functions added are called on a LIFO basis and are - called after tearDown on test failure or success. - - Cleanup items are called even if setUp fails (unlike tearDown).""" - self._cleanups.append((function, args, kwargs)) - - def enterContext(self, cm): - """Enters the supplied context manager. - - If successful, also adds its __exit__ method as a cleanup - function and returns the result of the __enter__ method. - """ - return _enter_context(cm, self.addCleanup) - - @classmethod - def addClassCleanup(cls, function, /, *args, **kwargs): - """Same as addCleanup, except the cleanup items are called even if - setUpClass fails (unlike tearDownClass).""" - cls._class_cleanups.append((function, args, kwargs)) - - @classmethod - def enterClassContext(cls, cm): - """Same as enterContext, but class-wide.""" - return _enter_context(cm, cls.addClassCleanup) - - def setUp(self): - "Hook method for setting up the test fixture before exercising it." - pass - - def tearDown(self): - "Hook method for deconstructing the test fixture after testing it." 
- pass - - @classmethod - def setUpClass(cls): - "Hook method for setting up class fixture before running tests in the class." - - @classmethod - def tearDownClass(cls): - "Hook method for deconstructing the class fixture after running all tests in the class." - - def countTestCases(self): - return 1 - - def defaultTestResult(self): - return result.TestResult() - - def shortDescription(self): - """Returns a one-line description of the test, or None if no - description has been provided. - - The default implementation of this method returns the first line of - the specified test method's docstring. - """ - doc = self._testMethodDoc - return doc.strip().split("\n")[0].strip() if doc else None - - - def id(self): - return "%s.%s" % (strclass(self.__class__), self._testMethodName) - - def __eq__(self, other): - if type(self) is not type(other): - return NotImplemented - - return self._testMethodName == other._testMethodName - - def __hash__(self): - return hash((type(self), self._testMethodName)) - - def __str__(self): - return "%s (%s.%s)" % (self._testMethodName, strclass(self.__class__), self._testMethodName) - - def __repr__(self): - return "<%s testMethod=%s>" % \ - (strclass(self.__class__), self._testMethodName) - - @contextlib.contextmanager - def subTest(self, msg=_subtest_msg_sentinel, **params): - """Return a context manager that will return the enclosed block - of code in a subtest identified by the optional message and - keyword parameters. A failure in the subtest marks the test - case as failed but resumes execution at the end of the enclosed - block, allowing further test code to be executed. - """ - if self._outcome is None or not self._outcome.result_supports_subtests: - yield - return - parent = self._subtest - if parent is None: - params_map = _OrderedChainMap(params) - else: - params_map = parent.params.new_child(params) - self._subtest = _SubTest(self, msg, params_map) - try: - with self._outcome.testPartExecutor(self._subtest, subTest=True): - yield - if not self._outcome.success: - result = self._outcome.result - if result is not None and result.failfast: - raise _ShouldStop - elif self._outcome.expectedFailure: - # If the test is expecting a failure, we really want to - # stop now and register the expected failure. - raise _ShouldStop - finally: - self._subtest = parent - - def _addExpectedFailure(self, result, exc_info): - try: - addExpectedFailure = result.addExpectedFailure - except AttributeError: - warnings.warn("TestResult has no addExpectedFailure method, reporting as passes", - RuntimeWarning) - result.addSuccess(self) - else: - addExpectedFailure(self, exc_info) - - def _addUnexpectedSuccess(self, result): - try: - addUnexpectedSuccess = result.addUnexpectedSuccess - except AttributeError: - warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failure", - RuntimeWarning) - # We need to pass an actual exception and traceback to addFailure, - # otherwise the legacy result can choke. 
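# --- Editor's note: subTest() above in use; each failing parameter set is
# reported separately without aborting the test method. Illustrative. ---
import unittest

class TestEven(unittest.TestCase):
    def test_even(self):
        for i in (0, 2, 4):
            with self.subTest(i=i):
                self.assertEqual(i % 2, 0)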
- try: - raise _UnexpectedSuccess from None - except _UnexpectedSuccess: - result.addFailure(self, sys.exc_info()) - else: - addUnexpectedSuccess(self) - - def _addDuration(self, result, elapsed): - try: - addDuration = result.addDuration - except AttributeError: - warnings.warn("TestResult has no addDuration method", - RuntimeWarning) - else: - addDuration(self, elapsed) - - def _callSetUp(self): - self.setUp() - - def _callTestMethod(self, method): - if method() is not None: - warnings.warn(f'It is deprecated to return a value that is not None from a ' - f'test case ({method})', DeprecationWarning, stacklevel=3) - - def _callTearDown(self): - self.tearDown() - - def _callCleanup(self, function, /, *args, **kwargs): - function(*args, **kwargs) - - def run(self, result=None): - if result is None: - result = self.defaultTestResult() - startTestRun = getattr(result, 'startTestRun', None) - stopTestRun = getattr(result, 'stopTestRun', None) - if startTestRun is not None: - startTestRun() - else: - stopTestRun = None - - result.startTest(self) - try: - testMethod = getattr(self, self._testMethodName) - if (getattr(self.__class__, "__unittest_skip__", False) or - getattr(testMethod, "__unittest_skip__", False)): - # If the class or method was skipped. - skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') - or getattr(testMethod, '__unittest_skip_why__', '')) - _addSkip(result, self, skip_why) - return result - - expecting_failure = ( - getattr(self, "__unittest_expecting_failure__", False) or - getattr(testMethod, "__unittest_expecting_failure__", False) - ) - outcome = _Outcome(result) - start_time = time.perf_counter() - try: - self._outcome = outcome - - with outcome.testPartExecutor(self): - self._callSetUp() - if outcome.success: - outcome.expecting_failure = expecting_failure - with outcome.testPartExecutor(self): - self._callTestMethod(testMethod) - outcome.expecting_failure = False - with outcome.testPartExecutor(self): - self._callTearDown() - self.doCleanups() - self._addDuration(result, (time.perf_counter() - start_time)) - - if outcome.success: - if expecting_failure: - if outcome.expectedFailure: - self._addExpectedFailure(result, outcome.expectedFailure) - else: - self._addUnexpectedSuccess(result) - else: - result.addSuccess(self) - return result - finally: - # explicitly break reference cycle: - # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure - outcome.expectedFailure = None - outcome = None - - # clear the outcome, no more needed - self._outcome = None - - finally: - result.stopTest(self) - if stopTestRun is not None: - stopTestRun() - - def doCleanups(self): - """Execute all cleanup functions. Normally called for you after - tearDown.""" - outcome = self._outcome or _Outcome() - while self._cleanups: - function, args, kwargs = self._cleanups.pop() - with outcome.testPartExecutor(self): - self._callCleanup(function, *args, **kwargs) - - # return this for backwards compatibility - # even though we no longer use it internally - return outcome.success - - @classmethod - def doClassCleanups(cls): - """Execute all class cleanup functions. 
Normally called for you after - tearDownClass.""" - cls.tearDown_exceptions = [] - while cls._class_cleanups: - function, args, kwargs = cls._class_cleanups.pop() - try: - function(*args, **kwargs) - except Exception: - cls.tearDown_exceptions.append(sys.exc_info()) - - def __call__(self, *args, **kwds): - return self.run(*args, **kwds) - - def debug(self): - """Run the test without collecting errors in a TestResult""" - testMethod = getattr(self, self._testMethodName) - if (getattr(self.__class__, "__unittest_skip__", False) or - getattr(testMethod, "__unittest_skip__", False)): - # If the class or method was skipped. - skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') - or getattr(testMethod, '__unittest_skip_why__', '')) - raise SkipTest(skip_why) - - self._callSetUp() - self._callTestMethod(testMethod) - self._callTearDown() - while self._cleanups: - function, args, kwargs = self._cleanups.pop() - self._callCleanup(function, *args, **kwargs) - - def skipTest(self, reason): - """Skip this test.""" - raise SkipTest(reason) - - def fail(self, msg=None): - """Fail immediately, with the given message.""" - raise self.failureException(msg) - - def assertFalse(self, expr, msg=None): - """Check that the expression is false.""" - if expr: - msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr)) - raise self.failureException(msg) - - def assertTrue(self, expr, msg=None): - """Check that the expression is true.""" - if not expr: - msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr)) - raise self.failureException(msg) - - def _formatMessage(self, msg, standardMsg): - """Honour the longMessage attribute when generating failure messages. - If longMessage is False this means: - * Use only an explicit message if it is provided - * Otherwise use the standard message for the assert - - If longMessage is True: - * Use the standard message - * If an explicit message is provided, plus ' : ' and the explicit message - """ - if not self.longMessage: - return msg or standardMsg - if msg is None: - return standardMsg - try: - # don't switch to '{}' formatting in Python 2.X - # it changes the way unicode input is handled - return '%s : %s' % (standardMsg, msg) - except UnicodeDecodeError: - return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg)) - - def assertRaises(self, expected_exception, *args, **kwargs): - """Fail unless an exception of class expected_exception is raised - by the callable when invoked with specified positional and - keyword arguments. If a different type of exception is - raised, it will not be caught, and the test case will be - deemed to have suffered an error, exactly as for an - unexpected exception. - - If called with the callable and arguments omitted, will return a - context object used like this:: - - with self.assertRaises(SomeException): - do_something() - - An optional keyword argument 'msg' can be provided when assertRaises - is used as a context object. - - The context manager keeps a reference to the exception as - the 'exception' attribute. 
This allows you to inspect the - exception after the assertion:: - - with self.assertRaises(SomeException) as cm: - do_something() - the_exception = cm.exception - self.assertEqual(the_exception.error_code, 3) - """ - context = _AssertRaisesContext(expected_exception, self) - try: - return context.handle('assertRaises', args, kwargs) - finally: - # bpo-23890: manually break a reference cycle - context = None - - def assertWarns(self, expected_warning, *args, **kwargs): - """Fail unless a warning of class warnClass is triggered - by the callable when invoked with specified positional and - keyword arguments. If a different type of warning is - triggered, it will not be handled: depending on the other - warning filtering rules in effect, it might be silenced, printed - out, or raised as an exception. - - If called with the callable and arguments omitted, will return a - context object used like this:: - - with self.assertWarns(SomeWarning): - do_something() - - An optional keyword argument 'msg' can be provided when assertWarns - is used as a context object. - - The context manager keeps a reference to the first matching - warning as the 'warning' attribute; similarly, the 'filename' - and 'lineno' attributes give you information about the line - of Python code from which the warning was triggered. - This allows you to inspect the warning after the assertion:: - - with self.assertWarns(SomeWarning) as cm: - do_something() - the_warning = cm.warning - self.assertEqual(the_warning.some_attribute, 147) - """ - context = _AssertWarnsContext(expected_warning, self) - return context.handle('assertWarns', args, kwargs) - - def _assertNotWarns(self, expected_warning, *args, **kwargs): - """The opposite of assertWarns. Private due to low demand.""" - context = _AssertNotWarnsContext(expected_warning, self) - return context.handle('_assertNotWarns', args, kwargs) - - def assertLogs(self, logger=None, level=None): - """Fail unless a log message of level *level* or higher is emitted - on *logger_name* or its children. If omitted, *level* defaults to - INFO and *logger* defaults to the root logger. - - This method must be used as a context manager, and will yield - a recording object with two attributes: `output` and `records`. - At the end of the context manager, the `output` attribute will - be a list of the matching formatted log messages and the - `records` attribute will be a list of the corresponding LogRecord - objects. - - Example:: - - with self.assertLogs('foo', level='INFO') as cm: - logging.getLogger('foo').info('first message') - logging.getLogger('foo.bar').error('second message') - self.assertEqual(cm.output, ['INFO:foo:first message', - 'ERROR:foo.bar:second message']) - """ - # Lazy import to avoid importing logging if it is not needed. - from ._log import _AssertLogsContext - return _AssertLogsContext(self, logger, level, no_logs=False) - - def assertNoLogs(self, logger=None, level=None): - """ Fail unless no log messages of level *level* or higher are emitted - on *logger_name* or its children. - - This method must be used as a context manager. - """ - from ._log import _AssertLogsContext - return _AssertLogsContext(self, logger, level, no_logs=True) - - def _getAssertEqualityFunc(self, first, second): - """Get a detailed comparison function for the types of the two args. - - Returns: A callable accepting (first, second, msg=None) that will - raise a failure exception if first != second with a useful human - readable error message for those types. 
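# --- Editor's note: the two calling conventions dispatched by
# _AssertRaisesContext.handle() above; illustrative. ---
import unittest

class TestRaises(unittest.TestCase):
    def test_callable_form(self):
        self.assertRaises(ZeroDivisionError, divmod, 1, 0)

    def test_context_form(self):
        with self.assertRaises(ValueError, msg='bad literal') as cm:
            int('not a number')
        self.assertIn('not a number', str(cm.exception))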
- """ - # - # NOTE(gregory.p.smith): I considered isinstance(first, type(second)) - # and vice versa. I opted for the conservative approach in case - # subclasses are not intended to be compared in detail to their super - # class instances using a type equality func. This means testing - # subtypes won't automagically use the detailed comparison. Callers - # should use their type specific assertSpamEqual method to compare - # subclasses if the detailed comparison is desired and appropriate. - # See the discussion in http://bugs.python.org/issue2578. - # - if type(first) is type(second): - asserter = self._type_equality_funcs.get(type(first)) - if asserter is not None: - if isinstance(asserter, str): - asserter = getattr(self, asserter) - return asserter - - return self._baseAssertEqual - - def _baseAssertEqual(self, first, second, msg=None): - """The default assertEqual implementation, not type specific.""" - if not first == second: - standardMsg = '%s != %s' % _common_shorten_repr(first, second) - msg = self._formatMessage(msg, standardMsg) - raise self.failureException(msg) - - def assertEqual(self, first, second, msg=None): - """Fail if the two objects are unequal as determined by the '==' - operator. - """ - assertion_func = self._getAssertEqualityFunc(first, second) - assertion_func(first, second, msg=msg) - - def assertNotEqual(self, first, second, msg=None): - """Fail if the two objects are equal as determined by the '!=' - operator. - """ - if not first != second: - msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first), - safe_repr(second))) - raise self.failureException(msg) - - def assertAlmostEqual(self, first, second, places=None, msg=None, - delta=None): - """Fail if the two objects are unequal as determined by their - difference rounded to the given number of decimal places - (default 7) and comparing to zero, or by comparing that the - difference between the two objects is more than the given - delta. - - Note that decimal places (from zero) are usually not the same - as significant digits (measured from the most significant digit). - - If the two objects compare equal then they will automatically - compare almost equal. - """ - if first == second: - # shortcut - return - if delta is not None and places is not None: - raise TypeError("specify delta or places not both") - - diff = abs(first - second) - if delta is not None: - if diff <= delta: - return - - standardMsg = '%s != %s within %s delta (%s difference)' % ( - safe_repr(first), - safe_repr(second), - safe_repr(delta), - safe_repr(diff)) - else: - if places is None: - places = 7 - - if round(diff, places) == 0: - return - - standardMsg = '%s != %s within %r places (%s difference)' % ( - safe_repr(first), - safe_repr(second), - places, - safe_repr(diff)) - msg = self._formatMessage(msg, standardMsg) - raise self.failureException(msg) - - def assertNotAlmostEqual(self, first, second, places=None, msg=None, - delta=None): - """Fail if the two objects are equal as determined by their - difference rounded to the given number of decimal places - (default 7) and comparing to zero, or by comparing that the - difference between the two objects is less than the given delta. - - Note that decimal places (from zero) are usually not the same - as significant digits (measured from the most significant digit). - - Objects that are equal automatically fail. 
- """ - if delta is not None and places is not None: - raise TypeError("specify delta or places not both") - diff = abs(first - second) - if delta is not None: - if not (first == second) and diff > delta: - return - standardMsg = '%s == %s within %s delta (%s difference)' % ( - safe_repr(first), - safe_repr(second), - safe_repr(delta), - safe_repr(diff)) - else: - if places is None: - places = 7 - if not (first == second) and round(diff, places) != 0: - return - standardMsg = '%s == %s within %r places' % (safe_repr(first), - safe_repr(second), - places) - - msg = self._formatMessage(msg, standardMsg) - raise self.failureException(msg) - - def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None): - """An equality assertion for ordered sequences (like lists and tuples). - - For the purposes of this function, a valid ordered sequence type is one - which can be indexed, has a length, and has an equality operator. - - Args: - seq1: The first sequence to compare. - seq2: The second sequence to compare. - seq_type: The expected datatype of the sequences, or None if no - datatype should be enforced. - msg: Optional message to use on failure instead of a list of - differences. - """ - if seq_type is not None: - seq_type_name = seq_type.__name__ - if not isinstance(seq1, seq_type): - raise self.failureException('First sequence is not a %s: %s' - % (seq_type_name, safe_repr(seq1))) - if not isinstance(seq2, seq_type): - raise self.failureException('Second sequence is not a %s: %s' - % (seq_type_name, safe_repr(seq2))) - else: - seq_type_name = "sequence" - - differing = None - try: - len1 = len(seq1) - except (TypeError, NotImplementedError): - differing = 'First %s has no length. Non-sequence?' % ( - seq_type_name) - - if differing is None: - try: - len2 = len(seq2) - except (TypeError, NotImplementedError): - differing = 'Second %s has no length. Non-sequence?' % ( - seq_type_name) - - if differing is None: - if seq1 == seq2: - return - - differing = '%ss differ: %s != %s\n' % ( - (seq_type_name.capitalize(),) + - _common_shorten_repr(seq1, seq2)) - - for i in range(min(len1, len2)): - try: - item1 = seq1[i] - except (TypeError, IndexError, NotImplementedError): - differing += ('\nUnable to index element %d of first %s\n' % - (i, seq_type_name)) - break - - try: - item2 = seq2[i] - except (TypeError, IndexError, NotImplementedError): - differing += ('\nUnable to index element %d of second %s\n' % - (i, seq_type_name)) - break - - if item1 != item2: - differing += ('\nFirst differing element %d:\n%s\n%s\n' % - ((i,) + _common_shorten_repr(item1, item2))) - break - else: - if (len1 == len2 and seq_type is None and - type(seq1) != type(seq2)): - # The sequences are the same, but have differing types. 
- return - - if len1 > len2: - differing += ('\nFirst %s contains %d additional ' - 'elements.\n' % (seq_type_name, len1 - len2)) - try: - differing += ('First extra element %d:\n%s\n' % - (len2, safe_repr(seq1[len2]))) - except (TypeError, IndexError, NotImplementedError): - differing += ('Unable to index element %d ' - 'of first %s\n' % (len2, seq_type_name)) - elif len1 < len2: - differing += ('\nSecond %s contains %d additional ' - 'elements.\n' % (seq_type_name, len2 - len1)) - try: - differing += ('First extra element %d:\n%s\n' % - (len1, safe_repr(seq2[len1]))) - except (TypeError, IndexError, NotImplementedError): - differing += ('Unable to index element %d ' - 'of second %s\n' % (len1, seq_type_name)) - standardMsg = differing - diffMsg = '\n' + '\n'.join( - difflib.ndiff(pprint.pformat(seq1).splitlines(), - pprint.pformat(seq2).splitlines())) - - standardMsg = self._truncateMessage(standardMsg, diffMsg) - msg = self._formatMessage(msg, standardMsg) - self.fail(msg) - - def _truncateMessage(self, message, diff): - max_diff = self.maxDiff - if max_diff is None or len(diff) <= max_diff: - return message + diff - return message + (DIFF_OMITTED % len(diff)) - - def assertListEqual(self, list1, list2, msg=None): - """A list-specific equality assertion. - - Args: - list1: The first list to compare. - list2: The second list to compare. - msg: Optional message to use on failure instead of a list of - differences. - - """ - self.assertSequenceEqual(list1, list2, msg, seq_type=list) - - def assertTupleEqual(self, tuple1, tuple2, msg=None): - """A tuple-specific equality assertion. - - Args: - tuple1: The first tuple to compare. - tuple2: The second tuple to compare. - msg: Optional message to use on failure instead of a list of - differences. - """ - self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple) - - def assertSetEqual(self, set1, set2, msg=None): - """A set-specific equality assertion. - - Args: - set1: The first set to compare. - set2: The second set to compare. - msg: Optional message to use on failure instead of a list of - differences. - - assertSetEqual uses ducktyping to support different types of sets, and - is optimized for sets specifically (parameters must support a - difference method). 
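Because assertSetEqual only requires a working difference() method, mixed set types compare fine; a small sketch under that assumption:

import unittest

class SetExamples(unittest.TestCase):
    def test_mixed_set_types(self):
        # set vs frozenset both provide difference(), so this passes
        self.assertSetEqual({1, 2, 3}, frozenset({1, 2, 3}))

    def test_reports_both_directions(self):
        # the failure message lists items missing from each side (1 and 3)
        with self.assertRaises(self.failureException):
            self.assertSetEqual({1, 2}, {2, 3})

if __name__ == '__main__':
    unittest.main()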
- """ - try: - difference1 = set1.difference(set2) - except TypeError as e: - self.fail('invalid type when attempting set difference: %s' % e) - except AttributeError as e: - self.fail('first argument does not support set difference: %s' % e) - - try: - difference2 = set2.difference(set1) - except TypeError as e: - self.fail('invalid type when attempting set difference: %s' % e) - except AttributeError as e: - self.fail('second argument does not support set difference: %s' % e) - - if not (difference1 or difference2): - return - - lines = [] - if difference1: - lines.append('Items in the first set but not the second:') - for item in difference1: - lines.append(repr(item)) - if difference2: - lines.append('Items in the second set but not the first:') - for item in difference2: - lines.append(repr(item)) - - standardMsg = '\n'.join(lines) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIn(self, member, container, msg=None): - """Just like self.assertTrue(a in b), but with a nicer default message.""" - if member not in container: - standardMsg = '%s not found in %s' % (safe_repr(member), - safe_repr(container)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertNotIn(self, member, container, msg=None): - """Just like self.assertTrue(a not in b), but with a nicer default message.""" - if member in container: - standardMsg = '%s unexpectedly found in %s' % (safe_repr(member), - safe_repr(container)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIs(self, expr1, expr2, msg=None): - """Just like self.assertTrue(a is b), but with a nicer default message.""" - if expr1 is not expr2: - standardMsg = '%s is not %s' % (safe_repr(expr1), - safe_repr(expr2)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIsNot(self, expr1, expr2, msg=None): - """Just like self.assertTrue(a is not b), but with a nicer default message.""" - if expr1 is expr2: - standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertDictEqual(self, d1, d2, msg=None): - self.assertIsInstance(d1, dict, 'First argument is not a dictionary') - self.assertIsInstance(d2, dict, 'Second argument is not a dictionary') - - if d1 != d2: - standardMsg = '%s != %s' % _common_shorten_repr(d1, d2) - diff = ('\n' + '\n'.join(difflib.ndiff( - pprint.pformat(d1).splitlines(), - pprint.pformat(d2).splitlines()))) - standardMsg = self._truncateMessage(standardMsg, diff) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertCountEqual(self, first, second, msg=None): - """Asserts that two iterables have the same elements, the same number of - times, without regard to order. - - self.assertEqual(Counter(list(first)), - Counter(list(second))) - - Example: - - [0, 1, 1] and [1, 0, 1] compare equal. - - [0, 0, 1] and [0, 1] compare unequal. 
- - """ - first_seq, second_seq = list(first), list(second) - try: - first = collections.Counter(first_seq) - second = collections.Counter(second_seq) - except TypeError: - # Handle case with unhashable elements - differences = _count_diff_all_purpose(first_seq, second_seq) - else: - if first == second: - return - differences = _count_diff_hashable(first_seq, second_seq) - - if differences: - standardMsg = 'Element counts were not equal:\n' - lines = ['First has %d, Second has %d: %r' % diff for diff in differences] - diffMsg = '\n'.join(lines) - standardMsg = self._truncateMessage(standardMsg, diffMsg) - msg = self._formatMessage(msg, standardMsg) - self.fail(msg) - - def assertMultiLineEqual(self, first, second, msg=None): - """Assert that two multi-line strings are equal.""" - self.assertIsInstance(first, str, "First argument is not a string") - self.assertIsInstance(second, str, "Second argument is not a string") - - if first != second: - # Don't use difflib if the strings are too long - if (len(first) > self._diffThreshold or - len(second) > self._diffThreshold): - self._baseAssertEqual(first, second, msg) - - # Append \n to both strings if either is missing the \n. - # This allows the final ndiff to show the \n difference. The - # exception here is if the string is empty, in which case no - # \n should be added - first_presplit = first - second_presplit = second - if first and second: - if first[-1] != '\n' or second[-1] != '\n': - first_presplit += '\n' - second_presplit += '\n' - elif second and second[-1] != '\n': - second_presplit += '\n' - elif first and first[-1] != '\n': - first_presplit += '\n' - - firstlines = first_presplit.splitlines(keepends=True) - secondlines = second_presplit.splitlines(keepends=True) - - # Generate the message and diff, then raise the exception - standardMsg = '%s != %s' % _common_shorten_repr(first, second) - diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines)) - standardMsg = self._truncateMessage(standardMsg, diff) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertLess(self, a, b, msg=None): - """Just like self.assertTrue(a < b), but with a nicer default message.""" - if not a < b: - standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertLessEqual(self, a, b, msg=None): - """Just like self.assertTrue(a <= b), but with a nicer default message.""" - if not a <= b: - standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertGreater(self, a, b, msg=None): - """Just like self.assertTrue(a > b), but with a nicer default message.""" - if not a > b: - standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertGreaterEqual(self, a, b, msg=None): - """Just like self.assertTrue(a >= b), but with a nicer default message.""" - if not a >= b: - standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIsNone(self, obj, msg=None): - """Same as self.assertTrue(obj is None), with a nicer default message.""" - if obj is not None: - standardMsg = '%s is not None' % (safe_repr(obj),) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIsNotNone(self, obj, msg=None): - """Included for symmetry with assertIsNone.""" - if obj is None: - standardMsg = 'unexpectedly None' - self.fail(self._formatMessage(msg, 
standardMsg)) - - def assertIsInstance(self, obj, cls, msg=None): - """Same as self.assertTrue(isinstance(obj, cls)), with a nicer - default message.""" - if not isinstance(obj, cls): - standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertNotIsInstance(self, obj, cls, msg=None): - """Included for symmetry with assertIsInstance.""" - if isinstance(obj, cls): - standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertRaisesRegex(self, expected_exception, expected_regex, - *args, **kwargs): - """Asserts that the message in a raised exception matches a regex. - - Args: - expected_exception: Exception class expected to be raised. - expected_regex: Regex (re.Pattern object or string) expected - to be found in error message. - args: Function to be called and extra positional args. - kwargs: Extra kwargs. - msg: Optional message used in case of failure. Can only be used - when assertRaisesRegex is used as a context manager. - """ - context = _AssertRaisesContext(expected_exception, self, expected_regex) - return context.handle('assertRaisesRegex', args, kwargs) - - def assertWarnsRegex(self, expected_warning, expected_regex, - *args, **kwargs): - """Asserts that the message in a triggered warning matches a regexp. - Basic functioning is similar to assertWarns() with the addition - that only warnings whose messages also match the regular expression - are considered successful matches. - - Args: - expected_warning: Warning class expected to be triggered. - expected_regex: Regex (re.Pattern object or string) expected - to be found in error message. - args: Function to be called and extra positional args. - kwargs: Extra kwargs. - msg: Optional message used in case of failure. Can only be used - when assertWarnsRegex is used as a context manager. - """ - context = _AssertWarnsContext(expected_warning, self, expected_regex) - return context.handle('assertWarnsRegex', args, kwargs) - - def assertRegex(self, text, expected_regex, msg=None): - """Fail the test unless the text matches the regular expression.""" - if isinstance(expected_regex, (str, bytes)): - assert expected_regex, "expected_regex must not be empty." - expected_regex = re.compile(expected_regex) - if not expected_regex.search(text): - standardMsg = "Regex didn't match: %r not found in %r" % ( - expected_regex.pattern, text) - # _formatMessage ensures the longMessage option is respected - msg = self._formatMessage(msg, standardMsg) - raise self.failureException(msg) - - def assertNotRegex(self, text, unexpected_regex, msg=None): - """Fail the test if the text matches the regular expression.""" - if isinstance(unexpected_regex, (str, bytes)): - unexpected_regex = re.compile(unexpected_regex) - match = unexpected_regex.search(text) - if match: - standardMsg = 'Regex matched: %r matches %r in %r' % ( - text[match.start() : match.end()], - unexpected_regex.pattern, - text) - # _formatMessage ensures the longMessage option is respected - msg = self._formatMessage(msg, standardMsg) - raise self.failureException(msg) - - - -class FunctionTestCase(TestCase): - """A test case that wraps a test function. - - This is useful for slipping pre-existing test functions into the - unittest framework. Optionally, set-up and tidy-up functions can be - supplied. As with TestCase, the tidy-up ('tearDown') function will - always be called if the set-up ('setUp') function ran successfully. 
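FunctionTestCase, whose docstring is above, wraps free functions without requiring a class; a minimal sketch with illustrative names:

import unittest

state = []

def set_up():
    state.append('ready')

def tear_down():
    state.clear()

def check_state():
    assert state == ['ready']

test = unittest.FunctionTestCase(check_state, setUp=set_up, tearDown=tear_down,
                                 description='state is prepared before the test')
unittest.TextTestRunner().run(unittest.TestSuite([test]))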
- """ - - def __init__(self, testFunc, setUp=None, tearDown=None, description=None): - super(FunctionTestCase, self).__init__() - self._setUpFunc = setUp - self._tearDownFunc = tearDown - self._testFunc = testFunc - self._description = description - - def setUp(self): - if self._setUpFunc is not None: - self._setUpFunc() - - def tearDown(self): - if self._tearDownFunc is not None: - self._tearDownFunc() - - def runTest(self): - self._testFunc() - - def id(self): - return self._testFunc.__name__ - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - - return self._setUpFunc == other._setUpFunc and \ - self._tearDownFunc == other._tearDownFunc and \ - self._testFunc == other._testFunc and \ - self._description == other._description - - def __hash__(self): - return hash((type(self), self._setUpFunc, self._tearDownFunc, - self._testFunc, self._description)) - - def __str__(self): - return "%s (%s)" % (strclass(self.__class__), - self._testFunc.__name__) - - def __repr__(self): - return "<%s tec=%s>" % (strclass(self.__class__), - self._testFunc) - - def shortDescription(self): - if self._description is not None: - return self._description - doc = self._testFunc.__doc__ - return doc and doc.split("\n")[0].strip() or None - - -class _SubTest(TestCase): - - def __init__(self, test_case, message, params): - super().__init__() - self._message = message - self.test_case = test_case - self.params = params - self.failureException = test_case.failureException - - def runTest(self): - raise NotImplementedError("subtests cannot be run directly") - - def _subDescription(self): - parts = [] - if self._message is not _subtest_msg_sentinel: - parts.append("[{}]".format(self._message)) - if self.params: - params_desc = ', '.join( - "{}={!r}".format(k, v) - for (k, v) in self.params.items()) - parts.append("({})".format(params_desc)) - return " ".join(parts) or '()' - - def id(self): - return "{} {}".format(self.test_case.id(), self._subDescription()) - - def shortDescription(self): - """Returns a one-line description of the subtest, or None if no - description has been provided. - """ - return self.test_case.shortDescription() - - def __str__(self): - return "{} {}".format(self.test_case, self._subDescription()) diff --git a/Python313_13_x64_Template/Lib/unittest/loader.py b/Python313_13_x64_Template/Lib/unittest/loader.py deleted file mode 100644 index 22797b83..00000000 --- a/Python313_13_x64_Template/Lib/unittest/loader.py +++ /dev/null @@ -1,453 +0,0 @@ -"""Loading unittests.""" - -import os -import re -import sys -import traceback -import types -import functools - -from fnmatch import fnmatch, fnmatchcase - -from . 
import case, suite, util - -__unittest = True - -# what about .pyc (etc) -# we would need to avoid loading the same tests multiple times -# from '.py', *and* '.pyc' -VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE) - - -class _FailedTest(case.TestCase): - _testMethodName = None - - def __init__(self, method_name, exception): - self._exception = exception - super(_FailedTest, self).__init__(method_name) - - def __getattr__(self, name): - if name != self._testMethodName: - return super(_FailedTest, self).__getattr__(name) - def testFailure(): - raise self._exception - return testFailure - - -def _make_failed_import_test(name, suiteClass): - message = 'Failed to import test module: %s\n%s' % ( - name, traceback.format_exc()) - return _make_failed_test(name, ImportError(message), suiteClass, message) - -def _make_failed_load_tests(name, exception, suiteClass): - message = 'Failed to call load_tests:\n%s' % (traceback.format_exc(),) - return _make_failed_test( - name, exception, suiteClass, message) - -def _make_failed_test(methodname, exception, suiteClass, message): - test = _FailedTest(methodname, exception) - return suiteClass((test,)), message - -def _make_skipped_test(methodname, exception, suiteClass): - @case.skip(str(exception)) - def testSkipped(self): - pass - attrs = {methodname: testSkipped} - TestClass = type("ModuleSkipped", (case.TestCase,), attrs) - return suiteClass((TestClass(methodname),)) - -def _splitext(path): - return os.path.splitext(path)[0] - - -class TestLoader(object): - """ - This class is responsible for loading tests according to various criteria - and returning them wrapped in a TestSuite - """ - testMethodPrefix = 'test' - sortTestMethodsUsing = staticmethod(util.three_way_cmp) - testNamePatterns = None - suiteClass = suite.TestSuite - _top_level_dir = None - - def __init__(self): - super(TestLoader, self).__init__() - self.errors = [] - # Tracks packages which we have called into via load_tests, to - # avoid infinite re-entrancy. - self._loading_packages = set() - - def loadTestsFromTestCase(self, testCaseClass): - """Return a suite of all test cases contained in testCaseClass""" - if issubclass(testCaseClass, suite.TestSuite): - raise TypeError("Test cases should not be derived from " - "TestSuite. Maybe you meant to derive from " - "TestCase?") - if testCaseClass in (case.TestCase, case.FunctionTestCase): - # We don't load any tests from base types that should not be loaded. 
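loadTestsFromTestCase, shown above, instantiates the class once per test* method; a quick illustration (the class name is arbitrary):

import unittest

class TwoTests(unittest.TestCase):
    def test_a(self):
        pass

    def test_b(self):
        pass

loader = unittest.TestLoader()
suite = loader.loadTestsFromTestCase(TwoTests)
print(suite.countTestCases())  # prints 2, one case per test method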
- testCaseNames = [] - else: - testCaseNames = self.getTestCaseNames(testCaseClass) - if not testCaseNames and hasattr(testCaseClass, 'runTest'): - testCaseNames = ['runTest'] - loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames)) - return loaded_suite - - def loadTestsFromModule(self, module, *, pattern=None): - """Return a suite of all test cases contained in the given module""" - tests = [] - for name in dir(module): - obj = getattr(module, name) - if ( - isinstance(obj, type) - and issubclass(obj, case.TestCase) - and obj not in (case.TestCase, case.FunctionTestCase) - ): - tests.append(self.loadTestsFromTestCase(obj)) - - load_tests = getattr(module, 'load_tests', None) - tests = self.suiteClass(tests) - if load_tests is not None: - try: - return load_tests(self, tests, pattern) - except Exception as e: - error_case, error_message = _make_failed_load_tests( - module.__name__, e, self.suiteClass) - self.errors.append(error_message) - return error_case - return tests - - def loadTestsFromName(self, name, module=None): - """Return a suite of all test cases given a string specifier. - - The name may resolve either to a module, a test case class, a - test method within a test case class, or a callable object which - returns a TestCase or TestSuite instance. - - The method optionally resolves the names relative to a given module. - """ - parts = name.split('.') - error_case, error_message = None, None - if module is None: - parts_copy = parts[:] - while parts_copy: - try: - module_name = '.'.join(parts_copy) - module = __import__(module_name) - break - except ImportError: - next_attribute = parts_copy.pop() - # Last error so we can give it to the user if needed. - error_case, error_message = _make_failed_import_test( - next_attribute, self.suiteClass) - if not parts_copy: - # Even the top level import failed: report that error. - self.errors.append(error_message) - return error_case - parts = parts[1:] - obj = module - for part in parts: - try: - parent, obj = obj, getattr(obj, part) - except AttributeError as e: - # We can't traverse some part of the name. - if (getattr(obj, '__path__', None) is not None - and error_case is not None): - # This is a package (no __path__ per importlib docs), and we - # encountered an error importing something. We cannot tell - # the difference between package.WrongNameTestClass and - # package.wrong_module_name so we just report the - # ImportError - it is more informative. - self.errors.append(error_message) - return error_case - else: - # Otherwise, we signal that an AttributeError has occurred. 
- error_case, error_message = _make_failed_test( - part, e, self.suiteClass, - 'Failed to access attribute:\n%s' % ( - traceback.format_exc(),)) - self.errors.append(error_message) - return error_case - - if isinstance(obj, types.ModuleType): - return self.loadTestsFromModule(obj) - elif ( - isinstance(obj, type) - and issubclass(obj, case.TestCase) - and obj not in (case.TestCase, case.FunctionTestCase) - ): - return self.loadTestsFromTestCase(obj) - elif (isinstance(obj, types.FunctionType) and - isinstance(parent, type) and - issubclass(parent, case.TestCase)): - name = parts[-1] - inst = parent(name) - # static methods follow a different path - if not isinstance(getattr(inst, name), types.FunctionType): - return self.suiteClass([inst]) - elif isinstance(obj, suite.TestSuite): - return obj - if callable(obj): - test = obj() - if isinstance(test, suite.TestSuite): - return test - elif isinstance(test, case.TestCase): - return self.suiteClass([test]) - else: - raise TypeError("calling %s returned %s, not a test" % - (obj, test)) - else: - raise TypeError("don't know how to make test from: %s" % obj) - - def loadTestsFromNames(self, names, module=None): - """Return a suite of all test cases found using the given sequence - of string specifiers. See 'loadTestsFromName()'. - """ - suites = [self.loadTestsFromName(name, module) for name in names] - return self.suiteClass(suites) - - def getTestCaseNames(self, testCaseClass): - """Return a sorted sequence of method names found within testCaseClass - """ - def shouldIncludeMethod(attrname): - if not attrname.startswith(self.testMethodPrefix): - return False - testFunc = getattr(testCaseClass, attrname) - if not callable(testFunc): - return False - fullName = f'%s.%s.%s' % ( - testCaseClass.__module__, testCaseClass.__qualname__, attrname - ) - return self.testNamePatterns is None or \ - any(fnmatchcase(fullName, pattern) for pattern in self.testNamePatterns) - testFnNames = list(filter(shouldIncludeMethod, dir(testCaseClass))) - if self.sortTestMethodsUsing: - testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing)) - return testFnNames - - def discover(self, start_dir, pattern='test*.py', top_level_dir=None): - """Find and return all test modules from the specified start - directory, recursing into subdirectories to find them and return all - tests found within them. Only test files that match the pattern will - be loaded. (Using shell style pattern matching.) - - All test modules must be importable from the top level of the project. - If the start directory is not the top level directory then the top - level directory must be specified separately. - - If a test package name (directory with '__init__.py') matches the - pattern then the package will be checked for a 'load_tests' function. If - this exists then it will be called with (loader, tests, pattern) unless - the package has already had load_tests called from the same discovery - invocation, in which case the package module object is not scanned for - tests - this ensures that when a package uses discover to further - discover child tests that infinite recursion does not happen. - - If load_tests exists then discovery does *not* recurse into the package, - load_tests is responsible for loading all tests in the package. - - The pattern is deliberately not stored as a loader attribute so that - packages can continue discovery themselves. top_level_dir is stored so - load_tests does not need to pass this argument in to loader.discover(). 
- - Paths are sorted before being imported to ensure reproducible execution - order even on filesystems with non-alphabetical ordering like ext3/4. - """ - original_top_level_dir = self._top_level_dir - set_implicit_top = False - if top_level_dir is None and self._top_level_dir is not None: - # make top_level_dir optional if called from load_tests in a package - top_level_dir = self._top_level_dir - elif top_level_dir is None: - set_implicit_top = True - top_level_dir = start_dir - - top_level_dir = os.path.abspath(top_level_dir) - - if not top_level_dir in sys.path: - # all test modules must be importable from the top level directory - # should we *unconditionally* put the start directory in first - # in sys.path to minimise likelihood of conflicts between installed - # modules and development versions? - sys.path.insert(0, top_level_dir) - self._top_level_dir = top_level_dir - - is_not_importable = False - if os.path.isdir(os.path.abspath(start_dir)): - start_dir = os.path.abspath(start_dir) - if start_dir != top_level_dir: - is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py')) - else: - # support for discovery from dotted module names - try: - __import__(start_dir) - except ImportError: - is_not_importable = True - else: - the_module = sys.modules[start_dir] - top_part = start_dir.split('.')[0] - try: - start_dir = os.path.abspath( - os.path.dirname((the_module.__file__))) - except AttributeError: - if the_module.__name__ in sys.builtin_module_names: - # builtin module - raise TypeError('Can not use builtin modules ' - 'as dotted module names') from None - else: - raise TypeError( - f"don't know how to discover from {the_module!r}" - ) from None - - if set_implicit_top: - self._top_level_dir = self._get_directory_containing_module(top_part) - sys.path.remove(top_level_dir) - - if is_not_importable: - raise ImportError('Start directory is not importable: %r' % start_dir) - - tests = list(self._find_tests(start_dir, pattern)) - self._top_level_dir = original_top_level_dir - return self.suiteClass(tests) - - def _get_directory_containing_module(self, module_name): - module = sys.modules[module_name] - full_path = os.path.abspath(module.__file__) - - if os.path.basename(full_path).lower().startswith('__init__.py'): - return os.path.dirname(os.path.dirname(full_path)) - else: - # here we have been given a module rather than a package - so - # all we can do is search the *same* directory the module is in - # should an exception be raised instead - return os.path.dirname(full_path) - - def _get_name_from_path(self, path): - if path == self._top_level_dir: - return '.' - path = _splitext(os.path.normpath(path)) - - _relpath = os.path.relpath(path, self._top_level_dir) - assert not os.path.isabs(_relpath), "Path must be within the project" - assert not _relpath.startswith('..'), "Path must be within the project" - - name = _relpath.replace(os.path.sep, '.') - return name - - def _get_module_from_name(self, name): - __import__(name) - return sys.modules[name] - - def _match_path(self, path, full_path, pattern): - # override this method to use alternative matching strategy - return fnmatch(path, pattern) - - def _find_tests(self, start_dir, pattern): - """Used by discovery. Yields test suites it loads.""" - # Handle the __init__ in this package - name = self._get_name_from_path(start_dir) - # name is '.' when start_dir == top_level_dir (and top_level_dir is by - # definition not a package). - if name != '.' 
and name not in self._loading_packages: - # name is in self._loading_packages while we have called into - # loadTestsFromModule with name. - tests, should_recurse = self._find_test_path(start_dir, pattern) - if tests is not None: - yield tests - if not should_recurse: - # Either an error occurred, or load_tests was used by the - # package. - return - # Handle the contents. - paths = sorted(os.listdir(start_dir)) - for path in paths: - full_path = os.path.join(start_dir, path) - tests, should_recurse = self._find_test_path(full_path, pattern) - if tests is not None: - yield tests - if should_recurse: - # we found a package that didn't use load_tests. - name = self._get_name_from_path(full_path) - self._loading_packages.add(name) - try: - yield from self._find_tests(full_path, pattern) - finally: - self._loading_packages.discard(name) - - def _find_test_path(self, full_path, pattern): - """Used by discovery. - - Loads tests from a single file, or a directories' __init__.py when - passed the directory. - - Returns a tuple (None_or_tests_from_file, should_recurse). - """ - basename = os.path.basename(full_path) - if os.path.isfile(full_path): - if not VALID_MODULE_NAME.match(basename): - # valid Python identifiers only - return None, False - if not self._match_path(basename, full_path, pattern): - return None, False - # if the test file matches, load it - name = self._get_name_from_path(full_path) - try: - module = self._get_module_from_name(name) - except case.SkipTest as e: - return _make_skipped_test(name, e, self.suiteClass), False - except: - error_case, error_message = \ - _make_failed_import_test(name, self.suiteClass) - self.errors.append(error_message) - return error_case, False - else: - mod_file = os.path.abspath( - getattr(module, '__file__', full_path)) - realpath = _splitext( - os.path.realpath(mod_file)) - fullpath_noext = _splitext( - os.path.realpath(full_path)) - if realpath.lower() != fullpath_noext.lower(): - module_dir = os.path.dirname(realpath) - mod_name = _splitext( - os.path.basename(full_path)) - expected_dir = os.path.dirname(full_path) - msg = ("%r module incorrectly imported from %r. Expected " - "%r. Is this module globally installed?") - raise ImportError( - msg % (mod_name, module_dir, expected_dir)) - return self.loadTestsFromModule(module, pattern=pattern), False - elif os.path.isdir(full_path): - if not os.path.isfile(os.path.join(full_path, '__init__.py')): - return None, False - - load_tests = None - tests = None - name = self._get_name_from_path(full_path) - try: - package = self._get_module_from_name(name) - except case.SkipTest as e: - return _make_skipped_test(name, e, self.suiteClass), False - except: - error_case, error_message = \ - _make_failed_import_test(name, self.suiteClass) - self.errors.append(error_message) - return error_case, False - else: - load_tests = getattr(package, 'load_tests', None) - # Mark this package as being in load_tests (possibly ;)) - self._loading_packages.add(name) - try: - tests = self.loadTestsFromModule(package, pattern=pattern) - if load_tests is not None: - # loadTestsFromModule(package) has loaded tests for us. 
- return tests, False - return tests, True - finally: - self._loading_packages.discard(name) - else: - return None, False - - -defaultTestLoader = TestLoader() diff --git a/Python313_13_x64_Template/Lib/unittest/main.py b/Python313_13_x64_Template/Lib/unittest/main.py deleted file mode 100644 index a0cd8a9f..00000000 --- a/Python313_13_x64_Template/Lib/unittest/main.py +++ /dev/null @@ -1,280 +0,0 @@ -"""Unittest main program""" - -import sys -import argparse -import os - -from . import loader, runner -from .signals import installHandler - -__unittest = True -_NO_TESTS_EXITCODE = 5 - -MAIN_EXAMPLES = """\ -Examples: - %(prog)s test_module - run tests from test_module - %(prog)s module.TestClass - run tests from module.TestClass - %(prog)s module.Class.test_method - run specified test method - %(prog)s path/to/test_file.py - run tests from test_file.py -""" - -MODULE_EXAMPLES = """\ -Examples: - %(prog)s - run default set of tests - %(prog)s MyTestSuite - run suite 'MyTestSuite' - %(prog)s MyTestCase.testSomething - run MyTestCase.testSomething - %(prog)s MyTestCase - run all 'test*' test methods - in MyTestCase -""" - -def _convert_name(name): - # on Linux / Mac OS X 'foo.PY' is not importable, but on - # Windows it is. Simpler to do a case insensitive match - # a better check would be to check that the name is a - # valid Python module name. - if os.path.isfile(name) and name.lower().endswith('.py'): - if os.path.isabs(name): - rel_path = os.path.relpath(name, os.getcwd()) - if os.path.isabs(rel_path) or rel_path.startswith(os.pardir): - return name - name = rel_path - # on Windows both '\' and '/' are used as path - # separators. Better to replace both than rely on os.path.sep - return os.path.normpath(name)[:-3].replace('\\', '.').replace('/', '.') - return name - -def _convert_names(names): - return [_convert_name(name) for name in names] - - -def _convert_select_pattern(pattern): - if not '*' in pattern: - pattern = '*%s*' % pattern - return pattern - - -class TestProgram(object): - """A command-line program that runs a set of tests; this is primarily - for making test modules conveniently executable. - """ - # defaults for testing - module=None - verbosity = 1 - failfast = catchbreak = buffer = progName = warnings = testNamePatterns = None - _discovery_parser = None - - def __init__(self, module='__main__', defaultTest=None, argv=None, - testRunner=None, testLoader=loader.defaultTestLoader, - exit=True, verbosity=1, failfast=None, catchbreak=None, - buffer=None, warnings=None, *, tb_locals=False, - durations=None): - if isinstance(module, str): - self.module = __import__(module) - for part in module.split('.')[1:]: - self.module = getattr(self.module, part) - else: - self.module = module - if argv is None: - argv = sys.argv - - self.exit = exit - self.failfast = failfast - self.catchbreak = catchbreak - self.verbosity = verbosity - self.buffer = buffer - self.tb_locals = tb_locals - self.durations = durations - if warnings is None and not sys.warnoptions: - # even if DeprecationWarnings are ignored by default - # print them anyway unless other warnings settings are - # specified by the warnings arg or the -W python flag - self.warnings = 'default' - else: - # here self.warnings is set either to the value passed - # to the warnings args or to None. - # If the user didn't pass a value self.warnings will - # be None. This means that the behavior is unchanged - # and depends on the values passed to -W. 
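TestProgram (unittest.main) is normally driven from the command line; a sketch of programmatic use, assuming a ./tests directory exists:

import unittest

if __name__ == '__main__':
    # exit=False keeps the interpreter alive so the result can be inspected;
    # module=None plus a 'discover' argv mirrors `python -m unittest discover`
    program = unittest.main(module=None,
                            argv=['prog', 'discover', '-s', 'tests'],
                            exit=False, verbosity=2)
    print(program.result.wasSuccessful())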
- self.warnings = warnings - self.defaultTest = defaultTest - self.testRunner = testRunner - self.testLoader = testLoader - self.progName = os.path.basename(argv[0]) - self.parseArgs(argv) - self.runTests() - - def _print_help(self, *args, **kwargs): - if self.module is None: - print(self._main_parser.format_help()) - print(MAIN_EXAMPLES % {'prog': self.progName}) - self._discovery_parser.print_help() - else: - print(self._main_parser.format_help()) - print(MODULE_EXAMPLES % {'prog': self.progName}) - - def parseArgs(self, argv): - self._initArgParsers() - if self.module is None: - if len(argv) > 1 and argv[1].lower() == 'discover': - self._do_discovery(argv[2:]) - return - self._main_parser.parse_args(argv[1:], self) - if not self.tests: - # this allows "python -m unittest -v" to still work for - # test discovery. - self._do_discovery([]) - return - else: - self._main_parser.parse_args(argv[1:], self) - - if self.tests: - self.testNames = _convert_names(self.tests) - if __name__ == '__main__': - # to support python -m unittest ... - self.module = None - elif self.defaultTest is None: - # createTests will load tests from self.module - self.testNames = None - elif isinstance(self.defaultTest, str): - self.testNames = (self.defaultTest,) - else: - self.testNames = list(self.defaultTest) - self.createTests() - - def createTests(self, from_discovery=False, Loader=None): - if self.testNamePatterns: - self.testLoader.testNamePatterns = self.testNamePatterns - if from_discovery: - loader = self.testLoader if Loader is None else Loader() - self.test = loader.discover(self.start, self.pattern, self.top) - elif self.testNames is None: - self.test = self.testLoader.loadTestsFromModule(self.module) - else: - self.test = self.testLoader.loadTestsFromNames(self.testNames, - self.module) - - def _initArgParsers(self): - parent_parser = self._getParentArgParser() - self._main_parser = self._getMainArgParser(parent_parser) - self._discovery_parser = self._getDiscoveryArgParser(parent_parser) - - def _getParentArgParser(self): - parser = argparse.ArgumentParser(add_help=False) - - parser.add_argument('-v', '--verbose', dest='verbosity', - action='store_const', const=2, - help='Verbose output') - parser.add_argument('-q', '--quiet', dest='verbosity', - action='store_const', const=0, - help='Quiet output') - parser.add_argument('--locals', dest='tb_locals', - action='store_true', - help='Show local variables in tracebacks') - parser.add_argument('--durations', dest='durations', type=int, - default=None, metavar="N", - help='Show the N slowest test cases (N=0 for all)') - if self.failfast is None: - parser.add_argument('-f', '--failfast', dest='failfast', - action='store_true', - help='Stop on first fail or error') - self.failfast = False - if self.catchbreak is None: - parser.add_argument('-c', '--catch', dest='catchbreak', - action='store_true', - help='Catch Ctrl-C and display results so far') - self.catchbreak = False - if self.buffer is None: - parser.add_argument('-b', '--buffer', dest='buffer', - action='store_true', - help='Buffer stdout and stderr during tests') - self.buffer = False - if self.testNamePatterns is None: - parser.add_argument('-k', dest='testNamePatterns', - action='append', type=_convert_select_pattern, - help='Only run tests which match the given substring') - self.testNamePatterns = [] - - return parser - - def _getMainArgParser(self, parent): - parser = argparse.ArgumentParser(parents=[parent]) - parser.prog = self.progName - parser.print_help = self._print_help - - 
parser.add_argument('tests', nargs='*', - help='a list of any number of test modules, ' - 'classes and test methods.') - - return parser - - def _getDiscoveryArgParser(self, parent): - parser = argparse.ArgumentParser(parents=[parent]) - parser.prog = '%s discover' % self.progName - parser.epilog = ('For test discovery all test modules must be ' - 'importable from the top level directory of the ' - 'project.') - - parser.add_argument('-s', '--start-directory', dest='start', - help="Directory to start discovery ('.' default)") - parser.add_argument('-p', '--pattern', dest='pattern', - help="Pattern to match tests ('test*.py' default)") - parser.add_argument('-t', '--top-level-directory', dest='top', - help='Top level directory of project (defaults to ' - 'start directory)') - for arg in ('start', 'pattern', 'top'): - parser.add_argument(arg, nargs='?', - default=argparse.SUPPRESS, - help=argparse.SUPPRESS) - - return parser - - def _do_discovery(self, argv, Loader=None): - self.start = '.' - self.pattern = 'test*.py' - self.top = None - if argv is not None: - # handle command line args for test discovery - if self._discovery_parser is None: - # for testing - self._initArgParsers() - self._discovery_parser.parse_args(argv, self) - - self.createTests(from_discovery=True, Loader=Loader) - - def runTests(self): - if self.catchbreak: - installHandler() - if self.testRunner is None: - self.testRunner = runner.TextTestRunner - if isinstance(self.testRunner, type): - try: - try: - testRunner = self.testRunner(verbosity=self.verbosity, - failfast=self.failfast, - buffer=self.buffer, - warnings=self.warnings, - tb_locals=self.tb_locals, - durations=self.durations) - except TypeError: - # didn't accept the tb_locals or durations argument - testRunner = self.testRunner(verbosity=self.verbosity, - failfast=self.failfast, - buffer=self.buffer, - warnings=self.warnings) - except TypeError: - # didn't accept the verbosity, buffer or failfast arguments - testRunner = self.testRunner() - else: - # it is assumed to be a TestRunner instance - testRunner = self.testRunner - self.result = testRunner.run(self.test) - if self.exit: - if not self.result.wasSuccessful(): - sys.exit(1) - elif self.result.testsRun == 0 and len(self.result.skipped) == 0: - sys.exit(_NO_TESTS_EXITCODE) - else: - sys.exit(0) - - -main = TestProgram diff --git a/Python313_13_x64_Template/Lib/unittest/mock.py b/Python313_13_x64_Template/Lib/unittest/mock.py deleted file mode 100644 index b6dd1c27..00000000 --- a/Python313_13_x64_Template/Lib/unittest/mock.py +++ /dev/null @@ -1,3185 +0,0 @@ -# mock.py -# Test tools for mocking and patching. 
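Before the deleted mock internals below, a reminder of the module's core pattern may help; the names are illustrative:

from unittest import mock

m = mock.Mock()
m.compute.return_value = 42        # stub a method on the fly
assert m.compute(3) == 42
m.compute.assert_called_once_with(3)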
-# Maintained by Michael Foord -# Backport for other versions of Python available from -# https://pypi.org/project/mock - -__all__ = ( - 'Mock', - 'MagicMock', - 'patch', - 'sentinel', - 'DEFAULT', - 'ANY', - 'call', - 'create_autospec', - 'AsyncMock', - 'ThreadingMock', - 'FILTER_DIR', - 'NonCallableMock', - 'NonCallableMagicMock', - 'mock_open', - 'PropertyMock', - 'seal', -) - - -import asyncio -import contextlib -import io -import inspect -import pprint -import sys -import builtins -import pkgutil -from asyncio import iscoroutinefunction -import threading -from types import CodeType, ModuleType, MethodType -from unittest.util import safe_repr -from functools import wraps, partial -from threading import RLock - - -class InvalidSpecError(Exception): - """Indicates that an invalid value was used as a mock spec.""" - - -_builtins = {name for name in dir(builtins) if not name.startswith('_')} - -FILTER_DIR = True - -# Workaround for issue #12370 -# Without this, the __class__ properties wouldn't be set correctly -_safe_super = super - -def _is_async_obj(obj): - if _is_instance_mock(obj) and not isinstance(obj, AsyncMock): - return False - if hasattr(obj, '__func__'): - obj = getattr(obj, '__func__') - return iscoroutinefunction(obj) or inspect.isawaitable(obj) - - -def _is_async_func(func): - if getattr(func, '__code__', None): - return iscoroutinefunction(func) - else: - return False - - -def _is_instance_mock(obj): - # can't use isinstance on Mock objects because they override __class__ - # The base class for all mocks is NonCallableMock - return issubclass(type(obj), NonCallableMock) - - -def _is_exception(obj): - return ( - isinstance(obj, BaseException) or - isinstance(obj, type) and issubclass(obj, BaseException) - ) - - -def _extract_mock(obj): - # Autospecced functions will return a FunctionType with "mock" attribute - # which is the actual mock object that needs to be used. - if isinstance(obj, FunctionTypes) and hasattr(obj, 'mock'): - return obj.mock - else: - return obj - - -def _get_signature_object(func, as_instance, eat_self): - """ - Given an arbitrary, possibly callable object, try to create a suitable - signature object. - Return a (reduced func, signature) tuple, or None. - """ - if isinstance(func, type) and not as_instance: - # If it's a type and should be modelled as a type, use __init__. - func = func.__init__ - # Skip the `self` argument in __init__ - eat_self = True - elif isinstance(func, (classmethod, staticmethod)): - if isinstance(func, classmethod): - # Skip the `cls` argument of a class method - eat_self = True - # Use the original decorated method to extract the correct function signature - func = func.__func__ - elif not isinstance(func, FunctionTypes): - # If we really want to model an instance of the passed type, - # __call__ should be looked up, not __init__. 
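_get_signature_object above is what lets create_autospec enforce the original callable's signature; a sketch with an illustrative function:

from unittest import mock

def greet(name, punctuation='!'):
    return 'Hello, ' + name + punctuation

auto = mock.create_autospec(greet, return_value='hi')
assert auto('world') == 'hi'
auto.assert_called_once_with('world')
try:
    auto('a', 'b', 'c')  # a third argument is rejected by the bound signature
except TypeError:
    pass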
- try: - func = func.__call__ - except AttributeError: - return None - if eat_self: - sig_func = partial(func, None) - else: - sig_func = func - try: - return func, inspect.signature(sig_func) - except ValueError: - # Certain callable types are not supported by inspect.signature() - return None - - -def _check_signature(func, mock, skipfirst, instance=False): - sig = _get_signature_object(func, instance, skipfirst) - if sig is None: - return - func, sig = sig - def checksig(self, /, *args, **kwargs): - sig.bind(*args, **kwargs) - _copy_func_details(func, checksig) - type(mock)._mock_check_sig = checksig - type(mock).__signature__ = sig - - -def _copy_func_details(func, funcopy): - # we explicitly don't copy func.__dict__ into this copy as it would - # expose original attributes that should be mocked - for attribute in ( - '__name__', '__doc__', '__text_signature__', - '__module__', '__defaults__', '__kwdefaults__', - ): - try: - setattr(funcopy, attribute, getattr(func, attribute)) - except AttributeError: - pass - - -def _callable(obj): - if isinstance(obj, type): - return True - if isinstance(obj, (staticmethod, classmethod, MethodType)): - return _callable(obj.__func__) - if getattr(obj, '__call__', None) is not None: - return True - return False - - -def _is_list(obj): - # checks for list or tuples - # XXXX badly named! - return type(obj) in (list, tuple) - - -def _instance_callable(obj): - """Given an object, return True if the object is callable. - For classes, return True if instances would be callable.""" - if not isinstance(obj, type): - # already an instance - return getattr(obj, '__call__', None) is not None - - # *could* be broken by a class overriding __mro__ or __dict__ via - # a metaclass - for base in (obj,) + obj.__mro__: - if base.__dict__.get('__call__') is not None: - return True - return False - - -def _set_signature(mock, original, instance=False): - # creates a function with signature (*args, **kwargs) that delegates to a - # mock. It still does signature checking by calling a lambda with the same - # signature as the original. - - skipfirst = isinstance(original, type) - result = _get_signature_object(original, instance, skipfirst) - if result is None: - return mock - func, sig = result - def checksig(*args, **kwargs): - sig.bind(*args, **kwargs) - _copy_func_details(func, checksig) - - name = original.__name__ - if not name.isidentifier(): - name = 'funcopy' - context = {'_checksig_': checksig, 'mock': mock} - src = """def %s(*args, **kwargs): - _checksig_(*args, **kwargs) - return mock(*args, **kwargs)""" % name - exec (src, context) - funcopy = context[name] - _setup_func(funcopy, mock, sig) - return funcopy - -def _set_async_signature(mock, original, instance=False, is_async_mock=False): - # creates an async function with signature (*args, **kwargs) that delegates to a - # mock. It still does signature checking by calling a lambda with the same - # signature as the original. 
- - skipfirst = isinstance(original, type) - func, sig = _get_signature_object(original, instance, skipfirst) - def checksig(*args, **kwargs): - sig.bind(*args, **kwargs) - _copy_func_details(func, checksig) - - name = original.__name__ - context = {'_checksig_': checksig, 'mock': mock} - src = """async def %s(*args, **kwargs): - _checksig_(*args, **kwargs) - return await mock(*args, **kwargs)""" % name - exec (src, context) - funcopy = context[name] - _setup_func(funcopy, mock, sig) - _setup_async_mock(funcopy) - return funcopy - - -def _setup_func(funcopy, mock, sig): - funcopy.mock = mock - - def assert_called_with(*args, **kwargs): - return mock.assert_called_with(*args, **kwargs) - def assert_called(*args, **kwargs): - return mock.assert_called(*args, **kwargs) - def assert_not_called(*args, **kwargs): - return mock.assert_not_called(*args, **kwargs) - def assert_called_once(*args, **kwargs): - return mock.assert_called_once(*args, **kwargs) - def assert_called_once_with(*args, **kwargs): - return mock.assert_called_once_with(*args, **kwargs) - def assert_has_calls(*args, **kwargs): - return mock.assert_has_calls(*args, **kwargs) - def assert_any_call(*args, **kwargs): - return mock.assert_any_call(*args, **kwargs) - def reset_mock(): - funcopy.method_calls = _CallList() - funcopy.mock_calls = _CallList() - mock.reset_mock() - ret = funcopy.return_value - if _is_instance_mock(ret) and not ret is mock: - ret.reset_mock() - - funcopy.called = False - funcopy.call_count = 0 - funcopy.call_args = None - funcopy.call_args_list = _CallList() - funcopy.method_calls = _CallList() - funcopy.mock_calls = _CallList() - - funcopy.return_value = mock.return_value - funcopy.side_effect = mock.side_effect - funcopy._mock_children = mock._mock_children - - funcopy.assert_called_with = assert_called_with - funcopy.assert_called_once_with = assert_called_once_with - funcopy.assert_has_calls = assert_has_calls - funcopy.assert_any_call = assert_any_call - funcopy.reset_mock = reset_mock - funcopy.assert_called = assert_called - funcopy.assert_not_called = assert_not_called - funcopy.assert_called_once = assert_called_once - funcopy.__signature__ = sig - - mock._mock_delegate = funcopy - - -def _setup_async_mock(mock): - mock._is_coroutine = asyncio.coroutines._is_coroutine - mock.await_count = 0 - mock.await_args = None - mock.await_args_list = _CallList() - - # Mock is not configured yet so the attributes are set - # to a function and then the corresponding mock helper function - # is called when the helper is accessed similar to _setup_func. - def wrapper(attr, /, *args, **kwargs): - return getattr(mock.mock, attr)(*args, **kwargs) - - for attribute in ('assert_awaited', - 'assert_awaited_once', - 'assert_awaited_with', - 'assert_awaited_once_with', - 'assert_any_await', - 'assert_has_awaits', - 'assert_not_awaited'): - - # setattr(mock, attribute, wrapper) causes late binding - # hence attribute will always be the last value in the loop - # Use partial(wrapper, attribute) to ensure the attribute is bound - # correctly. - setattr(mock, attribute, partial(wrapper, attribute)) - - -def _is_magic(name): - return '__%s__' % name[2:-2] == name - - -class _SentinelObject(object): - "A unique, named, sentinel object." 
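The sentinel machinery defined here backs mock.sentinel, whose named objects make handy unique markers; an illustrative sketch:

from unittest import mock

MISSING = mock.sentinel.MISSING

def lookup(table, key, default=MISSING):
    # identity comparison against the sentinel distinguishes 'no default
    # given' from a falsy default such as 0 or None
    value = table.get(key, MISSING)
    if value is MISSING:
        if default is MISSING:
            raise KeyError(key)
        return default
    return value

assert lookup({'a': 1}, 'a') == 1
assert lookup({}, 'b', default=0) == 0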
- def __init__(self, name): - self.name = name - - def __repr__(self): - return 'sentinel.%s' % self.name - - def __reduce__(self): - return 'sentinel.%s' % self.name - - -class _Sentinel(object): - """Access attributes to return a named object, usable as a sentinel.""" - def __init__(self): - self._sentinels = {} - - def __getattr__(self, name): - if name == '__bases__': - # Without this help(unittest.mock) raises an exception - raise AttributeError - return self._sentinels.setdefault(name, _SentinelObject(name)) - - def __reduce__(self): - return 'sentinel' - - -sentinel = _Sentinel() - -DEFAULT = sentinel.DEFAULT -_missing = sentinel.MISSING -_deleted = sentinel.DELETED - - -_allowed_names = { - 'return_value', '_mock_return_value', 'side_effect', - '_mock_side_effect', '_mock_parent', '_mock_new_parent', - '_mock_name', '_mock_new_name' -} - - -def _delegating_property(name): - _allowed_names.add(name) - _the_name = '_mock_' + name - def _get(self, name=name, _the_name=_the_name): - sig = self._mock_delegate - if sig is None: - return getattr(self, _the_name) - return getattr(sig, name) - def _set(self, value, name=name, _the_name=_the_name): - sig = self._mock_delegate - if sig is None: - self.__dict__[_the_name] = value - else: - setattr(sig, name, value) - - return property(_get, _set) - - - -class _CallList(list): - - def __contains__(self, value): - if not isinstance(value, list): - return list.__contains__(self, value) - len_value = len(value) - len_self = len(self) - if len_value > len_self: - return False - - for i in range(0, len_self - len_value + 1): - sub_list = self[i:i+len_value] - if sub_list == value: - return True - return False - - def __repr__(self): - return pprint.pformat(list(self)) - - -def _check_and_set_parent(parent, value, name, new_name): - value = _extract_mock(value) - - if not _is_instance_mock(value): - return False - if ((value._mock_name or value._mock_new_name) or - (value._mock_parent is not None) or - (value._mock_new_parent is not None)): - return False - - _parent = parent - while _parent is not None: - # setting a mock (value) as a child or return value of itself - # should not modify the mock - if _parent is value: - return False - _parent = _parent._mock_new_parent - - if new_name: - value._mock_new_parent = parent - value._mock_new_name = new_name - if name: - value._mock_parent = parent - value._mock_name = name - return True - -# Internal class to identify if we wrapped an iterator object or not. -class _MockIter(object): - def __init__(self, obj): - self.obj = iter(obj) - def __next__(self): - return next(self.obj) - -class Base(object): - _mock_return_value = DEFAULT - _mock_side_effect = None - def __init__(self, /, *args, **kwargs): - pass - - - -class NonCallableMock(Base): - """A non-callable version of `Mock`""" - - # Store a mutex as a class attribute in order to protect concurrent access - # to mock attributes. Using a class attribute allows all NonCallableMock - # instances to share the mutex for simplicity. - # - # See https://github.com/python/cpython/issues/98624 for why this is - # necessary. 
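_CallList and _check_and_set_parent above are the bookkeeping behind mock_calls across a parent/child tree; a small sketch with illustrative names:

from unittest import mock

parent = mock.Mock()
child = mock.Mock(return_value=None)
parent.attach_mock(child, 'child')

parent.child(1)
parent.other(2)
# both calls are recorded, in order, on the parent
assert parent.mock_calls == [mock.call.child(1), mock.call.other(2)]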
- _lock = RLock() - - def __new__( - cls, spec=None, wraps=None, name=None, spec_set=None, - parent=None, _spec_state=None, _new_name='', _new_parent=None, - _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs - ): - # every instance has its own class - # so we can create magic methods on the - # class without stomping on other mocks - bases = (cls,) - if not issubclass(cls, AsyncMockMixin): - # Check if spec is an async object or function - spec_arg = spec_set or spec - if spec_arg is not None and _is_async_obj(spec_arg): - bases = (AsyncMockMixin, cls) - new = type(cls.__name__, bases, {'__doc__': cls.__doc__}) - instance = _safe_super(NonCallableMock, cls).__new__(new) - return instance - - - def __init__( - self, spec=None, wraps=None, name=None, spec_set=None, - parent=None, _spec_state=None, _new_name='', _new_parent=None, - _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs - ): - if _new_parent is None: - _new_parent = parent - - __dict__ = self.__dict__ - __dict__['_mock_parent'] = parent - __dict__['_mock_name'] = name - __dict__['_mock_new_name'] = _new_name - __dict__['_mock_new_parent'] = _new_parent - __dict__['_mock_sealed'] = False - - if spec_set is not None: - spec = spec_set - spec_set = True - if _eat_self is None: - _eat_self = parent is not None - - self._mock_add_spec(spec, spec_set, _spec_as_instance, _eat_self) - - __dict__['_mock_children'] = {} - __dict__['_mock_wraps'] = wraps - __dict__['_mock_delegate'] = None - - __dict__['_mock_called'] = False - __dict__['_mock_call_args'] = None - __dict__['_mock_call_count'] = 0 - __dict__['_mock_call_args_list'] = _CallList() - __dict__['_mock_mock_calls'] = _CallList() - - __dict__['method_calls'] = _CallList() - __dict__['_mock_unsafe'] = unsafe - - if kwargs: - self.configure_mock(**kwargs) - - _safe_super(NonCallableMock, self).__init__( - spec, wraps, name, spec_set, parent, - _spec_state - ) - - - def attach_mock(self, mock, attribute): - """ - Attach a mock as an attribute of this one, replacing its name and - parent. Calls to the attached mock will be recorded in the - `method_calls` and `mock_calls` attributes of this one.""" - inner_mock = _extract_mock(mock) - - inner_mock._mock_parent = None - inner_mock._mock_new_parent = None - inner_mock._mock_name = '' - inner_mock._mock_new_name = None - - setattr(self, attribute, mock) - - - def mock_add_spec(self, spec, spec_set=False): - """Add a spec to a mock. `spec` can either be an object or a - list of strings. Only attributes on the `spec` can be fetched as - attributes from the mock. - - If `spec_set` is True then only attributes on the spec can be set.""" - self._mock_add_spec(spec, spec_set) - - - def _mock_add_spec(self, spec, spec_set, _spec_as_instance=False, - _eat_self=False): - if _is_instance_mock(spec): - raise InvalidSpecError(f'Cannot spec a Mock object. 
[object={spec!r}]') - - _spec_class = None - _spec_signature = None - _spec_asyncs = [] - - if spec is not None and not _is_list(spec): - if isinstance(spec, type): - _spec_class = spec - else: - _spec_class = type(spec) - res = _get_signature_object(spec, - _spec_as_instance, _eat_self) - _spec_signature = res and res[1] - - spec_list = dir(spec) - - for attr in spec_list: - static_attr = inspect.getattr_static(spec, attr, None) - unwrapped_attr = static_attr - try: - unwrapped_attr = inspect.unwrap(unwrapped_attr) - except ValueError: - pass - if iscoroutinefunction(unwrapped_attr): - _spec_asyncs.append(attr) - - spec = spec_list - - __dict__ = self.__dict__ - __dict__['_spec_class'] = _spec_class - __dict__['_spec_set'] = spec_set - __dict__['_spec_signature'] = _spec_signature - __dict__['_mock_methods'] = spec - __dict__['_spec_asyncs'] = _spec_asyncs - - def __get_return_value(self): - ret = self._mock_return_value - if self._mock_delegate is not None: - ret = self._mock_delegate.return_value - - if ret is DEFAULT and self._mock_wraps is None: - ret = self._get_child_mock( - _new_parent=self, _new_name='()' - ) - self.return_value = ret - return ret - - - def __set_return_value(self, value): - if self._mock_delegate is not None: - self._mock_delegate.return_value = value - else: - self._mock_return_value = value - _check_and_set_parent(self, value, None, '()') - - __return_value_doc = "The value to be returned when the mock is called." - return_value = property(__get_return_value, __set_return_value, - __return_value_doc) - - - @property - def __class__(self): - if self._spec_class is None: - return type(self) - return self._spec_class - - called = _delegating_property('called') - call_count = _delegating_property('call_count') - call_args = _delegating_property('call_args') - call_args_list = _delegating_property('call_args_list') - mock_calls = _delegating_property('mock_calls') - - - def __get_side_effect(self): - delegated = self._mock_delegate - if delegated is None: - return self._mock_side_effect - sf = delegated.side_effect - if (sf is not None and not callable(sf) - and not isinstance(sf, _MockIter) and not _is_exception(sf)): - sf = _MockIter(sf) - delegated.side_effect = sf - return sf - - def __set_side_effect(self, value): - value = _try_iter(value) - delegated = self._mock_delegate - if delegated is None: - self._mock_side_effect = value - else: - delegated.side_effect = value - - side_effect = property(__get_side_effect, __set_side_effect) - - - def reset_mock(self, visited=None, *, - return_value: bool = False, - side_effect: bool = False): - "Restore the mock object to its initial state." - if visited is None: - visited = [] - if id(self) in visited: - return - visited.append(id(self)) - - self.called = False - self.call_args = None - self.call_count = 0 - self.mock_calls = _CallList() - self.call_args_list = _CallList() - self.method_calls = _CallList() - - if return_value: - self._mock_return_value = DEFAULT - if side_effect: - self._mock_side_effect = None - - for child in self._mock_children.values(): - if isinstance(child, _SpecState) or child is _deleted: - continue - child.reset_mock(visited, return_value=return_value, side_effect=side_effect) - - ret = self._mock_return_value - if _is_instance_mock(ret) and ret is not self: - ret.reset_mock(visited) - - - def configure_mock(self, /, **kwargs): - """Set attributes on the mock through keyword arguments. 
- - Attributes plus return values and side effects can be set on child - mocks using standard dot notation and unpacking a dictionary in the - method call: - - >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError} - >>> mock.configure_mock(**attrs)""" - for arg, val in sorted(kwargs.items(), - # we sort on the number of dots so that - # attributes are set before we set attributes on - # attributes - key=lambda entry: entry[0].count('.')): - args = arg.split('.') - final = args.pop() - obj = self - for entry in args: - obj = getattr(obj, entry) - setattr(obj, final, val) - - - def __getattr__(self, name): - if name in {'_mock_methods', '_mock_unsafe'}: - raise AttributeError(name) - elif self._mock_methods is not None: - if name not in self._mock_methods or name in _all_magics: - raise AttributeError("Mock object has no attribute %r" % name) - elif _is_magic(name): - raise AttributeError(name) - if not self._mock_unsafe and (not self._mock_methods or name not in self._mock_methods): - if name.startswith(('assert', 'assret', 'asert', 'aseert', 'assrt')) or name in _ATTRIB_DENY_LIST: - raise AttributeError( - f"{name!r} is not a valid assertion. Use a spec " - f"for the mock if {name!r} is meant to be an attribute.") - - with NonCallableMock._lock: - result = self._mock_children.get(name) - if result is _deleted: - raise AttributeError(name) - elif result is None: - wraps = None - if self._mock_wraps is not None: - # XXXX should we get the attribute without triggering code - # execution? - wraps = getattr(self._mock_wraps, name) - - result = self._get_child_mock( - parent=self, name=name, wraps=wraps, _new_name=name, - _new_parent=self - ) - self._mock_children[name] = result - - elif isinstance(result, _SpecState): - try: - result = create_autospec( - result.spec, result.spec_set, result.instance, - result.parent, result.name - ) - except InvalidSpecError: - target_name = self.__dict__['_mock_name'] or self - raise InvalidSpecError( - f'Cannot autospec attr {name!r} from target ' - f'{target_name!r} as it has already been mocked out. ' - f'[target={self!r}, attr={result.spec!r}]') - self._mock_children[name] = result - - return result - - - def _extract_mock_name(self): - _name_list = [self._mock_new_name] - _parent = self._mock_new_parent - last = self - - dot = '.' - if _name_list == ['()']: - dot = '' - - while _parent is not None: - last = _parent - - _name_list.append(_parent._mock_new_name + dot) - dot = '.' - if _parent._mock_new_name == '()': - dot = '' - - _parent = _parent._mock_new_parent - - _name_list = list(reversed(_name_list)) - _first = last._mock_name or 'mock' - if len(_name_list) > 1: - if _name_list[1] not in ('()', '().'): - _first += '.' 
- _name_list[0] = _first - return ''.join(_name_list) - - def __repr__(self): - name = self._extract_mock_name() - - name_string = '' - if name not in ('mock', 'mock.'): - name_string = ' name=%r' % name - - spec_string = '' - if self._spec_class is not None: - spec_string = ' spec=%r' - if self._spec_set: - spec_string = ' spec_set=%r' - spec_string = spec_string % self._spec_class.__name__ - return "<%s%s%s id='%s'>" % ( - type(self).__name__, - name_string, - spec_string, - id(self) - ) - - - def __dir__(self): - """Filter the output of `dir(mock)` to only useful members.""" - if not FILTER_DIR: - return object.__dir__(self) - - extras = self._mock_methods or [] - from_type = dir(type(self)) - from_dict = list(self.__dict__) - from_child_mocks = [ - m_name for m_name, m_value in self._mock_children.items() - if m_value is not _deleted] - - from_type = [e for e in from_type if not e.startswith('_')] - from_dict = [e for e in from_dict if not e.startswith('_') or - _is_magic(e)] - return sorted(set(extras + from_type + from_dict + from_child_mocks)) - - - def __setattr__(self, name, value): - if name in _allowed_names: - # property setters go through here - return object.__setattr__(self, name, value) - elif (self._spec_set and self._mock_methods is not None and - name not in self._mock_methods and - name not in self.__dict__): - raise AttributeError("Mock object has no attribute '%s'" % name) - elif name in _unsupported_magics: - msg = 'Attempting to set unsupported magic method %r.' % name - raise AttributeError(msg) - elif name in _all_magics: - if self._mock_methods is not None and name not in self._mock_methods: - raise AttributeError("Mock object has no attribute '%s'" % name) - - if not _is_instance_mock(value): - setattr(type(self), name, _get_method(name, value)) - original = value - value = lambda *args, **kw: original(self, *args, **kw) - else: - # only set _new_name and not name so that mock_calls is tracked - # but not method calls - _check_and_set_parent(self, value, None, name) - setattr(type(self), name, value) - self._mock_children[name] = value - elif name == '__class__': - self._spec_class = value - return - else: - if _check_and_set_parent(self, value, name, name): - self._mock_children[name] = value - - if self._mock_sealed and not hasattr(self, name): - mock_name = f'{self._extract_mock_name()}.{name}' - raise AttributeError(f'Cannot set {mock_name}') - - if isinstance(value, PropertyMock): - self.__dict__[name] = value - return - return object.__setattr__(self, name, value) - - - def __delattr__(self, name): - if name in _all_magics and name in type(self).__dict__: - delattr(type(self), name) - if name not in self.__dict__: - # for magic methods that are still MagicProxy objects and - # not set on the instance itself - return - - obj = self._mock_children.get(name, _missing) - if name in self.__dict__: - _safe_super(NonCallableMock, self).__delattr__(name) - elif obj is _deleted: - raise AttributeError(name) - if obj is not _missing: - del self._mock_children[name] - self._mock_children[name] = _deleted - - - def _format_mock_call_signature(self, args, kwargs): - name = self._mock_name or 'mock' - return _format_call_signature(name, args, kwargs) - - - def _format_mock_failure_message(self, args, kwargs, action='call'): - message = 'expected %s not found.\nExpected: %s\n Actual: %s' - expected_string = self._format_mock_call_signature(args, kwargs) - call_args = self.call_args - actual_string = self._format_mock_call_signature(*call_args) - return message % (action, 
expected_string, actual_string) - - - def _get_call_signature_from_name(self, name): - """ - * If call objects are asserted against a method/function like obj.meth1 - then there could be no name for the call object to lookup. Hence just - return the spec_signature of the method/function being asserted against. - * If the name is not empty then remove () and split by '.' to get - list of names to iterate through the children until a potential - match is found. A child mock is created only during attribute access - so if we get a _SpecState then no attributes of the spec were accessed - and can be safely exited. - """ - if not name: - return self._spec_signature - - sig = None - names = name.replace('()', '').split('.') - children = self._mock_children - - for name in names: - child = children.get(name) - if child is None or isinstance(child, _SpecState): - break - else: - # If an autospecced object is attached using attach_mock the - # child would be a function with mock object as attribute from - # which signature has to be derived. - child = _extract_mock(child) - children = child._mock_children - sig = child._spec_signature - - return sig - - - def _call_matcher(self, _call): - """ - Given a call (or simply an (args, kwargs) tuple), return a - comparison key suitable for matching with other calls. - This is a best effort method which relies on the spec's signature, - if available, or falls back on the arguments themselves. - """ - - if isinstance(_call, tuple) and len(_call) > 2: - sig = self._get_call_signature_from_name(_call[0]) - else: - sig = self._spec_signature - - if sig is not None: - if len(_call) == 2: - name = '' - args, kwargs = _call - else: - name, args, kwargs = _call - try: - bound_call = sig.bind(*args, **kwargs) - return call(name, bound_call.args, bound_call.kwargs) - except TypeError as e: - return e.with_traceback(None) - else: - return _call - - def assert_not_called(self): - """assert that the mock was never called. - """ - if self.call_count != 0: - msg = ("Expected '%s' to not have been called. Called %s times.%s" - % (self._mock_name or 'mock', - self.call_count, - self._calls_repr())) - raise AssertionError(msg) - - def assert_called(self): - """assert that the mock was called at least once - """ - if self.call_count == 0: - msg = ("Expected '%s' to have been called." % - (self._mock_name or 'mock')) - raise AssertionError(msg) - - def assert_called_once(self): - """assert that the mock was called only once. - """ - if not self.call_count == 1: - msg = ("Expected '%s' to have been called once. Called %s times.%s" - % (self._mock_name or 'mock', - self.call_count, - self._calls_repr())) - raise AssertionError(msg) - - def assert_called_with(self, /, *args, **kwargs): - """assert that the last call was made with the specified arguments. - - Raises an AssertionError if the args and keyword args passed in are - different to the last call to the mock.""" - if self.call_args is None: - expected = self._format_mock_call_signature(args, kwargs) - actual = 'not called.' 
- error_message = ('expected call not found.\nExpected: %s\n Actual: %s' - % (expected, actual)) - raise AssertionError(error_message) - - def _error_message(): - msg = self._format_mock_failure_message(args, kwargs) - return msg - expected = self._call_matcher(_Call((args, kwargs), two=True)) - actual = self._call_matcher(self.call_args) - if actual != expected: - cause = expected if isinstance(expected, Exception) else None - raise AssertionError(_error_message()) from cause - - - def assert_called_once_with(self, /, *args, **kwargs): - """assert that the mock was called exactly once and that that call was - with the specified arguments.""" - if not self.call_count == 1: - msg = ("Expected '%s' to be called once. Called %s times.%s" - % (self._mock_name or 'mock', - self.call_count, - self._calls_repr())) - raise AssertionError(msg) - return self.assert_called_with(*args, **kwargs) - - - def assert_has_calls(self, calls, any_order=False): - """assert the mock has been called with the specified calls. - The `mock_calls` list is checked for the calls. - - If `any_order` is False (the default) then the calls must be - sequential. There can be extra calls before or after the - specified calls. - - If `any_order` is True then the calls can be in any order, but - they must all appear in `mock_calls`.""" - expected = [self._call_matcher(c) for c in calls] - cause = next((e for e in expected if isinstance(e, Exception)), None) - all_calls = _CallList(self._call_matcher(c) for c in self.mock_calls) - if not any_order: - if expected not in all_calls: - if cause is None: - problem = 'Calls not found.' - else: - problem = ('Error processing expected calls.\n' - 'Errors: {}').format( - [e if isinstance(e, Exception) else None - for e in expected]) - raise AssertionError( - f'{problem}\n' - f'Expected: {_CallList(calls)}\n' - f' Actual: {safe_repr(self.mock_calls)}' - ) from cause - return - - all_calls = list(all_calls) - - not_found = [] - for kall in expected: - try: - all_calls.remove(kall) - except ValueError: - not_found.append(kall) - if not_found: - raise AssertionError( - '%r does not contain all of %r in its call list, ' - 'found %r instead' % (self._mock_name or 'mock', - tuple(not_found), all_calls) - ) from cause - - - def assert_any_call(self, /, *args, **kwargs): - """assert the mock has been called with the specified arguments. - - The assert passes if the mock has *ever* been called, unlike - `assert_called_with` and `assert_called_once_with` that only pass if - the call is the most recent one.""" - expected = self._call_matcher(_Call((args, kwargs), two=True)) - cause = expected if isinstance(expected, Exception) else None - actual = [self._call_matcher(c) for c in self.call_args_list] - if cause or expected not in _AnyComparer(actual): - expected_string = self._format_mock_call_signature(args, kwargs) - raise AssertionError( - '%s call not found' % expected_string - ) from cause - - - def _get_child_mock(self, /, **kw): - """Create the child mocks for attributes and return value. - By default child mocks will be the same type as the parent. - Subclasses of Mock may want to override this to customize the way - child mocks are made. 
- - For non-callable mocks the callable variant will be used (rather than - any custom subclass).""" - if self._mock_sealed: - attribute = f".{kw['name']}" if "name" in kw else "()" - mock_name = self._extract_mock_name() + attribute - raise AttributeError(mock_name) - - _new_name = kw.get("_new_name") - if _new_name in self.__dict__['_spec_asyncs']: - return AsyncMock(**kw) - - _type = type(self) - if issubclass(_type, MagicMock) and _new_name in _async_method_magics: - # Any asynchronous magic becomes an AsyncMock - klass = AsyncMock - elif issubclass(_type, AsyncMockMixin): - if (_new_name in _all_sync_magics or - self._mock_methods and _new_name in self._mock_methods): - # Any synchronous method on AsyncMock becomes a MagicMock - klass = MagicMock - else: - klass = AsyncMock - elif not issubclass(_type, CallableMixin): - if issubclass(_type, NonCallableMagicMock): - klass = MagicMock - elif issubclass(_type, NonCallableMock): - klass = Mock - else: - klass = _type.__mro__[1] - return klass(**kw) - - - def _calls_repr(self): - """Renders self.mock_calls as a string. - - Example: "\nCalls: [call(1), call(2)]." - - If self.mock_calls is empty, an empty string is returned. The - output will be truncated if very long. - """ - if not self.mock_calls: - return "" - return f"\nCalls: {safe_repr(self.mock_calls)}." - - -# Denylist for forbidden attribute names in safe mode -_ATTRIB_DENY_LIST = frozenset({ - name.removeprefix("assert_") - for name in dir(NonCallableMock) - if name.startswith("assert_") -}) - - -class _AnyComparer(list): - """A list which checks if it contains a call which may have an - argument of ANY, flipping the components of item and self from - their traditional locations so that ANY is guaranteed to be on - the left.""" - def __contains__(self, item): - for _call in self: - assert len(item) == len(_call) - if all([ - expected == actual - for expected, actual in zip(item, _call) - ]): - return True - return False - - -def _try_iter(obj): - if obj is None: - return obj - if _is_exception(obj): - return obj - if _callable(obj): - return obj - try: - return iter(obj) - except TypeError: - # XXXX backwards compatibility - # but this will blow up on first call - so maybe we should fail early? 
- return obj - - -class CallableMixin(Base): - - def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, - wraps=None, name=None, spec_set=None, parent=None, - _spec_state=None, _new_name='', _new_parent=None, **kwargs): - self.__dict__['_mock_return_value'] = return_value - _safe_super(CallableMixin, self).__init__( - spec, wraps, name, spec_set, parent, - _spec_state, _new_name, _new_parent, **kwargs - ) - - self.side_effect = side_effect - - - def _mock_check_sig(self, /, *args, **kwargs): - # stub method that can be replaced with one with a specific signature - pass - - - def __call__(self, /, *args, **kwargs): - # can't use self in-case a function / method we are mocking uses self - # in the signature - self._mock_check_sig(*args, **kwargs) - self._increment_mock_call(*args, **kwargs) - return self._mock_call(*args, **kwargs) - - - def _mock_call(self, /, *args, **kwargs): - return self._execute_mock_call(*args, **kwargs) - - def _increment_mock_call(self, /, *args, **kwargs): - self.called = True - - # handle call_args - # needs to be set here so assertions on call arguments pass before - # execution in the case of awaited calls - with NonCallableMock._lock: - # Lock is used here so that call_args_list and call_count are - # set atomically otherwise it is possible that by the time call_count - # is set another thread may have appended to call_args_list. - # The rest of this function relies on list.append being atomic and - # skips locking. - _call = _Call((args, kwargs), two=True) - self.call_args = _call - self.call_args_list.append(_call) - self.call_count = len(self.call_args_list) - - # initial stuff for method_calls: - do_method_calls = self._mock_parent is not None - method_call_name = self._mock_name - - # initial stuff for mock_calls: - mock_call_name = self._mock_new_name - is_a_call = mock_call_name == '()' - self.mock_calls.append(_Call(('', args, kwargs))) - - # follow up the chain of mocks: - _new_parent = self._mock_new_parent - while _new_parent is not None: - - # handle method_calls: - if do_method_calls: - _new_parent.method_calls.append(_Call((method_call_name, args, kwargs))) - do_method_calls = _new_parent._mock_parent is not None - if do_method_calls: - method_call_name = _new_parent._mock_name + '.' + method_call_name - - # handle mock_calls: - this_mock_call = _Call((mock_call_name, args, kwargs)) - _new_parent.mock_calls.append(this_mock_call) - - if _new_parent._mock_new_name: - if is_a_call: - dot = '' - else: - dot = '.' 
- is_a_call = _new_parent._mock_new_name == '()' - mock_call_name = _new_parent._mock_new_name + dot + mock_call_name - - # follow the parental chain: - _new_parent = _new_parent._mock_new_parent - - def _execute_mock_call(self, /, *args, **kwargs): - # separate from _increment_mock_call so that awaited functions are - # executed separately from their call, also AsyncMock overrides this method - - effect = self.side_effect - if effect is not None: - if _is_exception(effect): - raise effect - elif not _callable(effect): - result = next(effect) - if _is_exception(result): - raise result - else: - result = effect(*args, **kwargs) - - if result is not DEFAULT: - return result - - if self._mock_return_value is not DEFAULT: - return self.return_value - - if self._mock_delegate and self._mock_delegate.return_value is not DEFAULT: - return self.return_value - - if self._mock_wraps is not None: - return self._mock_wraps(*args, **kwargs) - - return self.return_value - - - -class Mock(CallableMixin, NonCallableMock): - """ - Create a new `Mock` object. `Mock` takes several optional arguments - that specify the behaviour of the Mock object: - - * `spec`: This can be either a list of strings or an existing object (a - class or instance) that acts as the specification for the mock object. If - you pass in an object then a list of strings is formed by calling dir on - the object (excluding unsupported magic attributes and methods). Accessing - any attribute not in this list will raise an `AttributeError`. - - If `spec` is an object (rather than a list of strings) then - `mock.__class__` returns the class of the spec object. This allows mocks - to pass `isinstance` tests. - - * `spec_set`: A stricter variant of `spec`. If used, attempting to *set* - or get an attribute on the mock that isn't on the object passed as - `spec_set` will raise an `AttributeError`. - - * `side_effect`: A function to be called whenever the Mock is called. See - the `side_effect` attribute. Useful for raising exceptions or - dynamically changing return values. The function is called with the same - arguments as the mock, and unless it returns `DEFAULT`, the return - value of this function is used as the return value. - - If `side_effect` is an iterable then each call to the mock will return - the next value from the iterable. If any of the members of the iterable - are exceptions they will be raised instead of returned. - - * `return_value`: The value returned when the mock is called. By default - this is a new Mock (created on first access). See the - `return_value` attribute. - - * `unsafe`: By default, accessing any attribute whose name starts with - *assert*, *assret*, *asert*, *aseert*, or *assrt* raises an AttributeError. - Additionally, an AttributeError is raised when accessing - attributes that match the name of an assertion method without the prefix - `assert_`, e.g. accessing `called_once` instead of `assert_called_once`. - Passing `unsafe=True` will allow access to these attributes. - - * `wraps`: Item for the mock object to wrap. If `wraps` is not None then - calling the Mock will pass the call through to the wrapped object - (returning the real result). Attribute access on the mock will return a - Mock object that wraps the corresponding attribute of the wrapped object - (so attempting to access an attribute that doesn't exist will raise an - `AttributeError`). - - If the mock has an explicit `return_value` set then calls are not passed - to the wrapped object and the `return_value` is returned instead. 
- - * `name`: If the mock has a name then it will be used in the repr of the - mock. This can be useful for debugging. The name is propagated to child - mocks. - - Mocks can also be called with arbitrary keyword arguments. These will be - used to set attributes on the mock after it is created. - """ - - -# _check_spec_arg_typos takes kwargs from commands like patch and checks that -# they don't contain common misspellings of arguments related to autospeccing. -def _check_spec_arg_typos(kwargs_to_check): - typos = ("autospect", "auto_spec", "set_spec") - for typo in typos: - if typo in kwargs_to_check: - raise RuntimeError( - f"{typo!r} might be a typo; use unsafe=True if this is intended" - ) - - -class _patch(object): - - attribute_name = None - _active_patches = [] - - def __init__( - self, getter, attribute, new, spec, create, - spec_set, autospec, new_callable, kwargs, *, unsafe=False - ): - if new_callable is not None: - if new is not DEFAULT: - raise ValueError( - "Cannot use 'new' and 'new_callable' together" - ) - if autospec is not None: - raise ValueError( - "Cannot use 'autospec' and 'new_callable' together" - ) - if not unsafe: - _check_spec_arg_typos(kwargs) - if _is_instance_mock(spec): - raise InvalidSpecError( - f'Cannot spec attr {attribute!r} as the spec ' - f'has already been mocked out. [spec={spec!r}]') - if _is_instance_mock(spec_set): - raise InvalidSpecError( - f'Cannot spec attr {attribute!r} as the spec_set ' - f'target has already been mocked out. [spec_set={spec_set!r}]') - - self.getter = getter - self.attribute = attribute - self.new = new - self.new_callable = new_callable - self.spec = spec - self.create = create - self.has_local = False - self.spec_set = spec_set - self.autospec = autospec - self.kwargs = kwargs - self.additional_patchers = [] - self.is_started = False - - - def copy(self): - patcher = _patch( - self.getter, self.attribute, self.new, self.spec, - self.create, self.spec_set, - self.autospec, self.new_callable, self.kwargs - ) - patcher.attribute_name = self.attribute_name - patcher.additional_patchers = [ - p.copy() for p in self.additional_patchers - ] - return patcher - - - def __call__(self, func): - if isinstance(func, type): - return self.decorate_class(func) - if inspect.iscoroutinefunction(func): - return self.decorate_async_callable(func) - return self.decorate_callable(func) - - - def decorate_class(self, klass): - for attr in dir(klass): - if not attr.startswith(patch.TEST_PREFIX): - continue - - attr_value = getattr(klass, attr) - if not hasattr(attr_value, "__call__"): - continue - - patcher = self.copy() - setattr(klass, attr, patcher(attr_value)) - return klass - - - @contextlib.contextmanager - def decoration_helper(self, patched, args, keywargs): - extra_args = [] - with contextlib.ExitStack() as exit_stack: - for patching in patched.patchings: - arg = exit_stack.enter_context(patching) - if patching.attribute_name is not None: - keywargs.update(arg) - elif patching.new is DEFAULT: - extra_args.append(arg) - - args += tuple(extra_args) - yield (args, keywargs) - - - def decorate_callable(self, func): - # NB. Keep the method in sync with decorate_async_callable() - if hasattr(func, 'patchings'): - func.patchings.append(self) - return func - - @wraps(func) - def patched(*args, **keywargs): - with self.decoration_helper(patched, - args, - keywargs) as (newargs, newkeywargs): - return func(*newargs, **newkeywargs) - - patched.patchings = [self] - return patched - - - def decorate_async_callable(self, func): - # NB. 
Keep the method in sync with decorate_callable() - if hasattr(func, 'patchings'): - func.patchings.append(self) - return func - - @wraps(func) - async def patched(*args, **keywargs): - with self.decoration_helper(patched, - args, - keywargs) as (newargs, newkeywargs): - return await func(*newargs, **newkeywargs) - - patched.patchings = [self] - return patched - - - def get_original(self): - target = self.getter() - name = self.attribute - - original = DEFAULT - local = False - - try: - original = target.__dict__[name] - except (AttributeError, KeyError): - original = getattr(target, name, DEFAULT) - else: - local = True - - if name in _builtins and isinstance(target, ModuleType): - self.create = True - - if not self.create and original is DEFAULT: - raise AttributeError( - "%s does not have the attribute %r" % (target, name) - ) - return original, local - - - def __enter__(self): - """Perform the patch.""" - if self.is_started: - raise RuntimeError("Patch is already started") - - new, spec, spec_set = self.new, self.spec, self.spec_set - autospec, kwargs = self.autospec, self.kwargs - new_callable = self.new_callable - self.target = self.getter() - - # normalise False to None - if spec is False: - spec = None - if spec_set is False: - spec_set = None - if autospec is False: - autospec = None - - if spec is not None and autospec is not None: - raise TypeError("Can't specify spec and autospec") - if ((spec is not None or autospec is not None) and - spec_set not in (True, None)): - raise TypeError("Can't provide explicit spec_set *and* spec or autospec") - - original, local = self.get_original() - - if new is DEFAULT and autospec is None: - inherit = False - if spec is True: - # set spec to the object we are replacing - spec = original - if spec_set is True: - spec_set = original - spec = None - elif spec is not None: - if spec_set is True: - spec_set = spec - spec = None - elif spec_set is True: - spec_set = original - - if spec is not None or spec_set is not None: - if original is DEFAULT: - raise TypeError("Can't use 'spec' with create=True") - if isinstance(original, type): - # If we're patching out a class and there is a spec - inherit = True - - # Determine the Klass to use - if new_callable is not None: - Klass = new_callable - elif spec is None and _is_async_obj(original): - Klass = AsyncMock - elif spec is not None or spec_set is not None: - this_spec = spec - if spec_set is not None: - this_spec = spec_set - if _is_list(this_spec): - not_callable = '__call__' not in this_spec - else: - not_callable = not callable(this_spec) - if _is_async_obj(this_spec): - Klass = AsyncMock - elif not_callable: - Klass = NonCallableMagicMock - else: - Klass = MagicMock - else: - Klass = MagicMock - - _kwargs = {} - if spec is not None: - _kwargs['spec'] = spec - if spec_set is not None: - _kwargs['spec_set'] = spec_set - - # add a name to mocks - if (isinstance(Klass, type) and - issubclass(Klass, NonCallableMock) and self.attribute): - _kwargs['name'] = self.attribute - - _kwargs.update(kwargs) - new = Klass(**_kwargs) - - if inherit and _is_instance_mock(new): - # we can only tell if the instance should be callable if the - # spec is not a list - this_spec = spec - if spec_set is not None: - this_spec = spec_set - if (not _is_list(this_spec) and not - _instance_callable(this_spec)): - Klass = NonCallableMagicMock - - _kwargs.pop('name') - new.return_value = Klass(_new_parent=new, _new_name='()', - **_kwargs) - elif autospec is not None: - # spec is ignored, new *must* be default, spec_set is 
treated - # as a boolean. Should we check spec is not None and that spec_set - # is a bool? - if new is not DEFAULT: - raise TypeError( - "autospec creates the mock for you. Can't specify " - "autospec and new." - ) - if original is DEFAULT: - raise TypeError("Can't use 'autospec' with create=True") - spec_set = bool(spec_set) - if autospec is True: - autospec = original - - if _is_instance_mock(self.target): - raise InvalidSpecError( - f'Cannot autospec attr {self.attribute!r} as the patch ' - f'target has already been mocked out. ' - f'[target={self.target!r}, attr={autospec!r}]') - if _is_instance_mock(autospec): - target_name = getattr(self.target, '__name__', self.target) - raise InvalidSpecError( - f'Cannot autospec attr {self.attribute!r} from target ' - f'{target_name!r} as it has already been mocked out. ' - f'[target={self.target!r}, attr={autospec!r}]') - - new = create_autospec(autospec, spec_set=spec_set, - _name=self.attribute, **kwargs) - elif kwargs: - # can't set keyword args when we aren't creating the mock - # XXXX If new is a Mock we could call new.configure_mock(**kwargs) - raise TypeError("Can't pass kwargs to a mock we aren't creating") - - new_attr = new - - self.temp_original = original - self.is_local = local - self._exit_stack = contextlib.ExitStack() - self.is_started = True - try: - setattr(self.target, self.attribute, new_attr) - if self.attribute_name is not None: - extra_args = {} - if self.new is DEFAULT: - extra_args[self.attribute_name] = new - for patching in self.additional_patchers: - arg = self._exit_stack.enter_context(patching) - if patching.new is DEFAULT: - extra_args.update(arg) - return extra_args - - return new - except: - if not self.__exit__(*sys.exc_info()): - raise - - def __exit__(self, *exc_info): - """Undo the patch.""" - if not self.is_started: - return - - if self.is_local and self.temp_original is not DEFAULT: - setattr(self.target, self.attribute, self.temp_original) - else: - delattr(self.target, self.attribute) - if not self.create and (not hasattr(self.target, self.attribute) or - self.attribute in ('__doc__', '__module__', - '__defaults__', '__annotations__', - '__kwdefaults__')): - # needed for proxy objects like django settings - setattr(self.target, self.attribute, self.temp_original) - - del self.temp_original - del self.is_local - del self.target - exit_stack = self._exit_stack - del self._exit_stack - self.is_started = False - return exit_stack.__exit__(*exc_info) - - - def start(self): - """Activate a patch, returning any created mock.""" - result = self.__enter__() - self._active_patches.append(self) - return result - - - def stop(self): - """Stop an active patch.""" - try: - self._active_patches.remove(self) - except ValueError: - # If the patch hasn't been started this will fail - return None - - return self.__exit__(None, None, None) - - - -def _get_target(target): - try: - target, attribute = target.rsplit('.', 1) - except (TypeError, ValueError, AttributeError): - raise TypeError( - f"Need a valid target to patch. You supplied: {target!r}") - return partial(pkgutil.resolve_name, target), attribute - - -def _patch_object( - target, attribute, new=DEFAULT, spec=None, - create=False, spec_set=None, autospec=None, - new_callable=None, *, unsafe=False, **kwargs - ): - """ - patch the named member (`attribute`) on an object (`target`) with a mock - object. - - `patch.object` can be used as a decorator, class decorator or a context - manager. 
Arguments `new`, `spec`, `create`, `spec_set`, - `autospec` and `new_callable` have the same meaning as for `patch`. Like - `patch`, `patch.object` takes arbitrary keyword arguments for configuring - the mock object it creates. - - When used as a class decorator `patch.object` honours `patch.TEST_PREFIX` - for choosing which methods to wrap. - """ - if type(target) is str: - raise TypeError( - f"{target!r} must be the actual object to be patched, not a str" - ) - getter = lambda: target - return _patch( - getter, attribute, new, spec, create, - spec_set, autospec, new_callable, kwargs, unsafe=unsafe - ) - - -def _patch_multiple(target, spec=None, create=False, spec_set=None, - autospec=None, new_callable=None, **kwargs): - """Perform multiple patches in a single call. It takes the object to be - patched (either as an object or a string to fetch the object by importing) - and keyword arguments for the patches:: - - with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'): - ... - - Use `DEFAULT` as the value if you want `patch.multiple` to create - mocks for you. In this case the created mocks are passed into a decorated - function by keyword, and a dictionary is returned when `patch.multiple` is - used as a context manager. - - `patch.multiple` can be used as a decorator, class decorator or a context - manager. The arguments `spec`, `spec_set`, `create`, - `autospec` and `new_callable` have the same meaning as for `patch`. These - arguments will be applied to *all* patches done by `patch.multiple`. - - When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX` - for choosing which methods to wrap. - """ - if type(target) is str: - getter = partial(pkgutil.resolve_name, target) - else: - getter = lambda: target - - if not kwargs: - raise ValueError( - 'Must supply at least one keyword argument with patch.multiple' - ) - # need to wrap in a list for python 3, where items is a view - items = list(kwargs.items()) - attribute, new = items[0] - patcher = _patch( - getter, attribute, new, spec, create, spec_set, - autospec, new_callable, {} - ) - patcher.attribute_name = attribute - for attribute, new in items[1:]: - this_patcher = _patch( - getter, attribute, new, spec, create, spec_set, - autospec, new_callable, {} - ) - this_patcher.attribute_name = attribute - patcher.additional_patchers.append(this_patcher) - return patcher - - -def patch( - target, new=DEFAULT, spec=None, create=False, - spec_set=None, autospec=None, new_callable=None, *, unsafe=False, **kwargs - ): - """ - `patch` acts as a function decorator, class decorator or a context - manager. Inside the body of the function or with statement, the `target` - is patched with a `new` object. When the function/with statement exits - the patch is undone. - - If `new` is omitted, then the target is replaced with an - `AsyncMock if the patched object is an async function or a - `MagicMock` otherwise. If `patch` is used as a decorator and `new` is - omitted, the created mock is passed in as an extra argument to the - decorated function. If `patch` is used as a context manager the created - mock is returned by the context manager. - - `target` should be a string in the form `'package.module.ClassName'`. The - `target` is imported and the specified object replaced with the `new` - object, so the `target` must be importable from the environment you are - calling `patch` from. The target is imported when the decorated function - is executed, not at decoration time. 
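(Reviewer note: the docstring above describes `patch` acting as a decorator and as a context manager, with the target resolved at run time; a minimal sketch of both forms against the stdlib `unittest.mock` follows. `os.getcwd` is just an arbitrary, always-importable target.)

    # Sketch of the two patch() forms documented above.
    import os
    from unittest.mock import patch

    # Context-manager form: the replacement is installed on entry to the
    # with-block and undone on exit.
    with patch('os.getcwd') as mock_getcwd:
        mock_getcwd.return_value = '/tmp/fake'
        assert os.getcwd() == '/tmp/fake'

    # Decorator form: the created MagicMock is passed to the decorated
    # function as an extra argument; the target is resolved when the
    # function runs, not at decoration time.
    @patch('os.getcwd')
    def check_cwd(mock_getcwd):
        mock_getcwd.return_value = '/srv/build'
        assert os.getcwd() == '/srv/build'

    check_cwd()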
- - The `spec` and `spec_set` keyword arguments are passed to the `MagicMock` - if patch is creating one for you. - - In addition you can pass `spec=True` or `spec_set=True`, which causes - patch to pass in the object being mocked as the spec/spec_set object. - - `new_callable` allows you to specify a different class, or callable object, - that will be called to create the `new` object. By default `AsyncMock` is - used for async functions and `MagicMock` for the rest. - - A more powerful form of `spec` is `autospec`. If you set `autospec=True` - then the mock will be created with a spec from the object being replaced. - All attributes of the mock will also have the spec of the corresponding - attribute of the object being replaced. Methods and functions being - mocked will have their arguments checked and will raise a `TypeError` if - they are called with the wrong signature. For mocks replacing a class, - their return value (the 'instance') will have the same spec as the class. - - Instead of `autospec=True` you can pass `autospec=some_object` to use an - arbitrary object as the spec instead of the one being replaced. - - By default `patch` will fail to replace attributes that don't exist. If - you pass in `create=True`, and the attribute doesn't exist, patch will - create the attribute for you when the patched function is called, and - delete it again afterwards. This is useful for writing tests against - attributes that your production code creates at runtime. It is off by - default because it can be dangerous. With it switched on you can write - passing tests against APIs that don't actually exist! - - Patch can be used as a `TestCase` class decorator. It works by - decorating each test method in the class. This reduces the boilerplate - code when your test methods share a common patchings set. `patch` finds - tests by looking for method names that start with `patch.TEST_PREFIX`. - By default this is `test`, which matches the way `unittest` finds tests. - You can specify an alternative prefix by setting `patch.TEST_PREFIX`. - - Patch can be used as a context manager, with the with statement. Here the - patching applies to the indented block after the with statement. If you - use "as" then the patched object will be bound to the name after the - "as"; very useful if `patch` is creating a mock object for you. - - Patch will raise a `RuntimeError` if passed some common misspellings of - the arguments autospec and spec_set. Pass the argument `unsafe` with the - value True to disable that check. - - `patch` takes arbitrary keyword arguments. These will be passed to - `AsyncMock` if the patched object is asynchronous, to `MagicMock` - otherwise or to `new_callable` if specified. - - `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are - available for alternate use-cases. - """ - getter, attribute = _get_target(target) - return _patch( - getter, attribute, new, spec, create, - spec_set, autospec, new_callable, kwargs, unsafe=unsafe - ) - - -class _patch_dict(object): - """ - Patch a dictionary, or dictionary like object, and restore the dictionary - to its original state after the test. - - `in_dict` can be a dictionary or a mapping like container. If it is a - mapping then it must at least support getting, setting and deleting items - plus iterating over keys. - - `in_dict` can also be a string specifying the name of the dictionary, which - will then be fetched by importing it. - - `values` can be a dictionary of values to set in the dictionary. 
`values` - can also be an iterable of `(key, value)` pairs. - - If `clear` is True then the dictionary will be cleared before the new - values are set. - - `patch.dict` can also be called with arbitrary keyword arguments to set - values in the dictionary:: - - with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()): - ... - - `patch.dict` can be used as a context manager, decorator or class - decorator. When used as a class decorator `patch.dict` honours - `patch.TEST_PREFIX` for choosing which methods to wrap. - """ - - def __init__(self, in_dict, values=(), clear=False, **kwargs): - self.in_dict = in_dict - # support any argument supported by dict(...) constructor - self.values = dict(values) - self.values.update(kwargs) - self.clear = clear - self._original = None - - - def __call__(self, f): - if isinstance(f, type): - return self.decorate_class(f) - if inspect.iscoroutinefunction(f): - return self.decorate_async_callable(f) - return self.decorate_callable(f) - - - def decorate_callable(self, f): - @wraps(f) - def _inner(*args, **kw): - self._patch_dict() - try: - return f(*args, **kw) - finally: - self._unpatch_dict() - - return _inner - - - def decorate_async_callable(self, f): - @wraps(f) - async def _inner(*args, **kw): - self._patch_dict() - try: - return await f(*args, **kw) - finally: - self._unpatch_dict() - - return _inner - - - def decorate_class(self, klass): - for attr in dir(klass): - attr_value = getattr(klass, attr) - if (attr.startswith(patch.TEST_PREFIX) and - hasattr(attr_value, "__call__")): - decorator = _patch_dict(self.in_dict, self.values, self.clear) - decorated = decorator(attr_value) - setattr(klass, attr, decorated) - return klass - - - def __enter__(self): - """Patch the dict.""" - self._patch_dict() - return self.in_dict - - - def _patch_dict(self): - values = self.values - if isinstance(self.in_dict, str): - self.in_dict = pkgutil.resolve_name(self.in_dict) - in_dict = self.in_dict - clear = self.clear - - try: - original = in_dict.copy() - except AttributeError: - # dict like object with no copy method - # must support iteration over keys - original = {} - for key in in_dict: - original[key] = in_dict[key] - self._original = original - - if clear: - _clear_dict(in_dict) - - try: - in_dict.update(values) - except AttributeError: - # dict like object with no update method - for key in values: - in_dict[key] = values[key] - - - def _unpatch_dict(self): - in_dict = self.in_dict - original = self._original - - _clear_dict(in_dict) - - try: - in_dict.update(original) - except AttributeError: - for key in original: - in_dict[key] = original[key] - - - def __exit__(self, *args): - """Unpatch the dict.""" - if self._original is not None: - self._unpatch_dict() - return False - - - def start(self): - """Activate a patch, returning any created mock.""" - result = self.__enter__() - _patch._active_patches.append(self) - return result - - - def stop(self): - """Stop an active patch.""" - try: - _patch._active_patches.remove(self) - except ValueError: - # If the patch hasn't been started this will fail - return None - - return self.__exit__(None, None, None) - - -def _clear_dict(in_dict): - try: - in_dict.clear() - except AttributeError: - keys = list(in_dict) - for key in keys: - del in_dict[key] - - -def _patch_stopall(): - """Stop all active patches. 
LIFO to unroll nested patches.""" - for patch in reversed(_patch._active_patches): - patch.stop() - - -patch.object = _patch_object -patch.dict = _patch_dict -patch.multiple = _patch_multiple -patch.stopall = _patch_stopall -patch.TEST_PREFIX = 'test' - -magic_methods = ( - "lt le gt ge eq ne " - "getitem setitem delitem " - "len contains iter " - "hash str sizeof " - "enter exit " - # we added divmod and rdivmod here instead of numerics - # because there is no idivmod - "divmod rdivmod neg pos abs invert " - "complex int float index " - "round trunc floor ceil " - "bool next " - "fspath " - "aiter " -) - -numerics = ( - "add sub mul matmul truediv floordiv mod lshift rshift and xor or pow" -) -inplace = ' '.join('i%s' % n for n in numerics.split()) -right = ' '.join('r%s' % n for n in numerics.split()) - -# not including __prepare__, __instancecheck__, __subclasscheck__ -# (as they are metaclass methods) -# __del__ is not supported at all as it causes problems if it exists - -_non_defaults = { - '__get__', '__set__', '__delete__', '__reversed__', '__missing__', - '__reduce__', '__reduce_ex__', '__getinitargs__', '__getnewargs__', - '__getstate__', '__setstate__', '__getformat__', - '__repr__', '__dir__', '__subclasses__', '__format__', - '__getnewargs_ex__', -} - - -def _get_method(name, func): - "Turns a callable object (like a mock) into a real function" - def method(self, /, *args, **kw): - return func(self, *args, **kw) - method.__name__ = name - return method - - -_magics = { - '__%s__' % method for method in - ' '.join([magic_methods, numerics, inplace, right]).split() -} - -# Magic methods used for async `with` statements -_async_method_magics = {"__aenter__", "__aexit__", "__anext__"} -# Magic methods that are only used with async calls but are synchronous functions themselves -_sync_async_magics = {"__aiter__"} -_async_magics = _async_method_magics | _sync_async_magics - -_all_sync_magics = _magics | _non_defaults -_all_magics = _all_sync_magics | _async_magics - -_unsupported_magics = { - '__getattr__', '__setattr__', - '__init__', '__new__', '__prepare__', - '__instancecheck__', '__subclasscheck__', - '__del__' -} - -_calculate_return_value = { - '__hash__': lambda self: object.__hash__(self), - '__str__': lambda self: object.__str__(self), - '__sizeof__': lambda self: object.__sizeof__(self), - '__fspath__': lambda self: f"{type(self).__name__}/{self._extract_mock_name()}/{id(self)}", -} - -_return_values = { - '__lt__': NotImplemented, - '__gt__': NotImplemented, - '__le__': NotImplemented, - '__ge__': NotImplemented, - '__int__': 1, - '__contains__': False, - '__len__': 0, - '__exit__': False, - '__complex__': 1j, - '__float__': 1.0, - '__bool__': True, - '__index__': 1, - '__aexit__': False, -} - - -def _get_eq(self): - def __eq__(other): - ret_val = self.__eq__._mock_return_value - if ret_val is not DEFAULT: - return ret_val - if self is other: - return True - return NotImplemented - return __eq__ - -def _get_ne(self): - def __ne__(other): - if self.__ne__._mock_return_value is not DEFAULT: - return DEFAULT - if self is other: - return False - return NotImplemented - return __ne__ - -def _get_iter(self): - def __iter__(): - ret_val = self.__iter__._mock_return_value - if ret_val is DEFAULT: - return iter([]) - # if ret_val was already an iterator, then calling iter on it should - # return the iterator unchanged - return iter(ret_val) - return __iter__ - -def _get_async_iter(self): - def __aiter__(): - ret_val = self.__aiter__._mock_return_value - if ret_val is DEFAULT: - 
return _AsyncIterator(iter([])) - return _AsyncIterator(iter(ret_val)) - return __aiter__ - -_side_effect_methods = { - '__eq__': _get_eq, - '__ne__': _get_ne, - '__iter__': _get_iter, - '__aiter__': _get_async_iter -} - - - -def _set_return_value(mock, method, name): - fixed = _return_values.get(name, DEFAULT) - if fixed is not DEFAULT: - method.return_value = fixed - return - - return_calculator = _calculate_return_value.get(name) - if return_calculator is not None: - return_value = return_calculator(mock) - method.return_value = return_value - return - - side_effector = _side_effect_methods.get(name) - if side_effector is not None: - method.side_effect = side_effector(mock) - - - -class MagicMixin(Base): - def __init__(self, /, *args, **kw): - self._mock_set_magics() # make magic work for kwargs in init - _safe_super(MagicMixin, self).__init__(*args, **kw) - self._mock_set_magics() # fix magic broken by upper level init - - - def _mock_set_magics(self): - orig_magics = _magics | _async_method_magics - these_magics = orig_magics - - if getattr(self, "_mock_methods", None) is not None: - these_magics = orig_magics.intersection(self._mock_methods) - - remove_magics = set() - remove_magics = orig_magics - these_magics - - for entry in remove_magics: - if entry in type(self).__dict__: - # remove unneeded magic methods - delattr(self, entry) - - # don't overwrite existing attributes if called a second time - these_magics = these_magics - set(type(self).__dict__) - - _type = type(self) - for entry in these_magics: - setattr(_type, entry, MagicProxy(entry, self)) - - - -class NonCallableMagicMock(MagicMixin, NonCallableMock): - """A version of `MagicMock` that isn't callable.""" - def mock_add_spec(self, spec, spec_set=False): - """Add a spec to a mock. `spec` can either be an object or a - list of strings. Only attributes on the `spec` can be fetched as - attributes from the mock. - - If `spec_set` is True then only attributes on the spec can be set.""" - self._mock_add_spec(spec, spec_set) - self._mock_set_magics() - - -class AsyncMagicMixin(MagicMixin): - pass - - -class MagicMock(MagicMixin, Mock): - """ - MagicMock is a subclass of Mock with default implementations - of most of the magic methods. You can use MagicMock without having to - configure the magic methods yourself. - - If you use the `spec` or `spec_set` arguments then *only* magic - methods that exist in the spec will be created. - - Attributes and the return value of a `MagicMock` will also be `MagicMocks`. - """ - def mock_add_spec(self, spec, spec_set=False): - """Add a spec to a mock. `spec` can either be an object or a - list of strings. Only attributes on the `spec` can be fetched as - attributes from the mock. - - If `spec_set` is True then only attributes on the spec can be set.""" - self._mock_add_spec(spec, spec_set) - self._mock_set_magics() - - def reset_mock(self, /, *args, return_value: bool = False, **kwargs): - if ( - return_value - and self._mock_name - and _is_magic(self._mock_name) - ): - # Don't reset return values for magic methods, - # otherwise `m.__str__` will start - # to return `MagicMock` instances, instead of `str` instances. 
- return_value = False - super().reset_mock(*args, return_value=return_value, **kwargs) - - -class MagicProxy(Base): - def __init__(self, name, parent): - self.name = name - self.parent = parent - - def create_mock(self): - entry = self.name - parent = self.parent - m = parent._get_child_mock(name=entry, _new_name=entry, - _new_parent=parent) - setattr(parent, entry, m) - _set_return_value(parent, m, entry) - return m - - def __get__(self, obj, _type=None): - return self.create_mock() - - -try: - _CODE_SIG = inspect.signature(partial(CodeType.__init__, None)) - _CODE_ATTRS = dir(CodeType) -except ValueError: - _CODE_SIG = None - - -class AsyncMockMixin(Base): - await_count = _delegating_property('await_count') - await_args = _delegating_property('await_args') - await_args_list = _delegating_property('await_args_list') - - def __init__(self, /, *args, **kwargs): - super().__init__(*args, **kwargs) - # iscoroutinefunction() checks _is_coroutine property to say if an - # object is a coroutine. Without this check it looks to see if it is a - # function/method, which in this case it is not (since it is an - # AsyncMock). - # It is set through __dict__ because when spec_set is True, this - # attribute is likely undefined. - self.__dict__['_is_coroutine'] = asyncio.coroutines._is_coroutine - self.__dict__['_mock_await_count'] = 0 - self.__dict__['_mock_await_args'] = None - self.__dict__['_mock_await_args_list'] = _CallList() - if _CODE_SIG: - code_mock = NonCallableMock(spec_set=_CODE_ATTRS) - code_mock.__dict__["_spec_class"] = CodeType - code_mock.__dict__["_spec_signature"] = _CODE_SIG - else: - code_mock = NonCallableMock(spec_set=CodeType) - code_mock.co_flags = ( - inspect.CO_COROUTINE - + inspect.CO_VARARGS - + inspect.CO_VARKEYWORDS - ) - code_mock.co_argcount = 0 - code_mock.co_varnames = ('args', 'kwargs') - code_mock.co_posonlyargcount = 0 - code_mock.co_kwonlyargcount = 0 - self.__dict__['__code__'] = code_mock - self.__dict__['__name__'] = 'AsyncMock' - self.__dict__['__defaults__'] = tuple() - self.__dict__['__kwdefaults__'] = {} - self.__dict__['__annotations__'] = None - - async def _execute_mock_call(self, /, *args, **kwargs): - # This is nearly just like super(), except for special handling - # of coroutines - - _call = _Call((args, kwargs), two=True) - self.await_count += 1 - self.await_args = _call - self.await_args_list.append(_call) - - effect = self.side_effect - if effect is not None: - if _is_exception(effect): - raise effect - elif not _callable(effect): - try: - result = next(effect) - except StopIteration: - # It is impossible to propagate a StopIteration - # through coroutines because of PEP 479 - raise StopAsyncIteration - if _is_exception(result): - raise result - elif iscoroutinefunction(effect): - result = await effect(*args, **kwargs) - else: - result = effect(*args, **kwargs) - - if result is not DEFAULT: - return result - - if self._mock_return_value is not DEFAULT: - return self.return_value - - if self._mock_wraps is not None: - if iscoroutinefunction(self._mock_wraps): - return await self._mock_wraps(*args, **kwargs) - return self._mock_wraps(*args, **kwargs) - - return self.return_value - - def assert_awaited(self): - """ - Assert that the mock was awaited at least once. - """ - if self.await_count == 0: - msg = f"Expected {self._mock_name or 'mock'} to have been awaited." - raise AssertionError(msg) - - def assert_awaited_once(self): - """ - Assert that the mock was awaited exactly once. 
- """ - if not self.await_count == 1: - msg = (f"Expected {self._mock_name or 'mock'} to have been awaited once." - f" Awaited {self.await_count} times.") - raise AssertionError(msg) - - def assert_awaited_with(self, /, *args, **kwargs): - """ - Assert that the last await was with the specified arguments. - """ - if self.await_args is None: - expected = self._format_mock_call_signature(args, kwargs) - raise AssertionError(f'Expected await: {expected}\nNot awaited') - - def _error_message(): - msg = self._format_mock_failure_message(args, kwargs, action='await') - return msg - - expected = self._call_matcher(_Call((args, kwargs), two=True)) - actual = self._call_matcher(self.await_args) - if actual != expected: - cause = expected if isinstance(expected, Exception) else None - raise AssertionError(_error_message()) from cause - - def assert_awaited_once_with(self, /, *args, **kwargs): - """ - Assert that the mock was awaited exactly once and with the specified - arguments. - """ - if not self.await_count == 1: - msg = (f"Expected {self._mock_name or 'mock'} to have been awaited once." - f" Awaited {self.await_count} times.") - raise AssertionError(msg) - return self.assert_awaited_with(*args, **kwargs) - - def assert_any_await(self, /, *args, **kwargs): - """ - Assert the mock has ever been awaited with the specified arguments. - """ - expected = self._call_matcher(_Call((args, kwargs), two=True)) - cause = expected if isinstance(expected, Exception) else None - actual = [self._call_matcher(c) for c in self.await_args_list] - if cause or expected not in _AnyComparer(actual): - expected_string = self._format_mock_call_signature(args, kwargs) - raise AssertionError( - '%s await not found' % expected_string - ) from cause - - def assert_has_awaits(self, calls, any_order=False): - """ - Assert the mock has been awaited with the specified calls. - The :attr:`await_args_list` list is checked for the awaits. - - If `any_order` is False (the default) then the awaits must be - sequential. There can be extra calls before or after the - specified awaits. - - If `any_order` is True then the awaits can be in any order, but - they must all appear in :attr:`await_args_list`. - """ - expected = [self._call_matcher(c) for c in calls] - cause = next((e for e in expected if isinstance(e, Exception)), None) - all_awaits = _CallList(self._call_matcher(c) for c in self.await_args_list) - if not any_order: - if expected not in all_awaits: - if cause is None: - problem = 'Awaits not found.' - else: - problem = ('Error processing expected awaits.\n' - 'Errors: {}').format( - [e if isinstance(e, Exception) else None - for e in expected]) - raise AssertionError( - f'{problem}\n' - f'Expected: {_CallList(calls)}\n' - f'Actual: {self.await_args_list}' - ) from cause - return - - all_awaits = list(all_awaits) - - not_found = [] - for kall in expected: - try: - all_awaits.remove(kall) - except ValueError: - not_found.append(kall) - if not_found: - raise AssertionError( - '%r not all found in await list' % (tuple(not_found),) - ) from cause - - def assert_not_awaited(self): - """ - Assert that the mock was never awaited. - """ - if self.await_count != 0: - msg = (f"Expected {self._mock_name or 'mock'} to not have been awaited." 
- f" Awaited {self.await_count} times.") - raise AssertionError(msg) - - def reset_mock(self, /, *args, **kwargs): - """ - See :func:`.Mock.reset_mock()` - """ - super().reset_mock(*args, **kwargs) - self.await_count = 0 - self.await_args = None - self.await_args_list = _CallList() - - -class AsyncMock(AsyncMockMixin, AsyncMagicMixin, Mock): - """ - Enhance :class:`Mock` with features allowing to mock - an async function. - - The :class:`AsyncMock` object will behave so the object is - recognized as an async function, and the result of a call is an awaitable: - - >>> mock = AsyncMock() - >>> iscoroutinefunction(mock) - True - >>> inspect.isawaitable(mock()) - True - - - The result of ``mock()`` is an async function which will have the outcome - of ``side_effect`` or ``return_value``: - - - if ``side_effect`` is a function, the async function will return the - result of that function, - - if ``side_effect`` is an exception, the async function will raise the - exception, - - if ``side_effect`` is an iterable, the async function will return the - next value of the iterable, however, if the sequence of result is - exhausted, ``StopIteration`` is raised immediately, - - if ``side_effect`` is not defined, the async function will return the - value defined by ``return_value``, hence, by default, the async function - returns a new :class:`AsyncMock` object. - - If the outcome of ``side_effect`` or ``return_value`` is an async function, - the mock async function obtained when the mock object is called will be this - async function itself (and not an async function returning an async - function). - - The test author can also specify a wrapped object with ``wraps``. In this - case, the :class:`Mock` object behavior is the same as with an - :class:`.Mock` object: the wrapped object may have methods - defined as async function functions. - - Based on Martin Richard's asynctest project. - """ - - -class _ANY(object): - "A helper object that compares equal to everything." - - def __eq__(self, other): - return True - - def __ne__(self, other): - return False - - def __repr__(self): - return '' - -ANY = _ANY() - - - -def _format_call_signature(name, args, kwargs): - message = '%s(%%s)' % name - formatted_args = '' - args_string = ', '.join([repr(arg) for arg in args]) - kwargs_string = ', '.join([ - '%s=%r' % (key, value) for key, value in kwargs.items() - ]) - if args_string: - formatted_args = args_string - if kwargs_string: - if formatted_args: - formatted_args += ', ' - formatted_args += kwargs_string - - return message % formatted_args - - - -class _Call(tuple): - """ - A tuple for holding the results of a call to a mock, either in the form - `(args, kwargs)` or `(name, args, kwargs)`. - - If args or kwargs are empty then a call tuple will compare equal to - a tuple without those values. This makes comparisons less verbose:: - - _Call(('name', (), {})) == ('name',) - _Call(('name', (1,), {})) == ('name', (1,)) - _Call(((), {'a': 'b'})) == ({'a': 'b'},) - - The `_Call` object provides a useful shortcut for comparing with call:: - - _Call(((1, 2), {'a': 3})) == call(1, 2, a=3) - _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3) - - If the _Call has no name then it will match any name. 
- """ - def __new__(cls, value=(), name='', parent=None, two=False, - from_kall=True): - args = () - kwargs = {} - _len = len(value) - if _len == 3: - name, args, kwargs = value - elif _len == 2: - first, second = value - if isinstance(first, str): - name = first - if isinstance(second, tuple): - args = second - else: - kwargs = second - else: - args, kwargs = first, second - elif _len == 1: - value, = value - if isinstance(value, str): - name = value - elif isinstance(value, tuple): - args = value - else: - kwargs = value - - if two: - return tuple.__new__(cls, (args, kwargs)) - - return tuple.__new__(cls, (name, args, kwargs)) - - - def __init__(self, value=(), name=None, parent=None, two=False, - from_kall=True): - self._mock_name = name - self._mock_parent = parent - self._mock_from_kall = from_kall - - - def __eq__(self, other): - try: - len_other = len(other) - except TypeError: - return NotImplemented - - self_name = '' - if len(self) == 2: - self_args, self_kwargs = self - else: - self_name, self_args, self_kwargs = self - - if (getattr(self, '_mock_parent', None) and getattr(other, '_mock_parent', None) - and self._mock_parent != other._mock_parent): - return False - - other_name = '' - if len_other == 0: - other_args, other_kwargs = (), {} - elif len_other == 3: - other_name, other_args, other_kwargs = other - elif len_other == 1: - value, = other - if isinstance(value, tuple): - other_args = value - other_kwargs = {} - elif isinstance(value, str): - other_name = value - other_args, other_kwargs = (), {} - else: - other_args = () - other_kwargs = value - elif len_other == 2: - # could be (name, args) or (name, kwargs) or (args, kwargs) - first, second = other - if isinstance(first, str): - other_name = first - if isinstance(second, tuple): - other_args, other_kwargs = second, {} - else: - other_args, other_kwargs = (), second - else: - other_args, other_kwargs = first, second - else: - return False - - if self_name and other_name != self_name: - return False - - # this order is important for ANY to work! 
- return (other_args, other_kwargs) == (self_args, self_kwargs) - - - __ne__ = object.__ne__ - - - def __call__(self, /, *args, **kwargs): - if self._mock_name is None: - return _Call(('', args, kwargs), name='()') - - name = self._mock_name + '()' - return _Call((self._mock_name, args, kwargs), name=name, parent=self) - - - def __getattr__(self, attr): - if self._mock_name is None: - return _Call(name=attr, from_kall=False) - name = '%s.%s' % (self._mock_name, attr) - return _Call(name=name, parent=self, from_kall=False) - - - def __getattribute__(self, attr): - if attr in tuple.__dict__: - raise AttributeError - return tuple.__getattribute__(self, attr) - - - def _get_call_arguments(self): - if len(self) == 2: - args, kwargs = self - else: - name, args, kwargs = self - - return args, kwargs - - @property - def args(self): - return self._get_call_arguments()[0] - - @property - def kwargs(self): - return self._get_call_arguments()[1] - - def __repr__(self): - if not self._mock_from_kall: - name = self._mock_name or 'call' - if name.startswith('()'): - name = 'call%s' % name - return name - - if len(self) == 2: - name = 'call' - args, kwargs = self - else: - name, args, kwargs = self - if not name: - name = 'call' - elif not name.startswith('()'): - name = 'call.%s' % name - else: - name = 'call%s' % name - return _format_call_signature(name, args, kwargs) - - - def call_list(self): - """For a call object that represents multiple calls, `call_list` - returns a list of all the intermediate calls as well as the - final call.""" - vals = [] - thing = self - while thing is not None: - if thing._mock_from_kall: - vals.append(thing) - thing = thing._mock_parent - return _CallList(reversed(vals)) - - -call = _Call(from_kall=False) - - -def create_autospec(spec, spec_set=False, instance=False, _parent=None, - _name=None, *, unsafe=False, **kwargs): - """Create a mock object using another object as a spec. Attributes on the - mock will use the corresponding attribute on the `spec` object as their - spec. - - Functions or methods being mocked will have their arguments checked - to check that they are called with the correct signature. - - If `spec_set` is True then attempting to set attributes that don't exist - on the spec object will raise an `AttributeError`. - - If a class is used as a spec then the return value of the mock (the - instance of the class) will have the same spec. You can use a class as the - spec for an instance object by passing `instance=True`. The returned mock - will only be callable if instances of the mock are callable. - - `create_autospec` will raise a `RuntimeError` if passed some common - misspellings of the arguments autospec and spec_set. Pass the argument - `unsafe` with the value True to disable that check. - - `create_autospec` also takes arbitrary keyword arguments that are passed to - the constructor of the created mock.""" - if _is_list(spec): - # can't pass a list instance to the mock constructor as it will be - # interpreted as a list of strings - spec = type(spec) - - is_type = isinstance(spec, type) - if _is_instance_mock(spec): - raise InvalidSpecError(f'Cannot autospec a Mock object. 
' - f'[object={spec!r}]') - is_async_func = _is_async_func(spec) - _kwargs = {'spec': spec} - if spec_set: - _kwargs = {'spec_set': spec} - elif spec is None: - # None we mock with a normal mock without a spec - _kwargs = {} - if _kwargs and instance: - _kwargs['_spec_as_instance'] = True - if not unsafe: - _check_spec_arg_typos(kwargs) - - _name = kwargs.pop('name', _name) - _new_name = _name - if _parent is None: - # for a top level object no _new_name should be set - _new_name = '' - - _kwargs.update(kwargs) - - Klass = MagicMock - if inspect.isdatadescriptor(spec): - # descriptors don't have a spec - # because we don't know what type they return - _kwargs = {} - elif is_async_func: - if instance: - raise RuntimeError("Instance can not be True when create_autospec " - "is mocking an async function") - Klass = AsyncMock - elif not _callable(spec): - Klass = NonCallableMagicMock - elif is_type and instance and not _instance_callable(spec): - Klass = NonCallableMagicMock - - mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name, - name=_name, **_kwargs) - - if isinstance(spec, FunctionTypes): - # should only happen at the top level because we don't - # recurse for functions - if is_async_func: - mock = _set_async_signature(mock, spec) - else: - mock = _set_signature(mock, spec) - else: - _check_signature(spec, mock, is_type, instance) - - if _parent is not None and not instance: - _parent._mock_children[_name] = mock - - # Pop wraps from kwargs because it must not be passed to configure_mock. - wrapped = kwargs.pop('wraps', None) - if is_type and not instance and 'return_value' not in kwargs: - mock.return_value = create_autospec(spec, spec_set, instance=True, - _name='()', _parent=mock, - wraps=wrapped) - - for entry in dir(spec): - if _is_magic(entry): - # MagicMock already does the useful magic methods for us - continue - - # XXXX do we need a better way of getting attributes without - # triggering code execution (?) Probably not - we need the actual - # object to mock it so we would rather trigger a property than mock - # the property descriptor. Likewise we want to mock out dynamically - # provided attributes. - # XXXX what about attributes that raise exceptions other than - # AttributeError on being fetched? - # we could be resilient against it, or catch and propagate the - # exception when the attribute is fetched from the mock - try: - original = getattr(spec, entry) - except AttributeError: - continue - - child_kwargs = {'spec': original} - # Wrap child attributes also. - if wrapped and hasattr(wrapped, entry): - child_kwargs.update(wraps=original) - if spec_set: - child_kwargs = {'spec_set': original} - - if not isinstance(original, FunctionTypes): - new = _SpecState(original, spec_set, mock, entry, instance) - mock._mock_children[entry] = new - else: - parent = mock - if isinstance(spec, FunctionTypes): - parent = mock.mock - - skipfirst = _must_skip(spec, entry, is_type) - child_kwargs['_eat_self'] = skipfirst - if iscoroutinefunction(original): - child_klass = AsyncMock - else: - child_klass = MagicMock - new = child_klass(parent=parent, name=entry, _new_name=entry, - _new_parent=parent, **child_kwargs) - mock._mock_children[entry] = new - new.return_value = child_klass() - _check_signature(original, new, skipfirst=skipfirst) - - # so functions created with _set_signature become instance attributes, - # *plus* their underlying mock exists in _mock_children of the parent - # mock. 
Adding to _mock_children may be unnecessary where we are also - # setting as an instance attribute? - if isinstance(new, FunctionTypes): - setattr(mock, entry, new) - # kwargs are passed with respect to the parent mock so they are not used - # for creating return_value of the parent mock. So, this condition - # should be true only for the parent mock if kwargs are given. - if _is_instance_mock(mock) and kwargs: - mock.configure_mock(**kwargs) - - return mock - - -def _must_skip(spec, entry, is_type): - """ - Return whether we should skip the first argument on spec's `entry` - attribute. - """ - if not isinstance(spec, type): - if entry in getattr(spec, '__dict__', {}): - # instance attribute - shouldn't skip - return False - spec = spec.__class__ - - for klass in spec.__mro__: - result = klass.__dict__.get(entry, DEFAULT) - if result is DEFAULT: - continue - if isinstance(result, (staticmethod, classmethod)): - return False - elif isinstance(result, FunctionTypes): - # Normal method => skip if looked up on type - # (if looked up on instance, self is already skipped) - return is_type - else: - return False - - # function is a dynamically provided attribute - return is_type - - -class _SpecState(object): - - def __init__(self, spec, spec_set=False, parent=None, - name=None, ids=None, instance=False): - self.spec = spec - self.ids = ids - self.spec_set = spec_set - self.parent = parent - self.instance = instance - self.name = name - - -FunctionTypes = ( - # python function - type(create_autospec), - # instance method - type(ANY.__eq__), -) - - -file_spec = None -open_spec = None - - -def _to_stream(read_data): - if isinstance(read_data, bytes): - return io.BytesIO(read_data) - else: - return io.StringIO(read_data) - - -def mock_open(mock=None, read_data=''): - """ - A helper function to create a mock to replace the use of `open`. It works - for `open` called directly or used as a context manager. - - The `mock` argument is the mock object to configure. If `None` (the - default) then a `MagicMock` will be created for you, with the API limited - to methods or attributes available on standard file handles. - - `read_data` is a string for the `read`, `readline` and `readlines` methods of the - file handle to return. This is an empty string by default.
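Illustrative usage, not part of the diff: minimal sketches of the two helpers defined above; `greet`, the fake path and the literal arguments are invented for the example.

from unittest.mock import create_autospec, mock_open, patch

def greet(name, punctuation="!"):
    return f"Hello, {name}{punctuation}"

# create_autospec copies the spec's signature, so bad calls fail fast.
mock_greet = create_autospec(greet, return_value="hi")
mock_greet("world")
mock_greet.assert_called_once_with("world")
try:
    mock_greet("a", "b", "c")  # one positional argument too many
except TypeError:
    pass

# mock_open replaces the built-in open; read_data feeds read()/readline().
m = mock_open(read_data="line 1\nline 2\n")
with patch("builtins.open", m):
    with open("/fake/path") as f:
        assert f.readline() == "line 1\n"
        assert f.read() == "line 2\n"
m.assert_called_once_with("/fake/path")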
- """ - _read_data = _to_stream(read_data) - _state = [_read_data, None] - - def _readlines_side_effect(*args, **kwargs): - if handle.readlines.return_value is not None: - return handle.readlines.return_value - return _state[0].readlines(*args, **kwargs) - - def _read_side_effect(*args, **kwargs): - if handle.read.return_value is not None: - return handle.read.return_value - return _state[0].read(*args, **kwargs) - - def _readline_side_effect(*args, **kwargs): - yield from _iter_side_effect() - while True: - yield _state[0].readline(*args, **kwargs) - - def _iter_side_effect(): - if handle.readline.return_value is not None: - while True: - yield handle.readline.return_value - for line in _state[0]: - yield line - - def _next_side_effect(): - if handle.readline.return_value is not None: - return handle.readline.return_value - return next(_state[0]) - - def _exit_side_effect(exctype, excinst, exctb): - handle.close() - - global file_spec - if file_spec is None: - import _io - file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) - - global open_spec - if open_spec is None: - import _io - open_spec = list(set(dir(_io.open))) - if mock is None: - mock = MagicMock(name='open', spec=open_spec) - - handle = MagicMock(spec=file_spec) - handle.__enter__.return_value = handle - - handle.write.return_value = None - handle.read.return_value = None - handle.readline.return_value = None - handle.readlines.return_value = None - - handle.read.side_effect = _read_side_effect - _state[1] = _readline_side_effect() - handle.readline.side_effect = _state[1] - handle.readlines.side_effect = _readlines_side_effect - handle.__iter__.side_effect = _iter_side_effect - handle.__next__.side_effect = _next_side_effect - handle.__exit__.side_effect = _exit_side_effect - - def reset_data(*args, **kwargs): - _state[0] = _to_stream(read_data) - if handle.readline.side_effect == _state[1]: - # Only reset the side effect if the user hasn't overridden it. - _state[1] = _readline_side_effect() - handle.readline.side_effect = _state[1] - return DEFAULT - - mock.side_effect = reset_data - mock.return_value = handle - return mock - - -class PropertyMock(Mock): - """ - A mock intended to be used as a property, or other descriptor, on a class. - `PropertyMock` provides `__get__` and `__set__` methods so you can specify - a return value when it is fetched. - - Fetching a `PropertyMock` instance from an object calls the mock, with - no args. Setting it calls the mock with the value being set. 
- """ - def _get_child_mock(self, /, **kwargs): - return MagicMock(**kwargs) - - def __get__(self, obj, obj_type=None): - return self() - def __set__(self, obj, val): - self(val) - - -_timeout_unset = sentinel.TIMEOUT_UNSET - -class ThreadingMixin(Base): - - DEFAULT_TIMEOUT = None - - def _get_child_mock(self, /, **kw): - if isinstance(kw.get("parent"), ThreadingMixin): - kw["timeout"] = kw["parent"]._mock_wait_timeout - elif isinstance(kw.get("_new_parent"), ThreadingMixin): - kw["timeout"] = kw["_new_parent"]._mock_wait_timeout - return super()._get_child_mock(**kw) - - def __init__(self, *args, timeout=_timeout_unset, **kwargs): - super().__init__(*args, **kwargs) - if timeout is _timeout_unset: - timeout = self.DEFAULT_TIMEOUT - self.__dict__["_mock_event"] = threading.Event() # Event for any call - self.__dict__["_mock_calls_events"] = [] # Events for each of the calls - self.__dict__["_mock_calls_events_lock"] = threading.Lock() - self.__dict__["_mock_wait_timeout"] = timeout - - def reset_mock(self, /, *args, **kwargs): - """ - See :func:`.Mock.reset_mock()` - """ - super().reset_mock(*args, **kwargs) - self.__dict__["_mock_event"] = threading.Event() - self.__dict__["_mock_calls_events"] = [] - - def __get_event(self, expected_args, expected_kwargs): - with self._mock_calls_events_lock: - for args, kwargs, event in self._mock_calls_events: - if (args, kwargs) == (expected_args, expected_kwargs): - return event - new_event = threading.Event() - self._mock_calls_events.append((expected_args, expected_kwargs, new_event)) - return new_event - - def _mock_call(self, *args, **kwargs): - ret_value = super()._mock_call(*args, **kwargs) - - call_event = self.__get_event(args, kwargs) - call_event.set() - - self._mock_event.set() - - return ret_value - - def wait_until_called(self, *, timeout=_timeout_unset): - """Wait until the mock object is called. - - `timeout` - time to wait for in seconds, waits forever otherwise. - Defaults to the constructor provided timeout. - Use None to block undefinetively. - """ - if timeout is _timeout_unset: - timeout = self._mock_wait_timeout - if not self._mock_event.wait(timeout=timeout): - msg = (f"{self._mock_name or 'mock'} was not called before" - f" timeout({timeout}).") - raise AssertionError(msg) - - def wait_until_any_call_with(self, *args, **kwargs): - """Wait until the mock object is called with given args. - - Waits for the timeout in seconds provided in the constructor. - """ - event = self.__get_event(args, kwargs) - if not event.wait(timeout=self._mock_wait_timeout): - expected_string = self._format_mock_call_signature(args, kwargs) - raise AssertionError(f'{expected_string} call not found') - - -class ThreadingMock(ThreadingMixin, MagicMixin, Mock): - """ - A mock that can be used to wait until on calls happening - in a different thread. - - The constructor can take a `timeout` argument which - controls the timeout in seconds for all `wait` calls of the mock. - - You can change the default timeout of all instances via the - `ThreadingMock.DEFAULT_TIMEOUT` attribute. - - If no timeout is set, it will block undefinetively. - """ - pass - - -def seal(mock): - """Disable the automatic generation of child mocks. - - Given an input Mock, seals it to ensure no further mocks will be generated - when accessing an attribute that was not already defined. 
- - The operation recursively seals the mock passed in, meaning that - the mock itself, any mocks generated by accessing one of its attributes, - and all assigned mocks without a name or spec will be sealed. - """ - mock._mock_sealed = True - for attr in dir(mock): - try: - m = getattr(mock, attr) - except AttributeError: - continue - if not isinstance(m, NonCallableMock): - continue - if isinstance(m._mock_children.get(attr), _SpecState): - continue - if m._mock_new_parent is mock: - seal(m) - - -class _AsyncIterator: - """ - Wraps an iterator in an asynchronous iterator. - """ - def __init__(self, iterator): - self.iterator = iterator - code_mock = NonCallableMock(spec_set=CodeType) - code_mock.co_flags = inspect.CO_ITERABLE_COROUTINE - self.__dict__['__code__'] = code_mock - - async def __anext__(self): - try: - return next(self.iterator) - except StopIteration: - pass - raise StopAsyncIteration diff --git a/Python313_13_x64_Template/Lib/unittest/result.py b/Python313_13_x64_Template/Lib/unittest/result.py deleted file mode 100644 index 3ace0a5b..00000000 --- a/Python313_13_x64_Template/Lib/unittest/result.py +++ /dev/null @@ -1,256 +0,0 @@ -"""Test result object""" - -import io -import sys -import traceback - -from . import util -from functools import wraps - -__unittest = True - -def failfast(method): - @wraps(method) - def inner(self, *args, **kw): - if getattr(self, 'failfast', False): - self.stop() - return method(self, *args, **kw) - return inner - -STDOUT_LINE = '\nStdout:\n%s' -STDERR_LINE = '\nStderr:\n%s' - - -class TestResult(object): - """Holder for test result information. - - Test results are automatically managed by the TestCase and TestSuite - classes, and do not need to be explicitly manipulated by writers of tests. - - Each instance holds the total number of tests run, and collections of - failures and errors that occurred among those test runs. The collections - contain tuples of (testcase, exceptioninfo), where exceptioninfo is the - formatted traceback of the error that occurred. - """ - _previousTestClass = None - _testRunEntered = False - _moduleSetUpFailed = False - def __init__(self, stream=None, descriptions=None, verbosity=None): - self.failfast = False - self.failures = [] - self.errors = [] - self.testsRun = 0 - self.skipped = [] - self.expectedFailures = [] - self.unexpectedSuccesses = [] - self.collectedDurations = [] - self.shouldStop = False - self.buffer = False - self.tb_locals = False - self._stdout_buffer = None - self._stderr_buffer = None - self._original_stdout = sys.stdout - self._original_stderr = sys.stderr - self._mirrorOutput = False - - def printErrors(self): - "Called by TestRunner after test run" - - def startTest(self, test): - "Called when the given test is about to be run" - self.testsRun += 1 - self._mirrorOutput = False - self._setupStdout() - - def _setupStdout(self): - if self.buffer: - if self._stderr_buffer is None: - self._stderr_buffer = io.StringIO() - self._stdout_buffer = io.StringIO() - sys.stdout = self._stdout_buffer - sys.stderr = self._stderr_buffer - - def startTestRun(self): - """Called once before any tests are executed. - - See startTest for a method called before each test. 
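Illustrative usage, not part of the diff: a TestResult can also be driven directly, which is roughly what the runner further down does; `Smoke` is a throwaway test case.

import unittest

class Smoke(unittest.TestCase):
    def test_fails(self):
        self.fail("boom")

result = unittest.TestResult()
unittest.defaultTestLoader.loadTestsFromTestCase(Smoke).run(result)
assert result.testsRun == 1
assert len(result.failures) == 1 and not result.wasSuccessful()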
- """ - - def stopTest(self, test): - """Called when the given test has been run""" - self._restoreStdout() - self._mirrorOutput = False - - def _restoreStdout(self): - if self.buffer: - if self._mirrorOutput: - output = sys.stdout.getvalue() - error = sys.stderr.getvalue() - if output: - if not output.endswith('\n'): - output += '\n' - self._original_stdout.write(STDOUT_LINE % output) - if error: - if not error.endswith('\n'): - error += '\n' - self._original_stderr.write(STDERR_LINE % error) - - sys.stdout = self._original_stdout - sys.stderr = self._original_stderr - self._stdout_buffer.seek(0) - self._stdout_buffer.truncate() - self._stderr_buffer.seek(0) - self._stderr_buffer.truncate() - - def stopTestRun(self): - """Called once after all tests are executed. - - See stopTest for a method called after each test. - """ - - @failfast - def addError(self, test, err): - """Called when an error has occurred. 'err' is a tuple of values as - returned by sys.exc_info(). - """ - self.errors.append((test, self._exc_info_to_string(err, test))) - self._mirrorOutput = True - - @failfast - def addFailure(self, test, err): - """Called when an error has occurred. 'err' is a tuple of values as - returned by sys.exc_info().""" - self.failures.append((test, self._exc_info_to_string(err, test))) - self._mirrorOutput = True - - def addSubTest(self, test, subtest, err): - """Called at the end of a subtest. - 'err' is None if the subtest ended successfully, otherwise it's a - tuple of values as returned by sys.exc_info(). - """ - # By default, we don't do anything with successful subtests, but - # more sophisticated test results might want to record them. - if err is not None: - if getattr(self, 'failfast', False): - self.stop() - if issubclass(err[0], test.failureException): - errors = self.failures - else: - errors = self.errors - errors.append((subtest, self._exc_info_to_string(err, test))) - self._mirrorOutput = True - - def addSuccess(self, test): - "Called when a test has completed successfully" - pass - - def addSkip(self, test, reason): - """Called when a test is skipped.""" - self.skipped.append((test, reason)) - - def addExpectedFailure(self, test, err): - """Called when an expected failure/error occurred.""" - self.expectedFailures.append( - (test, self._exc_info_to_string(err, test))) - - @failfast - def addUnexpectedSuccess(self, test): - """Called when a test was expected to fail, but succeed.""" - self.unexpectedSuccesses.append(test) - - def addDuration(self, test, elapsed): - """Called when a test finished to run, regardless of its outcome. - *test* is the test case corresponding to the test method. - *elapsed* is the time represented in seconds, and it includes the - execution of cleanup functions. - """ - # support for a TextTestRunner using an old TestResult class - if hasattr(self, "collectedDurations"): - # Pass test repr and not the test object itself to avoid resources leak - self.collectedDurations.append((str(test), elapsed)) - - def wasSuccessful(self): - """Tells whether or not this result was a success.""" - # The hasattr check is for test_result's OldResult test. That - # way this method works on objects that lack the attribute. - # (where would such result instances come from? old stored pickles?) 
- return ((len(self.failures) == len(self.errors) == 0) and - (not hasattr(self, 'unexpectedSuccesses') or - len(self.unexpectedSuccesses) == 0)) - - def stop(self): - """Indicates that the tests should be aborted.""" - self.shouldStop = True - - def _exc_info_to_string(self, err, test): - """Converts a sys.exc_info()-style tuple of values into a string.""" - exctype, value, tb = err - tb = self._clean_tracebacks(exctype, value, tb, test) - tb_e = traceback.TracebackException( - exctype, value, tb, - capture_locals=self.tb_locals, compact=True) - msgLines = list(tb_e.format()) - - if self.buffer: - output = sys.stdout.getvalue() - error = sys.stderr.getvalue() - if output: - if not output.endswith('\n'): - output += '\n' - msgLines.append(STDOUT_LINE % output) - if error: - if not error.endswith('\n'): - error += '\n' - msgLines.append(STDERR_LINE % error) - return ''.join(msgLines) - - def _clean_tracebacks(self, exctype, value, tb, test): - ret = None - first = True - excs = [(exctype, value, tb)] - seen = {id(value)} # Detect loops in chained exceptions. - while excs: - (exctype, value, tb) = excs.pop() - # Skip test runner traceback levels - while tb and self._is_relevant_tb_level(tb): - tb = tb.tb_next - - # Skip assert*() traceback levels - if exctype is test.failureException: - self._remove_unittest_tb_frames(tb) - - if first: - ret = tb - first = False - else: - value.__traceback__ = tb - - if value is not None: - for c in (value.__cause__, value.__context__): - if c is not None and id(c) not in seen: - excs.append((type(c), c, c.__traceback__)) - seen.add(id(c)) - return ret - - def _is_relevant_tb_level(self, tb): - return '__unittest' in tb.tb_frame.f_globals - - def _remove_unittest_tb_frames(self, tb): - '''Truncates usercode tb at the first unittest frame. - - If the first frame of the traceback is in user code, - the prefix up to the first unittest frame is returned. - If the first frame is already in the unittest module, - the traceback is not modified. - ''' - prev = None - while tb and not self._is_relevant_tb_level(tb): - prev = tb - tb = tb.tb_next - if prev is not None: - prev.tb_next = None - - def __repr__(self): - return ("<%s run=%i errors=%i failures=%i>" % - (util.strclass(self.__class__), self.testsRun, len(self.errors), - len(self.failures))) diff --git a/Python313_13_x64_Template/Lib/unittest/runner.py b/Python313_13_x64_Template/Lib/unittest/runner.py deleted file mode 100644 index 2bcadf0c..00000000 --- a/Python313_13_x64_Template/Lib/unittest/runner.py +++ /dev/null @@ -1,292 +0,0 @@ -"""Running tests""" - -import sys -import time -import warnings - -from . import result -from .case import _SubTest -from .signals import registerResult - -__unittest = True - - -class _WritelnDecorator(object): - """Used to decorate file-like objects with a handy 'writeln' method""" - def __init__(self,stream): - self.stream = stream - - def __getattr__(self, attr): - if attr in ('stream', '__getstate__'): - raise AttributeError(attr) - return getattr(self.stream,attr) - - def writeln(self, arg=None): - if arg: - self.write(arg) - self.write('\n') # text-mode streams translate to \r\n if needed - - -class TextTestResult(result.TestResult): - """A test result class that can print formatted text results to a stream. - - Used by TextTestRunner. - """ - separator1 = '=' * 70 - separator2 = '-' * 70 - - def __init__(self, stream, descriptions, verbosity, *, durations=None): - """Construct a TextTestResult. 
Subclasses should accept **kwargs - to ensure compatibility as the interface changes.""" - super(TextTestResult, self).__init__(stream, descriptions, verbosity) - self.stream = stream - self.showAll = verbosity > 1 - self.dots = verbosity == 1 - self.descriptions = descriptions - self._newline = True - self.durations = durations - - def getDescription(self, test): - doc_first_line = test.shortDescription() - if self.descriptions and doc_first_line: - return '\n'.join((str(test), doc_first_line)) - else: - return str(test) - - def startTest(self, test): - super(TextTestResult, self).startTest(test) - if self.showAll: - self.stream.write(self.getDescription(test)) - self.stream.write(" ... ") - self.stream.flush() - self._newline = False - - def _write_status(self, test, status): - is_subtest = isinstance(test, _SubTest) - if is_subtest or self._newline: - if not self._newline: - self.stream.writeln() - if is_subtest: - self.stream.write(" ") - self.stream.write(self.getDescription(test)) - self.stream.write(" ... ") - self.stream.writeln(status) - self.stream.flush() - self._newline = True - - def addSubTest(self, test, subtest, err): - if err is not None: - if self.showAll: - if issubclass(err[0], subtest.failureException): - self._write_status(subtest, "FAIL") - else: - self._write_status(subtest, "ERROR") - elif self.dots: - if issubclass(err[0], subtest.failureException): - self.stream.write('F') - else: - self.stream.write('E') - self.stream.flush() - super(TextTestResult, self).addSubTest(test, subtest, err) - - def addSuccess(self, test): - super(TextTestResult, self).addSuccess(test) - if self.showAll: - self._write_status(test, "ok") - elif self.dots: - self.stream.write('.') - self.stream.flush() - - def addError(self, test, err): - super(TextTestResult, self).addError(test, err) - if self.showAll: - self._write_status(test, "ERROR") - elif self.dots: - self.stream.write('E') - self.stream.flush() - - def addFailure(self, test, err): - super(TextTestResult, self).addFailure(test, err) - if self.showAll: - self._write_status(test, "FAIL") - elif self.dots: - self.stream.write('F') - self.stream.flush() - - def addSkip(self, test, reason): - super(TextTestResult, self).addSkip(test, reason) - if self.showAll: - self._write_status(test, "skipped {0!r}".format(reason)) - elif self.dots: - self.stream.write("s") - self.stream.flush() - - def addExpectedFailure(self, test, err): - super(TextTestResult, self).addExpectedFailure(test, err) - if self.showAll: - self.stream.writeln("expected failure") - self.stream.flush() - elif self.dots: - self.stream.write("x") - self.stream.flush() - - def addUnexpectedSuccess(self, test): - super(TextTestResult, self).addUnexpectedSuccess(test) - if self.showAll: - self.stream.writeln("unexpected success") - self.stream.flush() - elif self.dots: - self.stream.write("u") - self.stream.flush() - - def printErrors(self): - if self.dots or self.showAll: - self.stream.writeln() - self.stream.flush() - self.printErrorList('ERROR', self.errors) - self.printErrorList('FAIL', self.failures) - unexpectedSuccesses = getattr(self, 'unexpectedSuccesses', ()) - if unexpectedSuccesses: - self.stream.writeln(self.separator1) - for test in unexpectedSuccesses: - self.stream.writeln(f"UNEXPECTED SUCCESS: {self.getDescription(test)}") - self.stream.flush() - - def printErrorList(self, flavour, errors): - for test, err in errors: - self.stream.writeln(self.separator1) - self.stream.writeln("%s: %s" % (flavour,self.getDescription(test))) - 
self.stream.writeln(self.separator2) - self.stream.writeln("%s" % err) - self.stream.flush() - - -class TextTestRunner(object): - """A test runner class that displays results in textual form. - - It prints out the names of tests as they are run, errors as they - occur, and a summary of the results at the end of the test run. - """ - resultclass = TextTestResult - - def __init__(self, stream=None, descriptions=True, verbosity=1, - failfast=False, buffer=False, resultclass=None, warnings=None, - *, tb_locals=False, durations=None): - """Construct a TextTestRunner. - - Subclasses should accept **kwargs to ensure compatibility as the - interface changes. - """ - if stream is None: - stream = sys.stderr - self.stream = _WritelnDecorator(stream) - self.descriptions = descriptions - self.verbosity = verbosity - self.failfast = failfast - self.buffer = buffer - self.tb_locals = tb_locals - self.durations = durations - self.warnings = warnings - if resultclass is not None: - self.resultclass = resultclass - - def _makeResult(self): - try: - return self.resultclass(self.stream, self.descriptions, - self.verbosity, durations=self.durations) - except TypeError: - # didn't accept the durations argument - return self.resultclass(self.stream, self.descriptions, - self.verbosity) - - def _printDurations(self, result): - if not result.collectedDurations: - return - ls = sorted(result.collectedDurations, key=lambda x: x[1], - reverse=True) - if self.durations > 0: - ls = ls[:self.durations] - self.stream.writeln("Slowest test durations") - if hasattr(result, 'separator2'): - self.stream.writeln(result.separator2) - hidden = False - for test, elapsed in ls: - if self.verbosity < 2 and elapsed < 0.001: - hidden = True - continue - self.stream.writeln("%-10s %s" % ("%.3fs" % elapsed, test)) - if hidden: - self.stream.writeln("\n(durations < 0.001s were hidden; " - "use -v to show these durations)") - else: - self.stream.writeln("") - - def run(self, test): - "Run the given test case or test suite." 
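Illustrative usage, not part of the diff: driving TextTestRunner by hand, broadly the path unittest.main() takes internally; `Smoke` is again a throwaway case, and the `durations=5` keyword assumes Python 3.12+.

import unittest

class Smoke(unittest.TestCase):
    def test_passes(self):
        self.assertTrue(True)

suite = unittest.defaultTestLoader.loadTestsFromTestCase(Smoke)
runner = unittest.TextTestRunner(verbosity=2, durations=5)
assert runner.run(suite).wasSuccessful()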
- result = self._makeResult() - registerResult(result) - result.failfast = self.failfast - result.buffer = self.buffer - result.tb_locals = self.tb_locals - with warnings.catch_warnings(): - if self.warnings: - # if self.warnings is set, use it to filter all the warnings - warnings.simplefilter(self.warnings) - startTime = time.perf_counter() - startTestRun = getattr(result, 'startTestRun', None) - if startTestRun is not None: - startTestRun() - try: - test(result) - finally: - stopTestRun = getattr(result, 'stopTestRun', None) - if stopTestRun is not None: - stopTestRun() - stopTime = time.perf_counter() - timeTaken = stopTime - startTime - result.printErrors() - if self.durations is not None: - self._printDurations(result) - - if hasattr(result, 'separator2'): - self.stream.writeln(result.separator2) - - run = result.testsRun - self.stream.writeln("Ran %d test%s in %.3fs" % - (run, run != 1 and "s" or "", timeTaken)) - self.stream.writeln() - - expectedFails = unexpectedSuccesses = skipped = 0 - try: - results = map(len, (result.expectedFailures, - result.unexpectedSuccesses, - result.skipped)) - except AttributeError: - pass - else: - expectedFails, unexpectedSuccesses, skipped = results - - infos = [] - if not result.wasSuccessful(): - self.stream.write("FAILED") - failed, errored = len(result.failures), len(result.errors) - if failed: - infos.append("failures=%d" % failed) - if errored: - infos.append("errors=%d" % errored) - elif run == 0 and not skipped: - self.stream.write("NO TESTS RAN") - else: - self.stream.write("OK") - if skipped: - infos.append("skipped=%d" % skipped) - if expectedFails: - infos.append("expected failures=%d" % expectedFails) - if unexpectedSuccesses: - infos.append("unexpected successes=%d" % unexpectedSuccesses) - if infos: - self.stream.writeln(" (%s)" % (", ".join(infos),)) - else: - self.stream.write("\n") - self.stream.flush() - return result diff --git a/Python313_13_x64_Template/Lib/urllib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/urllib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 9fb4e40d..00000000 Binary files a/Python313_13_x64_Template/Lib/urllib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/urllib/__pycache__/error.cpython-313.pyc b/Python313_13_x64_Template/Lib/urllib/__pycache__/error.cpython-313.pyc deleted file mode 100644 index 4577b77f..00000000 Binary files a/Python313_13_x64_Template/Lib/urllib/__pycache__/error.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/urllib/__pycache__/parse.cpython-313.pyc b/Python313_13_x64_Template/Lib/urllib/__pycache__/parse.cpython-313.pyc deleted file mode 100644 index 5886cffd..00000000 Binary files a/Python313_13_x64_Template/Lib/urllib/__pycache__/parse.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/urllib/__pycache__/request.cpython-313.pyc b/Python313_13_x64_Template/Lib/urllib/__pycache__/request.cpython-313.pyc deleted file mode 100644 index 5f1f6c64..00000000 Binary files a/Python313_13_x64_Template/Lib/urllib/__pycache__/request.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/urllib/__pycache__/response.cpython-313.pyc b/Python313_13_x64_Template/Lib/urllib/__pycache__/response.cpython-313.pyc deleted file mode 100644 index 2a77b186..00000000 Binary files a/Python313_13_x64_Template/Lib/urllib/__pycache__/response.cpython-313.pyc and /dev/null differ diff --git 
a/Python313_13_x64_Template/Lib/urllib/parse.py b/Python313_13_x64_Template/Lib/urllib/parse.py deleted file mode 100644 index 14f66c5a..00000000 --- a/Python313_13_x64_Template/Lib/urllib/parse.py +++ /dev/null @@ -1,1264 +0,0 @@ -"""Parse (absolute and relative) URLs. - -The urllib.parse module is based upon the following RFC specifications. - -RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding -and L. Masinter, January 2005. - -RFC 2732: "Format for Literal IPv6 Addresses in URL's" by R. Hinden, B. Carpenter -and L. Masinter, December 1999. - -RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. -Berners-Lee, R. Fielding, and L. Masinter, August 1998. - -RFC 2368: "The mailto URL scheme", by P. Hoffman, L. Masinter, J. Zawinski, July 1998. - -RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June -1995. - -RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. -McCahill, December 1994 - -RFC 3986 is considered the current standard and any future changes to the -urllib.parse module should conform with it. The urllib.parse module is -currently not entirely compliant with this RFC due to de facto -scenarios for parsing, and for backward compatibility purposes, some -parsing quirks from older RFCs are retained. The testcases in -test_urlparse.py provide a good indicator of parsing behavior. - -The WHATWG URL Parser spec should also be considered. We are not compliant with -it either due to existing user code API behavior expectations (Hyrum's Law). -It serves as a useful guide when making changes. -""" - -from collections import namedtuple -import functools -import math -import re -import types -import warnings -import ipaddress - -__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", - "urlsplit", "urlunsplit", "urlencode", "parse_qs", - "parse_qsl", "quote", "quote_plus", "quote_from_bytes", - "unquote", "unquote_plus", "unquote_to_bytes", - "DefragResult", "ParseResult", "SplitResult", - "DefragResultBytes", "ParseResultBytes", "SplitResultBytes"] - -# A classification of schemes. -# The empty string classifies URLs with no scheme specified, -# being the default value returned by “urlsplit” and “urlparse”. - -uses_relative = ['', 'ftp', 'http', 'gopher', 'nntp', 'imap', - 'wais', 'file', 'https', 'shttp', 'mms', - 'prospero', 'rtsp', 'rtsps', 'rtspu', 'sftp', - 'svn', 'svn+ssh', 'ws', 'wss'] - -uses_netloc = ['', 'ftp', 'http', 'gopher', 'nntp', 'telnet', - 'imap', 'wais', 'file', 'mms', 'https', 'shttp', - 'snews', 'prospero', 'rtsp', 'rtsps', 'rtspu', 'rsync', - 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh', - 'ws', 'wss', 'itms-services'] - -uses_params = ['', 'ftp', 'hdl', 'prospero', 'http', 'imap', - 'https', 'shttp', 'rtsp', 'rtsps', 'rtspu', 'sip', - 'sips', 'mms', 'sftp', 'tel'] - -# These are not actually used anymore, but should stay for backwards -# compatibility. (They are undocumented, but have a public-looking name.)
- -non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', - 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] - -uses_query = ['', 'http', 'wais', 'imap', 'https', 'shttp', 'mms', - 'gopher', 'rtsp', 'rtsps', 'rtspu', 'sip', 'sips'] - -uses_fragment = ['', 'ftp', 'hdl', 'http', 'gopher', 'news', - 'nntp', 'wais', 'https', 'shttp', 'snews', - 'file', 'prospero'] - -# Characters valid in scheme names -scheme_chars = ('abcdefghijklmnopqrstuvwxyz' - 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - '0123456789' - '+-.') - -# Leading and trailing C0 control and space to be stripped per WHATWG spec. -# == "".join([chr(i) for i in range(0, 0x20 + 1)]) -_WHATWG_C0_CONTROL_OR_SPACE = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f ' - -# Unsafe bytes to be removed per WHATWG spec -_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n'] - -def clear_cache(): - """Clear internal performance caches. Undocumented; some tests want it.""" - urlsplit.cache_clear() - _byte_quoter_factory.cache_clear() - -# Helpers for bytes handling -# For 3.2, we deliberately require applications that -# handle improperly quoted URLs to do their own -# decoding and encoding. If valid use cases are -# presented, we may relax this by using latin-1 -# decoding internally for 3.3 -_implicit_encoding = 'ascii' -_implicit_errors = 'strict' - -def _noop(obj): - return obj - -def _encode_result(obj, encoding=_implicit_encoding, - errors=_implicit_errors): - return obj.encode(encoding, errors) - -def _decode_args(args, encoding=_implicit_encoding, - errors=_implicit_errors): - return tuple(x.decode(encoding, errors) if x else '' for x in args) - -def _coerce_args(*args): - # Invokes decode if necessary to create str args - # and returns the coerced inputs along with - # an appropriate result coercion function - # - noop for str inputs - # - encoding function otherwise - str_input = isinstance(args[0], str) - for arg in args[1:]: - # We special-case the empty string to support the - # "scheme=''" default argument to some functions - if arg and isinstance(arg, str) != str_input: - raise TypeError("Cannot mix str and non-str arguments") - if str_input: - return args + (_noop,) - return _decode_args(args) + (_encode_result,) - -# Result objects are more helpful than simple tuples -class _ResultMixinStr(object): - """Standard approach to encoding parsed results from str to bytes""" - __slots__ = () - - def encode(self, encoding='ascii', errors='strict'): - return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self)) - - -class _ResultMixinBytes(object): - """Standard approach to decoding parsed results from bytes to str""" - __slots__ = () - - def decode(self, encoding='ascii', errors='strict'): - return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self)) - - -class _NetlocResultMixinBase(object): - """Shared methods for the parsed result objects containing a netloc element""" - __slots__ = () - - @property - def username(self): - return self._userinfo[0] - - @property - def password(self): - return self._userinfo[1] - - @property - def hostname(self): - hostname = self._hostinfo[0] - if not hostname: - return None - # Scoped IPv6 address may have zone info, which must not be lowercased - # like http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys - separator = '%' if isinstance(hostname, str) else b'%' - hostname, percent, zone = hostname.partition(separator) - return hostname.lower() + percent + zone - - @property - def port(self): - port = 
self._hostinfo[1] - if port is not None: - if port.isdigit() and port.isascii(): - port = int(port) - else: - raise ValueError(f"Port could not be cast to integer value as {port!r}") - if not (0 <= port <= 65535): - raise ValueError("Port out of range 0-65535") - return port - - __class_getitem__ = classmethod(types.GenericAlias) - - -class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr): - __slots__ = () - - @property - def _userinfo(self): - netloc = self.netloc - userinfo, have_info, hostinfo = netloc.rpartition('@') - if have_info: - username, have_password, password = userinfo.partition(':') - if not have_password: - password = None - else: - username = password = None - return username, password - - @property - def _hostinfo(self): - netloc = self.netloc - _, _, hostinfo = netloc.rpartition('@') - _, have_open_br, bracketed = hostinfo.partition('[') - if have_open_br: - hostname, _, port = bracketed.partition(']') - _, _, port = port.partition(':') - else: - hostname, _, port = hostinfo.partition(':') - if not port: - port = None - return hostname, port - - -class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes): - __slots__ = () - - @property - def _userinfo(self): - netloc = self.netloc - userinfo, have_info, hostinfo = netloc.rpartition(b'@') - if have_info: - username, have_password, password = userinfo.partition(b':') - if not have_password: - password = None - else: - username = password = None - return username, password - - @property - def _hostinfo(self): - netloc = self.netloc - _, _, hostinfo = netloc.rpartition(b'@') - _, have_open_br, bracketed = hostinfo.partition(b'[') - if have_open_br: - hostname, _, port = bracketed.partition(b']') - _, _, port = port.partition(b':') - else: - hostname, _, port = hostinfo.partition(b':') - if not port: - port = None - return hostname, port - - -_DefragResultBase = namedtuple('DefragResult', 'url fragment') -_SplitResultBase = namedtuple( - 'SplitResult', 'scheme netloc path query fragment') -_ParseResultBase = namedtuple( - 'ParseResult', 'scheme netloc path params query fragment') - -_DefragResultBase.__doc__ = """ -DefragResult(url, fragment) - -A 2-tuple that contains the url without fragment identifier and the fragment -identifier as a separate argument. -""" - -_DefragResultBase.url.__doc__ = """The URL with no fragment identifier.""" - -_DefragResultBase.fragment.__doc__ = """ -Fragment identifier separated from URL, that allows indirect identification of a -secondary resource by reference to a primary resource and additional identifying -information. -""" - -_SplitResultBase.__doc__ = """ -SplitResult(scheme, netloc, path, query, fragment) - -A 5-tuple that contains the different components of a URL. Similar to -ParseResult, but does not split params. -""" - -_SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request.""" - -_SplitResultBase.netloc.__doc__ = """ -Network location where the request is made to. -""" - -_SplitResultBase.path.__doc__ = """ -The hierarchical path, such as the path to a file to download. -""" - -_SplitResultBase.query.__doc__ = """ -The query component, that contains non-hierarchical data, that along with data -in path component, identifies a resource in the scope of URI's scheme and -network location. -""" - -_SplitResultBase.fragment.__doc__ = """ -Fragment identifier, that allows indirect identification of a secondary resource -by reference to a primary resource and additional identifying information. 
-""" - -_ParseResultBase.__doc__ = """ -ParseResult(scheme, netloc, path, params, query, fragment) - -A 6-tuple that contains components of a parsed URL. -""" - -_ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__ -_ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__ -_ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__ -_ParseResultBase.params.__doc__ = """ -Parameters for last path element used to dereference the URI in order to provide -access to perform some operation on the resource. -""" - -_ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__ -_ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__ - - -# For backwards compatibility, alias _NetlocResultMixinStr -# ResultBase is no longer part of the documented API, but it is -# retained since deprecating it isn't worth the hassle -ResultBase = _NetlocResultMixinStr - -# Structured result objects for string data -class DefragResult(_DefragResultBase, _ResultMixinStr): - __slots__ = () - def geturl(self): - if self.fragment: - return self.url + '#' + self.fragment - else: - return self.url - -class SplitResult(_SplitResultBase, _NetlocResultMixinStr): - __slots__ = () - def geturl(self): - return urlunsplit(self) - -class ParseResult(_ParseResultBase, _NetlocResultMixinStr): - __slots__ = () - def geturl(self): - return urlunparse(self) - -# Structured result objects for bytes data -class DefragResultBytes(_DefragResultBase, _ResultMixinBytes): - __slots__ = () - def geturl(self): - if self.fragment: - return self.url + b'#' + self.fragment - else: - return self.url - -class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes): - __slots__ = () - def geturl(self): - return urlunsplit(self) - -class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes): - __slots__ = () - def geturl(self): - return urlunparse(self) - -# Set up the encode/decode result pairs -def _fix_result_transcoding(): - _result_pairs = ( - (DefragResult, DefragResultBytes), - (SplitResult, SplitResultBytes), - (ParseResult, ParseResultBytes), - ) - for _decoded, _encoded in _result_pairs: - _decoded._encoded_counterpart = _encoded - _encoded._decoded_counterpart = _decoded - -_fix_result_transcoding() -del _fix_result_transcoding - -def urlparse(url, scheme='', allow_fragments=True): - """Parse a URL into 6 components: - :///;?# - - The result is a named 6-tuple with fields corresponding to the - above. It is either a ParseResult or ParseResultBytes object, - depending on the type of the url parameter. - - The username, password, hostname, and port sub-components of netloc - can also be accessed as attributes of the returned object. - - The scheme argument provides the default value of the scheme - component when no scheme is found in url. - - If allow_fragments is False, no attempt is made to separate the - fragment component from the previous component, which can be either - path or query. - - Note that % escapes are not expanded. - - urlsplit() should generally be used instead of urlparse(). 
- """ - url, scheme, _coerce_result = _coerce_args(url, scheme) - splitresult = urlsplit(url, scheme, allow_fragments) - scheme, netloc, url, query, fragment = splitresult - if scheme in uses_params and ';' in url: - url, params = _splitparams(url) - else: - params = '' - result = ParseResult(scheme, netloc, url, params, query, fragment) - return _coerce_result(result) - -def _splitparams(url): - if '/' in url: - i = url.find(';', url.rfind('/')) - if i < 0: - return url, '' - else: - i = url.find(';') - return url[:i], url[i+1:] - -def _splitnetloc(url, start=0): - delim = len(url) # position of end of domain part of url, default is end - for c in '/?#': # look for delimiters; the order is NOT important - wdelim = url.find(c, start) # find first of this delim - if wdelim >= 0: # if found - delim = min(delim, wdelim) # use earliest delim position - return url[start:delim], url[delim:] # return (domain, rest) - -def _checknetloc(netloc): - if not netloc or netloc.isascii(): - return - # looking for characters like \u2100 that expand to 'a/c' - # IDNA uses NFKC equivalence, so normalize for this check - import unicodedata - n = netloc.replace('@', '') # ignore characters already included - n = n.replace(':', '') # but not the surrounding text - n = n.replace('#', '') - n = n.replace('?', '') - netloc2 = unicodedata.normalize('NFKC', n) - if n == netloc2: - return - for c in '/?#@:': - if c in netloc2: - raise ValueError("netloc '" + netloc + "' contains invalid " + - "characters under NFKC normalization") - -def _check_bracketed_netloc(netloc): - # Note that this function must mirror the splitting - # done in NetlocResultMixins._hostinfo(). - hostname_and_port = netloc.rpartition('@')[2] - before_bracket, have_open_br, bracketed = hostname_and_port.partition('[') - if have_open_br: - # No data is allowed before a bracket. - if before_bracket: - raise ValueError("Invalid IPv6 URL") - hostname, _, port = bracketed.partition(']') - # No data is allowed after the bracket but before the port delimiter. - if port and not port.startswith(":"): - raise ValueError("Invalid IPv6 URL") - else: - hostname, _, port = hostname_and_port.partition(':') - _check_bracketed_host(hostname) - -# Valid bracketed hosts are defined in -# https://www.rfc-editor.org/rfc/rfc3986#page-49 and https://url.spec.whatwg.org/ -def _check_bracketed_host(hostname): - if hostname.startswith('v'): - if not re.match(r"\Av[a-fA-F0-9]+\..+\Z", hostname): - raise ValueError(f"IPvFuture address is invalid") - else: - ip = ipaddress.ip_address(hostname) # Throws Value Error if not IPv6 or IPv4 - if isinstance(ip, ipaddress.IPv4Address): - raise ValueError(f"An IPv4 address cannot be in brackets") - -# typed=True avoids BytesWarnings being emitted during cache key -# comparison since this API supports both bytes and str input. -@functools.lru_cache(typed=True) -def urlsplit(url, scheme='', allow_fragments=True): - """Parse a URL into 5 components: - :///?# - - The result is a named 5-tuple with fields corresponding to the - above. It is either a SplitResult or SplitResultBytes object, - depending on the type of the url parameter. - - The username, password, hostname, and port sub-components of netloc - can also be accessed as attributes of the returned object. - - The scheme argument provides the default value of the scheme - component when no scheme is found in url. - - If allow_fragments is False, no attempt is made to separate the - fragment component from the previous component, which can be either - path or query. 
- - Note that % escapes are not expanded. - """ - - url, scheme, _coerce_result = _coerce_args(url, scheme) - # Only lstrip url as some applications rely on preserving trailing space. - # (https://url.spec.whatwg.org/#concept-basic-url-parser would strip both) - url = url.lstrip(_WHATWG_C0_CONTROL_OR_SPACE) - scheme = scheme.strip(_WHATWG_C0_CONTROL_OR_SPACE) - - for b in _UNSAFE_URL_BYTES_TO_REMOVE: - url = url.replace(b, "") - scheme = scheme.replace(b, "") - - allow_fragments = bool(allow_fragments) - netloc = query = fragment = '' - i = url.find(':') - if i > 0 and url[0].isascii() and url[0].isalpha(): - for c in url[:i]: - if c not in scheme_chars: - break - else: - scheme, url = url[:i].lower(), url[i+1:] - if url[:2] == '//': - netloc, url = _splitnetloc(url, 2) - if (('[' in netloc and ']' not in netloc) or - (']' in netloc and '[' not in netloc)): - raise ValueError("Invalid IPv6 URL") - if '[' in netloc and ']' in netloc: - _check_bracketed_netloc(netloc) - if allow_fragments and '#' in url: - url, fragment = url.split('#', 1) - if '?' in url: - url, query = url.split('?', 1) - _checknetloc(netloc) - v = SplitResult(scheme, netloc, url, query, fragment) - return _coerce_result(v) - -def urlunparse(components): - """Put a parsed URL back together again. This may result in a - slightly different, but equivalent URL, if the URL that was parsed - originally had redundant delimiters, e.g. a ? with an empty query - (the draft states that these are equivalent).""" - scheme, netloc, url, params, query, fragment, _coerce_result = ( - _coerce_args(*components)) - if params: - url = "%s;%s" % (url, params) - return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment))) - -def urlunsplit(components): - """Combine the elements of a tuple as returned by urlsplit() into a - complete URL as a string. The data argument can be any five-item iterable. - This may result in a slightly different, but equivalent URL, if the URL that - was parsed originally had unnecessary delimiters (for example, a ? with an - empty query; the RFC states that these are equivalent).""" - scheme, netloc, url, query, fragment, _coerce_result = ( - _coerce_args(*components)) - if netloc: - if url and url[:1] != '/': url = '/' + url - url = '//' + netloc + url - elif url[:2] == '//': - url = '//' + url - elif scheme and scheme in uses_netloc and (not url or url[:1] == '/'): - url = '//' + url - if scheme: - url = scheme + ':' + url - if query: - url = url + '?' 
+ query - if fragment: - url = url + '#' + fragment - return _coerce_result(url) - -def urljoin(base, url, allow_fragments=True): - """Join a base URL and a possibly relative URL to form an absolute - interpretation of the latter.""" - if not base: - return url - if not url: - return base - - base, url, _coerce_result = _coerce_args(base, url) - bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ - urlparse(base, '', allow_fragments) - scheme, netloc, path, params, query, fragment = \ - urlparse(url, bscheme, allow_fragments) - - if scheme != bscheme or scheme not in uses_relative: - return _coerce_result(url) - if scheme in uses_netloc: - if netloc: - return _coerce_result(urlunparse((scheme, netloc, path, - params, query, fragment))) - netloc = bnetloc - - if not path and not params: - path = bpath - params = bparams - if not query: - query = bquery - return _coerce_result(urlunparse((scheme, netloc, path, - params, query, fragment))) - - base_parts = bpath.split('/') - if base_parts[-1] != '': - # the last item is not a directory, so will not be taken into account - # in resolving the relative path - del base_parts[-1] - - # for rfc3986, ignore all base path should the first character be root. - if path[:1] == '/': - segments = path.split('/') - else: - segments = base_parts + path.split('/') - # filter out elements that would cause redundant slashes on re-joining - # the resolved_path - segments[1:-1] = filter(None, segments[1:-1]) - - resolved_path = [] - - for seg in segments: - if seg == '..': - try: - resolved_path.pop() - except IndexError: - # ignore any .. segments that would otherwise cause an IndexError - # when popped from resolved_path if resolving for rfc3986 - pass - elif seg == '.': - continue - else: - resolved_path.append(seg) - - if segments[-1] in ('.', '..'): - # do some post-processing here. if the last segment was a relative dir, - # then we need to append the trailing '/' - resolved_path.append('') - - return _coerce_result(urlunparse((scheme, netloc, '/'.join( - resolved_path) or '/', params, query, fragment))) - - -def urldefrag(url): - """Removes any existing fragment from URL. - - Returns a tuple of the defragmented URL and the fragment. If - the URL contained no fragments, the second element is the - empty string. - """ - url, _coerce_result = _coerce_args(url) - if '#' in url: - s, n, p, a, q, frag = urlparse(url) - defrag = urlunparse((s, n, p, a, q, '')) - else: - frag = '' - defrag = url - return _coerce_result(DefragResult(defrag, frag)) - -_hexdig = '0123456789ABCDEFabcdef' -_hextobyte = None - -def unquote_to_bytes(string): - """unquote_to_bytes('abc%20def') -> b'abc def'.""" - return bytes(_unquote_impl(string)) - -def _unquote_impl(string: bytes | bytearray | str) -> bytes | bytearray: - # Note: strings are encoded as UTF-8. This is only an issue if it contains - # unescaped non-ASCII characters, which URIs should not. - if not string: - # Is it a string-like object? 
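
A few worked cases for the urljoin() segment resolution and urldefrag() defined above, taken from the RFC 3986 reference-resolution examples:

    from urllib.parse import urljoin, urldefrag

    base = "http://a/b/c/d;p?q"
    assert urljoin(base, "g") == "http://a/b/c/g"
    assert urljoin(base, "../g") == "http://a/b/g"
    assert urljoin(base, "//other/e") == "http://other/e"  # network-path reference

    url, frag = urldefrag("http://example.com/page#sec2")
    assert (url, frag) == ("http://example.com/page", "sec2")
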
- string.split - return b'' - if isinstance(string, str): - string = string.encode('utf-8') - bits = string.split(b'%') - if len(bits) == 1: - return string - res = bytearray(bits[0]) - append = res.extend - # Delay the initialization of the table to not waste memory - # if the function is never called - global _hextobyte - if _hextobyte is None: - _hextobyte = {(a + b).encode(): bytes.fromhex(a + b) - for a in _hexdig for b in _hexdig} - for item in bits[1:]: - try: - append(_hextobyte[item[:2]]) - append(item[2:]) - except KeyError: - append(b'%') - append(item) - return res - -_asciire = re.compile('([\x00-\x7f]+)') - -def _generate_unquoted_parts(string, encoding, errors): - previous_match_end = 0 - for ascii_match in _asciire.finditer(string): - start, end = ascii_match.span() - yield string[previous_match_end:start] # Non-ASCII - # The ascii_match[1] group == string[start:end]. - yield _unquote_impl(ascii_match[1]).decode(encoding, errors) - previous_match_end = end - yield string[previous_match_end:] # Non-ASCII tail - -def unquote(string, encoding='utf-8', errors='replace'): - """Replace %xx escapes by their single-character equivalent. The optional - encoding and errors parameters specify how to decode percent-encoded - sequences into Unicode characters, as accepted by the bytes.decode() - method. - By default, percent-encoded sequences are decoded with UTF-8, and invalid - sequences are replaced by a placeholder character. - - unquote('abc%20def') -> 'abc def'. - """ - if isinstance(string, bytes): - return _unquote_impl(string).decode(encoding, errors) - if '%' not in string: - # Is it a string-like object? - string.split - return string - if encoding is None: - encoding = 'utf-8' - if errors is None: - errors = 'replace' - return ''.join(_generate_unquoted_parts(string, encoding, errors)) - - -def parse_qs(qs, keep_blank_values=False, strict_parsing=False, - encoding='utf-8', errors='replace', max_num_fields=None, separator='&'): - """Parse a query given as a string argument. - - Arguments: - - qs: percent-encoded query string to be parsed - - keep_blank_values: flag indicating whether blank values in - percent-encoded queries should be treated as blank strings. - A true value indicates that blanks should be retained as - blank strings. The default false value indicates that - blank values are to be ignored and treated as if they were - not included. - - strict_parsing: flag indicating what to do with parsing errors. - If false (the default), errors are silently ignored. - If true, errors raise a ValueError exception. - - encoding and errors: specify how to decode percent-encoded sequences - into Unicode characters, as accepted by the bytes.decode() method. - - max_num_fields: int. If set, then throws a ValueError if there - are more than n fields read by parse_qsl(). - - separator: str. The symbol to use for separating the query arguments. - Defaults to &. - - Returns a dictionary. - """ - parsed_result = {} - pairs = parse_qsl(qs, keep_blank_values, strict_parsing, - encoding=encoding, errors=errors, - max_num_fields=max_num_fields, separator=separator) - for name, value in pairs: - if name in parsed_result: - parsed_result[name].append(value) - else: - parsed_result[name] = [value] - return parsed_result - - -def parse_qsl(qs, keep_blank_values=False, strict_parsing=False, - encoding='utf-8', errors='replace', max_num_fields=None, separator='&'): - """Parse a query given as a string argument. 
- - Arguments: - - qs: percent-encoded query string to be parsed - - keep_blank_values: flag indicating whether blank values in - percent-encoded queries should be treated as blank strings. - A true value indicates that blanks should be retained as blank - strings. The default false value indicates that blank values - are to be ignored and treated as if they were not included. - - strict_parsing: flag indicating what to do with parsing errors. If - false (the default), errors are silently ignored. If true, - errors raise a ValueError exception. - - encoding and errors: specify how to decode percent-encoded sequences - into Unicode characters, as accepted by the bytes.decode() method. - - max_num_fields: int. If set, then throws a ValueError - if there are more than n fields read by parse_qsl(). - - separator: str. The symbol to use for separating the query arguments. - Defaults to &. - - Returns a list, as G-d intended. - """ - - if not separator or not isinstance(separator, (str, bytes)): - raise ValueError("Separator must be of type string or bytes.") - if isinstance(qs, str): - if not isinstance(separator, str): - separator = str(separator, 'ascii') - eq = '=' - def _unquote(s): - return unquote_plus(s, encoding=encoding, errors=errors) - else: - if not qs: - return [] - # Use memoryview() to reject integers and iterables, - # acceptable by the bytes constructor. - qs = bytes(memoryview(qs)) - if isinstance(separator, str): - separator = bytes(separator, 'ascii') - eq = b'=' - def _unquote(s): - return unquote_to_bytes(s.replace(b'+', b' ')) - - if not qs: - return [] - - # If max_num_fields is defined then check that the number of fields - # is less than max_num_fields. This prevents a memory exhaustion DOS - # attack via post bodies with many fields. - if max_num_fields is not None: - num_fields = 1 + qs.count(separator) - if max_num_fields < num_fields: - raise ValueError('Max number of fields exceeded') - - r = [] - for name_value in qs.split(separator): - if name_value or strict_parsing: - name, has_eq, value = name_value.partition(eq) - if not has_eq and strict_parsing: - raise ValueError("bad query field: %r" % (name_value,)) - if value or keep_blank_values: - name = _unquote(name) - value = _unquote(value) - r.append((name, value)) - return r - -def unquote_plus(string, encoding='utf-8', errors='replace'): - """Like unquote(), but also replace plus signs by spaces, as required for - unquoting HTML form values. - - unquote_plus('%7e/abc+def') -> '~/abc def' - """ - string = string.replace('+', ' ') - return unquote(string, encoding, errors) - -_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - b'abcdefghijklmnopqrstuvwxyz' - b'0123456789' - b'_.-~') -_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE) - -def __getattr__(name): - if name == 'Quoter': - warnings.warn('Deprecated in 3.11. ' - 'urllib.parse.Quoter will be removed in Python 3.14. ' - 'It was not intended to be a public API.', - DeprecationWarning, stacklevel=2) - return _Quoter - raise AttributeError(f'module {__name__!r} has no attribute {name!r}') - -class _Quoter(dict): - """A mapping from bytes numbers (in range(0,256)) to strings. - - String values are percent-encoded byte values, unless the key < 128, and - in either of the specified safe set, or the always safe set. - """ - # Keeps a cache internally, via __missing__, for efficiency (lookups - # of cached keys don't call Python code at all). 
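
A short sketch of the parse_qs()/parse_qsl() behaviour described above, including the max_num_fields guard against memory-exhaustion attacks:

    from urllib.parse import parse_qs, parse_qsl

    assert parse_qsl("a=1&a=2&b=") == [("a", "1"), ("a", "2")]   # blanks dropped
    assert parse_qsl("a=1&b=", keep_blank_values=True) == [("a", "1"), ("b", "")]
    assert parse_qs("a=1&a=2") == {"a": ["1", "2"]}              # values grouped

    # num_fields = 1 + qs.count(separator); here 3 > 2, so this raises:
    try:
        parse_qsl("a=1&b=2&c=3", max_num_fields=2)
    except ValueError:
        pass  # 'Max number of fields exceeded'
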
- def __init__(self, safe): - """safe: bytes object.""" - self.safe = _ALWAYS_SAFE.union(safe) - - def __repr__(self): - return f"" - - def __missing__(self, b): - # Handle a cache miss. Store quoted string in cache and return. - res = chr(b) if b in self.safe else '%{:02X}'.format(b) - self[b] = res - return res - -def quote(string, safe='/', encoding=None, errors=None): - """quote('abc def') -> 'abc%20def' - - Each part of a URL, e.g. the path info, the query, etc., has a - different set of reserved characters that must be quoted. The - quote function offers a cautious (not minimal) way to quote a - string for most of these parts. - - RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists - the following (un)reserved characters. - - unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" - reserved = gen-delims / sub-delims - gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" - sub-delims = "!" / "$" / "&" / "'" / "(" / ")" - / "*" / "+" / "," / ";" / "=" - - Each of the reserved characters is reserved in some component of a URL, - but not necessarily in all of them. - - The quote function %-escapes all characters that are neither in the - unreserved chars ("always safe") nor the additional chars set via the - safe arg. - - The default for the safe arg is '/'. The character is reserved, but in - typical usage the quote function is being called on a path where the - existing slash characters are to be preserved. - - Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings. - Now, "~" is included in the set of unreserved characters. - - string and safe may be either str or bytes objects. encoding and errors - must not be specified if string is a bytes object. - - The optional encoding and errors parameters specify how to deal with - non-ASCII characters, as accepted by the str.encode method. - By default, encoding='utf-8' (characters are encoded with UTF-8), and - errors='strict' (unsupported characters raise a UnicodeEncodeError). - """ - if isinstance(string, str): - if not string: - return string - if encoding is None: - encoding = 'utf-8' - if errors is None: - errors = 'strict' - string = string.encode(encoding, errors) - else: - if encoding is not None: - raise TypeError("quote() doesn't support 'encoding' for bytes") - if errors is not None: - raise TypeError("quote() doesn't support 'errors' for bytes") - return quote_from_bytes(string, safe) - -def quote_plus(string, safe='', encoding=None, errors=None): - """Like quote(), but also replace ' ' with '+', as required for quoting - HTML form values. Plus signs in the original string are escaped unless - they are included in safe. It also does not have safe default to '/'. - """ - # Check if ' ' in string, where string may either be a str or bytes. If - # there are no spaces, the regular quote will produce the right answer. - if ((isinstance(string, str) and ' ' not in string) or - (isinstance(string, bytes) and b' ' not in string)): - return quote(string, safe, encoding, errors) - if isinstance(safe, str): - space = ' ' - else: - space = b' ' - string = quote(string, safe + space, encoding, errors) - return string.replace(' ', '+') - -# Expectation: A typical program is unlikely to create more than 5 of these. -@functools.lru_cache -def _byte_quoter_factory(safe): - return _Quoter(safe).__getitem__ - -def quote_from_bytes(bs, safe='/'): - """Like quote(), but accepts a bytes object rather than a str, and does - not perform string-to-bytes encoding. It always returns an ASCII string. 
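
Illustrating the safe-set handling that quote() and quote_plus() document above:

    from urllib.parse import quote, quote_plus

    assert quote("abc def") == "abc%20def"
    assert quote("/a b/c") == "/a%20b/c"               # '/' is safe by default
    assert quote("/a b/c", safe="") == "%2Fa%20b%2Fc"  # ...unless overridden
    assert quote_plus("a b+c") == "a+b%2Bc"            # space -> '+', '+' escaped
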
- quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f' - """ - if not isinstance(bs, (bytes, bytearray)): - raise TypeError("quote_from_bytes() expected bytes") - if not bs: - return '' - if isinstance(safe, str): - # Normalize 'safe' by converting to bytes and removing non-ASCII chars - safe = safe.encode('ascii', 'ignore') - else: - # List comprehensions are faster than generator expressions. - safe = bytes([c for c in safe if c < 128]) - if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe): - return bs.decode() - quoter = _byte_quoter_factory(safe) - if (bs_len := len(bs)) < 200_000: - return ''.join(map(quoter, bs)) - else: - # This saves memory - https://github.com/python/cpython/issues/95865 - chunk_size = math.isqrt(bs_len) - chunks = [''.join(map(quoter, bs[i:i+chunk_size])) - for i in range(0, bs_len, chunk_size)] - return ''.join(chunks) - -def urlencode(query, doseq=False, safe='', encoding=None, errors=None, - quote_via=quote_plus): - """Encode a dict or sequence of two-element tuples into a URL query string. - - If any values in the query arg are sequences and doseq is true, each - sequence element is converted to a separate parameter. - - If the query arg is a sequence of two-element tuples, the order of the - parameters in the output will match the order of parameters in the - input. - - The components of a query arg may each be either a string or a bytes type. - - The safe, encoding, and errors parameters are passed down to the function - specified by quote_via (encoding and errors only if a component is a str). - """ - - if hasattr(query, "items"): - query = query.items() - else: - # It's a bother at times that strings and string-like objects are - # sequences. - try: - # non-sequence items should not work with len() - # non-empty strings will fail this - if len(query) and not isinstance(query[0], tuple): - raise TypeError - # Zero-length sequences of all types will get here and succeed, - # but that's a minor nit. Since the original implementation - # allowed empty dicts that type of behavior probably should be - # preserved for consistency - except TypeError as err: - raise TypeError("not a valid non-string sequence " - "or mapping object") from err - - l = [] - if not doseq: - for k, v in query: - if isinstance(k, bytes): - k = quote_via(k, safe) - else: - k = quote_via(str(k), safe, encoding, errors) - - if isinstance(v, bytes): - v = quote_via(v, safe) - else: - v = quote_via(str(v), safe, encoding, errors) - l.append(k + '=' + v) - else: - for k, v in query: - if isinstance(k, bytes): - k = quote_via(k, safe) - else: - k = quote_via(str(k), safe, encoding, errors) - - if isinstance(v, bytes): - v = quote_via(v, safe) - l.append(k + '=' + v) - elif isinstance(v, str): - v = quote_via(v, safe, encoding, errors) - l.append(k + '=' + v) - else: - try: - # Is this a sufficient test for sequence-ness? - x = len(v) - except TypeError: - # not a sequence - v = quote_via(str(v), safe, encoding, errors) - l.append(k + '=' + v) - else: - # loop over the sequence - for elt in v: - if isinstance(elt, bytes): - elt = quote_via(elt, safe) - else: - elt = quote_via(str(elt), safe, encoding, errors) - l.append(k + '=' + elt) - return '&'.join(l) - - -def to_bytes(url): - warnings.warn("urllib.parse.to_bytes() is deprecated as of 3.8", - DeprecationWarning, stacklevel=2) - return _to_bytes(url) - - -def _to_bytes(url): - """to_bytes(u"URL") --> 'URL'.""" - # Most URL schemes require ASCII. If that changes, the conversion - # can be relaxed. 
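
A minimal sketch of urlencode(), including the doseq expansion and the quote_via hook discussed in its docstring above:

    from urllib.parse import urlencode, quote

    assert urlencode({"q": "a b", "n": 1}) == "q=a+b&n=1"
    # doseq=True turns sequence values into repeated parameters:
    assert urlencode({"tag": ["x", "y"]}, doseq=True) == "tag=x&tag=y"
    # quote_via=quote encodes spaces as %20 instead of '+':
    assert urlencode({"q": "a b"}, quote_via=quote) == "q=a%20b"
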
- # XXX get rid of to_bytes() - if isinstance(url, str): - try: - url = url.encode("ASCII").decode() - except UnicodeError: - raise UnicodeError("URL " + repr(url) + - " contains non-ASCII characters") - return url - - -def unwrap(url): - """Transform a string like '' into 'scheme://host/path'. - - The string is returned unchanged if it's not a wrapped URL. - """ - url = str(url).strip() - if url[:1] == '<' and url[-1:] == '>': - url = url[1:-1].strip() - if url[:4] == 'URL:': - url = url[4:].strip() - return url - - -def splittype(url): - warnings.warn("urllib.parse.splittype() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splittype(url) - - -_typeprog = None -def _splittype(url): - """splittype('type:opaquestring') --> 'type', 'opaquestring'.""" - global _typeprog - if _typeprog is None: - _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL) - - match = _typeprog.match(url) - if match: - scheme, data = match.groups() - return scheme.lower(), data - return None, url - - -def splithost(url): - warnings.warn("urllib.parse.splithost() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splithost(url) - - -_hostprog = None -def _splithost(url): - """splithost('//host[:port]/path') --> 'host[:port]', '/path'.""" - global _hostprog - if _hostprog is None: - _hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL) - - match = _hostprog.match(url) - if match: - host_port, path = match.groups() - if path and path[0] != '/': - path = '/' + path - return host_port, path - return None, url - - -def splituser(host): - warnings.warn("urllib.parse.splituser() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splituser(host) - - -def _splituser(host): - """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" - user, delim, host = host.rpartition('@') - return (user if delim else None), host - - -def splitpasswd(user): - warnings.warn("urllib.parse.splitpasswd() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splitpasswd(user) - - -def _splitpasswd(user): - """splitpasswd('user:passwd') -> 'user', 'passwd'.""" - user, delim, passwd = user.partition(':') - return user, (passwd if delim else None) - - -def splitport(host): - warnings.warn("urllib.parse.splitport() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splitport(host) - - -# splittag('/path#tag') --> '/path', 'tag' -_portprog = None -def _splitport(host): - """splitport('host:port') --> 'host', 'port'.""" - global _portprog - if _portprog is None: - _portprog = re.compile('(.*):([0-9]*)', re.DOTALL) - - match = _portprog.fullmatch(host) - if match: - host, port = match.groups() - if port: - return host, port - return host, None - - -def splitnport(host, defport=-1): - warnings.warn("urllib.parse.splitnport() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splitnport(host, defport) - - -def _splitnport(host, defport=-1): - """Split host and port, returning numeric port. - Return given default port if no ':' found; defaults to -1. - Return numerical port if a valid number is found after ':'. 
- Return None if ':' but not a valid number.""" - host, delim, port = host.rpartition(':') - if not delim: - host = port - elif port: - if port.isdigit() and port.isascii(): - nport = int(port) - else: - nport = None - return host, nport - return host, defport - - -def splitquery(url): - warnings.warn("urllib.parse.splitquery() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splitquery(url) - - -def _splitquery(url): - """splitquery('/path?query') --> '/path', 'query'.""" - path, delim, query = url.rpartition('?') - if delim: - return path, query - return url, None - - -def splittag(url): - warnings.warn("urllib.parse.splittag() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splittag(url) - - -def _splittag(url): - """splittag('/path#tag') --> '/path', 'tag'.""" - path, delim, tag = url.rpartition('#') - if delim: - return path, tag - return url, None - - -def splitattr(url): - warnings.warn("urllib.parse.splitattr() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splitattr(url) - - -def _splitattr(url): - """splitattr('/path;attr1=value1;attr2=value2;...') -> - '/path', ['attr1=value1', 'attr2=value2', ...].""" - words = url.split(';') - return words[0], words[1:] - - -def splitvalue(attr): - warnings.warn("urllib.parse.splitvalue() is deprecated as of 3.8, " - "use urllib.parse.parse_qsl() instead", - DeprecationWarning, stacklevel=2) - return _splitvalue(attr) - - -def _splitvalue(attr): - """splitvalue('attr=value') --> 'attr', 'value'.""" - attr, delim, value = attr.partition('=') - return attr, (value if delim else None) diff --git a/Python313_13_x64_Template/Lib/urllib/request.py b/Python313_13_x64_Template/Lib/urllib/request.py deleted file mode 100644 index 3d864f1d..00000000 --- a/Python313_13_x64_Template/Lib/urllib/request.py +++ /dev/null @@ -1,2797 +0,0 @@ -"""An extensible library for opening URLs using a variety of protocols - -The simplest way to use this module is to call the urlopen function, -which accepts a string containing a URL or a Request object (described -below). It opens the URL and returns the results as file-like -object; the returned object has some extra methods described below. - -The OpenerDirector manages a collection of Handler objects that do -all the actual work. Each Handler implements a particular protocol or -option. The OpenerDirector is a composite object that invokes the -Handlers needed to open the requested URL. For example, the -HTTPHandler performs HTTP GET and POST requests and deals with -non-error returns. The HTTPRedirectHandler automatically deals with -HTTP 301, 302, 303, 307, and 308 redirect errors, and the -HTTPDigestAuthHandler deals with digest authentication. - -urlopen(url, data=None) -- Basic usage is the same as original -urllib. pass the url and optionally data to post to an HTTP URL, and -get a file-like object back. One difference is that you can also pass -a Request instance instead of URL. Raises a URLError (subclass of -OSError); for HTTP errors, raises an HTTPError, which can also be -treated as a valid response. - -build_opener -- Function that creates a new OpenerDirector instance. -Will install the default handlers. Accepts one or more Handlers as -arguments, either instances or Handler classes that it will -instantiate. 
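
The deprecated split*() helpers removed above all map onto attributes of the urlsplit() result, which is the replacement the deprecation warnings point to; a quick correspondence sketch:

    from urllib.parse import urlsplit

    p = urlsplit("http://user:pw@example.com:8080/path?x=1#top")
    assert p.scheme == "http"                              # splittype()
    assert (p.username, p.password) == ("user", "pw")      # splituser()/splitpasswd()
    assert (p.hostname, p.port) == ("example.com", 8080)   # splitport()/splitnport()
    assert (p.query, p.fragment) == ("x=1", "top")         # splitquery()/splittag()
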
If one of the argument is a subclass of the default -handler, the argument will be installed instead of the default. - -install_opener -- Installs a new opener as the default opener. - -objects of interest: - -OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages -the Handler classes, while dealing with requests and responses. - -Request -- An object that encapsulates the state of a request. The -state can be as simple as the URL. It can also include extra HTTP -headers, e.g. a User-Agent. - -BaseHandler -- - -internals: -BaseHandler and parent -_call_chain conventions - -Example usage: - -import urllib.request - -# set up authentication info -authinfo = urllib.request.HTTPBasicAuthHandler() -authinfo.add_password(realm='PDQ Application', - uri='https://mahler:8092/site-updates.py', - user='klem', - passwd='geheim$parole') - -proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"}) - -# build a new opener that adds authentication and caching FTP handlers -opener = urllib.request.build_opener(proxy_support, authinfo, - urllib.request.CacheFTPHandler) - -# install it -urllib.request.install_opener(opener) - -f = urllib.request.urlopen('https://www.python.org/') -""" - -# XXX issues: -# If an authentication error handler that tries to perform -# authentication for some reason but fails, how should the error be -# signalled? The client needs to know the HTTP error code. But if -# the handler knows that the problem was, e.g., that it didn't know -# that hash algo that requested in the challenge, it would be good to -# pass that information along to the client, too. -# ftp errors aren't handled cleanly -# check digest against correct (i.e. non-apache) implementation - -# Possible extensions: -# complex proxies XXX not sure what exactly was meant by this -# abstract factory for opener - -import base64 -import bisect -import email -import hashlib -import http.client -import io -import os -import re -import socket -import string -import sys -import time -import tempfile -import contextlib -import warnings - - -from urllib.error import URLError, HTTPError, ContentTooShortError -from urllib.parse import ( - urlparse, urlsplit, urljoin, unwrap, quote, unquote, - _splittype, _splithost, _splitport, _splituser, _splitpasswd, - _splitattr, _splitquery, _splitvalue, _splittag, _to_bytes, - unquote_to_bytes, urlunparse) -from urllib.response import addinfourl, addclosehook - -# check for SSL -try: - import ssl -except ImportError: - _have_ssl = False -else: - _have_ssl = True - -__all__ = [ - # Classes - 'Request', 'OpenerDirector', 'BaseHandler', 'HTTPDefaultErrorHandler', - 'HTTPRedirectHandler', 'HTTPCookieProcessor', 'ProxyHandler', - 'HTTPPasswordMgr', 'HTTPPasswordMgrWithDefaultRealm', - 'HTTPPasswordMgrWithPriorAuth', 'AbstractBasicAuthHandler', - 'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler', 'AbstractDigestAuthHandler', - 'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler', 'HTTPHandler', - 'FileHandler', 'FTPHandler', 'CacheFTPHandler', 'DataHandler', - 'UnknownHandler', 'HTTPErrorProcessor', - # Functions - 'urlopen', 'install_opener', 'build_opener', - 'pathname2url', 'url2pathname', 'getproxies', - # Legacy interface - 'urlretrieve', 'urlcleanup', 'URLopener', 'FancyURLopener', -] - -# used in User-Agent header sent -__version__ = '%d.%d' % sys.version_info[:2] - -_opener = None -def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - *, context=None): - '''Open the URL url, which can be either a string or a Request object. 
- - *data* must be an object specifying additional data to be sent to - the server, or None if no such data is needed. See Request for - details. - - urllib.request module uses HTTP/1.1 and includes a "Connection:close" - header in its HTTP requests. - - The optional *timeout* parameter specifies a timeout in seconds for - blocking operations like the connection attempt (if not specified, the - global default timeout setting will be used). This only works for HTTP, - HTTPS and FTP connections. - - If *context* is specified, it must be a ssl.SSLContext instance describing - the various SSL options. See HTTPSConnection for more details. - - - This function always returns an object which can work as a - context manager and has the properties url, headers, and status. - See urllib.response.addinfourl for more detail on these properties. - - For HTTP and HTTPS URLs, this function returns a http.client.HTTPResponse - object slightly modified. In addition to the three new methods above, the - msg attribute contains the same information as the reason attribute --- - the reason phrase returned by the server --- instead of the response - headers as it is specified in the documentation for HTTPResponse. - - For FTP, file, and data URLs and requests explicitly handled by legacy - URLopener and FancyURLopener classes, this function returns a - urllib.response.addinfourl object. - - Note that None may be returned if no handler handles the request (though - the default installed global OpenerDirector uses UnknownHandler to ensure - this never happens). - - In addition, if proxy settings are detected (for example, when a *_proxy - environment variable like http_proxy is set), ProxyHandler is default - installed and makes sure the requests are handled through the proxy. - - ''' - global _opener - if context: - https_handler = HTTPSHandler(context=context) - opener = build_opener(https_handler) - elif _opener is None: - _opener = opener = build_opener() - else: - opener = _opener - return opener.open(url, data, timeout) - -def install_opener(opener): - global _opener - _opener = opener - -_url_tempfiles = [] -def urlretrieve(url, filename=None, reporthook=None, data=None): - """ - Retrieve a URL into a temporary location on disk. - - Requires a URL argument. If a filename is passed, it is used as - the temporary file location. The reporthook argument should be - a callable that accepts a block number, a read size, and the - total file size of the URL target. The data argument should be - valid URL encoded data. - - If a filename is passed and the URL points to a local resource, - the result is a copy from local file to new file. - - Returns a tuple containing the path to the newly created - data file as well as the resulting HTTPMessage object. - """ - url_type, path = _splittype(url) - - with contextlib.closing(urlopen(url, data)) as fp: - headers = fp.info() - - # Just return the local path and the "headers" for file:// - # URLs. No sense in performing a copy unless requested. - if url_type == "file" and not filename: - return os.path.normpath(path), headers - - # Handle temporary file setup. 
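
Typical usage of the two entry points above, with example.com standing in as a placeholder host; the reporthook signature (block number, block size, total size) is the one the urlretrieve() docstring specifies:

    import urllib.request

    # urlopen() returns a context-managed, file-like response object:
    with urllib.request.urlopen("http://example.com/") as resp:
        body = resp.read()
        print(resp.status, resp.headers.get_content_type(), len(body))

    # urlretrieve() streams to disk, calling the hook once per block read
    # (total size is -1 when no Content-Length header is present):
    def progress(blocknum, blocksize, totalsize):
        print(f"block {blocknum}: ~{blocknum * blocksize} of {totalsize} bytes")

    path, headers = urllib.request.urlretrieve(
        "http://example.com/", "page.html", reporthook=progress)
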
- if filename: - tfp = open(filename, 'wb') - else: - tfp = tempfile.NamedTemporaryFile(delete=False) - filename = tfp.name - _url_tempfiles.append(filename) - - with tfp: - result = filename, headers - bs = 1024*8 - size = -1 - read = 0 - blocknum = 0 - if "content-length" in headers: - size = int(headers["Content-Length"]) - - if reporthook: - reporthook(blocknum, bs, size) - - while block := fp.read(bs): - read += len(block) - tfp.write(block) - blocknum += 1 - if reporthook: - reporthook(blocknum, bs, size) - - if size >= 0 and read < size: - raise ContentTooShortError( - "retrieval incomplete: got only %i out of %i bytes" - % (read, size), result) - - return result - -def urlcleanup(): - """Clean up temporary files from urlretrieve calls.""" - for temp_file in _url_tempfiles: - try: - os.unlink(temp_file) - except OSError: - pass - - del _url_tempfiles[:] - global _opener - if _opener: - _opener = None - -# copied from cookielib.py -_cut_port_re = re.compile(r":\d+$", re.ASCII) -def request_host(request): - """Return request-host, as defined by RFC 2965. - - Variation from RFC: returned value is lowercased, for convenient - comparison. - - """ - url = request.full_url - host = urlparse(url)[1] - if host == "": - host = request.get_header("Host", "") - - # remove port, if present - host = _cut_port_re.sub("", host, 1) - return host.lower() - -class Request: - - def __init__(self, url, data=None, headers={}, - origin_req_host=None, unverifiable=False, - method=None): - self.full_url = url - self.headers = {} - self.unredirected_hdrs = {} - self._data = None - self.data = data - self._tunnel_host = None - for key, value in headers.items(): - self.add_header(key, value) - if origin_req_host is None: - origin_req_host = request_host(self) - self.origin_req_host = origin_req_host - self.unverifiable = unverifiable - if method: - self.method = method - - @property - def full_url(self): - if self.fragment: - return '{}#{}'.format(self._full_url, self.fragment) - return self._full_url - - @full_url.setter - def full_url(self, url): - # unwrap('') --> 'type://host/path' - self._full_url = unwrap(url) - self._full_url, self.fragment = _splittag(self._full_url) - self._parse() - - @full_url.deleter - def full_url(self): - self._full_url = None - self.fragment = None - self.selector = '' - - @property - def data(self): - return self._data - - @data.setter - def data(self, data): - if data != self._data: - self._data = data - # issue 16464 - # if we change data we need to remove content-length header - # (cause it's most probably calculated for previous value) - if self.has_header("Content-length"): - self.remove_header("Content-length") - - @data.deleter - def data(self): - self.data = None - - def _parse(self): - self.type, rest = _splittype(self._full_url) - if self.type is None: - raise ValueError("unknown url type: %r" % self.full_url) - self.host, self.selector = _splithost(rest) - if self.host: - self.host = unquote(self.host) - - def get_method(self): - """Return a string indicating the HTTP request method.""" - default_method = "POST" if self.data is not None else "GET" - return getattr(self, 'method', default_method) - - def get_full_url(self): - return self.full_url - - def set_proxy(self, host, type): - if self.type == 'https' and not self._tunnel_host: - self._tunnel_host = self.host - else: - self.type= type - self.selector = self.full_url - self.host = host - - def has_proxy(self): - return self.selector == self.full_url - - def add_header(self, key, val): - # useful for something 
like authentication - self.headers[key.capitalize()] = val - - def add_unredirected_header(self, key, val): - # will not be added to a redirected request - self.unredirected_hdrs[key.capitalize()] = val - - def has_header(self, header_name): - return (header_name in self.headers or - header_name in self.unredirected_hdrs) - - def get_header(self, header_name, default=None): - return self.headers.get( - header_name, - self.unredirected_hdrs.get(header_name, default)) - - def remove_header(self, header_name): - self.headers.pop(header_name, None) - self.unredirected_hdrs.pop(header_name, None) - - def header_items(self): - hdrs = {**self.unredirected_hdrs, **self.headers} - return list(hdrs.items()) - -class OpenerDirector: - def __init__(self): - client_version = "Python-urllib/%s" % __version__ - self.addheaders = [('User-agent', client_version)] - # self.handlers is retained only for backward compatibility - self.handlers = [] - # manage the individual handlers - self.handle_open = {} - self.handle_error = {} - self.process_response = {} - self.process_request = {} - - def add_handler(self, handler): - if not hasattr(handler, "add_parent"): - raise TypeError("expected BaseHandler instance, got %r" % - type(handler)) - - added = False - for meth in dir(handler): - if meth in ["redirect_request", "do_open", "proxy_open"]: - # oops, coincidental match - continue - - i = meth.find("_") - protocol = meth[:i] - condition = meth[i+1:] - - if condition.startswith("error"): - j = condition.find("_") + i + 1 - kind = meth[j+1:] - try: - kind = int(kind) - except ValueError: - pass - lookup = self.handle_error.get(protocol, {}) - self.handle_error[protocol] = lookup - elif condition == "open": - kind = protocol - lookup = self.handle_open - elif condition == "response": - kind = protocol - lookup = self.process_response - elif condition == "request": - kind = protocol - lookup = self.process_request - else: - continue - - handlers = lookup.setdefault(kind, []) - if handlers: - bisect.insort(handlers, handler) - else: - handlers.append(handler) - added = True - - if added: - bisect.insort(self.handlers, handler) - handler.add_parent(self) - - def close(self): - # Only exists for backwards compatibility. - pass - - def _call_chain(self, chain, kind, meth_name, *args): - # Handlers raise an exception if no one else should try to handle - # the request, or return None if they can't but another handler - # could. Otherwise, they return the response. 
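
A short sketch of the Request behaviour implemented above — bytes data flips the default method to POST, header keys are stored .capitalize()d, and unredirected headers are dropped on redirects:

    import urllib.request

    req = urllib.request.Request(
        "http://example.com/api",
        data=b'{"k": 1}',                        # bytes body; default would be POST
        headers={"Content-Type": "application/json"},
        method="PUT")                            # explicit method wins
    assert req.get_method() == "PUT"
    assert req.get_header("Content-type") == "application/json"
    req.add_unredirected_header("Authorization", "Bearer ...")  # not re-sent on redirect
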
- handlers = chain.get(kind, ()) - for handler in handlers: - func = getattr(handler, meth_name) - result = func(*args) - if result is not None: - return result - - def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): - # accept a URL or a Request object - if isinstance(fullurl, str): - req = Request(fullurl, data) - else: - req = fullurl - if data is not None: - req.data = data - - req.timeout = timeout - protocol = req.type - - # pre-process request - meth_name = protocol+"_request" - for processor in self.process_request.get(protocol, []): - meth = getattr(processor, meth_name) - req = meth(req) - - sys.audit('urllib.Request', req.full_url, req.data, req.headers, req.get_method()) - response = self._open(req, data) - - # post-process response - meth_name = protocol+"_response" - for processor in self.process_response.get(protocol, []): - meth = getattr(processor, meth_name) - response = meth(req, response) - - return response - - def _open(self, req, data=None): - result = self._call_chain(self.handle_open, 'default', - 'default_open', req) - if result: - return result - - protocol = req.type - result = self._call_chain(self.handle_open, protocol, protocol + - '_open', req) - if result: - return result - - return self._call_chain(self.handle_open, 'unknown', - 'unknown_open', req) - - def error(self, proto, *args): - if proto in ('http', 'https'): - # XXX http[s] protocols are special-cased - dict = self.handle_error['http'] # https is not different than http - proto = args[2] # YUCK! - meth_name = 'http_error_%s' % proto - http_err = 1 - orig_args = args - else: - dict = self.handle_error - meth_name = proto + '_error' - http_err = 0 - args = (dict, proto, meth_name) + args - result = self._call_chain(*args) - if result: - return result - - if http_err: - args = (dict, 'default', 'http_error_default') + orig_args - return self._call_chain(*args) - -# XXX probably also want an abstract factory that knows when it makes -# sense to skip a superclass in favor of a subclass and when it might -# make sense to include both - -def build_opener(*handlers): - """Create an opener object from a list of handlers. - - The opener will use several default handlers, including support - for HTTP, FTP and when applicable HTTPS. - - If any of the handlers passed as arguments are subclasses of the - default handlers, the default handlers will not be used. - """ - opener = OpenerDirector() - default_classes = [ProxyHandler, UnknownHandler, HTTPHandler, - HTTPDefaultErrorHandler, HTTPRedirectHandler, - FTPHandler, FileHandler, HTTPErrorProcessor, - DataHandler] - if hasattr(http.client, "HTTPSConnection"): - default_classes.append(HTTPSHandler) - skip = set() - for klass in default_classes: - for check in handlers: - if isinstance(check, type): - if issubclass(check, klass): - skip.add(klass) - elif isinstance(check, klass): - skip.add(klass) - for klass in skip: - default_classes.remove(klass) - - for klass in default_classes: - opener.add_handler(klass()) - - for h in handlers: - if isinstance(h, type): - h = h() - opener.add_handler(h) - return opener - -class BaseHandler: - handler_order = 500 - - def add_parent(self, parent): - self.parent = parent - - def close(self): - # Only exists for backwards compatibility - pass - - def __lt__(self, other): - if not hasattr(other, "handler_order"): - # Try to preserve the old behavior of having custom classes - # inserted after default ones (works only for custom user - # classes which are not aware of handler_order). 
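
To illustrate the add_handler() method-name dispatch and handler_order sorting above, a hypothetical LoggingHandler whose http_request hook is discovered by name and runs before the defaults (order 500):

    import urllib.request

    class LoggingHandler(urllib.request.BaseHandler):
        handler_order = 450              # sorts before the default handlers
        def http_request(self, req):     # picked up via the <protocol>_request pattern
            print("->", req.get_method(), req.full_url)
            return req                   # request pre-processors must return the request

    opener = urllib.request.build_opener(LoggingHandler)
    urllib.request.install_opener(opener)   # plain urlopen() now uses it
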
- return True - return self.handler_order < other.handler_order - - -class HTTPErrorProcessor(BaseHandler): - """Process HTTP error responses.""" - handler_order = 1000 # after all other processing - - def http_response(self, request, response): - code, msg, hdrs = response.code, response.msg, response.info() - - # According to RFC 2616, "2xx" code indicates that the client's - # request was successfully received, understood, and accepted. - if not (200 <= code < 300): - response = self.parent.error( - 'http', request, response, code, msg, hdrs) - - return response - - https_response = http_response - -class HTTPDefaultErrorHandler(BaseHandler): - def http_error_default(self, req, fp, code, msg, hdrs): - raise HTTPError(req.full_url, code, msg, hdrs, fp) - -class HTTPRedirectHandler(BaseHandler): - # maximum number of redirections to any single URL - # this is needed because of the state that cookies introduce - max_repeats = 4 - # maximum total number of redirections (regardless of URL) before - # assuming we're in a loop - max_redirections = 10 - - def redirect_request(self, req, fp, code, msg, headers, newurl): - """Return a Request or None in response to a redirect. - - This is called by the http_error_30x methods when a - redirection response is received. If a redirection should - take place, return a new Request to allow http_error_30x to - perform the redirect. Otherwise, raise HTTPError if no-one - else should try to handle this url. Return None if you can't - but another Handler might. - """ - m = req.get_method() - if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD") - or code in (301, 302, 303) and m == "POST")): - raise HTTPError(req.full_url, code, msg, headers, fp) - - # Strictly (according to RFC 2616), 301 or 302 in response to - # a POST MUST NOT cause a redirection without confirmation - # from the user (of urllib.request, in this case). In practice, - # essentially all clients do redirect in this case, so we do - # the same. - - # Be conciliant with URIs containing a space. This is mainly - # redundant with the more complete encoding done in http_error_302(), - # but it is kept for compatibility with other callers. - newurl = newurl.replace(' ', '%20') - - CONTENT_HEADERS = ("content-length", "content-type") - newheaders = {k: v for k, v in req.headers.items() - if k.lower() not in CONTENT_HEADERS} - return Request(newurl, - method="HEAD" if m == "HEAD" else "GET", - headers=newheaders, - origin_req_host=req.origin_req_host, - unverifiable=True) - - # Implementation note: To avoid the server sending us into an - # infinite loop, the request object needs to track what URLs we - # have already seen. Do this by adding a handler-specific - # attribute to the Request object. - def http_error_302(self, req, fp, code, msg, headers): - # Some servers (incorrectly) return multiple Location headers - # (so probably same goes for URI). Use first header. - if "location" in headers: - newurl = headers["location"] - elif "uri" in headers: - newurl = headers["uri"] - else: - return - - # fix a possible malformed URL - urlparts = urlparse(newurl) - - # For security reasons we don't allow redirection to anything other - # than http, https or ftp. 
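
Because build_opener() swaps in subclasses of the default handlers, redirect policy is customized by overriding redirect_request(); a hedged sketch (SameHostRedirectHandler is an illustrative name, not part of the module) that refuses cross-host redirects:

    import urllib.request
    from urllib.parse import urlsplit
    from urllib.error import HTTPError

    class SameHostRedirectHandler(urllib.request.HTTPRedirectHandler):
        def redirect_request(self, req, fp, code, msg, headers, newurl):
            if urlsplit(newurl).hostname != urlsplit(req.full_url).hostname:
                raise HTTPError(req.full_url, code,
                                "cross-host redirect refused", headers, fp)
            return super().redirect_request(req, fp, code, msg, headers, newurl)

    opener = urllib.request.build_opener(SameHostRedirectHandler)
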
- - if urlparts.scheme not in ('http', 'https', 'ftp', ''): - raise HTTPError( - newurl, code, - "%s - Redirection to url '%s' is not allowed" % (msg, newurl), - headers, fp) - - if not urlparts.path and urlparts.netloc: - urlparts = list(urlparts) - urlparts[2] = "/" - newurl = urlunparse(urlparts) - - # http.client.parse_headers() decodes as ISO-8859-1. Recover the - # original bytes and percent-encode non-ASCII bytes, and any special - # characters such as the space. - newurl = quote( - newurl, encoding="iso-8859-1", safe=string.punctuation) - newurl = urljoin(req.full_url, newurl) - - # XXX Probably want to forget about the state of the current - # request, although that might interact poorly with other - # handlers that also use handler-specific request attributes - new = self.redirect_request(req, fp, code, msg, headers, newurl) - if new is None: - return - - # loop detection - # .redirect_dict has a key url if url was previously visited. - if hasattr(req, 'redirect_dict'): - visited = new.redirect_dict = req.redirect_dict - if (visited.get(newurl, 0) >= self.max_repeats or - len(visited) >= self.max_redirections): - raise HTTPError(req.full_url, code, - self.inf_msg + msg, headers, fp) - else: - visited = new.redirect_dict = req.redirect_dict = {} - visited[newurl] = visited.get(newurl, 0) + 1 - - # Don't close the fp until we are sure that we won't use it - # with HTTPError. - fp.read() - fp.close() - - return self.parent.open(new, timeout=req.timeout) - - http_error_301 = http_error_303 = http_error_307 = http_error_308 = http_error_302 - - inf_msg = "The HTTP server returned a redirect error that would " \ - "lead to an infinite loop.\n" \ - "The last 30x error message was:\n" - - -def _parse_proxy(proxy): - """Return (scheme, user, password, host/port) given a URL or an authority. - - If a URL is supplied, it must have an authority (host:port) component. - According to RFC 3986, having an authority component means the URL must - have two slashes after the scheme. - """ - scheme, r_scheme = _splittype(proxy) - if not r_scheme.startswith("/"): - # authority - scheme = None - authority = proxy - else: - # URL - if not r_scheme.startswith("//"): - raise ValueError("proxy URL with no authority: %r" % proxy) - # We have an authority, so for RFC 3986-compliant URLs (by ss 3. 
- # and 3.3.), path is empty or starts with '/' - if '@' in r_scheme: - host_separator = r_scheme.find('@') - end = r_scheme.find("/", host_separator) - else: - end = r_scheme.find("/", 2) - if end == -1: - end = None - authority = r_scheme[2:end] - userinfo, hostport = _splituser(authority) - if userinfo is not None: - user, password = _splitpasswd(userinfo) - else: - user = password = None - return scheme, user, password, hostport - -class ProxyHandler(BaseHandler): - # Proxies must be in front - handler_order = 100 - - def __init__(self, proxies=None): - if proxies is None: - proxies = getproxies() - assert hasattr(proxies, 'keys'), "proxies must be a mapping" - self.proxies = proxies - for type, url in proxies.items(): - type = type.lower() - setattr(self, '%s_open' % type, - lambda r, proxy=url, type=type, meth=self.proxy_open: - meth(r, proxy, type)) - - def proxy_open(self, req, proxy, type): - orig_type = req.type - proxy_type, user, password, hostport = _parse_proxy(proxy) - if proxy_type is None: - proxy_type = orig_type - - if req.host and proxy_bypass(req.host): - return None - - if user and password: - user_pass = '%s:%s' % (unquote(user), - unquote(password)) - creds = base64.b64encode(user_pass.encode()).decode("ascii") - req.add_header('Proxy-authorization', 'Basic ' + creds) - hostport = unquote(hostport) - req.set_proxy(hostport, proxy_type) - if orig_type == proxy_type or orig_type == 'https': - # let other handlers take care of it - return None - else: - # need to start over, because the other handlers don't - # grok the proxy's URL type - # e.g. if we have a constructor arg proxies like so: - # {'http': 'ftp://proxy.example.com'}, we may end up turning - # a request for http://acme.example.com/a into one for - # ftp://proxy.example.com/a - return self.parent.open(req, timeout=req.timeout) - -class HTTPPasswordMgr: - - def __init__(self): - self.passwd = {} - - def add_password(self, realm, uri, user, passwd): - # uri could be a single URI or a sequence - if isinstance(uri, str): - uri = [uri] - if realm not in self.passwd: - self.passwd[realm] = {} - for default_port in True, False: - reduced_uri = tuple( - self.reduce_uri(u, default_port) for u in uri) - self.passwd[realm][reduced_uri] = (user, passwd) - - def find_user_password(self, realm, authuri): - domains = self.passwd.get(realm, {}) - for default_port in True, False: - reduced_authuri = self.reduce_uri(authuri, default_port) - for uris, authinfo in domains.items(): - for uri in uris: - if self.is_suburi(uri, reduced_authuri): - return authinfo - return None, None - - def reduce_uri(self, uri, default_port=True): - """Accept authority or URI and extract only the authority and path.""" - # note HTTP URLs do not have a userinfo component - parts = urlsplit(uri) - if parts[1]: - # URI - scheme = parts[0] - authority = parts[1] - path = parts[2] or '/' - else: - # host or host:port - scheme = None - authority = uri - path = '/' - host, port = _splitport(authority) - if default_port and port is None and scheme is not None: - dport = {"http": 80, - "https": 443, - }.get(scheme) - if dport is not None: - authority = "%s:%d" % (host, dport) - return authority, path - - def is_suburi(self, base, test): - """Check if test is below base in a URI tree - - Both args must be URIs in reduced form. 
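
How reduce_uri() and is_suburi() above interact in practice (placeholder realm/credentials; note the default port folding makes the explicit :443 form match):

    import urllib.request

    mgr = urllib.request.HTTPPasswordMgr()
    mgr.add_password("PDQ Application", "https://example.com/api/", "klem", "secret")

    # reduce_uri() adds the scheme's default port, so these are equivalent:
    assert mgr.find_user_password(
        "PDQ Application", "https://example.com:443/api/v2") == ("klem", "secret")
    # Outside the /api/ subtree, is_suburi() fails -> no credentials:
    assert mgr.find_user_password(
        "PDQ Application", "https://example.com/other") == (None, None)
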
- """ - if base == test: - return True - if base[0] != test[0]: - return False - prefix = base[1] - if prefix[-1:] != '/': - prefix += '/' - return test[1].startswith(prefix) - - -class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr): - - def find_user_password(self, realm, authuri): - user, password = HTTPPasswordMgr.find_user_password(self, realm, - authuri) - if user is not None: - return user, password - return HTTPPasswordMgr.find_user_password(self, None, authuri) - - -class HTTPPasswordMgrWithPriorAuth(HTTPPasswordMgrWithDefaultRealm): - - def __init__(self): - self.authenticated = {} - super().__init__() - - def add_password(self, realm, uri, user, passwd, is_authenticated=False): - self.update_authenticated(uri, is_authenticated) - # Add a default for prior auth requests - if realm is not None: - super().add_password(None, uri, user, passwd) - super().add_password(realm, uri, user, passwd) - - def update_authenticated(self, uri, is_authenticated=False): - # uri could be a single URI or a sequence - if isinstance(uri, str): - uri = [uri] - - for default_port in True, False: - for u in uri: - reduced_uri = self.reduce_uri(u, default_port) - self.authenticated[reduced_uri] = is_authenticated - - def is_authenticated(self, authuri): - for default_port in True, False: - reduced_authuri = self.reduce_uri(authuri, default_port) - for uri in self.authenticated: - if self.is_suburi(uri, reduced_authuri): - return self.authenticated[uri] - - -class AbstractBasicAuthHandler: - - # XXX this allows for multiple auth-schemes, but will stupidly pick - # the last one with a realm specified. - - # allow for double- and single-quoted realm values - # (single quotes are a violation of the RFC, but appear in the wild) - rx = re.compile('(?:^|,)' # start of the string or ',' - '[ \t]*' # optional whitespaces - '([^ \t,]+)' # scheme like "Basic" - '[ \t]+' # mandatory whitespaces - # realm=xxx - # realm='xxx' - # realm="xxx" - 'realm=(["\']?)([^"\']*)\\2', - re.I) - - # XXX could pre-emptively send auth info already accepted (RFC 2617, - # end of section 2, and section 1.2 immediately after "credentials" - # production). - - def __init__(self, password_mgr=None): - if password_mgr is None: - password_mgr = HTTPPasswordMgr() - self.passwd = password_mgr - self.add_password = self.passwd.add_password - - def _parse_realm(self, header): - # parse WWW-Authenticate header: accept multiple challenges per header - found_challenge = False - for mo in AbstractBasicAuthHandler.rx.finditer(header): - scheme, quote, realm = mo.groups() - if quote not in ['"', "'"]: - warnings.warn("Basic Auth Realm was unquoted", - UserWarning, 3) - - yield (scheme, realm) - - found_challenge = True - - if not found_challenge: - if header: - scheme = header.split()[0] - else: - scheme = '' - yield (scheme, None) - - def http_error_auth_reqed(self, authreq, host, req, headers): - # host may be an authority (without userinfo) or a URL with an - # authority - headers = headers.get_all(authreq) - if not headers: - # no header found - return - - unsupported = None - for header in headers: - for scheme, realm in self._parse_realm(header): - if scheme.lower() != 'basic': - unsupported = scheme - continue - - if realm is not None: - # Use the first matching Basic challenge. - # Ignore following challenges even if they use the Basic - # scheme. 
- return self.retry_http_basic_auth(host, req, realm) - - if unsupported is not None: - raise ValueError("AbstractBasicAuthHandler does not " - "support the following scheme: %r" - % (scheme,)) - - def retry_http_basic_auth(self, host, req, realm): - user, pw = self.passwd.find_user_password(realm, host) - if pw is not None: - raw = "%s:%s" % (user, pw) - auth = "Basic " + base64.b64encode(raw.encode()).decode("ascii") - if req.get_header(self.auth_header, None) == auth: - return None - req.add_unredirected_header(self.auth_header, auth) - return self.parent.open(req, timeout=req.timeout) - else: - return None - - def http_request(self, req): - if (not hasattr(self.passwd, 'is_authenticated') or - not self.passwd.is_authenticated(req.full_url)): - return req - - if not req.has_header('Authorization'): - user, passwd = self.passwd.find_user_password(None, req.full_url) - credentials = '{0}:{1}'.format(user, passwd).encode() - auth_str = base64.standard_b64encode(credentials).decode() - req.add_unredirected_header('Authorization', - 'Basic {}'.format(auth_str.strip())) - return req - - def http_response(self, req, response): - if hasattr(self.passwd, 'is_authenticated'): - if 200 <= response.code < 300: - self.passwd.update_authenticated(req.full_url, True) - else: - self.passwd.update_authenticated(req.full_url, False) - return response - - https_request = http_request - https_response = http_response - - - -class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): - - auth_header = 'Authorization' - - def http_error_401(self, req, fp, code, msg, headers): - url = req.full_url - response = self.http_error_auth_reqed('www-authenticate', - url, req, headers) - return response - - -class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): - - auth_header = 'Proxy-authorization' - - def http_error_407(self, req, fp, code, msg, headers): - # http_error_auth_reqed requires that there is no userinfo component in - # authority. Assume there isn't one, since urllib.request does not (and - # should not, RFC 3986 s. 3.2.1) support requests for URLs containing - # userinfo. - authority = req.host - response = self.http_error_auth_reqed('proxy-authenticate', - authority, req, headers) - return response - - -# Return n random bytes. -_randombytes = os.urandom - - -class AbstractDigestAuthHandler: - # Digest authentication is specified in RFC 2617. - - # XXX The client does not inspect the Authentication-Info header - # in a successful response. - - # XXX It should be possible to test this implementation against - # a mock server that just generates a static set of challenges. - - # XXX qop="auth-int" supports is shaky - - def __init__(self, passwd=None): - if passwd is None: - passwd = HTTPPasswordMgr() - self.passwd = passwd - self.add_password = self.passwd.add_password - self.retried = 0 - self.nonce_count = 0 - self.last_nonce = None - - def reset_retry_count(self): - self.retried = 0 - - def http_error_auth_reqed(self, auth_header, host, req, headers): - authreq = headers.get(auth_header, None) - if self.retried > 5: - # Don't fail endlessly - if we failed once, we'll probably - # fail a second time. Hm. Unless the Password Manager is - # prompting for the information. Crap. 
This isn't great - # but it's better than the current 'repeat until recursion - # depth exceeded' approach - raise HTTPError(req.full_url, 401, "digest auth failed", - headers, None) - else: - self.retried += 1 - if authreq: - scheme = authreq.split()[0] - if scheme.lower() == 'digest': - return self.retry_http_digest_auth(req, authreq) - elif scheme.lower() != 'basic': - raise ValueError("AbstractDigestAuthHandler does not support" - " the following scheme: '%s'" % scheme) - - def retry_http_digest_auth(self, req, auth): - token, challenge = auth.split(' ', 1) - chal = parse_keqv_list(filter(None, parse_http_list(challenge))) - auth = self.get_authorization(req, chal) - if auth: - auth_val = 'Digest %s' % auth - if req.headers.get(self.auth_header, None) == auth_val: - return None - req.add_unredirected_header(self.auth_header, auth_val) - resp = self.parent.open(req, timeout=req.timeout) - return resp - - def get_cnonce(self, nonce): - # The cnonce-value is an opaque - # quoted string value provided by the client and used by both client - # and server to avoid chosen plaintext attacks, to provide mutual - # authentication, and to provide some message integrity protection. - # This isn't a fabulous effort, but it's probably Good Enough. - s = "%s:%s:%s:" % (self.nonce_count, nonce, time.ctime()) - b = s.encode("ascii") + _randombytes(8) - dig = hashlib.sha1(b).hexdigest() - return dig[:16] - - def get_authorization(self, req, chal): - try: - realm = chal['realm'] - nonce = chal['nonce'] - qop = chal.get('qop') - algorithm = chal.get('algorithm', 'MD5') - # mod_digest doesn't send an opaque, even though it isn't - # supposed to be optional - opaque = chal.get('opaque', None) - except KeyError: - return None - - H, KD = self.get_algorithm_impls(algorithm) - if H is None: - return None - - user, pw = self.passwd.find_user_password(realm, req.full_url) - if user is None: - return None - - # XXX not implemented yet - if req.data is not None: - entdig = self.get_entity_digest(req.data, chal) - else: - entdig = None - - A1 = "%s:%s:%s" % (user, realm, pw) - A2 = "%s:%s" % (req.get_method(), - # XXX selector: what about proxies and full urls - req.selector) - # NOTE: As per RFC 2617, when server sends "auth,auth-int", the client could use either `auth` - # or `auth-int` to the response back. we use `auth` to send the response back. - if qop is None: - respdig = KD(H(A1), "%s:%s" % (nonce, H(A2))) - elif 'auth' in qop.split(','): - if nonce == self.last_nonce: - self.nonce_count += 1 - else: - self.nonce_count = 1 - self.last_nonce = nonce - ncvalue = '%08x' % self.nonce_count - cnonce = self.get_cnonce(nonce) - noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, 'auth', H(A2)) - respdig = KD(H(A1), noncebit) - else: - # XXX handle auth-int. - raise URLError("qop '%s' is not supported." % qop) - - # XXX should the partial digests be encoded too? 
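
The qop=auth branch above computes response = KD(H(A1), nonce:nc:cnonce:auth:H(A2)); checking it against the worked example from RFC 2617 section 3.5 (Mufasa/Circle Of Life are the RFC's sample credentials):

    import hashlib

    H = lambda x: hashlib.md5(x.encode("ascii")).hexdigest()
    KD = lambda s, d: H("%s:%s" % (s, d))

    A1 = "Mufasa:testrealm@host.com:Circle Of Life"   # user:realm:password
    A2 = "GET:/dir/index.html"                        # method:uri
    nonce = "dcd98b7102dd2f0e8b11d0f600bfb0c093"
    nc, cnonce = "00000001", "0a4f113b"

    response = KD(H(A1), f"{nonce}:{nc}:{cnonce}:auth:{H(A2)}")
    assert response == "6629fae49393a05397450978507c4ef1"
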
- - base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ - 'response="%s"' % (user, realm, nonce, req.selector, - respdig) - if opaque: - base += ', opaque="%s"' % opaque - if entdig: - base += ', digest="%s"' % entdig - base += ', algorithm="%s"' % algorithm - if qop: - base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce) - return base - - def get_algorithm_impls(self, algorithm): - # lambdas assume digest modules are imported at the top level - if algorithm == 'MD5': - H = lambda x: hashlib.md5(x.encode("ascii")).hexdigest() - elif algorithm == 'SHA': - H = lambda x: hashlib.sha1(x.encode("ascii")).hexdigest() - # XXX MD5-sess - else: - raise ValueError("Unsupported digest authentication " - "algorithm %r" % algorithm) - KD = lambda s, d: H("%s:%s" % (s, d)) - return H, KD - - def get_entity_digest(self, data, chal): - # XXX not implemented yet - return None - - -class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): - """An authentication protocol defined by RFC 2069 - - Digest authentication improves on basic authentication because it - does not transmit passwords in the clear. - """ - - auth_header = 'Authorization' - handler_order = 490 # before Basic auth - - def http_error_401(self, req, fp, code, msg, headers): - host = urlparse(req.full_url)[1] - retry = self.http_error_auth_reqed('www-authenticate', - host, req, headers) - self.reset_retry_count() - return retry - - -class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): - - auth_header = 'Proxy-Authorization' - handler_order = 490 # before Basic auth - - def http_error_407(self, req, fp, code, msg, headers): - host = req.host - retry = self.http_error_auth_reqed('proxy-authenticate', - host, req, headers) - self.reset_retry_count() - return retry - -class AbstractHTTPHandler(BaseHandler): - - def __init__(self, debuglevel=None): - self._debuglevel = debuglevel if debuglevel is not None else http.client.HTTPConnection.debuglevel - - def set_http_debuglevel(self, level): - self._debuglevel = level - - def _get_content_length(self, request): - return http.client.HTTPConnection._get_content_length( - request.data, - request.get_method()) - - def do_request_(self, request): - host = request.host - if not host: - raise URLError('no host given') - - if request.data is not None: # POST - data = request.data - if isinstance(data, str): - msg = "POST data should be bytes, an iterable of bytes, " \ - "or a file object. It cannot be of type str." - raise TypeError(msg) - if not request.has_header('Content-type'): - request.add_unredirected_header( - 'Content-type', - 'application/x-www-form-urlencoded') - if (not request.has_header('Content-length') - and not request.has_header('Transfer-encoding')): - content_length = self._get_content_length(request) - if content_length is not None: - request.add_unredirected_header( - 'Content-length', str(content_length)) - else: - request.add_unredirected_header( - 'Transfer-encoding', 'chunked') - - sel_host = host - if request.has_proxy(): - scheme, sel = _splittype(request.selector) - sel_host, sel_path = _splithost(sel) - if not request.has_header('Host'): - request.add_unredirected_header('Host', sel_host) - for name, value in self.parent.addheaders: - name = name.capitalize() - if not request.has_header(name): - request.add_unredirected_header(name, value) - - return request - - def do_open(self, http_class, req, **http_conn_args): - """Return an HTTPResponse object for the request, using http_class. 
- - http_class must implement the HTTPConnection API from http.client. - """ - host = req.host - if not host: - raise URLError('no host given') - - # will parse host:port - h = http_class(host, timeout=req.timeout, **http_conn_args) - h.set_debuglevel(self._debuglevel) - - headers = dict(req.unredirected_hdrs) - headers.update({k: v for k, v in req.headers.items() - if k not in headers}) - - # TODO(jhylton): Should this be redesigned to handle - # persistent connections? - - # We want to make an HTTP/1.1 request, but the addinfourl - # class isn't prepared to deal with a persistent connection. - # It will try to read all remaining data from the socket, - # which will block while the server waits for the next request. - # So make sure the connection gets closed after the (only) - # request. - headers["Connection"] = "close" - headers = {name.title(): val for name, val in headers.items()} - - if req._tunnel_host: - tunnel_headers = {} - proxy_auth_hdr = "Proxy-Authorization" - if proxy_auth_hdr in headers: - tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr] - # Proxy-Authorization should not be sent to origin - # server. - del headers[proxy_auth_hdr] - h.set_tunnel(req._tunnel_host, headers=tunnel_headers) - - try: - try: - h.request(req.get_method(), req.selector, req.data, headers, - encode_chunked=req.has_header('Transfer-encoding')) - except OSError as err: # timeout error - raise URLError(err) - r = h.getresponse() - except: - h.close() - raise - - # If the server does not send us a 'Connection: close' header, - # HTTPConnection assumes the socket should be left open. Manually - # mark the socket to be closed when this response object goes away. - if h.sock: - h.sock.close() - h.sock = None - - r.url = req.get_full_url() - # This line replaces the .msg attribute of the HTTPResponse - # with .headers, because urllib clients expect the response to - # have the reason in .msg. It would be good to mark this - # attribute is deprecated and get then to use info() or - # .headers. 
- r.msg = r.reason - return r - - -class HTTPHandler(AbstractHTTPHandler): - - def http_open(self, req): - return self.do_open(http.client.HTTPConnection, req) - - http_request = AbstractHTTPHandler.do_request_ - -if hasattr(http.client, 'HTTPSConnection'): - - class HTTPSHandler(AbstractHTTPHandler): - - def __init__(self, debuglevel=None, context=None, check_hostname=None): - debuglevel = debuglevel if debuglevel is not None else http.client.HTTPSConnection.debuglevel - AbstractHTTPHandler.__init__(self, debuglevel) - if context is None: - http_version = http.client.HTTPSConnection._http_vsn - context = http.client._create_https_context(http_version) - if check_hostname is not None: - context.check_hostname = check_hostname - self._context = context - - def https_open(self, req): - return self.do_open(http.client.HTTPSConnection, req, - context=self._context) - - https_request = AbstractHTTPHandler.do_request_ - - __all__.append('HTTPSHandler') - -class HTTPCookieProcessor(BaseHandler): - def __init__(self, cookiejar=None): - import http.cookiejar - if cookiejar is None: - cookiejar = http.cookiejar.CookieJar() - self.cookiejar = cookiejar - - def http_request(self, request): - self.cookiejar.add_cookie_header(request) - return request - - def http_response(self, request, response): - self.cookiejar.extract_cookies(response, request) - return response - - https_request = http_request - https_response = http_response - -class UnknownHandler(BaseHandler): - def unknown_open(self, req): - type = req.type - raise URLError('unknown url type: %s' % type) - -def parse_keqv_list(l): - """Parse list of key=value strings where keys are not duplicated.""" - parsed = {} - for elt in l: - k, v = elt.split('=', 1) - if v[0] == '"' and v[-1] == '"': - v = v[1:-1] - parsed[k] = v - return parsed - -def parse_http_list(s): - """Parse lists as described by RFC 2068 Section 2. - - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Neither commas nor quotes count if they are escaped. - Only double-quotes count, not single-quotes. 
- """ - res = [] - part = '' - - escape = quote = False - for cur in s: - if escape: - part += cur - escape = False - continue - if quote: - if cur == '\\': - escape = True - continue - elif cur == '"': - quote = False - part += cur - continue - - if cur == ',': - res.append(part) - part = '' - continue - - if cur == '"': - quote = True - - part += cur - - # append last part - if part: - res.append(part) - - return [part.strip() for part in res] - -class FileHandler(BaseHandler): - # Use local file or FTP depending on form of URL - def file_open(self, req): - url = req.selector - if url[:2] == '//' and url[2:3] != '/' and (req.host and - req.host != 'localhost'): - if not req.host in self.get_names(): - raise URLError("file:// scheme is supported only on localhost") - else: - return self.open_local_file(req) - - # names for the localhost - names = None - def get_names(self): - if FileHandler.names is None: - try: - FileHandler.names = tuple( - socket.gethostbyname_ex('localhost')[2] + - socket.gethostbyname_ex(socket.gethostname())[2]) - except socket.gaierror: - FileHandler.names = (socket.gethostbyname('localhost'),) - return FileHandler.names - - # not entirely sure what the rules are here - def open_local_file(self, req): - import email.utils - import mimetypes - host = req.host - filename = req.selector - localfile = url2pathname(filename) - try: - stats = os.stat(localfile) - size = stats.st_size - modified = email.utils.formatdate(stats.st_mtime, usegmt=True) - mtype = mimetypes.guess_type(filename)[0] - headers = email.message_from_string( - 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' % - (mtype or 'text/plain', size, modified)) - if host: - host, port = _splitport(host) - if not host or \ - (not port and _safe_gethostbyname(host) in self.get_names()): - if host: - origurl = 'file://' + host + filename - else: - origurl = 'file://' + filename - return addinfourl(open(localfile, 'rb'), headers, origurl) - except OSError as exp: - raise URLError(exp) - raise URLError('file not on local host') - -def _safe_gethostbyname(host): - try: - return socket.gethostbyname(host) - except socket.gaierror: - return None - -class FTPHandler(BaseHandler): - def ftp_open(self, req): - import ftplib - import mimetypes - host = req.host - if not host: - raise URLError('ftp error: no host given') - host, port = _splitport(host) - if port is None: - port = ftplib.FTP_PORT - else: - port = int(port) - - # username/password handling - user, host = _splituser(host) - if user: - user, passwd = _splitpasswd(user) - else: - passwd = None - host = unquote(host) - user = user or '' - passwd = passwd or '' - - try: - host = socket.gethostbyname(host) - except OSError as msg: - raise URLError(msg) - path, attrs = _splitattr(req.selector) - dirs = path.split('/') - dirs = list(map(unquote, dirs)) - dirs, file = dirs[:-1], dirs[-1] - if dirs and not dirs[0]: - dirs = dirs[1:] - fw = None - try: - fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout) - type = file and 'I' or 'D' - for attr in attrs: - attr, value = _splitvalue(attr) - if attr.lower() == 'type' and \ - value in ('a', 'A', 'i', 'I', 'd', 'D'): - type = value.upper() - fp, retrlen = fw.retrfile(file, type) - headers = "" - mtype = mimetypes.guess_type(req.full_url)[0] - if mtype: - headers += "Content-type: %s\n" % mtype - if retrlen is not None and retrlen >= 0: - headers += "Content-length: %d\n" % retrlen - headers = email.message_from_string(headers) - return addinfourl(fp, headers, req.full_url) - except Exception as 
exp: - if fw is not None and not fw.keepalive: - fw.close() - if isinstance(exp, ftplib.all_errors): - raise URLError(exp) from exp - raise - - def connect_ftp(self, user, passwd, host, port, dirs, timeout): - return ftpwrapper(user, passwd, host, port, dirs, timeout, - persistent=False) - -class CacheFTPHandler(FTPHandler): - # XXX would be nice to have pluggable cache strategies - # XXX this stuff is definitely not thread safe - def __init__(self): - self.cache = {} - self.timeout = {} - self.soonest = 0 - self.delay = 60 - self.max_conns = 16 - - def setTimeout(self, t): - self.delay = t - - def setMaxConns(self, m): - self.max_conns = m - - def connect_ftp(self, user, passwd, host, port, dirs, timeout): - key = user, host, port, '/'.join(dirs), timeout - conn = self.cache.get(key) - if conn is None or not conn.keepalive: - if conn is not None: - conn.close() - conn = self.cache[key] = ftpwrapper(user, passwd, host, port, - dirs, timeout) - self.timeout[key] = time.time() + self.delay - self.check_cache() - return conn - - def check_cache(self): - # first check for old ones - t = time.time() - if self.soonest <= t: - for k, v in list(self.timeout.items()): - if v < t: - self.cache[k].close() - del self.cache[k] - del self.timeout[k] - self.soonest = min(list(self.timeout.values())) - - # then check the size - if len(self.cache) == self.max_conns: - for k, v in list(self.timeout.items()): - if v == self.soonest: - del self.cache[k] - del self.timeout[k] - break - self.soonest = min(list(self.timeout.values())) - - def clear_cache(self): - for conn in self.cache.values(): - conn.close() - self.cache.clear() - self.timeout.clear() - -class DataHandler(BaseHandler): - def data_open(self, req): - # data URLs as specified in RFC 2397. - # - # ignores POSTed data - # - # syntax: - # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data - # mediatype := [ type "/" subtype ] *( ";" parameter ) - # data := *urlchar - # parameter := attribute "=" value - url = req.full_url - - scheme, data = url.split(":",1) - mediatype, data = data.split(",",1) - - # Disallow control characters within mediatype. - if re.search(r"[\x00-\x1F\x7F]", mediatype): - raise ValueError( - "Control characters not allowed in data: mediatype") - - # even base64 encoded data URLs might be quoted so unquote in any case: - data = unquote_to_bytes(data) - if mediatype.endswith(";base64"): - data = base64.decodebytes(data) - mediatype = mediatype[:-7] - - if not mediatype: - mediatype = "text/plain;charset=US-ASCII" - - headers = email.message_from_string("Content-type: %s\nContent-length: %d\n" % - (mediatype, len(data))) - - return addinfourl(io.BytesIO(data), headers, url) - - -# Code move from the old urllib module - -MAXFTPCACHE = 10 # Trim the ftp cache beyond this size - -# Helper for non-unix systems -if os.name == 'nt': - from nturl2path import url2pathname, pathname2url -else: - def url2pathname(pathname): - """OS-specific conversion from a relative URL of the 'file' scheme - to a file system path; not recommended for general use.""" - if pathname[:3] == '///': - # URL has an empty authority section, so the path begins on the - # third character. - pathname = pathname[2:] - elif pathname[:12] == '//localhost/': - # Skip past 'localhost' authority. 
- pathname = pathname[11:] - encoding = sys.getfilesystemencoding() - errors = sys.getfilesystemencodeerrors() - return unquote(pathname, encoding=encoding, errors=errors) - - def pathname2url(pathname): - """OS-specific conversion from a file system path to a relative URL - of the 'file' scheme; not recommended for general use.""" - if pathname[:2] == '//': - # Add explicitly empty authority to avoid interpreting the path - # as authority. - pathname = '//' + pathname - encoding = sys.getfilesystemencoding() - errors = sys.getfilesystemencodeerrors() - return quote(pathname, encoding=encoding, errors=errors) - - -ftpcache = {} - - -class URLopener: - """Class to open URLs. - This is a class rather than just a subroutine because we may need - more than one set of global protocol-specific options. - Note -- this is a base class for those who don't want the - automatic handling of errors type 302 (relocated) and 401 - (authorization needed).""" - - __tempfiles = None - - version = "Python-urllib/%s" % __version__ - - # Constructor - def __init__(self, proxies=None, **x509): - msg = "%(class)s style of invoking requests is deprecated. " \ - "Use newer urlopen functions/methods" % {'class': self.__class__.__name__} - warnings.warn(msg, DeprecationWarning, stacklevel=3) - if proxies is None: - proxies = getproxies() - assert hasattr(proxies, 'keys'), "proxies must be a mapping" - self.proxies = proxies - self.key_file = x509.get('key_file') - self.cert_file = x509.get('cert_file') - self.addheaders = [('User-Agent', self.version), ('Accept', '*/*')] - self.__tempfiles = [] - self.__unlink = os.unlink # See cleanup() - self.tempcache = None - # Undocumented feature: if you assign {} to tempcache, - # it is used to cache files retrieved with - # self.retrieve(). This is not enabled by default - # since it does not work for changing documents (and I - # haven't got the logic to check expiration headers - # yet). - self.ftpcache = ftpcache - # Undocumented feature: you can use a different - # ftp cache by assigning to the .ftpcache member; - # in case you want logically independent URL openers - # XXX This is not threadsafe. Bah. - - def __del__(self): - self.close() - - def close(self): - self.cleanup() - - def cleanup(self): - # This code sometimes runs when the rest of this module - # has already been deleted, so it can't use any globals - # or import anything. - if self.__tempfiles: - for file in self.__tempfiles: - try: - self.__unlink(file) - except OSError: - pass - del self.__tempfiles[:] - if self.tempcache: - self.tempcache.clear() - - def addheader(self, *args): - """Add a header to be used by the HTTP interface only - e.g. 
u.addheader('Accept', 'sound/basic')""" - self.addheaders.append(args) - - # External interface - def open(self, fullurl, data=None): - """Use URLopener().open(file) instead of open(file, 'r').""" - fullurl = unwrap(_to_bytes(fullurl)) - fullurl = quote(fullurl, safe="%/:=&?~#+!$,;'@()*[]|") - if self.tempcache and fullurl in self.tempcache: - filename, headers = self.tempcache[fullurl] - fp = open(filename, 'rb') - return addinfourl(fp, headers, fullurl) - urltype, url = _splittype(fullurl) - if not urltype: - urltype = 'file' - if urltype in self.proxies: - proxy = self.proxies[urltype] - urltype, proxyhost = _splittype(proxy) - host, selector = _splithost(proxyhost) - url = (host, fullurl) # Signal special case to open_*() - else: - proxy = None - name = 'open_' + urltype - self.type = urltype - name = name.replace('-', '_') - if not hasattr(self, name) or name == 'open_local_file': - if proxy: - return self.open_unknown_proxy(proxy, fullurl, data) - else: - return self.open_unknown(fullurl, data) - try: - if data is None: - return getattr(self, name)(url) - else: - return getattr(self, name)(url, data) - except (HTTPError, URLError): - raise - except OSError as msg: - raise OSError('socket error', msg) from msg - - def open_unknown(self, fullurl, data=None): - """Overridable interface to open unknown URL type.""" - type, url = _splittype(fullurl) - raise OSError('url error', 'unknown url type', type) - - def open_unknown_proxy(self, proxy, fullurl, data=None): - """Overridable interface to open unknown URL type.""" - type, url = _splittype(fullurl) - raise OSError('url error', 'invalid proxy for %s' % type, proxy) - - # External interface - def retrieve(self, url, filename=None, reporthook=None, data=None): - """retrieve(url) returns (filename, headers) for a local object - or (tempfilename, headers) for a remote object.""" - url = unwrap(_to_bytes(url)) - if self.tempcache and url in self.tempcache: - return self.tempcache[url] - type, url1 = _splittype(url) - if filename is None and (not type or type == 'file'): - try: - fp = self.open_local_file(url1) - hdrs = fp.info() - fp.close() - return url2pathname(_splithost(url1)[1]), hdrs - except OSError: - pass - fp = self.open(url, data) - try: - headers = fp.info() - if filename: - tfp = open(filename, 'wb') - else: - garbage, path = _splittype(url) - garbage, path = _splithost(path or "") - path, garbage = _splitquery(path or "") - path, garbage = _splitattr(path or "") - suffix = os.path.splitext(path)[1] - (fd, filename) = tempfile.mkstemp(suffix) - self.__tempfiles.append(filename) - tfp = os.fdopen(fd, 'wb') - try: - result = filename, headers - if self.tempcache is not None: - self.tempcache[url] = result - bs = 1024*8 - size = -1 - read = 0 - blocknum = 0 - if "content-length" in headers: - size = int(headers["Content-Length"]) - if reporthook: - reporthook(blocknum, bs, size) - while block := fp.read(bs): - read += len(block) - tfp.write(block) - blocknum += 1 - if reporthook: - reporthook(blocknum, bs, size) - finally: - tfp.close() - finally: - fp.close() - - # raise exception if actual size does not match content-length header - if size >= 0 and read < size: - raise ContentTooShortError( - "retrieval incomplete: got only %i out of %i bytes" - % (read, size), result) - - return result - - # Each method named open_ knows how to open that type of URL - - def _open_generic_http(self, connection_factory, url, data): - """Make an HTTP connection using connection_class. 
- - This is an internal method that should be called from - open_http() or open_https(). - - Arguments: - - connection_factory should take a host name and return an - HTTPConnection instance. - - url is the url to retrieval or a host, relative-path pair. - - data is payload for a POST request or None. - """ - - user_passwd = None - proxy_passwd= None - if isinstance(url, str): - host, selector = _splithost(url) - if host: - user_passwd, host = _splituser(host) - host = unquote(host) - realhost = host - else: - host, selector = url - # check whether the proxy contains authorization information - proxy_passwd, host = _splituser(host) - # now we proceed with the url we want to obtain - urltype, rest = _splittype(selector) - url = rest - user_passwd = None - if urltype.lower() != 'http': - realhost = None - else: - realhost, rest = _splithost(rest) - if realhost: - user_passwd, realhost = _splituser(realhost) - if user_passwd: - selector = "%s://%s%s" % (urltype, realhost, rest) - if proxy_bypass(realhost): - host = realhost - - if not host: raise OSError('http error', 'no host given') - - if proxy_passwd: - proxy_passwd = unquote(proxy_passwd) - proxy_auth = base64.b64encode(proxy_passwd.encode()).decode('ascii') - else: - proxy_auth = None - - if user_passwd: - user_passwd = unquote(user_passwd) - auth = base64.b64encode(user_passwd.encode()).decode('ascii') - else: - auth = None - http_conn = connection_factory(host) - headers = {} - if proxy_auth: - headers["Proxy-Authorization"] = "Basic %s" % proxy_auth - if auth: - headers["Authorization"] = "Basic %s" % auth - if realhost: - headers["Host"] = realhost - - # Add Connection:close as we don't support persistent connections yet. - # This helps in closing the socket and avoiding ResourceWarning - - headers["Connection"] = "close" - - for header, value in self.addheaders: - headers[header] = value - - if data is not None: - headers["Content-Type"] = "application/x-www-form-urlencoded" - http_conn.request("POST", selector, data, headers) - else: - http_conn.request("GET", selector, headers=headers) - - try: - response = http_conn.getresponse() - except http.client.BadStatusLine: - # something went wrong with the HTTP status line - raise URLError("http protocol error: bad status line") - - # According to RFC 2616, "2xx" code indicates that the client's - # request was successfully received, understood, and accepted. - if 200 <= response.status < 300: - return addinfourl(response, response.msg, "http:" + url, - response.status) - else: - return self.http_error( - url, response.fp, - response.status, response.reason, response.msg, data) - - def open_http(self, url, data=None): - """Use HTTP protocol.""" - return self._open_generic_http(http.client.HTTPConnection, url, data) - - def http_error(self, url, fp, errcode, errmsg, headers, data=None): - """Handle http errors. 
- - Derived class can override this, or provide specific handlers - named http_error_DDD where DDD is the 3-digit error code.""" - # First check if there's a specific handler for this error - name = 'http_error_%d' % errcode - if hasattr(self, name): - method = getattr(self, name) - if data is None: - result = method(url, fp, errcode, errmsg, headers) - else: - result = method(url, fp, errcode, errmsg, headers, data) - if result: return result - return self.http_error_default(url, fp, errcode, errmsg, headers) - - def http_error_default(self, url, fp, errcode, errmsg, headers): - """Default error handler: close the connection and raise OSError.""" - fp.close() - raise HTTPError(url, errcode, errmsg, headers, None) - - if _have_ssl: - def _https_connection(self, host): - if self.key_file or self.cert_file: - http_version = http.client.HTTPSConnection._http_vsn - context = http.client._create_https_context(http_version) - context.load_cert_chain(self.cert_file, self.key_file) - # cert and key file means the user wants to authenticate. - # enable TLS 1.3 PHA implicitly even for custom contexts. - if context.post_handshake_auth is not None: - context.post_handshake_auth = True - else: - context = None - return http.client.HTTPSConnection(host, context=context) - - def open_https(self, url, data=None): - """Use HTTPS protocol.""" - return self._open_generic_http(self._https_connection, url, data) - - def open_file(self, url): - """Use local file or FTP depending on form of URL.""" - if not isinstance(url, str): - raise URLError('file error: proxy support for file protocol currently not implemented') - if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/': - raise ValueError("file:// scheme is supported only on localhost") - else: - return self.open_local_file(url) - - def open_local_file(self, url): - """Use local file.""" - import email.utils - import mimetypes - host, file = _splithost(url) - localname = url2pathname(file) - try: - stats = os.stat(localname) - except OSError as e: - raise URLError(e.strerror, e.filename) - size = stats.st_size - modified = email.utils.formatdate(stats.st_mtime, usegmt=True) - mtype = mimetypes.guess_type(url)[0] - headers = email.message_from_string( - 'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' % - (mtype or 'text/plain', size, modified)) - if not host: - urlfile = file - if file[:1] == '/': - urlfile = 'file://' + file - return addinfourl(open(localname, 'rb'), headers, urlfile) - host, port = _splitport(host) - if (not port - and socket.gethostbyname(host) in ((localhost(),) + thishost())): - urlfile = file - if file[:1] == '/': - urlfile = 'file://' + file - elif file[:2] == './': - raise ValueError("local file url may start with / or file:. 
Unknown url of type: %s" % url) - return addinfourl(open(localname, 'rb'), headers, urlfile) - raise URLError('local file error: not on local host') - - def open_ftp(self, url): - """Use FTP protocol.""" - if not isinstance(url, str): - raise URLError('ftp error: proxy support for ftp protocol currently not implemented') - import mimetypes - host, path = _splithost(url) - if not host: raise URLError('ftp error: no host given') - host, port = _splitport(host) - user, host = _splituser(host) - if user: user, passwd = _splitpasswd(user) - else: passwd = None - host = unquote(host) - user = unquote(user or '') - passwd = unquote(passwd or '') - host = socket.gethostbyname(host) - if not port: - import ftplib - port = ftplib.FTP_PORT - else: - port = int(port) - path, attrs = _splitattr(path) - path = unquote(path) - dirs = path.split('/') - dirs, file = dirs[:-1], dirs[-1] - if dirs and not dirs[0]: dirs = dirs[1:] - if dirs and not dirs[0]: dirs[0] = '/' - key = user, host, port, '/'.join(dirs) - # XXX thread unsafe! - if len(self.ftpcache) > MAXFTPCACHE: - # Prune the cache, rather arbitrarily - for k in list(self.ftpcache): - if k != key: - v = self.ftpcache[k] - del self.ftpcache[k] - v.close() - try: - if key not in self.ftpcache: - self.ftpcache[key] = \ - ftpwrapper(user, passwd, host, port, dirs) - if not file: type = 'D' - else: type = 'I' - for attr in attrs: - attr, value = _splitvalue(attr) - if attr.lower() == 'type' and \ - value in ('a', 'A', 'i', 'I', 'd', 'D'): - type = value.upper() - (fp, retrlen) = self.ftpcache[key].retrfile(file, type) - mtype = mimetypes.guess_type("ftp:" + url)[0] - headers = "" - if mtype: - headers += "Content-Type: %s\n" % mtype - if retrlen is not None and retrlen >= 0: - headers += "Content-Length: %d\n" % retrlen - headers = email.message_from_string(headers) - return addinfourl(fp, headers, "ftp:" + url) - except ftperrors() as exp: - raise URLError(f'ftp error: {exp}') from exp - - def open_data(self, url, data=None): - """Use "data" URL.""" - if not isinstance(url, str): - raise URLError('data error: proxy support for data protocol currently not implemented') - # ignore POSTed data - # - # syntax of data URLs: - # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data - # mediatype := [ type "/" subtype ] *( ";" parameter ) - # data := *urlchar - # parameter := attribute "=" value - try: - [type, data] = url.split(',', 1) - except ValueError: - raise OSError('data error', 'bad data URL') - if not type: - type = 'text/plain;charset=US-ASCII' - semi = type.rfind(';') - if semi >= 0 and '=' not in type[semi:]: - encoding = type[semi+1:] - type = type[:semi] - else: - encoding = '' - msg = [] - msg.append('Date: %s'%time.strftime('%a, %d %b %Y %H:%M:%S GMT', - time.gmtime(time.time()))) - msg.append('Content-type: %s' % type) - if encoding == 'base64': - # XXX is this encoding/decoding ok? 
- data = base64.decodebytes(data.encode('ascii')).decode('latin-1') - else: - data = unquote(data) - msg.append('Content-Length: %d' % len(data)) - msg.append('') - msg.append(data) - msg = '\n'.join(msg) - headers = email.message_from_string(msg) - f = io.StringIO(msg) - #f.fileno = None # needed for addinfourl - return addinfourl(f, headers, url) - - -class FancyURLopener(URLopener): - """Derived class with handlers for errors we can handle (perhaps).""" - - def __init__(self, *args, **kwargs): - URLopener.__init__(self, *args, **kwargs) - self.auth_cache = {} - self.tries = 0 - self.maxtries = 10 - - def http_error_default(self, url, fp, errcode, errmsg, headers): - """Default error handling -- don't raise an exception.""" - return addinfourl(fp, headers, "http:" + url, errcode) - - def http_error_302(self, url, fp, errcode, errmsg, headers, data=None): - """Error 302 -- relocated (temporarily).""" - self.tries += 1 - try: - if self.maxtries and self.tries >= self.maxtries: - if hasattr(self, "http_error_500"): - meth = self.http_error_500 - else: - meth = self.http_error_default - return meth(url, fp, 500, - "Internal Server Error: Redirect Recursion", - headers) - result = self.redirect_internal(url, fp, errcode, errmsg, - headers, data) - return result - finally: - self.tries = 0 - - def redirect_internal(self, url, fp, errcode, errmsg, headers, data): - if 'location' in headers: - newurl = headers['location'] - elif 'uri' in headers: - newurl = headers['uri'] - else: - return - fp.close() - - # In case the server sent a relative URL, join with original: - newurl = urljoin(self.type + ":" + url, newurl) - - urlparts = urlparse(newurl) - - # For security reasons, we don't allow redirection to anything other - # than http, https and ftp. - - # We are using newer HTTPError with older redirect_internal method - # This older method will get deprecated in 3.3 - - if urlparts.scheme not in ('http', 'https', 'ftp', ''): - raise HTTPError(newurl, errcode, - errmsg + - " Redirection to url '%s' is not allowed." % newurl, - headers, fp) - - return self.open(newurl) - - def http_error_301(self, url, fp, errcode, errmsg, headers, data=None): - """Error 301 -- also relocated (permanently).""" - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - - def http_error_303(self, url, fp, errcode, errmsg, headers, data=None): - """Error 303 -- also relocated (essentially identical to 302).""" - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - - def http_error_307(self, url, fp, errcode, errmsg, headers, data=None): - """Error 307 -- relocated, but turn POST into error.""" - if data is None: - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - else: - return self.http_error_default(url, fp, errcode, errmsg, headers) - - def http_error_308(self, url, fp, errcode, errmsg, headers, data=None): - """Error 308 -- relocated, but turn POST into error.""" - if data is None: - return self.http_error_301(url, fp, errcode, errmsg, headers, data) - else: - return self.http_error_default(url, fp, errcode, errmsg, headers) - - def http_error_401(self, url, fp, errcode, errmsg, headers, data=None, - retry=False): - """Error 401 -- authentication required. 
- This function supports Basic authentication only.""" - if 'www-authenticate' not in headers: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - stuff = headers['www-authenticate'] - match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) - if not match: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - scheme, realm = match.groups() - if scheme.lower() != 'basic': - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - if not retry: - URLopener.http_error_default(self, url, fp, errcode, errmsg, - headers) - name = 'retry_' + self.type + '_basic_auth' - if data is None: - return getattr(self,name)(url, realm) - else: - return getattr(self,name)(url, realm, data) - - def http_error_407(self, url, fp, errcode, errmsg, headers, data=None, - retry=False): - """Error 407 -- proxy authentication required. - This function supports Basic authentication only.""" - if 'proxy-authenticate' not in headers: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - stuff = headers['proxy-authenticate'] - match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) - if not match: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - scheme, realm = match.groups() - if scheme.lower() != 'basic': - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - if not retry: - URLopener.http_error_default(self, url, fp, errcode, errmsg, - headers) - name = 'retry_proxy_' + self.type + '_basic_auth' - if data is None: - return getattr(self,name)(url, realm) - else: - return getattr(self,name)(url, realm, data) - - def retry_proxy_http_basic_auth(self, url, realm, data=None): - host, selector = _splithost(url) - newurl = 'http://' + host + selector - proxy = self.proxies['http'] - urltype, proxyhost = _splittype(proxy) - proxyhost, proxyselector = _splithost(proxyhost) - i = proxyhost.find('@') + 1 - proxyhost = proxyhost[i:] - user, passwd = self.get_user_passwd(proxyhost, realm, i) - if not (user or passwd): return None - proxyhost = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), proxyhost) - self.proxies['http'] = 'http://' + proxyhost + proxyselector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_proxy_https_basic_auth(self, url, realm, data=None): - host, selector = _splithost(url) - newurl = 'https://' + host + selector - proxy = self.proxies['https'] - urltype, proxyhost = _splittype(proxy) - proxyhost, proxyselector = _splithost(proxyhost) - i = proxyhost.find('@') + 1 - proxyhost = proxyhost[i:] - user, passwd = self.get_user_passwd(proxyhost, realm, i) - if not (user or passwd): return None - proxyhost = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), proxyhost) - self.proxies['https'] = 'https://' + proxyhost + proxyselector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_http_basic_auth(self, url, realm, data=None): - host, selector = _splithost(url) - i = host.find('@') + 1 - host = host[i:] - user, passwd = self.get_user_passwd(host, realm, i) - if not (user or passwd): return None - host = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), host) - newurl = 'http://' + host + selector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_https_basic_auth(self, url, realm, data=None): - host, selector = _splithost(url) - i = host.find('@') + 1 - host = host[i:] - user, 
passwd = self.get_user_passwd(host, realm, i) - if not (user or passwd): return None - host = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), host) - newurl = 'https://' + host + selector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def get_user_passwd(self, host, realm, clear_cache=0): - key = realm + '@' + host.lower() - if key in self.auth_cache: - if clear_cache: - del self.auth_cache[key] - else: - return self.auth_cache[key] - user, passwd = self.prompt_user_passwd(host, realm) - if user or passwd: self.auth_cache[key] = (user, passwd) - return user, passwd - - def prompt_user_passwd(self, host, realm): - """Override this in a GUI environment!""" - import getpass - try: - user = input("Enter username for %s at %s: " % (realm, host)) - passwd = getpass.getpass("Enter password for %s in %s at %s: " % - (user, realm, host)) - return user, passwd - except KeyboardInterrupt: - print() - return None, None - - -# Utility functions - -_localhost = None -def localhost(): - """Return the IP address of the magic hostname 'localhost'.""" - global _localhost - if _localhost is None: - _localhost = socket.gethostbyname('localhost') - return _localhost - -_thishost = None -def thishost(): - """Return the IP addresses of the current host.""" - global _thishost - if _thishost is None: - try: - _thishost = tuple(socket.gethostbyname_ex(socket.gethostname())[2]) - except socket.gaierror: - _thishost = tuple(socket.gethostbyname_ex('localhost')[2]) - return _thishost - -_ftperrors = None -def ftperrors(): - """Return the set of errors raised by the FTP class.""" - global _ftperrors - if _ftperrors is None: - import ftplib - _ftperrors = ftplib.all_errors - return _ftperrors - -_noheaders = None -def noheaders(): - """Return an empty email Message object.""" - global _noheaders - if _noheaders is None: - _noheaders = email.message_from_string("") - return _noheaders - - -# Utility classes - -class ftpwrapper: - """Class used by open_ftp() for cache of open FTP connections.""" - - def __init__(self, user, passwd, host, port, dirs, timeout=None, - persistent=True): - self.user = user - self.passwd = passwd - self.host = host - self.port = port - self.dirs = dirs - self.timeout = timeout - self.refcount = 0 - self.keepalive = persistent - try: - self.init() - except: - self.close() - raise - - def init(self): - import ftplib - self.busy = 0 - self.ftp = ftplib.FTP() - self.ftp.connect(self.host, self.port, self.timeout) - self.ftp.login(self.user, self.passwd) - _target = '/'.join(self.dirs) - self.ftp.cwd(_target) - - def retrfile(self, file, type): - import ftplib - self.endtransfer() - if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1 - else: cmd = 'TYPE ' + type; isdir = 0 - try: - self.ftp.voidcmd(cmd) - except ftplib.all_errors: - self.init() - self.ftp.voidcmd(cmd) - conn = None - if file and not isdir: - # Try to retrieve as a file - try: - cmd = 'RETR ' + file - conn, retrlen = self.ftp.ntransfercmd(cmd) - except ftplib.error_perm as reason: - if str(reason)[:3] != '550': - raise URLError(f'ftp error: {reason}') from reason - if not conn: - # Set transfer mode to ASCII! - self.ftp.voidcmd('TYPE A') - # Try a directory listing. Verify that directory exists. 
- if file: - pwd = self.ftp.pwd() - try: - try: - self.ftp.cwd(file) - except ftplib.error_perm as reason: - raise URLError('ftp error: %r' % reason) from reason - finally: - self.ftp.cwd(pwd) - cmd = 'LIST ' + file - else: - cmd = 'LIST' - conn, retrlen = self.ftp.ntransfercmd(cmd) - self.busy = 1 - - ftpobj = addclosehook(conn.makefile('rb'), self.file_close) - self.refcount += 1 - conn.close() - # Pass back both a suitably decorated object and a retrieval length - return (ftpobj, retrlen) - - def endtransfer(self): - if not self.busy: - return - self.busy = 0 - try: - self.ftp.voidresp() - except ftperrors(): - pass - - def close(self): - self.keepalive = False - if self.refcount <= 0: - self.real_close() - - def file_close(self): - self.endtransfer() - self.refcount -= 1 - if self.refcount <= 0 and not self.keepalive: - self.real_close() - - def real_close(self): - self.endtransfer() - try: - self.ftp.close() - except ftperrors(): - pass - -# Proxy handling -def getproxies_environment(): - """Return a dictionary of scheme -> proxy server URL mappings. - - Scan the environment for variables named _proxy; - this seems to be the standard convention. If you need a - different way, you can pass a proxies dictionary to the - [Fancy]URLopener constructor. - """ - # in order to prefer lowercase variables, process environment in - # two passes: first matches any, second pass matches lowercase only - - # select only environment variables which end in (after making lowercase) _proxy - proxies = {} - environment = [] - for name in os.environ: - # fast screen underscore position before more expensive case-folding - if len(name) > 5 and name[-6] == "_" and name[-5:].lower() == "proxy": - value = os.environ[name] - proxy_name = name[:-6].lower() - environment.append((name, value, proxy_name)) - if value: - proxies[proxy_name] = value - # CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY - # (non-all-lowercase) as it may be set from the web server by a "Proxy:" - # header from the client - # If "proxy" is lowercase, it will still be used thanks to the next block - if 'REQUEST_METHOD' in os.environ: - proxies.pop('http', None) - for name, value, proxy_name in environment: - # not case-folded, checking here for lower-case env vars only - if name[-6:] == '_proxy': - if value: - proxies[proxy_name] = value - else: - proxies.pop(proxy_name, None) - return proxies - -def proxy_bypass_environment(host, proxies=None): - """Test if proxies should not be used for a particular host. - - Checks the proxy dict for the value of no_proxy, which should - be a list of comma separated DNS suffixes, or '*' for all hosts. - - """ - if proxies is None: - proxies = getproxies_environment() - # don't bypass, if no_proxy isn't specified - try: - no_proxy = proxies['no'] - except KeyError: - return False - # '*' is special case for always bypass - if no_proxy == '*': - return True - host = host.lower() - # strip port off host - hostonly, port = _splitport(host) - # check if the host ends with any of the DNS suffixes - for name in no_proxy.split(','): - name = name.strip() - if name: - name = name.lstrip('.') # ignore leading dots - name = name.lower() - if hostonly == name or host == name: - return True - name = '.' 
+ name - if hostonly.endswith(name) or host.endswith(name): - return True - # otherwise, don't bypass - return False - - -# This code tests an OSX specific data structure but is testable on all -# platforms -def _proxy_bypass_macosx_sysconf(host, proxy_settings): - """ - Return True iff this host shouldn't be accessed using a proxy - - This function uses the MacOSX framework SystemConfiguration - to fetch the proxy information. - - proxy_settings come from _scproxy._get_proxy_settings or get mocked ie: - { 'exclude_simple': bool, - 'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.1', '10.0/16'] - } - """ - from fnmatch import fnmatch - from ipaddress import AddressValueError, IPv4Address - - hostonly, port = _splitport(host) - - def ip2num(ipAddr): - parts = ipAddr.split('.') - parts = list(map(int, parts)) - if len(parts) != 4: - parts = (parts + [0, 0, 0, 0])[:4] - return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3] - - # Check for simple host names: - if '.' not in host: - if proxy_settings['exclude_simple']: - return True - - hostIP = None - try: - hostIP = int(IPv4Address(hostonly)) - except AddressValueError: - pass - - for value in proxy_settings.get('exceptions', ()): - # Items in the list are strings like these: *.local, 169.254/16 - if not value: continue - - m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value) - if m is not None and hostIP is not None: - base = ip2num(m.group(1)) - mask = m.group(2) - if mask is None: - mask = 8 * (m.group(1).count('.') + 1) - else: - mask = int(mask[1:]) - - if mask < 0 or mask > 32: - # System libraries ignore invalid prefix lengths - continue - - mask = 32 - mask - - if (hostIP >> mask) == (base >> mask): - return True - - elif fnmatch(host, value): - return True - - return False - - -# Same as _proxy_bypass_macosx_sysconf, testable on all platforms -def _proxy_bypass_winreg_override(host, override): - """Return True if the host should bypass the proxy server. - - The proxy override list is obtained from the Windows - Internet settings proxy override registry value. - - An example of a proxy override value is: - "www.example.com;*.example.net; 192.168.0.1" - """ - from fnmatch import fnmatch - - host, _ = _splitport(host) - proxy_override = override.split(';') - for test in proxy_override: - test = test.strip() - # "" should bypass the proxy server for all intranet addresses - if test == '': - if '.' not in host: - return True - elif fnmatch(host, test): - return True - return False - - -if sys.platform == 'darwin': - from _scproxy import _get_proxy_settings, _get_proxies - - def proxy_bypass_macosx_sysconf(host): - proxy_settings = _get_proxy_settings() - return _proxy_bypass_macosx_sysconf(host, proxy_settings) - - def getproxies_macosx_sysconf(): - """Return a dictionary of scheme -> proxy server URL mappings. - - This function uses the MacOSX framework SystemConfiguration - to fetch the proxy information. - """ - return _get_proxies() - - - - def proxy_bypass(host): - """Return True, if host should be bypassed. - - Checks proxy settings gathered from the environment, if specified, - or from the MacOSX framework SystemConfiguration. - - """ - proxies = getproxies_environment() - if proxies: - return proxy_bypass_environment(host, proxies) - else: - return proxy_bypass_macosx_sysconf(host) - - def getproxies(): - return getproxies_environment() or getproxies_macosx_sysconf() - - -elif os.name == 'nt': - def getproxies_registry(): - """Return a dictionary of scheme -> proxy server URL mappings. 
- - Win32 uses the registry to store proxies. - - """ - proxies = {} - try: - import winreg - except ImportError: - # Std module, so should be around - but you never know! - return proxies - try: - internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') - proxyEnable = winreg.QueryValueEx(internetSettings, - 'ProxyEnable')[0] - if proxyEnable: - # Returned as Unicode but problems if not converted to ASCII - proxyServer = str(winreg.QueryValueEx(internetSettings, - 'ProxyServer')[0]) - if '=' not in proxyServer and ';' not in proxyServer: - # Use one setting for all protocols. - proxyServer = 'http={0};https={0};ftp={0}'.format(proxyServer) - for p in proxyServer.split(';'): - protocol, address = p.split('=', 1) - # See if address has a type:// prefix - if not re.match('(?:[^/:]+)://', address): - # Add type:// prefix to address without specifying type - if protocol in ('http', 'https', 'ftp'): - # The default proxy type of Windows is HTTP - address = 'http://' + address - elif protocol == 'socks': - address = 'socks://' + address - proxies[protocol] = address - # Use SOCKS proxy for HTTP(S) protocols - if proxies.get('socks'): - # The default SOCKS proxy type of Windows is SOCKS4 - address = re.sub(r'^socks://', 'socks4://', proxies['socks']) - proxies['http'] = proxies.get('http') or address - proxies['https'] = proxies.get('https') or address - internetSettings.Close() - except (OSError, ValueError, TypeError): - # Either registry key not found etc, or the value in an - # unexpected format. - # proxies already set up to be empty so nothing to do - pass - return proxies - - def getproxies(): - """Return a dictionary of scheme -> proxy server URL mappings. - - Returns settings gathered from the environment, if specified, - or the registry. - - """ - return getproxies_environment() or getproxies_registry() - - def proxy_bypass_registry(host): - try: - import winreg - except ImportError: - # Std modules, so should be around - but you never know! - return False - try: - internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') - proxyEnable = winreg.QueryValueEx(internetSettings, - 'ProxyEnable')[0] - proxyOverride = str(winreg.QueryValueEx(internetSettings, - 'ProxyOverride')[0]) - # ^^^^ Returned as Unicode but problems if not converted to ASCII - except OSError: - return False - if not proxyEnable or not proxyOverride: - return False - return _proxy_bypass_winreg_override(host, proxyOverride) - - def proxy_bypass(host): - """Return True, if host should be bypassed. - - Checks proxy settings gathered from the environment, if specified, - or the registry. 
- - """ - proxies = getproxies_environment() - if proxies: - return proxy_bypass_environment(host, proxies) - else: - return proxy_bypass_registry(host) - -else: - # By default use environment variables - getproxies = getproxies_environment - proxy_bypass = proxy_bypass_environment diff --git a/Python313_13_x64_Template/Lib/urllib/robotparser.py b/Python313_13_x64_Template/Lib/urllib/robotparser.py deleted file mode 100644 index 63689816..00000000 --- a/Python313_13_x64_Template/Lib/urllib/robotparser.py +++ /dev/null @@ -1,286 +0,0 @@ -""" robotparser.py - - Copyright (C) 2000 Bastian Kleineidam - - You can choose between two licenses when using this package: - 1) GNU GPLv2 - 2) PSF license for Python 2.2 - - The robots.txt Exclusion Protocol is implemented as specified in - http://www.robotstxt.org/norobots-rfc.txt -""" - -import collections -import re -import urllib.error -import urllib.parse -import urllib.request - -__all__ = ["RobotFileParser"] - -RequestRate = collections.namedtuple("RequestRate", "requests seconds") - - -def normalize(path): - unquoted = urllib.parse.unquote(path, errors='surrogateescape') - return urllib.parse.quote(unquoted, errors='surrogateescape') - -def normalize_path(path): - path, sep, query = path.partition('?') - path = normalize(path) - if sep: - query = re.sub(r'[^=&]+', lambda m: normalize(m[0]), query) - path += '?' + query - return path - - -class RobotFileParser: - """ This class provides a set of methods to read, parse and answer - questions about a single robots.txt file. - - """ - - def __init__(self, url=''): - self.entries = [] - self.sitemaps = [] - self.default_entry = None - self.disallow_all = False - self.allow_all = False - self.set_url(url) - self.last_checked = 0 - - def mtime(self): - """Returns the time the robots.txt file was last fetched. - - This is useful for long-running web spiders that need to - check for new robots.txt files periodically. - - """ - return self.last_checked - - def modified(self): - """Sets the time the robots.txt file was last fetched to the - current time. - - """ - import time - self.last_checked = time.time() - - def set_url(self, url): - """Sets the URL referring to a robots.txt file.""" - self.url = url - self.host, self.path = urllib.parse.urlsplit(url)[1:3] - - def read(self): - """Reads the robots.txt URL and feeds it to the parser.""" - try: - f = urllib.request.urlopen(self.url) - except urllib.error.HTTPError as err: - if err.code in (401, 403): - self.disallow_all = True - elif err.code >= 400 and err.code < 500: - self.allow_all = True - err.close() - else: - raw = f.read() - self.parse(raw.decode("utf-8", "surrogateescape").splitlines()) - - def _add_entry(self, entry): - if "*" in entry.useragents: - # the default entry is considered last - if self.default_entry is None: - # the first default entry wins - self.default_entry = entry - else: - self.entries.append(entry) - - def parse(self, lines): - """Parse the input lines from a robots.txt file. - - We allow that a user-agent: line is not preceded by - one or more blank lines. 
- """ - # states: - # 0: start state - # 1: saw user-agent line - # 2: saw an allow or disallow line - state = 0 - entry = Entry() - - self.modified() - for line in lines: - if not line: - if state == 1: - entry = Entry() - state = 0 - elif state == 2: - self._add_entry(entry) - entry = Entry() - state = 0 - # remove optional comment and strip line - i = line.find('#') - if i >= 0: - line = line[:i] - line = line.strip() - if not line: - continue - line = line.split(':', 1) - if len(line) == 2: - line[0] = line[0].strip().lower() - line[1] = line[1].strip() - if line[0] == "user-agent": - if state == 2: - self._add_entry(entry) - entry = Entry() - entry.useragents.append(line[1]) - state = 1 - elif line[0] == "disallow": - if state != 0: - entry.rulelines.append(RuleLine(line[1], False)) - state = 2 - elif line[0] == "allow": - if state != 0: - entry.rulelines.append(RuleLine(line[1], True)) - state = 2 - elif line[0] == "crawl-delay": - if state != 0: - # before trying to convert to int we need to make - # sure that robots.txt has valid syntax otherwise - # it will crash - if line[1].strip().isdigit(): - entry.delay = int(line[1]) - state = 2 - elif line[0] == "request-rate": - if state != 0: - numbers = line[1].split('/') - # check if all values are sane - if (len(numbers) == 2 and numbers[0].strip().isdigit() - and numbers[1].strip().isdigit()): - entry.req_rate = RequestRate(int(numbers[0]), int(numbers[1])) - state = 2 - elif line[0] == "sitemap": - # According to http://www.sitemaps.org/protocol.html - # "This directive is independent of the user-agent line, - # so it doesn't matter where you place it in your file." - # Therefore we do not change the state of the parser. - self.sitemaps.append(line[1]) - if state == 2: - self._add_entry(entry) - - def can_fetch(self, useragent, url): - """using the parsed robots.txt decide if useragent can fetch url""" - if self.disallow_all: - return False - if self.allow_all: - return True - # Until the robots.txt file has been read or found not - # to exist, we must assume that no url is allowable. - # This prevents false positives when a user erroneously - # calls can_fetch() before calling read(). 
- if not self.last_checked: - return False - # search for given user agent matches - # the first match counts - parsed_url = urllib.parse.urlsplit(url) - url = urllib.parse.urlunsplit(('', '', *parsed_url[2:])) - url = normalize_path(url) - if not url: - url = "/" - for entry in self.entries: - if entry.applies_to(useragent): - return entry.allowance(url) - # try the default entry last - if self.default_entry: - return self.default_entry.allowance(url) - # agent not found ==> access granted - return True - - def crawl_delay(self, useragent): - if not self.mtime(): - return None - for entry in self.entries: - if entry.applies_to(useragent): - return entry.delay - if self.default_entry: - return self.default_entry.delay - return None - - def request_rate(self, useragent): - if not self.mtime(): - return None - for entry in self.entries: - if entry.applies_to(useragent): - return entry.req_rate - if self.default_entry: - return self.default_entry.req_rate - return None - - def site_maps(self): - if not self.sitemaps: - return None - return self.sitemaps - - def __str__(self): - entries = self.entries - if self.default_entry is not None: - entries = entries + [self.default_entry] - return '\n\n'.join(map(str, entries)) - -class RuleLine: - """A rule line is a single "Allow:" (allowance==True) or "Disallow:" - (allowance==False) followed by a path.""" - def __init__(self, path, allowance): - if path == '' and not allowance: - # an empty value means allow all - allowance = True - self.path = normalize_path(path) - self.allowance = allowance - - def applies_to(self, filename): - return self.path == "*" or filename.startswith(self.path) - - def __str__(self): - return ("Allow" if self.allowance else "Disallow") + ": " + self.path - - -class Entry: - """An entry has one or more user-agents and zero or more rulelines""" - def __init__(self): - self.useragents = [] - self.rulelines = [] - self.delay = None - self.req_rate = None - - def __str__(self): - ret = [] - for agent in self.useragents: - ret.append(f"User-agent: {agent}") - if self.delay is not None: - ret.append(f"Crawl-delay: {self.delay}") - if self.req_rate is not None: - rate = self.req_rate - ret.append(f"Request-rate: {rate.requests}/{rate.seconds}") - ret.extend(map(str, self.rulelines)) - return '\n'.join(ret) - - def applies_to(self, useragent): - """check if this entry applies to the specified agent""" - # split the name token and make it lower case - useragent = useragent.split("/")[0].lower() - for agent in self.useragents: - if agent == '*': - # we have the catch-all agent - return True - agent = agent.lower() - if agent in useragent: - return True - return False - - def allowance(self, filename): - """Preconditions: - - our agent applies to this entry - - filename is URL encoded""" - for line in self.rulelines: - if line.applies_to(filename): - return line.allowance - return True diff --git a/Python313_13_x64_Template/Lib/uuid.py b/Python313_13_x64_Template/Lib/uuid.py deleted file mode 100644 index 55f46eb5..00000000 --- a/Python313_13_x64_Template/Lib/uuid.py +++ /dev/null @@ -1,784 +0,0 @@ -r"""UUID objects (universally unique identifiers) according to RFC 4122. - -This module provides immutable UUID objects (class UUID) and the functions -uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5 -UUIDs as specified in RFC 4122. - -If all you want is a unique ID, you should probably call uuid1() or uuid4(). 
-Note that uuid1() may compromise privacy since it creates a UUID containing -the computer's network address. uuid4() creates a random UUID. - -Typical usage: - - >>> import uuid - - # make a UUID based on the host ID and current time - >>> uuid.uuid1() # doctest: +SKIP - UUID('a8098c1a-f86e-11da-bd1a-00112444be1e') - - # make a UUID using an MD5 hash of a namespace UUID and a name - >>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org') - UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e') - - # make a random UUID - >>> uuid.uuid4() # doctest: +SKIP - UUID('16fd2706-8baf-433b-82eb-8c7fada847da') - - # make a UUID using a SHA-1 hash of a namespace UUID and a name - >>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org') - UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d') - - # make a UUID from a string of hex digits (braces and hyphens ignored) - >>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}') - - # convert a UUID to a string of hex digits in standard form - >>> str(x) - '00010203-0405-0607-0809-0a0b0c0d0e0f' - - # get the raw 16 bytes of the UUID - >>> x.bytes - b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f' - - # make a UUID from a 16-byte string - >>> uuid.UUID(bytes=x.bytes) - UUID('00010203-0405-0607-0809-0a0b0c0d0e0f') -""" - -import os -import sys - -from enum import Enum, _simple_enum - - -__author__ = 'Ka-Ping Yee ' - -# The recognized platforms - known behaviors -if sys.platform in {'win32', 'darwin', 'emscripten', 'wasi'}: - _AIX = _LINUX = False -elif sys.platform == 'linux': - _LINUX = True - _AIX = False -else: - import platform - _platform_system = platform.system() - _AIX = _platform_system == 'AIX' - _LINUX = _platform_system in ('Linux', 'Android') - -_MAC_DELIM = b':' -_MAC_OMITS_LEADING_ZEROES = False -if _AIX: - _MAC_DELIM = b'.' - _MAC_OMITS_LEADING_ZEROES = True - -RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [ - 'reserved for NCS compatibility', 'specified in RFC 4122', - 'reserved for Microsoft compatibility', 'reserved for future definition'] - -int_ = int # The built-in int type -bytes_ = bytes # The built-in bytes type - - -@_simple_enum(Enum) -class SafeUUID: - safe = 0 - unsafe = -1 - unknown = None - - -class UUID: - """Instances of the UUID class represent UUIDs as specified in RFC 4122. - UUID objects are immutable, hashable, and usable as dictionary keys. - Converting a UUID to a string with str() yields something in the form - '12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts - five possible forms: a similar string of hexadecimal digits, or a tuple - of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and - 48-bit values respectively) as an argument named 'fields', or a string - of 16 bytes (with all the integer fields in big-endian order) as an - argument named 'bytes', or a string of 16 bytes (with the first three - fields in little-endian order) as an argument named 'bytes_le', or a - single 128-bit integer as an argument named 'int'. 
- - UUIDs have these read-only attributes: - - bytes the UUID as a 16-byte string (containing the six - integer fields in big-endian byte order) - - bytes_le the UUID as a 16-byte string (with time_low, time_mid, - and time_hi_version in little-endian byte order) - - fields a tuple of the six integer fields of the UUID, - which are also available as six individual attributes - and two derived attributes: - - time_low the first 32 bits of the UUID - time_mid the next 16 bits of the UUID - time_hi_version the next 16 bits of the UUID - clock_seq_hi_variant the next 8 bits of the UUID - clock_seq_low the next 8 bits of the UUID - node the last 48 bits of the UUID - - time the 60-bit timestamp - clock_seq the 14-bit sequence number - - hex the UUID as a 32-character hexadecimal string - - int the UUID as a 128-bit integer - - urn the UUID as a URN as specified in RFC 4122 - - variant the UUID variant (one of the constants RESERVED_NCS, - RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE) - - version the UUID version number (1 through 5, meaningful only - when the variant is RFC_4122) - - is_safe An enum indicating whether the UUID has been generated in - a way that is safe for multiprocessing applications, via - uuid_generate_time_safe(3). - """ - - __slots__ = ('int', 'is_safe', '__weakref__') - - def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, - int=None, version=None, - *, is_safe=SafeUUID.unknown): - r"""Create a UUID from either a string of 32 hexadecimal digits, - a string of 16 bytes as the 'bytes' argument, a string of 16 bytes - in little-endian order as the 'bytes_le' argument, a tuple of six - integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version, - 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as - the 'fields' argument, or a single 128-bit integer as the 'int' - argument. When a string of hex digits is given, curly braces, - hyphens, and a URN prefix are all optional. For example, these - expressions all yield the same UUID: - - UUID('{12345678-1234-5678-1234-567812345678}') - UUID('12345678123456781234567812345678') - UUID('urn:uuid:12345678-1234-5678-1234-567812345678') - UUID(bytes='\x12\x34\x56\x78'*4) - UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' + - '\x12\x34\x56\x78\x12\x34\x56\x78') - UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678)) - UUID(int=0x12345678123456781234567812345678) - - Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must - be given. The 'version' argument is optional; if given, the resulting - UUID will have its variant and version set according to RFC 4122, - overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. - - is_safe is an enum exposed as an attribute on the instance. It - indicates whether the UUID has been generated in a way that is safe - for multiprocessing applications, via uuid_generate_time_safe(3). 
- """ - - if [hex, bytes, bytes_le, fields, int].count(None) != 4: - raise TypeError('one of the hex, bytes, bytes_le, fields, ' - 'or int arguments must be given') - if hex is not None: - hex = hex.replace('urn:', '').replace('uuid:', '') - hex = hex.strip('{}').replace('-', '') - if len(hex) != 32: - raise ValueError('badly formed hexadecimal UUID string') - int = int_(hex, 16) - if bytes_le is not None: - if len(bytes_le) != 16: - raise ValueError('bytes_le is not a 16-char string') - bytes = (bytes_le[4-1::-1] + bytes_le[6-1:4-1:-1] + - bytes_le[8-1:6-1:-1] + bytes_le[8:]) - if bytes is not None: - if len(bytes) != 16: - raise ValueError('bytes is not a 16-char string') - assert isinstance(bytes, bytes_), repr(bytes) - int = int_.from_bytes(bytes) # big endian - if fields is not None: - if len(fields) != 6: - raise ValueError('fields is not a 6-tuple') - (time_low, time_mid, time_hi_version, - clock_seq_hi_variant, clock_seq_low, node) = fields - if not 0 <= time_low < 1<<32: - raise ValueError('field 1 out of range (need a 32-bit value)') - if not 0 <= time_mid < 1<<16: - raise ValueError('field 2 out of range (need a 16-bit value)') - if not 0 <= time_hi_version < 1<<16: - raise ValueError('field 3 out of range (need a 16-bit value)') - if not 0 <= clock_seq_hi_variant < 1<<8: - raise ValueError('field 4 out of range (need an 8-bit value)') - if not 0 <= clock_seq_low < 1<<8: - raise ValueError('field 5 out of range (need an 8-bit value)') - if not 0 <= node < 1<<48: - raise ValueError('field 6 out of range (need a 48-bit value)') - clock_seq = (clock_seq_hi_variant << 8) | clock_seq_low - int = ((time_low << 96) | (time_mid << 80) | - (time_hi_version << 64) | (clock_seq << 48) | node) - if int is not None: - if not 0 <= int < 1<<128: - raise ValueError('int is out of range (need a 128-bit value)') - if version is not None: - if not 1 <= version <= 5: - raise ValueError('illegal version number') - # Set the variant to RFC 4122. - int &= ~(0xc000 << 48) - int |= 0x8000 << 48 - # Set the version number. - int &= ~(0xf000 << 64) - int |= version << 76 - object.__setattr__(self, 'int', int) - object.__setattr__(self, 'is_safe', is_safe) - - def __getstate__(self): - d = {'int': self.int} - if self.is_safe != SafeUUID.unknown: - # is_safe is a SafeUUID instance. Return just its value, so that - # it can be un-pickled in older Python versions without SafeUUID. - d['is_safe'] = self.is_safe.value - return d - - def __setstate__(self, state): - object.__setattr__(self, 'int', state['int']) - # is_safe was added in 3.7; it is also omitted when it is "unknown" - object.__setattr__(self, 'is_safe', - SafeUUID(state['is_safe']) - if 'is_safe' in state else SafeUUID.unknown) - - def __eq__(self, other): - if isinstance(other, UUID): - return self.int == other.int - return NotImplemented - - # Q. What's the value of being able to sort UUIDs? - # A. Use them as keys in a B-Tree or similar mapping. 
- - def __lt__(self, other): - if isinstance(other, UUID): - return self.int < other.int - return NotImplemented - - def __gt__(self, other): - if isinstance(other, UUID): - return self.int > other.int - return NotImplemented - - def __le__(self, other): - if isinstance(other, UUID): - return self.int <= other.int - return NotImplemented - - def __ge__(self, other): - if isinstance(other, UUID): - return self.int >= other.int - return NotImplemented - - def __hash__(self): - return hash(self.int) - - def __int__(self): - return self.int - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, str(self)) - - def __setattr__(self, name, value): - raise TypeError('UUID objects are immutable') - - def __str__(self): - hex = '%032x' % self.int - return '%s-%s-%s-%s-%s' % ( - hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:]) - - @property - def bytes(self): - return self.int.to_bytes(16) # big endian - - @property - def bytes_le(self): - bytes = self.bytes - return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] + - bytes[8:]) - - @property - def fields(self): - return (self.time_low, self.time_mid, self.time_hi_version, - self.clock_seq_hi_variant, self.clock_seq_low, self.node) - - @property - def time_low(self): - return self.int >> 96 - - @property - def time_mid(self): - return (self.int >> 80) & 0xffff - - @property - def time_hi_version(self): - return (self.int >> 64) & 0xffff - - @property - def clock_seq_hi_variant(self): - return (self.int >> 56) & 0xff - - @property - def clock_seq_low(self): - return (self.int >> 48) & 0xff - - @property - def time(self): - return (((self.time_hi_version & 0x0fff) << 48) | - (self.time_mid << 32) | self.time_low) - - @property - def clock_seq(self): - return (((self.clock_seq_hi_variant & 0x3f) << 8) | - self.clock_seq_low) - - @property - def node(self): - return self.int & 0xffffffffffff - - @property - def hex(self): - return '%032x' % self.int - - @property - def urn(self): - return 'urn:uuid:' + str(self) - - @property - def variant(self): - if not self.int & (0x8000 << 48): - return RESERVED_NCS - elif not self.int & (0x4000 << 48): - return RFC_4122 - elif not self.int & (0x2000 << 48): - return RESERVED_MICROSOFT - else: - return RESERVED_FUTURE - - @property - def version(self): - # The version bits are only meaningful for RFC 4122 UUIDs. - if self.variant == RFC_4122: - return int((self.int >> 76) & 0xf) - - -def _get_command_stdout(command, *args): - import io, os, shutil, subprocess - - try: - path_dirs = os.environ.get('PATH', os.defpath).split(os.pathsep) - path_dirs.extend(['/sbin', '/usr/sbin']) - executable = shutil.which(command, path=os.pathsep.join(path_dirs)) - if executable is None: - return None - # LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output - # on stderr (Note: we don't have an example where the words we search - # for are actually localized, but in theory some system could do so.) - env = dict(os.environ) - env['LC_ALL'] = 'C' - # Empty strings will be quoted by popen so we should just omit it - if args != ('',): - command = (executable, *args) - else: - command = (executable,) - proc = subprocess.Popen(command, - stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL, - env=env) - if not proc: - return None - stdout, stderr = proc.communicate() - return io.BytesIO(stdout) - except (OSError, subprocess.SubprocessError): - return None - - -# For MAC (a.k.a.
IEEE 802, or EUI-48) addresses, the second least significant -# bit of the first octet signifies whether the MAC address is universally (0) -# or locally (1) administered. Network cards from hardware manufacturers will -# always be universally administered to guarantee global uniqueness of the MAC -# address, but any particular machine may have other interfaces which are -# locally administered. An example of the latter is the bridge interface to -# the Touch Bar on MacBook Pros. -# -# This bit works out to be the 42nd bit counting from 1 being the least -# significant, or 1<<41. We'll prefer universally administered MAC addresses -# over locally administered ones since the former are globally unique, but -# we'll return the first of the latter found if that's all the machine has. -# -# See https://en.wikipedia.org/wiki/MAC_address#Universal_vs._local_(U/L_bit) - -def _is_universal(mac): - return not (mac & (1 << 41)) - - -def _find_mac_near_keyword(command, args, keywords, get_word_index): - """Searches a command's output for a MAC address near a keyword. - - Each line of words in the output is case-insensitively searched for - any of the given keywords. Upon a match, get_word_index is invoked - to pick a word from the line, given the index of the match. For - example, lambda i: 0 would get the first word on the line, while - lambda i: i - 1 would get the word preceding the keyword. - """ - stdout = _get_command_stdout(command, args) - if stdout is None: - return None - - first_local_mac = None - for line in stdout: - words = line.lower().rstrip().split() - for i in range(len(words)): - if words[i] in keywords: - try: - word = words[get_word_index(i)] - mac = int(word.replace(_MAC_DELIM, b''), 16) - except (ValueError, IndexError): - # Virtual interfaces, such as those provided by - # VPNs, do not have a colon-delimited MAC address - # as expected, but a 16-byte HWAddr separated by - # dashes. These should be ignored in favor of a - # real MAC address - pass - else: - if _is_universal(mac): - return mac - first_local_mac = first_local_mac or mac - return first_local_mac or None - - -def _parse_mac(word): - # Accept 'HH:HH:HH:HH:HH:HH' MAC address (ex: '52:54:00:9d:0e:67'), - # but reject IPv6 address (ex: 'fe80::5054:ff:fe9' or '123:2:3:4:5:6:7:8'). - # - # Virtual interfaces, such as those provided by VPNs, do not have a - # colon-delimited MAC address as expected, but a 16-byte HWAddr separated - # by dashes. These should be ignored in favor of a real MAC address - parts = word.split(_MAC_DELIM) - if len(parts) != 6: - return - if _MAC_OMITS_LEADING_ZEROES: - # (Only) on AIX the macaddr value given is not prefixed by 0, e.g. - # en0 1500 link#2 fa.bc.de.f7.62.4 110854824 0 160133733 0 0 - # not - # en0 1500 link#2 fa.bc.de.f7.62.04 110854824 0 160133733 0 0 - if not all(1 <= len(part) <= 2 for part in parts): - return - hexstr = b''.join(part.rjust(2, b'0') for part in parts) - else: - if not all(len(part) == 2 for part in parts): - return - hexstr = b''.join(parts) - try: - return int(hexstr, 16) - except ValueError: - return - - -def _find_mac_under_heading(command, args, heading): - """Looks for a MAC address under a heading in a command's output. - - The first line of words in the output is searched for the given - heading. Words at the same word index as the heading in subsequent - lines are then examined to see if they look like MAC addresses. 
- """ - stdout = _get_command_stdout(command, args) - if stdout is None: - return None - - keywords = stdout.readline().rstrip().split() - try: - column_index = keywords.index(heading) - except ValueError: - return None - - first_local_mac = None - for line in stdout: - words = line.rstrip().split() - try: - word = words[column_index] - except IndexError: - continue - - mac = _parse_mac(word) - if mac is None: - continue - if _is_universal(mac): - return mac - if first_local_mac is None: - first_local_mac = mac - - return first_local_mac - - -# The following functions call external programs to 'get' a macaddr value to -# be used as basis for an uuid -def _ifconfig_getnode(): - """Get the hardware address on Unix by running ifconfig.""" - # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes. - keywords = (b'hwaddr', b'ether', b'address:', b'lladdr') - for args in ('', '-a', '-av'): - mac = _find_mac_near_keyword('ifconfig', args, keywords, lambda i: i+1) - if mac: - return mac - return None - -def _ip_getnode(): - """Get the hardware address on Unix by running ip.""" - # This works on Linux with iproute2. - mac = _find_mac_near_keyword('ip', 'link', [b'link/ether'], lambda i: i+1) - if mac: - return mac - return None - -def _arp_getnode(): - """Get the hardware address on Unix by running arp.""" - import os, socket - if not hasattr(socket, "gethostbyname"): - return None - try: - ip_addr = socket.gethostbyname(socket.gethostname()) - except OSError: - return None - - # Try getting the MAC addr from arp based on our IP address (Solaris). - mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1) - if mac: - return mac - - # This works on OpenBSD - mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: i+1) - if mac: - return mac - - # This works on Linux, FreeBSD and NetBSD - mac = _find_mac_near_keyword('arp', '-an', [os.fsencode('(%s)' % ip_addr)], - lambda i: i+2) - # Return None instead of 0. - if mac: - return mac - return None - -def _lanscan_getnode(): - """Get the hardware address on Unix by running lanscan.""" - # This might work on HP-UX. - return _find_mac_near_keyword('lanscan', '-ai', [b'lan0'], lambda i: 0) - -def _netstat_getnode(): - """Get the hardware address on Unix by running netstat.""" - # This works on AIX and might work on Tru64 UNIX. - return _find_mac_under_heading('netstat', '-ian', b'Address') - - -# Import optional C extension at toplevel, to help disabling it when testing -try: - import _uuid - _generate_time_safe = getattr(_uuid, "generate_time_safe", None) - _has_stable_extractable_node = getattr(_uuid, "has_stable_extractable_node", False) - _UuidCreate = getattr(_uuid, "UuidCreate", None) -except ImportError: - _uuid = None - _generate_time_safe = None - _has_stable_extractable_node = False - _UuidCreate = None - - -def _unix_getnode(): - """Get the hardware address on Unix using the _uuid extension module.""" - if _generate_time_safe and _has_stable_extractable_node: - uuid_time, _ = _generate_time_safe() - return UUID(bytes=uuid_time).node - -def _windll_getnode(): - """Get the hardware address on Windows using the _uuid extension module.""" - if _UuidCreate and _has_stable_extractable_node: - uuid_bytes = _UuidCreate() - return UUID(bytes_le=uuid_bytes).node - -def _random_getnode(): - """Get a random node ID.""" - # RFC 9562, §6.10-3 says that - # - # Implementations MAY elect to obtain a 48-bit cryptographic-quality - # random number as per Section 6.9 to use as the Node ID. [...] 
[and] - # implementations MUST set the least significant bit of the first octet - # of the Node ID to 1. This bit is the unicast or multicast bit, which - # will never be set in IEEE 802 addresses obtained from network cards. - # - # The "multicast bit" of a MAC address is defined to be "the least - # significant bit of the first octet". This works out to be the 41st bit - # counting from 1 being the least significant bit, or 1<<40. - # - # See https://en.wikipedia.org/w/index.php?title=MAC_address&oldid=1128764812#Universal_vs._local_(U/L_bit) - return int.from_bytes(os.urandom(6)) | (1 << 40) - - -# _OS_GETTERS, when known, are targeted for a specific OS or platform. -# The order is by 'common practice' on the specified platform. -# Note: 'posix' and 'windows' _OS_GETTERS are prefixed by a dll/dlload() method -# which, when successful, means none of these "external" methods are called. -# _GETTERS is (also) used by test_uuid.py to SkipUnless(), e.g., -# @unittest.skipUnless(_uuid._ifconfig_getnode in _uuid._GETTERS, ...) -if _LINUX: - _OS_GETTERS = [_ip_getnode, _ifconfig_getnode] -elif sys.platform == 'darwin': - _OS_GETTERS = [_ifconfig_getnode, _arp_getnode, _netstat_getnode] -elif sys.platform == 'win32': - # bpo-40201: _windll_getnode will always succeed, so these are not needed - _OS_GETTERS = [] -elif _AIX: - _OS_GETTERS = [_netstat_getnode] -else: - _OS_GETTERS = [_ifconfig_getnode, _ip_getnode, _arp_getnode, - _netstat_getnode, _lanscan_getnode] -if os.name == 'posix': - _GETTERS = [_unix_getnode] + _OS_GETTERS -elif os.name == 'nt': - _GETTERS = [_windll_getnode] + _OS_GETTERS -else: - _GETTERS = _OS_GETTERS - -_node = None - -def getnode(): - """Get the hardware address as a 48-bit positive integer. - - The first time this runs, it may launch a separate program, which could - be quite slow. If all attempts to obtain the hardware address fail, we - choose a random 48-bit number with its eighth bit set to 1 as recommended - in RFC 4122. - """ - global _node - if _node is not None: - return _node - - for getter in _GETTERS + [_random_getnode]: - try: - _node = getter() - except: - continue - if (_node is not None) and (0 <= _node < (1 << 48)): - return _node - assert False, '_random_getnode() returned invalid value: {}'.format(_node) - - -_last_timestamp = None - -def uuid1(node=None, clock_seq=None): - """Generate a UUID from a host ID, sequence number, and the current time. - If 'node' is not given, getnode() is used to obtain the hardware - address. If 'clock_seq' is given, it is used as the sequence number; - otherwise a random 14-bit sequence number is chosen.""" - - # When the system provides a version-1 UUID generator, use it (but don't - # use UuidCreate here because its UUIDs don't conform to RFC 4122). - if _generate_time_safe is not None and node is clock_seq is None: - uuid_time, safely_generated = _generate_time_safe() - try: - is_safe = SafeUUID(safely_generated) - except ValueError: - is_safe = SafeUUID.unknown - return UUID(bytes=uuid_time, is_safe=is_safe) - - global _last_timestamp - import time - nanoseconds = time.time_ns() - # 0x01b21dd213814000 is the number of 100-ns intervals between the - # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. 
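That constant can be sanity-checked with datetime, which also uses the proleptic Gregorian calendar (a verification aside, not part of the original file):

    from datetime import datetime

    delta = datetime(1970, 1, 1) - datetime(1582, 10, 15)
    assert delta.days == 141427                      # days between the two epochs
    assert delta.days * 86400 * 10**7 == 0x01b21dd213814000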
- timestamp = nanoseconds // 100 + 0x01b21dd213814000 - if _last_timestamp is not None and timestamp <= _last_timestamp: - timestamp = _last_timestamp + 1 - _last_timestamp = timestamp - if clock_seq is None: - import random - clock_seq = random.getrandbits(14) # instead of stable storage - time_low = timestamp & 0xffffffff - time_mid = (timestamp >> 32) & 0xffff - time_hi_version = (timestamp >> 48) & 0x0fff - clock_seq_low = clock_seq & 0xff - clock_seq_hi_variant = (clock_seq >> 8) & 0x3f - if node is None: - node = getnode() - return UUID(fields=(time_low, time_mid, time_hi_version, - clock_seq_hi_variant, clock_seq_low, node), version=1) - -def uuid3(namespace, name): - """Generate a UUID from the MD5 hash of a namespace UUID and a name.""" - if isinstance(name, str): - name = bytes(name, "utf-8") - from hashlib import md5 - digest = md5( - namespace.bytes + name, - usedforsecurity=False - ).digest() - return UUID(bytes=digest[:16], version=3) - -def uuid4(): - """Generate a random UUID.""" - return UUID(bytes=os.urandom(16), version=4) - -def uuid5(namespace, name): - """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" - if isinstance(name, str): - name = bytes(name, "utf-8") - from hashlib import sha1 - hash = sha1(namespace.bytes + name).digest() - return UUID(bytes=hash[:16], version=5) - - -def main(): - """Run the uuid command line interface.""" - uuid_funcs = { - "uuid1": uuid1, - "uuid3": uuid3, - "uuid4": uuid4, - "uuid5": uuid5 - } - uuid_namespace_funcs = ("uuid3", "uuid5") - namespaces = { - "@dns": NAMESPACE_DNS, - "@url": NAMESPACE_URL, - "@oid": NAMESPACE_OID, - "@x500": NAMESPACE_X500 - } - - import argparse - parser = argparse.ArgumentParser( - description="Generates a uuid using the selected uuid function.") - parser.add_argument("-u", "--uuid", choices=uuid_funcs.keys(), default="uuid4", - help="The function to use to generate the uuid. " - "By default uuid4 function is used.") - parser.add_argument("-n", "--namespace", - help="The namespace is a UUID, or '@ns' where 'ns' is a " - "well-known predefined UUID addressed by namespace name. " - "Such as @dns, @url, @oid, and @x500. " - "Only required for uuid3/uuid5 functions.") - parser.add_argument("-N", "--name", - help="The name used as part of generating the uuid. " - "Only required for uuid3/uuid5 functions.") - - args = parser.parse_args() - uuid_func = uuid_funcs[args.uuid] - namespace = args.namespace - name = args.name - - if args.uuid in uuid_namespace_funcs: - if not namespace or not name: - parser.error( - "Incorrect number of arguments. " - f"{args.uuid} requires a namespace and a name. " - "Run 'python -m uuid -h' for more information." - ) - namespace = namespaces[namespace] if namespace in namespaces else UUID(namespace) - print(uuid_func(namespace, name)) - else: - print(uuid_func()) - - -# The following standard UUIDs are for use with uuid3() or uuid5(). - -NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8') -NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8') -NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8') -NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8') - -if __name__ == "__main__": - main() diff --git a/Python313_13_x64_Template/Lib/venv/__init__.py b/Python313_13_x64_Template/Lib/venv/__init__.py deleted file mode 100644 index c45cb2ee..00000000 --- a/Python313_13_x64_Template/Lib/venv/__init__.py +++ /dev/null @@ -1,687 +0,0 @@ -""" -Virtual environment (venv) package for Python. Based on PEP 405. 
- -Copyright (C) 2011-2014 Vinay Sajip. -Licensed to the PSF under a contributor agreement. -""" -import logging -import os -import shutil -import subprocess -import sys -import sysconfig -import types -import shlex - - -CORE_VENV_DEPS = ('pip',) -logger = logging.getLogger(__name__) - - -class EnvBuilder: - """ - This class exists to allow virtual environment creation to be - customized. The constructor parameters determine the builder's - behaviour when called upon to create a virtual environment. - - By default, the builder makes the system (global) site-packages dir - *un*available to the created environment. - - If invoked using the Python -m option, the default is to use copying - on Windows platforms but symlinks elsewhere. If instantiated some - other way, the default is to *not* use symlinks. - - :param system_site_packages: If True, the system (global) site-packages - dir is available to created environments. - :param clear: If True, delete the contents of the environment directory if - it already exists, before environment creation. - :param symlinks: If True, attempt to symlink rather than copy files into - virtual environment. - :param upgrade: If True, upgrade an existing virtual environment. - :param with_pip: If True, ensure pip is installed in the virtual - environment - :param prompt: Alternative terminal prefix for the environment. - :param upgrade_deps: Update the base venv modules to the latest on PyPI - :param scm_ignore_files: Create ignore files for the SCMs specified by the - iterable. - """ - - def __init__(self, system_site_packages=False, clear=False, - symlinks=False, upgrade=False, with_pip=False, prompt=None, - upgrade_deps=False, *, scm_ignore_files=frozenset()): - self.system_site_packages = system_site_packages - self.clear = clear - self.symlinks = symlinks - self.upgrade = upgrade - self.with_pip = with_pip - self.orig_prompt = prompt - if prompt == '.': # see bpo-38901 - prompt = os.path.basename(os.getcwd()) - self.prompt = prompt - self.upgrade_deps = upgrade_deps - self.scm_ignore_files = frozenset(map(str.lower, scm_ignore_files)) - - def create(self, env_dir): - """ - Create a virtual environment in a directory. - - :param env_dir: The target directory to create an environment in. - - """ - env_dir = os.path.abspath(env_dir) - context = self.ensure_directories(env_dir) - for scm in self.scm_ignore_files: - getattr(self, f"create_{scm}_ignore_file")(context) - # See issue 24875. We need system_site_packages to be False - # until after pip is installed. 
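For orientation, the constructor flags documented above map onto a call such as the following minimal sketch (the target path and prompt are illustrative):

    import venv

    builder = venv.EnvBuilder(clear=True, with_pip=False, prompt="demo")
    builder.create("demo-env")  # writes pyvenv.cfg and the bin/Scripts layout

The module-level venv.create() defined later wraps the same builder in one call.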
- true_system_site_packages = self.system_site_packages - self.system_site_packages = False - self.create_configuration(context) - self.setup_python(context) - if self.with_pip: - self._setup_pip(context) - if not self.upgrade: - self.setup_scripts(context) - self.post_setup(context) - if true_system_site_packages: - # We had set it to False before, now - # restore it and rewrite the configuration - self.system_site_packages = True - self.create_configuration(context) - if self.upgrade_deps: - self.upgrade_dependencies(context) - - def clear_directory(self, path): - for fn in os.listdir(path): - fn = os.path.join(path, fn) - if os.path.islink(fn) or os.path.isfile(fn): - os.remove(fn) - elif os.path.isdir(fn): - shutil.rmtree(fn) - - def _venv_path(self, env_dir, name): - vars = { - 'base': env_dir, - 'platbase': env_dir, - 'installed_base': env_dir, - 'installed_platbase': env_dir, - } - return sysconfig.get_path(name, scheme='venv', vars=vars) - - @classmethod - def _same_path(cls, path1, path2): - """Check whether two paths appear the same. - - Whether they refer to the same file is irrelevant; we're testing for - whether a human reader would look at the path string and easily tell - that they're the same file. - """ - if sys.platform == 'win32': - if os.path.normcase(path1) == os.path.normcase(path2): - return True - # gh-90329: Don't display a warning for short/long names - import _winapi - try: - path1 = _winapi.GetLongPathName(os.fsdecode(path1)) - except OSError: - pass - try: - path2 = _winapi.GetLongPathName(os.fsdecode(path2)) - except OSError: - pass - if os.path.normcase(path1) == os.path.normcase(path2): - return True - return False - else: - return path1 == path2 - - def ensure_directories(self, env_dir): - """ - Create the directories for the environment. - - Returns a context object which holds paths in the environment, - for use by subsequent logic. - """ - - def create_if_needed(d): - if not os.path.exists(d): - os.makedirs(d) - elif os.path.islink(d) or os.path.isfile(d): - raise ValueError('Unable to create directory %r' % d) - - if os.pathsep in os.fspath(env_dir): - raise ValueError(f'Refusing to create a venv in {env_dir} because ' - f'it contains the PATH separator {os.pathsep}.') - if os.path.exists(env_dir) and self.clear: - self.clear_directory(env_dir) - context = types.SimpleNamespace() - context.env_dir = env_dir - context.env_name = os.path.split(env_dir)[1] - context.prompt = self.prompt if self.prompt is not None else context.env_name - create_if_needed(env_dir) - executable = sys._base_executable - if not executable: # see gh-96861 - raise ValueError('Unable to determine path to the running ' - 'Python interpreter. Provide an explicit path or ' - 'check that your PATH environment variable is ' - 'correctly set.') - dirname, exename = os.path.split(os.path.abspath(executable)) - if sys.platform == 'win32': - # Always create the simplest name in the venv. 
It will either be a - # link back to executable, or a copy of the appropriate launcher - _d = '_d' if os.path.splitext(exename)[0].endswith('_d') else '' - exename = f'python{_d}.exe' - context.executable = executable - context.python_dir = dirname - context.python_exe = exename - binpath = self._venv_path(env_dir, 'scripts') - incpath = self._venv_path(env_dir, 'include') - libpath = self._venv_path(env_dir, 'purelib') - - context.inc_path = incpath - create_if_needed(incpath) - context.lib_path = libpath - create_if_needed(libpath) - # Issue 21197: create lib64 as a symlink to lib on 64-bit non-OS X POSIX - if ((sys.maxsize > 2**32) and (os.name == 'posix') and - (sys.platform != 'darwin')): - link_path = os.path.join(env_dir, 'lib64') - if not os.path.exists(link_path): # Issue #21643 - os.symlink('lib', link_path) - context.bin_path = binpath - context.bin_name = os.path.relpath(binpath, env_dir) - context.env_exe = os.path.join(binpath, exename) - create_if_needed(binpath) - # Assign and update the command to use when launching the newly created - # environment, in case it isn't simply the executable script (e.g. bpo-45337) - context.env_exec_cmd = context.env_exe - if sys.platform == 'win32': - # bpo-45337: Fix up env_exec_cmd to account for file system redirections. - # Some redirects only apply to CreateFile and not CreateProcess - real_env_exe = os.path.realpath(context.env_exe) - if not self._same_path(real_env_exe, context.env_exe): - logger.warning('Actual environment location may have moved due to ' - 'redirects, links or junctions.\n' - ' Requested location: "%s"\n' - ' Actual location: "%s"', - context.env_exe, real_env_exe) - context.env_exec_cmd = real_env_exe - return context - - def create_configuration(self, context): - """ - Create a configuration file indicating where the environment's Python - was copied from, and whether the system site-packages should be made - available in the environment. - - :param context: The information for the environment creation request - being processed. - """ - context.cfg_path = path = os.path.join(context.env_dir, 'pyvenv.cfg') - with open(path, 'w', encoding='utf-8') as f: - f.write('home = %s\n' % context.python_dir) - if self.system_site_packages: - incl = 'true' - else: - incl = 'false' - f.write('include-system-site-packages = %s\n' % incl) - f.write('version = %d.%d.%d\n' % sys.version_info[:3]) - if self.prompt is not None: - f.write(f'prompt = {self.prompt!r}\n') - f.write('executable = %s\n' % os.path.realpath(sys.executable)) - args = [] - nt = os.name == 'nt' - if nt and self.symlinks: - args.append('--symlinks') - if not nt and not self.symlinks: - args.append('--copies') - if not self.with_pip: - args.append('--without-pip') - if self.system_site_packages: - args.append('--system-site-packages') - if self.clear: - args.append('--clear') - if self.upgrade: - args.append('--upgrade') - if self.upgrade_deps: - args.append('--upgrade-deps') - if self.orig_prompt is not None: - args.append(f'--prompt="{self.orig_prompt}"') - if not self.scm_ignore_files: - args.append('--without-scm-ignore-files') - - args.append(context.env_dir) - args = ' '.join(args) - f.write(f'command = {sys.executable} -m venv {args}\n') - - def symlink_or_copy(self, src, dst, relative_symlinks_ok=False): - """ - Try symlinking a file, and if that fails, fall back to copying. - (Unused on Windows, because we can't just copy a failed symlink file: we - switch to a different set of files instead.) 
- """ - assert os.name != 'nt' - force_copy = not self.symlinks - if not force_copy: - try: - if not os.path.islink(dst): # can't link to itself! - if relative_symlinks_ok: - assert os.path.dirname(src) == os.path.dirname(dst) - os.symlink(os.path.basename(src), dst) - else: - os.symlink(src, dst) - except Exception: # may need to use a more specific exception - logger.warning('Unable to symlink %r to %r', src, dst) - force_copy = True - if force_copy: - shutil.copyfile(src, dst) - - def create_git_ignore_file(self, context): - """ - Create a .gitignore file in the environment directory. - - The contents of the file cause the entire environment directory to be - ignored by git. - """ - gitignore_path = os.path.join(context.env_dir, '.gitignore') - with open(gitignore_path, 'w', encoding='utf-8') as file: - file.write('# Created by venv; ' - 'see https://docs.python.org/3/library/venv.html\n') - file.write('*\n') - - if os.name != 'nt': - def setup_python(self, context): - """ - Set up a Python executable in the environment. - - :param context: The information for the environment creation request - being processed. - """ - binpath = context.bin_path - path = context.env_exe - copier = self.symlink_or_copy - dirname = context.python_dir - copier(context.executable, path) - if not os.path.islink(path): - os.chmod(path, 0o755) - for suffix in ('python', 'python3', - f'python3.{sys.version_info[1]}'): - path = os.path.join(binpath, suffix) - if not os.path.exists(path): - # Issue 18807: make copies if - # symlinks are not wanted - copier(context.env_exe, path, relative_symlinks_ok=True) - if not os.path.islink(path): - os.chmod(path, 0o755) - - else: - def setup_python(self, context): - """ - Set up a Python executable in the environment. - - :param context: The information for the environment creation request - being processed. 
- """ - binpath = context.bin_path - dirname = context.python_dir - exename = os.path.basename(context.env_exe) - exe_stem = os.path.splitext(exename)[0] - exe_d = '_d' if os.path.normcase(exe_stem).endswith('_d') else '' - if sysconfig.is_python_build(): - scripts = dirname - else: - scripts = os.path.join(os.path.dirname(__file__), - 'scripts', 'nt') - if not sysconfig.get_config_var("Py_GIL_DISABLED"): - python_exe = os.path.join(dirname, f'python{exe_d}.exe') - pythonw_exe = os.path.join(dirname, f'pythonw{exe_d}.exe') - link_sources = { - 'python.exe': python_exe, - f'python{exe_d}.exe': python_exe, - 'pythonw.exe': pythonw_exe, - f'pythonw{exe_d}.exe': pythonw_exe, - } - python_exe = os.path.join(scripts, f'venvlauncher{exe_d}.exe') - pythonw_exe = os.path.join(scripts, f'venvwlauncher{exe_d}.exe') - copy_sources = { - 'python.exe': python_exe, - f'python{exe_d}.exe': python_exe, - 'pythonw.exe': pythonw_exe, - f'pythonw{exe_d}.exe': pythonw_exe, - } - else: - exe_t = f'3.{sys.version_info[1]}t' - python_exe = os.path.join(dirname, f'python{exe_t}{exe_d}.exe') - pythonw_exe = os.path.join(dirname, f'pythonw{exe_t}{exe_d}.exe') - link_sources = { - 'python.exe': python_exe, - f'python{exe_d}.exe': python_exe, - f'python{exe_t}.exe': python_exe, - f'python{exe_t}{exe_d}.exe': python_exe, - 'pythonw.exe': pythonw_exe, - f'pythonw{exe_d}.exe': pythonw_exe, - f'pythonw{exe_t}.exe': pythonw_exe, - f'pythonw{exe_t}{exe_d}.exe': pythonw_exe, - } - python_exe = os.path.join(scripts, f'venvlaunchert{exe_d}.exe') - pythonw_exe = os.path.join(scripts, f'venvwlaunchert{exe_d}.exe') - copy_sources = { - 'python.exe': python_exe, - f'python{exe_d}.exe': python_exe, - f'python{exe_t}.exe': python_exe, - f'python{exe_t}{exe_d}.exe': python_exe, - 'pythonw.exe': pythonw_exe, - f'pythonw{exe_d}.exe': pythonw_exe, - f'pythonw{exe_t}.exe': pythonw_exe, - f'pythonw{exe_t}{exe_d}.exe': pythonw_exe, - } - - do_copies = True - if self.symlinks: - do_copies = False - # For symlinking, we need all the DLLs to be available alongside - # the executables. - link_sources.update({ - f: os.path.join(dirname, f) for f in os.listdir(dirname) - if os.path.normcase(f).startswith(('python', 'vcruntime')) - and os.path.normcase(os.path.splitext(f)[1]) == '.dll' - }) - - to_unlink = [] - for dest, src in link_sources.items(): - dest = os.path.join(binpath, dest) - try: - os.symlink(src, dest) - to_unlink.append(dest) - except OSError: - logger.warning('Unable to symlink %r to %r', src, dest) - do_copies = True - for f in to_unlink: - try: - os.unlink(f) - except OSError: - logger.warning('Failed to clean up symlink %r', - f) - logger.warning('Retrying with copies') - break - - if do_copies: - for dest, src in copy_sources.items(): - dest = os.path.join(binpath, dest) - try: - shutil.copy2(src, dest) - except OSError: - logger.warning('Unable to copy %r to %r', src, dest) - - if sysconfig.is_python_build(): - # copy init.tcl - for root, dirs, files in os.walk(context.python_dir): - if 'init.tcl' in files: - tcldir = os.path.basename(root) - tcldir = os.path.join(context.env_dir, 'Lib', tcldir) - if not os.path.exists(tcldir): - os.makedirs(tcldir) - src = os.path.join(root, 'init.tcl') - dst = os.path.join(tcldir, 'init.tcl') - shutil.copyfile(src, dst) - break - - def _call_new_python(self, context, *py_args, **kwargs): - """Executes the newly created Python using safe-ish options""" - # gh-98251: We do not want to just use '-I' because that masks - # legitimate user preferences (such as not writing bytecode). 
All we - # really need is to ensure that the path variables do not overrule - # normal venv handling. - args = [context.env_exec_cmd, *py_args] - kwargs['env'] = env = os.environ.copy() - env['VIRTUAL_ENV'] = context.env_dir - env.pop('PYTHONHOME', None) - env.pop('PYTHONPATH', None) - kwargs['cwd'] = context.env_dir - kwargs['executable'] = context.env_exec_cmd - subprocess.check_output(args, **kwargs) - - def _setup_pip(self, context): - """Installs or upgrades pip in a virtual environment""" - self._call_new_python(context, '-m', 'ensurepip', '--upgrade', - '--default-pip', stderr=subprocess.STDOUT) - - def setup_scripts(self, context): - """ - Set up scripts into the created environment from a directory. - - This method installs the default scripts into the environment - being created. You can prevent the default installation by overriding - this method if you really need to, or if you need to specify - a different location for the scripts to install. By default, the - 'scripts' directory in the venv package is used as the source of - scripts to install. - """ - path = os.path.abspath(os.path.dirname(__file__)) - path = os.path.join(path, 'scripts') - self.install_scripts(context, path) - - def post_setup(self, context): - """ - Hook for post-setup modification of the venv. Subclasses may install - additional packages or scripts here, add activation shell scripts, etc. - - :param context: The information for the environment creation request - being processed. - """ - pass - - def replace_variables(self, text, context): - """ - Replace variable placeholders in script text with context-specific - variables. - - Return the text passed in , but with variables replaced. - - :param text: The text in which to replace placeholder variables. - :param context: The information for the environment creation request - being processed. - """ - replacements = { - '__VENV_DIR__': context.env_dir, - '__VENV_NAME__': context.env_name, - '__VENV_PROMPT__': context.prompt, - '__VENV_BIN_NAME__': context.bin_name, - '__VENV_PYTHON__': context.env_exe, - } - - def quote_ps1(s): - """ - This should satisfy PowerShell quoting rules [1], unless the quoted - string is passed directly to Windows native commands [2]. - [1]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_quoting_rules - [2]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_parsing#passing-arguments-that-contain-quote-characters - """ - s = s.replace("'", "''") - return f"'{s}'" - - def quote_bat(s): - return s - - # gh-124651: need to quote the template strings properly - quote = shlex.quote - script_path = context.script_path - if script_path.endswith('.ps1'): - quote = quote_ps1 - elif script_path.endswith('.bat'): - quote = quote_bat - else: - # fallbacks to POSIX shell compliant quote - quote = shlex.quote - - replacements = {key: quote(s) for key, s in replacements.items()} - for key, quoted in replacements.items(): - text = text.replace(key, quoted) - return text - - def install_scripts(self, context, path): - """ - Install scripts into the created environment from a directory. - - :param context: The information for the environment creation request - being processed. - :param path: Absolute pathname of a directory containing script. - Scripts in the 'common' subdirectory of this directory, - and those in the directory named for the platform - being run on, are installed in the created environment. 
- Placeholder variables are replaced with environment- - specific values. - """ - binpath = context.bin_path - plen = len(path) - if os.name == 'nt': - def skip_file(f): - f = os.path.normcase(f) - return (f.startswith(('python', 'venv')) - and f.endswith(('.exe', '.pdb'))) - else: - def skip_file(f): - return False - for root, dirs, files in os.walk(path): - if root == path: # at top-level, remove irrelevant dirs - for d in dirs[:]: - if d not in ('common', os.name): - dirs.remove(d) - continue # ignore files in top level - for f in files: - if skip_file(f): - continue - srcfile = os.path.join(root, f) - suffix = root[plen:].split(os.sep)[2:] - if not suffix: - dstdir = binpath - else: - dstdir = os.path.join(binpath, *suffix) - if not os.path.exists(dstdir): - os.makedirs(dstdir) - dstfile = os.path.join(dstdir, f) - if os.name == 'nt' and srcfile.endswith(('.exe', '.pdb')): - shutil.copy2(srcfile, dstfile) - continue - with open(srcfile, 'rb') as f: - data = f.read() - try: - context.script_path = srcfile - new_data = ( - self.replace_variables(data.decode('utf-8'), context) - .encode('utf-8') - ) - except UnicodeError as e: - logger.warning('unable to copy script %r, ' - 'may be binary: %s', srcfile, e) - continue - if new_data == data: - shutil.copy(srcfile, dstfile) - else: - with open(dstfile, 'wb') as f: - f.write(new_data) - shutil.copymode(srcfile, dstfile) - - def upgrade_dependencies(self, context): - logger.debug( - f'Upgrading {CORE_VENV_DEPS} packages in {context.bin_path}' - ) - self._call_new_python(context, '-m', 'pip', 'install', '--upgrade', - *CORE_VENV_DEPS) - - -def create(env_dir, system_site_packages=False, clear=False, - symlinks=False, with_pip=False, prompt=None, upgrade_deps=False, - *, scm_ignore_files=frozenset()): - """Create a virtual environment in a directory.""" - builder = EnvBuilder(system_site_packages=system_site_packages, - clear=clear, symlinks=symlinks, with_pip=with_pip, - prompt=prompt, upgrade_deps=upgrade_deps, - scm_ignore_files=scm_ignore_files) - builder.create(env_dir) - - -def main(args=None): - import argparse - - parser = argparse.ArgumentParser(prog=__name__, - description='Creates virtual Python ' - 'environments in one or ' - 'more target ' - 'directories.', - epilog='Once an environment has been ' - 'created, you may wish to ' - 'activate it, e.g. 
by ' - 'sourcing an activate script ' - 'in its bin directory.') - parser.add_argument('dirs', metavar='ENV_DIR', nargs='+', - help='A directory to create the environment in.') - parser.add_argument('--system-site-packages', default=False, - action='store_true', dest='system_site', - help='Give the virtual environment access to the ' - 'system site-packages dir.') - if os.name == 'nt': - use_symlinks = False - else: - use_symlinks = True - group = parser.add_mutually_exclusive_group() - group.add_argument('--symlinks', default=use_symlinks, - action='store_true', dest='symlinks', - help='Try to use symlinks rather than copies, ' - 'when symlinks are not the default for ' - 'the platform.') - group.add_argument('--copies', default=not use_symlinks, - action='store_false', dest='symlinks', - help='Try to use copies rather than symlinks, ' - 'even when symlinks are the default for ' - 'the platform.') - parser.add_argument('--clear', default=False, action='store_true', - dest='clear', help='Delete the contents of the ' - 'environment directory if it ' - 'already exists, before ' - 'environment creation.') - parser.add_argument('--upgrade', default=False, action='store_true', - dest='upgrade', help='Upgrade the environment ' - 'directory to use this version ' - 'of Python, assuming Python ' - 'has been upgraded in-place.') - parser.add_argument('--without-pip', dest='with_pip', - default=True, action='store_false', - help='Skips installing or upgrading pip in the ' - 'virtual environment (pip is bootstrapped ' - 'by default)') - parser.add_argument('--prompt', - help='Provides an alternative prompt prefix for ' - 'this environment.') - parser.add_argument('--upgrade-deps', default=False, action='store_true', - dest='upgrade_deps', - help=f'Upgrade core dependencies ({", ".join(CORE_VENV_DEPS)}) ' - 'to the latest version in PyPI') - parser.add_argument('--without-scm-ignore-files', dest='scm_ignore_files', - action='store_const', const=frozenset(), - default=frozenset(['git']), - help='Skips adding SCM ignore files to the environment ' - 'directory (Git is supported by default).') - options = parser.parse_args(args) - if options.upgrade and options.clear: - raise ValueError('you cannot supply --upgrade and --clear together.') - builder = EnvBuilder(system_site_packages=options.system_site, - clear=options.clear, - symlinks=options.symlinks, - upgrade=options.upgrade, - with_pip=options.with_pip, - prompt=options.prompt, - upgrade_deps=options.upgrade_deps, - scm_ignore_files=options.scm_ignore_files) - for d in options.dirs: - builder.create(d) - - -if __name__ == '__main__': - rc = 1 - try: - main() - rc = 0 - except Exception as e: - print('Error: %s' % e, file=sys.stderr) - sys.exit(rc) diff --git a/Python313_13_x64_Template/Lib/venv/scripts/common/Activate.ps1 b/Python313_13_x64_Template/Lib/venv/scripts/common/Activate.ps1 deleted file mode 100644 index f1460ba0..00000000 --- a/Python313_13_x64_Template/Lib/venv/scripts/common/Activate.ps1 +++ /dev/null @@ -1,547 +0,0 @@ -<# -.Synopsis -Activate a Python virtual environment for the current PowerShell session. - -.Description -Pushes the python executable for a virtual environment to the front of the -$Env:PATH environment variable and sets the prompt to signify that you are -in a Python virtual environment. Makes use of the command line switches as -well as the `pyvenv.cfg` file values present in the virtual environment. - -.Parameter VenvDir -Path to the directory that contains the virtual environment to activate. 
The -default value for this is the parent of the directory that the Activate.ps1 -script is located within. - -.Parameter Prompt -The prompt prefix to display when this virtual environment is activated. By -default, this prompt is the name of the virtual environment folder (VenvDir) -surrounded by parentheses and followed by a single space (ie. '(.venv) '). - -.Example -Activate.ps1 -Activates the Python virtual environment that contains the Activate.ps1 script. - -.Example -Activate.ps1 -Verbose -Activates the Python virtual environment that contains the Activate.ps1 script, -and shows extra information about the activation as it executes. - -.Example -Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv -Activates the Python virtual environment located in the specified location. - -.Example -Activate.ps1 -Prompt "MyPython" -Activates the Python virtual environment that contains the Activate.ps1 script, -and prefixes the current prompt with the specified string (surrounded in -parentheses) while the virtual environment is active. - -.Notes -On Windows, it may be required to enable this Activate.ps1 script by setting the -execution policy for the user. You can do this by issuing the following PowerShell -command: - -PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser - -For more information on Execution Policies: -https://go.microsoft.com/fwlink/?LinkID=135170 - -#> -Param( - [Parameter(Mandatory = $false)] - [String] - $VenvDir, - [Parameter(Mandatory = $false)] - [String] - $Prompt -) - -<# Function declarations --------------------------------------------------- #> - -<# -.Synopsis -Remove all shell session elements added by the Activate script, including the -addition of the virtual environment's Python executable from the beginning of -the PATH variable. - -.Parameter NonDestructive -If present, do not remove this function from the global namespace for the -session. - -#> -function global:deactivate ([switch]$NonDestructive) { - # Revert to original values - - # The prior prompt: - if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { - Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt - Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT - } - - # The prior PYTHONHOME: - if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { - Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME - Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME - } - - # The prior PATH: - if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { - Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH - Remove-Item -Path Env:_OLD_VIRTUAL_PATH - } - - # Just remove the VIRTUAL_ENV altogether: - if (Test-Path -Path Env:VIRTUAL_ENV) { - Remove-Item -Path env:VIRTUAL_ENV - } - - # Just remove VIRTUAL_ENV_PROMPT altogether. - if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { - Remove-Item -Path env:VIRTUAL_ENV_PROMPT - } - - # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: - if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { - Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force - } - - # Leave deactivate function in the global namespace if requested: - if (-not $NonDestructive) { - Remove-Item -Path function:deactivate - } -} - -<# -.Description -Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the -given folder, and returns them in a map. 
- -For each line in the pyvenv.cfg file, if that line can be parsed into exactly -two strings separated by `=` (with any amount of whitespace surrounding the =) -then it is considered a `key = value` line. The left hand string is the key, -the right hand is the value. - -If the value starts with a `'` or a `"` then the first and last character is -stripped from the value before being captured. - -.Parameter ConfigDir -Path to the directory that contains the `pyvenv.cfg` file. -#> -function Get-PyVenvConfig( - [String] - $ConfigDir -) { - Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" - - # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). - $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue - - # An empty map will be returned if no config file is found. - $pyvenvConfig = @{ } - - if ($pyvenvConfigPath) { - - Write-Verbose "File exists, parse `key = value` lines" - $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath - - $pyvenvConfigContent | ForEach-Object { - $keyval = $PSItem -split "\s*=\s*", 2 - if ($keyval[0] -and $keyval[1]) { - $val = $keyval[1] - - # Remove extraneous quotations around a string value. - if ("'""".Contains($val.Substring(0, 1))) { - $val = $val.Substring(1, $val.Length - 2) - } - - $pyvenvConfig[$keyval[0]] = $val - Write-Verbose "Adding Key: '$($keyval[0])'='$val'" - } - } - } - return $pyvenvConfig -} - - -<# Begin Activate script --------------------------------------------------- #> - -# Determine the containing directory of this script -$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition -$VenvExecDir = Get-Item -Path $VenvExecPath - -Write-Verbose "Activation script is located in path: '$VenvExecPath'" -Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" -Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" - -# Set values required in priority: CmdLine, ConfigFile, Default -# First, get the location of the virtual environment, it might not be -# VenvExecDir if specified on the command line. -if ($VenvDir) { - Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" -} -else { - Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." - $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") - Write-Verbose "VenvDir=$VenvDir" -} - -# Next, read the `pyvenv.cfg` file to determine any required value such -# as `prompt`. -$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir - -# Next, set the prompt from the command line, or the config file, or -# just use the name of the virtual environment folder. -if ($Prompt) { - Write-Verbose "Prompt specified as argument, using '$Prompt'" -} -else { - Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" - if ($pyvenvCfg -and $pyvenvCfg['prompt']) { - Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" - $Prompt = $pyvenvCfg['prompt']; - } - else { - Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)" - Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" - $Prompt = Split-Path -Path $venvDir -Leaf - } -} - -Write-Verbose "Prompt = '$Prompt'" -Write-Verbose "VenvDir='$VenvDir'" - -# Deactivate any currently active virtual environment, but leave the -# deactivate function in place. 
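The key = value rules that Get-PyVenvConfig applies above are small enough to restate as a rough Python sketch (illustrative only; the activation logic itself remains in PowerShell):

    def read_pyvenv_cfg(path):
        # Parse pyvenv.cfg per the rules described above (illustrative).
        config = {}
        with open(path, encoding="utf-8") as f:
            for line in f:
                key, sep, value = line.partition("=")
                key, value = key.strip(), value.strip()
                if sep and key and value:
                    if value[0] in ("'", '"'):
                        value = value[1:-1]  # strip one pair of surrounding quotes
                    config[key] = value
        return config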
-deactivate -nondestructive - -# Now set the environment variable VIRTUAL_ENV, used by many tools to determine -# that there is an activated venv. -$env:VIRTUAL_ENV = $VenvDir - -$env:VIRTUAL_ENV_PROMPT = $Prompt - -if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { - - Write-Verbose "Setting prompt to '$Prompt'" - - # Set the prompt to include the env name - # Make sure _OLD_VIRTUAL_PROMPT is global - function global:_OLD_VIRTUAL_PROMPT { "" } - Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT - New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt - - function global:prompt { - Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " - _OLD_VIRTUAL_PROMPT - } -} - -# Clear PYTHONHOME -if (Test-Path -Path Env:PYTHONHOME) { - Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME - Remove-Item -Path Env:PYTHONHOME -} - -# Add the venv to the PATH -Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH -$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" - -# SIG # Begin signature block -# MII3ZAYJKoZIhvcNAQcCoII3VTCCN1ECAQExDzANBglghkgBZQMEAgEFADB5Bgor -# BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG -# KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCBALKwKRFIhr2RY -# IW/WJLd9pc8a9sj/IoThKU92fTfKsKCCG9IwggXMMIIDtKADAgECAhBUmNLR1FsZ -# lUgTecgRwIeZMA0GCSqGSIb3DQEBDAUAMHcxCzAJBgNVBAYTAlVTMR4wHAYDVQQK -# ExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xSDBGBgNVBAMTP01pY3Jvc29mdCBJZGVu -# dGl0eSBWZXJpZmljYXRpb24gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgMjAy -# MDAeFw0yMDA0MTYxODM2MTZaFw00NTA0MTYxODQ0NDBaMHcxCzAJBgNVBAYTAlVT -# MR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xSDBGBgNVBAMTP01pY3Jv -# c29mdCBJZGVudGl0eSBWZXJpZmljYXRpb24gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRo -# b3JpdHkgMjAyMDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALORKgeD -# Bmf9np3gx8C3pOZCBH8Ppttf+9Va10Wg+3cL8IDzpm1aTXlT2KCGhFdFIMeiVPvH -# or+Kx24186IVxC9O40qFlkkN/76Z2BT2vCcH7kKbK/ULkgbk/WkTZaiRcvKYhOuD -# PQ7k13ESSCHLDe32R0m3m/nJxxe2hE//uKya13NnSYXjhr03QNAlhtTetcJtYmrV -# qXi8LW9J+eVsFBT9FMfTZRY33stuvF4pjf1imxUs1gXmuYkyM6Nix9fWUmcIxC70 -# ViueC4fM7Ke0pqrrBc0ZV6U6CwQnHJFnni1iLS8evtrAIMsEGcoz+4m+mOJyoHI1 -# vnnhnINv5G0Xb5DzPQCGdTiO0OBJmrvb0/gwytVXiGhNctO/bX9x2P29Da6SZEi3 -# W295JrXNm5UhhNHvDzI9e1eM80UHTHzgXhgONXaLbZ7LNnSrBfjgc10yVpRnlyUK -# xjU9lJfnwUSLgP3B+PR0GeUw9gb7IVc+BhyLaxWGJ0l7gpPKWeh1R+g/OPTHU3mg -# trTiXFHvvV84wRPmeAyVWi7FQFkozA8kwOy6CXcjmTimthzax7ogttc32H83rwjj -# O3HbbnMbfZlysOSGM1l0tRYAe1BtxoYT2v3EOYI9JACaYNq6lMAFUSw0rFCZE4e7 -# swWAsk0wAly4JoNdtGNz764jlU9gKL431VulAgMBAAGjVDBSMA4GA1UdDwEB/wQE -# AwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTIftJqhSobyhmYBAcnz1AQ -# T2ioojAQBgkrBgEEAYI3FQEEAwIBADANBgkqhkiG9w0BAQwFAAOCAgEAr2rd5hnn -# LZRDGU7L6VCVZKUDkQKL4jaAOxWiUsIWGbZqWl10QzD0m/9gdAmxIR6QFm3FJI9c -# Zohj9E/MffISTEAQiwGf2qnIrvKVG8+dBetJPnSgaFvlVixlHIJ+U9pW2UYXeZJF -# xBA2CFIpF8svpvJ+1Gkkih6PsHMNzBxKq7Kq7aeRYwFkIqgyuH4yKLNncy2RtNwx -# AQv3Rwqm8ddK7VZgxCwIo3tAsLx0J1KH1r6I3TeKiW5niB31yV2g/rarOoDXGpc8 -# FzYiQR6sTdWD5jw4vU8w6VSp07YEwzJ2YbuwGMUrGLPAgNW3lbBeUU0i/OxYqujY -# lLSlLu2S3ucYfCFX3VVj979tzR/SpncocMfiWzpbCNJbTsgAlrPhgzavhgplXHT2 -# 6ux6anSg8Evu75SjrFDyh+3XOjCDyft9V77l4/hByuVkrrOj7FjshZrM77nq81YY -# uVxzmq/FdxeDWds3GhhyVKVB0rYjdaNDmuV3fJZ5t0GNv+zcgKCf0Xd1WF81E+Al -# GmcLfc4l+gcK5GEh2NQc5QfGNpn0ltDGFf5Ozdeui53bFv0ExpK91IjmqaOqu/dk -# ODtfzAzQNb50GQOmxapMomE2gj4d8yu8l13bS3g7LfU772Aj6PXsCyM2la+YZr9T -# 
03u4aUoqlmZpxJTG9F9urJh4iIAGXKKy7aIwggb+MIIE5qADAgECAhMzAAfqVHr/ -# 4Q/aDzAcAAAAB+pUMA0GCSqGSIb3DQEBDAUAMFoxCzAJBgNVBAYTAlVTMR4wHAYD -# VQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKzApBgNVBAMTIk1pY3Jvc29mdCBJ -# RCBWZXJpZmllZCBDUyBFT0MgQ0EgMDIwHhcNMjYwNDA3MDcyODM1WhcNMjYwNDEw -# MDcyODM1WjB8MQswCQYDVQQGEwJVUzEPMA0GA1UECBMGT3JlZ29uMRIwEAYDVQQH -# EwlCZWF2ZXJ0b24xIzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9u -# MSMwIQYDVQQDExpQeXRob24gU29mdHdhcmUgRm91bmRhdGlvbjCCAaIwDQYJKoZI -# hvcNAQEBBQADggGPADCCAYoCggGBAND/lHfn3OCIvUzMUIL6OdsKJrpnvuRtahV1 -# 6NCf0YSqOQemwQw2bTIyTkgSFwY4WaCvfHzcliURiPidXiqy56OmeC19A95BarKA -# UmKRv3bVpM0XEK7OLvMyRFNg9aPUi1nmdF3Vx02RI9p88wBHQR5nNIpOTXlwfONQ -# klggyEZSxkBf+dCL6jtz4jiqoreiEmRwesOrtQxKNsRuezbumpmVMZGxrMQVLBIX -# OWG9a3GS6Sqfi+cJgxQhSKa9JENPRojyxOyVG8vdwJQiMqSjm2ZMFAkIkSWBQSfx -# WjrRmw8/20WaBENattpqb7/cjX7zwimJ86uV48D8AQIGzAxfYAySG6NG9iMfU5S5 -# wzDFpiCuXyfrlgAbZu4fnBIyOmGcq01XxruzJ3FcdLMif5YXZU+n30XOaJfgY9/x -# Gq2HiEIQF5MeuxknfD+vYi/GXGtC/nlKS0Tx91+YXt6RctxgJEwpZCGzFZmmaiUa -# Y0GBp4jzXXwLqX8T15lgxAGoqoPvvwIDAQABo4ICGTCCAhUwDAYDVR0TAQH/BAIw -# ADAOBgNVHQ8BAf8EBAMCB4AwPAYDVR0lBDUwMwYKKwYBBAGCN2EBAAYIKwYBBQUH -# AwMGGysGAQQBgjdhgqKNuwqmkohkgZH0oEWCk/3hbzAdBgNVHQ4EFgQUy3N6DzeS -# y91jju8Ihmm3r+5AO58wHwYDVR0jBBgwFoAUZZ9RzoVofy+KRYiq3acxux4NAF4w -# ZwYDVR0fBGAwXjBcoFqgWIZWaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9w -# cy9jcmwvTWljcm9zb2Z0JTIwSUQlMjBWZXJpZmllZCUyMENTJTIwRU9DJTIwQ0El -# MjAwMi5jcmwwgaUGCCsGAQUFBwEBBIGYMIGVMGQGCCsGAQUFBzAChlhodHRwOi8v -# d3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NlcnRzL01pY3Jvc29mdCUyMElEJTIw -# VmVyaWZpZWQlMjBDUyUyMEVPQyUyMENBJTIwMDIuY3J0MC0GCCsGAQUFBzABhiFo -# dHRwOi8vb25lb2NzcC5taWNyb3NvZnQuY29tL29jc3AwZgYDVR0gBF8wXTBRBgwr -# BgEEAYI3TIN9AQEwQTA/BggrBgEFBQcCARYzaHR0cDovL3d3dy5taWNyb3NvZnQu -# Y29tL3BraW9wcy9Eb2NzL1JlcG9zaXRvcnkuaHRtMAgGBmeBDAEEATANBgkqhkiG -# 9w0BAQwFAAOCAgEAPPwJPfkrkQMH39/iTBbir6tGnQpLCpOuP1A6mmKp22GxCG0/ -# 1IPx4QK1qXpy8hYd/G9ySDSYu3DSg22/icSmGSxdcI3zoRsj9vdJeesQrxtK8v9y -# 4zMxN5TaLV5CmatSUZPyX1t7Tee9wiLBUeZIj+3Lg2gNUsdvavywRYxSYkWGuGaM -# jGtJrs4PoJW3f4KkOc5mShCpUgl4Mo9ZO+ChcQpKEP99UJ9CXB9wrNzXnEOTyGnR -# f1sYklPqBifC7hrnKIPZiJte1efmGeExmspWewmUSNXCIGenDAN8XDut2yi1iSSQ -# n1VtL6deCRhS1cTn+FAzy2q7a/8Jhhq+HUlcJwRGtrxgKZHrwEvGRvIWNK5l1rKl -# Q+WQ7RqRrH6PpSfR/xoptfpJX9LNUoHS0m114HcE2xk2hbv+U/5ZgxUtSd4MbF7/ -# C8eShz4Os8CznYXJ/d+kfvoyEqKE9VCbc4BUC+w1iufQOPo4tRvK4TFJu1N4IqJk -# NsChWXUef7lIT5CoaJw4np0dVS2NosmRCxi1dMyADzqFNDXGKQxq5k6MpnXbevL5 -# JdcznhhxgwRUcwNK/3f9WSaU2mnI+6tHrnATteL7Ct6FzZWjqWDbURkU66bRqrBh -# +u5KyLZAAQXTfdsaDUfxtElQJf5wROgYvwnW1dGvujgc+XKVvf1VT3GSFRIwggda -# MIIFQqADAgECAhMzAAAABft6XDITYd9dAAAAAAAFMA0GCSqGSIb3DQEBDAUAMGMx -# CzAJBgNVBAYTAlVTMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xNDAy -# BgNVBAMTK01pY3Jvc29mdCBJRCBWZXJpZmllZCBDb2RlIFNpZ25pbmcgUENBIDIw -# MjEwHhcNMjEwNDEzMTczMTUzWhcNMjYwNDEzMTczMTUzWjBaMQswCQYDVQQGEwJV -# UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSswKQYDVQQDEyJNaWNy -# b3NvZnQgSUQgVmVyaWZpZWQgQ1MgRU9DIENBIDAyMIICIjANBgkqhkiG9w0BAQEF -# AAOCAg8AMIICCgKCAgEA0hqZfD8ykKTA6CDbWvshmBpDoBf7Lv132RVuSqVwQO3a -# ALLkuRnnTIoRmMGo0fIMQrtwR6UHB06xdqOkAfqB6exubXTHu44+duHUCdE4ngjE -# LBQyluMuSOnHaEdveIbt31OhMEX/4nQkph4+Ah0eR4H2sTRrVKmKrlOoQlhia73Q -# g2dHoitcX1uT1vW3Knpt9Mt76H7ZHbLNspMZLkWBabKMl6BdaWZXYpPGdS+qY80g -# DaNCvFq0d10UMu7xHesIqXpTDT3Q3AeOxSylSTc/74P3og9j3OuemEFauFzL55t1 -# MvpadEhQmD8uFMxFv/iZOjwvcdY1zhanVLLyplz13/NzSoU3QjhPdqAGhRIwh/YD -# zo3jCdVJgWQRrW83P3qWFFkxNiME2iO4IuYgj7RwseGwv7I9cxOyaHihKMdT9Neo -# SjpSNzVnKKGcYMtOdMtKFqoV7Cim2m84GmIYZTBorR/Po9iwlasTYKFpGZqdWKyY -# 
nJO2FV8oMmWkIK1iagLLgEt6ZaR0rk/1jUYssyTiRqWr84Qs3XL/V5KUBEtUEQfQ -# /4RtnI09uFFUIGJZV9mD/xOUksWodGrCQSem6Hy261xMJAHqTqMuDKgwi8xk/mfl -# r7yhXPL73SOULmu1Aqu4I7Gpe6QwNW2TtQBxM3vtSTmdPW6rK5y0gED51RjsyK0C -# AwEAAaOCAg4wggIKMA4GA1UdDwEB/wQEAwIBhjAQBgkrBgEEAYI3FQEEAwIBADAd -# BgNVHQ4EFgQUZZ9RzoVofy+KRYiq3acxux4NAF4wVAYDVR0gBE0wSzBJBgRVHSAA -# MEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9wa2lvcHMv -# RG9jcy9SZXBvc2l0b3J5Lmh0bTAZBgkrBgEEAYI3FAIEDB4KAFMAdQBiAEMAQTAS -# BgNVHRMBAf8ECDAGAQH/AgEAMB8GA1UdIwQYMBaAFNlBKbAPD2Ns72nX9c0pnqRI -# ajDmMHAGA1UdHwRpMGcwZaBjoGGGX2h0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9w -# a2lvcHMvY3JsL01pY3Jvc29mdCUyMElEJTIwVmVyaWZpZWQlMjBDb2RlJTIwU2ln -# bmluZyUyMFBDQSUyMDIwMjEuY3JsMIGuBggrBgEFBQcBAQSBoTCBnjBtBggrBgEF -# BQcwAoZhaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9jZXJ0cy9NaWNy -# b3NvZnQlMjBJRCUyMFZlcmlmaWVkJTIwQ29kZSUyMFNpZ25pbmclMjBQQ0ElMjAy -# MDIxLmNydDAtBggrBgEFBQcwAYYhaHR0cDovL29uZW9jc3AubWljcm9zb2Z0LmNv -# bS9vY3NwMA0GCSqGSIb3DQEBDAUAA4ICAQBFSWDUd08X4g5HzvVfrB1SiV8pk6XP -# HT9jPkCmvU/uvBzmZRAjYk2gKYR3pXoStRJaJ/lhjC5Dq/2R7P1YRZHCDYyK0zvS -# RMdE6YQtgGjmsdhzD0nCS6hVVcgfmNQscPJ1WHxbvG5EQgYQ0ZED1FN0MOPQzWe1 -# zbH5Va0dSxtnodBVRjnyDYEm7sNEcvJHTG3eXzAyd00E5KDCsEl4z5O0mvXqwaH2 -# PS0200E6P4WqLwgs/NmUu5+Aa8Lw/2En2VkIW7Pkir4Un1jG6+tj/ehuqgFyUPPC -# h6kbnvk48bisi/zPjAVkj7qErr7fSYICCzJ4s4YUNVVHgdoFn2xbW7ZfBT3QA9zf -# hq9u4ExXbrVD5rxXSTFEUg2gzQq9JHxsdHyMfcCKLFQOXODSzcYeLpCd+r6GcoDB -# ToyPdKccjC6mAq6+/hiMDnpvKUIHpyYEzWUeattyKXtMf+QrJeQ+ny5jBL+xqdOO -# PEz3dg7qn8/oprUrUbGLBv9fWm18fWXdAv1PCtLL/acMLtHoyeSVMKQYqDHb3Qm0 -# uQ+NQ0YE4kUxSQa+W/cCzYAI32uN0nb9M4Mr1pj4bJZidNkM4JyYqezohILxYkgH -# bboJQISrQWrm5RYdyhKBpptJ9JJn0Z63LjdnzlOUxjlsAbQir2Wmz/OJE703BbHm -# QZRwzPx1vu7S5zCCB54wggWGoAMCAQICEzMAAAAHh6M0o3uljhwAAAAAAAcwDQYJ -# KoZIhvcNAQEMBQAwdzELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBD -# b3Jwb3JhdGlvbjFIMEYGA1UEAxM/TWljcm9zb2Z0IElkZW50aXR5IFZlcmlmaWNh -# dGlvbiBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAyMDIwMB4XDTIxMDQwMTIw -# MDUyMFoXDTM2MDQwMTIwMTUyMFowYzELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1p -# Y3Jvc29mdCBDb3Jwb3JhdGlvbjE0MDIGA1UEAxMrTWljcm9zb2Z0IElEIFZlcmlm -# aWVkIENvZGUgU2lnbmluZyBQQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIP -# ADCCAgoCggIBALLwwK8ZiCji3VR6TElsaQhVCbRS/3pK+MHrJSj3Zxd3KU3rlfL3 -# qrZilYKJNqztA9OQacr1AwoNcHbKBLbsQAhBnIB34zxf52bDpIO3NJlfIaTE/xrw -# eLoQ71lzCHkD7A4As1Bs076Iu+mA6cQzsYYH/Cbl1icwQ6C65rU4V9NQhNUwgrx9 -# rGQ//h890Q8JdjLLw0nV+ayQ2Fbkd242o9kH82RZsH3HEyqjAB5a8+Ae2nPIPc8s -# ZU6ZE7iRrRZywRmrKDp5+TcmJX9MRff241UaOBs4NmHOyke8oU1TYrkxh+YeHgfW -# o5tTgkoSMoayqoDpHOLJs+qG8Tvh8SnifW2Jj3+ii11TS8/FGngEaNAWrbyfNrC6 -# 9oKpRQXY9bGH6jn9NEJv9weFxhTwyvx9OJLXmRGbAUXN1U9nf4lXezky6Uh/cgjk -# Vd6CGUAf0K+Jw+GE/5VpIVbcNr9rNE50Sbmy/4RTCEGvOq3GhjITbCa4crCzTTHg -# YYjHs1NbOc6brH+eKpWLtr+bGecy9CrwQyx7S/BfYJ+ozst7+yZtG2wR461uckFu -# 0t+gCwLdN0A6cFtSRtR8bvxVFyWwTtgMMFRuBa3vmUOTnfKLsLefRaQcVTgRnzeL -# zdpt32cdYKp+dhr2ogc+qM6K4CBI5/j4VFyC4QFeUP2YAidLtvpXRRo3AgMBAAGj -# ggI1MIICMTAOBgNVHQ8BAf8EBAMCAYYwEAYJKwYBBAGCNxUBBAMCAQAwHQYDVR0O -# BBYEFNlBKbAPD2Ns72nX9c0pnqRIajDmMFQGA1UdIARNMEswSQYEVR0gADBBMD8G -# CCsGAQUFBwIBFjNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL0RvY3Mv -# UmVwb3NpdG9yeS5odG0wGQYJKwYBBAGCNxQCBAweCgBTAHUAYgBDAEEwDwYDVR0T -# AQH/BAUwAwEB/zAfBgNVHSMEGDAWgBTIftJqhSobyhmYBAcnz1AQT2ioojCBhAYD -# VR0fBH0wezB5oHegdYZzaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9j -# cmwvTWljcm9zb2Z0JTIwSWRlbnRpdHklMjBWZXJpZmljYXRpb24lMjBSb290JTIw -# Q2VydGlmaWNhdGUlMjBBdXRob3JpdHklMjAyMDIwLmNybDCBwwYIKwYBBQUHAQEE -# gbYwgbMwgYEGCCsGAQUFBzAChnVodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtp -# 
b3BzL2NlcnRzL01pY3Jvc29mdCUyMElkZW50aXR5JTIwVmVyaWZpY2F0aW9uJTIw -# Um9vdCUyMENlcnRpZmljYXRlJTIwQXV0aG9yaXR5JTIwMjAyMC5jcnQwLQYIKwYB -# BQUHMAGGIWh0dHA6Ly9vbmVvY3NwLm1pY3Jvc29mdC5jb20vb2NzcDANBgkqhkiG -# 9w0BAQwFAAOCAgEAfyUqnv7Uq+rdZgrbVyNMul5skONbhls5fccPlmIbzi+OwVdP -# Q4H55v7VOInnmezQEeW4LqK0wja+fBznANbXLB0KrdMCbHQpbLvG6UA/Xv2pfpVI -# E1CRFfNF4XKO8XYEa3oW8oVH+KZHgIQRIwAbyFKQ9iyj4aOWeAzwk+f9E5StNp5T -# 8FG7/VEURIVWArbAzPt9ThVN3w1fAZkF7+YU9kbq1bCR2YD+MtunSQ1Rft6XG7b4 -# e0ejRA7mB2IoX5hNh3UEauY0byxNRG+fT2MCEhQl9g2i2fs6VOG19CNep7SquKaB -# jhWmirYyANb0RJSLWjinMLXNOAga10n8i9jqeprzSMU5ODmrMCJE12xS/NWShg/t -# uLjAsKP6SzYZ+1Ry358ZTFcx0FS/mx2vSoU8s8HRvy+rnXqyUJ9HBqS0DErVLjQw -# K8VtsBdekBmdTbQVoCgPCqr+PDPB3xajYnzevs7eidBsM71PINK2BoE2UfMwxCCX -# 3mccFgx6UsQeRSdVVVNSyALQe6PT12418xon2iDGE81OGCreLzDcMAZnrUAx4XQL -# Uz6ZTl65yPUiOh3k7Yww94lDf+8oG2oZmDh5O1Qe38E+M3vhKwmzIeoB1dVLlz4i -# 3IpaDcR+iuGjH2TdaC1ZOmBXiCRKJLj4DT2uhJ04ji+tHD6n58vhavFIrmcxghro -# MIIa5AIBATBxMFoxCzAJBgNVBAYTAlVTMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29y -# cG9yYXRpb24xKzApBgNVBAMTIk1pY3Jvc29mdCBJRCBWZXJpZmllZCBDUyBFT0Mg -# Q0EgMDICEzMAB+pUev/hD9oPMBwAAAAH6lQwDQYJYIZIAWUDBAIBBQCggbQwGQYJ -# KoZIhvcNAQkDMQwGCisGAQQBgjcCAQQwHAYKKwYBBAGCNwIBCzEOMAwGCisGAQQB -# gjcCARUwLwYJKoZIhvcNAQkEMSIEICpXe3RS3b2coD0CJveEHlglqtPUYZ2FqSrO -# UfP6C6Y4MEgGCisGAQQBgjcCAQwxOjA4oDKAMABQAHkAdABoAG8AbgAgADMALgAx -# ADMALgAxADMAIAAoADAAMQAxADAANABjAGUAKaECgAAwDQYJKoZIhvcNAQEBBQAE -# ggGAE+NSuDZvG3igu5a4sqdyWzpiCadCeGW/MeexQY3ttAFVHGoFn4aKPdPsfFB+ -# YgDPw3+rHC2h619JkyWSJXfojj24d+16z/kRVRKAZxLMp4NKto1Y9ZOLN5spf10q -# rIyAWZybbmll4QwcBEyb1fnlpfLUSzK9a8IhvKWGyb7Q3S0mkHApX0Lo1ppe9Chh -# wpsd+tqOTlJoIE18CZNgFvN4lRl7T2XhX3XqyMNZbBemtzIvKBXoWTMvHAErQm5M -# L6xdiAiLjA3bDsYqd60Maa0TYEwVLCxFfxbSQk2hHL1h2mpPwdTNYYQ8II4lcSDj -# St5jb9hyF82JnFYU6KBRVVyu+j5/zW6fu7/mbqBSNW9NgzftNNT1AlzZyzyMFhYp -# Wu2gJGCpi0XeibKNaRhU1UAc7cD+Kv2CTZaKq19wq1Q3KdcxxuZ0ITDc0GIjGZCz -# oVYgurx7Ooz/MrS1Qc6oHsTL9wdORG288/wOPB1qCI33eRw9+T+5kOGhEpHQZ/6M -# XzLyoYIYETCCGA0GCisGAQQBgjcDAwExghf9MIIX+QYJKoZIhvcNAQcCoIIX6jCC -# F+YCAQMxDzANBglghkgBZQMEAgEFADCCAWIGCyqGSIb3DQEJEAEEoIIBUQSCAU0w -# ggFJAgEBBgorBgEEAYRZCgMBMDEwDQYJYIZIAWUDBAIBBQAEIGeiqCZUyu3m9A3N -# 0x1/GV6dpz+x8P335/R8ZIND3pjbAgZpwnLJmBYYEzIwMjYwNDA3MjAzNjIzLjU0 -# N1owBIACAfSggeGkgd4wgdsxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5n -# dG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9y -# YXRpb24xJTAjBgNVBAsTHE1pY3Jvc29mdCBBbWVyaWNhIE9wZXJhdGlvbnMxJzAl -# BgNVBAsTHm5TaGllbGQgVFNTIEVTTjo3RDAwLTA1RTAtRDk0NzE1MDMGA1UEAxMs -# TWljcm9zb2Z0IFB1YmxpYyBSU0EgVGltZSBTdGFtcGluZyBBdXRob3JpdHmggg8h -# MIIHgjCCBWqgAwIBAgITMwAAAAXlzw//Zi7JhwAAAAAABTANBgkqhkiG9w0BAQwF -# ADB3MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9u -# MUgwRgYDVQQDEz9NaWNyb3NvZnQgSWRlbnRpdHkgVmVyaWZpY2F0aW9uIFJvb3Qg -# Q2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMjAwHhcNMjAxMTE5MjAzMjMxWhcNMzUx -# MTE5MjA0MjMxWjBhMQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENv -# cnBvcmF0aW9uMTIwMAYDVQQDEylNaWNyb3NvZnQgUHVibGljIFJTQSBUaW1lc3Rh -# bXBpbmcgQ0EgMjAyMDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ58 -# 51Jj/eDFnwV9Y7UGIqMcHtfnlzPREwW9ZUZHd5HBXXBvf7KrQ5cMSqFSHGqg2/qJ -# hYqOQxwuEQXG8kB41wsDJP5d0zmLYKAY8Zxv3lYkuLDsfMuIEqvGYOPURAH+Ybl4 -# SJEESnt0MbPEoKdNihwM5xGv0rGofJ1qOYSTNcc55EbBT7uq3wx3mXhtVmtcCEr5 -# ZKTkKKE1CxZvNPWdGWJUPC6e4uRfWHIhZcgCsJ+sozf5EeH5KrlFnxpjKKTavwfF -# P6XaGZGWUG8TZaiTogRoAlqcevbiqioUz1Yt4FRK53P6ovnUfANjIgM9JDdJ4e0q -# iDRm5sOTiEQtBLGd9Vhd1MadxoGcHrRCsS5rO9yhv2fjJHrmlQ0EIXmp4DhDBieK -# UGR+eZ4CNE3ctW4uvSDQVeSp9h1SaPV8UWEfyTxgGjOsRpeexIveR1MPTVf7gt8h -# 
Y64XNPO6iyUGsEgt8c2PxF87E+CO7A28TpjNq5eLiiunhKbq0XbjkNoU5JhtYUrl -# mAbpxRjb9tSreDdtACpm3rkpxp7AQndnI0Shu/fk1/rE3oWsDqMX3jjv40e8KN5Y -# sJBnczyWB4JyeeFMW3JBfdeAKhzohFe8U5w9WuvcP1E8cIxLoKSDzCCBOu0hWdjz -# KNu8Y5SwB1lt5dQhABYyzR3dxEO/T1K/BVF3rV69AgMBAAGjggIbMIICFzAOBgNV -# HQ8BAf8EBAMCAYYwEAYJKwYBBAGCNxUBBAMCAQAwHQYDVR0OBBYEFGtpKDo1L0hj -# QM972K9J6T7ZPdshMFQGA1UdIARNMEswSQYEVR0gADBBMD8GCCsGAQUFBwIBFjNo -# dHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL0RvY3MvUmVwb3NpdG9yeS5o -# dG0wEwYDVR0lBAwwCgYIKwYBBQUHAwgwGQYJKwYBBAGCNxQCBAweCgBTAHUAYgBD -# AEEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBTIftJqhSobyhmYBAcnz1AQ -# T2ioojCBhAYDVR0fBH0wezB5oHegdYZzaHR0cDovL3d3dy5taWNyb3NvZnQuY29t -# L3BraW9wcy9jcmwvTWljcm9zb2Z0JTIwSWRlbnRpdHklMjBWZXJpZmljYXRpb24l -# MjBSb290JTIwQ2VydGlmaWNhdGUlMjBBdXRob3JpdHklMjAyMDIwLmNybDCBlAYI -# KwYBBQUHAQEEgYcwgYQwgYEGCCsGAQUFBzAChnVodHRwOi8vd3d3Lm1pY3Jvc29m -# dC5jb20vcGtpb3BzL2NlcnRzL01pY3Jvc29mdCUyMElkZW50aXR5JTIwVmVyaWZp -# Y2F0aW9uJTIwUm9vdCUyMENlcnRpZmljYXRlJTIwQXV0aG9yaXR5JTIwMjAyMC5j -# cnQwDQYJKoZIhvcNAQEMBQADggIBAF+Idsd+bbVaFXXnTHho+k7h2ESZJRWluLE0 -# Oa/pO+4ge/XEizXvhs0Y7+KVYyb4nHlugBesnFqBGEdC2IWmtKMyS1OWIviwpnK3 -# aL5JedwzbeBF7POyg6IGG/XhhJ3UqWeWTO+Czb1c2NP5zyEh89F72u9UIw+IfvM9 -# lzDmc2O2END7MPnrcjWdQnrLn1Ntday7JSyrDvBdmgbNnCKNZPmhzoa8PccOiQlj -# jTW6GePe5sGFuRHzdFt8y+bN2neF7Zu8hTO1I64XNGqst8S+w+RUdie8fXC1jKu3 -# m9KGIqF4aldrYBamyh3g4nJPj/LR2CBaLyD+2BuGZCVmoNR/dSpRCxlot0i79dKO -# ChmoONqbMI8m04uLaEHAv4qwKHQ1vBzbV/nG89LDKbRSSvijmwJwxRxLLpMQ/u4x -# XxFfR4f/gksSkbJp7oqLwliDm/h+w0aJ/U5ccnYhYb7vPKNMN+SZDWycU5ODIRfy -# oGl59BsXR/HpRGtiJquOYGmvA/pk5vC1lcnbeMrcWD/26ozePQ/TWfNXKBOmkFpv -# PE8CH+EeGGWzqTCjdAsno2jzTeNSxlx3glDGJgcdz5D/AAxw9Sdgq/+rY7jjgs7X -# 6fqPTXPmaCAJKVHAP19oEjJIBwD1LyHbaEgBxFCogYSOiUIr0Xqcr1nJfiWG2GwY -# e6ZoAF1bMIIHlzCCBX+gAwIBAgITMwAAAFXZ3WkmKPn44gAAAAAAVTANBgkqhkiG -# 9w0BAQwFADBhMQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBv -# cmF0aW9uMTIwMAYDVQQDEylNaWNyb3NvZnQgUHVibGljIFJTQSBUaW1lc3RhbXBp -# bmcgQ0EgMjAyMDAeFw0yNTEwMjMyMDQ2NDlaFw0yNjEwMjIyMDQ2NDlaMIHbMQsw -# CQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9u -# ZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSUwIwYDVQQLExxNaWNy -# b3NvZnQgQW1lcmljYSBPcGVyYXRpb25zMScwJQYDVQQLEx5uU2hpZWxkIFRTUyBF -# U046N0QwMC0wNUUwLUQ5NDcxNTAzBgNVBAMTLE1pY3Jvc29mdCBQdWJsaWMgUlNB -# IFRpbWUgU3RhbXBpbmcgQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A -# MIICCgKCAgEAvbkfkh5ZSLP0MCUWafaw/KZoVZu9iQx8r5JwhZvdrUi86UjCCFQO -# NjQanrIxGF9hRGIZLQZ50gHrLC+4fpUEJff5t04VwByWC2/bWOuk6NmaTh9JpPZD -# cGzNR95QlryjfEjtl+gxj12zNPEdADPplVfzt8cYRWFBx/Fbfch08k6P9p7jX2q1 -# jFPbUxWYJ+xOyGC1aKhDGY5b+8wL39v6qC0HFIx/v3y+bep+aEXooK8VoeWK+szf -# aFjXo8YTcvQ8UL4szu9HFTuZNv6vvoJ7Ju+o5aTj51sph+0+FXW38TlL/rDBd5ia -# 79jskLtOeHbDjkbljilwzegcxv9i49F05ZrS/5ELZCCY1VaqO7EOLKVaxxdAO5oy -# 1vb0Bx0ZRVX1mxFjYzay2EC051k6yGJHm58y1oe2IKRa/SM1+BTGse6vHNi5Q2d5 -# ZnoR9AOAUDDwJIIqRI4rZz2MSinh11WrXTG9urF2uoyd5Ve+8hxes9ABeP2PYQKl -# XYTAxvdaeanDTQ/vwmnM+yTcWzrVm84Z38XVFw4G7p/ZNZ2nscvv6uru2AevXcyV -# 1t8ha7iWmhhgTWBNBrViuDlc3iPvOz2SVPbPeqhyY/NXwNZCAgc2H5pOztu6MwQx -# DIjte3XM/FkKBxHofS2abNT/0HG+xZtFqUJDaxgbJa6lN1zh7spjuQ8CAwEAAaOC -# AcswggHHMB0GA1UdDgQWBBRWBF8QbdwIA/DIv6nJFsrB16xltjAfBgNVHSMEGDAW -# gBRraSg6NS9IY0DPe9ivSek+2T3bITBsBgNVHR8EZTBjMGGgX6BdhltodHRwOi8v -# d3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NybC9NaWNyb3NvZnQlMjBQdWJsaWMl -# MjBSU0ElMjBUaW1lc3RhbXBpbmclMjBDQSUyMDIwMjAuY3JsMHkGCCsGAQUFBwEB -# BG0wazBpBggrBgEFBQcwAoZdaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9w -# cy9jZXJ0cy9NaWNyb3NvZnQlMjBQdWJsaWMlMjBSU0ElMjBUaW1lc3RhbXBpbmcl -# 
MjBDQSUyMDIwMjAuY3J0MAwGA1UdEwEB/wQCMAAwFgYDVR0lAQH/BAwwCgYIKwYB -# BQUHAwgwDgYDVR0PAQH/BAQDAgeAMGYGA1UdIARfMF0wUQYMKwYBBAGCN0yDfQEB -# MEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9wa2lvcHMv -# RG9jcy9SZXBvc2l0b3J5Lmh0bTAIBgZngQwBBAIwDQYJKoZIhvcNAQEMBQADggIB -# AFIe4ZJUe9qUKcWeWypchB58fXE/ZIWv2D5XP5/k/tB7LCN9BvmNSVKZ3VeclQM9 -# 78wfEvuvdMQSUv6Y20boIM8DK1K1IU9cP21MG0ExiHxaqjrikf2qbfrXIip4Ef3v -# 2bNYKQxCxN3Sczp1SX0H7uqK2L5OhfDEiXf15iou5hh+EPaaqp49czNQpJDOR/vf -# JghUc/qcslDPhoCZpZx8b2ODvywGQNXwqlbsmCS24uGmEkQ3UH5JUeN6c91yasVc -# hS78riMrm6R9ZpAiO5pfNKMGU2MLm1A3pp098DcbFTAc95Hh6Qvkh//28F/Xe2bM -# Fb6DL7Sw0ZO95v0gv0ZTyJfxS/LCxfraeEII9FSFOKAMEp1zNFSs2ue0GGjBt9yE -# EMUwvxq9ExFz0aZzYm8ivJfffpIVDnX/+rVRTYcxIkQyFYslIhYlWF9SjCw5r49q -# akjMRNh8W9O7aaoolSVZleQZjGt0K8JzMlyp6hp2lbW6XqRx2cOHbbxJDxmENzoh -# GUziI13lI2g2Bf5qibfC4bKNRpJo9lbE8HUbY0qJiE8u3SU8eDQaySPXOEhJjxRC -# QwwOvejYmBG5P7CckQNBSnnl12+FKRKgPoj0Mv+z5OMhj9z2MtpbnHLAkep0odQC -# lEyyCG/uR5tK5rW6mZH5Oq56UWS0NI6NV1JGS7Jri6jFMYIHQzCCBz8CAQEweDBh -# MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTIw -# MAYDVQQDEylNaWNyb3NvZnQgUHVibGljIFJTQSBUaW1lc3RhbXBpbmcgQ0EgMjAy -# MAITMwAAAFXZ3WkmKPn44gAAAAAAVTANBglghkgBZQMEAgEFAKCCBJwwEQYLKoZI -# hvcNAQkQAg8xAgUAMBoGCSqGSIb3DQEJAzENBgsqhkiG9w0BCRABBDAcBgkqhkiG -# 9w0BCQUxDxcNMjYwNDA3MjAzNjIzWjAvBgkqhkiG9w0BCQQxIgQgKqYqRp979q8g -# 7hK0TG+U1u6PRrp2uF9Xesi1PlSt0XUwgbkGCyqGSIb3DQEJEAIvMYGpMIGmMIGj -# MIGgBCDYuTyXZIZiu799/v4PaqsmeSzBxh0rqkYq7sYYavj+zTB8MGWkYzBhMQsw -# CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTIwMAYD -# VQQDEylNaWNyb3NvZnQgUHVibGljIFJTQSBUaW1lc3RhbXBpbmcgQ0EgMjAyMAIT -# MwAAAFXZ3WkmKPn44gAAAAAAVTCCA14GCyqGSIb3DQEJEAISMYIDTTCCA0mhggNF -# MIIDQTCCAikCAQEwggEJoYHhpIHeMIHbMQswCQYDVQQGEwJVUzETMBEGA1UECBMK -# V2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0 -# IENvcnBvcmF0aW9uMSUwIwYDVQQLExxNaWNyb3NvZnQgQW1lcmljYSBPcGVyYXRp -# b25zMScwJQYDVQQLEx5uU2hpZWxkIFRTUyBFU046N0QwMC0wNUUwLUQ5NDcxNTAz -# BgNVBAMTLE1pY3Jvc29mdCBQdWJsaWMgUlNBIFRpbWUgU3RhbXBpbmcgQXV0aG9y -# aXR5oiMKAQEwBwYFKw4DAhoDFQAdO1QBgmW/tuBZV5EGjhfsV4cN6qBnMGWkYzBh -# MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTIw -# MAYDVQQDEylNaWNyb3NvZnQgUHVibGljIFJTQSBUaW1lc3RhbXBpbmcgQ0EgMjAy -# MDANBgkqhkiG9w0BAQsFAAIFAO1/Zh0wIhgPMjAyNjA0MDcxMTE2NDVaGA8yMDI2 -# MDQwODExMTY0NVowdDA6BgorBgEEAYRZCgQBMSwwKjAKAgUA7X9mHQIBADAHAgEA -# AgITDjAHAgEAAgIcRzAKAgUA7YC3nQIBADA2BgorBgEEAYRZCgQCMSgwJjAMBgor -# BgEEAYRZCgMCoAowCAIBAAIDB6EgoQowCAIBAAIDAYagMA0GCSqGSIb3DQEBCwUA -# A4IBAQCP8NhPOsRpXyExxSxtV1IGKEWgzQWAmjL30svR7/O2jDhTCAvbuQ+3CVvJ -# V9Y6fD60sFPaxuxLjIiP5CdNSgd6o0D9R70U+Q8fzTWq2HNENnhRPDPT0qBV8pyH -# DRiMZNKkvwZAvpwBiAW5/9mHSoRXBhbg7GLIoQwqnNh2qg3BHH0usKATSSbhZnds -# MHBUUIpx3XvqAKlXliV7wxsNYWLNJSvKWnu/ur8qfHQGtazxDXtFQedgDJ6FkIPR -# 3QfNcQKWH6wrFJzKTxOMjajswpq7xhEfN4sM33CPbiEn1RGQ1OGMOFjeu40y+96z -# nFJ8qaGkipaxvlm9lbiGZup3HBZsMA0GCSqGSIb3DQEBAQUABIICADaemTQ/4ZMQ -# soyCIO9q48tSWvkyJuSkTOW/M7bVT6w/5aJszNjASFT9Dj5RPaUAMOsTarqEEuyR -# Tu2wdsHQxilgIc/vr/MJ3LPh1ywX3TQ2FGpXBXff8zKAxOn0Q/LCiIuVzVKmtr5U -# s4jAxqVmcQ1O1rFC7iOj1pFm4RKcMV5mob54tVp/CVb99nA/SPen2RVbJpaIk/jh -# PQNrizjJSlMMQudkOU94H58/HegK5RGkcrI5ShsnU6FA6TlYE3avFyjt6/LBN34R -# By7f2eNk87zOfF/YccTHKc34FygIdJpkh89Xzhg4cr416ZU6mdsAGvGNwq/snJXR -# nzjFist8pl3/Q1lqakSC8BEhpIf4pftQcJsCJUBInSMFHgAVtpKxJKO60h1/QlSq -# ot9CoBoxmwA1TJpSAli/T83d007EBcHE2u+GGQCz6hAcwjsJgsYI75ONLRzD0u/B -# /EKNduVquwz6XWVVDTTjFfYETPT/cV+zDX0m28S8tZF62+xKRHjJDbPBGj8hTiBi -# +VNvfTv++Z7RGVGWUxAlxoCRnAjsSxiS5zoSvgkkm0+rVipaBRgwoviDLcyReVJ5 -# 
gawZvW25bgeYO6t9Qgxp4tQCqrlABgwR5pCGAEk+z5dHckintm1qwu8YaAgA8Bcv -# GBDbYo8mMvA4mwTau9S5DpdwbZYVLfWp -# SIG # End signature block diff --git a/Python313_13_x64_Template/Lib/venv/scripts/nt/venvlauncher.exe b/Python313_13_x64_Template/Lib/venv/scripts/nt/venvlauncher.exe deleted file mode 100644 index 03286c1b..00000000 Binary files a/Python313_13_x64_Template/Lib/venv/scripts/nt/venvlauncher.exe and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/venv/scripts/nt/venvwlauncher.exe b/Python313_13_x64_Template/Lib/venv/scripts/nt/venvwlauncher.exe deleted file mode 100644 index f872482b..00000000 Binary files a/Python313_13_x64_Template/Lib/venv/scripts/nt/venvwlauncher.exe and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/warnings.py b/Python313_13_x64_Template/Lib/warnings.py deleted file mode 100644 index 04320d47..00000000 --- a/Python313_13_x64_Template/Lib/warnings.py +++ /dev/null @@ -1,735 +0,0 @@ -"""Python part of the warnings subsystem.""" - -import sys - - -__all__ = ["warn", "warn_explicit", "showwarning", - "formatwarning", "filterwarnings", "simplefilter", - "resetwarnings", "catch_warnings", "deprecated"] - -def showwarning(message, category, filename, lineno, file=None, line=None): - """Hook to write a warning to a file; replace if you like.""" - msg = WarningMessage(message, category, filename, lineno, file, line) - _showwarnmsg_impl(msg) - -def formatwarning(message, category, filename, lineno, line=None): - """Function to format a warning the standard way.""" - msg = WarningMessage(message, category, filename, lineno, None, line) - return _formatwarnmsg_impl(msg) - -def _showwarnmsg_impl(msg): - file = msg.file - if file is None: - file = sys.stderr - if file is None: - # sys.stderr is None when run with pythonw.exe: - # warnings get lost - return - text = _formatwarnmsg(msg) - try: - file.write(text) - except OSError: - # the file (probably stderr) is invalid - this warning gets lost. - pass - -def _formatwarnmsg_impl(msg): - category = msg.category.__name__ - s = f"{msg.filename}:{msg.lineno}: {category}: {msg.message}\n" - - if msg.line is None: - try: - import linecache - line = linecache.getline(msg.filename, msg.lineno) - except Exception: - # When a warning is logged during Python shutdown, linecache - # and the import machinery don't work anymore - line = None - linecache = None - else: - line = msg.line - if line: - line = line.strip() - s += " %s\n" % line - - if msg.source is not None: - try: - import tracemalloc - # Logging a warning should not raise a new exception: - # catch Exception, not only ImportError and RecursionError. 
- except Exception: - # don't suggest to enable tracemalloc if it's not available - suggest_tracemalloc = False - tb = None - else: - try: - suggest_tracemalloc = not tracemalloc.is_tracing() - tb = tracemalloc.get_object_traceback(msg.source) - except Exception: - # When a warning is logged during Python shutdown, tracemalloc - # and the import machinery don't work anymore - suggest_tracemalloc = False - tb = None - - if tb is not None: - s += 'Object allocated at (most recent call last):\n' - for frame in tb: - s += (' File "%s", lineno %s\n' - % (frame.filename, frame.lineno)) - - try: - if linecache is not None: - line = linecache.getline(frame.filename, frame.lineno) - else: - line = None - except Exception: - line = None - if line: - line = line.strip() - s += ' %s\n' % line - elif suggest_tracemalloc: - s += (f'{category}: Enable tracemalloc to get the object ' - f'allocation traceback\n') - return s - -# Keep a reference to check if the function was replaced -_showwarning_orig = showwarning - -def _showwarnmsg(msg): - """Hook to write a warning to a file; replace if you like.""" - try: - sw = showwarning - except NameError: - pass - else: - if sw is not _showwarning_orig: - # warnings.showwarning() was replaced - if not callable(sw): - raise TypeError("warnings.showwarning() must be set to a " - "function or method") - - sw(msg.message, msg.category, msg.filename, msg.lineno, - msg.file, msg.line) - return - _showwarnmsg_impl(msg) - -# Keep a reference to check if the function was replaced -_formatwarning_orig = formatwarning - -def _formatwarnmsg(msg): - """Function to format a warning the standard way.""" - try: - fw = formatwarning - except NameError: - pass - else: - if fw is not _formatwarning_orig: - # warnings.formatwarning() was replaced - return fw(msg.message, msg.category, - msg.filename, msg.lineno, msg.line) - return _formatwarnmsg_impl(msg) - -def filterwarnings(action, message="", category=Warning, module="", lineno=0, - append=False): - """Insert an entry into the list of warnings filters (at the front). - - 'action' -- one of "error", "ignore", "always", "default", "module", - or "once" - 'message' -- a regex that the warning message must match - 'category' -- a class that the warning must be a subclass of - 'module' -- a regex that the module name must match - 'lineno' -- an integer line number, 0 matches all warnings - 'append' -- if true, append to the list of filters - """ - if action not in {"error", "ignore", "always", "default", "module", "once"}: - raise ValueError(f"invalid action: {action!r}") - if not isinstance(message, str): - raise TypeError("message must be a string") - if not isinstance(category, type) or not issubclass(category, Warning): - raise TypeError("category must be a Warning subclass") - if not isinstance(module, str): - raise TypeError("module must be a string") - if not isinstance(lineno, int): - raise TypeError("lineno must be an int") - if lineno < 0: - raise ValueError("lineno must be an int >= 0") - - if message or module: - import re - - if message: - message = re.compile(message, re.I) - else: - message = None - if module: - module = re.compile(module) - else: - module = None - - _add_filter(action, message, category, module, lineno, append=append) - -def simplefilter(action, category=Warning, lineno=0, append=False): - """Insert a simple entry into the list of warnings filters (at the front). - - A simple filter matches all modules and messages. 
- 'action' -- one of "error", "ignore", "always", "default", "module", - or "once" - 'category' -- a class that the warning must be a subclass of - 'lineno' -- an integer line number, 0 matches all warnings - 'append' -- if true, append to the list of filters - """ - if action not in {"error", "ignore", "always", "default", "module", "once"}: - raise ValueError(f"invalid action: {action!r}") - if not isinstance(lineno, int): - raise TypeError("lineno must be an int") - if lineno < 0: - raise ValueError("lineno must be an int >= 0") - _add_filter(action, None, category, None, lineno, append=append) - -def _add_filter(*item, append): - # Remove possible duplicate filters, so new one will be placed - # in correct place. If append=True and duplicate exists, do nothing. - if not append: - try: - filters.remove(item) - except ValueError: - pass - filters.insert(0, item) - else: - if item not in filters: - filters.append(item) - _filters_mutated() - -def resetwarnings(): - """Clear the list of warning filters, so that no filters are active.""" - filters[:] = [] - _filters_mutated() - -class _OptionError(Exception): - """Exception used by option processing helpers.""" - pass - -# Helper to process -W options passed via sys.warnoptions -def _processoptions(args): - for arg in args: - try: - _setoption(arg) - except _OptionError as msg: - print("Invalid -W option ignored:", msg, file=sys.stderr) - -# Helper for _processoptions() -def _setoption(arg): - parts = arg.split(':') - if len(parts) > 5: - raise _OptionError("too many fields (max 5): %r" % (arg,)) - while len(parts) < 5: - parts.append('') - action, message, category, module, lineno = [s.strip() - for s in parts] - action = _getaction(action) - category = _getcategory(category) - if message or module: - import re - if message: - message = re.escape(message) - if module: - module = re.escape(module) + r'\Z' - if lineno: - try: - lineno = int(lineno) - if lineno < 0: - raise ValueError - except (ValueError, OverflowError): - raise _OptionError("invalid lineno %r" % (lineno,)) from None - else: - lineno = 0 - filterwarnings(action, message, category, module, lineno) - -# Helper for _setoption() -def _getaction(action): - if not action: - return "default" - if action == "all": return "always" # Alias - for a in ('default', 'always', 'ignore', 'module', 'once', 'error'): - if a.startswith(action): - return a - raise _OptionError("invalid action: %r" % (action,)) - -# Helper for _setoption() -def _getcategory(category): - if not category: - return Warning - if '.' 
not in category: - import builtins as m - klass = category - else: - module, _, klass = category.rpartition('.') - try: - m = __import__(module, None, None, [klass]) - except ImportError: - raise _OptionError("invalid module name: %r" % (module,)) from None - try: - cat = getattr(m, klass) - except AttributeError: - raise _OptionError("unknown warning category: %r" % (category,)) from None - if not issubclass(cat, Warning): - raise _OptionError("invalid warning category: %r" % (category,)) - return cat - - -def _is_internal_filename(filename): - return 'importlib' in filename and '_bootstrap' in filename - - -def _is_filename_to_skip(filename, skip_file_prefixes): - return any(filename.startswith(prefix) for prefix in skip_file_prefixes) - - -def _is_internal_frame(frame): - """Signal whether the frame is an internal CPython implementation detail.""" - return _is_internal_filename(frame.f_code.co_filename) - - -def _next_external_frame(frame, skip_file_prefixes): - """Find the next frame that doesn't involve Python or user internals.""" - frame = frame.f_back - while frame is not None and ( - _is_internal_filename(filename := frame.f_code.co_filename) or - _is_filename_to_skip(filename, skip_file_prefixes)): - frame = frame.f_back - return frame - - -# Code typically replaced by _warnings -def warn(message, category=None, stacklevel=1, source=None, - *, skip_file_prefixes=()): - """Issue a warning, or maybe ignore it or raise an exception.""" - # Check if message is already a Warning object - if isinstance(message, Warning): - category = message.__class__ - # Check category argument - if category is None: - category = UserWarning - if not (isinstance(category, type) and issubclass(category, Warning)): - raise TypeError("category must be a Warning subclass, " - "not '{:s}'".format(type(category).__name__)) - if not isinstance(skip_file_prefixes, tuple): - # The C version demands a tuple for implementation performance. - raise TypeError('skip_file_prefixes must be a tuple of strs.') - if skip_file_prefixes: - stacklevel = max(2, stacklevel) - # Get context information - try: - if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)): - # If frame is too small to care or if the warning originated in - # internal code, then do not try to hide any frames. - frame = sys._getframe(stacklevel) - else: - frame = sys._getframe(1) - # Look for one frame less since the above line starts us off. - for x in range(stacklevel-1): - frame = _next_external_frame(frame, skip_file_prefixes) - if frame is None: - raise ValueError - except ValueError: - globals = sys.__dict__ - filename = "" - lineno = 0 - else: - globals = frame.f_globals - filename = frame.f_code.co_filename - lineno = frame.f_lineno - if '__name__' in globals: - module = globals['__name__'] - else: - module = "" - registry = globals.setdefault("__warningregistry__", {}) - warn_explicit(message, category, filename, lineno, module, registry, - globals, source) - -def warn_explicit(message, category, filename, lineno, - module=None, registry=None, module_globals=None, - source=None): - lineno = int(lineno) - if module is None: - module = filename or "" - if module[-3:].lower() == ".py": - module = module[:-3] # XXX What about leading pathname? 
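From here warn_explicit consults the per-module __warningregistry__ and then walks warnings.filters, a list of (action, message-regex, category, module-regex, lineno) 5-tuples; the first entry whose four predicates all match decides the action. A small illustration of that matching rule, mirroring the loop below — the entry shown is made up:

    import re

    # One filter entry in the shape the loop below expects: a None regex
    # matches anything, and lineno 0 matches any line. Note that the
    # message regex is applied with .match(), i.e. anchored at the start.
    entry = ("ignore", re.compile("deprecated", re.I), DeprecationWarning,
             re.compile(r"legacy\."), 0)

    def entry_matches(entry, text, category, module, lineno):
        action, msg, cat, mod, ln = entry
        return ((msg is None or msg.match(text)) and
                issubclass(category, cat) and
                (mod is None or mod.match(module)) and
                (ln == 0 or lineno == ln))

    print(entry_matches(entry, "Deprecated call to legacy.io.read",
                        DeprecationWarning, "legacy.io", 7))   # True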
- if registry is None: - registry = {} - if registry.get('version', 0) != _filters_version: - registry.clear() - registry['version'] = _filters_version - if isinstance(message, Warning): - text = str(message) - category = message.__class__ - else: - text = message - message = category(message) - key = (text, category, lineno) - # Quick test for common case - if registry.get(key): - return - # Search the filters - for item in filters: - action, msg, cat, mod, ln = item - if ((msg is None or msg.match(text)) and - issubclass(category, cat) and - (mod is None or mod.match(module)) and - (ln == 0 or lineno == ln)): - break - else: - action = defaultaction - # Early exit actions - if action == "ignore": - return - - # Prime the linecache for formatting, in case the - # "file" is actually in a zipfile or something. - import linecache - linecache.getlines(filename, module_globals) - - if action == "error": - raise message - # Other actions - if action == "once": - registry[key] = 1 - oncekey = (text, category) - if onceregistry.get(oncekey): - return - onceregistry[oncekey] = 1 - elif action == "always": - pass - elif action == "module": - registry[key] = 1 - altkey = (text, category, 0) - if registry.get(altkey): - return - registry[altkey] = 1 - elif action == "default": - registry[key] = 1 - else: - # Unrecognized actions are errors - raise RuntimeError( - "Unrecognized action (%r) in warnings.filters:\n %s" % - (action, item)) - # Print message and context - msg = WarningMessage(message, category, filename, lineno, source=source) - _showwarnmsg(msg) - - -class WarningMessage(object): - - _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file", - "line", "source") - - def __init__(self, message, category, filename, lineno, file=None, - line=None, source=None): - self.message = message - self.category = category - self.filename = filename - self.lineno = lineno - self.file = file - self.line = line - self.source = source - self._category_name = category.__name__ if category else None - - def __str__(self): - return ("{message : %r, category : %r, filename : %r, lineno : %s, " - "line : %r}" % (self.message, self._category_name, - self.filename, self.lineno, self.line)) - - -class catch_warnings(object): - - """A context manager that copies and restores the warnings filter upon - exiting the context. - - The 'record' argument specifies whether warnings should be captured by a - custom implementation of warnings.showwarning() and be appended to a list - returned by the context manager. Otherwise None is returned by the context - manager. The objects appended to the list are arguments whose attributes - mirror the arguments to showwarning(). - - The 'module' argument is to specify an alternative module to the module - named 'warnings' and imported under that name. This argument is only useful - when testing the warnings module itself. - - If the 'action' argument is not None, the remaining arguments are passed - to warnings.simplefilter() as if it were called immediately on entering the - context. - """ - - def __init__(self, *, record=False, module=None, - action=None, category=Warning, lineno=0, append=False): - """Specify whether to record warnings and if an alternative module - should be used other than sys.modules['warnings']. - - For compatibility with Python 3.0, please consider all arguments to be - keyword-only. 
- - """ - self._record = record - self._module = sys.modules['warnings'] if module is None else module - self._entered = False - if action is None: - self._filter = None - else: - self._filter = (action, category, lineno, append) - - def __repr__(self): - args = [] - if self._record: - args.append("record=True") - if self._module is not sys.modules['warnings']: - args.append("module=%r" % self._module) - name = type(self).__name__ - return "%s(%s)" % (name, ", ".join(args)) - - def __enter__(self): - if self._entered: - raise RuntimeError("Cannot enter %r twice" % self) - self._entered = True - self._filters = self._module.filters - self._module.filters = self._filters[:] - self._module._filters_mutated() - self._showwarning = self._module.showwarning - self._showwarnmsg_impl = self._module._showwarnmsg_impl - if self._filter is not None: - simplefilter(*self._filter) - if self._record: - log = [] - self._module._showwarnmsg_impl = log.append - # Reset showwarning() to the default implementation to make sure - # that _showwarnmsg() calls _showwarnmsg_impl() - self._module.showwarning = self._module._showwarning_orig - return log - else: - return None - - def __exit__(self, *exc_info): - if not self._entered: - raise RuntimeError("Cannot exit %r without entering first" % self) - self._module.filters = self._filters - self._module._filters_mutated() - self._module.showwarning = self._showwarning - self._module._showwarnmsg_impl = self._showwarnmsg_impl - - -class deprecated: - """Indicate that a class, function or overload is deprecated. - - When this decorator is applied to an object, the type checker - will generate a diagnostic on usage of the deprecated object. - - Usage: - - @deprecated("Use B instead") - class A: - pass - - @deprecated("Use g instead") - def f(): - pass - - @overload - @deprecated("int support is deprecated") - def g(x: int) -> int: ... - @overload - def g(x: str) -> int: ... - - The warning specified by *category* will be emitted at runtime - on use of deprecated objects. For functions, that happens on calls; - for classes, on instantiation and on creation of subclasses. - If the *category* is ``None``, no warning is emitted at runtime. - The *stacklevel* determines where the - warning is emitted. If it is ``1`` (the default), the warning - is emitted at the direct caller of the deprecated object; if it - is higher, it is emitted further up the stack. - Static type checker behavior is not affected by the *category* - and *stacklevel* arguments. - - The deprecation message passed to the decorator is saved in the - ``__deprecated__`` attribute on the decorated object. - If applied to an overload, the decorator - must be after the ``@overload`` decorator for the attribute to - exist on the overload as returned by ``get_overloads()``. - - See PEP 702 for details. - - """ - def __init__( - self, - message: str, - /, - *, - category: type[Warning] | None = DeprecationWarning, - stacklevel: int = 1, - ) -> None: - if not isinstance(message, str): - raise TypeError( - f"Expected an object of type str for 'message', not {type(message).__name__!r}" - ) - self.message = message - self.category = category - self.stacklevel = stacklevel - - def __call__(self, arg, /): - # Make sure the inner functions created below don't - # retain a reference to self. 
- msg = self.message - category = self.category - stacklevel = self.stacklevel - if category is None: - arg.__deprecated__ = msg - return arg - elif isinstance(arg, type): - import functools - from types import MethodType - - original_new = arg.__new__ - - @functools.wraps(original_new) - def __new__(cls, /, *args, **kwargs): - if cls is arg: - warn(msg, category=category, stacklevel=stacklevel + 1) - if original_new is not object.__new__: - return original_new(cls, *args, **kwargs) - # Mirrors a similar check in object.__new__. - elif cls.__init__ is object.__init__ and (args or kwargs): - raise TypeError(f"{cls.__name__}() takes no arguments") - else: - return original_new(cls) - - arg.__new__ = staticmethod(__new__) - - if "__init_subclass__" in arg.__dict__: - # __init_subclass__ is directly present on the decorated class. - # Synthesize a wrapper that calls this method directly. - original_init_subclass = arg.__init_subclass__ - # We need slightly different behavior if __init_subclass__ - # is a bound method (likely if it was implemented in Python). - # Otherwise, it likely means it's a builtin such as - # object's implementation of __init_subclass__. - if isinstance(original_init_subclass, MethodType): - original_init_subclass = original_init_subclass.__func__ - - @functools.wraps(original_init_subclass) - def __init_subclass__(*args, **kwargs): - warn(msg, category=category, stacklevel=stacklevel + 1) - return original_init_subclass(*args, **kwargs) - else: - def __init_subclass__(cls, *args, **kwargs): - warn(msg, category=category, stacklevel=stacklevel + 1) - return super(arg, cls).__init_subclass__(*args, **kwargs) - - arg.__init_subclass__ = classmethod(__init_subclass__) - - arg.__deprecated__ = __new__.__deprecated__ = msg - __init_subclass__.__deprecated__ = msg - return arg - elif callable(arg): - import functools - import inspect - - @functools.wraps(arg) - def wrapper(*args, **kwargs): - warn(msg, category=category, stacklevel=stacklevel + 1) - return arg(*args, **kwargs) - - if inspect.iscoroutinefunction(arg): - wrapper = inspect.markcoroutinefunction(wrapper) - - arg.__deprecated__ = wrapper.__deprecated__ = msg - return wrapper - else: - raise TypeError( - "@deprecated decorator with non-None category must be applied to " - f"a class or callable, not {arg!r}" - ) - - -_DEPRECATED_MSG = "{name!r} is deprecated and slated for removal in Python {remove}" - -def _deprecated(name, message=_DEPRECATED_MSG, *, remove, _version=sys.version_info): - """Warn that *name* is deprecated or should be removed. - - RuntimeError is raised if *remove* specifies a major/minor tuple older than - the current Python version or the same version but past the alpha. - - The *message* argument is formatted with *name* and *remove* as a Python - version tuple (e.g. (3, 11)). 
- - """ - remove_formatted = f"{remove[0]}.{remove[1]}" - if (_version[:2] > remove) or (_version[:2] == remove and _version[3] != "alpha"): - msg = f"{name!r} was slated for removal after Python {remove_formatted} alpha" - raise RuntimeError(msg) - else: - msg = message.format(name=name, remove=remove_formatted) - warn(msg, DeprecationWarning, stacklevel=3) - - -# Private utility function called by _PyErr_WarnUnawaitedCoroutine -def _warn_unawaited_coroutine(coro): - msg_lines = [ - f"coroutine '{coro.__qualname__}' was never awaited\n" - ] - if coro.cr_origin is not None: - import linecache, traceback - def extract(): - for filename, lineno, funcname in reversed(coro.cr_origin): - line = linecache.getline(filename, lineno) - yield (filename, lineno, funcname, line) - msg_lines.append("Coroutine created at (most recent call last)\n") - msg_lines += traceback.format_list(list(extract())) - msg = "".join(msg_lines).rstrip("\n") - # Passing source= here means that if the user happens to have tracemalloc - # enabled and tracking where the coroutine was created, the warning will - # contain that traceback. This does mean that if they have *both* - # coroutine origin tracking *and* tracemalloc enabled, they'll get two - # partially-redundant tracebacks. If we wanted to be clever we could - # probably detect this case and avoid it, but for now we don't bother. - warn(msg, category=RuntimeWarning, stacklevel=2, source=coro) - - -# filters contains a sequence of filter 5-tuples -# The components of the 5-tuple are: -# - an action: error, ignore, always, default, module, or once -# - a compiled regex that must match the warning message -# - a class representing the warning category -# - a compiled regex that must match the module that is being warned -# - a line number for the line being warning, or 0 to mean any line -# If either if the compiled regexs are None, match anything. -try: - from _warnings import (filters, _defaultaction, _onceregistry, - warn, warn_explicit, _filters_mutated) - defaultaction = _defaultaction - onceregistry = _onceregistry - _warnings_defaults = True -except ImportError: - filters = [] - defaultaction = "default" - onceregistry = {} - - _filters_version = 1 - - def _filters_mutated(): - global _filters_version - _filters_version += 1 - - _warnings_defaults = False - - -# Module initialization -_processoptions(sys.warnoptions) -if not _warnings_defaults: - # Several warning categories are ignored by default in regular builds - if not hasattr(sys, 'gettotalrefcount'): - filterwarnings("default", category=DeprecationWarning, - module="__main__", append=1) - simplefilter("ignore", category=DeprecationWarning, append=1) - simplefilter("ignore", category=PendingDeprecationWarning, append=1) - simplefilter("ignore", category=ImportWarning, append=1) - simplefilter("ignore", category=ResourceWarning, append=1) - -del _warnings_defaults diff --git a/Python313_13_x64_Template/Lib/weakref.py b/Python313_13_x64_Template/Lib/weakref.py deleted file mode 100644 index 25b70927..00000000 --- a/Python313_13_x64_Template/Lib/weakref.py +++ /dev/null @@ -1,674 +0,0 @@ -"""Weak reference support for Python. - -This module is an implementation of PEP 205: - -https://peps.python.org/pep-0205/ -""" - -# Naming convention: Variables named "wr" are weak reference objects; -# they are called this instead of "ref" to avoid name collisions with -# the module-global ref() function imported from _weakref. 
- -from _weakref import ( - getweakrefcount, - getweakrefs, - ref, - proxy, - CallableProxyType, - ProxyType, - ReferenceType, - _remove_dead_weakref) - -from _weakrefset import WeakSet, _IterationGuard - -import _collections_abc # Import after _weakref to avoid circular import. -import sys -import itertools - -ProxyTypes = (ProxyType, CallableProxyType) - -__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs", - "WeakKeyDictionary", "ReferenceType", "ProxyType", - "CallableProxyType", "ProxyTypes", "WeakValueDictionary", - "WeakSet", "WeakMethod", "finalize"] - - -_collections_abc.MutableSet.register(WeakSet) - -class WeakMethod(ref): - """ - A custom `weakref.ref` subclass which simulates a weak reference to - a bound method, working around the lifetime problem of bound methods. - """ - - __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__" - - def __new__(cls, meth, callback=None): - try: - obj = meth.__self__ - func = meth.__func__ - except AttributeError: - raise TypeError("argument should be a bound method, not {}" - .format(type(meth))) from None - def _cb(arg): - # The self-weakref trick is needed to avoid creating a reference - # cycle. - self = self_wr() - if self._alive: - self._alive = False - if callback is not None: - callback(self) - self = ref.__new__(cls, obj, _cb) - self._func_ref = ref(func, _cb) - self._meth_type = type(meth) - self._alive = True - self_wr = ref(self) - return self - - def __call__(self): - obj = super().__call__() - func = self._func_ref() - if obj is None or func is None: - return None - return self._meth_type(func, obj) - - def __eq__(self, other): - if isinstance(other, WeakMethod): - if not self._alive or not other._alive: - return self is other - return ref.__eq__(self, other) and self._func_ref == other._func_ref - return NotImplemented - - def __ne__(self, other): - if isinstance(other, WeakMethod): - if not self._alive or not other._alive: - return self is not other - return ref.__ne__(self, other) or self._func_ref != other._func_ref - return NotImplemented - - __hash__ = ref.__hash__ - - -class WeakValueDictionary(_collections_abc.MutableMapping): - """Mapping class that references values weakly. - - Entries in the dictionary will be discarded when no strong - reference to the value exists anymore - """ - # We inherit the constructor without worrying about the input - # dictionary; since it uses our .update() method, we get the right - # checks (if the other dictionary is a WeakValueDictionary, - # objects are unwrapped on the way out, and we always wrap on the - # way in). - - def __init__(self, other=(), /, **kw): - def remove(wr, selfref=ref(self), _atomic_removal=_remove_dead_weakref): - self = selfref() - if self is not None: - if self._iterating: - self._pending_removals.append(wr.key) - else: - # Atomic removal is necessary since this function - # can be called asynchronously by the GC - _atomic_removal(self.data, wr.key) - self._remove = remove - # A list of keys to be removed - self._pending_removals = [] - self._iterating = set() - self.data = {} - self.update(other, **kw) - - def _commit_removals(self, _atomic_removal=_remove_dead_weakref): - pop = self._pending_removals.pop - d = self.data - # We shouldn't encounter any KeyError, because this method should - # always be called *before* mutating the dict. 
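The deferral matters because the GC may invoke remove() at arbitrary points: dead keys are queued in _pending_removals while iteration is in progress and drained before the dict is mutated. The observable behavior, in a short CPython sketch:

    import weakref

    class Value:
        pass

    d = weakref.WeakValueDictionary()
    v = Value()
    d["k"] = v
    print(len(d))   # 1

    del v           # the only strong reference to the value dies...
    print(len(d))   # 0 on CPython: the entry is discarded with it

The while/pop loop that follows is that drain.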
- while True: - try: - key = pop() - except IndexError: - return - _atomic_removal(d, key) - - def __getitem__(self, key): - if self._pending_removals: - self._commit_removals() - o = self.data[key]() - if o is None: - raise KeyError(key) - else: - return o - - def __delitem__(self, key): - if self._pending_removals: - self._commit_removals() - del self.data[key] - - def __len__(self): - if self._pending_removals: - self._commit_removals() - return len(self.data) - - def __contains__(self, key): - if self._pending_removals: - self._commit_removals() - try: - o = self.data[key]() - except KeyError: - return False - return o is not None - - def __repr__(self): - return "<%s at %#x>" % (self.__class__.__name__, id(self)) - - def __setitem__(self, key, value): - if self._pending_removals: - self._commit_removals() - self.data[key] = KeyedRef(value, self._remove, key) - - def copy(self): - if self._pending_removals: - self._commit_removals() - new = WeakValueDictionary() - with _IterationGuard(self): - for key, wr in self.data.items(): - o = wr() - if o is not None: - new[key] = o - return new - - __copy__ = copy - - def __deepcopy__(self, memo): - from copy import deepcopy - if self._pending_removals: - self._commit_removals() - new = self.__class__() - with _IterationGuard(self): - for key, wr in self.data.items(): - o = wr() - if o is not None: - new[deepcopy(key, memo)] = o - return new - - def get(self, key, default=None): - if self._pending_removals: - self._commit_removals() - try: - wr = self.data[key] - except KeyError: - return default - else: - o = wr() - if o is None: - # This should only happen - return default - else: - return o - - def items(self): - if self._pending_removals: - self._commit_removals() - with _IterationGuard(self): - for k, wr in self.data.items(): - v = wr() - if v is not None: - yield k, v - - def keys(self): - if self._pending_removals: - self._commit_removals() - with _IterationGuard(self): - for k, wr in self.data.items(): - if wr() is not None: - yield k - - __iter__ = keys - - def itervaluerefs(self): - """Return an iterator that yields the weak references to the values. - - The references are not guaranteed to be 'live' at the time - they are used, so the result of calling the references needs - to be checked before being used. This can be used to avoid - creating references that will cause the garbage collector to - keep the values around longer than needed. 
- - """ - if self._pending_removals: - self._commit_removals() - with _IterationGuard(self): - yield from self.data.values() - - def values(self): - if self._pending_removals: - self._commit_removals() - with _IterationGuard(self): - for wr in self.data.values(): - obj = wr() - if obj is not None: - yield obj - - def popitem(self): - if self._pending_removals: - self._commit_removals() - while True: - key, wr = self.data.popitem() - o = wr() - if o is not None: - return key, o - - def pop(self, key, *args): - if self._pending_removals: - self._commit_removals() - try: - o = self.data.pop(key)() - except KeyError: - o = None - if o is None: - if args: - return args[0] - else: - raise KeyError(key) - else: - return o - - def setdefault(self, key, default=None): - try: - o = self.data[key]() - except KeyError: - o = None - if o is None: - if self._pending_removals: - self._commit_removals() - self.data[key] = KeyedRef(default, self._remove, key) - return default - else: - return o - - def update(self, other=None, /, **kwargs): - if self._pending_removals: - self._commit_removals() - d = self.data - if other is not None: - if not hasattr(other, "items"): - other = dict(other) - for key, o in other.items(): - d[key] = KeyedRef(o, self._remove, key) - for key, o in kwargs.items(): - d[key] = KeyedRef(o, self._remove, key) - - def valuerefs(self): - """Return a list of weak references to the values. - - The references are not guaranteed to be 'live' at the time - they are used, so the result of calling the references needs - to be checked before being used. This can be used to avoid - creating references that will cause the garbage collector to - keep the values around longer than needed. - - """ - if self._pending_removals: - self._commit_removals() - return list(self.data.values()) - - def __ior__(self, other): - self.update(other) - return self - - def __or__(self, other): - if isinstance(other, _collections_abc.Mapping): - c = self.copy() - c.update(other) - return c - return NotImplemented - - def __ror__(self, other): - if isinstance(other, _collections_abc.Mapping): - c = self.__class__() - c.update(other) - c.update(self) - return c - return NotImplemented - - -class KeyedRef(ref): - """Specialized reference that includes a key corresponding to the value. - - This is used in the WeakValueDictionary to avoid having to create - a function object for each key stored in the mapping. A shared - callback object can use the 'key' attribute of a KeyedRef instead - of getting a reference to the key from an enclosing scope. - - """ - - __slots__ = "key", - - def __new__(type, ob, callback, key): - self = ref.__new__(type, ob, callback) - self.key = key - return self - - def __init__(self, ob, callback, key): - super().__init__(ob, callback) - - -class WeakKeyDictionary(_collections_abc.MutableMapping): - """ Mapping class that references keys weakly. - - Entries in the dictionary will be discarded when there is no - longer a strong reference to the key. This can be used to - associate additional data with an object owned by other parts of - an application without adding attributes to those objects. This - can be especially useful with objects that override attribute - accesses. 
- """ - - def __init__(self, dict=None): - self.data = {} - def remove(k, selfref=ref(self)): - self = selfref() - if self is not None: - if self._iterating: - self._pending_removals.append(k) - else: - try: - del self.data[k] - except KeyError: - pass - self._remove = remove - # A list of dead weakrefs (keys to be removed) - self._pending_removals = [] - self._iterating = set() - self._dirty_len = False - if dict is not None: - self.update(dict) - - def _commit_removals(self): - # NOTE: We don't need to call this method before mutating the dict, - # because a dead weakref never compares equal to a live weakref, - # even if they happened to refer to equal objects. - # However, it means keys may already have been removed. - pop = self._pending_removals.pop - d = self.data - while True: - try: - key = pop() - except IndexError: - return - - try: - del d[key] - except KeyError: - pass - - def _scrub_removals(self): - d = self.data - self._pending_removals = [k for k in self._pending_removals if k in d] - self._dirty_len = False - - def __delitem__(self, key): - self._dirty_len = True - del self.data[ref(key)] - - def __getitem__(self, key): - return self.data[ref(key)] - - def __len__(self): - if self._dirty_len and self._pending_removals: - # self._pending_removals may still contain keys which were - # explicitly removed, we have to scrub them (see issue #21173). - self._scrub_removals() - return len(self.data) - len(self._pending_removals) - - def __repr__(self): - return "<%s at %#x>" % (self.__class__.__name__, id(self)) - - def __setitem__(self, key, value): - self.data[ref(key, self._remove)] = value - - def copy(self): - new = WeakKeyDictionary() - with _IterationGuard(self): - for key, value in self.data.items(): - o = key() - if o is not None: - new[o] = value - return new - - __copy__ = copy - - def __deepcopy__(self, memo): - from copy import deepcopy - new = self.__class__() - with _IterationGuard(self): - for key, value in self.data.items(): - o = key() - if o is not None: - new[o] = deepcopy(value, memo) - return new - - def get(self, key, default=None): - return self.data.get(ref(key),default) - - def __contains__(self, key): - try: - wr = ref(key) - except TypeError: - return False - return wr in self.data - - def items(self): - with _IterationGuard(self): - for wr, value in self.data.items(): - key = wr() - if key is not None: - yield key, value - - def keys(self): - with _IterationGuard(self): - for wr in self.data: - obj = wr() - if obj is not None: - yield obj - - __iter__ = keys - - def values(self): - with _IterationGuard(self): - for wr, value in self.data.items(): - if wr() is not None: - yield value - - def keyrefs(self): - """Return a list of weak references to the keys. - - The references are not guaranteed to be 'live' at the time - they are used, so the result of calling the references needs - to be checked before being used. This can be used to avoid - creating references that will cause the garbage collector to - keep the keys around longer than needed. 
- - """ - return list(self.data) - - def popitem(self): - self._dirty_len = True - while True: - key, value = self.data.popitem() - o = key() - if o is not None: - return o, value - - def pop(self, key, *args): - self._dirty_len = True - return self.data.pop(ref(key), *args) - - def setdefault(self, key, default=None): - return self.data.setdefault(ref(key, self._remove),default) - - def update(self, dict=None, /, **kwargs): - d = self.data - if dict is not None: - if not hasattr(dict, "items"): - dict = type({})(dict) - for key, value in dict.items(): - d[ref(key, self._remove)] = value - if len(kwargs): - self.update(kwargs) - - def __ior__(self, other): - self.update(other) - return self - - def __or__(self, other): - if isinstance(other, _collections_abc.Mapping): - c = self.copy() - c.update(other) - return c - return NotImplemented - - def __ror__(self, other): - if isinstance(other, _collections_abc.Mapping): - c = self.__class__() - c.update(other) - c.update(self) - return c - return NotImplemented - - -class finalize: - """Class for finalization of weakrefable objects - - finalize(obj, func, *args, **kwargs) returns a callable finalizer - object which will be called when obj is garbage collected. The - first time the finalizer is called it evaluates func(*arg, **kwargs) - and returns the result. After this the finalizer is dead, and - calling it just returns None. - - When the program exits any remaining finalizers for which the - atexit attribute is true will be run in reverse order of creation. - By default atexit is true. - """ - - # Finalizer objects don't have any state of their own. They are - # just used as keys to lookup _Info objects in the registry. This - # ensures that they cannot be part of a ref-cycle. - - __slots__ = () - _registry = {} - _shutdown = False - _index_iter = itertools.count() - _dirty = False - _registered_with_atexit = False - - class _Info: - __slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index") - - def __init__(self, obj, func, /, *args, **kwargs): - if not self._registered_with_atexit: - # We may register the exit function more than once because - # of a thread race, but that is harmless - import atexit - atexit.register(self._exitfunc) - finalize._registered_with_atexit = True - info = self._Info() - info.weakref = ref(obj, self) - info.func = func - info.args = args - info.kwargs = kwargs or None - info.atexit = True - info.index = next(self._index_iter) - self._registry[self] = info - finalize._dirty = True - - def __call__(self, _=None): - """If alive then mark as dead and return func(*args, **kwargs); - otherwise return None""" - info = self._registry.pop(self, None) - if info and not self._shutdown: - return info.func(*info.args, **(info.kwargs or {})) - - def detach(self): - """If alive then mark as dead and return (obj, func, args, kwargs); - otherwise return None""" - info = self._registry.get(self) - obj = info and info.weakref() - if obj is not None and self._registry.pop(self, None): - return (obj, info.func, info.args, info.kwargs or {}) - - def peek(self): - """If alive then return (obj, func, args, kwargs); - otherwise return None""" - info = self._registry.get(self) - obj = info and info.weakref() - if obj is not None: - return (obj, info.func, info.args, info.kwargs or {}) - - @property - def alive(self): - """Whether finalizer is alive""" - return self in self._registry - - @property - def atexit(self): - """Whether finalizer should be called at exit""" - info = self._registry.get(self) - return bool(info) and 
info.atexit - - @atexit.setter - def atexit(self, value): - info = self._registry.get(self) - if info: - info.atexit = bool(value) - - def __repr__(self): - info = self._registry.get(self) - obj = info and info.weakref() - if obj is None: - return '<%s object at %#x; dead>' % (type(self).__name__, id(self)) - else: - return '<%s object at %#x; for %r at %#x>' % \ - (type(self).__name__, id(self), type(obj).__name__, id(obj)) - - @classmethod - def _select_for_exit(cls): - # Return live finalizers marked for exit, oldest first - L = [(f,i) for (f,i) in cls._registry.items() if i.atexit] - L.sort(key=lambda item:item[1].index) - return [f for (f,i) in L] - - @classmethod - def _exitfunc(cls): - # At shutdown invoke finalizers for which atexit is true. - # This is called once all other non-daemonic threads have been - # joined. - reenable_gc = False - try: - if cls._registry: - import gc - if gc.isenabled(): - reenable_gc = True - gc.disable() - pending = None - while True: - if pending is None or finalize._dirty: - pending = cls._select_for_exit() - finalize._dirty = False - if not pending: - break - f = pending.pop() - try: - # gc is disabled, so (assuming no daemonic - # threads) the following is the only line in - # this function which might trigger creation - # of a new finalizer - f() - except Exception: - sys.excepthook(*sys.exc_info()) - assert f not in cls._registry - finally: - # prevent any more finalizers from executing during shutdown - finalize._shutdown = True - if reenable_gc: - gc.enable() diff --git a/Python313_13_x64_Template/Lib/webbrowser.py b/Python313_13_x64_Template/Lib/webbrowser.py deleted file mode 100644 index ee582410..00000000 --- a/Python313_13_x64_Template/Lib/webbrowser.py +++ /dev/null @@ -1,723 +0,0 @@ -#! /usr/bin/env python3 -"""Interfaces for launching and remotely controlling web browsers.""" -# Maintained by Georg Brandl. - -import os -import shlex -import shutil -import sys -import subprocess -import threading - -__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"] - - -class Error(Exception): - pass - - -_lock = threading.RLock() -_browsers = {} # Dictionary of available browser controllers -_tryorder = None # Preference order of available browsers -_os_preferred_browser = None # The preferred browser - - -def register(name, klass, instance=None, *, preferred=False): - """Register a browser connector.""" - with _lock: - if _tryorder is None: - register_standard_browsers() - _browsers[name.lower()] = [klass, instance] - - # Preferred browsers go to the front of the list. - # Need to match to the default browser returned by xdg-settings, which - # may be of the form e.g. "firefox.desktop". - if preferred or (_os_preferred_browser and f'{name}.desktop' == _os_preferred_browser): - _tryorder.insert(0, name) - else: - _tryorder.append(name) - - -def get(using=None): - """Return a browser launcher instance appropriate for the environment.""" - if _tryorder is None: - with _lock: - if _tryorder is None: - register_standard_browsers() - if using is not None: - alternatives = [using] - else: - alternatives = _tryorder - for browser in alternatives: - if '%s' in browser: - # User gave us a command line, split it into name and args - browser = shlex.split(browser) - if browser[-1] == '&': - return BackgroundBrowser(browser[:-1]) - else: - return GenericBrowser(browser) - else: - # User gave us a browser name or path. 
- try: - command = _browsers[browser.lower()] - except KeyError: - command = _synthesize(browser) - if command[1] is not None: - return command[1] - elif command[0] is not None: - return command[0]() - raise Error("could not locate runnable browser") - - -# Please note: the following definition hides a builtin function. -# It is recommended one does "import webbrowser" and uses webbrowser.open(url) -# instead of "from webbrowser import *". - -def open(url, new=0, autoraise=True): - """Display url using the default browser. - - If possible, open url in a location determined by new. - - 0: the same browser window (the default). - - 1: a new browser window. - - 2: a new browser page ("tab"). - If possible, autoraise raises the window (the default) or not. - - If opening the browser succeeds, return True. - If there is a problem, return False. - """ - if _tryorder is None: - with _lock: - if _tryorder is None: - register_standard_browsers() - for name in _tryorder: - browser = get(name) - if browser.open(url, new, autoraise): - return True - return False - - -def open_new(url): - """Open url in a new window of the default browser. - - If not possible, then open url in the only browser window. - """ - return open(url, 1) - - -def open_new_tab(url): - """Open url in a new page ("tab") of the default browser. - - If not possible, then the behavior becomes equivalent to open_new(). - """ - return open(url, 2) - - -def _synthesize(browser, *, preferred=False): - """Attempt to synthesize a controller based on existing controllers. - - This is useful to create a controller when a user specifies a path to - an entry in the BROWSER environment variable -- we can copy a general - controller to operate using a specific installation of the desired - browser in this way. - - If we can't create a controller in this way, or if there is no - executable for the requested browser, return [None, None]. - - """ - cmd = browser.split()[0] - if not shutil.which(cmd): - return [None, None] - name = os.path.basename(cmd) - try: - command = _browsers[name.lower()] - except KeyError: - return [None, None] - # now attempt to clone to fit the new name: - controller = command[1] - if controller and name.lower() == controller.basename: - import copy - controller = copy.copy(controller) - controller.name = browser - controller.basename = os.path.basename(browser) - register(browser, None, instance=controller, preferred=preferred) - return [None, controller] - return [None, None] - - -# General parent classes - -class BaseBrowser: - """Parent class for all browsers. 
Do not use directly.""" - - args = ['%s'] - - def __init__(self, name=""): - self.name = name - self.basename = name - - def open(self, url, new=0, autoraise=True): - raise NotImplementedError - - def open_new(self, url): - return self.open(url, 1) - - def open_new_tab(self, url): - return self.open(url, 2) - - @staticmethod - def _check_url(url): - """Ensures that the URL is safe to pass to subprocesses as a parameter""" - if url and url.lstrip().startswith("-"): - raise ValueError(f"Invalid URL (leading dash disallowed): {url!r}") - - -class GenericBrowser(BaseBrowser): - """Class for all browsers started with a command - and without remote functionality.""" - - def __init__(self, name): - if isinstance(name, str): - self.name = name - self.args = ["%s"] - else: - # name should be a list with arguments - self.name = name[0] - self.args = name[1:] - self.basename = os.path.basename(self.name) - - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - cmdline = [self.name] + [arg.replace("%s", url) - for arg in self.args] - try: - if sys.platform[:3] == 'win': - p = subprocess.Popen(cmdline) - else: - p = subprocess.Popen(cmdline, close_fds=True) - return not p.wait() - except OSError: - return False - - -class BackgroundBrowser(GenericBrowser): - """Class for all browsers which are to be started in the - background.""" - - def open(self, url, new=0, autoraise=True): - cmdline = [self.name] + [arg.replace("%s", url) - for arg in self.args] - sys.audit("webbrowser.open", url) - self._check_url(url) - try: - if sys.platform[:3] == 'win': - p = subprocess.Popen(cmdline) - else: - p = subprocess.Popen(cmdline, close_fds=True, - start_new_session=True) - return p.poll() is None - except OSError: - return False - - -class UnixBrowser(BaseBrowser): - """Parent class for all Unix browsers with remote functionality.""" - - raise_opts = None - background = False - redirect_stdout = True - # In remote_args, %s will be replaced with the requested URL. %action will - # be replaced depending on the value of 'new' passed to open. - # remote_action is used for new=0 (open). If newwin is not None, it is - # used for new=1 (open_new). If newtab is not None, it is used for - # new=3 (open_new_tab). After both substitutions are made, any empty - # strings in the transformed remote_args list will be removed. - remote_args = ['%action', '%s'] - remote_action = None - remote_action_newwin = None - remote_action_newtab = None - - def _invoke(self, args, remote, autoraise, url=None): - raise_opt = [] - if remote and self.raise_opts: - # use autoraise argument only for remote invocation - autoraise = int(autoraise) - opt = self.raise_opts[autoraise] - if opt: - raise_opt = [opt] - - cmdline = [self.name] + raise_opt + args - - if remote or self.background: - inout = subprocess.DEVNULL - else: - # for TTY browsers, we need stdin/out - inout = None - p = subprocess.Popen(cmdline, close_fds=True, stdin=inout, - stdout=(self.redirect_stdout and inout or None), - stderr=inout, start_new_session=True) - if remote: - # wait at most five seconds. If the subprocess is not finished, the - # remote invocation has (hopefully) started a new instance. 
- try: - rc = p.wait(5) - # if remote call failed, open() will try direct invocation - return not rc - except subprocess.TimeoutExpired: - return True - elif self.background: - if p.poll() is None: - return True - else: - return False - else: - return not p.wait() - - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - if new == 0: - action = self.remote_action - elif new == 1: - action = self.remote_action_newwin - elif new == 2: - if self.remote_action_newtab is None: - action = self.remote_action_newwin - else: - action = self.remote_action_newtab - else: - raise Error("Bad 'new' parameter to open(); " - f"expected 0, 1, or 2, got {new}") - - args = [arg.replace("%s", url).replace("%action", action) - for arg in self.remote_args] - args = [arg for arg in args if arg] - success = self._invoke(args, True, autoraise, url) - if not success: - # remote invocation failed, try straight way - args = [arg.replace("%s", url) for arg in self.args] - return self._invoke(args, False, False) - else: - return True - - -class Mozilla(UnixBrowser): - """Launcher class for Mozilla browsers.""" - - remote_args = ['%action', '%s'] - remote_action = "" - remote_action_newwin = "-new-window" - remote_action_newtab = "-new-tab" - background = True - - -class Epiphany(UnixBrowser): - """Launcher class for Epiphany browser.""" - - raise_opts = ["-noraise", ""] - remote_args = ['%action', '%s'] - remote_action = "-n" - remote_action_newwin = "-w" - background = True - - -class Chrome(UnixBrowser): - """Launcher class for Google Chrome browser.""" - - remote_args = ['%action', '%s'] - remote_action = "" - remote_action_newwin = "--new-window" - remote_action_newtab = "" - background = True - - -Chromium = Chrome - - -class Opera(UnixBrowser): - """Launcher class for Opera browser.""" - - remote_args = ['%action', '%s'] - remote_action = "" - remote_action_newwin = "--new-window" - remote_action_newtab = "" - background = True - - -class Elinks(UnixBrowser): - """Launcher class for Elinks browsers.""" - - remote_args = ['-remote', 'openURL(%s%action)'] - remote_action = "" - remote_action_newwin = ",new-window" - remote_action_newtab = ",new-tab" - background = False - - # elinks doesn't like its stdout to be redirected - - # it uses redirected stdout as a signal to do -dump - redirect_stdout = False - - -class Konqueror(BaseBrowser): - """Controller for the KDE File Manager (kfm, or Konqueror). - - See the output of ``kfmclient --commands`` - for more information on the Konqueror remote-control interface. - """ - - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - # XXX Currently I know no way to prevent KFM from opening a new win. - if new == 2: - action = "newTab" - else: - action = "openURL" - - devnull = subprocess.DEVNULL - - try: - p = subprocess.Popen(["kfmclient", action, url], - close_fds=True, stdin=devnull, - stdout=devnull, stderr=devnull) - except OSError: - # fall through to next variant - pass - else: - p.wait() - # kfmclient's return code unfortunately has no meaning as it seems - return True - - try: - p = subprocess.Popen(["konqueror", "--silent", url], - close_fds=True, stdin=devnull, - stdout=devnull, stderr=devnull, - start_new_session=True) - except OSError: - # fall through to next variant - pass - else: - if p.poll() is None: - # Should be running now. 
- return True - - try: - p = subprocess.Popen(["kfm", "-d", url], - close_fds=True, stdin=devnull, - stdout=devnull, stderr=devnull, - start_new_session=True) - except OSError: - return False - else: - return p.poll() is None - - -class Edge(UnixBrowser): - """Launcher class for Microsoft Edge browser.""" - - remote_args = ['%action', '%s'] - remote_action = "" - remote_action_newwin = "--new-window" - remote_action_newtab = "" - background = True - - -# -# Platform support for Unix -# - -# These are the right tests because all these Unix browsers require either -# a console terminal or an X display to run. - -def register_X_browsers(): - - # use xdg-open if around - if shutil.which("xdg-open"): - register("xdg-open", None, BackgroundBrowser("xdg-open")) - - # Opens an appropriate browser for the URL scheme according to - # freedesktop.org settings (GNOME, KDE, XFCE, etc.) - if shutil.which("gio"): - register("gio", None, BackgroundBrowser(["gio", "open", "--", "%s"])) - - xdg_desktop = os.getenv("XDG_CURRENT_DESKTOP", "").split(":") - - # The default GNOME3 browser - if (("GNOME" in xdg_desktop or - "GNOME_DESKTOP_SESSION_ID" in os.environ) and - shutil.which("gvfs-open")): - register("gvfs-open", None, BackgroundBrowser("gvfs-open")) - - # The default KDE browser - if (("KDE" in xdg_desktop or - "KDE_FULL_SESSION" in os.environ) and - shutil.which("kfmclient")): - register("kfmclient", Konqueror, Konqueror("kfmclient")) - - # Common symbolic link for the default X11 browser - if shutil.which("x-www-browser"): - register("x-www-browser", None, BackgroundBrowser("x-www-browser")) - - # The Mozilla browsers - for browser in ("firefox", "iceweasel", "seamonkey", "mozilla-firefox", - "mozilla"): - if shutil.which(browser): - register(browser, None, Mozilla(browser)) - - # Konqueror/kfm, the KDE browser. - if shutil.which("kfm"): - register("kfm", Konqueror, Konqueror("kfm")) - elif shutil.which("konqueror"): - register("konqueror", Konqueror, Konqueror("konqueror")) - - # Gnome's Epiphany - if shutil.which("epiphany"): - register("epiphany", None, Epiphany("epiphany")) - - # Google Chrome/Chromium browsers - for browser in ("google-chrome", "chrome", "chromium", "chromium-browser"): - if shutil.which(browser): - register(browser, None, Chrome(browser)) - - # Opera, quite popular - if shutil.which("opera"): - register("opera", None, Opera("opera")) - - if shutil.which("microsoft-edge"): - register("microsoft-edge", None, Edge("microsoft-edge")) - - -def register_standard_browsers(): - global _tryorder - _tryorder = [] - - if sys.platform == 'darwin': - register("MacOSX", None, MacOSXOSAScript('default')) - register("chrome", None, MacOSXOSAScript('chrome')) - register("firefox", None, MacOSXOSAScript('firefox')) - register("safari", None, MacOSXOSAScript('safari')) - # OS X can use below Unix support (but we prefer using the OS X - # specific stuff) - - if sys.platform == "ios": - register("iosbrowser", None, IOSBrowser(), preferred=True) - - if sys.platform == "serenityos": - # SerenityOS webbrowser, simply called "Browser". 
- register("Browser", None, BackgroundBrowser("Browser")) - - if sys.platform[:3] == "win": - # First try to use the default Windows browser - register("windows-default", WindowsDefault) - - # Detect some common Windows browsers, fallback to Microsoft Edge - # location in 64-bit Windows - edge64 = os.path.join(os.environ.get("PROGRAMFILES(x86)", "C:\\Program Files (x86)"), - "Microsoft\\Edge\\Application\\msedge.exe") - # location in 32-bit Windows - edge32 = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"), - "Microsoft\\Edge\\Application\\msedge.exe") - for browser in ("firefox", "seamonkey", "mozilla", "chrome", - "opera", edge64, edge32): - if shutil.which(browser): - register(browser, None, BackgroundBrowser(browser)) - if shutil.which("MicrosoftEdge.exe"): - register("microsoft-edge", None, Edge("MicrosoftEdge.exe")) - else: - # Prefer X browsers if present - # - # NOTE: Do not check for X11 browser on macOS, - # XQuartz installation sets a DISPLAY environment variable and will - # autostart when someone tries to access the display. Mac users in - # general don't need an X11 browser. - if sys.platform != "darwin" and (os.environ.get("DISPLAY") or os.environ.get("WAYLAND_DISPLAY")): - try: - cmd = "xdg-settings get default-web-browser".split() - raw_result = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) - result = raw_result.decode().strip() - except (FileNotFoundError, subprocess.CalledProcessError, - PermissionError, NotADirectoryError): - pass - else: - global _os_preferred_browser - _os_preferred_browser = result - - register_X_browsers() - - # Also try console browsers - if os.environ.get("TERM"): - # Common symbolic link for the default text-based browser - if shutil.which("www-browser"): - register("www-browser", None, GenericBrowser("www-browser")) - # The Links/elinks browsers - if shutil.which("links"): - register("links", None, GenericBrowser("links")) - if shutil.which("elinks"): - register("elinks", None, Elinks("elinks")) - # The Lynx browser , - if shutil.which("lynx"): - register("lynx", None, GenericBrowser("lynx")) - # The w3m browser - if shutil.which("w3m"): - register("w3m", None, GenericBrowser("w3m")) - - # OK, now that we know what the default preference orders for each - # platform are, allow user to override them with the BROWSER variable. - if "BROWSER" in os.environ: - userchoices = os.environ["BROWSER"].split(os.pathsep) - userchoices.reverse() - - # Treat choices in same way as if passed into get() but do register - # and prepend to _tryorder - for cmdline in userchoices: - if cmdline != '': - cmd = _synthesize(cmdline, preferred=True) - if cmd[1] is None: - register(cmdline, None, GenericBrowser(cmdline), preferred=True) - - # what to do if _tryorder is now empty? 
- - -# -# Platform support for Windows -# - -if sys.platform[:3] == "win": - class WindowsDefault(BaseBrowser): - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - try: - os.startfile(url) - except OSError: - # [Error 22] No application is associated with the specified - # file for this operation: '' - return False - else: - return True - -# -# Platform support for macOS -# - -if sys.platform == 'darwin': - class MacOSXOSAScript(BaseBrowser): - def __init__(self, name='default'): - super().__init__(name) - - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - url = url.replace('"', '%22') - if self.name == 'default': - script = f'open location "{url}"' # opens in default browser - else: - script = f''' - tell application "{self.name}" - activate - open location "{url}" - end - ''' - - osapipe = os.popen("/usr/bin/osascript", "w") - if osapipe is None: - return False - - osapipe.write(script) - rc = osapipe.close() - return not rc - -# -# Platform support for iOS -# -if sys.platform == "ios": - from _ios_support import objc - if objc: - # If objc exists, we know ctypes is also importable. - from ctypes import c_void_p, c_char_p, c_ulong - - class IOSBrowser(BaseBrowser): - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - # If ctypes isn't available, we can't open a browser - if objc is None: - return False - - # All the messages in this call return object references. - objc.objc_msgSend.restype = c_void_p - - # This is the equivalent of: - # NSString url_string = - # [NSString stringWithCString:url.encode("utf-8") - # encoding:NSUTF8StringEncoding]; - NSString = objc.objc_getClass(b"NSString") - constructor = objc.sel_registerName(b"stringWithCString:encoding:") - objc.objc_msgSend.argtypes = [c_void_p, c_void_p, c_char_p, c_ulong] - url_string = objc.objc_msgSend( - NSString, - constructor, - url.encode("utf-8"), - 4, # NSUTF8StringEncoding = 4 - ) - - # Create an NSURL object representing the URL - # This is the equivalent of: - # NSURL *nsurl = [NSURL URLWithString:url]; - NSURL = objc.objc_getClass(b"NSURL") - urlWithString_ = objc.sel_registerName(b"URLWithString:") - objc.objc_msgSend.argtypes = [c_void_p, c_void_p, c_void_p] - ns_url = objc.objc_msgSend(NSURL, urlWithString_, url_string) - - # Get the shared UIApplication instance - # This code is the equivalent of: - # UIApplication shared_app = [UIApplication sharedApplication] - UIApplication = objc.objc_getClass(b"UIApplication") - sharedApplication = objc.sel_registerName(b"sharedApplication") - objc.objc_msgSend.argtypes = [c_void_p, c_void_p] - shared_app = objc.objc_msgSend(UIApplication, sharedApplication) - - # Open the URL on the shared application - # This code is the equivalent of: - # [shared_app openURL:ns_url - # options:NIL - # completionHandler:NIL]; - openURL_ = objc.sel_registerName(b"openURL:options:completionHandler:") - objc.objc_msgSend.argtypes = [ - c_void_p, c_void_p, c_void_p, c_void_p, c_void_p - ] - # Method returns void - objc.objc_msgSend.restype = None - objc.objc_msgSend(shared_app, openURL_, ns_url, None, None) - - return True - - -def parse_args(arg_list: list[str] | None): - import argparse - parser = argparse.ArgumentParser(description="Open URL in a web browser.") - parser.add_argument("url", help="URL to open") - - group = parser.add_mutually_exclusive_group() - group.add_argument("-n", "--new-window", action="store_const", - 
const=1, default=0, dest="new_win", - help="open new window") - group.add_argument("-t", "--new-tab", action="store_const", - const=2, default=0, dest="new_win", - help="open new tab") - - args = parser.parse_args(arg_list) - - return args - - -def main(arg_list: list[str] | None = None): - args = parse_args(arg_list) - - open(args.url, args.new_win) - - print("\a") - - -if __name__ == "__main__": - main() diff --git a/Python313_13_x64_Template/Lib/wsgiref/headers.py b/Python313_13_x64_Template/Lib/wsgiref/headers.py deleted file mode 100644 index 17559b0a..00000000 --- a/Python313_13_x64_Template/Lib/wsgiref/headers.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Manage HTTP Response Headers - -Much of this module is red-handedly pilfered from email.message in the stdlib, -so portions are Copyright (C) 2001,2002 Python Software Foundation, and were -written by Barry Warsaw. -""" - -# Regular expression that matches `special' characters in parameters, the -# existence of which force quoting of the parameter value. -import re -tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]') -# Disallowed characters for headers and values. -# HTAB (\x09) is allowed in header values, but -# not in header names. (RFC 9110 Section 5.5) -_name_disallowed_re = re.compile(r'[\x00-\x1F\x7F]') -_value_disallowed_re = re.compile(r'[\x00-\x08\x0A-\x1F\x7F]') - -def _formatparam(param, value=None, quote=1): - """Convenience function to format and return a key=value pair. - - This will quote the value if needed or if quote is true. - """ - if value is not None and len(value) > 0: - if quote or tspecials.search(value): - value = value.replace('\\', '\\\\').replace('"', r'\"') - return '%s="%s"' % (param, value) - else: - return '%s=%s' % (param, value) - else: - return param - - -class Headers: - """Manage a collection of HTTP response headers""" - - def __init__(self, headers=None): - headers = headers if headers is not None else [] - if type(headers) is not list: - raise TypeError("Headers must be a list of name/value tuples") - self._headers = headers - if __debug__: - for k, v in headers: - self._convert_string_type(k, name=True) - self._convert_string_type(v, name=False) - - def _convert_string_type(self, value, *, name): - """Convert/check value type.""" - if type(value) is str: - regex = (_name_disallowed_re if name else _value_disallowed_re) - if regex.search(value): - raise ValueError("Control characters not allowed in headers") - return value - raise AssertionError("Header names/values must be" - " of type str (got {0})".format(repr(value))) - - def __len__(self): - """Return the total number of headers, including duplicates.""" - return len(self._headers) - - def __setitem__(self, name, val): - """Set the value of a header.""" - del self[name] - self._headers.append( - (self._convert_string_type(name, name=True), self._convert_string_type(val, name=False))) - - def __delitem__(self,name): - """Delete all occurrences of a header, if present. - - Does *not* raise an exception if the header is missing. - """ - name = self._convert_string_type(name.lower(), name=True) - self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name] - - def __getitem__(self,name): - """Get the first header value for 'name' - - Return None if the header is missing instead of raising an exception. - - Note that if the header appeared multiple times, the first exactly which - occurrence gets returned is undefined. Use getall() to get all - the values matching a header field name. 
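A short sketch of the distinction (get_all() is the method defined below):

    from wsgiref.headers import Headers

    h = Headers([("Set-Cookie", "a=1"), ("Set-Cookie", "b=2")])
    h["Set-Cookie"]           # a single value; which occurrence is unspecified
    h.get_all("Set-Cookie")   # ['a=1', 'b=2'], in insertion order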
- """ - return self.get(name) - - def __contains__(self, name): - """Return true if the message contains the header.""" - return self.get(name) is not None - - - def get_all(self, name): - """Return a list of all the values for the named field. - - These will be sorted in the order they appeared in the original header - list or were added to this instance, and may contain duplicates. Any - fields deleted and re-inserted are always appended to the header list. - If no fields exist with the given name, returns an empty list. - """ - name = self._convert_string_type(name.lower(), name=True) - return [kv[1] for kv in self._headers if kv[0].lower()==name] - - - def get(self,name,default=None): - """Get the first header value for 'name', or return 'default'""" - name = self._convert_string_type(name.lower(), name=True) - for k,v in self._headers: - if k.lower()==name: - return v - return default - - - def keys(self): - """Return a list of all the header field names. - - These will be sorted in the order they appeared in the original header - list, or were added to this instance, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. - """ - return [k for k, v in self._headers] - - def values(self): - """Return a list of all header values. - - These will be sorted in the order they appeared in the original header - list, or were added to this instance, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. - """ - return [v for k, v in self._headers] - - def items(self): - """Get all the header fields and values. - - These will be sorted in the order they were in the original header - list, or were added to this instance, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. - """ - return self._headers[:] - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self._headers) - - def __str__(self): - """str() returns the formatted headers, complete with end line, - suitable for direct HTTP transmission.""" - return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['','']) - - def __bytes__(self): - return str(self).encode('iso-8859-1') - - def setdefault(self,name,value): - """Return first matching header value for 'name', or 'value' - - If there is no header named 'name', add a new header with name 'name' - and value 'value'.""" - result = self.get(name) - if result is None: - self._headers.append((self._convert_string_type(name, name=True), - self._convert_string_type(value, name=False))) - return value - else: - return result - - def add_header(self, _name, _value, **_params): - """Extended header setting. - - _name is the header field to add. keyword arguments can be used to set - additional parameters for the header field, with underscores converted - to dashes. Normally the parameter will be added as key="value" unless - value is None, in which case only the key will be added. - - Example: - - h.add_header('content-disposition', 'attachment', filename='bud.gif') - - Note that unlike the corresponding 'email.message' method, this does - *not* handle '(charset, language, value)' tuples: all values must be - strings or None. 
- """ - parts = [] - if _value is not None: - _value = self._convert_string_type(_value, name=False) - parts.append(_value) - for k, v in _params.items(): - k = self._convert_string_type(k, name=True) - if v is None: - parts.append(k.replace('_', '-')) - else: - v = self._convert_string_type(v, name=False) - parts.append(_formatparam(k.replace('_', '-'), v)) - self._headers.append((self._convert_string_type(_name, name=True), "; ".join(parts))) diff --git a/Python313_13_x64_Template/Lib/xml/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/xml/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index f8843579..00000000 Binary files a/Python313_13_x64_Template/Lib/xml/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/xml/dom/__init__.py b/Python313_13_x64_Template/Lib/xml/dom/__init__.py deleted file mode 100644 index 97cf9a64..00000000 --- a/Python313_13_x64_Template/Lib/xml/dom/__init__.py +++ /dev/null @@ -1,140 +0,0 @@ -"""W3C Document Object Model implementation for Python. - -The Python mapping of the Document Object Model is documented in the -Python Library Reference in the section on the xml.dom package. - -This package contains the following modules: - -minidom -- A simple implementation of the Level 1 DOM with namespace - support added (based on the Level 2 specification) and other - minor Level 2 functionality. - -pulldom -- DOM builder supporting on-demand tree-building for selected - subtrees of the document. - -""" - - -class Node: - """Class giving the NodeType constants.""" - __slots__ = () - - # DOM implementations may use this as a base class for their own - # Node implementations. If they don't, the constants defined here - # should still be used as the canonical definitions as they match - # the values given in the W3C recommendation. Client code can - # safely refer to these values in all tests of Node.nodeType - # values. - - ELEMENT_NODE = 1 - ATTRIBUTE_NODE = 2 - TEXT_NODE = 3 - CDATA_SECTION_NODE = 4 - ENTITY_REFERENCE_NODE = 5 - ENTITY_NODE = 6 - PROCESSING_INSTRUCTION_NODE = 7 - COMMENT_NODE = 8 - DOCUMENT_NODE = 9 - DOCUMENT_TYPE_NODE = 10 - DOCUMENT_FRAGMENT_NODE = 11 - NOTATION_NODE = 12 - - -#ExceptionCode -INDEX_SIZE_ERR = 1 -DOMSTRING_SIZE_ERR = 2 -HIERARCHY_REQUEST_ERR = 3 -WRONG_DOCUMENT_ERR = 4 -INVALID_CHARACTER_ERR = 5 -NO_DATA_ALLOWED_ERR = 6 -NO_MODIFICATION_ALLOWED_ERR = 7 -NOT_FOUND_ERR = 8 -NOT_SUPPORTED_ERR = 9 -INUSE_ATTRIBUTE_ERR = 10 -INVALID_STATE_ERR = 11 -SYNTAX_ERR = 12 -INVALID_MODIFICATION_ERR = 13 -NAMESPACE_ERR = 14 -INVALID_ACCESS_ERR = 15 -VALIDATION_ERR = 16 - - -class DOMException(Exception): - """Abstract base class for DOM exceptions. 
- Exceptions with specific codes are specializations of this class.""" - - def __init__(self, *args, **kw): - if self.__class__ is DOMException: - raise RuntimeError( - "DOMException should not be instantiated directly") - Exception.__init__(self, *args, **kw) - - def _get_code(self): - return self.code - - -class IndexSizeErr(DOMException): - code = INDEX_SIZE_ERR - -class DomstringSizeErr(DOMException): - code = DOMSTRING_SIZE_ERR - -class HierarchyRequestErr(DOMException): - code = HIERARCHY_REQUEST_ERR - -class WrongDocumentErr(DOMException): - code = WRONG_DOCUMENT_ERR - -class InvalidCharacterErr(DOMException): - code = INVALID_CHARACTER_ERR - -class NoDataAllowedErr(DOMException): - code = NO_DATA_ALLOWED_ERR - -class NoModificationAllowedErr(DOMException): - code = NO_MODIFICATION_ALLOWED_ERR - -class NotFoundErr(DOMException): - code = NOT_FOUND_ERR - -class NotSupportedErr(DOMException): - code = NOT_SUPPORTED_ERR - -class InuseAttributeErr(DOMException): - code = INUSE_ATTRIBUTE_ERR - -class InvalidStateErr(DOMException): - code = INVALID_STATE_ERR - -class SyntaxErr(DOMException): - code = SYNTAX_ERR - -class InvalidModificationErr(DOMException): - code = INVALID_MODIFICATION_ERR - -class NamespaceErr(DOMException): - code = NAMESPACE_ERR - -class InvalidAccessErr(DOMException): - code = INVALID_ACCESS_ERR - -class ValidationErr(DOMException): - code = VALIDATION_ERR - -class UserDataHandler: - """Class giving the operation constants for UserDataHandler.handle().""" - - # Based on DOM Level 3 (WD 9 April 2002) - - NODE_CLONED = 1 - NODE_IMPORTED = 2 - NODE_DELETED = 3 - NODE_RENAMED = 4 - -XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace" -XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/" -XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml" -EMPTY_NAMESPACE = None -EMPTY_PREFIX = None - -from .domreg import getDOMImplementation, registerDOMImplementation diff --git a/Python313_13_x64_Template/Lib/xml/etree/ElementTree.py b/Python313_13_x64_Template/Lib/xml/etree/ElementTree.py deleted file mode 100644 index 9bb09ab5..00000000 --- a/Python313_13_x64_Template/Lib/xml/etree/ElementTree.py +++ /dev/null @@ -1,2098 +0,0 @@ -"""Lightweight XML support for Python. - - XML is an inherently hierarchical data format, and the most natural way to - represent it is with a tree. This module has two classes for this purpose: - - 1. ElementTree represents the whole XML document as a tree and - - 2. Element represents a single node in this tree. - - Interactions with the whole document (reading and writing to/from files) are - usually done on the ElementTree level. Interactions with a single XML element - and its sub-elements are done on the Element level. - - Element is a flexible container object designed to store hierarchical data - structures in memory. It can be described as a cross between a list and a - dictionary. Each Element has a number of properties associated with it: - - 'tag' - a string containing the element's name. - - 'attributes' - a Python dictionary storing the element's attributes. - - 'text' - a string containing the element's text content. - - 'tail' - an optional string containing text after the element's end tag. - - And a number of child elements stored in a Python sequence. - - To create an element instance, use the Element constructor, - or the SubElement factory function. - - You can also use the ElementTree class to wrap an element structure - and convert it to and from XML. 
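A compact sketch of that list/dictionary duality:

    from xml.etree import ElementTree as ET

    root = ET.fromstring('<root a="1"><child>text</child></root>')
    root.tag, root.attrib                  # ('root', {'a': '1'})
    root[0].text                           # 'text' -- list-like child access
    root.get("a")                          # '1'    -- dict-like attribute access
    ET.tostring(root, encoding="unicode")  # round-trips back to markup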
- -""" - -#--------------------------------------------------------------------- -# Licensed to PSF under a Contributor Agreement. -# See https://www.python.org/psf/license for licensing details. -# -# ElementTree -# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved. -# -# fredrik@pythonware.com -# http://www.pythonware.com -# -------------------------------------------------------------------- -# The ElementTree toolkit is -# -# Copyright (c) 1999-2008 by Fredrik Lundh -# -# By obtaining, using, and/or copying this software and/or its -# associated documentation, you agree that you have read, understood, -# and will comply with the following terms and conditions: -# -# Permission to use, copy, modify, and distribute this software and -# its associated documentation for any purpose and without fee is -# hereby granted, provided that the above copyright notice appears in -# all copies, and that both that copyright notice and this permission -# notice appear in supporting documentation, and that the name of -# Secret Labs AB or the author not be used in advertising or publicity -# pertaining to distribution of the software without specific, written -# prior permission. -# -# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD -# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- -# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR -# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY -# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS -# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE -# OF THIS SOFTWARE. -# -------------------------------------------------------------------- - -__all__ = [ - # public symbols - "Comment", - "dump", - "Element", "ElementTree", - "fromstring", "fromstringlist", - "indent", "iselement", "iterparse", - "parse", "ParseError", - "PI", "ProcessingInstruction", - "QName", - "SubElement", - "tostring", "tostringlist", - "TreeBuilder", - "VERSION", - "XML", "XMLID", - "XMLParser", "XMLPullParser", - "register_namespace", - "canonicalize", "C14NWriterTarget", - ] - -VERSION = "1.3.0" - -import sys -import re -import warnings -import io -import collections -import collections.abc -import contextlib -import weakref - -from . import ElementPath - - -class ParseError(SyntaxError): - """An error when parsing an XML document. - - In addition to its exception value, a ParseError contains - two extra attributes: - 'code' - the specific exception code - 'position' - the line and column of the error - - """ - pass - -# -------------------------------------------------------------------- - - -def iselement(element): - """Return True if *element* appears to be an Element.""" - return hasattr(element, 'tag') - - -class Element: - """An XML element. - - This class is the reference implementation of the Element interface. - - An element's length is its number of subelements. That means if you - want to check if an element is truly empty, you should check BOTH - its length AND its text attribute. - - The element tag, attribute names, and attribute values can be either - bytes or strings. - - *tag* is the element name. *attrib* is an optional dictionary containing - element attributes. *extra* are additional element attributes given as - keyword arguments. 
- - Example form: - <tag attrib>text<child/>...</tag>tail - - """ - - tag = None - """The element's name.""" - - attrib = None - """Dictionary of the element's attributes.""" - - text = None - """ - Text before first subelement. This is either a string or the value None. - Note that if there is no text, this attribute may be either - None or the empty string, depending on the parser. - - """ - - tail = None - """ - Text after this element's end tag, but before the next sibling element's - start tag. This is either a string or the value None. Note that if there - was no text, this attribute may be either None or an empty string, - depending on the parser. - - """ - - def __init__(self, tag, attrib={}, **extra): - if not isinstance(attrib, dict): - raise TypeError("attrib must be dict, not %s" % ( - attrib.__class__.__name__,)) - self.tag = tag - self.attrib = {**attrib, **extra} - self._children = [] - - def __repr__(self): - return "<%s %r at %#x>" % (self.__class__.__name__, self.tag, id(self)) - - def makeelement(self, tag, attrib): - """Create a new element with the same type. - - *tag* is a string containing the element name. - *attrib* is a dictionary containing the element attributes. - - Do not call this method, use the SubElement factory function instead. - - """ - return self.__class__(tag, attrib) - - def __copy__(self): - elem = self.makeelement(self.tag, self.attrib) - elem.text = self.text - elem.tail = self.tail - elem[:] = self - return elem - - def __len__(self): - return len(self._children) - - def __bool__(self): - warnings.warn( - "Testing an element's truth value will always return True in " - "future versions. " - "Use specific 'len(elem)' or 'elem is not None' test instead.", - DeprecationWarning, stacklevel=2 - ) - return len(self._children) != 0 # emulate old behaviour, for now - - def __getitem__(self, index): - return self._children[index] - - def __setitem__(self, index, element): - if isinstance(index, slice): - for elt in element: - self._assert_is_element(elt) - else: - self._assert_is_element(element) - self._children[index] = element - - def __delitem__(self, index): - del self._children[index] - - def append(self, subelement): - """Add *subelement* to the end of this element. - - The new element will appear in document order after the last existing - subelement (or directly after the text, if it's the first subelement), - but before the end tag for this element. - - """ - self._assert_is_element(subelement) - self._children.append(subelement) - - def extend(self, elements): - """Append subelements from a sequence. - - *elements* is a sequence with zero or more elements. - - """ - for element in elements: - self._assert_is_element(element) - self._children.append(element) - - def insert(self, index, subelement): - """Insert *subelement* at position *index*.""" - self._assert_is_element(subelement) - self._children.insert(index, subelement) - - def _assert_is_element(self, e): - # Need to refer to the actual Python implementation, not the - # shadowing C implementation. - if not isinstance(e, _Element_Py): - raise TypeError('expected an Element, not %s' % type(e).__name__) - - def remove(self, subelement): - """Remove matching subelement. - - Unlike the find methods, this method compares elements based on - identity, NOT ON tag value or contents. To remove subelements by - other means, the easiest way is to use a list comprehension to - select what elements to keep, and then use slice assignment to update - the parent element.
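A sketch of that comprehension-plus-slice pattern:

    from xml.etree import ElementTree as ET

    parent = ET.fromstring('<p><keep/><drop/><keep/></p>')
    parent[:] = [c for c in parent if c.tag == "keep"]
    ET.tostring(parent, encoding="unicode")   # '<p><keep /><keep /></p>'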
- - ValueError is raised if a matching element could not be found. - - """ - # assert iselement(element) - self._children.remove(subelement) - - def find(self, path, namespaces=None): - """Find first matching element by tag name or path. - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return the first matching element, or None if no element was found. - - """ - return ElementPath.find(self, path, namespaces) - - def findtext(self, path, default=None, namespaces=None): - """Find text for first matching element by tag name or path. - - *path* is a string having either an element tag or an XPath, - *default* is the value to return if the element was not found, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return text content of first matching element, or default value if - none was found. Note that if an element is found having no text - content, the empty string is returned. - - """ - return ElementPath.findtext(self, path, default, namespaces) - - def findall(self, path, namespaces=None): - """Find all matching subelements by tag name or path. - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Returns list containing all matching elements in document order. - - """ - return ElementPath.findall(self, path, namespaces) - - def iterfind(self, path, namespaces=None): - """Find all matching subelements by tag name or path. - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return an iterable yielding all matching elements in document order. - - """ - return ElementPath.iterfind(self, path, namespaces) - - def clear(self): - """Reset element. - - This function removes all subelements, clears all attributes, and sets - the text and tail attributes to None. - - """ - self.attrib.clear() - self._children = [] - self.text = self.tail = None - - def get(self, key, default=None): - """Get element attribute. - - Equivalent to attrib.get, but some implementations may handle this a - bit more efficiently. *key* is what attribute to look for, and - *default* is what to return if the attribute was not found. - - Returns a string containing the attribute value, or the default if - attribute was not found. - - """ - return self.attrib.get(key, default) - - def set(self, key, value): - """Set element attribute. - - Equivalent to attrib[key] = value, but some implementations may handle - this a bit more efficiently. *key* is what attribute to set, and - *value* is the attribute value to set it to. - - """ - self.attrib[key] = value - - def keys(self): - """Get list of attribute names. - - Names are returned in an arbitrary order, just like an ordinary - Python dict. Equivalent to attrib.keys() - - """ - return self.attrib.keys() - - def items(self): - """Get element attributes as a sequence. - - The attributes are returned in arbitrary order. Equivalent to - attrib.items(). - - Return a list of (name, value) tuples. - - """ - return self.attrib.items() - - def iter(self, tag=None): - """Create tree iterator. - - The iterator loops over the element and all subelements in document - order, returning all elements with a matching tag. - - If the tree structure is modified during iteration, new or removed - elements may or may not be included. 
To get a stable set, use the - list() function on the iterator, and loop over the resulting list. - - *tag* is what tags to look for (default is to return all elements) - - Return an iterator containing all the matching elements. - - """ - if tag == "*": - tag = None - if tag is None or self.tag == tag: - yield self - for e in self._children: - yield from e.iter(tag) - - def itertext(self): - """Create text iterator. - - The iterator loops over the element and all subelements in document - order, returning all inner text. - - """ - tag = self.tag - if not isinstance(tag, str) and tag is not None: - return - t = self.text - if t: - yield t - for e in self: - yield from e.itertext() - t = e.tail - if t: - yield t - - -def SubElement(parent, tag, attrib={}, **extra): - """Subelement factory which creates an element instance, and appends it - to an existing parent. - - The element tag, attribute names, and attribute values can be either - bytes or Unicode strings. - - *parent* is the parent element, *tag* is the subelements name, *attrib* is - an optional directory containing element attributes, *extra* are - additional attributes given as keyword arguments. - - """ - attrib = {**attrib, **extra} - element = parent.makeelement(tag, attrib) - parent.append(element) - return element - - -def Comment(text=None): - """Comment element factory. - - This function creates a special element which the standard serializer - serializes as an XML comment. - - *text* is a string containing the comment string. - - """ - element = Element(Comment) - element.text = text - return element - - -def ProcessingInstruction(target, text=None): - """Processing Instruction element factory. - - This function creates a special element which the standard serializer - serializes as an XML comment. - - *target* is a string containing the processing instruction, *text* is a - string containing the processing instruction contents, if any. - - """ - element = Element(ProcessingInstruction) - element.text = target - if text: - element.text = element.text + " " + text - return element - -PI = ProcessingInstruction - - -class QName: - """Qualified name wrapper. - - This class can be used to wrap a QName attribute value in order to get - proper namespace handing on output. - - *text_or_uri* is a string containing the QName value either in the form - {uri}local, or if the tag argument is given, the URI part of a QName. - - *tag* is an optional argument which if given, will make the first - argument (text_or_uri) be interpreted as a URI, and this argument (tag) - be interpreted as a local name. 
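Both construction forms normalize to the same string (a small sketch):

    from xml.etree.ElementTree import QName

    str(QName("{http://example.com/ns}item"))    # '{http://example.com/ns}item'
    str(QName("http://example.com/ns", "item"))  # the same string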
- - """ - def __init__(self, text_or_uri, tag=None): - if tag: - text_or_uri = "{%s}%s" % (text_or_uri, tag) - self.text = text_or_uri - def __str__(self): - return self.text - def __repr__(self): - return '<%s %r>' % (self.__class__.__name__, self.text) - def __hash__(self): - return hash(self.text) - def __le__(self, other): - if isinstance(other, QName): - return self.text <= other.text - return self.text <= other - def __lt__(self, other): - if isinstance(other, QName): - return self.text < other.text - return self.text < other - def __ge__(self, other): - if isinstance(other, QName): - return self.text >= other.text - return self.text >= other - def __gt__(self, other): - if isinstance(other, QName): - return self.text > other.text - return self.text > other - def __eq__(self, other): - if isinstance(other, QName): - return self.text == other.text - return self.text == other - -# -------------------------------------------------------------------- - - -class ElementTree: - """An XML element hierarchy. - - This class also provides support for serialization to and from - standard XML. - - *element* is an optional root element node, - *file* is an optional file handle or file name of an XML file whose - contents will be used to initialize the tree with. - - """ - def __init__(self, element=None, file=None): - if element is not None and not iselement(element): - raise TypeError('expected an Element, not %s' % - type(element).__name__) - self._root = element # first node - if file: - self.parse(file) - - def getroot(self): - """Return root element of this tree.""" - return self._root - - def _setroot(self, element): - """Replace root element of this tree. - - This will discard the current contents of the tree and replace it - with the given element. Use with care! - - """ - if not iselement(element): - raise TypeError('expected an Element, not %s' - % type(element).__name__) - self._root = element - - def parse(self, source, parser=None): - """Load external XML document into element tree. - - *source* is a file name or file object, *parser* is an optional parser - instance that defaults to XMLParser. - - ParseError is raised if the parser fails to parse the document. - - Returns the root element of the given source document. - - """ - close_source = False - if not hasattr(source, "read"): - source = open(source, "rb") - close_source = True - try: - if parser is None: - # If no parser was specified, create a default XMLParser - parser = XMLParser() - if hasattr(parser, '_parse_whole'): - # The default XMLParser, when it comes from an accelerator, - # can define an internal _parse_whole API for efficiency. - # It can be used to parse the whole source without feeding - # it with chunks. - self._root = parser._parse_whole(source) - return self._root - while data := source.read(65536): - parser.feed(data) - self._root = parser.close() - return self._root - finally: - if close_source: - source.close() - - def iter(self, tag=None): - """Create and return tree iterator for the root element. - - The iterator loops over all elements in this tree, in document order. - - *tag* is a string with the tag name to iterate over - (default is to return all elements). - - """ - # assert self._root is not None - return self._root.iter(tag) - - def find(self, path, namespaces=None): - """Find first matching element by tag name or path. 
- - Same as getroot().find(path), which is Element.find() - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return the first matching element, or None if no element was found. - - """ - # assert self._root is not None - if path[:1] == "/": - path = "." + path - warnings.warn( - "This search is broken in 1.3 and earlier, and will be " - "fixed in a future version. If you rely on the current " - "behaviour, change it to %r" % path, - FutureWarning, stacklevel=2 - ) - return self._root.find(path, namespaces) - - def findtext(self, path, default=None, namespaces=None): - """Find first matching element by tag name or path. - - Same as getroot().findtext(path), which is Element.findtext() - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return the first matching element, or None if no element was found. - - """ - # assert self._root is not None - if path[:1] == "/": - path = "." + path - warnings.warn( - "This search is broken in 1.3 and earlier, and will be " - "fixed in a future version. If you rely on the current " - "behaviour, change it to %r" % path, - FutureWarning, stacklevel=2 - ) - return self._root.findtext(path, default, namespaces) - - def findall(self, path, namespaces=None): - """Find all matching subelements by tag name or path. - - Same as getroot().findall(path), which is Element.findall(). - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return list containing all matching elements in document order. - - """ - # assert self._root is not None - if path[:1] == "/": - path = "." + path - warnings.warn( - "This search is broken in 1.3 and earlier, and will be " - "fixed in a future version. If you rely on the current " - "behaviour, change it to %r" % path, - FutureWarning, stacklevel=2 - ) - return self._root.findall(path, namespaces) - - def iterfind(self, path, namespaces=None): - """Find all matching subelements by tag name or path. - - Same as getroot().iterfind(path), which is element.iterfind() - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return an iterable yielding all matching elements in document order. - - """ - # assert self._root is not None - if path[:1] == "/": - path = "." + path - warnings.warn( - "This search is broken in 1.3 and earlier, and will be " - "fixed in a future version. If you rely on the current " - "behaviour, change it to %r" % path, - FutureWarning, stacklevel=2 - ) - return self._root.iterfind(path, namespaces) - - def write(self, file_or_filename, - encoding=None, - xml_declaration=None, - default_namespace=None, - method=None, *, - short_empty_elements=True): - """Write element tree to a file as XML. - - Arguments: - *file_or_filename* -- file name or a file object opened for writing - - *encoding* -- the output encoding (default: US-ASCII) - - *xml_declaration* -- bool indicating if an XML declaration should be - added to the output. If None, an XML declaration - is added if encoding IS NOT either of: - US-ASCII, UTF-8, or Unicode - - *default_namespace* -- sets the default XML namespace (for "xmlns") - - *method* -- either "xml" (default), "html, "text", or "c14n" - - *short_empty_elements* -- controls the formatting of elements - that contain no content. 
If True (default) - they are emitted as a single self-closed - tag, otherwise they are emitted as a pair - of start/end tags - - """ - if self._root is None: - raise TypeError('ElementTree not initialized') - if not method: - method = "xml" - elif method not in _serialize: - raise ValueError("unknown method %r" % method) - if not encoding: - if method == "c14n": - encoding = "utf-8" - else: - encoding = "us-ascii" - with _get_writer(file_or_filename, encoding) as (write, declared_encoding): - if method == "xml" and (xml_declaration or - (xml_declaration is None and - encoding.lower() != "unicode" and - declared_encoding.lower() not in ("utf-8", "us-ascii"))): - write("<?xml version='1.0' encoding='%s'?>\n" % ( - declared_encoding,)) - if method == "text": - _serialize_text(write, self._root) - else: - qnames, namespaces = _namespaces(self._root, default_namespace) - serialize = _serialize[method] - serialize(write, self._root, qnames, namespaces, - short_empty_elements=short_empty_elements) - - def write_c14n(self, file): - # lxml.etree compatibility. use output method instead - return self.write(file, method="c14n") - -# -------------------------------------------------------------------- -# serialization support - -@contextlib.contextmanager -def _get_writer(file_or_filename, encoding): - # returns text write method and release all resources after using - try: - write = file_or_filename.write - except AttributeError: - # file_or_filename is a file name - if encoding.lower() == "unicode": - encoding="utf-8" - with open(file_or_filename, "w", encoding=encoding, - errors="xmlcharrefreplace") as file: - yield file.write, encoding - else: - # file_or_filename is a file-like object - # encoding determines if it is a text or binary writer - if encoding.lower() == "unicode": - # use a text writer as is - yield write, getattr(file_or_filename, "encoding", None) or "utf-8" - else: - # wrap a binary writer with TextIOWrapper - with contextlib.ExitStack() as stack: - if isinstance(file_or_filename, io.BufferedIOBase): - file = file_or_filename - elif isinstance(file_or_filename, io.RawIOBase): - file = io.BufferedWriter(file_or_filename) - # Keep the original file open when the BufferedWriter is - # destroyed - stack.callback(file.detach) - else: - # This is to handle passed objects that aren't in the - # IOBase hierarchy, but just have a write method - file = io.BufferedIOBase() - file.writable = lambda: True - file.write = write - try: - # TextIOWrapper uses this methods to determine - # if BOM (for UTF-16, etc) should be added - file.seekable = file_or_filename.seekable - file.tell = file_or_filename.tell - except AttributeError: - pass - file = io.TextIOWrapper(file, - encoding=encoding, - errors="xmlcharrefreplace", - newline="\n") - # Keep the original file open when the TextIOWrapper is - # destroyed - stack.callback(file.detach) - yield file.write, encoding - -def _namespaces(elem, default_namespace=None): - # identify namespaces used in this tree - - # maps qnames to *encoded* prefix:local names - qnames = {None: None} - - # maps uri:s to prefixes - namespaces = {} - if default_namespace: - namespaces[default_namespace] = "" - - def add_qname(qname): - # calculate serialized qname representation - try: - if qname[:1] == "{": - uri, tag = qname[1:].rsplit("}", 1) - prefix = namespaces.get(uri) - if prefix is None: - prefix = _namespace_map.get(uri) - if prefix is None: - prefix = "ns%d" % len(namespaces) - if prefix != "xml": - namespaces[uri] = prefix - if prefix: - qnames[qname] = "%s:%s" % (prefix, tag) - else: -
qnames[qname] = tag # default element - else: - if default_namespace: - # FIXME: can this be handled in XML 1.0? - raise ValueError( - "cannot use non-qualified names with " - "default_namespace option" - ) - qnames[qname] = qname - except TypeError: - _raise_serialization_error(qname) - - # populate qname and namespaces table - for elem in elem.iter(): - tag = elem.tag - if isinstance(tag, QName): - if tag.text not in qnames: - add_qname(tag.text) - elif isinstance(tag, str): - if tag not in qnames: - add_qname(tag) - elif tag is not None and tag is not Comment and tag is not PI: - _raise_serialization_error(tag) - for key, value in elem.items(): - if isinstance(key, QName): - key = key.text - if key not in qnames: - add_qname(key) - if isinstance(value, QName) and value.text not in qnames: - add_qname(value.text) - text = elem.text - if isinstance(text, QName) and text.text not in qnames: - add_qname(text.text) - return qnames, namespaces - -def _serialize_xml(write, elem, qnames, namespaces, - short_empty_elements, **kwargs): - tag = elem.tag - text = elem.text - if tag is Comment: - write("" % text) - elif tag is ProcessingInstruction: - write("" % text) - else: - tag = qnames[tag] - if tag is None: - if text: - write(_escape_cdata(text)) - for e in elem: - _serialize_xml(write, e, qnames, None, - short_empty_elements=short_empty_elements) - else: - write("<" + tag) - items = list(elem.items()) - if items or namespaces: - if namespaces: - for v, k in sorted(namespaces.items(), - key=lambda x: x[1]): # sort on prefix - if k: - k = ":" + k - write(" xmlns%s=\"%s\"" % ( - k, - _escape_attrib(v) - )) - for k, v in items: - if isinstance(k, QName): - k = k.text - if isinstance(v, QName): - v = qnames[v.text] - else: - v = _escape_attrib(v) - write(" %s=\"%s\"" % (qnames[k], v)) - if text or len(elem) or not short_empty_elements: - write(">") - if text: - write(_escape_cdata(text)) - for e in elem: - _serialize_xml(write, e, qnames, None, - short_empty_elements=short_empty_elements) - write("") - else: - write(" />") - if elem.tail: - write(_escape_cdata(elem.tail)) - -HTML_EMPTY = {"area", "base", "basefont", "br", "col", "embed", "frame", "hr", - "img", "input", "isindex", "link", "meta", "param", "source", - "track", "wbr"} - -def _serialize_html(write, elem, qnames, namespaces, **kwargs): - tag = elem.tag - text = elem.text - if tag is Comment: - write("" % _escape_cdata(text)) - elif tag is ProcessingInstruction: - write("" % _escape_cdata(text)) - else: - tag = qnames[tag] - if tag is None: - if text: - write(_escape_cdata(text)) - for e in elem: - _serialize_html(write, e, qnames, None) - else: - write("<" + tag) - items = list(elem.items()) - if items or namespaces: - if namespaces: - for v, k in sorted(namespaces.items(), - key=lambda x: x[1]): # sort on prefix - if k: - k = ":" + k - write(" xmlns%s=\"%s\"" % ( - k, - _escape_attrib(v) - )) - for k, v in items: - if isinstance(k, QName): - k = k.text - if isinstance(v, QName): - v = qnames[v.text] - else: - v = _escape_attrib_html(v) - # FIXME: handle boolean attributes - write(" %s=\"%s\"" % (qnames[k], v)) - write(">") - ltag = tag.lower() - if text: - if ltag == "script" or ltag == "style": - write(text) - else: - write(_escape_cdata(text)) - for e in elem: - _serialize_html(write, e, qnames, None) - if ltag not in HTML_EMPTY: - write("") - if elem.tail: - write(_escape_cdata(elem.tail)) - -def _serialize_text(write, elem): - for part in elem.itertext(): - write(part) - if elem.tail: - write(elem.tail) - -_serialize = { - "xml": 
_serialize_xml, - "html": _serialize_html, - "text": _serialize_text, -# this optional method is imported at the end of the module -# "c14n": _serialize_c14n, -} - - -def register_namespace(prefix, uri): - """Register a namespace prefix. - - The registry is global, and any existing mapping for either the - given prefix or the namespace URI will be removed. - - *prefix* is the namespace prefix, *uri* is a namespace uri. Tags and - attributes in this namespace will be serialized with prefix if possible. - - ValueError is raised if prefix is reserved or is invalid. - - """ - if re.match(r"ns\d+$", prefix): - raise ValueError("Prefix format reserved for internal use") - for k, v in list(_namespace_map.items()): - if k == uri or v == prefix: - del _namespace_map[k] - _namespace_map[uri] = prefix - -_namespace_map = { - # "well-known" namespace prefixes - "http://www.w3.org/XML/1998/namespace": "xml", - "http://www.w3.org/1999/xhtml": "html", - "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf", - "http://schemas.xmlsoap.org/wsdl/": "wsdl", - # xml schema - "http://www.w3.org/2001/XMLSchema": "xs", - "http://www.w3.org/2001/XMLSchema-instance": "xsi", - # dublin core - "http://purl.org/dc/elements/1.1/": "dc", -} -# For tests and troubleshooting -register_namespace._namespace_map = _namespace_map - -def _raise_serialization_error(text): - raise TypeError( - "cannot serialize %r (type %s)" % (text, type(text).__name__) - ) - -def _escape_cdata(text): - # escape character data - try: - # it's worth avoiding do-nothing calls for strings that are - # shorter than 500 characters, or so. assume that's, by far, - # the most common case in most applications. - if "&" in text: - text = text.replace("&", "&") - if "<" in text: - text = text.replace("<", "<") - if ">" in text: - text = text.replace(">", ">") - return text - except (TypeError, AttributeError): - _raise_serialization_error(text) - -def _escape_attrib(text): - # escape attribute value - try: - if "&" in text: - text = text.replace("&", "&") - if "<" in text: - text = text.replace("<", "<") - if ">" in text: - text = text.replace(">", ">") - if "\"" in text: - text = text.replace("\"", """) - # Although section 2.11 of the XML specification states that CR or - # CR LN should be replaced with just LN, it applies only to EOLNs - # which take part of organizing file into lines. Within attributes, - # we are replacing these with entity numbers, so they do not count. - # http://www.w3.org/TR/REC-xml/#sec-line-ends - # The current solution, contained in following six lines, was - # discussed in issue 17582 and 39011. - if "\r" in text: - text = text.replace("\r", " ") - if "\n" in text: - text = text.replace("\n", " ") - if "\t" in text: - text = text.replace("\t", " ") - return text - except (TypeError, AttributeError): - _raise_serialization_error(text) - -def _escape_attrib_html(text): - # escape attribute value - try: - if "&" in text: - text = text.replace("&", "&") - if ">" in text: - text = text.replace(">", ">") - if "\"" in text: - text = text.replace("\"", """) - return text - except (TypeError, AttributeError): - _raise_serialization_error(text) - -# -------------------------------------------------------------------- - -def tostring(element, encoding=None, method=None, *, - xml_declaration=None, default_namespace=None, - short_empty_elements=True): - """Generate string representation of XML element. - - All subelements are included. If encoding is "unicode", a string - is returned. Otherwise a bytestring is returned. 
- - *element* is an Element instance, *encoding* is an optional output - encoding defaulting to US-ASCII, *method* is an optional output which can - be one of "xml" (default), "html", "text" or "c14n", *default_namespace* - sets the default XML namespace (for "xmlns"). - - Returns an (optionally) encoded string containing the XML data. - - """ - stream = io.StringIO() if encoding == 'unicode' else io.BytesIO() - ElementTree(element).write(stream, encoding, - xml_declaration=xml_declaration, - default_namespace=default_namespace, - method=method, - short_empty_elements=short_empty_elements) - return stream.getvalue() - -class _ListDataStream(io.BufferedIOBase): - """An auxiliary stream accumulating into a list reference.""" - def __init__(self, lst): - self.lst = lst - - def writable(self): - return True - - def seekable(self): - return True - - def write(self, b): - self.lst.append(b) - - def tell(self): - return len(self.lst) - -def tostringlist(element, encoding=None, method=None, *, - xml_declaration=None, default_namespace=None, - short_empty_elements=True): - lst = [] - stream = _ListDataStream(lst) - ElementTree(element).write(stream, encoding, - xml_declaration=xml_declaration, - default_namespace=default_namespace, - method=method, - short_empty_elements=short_empty_elements) - return lst - - -def dump(elem): - """Write element tree or element structure to sys.stdout. - - This function should be used for debugging only. - - *elem* is either an ElementTree, or a single Element. The exact output - format is implementation dependent. In this version, it's written as an - ordinary XML file. - - """ - # debugging - if not isinstance(elem, ElementTree): - elem = ElementTree(elem) - elem.write(sys.stdout, encoding="unicode") - tail = elem.getroot().tail - if not tail or tail[-1] != "\n": - sys.stdout.write("\n") - - -def indent(tree, space=" ", level=0): - """Indent an XML document by inserting newlines and indentation space - after elements. - - *tree* is the ElementTree or Element to modify. The (root) element - itself will not be changed, but the tail text of all elements in its - subtree will be adapted. - - *space* is the whitespace to insert for each indentation level, two - space characters by default. - - *level* is the initial indentation level. Setting this to a higher - value than 0 can be used for indenting subtrees that are more deeply - nested inside of a document. - """ - if isinstance(tree, ElementTree): - tree = tree.getroot() - if level < 0: - raise ValueError(f"Initial indentation level must be >= 0, got {level}") - if not len(tree): - return - - # Reduce the memory consumption by reusing indentation strings. - indentations = ["\n" + level * space] - - def _indent_children(elem, level): - # Start a new indentation level for the first child. - child_level = level + 1 - try: - child_indentation = indentations[child_level] - except IndexError: - child_indentation = indentations[level] + space - indentations.append(child_indentation) - - if not elem.text or not elem.text.strip(): - elem.text = child_indentation - - for child in elem: - if len(child): - _indent_children(child, child_level) - if not child.tail or not child.tail.strip(): - child.tail = child_indentation - - # Dedent after the last child by overwriting the previous indentation. 
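For reference, the indent()/tostring() pair deleted in this hunk is the usual pretty-printing recipe. A minimal sketch of that use (element names are illustrative, not from this changeset):

    import xml.etree.ElementTree as ET

    root = ET.fromstring('<config><host name="a"/><host name="b"/></config>')
    ET.indent(root, space="  ")  # rewrites .text/.tail whitespace in place
    print(ET.tostring(root, encoding="unicode"))
    # <config>
    #   <host name="a" />
    #   <host name="b" />
    # </config>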
- if not child.tail.strip(): - child.tail = indentations[level] - - _indent_children(tree, 0) - - -# -------------------------------------------------------------------- -# parsing - - -def parse(source, parser=None): - """Parse XML document into element tree. - - *source* is a filename or file object containing XML data, - *parser* is an optional parser instance defaulting to XMLParser. - - Return an ElementTree instance. - - """ - tree = ElementTree() - tree.parse(source, parser) - return tree - - -def iterparse(source, events=None, parser=None): - """Incrementally parse XML document into ElementTree. - - This class also reports what's going on to the user based on the - *events* it is initialized with. The supported events are the strings - "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get - detailed namespace information). If *events* is omitted, only - "end" events are reported. - - *source* is a filename or file object containing XML data, *events* is - a list of events to report back, *parser* is an optional parser instance. - - Returns an iterator providing (event, elem) pairs. - - """ - # Use the internal, undocumented _parser argument for now; When the - # parser argument of iterparse is removed, this can be killed. - pullparser = XMLPullParser(events=events, _parser=parser) - - if not hasattr(source, "read"): - source = open(source, "rb") - close_source = True - else: - close_source = False - - def iterator(source): - try: - while True: - yield from pullparser.read_events() - # load event buffer - data = source.read(16 * 1024) - if not data: - break - pullparser.feed(data) - root = pullparser._close_and_return_root() - yield from pullparser.read_events() - it = wr() - if it is not None: - it.root = root - finally: - if close_source: - source.close() - - gen = iterator(source) - class IterParseIterator(collections.abc.Iterator): - __next__ = gen.__next__ - def close(self): - if close_source: - source.close() - gen.close() - - def __del__(self): - # TODO: Emit a ResourceWarning if it was not explicitly closed. - # (When the close() method will be supported in all maintained Python versions.) - if close_source: - source.close() - - it = IterParseIterator() - it.root = None - wr = weakref.ref(it) - return it - - -class XMLPullParser: - - def __init__(self, events=None, *, _parser=None): - # The _parser argument is for internal use only and must not be relied - # upon in user code. It will be removed in a future release. - # See https://bugs.python.org/issue17741 for more details. - - self._events_queue = collections.deque() - self._parser = _parser or XMLParser(target=TreeBuilder()) - # wire up the parser for event reporting - if events is None: - events = ("end",) - self._parser._setevents(self._events_queue, events) - - def feed(self, data): - """Feed encoded data to parser.""" - if self._parser is None: - raise ValueError("feed() called after end of stream") - if data: - try: - self._parser.feed(data) - except SyntaxError as exc: - self._events_queue.append(exc) - - def _close_and_return_root(self): - # iterparse needs this to set its root attribute properly :( - root = self._parser.close() - self._parser = None - return root - - def close(self): - """Finish feeding data to parser. - - Unlike XMLParser, does not return the root element. Use - read_events() to consume elements from XMLPullParser. - """ - self._close_and_return_root() - - def read_events(self): - """Return an iterator over currently available (event, elem) pairs. 
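The iterparse() function deleted above is the streaming entry point; it is normally consumed like the following sketch, where the file name, tag, and handle() callback are illustrative:

    import xml.etree.ElementTree as ET

    for event, elem in ET.iterparse("records.xml", events=("end",)):
        if elem.tag == "record":
            handle(elem)   # hypothetical per-record callback
            elem.clear()   # drop already-processed children to bound memory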
- - Events are consumed from the internal event queue as they are - retrieved from the iterator. - """ - events = self._events_queue - while events: - event = events.popleft() - if isinstance(event, Exception): - raise event - else: - yield event - - def flush(self): - if self._parser is None: - raise ValueError("flush() called after end of stream") - self._parser.flush() - - -def XML(text, parser=None): - """Parse XML document from string constant. - - This function can be used to embed "XML Literals" in Python code. - - *text* is a string containing XML data, *parser* is an - optional parser instance, defaulting to the standard XMLParser. - - Returns an Element instance. - - """ - if not parser: - parser = XMLParser(target=TreeBuilder()) - parser.feed(text) - return parser.close() - - -def XMLID(text, parser=None): - """Parse XML document from string constant for its IDs. - - *text* is a string containing XML data, *parser* is an - optional parser instance, defaulting to the standard XMLParser. - - Returns an (Element, dict) tuple, in which the - dict maps element id:s to elements. - - """ - if not parser: - parser = XMLParser(target=TreeBuilder()) - parser.feed(text) - tree = parser.close() - ids = {} - for elem in tree.iter(): - id = elem.get("id") - if id: - ids[id] = elem - return tree, ids - -# Parse XML document from string constant. Alias for XML(). -fromstring = XML - -def fromstringlist(sequence, parser=None): - """Parse XML document from sequence of string fragments. - - *sequence* is a list of other sequence, *parser* is an optional parser - instance, defaulting to the standard XMLParser. - - Returns an Element instance. - - """ - if not parser: - parser = XMLParser(target=TreeBuilder()) - for text in sequence: - parser.feed(text) - return parser.close() - -# -------------------------------------------------------------------- - - -class TreeBuilder: - """Generic element structure builder. - - This builder converts a sequence of start, data, and end method - calls to a well-formed element structure. - - You can use this class to build an element structure using a custom XML - parser, or a parser for some other XML-like format. - - *element_factory* is an optional element factory which is called - to create new Element instances, as necessary. - - *comment_factory* is a factory to create comments to be used instead of - the standard factory. If *insert_comments* is false (the default), - comments will not be inserted into the tree. - - *pi_factory* is a factory to create processing instructions to be used - instead of the standard factory. If *insert_pis* is false (the default), - processing instructions will not be inserted into the tree. 
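The XML()/XMLID() helpers deleted above parse from in-memory strings; XMLID() additionally indexes elements that carry an id attribute. A short usage sketch:

    import xml.etree.ElementTree as ET

    doc = '<root><item id="a">1</item><item id="b">2</item></root>'
    root, ids = ET.XMLID(doc)   # same tree as fromstring(), plus an id map
    assert ids["b"].text == "2"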
- """ - def __init__(self, element_factory=None, *, - comment_factory=None, pi_factory=None, - insert_comments=False, insert_pis=False): - self._data = [] # data collector - self._elem = [] # element stack - self._last = None # last element - self._root = None # root element - self._tail = None # true if we're after an end tag - if comment_factory is None: - comment_factory = Comment - self._comment_factory = comment_factory - self.insert_comments = insert_comments - if pi_factory is None: - pi_factory = ProcessingInstruction - self._pi_factory = pi_factory - self.insert_pis = insert_pis - if element_factory is None: - element_factory = Element - self._factory = element_factory - - def close(self): - """Flush builder buffers and return toplevel document Element.""" - assert len(self._elem) == 0, "missing end tags" - assert self._root is not None, "missing toplevel element" - return self._root - - def _flush(self): - if self._data: - if self._last is not None: - text = "".join(self._data) - if self._tail: - assert self._last.tail is None, "internal error (tail)" - self._last.tail = text - else: - assert self._last.text is None, "internal error (text)" - self._last.text = text - self._data = [] - - def data(self, data): - """Add text to current element.""" - self._data.append(data) - - def start(self, tag, attrs): - """Open new element and return it. - - *tag* is the element name, *attrs* is a dict containing element - attributes. - - """ - self._flush() - self._last = elem = self._factory(tag, attrs) - if self._elem: - self._elem[-1].append(elem) - elif self._root is None: - self._root = elem - self._elem.append(elem) - self._tail = 0 - return elem - - def end(self, tag): - """Close and return current Element. - - *tag* is the element name. - - """ - self._flush() - self._last = self._elem.pop() - assert self._last.tag == tag,\ - "end tag mismatch (expected %s, got %s)" % ( - self._last.tag, tag) - self._tail = 1 - return self._last - - def comment(self, text): - """Create a comment using the comment_factory. - - *text* is the text of the comment. - """ - return self._handle_single( - self._comment_factory, self.insert_comments, text) - - def pi(self, target, text=None): - """Create a processing instruction using the pi_factory. - - *target* is the target name of the processing instruction. - *text* is the data of the processing instruction, or ''. - """ - return self._handle_single( - self._pi_factory, self.insert_pis, target, text) - - def _handle_single(self, factory, insert, *args): - elem = factory(*args) - if insert: - self._flush() - self._last = elem - if self._elem: - self._elem[-1].append(elem) - self._tail = 1 - return elem - - -# also see ElementTree and TreeBuilder -class XMLParser: - """Element structure builder for XML source data based on the expat parser. 
- - *target* is an optional target object which defaults to an instance of the - standard TreeBuilder class, *encoding* is an optional encoding string - which if given, overrides the encoding specified in the XML file: - http://www.iana.org/assignments/character-sets - - """ - - def __init__(self, *, target=None, encoding=None): - try: - from xml.parsers import expat - except ImportError: - try: - import pyexpat as expat - except ImportError: - raise ImportError( - "No module named expat; use SimpleXMLTreeBuilder instead" - ) - parser = expat.ParserCreate(encoding, "}") - if target is None: - target = TreeBuilder() - # underscored names are provided for compatibility only - self.parser = self._parser = parser - self.target = self._target = target - self._error = expat.error - self._names = {} # name memo cache - # main callbacks - parser.DefaultHandlerExpand = self._default - if hasattr(target, 'start'): - parser.StartElementHandler = self._start - if hasattr(target, 'end'): - parser.EndElementHandler = self._end - if hasattr(target, 'start_ns'): - parser.StartNamespaceDeclHandler = self._start_ns - if hasattr(target, 'end_ns'): - parser.EndNamespaceDeclHandler = self._end_ns - if hasattr(target, 'data'): - parser.CharacterDataHandler = target.data - # miscellaneous callbacks - if hasattr(target, 'comment'): - parser.CommentHandler = target.comment - if hasattr(target, 'pi'): - parser.ProcessingInstructionHandler = target.pi - # Configure pyexpat: buffering, new-style attribute handling. - parser.buffer_text = 1 - parser.ordered_attributes = 1 - self._doctype = None - self.entity = {} - try: - self.version = "Expat %d.%d.%d" % expat.version_info - except AttributeError: - pass # unknown - - def _setevents(self, events_queue, events_to_report): - # Internal API for XMLPullParser - # events_to_report: a list of events to report during parsing (same as - # the *events* of XMLPullParser's constructor. - # events_queue: a list of actual parsing events that will be populated - # by the underlying parser. 
- # - parser = self._parser - append = events_queue.append - for event_name in events_to_report: - if event_name == "start": - parser.ordered_attributes = 1 - def handler(tag, attrib_in, event=event_name, append=append, - start=self._start): - append((event, start(tag, attrib_in))) - parser.StartElementHandler = handler - elif event_name == "end": - def handler(tag, event=event_name, append=append, - end=self._end): - append((event, end(tag))) - parser.EndElementHandler = handler - elif event_name == "start-ns": - # TreeBuilder does not implement .start_ns() - if hasattr(self.target, "start_ns"): - def handler(prefix, uri, event=event_name, append=append, - start_ns=self._start_ns): - append((event, start_ns(prefix, uri))) - else: - def handler(prefix, uri, event=event_name, append=append): - append((event, (prefix or '', uri or ''))) - parser.StartNamespaceDeclHandler = handler - elif event_name == "end-ns": - # TreeBuilder does not implement .end_ns() - if hasattr(self.target, "end_ns"): - def handler(prefix, event=event_name, append=append, - end_ns=self._end_ns): - append((event, end_ns(prefix))) - else: - def handler(prefix, event=event_name, append=append): - append((event, None)) - parser.EndNamespaceDeclHandler = handler - elif event_name == 'comment': - def handler(text, event=event_name, append=append, self=self): - append((event, self.target.comment(text))) - parser.CommentHandler = handler - elif event_name == 'pi': - def handler(pi_target, data, event=event_name, append=append, - self=self): - append((event, self.target.pi(pi_target, data))) - parser.ProcessingInstructionHandler = handler - else: - raise ValueError("unknown event %r" % event_name) - - def _raiseerror(self, value): - err = ParseError(value) - err.code = value.code - err.position = value.lineno, value.offset - raise err - - def _fixname(self, key): - # expand qname, and convert name string to ascii, if possible - try: - name = self._names[key] - except KeyError: - name = key - if "}" in name: - name = "{" + name - self._names[key] = name - return name - - def _start_ns(self, prefix, uri): - return self.target.start_ns(prefix or '', uri or '') - - def _end_ns(self, prefix): - return self.target.end_ns(prefix or '') - - def _start(self, tag, attr_list): - # Handler for expat's StartElementHandler. Since ordered_attributes - # is set, the attributes are reported as a list of alternating - # attribute name,value. 
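The event wiring above is what feeds XMLPullParser's queue; typical non-blocking use looks like this sketch:

    from xml.etree.ElementTree import XMLPullParser

    parser = XMLPullParser(events=("start", "end"))
    parser.feed("<root><chi")              # chunks may split anywhere
    parser.feed("ld/></root>")
    for event, elem in parser.read_events():
        print(event, elem.tag)             # start root, start child, end child, end root
    parser.close()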
-        fixname = self._fixname
-        tag = fixname(tag)
-        attrib = {}
-        if attr_list:
-            for i in range(0, len(attr_list), 2):
-                attrib[fixname(attr_list[i])] = attr_list[i+1]
-        return self.target.start(tag, attrib)
-
-    def _end(self, tag):
-        return self.target.end(self._fixname(tag))
-
-    def _default(self, text):
-        prefix = text[:1]
-        if prefix == "&":
-            # deal with undefined entities
-            try:
-                data_handler = self.target.data
-            except AttributeError:
-                return
-            try:
-                data_handler(self.entity[text[1:-1]])
-            except KeyError:
-                from xml.parsers import expat
-                err = expat.error(
-                    "undefined entity %s: line %d, column %d" %
-                    (text, self.parser.ErrorLineNumber,
-                    self.parser.ErrorColumnNumber)
-                    )
-                err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
-                err.lineno = self.parser.ErrorLineNumber
-                err.offset = self.parser.ErrorColumnNumber
-                raise err
-        elif prefix == "<" and text[:9] == "<!DOCTYPE":
-            self._doctype = [] # inside a doctype declaration
-        elif self._doctype is not None:
-            # parse doctype contents
-            if prefix == ">":
-                self._doctype = None
-                return
-            text = text.strip()
-            if not text:
-                return
-            self._doctype.append(text)
-            n = len(self._doctype)
-            if n > 2:
-                type = self._doctype[1]
-                if type == "PUBLIC" and n == 4:
-                    name, type, pubid, system = self._doctype
-                    if pubid:
-                        pubid = pubid[1:-1]
-                elif type == "SYSTEM" and n == 3:
-                    name, type, system = self._doctype
-                    pubid = None
-                else:
-                    return
-                if hasattr(self.target, "doctype"):
-                    self.target.doctype(name, pubid, system[1:-1])
-                elif hasattr(self, "doctype"):
-                    warnings.warn(
-                        "The doctype() method of XMLParser is ignored. "
-                        "Define doctype() method on the TreeBuilder target.",
-                        RuntimeWarning)
-
-                self._doctype = None
-
-    def feed(self, data):
-        """Feed encoded data to parser."""
-        try:
-            self.parser.Parse(data, False)
-        except self._error as v:
-            self._raiseerror(v)
-
-    def close(self):
-        """Finish feeding data to parser and return element structure."""
-        try:
-            self.parser.Parse(b"", True) # end of data
-        except self._error as v:
-            self._raiseerror(v)
-        try:
-            close_handler = self.target.close
-        except AttributeError:
-            pass
-        else:
-            return close_handler()
-        finally:
-            # get rid of circular references
-            del self.parser, self._parser
-            del self.target, self._target
-
-    def flush(self):
-        was_enabled = self.parser.GetReparseDeferralEnabled()
-        try:
-            self.parser.SetReparseDeferralEnabled(False)
-            self.parser.Parse(b"", False)
-        except self._error as v:
-            self._raiseerror(v)
-        finally:
-            self.parser.SetReparseDeferralEnabled(was_enabled)
-
-# --------------------------------------------------------------------
-# C14N 2.0
-
-def canonicalize(xml_data=None, *, out=None, from_file=None, **options):
-    """Convert XML to its C14N 2.0 serialised form.
-
-    If *out* is provided, it must be a file or file-like object that receives
-    the serialised canonical XML output (text, not bytes) through its ``.write()``
-    method. To write to a file, open it in text mode with encoding "utf-8".
-    If *out* is not provided, this function returns the output as text string.
-
-    Either *xml_data* (an XML string) or *from_file* (a file path or
-    file-like object) must be provided as input.
-
-    The configuration options are the same as for the ``C14NWriterTarget``.
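The _default() handler above only forwards DOCTYPE information when the parse target defines doctype(); a sketch of hooking it, with an illustrative class name:

    from xml.etree.ElementTree import TreeBuilder, XMLParser

    class DoctypeBuilder(TreeBuilder):
        def doctype(self, name, pubid, system):
            print("DOCTYPE:", name, pubid, system)

    parser = XMLParser(target=DoctypeBuilder())
    parser.feed('<!DOCTYPE html SYSTEM "about:legacy-compat"><html/>')
    parser.close()   # prints: DOCTYPE: html None about:legacy-compat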
- """ - if xml_data is None and from_file is None: - raise ValueError("Either 'xml_data' or 'from_file' must be provided as input") - sio = None - if out is None: - sio = out = io.StringIO() - - parser = XMLParser(target=C14NWriterTarget(out.write, **options)) - - if xml_data is not None: - parser.feed(xml_data) - parser.close() - elif from_file is not None: - parse(from_file, parser=parser) - - return sio.getvalue() if sio is not None else None - - -_looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match - - -class C14NWriterTarget: - """ - Canonicalization writer target for the XMLParser. - - Serialises parse events to XML C14N 2.0. - - The *write* function is used for writing out the resulting data stream - as text (not bytes). To write to a file, open it in text mode with encoding - "utf-8" and pass its ``.write`` method. - - Configuration options: - - - *with_comments*: set to true to include comments - - *strip_text*: set to true to strip whitespace before and after text content - - *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}" - - *qname_aware_tags*: a set of qname aware tag names in which prefixes - should be replaced in text content - - *qname_aware_attrs*: a set of qname aware attribute names in which prefixes - should be replaced in text content - - *exclude_attrs*: a set of attribute names that should not be serialised - - *exclude_tags*: a set of tag names that should not be serialised - """ - def __init__(self, write, *, - with_comments=False, strip_text=False, rewrite_prefixes=False, - qname_aware_tags=None, qname_aware_attrs=None, - exclude_attrs=None, exclude_tags=None): - self._write = write - self._data = [] - self._with_comments = with_comments - self._strip_text = strip_text - self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None - self._exclude_tags = set(exclude_tags) if exclude_tags else None - - self._rewrite_prefixes = rewrite_prefixes - if qname_aware_tags: - self._qname_aware_tags = set(qname_aware_tags) - else: - self._qname_aware_tags = None - if qname_aware_attrs: - self._find_qname_aware_attrs = set(qname_aware_attrs).intersection - else: - self._find_qname_aware_attrs = None - - # Stack with globally and newly declared namespaces as (uri, prefix) pairs. - self._declared_ns_stack = [[ - ("http://www.w3.org/XML/1998/namespace", "xml"), - ]] - # Stack with user declared namespace prefixes as (uri, prefix) pairs. 
- self._ns_stack = [] - if not rewrite_prefixes: - self._ns_stack.append(list(_namespace_map.items())) - self._ns_stack.append([]) - self._prefix_map = {} - self._preserve_space = [False] - self._pending_start = None - self._root_seen = False - self._root_done = False - self._ignored_depth = 0 - - def _iter_namespaces(self, ns_stack, _reversed=reversed): - for namespaces in _reversed(ns_stack): - if namespaces: # almost no element declares new namespaces - yield from namespaces - - def _resolve_prefix_name(self, prefixed_name): - prefix, name = prefixed_name.split(':', 1) - for uri, p in self._iter_namespaces(self._ns_stack): - if p == prefix: - return f'{{{uri}}}{name}' - raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope') - - def _qname(self, qname, uri=None): - if uri is None: - uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname) - else: - tag = qname - - prefixes_seen = set() - for u, prefix in self._iter_namespaces(self._declared_ns_stack): - if u == uri and prefix not in prefixes_seen: - return f'{prefix}:{tag}' if prefix else tag, tag, uri - prefixes_seen.add(prefix) - - # Not declared yet => add new declaration. - if self._rewrite_prefixes: - if uri in self._prefix_map: - prefix = self._prefix_map[uri] - else: - prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}' - self._declared_ns_stack[-1].append((uri, prefix)) - return f'{prefix}:{tag}', tag, uri - - if not uri and '' not in prefixes_seen: - # No default namespace declared => no prefix needed. - return tag, tag, uri - - for u, prefix in self._iter_namespaces(self._ns_stack): - if u == uri: - self._declared_ns_stack[-1].append((uri, prefix)) - return f'{prefix}:{tag}' if prefix else tag, tag, uri - - if not uri: - # As soon as a default namespace is defined, - # anything that has no namespace (and thus, no prefix) goes there. - return tag, tag, uri - - raise ValueError(f'Namespace "{uri}" is not declared in scope') - - def data(self, data): - if not self._ignored_depth: - self._data.append(data) - - def _flush(self, _join_text=''.join): - data = _join_text(self._data) - del self._data[:] - if self._strip_text and not self._preserve_space[-1]: - data = data.strip() - if self._pending_start is not None: - args, self._pending_start = self._pending_start, None - qname_text = data if data and _looks_like_prefix_name(data) else None - self._start(*args, qname_text) - if qname_text is not None: - return - if data and self._root_seen: - self._write(_escape_cdata_c14n(data)) - - def start_ns(self, prefix, uri): - if self._ignored_depth: - return - # we may have to resolve qnames in text content - if self._data: - self._flush() - self._ns_stack[-1].append((uri, prefix)) - - def start(self, tag, attrs): - if self._exclude_tags is not None and ( - self._ignored_depth or tag in self._exclude_tags): - self._ignored_depth += 1 - return - if self._data: - self._flush() - - new_namespaces = [] - self._declared_ns_stack.append(new_namespaces) - - if self._qname_aware_tags is not None and tag in self._qname_aware_tags: - # Need to parse text first to see if it requires a prefix declaration. - self._pending_start = (tag, attrs, new_namespaces) - return - self._start(tag, attrs, new_namespaces) - - def _start(self, tag, attrs, new_namespaces, qname_text=None): - if self._exclude_attrs is not None and attrs: - attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs} - - qnames = {tag, *attrs} - resolved_names = {} - - # Resolve prefixes in attribute and tag text. 
-        if qname_text is not None:
-            qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text)
-            qnames.add(qname)
-        if self._find_qname_aware_attrs is not None and attrs:
-            qattrs = self._find_qname_aware_attrs(attrs)
-            if qattrs:
-                for attr_name in qattrs:
-                    value = attrs[attr_name]
-                    if _looks_like_prefix_name(value):
-                        qname = resolved_names[value] = self._resolve_prefix_name(value)
-                        qnames.add(qname)
-            else:
-                qattrs = None
-        else:
-            qattrs = None
-
-        # Assign prefixes in lexicographical order of used URIs.
-        parse_qname = self._qname
-        parsed_qnames = {n: parse_qname(n) for n in sorted(
-            qnames, key=lambda n: n.split('}', 1))}
-
-        # Write namespace declarations in prefix order ...
-        if new_namespaces:
-            attr_list = [
-                ('xmlns:' + prefix if prefix else 'xmlns', uri)
-                for uri, prefix in new_namespaces
-            ]
-            attr_list.sort()
-        else:
-            # almost always empty
-            attr_list = []
-
-        # ... followed by attributes in URI+name order
-        if attrs:
-            for k, v in sorted(attrs.items()):
-                if qattrs is not None and k in qattrs and v in resolved_names:
-                    v = parsed_qnames[resolved_names[v]][0]
-                attr_qname, attr_name, uri = parsed_qnames[k]
-                # No prefix for attributes in default ('') namespace.
-                attr_list.append((attr_qname if uri else attr_name, v))
-
-        # Honour xml:space attributes.
-        space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space')
-        self._preserve_space.append(
-            space_behaviour == 'preserve' if space_behaviour
-            else self._preserve_space[-1])
-
-        # Write the tag.
-        write = self._write
-        write('<' + parsed_qnames[tag][0])
-        if attr_list:
-            write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list]))
-        write('>')
-
-        # Write the resolved qname text content.
-        if qname_text is not None:
-            write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0]))
-
-        self._root_seen = True
-        self._ns_stack.append([])
-
-    def end(self, tag):
-        if self._ignored_depth:
-            self._ignored_depth -= 1
-            return
-        if self._data:
-            self._flush()
-        self._write(f'</{self._qname(tag)[0]}>')
-        self._preserve_space.pop()
-        self._root_done = len(self._preserve_space) == 1
-        self._declared_ns_stack.pop()
-        self._ns_stack.pop()
-
-    def comment(self, text):
-        if not self._with_comments:
-            return
-        if self._ignored_depth:
-            return
-        if self._root_done:
-            self._write('\n')
-        elif self._root_seen and self._data:
-            self._flush()
-        self._write(f'<!--{_escape_cdata_c14n(text)}-->')
-        if not self._root_seen:
-            self._write('\n')
-
-    def pi(self, target, data):
-        if self._ignored_depth:
-            return
-        if self._root_done:
-            self._write('\n')
-        elif self._root_seen and self._data:
-            self._flush()
-        self._write(
-            f'<?{target} {_escape_cdata_c14n(data)}?>' if data else f'<?{target}?>')
-        if not self._root_seen:
-            self._write('\n')
-
-
-def _escape_cdata_c14n(text):
-    # escape character data
-    try:
-        # it's worth avoiding do-nothing calls for strings that are
-        # shorter than 500 character, or so. assume that's, by far,
-        # the most common case in most applications.
-        if '&' in text:
-            text = text.replace('&', '&amp;')
-        if '<' in text:
-            text = text.replace('<', '&lt;')
-        if '>' in text:
-            text = text.replace('>', '&gt;')
-        if '\r' in text:
-            text = text.replace('\r', '&#xD;')
-        return text
-    except (TypeError, AttributeError):
-        _raise_serialization_error(text)
-
-
-def _escape_attrib_c14n(text):
-    # escape attribute value
-    try:
-        if '&' in text:
-            text = text.replace('&', '&amp;')
-        if '<' in text:
-            text = text.replace('<', '&lt;')
-        if '"' in text:
-            text = text.replace('"', '&quot;')
-        if '\t' in text:
-            text = text.replace('\t', '&#x9;')
-        if '\n' in text:
-            text = text.replace('\n', '&#xA;')
-        if '\r' in text:
-            text = text.replace('\r', '&#xD;')
-        return text
-    except (TypeError, AttributeError):
-        _raise_serialization_error(text)
-
-
-# --------------------------------------------------------------------
-
-# Import the C accelerators
-try:
-    # Element is going to be shadowed by the C implementation. We need to keep
-    # the Python version of it accessible for some "creative" uses by external code
-    # (see tests)
-    _Element_Py = Element
-
-    # Element, SubElement, ParseError, TreeBuilder, XMLParser, _set_factories
-    from _elementtree import *
-    from _elementtree import _set_factories
-except ImportError:
-    pass
-else:
-    _set_factories(Comment, ProcessingInstruction)
diff --git a/Python313_13_x64_Template/Lib/xml/parsers/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/xml/parsers/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index bc21ea8f..00000000
Binary files a/Python313_13_x64_Template/Lib/xml/parsers/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/xml/parsers/__pycache__/expat.cpython-313.pyc b/Python313_13_x64_Template/Lib/xml/parsers/__pycache__/expat.cpython-313.pyc
deleted file mode 100644
index 449bbf5c..00000000
Binary files a/Python313_13_x64_Template/Lib/xml/parsers/__pycache__/expat.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x64_Template/Lib/xml/sax/__init__.py b/Python313_13_x64_Template/Lib/xml/sax/__init__.py
deleted file mode 100644
index b6573102..00000000
--- a/Python313_13_x64_Template/Lib/xml/sax/__init__.py
+++ /dev/null
@@ -1,94 +0,0 @@
-"""Simple API for XML (SAX) implementation for Python.
-
-This module provides an implementation of the SAX 2 interface;
-information about the Java version of the interface can be found at
-http://www.megginson.com/SAX/. The Python version of the interface is
-documented at <...>.
-
-This package contains the following modules:
-
-handler -- Base classes and constants which define the SAX 2 API for
-           the 'client-side' of SAX for Python.
-
-saxutils -- Implementation of the convenience classes commonly used to
-            work with SAX.
-
-xmlreader -- Base classes and constants which define the SAX 2 API for
-             the parsers used with SAX for Python.
-
-expatreader -- Driver that allows use of the Expat parser with SAX.
-""" - -from .xmlreader import InputSource -from .handler import ContentHandler, ErrorHandler -from ._exceptions import SAXException, SAXNotRecognizedException, \ - SAXParseException, SAXNotSupportedException, \ - SAXReaderNotAvailable - - -def parse(source, handler, errorHandler=ErrorHandler()): - parser = make_parser() - parser.setContentHandler(handler) - parser.setErrorHandler(errorHandler) - parser.parse(source) - -def parseString(string, handler, errorHandler=ErrorHandler()): - import io - if errorHandler is None: - errorHandler = ErrorHandler() - parser = make_parser() - parser.setContentHandler(handler) - parser.setErrorHandler(errorHandler) - - inpsrc = InputSource() - if isinstance(string, str): - inpsrc.setCharacterStream(io.StringIO(string)) - else: - inpsrc.setByteStream(io.BytesIO(string)) - parser.parse(inpsrc) - -# this is the parser list used by the make_parser function if no -# alternatives are given as parameters to the function - -default_parser_list = ["xml.sax.expatreader"] - -# tell modulefinder that importing sax potentially imports expatreader -_false = 0 -if _false: - import xml.sax.expatreader - -import os, sys -if not sys.flags.ignore_environment and "PY_SAX_PARSER" in os.environ: - default_parser_list = os.environ["PY_SAX_PARSER"].split(",") -del os, sys - - -def make_parser(parser_list=()): - """Creates and returns a SAX parser. - - Creates the first parser it is able to instantiate of the ones - given in the iterable created by chaining parser_list and - default_parser_list. The iterables must contain the names of Python - modules containing both a SAX parser and a create_parser function.""" - - for parser_name in list(parser_list) + default_parser_list: - try: - return _create_parser(parser_name) - except ImportError: - import sys - if parser_name in sys.modules: - # The parser module was found, but importing it - # failed unexpectedly, pass this exception through - raise - except SAXReaderNotAvailable: - # The parser module detected that it won't work properly, - # so try the next one - pass - - raise SAXReaderNotAvailable("No parsers found", None) - -# --- Internal utility methods used by make_parser - -def _create_parser(parser_name): - drv_module = __import__(parser_name,{},{},['create_parser']) - return drv_module.create_parser() diff --git a/Python313_13_x64_Template/Lib/xml/sax/handler.py b/Python313_13_x64_Template/Lib/xml/sax/handler.py deleted file mode 100644 index e8d417e5..00000000 --- a/Python313_13_x64_Template/Lib/xml/sax/handler.py +++ /dev/null @@ -1,387 +0,0 @@ -""" -This module contains the core classes of version 2.0 of SAX for Python. -This file provides only default classes with absolutely minimum -functionality, from which drivers and applications can be subclassed. - -Many of these classes are empty and are included only as documentation -of the interfaces. - -$Id$ -""" - -version = '2.0beta' - -#============================================================================ -# -# HANDLER INTERFACES -# -#============================================================================ - -# ===== ERRORHANDLER ===== - -class ErrorHandler: - """Basic interface for SAX error handlers. - - If you create an object that implements this interface, then - register the object with your XMLReader, the parser will call the - methods in your object to report all warnings and errors. There - are three levels of errors available: warnings, (possibly) - recoverable errors, and unrecoverable errors. 
All methods take a - SAXParseException as the only parameter.""" - - def error(self, exception): - "Handle a recoverable error." - raise exception - - def fatalError(self, exception): - "Handle a non-recoverable error." - raise exception - - def warning(self, exception): - "Handle a warning." - print(exception) - - -# ===== CONTENTHANDLER ===== - -class ContentHandler: - """Interface for receiving logical document content events. - - This is the main callback interface in SAX, and the one most - important to applications. The order of events in this interface - mirrors the order of the information in the document.""" - - def __init__(self): - self._locator = None - - def setDocumentLocator(self, locator): - """Called by the parser to give the application a locator for - locating the origin of document events. - - SAX parsers are strongly encouraged (though not absolutely - required) to supply a locator: if it does so, it must supply - the locator to the application by invoking this method before - invoking any of the other methods in the DocumentHandler - interface. - - The locator allows the application to determine the end - position of any document-related event, even if the parser is - not reporting an error. Typically, the application will use - this information for reporting its own errors (such as - character content that does not match an application's - business rules). The information returned by the locator is - probably not sufficient for use with a search engine. - - Note that the locator will return correct information only - during the invocation of the events in this interface. The - application should not attempt to use it at any other time.""" - self._locator = locator - - def startDocument(self): - """Receive notification of the beginning of a document. - - The SAX parser will invoke this method only once, before any - other methods in this interface or in DTDHandler (except for - setDocumentLocator).""" - - def endDocument(self): - """Receive notification of the end of a document. - - The SAX parser will invoke this method only once, and it will - be the last method invoked during the parse. The parser shall - not invoke this method until it has either abandoned parsing - (because of an unrecoverable error) or reached the end of - input.""" - - def startPrefixMapping(self, prefix, uri): - """Begin the scope of a prefix-URI Namespace mapping. - - The information from this event is not necessary for normal - Namespace processing: the SAX XML reader will automatically - replace prefixes for element and attribute names when the - http://xml.org/sax/features/namespaces feature is true (the - default). - - There are cases, however, when applications need to use - prefixes in character data or in attribute values, where they - cannot safely be expanded automatically; the - start/endPrefixMapping event supplies the information to the - application to expand prefixes in those contexts itself, if - necessary. - - Note that start/endPrefixMapping events are not guaranteed to - be properly nested relative to each-other: all - startPrefixMapping events will occur before the corresponding - startElement event, and all endPrefixMapping events will occur - after the corresponding endElement event, but their order is - not guaranteed.""" - - def endPrefixMapping(self, prefix): - """End the scope of a prefix-URI mapping. - - See startPrefixMapping for details. 
This event will always - occur after the corresponding endElement event, but the order - of endPrefixMapping events is not otherwise guaranteed.""" - - def startElement(self, name, attrs): - """Signals the start of an element in non-namespace mode. - - The name parameter contains the raw XML 1.0 name of the - element type as a string and the attrs parameter holds an - instance of the Attributes class containing the attributes of - the element.""" - - def endElement(self, name): - """Signals the end of an element in non-namespace mode. - - The name parameter contains the name of the element type, just - as with the startElement event.""" - - def startElementNS(self, name, qname, attrs): - """Signals the start of an element in namespace mode. - - The name parameter contains the name of the element type as a - (uri, localname) tuple, the qname parameter the raw XML 1.0 - name used in the source document, and the attrs parameter - holds an instance of the Attributes class containing the - attributes of the element. - - The uri part of the name tuple is None for elements which have - no namespace.""" - - def endElementNS(self, name, qname): - """Signals the end of an element in namespace mode. - - The name parameter contains the name of the element type, just - as with the startElementNS event.""" - - def characters(self, content): - """Receive notification of character data. - - The Parser will call this method to report each chunk of - character data. SAX parsers may return all contiguous - character data in a single chunk, or they may split it into - several chunks; however, all of the characters in any single - event must come from the same external entity so that the - Locator provides useful information.""" - - def ignorableWhitespace(self, whitespace): - """Receive notification of ignorable whitespace in element content. - - Validating Parsers must use this method to report each chunk - of ignorable whitespace (see the W3C XML 1.0 recommendation, - section 2.10): non-validating parsers may also use this method - if they are capable of parsing and using content models. - - SAX parsers may return all contiguous whitespace in a single - chunk, or they may split it into several chunks; however, all - of the characters in any single event must come from the same - external entity, so that the Locator provides useful - information.""" - - def processingInstruction(self, target, data): - """Receive notification of a processing instruction. - - The Parser will invoke this method once for each processing - instruction found: note that processing instructions may occur - before or after the main document element. - - A SAX parser should never report an XML declaration (XML 1.0, - section 2.8) or a text declaration (XML 1.0, section 4.3.1) - using this method.""" - - def skippedEntity(self, name): - """Receive notification of a skipped entity. - - The Parser will invoke this method once for each entity - skipped. Non-validating processors may skip entities if they - have not seen the declarations (because, for example, the - entity was declared in an external DTD subset). All processors - may skip external entities, depending on the values of the - http://xml.org/sax/features/external-general-entities and the - http://xml.org/sax/features/external-parameter-entities - properties.""" - - -# ===== DTDHandler ===== - -class DTDHandler: - """Handle DTD events. 
- - This interface specifies only those DTD events required for basic - parsing (unparsed entities and attributes).""" - - def notationDecl(self, name, publicId, systemId): - "Handle a notation declaration event." - - def unparsedEntityDecl(self, name, publicId, systemId, ndata): - "Handle an unparsed entity declaration event." - - -# ===== ENTITYRESOLVER ===== - -class EntityResolver: - """Basic interface for resolving entities. If you create an object - implementing this interface, then register the object with your - Parser, the parser will call the method in your object to - resolve all external entities. Note that DefaultHandler implements - this interface with the default behaviour.""" - - def resolveEntity(self, publicId, systemId): - """Resolve the system identifier of an entity and return either - the system identifier to read from as a string, or an InputSource - to read from.""" - return systemId - - -#============================================================================ -# -# CORE FEATURES -# -#============================================================================ - -feature_namespaces = "http://xml.org/sax/features/namespaces" -# true: Perform Namespace processing (default). -# false: Optionally do not perform Namespace processing -# (implies namespace-prefixes). -# access: (parsing) read-only; (not parsing) read/write - -feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes" -# true: Report the original prefixed names and attributes used for Namespace -# declarations. -# false: Do not report attributes used for Namespace declarations, and -# optionally do not report original prefixed names (default). -# access: (parsing) read-only; (not parsing) read/write - -feature_string_interning = "http://xml.org/sax/features/string-interning" -# true: All element names, prefixes, attribute names, Namespace URIs, and -# local names are interned using the built-in intern function. -# false: Names are not necessarily interned, although they may be (default). -# access: (parsing) read-only; (not parsing) read/write - -feature_validation = "http://xml.org/sax/features/validation" -# true: Report all validation errors (implies external-general-entities and -# external-parameter-entities). -# false: Do not report validation errors. -# access: (parsing) read-only; (not parsing) read/write - -feature_external_ges = "http://xml.org/sax/features/external-general-entities" -# true: Include all external general (text) entities. -# false: Do not include external general entities. -# access: (parsing) read-only; (not parsing) read/write - -feature_external_pes = "http://xml.org/sax/features/external-parameter-entities" -# true: Include all external parameter entities, including the external -# DTD subset. -# false: Do not include any external parameter entities, even the external -# DTD subset. -# access: (parsing) read-only; (not parsing) read/write - -all_features = [feature_namespaces, - feature_namespace_prefixes, - feature_string_interning, - feature_validation, - feature_external_ges, - feature_external_pes] - - -#============================================================================ -# -# CORE PROPERTIES -# -#============================================================================ - -property_lexical_handler = "http://xml.org/sax/properties/lexical-handler" -# data type: xml.sax.sax2lib.LexicalHandler -# description: An optional extension handler for lexical events like comments. 
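The feature flags above are set on a reader before parsing begins; a sketch of enabling namespace mode, where the handler class and document are illustrative:

    import io
    import xml.sax
    from xml.sax.handler import ContentHandler, feature_namespaces

    class NSHandler(ContentHandler):
        def startElementNS(self, name, qname, attrs):
            uri, localname = name          # (uri, localname) tuples in NS mode
            print(uri, localname)

    reader = xml.sax.make_parser()
    reader.setFeature(feature_namespaces, True)   # switch to *NS callbacks
    reader.setContentHandler(NSHandler())
    reader.parse(io.StringIO('<a xmlns="urn:demo"><b/></a>'))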
-# access: read/write - -property_declaration_handler = "http://xml.org/sax/properties/declaration-handler" -# data type: xml.sax.sax2lib.DeclHandler -# description: An optional extension handler for DTD-related events other -# than notations and unparsed entities. -# access: read/write - -property_dom_node = "http://xml.org/sax/properties/dom-node" -# data type: org.w3c.dom.Node -# description: When parsing, the current DOM node being visited if this is -# a DOM iterator; when not parsing, the root DOM node for -# iteration. -# access: (parsing) read-only; (not parsing) read/write - -property_xml_string = "http://xml.org/sax/properties/xml-string" -# data type: String -# description: The literal string of characters that was the source for -# the current event. -# access: read-only - -property_encoding = "http://www.python.org/sax/properties/encoding" -# data type: String -# description: The name of the encoding to assume for input data. -# access: write: set the encoding, e.g. established by a higher-level -# protocol. May change during parsing (e.g. after -# processing a META tag) -# read: return the current encoding (possibly established through -# auto-detection. -# initial value: UTF-8 -# - -property_interning_dict = "http://www.python.org/sax/properties/interning-dict" -# data type: Dictionary -# description: The dictionary used to intern common strings in the document -# access: write: Request that the parser uses a specific dictionary, to -# allow interning across different documents -# read: return the current interning dictionary, or None -# - -all_properties = [property_lexical_handler, - property_dom_node, - property_declaration_handler, - property_xml_string, - property_encoding, - property_interning_dict] - - -class LexicalHandler: - """Optional SAX2 handler for lexical events. - - This handler is used to obtain lexical information about an XML - document, that is, information about how the document was encoded - (as opposed to what it contains, which is reported to the - ContentHandler), such as comments and CDATA marked section - boundaries. - - To set the LexicalHandler of an XMLReader, use the setProperty - method with the property identifier - 'http://xml.org/sax/properties/lexical-handler'.""" - - def comment(self, content): - """Reports a comment anywhere in the document (including the - DTD and outside the document element). - - content is a string that holds the contents of the comment.""" - - def startDTD(self, name, public_id, system_id): - """Report the start of the DTD declarations, if the document - has an associated DTD. - - A startEntity event will be reported before declaration events - from the external DTD subset are reported, and this can be - used to infer from which subset DTD declarations derive. - - name is the name of the document element type, public_id the - public identifier of the DTD (or None if none were supplied) - and system_id the system identfier of the external subset (or - None if none were supplied).""" - - def endDTD(self): - """Signals the end of DTD declarations.""" - - def startCDATA(self): - """Reports the beginning of a CDATA marked section. 
- - The contents of the CDATA marked section will be reported - through the characters event.""" - - def endCDATA(self): - """Reports the end of a CDATA marked section.""" diff --git a/Python313_13_x64_Template/Lib/xmlrpc/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/xmlrpc/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 730a86da..00000000 Binary files a/Python313_13_x64_Template/Lib/xmlrpc/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/xmlrpc/__pycache__/client.cpython-313.pyc b/Python313_13_x64_Template/Lib/xmlrpc/__pycache__/client.cpython-313.pyc deleted file mode 100644 index a907d4d3..00000000 Binary files a/Python313_13_x64_Template/Lib/xmlrpc/__pycache__/client.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/xmlrpc/server.py b/Python313_13_x64_Template/Lib/xmlrpc/server.py deleted file mode 100644 index 4dddb1d1..00000000 --- a/Python313_13_x64_Template/Lib/xmlrpc/server.py +++ /dev/null @@ -1,1002 +0,0 @@ -r"""XML-RPC Servers. - -This module can be used to create simple XML-RPC servers -by creating a server and either installing functions, a -class instance, or by extending the SimpleXMLRPCServer -class. - -It can also be used to handle XML-RPC requests in a CGI -environment using CGIXMLRPCRequestHandler. - -The Doc* classes can be used to create XML-RPC servers that -serve pydoc-style documentation in response to HTTP -GET requests. This documentation is dynamically generated -based on the functions and methods registered with the -server. - -A list of possible usage patterns follows: - -1. Install functions: - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_function(pow) -server.register_function(lambda x,y: x+y, 'add') -server.serve_forever() - -2. Install an instance: - -class MyFuncs: - def __init__(self): - # make all of the sys functions available through sys.func_name - import sys - self.sys = sys - def _listMethods(self): - # implement this method so that system.listMethods - # knows to advertise the sys methods - return list_public_methods(self) + \ - ['sys.' + method for method in list_public_methods(self.sys)] - def pow(self, x, y): return pow(x, y) - def add(self, x, y) : return x + y - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_introspection_functions() -server.register_instance(MyFuncs()) -server.serve_forever() - -3. Install an instance with custom dispatch method: - -class Math: - def _listMethods(self): - # this method must be present for system.listMethods - # to work - return ['add', 'pow'] - def _methodHelp(self, method): - # this method must be present for system.methodHelp - # to work - if method == 'add': - return "add(2,3) => 5" - elif method == 'pow': - return "pow(x, y[, z]) => number" - else: - # By convention, return empty - # string if no help is available - return "" - def _dispatch(self, method, params): - if method == 'pow': - return pow(*params) - elif method == 'add': - return params[0] + params[1] - else: - raise ValueError('bad method') - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_introspection_functions() -server.register_instance(Math()) -server.serve_forever() - -4. 
Subclass SimpleXMLRPCServer: - -class MathServer(SimpleXMLRPCServer): - def _dispatch(self, method, params): - try: - # We are forcing the 'export_' prefix on methods that are - # callable through XML-RPC to prevent potential security - # problems - func = getattr(self, 'export_' + method) - except AttributeError: - raise Exception('method "%s" is not supported' % method) - else: - return func(*params) - - def export_add(self, x, y): - return x + y - -server = MathServer(("localhost", 8000)) -server.serve_forever() - -5. CGI script: - -server = CGIXMLRPCRequestHandler() -server.register_function(pow) -server.handle_request() -""" - -# Written by Brian Quinlan (brian@sweetapp.com). -# Based on code written by Fredrik Lundh. - -from xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode -from http.server import BaseHTTPRequestHandler -from functools import partial -from inspect import signature -import html -import http.server -import socketserver -import sys -import os -import re -import pydoc -import traceback -try: - import fcntl -except ImportError: - fcntl = None - -def resolve_dotted_attribute(obj, attr, allow_dotted_names=True): - """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d - - Resolves a dotted attribute name to an object. Raises - an AttributeError if any attribute in the chain starts with a '_'. - - If the optional allow_dotted_names argument is false, dots are not - supported and this function operates similar to getattr(obj, attr). - """ - - if allow_dotted_names: - attrs = attr.split('.') - else: - attrs = [attr] - - for i in attrs: - if i.startswith('_'): - raise AttributeError( - 'attempt to access private attribute "%s"' % i - ) - else: - obj = getattr(obj,i) - return obj - -def list_public_methods(obj): - """Returns a list of attribute strings, found in the specified - object, which represent callable attributes""" - - return [member for member in dir(obj) - if not member.startswith('_') and - callable(getattr(obj, member))] - -class SimpleXMLRPCDispatcher: - """Mix-in class that dispatches XML-RPC requests. - - This class is used to register XML-RPC method handlers - and then to dispatch them. This class doesn't need to be - instanced directly when used by SimpleXMLRPCServer but it - can be instanced when used by the MultiPathXMLRPCServer - """ - - def __init__(self, allow_none=False, encoding=None, - use_builtin_types=False): - self.funcs = {} - self.instance = None - self.allow_none = allow_none - self.encoding = encoding or 'utf-8' - self.use_builtin_types = use_builtin_types - - def register_instance(self, instance, allow_dotted_names=False): - """Registers an instance to respond to XML-RPC requests. - - Only one instance can be installed at a time. - - If the registered instance has a _dispatch method then that - method will be called with the name of the XML-RPC method and - its parameters as a tuple - e.g. instance._dispatch('add',(2,3)) - - If the registered instance does not have a _dispatch method - then the instance will be searched to find a matching method - and, if found, will be called. Methods beginning with an '_' - are considered private and will not be called by - SimpleXMLRPCServer. - - If a registered function matches an XML-RPC request, then it - will be called instead of the registered instance. - - If the optional allow_dotted_names argument is true and the - instance does not have a _dispatch method, method names - containing dots are supported and resolved, as long as none of - the name segments start with an '_'. 
- - *** SECURITY WARNING: *** - - Enabling the allow_dotted_names options allows intruders - to access your module's global variables and may allow - intruders to execute arbitrary code on your machine. Only - use this option on a secure, closed network. - - """ - - self.instance = instance - self.allow_dotted_names = allow_dotted_names - - def register_function(self, function=None, name=None): - """Registers a function to respond to XML-RPC requests. - - The optional name argument can be used to set a Unicode name - for the function. - """ - # decorator factory - if function is None: - return partial(self.register_function, name=name) - - if name is None: - name = function.__name__ - self.funcs[name] = function - - return function - - def register_introspection_functions(self): - """Registers the XML-RPC introspection methods in the system - namespace. - - see http://xmlrpc.usefulinc.com/doc/reserved.html - """ - - self.funcs.update({'system.listMethods' : self.system_listMethods, - 'system.methodSignature' : self.system_methodSignature, - 'system.methodHelp' : self.system_methodHelp}) - - def register_multicall_functions(self): - """Registers the XML-RPC multicall method in the system - namespace. - - see http://www.xmlrpc.com/discuss/msgReader$1208""" - - self.funcs.update({'system.multicall' : self.system_multicall}) - - def _marshaled_dispatch(self, data, dispatch_method = None, path = None): - """Dispatches an XML-RPC method from marshalled (XML) data. - - XML-RPC methods are dispatched from the marshalled (XML) data - using the _dispatch method and the result is returned as - marshalled data. For backwards compatibility, a dispatch - function can be provided as an argument (see comment in - SimpleXMLRPCRequestHandler.do_POST) but overriding the - existing method through subclassing is the preferred means - of changing method dispatch behavior. - """ - - try: - params, method = loads(data, use_builtin_types=self.use_builtin_types) - - # generate response - if dispatch_method is not None: - response = dispatch_method(method, params) - else: - response = self._dispatch(method, params) - # wrap response in a singleton tuple - response = (response,) - response = dumps(response, methodresponse=1, - allow_none=self.allow_none, encoding=self.encoding) - except Fault as fault: - response = dumps(fault, allow_none=self.allow_none, - encoding=self.encoding) - except BaseException as exc: - response = dumps( - Fault(1, "%s:%s" % (type(exc), exc)), - encoding=self.encoding, allow_none=self.allow_none, - ) - - return response.encode(self.encoding, 'xmlcharrefreplace') - - def system_listMethods(self): - """system.listMethods() => ['add', 'subtract', 'multiple'] - - Returns a list of the methods supported by the server.""" - - methods = set(self.funcs.keys()) - if self.instance is not None: - # Instance can implement _listMethod to return a list of - # methods - if hasattr(self.instance, '_listMethods'): - methods |= set(self.instance._listMethods()) - # if the instance has a _dispatch method then we - # don't have enough information to provide a list - # of methods - elif not hasattr(self.instance, '_dispatch'): - methods |= set(list_public_methods(self.instance)) - return sorted(methods) - - def system_methodSignature(self, method_name): - """system.methodSignature('add') => [double, int, int] - - Returns a list describing the signature of the method. In the - above example, the add method takes two integers as arguments - and returns a double result. 
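
For the introspection methods registered above, the client side is plain attribute access on xmlrpc.client.ServerProxy. A short sketch, assuming a hypothetical server like the ones in the module docstring is listening on localhost:8000 with register_introspection_functions() enabled and an 'add' function registered:

from xmlrpc.client import ServerProxy

proxy = ServerProxy("http://localhost:8000")
print(proxy.system.listMethods())      # names from funcs plus the instance
print(proxy.system.methodHelp("add"))  # help text resolved by the server
print(proxy.add(2, 3))                 # an ordinary dispatched call
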
- - This server does NOT support system.methodSignature.""" - - # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html - - return 'signatures not supported' - - def system_methodHelp(self, method_name): - """system.methodHelp('add') => "Adds two integers together" - - Returns a string containing documentation for the specified method.""" - - method = None - if method_name in self.funcs: - method = self.funcs[method_name] - elif self.instance is not None: - # Instance can implement _methodHelp to return help for a method - if hasattr(self.instance, '_methodHelp'): - return self.instance._methodHelp(method_name) - # if the instance has a _dispatch method then we - # don't have enough information to provide help - elif not hasattr(self.instance, '_dispatch'): - try: - method = resolve_dotted_attribute( - self.instance, - method_name, - self.allow_dotted_names - ) - except AttributeError: - pass - - # Note that we aren't checking that the method actually - # be a callable object of some kind - if method is None: - return "" - else: - return pydoc.getdoc(method) - - def system_multicall(self, call_list): - """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \ -[[4], ...] - - Allows the caller to package multiple XML-RPC calls into a single - request. - - See http://www.xmlrpc.com/discuss/msgReader$1208 - """ - - results = [] - for call in call_list: - method_name = call['methodName'] - params = call['params'] - - try: - # XXX A marshalling error in any response will fail the entire - # multicall. If someone cares they should fix this. - results.append([self._dispatch(method_name, params)]) - except Fault as fault: - results.append( - {'faultCode' : fault.faultCode, - 'faultString' : fault.faultString} - ) - except BaseException as exc: - results.append( - {'faultCode' : 1, - 'faultString' : "%s:%s" % (type(exc), exc)} - ) - return results - - def _dispatch(self, method, params): - """Dispatches the XML-RPC method. - - XML-RPC calls are forwarded to a registered function that - matches the called XML-RPC method name. If no such function - exists then the call is forwarded to the registered instance, - if available. - - If the registered instance has a _dispatch method then that - method will be called with the name of the XML-RPC method and - its parameters as a tuple - e.g. instance._dispatch('add',(2,3)) - - If the registered instance does not have a _dispatch method - then the instance will be searched to find a matching method - and, if found, will be called. - - Methods beginning with an '_' are considered private and will - not be called. - """ - - try: - # call the matching registered function - func = self.funcs[method] - except KeyError: - pass - else: - if func is not None: - return func(*params) - raise Exception('method "%s" is not supported' % method) - - if self.instance is not None: - if hasattr(self.instance, '_dispatch'): - # call the `_dispatch` method on the instance - return self.instance._dispatch(method, params) - - # call the instance's method directly - try: - func = resolve_dotted_attribute( - self.instance, - method, - self.allow_dotted_names - ) - except AttributeError: - pass - else: - if func is not None: - return func(*params) - - raise Exception('method "%s" is not supported' % method) - -class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler): - """Simple XML-RPC request handler class. - - Handles all HTTP POST requests and attempts to decode them as - XML-RPC requests. 
- """ - - # Class attribute listing the accessible path components; - # paths not on this list will result in a 404 error. - rpc_paths = ('/', '/RPC2', '/pydoc.css') - - #if not None, encode responses larger than this, if possible - encode_threshold = 1400 #a common MTU - - #Override form StreamRequestHandler: full buffering of output - #and no Nagle. - wbufsize = -1 - disable_nagle_algorithm = True - - # a re to match a gzip Accept-Encoding - aepattern = re.compile(r""" - \s* ([^\s;]+) \s* #content-coding - (;\s* q \s*=\s* ([0-9\.]+))? #q - """, re.VERBOSE | re.IGNORECASE) - - def accept_encodings(self): - r = {} - ae = self.headers.get("Accept-Encoding", "") - for e in ae.split(","): - match = self.aepattern.match(e) - if match: - v = match.group(3) - v = float(v) if v else 1.0 - r[match.group(1)] = v - return r - - def is_rpc_path_valid(self): - if self.rpc_paths: - return self.path in self.rpc_paths - else: - # If .rpc_paths is empty, just assume all paths are legal - return True - - def do_POST(self): - """Handles the HTTP POST request. - - Attempts to interpret all HTTP POST requests as XML-RPC calls, - which are forwarded to the server's _dispatch method for handling. - """ - - # Check that the path is legal - if not self.is_rpc_path_valid(): - self.report_404() - return - - try: - # Get arguments by reading body of request. - # We read this in chunks to avoid straining - # socket.read(); around the 10 or 15Mb mark, some platforms - # begin to have problems (bug #792570). - max_chunk_size = 10*1024*1024 - size_remaining = int(self.headers["content-length"]) - L = [] - while size_remaining: - chunk_size = min(size_remaining, max_chunk_size) - chunk = self.rfile.read(chunk_size) - if not chunk: - break - L.append(chunk) - size_remaining -= len(L[-1]) - data = b''.join(L) - - data = self.decode_request_content(data) - if data is None: - return #response has been sent - - # In previous versions of SimpleXMLRPCServer, _dispatch - # could be overridden in this class, instead of in - # SimpleXMLRPCDispatcher. To maintain backwards compatibility, - # check to see if a subclass implements _dispatch and dispatch - # using that method if present. 
- response = self.server._marshaled_dispatch( - data, getattr(self, '_dispatch', None), self.path - ) - except Exception as e: # This should only happen if the module is buggy - # internal error, report as HTTP server error - self.send_response(500) - - # Send information about the exception if requested - if hasattr(self.server, '_send_traceback_header') and \ - self.server._send_traceback_header: - self.send_header("X-exception", str(e)) - trace = traceback.format_exc() - trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII') - self.send_header("X-traceback", trace) - - self.send_header("Content-length", "0") - self.end_headers() - else: - self.send_response(200) - self.send_header("Content-type", "text/xml") - if self.encode_threshold is not None: - if len(response) > self.encode_threshold: - q = self.accept_encodings().get("gzip", 0) - if q: - try: - response = gzip_encode(response) - self.send_header("Content-Encoding", "gzip") - except NotImplementedError: - pass - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - - def decode_request_content(self, data): - #support gzip encoding of request - encoding = self.headers.get("content-encoding", "identity").lower() - if encoding == "identity": - return data - if encoding == "gzip": - try: - return gzip_decode(data) - except NotImplementedError: - self.send_response(501, "encoding %r not supported" % encoding) - except ValueError: - self.send_response(400, "error decoding gzip content") - else: - self.send_response(501, "encoding %r not supported" % encoding) - self.send_header("Content-length", "0") - self.end_headers() - - def report_404 (self): - # Report a 404 error - self.send_response(404) - response = b'No such page' - self.send_header("Content-type", "text/plain") - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - - def log_request(self, code='-', size='-'): - """Selectively log an accepted request.""" - - if self.server.logRequests: - BaseHTTPRequestHandler.log_request(self, code, size) - -class SimpleXMLRPCServer(socketserver.TCPServer, - SimpleXMLRPCDispatcher): - """Simple XML-RPC server. - - Simple XML-RPC server that allows functions and a single instance - to be installed to handle requests. The default implementation - attempts to dispatch XML-RPC calls to the functions or instance - installed in the server. Override the _dispatch method inherited - from SimpleXMLRPCDispatcher to change this behavior. - """ - - allow_reuse_address = True - - # Warning: this is for debugging purposes only! Never set this to True in - # production code, as will be sending out sensitive information (exception - # and stack trace details) when exceptions are raised inside - # SimpleXMLRPCRequestHandler.do_POST - _send_traceback_header = False - - def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, - logRequests=True, allow_none=False, encoding=None, - bind_and_activate=True, use_builtin_types=False): - self.logRequests = logRequests - - SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types) - socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate) - - -class MultiPathXMLRPCServer(SimpleXMLRPCServer): - """Multipath XML-RPC Server - This specialization of SimpleXMLRPCServer allows the user to create - multiple Dispatcher instances and assign them to different - HTTP request paths. 
This makes it possible to run two or more - 'virtual XML-RPC servers' at the same port. - Make sure that the requestHandler accepts the paths in question. - """ - def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, - logRequests=True, allow_none=False, encoding=None, - bind_and_activate=True, use_builtin_types=False): - - SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none, - encoding, bind_and_activate, use_builtin_types) - self.dispatchers = {} - self.allow_none = allow_none - self.encoding = encoding or 'utf-8' - - def add_dispatcher(self, path, dispatcher): - self.dispatchers[path] = dispatcher - return dispatcher - - def get_dispatcher(self, path): - return self.dispatchers[path] - - def _marshaled_dispatch(self, data, dispatch_method = None, path = None): - try: - response = self.dispatchers[path]._marshaled_dispatch( - data, dispatch_method, path) - except BaseException as exc: - # report low level exception back to server - # (each dispatcher should have handled their own - # exceptions) - response = dumps( - Fault(1, "%s:%s" % (type(exc), exc)), - encoding=self.encoding, allow_none=self.allow_none) - response = response.encode(self.encoding, 'xmlcharrefreplace') - return response - -class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher): - """Simple handler for XML-RPC data passed through CGI.""" - - def __init__(self, allow_none=False, encoding=None, use_builtin_types=False): - SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types) - - def handle_xmlrpc(self, request_text): - """Handle a single XML-RPC request""" - - response = self._marshaled_dispatch(request_text) - - print('Content-Type: text/xml') - print('Content-Length: %d' % len(response)) - print() - sys.stdout.flush() - sys.stdout.buffer.write(response) - sys.stdout.buffer.flush() - - def handle_get(self): - """Handle a single HTTP GET request. - - Default implementation indicates an error because - XML-RPC uses the POST method. - """ - - code = 400 - message, explain = BaseHTTPRequestHandler.responses[code] - - response = http.server.DEFAULT_ERROR_MESSAGE % \ - { - 'code' : code, - 'message' : message, - 'explain' : explain - } - response = response.encode('utf-8') - print('Status: %d %s' % (code, message)) - print('Content-Type: %s' % http.server.DEFAULT_ERROR_CONTENT_TYPE) - print('Content-Length: %d' % len(response)) - print() - sys.stdout.flush() - sys.stdout.buffer.write(response) - sys.stdout.buffer.flush() - - def handle_request(self, request_text=None): - """Handle a single XML-RPC request passed through a CGI post method. - - If no XML data is given then it is read from stdin. The resulting - XML-RPC response is printed to stdout along with the correct HTTP - headers. - """ - - if request_text is None and \ - os.environ.get('REQUEST_METHOD', None) == 'GET': - self.handle_get() - else: - # POST data is normally available through stdin - try: - length = int(os.environ.get('CONTENT_LENGTH', None)) - except (ValueError, TypeError): - length = -1 - if request_text is None: - request_text = sys.stdin.read(length) - - self.handle_xmlrpc(request_text) - - -# ----------------------------------------------------------------------------- -# Self documenting XML-RPC Server. - -class ServerHTMLDoc(pydoc.HTMLDoc): - """Class used to generate pydoc HTML document for a server""" - - def markup(self, text, escape=None, funcs={}, classes={}, methods={}): - """Mark up some plain text, given a context of symbols to look for. 
- Each context dictionary maps object names to anchor names.""" - escape = escape or self.escape - results = [] - here = 0 - - # XXX Note that this regular expression does not allow for the - # hyperlinking of arbitrary strings being used as method - # names. Only methods with names consisting of word characters - # and '.'s are hyperlinked. - pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|' - r'RFC[- ]?(\d+)|' - r'PEP[- ]?(\d+)|' - r'(self\.)?((?:\w|\.)+))\b') - while match := pattern.search(text, here): - start, end = match.span() - results.append(escape(text[here:start])) - - all, scheme, rfc, pep, selfdot, name = match.groups() - if scheme: - url = escape(all).replace('"', '&quot;') - results.append('<a href="%s">%s</a>' % (url, url)) - elif rfc: - url = 'https://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc) - results.append('<a href="%s">%s</a>' % (url, escape(all))) - elif pep: - url = 'https://peps.python.org/pep-%04d/' % int(pep) - results.append('<a href="%s">%s</a>' % (url, escape(all))) - elif text[end:end+1] == '(': - results.append(self.namelink(name, methods, funcs, classes)) - elif selfdot: - results.append('self.<strong>%s</strong>' % name) - else: - results.append(self.namelink(name, classes)) - here = end - results.append(escape(text[here:])) - return ''.join(results)
- - def docroutine(self, object, name, mod=None, - funcs={}, classes={}, methods={}, cl=None): - """Produce HTML documentation for a function or method object.""" - - anchor = (cl and cl.__name__ or '') + '-' + name - note = '' - - title = '<a name="%s"><strong>%s</strong></a>' % ( - self.escape(anchor), self.escape(name)) - - if callable(object): - argspec = str(signature(object)) - else: - argspec = '(...)' - - if isinstance(object, tuple): - argspec = object[0] or argspec - docstring = object[1] or "" - else: - docstring = pydoc.getdoc(object) - - decl = title + argspec + (note and self.grey( - '<font face="helvetica, arial">%s</font>' % note)) - - doc = self.markup( - docstring, self.preformat, funcs, classes, methods) - doc = doc and '<dd><tt>%s</tt></dd>' % doc - return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
- - def docserver(self, server_name, package_documentation, methods): - """Produce HTML documentation for an XML-RPC server.""" - - fdict = {} - for key, value in methods.items(): - fdict[key] = '#-' + key - fdict[value] = fdict[key] - - server_name = self.escape(server_name) - head = '<big><big><strong>%s</strong></big></big>' % server_name - result = self.heading(head) - - doc = self.markup(package_documentation, self.preformat, fdict) - doc = doc and '<tt>%s</tt>' % doc - result = result + '<p>%s</p>\n' % doc - - contents = [] - method_items = sorted(methods.items()) - for key, value in method_items: - contents.append(self.docroutine(value, key, funcs=fdict)) - result = result + self.bigsection( - 'Methods', 'functions', ''.join(contents)) - - return result
- - - def page(self, title, contents): - """Format an HTML page.""" - css_path = "/pydoc.css" - css_link = ( - '<link rel="stylesheet" type="text/css" href="%s">' % - css_path) - return '''\ -<!DOCTYPE> -<html lang="en"> -<head> -<meta charset="utf-8"> -<title>Python: %s</title> -%s</head><body>%s</body></html>''' % (title, css_link, contents)
-
-class XMLRPCDocGenerator: - """Generates documentation for an XML-RPC server. - - This class is designed as mix-in and should not - be constructed directly. - """ - - def __init__(self): - # setup variables used for HTML documentation - self.server_name = 'XML-RPC Server Documentation' - self.server_documentation = \ - "This server exports the following methods through the XML-RPC "\ - "protocol." - self.server_title = 'XML-RPC Server Documentation' - - def set_server_title(self, server_title): - """Set the HTML title of the generated server documentation""" - - self.server_title = server_title - - def set_server_name(self, server_name): - """Set the name of the generated HTML server documentation""" - - self.server_name = server_name - - def set_server_documentation(self, server_documentation): - """Set the documentation string for the entire server.""" - - self.server_documentation = server_documentation
- - def generate_html_documentation(self): - """generate_html_documentation() => html documentation for the server - - Generates HTML documentation for the server using introspection for - installed functions and instances that do not implement the - _dispatch method. Alternatively, instances can choose to implement - the _get_method_argstring(method_name) method to provide the - argument string used in the documentation and the - _methodHelp(method_name) method to provide the help text used - in the documentation.""" - - methods = {} - - for method_name in self.system_listMethods(): - if method_name in self.funcs: - method = self.funcs[method_name] - elif self.instance is not None: - method_info = [None, None] # argspec, documentation - if hasattr(self.instance, '_get_method_argstring'): - method_info[0] = self.instance._get_method_argstring(method_name) - if hasattr(self.instance, '_methodHelp'): - method_info[1] = self.instance._methodHelp(method_name) - - method_info = tuple(method_info) - if method_info != (None, None): - method = method_info - elif not hasattr(self.instance, '_dispatch'): - try: - method = resolve_dotted_attribute( - self.instance, - method_name - ) - except AttributeError: - method = method_info - else: - method = method_info - else: - assert 0, "Could not find method in self.functions and no "\ - "instance installed" - - methods[method_name] = method - - documenter = ServerHTMLDoc() - documentation = documenter.docserver( - self.server_name, - self.server_documentation, - methods - ) - - return documenter.page(html.escape(self.server_title), documentation) - -class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler): - """XML-RPC and documentation request handler class. - - Handles all HTTP POST requests and attempts to decode them as - XML-RPC requests. - - Handles all HTTP GET requests and interprets them as requests - for documentation. 
- """ - - def _get_css(self, url): - path_here = os.path.dirname(os.path.realpath(__file__)) - css_path = os.path.join(path_here, "..", "pydoc_data", "_pydoc.css") - with open(css_path, mode="rb") as fp: - return fp.read() - - def do_GET(self): - """Handles the HTTP GET request. - - Interpret all HTTP GET requests as requests for server - documentation. - """ - # Check that the path is legal - if not self.is_rpc_path_valid(): - self.report_404() - return - - if self.path.endswith('.css'): - content_type = 'text/css' - response = self._get_css(self.path) - else: - content_type = 'text/html' - response = self.server.generate_html_documentation().encode('utf-8') - - self.send_response(200) - self.send_header('Content-Type', '%s; charset=UTF-8' % content_type) - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - -class DocXMLRPCServer( SimpleXMLRPCServer, - XMLRPCDocGenerator): - """XML-RPC and HTML documentation server. - - Adds the ability to serve server documentation to the capabilities - of SimpleXMLRPCServer. - """ - - def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler, - logRequests=True, allow_none=False, encoding=None, - bind_and_activate=True, use_builtin_types=False): - SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, - allow_none, encoding, bind_and_activate, - use_builtin_types) - XMLRPCDocGenerator.__init__(self) - -class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler, - XMLRPCDocGenerator): - """Handler for XML-RPC data and documentation requests passed through - CGI""" - - def handle_get(self): - """Handles the HTTP GET request. - - Interpret all HTTP GET requests as requests for server - documentation. - """ - - response = self.generate_html_documentation().encode('utf-8') - - print('Content-Type: text/html') - print('Content-Length: %d' % len(response)) - print() - sys.stdout.flush() - sys.stdout.buffer.write(response) - sys.stdout.buffer.flush() - - def __init__(self): - CGIXMLRPCRequestHandler.__init__(self) - XMLRPCDocGenerator.__init__(self) - - -if __name__ == '__main__': - import datetime - - class ExampleService: - def getData(self): - return '42' - - class currentTime: - @staticmethod - def getCurrentTime(): - return datetime.datetime.now() - - with SimpleXMLRPCServer(("localhost", 8000)) as server: - server.register_function(pow) - server.register_function(lambda x,y: x+y, 'add') - server.register_instance(ExampleService(), allow_dotted_names=True) - server.register_multicall_functions() - print('Serving XML-RPC on localhost port 8000') - print('It is advisable to run this example server within a secure, closed network.') - try: - server.serve_forever() - except KeyboardInterrupt: - print("\nKeyboard interrupt received, exiting.") - sys.exit(0) diff --git a/Python313_13_x64_Template/Lib/zipapp.py b/Python313_13_x64_Template/Lib/zipapp.py deleted file mode 100644 index 4ffacc49..00000000 --- a/Python313_13_x64_Template/Lib/zipapp.py +++ /dev/null @@ -1,229 +0,0 @@ -import contextlib -import os -import pathlib -import shutil -import stat -import sys -import zipfile - -__all__ = ['ZipAppError', 'create_archive', 'get_interpreter'] - - -# The __main__.py used if the users specifies "-m module:fn". -# Note that this will always be written as UTF-8 (module and -# function names can be non-ASCII in Python 3). -# We add a coding cookie even though UTF-8 is the default in Python 3 -# because the resulting archive may be intended to be run under Python 2. 
-MAIN_TEMPLATE = """\ -# -*- coding: utf-8 -*- -import {module} -{module}.{fn}() -""" - - -# The Windows launcher defaults to UTF-8 when parsing shebang lines if the -# file has no BOM. So use UTF-8 on Windows. -# On Unix, use the filesystem encoding. -if sys.platform.startswith('win'): - shebang_encoding = 'utf-8' -else: - shebang_encoding = sys.getfilesystemencoding() - - -class ZipAppError(ValueError): - pass - - -@contextlib.contextmanager -def _maybe_open(archive, mode): - if isinstance(archive, (str, os.PathLike)): - with open(archive, mode) as f: - yield f - else: - yield archive - - -def _write_file_prefix(f, interpreter): - """Write a shebang line.""" - if interpreter: - shebang = b'#!' + interpreter.encode(shebang_encoding) + b'\n' - f.write(shebang) - - -def _copy_archive(archive, new_archive, interpreter=None): - """Copy an application archive, modifying the shebang line.""" - with _maybe_open(archive, 'rb') as src: - # Skip the shebang line from the source. - # Read 2 bytes of the source and check if they are #!. - first_2 = src.read(2) - if first_2 == b'#!': - # Discard the initial 2 bytes and the rest of the shebang line. - first_2 = b'' - src.readline() - - with _maybe_open(new_archive, 'wb') as dst: - _write_file_prefix(dst, interpreter) - # If there was no shebang, "first_2" contains the first 2 bytes - # of the source file, so write them before copying the rest - # of the file. - dst.write(first_2) - shutil.copyfileobj(src, dst) - - if interpreter and isinstance(new_archive, str): - os.chmod(new_archive, os.stat(new_archive).st_mode | stat.S_IEXEC) - - -def create_archive(source, target=None, interpreter=None, main=None, - filter=None, compressed=False): - """Create an application archive from SOURCE. - - The SOURCE can be the name of a directory, or a filename or a file-like - object referring to an existing archive. - - The content of SOURCE is packed into an application archive in TARGET, - which can be a filename or a file-like object. If SOURCE is a directory, - TARGET can be omitted and will default to the name of SOURCE with .pyz - appended. - - The created application archive will have a shebang line specifying - that it should run with INTERPRETER (there will be no shebang line if - INTERPRETER is None), and a __main__.py which runs MAIN (if MAIN is - not specified, an existing __main__.py will be used). It is an error - to specify MAIN for anything other than a directory source with no - __main__.py, and it is an error to omit MAIN if the directory has no - __main__.py. - """ - # Are we copying an existing archive? - source_is_file = False - if hasattr(source, 'read') and hasattr(source, 'readline'): - source_is_file = True - else: - source = pathlib.Path(source) - if source.is_file(): - source_is_file = True - - if source_is_file: - _copy_archive(source, target, interpreter) - return - - # We are creating a new archive from a directory. - if not source.exists(): - raise ZipAppError("Source does not exist") - has_main = (source / '__main__.py').is_file() - if main and has_main: - raise ZipAppError( - "Cannot specify entry point if the source has __main__.py") - if not (main or has_main): - raise ZipAppError("Archive has no entry point") - - main_py = None - if main: - # Check that main has the right format. 
- mod, sep, fn = main.partition(':') - mod_ok = all(part.isidentifier() for part in mod.split('.')) - fn_ok = all(part.isidentifier() for part in fn.split('.')) - if not (sep == ':' and mod_ok and fn_ok): - raise ZipAppError("Invalid entry point: " + main) - main_py = MAIN_TEMPLATE.format(module=mod, fn=fn) - - if target is None: - target = source.with_suffix('.pyz') - elif not hasattr(target, 'write'): - target = pathlib.Path(target) - - # Create the list of files to add to the archive now, in case - # the target is being created in the source directory - we - # don't want the target being added to itself - files_to_add = sorted(source.rglob('*')) - - # The target cannot be in the list of files to add. If it were, we'd - # end up overwriting the source file and writing the archive into - # itself, which is an error. We therefore check for that case and - # provide a helpful message for the user. - - # Note that we only do a simple path equality check. This won't - # catch every case, but it will catch the common case where the - # source is the CWD and the target is a file in the CWD. More - # thorough checks don't provide enough value to justify the extra - # cost. - - # If target is a file-like object, it will simply fail to compare - # equal to any of the entries in files_to_add, so there's no need - # to add a special check for that. - if target in files_to_add: - raise ZipAppError( - f"The target archive {target} overwrites one of the source files.") - - with _maybe_open(target, 'wb') as fd: - _write_file_prefix(fd, interpreter) - compression = (zipfile.ZIP_DEFLATED if compressed else - zipfile.ZIP_STORED) - with zipfile.ZipFile(fd, 'w', compression=compression) as z: - for child in files_to_add: - arcname = child.relative_to(source) - if filter is None or filter(arcname): - z.write(child, arcname.as_posix()) - if main_py: - z.writestr('__main__.py', main_py.encode('utf-8')) - - if interpreter and not hasattr(target, 'write'): - target.chmod(target.stat().st_mode | stat.S_IEXEC) - - -def get_interpreter(archive): - with _maybe_open(archive, 'rb') as f: - if f.read(2) == b'#!': - return f.readline().strip().decode(shebang_encoding) - - -def main(args=None): - """Run the zipapp command line interface. - - The ARGS parameter lets you specify the argument list directly. - Omitting ARGS (or setting it to None) works as for argparse, using - sys.argv[1:] as the argument list. - """ - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument('--output', '-o', default=None, - help="The name of the output archive. " - "Required if SOURCE is an archive.") - parser.add_argument('--python', '-p', default=None, - help="The name of the Python interpreter to use " - "(default: no shebang line).") - parser.add_argument('--main', '-m', default=None, - help="The main function of the application " - "(default: use an existing __main__.py).") - parser.add_argument('--compress', '-c', action='store_true', - help="Compress files with the deflate method. " - "Files are stored uncompressed by default.") - parser.add_argument('--info', default=False, action='store_true', - help="Display the interpreter from the archive.") - parser.add_argument('source', - help="Source directory (or existing archive).") - - args = parser.parse_args(args) - - # Handle `python -m zipapp archive.pyz --info`. 
- if args.info: - if not os.path.isfile(args.source): - raise SystemExit("Can only get info for an archive file") - interpreter = get_interpreter(args.source) - print("Interpreter: {}".format(interpreter or "")) - sys.exit(0) - - if os.path.isfile(args.source): - if args.output is None or (os.path.exists(args.output) and - os.path.samefile(args.source, args.output)): - raise SystemExit("In-place editing of archives is not supported") - if args.main: - raise SystemExit("Cannot change the main function when copying") - - create_archive(args.source, args.output, - interpreter=args.python, main=args.main, - compressed=args.compress) - - -if __name__ == '__main__': - main() diff --git a/Python313_13_x64_Template/Lib/zipfile/__init__.py b/Python313_13_x64_Template/Lib/zipfile/__init__.py deleted file mode 100644 index 3d889e9c..00000000 --- a/Python313_13_x64_Template/Lib/zipfile/__init__.py +++ /dev/null @@ -1,2375 +0,0 @@ -""" -Read and write ZIP files. - -XXX references to utf-8 need further investigation. -""" -import binascii -import importlib.util -import io -import os -import shutil -import stat -import struct -import sys -import threading -import time - -try: - import zlib # We may need its compression method - crc32 = zlib.crc32 -except ImportError: - zlib = None - crc32 = binascii.crc32 - -try: - import bz2 # We may need its compression method -except ImportError: - bz2 = None - -try: - import lzma # We may need its compression method -except ImportError: - lzma = None - -__all__ = ["BadZipFile", "BadZipfile", "error", - "ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA", - "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile", - "Path"] - -class BadZipFile(Exception): - pass - - -class LargeZipFile(Exception): - """ - Raised when writing a zipfile, the zipfile requires ZIP64 extensions - and those extensions are disabled. - """ - -error = BadZipfile = BadZipFile # Pre-3.2 compatibility names - - -ZIP64_LIMIT = (1 << 31) - 1 -ZIP_FILECOUNT_LIMIT = (1 << 16) - 1 -ZIP_MAX_COMMENT = (1 << 16) - 1 - -# constants for Zip file compression methods -ZIP_STORED = 0 -ZIP_DEFLATED = 8 -ZIP_BZIP2 = 12 -ZIP_LZMA = 14 -# Other ZIP compression methods not supported - -DEFAULT_VERSION = 20 -ZIP64_VERSION = 45 -BZIP2_VERSION = 46 -LZMA_VERSION = 63 -# we recognize (but not necessarily support) all features up to that version -MAX_EXTRACT_VERSION = 63 - -# Below are some formats and associated data for reading/writing headers using -# the struct module. 
The names and structures of headers/records are those used -# in the PKWARE description of the ZIP file format: -# http://www.pkware.com/documents/casestudies/APPNOTE.TXT -# (URL valid as of January 2008) - -# The "end of central directory" structure, magic number, size, and indices -# (section V.I in the format document) -structEndArchive = b"<4s4H2LH" -stringEndArchive = b"PK\005\006" -sizeEndCentDir = struct.calcsize(structEndArchive) - -_ECD_SIGNATURE = 0 -_ECD_DISK_NUMBER = 1 -_ECD_DISK_START = 2 -_ECD_ENTRIES_THIS_DISK = 3 -_ECD_ENTRIES_TOTAL = 4 -_ECD_SIZE = 5 -_ECD_OFFSET = 6 -_ECD_COMMENT_SIZE = 7 -# These last two indices are not part of the structure as defined in the -# spec, but they are used internally by this module as a convenience -_ECD_COMMENT = 8 -_ECD_LOCATION = 9 - -# The "central directory" structure, magic number, size, and indices -# of entries in the structure (section V.F in the format document) -structCentralDir = "<4s4B4HL2L5H2L" -stringCentralDir = b"PK\001\002" -sizeCentralDir = struct.calcsize(structCentralDir) - -# indexes of entries in the central directory structure -_CD_SIGNATURE = 0 -_CD_CREATE_VERSION = 1 -_CD_CREATE_SYSTEM = 2 -_CD_EXTRACT_VERSION = 3 -_CD_EXTRACT_SYSTEM = 4 -_CD_FLAG_BITS = 5 -_CD_COMPRESS_TYPE = 6 -_CD_TIME = 7 -_CD_DATE = 8 -_CD_CRC = 9 -_CD_COMPRESSED_SIZE = 10 -_CD_UNCOMPRESSED_SIZE = 11 -_CD_FILENAME_LENGTH = 12 -_CD_EXTRA_FIELD_LENGTH = 13 -_CD_COMMENT_LENGTH = 14 -_CD_DISK_NUMBER_START = 15 -_CD_INTERNAL_FILE_ATTRIBUTES = 16 -_CD_EXTERNAL_FILE_ATTRIBUTES = 17 -_CD_LOCAL_HEADER_OFFSET = 18 - -# General purpose bit flags -# Zip Appnote: 4.4.4 general purpose bit flag: (2 bytes) -_MASK_ENCRYPTED = 1 << 0 -# Bits 1 and 2 have different meanings depending on the compression used. -_MASK_COMPRESS_OPTION_1 = 1 << 1 -# _MASK_COMPRESS_OPTION_2 = 1 << 2 -# _MASK_USE_DATA_DESCRIPTOR: If set, crc-32, compressed size and uncompressed -# size are zero in the local header and the real values are written in the data -# descriptor immediately following the compressed data. -_MASK_USE_DATA_DESCRIPTOR = 1 << 3 -# Bit 4: Reserved for use with compression method 8, for enhanced deflating. -# _MASK_RESERVED_BIT_4 = 1 << 4 -_MASK_COMPRESSED_PATCH = 1 << 5 -_MASK_STRONG_ENCRYPTION = 1 << 6 -# _MASK_UNUSED_BIT_7 = 1 << 7 -# _MASK_UNUSED_BIT_8 = 1 << 8 -# _MASK_UNUSED_BIT_9 = 1 << 9 -# _MASK_UNUSED_BIT_10 = 1 << 10 -_MASK_UTF_FILENAME = 1 << 11 -# Bit 12: Reserved by PKWARE for enhanced compression. 
-# _MASK_RESERVED_BIT_12 = 1 << 12 -# _MASK_ENCRYPTED_CENTRAL_DIR = 1 << 13 -# Bit 14, 15: Reserved by PKWARE -# _MASK_RESERVED_BIT_14 = 1 << 14 -# _MASK_RESERVED_BIT_15 = 1 << 15 - -# The "local file header" structure, magic number, size, and indices -# (section V.A in the format document) -structFileHeader = "<4s2B4HL2L2H" -stringFileHeader = b"PK\003\004" -sizeFileHeader = struct.calcsize(structFileHeader) - -_FH_SIGNATURE = 0 -_FH_EXTRACT_VERSION = 1 -_FH_EXTRACT_SYSTEM = 2 -_FH_GENERAL_PURPOSE_FLAG_BITS = 3 -_FH_COMPRESSION_METHOD = 4 -_FH_LAST_MOD_TIME = 5 -_FH_LAST_MOD_DATE = 6 -_FH_CRC = 7 -_FH_COMPRESSED_SIZE = 8 -_FH_UNCOMPRESSED_SIZE = 9 -_FH_FILENAME_LENGTH = 10 -_FH_EXTRA_FIELD_LENGTH = 11 - -# The "Zip64 end of central directory locator" structure, magic number, and size -structEndArchive64Locator = "<4sLQL" -stringEndArchive64Locator = b"PK\x06\x07" -sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator) - -# The "Zip64 end of central directory" record, magic number, size, and indices -# (section V.G in the format document) -structEndArchive64 = "<4sQ2H2L4Q" -stringEndArchive64 = b"PK\x06\x06" -sizeEndCentDir64 = struct.calcsize(structEndArchive64) - -_CD64_SIGNATURE = 0 -_CD64_DIRECTORY_RECSIZE = 1 -_CD64_CREATE_VERSION = 2 -_CD64_EXTRACT_VERSION = 3 -_CD64_DISK_NUMBER = 4 -_CD64_DISK_NUMBER_START = 5 -_CD64_NUMBER_ENTRIES_THIS_DISK = 6 -_CD64_NUMBER_ENTRIES_TOTAL = 7 -_CD64_DIRECTORY_SIZE = 8 -_CD64_OFFSET_START_CENTDIR = 9 - -_DD_SIGNATURE = 0x08074b50 - - -class _Extra(bytes): - FIELD_STRUCT = struct.Struct(' 1: - raise BadZipFile("zipfiles that span multiple disks are not supported") - - offset -= sizeEndCentDir64 - if reloff > offset: - raise BadZipFile("Corrupt zip64 end of central directory locator") - # First, check the assumption that there is no prepended data. - fpin.seek(reloff) - extrasz = offset - reloff - data = fpin.read(sizeEndCentDir64) - if len(data) != sizeEndCentDir64: - raise OSError("Unknown I/O error") - if not data.startswith(stringEndArchive64) and reloff != offset: - # Since we already have seen the Zip64 EOCD Locator, it's - # possible we got here because there is prepended data. - # Assume no 'zip64 extensible data' - fpin.seek(offset) - extrasz = 0 - data = fpin.read(sizeEndCentDir64) - if len(data) != sizeEndCentDir64: - raise OSError("Unknown I/O error") - if not data.startswith(stringEndArchive64): - raise BadZipFile("Zip64 end of central directory record not found") - - sig, sz, create_version, read_version, disk_num, disk_dir, \ - dircount, dircount2, dirsize, diroffset = \ - struct.unpack(structEndArchive64, data) - if (diroffset + dirsize != reloff or - sz + 12 != sizeEndCentDir64 + extrasz): - raise BadZipFile("Corrupt zip64 end of central directory record") - - # Update the original endrec using data from the ZIP64 record - endrec[_ECD_SIGNATURE] = sig - endrec[_ECD_DISK_NUMBER] = disk_num - endrec[_ECD_DISK_START] = disk_dir - endrec[_ECD_ENTRIES_THIS_DISK] = dircount - endrec[_ECD_ENTRIES_TOTAL] = dircount2 - endrec[_ECD_SIZE] = dirsize - endrec[_ECD_OFFSET] = diroffset - endrec[_ECD_LOCATION] = offset - extrasz - return endrec - - -def _EndRecData(fpin): - """Return data from the "End of Central Directory" record, or None. 
- - The data is a list of the nine items in the ZIP "End of central dir" - record followed by a tenth item, the file seek offset of this record.""" - - # Determine file size - fpin.seek(0, 2) - filesize = fpin.tell() - - # Check to see if this is ZIP file with no archive comment (the - # "end of central directory" structure should be the last item in the - # file if this is the case). - try: - fpin.seek(-sizeEndCentDir, 2) - except OSError: - return None - data = fpin.read(sizeEndCentDir) - if (len(data) == sizeEndCentDir and - data[0:4] == stringEndArchive and - data[-2:] == b"\000\000"): - # the signature is correct and there's no comment, unpack structure - endrec = struct.unpack(structEndArchive, data) - endrec=list(endrec) - - # Append a blank comment and record start offset - endrec.append(b"") - endrec.append(filesize - sizeEndCentDir) - - # Try to read the "Zip64 end of central directory" structure - return _EndRecData64(fpin, filesize - sizeEndCentDir, endrec) - - # Either this is not a ZIP file, or it is a ZIP file with an archive - # comment. Search the end of the file for the "end of central directory" - # record signature. The comment is the last item in the ZIP file and may be - # up to 64K long. It is assumed that the "end of central directory" magic - # number does not appear in the comment. - maxCommentStart = max(filesize - ZIP_MAX_COMMENT - sizeEndCentDir, 0) - fpin.seek(maxCommentStart, 0) - data = fpin.read(ZIP_MAX_COMMENT + sizeEndCentDir) - start = data.rfind(stringEndArchive) - if start >= 0: - # found the magic number; attempt to unpack and interpret - recData = data[start:start+sizeEndCentDir] - if len(recData) != sizeEndCentDir: - # Zip file is corrupted. - return None - endrec = list(struct.unpack(structEndArchive, recData)) - commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file - comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize] - endrec.append(comment) - endrec.append(maxCommentStart + start) - - # Try to read the "Zip64 end of central directory" structure - return _EndRecData64(fpin, maxCommentStart + start, endrec) - - # Unable to find a valid end of central directory structure - return None - -def _sanitize_filename(filename): - """Terminate the file name at the first null byte and - ensure paths always use forward slashes as the directory separator.""" - - # Terminate the file name at the first null byte. Null bytes in file - # names are used as tricks by viruses in archives. - null_byte = filename.find(chr(0)) - if null_byte >= 0: - filename = filename[0:null_byte] - # This is used to ensure paths in generated ZIP files always use - # forward slashes as the directory separator, as required by the - # ZIP format specification. 
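
The record that _EndRecData hunts for is easy to inspect directly: an empty archive consists of nothing but a 22-byte "End of Central Directory" record, so unpacking it with the structEndArchive format shows the nine on-disk fields described above (the tenth item, the seek offset, is appended by this module). A small self-contained sketch using the public zipfile API:

import io
import struct
import zipfile

buf = io.BytesIO()
zipfile.ZipFile(buf, "w").close()   # empty archive: a bare EOCD record
data = buf.getvalue()
assert len(data) == struct.calcsize("<4s4H2LH")
print(struct.unpack("<4s4H2LH", data))
# (b'PK\x05\x06', 0, 0, 0, 0, 0, 0, 0): signature, disk numbers,
# entry counts, central directory size/offset, comment length
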
- if os.sep != "/" and os.sep in filename: - filename = filename.replace(os.sep, "/") - if os.altsep and os.altsep != "/" and os.altsep in filename: - filename = filename.replace(os.altsep, "/") - return filename - - -class ZipInfo: - """Class with attributes describing each file in the ZIP archive.""" - - __slots__ = ( - 'orig_filename', - 'filename', - 'date_time', - 'compress_type', - 'compress_level', - 'comment', - 'extra', - 'create_system', - 'create_version', - 'extract_version', - 'reserved', - 'flag_bits', - 'volume', - 'internal_attr', - 'external_attr', - 'header_offset', - 'CRC', - 'compress_size', - 'file_size', - '_raw_time', - '_end_offset', - ) - - def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)): - self.orig_filename = filename # Original file name in archive - - # Terminate the file name at the first null byte and - # ensure paths always use forward slashes as the directory separator. - filename = _sanitize_filename(filename) - - self.filename = filename # Normalized file name - self.date_time = date_time # year, month, day, hour, min, sec - - if date_time[0] < 1980: - raise ValueError('ZIP does not support timestamps before 1980') - - # Standard values: - self.compress_type = ZIP_STORED # Type of compression for the file - self.compress_level = None # Level for the compressor - self.comment = b"" # Comment for each file - self.extra = b"" # ZIP extra data - if sys.platform == 'win32': - self.create_system = 0 # System which created ZIP archive - else: - # Assume everything else is unix-y - self.create_system = 3 # System which created ZIP archive - self.create_version = DEFAULT_VERSION # Version which created ZIP archive - self.extract_version = DEFAULT_VERSION # Version needed to extract archive - self.reserved = 0 # Must be zero - self.flag_bits = 0 # ZIP flag bits - self.volume = 0 # Volume number of file header - self.internal_attr = 0 # Internal attributes - self.external_attr = 0 # External file attributes - self.compress_size = 0 # Size of the compressed file - self.file_size = 0 # Size of the uncompressed file - self._end_offset = None # Start of the next local header or central directory - # Other attributes are set by class ZipFile: - # header_offset Byte offset to the file header - # CRC CRC-32 of the uncompressed file - - # Maintain backward compatibility with the old protected attribute name. - @property - def _compresslevel(self): - return self.compress_level - - @_compresslevel.setter - def _compresslevel(self, value): - self.compress_level = value - - def __repr__(self): - result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)] - if self.compress_type != ZIP_STORED: - result.append(' compress_type=%s' % - compressor_names.get(self.compress_type, - self.compress_type)) - hi = self.external_attr >> 16 - lo = self.external_attr & 0xFFFF - if hi: - result.append(' filemode=%r' % stat.filemode(hi)) - if lo: - result.append(' external_attr=%#x' % lo) - isdir = self.is_dir() - if not isdir or self.file_size: - result.append(' file_size=%r' % self.file_size) - if ((not isdir or self.compress_size) and - (self.compress_type != ZIP_STORED or - self.file_size != self.compress_size)): - result.append(' compress_size=%r' % self.compress_size) - result.append('>') - return ''.join(result) - - def FileHeader(self, zip64=None): - """Return the per-file header as a bytes object. - - When the optional zip64 arg is None rather than a bool, we will - decide based upon the file_size and compress_size, if known, - False otherwise. 
- """ - dt = self.date_time - dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] - dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) - if self.flag_bits & _MASK_USE_DATA_DESCRIPTOR: - # Set these to zero because we write them after the file data - CRC = compress_size = file_size = 0 - else: - CRC = self.CRC - compress_size = self.compress_size - file_size = self.file_size - - extra = self.extra - - min_version = 0 - if zip64 is None: - # We always explicitly pass zip64 within this module.... This - # remains for anyone using ZipInfo.FileHeader as a public API. - zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT - if zip64: - fmt = '= 4: - tp, ln = unpack(' len(extra): - raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln)) - if tp == 0x0001: - data = extra[4:ln+4] - # ZIP64 extension (large files and/or large archives) - try: - if self.file_size in (0xFFFF_FFFF_FFFF_FFFF, 0xFFFF_FFFF): - field = "File size" - self.file_size, = unpack(' 2107: - date_time = (2107, 12, 31, 23, 59, 59) - # Create ZipInfo instance to store file information - if arcname is None: - arcname = filename - arcname = os.path.normpath(os.path.splitdrive(arcname)[1]) - while arcname[0] in (os.sep, os.altsep): - arcname = arcname[1:] - if isdir: - arcname += '/' - zinfo = cls(arcname, date_time) - zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes - if isdir: - zinfo.file_size = 0 - zinfo.external_attr |= 0x10 # MS-DOS directory flag - else: - zinfo.file_size = st.st_size - - return zinfo - - def is_dir(self): - """Return True if this archive member is a directory.""" - if self.filename.endswith('/'): - return True - # The ZIP format specification requires to use forward slashes - # as the directory separator, but in practice some ZIP files - # created on Windows can use backward slashes. For compatibility - # with the extraction code which already handles this: - if os.path.altsep: - return self.filename.endswith((os.path.sep, os.path.altsep)) - return False - - -# ZIP encryption uses the CRC32 one-byte primitive for scrambling some -# internal keys. We noticed that a direct implementation is faster than -# relying on binascii.crc32(). - -_crctable = None -def _gen_crc(crc): - for j in range(8): - if crc & 1: - crc = (crc >> 1) ^ 0xEDB88320 - else: - crc >>= 1 - return crc - -# ZIP supports a password-based form of encryption. Even though known -# plaintext attacks have been found against it, it is still useful -# to be able to get data out of such a file. 
-# -# Usage: -# zd = _ZipDecrypter(mypwd) -# plain_bytes = zd(cypher_bytes) - -def _ZipDecrypter(pwd): - key0 = 305419896 - key1 = 591751049 - key2 = 878082192 - - global _crctable - if _crctable is None: - _crctable = list(map(_gen_crc, range(256))) - crctable = _crctable - - def crc32(ch, crc): - """Compute the CRC32 primitive on one byte.""" - return (crc >> 8) ^ crctable[(crc ^ ch) & 0xFF] - - def update_keys(c): - nonlocal key0, key1, key2 - key0 = crc32(c, key0) - key1 = (key1 + (key0 & 0xFF)) & 0xFFFFFFFF - key1 = (key1 * 134775813 + 1) & 0xFFFFFFFF - key2 = crc32(key1 >> 24, key2) - - for p in pwd: - update_keys(p) - - def decrypter(data): - """Decrypt a bytes object.""" - result = bytearray() - append = result.append - for c in data: - k = key2 | 2 - c ^= ((k * (k^1)) >> 8) & 0xFF - update_keys(c) - append(c) - return bytes(result) - - return decrypter - - -class LZMACompressor: - - def __init__(self): - self._comp = None - - def _init(self): - props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1}) - self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[ - lzma._decode_filter_properties(lzma.FILTER_LZMA1, props) - ]) - return struct.pack('> 8) & 0xff - else: - # compare against the CRC otherwise - check_byte = (zipinfo.CRC >> 24) & 0xff - h = self._init_decrypter() - if h != check_byte: - raise RuntimeError("Bad password for file %r" % zipinfo.orig_filename) - - - def _init_decrypter(self): - self._decrypter = _ZipDecrypter(self._pwd) - # The first 12 bytes in the cypher stream is an encryption header - # used to strengthen the algorithm. The first 11 bytes are - # completely random, while the 12th contains the MSB of the CRC, - # or the MSB of the file time depending on the header type - # and is used to check the correctness of the password. - header = self._fileobj.read(12) - self._compress_left -= 12 - return self._decrypter(header)[11] - - def __repr__(self): - result = ['<%s.%s' % (self.__class__.__module__, - self.__class__.__qualname__)] - if not self.closed: - result.append(' name=%r' % (self.name,)) - if self._compress_type != ZIP_STORED: - result.append(' compress_type=%s' % - compressor_names.get(self._compress_type, - self._compress_type)) - else: - result.append(' [closed]') - result.append('>') - return ''.join(result) - - def readline(self, limit=-1): - """Read and return a line from the stream. - - If limit is specified, at most limit bytes will be read. - """ - - if limit < 0: - # Shortcut common case - newline found in buffer. - i = self._readbuffer.find(b'\n', self._offset) + 1 - if i > 0: - line = self._readbuffer[self._offset: i] - self._offset = i - return line - - return io.BufferedIOBase.readline(self, limit) - - def peek(self, n=1): - """Returns buffered bytes without advancing the position.""" - if n > len(self._readbuffer) - self._offset: - chunk = self.read(n) - if len(chunk) > self._offset: - self._readbuffer = chunk + self._readbuffer[self._offset:] - self._offset = 0 - else: - self._offset -= len(chunk) - - # Return up to 512 bytes to reduce allocation overhead for tight loops. - return self._readbuffer[self._offset: self._offset + 512] - - def readable(self): - if self.closed: - raise ValueError("I/O operation on closed file.") - return True - - def read(self, n=-1): - """Read and return up to n bytes. - If the argument is omitted, None, or negative, data is read and returned until EOF is reached. 
- """ - if self.closed: - raise ValueError("read from closed file.") - if n is None or n < 0: - buf = self._readbuffer[self._offset:] - self._readbuffer = b'' - self._offset = 0 - while not self._eof: - buf += self._read1(self.MAX_N) - return buf - - end = n + self._offset - if end < len(self._readbuffer): - buf = self._readbuffer[self._offset:end] - self._offset = end - return buf - - n = end - len(self._readbuffer) - buf = self._readbuffer[self._offset:] - self._readbuffer = b'' - self._offset = 0 - while n > 0 and not self._eof: - data = self._read1(n) - if n < len(data): - self._readbuffer = data - self._offset = n - buf += data[:n] - break - buf += data - n -= len(data) - return buf - - def _update_crc(self, newdata): - # Update the CRC using the given data. - if self._expected_crc is None: - # No need to compute the CRC if we don't have a reference value - return - self._running_crc = crc32(newdata, self._running_crc) - # Check the CRC if we're at the end of the file - if self._eof and self._running_crc != self._expected_crc: - raise BadZipFile("Bad CRC-32 for file %r" % self.name) - - def read1(self, n): - """Read up to n bytes with at most one read() system call.""" - - if n is None or n < 0: - buf = self._readbuffer[self._offset:] - self._readbuffer = b'' - self._offset = 0 - while not self._eof: - data = self._read1(self.MAX_N) - if data: - buf += data - break - return buf - - end = n + self._offset - if end < len(self._readbuffer): - buf = self._readbuffer[self._offset:end] - self._offset = end - return buf - - n = end - len(self._readbuffer) - buf = self._readbuffer[self._offset:] - self._readbuffer = b'' - self._offset = 0 - if n > 0: - while not self._eof: - data = self._read1(n) - if n < len(data): - self._readbuffer = data - self._offset = n - buf += data[:n] - break - if data: - buf += data - break - return buf - - def _read1(self, n): - # Read up to n compressed bytes with at most one read() system call, - # decrypt and decompress them. - if self._eof or n <= 0: - return b'' - - # Read from file. - if self._compress_type == ZIP_DEFLATED: - ## Handle unconsumed data. 
- data = self._decompressor.unconsumed_tail - if n > len(data): - data += self._read2(n - len(data)) - else: - data = self._read2(n) - - if self._compress_type == ZIP_STORED: - self._eof = self._compress_left <= 0 - elif self._compress_type == ZIP_DEFLATED: - n = max(n, self.MIN_READ_SIZE) - data = self._decompressor.decompress(data, n) - self._eof = (self._decompressor.eof or - self._compress_left <= 0 and - not self._decompressor.unconsumed_tail) - if self._eof: - data += self._decompressor.flush() - else: - data = self._decompressor.decompress(data) - self._eof = self._decompressor.eof or self._compress_left <= 0 - - data = data[:self._left] - self._left -= len(data) - if self._left <= 0: - self._eof = True - self._update_crc(data) - return data - - def _read2(self, n): - if self._compress_left <= 0: - return b'' - - n = max(n, self.MIN_READ_SIZE) - n = min(n, self._compress_left) - - data = self._fileobj.read(n) - self._compress_left -= len(data) - if not data: - raise EOFError - - if self._decrypter is not None: - data = self._decrypter(data) - return data - - def close(self): - try: - if self._close_fileobj: - self._fileobj.close() - finally: - super().close() - - def seekable(self): - if self.closed: - raise ValueError("I/O operation on closed file.") - return self._seekable - - def seek(self, offset, whence=os.SEEK_SET): - if self.closed: - raise ValueError("seek on closed file.") - if not self._seekable: - raise io.UnsupportedOperation("underlying stream is not seekable") - curr_pos = self.tell() - if whence == os.SEEK_SET: - new_pos = offset - elif whence == os.SEEK_CUR: - new_pos = curr_pos + offset - elif whence == os.SEEK_END: - new_pos = self._orig_file_size + offset - else: - raise ValueError("whence must be os.SEEK_SET (0), " - "os.SEEK_CUR (1), or os.SEEK_END (2)") - - if new_pos > self._orig_file_size: - new_pos = self._orig_file_size - - if new_pos < 0: - new_pos = 0 - - read_offset = new_pos - curr_pos - buff_offset = read_offset + self._offset - - if buff_offset >= 0 and buff_offset < len(self._readbuffer): - # Just move the _offset index if the new position is in the _readbuffer - self._offset = buff_offset - read_offset = 0 - # Fast seek uncompressed unencrypted file - elif self._compress_type == ZIP_STORED and self._decrypter is None and read_offset != 0: - # disable CRC checking after first seeking - it would be invalid - self._expected_crc = None - # seek actual file taking already buffered data into account - read_offset -= len(self._readbuffer) - self._offset - self._fileobj.seek(read_offset, os.SEEK_CUR) - self._left -= read_offset - self._compress_left -= read_offset - self._eof = self._left <= 0 - read_offset = 0 - # flush read buffer - self._readbuffer = b'' - self._offset = 0 - elif read_offset < 0: - # Position is before the current position. 
Reset the ZipExtFile
- self._fileobj.seek(self._orig_compress_start)
- self._running_crc = self._orig_start_crc
- self._expected_crc = self._orig_crc
- self._compress_left = self._orig_compress_size
- self._left = self._orig_file_size
- self._readbuffer = b''
- self._offset = 0
- self._decompressor = _get_decompressor(self._compress_type)
- self._eof = False
- read_offset = new_pos
- if self._decrypter is not None:
- self._init_decrypter()
-
- while read_offset > 0:
- read_len = min(self.MAX_SEEK_READ, read_offset)
- self.read(read_len)
- read_offset -= read_len
-
- return self.tell()
-
- def tell(self):
- if self.closed:
- raise ValueError("tell on closed file.")
- if not self._seekable:
- raise io.UnsupportedOperation("underlying stream is not seekable")
- filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset
- return filepos
-
-
-class _ZipWriteFile(io.BufferedIOBase):
- def __init__(self, zf, zinfo, zip64):
- self._zinfo = zinfo
- self._zip64 = zip64
- self._zipfile = zf
- self._compressor = _get_compressor(zinfo.compress_type,
- zinfo.compress_level)
- self._file_size = 0
- self._compress_size = 0
- self._crc = 0
-
- @property
- def _fileobj(self):
- return self._zipfile.fp
-
- @property
- def name(self):
- return self._zinfo.filename
-
- @property
- def mode(self):
- return 'wb'
-
- def writable(self):
- return True
-
- def write(self, data):
- if self.closed:
- raise ValueError('I/O operation on closed file.')
-
- # Accept any data that supports the buffer protocol
- if isinstance(data, (bytes, bytearray)):
- nbytes = len(data)
- else:
- data = memoryview(data)
- nbytes = data.nbytes
- self._file_size += nbytes
-
- self._crc = crc32(data, self._crc)
- if self._compressor:
- data = self._compressor.compress(data)
- self._compress_size += len(data)
- self._fileobj.write(data)
- return nbytes
-
- def close(self):
- if self.closed:
- return
- try:
- super().close()
- # Flush any data from the compressor, and update header info
- if self._compressor:
- buf = self._compressor.flush()
- self._compress_size += len(buf)
- self._fileobj.write(buf)
- self._zinfo.compress_size = self._compress_size
- else:
- self._zinfo.compress_size = self._file_size
- self._zinfo.CRC = self._crc
- self._zinfo.file_size = self._file_size
-
- if not self._zip64:
- if self._file_size > ZIP64_LIMIT:
- raise RuntimeError("File size too large, try using force_zip64")
- if self._compress_size > ZIP64_LIMIT:
- raise RuntimeError("Compressed size too large, try using force_zip64")
-
- # Write updated header info
- if self._zinfo.flag_bits & _MASK_USE_DATA_DESCRIPTOR:
- # Write CRC and file sizes after the file data
- fmt = '<LLQQ' if self._zip64 else '<LLLL'
- self._fileobj.write(struct.pack(fmt, _DD_SIGNATURE, self._zinfo.CRC,
- self._zinfo.compress_size,
- self._zinfo.file_size))
- self._zipfile.start_dir = self._fileobj.tell()
- else:
- # Seek backwards and write file header (which will now include
- # correct CRC and file sizes)
-
- # Preserve current position in file
- self._zipfile.start_dir = self._fileobj.tell()
- self._fileobj.seek(self._zinfo.header_offset)
- self._fileobj.write(self._zinfo.FileHeader(self._zip64))
- self._fileobj.seek(self._zipfile.start_dir)
-
- # Successfully written: Add file to our caches
- self._zipfile.filelist.append(self._zinfo)
- self._zipfile.NameToInfo[self._zinfo.filename] = self._zinfo
- finally:
- self._zipfile._writing = False
-
-
-class ZipFile:
- def __repr__(self):
- result = ['<%s.%s' % (self.__class__.__module__,
- self.__class__.__qualname__)]
- if self.fp is not None:
- if self._filePassed:
- result.append(' file=%r' % self.fp)
- elif self.filename is not None:
- result.append(' filename=%r' % self.filename)
- result.append(' mode=%r' % self.mode)
- else:
- result.append(' [closed]')
- result.append('>')
- return ''.join(result)
-
- def _RealGetContents(self):
- """Read in the table of contents for the ZIP file."""
- fp = self.fp
- try:
- endrec = _EndRecData(fp)
- except OSError:
- raise BadZipFile("File is not a zip file")
- if not endrec:
- raise BadZipFile("File is not a zip file")
- if self.debug > 1:
- print(endrec)
- size_cd = endrec[_ECD_SIZE] # bytes in central directory
- offset_cd = endrec[_ECD_OFFSET] # offset of central directory
- self._comment = endrec[_ECD_COMMENT] # archive comment
-
- # "concat" is zero, unless zip was concatenated to another file
- concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
-
- if self.debug > 2:
- inferred = concat + offset_cd
- print("given, inferred, offset", offset_cd, inferred, concat)
- # self.start_dir: Position of start of central directory
- self.start_dir = offset_cd + concat
- if self.start_dir < 0:
- raise
BadZipFile("Bad offset for central directory") - fp.seek(self.start_dir, 0) - data = fp.read(size_cd) - fp = io.BytesIO(data) - total = 0 - while total < size_cd: - centdir = fp.read(sizeCentralDir) - if len(centdir) != sizeCentralDir: - raise BadZipFile("Truncated central directory") - centdir = struct.unpack(structCentralDir, centdir) - if centdir[_CD_SIGNATURE] != stringCentralDir: - raise BadZipFile("Bad magic number for central directory") - if self.debug > 2: - print(centdir) - filename = fp.read(centdir[_CD_FILENAME_LENGTH]) - orig_filename_crc = crc32(filename) - flags = centdir[_CD_FLAG_BITS] - if flags & _MASK_UTF_FILENAME: - # UTF-8 file names extension - filename = filename.decode('utf-8') - else: - # Historical ZIP filename encoding - filename = filename.decode(self.metadata_encoding or 'cp437') - # Create ZipInfo instance to store file information - x = ZipInfo(filename) - x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) - x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) - x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] - (x.create_version, x.create_system, x.extract_version, x.reserved, - x.flag_bits, x.compress_type, t, d, - x.CRC, x.compress_size, x.file_size) = centdir[1:12] - if x.extract_version > MAX_EXTRACT_VERSION: - raise NotImplementedError("zip file version %.1f" % - (x.extract_version / 10)) - x.volume, x.internal_attr, x.external_attr = centdir[15:18] - # Convert date/time code to (year, month, day, hour, min, sec) - x._raw_time = t - x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, - t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) - x._decodeExtra(orig_filename_crc) - x.header_offset = x.header_offset + concat - self.filelist.append(x) - self.NameToInfo[x.filename] = x - - # update total bytes read from central directory - total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] - + centdir[_CD_EXTRA_FIELD_LENGTH] - + centdir[_CD_COMMENT_LENGTH]) - - if self.debug > 2: - print("total", total) - - end_offset = self.start_dir - for zinfo in reversed(sorted(self.filelist, - key=lambda zinfo: zinfo.header_offset)): - zinfo._end_offset = end_offset - end_offset = zinfo.header_offset - - def namelist(self): - """Return a list of file names in the archive.""" - return [data.filename for data in self.filelist] - - def infolist(self): - """Return a list of class ZipInfo instances for files in the - archive.""" - return self.filelist - - def printdir(self, file=None): - """Print a table of contents for the zip file.""" - print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"), - file=file) - for zinfo in self.filelist: - date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6] - print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size), - file=file) - - def testzip(self): - """Read all the files and check the CRC. - - Return None if all files could be read successfully, or the name - of the offending file otherwise.""" - chunk_size = 2 ** 20 - for zinfo in self.filelist: - try: - # Read by chunks, to avoid an OverflowError or a - # MemoryError with very large embedded files. 
- with self.open(zinfo.filename, "r") as f: - while f.read(chunk_size): # Check CRC-32 - pass - except BadZipFile: - return zinfo.filename - - def getinfo(self, name): - """Return the instance of ZipInfo given 'name'.""" - info = self.NameToInfo.get(name) - if info is None: - raise KeyError( - 'There is no item named %r in the archive' % name) - - return info - - def setpassword(self, pwd): - """Set default password for encrypted files.""" - if pwd and not isinstance(pwd, bytes): - raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) - if pwd: - self.pwd = pwd - else: - self.pwd = None - - @property - def comment(self): - """The comment text associated with the ZIP file.""" - return self._comment - - @comment.setter - def comment(self, comment): - if not isinstance(comment, bytes): - raise TypeError("comment: expected bytes, got %s" % type(comment).__name__) - # check for valid comment length - if len(comment) > ZIP_MAX_COMMENT: - import warnings - warnings.warn('Archive comment is too long; truncating to %d bytes' - % ZIP_MAX_COMMENT, stacklevel=2) - comment = comment[:ZIP_MAX_COMMENT] - self._comment = comment - self._didModify = True - - def read(self, name, pwd=None): - """Return file bytes for name. 'pwd' is the password to decrypt - encrypted files.""" - with self.open(name, "r", pwd) as fp: - return fp.read() - - def open(self, name, mode="r", pwd=None, *, force_zip64=False): - """Return file-like object for 'name'. - - name is a string for the file name within the ZIP file, or a ZipInfo - object. - - mode should be 'r' to read a file already in the ZIP file, or 'w' to - write to a file newly added to the archive. - - pwd is the password to decrypt files (only used for reading). - - When writing, if the file size is not known in advance but may exceed - 2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large - files. If the size is known in advance, it is best to pass a ZipInfo - instance for name, with zinfo.file_size set. - """ - if mode not in {"r", "w"}: - raise ValueError('open() requires mode "r" or "w"') - if pwd and (mode == "w"): - raise ValueError("pwd is only supported for reading files") - if not self.fp: - raise ValueError( - "Attempt to use ZIP archive that was already closed") - - # Make sure we have an info object - if isinstance(name, ZipInfo): - # 'name' is already an info object - zinfo = name - elif mode == 'w': - zinfo = ZipInfo(name) - zinfo.compress_type = self.compression - zinfo.compress_level = self.compresslevel - else: - # Get info object for name - zinfo = self.getinfo(name) - - if mode == 'w': - return self._open_to_write(zinfo, force_zip64=force_zip64) - - if self._writing: - raise ValueError("Can't read from the ZIP file while there " - "is an open writing handle on it. 
" - "Close the writing handle before trying to read.") - - # Open for reading: - self._fileRefCnt += 1 - zef_file = _SharedFile(self.fp, zinfo.header_offset, - self._fpclose, self._lock, lambda: self._writing) - try: - # Skip the file header: - fheader = zef_file.read(sizeFileHeader) - if len(fheader) != sizeFileHeader: - raise BadZipFile("Truncated file header") - fheader = struct.unpack(structFileHeader, fheader) - if fheader[_FH_SIGNATURE] != stringFileHeader: - raise BadZipFile("Bad magic number for file header") - - fname = zef_file.read(fheader[_FH_FILENAME_LENGTH]) - if fheader[_FH_EXTRA_FIELD_LENGTH]: - zef_file.seek(fheader[_FH_EXTRA_FIELD_LENGTH], whence=1) - - if zinfo.flag_bits & _MASK_COMPRESSED_PATCH: - # Zip 2.7: compressed patched data - raise NotImplementedError("compressed patched data (flag bit 5)") - - if zinfo.flag_bits & _MASK_STRONG_ENCRYPTION: - # strong encryption - raise NotImplementedError("strong encryption (flag bit 6)") - - if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & _MASK_UTF_FILENAME: - # UTF-8 filename - fname_str = fname.decode("utf-8") - else: - fname_str = fname.decode(self.metadata_encoding or "cp437") - - if fname_str != zinfo.orig_filename: - raise BadZipFile( - 'File name in directory %r and header %r differ.' - % (zinfo.orig_filename, fname)) - - if (zinfo._end_offset is not None and - zef_file.tell() + zinfo.compress_size > zinfo._end_offset): - if zinfo._end_offset == zinfo.header_offset: - import warnings - warnings.warn( - f"Overlapped entries: {zinfo.orig_filename!r} " - f"(possible zip bomb)", - skip_file_prefixes=(os.path.dirname(__file__),)) - else: - raise BadZipFile( - f"Overlapped entries: {zinfo.orig_filename!r} " - f"(possible zip bomb)") - - # check for encrypted flag & handle password - is_encrypted = zinfo.flag_bits & _MASK_ENCRYPTED - if is_encrypted: - if not pwd: - pwd = self.pwd - if pwd and not isinstance(pwd, bytes): - raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) - if not pwd: - raise RuntimeError("File %r is encrypted, password " - "required for extraction" % name) - else: - pwd = None - - return ZipExtFile(zef_file, mode + 'b', zinfo, pwd, True) - except: - zef_file.close() - raise - - def _open_to_write(self, zinfo, force_zip64=False): - if force_zip64 and not self._allowZip64: - raise ValueError( - "force_zip64 is True, but allowZip64 was False when opening " - "the ZIP file." - ) - if self._writing: - raise ValueError("Can't write to the ZIP file while there is " - "another write handle open on it. 
" - "Close the first handle before opening another.") - - # Size and CRC are overwritten with correct data after processing the file - zinfo.compress_size = 0 - zinfo.CRC = 0 - - zinfo.flag_bits = 0x00 - if zinfo.compress_type == ZIP_LZMA: - # Compressed data includes an end-of-stream (EOS) marker - zinfo.flag_bits |= _MASK_COMPRESS_OPTION_1 - if not self._seekable: - zinfo.flag_bits |= _MASK_USE_DATA_DESCRIPTOR - - if not zinfo.external_attr: - zinfo.external_attr = 0o600 << 16 # permissions: ?rw------- - - # Compressed size can be larger than uncompressed size - zip64 = force_zip64 or (zinfo.file_size * 1.05 > ZIP64_LIMIT) - if not self._allowZip64 and zip64: - raise LargeZipFile("Filesize would require ZIP64 extensions") - - if self._seekable: - self.fp.seek(self.start_dir) - zinfo.header_offset = self.fp.tell() - - self._writecheck(zinfo) - self._didModify = True - - self.fp.write(zinfo.FileHeader(zip64)) - - self._writing = True - return _ZipWriteFile(self, zinfo, zip64) - - def extract(self, member, path=None, pwd=None): - """Extract a member from the archive to the current working directory, - using its full name. Its file information is extracted as accurately - as possible. `member' may be a filename or a ZipInfo object. You can - specify a different directory using `path'. You can specify the - password to decrypt the file using 'pwd'. - """ - if path is None: - path = os.getcwd() - else: - path = os.fspath(path) - - return self._extract_member(member, path, pwd) - - def extractall(self, path=None, members=None, pwd=None): - """Extract all members from the archive to the current working - directory. `path' specifies a different directory to extract to. - `members' is optional and must be a subset of the list returned - by namelist(). You can specify the password to decrypt all files - using 'pwd'. - """ - if members is None: - members = self.namelist() - - if path is None: - path = os.getcwd() - else: - path = os.fspath(path) - - for zipinfo in members: - self._extract_member(zipinfo, path, pwd) - - @classmethod - def _sanitize_windows_name(cls, arcname, pathsep): - """Replace bad characters and remove trailing dots from parts.""" - table = cls._windows_illegal_name_trans_table - if not table: - illegal = ':<>|"?*' - table = str.maketrans(illegal, '_' * len(illegal)) - cls._windows_illegal_name_trans_table = table - arcname = arcname.translate(table) - # remove trailing dots and spaces - arcname = (x.rstrip(' .') for x in arcname.split(pathsep)) - # rejoin, removing empty parts. - arcname = pathsep.join(x for x in arcname if x) - return arcname - - def _extract_member(self, member, targetpath, pwd): - """Extract the ZipInfo object 'member' to a physical - file on the path targetpath. - """ - if not isinstance(member, ZipInfo): - member = self.getinfo(member) - - # build the destination pathname, replacing - # forward slashes to platform specific separators. - arcname = member.filename.replace('/', os.path.sep) - - if os.path.altsep: - arcname = arcname.replace(os.path.altsep, os.path.sep) - # interpret absolute pathname as relative, remove drive letter or - # UNC path, redundant separators, "." and ".." components. 
- arcname = os.path.splitdrive(arcname)[1] - invalid_path_parts = ('', os.path.curdir, os.path.pardir) - arcname = os.path.sep.join(x for x in arcname.split(os.path.sep) - if x not in invalid_path_parts) - if os.path.sep == '\\': - # filter illegal characters on Windows - arcname = self._sanitize_windows_name(arcname, os.path.sep) - - if not arcname and not member.is_dir(): - raise ValueError("Empty filename.") - - targetpath = os.path.join(targetpath, arcname) - targetpath = os.path.normpath(targetpath) - - # Create all upper directories if necessary. - upperdirs = os.path.dirname(targetpath) - if upperdirs and not os.path.exists(upperdirs): - os.makedirs(upperdirs, exist_ok=True) - - if member.is_dir(): - if not os.path.isdir(targetpath): - try: - os.mkdir(targetpath) - except FileExistsError: - if not os.path.isdir(targetpath): - raise - return targetpath - - with self.open(member, pwd=pwd) as source, \ - open(targetpath, "wb") as target: - shutil.copyfileobj(source, target) - - return targetpath - - def _writecheck(self, zinfo): - """Check for errors before writing a file to the archive.""" - if zinfo.filename in self.NameToInfo: - import warnings - warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3) - if self.mode not in ('w', 'x', 'a'): - raise ValueError("write() requires mode 'w', 'x', or 'a'") - if not self.fp: - raise ValueError( - "Attempt to write ZIP archive that was already closed") - _check_compression(zinfo.compress_type) - if not self._allowZip64: - requires_zip64 = None - if len(self.filelist) >= ZIP_FILECOUNT_LIMIT: - requires_zip64 = "Files count" - elif zinfo.file_size > ZIP64_LIMIT: - requires_zip64 = "Filesize" - elif zinfo.header_offset > ZIP64_LIMIT: - requires_zip64 = "Zipfile size" - if requires_zip64: - raise LargeZipFile(requires_zip64 + - " would require ZIP64 extensions") - - def write(self, filename, arcname=None, - compress_type=None, compresslevel=None): - """Put the bytes from filename into the archive under the name - arcname.""" - if not self.fp: - raise ValueError( - "Attempt to write to ZIP archive that was already closed") - if self._writing: - raise ValueError( - "Can't write to ZIP archive while an open writing handle exists" - ) - - zinfo = ZipInfo.from_file(filename, arcname, - strict_timestamps=self._strict_timestamps) - - if zinfo.is_dir(): - zinfo.compress_size = 0 - zinfo.CRC = 0 - self.mkdir(zinfo) - else: - if compress_type is not None: - zinfo.compress_type = compress_type - else: - zinfo.compress_type = self.compression - - if compresslevel is not None: - zinfo.compress_level = compresslevel - else: - zinfo.compress_level = self.compresslevel - - with open(filename, "rb") as src, self.open(zinfo, 'w') as dest: - shutil.copyfileobj(src, dest, 1024*8) - - def writestr(self, zinfo_or_arcname, data, - compress_type=None, compresslevel=None): - """Write a file into the archive. The contents is 'data', which - may be either a 'str' or a 'bytes' instance; if it is a 'str', - it is encoded as UTF-8 first. 
- 'zinfo_or_arcname' is either a ZipInfo instance or - the name of the file in the archive.""" - if isinstance(data, str): - data = data.encode("utf-8") - if not isinstance(zinfo_or_arcname, ZipInfo): - zinfo = ZipInfo(filename=zinfo_or_arcname, - date_time=time.localtime(time.time())[:6]) - zinfo.compress_type = self.compression - zinfo.compress_level = self.compresslevel - if zinfo.filename.endswith('/'): - zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x - zinfo.external_attr |= 0x10 # MS-DOS directory flag - else: - zinfo.external_attr = 0o600 << 16 # ?rw------- - else: - zinfo = zinfo_or_arcname - - if not self.fp: - raise ValueError( - "Attempt to write to ZIP archive that was already closed") - if self._writing: - raise ValueError( - "Can't write to ZIP archive while an open writing handle exists." - ) - - if compress_type is not None: - zinfo.compress_type = compress_type - - if compresslevel is not None: - zinfo.compress_level = compresslevel - - zinfo.file_size = len(data) # Uncompressed size - with self._lock: - with self.open(zinfo, mode='w') as dest: - dest.write(data) - - def mkdir(self, zinfo_or_directory_name, mode=511): - """Creates a directory inside the zip archive.""" - if isinstance(zinfo_or_directory_name, ZipInfo): - zinfo = zinfo_or_directory_name - if not zinfo.is_dir(): - raise ValueError("The given ZipInfo does not describe a directory") - elif isinstance(zinfo_or_directory_name, str): - directory_name = zinfo_or_directory_name - if not directory_name.endswith("/"): - directory_name += "/" - zinfo = ZipInfo(directory_name) - zinfo.compress_size = 0 - zinfo.CRC = 0 - zinfo.external_attr = ((0o40000 | mode) & 0xFFFF) << 16 - zinfo.file_size = 0 - zinfo.external_attr |= 0x10 - else: - raise TypeError("Expected type str or ZipInfo") - - with self._lock: - if self._seekable: - self.fp.seek(self.start_dir) - zinfo.header_offset = self.fp.tell() # Start of header bytes - if zinfo.compress_type == ZIP_LZMA: - # Compressed data includes an end-of-stream (EOS) marker - zinfo.flag_bits |= _MASK_COMPRESS_OPTION_1 - - self._writecheck(zinfo) - self._didModify = True - - self.filelist.append(zinfo) - self.NameToInfo[zinfo.filename] = zinfo - self.fp.write(zinfo.FileHeader(False)) - self.start_dir = self.fp.tell() - - def __del__(self): - """Call the "close()" method in case the user forgot.""" - self.close() - - def close(self): - """Close the file, and for mode 'w', 'x' and 'a' write the ending - records.""" - if self.fp is None: - return - - if self._writing: - raise ValueError("Can't close the ZIP file while there is " - "an open writing handle on it. 
" - "Close the writing handle before closing the zip.") - - try: - if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records - with self._lock: - if self._seekable: - self.fp.seek(self.start_dir) - self._write_end_record() - finally: - fp = self.fp - self.fp = None - self._fpclose(fp) - - def _write_end_record(self): - for zinfo in self.filelist: # write central directory - dt = zinfo.date_time - dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] - dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) - extra = [] - if zinfo.file_size > ZIP64_LIMIT \ - or zinfo.compress_size > ZIP64_LIMIT: - extra.append(zinfo.file_size) - extra.append(zinfo.compress_size) - file_size = 0xffffffff - compress_size = 0xffffffff - else: - file_size = zinfo.file_size - compress_size = zinfo.compress_size - - if zinfo.header_offset > ZIP64_LIMIT: - extra.append(zinfo.header_offset) - header_offset = 0xffffffff - else: - header_offset = zinfo.header_offset - - extra_data = zinfo.extra - min_version = 0 - if extra: - # Append a ZIP64 field to the extra's - extra_data = _Extra.strip(extra_data, (1,)) - extra_data = struct.pack( - ' ZIP_FILECOUNT_LIMIT: - requires_zip64 = "Files count" - elif centDirOffset > ZIP64_LIMIT: - requires_zip64 = "Central directory offset" - elif centDirSize > ZIP64_LIMIT: - requires_zip64 = "Central directory size" - if requires_zip64: - # Need to write the ZIP64 end-of-archive records - if not self._allowZip64: - raise LargeZipFile(requires_zip64 + - " would require ZIP64 extensions") - zip64endrec = struct.pack( - structEndArchive64, stringEndArchive64, - sizeEndCentDir64 - 12, 45, 45, 0, 0, centDirCount, centDirCount, - centDirSize, centDirOffset) - self.fp.write(zip64endrec) - - zip64locrec = struct.pack( - structEndArchive64Locator, - stringEndArchive64Locator, 0, pos2, 1) - self.fp.write(zip64locrec) - centDirCount = min(centDirCount, 0xFFFF) - centDirSize = min(centDirSize, 0xFFFFFFFF) - centDirOffset = min(centDirOffset, 0xFFFFFFFF) - - endrec = struct.pack(structEndArchive, stringEndArchive, - 0, 0, centDirCount, centDirCount, - centDirSize, centDirOffset, len(self._comment)) - self.fp.write(endrec) - self.fp.write(self._comment) - if self.mode == "a": - self.fp.truncate() - self.fp.flush() - - def _fpclose(self, fp): - assert self._fileRefCnt > 0 - self._fileRefCnt -= 1 - if not self._fileRefCnt and not self._filePassed: - fp.close() - - -class PyZipFile(ZipFile): - """Class to create ZIP archives with Python library files and packages.""" - - def __init__(self, file, mode="r", compression=ZIP_STORED, - allowZip64=True, optimize=-1): - ZipFile.__init__(self, file, mode=mode, compression=compression, - allowZip64=allowZip64) - self._optimize = optimize - - def writepy(self, pathname, basename="", filterfunc=None): - """Add all files from "pathname" to the ZIP archive. - - If pathname is a package directory, search the directory and - all package subdirectories recursively for all *.py and enter - the modules into the archive. If pathname is a plain - directory, listdir *.py and enter all modules. Else, pathname - must be a Python *.py file and the module will be put into the - archive. Added modules are always module.pyc. - This method will compile the module.py into module.pyc if - necessary. - If filterfunc(pathname) is given, it is called with every argument. - When it is False, the file or directory is skipped. 
- """ - pathname = os.fspath(pathname) - if filterfunc and not filterfunc(pathname): - if self.debug: - label = 'path' if os.path.isdir(pathname) else 'file' - print('%s %r skipped by filterfunc' % (label, pathname)) - return - dir, name = os.path.split(pathname) - if os.path.isdir(pathname): - initname = os.path.join(pathname, "__init__.py") - if os.path.isfile(initname): - # This is a package directory, add it - if basename: - basename = "%s/%s" % (basename, name) - else: - basename = name - if self.debug: - print("Adding package in", pathname, "as", basename) - fname, arcname = self._get_codename(initname[0:-3], basename) - if self.debug: - print("Adding", arcname) - self.write(fname, arcname) - dirlist = sorted(os.listdir(pathname)) - dirlist.remove("__init__.py") - # Add all *.py files and package subdirectories - for filename in dirlist: - path = os.path.join(pathname, filename) - root, ext = os.path.splitext(filename) - if os.path.isdir(path): - if os.path.isfile(os.path.join(path, "__init__.py")): - # This is a package directory, add it - self.writepy(path, basename, - filterfunc=filterfunc) # Recursive call - elif ext == ".py": - if filterfunc and not filterfunc(path): - if self.debug: - print('file %r skipped by filterfunc' % path) - continue - fname, arcname = self._get_codename(path[0:-3], - basename) - if self.debug: - print("Adding", arcname) - self.write(fname, arcname) - else: - # This is NOT a package directory, add its files at top level - if self.debug: - print("Adding files from directory", pathname) - for filename in sorted(os.listdir(pathname)): - path = os.path.join(pathname, filename) - root, ext = os.path.splitext(filename) - if ext == ".py": - if filterfunc and not filterfunc(path): - if self.debug: - print('file %r skipped by filterfunc' % path) - continue - fname, arcname = self._get_codename(path[0:-3], - basename) - if self.debug: - print("Adding", arcname) - self.write(fname, arcname) - else: - if pathname[-3:] != ".py": - raise RuntimeError( - 'Files added with writepy() must end with ".py"') - fname, arcname = self._get_codename(pathname[0:-3], basename) - if self.debug: - print("Adding file", arcname) - self.write(fname, arcname) - - def _get_codename(self, pathname, basename): - """Return (filename, archivename) for the path. - - Given a module name path, return the correct file path and - archive name, compiling if necessary. For example, given - /python/lib/string, return (/python/lib/string.pyc, string). - """ - def _compile(file, optimize=-1): - import py_compile - if self.debug: - print("Compiling", file) - try: - py_compile.compile(file, doraise=True, optimize=optimize) - except py_compile.PyCompileError as err: - print(err.msg) - return False - return True - - file_py = pathname + ".py" - file_pyc = pathname + ".pyc" - pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='') - pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1) - pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2) - if self._optimize == -1: - # legacy mode: use whatever file is present - if (os.path.isfile(file_pyc) and - os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime): - # Use .pyc file. - arcname = fname = file_pyc - elif (os.path.isfile(pycache_opt0) and - os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime): - # Use the __pycache__/*.pyc file, but write it to the legacy pyc - # file name in the archive. 
- fname = pycache_opt0
- arcname = file_pyc
- elif (os.path.isfile(pycache_opt1) and
- os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
- # Use the __pycache__/*.pyc file, but write it to the legacy pyc
- # file name in the archive.
- fname = pycache_opt1
- arcname = file_pyc
- elif (os.path.isfile(pycache_opt2) and
- os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
- # Use the __pycache__/*.pyc file, but write it to the legacy pyc
- # file name in the archive.
- fname = pycache_opt2
- arcname = file_pyc
- else:
- # Compile py into PEP 3147 pyc file.
- if _compile(file_py):
- if sys.flags.optimize == 0:
- fname = pycache_opt0
- elif sys.flags.optimize == 1:
- fname = pycache_opt1
- else:
- fname = pycache_opt2
- arcname = file_pyc
- else:
- fname = arcname = file_py
- else:
- # new mode: use given optimization level
- if self._optimize == 0:
- fname = pycache_opt0
- arcname = file_pyc
- else:
- arcname = file_pyc
- if self._optimize == 1:
- fname = pycache_opt1
- elif self._optimize == 2:
- fname = pycache_opt2
- else:
- msg = "invalid value for 'optimize': {!r}".format(self._optimize)
- raise ValueError(msg)
- if not (os.path.isfile(fname) and
- os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
- if not _compile(file_py, optimize=self._optimize):
- fname = arcname = file_py
- archivename = os.path.split(arcname)[1]
- if basename:
- archivename = "%s/%s" % (basename, archivename)
- return (fname, archivename)
-
-
-def main(args=None):
- import argparse
-
- description = 'A simple command-line interface for zipfile module.'
- parser = argparse.ArgumentParser(description=description)
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('-l', '--list', metavar='<zipfile>',
- help='Show listing of a zipfile')
- group.add_argument('-e', '--extract', nargs=2,
- metavar=('<zipfile>', '<output_dir>'),
- help='Extract zipfile into target dir')
- group.add_argument('-c', '--create', nargs='+',
- metavar=('<name>', '<file>'),
- help='Create zipfile from sources')
- group.add_argument('-t', '--test', metavar='<zipfile>',
- help='Test if a zipfile is valid')
- parser.add_argument('--metadata-encoding', metavar='<encoding>',
- help='Specify encoding of member names for -l, -e and -t')
- args = parser.parse_args(args)
-
- encoding = args.metadata_encoding
-
- if args.test is not None:
- src = args.test
- with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
- badfile = zf.testzip()
- if badfile:
- print("The following enclosed file is corrupted: {!r}".format(badfile))
- print("Done testing")
-
- elif args.list is not None:
- src = args.list
- with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
- zf.printdir()
-
- elif args.extract is not None:
- src, curdir = args.extract
- with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
- zf.extractall(curdir)
-
- elif args.create is not None:
- if encoding:
- print("Non-conforming encodings not supported with -c.",
- file=sys.stderr)
- sys.exit(1)
-
- zip_name = args.create.pop(0)
- files = args.create
-
- def addToZip(zf, path, zippath):
- if os.path.isfile(path):
- zf.write(path, zippath, ZIP_DEFLATED)
- elif os.path.isdir(path):
- if zippath:
- zf.write(path, zippath)
- for nm in sorted(os.listdir(path)):
- addToZip(zf,
- os.path.join(path, nm), os.path.join(zippath, nm))
- # else: ignore
-
- with ZipFile(zip_name, 'w') as zf:
- for path in files:
- zippath = os.path.basename(path)
- if not zippath:
- zippath = os.path.basename(os.path.dirname(path))
- if zippath in ('', os.curdir, os.pardir):
- zippath = ''
- addToZip(zf, path, zippath)
-
-
-from
._path import ( # noqa: E402 - Path, - - # used privately for tests - CompleteDirs, # noqa: F401 -) diff --git a/Python313_13_x64_Template/Lib/zipfile/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/zipfile/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 6e1ee558..00000000 Binary files a/Python313_13_x64_Template/Lib/zipfile/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/zipfile/_path/__init__.py b/Python313_13_x64_Template/Lib/zipfile/_path/__init__.py deleted file mode 100644 index 02f81171..00000000 --- a/Python313_13_x64_Template/Lib/zipfile/_path/__init__.py +++ /dev/null @@ -1,452 +0,0 @@ -""" -A Path-like interface for zipfiles. - -This codebase is shared between zipfile.Path in the stdlib -and zipp in PyPI. See -https://github.com/python/importlib_metadata/wiki/Development-Methodology -for more detail. -""" - -import contextlib -import io -import itertools -import pathlib -import posixpath -import re -import stat -import sys -import zipfile - -from .glob import Translator - -__all__ = ['Path'] - - -def _parents(path): - """ - Given a path with elements separated by - posixpath.sep, generate all parents of that path. - - >>> list(_parents('b/d')) - ['b'] - >>> list(_parents('/b/d/')) - ['/b'] - >>> list(_parents('b/d/f/')) - ['b/d', 'b'] - >>> list(_parents('b')) - [] - >>> list(_parents('')) - [] - """ - return itertools.islice(_ancestry(path), 1, None) - - -def _ancestry(path): - """ - Given a path with elements separated by - posixpath.sep, generate all elements of that path. - - >>> list(_ancestry('b/d')) - ['b/d', 'b'] - >>> list(_ancestry('/b/d/')) - ['/b/d', '/b'] - >>> list(_ancestry('b/d/f/')) - ['b/d/f', 'b/d', 'b'] - >>> list(_ancestry('b')) - ['b'] - >>> list(_ancestry('')) - [] - - Multiple separators are treated like a single. - - >>> list(_ancestry('//b//d///f//')) - ['//b//d///f', '//b//d', '//b'] - """ - path = path.rstrip(posixpath.sep) - while path.rstrip(posixpath.sep): - yield path - path, tail = posixpath.split(path) - - -_dedupe = dict.fromkeys -"""Deduplicate an iterable in original order""" - - -def _difference(minuend, subtrahend): - """ - Return items in minuend not in subtrahend, retaining order - with O(1) lookup. - """ - return itertools.filterfalse(set(subtrahend).__contains__, minuend) - - -class InitializedState: - """ - Mix-in to save the initialization state for pickling. - """ - - def __init__(self, *args, **kwargs): - self.__args = args - self.__kwargs = kwargs - super().__init__(*args, **kwargs) - - def __getstate__(self): - return self.__args, self.__kwargs - - def __setstate__(self, state): - args, kwargs = state - super().__init__(*args, **kwargs) - - -class CompleteDirs(InitializedState, zipfile.ZipFile): - """ - A ZipFile subclass that ensures that implied directories - are always included in the namelist. 
- - >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt'])) - ['foo/', 'foo/bar/'] - >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt', 'foo/bar/'])) - ['foo/'] - """ - - @staticmethod - def _implied_dirs(names): - parents = itertools.chain.from_iterable(map(_parents, names)) - as_dirs = (p + posixpath.sep for p in parents) - return _dedupe(_difference(as_dirs, names)) - - def namelist(self): - names = super().namelist() - return names + list(self._implied_dirs(names)) - - def _name_set(self): - return set(self.namelist()) - - def resolve_dir(self, name): - """ - If the name represents a directory, return that name - as a directory (with the trailing slash). - """ - names = self._name_set() - dirname = name + '/' - dir_match = name not in names and dirname in names - return dirname if dir_match else name - - def getinfo(self, name): - """ - Supplement getinfo for implied dirs. - """ - try: - return super().getinfo(name) - except KeyError: - if not name.endswith('/') or name not in self._name_set(): - raise - return zipfile.ZipInfo(filename=name) - - @classmethod - def make(cls, source): - """ - Given a source (filename or zipfile), return an - appropriate CompleteDirs subclass. - """ - if isinstance(source, CompleteDirs): - return source - - if not isinstance(source, zipfile.ZipFile): - return cls(source) - - # Only allow for FastLookup when supplied zipfile is read-only - if 'r' not in source.mode: - cls = CompleteDirs - - source.__class__ = cls - return source - - @classmethod - def inject(cls, zf: zipfile.ZipFile) -> zipfile.ZipFile: - """ - Given a writable zip file zf, inject directory entries for - any directories implied by the presence of children. - """ - for name in cls._implied_dirs(zf.namelist()): - zf.writestr(name, b"") - return zf - - -class FastLookup(CompleteDirs): - """ - ZipFile subclass to ensure implicit - dirs exist and are resolved rapidly. - """ - - def namelist(self): - with contextlib.suppress(AttributeError): - return self.__names - self.__names = super().namelist() - return self.__names - - def _name_set(self): - with contextlib.suppress(AttributeError): - return self.__lookup - self.__lookup = super()._name_set() - return self.__lookup - -def _extract_text_encoding(encoding=None, *args, **kwargs): - # compute stack level so that the caller of the caller sees any warning. - is_pypy = sys.implementation.name == 'pypy' - # PyPy no longer special cased after 7.3.19 (or maybe 7.3.18) - # See jaraco/zipp#143 - is_old_pypi = is_pypy and sys.pypy_version_info < (7, 3, 19) - stack_level = 3 + is_old_pypi - return io.text_encoding(encoding, stack_level), args, kwargs - - -class Path: - """ - A :class:`importlib.resources.abc.Traversable` interface for zip files. - - Implements many of the features users enjoy from - :class:`pathlib.Path`. - - Consider a zip file with this structure:: - - . - ├── a.txt - └── b - ├── c.txt - └── d - └── e.txt - - >>> data = io.BytesIO() - >>> zf = ZipFile(data, 'w') - >>> zf.writestr('a.txt', 'content of a') - >>> zf.writestr('b/c.txt', 'content of c') - >>> zf.writestr('b/d/e.txt', 'content of e') - >>> zf.filename = 'mem/abcde.zip' - - Path accepts the zipfile object itself or a filename - - >>> path = Path(zf) - - From there, several path operations are available. 
- - Directory iteration (including the zip file itself): - - >>> a, b = path.iterdir() - >>> a - Path('mem/abcde.zip', 'a.txt') - >>> b - Path('mem/abcde.zip', 'b/') - - name property: - - >>> b.name - 'b' - - join with divide operator: - - >>> c = b / 'c.txt' - >>> c - Path('mem/abcde.zip', 'b/c.txt') - >>> c.name - 'c.txt' - - Read text: - - >>> c.read_text(encoding='utf-8') - 'content of c' - - existence: - - >>> c.exists() - True - >>> (b / 'missing.txt').exists() - False - - Coercion to string: - - >>> import os - >>> str(c).replace(os.sep, posixpath.sep) - 'mem/abcde.zip/b/c.txt' - - At the root, ``name``, ``filename``, and ``parent`` - resolve to the zipfile. - - >>> str(path) - 'mem/abcde.zip/' - >>> path.name - 'abcde.zip' - >>> path.filename == pathlib.Path('mem/abcde.zip') - True - >>> str(path.parent) - 'mem' - - If the zipfile has no filename, such attributes are not - valid and accessing them will raise an Exception. - - >>> zf.filename = None - >>> path.name - Traceback (most recent call last): - ... - TypeError: ... - - >>> path.filename - Traceback (most recent call last): - ... - TypeError: ... - - >>> path.parent - Traceback (most recent call last): - ... - TypeError: ... - - # workaround python/cpython#106763 - >>> pass - """ - - __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" - - def __init__(self, root, at=""): - """ - Construct a Path from a ZipFile or filename. - - Note: When the source is an existing ZipFile object, - its type (__class__) will be mutated to a - specialized type. If the caller wishes to retain the - original type, the caller should either create a - separate ZipFile object or pass a filename. - """ - self.root = FastLookup.make(root) - self.at = at - - def __eq__(self, other): - """ - >>> Path(zipfile.ZipFile(io.BytesIO(), 'w')) == 'foo' - False - """ - if self.__class__ is not other.__class__: - return NotImplemented - return (self.root, self.at) == (other.root, other.at) - - def __hash__(self): - return hash((self.root, self.at)) - - def open(self, mode='r', *args, pwd=None, **kwargs): - """ - Open this entry as text or binary following the semantics - of ``pathlib.Path.open()`` by passing arguments through - to io.TextIOWrapper(). 
- """ - if self.is_dir(): - raise IsADirectoryError(self) - zip_mode = mode[0] - if zip_mode == 'r' and not self.exists(): - raise FileNotFoundError(self) - stream = self.root.open(self.at, zip_mode, pwd=pwd) - if 'b' in mode: - if args or kwargs: - raise ValueError("encoding args invalid for binary operation") - return stream - # Text mode: - encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) - return io.TextIOWrapper(stream, encoding, *args, **kwargs) - - def _base(self): - return pathlib.PurePosixPath(self.at) if self.at else self.filename - - @property - def name(self): - return self._base().name - - @property - def suffix(self): - return self._base().suffix - - @property - def suffixes(self): - return self._base().suffixes - - @property - def stem(self): - return self._base().stem - - @property - def filename(self): - return pathlib.Path(self.root.filename).joinpath(self.at) - - def read_text(self, *args, **kwargs): - encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) - with self.open('r', encoding, *args, **kwargs) as strm: - return strm.read() - - def read_bytes(self): - with self.open('rb') as strm: - return strm.read() - - def _is_child(self, path): - return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") - - def _next(self, at): - return self.__class__(self.root, at) - - def is_dir(self): - return not self.at or self.at.endswith("/") - - def is_file(self): - return self.exists() and not self.is_dir() - - def exists(self): - return self.at in self.root._name_set() - - def iterdir(self): - if not self.is_dir(): - raise ValueError("Can't listdir a file") - subs = map(self._next, self.root.namelist()) - return filter(self._is_child, subs) - - def match(self, path_pattern): - return pathlib.PurePosixPath(self.at).match(path_pattern) - - def is_symlink(self): - """ - Return whether this path is a symlink. 
- """ - info = self.root.getinfo(self.at) - mode = info.external_attr >> 16 - return stat.S_ISLNK(mode) - - def glob(self, pattern): - if not pattern: - raise ValueError(f"Unacceptable pattern: {pattern!r}") - - prefix = re.escape(self.at) - tr = Translator(seps='/') - matches = re.compile(prefix + tr.translate(pattern)).fullmatch - return map(self._next, filter(matches, self.root.namelist())) - - def rglob(self, pattern): - return self.glob(f'**/{pattern}') - - def relative_to(self, other, *extra): - return posixpath.relpath(str(self), str(other.joinpath(*extra))) - - def __str__(self): - return posixpath.join(self.root.filename, self.at) - - def __repr__(self): - return self.__repr.format(self=self) - - def joinpath(self, *other): - next = posixpath.join(self.at, *other) - return self._next(self.root.resolve_dir(next)) - - __truediv__ = joinpath - - @property - def parent(self): - if not self.at: - return self.filename.parent - parent_at = posixpath.dirname(self.at.rstrip('/')) - if parent_at: - parent_at += '/' - return self._next(parent_at) diff --git a/Python313_13_x64_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-313.pyc b/Python313_13_x64_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index f32d8612..00000000 Binary files a/Python313_13_x64_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/zipfile/_path/__pycache__/glob.cpython-313.pyc b/Python313_13_x64_Template/Lib/zipfile/_path/__pycache__/glob.cpython-313.pyc deleted file mode 100644 index b7a9dca7..00000000 Binary files a/Python313_13_x64_Template/Lib/zipfile/_path/__pycache__/glob.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x64_Template/Lib/zipfile/_path/glob.py b/Python313_13_x64_Template/Lib/zipfile/_path/glob.py deleted file mode 100644 index 4ed74cc4..00000000 --- a/Python313_13_x64_Template/Lib/zipfile/_path/glob.py +++ /dev/null @@ -1,113 +0,0 @@ -import os -import re - -_default_seps = os.sep + str(os.altsep) * bool(os.altsep) - - -class Translator: - """ - >>> Translator('xyz') - Traceback (most recent call last): - ... - AssertionError: Invalid separators - - >>> Translator('') - Traceback (most recent call last): - ... - AssertionError: Invalid separators - """ - - seps: str - - def __init__(self, seps: str = _default_seps): - assert seps and set(seps) <= set(_default_seps), "Invalid separators" - self.seps = seps - - def translate(self, pattern): - """ - Given a glob pattern, produce a regex that matches it. - """ - return self.extend(self.match_dirs(self.translate_core(pattern))) - - def extend(self, pattern): - r""" - Extend regex for pattern-wide concerns. - - Apply '(?s:)' to create a non-matching group that - matches newlines (valid on Unix). - - Append '\Z' to imply fullmatch even when match is used. - """ - return rf'(?s:{pattern})\Z' - - def match_dirs(self, pattern): - """ - Ensure that zipfile.Path directory names are matched. - - zipfile.Path directory names always end in a slash. - """ - return rf'{pattern}[/]?' - - def translate_core(self, pattern): - r""" - Given a glob pattern, produce a regex that matches it. 
-
- >>> t = Translator()
- >>> t.translate_core('*.txt').replace('\\\\', '')
- '[^/]*\\.txt'
- >>> t.translate_core('a?txt')
- 'a[^/]txt'
- >>> t.translate_core('**/*').replace('\\\\', '')
- '.*/[^/][^/]*'
- """
- self.restrict_rglob(pattern)
- return ''.join(map(self.replace, separate(self.star_not_empty(pattern))))
-
- def replace(self, match):
- """
- Perform the replacements for a match from :func:`separate`.
- """
- return match.group('set') or (
- re.escape(match.group(0))
- .replace('\\*\\*', r'.*')
- .replace('\\*', rf'[^{re.escape(self.seps)}]*')
- .replace('\\?', r'[^/]')
- )
-
- def restrict_rglob(self, pattern):
- """
- Raise ValueError if ** appears in anything but a full path segment.
-
- >>> Translator().translate('**foo')
- Traceback (most recent call last):
- ...
- ValueError: ** must appear alone in a path segment
- """
- seps_pattern = rf'[{re.escape(self.seps)}]+'
- segments = re.split(seps_pattern, pattern)
- if any('**' in segment and segment != '**' for segment in segments):
- raise ValueError("** must appear alone in a path segment")
-
- def star_not_empty(self, pattern):
- """
- Ensure that * will not match an empty segment.
- """
-
- def handle_segment(match):
- segment = match.group(0)
- return '?*' if segment == '*' else segment
-
- not_seps_pattern = rf'[^{re.escape(self.seps)}]+'
- return re.sub(not_seps_pattern, handle_segment, pattern)
-
-
-def separate(pattern):
- """
- Separate out character sets to avoid translating their contents.
-
- >>> [m.group(0) for m in separate('*.txt')]
- ['*.txt']
- >>> [m.group(0) for m in separate('a[?]txt')]
- ['a', '[?]', 'txt']
- """
- return re.finditer(r'([^\[]+)|(?P<set>[\[].*?[\]])|([\[][^\]]*$)', pattern)
diff --git a/Python313_13_x64_Template/Lib/zipimport.py b/Python313_13_x64_Template/Lib/zipimport.py
deleted file mode 100644
index fb312be1..00000000
--- a/Python313_13_x64_Template/Lib/zipimport.py
+++ /dev/null
@@ -1,803 +0,0 @@
-"""zipimport provides support for importing Python modules from Zip archives.
-
-This module exports two objects:
-- zipimporter: a class; its constructor takes a path to a Zip archive.
-- ZipImportError: exception raised by zipimporter objects. It's a
- subclass of ImportError, so it can be caught as ImportError, too.
-
-It is usually not needed to use the zipimport module explicitly; it is
-used by the builtin import mechanism for sys.path items that are paths
-to Zip archives.
-""" - -#from importlib import _bootstrap_external -#from importlib import _bootstrap # for _verbose_message -import _frozen_importlib_external as _bootstrap_external -from _frozen_importlib_external import _unpack_uint16, _unpack_uint32, _unpack_uint64 -import _frozen_importlib as _bootstrap # for _verbose_message -import _imp # for check_hash_based_pycs -import _io # for open -import marshal # for loads -import sys # for modules -import time # for mktime -import _warnings # For warn() - -__all__ = ['ZipImportError', 'zipimporter'] - - -path_sep = _bootstrap_external.path_sep -alt_path_sep = _bootstrap_external.path_separators[1:] - - -class ZipImportError(ImportError): - pass - -# _read_directory() cache -_zip_directory_cache = {} - -_module_type = type(sys) - -END_CENTRAL_DIR_SIZE = 22 -END_CENTRAL_DIR_SIZE_64 = 56 -END_CENTRAL_DIR_LOCATOR_SIZE_64 = 20 -STRING_END_ARCHIVE = b'PK\x05\x06' # standard EOCD signature -STRING_END_LOCATOR_64 = b'PK\x06\x07' # Zip64 EOCD Locator signature -STRING_END_ZIP_64 = b'PK\x06\x06' # Zip64 EOCD signature -MAX_COMMENT_LEN = (1 << 16) - 1 -MAX_UINT32 = 0xffffffff -ZIP64_EXTRA_TAG = 0x1 - -class zipimporter(_bootstrap_external._LoaderBasics): - """zipimporter(archivepath) -> zipimporter object - - Create a new zipimporter instance. 'archivepath' must be a path to - a zipfile, or to a specific path inside a zipfile. For example, it can be - '/tmp/myimport.zip', or '/tmp/myimport.zip/mydirectory', if mydirectory is a - valid directory inside the archive. - - 'ZipImportError is raised if 'archivepath' doesn't point to a valid Zip - archive. - - The 'archive' attribute of zipimporter objects contains the name of the - zipfile targeted. - """ - - # Split the "subdirectory" from the Zip archive path, lookup a matching - # entry in sys.path_importer_cache, fetch the file directory from there - # if found, or else read it from the archive. - def __init__(self, path): - if not isinstance(path, str): - raise TypeError(f"expected str, not {type(path)!r}") - if not path: - raise ZipImportError('archive path is empty', path=path) - if alt_path_sep: - path = path.replace(alt_path_sep, path_sep) - - prefix = [] - while True: - try: - st = _bootstrap_external._path_stat(path) - except (OSError, ValueError): - # On Windows a ValueError is raised for too long paths. - # Back up one path element. - dirname, basename = _bootstrap_external._path_split(path) - if dirname == path: - raise ZipImportError('not a Zip file', path=path) - path = dirname - prefix.append(basename) - else: - # it exists - if (st.st_mode & 0o170000) != 0o100000: # stat.S_ISREG - # it's a not file - raise ZipImportError('not a Zip file', path=path) - break - - if path not in _zip_directory_cache: - _zip_directory_cache[path] = _read_directory(path) - self.archive = path - # a prefix directory following the ZIP file path. - self.prefix = _bootstrap_external._path_join(*prefix[::-1]) - if self.prefix: - self.prefix += path_sep - - - def find_spec(self, fullname, target=None): - """Create a ModuleSpec for the specified module. - - Returns None if the module cannot be found. - """ - module_info = _get_module_info(self, fullname) - if module_info is not None: - return _bootstrap.spec_from_loader(fullname, self, is_package=module_info) - else: - # Not a module or regular package. See if this is a directory, and - # therefore possibly a portion of a namespace package. - - # We're only interested in the last path component of fullname - # earlier components are recorded in self.prefix. 
- modpath = _get_module_path(self, fullname) - if _is_dir(self, modpath): - # This is possibly a portion of a namespace - # package. Return the string representing its path, - # without a trailing separator. - path = f'{self.archive}{path_sep}{modpath}' - spec = _bootstrap.ModuleSpec(name=fullname, loader=None, - is_package=True) - spec.submodule_search_locations.append(path) - return spec - else: - return None - - def get_code(self, fullname): - """get_code(fullname) -> code object. - - Return the code object for the specified module. Raise ZipImportError - if the module couldn't be imported. - """ - code, ispackage, modpath = _get_module_code(self, fullname) - return code - - - def get_data(self, pathname): - """get_data(pathname) -> string with file data. - - Return the data associated with 'pathname'. Raise OSError if - the file wasn't found. - """ - if alt_path_sep: - pathname = pathname.replace(alt_path_sep, path_sep) - - key = pathname - if pathname.startswith(self.archive + path_sep): - key = pathname[len(self.archive + path_sep):] - - try: - toc_entry = self._get_files()[key] - except KeyError: - raise OSError(0, '', key) - return _get_data(self.archive, toc_entry) - - - # Return a string matching __file__ for the named module - def get_filename(self, fullname): - """get_filename(fullname) -> filename string. - - Return the filename for the specified module or raise ZipImportError - if it couldn't be imported. - """ - # Deciding the filename requires working out where the code - # would come from if the module was actually loaded - code, ispackage, modpath = _get_module_code(self, fullname) - return modpath - - - def get_source(self, fullname): - """get_source(fullname) -> source string. - - Return the source code for the specified module. Raise ZipImportError - if the module couldn't be found, return None if the archive does - contain the module, but has no source for it. - """ - mi = _get_module_info(self, fullname) - if mi is None: - raise ZipImportError(f"can't find module {fullname!r}", name=fullname) - - path = _get_module_path(self, fullname) - if mi: - fullpath = _bootstrap_external._path_join(path, '__init__.py') - else: - fullpath = f'{path}.py' - - try: - toc_entry = self._get_files()[fullpath] - except KeyError: - # we have the module, but no source - return None - return _get_data(self.archive, toc_entry).decode() - - - # Return a bool signifying whether the module is a package or not. - def is_package(self, fullname): - """is_package(fullname) -> bool. - - Return True if the module specified by fullname is a package. - Raise ZipImportError if the module couldn't be found. - """ - mi = _get_module_info(self, fullname) - if mi is None: - raise ZipImportError(f"can't find module {fullname!r}", name=fullname) - return mi - - - # Load and return the module named by 'fullname'. - def load_module(self, fullname): - """load_module(fullname) -> module. - - Load the module specified by 'fullname'. 'fullname' must be the - fully qualified (dotted) module name. It returns the imported - module, or raises ZipImportError if it could not be imported. - - Deprecated since Python 3.10. Use exec_module() instead. 
- """ - msg = ("zipimport.zipimporter.load_module() is deprecated and slated for " - "removal in Python 3.12; use exec_module() instead") - _warnings.warn(msg, DeprecationWarning) - code, ispackage, modpath = _get_module_code(self, fullname) - mod = sys.modules.get(fullname) - if mod is None or not isinstance(mod, _module_type): - mod = _module_type(fullname) - sys.modules[fullname] = mod - mod.__loader__ = self - - try: - if ispackage: - # add __path__ to the module *before* the code gets - # executed - path = _get_module_path(self, fullname) - fullpath = _bootstrap_external._path_join(self.archive, path) - mod.__path__ = [fullpath] - - if not hasattr(mod, '__builtins__'): - mod.__builtins__ = __builtins__ - _bootstrap_external._fix_up_module(mod.__dict__, fullname, modpath) - exec(code, mod.__dict__) - except: - del sys.modules[fullname] - raise - - try: - mod = sys.modules[fullname] - except KeyError: - raise ImportError(f'Loaded module {fullname!r} not found in sys.modules') - _bootstrap._verbose_message('import {} # loaded from Zip {}', fullname, modpath) - return mod - - - def get_resource_reader(self, fullname): - """Return the ResourceReader for a module in a zip file.""" - from importlib.readers import ZipReader - - return ZipReader(self, fullname) - - - def _get_files(self): - """Return the files within the archive path.""" - try: - files = _zip_directory_cache[self.archive] - except KeyError: - try: - files = _zip_directory_cache[self.archive] = _read_directory(self.archive) - except ZipImportError: - files = {} - - return files - - - def invalidate_caches(self): - """Invalidates the cache of file data of the archive path.""" - _zip_directory_cache.pop(self.archive, None) - - - def __repr__(self): - return f'' - - -# _zip_searchorder defines how we search for a module in the Zip -# archive: we first search for a package __init__, then for -# non-package .pyc, and .py entries. The .pyc entries -# are swapped by initzipimport() if we run in optimized mode. Also, -# '/' is replaced by path_sep there. -_zip_searchorder = ( - (path_sep + '__init__.pyc', True, True), - (path_sep + '__init__.py', False, True), - ('.pyc', True, False), - ('.py', False, False), -) - -# Given a module name, return the potential file path in the -# archive (without extension). -def _get_module_path(self, fullname): - return self.prefix + fullname.rpartition('.')[2] - -# Does this path represent a directory? -def _is_dir(self, path): - # See if this is a "directory". If so, it's eligible to be part - # of a namespace package. We test by seeing if the name, with an - # appended path separator, exists. - dirpath = path + path_sep - # If dirpath is present in self._get_files(), we have a directory. - return dirpath in self._get_files() - -# Return some information about a module. -def _get_module_info(self, fullname): - path = _get_module_path(self, fullname) - for suffix, isbytecode, ispackage in _zip_searchorder: - fullpath = path + suffix - if fullpath in self._get_files(): - return ispackage - return None - - -# implementation - -# _read_directory(archive) -> files dict (new reference) -# -# Given a path to a Zip archive, build a dict, mapping file names -# (local to the archive, using SEP as a separator) to toc entries. 
-#
-# A toc_entry is a tuple:
-#
-#   (__file__,      # value to use for __file__, available for all files,
-#                   # encoded to the filesystem encoding
-#    compress,      # compression kind; 0 for uncompressed
-#    data_size,     # size of compressed data on disk
-#    file_size,     # size of decompressed data
-#    file_offset,   # offset of file header from start of archive
-#    time,          # mod time of file (in dos format)
-#    date,          # mod date of file (in dos format)
-#    crc,           # crc checksum of the data
-#   )
-#
-# Directories can be recognized by the trailing path_sep in the name,
-# data_size and file_offset are 0.
-def _read_directory(archive):
-    try:
-        fp = _io.open_code(archive)
-    except OSError:
-        raise ZipImportError(f"can't open Zip file: {archive!r}", path=archive)
-
-    with fp:
-        # GH-87235: On macOS all file descriptors for /dev/fd/N share the same
-        # file offset, reset the file offset after scanning the zipfile directory
-        # to not cause problems when some runs 'python3 /dev/fd/9 9<some_script'
-        start_offset = fp.tell()
-        try:
-            # Check if there's a comment.
-            try:
-                fp.seek(0, 2)
-                file_size = fp.tell()
-            except OSError:
-                raise ZipImportError(f"can't read Zip file: {archive!r}",
-                                     path=archive)
-            max_comment_plus_dirs_size = (
-                MAX_COMMENT_LEN + END_CENTRAL_DIR_SIZE +
-                END_CENTRAL_DIR_SIZE_64 + END_CENTRAL_DIR_LOCATOR_SIZE_64)
-            max_comment_start = max(file_size - max_comment_plus_dirs_size, 0)
-            try:
-                fp.seek(max_comment_start)
-                data = fp.read(max_comment_plus_dirs_size)
-            except OSError:
-                raise ZipImportError(f"can't read Zip file: {archive!r}",
-                                     path=archive)
-            pos = data.rfind(STRING_END_ARCHIVE)
-            pos64 = data.rfind(STRING_END_ZIP_64)
-
-            if (pos64 >= 0 and pos64+END_CENTRAL_DIR_SIZE_64+END_CENTRAL_DIR_LOCATOR_SIZE_64==pos):
-                # Zip64 at "correct" offset from standard EOCD
-                buffer = data[pos64:pos64 + END_CENTRAL_DIR_SIZE_64]
-                if len(buffer) != END_CENTRAL_DIR_SIZE_64:
-                    raise ZipImportError(
-                        f"corrupt Zip64 file: Expected {END_CENTRAL_DIR_SIZE_64} byte "
-                        f"zip64 central directory, but read {len(buffer)} bytes.",
-                        path=archive)
-                header_position = file_size - len(data) + pos64
-
-                central_directory_size = _unpack_uint64(buffer[40:48])
-                central_directory_position = _unpack_uint64(buffer[48:56])
-                num_entries = _unpack_uint64(buffer[24:32])
-            elif pos >= 0:
-                buffer = data[pos:pos+END_CENTRAL_DIR_SIZE]
-                if len(buffer) != END_CENTRAL_DIR_SIZE:
-                    raise ZipImportError(f"corrupt Zip file: {archive!r}",
-                                         path=archive)
-
-                header_position = file_size - len(data) + pos
-
-                # Buffer now contains a valid EOCD, and header_position gives the
-                # starting position of it.
-                central_directory_size = _unpack_uint32(buffer[12:16])
-                central_directory_position = _unpack_uint32(buffer[16:20])
-                num_entries = _unpack_uint16(buffer[8:10])
-
-                # N.b. if someday you want to prefer the standard (non-zip64) EOCD,
-                # you need to adjust position by 76 for arc to be 0.
-            else:
-                raise ZipImportError(f'not a Zip file: {archive!r}',
-                                     path=archive)
-
-            # Buffer now contains a valid EOCD, and header_position gives the
-            # starting position of it.
-            # XXX: These are cursory checks but are not as exact or strict as they
-            # could be. Checking the arc-adjusted value is probably good too.
-            if header_position < central_directory_size:
-                raise ZipImportError(f'bad central directory size: {archive!r}', path=archive)
-            if header_position < central_directory_position:
-                raise ZipImportError(f'bad central directory offset: {archive!r}', path=archive)
-            header_position -= central_directory_size
-            # On just-a-zipfile these values are the same and arc_offset is zero; if
-            # the file has some bytes prepended, `arc_offset` is the number of such
-            # bytes. This is used for pex as well as self-extracting .exe.
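The prepended-bytes case described in the comment above can be demonstrated with the stdlib `zipfile` module, which locates the end-of-central-directory record from the end of the file in the same way. A minimal editorial sketch (the stub bytes are made up):

```python
import io
import zipfile

# Build a small in-memory archive, then prepend a fake launcher stub.
buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('pkg/__init__.py', '')
stub = b'#!/usr/bin/env python3\n'   # hypothetical self-extractor stub

# The archive still opens: every member's offset within the file is shifted
# by len(stub), which is what arc_offset compensates for below.
with zipfile.ZipFile(io.BytesIO(stub + buf.getvalue())) as zf:
    print(zf.namelist())             # ['pkg/__init__.py']
```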
- arc_offset = header_position - central_directory_position - if arc_offset < 0: - raise ZipImportError(f'bad central directory size or offset: {archive!r}', path=archive) - - files = {} - # Start of Central Directory - count = 0 - try: - fp.seek(header_position) - except OSError: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - while True: - buffer = fp.read(46) - if len(buffer) < 4: - raise EOFError('EOF read where not expected') - # Start of file header - if buffer[:4] != b'PK\x01\x02': - if count != num_entries: - raise ZipImportError( - f"mismatched num_entries: {count} should be {num_entries} in {archive!r}", - path=archive, - ) - break # Bad: Central Dir File Header - if len(buffer) != 46: - raise EOFError('EOF read where not expected') - flags = _unpack_uint16(buffer[8:10]) - compress = _unpack_uint16(buffer[10:12]) - time = _unpack_uint16(buffer[12:14]) - date = _unpack_uint16(buffer[14:16]) - crc = _unpack_uint32(buffer[16:20]) - data_size = _unpack_uint32(buffer[20:24]) - file_size = _unpack_uint32(buffer[24:28]) - name_size = _unpack_uint16(buffer[28:30]) - extra_size = _unpack_uint16(buffer[30:32]) - comment_size = _unpack_uint16(buffer[32:34]) - file_offset = _unpack_uint32(buffer[42:46]) - header_size = name_size + extra_size + comment_size - - try: - name = fp.read(name_size) - except OSError: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - if len(name) != name_size: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - # On Windows, calling fseek to skip over the fields we don't use is - # slower than reading the data because fseek flushes stdio's - # internal buffers. See issue #8745. - try: - extra_data_len = header_size - name_size - extra_data = memoryview(fp.read(extra_data_len)) - - if len(extra_data) != extra_data_len: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - except OSError: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - - if flags & 0x800: - # UTF-8 file names extension - name = name.decode() - else: - # Historical ZIP filename encoding - try: - name = name.decode('ascii') - except UnicodeDecodeError: - name = name.decode('latin1').translate(cp437_table) - - name = name.replace('/', path_sep) - path = _bootstrap_external._path_join(archive, name) - - # Ordering matches unpacking below. - if ( - file_size == MAX_UINT32 or - data_size == MAX_UINT32 or - file_offset == MAX_UINT32 - ): - # need to decode extra_data looking for a zip64 extra (which might not - # be present) - while extra_data: - if len(extra_data) < 4: - raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) - tag = _unpack_uint16(extra_data[:2]) - size = _unpack_uint16(extra_data[2:4]) - if len(extra_data) < 4 + size: - raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) - if tag == ZIP64_EXTRA_TAG: - if (len(extra_data) - 4) % 8 != 0: - raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) - num_extra_values = (len(extra_data) - 4) // 8 - if num_extra_values > 3: - raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) - import struct - values = list(struct.unpack_from(f"<{min(num_extra_values, 3)}Q", - extra_data, offset=4)) - - # N.b. 
Here be dragons: the ordering of these is different than - # the header fields, and it's really easy to get it wrong since - # naturally-occuring zips that use all 3 are >4GB - if file_size == MAX_UINT32: - file_size = values.pop(0) - if data_size == MAX_UINT32: - data_size = values.pop(0) - if file_offset == MAX_UINT32: - file_offset = values.pop(0) - - break - - # For a typical zip, this bytes-slicing only happens 2-3 times, on - # small data like timestamps and filesizes. - extra_data = extra_data[4+size:] - else: - _bootstrap._verbose_message( - "zipimport: suspected zip64 but no zip64 extra for {!r}", - path, - ) - # XXX These two statements seem swapped because `central_directory_position` - # is a position within the actual file, but `file_offset` (when compared) is - # as encoded in the entry, not adjusted for this file. - # N.b. this must be after we've potentially read the zip64 extra which can - # change `file_offset`. - if file_offset > central_directory_position: - raise ZipImportError(f'bad local header offset: {archive!r}', path=archive) - file_offset += arc_offset - - t = (path, compress, data_size, file_size, file_offset, time, date, crc) - files[name] = t - count += 1 - finally: - fp.seek(start_offset) - _bootstrap._verbose_message('zipimport: found {} names in {!r}', count, archive) - return files - -# During bootstrap, we may need to load the encodings -# package from a ZIP file. But the cp437 encoding is implemented -# in Python in the encodings package. -# -# Break out of this dependency by using the translation table for -# the cp437 encoding. -cp437_table = ( - # ASCII part, 8 rows x 16 chars - '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f' - '\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f' - ' !"#$%&\'()*+,-./' - '0123456789:;<=>?' - '@ABCDEFGHIJKLMNO' - 'PQRSTUVWXYZ[\\]^_' - '`abcdefghijklmno' - 'pqrstuvwxyz{|}~\x7f' - # non-ASCII part, 16 rows x 8 chars - '\xc7\xfc\xe9\xe2\xe4\xe0\xe5\xe7' - '\xea\xeb\xe8\xef\xee\xec\xc4\xc5' - '\xc9\xe6\xc6\xf4\xf6\xf2\xfb\xf9' - '\xff\xd6\xdc\xa2\xa3\xa5\u20a7\u0192' - '\xe1\xed\xf3\xfa\xf1\xd1\xaa\xba' - '\xbf\u2310\xac\xbd\xbc\xa1\xab\xbb' - '\u2591\u2592\u2593\u2502\u2524\u2561\u2562\u2556' - '\u2555\u2563\u2551\u2557\u255d\u255c\u255b\u2510' - '\u2514\u2534\u252c\u251c\u2500\u253c\u255e\u255f' - '\u255a\u2554\u2569\u2566\u2560\u2550\u256c\u2567' - '\u2568\u2564\u2565\u2559\u2558\u2552\u2553\u256b' - '\u256a\u2518\u250c\u2588\u2584\u258c\u2590\u2580' - '\u03b1\xdf\u0393\u03c0\u03a3\u03c3\xb5\u03c4' - '\u03a6\u0398\u03a9\u03b4\u221e\u03c6\u03b5\u2229' - '\u2261\xb1\u2265\u2264\u2320\u2321\xf7\u2248' - '\xb0\u2219\xb7\u221a\u207f\xb2\u25a0\xa0' -) - -_importing_zlib = False - -# Return the zlib.decompress function object, or NULL if zlib couldn't -# be imported. The function is cached when found, so subsequent calls -# don't import zlib again. -def _get_decompress_func(): - global _importing_zlib - if _importing_zlib: - # Someone has a zlib.py[co] in their Zip file - # let's avoid a stack overflow. 
-        _bootstrap._verbose_message('zipimport: zlib UNAVAILABLE')
-        raise ZipImportError("can't decompress data; zlib not available")
-
-    _importing_zlib = True
-    try:
-        from zlib import decompress
-    except Exception:
-        _bootstrap._verbose_message('zipimport: zlib UNAVAILABLE')
-        raise ZipImportError("can't decompress data; zlib not available")
-    finally:
-        _importing_zlib = False
-
-    _bootstrap._verbose_message('zipimport: zlib available')
-    return decompress
-
-# Given a path to a Zip file and a toc_entry, return the (uncompressed) data.
-def _get_data(archive, toc_entry):
-    datapath, compress, data_size, file_size, file_offset, time, date, crc = toc_entry
-    if data_size < 0:
-        raise ZipImportError('negative data size')
-
-    with _io.open_code(archive) as fp:
-        # Check to make sure the local file header is correct
-        try:
-            fp.seek(file_offset)
-        except OSError:
-            raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
-        buffer = fp.read(30)
-        if len(buffer) != 30:
-            raise EOFError('EOF read where not expected')
-
-        if buffer[:4] != b'PK\x03\x04':
-            # Bad: Local File Header
-            raise ZipImportError(f'bad local file header: {archive!r}', path=archive)
-
-        name_size = _unpack_uint16(buffer[26:28])
-        extra_size = _unpack_uint16(buffer[28:30])
-        header_size = 30 + name_size + extra_size
-        file_offset += header_size  # Start of file data
-        try:
-            fp.seek(file_offset)
-        except OSError:
-            raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
-        raw_data = fp.read(data_size)
-        if len(raw_data) != data_size:
-            raise OSError("zipimport: can't read data")
-
-    if compress == 0:
-        # data is not compressed
-        return raw_data
-
-    # Decompress with zlib
-    try:
-        decompress = _get_decompress_func()
-    except Exception:
-        raise ZipImportError("can't decompress data; zlib not available")
-    return decompress(raw_data, -15)
-
-
-# Lenient date/time comparison function. The precision of the mtime
-# in the archive is lower than the mtime stored in a .pyc: we
-# must allow a difference of at most one second.
-def _eq_mtime(t1, t2):
-    # dostime only stores even seconds, so be lenient
-    return abs(t1 - t2) <= 1
-
-
-# Given the contents of a .py[co] file, unmarshal the data
-# and return the code object. Raises ImportError if the magic word doesn't
-# match, or if the recorded .py[co] metadata does not match the source.
-def _unmarshal_code(self, pathname, fullpath, fullname, data):
-    exc_details = {
-        'name': fullname,
-        'path': fullpath,
-    }
-
-    flags = _bootstrap_external._classify_pyc(data, fullname, exc_details)
-
-    hash_based = flags & 0b1 != 0
-    if hash_based:
-        check_source = flags & 0b10 != 0
-        if (_imp.check_hash_based_pycs != 'never' and
-                (check_source or _imp.check_hash_based_pycs == 'always')):
-            source_bytes = _get_pyc_source(self, fullpath)
-            if source_bytes is not None:
-                source_hash = _imp.source_hash(
-                    _bootstrap_external._RAW_MAGIC_NUMBER,
-                    source_bytes,
-                )
-
-                _bootstrap_external._validate_hash_pyc(
-                    data, source_hash, fullname, exc_details)
-    else:
-        source_mtime, source_size = \
-            _get_mtime_and_size_of_source(self, fullpath)
-
-        if source_mtime:
-            # We don't use _bootstrap_external._validate_timestamp_pyc
-            # to allow for a more lenient timestamp check.
-            if (not _eq_mtime(_unpack_uint32(data[8:12]), source_mtime) or
-                    _unpack_uint32(data[12:16]) != source_size):
-                _bootstrap._verbose_message(
-                    f'bytecode is stale for {fullname!r}')
-                return None
-
-    code = marshal.loads(data[16:])
-    if not isinstance(code, _code_type):
-        raise TypeError(f'compiled module {pathname!r} is not a code object')
-    return code
-
-_code_type = type(_unmarshal_code.__code__)
-
-
-# Replace any occurrences of '\r\n?' in the input string with '\n'.
-# This converts DOS and Mac line endings to Unix line endings.
-def _normalize_line_endings(source):
-    source = source.replace(b'\r\n', b'\n')
-    source = source.replace(b'\r', b'\n')
-    return source
-
-# Given a string buffer containing Python source code, compile it
-# and return a code object.
-def _compile_source(pathname, source):
-    source = _normalize_line_endings(source)
-    return compile(source, pathname, 'exec', dont_inherit=True)
-
-# Convert the date/time values found in the Zip archive to a value
-# that's compatible with the time stamp stored in .pyc files.
-def _parse_dostime(d, t):
-    return time.mktime((
-        (d >> 9) + 1980,    # bits 9..15: year
-        (d >> 5) & 0xF,     # bits 5..8: month
-        d & 0x1F,           # bits 0..4: day
-        t >> 11,            # bits 11..15: hours
-        (t >> 5) & 0x3F,    # bits 5..10: minutes
-        (t & 0x1F) * 2,     # bits 0..4: seconds / 2
-        -1, -1, -1))
-
-# Given a path to a .pyc file in the archive, return the
-# modification time of the matching .py file and its size,
-# or (0, 0) if no source is available.
-def _get_mtime_and_size_of_source(self, path):
-    try:
-        # strip 'c' or 'o' from *.py[co]
-        assert path[-1:] in ('c', 'o')
-        path = path[:-1]
-        toc_entry = self._get_files()[path]
-        # fetch the time stamp of the .py file for comparison
-        # with an embedded pyc time stamp
-        time = toc_entry[5]
-        date = toc_entry[6]
-        uncompressed_size = toc_entry[3]
-        return _parse_dostime(date, time), uncompressed_size
-    except (KeyError, IndexError, TypeError):
-        return 0, 0
-
-
-# Given a path to a .pyc file in the archive, return the
-# contents of the matching .py file, or None if no source
-# is available.
-def _get_pyc_source(self, path):
-    # strip 'c' or 'o' from *.py[co]
-    assert path[-1:] in ('c', 'o')
-    path = path[:-1]
-
-    try:
-        toc_entry = self._get_files()[path]
-    except KeyError:
-        return None
-    else:
-        return _get_data(self.archive, toc_entry)
-
-
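For reference, a small self-checking editorial sketch of the DOS date/time packing that `_parse_dostime()` decodes (the timestamp is made up):

```python
# 2024-07-15 13:45:30 packed into DOS date/time words. The year is stored
# relative to 1980, minutes occupy bits 5..10, and seconds are stored
# halved in bits 0..4 (so odd seconds cannot be represented exactly).
d = ((2024 - 1980) << 9) | (7 << 5) | 15   # date word
t = (13 << 11) | (45 << 5) | (30 // 2)     # time word

assert ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F) == (2024, 7, 15)
assert (t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2) == (13, 45, 30)
```

-# Get the code object associated with the module specified by
-# 'fullname'.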
-def _get_module_code(self, fullname): - path = _get_module_path(self, fullname) - import_error = None - for suffix, isbytecode, ispackage in _zip_searchorder: - fullpath = path + suffix - _bootstrap._verbose_message('trying {}{}{}', self.archive, path_sep, fullpath, verbosity=2) - try: - toc_entry = self._get_files()[fullpath] - except KeyError: - pass - else: - modpath = toc_entry[0] - data = _get_data(self.archive, toc_entry) - code = None - if isbytecode: - try: - code = _unmarshal_code(self, modpath, fullpath, fullname, data) - except ImportError as exc: - import_error = exc - else: - code = _compile_source(modpath, data) - if code is None: - # bad magic number or non-matching mtime - # in byte code, try next - continue - modpath = toc_entry[0] - return code, ispackage, modpath - else: - if import_error: - msg = f"module load failed: {import_error}" - raise ZipImportError(msg, name=fullname) from import_error - else: - raise ZipImportError(f"can't find module {fullname!r}", name=fullname) diff --git a/Python313_13_x64_Template/NEWS.txt b/Python313_13_x64_Template/NEWS.txt deleted file mode 100644 index 3c37b7d3..00000000 --- a/Python313_13_x64_Template/NEWS.txt +++ /dev/null @@ -1,51043 +0,0 @@ -+++++++++++ -Python News -+++++++++++ - -What's New in Python 3.13.13 final? -=================================== - -*Release date: 2026-04-07* - -macOS ------ - -- gh-144551: Update macOS installer to use OpenSSL 3.0.19. - -- gh-137586: Invoke :program:`osascript` with absolute path in - :mod:`webbrowser` and :mod:`!turtledemo`. - -Windows -------- - -- gh-144551: Updated bundled version of OpenSSL to 3.0.19. - -- gh-140131: Fix REPL cursor position on Windows when module completion - suggestion line hits console width. - -Tests ------ - -- gh-144418: The Android testbed's emulator RAM has been increased from 2 GB - to 4 GB. - -- gh-146202: Fix a race condition in regrtest: make sure that the temporary - directory is created in the worker process. Previously, temp_cwd() could - fail on Windows if the "build" directory was not created. Patch by Victor - Stinner. - -- gh-144739: When Python was compiled with system expat older then 2.7.2 but - tests run with newer expat, still skip - :class:`!test.test_pyexpat.MemoryProtectionTest`. - -Security --------- - -- gh-145986: :mod:`xml.parsers.expat`: Fixed a crash caused by unbounded C - recursion when converting deeply nested XML content models with - :meth:`~xml.parsers.expat.xmlparser.ElementDeclHandler`. This addresses - :cve:`2026-4224`. - -- gh-145599: Reject control characters in :class:`http.cookies.Morsel` - :meth:`~http.cookies.Morsel.update` and - :meth:`~http.cookies.BaseCookie.js_output`. This addresses - :cve:`2026-3644`. - -- gh-145506: Fixes :cve:`2026-2297` by ensuring that - ``SourcelessFileLoader`` uses :func:`io.open_code` when opening ``.pyc`` - files. - -- gh-144370: Disallow usage of control characters in status in - :mod:`wsgiref.handlers` to prevent HTTP header injections. Patch by - Benedikt Johannes. - -- gh-143930: Reject leading dashes in URLs passed to - :func:`webbrowser.open`. - -Library -------- - -- gh-144503: Fix a regression introduced in 3.14.3 and 3.13.12 where the - :mod:`multiprocessing` ``forkserver`` start method would fail with - :exc:`BrokenPipeError` when the parent process had a very large - :data:`sys.argv`. The argv is now passed to the forkserver as separate - command-line arguments rather than being embedded in the ``-c`` command - string, avoiding the operating system's per-argument length limit. 
- -- gh-146613: :mod:`itertools`: Fix a crash in :func:`itertools.groupby` when - the grouper iterator is concurrently mutated. - -- gh-146080: :mod:`ssl`: fix a crash when an SNI callback tries to use an - SSL object that has already been garbage-collected. Patch by Bénédikt - Tran. - -- gh-146090: :mod:`sqlite3`: fix a crash when - :meth:`sqlite3.Connection.create_collation` fails with `SQLITE_BUSY - `__. Patch by Bénédikt Tran. - -- gh-146090: :mod:`sqlite3`: properly raise :exc:`MemoryError` instead of - :exc:`SystemError` when a context callback fails to be allocated. Patch by - Bénédikt Tran. - -- gh-145633: Fix ``struct.pack('f', float)``: use :c:func:`PyFloat_Pack4` to - raise :exc:`OverflowError`. Patch by Sergey B Kirpichev and Victor - Stinner. - -- gh-146310: The :mod:`ensurepip` module no longer looks for ``pip-*.whl`` - wheel packages in the current directory. - -- gh-146083: Update bundled `libexpat `_ to - version 2.7.5. - -- gh-146076: :mod:`zoneinfo`: fix crashes when deleting ``_weak_cache`` from - a :class:`zoneinfo.ZoneInfo` subclass. - -- gh-146054: Limit the size of :func:`encodings.search_function` cache. - Found by OSS Fuzz in :oss-fuzz:`493449985`. - -- gh-145883: :mod:`zoneinfo`: Fix heap buffer overflow reads from malformed - TZif data. Found by OSS Fuzz, issues :oss-fuzz:`492245058` and - :oss-fuzz:`492230068`. - -- gh-145750: Avoid undefined behaviour from signed integer overflow when - parsing format strings in the :mod:`struct` module. Found by OSS Fuzz in - :oss-fuzz:`488466741`. - -- gh-145492: Fix infinite recursion in :class:`collections.defaultdict` - ``__repr__`` when a ``defaultdict`` contains itself. Based on analysis by - KowalskiThomas in :gh:`145492`. - -- gh-145623: Fix crash in :mod:`struct` when calling :func:`repr` or - ``__sizeof__()`` on an uninitialized :class:`struct.Struct` object created - via ``Struct.__new__()`` without calling ``__init__()``. - -- gh-145616: Detect Android sysconfig ABI correctly on 32-bit ARM Android on - 64-bit ARM kernel - -- gh-145376: Fix null pointer dereference in unusual error scenario in - :mod:`hashlib`. - -- gh-145551: Fix InvalidStateError when cancelling process created by - :func:`asyncio.create_subprocess_exec` or - :func:`asyncio.create_subprocess_shell`. Patch by Daan De Meyer. - -- gh-145417: :mod:`venv`: Prevent incorrect preservation of SELinux context - when copying the ``Activate.ps1`` script. The script inherited the SELinux - security context of the system template directory, rather than the - destination project directory. - -- gh-145301: :mod:`hashlib`: fix a crash when the initialization of the - underlying C extension module fails. - -- gh-145264: Base64 decoder (see :func:`binascii.a2b_base64`, - :func:`base64.b64decode`, etc) no longer ignores excess data after the - first padded quad in non-strict (default) mode. Instead, in conformance - with :rfc:`4648`, section 3.3, it now ignores the pad character, "=", if - it is present before the end of the encoded data. - -- gh-145158: Avoid undefined behaviour from signed integer overflow when - parsing format strings in the :mod:`struct` module. - -- gh-144984: Fix crash in - :meth:`xml.parsers.expat.xmlparser.ExternalEntityParserCreate` when an - allocation fails. The error paths could dereference NULL ``handlers`` and - double-decrement the parent parser's reference count. - -- gh-88091: Fix :func:`unicodedata.decomposition` for Hangul characters. 
- -- gh-144835: Added missing explanations for some parameters in - :func:`glob.glob` and :func:`glob.iglob`. - -- gh-144833: Fixed a use-after-free in :mod:`ssl` when ``SSL_new()`` returns - NULL in ``newPySSLSocket()``. The error was reported via a dangling - pointer after the object had already been freed. - -- gh-144259: Fix inconsistent display of long multiline pasted content in - the REPL. - -- gh-144156: Fix the folding of headers by the :mod:`email` library when - :rfc:`2047` encoded words are used. Now whitespace is correctly preserved - and also correctly added between adjacent encoded words. The latter - property was broken by the fix for gh-92081, which mostly fixed previous - failures to preserve whitespace. - -- gh-66305: Fixed a hang on Windows in the :mod:`tempfile` module when - trying to create a temporary file or subdirectory in a non-writable - directory. - -- gh-140814: :func:`multiprocessing.freeze_support` no longer sets the - default start method as a side effect, which previously caused a - subsequent :func:`multiprocessing.set_start_method` call to raise - :exc:`RuntimeError`. - -- gh-144475: Calling :func:`repr` on :func:`functools.partial` is now safer - when the partial object's internal attributes are replaced while the - string representation is being generated. - -- gh-144538: Bump the version of pip bundled in ensurepip to version 26.0.1 - -- gh-144363: Update bundled `libexpat `_ to - 2.7.4 - -- gh-143637: Fixed a crash in socket.sendmsg() that could occur if ancillary - data is mutated re-entrantly during argument parsing. - -- gh-143880: Fix data race in :func:`functools.partial` in the :term:`free - threading` build. - -- gh-143543: Fix a crash in itertools.groupby that could occur when a - user-defined :meth:`~object.__eq__` method re-enters the iterator during - key comparison. - -- gh-140652: Fix a crash in :func:`!_interpchannels.list_all` after closing - a channel. - -- gh-143698: Allow *scheduler* and *setpgroup* arguments to be explicitly - :const:`None` when calling :func:`os.posix_spawn` or - :func:`os.posix_spawnp`. Patch by Bénédikt Tran. - -- gh-143698: Raise :exc:`TypeError` instead of :exc:`SystemError` when the - *scheduler* in :func:`os.posix_spawn` or :func:`os.posix_spawnp` is not a - tuple. Patch by Bénédikt Tran. - -- gh-143304: Fix :class:`ctypes.CDLL` to honor the ``handle`` parameter on - POSIX systems. - -- gh-142781: :mod:`zoneinfo`: fix a crash when instantiating - :class:`~zoneinfo.ZoneInfo` objects for which the internal class-level - cache is inconsistent. - -- gh-142763: Fix a race condition between :class:`zoneinfo.ZoneInfo` - creation and :func:`zoneinfo.ZoneInfo.clear_cache` that could raise - :exc:`KeyError`. - -- gh-142787: Fix assertion failure in :mod:`sqlite3` blob subscript when - slicing with indices that result in an empty slice. - -- gh-142352: Fix :meth:`asyncio.StreamWriter.start_tls` to transfer buffered - data from :class:`~asyncio.StreamReader` to the SSL layer, preventing data - loss when upgrading a connection to TLS mid-stream (e.g., when - implementing PROXY protocol support). - -- gh-141707: Don't change :class:`tarfile.TarInfo` type from ``AREGTYPE`` to - ``DIRTYPE`` when parsing GNU long name or link headers. - -- gh-139933: Improve :exc:`AttributeError` suggestions for classes with a - custom :meth:`~object.__dir__` method returning a list of unsortable - values. Patch by Bénédikt Tran. 
- -- gh-138891: Fix ``SyntaxError`` when ``inspect.get_annotations(f, - eval_str=True)`` is called on a function annotated with a :pep:`646` - ``star_expression`` - -- gh-137335: Get rid of any possibility of a name conflict for named pipes - in :mod:`multiprocessing` and :mod:`asyncio` on Windows, no matter how - small. - -- gh-80667: Support lookup for Tangut Ideographs in :mod:`unicodedata`. - -- bpo-40243: Fix :meth:`!unicodedata.ucd_3_2_0.numeric` for non-decimal - values. - -Documentation -------------- - -- gh-126676: Expand :mod:`argparse` documentation for ``type=bool`` with a - demonstration of the surprising behavior and pointers to common - alternatives. - -- gh-145450: Document missing public :class:`wave.Wave_write` getter - methods. - -Core and Builtins ------------------ - -- gh-148157: Fix an unlikely crash when parsing an invalid type comments for - function parameters. Found by OSS Fuzz in :oss-fuzz:`492782951`. - -- gh-146615: Fix a crash in :meth:`~object.__get__` for - :c:expr:`METH_METHOD` descriptors when an invalid (non-type) object is - passed as the second argument. Patch by Steven Sun. - -- gh-146128: Fix a bug which could cause constant values to be partially - corrupted in AArch64 JIT code. This issue is theoretical, and hasn't - actually been observed in unmodified Python interpreters. - -- gh-146250: Fixed a memory leak in :exc:`SyntaxError` when re-initializing - it. - -- gh-146245: Fixed reference leaks in :mod:`socket` when audit hooks raise - exceptions in :func:`socket.getaddrinfo` and :meth:`!socket.sendto`. - -- gh-146227: Fix wrong type in ``_Py_atomic_load_uint16`` in the C11 atomics - backend (``pyatomic_std.h``), which used a 32-bit atomic load instead of - 16-bit. Found by Mohammed Zuhaib. - -- gh-146056: Fix :func:`repr` for lists containing ``NULL``\ s. - -- gh-145990: ``python --help-env`` sections are now sorted by environment - variable name. - -- gh-145376: Fix GC tracking in ``structseq.__replace__()``. - -- gh-142183: Avoid a pathological case where repeated calls at a specific - stack depth could be significantly slower. - -- gh-145783: Fix an unlikely crash in the parser when certain errors were - erroneously not propagated. Found by OSS Fuzz in :oss-fuzz:`491369109`. - -- gh-145701: Fix :exc:`SystemError` when ``__classdict__`` or - ``__conditional_annotations__`` is in a class-scope inlined comprehension. - Found by OSS Fuzz in :oss-fuzz:`491105000`. - -- gh-145335: Fix a crash in :func:`os.pathconf` when called with ``-1`` as - the path argument. - -- gh-145234: Fixed a ``SystemError`` in the parser when an encoding cookie - (for example, UTF-7) decodes to carriage returns (``\r``). Newlines are - now normalized after decoding in the string tokenizer. - - Patch by Pablo Galindo. - -- gh-130555: Fix use-after-free in :meth:`dict.clear` when the dictionary - values are embedded in an object and a destructor causes re-entrant - mutation of the dictionary. - -- gh-145008: Fix a bug when calling certain methods at the recursion limit - which manifested as a corruption of Python's operand stack. Patch by Ken - Jin. - -- gh-144872: Fix heap buffer overflow in the parser found by OSS-Fuzz. - -- gh-144766: Fix a crash in fork child process when perf support is enabled. - -- gh-144759: Fix undefined behavior in the lexer when ``start`` and - ``multi_line_start`` pointers are ``NULL`` in - ``_PyLexer_remember_fstring_buffers()`` and - ``_PyLexer_restore_fstring_buffers()``. 
The ``NULL`` pointer arithmetic - (``NULL - valid_pointer``) is now guarded with explicit ``NULL`` checks. - -- gh-144601: Fix crash when importing a module whose ``PyInit`` function - raises an exception from a subinterpreter. - -- gh-143636: Fix a crash when calling :class:`SimpleNamespace.__replace__() - ` on non-namespace instances. Patch by Bénédikt - Tran. - -- gh-143650: Fix race condition in :mod:`importlib` where a thread could - receive a stale module reference when another thread's import fails. - -- gh-140594: Fix an out of bounds read when a single NUL character is read - from the standard input. Patch by Shamil Abdulaev. - -- gh-91636: While performing garbage collection, clear weakrefs to - unreachable objects that are created during running of finalizers. If - those weakrefs were are not cleared, they could reveal unreachable - objects. - -- gh-130327: Fix erroneous clearing of an object's :attr:`~object.__dict__` - if overwritten at runtime. - -- gh-80667: Literals using the ``\N{name}`` escape syntax can now construct - CJK ideographs and Hangul syllables using case-insensitive names. - -Build ------ - -- gh-146541: The Android testbed can now be built for 32-bit ARM and x86 - targets. - -- gh-146450: The Android build script was modified to improve parity with - other platform build scripts. - -- gh-145801: When Python build is optimized with GCC using PGO, use - ``-fprofile-update=atomic`` option to use atomic operations when updating - profile information. This option reduces the risk of gcov Data Files - (.gcda) corruption which can cause random GCC crashes. Patch by Victor - Stinner. - -- gh-129259: Fix AIX build failures caused by incorrect struct alignment in - ``_Py_CODEUNIT`` and ``_Py_BackoffCounter`` by adding AIX-specific - ``#pragma pack`` directives. - - -What's New in Python 3.13.12 final? -=================================== - -*Release date: 2026-02-03* - -Windows -------- - -- gh-128067: Fix a bug in PyREPL on Windows where output without a trailing - newline was overwritten by the next prompt. - -Tools/Demos ------------ - -- gh-142095: Make gdb 'py-bt' command use frame from thread local state when - available. Patch by Sam Gross and Victor Stinner. - -Tests ------ - -- gh-144415: The Android testbed now distinguishes between stdout/stderr - messages which were triggered by a newline, and those triggered by a - manual call to ``flush``. This fixes logging of progress indicators and - similar content. - -- gh-65784: Add support for parametrized resource ``wantobjects`` in - regrtests, which allows to run Tkinter tests with the specified value of - :data:`!tkinter.wantobjects`, for example ``-u wantobjects=0``. - -- gh-143553: Add support for parametrized resources, such as ``-u - xpickle=2.7``. - -- gh-142836: Accommodated Solaris in - ``test_pdb.test_script_target_anonymous_pipe``. - -- gh-129401: Fix a flaky test in ``test_repr_rlock`` that checks the - representation of :class:`multiprocessing.RLock`. - -- bpo-31391: Forward-port test_xpickle from Python 2 to Python 3 and add the - resource back to test's command line. - -Security --------- - -- gh-144125: :mod:`~email.generator.BytesGenerator` will now refuse to - serialize (write) headers that are unsafely folded or delimited; see - :attr:`~email.policy.Policy.verify_generated_headers`. (Contributed by Bas - Bloemsaat and Petr Viktorin in :gh:`121650`). - -- gh-143935: Fixed a bug in the folding of comments when flattening an email - message using a modern email policy. 
Comments consisting of a very long - sequence of non-foldable characters could trigger a forced line wrap that - omitted the required leading space on the continuation line, causing the - remainder of the comment to be interpreted as a new header field. This - enabled header injection with carefully crafted inputs. - -- gh-143925: Reject control characters in ``data:`` URL media types. - -- gh-143919: Reject control characters in :class:`http.cookies.Morsel` - fields and values. - -- gh-143916: Reject C0 control characters within wsgiref.headers.Headers - fields, values, and parameters. - -Library -------- - -- gh-144380: Improve performance of :class:`io.BufferedReader` line - iteration by ~49%. - -- gh-144169: Fix three crashes when non-string keyword arguments are - supplied to objects in the :mod:`ast` module. - -- gh-144100: Fixed a crash in ctypes when using a deprecated - ``POINTER(str)`` type in ``argtypes``. Instead of aborting, ctypes now - raises a proper Python exception when the pointer target type is - unresolved. - -- gh-144050: Fix :func:`stat.filemode` in the pure-Python implementation to - avoid misclassifying invalid mode values as block devices. - -- gh-144023: Fixed validation of file descriptor 0 in posix functions when - used with follow_symlinks parameter. - -- gh-143999: Fix an issue where :func:`inspect.getgeneratorstate` and - :func:`inspect.getcoroutinestate` could fail for generators wrapped by - :func:`types.coroutine` in the suspended state. - -- gh-143706: Fix :mod:`multiprocessing` forkserver so that :data:`sys.argv` - is correctly set before ``__main__`` is preloaded. Previously, - :data:`sys.argv` was empty during main module import in forkserver child - processes. This fixes a regression introduced in 3.13.8 and 3.14.1. Root - caused by Aaron Wieczorek, test provided by Thomas Watson, thanks! - -- gh-143638: Forbid reentrant calls of the :class:`pickle.Pickler` and - :class:`pickle.Unpickler` methods for the C implementation. Previously, - this could cause crash or data corruption, now concurrent calls of methods - of the same object raise :exc:`RuntimeError`. - -- gh-78724: Raise :exc:`RuntimeError`'s when user attempts to call methods - on half-initialized :class:`~struct.Struct` objects, For example, created - by ``Struct.__new__(Struct)``. Patch by Sergey B Kirpichev. - -- gh-143602: Fix a inconsistency issue in :meth:`~io.RawIOBase.write` that - leads to unexpected buffer overwrite by deduplicating the buffer exports. - -- gh-143547: Fix :func:`sys.unraisablehook` when the hook raises an - exception and changes :func:`sys.unraisablehook`: hold a strong reference - to the old hook. Patch by Victor Stinner. - -- gh-143378: Fix use-after-free crashes when a :class:`~io.BytesIO` object - is concurrently mutated during :meth:`~io.RawIOBase.write` or - :meth:`~io.IOBase.writelines`. - -- gh-143346: Fix incorrect wrapping of the Base64 data in - :class:`!plistlib._PlistWriter` when the indent contains a mix of tabs and - spaces. - -- gh-143310: :mod:`tkinter`: fix a crash when a Python :class:`list` is - mutated during the conversion to a Tcl object (e.g., when setting a Tcl - variable). Patch by Bénédikt Tran. - -- gh-143309: Fix a crash in :func:`os.execve` on non-Windows platforms when - given a custom environment mapping which is then mutated during parsing. - Patch by Bénédikt Tran. - -- gh-143308: :mod:`pickle`: fix use-after-free crashes when a - :class:`~pickle.PickleBuffer` is concurrently mutated by a custom buffer - callback during pickling. 
Patch by Bénédikt Tran and Aaron Wieczorek. - -- gh-143237: Fix support of named pipes in the rotating :mod:`logging` - handlers. - -- gh-143249: Fix possible buffer leaks in Windows overlapped I/O on error - handling. - -- gh-143241: :mod:`zoneinfo`: fix infinite loop in :meth:`ZoneInfo.from_file - ` when parsing a malformed TZif file. Patch - by Fatih Celik. - -- gh-142830: :mod:`sqlite3`: fix use-after-free crashes when the - connection's callbacks are mutated during a callback execution. Patch by - Bénédikt Tran. - -- gh-143200: :mod:`xml.etree.ElementTree`: fix use-after-free crashes in - :meth:`~object.__getitem__` and :meth:`~object.__setitem__` methods of - :class:`~xml.etree.ElementTree.Element` when the element is concurrently - mutated. Patch by Bénédikt Tran. - -- gh-142195: Updated timeout evaluation logic in :mod:`subprocess` to be - compatible with deterministic environments like Shadow where time moves - exactly as requested. - -- gh-143145: Fixed a possible reference leak in ctypes when constructing - results with multiple output parameters on error. - -- gh-122431: Corrected the error message in - :func:`readline.append_history_file` to state that ``nelements`` must be - non-negative instead of positive. - -- gh-143004: Fix a potential use-after-free in - :meth:`collections.Counter.update` when user code mutates the Counter - during an update. - -- gh-143046: The :mod:`asyncio` REPL no longer prints copyright and version - messages in the quiet mode (:option:`-q`). Patch by Bartosz Sławecki. - -- gh-140648: The :mod:`asyncio` REPL now respects the :option:`-I` flag - (isolated mode). Previously, it would load and execute - :envvar:`PYTHONSTARTUP` even if the flag was set. Contributed by Bartosz - Sławecki. - -- gh-142991: Fixed socket operations such as recvfrom() and sendto() for - FreeBSD divert(4) socket. - -- gh-143010: Fixed a bug in :mod:`mailbox` where the precise timing of an - external event could result in the library opening an existing file - instead of a file it expected to create. - -- gh-142881: Fix concurrent and reentrant call of :func:`atexit.unregister`. - -- gh-112127: Fix possible use-after-free in :func:`atexit.unregister` when - the callback is unregistered during comparison. - -- gh-142783: Fix zoneinfo use-after-free with descriptor _weak_cache. a - descriptor as _weak_cache could cause crashes during object creation. The - fix ensures proper reference counting for descriptor-provided objects. - -- gh-142754: Add the *ownerDocument* attribute to :mod:`xml.dom.minidom` - elements and attributes created by directly instantiating the ``Element`` - or ``Attr`` class. Note that this way of creating nodes is not supported; - creator functions like :py:meth:`xml.dom.Document.documentElement` should - be used instead. - -- gh-142784: The :mod:`asyncio` REPL now properly closes the loop upon the - end of interactive session. Previously, it could cause surprising - warnings. Contributed by Bartosz Sławecki. - -- gh-142555: :mod:`array`: fix a crash in ``a[i] = v`` when converting *i* - to an index via :meth:`i.__index__ ` or - :meth:`i.__float__ ` mutates the array. - -- gh-142594: Fix crash in ``TextIOWrapper.close()`` when the underlying - buffer's ``closed`` property calls :meth:`~io.TextIOBase.detach`. - -- gh-142451: :mod:`hmac`: Ensure that the :attr:`HMAC.block_size - ` attribute is correctly copied by :meth:`HMAC.copy - `. Patch by Bénédikt Tran. 
- -- gh-142495: :class:`collections.defaultdict` now prioritizes - :meth:`~object.__setitem__` when inserting default values from - ``default_factory``. This prevents race conditions where a default value - would overwrite a value set before ``default_factory`` returns. - -- gh-142651: :mod:`unittest.mock`: fix a thread safety issue where - :attr:`Mock.call_count ` may return - inaccurate values when the mock is called concurrently from multiple - threads. - -- gh-142595: Added type check during initialization of the :mod:`decimal` - module to prevent a crash in case of broken stdlib. Patch by Sergey B - Kirpichev. - -- gh-142517: The non-``compat32`` :mod:`email` policies now correctly handle - refolding encoded words that contain bytes that can not be decoded in - their specified character set. Previously this resulted in an encoding - exception during folding. - -- gh-112527: The help text for required options in :mod:`argparse` no longer - extended with " (default: None)". - -- gh-142315: Pdb can now run scripts from anonymous pipes used in process - substitution. Patch by Bartosz Sławecki. - -- gh-142282: Fix :func:`winreg.QueryValueEx` to not accidentally read - garbage buffer under race condition. - -- gh-75949: Fix :mod:`argparse` to preserve ``|`` separators in mutually - exclusive groups when the usage line wraps due to length. - -- gh-68552: ``MisplacedEnvelopeHeaderDefect`` and ``Missing header name`` - defects are now correctly passed to the ``handle_defect`` method of - ``policy`` in :class:`~email.parser.FeedParser`. - -- gh-142006: Fix a bug in the :mod:`email.policy.default` folding algorithm - which incorrectly resulted in a doubled newline when a line ending at - exactly max_line_length was followed by an unfoldable token. - -- gh-105836: Fix :meth:`asyncio.run_coroutine_threadsafe` leaving underlying - cancelled asyncio task running. - -- gh-139971: :mod:`pydoc`: Ensure that the link to the online documentation - of a :term:`stdlib` module is correct. - -- gh-139262: Some keystrokes can be swallowed in the new ``PyREPL`` on - Windows, especially when used together with the ALT key. Fix by Chris - Eibl. - -- gh-138897: Improved :data:`license`/:data:`copyright`/:data:`credits` - display in the :term:`REPL`: now uses a pager. - -- gh-79986: Add parsing for ``References`` and ``In-Reply-To`` headers to - the :mod:`email` library that parses the header content as lists of - message id tokens. This prevents them from being folded incorrectly. - -- gh-109263: Starting a process from spawn context in :mod:`multiprocessing` - no longer sets the start method globally. - -- gh-90871: Fixed an off by one error concerning the backlog parameter in - :meth:`~asyncio.loop.create_unix_server`. Contributed by Christian - Harries. - -- gh-133253: Fix thread-safety issues in :mod:`linecache`. - -- gh-132715: Skip writing objects during marshalling once a failure has - occurred. - -- gh-127529: Correct behavior of - :func:`!asyncio.selector_events.BaseSelectorEventLoop._accept_connection` - in handling :exc:`ConnectionAbortedError` in a loop. This improves - performance on OpenBSD. - -IDLE ----- - -- gh-143774: Better explain the operation of Format / Format Paragraph. - -Documentation -------------- - -- gh-140806: Add documentation for :func:`enum.bin`. - -Core and Builtins ------------------ - -- gh-144307: Prevent a reference leak in module teardown at interpreter - finalization. - -- gh-144194: Fix error handling in perf jitdump initialization on memory - allocation failure. 
- -- gh-141805: Fix crash in :class:`set` when objects with the same hash are - concurrently added to the set after removing an element with the same hash - while the set still contains elements with the same hash. - -- gh-143670: Fixes a crash in ``ga_repr_items_list`` function. - -- gh-143377: Fix a crash in :func:`!_interpreters.capture_exception` when - the exception is incorrectly formatted. Patch by Bénédikt Tran. - -- gh-143189: Fix crash when inserting a non-:class:`str` key into a split - table dictionary when the key matches an existing key in the split table - but has no corresponding value in the dict. - -- gh-143228: Fix use-after-free in perf trampoline when toggling profiling - while threads are running or during interpreter finalization with daemon - threads active. The fix uses reference counting to ensure trampolines are - not freed while any code object could still reference them. Pach by Pablo - Galindo - -- gh-142664: Fix a use-after-free crash in :meth:`memoryview.__hash__ - ` when the ``__hash__`` method of the referenced object - mutates that object or the view. Patch by Bénédikt Tran. - -- gh-142557: Fix a use-after-free crash in :ref:`bytearray.__mod__ - ` when the :class:`!bytearray` is mutated while - formatting the ``%``-style arguments. Patch by Bénédikt Tran. - -- gh-143195: Fix use-after-free crashes in :meth:`bytearray.hex` and - :meth:`memoryview.hex` when the separator's :meth:`~object.__len__` - mutates the original object. Patch by Bénédikt Tran. - -- gh-143135: Set :data:`sys.flags.inspect` to ``1`` when - :envvar:`PYTHONINSPECT` is ``0``. Previously, it was set to ``0`` in this - case. - -- gh-143003: Fix an overflow of the shared empty buffer in - :meth:`bytearray.extend` when ``__length_hint__()`` returns 0 for - non-empty iterator. - -- gh-143006: Fix a possible assertion error when comparing negative - non-integer ``float`` and ``int`` with the same number of bits in the - integer part. - -- gh-142776: Fix a file descriptor leak in import.c - -- gh-142829: Fix a use-after-free crash in :class:`contextvars.Context` - comparison when a custom ``__eq__`` method modifies the context via - :meth:`~contextvars.ContextVar.set`. - -- gh-142766: Clear the frame of a generator when :meth:`generator.close` is - called. - -- gh-142737: Tracebacks will be displayed in fallback mode even if - :func:`io.open` is lost. Previously, this would crash the interpreter. - Patch by Bartosz Sławecki. - -- gh-142554: Fix a crash in :func:`divmod` when :func:`!_pylong.int_divmod` - does not return a tuple of length two exactly. Patch by Bénédikt Tran. - -- gh-142560: Fix use-after-free in :class:`bytearray` search-like methods - (:meth:`~bytearray.find`, :meth:`~bytearray.count`, - :meth:`~bytearray.index`, :meth:`~bytearray.rindex`, and - :meth:`~bytearray.rfind`) by marking the storage as exported which causes - reallocation attempts to raise :exc:`BufferError`. For - :func:`~operator.contains`, :meth:`~bytearray.split`, and - :meth:`~bytearray.rsplit` the :ref:`buffer protocol ` is - used for this. - -- gh-142343: Fix SIGILL crash on m68k due to incorrect assembly constraint. - -- gh-141732: Ensure the :meth:`~object.__repr__` for :exc:`ExceptionGroup` - and :exc:`BaseExceptionGroup` does not change when the exception sequence - that was original passed in to its constructor is subsequently mutated. - -- gh-100964: Fix reference cycle in exhausted generator frames. Patch by - Savannah Ostrowski. 
- -- gh-140373: Correctly emit ``PY_UNWIND`` event when generator object is - closed. Patch by Mikhail Efimov. - -- gh-138568: Adjusted the built-in :func:`help` function so that empty - inputs are ignored in interactive mode. - -- gh-127773: Do not use the type attribute cache for types with incompatible - :term:`MRO`. - -C API ------ - -- gh-142571: :c:func:`!PyUnstable_CopyPerfMapFile` now checks that opening - the file succeeded before flushing. - -Build ------ - -- gh-142454: When calculating the digest of the JIT stencils input, sort the - hashed files by filenames before adding their content to the hasher. This - ensures deterministic hash input and hence deterministic hash, independent - on filesystem order. - -- gh-141808: When running ``make clean-retain-profile``, keep the generated - JIT stencils. That way, the stencils are not generated twice when - Profile-guided optimization (PGO) is used. It also allows distributors to - supply their own pre-built JIT stencils. - -- gh-138061: Ensure reproducible builds by making JIT stencil header - generation deterministic. - - -What's New in Python 3.13.11 final? -=================================== - -*Release date: 2025-12-05* - -Security --------- - -- gh-142145: Remove quadratic behavior in ``xml.minidom`` node ID cache - clearing. - -- gh-119451: Fix a potential memory denial of service in the - :mod:`http.client` module. When connecting to a malicious server, it could - cause an arbitrary amount of memory to be allocated. This could have led - to symptoms including a :exc:`MemoryError`, swapping, out of memory (OOM) - killed processes or containers, or even system crashes. - -- gh-119452: Fix a potential memory denial of service in the - :mod:`http.server` module. When a malicious user is connected to the CGI - server on Windows, it could cause an arbitrary amount of memory to be - allocated. This could have led to symptoms including a :exc:`MemoryError`, - swapping, out of memory (OOM) killed processes or containers, or even - system crashes. - -Library -------- - -- gh-140797: Revert changes to the undocumented :class:`!re.Scanner` class. - Capturing groups are still allowed for backward compatibility, although - using them can lead to incorrect result. They will be forbidden in future - Python versions. - -- gh-142206: The resource tracker in the :mod:`multiprocessing` module now - uses the original communication protocol, as in Python 3.14.0 and below, - by default. This avoids issues with upgrading Python while it is running. - (Note that such 'in-place' upgrades are not tested.) The tracker remains - compatible with subprocesses that use new protocol (that is, subprocesses - using Python 3.13.10, 3.14.1 and 3.15). - -Core and Builtins ------------------ - -- gh-142218: Fix crash when inserting into a split table dictionary with a - non :class:`str` key that matches an existing key. - - -What's New in Python 3.13.10 final? -=================================== - -*Release date: 2025-12-02* - -Tools/Demos ------------ - -- gh-141442: The iOS testbed now correctly handles test arguments that - contain spaces. - -Tests ------ - -- gh-140482: Preserve and restore the state of ``stty echo`` as part of the - test environment. - -- gh-140082: Update ``python -m test`` to set ``FORCE_COLOR=1`` when being - run with color enabled so that :mod:`unittest` which is run by it with - redirected output will output in color. 
- -- gh-136442: Use exitcode ``1`` instead of ``5`` if - :func:`unittest.TestCase.setUpClass` raises an exception - -Security --------- - -- gh-139700: Check consistency of the zip64 end of central directory record. - Support records with "zip64 extensible data" if there are no bytes - prepended to the ZIP file. - -- gh-137836: Add support of the "plaintext" element, RAWTEXT elements "xmp", - "iframe", "noembed" and "noframes", and optionally RAWTEXT element - "noscript" in :class:`html.parser.HTMLParser`. - -- gh-136063: :mod:`email.message`: ensure linear complexity for legacy HTTP - parameters parsing. Patch by Bénédikt Tran. - -- gh-136065: Fix quadratic complexity in :func:`os.path.expandvars`. - -- gh-119342: Fix a potential memory denial of service in the :mod:`plistlib` - module. When reading a Plist file received from untrusted source, it could - cause an arbitrary amount of memory to be allocated. This could have led - to symptoms including a :exc:`MemoryError`, swapping, out of memory (OOM) - killed processes or containers, or even system crashes. - -Library -------- - -- gh-74389: When the stdin being used by a :class:`subprocess.Popen` - instance is closed, this is now ignored in - :meth:`subprocess.Popen.communicate` instead of leaving the class in an - inconsistent state. - -- gh-87512: Fix :func:`subprocess.Popen.communicate` timeout handling on - Windows when writing large input. Previously, the timeout was ignored - during stdin writing, causing the method to block indefinitely if the - child process did not consume input quickly. The stdin write is now - performed in a background thread, allowing the timeout to be properly - enforced. - -- gh-141473: When :meth:`subprocess.Popen.communicate` was called with - *input* and a *timeout* and is called for a second time after a - :exc:`~subprocess.TimeoutExpired` exception before the process has died, - it should no longer hang. - -- gh-59000: Fix :mod:`pdb` breakpoint resolution for class methods when the - module defining the class is not imported. - -- gh-141570: Support :term:`file-like object` raising :exc:`OSError` from - :meth:`~io.IOBase.fileno` in color detection - (``_colorize.can_colorize()``). This can occur when ``sys.stdout`` is - redirected. - -- gh-141659: Fix bad file descriptor errors from ``_posixsubprocess`` on - AIX. - -- gh-141497: :mod:`ipaddress`: ensure that the methods - :meth:`IPv4Network.hosts() ` and - :meth:`IPv6Network.hosts() ` always return an - iterator. - -- gh-140938: The :func:`statistics.stdev` and :func:`statistics.pstdev` - functions now raise a :exc:`ValueError` when the input contains an - infinity or a NaN. - -- gh-124111: Updated Tcl threading configuration in :mod:`_tkinter` to - assume that threads are always available in Tcl 9 and later. - -- gh-137109: The :mod:`os.fork` and related forking APIs will no longer warn - in the common case where Linux or macOS platform APIs return the number of - threads in a process and find the answer to be 1 even when a - :func:`os.register_at_fork` ``after_in_parent=`` callback (re)starts a - thread. - -- gh-141314: Fix assertion failure in :meth:`io.TextIOWrapper.tell` when - reading files with standalone carriage return (``\r``) line endings. - -- gh-141311: Fix assertion failure in :func:`!io.BytesIO.readinto` and - undefined behavior arising when read position is above capcity in - :class:`io.BytesIO`. - -- gh-141141: Fix a thread safety issue with :func:`base64.b85decode`. - Contributed by Benel Tayar. 
- -- gh-140911: :mod:`collections`: Ensure that the methods - ``UserString.rindex()`` and ``UserString.index()`` accept - :class:`collections.UserString` instances as the sub argument. - -- gh-140797: The undocumented :class:`!re.Scanner` class now forbids regular - expressions containing capturing groups in its lexicon patterns. Patterns - using capturing groups could previously lead to crashes with segmentation - fault. Use non-capturing groups (?:...) instead. - -- gh-140815: :mod:`faulthandler` now detects if a frame or a code object is - invalid or freed. Patch by Victor Stinner. - -- gh-100218: Correctly set :attr:`~OSError.errno` when - :func:`socket.if_nametoindex` or :func:`socket.if_indextoname` raise an - :exc:`OSError`. Patch by Bénédikt Tran. - -- gh-140875: Fix handling of unclosed character references (named and - numerical) followed by the end of file in :class:`html.parser.HTMLParser` - with ``convert_charrefs=False``. - -- gh-140734: :mod:`multiprocessing`: fix off-by-one error when checking the - length of a temporary socket file path. Patch by Bénédikt Tran. - -- gh-140874: Bump the version of pip bundled in :mod:`ensurepip` to version - 25.3 - -- gh-140691: In :mod:`urllib.request`, when opening a FTP URL fails because - a data connection cannot be made, the control connection's socket is now - closed to avoid a :exc:`ResourceWarning`. - -- gh-103847: Fix hang when cancelling process created by - :func:`asyncio.create_subprocess_exec` or - :func:`asyncio.create_subprocess_shell`. Patch by Kumar Aditya. - -- gh-140590: Fix arguments checking for the - :meth:`!functools.partial.__setstate__` that may lead to internal state - corruption and crash. Patch by Sergey Miryanov. - -- gh-140634: Fix a reference counting bug in - :meth:`!os.sched_param.__reduce__`. - -- gh-140633: Ignore :exc:`AttributeError` when setting a module's - ``__file__`` attribute when loading an extension module packaged as Apple - Framework. - -- gh-140593: :mod:`xml.parsers.expat`: Fix a memory leak that could affect - users with :meth:`~xml.parsers.expat.xmlparser.ElementDeclHandler` set to - a custom element declaration handler. Patch by Sebastian Pipping. - -- gh-140607: Inside :meth:`io.RawIOBase.read`, validate that the count of - bytes returned by :meth:`io.RawIOBase.readinto` is valid (inside the - provided buffer). - -- gh-138162: Fix :class:`logging.LoggerAdapter` with ``merge_extra=True`` - and without the *extra* argument. - -- gh-140474: Fix memory leak in :class:`array.array` when creating arrays - from an empty :class:`str` and the ``u`` type code. - -- gh-140272: Fix memory leak in the :meth:`!clear` method of the - :mod:`dbm.gnu` database. - -- gh-140041: Fix import of :mod:`ctypes` on Android and Cygwin when ABI - flags are present. - -- gh-139905: Add suggestion to error message for :class:`typing.Generic` - subclasses when ``cls.__parameters__`` is missing due to a parent class - failing to call :meth:`super().__init_subclass__() - ` in its ``__init_subclass__``. - -- gh-139845: Fix to not print KeyboardInterrupt twice in default asyncio - REPL. - -- gh-139783: Fix :func:`inspect.getsourcelines` for the case when a - decorator is followed by a comment or an empty line. - -- gh-70765: :mod:`http.server`: fix default handling of HTTP/0.9 requests in - :class:`~http.server.BaseHTTPRequestHandler`. Previously, - :meth:`!BaseHTTPRequestHandler.parse_request` incorrectly waited for - headers in the request although those are not supported in HTTP/0.9. Patch - by Bénédikt Tran. 
-
-- gh-139391: Fix an issue when, on non-Windows platforms, it was not
-  possible to gracefully exit a ``python -m asyncio`` process suspended by
-  Ctrl+Z and later resumed by :manpage:`fg` other than with :manpage:`kill`.
-
-- gh-101828: Fix ``'shift_jisx0213'``, ``'shift_jis_2004'``,
-  ``'euc_jisx0213'`` and ``'euc_jis_2004'`` codecs truncating null chars as
-  they were treated as part of multi-character sequences.
-
-- gh-139246: Fix incorrect width calculation when pasting zero-width
-  characters in the default REPL.
-
-- gh-90949: Add
-  :meth:`~xml.parsers.expat.xmlparser.SetAllocTrackerActivationThreshold`
-  and
-  :meth:`~xml.parsers.expat.xmlparser.SetAllocTrackerMaximumAmplification`
-  to :ref:`xmlparser <xmlparser-objects>` objects to prevent use of
-  disproportionate amounts of dynamic memory from within an Expat parser.
-  Patch by Bénédikt Tran.
-
-- gh-139065: Fix trailing space before a wrapped long word if the line
-  length is exactly *width* in :mod:`textwrap`.
-
-- gh-138993: Dedent :data:`credits` text.
-
-- gh-138859: Fix generic type parameterization raising a :exc:`TypeError`
-  when omitting a :class:`ParamSpec` that has a default which is not a list
-  of types.
-
-- gh-138775: Use of ``python -m`` with :mod:`base64` has been fixed to
-  detect input from a terminal so that it properly notices EOF.
-
-- gh-98896: Fix a failure in the multiprocessing resource_tracker when
-  SharedMemory names contain colons. Patch by Rani Pinchuk.
-
-- gh-75989: :func:`tarfile.TarFile.extractall` and
-  :func:`tarfile.TarFile.extract` now overwrite symlinks when extracting
-  hardlinks. (Contributed by Alexander Enrique Urieles Nieto in
-  :gh:`75989`.)
-
-- gh-83424: Allow creating a :class:`ctypes.CDLL` without a name when
-  passing a handle as an argument.
-
-- gh-136234: Fix :meth:`asyncio.WriteTransport.writelines` to be robust to
-  connection failure, by using the same behavior as
-  :meth:`~asyncio.WriteTransport.write`.
-
-- gh-136057: Fixed the bug in :mod:`pdb` and :mod:`bdb` where ``next`` and
-  ``step`` could not step over a line if that line contains a loop.
-
-- gh-135307: :mod:`email`: Fix exception in ``set_content()`` when encoding
-  text and *max_line_length* is set to ``0`` or ``None`` (unlimited).
-
-- gh-134453: Fixed :func:`subprocess.Popen.communicate` ``input=`` handling
-  of :class:`memoryview` instances that were non-byte shaped on POSIX
-  platforms. Those are now properly cast to a byte-shaped view instead of
-  truncating the input. Windows platforms did not have this bug.
-
-- gh-102431: Clarify constraints for "logical" arguments in methods of
-  :class:`decimal.Context`.
-
-IDLE
-----
-
-- gh-96491: Deduplicate the version number in the IDLE shell title bar after
-  saving to a file.
-
-Documentation
--------------
-
-- gh-141994: :mod:`xml.sax.handler`: Make the documentation of
-  :data:`xml.sax.handler.feature_external_ges` warn of opening up to
-  `external entity attacks
-  <https://en.wikipedia.org/wiki/XML_external_entity_attack>`_. Patch by
-  Sebastian Pipping.
-
-- gh-140578: Remove an outdated sentence in the documentation for
-  :mod:`multiprocessing` that implied that
-  :class:`concurrent.futures.ThreadPoolExecutor` did not exist.
-
-Core and Builtins
------------------
-
-- gh-142048: Fix quadratically increasing garbage collection delays in the
-  free-threaded build.
-
-- gh-141930: When importing a module, use Python's regular file object to
-  ensure that writes to ``.pyc`` files are complete or an appropriate error
-  is raised.
-
-- gh-120158: Fix inconsistent state when enabling or disabling monitoring
-  events too many times.
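The monitoring entry above (gh-120158) concerns the `sys.monitoring` API; the sketch below shows the kind of repeated enable/disable cycle that used to leave internal state inconsistent. The tool name and loop count are arbitrary; the calls themselves are the documented API:

```python
import sys

TOOL = sys.monitoring.DEBUGGER_ID
sys.monitoring.use_tool_id(TOOL, "toggle-demo")
sys.monitoring.register_callback(
    TOOL, sys.monitoring.events.LINE, lambda code, line: None)

# Repeatedly turning events on and off is exactly the pattern the
# fix makes safe.
for _ in range(10_000):
    sys.monitoring.set_events(TOOL, sys.monitoring.events.LINE)
    sys.monitoring.set_events(TOOL, sys.monitoring.events.NO_EVENTS)

sys.monitoring.free_tool_id(TOOL)
```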
-
-- gh-141579: Fix :func:`sys.activate_stack_trampoline` to properly support
-  the ``perf_jit`` backend. Patch by Pablo Galindo.
-
-- gh-141312: Fix an assertion failure in the ``__setstate__`` method of the
-  range iterator when a non-integer argument is passed. Patch by Sergey
-  Miryanov.
-
-- gh-140939: Fix memory leak when :class:`bytearray` or :class:`bytes` is
-  formatted with the ``%*b`` format with a large width that results in a
-  :exc:`MemoryError`.
-
-- gh-140530: Fix a reference leak when ``raise exc from cause`` fails. Patch
-  by Bénédikt Tran.
-
-- gh-140576: Fixed a crash in :func:`tokenize.generate_tokens` in case of
-  specific incorrect input. Patch by Mikhail Efimov.
-
-- gh-140551: Fixed a crash in :class:`dict` if :meth:`dict.clear` is called
-  at the lookup stage. Patch by Mikhail Efimov and Inada Naoki.
-
-- gh-140471: Fix a potential buffer overflow in :class:`ast.AST` node
-  initialization when encountering a malformed :attr:`~ast.AST._fields`
-  containing a non-:class:`str`.
-
-- gh-140406: Fix a memory leak when an object's :meth:`~object.__hash__`
-  method returns an object that isn't an :class:`int`.
-
-- gh-140306: Fix memory leaks in cross-interpreter channel operations and
-  shared namespace handling.
-
-- gh-140301: Fix a memory leak of ``PyConfig`` in subinterpreters.
-
-- gh-140000: Fix a potential memory leak when a reference cycle exists
-  between an instance of :class:`typing.TypeAliasType`,
-  :class:`typing.TypeVar`, :class:`typing.ParamSpec`, or
-  :class:`typing.TypeVarTuple` and its ``__name__`` attribute. Patch by
-  Mikhail Efimov.
-
-- gh-139748: Fix reference leaks in error branches of functions accepting
-  path strings or bytes, such as :func:`compile` and :func:`os.system`.
-  Patch by Bénédikt Tran.
-
-- gh-139516: Fix the tokenizer erroneously treating a lambda's colon as the
-  start of a format spec in an f-string.
-
-- gh-139640: Fix swallowing of some syntax warnings in different modules if
-  they accidentally have the same message and are emitted from the same
-  line. Fix duplicated warnings in the ``finally`` block.
-
-- gh-137400: Fix a crash in the :term:`free threading` build when disabling
-  profiling or tracing across all threads with
-  :c:func:`PyEval_SetProfileAllThreads` or
-  :c:func:`PyEval_SetTraceAllThreads` or their Python equivalents
-  :func:`threading.settrace_all_threads` and
-  :func:`threading.setprofile_all_threads`.
-
-- gh-133400: Fixed Ctrl+D (^D) behavior in the _pyrepl module to match the
-  old pre-3.13 REPL behavior.
-
-C API
------
-
-- gh-140042: Removed the ``sqlite3_shutdown()`` call that could cause
-  :mod:`sqlite3` connections to be closed when multiple subinterpreters are
-  used.
-
-- gh-140487: Fix :c:macro:`Py_RETURN_NOTIMPLEMENTED` in limited C API 3.11
-  and older: don't treat ``Py_NotImplemented`` as immortal. Patch by Victor
-  Stinner.
-
-
-What's New in Python 3.13.9 final?
-==================================
-
-*Release date: 2025-10-14*
-
-Library
--------
-
-- gh-139783: Fix :func:`inspect.getsourcelines` for the case when a
-  decorator is followed by a comment or an empty line.
-
-
-What's New in Python 3.13.8 final?
-==================================
-
-*Release date: 2025-10-07*
-
-macOS
------
-
-- gh-124111: Update macOS installer to use Tcl/Tk 8.6.17.
-
-- gh-139573: Updated bundled version of OpenSSL to 3.0.18.
-
-Windows
--------
-
-- gh-139573: Updated bundled version of OpenSSL to 3.0.18.
-
-- gh-138896: Fix an error installing the C runtime on non-updated Windows
-  machines.
-
-Tools/Demos
------------
-
-- gh-139330: The SBOM generation tool didn't cross-check the version and
-  checksum values against the ``Modules/expat/refresh.sh`` script, leading
-  to the values becoming out-of-date during routine updates.
-
-- gh-137873: The iOS test runner has been simplified, resolving some issues
-  that have been observed using the runner in GitHub Actions and Azure
-  Pipelines test environments.
-
-Tests
------
-
-- gh-139208: Fix regrtest ``--fast-ci --verbose``: don't ignore the
-  ``--verbose`` option anymore. Patch by Victor Stinner.
-
-Security
---------
-
-- gh-139400: :mod:`xml.parsers.expat`: Make sure that parent Expat parsers
-  are only garbage-collected once they are no longer referenced by
-  subparsers created by
-  :meth:`~xml.parsers.expat.xmlparser.ExternalEntityParserCreate`. Patch by
-  Sebastian Pipping.
-
-- gh-139283: :mod:`sqlite3`: correctly handle the maximum number of rows to
-  fetch in :meth:`Cursor.fetchmany <sqlite3.Cursor.fetchmany>` and reject
-  negative values for :attr:`Cursor.arraysize <sqlite3.Cursor.arraysize>`.
-  Patch by Bénédikt Tran.
-
-- gh-135661: Fix CDATA section parsing in :class:`html.parser.HTMLParser`
-  according to the HTML5 standard: ``] ]>`` and ``]] >`` no longer end the
-  CDATA section. Add a private method ``_set_support_cdata()`` which can be
-  used to specify how to parse ``<![CDATA[`` --- as a CDATA section in
-  foreign content (SVG or MathML) or as a bogus comment in the HTML
-  namespace.
-
-Library
--------
-
-- gh-139312: Upgrade bundled libexpat to 2.7.3.
-
-- gh-139289: Do a real lazy-import on :mod:`rlcompleter` in :mod:`pdb` and
-  restore the existing completer after importing :mod:`rlcompleter`.
-
-- gh-139210: Fix use-after-free when reporting an unknown event in
-  :func:`xml.etree.ElementTree.iterparse`. Patch by Ken Jin.
-
-- gh-138860: Lazy import :mod:`rlcompleter` in :mod:`pdb` to avoid a
-  deadlock in subprocess.
-
-- gh-112729: Fix a crash when calling ``_interpreters.create`` when the
-  process is out of memory.
-
-- gh-139076: Fix a bug in the :mod:`pydoc` module that was hiding functions
-  in a Python module if they were implemented in an extension module and the
-  module did not have ``__all__``.
-
-- gh-138998: Update bundled libexpat to 2.7.2.
-
-- gh-130567: Fix a possible crash in :func:`locale.strxfrm` due to a
-  platform bug on macOS.
-
-- gh-138779: Support device numbers larger than ``2**63-1`` for the
-  :attr:`~os.stat_result.st_rdev` field of the :class:`os.stat_result`
-  structure.
-
-- gh-128636: Fix a crash in PyREPL when ``os.environ`` is overwritten with
-  an invalid value on macOS.
-
-- gh-88375: Fix normalization of the ``robots.txt`` rules and URLs in the
-  :mod:`urllib.robotparser` module. No longer ignore trailing ``?``.
-  Distinguish raw special characters ``?``, ``=`` and ``&`` from the
-  percent-encoded ones.
-
-- gh-138515: :mod:`email` is added to the Emscripten build.
-
-- gh-111788: Fix parsing errors in the :mod:`urllib.robotparser` module.
-  Don't fail trying to parse weird paths. Don't fail trying to decode
-  non-UTF-8 ``robots.txt`` files.
-
-- gh-138432: :meth:`zoneinfo.reset_tzpath` will now convert any
-  :class:`os.PathLike` objects it receives into strings before adding them
-  to ``TZPATH``. It will raise :exc:`TypeError` if anything other than a
-  string is found after this conversion. If given an :class:`os.PathLike`
-  object that represents a relative path, it will now raise
-  :exc:`ValueError` instead of :exc:`TypeError`, and present a more
-  informative error message.
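A small sketch of the gh-138432 behavior described above; the search path used is illustrative, and the `ValueError` branch shows the documented rejection of relative paths:

```python
import zoneinfo
from pathlib import Path

# PathLike entries are now converted to strings before being stored.
zoneinfo.reset_tzpath(to=[Path("/usr/share/zoneinfo")])
print(zoneinfo.TZPATH)           # ('/usr/share/zoneinfo',)

try:
    zoneinfo.reset_tzpath(to=[Path("relative/zoneinfo")])
except ValueError as exc:        # relative paths are rejected
    print(exc)
```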
-
-- gh-138008: Fix segmentation faults in the :mod:`ctypes` module due to
-  invalid :attr:`~ctypes._CFuncPtr.argtypes`. Patch by Dung Nguyen.
-
-- gh-60462: Fix :func:`locale.strxfrm` on Solaris (and possibly other
-  platforms).
-
-- gh-138204: Forbid expansion of shared anonymous :mod:`memory maps <mmap>`
-  on Linux, which caused a bus error.
-
-- gh-138010: Fix an issue where defining a class with a
-  :deco:`warnings.deprecated`-decorated base class may not invoke the
-  correct :meth:`~object.__init_subclass__` method in cases involving
-  multiple inheritance. Patch by Brian Schubert.
-
-- gh-138133: Prevent an infinite traceback loop when sending Ctrl+C to
-  Python through ``strace``.
-
-- gh-134869: Fix an issue where pressing Ctrl+C during tab completion in the
-  REPL would leave the autocompletion menu in a corrupted state.
-
-- gh-137317: :func:`inspect.signature` now correctly handles classes that
-  use a descriptor on a wrapped :meth:`!__init__` or :meth:`!__new__`
-  method. Contributed by Yongyu Yan.
-
-- gh-137754: Fix import of the :mod:`zoneinfo` module if the C
-  implementation of the :mod:`datetime` module is not available.
-
-- gh-137490: Handle :data:`~errno.ECANCELED` in the same way as
-  :data:`~errno.EINTR` in :func:`signal.sigwaitinfo` on NetBSD.
-
-- gh-137477: Fix :func:`!inspect.getblock`, :func:`inspect.getsourcelines`
-  and :func:`inspect.getsource` for generator expressions.
-
-- gh-137017: Fix :obj:`threading.Thread.is_alive` to remain ``True`` until
-  the underlying OS thread is fully cleaned up. This avoids false negatives
-  in edge cases involving thread monitoring or premature
-  :obj:`threading.Thread.is_alive` calls.
-
-- gh-136134: :meth:`!SMTP.auth_cram_md5` now raises an
-  :exc:`~smtplib.SMTPException` instead of a :exc:`ValueError` if Python has
-  been built without MD5 support. In particular, :class:`~smtplib.SMTP`
-  clients will not attempt to use this method even if the remote server is
-  assumed to support it. Patch by Bénédikt Tran.
-
-- gh-136134: :meth:`IMAP4.login_cram_md5 <imaplib.IMAP4.login_cram_md5>` now
-  raises an :exc:`IMAP4.error <imaplib.IMAP4.error>` if CRAM-MD5
-  authentication is not supported. Patch by Bénédikt Tran.
-
-- gh-135386: Fix opening a :mod:`dbm.sqlite3` database for reading from a
-  read-only file or directory.
-
-- gh-126631: Fix a :mod:`multiprocessing` ``forkserver`` bug which prevented
-  ``__main__`` from being preloaded.
-
-- gh-123085: In a bare call to :func:`importlib.resources.files`, ensure the
-  caller's frame is properly detected when ``importlib.resources`` is itself
-  available as a compiled module only (no source).
-
-- gh-118981: Fix a potential hang in ``multiprocessing.popen_spawn_posix``
-  that can happen when the child process dies early, by closing the child's
-  file descriptors right away.
-
-- gh-78319: UTF-8 support for the IMAP ``APPEND`` command has been made RFC
-  compliant.
-
-- bpo-38735: Fix a failure when importing a module from the root directory
-  on Unix-like platforms with ``sys.pycache_prefix`` set.
-
-- bpo-41839: Allow negative priority values from the
-  :func:`os.sched_get_priority_min` and :func:`os.sched_get_priority_max`
-  functions.
-
-Core and Builtins
------------------
-
-- gh-134466: Don't run PyREPL in a degraded environment where setting
-  termios attributes is not allowed.
-
-- gh-71810: Raise :exc:`OverflowError` for ``(-1).to_bytes()`` for signed
-  conversions when the bytes count is zero. Patch by Sergey B Kirpichev.
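A quick illustration of the gh-71810 change above; the printed outcome is what the entry describes:

```python
# Zero-length signed conversion of a negative int now fails loudly
# instead of silently producing an empty bytes object.
try:
    (-1).to_bytes(0, "big", signed=True)
except OverflowError as exc:
    print("OverflowError:", exc)

# Zero-length conversion of zero remains valid.
print((0).to_bytes(0, "big"))   # b''
```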
- -- gh-105487: Remove non-existent :meth:`~object.__copy__`, - :meth:`~object.__deepcopy__`, and :attr:`~type.__bases__` from the - :meth:`~object.__dir__` entries of :class:`types.GenericAlias`. - -- gh-134163: Fix a hang when the process is out of memory inside an - exception handler. - -- gh-138479: Fix a crash when a generic object's ``__typing_subst__`` - returns an object that isn't a :class:`tuple`. - -- gh-137576: Fix for incorrect source code being shown in tracebacks from - the Basic REPL when :envvar:`PYTHONSTARTUP` is given. Patch by Adam Hartz. - -- gh-132744: Certain calls now check for runaway recursion and respect the - system recursion limit. - -C API ------ - -- gh-87135: Attempting to acquire the GIL after runtime finalization has - begun in a different thread now causes the thread to hang rather than - terminate, which avoids potential crashes or memory corruption caused by - attempting to terminate a thread that is running code not specifically - designed to support termination. In most cases this hanging is harmless - since the process will soon exit anyway. - - While not officially marked deprecated until 3.14, - ``PyThread_exit_thread`` is no longer called internally and remains solely - for interface compatibility. Its behavior is inconsistent across - platforms, and it can only be used safely in the unlikely case that every - function in the entire call stack has been designed to support the - platform-dependent termination mechanism. It is recommended that users of - this function change their design to not require thread termination. In - the unlikely case that thread termination is needed and can be done - safely, users may migrate to calling platform-specific APIs such as - ``pthread_exit`` (POSIX) or ``_endthreadex`` (Windows) directly. - -Build ------ - -- gh-135734: Python can correctly be configured and built with ``./configure - --enable-optimizations --disable-test-modules``. Previously, the profile - data generation step failed due to PGO tests where immortalization - couldn't be properly suppressed. Patch by Bénédikt Tran. - - -What's New in Python 3.13.7 final? -================================== - -*Release date: 2025-08-14* - -Library -------- - -- gh-137583: Fix a deadlock introduced in 3.13.6 when a call to - :meth:`ssl.SSLSocket.recv ` was blocked in one thread, - and then another method on the object (such as :meth:`ssl.SSLSocket.send - `) was subsequently called in another thread. - -- gh-137044: Return large limit values as positive integers instead of - negative integers in :func:`resource.getrlimit`. Accept large values and - reject negative values (except :data:`~resource.RLIM_INFINITY`) for limits - in :func:`resource.setrlimit`. - -- gh-136914: Fix retrieval of :attr:`doctest.DocTest.lineno` for objects - decorated with :func:`functools.cache` or - :class:`functools.cached_property`. - -- gh-131788: Make ``ResourceTracker.send`` from :mod:`multiprocessing` - re-entrant safe - -Documentation -------------- - -- gh-136155: We are now checking for fatal errors in EPUB builds in CI. - -Core and Builtins ------------------ - -- gh-137400: Fix a crash in the :term:`free threading` build when disabling - profiling or tracing across all threads with - :c:func:`PyEval_SetProfileAllThreads` or - :c:func:`PyEval_SetTraceAllThreads` or their Python equivalents - :func:`threading.settrace_all_threads` and - :func:`threading.setprofile_all_threads`. - - -What's New in Python 3.13.6 final? 
-==================================
-
-*Release date: 2025-08-06*
-
-macOS
------
-
-- gh-137450: macOS installer shell path management improvements: separate
-  the installer ``Shell profile updater`` postinstall script from the
-  ``Update Shell Profile.command`` to enable more robust error handling.
-
-- gh-137134: Update macOS installer to ship with SQLite version 3.50.4.
-
-Windows
--------
-
-- gh-137134: Update Windows installer to ship with SQLite 3.50.4.
-
-Tools/Demos
------------
-
-- gh-135968: Stubs for ``strip`` are now provided as part of an iOS install.
-
-Tests
------
-
-- gh-135966: The iOS testbed now handles the ``app_packages`` folder as a
-  site directory.
-
-- gh-135494: Fix regrtest to support excluding tests from ``--pgo`` tests.
-  Patch by Victor Stinner.
-
-- gh-135489: Show verbose output for failing tests during the PGO profiling
-  step with ``--enable-optimizations``.
-
-Security
---------
-
-- gh-135661: Fix parsing start and end tags in
-  :class:`html.parser.HTMLParser` according to the HTML5 standard.
-
-  * Whitespaces no longer accepted between ``</`` and the tag name.
-    E.g. ``</ script>`` does not end the script section.
-
-  * Vertical tabulation (``\v``) and non-ASCII whitespaces no longer
-    recognized as whitespaces. The only whitespaces are ``\t\n\r\f`` and
-    space.
-
-  * Null character (U+0000) no longer ends the tag name.
-
-  * Attributes and slashes after the tag name in end tags are now ignored,
-    instead of terminating after the first ``>`` in quoted attribute value.
-    E.g. ``</script/foo=">"/>``.
-
-  * Multiple slashes and whitespaces between the last attribute and closing
-    ``>`` are now ignored in both start and end tags. E.g. ``<a foo=bar/ //>``.
-
-  * Multiple ``=`` between attribute name and value are no longer collapsed.
-    E.g. ``<a foo==bar>`` produces attribute "foo" with value "=bar".
-
-- gh-102555: Fix comment parsing in :class:`html.parser.HTMLParser`
-  according to the HTML5 standard. ``--!>`` now ends the comment. ``-- >``
-  no longer ends the comment. Support abnormally ended empty comments
-  ``<-->`` and ``<--->``.
-
-- gh-135462: Fix quadratic complexity in processing specially crafted input
-  in :class:`html.parser.HTMLParser`. End-of-file errors are now handled
-  according to the HTML5 specs -- comments and declarations are
-  automatically closed, tags are ignored.
-
-- gh-118350: Fix support of escapable raw text mode (elements "textarea" and
-  "title") in :class:`html.parser.HTMLParser`.
-
-Library
--------
-
-- gh-132710: If possible, ensure that :func:`uuid.getnode` returns the same
-  result even across different processes. Previously, the result was
-  constant only within the same process. Patch by Bénédikt Tran.
-
-- gh-137273: Fix a debug assertion failure in :func:`locale.setlocale` on
-  Windows.
-
-- gh-137257: Bump the version of pip bundled in ensurepip to version 25.2.
-
-- gh-81325: :class:`tarfile.TarFile` now accepts a :term:`path-like
-  <path-like object>` when working on a tar archive. (Contributed by
-  Alexander Enrique Urieles Nieto in :gh:`81325`.)
-
-- gh-130522: Fix an unraisable :exc:`TypeError` raised during
-  :term:`interpreter shutdown` in the :mod:`threading` module.
-
-- gh-130577: :mod:`tarfile` now validates archives to ensure member offsets
-  are non-negative. (Contributed by Alexander Enrique Urieles Nieto in
-  :gh:`130577`.)
-
-- gh-136549: Fix the signature of :func:`threading.excepthook`.
-
-- gh-136523: Fix :class:`wave.Wave_write` emitting an unraisable when open
-  raises.
- -- gh-52876: Add missing ``keepends`` (default ``True``) parameter to - :meth:`!codecs.StreamReaderWriter.readline` and - :meth:`!codecs.StreamReaderWriter.readlines`. - -- gh-85702: If ``zoneinfo._common.load_tzdata`` is given a package without a - resource a :exc:`zoneinfo.ZoneInfoNotFoundError` is raised rather than a - :exc:`PermissionError`. Patch by Victor Stinner. - -- gh-134759: Fix :exc:`UnboundLocalError` in - :func:`email.message.Message.get_payload` when the payload to decode is a - :class:`bytes` object. Patch by Kliment Lamonov. - -- gh-136028: Fix parsing month names containing "İ" (U+0130, LATIN CAPITAL - LETTER I WITH DOT ABOVE) in :func:`time.strptime`. This affects locales - az_AZ, ber_DZ, ber_MA and crh_UA. - -- gh-135995: In the palmos encoding, make byte ``0x9b`` decode to ``›`` - (U+203A - SINGLE RIGHT-POINTING ANGLE QUOTATION MARK). - -- gh-53203: Fix :func:`time.strptime` for ``%c`` and ``%x`` formats on - locales byn_ER, wal_ET and lzh_TW, and for ``%X`` format on locales ar_SA, - bg_BG and lzh_TW. - -- gh-91555: An earlier change, which was introduced in 3.13.4, has been - reverted. It disabled logging for a logger during handling of log messages - for that logger. Since the reversion, the behaviour should be as it was - before 3.13.4. - -- gh-135878: Fixes a crash of :class:`types.SimpleNamespace` on :term:`free - threading` builds, when several threads were calling its - :meth:`~object.__repr__` method at the same time. - -- gh-135836: Fix :exc:`IndexError` in :meth:`asyncio.loop.create_connection` - that could occur when non-\ :exc:`OSError` exception is raised during - connection and socket's ``close()`` raises :exc:`!OSError`. - -- gh-135836: Fix :exc:`IndexError` in :meth:`asyncio.loop.create_connection` - that could occur when the Happy Eyeballs algorithm resulted in an empty - exceptions list during connection attempts. - -- gh-135855: Raise :exc:`TypeError` instead of :exc:`SystemError` when - :func:`!_interpreters.set___main___attrs` is passed a non-dict object. - Patch by Brian Schubert. - -- gh-135815: :mod:`netrc`: skip security checks if :func:`os.getuid` is - missing. Patch by Bénédikt Tran. - -- gh-135640: Address bug where it was possible to call - :func:`xml.etree.ElementTree.ElementTree.write` on an ElementTree object - with an invalid root element. This behavior blanked the file passed to - ``write`` if it already existed. - -- gh-135444: Fix :meth:`asyncio.DatagramTransport.sendto` to account for - datagram header size when data cannot be sent. - -- gh-135497: Fix :func:`os.getlogin` failing for longer usernames on - BSD-based platforms. - -- gh-135487: Fix :meth:`!reprlib.Repr.repr_int` when given integers with - more than :func:`sys.get_int_max_str_digits` digits. Patch by Bénédikt - Tran. - -- gh-135335: :mod:`multiprocessing`: Flush ``stdout`` and ``stderr`` after - preloading modules in the ``forkserver``. - -- gh-135244: :mod:`uuid`: when the MAC address cannot be determined, the - 48-bit node ID is now generated with a cryptographically-secure - pseudo-random number generator (CSPRNG) as per :rfc:`RFC 9562, §6.10.3 - <9562#section-6.10-3>`. This affects :func:`~uuid.uuid1`. - -- gh-135069: Fix the "Invalid error handling" exception in - :class:`!encodings.idna.IncrementalDecoder` to correctly replace the - 'errors' parameter. - -- gh-134698: Fix a crash when calling methods of :class:`ssl.SSLContext` or - :class:`ssl.SSLSocket` across multiple threads. 
-
-- gh-132124: On POSIX-compliant systems,
-  :func:`!multiprocessing.util.get_temp_dir` now ignores :envvar:`TMPDIR`
-  (and similar environment variables) if the path length of ``AF_UNIX``
-  socket files exceeds the platform-specific maximum length when using the
-  *forkserver* start method. Patch by Bénédikt Tran.
-
-- gh-133439: Fix dot commands with trailing spaces being mistaken for
-  multi-line SQL statements in the sqlite3 command-line interface.
-
-- gh-132969: Prevent the :class:`~concurrent.futures.ProcessPoolExecutor`
-  executor thread, which remains running when :meth:`shutdown(wait=False)
-  <concurrent.futures.Executor.shutdown>` is called, from attempting to
-  adjust the pool's worker processes after the object state has already
-  been reset during shutdown. A combination of conditions, including a
-  worker process having terminated abnormally, resulted in an exception and
-  a potential hang when the still-running executor thread attempted to
-  replace dead workers within the pool.
-
-- gh-130664: Support the ``'_'`` digit separator in formatting of the
-  integral part of :class:`~decimal.Decimal`'s. Patch by Sergey B
-  Kirpichev.
-
-- gh-85702: If ``zoneinfo._common.load_tzdata`` is given a package without
-  a resource, a ``ZoneInfoNotFoundError`` is raised rather than an
-  :exc:`IsADirectoryError`.
-
-- gh-130664: Handle a corner case for :class:`~fractions.Fraction`'s
-  formatting: treat zero-padding (preceding the width field by a zero
-  (``'0'``) character) as equivalent to a fill character of ``'0'`` with
-  an alignment type of ``'='``, just as in the case of :class:`float`'s.
-
-Documentation
--------------
-
-- gh-135171: Document that the :term:`iterator` for the leftmost
-  :keyword:`!for` clause in a generator expression is created immediately.
-
-Core and Builtins
------------------
-
-- gh-58124: Fix the name of the Python encoding in Unicode errors of the
-  code page codec: use "cp65000" and "cp65001" instead of "CP_UTF7" and
-  "CP_UTF8", which are not valid Python codec names. Patch by Victor
-  Stinner.
-
-- gh-137314: Fixed a regression where raw f-strings incorrectly interpreted
-  escape sequences in format specifications. Raw f-strings now properly
-  preserve literal backslashes in format specs, matching the behavior from
-  Python 3.11. For example, ``rf"{obj:\xFF}"`` now correctly produces
-  ``'\\xFF'`` instead of ``'ÿ'``. Patch by Pablo Galindo.
-
-- gh-136541: Fix some issues with the perf trampolines on x86-64 and
-  aarch64. The trampolines were not being generated correctly for some
-  cases, which could lead to the perf integration not working correctly.
-  Patch by Pablo Galindo.
-
-- gh-109700: Fix memory error handling in :c:func:`PyDict_SetDefault`.
-
-- gh-78465: Fix the error message for ``cls.__new__(cls, ...)`` where
-  ``cls`` is a non-instantiable builtin or extension type (with ``tp_new``
-  set to ``NULL``).
-
-- gh-135871: Non-blocking mutex lock attempts now return immediately when
-  the lock is busy instead of briefly spinning in the :term:`free threading`
-  build.
-
-- gh-135607: Fix potential :mod:`weakref` races in an object's destructor on
-  the :term:`free threaded <free threading>` build.
-
-- gh-135496: Fix a typo in the f-string conversion type error
-  ("exclamanation" -> "exclamation").
-
-- gh-130077: Properly raise custom syntax errors when incorrect syntax
-  containing names that are prefixes of soft keywords is encountered. Patch
-  by Pablo Galindo.
-
-- gh-135148: Fixed a bug where f-string debug expressions (using =) would
-  incorrectly strip out parts of strings containing escaped quotes and #
-  characters.
Patch by Pablo Galindo. - -- gh-133136: Limit excess memory usage in the :term:`free threading` build - when a large dictionary or list is resized and accessed by multiple - threads. - -- gh-132617: Fix :meth:`dict.update` modification check that could - incorrectly raise a "dict mutated during update" error when a different - dictionary was modified that happens to share the same underlying keys - object. - -- gh-91153: Fix a crash when a :class:`bytearray` is concurrently mutated - during item assignment. - -- gh-127971: Fix off-by-one read beyond the end of a string in string - search. - -- gh-125723: Fix crash with ``gi_frame.f_locals`` when generator frames - outlive their generator. Patch by Mikhail Efimov. - -Build ------ - -- gh-135497: Fix the detection of ``MAXLOGNAME`` in the ``configure.ac`` - script. - - -What's New in Python 3.13.5 final? -================================== - -*Release date: 2025-06-11* - -Windows -------- - -- gh-135151: Avoid distributing modified :file:`pyconfig.h` in the - traditional installer. Extension module builds must always specify - ``Py_GIL_DISABLED`` when targeting the free-threaded runtime. - -Tests ------ - -- gh-135120: Add :func:`!test.support.subTests`. - -Library -------- - -- gh-133967: Do not normalize :mod:`locale` name 'C.UTF-8' to 'en_US.UTF-8'. - -- gh-135326: Restore support of integer-like objects with :meth:`!__index__` - in :func:`random.getrandbits`. - -- gh-135321: Raise a correct exception for values greater than 0x7fffffff - for the ``BINSTRING`` opcode in the C implementation of :mod:`pickle`. - -- gh-135276: Backported bugfixes in zipfile.Path from zipp 3.23. Fixed - ``.name``, ``.stem`` and other basename-based properties on Windows when - working with a zipfile on disk. - -- gh-134151: :mod:`email`: Fix :exc:`TypeError` in - :func:`email.utils.decode_params` when sorting :rfc:`2231` continuations - that contain an unnumbered section. - -- gh-134152: :mod:`email`: Fix parsing of email message ID with invalid - domain. - -- gh-127081: Fix libc thread safety issues with :mod:`os` by replacing - ``getlogin`` with ``getlogin_r`` re-entrant version. - -- gh-131884: Fix formatting issues in :func:`json.dump` when both *indent* - and *skipkeys* are used. - -Core and Builtins ------------------ - -- gh-135171: Roll back changes to generator and list comprehensions that - went into 3.13.4 to fix GH-127682, but which involved semantic and - bytecode changes not appropriate for a bugfix release. - -C API ------ - -- gh-134989: Fix ``Py_RETURN_NONE``, ``Py_RETURN_TRUE`` and - ``Py_RETURN_FALSE`` macros in the limited C API 3.11 and older: don't - treat ``Py_None``, ``Py_True`` and ``Py_False`` as immortal. Patch by - Victor Stinner. - -- gh-134989: Implement :c:func:`PyObject_DelAttr` and - :c:func:`PyObject_DelAttrString` as macros in the limited C API 3.12 and - older. Patch by Victor Stinner. - - -What's New in Python 3.13.4 final? -================================== - -*Release date: 2025-06-03* - -Windows -------- - -- gh-130727: Fix a race in internal calls into WMI that can result in an - "invalid handle" exception under high load. Patch by Chris Eibl. - -- gh-76023: Make :func:`os.path.realpath` ignore Windows error 1005 when in - non-strict mode. - -- gh-133626: Ensures packages are not accidentally bundled into the - traditional installer. - -- gh-133512: Add warnings to :ref:`launcher` about use of subcommands - belonging to the Python install manager. - -Tests ------ - -- gh-133744: Fix multiprocessing interrupt test. 
Add an event to synchronize
-  the parent process with the child process: wait until the child process
-  starts sleeping. Patch by Victor Stinner.
-
-- gh-133639: Fix ``TestPyReplAutoindent.test_auto_indent_default()`` not
-  running ``input_code``.
-
-- gh-133131: The iOS testbed will now select the most recently released
-  "SE-class" device for testing if a device isn't explicitly specified.
-
-- gh-109981: The test helper that counts the list of open file descriptors
-  now uses the optimised ``/dev/fd`` approach on all Apple platforms, not
-  just macOS. This avoids crashes caused by guarded file descriptors.
-
-Security
---------
-
-- gh-135034: Fixes multiple issues that allowed ``tarfile`` extraction
-  filters (``filter="data"`` and ``filter="tar"``) to be bypassed using
-  crafted symlinks and hard links.
-
-  Addresses :cve:`2024-12718`, :cve:`2025-4138`, :cve:`2025-4330`, and
-  :cve:`2025-4517`.
-
-- gh-133767: Fix use-after-free in the "unicode-escape" decoder with a
-  non-"strict" error handler.
-
-- gh-128840: Short-circuit the processing of long IPv6 addresses early in
-  :mod:`ipaddress` to prevent excessive memory consumption and a minor
-  denial-of-service.
-
-Library
--------
-
-- gh-134718: :func:`ast.dump` now only omits ``None`` and ``[]`` values if
-  they are default values.
-
-- gh-128840: Fix parsing long IPv6 addresses with an embedded IPv4 address.
-
-- gh-134696: Built-in HACL* and OpenSSL implementations of hash function
-  constructors now correctly accept the same *documented* named arguments.
-  For instance, :func:`~hashlib.md5` could previously be invoked as
-  ``md5(data=data)`` or ``md5(string=string)`` depending on the underlying
-  implementation, but these calls were not compatible. Patch by Bénédikt
-  Tran.
-
-- gh-134210: :func:`curses.window.getch` now correctly handles signals.
-  Patch by Bénédikt Tran.
-
-- gh-80334: :func:`multiprocessing.freeze_support` now checks for work on
-  any "spawn" start method platform rather than only on Windows.
-
-- gh-114177: Fix :mod:`asyncio` to not close subprocess pipes which would
-  otherwise error out when the event loop is already closed.
-
-- gh-134152: Fixed an :exc:`UnboundLocalError` that could occur during
-  :mod:`email` header parsing if an expected trailing delimiter is missing
-  in some contexts.
-
-- gh-62184: Remove the import of the C implementation of :class:`io.FileIO`
-  from the Python implementation, which has its own implementation.
-
-- gh-133982: Emit :exc:`RuntimeWarning` in the Python implementation of
-  :mod:`io` when the :term:`file-like object <file object>` is not closed
-  explicitly in the presence of multiple I/O layers.
-
-- gh-133890: The :mod:`tarfile` module now handles :exc:`UnicodeEncodeError`
-  in the same way as :exc:`OSError` when a member cannot be extracted.
-
-- gh-134097: Fix the interaction of the new :term:`REPL` and the :option:`-X
-  showrefcount <-X>` command line option.
-
-- gh-133889: The generated directory listing page in
-  :class:`http.server.SimpleHTTPRequestHandler` now only shows the decoded
-  path component of the requested URL, and not the query and fragment.
-
-- gh-134098: Fix handling of paths that end with a percent-encoded slash
-  (``%2f`` or ``%2F``) in :class:`http.server.SimpleHTTPRequestHandler`.
-
-- gh-134062: :mod:`ipaddress`: fix collisions in :meth:`~object.__hash__`
-  for :class:`~ipaddress.IPv4Network` and :class:`~ipaddress.IPv6Network`
-  objects.
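The gh-134062 entry above is easiest to see with a quick sketch; the networks chosen are arbitrary, and distinct hashes are the expected (not guaranteed-for-every-pair) outcome of the fix:

```python
import ipaddress

nets = [
    ipaddress.ip_network("192.168.1.0/24"),
    ipaddress.ip_network("192.168.2.0/24"),
    ipaddress.ip_network("2001:db8::/64"),
]

# Before the fix, unequal networks collided far too often, degrading
# dicts and sets keyed by network objects.
print({hash(n) for n in nets})
print(len({n: None for n in nets}) == 3)
```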
-
-- gh-133745: In 3.13.3 we accidentally changed the signature of the asyncio
-  ``create_task()`` family of methods and how it calls a custom task factory
-  in a backwards incompatible way. Since some 3rd party libraries have
-  already made changes to work around the issue that might break if we
-  simply reverted the changes, we're instead changing things to be backwards
-  compatible with 3.13.2 while still supporting those workarounds for
-  3.13.3. In particular, the special-casing of ``name`` and ``context`` is
-  back (until 3.14) and consequently eager tasks may still find that their
-  name hasn't been set before they execute their first yielding await.
-
-- gh-71253: Raise :exc:`ValueError` in :func:`open` if *opener* returns a
-  negative file descriptor in the Python implementation of :mod:`io` to
-  match the C implementation.
-
-- gh-77057: Fix handling of invalid markup declarations in
-  :class:`html.parser.HTMLParser`.
-
-- gh-133489: :func:`random.getrandbits` can now generate more than 2\
-  :sup:`31` bits. :func:`random.randbytes` can now generate more than 256
-  MiB.
-
-- gh-133290: Fix an attribute caching issue when setting
-  :attr:`ctypes._Pointer._type_` in the undocumented and deprecated
-  :func:`!ctypes.SetPointerType` function and the undocumented
-  :meth:`!set_type` method.
-
-- gh-132876: ``ldexp()`` on Windows doesn't round subnormal results before
-  Windows 11, but should. Python's :func:`math.ldexp` wrapper now does
-  round them, so results may change slightly, in rare cases of very small
-  results, on Windows versions before 11.
-
-- gh-133089: Use the original timeout value for
-  :exc:`subprocess.TimeoutExpired` when :func:`subprocess.run` is called
-  with a timeout, instead of the sometimes confusing partial remaining
-  timeout value used internally on the final ``wait()``.
-
-- gh-133009: :mod:`xml.etree.ElementTree`: Fix a crash in
-  :meth:`Element.__deepcopy__ <xml.etree.ElementTree.Element.__deepcopy__>`
-  when the element is concurrently mutated. Patch by Bénédikt Tran.
-
-- gh-132995: Bump the version of pip bundled in ensurepip to version 25.1.1.
-
-- gh-132017: Fix an error when ``pyrepl`` is suspended, then resumed and
-  terminated.
-
-- gh-132673: Fix a crash when using ``_align_ = 0`` and ``_fields_ = []`` in
-  a :class:`ctypes.Structure`.
-
-- gh-132527: Include the valid typecode 'w' in the error message when an
-  invalid typecode is passed to :class:`array.array`.
-
-- gh-132439: Fix ``PyREPL`` on Windows: characters entered via AltGr are no
-  longer swallowed. Patch by Chris Eibl.
-
-- gh-132429: Fix support of Bluetooth sockets on NetBSD and DragonFly BSD.
-
-- gh-132106: :meth:`QueueListener.start
-  <logging.handlers.QueueListener.start>` now raises a :exc:`RuntimeError`
-  if the listener is already started.
-
-- gh-132417: Fix a ``NULL`` pointer dereference when a C function called
-  using :mod:`ctypes` with ``restype`` :class:`~ctypes.py_object` returns
-  ``NULL``.
-
-- gh-132385: Fix instance error suggestions triggering potential exceptions
-  in :meth:`object.__getattr__` in :mod:`traceback`.
-
-- gh-132308: A :class:`traceback.TracebackException` now correctly renders
-  the ``__context__`` and ``__cause__`` attributes from :ref:`falsey
-  <truth>` :class:`Exception`, and the ``exceptions`` attribute from falsey
-  :class:`ExceptionGroup`.
-
-- gh-132250: Fixed the :exc:`SystemError` in :mod:`cProfile` when locating
-  the actual C function of a method raises an exception.
-
-- gh-132063: Prevent exceptions that evaluate as falsey (namely, when their
-  ``__bool__`` method returns ``False`` or their ``__len__`` method returns
-  0) from being ignored by :class:`concurrent.futures.ProcessPoolExecutor`
-  and :class:`concurrent.futures.ThreadPoolExecutor`.
-
-- gh-119605: Respect ``follow_wrapped`` for :meth:`!__init__` and
-  :meth:`!__new__` methods when getting the class signature for a class with
-  :func:`inspect.signature`. Preserve the class signature after wrapping
-  with :func:`warnings.deprecated`. Patch by Xuehai Pan.
-
-- gh-91555: Ignore log messages generated during handling of log messages,
-  to avoid deadlock or infinite recursion. [NOTE: This change has since been
-  reverted.]
-
-- gh-131434: Improve error reporting for an incorrect format in
-  :func:`time.strptime`.
-
-- gh-131127: Systems using LibreSSL now successfully build.
-
-- gh-130999: Avoid exiting the new REPL and offer suggestions even if there
-  are non-string candidates when errors occur.
-
-- gh-130941: Fix :class:`configparser.ConfigParser` parsing empty
-  interpolation with ``allow_no_value`` set to ``True``.
-
-- gh-129098: Fix REPL traceback reporting when using :func:`compile` with a
-  nonexistent file. Patch by Bénédikt Tran.
-
-- gh-130631: :func:`!http.cookiejar.join_header_words` is now more similar
-  to the original Perl version. It now quotes the same set of characters
-  and always quotes values that end with ``"\n"``.
-
-- gh-129719: Fix the missing :data:`!socket.CAN_RAW_ERR_FILTER` constant in
-  the socket module on Linux systems. It was missing since Python 3.11.
-
-- gh-124096: Turn on virtual terminal mode and enable bracketed paste in the
-  REPL on the Windows console. (If the terminal does not support bracketed
-  paste, enabling it does nothing.)
-
-- gh-122559: Remove the :meth:`!__reduce__` and :meth:`!__reduce_ex__`
-  methods that always raise :exc:`TypeError` in the C implementation of
-  :class:`io.FileIO`, :class:`io.BufferedReader`, :class:`io.BufferedWriter`
-  and :class:`io.BufferedRandom` and replace them with default
-  :meth:`!__getstate__` methods that raise :exc:`!TypeError`. This restores
-  fine details of the behavior of Python 3.11 and older versions.
-
-- gh-122179: :func:`hashlib.file_digest` now raises :exc:`BlockingIOError`
-  when no data is available during non-blocking I/O. Before, it added
-  spurious null bytes to the digest.
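A POSIX-only sketch of the gh-122179 behavior above; the pipe setup is just a convenient way to obtain a non-blocking file object with no data ready:

```python
import hashlib
import os

r, w = os.pipe()
os.set_blocking(r, False)            # reads return None instead of blocking

with open(r, "rb", buffering=0) as reader:
    try:
        hashlib.file_digest(reader, "sha256")
    except BlockingIOError:
        print("no data available yet")   # previously: null bytes were hashed
os.close(w)
```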
-
-- gh-86155: :meth:`html.parser.HTMLParser.close` no longer loses data when
-  the ``<script>`` element is not closed.
-
-    def js_output(self, attrs=None):
-        """Return a string suitable for JavaScript."""
-        output_string = self.OutputString(attrs)
-        if _has_control_character(output_string):
-            raise CookieError("Control characters are not allowed in cookies")
-        return """
-        <script type="text/javascript">
-        <!-- begin hiding
-        document.cookie = \"%s\";
-        // end hiding -->
-        </script>
-        """ % (output_string.replace('"', r'\"'))
-
-    def OutputString(self, attrs=None):
-        # Build up our result
-        #
-        result = []
-        append = result.append
-
-        # First, the key=value pair
-        append("%s=%s" % (self.key, self.coded_value))
-
-        # Now add any defined attributes
-        if attrs is None:
-            attrs = self._reserved
-        items = sorted(self.items())
-        for key, value in items:
-            if value == "":
-                continue
-            if key not in attrs:
-                continue
-            if key == "expires" and isinstance(value, int):
-                append("%s=%s" % (self._reserved[key], _getdate(value)))
-            elif key == "max-age" and isinstance(value, int):
-                append("%s=%d" % (self._reserved[key], value))
-            elif key == "comment" and isinstance(value, str):
-                append("%s=%s" % (self._reserved[key], _quote(value)))
-            elif key in self._flags:
-                if value:
-                    append(str(self._reserved[key]))
-            else:
-                append("%s=%s" % (self._reserved[key], value))
-
-        # Return the result
-        return _semispacejoin(result)
-
-    __class_getitem__ = classmethod(types.GenericAlias)
-
-
-#
-# Pattern for finding cookie
-#
-# This used to be strict parsing based on the RFC2109 and RFC2068
-# specifications.  I have since discovered that MSIE 3.0x doesn't
-# follow the character rules outlined in those specs.  As a
-# result, the parsing rules here are less strict.
-#
-
-_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
-_LegalValueChars = _LegalKeyChars + r'\[\]'
-_CookiePattern = re.compile(r"""
-    \s*                            # Optional whitespace at start of cookie
-    (?P<key>                       # Start of group 'key'
-    [""" + _LegalKeyChars + r"""]+?  # Any word of at least one letter
-    )                              # End of group 'key'
-    (                              # Optional group: there may not be a value.
-    \s*=\s*                          # Equal Sign
-    (?P<val>                         # Start of group 'val'
-    "(?:[^\\"]|\\.)*"                  # Any double-quoted string
-    |                                  # or
-      # Special case for "expires" attr
-    (\w{3,6}day|\w{3}),\s              # Day of the week or abbreviated day
-    [\w\d\s-]{9,11}\s[\d:]{8}\sGMT     # Date and time in specific format
-    |                                  # or
-    [""" + _LegalValueChars + r"""]*   # Any word or empty string
-    )                                # End of group 'val'
-    )?                             # End of optional value group
-    \s*                            # Any number of spaces.
-    (\s+|;|$)                      # Ending either at space, semicolon, or EOS.
-    """, re.ASCII | re.VERBOSE)    # re.ASCII may be removed if safe.
-
-
-# At long last, here is the cookie class.  Using this class is almost just
-# like using a dictionary.  See this module's docstring for example usage.
-#
-class BaseCookie(dict):
-    """A container class for a set of Morsels."""
-
-    def value_decode(self, val):
-        """real_value, coded_value = value_decode(STRING)
-        Called prior to setting a cookie's value from the network
-        representation.  The VALUE is the value read from HTTP
-        header.
-        Override this function to modify the behavior of cookies.
-        """
-        return val, val
-
-    def value_encode(self, val):
-        """real_value, coded_value = value_encode(VALUE)
-        Called prior to setting a cookie's value from the dictionary
-        representation.  The VALUE is the value being assigned.
-        Override this function to modify the behavior of cookies.
- """ - strval = str(val) - return strval, strval - - def __init__(self, input=None): - if input: - self.load(input) - - def __set(self, key, real_value, coded_value): - """Private method for setting a cookie's value""" - M = self.get(key, Morsel()) - M.set(key, real_value, coded_value) - dict.__setitem__(self, key, M) - - def __setitem__(self, key, value): - """Dictionary style assignment.""" - if isinstance(value, Morsel): - # allow assignment of constructed Morsels (e.g. for pickling) - dict.__setitem__(self, key, value) - else: - rval, cval = self.value_encode(value) - self.__set(key, rval, cval) - - def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"): - """Return a string suitable for HTTP.""" - result = [] - items = sorted(self.items()) - for key, value in items: - value_output = value.output(attrs, header) - if _has_control_character(value_output): - raise CookieError("Control characters are not allowed in cookies") - result.append(value_output) - return sep.join(result) - - __str__ = output - - def __repr__(self): - l = [] - items = sorted(self.items()) - for key, value in items: - l.append('%s=%s' % (key, repr(value.value))) - return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l)) - - def js_output(self, attrs=None): - """Return a string suitable for JavaScript.""" - result = [] - items = sorted(self.items()) - for key, value in items: - result.append(value.js_output(attrs)) - return _nulljoin(result) - - def load(self, rawdata): - """Load cookies from a string (presumably HTTP_COOKIE) or - from a dictionary. Loading cookies from a dictionary 'd' - is equivalent to calling: - map(Cookie.__setitem__, d.keys(), d.values()) - """ - if isinstance(rawdata, str): - self.__parse_string(rawdata) - else: - # self.update() wouldn't call our custom __setitem__ - for key, value in rawdata.items(): - self[key] = value - return - - def __parse_string(self, str, patt=_CookiePattern): - i = 0 # Our starting point - n = len(str) # Length of string - parsed_items = [] # Parsed (type, key, value) triples - morsel_seen = False # A key=value pair was previously encountered - - TYPE_ATTRIBUTE = 1 - TYPE_KEYVALUE = 2 - - # We first parse the whole cookie string and reject it if it's - # syntactically invalid (this helps avoid some classes of injection - # attacks). - while 0 <= i < n: - # Start looking for a cookie - match = patt.match(str, i) - if not match: - # No more cookies - break - - key, value = match.group("key"), match.group("val") - i = match.end(0) - - if key[0] == "$": - if not morsel_seen: - # We ignore attributes which pertain to the cookie - # mechanism as a whole, such as "$Version". - # See RFC 2965. (Does anyone care?) - continue - parsed_items.append((TYPE_ATTRIBUTE, key[1:], value)) - elif key.lower() in Morsel._reserved: - if not morsel_seen: - # Invalid cookie string - return - if value is None: - if key.lower() in Morsel._flags: - parsed_items.append((TYPE_ATTRIBUTE, key, True)) - else: - # Invalid cookie string - return - else: - parsed_items.append((TYPE_ATTRIBUTE, key, _unquote(value))) - elif value is not None: - parsed_items.append((TYPE_KEYVALUE, key, self.value_decode(value))) - morsel_seen = True - else: - # Invalid cookie string - return - - # The cookie string is valid, apply it. 
- M = None # current morsel - for tp, key, value in parsed_items: - if tp == TYPE_ATTRIBUTE: - assert M is not None - M[key] = value - else: - assert tp == TYPE_KEYVALUE - rval, cval = value - self.__set(key, rval, cval) - M = self[key] - - -class SimpleCookie(BaseCookie): - """ - SimpleCookie supports strings as cookie values. When setting - the value using the dictionary assignment notation, SimpleCookie - calls the builtin str() to convert the value to a string. Values - received from HTTP are kept as strings. - """ - def value_decode(self, val): - return _unquote(val), val - - def value_encode(self, val): - strval = str(val) - return strval, _quote(strval) diff --git a/Python313_13_x86_Template/Lib/http/server.py b/Python313_13_x86_Template/Lib/http/server.py deleted file mode 100644 index 0ec47900..00000000 --- a/Python313_13_x86_Template/Lib/http/server.py +++ /dev/null @@ -1,1351 +0,0 @@ -"""HTTP server classes. - -Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see -SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST, -and (deprecated) CGIHTTPRequestHandler for CGI scripts. - -It does, however, optionally implement HTTP/1.1 persistent connections. - -Notes on CGIHTTPRequestHandler ------------------------------- - -This class is deprecated. It implements GET and POST requests to cgi-bin scripts. - -If the os.fork() function is not present (Windows), subprocess.Popen() is used, -with slightly altered but never documented semantics. Use from a threaded -process is likely to trigger a warning at os.fork() time. - -In all cases, the implementation is intentionally naive -- all -requests are executed synchronously. - -SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL --- it may execute arbitrary Python code or external programs. - -Note that status code 200 is sent prior to execution of a CGI script, so -scripts cannot send other status codes such as 302 (redirect). - -XXX To do: - -- log requests even later (to capture byte count) -- log user-agent header and other interesting goodies -- send error log to separate file -""" - - -# See also: -# -# HTTP Working Group T. Berners-Lee -# INTERNET-DRAFT R. T. Fielding -# H. Frystyk Nielsen -# Expires September 8, 1995 March 8, 1995 -# -# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt -# -# and -# -# Network Working Group R. Fielding -# Request for Comments: 2616 et al -# Obsoletes: 2068 June 1999 -# Category: Standards Track -# -# URL: http://www.faqs.org/rfcs/rfc2616.html - -# Log files -# --------- -# -# Here's a quote from the NCSA httpd docs about log file format. -# -# | The logfile format is as follows. Each line consists of: -# | -# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb -# | -# | host: Either the DNS name or the IP number of the remote client -# | rfc931: Any information returned by identd for this person, -# | - otherwise. -# | authuser: If user sent a userid for authentication, the user name, -# | - otherwise. -# | DD: Day -# | Mon: Month (calendar name) -# | YYYY: Year -# | hh: hour (24-hour format, the machine's timezone) -# | mm: minutes -# | ss: seconds -# | request: The first line of the HTTP request as sent by the client. -# | ddd: the status code returned by the server, - if not available. -# | bbbb: the total number of bytes sent, -# | *not including the HTTP/1.0 header*, - if not available -# | -# | You can determine the name of the file accessed through request. 
-#
-# (Actually, the latter is only true if you know the server configuration
-# at the time the request was made!)
-
-__version__ = "0.6"
-
-__all__ = [
-    "HTTPServer", "ThreadingHTTPServer", "BaseHTTPRequestHandler",
-    "SimpleHTTPRequestHandler", "CGIHTTPRequestHandler",
-]
-
-import copy
-import datetime
-import email.utils
-import html
-import http.client
-import io
-import itertools
-import mimetypes
-import os
-import posixpath
-import select
-import shutil
-import socket # For gethostbyaddr()
-import socketserver
-import sys
-import time
-import urllib.parse
-
-from http import HTTPStatus
-
-
-# Default error message template
-DEFAULT_ERROR_MESSAGE = """\
-<!DOCTYPE HTML>
-<html lang="en">
-    <head>
-        <meta charset="utf-8">
-        <title>Error response</title>
-    </head>
-    <body>
-        <h1>Error response</h1>
-        <p>Error code: %(code)d</p>
-        <p>Message: %(message)s.</p>
-        <p>Error code explanation: %(code)s - %(explain)s.</p>
-    </body>
-</html>
- - -""" - -DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8" - -# Data larger than this will be read in chunks, to prevent extreme -# overallocation. -_MIN_READ_BUF_SIZE = 1 << 20 - -class HTTPServer(socketserver.TCPServer): - - allow_reuse_address = 1 # Seems to make sense in testing environment - - def server_bind(self): - """Override server_bind to store the server name.""" - socketserver.TCPServer.server_bind(self) - host, port = self.server_address[:2] - self.server_name = socket.getfqdn(host) - self.server_port = port - - -class ThreadingHTTPServer(socketserver.ThreadingMixIn, HTTPServer): - daemon_threads = True - - -class BaseHTTPRequestHandler(socketserver.StreamRequestHandler): - - """HTTP request handler base class. - - The following explanation of HTTP serves to guide you through the - code as well as to expose any misunderstandings I may have about - HTTP (so you don't need to read the code to figure out I'm wrong - :-). - - HTTP (HyperText Transfer Protocol) is an extensible protocol on - top of a reliable stream transport (e.g. TCP/IP). The protocol - recognizes three parts to a request: - - 1. One line identifying the request type and path - 2. An optional set of RFC-822-style headers - 3. An optional data part - - The headers and data are separated by a blank line. - - The first line of the request has the form - - - - where is a (case-sensitive) keyword such as GET or POST, - is a string containing path information for the request, - and should be the string "HTTP/1.0" or "HTTP/1.1". - is encoded using the URL encoding scheme (using %xx to signify - the ASCII character with hex code xx). - - The specification specifies that lines are separated by CRLF but - for compatibility with the widest range of clients recommends - servers also handle LF. Similarly, whitespace in the request line - is treated sensibly (allowing multiple spaces between components - and allowing trailing whitespace). - - Similarly, for output, lines ought to be separated by CRLF pairs - but most clients grok LF characters just fine. - - If the first line of the request has the form - - - - (i.e. is left out) then this is assumed to be an HTTP - 0.9 request; this form has no optional headers and data part and - the reply consists of just the data. - - The reply form of the HTTP 1.x protocol again has three parts: - - 1. One line giving the response code - 2. An optional set of RFC-822-style headers - 3. The data - - Again, the headers and data are separated by a blank line. - - The response code line has the form - - - - where is the protocol version ("HTTP/1.0" or "HTTP/1.1"), - is a 3-digit response code indicating success or - failure of the request, and is an optional - human-readable string explaining what the response code means. - - This server parses the request and the headers, and then calls a - function specific to the request type (). Specifically, - a request SPAM will be handled by a method do_SPAM(). If no - such method exists the server sends an error response to the - client. If it exists, it is called with no arguments: - - do_SPAM() - - Note that the request name is case sensitive (i.e. SPAM and spam - are different requests). 
-
-    The various request details are stored in instance variables:
-
-    - client_address is the client IP address in the form (host,
-      port);
-
-    - command, path and version are the broken-down request line;
-
-    - headers is an instance of email.message.Message (or a derived
-      class) containing the header information;
-
-    - rfile is a file object open for reading positioned at the
-      start of the optional input data part;
-
-    - wfile is a file object open for writing.
-
-    IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
-
-    The first thing to be written must be the response line.  Then
-    follow 0 or more header lines, then a blank line, and then the
-    actual data (if any).  The meaning of the header lines depends on
-    the command executed by the server; in most cases, when data is
-    returned, there should be at least one header line of the form
-
-    Content-type: <type>/<subtype>
-
-    where <type> and <subtype> should be registered MIME types,
-    e.g. "text/html" or "text/plain".
-
-    """
-
-    # The Python system version, truncated to its first component.
-    sys_version = "Python/" + sys.version.split()[0]
-
-    # The server software version.  You may want to override this.
-    # The format is multiple whitespace-separated strings,
-    # where each string is of the form name[/version].
-    server_version = "BaseHTTP/" + __version__
-
-    error_message_format = DEFAULT_ERROR_MESSAGE
-    error_content_type = DEFAULT_ERROR_CONTENT_TYPE
-
-    # The default request version.  This only affects responses up until
-    # the point where the request line is parsed, so it mainly decides what
-    # the client gets back when sending a malformed request line.
-    # Most web servers default to HTTP 0.9, i.e. don't send a status line.
-    default_request_version = "HTTP/0.9"
-
-    def parse_request(self):
-        """Parse a request (internal).
-
-        The request should be stored in self.raw_requestline; the results
-        are in self.command, self.path, self.request_version and
-        self.headers.
-
-        Return True for success, False for failure; on failure, any relevant
-        error response has already been sent back.
-
-        """
-        is_http_0_9 = False
-        self.command = None  # set in case of error on the first line
-        self.request_version = version = self.default_request_version
-        self.close_connection = True
-        requestline = str(self.raw_requestline, 'iso-8859-1')
-        requestline = requestline.rstrip('\r\n')
-        self.requestline = requestline
-        words = requestline.split()
-        if len(words) == 0:
-            return False
-
-        if len(words) >= 3:  # Enough to determine protocol version
-            version = words[-1]
-            try:
-                if not version.startswith('HTTP/'):
-                    raise ValueError
-                base_version_number = version.split('/', 1)[1]
-                version_number = base_version_number.split(".")
-                # RFC 2145 section 3.1 says there can be only one "." and
-                #   - major and minor numbers MUST be treated as
-                #     separate integers;
-                #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
-                #     turn is lower than HTTP/12.3;
-                #   - Leading zeros MUST be ignored by recipients.
- if len(version_number) != 2: - raise ValueError - if any(not component.isdigit() for component in version_number): - raise ValueError("non digit in http version") - if any(len(component) > 10 for component in version_number): - raise ValueError("unreasonable length http version") - version_number = int(version_number[0]), int(version_number[1]) - except (ValueError, IndexError): - self.send_error( - HTTPStatus.BAD_REQUEST, - "Bad request version (%r)" % version) - return False - if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": - self.close_connection = False - if version_number >= (2, 0): - self.send_error( - HTTPStatus.HTTP_VERSION_NOT_SUPPORTED, - "Invalid HTTP version (%s)" % base_version_number) - return False - self.request_version = version - - if not 2 <= len(words) <= 3: - self.send_error( - HTTPStatus.BAD_REQUEST, - "Bad request syntax (%r)" % requestline) - return False - command, path = words[:2] - if len(words) == 2: - self.close_connection = True - if command != 'GET': - self.send_error( - HTTPStatus.BAD_REQUEST, - "Bad HTTP/0.9 request type (%r)" % command) - return False - is_http_0_9 = True - self.command, self.path = command, path - - # gh-87389: The purpose of replacing '//' with '/' is to protect - # against open redirect attacks possibly triggered if the path starts - # with '//' because http clients treat //path as an absolute URI - # without scheme (similar to http://path) rather than a path. - if self.path.startswith('//'): - self.path = '/' + self.path.lstrip('/') # Reduce to a single / - - # For HTTP/0.9, headers are not expected at all. - if is_http_0_9: - self.headers = {} - return True - - # Examine the headers and look for a Connection directive. - try: - self.headers = http.client.parse_headers(self.rfile, - _class=self.MessageClass) - except http.client.LineTooLong as err: - self.send_error( - HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE, - "Line too long", - str(err)) - return False - except http.client.HTTPException as err: - self.send_error( - HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE, - "Too many headers", - str(err) - ) - return False - - conntype = self.headers.get('Connection', "") - if conntype.lower() == 'close': - self.close_connection = True - elif (conntype.lower() == 'keep-alive' and - self.protocol_version >= "HTTP/1.1"): - self.close_connection = False - # Examine the headers and look for an Expect directive - expect = self.headers.get('Expect', "") - if (expect.lower() == "100-continue" and - self.protocol_version >= "HTTP/1.1" and - self.request_version >= "HTTP/1.1"): - if not self.handle_expect_100(): - return False - return True - - def handle_expect_100(self): - """Decide what to do with an "Expect: 100-continue" header. - - If the client is expecting a 100 Continue response, we must - respond with either a 100 Continue or a final response before - waiting for the request body. The default is to always respond - with a 100 Continue. You can behave differently (for example, - reject unauthorized requests) by overriding this method. - - This method should either return True (possibly after sending - a 100 Continue response) or send an error response and return - False. - - """ - self.send_response_only(HTTPStatus.CONTINUE) - self.end_headers() - return True - - def handle_one_request(self): - """Handle a single HTTP request. - - You normally don't need to override this method; see the class - __doc__ string for information on how to handle specific HTTP - commands such as GET and POST. 
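# --- editor's note ---------------------------------------------------------
# Hedged sketch of overriding handle_expect_100() (defined above) to reject
# oversized uploads before the client sends the body; the handler name and
# the 1 MiB limit are illustrative, not from the deleted module.
from http import HTTPStatus
from http.server import BaseHTTPRequestHandler

class PickyHandler(BaseHTTPRequestHandler):
    def handle_expect_100(self):
        length = int(self.headers.get('Content-Length', 0))
        if length > 1 << 20:
            # Send a final response instead of "100 Continue" and give up.
            self.send_error(HTTPStatus.EXPECTATION_FAILED)
            return False
        return super().handle_expect_100()
# ---------------------------------------------------------------------------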
- - """ - try: - self.raw_requestline = self.rfile.readline(65537) - if len(self.raw_requestline) > 65536: - self.requestline = '' - self.request_version = '' - self.command = '' - self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG) - return - if not self.raw_requestline: - self.close_connection = True - return - if not self.parse_request(): - # An error code has been sent, just exit - return - mname = 'do_' + self.command - if not hasattr(self, mname): - self.send_error( - HTTPStatus.NOT_IMPLEMENTED, - "Unsupported method (%r)" % self.command) - return - method = getattr(self, mname) - method() - self.wfile.flush() #actually send the response if not already done. - except TimeoutError as e: - #a read or a write timed out. Discard this connection - self.log_error("Request timed out: %r", e) - self.close_connection = True - return - - def handle(self): - """Handle multiple requests if necessary.""" - self.close_connection = True - - self.handle_one_request() - while not self.close_connection: - self.handle_one_request() - - def send_error(self, code, message=None, explain=None): - """Send and log an error reply. - - Arguments are - * code: an HTTP error code - 3 digits - * message: a simple optional 1 line reason phrase. - *( HTAB / SP / VCHAR / %x80-FF ) - defaults to short entry matching the response code - * explain: a detailed message defaults to the long entry - matching the response code. - - This sends an error response (so it must be called before any - output has been generated), logs the error, and finally sends - a piece of HTML explaining the error to the user. - - """ - - try: - shortmsg, longmsg = self.responses[code] - except KeyError: - shortmsg, longmsg = '???', '???' - if message is None: - message = shortmsg - if explain is None: - explain = longmsg - self.log_error("code %d, message %s", code, message) - self.send_response(code, message) - self.send_header('Connection', 'close') - - # Message body is omitted for cases described in: - # - RFC7230: 3.3. 1xx, 204(No Content), 304(Not Modified) - # - RFC7231: 6.3.6. 205(Reset Content) - body = None - if (code >= 200 and - code not in (HTTPStatus.NO_CONTENT, - HTTPStatus.RESET_CONTENT, - HTTPStatus.NOT_MODIFIED)): - # HTML encode to prevent Cross Site Scripting attacks - # (see bug #1100201) - content = (self.error_message_format % { - 'code': code, - 'message': html.escape(message, quote=False), - 'explain': html.escape(explain, quote=False) - }) - body = content.encode('UTF-8', 'replace') - self.send_header("Content-Type", self.error_content_type) - self.send_header('Content-Length', str(len(body))) - self.end_headers() - - if self.command != 'HEAD' and body: - self.wfile.write(body) - - def send_response(self, code, message=None): - """Add the response header to the headers buffer and log the - response code. - - Also send two standard headers with the server software - version and the current date. 
- - """ - self.log_request(code) - self.send_response_only(code, message) - self.send_header('Server', self.version_string()) - self.send_header('Date', self.date_time_string()) - - def send_response_only(self, code, message=None): - """Send the response header only.""" - if self.request_version != 'HTTP/0.9': - if message is None: - if code in self.responses: - message = self.responses[code][0] - else: - message = '' - if not hasattr(self, '_headers_buffer'): - self._headers_buffer = [] - self._headers_buffer.append(("%s %d %s\r\n" % - (self.protocol_version, code, message)).encode( - 'latin-1', 'strict')) - - def send_header(self, keyword, value): - """Send a MIME header to the headers buffer.""" - if self.request_version != 'HTTP/0.9': - if not hasattr(self, '_headers_buffer'): - self._headers_buffer = [] - self._headers_buffer.append( - ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict')) - - if keyword.lower() == 'connection': - if value.lower() == 'close': - self.close_connection = True - elif value.lower() == 'keep-alive': - self.close_connection = False - - def end_headers(self): - """Send the blank line ending the MIME headers.""" - if self.request_version != 'HTTP/0.9': - self._headers_buffer.append(b"\r\n") - self.flush_headers() - - def flush_headers(self): - if hasattr(self, '_headers_buffer'): - self.wfile.write(b"".join(self._headers_buffer)) - self._headers_buffer = [] - - def log_request(self, code='-', size='-'): - """Log an accepted request. - - This is called by send_response(). - - """ - if isinstance(code, HTTPStatus): - code = code.value - self.log_message('"%s" %s %s', - self.requestline, str(code), str(size)) - - def log_error(self, format, *args): - """Log an error. - - This is called when a request cannot be fulfilled. By - default it passes the message on to log_message(). - - Arguments are the same as for log_message(). - - XXX This should go to the separate error log. - - """ - - self.log_message(format, *args) - - # https://en.wikipedia.org/wiki/List_of_Unicode_characters#Control_codes - _control_char_table = str.maketrans( - {c: fr'\x{c:02x}' for c in itertools.chain(range(0x20), range(0x7f,0xa0))}) - _control_char_table[ord('\\')] = r'\\' - - def log_message(self, format, *args): - """Log an arbitrary message. - - This is used by all other logging functions. Override - it if you have specific logging wishes. - - The first argument, FORMAT, is a format string for the - message to be logged. If the format string contains - any % escapes requiring parameters, they should be - specified as subsequent arguments (it's just like - printf!). - - The client ip and current date/time are prefixed to - every message. - - Unicode control characters are replaced with escaped hex - before writing the output to stderr. 
- - """ - - message = format % args - sys.stderr.write("%s - - [%s] %s\n" % - (self.address_string(), - self.log_date_time_string(), - message.translate(self._control_char_table))) - - def version_string(self): - """Return the server software version string.""" - return self.server_version + ' ' + self.sys_version - - def date_time_string(self, timestamp=None): - """Return the current date and time formatted for a message header.""" - if timestamp is None: - timestamp = time.time() - return email.utils.formatdate(timestamp, usegmt=True) - - def log_date_time_string(self): - """Return the current time formatted for logging.""" - now = time.time() - year, month, day, hh, mm, ss, x, y, z = time.localtime(now) - s = "%02d/%3s/%04d %02d:%02d:%02d" % ( - day, self.monthname[month], year, hh, mm, ss) - return s - - weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - - monthname = [None, - 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', - 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - - def address_string(self): - """Return the client address.""" - - return self.client_address[0] - - # Essentially static class variables - - # The version of the HTTP protocol we support. - # Set this to HTTP/1.1 to enable automatic keepalive - protocol_version = "HTTP/1.0" - - # MessageClass used to parse headers - MessageClass = http.client.HTTPMessage - - # hack to maintain backwards compatibility - responses = { - v: (v.phrase, v.description) - for v in HTTPStatus.__members__.values() - } - - -class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): - - """Simple HTTP request handler with GET and HEAD commands. - - This serves files from the current directory and any of its - subdirectories. The MIME type for files is determined by - calling the .guess_type() method. - - The GET and HEAD requests are identical except that the HEAD - request omits the actual contents of the file. - - """ - - server_version = "SimpleHTTP/" + __version__ - index_pages = ("index.html", "index.htm") - extensions_map = _encodings_map_default = { - '.gz': 'application/gzip', - '.Z': 'application/octet-stream', - '.bz2': 'application/x-bzip2', - '.xz': 'application/x-xz', - } - - def __init__(self, *args, directory=None, **kwargs): - if directory is None: - directory = os.getcwd() - self.directory = os.fspath(directory) - super().__init__(*args, **kwargs) - - def do_GET(self): - """Serve a GET request.""" - f = self.send_head() - if f: - try: - self.copyfile(f, self.wfile) - finally: - f.close() - - def do_HEAD(self): - """Serve a HEAD request.""" - f = self.send_head() - if f: - f.close() - - def send_head(self): - """Common code for GET and HEAD commands. - - This sends the response code and MIME headers. - - Return value is either a file object (which has to be copied - to the outputfile by the caller unless the command was HEAD, - and must be closed by the caller under all circumstances), or - None, in which case the caller has nothing further to do. 
- - """ - path = self.translate_path(self.path) - f = None - if os.path.isdir(path): - parts = urllib.parse.urlsplit(self.path) - if not parts.path.endswith(('/', '%2f', '%2F')): - # redirect browser - doing basically what apache does - self.send_response(HTTPStatus.MOVED_PERMANENTLY) - new_parts = (parts[0], parts[1], parts[2] + '/', - parts[3], parts[4]) - new_url = urllib.parse.urlunsplit(new_parts) - self.send_header("Location", new_url) - self.send_header("Content-Length", "0") - self.end_headers() - return None - for index in self.index_pages: - index = os.path.join(path, index) - if os.path.isfile(index): - path = index - break - else: - return self.list_directory(path) - ctype = self.guess_type(path) - # check for trailing "/" which should return 404. See Issue17324 - # The test for this was added in test_httpserver.py - # However, some OS platforms accept a trailingSlash as a filename - # See discussion on python-dev and Issue34711 regarding - # parsing and rejection of filenames with a trailing slash - if path.endswith("/"): - self.send_error(HTTPStatus.NOT_FOUND, "File not found") - return None - try: - f = open(path, 'rb') - except OSError: - self.send_error(HTTPStatus.NOT_FOUND, "File not found") - return None - - try: - fs = os.fstat(f.fileno()) - # Use browser cache if possible - if ("If-Modified-Since" in self.headers - and "If-None-Match" not in self.headers): - # compare If-Modified-Since and time of last file modification - try: - ims = email.utils.parsedate_to_datetime( - self.headers["If-Modified-Since"]) - except (TypeError, IndexError, OverflowError, ValueError): - # ignore ill-formed values - pass - else: - if ims.tzinfo is None: - # obsolete format with no timezone, cf. - # https://tools.ietf.org/html/rfc7231#section-7.1.1.1 - ims = ims.replace(tzinfo=datetime.timezone.utc) - if ims.tzinfo is datetime.timezone.utc: - # compare to UTC datetime of last modification - last_modif = datetime.datetime.fromtimestamp( - fs.st_mtime, datetime.timezone.utc) - # remove microseconds, like in If-Modified-Since - last_modif = last_modif.replace(microsecond=0) - - if last_modif <= ims: - self.send_response(HTTPStatus.NOT_MODIFIED) - self.end_headers() - f.close() - return None - - self.send_response(HTTPStatus.OK) - self.send_header("Content-type", ctype) - self.send_header("Content-Length", str(fs[6])) - self.send_header("Last-Modified", - self.date_time_string(fs.st_mtime)) - self.end_headers() - return f - except: - f.close() - raise - - def list_directory(self, path): - """Helper to produce a directory listing (absent index.html). - - Return value is either a file object, or None (indicating an - error). In either case, the headers are sent, making the - interface the same as for send_head(). - - """ - try: - list = os.listdir(path) - except OSError: - self.send_error( - HTTPStatus.NOT_FOUND, - "No permission to list directory") - return None - list.sort(key=lambda a: a.lower()) - r = [] - displaypath = self.path - displaypath = displaypath.split('#', 1)[0] - displaypath = displaypath.split('?', 1)[0] - try: - displaypath = urllib.parse.unquote(displaypath, - errors='surrogatepass') - except UnicodeDecodeError: - displaypath = urllib.parse.unquote(displaypath) - displaypath = html.escape(displaypath, quote=False) - enc = sys.getfilesystemencoding() - title = f'Directory listing for {displaypath}' - r.append('') - r.append('') - r.append('') - r.append(f'') - r.append(f'{title}\n') - r.append(f'\n

{title}

') - r.append('
\n
\n
\n\n\n') - encoded = '\n'.join(r).encode(enc, 'surrogateescape') - f = io.BytesIO() - f.write(encoded) - f.seek(0) - self.send_response(HTTPStatus.OK) - self.send_header("Content-type", "text/html; charset=%s" % enc) - self.send_header("Content-Length", str(len(encoded))) - self.end_headers() - return f - - def translate_path(self, path): - """Translate a /-separated PATH to the local filename syntax. - - Components that mean special things to the local file system - (e.g. drive or directory names) are ignored. (XXX They should - probably be diagnosed.) - - """ - # abandon query parameters - path = path.split('#', 1)[0] - path = path.split('?', 1)[0] - # Don't forget explicit trailing slash when normalizing. Issue17324 - try: - path = urllib.parse.unquote(path, errors='surrogatepass') - except UnicodeDecodeError: - path = urllib.parse.unquote(path) - trailing_slash = path.endswith('/') - path = posixpath.normpath(path) - words = path.split('/') - words = filter(None, words) - path = self.directory - for word in words: - if os.path.dirname(word) or word in (os.curdir, os.pardir): - # Ignore components that are not a simple file/directory name - continue - path = os.path.join(path, word) - if trailing_slash: - path += '/' - return path - - def copyfile(self, source, outputfile): - """Copy all data between two file objects. - - The SOURCE argument is a file object open for reading - (or anything with a read() method) and the DESTINATION - argument is a file object open for writing (or - anything with a write() method). - - The only reason for overriding this would be to change - the block size or perhaps to replace newlines by CRLF - -- note however that this the default server uses this - to copy binary data as well. - - """ - shutil.copyfileobj(source, outputfile) - - def guess_type(self, path): - """Guess the type of a file. - - Argument is a PATH (a filename). - - Return value is a string of the form type/subtype, - usable for a MIME Content-type header. - - The default implementation looks the file's extension - up in the table self.extensions_map, using application/octet-stream - as a default; however it would be permissible (if - slow) to look inside the data to make a better guess. - - """ - base, ext = posixpath.splitext(path) - if ext in self.extensions_map: - return self.extensions_map[ext] - ext = ext.lower() - if ext in self.extensions_map: - return self.extensions_map[ext] - guess, _ = mimetypes.guess_file_type(path) - if guess: - return guess - return 'application/octet-stream' - - -# Utilities for CGIHTTPRequestHandler - -def _url_collapse_path(path): - """ - Given a URL path, remove extra '/'s and '.' path elements and collapse - any '..' references and returns a collapsed path. - - Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. - The utility of this function is limited to is_cgi method and helps - preventing some security attacks. - - Returns: The reconstituted URL, which will always start with a '/'. - - Raises: IndexError if too many '..' occur within the path. - - """ - # Query component should not be involved. - path, _, query = path.partition('?') - path = urllib.parse.unquote(path) - - # Similar to os.path.split(os.path.normpath(path)) but specific to URL - # path semantics rather than local operating system semantics. - path_parts = path.split('/') - head_parts = [] - for part in path_parts[:-1]: - if part == '..': - head_parts.pop() # IndexError if more '..' 
than prior parts - elif part and part != '.': - head_parts.append( part ) - if path_parts: - tail_part = path_parts.pop() - if tail_part: - if tail_part == '..': - head_parts.pop() - tail_part = '' - elif tail_part == '.': - tail_part = '' - else: - tail_part = '' - - if query: - tail_part = '?'.join((tail_part, query)) - - splitpath = ('/' + '/'.join(head_parts), tail_part) - collapsed_path = "/".join(splitpath) - - return collapsed_path - - - -nobody = None - -def nobody_uid(): - """Internal routine to get nobody's uid""" - global nobody - if nobody: - return nobody - try: - import pwd - except ImportError: - return -1 - try: - nobody = pwd.getpwnam('nobody')[2] - except KeyError: - nobody = 1 + max(x[2] for x in pwd.getpwall()) - return nobody - - -def executable(path): - """Test for executable file.""" - return os.access(path, os.X_OK) - - -class CGIHTTPRequestHandler(SimpleHTTPRequestHandler): - - """Complete HTTP server with GET, HEAD and POST commands. - - GET and HEAD also support running CGI scripts. - - The POST command is *only* implemented for CGI scripts. - - """ - - def __init__(self, *args, **kwargs): - import warnings - warnings._deprecated("http.server.CGIHTTPRequestHandler", - remove=(3, 15)) - super().__init__(*args, **kwargs) - - # Determine platform specifics - have_fork = hasattr(os, 'fork') - - # Make rfile unbuffered -- we need to read one line and then pass - # the rest to a subprocess, so we can't use buffered input. - rbufsize = 0 - - def do_POST(self): - """Serve a POST request. - - This is only implemented for CGI scripts. - - """ - - if self.is_cgi(): - self.run_cgi() - else: - self.send_error( - HTTPStatus.NOT_IMPLEMENTED, - "Can only POST to CGI scripts") - - def send_head(self): - """Version of send_head that support CGI scripts""" - if self.is_cgi(): - return self.run_cgi() - else: - return SimpleHTTPRequestHandler.send_head(self) - - def is_cgi(self): - """Test whether self.path corresponds to a CGI script. - - Returns True and updates the cgi_info attribute to the tuple - (dir, rest) if self.path requires running a CGI script. - Returns False otherwise. - - If any exception is raised, the caller should assume that - self.path was rejected as invalid and act accordingly. - - The default implementation tests whether the normalized url - path begins with one of the strings in self.cgi_directories - (and the next character is a '/' or the end of the string). - - """ - collapsed_path = _url_collapse_path(self.path) - dir_sep = collapsed_path.find('/', 1) - while dir_sep > 0 and not collapsed_path[:dir_sep] in self.cgi_directories: - dir_sep = collapsed_path.find('/', dir_sep+1) - if dir_sep > 0: - head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] - self.cgi_info = head, tail - return True - return False - - - cgi_directories = ['/cgi-bin', '/htbin'] - - def is_executable(self, path): - """Test whether argument path is an executable file.""" - return executable(path) - - def is_python(self, path): - """Test whether argument path is a Python script.""" - head, tail = os.path.splitext(path) - return tail.lower() in (".py", ".pyw") - - def run_cgi(self): - """Execute a CGI script.""" - dir, rest = self.cgi_info - path = dir + '/' + rest - i = path.find('/', len(dir)+1) - while i >= 0: - nextdir = path[:i] - nextrest = path[i+1:] - - scriptdir = self.translate_path(nextdir) - if os.path.isdir(scriptdir): - dir, rest = nextdir, nextrest - i = path.find('/', len(dir)+1) - else: - break - - # find an explicit query string, if present. 
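# --- editor's note ---------------------------------------------------------
# Behaviour of _url_collapse_path (defined above), per its docstring; the
# sample paths are illustrative.
assert _url_collapse_path('/cgi-bin/./script.py') == '/cgi-bin/script.py'
assert _url_collapse_path('/a/b/../c') == '/a/c'
try:
    _url_collapse_path('/a/../..')          # more '..' than prior parts
except IndexError:
    pass                                    # rejected, as is_cgi() relies on
# ---------------------------------------------------------------------------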
- rest, _, query = rest.partition('?') - - # dissect the part after the directory name into a script name & - # a possible additional path, to be stored in PATH_INFO. - i = rest.find('/') - if i >= 0: - script, rest = rest[:i], rest[i:] - else: - script, rest = rest, '' - - scriptname = dir + '/' + script - scriptfile = self.translate_path(scriptname) - if not os.path.exists(scriptfile): - self.send_error( - HTTPStatus.NOT_FOUND, - "No such CGI script (%r)" % scriptname) - return - if not os.path.isfile(scriptfile): - self.send_error( - HTTPStatus.FORBIDDEN, - "CGI script is not a plain file (%r)" % scriptname) - return - ispy = self.is_python(scriptname) - if self.have_fork or not ispy: - if not self.is_executable(scriptfile): - self.send_error( - HTTPStatus.FORBIDDEN, - "CGI script is not executable (%r)" % scriptname) - return - - # Reference: https://www6.uniovi.es/~antonio/ncsa_httpd/cgi/env.html - # XXX Much of the following could be prepared ahead of time! - env = copy.deepcopy(os.environ) - env['SERVER_SOFTWARE'] = self.version_string() - env['SERVER_NAME'] = self.server.server_name - env['GATEWAY_INTERFACE'] = 'CGI/1.1' - env['SERVER_PROTOCOL'] = self.protocol_version - env['SERVER_PORT'] = str(self.server.server_port) - env['REQUEST_METHOD'] = self.command - uqrest = urllib.parse.unquote(rest) - env['PATH_INFO'] = uqrest - env['PATH_TRANSLATED'] = self.translate_path(uqrest) - env['SCRIPT_NAME'] = scriptname - env['QUERY_STRING'] = query - env['REMOTE_ADDR'] = self.client_address[0] - authorization = self.headers.get("authorization") - if authorization: - authorization = authorization.split() - if len(authorization) == 2: - import base64, binascii - env['AUTH_TYPE'] = authorization[0] - if authorization[0].lower() == "basic": - try: - authorization = authorization[1].encode('ascii') - authorization = base64.decodebytes(authorization).\ - decode('ascii') - except (binascii.Error, UnicodeError): - pass - else: - authorization = authorization.split(':') - if len(authorization) == 2: - env['REMOTE_USER'] = authorization[0] - # XXX REMOTE_IDENT - if self.headers.get('content-type') is None: - env['CONTENT_TYPE'] = self.headers.get_content_type() - else: - env['CONTENT_TYPE'] = self.headers['content-type'] - length = self.headers.get('content-length') - if length: - env['CONTENT_LENGTH'] = length - referer = self.headers.get('referer') - if referer: - env['HTTP_REFERER'] = referer - accept = self.headers.get_all('accept', ()) - env['HTTP_ACCEPT'] = ','.join(accept) - ua = self.headers.get('user-agent') - if ua: - env['HTTP_USER_AGENT'] = ua - co = filter(None, self.headers.get_all('cookie', [])) - cookie_str = ', '.join(co) - if cookie_str: - env['HTTP_COOKIE'] = cookie_str - # XXX Other HTTP_* headers - # Since we're setting the env in the parent, provide empty - # values to override previously set values - for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH', - 'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'): - env.setdefault(k, "") - - self.send_response(HTTPStatus.OK, "Script output follows") - self.flush_headers() - - decoded_query = query.replace('+', ' ') - - if self.have_fork: - # Unix -- fork as we should - args = [script] - if '=' not in decoded_query: - args.append(decoded_query) - nobody = nobody_uid() - self.wfile.flush() # Always flush before forking - pid = os.fork() - if pid != 0: - # Parent - pid, sts = os.waitpid(pid, 0) - # throw away additional data [see bug #427345] - while select.select([self.rfile], [], [], 0)[0]: - if not self.rfile.read(1): - break - 
exitcode = os.waitstatus_to_exitcode(sts) - if exitcode: - self.log_error(f"CGI script exit code {exitcode}") - return - # Child - try: - try: - os.setuid(nobody) - except OSError: - pass - os.dup2(self.rfile.fileno(), 0) - os.dup2(self.wfile.fileno(), 1) - os.execve(scriptfile, args, env) - except: - self.server.handle_error(self.request, self.client_address) - os._exit(127) - - else: - # Non-Unix -- use subprocess - import subprocess - cmdline = [scriptfile] - if self.is_python(scriptfile): - interp = sys.executable - if interp.lower().endswith("w.exe"): - # On Windows, use python.exe, not pythonw.exe - interp = interp[:-5] + interp[-4:] - cmdline = [interp, '-u'] + cmdline - if '=' not in query: - cmdline.append(query) - self.log_message("command: %s", subprocess.list2cmdline(cmdline)) - try: - nbytes = int(length) - except (TypeError, ValueError): - nbytes = 0 - p = subprocess.Popen(cmdline, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env = env - ) - if self.command.lower() == "post" and nbytes > 0: - cursize = 0 - data = self.rfile.read(min(nbytes, _MIN_READ_BUF_SIZE)) - while len(data) < nbytes and len(data) != cursize: - cursize = len(data) - # This is a geometric increase in read size (never more - # than doubling out the current length of data per loop - # iteration). - delta = min(cursize, nbytes - cursize) - try: - data += self.rfile.read(delta) - except TimeoutError: - break - else: - data = None - # throw away additional data [see bug #427345] - while select.select([self.rfile._sock], [], [], 0)[0]: - if not self.rfile._sock.recv(1): - break - stdout, stderr = p.communicate(data) - self.wfile.write(stdout) - if stderr: - self.log_error('%s', stderr) - p.stderr.close() - p.stdout.close() - status = p.returncode - if status: - self.log_error("CGI script exit status %#x", status) - else: - self.log_message("CGI script exited OK") - - -def _get_best_family(*address): - infos = socket.getaddrinfo( - *address, - type=socket.SOCK_STREAM, - flags=socket.AI_PASSIVE, - ) - family, type, proto, canonname, sockaddr = next(iter(infos)) - return family, sockaddr - - -def test(HandlerClass=BaseHTTPRequestHandler, - ServerClass=ThreadingHTTPServer, - protocol="HTTP/1.0", port=8000, bind=None): - """Test the HTTP request handler class. - - This runs an HTTP server on port 8000 (or the port argument). - - """ - ServerClass.address_family, addr = _get_best_family(bind, port) - HandlerClass.protocol_version = protocol - with ServerClass(addr, HandlerClass) as httpd: - host, port = httpd.socket.getsockname()[:2] - url_host = f'[{host}]' if ':' in host else host - print( - f"Serving HTTP on {host} port {port} " - f"(http://{url_host}:{port}/) ..." 
- ) - try: - httpd.serve_forever() - except KeyboardInterrupt: - print("\nKeyboard interrupt received, exiting.") - sys.exit(0) - -if __name__ == '__main__': - import argparse - import contextlib - - parser = argparse.ArgumentParser() - parser.add_argument('--cgi', action='store_true', - help='run as CGI server') - parser.add_argument('-b', '--bind', metavar='ADDRESS', - help='bind to this address ' - '(default: all interfaces)') - parser.add_argument('-d', '--directory', default=os.getcwd(), - help='serve this directory ' - '(default: current directory)') - parser.add_argument('-p', '--protocol', metavar='VERSION', - default='HTTP/1.0', - help='conform to this HTTP version ' - '(default: %(default)s)') - parser.add_argument('port', default=8000, type=int, nargs='?', - help='bind to this port ' - '(default: %(default)s)') - args = parser.parse_args() - if args.cgi: - handler_class = CGIHTTPRequestHandler - else: - handler_class = SimpleHTTPRequestHandler - - # ensure dual-stack is not disabled; ref #38907 - class DualStackServer(ThreadingHTTPServer): - - def server_bind(self): - # suppress exception when protocol is IPv4 - with contextlib.suppress(Exception): - self.socket.setsockopt( - socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) - return super().server_bind() - - def finish_request(self, request, client_address): - self.RequestHandlerClass(request, client_address, self, - directory=args.directory) - - test( - HandlerClass=handler_class, - ServerClass=DualStackServer, - port=args.port, - bind=args.bind, - protocol=args.protocol, - ) diff --git a/Python313_13_x86_Template/Lib/imaplib.py b/Python313_13_x86_Template/Lib/imaplib.py deleted file mode 100644 index 141e6398..00000000 --- a/Python313_13_x86_Template/Lib/imaplib.py +++ /dev/null @@ -1,1651 +0,0 @@ -"""IMAP4 client. - -Based on RFC 2060. - -Public class: IMAP4 -Public variable: Debug -Public functions: Internaldate2tuple - Int2AP - ParseFlags - Time2Internaldate -""" - -# Author: Piers Lauder December 1997. -# -# Authentication code contributed by Donn Cave June 1998. -# String method conversion by ESR, February 2001. -# GET/SETACL contributed by Anthony Baxter April 2001. -# IMAP4_SSL contributed by Tino Lange March 2002. -# GET/SETQUOTA contributed by Andreas Zeidler June 2002. -# PROXYAUTH contributed by Rick Holbert November 2002. -# GET/SETANNOTATION contributed by Tomas Lindroos June 2005. - -__version__ = "2.58" - -import binascii, errno, random, re, socket, subprocess, sys, time, calendar -from datetime import datetime, timezone, timedelta -from io import DEFAULT_BUFFER_SIZE - -try: - import ssl - HAVE_SSL = True -except ImportError: - HAVE_SSL = False - -__all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple", - "Int2AP", "ParseFlags", "Time2Internaldate"] - -# Globals - -CRLF = b'\r\n' -Debug = 0 -IMAP4_PORT = 143 -IMAP4_SSL_PORT = 993 -AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first - -# Maximal line length when calling readline(). This is to prevent -# reading arbitrary length lines. RFC 3501 and 2060 (IMAP 4rev1) -# don't specify a line length. RFC 2683 suggests limiting client -# command lines to 1000 octets and that servers should be prepared -# to accept command lines up to 8000 octets, so we used to use 10K here. -# In the modern world (eg: gmail) the response to, for example, a -# search command can be quite large, so we now use 1M. -_MAXLINE = 1000000 - -# Data larger than this will be read in chunks, to prevent extreme -# overallocation. 
-_SAFE_BUF_SIZE = 1 << 20
-
-# Commands
-
-Commands = {
-        # name            valid states
-        'APPEND':       ('AUTH', 'SELECTED'),
-        'AUTHENTICATE': ('NONAUTH',),
-        'CAPABILITY':   ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
-        'CHECK':        ('SELECTED',),
-        'CLOSE':        ('SELECTED',),
-        'COPY':         ('SELECTED',),
-        'CREATE':       ('AUTH', 'SELECTED'),
-        'DELETE':       ('AUTH', 'SELECTED'),
-        'DELETEACL':    ('AUTH', 'SELECTED'),
-        'ENABLE':       ('AUTH', ),
-        'EXAMINE':      ('AUTH', 'SELECTED'),
-        'EXPUNGE':      ('SELECTED',),
-        'FETCH':        ('SELECTED',),
-        'GETACL':       ('AUTH', 'SELECTED'),
-        'GETANNOTATION':('AUTH', 'SELECTED'),
-        'GETQUOTA':     ('AUTH', 'SELECTED'),
-        'GETQUOTAROOT': ('AUTH', 'SELECTED'),
-        'MYRIGHTS':     ('AUTH', 'SELECTED'),
-        'LIST':         ('AUTH', 'SELECTED'),
-        'LOGIN':        ('NONAUTH',),
-        'LOGOUT':       ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
-        'LSUB':         ('AUTH', 'SELECTED'),
-        'MOVE':         ('SELECTED',),
-        'NAMESPACE':    ('AUTH', 'SELECTED'),
-        'NOOP':         ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
-        'PARTIAL':      ('SELECTED',),       # NB: obsolete
-        'PROXYAUTH':    ('AUTH',),
-        'RENAME':       ('AUTH', 'SELECTED'),
-        'SEARCH':       ('SELECTED',),
-        'SELECT':       ('AUTH', 'SELECTED'),
-        'SETACL':       ('AUTH', 'SELECTED'),
-        'SETANNOTATION':('AUTH', 'SELECTED'),
-        'SETQUOTA':     ('AUTH', 'SELECTED'),
-        'SORT':         ('SELECTED',),
-        'STARTTLS':     ('NONAUTH',),
-        'STATUS':       ('AUTH', 'SELECTED'),
-        'STORE':        ('SELECTED',),
-        'SUBSCRIBE':    ('AUTH', 'SELECTED'),
-        'THREAD':       ('SELECTED',),
-        'UID':          ('SELECTED',),
-        'UNSUBSCRIBE':  ('AUTH', 'SELECTED'),
-        'UNSELECT':     ('SELECTED',),
-        }
-
-# Patterns to match server responses
-
-Continuation = re.compile(br'\+( (?P<data>.*))?')
-Flags = re.compile(br'.*FLAGS \((?P<flags>[^\)]*)\)')
-InternalDate = re.compile(br'.*INTERNALDATE "'
-        br'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
-        br' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
-        br' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
-        br'"')
-# Literal is no longer used; kept for backward compatibility.
-Literal = re.compile(br'.*{(?P<size>\d+)}$', re.ASCII)
-MapCRLF = re.compile(br'\r\n|\r|\n')
-# We no longer exclude the ']' character from the data portion of the response
-# code, even though it violates the RFC.  Popular IMAP servers such as Gmail
-# allow flags with ']', and there are programs (including imaplib!) that can
-# produce them.  The problem with this is if the 'text' portion of the response
-# includes a ']' we'll parse the response wrong (which is the point of the RFC
-# restriction).  However, that seems less likely to be a problem in practice
-# than being unable to correctly parse flags that include ']' chars, which
-# was reported as a real-world problem in issue #21815.
-Response_code = re.compile(br'\[(?P<type>[A-Z-]+)( (?P<data>.*))?\]')
-Untagged_response = re.compile(br'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
-# Untagged_status is no longer used; kept for backward compatibility
-Untagged_status = re.compile(
-    br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?', re.ASCII)
-# We compile these in _mode_xxx.
-_Literal = br'.*{(?P<size>\d+)}$'
-_Untagged_status = br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?'
-
-
-
-class IMAP4:
-
-    r"""IMAP4 client class.
-
-    Instantiate with: IMAP4([host[, port[, timeout=None]]])
-
-            host - host's name (default: localhost);
-            port - port number (default: standard IMAP4 port).
-            timeout - socket timeout (default: None)
-                      If timeout is not given or is None,
-                      the global default socket timeout is used
-
-    All IMAP4rev1 commands are supported by methods of the same
-    name (in lowercase).
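# --- editor's note ---------------------------------------------------------
# Quick check of the untagged-response pattern restored above; the sample
# server greeting is illustrative.
import re
Untagged_response = re.compile(br'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
mo = Untagged_response.match(b'* OK IMAP4rev1 server ready')
assert mo.group('type') == b'OK'
assert mo.group('data') == b'IMAP4rev1 server ready'
# ---------------------------------------------------------------------------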
-
-    All arguments to commands are converted to strings, except for
-    AUTHENTICATE, and the last argument to APPEND which is passed as
-    an IMAP4 literal.  If necessary (the string contains any
-    non-printing characters or white-space and isn't enclosed with
-    either parentheses or double quotes) each string is quoted.
-    However, the 'password' argument to the LOGIN command is always
-    quoted.  If you want to avoid having an argument string quoted
-    (eg: the 'flags' argument to STORE) then enclose the string in
-    parentheses (eg: "(\Deleted)").
-
-    Each command returns a tuple: (type, [data, ...]) where 'type'
-    is usually 'OK' or 'NO', and 'data' is either the text from the
-    tagged response, or untagged results from command. Each 'data'
-    is either a string, or a tuple. If a tuple, then the first part
-    is the header of the response, and the second part contains
-    the data (ie: 'literal' value).
-
-    Errors raise the exception class <instance>.error("<reason>").
-    IMAP4 server errors raise <instance>.abort("<reason>"),
-    which is a sub-class of 'error'. Mailbox status changes
-    from READ-WRITE to READ-ONLY raise the exception class
-    <instance>.readonly("<reason>"), which is a sub-class of 'abort'.
-
-    "error" exceptions imply a program error.
-    "abort" exceptions imply the connection should be reset, and
-    the command re-tried.
-    "readonly" exceptions imply the command should be re-tried.
-
-    Note: to use this module, you must read the RFCs pertaining to the
-    IMAP4 protocol, as the semantics of the arguments to each IMAP4
-    command are left to the invoker, not to mention the results. Also,
-    most IMAP servers implement a sub-set of the commands available here.
-    """
-
-    class error(Exception): pass    # Logical errors - debug required
-    class abort(error): pass        # Service errors - close and retry
-    class readonly(abort): pass     # Mailbox status changed to READ-ONLY
-
-    def __init__(self, host='', port=IMAP4_PORT, timeout=None):
-        self.debug = Debug
-        self.state = 'LOGOUT'
-        self.literal = None             # A literal argument to a command
-        self.tagged_commands = {}       # Tagged commands awaiting response
-        self.untagged_responses = {}    # {typ: [data, ...], ...}
-        self.continuation_response = '' # Last continuation response
-        self.is_readonly = False        # READ-ONLY desired state
-        self.tagnum = 0
-        self._tls_established = False
-        self._mode_ascii()
-
-        # Open socket to server.
-
-        self.open(host, port, timeout)
-
-        try:
-            self._connect()
-        except Exception:
-            try:
-                self.shutdown()
-            except OSError:
-                pass
-            raise
-
-    def _mode_ascii(self):
-        self.utf8_enabled = False
-        self._encoding = 'ascii'
-        self.Literal = re.compile(_Literal, re.ASCII)
-        self.Untagged_status = re.compile(_Untagged_status, re.ASCII)
-
-
-    def _mode_utf8(self):
-        self.utf8_enabled = True
-        self._encoding = 'utf-8'
-        self.Literal = re.compile(_Literal)
-        self.Untagged_status = re.compile(_Untagged_status)
-
-
-    def _connect(self):
-        # Create unique tag for this session,
-        # and compile tagged response matcher.
-
-        self.tagpre = Int2AP(random.randint(4096, 65535))
-        self.tagre = re.compile(br'(?P<tag>'
-                        + self.tagpre
-                        + br'\d+) (?P<type>[A-Z]+) (?P<data>.*)', re.ASCII)
-
-        # Get server welcome message,
-        # request and store CAPABILITY response.
- - if __debug__: - self._cmd_log_len = 10 - self._cmd_log_idx = 0 - self._cmd_log = {} # Last `_cmd_log_len' interactions - if self.debug >= 1: - self._mesg('imaplib version %s' % __version__) - self._mesg('new IMAP4 connection, tag=%s' % self.tagpre) - - self.welcome = self._get_response() - if 'PREAUTH' in self.untagged_responses: - self.state = 'AUTH' - elif 'OK' in self.untagged_responses: - self.state = 'NONAUTH' - else: - raise self.error(self.welcome) - - self._get_capabilities() - if __debug__: - if self.debug >= 3: - self._mesg('CAPABILITIES: %r' % (self.capabilities,)) - - for version in AllowedVersions: - if not version in self.capabilities: - continue - self.PROTOCOL_VERSION = version - return - - raise self.error('server not IMAP4 compliant') - - - def __getattr__(self, attr): - # Allow UPPERCASE variants of IMAP4 command methods. - if attr in Commands: - return getattr(self, attr.lower()) - raise AttributeError("Unknown IMAP4 command: '%s'" % attr) - - def __enter__(self): - return self - - def __exit__(self, *args): - if self.state == "LOGOUT": - return - - try: - self.logout() - except OSError: - pass - - - # Overridable methods - - - def _create_socket(self, timeout): - # Default value of IMAP4.host is '', but socket.getaddrinfo() - # (which is used by socket.create_connection()) expects None - # as a default value for host. - if timeout is not None and not timeout: - raise ValueError('Non-blocking socket (timeout=0) is not supported') - host = None if not self.host else self.host - sys.audit("imaplib.open", self, self.host, self.port) - address = (host, self.port) - if timeout is not None: - return socket.create_connection(address, timeout) - return socket.create_connection(address) - - def open(self, host='', port=IMAP4_PORT, timeout=None): - """Setup connection to remote server on "host:port" - (default: localhost:standard IMAP4 port). - This connection will be used by the routines: - read, readline, send, shutdown. - """ - self.host = host - self.port = port - self.sock = self._create_socket(timeout) - self.file = self.sock.makefile('rb') - - - def read(self, size): - """Read 'size' bytes from remote.""" - cursize = min(size, _SAFE_BUF_SIZE) - data = self.file.read(cursize) - while cursize < size and len(data) == cursize: - delta = min(cursize, size - cursize) - data += self.file.read(delta) - cursize += delta - return data - - - def readline(self): - """Read line from remote.""" - line = self.file.readline(_MAXLINE + 1) - if len(line) > _MAXLINE: - raise self.error("got more than %d bytes" % _MAXLINE) - return line - - - def send(self, data): - """Send data to remote.""" - sys.audit("imaplib.send", self, data) - self.sock.sendall(data) - - - def shutdown(self): - """Close I/O established in "open".""" - self.file.close() - try: - self.sock.shutdown(socket.SHUT_RDWR) - except OSError as exc: - # The server might already have closed the connection. - # On Windows, this may result in WSAEINVAL (error 10022): - # An invalid operation was attempted. - if (exc.errno != errno.ENOTCONN - and getattr(exc, 'winerror', 0) != 10022): - raise - finally: - self.sock.close() - - - def socket(self): - """Return socket instance used to connect to IMAP4 server. - - socket = .socket() - """ - return self.sock - - - - # Utility methods - - - def recent(self): - """Return most recent 'RECENT' responses if any exist, - else prompt server for an update using the 'NOOP' command. 
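# --- editor's note ---------------------------------------------------------
# The chunked read() above grows its buffer geometrically instead of
# preallocating whatever size the server announced; a standalone sketch of
# the same pattern (names are illustrative, cap mirrors _SAFE_BUF_SIZE):
def read_capped(stream, size, cap=1 << 20):
    data = stream.read(min(size, cap))      # never preallocate more than cap
    while len(data) < size:
        chunk = stream.read(min(len(data), size - len(data)))
        if not chunk:
            break                           # short read: treat as EOF
        data += chunk
    return data
# ---------------------------------------------------------------------------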
- - (typ, [data]) = .recent() - - 'data' is None if no new messages, - else list of RECENT responses, most recent last. - """ - name = 'RECENT' - typ, dat = self._untagged_response('OK', [None], name) - if dat[-1]: - return typ, dat - typ, dat = self.noop() # Prod server for response - return self._untagged_response(typ, dat, name) - - - def response(self, code): - """Return data for response 'code' if received, or None. - - Old value for response 'code' is cleared. - - (code, [data]) = .response(code) - """ - return self._untagged_response(code, [None], code.upper()) - - - - # IMAP4 commands - - - def append(self, mailbox, flags, date_time, message): - """Append message to named mailbox. - - (typ, [data]) = .append(mailbox, flags, date_time, message) - - All args except `message' can be None. - """ - name = 'APPEND' - if not mailbox: - mailbox = 'INBOX' - if flags: - if (flags[0],flags[-1]) != ('(',')'): - flags = '(%s)' % flags - else: - flags = None - if date_time: - date_time = Time2Internaldate(date_time) - else: - date_time = None - literal = MapCRLF.sub(CRLF, message) - self.literal = literal - return self._simple_command(name, mailbox, flags, date_time) - - - def authenticate(self, mechanism, authobject): - """Authenticate command - requires response processing. - - 'mechanism' specifies which authentication mechanism is to - be used - it must appear in .capabilities in the - form AUTH=. - - 'authobject' must be a callable object: - - data = authobject(response) - - It will be called to process server continuation responses; the - response argument it is passed will be a bytes. It should return bytes - data that will be base64 encoded and sent to the server. It should - return None if the client abort response '*' should be sent instead. - """ - mech = mechanism.upper() - # XXX: shouldn't this code be removed, not commented out? - #cap = 'AUTH=%s' % mech - #if not cap in self.capabilities: # Let the server decide! - # raise self.error("Server doesn't allow %s authentication." % mech) - self.literal = _Authenticator(authobject).process - typ, dat = self._simple_command('AUTHENTICATE', mech) - if typ != 'OK': - raise self.error(dat[-1].decode('utf-8', 'replace')) - self.state = 'AUTH' - return typ, dat - - - def capability(self): - """(typ, [data]) = .capability() - Fetch capabilities list from server.""" - - name = 'CAPABILITY' - typ, dat = self._simple_command(name) - return self._untagged_response(typ, dat, name) - - - def check(self): - """Checkpoint mailbox on server. - - (typ, [data]) = .check() - """ - return self._simple_command('CHECK') - - - def close(self): - """Close currently selected mailbox. - - Deleted messages are removed from writable mailbox. - This is the recommended command before 'LOGOUT'. - - (typ, [data]) = .close() - """ - try: - typ, dat = self._simple_command('CLOSE') - finally: - self.state = 'AUTH' - return typ, dat - - - def copy(self, message_set, new_mailbox): - """Copy 'message_set' messages onto end of 'new_mailbox'. - - (typ, [data]) = .copy(message_set, new_mailbox) - """ - return self._simple_command('COPY', message_set, new_mailbox) - - - def create(self, mailbox): - """Create new mailbox. - - (typ, [data]) = .create(mailbox) - """ - return self._simple_command('CREATE', mailbox) - - - def delete(self, mailbox): - """Delete old mailbox. - - (typ, [data]) = .delete(mailbox) - """ - return self._simple_command('DELETE', mailbox) - - def deleteacl(self, mailbox, who): - """Delete the ACLs (remove any rights) set for who on mailbox. 
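# --- editor's note ---------------------------------------------------------
# Hedged sketch of the authobject contract described in authenticate()
# above: a callable mapping a server challenge (bytes) to response bytes,
# here for AUTH=PLAIN; host and credentials are placeholders.
import imaplib

def plain_auth(challenge):
    return b'\0user@example.com\0secret'    # \0authcid\0passwd per RFC 4616

# M = imaplib.IMAP4_SSL('imap.example.com')
# M.authenticate('PLAIN', plain_auth)       # imaplib base64-encodes the reply
# ---------------------------------------------------------------------------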
- - (typ, [data]) = .deleteacl(mailbox, who) - """ - return self._simple_command('DELETEACL', mailbox, who) - - def enable(self, capability): - """Send an RFC5161 enable string to the server. - - (typ, [data]) = .enable(capability) - """ - if 'ENABLE' not in self.capabilities: - raise IMAP4.error("Server does not support ENABLE") - typ, data = self._simple_command('ENABLE', capability) - if typ == 'OK' and 'UTF8=ACCEPT' in capability.upper(): - self._mode_utf8() - return typ, data - - def expunge(self): - """Permanently remove deleted items from selected mailbox. - - Generates 'EXPUNGE' response for each deleted message. - - (typ, [data]) = .expunge() - - 'data' is list of 'EXPUNGE'd message numbers in order received. - """ - name = 'EXPUNGE' - typ, dat = self._simple_command(name) - return self._untagged_response(typ, dat, name) - - - def fetch(self, message_set, message_parts): - """Fetch (parts of) messages. - - (typ, [data, ...]) = .fetch(message_set, message_parts) - - 'message_parts' should be a string of selected parts - enclosed in parentheses, eg: "(UID BODY[TEXT])". - - 'data' are tuples of message part envelope and data. - """ - name = 'FETCH' - typ, dat = self._simple_command(name, message_set, message_parts) - return self._untagged_response(typ, dat, name) - - - def getacl(self, mailbox): - """Get the ACLs for a mailbox. - - (typ, [data]) = .getacl(mailbox) - """ - typ, dat = self._simple_command('GETACL', mailbox) - return self._untagged_response(typ, dat, 'ACL') - - - def getannotation(self, mailbox, entry, attribute): - """(typ, [data]) = .getannotation(mailbox, entry, attribute) - Retrieve ANNOTATIONs.""" - - typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute) - return self._untagged_response(typ, dat, 'ANNOTATION') - - - def getquota(self, root): - """Get the quota root's resource usage and limits. - - Part of the IMAP4 QUOTA extension defined in rfc2087. - - (typ, [data]) = .getquota(root) - """ - typ, dat = self._simple_command('GETQUOTA', root) - return self._untagged_response(typ, dat, 'QUOTA') - - - def getquotaroot(self, mailbox): - """Get the list of quota roots for the named mailbox. - - (typ, [[QUOTAROOT responses...], [QUOTA responses]]) = .getquotaroot(mailbox) - """ - typ, dat = self._simple_command('GETQUOTAROOT', mailbox) - typ, quota = self._untagged_response(typ, dat, 'QUOTA') - typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT') - return typ, [quotaroot, quota] - - - def list(self, directory='""', pattern='*'): - """List mailbox names in directory matching pattern. - - (typ, [data]) = .list(directory='""', pattern='*') - - 'data' is list of LIST responses. - """ - name = 'LIST' - typ, dat = self._simple_command(name, directory, pattern) - return self._untagged_response(typ, dat, name) - - - def login(self, user, password): - """Identify client using plaintext password. - - (typ, [data]) = .login(user, password) - - NB: 'password' will be quoted. - """ - typ, dat = self._simple_command('LOGIN', user, self._quote(password)) - if typ != 'OK': - raise self.error(dat[-1]) - self.state = 'AUTH' - return typ, dat - - - def login_cram_md5(self, user, password): - """ Force use of CRAM-MD5 authentication. - - (typ, [data]) = .login_cram_md5(user, password) - """ - self.user, self.password = user, password - return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH) - - - def _CRAM_MD5_AUTH(self, challenge): - """ Authobject to use with CRAM-MD5 authentication. 
""" - import hmac - - if isinstance(self.password, str): - password = self.password.encode('utf-8') - else: - password = self.password - - try: - authcode = hmac.HMAC(password, challenge, 'md5') - except ValueError: # HMAC-MD5 is not available - raise self.error("CRAM-MD5 authentication is not supported") - return f"{self.user} {authcode.hexdigest()}" - - - def logout(self): - """Shutdown connection to server. - - (typ, [data]) = .logout() - - Returns server 'BYE' response. - """ - self.state = 'LOGOUT' - typ, dat = self._simple_command('LOGOUT') - self.shutdown() - return typ, dat - - - def lsub(self, directory='""', pattern='*'): - """List 'subscribed' mailbox names in directory matching pattern. - - (typ, [data, ...]) = .lsub(directory='""', pattern='*') - - 'data' are tuples of message part envelope and data. - """ - name = 'LSUB' - typ, dat = self._simple_command(name, directory, pattern) - return self._untagged_response(typ, dat, name) - - def myrights(self, mailbox): - """Show my ACLs for a mailbox (i.e. the rights that I have on mailbox). - - (typ, [data]) = .myrights(mailbox) - """ - typ,dat = self._simple_command('MYRIGHTS', mailbox) - return self._untagged_response(typ, dat, 'MYRIGHTS') - - def namespace(self): - """ Returns IMAP namespaces ala rfc2342 - - (typ, [data, ...]) = .namespace() - """ - name = 'NAMESPACE' - typ, dat = self._simple_command(name) - return self._untagged_response(typ, dat, name) - - - def noop(self): - """Send NOOP command. - - (typ, [data]) = .noop() - """ - if __debug__: - if self.debug >= 3: - self._dump_ur(self.untagged_responses) - return self._simple_command('NOOP') - - - def partial(self, message_num, message_part, start, length): - """Fetch truncated part of a message. - - (typ, [data, ...]) = .partial(message_num, message_part, start, length) - - 'data' is tuple of message part envelope and data. - """ - name = 'PARTIAL' - typ, dat = self._simple_command(name, message_num, message_part, start, length) - return self._untagged_response(typ, dat, 'FETCH') - - - def proxyauth(self, user): - """Assume authentication as "user". - - Allows an authorised administrator to proxy into any user's - mailbox. - - (typ, [data]) = .proxyauth(user) - """ - - name = 'PROXYAUTH' - return self._simple_command('PROXYAUTH', user) - - - def rename(self, oldmailbox, newmailbox): - """Rename old mailbox name to new. - - (typ, [data]) = .rename(oldmailbox, newmailbox) - """ - return self._simple_command('RENAME', oldmailbox, newmailbox) - - - def search(self, charset, *criteria): - """Search mailbox for matching messages. - - (typ, [data]) = .search(charset, criterion, ...) - - 'data' is space separated list of matching message numbers. - If UTF8 is enabled, charset MUST be None. - """ - name = 'SEARCH' - if charset: - if self.utf8_enabled: - raise IMAP4.error("Non-None charset not valid in UTF8 mode") - typ, dat = self._simple_command(name, 'CHARSET', charset, *criteria) - else: - typ, dat = self._simple_command(name, *criteria) - return self._untagged_response(typ, dat, name) - - - def select(self, mailbox='INBOX', readonly=False): - """Select a mailbox. - - Flush all untagged responses. - - (typ, [data]) = .select(mailbox='INBOX', readonly=False) - - 'data' is count of messages in mailbox ('EXISTS' response). - - Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so - other responses should be obtained via .response('FLAGS') etc. - """ - self.untagged_responses = {} # Flush old responses. 
- self.is_readonly = readonly - if readonly: - name = 'EXAMINE' - else: - name = 'SELECT' - typ, dat = self._simple_command(name, mailbox) - if typ != 'OK': - self.state = 'AUTH' # Might have been 'SELECTED' - return typ, dat - self.state = 'SELECTED' - if 'READ-ONLY' in self.untagged_responses \ - and not readonly: - if __debug__: - if self.debug >= 1: - self._dump_ur(self.untagged_responses) - raise self.readonly('%s is not writable' % mailbox) - return typ, self.untagged_responses.get('EXISTS', [None]) - - - def setacl(self, mailbox, who, what): - """Set a mailbox acl. - - (typ, [data]) = .setacl(mailbox, who, what) - """ - return self._simple_command('SETACL', mailbox, who, what) - - - def setannotation(self, *args): - """(typ, [data]) = .setannotation(mailbox[, entry, attribute]+) - Set ANNOTATIONs.""" - - typ, dat = self._simple_command('SETANNOTATION', *args) - return self._untagged_response(typ, dat, 'ANNOTATION') - - - def setquota(self, root, limits): - """Set the quota root's resource limits. - - (typ, [data]) = .setquota(root, limits) - """ - typ, dat = self._simple_command('SETQUOTA', root, limits) - return self._untagged_response(typ, dat, 'QUOTA') - - - def sort(self, sort_criteria, charset, *search_criteria): - """IMAP4rev1 extension SORT command. - - (typ, [data]) = .sort(sort_criteria, charset, search_criteria, ...) - """ - name = 'SORT' - #if not name in self.capabilities: # Let the server decide! - # raise self.error('unimplemented extension command: %s' % name) - if (sort_criteria[0],sort_criteria[-1]) != ('(',')'): - sort_criteria = '(%s)' % sort_criteria - typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria) - return self._untagged_response(typ, dat, name) - - - def starttls(self, ssl_context=None): - name = 'STARTTLS' - if not HAVE_SSL: - raise self.error('SSL support missing') - if self._tls_established: - raise self.abort('TLS session already established') - if name not in self.capabilities: - raise self.abort('TLS not supported by server') - # Generate a default SSL context if none was passed. - if ssl_context is None: - ssl_context = ssl._create_stdlib_context() - typ, dat = self._simple_command(name) - if typ == 'OK': - self.sock = ssl_context.wrap_socket(self.sock, - server_hostname=self.host) - self.file = self.sock.makefile('rb') - self._tls_established = True - self._get_capabilities() - else: - raise self.error("Couldn't establish TLS session") - return self._untagged_response(typ, dat, name) - - - def status(self, mailbox, names): - """Request named status conditions for mailbox. - - (typ, [data]) = .status(mailbox, names) - """ - name = 'STATUS' - #if self.PROTOCOL_VERSION == 'IMAP4': # Let the server decide! - # raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name) - typ, dat = self._simple_command(name, mailbox, names) - return self._untagged_response(typ, dat, name) - - - def store(self, message_set, command, flags): - """Alters flag dispositions for messages in mailbox. - - (typ, [data]) = .store(message_set, command, flags) - """ - if (flags[0],flags[-1]) != ('(',')'): - flags = '(%s)' % flags # Avoid quoting the flags - typ, dat = self._simple_command('STORE', message_set, command, flags) - return self._untagged_response(typ, dat, 'FETCH') - - - def subscribe(self, mailbox): - """Subscribe to new mailbox. 
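# --- editor's note ---------------------------------------------------------
# A typical session using the commands above (select/search/store); the
# mailbox name, host, and search criteria are illustrative.
import imaplib

# M = imaplib.IMAP4_SSL('imap.example.com')
# M.login('user', 'secret')
# M.select('INBOX')
# typ, data = M.search(None, 'UNSEEN')      # space-separated message numbers
# for num in data[0].split():
#     M.store(num, '+FLAGS', '\\Seen')      # bare flags get parenthesised
# M.close(); M.logout()
# ---------------------------------------------------------------------------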
- - (typ, [data]) = .subscribe(mailbox) - """ - return self._simple_command('SUBSCRIBE', mailbox) - - - def thread(self, threading_algorithm, charset, *search_criteria): - """IMAPrev1 extension THREAD command. - - (type, [data]) = .thread(threading_algorithm, charset, search_criteria, ...) - """ - name = 'THREAD' - typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria) - return self._untagged_response(typ, dat, name) - - - def uid(self, command, *args): - """Execute "command arg ..." with messages identified by UID, - rather than message number. - - (typ, [data]) = .uid(command, arg1, arg2, ...) - - Returns response appropriate to 'command'. - """ - command = command.upper() - if not command in Commands: - raise self.error("Unknown IMAP4 UID command: %s" % command) - if self.state not in Commands[command]: - raise self.error("command %s illegal in state %s, " - "only allowed in states %s" % - (command, self.state, - ', '.join(Commands[command]))) - name = 'UID' - typ, dat = self._simple_command(name, command, *args) - if command in ('SEARCH', 'SORT', 'THREAD'): - name = command - else: - name = 'FETCH' - return self._untagged_response(typ, dat, name) - - - def unsubscribe(self, mailbox): - """Unsubscribe from old mailbox. - - (typ, [data]) = .unsubscribe(mailbox) - """ - return self._simple_command('UNSUBSCRIBE', mailbox) - - - def unselect(self): - """Free server's resources associated with the selected mailbox - and returns the server to the authenticated state. - This command performs the same actions as CLOSE, except - that no messages are permanently removed from the currently - selected mailbox. - - (typ, [data]) = .unselect() - """ - try: - typ, data = self._simple_command('UNSELECT') - finally: - self.state = 'AUTH' - return typ, data - - - def xatom(self, name, *args): - """Allow simple extension commands - notified by server in CAPABILITY response. - - Assumes command is legal in current state. - - (typ, [data]) = .xatom(name, arg, ...) - - Returns response appropriate to extension command `name'. - """ - name = name.upper() - #if not name in self.capabilities: # Let the server decide! 
- # raise self.error('unknown extension command: %s' % name) - if not name in Commands: - Commands[name] = (self.state,) - return self._simple_command(name, *args) - - - - # Private methods - - - def _append_untagged(self, typ, dat): - if dat is None: - dat = b'' - ur = self.untagged_responses - if __debug__: - if self.debug >= 5: - self._mesg('untagged_responses[%s] %s += ["%r"]' % - (typ, len(ur.get(typ,'')), dat)) - if typ in ur: - ur[typ].append(dat) - else: - ur[typ] = [dat] - - - def _check_bye(self): - bye = self.untagged_responses.get('BYE') - if bye: - raise self.abort(bye[-1].decode(self._encoding, 'replace')) - - - def _command(self, name, *args): - - if self.state not in Commands[name]: - self.literal = None - raise self.error("command %s illegal in state %s, " - "only allowed in states %s" % - (name, self.state, - ', '.join(Commands[name]))) - - for typ in ('OK', 'NO', 'BAD'): - if typ in self.untagged_responses: - del self.untagged_responses[typ] - - if 'READ-ONLY' in self.untagged_responses \ - and not self.is_readonly: - raise self.readonly('mailbox status changed to READ-ONLY') - - tag = self._new_tag() - name = bytes(name, self._encoding) - data = tag + b' ' + name - for arg in args: - if arg is None: continue - if isinstance(arg, str): - arg = bytes(arg, self._encoding) - data = data + b' ' + arg - - literal = self.literal - if literal is not None: - self.literal = None - if type(literal) is type(self._command): - literator = literal - else: - literator = None - if self.utf8_enabled: - data = data + bytes(' UTF8 (~{%s}' % len(literal), self._encoding) - literal = literal + b')' - else: - data = data + bytes(' {%s}' % len(literal), self._encoding) - - if __debug__: - if self.debug >= 4: - self._mesg('> %r' % data) - else: - self._log('> %r' % data) - - try: - self.send(data + CRLF) - except OSError as val: - raise self.abort('socket error: %s' % val) - - if literal is None: - return tag - - while 1: - # Wait for continuation response - - while self._get_response(): - if self.tagged_commands[tag]: # BAD/NO? - return tag - - # Send literal - - if literator: - literal = literator(self.continuation_response) - - if __debug__: - if self.debug >= 4: - self._mesg('write literal size %s' % len(literal)) - - try: - self.send(literal) - self.send(CRLF) - except OSError as val: - raise self.abort('socket error: %s' % val) - - if not literator: - break - - return tag - - - def _command_complete(self, name, tag): - logout = (name == 'LOGOUT') - # BYE is expected after LOGOUT - if not logout: - self._check_bye() - try: - typ, data = self._get_tagged_response(tag, expect_bye=logout) - except self.abort as val: - raise self.abort('command: %s => %s' % (name, val)) - except self.error as val: - raise self.error('command: %s => %s' % (name, val)) - if not logout: - self._check_bye() - if typ == 'BAD': - raise self.error('%s command error: %s %s' % (name, typ, data)) - return typ, data - - - def _get_capabilities(self): - typ, dat = self.capability() - if dat == [None]: - raise self.error('no CAPABILITY response from server') - dat = str(dat[-1], self._encoding) - dat = dat.upper() - self.capabilities = tuple(dat.split()) - - - def _get_response(self): - - # Read response and store. - # - # Returns None for continuation responses, - # otherwise first response line received. - - resp = self._get_line() - - # Command completion response? 
- - if self._match(self.tagre, resp): - tag = self.mo.group('tag') - if not tag in self.tagged_commands: - raise self.abort('unexpected tagged response: %r' % resp) - - typ = self.mo.group('type') - typ = str(typ, self._encoding) - dat = self.mo.group('data') - self.tagged_commands[tag] = (typ, [dat]) - else: - dat2 = None - - # '*' (untagged) responses? - - if not self._match(Untagged_response, resp): - if self._match(self.Untagged_status, resp): - dat2 = self.mo.group('data2') - - if self.mo is None: - # Only other possibility is '+' (continuation) response... - - if self._match(Continuation, resp): - self.continuation_response = self.mo.group('data') - return None # NB: indicates continuation - - raise self.abort("unexpected response: %r" % resp) - - typ = self.mo.group('type') - typ = str(typ, self._encoding) - dat = self.mo.group('data') - if dat is None: dat = b'' # Null untagged response - if dat2: dat = dat + b' ' + dat2 - - # Is there a literal to come? - - while self._match(self.Literal, dat): - - # Read literal direct from connection. - - size = int(self.mo.group('size')) - if __debug__: - if self.debug >= 4: - self._mesg('read literal size %s' % size) - data = self.read(size) - - # Store response with literal as tuple - - self._append_untagged(typ, (dat, data)) - - # Read trailer - possibly containing another literal - - dat = self._get_line() - - self._append_untagged(typ, dat) - - # Bracketed response information? - - if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat): - typ = self.mo.group('type') - typ = str(typ, self._encoding) - self._append_untagged(typ, self.mo.group('data')) - - if __debug__: - if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'): - self._mesg('%s response: %r' % (typ, dat)) - - return resp - - - def _get_tagged_response(self, tag, expect_bye=False): - - while 1: - result = self.tagged_commands[tag] - if result is not None: - del self.tagged_commands[tag] - return result - - if expect_bye: - typ = 'BYE' - bye = self.untagged_responses.pop(typ, None) - if bye is not None: - # Server replies to the "LOGOUT" command with "BYE" - return (typ, bye) - - # If we've seen a BYE at this point, the socket will be - # closed, so report the BYE now. - self._check_bye() - - # Some have reported "unexpected response" exceptions. - # Note that ignoring them here causes loops. - # Instead, send me details of the unexpected response and - # I'll update the code in `_get_response()'. - - try: - self._get_response() - except self.abort as val: - if __debug__: - if self.debug >= 1: - self.print_log() - raise - - - def _get_line(self): - - line = self.readline() - if not line: - raise self.abort('socket error: EOF') - - # Protocol mandates all lines terminated by CRLF - if not line.endswith(b'\r\n'): - raise self.abort('socket error: unterminated line: %r' % line) - - line = line[:-2] - if __debug__: - if self.debug >= 4: - self._mesg('< %r' % line) - else: - self._log('< %r' % line) - return line - - - def _match(self, cre, s): - - # Run compiled regular expression match method on 's'. - # Save result, return success. 
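# Aside on the literal protocol driven by _command()/_get_response() above: a
# client announces a literal as ' {<size>}' + CRLF, waits for the server's '+'
# continuation, then sends the raw bytes; with ENABLE UTF8=ACCEPT the
# ' UTF8 (~{<size>}' form wraps the literal instead. A minimal standalone
# sketch of that framing (frame_literal is a hypothetical helper, not part of
# imaplib):
def frame_literal(command_line: bytes, literal: bytes, utf8_enabled=False):
    """Return (line to send first, bytes to send after '+' continuation)."""
    if utf8_enabled:
        head = command_line + (b' UTF8 (~{%d}' % len(literal))
        tail = literal + b')'
    else:
        head = command_line + (b' {%d}' % len(literal))
        tail = literal
    return head + b'\r\n', tail + b'\r\n'

# e.g. frame_literal(b'A001 APPEND INBOX', b'From: me\r\n\r\nbody') yields
# b'A001 APPEND INBOX {16}\r\n' first, then the 16 literal bytes plus CRLF.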
- - self.mo = cre.match(s) - if __debug__: - if self.mo is not None and self.debug >= 5: - self._mesg("\tmatched %r => %r" % (cre.pattern, self.mo.groups())) - return self.mo is not None - - - def _new_tag(self): - - tag = self.tagpre + bytes(str(self.tagnum), self._encoding) - self.tagnum = self.tagnum + 1 - self.tagged_commands[tag] = None - return tag - - - def _quote(self, arg): - - arg = arg.replace('\\', '\\\\') - arg = arg.replace('"', '\\"') - - return '"' + arg + '"' - - - def _simple_command(self, name, *args): - - return self._command_complete(name, self._command(name, *args)) - - - def _untagged_response(self, typ, dat, name): - if typ == 'NO': - return typ, dat - if not name in self.untagged_responses: - return typ, [None] - data = self.untagged_responses.pop(name) - if __debug__: - if self.debug >= 5: - self._mesg('untagged_responses[%s] => %s' % (name, data)) - return typ, data - - - if __debug__: - - def _mesg(self, s, secs=None): - if secs is None: - secs = time.time() - tm = time.strftime('%M:%S', time.localtime(secs)) - sys.stderr.write(' %s.%02d %s\n' % (tm, (secs*100)%100, s)) - sys.stderr.flush() - - def _dump_ur(self, untagged_resp_dict): - if not untagged_resp_dict: - return - items = (f'{key}: {value!r}' - for key, value in untagged_resp_dict.items()) - self._mesg('untagged responses dump:' + '\n\t\t'.join(items)) - - def _log(self, line): - # Keep log of last `_cmd_log_len' interactions for debugging. - self._cmd_log[self._cmd_log_idx] = (line, time.time()) - self._cmd_log_idx += 1 - if self._cmd_log_idx >= self._cmd_log_len: - self._cmd_log_idx = 0 - - def print_log(self): - self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log)) - i, n = self._cmd_log_idx, self._cmd_log_len - while n: - try: - self._mesg(*self._cmd_log[i]) - except: - pass - i += 1 - if i >= self._cmd_log_len: - i = 0 - n -= 1 - - -if HAVE_SSL: - - class IMAP4_SSL(IMAP4): - - """IMAP4 client class over SSL connection - - Instantiate with: IMAP4_SSL([host[, port[, ssl_context[, timeout=None]]]]) - - host - host's name (default: localhost); - port - port number (default: standard IMAP4 SSL port); - ssl_context - a SSLContext object that contains your certificate chain - and private key (default: None) - timeout - socket timeout (default: None) If timeout is not given or is None, - the global default socket timeout is used - - for more documentation see the docstring of the parent class IMAP4. - """ - - - def __init__(self, host='', port=IMAP4_SSL_PORT, - *, ssl_context=None, timeout=None): - if ssl_context is None: - ssl_context = ssl._create_stdlib_context() - self.ssl_context = ssl_context - IMAP4.__init__(self, host, port, timeout) - - def _create_socket(self, timeout): - sock = IMAP4._create_socket(self, timeout) - return self.ssl_context.wrap_socket(sock, - server_hostname=self.host) - - def open(self, host='', port=IMAP4_SSL_PORT, timeout=None): - """Setup connection to remote server on "host:port". - (default: localhost:standard IMAP4 SSL port). - This connection will be used by the routines: - read, readline, send, shutdown. - """ - IMAP4.open(self, host, port, timeout) - - __all__.append("IMAP4_SSL") - - -class IMAP4_stream(IMAP4): - - """IMAP4 client class over a stream - - Instantiate with: IMAP4_stream(command) - - "command" - a string that can be passed to subprocess.Popen() - - for more documentation see the docstring of the parent class IMAP4. 
- """ - - - def __init__(self, command): - self.command = command - IMAP4.__init__(self) - - - def open(self, host=None, port=None, timeout=None): - """Setup a stream connection. - This connection will be used by the routines: - read, readline, send, shutdown. - """ - self.host = None # For compatibility with parent class - self.port = None - self.sock = None - self.file = None - self.process = subprocess.Popen(self.command, - bufsize=DEFAULT_BUFFER_SIZE, - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - shell=True, close_fds=True) - self.writefile = self.process.stdin - self.readfile = self.process.stdout - - def read(self, size): - """Read 'size' bytes from remote.""" - return self.readfile.read(size) - - - def readline(self): - """Read line from remote.""" - return self.readfile.readline() - - - def send(self, data): - """Send data to remote.""" - self.writefile.write(data) - self.writefile.flush() - - - def shutdown(self): - """Close I/O established in "open".""" - self.readfile.close() - self.writefile.close() - self.process.wait() - - - -class _Authenticator: - - """Private class to provide en/decoding - for base64-based authentication conversation. - """ - - def __init__(self, mechinst): - self.mech = mechinst # Callable object to provide/process data - - def process(self, data): - ret = self.mech(self.decode(data)) - if ret is None: - return b'*' # Abort conversation - return self.encode(ret) - - def encode(self, inp): - # - # Invoke binascii.b2a_base64 iteratively with - # short even length buffers, strip the trailing - # line feed from the result and append. "Even" - # means a number that factors to both 6 and 8, - # so when it gets to the end of the 8-bit input - # there's no partial 6-bit output. - # - oup = b'' - if isinstance(inp, str): - inp = inp.encode('utf-8') - while inp: - if len(inp) > 48: - t = inp[:48] - inp = inp[48:] - else: - t = inp - inp = b'' - e = binascii.b2a_base64(t) - if e: - oup = oup + e[:-1] - return oup - - def decode(self, inp): - if not inp: - return b'' - return binascii.a2b_base64(inp) - -Months = ' Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split(' ') -Mon2num = {s.encode():n+1 for n, s in enumerate(Months[1:])} - -def Internaldate2tuple(resp): - """Parse an IMAP4 INTERNALDATE string. - - Return corresponding local time. The return value is a - time.struct_time tuple or None if the string has wrong format. - """ - - mo = InternalDate.match(resp) - if not mo: - return None - - mon = Mon2num[mo.group('mon')] - zonen = mo.group('zonen') - - day = int(mo.group('day')) - year = int(mo.group('year')) - hour = int(mo.group('hour')) - min = int(mo.group('min')) - sec = int(mo.group('sec')) - zoneh = int(mo.group('zoneh')) - zonem = int(mo.group('zonem')) - - # INTERNALDATE timezone must be subtracted to get UT - - zone = (zoneh*60 + zonem)*60 - if zonen == b'-': - zone = -zone - - tt = (year, mon, day, hour, min, sec, -1, -1, -1) - utc = calendar.timegm(tt) - zone - - return time.localtime(utc) - - - -def Int2AP(num): - - """Convert integer to A-P string representation.""" - - val = b''; AP = b'ABCDEFGHIJKLMNOP' - num = int(abs(num)) - while num: - num, mod = divmod(num, 16) - val = AP[mod:mod+1] + val - return val - - - -def ParseFlags(resp): - - """Convert IMAP4 flags response to python tuple.""" - - mo = Flags.match(resp) - if not mo: - return () - - return tuple(mo.group('flags').split()) - - -def Time2Internaldate(date_time): - - """Convert date_time to IMAP4 INTERNALDATE representation. 
- - Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The - date_time argument can be a number (int or float) representing - seconds since epoch (as returned by time.time()), a 9-tuple - representing local time, an instance of time.struct_time (as - returned by time.localtime()), an aware datetime instance or a - double-quoted string. In the last case, it is assumed to already - be in the correct format. - """ - if isinstance(date_time, (int, float)): - dt = datetime.fromtimestamp(date_time, - timezone.utc).astimezone() - elif isinstance(date_time, tuple): - try: - gmtoff = date_time.tm_gmtoff - except AttributeError: - if time.daylight: - dst = date_time[8] - if dst == -1: - dst = time.localtime(time.mktime(date_time))[8] - gmtoff = -(time.timezone, time.altzone)[dst] - else: - gmtoff = -time.timezone - delta = timedelta(seconds=gmtoff) - dt = datetime(*date_time[:6], tzinfo=timezone(delta)) - elif isinstance(date_time, datetime): - if date_time.tzinfo is None: - raise ValueError("date_time must be aware") - dt = date_time - elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'): - return date_time # Assume in correct format - else: - raise ValueError("date_time not of a known type") - fmt = '"%d-{}-%Y %H:%M:%S %z"'.format(Months[dt.month]) - return dt.strftime(fmt) - - - -if __name__ == '__main__': - - # To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]' - # or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"' - # to test the IMAP4_stream class - - import getopt, getpass - - try: - optlist, args = getopt.getopt(sys.argv[1:], 'd:s:') - except getopt.error as val: - optlist, args = (), () - - stream_command = None - for opt,val in optlist: - if opt == '-d': - Debug = int(val) - elif opt == '-s': - stream_command = val - if not args: args = (stream_command,) - - if not args: args = ('',) - - host = args[0] - - USER = getpass.getuser() - PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost")) - - test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'} - test_seq1 = ( - ('login', (USER, PASSWD)), - ('create', ('/tmp/xxx 1',)), - ('rename', ('/tmp/xxx 1', '/tmp/yyy')), - ('CREATE', ('/tmp/yyz 2',)), - ('append', ('/tmp/yyz 2', None, None, test_mesg)), - ('list', ('/tmp', 'yy*')), - ('select', ('/tmp/yyz 2',)), - ('search', (None, 'SUBJECT', 'test')), - ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')), - ('store', ('1', 'FLAGS', r'(\Deleted)')), - ('namespace', ()), - ('expunge', ()), - ('recent', ()), - ('close', ()), - ) - - test_seq2 = ( - ('select', ()), - ('response',('UIDVALIDITY',)), - ('uid', ('SEARCH', 'ALL')), - ('response', ('EXISTS',)), - ('append', (None, None, None, test_mesg)), - ('recent', ()), - ('logout', ()), - ) - - def run(cmd, args): - M._mesg('%s %s' % (cmd, args)) - typ, dat = getattr(M, cmd)(*args) - M._mesg('%s => %s %s' % (cmd, typ, dat)) - if typ == 'NO': raise dat[0] - return dat - - try: - if stream_command: - M = IMAP4_stream(stream_command) - else: - M = IMAP4(host) - if M.state == 'AUTH': - test_seq1 = test_seq1[1:] # Login not needed - M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION) - M._mesg('CAPABILITIES = %r' % (M.capabilities,)) - - for cmd,args in test_seq1: - run(cmd, args) - - for ml in run('list', ('/tmp/', 'yy%')): - mo = re.match(r'.*"([^"]+)"$', ml) - if mo: path = mo.group(1) - else: path = ml.split()[-1] - run('delete', (path,)) - - for cmd,args in test_seq2: - dat = run(cmd, args) - - if 
(cmd,args) != ('uid', ('SEARCH', 'ALL')): - continue - - uid = dat[-1].split() - if not uid: continue - run('uid', ('FETCH', '%s' % uid[-1], - '(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)')) - - print('\nAll tests OK.') - - except: - print('\nTests failed.') - - if not Debug: - print(''' -If you would like to see debugging output, -try: %s -d5 -''' % sys.argv[0]) - - raise diff --git a/Python313_13_x86_Template/Lib/importlib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 49e1ab22..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/__pycache__/_abc.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/__pycache__/_abc.cpython-313.pyc deleted file mode 100644 index 6796758e..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/__pycache__/_abc.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/__pycache__/abc.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/__pycache__/abc.cpython-313.pyc deleted file mode 100644 index 3c62a310..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/__pycache__/abc.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/__pycache__/readers.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/__pycache__/readers.cpython-313.pyc deleted file mode 100644 index 89951a5d..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/__pycache__/readers.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/_bootstrap.py b/Python313_13_x86_Template/Lib/importlib/_bootstrap.py deleted file mode 100644 index aed993cc..00000000 --- a/Python313_13_x86_Template/Lib/importlib/_bootstrap.py +++ /dev/null @@ -1,1559 +0,0 @@ -"""Core implementation of import. - -This module is NOT meant to be directly imported! It has been designed such -that it can be bootstrapped into Python as the implementation of import. As -such it requires the injection of specific modules and attributes in order to -work. One should use importlib as the public-facing version of this module. - -""" -# -# IMPORTANT: Whenever making changes to this module, be sure to run a top-level -# `make regen-importlib` followed by `make` in order to get the frozen version -# of the module updated. Not doing so will result in the Makefile to fail for -# all others who don't have a ./python around to freeze the module -# in the early stages of compilation. -# - -# See importlib._setup() for what is injected into the global namespace. - -# When editing this code be aware that code executed at import time CANNOT -# reference any injected objects! This includes not only global code but also -# anything specified at the class level. 
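# The header above stresses that nothing in this module may rely on normal
# imports: it is frozen into the interpreter, and its dependencies (_thread,
# _warnings, _weakref) are injected later by _setup(). A toy, self-contained
# illustration of that injection pattern (CORE_SOURCE and set_up are made-up
# names, not part of _bootstrap):
import types

CORE_SOURCE = """
_math = None          # placeholder global, filled in by set_up()

def hypot(a, b):
    return _math.sqrt(a * a + b * b)   # only callable after injection
"""

core = types.ModuleType('core')
exec(CORE_SOURCE, core.__dict__)

def set_up(module):
    import math
    module._math = math   # the injection step, analogous to _setup()

set_up(core)
print(core.hypot(3, 4))   # 5.0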
- -def _object_name(obj): - try: - return obj.__qualname__ - except AttributeError: - return type(obj).__qualname__ - -# Bootstrap-related code ###################################################### - -# Modules injected manually by _setup() -_thread = None -_warnings = None -_weakref = None - -# Import done by _install_external_importers() -_bootstrap_external = None - - -def _wrap(new, old): - """Simple substitute for functools.update_wrapper.""" - for replace in ['__module__', '__name__', '__qualname__', '__doc__']: - if hasattr(old, replace): - setattr(new, replace, getattr(old, replace)) - new.__dict__.update(old.__dict__) - - -def _new_module(name): - return type(sys)(name) - - -# Module-level locking ######################################################## - -# For a list that can have a weakref to it. -class _List(list): - __slots__ = ("__weakref__",) - - -# Copied from weakref.py with some simplifications and modifications unique to -# bootstrapping importlib. Many methods were simply deleting for simplicity, so if they -# are needed in the future they may work if simply copied back in. -class _WeakValueDictionary: - - def __init__(self): - self_weakref = _weakref.ref(self) - - # Inlined to avoid issues with inheriting from _weakref.ref before _weakref is - # set by _setup(). Since there's only one instance of this class, this is - # not expensive. - class KeyedRef(_weakref.ref): - - __slots__ = "key", - - def __new__(type, ob, key): - self = super().__new__(type, ob, type.remove) - self.key = key - return self - - def __init__(self, ob, key): - super().__init__(ob, self.remove) - - @staticmethod - def remove(wr): - nonlocal self_weakref - - self = self_weakref() - if self is not None: - if self._iterating: - self._pending_removals.append(wr.key) - else: - _weakref._remove_dead_weakref(self.data, wr.key) - - self._KeyedRef = KeyedRef - self.clear() - - def clear(self): - self._pending_removals = [] - self._iterating = set() - self.data = {} - - def _commit_removals(self): - pop = self._pending_removals.pop - d = self.data - while True: - try: - key = pop() - except IndexError: - return - _weakref._remove_dead_weakref(d, key) - - def get(self, key, default=None): - if self._pending_removals: - self._commit_removals() - try: - wr = self.data[key] - except KeyError: - return default - else: - if (o := wr()) is None: - return default - else: - return o - - def setdefault(self, key, default=None): - try: - o = self.data[key]() - except KeyError: - o = None - if o is None: - if self._pending_removals: - self._commit_removals() - self.data[key] = self._KeyedRef(default, key) - return default - else: - return o - - -# A dict mapping module names to weakrefs of _ModuleLock instances. -# Dictionary protected by the global import lock. -_module_locks = {} - -# A dict mapping thread IDs to weakref'ed lists of _ModuleLock instances. -# This maps a thread to the module locks it is blocking on acquiring. The -# values are lists because a single thread could perform a re-entrant import -# and be "in the process" of blocking on locks for more than one module. A -# thread can be "in the process" because a thread cannot actually block on -# acquiring more than one lock but it can have set up bookkeeping that reflects -# that it intends to block on acquiring more than one lock. -# -# The dictionary uses a WeakValueDictionary to avoid keeping unnecessary -# lists around, regardless of GC runs. This way there's no memory leak if -# the list is no longer needed (GH-106176). 
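# _module_locks holds only weak references, so a module's lock object can be
# garbage-collected as soon as no import is using it; the stripped-down
# _WeakValueDictionary above exists only because the real weakref module is
# not importable this early. The same registry shape with the full stdlib
# class (illustrative sketch; get_lock is a hypothetical name):
import threading, weakref

class _Lock:                          # plain class, so weak-referenceable
    def __init__(self, name):
        self.name = name
        self.lock = threading.RLock()

registry = weakref.WeakValueDictionary()

def get_lock(name):
    lock = registry.get(name)
    if lock is None:
        lock = _Lock(name)
        registry[name] = lock         # registry keeps only a weak reference
    return lock

l = get_lock('pkg.mod')
assert get_lock('pkg.mod') is l       # same lock while strongly referenced
del l                                 # drop the last strong reference...
print('pkg.mod' in registry)          # ...typically False now (CPython)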
-_blocking_on = None - - -class _BlockingOnManager: - """A context manager responsible to updating ``_blocking_on``.""" - def __init__(self, thread_id, lock): - self.thread_id = thread_id - self.lock = lock - - def __enter__(self): - """Mark the running thread as waiting for self.lock. via _blocking_on.""" - # Interactions with _blocking_on are *not* protected by the global - # import lock here because each thread only touches the state that it - # owns (state keyed on its thread id). The global import lock is - # re-entrant (i.e., a single thread may take it more than once) so it - # wouldn't help us be correct in the face of re-entrancy either. - - self.blocked_on = _blocking_on.setdefault(self.thread_id, _List()) - self.blocked_on.append(self.lock) - - def __exit__(self, *args, **kwargs): - """Remove self.lock from this thread's _blocking_on list.""" - self.blocked_on.remove(self.lock) - - -class _DeadlockError(RuntimeError): - pass - - - -def _has_deadlocked(target_id, *, seen_ids, candidate_ids, blocking_on): - """Check if 'target_id' is holding the same lock as another thread(s). - - The search within 'blocking_on' starts with the threads listed in - 'candidate_ids'. 'seen_ids' contains any threads that are considered - already traversed in the search. - - Keyword arguments: - target_id -- The thread id to try to reach. - seen_ids -- A set of threads that have already been visited. - candidate_ids -- The thread ids from which to begin. - blocking_on -- A dict representing the thread/blocking-on graph. This may - be the same object as the global '_blocking_on' but it is - a parameter to reduce the impact that global mutable - state has on the result of this function. - """ - if target_id in candidate_ids: - # If we have already reached the target_id, we're done - signal that it - # is reachable. - return True - - # Otherwise, try to reach the target_id from each of the given candidate_ids. - for tid in candidate_ids: - if not (candidate_blocking_on := blocking_on.get(tid)): - # There are no edges out from this node, skip it. - continue - elif tid in seen_ids: - # bpo 38091: the chain of tid's we encounter here eventually leads - # to a fixed point or a cycle, but does not reach target_id. - # This means we would not actually deadlock. This can happen if - # other threads are at the beginning of acquire() below. - return False - seen_ids.add(tid) - - # Follow the edges out from this thread. - edges = [lock.owner for lock in candidate_blocking_on] - if _has_deadlocked(target_id, seen_ids=seen_ids, candidate_ids=edges, - blocking_on=blocking_on): - return True - - return False - - -class _ModuleLock: - """A recursive lock implementation which is able to detect deadlocks - (e.g. thread 1 trying to take locks A then B, and thread 2 trying to - take locks B then A). - """ - - def __init__(self, name): - # Create an RLock for protecting the import process for the - # corresponding module. Since it is an RLock, a single thread will be - # able to take it more than once. This is necessary to support - # re-entrancy in the import system that arises from (at least) signal - # handlers and the garbage collector. Consider the case of: - # - # import foo - # -> ... - # -> importlib._bootstrap._ModuleLock.acquire - # -> ... - # -> - # -> __del__ - # -> import foo - # -> ... 
- # -> importlib._bootstrap._ModuleLock.acquire - # -> _BlockingOnManager.__enter__ - # - # If a different thread than the running one holds the lock then the - # thread will have to block on taking the lock, which is what we want - # for thread safety. - self.lock = _thread.RLock() - self.wakeup = _thread.allocate_lock() - - # The name of the module for which this is a lock. - self.name = name - - # Can end up being set to None if this lock is not owned by any thread - # or the thread identifier for the owning thread. - self.owner = None - - # Represent the number of times the owning thread has acquired this lock - # via a list of True. This supports RLock-like ("re-entrant lock") - # behavior, necessary in case a single thread is following a circular - # import dependency and needs to take the lock for a single module - # more than once. - # - # Counts are represented as a list of True because list.append(True) - # and list.pop() are both atomic and thread-safe in CPython and it's hard - # to find another primitive with the same properties. - self.count = [] - - # This is a count of the number of threads that are blocking on - # self.wakeup.acquire() awaiting to get their turn holding this module - # lock. When the module lock is released, if this is greater than - # zero, it is decremented and `self.wakeup` is released one time. The - # intent is that this will let one other thread make more progress on - # acquiring this module lock. This repeats until all the threads have - # gotten a turn. - # - # This is incremented in self.acquire() when a thread notices it is - # going to have to wait for another thread to finish. - # - # See the comment above count for explanation of the representation. - self.waiters = [] - - def has_deadlock(self): - # To avoid deadlocks for concurrent or re-entrant circular imports, - # look at _blocking_on to see if any threads are blocking - # on getting the import lock for any module for which the import lock - # is held by this thread. - return _has_deadlocked( - # Try to find this thread. - target_id=_thread.get_ident(), - seen_ids=set(), - # Start from the thread that holds the import lock for this - # module. - candidate_ids=[self.owner], - # Use the global "blocking on" state. - blocking_on=_blocking_on, - ) - - def acquire(self): - """ - Acquire the module lock. If a potential deadlock is detected, - a _DeadlockError is raised. - Otherwise, the lock is always acquired and True is returned. - """ - tid = _thread.get_ident() - with _BlockingOnManager(tid, self): - while True: - # Protect interaction with state on self with a per-module - # lock. This makes it safe for more than one thread to try to - # acquire the lock for a single module at the same time. - with self.lock: - if self.count == [] or self.owner == tid: - # If the lock for this module is unowned then we can - # take the lock immediately and succeed. If the lock - # for this module is owned by the running thread then - # we can also allow the acquire to succeed. This - # supports circular imports (thread T imports module A - # which imports module B which imports module A). - self.owner = tid - self.count.append(True) - return True - - # At this point we know the lock is held (because count != - # 0) by another thread (because owner != tid). We'll have - # to get in line to take the module lock. - - # But first, check to see if this thread would create a - # deadlock by acquiring this module lock. If it would - # then just stop with an error. 
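# has_deadlock() above reduces to a reachability question: starting from the
# thread that currently owns this lock, follow the "blocked on a lock owned
# by ..." edges and see whether we arrive back at ourselves. A standalone
# sketch of that walk over a plain dict (reaches is a hypothetical name; the
# real code walks _blocking_on via lock.owner):
def reaches(target, start, edges, seen=None):
    seen = set() if seen is None else seen
    if start == target:
        return True
    if start in seen:
        return False                        # a cycle that never hits target
    seen.add(start)
    return any(reaches(target, nxt, edges, seen)
               for nxt in edges.get(start, ()))

# Thread 1 waits on a lock owned by thread 2 and vice versa:
blocking_on = {1: [2], 2: [1]}
print(reaches(1, 2, blocking_on))           # True -> importing would deadlock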
- # - # It's not clear who is expected to handle this error. - # There is one handler in _lock_unlock_module but many - # times this method is called when entering the context - # manager _ModuleLockManager instead - so _DeadlockError - # will just propagate up to application code. - # - # This seems to be more than just a hypothetical - - # https://stackoverflow.com/questions/59509154 - # https://github.com/encode/django-rest-framework/issues/7078 - if self.has_deadlock(): - raise _DeadlockError(f'deadlock detected by {self!r}') - - # Check to see if we're going to be able to acquire the - # lock. If we are going to have to wait then increment - # the waiters so `self.release` will know to unblock us - # later on. We do this part non-blockingly so we don't - # get stuck here before we increment waiters. We have - # this extra acquire call (in addition to the one below, - # outside the self.lock context manager) to make sure - # self.wakeup is held when the next acquire is called (so - # we block). This is probably needlessly complex and we - # should just take self.wakeup in the return codepath - # above. - if self.wakeup.acquire(False): - self.waiters.append(None) - - # Now take the lock in a blocking fashion. This won't - # complete until the thread holding this lock - # (self.owner) calls self.release. - self.wakeup.acquire() - - # Taking the lock has served its purpose (making us wait), so we can - # give it up now. We'll take it w/o blocking again on the - # next iteration around this 'while' loop. - self.wakeup.release() - - def release(self): - tid = _thread.get_ident() - with self.lock: - if self.owner != tid: - raise RuntimeError('cannot release un-acquired lock') - assert len(self.count) > 0 - self.count.pop() - if not len(self.count): - self.owner = None - if len(self.waiters) > 0: - self.waiters.pop() - self.wakeup.release() - - def __repr__(self): - return f'_ModuleLock({self.name!r}) at {id(self)}' - - -class _DummyModuleLock: - """A simple _ModuleLock equivalent for Python builds without - multi-threading support.""" - - def __init__(self, name): - self.name = name - self.count = 0 - - def acquire(self): - self.count += 1 - return True - - def release(self): - if self.count == 0: - raise RuntimeError('cannot release un-acquired lock') - self.count -= 1 - - def __repr__(self): - return f'_DummyModuleLock({self.name!r}) at {id(self)}' - - -class _ModuleLockManager: - - def __init__(self, name): - self._name = name - self._lock = None - - def __enter__(self): - self._lock = _get_module_lock(self._name) - self._lock.acquire() - - def __exit__(self, *args, **kwargs): - self._lock.release() - - -# The following two functions are for consumption by Python/import.c. - -def _get_module_lock(name): - """Get or create the module lock for a given module name. - - Acquire/release internally the global import lock to protect - _module_locks.""" - - _imp.acquire_lock() - try: - try: - lock = _module_locks[name]() - except KeyError: - lock = None - - if lock is None: - if _thread is None: - lock = _DummyModuleLock(name) - else: - lock = _ModuleLock(name) - - def cb(ref, name=name): - _imp.acquire_lock() - try: - # bpo-31070: Check if another thread created a new lock - # after the previous lock was destroyed - # but before the weakref callback was called. 
-                    if _module_locks.get(name) is ref:
-                        del _module_locks[name]
-                finally:
-                    _imp.release_lock()
-
-            _module_locks[name] = _weakref.ref(lock, cb)
-    finally:
-        _imp.release_lock()
-
-    return lock
-
-
-def _lock_unlock_module(name):
-    """Acquires then releases the module lock for a given module name.
-
-    This is used to ensure a module is completely initialized, in the
-    event it is being imported by another thread.
-    """
-    lock = _get_module_lock(name)
-    try:
-        lock.acquire()
-    except _DeadlockError:
-        # Concurrent circular import, we'll accept a partially initialized
-        # module object.
-        pass
-    else:
-        lock.release()
-
-# Frame stripping magic ###############################################
-def _call_with_frames_removed(f, *args, **kwds):
-    """remove_importlib_frames in import.c will always remove sequences
-    of importlib frames that end with a call to this function
-
-    Use it instead of a normal call in places where including the importlib
-    frames introduces unwanted noise into the traceback (e.g. when executing
-    module code)
-    """
-    return f(*args, **kwds)
-
-
-def _verbose_message(message, *args, verbosity=1):
-    """Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
-    if sys.flags.verbose >= verbosity:
-        if not message.startswith(('#', 'import ')):
-            message = '# ' + message
-        print(message.format(*args), file=sys.stderr)
-
-
-def _requires_builtin(fxn):
-    """Decorator to verify the named module is built-in."""
-    def _requires_builtin_wrapper(self, fullname):
-        if fullname not in sys.builtin_module_names:
-            raise ImportError(f'{fullname!r} is not a built-in module',
-                              name=fullname)
-        return fxn(self, fullname)
-    _wrap(_requires_builtin_wrapper, fxn)
-    return _requires_builtin_wrapper
-
-
-def _requires_frozen(fxn):
-    """Decorator to verify the named module is frozen."""
-    def _requires_frozen_wrapper(self, fullname):
-        if not _imp.is_frozen(fullname):
-            raise ImportError(f'{fullname!r} is not a frozen module',
-                              name=fullname)
-        return fxn(self, fullname)
-    _wrap(_requires_frozen_wrapper, fxn)
-    return _requires_frozen_wrapper
-
-
-# Typically used by loader classes as a method replacement.
-def _load_module_shim(self, fullname):
-    """Load the specified module into sys.modules and return it.
-
-    This method is deprecated.  Use loader.exec_module() instead.
-
-    """
-    msg = ("the load_module() method is deprecated and slated for removal in "
-           "Python 3.15; use exec_module() instead")
-    _warnings.warn(msg, DeprecationWarning)
-    spec = spec_from_loader(fullname, self)
-    if fullname in sys.modules:
-        module = sys.modules[fullname]
-        _exec(spec, module)
-        return sys.modules[fullname]
-    else:
-        return _load(spec)
-
-# Module specifications #######################################################
-
-def _module_repr(module):
-    """The implementation of ModuleType.__repr__()."""
-    loader = getattr(module, '__loader__', None)
-    if spec := getattr(module, "__spec__", None):
-        return _module_repr_from_spec(spec)
-    # Fall through to a catch-all which always succeeds.
-    try:
-        name = module.__name__
-    except AttributeError:
-        name = '?'
-    try:
-        filename = module.__file__
-    except AttributeError:
-        if loader is None:
-            return f'<module {name!r}>'
-        else:
-            return f'<module {name!r} ({loader!r})>'
-    else:
-        return f'<module {name!r} from {filename!r}>'
-
-
-class ModuleSpec:
-    """The specification for a module, used for loading.
-
-    A module's spec is the source for information about the module.  For
-    data associated with the module, including source, use the spec's
-    loader.
-
-    `name` is the absolute name of the module.
`loader` is the loader - to use when loading the module. `parent` is the name of the - package the module is in. The parent is derived from the name. - - `is_package` determines if the module is considered a package or - not. On modules this is reflected by the `__path__` attribute. - - `origin` is the specific location used by the loader from which to - load the module, if that information is available. When filename is - set, origin will match. - - `has_location` indicates that a spec's "origin" reflects a location. - When this is True, `__file__` attribute of the module is set. - - `cached` is the location of the cached bytecode file, if any. It - corresponds to the `__cached__` attribute. - - `submodule_search_locations` is the sequence of path entries to - search when importing submodules. If set, is_package should be - True--and False otherwise. - - Packages are simply modules that (may) have submodules. If a spec - has a non-None value in `submodule_search_locations`, the import - system will consider modules loaded from the spec as packages. - - Only finders (see importlib.abc.MetaPathFinder and - importlib.abc.PathEntryFinder) should modify ModuleSpec instances. - - """ - - def __init__(self, name, loader, *, origin=None, loader_state=None, - is_package=None): - self.name = name - self.loader = loader - self.origin = origin - self.loader_state = loader_state - self.submodule_search_locations = [] if is_package else None - self._uninitialized_submodules = [] - - # file-location attributes - self._set_fileattr = False - self._cached = None - - def __repr__(self): - args = [f'name={self.name!r}', f'loader={self.loader!r}'] - if self.origin is not None: - args.append(f'origin={self.origin!r}') - if self.submodule_search_locations is not None: - args.append(f'submodule_search_locations={self.submodule_search_locations}') - return f'{self.__class__.__name__}({", ".join(args)})' - - def __eq__(self, other): - smsl = self.submodule_search_locations - try: - return (self.name == other.name and - self.loader == other.loader and - self.origin == other.origin and - smsl == other.submodule_search_locations and - self.cached == other.cached and - self.has_location == other.has_location) - except AttributeError: - return NotImplemented - - @property - def cached(self): - if self._cached is None: - if self.origin is not None and self._set_fileattr: - if _bootstrap_external is None: - raise NotImplementedError - self._cached = _bootstrap_external._get_cached(self.origin) - return self._cached - - @cached.setter - def cached(self, cached): - self._cached = cached - - @property - def parent(self): - """The name of the module's parent.""" - if self.submodule_search_locations is None: - return self.name.rpartition('.')[0] - else: - return self.name - - @property - def has_location(self): - return self._set_fileattr - - @has_location.setter - def has_location(self, value): - self._set_fileattr = bool(value) - - -def spec_from_loader(name, loader, *, origin=None, is_package=None): - """Return a module spec based on various loader methods.""" - if origin is None: - origin = getattr(loader, '_ORIGIN', None) - - if not origin and hasattr(loader, 'get_filename'): - if _bootstrap_external is None: - raise NotImplementedError - spec_from_file_location = _bootstrap_external.spec_from_file_location - - if is_package is None: - return spec_from_file_location(name, loader=loader) - search = [] if is_package else None - return spec_from_file_location(name, loader=loader, - submodule_search_locations=search) - - if 
is_package is None: - if hasattr(loader, 'is_package'): - try: - is_package = loader.is_package(name) - except ImportError: - is_package = None # aka, undefined - else: - # the default - is_package = False - - return ModuleSpec(name, loader, origin=origin, is_package=is_package) - - -def _spec_from_module(module, loader=None, origin=None): - # This function is meant for use in _setup(). - try: - spec = module.__spec__ - except AttributeError: - pass - else: - if spec is not None: - return spec - - name = module.__name__ - if loader is None: - try: - loader = module.__loader__ - except AttributeError: - # loader will stay None. - pass - try: - location = module.__file__ - except AttributeError: - location = None - if origin is None: - if loader is not None: - origin = getattr(loader, '_ORIGIN', None) - if not origin and location is not None: - origin = location - try: - cached = module.__cached__ - except AttributeError: - cached = None - try: - submodule_search_locations = list(module.__path__) - except AttributeError: - submodule_search_locations = None - - spec = ModuleSpec(name, loader, origin=origin) - spec._set_fileattr = False if location is None else (origin == location) - spec.cached = cached - spec.submodule_search_locations = submodule_search_locations - return spec - - -def _init_module_attrs(spec, module, *, override=False): - # The passed-in module may be not support attribute assignment, - # in which case we simply don't set the attributes. - # __name__ - if (override or getattr(module, '__name__', None) is None): - try: - module.__name__ = spec.name - except AttributeError: - pass - # __loader__ - if override or getattr(module, '__loader__', None) is None: - loader = spec.loader - if loader is None: - # A backward compatibility hack. - if spec.submodule_search_locations is not None: - if _bootstrap_external is None: - raise NotImplementedError - NamespaceLoader = _bootstrap_external.NamespaceLoader - - loader = NamespaceLoader.__new__(NamespaceLoader) - loader._path = spec.submodule_search_locations - spec.loader = loader - # While the docs say that module.__file__ is not set for - # built-in modules, and the code below will avoid setting it if - # spec.has_location is false, this is incorrect for namespace - # packages. Namespace packages have no location, but their - # __spec__.origin is None, and thus their module.__file__ - # should also be None for consistency. While a bit of a hack, - # this is the best place to ensure this consistency. - # - # See # https://docs.python.org/3/library/importlib.html#importlib.abc.Loader.load_module - # and bpo-32305 - module.__file__ = None - try: - module.__loader__ = loader - except AttributeError: - pass - # __package__ - if override or getattr(module, '__package__', None) is None: - try: - module.__package__ = spec.parent - except AttributeError: - pass - # __spec__ - try: - module.__spec__ = spec - except AttributeError: - pass - # __path__ - if override or getattr(module, '__path__', None) is None: - if spec.submodule_search_locations is not None: - # XXX We should extend __path__ if it's already a list. 
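# ModuleSpec is mostly inert data; the constructors above are exposed
# publicly as importlib.util.spec_from_loader() and
# importlib.util.module_from_spec(). A short round-trip through the public
# aliases (NullLoader and the module name are made up for illustration):
import importlib.abc
import importlib.util

class NullLoader(importlib.abc.Loader):
    def create_module(self, spec):
        return None                   # request default module creation
    def exec_module(self, module):
        module.answer = 42            # stands in for running a module body

spec = importlib.util.spec_from_loader('demo.pkg.mod', NullLoader())
print(spec.parent)                    # 'demo.pkg' (derived: not a package)
mod = importlib.util.module_from_spec(spec)   # runs _init_module_attrs()
spec.loader.exec_module(mod)
print(mod.answer, mod.__spec__ is spec)       # 42 True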
-        try:
-            module.__path__ = spec.submodule_search_locations
-        except AttributeError:
-            pass
-    # __file__/__cached__
-    if spec.has_location:
-        if override or getattr(module, '__file__', None) is None:
-            try:
-                module.__file__ = spec.origin
-            except AttributeError:
-                pass
-
-        if override or getattr(module, '__cached__', None) is None:
-            if spec.cached is not None:
-                try:
-                    module.__cached__ = spec.cached
-                except AttributeError:
-                    pass
-    return module
-
-
-def module_from_spec(spec):
-    """Create a module based on the provided spec."""
-    # Typically loaders will not implement create_module().
-    module = None
-    if hasattr(spec.loader, 'create_module'):
-        # If create_module() returns `None` then it means default
-        # module creation should be used.
-        module = spec.loader.create_module(spec)
-    elif hasattr(spec.loader, 'exec_module'):
-        raise ImportError('loaders that define exec_module() '
-                          'must also define create_module()')
-    if module is None:
-        module = _new_module(spec.name)
-    _init_module_attrs(spec, module)
-    return module
-
-
-def _module_repr_from_spec(spec):
-    """Return the repr to use for the module."""
-    name = '?' if spec.name is None else spec.name
-    if spec.origin is None:
-        loader = spec.loader
-        if loader is None:
-            return f'<module {name!r}>'
-        elif (
-            _bootstrap_external is not None
-            and isinstance(loader, _bootstrap_external.NamespaceLoader)
-        ):
-            return f'<module {name!r} (namespace) from {list(spec.submodule_search_locations)}>'
-        else:
-            return f'<module {name!r} ({loader!r})>'
-    else:
-        if spec.has_location:
-            return f'<module {name!r} from {spec.origin!r}>'
-        else:
-            return f'<module {name!r} ({spec.origin})>'
-
-
-# Used by importlib.reload() and _load_module_shim().
-def _exec(spec, module):
-    """Execute the spec's specified module in an existing module's namespace."""
-    name = spec.name
-    with _ModuleLockManager(name):
-        if sys.modules.get(name) is not module:
-            msg = f'module {name!r} not in sys.modules'
-            raise ImportError(msg, name=name)
-        try:
-            if spec.loader is None:
-                if spec.submodule_search_locations is None:
-                    raise ImportError('missing loader', name=spec.name)
-                # Namespace package.
-                _init_module_attrs(spec, module, override=True)
-            else:
-                _init_module_attrs(spec, module, override=True)
-                if not hasattr(spec.loader, 'exec_module'):
-                    msg = (f"{_object_name(spec.loader)}.exec_module() not found; "
-                           "falling back to load_module()")
-                    _warnings.warn(msg, ImportWarning)
-                    spec.loader.load_module(name)
-                else:
-                    spec.loader.exec_module(module)
-        finally:
-            # Update the order of insertion into sys.modules for module
-            # clean-up at shutdown.
-            module = sys.modules.pop(spec.name)
-            sys.modules[spec.name] = module
-    return module
-
-
-def _load_backward_compatible(spec):
-    # It is assumed that all callers have been warned about using load_module()
-    # appropriately before calling this function.
-    try:
-        spec.loader.load_module(spec.name)
-    except:
-        if spec.name in sys.modules:
-            module = sys.modules.pop(spec.name)
-            sys.modules[spec.name] = module
-        raise
-    # The module must be in sys.modules at this point!
-    # Move it to the end of sys.modules.
-    module = sys.modules.pop(spec.name)
-    sys.modules[spec.name] = module
-    if getattr(module, '__loader__', None) is None:
-        try:
-            module.__loader__ = spec.loader
-        except AttributeError:
-            pass
-    if getattr(module, '__package__', None) is None:
-        try:
-            # Since module.__path__ may not line up with
-            # spec.submodule_search_paths, we can't necessarily rely
-            # on spec.parent here.
- module.__package__ = module.__name__ - if not hasattr(module, '__path__'): - module.__package__ = spec.name.rpartition('.')[0] - except AttributeError: - pass - if getattr(module, '__spec__', None) is None: - try: - module.__spec__ = spec - except AttributeError: - pass - return module - -def _load_unlocked(spec): - # A helper for direct use by the import system. - if spec.loader is not None: - # Not a namespace package. - if not hasattr(spec.loader, 'exec_module'): - msg = (f"{_object_name(spec.loader)}.exec_module() not found; " - "falling back to load_module()") - _warnings.warn(msg, ImportWarning) - return _load_backward_compatible(spec) - - module = module_from_spec(spec) - - # This must be done before putting the module in sys.modules - # (otherwise an optimization shortcut in import.c becomes - # wrong). - spec._initializing = True - try: - sys.modules[spec.name] = module - try: - if spec.loader is None: - if spec.submodule_search_locations is None: - raise ImportError('missing loader', name=spec.name) - # A namespace package so do nothing. - else: - spec.loader.exec_module(module) - except: - try: - del sys.modules[spec.name] - except KeyError: - pass - raise - # Move the module to the end of sys.modules. - # We don't ensure that the import-related module attributes get - # set in the sys.modules replacement case. Such modules are on - # their own. - module = sys.modules.pop(spec.name) - sys.modules[spec.name] = module - _verbose_message('import {!r} # {!r}', spec.name, spec.loader) - finally: - spec._initializing = False - - return module - -# A method used during testing of _load_unlocked() and by -# _load_module_shim(). -def _load(spec): - """Return a new module object, loaded by the spec's loader. - - The module is not added to its parent. - - If a module is already in sys.modules, that existing module gets - clobbered. - - """ - with _ModuleLockManager(spec.name): - return _load_unlocked(spec) - - -# Loaders ##################################################################### - -class BuiltinImporter: - - """Meta path import for built-in modules. - - All methods are either class or static methods to avoid the need to - instantiate the class. - - """ - - _ORIGIN = "built-in" - - @classmethod - def find_spec(cls, fullname, path=None, target=None): - if _imp.is_builtin(fullname): - return spec_from_loader(fullname, cls, origin=cls._ORIGIN) - else: - return None - - @staticmethod - def create_module(spec): - """Create a built-in module""" - if spec.name not in sys.builtin_module_names: - raise ImportError(f'{spec.name!r} is not a built-in module', - name=spec.name) - return _call_with_frames_removed(_imp.create_builtin, spec) - - @staticmethod - def exec_module(module): - """Exec a built-in module""" - _call_with_frames_removed(_imp.exec_builtin, module) - - @classmethod - @_requires_builtin - def get_code(cls, fullname): - """Return None as built-in modules do not have code objects.""" - return None - - @classmethod - @_requires_builtin - def get_source(cls, fullname): - """Return None as built-in modules do not have source code.""" - return None - - @classmethod - @_requires_builtin - def is_package(cls, fullname): - """Return False as built-in modules are never packages.""" - return False - - load_module = classmethod(_load_module_shim) - - -class FrozenImporter: - - """Meta path import for frozen modules. - - All methods are either class or static methods to avoid the need to - instantiate the class. 
- - """ - - _ORIGIN = "frozen" - - @classmethod - def _fix_up_module(cls, module): - spec = module.__spec__ - state = spec.loader_state - if state is None: - # The module is missing FrozenImporter-specific values. - - # Fix up the spec attrs. - origname = vars(module).pop('__origname__', None) - assert origname, 'see PyImport_ImportFrozenModuleObject()' - ispkg = hasattr(module, '__path__') - assert _imp.is_frozen_package(module.__name__) == ispkg, ispkg - filename, pkgdir = cls._resolve_filename(origname, spec.name, ispkg) - spec.loader_state = type(sys.implementation)( - filename=filename, - origname=origname, - ) - __path__ = spec.submodule_search_locations - if ispkg: - assert __path__ == [], __path__ - if pkgdir: - spec.submodule_search_locations.insert(0, pkgdir) - else: - assert __path__ is None, __path__ - - # Fix up the module attrs (the bare minimum). - assert not hasattr(module, '__file__'), module.__file__ - if filename: - try: - module.__file__ = filename - except AttributeError: - pass - if ispkg: - if module.__path__ != __path__: - assert module.__path__ == [], module.__path__ - module.__path__.extend(__path__) - else: - # These checks ensure that _fix_up_module() is only called - # in the right places. - __path__ = spec.submodule_search_locations - ispkg = __path__ is not None - # Check the loader state. - assert sorted(vars(state)) == ['filename', 'origname'], state - if state.origname: - # The only frozen modules with "origname" set are stdlib modules. - (__file__, pkgdir, - ) = cls._resolve_filename(state.origname, spec.name, ispkg) - assert state.filename == __file__, (state.filename, __file__) - if pkgdir: - assert __path__ == [pkgdir], (__path__, pkgdir) - else: - assert __path__ == ([] if ispkg else None), __path__ - else: - __file__ = None - assert state.filename is None, state.filename - assert __path__ == ([] if ispkg else None), __path__ - # Check the file attrs. - if __file__: - assert hasattr(module, '__file__') - assert module.__file__ == __file__, (module.__file__, __file__) - else: - assert not hasattr(module, '__file__'), module.__file__ - if ispkg: - assert hasattr(module, '__path__') - assert module.__path__ == __path__, (module.__path__, __path__) - else: - assert not hasattr(module, '__path__'), module.__path__ - assert not spec.has_location - - @classmethod - def _resolve_filename(cls, fullname, alias=None, ispkg=False): - if not fullname or not getattr(sys, '_stdlib_dir', None): - return None, None - try: - sep = cls._SEP - except AttributeError: - sep = cls._SEP = '\\' if sys.platform == 'win32' else '/' - - if fullname != alias: - if fullname.startswith('<'): - fullname = fullname[1:] - if not ispkg: - fullname = f'{fullname}.__init__' - else: - ispkg = False - relfile = fullname.replace('.', sep) - if ispkg: - pkgdir = f'{sys._stdlib_dir}{sep}{relfile}' - filename = f'{pkgdir}{sep}__init__.py' - else: - pkgdir = None - filename = f'{sys._stdlib_dir}{sep}{relfile}.py' - return filename, pkgdir - - @classmethod - def find_spec(cls, fullname, path=None, target=None): - info = _call_with_frames_removed(_imp.find_frozen, fullname) - if info is None: - return None - # We get the marshaled data in exec_module() (the loader - # part of the importer), instead of here (the finder part). - # The loader is the usual place to get the data that will - # be loaded into the module. (For example, see _LoaderBasics - # in _bootstrap_external.py.) Most importantly, this importer - # is simpler if we wait to get the data. 
- # However, getting as much data in the finder as possible - # to later load the module is okay, and sometimes important. - # (That's why ModuleSpec.loader_state exists.) This is - # especially true if it avoids throwing away expensive data - # the loader would otherwise duplicate later and can be done - # efficiently. In this case it isn't worth it. - _, ispkg, origname = info - spec = spec_from_loader(fullname, cls, - origin=cls._ORIGIN, - is_package=ispkg) - filename, pkgdir = cls._resolve_filename(origname, fullname, ispkg) - spec.loader_state = type(sys.implementation)( - filename=filename, - origname=origname, - ) - if pkgdir: - spec.submodule_search_locations.insert(0, pkgdir) - return spec - - @staticmethod - def create_module(spec): - """Set __file__, if able.""" - module = _new_module(spec.name) - try: - filename = spec.loader_state.filename - except AttributeError: - pass - else: - if filename: - module.__file__ = filename - return module - - @staticmethod - def exec_module(module): - spec = module.__spec__ - name = spec.name - code = _call_with_frames_removed(_imp.get_frozen_object, name) - exec(code, module.__dict__) - - @classmethod - def load_module(cls, fullname): - """Load a frozen module. - - This method is deprecated. Use exec_module() instead. - - """ - # Warning about deprecation implemented in _load_module_shim(). - module = _load_module_shim(cls, fullname) - info = _imp.find_frozen(fullname) - assert info is not None - _, ispkg, origname = info - module.__origname__ = origname - vars(module).pop('__file__', None) - if ispkg: - module.__path__ = [] - cls._fix_up_module(module) - return module - - @classmethod - @_requires_frozen - def get_code(cls, fullname): - """Return the code object for the frozen module.""" - return _imp.get_frozen_object(fullname) - - @classmethod - @_requires_frozen - def get_source(cls, fullname): - """Return None as frozen modules do not have source code.""" - return None - - @classmethod - @_requires_frozen - def is_package(cls, fullname): - """Return True if the frozen module is a package.""" - return _imp.is_frozen_package(fullname) - - -# Import itself ############################################################### - -class _ImportLockContext: - - """Context manager for the import lock.""" - - def __enter__(self): - """Acquire the import lock.""" - _imp.acquire_lock() - - def __exit__(self, exc_type, exc_value, exc_traceback): - """Release the import lock regardless of any raised exceptions.""" - _imp.release_lock() - - -def _resolve_name(name, package, level): - """Resolve a relative module name to an absolute one.""" - bits = package.rsplit('.', level - 1) - if len(bits) < level: - raise ImportError('attempted relative import beyond top-level package') - base = bits[0] - return f'{base}.{name}' if name else base - - -def _find_spec(name, path, target=None): - """Find a module's spec.""" - meta_path = sys.meta_path - if meta_path is None: - # PyImport_Cleanup() is running or has been called. - raise ImportError("sys.meta_path is None, Python is likely " - "shutting down") - - if not meta_path: - _warnings.warn('sys.meta_path is empty', ImportWarning) - - # We check sys.modules here for the reload case. While a passed-in - # target will usually indicate a reload there is no guarantee, whereas - # sys.modules provides one. 
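# _find_spec() below simply polls each entry on sys.meta_path for a spec, so
# any object exposing find_spec(name, path, target) can take part in import.
# A minimal finder/loader pair that serves one made-up module from a string
# (VirtualFinder, StringLoader and 'virtual_demo' are all hypothetical):
import sys
import importlib.abc
import importlib.util

class StringLoader(importlib.abc.Loader):
    def __init__(self, source):
        self.source = source
    def create_module(self, spec):
        return None                        # default module creation
    def exec_module(self, module):
        exec(self.source, module.__dict__)

class VirtualFinder:
    def find_spec(self, name, path=None, target=None):
        if name == 'virtual_demo':
            return importlib.util.spec_from_loader(
                name, StringLoader('VALUE = 123'))
        return None                        # defer to the remaining finders

sys.meta_path.insert(0, VirtualFinder())
import virtual_demo
print(virtual_demo.VALUE)                  # 123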
- is_reload = name in sys.modules - for finder in meta_path: - with _ImportLockContext(): - try: - find_spec = finder.find_spec - except AttributeError: - continue - else: - spec = find_spec(name, path, target) - if spec is not None: - # The parent import may have already imported this module. - if not is_reload and name in sys.modules: - module = sys.modules[name] - try: - __spec__ = module.__spec__ - except AttributeError: - # We use the found spec since that is the one that - # we would have used if the parent module hadn't - # beaten us to the punch. - return spec - else: - if __spec__ is None: - return spec - else: - return __spec__ - else: - return spec - else: - return None - - -def _sanity_check(name, package, level): - """Verify arguments are "sane".""" - if not isinstance(name, str): - raise TypeError(f'module name must be str, not {type(name)}') - if level < 0: - raise ValueError('level must be >= 0') - if level > 0: - if not isinstance(package, str): - raise TypeError('__package__ not set to a string') - elif not package: - raise ImportError('attempted relative import with no known parent ' - 'package') - if not name and level == 0: - raise ValueError('Empty module name') - - -_ERR_MSG_PREFIX = 'No module named ' -_ERR_MSG = _ERR_MSG_PREFIX + '{!r}' - -def _find_and_load_unlocked(name, import_): - path = None - parent = name.rpartition('.')[0] - parent_spec = None - if parent: - if parent not in sys.modules: - _call_with_frames_removed(import_, parent) - # Crazy side-effects! - if name in sys.modules: - return sys.modules[name] - parent_module = sys.modules[parent] - try: - path = parent_module.__path__ - except AttributeError: - msg = f'{_ERR_MSG_PREFIX}{name!r}; {parent!r} is not a package' - raise ModuleNotFoundError(msg, name=name) from None - parent_spec = parent_module.__spec__ - child = name.rpartition('.')[2] - spec = _find_spec(name, path) - if spec is None: - raise ModuleNotFoundError(f'{_ERR_MSG_PREFIX}{name!r}', name=name) - else: - if parent_spec: - # Temporarily add child we are currently importing to parent's - # _uninitialized_submodules for circular import tracking. - parent_spec._uninitialized_submodules.append(child) - try: - module = _load_unlocked(spec) - finally: - if parent_spec: - parent_spec._uninitialized_submodules.pop() - if parent: - # Set the module as an attribute on its parent. - parent_module = sys.modules[parent] - try: - setattr(parent_module, child, module) - except AttributeError: - msg = f"Cannot set an attribute on {parent!r} for child module {child!r}" - _warnings.warn(msg, ImportWarning) - return module - - -_NEEDS_LOADING = object() - - -def _find_and_load(name, import_): - """Find and load the module.""" - - # Optimization: we avoid unneeded module locking if the module - # already exists in sys.modules and is fully initialized. - module = sys.modules.get(name, _NEEDS_LOADING) - if (module is _NEEDS_LOADING or - getattr(getattr(module, "__spec__", None), "_initializing", False)): - with _ModuleLockManager(name): - module = sys.modules.get(name, _NEEDS_LOADING) - if module is _NEEDS_LOADING: - return _find_and_load_unlocked(name, import_) - - # Optimization: only call _bootstrap._lock_unlock_module() if - # module.__spec__._initializing is True. - # NOTE: because of this, initializing must be set *before* - # putting the new module in sys.modules. - _lock_unlock_module(name) - else: - # Verify the module is still in sys.modules. 
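# Aside: a minimal runnable sketch of the finder protocol that the
# _find_spec() loop above drives. _DenyListFinder is a hypothetical example;
# returning None from find_spec() defers to the next finder on sys.meta_path.
import sys

class _DenyListFinder:
    blocked = frozenset({'example_blocked_module'})  # hypothetical names

    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        if fullname in cls.blocked:
            raise ModuleNotFoundError(f'{fullname!r} is blocked', name=fullname)
        return None  # not handled here; let the remaining finders try

sys.meta_path.insert(0, _DenyListFinder)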
Another thread may have - # removed it (due to import failure) between our sys.modules.get() - # above and the _initializing check. If removed, we retry the import - # to preserve normal semantics: the caller gets the exception from - # the actual import failure rather than a synthetic error. - if sys.modules.get(name) is not module: - return _find_and_load(name, import_) - - if module is None: - message = f'import of {name} halted; None in sys.modules' - raise ModuleNotFoundError(message, name=name) - - return module - - -def _gcd_import(name, package=None, level=0): - """Import and return the module based on its name, the package the call is - being made from, and the level adjustment. - - This function represents the greatest common denominator of functionality - between import_module and __import__. This includes setting __package__ if - the loader did not. - - """ - _sanity_check(name, package, level) - if level > 0: - name = _resolve_name(name, package, level) - return _find_and_load(name, _gcd_import) - - -def _handle_fromlist(module, fromlist, import_, *, recursive=False): - """Figure out what __import__ should return. - - The import_ parameter is a callable which takes the name of module to - import. It is required to decouple the function from assuming importlib's - import implementation is desired. - - """ - # The hell that is fromlist ... - # If a package was imported, try to import stuff from fromlist. - for x in fromlist: - if not isinstance(x, str): - if recursive: - where = module.__name__ + '.__all__' - else: - where = "``from list''" - raise TypeError(f"Item in {where} must be str, " - f"not {type(x).__name__}") - elif x == '*': - if not recursive and hasattr(module, '__all__'): - _handle_fromlist(module, module.__all__, import_, - recursive=True) - elif not hasattr(module, x): - from_name = f'{module.__name__}.{x}' - try: - _call_with_frames_removed(import_, from_name) - except ModuleNotFoundError as exc: - # Backwards-compatibility dictates we ignore failed - # imports triggered by fromlist for modules that don't - # exist. - if (exc.name == from_name and - sys.modules.get(from_name, _NEEDS_LOADING) is not None): - continue - raise - return module - - -def _calc___package__(globals): - """Calculate what __package__ should be. - - __package__ is not guaranteed to be defined or could be set to None - to represent that its proper value is unknown. - - """ - package = globals.get('__package__') - spec = globals.get('__spec__') - if package is not None: - if spec is not None and package != spec.parent: - _warnings.warn("__package__ != __spec__.parent " - f"({package!r} != {spec.parent!r})", - DeprecationWarning, stacklevel=3) - return package - elif spec is not None: - return spec.parent - else: - _warnings.warn("can't resolve package from __spec__ or __package__, " - "falling back on __name__ and __path__", - ImportWarning, stacklevel=3) - package = globals['__name__'] - if '__path__' not in globals: - package = package.rpartition('.')[0] - return package - - -def __import__(name, globals=None, locals=None, fromlist=(), level=0): - """Import a module. - - The 'globals' argument is used to infer where the import is occurring from - to handle relative imports. The 'locals' argument is ignored. The - 'fromlist' argument specifies what should exist as attributes on the module - being imported (e.g. ``from module import <fromlist>``). The 'level' - argument represents the package location to import from in a relative - import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).
- - """ - if level == 0: - module = _gcd_import(name) - else: - globals_ = globals if globals is not None else {} - package = _calc___package__(globals_) - module = _gcd_import(name, package, level) - if not fromlist: - # Return up to the first dot in 'name'. This is complicated by the fact - # that 'name' may be relative. - if level == 0: - return _gcd_import(name.partition('.')[0]) - elif not name: - return module - else: - # Figure out where to slice the module's name up to the first dot - # in 'name'. - cut_off = len(name) - len(name.partition('.')[0]) - # Slice end needs to be positive to alleviate need to special-case - # when ``'.' not in name``. - return sys.modules[module.__name__[:len(module.__name__)-cut_off]] - elif hasattr(module, '__path__'): - return _handle_fromlist(module, fromlist, _gcd_import) - else: - return module - - -def _builtin_from_name(name): - spec = BuiltinImporter.find_spec(name) - if spec is None: - raise ImportError('no built-in module named ' + name) - return _load_unlocked(spec) - - -def _setup(sys_module, _imp_module): - """Setup importlib by importing needed built-in modules and injecting them - into the global namespace. - - As sys is needed for sys.modules access and _imp is needed to load built-in - modules, those two modules must be explicitly passed in. - - """ - global _imp, sys, _blocking_on - _imp = _imp_module - sys = sys_module - - # Set up the spec for existing builtin/frozen modules. - module_type = type(sys) - for name, module in sys.modules.items(): - if isinstance(module, module_type): - if name in sys.builtin_module_names: - loader = BuiltinImporter - elif _imp.is_frozen(name): - loader = FrozenImporter - else: - continue - spec = _spec_from_module(module, loader) - _init_module_attrs(spec, module) - if loader is FrozenImporter: - loader._fix_up_module(module) - - # Directly load built-in modules needed during bootstrap. - self_module = sys.modules[__name__] - for builtin_name in ('_thread', '_warnings', '_weakref'): - if builtin_name not in sys.modules: - builtin_module = _builtin_from_name(builtin_name) - else: - builtin_module = sys.modules[builtin_name] - setattr(self_module, builtin_name, builtin_module) - - # Instantiation requires _weakref to have been set. - _blocking_on = _WeakValueDictionary() - - -def _install(sys_module, _imp_module): - """Install importers for builtin and frozen modules""" - _setup(sys_module, _imp_module) - - sys.meta_path.append(BuiltinImporter) - sys.meta_path.append(FrozenImporter) - - -def _install_external_importers(): - """Install importers that require external filesystem access""" - global _bootstrap_external - import _frozen_importlib_external - _bootstrap_external = _frozen_importlib_external - _frozen_importlib_external._install(sys.modules[__name__]) diff --git a/Python313_13_x86_Template/Lib/importlib/_bootstrap_external.py b/Python313_13_x86_Template/Lib/importlib/_bootstrap_external.py deleted file mode 100644 index 0741f62e..00000000 --- a/Python313_13_x86_Template/Lib/importlib/_bootstrap_external.py +++ /dev/null @@ -1,1826 +0,0 @@ -"""Core implementation of path-based import. - -This module is NOT meant to be directly imported! It has been designed such -that it can be bootstrapped into Python as the implementation of import. As -such it requires the injection of specific modules and attributes in order to -work. One should use importlib as the public-facing version of this module. 
- -""" -# IMPORTANT: Whenever making changes to this module, be sure to run a top-level -# `make regen-importlib` followed by `make` in order to get the frozen version -# of the module updated. Not doing so will result in the Makefile to fail for -# all others who don't have a ./python around to freeze the module in the early -# stages of compilation. -# - -# See importlib._setup() for what is injected into the global namespace. - -# When editing this code be aware that code executed at import time CANNOT -# reference any injected objects! This includes not only global code but also -# anything specified at the class level. - -# Module injected manually by _set_bootstrap_module() -_bootstrap = None - -# Import builtin modules -import _imp -import _io -import sys -import _warnings -import marshal - - -_MS_WINDOWS = (sys.platform == 'win32') -if _MS_WINDOWS: - import nt as _os - import winreg -else: - import posix as _os - - -if _MS_WINDOWS: - path_separators = ['\\', '/'] -else: - path_separators = ['/'] -# Assumption made in _path_join() -assert all(len(sep) == 1 for sep in path_separators) -path_sep = path_separators[0] -path_sep_tuple = tuple(path_separators) -path_separators = ''.join(path_separators) -_pathseps_with_colon = {f':{s}' for s in path_separators} - - -# Bootstrap-related code ###################################################### -_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win', -_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin', 'ios', 'tvos', 'watchos' -_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY - + _CASE_INSENSITIVE_PLATFORMS_STR_KEY) - - -def _make_relax_case(): - if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): - if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY): - key = 'PYTHONCASEOK' - else: - key = b'PYTHONCASEOK' - - def _relax_case(): - """True if filenames must be checked case-insensitively and ignore environment flags are not set.""" - return not sys.flags.ignore_environment and key in _os.environ - else: - def _relax_case(): - """True if filenames must be checked case-insensitively.""" - return False - return _relax_case - -_relax_case = _make_relax_case() - - -def _pack_uint32(x): - """Convert a 32-bit integer to little-endian.""" - return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little') - - -def _unpack_uint64(data): - """Convert 8 bytes in little-endian to an integer.""" - assert len(data) == 8 - return int.from_bytes(data, 'little') - -def _unpack_uint32(data): - """Convert 4 bytes in little-endian to an integer.""" - assert len(data) == 4 - return int.from_bytes(data, 'little') - -def _unpack_uint16(data): - """Convert 2 bytes in little-endian to an integer.""" - assert len(data) == 2 - return int.from_bytes(data, 'little') - - -if _MS_WINDOWS: - def _path_join(*path_parts): - """Replacement for os.path.join().""" - if not path_parts: - return "" - if len(path_parts) == 1: - return path_parts[0] - root = "" - path = [] - for new_root, tail in map(_os._path_splitroot, path_parts): - if new_root.startswith(path_sep_tuple) or new_root.endswith(path_sep_tuple): - root = new_root.rstrip(path_separators) or root - path = [path_sep + tail] - elif new_root.endswith(':'): - if root.casefold() != new_root.casefold(): - # Drive relative paths have to be resolved by the OS, so we reset the - # tail but do not add a path_sep prefix. 
- root = new_root - path = [tail] - else: - path.append(tail) - else: - root = new_root or root - path.append(tail) - path = [p.rstrip(path_separators) for p in path if p] - if len(path) == 1 and not path[0]: - # Avoid losing the root's trailing separator when joining with nothing - return root + path_sep - return root + path_sep.join(path) - -else: - def _path_join(*path_parts): - """Replacement for os.path.join().""" - return path_sep.join([part.rstrip(path_separators) - for part in path_parts if part]) - - -def _path_split(path): - """Replacement for os.path.split().""" - i = max(path.rfind(p) for p in path_separators) - if i < 0: - return '', path - return path[:i], path[i + 1:] - - -def _path_stat(path): - """Stat the path. - - Made a separate function to make it easier to override in experiments - (e.g. cache stat results). - - """ - return _os.stat(path) - - -def _path_is_mode_type(path, mode): - """Test whether the path is the specified mode type.""" - try: - stat_info = _path_stat(path) - except OSError: - return False - return (stat_info.st_mode & 0o170000) == mode - - -def _path_isfile(path): - """Replacement for os.path.isfile.""" - return _path_is_mode_type(path, 0o100000) - - -def _path_isdir(path): - """Replacement for os.path.isdir.""" - if not path: - path = _os.getcwd() - return _path_is_mode_type(path, 0o040000) - - -if _MS_WINDOWS: - def _path_isabs(path): - """Replacement for os.path.isabs.""" - if not path: - return False - root = _os._path_splitroot(path)[0].replace('/', '\\') - return len(root) > 1 and (root.startswith('\\\\') or root.endswith('\\')) - -else: - def _path_isabs(path): - """Replacement for os.path.isabs.""" - return path.startswith(path_separators) - - -def _path_abspath(path): - """Replacement for os.path.abspath.""" - if not _path_isabs(path): - for sep in path_separators: - path = path.removeprefix(f".{sep}") - return _path_join(_os.getcwd(), path) - else: - return path - - -def _write_atomic(path, data, mode=0o666): - """Best-effort function to write data to a path atomically. - Be prepared to handle a FileExistsError if concurrent writing of the - temporary file is attempted.""" - # id() is used to generate a pseudo-random filename. - path_tmp = f'{path}.{id(path)}' - fd = _os.open(path_tmp, - _os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666) - try: - # We first write data to a temporary file, and then use os.replace() to - # perform an atomic rename. - with _io.open(fd, 'wb') as file: - file.write(data) - _os.replace(path_tmp, path) - except OSError: - try: - _os.unlink(path_tmp) - except OSError: - pass - raise - - -_code_type = type(_write_atomic.__code__) - - -# Finder/loader utility code ############################################### - -# Magic word to reject .pyc files generated by other Python versions. -# It should change for each incompatible change to the bytecode. -# -# The value of CR and LF is incorporated so if you ever read or write -# a .pyc file in text mode the magic number will be wrong; also, the -# Apple MPW compiler swaps their values, botching string constants. -# -# There were a variety of old schemes for setting the magic number. -# The current working scheme is to increment the previous value by -# 10. -# -# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic -# number also includes a new "magic tag", i.e. a human readable string used -# to represent the magic number in __pycache__ directories. When you change -# the magic number, you must also set a new unique magic tag. 
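# Aside: a minimal runnable sketch of the write-then-rename pattern that
# _write_atomic() above relies on. os.replace() is atomic within a single
# filesystem, so concurrent readers see either the old file or the complete
# new one, never a partial write. atomic_write_demo is illustrative only.
import os
import tempfile

def atomic_write_demo(path, data):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
    try:
        with open(fd, 'wb') as file:
            file.write(data)
        os.replace(tmp, path)  # atomically swap the finished file into place
    except OSError:
        os.unlink(tmp)  # clean up the temporary file on failure
        raise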
Generally this -# can be named after the Python major version of the magic number bump, but -# it can really be anything, as long as it's different than anything else -# that's come before. The tags are included in the following table, starting -# with Python 3.2a0. -# -# Known values: -# Python 1.5: 20121 -# Python 1.5.1: 20121 -# Python 1.5.2: 20121 -# Python 1.6: 50428 -# Python 2.0: 50823 -# Python 2.0.1: 50823 -# Python 2.1: 60202 -# Python 2.1.1: 60202 -# Python 2.1.2: 60202 -# Python 2.2: 60717 -# Python 2.3a0: 62011 -# Python 2.3a0: 62021 -# Python 2.3a0: 62011 (!) -# Python 2.4a0: 62041 -# Python 2.4a3: 62051 -# Python 2.4b1: 62061 -# Python 2.5a0: 62071 -# Python 2.5a0: 62081 (ast-branch) -# Python 2.5a0: 62091 (with) -# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode) -# Python 2.5b3: 62101 (fix wrong code: for x, in ...) -# Python 2.5b3: 62111 (fix wrong code: x += yield) -# Python 2.5c1: 62121 (fix wrong lnotab with for loops and -# storing constants that should have been removed) -# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp) -# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode) -# Python 2.6a1: 62161 (WITH_CLEANUP optimization) -# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND) -# Python 2.7a0: 62181 (optimize conditional branches: -# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE) -# Python 2.7a0 62191 (introduce SETUP_WITH) -# Python 2.7a0 62201 (introduce BUILD_SET) -# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD) -# Python 3000: 3000 -# 3010 (removed UNARY_CONVERT) -# 3020 (added BUILD_SET) -# 3030 (added keyword-only parameters) -# 3040 (added signature annotations) -# 3050 (print becomes a function) -# 3060 (PEP 3115 metaclass syntax) -# 3061 (string literals become unicode) -# 3071 (PEP 3109 raise changes) -# 3081 (PEP 3137 make __file__ and __name__ unicode) -# 3091 (kill str8 interning) -# 3101 (merge from 2.6a0, see 62151) -# 3103 (__file__ points to source file) -# Python 3.0a4: 3111 (WITH_CLEANUP optimization). 
-# Python 3.0b1: 3131 (lexical exception stacking, including POP_EXCEPT - #3021) -# Python 3.1a1: 3141 (optimize list, set and dict comprehensions: -# change LIST_APPEND and SET_ADD, add MAP_ADD #2183) -# Python 3.1a1: 3151 (optimize conditional branches: -# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE - #4715) -# Python 3.2a1: 3160 (add SETUP_WITH #6101) -# tag: cpython-32 -# Python 3.2a2: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR #9225) -# tag: cpython-32 -# Python 3.2a3 3180 (add DELETE_DEREF #4617) -# Python 3.3a1 3190 (__class__ super closure changed) -# Python 3.3a1 3200 (PEP 3155 __qualname__ added #13448) -# Python 3.3a1 3210 (added size modulo 2**32 to the pyc header #13645) -# Python 3.3a2 3220 (changed PEP 380 implementation #14230) -# Python 3.3a4 3230 (revert changes to implicit __class__ closure #14857) -# Python 3.4a1 3250 (evaluate positional default arguments before -# keyword-only defaults #16967) -# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override -# free vars #17853) -# Python 3.4a1 3270 (various tweaks to the __class__ closure #12370) -# Python 3.4a1 3280 (remove implicit class argument) -# Python 3.4a4 3290 (changes to __qualname__ computation #19301) -# Python 3.4a4 3300 (more changes to __qualname__ computation #19301) -# Python 3.4rc2 3310 (alter __qualname__ computation #20625) -# Python 3.5a1 3320 (PEP 465: Matrix multiplication operator #21176) -# Python 3.5b1 3330 (PEP 448: Additional Unpacking Generalizations #2292) -# Python 3.5b2 3340 (fix dictionary display evaluation order #11205) -# Python 3.5b3 3350 (add GET_YIELD_FROM_ITER opcode #24400) -# Python 3.5.2 3351 (fix BUILD_MAP_UNPACK_WITH_CALL opcode #27286) -# Python 3.6a0 3360 (add FORMAT_VALUE opcode #25483) -# Python 3.6a1 3361 (lineno delta of code.co_lnotab becomes signed #26107) -# Python 3.6a2 3370 (16 bit wordcode #26647) -# Python 3.6a2 3371 (add BUILD_CONST_KEY_MAP opcode #27140) -# Python 3.6a2 3372 (MAKE_FUNCTION simplification, remove MAKE_CLOSURE -# #27095) -# Python 3.6b1 3373 (add BUILD_STRING opcode #27078) -# Python 3.6b1 3375 (add SETUP_ANNOTATIONS and STORE_ANNOTATION opcodes -# #27985) -# Python 3.6b1 3376 (simplify CALL_FUNCTIONs & BUILD_MAP_UNPACK_WITH_CALL - #27213) -# Python 3.6b1 3377 (set __class__ cell from type.__new__ #23722) -# Python 3.6b2 3378 (add BUILD_TUPLE_UNPACK_WITH_CALL #28257) -# Python 3.6rc1 3379 (more thorough __class__ validation #23722) -# Python 3.7a1 3390 (add LOAD_METHOD and CALL_METHOD opcodes #26110) -# Python 3.7a2 3391 (update GET_AITER #31709) -# Python 3.7a4 3392 (PEP 552: Deterministic pycs #31650) -# Python 3.7b1 3393 (remove STORE_ANNOTATION opcode #32550) -# Python 3.7b5 3394 (restored docstring as the first stmt in the body; -# this might affected the first line number #32911) -# Python 3.8a1 3400 (move frame block handling to compiler #17611) -# Python 3.8a1 3401 (add END_ASYNC_FOR #33041) -# Python 3.8a1 3410 (PEP570 Python Positional-Only Parameters #36540) -# Python 3.8b2 3411 (Reverse evaluation order of key: value in dict -# comprehensions #35224) -# Python 3.8b2 3412 (Swap the position of positional args and positional -# only args in ast.arguments #37593) -# Python 3.8b4 3413 (Fix "break" and "continue" in "finally" #37830) -# Python 3.9a0 3420 (add LOAD_ASSERTION_ERROR #34880) -# Python 3.9a0 3421 (simplified bytecode for with blocks #32949) -# Python 3.9a0 3422 (remove BEGIN_FINALLY, END_FINALLY, CALL_FINALLY, POP_FINALLY bytecodes #33387) -# Python 3.9a2 3423 (add IS_OP, CONTAINS_OP and 
JUMP_IF_NOT_EXC_MATCH bytecodes #39156) -# Python 3.9a2 3424 (simplify bytecodes for *value unpacking) -# Python 3.9a2 3425 (simplify bytecodes for **value unpacking) -# Python 3.10a1 3430 (Make 'annotations' future by default) -# Python 3.10a1 3431 (New line number table format -- PEP 626) -# Python 3.10a2 3432 (Function annotation for MAKE_FUNCTION is changed from dict to tuple bpo-42202) -# Python 3.10a2 3433 (RERAISE restores f_lasti if oparg != 0) -# Python 3.10a6 3434 (PEP 634: Structural Pattern Matching) -# Python 3.10a7 3435 Use instruction offsets (as opposed to byte offsets). -# Python 3.10b1 3436 (Add GEN_START bytecode #43683) -# Python 3.10b1 3437 (Undo making 'annotations' future by default - We like to dance among core devs!) -# Python 3.10b1 3438 Safer line number table handling. -# Python 3.10b1 3439 (Add ROT_N) -# Python 3.11a1 3450 Use exception table for unwinding ("zero cost" exception handling) -# Python 3.11a1 3451 (Add CALL_METHOD_KW) -# Python 3.11a1 3452 (drop nlocals from marshaled code objects) -# Python 3.11a1 3453 (add co_fastlocalnames and co_fastlocalkinds) -# Python 3.11a1 3454 (compute cell offsets relative to locals bpo-43693) -# Python 3.11a1 3455 (add MAKE_CELL bpo-43693) -# Python 3.11a1 3456 (interleave cell args bpo-43693) -# Python 3.11a1 3457 (Change localsplus to a bytes object bpo-43693) -# Python 3.11a1 3458 (imported objects now don't use LOAD_METHOD/CALL_METHOD) -# Python 3.11a1 3459 (PEP 657: add end line numbers and column offsets for instructions) -# Python 3.11a1 3460 (Add co_qualname field to PyCodeObject bpo-44530) -# Python 3.11a1 3461 (JUMP_ABSOLUTE must jump backwards) -# Python 3.11a2 3462 (bpo-44511: remove COPY_DICT_WITHOUT_KEYS, change -# MATCH_CLASS and MATCH_KEYS, and add COPY) -# Python 3.11a3 3463 (bpo-45711: JUMP_IF_NOT_EXC_MATCH no longer pops the -# active exception) -# Python 3.11a3 3464 (bpo-45636: Merge numeric BINARY_*/INPLACE_* into -# BINARY_OP) -# Python 3.11a3 3465 (Add COPY_FREE_VARS opcode) -# Python 3.11a4 3466 (bpo-45292: PEP-654 except*) -# Python 3.11a4 3467 (Change CALL_xxx opcodes) -# Python 3.11a4 3468 (Add SEND opcode) -# Python 3.11a4 3469 (bpo-45711: remove type, traceback from exc_info) -# Python 3.11a4 3470 (bpo-46221: PREP_RERAISE_STAR no longer pushes lasti) -# Python 3.11a4 3471 (bpo-46202: remove pop POP_EXCEPT_AND_RERAISE) -# Python 3.11a4 3472 (bpo-46009: replace GEN_START with POP_TOP) -# Python 3.11a4 3473 (Add POP_JUMP_IF_NOT_NONE/POP_JUMP_IF_NONE opcodes) -# Python 3.11a4 3474 (Add RESUME opcode) -# Python 3.11a5 3475 (Add RETURN_GENERATOR opcode) -# Python 3.11a5 3476 (Add ASYNC_GEN_WRAP opcode) -# Python 3.11a5 3477 (Replace DUP_TOP/DUP_TOP_TWO with COPY and -# ROT_TWO/ROT_THREE/ROT_FOUR/ROT_N with SWAP) -# Python 3.11a5 3478 (New CALL opcodes) -# Python 3.11a5 3479 (Add PUSH_NULL opcode) -# Python 3.11a5 3480 (New CALL opcodes, second iteration) -# Python 3.11a5 3481 (Use inline cache for BINARY_OP) -# Python 3.11a5 3482 (Use inline caching for UNPACK_SEQUENCE and LOAD_GLOBAL) -# Python 3.11a5 3483 (Use inline caching for COMPARE_OP and BINARY_SUBSCR) -# Python 3.11a5 3484 (Use inline caching for LOAD_ATTR, LOAD_METHOD, and -# STORE_ATTR) -# Python 3.11a5 3485 (Add an oparg to GET_AWAITABLE) -# Python 3.11a6 3486 (Use inline caching for PRECALL and CALL) -# Python 3.11a6 3487 (Remove the adaptive "oparg counter" mechanism) -# Python 3.11a6 3488 (LOAD_GLOBAL can push additional NULL) -# Python 3.11a6 3489 (Add JUMP_BACKWARD, remove JUMP_ABSOLUTE) -# Python 3.11a6 3490 (remove 
JUMP_IF_NOT_EXC_MATCH, add CHECK_EXC_MATCH) -# Python 3.11a6 3491 (remove JUMP_IF_NOT_EG_MATCH, add CHECK_EG_MATCH, -# add JUMP_BACKWARD_NO_INTERRUPT, make JUMP_NO_INTERRUPT virtual) -# Python 3.11a7 3492 (make POP_JUMP_IF_NONE/NOT_NONE/TRUE/FALSE relative) -# Python 3.11a7 3493 (Make JUMP_IF_TRUE_OR_POP/JUMP_IF_FALSE_OR_POP relative) -# Python 3.11a7 3494 (New location info table) -# Python 3.11b4 3495 (Set line number of module's RESUME instr to 0 per PEP 626) -# Python 3.12a1 3500 (Remove PRECALL opcode) -# Python 3.12a1 3501 (YIELD_VALUE oparg == stack_depth) -# Python 3.12a1 3502 (LOAD_FAST_CHECK, no NULL-check in LOAD_FAST) -# Python 3.12a1 3503 (Shrink LOAD_METHOD cache) -# Python 3.12a1 3504 (Merge LOAD_METHOD back into LOAD_ATTR) -# Python 3.12a1 3505 (Specialization/Cache for FOR_ITER) -# Python 3.12a1 3506 (Add BINARY_SLICE and STORE_SLICE instructions) -# Python 3.12a1 3507 (Set lineno of module's RESUME to 0) -# Python 3.12a1 3508 (Add CLEANUP_THROW) -# Python 3.12a1 3509 (Conditional jumps only jump forward) -# Python 3.12a2 3510 (FOR_ITER leaves iterator on the stack) -# Python 3.12a2 3511 (Add STOPITERATION_ERROR instruction) -# Python 3.12a2 3512 (Remove all unused consts from code objects) -# Python 3.12a4 3513 (Add CALL_INTRINSIC_1 instruction, removed STOPITERATION_ERROR, PRINT_EXPR, IMPORT_STAR) -# Python 3.12a4 3514 (Remove ASYNC_GEN_WRAP, LIST_TO_TUPLE, and UNARY_POSITIVE) -# Python 3.12a5 3515 (Embed jump mask in COMPARE_OP oparg) -# Python 3.12a5 3516 (Add COMPARE_AND_BRANCH instruction) -# Python 3.12a5 3517 (Change YIELD_VALUE oparg to exception block depth) -# Python 3.12a6 3518 (Add RETURN_CONST instruction) -# Python 3.12a6 3519 (Modify SEND instruction) -# Python 3.12a6 3520 (Remove PREP_RERAISE_STAR, add CALL_INTRINSIC_2) -# Python 3.12a7 3521 (Shrink the LOAD_GLOBAL caches) -# Python 3.12a7 3522 (Removed JUMP_IF_FALSE_OR_POP/JUMP_IF_TRUE_OR_POP) -# Python 3.12a7 3523 (Convert COMPARE_AND_BRANCH back to COMPARE_OP) -# Python 3.12a7 3524 (Shrink the BINARY_SUBSCR caches) -# Python 3.12b1 3525 (Shrink the CALL caches) -# Python 3.12b1 3526 (Add instrumentation support) -# Python 3.12b1 3527 (Add LOAD_SUPER_ATTR) -# Python 3.12b1 3528 (Add LOAD_SUPER_ATTR_METHOD specialization) -# Python 3.12b1 3529 (Inline list/dict/set comprehensions) -# Python 3.12b1 3530 (Shrink the LOAD_SUPER_ATTR caches) -# Python 3.12b1 3531 (Add PEP 695 changes) -# Python 3.13a1 3550 (Plugin optimizer support) -# Python 3.13a1 3551 (Compact superinstructions) -# Python 3.13a1 3552 (Remove LOAD_FAST__LOAD_CONST and LOAD_CONST__LOAD_FAST) -# Python 3.13a1 3553 (Add SET_FUNCTION_ATTRIBUTE) -# Python 3.13a1 3554 (more efficient bytecodes for f-strings) -# Python 3.13a1 3555 (generate specialized opcodes metadata from bytecodes.c) -# Python 3.13a1 3556 (Convert LOAD_CLOSURE to a pseudo-op) -# Python 3.13a1 3557 (Make the conversion to boolean in jumps explicit) -# Python 3.13a1 3558 (Reorder the stack items for CALL) -# Python 3.13a1 3559 (Generate opcode IDs from bytecodes.c) -# Python 3.13a1 3560 (Add RESUME_CHECK instruction) -# Python 3.13a1 3561 (Add cache entry to branch instructions) -# Python 3.13a1 3562 (Assign opcode IDs for internal ops in separate range) -# Python 3.13a1 3563 (Add CALL_KW and remove KW_NAMES) -# Python 3.13a1 3564 (Removed oparg from YIELD_VALUE, changed oparg values of RESUME) -# Python 3.13a1 3565 (Oparg of YIELD_VALUE indicates whether it is in a yield-from) -# Python 3.13a1 3566 (Emit JUMP_NO_INTERRUPT instead of JUMP for non-loop no-lineno cases) -# Python 
3.13a1 3567 (Reimplement line number propagation by the compiler) -# Python 3.13a1 3568 (Change semantics of END_FOR) -# Python 3.13a5 3569 (Specialize CONTAINS_OP) -# Python 3.13a6 3570 (Add __firstlineno__ class attribute) -# Python 3.13b1 3571 (Fix miscompilation of private names in generic classes) - -# Python 3.14 will start with 3600 - -# Please don't copy-paste the same pre-release tag for new entries above!!! -# You should always use the *upcoming* tag. For example, if 3.12a6 came out -# a week ago, I should put "Python 3.12a7" next to my new magic number. - -# MAGIC must change whenever the bytecode emitted by the compiler may no -# longer be understood by older implementations of the eval loop (usually -# due to the addition of new opcodes). -# -# Starting with Python 3.11, Python 3.n starts with magic number 2900+50n. -# -# Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array -# in PC/launcher.c must also be updated. - -MAGIC_NUMBER = (3571).to_bytes(2, 'little') + b'\r\n' - -_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c - -_PYCACHE = '__pycache__' -_OPT = 'opt-' - -SOURCE_SUFFIXES = ['.py'] -if _MS_WINDOWS: - SOURCE_SUFFIXES.append('.pyw') - -EXTENSION_SUFFIXES = _imp.extension_suffixes() - -BYTECODE_SUFFIXES = ['.pyc'] -# Deprecated. -DEBUG_BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES = BYTECODE_SUFFIXES - -def cache_from_source(path, debug_override=None, *, optimization=None): - """Given the path to a .py file, return the path to its .pyc file. - - The .py file does not need to exist; this simply returns the path to the - .pyc file calculated as if the .py file were imported. - - The 'optimization' parameter controls the presumed optimization level of - the bytecode file. If 'optimization' is not None, the string representation - of the argument is taken and verified to be alphanumeric (else ValueError - is raised). - - The debug_override parameter is deprecated. If debug_override is not None, - a True value is the same as setting 'optimization' to the empty string - while a False value is equivalent to setting 'optimization' to '1'. - - If sys.implementation.cache_tag is None then NotImplementedError is raised. - - """ - if debug_override is not None: - _warnings.warn('the debug_override parameter is deprecated; use ' - "'optimization' instead", DeprecationWarning) - if optimization is not None: - message = 'debug_override or optimization must be set to None' - raise TypeError(message) - optimization = '' if debug_override else 1 - path = _os.fspath(path) - head, tail = _path_split(path) - base, sep, rest = tail.rpartition('.') - tag = sys.implementation.cache_tag - if tag is None: - raise NotImplementedError('sys.implementation.cache_tag is None') - almost_filename = ''.join([(base if base else rest), sep, tag]) - if optimization is None: - if sys.flags.optimize == 0: - optimization = '' - else: - optimization = sys.flags.optimize - optimization = str(optimization) - if optimization != '': - if not optimization.isalnum(): - raise ValueError(f'{optimization!r} is not alphanumeric') - almost_filename = f'{almost_filename}.{_OPT}{optimization}' - filename = almost_filename + BYTECODE_SUFFIXES[0] - if sys.pycache_prefix is not None: - # We need an absolute path to the py file to avoid the possibility of - # collisions within sys.pycache_prefix, if someone has two different - # `foo/bar.py` on their system and they import both of them using the - # same sys.pycache_prefix. 
Let's say sys.pycache_prefix is - # `C:\Bytecode`; the idea here is that if we get `Foo\Bar`, we first - # make it absolute (`C:\Somewhere\Foo\Bar`), then make it root-relative - # (`Somewhere\Foo\Bar`), so we end up placing the bytecode file in an - # unambiguous `C:\Bytecode\Somewhere\Foo\Bar\`. - head = _path_abspath(head) - - # Strip initial drive from a Windows path. We know we have an absolute - # path here, so the second part of the check rules out a POSIX path that - # happens to contain a colon at the second character. - # Slicing avoids issues with an empty (or short) `head`. - if head[1:2] == ':' and head[0:1] not in path_separators: - head = head[2:] - - # Strip initial path separator from `head` to complete the conversion - # back to a root-relative path before joining. - return _path_join( - sys.pycache_prefix, - head.lstrip(path_separators), - filename, - ) - return _path_join(head, _PYCACHE, filename) - - -def source_from_cache(path): - """Given the path to a .pyc. file, return the path to its .py file. - - The .pyc file does not need to exist; this simply returns the path to - the .py file calculated to correspond to the .pyc file. If path does - not conform to PEP 3147/488 format, ValueError will be raised. If - sys.implementation.cache_tag is None then NotImplementedError is raised. - - """ - if sys.implementation.cache_tag is None: - raise NotImplementedError('sys.implementation.cache_tag is None') - path = _os.fspath(path) - head, pycache_filename = _path_split(path) - found_in_pycache_prefix = False - if sys.pycache_prefix is not None: - stripped_path = sys.pycache_prefix.rstrip(path_separators) - if head.startswith(stripped_path + path_sep): - head = head[len(stripped_path):] - found_in_pycache_prefix = True - if not found_in_pycache_prefix: - head, pycache = _path_split(head) - if pycache != _PYCACHE: - raise ValueError(f'{_PYCACHE} not bottom-level directory in ' - f'{path!r}') - dot_count = pycache_filename.count('.') - if dot_count not in {2, 3}: - raise ValueError(f'expected only 2 or 3 dots in {pycache_filename!r}') - elif dot_count == 3: - optimization = pycache_filename.rsplit('.', 2)[-2] - if not optimization.startswith(_OPT): - raise ValueError("optimization portion of filename does not start " - f"with {_OPT!r}") - opt_level = optimization[len(_OPT):] - if not opt_level.isalnum(): - raise ValueError(f"optimization level {optimization!r} is not an " - "alphanumeric value") - base_filename = pycache_filename.partition('.')[0] - return _path_join(head, base_filename + SOURCE_SUFFIXES[0]) - - -def _get_sourcefile(bytecode_path): - """Convert a bytecode file path to a source path (if possible). - - This function exists purely for backwards-compatibility for - PyImport_ExecCodeModuleWithFilenames() in the C API. 
- - """ - if len(bytecode_path) == 0: - return None - rest, _, extension = bytecode_path.rpartition('.') - if not rest or extension.lower()[-3:-1] != 'py': - return bytecode_path - try: - source_path = source_from_cache(bytecode_path) - except (NotImplementedError, ValueError): - source_path = bytecode_path[:-1] - return source_path if _path_isfile(source_path) else bytecode_path - - -def _get_cached(filename): - if filename.endswith(tuple(SOURCE_SUFFIXES)): - try: - return cache_from_source(filename) - except NotImplementedError: - pass - elif filename.endswith(tuple(BYTECODE_SUFFIXES)): - return filename - else: - return None - - -def _calc_mode(path): - """Calculate the mode permissions for a bytecode file.""" - try: - mode = _path_stat(path).st_mode - except OSError: - mode = 0o666 - # We always ensure write access so we can update cached files - # later even when the source files are read-only on Windows (#6074) - mode |= 0o200 - return mode - - -def _check_name(method): - """Decorator to verify that the module being requested matches the one the - loader can handle. - - The first argument (self) must define _name which the second argument is - compared against. If the comparison fails then ImportError is raised. - - """ - def _check_name_wrapper(self, name=None, *args, **kwargs): - if name is None: - name = self.name - elif self.name != name: - raise ImportError('loader for %s cannot handle %s' % - (self.name, name), name=name) - return method(self, name, *args, **kwargs) - - # FIXME: @_check_name is used to define class methods before the - # _bootstrap module is set by _set_bootstrap_module(). - if _bootstrap is not None: - _wrap = _bootstrap._wrap - else: - def _wrap(new, old): - for replace in ['__module__', '__name__', '__qualname__', '__doc__']: - if hasattr(old, replace): - setattr(new, replace, getattr(old, replace)) - new.__dict__.update(old.__dict__) - - _wrap(_check_name_wrapper, method) - return _check_name_wrapper - - -def _classify_pyc(data, name, exc_details): - """Perform basic validity checking of a pyc header and return the flags field, - which determines how the pyc should be further validated against the source. - - *data* is the contents of the pyc file. (Only the first 16 bytes are - required, though.) - - *name* is the name of the module being imported. It is used for logging. - - *exc_details* is a dictionary passed to ImportError if it raised for - improved debugging. - - ImportError is raised when the magic number is incorrect or when the flags - field is invalid. EOFError is raised when the data is found to be truncated. - - """ - magic = data[:4] - if magic != MAGIC_NUMBER: - message = f'bad magic number in {name!r}: {magic!r}' - _bootstrap._verbose_message('{}', message) - raise ImportError(message, **exc_details) - if len(data) < 16: - message = f'reached EOF while reading pyc header of {name!r}' - _bootstrap._verbose_message('{}', message) - raise EOFError(message) - flags = _unpack_uint32(data[4:8]) - # Only the first two flags are defined. - if flags & ~0b11: - message = f'invalid flags {flags!r} in {name!r}' - raise ImportError(message, **exc_details) - return flags - - -def _validate_timestamp_pyc(data, source_mtime, source_size, name, - exc_details): - """Validate a pyc against the source last-modified time. - - *data* is the contents of the pyc file. (Only the first 16 bytes are - required.) - - *source_mtime* is the last modified timestamp of the source file. - - *source_size* is None or the size of the source file in bytes. 
- - *name* is the name of the module being imported. It is used for logging. - - *exc_details* is a dictionary passed to ImportError if it raised for - improved debugging. - - An ImportError is raised if the bytecode is stale. - - """ - if _unpack_uint32(data[8:12]) != (source_mtime & 0xFFFFFFFF): - message = f'bytecode is stale for {name!r}' - _bootstrap._verbose_message('{}', message) - raise ImportError(message, **exc_details) - if (source_size is not None and - _unpack_uint32(data[12:16]) != (source_size & 0xFFFFFFFF)): - raise ImportError(f'bytecode is stale for {name!r}', **exc_details) - - -def _validate_hash_pyc(data, source_hash, name, exc_details): - """Validate a hash-based pyc by checking the real source hash against the one in - the pyc header. - - *data* is the contents of the pyc file. (Only the first 16 bytes are - required.) - - *source_hash* is the importlib.util.source_hash() of the source file. - - *name* is the name of the module being imported. It is used for logging. - - *exc_details* is a dictionary passed to ImportError if it raised for - improved debugging. - - An ImportError is raised if the bytecode is stale. - - """ - if data[8:16] != source_hash: - raise ImportError( - f'hash in bytecode doesn\'t match hash of source {name!r}', - **exc_details, - ) - - -def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None): - """Compile bytecode as found in a pyc.""" - code = marshal.loads(data) - if isinstance(code, _code_type): - _bootstrap._verbose_message('code object from {!r}', bytecode_path) - if source_path is not None: - _imp._fix_co_filename(code, source_path) - return code - else: - raise ImportError(f'Non-code object in {bytecode_path!r}', - name=name, path=bytecode_path) - - -def _code_to_timestamp_pyc(code, mtime=0, source_size=0): - "Produce the data for a timestamp-based pyc." - data = bytearray(MAGIC_NUMBER) - data.extend(_pack_uint32(0)) - data.extend(_pack_uint32(mtime)) - data.extend(_pack_uint32(source_size)) - data.extend(marshal.dumps(code)) - return data - - -def _code_to_hash_pyc(code, source_hash, checked=True): - "Produce the data for a hash-based pyc." - data = bytearray(MAGIC_NUMBER) - flags = 0b1 | checked << 1 - data.extend(_pack_uint32(flags)) - assert len(source_hash) == 8 - data.extend(source_hash) - data.extend(marshal.dumps(code)) - return data - - -def decode_source(source_bytes): - """Decode bytes representing source code and return the string. - - Universal newline support is used in the decoding. - """ - import tokenize # To avoid bootstrap issues. - source_bytes_readline = _io.BytesIO(source_bytes).readline - encoding = tokenize.detect_encoding(source_bytes_readline) - newline_decoder = _io.IncrementalNewlineDecoder(None, True) - return newline_decoder.decode(source_bytes.decode(encoding[0])) - - -# Module specifications ####################################################### - -_POPULATE = object() - - -def spec_from_file_location(name, location=None, *, loader=None, - submodule_search_locations=_POPULATE): - """Return a module spec based on a file location. - - To indicate that the module is a package, set - submodule_search_locations to a list of directory paths. An - empty list is sufficient, though its not otherwise useful to the - import system. - - The loader must take a spec as its only __init__() arg. - - """ - if location is None: - # The caller may simply want a partially populated location- - # oriented spec. So we set the location to a bogus value and - # fill in as much as we can. 
- location = '' - if hasattr(loader, 'get_filename'): - # ExecutionLoader - try: - location = loader.get_filename(name) - except ImportError: - pass - else: - location = _os.fspath(location) - try: - location = _path_abspath(location) - except OSError: - pass - - # If the location is on the filesystem, but doesn't actually exist, - # we could return None here, indicating that the location is not - # valid. However, we don't have a good way of testing since an - # indirect location (e.g. a zip file or URL) will look like a - # non-existent file relative to the filesystem. - - spec = _bootstrap.ModuleSpec(name, loader, origin=location) - spec._set_fileattr = True - - # Pick a loader if one wasn't provided. - if loader is None: - for loader_class, suffixes in _get_supported_file_loaders(): - if location.endswith(tuple(suffixes)): - loader = loader_class(name, location) - spec.loader = loader - break - else: - return None - - # Set submodule_search_paths appropriately. - if submodule_search_locations is _POPULATE: - # Check the loader. - if hasattr(loader, 'is_package'): - try: - is_package = loader.is_package(name) - except ImportError: - pass - else: - if is_package: - spec.submodule_search_locations = [] - else: - spec.submodule_search_locations = submodule_search_locations - if spec.submodule_search_locations == []: - if location: - dirname = _path_split(location)[0] - spec.submodule_search_locations.append(dirname) - - return spec - - -def _bless_my_loader(module_globals): - """Helper function for _warnings.c - - See GH#97850 for details. - """ - # 2022-10-06(warsaw): For now, this helper is only used in _warnings.c and - # that use case only has the module globals. This function could be - # extended to accept either that or a module object. However, in the - # latter case, it would be better to raise certain exceptions when looking - # at a module, which should have either a __loader__ or __spec__.loader. - # For backward compatibility, it is possible that we'll get an empty - # dictionary for the module globals, and that cannot raise an exception. 
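# Aside: the typical way spec_from_file_location() above is consumed, via its
# public importlib.util wrapper. '/srv/app/plugin.py' is a hypothetical path.
import importlib.util

spec = importlib.util.spec_from_file_location('plugin', '/srv/app/plugin.py')
plugin = importlib.util.module_from_spec(spec)
spec.loader.exec_module(plugin)  # runs plugin.py in the new module namespace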
- if not isinstance(module_globals, dict): - return None - - missing = object() - loader = module_globals.get('__loader__', None) - spec = module_globals.get('__spec__', missing) - - if loader is None: - if spec is missing: - # If working with a module: - # raise AttributeError('Module globals is missing a __spec__') - return None - elif spec is None: - raise ValueError('Module globals is missing a __spec__.loader') - - spec_loader = getattr(spec, 'loader', missing) - - if spec_loader in (missing, None): - if loader is None: - exc = AttributeError if spec_loader is missing else ValueError - raise exc('Module globals is missing a __spec__.loader') - _warnings.warn( - 'Module globals is missing a __spec__.loader', - DeprecationWarning) - spec_loader = loader - - assert spec_loader is not None - if loader is not None and loader != spec_loader: - _warnings.warn( - 'Module globals; __loader__ != __spec__.loader', - DeprecationWarning) - return loader - - return spec_loader - - -# Loaders ##################################################################### - -class WindowsRegistryFinder: - - """Meta path finder for modules declared in the Windows registry.""" - - REGISTRY_KEY = ( - 'Software\\Python\\PythonCore\\{sys_version}' - '\\Modules\\{fullname}') - REGISTRY_KEY_DEBUG = ( - 'Software\\Python\\PythonCore\\{sys_version}' - '\\Modules\\{fullname}\\Debug') - DEBUG_BUILD = (_MS_WINDOWS and '_d.pyd' in EXTENSION_SUFFIXES) - - @staticmethod - def _open_registry(key): - try: - return winreg.OpenKey(winreg.HKEY_CURRENT_USER, key) - except OSError: - return winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key) - - @classmethod - def _search_registry(cls, fullname): - if cls.DEBUG_BUILD: - registry_key = cls.REGISTRY_KEY_DEBUG - else: - registry_key = cls.REGISTRY_KEY - key = registry_key.format(fullname=fullname, - sys_version='%d.%d' % sys.version_info[:2]) - try: - with cls._open_registry(key) as hkey: - filepath = winreg.QueryValue(hkey, '') - except OSError: - return None - return filepath - - @classmethod - def find_spec(cls, fullname, path=None, target=None): - filepath = cls._search_registry(fullname) - if filepath is None: - return None - try: - _path_stat(filepath) - except OSError: - return None - for loader, suffixes in _get_supported_file_loaders(): - if filepath.endswith(tuple(suffixes)): - spec = _bootstrap.spec_from_loader(fullname, - loader(fullname, filepath), - origin=filepath) - return spec - - -class _LoaderBasics: - - """Base class of common code needed by both SourceLoader and - SourcelessFileLoader.""" - - def is_package(self, fullname): - """Concrete implementation of InspectLoader.is_package by checking if - the path returned by get_filename has a filename of '__init__.py'.""" - filename = _path_split(self.get_filename(fullname))[1] - filename_base = filename.rsplit('.', 1)[0] - tail_name = fullname.rpartition('.')[2] - return filename_base == '__init__' and tail_name != '__init__' - - def create_module(self, spec): - """Use default semantics for module creation.""" - - def exec_module(self, module): - """Execute the module.""" - code = self.get_code(module.__name__) - if code is None: - raise ImportError(f'cannot load module {module.__name__!r} when ' - 'get_code() returns None') - _bootstrap._call_with_frames_removed(exec, code, module.__dict__) - - def load_module(self, fullname): - """This method is deprecated.""" - # Warning implemented in _load_module_shim(). 
- return _bootstrap._load_module_shim(self, fullname) - - -class SourceLoader(_LoaderBasics): - - def path_mtime(self, path): - """Optional method that returns the modification time (an int) for the - specified path (a str). - - Raises OSError when the path cannot be handled. - """ - raise OSError - - def path_stats(self, path): - """Optional method returning a metadata dict for the specified - path (a str). - - Possible keys: - - 'mtime' (mandatory) is the numeric timestamp of last source - code modification; - - 'size' (optional) is the size in bytes of the source code. - - Implementing this method allows the loader to read bytecode files. - Raises OSError when the path cannot be handled. - """ - return {'mtime': self.path_mtime(path)} - - def _cache_bytecode(self, source_path, cache_path, data): - """Optional method which writes data (bytes) to a file path (a str). - - Implementing this method allows for the writing of bytecode files. - - The source path is needed in order to correctly transfer permissions - """ - # For backwards compatibility, we delegate to set_data() - return self.set_data(cache_path, data) - - def set_data(self, path, data): - """Optional method which writes data (bytes) to a file path (a str). - - Implementing this method allows for the writing of bytecode files. - """ - - - def get_source(self, fullname): - """Concrete implementation of InspectLoader.get_source.""" - path = self.get_filename(fullname) - try: - source_bytes = self.get_data(path) - except OSError as exc: - raise ImportError('source not available through get_data()', - name=fullname) from exc - return decode_source(source_bytes) - - def source_to_code(self, data, path, *, _optimize=-1): - """Return the code object compiled from source. - - The 'data' argument can be any object type that compile() supports. - """ - return _bootstrap._call_with_frames_removed(compile, data, path, 'exec', - dont_inherit=True, optimize=_optimize) - - def get_code(self, fullname): - """Concrete implementation of InspectLoader.get_code. - - Reading of bytecode requires path_stats to be implemented. To write - bytecode, set_data must also be implemented. 
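# Aside: a minimal runnable SourceLoader built on the hooks documented above,
# using the public importlib.abc mirror of this class. Only get_filename()
# and get_data() are mandatory; path_stats()/set_data() would additionally
# enable bytecode caching. StringSourceLoader is illustrative only.
import importlib.abc
import importlib.util

class StringSourceLoader(importlib.abc.SourceLoader):
    def __init__(self, fullname, source):
        self._fullname, self._source = fullname, source

    def get_filename(self, fullname):
        return f'<string:{fullname}>'

    def get_data(self, path):
        if path != self.get_filename(self._fullname):
            raise OSError(f'no data at {path!r}')  # e.g. no cached bytecode
        return self._source.encode('utf-8')

loader = StringSourceLoader('demo', 'ANSWER = 42\n')
spec = importlib.util.spec_from_loader('demo', loader)
demo = importlib.util.module_from_spec(spec)
loader.exec_module(demo)
assert demo.ANSWER == 42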
- - """ - source_path = self.get_filename(fullname) - source_mtime = None - source_bytes = None - source_hash = None - hash_based = False - check_source = True - try: - bytecode_path = cache_from_source(source_path) - except NotImplementedError: - bytecode_path = None - else: - try: - st = self.path_stats(source_path) - except OSError: - pass - else: - source_mtime = int(st['mtime']) - try: - data = self.get_data(bytecode_path) - except OSError: - pass - else: - exc_details = { - 'name': fullname, - 'path': bytecode_path, - } - try: - flags = _classify_pyc(data, fullname, exc_details) - bytes_data = memoryview(data)[16:] - hash_based = flags & 0b1 != 0 - if hash_based: - check_source = flags & 0b10 != 0 - if (_imp.check_hash_based_pycs != 'never' and - (check_source or - _imp.check_hash_based_pycs == 'always')): - source_bytes = self.get_data(source_path) - source_hash = _imp.source_hash( - _RAW_MAGIC_NUMBER, - source_bytes, - ) - _validate_hash_pyc(data, source_hash, fullname, - exc_details) - else: - _validate_timestamp_pyc( - data, - source_mtime, - st['size'], - fullname, - exc_details, - ) - except (ImportError, EOFError): - pass - else: - _bootstrap._verbose_message('{} matches {}', bytecode_path, - source_path) - return _compile_bytecode(bytes_data, name=fullname, - bytecode_path=bytecode_path, - source_path=source_path) - if source_bytes is None: - source_bytes = self.get_data(source_path) - code_object = self.source_to_code(source_bytes, source_path) - _bootstrap._verbose_message('code object from {}', source_path) - if (not sys.dont_write_bytecode and bytecode_path is not None and - source_mtime is not None): - if hash_based: - if source_hash is None: - source_hash = _imp.source_hash(_RAW_MAGIC_NUMBER, - source_bytes) - data = _code_to_hash_pyc(code_object, source_hash, check_source) - else: - data = _code_to_timestamp_pyc(code_object, source_mtime, - len(source_bytes)) - try: - self._cache_bytecode(source_path, bytecode_path, data) - except NotImplementedError: - pass - return code_object - - -class FileLoader: - - """Base file loader class which implements the loader protocol methods that - require file system usage.""" - - def __init__(self, fullname, path): - """Cache the module name and the path to the file found by the - finder.""" - self.name = fullname - self.path = path - - def __eq__(self, other): - return (self.__class__ == other.__class__ and - self.__dict__ == other.__dict__) - - def __hash__(self): - return hash(self.name) ^ hash(self.path) - - @_check_name - def load_module(self, fullname): - """Load a module from a file. - - This method is deprecated. Use exec_module() instead. - - """ - # The only reason for this method is for the name check. - # Issue #14857: Avoid the zero-argument form of super so the implementation - # of that form can be updated without breaking the frozen module. 
- return super(FileLoader, self).load_module(fullname) - - @_check_name - def get_filename(self, fullname): - """Return the path to the source file as found by the finder.""" - return self.path - - def get_data(self, path): - """Return the data from path as raw bytes.""" - if isinstance(self, (SourceLoader, SourcelessFileLoader, ExtensionFileLoader)): - with _io.open_code(str(path)) as file: - return file.read() - else: - with _io.FileIO(path, 'r') as file: - return file.read() - - @_check_name - def get_resource_reader(self, module): - from importlib.readers import FileReader - return FileReader(self) - - -class SourceFileLoader(FileLoader, SourceLoader): - - """Concrete implementation of SourceLoader using the file system.""" - - def path_stats(self, path): - """Return the metadata for the path.""" - st = _path_stat(path) - return {'mtime': st.st_mtime, 'size': st.st_size} - - def _cache_bytecode(self, source_path, bytecode_path, data): - # Adapt between the two APIs - mode = _calc_mode(source_path) - return self.set_data(bytecode_path, data, _mode=mode) - - def set_data(self, path, data, *, _mode=0o666): - """Write bytes data to a file.""" - parent, filename = _path_split(path) - path_parts = [] - # Figure out what directories are missing. - while parent and not _path_isdir(parent): - parent, part = _path_split(parent) - path_parts.append(part) - # Create needed directories. - for part in reversed(path_parts): - parent = _path_join(parent, part) - try: - _os.mkdir(parent) - except FileExistsError: - # Probably another Python process already created the dir. - continue - except OSError as exc: - # Could be a permission error, read-only filesystem: just forget - # about writing the data. - _bootstrap._verbose_message('could not create {!r}: {!r}', - parent, exc) - return - try: - _write_atomic(path, data, _mode) - _bootstrap._verbose_message('created {!r}', path) - except OSError as exc: - # Same as above: just don't write the bytecode. - _bootstrap._verbose_message('could not create {!r}: {!r}', path, - exc) - - -class SourcelessFileLoader(FileLoader, _LoaderBasics): - - """Loader which handles sourceless file imports.""" - - def get_code(self, fullname): - path = self.get_filename(fullname) - data = self.get_data(path) - # Call _classify_pyc to do basic validation of the pyc but ignore the - # result. There's no source to check against. - exc_details = { - 'name': fullname, - 'path': path, - } - _classify_pyc(data, fullname, exc_details) - return _compile_bytecode( - memoryview(data)[16:], - name=fullname, - bytecode_path=path, - ) - - def get_source(self, fullname): - """Return None as there is no source code.""" - return None - - -class ExtensionFileLoader(FileLoader, _LoaderBasics): - - """Loader for extension modules. - - The constructor is designed to work with FileFinder. 
- - """ - - def __init__(self, name, path): - self.name = name - self.path = path - - def __eq__(self, other): - return (self.__class__ == other.__class__ and - self.__dict__ == other.__dict__) - - def __hash__(self): - return hash(self.name) ^ hash(self.path) - - def create_module(self, spec): - """Create an uninitialized extension module""" - module = _bootstrap._call_with_frames_removed( - _imp.create_dynamic, spec) - _bootstrap._verbose_message('extension module {!r} loaded from {!r}', - spec.name, self.path) - return module - - def exec_module(self, module): - """Initialize an extension module""" - _bootstrap._call_with_frames_removed(_imp.exec_dynamic, module) - _bootstrap._verbose_message('extension module {!r} executed from {!r}', - self.name, self.path) - - def is_package(self, fullname): - """Return True if the extension module is a package.""" - file_name = _path_split(self.path)[1] - return any(file_name == '__init__' + suffix - for suffix in EXTENSION_SUFFIXES) - - def get_code(self, fullname): - """Return None as an extension module cannot create a code object.""" - return None - - def get_source(self, fullname): - """Return None as extension modules have no source code.""" - return None - - @_check_name - def get_filename(self, fullname): - """Return the path to the source file as found by the finder.""" - return self.path - - -class _NamespacePath: - """Represents a namespace package's path. It uses the module name - to find its parent module, and from there it looks up the parent's - __path__. When this changes, the module's own path is recomputed, - using path_finder. For top-level modules, the parent module's path - is sys.path.""" - - # When invalidate_caches() is called, this epoch is incremented - # https://bugs.python.org/issue45703 - _epoch = 0 - - def __init__(self, name, path, path_finder): - self._name = name - self._path = path - self._last_parent_path = tuple(self._get_parent_path()) - self._last_epoch = self._epoch - self._path_finder = path_finder - - def _find_parent_path_names(self): - """Returns a tuple of (parent-module-name, parent-path-attr-name)""" - parent, dot, me = self._name.rpartition('.') - if dot == '': - # This is a top-level module. sys.path contains the parent path. - return 'sys', 'path' - # Not a top-level module. parent-module.__path__ contains the - # parent path. 
-        return parent, '__path__'
-
-    def _get_parent_path(self):
-        parent_module_name, path_attr_name = self._find_parent_path_names()
-        return getattr(sys.modules[parent_module_name], path_attr_name)
-
-    def _recalculate(self):
-        # If the parent's path has changed, recalculate _path
-        parent_path = tuple(self._get_parent_path())  # Make a copy
-        if parent_path != self._last_parent_path or self._epoch != self._last_epoch:
-            spec = self._path_finder(self._name, parent_path)
-            # Note that no changes are made if a loader is returned, but we
-            # do remember the new parent path
-            if spec is not None and spec.loader is None:
-                if spec.submodule_search_locations:
-                    self._path = spec.submodule_search_locations
-            self._last_parent_path = parent_path  # Save the copy
-            self._last_epoch = self._epoch
-        return self._path
-
-    def __iter__(self):
-        return iter(self._recalculate())
-
-    def __getitem__(self, index):
-        return self._recalculate()[index]
-
-    def __setitem__(self, index, path):
-        self._path[index] = path
-
-    def __len__(self):
-        return len(self._recalculate())
-
-    def __repr__(self):
-        return f'_NamespacePath({self._path!r})'
-
-    def __contains__(self, item):
-        return item in self._recalculate()
-
-    def append(self, item):
-        self._path.append(item)
-
-
-# This class is actually exposed publicly in a namespace package's __loader__
-# attribute, so it should be available through a non-private name.
-# https://github.com/python/cpython/issues/92054
-class NamespaceLoader:
-    def __init__(self, name, path, path_finder):
-        self._path = _NamespacePath(name, path, path_finder)
-
-    def is_package(self, fullname):
-        return True
-
-    def get_source(self, fullname):
-        return ''
-
-    def get_code(self, fullname):
-        return compile('', '<string>', 'exec', dont_inherit=True)
-
-    def create_module(self, spec):
-        """Use default semantics for module creation."""
-
-    def exec_module(self, module):
-        pass
-
-    def load_module(self, fullname):
-        """Load a namespace module.
-
-        This method is deprecated. Use exec_module() instead.
-
-        """
-        # The import system never calls this method.
-        _bootstrap._verbose_message('namespace module loaded with path {!r}',
-                                    self._path)
-        # Warning implemented in _load_module_shim().
-        return _bootstrap._load_module_shim(self, fullname)
-
-    def get_resource_reader(self, module):
-        from importlib.readers import NamespaceReader
-        return NamespaceReader(self._path)
-
-
-# We use this exclusively in module_from_spec() for backward-compatibility.
-_NamespaceLoader = NamespaceLoader
-
-
-# Finders #####################################################################
-
-class PathFinder:
-
-    """Meta path finder for sys.path and package __path__ attributes."""
-
-    @staticmethod
-    def invalidate_caches():
-        """Call the invalidate_caches() method on all path entry finders
-        stored in sys.path_importer_cache (where implemented)."""
-        for name, finder in list(sys.path_importer_cache.items()):
-            # Drop entry if finder name is a relative path. The current
-            # working directory may have changed.
- if finder is None or not _path_isabs(name): - del sys.path_importer_cache[name] - elif hasattr(finder, 'invalidate_caches'): - finder.invalidate_caches() - # Also invalidate the caches of _NamespacePaths - # https://bugs.python.org/issue45703 - _NamespacePath._epoch += 1 - - from importlib.metadata import MetadataPathFinder - MetadataPathFinder.invalidate_caches() - - @staticmethod - def _path_hooks(path): - """Search sys.path_hooks for a finder for 'path'.""" - if sys.path_hooks is not None and not sys.path_hooks: - _warnings.warn('sys.path_hooks is empty', ImportWarning) - for hook in sys.path_hooks: - try: - return hook(path) - except ImportError: - continue - else: - return None - - @classmethod - def _path_importer_cache(cls, path): - """Get the finder for the path entry from sys.path_importer_cache. - - If the path entry is not in the cache, find the appropriate finder - and cache it. If no finder is available, store None. - - """ - if path == '': - try: - path = _os.getcwd() - except FileNotFoundError: - # Don't cache the failure as the cwd can easily change to - # a valid directory later on. - return None - try: - finder = sys.path_importer_cache[path] - except KeyError: - finder = cls._path_hooks(path) - sys.path_importer_cache[path] = finder - return finder - - @classmethod - def _get_spec(cls, fullname, path, target=None): - """Find the loader or namespace_path for this module/package name.""" - # If this ends up being a namespace package, namespace_path is - # the list of paths that will become its __path__ - namespace_path = [] - for entry in path: - if not isinstance(entry, str): - continue - finder = cls._path_importer_cache(entry) - if finder is not None: - spec = finder.find_spec(fullname, target) - if spec is None: - continue - if spec.loader is not None: - return spec - portions = spec.submodule_search_locations - if portions is None: - raise ImportError('spec missing loader') - # This is possibly part of a namespace package. - # Remember these path entries (if any) for when we - # create a namespace package, and continue iterating - # on path. - namespace_path.extend(portions) - else: - spec = _bootstrap.ModuleSpec(fullname, None) - spec.submodule_search_locations = namespace_path - return spec - - @classmethod - def find_spec(cls, fullname, path=None, target=None): - """Try to find a spec for 'fullname' on sys.path or 'path'. - - The search is based on sys.path_hooks and sys.path_importer_cache. - """ - if path is None: - path = sys.path - spec = cls._get_spec(fullname, path, target) - if spec is None: - return None - elif spec.loader is None: - namespace_path = spec.submodule_search_locations - if namespace_path: - # We found at least one namespace path. Return a spec which - # can create the namespace package. - spec.origin = None - spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec) - return spec - else: - return None - else: - return spec - - @staticmethod - def find_distributions(*args, **kwargs): - """ - Find distributions. - - Return an iterable of all Distribution instances capable of - loading the metadata for packages matching ``context.name`` - (or all names if ``None`` indicated) along the paths in the list - of directories ``context.path``. - """ - from importlib.metadata import MetadataPathFinder - return MetadataPathFinder.find_distributions(*args, **kwargs) - - -class FileFinder: - - """File-based finder. 
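# --- Editor's annotation (not part of the diff): PathFinder remains public as
# importlib.machinery.PathFinder; querying it directly shows the spec
# resolution implemented in _get_spec()/find_spec() above. "json" is used only
# as a stdlib module that is guaranteed to be present.
from importlib.machinery import PathFinder

spec = PathFinder.find_spec("json")
print(spec.name, spec.origin)       # json .../json/__init__.py
print(type(spec.loader).__name__)   # SourceFileLoader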
- - Interactions with the file system are cached for performance, being - refreshed when the directory the finder is handling has been modified. - - """ - - def __init__(self, path, *loader_details): - """Initialize with the path to search on and a variable number of - 2-tuples containing the loader and the file suffixes the loader - recognizes.""" - loaders = [] - for loader, suffixes in loader_details: - loaders.extend((suffix, loader) for suffix in suffixes) - self._loaders = loaders - # Base (directory) path - if not path or path == '.': - self.path = _os.getcwd() - else: - self.path = _path_abspath(path) - self._path_mtime = -1 - self._path_cache = set() - self._relaxed_path_cache = set() - - def invalidate_caches(self): - """Invalidate the directory mtime.""" - self._path_mtime = -1 - - def _get_spec(self, loader_class, fullname, path, smsl, target): - loader = loader_class(fullname, path) - return spec_from_file_location(fullname, path, loader=loader, - submodule_search_locations=smsl) - - def find_spec(self, fullname, target=None): - """Try to find a spec for the specified module. - - Returns the matching spec, or None if not found. - """ - is_namespace = False - tail_module = fullname.rpartition('.')[2] - try: - mtime = _path_stat(self.path or _os.getcwd()).st_mtime - except OSError: - mtime = -1 - if mtime != self._path_mtime: - self._fill_cache() - self._path_mtime = mtime - # tail_module keeps the original casing, for __file__ and friends - if _relax_case(): - cache = self._relaxed_path_cache - cache_module = tail_module.lower() - else: - cache = self._path_cache - cache_module = tail_module - # Check if the module is the name of a directory (and thus a package). - if cache_module in cache: - base_path = _path_join(self.path, tail_module) - for suffix, loader_class in self._loaders: - init_filename = '__init__' + suffix - full_path = _path_join(base_path, init_filename) - if _path_isfile(full_path): - return self._get_spec(loader_class, fullname, full_path, [base_path], target) - else: - # If a namespace package, return the path if we don't - # find a module in the next section. - is_namespace = _path_isdir(base_path) - # Check for a file w/ a proper suffix exists. - for suffix, loader_class in self._loaders: - try: - full_path = _path_join(self.path, tail_module + suffix) - except ValueError: - return None - _bootstrap._verbose_message('trying {}', full_path, verbosity=2) - if cache_module + suffix in cache: - if _path_isfile(full_path): - return self._get_spec(loader_class, fullname, full_path, - None, target) - if is_namespace: - _bootstrap._verbose_message('possible namespace for {}', base_path) - spec = _bootstrap.ModuleSpec(fullname, None) - spec.submodule_search_locations = [base_path] - return spec - return None - - def _fill_cache(self): - """Fill the cache of potential modules and packages for this directory.""" - path = self.path - try: - contents = _os.listdir(path or _os.getcwd()) - except (FileNotFoundError, PermissionError, NotADirectoryError): - # Directory has either been removed, turned into a file, or made - # unreadable. - contents = [] - # We store two cached versions, to handle runtime changes of the - # PYTHONCASEOK environment variable. - if not sys.platform.startswith('win'): - self._path_cache = set(contents) - else: - # Windows users can import modules with case-insensitive file - # suffixes (for legacy reasons). Make the suffix lowercase here - # so it's done once instead of for every import. 
This is safe as - # the specified suffixes to check against are always specified in a - # case-sensitive manner. - lower_suffix_contents = set() - for item in contents: - name, dot, suffix = item.partition('.') - if dot: - new_name = f'{name}.{suffix.lower()}' - else: - new_name = name - lower_suffix_contents.add(new_name) - self._path_cache = lower_suffix_contents - if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): - self._relaxed_path_cache = {fn.lower() for fn in contents} - - @classmethod - def path_hook(cls, *loader_details): - """A class method which returns a closure to use on sys.path_hook - which will return an instance using the specified loaders and the path - called on the closure. - - If the path called on the closure is not a directory, ImportError is - raised. - - """ - def path_hook_for_FileFinder(path): - """Path hook for importlib.machinery.FileFinder.""" - if not _path_isdir(path): - raise ImportError('only directories are supported', path=path) - return cls(path, *loader_details) - - return path_hook_for_FileFinder - - def __repr__(self): - return f'FileFinder({self.path!r})' - - -class AppleFrameworkLoader(ExtensionFileLoader): - """A loader for modules that have been packaged as frameworks for - compatibility with Apple's iOS App Store policies. - """ - def create_module(self, spec): - # If the ModuleSpec has been created by the FileFinder, it will have - # been created with an origin pointing to the .fwork file. We need to - # redirect this to the location in the Frameworks folder, using the - # content of the .fwork file. - if spec.origin.endswith(".fwork"): - with _io.FileIO(spec.origin, 'r') as file: - framework_binary = file.read().decode().strip() - bundle_path = _path_split(sys.executable)[0] - spec.origin = _path_join(bundle_path, framework_binary) - - # If the loader is created based on the spec for a loaded module, the - # path will be pointing at the Framework location. If this occurs, - # get the original .fwork location to use as the module's __file__. - if self.path.endswith(".fwork"): - path = self.path - else: - with _io.FileIO(self.path + ".origin", 'r') as file: - origin = file.read().decode().strip() - bundle_path = _path_split(sys.executable)[0] - path = _path_join(bundle_path, origin) - - module = _bootstrap._call_with_frames_removed(_imp.create_dynamic, spec) - - _bootstrap._verbose_message( - "Apple framework extension module {!r} loaded from {!r} (path {!r})", - spec.name, - spec.origin, - path, - ) - - # Ensure that the __file__ points at the .fwork location - try: - module.__file__ = path - except AttributeError: - # Not important enough to report. - # (The error is also ignored in _bootstrap._init_module_attrs or - # import_run_extension in import.c) - pass - - return module - -# Import setup ############################################################### - -def _fix_up_module(ns, name, pathname, cpathname=None): - # This function is used by PyImport_ExecCodeModuleObject(). - loader = ns.get('__loader__') - spec = ns.get('__spec__') - if not loader: - if spec: - loader = spec.loader - elif pathname == cpathname: - loader = SourcelessFileLoader(name, pathname) - else: - loader = SourceFileLoader(name, pathname) - if not spec: - spec = spec_from_file_location(name, pathname, loader=loader) - if cpathname: - spec.cached = _path_abspath(cpathname) - try: - ns['__spec__'] = spec - ns['__loader__'] = loader - ns['__file__'] = pathname - ns['__cached__'] = cpathname - except Exception: - # Not important enough to report. 
- pass - - -def _get_supported_file_loaders(): - """Returns a list of file-based module loaders. - - Each item is a tuple (loader, suffixes). - """ - extension_loaders = [] - if hasattr(_imp, 'create_dynamic'): - if sys.platform in {"ios", "tvos", "watchos"}: - extension_loaders = [(AppleFrameworkLoader, [ - suffix.replace(".so", ".fwork") - for suffix in _imp.extension_suffixes() - ])] - extension_loaders.append((ExtensionFileLoader, _imp.extension_suffixes())) - source = SourceFileLoader, SOURCE_SUFFIXES - bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES - return extension_loaders + [source, bytecode] - - -def _set_bootstrap_module(_bootstrap_module): - global _bootstrap - _bootstrap = _bootstrap_module - - -def _install(_bootstrap_module): - """Install the path-based import components.""" - _set_bootstrap_module(_bootstrap_module) - supported_loaders = _get_supported_file_loaders() - sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)]) - sys.meta_path.append(PathFinder) diff --git a/Python313_13_x86_Template/Lib/importlib/abc.py b/Python313_13_x86_Template/Lib/importlib/abc.py deleted file mode 100644 index 37fef357..00000000 --- a/Python313_13_x86_Template/Lib/importlib/abc.py +++ /dev/null @@ -1,243 +0,0 @@ -"""Abstract base classes related to import.""" -from . import _bootstrap_external -from . import machinery -try: - import _frozen_importlib -except ImportError as exc: - if exc.name != '_frozen_importlib': - raise - _frozen_importlib = None -try: - import _frozen_importlib_external -except ImportError: - _frozen_importlib_external = _bootstrap_external -from ._abc import Loader -import abc -import warnings - -from .resources import abc as _resources_abc - - -__all__ = [ - 'Loader', 'MetaPathFinder', 'PathEntryFinder', - 'ResourceLoader', 'InspectLoader', 'ExecutionLoader', - 'FileLoader', 'SourceLoader', -] - - -def __getattr__(name): - """ - For backwards compatibility, continue to make names - from _resources_abc available through this module. #93963 - """ - if name in _resources_abc.__all__: - obj = getattr(_resources_abc, name) - warnings._deprecated(f"{__name__}.{name}", remove=(3, 14)) - globals()[name] = obj - return obj - raise AttributeError(f'module {__name__!r} has no attribute {name!r}') - - -def _register(abstract_cls, *classes): - for cls in classes: - abstract_cls.register(cls) - if _frozen_importlib is not None: - try: - frozen_cls = getattr(_frozen_importlib, cls.__name__) - except AttributeError: - frozen_cls = getattr(_frozen_importlib_external, cls.__name__) - abstract_cls.register(frozen_cls) - - -class MetaPathFinder(metaclass=abc.ABCMeta): - - """Abstract base class for import finders on sys.meta_path.""" - - # We don't define find_spec() here since that would break - # hasattr checks we do to support backward compatibility. - - def invalidate_caches(self): - """An optional method for clearing the finder's cache, if any. - This method is used by importlib.invalidate_caches(). - """ - -_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter, - machinery.PathFinder, machinery.WindowsRegistryFinder) - - -class PathEntryFinder(metaclass=abc.ABCMeta): - - """Abstract base class for path entry finders used by PathFinder.""" - - def invalidate_caches(self): - """An optional method for clearing the finder's cache, if any. - This method is used by PathFinder.invalidate_caches(). 
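# --- Editor's annotation (not part of the diff): the loader/suffix pairs that
# _get_supported_file_loaders() above assembles can be inspected through the
# public importlib.machinery constants; a small sanity check:
from importlib.machinery import (SOURCE_SUFFIXES, BYTECODE_SUFFIXES,
                                 EXTENSION_SUFFIXES, all_suffixes)

print(SOURCE_SUFFIXES)     # ['.py'] (plus '.pyw' on Windows)
print(BYTECODE_SUFFIXES)   # ['.pyc']
print(EXTENSION_SUFFIXES)  # e.g. '.pyd' variants on Windows, '.so' on POSIX
assert set(all_suffixes()) == set(SOURCE_SUFFIXES + BYTECODE_SUFFIXES +
                                  EXTENSION_SUFFIXES)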
- """ - -_register(PathEntryFinder, machinery.FileFinder) - - -class ResourceLoader(Loader): - - """Abstract base class for loaders which can return data from their - back-end storage. - - This ABC represents one of the optional protocols specified by PEP 302. - - """ - - @abc.abstractmethod - def get_data(self, path): - """Abstract method which when implemented should return the bytes for - the specified path. The path must be a str.""" - raise OSError - - -class InspectLoader(Loader): - - """Abstract base class for loaders which support inspection about the - modules they can load. - - This ABC represents one of the optional protocols specified by PEP 302. - - """ - - def is_package(self, fullname): - """Optional method which when implemented should return whether the - module is a package. The fullname is a str. Returns a bool. - - Raises ImportError if the module cannot be found. - """ - raise ImportError - - def get_code(self, fullname): - """Method which returns the code object for the module. - - The fullname is a str. Returns a types.CodeType if possible, else - returns None if a code object does not make sense - (e.g. built-in module). Raises ImportError if the module cannot be - found. - """ - source = self.get_source(fullname) - if source is None: - return None - return self.source_to_code(source) - - @abc.abstractmethod - def get_source(self, fullname): - """Abstract method which should return the source code for the - module. The fullname is a str. Returns a str. - - Raises ImportError if the module cannot be found. - """ - raise ImportError - - @staticmethod - def source_to_code(data, path=''): - """Compile 'data' into a code object. - - The 'data' argument can be anything that compile() can handle. The'path' - argument should be where the data was retrieved (when applicable).""" - return compile(data, path, 'exec', dont_inherit=True) - - exec_module = _bootstrap_external._LoaderBasics.exec_module - load_module = _bootstrap_external._LoaderBasics.load_module - -_register(InspectLoader, machinery.BuiltinImporter, machinery.FrozenImporter, machinery.NamespaceLoader) - - -class ExecutionLoader(InspectLoader): - - """Abstract base class for loaders that wish to support the execution of - modules as scripts. - - This ABC represents one of the optional protocols specified in PEP 302. - - """ - - @abc.abstractmethod - def get_filename(self, fullname): - """Abstract method which should return the value that __file__ is to be - set to. - - Raises ImportError if the module cannot be found. - """ - raise ImportError - - def get_code(self, fullname): - """Method to return the code object for fullname. - - Should return None if not applicable (e.g. built-in module). - Raise ImportError if the module cannot be found. 
- """ - source = self.get_source(fullname) - if source is None: - return None - try: - path = self.get_filename(fullname) - except ImportError: - return self.source_to_code(source) - else: - return self.source_to_code(source, path) - -_register( - ExecutionLoader, - machinery.ExtensionFileLoader, - machinery.AppleFrameworkLoader, -) - - -class FileLoader(_bootstrap_external.FileLoader, ResourceLoader, ExecutionLoader): - - """Abstract base class partially implementing the ResourceLoader and - ExecutionLoader ABCs.""" - -_register(FileLoader, machinery.SourceFileLoader, - machinery.SourcelessFileLoader) - - -class SourceLoader(_bootstrap_external.SourceLoader, ResourceLoader, ExecutionLoader): - - """Abstract base class for loading source code (and optionally any - corresponding bytecode). - - To support loading from source code, the abstractmethods inherited from - ResourceLoader and ExecutionLoader need to be implemented. To also support - loading from bytecode, the optional methods specified directly by this ABC - is required. - - Inherited abstractmethods not implemented in this ABC: - - * ResourceLoader.get_data - * ExecutionLoader.get_filename - - """ - - def path_mtime(self, path): - """Return the (int) modification time for the path (str).""" - if self.path_stats.__func__ is SourceLoader.path_stats: - raise OSError - return int(self.path_stats(path)['mtime']) - - def path_stats(self, path): - """Return a metadata dict for the source pointed to by the path (str). - Possible keys: - - 'mtime' (mandatory) is the numeric timestamp of last source - code modification; - - 'size' (optional) is the size in bytes of the source code. - """ - if self.path_mtime.__func__ is SourceLoader.path_mtime: - raise OSError - return {'mtime': self.path_mtime(path)} - - def set_data(self, path, data): - """Write the bytes to the path (if possible). - - Accepts a str path and data as bytes. - - Any needed intermediary directories are to be created. If for some - reason the file cannot be written because of permissions, fail - silently. 
- """ - -_register(SourceLoader, machinery.SourceFileLoader) diff --git a/Python313_13_x86_Template/Lib/importlib/machinery.py b/Python313_13_x86_Template/Lib/importlib/machinery.py deleted file mode 100644 index fbd30b15..00000000 --- a/Python313_13_x86_Template/Lib/importlib/machinery.py +++ /dev/null @@ -1,21 +0,0 @@ -"""The machinery of importlib: finders, loaders, hooks, etc.""" - -from ._bootstrap import ModuleSpec -from ._bootstrap import BuiltinImporter -from ._bootstrap import FrozenImporter -from ._bootstrap_external import (SOURCE_SUFFIXES, DEBUG_BYTECODE_SUFFIXES, - OPTIMIZED_BYTECODE_SUFFIXES, BYTECODE_SUFFIXES, - EXTENSION_SUFFIXES) -from ._bootstrap_external import WindowsRegistryFinder -from ._bootstrap_external import PathFinder -from ._bootstrap_external import FileFinder -from ._bootstrap_external import SourceFileLoader -from ._bootstrap_external import SourcelessFileLoader -from ._bootstrap_external import ExtensionFileLoader -from ._bootstrap_external import AppleFrameworkLoader -from ._bootstrap_external import NamespaceLoader - - -def all_suffixes(): - """Returns a list of all recognized module suffixes for this process""" - return SOURCE_SUFFIXES + BYTECODE_SUFFIXES + EXTENSION_SUFFIXES diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 95887b5a..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-313.pyc deleted file mode 100644 index 7090c6e1..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-313.pyc deleted file mode 100644 index 22bb104e..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-313.pyc deleted file mode 100644 index 575f9e08..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-313.pyc deleted file mode 100644 index 4f3c9685..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-313.pyc deleted file mode 100644 index 1b3267ad..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_text.cpython-313.pyc 
b/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_text.cpython-313.pyc deleted file mode 100644 index 993e14aa..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/metadata/__pycache__/_text.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 54cf4f72..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-313.pyc deleted file mode 100644 index 20ec09d5..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_common.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_common.cpython-313.pyc deleted file mode 100644 index ec35f9b8..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_common.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_functional.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_functional.cpython-313.pyc deleted file mode 100644 index a51b3055..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_functional.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-313.pyc deleted file mode 100644 index 6a499edd..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/abc.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/abc.cpython-313.pyc deleted file mode 100644 index e3b33334..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/abc.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/readers.cpython-313.pyc b/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/readers.cpython-313.pyc deleted file mode 100644 index 7fc1a1be..00000000 Binary files a/Python313_13_x86_Template/Lib/importlib/resources/__pycache__/readers.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/importlib/resources/_common.py b/Python313_13_x86_Template/Lib/importlib/resources/_common.py deleted file mode 100644 index cae4699f..00000000 --- a/Python313_13_x86_Template/Lib/importlib/resources/_common.py +++ /dev/null @@ -1,211 +0,0 @@ -import os -import pathlib -import tempfile -import functools -import contextlib -import types -import importlib -import inspect -import warnings -import itertools - -from typing import Union, Optional, cast -from .abc import ResourceReader, Traversable - -Package = Union[types.ModuleType, str] -Anchor = Package - - -def package_to_anchor(func): - """ - Replace 'package' parameter as 'anchor' and warn about the change. - - Other errors should fall through. 
- - >>> files('a', 'b') - Traceback (most recent call last): - TypeError: files() takes from 0 to 1 positional arguments but 2 were given - - Remove this compatibility in Python 3.14. - """ - undefined = object() - - @functools.wraps(func) - def wrapper(anchor=undefined, package=undefined): - if package is not undefined: - if anchor is not undefined: - return func(anchor, package) - warnings.warn( - "First parameter to files is renamed to 'anchor'", - DeprecationWarning, - stacklevel=2, - ) - return func(package) - elif anchor is undefined: - return func() - return func(anchor) - - return wrapper - - -@package_to_anchor -def files(anchor: Optional[Anchor] = None) -> Traversable: - """ - Get a Traversable resource for an anchor. - """ - return from_package(resolve(anchor)) - - -def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]: - """ - Return the package's loader if it's a ResourceReader. - """ - # We can't use - # a issubclass() check here because apparently abc.'s __subclasscheck__() - # hook wants to create a weak reference to the object, but - # zipimport.zipimporter does not support weak references, resulting in a - # TypeError. That seems terrible. - spec = package.__spec__ - reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore[union-attr] - if reader is None: - return None - return reader(spec.name) # type: ignore[union-attr] - - -@functools.singledispatch -def resolve(cand: Optional[Anchor]) -> types.ModuleType: - return cast(types.ModuleType, cand) - - -@resolve.register -def _(cand: str) -> types.ModuleType: - return importlib.import_module(cand) - - -@resolve.register -def _(cand: None) -> types.ModuleType: - return resolve(_infer_caller().f_globals['__name__']) - - -def _infer_caller(): - """ - Walk the stack and find the frame of the first caller not in this module. - """ - - def is_this_file(frame_info): - return frame_info.filename == stack[0].filename - - def is_wrapper(frame_info): - return frame_info.function == 'wrapper' - - stack = inspect.stack() - not_this_file = itertools.filterfalse(is_this_file, stack) - # also exclude 'wrapper' due to singledispatch in the call stack - callers = itertools.filterfalse(is_wrapper, not_this_file) - return next(callers).frame - - -def from_package(package: types.ModuleType): - """ - Return a Traversable object for the given package. - - """ - # deferred for performance (python/cpython#109829) - from ._adapters import wrap_spec - - spec = wrap_spec(package) - reader = spec.loader.get_resource_reader(spec.name) - return reader.files() - - -@contextlib.contextmanager -def _tempfile( - reader, - suffix='', - # gh-93353: Keep a reference to call os.remove() in late Python - # finalization. - *, - _os_remove=os.remove, -): - # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' - # blocks due to the need to close the temporary file to work on Windows - # properly. - fd, raw_path = tempfile.mkstemp(suffix=suffix) - try: - try: - os.write(fd, reader()) - finally: - os.close(fd) - del reader - yield pathlib.Path(raw_path) - finally: - try: - _os_remove(raw_path) - except FileNotFoundError: - pass - - -def _temp_file(path): - return _tempfile(path.read_bytes, suffix=path.name) - - -def _is_present_dir(path: Traversable) -> bool: - """ - Some Traversables implement ``is_dir()`` to raise an - exception (i.e. ``FileNotFoundError``) when the - directory doesn't exist. This function wraps that call - to always return a boolean and only return True - if there's a dir and it exists. 
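# --- Editor's annotation (not part of the diff): the public pattern backed by
# the as_file()/_tempfile() plumbing above. "json" is used only because it is
# a stdlib package that is guaranteed to be importable.
from importlib.resources import files, as_file

resource = files("json") / "__init__.py"
with as_file(resource) as path:
    # path is a concrete pathlib.Path; a temporary copy is only made when the
    # resource does not already live on the file system (e.g. zip imports).
    print(path, path.exists())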
- """ - with contextlib.suppress(FileNotFoundError): - return path.is_dir() - return False - - -@functools.singledispatch -def as_file(path): - """ - Given a Traversable object, return that object as a - path on the local file system in a context manager. - """ - return _temp_dir(path) if _is_present_dir(path) else _temp_file(path) - - -@as_file.register(pathlib.Path) -@contextlib.contextmanager -def _(path): - """ - Degenerate behavior for pathlib.Path objects. - """ - yield path - - -@contextlib.contextmanager -def _temp_path(dir: tempfile.TemporaryDirectory): - """ - Wrap tempfile.TemporyDirectory to return a pathlib object. - """ - with dir as result: - yield pathlib.Path(result) - - -@contextlib.contextmanager -def _temp_dir(path): - """ - Given a traversable dir, recursively replicate the whole tree - to the file system in a context manager. - """ - assert path.is_dir() - with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: - yield _write_contents(temp_dir, path) - - -def _write_contents(target, source): - child = target.joinpath(source.name) - if source.is_dir(): - child.mkdir() - for item in source.iterdir(): - _write_contents(child, item) - else: - child.write_bytes(source.read_bytes()) - return child diff --git a/Python313_13_x86_Template/Lib/importlib/util.py b/Python313_13_x86_Template/Lib/importlib/util.py deleted file mode 100644 index 284206b6..00000000 --- a/Python313_13_x86_Template/Lib/importlib/util.py +++ /dev/null @@ -1,274 +0,0 @@ -"""Utility code for constructing importers, etc.""" -from ._abc import Loader -from ._bootstrap import module_from_spec -from ._bootstrap import _resolve_name -from ._bootstrap import spec_from_loader -from ._bootstrap import _find_spec -from ._bootstrap_external import MAGIC_NUMBER -from ._bootstrap_external import _RAW_MAGIC_NUMBER -from ._bootstrap_external import cache_from_source -from ._bootstrap_external import decode_source -from ._bootstrap_external import source_from_cache -from ._bootstrap_external import spec_from_file_location - -import _imp -import sys -import types - - -def source_hash(source_bytes): - "Return the hash of *source_bytes* as used in hash-based pyc files." - return _imp.source_hash(_RAW_MAGIC_NUMBER, source_bytes) - - -def resolve_name(name, package): - """Resolve a relative module name to an absolute one.""" - if not name.startswith('.'): - return name - elif not package: - raise ImportError(f'no package specified for {repr(name)} ' - '(required for relative module names)') - level = 0 - for character in name: - if character != '.': - break - level += 1 - return _resolve_name(name[level:], package, level) - - -def _find_spec_from_path(name, path=None): - """Return the spec for the specified module. - - First, sys.modules is checked to see if the module was already imported. If - so, then sys.modules[name].__spec__ is returned. If that happens to be - set to None, then ValueError is raised. If the module is not in - sys.modules, then sys.meta_path is searched for a suitable spec with the - value of 'path' given to the finders. None is returned if no spec could - be found. - - Dotted names do not have their parent packages implicitly imported. You will - most likely need to explicitly import all parent packages in the proper - order for a submodule to get the correct spec. 
- - """ - if name not in sys.modules: - return _find_spec(name, path) - else: - module = sys.modules[name] - if module is None: - return None - try: - spec = module.__spec__ - except AttributeError: - raise ValueError(f'{name}.__spec__ is not set') from None - else: - if spec is None: - raise ValueError(f'{name}.__spec__ is None') - return spec - - -def find_spec(name, package=None): - """Return the spec for the specified module. - - First, sys.modules is checked to see if the module was already imported. If - so, then sys.modules[name].__spec__ is returned. If that happens to be - set to None, then ValueError is raised. If the module is not in - sys.modules, then sys.meta_path is searched for a suitable spec with the - value of 'path' given to the finders. None is returned if no spec could - be found. - - If the name is for submodule (contains a dot), the parent module is - automatically imported. - - The name and package arguments work the same as importlib.import_module(). - In other words, relative module names (with leading dots) work. - - """ - fullname = resolve_name(name, package) if name.startswith('.') else name - if fullname not in sys.modules: - parent_name = fullname.rpartition('.')[0] - if parent_name: - parent = __import__(parent_name, fromlist=['__path__']) - try: - parent_path = parent.__path__ - except AttributeError as e: - raise ModuleNotFoundError( - f"__path__ attribute not found on {parent_name!r} " - f"while trying to find {fullname!r}", name=fullname) from e - else: - parent_path = None - return _find_spec(fullname, parent_path) - else: - module = sys.modules[fullname] - if module is None: - return None - try: - spec = module.__spec__ - except AttributeError: - raise ValueError(f'{name}.__spec__ is not set') from None - else: - if spec is None: - raise ValueError(f'{name}.__spec__ is None') - return spec - - -# Normally we would use contextlib.contextmanager. However, this module -# is imported by runpy, which means we want to avoid any unnecessary -# dependencies. Thus we use a class. - -class _incompatible_extension_module_restrictions: - """A context manager that can temporarily skip the compatibility check. - - NOTE: This function is meant to accommodate an unusual case; one - which is likely to eventually go away. There's is a pretty good - chance this is not what you were looking for. - - WARNING: Using this function to disable the check can lead to - unexpected behavior and even crashes. It should only be used during - extension module development. - - If "disable_check" is True then the compatibility check will not - happen while the context manager is active. Otherwise the check - *will* happen. - - Normally, extensions that do not support multiple interpreters - may not be imported in a subinterpreter. That implies modules - that do not implement multi-phase init or that explicitly of out. - - Likewise for modules import in a subinterpreter with its own GIL - when the extension does not support a per-interpreter GIL. This - implies the module does not have a Py_mod_multiple_interpreters slot - set to Py_MOD_PER_INTERPRETER_GIL_SUPPORTED. - - In both cases, this context manager may be used to temporarily - disable the check for compatible extension modules. - - You can get the same effect as this function by implementing the - basic interface of multi-phase init (PEP 489) and lying about - support for multiple interpreters (or per-interpreter GIL). 
- """ - - def __init__(self, *, disable_check): - self.disable_check = bool(disable_check) - - def __enter__(self): - self.old = _imp._override_multi_interp_extensions_check(self.override) - return self - - def __exit__(self, *args): - old = self.old - del self.old - _imp._override_multi_interp_extensions_check(old) - - @property - def override(self): - return -1 if self.disable_check else 1 - - -class _LazyModule(types.ModuleType): - - """A subclass of the module type which triggers loading upon attribute access.""" - - def __getattribute__(self, attr): - """Trigger the load of the module and return the attribute.""" - __spec__ = object.__getattribute__(self, '__spec__') - loader_state = __spec__.loader_state - with loader_state['lock']: - # Only the first thread to get the lock should trigger the load - # and reset the module's class. The rest can now getattr(). - if object.__getattribute__(self, '__class__') is _LazyModule: - __class__ = loader_state['__class__'] - - # Reentrant calls from the same thread must be allowed to proceed without - # triggering the load again. - # exec_module() and self-referential imports are the primary ways this can - # happen, but in any case we must return something to avoid deadlock. - if loader_state['is_loading']: - return __class__.__getattribute__(self, attr) - loader_state['is_loading'] = True - - __dict__ = __class__.__getattribute__(self, '__dict__') - - # All module metadata must be gathered from __spec__ in order to avoid - # using mutated values. - # Get the original name to make sure no object substitution occurred - # in sys.modules. - original_name = __spec__.name - # Figure out exactly what attributes were mutated between the creation - # of the module and now. - attrs_then = loader_state['__dict__'] - attrs_now = __dict__ - attrs_updated = {} - for key, value in attrs_now.items(): - # Code that set an attribute may have kept a reference to the - # assigned object, making identity more important than equality. - if key not in attrs_then: - attrs_updated[key] = value - elif id(attrs_now[key]) != id(attrs_then[key]): - attrs_updated[key] = value - __spec__.loader.exec_module(self) - # If exec_module() was used directly there is no guarantee the module - # object was put into sys.modules. - if original_name in sys.modules: - if id(self) != id(sys.modules[original_name]): - raise ValueError(f"module object for {original_name!r} " - "substituted in sys.modules during a lazy " - "load") - # Update after loading since that's what would happen in an eager - # loading situation. - __dict__.update(attrs_updated) - # Finally, stop triggering this method, if the module did not - # already update its own __class__. - if isinstance(self, _LazyModule): - object.__setattr__(self, '__class__', __class__) - - return getattr(self, attr) - - def __delattr__(self, attr): - """Trigger the load and then perform the deletion.""" - # To trigger the load and raise an exception if the attribute - # doesn't exist. 
- self.__getattribute__(attr) - delattr(self, attr) - - -class LazyLoader(Loader): - - """A loader that creates a module which defers loading until attribute access.""" - - @staticmethod - def __check_eager_loader(loader): - if not hasattr(loader, 'exec_module'): - raise TypeError('loader must define exec_module()') - - @classmethod - def factory(cls, loader): - """Construct a callable which returns the eager loader made lazy.""" - cls.__check_eager_loader(loader) - return lambda *args, **kwargs: cls(loader(*args, **kwargs)) - - def __init__(self, loader): - self.__check_eager_loader(loader) - self.loader = loader - - def create_module(self, spec): - return self.loader.create_module(spec) - - def exec_module(self, module): - """Make the module load lazily.""" - # Threading is only needed for lazy loading, and importlib.util can - # be pulled in at interpreter startup, so defer until needed. - import threading - module.__spec__.loader = self.loader - module.__loader__ = self.loader - # Don't need to worry about deep-copying as trying to set an attribute - # on an object would have triggered the load, - # e.g. ``module.__spec__.loader = None`` would trigger a load from - # trying to access module.__spec__. - loader_state = {} - loader_state['__dict__'] = module.__dict__.copy() - loader_state['__class__'] = module.__class__ - loader_state['lock'] = threading.RLock() - loader_state['is_loading'] = False - module.__spec__.loader_state = loader_state - module.__class__ = _LazyModule diff --git a/Python313_13_x86_Template/Lib/inspect.py b/Python313_13_x86_Template/Lib/inspect.py deleted file mode 100644 index d74444e2..00000000 --- a/Python313_13_x86_Template/Lib/inspect.py +++ /dev/null @@ -1,3474 +0,0 @@ -"""Get useful information from live Python objects. - -This module encapsulates the interface provided by the internal special -attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion. -It also provides some help for examining source code and class layout. - -Here are some of the useful functions provided by this module: - - ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(), - isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(), - isroutine() - check object types - getmembers() - get members of an object that satisfy a given condition - - getfile(), getsourcefile(), getsource() - find an object's source code - getdoc(), getcomments() - get documentation on an object - getmodule() - determine the module that an object came from - getclasstree() - arrange classes so as to represent their hierarchy - - getargvalues(), getcallargs() - get info about function arguments - getfullargspec() - same, with support for Python 3 features - formatargvalues() - format an argument spec - getouterframes(), getinnerframes() - get info about frames - currentframe() - get the current stack frame - stack(), trace() - get info about frames on the stack or in a traceback - - signature() - get a Signature object for the callable - - get_annotations() - safely compute an object's annotations -""" - -# This module is in the public domain. No warranties. 
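# --- Editor's annotation (not part of the diff): a taste of the helpers the
# inspect module docstring above enumerates; "sample" is an invented function.
import inspect

def sample(a, b=2, *args, **kwargs):
    """A docstring."""
    return a + b

print(inspect.isfunction(sample))  # True
print(inspect.signature(sample))   # (a, b=2, *args, **kwargs)
print(inspect.getdoc(sample))      # A docstring.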
-
-__author__ = ('Ka-Ping Yee <ping@lfw.org>',
-              'Yury Selivanov <yselivanov@sprymix.com>')
-
-__all__ = [
-    "AGEN_CLOSED",
-    "AGEN_CREATED",
-    "AGEN_RUNNING",
-    "AGEN_SUSPENDED",
-    "ArgInfo",
-    "Arguments",
-    "Attribute",
-    "BlockFinder",
-    "BoundArguments",
-    "BufferFlags",
-    "CORO_CLOSED",
-    "CORO_CREATED",
-    "CORO_RUNNING",
-    "CORO_SUSPENDED",
-    "CO_ASYNC_GENERATOR",
-    "CO_COROUTINE",
-    "CO_GENERATOR",
-    "CO_ITERABLE_COROUTINE",
-    "CO_NESTED",
-    "CO_NEWLOCALS",
-    "CO_NOFREE",
-    "CO_OPTIMIZED",
-    "CO_VARARGS",
-    "CO_VARKEYWORDS",
-    "ClassFoundException",
-    "ClosureVars",
-    "EndOfBlock",
-    "FrameInfo",
-    "FullArgSpec",
-    "GEN_CLOSED",
-    "GEN_CREATED",
-    "GEN_RUNNING",
-    "GEN_SUSPENDED",
-    "Parameter",
-    "Signature",
-    "TPFLAGS_IS_ABSTRACT",
-    "Traceback",
-    "classify_class_attrs",
-    "cleandoc",
-    "currentframe",
-    "findsource",
-    "formatannotation",
-    "formatannotationrelativeto",
-    "formatargvalues",
-    "get_annotations",
-    "getabsfile",
-    "getargs",
-    "getargvalues",
-    "getasyncgenlocals",
-    "getasyncgenstate",
-    "getattr_static",
-    "getblock",
-    "getcallargs",
-    "getclasstree",
-    "getclosurevars",
-    "getcomments",
-    "getcoroutinelocals",
-    "getcoroutinestate",
-    "getdoc",
-    "getfile",
-    "getframeinfo",
-    "getfullargspec",
-    "getgeneratorlocals",
-    "getgeneratorstate",
-    "getinnerframes",
-    "getlineno",
-    "getmembers",
-    "getmembers_static",
-    "getmodule",
-    "getmodulename",
-    "getmro",
-    "getouterframes",
-    "getsource",
-    "getsourcefile",
-    "getsourcelines",
-    "indentsize",
-    "isabstract",
-    "isasyncgen",
-    "isasyncgenfunction",
-    "isawaitable",
-    "isbuiltin",
-    "isclass",
-    "iscode",
-    "iscoroutine",
-    "iscoroutinefunction",
-    "isdatadescriptor",
-    "isframe",
-    "isfunction",
-    "isgenerator",
-    "isgeneratorfunction",
-    "isgetsetdescriptor",
-    "ismemberdescriptor",
-    "ismethod",
-    "ismethoddescriptor",
-    "ismethodwrapper",
-    "ismodule",
-    "isroutine",
-    "istraceback",
-    "markcoroutinefunction",
-    "signature",
-    "stack",
-    "trace",
-    "unwrap",
-    "walktree",
-]
-
-
-import abc
-import ast
-import dis
-import collections.abc
-import enum
-import importlib.machinery
-import itertools
-import linecache
-import os
-import re
-import sys
-import tokenize
-import token
-import types
-import functools
-import builtins
-from keyword import iskeyword
-from operator import attrgetter
-from collections import namedtuple, OrderedDict
-from weakref import ref as make_weakref
-
-# Create constants for the compiler flags in Include/code.h
-# We try to get them from dis to avoid duplication
-mod_dict = globals()
-for k, v in dis.COMPILER_FLAG_NAMES.items():
-    mod_dict["CO_" + v] = k
-del k, v, mod_dict
-
-# See Include/object.h
-TPFLAGS_IS_ABSTRACT = 1 << 20
-
-
-def get_annotations(obj, *, globals=None, locals=None, eval_str=False):
-    """Compute the annotations dict for an object.
-
-    obj may be a callable, class, or module.
-    Passing in an object of any other type raises TypeError.
-
-    Returns a dict. get_annotations() returns a new dict every time
-    it's called; calling it twice on the same object will return two
-    different but equivalent dicts.
-
-    This function handles several details for you:
-
-      * If eval_str is true, values of type str will
-        be un-stringized using eval(). This is intended
-        for use with stringized annotations
-        ("from __future__ import annotations").
-      * If obj doesn't have an annotations dict, returns an
-        empty dict. (Functions and methods always have an
-        annotations dict; classes, modules, and other types of
-        callables may not.)
- * Ignores inherited annotations on classes. If a class - doesn't have its own annotations dict, returns an empty dict. - * All accesses to object members and dict values are done - using getattr() and dict.get() for safety. - * Always, always, always returns a freshly-created dict. - - eval_str controls whether or not values of type str are replaced - with the result of calling eval() on those values: - - * If eval_str is true, eval() is called on values of type str. - * If eval_str is false (the default), values of type str are unchanged. - - globals and locals are passed in to eval(); see the documentation - for eval() for more information. If either globals or locals is - None, this function may replace that value with a context-specific - default, contingent on type(obj): - - * If obj is a module, globals defaults to obj.__dict__. - * If obj is a class, globals defaults to - sys.modules[obj.__module__].__dict__ and locals - defaults to the obj class namespace. - * If obj is a callable, globals defaults to obj.__globals__, - although if obj is a wrapped function (using - functools.update_wrapper()) it is first unwrapped. - """ - if isinstance(obj, type): - # class - obj_dict = getattr(obj, '__dict__', None) - if obj_dict and hasattr(obj_dict, 'get'): - ann = obj_dict.get('__annotations__', None) - if isinstance(ann, types.GetSetDescriptorType): - ann = None - else: - ann = None - - obj_globals = None - module_name = getattr(obj, '__module__', None) - if module_name: - module = sys.modules.get(module_name, None) - if module: - obj_globals = getattr(module, '__dict__', None) - obj_locals = dict(vars(obj)) - unwrap = obj - elif isinstance(obj, types.ModuleType): - # module - ann = getattr(obj, '__annotations__', None) - obj_globals = getattr(obj, '__dict__') - obj_locals = None - unwrap = None - elif callable(obj): - # this includes types.Function, types.BuiltinFunctionType, - # types.BuiltinMethodType, functools.partial, functools.singledispatch, - # "class funclike" from Lib/test/test_inspect... on and on it goes. 
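# --- Editor's annotation (not part of the diff): the eval_str behaviour
# described in the docstring above, shown with a stringized annotation.
import inspect

def f(x: "int") -> "str":
    return str(x)

print(inspect.get_annotations(f))                 # {'x': 'int', 'return': 'str'}
print(inspect.get_annotations(f, eval_str=True))  # {'x': <class 'int'>, ...}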
-        ann = getattr(obj, '__annotations__', None)
-        obj_globals = getattr(obj, '__globals__', None)
-        obj_locals = None
-        unwrap = obj
-    else:
-        raise TypeError(f"{obj!r} is not a module, class, or callable.")
-
-    if ann is None:
-        return {}
-
-    if not isinstance(ann, dict):
-        raise ValueError(f"{obj!r}.__annotations__ is neither a dict nor None")
-
-    if not ann:
-        return {}
-
-    if not eval_str:
-        return dict(ann)
-
-    if unwrap is not None:
-        while True:
-            if hasattr(unwrap, '__wrapped__'):
-                unwrap = unwrap.__wrapped__
-                continue
-            if isinstance(unwrap, functools.partial):
-                unwrap = unwrap.func
-                continue
-            break
-        if hasattr(unwrap, "__globals__"):
-            obj_globals = unwrap.__globals__
-
-    if globals is None:
-        globals = obj_globals
-    if locals is None:
-        locals = obj_locals or {}
-
-    # "Inject" type parameters into the local namespace
-    # (unless they are shadowed by assignments *in* the local namespace),
-    # as a way of emulating annotation scopes when calling `eval()`
-    if type_params := getattr(obj, "__type_params__", ()):
-        locals = {param.__name__: param for param in type_params} | locals
-
-    return_value = {
-        key: value if not isinstance(value, str)
-        else eval(value, globals, locals)
-        for key, value in ann.items() }
-    return return_value
-
-
-# ----------------------------------------------------------- type-checking
-def ismodule(object):
-    """Return true if the object is a module."""
-    return isinstance(object, types.ModuleType)
-
-def isclass(object):
-    """Return true if the object is a class."""
-    return isinstance(object, type)
-
-def ismethod(object):
-    """Return true if the object is an instance method."""
-    return isinstance(object, types.MethodType)
-
-def ismethoddescriptor(object):
-    """Return true if the object is a method descriptor.
-
-    But not if ismethod() or isclass() or isfunction() are true.
-
-    This is new in Python 2.2, and, for example, is true of int.__add__.
-    An object passing this test has a __get__ attribute, but not a
-    __set__ attribute or a __delete__ attribute. Beyond that, the set
-    of attributes varies; __name__ is usually sensible, and __doc__
-    often is.
-
-    Methods implemented via descriptors that also pass one of the other
-    tests return false from the ismethoddescriptor() test, simply because
-    the other tests promise more -- you can, e.g., count on having the
-    __func__ attribute (etc) when an object passes ismethod()."""
-    if isclass(object) or ismethod(object) or isfunction(object):
-        # mutual exclusion
-        return False
-    if isinstance(object, functools.partial):
-        # Lie for children. The addition of partial.__get__
-        # doesn't currently change the partial objects behaviour,
-        # not counting a warning about future changes.
-        return False
-    tp = type(object)
-    return (hasattr(tp, "__get__")
-            and not hasattr(tp, "__set__")
-            and not hasattr(tp, "__delete__"))
-
-def isdatadescriptor(object):
-    """Return true if the object is a data descriptor.
-
-    Data descriptors have a __set__ or a __delete__ attribute. Examples are
-    properties (defined in Python) and getsets and members (defined in C).
- Typically, data descriptors will also have __name__ and __doc__ attributes - (properties, getsets, and members have both of these attributes), but this - is not guaranteed.""" - if isclass(object) or ismethod(object) or isfunction(object): - # mutual exclusion - return False - tp = type(object) - return hasattr(tp, "__set__") or hasattr(tp, "__delete__") - -if hasattr(types, 'MemberDescriptorType'): - # CPython and equivalent - def ismemberdescriptor(object): - """Return true if the object is a member descriptor. - - Member descriptors are specialized descriptors defined in extension - modules.""" - return isinstance(object, types.MemberDescriptorType) -else: - # Other implementations - def ismemberdescriptor(object): - """Return true if the object is a member descriptor. - - Member descriptors are specialized descriptors defined in extension - modules.""" - return False - -if hasattr(types, 'GetSetDescriptorType'): - # CPython and equivalent - def isgetsetdescriptor(object): - """Return true if the object is a getset descriptor. - - getset descriptors are specialized descriptors defined in extension - modules.""" - return isinstance(object, types.GetSetDescriptorType) -else: - # Other implementations - def isgetsetdescriptor(object): - """Return true if the object is a getset descriptor. - - getset descriptors are specialized descriptors defined in extension - modules.""" - return False - -def isfunction(object): - """Return true if the object is a user-defined function. - - Function objects provide these attributes: - __doc__ documentation string - __name__ name with which this function was defined - __code__ code object containing compiled function bytecode - __defaults__ tuple of any default values for arguments - __globals__ global namespace in which this function was defined - __annotations__ dict of parameter annotations - __kwdefaults__ dict of keyword only parameters with defaults""" - return isinstance(object, types.FunctionType) - -def _has_code_flag(f, flag): - """Return true if ``f`` is a function (or a method or functools.partial - wrapper wrapping a function or a functools.partialmethod wrapping a - function) whose code object has the given ``flag`` - set in its flags.""" - f = functools._unwrap_partialmethod(f) - while ismethod(f): - f = f.__func__ - f = functools._unwrap_partial(f) - if not (isfunction(f) or _signature_is_functionlike(f)): - return False - return bool(f.__code__.co_flags & flag) - -def isgeneratorfunction(obj): - """Return true if the object is a user-defined generator function. - - Generator function objects provide the same attributes as functions. - See help(isfunction) for a list of attributes.""" - return _has_code_flag(obj, CO_GENERATOR) - -# A marker for markcoroutinefunction and iscoroutinefunction. -_is_coroutine_mark = object() - -def _has_coroutine_mark(f): - while ismethod(f): - f = f.__func__ - f = functools._unwrap_partial(f) - return getattr(f, "_is_coroutine_marker", None) is _is_coroutine_mark - -def markcoroutinefunction(func): - """ - Decorator to ensure callable is recognised as a coroutine function. - """ - if hasattr(func, '__func__'): - func = func.__func__ - func._is_coroutine_marker = _is_coroutine_mark - return func - -def iscoroutinefunction(obj): - """Return true if the object is a coroutine function. - - Coroutine functions are normally defined with "async def" syntax, but may - be marked via markcoroutinefunction. 
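# --- Editor's annotation (not part of the diff): markcoroutinefunction
# (Python 3.12+) makes a callable that merely returns an awaitable pass the
# iscoroutinefunction() check described above.
import asyncio
import inspect

async def _impl():
    return 42

@inspect.markcoroutinefunction
def coro_like():
    return _impl()

print(inspect.iscoroutinefunction(coro_like))  # True
print(asyncio.run(coro_like()))                # 42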
- """ - return _has_code_flag(obj, CO_COROUTINE) or _has_coroutine_mark(obj) - -def isasyncgenfunction(obj): - """Return true if the object is an asynchronous generator function. - - Asynchronous generator functions are defined with "async def" - syntax and have "yield" expressions in their body. - """ - return _has_code_flag(obj, CO_ASYNC_GENERATOR) - -def isasyncgen(object): - """Return true if the object is an asynchronous generator.""" - return isinstance(object, types.AsyncGeneratorType) - -def isgenerator(object): - """Return true if the object is a generator. - - Generator objects provide these attributes: - __iter__ defined to support iteration over container - close raises a new GeneratorExit exception inside the - generator to terminate the iteration - gi_code code object - gi_frame frame object or possibly None once the generator has - been exhausted - gi_running set to 1 when generator is executing, 0 otherwise - gi_suspended set to 1 when the generator is suspended at a yield point, 0 otherwise - gi_yieldfrom object being iterated by yield from or None - next return the next item from the container - send resumes the generator and "sends" a value that becomes - the result of the current yield-expression - throw used to raise an exception inside the generator""" - return isinstance(object, types.GeneratorType) - -def iscoroutine(object): - """Return true if the object is a coroutine.""" - return isinstance(object, types.CoroutineType) - -def isawaitable(object): - """Return true if object can be passed to an ``await`` expression.""" - return (isinstance(object, types.CoroutineType) or - isinstance(object, types.GeneratorType) and - bool(object.gi_code.co_flags & CO_ITERABLE_COROUTINE) or - isinstance(object, collections.abc.Awaitable)) - -def istraceback(object): - """Return true if the object is a traceback. - - Traceback objects provide these attributes: - tb_frame frame object at this level - tb_lasti index of last attempted instruction in bytecode - tb_lineno current line number in Python source code - tb_next next inner traceback object (called by this level)""" - return isinstance(object, types.TracebackType) - -def isframe(object): - """Return true if the object is a frame object. - - Frame objects provide these attributes: - f_back next outer frame object (this frame's caller) - f_builtins built-in namespace seen by this frame - f_code code object being executed in this frame - f_globals global namespace seen by this frame - f_lasti index of last attempted instruction in bytecode - f_lineno current line number in Python source code - f_locals local namespace seen by this frame - f_trace tracing function for this frame, or None""" - return isinstance(object, types.FrameType) - -def iscode(object): - """Return true if the object is a code object. 
- - Code objects provide these attributes: - co_argcount number of arguments (not including *, ** args - or keyword only arguments) - co_code string of raw compiled bytecode - co_cellvars tuple of names of cell variables - co_consts tuple of constants used in the bytecode - co_filename name of file in which this code object was created - co_firstlineno number of first line in Python source code - co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg - | 16=nested | 32=generator | 64=nofree | 128=coroutine - | 256=iterable_coroutine | 512=async_generator - co_freevars tuple of names of free variables - co_posonlyargcount number of positional only arguments - co_kwonlyargcount number of keyword only arguments (not including ** arg) - co_lnotab encoded mapping of line numbers to bytecode indices - co_name name with which this code object was defined - co_names tuple of names other than arguments and function locals - co_nlocals number of local variables - co_stacksize virtual machine stack space required - co_varnames tuple of names of arguments and local variables""" - return isinstance(object, types.CodeType) - -def isbuiltin(object): - """Return true if the object is a built-in function or method. - - Built-in functions and methods provide these attributes: - __doc__ documentation string - __name__ original name of this function or method - __self__ instance to which a method is bound, or None""" - return isinstance(object, types.BuiltinFunctionType) - -def ismethodwrapper(object): - """Return true if the object is a method wrapper.""" - return isinstance(object, types.MethodWrapperType) - -def isroutine(object): - """Return true if the object is any kind of function or method.""" - return (isbuiltin(object) - or isfunction(object) - or ismethod(object) - or ismethoddescriptor(object) - or ismethodwrapper(object)) - -def isabstract(object): - """Return true if the object is an abstract base class (ABC).""" - if not isinstance(object, type): - return False - if object.__flags__ & TPFLAGS_IS_ABSTRACT: - return True - if not issubclass(type(object), abc.ABCMeta): - return False - if hasattr(object, '__abstractmethods__'): - # It looks like ABCMeta.__new__ has finished running; - # TPFLAGS_IS_ABSTRACT should have been accurate. - return False - # It looks like ABCMeta.__new__ has not finished running yet; we're - # probably in __init_subclass__. We'll look for abstractmethods manually. - for name, value in object.__dict__.items(): - if getattr(value, "__isabstractmethod__", False): - return True - for base in object.__bases__: - for name in getattr(base, "__abstractmethods__", ()): - value = getattr(object, name, None) - if getattr(value, "__isabstractmethod__", False): - return True - return False - -def _getmembers(object, predicate, getter): - results = [] - processed = set() - names = dir(object) - if isclass(object): - mro = getmro(object) - # add any DynamicClassAttributes to the list of names if object is a class; - # this may result in duplicate entries if, for example, a virtual - # attribute with the same name as a DynamicClassAttribute exists - try: - for base in object.__bases__: - for k, v in base.__dict__.items(): - if isinstance(v, types.DynamicClassAttribute): - names.append(k) - except AttributeError: - pass - else: - mro = () - for key in names: - # First try to get the value via getattr. Some descriptors don't - # like calling their __get__ (see bug #1785), so fall back to - # looking in the __dict__. 
-        try:
-            value = getter(object, key)
-            # handle the duplicate key
-            if key in processed:
-                raise AttributeError
-        except AttributeError:
-            for base in mro:
-                if key in base.__dict__:
-                    value = base.__dict__[key]
-                    break
-            else:
-                # could be a (currently) missing slot member, or a buggy
-                # __dir__; discard and move on
-                continue
-        if not predicate or predicate(value):
-            results.append((key, value))
-        processed.add(key)
-    results.sort(key=lambda pair: pair[0])
-    return results
-
-def getmembers(object, predicate=None):
-    """Return all members of an object as (name, value) pairs sorted by name.
-    Optionally, only return members that satisfy a given predicate."""
-    return _getmembers(object, predicate, getattr)
-
-def getmembers_static(object, predicate=None):
-    """Return all members of an object as (name, value) pairs sorted by name
-    without triggering dynamic lookup via the descriptor protocol,
-    __getattr__ or __getattribute__. Optionally, only return members that
-    satisfy a given predicate.
-
-    Note: this function may not be able to retrieve all members
-       that getmembers can fetch (like dynamically created attributes)
-       and may find members that getmembers can't (like descriptors
-       that raise AttributeError). It can also return descriptor objects
-       instead of instance members in some cases.
-    """
-    return _getmembers(object, predicate, getattr_static)
-
-Attribute = namedtuple('Attribute', 'name kind defining_class object')
-
-def classify_class_attrs(cls):
-    """Return list of attribute-descriptor tuples.
-
-    For each name in dir(cls), the return list contains a 4-tuple
-    with these elements:
-
-        0. The name (a string).
-
-        1. The kind of attribute this is, one of these strings:
-               'class method'    created via classmethod()
-               'static method'   created via staticmethod()
-               'property'        created via property()
-               'method'          any other flavor of method or descriptor
-               'data'            not a method
-
-        2. The class which defined this attribute (a class).
-
-        3. The object as obtained by calling getattr; if this fails, or if the
-           resulting object does not live anywhere in the class' mro (including
-           metaclasses) then the object is looked up in the defining class's
-           dict (found by walking the mro).
-
-    If one of the items in dir(cls) is stored in the metaclass it will now
-    be discovered and not have None be listed as the class in which it was
-    defined. Any items whose home class cannot be discovered are skipped.
-    """
-
-    mro = getmro(cls)
-    metamro = getmro(type(cls)) # for attributes stored in the metaclass
-    metamro = tuple(cls for cls in metamro if cls not in (type, object))
-    class_bases = (cls,) + mro
-    all_bases = class_bases + metamro
-    names = dir(cls)
-    # add any DynamicClassAttributes to the list of names;
-    # this may result in duplicate entries if, for example, a virtual
-    # attribute with the same name as a DynamicClassAttribute exists.
-    for base in mro:
-        for k, v in base.__dict__.items():
-            if isinstance(v, types.DynamicClassAttribute) and v.fget is not None:
-                names.append(k)
-    result = []
-    processed = set()
-
-    for name in names:
-        # Get the object associated with the name, and where it was defined.
-        # Normal objects will be looked up with both getattr and directly in
-        # its class' dict (in case getattr fails [bug #1785], and also to look
-        # for a docstring).
-        # For DynamicClassAttributes on the second pass we only look in the
-        # class's dict.
-        #
-        # Getting an obj from the __dict__ sometimes reveals more than
-        # using getattr. Static and class methods are dramatic examples.
- homecls = None - get_obj = None - dict_obj = None - if name not in processed: - try: - if name == '__dict__': - raise Exception("__dict__ is special, don't want the proxy") - get_obj = getattr(cls, name) - except Exception: - pass - else: - homecls = getattr(get_obj, "__objclass__", homecls) - if homecls not in class_bases: - # if the resulting object does not live somewhere in the - # mro, drop it and search the mro manually - homecls = None - last_cls = None - # first look in the classes - for srch_cls in class_bases: - srch_obj = getattr(srch_cls, name, None) - if srch_obj is get_obj: - last_cls = srch_cls - # then check the metaclasses - for srch_cls in metamro: - try: - srch_obj = srch_cls.__getattr__(cls, name) - except AttributeError: - continue - if srch_obj is get_obj: - last_cls = srch_cls - if last_cls is not None: - homecls = last_cls - for base in all_bases: - if name in base.__dict__: - dict_obj = base.__dict__[name] - if homecls not in metamro: - homecls = base - break - if homecls is None: - # unable to locate the attribute anywhere, most likely due to - # buggy custom __dir__; discard and move on - continue - obj = get_obj if get_obj is not None else dict_obj - # Classify the object or its descriptor. - if isinstance(dict_obj, (staticmethod, types.BuiltinMethodType)): - kind = "static method" - obj = dict_obj - elif isinstance(dict_obj, (classmethod, types.ClassMethodDescriptorType)): - kind = "class method" - obj = dict_obj - elif isinstance(dict_obj, property): - kind = "property" - obj = dict_obj - elif isroutine(obj): - kind = "method" - else: - kind = "data" - result.append(Attribute(name, kind, homecls, obj)) - processed.add(name) - return result - -# ----------------------------------------------------------- class helpers - -def getmro(cls): - "Return tuple of base classes (including cls) in method resolution order." - return cls.__mro__ - -# -------------------------------------------------------- function helpers - -def unwrap(func, *, stop=None): - """Get the object wrapped by *func*. - - Follows the chain of :attr:`__wrapped__` attributes returning the last - object in the chain. - - *stop* is an optional callback accepting an object in the wrapper chain - as its sole argument that allows the unwrapping to be terminated early if - the callback returns a true value. If the callback never returns a true - value, the last object in the chain is returned as usual. For example, - :func:`signature` uses this to stop unwrapping if any object in the - chain has a ``__signature__`` attribute defined. - - :exc:`ValueError` is raised if a cycle is encountered. - - """ - f = func # remember the original func for error reporting - # Memoise by id to tolerate non-hashable objects, but store objects to - # ensure they aren't destroyed, which would allow their IDs to be reused. 
- memo = {id(f): f} - recursion_limit = sys.getrecursionlimit() - while not isinstance(func, type) and hasattr(func, '__wrapped__'): - if stop is not None and stop(func): - break - func = func.__wrapped__ - id_func = id(func) - if (id_func in memo) or (len(memo) >= recursion_limit): - raise ValueError('wrapper loop when unwrapping {!r}'.format(f)) - memo[id_func] = func - return func - -# -------------------------------------------------- source code extraction -def indentsize(line): - """Return the indent size, in spaces, at the start of a line of text.""" - expline = line.expandtabs() - return len(expline) - len(expline.lstrip()) - -def _findclass(func): - cls = sys.modules.get(func.__module__) - if cls is None: - return None - for name in func.__qualname__.split('.')[:-1]: - cls = getattr(cls, name) - if not isclass(cls): - return None - return cls - -def _finddoc(obj): - if isclass(obj): - for base in obj.__mro__: - if base is not object: - try: - doc = base.__doc__ - except AttributeError: - continue - if doc is not None: - return doc - return None - - if ismethod(obj): - name = obj.__func__.__name__ - self = obj.__self__ - if (isclass(self) and - getattr(getattr(self, name, None), '__func__') is obj.__func__): - # classmethod - cls = self - else: - cls = self.__class__ - elif isfunction(obj): - name = obj.__name__ - cls = _findclass(obj) - if cls is None or getattr(cls, name) is not obj: - return None - elif isbuiltin(obj): - name = obj.__name__ - self = obj.__self__ - if (isclass(self) and - self.__qualname__ + '.' + name == obj.__qualname__): - # classmethod - cls = self - else: - cls = self.__class__ - # Should be tested before isdatadescriptor(). - elif isinstance(obj, property): - name = obj.__name__ - cls = _findclass(obj.fget) - if cls is None or getattr(cls, name) is not obj: - return None - elif ismethoddescriptor(obj) or isdatadescriptor(obj): - name = obj.__name__ - cls = obj.__objclass__ - if getattr(cls, name) is not obj: - return None - if ismemberdescriptor(obj): - slots = getattr(cls, '__slots__', None) - if isinstance(slots, dict) and name in slots: - return slots[name] - else: - return None - for base in cls.__mro__: - try: - doc = getattr(base, name).__doc__ - except AttributeError: - continue - if doc is not None: - return doc - return None - -def getdoc(object): - """Get the documentation string for an object. - - All tabs are expanded to spaces. To clean up docstrings that are - indented to line up with blocks of code, any whitespace than can be - uniformly removed from the second line onwards is removed.""" - try: - doc = object.__doc__ - except AttributeError: - return None - if doc is None: - try: - doc = _finddoc(object) - except (AttributeError, TypeError): - return None - if not isinstance(doc, str): - return None - return cleandoc(doc) - -def cleandoc(doc): - """Clean up indentation from docstrings. - - Any whitespace that can be uniformly removed from the second line - onwards is removed.""" - lines = doc.expandtabs().split('\n') - - # Find minimum indentation of any non-blank lines after first line. - margin = sys.maxsize - for line in lines[1:]: - content = len(line.lstrip(' ')) - if content: - indent = len(line) - content - margin = min(margin, indent) - # Remove indentation. - if lines: - lines[0] = lines[0].lstrip(' ') - if margin < sys.maxsize: - for i in range(1, len(lines)): - lines[i] = lines[i][margin:] - # Remove any trailing or leading blank lines. 
- while lines and not lines[-1]: - lines.pop() - while lines and not lines[0]: - lines.pop(0) - return '\n'.join(lines) - - -def getfile(object): - """Work out which source or compiled file an object was defined in.""" - if ismodule(object): - if getattr(object, '__file__', None): - return object.__file__ - raise TypeError('{!r} is a built-in module'.format(object)) - if isclass(object): - if hasattr(object, '__module__'): - module = sys.modules.get(object.__module__) - if getattr(module, '__file__', None): - return module.__file__ - if object.__module__ == '__main__': - raise OSError('source code not available') - raise TypeError('{!r} is a built-in class'.format(object)) - if ismethod(object): - object = object.__func__ - if isfunction(object): - object = object.__code__ - if istraceback(object): - object = object.tb_frame - if isframe(object): - object = object.f_code - if iscode(object): - return object.co_filename - raise TypeError('module, class, method, function, traceback, frame, or ' - 'code object was expected, got {}'.format( - type(object).__name__)) - -def getmodulename(path): - """Return the module name for a given file, or None.""" - fname = os.path.basename(path) - # Check for paths that look like an actual module file - suffixes = [(-len(suffix), suffix) - for suffix in importlib.machinery.all_suffixes()] - suffixes.sort() # try longest suffixes first, in case they overlap - for neglen, suffix in suffixes: - if fname.endswith(suffix): - return fname[:neglen] - return None - -def getsourcefile(object): - """Return the filename that can be used to locate an object's source. - Return None if no way can be identified to get the source. - """ - filename = getfile(object) - all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:] - all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:] - if any(filename.endswith(s) for s in all_bytecode_suffixes): - filename = (os.path.splitext(filename)[0] + - importlib.machinery.SOURCE_SUFFIXES[0]) - elif any(filename.endswith(s) for s in - importlib.machinery.EXTENSION_SUFFIXES): - return None - elif filename.endswith(".fwork"): - # Apple mobile framework markers are another type of non-source file - return None - - # return a filename found in the linecache even if it doesn't exist on disk - if filename in linecache.cache: - return filename - if os.path.exists(filename): - return filename - # only return a non-existent filename if the module has a PEP 302 loader - module = getmodule(object, filename) - if getattr(module, '__loader__', None) is not None: - return filename - elif getattr(getattr(module, "__spec__", None), "loader", None) is not None: - return filename - -def getabsfile(object, _filename=None): - """Return an absolute path to the source or compiled file for an object. 
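For illustration, a minimal sketch of the suffix matching getmodulename() performs above (the paths are hypothetical):

    import inspect

    assert inspect.getmodulename('/usr/lib/python3/foo.py') == 'foo'
    assert inspect.getmodulename('/usr/lib/python3/foo.txt') is None  # not an import suffix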
-
-    The idea is for each object to have a unique origin, so this routine
-    normalizes the result as much as possible."""
-    if _filename is None:
-        _filename = getsourcefile(object) or getfile(object)
-    return os.path.normcase(os.path.abspath(_filename))
-
-modulesbyfile = {}
-_filesbymodname = {}
-
-def getmodule(object, _filename=None):
-    """Return the module an object was defined in, or None if not found."""
-    if ismodule(object):
-        return object
-    if hasattr(object, '__module__'):
-        return sys.modules.get(object.__module__)
-
-    # Try the filename to modulename cache
-    if _filename is not None and _filename in modulesbyfile:
-        return sys.modules.get(modulesbyfile[_filename])
-    # Try the cache again with the absolute file name
-    try:
-        file = getabsfile(object, _filename)
-    except (TypeError, FileNotFoundError):
-        return None
-    if file in modulesbyfile:
-        return sys.modules.get(modulesbyfile[file])
-    # Update the filename to module name cache and check yet again
-    # Copy sys.modules in order to cope with changes while iterating
-    for modname, module in sys.modules.copy().items():
-        if ismodule(module) and hasattr(module, '__file__'):
-            f = module.__file__
-            if f == _filesbymodname.get(modname, None):
-                # Have already mapped this module, so skip it
-                continue
-            _filesbymodname[modname] = f
-            f = getabsfile(module)
-            # Always map to the name the module knows itself by
-            modulesbyfile[f] = modulesbyfile[
-                os.path.realpath(f)] = module.__name__
-    if file in modulesbyfile:
-        return sys.modules.get(modulesbyfile[file])
-    # Check the main module
-    main = sys.modules['__main__']
-    if not hasattr(object, '__name__'):
-        return None
-    if hasattr(main, object.__name__):
-        mainobject = getattr(main, object.__name__)
-        if mainobject is object:
-            return main
-    # Check builtins
-    builtin = sys.modules['builtins']
-    if hasattr(builtin, object.__name__):
-        builtinobject = getattr(builtin, object.__name__)
-        if builtinobject is object:
-            return builtin
-
-
-class ClassFoundException(Exception):
-    pass
-
-
-def findsource(object):
-    """Return the entire source file and starting line number for an object.
-
-    The argument may be a module, class, method, function, traceback, frame,
-    or code object. The source code is returned as a list of all the lines
-    in the file and the line number indexes a line in that list. An OSError
-    is raised if the source code cannot be retrieved."""
-
-    file = getsourcefile(object)
-    if file:
-        # Invalidate cache if needed.
-        linecache.checkcache(file)
-    else:
-        file = getfile(object)
-        # Allow filenames in form of "<something>" to pass through.
-        # `doctest` monkeypatches `linecache` module to enable
-        # inspection, so let `linecache.getlines` to be called.
- if (not (file.startswith('<') and file.endswith('>'))) or file.endswith('.fwork'): - raise OSError('source code not available') - - module = getmodule(object, file) - if module: - lines = linecache.getlines(file, module.__dict__) - if not lines and file.startswith('<') and hasattr(object, "__code__"): - lines = linecache._getlines_from_code(object.__code__) - else: - lines = linecache.getlines(file) - if not lines: - raise OSError('could not get source code') - - if ismodule(object): - return lines, 0 - - if isclass(object): - try: - lnum = vars(object)['__firstlineno__'] - 1 - except (TypeError, KeyError): - raise OSError('source code not available') - if lnum >= len(lines): - raise OSError('lineno is out of bounds') - return lines, lnum - - if ismethod(object): - object = object.__func__ - if isfunction(object): - object = object.__code__ - if istraceback(object): - object = object.tb_frame - if isframe(object): - object = object.f_code - if iscode(object): - if not hasattr(object, 'co_firstlineno'): - raise OSError('could not find function definition') - lnum = object.co_firstlineno - 1 - if lnum >= len(lines): - raise OSError('lineno is out of bounds') - return lines, lnum - raise OSError('could not find code object') - -def getcomments(object): - """Get lines of comments immediately preceding an object's source code. - - Returns None when source can't be found. - """ - try: - lines, lnum = findsource(object) - except (OSError, TypeError): - return None - - if ismodule(object): - # Look for a comment block at the top of the file. - start = 0 - if lines and lines[0][:2] == '#!': start = 1 - while start < len(lines) and lines[start].strip() in ('', '#'): - start = start + 1 - if start < len(lines) and lines[start][:1] == '#': - comments = [] - end = start - while end < len(lines) and lines[end][:1] == '#': - comments.append(lines[end].expandtabs()) - end = end + 1 - return ''.join(comments) - - # Look for a preceding block of comments at the same indentation. - elif lnum > 0: - indent = indentsize(lines[lnum]) - end = lnum - 1 - if end >= 0 and lines[end].lstrip()[:1] == '#' and \ - indentsize(lines[end]) == indent: - comments = [lines[end].expandtabs().lstrip()] - if end > 0: - end = end - 1 - comment = lines[end].expandtabs().lstrip() - while comment[:1] == '#' and indentsize(lines[end]) == indent: - comments[:0] = [comment] - end = end - 1 - if end < 0: break - comment = lines[end].expandtabs().lstrip() - while comments and comments[0].strip() == '#': - comments[:1] = [] - while comments and comments[-1].strip() == '#': - comments[-1:] = [] - return ''.join(comments) - -class EndOfBlock(Exception): pass - -class BlockFinder: - """Provide a tokeneater() method to detect the end of a code block.""" - def __init__(self): - self.indent = 0 - self.singleline = False - self.started = False - self.passline = False - self.indecorator = False - self.last = 1 - self.body_col0 = None - - def tokeneater(self, type, token, srowcol, erowcol, line): - if not self.started and not self.indecorator: - if type in (tokenize.INDENT, tokenize.COMMENT, tokenize.NL): - pass - elif token == "async": - pass - # skip any decorators - elif token == "@": - self.indecorator = True - else: - # For "def" and "class" scan to the end of the block. - # For "lambda" and generator expression scan to - # the end of the logical line. 
-                self.singleline = token not in ("def", "class")
-                self.started = True
-                self.passline = True    # skip to the end of the line
-        elif type == tokenize.NEWLINE:
-            self.passline = False    # stop skipping when a NEWLINE is seen
-            self.last = srowcol[0]
-            if self.singleline:
-                raise EndOfBlock
-            # hitting a NEWLINE when in a decorator without args
-            # ends the decorator
-            if self.indecorator:
-                self.indecorator = False
-        elif self.passline:
-            pass
-        elif type == tokenize.INDENT:
-            if self.body_col0 is None and self.started:
-                self.body_col0 = erowcol[1]
-            self.indent = self.indent + 1
-            self.passline = True
-        elif type == tokenize.DEDENT:
-            self.indent = self.indent - 1
-            # the end of matching indent/dedent pairs end a block
-            # (note that this only works for "def"/"class" blocks,
-            #  not e.g. for "if: else:" or "try: finally:" blocks)
-            if self.indent <= 0:
-                raise EndOfBlock
-        elif type == tokenize.COMMENT:
-            if self.body_col0 is not None and srowcol[1] >= self.body_col0:
-                # Include comments if indented at least as much as the block
-                self.last = srowcol[0]
-        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
-            # any other token on the same indentation level end the previous
-            # block as well, except the pseudo-tokens COMMENT and NL.
-            raise EndOfBlock
-
-def getblock(lines):
-    """Extract the block of code at the top of the given list of lines."""
-    blockfinder = BlockFinder()
-    try:
-        tokens = tokenize.generate_tokens(iter(lines).__next__)
-        for _token in tokens:
-            blockfinder.tokeneater(*_token)
-    except (EndOfBlock, IndentationError):
-        pass
-    except SyntaxError as e:
-        if "unmatched" not in e.msg:
-            raise e from None
-        _, *_token_info = _token
-        try:
-            blockfinder.tokeneater(tokenize.NEWLINE, *_token_info)
-        except (EndOfBlock, IndentationError):
-            pass
-    return lines[:blockfinder.last]
-
-def getsourcelines(object):
-    """Return a list of source lines and starting line number for an object.
-
-    The argument may be a module, class, method, function, traceback, frame,
-    or code object. The source code is returned as a list of the lines
-    corresponding to the object and the line number indicates where in the
-    original source file the first line of code was found. An OSError is
-    raised if the source code cannot be retrieved."""
-    object = unwrap(object)
-    lines, lnum = findsource(object)
-
-    if istraceback(object):
-        object = object.tb_frame
-
-    # for module or frame that corresponds to module, return all source lines
-    if (ismodule(object) or
-        (isframe(object) and object.f_code.co_name == "<module>")):
-        return lines, 0
-    else:
-        return getblock(lines[lnum:]), lnum + 1
-
-def getsource(object):
-    """Return the text of the source code for an object.
-
-    The argument may be a module, class, method, function, traceback, frame,
-    or code object. The source code is returned as a single string. An
-    OSError is raised if the source code cannot be retrieved."""
-    lines, lnum = getsourcelines(object)
-    return ''.join(lines)
-
-# --------------------------------------------------- class tree extraction
-def walktree(classes, children, parent):
-    """Recursive helper function for getclasstree()."""
-    results = []
-    classes.sort(key=attrgetter('__module__', '__name__'))
-    for c in classes:
-        results.append((c, c.__bases__))
-        if c in children:
-            results.append(walktree(children[c], children, c))
-    return results
-
-def getclasstree(classes, unique=False):
-    """Arrange the given list of classes into a hierarchy of nested lists.
- - Where a nested list appears, it contains classes derived from the class - whose entry immediately precedes the list. Each entry is a 2-tuple - containing a class and a tuple of its base classes. If the 'unique' - argument is true, exactly one entry appears in the returned structure - for each class in the given list. Otherwise, classes using multiple - inheritance and their descendants will appear multiple times.""" - children = {} - roots = [] - for c in classes: - if c.__bases__: - for parent in c.__bases__: - if parent not in children: - children[parent] = [] - if c not in children[parent]: - children[parent].append(c) - if unique and parent in classes: break - elif c not in roots: - roots.append(c) - for parent in children: - if parent not in classes: - roots.append(parent) - return walktree(roots, children, None) - -# ------------------------------------------------ argument list extraction -Arguments = namedtuple('Arguments', 'args, varargs, varkw') - -def getargs(co): - """Get information about the arguments accepted by a code object. - - Three things are returned: (args, varargs, varkw), where - 'args' is the list of argument names. Keyword-only arguments are - appended. 'varargs' and 'varkw' are the names of the * and ** - arguments or None.""" - if not iscode(co): - raise TypeError('{!r} is not a code object'.format(co)) - - names = co.co_varnames - nargs = co.co_argcount - nkwargs = co.co_kwonlyargcount - args = list(names[:nargs]) - kwonlyargs = list(names[nargs:nargs+nkwargs]) - - nargs += nkwargs - varargs = None - if co.co_flags & CO_VARARGS: - varargs = co.co_varnames[nargs] - nargs = nargs + 1 - varkw = None - if co.co_flags & CO_VARKEYWORDS: - varkw = co.co_varnames[nargs] - return Arguments(args + kwonlyargs, varargs, varkw) - - -FullArgSpec = namedtuple('FullArgSpec', - 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations') - -def getfullargspec(func): - """Get the names and default values of a callable object's parameters. - - A tuple of seven things is returned: - (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations). - 'args' is a list of the parameter names. - 'varargs' and 'varkw' are the names of the * and ** parameters or None. - 'defaults' is an n-tuple of the default values of the last n parameters. - 'kwonlyargs' is a list of keyword-only parameter names. - 'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults. - 'annotations' is a dictionary mapping parameter names to annotations. - - Notable differences from inspect.signature(): - - the "self" parameter is always reported, even for bound methods - - wrapper chains defined by __wrapped__ *not* unwrapped automatically - """ - try: - # Re: `skip_bound_arg=False` - # - # There is a notable difference in behaviour between getfullargspec - # and Signature: the former always returns 'self' parameter for bound - # methods, whereas the Signature always shows the actual calling - # signature of the passed object. - # - # To simulate this behaviour, we "unbind" bound methods, to trick - # inspect.signature to always return their first parameter ("self", - # usually) - - # Re: `follow_wrapper_chains=False` - # - # getfullargspec() historically ignored __wrapped__ attributes, - # so we ensure that remains the case in 3.3+ - - sig = _signature_from_callable(func, - follow_wrapper_chains=False, - skip_bound_arg=False, - sigcls=Signature, - eval_str=False) - except Exception as ex: - # Most of the times 'signature' will raise ValueError. 
- # But, it can also raise AttributeError, and, maybe something - # else. So to be fully backwards compatible, we catch all - # possible exceptions here, and reraise a TypeError. - raise TypeError('unsupported callable') from ex - - args = [] - varargs = None - varkw = None - posonlyargs = [] - kwonlyargs = [] - annotations = {} - defaults = () - kwdefaults = {} - - if sig.return_annotation is not sig.empty: - annotations['return'] = sig.return_annotation - - for param in sig.parameters.values(): - kind = param.kind - name = param.name - - if kind is _POSITIONAL_ONLY: - posonlyargs.append(name) - if param.default is not param.empty: - defaults += (param.default,) - elif kind is _POSITIONAL_OR_KEYWORD: - args.append(name) - if param.default is not param.empty: - defaults += (param.default,) - elif kind is _VAR_POSITIONAL: - varargs = name - elif kind is _KEYWORD_ONLY: - kwonlyargs.append(name) - if param.default is not param.empty: - kwdefaults[name] = param.default - elif kind is _VAR_KEYWORD: - varkw = name - - if param.annotation is not param.empty: - annotations[name] = param.annotation - - if not kwdefaults: - # compatibility with 'func.__kwdefaults__' - kwdefaults = None - - if not defaults: - # compatibility with 'func.__defaults__' - defaults = None - - return FullArgSpec(posonlyargs + args, varargs, varkw, defaults, - kwonlyargs, kwdefaults, annotations) - - -ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals') - -def getargvalues(frame): - """Get information about arguments passed into a particular frame. - - A tuple of four things is returned: (args, varargs, varkw, locals). - 'args' is a list of the argument names. - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'locals' is the locals dictionary of the given frame.""" - args, varargs, varkw = getargs(frame.f_code) - return ArgInfo(args, varargs, varkw, frame.f_locals) - -def formatannotation(annotation, base_module=None): - if getattr(annotation, '__module__', None) == 'typing': - def repl(match): - text = match.group() - return text.removeprefix('typing.') - return re.sub(r'[\w\.]+', repl, repr(annotation)) - if isinstance(annotation, types.GenericAlias): - return str(annotation) - if isinstance(annotation, type): - if annotation.__module__ in ('builtins', base_module): - return annotation.__qualname__ - return annotation.__module__+'.'+annotation.__qualname__ - return repr(annotation) - -def formatannotationrelativeto(object): - module = getattr(object, '__module__', None) - def _formatannotation(annotation): - return formatannotation(annotation, module) - return _formatannotation - - -def formatargvalues(args, varargs, varkw, locals, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value)): - """Format an argument spec from the 4 values returned by getargvalues. - - The first four arguments are (args, varargs, varkw, locals). The - next four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. 
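For illustration, getargvalues() and formatargvalues() compose as follows; a minimal sketch inspecting the current frame:

    import inspect

    def f(a, b=2, *args, **kw):
        frame = inspect.currentframe()
        return inspect.formatargvalues(*inspect.getargvalues(frame))

    assert f(1) == '(a=1, b=2, *args=(), **kw={})'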
"""
-    def convert(name, locals=locals,
-                formatarg=formatarg, formatvalue=formatvalue):
-        return formatarg(name) + formatvalue(locals[name])
-    specs = []
-    for i in range(len(args)):
-        specs.append(convert(args[i]))
-    if varargs:
-        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
-    if varkw:
-        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
-    return '(' + ', '.join(specs) + ')'
-
-def _missing_arguments(f_name, argnames, pos, values):
-    names = [repr(name) for name in argnames if name not in values]
-    missing = len(names)
-    if missing == 1:
-        s = names[0]
-    elif missing == 2:
-        s = "{} and {}".format(*names)
-    else:
-        tail = ", {} and {}".format(*names[-2:])
-        del names[-2:]
-        s = ", ".join(names) + tail
-    raise TypeError("%s() missing %i required %s argument%s: %s" %
-                    (f_name, missing,
-                     "positional" if pos else "keyword-only",
-                     "" if missing == 1 else "s", s))
-
-def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
-    atleast = len(args) - defcount
-    kwonly_given = len([arg for arg in kwonly if arg in values])
-    if varargs:
-        plural = atleast != 1
-        sig = "at least %d" % (atleast,)
-    elif defcount:
-        plural = True
-        sig = "from %d to %d" % (atleast, len(args))
-    else:
-        plural = len(args) != 1
-        sig = str(len(args))
-    kwonly_sig = ""
-    if kwonly_given:
-        msg = " positional argument%s (and %d keyword-only argument%s)"
-        kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
-                             "s" if kwonly_given != 1 else ""))
-    raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
-            (f_name, sig, "s" if plural else "", given, kwonly_sig,
-             "was" if given == 1 and not kwonly_given else "were"))
-
-def getcallargs(func, /, *positional, **named):
-    """Get the mapping of arguments to values.
- - A dict is returned, with keys the function argument names (including the - names of the * and ** arguments, if any), and values the respective bound - values from 'positional' and 'named'.""" - spec = getfullargspec(func) - args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec - f_name = func.__name__ - arg2value = {} - - - if ismethod(func) and func.__self__ is not None: - # implicit 'self' (or 'cls' for classmethods) argument - positional = (func.__self__,) + positional - num_pos = len(positional) - num_args = len(args) - num_defaults = len(defaults) if defaults else 0 - - n = min(num_pos, num_args) - for i in range(n): - arg2value[args[i]] = positional[i] - if varargs: - arg2value[varargs] = tuple(positional[n:]) - possible_kwargs = set(args + kwonlyargs) - if varkw: - arg2value[varkw] = {} - for kw, value in named.items(): - if kw not in possible_kwargs: - if not varkw: - raise TypeError("%s() got an unexpected keyword argument %r" % - (f_name, kw)) - arg2value[varkw][kw] = value - continue - if kw in arg2value: - raise TypeError("%s() got multiple values for argument %r" % - (f_name, kw)) - arg2value[kw] = value - if num_pos > num_args and not varargs: - _too_many(f_name, args, kwonlyargs, varargs, num_defaults, - num_pos, arg2value) - if num_pos < num_args: - req = args[:num_args - num_defaults] - for arg in req: - if arg not in arg2value: - _missing_arguments(f_name, req, True, arg2value) - for i, arg in enumerate(args[num_args - num_defaults:]): - if arg not in arg2value: - arg2value[arg] = defaults[i] - missing = 0 - for kwarg in kwonlyargs: - if kwarg not in arg2value: - if kwonlydefaults and kwarg in kwonlydefaults: - arg2value[kwarg] = kwonlydefaults[kwarg] - else: - missing += 1 - if missing: - _missing_arguments(f_name, kwonlyargs, False, arg2value) - return arg2value - -ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound') - -def getclosurevars(func): - """ - Get the mapping of free variables to their current values. - - Returns a named tuple of dicts mapping the current nonlocal, global - and builtin references as seen by the body of the function. A final - set of unbound names that could not be resolved is also provided. 
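For illustration, a minimal sketch of the four buckets getclosurevars() reports, run at module level so that g is a true global:

    import inspect

    g = 10

    def outer():
        n = 5
        def inner():
            return n + g + len('x')   # nonlocal, global, builtin
        return inner

    cv = inspect.getclosurevars(outer())
    assert cv.nonlocals == {'n': 5}
    assert cv.globals == {'g': 10}
    assert 'len' in cv.builtins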
- """ - - if ismethod(func): - func = func.__func__ - - if not isfunction(func): - raise TypeError("{!r} is not a Python function".format(func)) - - code = func.__code__ - # Nonlocal references are named in co_freevars and resolved - # by looking them up in __closure__ by positional index - if func.__closure__ is None: - nonlocal_vars = {} - else: - nonlocal_vars = { - var : cell.cell_contents - for var, cell in zip(code.co_freevars, func.__closure__) - } - - # Global and builtin references are named in co_names and resolved - # by looking them up in __globals__ or __builtins__ - global_ns = func.__globals__ - builtin_ns = global_ns.get("__builtins__", builtins.__dict__) - if ismodule(builtin_ns): - builtin_ns = builtin_ns.__dict__ - global_vars = {} - builtin_vars = {} - unbound_names = set() - global_names = set() - for instruction in dis.get_instructions(code): - opname = instruction.opname - name = instruction.argval - if opname == "LOAD_ATTR": - unbound_names.add(name) - elif opname == "LOAD_GLOBAL": - global_names.add(name) - for name in global_names: - try: - global_vars[name] = global_ns[name] - except KeyError: - try: - builtin_vars[name] = builtin_ns[name] - except KeyError: - unbound_names.add(name) - - return ClosureVars(nonlocal_vars, global_vars, - builtin_vars, unbound_names) - -# -------------------------------------------------- stack frame extraction - -_Traceback = namedtuple('_Traceback', 'filename lineno function code_context index') - -class Traceback(_Traceback): - def __new__(cls, filename, lineno, function, code_context, index, *, positions=None): - instance = super().__new__(cls, filename, lineno, function, code_context, index) - instance.positions = positions - return instance - - def __repr__(self): - return ('Traceback(filename={!r}, lineno={!r}, function={!r}, ' - 'code_context={!r}, index={!r}, positions={!r})'.format( - self.filename, self.lineno, self.function, self.code_context, - self.index, self.positions)) - -def _get_code_position_from_tb(tb): - code, instruction_index = tb.tb_frame.f_code, tb.tb_lasti - return _get_code_position(code, instruction_index) - -def _get_code_position(code, instruction_index): - if instruction_index < 0: - return (None, None, None, None) - positions_gen = code.co_positions() - # The nth entry in code.co_positions() corresponds to instruction (2*n)th since Python 3.10+ - return next(itertools.islice(positions_gen, instruction_index // 2, None)) - -def getframeinfo(frame, context=1): - """Get information about a frame or traceback object. - - A tuple of five things is returned: the filename, the line number of - the current line, the function name, a list of lines of context from - the source code, and the index of the current line within that list. 
- The optional second argument specifies the number of lines of context - to return, which are centered around the current line.""" - if istraceback(frame): - positions = _get_code_position_from_tb(frame) - lineno = frame.tb_lineno - frame = frame.tb_frame - else: - lineno = frame.f_lineno - positions = _get_code_position(frame.f_code, frame.f_lasti) - - if positions[0] is None: - frame, *positions = (frame, lineno, *positions[1:]) - else: - frame, *positions = (frame, *positions) - - lineno = positions[0] - - if not isframe(frame): - raise TypeError('{!r} is not a frame or traceback object'.format(frame)) - - filename = getsourcefile(frame) or getfile(frame) - if context > 0: - start = lineno - 1 - context//2 - try: - lines, lnum = findsource(frame) - except OSError: - lines = index = None - else: - start = max(0, min(start, len(lines) - context)) - lines = lines[start:start+context] - index = lineno - 1 - start - else: - lines = index = None - - return Traceback(filename, lineno, frame.f_code.co_name, lines, - index, positions=dis.Positions(*positions)) - -def getlineno(frame): - """Get the line number from a frame object, allowing for optimization.""" - # FrameType.f_lineno is now a descriptor that grovels co_lnotab - return frame.f_lineno - -_FrameInfo = namedtuple('_FrameInfo', ('frame',) + Traceback._fields) -class FrameInfo(_FrameInfo): - def __new__(cls, frame, filename, lineno, function, code_context, index, *, positions=None): - instance = super().__new__(cls, frame, filename, lineno, function, code_context, index) - instance.positions = positions - return instance - - def __repr__(self): - return ('FrameInfo(frame={!r}, filename={!r}, lineno={!r}, function={!r}, ' - 'code_context={!r}, index={!r}, positions={!r})'.format( - self.frame, self.filename, self.lineno, self.function, - self.code_context, self.index, self.positions)) - -def getouterframes(frame, context=1): - """Get a list of records for a frame and all higher (calling) frames. - - Each record contains a frame object, filename, line number, function - name, a list of lines of context, and index within the context.""" - framelist = [] - while frame: - traceback_info = getframeinfo(frame, context) - frameinfo = (frame,) + traceback_info - framelist.append(FrameInfo(*frameinfo, positions=traceback_info.positions)) - frame = frame.f_back - return framelist - -def getinnerframes(tb, context=1): - """Get a list of records for a traceback's frame and all lower frames. 
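For illustration, getouterframes() starts with the given frame and walks outwards through its callers; a minimal sketch showing that the first record is the frame itself:

    import inspect

    def inner():
        return [fi.function for fi in inspect.getouterframes(inspect.currentframe())]

    assert inner()[0] == 'inner'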
- - Each record contains a frame object, filename, line number, function - name, a list of lines of context, and index within the context.""" - framelist = [] - while tb: - traceback_info = getframeinfo(tb, context) - frameinfo = (tb.tb_frame,) + traceback_info - framelist.append(FrameInfo(*frameinfo, positions=traceback_info.positions)) - tb = tb.tb_next - return framelist - -def currentframe(): - """Return the frame of the caller or None if this is not possible.""" - return sys._getframe(1) if hasattr(sys, "_getframe") else None - -def stack(context=1): - """Return a list of records for the stack above the caller's frame.""" - return getouterframes(sys._getframe(1), context) - -def trace(context=1): - """Return a list of records for the stack below the current exception.""" - exc = sys.exception() - tb = None if exc is None else exc.__traceback__ - return getinnerframes(tb, context) - - -# ------------------------------------------------ static version of getattr - -_sentinel = object() -_static_getmro = type.__dict__['__mro__'].__get__ -_get_dunder_dict_of_class = type.__dict__["__dict__"].__get__ - - -def _check_instance(obj, attr): - instance_dict = {} - try: - instance_dict = object.__getattribute__(obj, "__dict__") - except AttributeError: - pass - return dict.get(instance_dict, attr, _sentinel) - - -def _check_class(klass, attr): - for entry in _static_getmro(klass): - if _shadowed_dict(type(entry)) is _sentinel and attr in entry.__dict__: - return entry.__dict__[attr] - return _sentinel - - -@functools.lru_cache() -def _shadowed_dict_from_weakref_mro_tuple(*weakref_mro): - for weakref_entry in weakref_mro: - # Normally we'd have to check whether the result of weakref_entry() - # is None here, in case the object the weakref is pointing to has died. - # In this specific case, however, we know that the only caller of this - # function is `_shadowed_dict()`, and that therefore this weakref is - # guaranteed to point to an object that is still alive. - entry = weakref_entry() - dunder_dict = _get_dunder_dict_of_class(entry) - if '__dict__' in dunder_dict: - class_dict = dunder_dict['__dict__'] - if not (type(class_dict) is types.GetSetDescriptorType and - class_dict.__name__ == "__dict__" and - class_dict.__objclass__ is entry): - return class_dict - return _sentinel - - -def _shadowed_dict(klass): - # gh-118013: the inner function here is decorated with lru_cache for - # performance reasons, *but* make sure not to pass strong references - # to the items in the mro. Doing so can lead to unexpected memory - # consumption in cases where classes are dynamically created and - # destroyed, and the dynamically created classes happen to be the only - # objects that hold strong references to other objects that take up a - # significant amount of memory. - return _shadowed_dict_from_weakref_mro_tuple( - *[make_weakref(entry) for entry in _static_getmro(klass)] - ) - - -def getattr_static(obj, attr, default=_sentinel): - """Retrieve attributes without triggering dynamic lookup via the - descriptor protocol, __getattr__ or __getattribute__. - - Note: this function may not be able to retrieve all attributes - that getattr can fetch (like dynamically created attributes) - and may find attributes that getattr can't (like descriptors - that raise AttributeError). It can also return descriptor objects - instead of instance members in some cases. See the - documentation for details. 
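For illustration, the contrast with plain getattr is the point of getattr_static(); a minimal sketch with a __getattr__ that would otherwise be triggered:

    import inspect

    class Lazy:
        def __getattr__(self, name):
            return 42   # dynamic lookup that plain getattr invokes

    obj = Lazy()
    assert getattr(obj, 'answer') == 42
    assert inspect.getattr_static(obj, 'answer', None) is None  # __getattr__ not called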
- """ - instance_result = _sentinel - - objtype = type(obj) - if type not in _static_getmro(objtype): - klass = objtype - dict_attr = _shadowed_dict(klass) - if (dict_attr is _sentinel or - type(dict_attr) is types.MemberDescriptorType): - instance_result = _check_instance(obj, attr) - else: - klass = obj - - klass_result = _check_class(klass, attr) - - if instance_result is not _sentinel and klass_result is not _sentinel: - if _check_class(type(klass_result), "__get__") is not _sentinel and ( - _check_class(type(klass_result), "__set__") is not _sentinel - or _check_class(type(klass_result), "__delete__") is not _sentinel - ): - return klass_result - - if instance_result is not _sentinel: - return instance_result - if klass_result is not _sentinel: - return klass_result - - if obj is klass: - # for types we check the metaclass too - for entry in _static_getmro(type(klass)): - if ( - _shadowed_dict(type(entry)) is _sentinel - and attr in entry.__dict__ - ): - return entry.__dict__[attr] - if default is not _sentinel: - return default - raise AttributeError(attr) - - -# ------------------------------------------------ generator introspection - -GEN_CREATED = 'GEN_CREATED' -GEN_RUNNING = 'GEN_RUNNING' -GEN_SUSPENDED = 'GEN_SUSPENDED' -GEN_CLOSED = 'GEN_CLOSED' - -def getgeneratorstate(generator): - """Get current state of a generator-iterator. - - Possible states are: - GEN_CREATED: Waiting to start execution. - GEN_RUNNING: Currently being executed by the interpreter. - GEN_SUSPENDED: Currently suspended at a yield expression. - GEN_CLOSED: Execution has completed. - """ - if generator.gi_running: - return GEN_RUNNING - if generator.gi_suspended: - return GEN_SUSPENDED - if generator.gi_frame is None: - return GEN_CLOSED - return GEN_CREATED - - -def getgeneratorlocals(generator): - """ - Get the mapping of generator local variables to their current values. - - A dict is returned, with the keys the local variable names and values the - bound values.""" - - if not isgenerator(generator): - raise TypeError("{!r} is not a Python generator".format(generator)) - - frame = getattr(generator, "gi_frame", None) - if frame is not None: - return generator.gi_frame.f_locals - else: - return {} - - -# ------------------------------------------------ coroutine introspection - -CORO_CREATED = 'CORO_CREATED' -CORO_RUNNING = 'CORO_RUNNING' -CORO_SUSPENDED = 'CORO_SUSPENDED' -CORO_CLOSED = 'CORO_CLOSED' - -def getcoroutinestate(coroutine): - """Get current state of a coroutine object. - - Possible states are: - CORO_CREATED: Waiting to start execution. - CORO_RUNNING: Currently being executed by the interpreter. - CORO_SUSPENDED: Currently suspended at an await expression. - CORO_CLOSED: Execution has completed. - """ - if coroutine.cr_running: - return CORO_RUNNING - if coroutine.cr_suspended: - return CORO_SUSPENDED - if coroutine.cr_frame is None: - return CORO_CLOSED - return CORO_CREATED - - -def getcoroutinelocals(coroutine): - """ - Get the mapping of coroutine local variables to their current values. 
- - A dict is returned, with the keys the local variable names and values the - bound values.""" - frame = getattr(coroutine, "cr_frame", None) - if frame is not None: - return frame.f_locals - else: - return {} - - -# ----------------------------------- asynchronous generator introspection - -AGEN_CREATED = 'AGEN_CREATED' -AGEN_RUNNING = 'AGEN_RUNNING' -AGEN_SUSPENDED = 'AGEN_SUSPENDED' -AGEN_CLOSED = 'AGEN_CLOSED' - - -def getasyncgenstate(agen): - """Get current state of an asynchronous generator object. - - Possible states are: - AGEN_CREATED: Waiting to start execution. - AGEN_RUNNING: Currently being executed by the interpreter. - AGEN_SUSPENDED: Currently suspended at a yield expression. - AGEN_CLOSED: Execution has completed. - """ - if agen.ag_running: - return AGEN_RUNNING - if agen.ag_suspended: - return AGEN_SUSPENDED - if agen.ag_frame is None: - return AGEN_CLOSED - return AGEN_CREATED - - -def getasyncgenlocals(agen): - """ - Get the mapping of asynchronous generator local variables to their current - values. - - A dict is returned, with the keys the local variable names and values the - bound values.""" - - if not isasyncgen(agen): - raise TypeError(f"{agen!r} is not a Python async generator") - - frame = getattr(agen, "ag_frame", None) - if frame is not None: - return agen.ag_frame.f_locals - else: - return {} - - -############################################################################### -### Function Signature Object (PEP 362) -############################################################################### - - -_NonUserDefinedCallables = (types.WrapperDescriptorType, - types.MethodWrapperType, - types.ClassMethodDescriptorType, - types.BuiltinFunctionType) - - -def _signature_get_user_defined_method(cls, method_name, *, follow_wrapper_chains=True): - """Private helper. Checks if ``cls`` has an attribute - named ``method_name`` and returns it only if it is a - pure python function. - """ - if method_name == '__new__': - meth = getattr(cls, method_name, None) - else: - meth = getattr_static(cls, method_name, None) - if meth is None: - return None - - # NOTE: The meth may wraps a non-user-defined callable. - # In this case, we treat the meth as non-user-defined callable too. - # (e.g. cls.__new__ generated by @warnings.deprecated) - unwrapped_meth = None - if follow_wrapper_chains: - unwrapped_meth = unwrap(meth, stop=(lambda m: hasattr(m, "__signature__") - or _signature_is_builtin(m))) - - if (isinstance(meth, _NonUserDefinedCallables) - or isinstance(unwrapped_meth, _NonUserDefinedCallables)): - # Once '__signature__' will be added to 'C'-level - # callables, this check won't be necessary - return None - if method_name != '__new__': - meth = _descriptor_get(meth, cls) - return meth - - -def _signature_get_partial(wrapped_sig, partial, extra_args=()): - """Private helper to calculate how 'wrapped_sig' signature will - look like after applying a 'functools.partial' object (or alike) - on it. 
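For illustration, the observable effect of this helper through the public API, mirroring the worked example in the helper's own comments below; a minimal sketch:

    import functools
    import inspect

    def foo(a, b, *args, c):
        pass

    # Binding "a" by keyword makes the remaining parameters keyword-only
    # and removes *args from the reported signature:
    sig = inspect.signature(functools.partial(foo, a='spam'))
    assert str(sig) == "(*, a='spam', b, c)"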
- """ - - old_params = wrapped_sig.parameters - new_params = OrderedDict(old_params.items()) - - partial_args = partial.args or () - partial_keywords = partial.keywords or {} - - if extra_args: - partial_args = extra_args + partial_args - - try: - ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords) - except TypeError as ex: - msg = 'partial object {!r} has incorrect arguments'.format(partial) - raise ValueError(msg) from ex - - - transform_to_kwonly = False - for param_name, param in old_params.items(): - try: - arg_value = ba.arguments[param_name] - except KeyError: - pass - else: - if param.kind is _POSITIONAL_ONLY: - # If positional-only parameter is bound by partial, - # it effectively disappears from the signature - new_params.pop(param_name) - continue - - if param.kind is _POSITIONAL_OR_KEYWORD: - if param_name in partial_keywords: - # This means that this parameter, and all parameters - # after it should be keyword-only (and var-positional - # should be removed). Here's why. Consider the following - # function: - # foo(a, b, *args, c): - # pass - # - # "partial(foo, a='spam')" will have the following - # signature: "(*, a='spam', b, c)". Because attempting - # to call that partial with "(10, 20)" arguments will - # raise a TypeError, saying that "a" argument received - # multiple values. - transform_to_kwonly = True - # Set the new default value - new_params[param_name] = param.replace(default=arg_value) - else: - # was passed as a positional argument - new_params.pop(param.name) - continue - - if param.kind is _KEYWORD_ONLY: - # Set the new default value - new_params[param_name] = param.replace(default=arg_value) - - if transform_to_kwonly: - assert param.kind is not _POSITIONAL_ONLY - - if param.kind is _POSITIONAL_OR_KEYWORD: - new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY) - new_params[param_name] = new_param - new_params.move_to_end(param_name) - elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD): - new_params.move_to_end(param_name) - elif param.kind is _VAR_POSITIONAL: - new_params.pop(param.name) - - return wrapped_sig.replace(parameters=new_params.values()) - - -def _signature_bound_method(sig): - """Private helper to transform signatures for unbound - functions to bound methods. - """ - - params = tuple(sig.parameters.values()) - - if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY): - raise ValueError('invalid method signature') - - kind = params[0].kind - if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY): - # Drop first parameter: - # '(p1, p2[, ...])' -> '(p2[, ...])' - params = params[1:] - else: - if kind is not _VAR_POSITIONAL: - # Unless we add a new parameter type we never - # get here - raise ValueError('invalid argument type') - # It's a var-positional parameter. - # Do nothing. '(*args[, ...])' -> '(*args[, ...])' - - return sig.replace(parameters=params) - - -def _signature_is_builtin(obj): - """Private helper to test if `obj` is a callable that might - support Argument Clinic's __text_signature__ protocol. - """ - return (isbuiltin(obj) or - ismethoddescriptor(obj) or - isinstance(obj, _NonUserDefinedCallables) or - # Can't test 'isinstance(type)' here, as it would - # also be True for regular python classes. - # Can't use the `in` operator here, as it would - # invoke the custom __eq__ method. - obj is type or obj is object) - - -def _signature_is_functionlike(obj): - """Private helper to test if `obj` is a duck type of FunctionType. 
- A good example of such objects are functions compiled with - Cython, which have all attributes that a pure Python function - would have, but have their code statically compiled. - """ - - if not callable(obj) or isclass(obj): - # All function-like objects are obviously callables, - # and not classes. - return False - - name = getattr(obj, '__name__', None) - code = getattr(obj, '__code__', None) - defaults = getattr(obj, '__defaults__', _void) # Important to use _void ... - kwdefaults = getattr(obj, '__kwdefaults__', _void) # ... and not None here - annotations = getattr(obj, '__annotations__', None) - - return (isinstance(code, types.CodeType) and - isinstance(name, str) and - (defaults is None or isinstance(defaults, tuple)) and - (kwdefaults is None or isinstance(kwdefaults, dict)) and - (isinstance(annotations, (dict)) or annotations is None) ) - - -def _signature_strip_non_python_syntax(signature): - """ - Private helper function. Takes a signature in Argument Clinic's - extended signature format. - - Returns a tuple of two things: - * that signature re-rendered in standard Python syntax, and - * the index of the "self" parameter (generally 0), or None if - the function does not have a "self" parameter. - """ - - if not signature: - return signature, None - - self_parameter = None - - lines = [l.encode('ascii') for l in signature.split('\n') if l] - generator = iter(lines).__next__ - token_stream = tokenize.tokenize(generator) - - text = [] - add = text.append - - current_parameter = 0 - OP = token.OP - ERRORTOKEN = token.ERRORTOKEN - - # token stream always starts with ENCODING token, skip it - t = next(token_stream) - assert t.type == tokenize.ENCODING - - for t in token_stream: - type, string = t.type, t.string - - if type == OP: - if string == ',': - current_parameter += 1 - - if (type == OP) and (string == '$'): - assert self_parameter is None - self_parameter = current_parameter - continue - - add(string) - if (string == ','): - add(' ') - clean_signature = ''.join(text).strip().replace("\n", "") - return clean_signature, self_parameter - - -def _signature_fromstr(cls, obj, s, skip_bound_arg=True): - """Private helper to parse content of '__text_signature__' - and return a Signature based on it. 
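-
-    For example, a builtin such as 'dict.get' typically exposes a
-    '__text_signature__' of "($self, key, default=None, /)", where '$'
-    marks the implicit bound parameter and '/' marks the parameters
-    before it as positional-only.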
- """ - Parameter = cls._parameter_cls - - clean_signature, self_parameter = _signature_strip_non_python_syntax(s) - - program = "def foo" + clean_signature + ": pass" - - try: - module = ast.parse(program) - except SyntaxError: - module = None - - if not isinstance(module, ast.Module): - raise ValueError("{!r} builtin has invalid signature".format(obj)) - - f = module.body[0] - - parameters = [] - empty = Parameter.empty - - module = None - module_dict = {} - - module_name = getattr(obj, '__module__', None) - if not module_name: - objclass = getattr(obj, '__objclass__', None) - module_name = getattr(objclass, '__module__', None) - - if module_name: - module = sys.modules.get(module_name, None) - if module: - module_dict = module.__dict__ - sys_module_dict = sys.modules.copy() - - def parse_name(node): - assert isinstance(node, ast.arg) - if node.annotation is not None: - raise ValueError("Annotations are not currently supported") - return node.arg - - def wrap_value(s): - try: - value = eval(s, module_dict) - except NameError: - try: - value = eval(s, sys_module_dict) - except NameError: - raise ValueError - - if isinstance(value, (str, int, float, bytes, bool, type(None))): - return ast.Constant(value) - raise ValueError - - class RewriteSymbolics(ast.NodeTransformer): - def visit_Attribute(self, node): - a = [] - n = node - while isinstance(n, ast.Attribute): - a.append(n.attr) - n = n.value - if not isinstance(n, ast.Name): - raise ValueError - a.append(n.id) - value = ".".join(reversed(a)) - return wrap_value(value) - - def visit_Name(self, node): - if not isinstance(node.ctx, ast.Load): - raise ValueError() - return wrap_value(node.id) - - def visit_BinOp(self, node): - # Support constant folding of a couple simple binary operations - # commonly used to define default values in text signatures - left = self.visit(node.left) - right = self.visit(node.right) - if not isinstance(left, ast.Constant) or not isinstance(right, ast.Constant): - raise ValueError - if isinstance(node.op, ast.Add): - return ast.Constant(left.value + right.value) - elif isinstance(node.op, ast.Sub): - return ast.Constant(left.value - right.value) - elif isinstance(node.op, ast.BitOr): - return ast.Constant(left.value | right.value) - raise ValueError - - def p(name_node, default_node, default=empty): - name = parse_name(name_node) - if default_node and default_node is not _empty: - try: - default_node = RewriteSymbolics().visit(default_node) - default = ast.literal_eval(default_node) - except ValueError: - raise ValueError("{!r} builtin has invalid signature".format(obj)) from None - parameters.append(Parameter(name, kind, default=default, annotation=empty)) - - # non-keyword-only parameters - total_non_kw_args = len(f.args.posonlyargs) + len(f.args.args) - required_non_kw_args = total_non_kw_args - len(f.args.defaults) - defaults = itertools.chain(itertools.repeat(None, required_non_kw_args), f.args.defaults) - - kind = Parameter.POSITIONAL_ONLY - for (name, default) in zip(f.args.posonlyargs, defaults): - p(name, default) - - kind = Parameter.POSITIONAL_OR_KEYWORD - for (name, default) in zip(f.args.args, defaults): - p(name, default) - - # *args - if f.args.vararg: - kind = Parameter.VAR_POSITIONAL - p(f.args.vararg, empty) - - # keyword-only arguments - kind = Parameter.KEYWORD_ONLY - for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults): - p(name, default) - - # **kwargs - if f.args.kwarg: - kind = Parameter.VAR_KEYWORD - p(f.args.kwarg, empty) - - if self_parameter is not None: - # Possibly strip 
the bound argument: - # - We *always* strip first bound argument if - # it is a module. - # - We don't strip first bound argument if - # skip_bound_arg is False. - assert parameters - _self = getattr(obj, '__self__', None) - self_isbound = _self is not None - self_ismodule = ismodule(_self) - if self_isbound and (self_ismodule or skip_bound_arg): - parameters.pop(0) - else: - # for builtins, self parameter is always positional-only! - p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY) - parameters[0] = p - - return cls(parameters, return_annotation=cls.empty) - - -def _signature_from_builtin(cls, func, skip_bound_arg=True): - """Private helper function to get signature for - builtin callables. - """ - - if not _signature_is_builtin(func): - raise TypeError("{!r} is not a Python builtin " - "function".format(func)) - - s = getattr(func, "__text_signature__", None) - if not s: - raise ValueError("no signature found for builtin {!r}".format(func)) - - return _signature_fromstr(cls, func, s, skip_bound_arg) - - -def _signature_from_function(cls, func, skip_bound_arg=True, - globals=None, locals=None, eval_str=False): - """Private helper: constructs Signature for the given python function.""" - - is_duck_function = False - if not isfunction(func): - if _signature_is_functionlike(func): - is_duck_function = True - else: - # If it's not a pure Python function, and not a duck type - # of pure function: - raise TypeError('{!r} is not a Python function'.format(func)) - - s = getattr(func, "__text_signature__", None) - if s: - return _signature_fromstr(cls, func, s, skip_bound_arg) - - Parameter = cls._parameter_cls - - # Parameter information. - func_code = func.__code__ - pos_count = func_code.co_argcount - arg_names = func_code.co_varnames - posonly_count = func_code.co_posonlyargcount - positional = arg_names[:pos_count] - keyword_only_count = func_code.co_kwonlyargcount - keyword_only = arg_names[pos_count:pos_count + keyword_only_count] - annotations = get_annotations(func, globals=globals, locals=locals, eval_str=eval_str) - defaults = func.__defaults__ - kwdefaults = func.__kwdefaults__ - - if defaults: - pos_default_count = len(defaults) - else: - pos_default_count = 0 - - parameters = [] - - non_default_count = pos_count - pos_default_count - posonly_left = posonly_count - - # Non-keyword-only parameters w/o defaults. - for name in positional[:non_default_count]: - kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD - annotation = annotations.get(name, _empty) - parameters.append(Parameter(name, annotation=annotation, - kind=kind)) - if posonly_left: - posonly_left -= 1 - - # ... w/ defaults. - for offset, name in enumerate(positional[non_default_count:]): - kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD - annotation = annotations.get(name, _empty) - parameters.append(Parameter(name, annotation=annotation, - kind=kind, - default=defaults[offset])) - if posonly_left: - posonly_left -= 1 - - # *args - if func_code.co_flags & CO_VARARGS: - name = arg_names[pos_count + keyword_only_count] - annotation = annotations.get(name, _empty) - parameters.append(Parameter(name, annotation=annotation, - kind=_VAR_POSITIONAL)) - - # Keyword-only parameters. 
- for name in keyword_only: - default = _empty - if kwdefaults is not None: - default = kwdefaults.get(name, _empty) - - annotation = annotations.get(name, _empty) - parameters.append(Parameter(name, annotation=annotation, - kind=_KEYWORD_ONLY, - default=default)) - # **kwargs - if func_code.co_flags & CO_VARKEYWORDS: - index = pos_count + keyword_only_count - if func_code.co_flags & CO_VARARGS: - index += 1 - - name = arg_names[index] - annotation = annotations.get(name, _empty) - parameters.append(Parameter(name, annotation=annotation, - kind=_VAR_KEYWORD)) - - # Is 'func' is a pure Python function - don't validate the - # parameters list (for correct order and defaults), it should be OK. - return cls(parameters, - return_annotation=annotations.get('return', _empty), - __validate_parameters__=is_duck_function) - - -def _descriptor_get(descriptor, obj): - if isclass(descriptor): - return descriptor - get = getattr(type(descriptor), '__get__', _sentinel) - if get is _sentinel: - return descriptor - return get(descriptor, obj, type(obj)) - - -def _signature_from_callable(obj, *, - follow_wrapper_chains=True, - skip_bound_arg=True, - globals=None, - locals=None, - eval_str=False, - sigcls): - - """Private helper function to get signature for arbitrary - callable objects. - """ - - _get_signature_of = functools.partial(_signature_from_callable, - follow_wrapper_chains=follow_wrapper_chains, - skip_bound_arg=skip_bound_arg, - globals=globals, - locals=locals, - sigcls=sigcls, - eval_str=eval_str) - - if not callable(obj): - raise TypeError('{!r} is not a callable object'.format(obj)) - - if isinstance(obj, types.MethodType): - # In this case we skip the first parameter of the underlying - # function (usually `self` or `cls`). - sig = _get_signature_of(obj.__func__) - - if skip_bound_arg: - return _signature_bound_method(sig) - else: - return sig - - # Was this function wrapped by a decorator? - if follow_wrapper_chains: - # Unwrap until we find an explicit signature or a MethodType (which will be - # handled explicitly below). - obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__") - or isinstance(f, types.MethodType))) - if isinstance(obj, types.MethodType): - # If the unwrapped object is a *method*, we might want to - # skip its first parameter (self). - # See test_signature_wrapped_bound_method for details. 
- return _get_signature_of(obj) - - try: - sig = obj.__signature__ - except AttributeError: - pass - else: - if sig is not None: - # since __text_signature__ is not writable on classes, __signature__ - # may contain text (or be a callable that returns text); - # if so, convert it - o_sig = sig - if not isinstance(sig, (Signature, str)) and callable(sig): - sig = sig() - if isinstance(sig, str): - sig = _signature_fromstr(sigcls, obj, sig) - if not isinstance(sig, Signature): - raise TypeError( - 'unexpected object {!r} in __signature__ ' - 'attribute'.format(o_sig)) - return sig - - try: - partialmethod = obj.__partialmethod__ - except AttributeError: - pass - else: - if isinstance(partialmethod, functools.partialmethod): - # Unbound partialmethod (see functools.partialmethod) - # This means, that we need to calculate the signature - # as if it's a regular partial object, but taking into - # account that the first positional argument - # (usually `self`, or `cls`) will not be passed - # automatically (as for boundmethods) - - wrapped_sig = _get_signature_of(partialmethod.func) - - sig = _signature_get_partial(wrapped_sig, partialmethod, (None,)) - first_wrapped_param = tuple(wrapped_sig.parameters.values())[0] - if first_wrapped_param.kind is Parameter.VAR_POSITIONAL: - # First argument of the wrapped callable is `*args`, as in - # `partialmethod(lambda *args)`. - return sig - else: - sig_params = tuple(sig.parameters.values()) - assert (not sig_params or - first_wrapped_param is not sig_params[0]) - new_params = (first_wrapped_param,) + sig_params - return sig.replace(parameters=new_params) - - if isinstance(obj, functools.partial): - wrapped_sig = _get_signature_of(obj.func) - return _signature_get_partial(wrapped_sig, obj) - - if isfunction(obj) or _signature_is_functionlike(obj): - # If it's a pure Python function, or an object that is duck type - # of a Python function (Cython functions, for instance), then: - return _signature_from_function(sigcls, obj, - skip_bound_arg=skip_bound_arg, - globals=globals, locals=locals, eval_str=eval_str) - - if _signature_is_builtin(obj): - return _signature_from_builtin(sigcls, obj, - skip_bound_arg=skip_bound_arg) - - if isinstance(obj, type): - # obj is a class or a metaclass - - # First, let's see if it has an overloaded __call__ defined - # in its metaclass - call = _signature_get_user_defined_method( - type(obj), - '__call__', - follow_wrapper_chains=follow_wrapper_chains, - ) - if call is not None: - return _get_signature_of(call) - - # NOTE: The user-defined method can be a function with a thin wrapper - # around object.__new__ (e.g., generated by `@warnings.deprecated`) - new = _signature_get_user_defined_method( - obj, - '__new__', - follow_wrapper_chains=follow_wrapper_chains, - ) - init = _signature_get_user_defined_method( - obj, - '__init__', - follow_wrapper_chains=follow_wrapper_chains, - ) - - # Go through the MRO and see if any class has user-defined - # pure Python __new__ or __init__ method - for base in obj.__mro__: - # Now we check if the 'obj' class has an own '__new__' method - if new is not None and '__new__' in base.__dict__: - sig = _get_signature_of(new) - if skip_bound_arg: - sig = _signature_bound_method(sig) - return sig - # or an own '__init__' method - elif init is not None and '__init__' in base.__dict__: - return _get_signature_of(init) - - # At this point we know, that `obj` is a class, with no user- - # defined '__init__', '__new__', or class-level '__call__' - - for base in obj.__mro__[:-1]: - # Since 
'__text_signature__' is implemented as a - # descriptor that extracts text signature from the - # class docstring, if 'obj' is derived from a builtin - # class, its own '__text_signature__' may be 'None'. - # Therefore, we go through the MRO (except the last - # class in there, which is 'object') to find the first - # class with non-empty text signature. - try: - text_sig = base.__text_signature__ - except AttributeError: - pass - else: - if text_sig: - # If 'base' class has a __text_signature__ attribute: - # return a signature based on it - return _signature_fromstr(sigcls, base, text_sig) - - # No '__text_signature__' was found for the 'obj' class. - # Last option is to check if its '__init__' is - # object.__init__ or type.__init__. - if type not in obj.__mro__: - obj_init = obj.__init__ - obj_new = obj.__new__ - if follow_wrapper_chains: - obj_init = unwrap(obj_init) - obj_new = unwrap(obj_new) - # We have a class (not metaclass), but no user-defined - # __init__ or __new__ for it - if obj_init is object.__init__ and obj_new is object.__new__: - # Return a signature of 'object' builtin. - return sigcls.from_callable(object) - else: - raise ValueError( - 'no signature found for builtin type {!r}'.format(obj)) - - else: - # An object with __call__ - call = getattr_static(type(obj), '__call__', None) - if call is not None: - try: - text_sig = obj.__text_signature__ - except AttributeError: - pass - else: - if text_sig: - return _signature_fromstr(sigcls, obj, text_sig) - call = _descriptor_get(call, obj) - return _get_signature_of(call) - - raise ValueError('callable {!r} is not supported by signature'.format(obj)) - - -class _void: - """A private marker - used in Parameter & Signature.""" - - -class _empty: - """Marker object for Signature.empty and Parameter.empty.""" - - -class _ParameterKind(enum.IntEnum): - POSITIONAL_ONLY = 'positional-only' - POSITIONAL_OR_KEYWORD = 'positional or keyword' - VAR_POSITIONAL = 'variadic positional' - KEYWORD_ONLY = 'keyword-only' - VAR_KEYWORD = 'variadic keyword' - - def __new__(cls, description): - value = len(cls.__members__) - member = int.__new__(cls, value) - member._value_ = value - member.description = description - return member - - def __str__(self): - return self.name - -_POSITIONAL_ONLY = _ParameterKind.POSITIONAL_ONLY -_POSITIONAL_OR_KEYWORD = _ParameterKind.POSITIONAL_OR_KEYWORD -_VAR_POSITIONAL = _ParameterKind.VAR_POSITIONAL -_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY -_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD - - -class Parameter: - """Represents a parameter in a function signature. - - Has the following public attributes: - - * name : str - The name of the parameter as a string. - * default : object - The default value for the parameter if specified. If the - parameter has no default value, this attribute is set to - `Parameter.empty`. - * annotation - The annotation for the parameter if specified. If the - parameter has no annotation, this attribute is set to - `Parameter.empty`. - * kind - Describes how argument values are bound to the parameter. - Possible values: `Parameter.POSITIONAL_ONLY`, - `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`, - `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`. - Every value has a `description` attribute describing meaning. 
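-
-    Example (illustrative):
-
-        >>> p = Parameter('x', Parameter.POSITIONAL_OR_KEYWORD, default=1)
-        >>> str(p)
-        'x=1'
-        >>> p.kind.description
-        'positional or keyword'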
- """ - - __slots__ = ('_name', '_kind', '_default', '_annotation') - - POSITIONAL_ONLY = _POSITIONAL_ONLY - POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD - VAR_POSITIONAL = _VAR_POSITIONAL - KEYWORD_ONLY = _KEYWORD_ONLY - VAR_KEYWORD = _VAR_KEYWORD - - empty = _empty - - def __init__(self, name, kind, *, default=_empty, annotation=_empty): - try: - self._kind = _ParameterKind(kind) - except ValueError: - raise ValueError(f'value {kind!r} is not a valid Parameter.kind') - if default is not _empty: - if self._kind in (_VAR_POSITIONAL, _VAR_KEYWORD): - msg = '{} parameters cannot have default values' - msg = msg.format(self._kind.description) - raise ValueError(msg) - self._default = default - self._annotation = annotation - - if name is _empty: - raise ValueError('name is a required attribute for Parameter') - - if not isinstance(name, str): - msg = 'name must be a str, not a {}'.format(type(name).__name__) - raise TypeError(msg) - - if name[0] == '.' and name[1:].isdigit(): - # These are implicit arguments generated by comprehensions. In - # order to provide a friendlier interface to users, we recast - # their name as "implicitN" and treat them as positional-only. - # See issue 19611. - if self._kind != _POSITIONAL_OR_KEYWORD: - msg = ( - 'implicit arguments must be passed as ' - 'positional or keyword arguments, not {}' - ) - msg = msg.format(self._kind.description) - raise ValueError(msg) - self._kind = _POSITIONAL_ONLY - name = 'implicit{}'.format(name[1:]) - - # It's possible for C functions to have a positional-only parameter - # where the name is a keyword, so for compatibility we'll allow it. - is_keyword = iskeyword(name) and self._kind is not _POSITIONAL_ONLY - if is_keyword or not name.isidentifier(): - raise ValueError('{!r} is not a valid parameter name'.format(name)) - - self._name = name - - def __reduce__(self): - return (type(self), - (self._name, self._kind), - {'_default': self._default, - '_annotation': self._annotation}) - - def __setstate__(self, state): - self._default = state['_default'] - self._annotation = state['_annotation'] - - @property - def name(self): - return self._name - - @property - def default(self): - return self._default - - @property - def annotation(self): - return self._annotation - - @property - def kind(self): - return self._kind - - def replace(self, *, name=_void, kind=_void, - annotation=_void, default=_void): - """Creates a customized copy of the Parameter.""" - - if name is _void: - name = self._name - - if kind is _void: - kind = self._kind - - if annotation is _void: - annotation = self._annotation - - if default is _void: - default = self._default - - return type(self)(name, kind, default=default, annotation=annotation) - - def __str__(self): - kind = self.kind - formatted = self._name - - # Add annotation and default value - if self._annotation is not _empty: - formatted = '{}: {}'.format(formatted, - formatannotation(self._annotation)) - - if self._default is not _empty: - if self._annotation is not _empty: - formatted = '{} = {}'.format(formatted, repr(self._default)) - else: - formatted = '{}={}'.format(formatted, repr(self._default)) - - if kind == _VAR_POSITIONAL: - formatted = '*' + formatted - elif kind == _VAR_KEYWORD: - formatted = '**' + formatted - - return formatted - - __replace__ = replace - - def __repr__(self): - return '<{} "{}">'.format(self.__class__.__name__, self) - - def __hash__(self): - return hash((self._name, self._kind, self._annotation, self._default)) - - def __eq__(self, other): - if self is other: - return 
True - if not isinstance(other, Parameter): - return NotImplemented - return (self._name == other._name and - self._kind == other._kind and - self._default == other._default and - self._annotation == other._annotation) - - -class BoundArguments: - """Result of `Signature.bind` call. Holds the mapping of arguments - to the function's parameters. - - Has the following public attributes: - - * arguments : dict - An ordered mutable mapping of parameters' names to arguments' values. - Does not contain arguments' default values. - * signature : Signature - The Signature object that created this instance. - * args : tuple - Tuple of positional arguments values. - * kwargs : dict - Dict of keyword arguments values. - """ - - __slots__ = ('arguments', '_signature', '__weakref__') - - def __init__(self, signature, arguments): - self.arguments = arguments - self._signature = signature - - @property - def signature(self): - return self._signature - - @property - def args(self): - args = [] - for param_name, param in self._signature.parameters.items(): - if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): - break - - try: - arg = self.arguments[param_name] - except KeyError: - # We're done here. Other arguments - # will be mapped in 'BoundArguments.kwargs' - break - else: - if param.kind == _VAR_POSITIONAL: - # *args - args.extend(arg) - else: - # plain argument - args.append(arg) - - return tuple(args) - - @property - def kwargs(self): - kwargs = {} - kwargs_started = False - for param_name, param in self._signature.parameters.items(): - if not kwargs_started: - if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): - kwargs_started = True - else: - if param_name not in self.arguments: - kwargs_started = True - continue - - if not kwargs_started: - continue - - try: - arg = self.arguments[param_name] - except KeyError: - pass - else: - if param.kind == _VAR_KEYWORD: - # **kwargs - kwargs.update(arg) - else: - # plain keyword argument - kwargs[param_name] = arg - - return kwargs - - def apply_defaults(self): - """Set default values for missing arguments. - - For variable-positional arguments (*args) the default is an - empty tuple. - - For variable-keyword arguments (**kwargs) the default is an - empty dict. - """ - arguments = self.arguments - new_arguments = [] - for name, param in self._signature.parameters.items(): - try: - new_arguments.append((name, arguments[name])) - except KeyError: - if param.default is not _empty: - val = param.default - elif param.kind is _VAR_POSITIONAL: - val = () - elif param.kind is _VAR_KEYWORD: - val = {} - else: - # This BoundArguments was likely produced by - # Signature.bind_partial(). - continue - new_arguments.append((name, val)) - self.arguments = dict(new_arguments) - - def __eq__(self, other): - if self is other: - return True - if not isinstance(other, BoundArguments): - return NotImplemented - return (self.signature == other.signature and - self.arguments == other.arguments) - - def __setstate__(self, state): - self._signature = state['_signature'] - self.arguments = state['arguments'] - - def __getstate__(self): - return {'_signature': self._signature, 'arguments': self.arguments} - - def __repr__(self): - args = [] - for arg, value in self.arguments.items(): - args.append('{}={!r}'.format(arg, value)) - return '<{} ({})>'.format(self.__class__.__name__, ', '.join(args)) - - -class Signature: - """A Signature object represents the overall signature of a function. 
- It stores a Parameter object for each parameter accepted by the - function, as well as information specific to the function itself. - - A Signature object has the following public attributes and methods: - - * parameters : OrderedDict - An ordered mapping of parameters' names to the corresponding - Parameter objects (keyword-only arguments are in the same order - as listed in `code.co_varnames`). - * return_annotation : object - The annotation for the return type of the function if specified. - If the function has no annotation for its return type, this - attribute is set to `Signature.empty`. - * bind(*args, **kwargs) -> BoundArguments - Creates a mapping from positional and keyword arguments to - parameters. - * bind_partial(*args, **kwargs) -> BoundArguments - Creates a partial mapping from positional and keyword arguments - to parameters (simulating 'functools.partial' behavior.) - """ - - __slots__ = ('_return_annotation', '_parameters') - - _parameter_cls = Parameter - _bound_arguments_cls = BoundArguments - - empty = _empty - - def __init__(self, parameters=None, *, return_annotation=_empty, - __validate_parameters__=True): - """Constructs Signature from the given list of Parameter - objects and 'return_annotation'. All arguments are optional. - """ - - if parameters is None: - params = OrderedDict() - else: - if __validate_parameters__: - params = OrderedDict() - top_kind = _POSITIONAL_ONLY - seen_default = False - - for param in parameters: - kind = param.kind - name = param.name - - if kind < top_kind: - msg = ( - 'wrong parameter order: {} parameter before {} ' - 'parameter' - ) - msg = msg.format(top_kind.description, - kind.description) - raise ValueError(msg) - elif kind > top_kind: - top_kind = kind - - if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD): - if param.default is _empty: - if seen_default: - # No default for this parameter, but the - # previous parameter of had a default - msg = 'non-default argument follows default ' \ - 'argument' - raise ValueError(msg) - else: - # There is a default for this parameter. - seen_default = True - - if name in params: - msg = 'duplicate parameter name: {!r}'.format(name) - raise ValueError(msg) - - params[name] = param - else: - params = OrderedDict((param.name, param) for param in parameters) - - self._parameters = types.MappingProxyType(params) - self._return_annotation = return_annotation - - @classmethod - def from_callable(cls, obj, *, - follow_wrapped=True, globals=None, locals=None, eval_str=False): - """Constructs Signature for the given callable object.""" - return _signature_from_callable(obj, sigcls=cls, - follow_wrapper_chains=follow_wrapped, - globals=globals, locals=locals, eval_str=eval_str) - - @property - def parameters(self): - return self._parameters - - @property - def return_annotation(self): - return self._return_annotation - - def replace(self, *, parameters=_void, return_annotation=_void): - """Creates a customized copy of the Signature. - Pass 'parameters' and/or 'return_annotation' arguments - to override them in the new copy. 
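-
-        Example (illustrative):
-
-            >>> def f(a): ...
-            >>> sig = Signature.from_callable(f)
-            >>> str(sig.replace(return_annotation=int))
-            '(a) -> int'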
- """ - - if parameters is _void: - parameters = self.parameters.values() - - if return_annotation is _void: - return_annotation = self._return_annotation - - return type(self)(parameters, - return_annotation=return_annotation) - - __replace__ = replace - - def _hash_basis(self): - params = tuple(param for param in self.parameters.values() - if param.kind != _KEYWORD_ONLY) - - kwo_params = {param.name: param for param in self.parameters.values() - if param.kind == _KEYWORD_ONLY} - - return params, kwo_params, self.return_annotation - - def __hash__(self): - params, kwo_params, return_annotation = self._hash_basis() - kwo_params = frozenset(kwo_params.values()) - return hash((params, kwo_params, return_annotation)) - - def __eq__(self, other): - if self is other: - return True - if not isinstance(other, Signature): - return NotImplemented - return self._hash_basis() == other._hash_basis() - - def _bind(self, args, kwargs, *, partial=False): - """Private method. Don't use directly.""" - - arguments = {} - - parameters = iter(self.parameters.values()) - parameters_ex = () - arg_vals = iter(args) - - pos_only_param_in_kwargs = [] - - while True: - # Let's iterate through the positional arguments and corresponding - # parameters - try: - arg_val = next(arg_vals) - except StopIteration: - # No more positional arguments - try: - param = next(parameters) - except StopIteration: - # No more parameters. That's it. Just need to check that - # we have no `kwargs` after this while loop - break - else: - if param.kind == _VAR_POSITIONAL: - # That's OK, just empty *args. Let's start parsing - # kwargs - break - elif param.name in kwargs: - if param.kind == _POSITIONAL_ONLY: - if param.default is _empty: - msg = f'missing a required positional-only argument: {param.name!r}' - raise TypeError(msg) - # Raise a TypeError once we are sure there is no - # **kwargs param later. - pos_only_param_in_kwargs.append(param) - continue - parameters_ex = (param,) - break - elif (param.kind == _VAR_KEYWORD or - param.default is not _empty): - # That's fine too - we have a default value for this - # parameter. 
So, lets start parsing `kwargs`, starting - # with the current parameter - parameters_ex = (param,) - break - else: - # No default, not VAR_KEYWORD, not VAR_POSITIONAL, - # not in `kwargs` - if partial: - parameters_ex = (param,) - break - else: - if param.kind == _KEYWORD_ONLY: - argtype = ' keyword-only' - else: - argtype = '' - msg = 'missing a required{argtype} argument: {arg!r}' - msg = msg.format(arg=param.name, argtype=argtype) - raise TypeError(msg) from None - else: - # We have a positional argument to process - try: - param = next(parameters) - except StopIteration: - raise TypeError('too many positional arguments') from None - else: - if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): - # Looks like we have no parameter for this positional - # argument - raise TypeError( - 'too many positional arguments') from None - - if param.kind == _VAR_POSITIONAL: - # We have an '*args'-like argument, let's fill it with - # all positional arguments we have left and move on to - # the next phase - values = [arg_val] - values.extend(arg_vals) - arguments[param.name] = tuple(values) - break - - if param.name in kwargs and param.kind != _POSITIONAL_ONLY: - raise TypeError( - 'multiple values for argument {arg!r}'.format( - arg=param.name)) from None - - arguments[param.name] = arg_val - - # Now, we iterate through the remaining parameters to process - # keyword arguments - kwargs_param = None - for param in itertools.chain(parameters_ex, parameters): - if param.kind == _VAR_KEYWORD: - # Memorize that we have a '**kwargs'-like parameter - kwargs_param = param - continue - - if param.kind == _VAR_POSITIONAL: - # Named arguments don't refer to '*args'-like parameters. - # We only arrive here if the positional arguments ended - # before reaching the last parameter before *args. - continue - - param_name = param.name - try: - arg_val = kwargs.pop(param_name) - except KeyError: - # We have no value for this parameter. It's fine though, - # if it has a default value, or it is an '*args'-like - # parameter, left alone by the processing of positional - # arguments. - if (not partial and param.kind != _VAR_POSITIONAL and - param.default is _empty): - raise TypeError('missing a required argument: {arg!r}'. \ - format(arg=param_name)) from None - - else: - arguments[param_name] = arg_val - - if kwargs: - if kwargs_param is not None: - # Process our '**kwargs'-like parameter - arguments[kwargs_param.name] = kwargs - elif pos_only_param_in_kwargs: - raise TypeError( - 'got some positional-only arguments passed as ' - 'keyword arguments: {arg!r}'.format( - arg=', '.join( - param.name - for param in pos_only_param_in_kwargs - ), - ), - ) - else: - raise TypeError( - 'got an unexpected keyword argument {arg!r}'.format( - arg=next(iter(kwargs)))) - - return self._bound_arguments_cls(self, arguments) - - def bind(self, /, *args, **kwargs): - """Get a BoundArguments object, that maps the passed `args` - and `kwargs` to the function's signature. Raises `TypeError` - if the passed arguments can not be bound. - """ - return self._bind(args, kwargs) - - def bind_partial(self, /, *args, **kwargs): - """Get a BoundArguments object, that partially maps the - passed `args` and `kwargs` to the function's signature. - Raises `TypeError` if the passed arguments can not be bound. 
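-
-        Example (illustrative):
-
-            >>> def f(a, b, c=3): ...
-            >>> Signature.from_callable(f).bind_partial(1).arguments
-            {'a': 1}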
- """ - return self._bind(args, kwargs, partial=True) - - def __reduce__(self): - return (type(self), - (tuple(self._parameters.values()),), - {'_return_annotation': self._return_annotation}) - - def __setstate__(self, state): - self._return_annotation = state['_return_annotation'] - - def __repr__(self): - return '<{} {}>'.format(self.__class__.__name__, self) - - def __str__(self): - return self.format() - - def format(self, *, max_width=None): - """Create a string representation of the Signature object. - - If *max_width* integer is passed, - signature will try to fit into the *max_width*. - If signature is longer than *max_width*, - all parameters will be on separate lines. - """ - result = [] - render_pos_only_separator = False - render_kw_only_separator = True - for param in self.parameters.values(): - formatted = str(param) - - kind = param.kind - - if kind == _POSITIONAL_ONLY: - render_pos_only_separator = True - elif render_pos_only_separator: - # It's not a positional-only parameter, and the flag - # is set to 'True' (there were pos-only params before.) - result.append('/') - render_pos_only_separator = False - - if kind == _VAR_POSITIONAL: - # OK, we have an '*args'-like parameter, so we won't need - # a '*' to separate keyword-only arguments - render_kw_only_separator = False - elif kind == _KEYWORD_ONLY and render_kw_only_separator: - # We have a keyword-only parameter to render and we haven't - # rendered an '*args'-like parameter before, so add a '*' - # separator to the parameters list ("foo(arg1, *, arg2)" case) - result.append('*') - # This condition should be only triggered once, so - # reset the flag - render_kw_only_separator = False - - result.append(formatted) - - if render_pos_only_separator: - # There were only positional-only parameters, hence the - # flag was not reset to 'False' - result.append('/') - - rendered = '({})'.format(', '.join(result)) - if max_width is not None and len(rendered) > max_width: - rendered = '(\n {}\n)'.format(',\n '.join(result)) - - if self.return_annotation is not _empty: - anno = formatannotation(self.return_annotation) - rendered += ' -> {}'.format(anno) - - return rendered - - -def signature(obj, *, follow_wrapped=True, globals=None, locals=None, eval_str=False): - """Get a signature object for the passed callable.""" - return Signature.from_callable(obj, follow_wrapped=follow_wrapped, - globals=globals, locals=locals, eval_str=eval_str) - - -class BufferFlags(enum.IntFlag): - SIMPLE = 0x0 - WRITABLE = 0x1 - FORMAT = 0x4 - ND = 0x8 - STRIDES = 0x10 | ND - C_CONTIGUOUS = 0x20 | STRIDES - F_CONTIGUOUS = 0x40 | STRIDES - ANY_CONTIGUOUS = 0x80 | STRIDES - INDIRECT = 0x100 | STRIDES - CONTIG = ND | WRITABLE - CONTIG_RO = ND - STRIDED = STRIDES | WRITABLE - STRIDED_RO = STRIDES - RECORDS = STRIDES | WRITABLE | FORMAT - RECORDS_RO = STRIDES | FORMAT - FULL = INDIRECT | WRITABLE | FORMAT - FULL_RO = INDIRECT | FORMAT - READ = 0x100 - WRITE = 0x200 - - -def _main(): - """ Logic for inspecting an object given at command line """ - import argparse - import importlib - - parser = argparse.ArgumentParser() - parser.add_argument( - 'object', - help="The object to be analysed. 
" - "It supports the 'module:qualname' syntax") - parser.add_argument( - '-d', '--details', action='store_true', - help='Display info about the module rather than its source code') - - args = parser.parse_args() - - target = args.object - mod_name, has_attrs, attrs = target.partition(":") - try: - obj = module = importlib.import_module(mod_name) - except Exception as exc: - msg = "Failed to import {} ({}: {})".format(mod_name, - type(exc).__name__, - exc) - print(msg, file=sys.stderr) - sys.exit(2) - - if has_attrs: - parts = attrs.split(".") - obj = module - for part in parts: - obj = getattr(obj, part) - - if module.__name__ in sys.builtin_module_names: - print("Can't get info for builtin modules.", file=sys.stderr) - sys.exit(1) - - if args.details: - print('Target: {}'.format(target)) - print('Origin: {}'.format(getsourcefile(module))) - print('Cached: {}'.format(module.__cached__)) - if obj is module: - print('Loader: {}'.format(repr(module.__loader__))) - if hasattr(module, '__path__'): - print('Submodule search path: {}'.format(module.__path__)) - else: - try: - __, lineno = findsource(obj) - except Exception: - pass - else: - print('Line: {}'.format(lineno)) - - print('\n') - else: - print(getsource(obj)) - - -if __name__ == "__main__": - _main() diff --git a/Python313_13_x86_Template/Lib/io.py b/Python313_13_x86_Template/Lib/io.py deleted file mode 100644 index f0e2fa15..00000000 --- a/Python313_13_x86_Template/Lib/io.py +++ /dev/null @@ -1,99 +0,0 @@ -"""The io module provides the Python interfaces to stream handling. The -builtin open function is defined in this module. - -At the top of the I/O hierarchy is the abstract base class IOBase. It -defines the basic interface to a stream. Note, however, that there is no -separation between reading and writing to streams; implementations are -allowed to raise an OSError if they do not support a given operation. - -Extending IOBase is RawIOBase which deals simply with the reading and -writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide -an interface to OS files. - -BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its -subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer -streams that are readable, writable, and both respectively. -BufferedRandom provides a buffered interface to random access -streams. BytesIO is a simple stream of in-memory bytes. - -Another IOBase subclass, TextIOBase, deals with the encoding and decoding -of streams into text. TextIOWrapper, which extends it, is a buffered text -interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO -is an in-memory stream for text. - -Argument names are not part of the specification, and only the arguments -of open() are intended to be used as keyword arguments. - -data: - -DEFAULT_BUFFER_SIZE - - An int containing the default buffer size used by the module's buffered - I/O classes. open() uses the file's blksize (as obtained by os.stat) if - possible. -""" -# New I/O library conforming to PEP 3116. 
- -__author__ = ("Guido van Rossum , " - "Mike Verdone , " - "Mark Russell , " - "Antoine Pitrou , " - "Amaury Forgeot d'Arc , " - "Benjamin Peterson ") - -__all__ = ["BlockingIOError", "open", "open_code", "IOBase", "RawIOBase", - "FileIO", "BytesIO", "StringIO", "BufferedIOBase", - "BufferedReader", "BufferedWriter", "BufferedRWPair", - "BufferedRandom", "TextIOBase", "TextIOWrapper", - "UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END", - "DEFAULT_BUFFER_SIZE", "text_encoding", "IncrementalNewlineDecoder"] - - -import _io -import abc - -from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation, - open, open_code, FileIO, BytesIO, StringIO, BufferedReader, - BufferedWriter, BufferedRWPair, BufferedRandom, - IncrementalNewlineDecoder, text_encoding, TextIOWrapper) - - -# Pretend this exception was created here. -UnsupportedOperation.__module__ = "io" - -# for seek() -SEEK_SET = 0 -SEEK_CUR = 1 -SEEK_END = 2 - -# Declaring ABCs in C is tricky so we do it here. -# Method descriptions and default implementations are inherited from the C -# version however. -class IOBase(_io._IOBase, metaclass=abc.ABCMeta): - __doc__ = _io._IOBase.__doc__ - -class RawIOBase(_io._RawIOBase, IOBase): - __doc__ = _io._RawIOBase.__doc__ - -class BufferedIOBase(_io._BufferedIOBase, IOBase): - __doc__ = _io._BufferedIOBase.__doc__ - -class TextIOBase(_io._TextIOBase, IOBase): - __doc__ = _io._TextIOBase.__doc__ - -RawIOBase.register(FileIO) - -for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom, - BufferedRWPair): - BufferedIOBase.register(klass) - -for klass in (StringIO, TextIOWrapper): - TextIOBase.register(klass) -del klass - -try: - from _io import _WindowsConsoleIO -except ImportError: - pass -else: - RawIOBase.register(_WindowsConsoleIO) diff --git a/Python313_13_x86_Template/Lib/ipaddress.py b/Python313_13_x86_Template/Lib/ipaddress.py deleted file mode 100644 index 4235ed87..00000000 --- a/Python313_13_x86_Template/Lib/ipaddress.py +++ /dev/null @@ -1,2440 +0,0 @@ -# Copyright 2007 Google Inc. -# Licensed to PSF under a Contributor Agreement. - -"""A fast, lightweight IPv4/IPv6 manipulation library in Python. - -This library is used to create/poke/manipulate IPv4 and IPv6 addresses -and networks. - -""" - -__version__ = '1.0' - - -import functools - -IPV4LENGTH = 32 -IPV6LENGTH = 128 - - -class AddressValueError(ValueError): - """A Value Error related to the address.""" - - -class NetmaskValueError(ValueError): - """A Value Error related to the netmask.""" - - -def ip_address(address): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Address or IPv6Address object. - - Raises: - ValueError: if the *address* passed isn't either a v4 or a v6 - address - - """ - try: - return IPv4Address(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Address(address) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 address') - - -def ip_network(address, strict=True): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP network. Either IPv4 or - IPv6 networks may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. 
- - Returns: - An IPv4Network or IPv6Network object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. Or if the network has host bits set. - - """ - try: - return IPv4Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 network') - - -def ip_interface(address): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Interface or IPv6Interface object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. - - Notes: - The IPv?Interface classes describe an Address on a particular - Network, so they're basically a combination of both the Address - and Network classes. - - """ - try: - return IPv4Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 interface') - - -def v4_int_to_packed(address): - """Represent an address as 4 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv4 IP address. - - Returns: - The integer address packed as 4 bytes in network (big-endian) order. - - Raises: - ValueError: If the integer is negative or too large to be an - IPv4 IP address. - - """ - try: - return address.to_bytes(4) # big endian - except OverflowError: - raise ValueError("Address negative or too large for IPv4") - - -def v6_int_to_packed(address): - """Represent an address as 16 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv6 IP address. - - Returns: - The integer address packed as 16 bytes in network (big-endian) order. - - """ - try: - return address.to_bytes(16) # big endian - except OverflowError: - raise ValueError("Address negative or too large for IPv6") - - -def _split_optional_netmask(address): - """Helper to split the netmask and raise AddressValueError if needed""" - addr = str(address).split('/') - if len(addr) > 2: - raise AddressValueError(f"Only one '/' permitted in {address!r}") - return addr - - -def _find_address_range(addresses): - """Find a sequence of sorted deduplicated IPv#Address. - - Args: - addresses: a list of IPv#Address objects. - - Yields: - A tuple containing the first and last IP addresses in the sequence. - - """ - it = iter(addresses) - first = last = next(it) - for ip in it: - if ip._ip != last._ip + 1: - yield first, last - first = ip - last = ip - yield first, last - - -def _count_righthand_zero_bits(number, bits): - """Count the number of zero bits on the right hand side. - - Args: - number: an integer. - bits: maximum number of bits to count. - - Returns: - The number of zero bits on the right hand side of the number. - - """ - if number == 0: - return bits - return min(bits, (~number & (number-1)).bit_length()) - - -def summarize_address_range(first, last): - """Summarize a network range given the first and last IP addresses. - - Example: - >>> list(summarize_address_range(IPv4Address('192.0.2.0'), - ... IPv4Address('192.0.2.130'))) - ... 
#doctest: +NORMALIZE_WHITESPACE - [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), - IPv4Network('192.0.2.130/32')] - - Args: - first: the first IPv4Address or IPv6Address in the range. - last: the last IPv4Address or IPv6Address in the range. - - Returns: - An iterator of the summarized IPv(4|6) network objects. - - Raise: - TypeError: - If the first and last objects are not IP addresses. - If the first and last objects are not the same version. - ValueError: - If the last object is not greater than the first. - If the version of the first address is not 4 or 6. - - """ - if (not (isinstance(first, _BaseAddress) and - isinstance(last, _BaseAddress))): - raise TypeError('first and last must be IP addresses, not networks') - if first.version != last.version: - raise TypeError("%s and %s are not of the same version" % ( - first, last)) - if first > last: - raise ValueError('last IP address must be greater than first') - - if first.version == 4: - ip = IPv4Network - elif first.version == 6: - ip = IPv6Network - else: - raise ValueError('unknown IP version') - - ip_bits = first._max_prefixlen - first_int = first._ip - last_int = last._ip - while first_int <= last_int: - nbits = min(_count_righthand_zero_bits(first_int, ip_bits), - (last_int - first_int + 1).bit_length() - 1) - net = ip((first_int, ip_bits - nbits)) - yield net - first_int += 1 << nbits - if first_int - 1 == ip._ALL_ONES: - break - - -def _collapse_addresses_internal(addresses): - """Loops through the addresses, collapsing concurrent netblocks. - - Example: - - ip1 = IPv4Network('192.0.2.0/26') - ip2 = IPv4Network('192.0.2.64/26') - ip3 = IPv4Network('192.0.2.128/26') - ip4 = IPv4Network('192.0.2.192/26') - - _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> - [IPv4Network('192.0.2.0/24')] - - This shouldn't be called directly; it is called via - collapse_addresses([]). - - Args: - addresses: A list of IPv4Network's or IPv6Network's - - Returns: - A list of IPv4Network's or IPv6Network's depending on what we were - passed. - - """ - # First merge - to_merge = list(addresses) - subnets = {} - while to_merge: - net = to_merge.pop() - supernet = net.supernet() - existing = subnets.get(supernet) - if existing is None: - subnets[supernet] = net - elif existing != net: - # Merge consecutive subnets - del subnets[supernet] - to_merge.append(supernet) - # Then iterate over resulting networks, skipping subsumed subnets - last = None - for net in sorted(subnets.values()): - if last is not None: - # Since they are sorted, last.network_address <= net.network_address - # is a given. - if last.broadcast_address >= net.broadcast_address: - continue - yield net - last = net - - -def collapse_addresses(addresses): - """Collapse a list of IP objects. - - Example: - collapse_addresses([IPv4Network('192.0.2.0/25'), - IPv4Network('192.0.2.128/25')]) -> - [IPv4Network('192.0.2.0/24')] - - Args: - addresses: An iterable of IPv4Network or IPv6Network objects. - - Returns: - An iterator of the collapsed IPv(4|6)Network objects. - - Raises: - TypeError: If passed a list of mixed version objects. 
- - """ - addrs = [] - ips = [] - nets = [] - - # split IP addresses and networks - for ip in addresses: - if isinstance(ip, _BaseAddress): - if ips and ips[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, ips[-1])) - ips.append(ip) - elif ip._prefixlen == ip._max_prefixlen: - if ips and ips[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, ips[-1])) - try: - ips.append(ip.ip) - except AttributeError: - ips.append(ip.network_address) - else: - if nets and nets[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, nets[-1])) - nets.append(ip) - - # sort and dedup - ips = sorted(set(ips)) - - # find consecutive address ranges in the sorted sequence and summarize them - if ips: - for first, last in _find_address_range(ips): - addrs.extend(summarize_address_range(first, last)) - - return _collapse_addresses_internal(addrs + nets) - - -def get_mixed_type_key(obj): - """Return a key suitable for sorting between networks and addresses. - - Address and Network objects are not sortable by default; they're - fundamentally different so the expression - - IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') - - doesn't make any sense. There are some times however, where you may wish - to have ipaddress sort these for you anyway. If you need to do this, you - can use this function as the key= argument to sorted(). - - Args: - obj: either a Network or Address object. - Returns: - appropriate key. - - """ - if isinstance(obj, _BaseNetwork): - return obj._get_networks_key() - elif isinstance(obj, _BaseAddress): - return obj._get_address_key() - return NotImplemented - - -class _IPAddressBase: - - """The mother class.""" - - __slots__ = () - - @property - def exploded(self): - """Return the longhand version of the IP address as a string.""" - return self._explode_shorthand_ip_string() - - @property - def compressed(self): - """Return the shorthand version of the IP address as a string.""" - return str(self) - - @property - def reverse_pointer(self): - """The name of the reverse DNS pointer for the IP address, e.g.: - >>> ipaddress.ip_address("127.0.0.1").reverse_pointer - '1.0.0.127.in-addr.arpa' - >>> ipaddress.ip_address("2001:db8::1").reverse_pointer - '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' - - """ - return self._reverse_pointer() - - @property - def version(self): - msg = '%200s has no version specified' % (type(self),) - raise NotImplementedError(msg) - - def _check_int_address(self, address): - if address < 0: - msg = "%d (< 0) is not permitted as an IPv%d address" - raise AddressValueError(msg % (address, self._version)) - if address > self._ALL_ONES: - msg = "%d (>= 2**%d) is not permitted as an IPv%d address" - raise AddressValueError(msg % (address, self._max_prefixlen, - self._version)) - - def _check_packed_address(self, address, expected_len): - address_len = len(address) - if address_len != expected_len: - msg = "%r (len %d != %d) is not permitted as an IPv%d address" - raise AddressValueError(msg % (address, address_len, - expected_len, self._version)) - - @classmethod - def _ip_int_from_prefix(cls, prefixlen): - """Turn the prefix length into a bitwise netmask - - Args: - prefixlen: An integer, the prefix length. - - Returns: - An integer. - - """ - return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) - - @classmethod - def _prefix_from_ip_int(cls, ip_int): - """Return prefix length from the bitwise netmask. 
- - Args: - ip_int: An integer, the netmask in expanded bitwise format - - Returns: - An integer, the prefix length. - - Raises: - ValueError: If the input intermingles zeroes & ones - """ - trailing_zeroes = _count_righthand_zero_bits(ip_int, - cls._max_prefixlen) - prefixlen = cls._max_prefixlen - trailing_zeroes - leading_ones = ip_int >> trailing_zeroes - all_ones = (1 << prefixlen) - 1 - if leading_ones != all_ones: - byteslen = cls._max_prefixlen // 8 - details = ip_int.to_bytes(byteslen, 'big') - msg = 'Netmask pattern %r mixes zeroes & ones' - raise ValueError(msg % details) - return prefixlen - - @classmethod - def _report_invalid_netmask(cls, netmask_str): - msg = '%r is not a valid netmask' % netmask_str - raise NetmaskValueError(msg) from None - - @classmethod - def _prefix_from_prefix_string(cls, prefixlen_str): - """Return prefix length from a numeric string - - Args: - prefixlen_str: The string to be converted - - Returns: - An integer, the prefix length. - - Raises: - NetmaskValueError: If the input is not a valid netmask - """ - # int allows a leading +/- as well as surrounding whitespace, - # so we ensure that isn't the case - if not (prefixlen_str.isascii() and prefixlen_str.isdigit()): - cls._report_invalid_netmask(prefixlen_str) - try: - prefixlen = int(prefixlen_str) - except ValueError: - cls._report_invalid_netmask(prefixlen_str) - if not (0 <= prefixlen <= cls._max_prefixlen): - cls._report_invalid_netmask(prefixlen_str) - return prefixlen - - @classmethod - def _prefix_from_ip_string(cls, ip_str): - """Turn a netmask/hostmask string into a prefix length - - Args: - ip_str: The netmask/hostmask to be converted - - Returns: - An integer, the prefix length. - - Raises: - NetmaskValueError: If the input is not a valid netmask/hostmask - """ - # Parse the netmask/hostmask like an IP address. - try: - ip_int = cls._ip_int_from_string(ip_str) - except AddressValueError: - cls._report_invalid_netmask(ip_str) - - # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). - # Note that the two ambiguous cases (all-ones and all-zeroes) are - # treated as netmasks. - try: - return cls._prefix_from_ip_int(ip_int) - except ValueError: - pass - - # Invert the bits, and try matching a /0+1+/ hostmask instead. - ip_int ^= cls._ALL_ONES - try: - return cls._prefix_from_ip_int(ip_int) - except ValueError: - cls._report_invalid_netmask(ip_str) - - @classmethod - def _split_addr_prefix(cls, address): - """Helper function to parse address of Network/Interface. - - Arg: - address: Argument of Network/Interface. - - Returns: - (addr, prefix) tuple. - """ - # a packed address or integer - if isinstance(address, (bytes, int)): - return address, cls._max_prefixlen - - if not isinstance(address, tuple): - # Assume input argument to be string or any object representation - # which converts into a formatted IP prefix string. - address = _split_optional_netmask(address) - - # Constructing from a tuple (addr, [mask]) - if len(address) > 1: - return address - return address[0], cls._max_prefixlen - - def __reduce__(self): - return self.__class__, (str(self),) - - -_address_fmt_re = None - -@functools.total_ordering -class _BaseAddress(_IPAddressBase): - - """A generic IP object. - - This IP class contains the version independent methods which are - used by single IP addresses. 
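-
-    Example (illustrative, via the module-level factory):
-
-        >>> ip_address('192.0.2.1') + 1
-        IPv4Address('192.0.2.2')
-        >>> int(ip_address('192.0.2.1'))
-        3221225985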
- """ - - __slots__ = () - - def __int__(self): - return self._ip - - def __eq__(self, other): - try: - return (self._ip == other._ip - and self._version == other._version) - except AttributeError: - return NotImplemented - - def __lt__(self, other): - if not isinstance(other, _BaseAddress): - return NotImplemented - if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - self, other)) - if self._ip != other._ip: - return self._ip < other._ip - return False - - # Shorthand for Integer addition and subtraction. This is not - # meant to ever support addition/subtraction of addresses. - def __add__(self, other): - if not isinstance(other, int): - return NotImplemented - return self.__class__(int(self) + other) - - def __sub__(self, other): - if not isinstance(other, int): - return NotImplemented - return self.__class__(int(self) - other) - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, str(self)) - - def __str__(self): - return str(self._string_from_ip_int(self._ip)) - - def __hash__(self): - return hash(hex(int(self._ip))) - - def _get_address_key(self): - return (self._version, self) - - def __reduce__(self): - return self.__class__, (self._ip,) - - def __format__(self, fmt): - """Returns an IP address as a formatted string. - - Supported presentation types are: - 's': returns the IP address as a string (default) - 'b': converts to binary and returns a zero-padded string - 'X' or 'x': converts to upper- or lower-case hex and returns a zero-padded string - 'n': the same as 'b' for IPv4 and 'x' for IPv6 - - For binary and hex presentation types, the alternate form specifier - '#' and the grouping option '_' are supported. - """ - - # Support string formatting - if not fmt or fmt[-1] == 's': - return format(str(self), fmt) - - # From here on down, support for 'bnXx' - global _address_fmt_re - if _address_fmt_re is None: - import re - _address_fmt_re = re.compile('(#?)(_?)([xbnX])') - - m = _address_fmt_re.fullmatch(fmt) - if not m: - return super().__format__(fmt) - - alternate, grouping, fmt_base = m.groups() - - # Set some defaults - if fmt_base == 'n': - if self._version == 4: - fmt_base = 'b' # Binary is default for ipv4 - else: - fmt_base = 'x' # Hex is default for ipv6 - - if fmt_base == 'b': - padlen = self._max_prefixlen - else: - padlen = self._max_prefixlen // 4 - - if grouping: - padlen += padlen // 4 - 1 - - if alternate: - padlen += 2 # 0b or 0x - - return format(int(self), f'{alternate}0{padlen}{grouping}{fmt_base}') - - -@functools.total_ordering -class _BaseNetwork(_IPAddressBase): - """A generic IP network object. - - This IP class contains the version independent methods which are - used by networks. - """ - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, str(self)) - - def __str__(self): - return '%s/%d' % (self.network_address, self.prefixlen) - - def hosts(self): - """Generate Iterator over usable hosts in a network. - - This is like __iter__ except it doesn't return the network - or broadcast addresses. 
- - """ - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in range(network + 1, broadcast): - yield self._address_class(x) - - def __iter__(self): - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in range(network, broadcast + 1): - yield self._address_class(x) - - def __getitem__(self, n): - network = int(self.network_address) - broadcast = int(self.broadcast_address) - if n >= 0: - if network + n > broadcast: - raise IndexError('address out of range') - return self._address_class(network + n) - else: - n += 1 - if broadcast + n < network: - raise IndexError('address out of range') - return self._address_class(broadcast + n) - - def __lt__(self, other): - if not isinstance(other, _BaseNetwork): - return NotImplemented - if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - self, other)) - if self.network_address != other.network_address: - return self.network_address < other.network_address - if self.netmask != other.netmask: - return self.netmask < other.netmask - return False - - def __eq__(self, other): - try: - return (self._version == other._version and - self.network_address == other.network_address and - int(self.netmask) == int(other.netmask)) - except AttributeError: - return NotImplemented - - def __hash__(self): - return hash((int(self.network_address), int(self.netmask))) - - def __contains__(self, other): - # always false if one is v4 and the other is v6. - if self._version != other._version: - return False - # dealing with another network. - if isinstance(other, _BaseNetwork): - return False - # dealing with another address - else: - # address - return other._ip & self.netmask._ip == self.network_address._ip - - def overlaps(self, other): - """Tell if self is partly contained in other.""" - return self.network_address in other or ( - self.broadcast_address in other or ( - other.network_address in self or ( - other.broadcast_address in self))) - - @functools.cached_property - def broadcast_address(self): - return self._address_class(int(self.network_address) | - int(self.hostmask)) - - @functools.cached_property - def hostmask(self): - return self._address_class(int(self.netmask) ^ self._ALL_ONES) - - @property - def with_prefixlen(self): - return '%s/%d' % (self.network_address, self._prefixlen) - - @property - def with_netmask(self): - return '%s/%s' % (self.network_address, self.netmask) - - @property - def with_hostmask(self): - return '%s/%s' % (self.network_address, self.hostmask) - - @property - def num_addresses(self): - """Number of hosts in the current subnet.""" - return int(self.broadcast_address) - int(self.network_address) + 1 - - @property - def _address_class(self): - # Returning bare address objects (rather than interfaces) allows for - # more consistent behaviour across the network address, broadcast - # address and individual host addresses. - msg = '%200s has no associated address class' % (type(self),) - raise NotImplementedError(msg) - - @property - def prefixlen(self): - return self._prefixlen - - def address_exclude(self, other): - """Remove an address from a larger block. 
- - For example: - - addr1 = ip_network('192.0.2.0/28') - addr2 = ip_network('192.0.2.1/32') - list(addr1.address_exclude(addr2)) = - [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), - IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] - - or IPv6: - - addr1 = ip_network('2001:db8::1/32') - addr2 = ip_network('2001:db8::1/128') - list(addr1.address_exclude(addr2)) = - [ip_network('2001:db8::1/128'), - ip_network('2001:db8::2/127'), - ip_network('2001:db8::4/126'), - ip_network('2001:db8::8/125'), - ... - ip_network('2001:db8:8000::/33')] - - Args: - other: An IPv4Network or IPv6Network object of the same type. - - Returns: - An iterator of the IPv(4|6)Network objects which is self - minus other. - - Raises: - TypeError: If self and other are of differing address - versions, or if other is not a network object. - ValueError: If other is not completely contained by self. - - """ - if not self._version == other._version: - raise TypeError("%s and %s are not of the same version" % ( - self, other)) - - if not isinstance(other, _BaseNetwork): - raise TypeError("%s is not a network object" % other) - - if not other.subnet_of(self): - raise ValueError('%s not contained in %s' % (other, self)) - if other == self: - return - - # Make sure we're comparing the network of other. - other = other.__class__('%s/%s' % (other.network_address, - other.prefixlen)) - - s1, s2 = self.subnets() - while s1 != other and s2 != other: - if other.subnet_of(s1): - yield s2 - s1, s2 = s1.subnets() - elif other.subnet_of(s2): - yield s1 - s1, s2 = s2.subnets() - else: - # If we got here, there's a bug somewhere. - raise AssertionError('Error performing exclusion: ' - 's1: %s s2: %s other: %s' % - (s1, s2, other)) - if s1 == other: - yield s2 - elif s2 == other: - yield s1 - else: - # If we got here, there's a bug somewhere. - raise AssertionError('Error performing exclusion: ' - 's1: %s s2: %s other: %s' % - (s1, s2, other)) - - def compare_networks(self, other): - """Compare two IP objects. - - This is only concerned about the comparison of the integer - representation of the network addresses. This means that the - host bits aren't considered at all in this method. If you want - to compare host bits, you can easily enough do a - 'HostA._ip < HostB._ip' - - Args: - other: An IP object. - - Returns: - If the IP versions of self and other are the same, returns: - - -1 if self < other: - eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') - IPv6Network('2001:db8::1000/124') < - IPv6Network('2001:db8::2000/124') - 0 if self == other - eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') - IPv6Network('2001:db8::1000/124') == - IPv6Network('2001:db8::1000/124') - 1 if self > other - eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') - IPv6Network('2001:db8::2000/124') > - IPv6Network('2001:db8::1000/124') - - Raises: - TypeError if the IP versions are different. - - """ - # does this need to raise a ValueError? - if self._version != other._version: - raise TypeError('%s and %s are not of the same type' % ( - self, other)) - # self._version == other._version below here: - if self.network_address < other.network_address: - return -1 - if self.network_address > other.network_address: - return 1 - # self.network_address == other.network_address below here: - if self.netmask < other.netmask: - return -1 - if self.netmask > other.netmask: - return 1 - return 0 - - def _get_networks_key(self): - """Network-only key function. 
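The address_exclude() generator above yields the larger remainders first; sorting reproduces the order listed in its docstring:

import ipaddress

big = ipaddress.ip_network('192.0.2.0/28')
print(sorted(big.address_exclude(ipaddress.ip_network('192.0.2.1/32'))))
# [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
#  IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]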
- - Returns an object that identifies this address' network and - netmask. This function is a suitable "key" argument for sorted() - and list.sort(). - - """ - return (self._version, self.network_address, self.netmask) - - def subnets(self, prefixlen_diff=1, new_prefix=None): - """The subnets which join to make the current subnet. - - In the case that self contains only one IP - (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 - for IPv6), yield an iterator with just ourself. - - Args: - prefixlen_diff: An integer, the amount the prefix length - should be increased by. This should not be set if - new_prefix is also set. - new_prefix: The desired new prefix length. This must be a - larger number (smaller prefix) than the existing prefix. - This should not be set if prefixlen_diff is also set. - - Returns: - An iterator of IPv(4|6) objects. - - Raises: - ValueError: The prefixlen_diff is too small or too large. - OR - prefixlen_diff and new_prefix are both set or new_prefix - is a smaller number than the current prefix (smaller - number means a larger network) - - """ - if self._prefixlen == self._max_prefixlen: - yield self - return - - if new_prefix is not None: - if new_prefix < self._prefixlen: - raise ValueError('new prefix must be longer') - if prefixlen_diff != 1: - raise ValueError('cannot set prefixlen_diff and new_prefix') - prefixlen_diff = new_prefix - self._prefixlen - - if prefixlen_diff < 0: - raise ValueError('prefix length diff must be > 0') - new_prefixlen = self._prefixlen + prefixlen_diff - - if new_prefixlen > self._max_prefixlen: - raise ValueError( - 'prefix length diff %d is invalid for netblock %s' % ( - new_prefixlen, self)) - - start = int(self.network_address) - end = int(self.broadcast_address) + 1 - step = (int(self.hostmask) + 1) >> prefixlen_diff - for new_addr in range(start, end, step): - current = self.__class__((new_addr, new_prefixlen)) - yield current - - def supernet(self, prefixlen_diff=1, new_prefix=None): - """The supernet containing the current network. - - Args: - prefixlen_diff: An integer, the amount the prefix length of - the network should be decreased by. For example, given a - /24 network and a prefixlen_diff of 3, a supernet with a - /21 netmask is returned. - - Returns: - An IPv4 network object. - - Raises: - ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have - a negative prefix length. - OR - If prefixlen_diff and new_prefix are both set or new_prefix is a - larger number than the current prefix (larger number means a - smaller network) - - """ - if self._prefixlen == 0: - return self - - if new_prefix is not None: - if new_prefix > self._prefixlen: - raise ValueError('new prefix must be shorter') - if prefixlen_diff != 1: - raise ValueError('cannot set prefixlen_diff and new_prefix') - prefixlen_diff = self._prefixlen - new_prefix - - new_prefixlen = self.prefixlen - prefixlen_diff - if new_prefixlen < 0: - raise ValueError( - 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % - (self.prefixlen, prefixlen_diff)) - return self.__class__(( - int(self.network_address) & (int(self.netmask) << prefixlen_diff), - new_prefixlen - )) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is a multicast address. - See RFC 2373 2.7 for details. 
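A quick sketch of subnets() and supernet() from above, splitting and widening a /24:

import ipaddress

net = ipaddress.ip_network('192.0.2.0/24')
print(list(net.subnets(new_prefix=26)))  # four /26 blocks: .0, .64, .128, .192
print(net.supernet(prefixlen_diff=3))    # 192.0.0.0/21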
- - """ - return (self.network_address.is_multicast and - self.broadcast_address.is_multicast) - - @staticmethod - def _is_subnet_of(a, b): - try: - # Always false if one is v4 and the other is v6. - if a._version != b._version: - raise TypeError(f"{a} and {b} are not of the same version") - return (b.network_address <= a.network_address and - b.broadcast_address >= a.broadcast_address) - except AttributeError: - raise TypeError(f"Unable to test subnet containment " - f"between {a} and {b}") - - def subnet_of(self, other): - """Return True if this network is a subnet of other.""" - return self._is_subnet_of(self, other) - - def supernet_of(self, other): - """Return True if this network is a supernet of other.""" - return self._is_subnet_of(other, self) - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within one of the - reserved IPv6 Network ranges. - - """ - return (self.network_address.is_reserved and - self.broadcast_address.is_reserved) - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is reserved per RFC 4291. - - """ - return (self.network_address.is_link_local and - self.broadcast_address.is_link_local) - - @property - def is_private(self): - """Test if this network belongs to a private range. - - Returns: - A boolean, True if the network is reserved per - iana-ipv4-special-registry or iana-ipv6-special-registry. - - """ - return any(self.network_address in priv_network and - self.broadcast_address in priv_network - for priv_network in self._constants._private_networks) and all( - self.network_address not in network and - self.broadcast_address not in network - for network in self._constants._private_networks_exceptions - ) - - @property - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, True if the address is not reserved per - iana-ipv4-special-registry or iana-ipv6-special-registry. - - """ - return not self.is_private - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 2373 2.5.2. - - """ - return (self.network_address.is_unspecified and - self.broadcast_address.is_unspecified) - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback address as defined in - RFC 2373 2.5.3. - - """ - return (self.network_address.is_loopback and - self.broadcast_address.is_loopback) - - -class _BaseConstants: - - _private_networks = [] - - -_BaseNetwork._constants = _BaseConstants - - -class _BaseV4: - - """Base IPv4 object. - - The following methods are used by IPv4 objects in both single IP - addresses and networks. - - """ - - __slots__ = () - _version = 4 - # Equivalent to 255.255.255.255 or 32 bits of 1's. - _ALL_ONES = (2**IPV4LENGTH) - 1 - - _max_prefixlen = IPV4LENGTH - # There are only a handful of valid v4 netmasks, so we cache them all - # when constructed (see _make_netmask()). - _netmask_cache = {} - - def _explode_shorthand_ip_string(self): - return str(self) - - @classmethod - def _make_netmask(cls, arg): - """Make a (netmask, prefix_len) tuple from the given argument. - - Argument can be: - - an integer (the prefix length) - - a string representing the prefix length (e.g. "24") - - a string representing the prefix netmask (e.g. 
"255.255.255.0") - """ - if arg not in cls._netmask_cache: - if isinstance(arg, int): - prefixlen = arg - if not (0 <= prefixlen <= cls._max_prefixlen): - cls._report_invalid_netmask(prefixlen) - else: - try: - # Check for a netmask in prefix length form - prefixlen = cls._prefix_from_prefix_string(arg) - except NetmaskValueError: - # Check for a netmask or hostmask in dotted-quad form. - # This may raise NetmaskValueError. - prefixlen = cls._prefix_from_ip_string(arg) - netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) - cls._netmask_cache[arg] = netmask, prefixlen - return cls._netmask_cache[arg] - - @classmethod - def _ip_int_from_string(cls, ip_str): - """Turn the given IP string into an integer for comparison. - - Args: - ip_str: A string, the IP ip_str. - - Returns: - The IP ip_str as an integer. - - Raises: - AddressValueError: if ip_str isn't a valid IPv4 Address. - - """ - if not ip_str: - raise AddressValueError('Address cannot be empty') - - octets = ip_str.split('.') - if len(octets) != 4: - raise AddressValueError("Expected 4 octets in %r" % ip_str) - - try: - return int.from_bytes(map(cls._parse_octet, octets), 'big') - except ValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) from None - - @classmethod - def _parse_octet(cls, octet_str): - """Convert a decimal octet into an integer. - - Args: - octet_str: A string, the number to parse. - - Returns: - The octet as an integer. - - Raises: - ValueError: if the octet isn't strictly a decimal from [0..255]. - - """ - if not octet_str: - raise ValueError("Empty octet not permitted") - # Reject non-ASCII digits. - if not (octet_str.isascii() and octet_str.isdigit()): - msg = "Only decimal digits permitted in %r" - raise ValueError(msg % octet_str) - # We do the length check second, since the invalid character error - # is likely to be more informative for the user - if len(octet_str) > 3: - msg = "At most 3 characters permitted in %r" - raise ValueError(msg % octet_str) - # Handle leading zeros as strict as glibc's inet_pton() - # See security bug bpo-36384 - if octet_str != '0' and octet_str[0] == '0': - msg = "Leading zeros are not permitted in %r" - raise ValueError(msg % octet_str) - # Convert to integer (we know digits are legal) - octet_int = int(octet_str, 10) - if octet_int > 255: - raise ValueError("Octet %d (> 255) not permitted" % octet_int) - return octet_int - - @classmethod - def _string_from_ip_int(cls, ip_int): - """Turns a 32-bit integer into dotted decimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - The IP address as a string in dotted decimal notation. - - """ - return '.'.join(map(str, ip_int.to_bytes(4, 'big'))) - - def _reverse_pointer(self): - """Return the reverse DNS pointer name for the IPv4 address. - - This implements the method described in RFC1035 3.5. - - """ - reverse_octets = str(self).split('.')[::-1] - return '.'.join(reverse_octets) + '.in-addr.arpa' - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def version(self): - return self._version - - -class IPv4Address(_BaseV4, _BaseAddress): - - """Represent and manipulate single IPv4 Addresses.""" - - __slots__ = ('_ip', '__weakref__') - - def __init__(self, address): - - """ - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv4Address('192.0.2.1') == IPv4Address(3221225985). 
- or, more generally - IPv4Address(int(IPv4Address('192.0.2.1'))) == - IPv4Address('192.0.2.1') - - Raises: - AddressValueError: If ipaddress isn't a valid IPv4 address. - - """ - # Efficient constructor from integer. - if isinstance(address, int): - self._check_int_address(address) - self._ip = address - return - - # Constructing from a packed address - if isinstance(address, bytes): - self._check_packed_address(address, 4) - self._ip = int.from_bytes(address) # big endian - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. - addr_str = str(address) - if '/' in addr_str: - raise AddressValueError(f"Unexpected '/' in {address!r}") - self._ip = self._ip_int_from_string(addr_str) - - @property - def packed(self): - """The binary representation of this address.""" - return v4_int_to_packed(self._ip) - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within the - reserved IPv4 Network range. - - """ - return self in self._constants._reserved_network - - @property - @functools.lru_cache() - def is_private(self): - """``True`` if the address is defined as not globally reachable by - iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ - (for IPv6) with the following exceptions: - - * ``is_private`` is ``False`` for ``100.64.0.0/10`` - * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the - semantics of the underlying IPv4 addresses and the following condition holds - (see :attr:`IPv6Address.ipv4_mapped`):: - - address.is_private == address.ipv4_mapped.is_private - - ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10`` - IPv4 range where they are both ``False``. - """ - return ( - any(self in net for net in self._constants._private_networks) - and all(self not in net for net in self._constants._private_networks_exceptions) - ) - - @property - @functools.lru_cache() - def is_global(self): - """``True`` if the address is defined as globally reachable by - iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ - (for IPv6) with the following exception: - - For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the - semantics of the underlying IPv4 addresses and the following condition holds - (see :attr:`IPv6Address.ipv4_mapped`):: - - address.is_global == address.ipv4_mapped.is_global - - ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10`` - IPv4 range where they are both ``False``. - """ - return self not in self._constants._public_network and not self.is_private - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is multicast. - See RFC 3171 for details. - - """ - return self in self._constants._multicast_network - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 5735 3. - - """ - return self == self._constants._unspecified_address - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback per RFC 3330. - - """ - return self in self._constants._loopback_network - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. 
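The is_private/is_global docstrings above single out 100.64.0.0/10 (RFC 6598 shared address space) as the one range where both properties are False:

import ipaddress

cgn = ipaddress.ip_address('100.64.0.1')
print(cgn.is_private, cgn.is_global)                # False False
print(ipaddress.ip_address('10.0.0.1').is_private)  # True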
- - Returns: - A boolean, True if the address is link-local per RFC 3927. - - """ - return self in self._constants._linklocal_network - - @property - def ipv6_mapped(self): - """Return the IPv4-mapped IPv6 address. - - Returns: - The IPv4-mapped IPv6 address per RFC 4291. - - """ - return IPv6Address(f'::ffff:{self}') - - -class IPv4Interface(IPv4Address): - - def __init__(self, address): - addr, mask = self._split_addr_prefix(address) - - IPv4Address.__init__(self, addr) - self.network = IPv4Network((addr, mask), strict=False) - self.netmask = self.network.netmask - self._prefixlen = self.network._prefixlen - - @functools.cached_property - def hostmask(self): - return self.network.hostmask - - def __str__(self): - return '%s/%d' % (self._string_from_ip_int(self._ip), - self._prefixlen) - - def __eq__(self, other): - address_equal = IPv4Address.__eq__(self, other) - if address_equal is NotImplemented or not address_equal: - return address_equal - try: - return self.network == other.network - except AttributeError: - # An interface with an associated network is NOT the - # same as an unassociated address. That's why the hash - # takes the extra info into account. - return False - - def __lt__(self, other): - address_less = IPv4Address.__lt__(self, other) - if address_less is NotImplemented: - return NotImplemented - try: - return (self.network < other.network or - self.network == other.network and address_less) - except AttributeError: - # We *do* allow addresses and interfaces to be sorted. The - # unassociated address is considered less than all interfaces. - return False - - def __hash__(self): - return hash((self._ip, self._prefixlen, int(self.network.network_address))) - - __reduce__ = _IPAddressBase.__reduce__ - - @property - def ip(self): - return IPv4Address(self._ip) - - @property - def with_prefixlen(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self._prefixlen) - - @property - def with_netmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.netmask) - - @property - def with_hostmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.hostmask) - - -class IPv4Network(_BaseV4, _BaseNetwork): - - """This class represents and manipulates 32-bit IPv4 network + addresses. - - Attributes: [examples for IPv4Network('192.0.2.0/27')] - .network_address: IPv4Address('192.0.2.0') - .hostmask: IPv4Address('0.0.0.31') - .broadcast_address: IPv4Address('192.0.2.31') - .netmask: IPv4Address('255.255.255.224') - .prefixlen: 27 - - """ - # Class to use when creating address objects - _address_class = IPv4Address - - def __init__(self, address, strict=True): - """Instantiate a new IPv4 network object. - - Args: - address: A string or integer representing the IP [& network]. - '192.0.2.0/24' - '192.0.2.0/255.255.255.0' - '192.0.2.0/0.0.0.255' - are all functionally the same in IPv4. Similarly, - '192.0.2.1' - '192.0.2.1/255.255.255.255' - '192.0.2.1/32' - are also functionally equivalent. That is to say, failing to - provide a subnetmask will create an object with a mask of /32. - - If the mask (portion after the / in the argument) is given in - dotted quad form, it is treated as a netmask if it starts with a - non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it - starts with a zero field (e.g. 0.255.255.255 == /8), with the - single exception of an all-zero mask which is treated as a - netmask == /0. If no mask is given, a default of /32 is used.
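How the strict flag described in this constructor docstring plays out in practice:

import ipaddress

try:
    ipaddress.ip_network('192.0.2.1/24')  # host bits set
except ValueError as exc:
    print(exc)                            # 192.0.2.1/24 has host bits set
print(ipaddress.ip_network('192.0.2.1/24', strict=False))  # 192.0.2.0/24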
- - Additionally, an integer can be passed, so - IPv4Network('192.0.2.1') == IPv4Network(3221225985) - or, more generally - IPv4Interface(int(IPv4Interface('192.0.2.1'))) == - IPv4Interface('192.0.2.1') - - Raises: - AddressValueError: If ipaddress isn't a valid IPv4 address. - NetmaskValueError: If the netmask isn't valid for - an IPv4 address. - ValueError: If strict is True and a network address is not - supplied. - """ - addr, mask = self._split_addr_prefix(address) - - self.network_address = IPv4Address(addr) - self.netmask, self._prefixlen = self._make_netmask(mask) - packed = int(self.network_address) - if packed & int(self.netmask) != packed: - if strict: - raise ValueError('%s has host bits set' % self) - else: - self.network_address = IPv4Address(packed & - int(self.netmask)) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - elif self._prefixlen == (self._max_prefixlen): - self.hosts = lambda: iter((IPv4Address(addr),)) - - @property - @functools.lru_cache() - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, True if the address is not reserved per - iana-ipv4-special-registry. - - """ - return (not (self.network_address in IPv4Network('100.64.0.0/10') and - self.broadcast_address in IPv4Network('100.64.0.0/10')) and - not self.is_private) - - -class _IPv4Constants: - _linklocal_network = IPv4Network('169.254.0.0/16') - - _loopback_network = IPv4Network('127.0.0.0/8') - - _multicast_network = IPv4Network('224.0.0.0/4') - - _public_network = IPv4Network('100.64.0.0/10') - - # Not globally reachable address blocks listed on - # https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml - _private_networks = [ - IPv4Network('0.0.0.0/8'), - IPv4Network('10.0.0.0/8'), - IPv4Network('127.0.0.0/8'), - IPv4Network('169.254.0.0/16'), - IPv4Network('172.16.0.0/12'), - IPv4Network('192.0.0.0/24'), - IPv4Network('192.0.0.170/31'), - IPv4Network('192.0.2.0/24'), - IPv4Network('192.168.0.0/16'), - IPv4Network('198.18.0.0/15'), - IPv4Network('198.51.100.0/24'), - IPv4Network('203.0.113.0/24'), - IPv4Network('240.0.0.0/4'), - IPv4Network('255.255.255.255/32'), - ] - - _private_networks_exceptions = [ - IPv4Network('192.0.0.9/32'), - IPv4Network('192.0.0.10/32'), - ] - - _reserved_network = IPv4Network('240.0.0.0/4') - - _unspecified_address = IPv4Address('0.0.0.0') - - -IPv4Address._constants = _IPv4Constants -IPv4Network._constants = _IPv4Constants - - -class _BaseV6: - - """Base IPv6 object. - - The following methods are used by IPv6 objects in both single IP - addresses and networks. - - """ - - __slots__ = () - _version = 6 - _ALL_ONES = (2**IPV6LENGTH) - 1 - _HEXTET_COUNT = 8 - _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef') - _max_prefixlen = IPV6LENGTH - - # There are only a bunch of valid v6 netmasks, so we cache them all - # when constructed (see _make_netmask()). - _netmask_cache = {} - - @classmethod - def _make_netmask(cls, arg): - """Make a (netmask, prefix_len) tuple from the given argument. - - Argument can be: - - an integer (the prefix length) - - a string representing the prefix length (e.g. "24") - - a string representing the prefix netmask (e.g. 
"255.255.255.0") - """ - if arg not in cls._netmask_cache: - if isinstance(arg, int): - prefixlen = arg - if not (0 <= prefixlen <= cls._max_prefixlen): - cls._report_invalid_netmask(prefixlen) - else: - prefixlen = cls._prefix_from_prefix_string(arg) - netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) - cls._netmask_cache[arg] = netmask, prefixlen - return cls._netmask_cache[arg] - - @classmethod - def _ip_int_from_string(cls, ip_str): - """Turn an IPv6 ip_str into an integer. - - Args: - ip_str: A string, the IPv6 ip_str. - - Returns: - An int, the IPv6 address - - Raises: - AddressValueError: if ip_str isn't a valid IPv6 Address. - - """ - if not ip_str: - raise AddressValueError('Address cannot be empty') - if len(ip_str) > 45: - shorten = ip_str - if len(shorten) > 100: - shorten = f'{ip_str[:45]}({len(ip_str)-90} chars elided){ip_str[-45:]}' - raise AddressValueError(f"At most 45 characters expected in " - f"{shorten!r}") - - # We want to allow more parts than the max to be 'split' - # to preserve the correct error message when there are - # too many parts combined with '::' - _max_parts = cls._HEXTET_COUNT + 1 - parts = ip_str.split(':', maxsplit=_max_parts) - - # An IPv6 address needs at least 2 colons (3 parts). - _min_parts = 3 - if len(parts) < _min_parts: - msg = "At least %d parts expected in %r" % (_min_parts, ip_str) - raise AddressValueError(msg) - - # If the address has an IPv4-style suffix, convert it to hexadecimal. - if '.' in parts[-1]: - try: - ipv4_int = IPv4Address(parts.pop())._ip - except AddressValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) from None - parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) - parts.append('%x' % (ipv4_int & 0xFFFF)) - - # An IPv6 address can't have more than 8 colons (9 parts). - # The extra colon comes from using the "::" notation for a single - # leading or trailing zero part. - if len(parts) > _max_parts: - msg = "At most %d colons permitted in %r" % (_max_parts-1, ip_str) - raise AddressValueError(msg) - - # Disregarding the endpoints, find '::' with nothing in between. - # This indicates that a run of zeroes has been skipped. - skip_index = None - for i in range(1, len(parts) - 1): - if not parts[i]: - if skip_index is not None: - # Can't have more than one '::' - msg = "At most one '::' permitted in %r" % ip_str - raise AddressValueError(msg) - skip_index = i - - # parts_hi is the number of parts to copy from above/before the '::' - # parts_lo is the number of parts to copy from below/after the '::' - if skip_index is not None: - # If we found a '::', then check if it also covers the endpoints. - parts_hi = skip_index - parts_lo = len(parts) - skip_index - 1 - if not parts[0]: - parts_hi -= 1 - if parts_hi: - msg = "Leading ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # ^: requires ^:: - if not parts[-1]: - parts_lo -= 1 - if parts_lo: - msg = "Trailing ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # :$ requires ::$ - parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) - if parts_skipped < 1: - msg = "Expected at most %d other parts with '::' in %r" - raise AddressValueError(msg % (cls._HEXTET_COUNT-1, ip_str)) - else: - # Otherwise, allocate the entire address to parts_hi. The - # endpoints could still be empty, but _parse_hextet() will check - # for that. 
- if len(parts) != cls._HEXTET_COUNT: - msg = "Exactly %d parts expected without '::' in %r" - raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) - if not parts[0]: - msg = "Leading ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # ^: requires ^:: - if not parts[-1]: - msg = "Trailing ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # :$ requires ::$ - parts_hi = len(parts) - parts_lo = 0 - parts_skipped = 0 - - try: - # Now, parse the hextets into a 128-bit integer. - ip_int = 0 - for i in range(parts_hi): - ip_int <<= 16 - ip_int |= cls._parse_hextet(parts[i]) - ip_int <<= 16 * parts_skipped - for i in range(-parts_lo, 0): - ip_int <<= 16 - ip_int |= cls._parse_hextet(parts[i]) - return ip_int - except ValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) from None - - @classmethod - def _parse_hextet(cls, hextet_str): - """Convert an IPv6 hextet string into an integer. - - Args: - hextet_str: A string, the number to parse. - - Returns: - The hextet as an integer. - - Raises: - ValueError: if the input isn't strictly a hex number from - [0..FFFF]. - - """ - # Reject non-ASCII digits. - if not cls._HEX_DIGITS.issuperset(hextet_str): - raise ValueError("Only hex digits permitted in %r" % hextet_str) - # We do the length check second, since the invalid character error - # is likely to be more informative for the user - if len(hextet_str) > 4: - msg = "At most 4 characters permitted in %r" - raise ValueError(msg % hextet_str) - # Length check means we can skip checking the integer value - return int(hextet_str, 16) - - @classmethod - def _compress_hextets(cls, hextets): - """Compresses a list of hextets. - - Compresses a list of strings, replacing the longest continuous - sequence of "0" in the list with "" and adding empty strings at - the beginning or at the end of the string such that subsequently - calling ":".join(hextets) will produce the compressed version of - the IPv6 address. - - Args: - hextets: A list of strings, the hextets to compress. - - Returns: - A list of strings. - - """ - best_doublecolon_start = -1 - best_doublecolon_len = 0 - doublecolon_start = -1 - doublecolon_len = 0 - for index, hextet in enumerate(hextets): - if hextet == '0': - doublecolon_len += 1 - if doublecolon_start == -1: - # Start of a sequence of zeros. - doublecolon_start = index - if doublecolon_len > best_doublecolon_len: - # This is the longest sequence of zeros so far. - best_doublecolon_len = doublecolon_len - best_doublecolon_start = doublecolon_start - else: - doublecolon_len = 0 - doublecolon_start = -1 - - if best_doublecolon_len > 1: - best_doublecolon_end = (best_doublecolon_start + - best_doublecolon_len) - # For zeros at the end of the address. - if best_doublecolon_end == len(hextets): - hextets += [''] - hextets[best_doublecolon_start:best_doublecolon_end] = [''] - # For zeros at the beginning of the address. - if best_doublecolon_start == 0: - hextets = [''] + hextets - - return hextets - - @classmethod - def _string_from_ip_int(cls, ip_int=None): - """Turns a 128-bit integer into hexadecimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - A string, the hexadecimal representation of the address. - - Raises: - ValueError: The address is bigger than 128 bits of all ones. 
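_compress_hextets() above is what produces the '::' shorthand; compare the compressed and exploded forms of the same address:

import ipaddress

addr = ipaddress.ip_address('2001:0db8:0000:0000:0000:0000:0000:0001')
print(addr.compressed)  # 2001:db8::1
print(addr.exploded)    # 2001:0db8:0000:0000:0000:0000:0000:0001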
- - """ - if ip_int is None: - ip_int = int(cls._ip) - - if ip_int > cls._ALL_ONES: - raise ValueError('IPv6 address is too large') - - hex_str = '%032x' % ip_int - hextets = ['%x' % int(hex_str[x:x+4], 16) for x in range(0, 32, 4)] - - hextets = cls._compress_hextets(hextets) - return ':'.join(hextets) - - def _explode_shorthand_ip_string(self): - """Expand a shortened IPv6 address. - - Returns: - A string, the expanded IPv6 address. - - """ - if isinstance(self, IPv6Network): - ip_str = str(self.network_address) - elif isinstance(self, IPv6Interface): - ip_str = str(self.ip) - else: - ip_str = str(self) - - ip_int = self._ip_int_from_string(ip_str) - hex_str = '%032x' % ip_int - parts = [hex_str[x:x+4] for x in range(0, 32, 4)] - if isinstance(self, (_BaseNetwork, IPv6Interface)): - return '%s/%d' % (':'.join(parts), self._prefixlen) - return ':'.join(parts) - - def _reverse_pointer(self): - """Return the reverse DNS pointer name for the IPv6 address. - - This implements the method described in RFC3596 2.5. - - """ - reverse_chars = self.exploded[::-1].replace(':', '') - return '.'.join(reverse_chars) + '.ip6.arpa' - - @staticmethod - def _split_scope_id(ip_str): - """Helper function to parse IPv6 string address with scope id. - - See RFC 4007 for details. - - Args: - ip_str: A string, the IPv6 address. - - Returns: - (addr, scope_id) tuple. - - """ - addr, sep, scope_id = ip_str.partition('%') - if not sep: - scope_id = None - elif not scope_id or '%' in scope_id: - raise AddressValueError('Invalid IPv6 address: "%r"' % ip_str) - return addr, scope_id - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def version(self): - return self._version - - -class IPv6Address(_BaseV6, _BaseAddress): - - """Represent and manipulate single IPv6 Addresses.""" - - __slots__ = ('_ip', '_scope_id', '__weakref__') - - def __init__(self, address): - """Instantiate a new IPv6 address object. - - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv6Address('2001:db8::') == - IPv6Address(42540766411282592856903984951653826560) - or, more generally - IPv6Address(int(IPv6Address('2001:db8::'))) == - IPv6Address('2001:db8::') - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - - """ - # Efficient constructor from integer. - if isinstance(address, int): - self._check_int_address(address) - self._ip = address - self._scope_id = None - return - - # Constructing from a packed address - if isinstance(address, bytes): - self._check_packed_address(address, 16) - self._ip = int.from_bytes(address, 'big') - self._scope_id = None - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. 
- addr_str = str(address) - if '/' in addr_str: - raise AddressValueError(f"Unexpected '/' in {address!r}") - addr_str, self._scope_id = self._split_scope_id(addr_str) - - self._ip = self._ip_int_from_string(addr_str) - - def _explode_shorthand_ip_string(self): - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is None: - return super()._explode_shorthand_ip_string() - prefix_len = 30 - raw_exploded_str = super()._explode_shorthand_ip_string() - return f"{raw_exploded_str[:prefix_len]}{ipv4_mapped!s}" - - def _reverse_pointer(self): - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is None: - return super()._reverse_pointer() - prefix_len = 30 - raw_exploded_str = super()._explode_shorthand_ip_string()[:prefix_len] - # ipv4 encoded using hexadecimal nibbles instead of decimals - ipv4_int = ipv4_mapped._ip - reverse_chars = f"{raw_exploded_str}{ipv4_int:008x}"[::-1].replace(':', '') - return '.'.join(reverse_chars) + '.ip6.arpa' - - def _ipv4_mapped_ipv6_to_str(self): - """Return convenient text representation of IPv4-mapped IPv6 address - - See RFC 4291 2.5.5.2, 2.2 p.3 for details. - - Returns: - A string, 'x:x:x:x:x:x:d.d.d.d', where the 'x's are the hexadecimal values of - the six high-order 16-bit pieces of the address, and the 'd's are - the decimal values of the four low-order 8-bit pieces of the - address (standard IPv4 representation) as defined in RFC 4291 2.2 p.3. - - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is None: - raise AddressValueError("Can not apply to non-IPv4-mapped IPv6 address %s" % str(self)) - high_order_bits = self._ip >> 32 - return "%s:%s" % (self._string_from_ip_int(high_order_bits), str(ipv4_mapped)) - - def __str__(self): - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is None: - ip_str = super().__str__() - else: - ip_str = self._ipv4_mapped_ipv6_to_str() - return ip_str + '%' + self._scope_id if self._scope_id else ip_str - - def __hash__(self): - return hash((self._ip, self._scope_id)) - - def __eq__(self, other): - address_equal = super().__eq__(other) - if address_equal is NotImplemented: - return NotImplemented - if not address_equal: - return False - return self._scope_id == getattr(other, '_scope_id', None) - - def __reduce__(self): - return (self.__class__, (str(self),)) - - @property - def scope_id(self): - """Identifier of a particular zone of the address's scope. - - See RFC 4007 for details. - - Returns: - A string identifying the zone of the address if specified, else None. - - """ - return self._scope_id - - @property - def packed(self): - """The binary representation of this address.""" - return v6_int_to_packed(self._ip) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is a multicast address. - See RFC 2373 2.7 for details. - - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_multicast - return self in self._constants._multicast_network - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within one of the - reserved IPv6 Network ranges. - - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_reserved - return any(self in x for x in self._constants._reserved_networks) - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is reserved per RFC 4291. 
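Scoped addresses (RFC 4007) and the IPv4-mapped view, both handled by the code above:

import ipaddress

scoped = ipaddress.ip_address('fe80::1%eth0')
print(scoped.scope_id)                                       # eth0
print(ipaddress.ip_address('::ffff:192.0.2.1').ipv4_mapped)  # 192.0.2.1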
- - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_link_local - return self in self._constants._linklocal_network - - @property - def is_site_local(self): - """Test if the address is reserved for site-local. - - Note that the site-local address space has been deprecated by RFC 3879. - Use is_private to test if this address is in the space of unique local - addresses as defined by RFC 4193. - - Returns: - A boolean, True if the address is reserved per RFC 3513 2.5.6. - - """ - return self in self._constants._sitelocal_network - - @property - @functools.lru_cache() - def is_private(self): - """``True`` if the address is defined as not globally reachable by - iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ - (for IPv6) with the following exceptions: - - * ``is_private`` is ``False`` for ``100.64.0.0/10`` - * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the - semantics of the underlying IPv4 addresses and the following condition holds - (see :attr:`IPv6Address.ipv4_mapped`):: - - address.is_private == address.ipv4_mapped.is_private - - ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10`` - IPv4 range where they are both ``False``. - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_private - return ( - any(self in net for net in self._constants._private_networks) - and all(self not in net for net in self._constants._private_networks_exceptions) - ) - - @property - def is_global(self): - """``True`` if the address is defined as globally reachable by - iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ - (for IPv6) with the following exception: - - For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the - semantics of the underlying IPv4 addresses and the following condition holds - (see :attr:`IPv6Address.ipv4_mapped`):: - - address.is_global == address.ipv4_mapped.is_global - - ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10`` - IPv4 range where they are both ``False``. - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_global - return not self.is_private - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 2373 2.5.2. - - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_unspecified - return self._ip == 0 - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback address as defined in - RFC 2373 2.5.3. - - """ - ipv4_mapped = self.ipv4_mapped - if ipv4_mapped is not None: - return ipv4_mapped.is_loopback - return self._ip == 1 - - @property - def ipv4_mapped(self): - """Return the IPv4 mapped address. - - Returns: - If the IPv6 address is a v4 mapped address, return the - IPv4 mapped address. Return None otherwise. - - """ - if (self._ip >> 32) != 0xFFFF: - return None - return IPv4Address(self._ip & 0xFFFFFFFF) - - @property - def teredo(self): - """Tuple of embedded teredo IPs. 
- - Returns: - Tuple of the (server, client) IPs or None if the address - doesn't appear to be a teredo address (doesn't start with - 2001::/32) - - """ - if (self._ip >> 96) != 0x20010000: - return None - return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), - IPv4Address(~self._ip & 0xFFFFFFFF)) - - @property - def sixtofour(self): - """Return the IPv4 6to4 embedded address. - - Returns: - The IPv4 6to4-embedded address if present or None if the - address doesn't appear to contain a 6to4 embedded address. - - """ - if (self._ip >> 112) != 0x2002: - return None - return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) - - -class IPv6Interface(IPv6Address): - - def __init__(self, address): - addr, mask = self._split_addr_prefix(address) - - IPv6Address.__init__(self, addr) - self.network = IPv6Network((addr, mask), strict=False) - self.netmask = self.network.netmask - self._prefixlen = self.network._prefixlen - - @functools.cached_property - def hostmask(self): - return self.network.hostmask - - def __str__(self): - return '%s/%d' % (super().__str__(), - self._prefixlen) - - def __eq__(self, other): - address_equal = IPv6Address.__eq__(self, other) - if address_equal is NotImplemented or not address_equal: - return address_equal - try: - return self.network == other.network - except AttributeError: - # An interface with an associated network is NOT the - # same as an unassociated address. That's why the hash - # takes the extra info into account. - return False - - def __lt__(self, other): - address_less = IPv6Address.__lt__(self, other) - if address_less is NotImplemented: - return address_less - try: - return (self.network < other.network or - self.network == other.network and address_less) - except AttributeError: - # We *do* allow addresses and interfaces to be sorted. The - # unassociated address is considered less than all interfaces. - return False - - def __hash__(self): - return hash((self._ip, self._prefixlen, int(self.network.network_address))) - - __reduce__ = _IPAddressBase.__reduce__ - - @property - def ip(self): - return IPv6Address(self._ip) - - @property - def with_prefixlen(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self._prefixlen) - - @property - def with_netmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.netmask) - - @property - def with_hostmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.hostmask) - - @property - def is_unspecified(self): - return self._ip == 0 and self.network.is_unspecified - - @property - def is_loopback(self): - return super().is_loopback and self.network.is_loopback - - -class IPv6Network(_BaseV6, _BaseNetwork): - - """This class represents and manipulates 128-bit IPv6 networks. - - Attributes: [examples for IPv6('2001:db8::1000/124')] - .network_address: IPv6Address('2001:db8::1000') - .hostmask: IPv6Address('::f') - .broadcast_address: IPv6Address('2001:db8::100f') - .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') - .prefixlen: 124 - - """ - - # Class to use when creating address objects - _address_class = IPv6Address - - def __init__(self, address, strict=True): - """Instantiate a new IPv6 Network object. - - Args: - address: A string or integer representing the IPv6 network or the - IP and prefix/netmask. - '2001:db8::/128' - '2001:db8:0000:0000:0000:0000:0000:0000/128' - '2001:db8::' - are all functionally the same in IPv6. That is to say, - failing to provide a subnetmask will create an object with - a mask of /128. 
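The teredo and sixtofour extractors above, applied to the examples from the Python documentation:

import ipaddress

t = ipaddress.ip_address('2001:0:4136:e378:8000:63bf:3fff:fdd2')
print(t.teredo)  # (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45'))
print(ipaddress.ip_address('2002:c000:204::').sixtofour)  # 192.0.2.4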
- - Additionally, an integer can be passed, so - IPv6Network('2001:db8::') == - IPv6Network(42540766411282592856903984951653826560) - or, more generally - IPv6Network(int(IPv6Network('2001:db8::'))) == - IPv6Network('2001:db8::') - - strict: A boolean. If true, ensure that we have been passed - A true network address, eg, 2001:db8::1000/124 and not an - IP address on a network, eg, 2001:db8::1/124. - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - NetmaskValueError: If the netmask isn't valid for - an IPv6 address. - ValueError: If strict was True and a network address was not - supplied. - """ - addr, mask = self._split_addr_prefix(address) - - self.network_address = IPv6Address(addr) - self.netmask, self._prefixlen = self._make_netmask(mask) - packed = int(self.network_address) - if packed & int(self.netmask) != packed: - if strict: - raise ValueError('%s has host bits set' % self) - else: - self.network_address = IPv6Address(packed & - int(self.netmask)) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - elif self._prefixlen == self._max_prefixlen: - self.hosts = lambda: iter((IPv6Address(addr),)) - - def hosts(self): - """Generate Iterator over usable hosts in a network. - - This is like __iter__ except it doesn't return the - Subnet-Router anycast address. - - """ - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in range(network + 1, broadcast + 1): - yield self._address_class(x) - - @property - def is_site_local(self): - """Test if the address is reserved for site-local. - - Note that the site-local address space has been deprecated by RFC 3879. - Use is_private to test if this address is in the space of unique local - addresses as defined by RFC 4193. - - Returns: - A boolean, True if the address is reserved per RFC 3513 2.5.6. 
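As the hosts() override above shows, IPv6 networks skip only the Subnet-Router anycast address (the all-zeros host), unlike IPv4 which also drops the broadcast address:

import ipaddress

net = ipaddress.ip_network('2001:db8::/126')
print(list(net))          # 2001:db8:: (anycast), ::1, ::2, ::3
print(list(net.hosts()))  # ::1, ::2, ::3 only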
- - """ - return (self.network_address.is_site_local and - self.broadcast_address.is_site_local) - - -class _IPv6Constants: - - _linklocal_network = IPv6Network('fe80::/10') - - _multicast_network = IPv6Network('ff00::/8') - - # Not globally reachable address blocks listed on - # https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml - _private_networks = [ - IPv6Network('::1/128'), - IPv6Network('::/128'), - IPv6Network('::ffff:0:0/96'), - IPv6Network('64:ff9b:1::/48'), - IPv6Network('100::/64'), - IPv6Network('2001::/23'), - IPv6Network('2001:db8::/32'), - # IANA says N/A, let's consider it not globally reachable to be safe - IPv6Network('2002::/16'), - # RFC 9637: https://www.rfc-editor.org/rfc/rfc9637.html#section-6-2.2 - IPv6Network('3fff::/20'), - IPv6Network('fc00::/7'), - IPv6Network('fe80::/10'), - ] - - _private_networks_exceptions = [ - IPv6Network('2001:1::1/128'), - IPv6Network('2001:1::2/128'), - IPv6Network('2001:3::/32'), - IPv6Network('2001:4:112::/48'), - IPv6Network('2001:20::/28'), - IPv6Network('2001:30::/28'), - ] - - _reserved_networks = [ - IPv6Network('::/8'), IPv6Network('100::/8'), - IPv6Network('200::/7'), IPv6Network('400::/6'), - IPv6Network('800::/5'), IPv6Network('1000::/4'), - IPv6Network('4000::/3'), IPv6Network('6000::/3'), - IPv6Network('8000::/3'), IPv6Network('A000::/3'), - IPv6Network('C000::/3'), IPv6Network('E000::/4'), - IPv6Network('F000::/5'), IPv6Network('F800::/6'), - IPv6Network('FE00::/9'), - ] - - _sitelocal_network = IPv6Network('fec0::/10') - - -IPv6Address._constants = _IPv6Constants -IPv6Network._constants = _IPv6Constants diff --git a/Python313_13_x86_Template/Lib/json/__init__.py b/Python313_13_x86_Template/Lib/json/__init__.py deleted file mode 100644 index c7a6dcdf..00000000 --- a/Python313_13_x86_Template/Lib/json/__init__.py +++ /dev/null @@ -1,365 +0,0 @@ -r"""JSON (JavaScript Object Notation) is a subset of -JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data -interchange format. - -:mod:`json` exposes an API familiar to users of the standard library -:mod:`marshal` and :mod:`pickle` modules. It is derived from a -version of the externally maintained simplejson library. - -Encoding basic Python object hierarchies:: - - >>> import json - >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) - '["foo", {"bar": ["baz", null, 1.0, 2]}]' - >>> print(json.dumps("\"foo\bar")) - "\"foo\bar" - >>> print(json.dumps('\u1234')) - "\u1234" - >>> print(json.dumps('\\')) - "\\" - >>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)) - {"a": 0, "b": 0, "c": 0} - >>> from io import StringIO - >>> io = StringIO() - >>> json.dump(['streaming API'], io) - >>> io.getvalue() - '["streaming API"]' - -Compact encoding:: - - >>> import json - >>> mydict = {'4': 5, '6': 7} - >>> json.dumps([1,2,3,mydict], separators=(',', ':')) - '[1,2,3,{"4":5,"6":7}]' - -Pretty printing:: - - >>> import json - >>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)) - { - "4": 5, - "6": 7 - } - -Decoding JSON:: - - >>> import json - >>> obj = ['foo', {'bar': ['baz', None, 1.0, 2]}] - >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj - True - >>> json.loads('"\\"foo\\bar"') == '"foo\x08ar' - True - >>> from io import StringIO - >>> io = StringIO('["streaming API"]') - >>> json.load(io)[0] == 'streaming API' - True - -Specializing JSON object decoding:: - - >>> import json - >>> def as_complex(dct): - ... if '__complex__' in dct: - ... return complex(dct['real'], dct['imag']) - ... 
return dct - ... - >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', - ... object_hook=as_complex) - (1+2j) - >>> from decimal import Decimal - >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1') - True - -Specializing JSON object encoding:: - - >>> import json - >>> def encode_complex(obj): - ... if isinstance(obj, complex): - ... return [obj.real, obj.imag] - ... raise TypeError(f'Object of type {obj.__class__.__name__} ' - ... f'is not JSON serializable') - ... - >>> json.dumps(2 + 1j, default=encode_complex) - '[2.0, 1.0]' - >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j) - '[2.0, 1.0]' - >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j)) - '[2.0, 1.0]' - - -Using json.tool from the shell to validate and pretty-print:: - - $ echo '{"json":"obj"}' | python -m json.tool - { - "json": "obj" - } - $ echo '{ 1.2:3.4}' | python -m json.tool - Expecting property name enclosed in double quotes: line 1 column 3 (char 2) -""" -__version__ = '2.0.9' -__all__ = [ - 'dump', 'dumps', 'load', 'loads', - 'JSONDecoder', 'JSONDecodeError', 'JSONEncoder', -] - -__author__ = 'Bob Ippolito <bob@redivi.com>' - -from .decoder import JSONDecoder, JSONDecodeError -from .encoder import JSONEncoder -import codecs - -_default_encoder = JSONEncoder( - skipkeys=False, - ensure_ascii=True, - check_circular=True, - allow_nan=True, - indent=None, - separators=None, - default=None, -) - -def dump(obj, fp, *, skipkeys=False, ensure_ascii=True, check_circular=True, - allow_nan=True, cls=None, indent=None, separators=None, - default=None, sort_keys=False, **kw): - """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a - ``.write()``-supporting file-like object). - - If ``skipkeys`` is true then ``dict`` keys that are not basic types - (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped - instead of raising a ``TypeError``. - - If ``ensure_ascii`` is false, then the strings written to ``fp`` can - contain non-ASCII and non-printable characters if they appear in strings - contained in ``obj``. Otherwise, all such characters are escaped in JSON - strings. - - If ``check_circular`` is false, then the circular reference check - for container types will be skipped and a circular reference will - result in an ``RecursionError`` (or worse). - - If ``allow_nan`` is false, then it will be a ``ValueError`` to - serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) - in strict compliance of the JSON specification, instead of using the - JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). - - If ``indent`` is a non-negative integer, then JSON array elements and - object members will be pretty-printed with that indent level. An indent - level of 0 will only insert newlines. ``None`` is the most compact - representation. - - If specified, ``separators`` should be an ``(item_separator, - key_separator)`` tuple. The default is ``(', ', ': ')`` if *indent* is - ``None`` and ``(',', ': ')`` otherwise. To get the most compact JSON - representation, you should specify ``(',', ':')`` to eliminate - whitespace. - - ``default(obj)`` is a function that should return a serializable version - of obj or raise TypeError. The default simply raises TypeError. - - If *sort_keys* is true (default: ``False``), then the output of - dictionaries will be sorted by key. - - To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the - ``.default()`` method to serialize additional types), specify it with - the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
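A small sketch of dump()'s streaming behaviour and the ensure_ascii switch documented above:

import io, json

buf = io.StringIO()
json.dump({'name': 'café'}, buf, ensure_ascii=False)
print(buf.getvalue())                # {"name": "café"}
print(json.dumps({'name': 'café'}))  # {"name": "caf\u00e9"} with the default escaping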
- - """ - # cached encoder - if (not skipkeys and ensure_ascii and - check_circular and allow_nan and - cls is None and indent is None and separators is None and - default is None and not sort_keys and not kw): - iterable = _default_encoder.iterencode(obj) - else: - if cls is None: - cls = JSONEncoder - iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, - check_circular=check_circular, allow_nan=allow_nan, indent=indent, - separators=separators, - default=default, sort_keys=sort_keys, **kw).iterencode(obj) - # could accelerate with writelines in some versions of Python, at - # a debuggability cost - for chunk in iterable: - fp.write(chunk) - - -def dumps(obj, *, skipkeys=False, ensure_ascii=True, check_circular=True, - allow_nan=True, cls=None, indent=None, separators=None, - default=None, sort_keys=False, **kw): - """Serialize ``obj`` to a JSON formatted ``str``. - - If ``skipkeys`` is true then ``dict`` keys that are not basic types - (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped - instead of raising a ``TypeError``. - - If ``ensure_ascii`` is false, then the return value can contain - non-ASCII and non-printable characters if they appear in strings - contained in ``obj``. Otherwise, all such characters are escaped in - JSON strings. - - If ``check_circular`` is false, then the circular reference check - for container types will be skipped and a circular reference will - result in an ``RecursionError`` (or worse). - - If ``allow_nan`` is false, then it will be a ``ValueError`` to - serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in - strict compliance of the JSON specification, instead of using the - JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). - - If ``indent`` is a non-negative integer, then JSON array elements and - object members will be pretty-printed with that indent level. An indent - level of 0 will only insert newlines. ``None`` is the most compact - representation. - - If specified, ``separators`` should be an ``(item_separator, - key_separator)`` tuple. The default is ``(', ', ': ')`` if *indent* is - ``None`` and ``(',', ': ')`` otherwise. To get the most compact JSON - representation, you should specify ``(',', ':')`` to eliminate - whitespace. - - ``default(obj)`` is a function that should return a serializable version - of obj or raise TypeError. The default simply raises TypeError. - - If *sort_keys* is true (default: ``False``), then the output of - dictionaries will be sorted by key. - - To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the - ``.default()`` method to serialize additional types), specify it with - the ``cls`` kwarg; otherwise ``JSONEncoder`` is used. 
- - """ - # cached encoder - if (not skipkeys and ensure_ascii and - check_circular and allow_nan and - cls is None and indent is None and separators is None and - default is None and not sort_keys and not kw): - return _default_encoder.encode(obj) - if cls is None: - cls = JSONEncoder - return cls( - skipkeys=skipkeys, ensure_ascii=ensure_ascii, - check_circular=check_circular, allow_nan=allow_nan, indent=indent, - separators=separators, default=default, sort_keys=sort_keys, - **kw).encode(obj) - - -_default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None) - - -def detect_encoding(b): - bstartswith = b.startswith - if bstartswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)): - return 'utf-32' - if bstartswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)): - return 'utf-16' - if bstartswith(codecs.BOM_UTF8): - return 'utf-8-sig' - - if len(b) >= 4: - if not b[0]: - # 00 00 -- -- - utf-32-be - # 00 XX -- -- - utf-16-be - return 'utf-16-be' if b[1] else 'utf-32-be' - if not b[1]: - # XX 00 00 00 - utf-32-le - # XX 00 00 XX - utf-16-le - # XX 00 XX -- - utf-16-le - return 'utf-16-le' if b[2] or b[3] else 'utf-32-le' - elif len(b) == 2: - if not b[0]: - # 00 XX - utf-16-be - return 'utf-16-be' - if not b[1]: - # XX 00 - utf-16-le - return 'utf-16-le' - # default - return 'utf-8' - - -def load(fp, *, cls=None, object_hook=None, parse_float=None, - parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): - """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing - a JSON document) to a Python object. - - ``object_hook`` is an optional function that will be called with the - result of any object literal decode (a ``dict``). The return value of - ``object_hook`` will be used instead of the ``dict``. This feature - can be used to implement custom decoders (e.g. JSON-RPC class hinting). - - ``object_pairs_hook`` is an optional function that will be called with - the result of any object literal decoded with an ordered list of pairs. - The return value of ``object_pairs_hook`` will be used instead of the - ``dict``. This feature can be used to implement custom decoders. If - ``object_hook`` is also defined, the ``object_pairs_hook`` takes - priority. - - To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` - kwarg; otherwise ``JSONDecoder`` is used. - """ - return loads(fp.read(), - cls=cls, object_hook=object_hook, - parse_float=parse_float, parse_int=parse_int, - parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw) - - -def loads(s, *, cls=None, object_hook=None, parse_float=None, - parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): - """Deserialize ``s`` (a ``str``, ``bytes`` or ``bytearray`` instance - containing a JSON document) to a Python object. - - ``object_hook`` is an optional function that will be called with the - result of any object literal decode (a ``dict``). The return value of - ``object_hook`` will be used instead of the ``dict``. This feature - can be used to implement custom decoders (e.g. JSON-RPC class hinting). - - ``object_pairs_hook`` is an optional function that will be called with - the result of any object literal decoded with an ordered list of pairs. - The return value of ``object_pairs_hook`` will be used instead of the - ``dict``. This feature can be used to implement custom decoders. If - ``object_hook`` is also defined, the ``object_pairs_hook`` takes - priority. - - ``parse_float``, if specified, will be called with the string - of every JSON float to be decoded. 
By default this is equivalent to - float(num_str). This can be used to use another datatype or parser - for JSON floats (e.g. decimal.Decimal). - - ``parse_int``, if specified, will be called with the string - of every JSON int to be decoded. By default this is equivalent to - int(num_str). This can be used to use another datatype or parser - for JSON integers (e.g. float). - - ``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN. - This can be used to raise an exception if invalid JSON numbers - are encountered. - - To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` - kwarg; otherwise ``JSONDecoder`` is used. - """ - if isinstance(s, str): - if s.startswith('\ufeff'): - raise JSONDecodeError("Unexpected UTF-8 BOM (decode using utf-8-sig)", - s, 0) - else: - if not isinstance(s, (bytes, bytearray)): - raise TypeError(f'the JSON object must be str, bytes or bytearray, ' - f'not {s.__class__.__name__}') - s = s.decode(detect_encoding(s), 'surrogatepass') - - if (cls is None and object_hook is None and - parse_int is None and parse_float is None and - parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) - if cls is None: - cls = JSONDecoder - if object_hook is not None: - kw['object_hook'] = object_hook - if object_pairs_hook is not None: - kw['object_pairs_hook'] = object_pairs_hook - if parse_float is not None: - kw['parse_float'] = parse_float - if parse_int is not None: - kw['parse_int'] = parse_int - if parse_constant is not None: - kw['parse_constant'] = parse_constant - return cls(**kw).decode(s) diff --git a/Python313_13_x86_Template/Lib/json/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/json/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 7ac7b410..00000000 Binary files a/Python313_13_x86_Template/Lib/json/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/json/__pycache__/decoder.cpython-313.pyc b/Python313_13_x86_Template/Lib/json/__pycache__/decoder.cpython-313.pyc deleted file mode 100644 index 48d0b476..00000000 Binary files a/Python313_13_x86_Template/Lib/json/__pycache__/decoder.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/json/__pycache__/encoder.cpython-313.pyc b/Python313_13_x86_Template/Lib/json/__pycache__/encoder.cpython-313.pyc deleted file mode 100644 index 05ed6f3e..00000000 Binary files a/Python313_13_x86_Template/Lib/json/__pycache__/encoder.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/json/__pycache__/scanner.cpython-313.pyc b/Python313_13_x86_Template/Lib/json/__pycache__/scanner.cpython-313.pyc deleted file mode 100644 index 148e3aca..00000000 Binary files a/Python313_13_x86_Template/Lib/json/__pycache__/scanner.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/json/encoder.py b/Python313_13_x86_Template/Lib/json/encoder.py deleted file mode 100644 index 0671500d..00000000 --- a/Python313_13_x86_Template/Lib/json/encoder.py +++ /dev/null @@ -1,446 +0,0 @@ -"""Implementation of JSONEncoder -""" -import re - -try: - from _json import encode_basestring_ascii as c_encode_basestring_ascii -except ImportError: - c_encode_basestring_ascii = None -try: - from _json import encode_basestring as c_encode_basestring -except ImportError: - c_encode_basestring = None -try: - from _json import make_encoder as c_make_encoder -except ImportError: - c_make_encoder = 
None - -ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') -ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') -HAS_UTF8 = re.compile(b'[\x80-\xff]') -ESCAPE_DCT = { - '\\': '\\\\', - '"': '\\"', - '\b': '\\b', - '\f': '\\f', - '\n': '\\n', - '\r': '\\r', - '\t': '\\t', -} -for i in range(0x20): - ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) - #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) -del i - -INFINITY = float('inf') - -def py_encode_basestring(s): - """Return a JSON representation of a Python string - - """ - def replace(match): - return ESCAPE_DCT[match.group(0)] - return '"' + ESCAPE.sub(replace, s) + '"' - - -encode_basestring = (c_encode_basestring or py_encode_basestring) - - -def py_encode_basestring_ascii(s): - """Return an ASCII-only JSON representation of a Python string - - """ - def replace(match): - s = match.group(0) - try: - return ESCAPE_DCT[s] - except KeyError: - n = ord(s) - if n < 0x10000: - return '\\u{0:04x}'.format(n) - #return '\\u%04x' % (n,) - else: - # surrogate pair - n -= 0x10000 - s1 = 0xd800 | ((n >> 10) & 0x3ff) - s2 = 0xdc00 | (n & 0x3ff) - return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) - return '"' + ESCAPE_ASCII.sub(replace, s) + '"' - - -encode_basestring_ascii = ( - c_encode_basestring_ascii or py_encode_basestring_ascii) - -class JSONEncoder(object): - """Extensible JSON encoder for Python data structures. - - Supports the following objects and types by default: - - +-------------------+---------------+ - | Python | JSON | - +===================+===============+ - | dict | object | - +-------------------+---------------+ - | list, tuple | array | - +-------------------+---------------+ - | str | string | - +-------------------+---------------+ - | int, float | number | - +-------------------+---------------+ - | True | true | - +-------------------+---------------+ - | False | false | - +-------------------+---------------+ - | None | null | - +-------------------+---------------+ - - To extend this to recognize other objects, subclass and implement a - ``.default()`` method with another method that returns a serializable - object for ``o`` if possible, otherwise it should call the superclass - implementation (to raise ``TypeError``). - - """ - item_separator = ', ' - key_separator = ': ' - def __init__(self, *, skipkeys=False, ensure_ascii=True, - check_circular=True, allow_nan=True, sort_keys=False, - indent=None, separators=None, default=None): - """Constructor for JSONEncoder, with sensible defaults. - - If skipkeys is false, then it is a TypeError to attempt - encoding of keys that are not str, int, float, bool or None. - If skipkeys is True, such items are simply skipped. - - If ensure_ascii is true, the output is guaranteed to be str objects - with all incoming non-ASCII and non-printable characters escaped. - If ensure_ascii is false, the output can contain non-ASCII and - non-printable characters. - - If check_circular is true, then lists, dicts, and custom encoded - objects will be checked for circular references during encoding to - prevent an infinite recursion (which would cause an RecursionError). - Otherwise, no such check takes place. - - If allow_nan is true, then NaN, Infinity, and -Infinity will be - encoded as such. This behavior is not JSON specification compliant, - but is consistent with most JavaScript based encoders and decoders. - Otherwise, it will be a ValueError to encode such floats. 
- - If sort_keys is true, then the output of dictionaries will be - sorted by key; this is useful for regression tests to ensure - that JSON serializations can be compared on a day-to-day basis. - - If indent is a non-negative integer, then JSON array - elements and object members will be pretty-printed with that - indent level. An indent level of 0 will only insert newlines. - None is the most compact representation. - - If specified, separators should be an (item_separator, - key_separator) tuple. The default is (', ', ': ') if *indent* is - ``None`` and (',', ': ') otherwise. To get the most compact JSON - representation, you should specify (',', ':') to eliminate - whitespace. - - If specified, default is a function that gets called for objects - that can't otherwise be serialized. It should return a JSON - encodable version of the object or raise a ``TypeError``. - - """ - - self.skipkeys = skipkeys - self.ensure_ascii = ensure_ascii - self.check_circular = check_circular - self.allow_nan = allow_nan - self.sort_keys = sort_keys - self.indent = indent - if separators is not None: - self.item_separator, self.key_separator = separators - elif indent is not None: - self.item_separator = ',' - if default is not None: - self.default = default - - def default(self, o): - """Implement this method in a subclass such that it returns - a serializable object for ``o``, or calls the base implementation - (to raise a ``TypeError``). - - For example, to support arbitrary iterators, you could - implement default like this:: - - def default(self, o): - try: - iterable = iter(o) - except TypeError: - pass - else: - return list(iterable) - # Let the base class default method raise the TypeError - return super().default(o) - - """ - raise TypeError(f'Object of type {o.__class__.__name__} ' - f'is not JSON serializable') - - def encode(self, o): - """Return a JSON string representation of a Python data structure. - - >>> from json.encoder import JSONEncoder - >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) - '{"foo": ["bar", "baz"]}' - - """ - # This is for extremely simple cases and benchmarks. - if isinstance(o, str): - if self.ensure_ascii: - return encode_basestring_ascii(o) - else: - return encode_basestring(o) - # This doesn't pass the iterator directly to ''.join() because the - # exceptions aren't as detailed. The list call should be roughly - # equivalent to the PySequence_Fast that ''.join() would do. - chunks = self.iterencode(o, _one_shot=True) - if not isinstance(chunks, (list, tuple)): - chunks = list(chunks) - return ''.join(chunks) - - def iterencode(self, o, _one_shot=False): - """Encode the given object and yield each string - representation as available. - - For example:: - - for chunk in JSONEncoder().iterencode(bigobject): - mysocket.write(chunk) - - """ - if self.check_circular: - markers = {} - else: - markers = None - if self.ensure_ascii: - _encoder = encode_basestring_ascii - else: - _encoder = encode_basestring - - def floatstr(o, allow_nan=self.allow_nan, - _repr=float.__repr__, _inf=INFINITY, _neginf=-INFINITY): - # Check for specials. Note that this type of test is processor - # and/or platform-specific, so do tests which don't depend on the - # internals. 
- - if o != o: - text = 'NaN' - elif o == _inf: - text = 'Infinity' - elif o == _neginf: - text = '-Infinity' - else: - return _repr(o) - - if not allow_nan: - raise ValueError( - "Out of range float values are not JSON compliant: " + - repr(o)) - - return text - - - if self.indent is None or isinstance(self.indent, str): - indent = self.indent - else: - indent = ' ' * self.indent - if _one_shot and c_make_encoder is not None: - _iterencode = c_make_encoder( - markers, self.default, _encoder, indent, - self.key_separator, self.item_separator, self.sort_keys, - self.skipkeys, self.allow_nan) - else: - _iterencode = _make_iterencode( - markers, self.default, _encoder, indent, floatstr, - self.key_separator, self.item_separator, self.sort_keys, - self.skipkeys, _one_shot) - return _iterencode(o, 0) - -def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, - _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, - ## HACK: hand-optimized bytecode; turn globals into locals - ValueError=ValueError, - dict=dict, - float=float, - id=id, - int=int, - isinstance=isinstance, - list=list, - str=str, - tuple=tuple, - _intstr=int.__repr__, - ): - - def _iterencode_list(lst, _current_indent_level): - if not lst: - yield '[]' - return - if markers is not None: - markerid = id(lst) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = lst - buf = '[' - if _indent is not None: - _current_indent_level += 1 - newline_indent = '\n' + _indent * _current_indent_level - separator = _item_separator + newline_indent - buf += newline_indent - else: - newline_indent = None - separator = _item_separator - first = True - for value in lst: - if first: - first = False - else: - buf = separator - if isinstance(value, str): - yield buf + _encoder(value) - elif value is None: - yield buf + 'null' - elif value is True: - yield buf + 'true' - elif value is False: - yield buf + 'false' - elif isinstance(value, int): - # Subclasses of int/float may override __repr__, but we still - # want to encode them as integers/floats in JSON. One example - # within the standard library is IntEnum. - yield buf + _intstr(value) - elif isinstance(value, float): - # see comment above for int - yield buf + _floatstr(value) - else: - yield buf - if isinstance(value, (list, tuple)): - chunks = _iterencode_list(value, _current_indent_level) - elif isinstance(value, dict): - chunks = _iterencode_dict(value, _current_indent_level) - else: - chunks = _iterencode(value, _current_indent_level) - yield from chunks - if newline_indent is not None: - _current_indent_level -= 1 - yield '\n' + _indent * _current_indent_level - yield ']' - if markers is not None: - del markers[markerid] - - def _iterencode_dict(dct, _current_indent_level): - if not dct: - yield '{}' - return - if markers is not None: - markerid = id(dct) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = dct - yield '{' - if _indent is not None: - _current_indent_level += 1 - newline_indent = '\n' + _indent * _current_indent_level - item_separator = _item_separator + newline_indent - else: - newline_indent = None - item_separator = _item_separator - first = True - if _sort_keys: - items = sorted(dct.items()) - else: - items = dct.items() - for key, value in items: - if isinstance(key, str): - pass - # JavaScript is weakly typed for these, so it makes sense to - # also allow them. Many encoders seem to do something like this. 
- elif isinstance(key, float): - # see comment for int/float in _make_iterencode - key = _floatstr(key) - elif key is True: - key = 'true' - elif key is False: - key = 'false' - elif key is None: - key = 'null' - elif isinstance(key, int): - # see comment for int/float in _make_iterencode - key = _intstr(key) - elif _skipkeys: - continue - else: - raise TypeError(f'keys must be str, int, float, bool or None, ' - f'not {key.__class__.__name__}') - if first: - first = False - if newline_indent is not None: - yield newline_indent - else: - yield item_separator - yield _encoder(key) - yield _key_separator - if isinstance(value, str): - yield _encoder(value) - elif value is None: - yield 'null' - elif value is True: - yield 'true' - elif value is False: - yield 'false' - elif isinstance(value, int): - # see comment for int/float in _make_iterencode - yield _intstr(value) - elif isinstance(value, float): - # see comment for int/float in _make_iterencode - yield _floatstr(value) - else: - if isinstance(value, (list, tuple)): - chunks = _iterencode_list(value, _current_indent_level) - elif isinstance(value, dict): - chunks = _iterencode_dict(value, _current_indent_level) - else: - chunks = _iterencode(value, _current_indent_level) - yield from chunks - if not first and newline_indent is not None: - _current_indent_level -= 1 - yield '\n' + _indent * _current_indent_level - yield '}' - if markers is not None: - del markers[markerid] - - def _iterencode(o, _current_indent_level): - if isinstance(o, str): - yield _encoder(o) - elif o is None: - yield 'null' - elif o is True: - yield 'true' - elif o is False: - yield 'false' - elif isinstance(o, int): - # see comment for int/float in _make_iterencode - yield _intstr(o) - elif isinstance(o, float): - # see comment for int/float in _make_iterencode - yield _floatstr(o) - elif isinstance(o, (list, tuple)): - yield from _iterencode_list(o, _current_indent_level) - elif isinstance(o, dict): - yield from _iterencode_dict(o, _current_indent_level) - else: - if markers is not None: - markerid = id(o) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = o - o = _default(o) - yield from _iterencode(o, _current_indent_level) - if markers is not None: - del markers[markerid] - return _iterencode diff --git a/Python313_13_x86_Template/Lib/json/tool.py b/Python313_13_x86_Template/Lib/json/tool.py deleted file mode 100644 index fdfc3372..00000000 --- a/Python313_13_x86_Template/Lib/json/tool.py +++ /dev/null @@ -1,89 +0,0 @@ -r"""Command-line tool to validate and pretty-print JSON - -Usage:: - - $ echo '{"json":"obj"}' | python -m json.tool - { - "json": "obj" - } - $ echo '{ 1.2:3.4}' | python -m json.tool - Expecting property name enclosed in double quotes: line 1 column 3 (char 2) - -""" -import argparse -import json -import sys - - -def main(): - prog = 'python -m json.tool' - description = ('A simple command line interface for json module ' - 'to validate and pretty-print JSON objects.') - parser = argparse.ArgumentParser(prog=prog, description=description) - parser.add_argument('infile', nargs='?', - help='a JSON file to be validated or pretty-printed', - default='-') - parser.add_argument('outfile', nargs='?', - help='write the output of infile to outfile', - default=None) - parser.add_argument('--sort-keys', action='store_true', default=False, - help='sort the output of dictionaries alphabetically by key') - parser.add_argument('--no-ensure-ascii', dest='ensure_ascii', action='store_false', - help='disable 
escaping of non-ASCII characters') - parser.add_argument('--json-lines', action='store_true', default=False, - help='parse input using the JSON Lines format. ' - 'Use with --no-indent or --compact to produce valid JSON Lines output.') - group = parser.add_mutually_exclusive_group() - group.add_argument('--indent', default=4, type=int, - help='separate items with newlines and use this number ' - 'of spaces for indentation') - group.add_argument('--tab', action='store_const', dest='indent', - const='\t', help='separate items with newlines and use ' - 'tabs for indentation') - group.add_argument('--no-indent', action='store_const', dest='indent', - const=None, - help='separate items with spaces rather than newlines') - group.add_argument('--compact', action='store_true', - help='suppress all whitespace separation (most compact)') - options = parser.parse_args() - - dump_args = { - 'sort_keys': options.sort_keys, - 'indent': options.indent, - 'ensure_ascii': options.ensure_ascii, - } - if options.compact: - dump_args['indent'] = None - dump_args['separators'] = ',', ':' - - try: - if options.infile == '-': - infile = sys.stdin - else: - infile = open(options.infile, encoding='utf-8') - try: - if options.json_lines: - objs = (json.loads(line) for line in infile) - else: - objs = (json.load(infile),) - finally: - if infile is not sys.stdin: - infile.close() - - if options.outfile is None: - outfile = sys.stdout - else: - outfile = open(options.outfile, 'w', encoding='utf-8') - with outfile: - for obj in objs: - json.dump(obj, outfile, **dump_args) - outfile.write('\n') - except ValueError as e: - raise SystemExit(e) - - -if __name__ == '__main__': - try: - main() - except BrokenPipeError as exc: - sys.exit(exc.errno) diff --git a/Python313_13_x86_Template/Lib/linecache.py b/Python313_13_x86_Template/Lib/linecache.py deleted file mode 100644 index f2bb0bc9..00000000 --- a/Python313_13_x86_Template/Lib/linecache.py +++ /dev/null @@ -1,236 +0,0 @@ -"""Cache lines from Python source files. - -This is intended to read lines from modules imported -- hence if a filename -is not found, it will look down the module search path for a file by -that name. -""" - -__all__ = ["getline", "clearcache", "checkcache", "lazycache"] - - -# The cache. Maps filenames to either a thunk which will provide source code, -# or a tuple (size, mtime, lines, fullname) once loaded. -cache = {} -_interactive_cache = {} - - -def clearcache(): - """Clear the cache entirely.""" - cache.clear() - - -def getline(filename, lineno, module_globals=None): - """Get a line for a Python source file from the cache. - Update the cache if it doesn't contain an entry for this file already.""" - - lines = getlines(filename, module_globals) - if 1 <= lineno <= len(lines): - return lines[lineno - 1] - return '' - - -def getlines(filename, module_globals=None): - """Get the lines for a Python source file from the cache. 
- Update the cache if it doesn't contain an entry for this file already.""" - - entry = cache.get(filename, None) - if entry is not None and len(entry) != 1: - return entry[2] - - try: - return updatecache(filename, module_globals) - except MemoryError: - clearcache() - return [] - - -def _getline_from_code(filename, lineno): - lines = _getlines_from_code(filename) - if 1 <= lineno <= len(lines): - return lines[lineno - 1] - return '' - -def _make_key(code): - return (code.co_filename, code.co_qualname, code.co_firstlineno) - -def _getlines_from_code(code): - code_id = _make_key(code) - entry = _interactive_cache.get(code_id, None) - if entry is not None and len(entry) != 1: - return entry[2] - return [] - - -def checkcache(filename=None): - """Discard cache entries that are out of date. - (This is not checked upon each call!)""" - - if filename is None: - # get keys atomically - filenames = cache.copy().keys() - else: - filenames = [filename] - - for filename in filenames: - entry = cache.get(filename, None) - if entry is None or len(entry) == 1: - # lazy cache entry, leave it lazy. - continue - size, mtime, lines, fullname = entry - if mtime is None: - continue # no-op for files loaded via a __loader__ - try: - # This import can fail if the interpreter is shutting down - import os - except ImportError: - return - try: - stat = os.stat(fullname) - except (OSError, ValueError): - cache.pop(filename, None) - continue - if size != stat.st_size or mtime != stat.st_mtime: - cache.pop(filename, None) - - -def updatecache(filename, module_globals=None): - """Update a cache entry and return its list of lines. - If something's wrong, print a message, discard the cache entry, - and return an empty list.""" - - # These imports are not at top level because linecache is in the critical - # path of the interpreter startup and importing os and sys take a lot of time - # and slows down the startup sequence. - try: - import os - import sys - import tokenize - except ImportError: - # These import can fail if the interpreter is shutting down - return [] - - entry = cache.pop(filename, None) - if not filename or (filename.startswith('<') and filename.endswith('>')): - return [] - - fullname = filename - try: - stat = os.stat(fullname) - except OSError: - basename = filename - - # Realise a lazy loader based lookup if there is one - # otherwise try to lookup right now. - lazy_entry = entry if entry is not None and len(entry) == 1 else None - if lazy_entry is None: - lazy_entry = _make_lazycache_entry(filename, module_globals) - if lazy_entry is not None: - try: - data = lazy_entry[0]() - except (ImportError, OSError): - pass - else: - if data is None: - # No luck, the PEP302 loader cannot find the source - # for this module. - return [] - entry = ( - len(data), - None, - [line + '\n' for line in data.splitlines()], - fullname - ) - cache[filename] = entry - return entry[2] - - # Try looking through the module search path, which is only useful - # when handling a relative filename. - if os.path.isabs(filename): - return [] - - for dirname in sys.path: - try: - fullname = os.path.join(dirname, basename) - except (TypeError, AttributeError): - # Not sufficiently string-like to do anything useful with. 
- continue - try: - stat = os.stat(fullname) - break - except (OSError, ValueError): - pass - else: - return [] - except ValueError: # may be raised by os.stat() - return [] - try: - with tokenize.open(fullname) as fp: - lines = fp.readlines() - except (OSError, UnicodeDecodeError, SyntaxError): - return [] - if not lines: - lines = ['\n'] - elif not lines[-1].endswith('\n'): - lines[-1] += '\n' - size, mtime = stat.st_size, stat.st_mtime - cache[filename] = size, mtime, lines, fullname - return lines - - -def lazycache(filename, module_globals): - """Seed the cache for filename with module_globals. - - The module loader will be asked for the source only when getlines is - called, not immediately. - - If there is an entry in the cache already, it is not altered. - - :return: True if a lazy load is registered in the cache, - otherwise False. To register such a load a module loader with a - get_source method must be found, the filename must be a cacheable - filename, and the filename must not be already cached. - """ - entry = cache.get(filename, None) - if entry is not None: - return len(entry) == 1 - - lazy_entry = _make_lazycache_entry(filename, module_globals) - if lazy_entry is not None: - cache[filename] = lazy_entry - return True - return False - - -def _make_lazycache_entry(filename, module_globals): - if not filename or (filename.startswith('<') and filename.endswith('>')): - return None - # Try for a __loader__, if available - if module_globals and '__name__' in module_globals: - spec = module_globals.get('__spec__') - name = getattr(spec, 'name', None) or module_globals['__name__'] - loader = getattr(spec, 'loader', None) - if loader is None: - loader = module_globals.get('__loader__') - get_source = getattr(loader, 'get_source', None) - - if name and get_source: - def get_lines(name=name, *args, **kwargs): - return get_source(name, *args, **kwargs) - return (get_lines,) - return None - - - -def _register_code(code, string, name): - entry = (len(string), - None, - [line + '\n' for line in string.splitlines()], - name) - stack = [code] - while stack: - code = stack.pop() - for const in code.co_consts: - if isinstance(const, type(code)): - stack.append(const) - key = _make_key(code) - _interactive_cache[key] = entry diff --git a/Python313_13_x86_Template/Lib/locale.py b/Python313_13_x86_Template/Lib/locale.py deleted file mode 100644 index db6d0abb..00000000 --- a/Python313_13_x86_Template/Lib/locale.py +++ /dev/null @@ -1,1778 +0,0 @@ -"""Locale support module. - -The module provides low-level access to the C lib's locale APIs and adds high -level number formatting APIs as well as a locale aliasing engine to complement -these. - -The aliasing engine includes support for many commonly used locale names and -maps them to values suitable for passing to the C lib's setlocale() function. It -also includes default encodings for all supported locale names. - -""" - -import sys -import encodings -import encodings.aliases -import re -import _collections_abc -from builtins import str as _builtin_str -import functools - -# Try importing the _locale module. -# -# If this fails, fall back on a basic 'C' locale emulation. - -# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before -# trying the import. So __all__ is also fiddled at the end of the file. 
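[A minimal sketch, not part of the deleted module itself, of how the formatting and parsing APIs that the docstring above describes compose; the printed values assume an en_US-style locale is available on the host, which is an assumption, not a guarantee:]

    import locale

    # Switch from the portable 'C' locale to the user's configured default.
    locale.setlocale(locale.LC_ALL, "")

    # format_string() applies the locale's grouping and decimal point ...
    s = locale.format_string("%.2f", 1234567.891, grouping=True)
    print(s)                     # e.g. '1,234,567.89' under an en_US locale (assumed)

    # ... and delocalize()/atof() undo it for parsing.
    print(locale.delocalize(s))  # '1234567.89'
    print(locale.atof(s))        # 1234567.89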
-__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error", - "setlocale", "localeconv", "strcoll", "strxfrm", - "str", "atof", "atoi", "format_string", "currency", - "normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY", - "LC_NUMERIC", "LC_ALL", "CHAR_MAX", "getencoding"] - -def _strcoll(a,b): - """ strcoll(string,string) -> int. - Compares two strings according to the locale. - """ - return (a > b) - (a < b) - -def _strxfrm(s): - """ strxfrm(string) -> string. - Returns a string that behaves for cmp locale-aware. - """ - return s - -try: - - from _locale import * - -except ImportError: - - # Locale emulation - - CHAR_MAX = 127 - LC_ALL = 6 - LC_COLLATE = 3 - LC_CTYPE = 0 - LC_MESSAGES = 5 - LC_MONETARY = 4 - LC_NUMERIC = 1 - LC_TIME = 2 - Error = ValueError - - def localeconv(): - """ localeconv() -> dict. - Returns numeric and monetary locale-specific parameters. - """ - # 'C' locale default values - return {'grouping': [127], - 'currency_symbol': '', - 'n_sign_posn': 127, - 'p_cs_precedes': 127, - 'n_cs_precedes': 127, - 'mon_grouping': [], - 'n_sep_by_space': 127, - 'decimal_point': '.', - 'negative_sign': '', - 'positive_sign': '', - 'p_sep_by_space': 127, - 'int_curr_symbol': '', - 'p_sign_posn': 127, - 'thousands_sep': '', - 'mon_thousands_sep': '', - 'frac_digits': 127, - 'mon_decimal_point': '', - 'int_frac_digits': 127} - - def setlocale(category, value=None): - """ setlocale(integer,string=None) -> string. - Activates/queries locale processing. - """ - if value not in (None, '', 'C'): - raise Error('_locale emulation only supports "C" locale') - return 'C' - -# These may or may not exist in _locale, so be sure to set them. -if 'strxfrm' not in globals(): - strxfrm = _strxfrm -if 'strcoll' not in globals(): - strcoll = _strcoll - - -_localeconv = localeconv - -# With this dict, you can override some items of localeconv's return value. -# This is useful for testing purposes. 
-_override_localeconv = {}
-
-@functools.wraps(_localeconv)
-def localeconv():
-    d = _localeconv()
-    if _override_localeconv:
-        d.update(_override_localeconv)
-    return d
-
-
-### Number formatting APIs
-
-# Author: Martin von Loewis
-# improved by Georg Brandl
-
-# Iterate over grouping intervals
-def _grouping_intervals(grouping):
-    last_interval = None
-    for interval in grouping:
-        # if grouping is -1, we are done
-        if interval == CHAR_MAX:
-            return
-        # 0: re-use last group ad infinitum
-        if interval == 0:
-            if last_interval is None:
-                raise ValueError("invalid grouping")
-            while True:
-                yield last_interval
-        yield interval
-        last_interval = interval
-
-#perform the grouping from right to left
-def _group(s, monetary=False):
-    conv = localeconv()
-    thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
-    grouping = conv[monetary and 'mon_grouping' or 'grouping']
-    if not grouping:
-        return (s, 0)
-    if s[-1] == ' ':
-        stripped = s.rstrip()
-        right_spaces = s[len(stripped):]
-        s = stripped
-    else:
-        right_spaces = ''
-    left_spaces = ''
-    groups = []
-    for interval in _grouping_intervals(grouping):
-        if not s or s[-1] not in "0123456789":
-            # only non-digit characters remain (sign, spaces)
-            left_spaces = s
-            s = ''
-            break
-        groups.append(s[-interval:])
-        s = s[:-interval]
-    if s:
-        groups.append(s)
-    groups.reverse()
-    return (
-        left_spaces + thousands_sep.join(groups) + right_spaces,
-        len(thousands_sep) * (len(groups) - 1)
-    )
-
-# Strip a given amount of excess padding from the given string
-def _strip_padding(s, amount):
-    lpos = 0
-    while amount and s[lpos] == ' ':
-        lpos += 1
-        amount -= 1
-    rpos = len(s) - 1
-    while amount and s[rpos] == ' ':
-        rpos -= 1
-        amount -= 1
-    return s[lpos:rpos+1]
-
-_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
-                         r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
-
-def _format(percent, value, grouping=False, monetary=False, *additional):
-    if additional:
-        formatted = percent % ((value,) + additional)
-    else:
-        formatted = percent % value
-    if percent[-1] in 'eEfFgGdiu':
-        formatted = _localize(formatted, grouping, monetary)
-    return formatted
-
-# Transform formatted as locale number according to the locale settings
-def _localize(formatted, grouping=False, monetary=False):
-    # floats and decimal ints need special action!
-    if '.' in formatted:
-        seps = 0
-        parts = formatted.split('.')
-        if grouping:
-            parts[0], seps = _group(parts[0], monetary=monetary)
-        decimal_point = localeconv()[monetary and 'mon_decimal_point'
-                                              or 'decimal_point']
-        formatted = decimal_point.join(parts)
-        if seps:
-            formatted = _strip_padding(formatted, seps)
-    else:
-        seps = 0
-        if grouping:
-            formatted, seps = _group(formatted, monetary=monetary)
-        if seps:
-            formatted = _strip_padding(formatted, seps)
-    return formatted
-
-def format_string(f, val, grouping=False, monetary=False):
-    """Formats a string in the same way that the % formatting would use,
-    but takes the current locale into account.
-
-    Grouping is applied if the third parameter is true.
- Conversion uses monetary thousands separator and grouping strings if - forth parameter monetary is true.""" - percents = list(_percent_re.finditer(f)) - new_f = _percent_re.sub('%s', f) - - if isinstance(val, _collections_abc.Mapping): - new_val = [] - for perc in percents: - if perc.group()[-1]=='%': - new_val.append('%') - else: - new_val.append(_format(perc.group(), val, grouping, monetary)) - else: - if not isinstance(val, tuple): - val = (val,) - new_val = [] - i = 0 - for perc in percents: - if perc.group()[-1]=='%': - new_val.append('%') - else: - starcount = perc.group('modifiers').count('*') - new_val.append(_format(perc.group(), - val[i], - grouping, - monetary, - *val[i+1:i+1+starcount])) - i += (1 + starcount) - val = tuple(new_val) - - return new_f % val - -def currency(val, symbol=True, grouping=False, international=False): - """Formats val according to the currency settings - in the current locale.""" - conv = localeconv() - - # check for illegal values - digits = conv[international and 'int_frac_digits' or 'frac_digits'] - if digits == 127: - raise ValueError("Currency formatting is not possible using " - "the 'C' locale.") - - s = _localize(f'{abs(val):.{digits}f}', grouping, monetary=True) - # '<' and '>' are markers if the sign must be inserted between symbol and value - s = '<' + s + '>' - - if symbol: - smb = conv[international and 'int_curr_symbol' or 'currency_symbol'] - precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes'] - separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space'] - - if precedes: - s = smb + (separated and ' ' or '') + s - else: - if international and smb[-1] == ' ': - smb = smb[:-1] - s = s + (separated and ' ' or '') + smb - - sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn'] - sign = conv[val<0 and 'negative_sign' or 'positive_sign'] - - if sign_pos == 0: - s = '(' + s + ')' - elif sign_pos == 1: - s = sign + s - elif sign_pos == 2: - s = s + sign - elif sign_pos == 3: - s = s.replace('<', sign) - elif sign_pos == 4: - s = s.replace('>', sign) - else: - # the default if nothing specified; - # this should be the most fitting sign position - s = sign + s - - return s.replace('<', '').replace('>', '') - -def str(val): - """Convert float to string, taking the locale into account.""" - return _format("%.12g", val) - -def delocalize(string): - "Parses a string as a normalized number according to the locale settings." - - conv = localeconv() - - #First, get rid of the grouping - ts = conv['thousands_sep'] - if ts: - string = string.replace(ts, '') - - #next, replace the decimal point with a dot - dd = conv['decimal_point'] - if dd: - string = string.replace(dd, '.') - return string - -def localize(string, grouping=False, monetary=False): - """Parses a string as locale number according to the locale settings.""" - return _localize(string, grouping, monetary) - -def atof(string, func=float): - "Parses a string as a float according to the locale settings." - return func(delocalize(string)) - -def atoi(string): - "Converts a string to an integer according to the locale settings." 
- return int(delocalize(string)) - -def _test(): - setlocale(LC_ALL, "") - #do grouping - s1 = format_string("%d", 123456789,1) - print(s1, "is", atoi(s1)) - #standard formatting - s1 = str(3.14) - print(s1, "is", atof(s1)) - -### Locale name aliasing engine - -# Author: Marc-Andre Lemburg, mal@lemburg.com -# Various tweaks by Fredrik Lundh - -# store away the low-level version of setlocale (it's -# overridden below) -_setlocale = setlocale - -def _replace_encoding(code, encoding): - if '.' in code: - langname = code[:code.index('.')] - else: - langname = code - # Convert the encoding to a C lib compatible encoding string - norm_encoding = encodings.normalize_encoding(encoding) - #print('norm encoding: %r' % norm_encoding) - norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(), - norm_encoding) - #print('aliased encoding: %r' % norm_encoding) - encoding = norm_encoding - norm_encoding = norm_encoding.lower() - if norm_encoding in locale_encoding_alias: - encoding = locale_encoding_alias[norm_encoding] - else: - norm_encoding = norm_encoding.replace('_', '') - norm_encoding = norm_encoding.replace('-', '') - if norm_encoding in locale_encoding_alias: - encoding = locale_encoding_alias[norm_encoding] - #print('found encoding %r' % encoding) - return langname + '.' + encoding - -def _append_modifier(code, modifier): - if modifier == 'euro': - if '.' not in code: - return code + '.ISO8859-15' - _, _, encoding = code.partition('.') - if encoding in ('ISO8859-15', 'UTF-8'): - return code - if encoding == 'ISO8859-1': - return _replace_encoding(code, 'ISO8859-15') - return code + '@' + modifier - -def normalize(localename): - - """ Returns a normalized locale code for the given locale - name. - - The returned locale code is formatted for use with - setlocale(). - - If normalization fails, the original name is returned - unchanged. - - If the given encoding is not known, the function defaults to - the default encoding for the locale code just like setlocale() - does. - - """ - # Normalize the locale name and extract the encoding and modifier - code = localename.lower() - if ':' in code: - # ':' is sometimes used as encoding delimiter. - code = code.replace(':', '.') - if '@' in code: - code, modifier = code.split('@', 1) - else: - modifier = '' - if '.' in code: - langname, encoding = code.split('.')[:2] - else: - langname = code - encoding = '' - - # First lookup: fullname (possibly with encoding and modifier) - lang_enc = langname - if encoding: - norm_encoding = encoding.replace('-', '') - norm_encoding = norm_encoding.replace('_', '') - lang_enc += '.' 
+ norm_encoding - lookup_name = lang_enc - if modifier: - lookup_name += '@' + modifier - code = locale_alias.get(lookup_name, None) - if code is not None: - return code - #print('first lookup failed') - - if modifier: - # Second try: fullname without modifier (possibly with encoding) - code = locale_alias.get(lang_enc, None) - if code is not None: - #print('lookup without modifier succeeded') - if '@' not in code: - return _append_modifier(code, modifier) - if code.split('@', 1)[1].lower() == modifier: - return code - #print('second lookup failed') - - if encoding: - # Third try: langname (without encoding, possibly with modifier) - lookup_name = langname - if modifier: - lookup_name += '@' + modifier - code = locale_alias.get(lookup_name, None) - if code is not None: - #print('lookup without encoding succeeded') - if '@' not in code: - return _replace_encoding(code, encoding) - code, modifier = code.split('@', 1) - return _replace_encoding(code, encoding) + '@' + modifier - - if modifier: - # Fourth try: langname (without encoding and modifier) - code = locale_alias.get(langname, None) - if code is not None: - #print('lookup without modifier and encoding succeeded') - if '@' not in code: - code = _replace_encoding(code, encoding) - return _append_modifier(code, modifier) - code, defmod = code.split('@', 1) - if defmod.lower() == modifier: - return _replace_encoding(code, encoding) + '@' + defmod - - return localename - -def _parse_localename(localename): - - """ Parses the locale code for localename and returns the - result as tuple (language code, encoding). - - The localename is normalized and passed through the locale - alias engine. A ValueError is raised in case the locale name - cannot be parsed. - - The language code corresponds to RFC 1766. code and encoding - can be None in case the values cannot be determined or are - unknown to this implementation. - - """ - code = normalize(localename) - if '@' in code: - # Deal with locale modifiers - code, modifier = code.split('@', 1) - if modifier == 'euro' and '.' not in code: - # Assume Latin-9 for @euro locales. This is bogus, - # since some systems may use other encodings for these - # locales. Also, we ignore other modifiers. - return code, 'iso-8859-15' - - if '.' in code: - return tuple(code.split('.')[:2]) - elif code == 'C': - return None, None - elif code == 'UTF-8': - # On macOS "LC_CTYPE=UTF-8" is a valid locale setting - # for getting UTF-8 handling for text. - return None, 'UTF-8' - raise ValueError('unknown locale: %s' % localename) - -def _build_localename(localetuple): - - """ Builds a locale code from the given tuple (language code, - encoding). - - No aliasing or normalizing takes place. - - """ - try: - language, encoding = localetuple - - if language is None: - language = 'C' - if encoding is None: - return language - else: - return language + '.' + encoding - except (TypeError, ValueError): - raise TypeError('Locale must be None, a string, or an iterable of ' - 'two strings -- language code, encoding.') from None - -def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): - - """ Tries to determine the default locale settings and returns - them as tuple (language code, encoding). - - According to POSIX, a program which has not called - setlocale(LC_ALL, "") runs using the portable 'C' locale. - Calling setlocale(LC_ALL, "") lets it use the default locale as - defined by the LANG variable. 
Since we don't want to interfere - with the current locale setting we thus emulate the behavior - in the way described above. - - To maintain compatibility with other platforms, not only the - LANG variable is tested, but a list of variables given as - envvars parameter. The first found to be defined will be - used. envvars defaults to the search path used in GNU gettext; - it must always contain the variable name 'LANG'. - - Except for the code 'C', the language code corresponds to RFC - 1766. code and encoding can be None in case the values cannot - be determined. - - """ - - import warnings - warnings._deprecated( - "locale.getdefaultlocale", - "{name!r} is deprecated and slated for removal in Python {remove}. " - "Use setlocale(), getencoding() and getlocale() instead.", - remove=(3, 15)) - return _getdefaultlocale(envvars) - - -def _getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): - try: - # check if it's supported by the _locale module - import _locale - code, encoding = _locale._getdefaultlocale() - except (ImportError, AttributeError): - pass - else: - # make sure the code/encoding values are valid - if sys.platform == "win32" and code and code[:2] == "0x": - # map windows language identifier to language name - code = windows_locale.get(int(code, 0)) - # ...add other platform-specific processing here, if - # necessary... - return code, encoding - - # fall back on POSIX behaviour - import os - lookup = os.environ.get - for variable in envvars: - localename = lookup(variable,None) - if localename: - if variable == 'LANGUAGE': - localename = localename.split(':')[0] - break - else: - localename = 'C' - return _parse_localename(localename) - - -def getlocale(category=LC_CTYPE): - - """ Returns the current setting for the given locale category as - tuple (language code, encoding). - - category may be one of the LC_* value except LC_ALL. It - defaults to LC_CTYPE. - - Except for the code 'C', the language code corresponds to RFC - 1766. code and encoding can be None in case the values cannot - be determined. - - """ - localename = _setlocale(category) - if category == LC_ALL and ';' in localename: - raise TypeError('category LC_ALL is not supported') - return _parse_localename(localename) - -def setlocale(category, locale=None): - - """ Set the locale for the given category. The locale can be - a string, an iterable of two strings (language code and encoding), - or None. - - Iterables are converted to strings using the locale aliasing - engine. Locale strings are passed directly to the C lib. - - category may be given as one of the LC_* values. - - """ - if locale and not isinstance(locale, _builtin_str): - # convert to string - locale = normalize(_build_localename(locale)) - return _setlocale(category, locale) - - -try: - from _locale import getencoding -except ImportError: - # When _locale.getencoding() is missing, locale.getencoding() uses the - # Python filesystem encoding. - def getencoding(): - return sys.getfilesystemencoding() - - -try: - CODESET -except NameError: - def getpreferredencoding(do_setlocale=True): - """Return the charset that the user is likely using.""" - if sys.flags.warn_default_encoding: - import warnings - warnings.warn( - "UTF-8 Mode affects locale.getpreferredencoding(). Consider locale.getencoding() instead.", - EncodingWarning, 2) - if sys.flags.utf8_mode: - return 'utf-8' - return getencoding() -else: - # On Unix, if CODESET is available, use that. 
- def getpreferredencoding(do_setlocale=True): - """Return the charset that the user is likely using, - according to the system configuration.""" - - if sys.flags.warn_default_encoding: - import warnings - warnings.warn( - "UTF-8 Mode affects locale.getpreferredencoding(). Consider locale.getencoding() instead.", - EncodingWarning, 2) - if sys.flags.utf8_mode: - return 'utf-8' - - if not do_setlocale: - return getencoding() - - old_loc = setlocale(LC_CTYPE) - try: - try: - setlocale(LC_CTYPE, "") - except Error: - pass - return getencoding() - finally: - setlocale(LC_CTYPE, old_loc) - - -### Database -# -# The following data was extracted from the locale.alias file which -# comes with X11 and then hand edited removing the explicit encoding -# definitions and adding some more aliases. The file is usually -# available as /usr/lib/X11/locale/locale.alias. -# - -# -# The local_encoding_alias table maps lowercase encoding alias names -# to C locale encoding names (case-sensitive). Note that normalize() -# first looks up the encoding in the encodings.aliases dictionary and -# then applies this mapping to find the correct C lib name for the -# encoding. -# -locale_encoding_alias = { - - # Mappings for non-standard encoding names used in locale names - '437': 'C', - 'c': 'C', - 'en': 'ISO8859-1', - 'jis': 'JIS7', - 'jis7': 'JIS7', - 'ajec': 'eucJP', - 'koi8c': 'KOI8-C', - 'microsoftcp1251': 'CP1251', - 'microsoftcp1255': 'CP1255', - 'microsoftcp1256': 'CP1256', - '88591': 'ISO8859-1', - '88592': 'ISO8859-2', - '88595': 'ISO8859-5', - '885915': 'ISO8859-15', - - # Mappings from Python codec names to C lib encoding names - 'ascii': 'ISO8859-1', - 'latin_1': 'ISO8859-1', - 'iso8859_1': 'ISO8859-1', - 'iso8859_10': 'ISO8859-10', - 'iso8859_11': 'ISO8859-11', - 'iso8859_13': 'ISO8859-13', - 'iso8859_14': 'ISO8859-14', - 'iso8859_15': 'ISO8859-15', - 'iso8859_16': 'ISO8859-16', - 'iso8859_2': 'ISO8859-2', - 'iso8859_3': 'ISO8859-3', - 'iso8859_4': 'ISO8859-4', - 'iso8859_5': 'ISO8859-5', - 'iso8859_6': 'ISO8859-6', - 'iso8859_7': 'ISO8859-7', - 'iso8859_8': 'ISO8859-8', - 'iso8859_9': 'ISO8859-9', - 'iso2022_jp': 'JIS7', - 'shift_jis': 'SJIS', - 'tactis': 'TACTIS', - 'euc_jp': 'eucJP', - 'euc_kr': 'eucKR', - 'utf_8': 'UTF-8', - 'koi8_r': 'KOI8-R', - 'koi8_t': 'KOI8-T', - 'koi8_u': 'KOI8-U', - 'kz1048': 'RK1048', - 'cp1251': 'CP1251', - 'cp1255': 'CP1255', - 'cp1256': 'CP1256', - - # XXX This list is still incomplete. If you know more - # mappings, please file a bug report. Thanks. -} - -for k, v in sorted(locale_encoding_alias.items()): - k = k.replace('_', '') - locale_encoding_alias.setdefault(k, v) -del k, v - -# -# The locale_alias table maps lowercase alias names to C locale names -# (case-sensitive). Encodings are always separated from the locale -# name using a dot ('.'); they should only be given in case the -# language name is needed to interpret the given encoding alias -# correctly (CJK codes often have this need). -# -# Note that the normalize() function which uses this tables -# removes '_' and '-' characters from the encoding part of the -# locale name before doing the lookup. This saves a lot of -# space in the table. -# -# MAL 2004-12-10: -# Updated alias mapping to most recent locale.alias file -# from X.org distribution using makelocalealias.py. 
-# -# These are the differences compared to the old mapping (Python 2.4 -# and older): -# -# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' -# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' -# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' -# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' -# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' -# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' -# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1' -# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' -# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' -# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' -# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' -# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' -# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' -# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP' -# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13' -# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13' -# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' -# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' -# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11' -# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312' -# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5' -# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5' -# -# MAL 2008-05-30: -# Updated alias mapping to most recent locale.alias file -# from X.org distribution using makelocalealias.py. -# -# These are the differences compared to the old mapping (Python 2.5 -# and older): -# -# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2' -# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2' -# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' -# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2' -# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' -# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8' -# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' -# -# AP 2010-04-12: -# Updated alias mapping to most recent locale.alias file -# from X.org distribution using makelocalealias.py. 
-# -# These are the differences compared to the old mapping (Python 2.6.5 -# and older): -# -# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' -# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' -# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' -# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' -# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' -# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' -# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' -# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' -# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin' -# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' -# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin' -# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8' -# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' -# -# SS 2013-12-20: -# Updated alias mapping to most recent locale.alias file -# from X.org distribution using makelocalealias.py. -# -# These are the differences compared to the old mapping (Python 3.3.3 -# and older): -# -# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' -# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' -# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' -# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' -# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' -# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' -# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8' -# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' -# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8' -# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' -# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' -# -# SS 2014-10-01: -# Updated alias mapping with glibc 2.19 supported locales. -# -# SS 2018-05-05: -# Updated alias mapping with glibc 2.27 supported locales. -# -# These are the differences compared to the old mapping (Python 3.6.5 -# and older): -# -# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia' -# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154' -# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R' -# -# SS 2025-02-04: -# Updated alias mapping with glibc 2.41 supported locales and the latest -# X lib alias mapping. -# -# These are the differences compared to the old mapping (Python 3.13.1 -# and older): -# -# updated 'c.utf8' -> 'C.UTF-8' to 'en_US.UTF-8' -# updated 'de_it' -> 'de_IT.ISO8859-1' to 'de_IT.UTF-8' -# removed 'de_li.utf8' -# updated 'en_il' -> 'en_IL.UTF-8' to 'en_IL.ISO8859-1' -# removed 'english.iso88591' -# updated 'es_cu' -> 'es_CU.UTF-8' to 'es_CU.ISO8859-1' -# updated 'russian' -> 'ru_RU.KOI8-R' to 'ru_RU.ISO8859-5' -# updated 'sr@latn' -> 'sr_CS.UTF-8@latin' to 'sr_RS.UTF-8@latin' -# removed 'univ' -# removed 'universal' -# -# SS 2025-06-10: -# Remove 'c.utf8' -> 'en_US.UTF-8' because 'en_US.UTF-8' does not exist -# on all platforms. 
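#
# A minimal usage sketch (illustrative; the results below are read straight
# off these tables and may differ between Python versions):
#
#     >>> import locale
#     >>> locale.normalize('japanese')          # bare alias, via locale_alias
#     'ja_JP.eucJP'
#     >>> locale.normalize('de_DE.iso88591')    # encoding part normalized
#     'de_DE.ISO8859-1'                         # via locale_encoding_alias
#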
- -locale_alias = { - 'a3': 'az_AZ.KOI8-C', - 'a3_az': 'az_AZ.KOI8-C', - 'a3_az.koic': 'az_AZ.KOI8-C', - 'aa_dj': 'aa_DJ.ISO8859-1', - 'aa_er': 'aa_ER.UTF-8', - 'aa_et': 'aa_ET.UTF-8', - 'af': 'af_ZA.ISO8859-1', - 'af_za': 'af_ZA.ISO8859-1', - 'agr_pe': 'agr_PE.UTF-8', - 'ak_gh': 'ak_GH.UTF-8', - 'am': 'am_ET.UTF-8', - 'am_et': 'am_ET.UTF-8', - 'american': 'en_US.ISO8859-1', - 'an_es': 'an_ES.ISO8859-15', - 'anp_in': 'anp_IN.UTF-8', - 'ar': 'ar_AA.ISO8859-6', - 'ar_aa': 'ar_AA.ISO8859-6', - 'ar_ae': 'ar_AE.ISO8859-6', - 'ar_bh': 'ar_BH.ISO8859-6', - 'ar_dz': 'ar_DZ.ISO8859-6', - 'ar_eg': 'ar_EG.ISO8859-6', - 'ar_in': 'ar_IN.UTF-8', - 'ar_iq': 'ar_IQ.ISO8859-6', - 'ar_jo': 'ar_JO.ISO8859-6', - 'ar_kw': 'ar_KW.ISO8859-6', - 'ar_lb': 'ar_LB.ISO8859-6', - 'ar_ly': 'ar_LY.ISO8859-6', - 'ar_ma': 'ar_MA.ISO8859-6', - 'ar_om': 'ar_OM.ISO8859-6', - 'ar_qa': 'ar_QA.ISO8859-6', - 'ar_sa': 'ar_SA.ISO8859-6', - 'ar_sd': 'ar_SD.ISO8859-6', - 'ar_ss': 'ar_SS.UTF-8', - 'ar_sy': 'ar_SY.ISO8859-6', - 'ar_tn': 'ar_TN.ISO8859-6', - 'ar_ye': 'ar_YE.ISO8859-6', - 'arabic': 'ar_AA.ISO8859-6', - 'as': 'as_IN.UTF-8', - 'as_in': 'as_IN.UTF-8', - 'ast_es': 'ast_ES.ISO8859-15', - 'ayc_pe': 'ayc_PE.UTF-8', - 'az': 'az_AZ.ISO8859-9E', - 'az_az': 'az_AZ.ISO8859-9E', - 'az_az.iso88599e': 'az_AZ.ISO8859-9E', - 'az_ir': 'az_IR.UTF-8', - 'be': 'be_BY.CP1251', - 'be@latin': 'be_BY.UTF-8@latin', - 'be_bg.utf8': 'bg_BG.UTF-8', - 'be_by': 'be_BY.CP1251', - 'be_by@latin': 'be_BY.UTF-8@latin', - 'bem_zm': 'bem_ZM.UTF-8', - 'ber_dz': 'ber_DZ.UTF-8', - 'ber_ma': 'ber_MA.UTF-8', - 'bg': 'bg_BG.CP1251', - 'bg_bg': 'bg_BG.CP1251', - 'bhb_in.utf8': 'bhb_IN.UTF-8', - 'bho_in': 'bho_IN.UTF-8', - 'bho_np': 'bho_NP.UTF-8', - 'bi_vu': 'bi_VU.UTF-8', - 'bn_bd': 'bn_BD.UTF-8', - 'bn_in': 'bn_IN.UTF-8', - 'bo_cn': 'bo_CN.UTF-8', - 'bo_in': 'bo_IN.UTF-8', - 'bokmal': 'nb_NO.ISO8859-1', - 'bokm\xe5l': 'nb_NO.ISO8859-1', - 'br': 'br_FR.ISO8859-1', - 'br_fr': 'br_FR.ISO8859-1', - 'brx_in': 'brx_IN.UTF-8', - 'bs': 'bs_BA.ISO8859-2', - 'bs_ba': 'bs_BA.ISO8859-2', - 'bulgarian': 'bg_BG.CP1251', - 'byn_er': 'byn_ER.UTF-8', - 'c': 'C', - 'c-french': 'fr_CA.ISO8859-1', - 'c.ascii': 'C', - 'c.en': 'C', - 'c.iso88591': 'en_US.ISO8859-1', - 'c_c': 'C', - 'c_c.c': 'C', - 'ca': 'ca_ES.ISO8859-1', - 'ca_ad': 'ca_AD.ISO8859-1', - 'ca_es': 'ca_ES.ISO8859-1', - 'ca_es@valencia': 'ca_ES.UTF-8@valencia', - 'ca_fr': 'ca_FR.ISO8859-1', - 'ca_it': 'ca_IT.ISO8859-1', - 'catalan': 'ca_ES.ISO8859-1', - 'ce_ru': 'ce_RU.UTF-8', - 'cextend': 'en_US.ISO8859-1', - 'chinese-s': 'zh_CN.eucCN', - 'chinese-t': 'zh_TW.eucTW', - 'chr_us': 'chr_US.UTF-8', - 'ckb_iq': 'ckb_IQ.UTF-8', - 'cmn_tw': 'cmn_TW.UTF-8', - 'crh_ru': 'crh_RU.UTF-8', - 'crh_ua': 'crh_UA.UTF-8', - 'croatian': 'hr_HR.ISO8859-2', - 'cs': 'cs_CZ.ISO8859-2', - 'cs_cs': 'cs_CZ.ISO8859-2', - 'cs_cz': 'cs_CZ.ISO8859-2', - 'csb_pl': 'csb_PL.UTF-8', - 'cv_ru': 'cv_RU.UTF-8', - 'cy': 'cy_GB.ISO8859-1', - 'cy_gb': 'cy_GB.ISO8859-1', - 'cz': 'cs_CZ.ISO8859-2', - 'cz_cz': 'cs_CZ.ISO8859-2', - 'czech': 'cs_CZ.ISO8859-2', - 'da': 'da_DK.ISO8859-1', - 'da_dk': 'da_DK.ISO8859-1', - 'danish': 'da_DK.ISO8859-1', - 'dansk': 'da_DK.ISO8859-1', - 'de': 'de_DE.ISO8859-1', - 'de_at': 'de_AT.ISO8859-1', - 'de_be': 'de_BE.ISO8859-1', - 'de_ch': 'de_CH.ISO8859-1', - 'de_de': 'de_DE.ISO8859-1', - 'de_it': 'de_IT.UTF-8', - 'de_li': 'de_LI.ISO8859-1', - 'de_lu': 'de_LU.ISO8859-1', - 'deutsch': 'de_DE.ISO8859-1', - 'doi_in': 'doi_IN.UTF-8', - 'dsb_de': 'dsb_DE.UTF-8', - 'dutch': 'nl_NL.ISO8859-1', - 'dutch.iso88591': 'nl_BE.ISO8859-1', - 
'dv_mv': 'dv_MV.UTF-8', - 'dz_bt': 'dz_BT.UTF-8', - 'ee': 'ee_EE.ISO8859-4', - 'ee_ee': 'ee_EE.ISO8859-4', - 'eesti': 'et_EE.ISO8859-1', - 'el': 'el_GR.ISO8859-7', - 'el_cy': 'el_CY.ISO8859-7', - 'el_gr': 'el_GR.ISO8859-7', - 'el_gr@euro': 'el_GR.ISO8859-15', - 'en': 'en_US.ISO8859-1', - 'en_ag': 'en_AG.UTF-8', - 'en_au': 'en_AU.ISO8859-1', - 'en_be': 'en_BE.ISO8859-1', - 'en_bw': 'en_BW.ISO8859-1', - 'en_ca': 'en_CA.ISO8859-1', - 'en_dk': 'en_DK.ISO8859-1', - 'en_dl.utf8': 'en_DL.UTF-8', - 'en_gb': 'en_GB.ISO8859-1', - 'en_hk': 'en_HK.ISO8859-1', - 'en_ie': 'en_IE.ISO8859-1', - 'en_il': 'en_IL.ISO8859-1', - 'en_in': 'en_IN.ISO8859-1', - 'en_ng': 'en_NG.UTF-8', - 'en_nz': 'en_NZ.ISO8859-1', - 'en_ph': 'en_PH.ISO8859-1', - 'en_sc.utf8': 'en_SC.UTF-8', - 'en_sg': 'en_SG.ISO8859-1', - 'en_uk': 'en_GB.ISO8859-1', - 'en_us': 'en_US.ISO8859-1', - 'en_us@euro@euro': 'en_US.ISO8859-15', - 'en_za': 'en_ZA.ISO8859-1', - 'en_zm': 'en_ZM.UTF-8', - 'en_zw': 'en_ZW.ISO8859-1', - 'en_zw.utf8': 'en_ZS.UTF-8', - 'eng_gb': 'en_GB.ISO8859-1', - 'english': 'en_EN.ISO8859-1', - 'english_uk': 'en_GB.ISO8859-1', - 'english_united-states': 'en_US.ISO8859-1', - 'english_united-states.437': 'C', - 'english_us': 'en_US.ISO8859-1', - 'eo': 'eo_XX.ISO8859-3', - 'eo.utf8': 'eo.UTF-8', - 'eo_eo': 'eo_EO.ISO8859-3', - 'eo_us.utf8': 'eo_US.UTF-8', - 'eo_xx': 'eo_XX.ISO8859-3', - 'es': 'es_ES.ISO8859-1', - 'es_ar': 'es_AR.ISO8859-1', - 'es_bo': 'es_BO.ISO8859-1', - 'es_cl': 'es_CL.ISO8859-1', - 'es_co': 'es_CO.ISO8859-1', - 'es_cr': 'es_CR.ISO8859-1', - 'es_cu': 'es_CU.ISO8859-1', - 'es_do': 'es_DO.ISO8859-1', - 'es_ec': 'es_EC.ISO8859-1', - 'es_es': 'es_ES.ISO8859-1', - 'es_gt': 'es_GT.ISO8859-1', - 'es_hn': 'es_HN.ISO8859-1', - 'es_mx': 'es_MX.ISO8859-1', - 'es_ni': 'es_NI.ISO8859-1', - 'es_pa': 'es_PA.ISO8859-1', - 'es_pe': 'es_PE.ISO8859-1', - 'es_pr': 'es_PR.ISO8859-1', - 'es_py': 'es_PY.ISO8859-1', - 'es_sv': 'es_SV.ISO8859-1', - 'es_us': 'es_US.ISO8859-1', - 'es_uy': 'es_UY.ISO8859-1', - 'es_ve': 'es_VE.ISO8859-1', - 'estonian': 'et_EE.ISO8859-1', - 'et': 'et_EE.ISO8859-15', - 'et_ee': 'et_EE.ISO8859-15', - 'eu': 'eu_ES.ISO8859-1', - 'eu_es': 'eu_ES.ISO8859-1', - 'eu_fr': 'eu_FR.ISO8859-1', - 'fa': 'fa_IR.UTF-8', - 'fa_ir': 'fa_IR.UTF-8', - 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342', - 'ff_sn': 'ff_SN.UTF-8', - 'fi': 'fi_FI.ISO8859-15', - 'fi_fi': 'fi_FI.ISO8859-15', - 'fil_ph': 'fil_PH.UTF-8', - 'finnish': 'fi_FI.ISO8859-1', - 'fo': 'fo_FO.ISO8859-1', - 'fo_fo': 'fo_FO.ISO8859-1', - 'fr': 'fr_FR.ISO8859-1', - 'fr_be': 'fr_BE.ISO8859-1', - 'fr_ca': 'fr_CA.ISO8859-1', - 'fr_ch': 'fr_CH.ISO8859-1', - 'fr_fr': 'fr_FR.ISO8859-1', - 'fr_lu': 'fr_LU.ISO8859-1', - 'fran\xe7ais': 'fr_FR.ISO8859-1', - 'fre_fr': 'fr_FR.ISO8859-1', - 'french': 'fr_FR.ISO8859-1', - 'french.iso88591': 'fr_CH.ISO8859-1', - 'french_france': 'fr_FR.ISO8859-1', - 'fur_it': 'fur_IT.UTF-8', - 'fy_de': 'fy_DE.UTF-8', - 'fy_nl': 'fy_NL.UTF-8', - 'ga': 'ga_IE.ISO8859-1', - 'ga_ie': 'ga_IE.ISO8859-1', - 'galego': 'gl_ES.ISO8859-1', - 'galician': 'gl_ES.ISO8859-1', - 'gbm_in': 'gbm_IN.UTF-8', - 'gd': 'gd_GB.ISO8859-1', - 'gd_gb': 'gd_GB.ISO8859-1', - 'ger_de': 'de_DE.ISO8859-1', - 'german': 'de_DE.ISO8859-1', - 'german.iso88591': 'de_CH.ISO8859-1', - 'german_germany': 'de_DE.ISO8859-1', - 'gez_er': 'gez_ER.UTF-8', - 'gez_et': 'gez_ET.UTF-8', - 'gl': 'gl_ES.ISO8859-1', - 'gl_es': 'gl_ES.ISO8859-1', - 'greek': 'el_GR.ISO8859-7', - 'gu_in': 'gu_IN.UTF-8', - 'gv': 'gv_GB.ISO8859-1', - 'gv_gb': 'gv_GB.ISO8859-1', - 'ha_ng': 'ha_NG.UTF-8', - 'hak_tw': 'hak_TW.UTF-8', 
- 'he': 'he_IL.ISO8859-8', - 'he_il': 'he_IL.ISO8859-8', - 'hebrew': 'he_IL.ISO8859-8', - 'hi': 'hi_IN.ISCII-DEV', - 'hi_in': 'hi_IN.ISCII-DEV', - 'hi_in.isciidev': 'hi_IN.ISCII-DEV', - 'hif_fj': 'hif_FJ.UTF-8', - 'hne': 'hne_IN.UTF-8', - 'hne_in': 'hne_IN.UTF-8', - 'hr': 'hr_HR.ISO8859-2', - 'hr_hr': 'hr_HR.ISO8859-2', - 'hrvatski': 'hr_HR.ISO8859-2', - 'hsb_de': 'hsb_DE.ISO8859-2', - 'ht_ht': 'ht_HT.UTF-8', - 'hu': 'hu_HU.ISO8859-2', - 'hu_hu': 'hu_HU.ISO8859-2', - 'hungarian': 'hu_HU.ISO8859-2', - 'hy_am': 'hy_AM.UTF-8', - 'hy_am.armscii8': 'hy_AM.ARMSCII_8', - 'ia': 'ia.UTF-8', - 'ia_fr': 'ia_FR.UTF-8', - 'icelandic': 'is_IS.ISO8859-1', - 'id': 'id_ID.ISO8859-1', - 'id_id': 'id_ID.ISO8859-1', - 'ie': 'ie.UTF-8', - 'ig_ng': 'ig_NG.UTF-8', - 'ik_ca': 'ik_CA.UTF-8', - 'in': 'id_ID.ISO8859-1', - 'in_id': 'id_ID.ISO8859-1', - 'is': 'is_IS.ISO8859-1', - 'is_is': 'is_IS.ISO8859-1', - 'iso-8859-1': 'en_US.ISO8859-1', - 'iso-8859-15': 'en_US.ISO8859-15', - 'iso8859-1': 'en_US.ISO8859-1', - 'iso8859-15': 'en_US.ISO8859-15', - 'iso_8859_1': 'en_US.ISO8859-1', - 'iso_8859_15': 'en_US.ISO8859-15', - 'it': 'it_IT.ISO8859-1', - 'it_ch': 'it_CH.ISO8859-1', - 'it_it': 'it_IT.ISO8859-1', - 'italian': 'it_IT.ISO8859-1', - 'iu': 'iu_CA.NUNACOM-8', - 'iu_ca': 'iu_CA.NUNACOM-8', - 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8', - 'iw': 'he_IL.ISO8859-8', - 'iw_il': 'he_IL.ISO8859-8', - 'iw_il.utf8': 'iw_IL.UTF-8', - 'ja': 'ja_JP.eucJP', - 'ja_jp': 'ja_JP.eucJP', - 'ja_jp.euc': 'ja_JP.eucJP', - 'ja_jp.mscode': 'ja_JP.SJIS', - 'ja_jp.pck': 'ja_JP.SJIS', - 'japan': 'ja_JP.eucJP', - 'japanese': 'ja_JP.eucJP', - 'japanese-euc': 'ja_JP.eucJP', - 'japanese.euc': 'ja_JP.eucJP', - 'jp_jp': 'ja_JP.eucJP', - 'ka': 'ka_GE.GEORGIAN-ACADEMY', - 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY', - 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY', - 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS', - 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY', - 'kab_dz': 'kab_DZ.UTF-8', - 'kk_kz': 'kk_KZ.ptcp154', - 'kl': 'kl_GL.ISO8859-1', - 'kl_gl': 'kl_GL.ISO8859-1', - 'km_kh': 'km_KH.UTF-8', - 'kn': 'kn_IN.UTF-8', - 'kn_in': 'kn_IN.UTF-8', - 'ko': 'ko_KR.eucKR', - 'ko_kr': 'ko_KR.eucKR', - 'ko_kr.euc': 'ko_KR.eucKR', - 'kok_in': 'kok_IN.UTF-8', - 'korean': 'ko_KR.eucKR', - 'korean.euc': 'ko_KR.eucKR', - 'ks': 'ks_IN.UTF-8', - 'ks_in': 'ks_IN.UTF-8', - 'ks_in@devanagari.utf8': 'ks_IN.UTF-8@devanagari', - 'ku_tr': 'ku_TR.ISO8859-9', - 'kv_ru': 'kv_RU.UTF-8', - 'kw': 'kw_GB.ISO8859-1', - 'kw_gb': 'kw_GB.ISO8859-1', - 'ky': 'ky_KG.UTF-8', - 'ky_kg': 'ky_KG.UTF-8', - 'lb_lu': 'lb_LU.UTF-8', - 'lg_ug': 'lg_UG.ISO8859-10', - 'li_be': 'li_BE.UTF-8', - 'li_nl': 'li_NL.UTF-8', - 'lij_it': 'lij_IT.UTF-8', - 'lithuanian': 'lt_LT.ISO8859-13', - 'ln_cd': 'ln_CD.UTF-8', - 'lo': 'lo_LA.MULELAO-1', - 'lo_la': 'lo_LA.MULELAO-1', - 'lo_la.cp1133': 'lo_LA.IBM-CP1133', - 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133', - 'lo_la.mulelao1': 'lo_LA.MULELAO-1', - 'lt': 'lt_LT.ISO8859-13', - 'lt_lt': 'lt_LT.ISO8859-13', - 'ltg_lv.utf8': 'ltg_LV.UTF-8', - 'lv': 'lv_LV.ISO8859-13', - 'lv_lv': 'lv_LV.ISO8859-13', - 'lzh_tw': 'lzh_TW.UTF-8', - 'mag_in': 'mag_IN.UTF-8', - 'mai': 'mai_IN.UTF-8', - 'mai_in': 'mai_IN.UTF-8', - 'mai_np': 'mai_NP.UTF-8', - 'mdf_ru': 'mdf_RU.UTF-8', - 'mfe_mu': 'mfe_MU.UTF-8', - 'mg_mg': 'mg_MG.ISO8859-15', - 'mhr_ru': 'mhr_RU.UTF-8', - 'mi': 'mi_NZ.ISO8859-1', - 'mi_nz': 'mi_NZ.ISO8859-1', - 'miq_ni': 'miq_NI.UTF-8', - 'mjw_in': 'mjw_IN.UTF-8', - 'mk': 'mk_MK.ISO8859-5', - 'mk_mk': 'mk_MK.ISO8859-5', - 'ml': 'ml_IN.UTF-8', - 'ml_in': 'ml_IN.UTF-8', - 'mn_mn': 'mn_MN.UTF-8', - 
'mni_in': 'mni_IN.UTF-8', - 'mnw_mm': 'mnw_MM.UTF-8', - 'mr': 'mr_IN.UTF-8', - 'mr_in': 'mr_IN.UTF-8', - 'ms': 'ms_MY.ISO8859-1', - 'ms_my': 'ms_MY.ISO8859-1', - 'mt': 'mt_MT.ISO8859-3', - 'mt_mt': 'mt_MT.ISO8859-3', - 'my_mm': 'my_MM.UTF-8', - 'nan_tw': 'nan_TW.UTF-8', - 'nb': 'nb_NO.ISO8859-1', - 'nb_no': 'nb_NO.ISO8859-1', - 'nds_de': 'nds_DE.UTF-8', - 'nds_nl': 'nds_NL.UTF-8', - 'ne_np': 'ne_NP.UTF-8', - 'nhn_mx': 'nhn_MX.UTF-8', - 'niu_nu': 'niu_NU.UTF-8', - 'niu_nz': 'niu_NZ.UTF-8', - 'nl': 'nl_NL.ISO8859-1', - 'nl_aw': 'nl_AW.UTF-8', - 'nl_be': 'nl_BE.ISO8859-1', - 'nl_nl': 'nl_NL.ISO8859-1', - 'nn': 'nn_NO.ISO8859-1', - 'nn_no': 'nn_NO.ISO8859-1', - 'no': 'no_NO.ISO8859-1', - 'no@nynorsk': 'ny_NO.ISO8859-1', - 'no_no': 'no_NO.ISO8859-1', - 'no_no.iso88591@bokmal': 'no_NO.ISO8859-1', - 'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1', - 'norwegian': 'no_NO.ISO8859-1', - 'nr': 'nr_ZA.ISO8859-1', - 'nr_za': 'nr_ZA.ISO8859-1', - 'nso': 'nso_ZA.ISO8859-15', - 'nso_za': 'nso_ZA.ISO8859-15', - 'ny': 'ny_NO.ISO8859-1', - 'ny_no': 'ny_NO.ISO8859-1', - 'nynorsk': 'nn_NO.ISO8859-1', - 'oc': 'oc_FR.ISO8859-1', - 'oc_fr': 'oc_FR.ISO8859-1', - 'om_et': 'om_ET.UTF-8', - 'om_ke': 'om_KE.ISO8859-1', - 'or': 'or_IN.UTF-8', - 'or_in': 'or_IN.UTF-8', - 'os_ru': 'os_RU.UTF-8', - 'pa': 'pa_IN.UTF-8', - 'pa_in': 'pa_IN.UTF-8', - 'pa_pk': 'pa_PK.UTF-8', - 'pap_an': 'pap_AN.UTF-8', - 'pap_aw': 'pap_AW.UTF-8', - 'pap_cw': 'pap_CW.UTF-8', - 'pd': 'pd_US.ISO8859-1', - 'pd_de': 'pd_DE.ISO8859-1', - 'pd_us': 'pd_US.ISO8859-1', - 'ph': 'ph_PH.ISO8859-1', - 'ph_ph': 'ph_PH.ISO8859-1', - 'pl': 'pl_PL.ISO8859-2', - 'pl_pl': 'pl_PL.ISO8859-2', - 'polish': 'pl_PL.ISO8859-2', - 'portuguese': 'pt_PT.ISO8859-1', - 'portuguese_brazil': 'pt_BR.ISO8859-1', - 'posix': 'C', - 'posix-utf2': 'C', - 'pp': 'pp_AN.ISO8859-1', - 'pp_an': 'pp_AN.ISO8859-1', - 'ps_af': 'ps_AF.UTF-8', - 'pt': 'pt_PT.ISO8859-1', - 'pt_br': 'pt_BR.ISO8859-1', - 'pt_pt': 'pt_PT.ISO8859-1', - 'quz_pe': 'quz_PE.UTF-8', - 'raj_in': 'raj_IN.UTF-8', - 'rif_ma': 'rif_MA.UTF-8', - 'ro': 'ro_RO.ISO8859-2', - 'ro_ro': 'ro_RO.ISO8859-2', - 'romanian': 'ro_RO.ISO8859-2', - 'ru': 'ru_RU.UTF-8', - 'ru_ru': 'ru_RU.UTF-8', - 'ru_ua': 'ru_UA.KOI8-U', - 'rumanian': 'ro_RO.ISO8859-2', - 'russian': 'ru_RU.ISO8859-5', - 'rw': 'rw_RW.ISO8859-1', - 'rw_rw': 'rw_RW.ISO8859-1', - 'sa_in': 'sa_IN.UTF-8', - 'sah_ru': 'sah_RU.UTF-8', - 'sat_in': 'sat_IN.UTF-8', - 'sc_it': 'sc_IT.UTF-8', - 'scn_it': 'scn_IT.UTF-8', - 'sd': 'sd_IN.UTF-8', - 'sd_in': 'sd_IN.UTF-8', - 'sd_in@devanagari.utf8': 'sd_IN.UTF-8@devanagari', - 'sd_pk': 'sd_PK.UTF-8', - 'se_no': 'se_NO.UTF-8', - 'serbocroatian': 'sr_RS.UTF-8@latin', - 'sgs_lt': 'sgs_LT.UTF-8', - 'sh': 'sr_RS.UTF-8@latin', - 'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2', - 'sh_hr': 'sh_HR.ISO8859-2', - 'sh_hr.iso88592': 'hr_HR.ISO8859-2', - 'sh_sp': 'sr_CS.ISO8859-2', - 'sh_yu': 'sr_RS.UTF-8@latin', - 'shn_mm': 'shn_MM.UTF-8', - 'shs_ca': 'shs_CA.UTF-8', - 'si': 'si_LK.UTF-8', - 'si_lk': 'si_LK.UTF-8', - 'sid_et': 'sid_ET.UTF-8', - 'sinhala': 'si_LK.UTF-8', - 'sk': 'sk_SK.ISO8859-2', - 'sk_sk': 'sk_SK.ISO8859-2', - 'sl': 'sl_SI.ISO8859-2', - 'sl_cs': 'sl_CS.ISO8859-2', - 'sl_si': 'sl_SI.ISO8859-2', - 'slovak': 'sk_SK.ISO8859-2', - 'slovene': 'sl_SI.ISO8859-2', - 'slovenian': 'sl_SI.ISO8859-2', - 'sm_ws': 'sm_WS.UTF-8', - 'so_dj': 'so_DJ.ISO8859-1', - 'so_et': 'so_ET.UTF-8', - 'so_ke': 'so_KE.ISO8859-1', - 'so_so': 'so_SO.ISO8859-1', - 'sp': 'sr_CS.ISO8859-5', - 'sp_yu': 'sr_CS.ISO8859-5', - 'spanish': 'es_ES.ISO8859-1', - 'spanish_spain': 
'es_ES.ISO8859-1', - 'sq': 'sq_AL.ISO8859-2', - 'sq_al': 'sq_AL.ISO8859-2', - 'sq_mk': 'sq_MK.UTF-8', - 'sr': 'sr_RS.UTF-8', - 'sr@cyrillic': 'sr_RS.UTF-8', - 'sr@latn': 'sr_RS.UTF-8@latin', - 'sr_cs': 'sr_CS.UTF-8', - 'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2', - 'sr_cs@latn': 'sr_CS.UTF-8@latin', - 'sr_me': 'sr_ME.UTF-8', - 'sr_rs': 'sr_RS.UTF-8', - 'sr_rs@latn': 'sr_RS.UTF-8@latin', - 'sr_sp': 'sr_CS.ISO8859-2', - 'sr_yu': 'sr_RS.UTF-8@latin', - 'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251', - 'sr_yu.iso88592': 'sr_CS.ISO8859-2', - 'sr_yu.iso88595': 'sr_CS.ISO8859-5', - 'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5', - 'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251', - 'sr_yu.utf8': 'sr_RS.UTF-8', - 'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8', - 'sr_yu@cyrillic': 'sr_RS.UTF-8', - 'ss': 'ss_ZA.ISO8859-1', - 'ss_za': 'ss_ZA.ISO8859-1', - 'ssy_er': 'ssy_ER.UTF-8', - 'st': 'st_ZA.ISO8859-1', - 'st_za': 'st_ZA.ISO8859-1', - 'su_id': 'su_ID.UTF-8', - 'sv': 'sv_SE.ISO8859-1', - 'sv_fi': 'sv_FI.ISO8859-1', - 'sv_se': 'sv_SE.ISO8859-1', - 'sw_ke': 'sw_KE.UTF-8', - 'sw_tz': 'sw_TZ.UTF-8', - 'swedish': 'sv_SE.ISO8859-1', - 'syr': 'syr.UTF-8', - 'szl_pl': 'szl_PL.UTF-8', - 'ta': 'ta_IN.TSCII-0', - 'ta_in': 'ta_IN.TSCII-0', - 'ta_in.tscii': 'ta_IN.TSCII-0', - 'ta_in.tscii0': 'ta_IN.TSCII-0', - 'ta_lk': 'ta_LK.UTF-8', - 'tcy_in.utf8': 'tcy_IN.UTF-8', - 'te': 'te_IN.UTF-8', - 'te_in': 'te_IN.UTF-8', - 'tg': 'tg_TJ.KOI8-C', - 'tg_tj': 'tg_TJ.KOI8-C', - 'th': 'th_TH.ISO8859-11', - 'th_th': 'th_TH.ISO8859-11', - 'th_th.tactis': 'th_TH.TIS620', - 'th_th.tis620': 'th_TH.TIS620', - 'thai': 'th_TH.ISO8859-11', - 'the_np': 'the_NP.UTF-8', - 'ti_er': 'ti_ER.UTF-8', - 'ti_et': 'ti_ET.UTF-8', - 'tig_er': 'tig_ER.UTF-8', - 'tk_tm': 'tk_TM.UTF-8', - 'tl': 'tl_PH.ISO8859-1', - 'tl_ph': 'tl_PH.ISO8859-1', - 'tn': 'tn_ZA.ISO8859-15', - 'tn_za': 'tn_ZA.ISO8859-15', - 'to_to': 'to_TO.UTF-8', - 'tok': 'tok.UTF-8', - 'tpi_pg': 'tpi_PG.UTF-8', - 'tr': 'tr_TR.ISO8859-9', - 'tr_cy': 'tr_CY.ISO8859-9', - 'tr_tr': 'tr_TR.ISO8859-9', - 'ts': 'ts_ZA.ISO8859-1', - 'ts_za': 'ts_ZA.ISO8859-1', - 'tt': 'tt_RU.TATAR-CYR', - 'tt_ru': 'tt_RU.TATAR-CYR', - 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR', - 'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif', - 'turkish': 'tr_TR.ISO8859-9', - 'ug_cn': 'ug_CN.UTF-8', - 'uk': 'uk_UA.KOI8-U', - 'uk_ua': 'uk_UA.KOI8-U', - 'univ.utf8': 'en_US.UTF-8', - 'universal.utf8@ucs4': 'en_US.UTF-8', - 'unm_us': 'unm_US.UTF-8', - 'ur': 'ur_PK.CP1256', - 'ur_in': 'ur_IN.UTF-8', - 'ur_pk': 'ur_PK.CP1256', - 'uz': 'uz_UZ.UTF-8', - 'uz_uz': 'uz_UZ.UTF-8', - 'uz_uz@cyrillic': 'uz_UZ.UTF-8', - 've': 've_ZA.UTF-8', - 've_za': 've_ZA.UTF-8', - 'vi': 'vi_VN.TCVN', - 'vi_vn': 'vi_VN.TCVN', - 'vi_vn.tcvn': 'vi_VN.TCVN', - 'vi_vn.tcvn5712': 'vi_VN.TCVN', - 'vi_vn.viscii': 'vi_VN.VISCII', - 'vi_vn.viscii111': 'vi_VN.VISCII', - 'wa': 'wa_BE.ISO8859-1', - 'wa_be': 'wa_BE.ISO8859-1', - 'wae_ch': 'wae_CH.UTF-8', - 'wal_et': 'wal_ET.UTF-8', - 'wo_sn': 'wo_SN.UTF-8', - 'xh': 'xh_ZA.ISO8859-1', - 'xh_za': 'xh_ZA.ISO8859-1', - 'yi': 'yi_US.CP1255', - 'yi_us': 'yi_US.CP1255', - 'yo_ng': 'yo_NG.UTF-8', - 'yue_hk': 'yue_HK.UTF-8', - 'yuw_pg': 'yuw_PG.UTF-8', - 'zgh_ma': 'zgh_MA.UTF-8', - 'zh': 'zh_CN.eucCN', - 'zh_cn': 'zh_CN.gb2312', - 'zh_cn.big5': 'zh_TW.big5', - 'zh_cn.euc': 'zh_CN.eucCN', - 'zh_hk': 'zh_HK.big5hkscs', - 'zh_hk.big5hk': 'zh_HK.big5hkscs', - 'zh_sg': 'zh_SG.GB2312', - 'zh_sg.gbk': 'zh_SG.GBK', - 'zh_tw': 'zh_TW.big5', - 'zh_tw.euc': 'zh_TW.eucTW', - 'zh_tw.euctw': 'zh_TW.eucTW', - 'zu': 'zu_ZA.ISO8859-1', - 'zu_za': 'zu_ZA.ISO8859-1', -} - -# -# 
This maps Windows language identifiers to locale strings.
-#
-# This list has been updated from
-# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
-# to include every locale up to Windows Vista.
-#
-# NOTE: this mapping is incomplete. If your language is missing, please
-# submit a bug report as detailed in the Python devguide at:
-# https://devguide.python.org/triage/issue-tracker/
-# Make sure you include the missing language identifier and the suggested
-# locale code.
-#
-
-windows_locale = {
-    0x0436: "af_ZA", # Afrikaans
-    0x041c: "sq_AL", # Albanian
-    0x0484: "gsw_FR",# Alsatian - France
-    0x045e: "am_ET", # Amharic - Ethiopia
-    0x0401: "ar_SA", # Arabic - Saudi Arabia
-    0x0801: "ar_IQ", # Arabic - Iraq
-    0x0c01: "ar_EG", # Arabic - Egypt
-    0x1001: "ar_LY", # Arabic - Libya
-    0x1401: "ar_DZ", # Arabic - Algeria
-    0x1801: "ar_MA", # Arabic - Morocco
-    0x1c01: "ar_TN", # Arabic - Tunisia
-    0x2001: "ar_OM", # Arabic - Oman
-    0x2401: "ar_YE", # Arabic - Yemen
-    0x2801: "ar_SY", # Arabic - Syria
-    0x2c01: "ar_JO", # Arabic - Jordan
-    0x3001: "ar_LB", # Arabic - Lebanon
-    0x3401: "ar_KW", # Arabic - Kuwait
-    0x3801: "ar_AE", # Arabic - United Arab Emirates
-    0x3c01: "ar_BH", # Arabic - Bahrain
-    0x4001: "ar_QA", # Arabic - Qatar
-    0x042b: "hy_AM", # Armenian
-    0x044d: "as_IN", # Assamese - India
-    0x042c: "az_AZ", # Azeri - Latin
-    0x082c: "az_AZ", # Azeri - Cyrillic
-    0x046d: "ba_RU", # Bashkir
-    0x042d: "eu_ES", # Basque - Spain
-    0x0423: "be_BY", # Belarusian
-    0x0445: "bn_IN", # Bengali
-    0x201a: "bs_BA", # Bosnian - Cyrillic
-    0x141a: "bs_BA", # Bosnian - Latin
-    0x047e: "br_FR", # Breton - France
-    0x0402: "bg_BG", # Bulgarian
-#    0x0455: "my_MM", # Burmese - Not supported
-    0x0403: "ca_ES", # Catalan
-    0x0004: "zh_CHS",# Chinese - Simplified
-    0x0404: "zh_TW", # Chinese - Taiwan
-    0x0804: "zh_CN", # Chinese - PRC
-    0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
-    0x1004: "zh_SG", # Chinese - Singapore
-    0x1404: "zh_MO", # Chinese - Macao S.A.R.
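    # A minimal usage sketch (illustrative): resolve a Windows language
    # identifier to a POSIX-style locale name; identifiers missing from
    # this table simply yield None:
    #
    #     >>> windows_locale.get(0x0409)   # English - United States
    #     'en_US'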
- 0x7c04: "zh_CHT",# Chinese - Traditional - 0x0483: "co_FR", # Corsican - France - 0x041a: "hr_HR", # Croatian - 0x101a: "hr_BA", # Croatian - Bosnia - 0x0405: "cs_CZ", # Czech - 0x0406: "da_DK", # Danish - 0x048c: "gbz_AF",# Dari - Afghanistan - 0x0465: "div_MV",# Divehi - Maldives - 0x0413: "nl_NL", # Dutch - The Netherlands - 0x0813: "nl_BE", # Dutch - Belgium - 0x0409: "en_US", # English - United States - 0x0809: "en_GB", # English - United Kingdom - 0x0c09: "en_AU", # English - Australia - 0x1009: "en_CA", # English - Canada - 0x1409: "en_NZ", # English - New Zealand - 0x1809: "en_IE", # English - Ireland - 0x1c09: "en_ZA", # English - South Africa - 0x2009: "en_JA", # English - Jamaica - 0x2409: "en_CB", # English - Caribbean - 0x2809: "en_BZ", # English - Belize - 0x2c09: "en_TT", # English - Trinidad - 0x3009: "en_ZW", # English - Zimbabwe - 0x3409: "en_PH", # English - Philippines - 0x4009: "en_IN", # English - India - 0x4409: "en_MY", # English - Malaysia - 0x4809: "en_IN", # English - Singapore - 0x0425: "et_EE", # Estonian - 0x0438: "fo_FO", # Faroese - 0x0464: "fil_PH",# Filipino - 0x040b: "fi_FI", # Finnish - 0x040c: "fr_FR", # French - France - 0x080c: "fr_BE", # French - Belgium - 0x0c0c: "fr_CA", # French - Canada - 0x100c: "fr_CH", # French - Switzerland - 0x140c: "fr_LU", # French - Luxembourg - 0x180c: "fr_MC", # French - Monaco - 0x0462: "fy_NL", # Frisian - Netherlands - 0x0456: "gl_ES", # Galician - 0x0437: "ka_GE", # Georgian - 0x0407: "de_DE", # German - Germany - 0x0807: "de_CH", # German - Switzerland - 0x0c07: "de_AT", # German - Austria - 0x1007: "de_LU", # German - Luxembourg - 0x1407: "de_LI", # German - Liechtenstein - 0x0408: "el_GR", # Greek - 0x046f: "kl_GL", # Greenlandic - Greenland - 0x0447: "gu_IN", # Gujarati - 0x0468: "ha_NG", # Hausa - Latin - 0x040d: "he_IL", # Hebrew - 0x0439: "hi_IN", # Hindi - 0x040e: "hu_HU", # Hungarian - 0x040f: "is_IS", # Icelandic - 0x0421: "id_ID", # Indonesian - 0x045d: "iu_CA", # Inuktitut - Syllabics - 0x085d: "iu_CA", # Inuktitut - Latin - 0x083c: "ga_IE", # Irish - Ireland - 0x0410: "it_IT", # Italian - Italy - 0x0810: "it_CH", # Italian - Switzerland - 0x0411: "ja_JP", # Japanese - 0x044b: "kn_IN", # Kannada - India - 0x043f: "kk_KZ", # Kazakh - 0x0453: "kh_KH", # Khmer - Cambodia - 0x0486: "qut_GT",# K'iche - Guatemala - 0x0487: "rw_RW", # Kinyarwanda - Rwanda - 0x0457: "kok_IN",# Konkani - 0x0412: "ko_KR", # Korean - 0x0440: "ky_KG", # Kyrgyz - 0x0454: "lo_LA", # Lao - Lao PDR - 0x0426: "lv_LV", # Latvian - 0x0427: "lt_LT", # Lithuanian - 0x082e: "dsb_DE",# Lower Sorbian - Germany - 0x046e: "lb_LU", # Luxembourgish - 0x042f: "mk_MK", # FYROM Macedonian - 0x043e: "ms_MY", # Malay - Malaysia - 0x083e: "ms_BN", # Malay - Brunei Darussalam - 0x044c: "ml_IN", # Malayalam - India - 0x043a: "mt_MT", # Maltese - 0x0481: "mi_NZ", # Maori - 0x047a: "arn_CL",# Mapudungun - 0x044e: "mr_IN", # Marathi - 0x047c: "moh_CA",# Mohawk - Canada - 0x0450: "mn_MN", # Mongolian - Cyrillic - 0x0850: "mn_CN", # Mongolian - PRC - 0x0461: "ne_NP", # Nepali - 0x0414: "nb_NO", # Norwegian - Bokmal - 0x0814: "nn_NO", # Norwegian - Nynorsk - 0x0482: "oc_FR", # Occitan - France - 0x0448: "or_IN", # Oriya - India - 0x0463: "ps_AF", # Pashto - Afghanistan - 0x0429: "fa_IR", # Persian - 0x0415: "pl_PL", # Polish - 0x0416: "pt_BR", # Portuguese - Brazil - 0x0816: "pt_PT", # Portuguese - Portugal - 0x0446: "pa_IN", # Punjabi - 0x046b: "quz_BO",# Quechua (Bolivia) - 0x086b: "quz_EC",# Quechua (Ecuador) - 0x0c6b: "quz_PE",# Quechua (Peru) - 0x0418: 
"ro_RO", # Romanian - Romania - 0x0417: "rm_CH", # Romansh - 0x0419: "ru_RU", # Russian - 0x243b: "smn_FI",# Sami Finland - 0x103b: "smj_NO",# Sami Norway - 0x143b: "smj_SE",# Sami Sweden - 0x043b: "se_NO", # Sami Northern Norway - 0x083b: "se_SE", # Sami Northern Sweden - 0x0c3b: "se_FI", # Sami Northern Finland - 0x203b: "sms_FI",# Sami Skolt - 0x183b: "sma_NO",# Sami Southern Norway - 0x1c3b: "sma_SE",# Sami Southern Sweden - 0x044f: "sa_IN", # Sanskrit - 0x0c1a: "sr_SP", # Serbian - Cyrillic - 0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic - 0x081a: "sr_SP", # Serbian - Latin - 0x181a: "sr_BA", # Serbian - Bosnia Latin - 0x045b: "si_LK", # Sinhala - Sri Lanka - 0x046c: "ns_ZA", # Northern Sotho - 0x0432: "tn_ZA", # Setswana - Southern Africa - 0x041b: "sk_SK", # Slovak - 0x0424: "sl_SI", # Slovenian - 0x040a: "es_ES", # Spanish - Spain - 0x080a: "es_MX", # Spanish - Mexico - 0x0c0a: "es_ES", # Spanish - Spain (Modern) - 0x100a: "es_GT", # Spanish - Guatemala - 0x140a: "es_CR", # Spanish - Costa Rica - 0x180a: "es_PA", # Spanish - Panama - 0x1c0a: "es_DO", # Spanish - Dominican Republic - 0x200a: "es_VE", # Spanish - Venezuela - 0x240a: "es_CO", # Spanish - Colombia - 0x280a: "es_PE", # Spanish - Peru - 0x2c0a: "es_AR", # Spanish - Argentina - 0x300a: "es_EC", # Spanish - Ecuador - 0x340a: "es_CL", # Spanish - Chile - 0x380a: "es_UR", # Spanish - Uruguay - 0x3c0a: "es_PY", # Spanish - Paraguay - 0x400a: "es_BO", # Spanish - Bolivia - 0x440a: "es_SV", # Spanish - El Salvador - 0x480a: "es_HN", # Spanish - Honduras - 0x4c0a: "es_NI", # Spanish - Nicaragua - 0x500a: "es_PR", # Spanish - Puerto Rico - 0x540a: "es_US", # Spanish - United States -# 0x0430: "", # Sutu - Not supported - 0x0441: "sw_KE", # Swahili - 0x041d: "sv_SE", # Swedish - Sweden - 0x081d: "sv_FI", # Swedish - Finland - 0x045a: "syr_SY",# Syriac - 0x0428: "tg_TJ", # Tajik - Cyrillic - 0x085f: "tmz_DZ",# Tamazight - Latin - 0x0449: "ta_IN", # Tamil - 0x0444: "tt_RU", # Tatar - 0x044a: "te_IN", # Telugu - 0x041e: "th_TH", # Thai - 0x0851: "bo_BT", # Tibetan - Bhutan - 0x0451: "bo_CN", # Tibetan - PRC - 0x041f: "tr_TR", # Turkish - 0x0442: "tk_TM", # Turkmen - Cyrillic - 0x0480: "ug_CN", # Uighur - Arabic - 0x0422: "uk_UA", # Ukrainian - 0x042e: "wen_DE",# Upper Sorbian - Germany - 0x0420: "ur_PK", # Urdu - 0x0820: "ur_IN", # Urdu - India - 0x0443: "uz_UZ", # Uzbek - Latin - 0x0843: "uz_UZ", # Uzbek - Cyrillic - 0x042a: "vi_VN", # Vietnamese - 0x0452: "cy_GB", # Welsh - 0x0488: "wo_SN", # Wolof - Senegal - 0x0434: "xh_ZA", # Xhosa - South Africa - 0x0485: "sah_RU",# Yakut - Cyrillic - 0x0478: "ii_CN", # Yi - PRC - 0x046a: "yo_NG", # Yoruba - Nigeria - 0x0435: "zu_ZA", # Zulu -} - -def _print_locale(): - - """ Test function. 
- """ - categories = {} - def _init_categories(categories=categories): - for k,v in globals().items(): - if k[:3] == 'LC_': - categories[k] = v - _init_categories() - del categories['LC_ALL'] - - print('Locale defaults as determined by getdefaultlocale():') - print('-'*72) - lang, enc = getdefaultlocale() - print('Language: ', lang or '(undefined)') - print('Encoding: ', enc or '(undefined)') - print() - - print('Locale settings on startup:') - print('-'*72) - for name,category in categories.items(): - print(name, '...') - lang, enc = getlocale(category) - print(' Language: ', lang or '(undefined)') - print(' Encoding: ', enc or '(undefined)') - print() - - try: - setlocale(LC_ALL, "") - except: - print('NOTE:') - print('setlocale(LC_ALL, "") does not support the default locale') - print('given in the OS environment variables.') - else: - print() - print('Locale settings after calling setlocale(LC_ALL, ""):') - print('-'*72) - for name,category in categories.items(): - print(name, '...') - lang, enc = getlocale(category) - print(' Language: ', lang or '(undefined)') - print(' Encoding: ', enc or '(undefined)') - print() - -### - -try: - LC_MESSAGES -except NameError: - pass -else: - __all__.append("LC_MESSAGES") - -if __name__=='__main__': - print('Locale aliasing:') - print() - _print_locale() - print() - print('Number formatting:') - print() - _test() diff --git a/Python313_13_x86_Template/Lib/logging/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/logging/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 77594651..00000000 Binary files a/Python313_13_x86_Template/Lib/logging/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/logging/__pycache__/config.cpython-313.pyc b/Python313_13_x86_Template/Lib/logging/__pycache__/config.cpython-313.pyc deleted file mode 100644 index 1c7cb798..00000000 Binary files a/Python313_13_x86_Template/Lib/logging/__pycache__/config.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/logging/__pycache__/handlers.cpython-313.pyc b/Python313_13_x86_Template/Lib/logging/__pycache__/handlers.cpython-313.pyc deleted file mode 100644 index 41bb9b44..00000000 Binary files a/Python313_13_x86_Template/Lib/logging/__pycache__/handlers.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/logging/config.py b/Python313_13_x86_Template/Lib/logging/config.py deleted file mode 100644 index 190b4f92..00000000 --- a/Python313_13_x86_Template/Lib/logging/config.py +++ /dev/null @@ -1,1065 +0,0 @@ -# Copyright 2001-2023 by Vinay Sajip. All Rights Reserved. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose and without fee is hereby granted, -# provided that the above copyright notice appear in all copies and that -# both that copyright notice and this permission notice appear in -# supporting documentation, and that the name of Vinay Sajip -# not be used in advertising or publicity pertaining to distribution -# of the software without specific, written prior permission. -# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING -# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL -# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR -# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER -# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -""" -Configuration functions for the logging package for Python. The core package -is based on PEP 282 and comments thereto in comp.lang.python, and influenced -by Apache's log4j system. - -Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved. - -To use, simply 'import logging' and log away! -""" - -import errno -import functools -import io -import logging -import logging.handlers -import os -import queue -import re -import struct -import threading -import traceback - -from socketserver import ThreadingTCPServer, StreamRequestHandler - - -DEFAULT_LOGGING_CONFIG_PORT = 9030 - -RESET_ERROR = errno.ECONNRESET - -# -# The following code implements a socket listener for on-the-fly -# reconfiguration of logging. -# -# _listener holds the server object doing the listening -_listener = None - -def fileConfig(fname, defaults=None, disable_existing_loggers=True, encoding=None): - """ - Read the logging configuration from a ConfigParser-format file. - - This can be called several times from an application, allowing an end user - the ability to select from various pre-canned configurations (if the - developer provides a mechanism to present the choices and load the chosen - configuration). - """ - import configparser - - if isinstance(fname, str): - if not os.path.exists(fname): - raise FileNotFoundError(f"{fname} doesn't exist") - elif not os.path.getsize(fname): - raise RuntimeError(f'{fname} is an empty file') - - if isinstance(fname, configparser.RawConfigParser): - cp = fname - else: - try: - cp = configparser.ConfigParser(defaults) - if hasattr(fname, 'readline'): - cp.read_file(fname) - else: - encoding = io.text_encoding(encoding) - cp.read(fname, encoding=encoding) - except configparser.ParsingError as e: - raise RuntimeError(f'{fname} is invalid: {e}') - - formatters = _create_formatters(cp) - - # critical section - with logging._lock: - _clearExistingHandlers() - - # Handlers add themselves to logging._handlers - handlers = _install_handlers(cp, formatters) - _install_loggers(cp, handlers, disable_existing_loggers) - - -def _resolve(name): - """Resolve a dotted name to a global object.""" - name = name.split('.') - used = name.pop(0) - found = __import__(used) - for n in name: - used = used + '.' 
+ n - try: - found = getattr(found, n) - except AttributeError: - __import__(used) - found = getattr(found, n) - return found - -def _strip_spaces(alist): - return map(str.strip, alist) - -def _create_formatters(cp): - """Create and return formatters""" - flist = cp["formatters"]["keys"] - if not len(flist): - return {} - flist = flist.split(",") - flist = _strip_spaces(flist) - formatters = {} - for form in flist: - sectname = "formatter_%s" % form - fs = cp.get(sectname, "format", raw=True, fallback=None) - dfs = cp.get(sectname, "datefmt", raw=True, fallback=None) - stl = cp.get(sectname, "style", raw=True, fallback='%') - defaults = cp.get(sectname, "defaults", raw=True, fallback=None) - - c = logging.Formatter - class_name = cp[sectname].get("class") - if class_name: - c = _resolve(class_name) - - if defaults is not None: - defaults = eval(defaults, vars(logging)) - f = c(fs, dfs, stl, defaults=defaults) - else: - f = c(fs, dfs, stl) - formatters[form] = f - return formatters - - -def _install_handlers(cp, formatters): - """Install and return handlers""" - hlist = cp["handlers"]["keys"] - if not len(hlist): - return {} - hlist = hlist.split(",") - hlist = _strip_spaces(hlist) - handlers = {} - fixups = [] #for inter-handler references - for hand in hlist: - section = cp["handler_%s" % hand] - klass = section["class"] - fmt = section.get("formatter", "") - try: - klass = eval(klass, vars(logging)) - except (AttributeError, NameError): - klass = _resolve(klass) - args = section.get("args", '()') - args = eval(args, vars(logging)) - kwargs = section.get("kwargs", '{}') - kwargs = eval(kwargs, vars(logging)) - h = klass(*args, **kwargs) - h.name = hand - if "level" in section: - level = section["level"] - h.setLevel(level) - if len(fmt): - h.setFormatter(formatters[fmt]) - if issubclass(klass, logging.handlers.MemoryHandler): - target = section.get("target", "") - if len(target): #the target handler may not be loaded yet, so keep for later... - fixups.append((h, target)) - handlers[hand] = h - #now all handlers are loaded, fixup inter-handler references... - for h, t in fixups: - h.setTarget(handlers[t]) - return handlers - -def _handle_existing_loggers(existing, child_loggers, disable_existing): - """ - When (re)configuring logging, handle loggers which were in the previous - configuration but are not in the new configuration. There's no point - deleting them as other threads may continue to hold references to them; - and by disabling them, you stop them doing any logging. - - However, don't disable children of named loggers, as that's probably not - what was intended by the user. Also, allow existing loggers to NOT be - disabled if disable_existing is false. 
- """ - root = logging.root - for log in existing: - logger = root.manager.loggerDict[log] - if log in child_loggers: - if not isinstance(logger, logging.PlaceHolder): - logger.setLevel(logging.NOTSET) - logger.handlers = [] - logger.propagate = True - else: - logger.disabled = disable_existing - -def _install_loggers(cp, handlers, disable_existing): - """Create and install loggers""" - - # configure the root first - llist = cp["loggers"]["keys"] - llist = llist.split(",") - llist = list(_strip_spaces(llist)) - llist.remove("root") - section = cp["logger_root"] - root = logging.root - log = root - if "level" in section: - level = section["level"] - log.setLevel(level) - for h in root.handlers[:]: - root.removeHandler(h) - hlist = section["handlers"] - if len(hlist): - hlist = hlist.split(",") - hlist = _strip_spaces(hlist) - for hand in hlist: - log.addHandler(handlers[hand]) - - #and now the others... - #we don't want to lose the existing loggers, - #since other threads may have pointers to them. - #existing is set to contain all existing loggers, - #and as we go through the new configuration we - #remove any which are configured. At the end, - #what's left in existing is the set of loggers - #which were in the previous configuration but - #which are not in the new configuration. - existing = list(root.manager.loggerDict.keys()) - #The list needs to be sorted so that we can - #avoid disabling child loggers of explicitly - #named loggers. With a sorted list it is easier - #to find the child loggers. - existing.sort() - #We'll keep the list of existing loggers - #which are children of named loggers here... - child_loggers = [] - #now set up the new ones... - for log in llist: - section = cp["logger_%s" % log] - qn = section["qualname"] - propagate = section.getint("propagate", fallback=1) - logger = logging.getLogger(qn) - if qn in existing: - i = existing.index(qn) + 1 # start with the entry after qn - prefixed = qn + "." - pflen = len(prefixed) - num_existing = len(existing) - while i < num_existing: - if existing[i][:pflen] == prefixed: - child_loggers.append(existing[i]) - i += 1 - existing.remove(qn) - if "level" in section: - level = section["level"] - logger.setLevel(level) - for h in logger.handlers[:]: - logger.removeHandler(h) - logger.propagate = propagate - logger.disabled = 0 - hlist = section["handlers"] - if len(hlist): - hlist = hlist.split(",") - hlist = _strip_spaces(hlist) - for hand in hlist: - logger.addHandler(handlers[hand]) - - #Disable any old loggers. There's no point deleting - #them as other threads may continue to hold references - #and by disabling them, you stop them doing any logging. - #However, don't disable children of named loggers, as that's - #probably not what was intended by the user. 
-    #for log in existing:
-    #    logger = root.manager.loggerDict[log]
-    #    if log in child_loggers:
-    #        logger.level = logging.NOTSET
-    #        logger.handlers = []
-    #        logger.propagate = 1
-    #    elif disable_existing_loggers:
-    #        logger.disabled = 1
-    _handle_existing_loggers(existing, child_loggers, disable_existing)
-
-
-def _clearExistingHandlers():
-    """Clear and close existing handlers"""
-    logging._handlers.clear()
-    logging.shutdown(logging._handlerList[:])
-    del logging._handlerList[:]
-
-
-IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
-
-
-def valid_ident(s):
-    m = IDENTIFIER.match(s)
-    if not m:
-        raise ValueError('Not a valid Python identifier: %r' % s)
-    return True
-
-
-class ConvertingMixin(object):
-    """For ConvertingXXX's, this mixin class provides common functions"""
-
-    def convert_with_key(self, key, value, replace=True):
-        result = self.configurator.convert(value)
-        #If the converted value is different, save for next time
-        if value is not result:
-            if replace:
-                self[key] = result
-            if type(result) in (ConvertingDict, ConvertingList,
-                                ConvertingTuple):
-                result.parent = self
-                result.key = key
-        return result
-
-    def convert(self, value):
-        result = self.configurator.convert(value)
-        if value is not result:
-            if type(result) in (ConvertingDict, ConvertingList,
-                                ConvertingTuple):
-                result.parent = self
-        return result
-
-
-# The ConvertingXXX classes are wrappers around standard Python containers,
-# and they serve to convert any suitable values in the container. The
-# conversion converts base dicts, lists and tuples to their wrapped
-# equivalents, whereas strings which match a conversion format are converted
-# appropriately.
-#
-# Each wrapper should have a configurator attribute holding the actual
-# configurator to use for conversion.
-
-class ConvertingDict(dict, ConvertingMixin):
-    """A converting dictionary wrapper."""
-
-    def __getitem__(self, key):
-        value = dict.__getitem__(self, key)
-        return self.convert_with_key(key, value)
-
-    def get(self, key, default=None):
-        value = dict.get(self, key, default)
-        return self.convert_with_key(key, value)
-
-    def pop(self, key, default=None):
-        value = dict.pop(self, key, default)
-        return self.convert_with_key(key, value, replace=False)
-
-class ConvertingList(list, ConvertingMixin):
-    """A converting list wrapper."""
-    def __getitem__(self, key):
-        value = list.__getitem__(self, key)
-        return self.convert_with_key(key, value)
-
-    def pop(self, idx=-1):
-        value = list.pop(self, idx)
-        return self.convert(value)
-
-class ConvertingTuple(tuple, ConvertingMixin):
-    """A converting tuple wrapper."""
-    def __getitem__(self, key):
-        value = tuple.__getitem__(self, key)
-        # Can't replace a tuple entry.
-        return self.convert_with_key(key, value, replace=False)
-
-class BaseConfigurator(object):
-    """
-    The configurator base class which defines some useful defaults.
-    """
-
-    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
-
-    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
-    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
-    INDEX_PATTERN = re.compile(r'^\[([^\[\]]*)\]\s*')
-    DIGIT_PATTERN = re.compile(r'^\d+$')
-
-    value_converters = {
-        'ext' : 'ext_convert',
-        'cfg' : 'cfg_convert',
-    }
-
-    # We might want to use a different one, e.g. importlib
-    importer = staticmethod(__import__)
-
-    def __init__(self, config):
-        self.config = ConvertingDict(config)
-        self.config.configurator = self
-
-    def resolve(self, s):
-        """
-        Resolve strings to objects using standard import and attribute
-        syntax.
- """ - name = s.split('.') - used = name.pop(0) - try: - found = self.importer(used) - for frag in name: - used += '.' + frag - try: - found = getattr(found, frag) - except AttributeError: - self.importer(used) - found = getattr(found, frag) - return found - except ImportError as e: - v = ValueError('Cannot resolve %r: %s' % (s, e)) - raise v from e - - def ext_convert(self, value): - """Default converter for the ext:// protocol.""" - return self.resolve(value) - - def cfg_convert(self, value): - """Default converter for the cfg:// protocol.""" - rest = value - m = self.WORD_PATTERN.match(rest) - if m is None: - raise ValueError("Unable to convert %r" % value) - else: - rest = rest[m.end():] - d = self.config[m.groups()[0]] - #print d, rest - while rest: - m = self.DOT_PATTERN.match(rest) - if m: - d = d[m.groups()[0]] - else: - m = self.INDEX_PATTERN.match(rest) - if m: - idx = m.groups()[0] - if not self.DIGIT_PATTERN.match(idx): - d = d[idx] - else: - try: - n = int(idx) # try as number first (most likely) - d = d[n] - except TypeError: - d = d[idx] - if m: - rest = rest[m.end():] - else: - raise ValueError('Unable to convert ' - '%r at %r' % (value, rest)) - #rest should be empty - return d - - def convert(self, value): - """ - Convert values to an appropriate type. dicts, lists and tuples are - replaced by their converting alternatives. Strings are checked to - see if they have a conversion format and are converted if they do. - """ - if not isinstance(value, ConvertingDict) and isinstance(value, dict): - value = ConvertingDict(value) - value.configurator = self - elif not isinstance(value, ConvertingList) and isinstance(value, list): - value = ConvertingList(value) - value.configurator = self - elif not isinstance(value, ConvertingTuple) and\ - isinstance(value, tuple) and not hasattr(value, '_fields'): - value = ConvertingTuple(value) - value.configurator = self - elif isinstance(value, str): # str for py3k - m = self.CONVERT_PATTERN.match(value) - if m: - d = m.groupdict() - prefix = d['prefix'] - converter = self.value_converters.get(prefix, None) - if converter: - suffix = d['suffix'] - converter = getattr(self, converter) - value = converter(suffix) - return value - - def configure_custom(self, config): - """Configure an object with a user-supplied factory.""" - c = config.pop('()') - if not callable(c): - c = self.resolve(c) - # Check for valid identifiers - kwargs = {k: config[k] for k in config if (k != '.' and valid_ident(k))} - result = c(**kwargs) - props = config.pop('.', None) - if props: - for name, value in props.items(): - setattr(result, name, value) - return result - - def as_tuple(self, value): - """Utility function which converts lists to tuples.""" - if isinstance(value, list): - value = tuple(value) - return value - -def _is_queue_like_object(obj): - """Check that *obj* implements the Queue API.""" - if isinstance(obj, (queue.Queue, queue.SimpleQueue)): - return True - # defer importing multiprocessing as much as possible - from multiprocessing.queues import Queue as MPQueue - if isinstance(obj, MPQueue): - return True - # Depending on the multiprocessing start context, we cannot create - # a multiprocessing.managers.BaseManager instance 'mm' to get the - # runtime type of mm.Queue() or mm.JoinableQueue() (see gh-119819). - # - # Since we only need an object implementing the Queue API, we only - # do a protocol check, but we do not use typing.runtime_checkable() - # and typing.Protocol to reduce import time (see gh-121723). 
- # - # Ideally, we would have wanted to simply use strict type checking - # instead of a protocol-based type checking since the latter does - # not check the method signatures. - # - # Note that only 'put_nowait' and 'get' are required by the logging - # queue handler and queue listener (see gh-124653) and that other - # methods are either optional or unused. - minimal_queue_interface = ['put_nowait', 'get'] - return all(callable(getattr(obj, method, None)) - for method in minimal_queue_interface) - -class DictConfigurator(BaseConfigurator): - """ - Configure logging using a dictionary-like object to describe the - configuration. - """ - - def configure(self): - """Do the configuration.""" - - config = self.config - if 'version' not in config: - raise ValueError("dictionary doesn't specify a version") - if config['version'] != 1: - raise ValueError("Unsupported version: %s" % config['version']) - incremental = config.pop('incremental', False) - EMPTY_DICT = {} - with logging._lock: - if incremental: - handlers = config.get('handlers', EMPTY_DICT) - for name in handlers: - if name not in logging._handlers: - raise ValueError('No handler found with ' - 'name %r' % name) - else: - try: - handler = logging._handlers[name] - handler_config = handlers[name] - level = handler_config.get('level', None) - if level: - handler.setLevel(logging._checkLevel(level)) - except Exception as e: - raise ValueError('Unable to configure handler ' - '%r' % name) from e - loggers = config.get('loggers', EMPTY_DICT) - for name in loggers: - try: - self.configure_logger(name, loggers[name], True) - except Exception as e: - raise ValueError('Unable to configure logger ' - '%r' % name) from e - root = config.get('root', None) - if root: - try: - self.configure_root(root, True) - except Exception as e: - raise ValueError('Unable to configure root ' - 'logger') from e - else: - disable_existing = config.pop('disable_existing_loggers', True) - - _clearExistingHandlers() - - # Do formatters first - they don't refer to anything else - formatters = config.get('formatters', EMPTY_DICT) - for name in formatters: - try: - formatters[name] = self.configure_formatter( - formatters[name]) - except Exception as e: - raise ValueError('Unable to configure ' - 'formatter %r' % name) from e - # Next, do filters - they don't refer to anything else, either - filters = config.get('filters', EMPTY_DICT) - for name in filters: - try: - filters[name] = self.configure_filter(filters[name]) - except Exception as e: - raise ValueError('Unable to configure ' - 'filter %r' % name) from e - - # Next, do handlers - they refer to formatters and filters - # As handlers can refer to other handlers, sort the keys - # to allow a deterministic order of configuration - handlers = config.get('handlers', EMPTY_DICT) - deferred = [] - for name in sorted(handlers): - try: - handler = self.configure_handler(handlers[name]) - handler.name = name - handlers[name] = handler - except Exception as e: - if ' not configured yet' in str(e.__cause__): - deferred.append(name) - else: - raise ValueError('Unable to configure handler ' - '%r' % name) from e - - # Now do any that were deferred - for name in deferred: - try: - handler = self.configure_handler(handlers[name]) - handler.name = name - handlers[name] = handler - except Exception as e: - raise ValueError('Unable to configure handler ' - '%r' % name) from e - - # Next, do loggers - they refer to handlers and filters - - #we don't want to lose the existing loggers, - #since other threads may have pointers to 
them. - #existing is set to contain all existing loggers, - #and as we go through the new configuration we - #remove any which are configured. At the end, - #what's left in existing is the set of loggers - #which were in the previous configuration but - #which are not in the new configuration. - root = logging.root - existing = list(root.manager.loggerDict.keys()) - #The list needs to be sorted so that we can - #avoid disabling child loggers of explicitly - #named loggers. With a sorted list it is easier - #to find the child loggers. - existing.sort() - #We'll keep the list of existing loggers - #which are children of named loggers here... - child_loggers = [] - #now set up the new ones... - loggers = config.get('loggers', EMPTY_DICT) - for name in loggers: - if name in existing: - i = existing.index(name) + 1 # look after name - prefixed = name + "." - pflen = len(prefixed) - num_existing = len(existing) - while i < num_existing: - if existing[i][:pflen] == prefixed: - child_loggers.append(existing[i]) - i += 1 - existing.remove(name) - try: - self.configure_logger(name, loggers[name]) - except Exception as e: - raise ValueError('Unable to configure logger ' - '%r' % name) from e - - #Disable any old loggers. There's no point deleting - #them as other threads may continue to hold references - #and by disabling them, you stop them doing any logging. - #However, don't disable children of named loggers, as that's - #probably not what was intended by the user. - #for log in existing: - # logger = root.manager.loggerDict[log] - # if log in child_loggers: - # logger.level = logging.NOTSET - # logger.handlers = [] - # logger.propagate = True - # elif disable_existing: - # logger.disabled = True - _handle_existing_loggers(existing, child_loggers, - disable_existing) - - # And finally, do the root logger - root = config.get('root', None) - if root: - try: - self.configure_root(root) - except Exception as e: - raise ValueError('Unable to configure root ' - 'logger') from e - - def configure_formatter(self, config): - """Configure a formatter from a dictionary.""" - if '()' in config: - factory = config['()'] # for use in exception handler - try: - result = self.configure_custom(config) - except TypeError as te: - if "'format'" not in str(te): - raise - # logging.Formatter and its subclasses expect the `fmt` - # parameter instead of `format`. Retry passing configuration - # with `fmt`. - config['fmt'] = config.pop('format') - config['()'] = factory - result = self.configure_custom(config) - else: - fmt = config.get('format', None) - dfmt = config.get('datefmt', None) - style = config.get('style', '%') - cname = config.get('class', None) - defaults = config.get('defaults', None) - - if not cname: - c = logging.Formatter - else: - c = _resolve(cname) - - kwargs = {} - - # Add defaults only if it exists. - # Prevents TypeError in custom formatter callables that do not - # accept it. 
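        # A config fragment that exercises this path might look like
        # (an illustrative sketch; the formatter name and fields are made up):
        #     'formatters': {'brief': {'format': '%(ip)s %(message)s',
        #                              'defaults': {'ip': '-'}}}
        # where 'defaults' supplies values for fields missing from a record.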
- if defaults is not None: - kwargs['defaults'] = defaults - - # A TypeError would be raised if "validate" key is passed in with a formatter callable - # that does not accept "validate" as a parameter - if 'validate' in config: # if user hasn't mentioned it, the default will be fine - result = c(fmt, dfmt, style, config['validate'], **kwargs) - else: - result = c(fmt, dfmt, style, **kwargs) - - return result - - def configure_filter(self, config): - """Configure a filter from a dictionary.""" - if '()' in config: - result = self.configure_custom(config) - else: - name = config.get('name', '') - result = logging.Filter(name) - return result - - def add_filters(self, filterer, filters): - """Add filters to a filterer from a list of names.""" - for f in filters: - try: - if callable(f) or callable(getattr(f, 'filter', None)): - filter_ = f - else: - filter_ = self.config['filters'][f] - filterer.addFilter(filter_) - except Exception as e: - raise ValueError('Unable to add filter %r' % f) from e - - def _configure_queue_handler(self, klass, **kwargs): - if 'queue' in kwargs: - q = kwargs.pop('queue') - else: - q = queue.Queue() # unbounded - - rhl = kwargs.pop('respect_handler_level', False) - lklass = kwargs.pop('listener', logging.handlers.QueueListener) - handlers = kwargs.pop('handlers', []) - - listener = lklass(q, *handlers, respect_handler_level=rhl) - handler = klass(q, **kwargs) - handler.listener = listener - return handler - - def configure_handler(self, config): - """Configure a handler from a dictionary.""" - config_copy = dict(config) # for restoring in case of error - formatter = config.pop('formatter', None) - if formatter: - try: - formatter = self.config['formatters'][formatter] - except Exception as e: - raise ValueError('Unable to set formatter ' - '%r' % formatter) from e - level = config.pop('level', None) - filters = config.pop('filters', None) - if '()' in config: - c = config.pop('()') - if not callable(c): - c = self.resolve(c) - factory = c - else: - cname = config.pop('class') - if callable(cname): - klass = cname - else: - klass = self.resolve(cname) - if issubclass(klass, logging.handlers.MemoryHandler): - if 'flushLevel' in config: - config['flushLevel'] = logging._checkLevel(config['flushLevel']) - if 'target' in config: - # Special case for handler which refers to another handler - try: - tn = config['target'] - th = self.config['handlers'][tn] - if not isinstance(th, logging.Handler): - config.update(config_copy) # restore for deferred cfg - raise TypeError('target not configured yet') - config['target'] = th - except Exception as e: - raise ValueError('Unable to set target handler %r' % tn) from e - elif issubclass(klass, logging.handlers.QueueHandler): - # Another special case for handler which refers to other handlers - # if 'handlers' not in config: - # raise ValueError('No handlers specified for a QueueHandler') - if 'queue' in config: - qspec = config['queue'] - - if isinstance(qspec, str): - q = self.resolve(qspec) - if not callable(q): - raise TypeError('Invalid queue specifier %r' % qspec) - config['queue'] = q() - elif isinstance(qspec, dict): - if '()' not in qspec: - raise TypeError('Invalid queue specifier %r' % qspec) - config['queue'] = self.configure_custom(dict(qspec)) - elif not _is_queue_like_object(qspec): - raise TypeError('Invalid queue specifier %r' % qspec) - - if 'listener' in config: - lspec = config['listener'] - if isinstance(lspec, type): - if not issubclass(lspec, logging.handlers.QueueListener): - raise TypeError('Invalid 
listener specifier %r' % lspec) - else: - if isinstance(lspec, str): - listener = self.resolve(lspec) - if isinstance(listener, type) and\ - not issubclass(listener, logging.handlers.QueueListener): - raise TypeError('Invalid listener specifier %r' % lspec) - elif isinstance(lspec, dict): - if '()' not in lspec: - raise TypeError('Invalid listener specifier %r' % lspec) - listener = self.configure_custom(dict(lspec)) - else: - raise TypeError('Invalid listener specifier %r' % lspec) - if not callable(listener): - raise TypeError('Invalid listener specifier %r' % lspec) - config['listener'] = listener - if 'handlers' in config: - hlist = [] - try: - for hn in config['handlers']: - h = self.config['handlers'][hn] - if not isinstance(h, logging.Handler): - config.update(config_copy) # restore for deferred cfg - raise TypeError('Required handler %r ' - 'is not configured yet' % hn) - hlist.append(h) - except Exception as e: - raise ValueError('Unable to set required handler %r' % hn) from e - config['handlers'] = hlist - elif issubclass(klass, logging.handlers.SMTPHandler) and\ - 'mailhost' in config: - config['mailhost'] = self.as_tuple(config['mailhost']) - elif issubclass(klass, logging.handlers.SysLogHandler) and\ - 'address' in config: - config['address'] = self.as_tuple(config['address']) - if issubclass(klass, logging.handlers.QueueHandler): - factory = functools.partial(self._configure_queue_handler, klass) - else: - factory = klass - kwargs = {k: config[k] for k in config if (k != '.' and valid_ident(k))} - try: - result = factory(**kwargs) - except TypeError as te: - if "'stream'" not in str(te): - raise - #The argument name changed from strm to stream - #Retry with old name. - #This is so that code can be used with older Python versions - #(e.g. by Django) - kwargs['strm'] = kwargs.pop('stream') - result = factory(**kwargs) - if formatter: - result.setFormatter(formatter) - if level is not None: - result.setLevel(logging._checkLevel(level)) - if filters: - self.add_filters(result, filters) - props = config.pop('.', None) - if props: - for name, value in props.items(): - setattr(result, name, value) - return result - - def add_handlers(self, logger, handlers): - """Add handlers to a logger from a list of names.""" - for h in handlers: - try: - logger.addHandler(self.config['handlers'][h]) - except Exception as e: - raise ValueError('Unable to add handler %r' % h) from e - - def common_logger_config(self, logger, config, incremental=False): - """ - Perform configuration which is common to root and non-root loggers. 
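A sketch of the handler specs this method resolves (handler and formatter names are illustrative); the 'target' and 'handlers' entries are the by-name cross-references whose deferred resolution the code above handles:

    handlers = {
        "console": {"class": "logging.StreamHandler",
                    "formatter": "plain", "level": "INFO"},
        "buffered": {"class": "logging.handlers.MemoryHandler",
                     "capacity": 200,
                     "flushLevel": "ERROR",   # run through _checkLevel
                     "target": "console"},    # by-name handler reference
        "queued": {"class": "logging.handlers.QueueHandler",
                   "queue": {"()": "queue.Queue", "maxsize": 1000},
                   "handlers": ["console"],   # wired into a QueueListener
                   "respect_handler_level": True},
    }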
- """ - level = config.get('level', None) - if level is not None: - logger.setLevel(logging._checkLevel(level)) - if not incremental: - #Remove any existing handlers - for h in logger.handlers[:]: - logger.removeHandler(h) - handlers = config.get('handlers', None) - if handlers: - self.add_handlers(logger, handlers) - filters = config.get('filters', None) - if filters: - self.add_filters(logger, filters) - - def configure_logger(self, name, config, incremental=False): - """Configure a non-root logger from a dictionary.""" - logger = logging.getLogger(name) - self.common_logger_config(logger, config, incremental) - logger.disabled = False - propagate = config.get('propagate', None) - if propagate is not None: - logger.propagate = propagate - - def configure_root(self, config, incremental=False): - """Configure a root logger from a dictionary.""" - root = logging.getLogger() - self.common_logger_config(root, config, incremental) - -dictConfigClass = DictConfigurator - -def dictConfig(config): - """Configure logging using a dictionary.""" - dictConfigClass(config).configure() - - -def listen(port=DEFAULT_LOGGING_CONFIG_PORT, verify=None): - """ - Start up a socket server on the specified port, and listen for new - configurations. - - These will be sent as a file suitable for processing by fileConfig(). - Returns a Thread object on which you can call start() to start the server, - and which you can join() when appropriate. To stop the server, call - stopListening(). - - Use the ``verify`` argument to verify any bytes received across the wire - from a client. If specified, it should be a callable which receives a - single argument - the bytes of configuration data received across the - network - and it should return either ``None``, to indicate that the - passed in bytes could not be verified and should be discarded, or a - byte string which is then passed to the configuration machinery as - normal. Note that you can return transformed bytes, e.g. by decrypting - the bytes passed in. - """ - - class ConfigStreamHandler(StreamRequestHandler): - """ - Handler for a logging configuration request. - - It expects a completely new logging configuration and uses fileConfig - to install it. - """ - def handle(self): - """ - Handle a request. - - Each request is expected to be a 4-byte length, packed using - struct.pack(">L", n), followed by the config file. - Uses fileConfig() to do the grunt work. - """ - try: - conn = self.connection - chunk = conn.recv(4) - if len(chunk) == 4: - slen = struct.unpack(">L", chunk)[0] - chunk = self.connection.recv(slen) - while len(chunk) < slen: - chunk = chunk + conn.recv(slen - len(chunk)) - if self.server.verify is not None: - chunk = self.server.verify(chunk) - if chunk is not None: # verified, can process - chunk = chunk.decode("utf-8") - try: - import json - d =json.loads(chunk) - assert isinstance(d, dict) - dictConfig(d) - except Exception: - #Apply new configuration. - - file = io.StringIO(chunk) - try: - fileConfig(file) - except Exception: - traceback.print_exc() - if self.server.ready: - self.server.ready.set() - except OSError as e: - if e.errno != RESET_ERROR: - raise - - class ConfigSocketReceiver(ThreadingTCPServer): - """ - A simple TCP socket-based logging config receiver. 
- """ - - allow_reuse_address = 1 - - def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, - handler=None, ready=None, verify=None): - ThreadingTCPServer.__init__(self, (host, port), handler) - with logging._lock: - self.abort = 0 - self.timeout = 1 - self.ready = ready - self.verify = verify - - def serve_until_stopped(self): - import select - abort = 0 - while not abort: - rd, wr, ex = select.select([self.socket.fileno()], - [], [], - self.timeout) - if rd: - self.handle_request() - with logging._lock: - abort = self.abort - self.server_close() - - class Server(threading.Thread): - - def __init__(self, rcvr, hdlr, port, verify): - super(Server, self).__init__() - self.rcvr = rcvr - self.hdlr = hdlr - self.port = port - self.verify = verify - self.ready = threading.Event() - - def run(self): - server = self.rcvr(port=self.port, handler=self.hdlr, - ready=self.ready, - verify=self.verify) - if self.port == 0: - self.port = server.server_address[1] - self.ready.set() - global _listener - with logging._lock: - _listener = server - server.serve_until_stopped() - - return Server(ConfigSocketReceiver, ConfigStreamHandler, port, verify) - -def stopListening(): - """ - Stop the listening server which was created with a call to listen(). - """ - global _listener - with logging._lock: - if _listener: - _listener.abort = 1 - _listener = None diff --git a/Python313_13_x86_Template/Lib/logging/handlers.py b/Python313_13_x86_Template/Lib/logging/handlers.py deleted file mode 100644 index 480dbd08..00000000 --- a/Python313_13_x86_Template/Lib/logging/handlers.py +++ /dev/null @@ -1,1629 +0,0 @@ -# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose and without fee is hereby granted, -# provided that the above copyright notice appear in all copies and that -# both that copyright notice and this permission notice appear in -# supporting documentation, and that the name of Vinay Sajip -# not be used in advertising or publicity pertaining to distribution -# of the software without specific, written prior permission. -# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING -# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL -# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR -# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER -# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -""" -Additional handlers for the logging package for Python. The core package is -based on PEP 282 and comments thereto in comp.lang.python. - -Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved. - -To use, simply 'import logging.handlers' and log away! -""" - -import copy -import io -import logging -import os -import pickle -import queue -import re -import socket -import struct -import threading -import time - -# -# Some constants... -# - -DEFAULT_TCP_LOGGING_PORT = 9020 -DEFAULT_UDP_LOGGING_PORT = 9021 -DEFAULT_HTTP_LOGGING_PORT = 9022 -DEFAULT_SOAP_LOGGING_PORT = 9023 -SYSLOG_UDP_PORT = 514 -SYSLOG_TCP_PORT = 514 - -_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day - -class BaseRotatingHandler(logging.FileHandler): - """ - Base class for handlers that rotate log files at a certain point. - Not meant to be instantiated directly. Instead, use RotatingFileHandler - or TimedRotatingFileHandler. 
- """ - namer = None - rotator = None - - def __init__(self, filename, mode, encoding=None, delay=False, errors=None): - """ - Use the specified filename for streamed logging - """ - logging.FileHandler.__init__(self, filename, mode=mode, - encoding=encoding, delay=delay, - errors=errors) - self.mode = mode - self.encoding = encoding - self.errors = errors - - def emit(self, record): - """ - Emit a record. - - Output the record to the file, catering for rollover as described - in doRollover(). - """ - try: - if self.shouldRollover(record): - self.doRollover() - logging.FileHandler.emit(self, record) - except Exception: - self.handleError(record) - - def rotation_filename(self, default_name): - """ - Modify the filename of a log file when rotating. - - This is provided so that a custom filename can be provided. - - The default implementation calls the 'namer' attribute of the - handler, if it's callable, passing the default name to - it. If the attribute isn't callable (the default is None), the name - is returned unchanged. - - :param default_name: The default name for the log file. - """ - if not callable(self.namer): - result = default_name - else: - result = self.namer(default_name) - return result - - def rotate(self, source, dest): - """ - When rotating, rotate the current log. - - The default implementation calls the 'rotator' attribute of the - handler, if it's callable, passing the source and dest arguments to - it. If the attribute isn't callable (the default is None), the source - is simply renamed to the destination. - - :param source: The source filename. This is normally the base - filename, e.g. 'test.log' - :param dest: The destination filename. This is normally - what the source is rotated to, e.g. 'test.log.1'. - """ - if not callable(self.rotator): - # Issue 18940: A file may not have been created if delay is True. - if os.path.exists(source): - os.rename(source, dest) - else: - self.rotator(source, dest) - -class RotatingFileHandler(BaseRotatingHandler): - """ - Handler for logging to a set of files, which switches from one file - to the next when the current file reaches a certain size. - """ - def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, - encoding=None, delay=False, errors=None): - """ - Open the specified file and use it as the stream for logging. - - By default, the file grows indefinitely. You can specify particular - values of maxBytes and backupCount to allow the file to rollover at - a predetermined size. - - Rollover occurs whenever the current log file is nearly maxBytes in - length. If backupCount is >= 1, the system will successively create - new files with the same pathname as the base file, but with extensions - ".1", ".2" etc. appended to it. For example, with a backupCount of 5 - and a base file name of "app.log", you would get "app.log", - "app.log.1", "app.log.2", ... through to "app.log.5". The file being - written to is always "app.log" - when it gets filled up, it is closed - and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. - exist, then they are renamed to "app.log.2", "app.log.3" etc. - respectively. - - If maxBytes is zero, rollover never occurs. - """ - # If rotation/rollover is wanted, it doesn't make sense to use another - # mode. If for example 'w' were specified, then if there were multiple - # runs of the calling application, the logs from previous runs would be - # lost if the 'w' is respected, because the log file would be truncated - # on each run. 
- if maxBytes > 0: - mode = 'a' - if "b" not in mode: - encoding = io.text_encoding(encoding) - BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding, - delay=delay, errors=errors) - self.maxBytes = maxBytes - self.backupCount = backupCount - - def doRollover(self): - """ - Do a rollover, as described in __init__(). - """ - if self.stream: - self.stream.close() - self.stream = None - if self.backupCount > 0: - for i in range(self.backupCount - 1, 0, -1): - sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i)) - dfn = self.rotation_filename("%s.%d" % (self.baseFilename, - i + 1)) - if os.path.exists(sfn): - if os.path.exists(dfn): - os.remove(dfn) - os.rename(sfn, dfn) - dfn = self.rotation_filename(self.baseFilename + ".1") - if os.path.exists(dfn): - os.remove(dfn) - self.rotate(self.baseFilename, dfn) - if not self.delay: - self.stream = self._open() - - def shouldRollover(self, record): - """ - Determine if rollover should occur. - - Basically, see if the supplied record would cause the file to exceed - the size limit we have. - """ - if self.stream is None: # delay was set... - self.stream = self._open() - if self.maxBytes > 0: # are we rolling over? - try: - pos = self.stream.tell() - except io.UnsupportedOperation: - # gh-143237: Never rollover a named pipe. - return False - if not pos: - # gh-116263: Never rollover an empty file - return False - msg = "%s\n" % self.format(record) - if pos + len(msg) >= self.maxBytes: - # See bpo-45401: Never rollover anything other than regular files - if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename): - return False - return True - return False - -class TimedRotatingFileHandler(BaseRotatingHandler): - """ - Handler for logging to a file, rotating the log file at certain timed - intervals. - - If backupCount is > 0, when rollover is done, no more than backupCount - files are kept - the oldest ones are deleted. - """ - def __init__(self, filename, when='h', interval=1, backupCount=0, - encoding=None, delay=False, utc=False, atTime=None, - errors=None): - encoding = io.text_encoding(encoding) - BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding, - delay=delay, errors=errors) - self.when = when.upper() - self.backupCount = backupCount - self.utc = utc - self.atTime = atTime - # Calculate the real rollover interval, which is just the number of - # seconds between rollovers. Also set the filename suffix used when - # a rollover occurs. Current 'when' events supported: - # S - Seconds - # M - Minutes - # H - Hours - # D - Days - # midnight - roll over at midnight - # W{0-6} - roll over on a certain day; 0 - Monday - # - # Case of the 'when' specifier is not important; lower or upper case - # will work. - if self.when == 'S': - self.interval = 1 # one second - self.suffix = "%Y-%m-%d_%H-%M-%S" - extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(?!\d)" - elif self.when == 'M': - self.interval = 60 # one minute - self.suffix = "%Y-%m-%d_%H-%M" - extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(?!\d)" - elif self.when == 'H': - self.interval = 60 * 60 # one hour - self.suffix = "%Y-%m-%d_%H" - extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}(?!\d)" - elif self.when == 'D' or self.when == 'MIDNIGHT': - self.interval = 60 * 60 * 24 # one day - self.suffix = "%Y-%m-%d" - extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}(?!\d)" - elif self.when.startswith('W'): - self.interval = 60 * 60 * 24 * 7 # one week - if len(self.when) != 2: - raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when) - if self.when[1] < '0' or self.when[1] > '6': - raise ValueError("Invalid day specified for weekly rollover: %s" % self.when) - self.dayOfWeek = int(self.when[1]) - self.suffix = "%Y-%m-%d" - extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}(?!\d)" - else: - raise ValueError("Invalid rollover interval specified: %s" % self.when) - - self.extMatch = re.compile(extMatch, re.ASCII) - self.interval = self.interval * interval # multiply by units requested - # The following line added because the filename passed in could be a - # path object (see Issue #27493), but self.baseFilename will be a string - filename = self.baseFilename - if os.path.exists(filename): - t = int(os.stat(filename).st_mtime) - else: - t = int(time.time()) - self.rolloverAt = self.computeRollover(t) - - def computeRollover(self, currentTime): - """ - Work out the rollover time based on the specified time. - """ - result = currentTime + self.interval - # If we are rolling over at midnight or weekly, then the interval is already known. - # What we need to figure out is WHEN the next interval is. In other words, - # if you are rolling over at midnight, then your base interval is 1 day, - # but you want to start that one day clock at midnight, not now. So, we - # have to fudge the rolloverAt value in order to trigger the first rollover - # at the right time. After that, the regular interval will take care of - # the rest. Note that this code doesn't care about leap seconds. :) - if self.when == 'MIDNIGHT' or self.when.startswith('W'): - # This could be done with less code, but I wanted it to be clear - if self.utc: - t = time.gmtime(currentTime) - else: - t = time.localtime(currentTime) - currentHour = t[3] - currentMinute = t[4] - currentSecond = t[5] - currentDay = t[6] - # r is the number of seconds left between now and the next rotation - if self.atTime is None: - rotate_ts = _MIDNIGHT - else: - rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 + - self.atTime.second) - - r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 + - currentSecond) - if r <= 0: - # Rotate time is before the current time (for example when - # self.rotateAt is 13:45 and it now 14:15), rotation is - # tomorrow. - r += _MIDNIGHT - currentDay = (currentDay + 1) % 7 - result = currentTime + r - # If we are rolling over on a certain day, add in the number of days until - # the next rollover, but offset by 1 since we just calculated the time - # until the next day starts. There are three cases: - # Case 1) The day to rollover is today; in this case, do nothing - # Case 2) The day to rollover is further in the interval (i.e., today is - # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to - # next rollover is simply 6 - 2 - 1, or 3. - # Case 3) The day to rollover is behind the interval, or the day after the - # rollover is today (i.e., today is day 0 (Monday) and rollover is - # day 6 (Sunday), or the day of rollover is day 5 (Saturday) and - # today is day 6 (Sunday); in both cases, the days to rollover is - # 6 - 0 + 1, or 7 days to next rollover, or 6 - 6 + 5 + 1 == 6 days - # to next rollover. - if self.when.startswith('W'): - day = currentDay # 0 is Monday - if day != self.dayOfWeek: - if day < self.dayOfWeek: - daysToWait = self.dayOfWeek - day - else: - daysToWait = 6 - day + self.dayOfWeek + 1 - result += daysToWait * _MIDNIGHT - result += self.interval - _MIDNIGHT * 7 - else: - result += self.interval - _MIDNIGHT - if not self.utc: - dstNow = t[-1] - dstAtRollover = time.localtime(result)[-1] - if dstNow != dstAtRollover: - if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour - addend = -3600 - else: # DST bows out before next rollover, so we need to add an hour - addend = 3600 - result += addend - return result - - def shouldRollover(self, record): - """ - Determine if rollover should occur. - - record is not used, as we are just comparing times, but it is needed so - the method signatures are the same - """ - t = int(time.time()) - if t >= self.rolloverAt: - # See #89564: Never rollover anything other than regular files - if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename): - # The file is not a regular file, so do not rollover, but do - # set the next rollover time to avoid repeated checks. - self.rolloverAt = self.computeRollover(t) - return False - - return True - return False - - def getFilesToDelete(self): - """ - Determine the files to delete when rolling over.
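Typical use of RotatingFileHandler, for reference (file name illustrative):

    import logging
    from logging.handlers import RotatingFileHandler

    logger = logging.getLogger("rotating-demo")
    # app.log stays the live file; on rollover it is renamed to app.log.1,
    # app.log.1 becomes app.log.2, and so on, keeping at most five backups.
    logger.addHandler(RotatingFileHandler("app.log", maxBytes=50_000,
                                          backupCount=5))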
- - More specific than the earlier method, which just used glob.glob(). - """ - dirName, baseName = os.path.split(self.baseFilename) - fileNames = os.listdir(dirName) - result = [] - if self.namer is None: - prefix = baseName + '.' - plen = len(prefix) - for fileName in fileNames: - if fileName[:plen] == prefix: - suffix = fileName[plen:] - if self.extMatch.fullmatch(suffix): - result.append(os.path.join(dirName, fileName)) - else: - for fileName in fileNames: - # Our files could be just about anything after custom naming, - # but they should contain the datetime suffix. - # Try to find the datetime suffix in the file name and verify - # that the file name can be generated by this handler. - m = self.extMatch.search(fileName) - while m: - dfn = self.namer(self.baseFilename + "." + m[0]) - if os.path.basename(dfn) == fileName: - result.append(os.path.join(dirName, fileName)) - break - m = self.extMatch.search(fileName, m.start() + 1) - - if len(result) < self.backupCount: - result = [] - else: - result.sort() - result = result[:len(result) - self.backupCount] - return result - - def doRollover(self): - """ - do a rollover; in this case, a date/time stamp is appended to the filename - when the rollover happens. However, you want the file to be named for the - start of the interval, not the current time. If there is a backup count, - then we have to get a list of matching filenames, sort them and remove - the one with the oldest suffix. - """ - # get the time that this sequence started at and make it a TimeTuple - currentTime = int(time.time()) - t = self.rolloverAt - self.interval - if self.utc: - timeTuple = time.gmtime(t) - else: - timeTuple = time.localtime(t) - dstNow = time.localtime(currentTime)[-1] - dstThen = timeTuple[-1] - if dstNow != dstThen: - if dstNow: - addend = 3600 - else: - addend = -3600 - timeTuple = time.localtime(t + addend) - dfn = self.rotation_filename(self.baseFilename + "." + - time.strftime(self.suffix, timeTuple)) - if os.path.exists(dfn): - # Already rolled over. - return - - if self.stream: - self.stream.close() - self.stream = None - self.rotate(self.baseFilename, dfn) - if self.backupCount > 0: - for s in self.getFilesToDelete(): - os.remove(s) - if not self.delay: - self.stream = self._open() - self.rolloverAt = self.computeRollover(currentTime) - -class WatchedFileHandler(logging.FileHandler): - """ - A handler for logging to a file, which watches the file - to see if it has changed while in use. This can happen because of - usage of programs such as newsyslog and logrotate which perform - log file rotation. This handler, intended for use under Unix, - watches the file to see if it has changed since the last emit. - (A file has changed if its device or inode have changed.) - If it has changed, the old file stream is closed, and the file - opened to get a new stream. - - This handler is not appropriate for use under Windows, because - under Windows open files cannot be moved or renamed - logging - opens the files with exclusive locks - and so there is no need - for such a handler. - - This handler is based on a suggestion and patch by Chad J. - Schroeder. 
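For reference, a TimedRotatingFileHandler set up for daily rollover at a fixed wall-clock time (values illustrative):

    import datetime
    import logging
    from logging.handlers import TimedRotatingFileHandler

    handler = TimedRotatingFileHandler(
        "app.log",
        when="midnight",                # daily rollover...
        atTime=datetime.time(3, 30),    # ...at 03:30 rather than 00:00
        backupCount=7,                  # prune all but the newest seven
    )
    logging.getLogger("timed-demo").addHandler(handler)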
- """ - def __init__(self, filename, mode='a', encoding=None, delay=False, - errors=None): - if "b" not in mode: - encoding = io.text_encoding(encoding) - logging.FileHandler.__init__(self, filename, mode=mode, - encoding=encoding, delay=delay, - errors=errors) - self.dev, self.ino = -1, -1 - self._statstream() - - def _statstream(self): - if self.stream is None: - return - sres = os.fstat(self.stream.fileno()) - self.dev = sres.st_dev - self.ino = sres.st_ino - - def reopenIfNeeded(self): - """ - Reopen log file if needed. - - Checks if the underlying file has changed, and if it - has, close the old stream and reopen the file to get the - current stream. - """ - if self.stream is None: - return - - # Reduce the chance of race conditions by stat'ing by path only - # once and then fstat'ing our new fd if we opened a new log stream. - # See issue #14632: Thanks to John Mulligan for the problem report - # and patch. - try: - # stat the file by path, checking for existence - sres = os.stat(self.baseFilename) - - # compare file system stat with that of our stream file handle - reopen = (sres.st_dev != self.dev or sres.st_ino != self.ino) - except FileNotFoundError: - reopen = True - - if not reopen: - return - - # we have an open file handle, clean it up - self.stream.flush() - self.stream.close() - self.stream = None # See Issue #21742: _open () might fail. - - # open a new file handle and get new stat info from that fd - self.stream = self._open() - self._statstream() - - def emit(self, record): - """ - Emit a record. - - If underlying file has changed, reopen the file before emitting the - record to it. - """ - self.reopenIfNeeded() - logging.FileHandler.emit(self, record) - - -class SocketHandler(logging.Handler): - """ - A handler class which writes logging records, in pickle format, to - a streaming socket. The socket is kept open across logging calls. - If the peer resets it, an attempt is made to reconnect on the next call. - The pickle which is sent is that of the LogRecord's attribute dictionary - (__dict__), so that the receiver does not need to have the logging module - installed in order to process the logging event. - - To unpickle the record at the receiving end into a LogRecord, use the - makeLogRecord function. - """ - - def __init__(self, host, port): - """ - Initializes the handler with a specific host address and port. - - When the attribute *closeOnError* is set to True - if a socket error - occurs, the socket is silently closed and then reopened on the next - logging call. - """ - logging.Handler.__init__(self) - self.host = host - self.port = port - if port is None: - self.address = host - else: - self.address = (host, port) - self.sock = None - self.closeOnError = False - self.retryTime = None - # - # Exponential backoff parameters. - # - self.retryStart = 1.0 - self.retryMax = 30.0 - self.retryFactor = 2.0 - - def makeSocket(self, timeout=1): - """ - A factory method which allows subclasses to define the precise - type of socket they want. - """ - if self.port is not None: - result = socket.create_connection(self.address, timeout=timeout) - else: - result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - result.settimeout(timeout) - try: - result.connect(self.address) - except OSError: - result.close() # Issue 19182 - raise - return result - - def createSocket(self): - """ - Try to create a socket, using an exponential backoff with - a max retry time. Thanks to Robert Olson for the original patch - (SF #815911) which has been slightly refactored. 
- """ - now = time.time() - # Either retryTime is None, in which case this - # is the first time back after a disconnect, or - # we've waited long enough. - if self.retryTime is None: - attempt = True - else: - attempt = (now >= self.retryTime) - if attempt: - try: - self.sock = self.makeSocket() - self.retryTime = None # next time, no delay before trying - except OSError: - #Creation failed, so set the retry time and return. - if self.retryTime is None: - self.retryPeriod = self.retryStart - else: - self.retryPeriod = self.retryPeriod * self.retryFactor - if self.retryPeriod > self.retryMax: - self.retryPeriod = self.retryMax - self.retryTime = now + self.retryPeriod - - def send(self, s): - """ - Send a pickled string to the socket. - - This function allows for partial sends which can happen when the - network is busy. - """ - if self.sock is None: - self.createSocket() - #self.sock can be None either because we haven't reached the retry - #time yet, or because we have reached the retry time and retried, - #but are still unable to connect. - if self.sock: - try: - self.sock.sendall(s) - except OSError: #pragma: no cover - self.sock.close() - self.sock = None # so we can call createSocket next time - - def makePickle(self, record): - """ - Pickles the record in binary format with a length prefix, and - returns it ready for transmission across the socket. - """ - ei = record.exc_info - if ei: - # just to get traceback text into record.exc_text ... - dummy = self.format(record) - # See issue #14436: If msg or args are objects, they may not be - # available on the receiving end. So we convert the msg % args - # to a string, save it as msg and zap the args. - d = dict(record.__dict__) - d['msg'] = record.getMessage() - d['args'] = None - d['exc_info'] = None - # Issue #25685: delete 'message' if present: redundant with 'msg' - d.pop('message', None) - s = pickle.dumps(d, 1) - slen = struct.pack(">L", len(s)) - return slen + s - - def handleError(self, record): - """ - Handle an error during logging. - - An error has occurred during logging. Most likely cause - - connection lost. Close the socket so that we can retry on the - next event. - """ - if self.closeOnError and self.sock: - self.sock.close() - self.sock = None #try to reconnect next time - else: - logging.Handler.handleError(self, record) - - def emit(self, record): - """ - Emit a record. - - Pickles the record and writes it to the socket in binary format. - If there is an error with the socket, silently drop the packet. - If there was a problem with the socket, re-establishes the - socket. - """ - try: - s = self.makePickle(record) - self.send(s) - except Exception: - self.handleError(record) - - def close(self): - """ - Closes the socket. - """ - with self.lock: - sock = self.sock - if sock: - self.sock = None - sock.close() - logging.Handler.close(self) - -class DatagramHandler(SocketHandler): - """ - A handler class which writes logging records, in pickle format, to - a datagram socket. The pickle which is sent is that of the LogRecord's - attribute dictionary (__dict__), so that the receiver does not need to - have the logging module installed in order to process the logging event. - - To unpickle the record at the receiving end into a LogRecord, use the - makeLogRecord function. - - """ - def __init__(self, host, port): - """ - Initializes the handler with a specific host address and port. 
- """ - SocketHandler.__init__(self, host, port) - self.closeOnError = False - - def makeSocket(self): - """ - The factory method of SocketHandler is here overridden to create - a UDP socket (SOCK_DGRAM). - """ - if self.port is None: - family = socket.AF_UNIX - else: - family = socket.AF_INET - s = socket.socket(family, socket.SOCK_DGRAM) - return s - - def send(self, s): - """ - Send a pickled string to a socket. - - This function no longer allows for partial sends which can happen - when the network is busy - UDP does not guarantee delivery and - can deliver packets out of sequence. - """ - if self.sock is None: - self.createSocket() - self.sock.sendto(s, self.address) - -class SysLogHandler(logging.Handler): - """ - A handler class which sends formatted logging records to a syslog - server. Based on Sam Rushing's syslog module: - http://www.nightmare.com/squirl/python-ext/misc/syslog.py - Contributed by Nicolas Untz (after which minor refactoring changes - have been made). - """ - - # from : - # ====================================================================== - # priorities/facilities are encoded into a single 32-bit quantity, where - # the bottom 3 bits are the priority (0-7) and the top 28 bits are the - # facility (0-big number). Both the priorities and the facilities map - # roughly one-to-one to strings in the syslogd(8) source code. This - # mapping is included in this file. - # - # priorities (these are ordered) - - LOG_EMERG = 0 # system is unusable - LOG_ALERT = 1 # action must be taken immediately - LOG_CRIT = 2 # critical conditions - LOG_ERR = 3 # error conditions - LOG_WARNING = 4 # warning conditions - LOG_NOTICE = 5 # normal but significant condition - LOG_INFO = 6 # informational - LOG_DEBUG = 7 # debug-level messages - - # facility codes - LOG_KERN = 0 # kernel messages - LOG_USER = 1 # random user-level messages - LOG_MAIL = 2 # mail system - LOG_DAEMON = 3 # system daemons - LOG_AUTH = 4 # security/authorization messages - LOG_SYSLOG = 5 # messages generated internally by syslogd - LOG_LPR = 6 # line printer subsystem - LOG_NEWS = 7 # network news subsystem - LOG_UUCP = 8 # UUCP subsystem - LOG_CRON = 9 # clock daemon - LOG_AUTHPRIV = 10 # security/authorization messages (private) - LOG_FTP = 11 # FTP daemon - LOG_NTP = 12 # NTP subsystem - LOG_SECURITY = 13 # Log audit - LOG_CONSOLE = 14 # Log alert - LOG_SOLCRON = 15 # Scheduling daemon (Solaris) - - # other codes through 15 reserved for system use - LOG_LOCAL0 = 16 # reserved for local use - LOG_LOCAL1 = 17 # reserved for local use - LOG_LOCAL2 = 18 # reserved for local use - LOG_LOCAL3 = 19 # reserved for local use - LOG_LOCAL4 = 20 # reserved for local use - LOG_LOCAL5 = 21 # reserved for local use - LOG_LOCAL6 = 22 # reserved for local use - LOG_LOCAL7 = 23 # reserved for local use - - priority_names = { - "alert": LOG_ALERT, - "crit": LOG_CRIT, - "critical": LOG_CRIT, - "debug": LOG_DEBUG, - "emerg": LOG_EMERG, - "err": LOG_ERR, - "error": LOG_ERR, # DEPRECATED - "info": LOG_INFO, - "notice": LOG_NOTICE, - "panic": LOG_EMERG, # DEPRECATED - "warn": LOG_WARNING, # DEPRECATED - "warning": LOG_WARNING, - } - - facility_names = { - "auth": LOG_AUTH, - "authpriv": LOG_AUTHPRIV, - "console": LOG_CONSOLE, - "cron": LOG_CRON, - "daemon": LOG_DAEMON, - "ftp": LOG_FTP, - "kern": LOG_KERN, - "lpr": LOG_LPR, - "mail": LOG_MAIL, - "news": LOG_NEWS, - "ntp": LOG_NTP, - "security": LOG_SECURITY, - "solaris-cron": LOG_SOLCRON, - "syslog": LOG_SYSLOG, - "user": LOG_USER, - "uucp": LOG_UUCP, - "local0": LOG_LOCAL0, - 
"local1": LOG_LOCAL1, - "local2": LOG_LOCAL2, - "local3": LOG_LOCAL3, - "local4": LOG_LOCAL4, - "local5": LOG_LOCAL5, - "local6": LOG_LOCAL6, - "local7": LOG_LOCAL7, - } - - # Originally added to work around GH-43683. Unnecessary since GH-50043 but kept - # for backwards compatibility. - priority_map = { - "DEBUG" : "debug", - "INFO" : "info", - "WARNING" : "warning", - "ERROR" : "error", - "CRITICAL" : "critical" - } - - def __init__(self, address=('localhost', SYSLOG_UDP_PORT), - facility=LOG_USER, socktype=None): - """ - Initialize a handler. - - If address is specified as a string, a UNIX socket is used. To log to a - local syslogd, "SysLogHandler(address="/dev/log")" can be used. - If facility is not specified, LOG_USER is used. If socktype is - specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific - socket type will be used. For Unix sockets, you can also specify a - socktype of None, in which case socket.SOCK_DGRAM will be used, falling - back to socket.SOCK_STREAM. - """ - logging.Handler.__init__(self) - - self.address = address - self.facility = facility - self.socktype = socktype - self.socket = None - self.createSocket() - - def _connect_unixsocket(self, address): - use_socktype = self.socktype - if use_socktype is None: - use_socktype = socket.SOCK_DGRAM - self.socket = socket.socket(socket.AF_UNIX, use_socktype) - try: - self.socket.connect(address) - # it worked, so set self.socktype to the used type - self.socktype = use_socktype - except OSError: - self.socket.close() - if self.socktype is not None: - # user didn't specify falling back, so fail - raise - use_socktype = socket.SOCK_STREAM - self.socket = socket.socket(socket.AF_UNIX, use_socktype) - try: - self.socket.connect(address) - # it worked, so set self.socktype to the used type - self.socktype = use_socktype - except OSError: - self.socket.close() - raise - - def createSocket(self): - """ - Try to create a socket and, if it's not a datagram socket, connect it - to the other end. This method is called during handler initialization, - but it's not regarded as an error if the other end isn't listening yet - --- the method will be called again when emitting an event, - if there is no socket at that point. - """ - address = self.address - socktype = self.socktype - - if isinstance(address, str): - self.unixsocket = True - # Syslog server may be unavailable during handler initialisation. - # C's openlog() function also ignores connection errors. - # Moreover, we ignore these errors while logging, so it's not worse - # to ignore it also here. - try: - self._connect_unixsocket(address) - except OSError: - pass - else: - self.unixsocket = False - if socktype is None: - socktype = socket.SOCK_DGRAM - host, port = address - ress = socket.getaddrinfo(host, port, 0, socktype) - if not ress: - raise OSError("getaddrinfo returns an empty list") - for res in ress: - af, socktype, proto, _, sa = res - err = sock = None - try: - sock = socket.socket(af, socktype, proto) - if socktype == socket.SOCK_STREAM: - sock.connect(sa) - break - except OSError as exc: - err = exc - if sock is not None: - sock.close() - if err is not None: - raise err - self.socket = sock - self.socktype = socktype - - def encodePriority(self, facility, priority): - """ - Encode the facility and priority. You can pass in strings or - integers - if strings are passed, the facility_names and - priority_names mapping dictionaries are used to convert them to - integers. 
- """ - if isinstance(facility, str): - facility = self.facility_names[facility] - if isinstance(priority, str): - priority = self.priority_names[priority] - return (facility << 3) | priority - - def close(self): - """ - Closes the socket. - """ - with self.lock: - sock = self.socket - if sock: - self.socket = None - sock.close() - logging.Handler.close(self) - - def mapPriority(self, levelName): - """ - Map a logging level name to a key in the priority_names map. - This is useful in two scenarios: when custom levels are being - used, and in the case where you can't do a straightforward - mapping by lowercasing the logging level name because of locale- - specific issues (see SF #1524081). - """ - return self.priority_map.get(levelName, "warning") - - ident = '' # prepended to all messages - append_nul = True # some old syslog daemons expect a NUL terminator - - def emit(self, record): - """ - Emit a record. - - The record is formatted, and then sent to the syslog server. If - exception information is present, it is NOT sent to the server. - """ - try: - msg = self.format(record) - if self.ident: - msg = self.ident + msg - if self.append_nul: - msg += '\000' - - # We need to convert record level to lowercase, maybe this will - # change in the future. - prio = '<%d>' % self.encodePriority(self.facility, - self.mapPriority(record.levelname)) - prio = prio.encode('utf-8') - # Message is a string. Convert to bytes as required by RFC 5424 - msg = msg.encode('utf-8') - msg = prio + msg - - if not self.socket: - self.createSocket() - - if self.unixsocket: - try: - self.socket.send(msg) - except OSError: - self.socket.close() - self._connect_unixsocket(self.address) - self.socket.send(msg) - elif self.socktype == socket.SOCK_DGRAM: - self.socket.sendto(msg, self.address) - else: - self.socket.sendall(msg) - except Exception: - self.handleError(record) - -class SMTPHandler(logging.Handler): - """ - A handler class which sends an SMTP email for each logging event. - """ - def __init__(self, mailhost, fromaddr, toaddrs, subject, - credentials=None, secure=None, timeout=5.0): - """ - Initialize the handler. - - Initialize the instance with the from and to addresses and subject - line of the email. To specify a non-standard SMTP port, use the - (host, port) tuple format for the mailhost argument. To specify - authentication credentials, supply a (username, password) tuple - for the credentials argument. To specify the use of a secure - protocol (TLS), pass in a tuple for the secure argument. This will - only be used when authentication credentials are supplied. The tuple - will be either an empty tuple, or a single-value tuple with the name - of a keyfile, or a 2-value tuple with the names of the keyfile and - certificate file. (This tuple is passed to the - `ssl.SSLContext.load_cert_chain` method). - A timeout in seconds can be specified for the SMTP connection (the - default is one second). - """ - logging.Handler.__init__(self) - if isinstance(mailhost, (list, tuple)): - self.mailhost, self.mailport = mailhost - else: - self.mailhost, self.mailport = mailhost, None - if isinstance(credentials, (list, tuple)): - self.username, self.password = credentials - else: - self.username = None - self.fromaddr = fromaddr - if isinstance(toaddrs, str): - toaddrs = [toaddrs] - self.toaddrs = toaddrs - self.subject = subject - self.secure = secure - self.timeout = timeout - - def getSubject(self, record): - """ - Determine the subject for the email. 
- - If you want to specify a subject line which is record-dependent, - override this method. - """ - return self.subject - - def emit(self, record): - """ - Emit a record. - - Format the record and send it to the specified addressees. - """ - try: - import smtplib - from email.message import EmailMessage - import email.utils - - port = self.mailport - if not port: - port = smtplib.SMTP_PORT - smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout) - msg = EmailMessage() - msg['From'] = self.fromaddr - msg['To'] = ','.join(self.toaddrs) - msg['Subject'] = self.getSubject(record) - msg['Date'] = email.utils.localtime() - msg.set_content(self.format(record)) - if self.username: - if self.secure is not None: - import ssl - - try: - keyfile = self.secure[0] - except IndexError: - keyfile = None - - try: - certfile = self.secure[1] - except IndexError: - certfile = None - - context = ssl._create_stdlib_context( - certfile=certfile, keyfile=keyfile - ) - smtp.ehlo() - smtp.starttls(context=context) - smtp.ehlo() - smtp.login(self.username, self.password) - smtp.send_message(msg) - smtp.quit() - except Exception: - self.handleError(record) - -class NTEventLogHandler(logging.Handler): - """ - A handler class which sends events to the NT Event Log. Adds a - registry entry for the specified application name. If no dllname is - provided, win32service.pyd (which contains some basic message - placeholders) is used. Note that use of these placeholders will make - your event logs big, as the entire message source is held in the log. - If you want slimmer logs, you have to pass in the name of your own DLL - which contains the message definitions you want to use in the event log. - """ - def __init__(self, appname, dllname=None, logtype="Application"): - logging.Handler.__init__(self) - try: - import win32evtlogutil, win32evtlog - self.appname = appname - self._welu = win32evtlogutil - if not dllname: - dllname = os.path.split(self._welu.__file__) - dllname = os.path.split(dllname[0]) - dllname = os.path.join(dllname[0], r'win32service.pyd') - self.dllname = dllname - self.logtype = logtype - # Administrative privileges are required to add a source to the registry. - # This may not be available for a user that just wants to add to an - # existing source - handle this specific case. - try: - self._welu.AddSourceToRegistry(appname, dllname, logtype) - except Exception as e: - # This will probably be a pywintypes.error. Only raise if it's not - # an "access denied" error, else let it pass - if getattr(e, 'winerror', None) != 5: # not access denied - raise - self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE - self.typemap = { - logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, - logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, - logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, - logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, - logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, - } - except ImportError: - print("The Python Win32 extensions for NT (service, event "\ - "logging) appear not to be available.") - self._welu = None - - def getMessageID(self, record): - """ - Return the message ID for the event record. If you are using your - own messages, you could do this by having the msg passed to the - logger being an ID rather than a formatting string. Then, in here, - you could use a dictionary lookup to get the message ID. This - version returns 1, which is the base message ID in win32service.pyd. 
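An SMTPHandler sketch; host, addresses, and credentials are placeholders:

    import logging
    from logging.handlers import SMTPHandler

    handler = SMTPHandler(
        mailhost=("smtp.example.com", 587),
        fromaddr="app@example.com",
        toaddrs=["ops@example.com"],
        subject="Application error",
        credentials=("app", "secret"),
        secure=(),                  # empty tuple: STARTTLS, no client cert
    )
    handler.setLevel(logging.ERROR)  # email only for ERROR and above
    logging.getLogger("mail-demo").addHandler(handler)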
- """ - return 1 - - def getEventCategory(self, record): - """ - Return the event category for the record. - - Override this if you want to specify your own categories. This version - returns 0. - """ - return 0 - - def getEventType(self, record): - """ - Return the event type for the record. - - Override this if you want to specify your own types. This version does - a mapping using the handler's typemap attribute, which is set up in - __init__() to a dictionary which contains mappings for DEBUG, INFO, - WARNING, ERROR and CRITICAL. If you are using your own levels you will - either need to override this method or place a suitable dictionary in - the handler's typemap attribute. - """ - return self.typemap.get(record.levelno, self.deftype) - - def emit(self, record): - """ - Emit a record. - - Determine the message ID, event category and event type. Then - log the message in the NT event log. - """ - if self._welu: - try: - id = self.getMessageID(record) - cat = self.getEventCategory(record) - type = self.getEventType(record) - msg = self.format(record) - self._welu.ReportEvent(self.appname, id, cat, type, [msg]) - except Exception: - self.handleError(record) - - def close(self): - """ - Clean up this handler. - - You can remove the application name from the registry as a - source of event log entries. However, if you do this, you will - not be able to see the events as you intended in the Event Log - Viewer - it needs to be able to access the registry to get the - DLL name. - """ - #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) - logging.Handler.close(self) - -class HTTPHandler(logging.Handler): - """ - A class which sends records to a web server, using either GET or - POST semantics. - """ - def __init__(self, host, url, method="GET", secure=False, credentials=None, - context=None): - """ - Initialize the instance with the host, the request URL, and the method - ("GET" or "POST") - """ - logging.Handler.__init__(self) - method = method.upper() - if method not in ["GET", "POST"]: - raise ValueError("method must be GET or POST") - if not secure and context is not None: - raise ValueError("context parameter only makes sense " - "with secure=True") - self.host = host - self.url = url - self.method = method - self.secure = secure - self.credentials = credentials - self.context = context - - def mapLogRecord(self, record): - """ - Default implementation of mapping the log record into a dict - that is sent as the CGI data. Overwrite in your class. - Contributed by Franz Glasner. - """ - return record.__dict__ - - def getConnection(self, host, secure): - """ - get a HTTP[S]Connection. - - Override when a custom connection is required, for example if - there is a proxy. - """ - import http.client - if secure: - connection = http.client.HTTPSConnection(host, context=self.context) - else: - connection = http.client.HTTPConnection(host) - return connection - - def emit(self, record): - """ - Emit a record. - - Send the record to the web server as a percent-encoded dictionary - """ - try: - import urllib.parse - host = self.host - h = self.getConnection(host, self.secure) - url = self.url - data = urllib.parse.urlencode(self.mapLogRecord(record)) - if self.method == "GET": - if (url.find('?') >= 0): - sep = '&' - else: - sep = '?' - url = url + "%c%s" % (sep, data) - h.putrequest(self.method, url) - # support multiple hosts on one IP address... 
- # need to strip optional :port from host, if present - i = host.find(":") - if i >= 0: - host = host[:i] - # See issue #30904: putrequest call above already adds this header - # on Python 3.x. - # h.putheader("Host", host) - if self.method == "POST": - h.putheader("Content-type", - "application/x-www-form-urlencoded") - h.putheader("Content-length", str(len(data))) - if self.credentials: - import base64 - s = ('%s:%s' % self.credentials).encode('utf-8') - s = 'Basic ' + base64.b64encode(s).strip().decode('ascii') - h.putheader('Authorization', s) - h.endheaders() - if self.method == "POST": - h.send(data.encode('utf-8')) - h.getresponse() #can't do anything with the result - except Exception: - self.handleError(record) - -class BufferingHandler(logging.Handler): - """ - A handler class which buffers logging records in memory. Whenever each - record is added to the buffer, a check is made to see if the buffer should - be flushed. If it should, then flush() is expected to do what's needed. - """ - def __init__(self, capacity): - """ - Initialize the handler with the buffer size. - """ - logging.Handler.__init__(self) - self.capacity = capacity - self.buffer = [] - - def shouldFlush(self, record): - """ - Should the handler flush its buffer? - - Returns true if the buffer is up to capacity. This method can be - overridden to implement custom flushing strategies. - """ - return (len(self.buffer) >= self.capacity) - - def emit(self, record): - """ - Emit a record. - - Append the record. If shouldFlush() tells us to, call flush() to process - the buffer. - """ - self.buffer.append(record) - if self.shouldFlush(record): - self.flush() - - def flush(self): - """ - Override to implement custom flushing behaviour. - - This version just zaps the buffer to empty. - """ - with self.lock: - self.buffer.clear() - - def close(self): - """ - Close the handler. - - This version just flushes and chains to the parent class' close(). - """ - try: - self.flush() - finally: - logging.Handler.close(self) - -class MemoryHandler(BufferingHandler): - """ - A handler class which buffers logging records in memory, periodically - flushing them to a target handler. Flushing occurs whenever the buffer - is full, or when an event of a certain severity or greater is seen. - """ - def __init__(self, capacity, flushLevel=logging.ERROR, target=None, - flushOnClose=True): - """ - Initialize the handler with the buffer size, the level at which - flushing should occur and an optional target. - - Note that without a target being set either here or via setTarget(), - a MemoryHandler is no use to anyone! - - The ``flushOnClose`` argument is ``True`` for backward compatibility - reasons - the old behaviour is that when the handler is closed, the - buffer is flushed, even if the flush level hasn't been exceeded nor the - capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``. - """ - BufferingHandler.__init__(self, capacity) - self.flushLevel = flushLevel - self.target = target - # See Issue #26559 for why this has been added - self.flushOnClose = flushOnClose - - def shouldFlush(self, record): - """ - Check for buffer full or a record at the flushLevel or higher. - """ - return (len(self.buffer) >= self.capacity) or \ - (record.levelno >= self.flushLevel) - - def setTarget(self, target): - """ - Set the target handler for this handler. - """ - with self.lock: - self.target = target - - def flush(self): - """ - For a MemoryHandler, flushing means just sending the buffered - records to the target, if there is one. 
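One common customisation of HTTPHandler is overriding mapLogRecord(), as its docstring suggests; SlimHTTPHandler and the endpoint below are illustrative:

    import logging
    from logging.handlers import HTTPHandler

    class SlimHTTPHandler(HTTPHandler):
        # Ship a few fields rather than the whole record __dict__.
        def mapLogRecord(self, record):
            return {"name": record.name,
                    "level": record.levelname,
                    "msg": record.getMessage()}

    handler = SlimHTTPHandler("logs.example.com:8080", "/ingest", method="POST")
    logging.getLogger("http-demo").addHandler(handler)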
Override if you want - different behaviour. - - The record buffer is only cleared if a target has been set. - """ - with self.lock: - if self.target: - for record in self.buffer: - self.target.handle(record) - self.buffer.clear() - - def close(self): - """ - Flush, if appropriately configured, set the target to None and lose the - buffer. - """ - try: - if self.flushOnClose: - self.flush() - finally: - with self.lock: - self.target = None - BufferingHandler.close(self) - - -class QueueHandler(logging.Handler): - """ - This handler sends events to a queue. Typically, it would be used together - with a multiprocessing Queue to centralise logging to file in one process - (in a multi-process application), so as to avoid file write contention - between processes. - - This code is new in Python 3.2, but this class can be copy pasted into - user code for use with earlier Python versions. - """ - - def __init__(self, queue): - """ - Initialise an instance, using the passed queue. - """ - logging.Handler.__init__(self) - self.queue = queue - self.listener = None # will be set to listener if configured via dictConfig() - - def enqueue(self, record): - """ - Enqueue a record. - - The base implementation uses put_nowait. You may want to override - this method if you want to use blocking, timeouts or custom queue - implementations. - """ - self.queue.put_nowait(record) - - def prepare(self, record): - """ - Prepare a record for queuing. The object returned by this method is - enqueued. - - The base implementation formats the record to merge the message and - arguments, and removes unpickleable items from the record in-place. - Specifically, it overwrites the record's `msg` and - `message` attributes with the merged message (obtained by - calling the handler's `format` method), and sets the `args`, - `exc_info` and `exc_text` attributes to None. - - You might want to override this method if you want to convert - the record to a dict or JSON string, or send a modified copy - of the record while leaving the original intact. - """ - # The format operation gets traceback text into record.exc_text - # (if there's exception data), and also returns the formatted - # message. We can then use this to replace the original - # msg + args, as these might be unpickleable. We also zap the - # exc_info, exc_text and stack_info attributes, as they are no longer - # needed and, if not None, will typically not be pickleable. - msg = self.format(record) - # bpo-35726: make copy of record to avoid affecting other handlers in the chain. - record = copy.copy(record) - record.message = msg - record.msg = msg - record.args = None - record.exc_info = None - record.exc_text = None - record.stack_info = None - return record - - def emit(self, record): - """ - Emit a record. - - Writes the LogRecord to the queue, preparing it for pickling first. - """ - try: - self.enqueue(self.prepare(record)) - except Exception: - self.handleError(record) - - -class QueueListener(object): - """ - This class implements an internal threaded listener which watches for - LogRecords being added to a queue, removes them and passes them to a - list of handlers for processing. - """ - _sentinel = None - - def __init__(self, queue, *handlers, respect_handler_level=False): - """ - Initialise an instance with the specified queue and - handlers. 
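The QueueHandler/QueueListener pair is typically wired up like this minimal sketch:

    import logging, queue
    from logging.handlers import QueueHandler, QueueListener

    q = queue.Queue(-1)                       # unbounded
    listener = QueueListener(q, logging.StreamHandler(),
                             respect_handler_level=True)
    listener.start()

    root = logging.getLogger()
    root.addHandler(QueueHandler(q))          # producers only touch the queue
    root.setLevel(logging.DEBUG)
    root.info("handled on the listener's background thread")

    listener.stop()                           # sentinel drains the queue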
- """ - self.queue = queue - self.handlers = handlers - self._thread = None - self.respect_handler_level = respect_handler_level - - def dequeue(self, block): - """ - Dequeue a record and return it, optionally blocking. - - The base implementation uses get. You may want to override this method - if you want to use timeouts or work with custom queue implementations. - """ - return self.queue.get(block) - - def start(self): - """ - Start the listener. - - This starts up a background thread to monitor the queue for - LogRecords to process. - """ - if self._thread is not None: - raise RuntimeError("Listener already started") - - self._thread = t = threading.Thread(target=self._monitor) - t.daemon = True - t.start() - - def prepare(self, record): - """ - Prepare a record for handling. - - This method just returns the passed-in record. You may want to - override this method if you need to do any custom marshalling or - manipulation of the record before passing it to the handlers. - """ - return record - - def handle(self, record): - """ - Handle a record. - - This just loops through the handlers offering them the record - to handle. - """ - record = self.prepare(record) - for handler in self.handlers: - if not self.respect_handler_level: - process = True - else: - process = record.levelno >= handler.level - if process: - handler.handle(record) - - def _monitor(self): - """ - Monitor the queue for records, and ask the handler - to deal with them. - - This method runs on a separate, internal thread. - The thread will terminate if it sees a sentinel object in the queue. - """ - q = self.queue - has_task_done = hasattr(q, 'task_done') - while True: - try: - record = self.dequeue(True) - if record is self._sentinel: - if has_task_done: - q.task_done() - break - self.handle(record) - if has_task_done: - q.task_done() - except queue.Empty: - break - - def enqueue_sentinel(self): - """ - This is used to enqueue the sentinel record. - - The base implementation uses put_nowait. You may want to override this - method if you want to use timeouts or work with custom queue - implementations. - """ - self.queue.put_nowait(self._sentinel) - - def stop(self): - """ - Stop the listener. - - This asks the thread to terminate, and then waits for it to do so. - Note that if you don't call this before your application exits, there - may be some records still left on the queue, which won't be processed. - """ - if self._thread: # see gh-114706 - allow calling this more than once - self.enqueue_sentinel() - self._thread.join() - self._thread = None diff --git a/Python313_13_x86_Template/Lib/lzma.py b/Python313_13_x86_Template/Lib/lzma.py deleted file mode 100644 index c1e3d33d..00000000 --- a/Python313_13_x86_Template/Lib/lzma.py +++ /dev/null @@ -1,364 +0,0 @@ -"""Interface to the liblzma compression library. - -This module provides a class for reading and writing compressed files, -classes for incremental (de)compression, and convenience functions for -one-shot (de)compression. - -These classes and functions support both the XZ and legacy LZMA -container formats, as well as raw compressed data streams. 
-""" - -__all__ = [ - "CHECK_NONE", "CHECK_CRC32", "CHECK_CRC64", "CHECK_SHA256", - "CHECK_ID_MAX", "CHECK_UNKNOWN", - "FILTER_LZMA1", "FILTER_LZMA2", "FILTER_DELTA", "FILTER_X86", "FILTER_IA64", - "FILTER_ARM", "FILTER_ARMTHUMB", "FILTER_POWERPC", "FILTER_SPARC", - "FORMAT_AUTO", "FORMAT_XZ", "FORMAT_ALONE", "FORMAT_RAW", - "MF_HC3", "MF_HC4", "MF_BT2", "MF_BT3", "MF_BT4", - "MODE_FAST", "MODE_NORMAL", "PRESET_DEFAULT", "PRESET_EXTREME", - - "LZMACompressor", "LZMADecompressor", "LZMAFile", "LZMAError", - "open", "compress", "decompress", "is_check_supported", -] - -import builtins -import io -import os -from _lzma import * -from _lzma import _encode_filter_properties, _decode_filter_properties -import _compression - - -# Value 0 no longer used -_MODE_READ = 1 -# Value 2 no longer used -_MODE_WRITE = 3 - - -class LZMAFile(_compression.BaseStream): - - """A file object providing transparent LZMA (de)compression. - - An LZMAFile can act as a wrapper for an existing file object, or - refer directly to a named file on disk. - - Note that LZMAFile provides a *binary* file interface - data read - is returned as bytes, and data to be written must be given as bytes. - """ - - def __init__(self, filename=None, mode="r", *, - format=None, check=-1, preset=None, filters=None): - """Open an LZMA-compressed file in binary mode. - - filename can be either an actual file name (given as a str, - bytes, or PathLike object), in which case the named file is - opened, or it can be an existing file object to read from or - write to. - - mode can be "r" for reading (default), "w" for (over)writing, - "x" for creating exclusively, or "a" for appending. These can - equivalently be given as "rb", "wb", "xb" and "ab" respectively. - - format specifies the container format to use for the file. - If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the - default is FORMAT_XZ. - - check specifies the integrity check to use. This argument can - only be used when opening a file for writing. For FORMAT_XZ, - the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not - support integrity checks - for these formats, check must be - omitted, or be CHECK_NONE. - - When opening a file for reading, the *preset* argument is not - meaningful, and should be omitted. The *filters* argument should - also be omitted, except when format is FORMAT_RAW (in which case - it is required). - - When opening a file for writing, the settings used by the - compressor can be specified either as a preset compression - level (with the *preset* argument), or in detail as a custom - filter chain (with the *filters* argument). For FORMAT_XZ and - FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset - level. For FORMAT_RAW, the caller must always specify a filter - chain; the raw compressor does not support preset compression - levels. - - preset (if provided) should be an integer in the range 0-9, - optionally OR-ed with the constant PRESET_EXTREME. - - filters (if provided) should be a sequence of dicts. Each dict - should have an entry for "id" indicating ID of the filter, plus - additional entries for options to the filter. 
- """ - self._fp = None - self._closefp = False - self._mode = None - - if mode in ("r", "rb"): - if check != -1: - raise ValueError("Cannot specify an integrity check " - "when opening a file for reading") - if preset is not None: - raise ValueError("Cannot specify a preset compression " - "level when opening a file for reading") - if format is None: - format = FORMAT_AUTO - mode_code = _MODE_READ - elif mode in ("w", "wb", "a", "ab", "x", "xb"): - if format is None: - format = FORMAT_XZ - mode_code = _MODE_WRITE - self._compressor = LZMACompressor(format=format, check=check, - preset=preset, filters=filters) - self._pos = 0 - else: - raise ValueError("Invalid mode: {!r}".format(mode)) - - if isinstance(filename, (str, bytes, os.PathLike)): - if "b" not in mode: - mode += "b" - self._fp = builtins.open(filename, mode) - self._closefp = True - self._mode = mode_code - elif hasattr(filename, "read") or hasattr(filename, "write"): - self._fp = filename - self._mode = mode_code - else: - raise TypeError("filename must be a str, bytes, file or PathLike object") - - if self._mode == _MODE_READ: - raw = _compression.DecompressReader(self._fp, LZMADecompressor, - trailing_error=LZMAError, format=format, filters=filters) - self._buffer = io.BufferedReader(raw) - - def close(self): - """Flush and close the file. - - May be called more than once without error. Once the file is - closed, any other operation on it will raise a ValueError. - """ - if self.closed: - return - try: - if self._mode == _MODE_READ: - self._buffer.close() - self._buffer = None - elif self._mode == _MODE_WRITE: - self._fp.write(self._compressor.flush()) - self._compressor = None - finally: - try: - if self._closefp: - self._fp.close() - finally: - self._fp = None - self._closefp = False - - @property - def closed(self): - """True if this file is closed.""" - return self._fp is None - - @property - def name(self): - self._check_not_closed() - return self._fp.name - - @property - def mode(self): - return 'wb' if self._mode == _MODE_WRITE else 'rb' - - def fileno(self): - """Return the file descriptor for the underlying file.""" - self._check_not_closed() - return self._fp.fileno() - - def seekable(self): - """Return whether the file supports seeking.""" - return self.readable() and self._buffer.seekable() - - def readable(self): - """Return whether the file was opened for reading.""" - self._check_not_closed() - return self._mode == _MODE_READ - - def writable(self): - """Return whether the file was opened for writing.""" - self._check_not_closed() - return self._mode == _MODE_WRITE - - def peek(self, size=-1): - """Return buffered data without advancing the file position. - - Always returns at least one byte of data, unless at EOF. - The exact number of bytes returned is unspecified. - """ - self._check_can_read() - # Relies on the undocumented fact that BufferedReader.peek() always - # returns at least one byte (except at EOF) - return self._buffer.peek(size) - - def read(self, size=-1): - """Read up to size uncompressed bytes from the file. - - If size is negative or omitted, read until EOF is reached. - Returns b"" if the file is already at EOF. - """ - self._check_can_read() - return self._buffer.read(size) - - def read1(self, size=-1): - """Read up to size uncompressed bytes, while trying to avoid - making multiple reads from the underlying stream. Reads up to a - buffer's worth of data if size is negative. - - Returns b"" if the file is at EOF. 
- """ - self._check_can_read() - if size < 0: - size = io.DEFAULT_BUFFER_SIZE - return self._buffer.read1(size) - - def readline(self, size=-1): - """Read a line of uncompressed bytes from the file. - - The terminating newline (if present) is retained. If size is - non-negative, no more than size bytes will be read (in which - case the line may be incomplete). Returns b'' if already at EOF. - """ - self._check_can_read() - return self._buffer.readline(size) - - def write(self, data): - """Write a bytes object to the file. - - Returns the number of uncompressed bytes written, which is - always the length of data in bytes. Note that due to buffering, - the file on disk may not reflect the data written until close() - is called. - """ - self._check_can_write() - if isinstance(data, (bytes, bytearray)): - length = len(data) - else: - # accept any data that supports the buffer protocol - data = memoryview(data) - length = data.nbytes - - compressed = self._compressor.compress(data) - self._fp.write(compressed) - self._pos += length - return length - - def seek(self, offset, whence=io.SEEK_SET): - """Change the file position. - - The new position is specified by offset, relative to the - position indicated by whence. Possible values for whence are: - - 0: start of stream (default): offset must not be negative - 1: current stream position - 2: end of stream; offset must not be positive - - Returns the new file position. - - Note that seeking is emulated, so depending on the parameters, - this operation may be extremely slow. - """ - self._check_can_seek() - return self._buffer.seek(offset, whence) - - def tell(self): - """Return the current file position.""" - self._check_not_closed() - if self._mode == _MODE_READ: - return self._buffer.tell() - return self._pos - - -def open(filename, mode="rb", *, - format=None, check=-1, preset=None, filters=None, - encoding=None, errors=None, newline=None): - """Open an LZMA-compressed file in binary or text mode. - - filename can be either an actual file name (given as a str, bytes, - or PathLike object), in which case the named file is opened, or it - can be an existing file object to read from or write to. - - The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb", - "a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text - mode. - - The format, check, preset and filters arguments specify the - compression settings, as for LZMACompressor, LZMADecompressor and - LZMAFile. - - For binary mode, this function is equivalent to the LZMAFile - constructor: LZMAFile(filename, mode, ...). In this case, the - encoding, errors and newline arguments must not be provided. - - For text mode, an LZMAFile object is created, and wrapped in an - io.TextIOWrapper instance with the specified encoding, error - handling behavior, and line ending(s). 
- - """ - if "t" in mode: - if "b" in mode: - raise ValueError("Invalid mode: %r" % (mode,)) - else: - if encoding is not None: - raise ValueError("Argument 'encoding' not supported in binary mode") - if errors is not None: - raise ValueError("Argument 'errors' not supported in binary mode") - if newline is not None: - raise ValueError("Argument 'newline' not supported in binary mode") - - lz_mode = mode.replace("t", "") - binary_file = LZMAFile(filename, lz_mode, format=format, check=check, - preset=preset, filters=filters) - - if "t" in mode: - encoding = io.text_encoding(encoding) - return io.TextIOWrapper(binary_file, encoding, errors, newline) - else: - return binary_file - - -def compress(data, format=FORMAT_XZ, check=-1, preset=None, filters=None): - """Compress a block of data. - - Refer to LZMACompressor's docstring for a description of the - optional arguments *format*, *check*, *preset* and *filters*. - - For incremental compression, use an LZMACompressor instead. - """ - comp = LZMACompressor(format, check, preset, filters) - return comp.compress(data) + comp.flush() - - -def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None): - """Decompress a block of data. - - Refer to LZMADecompressor's docstring for a description of the - optional arguments *format*, *check* and *filters*. - - For incremental decompression, use an LZMADecompressor instead. - """ - results = [] - while True: - decomp = LZMADecompressor(format, memlimit, filters) - try: - res = decomp.decompress(data) - except LZMAError: - if results: - break # Leftover data is not a valid LZMA/XZ stream; ignore it. - else: - raise # Error on the first iteration; bail out. - results.append(res) - if not decomp.eof: - raise LZMAError("Compressed data ended before the " - "end-of-stream marker was reached") - data = decomp.unused_data - if not data: - break - return b"".join(results) diff --git a/Python313_13_x86_Template/Lib/mimetypes.py b/Python313_13_x86_Template/Lib/mimetypes.py deleted file mode 100644 index 2af7c4b7..00000000 --- a/Python313_13_x86_Template/Lib/mimetypes.py +++ /dev/null @@ -1,679 +0,0 @@ -"""Guess the MIME type of a file. - -This module defines two useful functions: - -guess_type(url, strict=True) -- guess the MIME type and encoding of a URL. - -guess_extension(type, strict=True) -- guess the extension for a given MIME type. 
- -It also contains the following, for tuning the behavior: - -Data: - -knownfiles -- list of files to parse -inited -- flag set when init() has been called -suffix_map -- dictionary mapping suffixes to suffixes -encodings_map -- dictionary mapping suffixes to encodings -types_map -- dictionary mapping suffixes to types - -Functions: - -init([files]) -- parse a list of files, default knownfiles (on Windows, the - default values are taken from the registry) -read_mime_types(file) -- parse one file, return a dictionary or None -""" - -import os -import sys -import posixpath -import urllib.parse - -try: - from _winapi import _mimetypes_read_windows_registry -except ImportError: - _mimetypes_read_windows_registry = None - -try: - import winreg as _winreg -except ImportError: - _winreg = None - -__all__ = [ - "knownfiles", "inited", "MimeTypes", - "guess_type", "guess_file_type", "guess_all_extensions", "guess_extension", - "add_type", "init", "read_mime_types", - "suffix_map", "encodings_map", "types_map", "common_types" -] - -knownfiles = [ - "/etc/mime.types", - "/etc/httpd/mime.types", # Mac OS X - "/etc/httpd/conf/mime.types", # Apache - "/etc/apache/mime.types", # Apache 1 - "/etc/apache2/mime.types", # Apache 2 - "/usr/local/etc/httpd/conf/mime.types", - "/usr/local/lib/netscape/mime.types", - "/usr/local/etc/httpd/conf/mime.types", # Apache 1.2 - "/usr/local/etc/mime.types", # Apache 1.3 - ] - -inited = False -_db = None - - -class MimeTypes: - """MIME-types datastore. - - This datastore can handle information from mime.types-style files - and supports basic determination of MIME type from a filename or - URL, and can guess a reasonable extension given a MIME type. - """ - - def __init__(self, filenames=(), strict=True): - if not inited: - init() - self.encodings_map = _encodings_map_default.copy() - self.suffix_map = _suffix_map_default.copy() - self.types_map = ({}, {}) # dict for (non-strict, strict) - self.types_map_inv = ({}, {}) - for (ext, type) in _types_map_default.items(): - self.add_type(type, ext, True) - for (ext, type) in _common_types_default.items(): - self.add_type(type, ext, False) - for name in filenames: - self.read(name, strict) - - def add_type(self, type, ext, strict=True): - """Add a mapping between a type and an extension. - - When the extension is already known, the new - type will replace the old one. When the type - is already known the extension will be added - to the list of known extensions. - - If strict is true, information will be added to - list of standard types, else to the list of non-standard - types. - """ - if not type: - return - self.types_map[strict][ext] = type - exts = self.types_map_inv[strict].setdefault(type, []) - if ext not in exts: - exts.append(ext) - - def guess_type(self, url, strict=True): - """Guess the type of a file which is either a URL or a path-like object. - - Return value is a tuple (type, encoding) where type is None if - the type can't be guessed (no or unknown suffix) or a string - of the form type/subtype, usable for a MIME Content-type - header; and encoding is None for no encoding or the name of - the program used to encode (e.g. compress or gzip). The - mappings are table driven. Encoding suffixes are case - sensitive; type suffixes are first tried case sensitive, then - case insensitive. - - The suffixes .tgz, .taz and .tz (case sensitive!) are all - mapped to '.tar.gz'. (This is table-driven too, using the - dictionary suffix_map.) 
- - Optional `strict' argument when False adds a bunch of commonly found, - but non-standard types. - """ - # TODO: Deprecate accepting file paths (in particular path-like objects). - url = os.fspath(url) - p = urllib.parse.urlparse(url) - if p.scheme and len(p.scheme) > 1: - scheme = p.scheme - url = p.path - else: - return self.guess_file_type(url, strict=strict) - if scheme == 'data': - # syntax of data URLs: - # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data - # mediatype := [ type "/" subtype ] *( ";" parameter ) - # data := *urlchar - # parameter := attribute "=" value - # type/subtype defaults to "text/plain" - comma = url.find(',') - if comma < 0: - # bad data URL - return None, None - semi = url.find(';', 0, comma) - if semi >= 0: - type = url[:semi] - else: - type = url[:comma] - if '=' in type or '/' not in type: - type = 'text/plain' - return type, None # never compressed, so encoding is None - return self._guess_file_type(url, strict, posixpath.splitext) - - def guess_file_type(self, path, *, strict=True): - """Guess the type of a file based on its path. - - Similar to guess_type(), but takes file path istead of URL. - """ - path = os.fsdecode(path) - path = os.path.splitdrive(path)[1] - return self._guess_file_type(path, strict, os.path.splitext) - - def _guess_file_type(self, path, strict, splitext): - base, ext = splitext(path) - while (ext_lower := ext.lower()) in self.suffix_map: - base, ext = splitext(base + self.suffix_map[ext_lower]) - # encodings_map is case sensitive - if ext in self.encodings_map: - encoding = self.encodings_map[ext] - base, ext = splitext(base) - else: - encoding = None - ext = ext.lower() - types_map = self.types_map[True] - if ext in types_map: - return types_map[ext], encoding - elif strict: - return None, encoding - types_map = self.types_map[False] - if ext in types_map: - return types_map[ext], encoding - else: - return None, encoding - - def guess_all_extensions(self, type, strict=True): - """Guess the extensions for a file based on its MIME type. - - Return value is a list of strings giving the possible filename - extensions, including the leading dot ('.'). The extension is not - guaranteed to have been associated with any particular data stream, - but would be mapped to the MIME type `type' by guess_type(). - - Optional `strict' argument when false adds a bunch of commonly found, - but non-standard types. - """ - type = type.lower() - extensions = list(self.types_map_inv[True].get(type, [])) - if not strict: - for ext in self.types_map_inv[False].get(type, []): - if ext not in extensions: - extensions.append(ext) - return extensions - - def guess_extension(self, type, strict=True): - """Guess the extension for a file based on its MIME type. - - Return value is a string giving a filename extension, - including the leading dot ('.'). The extension is not - guaranteed to have been associated with any particular data - stream, but would be mapped to the MIME type `type' by - guess_type(). If no extension can be guessed for `type', None - is returned. - - Optional `strict' argument when false adds a bunch of commonly found, - but non-standard types. - """ - extensions = self.guess_all_extensions(type, strict) - if not extensions: - return None - return extensions[0] - - def read(self, filename, strict=True): - """ - Read a single mime.types-format file, specified by pathname. - - If strict is true, information will be added to - list of standard types, else to the list of non-standard - types. 
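# Illustrative sketch, not part of the deleted file: the data-URL branch of
# guess_type() above takes the mediatype before the comma (defaulting to
# text/plain), while ordinary paths go through the suffix tables.
import mimetypes

assert mimetypes.guess_type("data:,Hello")[0] == "text/plain"
assert mimetypes.guess_type("data:image/png;base64,iVBOR")[0] == "image/png"
assert mimetypes.guess_type("archive.tar.gz") == ("application/x-tar", "gzip")
assert mimetypes.guess_type("report.PDF")[0] == "application/pdf"  # lowercased retry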
- """ - with open(filename, encoding='utf-8') as fp: - self.readfp(fp, strict) - - def readfp(self, fp, strict=True): - """ - Read a single mime.types-format file. - - If strict is true, information will be added to - list of standard types, else to the list of non-standard - types. - """ - while line := fp.readline(): - words = line.split() - for i in range(len(words)): - if words[i][0] == '#': - del words[i:] - break - if not words: - continue - type, suffixes = words[0], words[1:] - for suff in suffixes: - self.add_type(type, '.' + suff, strict) - - def read_windows_registry(self, strict=True): - """ - Load the MIME types database from Windows registry. - - If strict is true, information will be added to - list of standard types, else to the list of non-standard - types. - """ - - if not _mimetypes_read_windows_registry and not _winreg: - return - - add_type = self.add_type - if strict: - add_type = lambda type, ext: self.add_type(type, ext, True) - - # Accelerated function if it is available - if _mimetypes_read_windows_registry: - _mimetypes_read_windows_registry(add_type) - elif _winreg: - self._read_windows_registry(add_type) - - @classmethod - def _read_windows_registry(cls, add_type): - def enum_types(mimedb): - i = 0 - while True: - try: - ctype = _winreg.EnumKey(mimedb, i) - except OSError: - break - else: - if '\0' not in ctype: - yield ctype - i += 1 - - with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '') as hkcr: - for subkeyname in enum_types(hkcr): - try: - with _winreg.OpenKey(hkcr, subkeyname) as subkey: - # Only check file extensions - if not subkeyname.startswith("."): - continue - # raises OSError if no 'Content Type' value - mimetype, datatype = _winreg.QueryValueEx( - subkey, 'Content Type') - if datatype != _winreg.REG_SZ: - continue - add_type(mimetype, subkeyname) - except OSError: - continue - -def guess_type(url, strict=True): - """Guess the type of a file based on its URL. - - Return value is a tuple (type, encoding) where type is None if the - type can't be guessed (no or unknown suffix) or a string of the - form type/subtype, usable for a MIME Content-type header; and - encoding is None for no encoding or the name of the program used - to encode (e.g. compress or gzip). The mappings are table - driven. Encoding suffixes are case sensitive; type suffixes are - first tried case sensitive, then case insensitive. - - The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped - to ".tar.gz". (This is table-driven too, using the dictionary - suffix_map). - - Optional `strict' argument when false adds a bunch of commonly found, but - non-standard types. - """ - if _db is None: - init() - return _db.guess_type(url, strict) - - -def guess_file_type(path, *, strict=True): - """Guess the type of a file based on its path. - - Similar to guess_type(), but takes file path istead of URL. - """ - if _db is None: - init() - return _db.guess_file_type(path, strict=strict) - - -def guess_all_extensions(type, strict=True): - """Guess the extensions for a file based on its MIME type. - - Return value is a list of strings giving the possible filename - extensions, including the leading dot ('.'). The extension is not - guaranteed to have been associated with any particular data - stream, but would be mapped to the MIME type `type' by - guess_type(). If no extension can be guessed for `type', None - is returned. - - Optional `strict' argument when false adds a bunch of commonly found, - but non-standard types. 
- """ - if _db is None: - init() - return _db.guess_all_extensions(type, strict) - -def guess_extension(type, strict=True): - """Guess the extension for a file based on its MIME type. - - Return value is a string giving a filename extension, including the - leading dot ('.'). The extension is not guaranteed to have been - associated with any particular data stream, but would be mapped to the - MIME type `type' by guess_type(). If no extension can be guessed for - `type', None is returned. - - Optional `strict' argument when false adds a bunch of commonly found, - but non-standard types. - """ - if _db is None: - init() - return _db.guess_extension(type, strict) - -def add_type(type, ext, strict=True): - """Add a mapping between a type and an extension. - - When the extension is already known, the new - type will replace the old one. When the type - is already known the extension will be added - to the list of known extensions. - - If strict is true, information will be added to - list of standard types, else to the list of non-standard - types. - """ - if _db is None: - init() - return _db.add_type(type, ext, strict) - - -def init(files=None): - global suffix_map, types_map, encodings_map, common_types - global inited, _db - inited = True # so that MimeTypes.__init__() doesn't call us again - - if files is None or _db is None: - db = MimeTypes() - # Quick return if not supported - db.read_windows_registry() - - if files is None: - files = knownfiles - else: - files = knownfiles + list(files) - else: - db = _db - - for file in files: - if os.path.isfile(file): - db.read(file) - encodings_map = db.encodings_map - suffix_map = db.suffix_map - types_map = db.types_map[True] - common_types = db.types_map[False] - # Make the DB a global variable now that it is fully initialized - _db = db - - -def read_mime_types(file): - try: - f = open(file, encoding='utf-8') - except OSError: - return None - with f: - db = MimeTypes() - db.readfp(f, True) - return db.types_map[True] - - -def _default_mime_types(): - global suffix_map, _suffix_map_default - global encodings_map, _encodings_map_default - global types_map, _types_map_default - global common_types, _common_types_default - - suffix_map = _suffix_map_default = { - '.svgz': '.svg.gz', - '.tgz': '.tar.gz', - '.taz': '.tar.gz', - '.tz': '.tar.gz', - '.tbz2': '.tar.bz2', - '.txz': '.tar.xz', - } - - encodings_map = _encodings_map_default = { - '.gz': 'gzip', - '.Z': 'compress', - '.bz2': 'bzip2', - '.xz': 'xz', - '.br': 'br', - } - - # Before adding new types, make sure they are either registered with IANA, - # at http://www.iana.org/assignments/media-types - # or extensions, i.e. using the x- prefix - - # If you add to these, please keep them sorted by mime type. - # Make sure the entry with the preferred file extension for a particular mime type - # appears before any others of the same mimetype. 
- types_map = _types_map_default = { - '.js' : 'text/javascript', - '.mjs' : 'text/javascript', - '.json' : 'application/json', - '.webmanifest': 'application/manifest+json', - '.doc' : 'application/msword', - '.dot' : 'application/msword', - '.wiz' : 'application/msword', - '.nq' : 'application/n-quads', - '.nt' : 'application/n-triples', - '.bin' : 'application/octet-stream', - '.a' : 'application/octet-stream', - '.dll' : 'application/octet-stream', - '.exe' : 'application/octet-stream', - '.o' : 'application/octet-stream', - '.obj' : 'application/octet-stream', - '.so' : 'application/octet-stream', - '.oda' : 'application/oda', - '.pdf' : 'application/pdf', - '.p7c' : 'application/pkcs7-mime', - '.ps' : 'application/postscript', - '.ai' : 'application/postscript', - '.eps' : 'application/postscript', - '.trig' : 'application/trig', - '.m3u' : 'application/vnd.apple.mpegurl', - '.m3u8' : 'application/vnd.apple.mpegurl', - '.xls' : 'application/vnd.ms-excel', - '.xlb' : 'application/vnd.ms-excel', - '.ppt' : 'application/vnd.ms-powerpoint', - '.pot' : 'application/vnd.ms-powerpoint', - '.ppa' : 'application/vnd.ms-powerpoint', - '.pps' : 'application/vnd.ms-powerpoint', - '.pwz' : 'application/vnd.ms-powerpoint', - '.wasm' : 'application/wasm', - '.bcpio' : 'application/x-bcpio', - '.cpio' : 'application/x-cpio', - '.csh' : 'application/x-csh', - '.dvi' : 'application/x-dvi', - '.gtar' : 'application/x-gtar', - '.hdf' : 'application/x-hdf', - '.h5' : 'application/x-hdf5', - '.latex' : 'application/x-latex', - '.mif' : 'application/x-mif', - '.cdf' : 'application/x-netcdf', - '.nc' : 'application/x-netcdf', - '.p12' : 'application/x-pkcs12', - '.pfx' : 'application/x-pkcs12', - '.ram' : 'application/x-pn-realaudio', - '.pyc' : 'application/x-python-code', - '.pyo' : 'application/x-python-code', - '.sh' : 'application/x-sh', - '.shar' : 'application/x-shar', - '.swf' : 'application/x-shockwave-flash', - '.sv4cpio': 'application/x-sv4cpio', - '.sv4crc' : 'application/x-sv4crc', - '.tar' : 'application/x-tar', - '.tcl' : 'application/x-tcl', - '.tex' : 'application/x-tex', - '.texi' : 'application/x-texinfo', - '.texinfo': 'application/x-texinfo', - '.roff' : 'application/x-troff', - '.t' : 'application/x-troff', - '.tr' : 'application/x-troff', - '.man' : 'application/x-troff-man', - '.me' : 'application/x-troff-me', - '.ms' : 'application/x-troff-ms', - '.ustar' : 'application/x-ustar', - '.src' : 'application/x-wais-source', - '.xsl' : 'application/xml', - '.rdf' : 'application/xml', - '.wsdl' : 'application/xml', - '.xpdl' : 'application/xml', - '.zip' : 'application/zip', - '.3gp' : 'audio/3gpp', - '.3gpp' : 'audio/3gpp', - '.3g2' : 'audio/3gpp2', - '.3gpp2' : 'audio/3gpp2', - '.aac' : 'audio/aac', - '.adts' : 'audio/aac', - '.loas' : 'audio/aac', - '.ass' : 'audio/aac', - '.au' : 'audio/basic', - '.snd' : 'audio/basic', - '.mp3' : 'audio/mpeg', - '.mp2' : 'audio/mpeg', - '.opus' : 'audio/opus', - '.aif' : 'audio/x-aiff', - '.aifc' : 'audio/x-aiff', - '.aiff' : 'audio/x-aiff', - '.ra' : 'audio/x-pn-realaudio', - '.wav' : 'audio/x-wav', - '.avif' : 'image/avif', - '.bmp' : 'image/bmp', - '.gif' : 'image/gif', - '.ief' : 'image/ief', - '.jpg' : 'image/jpeg', - '.jpe' : 'image/jpeg', - '.jpeg' : 'image/jpeg', - '.heic' : 'image/heic', - '.heif' : 'image/heif', - '.png' : 'image/png', - '.svg' : 'image/svg+xml', - '.tiff' : 'image/tiff', - '.tif' : 'image/tiff', - '.ico' : 'image/vnd.microsoft.icon', - '.webp' : 'image/webp', - '.ras' : 'image/x-cmu-raster', - '.pnm' : 
'image/x-portable-anymap', - '.pbm' : 'image/x-portable-bitmap', - '.pgm' : 'image/x-portable-graymap', - '.ppm' : 'image/x-portable-pixmap', - '.rgb' : 'image/x-rgb', - '.xbm' : 'image/x-xbitmap', - '.xpm' : 'image/x-xpixmap', - '.xwd' : 'image/x-xwindowdump', - '.eml' : 'message/rfc822', - '.mht' : 'message/rfc822', - '.mhtml' : 'message/rfc822', - '.nws' : 'message/rfc822', - '.css' : 'text/css', - '.csv' : 'text/csv', - '.html' : 'text/html', - '.htm' : 'text/html', - '.md' : 'text/markdown', - '.markdown': 'text/markdown', - '.n3' : 'text/n3', - '.txt' : 'text/plain', - '.bat' : 'text/plain', - '.c' : 'text/plain', - '.h' : 'text/plain', - '.ksh' : 'text/plain', - '.pl' : 'text/plain', - '.srt' : 'text/plain', - '.rtx' : 'text/richtext', - '.rtf' : 'text/rtf', - '.tsv' : 'text/tab-separated-values', - '.vtt' : 'text/vtt', - '.py' : 'text/x-python', - '.rst' : 'text/x-rst', - '.etx' : 'text/x-setext', - '.sgm' : 'text/x-sgml', - '.sgml' : 'text/x-sgml', - '.vcf' : 'text/x-vcard', - '.xml' : 'text/xml', - '.mp4' : 'video/mp4', - '.mpeg' : 'video/mpeg', - '.m1v' : 'video/mpeg', - '.mpa' : 'video/mpeg', - '.mpe' : 'video/mpeg', - '.mpg' : 'video/mpeg', - '.mov' : 'video/quicktime', - '.qt' : 'video/quicktime', - '.webm' : 'video/webm', - '.avi' : 'video/x-msvideo', - '.movie' : 'video/x-sgi-movie', - } - - # These are non-standard types, commonly found in the wild. They will - # only match if strict=0 flag is given to the API methods. - - # Please sort these too - common_types = _common_types_default = { - '.rtf' : 'application/rtf', - '.midi': 'audio/midi', - '.mid' : 'audio/midi', - '.jpg' : 'image/jpg', - '.pict': 'image/pict', - '.pct' : 'image/pict', - '.pic' : 'image/pict', - '.xul' : 'text/xul', - } - - -_default_mime_types() - - -def _main(): - import getopt - - USAGE = """\ -Usage: mimetypes.py [options] type - -Options: - --help / -h -- print this message and exit - --lenient / -l -- additionally search of some common, but non-standard - types. - --extension / -e -- guess extension instead of type - -More than one type argument may be given. -""" - - def usage(code, msg=''): - print(USAGE) - if msg: print(msg) - sys.exit(code) - - try: - opts, args = getopt.getopt(sys.argv[1:], 'hle', - ['help', 'lenient', 'extension']) - except getopt.error as msg: - usage(1, msg) - - strict = 1 - extension = 0 - for opt, arg in opts: - if opt in ('-h', '--help'): - usage(0) - elif opt in ('-l', '--lenient'): - strict = 0 - elif opt in ('-e', '--extension'): - extension = 1 - for gtype in args: - if extension: - guess = guess_extension(gtype, strict) - if not guess: print("I don't know anything about type", gtype) - else: print(guess) - else: - guess, encoding = guess_type(gtype, strict) - if not guess: print("I don't know anything about type", gtype) - else: print('type:', guess, 'encoding:', encoding) - - -if __name__ == '__main__': - _main() diff --git a/Python313_13_x86_Template/Lib/multiprocessing/connection.py b/Python313_13_x86_Template/Lib/multiprocessing/connection.py deleted file mode 100644 index efb9ea95..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/connection.py +++ /dev/null @@ -1,1212 +0,0 @@ -# -# A higher level module for using sockets (or Windows named pipes) -# -# multiprocessing/connection.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. 
-# - -__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] - -import errno -import io -import os -import sys -import socket -import struct -import time -import tempfile -import itertools - - -from . import util - -from . import AuthenticationError, BufferTooShort -from .context import reduction -_ForkingPickler = reduction.ForkingPickler - -try: - import _multiprocessing - import _winapi - from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE -except ImportError: - if sys.platform == 'win32': - raise - _winapi = None - -# -# -# - -BUFSIZE = 8192 -# A very generous timeout when it comes to local connections... -CONNECTION_TIMEOUT = 20. - -_mmap_counter = itertools.count() -_MAX_PIPE_ATTEMPTS = 100 - -default_family = 'AF_INET' -families = ['AF_INET'] - -if hasattr(socket, 'AF_UNIX'): - default_family = 'AF_UNIX' - families += ['AF_UNIX'] - -if sys.platform == 'win32': - default_family = 'AF_PIPE' - families += ['AF_PIPE'] - - -def _init_timeout(timeout=CONNECTION_TIMEOUT): - return time.monotonic() + timeout - -def _check_timeout(t): - return time.monotonic() > t - -# -# -# - -def arbitrary_address(family): - ''' - Return an arbitrary free address for the given family - ''' - if family == 'AF_INET': - return ('localhost', 0) - elif family == 'AF_UNIX': - return tempfile.mktemp(prefix='sock-', dir=util.get_temp_dir()) - elif family == 'AF_PIPE': - return (r'\\.\pipe\pyc-%d-%d-%s' % - (os.getpid(), next(_mmap_counter), os.urandom(8).hex())) - else: - raise ValueError('unrecognized family') - -def _validate_family(family): - ''' - Checks if the family is valid for the current environment. - ''' - if sys.platform != 'win32' and family == 'AF_PIPE': - raise ValueError('Family %s is not recognized.' % family) - - if sys.platform == 'win32' and family == 'AF_UNIX': - # double check - if not hasattr(socket, family): - raise ValueError('Family %s is not recognized.' % family) - -def address_type(address): - ''' - Return the types of the address - - This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' - ''' - if type(address) == tuple: - return 'AF_INET' - elif type(address) is str and address.startswith('\\\\'): - return 'AF_PIPE' - elif type(address) is str or util.is_abstract_socket_namespace(address): - return 'AF_UNIX' - else: - raise ValueError('address type of %r unrecognized' % address) - -# -# Connection classes -# - -class _ConnectionBase: - _handle = None - - def __init__(self, handle, readable=True, writable=True): - handle = handle.__index__() - if handle < 0: - raise ValueError("invalid handle") - if not readable and not writable: - raise ValueError( - "at least one of `readable` and `writable` must be True") - self._handle = handle - self._readable = readable - self._writable = writable - - # XXX should we use util.Finalize instead of a __del__? 
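# Illustrative sketch, not part of the deleted file: address_type() above
# (an internal helper, not exported via __all__) classifies addresses by
# shape: tuples are AF_INET, strings starting with two backslashes are
# Windows named pipes, and other strings are AF_UNIX paths.
import multiprocessing.connection as mpc

assert mpc.address_type(("localhost", 6000)) == "AF_INET"
assert mpc.address_type(r"\\.\pipe\demo") == "AF_PIPE"
assert mpc.address_type("/tmp/demo.sock") == "AF_UNIX"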
- - def __del__(self): - if self._handle is not None: - self._close() - - def _check_closed(self): - if self._handle is None: - raise OSError("handle is closed") - - def _check_readable(self): - if not self._readable: - raise OSError("connection is write-only") - - def _check_writable(self): - if not self._writable: - raise OSError("connection is read-only") - - def _bad_message_length(self): - if self._writable: - self._readable = False - else: - self.close() - raise OSError("bad message length") - - @property - def closed(self): - """True if the connection is closed""" - return self._handle is None - - @property - def readable(self): - """True if the connection is readable""" - return self._readable - - @property - def writable(self): - """True if the connection is writable""" - return self._writable - - def fileno(self): - """File descriptor or handle of the connection""" - self._check_closed() - return self._handle - - def close(self): - """Close the connection""" - if self._handle is not None: - try: - self._close() - finally: - self._handle = None - - def send_bytes(self, buf, offset=0, size=None): - """Send the bytes data from a bytes-like object""" - self._check_closed() - self._check_writable() - m = memoryview(buf) - if m.itemsize > 1: - m = m.cast('B') - n = m.nbytes - if offset < 0: - raise ValueError("offset is negative") - if n < offset: - raise ValueError("buffer length < offset") - if size is None: - size = n - offset - elif size < 0: - raise ValueError("size is negative") - elif offset + size > n: - raise ValueError("buffer length < offset + size") - self._send_bytes(m[offset:offset + size]) - - def send(self, obj): - """Send a (picklable) object""" - self._check_closed() - self._check_writable() - self._send_bytes(_ForkingPickler.dumps(obj)) - - def recv_bytes(self, maxlength=None): - """ - Receive bytes data as a bytes object. - """ - self._check_closed() - self._check_readable() - if maxlength is not None and maxlength < 0: - raise ValueError("negative maxlength") - buf = self._recv_bytes(maxlength) - if buf is None: - self._bad_message_length() - return buf.getvalue() - - def recv_bytes_into(self, buf, offset=0): - """ - Receive bytes data into a writeable bytes-like object. - Return the number of bytes read. - """ - self._check_closed() - self._check_readable() - with memoryview(buf) as m: - # Get bytesize of arbitrary buffer - itemsize = m.itemsize - bytesize = itemsize * len(m) - if offset < 0: - raise ValueError("negative offset") - elif offset > bytesize: - raise ValueError("offset too large") - result = self._recv_bytes() - size = result.tell() - if bytesize < offset + size: - raise BufferTooShort(result.getvalue()) - # Message can fit in dest - result.seek(0) - result.readinto(m[offset // itemsize : - (offset + size) // itemsize]) - return size - - def recv(self): - """Receive a (picklable) object""" - self._check_closed() - self._check_readable() - buf = self._recv_bytes() - return _ForkingPickler.loads(buf.getbuffer()) - - def poll(self, timeout=0.0): - """Whether there is any input available to be read""" - self._check_closed() - self._check_readable() - return self._poll(timeout) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - self.close() - - -if _winapi: - - class PipeConnection(_ConnectionBase): - """ - Connection class based on a Windows named pipe. - Overlapped I/O is used, so the handles must have been created - with FILE_FLAG_OVERLAPPED. 
- """ - _got_empty_message = False - _send_ov = None - - def _close(self, _CloseHandle=_winapi.CloseHandle): - ov = self._send_ov - if ov is not None: - # Interrupt WaitForMultipleObjects() in _send_bytes() - ov.cancel() - _CloseHandle(self._handle) - - def _send_bytes(self, buf): - if self._send_ov is not None: - # A connection should only be used by a single thread - raise ValueError("concurrent send_bytes() calls " - "are not supported") - ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) - self._send_ov = ov - try: - if err == _winapi.ERROR_IO_PENDING: - waitres = _winapi.WaitForMultipleObjects( - [ov.event], False, INFINITE) - assert waitres == WAIT_OBJECT_0 - except: - ov.cancel() - raise - finally: - self._send_ov = None - nwritten, err = ov.GetOverlappedResult(True) - if err == _winapi.ERROR_OPERATION_ABORTED: - # close() was called by another thread while - # WaitForMultipleObjects() was waiting for the overlapped - # operation. - raise OSError(errno.EPIPE, "handle is closed") - assert err == 0 - assert nwritten == len(buf) - - def _recv_bytes(self, maxsize=None): - if self._got_empty_message: - self._got_empty_message = False - return io.BytesIO() - else: - bsize = 128 if maxsize is None else min(maxsize, 128) - try: - ov, err = _winapi.ReadFile(self._handle, bsize, - overlapped=True) - try: - if err == _winapi.ERROR_IO_PENDING: - waitres = _winapi.WaitForMultipleObjects( - [ov.event], False, INFINITE) - assert waitres == WAIT_OBJECT_0 - except: - ov.cancel() - raise - finally: - nread, err = ov.GetOverlappedResult(True) - if err == 0: - f = io.BytesIO() - f.write(ov.getbuffer()) - return f - elif err == _winapi.ERROR_MORE_DATA: - return self._get_more_data(ov, maxsize) - except OSError as e: - if e.winerror == _winapi.ERROR_BROKEN_PIPE: - raise EOFError - else: - raise - raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") - - def _poll(self, timeout): - if (self._got_empty_message or - _winapi.PeekNamedPipe(self._handle)[0] != 0): - return True - return bool(wait([self], timeout)) - - def _get_more_data(self, ov, maxsize): - buf = ov.getbuffer() - f = io.BytesIO() - f.write(buf) - left = _winapi.PeekNamedPipe(self._handle)[1] - assert left > 0 - if maxsize is not None and len(buf) + left > maxsize: - self._bad_message_length() - ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) - rbytes, err = ov.GetOverlappedResult(True) - assert err == 0 - assert rbytes == left - f.write(ov.getbuffer()) - return f - - -class Connection(_ConnectionBase): - """ - Connection class based on an arbitrary file descriptor (Unix only), or - a socket handle (Windows). 
- """ - - if _winapi: - def _close(self, _close=_multiprocessing.closesocket): - _close(self._handle) - _write = _multiprocessing.send - _read = _multiprocessing.recv - else: - def _close(self, _close=os.close): - _close(self._handle) - _write = os.write - _read = os.read - - def _send(self, buf, write=_write): - remaining = len(buf) - while True: - n = write(self._handle, buf) - remaining -= n - if remaining == 0: - break - buf = buf[n:] - - def _recv(self, size, read=_read): - buf = io.BytesIO() - handle = self._handle - remaining = size - while remaining > 0: - chunk = read(handle, remaining) - n = len(chunk) - if n == 0: - if remaining == size: - raise EOFError - else: - raise OSError("got end of file during message") - buf.write(chunk) - remaining -= n - return buf - - def _send_bytes(self, buf): - n = len(buf) - if n > 0x7fffffff: - pre_header = struct.pack("!i", -1) - header = struct.pack("!Q", n) - self._send(pre_header) - self._send(header) - self._send(buf) - else: - # For wire compatibility with 3.7 and lower - header = struct.pack("!i", n) - if n > 16384: - # The payload is large so Nagle's algorithm won't be triggered - # and we'd better avoid the cost of concatenation. - self._send(header) - self._send(buf) - else: - # Issue #20540: concatenate before sending, to avoid delays due - # to Nagle's algorithm on a TCP socket. - # Also note we want to avoid sending a 0-length buffer separately, - # to avoid "broken pipe" errors if the other end closed the pipe. - self._send(header + buf) - - def _recv_bytes(self, maxsize=None): - buf = self._recv(4) - size, = struct.unpack("!i", buf.getvalue()) - if size == -1: - buf = self._recv(8) - size, = struct.unpack("!Q", buf.getvalue()) - if maxsize is not None and size > maxsize: - return None - return self._recv(size) - - def _poll(self, timeout): - r = wait([self], timeout) - return bool(r) - - -# -# Public functions -# - -class Listener(object): - ''' - Returns a listener object. - - This is a wrapper for a bound socket which is 'listening' for - connections, or for a Windows named pipe. - ''' - def __init__(self, address=None, family=None, backlog=1, authkey=None): - family = family or (address and address_type(address)) \ - or default_family - _validate_family(family) - if authkey is not None and not isinstance(authkey, bytes): - raise TypeError('authkey should be a byte string') - - if family == 'AF_PIPE': - if address: - self._listener = PipeListener(address, backlog) - else: - for attempts in itertools.count(): - address = arbitrary_address(family) - try: - self._listener = PipeListener(address, backlog) - break - except OSError as e: - if attempts >= _MAX_PIPE_ATTEMPTS: - raise - if e.winerror not in (_winapi.ERROR_PIPE_BUSY, - _winapi.ERROR_ACCESS_DENIED): - raise - else: - address = address or arbitrary_address(family) - self._listener = SocketListener(address, family, backlog) - - self._authkey = authkey - - def accept(self): - ''' - Accept a connection on the bound socket or named pipe of `self`. - - Returns a `Connection` object. - ''' - if self._listener is None: - raise OSError('listener is closed') - - c = self._listener.accept() - if self._authkey is not None: - deliver_challenge(c, self._authkey) - answer_challenge(c, self._authkey) - return c - - def close(self): - ''' - Close the bound socket or named pipe of `self`. 
- ''' - listener = self._listener - if listener is not None: - self._listener = None - listener.close() - - @property - def address(self): - return self._listener._address - - @property - def last_accepted(self): - return self._listener._last_accepted - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - self.close() - - -def Client(address, family=None, authkey=None): - ''' - Returns a connection to the address of a `Listener` - ''' - family = family or address_type(address) - _validate_family(family) - if family == 'AF_PIPE': - c = PipeClient(address) - else: - c = SocketClient(address) - - if authkey is not None and not isinstance(authkey, bytes): - raise TypeError('authkey should be a byte string') - - if authkey is not None: - answer_challenge(c, authkey) - deliver_challenge(c, authkey) - - return c - - -if sys.platform != 'win32': - - def Pipe(duplex=True): - ''' - Returns pair of connection objects at either end of a pipe - ''' - if duplex: - s1, s2 = socket.socketpair() - s1.setblocking(True) - s2.setblocking(True) - c1 = Connection(s1.detach()) - c2 = Connection(s2.detach()) - else: - fd1, fd2 = os.pipe() - c1 = Connection(fd1, writable=False) - c2 = Connection(fd2, readable=False) - - return c1, c2 - -else: - - def Pipe(duplex=True): - ''' - Returns pair of connection objects at either end of a pipe - ''' - if duplex: - openmode = _winapi.PIPE_ACCESS_DUPLEX - access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE - obsize, ibsize = BUFSIZE, BUFSIZE - else: - openmode = _winapi.PIPE_ACCESS_INBOUND - access = _winapi.GENERIC_WRITE - obsize, ibsize = 0, BUFSIZE - - for attempts in itertools.count(): - address = arbitrary_address('AF_PIPE') - try: - h1 = _winapi.CreateNamedPipe( - address, openmode | _winapi.FILE_FLAG_OVERLAPPED | - _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, - _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | - _winapi.PIPE_WAIT, - 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, - # default security descriptor: the handle cannot be inherited - _winapi.NULL - ) - break - except OSError as e: - if attempts >= _MAX_PIPE_ATTEMPTS: - raise - if e.winerror not in (_winapi.ERROR_PIPE_BUSY, - _winapi.ERROR_ACCESS_DENIED): - raise - h2 = _winapi.CreateFile( - address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, - _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL - ) - _winapi.SetNamedPipeHandleState( - h2, _winapi.PIPE_READMODE_MESSAGE, None, None - ) - - overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) - _, err = overlapped.GetOverlappedResult(True) - assert err == 0 - - c1 = PipeConnection(h1, writable=duplex) - c2 = PipeConnection(h2, readable=duplex) - - return c1, c2 - -# -# Definitions for connections based on sockets -# - -class SocketListener(object): - ''' - Representation of a socket which is bound to an address and listening - ''' - def __init__(self, address, family, backlog=1): - self._socket = socket.socket(getattr(socket, family)) - try: - # SO_REUSEADDR has different semantics on Windows (issue #2550). 
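# Illustrative sketch, not part of the deleted file: the public
# Listener/Client pair defined above, exchanging one pickled message over a
# loopback socket with the authkey handshake enabled.
import threading
from multiprocessing.connection import Client, Listener

def serve(listener):
    with listener.accept() as conn:   # runs deliver/answer_challenge
        conn.send({"status": "ok"})

listener = Listener(("localhost", 0), authkey=b"secret")
threading.Thread(target=serve, args=(listener,)).start()
with Client(listener.address, authkey=b"secret") as conn:
    assert conn.recv() == {"status": "ok"}
listener.close()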
- if os.name == 'posix': - self._socket.setsockopt(socket.SOL_SOCKET, - socket.SO_REUSEADDR, 1) - self._socket.setblocking(True) - self._socket.bind(address) - self._socket.listen(backlog) - self._address = self._socket.getsockname() - except OSError: - self._socket.close() - raise - self._family = family - self._last_accepted = None - - if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): - # Linux abstract socket namespaces do not need to be explicitly unlinked - self._unlink = util.Finalize( - self, os.unlink, args=(address,), exitpriority=0 - ) - else: - self._unlink = None - - def accept(self): - s, self._last_accepted = self._socket.accept() - s.setblocking(True) - return Connection(s.detach()) - - def close(self): - try: - self._socket.close() - finally: - unlink = self._unlink - if unlink is not None: - self._unlink = None - unlink() - - -def SocketClient(address): - ''' - Return a connection object connected to the socket given by `address` - ''' - family = address_type(address) - with socket.socket( getattr(socket, family) ) as s: - s.setblocking(True) - s.connect(address) - return Connection(s.detach()) - -# -# Definitions for connections based on named pipes -# - -if sys.platform == 'win32': - - class PipeListener(object): - ''' - Representation of a named pipe - ''' - def __init__(self, address, backlog=None): - self._address = address - self._handle_queue = [self._new_handle(first=True)] - - self._last_accepted = None - util.sub_debug('listener created with address=%r', self._address) - self.close = util.Finalize( - self, PipeListener._finalize_pipe_listener, - args=(self._handle_queue, self._address), exitpriority=0 - ) - - def _new_handle(self, first=False): - flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED - if first: - flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE - return _winapi.CreateNamedPipe( - self._address, flags, - _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | - _winapi.PIPE_WAIT, - _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, - _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL - ) - - def accept(self): - self._handle_queue.append(self._new_handle()) - handle = self._handle_queue.pop(0) - try: - ov = _winapi.ConnectNamedPipe(handle, overlapped=True) - except OSError as e: - if e.winerror != _winapi.ERROR_NO_DATA: - raise - # ERROR_NO_DATA can occur if a client has already connected, - # written data and then disconnected -- see Issue 14725. 
- else: - try: - res = _winapi.WaitForMultipleObjects( - [ov.event], False, INFINITE) - except: - ov.cancel() - _winapi.CloseHandle(handle) - raise - finally: - _, err = ov.GetOverlappedResult(True) - assert err == 0 - return PipeConnection(handle) - - @staticmethod - def _finalize_pipe_listener(queue, address): - util.sub_debug('closing listener with address=%r', address) - for handle in queue: - _winapi.CloseHandle(handle) - - def PipeClient(address): - ''' - Return a connection object connected to the pipe given by `address` - ''' - t = _init_timeout() - while 1: - try: - _winapi.WaitNamedPipe(address, 1000) - h = _winapi.CreateFile( - address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, - 0, _winapi.NULL, _winapi.OPEN_EXISTING, - _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL - ) - except OSError as e: - if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, - _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): - raise - else: - break - else: - raise - - _winapi.SetNamedPipeHandleState( - h, _winapi.PIPE_READMODE_MESSAGE, None, None - ) - return PipeConnection(h) - -# -# Authentication stuff -# - -MESSAGE_LENGTH = 40 # MUST be > 20 - -_CHALLENGE = b'#CHALLENGE#' -_WELCOME = b'#WELCOME#' -_FAILURE = b'#FAILURE#' - -# multiprocessing.connection Authentication Handshake Protocol Description -# (as documented for reference after reading the existing code) -# ============================================================================= -# -# On Windows: native pipes with "overlapped IO" are used to send the bytes, -# instead of the length prefix SIZE scheme described below. (ie: the OS deals -# with message sizes for us) -# -# Protocol error behaviors: -# -# On POSIX, any failure to receive the length prefix into SIZE, for SIZE greater -# than the requested maxsize to receive, or receiving fewer than SIZE bytes -# results in the connection being closed and auth to fail. -# -# On Windows, receiving too few bytes is never a low level _recv_bytes read -# error, receiving too many will trigger an error only if receive maxsize -# value was larger than 128 OR the if the data arrived in smaller pieces. -# -# Serving side Client side -# ------------------------------ --------------------------------------- -# 0. Open a connection on the pipe. -# 1. Accept connection. -# 2. Random 20+ bytes -> MESSAGE -# Modern servers always send -# more than 20 bytes and include -# a {digest} prefix on it with -# their preferred HMAC digest. -# Legacy ones send ==20 bytes. -# 3. send 4 byte length (net order) -# prefix followed by: -# b'#CHALLENGE#' + MESSAGE -# 4. Receive 4 bytes, parse as network byte -# order integer. If it is -1, receive an -# additional 8 bytes, parse that as network -# byte order. The result is the length of -# the data that follows -> SIZE. -# 5. Receive min(SIZE, 256) bytes -> M1 -# 6. Assert that M1 starts with: -# b'#CHALLENGE#' -# 7. Strip that prefix from M1 into -> M2 -# 7.1. Parse M2: if it is exactly 20 bytes in -# length this indicates a legacy server -# supporting only HMAC-MD5. Otherwise the -# 7.2. preferred digest is looked up from an -# expected "{digest}" prefix on M2. No prefix -# or unsupported digest? <- AuthenticationError -# 7.3. Put divined algorithm name in -> D_NAME -# 8. Compute HMAC-D_NAME of AUTHKEY, M2 -> C_DIGEST -# 9. Send 4 byte length prefix (net order) -# followed by C_DIGEST bytes. -# 10. Receive 4 or 4+8 byte length -# prefix (#4 dance) -> SIZE. -# 11. Receive min(SIZE, 256) -> C_D. -# 11.1. Parse C_D: legacy servers -# accept it as is, "md5" -> D_NAME -# 11.2. 
modern servers check the length -# of C_D, IF it is 16 bytes? -# 11.2.1. "md5" -> D_NAME -# and skip to step 12. -# 11.3. longer? expect and parse a "{digest}" -# prefix into -> D_NAME. -# Strip the prefix and store remaining -# bytes in -> C_D. -# 11.4. Don't like D_NAME? <- AuthenticationError -# 12. Compute HMAC-D_NAME of AUTHKEY, -# MESSAGE into -> M_DIGEST. -# 13. Compare M_DIGEST == C_D: -# 14a: Match? Send length prefix & -# b'#WELCOME#' -# <- RETURN -# 14b: Mismatch? Send len prefix & -# b'#FAILURE#' -# <- CLOSE & AuthenticationError -# 15. Receive 4 or 4+8 byte length prefix (net -# order) again as in #4 into -> SIZE. -# 16. Receive min(SIZE, 256) bytes -> M3. -# 17. Compare M3 == b'#WELCOME#': -# 17a. Match? <- RETURN -# 17b. Mismatch? <- CLOSE & AuthenticationError -# -# If this RETURNed, the connection remains open: it has been authenticated. -# -# Length prefixes are used consistently. Even on the legacy protocol, this -# was good fortune and allowed us to evolve the protocol by using the length -# of the opening challenge or length of the returned digest as a signal as -# to which protocol the other end supports. - -_ALLOWED_DIGESTS = frozenset( - {b'md5', b'sha256', b'sha384', b'sha3_256', b'sha3_384'}) -_MAX_DIGEST_LEN = max(len(_) for _ in _ALLOWED_DIGESTS) - -# Old hmac-md5 only server versions from Python <=3.11 sent a message of this -# length. It happens to not match the length of any supported digest so we can -# use a message of this length to indicate that we should work in backwards -# compatible md5-only mode without a {digest_name} prefix on our response. -_MD5ONLY_MESSAGE_LENGTH = 20 -_MD5_DIGEST_LEN = 16 -_LEGACY_LENGTHS = (_MD5ONLY_MESSAGE_LENGTH, _MD5_DIGEST_LEN) - - -def _get_digest_name_and_payload(message): # type: (bytes) -> tuple[str, bytes] - """Returns a digest name and the payload for a response hash. - - If a legacy protocol is detected based on the message length - or contents the digest name returned will be empty to indicate - legacy mode where MD5 and no digest prefix should be sent. - """ - # modern message format: b"{digest}payload" longer than 20 bytes - # legacy message format: 16 or 20 byte b"payload" - if len(message) in _LEGACY_LENGTHS: - # Either this was a legacy server challenge, or we're processing - # a reply from a legacy client that sent an unprefixed 16-byte - # HMAC-MD5 response. All messages using the modern protocol will - # be longer than either of these lengths. - return '', message - if (message.startswith(b'{') and - (curly := message.find(b'}', 1, _MAX_DIGEST_LEN+2)) > 0): - digest = message[1:curly] - if digest in _ALLOWED_DIGESTS: - payload = message[curly+1:] - return digest.decode('ascii'), payload - raise AuthenticationError( - 'unsupported message length, missing digest prefix, ' - f'or unsupported digest: {message=}') - - -def _create_response(authkey, message): - """Create a MAC based on authkey and message - - The MAC algorithm defaults to HMAC-MD5, unless MD5 is not available or - the message has a '{digest_name}' prefix. For legacy HMAC-MD5, the response - is the raw MAC, otherwise the response is prefixed with '{digest_name}', - e.g. b'{sha256}abcdefg...' - - Note: The MAC protects the entire message including the digest_name prefix. - """ - import hmac - digest_name = _get_digest_name_and_payload(message)[0] - # The MAC protects the entire message: digest header and payload. - if not digest_name: - # Legacy server without a {digest} prefix on message. - # Generate a legacy non-prefixed HMAC-MD5 reply. 
- try: - return hmac.new(authkey, message, 'md5').digest() - except ValueError: - # HMAC-MD5 is not available (FIPS mode?), fall back to - # HMAC-SHA2-256 modern protocol. The legacy server probably - # doesn't support it and will reject us anyways. :shrug: - digest_name = 'sha256' - # Modern protocol, indicate the digest used in the reply. - response = hmac.new(authkey, message, digest_name).digest() - return b'{%s}%s' % (digest_name.encode('ascii'), response) - - -def _verify_challenge(authkey, message, response): - """Verify MAC challenge - - If our message did not include a digest_name prefix, the client is allowed - to select a stronger digest_name from _ALLOWED_DIGESTS. - - In case our message is prefixed, a client cannot downgrade to a weaker - algorithm, because the MAC is calculated over the entire message - including the '{digest_name}' prefix. - """ - import hmac - response_digest, response_mac = _get_digest_name_and_payload(response) - response_digest = response_digest or 'md5' - try: - expected = hmac.new(authkey, message, response_digest).digest() - except ValueError: - raise AuthenticationError(f'{response_digest=} unsupported') - if len(expected) != len(response_mac): - raise AuthenticationError( - f'expected {response_digest!r} of length {len(expected)} ' - f'got {len(response_mac)}') - if not hmac.compare_digest(expected, response_mac): - raise AuthenticationError('digest received was wrong') - - -def deliver_challenge(connection, authkey: bytes, digest_name='sha256'): - if not isinstance(authkey, bytes): - raise ValueError( - "Authkey must be bytes, not {0!s}".format(type(authkey))) - assert MESSAGE_LENGTH > _MD5ONLY_MESSAGE_LENGTH, "protocol constraint" - message = os.urandom(MESSAGE_LENGTH) - message = b'{%s}%s' % (digest_name.encode('ascii'), message) - # Even when sending a challenge to a legacy client that does not support - # digest prefixes, they'll take the entire thing as a challenge and - # respond to it with a raw HMAC-MD5. 
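# Illustrative sketch, not part of the deleted file: the response half of the
# handshake documented above. The HMAC covers the whole challenge, including
# its b"{digest_name}" prefix, which is what prevents digest downgrades.
import hmac
import os

authkey = b"secret"
challenge = b"{sha256}" + os.urandom(32)   # a modern, prefixed challenge
response = b"{sha256}" + hmac.new(authkey, challenge, "sha256").digest()

expected = hmac.new(authkey, challenge, "sha256").digest()
assert hmac.compare_digest(expected, response[len(b"{sha256}"):])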
- connection.send_bytes(_CHALLENGE + message) - response = connection.recv_bytes(256) # reject large message - try: - _verify_challenge(authkey, message, response) - except AuthenticationError: - connection.send_bytes(_FAILURE) - raise - else: - connection.send_bytes(_WELCOME) - - -def answer_challenge(connection, authkey: bytes): - if not isinstance(authkey, bytes): - raise ValueError( - "Authkey must be bytes, not {0!s}".format(type(authkey))) - message = connection.recv_bytes(256) # reject large message - if not message.startswith(_CHALLENGE): - raise AuthenticationError( - f'Protocol error, expected challenge: {message=}') - message = message[len(_CHALLENGE):] - if len(message) < _MD5ONLY_MESSAGE_LENGTH: - raise AuthenticationError(f'challenge too short: {len(message)} bytes') - digest = _create_response(authkey, message) - connection.send_bytes(digest) - response = connection.recv_bytes(256) # reject large message - if response != _WELCOME: - raise AuthenticationError('digest sent was rejected') - -# -# Support for using xmlrpclib for serialization -# - -class ConnectionWrapper(object): - def __init__(self, conn, dumps, loads): - self._conn = conn - self._dumps = dumps - self._loads = loads - for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): - obj = getattr(conn, attr) - setattr(self, attr, obj) - def send(self, obj): - s = self._dumps(obj) - self._conn.send_bytes(s) - def recv(self): - s = self._conn.recv_bytes() - return self._loads(s) - -def _xml_dumps(obj): - return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') - -def _xml_loads(s): - (obj,), method = xmlrpclib.loads(s.decode('utf-8')) - return obj - -class XmlListener(Listener): - def accept(self): - global xmlrpclib - import xmlrpc.client as xmlrpclib - obj = Listener.accept(self) - return ConnectionWrapper(obj, _xml_dumps, _xml_loads) - -def XmlClient(*args, **kwds): - global xmlrpclib - import xmlrpc.client as xmlrpclib - return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) - -# -# Wait -# - -if sys.platform == 'win32': - - def _exhaustive_wait(handles, timeout): - # Return ALL handles which are currently signalled. (Only - # returning the first signalled might create starvation issues.) - L = list(handles) - ready = [] - # Windows limits WaitForMultipleObjects at 64 handles, and we use a - # few for synchronisation, so we switch to batched waits at 60. - if len(L) > 60: - try: - res = _winapi.BatchedWaitForMultipleObjects(L, False, timeout) - except TimeoutError: - return [] - ready.extend(L[i] for i in res) - if res: - L = [h for i, h in enumerate(L) if i > res[0] & i not in res] - timeout = 0 - while L: - short_L = L[:60] if len(L) > 60 else L - res = _winapi.WaitForMultipleObjects(short_L, False, timeout) - if res == WAIT_TIMEOUT: - break - elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): - res -= WAIT_OBJECT_0 - elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): - res -= WAIT_ABANDONED_0 - else: - raise RuntimeError('Should not get here') - ready.append(L[res]) - L = L[res+1:] - timeout = 0 - return ready - - _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} - - def wait(object_list, timeout=None): - ''' - Wait till an object in object_list is ready/readable. - - Returns list of those objects in object_list which are ready/readable. 
- ''' - if timeout is None: - timeout = INFINITE - elif timeout < 0: - timeout = 0 - else: - timeout = int(timeout * 1000 + 0.5) - - object_list = list(object_list) - waithandle_to_obj = {} - ov_list = [] - ready_objects = set() - ready_handles = set() - - try: - for o in object_list: - try: - fileno = getattr(o, 'fileno') - except AttributeError: - waithandle_to_obj[o.__index__()] = o - else: - # start an overlapped read of length zero - try: - ov, err = _winapi.ReadFile(fileno(), 0, True) - except OSError as e: - ov, err = None, e.winerror - if err not in _ready_errors: - raise - if err == _winapi.ERROR_IO_PENDING: - ov_list.append(ov) - waithandle_to_obj[ov.event] = o - else: - # If o.fileno() is an overlapped pipe handle and - # err == 0 then there is a zero length message - # in the pipe, but it HAS NOT been consumed... - if ov and sys.getwindowsversion()[:2] >= (6, 2): - # ... except on Windows 8 and later, where - # the message HAS been consumed. - try: - _, err = ov.GetOverlappedResult(False) - except OSError as e: - err = e.winerror - if not err and hasattr(o, '_got_empty_message'): - o._got_empty_message = True - ready_objects.add(o) - timeout = 0 - - ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) - finally: - # request that overlapped reads stop - for ov in ov_list: - ov.cancel() - - # wait for all overlapped reads to stop - for ov in ov_list: - try: - _, err = ov.GetOverlappedResult(True) - except OSError as e: - err = e.winerror - if err not in _ready_errors: - raise - if err != _winapi.ERROR_OPERATION_ABORTED: - o = waithandle_to_obj[ov.event] - ready_objects.add(o) - if err == 0: - # If o.fileno() is an overlapped pipe handle then - # a zero length message HAS been consumed. - if hasattr(o, '_got_empty_message'): - o._got_empty_message = True - - ready_objects.update(waithandle_to_obj[h] for h in ready_handles) - return [o for o in object_list if o in ready_objects] - -else: - - import selectors - - # poll/select have the advantage of not requiring any extra file - # descriptor, contrarily to epoll/kqueue (also, they require a single - # syscall). - if hasattr(selectors, 'PollSelector'): - _WaitSelector = selectors.PollSelector - else: - _WaitSelector = selectors.SelectSelector - - def wait(object_list, timeout=None): - ''' - Wait till an object in object_list is ready/readable. - - Returns list of those objects in object_list which are ready/readable. - ''' - with _WaitSelector() as selector: - for obj in object_list: - selector.register(obj, selectors.EVENT_READ) - - if timeout is not None: - deadline = time.monotonic() + timeout - - while True: - ready = selector.select(timeout) - if ready: - return [key.fileobj for (key, events) in ready] - else: - if timeout is not None: - timeout = deadline - time.monotonic() - if timeout < 0: - return ready - -# -# Make connection and socket objects shareable if possible -# - -if sys.platform == 'win32': - def reduce_connection(conn): - handle = conn.fileno() - with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: - from . 
import resource_sharer - ds = resource_sharer.DupSocket(s) - return rebuild_connection, (ds, conn.readable, conn.writable) - def rebuild_connection(ds, readable, writable): - sock = ds.detach() - return Connection(sock.detach(), readable, writable) - reduction.register(Connection, reduce_connection) - - def reduce_pipe_connection(conn): - access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | - (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) - dh = reduction.DupHandle(conn.fileno(), access) - return rebuild_pipe_connection, (dh, conn.readable, conn.writable) - def rebuild_pipe_connection(dh, readable, writable): - handle = dh.detach() - return PipeConnection(handle, readable, writable) - reduction.register(PipeConnection, reduce_pipe_connection) - -else: - def reduce_connection(conn): - df = reduction.DupFd(conn.fileno()) - return rebuild_connection, (df, conn.readable, conn.writable) - def rebuild_connection(df, readable, writable): - fd = df.detach() - return Connection(fd, readable, writable) - reduction.register(Connection, reduce_connection) diff --git a/Python313_13_x86_Template/Lib/multiprocessing/context.py b/Python313_13_x86_Template/Lib/multiprocessing/context.py deleted file mode 100644 index 07c8a5d1..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/context.py +++ /dev/null @@ -1,383 +0,0 @@ -import os -import sys -import threading - -from . import process -from . import reduction - -__all__ = () - -# -# Exceptions -# - -class ProcessError(Exception): - pass - -class BufferTooShort(ProcessError): - pass - -class TimeoutError(ProcessError): - pass - -class AuthenticationError(ProcessError): - pass - -# -# Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py -# - -class BaseContext(object): - - ProcessError = ProcessError - BufferTooShort = BufferTooShort - TimeoutError = TimeoutError - AuthenticationError = AuthenticationError - - current_process = staticmethod(process.current_process) - parent_process = staticmethod(process.parent_process) - active_children = staticmethod(process.active_children) - - def cpu_count(self): - '''Returns the number of CPUs in the system''' - num = os.cpu_count() - if num is None: - raise NotImplementedError('cannot determine number of cpus') - else: - return num - - def Manager(self): - '''Returns a manager associated with a running server process - - The managers methods such as `Lock()`, `Condition()` and `Queue()` - can be used to create shared objects. 
- ''' - from .managers import SyncManager - m = SyncManager(ctx=self.get_context()) - m.start() - return m - - def Pipe(self, duplex=True): - '''Returns two connection object connected by a pipe''' - from .connection import Pipe - return Pipe(duplex) - - def Lock(self): - '''Returns a non-recursive lock object''' - from .synchronize import Lock - return Lock(ctx=self.get_context()) - - def RLock(self): - '''Returns a recursive lock object''' - from .synchronize import RLock - return RLock(ctx=self.get_context()) - - def Condition(self, lock=None): - '''Returns a condition object''' - from .synchronize import Condition - return Condition(lock, ctx=self.get_context()) - - def Semaphore(self, value=1): - '''Returns a semaphore object''' - from .synchronize import Semaphore - return Semaphore(value, ctx=self.get_context()) - - def BoundedSemaphore(self, value=1): - '''Returns a bounded semaphore object''' - from .synchronize import BoundedSemaphore - return BoundedSemaphore(value, ctx=self.get_context()) - - def Event(self): - '''Returns an event object''' - from .synchronize import Event - return Event(ctx=self.get_context()) - - def Barrier(self, parties, action=None, timeout=None): - '''Returns a barrier object''' - from .synchronize import Barrier - return Barrier(parties, action, timeout, ctx=self.get_context()) - - def Queue(self, maxsize=0): - '''Returns a queue object''' - from .queues import Queue - return Queue(maxsize, ctx=self.get_context()) - - def JoinableQueue(self, maxsize=0): - '''Returns a queue object''' - from .queues import JoinableQueue - return JoinableQueue(maxsize, ctx=self.get_context()) - - def SimpleQueue(self): - '''Returns a queue object''' - from .queues import SimpleQueue - return SimpleQueue(ctx=self.get_context()) - - def Pool(self, processes=None, initializer=None, initargs=(), - maxtasksperchild=None): - '''Returns a process pool object''' - from .pool import Pool - return Pool(processes, initializer, initargs, maxtasksperchild, - context=self.get_context()) - - def RawValue(self, typecode_or_type, *args): - '''Returns a shared object''' - from .sharedctypes import RawValue - return RawValue(typecode_or_type, *args) - - def RawArray(self, typecode_or_type, size_or_initializer): - '''Returns a shared array''' - from .sharedctypes import RawArray - return RawArray(typecode_or_type, size_or_initializer) - - def Value(self, typecode_or_type, *args, lock=True): - '''Returns a synchronized shared object''' - from .sharedctypes import Value - return Value(typecode_or_type, *args, lock=lock, - ctx=self.get_context()) - - def Array(self, typecode_or_type, size_or_initializer, *, lock=True): - '''Returns a synchronized shared array''' - from .sharedctypes import Array - return Array(typecode_or_type, size_or_initializer, lock=lock, - ctx=self.get_context()) - - def freeze_support(self): - '''Check whether this is a fake forked process in a frozen executable. - If so then run code specified by commandline and exit. - ''' - # gh-140814: allow_none=True avoids locking in the default start - # method, which would cause a later set_start_method() to fail. - # None is safe to pass through: spawn.freeze_support() - # independently detects whether this process is a spawned - # child, so the start method check here is only an optimization. 
- if (getattr(sys, 'frozen', False) - and self.get_start_method(allow_none=True) in ('spawn', None)): - from .spawn import freeze_support - freeze_support() - - def get_logger(self): - '''Return package logger -- if it does not already exist then - it is created. - ''' - from .util import get_logger - return get_logger() - - def log_to_stderr(self, level=None): - '''Turn on logging and add a handler which prints to stderr''' - from .util import log_to_stderr - return log_to_stderr(level) - - def allow_connection_pickling(self): - '''Install support for sending connections and sockets - between processes - ''' - # This is undocumented. In previous versions of multiprocessing - # its only effect was to make socket objects inheritable on Windows. - from . import connection - - def set_executable(self, executable): - '''Sets the path to a python.exe or pythonw.exe binary used to run - child processes instead of sys.executable when using the 'spawn' - start method. Useful for people embedding Python. - ''' - from .spawn import set_executable - set_executable(executable) - - def set_forkserver_preload(self, module_names): - '''Set list of module names to try to load in forkserver process. - This is really just a hint. - ''' - from .forkserver import set_forkserver_preload - set_forkserver_preload(module_names) - - def get_context(self, method=None): - if method is None: - return self - try: - ctx = _concrete_contexts[method] - except KeyError: - raise ValueError('cannot find context for %r' % method) from None - ctx._check_available() - return ctx - - def get_start_method(self, allow_none=False): - return self._name - - def set_start_method(self, method, force=False): - raise ValueError('cannot set start method of concrete context') - - @property - def reducer(self): - '''Controls how objects will be reduced to a form that can be - shared with other processes.''' - return globals().get('reduction') - - @reducer.setter - def reducer(self, reduction): - globals()['reduction'] = reduction - - def _check_available(self): - pass - -# -# Type of default context -- underlying context can be set at most once -# - -class Process(process.BaseProcess): - _start_method = None - @staticmethod - def _Popen(process_obj): - return _default_context.get_context().Process._Popen(process_obj) - - @staticmethod - def _after_fork(): - return _default_context.get_context().Process._after_fork() - -class DefaultContext(BaseContext): - Process = Process - - def __init__(self, context): - self._default_context = context - self._actual_context = None - - def get_context(self, method=None): - if method is None: - if self._actual_context is None: - self._actual_context = self._default_context - return self._actual_context - else: - return super().get_context(method) - - def set_start_method(self, method, force=False): - if self._actual_context is not None and not force: - raise RuntimeError('context has already been set') - if method is None and force: - self._actual_context = None - return - self._actual_context = self.get_context(method) - - def get_start_method(self, allow_none=False): - if self._actual_context is None: - if allow_none: - return None - self._actual_context = self._default_context - return self._actual_context._name - - def get_all_start_methods(self): - """Returns a list of the supported start methods, default first.""" - if sys.platform == 'win32': - return ['spawn'] - else: - methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] - if reduction.HAVE_SEND_HANDLE: - 
methods.append('forkserver') - return methods - - -# -# Context types for fixed start method -# - -if sys.platform != 'win32': - - class ForkProcess(process.BaseProcess): - _start_method = 'fork' - @staticmethod - def _Popen(process_obj): - from .popen_fork import Popen - return Popen(process_obj) - - class SpawnProcess(process.BaseProcess): - _start_method = 'spawn' - @staticmethod - def _Popen(process_obj): - from .popen_spawn_posix import Popen - return Popen(process_obj) - - @staticmethod - def _after_fork(): - # process is spawned, nothing to do - pass - - class ForkServerProcess(process.BaseProcess): - _start_method = 'forkserver' - @staticmethod - def _Popen(process_obj): - from .popen_forkserver import Popen - return Popen(process_obj) - - class ForkContext(BaseContext): - _name = 'fork' - Process = ForkProcess - - class SpawnContext(BaseContext): - _name = 'spawn' - Process = SpawnProcess - - class ForkServerContext(BaseContext): - _name = 'forkserver' - Process = ForkServerProcess - def _check_available(self): - if not reduction.HAVE_SEND_HANDLE: - raise ValueError('forkserver start method not available') - - _concrete_contexts = { - 'fork': ForkContext(), - 'spawn': SpawnContext(), - 'forkserver': ForkServerContext(), - } - if sys.platform == 'darwin': - # bpo-33725: running arbitrary code after fork() is no longer reliable - # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. - _default_context = DefaultContext(_concrete_contexts['spawn']) - else: - _default_context = DefaultContext(_concrete_contexts['fork']) - -else: - - class SpawnProcess(process.BaseProcess): - _start_method = 'spawn' - @staticmethod - def _Popen(process_obj): - from .popen_spawn_win32 import Popen - return Popen(process_obj) - - @staticmethod - def _after_fork(): - # process is spawned, nothing to do - pass - - class SpawnContext(BaseContext): - _name = 'spawn' - Process = SpawnProcess - - _concrete_contexts = { - 'spawn': SpawnContext(), - } - _default_context = DefaultContext(_concrete_contexts['spawn']) - -# -# Force the start method -# - -def _force_start_method(method): - _default_context._actual_context = _concrete_contexts[method] - -# -# Check that the current thread is spawning a child process -# - -_tls = threading.local() - -def get_spawning_popen(): - return getattr(_tls, 'spawning_popen', None) - -def set_spawning_popen(popen): - _tls.spawning_popen = popen - -def assert_spawning(obj): - if get_spawning_popen() is None: - raise RuntimeError( - '%s objects should only be shared between processes' - ' through inheritance' % type(obj).__name__ - ) diff --git a/Python313_13_x86_Template/Lib/multiprocessing/dummy/__init__.py b/Python313_13_x86_Template/Lib/multiprocessing/dummy/__init__.py deleted file mode 100644 index 6a146860..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/dummy/__init__.py +++ /dev/null @@ -1,126 +0,0 @@ -# -# Support for the API of the multiprocessing package using threads -# -# multiprocessing/dummy/__init__.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. 
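Before the dummy module below, one note on the context machinery just deleted: it is what backs the public `multiprocessing.get_context()` / `set_start_method()` API. A small usage sketch of that API, standard library only:

```python
import multiprocessing as mp

def work(q):
    q.put(mp.current_process().name)

if __name__ == '__main__':
    # Request an explicit start method via a context object instead of
    # mutating the process-global default with set_start_method().
    ctx = mp.get_context('spawn')
    q = ctx.Queue()
    p = ctx.Process(target=work, args=(q,))
    p.start()
    print(q.get())
    p.join()
```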
-# - -__all__ = [ - 'Process', 'current_process', 'active_children', 'freeze_support', - 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', - 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' - ] - -# -# Imports -# - -import threading -import sys -import weakref -import array - -from .connection import Pipe -from threading import Lock, RLock, Semaphore, BoundedSemaphore -from threading import Event, Condition, Barrier -from queue import Queue - -# -# -# - -class DummyProcess(threading.Thread): - - def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): - threading.Thread.__init__(self, group, target, name, args, kwargs) - self._pid = None - self._children = weakref.WeakKeyDictionary() - self._start_called = False - self._parent = current_process() - - def start(self): - if self._parent is not current_process(): - raise RuntimeError( - "Parent is {0!r} but current_process is {1!r}".format( - self._parent, current_process())) - self._start_called = True - if hasattr(self._parent, '_children'): - self._parent._children[self] = None - threading.Thread.start(self) - - @property - def exitcode(self): - if self._start_called and not self.is_alive(): - return 0 - else: - return None - -# -# -# - -Process = DummyProcess -current_process = threading.current_thread -current_process()._children = weakref.WeakKeyDictionary() - -def active_children(): - children = current_process()._children - for p in list(children): - if not p.is_alive(): - children.pop(p, None) - return list(children) - -def freeze_support(): - pass - -# -# -# - -class Namespace(object): - def __init__(self, /, **kwds): - self.__dict__.update(kwds) - def __repr__(self): - items = list(self.__dict__.items()) - temp = [] - for name, value in items: - if not name.startswith('_'): - temp.append('%s=%r' % (name, value)) - temp.sort() - return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) - -dict = dict -list = list - -def Array(typecode, sequence, lock=True): - return array.array(typecode, sequence) - -class Value(object): - def __init__(self, typecode, value, lock=True): - self._typecode = typecode - self._value = value - - @property - def value(self): - return self._value - - @value.setter - def value(self, value): - self._value = value - - def __repr__(self): - return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) - -def Manager(): - return sys.modules[__name__] - -def shutdown(): - pass - -def Pool(processes=None, initializer=None, initargs=()): - from ..pool import ThreadPool - return ThreadPool(processes, initializer, initargs) - -JoinableQueue = Queue diff --git a/Python313_13_x86_Template/Lib/multiprocessing/forkserver.py b/Python313_13_x86_Template/Lib/multiprocessing/forkserver.py deleted file mode 100644 index 5eacb53f..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/forkserver.py +++ /dev/null @@ -1,373 +0,0 @@ -import atexit -import errno -import os -import selectors -import signal -import socket -import struct -import sys -import threading -import warnings - -from . import connection -from . import process -from .context import reduction -from . import resource_tracker -from . import spawn -from . 
import util - -__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', - 'set_forkserver_preload'] - -# -# -# - -MAXFDS_TO_SEND = 256 -SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t - -# -# Forkserver class -# - -class ForkServer(object): - - def __init__(self): - self._forkserver_address = None - self._forkserver_alive_fd = None - self._forkserver_pid = None - self._inherited_fds = None - self._lock = threading.Lock() - self._preload_modules = ['__main__'] - - def _stop(self): - # Method used by unit tests to stop the server - with self._lock: - self._stop_unlocked() - - def _stop_unlocked(self): - if self._forkserver_pid is None: - return - - # close the "alive" file descriptor asks the server to stop - os.close(self._forkserver_alive_fd) - self._forkserver_alive_fd = None - - os.waitpid(self._forkserver_pid, 0) - self._forkserver_pid = None - - if not util.is_abstract_socket_namespace(self._forkserver_address): - os.unlink(self._forkserver_address) - self._forkserver_address = None - - def set_forkserver_preload(self, modules_names): - '''Set list of module names to try to load in forkserver process.''' - if not all(type(mod) is str for mod in modules_names): - raise TypeError('module_names must be a list of strings') - self._preload_modules = modules_names - - def get_inherited_fds(self): - '''Return list of fds inherited from parent process. - - This returns None if the current process was not started by fork - server. - ''' - return self._inherited_fds - - def connect_to_new_process(self, fds): - '''Request forkserver to create a child process. - - Returns a pair of fds (status_r, data_w). The calling process can read - the child process's pid and (eventually) its returncode from status_r. - The calling process should write to data_w the pickled preparation and - process data. - ''' - self.ensure_running() - if len(fds) + 4 >= MAXFDS_TO_SEND: - raise ValueError('too many fds') - with socket.socket(socket.AF_UNIX) as client: - client.connect(self._forkserver_address) - parent_r, child_w = os.pipe() - child_r, parent_w = os.pipe() - allfds = [child_r, child_w, self._forkserver_alive_fd, - resource_tracker.getfd()] - allfds += fds - try: - reduction.sendfds(client, allfds) - return parent_r, parent_w - except: - os.close(parent_r) - os.close(parent_w) - raise - finally: - os.close(child_r) - os.close(child_w) - - def ensure_running(self): - '''Make sure that a fork server is running. - - This can be called from any process. Note that usually a child - process will just reuse the forkserver started by its parent, so - ensure_running() will do nothing. - ''' - with self._lock: - resource_tracker.ensure_running() - if self._forkserver_pid is not None: - # forkserver was launched before, is it still running? - pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) - if not pid: - # still alive - return - # dead, launch it again - os.close(self._forkserver_alive_fd) - self._forkserver_address = None - self._forkserver_alive_fd = None - self._forkserver_pid = None - - # gh-144503: sys_argv is passed as real argv elements after the - # ``-c cmd`` rather than repr'd into main_kws so that a large - # parent sys.argv cannot push the single ``-c`` command string - # over the OS per-argument length limit (MAX_ARG_STRLEN on Linux). - # The child sees them as sys.argv[1:]. 
- cmd = ('import sys; ' - 'from multiprocessing.forkserver import main; ' - 'main(%d, %d, %r, sys_argv=sys.argv[1:], **%r)') - - main_kws = {} - sys_argv = None - if self._preload_modules: - data = spawn.get_preparation_data('ignore') - if 'sys_path' in data: - main_kws['sys_path'] = data['sys_path'] - if 'init_main_from_path' in data: - main_kws['main_path'] = data['init_main_from_path'] - if 'sys_argv' in data: - sys_argv = data['sys_argv'] - - with socket.socket(socket.AF_UNIX) as listener: - address = connection.arbitrary_address('AF_UNIX') - listener.bind(address) - if not util.is_abstract_socket_namespace(address): - os.chmod(address, 0o600) - listener.listen() - - # all client processes own the write end of the "alive" pipe; - # when they all terminate the read end becomes ready. - alive_r, alive_w = os.pipe() - try: - fds_to_pass = [listener.fileno(), alive_r] - cmd %= (listener.fileno(), alive_r, self._preload_modules, - main_kws) - exe = spawn.get_executable() - args = [exe] + util._args_from_interpreter_flags() - args += ['-c', cmd] - if sys_argv is not None: - args += sys_argv - pid = util.spawnv_passfds(exe, args, fds_to_pass) - except: - os.close(alive_w) - raise - finally: - os.close(alive_r) - self._forkserver_address = address - self._forkserver_alive_fd = alive_w - self._forkserver_pid = pid - -# -# -# - -def main(listener_fd, alive_r, preload, main_path=None, sys_path=None, - *, sys_argv=None): - '''Run forkserver.''' - if preload: - if sys_argv is not None: - sys.argv[:] = sys_argv - if sys_path is not None: - sys.path[:] = sys_path - if '__main__' in preload and main_path is not None: - process.current_process()._inheriting = True - try: - spawn.import_main_path(main_path) - finally: - del process.current_process()._inheriting - for modname in preload: - try: - __import__(modname) - except ImportError: - pass - - # gh-135335: flush stdout/stderr in case any of the preloaded modules - # wrote to them, otherwise children might inherit buffered data - util._flush_std_streams() - - util._close_stdin() - - sig_r, sig_w = os.pipe() - os.set_blocking(sig_r, False) - os.set_blocking(sig_w, False) - - def sigchld_handler(*_unused): - # Dummy signal handler, doesn't do anything - pass - - handlers = { - # unblocking SIGCHLD allows the wakeup fd to notify our event loop - signal.SIGCHLD: sigchld_handler, - # protect the process from ^C - signal.SIGINT: signal.SIG_IGN, - } - old_handlers = {sig: signal.signal(sig, val) - for (sig, val) in handlers.items()} - - # calling os.write() in the Python signal handler is racy - signal.set_wakeup_fd(sig_w) - - # map child pids to client fds - pid_to_fd = {} - - with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ - selectors.DefaultSelector() as selector: - _forkserver._forkserver_address = listener.getsockname() - - selector.register(listener, selectors.EVENT_READ) - selector.register(alive_r, selectors.EVENT_READ) - selector.register(sig_r, selectors.EVENT_READ) - - while True: - try: - while True: - rfds = [key.fileobj for (key, events) in selector.select()] - if rfds: - break - - if alive_r in rfds: - # EOF because no more client processes left - assert os.read(alive_r, 1) == b'', "Not at EOF?" 
- raise SystemExit - - if sig_r in rfds: - # Got SIGCHLD - os.read(sig_r, 65536) # exhaust - while True: - # Scan for child processes - try: - pid, sts = os.waitpid(-1, os.WNOHANG) - except ChildProcessError: - break - if pid == 0: - break - child_w = pid_to_fd.pop(pid, None) - if child_w is not None: - returncode = os.waitstatus_to_exitcode(sts) - - # Send exit code to client process - try: - write_signed(child_w, returncode) - except BrokenPipeError: - # client vanished - pass - os.close(child_w) - else: - # This shouldn't happen really - warnings.warn('forkserver: waitpid returned ' - 'unexpected pid %d' % pid) - - if listener in rfds: - # Incoming fork request - with listener.accept()[0] as s: - # Receive fds from client - fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) - if len(fds) > MAXFDS_TO_SEND: - raise RuntimeError( - "Too many ({0:n}) fds to send".format( - len(fds))) - child_r, child_w, *fds = fds - s.close() - pid = os.fork() - if pid == 0: - # Child - code = 1 - try: - listener.close() - selector.close() - unused_fds = [alive_r, child_w, sig_r, sig_w] - unused_fds.extend(pid_to_fd.values()) - atexit._clear() - atexit.register(util._exit_function) - code = _serve_one(child_r, fds, - unused_fds, - old_handlers) - except Exception: - sys.excepthook(*sys.exc_info()) - sys.stderr.flush() - finally: - atexit._run_exitfuncs() - os._exit(code) - else: - # Send pid to client process - try: - write_signed(child_w, pid) - except BrokenPipeError: - # client vanished - pass - pid_to_fd[pid] = child_w - os.close(child_r) - for fd in fds: - os.close(fd) - - except OSError as e: - if e.errno != errno.ECONNABORTED: - raise - - -def _serve_one(child_r, fds, unused_fds, handlers): - # close unnecessary stuff and reset signal handlers - signal.set_wakeup_fd(-1) - for sig, val in handlers.items(): - signal.signal(sig, val) - for fd in unused_fds: - os.close(fd) - - (_forkserver._forkserver_alive_fd, - resource_tracker._resource_tracker._fd, - *_forkserver._inherited_fds) = fds - - # Run process object received over pipe - parent_sentinel = os.dup(child_r) - code = spawn._main(child_r, parent_sentinel) - - return code - - -# -# Read and write signed numbers -# - -def read_signed(fd): - data = b'' - length = SIGNED_STRUCT.size - while len(data) < length: - s = os.read(fd, length - len(data)) - if not s: - raise EOFError('unexpected EOF') - data += s - return SIGNED_STRUCT.unpack(data)[0] - -def write_signed(fd, n): - msg = SIGNED_STRUCT.pack(n) - while msg: - nbytes = os.write(fd, msg) - if nbytes == 0: - raise RuntimeError('should not get here') - msg = msg[nbytes:] - -# -# -# - -_forkserver = ForkServer() -ensure_running = _forkserver.ensure_running -get_inherited_fds = _forkserver.get_inherited_fds -connect_to_new_process = _forkserver.connect_to_new_process -set_forkserver_preload = _forkserver.set_forkserver_preload diff --git a/Python313_13_x86_Template/Lib/multiprocessing/managers.py b/Python313_13_x86_Template/Lib/multiprocessing/managers.py deleted file mode 100644 index ef791c27..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/managers.py +++ /dev/null @@ -1,1397 +0,0 @@ -# -# Module providing manager classes for dealing -# with shared objects -# -# multiprocessing/managers.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. 
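The forkserver module deleted above is driven through the same context API; `set_forkserver_preload()` (defined on `BaseContext` in the context.py hunk) is the user-facing hook for the preload list that the server's `main()` consumes. A sketch, assuming a POSIX host where the 'forkserver' start method is available — 'numpy' stands in for any heavy import worth amortizing across children:

```python
import multiprocessing as mp

def work():
    print('hello from a forkserver child')

if __name__ == '__main__':
    ctx = mp.get_context('forkserver')     # POSIX-only start method
    ctx.set_forkserver_preload(['numpy'])  # hypothetical heavy module
    p = ctx.Process(target=work)
    p.start()
    p.join()
```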
-# - -__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] - -# -# Imports -# - -import sys -import threading -import signal -import array -import queue -import time -import types -import os -from os import getpid - -from traceback import format_exc - -from . import connection -from .context import reduction, get_spawning_popen, ProcessError -from . import pool -from . import process -from . import util -from . import get_context -try: - from . import shared_memory -except ImportError: - HAS_SHMEM = False -else: - HAS_SHMEM = True - __all__.append('SharedMemoryManager') - -# -# Register some things for pickling -# - -def reduce_array(a): - return array.array, (a.typecode, a.tobytes()) -reduction.register(array.array, reduce_array) - -view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] -def rebuild_as_list(obj): - return list, (list(obj),) -for view_type in view_types: - reduction.register(view_type, rebuild_as_list) -del view_type, view_types - -# -# Type for identifying shared objects -# - -class Token(object): - ''' - Type to uniquely identify a shared object - ''' - __slots__ = ('typeid', 'address', 'id') - - def __init__(self, typeid, address, id): - (self.typeid, self.address, self.id) = (typeid, address, id) - - def __getstate__(self): - return (self.typeid, self.address, self.id) - - def __setstate__(self, state): - (self.typeid, self.address, self.id) = state - - def __repr__(self): - return '%s(typeid=%r, address=%r, id=%r)' % \ - (self.__class__.__name__, self.typeid, self.address, self.id) - -# -# Function for communication with a manager's server process -# - -def dispatch(c, id, methodname, args=(), kwds={}): - ''' - Send a message to manager using connection `c` and return response - ''' - c.send((id, methodname, args, kwds)) - kind, result = c.recv() - if kind == '#RETURN': - return result - try: - raise convert_to_error(kind, result) - finally: - del result # break reference cycle - -def convert_to_error(kind, result): - if kind == '#ERROR': - return result - elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): - if not isinstance(result, str): - raise TypeError( - "Result {0!r} (kind '{1}') type is {2}, not str".format( - result, kind, type(result))) - if kind == '#UNSERIALIZABLE': - return RemoteError('Unserializable message: %s\n' % result) - else: - return RemoteError(result) - else: - return ValueError('Unrecognized message type {!r}'.format(kind)) - -class RemoteError(Exception): - def __str__(self): - return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) - -# -# Functions for finding the method names of an object -# - -def all_methods(obj): - ''' - Return a list of names of methods of `obj` - ''' - temp = [] - for name in dir(obj): - func = getattr(obj, name) - if callable(func): - temp.append(name) - return temp - -def public_methods(obj): - ''' - Return a list of names of methods of `obj` which do not start with '_' - ''' - return [name for name in all_methods(obj) if name[0] != '_'] - -# -# Server which is run in a process controlled by a manager -# - -class Server(object): - ''' - Server class which runs in a process controlled by a manager object - ''' - public = ['shutdown', 'create', 'accept_connection', 'get_methods', - 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] - - def __init__(self, registry, address, authkey, serializer): - if not isinstance(authkey, bytes): - raise TypeError( - "Authkey {0!r} is type {1!s}, not bytes".format( - authkey, type(authkey))) - self.registry = registry - self.authkey = 
process.AuthenticationString(authkey) - Listener, Client = listener_client[serializer] - - # do authentication later - self.listener = Listener(address=address, backlog=128) - self.address = self.listener.address - - self.id_to_obj = {'0': (None, ())} - self.id_to_refcount = {} - self.id_to_local_proxy_obj = {} - self.mutex = threading.Lock() - - def serve_forever(self): - ''' - Run the server forever - ''' - self.stop_event = threading.Event() - process.current_process()._manager_server = self - try: - accepter = threading.Thread(target=self.accepter) - accepter.daemon = True - accepter.start() - try: - while not self.stop_event.is_set(): - self.stop_event.wait(1) - except (KeyboardInterrupt, SystemExit): - pass - finally: - if sys.stdout != sys.__stdout__: # what about stderr? - util.debug('resetting stdout, stderr') - sys.stdout = sys.__stdout__ - sys.stderr = sys.__stderr__ - sys.exit(0) - - def accepter(self): - while True: - try: - c = self.listener.accept() - except OSError: - continue - t = threading.Thread(target=self.handle_request, args=(c,)) - t.daemon = True - t.start() - - def _handle_request(self, c): - request = None - try: - connection.deliver_challenge(c, self.authkey) - connection.answer_challenge(c, self.authkey) - request = c.recv() - ignore, funcname, args, kwds = request - assert funcname in self.public, '%r unrecognized' % funcname - func = getattr(self, funcname) - except Exception: - msg = ('#TRACEBACK', format_exc()) - else: - try: - result = func(c, *args, **kwds) - except Exception: - msg = ('#TRACEBACK', format_exc()) - else: - msg = ('#RETURN', result) - - try: - c.send(msg) - except Exception as e: - try: - c.send(('#TRACEBACK', format_exc())) - except Exception: - pass - util.info('Failure to send message: %r', msg) - util.info(' ... request was %r', request) - util.info(' ... 
exception was %r', e) - - def handle_request(self, conn): - ''' - Handle a new connection - ''' - try: - self._handle_request(conn) - except SystemExit: - # Server.serve_client() calls sys.exit(0) on EOF - pass - finally: - conn.close() - - def serve_client(self, conn): - ''' - Handle requests from the proxies in a particular process/thread - ''' - util.debug('starting server thread to service %r', - threading.current_thread().name) - - recv = conn.recv - send = conn.send - id_to_obj = self.id_to_obj - - while not self.stop_event.is_set(): - - try: - methodname = obj = None - request = recv() - ident, methodname, args, kwds = request - try: - obj, exposed, gettypeid = id_to_obj[ident] - except KeyError as ke: - try: - obj, exposed, gettypeid = \ - self.id_to_local_proxy_obj[ident] - except KeyError: - raise ke - - if methodname not in exposed: - raise AttributeError( - 'method %r of %r object is not in exposed=%r' % - (methodname, type(obj), exposed) - ) - - function = getattr(obj, methodname) - - try: - res = function(*args, **kwds) - except Exception as e: - msg = ('#ERROR', e) - else: - typeid = gettypeid and gettypeid.get(methodname, None) - if typeid: - rident, rexposed = self.create(conn, typeid, res) - token = Token(typeid, self.address, rident) - msg = ('#PROXY', (rexposed, token)) - else: - msg = ('#RETURN', res) - - except AttributeError: - if methodname is None: - msg = ('#TRACEBACK', format_exc()) - else: - try: - fallback_func = self.fallback_mapping[methodname] - result = fallback_func( - self, conn, ident, obj, *args, **kwds - ) - msg = ('#RETURN', result) - except Exception: - msg = ('#TRACEBACK', format_exc()) - - except EOFError: - util.debug('got EOF -- exiting thread serving %r', - threading.current_thread().name) - sys.exit(0) - - except Exception: - msg = ('#TRACEBACK', format_exc()) - - try: - try: - send(msg) - except Exception: - send(('#UNSERIALIZABLE', format_exc())) - except Exception as e: - util.info('exception in thread serving %r', - threading.current_thread().name) - util.info(' ... message was %r', msg) - util.info(' ... exception was %r', e) - conn.close() - sys.exit(1) - - def fallback_getvalue(self, conn, ident, obj): - return obj - - def fallback_str(self, conn, ident, obj): - return str(obj) - - def fallback_repr(self, conn, ident, obj): - return repr(obj) - - fallback_mapping = { - '__str__':fallback_str, - '__repr__':fallback_repr, - '#GETVALUE':fallback_getvalue - } - - def dummy(self, c): - pass - - def debug_info(self, c): - ''' - Return some info --- useful to spot problems with refcounting - ''' - # Perhaps include debug info about 'c'? 
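The request/reply protocol implemented by `dispatch()` and `serve_client()` is just tuples over a `Connection`. A toy round-trip over a local `Pipe`, simulating one exchange — no real `Server`; the one-entry dict stands in for `id_to_obj`:

```python
from multiprocessing import Pipe

client, server = Pipe()

# Client side, as in dispatch(): send (ident, methodname, args, kwds).
client.send(('0', 'upper', (), {}))

# Server side, as in serve_client(): look up the referent, call the
# method, reply with a ('#RETURN', result) pair.
ident, methodname, args, kwds = server.recv()
obj = {'0': 'hello'}[ident]
server.send(('#RETURN', getattr(obj, methodname)(*args, **kwds)))

kind, result = client.recv()
assert (kind, result) == ('#RETURN', 'HELLO')
```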
- with self.mutex: - result = [] - keys = list(self.id_to_refcount.keys()) - keys.sort() - for ident in keys: - if ident != '0': - result.append(' %s: refcount=%s\n %s' % - (ident, self.id_to_refcount[ident], - str(self.id_to_obj[ident][0])[:75])) - return '\n'.join(result) - - def number_of_objects(self, c): - ''' - Number of shared objects - ''' - # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' - return len(self.id_to_refcount) - - def shutdown(self, c): - ''' - Shutdown this process - ''' - try: - util.debug('manager received shutdown message') - c.send(('#RETURN', None)) - except: - import traceback - traceback.print_exc() - finally: - self.stop_event.set() - - def create(self, c, typeid, /, *args, **kwds): - ''' - Create a new shared object and return its id - ''' - with self.mutex: - callable, exposed, method_to_typeid, proxytype = \ - self.registry[typeid] - - if callable is None: - if kwds or (len(args) != 1): - raise ValueError( - "Without callable, must have one non-keyword argument") - obj = args[0] - else: - obj = callable(*args, **kwds) - - if exposed is None: - exposed = public_methods(obj) - if method_to_typeid is not None: - if not isinstance(method_to_typeid, dict): - raise TypeError( - "Method_to_typeid {0!r}: type {1!s}, not dict".format( - method_to_typeid, type(method_to_typeid))) - exposed = list(exposed) + list(method_to_typeid) - - ident = '%x' % id(obj) # convert to string because xmlrpclib - # only has 32 bit signed integers - util.debug('%r callable returned object with id %r', typeid, ident) - - self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) - if ident not in self.id_to_refcount: - self.id_to_refcount[ident] = 0 - - self.incref(c, ident) - return ident, tuple(exposed) - - def get_methods(self, c, token): - ''' - Return the methods of the shared object indicated by token - ''' - return tuple(self.id_to_obj[token.id][1]) - - def accept_connection(self, c, name): - ''' - Spawn a new thread to serve this connection - ''' - threading.current_thread().name = name - c.send(('#RETURN', None)) - self.serve_client(c) - - def incref(self, c, ident): - with self.mutex: - try: - self.id_to_refcount[ident] += 1 - except KeyError as ke: - # If no external references exist but an internal (to the - # manager) still does and a new external reference is created - # from it, restore the manager's tracking of it from the - # previously stashed internal ref. - if ident in self.id_to_local_proxy_obj: - self.id_to_refcount[ident] = 1 - self.id_to_obj[ident] = \ - self.id_to_local_proxy_obj[ident] - util.debug('Server re-enabled tracking & INCREF %r', ident) - else: - raise ke - - def decref(self, c, ident): - if ident not in self.id_to_refcount and \ - ident in self.id_to_local_proxy_obj: - util.debug('Server DECREF skipping %r', ident) - return - - with self.mutex: - if self.id_to_refcount[ident] <= 0: - raise AssertionError( - "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( - ident, self.id_to_obj[ident], - self.id_to_refcount[ident])) - self.id_to_refcount[ident] -= 1 - if self.id_to_refcount[ident] == 0: - del self.id_to_refcount[ident] - - if ident not in self.id_to_refcount: - # Two-step process in case the object turns out to contain other - # proxy objects (e.g. a managed list of managed lists). - # Otherwise, deleting self.id_to_obj[ident] would trigger the - # deleting of the stored value (another managed object) which would - # in turn attempt to acquire the mutex that is already held here. 
- self.id_to_obj[ident] = (None, (), None) # thread-safe - util.debug('disposing of obj with id %r', ident) - with self.mutex: - del self.id_to_obj[ident] - - -# -# Class to represent state of a manager -# - -class State(object): - __slots__ = ['value'] - INITIAL = 0 - STARTED = 1 - SHUTDOWN = 2 - -# -# Mapping from serializer name to Listener and Client types -# - -listener_client = { - 'pickle' : (connection.Listener, connection.Client), - 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) - } - -# -# Definition of BaseManager -# - -class BaseManager(object): - ''' - Base class for managers - ''' - _registry = {} - _Server = Server - - def __init__(self, address=None, authkey=None, serializer='pickle', - ctx=None, *, shutdown_timeout=1.0): - if authkey is None: - authkey = process.current_process().authkey - self._address = address # XXX not final address if eg ('', 0) - self._authkey = process.AuthenticationString(authkey) - self._state = State() - self._state.value = State.INITIAL - self._serializer = serializer - self._Listener, self._Client = listener_client[serializer] - self._ctx = ctx or get_context() - self._shutdown_timeout = shutdown_timeout - - def get_server(self): - ''' - Return server object with serve_forever() method and address attribute - ''' - if self._state.value != State.INITIAL: - if self._state.value == State.STARTED: - raise ProcessError("Already started server") - elif self._state.value == State.SHUTDOWN: - raise ProcessError("Manager has shut down") - else: - raise ProcessError( - "Unknown state {!r}".format(self._state.value)) - return Server(self._registry, self._address, - self._authkey, self._serializer) - - def connect(self): - ''' - Connect manager object to the server process - ''' - Listener, Client = listener_client[self._serializer] - conn = Client(self._address, authkey=self._authkey) - dispatch(conn, None, 'dummy') - self._state.value = State.STARTED - - def start(self, initializer=None, initargs=()): - ''' - Spawn a server process for this manager object - ''' - if self._state.value != State.INITIAL: - if self._state.value == State.STARTED: - raise ProcessError("Already started server") - elif self._state.value == State.SHUTDOWN: - raise ProcessError("Manager has shut down") - else: - raise ProcessError( - "Unknown state {!r}".format(self._state.value)) - - if initializer is not None and not callable(initializer): - raise TypeError('initializer must be a callable') - - # pipe over which we will retrieve address of server - reader, writer = connection.Pipe(duplex=False) - - # spawn process which runs a server - self._process = self._ctx.Process( - target=type(self)._run_server, - args=(self._registry, self._address, self._authkey, - self._serializer, writer, initializer, initargs), - ) - ident = ':'.join(str(i) for i in self._process._identity) - self._process.name = type(self).__name__ + '-' + ident - self._process.start() - - # get address of server - writer.close() - self._address = reader.recv() - reader.close() - - # register a finalizer - self._state.value = State.STARTED - self.shutdown = util.Finalize( - self, type(self)._finalize_manager, - args=(self._process, self._address, self._authkey, self._state, - self._Client, self._shutdown_timeout), - exitpriority=0 - ) - - @classmethod - def _run_server(cls, registry, address, authkey, serializer, writer, - initializer=None, initargs=()): - ''' - Create a server, report its address and run it - ''' - # bpo-36368: protect server process from KeyboardInterrupt signals - 
signal.signal(signal.SIGINT, signal.SIG_IGN) - - if initializer is not None: - initializer(*initargs) - - # create server - server = cls._Server(registry, address, authkey, serializer) - - # inform parent process of the server's address - writer.send(server.address) - writer.close() - - # run the manager - util.info('manager serving at %r', server.address) - server.serve_forever() - - def _create(self, typeid, /, *args, **kwds): - ''' - Create a new shared object; return the token and exposed tuple - ''' - assert self._state.value == State.STARTED, 'server not yet started' - conn = self._Client(self._address, authkey=self._authkey) - try: - id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) - finally: - conn.close() - return Token(typeid, self._address, id), exposed - - def join(self, timeout=None): - ''' - Join the manager process (if it has been spawned) - ''' - if self._process is not None: - self._process.join(timeout) - if not self._process.is_alive(): - self._process = None - - def _debug_info(self): - ''' - Return some info about the servers shared objects and connections - ''' - conn = self._Client(self._address, authkey=self._authkey) - try: - return dispatch(conn, None, 'debug_info') - finally: - conn.close() - - def _number_of_objects(self): - ''' - Return the number of shared objects - ''' - conn = self._Client(self._address, authkey=self._authkey) - try: - return dispatch(conn, None, 'number_of_objects') - finally: - conn.close() - - def __enter__(self): - if self._state.value == State.INITIAL: - self.start() - if self._state.value != State.STARTED: - if self._state.value == State.INITIAL: - raise ProcessError("Unable to start server") - elif self._state.value == State.SHUTDOWN: - raise ProcessError("Manager has shut down") - else: - raise ProcessError( - "Unknown state {!r}".format(self._state.value)) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.shutdown() - - @staticmethod - def _finalize_manager(process, address, authkey, state, _Client, - shutdown_timeout): - ''' - Shutdown the manager process; will be registered as a finalizer - ''' - if process.is_alive(): - util.info('sending shutdown message to manager') - try: - conn = _Client(address, authkey=authkey) - try: - dispatch(conn, None, 'shutdown') - finally: - conn.close() - except Exception: - pass - - process.join(timeout=shutdown_timeout) - if process.is_alive(): - util.info('manager still alive') - if hasattr(process, 'terminate'): - util.info('trying to `terminate()` manager process') - process.terminate() - process.join(timeout=shutdown_timeout) - if process.is_alive(): - util.info('manager still alive after terminate') - process.kill() - process.join() - - state.value = State.SHUTDOWN - try: - del BaseProxy._address_to_local[address] - except KeyError: - pass - - @property - def address(self): - return self._address - - @classmethod - def register(cls, typeid, callable=None, proxytype=None, exposed=None, - method_to_typeid=None, create_method=True): - ''' - Register a typeid with the manager type - ''' - if '_registry' not in cls.__dict__: - cls._registry = cls._registry.copy() - - if proxytype is None: - proxytype = AutoProxy - - exposed = exposed or getattr(proxytype, '_exposed_', None) - - method_to_typeid = method_to_typeid or \ - getattr(proxytype, '_method_to_typeid_', None) - - if method_to_typeid: - for key, value in list(method_to_typeid.items()): # isinstance? 
- assert type(key) is str, '%r is not a string' % key - assert type(value) is str, '%r is not a string' % value - - cls._registry[typeid] = ( - callable, exposed, method_to_typeid, proxytype - ) - - if create_method: - def temp(self, /, *args, **kwds): - util.debug('requesting creation of a shared %r object', typeid) - token, exp = self._create(typeid, *args, **kwds) - proxy = proxytype( - token, self._serializer, manager=self, - authkey=self._authkey, exposed=exp - ) - conn = self._Client(token.address, authkey=self._authkey) - dispatch(conn, None, 'decref', (token.id,)) - return proxy - temp.__name__ = typeid - setattr(cls, typeid, temp) - -# -# Subclass of set which get cleared after a fork -# - -class ProcessLocalSet(set): - def __init__(self): - util.register_after_fork(self, lambda obj: obj.clear()) - def __reduce__(self): - return type(self), () - -# -# Definition of BaseProxy -# - -class BaseProxy(object): - ''' - A base for proxies of shared objects - ''' - _address_to_local = {} - _mutex = util.ForkAwareThreadLock() - - # Each instance gets a `_serial` number. Unlike `id(...)`, this number - # is never reused. - _next_serial = 1 - - def __init__(self, token, serializer, manager=None, - authkey=None, exposed=None, incref=True, manager_owned=False): - with BaseProxy._mutex: - tls_serials = BaseProxy._address_to_local.get(token.address, None) - if tls_serials is None: - tls_serials = util.ForkAwareLocal(), ProcessLocalSet() - BaseProxy._address_to_local[token.address] = tls_serials - - self._serial = BaseProxy._next_serial - BaseProxy._next_serial += 1 - - # self._tls is used to record the connection used by this - # thread to communicate with the manager at token.address - self._tls = tls_serials[0] - - # self._all_serials is a set used to record the identities of all - # shared objects for which the current process owns references and - # which are in the manager at token.address - self._all_serials = tls_serials[1] - - self._token = token - self._id = self._token.id - self._manager = manager - self._serializer = serializer - self._Client = listener_client[serializer][1] - - # Should be set to True only when a proxy object is being created - # on the manager server; primary use case: nested proxy objects. - # RebuildProxy detects when a proxy is being created on the manager - # and sets this value appropriately. 
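`register()` above is also the public extension point for custom managers; the generated `temp` method is what a call like `manager.Maths()` resolves to. A sketch following the documented pattern — `MathsClass` and `MyManager` are illustrative names:

```python
from multiprocessing.managers import BaseManager

class MathsClass:
    def add(self, x, y):
        return x + y

class MyManager(BaseManager):
    pass

# Installs a create-method named 'Maths' on MyManager, built by the
# same `temp` closure seen in register() above.
MyManager.register('Maths', MathsClass)

if __name__ == '__main__':
    with MyManager() as manager:
        maths = manager.Maths()    # AutoProxy to a server-side instance
        print(maths.add(4, 3))     # -> 7
```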
- self._owned_by_manager = manager_owned - - if authkey is not None: - self._authkey = process.AuthenticationString(authkey) - elif self._manager is not None: - self._authkey = self._manager._authkey - else: - self._authkey = process.current_process().authkey - - if incref: - self._incref() - - util.register_after_fork(self, BaseProxy._after_fork) - - def _connect(self): - util.debug('making connection to manager') - name = process.current_process().name - if threading.current_thread().name != 'MainThread': - name += '|' + threading.current_thread().name - conn = self._Client(self._token.address, authkey=self._authkey) - dispatch(conn, None, 'accept_connection', (name,)) - self._tls.connection = conn - - def _callmethod(self, methodname, args=(), kwds={}): - ''' - Try to call a method of the referent and return a copy of the result - ''' - try: - conn = self._tls.connection - except AttributeError: - util.debug('thread %r does not own a connection', - threading.current_thread().name) - self._connect() - conn = self._tls.connection - - conn.send((self._id, methodname, args, kwds)) - kind, result = conn.recv() - - if kind == '#RETURN': - return result - elif kind == '#PROXY': - exposed, token = result - proxytype = self._manager._registry[token.typeid][-1] - token.address = self._token.address - proxy = proxytype( - token, self._serializer, manager=self._manager, - authkey=self._authkey, exposed=exposed - ) - conn = self._Client(token.address, authkey=self._authkey) - dispatch(conn, None, 'decref', (token.id,)) - return proxy - try: - raise convert_to_error(kind, result) - finally: - del result # break reference cycle - - def _getvalue(self): - ''' - Get a copy of the value of the referent - ''' - return self._callmethod('#GETVALUE') - - def _incref(self): - if self._owned_by_manager: - util.debug('owned_by_manager skipped INCREF of %r', self._token.id) - return - - conn = self._Client(self._token.address, authkey=self._authkey) - dispatch(conn, None, 'incref', (self._id,)) - util.debug('INCREF %r', self._token.id) - - self._all_serials.add(self._serial) - - state = self._manager and self._manager._state - - self._close = util.Finalize( - self, BaseProxy._decref, - args=(self._token, self._serial, self._authkey, state, - self._tls, self._all_serials, self._Client), - exitpriority=10 - ) - - @staticmethod - def _decref(token, serial, authkey, state, tls, idset, _Client): - idset.discard(serial) - - # check whether manager is still alive - if state is None or state.value == State.STARTED: - # tell manager this process no longer cares about referent - try: - util.debug('DECREF %r', token.id) - conn = _Client(token.address, authkey=authkey) - dispatch(conn, None, 'decref', (token.id,)) - except Exception as e: - util.debug('... 
decref failed %s', e) - - else: - util.debug('DECREF %r -- manager already shutdown', token.id) - - # check whether we can close this thread's connection because - # the process owns no more references to objects for this manager - if not idset and hasattr(tls, 'connection'): - util.debug('thread %r has no more proxies so closing conn', - threading.current_thread().name) - tls.connection.close() - del tls.connection - - def _after_fork(self): - self._manager = None - try: - self._incref() - except Exception as e: - # the proxy may just be for a manager which has shutdown - util.info('incref failed: %s' % e) - - def __reduce__(self): - kwds = {} - if get_spawning_popen() is not None: - kwds['authkey'] = self._authkey - - if getattr(self, '_isauto', False): - kwds['exposed'] = self._exposed_ - return (RebuildProxy, - (AutoProxy, self._token, self._serializer, kwds)) - else: - return (RebuildProxy, - (type(self), self._token, self._serializer, kwds)) - - def __deepcopy__(self, memo): - return self._getvalue() - - def __repr__(self): - return '<%s object, typeid %r at %#x>' % \ - (type(self).__name__, self._token.typeid, id(self)) - - def __str__(self): - ''' - Return representation of the referent (or a fall-back if that fails) - ''' - try: - return self._callmethod('__repr__') - except Exception: - return repr(self)[:-1] + "; '__str__()' failed>" - -# -# Function used for unpickling -# - -def RebuildProxy(func, token, serializer, kwds): - ''' - Function used for unpickling proxy objects. - ''' - server = getattr(process.current_process(), '_manager_server', None) - if server and server.address == token.address: - util.debug('Rebuild a proxy owned by manager, token=%r', token) - kwds['manager_owned'] = True - if token.id not in server.id_to_local_proxy_obj: - server.id_to_local_proxy_obj[token.id] = \ - server.id_to_obj[token.id] - incref = ( - kwds.pop('incref', True) and - not getattr(process.current_process(), '_inheriting', False) - ) - return func(token, serializer, incref=incref, **kwds) - -# -# Functions to create proxies and proxy types -# - -def MakeProxyType(name, exposed, _cache={}): - ''' - Return a proxy type whose methods are given by `exposed` - ''' - exposed = tuple(exposed) - try: - return _cache[(name, exposed)] - except KeyError: - pass - - dic = {} - - for meth in exposed: - exec('''def %s(self, /, *args, **kwds): - return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) - - ProxyType = type(name, (BaseProxy,), dic) - ProxyType._exposed_ = exposed - _cache[(name, exposed)] = ProxyType - return ProxyType - - -def AutoProxy(token, serializer, manager=None, authkey=None, - exposed=None, incref=True, manager_owned=False): - ''' - Return an auto-proxy for `token` - ''' - _Client = listener_client[serializer][1] - - if exposed is None: - conn = _Client(token.address, authkey=authkey) - try: - exposed = dispatch(conn, None, 'get_methods', (token,)) - finally: - conn.close() - - if authkey is None and manager is not None: - authkey = manager._authkey - if authkey is None: - authkey = process.current_process().authkey - - ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) - proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, - incref=incref, manager_owned=manager_owned) - proxy._isauto = True - return proxy - -# -# Types/callables which we will register with SyncManager -# - -class Namespace(object): - def __init__(self, /, **kwds): - self.__dict__.update(kwds) - def __repr__(self): - items = list(self.__dict__.items()) - temp = [] - for 
name, value in items: - if not name.startswith('_'): - temp.append('%s=%r' % (name, value)) - temp.sort() - return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) - -class Value(object): - def __init__(self, typecode, value, lock=True): - self._typecode = typecode - self._value = value - def get(self): - return self._value - def set(self, value): - self._value = value - def __repr__(self): - return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) - value = property(get, set) - -def Array(typecode, sequence, lock=True): - return array.array(typecode, sequence) - -# -# Proxy types used by SyncManager -# - -class IteratorProxy(BaseProxy): - _exposed_ = ('__next__', 'send', 'throw', 'close') - def __iter__(self): - return self - def __next__(self, *args): - return self._callmethod('__next__', args) - def send(self, *args): - return self._callmethod('send', args) - def throw(self, *args): - return self._callmethod('throw', args) - def close(self, *args): - return self._callmethod('close', args) - - -class AcquirerProxy(BaseProxy): - _exposed_ = ('acquire', 'release') - def acquire(self, blocking=True, timeout=None): - args = (blocking,) if timeout is None else (blocking, timeout) - return self._callmethod('acquire', args) - def release(self): - return self._callmethod('release') - def __enter__(self): - return self._callmethod('acquire') - def __exit__(self, exc_type, exc_val, exc_tb): - return self._callmethod('release') - - -class ConditionProxy(AcquirerProxy): - _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') - def wait(self, timeout=None): - return self._callmethod('wait', (timeout,)) - def notify(self, n=1): - return self._callmethod('notify', (n,)) - def notify_all(self): - return self._callmethod('notify_all') - def wait_for(self, predicate, timeout=None): - result = predicate() - if result: - return result - if timeout is not None: - endtime = time.monotonic() + timeout - else: - endtime = None - waittime = None - while not result: - if endtime is not None: - waittime = endtime - time.monotonic() - if waittime <= 0: - break - self.wait(waittime) - result = predicate() - return result - - -class EventProxy(BaseProxy): - _exposed_ = ('is_set', 'set', 'clear', 'wait') - def is_set(self): - return self._callmethod('is_set') - def set(self): - return self._callmethod('set') - def clear(self): - return self._callmethod('clear') - def wait(self, timeout=None): - return self._callmethod('wait', (timeout,)) - - -class BarrierProxy(BaseProxy): - _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') - def wait(self, timeout=None): - return self._callmethod('wait', (timeout,)) - def abort(self): - return self._callmethod('abort') - def reset(self): - return self._callmethod('reset') - @property - def parties(self): - return self._callmethod('__getattribute__', ('parties',)) - @property - def n_waiting(self): - return self._callmethod('__getattribute__', ('n_waiting',)) - @property - def broken(self): - return self._callmethod('__getattribute__', ('broken',)) - - -class NamespaceProxy(BaseProxy): - _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') - def __getattr__(self, key): - if key[0] == '_': - return object.__getattribute__(self, key) - callmethod = object.__getattribute__(self, '_callmethod') - return callmethod('__getattribute__', (key,)) - def __setattr__(self, key, value): - if key[0] == '_': - return object.__setattr__(self, key, value) - callmethod = object.__getattribute__(self, '_callmethod') - return callmethod('__setattr__', 
(key, value)) - def __delattr__(self, key): - if key[0] == '_': - return object.__delattr__(self, key) - callmethod = object.__getattribute__(self, '_callmethod') - return callmethod('__delattr__', (key,)) - - -class ValueProxy(BaseProxy): - _exposed_ = ('get', 'set') - def get(self): - return self._callmethod('get') - def set(self, value): - return self._callmethod('set', (value,)) - value = property(get, set) - - __class_getitem__ = classmethod(types.GenericAlias) - - -BaseListProxy = MakeProxyType('BaseListProxy', ( - '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', - '__mul__', '__reversed__', '__rmul__', '__setitem__', - 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', - 'reverse', 'sort', '__imul__' - )) -class ListProxy(BaseListProxy): - def __iadd__(self, value): - self._callmethod('extend', (value,)) - return self - def __imul__(self, value): - self._callmethod('__imul__', (value,)) - return self - - __class_getitem__ = classmethod(types.GenericAlias) - - -_BaseDictProxy = MakeProxyType('DictProxy', ( - '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', - '__setitem__', 'clear', 'copy', 'get', 'items', - 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' - )) -_BaseDictProxy._method_to_typeid_ = { - '__iter__': 'Iterator', - } -class DictProxy(_BaseDictProxy): - __class_getitem__ = classmethod(types.GenericAlias) - - -ArrayProxy = MakeProxyType('ArrayProxy', ( - '__len__', '__getitem__', '__setitem__' - )) - - -BasePoolProxy = MakeProxyType('PoolProxy', ( - 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', - 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', - )) -BasePoolProxy._method_to_typeid_ = { - 'apply_async': 'AsyncResult', - 'map_async': 'AsyncResult', - 'starmap_async': 'AsyncResult', - 'imap': 'Iterator', - 'imap_unordered': 'Iterator' - } -class PoolProxy(BasePoolProxy): - def __enter__(self): - return self - def __exit__(self, exc_type, exc_val, exc_tb): - self.terminate() - -# -# Definition of SyncManager -# - -class SyncManager(BaseManager): - ''' - Subclass of `BaseManager` which supports a number of shared object types. - - The types registered are those intended for the synchronization - of threads, plus `dict`, `list` and `Namespace`. - - The `multiprocessing.Manager()` function creates started instances of - this class. 
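Reviewer sketch, for context on the registrations that follow: multiprocessing.Manager() returns a started SyncManager, and its list()/dict() methods hand back proxies that forward each call to the manager process. The keys and values below are illustrative assumptions, not taken from this patch.

from multiprocessing import Manager

if __name__ == "__main__":
    with Manager() as m:
        d = m.dict()
        l = m.list([1, 2])
        d["k"] = l[0] + l[1]   # every access is a proxied method call
        assert d["k"] == 3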
- ''' - -SyncManager.register('Queue', queue.Queue) -SyncManager.register('JoinableQueue', queue.Queue) -SyncManager.register('Event', threading.Event, EventProxy) -SyncManager.register('Lock', threading.Lock, AcquirerProxy) -SyncManager.register('RLock', threading.RLock, AcquirerProxy) -SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) -SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, - AcquirerProxy) -SyncManager.register('Condition', threading.Condition, ConditionProxy) -SyncManager.register('Barrier', threading.Barrier, BarrierProxy) -SyncManager.register('Pool', pool.Pool, PoolProxy) -SyncManager.register('list', list, ListProxy) -SyncManager.register('dict', dict, DictProxy) -SyncManager.register('Value', Value, ValueProxy) -SyncManager.register('Array', Array, ArrayProxy) -SyncManager.register('Namespace', Namespace, NamespaceProxy) - -# types returned by methods of PoolProxy -SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) -SyncManager.register('AsyncResult', create_method=False) - -# -# Definition of SharedMemoryManager and SharedMemoryServer -# - -if HAS_SHMEM: - class _SharedMemoryTracker: - "Manages one or more shared memory segments." - - def __init__(self, name, segment_names=[]): - self.shared_memory_context_name = name - self.segment_names = segment_names - - def register_segment(self, segment_name): - "Adds the supplied shared memory block name to tracker." - util.debug(f"Register segment {segment_name!r} in pid {getpid()}") - self.segment_names.append(segment_name) - - def destroy_segment(self, segment_name): - """Calls unlink() on the shared memory block with the supplied name - and removes it from the list of blocks being tracked.""" - util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") - self.segment_names.remove(segment_name) - segment = shared_memory.SharedMemory(segment_name) - segment.close() - segment.unlink() - - def unlink(self): - "Calls destroy_segment() on all tracked shared memory blocks." - for segment_name in self.segment_names[:]: - self.destroy_segment(segment_name) - - def __del__(self): - util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") - self.unlink() - - def __getstate__(self): - return (self.shared_memory_context_name, self.segment_names) - - def __setstate__(self, state): - self.__init__(*state) - - - class SharedMemoryServer(Server): - - public = Server.public + \ - ['track_segment', 'release_segment', 'list_segments'] - - def __init__(self, *args, **kwargs): - Server.__init__(self, *args, **kwargs) - address = self.address - # The address of Linux abstract namespaces can be bytes - if isinstance(address, bytes): - address = os.fsdecode(address) - self.shared_memory_context = \ - _SharedMemoryTracker(f"shm_{address}_{getpid()}") - util.debug(f"SharedMemoryServer started by pid {getpid()}") - - def create(self, c, typeid, /, *args, **kwargs): - """Create a new distributed-shared object (not backed by a shared - memory block) and return its id to be used in a Proxy Object.""" - # Unless set up as a shared proxy, don't make shared_memory_context - # a standard part of kwargs. This makes things easier for supplying - # simple functions. - if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): - kwargs['shared_memory_context'] = self.shared_memory_context - return Server.create(self, c, typeid, *args, **kwargs) - - def shutdown(self, c): - "Call unlink() on all tracked shared memory, terminate the Server." 
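Reviewer sketch of the SharedMemoryManager API that this deleted module provided, for reference while reading the SharedMemoryServer code around this hunk; the size and values are illustrative assumptions.

from multiprocessing.managers import SharedMemoryManager

if __name__ == "__main__":
    with SharedMemoryManager() as smm:
        shm = smm.SharedMemory(size=128)            # tracked by the server
        shm.buf[:5] = b"hello"
        sl = smm.ShareableList([1, 2.0, "three"])   # also tracked
        assert sl[2] == "three"
    # leaving the block shuts the manager down and unlinks tracked segments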
- self.shared_memory_context.unlink() - return Server.shutdown(self, c) - - def track_segment(self, c, segment_name): - "Adds the supplied shared memory block name to Server's tracker." - self.shared_memory_context.register_segment(segment_name) - - def release_segment(self, c, segment_name): - """Calls unlink() on the shared memory block with the supplied name - and removes it from the tracker instance inside the Server.""" - self.shared_memory_context.destroy_segment(segment_name) - - def list_segments(self, c): - """Returns a list of names of shared memory blocks that the Server - is currently tracking.""" - return self.shared_memory_context.segment_names - - - class SharedMemoryManager(BaseManager): - """Like SyncManager but uses SharedMemoryServer instead of Server. - - It provides methods for creating and returning SharedMemory instances - and for creating a list-like object (ShareableList) backed by shared - memory. It also provides methods that create and return Proxy Objects - that support synchronization across processes (i.e. multi-process-safe - locks and semaphores). - """ - - _Server = SharedMemoryServer - - def __init__(self, *args, **kwargs): - if os.name == "posix": - # bpo-36867: Ensure the resource_tracker is running before - # launching the manager process, so that concurrent - # shared_memory manipulation both in the manager and in the - # current process does not create two resource_tracker - # processes. - from . import resource_tracker - resource_tracker.ensure_running() - BaseManager.__init__(self, *args, **kwargs) - util.debug(f"{self.__class__.__name__} created by pid {getpid()}") - - def __del__(self): - util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") - - def get_server(self): - 'Better than monkeypatching for now; merge into Server ultimately' - if self._state.value != State.INITIAL: - if self._state.value == State.STARTED: - raise ProcessError("Already started SharedMemoryServer") - elif self._state.value == State.SHUTDOWN: - raise ProcessError("SharedMemoryManager has shut down") - else: - raise ProcessError( - "Unknown state {!r}".format(self._state.value)) - return self._Server(self._registry, self._address, - self._authkey, self._serializer) - - def SharedMemory(self, size): - """Returns a new SharedMemory instance with the specified size in - bytes, to be tracked by the manager.""" - with self._Client(self._address, authkey=self._authkey) as conn: - sms = shared_memory.SharedMemory(None, create=True, size=size) - try: - dispatch(conn, None, 'track_segment', (sms.name,)) - except BaseException as e: - sms.unlink() - raise e - return sms - - def ShareableList(self, sequence): - """Returns a new ShareableList instance populated with the values - from the input sequence, to be tracked by the manager.""" - with self._Client(self._address, authkey=self._authkey) as conn: - sl = shared_memory.ShareableList(sequence) - try: - dispatch(conn, None, 'track_segment', (sl.shm.name,)) - except BaseException as e: - sl.shm.unlink() - raise e - return sl diff --git a/Python313_13_x86_Template/Lib/multiprocessing/popen_fork.py b/Python313_13_x86_Template/Lib/multiprocessing/popen_fork.py deleted file mode 100644 index a57ef6bd..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/popen_fork.py +++ /dev/null @@ -1,87 +0,0 @@ -import atexit -import os -import signal - -from . 
import util - -__all__ = ['Popen'] - -# -# Start child process using fork -# - -class Popen(object): - method = 'fork' - - def __init__(self, process_obj): - util._flush_std_streams() - self.returncode = None - self.finalizer = None - self._launch(process_obj) - - def duplicate_for_child(self, fd): - return fd - - def poll(self, flag=os.WNOHANG): - if self.returncode is None: - try: - pid, sts = os.waitpid(self.pid, flag) - except OSError: - # Child process not yet created. See #1731717 - # e.errno == errno.ECHILD == 10 - return None - if pid == self.pid: - self.returncode = os.waitstatus_to_exitcode(sts) - return self.returncode - - def wait(self, timeout=None): - if self.returncode is None: - if timeout is not None: - from multiprocessing.connection import wait - if not wait([self.sentinel], timeout): - return None - # This shouldn't block if wait() returned successfully. - return self.poll(os.WNOHANG if timeout == 0.0 else 0) - return self.returncode - - def _send_signal(self, sig): - if self.returncode is None: - try: - os.kill(self.pid, sig) - except ProcessLookupError: - pass - except OSError: - if self.wait(timeout=0.1) is None: - raise - - def terminate(self): - self._send_signal(signal.SIGTERM) - - def kill(self): - self._send_signal(signal.SIGKILL) - - def _launch(self, process_obj): - code = 1 - parent_r, child_w = os.pipe() - child_r, parent_w = os.pipe() - self.pid = os.fork() - if self.pid == 0: - try: - atexit._clear() - atexit.register(util._exit_function) - os.close(parent_r) - os.close(parent_w) - code = process_obj._bootstrap(parent_sentinel=child_r) - finally: - atexit._run_exitfuncs() - os._exit(code) - else: - os.close(child_w) - os.close(child_r) - self.finalizer = util.Finalize(self, util.close_fds, - (parent_r, parent_w,)) - self.sentinel = parent_r - - def close(self): - if self.finalizer is not None: - self.finalizer() diff --git a/Python313_13_x86_Template/Lib/multiprocessing/process.py b/Python313_13_x86_Template/Lib/multiprocessing/process.py deleted file mode 100644 index b45f7df4..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/process.py +++ /dev/null @@ -1,436 +0,0 @@ -# -# Module providing the `Process` class which emulates `threading.Thread` -# -# multiprocessing/process.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. 
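Reviewer sketch: poll() above funnels the raw waitpid() status through os.waitstatus_to_exitcode(). A minimal POSIX-only illustration of that mapping, with an arbitrary child exit code:

import os

pid = os.fork()
if pid == 0:
    os._exit(7)                    # child: exit with a known code
_, status = os.waitpid(pid, 0)     # parent: reap the child
# A non-negative result is an exit code; negative means "killed by signal".
assert os.waitstatus_to_exitcode(status) == 7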
-# - -__all__ = ['BaseProcess', 'current_process', 'active_children', - 'parent_process'] - -# -# Imports -# - -import os -import sys -import signal -import itertools -import threading -from _weakrefset import WeakSet - -# -# -# - -try: - ORIGINAL_DIR = os.path.abspath(os.getcwd()) -except OSError: - ORIGINAL_DIR = None - -# -# Public functions -# - -def current_process(): - ''' - Return process object representing the current process - ''' - return _current_process - -def active_children(): - ''' - Return list of process objects corresponding to live child processes - ''' - _cleanup() - return list(_children) - - -def parent_process(): - ''' - Return process object representing the parent process - ''' - return _parent_process - -# -# -# - -def _cleanup(): - # check for processes which have finished - for p in list(_children): - if (child_popen := p._popen) and child_popen.poll() is not None: - _children.discard(p) - -# -# The `Process` class -# - -class BaseProcess(object): - ''' - Process objects represent activity that is run in a separate process - - The class is analogous to `threading.Thread` - ''' - def _Popen(self): - raise NotImplementedError - - def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, - *, daemon=None): - assert group is None, 'group argument must be None for now' - count = next(_process_counter) - self._identity = _current_process._identity + (count,) - self._config = _current_process._config.copy() - self._parent_pid = os.getpid() - self._parent_name = _current_process.name - self._popen = None - self._closed = False - self._target = target - self._args = tuple(args) - self._kwargs = dict(kwargs) - self._name = name or type(self).__name__ + '-' + \ - ':'.join(str(i) for i in self._identity) - if daemon is not None: - self.daemon = daemon - _dangling.add(self) - - def _check_closed(self): - if self._closed: - raise ValueError("process object is closed") - - def run(self): - ''' - Method to be run in sub-process; can be overridden in sub-class - ''' - if self._target: - self._target(*self._args, **self._kwargs) - - def start(self): - ''' - Start child process - ''' - self._check_closed() - assert self._popen is None, 'cannot start a process twice' - assert self._parent_pid == os.getpid(), \ - 'can only start a process object created by current process' - assert not _current_process._config.get('daemon'), \ - 'daemonic processes are not allowed to have children' - _cleanup() - self._popen = self._Popen(self) - self._sentinel = self._popen.sentinel - # Avoid a refcycle if the target function holds an indirect - # reference to the process object (see bpo-30775) - del self._target, self._args, self._kwargs - _children.add(self) - - def terminate(self): - ''' - Terminate process; sends SIGTERM signal or uses TerminateProcess() - ''' - self._check_closed() - self._popen.terminate() - - def kill(self): - ''' - Terminate process; sends SIGKILL signal or uses TerminateProcess() - ''' - self._check_closed() - self._popen.kill() - - def join(self, timeout=None): - ''' - Wait until child process terminates - ''' - self._check_closed() - assert self._parent_pid == os.getpid(), 'can only join a child process' - assert self._popen is not None, 'can only join a started process' - res = self._popen.wait(timeout) - if res is not None: - _children.discard(self) - - def is_alive(self): - ''' - Return whether process is alive - ''' - self._check_closed() - if self is _current_process: - return True - assert self._parent_pid == os.getpid(), 'can only test a child 
process' - - if self._popen is None: - return False - - returncode = self._popen.poll() - if returncode is None: - return True - else: - _children.discard(self) - return False - - def close(self): - ''' - Close the Process object. - - This method releases resources held by the Process object. It is - an error to call this method if the child process is still running. - ''' - if self._popen is not None: - if self._popen.poll() is None: - raise ValueError("Cannot close a process while it is still running. " - "You should first call join() or terminate().") - self._popen.close() - self._popen = None - del self._sentinel - _children.discard(self) - self._closed = True - - @property - def name(self): - return self._name - - @name.setter - def name(self, name): - assert isinstance(name, str), 'name must be a string' - self._name = name - - @property - def daemon(self): - ''' - Return whether process is a daemon - ''' - return self._config.get('daemon', False) - - @daemon.setter - def daemon(self, daemonic): - ''' - Set whether process is a daemon - ''' - assert self._popen is None, 'process has already started' - self._config['daemon'] = daemonic - - @property - def authkey(self): - return self._config['authkey'] - - @authkey.setter - def authkey(self, authkey): - ''' - Set authorization key of process - ''' - self._config['authkey'] = AuthenticationString(authkey) - - @property - def exitcode(self): - ''' - Return exit code of process or `None` if it has yet to stop - ''' - self._check_closed() - if self._popen is None: - return self._popen - return self._popen.poll() - - @property - def ident(self): - ''' - Return identifier (PID) of process or `None` if it has yet to start - ''' - self._check_closed() - if self is _current_process: - return os.getpid() - else: - return self._popen and self._popen.pid - - pid = ident - - @property - def sentinel(self): - ''' - Return a file descriptor (Unix) or handle (Windows) suitable for - waiting for process termination. - ''' - self._check_closed() - try: - return self._sentinel - except AttributeError: - raise ValueError("process not started") from None - - def __repr__(self): - exitcode = None - if self is _current_process: - status = 'started' - elif self._closed: - status = 'closed' - elif self._parent_pid != os.getpid(): - status = 'unknown' - elif self._popen is None: - status = 'initial' - else: - exitcode = self._popen.poll() - if exitcode is not None: - status = 'stopped' - else: - status = 'started' - - info = [type(self).__name__, 'name=%r' % self._name] - if self._popen is not None: - info.append('pid=%s' % self._popen.pid) - info.append('parent=%s' % self._parent_pid) - info.append(status) - if exitcode is not None: - exitcode = _exitcode_to_name.get(exitcode, exitcode) - info.append('exitcode=%s' % exitcode) - if self.daemon: - info.append('daemon') - return '<%s>' % ' '.join(info) - - ## - - def _bootstrap(self, parent_sentinel=None): - from . 
import util, context - global _current_process, _parent_process, _process_counter, _children - - try: - if self._start_method is not None: - context._force_start_method(self._start_method) - _process_counter = itertools.count(1) - _children = set() - util._close_stdin() - old_process = _current_process - _current_process = self - _parent_process = _ParentProcess( - self._parent_name, self._parent_pid, parent_sentinel) - if threading._HAVE_THREAD_NATIVE_ID: - threading.main_thread()._set_native_id() - try: - self._after_fork() - finally: - # delay finalization of the old process object until after - # _run_after_forkers() is executed - del old_process - util.info('child process calling self.run()') - self.run() - exitcode = 0 - except SystemExit as e: - if e.code is None: - exitcode = 0 - elif isinstance(e.code, int): - exitcode = e.code - else: - sys.stderr.write(str(e.code) + '\n') - exitcode = 1 - except: - exitcode = 1 - import traceback - sys.stderr.write('Process %s:\n' % self.name) - traceback.print_exc() - finally: - threading._shutdown() - util.info('process exiting with exitcode %d' % exitcode) - util._flush_std_streams() - - return exitcode - - @staticmethod - def _after_fork(): - from . import util - util._finalizer_registry.clear() - util._run_after_forkers() - - -# -# We subclass bytes to avoid accidental transmission of auth keys over network -# - -class AuthenticationString(bytes): - def __reduce__(self): - from .context import get_spawning_popen - if get_spawning_popen() is None: - raise TypeError( - 'Pickling an AuthenticationString object is ' - 'disallowed for security reasons' - ) - return AuthenticationString, (bytes(self),) - - -# -# Create object representing the parent process -# - -class _ParentProcess(BaseProcess): - - def __init__(self, name, pid, sentinel): - self._identity = () - self._name = name - self._pid = pid - self._parent_pid = None - self._popen = None - self._closed = False - self._sentinel = sentinel - self._config = {} - - def is_alive(self): - from multiprocessing.connection import wait - return not wait([self._sentinel], timeout=0) - - @property - def ident(self): - return self._pid - - def join(self, timeout=None): - ''' - Wait until parent process terminates - ''' - from multiprocessing.connection import wait - wait([self._sentinel], timeout=timeout) - - pid = ident - -# -# Create object representing the main process -# - -class _MainProcess(BaseProcess): - - def __init__(self): - self._identity = () - self._name = 'MainProcess' - self._parent_pid = None - self._popen = None - self._closed = False - self._config = {'authkey': AuthenticationString(os.urandom(32)), - 'semprefix': '/mp'} - # Note that some versions of FreeBSD only allow named - # semaphores to have names of up to 14 characters. Therefore - # we choose a short prefix. - # - # On MacOSX in a sandbox it may be necessary to use a - # different prefix -- see #19478. - # - # Everything in self._config will be inherited by descendant - # processes. 
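Reviewer sketch of the AuthenticationString guard defined above: outside the spawning machinery, pickling the authkey is refused so it cannot accidentally leak over an unauthenticated channel.

import pickle
from multiprocessing import current_process

try:
    pickle.dumps(current_process().authkey)   # an AuthenticationString
except TypeError as exc:
    print("refused:", exc)                    # disallowed for security reasons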
- - def close(self): - pass - - -_parent_process = None -_current_process = _MainProcess() -_process_counter = itertools.count(1) -_children = set() -del _MainProcess - -# -# Give names to some return codes -# - -_exitcode_to_name = {} - -for name, signum in list(signal.__dict__.items()): - if name[:3]=='SIG' and '_' not in name: - _exitcode_to_name[-signum] = f'-{name}' -del name, signum - -# For debug and leak testing -_dangling = WeakSet() diff --git a/Python313_13_x86_Template/Lib/multiprocessing/queues.py b/Python313_13_x86_Template/Lib/multiprocessing/queues.py deleted file mode 100644 index 925f0439..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/queues.py +++ /dev/null @@ -1,399 +0,0 @@ -# -# Module implementing queues -# -# multiprocessing/queues.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. -# - -__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] - -import sys -import os -import threading -import collections -import time -import types -import weakref -import errno - -from queue import Empty, Full - -from . import connection -from . import context -_ForkingPickler = context.reduction.ForkingPickler - -from .util import debug, info, Finalize, register_after_fork, is_exiting - -# -# Queue type using a pipe, buffer and thread -# - -class Queue(object): - - def __init__(self, maxsize=0, *, ctx): - if maxsize <= 0: - # Can raise ImportError (see issues #3770 and #23400) - from .synchronize import SEM_VALUE_MAX as maxsize - self._maxsize = maxsize - self._reader, self._writer = connection.Pipe(duplex=False) - self._rlock = ctx.Lock() - self._opid = os.getpid() - if sys.platform == 'win32': - self._wlock = None - else: - self._wlock = ctx.Lock() - self._sem = ctx.BoundedSemaphore(maxsize) - # For use by concurrent.futures - self._ignore_epipe = False - self._reset() - - if sys.platform != 'win32': - register_after_fork(self, Queue._after_fork) - - def __getstate__(self): - context.assert_spawning(self) - return (self._ignore_epipe, self._maxsize, self._reader, self._writer, - self._rlock, self._wlock, self._sem, self._opid) - - def __setstate__(self, state): - (self._ignore_epipe, self._maxsize, self._reader, self._writer, - self._rlock, self._wlock, self._sem, self._opid) = state - self._reset() - - def _after_fork(self): - debug('Queue._after_fork()') - self._reset(after_fork=True) - - def _reset(self, after_fork=False): - if after_fork: - self._notempty._at_fork_reinit() - else: - self._notempty = threading.Condition(threading.Lock()) - self._buffer = collections.deque() - self._thread = None - self._jointhread = None - self._joincancelled = False - self._closed = False - self._close = None - self._send_bytes = self._writer.send_bytes - self._recv_bytes = self._reader.recv_bytes - self._poll = self._reader.poll - - def put(self, obj, block=True, timeout=None): - if self._closed: - raise ValueError(f"Queue {self!r} is closed") - if not self._sem.acquire(block, timeout): - raise Full - - with self._notempty: - if self._thread is None: - self._start_thread() - self._buffer.append(obj) - self._notempty.notify() - - def get(self, block=True, timeout=None): - if self._closed: - raise ValueError(f"Queue {self!r} is closed") - if block and timeout is None: - with self._rlock: - res = self._recv_bytes() - self._sem.release() - else: - if block: - deadline = time.monotonic() + timeout - if not self._rlock.acquire(block, timeout): - raise Empty - try: - if block: - timeout = deadline - time.monotonic() - if not self._poll(timeout): - 
raise Empty - elif not self._poll(): - raise Empty - res = self._recv_bytes() - self._sem.release() - finally: - self._rlock.release() - # unserialize the data after having released the lock - return _ForkingPickler.loads(res) - - def qsize(self): - # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() - return self._maxsize - self._sem._semlock._get_value() - - def empty(self): - return not self._poll() - - def full(self): - return self._sem._semlock._is_zero() - - def get_nowait(self): - return self.get(False) - - def put_nowait(self, obj): - return self.put(obj, False) - - def close(self): - self._closed = True - close = self._close - if close: - self._close = None - close() - - def join_thread(self): - debug('Queue.join_thread()') - assert self._closed, "Queue {0!r} not closed".format(self) - if self._jointhread: - self._jointhread() - - def cancel_join_thread(self): - debug('Queue.cancel_join_thread()') - self._joincancelled = True - try: - self._jointhread.cancel() - except AttributeError: - pass - - def _terminate_broken(self): - # Close a Queue on error. - - # gh-94777: Prevent queue writing to a pipe which is no longer read. - self._reader.close() - - # gh-107219: Close the connection writer which can unblock - # Queue._feed() if it was stuck in send_bytes(). - if sys.platform == 'win32': - self._writer.close() - - self.close() - self.join_thread() - - def _start_thread(self): - debug('Queue._start_thread()') - - # Start thread which transfers data from buffer to pipe - self._buffer.clear() - self._thread = threading.Thread( - target=Queue._feed, - args=(self._buffer, self._notempty, self._send_bytes, - self._wlock, self._reader.close, self._writer.close, - self._ignore_epipe, self._on_queue_feeder_error, - self._sem), - name='QueueFeederThread', - daemon=True, - ) - - try: - debug('doing self._thread.start()') - self._thread.start() - debug('... done self._thread.start()') - except: - # gh-109047: During Python finalization, creating a thread - # can fail with RuntimeError. - self._thread = None - raise - - if not self._joincancelled: - self._jointhread = Finalize( - self._thread, Queue._finalize_join, - [weakref.ref(self._thread)], - exitpriority=-5 - ) - - # Send sentinel to the thread queue object when garbage collected - self._close = Finalize( - self, Queue._finalize_close, - [self._buffer, self._notempty], - exitpriority=10 - ) - - @staticmethod - def _finalize_join(twr): - debug('joining queue thread') - thread = twr() - if thread is not None: - thread.join() - debug('... queue thread joined') - else: - debug('... 
queue thread already dead') - - @staticmethod - def _finalize_close(buffer, notempty): - debug('telling queue thread to quit') - with notempty: - buffer.append(_sentinel) - notempty.notify() - - @staticmethod - def _feed(buffer, notempty, send_bytes, writelock, reader_close, - writer_close, ignore_epipe, onerror, queue_sem): - debug('starting thread to feed data to pipe') - nacquire = notempty.acquire - nrelease = notempty.release - nwait = notempty.wait - bpopleft = buffer.popleft - sentinel = _sentinel - if sys.platform != 'win32': - wacquire = writelock.acquire - wrelease = writelock.release - else: - wacquire = None - - while 1: - try: - nacquire() - try: - if not buffer: - nwait() - finally: - nrelease() - try: - while 1: - obj = bpopleft() - if obj is sentinel: - debug('feeder thread got sentinel -- exiting') - reader_close() - writer_close() - return - - # serialize the data before acquiring the lock - obj = _ForkingPickler.dumps(obj) - if wacquire is None: - send_bytes(obj) - else: - wacquire() - try: - send_bytes(obj) - finally: - wrelease() - except IndexError: - pass - except Exception as e: - if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: - return - # Since this runs in a daemon thread the resources it uses - # may be become unusable while the process is cleaning up. - # We ignore errors which happen after the process has - # started to cleanup. - if is_exiting(): - info('error in queue thread: %s', e) - return - else: - # Since the object has not been sent in the queue, we need - # to decrease the size of the queue. The error acts as - # if the object had been silently removed from the queue - # and this step is necessary to have a properly working - # queue. - queue_sem.release() - onerror(e, obj) - - @staticmethod - def _on_queue_feeder_error(e, obj): - """ - Private API hook called when feeding data in the background thread - raises an exception. For overriding by concurrent.futures. - """ - import traceback - traceback.print_exc() - - __class_getitem__ = classmethod(types.GenericAlias) - - -_sentinel = object() - -# -# A queue type which also supports join() and task_done() methods -# -# Note that if you do not call task_done() for each finished task then -# eventually the counter's semaphore may overflow causing Bad Things -# to happen. 
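Reviewer sketch of the task_done()/join() contract that the comment just above warns about, using the JoinableQueue defined below; the worker logic and sentinel are illustrative assumptions.

import multiprocessing as mp

def worker(q):
    while (item := q.get()) is not None:
        # ... process item ...
        q.task_done()          # exactly one per completed get()
    q.task_done()              # account for the sentinel itself

if __name__ == "__main__":
    q = mp.JoinableQueue()
    p = mp.Process(target=worker, args=(q,))
    p.start()
    for i in range(3):
        q.put(i)
    q.put(None)                # sentinel: tell the worker to stop
    q.join()                   # returns once every put() was task_done()'d
    p.join()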
-# - -class JoinableQueue(Queue): - - def __init__(self, maxsize=0, *, ctx): - Queue.__init__(self, maxsize, ctx=ctx) - self._unfinished_tasks = ctx.Semaphore(0) - self._cond = ctx.Condition() - - def __getstate__(self): - return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) - - def __setstate__(self, state): - Queue.__setstate__(self, state[:-2]) - self._cond, self._unfinished_tasks = state[-2:] - - def put(self, obj, block=True, timeout=None): - if self._closed: - raise ValueError(f"Queue {self!r} is closed") - if not self._sem.acquire(block, timeout): - raise Full - - with self._notempty, self._cond: - if self._thread is None: - self._start_thread() - self._buffer.append(obj) - self._unfinished_tasks.release() - self._notempty.notify() - - def task_done(self): - with self._cond: - if not self._unfinished_tasks.acquire(False): - raise ValueError('task_done() called too many times') - if self._unfinished_tasks._semlock._is_zero(): - self._cond.notify_all() - - def join(self): - with self._cond: - if not self._unfinished_tasks._semlock._is_zero(): - self._cond.wait() - -# -# Simplified Queue type -- really just a locked pipe -# - -class SimpleQueue(object): - - def __init__(self, *, ctx): - self._reader, self._writer = connection.Pipe(duplex=False) - self._rlock = ctx.Lock() - self._poll = self._reader.poll - if sys.platform == 'win32': - self._wlock = None - else: - self._wlock = ctx.Lock() - - def close(self): - self._reader.close() - self._writer.close() - - def empty(self): - return not self._poll() - - def __getstate__(self): - context.assert_spawning(self) - return (self._reader, self._writer, self._rlock, self._wlock) - - def __setstate__(self, state): - (self._reader, self._writer, self._rlock, self._wlock) = state - self._poll = self._reader.poll - - def get(self): - with self._rlock: - res = self._reader.recv_bytes() - # unserialize the data after having released the lock - return _ForkingPickler.loads(res) - - def put(self, obj): - # serialize the data before acquiring the lock - obj = _ForkingPickler.dumps(obj) - if self._wlock is None: - # writes to a message oriented win32 pipe are atomic - self._writer.send_bytes(obj) - else: - with self._wlock: - self._writer.send_bytes(obj) - - __class_getitem__ = classmethod(types.GenericAlias) diff --git a/Python313_13_x86_Template/Lib/multiprocessing/reduction.py b/Python313_13_x86_Template/Lib/multiprocessing/reduction.py deleted file mode 100644 index 5593f068..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/reduction.py +++ /dev/null @@ -1,281 +0,0 @@ -# -# Module which deals with pickling of objects. -# -# multiprocessing/reduction.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. -# - -from abc import ABCMeta -import copyreg -import functools -import io -import os -import pickle -import socket -import sys - -from . 
import context - -__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] - - -HAVE_SEND_HANDLE = (sys.platform == 'win32' or - (hasattr(socket, 'CMSG_LEN') and - hasattr(socket, 'SCM_RIGHTS') and - hasattr(socket.socket, 'sendmsg'))) - -# -# Pickler subclass -# - -class ForkingPickler(pickle.Pickler): - '''Pickler subclass used by multiprocessing.''' - _extra_reducers = {} - _copyreg_dispatch_table = copyreg.dispatch_table - - def __init__(self, *args): - super().__init__(*args) - self.dispatch_table = self._copyreg_dispatch_table.copy() - self.dispatch_table.update(self._extra_reducers) - - @classmethod - def register(cls, type, reduce): - '''Register a reduce function for a type.''' - cls._extra_reducers[type] = reduce - - @classmethod - def dumps(cls, obj, protocol=None): - buf = io.BytesIO() - cls(buf, protocol).dump(obj) - return buf.getbuffer() - - loads = pickle.loads - -register = ForkingPickler.register - -def dump(obj, file, protocol=None): - '''Replacement for pickle.dump() using ForkingPickler.''' - ForkingPickler(file, protocol).dump(obj) - -# -# Platform specific definitions -# - -if sys.platform == 'win32': - # Windows - __all__ += ['DupHandle', 'duplicate', 'steal_handle'] - import _winapi - - def duplicate(handle, target_process=None, inheritable=False, - *, source_process=None): - '''Duplicate a handle. (target_process is a handle not a pid!)''' - current_process = _winapi.GetCurrentProcess() - if source_process is None: - source_process = current_process - if target_process is None: - target_process = current_process - return _winapi.DuplicateHandle( - source_process, handle, target_process, - 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) - - def steal_handle(source_pid, handle): - '''Steal a handle from process identified by source_pid.''' - source_process_handle = _winapi.OpenProcess( - _winapi.PROCESS_DUP_HANDLE, False, source_pid) - try: - return _winapi.DuplicateHandle( - source_process_handle, handle, - _winapi.GetCurrentProcess(), 0, False, - _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) - finally: - _winapi.CloseHandle(source_process_handle) - - def send_handle(conn, handle, destination_pid): - '''Send a handle over a local connection.''' - dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) - conn.send(dh) - - def recv_handle(conn): - '''Receive a handle over a local connection.''' - return conn.recv().detach() - - class DupHandle(object): - '''Picklable wrapper for a handle.''' - def __init__(self, handle, access, pid=None): - if pid is None: - # We just duplicate the handle in the current process and - # let the receiving process steal the handle. - pid = os.getpid() - proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) - try: - self._handle = _winapi.DuplicateHandle( - _winapi.GetCurrentProcess(), - handle, proc, access, False, 0) - finally: - _winapi.CloseHandle(proc) - self._access = access - self._pid = pid - - def detach(self): - '''Get the handle. This should only be called once.''' - # retrieve handle from process which currently owns it - if self._pid == os.getpid(): - # The handle has already been duplicated for this process. - return self._handle - # We must steal the handle from the process whose pid is self._pid. 
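Reviewer sketch of the ForkingPickler.register() hook defined at the top of this module; the Point type and its reducer are hypothetical examples, not part of the patch.

from multiprocessing.reduction import ForkingPickler

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def _reduce_point(p):
    return Point, (p.x, p.y)            # (callable, args) reduce form

ForkingPickler.register(Point, _reduce_point)
data = ForkingPickler.dumps(Point(1, 2))
assert ForkingPickler.loads(data).x == 1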
- proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, - self._pid) - try: - return _winapi.DuplicateHandle( - proc, self._handle, _winapi.GetCurrentProcess(), - self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) - finally: - _winapi.CloseHandle(proc) - -else: - # Unix - __all__ += ['DupFd', 'sendfds', 'recvfds'] - import array - - # On MacOSX we should acknowledge receipt of fds -- see Issue14669 - ACKNOWLEDGE = sys.platform == 'darwin' - - def sendfds(sock, fds): - '''Send an array of fds over an AF_UNIX socket.''' - fds = array.array('i', fds) - msg = bytes([len(fds) % 256]) - sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) - if ACKNOWLEDGE and sock.recv(1) != b'A': - raise RuntimeError('did not receive acknowledgement of fd') - - def recvfds(sock, size): - '''Receive an array of fds over an AF_UNIX socket.''' - a = array.array('i') - bytes_size = a.itemsize * size - msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) - if not msg and not ancdata: - raise EOFError - try: - if ACKNOWLEDGE: - sock.send(b'A') - if len(ancdata) != 1: - raise RuntimeError('received %d items of ancdata' % - len(ancdata)) - cmsg_level, cmsg_type, cmsg_data = ancdata[0] - if (cmsg_level == socket.SOL_SOCKET and - cmsg_type == socket.SCM_RIGHTS): - if len(cmsg_data) % a.itemsize != 0: - raise ValueError - a.frombytes(cmsg_data) - if len(a) % 256 != msg[0]: - raise AssertionError( - "Len is {0:n} but msg[0] is {1!r}".format( - len(a), msg[0])) - return list(a) - except (ValueError, IndexError): - pass - raise RuntimeError('Invalid data received') - - def send_handle(conn, handle, destination_pid): - '''Send a handle over a local connection.''' - with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: - sendfds(s, [handle]) - - def recv_handle(conn): - '''Receive a handle over a local connection.''' - with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: - return recvfds(s, 1)[0] - - def DupFd(fd): - '''Return a wrapper for an fd.''' - popen_obj = context.get_spawning_popen() - if popen_obj is not None: - return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) - elif HAVE_SEND_HANDLE: - from . 
import resource_sharer - return resource_sharer.DupFd(fd) - else: - raise ValueError('SCM_RIGHTS appears not to be available') - -# -# Try making some callable types picklable -# - -def _reduce_method(m): - if m.__self__ is None: - return getattr, (m.__class__, m.__func__.__name__) - else: - return getattr, (m.__self__, m.__func__.__name__) -class _C: - def f(self): - pass -register(type(_C().f), _reduce_method) - - -def _reduce_method_descriptor(m): - return getattr, (m.__objclass__, m.__name__) -register(type(list.append), _reduce_method_descriptor) -register(type(int.__add__), _reduce_method_descriptor) - - -def _reduce_partial(p): - return _rebuild_partial, (p.func, p.args, p.keywords or {}) -def _rebuild_partial(func, args, keywords): - return functools.partial(func, *args, **keywords) -register(functools.partial, _reduce_partial) - -# -# Make sockets picklable -# - -if sys.platform == 'win32': - def _reduce_socket(s): - from .resource_sharer import DupSocket - return _rebuild_socket, (DupSocket(s),) - def _rebuild_socket(ds): - return ds.detach() - register(socket.socket, _reduce_socket) - -else: - def _reduce_socket(s): - df = DupFd(s.fileno()) - return _rebuild_socket, (df, s.family, s.type, s.proto) - def _rebuild_socket(df, family, type, proto): - fd = df.detach() - return socket.socket(family, type, proto, fileno=fd) - register(socket.socket, _reduce_socket) - - -class AbstractReducer(metaclass=ABCMeta): - '''Abstract base class for use in implementing a Reduction class - suitable for use in replacing the standard reduction mechanism - used in multiprocessing.''' - ForkingPickler = ForkingPickler - register = register - dump = dump - send_handle = send_handle - recv_handle = recv_handle - - if sys.platform == 'win32': - steal_handle = steal_handle - duplicate = duplicate - DupHandle = DupHandle - else: - sendfds = sendfds - recvfds = recvfds - DupFd = DupFd - - _reduce_method = _reduce_method - _reduce_method_descriptor = _reduce_method_descriptor - _rebuild_partial = _rebuild_partial - _reduce_socket = _reduce_socket - _rebuild_socket = _rebuild_socket - - def __init__(self, *args): - register(type(_C().f), _reduce_method) - register(type(list.append), _reduce_method_descriptor) - register(type(int.__add__), _reduce_method_descriptor) - register(functools.partial, _reduce_partial) - register(socket.socket, _reduce_socket) diff --git a/Python313_13_x86_Template/Lib/multiprocessing/resource_tracker.py b/Python313_13_x86_Template/Lib/multiprocessing/resource_tracker.py deleted file mode 100644 index 22e3bbcf..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/resource_tracker.py +++ /dev/null @@ -1,420 +0,0 @@ -############################################################################### -# Server process to keep track of unlinked resources (like shared memory -# segments, semaphores etc.) and clean them. -# -# On Unix we run a server process which keeps track of unlinked -# resources. The server ignores SIGINT and SIGTERM and reads from a -# pipe. Every other process of the program has a copy of the writable -# end of the pipe, so we get EOF when all other processes have exited. -# Then the server process unlinks any remaining resource names. -# -# This is important because there may be system limits for such resources: for -# instance, the system only supports a limited number of named semaphores, and -# shared-memory segments live in the RAM. If a python process leaks such a -# resource, this resource will not be removed till the next reboot. 
Without -# this resource tracker process, "killall python" would probably leave unlinked -# resources. - -import base64 -import os -import signal -import sys -import threading -import warnings -from collections import deque - -import json - -from . import spawn -from . import util - -__all__ = ['ensure_running', 'register', 'unregister'] - -_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') -_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) - -def cleanup_noop(name): - raise RuntimeError('noop should never be registered or cleaned up') - -_CLEANUP_FUNCS = { - 'noop': cleanup_noop, - 'dummy': lambda name: None, # Dummy resource used in tests -} - -if os.name == 'posix': - import _multiprocessing - import _posixshmem - - # Use sem_unlink() to clean up named semaphores. - # - # sem_unlink() may be missing if the Python build process detected the - # absence of POSIX named semaphores. In that case, no named semaphores were - # ever opened, so no cleanup would be necessary. - if hasattr(_multiprocessing, 'sem_unlink'): - _CLEANUP_FUNCS.update({ - 'semaphore': _multiprocessing.sem_unlink, - }) - _CLEANUP_FUNCS.update({ - 'shared_memory': _posixshmem.shm_unlink, - }) - - -class ReentrantCallError(RuntimeError): - pass - - -class ResourceTracker(object): - - def __init__(self): - self._lock = threading.RLock() - self._fd = None - self._pid = None - self._exitcode = None - self._reentrant_messages = deque() - - # True to use colon-separated lines, rather than JSON lines, - # for internal communication. (Mainly for testing). - # Filenames not supported by the simple format will always be sent - # using JSON. - # The reader should understand all formats. - self._use_simple_format = True - - def _reentrant_call_error(self): - # gh-109629: this happens if an explicit call to the ResourceTracker - # gets interrupted by a garbage collection, invoking a finalizer (*) - # that itself calls back into ResourceTracker. - # (*) for example the SemLock finalizer - raise ReentrantCallError( - "Reentrant call into the multiprocessing resource tracker") - - def __del__(self): - # making sure child processess are cleaned before ResourceTracker - # gets destructed. - # see https://github.com/python/cpython/issues/88887 - self._stop(use_blocking_lock=False) - - def _stop(self, use_blocking_lock=True): - if use_blocking_lock: - with self._lock: - self._stop_locked() - else: - acquired = self._lock.acquire(blocking=False) - try: - self._stop_locked() - finally: - if acquired: - self._lock.release() - - def _stop_locked( - self, - close=os.close, - waitpid=os.waitpid, - waitstatus_to_exitcode=os.waitstatus_to_exitcode, - ): - # This shouldn't happen (it might when called by a finalizer) - # so we check for it anyway. - if self._lock._recursion_count() > 1: - raise self._reentrant_call_error() - if self._fd is None: - # not running - return - if self._pid is None: - return - - # closing the "alive" file descriptor stops main() - close(self._fd) - self._fd = None - - try: - _, status = waitpid(self._pid, 0) - except ChildProcessError: - self._pid = None - self._exitcode = None - return - - self._pid = None - - try: - self._exitcode = waitstatus_to_exitcode(status) - except ValueError: - # os.waitstatus_to_exitcode may raise an exception for invalid values - self._exitcode = None - - def getfd(self): - self.ensure_running() - return self._fd - - def ensure_running(self): - '''Make sure that resource tracker process is running. - - This can be run from any process. 
Usually a child process will use - the resource created by its parent.''' - return self._ensure_running_and_write() - - def _teardown_dead_process(self): - os.close(self._fd) - - # Clean-up to avoid dangling processes. - try: - # _pid can be None if this process is a child from another - # python process, which has started the resource_tracker. - if self._pid is not None: - os.waitpid(self._pid, 0) - except ChildProcessError: - # The resource_tracker has already been terminated. - pass - self._fd = None - self._pid = None - self._exitcode = None - - warnings.warn('resource_tracker: process died unexpectedly, ' - 'relaunching. Some resources might leak.') - - def _launch(self): - fds_to_pass = [] - try: - fds_to_pass.append(sys.stderr.fileno()) - except Exception: - pass - r, w = os.pipe() - try: - fds_to_pass.append(r) - # process will out live us, so no need to wait on pid - exe = spawn.get_executable() - args = [ - exe, - *util._args_from_interpreter_flags(), - '-c', - f'from multiprocessing.resource_tracker import main;main({r})', - ] - # bpo-33613: Register a signal mask that will block the signals. - # This signal mask will be inherited by the child that is going - # to be spawned and will protect the child from a race condition - # that can make the child die before it registers signal handlers - # for SIGINT and SIGTERM. The mask is unregistered after spawning - # the child. - prev_sigmask = None - try: - if _HAVE_SIGMASK: - prev_sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) - pid = util.spawnv_passfds(exe, args, fds_to_pass) - finally: - if prev_sigmask is not None: - signal.pthread_sigmask(signal.SIG_SETMASK, prev_sigmask) - except: - os.close(w) - raise - else: - self._fd = w - self._pid = pid - finally: - os.close(r) - - def _make_probe_message(self): - """Return a probe message.""" - if self._use_simple_format: - return b'PROBE:0:noop\n' - return ( - json.dumps( - {"cmd": "PROBE", "rtype": "noop"}, - ensure_ascii=True, - separators=(",", ":"), - ) - + "\n" - ).encode("ascii") - - def _ensure_running_and_write(self, msg=None): - with self._lock: - if self._lock._recursion_count() > 1: - # The code below is certainly not reentrant-safe, so bail out - if msg is None: - raise self._reentrant_call_error() - return self._reentrant_messages.append(msg) - - if self._fd is not None: - # resource tracker was launched before, is it still running? - if msg is None: - to_send = self._make_probe_message() - else: - to_send = msg - try: - self._write(to_send) - except OSError: - self._teardown_dead_process() - self._launch() - - msg = None # message was sent in probe - else: - self._launch() - - while True: - try: - reentrant_msg = self._reentrant_messages.popleft() - except IndexError: - break - self._write(reentrant_msg) - if msg is not None: - self._write(msg) - - def _check_alive(self): - '''Check that the pipe has not been closed by sending a probe.''' - try: - # We cannot use send here as it calls ensure_running, creating - # a cycle. 
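Reviewer sketch of the tracker's public surface (wired to module-level aliases further down); POSIX-only, the name is a hypothetical example, and 'dummy' is the test rtype listed in _CLEANUP_FUNCS above.

from multiprocessing import resource_tracker

resource_tracker.ensure_running()                     # launch tracker if needed
resource_tracker.register("/psm_example", "dummy")    # start tracking
resource_tracker.unregister("/psm_example", "dummy")  # balanced: nothing leaks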
- os.write(self._fd, self._make_probe_message()) - except OSError: - return False - else: - return True - - def register(self, name, rtype): - '''Register name of resource with resource tracker.''' - self._send('REGISTER', name, rtype) - - def unregister(self, name, rtype): - '''Unregister name of resource with resource tracker.''' - self._send('UNREGISTER', name, rtype) - - def _write(self, msg): - nbytes = os.write(self._fd, msg) - assert nbytes == len(msg), f"{nbytes=} != {len(msg)=}" - - def _send(self, cmd, name, rtype): - if self._use_simple_format and '\n' not in name: - msg = f"{cmd}:{name}:{rtype}\n".encode("ascii") - if len(msg) > 512: - # posix guarantees that writes to a pipe of less than PIPE_BUF - # bytes are atomic, and that PIPE_BUF >= 512 - raise ValueError('msg too long') - self._ensure_running_and_write(msg) - return - - # POSIX guarantees that writes to a pipe of less than PIPE_BUF (512 on Linux) - # bytes are atomic. Therefore, we want the message to be shorter than 512 bytes. - # POSIX shm_open() and sem_open() require the name, including its leading slash, - # to be at most NAME_MAX bytes (255 on Linux) - # With json.dump(..., ensure_ascii=True) every non-ASCII byte becomes a 6-char - # escape like \uDC80. - # As we want the overall message to be kept atomic and therefore smaller than 512, - # we encode the raw name bytes with URL-safe Base64 - so a 255-byte name - # will not exceed 340 bytes. - b = name.encode('utf-8', 'surrogateescape') - if len(b) > 255: - raise ValueError('shared memory name too long (max 255 bytes)') - b64 = base64.urlsafe_b64encode(b).decode('ascii') - - payload = {"cmd": cmd, "rtype": rtype, "base64_name": b64} - msg = (json.dumps(payload, ensure_ascii=True, separators=(",", ":")) + "\n").encode("ascii") - - # The entire JSON message is guaranteed < PIPE_BUF (512 bytes) by construction. 
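Reviewer check of the size bound asserted just below: a name of at most 255 bytes grows to ceil(255/3)*4 = 340 Base64 characters, and the fixed JSON framing keeps the whole line well under PIPE_BUF (512).

import base64, json

name = "x" * 255                                      # maximum allowed length
b64 = base64.urlsafe_b64encode(name.encode()).decode("ascii")
assert len(b64) == 340
msg = json.dumps({"cmd": "REGISTER", "rtype": "shared_memory",
                  "base64_name": b64}, separators=(",", ":")) + "\n"
assert len(msg) < 512                                 # fits one atomic write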
- assert len(msg) <= 512, f"internal error: message too long ({len(msg)} bytes)" - assert msg.startswith(b'{') - - self._ensure_running_and_write(msg) - -_resource_tracker = ResourceTracker() -ensure_running = _resource_tracker.ensure_running -register = _resource_tracker.register -unregister = _resource_tracker.unregister -getfd = _resource_tracker.getfd - - -def _decode_message(line): - if line.startswith(b'{'): - try: - obj = json.loads(line.decode('ascii')) - except Exception as e: - raise ValueError("malformed resource_tracker message: %r" % (line,)) from e - - cmd = obj["cmd"] - rtype = obj["rtype"] - b64 = obj.get("base64_name", "") - - if not isinstance(cmd, str) or not isinstance(rtype, str) or not isinstance(b64, str): - raise ValueError("malformed resource_tracker fields: %r" % (obj,)) - - try: - name = base64.urlsafe_b64decode(b64).decode('utf-8', 'surrogateescape') - except ValueError as e: - raise ValueError("malformed resource_tracker base64_name: %r" % (b64,)) from e - else: - cmd, rest = line.strip().decode('ascii').split(':', maxsplit=1) - name, rtype = rest.rsplit(':', maxsplit=1) - return cmd, rtype, name - - -def main(fd): - '''Run resource tracker.''' - # protect the process from ^C and "killall python" etc - signal.signal(signal.SIGINT, signal.SIG_IGN) - signal.signal(signal.SIGTERM, signal.SIG_IGN) - if _HAVE_SIGMASK: - signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) - - for f in (sys.stdin, sys.stdout): - try: - f.close() - except Exception: - pass - - cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} - exit_code = 0 - - try: - # keep track of registered/unregistered resources - with open(fd, 'rb') as f: - for line in f: - try: - cmd, rtype, name = _decode_message(line) - cleanup_func = _CLEANUP_FUNCS.get(rtype, None) - if cleanup_func is None: - raise ValueError( - f'Cannot register {name} for automatic cleanup: ' - f'unknown resource type {rtype}') - - if cmd == 'REGISTER': - cache[rtype].add(name) - elif cmd == 'UNREGISTER': - cache[rtype].remove(name) - elif cmd == 'PROBE': - pass - else: - raise RuntimeError('unrecognized command %r' % cmd) - except Exception: - exit_code = 3 - try: - sys.excepthook(*sys.exc_info()) - except: - pass - finally: - # all processes have terminated; cleanup any remaining resources - for rtype, rtype_cache in cache.items(): - if rtype_cache: - try: - exit_code = 1 - if rtype == 'dummy': - # The test 'dummy' resource is expected to leak. - # We skip the warning (and *only* the warning) for it. - pass - else: - warnings.warn( - f'resource_tracker: There appear to be ' - f'{len(rtype_cache)} leaked {rtype} objects to ' - f'clean up at shutdown: {rtype_cache}' - ) - except Exception: - pass - for name in rtype_cache: - # For some reason the process which created and registered this - # resource has failed to unregister it. Presumably it has - # died. We therefore unlink it. - try: - try: - _CLEANUP_FUNCS[rtype](name) - except Exception as e: - exit_code = 2 - warnings.warn('resource_tracker: %r: %s' % (name, e)) - finally: - pass - - sys.exit(exit_code) diff --git a/Python313_13_x86_Template/Lib/multiprocessing/shared_memory.py b/Python313_13_x86_Template/Lib/multiprocessing/shared_memory.py deleted file mode 100644 index 67e70fdc..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/shared_memory.py +++ /dev/null @@ -1,544 +0,0 @@ -"""Provides shared memory for direct access across processes. - -The API of this package is currently provisional. Refer to the -documentation for details. 
-""" - - -__all__ = [ 'SharedMemory', 'ShareableList' ] - - -from functools import partial -import mmap -import os -import errno -import struct -import secrets -import types - -if os.name == "nt": - import _winapi - _USE_POSIX = False -else: - import _posixshmem - _USE_POSIX = True - -from . import resource_tracker - -_O_CREX = os.O_CREAT | os.O_EXCL - -# FreeBSD (and perhaps other BSDs) limit names to 14 characters. -_SHM_SAFE_NAME_LENGTH = 14 - -# Shared memory block name prefix -if _USE_POSIX: - _SHM_NAME_PREFIX = '/psm_' -else: - _SHM_NAME_PREFIX = 'wnsm_' - - -def _make_filename(): - "Create a random filename for the shared memory object." - # number of random bytes to use for name - nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 - assert nbytes >= 2, '_SHM_NAME_PREFIX too long' - name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) - assert len(name) <= _SHM_SAFE_NAME_LENGTH - return name - - -class SharedMemory: - """Creates a new shared memory block or attaches to an existing - shared memory block. - - Every shared memory block is assigned a unique name. This enables - one process to create a shared memory block with a particular name - so that a different process can attach to that same shared memory - block using that same name. - - As a resource for sharing data across processes, shared memory blocks - may outlive the original process that created them. When one process - no longer needs access to a shared memory block that might still be - needed by other processes, the close() method should be called. - When a shared memory block is no longer needed by any process, the - unlink() method should be called to ensure proper cleanup.""" - - # Defaults; enables close() and unlink() to run without errors. - _name = None - _fd = -1 - _mmap = None - _buf = None - _flags = os.O_RDWR - _mode = 0o600 - _prepend_leading_slash = True if _USE_POSIX else False - _track = True - - def __init__(self, name=None, create=False, size=0, *, track=True): - if not size >= 0: - raise ValueError("'size' must be a positive integer") - if create: - self._flags = _O_CREX | os.O_RDWR - if size == 0: - raise ValueError("'size' must be a positive number different from zero") - if name is None and not self._flags & os.O_EXCL: - raise ValueError("'name' can only be None if create=True") - - self._track = track - if _USE_POSIX: - - # POSIX Shared Memory - - if name is None: - while True: - name = _make_filename() - try: - self._fd = _posixshmem.shm_open( - name, - self._flags, - mode=self._mode - ) - except FileExistsError: - continue - self._name = name - break - else: - name = "/" + name if self._prepend_leading_slash else name - self._fd = _posixshmem.shm_open( - name, - self._flags, - mode=self._mode - ) - self._name = name - try: - if create and size: - os.ftruncate(self._fd, size) - stats = os.fstat(self._fd) - size = stats.st_size - self._mmap = mmap.mmap(self._fd, size) - except OSError: - self.unlink() - raise - if self._track: - resource_tracker.register(self._name, "shared_memory") - - else: - - # Windows Named Shared Memory - - if create: - while True: - temp_name = _make_filename() if name is None else name - # Create and reserve shared memory block with this name - # until it can be attached to by mmap. 
- h_map = _winapi.CreateFileMapping( - _winapi.INVALID_HANDLE_VALUE, - _winapi.NULL, - _winapi.PAGE_READWRITE, - (size >> 32) & 0xFFFFFFFF, - size & 0xFFFFFFFF, - temp_name - ) - try: - last_error_code = _winapi.GetLastError() - if last_error_code == _winapi.ERROR_ALREADY_EXISTS: - if name is not None: - raise FileExistsError( - errno.EEXIST, - os.strerror(errno.EEXIST), - name, - _winapi.ERROR_ALREADY_EXISTS - ) - else: - continue - self._mmap = mmap.mmap(-1, size, tagname=temp_name) - finally: - _winapi.CloseHandle(h_map) - self._name = temp_name - break - - else: - self._name = name - # Dynamically determine the existing named shared memory - # block's size which is likely a multiple of mmap.PAGESIZE. - h_map = _winapi.OpenFileMapping( - _winapi.FILE_MAP_READ, - False, - name - ) - try: - p_buf = _winapi.MapViewOfFile( - h_map, - _winapi.FILE_MAP_READ, - 0, - 0, - 0 - ) - finally: - _winapi.CloseHandle(h_map) - try: - size = _winapi.VirtualQuerySize(p_buf) - finally: - _winapi.UnmapViewOfFile(p_buf) - self._mmap = mmap.mmap(-1, size, tagname=name) - - self._size = size - self._buf = memoryview(self._mmap) - - def __del__(self): - try: - self.close() - except OSError: - pass - - def __reduce__(self): - return ( - self.__class__, - ( - self.name, - False, - self.size, - ), - ) - - def __repr__(self): - return f'{self.__class__.__name__}({self.name!r}, size={self.size})' - - @property - def buf(self): - "A memoryview of contents of the shared memory block." - return self._buf - - @property - def name(self): - "Unique name that identifies the shared memory block." - reported_name = self._name - if _USE_POSIX and self._prepend_leading_slash: - if self._name.startswith("/"): - reported_name = self._name[1:] - return reported_name - - @property - def size(self): - "Size in bytes." - return self._size - - def close(self): - """Closes access to the shared memory from this instance but does - not destroy the shared memory block.""" - if self._buf is not None: - self._buf.release() - self._buf = None - if self._mmap is not None: - self._mmap.close() - self._mmap = None - if _USE_POSIX and self._fd >= 0: - os.close(self._fd) - self._fd = -1 - - def unlink(self): - """Requests that the underlying shared memory block be destroyed. - - Unlink should be called once (and only once) across all handles - which have access to the shared memory block, even if these - handles belong to different processes. Closing and unlinking may - happen in any order, but trying to access data inside a shared - memory block after unlinking may result in memory errors, - depending on platform. - - This method has no effect on Windows, where the only way to - delete a shared memory block is to close all handles.""" - - if _USE_POSIX and self._name: - _posixshmem.shm_unlink(self._name) - if self._track: - resource_tracker.unregister(self._name, "shared_memory") - - -_encoding = "utf8" - -class ShareableList: - """Pattern for a mutable list-like object shareable via a shared - memory block. It differs from the built-in list type in that these - lists can not change their overall length (i.e. no append, insert, - etc.) 
- - Because values are packed into a memoryview as bytes, the struct - packing format for any storable value must require no more than 8 - characters to describe its format.""" - - # The shared memory area is organized as follows: - # - 8 bytes: number of items (N) as a 64-bit integer - # - (N + 1) * 8 bytes: offsets of each element from the start of the - # data area - # - K bytes: the data area storing item values (with encoding and size - # depending on their respective types) - # - N * 8 bytes: `struct` format string for each element - # - N bytes: index into _back_transforms_mapping for each element - # (for reconstructing the corresponding Python value) - _types_mapping = { - int: "q", - float: "d", - bool: "xxxxxxx?", - str: "%ds", - bytes: "%ds", - None.__class__: "xxxxxx?x", - } - _alignment = 8 - _back_transforms_mapping = { - 0: lambda value: value, # int, float, bool - 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str - 2: lambda value: value.rstrip(b'\x00'), # bytes - 3: lambda _value: None, # None - } - - @staticmethod - def _extract_recreation_code(value): - """Used in concert with _back_transforms_mapping to convert values - into the appropriate Python objects when retrieving them from - the list as well as when storing them.""" - if not isinstance(value, (str, bytes, None.__class__)): - return 0 - elif isinstance(value, str): - return 1 - elif isinstance(value, bytes): - return 2 - else: - return 3 # NoneType - - def __init__(self, sequence=None, *, name=None): - if name is None or sequence is not None: - sequence = sequence or () - _formats = [ - self._types_mapping[type(item)] - if not isinstance(item, (str, bytes)) - else self._types_mapping[type(item)] % ( - self._alignment * (len(item) // self._alignment + 1), - ) - for item in sequence - ] - self._list_len = len(_formats) - assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len - offset = 0 - # The offsets of each list element into the shared memory's - # data area (0 meaning the start of the data area, not the start - # of the shared memory area). - self._allocated_offsets = [0] - for fmt in _formats: - offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) - self._allocated_offsets.append(offset) - _recreation_codes = [ - self._extract_recreation_code(item) for item in sequence - ] - requested_size = struct.calcsize( - "q" + self._format_size_metainfo + - "".join(_formats) + - self._format_packing_metainfo + - self._format_back_transform_codes - ) - - self.shm = SharedMemory(name, create=True, size=requested_size) - else: - self.shm = SharedMemory(name) - - if sequence is not None: - _enc = _encoding - struct.pack_into( - "q" + self._format_size_metainfo, - self.shm.buf, - 0, - self._list_len, - *(self._allocated_offsets) - ) - struct.pack_into( - "".join(_formats), - self.shm.buf, - self._offset_data_start, - *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) - ) - struct.pack_into( - self._format_packing_metainfo, - self.shm.buf, - self._offset_packing_formats, - *(v.encode(_enc) for v in _formats) - ) - struct.pack_into( - self._format_back_transform_codes, - self.shm.buf, - self._offset_back_transform_codes, - *(_recreation_codes) - ) - - else: - self._list_len = len(self) # Obtains size from offset 0 in buffer. - self._allocated_offsets = list( - struct.unpack_from( - self._format_size_metainfo, - self.shm.buf, - 1 * 8 - ) - ) - - def _get_packing_format(self, position): - "Gets the packing format for a single value stored in the list." 
- position = position if position >= 0 else position + self._list_len - if (position >= self._list_len) or (self._list_len < 0): - raise IndexError("Requested position out of range.") - - v = struct.unpack_from( - "8s", - self.shm.buf, - self._offset_packing_formats + position * 8 - )[0] - fmt = v.rstrip(b'\x00') - fmt_as_str = fmt.decode(_encoding) - - return fmt_as_str - - def _get_back_transform(self, position): - "Gets the back transformation function for a single value." - - if (position >= self._list_len) or (self._list_len < 0): - raise IndexError("Requested position out of range.") - - transform_code = struct.unpack_from( - "b", - self.shm.buf, - self._offset_back_transform_codes + position - )[0] - transform_function = self._back_transforms_mapping[transform_code] - - return transform_function - - def _set_packing_format_and_transform(self, position, fmt_as_str, value): - """Sets the packing format and back transformation code for a - single value in the list at the specified position.""" - - if (position >= self._list_len) or (self._list_len < 0): - raise IndexError("Requested position out of range.") - - struct.pack_into( - "8s", - self.shm.buf, - self._offset_packing_formats + position * 8, - fmt_as_str.encode(_encoding) - ) - - transform_code = self._extract_recreation_code(value) - struct.pack_into( - "b", - self.shm.buf, - self._offset_back_transform_codes + position, - transform_code - ) - - def __getitem__(self, position): - position = position if position >= 0 else position + self._list_len - try: - offset = self._offset_data_start + self._allocated_offsets[position] - (v,) = struct.unpack_from( - self._get_packing_format(position), - self.shm.buf, - offset - ) - except IndexError: - raise IndexError("index out of range") - - back_transform = self._get_back_transform(position) - v = back_transform(v) - - return v - - def __setitem__(self, position, value): - position = position if position >= 0 else position + self._list_len - try: - item_offset = self._allocated_offsets[position] - offset = self._offset_data_start + item_offset - current_format = self._get_packing_format(position) - except IndexError: - raise IndexError("assignment index out of range") - - if not isinstance(value, (str, bytes)): - new_format = self._types_mapping[type(value)] - encoded_value = value - else: - allocated_length = self._allocated_offsets[position + 1] - item_offset - - encoded_value = (value.encode(_encoding) - if isinstance(value, str) else value) - if len(encoded_value) > allocated_length: - raise ValueError("bytes/str item exceeds available storage") - if current_format[-1] == "s": - new_format = current_format - else: - new_format = self._types_mapping[str] % ( - allocated_length, - ) - - self._set_packing_format_and_transform( - position, - new_format, - value - ) - struct.pack_into(new_format, self.shm.buf, offset, encoded_value) - - def __reduce__(self): - return partial(self.__class__, name=self.shm.name), () - - def __len__(self): - return struct.unpack_from("q", self.shm.buf, 0)[0] - - def __repr__(self): - return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' - - @property - def format(self): - "The struct packing format used by all currently stored items." - return "".join( - self._get_packing_format(i) for i in range(self._list_len) - ) - - @property - def _format_size_metainfo(self): - "The struct packing format used for the items' storage offsets." 
- return "q" * (self._list_len + 1) - - @property - def _format_packing_metainfo(self): - "The struct packing format used for the items' packing formats." - return "8s" * self._list_len - - @property - def _format_back_transform_codes(self): - "The struct packing format used for the items' back transforms." - return "b" * self._list_len - - @property - def _offset_data_start(self): - # - 8 bytes for the list length - # - (N + 1) * 8 bytes for the element offsets - return (self._list_len + 2) * 8 - - @property - def _offset_packing_formats(self): - return self._offset_data_start + self._allocated_offsets[-1] - - @property - def _offset_back_transform_codes(self): - return self._offset_packing_formats + self._list_len * 8 - - def count(self, value): - "L.count(value) -> integer -- return number of occurrences of value." - - return sum(value == entry for entry in self) - - def index(self, value): - """L.index(value) -> integer -- return first index of value. - Raises ValueError if the value is not present.""" - - for position, entry in enumerate(self): - if value == entry: - return position - else: - raise ValueError(f"{value!r} not in this container") - - __class_getitem__ = classmethod(types.GenericAlias) diff --git a/Python313_13_x86_Template/Lib/multiprocessing/synchronize.py b/Python313_13_x86_Template/Lib/multiprocessing/synchronize.py deleted file mode 100644 index 870c9134..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/synchronize.py +++ /dev/null @@ -1,404 +0,0 @@ -# -# Module implementing synchronization primitives -# -# multiprocessing/synchronize.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. -# - -__all__ = [ - 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' - ] - -import threading -import sys -import tempfile -import _multiprocessing -import time - -from . import context -from . import process -from . import util - -# Try to import the mp.synchronize module cleanly, if it fails -# raise ImportError for platforms lacking a working sem_open implementation. -# See issue 3770 -try: - from _multiprocessing import SemLock, sem_unlink -except (ImportError): - raise ImportError("This platform lacks a functioning sem_open" + - " implementation, therefore, the required" + - " synchronization primitives needed will not" + - " function, see issue 3770.") - -# -# Constants -# - -RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) -SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX - -# -# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` -# - -class SemLock(object): - - _rand = tempfile._RandomNameSequence() - - def __init__(self, kind, value, maxvalue, *, ctx): - if ctx is None: - ctx = context._default_context.get_context() - self._is_fork_ctx = ctx.get_start_method() == 'fork' - unlink_now = sys.platform == 'win32' or self._is_fork_ctx - for i in range(100): - try: - sl = self._semlock = _multiprocessing.SemLock( - kind, value, maxvalue, self._make_name(), - unlink_now) - except FileExistsError: - pass - else: - break - else: - raise FileExistsError('cannot find name for semaphore') - - util.debug('created semlock with handle %s' % sl.handle) - self._make_methods() - - if sys.platform != 'win32': - def _after_fork(obj): - obj._semlock._after_fork() - util.register_after_fork(self, _after_fork) - - if self._semlock.name is not None: - # We only get here if we are on Unix with forking - # disabled. 
When the object is garbage collected or the - # process shuts down we unlink the semaphore name - from .resource_tracker import register - register(self._semlock.name, "semaphore") - util.Finalize(self, SemLock._cleanup, (self._semlock.name,), - exitpriority=0) - - @staticmethod - def _cleanup(name): - from .resource_tracker import unregister - sem_unlink(name) - unregister(name, "semaphore") - - def _make_methods(self): - self.acquire = self._semlock.acquire - self.release = self._semlock.release - - def __enter__(self): - return self._semlock.__enter__() - - def __exit__(self, *args): - return self._semlock.__exit__(*args) - - def __getstate__(self): - context.assert_spawning(self) - sl = self._semlock - if sys.platform == 'win32': - h = context.get_spawning_popen().duplicate_for_child(sl.handle) - else: - if self._is_fork_ctx: - raise RuntimeError('A SemLock created in a fork context is being ' - 'shared with a process in a spawn context. This is ' - 'not supported. Please use the same context to create ' - 'multiprocessing objects and Process.') - h = sl.handle - return (h, sl.kind, sl.maxvalue, sl.name) - - def __setstate__(self, state): - self._semlock = _multiprocessing.SemLock._rebuild(*state) - util.debug('recreated blocker with handle %r' % state[0]) - self._make_methods() - # Ensure that deserialized SemLock can be serialized again (gh-108520). - self._is_fork_ctx = False - - @staticmethod - def _make_name(): - return '%s-%s' % (process.current_process()._config['semprefix'], - next(SemLock._rand)) - -# -# Semaphore -# - -class Semaphore(SemLock): - - def __init__(self, value=1, *, ctx): - SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) - - def get_value(self): - return self._semlock._get_value() - - def __repr__(self): - try: - value = self._semlock._get_value() - except Exception: - value = 'unknown' - return '<%s(value=%s)>' % (self.__class__.__name__, value) - -# -# Bounded semaphore -# - -class BoundedSemaphore(Semaphore): - - def __init__(self, value=1, *, ctx): - SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) - - def __repr__(self): - try: - value = self._semlock._get_value() - except Exception: - value = 'unknown' - return '<%s(value=%s, maxvalue=%s)>' % \ - (self.__class__.__name__, value, self._semlock.maxvalue) - -# -# Non-recursive lock -# - -class Lock(SemLock): - - def __init__(self, *, ctx): - SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) - - def __repr__(self): - try: - if self._semlock._is_mine(): - name = process.current_process().name - if threading.current_thread().name != 'MainThread': - name += '|' + threading.current_thread().name - elif not self._semlock._is_zero(): - name = 'None' - elif self._semlock._count() > 0: - name = 'SomeOtherThread' - else: - name = 'SomeOtherProcess' - except Exception: - name = 'unknown' - return '<%s(owner=%s)>' % (self.__class__.__name__, name) - -# -# Recursive lock -# - -class RLock(SemLock): - - def __init__(self, *, ctx): - SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) - - def __repr__(self): - try: - if self._semlock._is_mine(): - name = process.current_process().name - if threading.current_thread().name != 'MainThread': - name += '|' + threading.current_thread().name - count = self._semlock._count() - elif not self._semlock._is_zero(): - name, count = 'None', 0 - elif self._semlock._count() > 0: - name, count = 'SomeOtherThread', 'nonzero' - else: - name, count = 'SomeOtherProcess', 'nonzero' - except Exception: - name, count = 'unknown', 'unknown' - return '<%s(%s, %s)>' % 
(self.__class__.__name__, name, count) - -# -# Condition variable -# - -class Condition(object): - - def __init__(self, lock=None, *, ctx): - self._lock = lock or ctx.RLock() - self._sleeping_count = ctx.Semaphore(0) - self._woken_count = ctx.Semaphore(0) - self._wait_semaphore = ctx.Semaphore(0) - self._make_methods() - - def __getstate__(self): - context.assert_spawning(self) - return (self._lock, self._sleeping_count, - self._woken_count, self._wait_semaphore) - - def __setstate__(self, state): - (self._lock, self._sleeping_count, - self._woken_count, self._wait_semaphore) = state - self._make_methods() - - def __enter__(self): - return self._lock.__enter__() - - def __exit__(self, *args): - return self._lock.__exit__(*args) - - def _make_methods(self): - self.acquire = self._lock.acquire - self.release = self._lock.release - - def __repr__(self): - try: - num_waiters = (self._sleeping_count._semlock._get_value() - - self._woken_count._semlock._get_value()) - except Exception: - num_waiters = 'unknown' - return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) - - def wait(self, timeout=None): - assert self._lock._semlock._is_mine(), \ - 'must acquire() condition before using wait()' - - # indicate that this thread is going to sleep - self._sleeping_count.release() - - # release lock - count = self._lock._semlock._count() - for i in range(count): - self._lock.release() - - try: - # wait for notification or timeout - return self._wait_semaphore.acquire(True, timeout) - finally: - # indicate that this thread has woken - self._woken_count.release() - - # reacquire lock - for i in range(count): - self._lock.acquire() - - def notify(self, n=1): - assert self._lock._semlock._is_mine(), 'lock is not owned' - assert not self._wait_semaphore.acquire( - False), ('notify: Should not have been able to acquire ' - + '_wait_semaphore') - - # to take account of timeouts since last notify*() we subtract - # woken_count from sleeping_count and rezero woken_count - while self._woken_count.acquire(False): - res = self._sleeping_count.acquire(False) - assert res, ('notify: Bug in sleeping_count.acquire' - + '- res should not be False') - - sleepers = 0 - while sleepers < n and self._sleeping_count.acquire(False): - self._wait_semaphore.release() # wake up one sleeper - sleepers += 1 - - if sleepers: - for i in range(sleepers): - self._woken_count.acquire() # wait for a sleeper to wake - - # rezero wait_semaphore in case some timeouts just happened - while self._wait_semaphore.acquire(False): - pass - - def notify_all(self): - self.notify(n=sys.maxsize) - - def wait_for(self, predicate, timeout=None): - result = predicate() - if result: - return result - if timeout is not None: - endtime = time.monotonic() + timeout - else: - endtime = None - waittime = None - while not result: - if endtime is not None: - waittime = endtime - time.monotonic() - if waittime <= 0: - break - self.wait(waittime) - result = predicate() - return result - -# -# Event -# - -class Event(object): - - def __init__(self, *, ctx): - self._cond = ctx.Condition(ctx.Lock()) - self._flag = ctx.Semaphore(0) - - def is_set(self): - with self._cond: - if self._flag.acquire(False): - self._flag.release() - return True - return False - - def set(self): - with self._cond: - self._flag.acquire(False) - self._flag.release() - self._cond.notify_all() - - def clear(self): - with self._cond: - self._flag.acquire(False) - - def wait(self, timeout=None): - with self._cond: - if self._flag.acquire(False): - self._flag.release() - else: 
- self._cond.wait(timeout) - - if self._flag.acquire(False): - self._flag.release() - return True - return False - - def __repr__(self): - set_status = 'set' if self.is_set() else 'unset' - return f"<{type(self).__qualname__} at {id(self):#x} {set_status}>" -# -# Barrier -# - -class Barrier(threading.Barrier): - - def __init__(self, parties, action=None, timeout=None, *, ctx): - import struct - from .heap import BufferWrapper - wrapper = BufferWrapper(struct.calcsize('i') * 2) - cond = ctx.Condition() - self.__setstate__((parties, action, timeout, cond, wrapper)) - self._state = 0 - self._count = 0 - - def __setstate__(self, state): - (self._parties, self._action, self._timeout, - self._cond, self._wrapper) = state - self._array = self._wrapper.create_memoryview().cast('i') - - def __getstate__(self): - return (self._parties, self._action, self._timeout, - self._cond, self._wrapper) - - @property - def _state(self): - return self._array[0] - - @_state.setter - def _state(self, value): - self._array[0] = value - - @property - def _count(self): - return self._array[1] - - @_count.setter - def _count(self, value): - self._array[1] = value diff --git a/Python313_13_x86_Template/Lib/multiprocessing/util.py b/Python313_13_x86_Template/Lib/multiprocessing/util.py deleted file mode 100644 index b8bfea04..00000000 --- a/Python313_13_x86_Template/Lib/multiprocessing/util.py +++ /dev/null @@ -1,562 +0,0 @@ -# -# Module providing various facilities to other parts of the package -# -# multiprocessing/util.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. -# - -import os -import itertools -import sys -import weakref -import atexit -import threading # we want threading to install it's - # cleanup function before multiprocessing does -from subprocess import _args_from_interpreter_flags - -from . 
import process - -__all__ = [ - 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', - 'log_to_stderr', 'get_temp_dir', 'register_after_fork', - 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', - 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', - ] - -# -# Logging -# - -NOTSET = 0 -SUBDEBUG = 5 -DEBUG = 10 -INFO = 20 -SUBWARNING = 25 -WARNING = 30 - -LOGGER_NAME = 'multiprocessing' -DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' - -_logger = None -_log_to_stderr = False - -def sub_debug(msg, *args): - if _logger: - _logger.log(SUBDEBUG, msg, *args, stacklevel=2) - -def debug(msg, *args): - if _logger: - _logger.log(DEBUG, msg, *args, stacklevel=2) - -def info(msg, *args): - if _logger: - _logger.log(INFO, msg, *args, stacklevel=2) - -def _warn(msg, *args): - if _logger: - _logger.log(WARNING, msg, *args, stacklevel=2) - -def sub_warning(msg, *args): - if _logger: - _logger.log(SUBWARNING, msg, *args, stacklevel=2) - -def get_logger(): - ''' - Returns logger used by multiprocessing - ''' - global _logger - import logging - - with logging._lock: - if not _logger: - - _logger = logging.getLogger(LOGGER_NAME) - _logger.propagate = 0 - - # XXX multiprocessing should cleanup before logging - if hasattr(atexit, 'unregister'): - atexit.unregister(_exit_function) - atexit.register(_exit_function) - else: - atexit._exithandlers.remove((_exit_function, (), {})) - atexit._exithandlers.append((_exit_function, (), {})) - - return _logger - -def log_to_stderr(level=None): - ''' - Turn on logging and add a handler which prints to stderr - ''' - global _log_to_stderr - import logging - - logger = get_logger() - formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) - handler = logging.StreamHandler() - handler.setFormatter(formatter) - logger.addHandler(handler) - - if level: - logger.setLevel(level) - _log_to_stderr = True - return _logger - - -# Abstract socket support - -def _platform_supports_abstract_sockets(): - return sys.platform in ("linux", "android") - - -def is_abstract_socket_namespace(address): - if not address: - return False - if isinstance(address, bytes): - return address[0] == 0 - elif isinstance(address, str): - return address[0] == "\0" - raise TypeError(f'address type of {address!r} unrecognized') - - -abstract_sockets_supported = _platform_supports_abstract_sockets() - -# -# Function returning a temp directory which will be removed on exit -# - -# Maximum length of a NULL-terminated [1] socket file path is usually -# between 92 and 108 [2], but Linux is known to use a size of 108 [3]. -# BSD-based systems usually use a size of 104 or 108 and Windows does -# not create AF_UNIX sockets. -# -# [1]: https://github.com/python/cpython/issues/140734 -# [2]: https://pubs.opengroup.org/onlinepubs/9799919799/basedefs/sys_un.h.html -# [3]: https://man7.org/linux/man-pages/man7/unix.7.html - -if sys.platform == 'linux': - _SUN_PATH_MAX = 108 -elif sys.platform.startswith(('openbsd', 'freebsd')): - _SUN_PATH_MAX = 104 -else: - # On Windows platforms, we do not create AF_UNIX sockets. - _SUN_PATH_MAX = None if os.name == 'nt' else 92 - -def _remove_temp_dir(rmtree, tempdir): - rmtree(tempdir) - - current_process = process.current_process() - # current_process() can be None if the finalizer is called - # late during Python finalization - if current_process is not None: - current_process._config['tempdir'] = None - -def _get_base_temp_dir(tempfile): - """Get a temporary directory where socket files will be created. 
- - To prevent additional imports, pass a pre-imported 'tempfile' module. - """ - if os.name == 'nt': - return None - # Most of the time, the default temporary directory is /tmp. Thus, - # listener sockets files "$TMPDIR/pymp-XXXXXXXX/sock-XXXXXXXX" do - # not have a path length exceeding SUN_PATH_MAX. - # - # If users specify their own temporary directory, we may be unable - # to create those files. Therefore, we fall back to the system-wide - # temporary directory /tmp, assumed to exist on POSIX systems. - # - # See https://github.com/python/cpython/issues/132124. - base_tempdir = tempfile.gettempdir() - # Files created in a temporary directory are suffixed by a string - # generated by tempfile._RandomNameSequence, which, by design, - # is 8 characters long. - # - # Thus, the socket file path length (without NULL terminator) will be: - # - # len(base_tempdir + '/pymp-XXXXXXXX' + '/sock-XXXXXXXX') - sun_path_len = len(base_tempdir) + 14 + 14 - # Strict inequality to account for the NULL terminator. - # See https://github.com/python/cpython/issues/140734. - if sun_path_len < _SUN_PATH_MAX: - return base_tempdir - # Fallback to the default system-wide temporary directory. - # This ignores user-defined environment variables. - # - # On POSIX systems, /tmp MUST be writable by any application [1]. - # We however emit a warning if this is not the case to prevent - # obscure errors later in the execution. - # - # On some legacy systems, /var/tmp and /usr/tmp can be present - # and will be used instead. - # - # [1]: https://refspecs.linuxfoundation.org/FHS_3.0/fhs/ch03s18.html - dirlist = ['/tmp', '/var/tmp', '/usr/tmp'] - try: - base_system_tempdir = tempfile._get_default_tempdir(dirlist) - except FileNotFoundError: - _warn("Process-wide temporary directory %s will not be usable for " - "creating socket files and no usable system-wide temporary " - "directory was found in %s", base_tempdir, dirlist) - # At this point, the system-wide temporary directory is not usable - # but we may assume that the user-defined one is, even if we will - # not be able to write socket files out there. 
- return base_tempdir - _warn("Ignoring user-defined temporary directory: %s", base_tempdir) - # at most max(map(len, dirlist)) + 14 + 14 = 36 characters - assert len(base_system_tempdir) + 14 + 14 < _SUN_PATH_MAX - return base_system_tempdir - -def get_temp_dir(): - # get name of a temp directory which will be automatically cleaned up - tempdir = process.current_process()._config.get('tempdir') - if tempdir is None: - import shutil, tempfile - base_tempdir = _get_base_temp_dir(tempfile) - tempdir = tempfile.mkdtemp(prefix='pymp-', dir=base_tempdir) - info('created temp directory %s', tempdir) - # keep a strong reference to shutil.rmtree(), since the finalizer - # can be called late during Python shutdown - Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), - exitpriority=-100) - process.current_process()._config['tempdir'] = tempdir - return tempdir - -# -# Support for reinitialization of objects when bootstrapping a child process -# - -_afterfork_registry = weakref.WeakValueDictionary() -_afterfork_counter = itertools.count() - -def _run_after_forkers(): - items = list(_afterfork_registry.items()) - items.sort() - for (index, ident, func), obj in items: - try: - func(obj) - except Exception as e: - info('after forker raised exception %s', e) - -def register_after_fork(obj, func): - _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj - -# -# Finalization using weakrefs -# - -_finalizer_registry = {} -_finalizer_counter = itertools.count() - - -class Finalize(object): - ''' - Class which supports object finalization using weakrefs - ''' - def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): - if (exitpriority is not None) and not isinstance(exitpriority,int): - raise TypeError( - "Exitpriority ({0!r}) must be None or int, not {1!s}".format( - exitpriority, type(exitpriority))) - - if obj is not None: - self._weakref = weakref.ref(obj, self) - elif exitpriority is None: - raise ValueError("Without object, exitpriority cannot be None") - - self._callback = callback - self._args = args - self._kwargs = kwargs or {} - self._key = (exitpriority, next(_finalizer_counter)) - self._pid = os.getpid() - - _finalizer_registry[self._key] = self - - def __call__(self, wr=None, - # Need to bind these locally because the globals can have - # been cleared at shutdown - _finalizer_registry=_finalizer_registry, - sub_debug=sub_debug, getpid=os.getpid): - ''' - Run the callback unless it has already been called or cancelled - ''' - try: - del _finalizer_registry[self._key] - except KeyError: - sub_debug('finalizer no longer registered') - else: - if self._pid != getpid(): - sub_debug('finalizer ignored because different process') - res = None - else: - sub_debug('finalizer calling %s with args %s and kwargs %s', - self._callback, self._args, self._kwargs) - res = self._callback(*self._args, **self._kwargs) - self._weakref = self._callback = self._args = \ - self._kwargs = self._key = None - return res - - def cancel(self): - ''' - Cancel finalization of the object - ''' - try: - del _finalizer_registry[self._key] - except KeyError: - pass - else: - self._weakref = self._callback = self._args = \ - self._kwargs = self._key = None - - def still_active(self): - ''' - Return whether this finalizer is still waiting to invoke callback - ''' - return self._key in _finalizer_registry - - def __repr__(self): - try: - obj = self._weakref() - except (AttributeError, TypeError): - obj = None - - if obj is None: - return '<%s object, dead>' % self.__class__.__name__ - 
- x = '<%s object, callback=%s' % ( - self.__class__.__name__, - getattr(self._callback, '__name__', self._callback)) - if self._args: - x += ', args=' + str(self._args) - if self._kwargs: - x += ', kwargs=' + str(self._kwargs) - if self._key[0] is not None: - x += ', exitpriority=' + str(self._key[0]) - return x + '>' - - -def _run_finalizers(minpriority=None): - ''' - Run all finalizers whose exit priority is not None and at least minpriority - - Finalizers with highest priority are called first; finalizers with - the same priority will be called in reverse order of creation. - ''' - if _finalizer_registry is None: - # This function may be called after this module's globals are - # destroyed. See the _exit_function function in this module for more - # notes. - return - - if minpriority is None: - f = lambda p : p[0] is not None - else: - f = lambda p : p[0] is not None and p[0] >= minpriority - - # Careful: _finalizer_registry may be mutated while this function - # is running (either by a GC run or by another thread). - - # list(_finalizer_registry) should be atomic, while - # list(_finalizer_registry.items()) is not. - keys = [key for key in list(_finalizer_registry) if f(key)] - keys.sort(reverse=True) - - for key in keys: - finalizer = _finalizer_registry.get(key) - # key may have been removed from the registry - if finalizer is not None: - sub_debug('calling %s', finalizer) - try: - finalizer() - except Exception: - import traceback - traceback.print_exc() - - if minpriority is None: - _finalizer_registry.clear() - -# -# Clean up on exit -# - -def is_exiting(): - ''' - Returns true if the process is shutting down - ''' - return _exiting or _exiting is None - -_exiting = False - -def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, - active_children=process.active_children, - current_process=process.current_process): - # We hold on to references to functions in the arglist due to the - # situation described below, where this function is called after this - # module's globals are destroyed. - - global _exiting - - if not _exiting: - _exiting = True - - info('process shutting down') - debug('running all "atexit" finalizers with priority >= 0') - _run_finalizers(0) - - if current_process() is not None: - # We check if the current process is None here because if - # it's None, any call to ``active_children()`` will raise - # an AttributeError (active_children winds up trying to - # get attributes from util._current_process). One - # situation where this can happen is if someone has - # manipulated sys.modules, causing this module to be - # garbage collected. The destructor for the module type - # then replaces all values in the module dict with None. - # For instance, after setuptools runs a test it replaces - # sys.modules with a copy created earlier. See issues - # #9775 and #15881. Also related: #4106, #9205, and - # #9207. 
- - for p in active_children(): - if p.daemon: - info('calling terminate() for daemon %s', p.name) - p._popen.terminate() - - for p in active_children(): - info('calling join() for process %s', p.name) - p.join() - - debug('running the remaining "atexit" finalizers') - _run_finalizers() - -atexit.register(_exit_function) - -# -# Some fork aware types -# - -class ForkAwareThreadLock(object): - def __init__(self): - self._lock = threading.Lock() - self.acquire = self._lock.acquire - self.release = self._lock.release - register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) - - def _at_fork_reinit(self): - self._lock._at_fork_reinit() - - def __enter__(self): - return self._lock.__enter__() - - def __exit__(self, *args): - return self._lock.__exit__(*args) - - -class ForkAwareLocal(threading.local): - def __init__(self): - register_after_fork(self, lambda obj : obj.__dict__.clear()) - def __reduce__(self): - return type(self), () - -# -# Close fds except those specified -# - -try: - MAXFD = os.sysconf("SC_OPEN_MAX") -except Exception: - MAXFD = 256 - -def close_all_fds_except(fds): - fds = list(fds) + [-1, MAXFD] - fds.sort() - assert fds[-1] == MAXFD, 'fd too large' - for i in range(len(fds) - 1): - os.closerange(fds[i]+1, fds[i+1]) -# -# Close sys.stdin and replace stdin with os.devnull -# - -def _close_stdin(): - if sys.stdin is None: - return - - try: - sys.stdin.close() - except (OSError, ValueError): - pass - - try: - fd = os.open(os.devnull, os.O_RDONLY) - try: - sys.stdin = open(fd, encoding="utf-8", closefd=False) - except: - os.close(fd) - raise - except (OSError, ValueError): - pass - -# -# Flush standard streams, if any -# - -def _flush_std_streams(): - try: - sys.stdout.flush() - except (AttributeError, ValueError): - pass - try: - sys.stderr.flush() - except (AttributeError, ValueError): - pass - -# -# Start a program with only specified fds kept open -# - -def spawnv_passfds(path, args, passfds): - import _posixsubprocess - import subprocess - passfds = tuple(sorted(map(int, passfds))) - errpipe_read, errpipe_write = os.pipe() - try: - return _posixsubprocess.fork_exec( - args, [path], True, passfds, None, None, - -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, - False, False, -1, None, None, None, -1, None, - subprocess._USE_VFORK) - finally: - os.close(errpipe_read) - os.close(errpipe_write) - - -def close_fds(*fds): - """Close each file descriptor given as an argument""" - for fd in fds: - os.close(fd) - - -def _cleanup_tests(): - """Cleanup multiprocessing resources when multiprocessing tests - completed.""" - - from test import support - - # cleanup multiprocessing - process._cleanup() - - # Stop the ForkServer process if it's running - from multiprocessing import forkserver - forkserver._forkserver._stop() - - # Stop the ResourceTracker process if it's running - from multiprocessing import resource_tracker - resource_tracker._resource_tracker._stop() - - # bpo-37421: Explicitly call _run_finalizers() to remove immediately - # temporary directories created by multiprocessing.util.get_temp_dir(). - _run_finalizers() - support.gc_collect() - - support.reap_children() diff --git a/Python313_13_x86_Template/Lib/nturl2path.py b/Python313_13_x86_Template/Lib/nturl2path.py deleted file mode 100644 index 757fd01b..00000000 --- a/Python313_13_x86_Template/Lib/nturl2path.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Convert a NT pathname to a file URL and vice versa. - -This module only exists to provide OS-specific code -for urllib.requests, thus do not use directly. 
-""" -# Testing is done through test_urllib. - -def url2pathname(url): - """OS-specific conversion from a relative URL of the 'file' scheme - to a file system path; not recommended for general use.""" - # e.g. - # ///C|/foo/bar/spam.foo - # and - # ///C:/foo/bar/spam.foo - # become - # C:\foo\bar\spam.foo - import string, urllib.parse - if url[:3] == '///': - # URL has an empty authority section, so the path begins on the third - # character. - url = url[2:] - elif url[:12] == '//localhost/': - # Skip past 'localhost' authority. - url = url[11:] - if url[:3] == '///': - # Skip past extra slash before UNC drive in URL path. - url = url[1:] - # Windows itself uses ":" even in URLs. - url = url.replace(':', '|') - if not '|' in url: - # No drive specifier, just convert slashes - # make sure not to convert quoted slashes :-) - return urllib.parse.unquote(url.replace('/', '\\')) - comp = url.split('|') - if len(comp) != 2 or comp[0][-1] not in string.ascii_letters: - error = 'Bad URL: ' + url - raise OSError(error) - drive = comp[0][-1].upper() - tail = urllib.parse.unquote(comp[1].replace('/', '\\')) - return drive + ':' + tail - -def pathname2url(p): - """OS-specific conversion from a file system path to a relative URL - of the 'file' scheme; not recommended for general use.""" - # e.g. - # C:\foo\bar\spam.foo - # becomes - # ///C:/foo/bar/spam.foo - import urllib.parse - # First, clean up some special forms. We are going to sacrifice - # the additional information anyway - p = p.replace('\\', '/') - if p[:4] == '//?/': - p = p[4:] - if p[:4].upper() == 'UNC/': - p = '//' + p[4:] - elif p[1:2] != ':': - raise OSError('Bad path: ' + p) - if not ':' in p: - # No DOS drive specified, just quote the pathname - return urllib.parse.quote(p) - comp = p.split(':', maxsplit=2) - if len(comp) != 2 or len(comp[0]) > 1: - error = 'Bad path: ' + p - raise OSError(error) - - drive = urllib.parse.quote(comp[0].upper()) - tail = urllib.parse.quote(comp[1]) - return '///' + drive + ':' + tail diff --git a/Python313_13_x86_Template/Lib/opcode.py b/Python313_13_x86_Template/Lib/opcode.py deleted file mode 100644 index 5735686f..00000000 --- a/Python313_13_x86_Template/Lib/opcode.py +++ /dev/null @@ -1,115 +0,0 @@ - -""" -opcode module - potentially shared between dis and other modules which -operate on bytecodes (e.g. peephole optimizers). 
-""" - - -__all__ = ["cmp_op", "stack_effect", "hascompare", "opname", "opmap", - "HAVE_ARGUMENT", "EXTENDED_ARG", "hasarg", "hasconst", "hasname", - "hasjump", "hasjrel", "hasjabs", "hasfree", "haslocal", "hasexc"] - -import _opcode -from _opcode import stack_effect - -from _opcode_metadata import (_specializations, _specialized_opmap, opmap, - HAVE_ARGUMENT, MIN_INSTRUMENTED_OPCODE) -EXTENDED_ARG = opmap['EXTENDED_ARG'] - -opname = ['<%r>' % (op,) for op in range(max(opmap.values()) + 1)] -for op, i in opmap.items(): - opname[i] = op - -cmp_op = ('<', '<=', '==', '!=', '>', '>=') - -# These lists are documented as part of the dis module's API -hasarg = [op for op in opmap.values() if _opcode.has_arg(op)] -hasconst = [op for op in opmap.values() if _opcode.has_const(op)] -hasname = [op for op in opmap.values() if _opcode.has_name(op)] -hasjump = [op for op in opmap.values() if _opcode.has_jump(op)] -hasjrel = hasjump # for backward compatibility -hasjabs = [] -hasfree = [op for op in opmap.values() if _opcode.has_free(op)] -haslocal = [op for op in opmap.values() if _opcode.has_local(op)] -hasexc = [op for op in opmap.values() if _opcode.has_exc(op)] - - -_intrinsic_1_descs = _opcode.get_intrinsic1_descs() -_intrinsic_2_descs = _opcode.get_intrinsic2_descs() -_nb_ops = _opcode.get_nb_ops() - -hascompare = [opmap["COMPARE_OP"]] - -_cache_format = { - "LOAD_GLOBAL": { - "counter": 1, - "index": 1, - "module_keys_version": 1, - "builtin_keys_version": 1, - }, - "BINARY_OP": { - "counter": 1, - }, - "UNPACK_SEQUENCE": { - "counter": 1, - }, - "COMPARE_OP": { - "counter": 1, - }, - "CONTAINS_OP": { - "counter": 1, - }, - "BINARY_SUBSCR": { - "counter": 1, - }, - "FOR_ITER": { - "counter": 1, - }, - "LOAD_SUPER_ATTR": { - "counter": 1, - }, - "LOAD_ATTR": { - "counter": 1, - "version": 2, - "keys_version": 2, - "descr": 4, - }, - "STORE_ATTR": { - "counter": 1, - "version": 2, - "index": 1, - }, - "CALL": { - "counter": 1, - "func_version": 2, - }, - "STORE_SUBSCR": { - "counter": 1, - }, - "SEND": { - "counter": 1, - }, - "JUMP_BACKWARD": { - "counter": 1, - }, - "TO_BOOL": { - "counter": 1, - "version": 2, - }, - "POP_JUMP_IF_TRUE": { - "counter": 1, - }, - "POP_JUMP_IF_FALSE": { - "counter": 1, - }, - "POP_JUMP_IF_NONE": { - "counter": 1, - }, - "POP_JUMP_IF_NOT_NONE": { - "counter": 1, - }, -} - -_inline_cache_entries = { - name : sum(value.values()) for (name, value) in _cache_format.items() -} diff --git a/Python313_13_x86_Template/Lib/operator.py b/Python313_13_x86_Template/Lib/operator.py deleted file mode 100644 index 02ccdaa1..00000000 --- a/Python313_13_x86_Template/Lib/operator.py +++ /dev/null @@ -1,467 +0,0 @@ -""" -Operator Interface - -This module exports a set of functions corresponding to the intrinsic -operators of Python. For example, operator.add(x, y) is equivalent -to the expression x+y. The function names are those used for special -methods; variants without leading and trailing '__' are also provided -for convenience. - -This is the pure Python implementation of the module. 
-""" - -__all__ = ['abs', 'add', 'and_', 'attrgetter', 'call', 'concat', 'contains', 'countOf', - 'delitem', 'eq', 'floordiv', 'ge', 'getitem', 'gt', 'iadd', 'iand', - 'iconcat', 'ifloordiv', 'ilshift', 'imatmul', 'imod', 'imul', - 'index', 'indexOf', 'inv', 'invert', 'ior', 'ipow', 'irshift', - 'is_', 'is_not', 'isub', 'itemgetter', 'itruediv', 'ixor', 'le', - 'length_hint', 'lshift', 'lt', 'matmul', 'methodcaller', 'mod', - 'mul', 'ne', 'neg', 'not_', 'or_', 'pos', 'pow', 'rshift', - 'setitem', 'sub', 'truediv', 'truth', 'xor'] - -from builtins import abs as _abs - - -# Comparison Operations *******************************************************# - -def lt(a, b): - "Same as a < b." - return a < b - -def le(a, b): - "Same as a <= b." - return a <= b - -def eq(a, b): - "Same as a == b." - return a == b - -def ne(a, b): - "Same as a != b." - return a != b - -def ge(a, b): - "Same as a >= b." - return a >= b - -def gt(a, b): - "Same as a > b." - return a > b - -# Logical Operations **********************************************************# - -def not_(a): - "Same as not a." - return not a - -def truth(a): - "Return True if a is true, False otherwise." - return True if a else False - -def is_(a, b): - "Same as a is b." - return a is b - -def is_not(a, b): - "Same as a is not b." - return a is not b - -# Mathematical/Bitwise Operations *********************************************# - -def abs(a): - "Same as abs(a)." - return _abs(a) - -def add(a, b): - "Same as a + b." - return a + b - -def and_(a, b): - "Same as a & b." - return a & b - -def floordiv(a, b): - "Same as a // b." - return a // b - -def index(a): - "Same as a.__index__()." - return a.__index__() - -def inv(a): - "Same as ~a." - return ~a -invert = inv - -def lshift(a, b): - "Same as a << b." - return a << b - -def mod(a, b): - "Same as a % b." - return a % b - -def mul(a, b): - "Same as a * b." - return a * b - -def matmul(a, b): - "Same as a @ b." - return a @ b - -def neg(a): - "Same as -a." - return -a - -def or_(a, b): - "Same as a | b." - return a | b - -def pos(a): - "Same as +a." - return +a - -def pow(a, b): - "Same as a ** b." - return a ** b - -def rshift(a, b): - "Same as a >> b." - return a >> b - -def sub(a, b): - "Same as a - b." - return a - b - -def truediv(a, b): - "Same as a / b." - return a / b - -def xor(a, b): - "Same as a ^ b." - return a ^ b - -# Sequence Operations *********************************************************# - -def concat(a, b): - "Same as a + b, for a and b sequences." - if not hasattr(a, '__getitem__'): - msg = "'%s' object can't be concatenated" % type(a).__name__ - raise TypeError(msg) - return a + b - -def contains(a, b): - "Same as b in a (note reversed operands)." - return b in a - -def countOf(a, b): - "Return the number of items in a which are, or which equal, b." - count = 0 - for i in a: - if i is b or i == b: - count += 1 - return count - -def delitem(a, b): - "Same as del a[b]." - del a[b] - -def getitem(a, b): - "Same as a[b]." - return a[b] - -def indexOf(a, b): - "Return the first index of b in a." - for i, j in enumerate(a): - if j is b or j == b: - return i - else: - raise ValueError('sequence.index(x): x not in sequence') - -def setitem(a, b, c): - "Same as a[b] = c." - a[b] = c - -def length_hint(obj, default=0): - """ - Return an estimate of the number of items in obj. - This is useful for presizing containers when building from an iterable. - - If the object supports len(), the result will be exact. Otherwise, it may - over- or under-estimate by an arbitrary amount. 
The result will be an - integer >= 0. - """ - if not isinstance(default, int): - msg = ("'%s' object cannot be interpreted as an integer" % - type(default).__name__) - raise TypeError(msg) - - try: - return len(obj) - except TypeError: - pass - - try: - hint = type(obj).__length_hint__ - except AttributeError: - return default - - try: - val = hint(obj) - except TypeError: - return default - if val is NotImplemented: - return default - if not isinstance(val, int): - msg = ('__length_hint__ must be integer, not %s' % - type(val).__name__) - raise TypeError(msg) - if val < 0: - msg = '__length_hint__() should return >= 0' - raise ValueError(msg) - return val - -# Other Operations ************************************************************# - -def call(obj, /, *args, **kwargs): - """Same as obj(*args, **kwargs).""" - return obj(*args, **kwargs) - -# Generalized Lookup Objects **************************************************# - -class attrgetter: - """ - Return a callable object that fetches the given attribute(s) from its operand. - After f = attrgetter('name'), the call f(r) returns r.name. - After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date). - After h = attrgetter('name.first', 'name.last'), the call h(r) returns - (r.name.first, r.name.last). - """ - __slots__ = ('_attrs', '_call') - - def __init__(self, attr, /, *attrs): - if not attrs: - if not isinstance(attr, str): - raise TypeError('attribute name must be a string') - self._attrs = (attr,) - names = attr.split('.') - def func(obj): - for name in names: - obj = getattr(obj, name) - return obj - self._call = func - else: - self._attrs = (attr,) + attrs - getters = tuple(map(attrgetter, self._attrs)) - def func(obj): - return tuple(getter(obj) for getter in getters) - self._call = func - - def __call__(self, obj, /): - return self._call(obj) - - def __repr__(self): - return '%s.%s(%s)' % (self.__class__.__module__, - self.__class__.__qualname__, - ', '.join(map(repr, self._attrs))) - - def __reduce__(self): - return self.__class__, self._attrs - -class itemgetter: - """ - Return a callable object that fetches the given item(s) from its operand. - After f = itemgetter(2), the call f(r) returns r[2]. - After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) - """ - __slots__ = ('_items', '_call') - - def __init__(self, item, /, *items): - if not items: - self._items = (item,) - def func(obj): - return obj[item] - self._call = func - else: - self._items = items = (item,) + items - def func(obj): - return tuple(obj[i] for i in items) - self._call = func - - def __call__(self, obj, /): - return self._call(obj) - - def __repr__(self): - return '%s.%s(%s)' % (self.__class__.__module__, - self.__class__.__name__, - ', '.join(map(repr, self._items))) - - def __reduce__(self): - return self.__class__, self._items - -class methodcaller: - """ - Return a callable object that calls the given method on its operand. - After f = methodcaller('name'), the call f(r) returns r.name(). - After g = methodcaller('name', 'date', foo=1), the call g(r) returns - r.name('date', foo=1). 
- """ - __slots__ = ('_name', '_args', '_kwargs') - - def __init__(self, name, /, *args, **kwargs): - self._name = name - if not isinstance(self._name, str): - raise TypeError('method name must be a string') - self._args = args - self._kwargs = kwargs - - def __call__(self, obj, /): - return getattr(obj, self._name)(*self._args, **self._kwargs) - - def __repr__(self): - args = [repr(self._name)] - args.extend(map(repr, self._args)) - args.extend('%s=%r' % (k, v) for k, v in self._kwargs.items()) - return '%s.%s(%s)' % (self.__class__.__module__, - self.__class__.__name__, - ', '.join(args)) - - def __reduce__(self): - if not self._kwargs: - return self.__class__, (self._name,) + self._args - else: - from functools import partial - return partial(self.__class__, self._name, **self._kwargs), self._args - - -# In-place Operations *********************************************************# - -def iadd(a, b): - "Same as a += b." - a += b - return a - -def iand(a, b): - "Same as a &= b." - a &= b - return a - -def iconcat(a, b): - "Same as a += b, for a and b sequences." - if not hasattr(a, '__getitem__'): - msg = "'%s' object can't be concatenated" % type(a).__name__ - raise TypeError(msg) - a += b - return a - -def ifloordiv(a, b): - "Same as a //= b." - a //= b - return a - -def ilshift(a, b): - "Same as a <<= b." - a <<= b - return a - -def imod(a, b): - "Same as a %= b." - a %= b - return a - -def imul(a, b): - "Same as a *= b." - a *= b - return a - -def imatmul(a, b): - "Same as a @= b." - a @= b - return a - -def ior(a, b): - "Same as a |= b." - a |= b - return a - -def ipow(a, b): - "Same as a **= b." - a **=b - return a - -def irshift(a, b): - "Same as a >>= b." - a >>= b - return a - -def isub(a, b): - "Same as a -= b." - a -= b - return a - -def itruediv(a, b): - "Same as a /= b." - a /= b - return a - -def ixor(a, b): - "Same as a ^= b." - a ^= b - return a - - -try: - from _operator import * -except ImportError: - pass -else: - from _operator import __doc__ - -# All of these "__func__ = func" assignments have to happen after importing -# from _operator to make sure they're set to the right function -__lt__ = lt -__le__ = le -__eq__ = eq -__ne__ = ne -__ge__ = ge -__gt__ = gt -__not__ = not_ -__abs__ = abs -__add__ = add -__and__ = and_ -__call__ = call -__floordiv__ = floordiv -__index__ = index -__inv__ = inv -__invert__ = invert -__lshift__ = lshift -__mod__ = mod -__mul__ = mul -__matmul__ = matmul -__neg__ = neg -__or__ = or_ -__pos__ = pos -__pow__ = pow -__rshift__ = rshift -__sub__ = sub -__truediv__ = truediv -__xor__ = xor -__concat__ = concat -__contains__ = contains -__delitem__ = delitem -__getitem__ = getitem -__setitem__ = setitem -__iadd__ = iadd -__iand__ = iand -__iconcat__ = iconcat -__ifloordiv__ = ifloordiv -__ilshift__ = ilshift -__imod__ = imod -__imul__ = imul -__imatmul__ = imatmul -__ior__ = ior -__ipow__ = ipow -__irshift__ = irshift -__isub__ = isub -__itruediv__ = itruediv -__ixor__ = ixor diff --git a/Python313_13_x86_Template/Lib/optparse.py b/Python313_13_x86_Template/Lib/optparse.py deleted file mode 100644 index 1c450c6f..00000000 --- a/Python313_13_x86_Template/Lib/optparse.py +++ /dev/null @@ -1,1681 +0,0 @@ -"""A powerful, extensible, and easy-to-use option parser. - -By Greg Ward - -Originally distributed as Optik. - -For support, use the optik-users@lists.sourceforge.net mailing list -(http://lists.sourceforge.net/lists/listinfo/optik-users). 
- -Simple usage example: - - from optparse import OptionParser - - parser = OptionParser() - parser.add_option("-f", "--file", dest="filename", - help="write report to FILE", metavar="FILE") - parser.add_option("-q", "--quiet", - action="store_false", dest="verbose", default=True, - help="don't print status messages to stdout") - - (options, args) = parser.parse_args() -""" - -__version__ = "1.5.3" - -__all__ = ['Option', - 'make_option', - 'SUPPRESS_HELP', - 'SUPPRESS_USAGE', - 'Values', - 'OptionContainer', - 'OptionGroup', - 'OptionParser', - 'HelpFormatter', - 'IndentedHelpFormatter', - 'TitledHelpFormatter', - 'OptParseError', - 'OptionError', - 'OptionConflictError', - 'OptionValueError', - 'BadOptionError', - 'check_choice'] - -__copyright__ = """ -Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved. -Copyright (c) 2002-2006 Python Software Foundation. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - * Neither the name of the author nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -""" - -import sys, os -import textwrap - -def _repr(self): - return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self) - - -# This file was generated from: -# Id: option_parser.py 527 2006-07-23 15:21:30Z greg -# Id: option.py 522 2006-06-11 16:22:03Z gward -# Id: help.py 527 2006-07-23 15:21:30Z greg -# Id: errors.py 509 2006-04-20 00:58:24Z gward - -try: - from gettext import gettext, ngettext -except ImportError: - def gettext(message): - return message - - def ngettext(singular, plural, n): - if n == 1: - return singular - return plural - -_ = gettext - - -class OptParseError (Exception): - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - - -class OptionError (OptParseError): - """ - Raised if an Option instance is created with invalid or - inconsistent arguments. - """ - - def __init__(self, msg, option): - self.msg = msg - self.option_id = str(option) - - def __str__(self): - if self.option_id: - return "option %s: %s" % (self.option_id, self.msg) - else: - return self.msg - -class OptionConflictError (OptionError): - """ - Raised if conflicting options are added to an OptionParser. 
- """ - -class OptionValueError (OptParseError): - """ - Raised if an invalid option value is encountered on the command - line. - """ - -class BadOptionError (OptParseError): - """ - Raised if an invalid option is seen on the command line. - """ - def __init__(self, opt_str): - self.opt_str = opt_str - - def __str__(self): - return _("no such option: %s") % self.opt_str - -class AmbiguousOptionError (BadOptionError): - """ - Raised if an ambiguous option is seen on the command line. - """ - def __init__(self, opt_str, possibilities): - BadOptionError.__init__(self, opt_str) - self.possibilities = possibilities - - def __str__(self): - return (_("ambiguous option: %s (%s?)") - % (self.opt_str, ", ".join(self.possibilities))) - - -class HelpFormatter: - - """ - Abstract base class for formatting option help. OptionParser - instances should use one of the HelpFormatter subclasses for - formatting help; by default IndentedHelpFormatter is used. - - Instance attributes: - parser : OptionParser - the controlling OptionParser instance - indent_increment : int - the number of columns to indent per nesting level - max_help_position : int - the maximum starting column for option help text - help_position : int - the calculated starting column for option help text; - initially the same as the maximum - width : int - total number of columns for output (pass None to constructor for - this value to be taken from the $COLUMNS environment variable) - level : int - current indentation level - current_indent : int - current indentation level (in columns) - help_width : int - number of columns available for option help text (calculated) - default_tag : str - text to replace with each option's default value, "%default" - by default. Set to false value to disable default value expansion. - option_strings : { Option : str } - maps Option instances to the snippet of help text explaining - the syntax of that option, e.g. "-h, --help" or - "-fFILE, --file=FILE" - _short_opt_fmt : str - format string controlling how short options with values are - printed in help text. Must be either "%s%s" ("-fFILE") or - "%s %s" ("-f FILE"), because those are the two syntaxes that - Optik supports. - _long_opt_fmt : str - similar but for long options; must be either "%s %s" ("--file FILE") - or "%s=%s" ("--file=FILE"). 
- """ - - NO_DEFAULT_VALUE = "none" - - def __init__(self, - indent_increment, - max_help_position, - width, - short_first): - self.parser = None - self.indent_increment = indent_increment - if width is None: - try: - width = int(os.environ['COLUMNS']) - except (KeyError, ValueError): - width = 80 - width -= 2 - self.width = width - self.help_position = self.max_help_position = \ - min(max_help_position, max(width - 20, indent_increment * 2)) - self.current_indent = 0 - self.level = 0 - self.help_width = None # computed later - self.short_first = short_first - self.default_tag = "%default" - self.option_strings = {} - self._short_opt_fmt = "%s %s" - self._long_opt_fmt = "%s=%s" - - def set_parser(self, parser): - self.parser = parser - - def set_short_opt_delimiter(self, delim): - if delim not in ("", " "): - raise ValueError( - "invalid metavar delimiter for short options: %r" % delim) - self._short_opt_fmt = "%s" + delim + "%s" - - def set_long_opt_delimiter(self, delim): - if delim not in ("=", " "): - raise ValueError( - "invalid metavar delimiter for long options: %r" % delim) - self._long_opt_fmt = "%s" + delim + "%s" - - def indent(self): - self.current_indent += self.indent_increment - self.level += 1 - - def dedent(self): - self.current_indent -= self.indent_increment - assert self.current_indent >= 0, "Indent decreased below 0." - self.level -= 1 - - def format_usage(self, usage): - raise NotImplementedError("subclasses must implement") - - def format_heading(self, heading): - raise NotImplementedError("subclasses must implement") - - def _format_text(self, text): - """ - Format a paragraph of free-form text for inclusion in the - help output at the current indentation level. - """ - text_width = max(self.width - self.current_indent, 11) - indent = " "*self.current_indent - return textwrap.fill(text, - text_width, - initial_indent=indent, - subsequent_indent=indent) - - def format_description(self, description): - if description: - return self._format_text(description) + "\n" - else: - return "" - - def format_epilog(self, epilog): - if epilog: - return "\n" + self._format_text(epilog) + "\n" - else: - return "" - - - def expand_default(self, option): - if self.parser is None or not self.default_tag: - return option.help - - default_value = self.parser.defaults.get(option.dest) - if default_value is NO_DEFAULT or default_value is None: - default_value = self.NO_DEFAULT_VALUE - - return option.help.replace(self.default_tag, str(default_value)) - - def format_option(self, option): - # The help for each option consists of two parts: - # * the opt strings and metavars - # eg. ("-x", or "-fFILENAME, --file=FILENAME") - # * the user-supplied help string - # eg. ("turn on expert mode", "read data from FILENAME") - # - # If possible, we write both of these on the same line: - # -x turn on expert mode - # - # But if the opt string list is too long, we put the help - # string on a second line, indented to the same column it would - # start in if it fit on the first line. 
- # -fFILENAME, --file=FILENAME - # read data from FILENAME - result = [] - opts = self.option_strings[option] - opt_width = self.help_position - self.current_indent - 2 - if len(opts) > opt_width: - opts = "%*s%s\n" % (self.current_indent, "", opts) - indent_first = self.help_position - else: # start help on same line as opts - opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) - indent_first = 0 - result.append(opts) - if option.help: - help_text = self.expand_default(option) - help_lines = textwrap.wrap(help_text, self.help_width) - result.append("%*s%s\n" % (indent_first, "", help_lines[0])) - result.extend(["%*s%s\n" % (self.help_position, "", line) - for line in help_lines[1:]]) - elif opts[-1] != "\n": - result.append("\n") - return "".join(result) - - def store_option_strings(self, parser): - self.indent() - max_len = 0 - for opt in parser.option_list: - strings = self.format_option_strings(opt) - self.option_strings[opt] = strings - max_len = max(max_len, len(strings) + self.current_indent) - self.indent() - for group in parser.option_groups: - for opt in group.option_list: - strings = self.format_option_strings(opt) - self.option_strings[opt] = strings - max_len = max(max_len, len(strings) + self.current_indent) - self.dedent() - self.dedent() - self.help_position = min(max_len + 2, self.max_help_position) - self.help_width = max(self.width - self.help_position, 11) - - def format_option_strings(self, option): - """Return a comma-separated list of option strings & metavariables.""" - if option.takes_value(): - metavar = option.metavar or option.dest.upper() - short_opts = [self._short_opt_fmt % (sopt, metavar) - for sopt in option._short_opts] - long_opts = [self._long_opt_fmt % (lopt, metavar) - for lopt in option._long_opts] - else: - short_opts = option._short_opts - long_opts = option._long_opts - - if self.short_first: - opts = short_opts + long_opts - else: - opts = long_opts + short_opts - - return ", ".join(opts) - -class IndentedHelpFormatter (HelpFormatter): - """Format help with indented section bodies. - """ - - def __init__(self, - indent_increment=2, - max_help_position=24, - width=None, - short_first=1): - HelpFormatter.__init__( - self, indent_increment, max_help_position, width, short_first) - - def format_usage(self, usage): - return _("Usage: %s\n") % usage - - def format_heading(self, heading): - return "%*s%s:\n" % (self.current_indent, "", heading) - - -class TitledHelpFormatter (HelpFormatter): - """Format help with underlined section headers. 
- """ - - def __init__(self, - indent_increment=0, - max_help_position=24, - width=None, - short_first=0): - HelpFormatter.__init__ ( - self, indent_increment, max_help_position, width, short_first) - - def format_usage(self, usage): - return "%s %s\n" % (self.format_heading(_("Usage")), usage) - - def format_heading(self, heading): - return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading)) - - -def _parse_num(val, type): - if val[:2].lower() == "0x": # hexadecimal - radix = 16 - elif val[:2].lower() == "0b": # binary - radix = 2 - val = val[2:] or "0" # have to remove "0b" prefix - elif val[:1] == "0": # octal - radix = 8 - else: # decimal - radix = 10 - - return type(val, radix) - -def _parse_int(val): - return _parse_num(val, int) - -_builtin_cvt = { "int" : (_parse_int, _("integer")), - "long" : (_parse_int, _("integer")), - "float" : (float, _("floating-point")), - "complex" : (complex, _("complex")) } - -def check_builtin(option, opt, value): - (cvt, what) = _builtin_cvt[option.type] - try: - return cvt(value) - except ValueError: - raise OptionValueError( - _("option %s: invalid %s value: %r") % (opt, what, value)) - -def check_choice(option, opt, value): - if value in option.choices: - return value - else: - choices = ", ".join(map(repr, option.choices)) - raise OptionValueError( - _("option %s: invalid choice: %r (choose from %s)") - % (opt, value, choices)) - -# Not supplying a default is different from a default of None, -# so we need an explicit "not supplied" value. -NO_DEFAULT = ("NO", "DEFAULT") - - -class Option: - """ - Instance attributes: - _short_opts : [string] - _long_opts : [string] - - action : string - type : string - dest : string - default : any - nargs : int - const : any - choices : [string] - callback : function - callback_args : (any*) - callback_kwargs : { string : any } - help : string - metavar : string - """ - - # The list of instance attributes that may be set through - # keyword args to the constructor. - ATTRS = ['action', - 'type', - 'dest', - 'default', - 'nargs', - 'const', - 'choices', - 'callback', - 'callback_args', - 'callback_kwargs', - 'help', - 'metavar'] - - # The set of actions allowed by option parsers. Explicitly listed - # here so the constructor can validate its arguments. - ACTIONS = ("store", - "store_const", - "store_true", - "store_false", - "append", - "append_const", - "count", - "callback", - "help", - "version") - - # The set of actions that involve storing a value somewhere; - # also listed just for constructor argument validation. (If - # the action is one of these, there must be a destination.) - STORE_ACTIONS = ("store", - "store_const", - "store_true", - "store_false", - "append", - "append_const", - "count") - - # The set of actions for which it makes sense to supply a value - # type, ie. which may consume an argument from the command line. - TYPED_ACTIONS = ("store", - "append", - "callback") - - # The set of actions which *require* a value type, ie. that - # always consume an argument from the command line. - ALWAYS_TYPED_ACTIONS = ("store", - "append") - - # The set of actions which take a 'const' attribute. - CONST_ACTIONS = ("store_const", - "append_const") - - # The set of known types for option parsers. Again, listed here for - # constructor argument validation. - TYPES = ("string", "int", "long", "float", "complex", "choice") - - # Dictionary of argument checking functions, which convert and - # validate option arguments according to the option type. 
- # - # Signature of checking functions is: - # check(option : Option, opt : string, value : string) -> any - # where - # option is the Option instance calling the checker - # opt is the actual option seen on the command-line - # (eg. "-a", "--file") - # value is the option argument seen on the command-line - # - # The return value should be in the appropriate Python type - # for option.type -- eg. an integer if option.type == "int". - # - # If no checker is defined for a type, arguments will be - # unchecked and remain strings. - TYPE_CHECKER = { "int" : check_builtin, - "long" : check_builtin, - "float" : check_builtin, - "complex": check_builtin, - "choice" : check_choice, - } - - - # CHECK_METHODS is a list of unbound method objects; they are called - # by the constructor, in order, after all attributes are - # initialized. The list is created and filled in later, after all - # the methods are actually defined. (I just put it here because I - # like to define and document all class attributes in the same - # place.) Subclasses that add another _check_*() method should - # define their own CHECK_METHODS list that adds their check method - # to those from this class. - CHECK_METHODS = None - - - # -- Constructor/initialization methods ---------------------------- - - def __init__(self, *opts, **attrs): - # Set _short_opts, _long_opts attrs from 'opts' tuple. - # Have to be set now, in case no option strings are supplied. - self._short_opts = [] - self._long_opts = [] - opts = self._check_opt_strings(opts) - self._set_opt_strings(opts) - - # Set all other attrs (action, type, etc.) from 'attrs' dict - self._set_attrs(attrs) - - # Check all the attributes we just set. There are lots of - # complicated interdependencies, but luckily they can be farmed - # out to the _check_*() methods listed in CHECK_METHODS -- which - # could be handy for subclasses! The one thing these all share - # is that they raise OptionError if they discover a problem. - for checker in self.CHECK_METHODS: - checker(self) - - def _check_opt_strings(self, opts): - # Filter out None because early versions of Optik had exactly - # one short option and one long option, either of which - # could be None. 
- opts = [opt for opt in opts if opt] - if not opts: - raise TypeError("at least one option string must be supplied") - return opts - - def _set_opt_strings(self, opts): - for opt in opts: - if len(opt) < 2: - raise OptionError( - "invalid option string %r: " - "must be at least two characters long" % opt, self) - elif len(opt) == 2: - if not (opt[0] == "-" and opt[1] != "-"): - raise OptionError( - "invalid short option string %r: " - "must be of the form -x, (x any non-dash char)" % opt, - self) - self._short_opts.append(opt) - else: - if not (opt[0:2] == "--" and opt[2] != "-"): - raise OptionError( - "invalid long option string %r: " - "must start with --, followed by non-dash" % opt, - self) - self._long_opts.append(opt) - - def _set_attrs(self, attrs): - for attr in self.ATTRS: - if attr in attrs: - setattr(self, attr, attrs[attr]) - del attrs[attr] - else: - if attr == 'default': - setattr(self, attr, NO_DEFAULT) - else: - setattr(self, attr, None) - if attrs: - attrs = sorted(attrs.keys()) - raise OptionError( - "invalid keyword arguments: %s" % ", ".join(attrs), - self) - - - # -- Constructor validation methods -------------------------------- - - def _check_action(self): - if self.action is None: - self.action = "store" - elif self.action not in self.ACTIONS: - raise OptionError("invalid action: %r" % self.action, self) - - def _check_type(self): - if self.type is None: - if self.action in self.ALWAYS_TYPED_ACTIONS: - if self.choices is not None: - # The "choices" attribute implies "choice" type. - self.type = "choice" - else: - # No type given? "string" is the most sensible default. - self.type = "string" - else: - # Allow type objects or builtin type conversion functions - # (int, str, etc.) as an alternative to their names. - if isinstance(self.type, type): - self.type = self.type.__name__ - - if self.type == "str": - self.type = "string" - - if self.type not in self.TYPES: - raise OptionError("invalid option type: %r" % self.type, self) - if self.action not in self.TYPED_ACTIONS: - raise OptionError( - "must not supply a type for action %r" % self.action, self) - - def _check_choice(self): - if self.type == "choice": - if self.choices is None: - raise OptionError( - "must supply a list of choices for type 'choice'", self) - elif not isinstance(self.choices, (tuple, list)): - raise OptionError( - "choices must be a list of strings ('%s' supplied)" - % str(type(self.choices)).split("'")[1], self) - elif self.choices is not None: - raise OptionError( - "must not supply choices for type %r" % self.type, self) - - def _check_dest(self): - # No destination given, and we need one for this action. The - # self.type check is for callbacks that take a value. - takes_value = (self.action in self.STORE_ACTIONS or - self.type is not None) - if self.dest is None and takes_value: - - # Glean a destination from the first long option string, - # or from the first short option string if no long options. - if self._long_opts: - # eg. 
"--foo-bar" -> "foo_bar" - self.dest = self._long_opts[0][2:].replace('-', '_') - else: - self.dest = self._short_opts[0][1] - - def _check_const(self): - if self.action not in self.CONST_ACTIONS and self.const is not None: - raise OptionError( - "'const' must not be supplied for action %r" % self.action, - self) - - def _check_nargs(self): - if self.action in self.TYPED_ACTIONS: - if self.nargs is None: - self.nargs = 1 - elif self.nargs is not None: - raise OptionError( - "'nargs' must not be supplied for action %r" % self.action, - self) - - def _check_callback(self): - if self.action == "callback": - if not callable(self.callback): - raise OptionError( - "callback not callable: %r" % self.callback, self) - if (self.callback_args is not None and - not isinstance(self.callback_args, tuple)): - raise OptionError( - "callback_args, if supplied, must be a tuple: not %r" - % self.callback_args, self) - if (self.callback_kwargs is not None and - not isinstance(self.callback_kwargs, dict)): - raise OptionError( - "callback_kwargs, if supplied, must be a dict: not %r" - % self.callback_kwargs, self) - else: - if self.callback is not None: - raise OptionError( - "callback supplied (%r) for non-callback option" - % self.callback, self) - if self.callback_args is not None: - raise OptionError( - "callback_args supplied for non-callback option", self) - if self.callback_kwargs is not None: - raise OptionError( - "callback_kwargs supplied for non-callback option", self) - - - CHECK_METHODS = [_check_action, - _check_type, - _check_choice, - _check_dest, - _check_const, - _check_nargs, - _check_callback] - - - # -- Miscellaneous methods ----------------------------------------- - - def __str__(self): - return "/".join(self._short_opts + self._long_opts) - - __repr__ = _repr - - def takes_value(self): - return self.type is not None - - def get_opt_string(self): - if self._long_opts: - return self._long_opts[0] - else: - return self._short_opts[0] - - - # -- Processing methods -------------------------------------------- - - def check_value(self, opt, value): - checker = self.TYPE_CHECKER.get(self.type) - if checker is None: - return value - else: - return checker(self, opt, value) - - def convert_value(self, opt, value): - if value is not None: - if self.nargs == 1: - return self.check_value(opt, value) - else: - return tuple([self.check_value(opt, v) for v in value]) - - def process(self, opt, value, values, parser): - - # First, convert the value(s) to the right type. Howl if any - # value(s) are bogus. - value = self.convert_value(opt, value) - - # And then take whatever action is expected of us. - # This is a separate method to make life easier for - # subclasses to add new actions. 
- return self.take_action( - self.action, self.dest, opt, value, values, parser) - - def take_action(self, action, dest, opt, value, values, parser): - if action == "store": - setattr(values, dest, value) - elif action == "store_const": - setattr(values, dest, self.const) - elif action == "store_true": - setattr(values, dest, True) - elif action == "store_false": - setattr(values, dest, False) - elif action == "append": - values.ensure_value(dest, []).append(value) - elif action == "append_const": - values.ensure_value(dest, []).append(self.const) - elif action == "count": - setattr(values, dest, values.ensure_value(dest, 0) + 1) - elif action == "callback": - args = self.callback_args or () - kwargs = self.callback_kwargs or {} - self.callback(self, opt, value, parser, *args, **kwargs) - elif action == "help": - parser.print_help() - parser.exit() - elif action == "version": - parser.print_version() - parser.exit() - else: - raise ValueError("unknown action %r" % self.action) - - return 1 - -# class Option - - -SUPPRESS_HELP = "SUPPRESS"+"HELP" -SUPPRESS_USAGE = "SUPPRESS"+"USAGE" - -class Values: - - def __init__(self, defaults=None): - if defaults: - for (attr, val) in defaults.items(): - setattr(self, attr, val) - - def __str__(self): - return str(self.__dict__) - - __repr__ = _repr - - def __eq__(self, other): - if isinstance(other, Values): - return self.__dict__ == other.__dict__ - elif isinstance(other, dict): - return self.__dict__ == other - else: - return NotImplemented - - def _update_careful(self, dict): - """ - Update the option values from an arbitrary dictionary, but only - use keys from dict that already have a corresponding attribute - in self. Any keys in dict without a corresponding attribute - are silently ignored. - """ - for attr in dir(self): - if attr in dict: - dval = dict[attr] - if dval is not None: - setattr(self, attr, dval) - - def _update_loose(self, dict): - """ - Update the option values from an arbitrary dictionary, - using all keys from the dictionary regardless of whether - they have a corresponding attribute in self or not. - """ - self.__dict__.update(dict) - - def _update(self, dict, mode): - if mode == "careful": - self._update_careful(dict) - elif mode == "loose": - self._update_loose(dict) - else: - raise ValueError("invalid update mode: %r" % mode) - - def read_module(self, modname, mode="careful"): - __import__(modname) - mod = sys.modules[modname] - self._update(vars(mod), mode) - - def read_file(self, filename, mode="careful"): - vars = {} - exec(open(filename).read(), vars) - self._update(vars, mode) - - def ensure_value(self, attr, value): - if not hasattr(self, attr) or getattr(self, attr) is None: - setattr(self, attr, value) - return getattr(self, attr) - - -class OptionContainer: - - """ - Abstract base class. - - Class attributes: - standard_option_list : [Option] - list of standard options that will be accepted by all instances - of this parser class (intended to be overridden by subclasses). - - Instance attributes: - option_list : [Option] - the list of Option objects contained by this OptionContainer - _short_opt : { string : Option } - dictionary mapping short option strings, eg. "-f" or "-X", - to the Option instances that implement them. If an Option - has multiple short option strings, it will appear in this - dictionary multiple times. [1] - _long_opt : { string : Option } - dictionary mapping long option strings, eg. "--file" or - "--exclude", to the Option instances that implement them. 
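The Values container defined above is intentionally small; a short sketch of its update semantics (the attribute names are illustrative):

from optparse import Values

opts = Values({"verbose": True})
opts.ensure_value("count", 0)    # sets it, since 'count' was missing
opts._update_careful({"verbose": False, "extra": 1})  # 'extra' is ignored: no such attribute
opts._update_loose({"extra": 1})                      # 'extra' is added unconditionally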
- Again, a given Option can occur multiple times in this - dictionary. [1] - defaults : { string : any } - dictionary mapping option destination names to default - values for each destination [1] - - [1] These mappings are common to (shared by) all components of the - controlling OptionParser, where they are initially created. - - """ - - def __init__(self, option_class, conflict_handler, description): - # Initialize the option list and related data structures. - # This method must be provided by subclasses, and it must - # initialize at least the following instance attributes: - # option_list, _short_opt, _long_opt, defaults. - self._create_option_list() - - self.option_class = option_class - self.set_conflict_handler(conflict_handler) - self.set_description(description) - - def _create_option_mappings(self): - # For use by OptionParser constructor -- create the main - # option mappings used by this OptionParser and all - # OptionGroups that it owns. - self._short_opt = {} # single letter -> Option instance - self._long_opt = {} # long option -> Option instance - self.defaults = {} # maps option dest -> default value - - - def _share_option_mappings(self, parser): - # For use by OptionGroup constructor -- use shared option - # mappings from the OptionParser that owns this OptionGroup. - self._short_opt = parser._short_opt - self._long_opt = parser._long_opt - self.defaults = parser.defaults - - def set_conflict_handler(self, handler): - if handler not in ("error", "resolve"): - raise ValueError("invalid conflict_resolution value %r" % handler) - self.conflict_handler = handler - - def set_description(self, description): - self.description = description - - def get_description(self): - return self.description - - - def destroy(self): - """see OptionParser.destroy().""" - del self._short_opt - del self._long_opt - del self.defaults - - - # -- Option-adding methods ----------------------------------------- - - def _check_conflict(self, option): - conflict_opts = [] - for opt in option._short_opts: - if opt in self._short_opt: - conflict_opts.append((opt, self._short_opt[opt])) - for opt in option._long_opts: - if opt in self._long_opt: - conflict_opts.append((opt, self._long_opt[opt])) - - if conflict_opts: - handler = self.conflict_handler - if handler == "error": - raise OptionConflictError( - "conflicting option string(s): %s" - % ", ".join([co[0] for co in conflict_opts]), - option) - elif handler == "resolve": - for (opt, c_option) in conflict_opts: - if opt.startswith("--"): - c_option._long_opts.remove(opt) - del self._long_opt[opt] - else: - c_option._short_opts.remove(opt) - del self._short_opt[opt] - if not (c_option._short_opts or c_option._long_opts): - c_option.container.option_list.remove(c_option) - - def add_option(self, *args, **kwargs): - """add_option(Option) - add_option(opt_str, ..., kwarg=val, ...) 
- """ - if isinstance(args[0], str): - option = self.option_class(*args, **kwargs) - elif len(args) == 1 and not kwargs: - option = args[0] - if not isinstance(option, Option): - raise TypeError("not an Option instance: %r" % option) - else: - raise TypeError("invalid arguments") - - self._check_conflict(option) - - self.option_list.append(option) - option.container = self - for opt in option._short_opts: - self._short_opt[opt] = option - for opt in option._long_opts: - self._long_opt[opt] = option - - if option.dest is not None: # option has a dest, we need a default - if option.default is not NO_DEFAULT: - self.defaults[option.dest] = option.default - elif option.dest not in self.defaults: - self.defaults[option.dest] = None - - return option - - def add_options(self, option_list): - for option in option_list: - self.add_option(option) - - # -- Option query/removal methods ---------------------------------- - - def get_option(self, opt_str): - return (self._short_opt.get(opt_str) or - self._long_opt.get(opt_str)) - - def has_option(self, opt_str): - return (opt_str in self._short_opt or - opt_str in self._long_opt) - - def remove_option(self, opt_str): - option = self._short_opt.get(opt_str) - if option is None: - option = self._long_opt.get(opt_str) - if option is None: - raise ValueError("no such option %r" % opt_str) - - for opt in option._short_opts: - del self._short_opt[opt] - for opt in option._long_opts: - del self._long_opt[opt] - option.container.option_list.remove(option) - - - # -- Help-formatting methods --------------------------------------- - - def format_option_help(self, formatter): - if not self.option_list: - return "" - result = [] - for option in self.option_list: - if not option.help is SUPPRESS_HELP: - result.append(formatter.format_option(option)) - return "".join(result) - - def format_description(self, formatter): - return formatter.format_description(self.get_description()) - - def format_help(self, formatter): - result = [] - if self.description: - result.append(self.format_description(formatter)) - if self.option_list: - result.append(self.format_option_help(formatter)) - return "\n".join(result) - - -class OptionGroup (OptionContainer): - - def __init__(self, parser, title, description=None): - self.parser = parser - OptionContainer.__init__( - self, parser.option_class, parser.conflict_handler, description) - self.title = title - - def _create_option_list(self): - self.option_list = [] - self._share_option_mappings(self.parser) - - def set_title(self, title): - self.title = title - - def destroy(self): - """see OptionParser.destroy().""" - OptionContainer.destroy(self) - del self.option_list - - # -- Help-formatting methods --------------------------------------- - - def format_help(self, formatter): - result = formatter.format_heading(self.title) - formatter.indent() - result += OptionContainer.format_help(self, formatter) - formatter.dedent() - return result - - -class OptionParser (OptionContainer): - - """ - Class attributes: - standard_option_list : [Option] - list of standard options that will be accepted by all instances - of this parser class (intended to be overridden by subclasses). - - Instance attributes: - usage : string - a usage string for your program. Before it is displayed - to the user, "%prog" will be expanded to the name of - your program (self.prog or os.path.basename(sys.argv[0])). - prog : string - the name of the current program (to override - os.path.basename(sys.argv[0])). 
- description : string - A paragraph of text giving a brief overview of your program. - optparse reformats this paragraph to fit the current terminal - width and prints it when the user requests help (after usage, - but before the list of options). - epilog : string - paragraph of help text to print after option help - - option_groups : [OptionGroup] - list of option groups in this parser (option groups are - irrelevant for parsing the command-line, but very useful - for generating help) - - allow_interspersed_args : bool = true - if true, positional arguments may be interspersed with options. - Assuming -a and -b each take a single argument, the command-line - -ablah foo bar -bboo baz - will be interpreted the same as - -ablah -bboo -- foo bar baz - If this flag were false, that command line would be interpreted as - -ablah -- foo bar -bboo baz - -- ie. we stop processing options as soon as we see the first - non-option argument. (This is the tradition followed by - Python's getopt module, Perl's Getopt::Std, and other argument- - parsing libraries, but it is generally annoying to users.) - - process_default_values : bool = true - if true, option default values are processed similarly to option - values from the command line: that is, they are passed to the - type-checking function for the option's type (as long as the - default value is a string). (This really only matters if you - have defined custom types; see SF bug #955889.) Set it to false - to restore the behaviour of Optik 1.4.1 and earlier. - - rargs : [string] - the argument list currently being parsed. Only set when - parse_args() is active, and continually trimmed down as - we consume arguments. Mainly there for the benefit of - callback options. - largs : [string] - the list of leftover arguments that we have skipped while - parsing options. If allow_interspersed_args is false, this - list is always empty. - values : Values - the set of option values currently being accumulated. Only - set when parse_args() is active. Also mainly for callbacks. - - Because of the 'rargs', 'largs', and 'values' attributes, - OptionParser is not thread-safe. If, for some perverse reason, you - need to parse command-line arguments simultaneously in different - threads, use different OptionParser instances. - - """ - - standard_option_list = [] - - def __init__(self, - usage=None, - option_list=None, - option_class=Option, - version=None, - conflict_handler="error", - description=None, - formatter=None, - add_help_option=True, - prog=None, - epilog=None): - OptionContainer.__init__( - self, option_class, conflict_handler, description) - self.set_usage(usage) - self.prog = prog - self.version = version - self.allow_interspersed_args = True - self.process_default_values = True - if formatter is None: - formatter = IndentedHelpFormatter() - self.formatter = formatter - self.formatter.set_parser(self) - self.epilog = epilog - - # Populate the option list; initial sources are the - # standard_option_list class attribute, the 'option_list' - # argument, and (if applicable) the _add_version_option() and - # _add_help_option() methods. - self._populate_option_list(option_list, - add_help=add_help_option) - - self._init_parsing_state() - - - def destroy(self): - """ - Declare that you are done with this OptionParser. This cleans up - reference cycles so the OptionParser (and all objects referenced by - it) can be garbage-collected promptly. After calling destroy(), the - OptionParser is unusable. 
- """ - OptionContainer.destroy(self) - for group in self.option_groups: - group.destroy() - del self.option_list - del self.option_groups - del self.formatter - - - # -- Private methods ----------------------------------------------- - # (used by our or OptionContainer's constructor) - - def _create_option_list(self): - self.option_list = [] - self.option_groups = [] - self._create_option_mappings() - - def _add_help_option(self): - self.add_option("-h", "--help", - action="help", - help=_("show this help message and exit")) - - def _add_version_option(self): - self.add_option("--version", - action="version", - help=_("show program's version number and exit")) - - def _populate_option_list(self, option_list, add_help=True): - if self.standard_option_list: - self.add_options(self.standard_option_list) - if option_list: - self.add_options(option_list) - if self.version: - self._add_version_option() - if add_help: - self._add_help_option() - - def _init_parsing_state(self): - # These are set in parse_args() for the convenience of callbacks. - self.rargs = None - self.largs = None - self.values = None - - - # -- Simple modifier methods --------------------------------------- - - def set_usage(self, usage): - if usage is None: - self.usage = _("%prog [options]") - elif usage is SUPPRESS_USAGE: - self.usage = None - # For backwards compatibility with Optik 1.3 and earlier. - elif usage.lower().startswith("usage: "): - self.usage = usage[7:] - else: - self.usage = usage - - def enable_interspersed_args(self): - """Set parsing to not stop on the first non-option, allowing - interspersing switches with command arguments. This is the - default behavior. See also disable_interspersed_args() and the - class documentation description of the attribute - allow_interspersed_args.""" - self.allow_interspersed_args = True - - def disable_interspersed_args(self): - """Set parsing to stop on the first non-option. Use this if - you have a command processor which runs another command that - has options of its own and you want to make sure these options - don't get confused. - """ - self.allow_interspersed_args = False - - def set_process_default_values(self, process): - self.process_default_values = process - - def set_default(self, dest, value): - self.defaults[dest] = value - - def set_defaults(self, **kwargs): - self.defaults.update(kwargs) - - def _get_all_options(self): - options = self.option_list[:] - for group in self.option_groups: - options.extend(group.option_list) - return options - - def get_default_values(self): - if not self.process_default_values: - # Old, pre-Optik 1.5 behaviour. 
- return Values(self.defaults) - - defaults = self.defaults.copy() - for option in self._get_all_options(): - default = defaults.get(option.dest) - if isinstance(default, str): - opt_str = option.get_opt_string() - defaults[option.dest] = option.check_value(opt_str, default) - - return Values(defaults) - - - # -- OptionGroup methods ------------------------------------------- - - def add_option_group(self, *args, **kwargs): - # XXX lots of overlap with OptionContainer.add_option() - if isinstance(args[0], str): - group = OptionGroup(self, *args, **kwargs) - elif len(args) == 1 and not kwargs: - group = args[0] - if not isinstance(group, OptionGroup): - raise TypeError("not an OptionGroup instance: %r" % group) - if group.parser is not self: - raise ValueError("invalid OptionGroup (wrong parser)") - else: - raise TypeError("invalid arguments") - - self.option_groups.append(group) - return group - - def get_option_group(self, opt_str): - option = (self._short_opt.get(opt_str) or - self._long_opt.get(opt_str)) - if option and option.container is not self: - return option.container - return None - - - # -- Option-parsing methods ---------------------------------------- - - def _get_args(self, args): - if args is None: - return sys.argv[1:] - else: - return args[:] # don't modify caller's list - - def parse_args(self, args=None, values=None): - """ - parse_args(args : [string] = sys.argv[1:], - values : Values = None) - -> (values : Values, args : [string]) - - Parse the command-line options found in 'args' (default: - sys.argv[1:]). Any errors result in a call to 'error()', which - by default prints the usage message to stderr and calls - sys.exit() with an error message. On success returns a pair - (values, args) where 'values' is a Values instance (with all - your option values) and 'args' is the list of arguments left - over after parsing options. - """ - rargs = self._get_args(args) - if values is None: - values = self.get_default_values() - - # Store the halves of the argument list as attributes for the - # convenience of callbacks: - # rargs - # the rest of the command-line (the "r" stands for - # "remaining" or "right-hand") - # largs - # the leftover arguments -- ie. what's left after removing - # options and their arguments (the "l" stands for "leftover" - # or "left-hand") - self.rargs = rargs - self.largs = largs = [] - self.values = values - - try: - stop = self._process_args(largs, rargs, values) - except (BadOptionError, OptionValueError) as err: - self.error(str(err)) - - args = largs + rargs - return self.check_values(values, args) - - def check_values(self, values, args): - """ - check_values(values : Values, args : [string]) - -> (values : Values, args : [string]) - - Check that the supplied option values and leftover arguments are - valid. Returns the option values and leftover arguments - (possibly adjusted, possibly completely new -- whatever you - like). Default implementation just returns the passed-in - values; subclasses may override as desired. - """ - return (values, args) - - def _process_args(self, largs, rargs, values): - """_process_args(largs : [string], - rargs : [string], - values : Values) - - Process command-line arguments and populate 'values', consuming - options and arguments from 'rargs'. If 'allow_interspersed_args' is - false, stop at the first non-option argument. If true, accumulate any - interspersed non-option arguments in 'largs'. 
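Putting parse_args() together with add_option(), a typical caller looks like this (a minimal sketch; the options shown are illustrative):

from optparse import OptionParser

parser = OptionParser(usage="%prog [options] FILE")
parser.add_option("-f", "--file", dest="filename", metavar="FILE",
                  help="write report to FILE")
parser.add_option("-q", "--quiet", action="store_false",
                  dest="verbose", default=True,
                  help="don't print status messages to stdout")
(options, args) = parser.parse_args(["-q", "report.txt"])
assert options.verbose is False and args == ["report.txt"]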
- """ - while rargs: - arg = rargs[0] - # We handle bare "--" explicitly, and bare "-" is handled by the - # standard arg handler since the short arg case ensures that the - # len of the opt string is greater than 1. - if arg == "--": - del rargs[0] - return - elif arg[0:2] == "--": - # process a single long option (possibly with value(s)) - self._process_long_opt(rargs, values) - elif arg[:1] == "-" and len(arg) > 1: - # process a cluster of short options (possibly with - # value(s) for the last one only) - self._process_short_opts(rargs, values) - elif self.allow_interspersed_args: - largs.append(arg) - del rargs[0] - else: - return # stop now, leave this arg in rargs - - # Say this is the original argument list: - # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] - # ^ - # (we are about to process arg(i)). - # - # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of - # [arg0, ..., arg(i-1)] (any options and their arguments will have - # been removed from largs). - # - # The while loop will usually consume 1 or more arguments per pass. - # If it consumes 1 (eg. arg is an option that takes no arguments), - # then after _process_arg() is done the situation is: - # - # largs = subset of [arg0, ..., arg(i)] - # rargs = [arg(i+1), ..., arg(N-1)] - # - # If allow_interspersed_args is false, largs will always be - # *empty* -- still a subset of [arg0, ..., arg(i-1)], but - # not a very interesting subset! - - def _match_long_opt(self, opt): - """_match_long_opt(opt : string) -> string - - Determine which long option string 'opt' matches, ie. which one - it is an unambiguous abbreviation for. Raises BadOptionError if - 'opt' doesn't unambiguously match any long option string. - """ - return _match_abbrev(opt, self._long_opt) - - def _process_long_opt(self, rargs, values): - arg = rargs.pop(0) - - # Value explicitly attached to arg? Pretend it's the next - # argument. - if "=" in arg: - (opt, next_arg) = arg.split("=", 1) - rargs.insert(0, next_arg) - had_explicit_value = True - else: - opt = arg - had_explicit_value = False - - opt = self._match_long_opt(opt) - option = self._long_opt[opt] - if option.takes_value(): - nargs = option.nargs - if len(rargs) < nargs: - self.error(ngettext( - "%(option)s option requires %(number)d argument", - "%(option)s option requires %(number)d arguments", - nargs) % {"option": opt, "number": nargs}) - elif nargs == 1: - value = rargs.pop(0) - else: - value = tuple(rargs[0:nargs]) - del rargs[0:nargs] - - elif had_explicit_value: - self.error(_("%s option does not take a value") % opt) - - else: - value = None - - option.process(opt, value, values, self) - - def _process_short_opts(self, rargs, values): - arg = rargs.pop(0) - stop = False - i = 1 - for ch in arg[1:]: - opt = "-" + ch - option = self._short_opt.get(opt) - i += 1 # we have consumed a character - - if not option: - raise BadOptionError(opt) - if option.takes_value(): - # Any characters left in arg? Pretend they're the - # next arg, and stop consuming characters of arg. 
- if i < len(arg): - rargs.insert(0, arg[i:]) - stop = True - - nargs = option.nargs - if len(rargs) < nargs: - self.error(ngettext( - "%(option)s option requires %(number)d argument", - "%(option)s option requires %(number)d arguments", - nargs) % {"option": opt, "number": nargs}) - elif nargs == 1: - value = rargs.pop(0) - else: - value = tuple(rargs[0:nargs]) - del rargs[0:nargs] - - else: # option doesn't take a value - value = None - - option.process(opt, value, values, self) - - if stop: - break - - - # -- Feedback methods ---------------------------------------------- - - def get_prog_name(self): - if self.prog is None: - return os.path.basename(sys.argv[0]) - else: - return self.prog - - def expand_prog_name(self, s): - return s.replace("%prog", self.get_prog_name()) - - def get_description(self): - return self.expand_prog_name(self.description) - - def exit(self, status=0, msg=None): - if msg: - sys.stderr.write(msg) - sys.exit(status) - - def error(self, msg): - """error(msg : string) - - Print a usage message incorporating 'msg' to stderr and exit. - If you override this in a subclass, it should not return -- it - should either exit or raise an exception. - """ - self.print_usage(sys.stderr) - self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg)) - - def get_usage(self): - if self.usage: - return self.formatter.format_usage( - self.expand_prog_name(self.usage)) - else: - return "" - - def print_usage(self, file=None): - """print_usage(file : file = stdout) - - Print the usage message for the current program (self.usage) to - 'file' (default stdout). Any occurrence of the string "%prog" in - self.usage is replaced with the name of the current program - (basename of sys.argv[0]). Does nothing if self.usage is empty - or not defined. - """ - if self.usage: - print(self.get_usage(), file=file) - - def get_version(self): - if self.version: - return self.expand_prog_name(self.version) - else: - return "" - - def print_version(self, file=None): - """print_version(file : file = stdout) - - Print the version message for this program (self.version) to - 'file' (default stdout). As with print_usage(), any occurrence - of "%prog" in self.version is replaced by the current program's - name. Does nothing if self.version is empty or undefined. 
- """ - if self.version: - print(self.get_version(), file=file) - - def format_option_help(self, formatter=None): - if formatter is None: - formatter = self.formatter - formatter.store_option_strings(self) - result = [] - result.append(formatter.format_heading(_("Options"))) - formatter.indent() - if self.option_list: - result.append(OptionContainer.format_option_help(self, formatter)) - result.append("\n") - for group in self.option_groups: - result.append(group.format_help(formatter)) - result.append("\n") - formatter.dedent() - # Drop the last "\n", or the header if no options or option groups: - return "".join(result[:-1]) - - def format_epilog(self, formatter): - return formatter.format_epilog(self.epilog) - - def format_help(self, formatter=None): - if formatter is None: - formatter = self.formatter - result = [] - if self.usage: - result.append(self.get_usage() + "\n") - if self.description: - result.append(self.format_description(formatter) + "\n") - result.append(self.format_option_help(formatter)) - result.append(self.format_epilog(formatter)) - return "".join(result) - - def print_help(self, file=None): - """print_help(file : file = stdout) - - Print an extended help message, listing all options and any - help text provided with them, to 'file' (default stdout). - """ - if file is None: - file = sys.stdout - file.write(self.format_help()) - -# class OptionParser - - -def _match_abbrev(s, wordmap): - """_match_abbrev(s : string, wordmap : {string : Option}) -> string - - Return the string key in 'wordmap' for which 's' is an unambiguous - abbreviation. If 's' is found to be ambiguous or doesn't match any of - 'words', raise BadOptionError. - """ - # Is there an exact match? - if s in wordmap: - return s - else: - # Isolate all words with s as a prefix. - possibilities = [word for word in wordmap.keys() - if word.startswith(s)] - # No exact match, so there had better be just one possibility. - if len(possibilities) == 1: - return possibilities[0] - elif not possibilities: - raise BadOptionError(s) - else: - # More than one possible completion: ambiguous prefix. - possibilities.sort() - raise AmbiguousOptionError(s, possibilities) - - -# Some day, there might be many Option classes. As of Optik 1.3, the -# preferred way to instantiate Options is indirectly, via make_option(), -# which will become a factory function when there are many Option -# classes. -make_option = Option diff --git a/Python313_13_x86_Template/Lib/os.py b/Python313_13_x86_Template/Lib/os.py deleted file mode 100644 index 1b1645f4..00000000 --- a/Python313_13_x86_Template/Lib/os.py +++ /dev/null @@ -1,1184 +0,0 @@ -r"""OS routines for NT or Posix depending on what system we're on. - -This exports: - - all functions from posix or nt, e.g. unlink, stat, etc. - - os.path is either posixpath or ntpath - - os.name is either 'posix' or 'nt' - - os.curdir is a string representing the current directory (always '.') - - os.pardir is a string representing the parent directory (always '..') - - os.sep is the (or a most common) pathname separator ('/' or '\\') - - os.extsep is the extension separator (always '.') - - os.altsep is the alternate pathname separator (None or '/') - - os.pathsep is the component separator used in $PATH etc - - os.linesep is the line separator in text files ('\n' or '\r\n') - - os.defpath is the default search path for executables - - os.devnull is the file path of the null device ('/dev/null', etc.) 
- -Programs that import and use 'os' stand a better chance of being -portable between different platforms. Of course, they must then -only use functions that are defined by all platforms (e.g., unlink -and opendir), and leave all pathname manipulation to os.path -(e.g., split and join). -""" - -#' -import abc -import sys -import stat as st - -from _collections_abc import _check_methods - -GenericAlias = type(list[int]) - -_names = sys.builtin_module_names - -# Note: more names are added to __all__ later. -__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", - "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", - "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", - "extsep"] - -def _exists(name): - return name in globals() - -def _get_exports_list(module): - try: - return list(module.__all__) - except AttributeError: - return [n for n in dir(module) if n[0] != '_'] - -# Any new dependencies of the os module and/or changes in path separator -# requires updating importlib as well. -if 'posix' in _names: - name = 'posix' - linesep = '\n' - from posix import * - try: - from posix import _exit - __all__.append('_exit') - except ImportError: - pass - import posixpath as path - - try: - from posix import _have_functions - except ImportError: - pass - - import posix - __all__.extend(_get_exports_list(posix)) - del posix - -elif 'nt' in _names: - name = 'nt' - linesep = '\r\n' - from nt import * - try: - from nt import _exit - __all__.append('_exit') - except ImportError: - pass - import ntpath as path - - import nt - __all__.extend(_get_exports_list(nt)) - del nt - - try: - from nt import _have_functions - except ImportError: - pass - -else: - raise ImportError('no os specific module found') - -sys.modules['os.path'] = path -from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, - devnull) - -del _names - - -if _exists("_have_functions"): - _globals = globals() - def _add(str, fn): - if (fn in _globals) and (str in _have_functions): - _set.add(_globals[fn]) - - _set = set() - _add("HAVE_FACCESSAT", "access") - _add("HAVE_FCHMODAT", "chmod") - _add("HAVE_FCHOWNAT", "chown") - _add("HAVE_FSTATAT", "stat") - _add("HAVE_LSTAT", "lstat") - _add("HAVE_FUTIMESAT", "utime") - _add("HAVE_LINKAT", "link") - _add("HAVE_MKDIRAT", "mkdir") - _add("HAVE_MKFIFOAT", "mkfifo") - _add("HAVE_MKNODAT", "mknod") - _add("HAVE_OPENAT", "open") - _add("HAVE_READLINKAT", "readlink") - _add("HAVE_RENAMEAT", "rename") - _add("HAVE_SYMLINKAT", "symlink") - _add("HAVE_UNLINKAT", "unlink") - _add("HAVE_UNLINKAT", "rmdir") - _add("HAVE_UTIMENSAT", "utime") - supports_dir_fd = _set - - _set = set() - _add("HAVE_FACCESSAT", "access") - supports_effective_ids = _set - - _set = set() - _add("HAVE_FCHDIR", "chdir") - _add("HAVE_FCHMOD", "chmod") - _add("MS_WINDOWS", "chmod") - _add("HAVE_FCHOWN", "chown") - _add("HAVE_FDOPENDIR", "listdir") - _add("HAVE_FDOPENDIR", "scandir") - _add("HAVE_FEXECVE", "execve") - _set.add(stat) # fstat always works - _add("HAVE_FTRUNCATE", "truncate") - _add("HAVE_FUTIMENS", "utime") - _add("HAVE_FUTIMES", "utime") - _add("HAVE_FPATHCONF", "pathconf") - if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3 - _add("HAVE_FSTATVFS", "statvfs") - supports_fd = _set - - _set = set() - _add("HAVE_FACCESSAT", "access") - # Some platforms don't support lchmod(). Often the function exists - # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP. - # (No, I don't know why that's a good design.) 
./configure will detect - # this and reject it--so HAVE_LCHMOD still won't be defined on such - # platforms. This is Very Helpful. - # - # However, sometimes platforms without a working lchmod() *do* have - # fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15, - # OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes - # it behave like lchmod(). So in theory it would be a suitable - # replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s - # flag doesn't work *either*. Sadly ./configure isn't sophisticated - # enough to detect this condition--it only determines whether or not - # fchmodat() minimally works. - # - # Therefore we simply ignore fchmodat() when deciding whether or not - # os.chmod supports follow_symlinks. Just checking lchmod() is - # sufficient. After all--if you have a working fchmodat(), your - # lchmod() almost certainly works too. - # - # _add("HAVE_FCHMODAT", "chmod") - _add("HAVE_FCHOWNAT", "chown") - _add("HAVE_FSTATAT", "stat") - _add("HAVE_LCHFLAGS", "chflags") - _add("HAVE_LCHMOD", "chmod") - _add("MS_WINDOWS", "chmod") - if _exists("lchown"): # mac os x10.3 - _add("HAVE_LCHOWN", "chown") - _add("HAVE_LINKAT", "link") - _add("HAVE_LUTIMES", "utime") - _add("HAVE_LSTAT", "stat") - _add("HAVE_FSTATAT", "stat") - _add("HAVE_UTIMENSAT", "utime") - _add("MS_WINDOWS", "stat") - supports_follow_symlinks = _set - - del _set - del _have_functions - del _globals - del _add - - -# Python uses fixed values for the SEEK_ constants; they are mapped -# to native constants if necessary in posixmodule.c -# Other possible SEEK values are directly imported from posixmodule.c -SEEK_SET = 0 -SEEK_CUR = 1 -SEEK_END = 2 - -# Super directory utilities. -# (Inspired by Eric Raymond; the doc strings are mostly his) - -def makedirs(name, mode=0o777, exist_ok=False): - """makedirs(name [, mode=0o777][, exist_ok=False]) - - Super-mkdir; create a leaf directory and all intermediate ones. Works like - mkdir, except that any intermediate path segment (not just the rightmost) - will be created if it does not exist. If the target directory already - exists, raise an OSError if exist_ok is False. Otherwise no exception is - raised. This is recursive. - - """ - head, tail = path.split(name) - if not tail: - head, tail = path.split(head) - if head and tail and not path.exists(head): - try: - makedirs(head, exist_ok=exist_ok) - except FileExistsError: - # Defeats race condition when another thread created the path - pass - cdir = curdir - if isinstance(tail, bytes): - cdir = bytes(curdir, 'ASCII') - if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists - return - try: - mkdir(name, mode) - except OSError: - # Cannot rely on checking for EEXIST, since the operating system - # could give priority to other errors like EACCES or EROFS - if not exist_ok or not path.isdir(name): - raise - -def removedirs(name): - """removedirs(name) - - Super-rmdir; remove a leaf directory and all empty intermediate - ones. Works like rmdir except that, if the leaf directory is - successfully removed, directories corresponding to rightmost path - segments will be pruned away until either the whole path is - consumed or an error occurs. Errors during this latter phase are - ignored -- they generally mean that a directory was not empty. 
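makedirs() above and removedirs() below are rough inverses; a small sketch of their combined behaviour (the paths are illustrative):

import os

os.makedirs("tmp/a/b/c", exist_ok=True)  # creates tmp, tmp/a, tmp/a/b, tmp/a/b/c as needed
open("tmp/a/keep.txt", "w").close()      # make tmp/a non-empty
os.removedirs("tmp/a/b/c")               # removes c, then b; stops at tmp/a, which is not empty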
- - """ - rmdir(name) - head, tail = path.split(name) - if not tail: - head, tail = path.split(head) - while head and tail: - try: - rmdir(head) - except OSError: - break - head, tail = path.split(head) - -def renames(old, new): - """renames(old, new) - - Super-rename; create directories as necessary and delete any left - empty. Works like rename, except creation of any intermediate - directories needed to make the new pathname good is attempted - first. After the rename, directories corresponding to rightmost - path segments of the old name will be pruned until either the - whole path is consumed or a nonempty directory is found. - - Note: this function can fail with the new directory structure made - if you lack permissions needed to unlink the leaf directory or - file. - - """ - head, tail = path.split(new) - if head and tail and not path.exists(head): - makedirs(head) - rename(old, new) - head, tail = path.split(old) - if head and tail: - try: - removedirs(head) - except OSError: - pass - -__all__.extend(["makedirs", "removedirs", "renames"]) - -# Private sentinel that makes walk() classify all symlinks and junctions as -# regular files. -_walk_symlinks_as_files = object() - -def walk(top, topdown=True, onerror=None, followlinks=False): - """Directory tree generator. - - For each directory in the directory tree rooted at top (including top - itself, but excluding '.' and '..'), yields a 3-tuple - - dirpath, dirnames, filenames - - dirpath is a string, the path to the directory. dirnames is a list of - the names of the subdirectories in dirpath (including symlinks to directories, - and excluding '.' and '..'). - filenames is a list of the names of the non-directory files in dirpath. - Note that the names in the lists are just names, with no path components. - To get a full path (which begins with top) to a file or directory in - dirpath, do os.path.join(dirpath, name). - - If optional arg 'topdown' is true or not specified, the triple for a - directory is generated before the triples for any of its subdirectories - (directories are generated top down). If topdown is false, the triple - for a directory is generated after the triples for all of its - subdirectories (directories are generated bottom up). - - When topdown is true, the caller can modify the dirnames list in-place - (e.g., via del or slice assignment), and walk will only recurse into the - subdirectories whose names remain in dirnames; this can be used to prune the - search, or to impose a specific order of visiting. Modifying dirnames when - topdown is false has no effect on the behavior of os.walk(), since the - directories in dirnames have already been generated by the time dirnames - itself is generated. No matter the value of topdown, the list of - subdirectories is retrieved before the tuples for the directory and its - subdirectories are generated. - - By default errors from the os.scandir() call are ignored. If - optional arg 'onerror' is specified, it should be a function; it - will be called with one argument, an OSError instance. It can - report the error to continue with the walk, or raise the exception - to abort the walk. Note that the filename is available as the - filename attribute of the exception object. - - By default, os.walk does not follow symbolic links to subdirectories on - systems that support them. In order to get this functionality, set the - optional argument 'followlinks' to true. 
- - Caution: if you pass a relative pathname for top, don't change the - current working directory between resumptions of walk. walk never - changes the current directory, and assumes that the client doesn't - either. - - Example: - - import os - from os.path import join, getsize - for root, dirs, files in os.walk('python/Lib/xml'): - print(root, "consumes", end=" ") - print(sum(getsize(join(root, name)) for name in files), end=" ") - print("bytes in", len(files), "non-directory files") - if '__pycache__' in dirs: - dirs.remove('__pycache__') # don't visit __pycache__ directories - - """ - sys.audit("os.walk", top, topdown, onerror, followlinks) - - stack = [fspath(top)] - islink, join = path.islink, path.join - while stack: - top = stack.pop() - if isinstance(top, tuple): - yield top - continue - - dirs = [] - nondirs = [] - walk_dirs = [] - - # We may not have read permission for top, in which case we can't - # get a list of the files the directory contains. - # We suppress the exception here, rather than blow up for a - # minor reason when (say) a thousand readable directories are still - # left to visit. - try: - scandir_it = scandir(top) - except OSError as error: - if onerror is not None: - onerror(error) - continue - - cont = False - with scandir_it: - while True: - try: - try: - entry = next(scandir_it) - except StopIteration: - break - except OSError as error: - if onerror is not None: - onerror(error) - cont = True - break - - try: - if followlinks is _walk_symlinks_as_files: - is_dir = entry.is_dir(follow_symlinks=False) and not entry.is_junction() - else: - is_dir = entry.is_dir() - except OSError: - # If is_dir() raises an OSError, consider the entry not to - # be a directory, same behaviour as os.path.isdir(). - is_dir = False - - if is_dir: - dirs.append(entry.name) - else: - nondirs.append(entry.name) - - if not topdown and is_dir: - # Bottom-up: traverse into sub-directory, but exclude - # symlinks to directories if followlinks is False - if followlinks: - walk_into = True - else: - try: - is_symlink = entry.is_symlink() - except OSError: - # If is_symlink() raises an OSError, consider the - # entry not to be a symbolic link, same behaviour - # as os.path.islink(). - is_symlink = False - walk_into = not is_symlink - - if walk_into: - walk_dirs.append(entry.path) - if cont: - continue - - if topdown: - # Yield before sub-directory traversal if going top down - yield top, dirs, nondirs - # Traverse into sub-directories - for dirname in reversed(dirs): - new_path = join(top, dirname) - # bpo-23605: os.path.islink() is used instead of caching - # entry.is_symlink() result during the loop on os.scandir() because - # the caller can replace the directory entry during the "yield" - # above. - if followlinks or not islink(new_path): - stack.append(new_path) - else: - # Yield after sub-directory traversal if going bottom up - stack.append((top, dirs, nondirs)) - # Traverse into sub-directories - for new_path in reversed(walk_dirs): - stack.append(new_path) - -__all__.append("walk") - -if {open, stat} <= supports_dir_fd and {scandir, stat} <= supports_fd: - - def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None): - """Directory tree generator. - - This behaves exactly like walk(), except that it yields a 4-tuple - - dirpath, dirnames, filenames, dirfd - - `dirpath`, `dirnames` and `filenames` are identical to walk() output, - and `dirfd` is a file descriptor referring to the directory `dirpath`.
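As a companion to the walk() docstring above, the in-place pruning it describes looks like this (the directory names are illustrative):

import os

for root, dirs, files in os.walk(".", topdown=True):
    # Slice-assign so walk() sees the pruned list and skips those subtrees.
    dirs[:] = [d for d in dirs if d not in (".git", "__pycache__")]
    for name in files:
        print(os.path.join(root, name))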
- - The advantage of fwalk() over walk() is that it's safe against symlink - races (when follow_symlinks is False). - - If dir_fd is not None, it should be a file descriptor open to a directory, - and top should be relative; top will then be relative to that directory. - (dir_fd is always supported for fwalk.) - - Caution: - Since fwalk() yields file descriptors, those are only valid until the - next iteration step, so you should dup() them if you want to keep them - for a longer period. - - Example: - - import os - for root, dirs, files, rootfd in os.fwalk('python/Lib/xml'): - print(root, "consumes", end="") - print(sum(os.stat(name, dir_fd=rootfd).st_size for name in files), - end="") - print("bytes in", len(files), "non-directory files") - if '__pycache__' in dirs: - dirs.remove('__pycache__') # don't visit __pycache__ directories - """ - sys.audit("os.fwalk", top, topdown, onerror, follow_symlinks, dir_fd) - top = fspath(top) - stack = [(_fwalk_walk, (True, dir_fd, top, top, None))] - isbytes = isinstance(top, bytes) - try: - while stack: - yield from _fwalk(stack, isbytes, topdown, onerror, follow_symlinks) - finally: - # Close any file descriptors still on the stack. - while stack: - action, value = stack.pop() - if action == _fwalk_close: - close(value) - - # Each item in the _fwalk() stack is a pair (action, args). - _fwalk_walk = 0 # args: (isroot, dirfd, toppath, topname, entry) - _fwalk_yield = 1 # args: (toppath, dirnames, filenames, topfd) - _fwalk_close = 2 # args: dirfd - - def _fwalk(stack, isbytes, topdown, onerror, follow_symlinks): - # Note: This uses O(depth of the directory tree) file descriptors: if - # necessary, it can be adapted to only require O(1) FDs, see issue - # #13734. - - action, value = stack.pop() - if action == _fwalk_close: - close(value) - return - elif action == _fwalk_yield: - yield value - return - assert action == _fwalk_walk - isroot, dirfd, toppath, topname, entry = value - try: - if not follow_symlinks: - # Note: To guard against symlink races, we use the standard - # lstat()/open()/fstat() trick. - if entry is None: - orig_st = stat(topname, follow_symlinks=False, dir_fd=dirfd) - else: - orig_st = entry.stat(follow_symlinks=False) - topfd = open(topname, O_RDONLY | O_NONBLOCK, dir_fd=dirfd) - except OSError as err: - if isroot: - raise - if onerror is not None: - onerror(err) - return - stack.append((_fwalk_close, topfd)) - if not follow_symlinks: - if isroot and not st.S_ISDIR(orig_st.st_mode): - return - if not path.samestat(orig_st, stat(topfd)): - return - - scandir_it = scandir(topfd) - dirs = [] - nondirs = [] - entries = None if topdown or follow_symlinks else [] - for entry in scandir_it: - name = entry.name - if isbytes: - name = fsencode(name) - try: - if entry.is_dir(): - dirs.append(name) - if entries is not None: - entries.append(entry) - else: - nondirs.append(name) - except OSError: - try: - # Add dangling symlinks, ignore disappeared files - if entry.is_symlink(): - nondirs.append(name) - except OSError: - pass - - if topdown: - yield toppath, dirs, nondirs, topfd - else: - stack.append((_fwalk_yield, (toppath, dirs, nondirs, topfd))) - - toppath = path.join(toppath, toppath[:0]) # Add trailing slash. 
- if entries is None: - stack.extend( - (_fwalk_walk, (False, topfd, toppath + name, name, None)) - for name in dirs[::-1]) - else: - stack.extend( - (_fwalk_walk, (False, topfd, toppath + name, name, entry)) - for name, entry in zip(dirs[::-1], entries[::-1])) - - __all__.append("fwalk") - -def execl(file, *args): - """execl(file, *args) - - Execute the executable file with argument list args, replacing the - current process. """ - execv(file, args) - -def execle(file, *args): - """execle(file, *args, env) - - Execute the executable file with argument list args and - environment env, replacing the current process. """ - env = args[-1] - execve(file, args[:-1], env) - -def execlp(file, *args): - """execlp(file, *args) - - Execute the executable file (which is searched for along $PATH) - with argument list args, replacing the current process. """ - execvp(file, args) - -def execlpe(file, *args): - """execlpe(file, *args, env) - - Execute the executable file (which is searched for along $PATH) - with argument list args and environment env, replacing the current - process. """ - env = args[-1] - execvpe(file, args[:-1], env) - -def execvp(file, args): - """execvp(file, args) - - Execute the executable file (which is searched for along $PATH) - with argument list args, replacing the current process. - args may be a list or tuple of strings. """ - _execvpe(file, args) - -def execvpe(file, args, env): - """execvpe(file, args, env) - - Execute the executable file (which is searched for along $PATH) - with argument list args and environment env, replacing the - current process. - args may be a list or tuple of strings. """ - _execvpe(file, args, env) - -__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) - -def _execvpe(file, args, env=None): - if env is not None: - exec_func = execve - argrest = (args, env) - else: - exec_func = execv - argrest = (args,) - env = environ - - if path.dirname(file): - exec_func(file, *argrest) - return - saved_exc = None - path_list = get_exec_path(env) - if name != 'nt': - file = fsencode(file) - path_list = map(fsencode, path_list) - for dir in path_list: - fullname = path.join(dir, file) - try: - exec_func(fullname, *argrest) - except (FileNotFoundError, NotADirectoryError) as e: - last_exc = e - except OSError as e: - last_exc = e - if saved_exc is None: - saved_exc = e - if saved_exc is not None: - raise saved_exc - raise last_exc - - -def get_exec_path(env=None): - """Returns the sequence of directories that will be searched for the - named executable (similar to a shell) when launching a process. - - *env* must be an environment variable dict or None. If *env* is None, - os.environ will be used. - """ - # Use a local import instead of a global import to limit the number of - # modules loaded at startup: the os module is always loaded at startup by - # Python. It may also avoid a bootstrap issue. 
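Editor's note: the exec* wrappers above all funnel into execv/execve, and a successful exec replaces the current process image, so these functions only return on error. A minimal fork-and-exec sketch (assumes a POSIX system where os.fork() exists):

    import os

    pid = os.fork()                    # POSIX only
    if pid == 0:
        # Child: by convention args[0] repeats the program name;
        # execvp searches $PATH for the executable.
        os.execvp('echo', ['echo', 'hello from the child'])
        os._exit(127)                  # only reached if exec fails
    else:
        os.waitpid(pid, 0)             # parent: reap the child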
- import warnings - - if env is None: - env = environ - - # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a - # BytesWarning when using python -b or python -bb: ignore the warning - with warnings.catch_warnings(): - warnings.simplefilter("ignore", BytesWarning) - - try: - path_list = env.get('PATH') - except TypeError: - path_list = None - - if supports_bytes_environ: - try: - path_listb = env[b'PATH'] - except (KeyError, TypeError): - pass - else: - if path_list is not None: - raise ValueError( - "env cannot contain 'PATH' and b'PATH' keys") - path_list = path_listb - - if path_list is not None and isinstance(path_list, bytes): - path_list = fsdecode(path_list) - - if path_list is None: - path_list = defpath - return path_list.split(pathsep) - - -# Change environ to automatically call putenv() and unsetenv() -from _collections_abc import MutableMapping, Mapping - -class _Environ(MutableMapping): - def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue): - self.encodekey = encodekey - self.decodekey = decodekey - self.encodevalue = encodevalue - self.decodevalue = decodevalue - self._data = data - - def __getitem__(self, key): - try: - value = self._data[self.encodekey(key)] - except KeyError: - # raise KeyError with the original key value - raise KeyError(key) from None - return self.decodevalue(value) - - def __setitem__(self, key, value): - key = self.encodekey(key) - value = self.encodevalue(value) - putenv(key, value) - self._data[key] = value - - def __delitem__(self, key): - encodedkey = self.encodekey(key) - unsetenv(encodedkey) - try: - del self._data[encodedkey] - except KeyError: - # raise KeyError with the original key value - raise KeyError(key) from None - - def __iter__(self): - # list() from dict object is an atomic operation - keys = list(self._data) - for key in keys: - yield self.decodekey(key) - - def __len__(self): - return len(self._data) - - def __repr__(self): - formatted_items = ", ".join( - f"{self.decodekey(key)!r}: {self.decodevalue(value)!r}" - for key, value in self._data.items() - ) - return f"environ({{{formatted_items}}})" - - def copy(self): - return dict(self) - - def setdefault(self, key, value): - if key not in self: - self[key] = value - return self[key] - - def __ior__(self, other): - self.update(other) - return self - - def __or__(self, other): - if not isinstance(other, Mapping): - return NotImplemented - new = dict(self) - new.update(other) - return new - - def __ror__(self, other): - if not isinstance(other, Mapping): - return NotImplemented - new = dict(other) - new.update(self) - return new - -def _createenviron(): - if name == 'nt': - # Where Env Var Names Must Be UPPERCASE - def check_str(value): - if not isinstance(value, str): - raise TypeError("str expected, not %s" % type(value).__name__) - return value - encode = check_str - decode = str - def encodekey(key): - return encode(key).upper() - data = {} - for key, value in environ.items(): - data[encodekey(key)] = value - else: - # Where Env Var Names Can Be Mixed Case - encoding = sys.getfilesystemencoding() - def encode(value): - if not isinstance(value, str): - raise TypeError("str expected, not %s" % type(value).__name__) - return value.encode(encoding, 'surrogateescape') - def decode(value): - return value.decode(encoding, 'surrogateescape') - encodekey = encode - data = environ - return _Environ(data, - encodekey, decode, - encode, decode) - -# unicode environ -environ = _createenviron() -del _createenviron - - -def getenv(key, default=None): - """Get 
an environment variable, return None if it doesn't exist. - The optional second argument can specify an alternate default. - key, default and the result are str.""" - return environ.get(key, default) - -supports_bytes_environ = (name != 'nt') -__all__.extend(("getenv", "supports_bytes_environ")) - -if supports_bytes_environ: - def _check_bytes(value): - if not isinstance(value, bytes): - raise TypeError("bytes expected, not %s" % type(value).__name__) - return value - - # bytes environ - environb = _Environ(environ._data, - _check_bytes, bytes, - _check_bytes, bytes) - del _check_bytes - - def getenvb(key, default=None): - """Get an environment variable, return None if it doesn't exist. - The optional second argument can specify an alternate default. - key, default and the result are bytes.""" - return environb.get(key, default) - - __all__.extend(("environb", "getenvb")) - -def _fscodec(): - encoding = sys.getfilesystemencoding() - errors = sys.getfilesystemencodeerrors() - - def fsencode(filename): - """Encode filename (an os.PathLike, bytes, or str) to the filesystem - encoding with 'surrogateescape' error handler, return bytes unchanged. - On Windows, use 'strict' error handler if the file system encoding is - 'mbcs' (which is the default encoding). - """ - filename = fspath(filename) # Does type-checking of `filename`. - if isinstance(filename, str): - return filename.encode(encoding, errors) - else: - return filename - - def fsdecode(filename): - """Decode filename (an os.PathLike, bytes, or str) from the filesystem - encoding with 'surrogateescape' error handler, return str unchanged. On - Windows, use 'strict' error handler if the file system encoding is - 'mbcs' (which is the default encoding). - """ - filename = fspath(filename) # Does type-checking of `filename`. - if isinstance(filename, bytes): - return filename.decode(encoding, errors) - else: - return filename - - return fsencode, fsdecode - -fsencode, fsdecode = _fscodec() -del _fscodec - -# Supply spawn*() (probably only for Unix) -if _exists("fork") and not _exists("spawnv") and _exists("execv"): - - P_WAIT = 0 - P_NOWAIT = P_NOWAITO = 1 - - __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"]) - - # XXX Should we support P_DETACH? I suppose it could fork()**2 - # and close the std I/O streams. Also, P_OVERLAY is the same - # as execv*()? - - def _spawnvef(mode, file, args, env, func): - # Internal helper; func is the exec*() function to use - if not isinstance(args, (tuple, list)): - raise TypeError('argv must be a tuple or a list') - if not args or not args[0]: - raise ValueError('argv first element cannot be empty') - pid = fork() - if not pid: - # Child - try: - if env is None: - func(file, args) - else: - func(file, args, env) - except: - _exit(127) - else: - # Parent - if mode == P_NOWAIT: - return pid # Caller is responsible for waiting! - while 1: - wpid, sts = waitpid(pid, 0) - if WIFSTOPPED(sts): - continue - - return waitstatus_to_exitcode(sts) - - def spawnv(mode, file, args): - """spawnv(mode, file, args) -> integer - -Execute file with arguments from args in a subprocess. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return _spawnvef(mode, file, args, None, execv) - - def spawnve(mode, file, args, env): - """spawnve(mode, file, args, env) -> integer - -Execute file with arguments from args in a subprocess with the -specified environment. 
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
-        return _spawnvef(mode, file, args, env, execve)
-
-    # Note: spawnvp[e] isn't currently supported on Windows
-
-    def spawnvp(mode, file, args):
-        """spawnvp(mode, file, args) -> integer
-
-Execute file (which is looked for along $PATH) with arguments from
-args in a subprocess.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
-        return _spawnvef(mode, file, args, None, execvp)
-
-    def spawnvpe(mode, file, args, env):
-        """spawnvpe(mode, file, args, env) -> integer
-
-Execute file (which is looked for along $PATH) with arguments from
-args in a subprocess with the supplied environment.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
-        return _spawnvef(mode, file, args, env, execvpe)
-
-
-    __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
-
-
-if _exists("spawnv"):
-    # These aren't supplied by the basic Windows code
-    # but can be easily implemented in Python
-
-    def spawnl(mode, file, *args):
-        """spawnl(mode, file, *args) -> integer
-
-Execute file with arguments from args in a subprocess.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
-        return spawnv(mode, file, args)
-
-    def spawnle(mode, file, *args):
-        """spawnle(mode, file, *args, env) -> integer
-
-Execute file with arguments from args in a subprocess with the
-supplied environment.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
-        env = args[-1]
-        return spawnve(mode, file, args[:-1], env)
-
-
-    __all__.extend(["spawnl", "spawnle"])
-
-
-if _exists("spawnvp"):
-    # At the moment, Windows doesn't implement spawnvp[e],
-    # so it won't have spawnlp[e] either.
-    def spawnlp(mode, file, *args):
-        """spawnlp(mode, file, *args) -> integer
-
-Execute file (which is looked for along $PATH) with arguments from
-args in a subprocess.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
-        return spawnvp(mode, file, args)
-
-    def spawnlpe(mode, file, *args):
-        """spawnlpe(mode, file, *args, env) -> integer
-
-Execute file (which is looked for along $PATH) with arguments from
-args in a subprocess with the supplied environment.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
-        env = args[-1]
-        return spawnvpe(mode, file, args[:-1], env)
-
-
-    __all__.extend(["spawnlp", "spawnlpe"])
-
-# VxWorks has no user space shell provided. As a result, running
-# a command in a shell can't be supported.
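Editor's note: a minimal sketch of the spawn* API defined above, using the current interpreter as the target program so it runs on both POSIX and Windows:

    import os
    import sys

    # P_WAIT blocks and returns the child's exit code; P_NOWAIT returns
    # the child's pid immediately instead.
    code = os.spawnv(os.P_WAIT, sys.executable,
                     [sys.executable, '-c', 'print("spawned")'])
    print('exit code:', code)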
-if sys.platform != 'vxworks': - # Supply os.popen() - def popen(cmd, mode="r", buffering=-1): - if not isinstance(cmd, str): - raise TypeError("invalid cmd type (%s, expected string)" % type(cmd)) - if mode not in ("r", "w"): - raise ValueError("invalid mode %r" % mode) - if buffering == 0 or buffering is None: - raise ValueError("popen() does not support unbuffered streams") - import subprocess - if mode == "r": - proc = subprocess.Popen(cmd, - shell=True, text=True, - stdout=subprocess.PIPE, - bufsize=buffering) - return _wrap_close(proc.stdout, proc) - else: - proc = subprocess.Popen(cmd, - shell=True, text=True, - stdin=subprocess.PIPE, - bufsize=buffering) - return _wrap_close(proc.stdin, proc) - - # Helper for popen() -- a proxy for a file whose close waits for the process - class _wrap_close: - def __init__(self, stream, proc): - self._stream = stream - self._proc = proc - def close(self): - self._stream.close() - returncode = self._proc.wait() - if returncode == 0: - return None - if name == 'nt': - return returncode - else: - return returncode << 8 # Shift left to match old behavior - def __enter__(self): - return self - def __exit__(self, *args): - self.close() - def __getattr__(self, name): - return getattr(self._stream, name) - def __iter__(self): - return iter(self._stream) - - __all__.append("popen") - -# Supply os.fdopen() -def fdopen(fd, mode="r", buffering=-1, encoding=None, *args, **kwargs): - if not isinstance(fd, int): - raise TypeError("invalid fd type (%s, expected integer)" % type(fd)) - import io - if "b" not in mode: - encoding = io.text_encoding(encoding) - return io.open(fd, mode, buffering, encoding, *args, **kwargs) - - -# For testing purposes, make sure the function is available when the C -# implementation exists. -def _fspath(path): - """Return the path representation of a path-like object. - - If str or bytes is passed in, it is returned unchanged. Otherwise the - os.PathLike interface is used to get the path representation. If the - path representation is not str or bytes, TypeError is raised. If the - provided path is not str, bytes, or os.PathLike, TypeError is raised. - """ - if isinstance(path, (str, bytes)): - return path - - # Work from the object's type to match method resolution of other magic - # methods. - path_type = type(path) - try: - path_repr = path_type.__fspath__(path) - except AttributeError: - if hasattr(path_type, '__fspath__'): - raise - else: - raise TypeError("expected str, bytes or os.PathLike object, " - "not " + path_type.__name__) - except TypeError: - if path_type.__fspath__ is None: - raise TypeError("expected str, bytes or os.PathLike object, " - "not " + path_type.__name__) from None - else: - raise - if isinstance(path_repr, (str, bytes)): - return path_repr - else: - raise TypeError("expected {}.__fspath__() to return str or bytes, " - "not {}".format(path_type.__name__, - type(path_repr).__name__)) - -# If there is no C implementation, make the pure Python version the -# implementation as transparently as possible. 
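Editor's note: os.popen() above is a thin wrapper over subprocess.Popen; close() returns None on success, or the encoded exit status otherwise (shifted left 8 bits on POSIX, matching the historical wait() encoding). A minimal sketch, with an arbitrary shell command:

    import os

    with os.popen('echo hello') as stream:   # close() runs on context exit
        print(stream.read().strip())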
-if not _exists('fspath'):
-    fspath = _fspath
-    fspath.__name__ = "fspath"
-
-
-class PathLike(abc.ABC):
-
-    """Abstract base class for implementing the file system path protocol."""
-
-    __slots__ = ()
-
-    @abc.abstractmethod
-    def __fspath__(self):
-        """Return the file system path representation of the object."""
-        raise NotImplementedError
-
-    @classmethod
-    def __subclasshook__(cls, subclass):
-        if cls is PathLike:
-            return _check_methods(subclass, '__fspath__')
-        return NotImplemented
-
-    __class_getitem__ = classmethod(GenericAlias)
-
-
-if name == 'nt':
-    class _AddedDllDirectory:
-        def __init__(self, path, cookie, remove_dll_directory):
-            self.path = path
-            self._cookie = cookie
-            self._remove_dll_directory = remove_dll_directory
-        def close(self):
-            self._remove_dll_directory(self._cookie)
-            self.path = None
-        def __enter__(self):
-            return self
-        def __exit__(self, *args):
-            self.close()
-        def __repr__(self):
-            if self.path:
-                return "<AddedDllDirectory({!r})>".format(self.path)
-            return "<AddedDllDirectory()>"
-
-    def add_dll_directory(path):
-        """Add a path to the DLL search path.
-
-        This search path is used when resolving dependencies for imported
-        extension modules (the module itself is resolved through sys.path),
-        and also by ctypes.
-
-        Remove the directory by calling close() on the returned object or
-        using it in a with statement.
-        """
-        import nt
-        cookie = nt._add_dll_directory(path)
-        return _AddedDllDirectory(
-            path,
-            cookie,
-            nt._remove_dll_directory
-        )
-
-
-if _exists('sched_getaffinity') and sys._get_cpu_count_config() < 0:
-    def process_cpu_count():
-        """
-        Get the number of CPUs of the current process.
-
-        Return the number of logical CPUs usable by the calling thread of the
-        current process. Return None if indeterminable.
-        """
-        return len(sched_getaffinity(0))
-else:
-    # Just an alias to cpu_count() (same docstring)
-    process_cpu_count = cpu_count
diff --git a/Python313_13_x86_Template/Lib/pathlib/__init__.py b/Python313_13_x86_Template/Lib/pathlib/__init__.py
deleted file mode 100644
index 4b3edf53..00000000
--- a/Python313_13_x86_Template/Lib/pathlib/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""Object-oriented filesystem paths.
-
-This module provides classes to represent abstract paths and concrete
-paths with operations that have semantics appropriate for different
-operating systems.
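Editor's note: a minimal sketch of the path protocol implemented by _fspath()/PathLike above; the Config class is hypothetical:

    import os

    class Config:
        def __init__(self, base):
            self._base = base
        def __fspath__(self):                  # called by os.fspath()
            return self._base + '/config.ini'

    print(os.fspath(Config('/etc')))                # /etc/config.ini
    print(isinstance(Config('/etc'), os.PathLike))  # True, via __subclasshook__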
-""" - -from ._abc import * -from ._local import * - -__all__ = (_abc.__all__ + - _local.__all__) diff --git a/Python313_13_x86_Template/Lib/pathlib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/pathlib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 3899d87a..00000000 Binary files a/Python313_13_x86_Template/Lib/pathlib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/pathlib/__pycache__/_abc.cpython-313.pyc b/Python313_13_x86_Template/Lib/pathlib/__pycache__/_abc.cpython-313.pyc deleted file mode 100644 index 15d4d12c..00000000 Binary files a/Python313_13_x86_Template/Lib/pathlib/__pycache__/_abc.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/pathlib/__pycache__/_local.cpython-313.pyc b/Python313_13_x86_Template/Lib/pathlib/__pycache__/_local.cpython-313.pyc deleted file mode 100644 index 45321ad1..00000000 Binary files a/Python313_13_x86_Template/Lib/pathlib/__pycache__/_local.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/pathlib/_abc.py b/Python313_13_x86_Template/Lib/pathlib/_abc.py deleted file mode 100644 index 4d24146a..00000000 --- a/Python313_13_x86_Template/Lib/pathlib/_abc.py +++ /dev/null @@ -1,930 +0,0 @@ -""" -Abstract base classes for rich path objects. - -This module is published as a PyPI package called "pathlib-abc". - -This module is also a *PRIVATE* part of the Python standard library, where -it's developed alongside pathlib. If it finds success and maturity as a PyPI -package, it could become a public part of the standard library. - -Two base classes are defined here -- PurePathBase and PathBase -- that -resemble pathlib's PurePath and Path respectively. -""" - -import functools -from glob import _Globber, _no_recurse_symlinks -from errno import ENOENT, ENOTDIR, EBADF, ELOOP, EINVAL -from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO - - -__all__ = ["UnsupportedOperation"] - -# -# Internals -# - -_WINERROR_NOT_READY = 21 # drive exists but is not accessible -_WINERROR_INVALID_NAME = 123 # fix for bpo-35306 -_WINERROR_CANT_RESOLVE_FILENAME = 1921 # broken symlink pointing to itself - -# EBADF - guard against macOS `stat` throwing EBADF -_IGNORED_ERRNOS = (ENOENT, ENOTDIR, EBADF, ELOOP) - -_IGNORED_WINERRORS = ( - _WINERROR_NOT_READY, - _WINERROR_INVALID_NAME, - _WINERROR_CANT_RESOLVE_FILENAME) - -def _ignore_error(exception): - return (getattr(exception, 'errno', None) in _IGNORED_ERRNOS or - getattr(exception, 'winerror', None) in _IGNORED_WINERRORS) - - -@functools.cache -def _is_case_sensitive(parser): - return parser.normcase('Aa') == 'Aa' - - -class UnsupportedOperation(NotImplementedError): - """An exception that is raised when an unsupported operation is called on - a path object. - """ - pass - - -class ParserBase: - """Base class for path parsers, which do low-level path manipulation. - - Path parsers provide a subset of the os.path API, specifically those - functions needed to provide PurePathBase functionality. Each PurePathBase - subclass references its path parser via a 'parser' class attribute. - - Every method in this base class raises an UnsupportedOperation exception. 
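Editor's note: a concrete parser can simply delegate to an os.path-style module, which already provides the subset of functions ParserBase declares (sep, join, split, splitdrive, normcase, isabs). A sketch using posixpath:

    import posixpath

    print(posixpath.split('/usr/local/bin'))   # ('/usr/local', 'bin')
    print(posixpath.splitdrive('/usr/local'))  # ('', '/usr/local')
    print(posixpath.isabs('relative/path'))    # False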
- """ - - @classmethod - def _unsupported_msg(cls, attribute): - return f"{cls.__name__}.{attribute} is unsupported" - - @property - def sep(self): - """The character used to separate path components.""" - raise UnsupportedOperation(self._unsupported_msg('sep')) - - def join(self, path, *paths): - """Join path segments.""" - raise UnsupportedOperation(self._unsupported_msg('join()')) - - def split(self, path): - """Split the path into a pair (head, tail), where *head* is everything - before the final path separator, and *tail* is everything after. - Either part may be empty. - """ - raise UnsupportedOperation(self._unsupported_msg('split()')) - - def splitdrive(self, path): - """Split the path into a 2-item tuple (drive, tail), where *drive* is - a device name or mount point, and *tail* is everything after the - drive. Either part may be empty.""" - raise UnsupportedOperation(self._unsupported_msg('splitdrive()')) - - def normcase(self, path): - """Normalize the case of the path.""" - raise UnsupportedOperation(self._unsupported_msg('normcase()')) - - def isabs(self, path): - """Returns whether the path is absolute, i.e. unaffected by the - current directory or drive.""" - raise UnsupportedOperation(self._unsupported_msg('isabs()')) - - -class PurePathBase: - """Base class for pure path objects. - - This class *does not* provide several magic methods that are defined in - its subclass PurePath. They are: __fspath__, __bytes__, __reduce__, - __hash__, __eq__, __lt__, __le__, __gt__, __ge__. Its initializer and path - joining methods accept only strings, not os.PathLike objects more broadly. - """ - - __slots__ = ( - # The `_raw_path` slot store a joined string path. This is set in the - # `__init__()` method. - '_raw_path', - - # The '_resolving' slot stores a boolean indicating whether the path - # is being processed by `PathBase.resolve()`. This prevents duplicate - # work from occurring when `resolve()` calls `stat()` or `readlink()`. - '_resolving', - ) - parser = ParserBase() - _globber = _Globber - - def __init__(self, path, *paths): - self._raw_path = self.parser.join(path, *paths) if paths else path - if not isinstance(self._raw_path, str): - raise TypeError( - f"path should be a str, not {type(self._raw_path).__name__!r}") - self._resolving = False - - def with_segments(self, *pathsegments): - """Construct a new path object from any number of path-like objects. - Subclasses may override this method to customize how new path objects - are created from methods like `iterdir()`. - """ - return type(self)(*pathsegments) - - def __str__(self): - """Return the string representation of the path, suitable for - passing to system calls.""" - return self._raw_path - - def as_posix(self): - """Return the string representation of the path with forward (/) - slashes.""" - return str(self).replace(self.parser.sep, '/') - - @property - def drive(self): - """The drive prefix (letter or UNC path), if any.""" - return self.parser.splitdrive(self.anchor)[0] - - @property - def root(self): - """The root of the path, if any.""" - return self.parser.splitdrive(self.anchor)[1] - - @property - def anchor(self): - """The concatenation of the drive and root, or ''.""" - return self._stack[0] - - @property - def name(self): - """The final path component, if any.""" - return self.parser.split(self._raw_path)[1] - - @property - def suffix(self): - """ - The final component's last suffix, if any. - - This includes the leading period. 
For example: '.txt' - """ - name = self.name - i = name.rfind('.') - if 0 < i < len(name) - 1: - return name[i:] - else: - return '' - - @property - def suffixes(self): - """ - A list of the final component's suffixes, if any. - - These include the leading periods. For example: ['.tar', '.gz'] - """ - name = self.name - if name.endswith('.'): - return [] - name = name.lstrip('.') - return ['.' + suffix for suffix in name.split('.')[1:]] - - @property - def stem(self): - """The final path component, minus its last suffix.""" - name = self.name - i = name.rfind('.') - if 0 < i < len(name) - 1: - return name[:i] - else: - return name - - def with_name(self, name): - """Return a new path with the file name changed.""" - split = self.parser.split - if split(name)[0]: - raise ValueError(f"Invalid name {name!r}") - return self.with_segments(split(self._raw_path)[0], name) - - def with_stem(self, stem): - """Return a new path with the stem changed.""" - suffix = self.suffix - if not suffix: - return self.with_name(stem) - elif not stem: - # If the suffix is non-empty, we can't make the stem empty. - raise ValueError(f"{self!r} has a non-empty suffix") - else: - return self.with_name(stem + suffix) - - def with_suffix(self, suffix): - """Return a new path with the file suffix changed. If the path - has no suffix, add given suffix. If the given suffix is an empty - string, remove the suffix from the path. - """ - stem = self.stem - if not stem: - # If the stem is empty, we can't make the suffix non-empty. - raise ValueError(f"{self!r} has an empty name") - elif suffix and not (suffix.startswith('.') and len(suffix) > 1): - raise ValueError(f"Invalid suffix {suffix!r}") - else: - return self.with_name(stem + suffix) - - def relative_to(self, other, *, walk_up=False): - """Return the relative path to another path identified by the passed - arguments. If the operation is not possible (because this is not - related to the other path), raise ValueError. - - The *walk_up* parameter controls whether `..` may be used to resolve - the path. - """ - if not isinstance(other, PurePathBase): - other = self.with_segments(other) - anchor0, parts0 = self._stack - anchor1, parts1 = other._stack - if anchor0 != anchor1: - raise ValueError(f"{self._raw_path!r} and {other._raw_path!r} have different anchors") - while parts0 and parts1 and parts0[-1] == parts1[-1]: - parts0.pop() - parts1.pop() - for part in parts1: - if not part or part == '.': - pass - elif not walk_up: - raise ValueError(f"{self._raw_path!r} is not in the subpath of {other._raw_path!r}") - elif part == '..': - raise ValueError(f"'..' segment in {other._raw_path!r} cannot be walked") - else: - parts0.append('..') - return self.with_segments('', *reversed(parts0)) - - def is_relative_to(self, other): - """Return True if the path is relative to another path or False. 
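Editor's note: a short sketch of the name/suffix helpers defined above; pathlib's public classes expose the same behaviour:

    from pathlib import PurePosixPath

    p = PurePosixPath('/srv/data/archive.tar.gz')
    print(p.suffix)               # '.gz'
    print(p.suffixes)             # ['.tar', '.gz']
    print(p.stem)                 # 'archive.tar'
    print(p.with_suffix('.zip'))  # /srv/data/archive.tar.zip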
- """ - if not isinstance(other, PurePathBase): - other = self.with_segments(other) - anchor0, parts0 = self._stack - anchor1, parts1 = other._stack - if anchor0 != anchor1: - return False - while parts0 and parts1 and parts0[-1] == parts1[-1]: - parts0.pop() - parts1.pop() - for part in parts1: - if part and part != '.': - return False - return True - - @property - def parts(self): - """An object providing sequence-like access to the - components in the filesystem path.""" - anchor, parts = self._stack - if anchor: - parts.append(anchor) - return tuple(reversed(parts)) - - def joinpath(self, *pathsegments): - """Combine this path with one or several arguments, and return a - new path representing either a subpath (if all arguments are relative - paths) or a totally different path (if one of the arguments is - anchored). - """ - return self.with_segments(self._raw_path, *pathsegments) - - def __truediv__(self, key): - try: - return self.with_segments(self._raw_path, key) - except TypeError: - return NotImplemented - - def __rtruediv__(self, key): - try: - return self.with_segments(key, self._raw_path) - except TypeError: - return NotImplemented - - @property - def _stack(self): - """ - Split the path into a 2-tuple (anchor, parts), where *anchor* is the - uppermost parent of the path (equivalent to path.parents[-1]), and - *parts* is a reversed list of parts following the anchor. - """ - split = self.parser.split - path = self._raw_path - parent, name = split(path) - names = [] - while path != parent: - names.append(name) - path = parent - parent, name = split(path) - return path, names - - @property - def parent(self): - """The logical parent of the path.""" - path = self._raw_path - parent = self.parser.split(path)[0] - if path != parent: - parent = self.with_segments(parent) - parent._resolving = self._resolving - return parent - return self - - @property - def parents(self): - """A sequence of this path's logical parents.""" - split = self.parser.split - path = self._raw_path - parent = split(path)[0] - parents = [] - while path != parent: - parents.append(self.with_segments(parent)) - path = parent - parent = split(path)[0] - return tuple(parents) - - def is_absolute(self): - """True if the path is absolute (has both a root and, if applicable, - a drive).""" - return self.parser.isabs(self._raw_path) - - @property - def _pattern_str(self): - """The path expressed as a string, for use in pattern-matching.""" - return str(self) - - def match(self, path_pattern, *, case_sensitive=None): - """ - Return True if this path matches the given pattern. If the pattern is - relative, matching is done from the right; otherwise, the entire path - is matched. The recursive wildcard '**' is *not* supported by this - method. 
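Editor's note: a sketch of the matching semantics described above; match() anchors relative patterns on the right, while full_match() (Python 3.13+) matches the whole path and accepts '**':

    from pathlib import PurePosixPath

    p = PurePosixPath('/var/log/syslog.1')
    print(p.match('*.1'))               # True: relative pattern, matched from the right
    print(p.match('/var/*/*.1'))        # True: absolute pattern, whole path
    print(p.full_match('/var/**/*.1'))  # True: '**' spans any number of parts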
- """ - if not isinstance(path_pattern, PurePathBase): - path_pattern = self.with_segments(path_pattern) - if case_sensitive is None: - case_sensitive = _is_case_sensitive(self.parser) - sep = path_pattern.parser.sep - path_parts = self.parts[::-1] - pattern_parts = path_pattern.parts[::-1] - if not pattern_parts: - raise ValueError("empty pattern") - if len(path_parts) < len(pattern_parts): - return False - if len(path_parts) > len(pattern_parts) and path_pattern.anchor: - return False - globber = self._globber(sep, case_sensitive) - for path_part, pattern_part in zip(path_parts, pattern_parts): - match = globber.compile(pattern_part) - if match(path_part) is None: - return False - return True - - def full_match(self, pattern, *, case_sensitive=None): - """ - Return True if this path matches the given glob-style pattern. The - pattern is matched against the entire path. - """ - if not isinstance(pattern, PurePathBase): - pattern = self.with_segments(pattern) - if case_sensitive is None: - case_sensitive = _is_case_sensitive(self.parser) - globber = self._globber(pattern.parser.sep, case_sensitive, recursive=True) - match = globber.compile(pattern._pattern_str) - return match(self._pattern_str) is not None - - - -class PathBase(PurePathBase): - """Base class for concrete path objects. - - This class provides dummy implementations for many methods that derived - classes can override selectively; the default implementations raise - UnsupportedOperation. The most basic methods, such as stat() and open(), - directly raise UnsupportedOperation; these basic methods are called by - other methods such as is_dir() and read_text(). - - The Path class derives this class to implement local filesystem paths. - Users may derive their own classes to implement virtual filesystem paths, - such as paths in archive files or on remote storage systems. - """ - __slots__ = () - - # Maximum number of symlinks to follow in resolve() - _max_symlinks = 40 - - @classmethod - def _unsupported_msg(cls, attribute): - return f"{cls.__name__}.{attribute} is unsupported" - - def stat(self, *, follow_symlinks=True): - """ - Return the result of the stat() system call on this path, like - os.stat() does. - """ - raise UnsupportedOperation(self._unsupported_msg('stat()')) - - def lstat(self): - """ - Like stat(), except if the path points to a symlink, the symlink's - status information is returned, rather than its target's. - """ - return self.stat(follow_symlinks=False) - - - # Convenience functions for querying the stat results - - def exists(self, *, follow_symlinks=True): - """ - Whether this path exists. - - This method normally follows symlinks; to check whether a symlink exists, - add the argument follow_symlinks=False. - """ - try: - self.stat(follow_symlinks=follow_symlinks) - except OSError as e: - if not _ignore_error(e): - raise - return False - except ValueError: - # Non-encodable path - return False - return True - - def is_dir(self, *, follow_symlinks=True): - """ - Whether this path is a directory. - """ - try: - return S_ISDIR(self.stat(follow_symlinks=follow_symlinks).st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def is_file(self, *, follow_symlinks=True): - """ - Whether this path is a regular file (also True for symlinks pointing - to regular files). 
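Editor's note: the query helpers above deliberately swallow "missing file" errors (via _ignore_error) and return False instead of raising, so probing a nonexistent path is safe. A minimal sketch with a hypothetical path:

    from pathlib import Path

    p = Path('probably-missing.txt')             # hypothetical path
    print(p.exists(), p.is_file(), p.is_dir())   # False False False if absent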
- """ - try: - return S_ISREG(self.stat(follow_symlinks=follow_symlinks).st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def is_mount(self): - """ - Check if this path is a mount point - """ - # Need to exist and be a dir - if not self.exists() or not self.is_dir(): - return False - - try: - parent_dev = self.parent.stat().st_dev - except OSError: - return False - - dev = self.stat().st_dev - if dev != parent_dev: - return True - ino = self.stat().st_ino - parent_ino = self.parent.stat().st_ino - return ino == parent_ino - - def is_symlink(self): - """ - Whether this path is a symbolic link. - """ - try: - return S_ISLNK(self.lstat().st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist - return False - except ValueError: - # Non-encodable path - return False - - def is_junction(self): - """ - Whether this path is a junction. - """ - # Junctions are a Windows-only feature, not present in POSIX nor the - # majority of virtual filesystems. There is no cross-platform idiom - # to check for junctions (using stat().st_mode). - return False - - def is_block_device(self): - """ - Whether this path is a block device. - """ - try: - return S_ISBLK(self.stat().st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def is_char_device(self): - """ - Whether this path is a character device. - """ - try: - return S_ISCHR(self.stat().st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def is_fifo(self): - """ - Whether this path is a FIFO. - """ - try: - return S_ISFIFO(self.stat().st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def is_socket(self): - """ - Whether this path is a socket. - """ - try: - return S_ISSOCK(self.stat().st_mode) - except OSError as e: - if not _ignore_error(e): - raise - # Path doesn't exist or is a broken symlink - # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) - return False - except ValueError: - # Non-encodable path - return False - - def samefile(self, other_path): - """Return whether other_path is the same or not as this file - (as returned by os.path.samefile()). - """ - st = self.stat() - try: - other_st = other_path.stat() - except AttributeError: - other_st = self.with_segments(other_path).stat() - return (st.st_ino == other_st.st_ino and - st.st_dev == other_st.st_dev) - - def open(self, mode='r', buffering=-1, encoding=None, - errors=None, newline=None): - """ - Open the file pointed to by this path and return a file object, as - the built-in open() function does. 
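Editor's note: read_text()/write_text() defined just below are convenience wrappers over open(). A minimal round-trip sketch; the file name is arbitrary:

    from pathlib import Path

    tmp = Path('demo.txt')                         # hypothetical scratch file
    tmp.write_text('hello\n', encoding='utf-8')    # open('w'), write, close
    print(tmp.read_text(encoding='utf-8'))         # open('r'), read, close
    tmp.unlink()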
- """ - raise UnsupportedOperation(self._unsupported_msg('open()')) - - def read_bytes(self): - """ - Open the file in bytes mode, read it, and close the file. - """ - with self.open(mode='rb') as f: - return f.read() - - def read_text(self, encoding=None, errors=None, newline=None): - """ - Open the file in text mode, read it, and close the file. - """ - with self.open(mode='r', encoding=encoding, errors=errors, newline=newline) as f: - return f.read() - - def write_bytes(self, data): - """ - Open the file in bytes mode, write to it, and close the file. - """ - # type-check for the buffer interface before truncating the file - view = memoryview(data) - with self.open(mode='wb') as f: - return f.write(view) - - def write_text(self, data, encoding=None, errors=None, newline=None): - """ - Open the file in text mode, write to it, and close the file. - """ - if not isinstance(data, str): - raise TypeError('data must be str, not %s' % - data.__class__.__name__) - with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f: - return f.write(data) - - def iterdir(self): - """Yield path objects of the directory contents. - - The children are yielded in arbitrary order, and the - special entries '.' and '..' are not included. - """ - raise UnsupportedOperation(self._unsupported_msg('iterdir()')) - - def _glob_selector(self, parts, case_sensitive, recurse_symlinks): - if case_sensitive is None: - case_sensitive = _is_case_sensitive(self.parser) - case_pedantic = False - else: - # The user has expressed a case sensitivity choice, but we don't - # know the case sensitivity of the underlying filesystem, so we - # must use scandir() for everything, including non-wildcard parts. - case_pedantic = True - recursive = True if recurse_symlinks else _no_recurse_symlinks - globber = self._globber(self.parser.sep, case_sensitive, case_pedantic, recursive) - return globber.selector(parts) - - def glob(self, pattern, *, case_sensitive=None, recurse_symlinks=True): - """Iterate over this subtree and yield all existing files (of any - kind, including directories) matching the given relative pattern. - """ - if not isinstance(pattern, PurePathBase): - pattern = self.with_segments(pattern) - anchor, parts = pattern._stack - if anchor: - raise NotImplementedError("Non-relative patterns are unsupported") - select = self._glob_selector(parts, case_sensitive, recurse_symlinks) - return select(self) - - def rglob(self, pattern, *, case_sensitive=None, recurse_symlinks=True): - """Recursively yield all existing files (of any kind, including - directories) matching the given relative pattern, anywhere in - this subtree. 
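Editor's note: a sketch of the glob helpers; as the code above shows, rglob(p) is equivalent to glob('**/' + p), implicitly anchored under this directory:

    from pathlib import Path

    for py in Path('.').rglob('*.py'):   # same as Path('.').glob('**/*.py')
        print(py)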
- """ - if not isinstance(pattern, PurePathBase): - pattern = self.with_segments(pattern) - pattern = '**' / pattern - return self.glob(pattern, case_sensitive=case_sensitive, recurse_symlinks=recurse_symlinks) - - def walk(self, top_down=True, on_error=None, follow_symlinks=False): - """Walk the directory tree from this directory, similar to os.walk().""" - paths = [self] - while paths: - path = paths.pop() - if isinstance(path, tuple): - yield path - continue - dirnames = [] - filenames = [] - if not top_down: - paths.append((path, dirnames, filenames)) - try: - for child in path.iterdir(): - try: - if child.is_dir(follow_symlinks=follow_symlinks): - if not top_down: - paths.append(child) - dirnames.append(child.name) - else: - filenames.append(child.name) - except OSError: - filenames.append(child.name) - except OSError as error: - if on_error is not None: - on_error(error) - if not top_down: - while not isinstance(paths.pop(), tuple): - pass - continue - if top_down: - yield path, dirnames, filenames - paths += [path.joinpath(d) for d in reversed(dirnames)] - - def absolute(self): - """Return an absolute version of this path - No normalization or symlink resolution is performed. - - Use resolve() to resolve symlinks and remove '..' segments. - """ - raise UnsupportedOperation(self._unsupported_msg('absolute()')) - - @classmethod - def cwd(cls): - """Return a new path pointing to the current working directory.""" - # We call 'absolute()' rather than using 'os.getcwd()' directly to - # enable users to replace the implementation of 'absolute()' in a - # subclass and benefit from the new behaviour here. This works because - # os.path.abspath('.') == os.getcwd(). - return cls('').absolute() - - def expanduser(self): - """ Return a new path with expanded ~ and ~user constructs - (as returned by os.path.expanduser) - """ - raise UnsupportedOperation(self._unsupported_msg('expanduser()')) - - @classmethod - def home(cls): - """Return a new path pointing to expanduser('~'). - """ - return cls("~").expanduser() - - def readlink(self): - """ - Return the path to which the symbolic link points. - """ - raise UnsupportedOperation(self._unsupported_msg('readlink()')) - readlink._supported = False - - def resolve(self, strict=False): - """ - Make the path absolute, resolving all symlinks on the way and also - normalizing it. - """ - if self._resolving: - return self - path_root, parts = self._stack - path = self.with_segments(path_root) - try: - path = path.absolute() - except UnsupportedOperation: - path_tail = [] - else: - path_root, path_tail = path._stack - path_tail.reverse() - - # If the user has *not* overridden the `readlink()` method, then symlinks are unsupported - # and (in non-strict mode) we can improve performance by not calling `stat()`. - querying = strict or getattr(self.readlink, '_supported', True) - link_count = 0 - while parts: - part = parts.pop() - if not part or part == '.': - continue - if part == '..': - if not path_tail: - if path_root: - # Delete '..' segment immediately following root - continue - elif path_tail[-1] != '..': - # Delete '..' segment and its predecessor - path_tail.pop() - continue - path_tail.append(part) - if querying and part != '..': - path = self.with_segments(path_root + self.parser.sep.join(path_tail)) - path._resolving = True - try: - st = path.stat(follow_symlinks=False) - if S_ISLNK(st.st_mode): - # Like Linux and macOS, raise OSError(errno.ELOOP) if too many symlinks are - # encountered during resolution. 
- link_count += 1 - if link_count >= self._max_symlinks: - raise OSError(ELOOP, "Too many symbolic links in path", self._raw_path) - target_root, target_parts = path.readlink()._stack - # If the symlink target is absolute (like '/etc/hosts'), set the current - # path to its uppermost parent (like '/'). - if target_root: - path_root = target_root - path_tail.clear() - else: - path_tail.pop() - # Add the symlink target's reversed tail parts (like ['hosts', 'etc']) to - # the stack of unresolved path parts. - parts.extend(target_parts) - continue - elif parts and not S_ISDIR(st.st_mode): - raise NotADirectoryError(ENOTDIR, "Not a directory", self._raw_path) - except OSError: - if strict: - raise - else: - querying = False - return self.with_segments(path_root + self.parser.sep.join(path_tail)) - - def symlink_to(self, target, target_is_directory=False): - """ - Make this path a symlink pointing to the target path. - Note the order of arguments (link, target) is the reverse of os.symlink. - """ - raise UnsupportedOperation(self._unsupported_msg('symlink_to()')) - - def hardlink_to(self, target): - """ - Make this path a hard link pointing to the same file as *target*. - - Note the order of arguments (self, target) is the reverse of os.link's. - """ - raise UnsupportedOperation(self._unsupported_msg('hardlink_to()')) - - def touch(self, mode=0o666, exist_ok=True): - """ - Create this file with the given access mode, if it doesn't exist. - """ - raise UnsupportedOperation(self._unsupported_msg('touch()')) - - def mkdir(self, mode=0o777, parents=False, exist_ok=False): - """ - Create a new directory at this given path. - """ - raise UnsupportedOperation(self._unsupported_msg('mkdir()')) - - def rename(self, target): - """ - Rename this path to the target path. - - The target path may be absolute or relative. Relative paths are - interpreted relative to the current working directory, *not* the - directory of the Path object. - - Returns the new Path instance pointing to the target path. - """ - raise UnsupportedOperation(self._unsupported_msg('rename()')) - - def replace(self, target): - """ - Rename this path to the target path, overwriting if that path exists. - - The target path may be absolute or relative. Relative paths are - interpreted relative to the current working directory, *not* the - directory of the Path object. - - Returns the new Path instance pointing to the target path. - """ - raise UnsupportedOperation(self._unsupported_msg('replace()')) - - def chmod(self, mode, *, follow_symlinks=True): - """ - Change the permissions of the path, like os.chmod(). - """ - raise UnsupportedOperation(self._unsupported_msg('chmod()')) - - def lchmod(self, mode): - """ - Like chmod(), except if the path points to a symlink, the symlink's - permissions are changed, rather than its target's. - """ - self.chmod(mode, follow_symlinks=False) - - def unlink(self, missing_ok=False): - """ - Remove this file or link. - If the path is a directory, use rmdir() instead. - """ - raise UnsupportedOperation(self._unsupported_msg('unlink()')) - - def rmdir(self): - """ - Remove this directory. The directory must be empty. - """ - raise UnsupportedOperation(self._unsupported_msg('rmdir()')) - - def owner(self, *, follow_symlinks=True): - """ - Return the login name of the file owner. - """ - raise UnsupportedOperation(self._unsupported_msg('owner()')) - - def group(self, *, follow_symlinks=True): - """ - Return the group name of the file gid. 
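Editor's note: resolve() above makes the path absolute, collapses '.' and '..' while consulting the filesystem, and chases symlinks up to _max_symlinks before raising ELOOP. A minimal sketch; the sample path is arbitrary and strict=False (the default) tolerates missing components:

    from pathlib import Path

    p = Path('some/dir/..') / 'file.txt'
    print(p.resolve())   # e.g. <cwd>/some/file.txt, symlinks resolved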
- """ - raise UnsupportedOperation(self._unsupported_msg('group()')) - - @classmethod - def from_uri(cls, uri): - """Return a new path from the given 'file' URI.""" - raise UnsupportedOperation(cls._unsupported_msg('from_uri()')) - - def as_uri(self): - """Return the path as a URI.""" - raise UnsupportedOperation(self._unsupported_msg('as_uri()')) diff --git a/Python313_13_x86_Template/Lib/pathlib/_local.py b/Python313_13_x86_Template/Lib/pathlib/_local.py deleted file mode 100644 index 0188e7c7..00000000 --- a/Python313_13_x86_Template/Lib/pathlib/_local.py +++ /dev/null @@ -1,861 +0,0 @@ -import io -import ntpath -import operator -import os -import posixpath -import sys -import warnings -from glob import _StringGlobber -from itertools import chain -from _collections_abc import Sequence - -try: - import pwd -except ImportError: - pwd = None -try: - import grp -except ImportError: - grp = None - -from ._abc import UnsupportedOperation, PurePathBase, PathBase - - -__all__ = [ - "PurePath", "PurePosixPath", "PureWindowsPath", - "Path", "PosixPath", "WindowsPath", - ] - - -class _PathParents(Sequence): - """This object provides sequence-like access to the logical ancestors - of a path. Don't try to construct it yourself.""" - __slots__ = ('_path', '_drv', '_root', '_tail') - - def __init__(self, path): - self._path = path - self._drv = path.drive - self._root = path.root - self._tail = path._tail - - def __len__(self): - return len(self._tail) - - def __getitem__(self, idx): - if isinstance(idx, slice): - return tuple(self[i] for i in range(*idx.indices(len(self)))) - - if idx >= len(self) or idx < -len(self): - raise IndexError(idx) - if idx < 0: - idx += len(self) - return self._path._from_parsed_parts(self._drv, self._root, - self._tail[:-idx - 1]) - - def __repr__(self): - return "<{}.parents>".format(type(self._path).__name__) - - -class PurePath(PurePathBase): - """Base class for manipulating paths without I/O. - - PurePath represents a filesystem path and offers operations which - don't imply any actual filesystem I/O. Depending on your system, - instantiating a PurePath will return either a PurePosixPath or a - PureWindowsPath object. You can also instantiate either of these classes - directly, regardless of your system. - """ - - __slots__ = ( - # The `_raw_paths` slot stores unnormalized string paths. This is set - # in the `__init__()` method. - '_raw_paths', - - # The `_drv`, `_root` and `_tail_cached` slots store parsed and - # normalized parts of the path. They are set when any of the `drive`, - # `root` or `_tail` properties are accessed for the first time. The - # three-part division corresponds to the result of - # `os.path.splitroot()`, except that the tail is further split on path - # separators (i.e. it is a list of strings), and that the root and - # tail are normalized. - '_drv', '_root', '_tail_cached', - - # The `_str` slot stores the string representation of the path, - # computed from the drive, root and tail when `__str__()` is called - # for the first time. It's used to implement `_str_normcase` - '_str', - - # The `_str_normcase_cached` slot stores the string path with - # normalized case. It is set when the `_str_normcase` property is - # accessed for the first time. It's used to implement `__eq__()` - # `__hash__()`, and `_parts_normcase` - '_str_normcase_cached', - - # The `_parts_normcase_cached` slot stores the case-normalized - # string path after splitting on path separators. It's set when the - # `_parts_normcase` property is accessed for the first time. 
It's used - # to implement comparison methods like `__lt__()`. - '_parts_normcase_cached', - - # The `_hash` slot stores the hash of the case-normalized string - # path. It's set when `__hash__()` is called for the first time. - '_hash', - ) - parser = os.path - _globber = _StringGlobber - - def __new__(cls, *args, **kwargs): - """Construct a PurePath from one or several strings and or existing - PurePath objects. The strings and path objects are combined so as - to yield a canonicalized path, which is incorporated into the - new PurePath object. - """ - if cls is PurePath: - cls = PureWindowsPath if os.name == 'nt' else PurePosixPath - return object.__new__(cls) - - def __init__(self, *args): - paths = [] - for arg in args: - if isinstance(arg, PurePath): - if arg.parser is not self.parser: - # GH-103631: Convert separators for backwards compatibility. - paths.append(arg.as_posix()) - else: - paths.extend(arg._raw_paths) - else: - try: - path = os.fspath(arg) - except TypeError: - path = arg - if not isinstance(path, str): - raise TypeError( - "argument should be a str or an os.PathLike " - "object where __fspath__ returns a str, " - f"not {type(path).__name__!r}") - paths.append(path) - # Avoid calling super().__init__, as an optimisation - self._raw_paths = paths - - def joinpath(self, *pathsegments): - """Combine this path with one or several arguments, and return a - new path representing either a subpath (if all arguments are relative - paths) or a totally different path (if one of the arguments is - anchored). - """ - return self.with_segments(self, *pathsegments) - - def __truediv__(self, key): - try: - return self.with_segments(self, key) - except TypeError: - return NotImplemented - - def __rtruediv__(self, key): - try: - return self.with_segments(key, self) - except TypeError: - return NotImplemented - - def __reduce__(self): - return self.__class__, tuple(self._raw_paths) - - def __repr__(self): - return "{}({!r})".format(self.__class__.__name__, self.as_posix()) - - def __fspath__(self): - return str(self) - - def __bytes__(self): - """Return the bytes representation of the path. This is only - recommended to use under Unix.""" - return os.fsencode(self) - - @property - def _str_normcase(self): - # String with normalized case, for hashing and equality checks - try: - return self._str_normcase_cached - except AttributeError: - if self.parser is posixpath: - self._str_normcase_cached = str(self) - else: - self._str_normcase_cached = str(self).lower() - return self._str_normcase_cached - - def __hash__(self): - try: - return self._hash - except AttributeError: - self._hash = hash(self._str_normcase) - return self._hash - - def __eq__(self, other): - if not isinstance(other, PurePath): - return NotImplemented - return self._str_normcase == other._str_normcase and self.parser is other.parser - - @property - def _parts_normcase(self): - # Cached parts with normalized case, for comparisons. 
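Editor's note: equality, hashing, and ordering are based on the case-normalized string described above, so case matters only for non-POSIX flavours, and paths with different parsers never compare equal:

    from pathlib import PurePosixPath, PureWindowsPath

    print(PureWindowsPath('C:/Temp') == PureWindowsPath('c:/temp'))  # True
    print(PurePosixPath('/tmp') == PurePosixPath('/TMP'))            # False
    print(PureWindowsPath('C:/x') == PurePosixPath('C:/x'))          # False: parsers differ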
- try: - return self._parts_normcase_cached - except AttributeError: - self._parts_normcase_cached = self._str_normcase.split(self.parser.sep) - return self._parts_normcase_cached - - def __lt__(self, other): - if not isinstance(other, PurePath) or self.parser is not other.parser: - return NotImplemented - return self._parts_normcase < other._parts_normcase - - def __le__(self, other): - if not isinstance(other, PurePath) or self.parser is not other.parser: - return NotImplemented - return self._parts_normcase <= other._parts_normcase - - def __gt__(self, other): - if not isinstance(other, PurePath) or self.parser is not other.parser: - return NotImplemented - return self._parts_normcase > other._parts_normcase - - def __ge__(self, other): - if not isinstance(other, PurePath) or self.parser is not other.parser: - return NotImplemented - return self._parts_normcase >= other._parts_normcase - - def __str__(self): - """Return the string representation of the path, suitable for - passing to system calls.""" - try: - return self._str - except AttributeError: - self._str = self._format_parsed_parts(self.drive, self.root, - self._tail) or '.' - return self._str - - @classmethod - def _format_parsed_parts(cls, drv, root, tail): - if drv or root: - return drv + root + cls.parser.sep.join(tail) - elif tail and cls.parser.splitdrive(tail[0])[0]: - tail = ['.'] + tail - return cls.parser.sep.join(tail) - - def _from_parsed_parts(self, drv, root, tail): - path = self._from_parsed_string(self._format_parsed_parts(drv, root, tail)) - path._drv = drv - path._root = root - path._tail_cached = tail - return path - - def _from_parsed_string(self, path_str): - path = self.with_segments(path_str) - path._str = path_str or '.' - return path - - @classmethod - def _parse_path(cls, path): - if not path: - return '', '', [] - sep = cls.parser.sep - altsep = cls.parser.altsep - if altsep: - path = path.replace(altsep, sep) - drv, root, rel = cls.parser.splitroot(path) - if not root and drv.startswith(sep) and not drv.endswith(sep): - drv_parts = drv.split(sep) - if len(drv_parts) == 4 and drv_parts[2] not in '?.': - # e.g. //server/share - root = sep - elif len(drv_parts) == 6: - # e.g. 
//?/unc/server/share - root = sep - parsed = [sys.intern(str(x)) for x in rel.split(sep) if x and x != '.'] - return drv, root, parsed - - @property - def _raw_path(self): - """The joined but unnormalized path.""" - paths = self._raw_paths - if len(paths) == 0: - path = '' - elif len(paths) == 1: - path = paths[0] - else: - path = self.parser.join(*paths) - return path - - @property - def drive(self): - """The drive prefix (letter or UNC path), if any.""" - try: - return self._drv - except AttributeError: - self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path) - return self._drv - - @property - def root(self): - """The root of the path, if any.""" - try: - return self._root - except AttributeError: - self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path) - return self._root - - @property - def _tail(self): - try: - return self._tail_cached - except AttributeError: - self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path) - return self._tail_cached - - @property - def anchor(self): - """The concatenation of the drive and root, or ''.""" - return self.drive + self.root - - @property - def parts(self): - """An object providing sequence-like access to the - components in the filesystem path.""" - if self.drive or self.root: - return (self.drive + self.root,) + tuple(self._tail) - else: - return tuple(self._tail) - - @property - def parent(self): - """The logical parent of the path.""" - drv = self.drive - root = self.root - tail = self._tail - if not tail: - return self - return self._from_parsed_parts(drv, root, tail[:-1]) - - @property - def parents(self): - """A sequence of this path's logical parents.""" - # The value of this property should not be cached on the path object, - # as doing so would introduce a reference cycle. - return _PathParents(self) - - @property - def name(self): - """The final path component, if any.""" - tail = self._tail - if not tail: - return '' - return tail[-1] - - def with_name(self, name): - """Return a new path with the file name changed.""" - p = self.parser - if not name or p.sep in name or (p.altsep and p.altsep in name) or name == '.': - raise ValueError(f"Invalid name {name!r}") - tail = self._tail.copy() - if not tail: - raise ValueError(f"{self!r} has an empty name") - tail[-1] = name - return self._from_parsed_parts(self.drive, self.root, tail) - - def relative_to(self, other, /, *_deprecated, walk_up=False): - """Return the relative path to another path identified by the passed - arguments. If the operation is not possible (because this is not - related to the other path), raise ValueError. - - The *walk_up* parameter controls whether `..` may be used to resolve - the path. - """ - if _deprecated: - msg = ("support for supplying more than one positional argument " - "to pathlib.PurePath.relative_to() is deprecated and " - "scheduled for removal in Python 3.14") - warnings.warn(msg, DeprecationWarning, stacklevel=2) - other = self.with_segments(other, *_deprecated) - elif not isinstance(other, PurePath): - other = self.with_segments(other) - for step, path in enumerate(chain([other], other.parents)): - if path == self or path in self.parents: - break - elif not walk_up: - raise ValueError(f"{str(self)!r} is not in the subpath of {str(other)!r}") - elif path.name == '..': - raise ValueError(f"'..' 
segment in {str(other)!r} cannot be walked") - else: - raise ValueError(f"{str(self)!r} and {str(other)!r} have different anchors") - parts = ['..'] * step + self._tail[len(path._tail):] - return self._from_parsed_parts('', '', parts) - - def is_relative_to(self, other, /, *_deprecated): - """Return True if the path is relative to another path or False. - """ - if _deprecated: - msg = ("support for supplying more than one argument to " - "pathlib.PurePath.is_relative_to() is deprecated and " - "scheduled for removal in Python 3.14") - warnings.warn(msg, DeprecationWarning, stacklevel=2) - other = self.with_segments(other, *_deprecated) - elif not isinstance(other, PurePath): - other = self.with_segments(other) - return other == self or other in self.parents - - def is_absolute(self): - """True if the path is absolute (has both a root and, if applicable, - a drive).""" - if self.parser is posixpath: - # Optimization: work with raw paths on POSIX. - for path in self._raw_paths: - if path.startswith('/'): - return True - return False - return self.parser.isabs(self) - - def is_reserved(self): - """Return True if the path contains one of the special names reserved - by the system, if any.""" - msg = ("pathlib.PurePath.is_reserved() is deprecated and scheduled " - "for removal in Python 3.15. Use os.path.isreserved() to " - "detect reserved paths on Windows.") - warnings.warn(msg, DeprecationWarning, stacklevel=2) - if self.parser is ntpath: - return self.parser.isreserved(self) - return False - - def as_uri(self): - """Return the path as a URI.""" - if not self.is_absolute(): - raise ValueError("relative path can't be expressed as a file URI") - - drive = self.drive - if len(drive) == 2 and drive[1] == ':': - # It's a path on a local drive => 'file:///c:/a/b' - prefix = 'file:///' + drive - path = self.as_posix()[2:] - elif drive: - # It's a path on a network drive => 'file://host/share/a/b' - prefix = 'file:' - path = self.as_posix() - else: - # It's a posix path => 'file:///etc/hosts' - prefix = 'file://' - path = str(self) - from urllib.parse import quote_from_bytes - return prefix + quote_from_bytes(os.fsencode(path)) - - @property - def _pattern_str(self): - """The path expressed as a string, for use in pattern-matching.""" - # The string representation of an empty path is a single dot ('.'). Empty - # paths shouldn't match wildcards, so we change it to the empty string. - path_str = str(self) - return '' if path_str == '.' else path_str - -# Subclassing os.PathLike makes isinstance() checks slower, -# which in turn makes Path construction slower. Register instead! -os.PathLike.register(PurePath) - - -class PurePosixPath(PurePath): - """PurePath subclass for non-Windows systems. - - On a POSIX system, instantiating a PurePath should return this object. - However, you can also instantiate it directly on any system. - """ - parser = posixpath - __slots__ = () - - -class PureWindowsPath(PurePath): - """PurePath subclass for Windows systems. - - On a Windows system, instantiating a PurePath should return this object. - However, you can also instantiate it directly on any system. - """ - parser = ntpath - __slots__ = () - - -class Path(PathBase, PurePath): - """PurePath subclass that can make system calls. - - Path represents a filesystem path but unlike PurePath, also offers - methods to do system calls on path objects. Depending on your system, - instantiating a Path will return either a PosixPath or a WindowsPath - object. 
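A minimal sketch (not part of the deleted file) of the pure-path semantics removed above: ordering and equality go through the case-normalized parts, so only Windows flavours compare case-insensitively, and relative_to() emits '..' segments only when walk_up=True.

    from pathlib import PurePosixPath, PureWindowsPath

    # Equality compares case-normalized parts per flavour.
    assert PureWindowsPath("C:/Foo") == PureWindowsPath("c:/FOO")
    assert PurePosixPath("/foo") != PurePosixPath("/FOO")

    # relative_to() refuses to walk up unless explicitly allowed.
    p = PurePosixPath("/srv/data/logs")
    assert p.relative_to("/srv/data") == PurePosixPath("logs")
    assert p.relative_to("/srv/web", walk_up=True) == PurePosixPath("../data/logs")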
You can also instantiate a PosixPath or WindowsPath directly, - but cannot instantiate a WindowsPath on a POSIX system or vice versa. - """ - __slots__ = () - as_uri = PurePath.as_uri - - @classmethod - def _unsupported_msg(cls, attribute): - return f"{cls.__name__}.{attribute} is unsupported on this system" - - def __init__(self, *args, **kwargs): - if kwargs: - msg = ("support for supplying keyword arguments to pathlib.PurePath " - "is deprecated and scheduled for removal in Python {remove}") - warnings._deprecated("pathlib.PurePath(**kwargs)", msg, remove=(3, 14)) - super().__init__(*args) - - def __new__(cls, *args, **kwargs): - if cls is Path: - cls = WindowsPath if os.name == 'nt' else PosixPath - return object.__new__(cls) - - def stat(self, *, follow_symlinks=True): - """ - Return the result of the stat() system call on this path, like - os.stat() does. - """ - return os.stat(self, follow_symlinks=follow_symlinks) - - def is_mount(self): - """ - Check if this path is a mount point - """ - return os.path.ismount(self) - - def is_junction(self): - """ - Whether this path is a junction. - """ - return os.path.isjunction(self) - - def open(self, mode='r', buffering=-1, encoding=None, - errors=None, newline=None): - """ - Open the file pointed to by this path and return a file object, as - the built-in open() function does. - """ - if "b" not in mode: - encoding = io.text_encoding(encoding) - return io.open(self, mode, buffering, encoding, errors, newline) - - def read_text(self, encoding=None, errors=None, newline=None): - """ - Open the file in text mode, read it, and close the file. - """ - # Call io.text_encoding() here to ensure any warning is raised at an - # appropriate stack level. - encoding = io.text_encoding(encoding) - return PathBase.read_text(self, encoding, errors, newline) - - def write_text(self, data, encoding=None, errors=None, newline=None): - """ - Open the file in text mode, write to it, and close the file. - """ - # Call io.text_encoding() here to ensure any warning is raised at an - # appropriate stack level. - encoding = io.text_encoding(encoding) - return PathBase.write_text(self, data, encoding, errors, newline) - - _remove_leading_dot = operator.itemgetter(slice(2, None)) - _remove_trailing_slash = operator.itemgetter(slice(-1)) - - def _filter_trailing_slash(self, paths): - sep = self.parser.sep - anchor_len = len(self.anchor) - for path_str in paths: - if len(path_str) > anchor_len and path_str[-1] == sep: - path_str = path_str[:-1] - yield path_str - - def iterdir(self): - """Yield path objects of the directory contents. - - The children are yielded in arbitrary order, and the - special entries '.' and '..' are not included. - """ - root_dir = str(self) - with os.scandir(root_dir) as scandir_it: - paths = [entry.path for entry in scandir_it] - if root_dir == '.': - paths = map(self._remove_leading_dot, paths) - return map(self._from_parsed_string, paths) - - def glob(self, pattern, *, case_sensitive=None, recurse_symlinks=False): - """Iterate over this subtree and yield all existing files (of any - kind, including directories) matching the given relative pattern. 
- """ - sys.audit("pathlib.Path.glob", self, pattern) - if not isinstance(pattern, PurePath): - pattern = self.with_segments(pattern) - if pattern.anchor: - raise NotImplementedError("Non-relative patterns are unsupported") - parts = pattern._tail.copy() - if not parts: - raise ValueError("Unacceptable pattern: {!r}".format(pattern)) - raw = pattern._raw_path - if raw[-1] in (self.parser.sep, self.parser.altsep): - # GH-65238: pathlib doesn't preserve trailing slash. Add it back. - parts.append('') - select = self._glob_selector(parts[::-1], case_sensitive, recurse_symlinks) - root = str(self) - paths = select(root) - - # Normalize results - if root == '.': - paths = map(self._remove_leading_dot, paths) - if parts[-1] == '': - paths = map(self._remove_trailing_slash, paths) - elif parts[-1] == '**': - paths = self._filter_trailing_slash(paths) - paths = map(self._from_parsed_string, paths) - return paths - - def rglob(self, pattern, *, case_sensitive=None, recurse_symlinks=False): - """Recursively yield all existing files (of any kind, including - directories) matching the given relative pattern, anywhere in - this subtree. - """ - sys.audit("pathlib.Path.rglob", self, pattern) - if not isinstance(pattern, PurePath): - pattern = self.with_segments(pattern) - pattern = '**' / pattern - return self.glob(pattern, case_sensitive=case_sensitive, recurse_symlinks=recurse_symlinks) - - def walk(self, top_down=True, on_error=None, follow_symlinks=False): - """Walk the directory tree from this directory, similar to os.walk().""" - sys.audit("pathlib.Path.walk", self, on_error, follow_symlinks) - root_dir = str(self) - if not follow_symlinks: - follow_symlinks = os._walk_symlinks_as_files - results = os.walk(root_dir, top_down, on_error, follow_symlinks) - for path_str, dirnames, filenames in results: - if root_dir == '.': - path_str = path_str[2:] - yield self._from_parsed_string(path_str), dirnames, filenames - - def absolute(self): - """Return an absolute version of this path - No normalization or symlink resolution is performed. - - Use resolve() to resolve symlinks and remove '..' segments. - """ - if self.is_absolute(): - return self - if self.root: - drive = os.path.splitroot(os.getcwd())[0] - return self._from_parsed_parts(drive, self.root, self._tail) - if self.drive: - # There is a CWD on each drive-letter drive. - cwd = os.path.abspath(self.drive) - else: - cwd = os.getcwd() - if not self._tail: - # Fast path for "empty" paths, e.g. Path("."), Path("") or Path(). - # We pass only one argument to with_segments() to avoid the cost - # of joining, and we exploit the fact that getcwd() returns a - # fully-normalized string by storing it in _str. This is used to - # implement Path.cwd(). - return self._from_parsed_string(cwd) - drive, root, rel = os.path.splitroot(cwd) - if not rel: - return self._from_parsed_parts(drive, root, self._tail) - tail = rel.split(self.parser.sep) - tail.extend(self._tail) - return self._from_parsed_parts(drive, root, tail) - - def resolve(self, strict=False): - """ - Make the path absolute, resolving all symlinks on the way and also - normalizing it. - """ - - return self.with_segments(os.path.realpath(self, strict=strict)) - - if pwd: - def owner(self, *, follow_symlinks=True): - """ - Return the login name of the file owner. - """ - uid = self.stat(follow_symlinks=follow_symlinks).st_uid - return pwd.getpwuid(uid).pw_name - - if grp: - def group(self, *, follow_symlinks=True): - """ - Return the group name of the file gid. 
- """ - gid = self.stat(follow_symlinks=follow_symlinks).st_gid - return grp.getgrgid(gid).gr_name - - if hasattr(os, "readlink"): - def readlink(self): - """ - Return the path to which the symbolic link points. - """ - return self.with_segments(os.readlink(self)) - - def touch(self, mode=0o666, exist_ok=True): - """ - Create this file with the given access mode, if it doesn't exist. - """ - - if exist_ok: - # First try to bump modification time - # Implementation note: GNU touch uses the UTIME_NOW option of - # the utimensat() / futimens() functions. - try: - os.utime(self, None) - except OSError: - # Avoid exception chaining - pass - else: - return - flags = os.O_CREAT | os.O_WRONLY - if not exist_ok: - flags |= os.O_EXCL - fd = os.open(self, flags, mode) - os.close(fd) - - def mkdir(self, mode=0o777, parents=False, exist_ok=False): - """ - Create a new directory at this given path. - """ - try: - os.mkdir(self, mode) - except FileNotFoundError: - if not parents or self.parent == self: - raise - self.parent.mkdir(parents=True, exist_ok=True) - self.mkdir(mode, parents=False, exist_ok=exist_ok) - except OSError: - # Cannot rely on checking for EEXIST, since the operating system - # could give priority to other errors like EACCES or EROFS - if not exist_ok or not self.is_dir(): - raise - - def chmod(self, mode, *, follow_symlinks=True): - """ - Change the permissions of the path, like os.chmod(). - """ - os.chmod(self, mode, follow_symlinks=follow_symlinks) - - def unlink(self, missing_ok=False): - """ - Remove this file or link. - If the path is a directory, use rmdir() instead. - """ - try: - os.unlink(self) - except FileNotFoundError: - if not missing_ok: - raise - - def rmdir(self): - """ - Remove this directory. The directory must be empty. - """ - os.rmdir(self) - - def rename(self, target): - """ - Rename this path to the target path. - - The target path may be absolute or relative. Relative paths are - interpreted relative to the current working directory, *not* the - directory of the Path object. - - Returns the new Path instance pointing to the target path. - """ - os.rename(self, target) - return self.with_segments(target) - - def replace(self, target): - """ - Rename this path to the target path, overwriting if that path exists. - - The target path may be absolute or relative. Relative paths are - interpreted relative to the current working directory, *not* the - directory of the Path object. - - Returns the new Path instance pointing to the target path. - """ - os.replace(self, target) - return self.with_segments(target) - - if hasattr(os, "symlink"): - def symlink_to(self, target, target_is_directory=False): - """ - Make this path a symlink pointing to the target path. - Note the order of arguments (link, target) is the reverse of os.symlink. - """ - os.symlink(target, self, target_is_directory) - - if hasattr(os, "link"): - def hardlink_to(self, target): - """ - Make this path a hard link pointing to the same file as *target*. - - Note the order of arguments (self, target) is the reverse of os.link's. 
- """ - os.link(target, self) - - def expanduser(self): - """ Return a new path with expanded ~ and ~user constructs - (as returned by os.path.expanduser) - """ - if (not (self.drive or self.root) and - self._tail and self._tail[0][:1] == '~'): - homedir = os.path.expanduser(self._tail[0]) - if homedir[:1] == "~": - raise RuntimeError("Could not determine home directory.") - drv, root, tail = self._parse_path(homedir) - return self._from_parsed_parts(drv, root, tail + self._tail[1:]) - - return self - - @classmethod - def from_uri(cls, uri): - """Return a new path from the given 'file' URI.""" - if not uri.startswith('file:'): - raise ValueError(f"URI does not start with 'file:': {uri!r}") - path = uri[5:] - if path[:3] == '///': - # Remove empty authority - path = path[2:] - elif path[:12] == '//localhost/': - # Remove 'localhost' authority - path = path[11:] - if path[:3] == '///' or (path[:1] == '/' and path[2:3] in ':|'): - # Remove slash before DOS device/UNC path - path = path[1:] - if path[1:2] == '|': - # Replace bar with colon in DOS drive - path = path[:1] + ':' + path[2:] - from urllib.parse import unquote_to_bytes - path = cls(os.fsdecode(unquote_to_bytes(path))) - if not path.is_absolute(): - raise ValueError(f"URI is not absolute: {uri!r}") - return path - - -class PosixPath(Path, PurePosixPath): - """Path subclass for non-Windows systems. - - On a POSIX system, instantiating a Path should return this object. - """ - __slots__ = () - - if os.name == 'nt': - def __new__(cls, *args, **kwargs): - raise UnsupportedOperation( - f"cannot instantiate {cls.__name__!r} on your system") - -class WindowsPath(Path, PureWindowsPath): - """Path subclass for Windows systems. - - On a Windows system, instantiating a Path should return this object. - """ - __slots__ = () - - if os.name != 'nt': - def __new__(cls, *args, **kwargs): - raise UnsupportedOperation( - f"cannot instantiate {cls.__name__!r} on your system") diff --git a/Python313_13_x86_Template/Lib/pdb.py b/Python313_13_x86_Template/Lib/pdb.py deleted file mode 100644 index 5c9be23e..00000000 --- a/Python313_13_x86_Template/Lib/pdb.py +++ /dev/null @@ -1,2550 +0,0 @@ -#! /usr/bin/env python3 - -""" -The Python Debugger Pdb -======================= - -To use the debugger in its simplest form: - - >>> import pdb - >>> pdb.run('') - -The debugger's prompt is '(Pdb) '. This will stop in the first -function call in . - -Alternatively, if a statement terminated with an unhandled exception, -you can use pdb's post-mortem facility to inspect the contents of the -traceback: - - >>> - - >>> import pdb - >>> pdb.pm() - -The commands recognized by the debugger are listed in the next -section. Most can be abbreviated as indicated; e.g., h(elp) means -that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel', -nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in -square brackets. Alternatives in the command syntax are separated -by a vertical bar (|). - -A blank line repeats the previous command literally, except for -'list', where it lists the next 11 lines. - -Commands that the debugger doesn't recognize are assumed to be Python -statements and are executed in the context of the program being -debugged. Python statements can also be prefixed with an exclamation -point ('!'). This is a powerful way to inspect the program being -debugged; it is even possible to change variables or call functions. -When an exception occurs in such a statement, the exception name is -printed but the debugger's state is not changed. 
- -The debugger supports aliases, which can save typing. And aliases can -have parameters (see the alias help entry) which allows one a certain -level of adaptability to the context under examination. - -Multiple commands may be entered on a single line, separated by the -pair ';;'. No intelligence is applied to separating the commands; the -input is split at the first ';;', even if it is in the middle of a -quoted string. - -If a file ".pdbrc" exists in your home directory or in the current -directory, it is read in and executed as if it had been typed at the -debugger prompt. This is particularly useful for aliases. If both -files exist, the one in the home directory is read first and aliases -defined there can be overridden by the local file. This behavior can be -disabled by passing the "readrc=False" argument to the Pdb constructor. - -Aside from aliases, the debugger is not directly programmable; but it -is implemented as a class from which you can derive your own debugger -class, which you can make as fancy as you like. - - -Debugger commands -================= - -""" -# NOTE: the actual command documentation is collected from docstrings of the -# commands and is appended to __doc__ after the class has been defined. - -import os -import io -import re -import sys -import cmd -import bdb -import dis -import code -import glob -import token -import types -import codeop -import pprint -import signal -import inspect -import textwrap -import tokenize -import itertools -import traceback -import linecache -import _colorize - -from contextlib import contextmanager -from types import CodeType - - -class Restart(Exception): - """Causes a debugger to be restarted for the debugged python program.""" - pass - -__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace", - "post_mortem", "help"] - - -def find_first_executable_line(code): - """ Try to find the first executable line of the code object. - - Equivalently, find the line number of the instruction that's - after RESUME - - Return code.co_firstlineno if no executable line is found. 
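find_first_executable_line() above returns the line of the instruction that follows RESUME; the structure it walks is visible with dis:

    import dis

    def sample():
        """A docstring constant, not an executable line."""
        return 42

    # The first instruction is RESUME; the line of the instruction
    # after it is where a breakpoint can first fire.
    instructions = list(dis.get_instructions(sample))
    print(instructions[0].opname)            # RESUME
    print(instructions[1].positions.lineno)  # line of 'return 42'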
- """ - prev = None - for instr in dis.get_instructions(code): - if prev is not None and prev.opname == 'RESUME': - if instr.positions.lineno is not None: - return instr.positions.lineno - return code.co_firstlineno - prev = instr - return code.co_firstlineno - -def find_function(funcname, filename): - cre = re.compile(r'def\s+%s(\s*\[.+\])?\s*[(]' % re.escape(funcname)) - try: - fp = tokenize.open(filename) - except OSError: - lines = linecache.getlines(filename) - if not lines: - return None - fp = io.StringIO(''.join(lines)) - funcdef = "" - funcstart = None - # consumer of this info expects the first line to be 1 - with fp: - for lineno, line in enumerate(fp, start=1): - if cre.match(line): - funcstart, funcdef = lineno, line - elif funcdef: - funcdef += line - - if funcdef: - try: - code = compile(funcdef, filename, 'exec') - except SyntaxError: - continue - # We should always be able to find the code object here - funccode = next(c for c in code.co_consts if - isinstance(c, CodeType) and c.co_name == funcname) - lineno_offset = find_first_executable_line(funccode) - return funcname, filename, funcstart + lineno_offset - 1 - return None - -def lasti2lineno(code, lasti): - linestarts = list(dis.findlinestarts(code)) - linestarts.reverse() - for i, lineno in linestarts: - if lasti >= i: - return lineno - return 0 - - -class _rstr(str): - """String that doesn't quote its repr.""" - def __repr__(self): - return self - - -class _ExecutableTarget: - filename: str - code: CodeType | str - namespace: dict - - -class _ScriptTarget(_ExecutableTarget): - def __init__(self, target): - self._check(target) - self._target = self._safe_realpath(target) - - # If PYTHONSAFEPATH (-P) is not set, sys.path[0] is the directory - # of pdb, and we should replace it with the directory of the script - if not sys.flags.safe_path: - sys.path[0] = os.path.dirname(self._target) - - @staticmethod - def _check(target): - """ - Check that target is plausibly a script. - """ - if not os.path.exists(target): - print(f'Error: {target} does not exist') - sys.exit(1) - if os.path.isdir(target): - print(f'Error: {target} is a directory') - sys.exit(1) - - @staticmethod - def _safe_realpath(path): - """ - Return the canonical path (realpath) if it is accessible from the userspace. - Otherwise (for example, if the path is a symlink to an anonymous pipe), - return the original path. - - See GH-142315. 
- """ - realpath = os.path.realpath(path) - return realpath if os.path.exists(realpath) else path - - def __repr__(self): - return self._target - - @property - def filename(self): - return self._target - - @property - def code(self): - # Open the file each time because the file may be modified - with io.open_code(self._target) as fp: - return f"exec(compile({fp.read()!r}, {self._target!r}, 'exec'))" - - @property - def namespace(self): - return dict( - __name__='__main__', - __file__=self._target, - __builtins__=__builtins__, - __spec__=None, - ) - - -class _ModuleTarget(_ExecutableTarget): - def __init__(self, target): - self._target = target - - import runpy - try: - _, self._spec, self._code = runpy._get_module_details(self._target) - except ImportError as e: - print(f"ImportError: {e}") - sys.exit(1) - except Exception: - traceback.print_exc() - sys.exit(1) - - def __repr__(self): - return self._target - - @property - def filename(self): - return self._code.co_filename - - @property - def code(self): - return self._code - - @property - def namespace(self): - return dict( - __name__='__main__', - __file__=os.path.normcase(os.path.abspath(self.filename)), - __package__=self._spec.parent, - __loader__=self._spec.loader, - __spec__=self._spec, - __builtins__=__builtins__, - ) - - -class _ZipTarget(_ExecutableTarget): - def __init__(self, target): - import runpy - - self._target = os.path.realpath(target) - sys.path.insert(0, self._target) - try: - _, self._spec, self._code = runpy._get_main_module_details() - except ImportError as e: - print(f"ImportError: {e}") - sys.exit(1) - except Exception: - traceback.print_exc() - sys.exit(1) - - def __repr__(self): - return self._target - - @property - def filename(self): - return self._code.co_filename - - @property - def code(self): - return self._code - - @property - def namespace(self): - return dict( - __name__='__main__', - __file__=os.path.normcase(os.path.abspath(self.filename)), - __package__=self._spec.parent, - __loader__=self._spec.loader, - __spec__=self._spec, - __builtins__=__builtins__, - ) - - -class _PdbInteractiveConsole(code.InteractiveConsole): - def __init__(self, ns, message): - self._message = message - super().__init__(locals=ns, local_exit=True) - - def write(self, data): - self._message(data, end='') - - -# Interaction prompt line will separate file and call info from code -# text using value of line_prefix string. A newline and arrow may -# be to your liking. You can set it once pdb is imported using the -# command "pdb.line_prefix = '\n% '". -# line_prefix = ': ' # Use this to get the old situation back -line_prefix = '\n-> ' # Probably a better default - - - -class Pdb(bdb.Bdb, cmd.Cmd): - _previous_sigint_handler = None - - # Limit the maximum depth of chained exceptions, we should be handling cycles, - # but in case there are recursions, we stop at 999. 
- MAX_CHAINED_EXCEPTION_DEPTH = 999 - - _file_mtime_table = {} - - def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None, - nosigint=False, readrc=True): - bdb.Bdb.__init__(self, skip=skip) - cmd.Cmd.__init__(self, completekey, stdin, stdout) - sys.audit("pdb.Pdb") - if stdout: - self.use_rawinput = 0 - self.prompt = '(Pdb) ' - self.aliases = {} - self.displaying = {} - self.mainpyfile = '' - self._wait_for_mainpyfile = False - self.tb_lineno = {} - # Try to load readline if it exists - try: - import readline - # remove some common file name delimiters - readline.set_completer_delims(' \t\n`@#%^&*()=+[{]}\\|;:\'",<>?') - except ImportError: - pass - - self.allow_kbdint = False - self.nosigint = nosigint - # Consider these characters as part of the command so when the users type - # c.a or c['a'], it won't be recognized as a c(ontinue) command - self.identchars = cmd.Cmd.identchars + '=.[](),"\'+-*/%@&|<>~^' - - # Read ~/.pdbrc and ./.pdbrc - self.rcLines = [] - if readrc: - try: - with open(os.path.expanduser('~/.pdbrc'), encoding='utf-8') as rcFile: - self.rcLines.extend(rcFile) - except OSError: - pass - try: - with open(".pdbrc", encoding='utf-8') as rcFile: - self.rcLines.extend(rcFile) - except OSError: - pass - - self.commands = {} # associates a command list to breakpoint numbers - self.commands_doprompt = {} # for each bp num, tells if the prompt - # must be disp. after execing the cmd list - self.commands_silent = {} # for each bp num, tells if the stack trace - # must be disp. after execing the cmd list - self.commands_defining = False # True while in the process of defining - # a command list - self.commands_bnum = None # The breakpoint number for which we are - # defining a list - - self._chained_exceptions = tuple() - self._chained_exception_index = 0 - - def sigint_handler(self, signum, frame): - if self.allow_kbdint: - raise KeyboardInterrupt - self.message("\nProgram interrupted. (Use 'cont' to resume).") - self.set_step() - self.set_trace(frame) - - def reset(self): - bdb.Bdb.reset(self) - self.forget() - - def forget(self): - self.lineno = None - self.stack = [] - self.curindex = 0 - if hasattr(self, 'curframe') and self.curframe: - self.curframe.f_globals.pop('__pdb_convenience_variables', None) - self.curframe = None - self.curframe_locals = {} - self.tb_lineno.clear() - - def setup(self, f, tb): - self.forget() - self.stack, self.curindex = self.get_stack(f, tb) - while tb: - # when setting up post-mortem debugging with a traceback, save all - # the original line numbers to be displayed along the current line - # numbers (which can be different, e.g. due to finally clauses) - lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti) - self.tb_lineno[tb.tb_frame] = lineno - tb = tb.tb_next - self.curframe = self.stack[self.curindex][0] - # The f_locals dictionary used to be updated from the actual frame - # locals whenever the .f_locals accessor was called, so it was - # cached here to ensure that modifications were not overwritten. While - # the caching is no longer required now that f_locals is a direct proxy - # on optimized frames, it's also harmless, so the code structure has - # been left unchanged. 
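For reference, an illustrative ~/.pdbrc for the rcLines handling read in __init__ above; blank and '#' lines are skipped, and these aliases are hypothetical examples, not defaults:

    # ~/.pdbrc - read at startup unless readrc=False
    alias pl pp locals()
    alias pi pp %1.__dict__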
- self.curframe_locals = self.curframe.f_locals - self.set_convenience_variable(self.curframe, '_frame', self.curframe) - - if self._chained_exceptions: - self.set_convenience_variable( - self.curframe, - '_exception', - self._chained_exceptions[self._chained_exception_index], - ) - - if self.rcLines: - self.cmdqueue = [ - line for line in self.rcLines - if line.strip() and not line.strip().startswith("#") - ] - self.rcLines = [] - - # Override Bdb methods - - def user_call(self, frame, argument_list): - """This method is called when there is the remote possibility - that we ever need to stop in this function.""" - if self._wait_for_mainpyfile: - return - if self.stop_here(frame): - self.message('--Call--') - self.interaction(frame, None) - - def user_line(self, frame): - """This function is called when we stop or break at this line.""" - if self._wait_for_mainpyfile: - if (self.mainpyfile != self.canonic(frame.f_code.co_filename)): - return - self._wait_for_mainpyfile = False - if self.trace_opcodes: - # GH-127321 - # We want to avoid stopping at an opcode that does not have - # an associated line number because pdb does not like it - if frame.f_lineno is None: - self.set_stepinstr() - return - if self.bp_commands(frame): - self.interaction(frame, None) - - user_opcode = user_line - - def bp_commands(self, frame): - """Call every command that was set for the current active breakpoint - (if there is one). - - Returns True if the normal interaction function must be called, - False otherwise.""" - # self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit - if getattr(self, "currentbp", False) and \ - self.currentbp in self.commands: - currentbp = self.currentbp - self.currentbp = 0 - lastcmd_back = self.lastcmd - self.setup(frame, None) - for line in self.commands[currentbp]: - self.onecmd(line) - self.lastcmd = lastcmd_back - if not self.commands_silent[currentbp]: - self.print_stack_entry(self.stack[self.curindex]) - if self.commands_doprompt[currentbp]: - self._cmdloop() - self.forget() - return - return 1 - - def user_return(self, frame, return_value): - """This function is called when a return trap is set here.""" - if self._wait_for_mainpyfile: - return - frame.f_locals['__return__'] = return_value - self.set_convenience_variable(frame, '_retval', return_value) - self.message('--Return--') - self.interaction(frame, None) - - def user_exception(self, frame, exc_info): - """This function is called if an exception occurs, - but only if we are to stop at or just below this level.""" - if self._wait_for_mainpyfile: - return - exc_type, exc_value, exc_traceback = exc_info - frame.f_locals['__exception__'] = exc_type, exc_value - self.set_convenience_variable(frame, '_exception', exc_value) - - # An 'Internal StopIteration' exception is an exception debug event - # issued by the interpreter when handling a subgenerator run with - # 'yield from' or a generator controlled by a for loop. No exception has - # actually occurred in this case. The debugger uses this debug event to - # stop when the debuggee is returning from such generators. 
- prefix = 'Internal ' if (not exc_traceback - and exc_type is StopIteration) else '' - self.message('%s%s' % (prefix, self._format_exc(exc_value))) - self.interaction(frame, exc_traceback) - - # General interaction function - def _cmdloop(self): - while True: - try: - # keyboard interrupts allow for an easy way to cancel - # the current command, so allow them during interactive input - self.allow_kbdint = True - self.cmdloop() - self.allow_kbdint = False - break - except KeyboardInterrupt: - self.message('--KeyboardInterrupt--') - - def _validate_file_mtime(self): - """Check if the source file of the current frame has been modified since - the last time we saw it. If so, give a warning.""" - try: - filename = self.curframe.f_code.co_filename - mtime = os.path.getmtime(filename) - except Exception: - return - if (filename in self._file_mtime_table and - mtime != self._file_mtime_table[filename]): - self.message(f"*** WARNING: file '{filename}' was edited, " - "running stale code until the program is rerun") - self._file_mtime_table[filename] = mtime - - # Called before loop, handles display expressions - # Set up convenience variable containers - def _show_display(self): - displaying = self.displaying.get(self.curframe) - if displaying: - for expr, oldvalue in displaying.items(): - newvalue = self._getval_except(expr) - # check for identity first; this prevents custom __eq__ to - # be called at every loop, and also prevents instances whose - # fields are changed to be displayed - if newvalue is not oldvalue and newvalue != oldvalue: - displaying[expr] = newvalue - self.message('display %s: %s [old: %s]' % - (expr, self._safe_repr(newvalue, expr), - self._safe_repr(oldvalue, expr))) - - def _get_tb_and_exceptions(self, tb_or_exc): - """ - Given a tracecack or an exception, return a tuple of chained exceptions - and current traceback to inspect. - - This will deal with selecting the right ``__cause__`` or ``__context__`` - as well as handling cycles, and return a flattened list of exceptions we - can jump to with do_exceptions. - - """ - _exceptions = [] - if isinstance(tb_or_exc, BaseException): - traceback, current = tb_or_exc.__traceback__, tb_or_exc - - while current is not None: - if current in _exceptions: - break - _exceptions.append(current) - if current.__cause__ is not None: - current = current.__cause__ - elif ( - current.__context__ is not None and not current.__suppress_context__ - ): - current = current.__context__ - - if len(_exceptions) >= self.MAX_CHAINED_EXCEPTION_DEPTH: - self.message( - f"More than {self.MAX_CHAINED_EXCEPTION_DEPTH}" - " chained exceptions found, not all exceptions" - "will be browsable with `exceptions`." - ) - break - else: - traceback = tb_or_exc - return tuple(reversed(_exceptions)), traceback - - @contextmanager - def _hold_exceptions(self, exceptions): - """ - Context manager to ensure proper cleaning of exceptions references - - When given a chained exception instead of a traceback, - pdb may hold references to many objects which may leak memory. - - We use this context manager to make sure everything is properly cleaned - - """ - try: - self._chained_exceptions = exceptions - self._chained_exception_index = len(exceptions) - 1 - yield - finally: - # we can't put those in forget as otherwise they would - # be cleared on exception change - self._chained_exceptions = tuple() - self._chained_exception_index = 0 - - def interaction(self, frame, tb_or_exc): - # Restore the previous signal handler at the Pdb prompt. 
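A self-contained sketch of the chained-exception walk performed by _get_tb_and_exceptions() above: follow __cause__ first, then __context__ unless suppressed, and guard against cycles and runaway depth.

    def flatten_chain(exc, limit=999):
        # Same traversal as _get_tb_and_exceptions(), stripped down.
        seen = []
        current = exc
        while current is not None and current not in seen and len(seen) < limit:
            seen.append(current)
            if current.__cause__ is not None:
                current = current.__cause__
            elif current.__context__ is not None and not current.__suppress_context__:
                current = current.__context__
            else:
                break
        return tuple(reversed(seen))

    try:
        try:
            raise KeyError("inner")
        except KeyError as err:
            raise RuntimeError("outer") from err
    except RuntimeError as exc:
        print([type(e).__name__ for e in flatten_chain(exc)])  # ['KeyError', 'RuntimeError']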
- if Pdb._previous_sigint_handler: - try: - signal.signal(signal.SIGINT, Pdb._previous_sigint_handler) - except ValueError: # ValueError: signal only works in main thread - pass - else: - Pdb._previous_sigint_handler = None - - _chained_exceptions, tb = self._get_tb_and_exceptions(tb_or_exc) - if isinstance(tb_or_exc, BaseException): - assert tb is not None, "main exception must have a traceback" - with self._hold_exceptions(_chained_exceptions): - self.setup(frame, tb) - # We should print the stack entry if and only if the user input - # is expected, and we should print it right before the user input. - # We achieve this by appending _pdbcmd_print_frame_status to the - # command queue. If cmdqueue is not exausted, the user input is - # not expected and we will not print the stack entry. - self.cmdqueue.append('_pdbcmd_print_frame_status') - self._cmdloop() - # If _pdbcmd_print_frame_status is not used, pop it out - if self.cmdqueue and self.cmdqueue[-1] == '_pdbcmd_print_frame_status': - self.cmdqueue.pop() - self.forget() - - def displayhook(self, obj): - """Custom displayhook for the exec in default(), which prevents - assignment of the _ variable in the builtins. - """ - # reproduce the behavior of the standard displayhook, not printing None - if obj is not None: - self.message(repr(obj)) - - @contextmanager - def _disable_command_completion(self): - completenames = self.completenames - try: - self.completenames = self.completedefault - yield - finally: - self.completenames = completenames - return - - def _exec_in_closure(self, source, globals, locals): - """ Run source code in closure so code object created within source - can find variables in locals correctly - - returns True if the source is executed, False otherwise - """ - - # Determine if the source should be executed in closure. Only when the - # source compiled to multiple code objects, we should use this feature. - # Otherwise, we can just raise an exception and normal exec will be used. 
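A stripped-down sketch of the closure trick _exec_in_closure() implements below: wrap the user source so local names become freevars of an inner code object, then execute that code object with pre-filled cells. The name run_with_freevars is illustrative, not part of pdb.

    import types

    def run_with_freevars(src, bindings):
        # Build:  def _outer():
        #             x = None
        #             def _scope():
        #                 nonlocal x
        #                 <src>
        #             return _scope.__code__
        wrapper = "def _outer():\n"
        wrapper += "".join(f"    {name} = None\n" for name in bindings)
        wrapper += "    def _scope():\n"
        wrapper += "".join(f"        nonlocal {name}\n" for name in bindings)
        wrapper += "".join(f"        {line}\n" for line in src.splitlines())
        wrapper += "    return _scope.__code__\n"
        ns = {}
        exec(wrapper, {}, ns)
        code = ns["_outer"]()
        # Cells supply the freevar values, in co_freevars order.
        cells = tuple(types.CellType(bindings.get(name)) for name in code.co_freevars)
        exec(code, {}, {}, closure=cells)

    run_with_freevars("print(x + y)", {"x": 1, "y": 2})  # prints 3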
- - code = compile(source, "<string>", "exec") - if not any(isinstance(const, CodeType) for const in code.co_consts): - return False - - # locals could be a proxy which does not support pop - # copy it first to avoid modifying the original locals - locals_copy = dict(locals) - - locals_copy["__pdb_eval__"] = { - "result": None, - "write_back": {} - } - - # If the source is an expression, we need to print its value - try: - compile(source, "<string>", "eval") - except SyntaxError: - pass - else: - source = "__pdb_eval__['result'] = " + source - - # Add write-back to update the locals - source = ("try:\n" + - textwrap.indent(source, " ") + "\n" + - "finally:\n" + - " __pdb_eval__['write_back'] = locals()") - - # Build a closure source code with freevars from locals like: - # def __pdb_outer(): - # var = None - # def __pdb_scope(): # This is the code object we want to execute - # nonlocal var - # <source> - # return __pdb_scope.__code__ - source_with_closure = ("def __pdb_outer():\n" + - "\n".join(f" {var} = None" for var in locals_copy) + "\n" + - " def __pdb_scope():\n" + - "\n".join(f" nonlocal {var}" for var in locals_copy) + "\n" + - textwrap.indent(source, " ") + "\n" + - " return __pdb_scope.__code__" - ) - - # Get the code object of __pdb_scope() - # The exec fills locals_copy with the __pdb_outer() function and we can call - # that to get the code object of __pdb_scope() - ns = {} - try: - exec(source_with_closure, {}, ns) - except Exception: - return False - code = ns["__pdb_outer"]() - - cells = tuple(types.CellType(locals_copy.get(var)) for var in code.co_freevars) - - try: - exec(code, globals, locals_copy, closure=cells) - except Exception: - return False - - # get the data we need from the statement - pdb_eval = locals_copy["__pdb_eval__"] - - # __pdb_eval__ should not be updated back to locals - pdb_eval["write_back"].pop("__pdb_eval__") - - # Write all local variables back to locals - locals.update(pdb_eval["write_back"]) - eval_result = pdb_eval["result"] - if eval_result is not None: - print(repr(eval_result)) - - return True - - def default(self, line): - if line[:1] == '!': line = line[1:].strip() - locals = self.curframe_locals - globals = self.curframe.f_globals - try: - buffer = line - if (code := codeop.compile_command(line + '\n', '<stdin>', 'single')) is None: - # Multi-line mode - with self._disable_command_completion(): - buffer = line - continue_prompt = "... " - while (code := codeop.compile_command(buffer, '<stdin>', 'single')) is None: - if self.use_rawinput: - try: - line = input(continue_prompt) - except (EOFError, KeyboardInterrupt): - self.lastcmd = "" - print('\n') - return - else: - self.stdout.write(continue_prompt) - self.stdout.flush() - line = self.stdin.readline() - if not len(line): - self.lastcmd = "" - self.stdout.write('\n') - self.stdout.flush() - return - else: - line = line.rstrip('\r\n') - buffer += '\n' + line - self.lastcmd = buffer - save_stdout = sys.stdout - save_stdin = sys.stdin - save_displayhook = sys.displayhook - try: - sys.stdin = self.stdin - sys.stdout = self.stdout - sys.displayhook = self.displayhook - if not self._exec_in_closure(buffer, globals, locals): - exec(code, globals, locals) - finally: - sys.stdout = save_stdout - sys.stdin = save_stdin - sys.displayhook = save_displayhook - except: - self._error_exc() - - def _replace_convenience_variables(self, line): - """Replace the convenience variables in 'line' with their values. - e.g. $foo is replaced by __pdb_convenience_variables["foo"].
- Note: such pattern in string literals will be skipped""" - - if "$" not in line: - return line - - dollar_start = dollar_end = -1 - replace_variables = [] - try: - for t in tokenize.generate_tokens(io.StringIO(line).readline): - token_type, token_string, start, end, _ = t - if token_type == token.OP and token_string == '$': - dollar_start, dollar_end = start, end - elif start == dollar_end and token_type == token.NAME: - # line is a one-line command so we only care about column - replace_variables.append((dollar_start[1], end[1], token_string)) - except tokenize.TokenError: - return line - - if not replace_variables: - return line - - last_end = 0 - line_pieces = [] - for start, end, name in replace_variables: - line_pieces.append(line[last_end:start] + f'__pdb_convenience_variables["{name}"]') - last_end = end - line_pieces.append(line[last_end:]) - - return ''.join(line_pieces) - - def precmd(self, line): - """Handle alias expansion and ';;' separator.""" - if not line.strip(): - return line - args = line.split() - while args[0] in self.aliases: - line = self.aliases[args[0]] - for idx in range(1, 10): - if f'%{idx}' in line: - if idx >= len(args): - self.error(f"Not enough arguments for alias '{args[0]}'") - # This is a no-op - return "!" - line = line.replace(f'%{idx}', args[idx]) - elif '%*' not in line: - if idx < len(args): - self.error(f"Too many arguments for alias '{args[0]}'") - # This is a no-op - return "!" - break - - line = line.replace("%*", ' '.join(args[1:])) - args = line.split() - # split into ';;' separated commands - # unless it's an alias command - if args[0] != 'alias': - marker = line.find(';;') - if marker >= 0: - # queue up everything after marker - next = line[marker+2:].lstrip() - self.cmdqueue.insert(0, next) - line = line[:marker].rstrip() - - # Replace all the convenience variables - line = self._replace_convenience_variables(line) - - return line - - def onecmd(self, line): - """Interpret the argument as though it had been typed in response - to the prompt. - - Checks whether this line is typed at the normal prompt or in - a breakpoint command list definition. 
- """ - if not self.commands_defining: - self._validate_file_mtime() - if line.startswith('_pdbcmd'): - command, arg, line = self.parseline(line) - if hasattr(self, command): - return getattr(self, command)(arg) - return cmd.Cmd.onecmd(self, line) - else: - return self.handle_command_def(line) - - def handle_command_def(self, line): - """Handles one command line during command list definition.""" - cmd, arg, line = self.parseline(line) - if not cmd: - return False - if cmd == 'silent': - self.commands_silent[self.commands_bnum] = True - return False # continue to handle other cmd def in the cmd list - elif cmd == 'end': - return True # end of cmd list - cmdlist = self.commands[self.commands_bnum] - if arg: - cmdlist.append(cmd+' '+arg) - else: - cmdlist.append(cmd) - # Determine if we must stop - try: - func = getattr(self, 'do_' + cmd) - except AttributeError: - func = self.default - # one of the resuming commands - if func.__name__ in self.commands_resuming: - self.commands_doprompt[self.commands_bnum] = False - return True - return False - - # interface abstraction functions - - def message(self, msg, end='\n'): - print(msg, end=end, file=self.stdout) - - def error(self, msg): - print('***', msg, file=self.stdout) - - # convenience variables - - def set_convenience_variable(self, frame, name, value): - if '__pdb_convenience_variables' not in frame.f_globals: - frame.f_globals['__pdb_convenience_variables'] = {} - frame.f_globals['__pdb_convenience_variables'][name] = value - - # Generic completion functions. Individual complete_foo methods can be - # assigned below to one of these functions. - - @property - def rlcompleter(self): - """Return the `Completer` class from `rlcompleter`, while avoiding the - side effects of changing the completer from `import rlcompleter`. - - This is a compromise between GH-138860 and GH-139289. If GH-139289 is - fixed, then we don't need this and we can just `import rlcompleter` in - `Pdb.__init__`. - """ - if not hasattr(self, "_rlcompleter"): - try: - import readline - except ImportError: - # readline is not available, just get the Completer - from rlcompleter import Completer - self._rlcompleter = Completer - else: - # importing rlcompleter could have side effect of changing - # the current completer, we need to restore it - prev_completer = readline.get_completer() - from rlcompleter import Completer - self._rlcompleter = Completer - readline.set_completer(prev_completer) - return self._rlcompleter - - def completenames(self, text, line, begidx, endidx): - # Overwrite completenames() of cmd so for the command completion, - # if no current command matches, check for expressions as well - commands = super().completenames(text, line, begidx, endidx) - for alias in self.aliases: - if alias.startswith(text): - commands.append(alias) - if commands: - return commands - else: - expressions = self._complete_expression(text, line, begidx, endidx) - if expressions: - return expressions - return self.completedefault(text, line, begidx, endidx) - - def _complete_location(self, text, line, begidx, endidx): - # Complete a file/module/function location for break/tbreak/clear. - if line.strip().endswith((':', ',')): - # Here comes a line number or a condition which we can't complete. - return [] - # First, try to find matching functions (i.e. expressions). - try: - ret = self._complete_expression(text, line, begidx, endidx) - except Exception: - ret = [] - # Then, try to complete file names as well. 
- globs = glob.glob(glob.escape(text) + '*') - for fn in globs: - if os.path.isdir(fn): - ret.append(fn + '/') - elif os.path.isfile(fn) and fn.lower().endswith(('.py', '.pyw')): - ret.append(fn + ':') - return ret - - def _complete_bpnumber(self, text, line, begidx, endidx): - # Complete a breakpoint number. (This would be more helpful if we could - # display additional info along with the completions, such as file/line - # of the breakpoint.) - return [str(i) for i, bp in enumerate(bdb.Breakpoint.bpbynumber) - if bp is not None and str(i).startswith(text)] - - def _complete_expression(self, text, line, begidx, endidx): - # Complete an arbitrary expression. - if not self.curframe: - return [] - # Collect globals and locals. It is usually not really sensible to also - # complete builtins, and they clutter the namespace quite heavily, so we - # leave them out. - ns = {**self.curframe.f_globals, **self.curframe_locals} - if text.startswith("$"): - # Complete convenience variables - conv_vars = self.curframe.f_globals.get('__pdb_convenience_variables', {}) - return [f"${name}" for name in conv_vars if name.startswith(text[1:])] - if '.' in text: - # Walk an attribute chain up to the last part, similar to what - # rlcompleter does. This will bail if any of the parts are not - # simple attribute access, which is what we want. - dotted = text.split('.') - try: - obj = ns[dotted[0]] - for part in dotted[1:-1]: - obj = getattr(obj, part) - except (KeyError, AttributeError): - return [] - prefix = '.'.join(dotted[:-1]) + '.' - return [prefix + n for n in dir(obj) if n.startswith(dotted[-1])] - else: - # Complete a simple name. - return [n for n in ns.keys() if n.startswith(text)] - - def completedefault(self, text, line, begidx, endidx): - if text.startswith("$"): - # Complete convenience variables - conv_vars = self.curframe.f_globals.get('__pdb_convenience_variables', {}) - return [f"${name}" for name in conv_vars if name.startswith(text[1:])] - - state = 0 - matches = [] - completer = self.rlcompleter(self.curframe.f_globals | self.curframe_locals) - while (match := completer.complete(text, state)) is not None: - matches.append(match) - state += 1 - return matches - - # Pdb meta commands, only intended to be used internally by pdb - - def _pdbcmd_print_frame_status(self, arg): - self.print_stack_entry(self.stack[self.curindex]) - self._show_display() - - # Command definitions, called by cmdloop() - # The argument is the remaining string on the command line - # Return true to exit from the command loop - - def do_commands(self, arg): - """(Pdb) commands [bpnumber] - (com) ... - (com) end - (Pdb) - - Specify a list of commands for breakpoint number bpnumber. - The commands themselves are entered on the following lines. - Type a line containing just 'end' to terminate the commands. - The commands are executed when the breakpoint is hit. - - To remove all commands from a breakpoint, type commands and - follow it immediately with end; that is, give no commands. - - With no bpnumber argument, commands refers to the last - breakpoint set. - - You can use breakpoint commands to start your program up - again. Simply use the continue command, or step, or any other - command that resumes execution. - - Specifying any command resuming execution (currently continue, - step, next, return, jump, quit and their abbreviations) - terminates the command list (as if that command was - immediately followed by end). 
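The attribute-chain walk used by _complete_expression() above, as a standalone sketch: resolve every dotted part but the last via getattr, then match the final fragment against dir().

    import os

    def complete_attr_chain(text, ns):
        if '.' not in text:
            return [n for n in ns if n.startswith(text)]
        dotted = text.split('.')
        try:
            obj = ns[dotted[0]]
            for part in dotted[1:-1]:
                obj = getattr(obj, part)
        except (KeyError, AttributeError):
            return []
        prefix = '.'.join(dotted[:-1]) + '.'
        return [prefix + n for n in dir(obj) if n.startswith(dotted[-1])]

    print(complete_attr_chain("os.pa", {"os": os}))  # e.g. ['os.path', 'os.pathsep', ...]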
This is because any time you - resume execution (even with a simple next or step), you may - encounter another breakpoint -- which could have its own - command list, leading to ambiguities about which list to - execute. - - If you use the 'silent' command in the command list, the usual - message about stopping at a breakpoint is not printed. This - may be desirable for breakpoints that are to print a specific - message and then continue. If none of the other commands - print anything, you will see no sign that the breakpoint was - reached. - """ - if not arg: - bnum = len(bdb.Breakpoint.bpbynumber) - 1 - else: - try: - bnum = int(arg) - except: - self._print_invalid_arg(arg) - return - try: - self.get_bpbynumber(bnum) - except ValueError as err: - self.error('cannot set commands: %s' % err) - return - - self.commands_bnum = bnum - # Save old definitions for the case of a keyboard interrupt. - if bnum in self.commands: - old_command_defs = (self.commands[bnum], - self.commands_doprompt[bnum], - self.commands_silent[bnum]) - else: - old_command_defs = None - self.commands[bnum] = [] - self.commands_doprompt[bnum] = True - self.commands_silent[bnum] = False - - prompt_back = self.prompt - self.prompt = '(com) ' - self.commands_defining = True - try: - self.cmdloop() - except KeyboardInterrupt: - # Restore old definitions. - if old_command_defs: - self.commands[bnum] = old_command_defs[0] - self.commands_doprompt[bnum] = old_command_defs[1] - self.commands_silent[bnum] = old_command_defs[2] - else: - del self.commands[bnum] - del self.commands_doprompt[bnum] - del self.commands_silent[bnum] - self.error('command definition aborted, old commands restored') - finally: - self.commands_defining = False - self.prompt = prompt_back - - complete_commands = _complete_bpnumber - - def do_break(self, arg, temporary = 0): - """b(reak) [ ([filename:]lineno | function) [, condition] ] - - Without argument, list all breaks. - - With a line number argument, set a break at this line in the - current file. With a function name, set a break at the first - executable line of that function. If a second argument is - present, it is a string specifying an expression which must - evaluate to true before the breakpoint is honored. - - The line number may be prefixed with a filename and a colon, - to specify a breakpoint in another file (probably one that - hasn't been loaded yet). The file is searched for on - sys.path; the .py suffix may be omitted. 
- """ - if not arg: - if self.breaks: # There's at least one - self.message("Num Type Disp Enb Where") - for bp in bdb.Breakpoint.bpbynumber: - if bp: - self.message(bp.bpformat()) - return - # parse arguments; comma has lowest precedence - # and cannot occur in filename - filename = None - lineno = None - cond = None - comma = arg.find(',') - if comma > 0: - # parse stuff after comma: "condition" - cond = arg[comma+1:].lstrip() - if err := self._compile_error_message(cond): - self.error('Invalid condition %s: %r' % (cond, err)) - return - arg = arg[:comma].rstrip() - # parse stuff before comma: [filename:]lineno | function - colon = arg.rfind(':') - funcname = None - if colon >= 0: - filename = arg[:colon].rstrip() - f = self.lookupmodule(filename) - if not f: - self.error('%r not found from sys.path' % filename) - return - else: - filename = f - arg = arg[colon+1:].lstrip() - try: - lineno = int(arg) - except ValueError: - self.error('Bad lineno: %s' % arg) - return - else: - # no colon; can be lineno or function - try: - lineno = int(arg) - except ValueError: - try: - func = eval(arg, - self.curframe.f_globals, - self.curframe_locals) - except: - func = arg - try: - if hasattr(func, '__func__'): - func = func.__func__ - code = func.__code__ - #use co_name to identify the bkpt (function names - #could be aliased, but co_name is invariant) - funcname = code.co_name - lineno = find_first_executable_line(code) - filename = code.co_filename - except: - # last thing to try - (ok, filename, ln) = self.lineinfo(arg) - if not ok: - self.error('The specified object %r is not a function ' - 'or was not found along sys.path.' % arg) - return - funcname = ok # ok contains a function name - lineno = int(ln) - if not filename: - filename = self.defaultFile() - # Check for reasonable breakpoint - line = self.checkline(filename, lineno) - if line: - # now set the break point - err = self.set_break(filename, line, temporary, cond, funcname) - if err: - self.error(err) - else: - bp = self.get_breaks(filename, line)[-1] - self.message("Breakpoint %d at %s:%d" % - (bp.number, bp.file, bp.line)) - - # To be overridden in derived debuggers - def defaultFile(self): - """Produce a reasonable default.""" - filename = self.curframe.f_code.co_filename - if filename == '' and self.mainpyfile: - filename = self.mainpyfile - return filename - - do_b = do_break - - complete_break = _complete_location - complete_b = _complete_location - - def do_tbreak(self, arg): - """tbreak [ ([filename:]lineno | function) [, condition] ] - - Same arguments as break, but sets a temporary breakpoint: it - is automatically deleted when first hit. - """ - self.do_break(arg, 1) - - complete_tbreak = _complete_location - - def lineinfo(self, identifier): - failed = (None, None, None) - # Input is identifier, may be in single quotes - idstring = identifier.split("'") - if len(idstring) == 1: - # not in single quotes - id = idstring[0].strip() - elif len(idstring) == 3: - # quoted - id = idstring[1].strip() - else: - return failed - if id == '': return failed - parts = id.split('.') - # Protection for derived debuggers - if parts[0] == 'self': - del parts[0] - if len(parts) == 0: - return failed - # Best first guess at file to look at - fname = self.defaultFile() - if len(parts) == 1: - item = parts[0] - else: - # More than one part. 
- # First is module, second is method/class - f = self.lookupmodule(parts[0]) - if f: - fname = f - item = parts[1] - else: - return failed - answer = find_function(item, self.canonic(fname)) - return answer or failed - - def checkline(self, filename, lineno): - """Check whether specified line seems to be executable. - - Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank - line or EOF). Warning: testing is not comprehensive. - """ - # this method should be callable before starting debugging, so default - # to "no globals" if there is no current frame - frame = getattr(self, 'curframe', None) - globs = frame.f_globals if frame else None - line = linecache.getline(filename, lineno, globs) - if not line: - self.message('End of file') - return 0 - line = line.strip() - # Don't allow setting breakpoint at a blank line - if (not line or (line[0] == '#') or - (line[:3] == '"""') or line[:3] == "'''"): - self.error('Blank or comment') - return 0 - return lineno - - def do_enable(self, arg): - """enable bpnumber [bpnumber ...] - - Enables the breakpoints given as a space separated list of - breakpoint numbers. - """ - args = arg.split() - for i in args: - try: - bp = self.get_bpbynumber(i) - except ValueError as err: - self.error(err) - else: - bp.enable() - self.message('Enabled %s' % bp) - - complete_enable = _complete_bpnumber - - def do_disable(self, arg): - """disable bpnumber [bpnumber ...] - - Disables the breakpoints given as a space separated list of - breakpoint numbers. Disabling a breakpoint means it cannot - cause the program to stop execution, but unlike clearing a - breakpoint, it remains in the list of breakpoints and can be - (re-)enabled. - """ - args = arg.split() - for i in args: - try: - bp = self.get_bpbynumber(i) - except ValueError as err: - self.error(err) - else: - bp.disable() - self.message('Disabled %s' % bp) - - complete_disable = _complete_bpnumber - - def do_condition(self, arg): - """condition bpnumber [condition] - - Set a new condition for the breakpoint, an expression which - must evaluate to true before the breakpoint is honored. If - condition is absent, any existing condition is removed; i.e., - the breakpoint is made unconditional. - """ - args = arg.split(' ', 1) - try: - cond = args[1] - if err := self._compile_error_message(cond): - self.error('Invalid condition %s: %r' % (cond, err)) - return - except IndexError: - cond = None - try: - bp = self.get_bpbynumber(args[0].strip()) - except IndexError: - self.error('Breakpoint number expected') - except ValueError as err: - self.error(err) - else: - bp.cond = cond - if not cond: - self.message('Breakpoint %d is now unconditional.' % bp.number) - else: - self.message('New condition set for breakpoint %d.' % bp.number) - - complete_condition = _complete_bpnumber - - def do_ignore(self, arg): - """ignore bpnumber [count] - - Set the ignore count for the given breakpoint number. If - count is omitted, the ignore count is set to 0. A breakpoint - becomes active when the ignore count is zero. When non-zero, - the count is decremented each time the breakpoint is reached - and the breakpoint is not disabled and any associated - condition evaluates to true. 
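A hypothetical (Pdb) transcript exercising the breakpoint-management commands above; the file, line and condition are placeholders, and the messages follow the formats emitted by this file:

    (Pdb) break app.py:42, user.is_admin
    Breakpoint 1 at app.py:42
    (Pdb) ignore 1 3
    Will ignore next 3 crossings of breakpoint 1.
    (Pdb) condition 1
    Breakpoint 1 is now unconditional.
    (Pdb) disable 1
    Disabled breakpoint 1 at app.py:42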
- """ - args = arg.split() - if not args: - self.error('Breakpoint number expected') - return - if len(args) == 1: - count = 0 - elif len(args) == 2: - try: - count = int(args[1]) - except ValueError: - self._print_invalid_arg(arg) - return - else: - self._print_invalid_arg(arg) - return - try: - bp = self.get_bpbynumber(args[0].strip()) - except ValueError as err: - self.error(err) - else: - bp.ignore = count - if count > 0: - if count > 1: - countstr = '%d crossings' % count - else: - countstr = '1 crossing' - self.message('Will ignore next %s of breakpoint %d.' % - (countstr, bp.number)) - else: - self.message('Will stop next time breakpoint %d is reached.' - % bp.number) - - complete_ignore = _complete_bpnumber - - def do_clear(self, arg): - """cl(ear) [filename:lineno | bpnumber ...] - - With a space separated list of breakpoint numbers, clear - those breakpoints. Without argument, clear all breaks (but - first ask confirmation). With a filename:lineno argument, - clear all breaks at that line in that file. - """ - if not arg: - try: - reply = input('Clear all breaks? ') - except EOFError: - reply = 'no' - reply = reply.strip().lower() - if reply in ('y', 'yes'): - bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp] - self.clear_all_breaks() - for bp in bplist: - self.message('Deleted %s' % bp) - return - if ':' in arg: - # Make sure it works for "clear C:\foo\bar.py:12" - i = arg.rfind(':') - filename = arg[:i] - arg = arg[i+1:] - try: - lineno = int(arg) - except ValueError: - err = "Invalid line number (%s)" % arg - else: - bplist = self.get_breaks(filename, lineno)[:] - err = self.clear_break(filename, lineno) - if err: - self.error(err) - else: - for bp in bplist: - self.message('Deleted %s' % bp) - return - numberlist = arg.split() - for i in numberlist: - try: - bp = self.get_bpbynumber(i) - except ValueError as err: - self.error(err) - else: - self.clear_bpbynumber(i) - self.message('Deleted %s' % bp) - do_cl = do_clear # 'c' is already an abbreviation for 'continue' - - complete_clear = _complete_location - complete_cl = _complete_location - - def do_where(self, arg): - """w(here) - - Print a stack trace, with the most recent frame at the bottom. - An arrow indicates the "current frame", which determines the - context of most commands. 'bt' is an alias for this command. - """ - if arg: - self._print_invalid_arg(arg) - return - self.print_stack_trace() - do_w = do_where - do_bt = do_where - - def _select_frame(self, number): - assert 0 <= number < len(self.stack) - self.curindex = number - self.curframe = self.stack[self.curindex][0] - self.curframe_locals = self.curframe.f_locals - self.set_convenience_variable(self.curframe, '_frame', self.curframe) - self.print_stack_entry(self.stack[self.curindex]) - self.lineno = None - - def do_exceptions(self, arg): - """exceptions [number] - - List or change current exception in an exception chain. - - Without arguments, list all the current exception in the exception - chain. Exceptions will be numbered, with the current exception indicated - with an arrow. - - If given an integer as argument, switch to the exception at that index. - """ - if not self._chained_exceptions: - self.message( - "Did not find chained exceptions. To move between" - " exceptions, pdb/post_mortem must be given an exception" - " object rather than a traceback." 
- ) - return - if not arg: - for ix, exc in enumerate(self._chained_exceptions): - prompt = ">" if ix == self._chained_exception_index else " " - rep = repr(exc) - if len(rep) > 80: - rep = rep[:77] + "..." - indicator = ( - " -" - if self._chained_exceptions[ix].__traceback__ is None - else f"{ix:>3}" - ) - self.message(f"{prompt} {indicator} {rep}") - else: - try: - number = int(arg) - except ValueError: - self.error("Argument must be an integer") - return - if 0 <= number < len(self._chained_exceptions): - if self._chained_exceptions[number].__traceback__ is None: - self.error("This exception does not have a traceback, cannot jump to it") - return - - self._chained_exception_index = number - self.setup(None, self._chained_exceptions[number].__traceback__) - self.print_stack_entry(self.stack[self.curindex]) - else: - self.error("No exception with that number") - - def do_up(self, arg): - """u(p) [count] - - Move the current frame count (default one) levels up in the - stack trace (to an older frame). - """ - if self.curindex == 0: - self.error('Oldest frame') - return - try: - count = int(arg or 1) - except ValueError: - self.error('Invalid frame count (%s)' % arg) - return - if count < 0: - newframe = 0 - else: - newframe = max(0, self.curindex - count) - self._select_frame(newframe) - do_u = do_up - - def do_down(self, arg): - """d(own) [count] - - Move the current frame count (default one) levels down in the - stack trace (to a newer frame). - """ - if self.curindex + 1 == len(self.stack): - self.error('Newest frame') - return - try: - count = int(arg or 1) - except ValueError: - self.error('Invalid frame count (%s)' % arg) - return - if count < 0: - newframe = len(self.stack) - 1 - else: - newframe = min(len(self.stack) - 1, self.curindex + count) - self._select_frame(newframe) - do_d = do_down - - def do_until(self, arg): - """unt(il) [lineno] - - Without argument, continue execution until the line with a - number greater than the current one is reached. With a line - number, continue execution until a line with a number greater - or equal to that is reached. In both cases, also stop when - the current frame returns. - """ - if arg: - try: - lineno = int(arg) - except ValueError: - self.error('Error in argument: %r' % arg) - return - if lineno <= self.curframe.f_lineno: - self.error('"until" line number is smaller than current ' - 'line number') - return - else: - lineno = None - self.set_until(self.curframe, lineno) - return 1 - do_unt = do_until - - def do_step(self, arg): - """s(tep) - - Execute the current line, stop at the first possible occasion - (either in a function that is called or in the current - function). - """ - if arg: - self._print_invalid_arg(arg) - return - self.set_step() - return 1 - do_s = do_step - - def do_next(self, arg): - """n(ext) - - Continue execution until the next line in the current function - is reached or it returns. - """ - if arg: - self._print_invalid_arg(arg) - return - self.set_next(self.curframe) - return 1 - do_n = do_next - - def do_run(self, arg): - """run [args...] - - Restart the debugged python program. If a string is supplied - it is split with "shlex", and the result is used as the new - sys.argv. History, breakpoints, actions and debugger options - are preserved. "restart" is an alias for "run". 
- """ - if arg: - import shlex - argv0 = sys.argv[0:1] - try: - sys.argv = shlex.split(arg) - except ValueError as e: - self.error('Cannot run %s: %s' % (arg, e)) - return - sys.argv[:0] = argv0 - # this is caught in the main debugger loop - raise Restart - - do_restart = do_run - - def do_return(self, arg): - """r(eturn) - - Continue execution until the current function returns. - """ - if arg: - self._print_invalid_arg(arg) - return - self.set_return(self.curframe) - return 1 - do_r = do_return - - def do_continue(self, arg): - """c(ont(inue)) - - Continue execution, only stop when a breakpoint is encountered. - """ - if arg: - self._print_invalid_arg(arg) - return - if not self.nosigint: - try: - Pdb._previous_sigint_handler = \ - signal.signal(signal.SIGINT, self.sigint_handler) - except ValueError: - # ValueError happens when do_continue() is invoked from - # a non-main thread in which case we just continue without - # SIGINT set. Would printing a message here (once) make - # sense? - pass - self.set_continue() - return 1 - do_c = do_cont = do_continue - - def do_jump(self, arg): - """j(ump) lineno - - Set the next line that will be executed. Only available in - the bottom-most frame. This lets you jump back and execute - code again, or jump forward to skip code that you don't want - to run. - - It should be noted that not all jumps are allowed -- for - instance it is not possible to jump into the middle of a - for loop or out of a finally clause. - """ - if self.curindex + 1 != len(self.stack): - self.error('You can only jump within the bottom frame') - return - try: - arg = int(arg) - except ValueError: - self.error("The 'jump' command requires a line number") - else: - try: - # Do the jump, fix up our copy of the stack, and display the - # new position - self.curframe.f_lineno = arg - self.stack[self.curindex] = self.stack[self.curindex][0], arg - self.print_stack_entry(self.stack[self.curindex]) - except ValueError as e: - self.error('Jump failed: %s' % e) - do_j = do_jump - - def do_debug(self, arg): - """debug code - - Enter a recursive debugger that steps through the code - argument (which is an arbitrary expression or statement to be - executed in the current environment). - """ - sys.settrace(None) - globals = self.curframe.f_globals - locals = self.curframe_locals - p = Pdb(self.completekey, self.stdin, self.stdout) - p.prompt = "(%s) " % self.prompt.strip() - self.message("ENTERING RECURSIVE DEBUGGER") - try: - sys.call_tracing(p.run, (arg, globals, locals)) - except Exception: - self._error_exc() - self.message("LEAVING RECURSIVE DEBUGGER") - sys.settrace(self.trace_dispatch) - self.lastcmd = p.lastcmd - - complete_debug = _complete_expression - - def do_quit(self, arg): - """q(uit) | exit - - Quit from the debugger. The program being executed is aborted. - """ - self._user_requested_quit = True - self.set_quit() - return 1 - - do_q = do_quit - do_exit = do_quit - - def do_EOF(self, arg): - """EOF - - Handles the receipt of EOF as a command. - """ - self.message('') - self._user_requested_quit = True - self.set_quit() - return 1 - - def do_args(self, arg): - """a(rgs) - - Print the argument list of the current function. 
- """ - if arg: - self._print_invalid_arg(arg) - return - co = self.curframe.f_code - dict = self.curframe_locals - n = co.co_argcount + co.co_kwonlyargcount - if co.co_flags & inspect.CO_VARARGS: n = n+1 - if co.co_flags & inspect.CO_VARKEYWORDS: n = n+1 - for i in range(n): - name = co.co_varnames[i] - if name in dict: - self.message('%s = %s' % (name, self._safe_repr(dict[name], name))) - else: - self.message('%s = *** undefined ***' % (name,)) - do_a = do_args - - def do_retval(self, arg): - """retval - - Print the return value for the last return of a function. - """ - if arg: - self._print_invalid_arg(arg) - return - if '__return__' in self.curframe_locals: - self.message(self._safe_repr(self.curframe_locals['__return__'], "retval")) - else: - self.error('Not yet returned!') - do_rv = do_retval - - def _getval(self, arg): - try: - return eval(arg, self.curframe.f_globals, self.curframe_locals) - except: - self._error_exc() - raise - - def _getval_except(self, arg, frame=None): - try: - if frame is None: - return eval(arg, self.curframe.f_globals, self.curframe_locals) - else: - return eval(arg, frame.f_globals, frame.f_locals) - except BaseException as exc: - return _rstr('** raised %s **' % self._format_exc(exc)) - - def _error_exc(self): - exc = sys.exception() - self.error(self._format_exc(exc)) - - def _msg_val_func(self, arg, func): - try: - val = self._getval(arg) - except: - return # _getval() has displayed the error - try: - self.message(func(val)) - except: - self._error_exc() - - def _safe_repr(self, obj, expr): - try: - return repr(obj) - except Exception as e: - return _rstr(f"*** repr({expr}) failed: {self._format_exc(e)} ***") - - def do_p(self, arg): - """p expression - - Print the value of the expression. - """ - self._msg_val_func(arg, repr) - - def do_pp(self, arg): - """pp expression - - Pretty-print the value of the expression. - """ - self._msg_val_func(arg, pprint.pformat) - - complete_print = _complete_expression - complete_p = _complete_expression - complete_pp = _complete_expression - - def do_list(self, arg): - """l(ist) [first[, last] | .] - - List source code for the current file. Without arguments, - list 11 lines around the current line or continue the previous - listing. With . as argument, list 11 lines around the current - line. With one argument, list 11 lines starting at that line. - With two arguments, list the given range; if the second - argument is less than the first, it is a count. - - The current line in the current frame is indicated by "->". - If an exception is being debugged, the line where the - exception was originally raised or propagated is indicated by - ">>", if it differs from the current line. - """ - self.lastcmd = 'list' - last = None - if arg and arg != '.': - try: - if ',' in arg: - first, last = arg.split(',') - first = int(first.strip()) - last = int(last.strip()) - if last < first: - # assume it's a count - last = first + last - else: - first = int(arg.strip()) - first = max(1, first - 5) - except ValueError: - self.error('Error in argument: %r' % arg) - return - elif self.lineno is None or arg == '.': - first = max(1, self.curframe.f_lineno - 5) - else: - first = self.lineno + 1 - if last is None: - last = first + 10 - filename = self.curframe.f_code.co_filename - # gh-93696: stdlib frozen modules provide a useful __file__ - # this workaround can be removed with the closure of gh-89815 - if filename.startswith("

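A minimal sketch for orientation (not part of the patch): after do_break finishes parsing "[filename:]lineno [, condition]", it hands off to bdb's set_break(), which the snippet below calls directly. The throwaway module written here is purely illustrative:

    import pdb
    import tempfile
    import textwrap

    # Create a real file so the breakpoint has a filename:lineno target.
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
        f.write(textwrap.dedent('''\
            x = 1
            x += 1
        '''))

    dbg = pdb.Pdb()
    # Equivalent of the command "break <file>:2, x > 0" after do_break's parsing:
    err = dbg.set_break(f.name, 2, temporary=False, cond='x > 0')
    print(err or 'breakpoint set')
    print(dbg.get_all_breaks())   # {canonical-path: [2]}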
-    def section(self, title, cls, contents, width=6,
-                prelude='', marginalia=None, gap='&nbsp;'):
-        """Format a section with a heading."""
-        if marginalia is None:
-            marginalia = '<span class="code">' + '&nbsp;'*width + '</span>'
-        result = '''<p>
-<table class="section">
-<tr class="decor %s-decor heading-text">
-<td class="section-title" colspan=3>&nbsp;<br>%s</td></tr>
-''' % (cls, title)
-        if prelude:
-            result = result + '''
-<tr><td class="decor %s-decor" rowspan=2>%s</td>
-<td class="decor %s-decor" colspan=2>%s</td></tr>
-<tr><td>%s</td>''' % (cls, marginalia, cls, prelude, gap)
-        else:
-            result = result + '''
-<tr><td class="decor %s-decor">%s</td><td>%s</td>''' % (cls, marginalia, gap)
-
-        return result + '\n<td class="singlecolumn">%s</td></tr></table>' % contents
-
-    def bigsection(self, title, *args):
-        """Format a section with a big heading."""
-        title = '<strong class="bigsection">%s</strong>' % title
-        return self.section(title, *args)
-
-    def preformat(self, text):
-        """Format literal preformatted text."""
-        text = self.escape(text.expandtabs())
-        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
-                             ' ', '&nbsp;', '\n', '<br>\n')
-
-    def multicolumn(self, list, format):
-        """Format a list of items into a multi-column list."""
-        result = ''
-        rows = (len(list) + 3) // 4
-        for col in range(4):
-            result = result + '<td class="multicolumn">'
-            for i in range(rows*col, rows*col+rows):
-                if i < len(list):
-                    result = result + format(list[i]) + '<br>\n'
-            result = result + '</td>'
-        return '<table><tr>%s</tr></table>' % result
-
-    def grey(self, text): return '<span class="grey">%s</span>' % text
-
-    def namelink(self, name, *dicts):
-        """Make a link for an identifier, given name-to-URL mappings."""
-        for dict in dicts:
-            if name in dict:
-                return '<a href="%s">%s</a>' % (dict[name], name)
-        return name
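An editorial sketch of how the small helpers above compose, assuming the stdlib pydoc module:

    import pydoc

    h = pydoc.HTMLDoc()
    # multicolumn() lays the items out across four <td class="multicolumn"> cells:
    print(h.multicolumn(['alpha', 'beta', 'gamma'], str))
    # preformat() escapes the text, hardens spaces to &nbsp; and breaks lines with <br>:
    print(h.preformat('line one\nline two'))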
-    def classlink(self, object, modname):
-        """Make a link for a class."""
-        name, module = object.__name__, sys.modules.get(object.__module__)
-        if hasattr(module, name) and getattr(module, name) is object:
-            return '<a href="%s.html#%s">%s</a>' % (
-                module.__name__, name, classname(object, modname))
-        return classname(object, modname)
-
-    def parentlink(self, object, modname):
-        """Make a link for the enclosing class or module."""
-        link = None
-        name, module = object.__name__, sys.modules.get(object.__module__)
-        if hasattr(module, name) and getattr(module, name) is object:
-            if '.' in object.__qualname__:
-                name = object.__qualname__.rpartition('.')[0]
-                if object.__module__ != modname:
-                    link = '%s.html#%s' % (module.__name__, name)
-                else:
-                    link = '#%s' % name
-            else:
-                if object.__module__ != modname:
-                    link = '%s.html' % module.__name__
-        if link:
-            return '<a href="%s">%s</a>' % (link, parentname(object, modname))
-        else:
-            return parentname(object, modname)
-
-    def modulelink(self, object):
-        """Make a link for a module."""
-        return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
-
-    def modpkglink(self, modpkginfo):
-        """Make a link for a module or package to display in an index."""
-        name, path, ispackage, shadowed = modpkginfo
-        if shadowed:
-            return self.grey(name)
-        if path:
-            url = '%s.%s.html' % (path, name)
-        else:
-            url = '%s.html' % name
-        if ispackage:
-            text = '<strong>%s&nbsp;(package)</strong>' % name
-        else:
-            text = name
-        return '<a href="%s">%s</a>' % (url, text)
-
-    def filelink(self, url, path):
-        """Make a link to source file."""
-        return '<a href="file:%s">%s</a>' % (url, path)
-
-    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
-        """Mark up some plain text, given a context of symbols to look for.
-        Each context dictionary maps object names to anchor names."""
-        escape = escape or self.escape
-        results = []
-        here = 0
-        pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|'
-                                r'RFC[- ]?(\d+)|'
-                                r'PEP[- ]?(\d+)|'
-                                r'(self\.)?(\w+))')
-        while match := pattern.search(text, here):
-            start, end = match.span()
-            results.append(escape(text[here:start]))
-
-            all, scheme, rfc, pep, selfdot, name = match.groups()
-            if scheme:
-                url = escape(all).replace('"', '&quot;')
-                results.append('<a href="%s">%s</a>' % (url, url))
-            elif rfc:
-                url = 'https://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
-                results.append('<a href="%s">%s</a>' % (url, escape(all)))
-            elif pep:
-                url = 'https://peps.python.org/pep-%04d/' % int(pep)
-                results.append('<a href="%s">%s</a>' % (url, escape(all)))
-            elif selfdot:
-                # Create a link for methods like 'self.method(...)'
-                # and use <strong> for attributes like 'self.attr'
-                if text[end:end+1] == '(':
-                    results.append('self.' + self.namelink(name, methods))
-                else:
-                    results.append('self.<strong>%s</strong>' % name)
-            elif text[end:end+1] == '(':
-                results.append(self.namelink(name, methods, funcs, classes))
-            else:
-                results.append(self.namelink(name, classes))
-            here = end
-        results.append(escape(text[here:]))
-        return ''.join(results)
-
-    # ---------------------------------------------- type-specific routines
-
-    def formattree(self, tree, modname, parent=None):
-        """Produce HTML for a class tree as given by inspect.getclasstree()."""
-        result = ''
-        for entry in tree:
-            if isinstance(entry, tuple):
-                c, bases = entry
-                result = result + '<dt class="heading-text">'
-                result = result + self.classlink(c, modname)
-                if bases and bases != (parent,):
-                    parents = []
-                    for base in bases:
-                        parents.append(self.classlink(base, modname))
-                    result = result + '(' + ', '.join(parents) + ')'
-                result = result + '\n</dt>'
-            elif isinstance(entry, list):
-                result = result + '<dd>\n%s</dd>\n' % self.formattree(
-                    entry, modname, c)
-        return '<dl>\n%s</dl>\n' % result
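A hedged demonstration of markup(), which is what turns bare PEP, RFC, and URL mentions in docstrings into anchors (output abbreviated and approximate):

    import pydoc

    h = pydoc.HTMLDoc()
    print(h.markup('See PEP 8 and https://peps.python.org for details.'))
    # roughly: See <a href="https://peps.python.org/pep-0008/">PEP 8</a> and <a href="...">...</a> for details.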
-    def docmodule(self, object, name=None, mod=None, *ignored):
-        """Produce HTML documentation for a module object."""
-        name = object.__name__ # ignore the passed-in name
-        try:
-            all = object.__all__
-        except AttributeError:
-            all = None
-        parts = name.split('.')
-        links = []
-        for i in range(len(parts)-1):
-            links.append(
-                '<a href="%s.html" class="white">%s</a>' %
-                ('.'.join(parts[:i+1]), parts[i]))
-        linkedname = '.'.join(links + parts[-1:])
-        head = '<strong class="title">%s</strong>' % linkedname
-        try:
-            path = inspect.getabsfile(object)
-            url = urllib.parse.quote(path)
-            filelink = self.filelink(url, path)
-        except TypeError:
-            filelink = '(built-in)'
-        info = []
-        if hasattr(object, '__version__'):
-            version = str(object.__version__)
-            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
-                version = version[11:-1].strip()
-            info.append('version %s' % self.escape(version))
-        if hasattr(object, '__date__'):
-            info.append(self.escape(str(object.__date__)))
-        if info:
-            head = head + ' (%s)' % ', '.join(info)
-        docloc = self.getdocloc(object)
-        if docloc is not None:
-            docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
-        else:
-            docloc = ''
-        result = self.heading(head, 'index<br>' + filelink + docloc)
-
-        modules = inspect.getmembers(object, inspect.ismodule)
-
-        classes, cdict = [], {}
-        for key, value in inspect.getmembers(object, inspect.isclass):
-            # if __all__ exists, believe it.  Otherwise use old heuristic.
-            if (all is not None or
-                (inspect.getmodule(value) or object) is object):
-                if visiblename(key, all, object):
-                    classes.append((key, value))
-                    cdict[key] = cdict[value] = '#' + key
-        for key, value in classes:
-            for base in value.__bases__:
-                key, modname = base.__name__, base.__module__
-                module = sys.modules.get(modname)
-                if modname != name and module and hasattr(module, key):
-                    if getattr(module, key) is base:
-                        if not key in cdict:
-                            cdict[key] = cdict[base] = modname + '.html#' + key
-        funcs, fdict = [], {}
-        for key, value in inspect.getmembers(object, inspect.isroutine):
-            # if __all__ exists, believe it.  Otherwise use a heuristic.
-            if (all is not None
-                or inspect.isbuiltin(value)
-                or (inspect.getmodule(value) or object) is object):
-                if visiblename(key, all, object):
-                    funcs.append((key, value))
-                    fdict[key] = '#-' + key
-                    if inspect.isfunction(value): fdict[value] = fdict[key]
-        data = []
-        for key, value in inspect.getmembers(object, isdata):
-            if visiblename(key, all, object):
-                data.append((key, value))
-
-        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
-        doc = doc and '<span class="code">%s</span>' % doc
-        result = result + '<p>%s</p>\n' % doc
-
-        if hasattr(object, '__path__'):
-            modpkgs = []
-            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
-                modpkgs.append((modname, name, ispkg, 0))
-            modpkgs.sort()
-            contents = self.multicolumn(modpkgs, self.modpkglink)
-            result = result + self.bigsection(
-                'Package Contents', 'pkg-content', contents)
-        elif modules:
-            contents = self.multicolumn(
-                modules, lambda t: self.modulelink(t[1]))
-            result = result + self.bigsection(
-                'Modules', 'pkg-content', contents)
-
-        if classes:
-            classlist = [value for (key, value) in classes]
-            contents = [
-                self.formattree(inspect.getclasstree(classlist, 1), name)]
-            for key, value in classes:
-                contents.append(self.document(value, key, name, fdict, cdict))
-            result = result + self.bigsection(
-                'Classes', 'index', ' '.join(contents))
-        if funcs:
-            contents = []
-            for key, value in funcs:
-                contents.append(self.document(value, key, name, fdict, cdict))
-            result = result + self.bigsection(
-                'Functions', 'functions', ' '.join(contents))
-        if data:
-            contents = []
-            for key, value in data:
-                contents.append(self.document(value, key))
-            result = result + self.bigsection(
-                'Data', 'data', '<br>\n'.join(contents))
-        if hasattr(object, '__author__'):
-            contents = self.markup(str(object.__author__), self.preformat)
-            result = result + self.bigsection('Author', 'author', contents)
-        if hasattr(object, '__credits__'):
-            contents = self.markup(str(object.__credits__), self.preformat)
-            result = result + self.bigsection('Credits', 'credits', contents)
-
-        return result
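In normal use docmodule() is reached through the module-level html instance; a minimal sketch (writedoc(), further down in this file, wraps the same calls and saves the result):

    import pydoc

    # document() dispatches to docmodule() for a module object;
    # page() wraps the fragment in a complete HTML document.
    fragment = pydoc.html.document(pydoc, 'pydoc')
    page = pydoc.html.page(pydoc.describe(pydoc), fragment)
    print(page[:120])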
-    def docclass(self, object, name=None, mod=None, funcs={}, classes={},
-                 *ignored):
-        """Produce HTML documentation for a class object."""
-        realname = object.__name__
-        name = name or realname
-        bases = object.__bases__
-
-        contents = []
-        push = contents.append
-
-        # Cute little class to pump out a horizontal rule between sections.
-        class HorizontalRule:
-            def __init__(self):
-                self.needone = 0
-            def maybe(self):
-                if self.needone:
-                    push('<hr>\n')
-                self.needone = 1
-        hr = HorizontalRule()
-
-        # List the mro, if non-trivial.
-        mro = deque(inspect.getmro(object))
-        if len(mro) > 2:
-            hr.maybe()
-            push('<dl><dt>Method resolution order:</dt>\n')
-            for base in mro:
-                push('<dd>%s</dd>\n' % self.classlink(base,
-                                                      object.__module__))
-            push('</dl>\n')
-
-        def spill(msg, attrs, predicate):
-            ok, attrs = _split_list(attrs, predicate)
-            if ok:
-                hr.maybe()
-                push(msg)
-                for name, kind, homecls, value in ok:
-                    try:
-                        value = getattr(object, name)
-                    except Exception:
-                        # Some descriptors may meet a failure in their __get__.
-                        # (bug #1785)
-                        push(self.docdata(value, name, mod))
-                    else:
-                        push(self.document(value, name, mod,
-                                        funcs, classes, mdict, object, homecls))
-                    push('\n')
-            return attrs
-
-        def spilldescriptors(msg, attrs, predicate):
-            ok, attrs = _split_list(attrs, predicate)
-            if ok:
-                hr.maybe()
-                push(msg)
-                for name, kind, homecls, value in ok:
-                    push(self.docdata(value, name, mod))
-            return attrs
-
-        def spilldata(msg, attrs, predicate):
-            ok, attrs = _split_list(attrs, predicate)
-            if ok:
-                hr.maybe()
-                push(msg)
-                for name, kind, homecls, value in ok:
-                    base = self.docother(getattr(object, name), name, mod)
-                    doc = getdoc(value)
-                    if not doc:
-                        push('<dl><dt>%s</dt></dl>\n' % base)
-                    else:
-                        doc = self.markup(getdoc(value), self.preformat,
-                                          funcs, classes, mdict)
-                        doc = '<dd><span class="code">%s</span></dd>' % doc
-                        push('<dl><dt>%s%s</dt></dl>\n' % (base, doc))
-                push('\n')
-            return attrs
-
-        attrs = [(name, kind, cls, value)
-                 for name, kind, cls, value in classify_class_attrs(object)
-                 if visiblename(name, obj=object)]
-
-        mdict = {}
-        for key, kind, homecls, value in attrs:
-            mdict[key] = anchor = '#' + name + '-' + key
-            try:
-                value = getattr(object, name)
-            except Exception:
-                # Some descriptors may meet a failure in their __get__.
-                # (bug #1785)
-                pass
-            try:
-                # The value may not be hashable (e.g., a data attr with
-                # a dict or list value).
-                mdict[value] = anchor
-            except TypeError:
-                pass
-
-        while attrs:
-            if mro:
-                thisclass = mro.popleft()
-            else:
-                thisclass = attrs[0][2]
-            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
-
-            if object is not builtins.object and thisclass is builtins.object:
-                attrs = inherited
-                continue
-            elif thisclass is object:
-                tag = 'defined here'
-            else:
-                tag = 'inherited from %s' % self.classlink(thisclass,
-                                                           object.__module__)
-            tag += ':<br>\n'
-
-            sort_attributes(attrs, object)
-
-            # Pump out the attrs, segregated by kind.
-            attrs = spill('Methods %s' % tag, attrs,
-                          lambda t: t[1] == 'method')
-            attrs = spill('Class methods %s' % tag, attrs,
-                          lambda t: t[1] == 'class method')
-            attrs = spill('Static methods %s' % tag, attrs,
-                          lambda t: t[1] == 'static method')
-            attrs = spilldescriptors("Readonly properties %s" % tag, attrs,
-                                     lambda t: t[1] == 'readonly property')
-            attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
-                                     lambda t: t[1] == 'data descriptor')
-            attrs = spilldata('Data and other attributes %s' % tag, attrs,
-                              lambda t: t[1] == 'data')
-            assert attrs == []
-            attrs = inherited
-
-        contents = ''.join(contents)
-
-        if name == realname:
-            title = '<a name="%s">class <strong>%s</strong></a>' % (
-                name, realname)
-        else:
-            title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
-                name, name, realname)
-        if bases:
-            parents = []
-            for base in bases:
-                parents.append(self.classlink(base, object.__module__))
-            title = title + '(%s)' % ', '.join(parents)
-
-        decl = ''
-        argspec = _getargspec(object)
-        if argspec and argspec != '()':
-            decl = name + self.escape(argspec) + '\n\n'
-
-        doc = getdoc(object)
-        if decl:
-            doc = decl + (doc or '')
-        doc = self.markup(doc, self.preformat, funcs, classes, mdict)
-        doc = doc and '<span class="code">%s<br>&nbsp;</span>' % doc
-
-        return self.section(title, 'title', contents, 3, doc)
-
-    def formatvalue(self, object):
-        """Format an argument default value as text."""
-        return self.grey('=' + self.repr(object))
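docclass() is likewise reached via the generic document() dispatcher; a quick way to look at the generated markup:

    import pydoc

    # Renders the MRO, the per-kind method sections and the docstring of a class:
    snippet = pydoc.html.document(dict, 'dict')
    print(snippet[:200])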
-    def docroutine(self, object, name=None, mod=None,
-                   funcs={}, classes={}, methods={}, cl=None, homecls=None):
-        """Produce HTML documentation for a function or method object."""
-        realname = object.__name__
-        name = name or realname
-        if homecls is None:
-            homecls = cl
-        anchor = ('' if cl is None else cl.__name__) + '-' + name
-        note = ''
-        skipdocs = False
-        imfunc = None
-        if _is_bound_method(object):
-            imself = object.__self__
-            if imself is cl:
-                imfunc = getattr(object, '__func__', None)
-            elif inspect.isclass(imself):
-                note = ' class method of %s' % self.classlink(imself, mod)
-            else:
-                note = ' method of %s instance' % self.classlink(
-                    imself.__class__, mod)
-        elif (inspect.ismethoddescriptor(object) or
-              inspect.ismethodwrapper(object)):
-            try:
-                objclass = object.__objclass__
-            except AttributeError:
-                pass
-            else:
-                if cl is None:
-                    note = ' unbound %s method' % self.classlink(objclass, mod)
-                elif objclass is not homecls:
-                    note = ' from ' + self.classlink(objclass, mod)
-        else:
-            imfunc = object
-        if inspect.isfunction(imfunc) and homecls is not None and (
-            imfunc.__module__ != homecls.__module__ or
-            imfunc.__qualname__ != homecls.__qualname__ + '.' + realname):
-            pname = self.parentlink(imfunc, mod)
-            if pname:
-                note = ' from %s' % pname
-
-        if (inspect.iscoroutinefunction(object) or
-                inspect.isasyncgenfunction(object)):
-            asyncqualifier = 'async '
-        else:
-            asyncqualifier = ''
-
-        if name == realname:
-            title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
-        else:
-            if (cl is not None and
-                inspect.getattr_static(cl, realname, []) is object):
-                reallink = '<a href="#%s">%s</a>' % (
-                    cl.__name__ + '-' + realname, realname)
-                skipdocs = True
-                if note.startswith(' from '):
-                    note = ''
-            else:
-                reallink = realname
-            title = '<a name="%s"><strong>%s</strong></a> = %s' % (
-                anchor, name, reallink)
-        argspec = None
-        if inspect.isroutine(object):
-            argspec = _getargspec(object)
-            if argspec and realname == '<lambda>':
-                title = '<strong>%s</strong> <em>lambda</em> ' % name
-                # XXX lambda's won't usually have func_annotations['return']
-                # since the syntax doesn't support but it is possible.
-                # So removing parentheses isn't truly safe.
-                if not object.__annotations__:
-                    argspec = argspec[1:-1] # remove parentheses
-        if not argspec:
-            argspec = '(...)'
-
-        decl = asyncqualifier + title + self.escape(argspec) + (note and
-                   self.grey('<span class="heading-text">%s</span>' % note))
-
-        if skipdocs:
-            return '<dl><dt>%s</dt></dl>\n' % decl
-        else:
-            doc = self.markup(
-                getdoc(object), self.preformat, funcs, classes, methods)
-            doc = doc and '<dd><span class="code">%s</span></dd>' % doc
-            return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
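The same dispatcher covers plain routines, which makes it easy to compare this HTML renderer with the text renderer defined below:

    import pydoc

    print(pydoc.html.document(len, 'len'))        # <dl><dt><a name="-len">... markup
    print(pydoc.plaintext.document(len, 'len'))   # same content, unstyled text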
-    def docdata(self, object, name=None, mod=None, cl=None, *ignored):
-        """Produce html documentation for a data descriptor."""
-        results = []
-        push = results.append
-
-        if name:
-            push('<dl><dt><strong>%s</strong></dt>\n' % name)
-        doc = self.markup(getdoc(object), self.preformat)
-        if doc:
-            push('<dd><span class="code">%s</span></dd>\n' % doc)
-        push('</dl>\n')
-
-        return ''.join(results)
-
-    docproperty = docdata
-
-    def docother(self, object, name=None, mod=None, *ignored):
-        """Produce HTML documentation for a data object."""
-        lhs = name and '<strong>%s</strong> = ' % name or ''
-        return lhs + self.repr(object)
-
-    def index(self, dir, shadowed=None):
-        """Generate an HTML index for a directory of modules."""
-        modpkgs = []
-        if shadowed is None: shadowed = {}
-        for importer, name, ispkg in pkgutil.iter_modules([dir]):
-            if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):
-                # ignore a module if its name contains a surrogate character
-                continue
-            modpkgs.append((name, '', ispkg, name in shadowed))
-            shadowed[name] = 1
-
-        modpkgs.sort()
-        contents = self.multicolumn(modpkgs, self.modpkglink)
-        return self.bigsection(dir, 'index', contents)
-
-# -------------------------------------------- text documentation generator
-
-class TextRepr(Repr):
-    """Class for safely making a text representation of a Python object."""
-    def __init__(self):
-        Repr.__init__(self)
-        self.maxlist = self.maxtuple = 20
-        self.maxdict = 10
-        self.maxstring = self.maxother = 100
-
-    def repr1(self, x, level):
-        if hasattr(type(x), '__name__'):
-            methodname = 'repr_' + '_'.join(type(x).__name__.split())
-            if hasattr(self, methodname):
-                return getattr(self, methodname)(x, level)
-        return cram(stripid(repr(x)), self.maxother)
-
-    def repr_string(self, x, level):
-        test = cram(x, self.maxstring)
-        testrepr = repr(test)
-        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
-            # Backslashes are only literal in the string and are never
-            # needed to make any special characters, so show a raw string.
-            return 'r' + testrepr[0] + test + testrepr[0]
-        return testrepr
-
-    repr_str = repr_string
-
-    def repr_instance(self, x, level):
-        try:
-            return cram(stripid(repr(x)), self.maxstring)
-        except:
-            return '<%s instance>' % x.__class__.__name__
-
-class TextDoc(Doc):
-    """Formatter class for text documentation."""
-
-    # ------------------------------------------- text formatting utilities
-
-    _repr_instance = TextRepr()
-    repr = _repr_instance.repr
-
-    def bold(self, text):
-        """Format a string in bold by overstriking."""
-        return ''.join(ch + '\b' + ch for ch in text)
-
-    def indent(self, text, prefix='    '):
-        """Indent text by prepending a given prefix to each line."""
-        if not text: return ''
-        lines = [(prefix + line).rstrip() for line in text.split('\n')]
-        return '\n'.join(lines)
-
-    def section(self, title, contents):
-        """Format a section with a given heading."""
-        clean_contents = self.indent(contents).rstrip()
-        return self.bold(title) + '\n' + clean_contents + '\n\n'
-
-    # ---------------------------------------------- type-specific routines
-
-    def formattree(self, tree, modname, parent=None, prefix=''):
-        """Render in text a class tree as returned by inspect.getclasstree()."""
-        result = ''
-        for entry in tree:
-            if isinstance(entry, tuple):
-                c, bases = entry
-                result = result + prefix + classname(c, modname)
-                if bases and bases != (parent,):
-                    parents = (classname(c, modname) for c in bases)
-                    result = result + '(%s)' % ', '.join(parents)
-                result = result + '\n'
-            elif isinstance(entry, list):
-                result = result + self.formattree(
-                    entry, modname, c, prefix + '    ')
-        return result
-
-    def docmodule(self, object, name=None, mod=None, *ignored):
-        """Produce text documentation for a given module object."""
-        name = object.__name__ # ignore the passed-in name
-        synop, desc = splitdoc(getdoc(object))
-        result = self.section('NAME', name + (synop and ' - ' + synop))
-        all = getattr(object, '__all__', None)
-        docloc = self.getdocloc(object)
-        if docloc is not None:
-            result = result + self.section('MODULE REFERENCE', docloc + """
-
-The following documentation is automatically generated from the Python
-source files.  It may be incomplete, incorrect or include features that
-are considered implementation detail and may vary between Python
-implementations.  When in doubt, consult the module reference at the
-location listed above.
-""")
-
-        if desc:
-            result = result + self.section('DESCRIPTION', desc)
-
-        classes = []
-        for key, value in inspect.getmembers(object, inspect.isclass):
-            # if __all__ exists, believe it.  Otherwise use old heuristic.
-            if (all is not None
-                or (inspect.getmodule(value) or object) is object):
-                if visiblename(key, all, object):
-                    classes.append((key, value))
-        funcs = []
-        for key, value in inspect.getmembers(object, inspect.isroutine):
-            # if __all__ exists, believe it.  Otherwise use a heuristic.
-            if (all is not None
-                or inspect.isbuiltin(value)
-                or (inspect.getmodule(value) or object) is object):
-                if visiblename(key, all, object):
-                    funcs.append((key, value))
-        data = []
-        for key, value in inspect.getmembers(object, isdata):
-            if visiblename(key, all, object):
-                data.append((key, value))
-
-        modpkgs = []
-        modpkgs_names = set()
-        if hasattr(object, '__path__'):
-            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
-                modpkgs_names.add(modname)
-                if ispkg:
-                    modpkgs.append(modname + ' (package)')
-                else:
-                    modpkgs.append(modname)
-
-            modpkgs.sort()
-            result = result + self.section(
-                'PACKAGE CONTENTS', '\n'.join(modpkgs))
-
-        # Detect submodules as sometimes created by C extensions
-        submodules = []
-        for key, value in inspect.getmembers(object, inspect.ismodule):
-            if value.__name__.startswith(name + '.') and key not in modpkgs_names:
-                submodules.append(key)
-        if submodules:
-            submodules.sort()
-            result = result + self.section(
-                'SUBMODULES', '\n'.join(submodules))
-
-        if classes:
-            classlist = [value for key, value in classes]
-            contents = [self.formattree(
-                inspect.getclasstree(classlist, 1), name)]
-            for key, value in classes:
-                contents.append(self.document(value, key, name))
-            result = result + self.section('CLASSES', '\n'.join(contents))
-
-        if funcs:
-            contents = []
-            for key, value in funcs:
-                contents.append(self.document(value, key, name))
-            result = result + self.section('FUNCTIONS', '\n'.join(contents))
-
-        if data:
-            contents = []
-            for key, value in data:
-                contents.append(self.docother(value, key, name, maxlen=70))
-            result = result + self.section('DATA', '\n'.join(contents))
-
-        if hasattr(object, '__version__'):
-            version = str(object.__version__)
-            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
-                version = version[11:-1].strip()
-            result = result + self.section('VERSION', version)
-        if hasattr(object, '__date__'):
-            result = result + self.section('DATE', str(object.__date__))
-        if hasattr(object, '__author__'):
-            result = result + self.section('AUTHOR', str(object.__author__))
-        if hasattr(object, '__credits__'):
-            result = result + self.section('CREDITS', str(object.__credits__))
-        try:
-            file = inspect.getabsfile(object)
-        except TypeError:
-            file = '(built-in)'
-        result = result + self.section('FILE', file)
-        return result
-
-    def docclass(self, object, name=None, mod=None, *ignored):
-        """Produce text documentation for a given class object."""
-        realname = object.__name__
-        name = name or realname
-        bases = object.__bases__
-
-        def makename(c, m=object.__module__):
return classname(c, m) - - if name == realname: - title = 'class ' + self.bold(realname) - else: - title = self.bold(name) + ' = class ' + realname - if bases: - parents = map(makename, bases) - title = title + '(%s)' % ', '.join(parents) - - contents = [] - push = contents.append - - argspec = _getargspec(object) - if argspec and argspec != '()': - push(name + argspec + '\n') - - doc = getdoc(object) - if doc: - push(doc + '\n') - - # List the mro, if non-trivial. - mro = deque(inspect.getmro(object)) - if len(mro) > 2: - push("Method resolution order:") - for base in mro: - push(' ' + makename(base)) - push('') - - # List the built-in subclasses, if any: - subclasses = sorted( - (str(cls.__name__) for cls in type.__subclasses__(object) - if not cls.__name__.startswith("_") and cls.__module__ == "builtins"), - key=str.lower - ) - no_of_subclasses = len(subclasses) - MAX_SUBCLASSES_TO_DISPLAY = 4 - if subclasses: - push("Built-in subclasses:") - for subclassname in subclasses[:MAX_SUBCLASSES_TO_DISPLAY]: - push(' ' + subclassname) - if no_of_subclasses > MAX_SUBCLASSES_TO_DISPLAY: - push(' ... and ' + - str(no_of_subclasses - MAX_SUBCLASSES_TO_DISPLAY) + - ' other subclasses') - push('') - - # Cute little class to pump out a horizontal rule between sections. - class HorizontalRule: - def __init__(self): - self.needone = 0 - def maybe(self): - if self.needone: - push('-' * 70) - self.needone = 1 - hr = HorizontalRule() - - def spill(msg, attrs, predicate): - ok, attrs = _split_list(attrs, predicate) - if ok: - hr.maybe() - push(msg) - for name, kind, homecls, value in ok: - try: - value = getattr(object, name) - except Exception: - # Some descriptors may meet a failure in their __get__. - # (bug #1785) - push(self.docdata(value, name, mod)) - else: - push(self.document(value, - name, mod, object, homecls)) - return attrs - - def spilldescriptors(msg, attrs, predicate): - ok, attrs = _split_list(attrs, predicate) - if ok: - hr.maybe() - push(msg) - for name, kind, homecls, value in ok: - push(self.docdata(value, name, mod)) - return attrs - - def spilldata(msg, attrs, predicate): - ok, attrs = _split_list(attrs, predicate) - if ok: - hr.maybe() - push(msg) - for name, kind, homecls, value in ok: - doc = getdoc(value) - try: - obj = getattr(object, name) - except AttributeError: - obj = homecls.__dict__[name] - push(self.docother(obj, name, mod, maxlen=70, doc=doc) + - '\n') - return attrs - - attrs = [(name, kind, cls, value) - for name, kind, cls, value in classify_class_attrs(object) - if visiblename(name, obj=object)] - - while attrs: - if mro: - thisclass = mro.popleft() - else: - thisclass = attrs[0][2] - attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) - - if object is not builtins.object and thisclass is builtins.object: - attrs = inherited - continue - elif thisclass is object: - tag = "defined here" - else: - tag = "inherited from %s" % classname(thisclass, - object.__module__) - - sort_attributes(attrs, object) - - # Pump out the attrs, segregated by kind. 
- attrs = spill("Methods %s:\n" % tag, attrs, - lambda t: t[1] == 'method') - attrs = spill("Class methods %s:\n" % tag, attrs, - lambda t: t[1] == 'class method') - attrs = spill("Static methods %s:\n" % tag, attrs, - lambda t: t[1] == 'static method') - attrs = spilldescriptors("Readonly properties %s:\n" % tag, attrs, - lambda t: t[1] == 'readonly property') - attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs, - lambda t: t[1] == 'data descriptor') - attrs = spilldata("Data and other attributes %s:\n" % tag, attrs, - lambda t: t[1] == 'data') - - assert attrs == [] - attrs = inherited - - contents = '\n'.join(contents) - if not contents: - return title + '\n' - return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n' - - def formatvalue(self, object): - """Format an argument default value as text.""" - return '=' + self.repr(object) - - def docroutine(self, object, name=None, mod=None, cl=None, homecls=None): - """Produce text documentation for a function or method object.""" - realname = object.__name__ - name = name or realname - if homecls is None: - homecls = cl - note = '' - skipdocs = False - imfunc = None - if _is_bound_method(object): - imself = object.__self__ - if imself is cl: - imfunc = getattr(object, '__func__', None) - elif inspect.isclass(imself): - note = ' class method of %s' % classname(imself, mod) - else: - note = ' method of %s instance' % classname( - imself.__class__, mod) - elif (inspect.ismethoddescriptor(object) or - inspect.ismethodwrapper(object)): - try: - objclass = object.__objclass__ - except AttributeError: - pass - else: - if cl is None: - note = ' unbound %s method' % classname(objclass, mod) - elif objclass is not homecls: - note = ' from ' + classname(objclass, mod) - else: - imfunc = object - if inspect.isfunction(imfunc) and homecls is not None and ( - imfunc.__module__ != homecls.__module__ or - imfunc.__qualname__ != homecls.__qualname__ + '.' + realname): - pname = parentname(imfunc, mod) - if pname: - note = ' from %s' % pname - - if (inspect.iscoroutinefunction(object) or - inspect.isasyncgenfunction(object)): - asyncqualifier = 'async ' - else: - asyncqualifier = '' - - if name == realname: - title = self.bold(realname) - else: - if (cl is not None and - inspect.getattr_static(cl, realname, []) is object): - skipdocs = True - if note.startswith(' from '): - note = '' - title = self.bold(name) + ' = ' + realname - argspec = None - - if inspect.isroutine(object): - argspec = _getargspec(object) - if argspec and realname == '': - title = self.bold(name) + ' lambda ' - # XXX lambda's won't usually have func_annotations['return'] - # since the syntax doesn't support but it is possible. - # So removing parentheses isn't truly safe. 
- if not object.__annotations__: - argspec = argspec[1:-1] - if not argspec: - argspec = '(...)' - decl = asyncqualifier + title + argspec + note - - if skipdocs: - return decl + '\n' - else: - doc = getdoc(object) or '' - return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n') - - def docdata(self, object, name=None, mod=None, cl=None, *ignored): - """Produce text documentation for a data descriptor.""" - results = [] - push = results.append - - if name: - push(self.bold(name)) - push('\n') - doc = getdoc(object) or '' - if doc: - push(self.indent(doc)) - push('\n') - return ''.join(results) - - docproperty = docdata - - def docother(self, object, name=None, mod=None, parent=None, *ignored, - maxlen=None, doc=None): - """Produce text documentation for a data object.""" - repr = self.repr(object) - if maxlen: - line = (name and name + ' = ' or '') + repr - chop = maxlen - len(line) - if chop < 0: repr = repr[:chop] + '...' - line = (name and self.bold(name) + ' = ' or '') + repr - if not doc: - doc = getdoc(object) - if doc: - line += '\n' + self.indent(str(doc)) + '\n' - return line - -class _PlainTextDoc(TextDoc): - """Subclass of TextDoc which overrides string styling""" - def bold(self, text): - return text - -# --------------------------------------------------------- user interfaces - -def pager(text, title=''): - """The first time this is called, determine what kind of pager to use.""" - global pager - pager = get_pager() - pager(text, title) - -def describe(thing): - """Produce a short description of the given thing.""" - if inspect.ismodule(thing): - if thing.__name__ in sys.builtin_module_names: - return 'built-in module ' + thing.__name__ - if hasattr(thing, '__path__'): - return 'package ' + thing.__name__ - else: - return 'module ' + thing.__name__ - if inspect.isbuiltin(thing): - return 'built-in function ' + thing.__name__ - if inspect.isgetsetdescriptor(thing): - return 'getset descriptor %s.%s.%s' % ( - thing.__objclass__.__module__, thing.__objclass__.__name__, - thing.__name__) - if inspect.ismemberdescriptor(thing): - return 'member descriptor %s.%s.%s' % ( - thing.__objclass__.__module__, thing.__objclass__.__name__, - thing.__name__) - if inspect.isclass(thing): - return 'class ' + thing.__name__ - if inspect.isfunction(thing): - return 'function ' + thing.__name__ - if inspect.ismethod(thing): - return 'method ' + thing.__name__ - return type(thing).__name__ - -def locate(path, forceload=0): - """Locate an object by name or dotted path, importing as necessary.""" - parts = [part for part in path.split('.') if part] - module, n = None, 0 - while n < len(parts): - nextmodule = safeimport('.'.join(parts[:n+1]), forceload) - if nextmodule: module, n = nextmodule, n + 1 - else: break - if module: - object = module - else: - object = builtins - for part in parts[n:]: - try: - object = getattr(object, part) - except AttributeError: - return None - return object - -# --------------------------------------- interactive interpreter interface - -text = TextDoc() -plaintext = _PlainTextDoc() -html = HTMLDoc() - -def resolve(thing, forceload=0): - """Given an object or a path to an object, get the object and its name.""" - if isinstance(thing, str): - object = locate(thing, forceload) - if object is None: - raise ImportError('''\ -No Python documentation found for %r. -Use help() to get the interactive help utility. 
-Use help(str) for help on the str class.''' % thing) - return object, thing - else: - name = getattr(thing, '__name__', None) - return thing, name if isinstance(name, str) else None - -def render_doc(thing, title='Python Library Documentation: %s', forceload=0, - renderer=None): - """Render text documentation, given an object or a path to an object.""" - if renderer is None: - renderer = text - object, name = resolve(thing, forceload) - desc = describe(object) - module = inspect.getmodule(object) - if name and '.' in name: - desc += ' in ' + name[:name.rfind('.')] - elif module and module is not object: - desc += ' in module ' + module.__name__ - - if not (inspect.ismodule(object) or - inspect.isclass(object) or - inspect.isroutine(object) or - inspect.isdatadescriptor(object) or - _getdoc(object)): - # If the passed object is a piece of data or an instance, - # document its available methods instead of its value. - if hasattr(object, '__origin__'): - object = object.__origin__ - else: - object = type(object) - desc += ' object' - return title % desc + '\n\n' + renderer.document(object, name) - -def doc(thing, title='Python Library Documentation: %s', forceload=0, - output=None, is_cli=False): - """Display text documentation, given an object or a path to an object.""" - if output is None: - try: - if isinstance(thing, str): - what = thing - else: - what = getattr(thing, '__qualname__', None) - if not isinstance(what, str): - what = getattr(thing, '__name__', None) - if not isinstance(what, str): - what = type(thing).__name__ + ' object' - pager(render_doc(thing, title, forceload), f'Help on {what!s}') - except ImportError as exc: - if is_cli: - raise - print(exc) - else: - try: - s = render_doc(thing, title, forceload, plaintext) - except ImportError as exc: - s = str(exc) - output.write(s) - -def writedoc(thing, forceload=0): - """Write HTML documentation to a file in the current directory.""" - object, name = resolve(thing, forceload) - page = html.page(describe(object), html.document(object, name)) - with open(name + '.html', 'w', encoding='utf-8') as file: - file.write(page) - print('wrote', name + '.html') - -def writedocs(dir, pkgpath='', done=None): - """Write out HTML documentation for all modules in a directory tree.""" - if done is None: done = {} - for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath): - writedoc(modname) - return - -class Helper: - - # These dictionaries map a topic name to either an alias, or a tuple - # (label, seealso-items). The "label" is the label of the corresponding - # section in the .rst file under Doc/ and an index into the dictionary - # in pydoc_data/topics.py. - # - # CAUTION: if you change one of these dictionaries, be sure to adapt the - # list of needed labels in Doc/tools/extensions/pyspecific.py and - # regenerate the pydoc_data/topics.py file by running - # make pydoc-topics - # in Doc/ and copying the output file into the Lib/ directory. 
- - keywords = { - 'False': '', - 'None': '', - 'True': '', - 'and': 'BOOLEAN', - 'as': 'with', - 'assert': ('assert', ''), - 'async': ('async', ''), - 'await': ('await', ''), - 'break': ('break', 'while for'), - 'class': ('class', 'CLASSES SPECIALMETHODS'), - 'continue': ('continue', 'while for'), - 'def': ('function', ''), - 'del': ('del', 'BASICMETHODS'), - 'elif': 'if', - 'else': ('else', 'while for'), - 'except': 'try', - 'finally': 'try', - 'for': ('for', 'break continue while'), - 'from': 'import', - 'global': ('global', 'nonlocal NAMESPACES'), - 'if': ('if', 'TRUTHVALUE'), - 'import': ('import', 'MODULES'), - 'in': ('in', 'SEQUENCEMETHODS'), - 'is': 'COMPARISON', - 'lambda': ('lambda', 'FUNCTIONS'), - 'nonlocal': ('nonlocal', 'global NAMESPACES'), - 'not': 'BOOLEAN', - 'or': 'BOOLEAN', - 'pass': ('pass', ''), - 'raise': ('raise', 'EXCEPTIONS'), - 'return': ('return', 'FUNCTIONS'), - 'try': ('try', 'EXCEPTIONS'), - 'while': ('while', 'break continue if TRUTHVALUE'), - 'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'), - 'yield': ('yield', ''), - } - # Either add symbols to this dictionary or to the symbols dictionary - # directly: Whichever is easier. They are merged later. - _strprefixes = [p + q for p in ('b', 'f', 'r', 'u') for q in ("'", '"')] - _symbols_inverse = { - 'STRINGS' : ("'", "'''", '"', '"""', *_strprefixes), - 'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&', - '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'), - 'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'), - 'UNARY' : ('-', '~'), - 'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=', - '^=', '<<=', '>>=', '**=', '//='), - 'BITWISE' : ('<<', '>>', '&', '|', '^', '~'), - 'COMPLEX' : ('j', 'J') - } - symbols = { - '%': 'OPERATORS FORMATTING', - '**': 'POWER', - ',': 'TUPLES LISTS FUNCTIONS', - '.': 'ATTRIBUTES FLOAT MODULES OBJECTS', - '...': 'ELLIPSIS', - ':': 'SLICINGS DICTIONARYLITERALS', - '@': 'def class', - '\\': 'STRINGS', - ':=': 'ASSIGNMENTEXPRESSIONS', - '_': 'PRIVATENAMES', - '__': 'PRIVATENAMES SPECIALMETHODS', - '`': 'BACKQUOTES', - '(': 'TUPLES FUNCTIONS CALLS', - ')': 'TUPLES FUNCTIONS CALLS', - '[': 'LISTS SUBSCRIPTS SLICINGS', - ']': 'LISTS SUBSCRIPTS SLICINGS' - } - for topic, symbols_ in _symbols_inverse.items(): - for symbol in symbols_: - topics = symbols.get(symbol, topic) - if topic not in topics: - topics = topics + ' ' + topic - symbols[symbol] = topics - del topic, symbols_, symbol, topics - - topics = { - 'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS ' - 'FUNCTIONS CLASSES MODULES FILES inspect'), - 'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS ' - 'FORMATTING TYPES'), - 'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'), - 'FORMATTING': ('formatstrings', 'OPERATORS'), - 'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS ' - 'FORMATTING TYPES'), - 'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'), - 'INTEGER': ('integers', 'int range'), - 'FLOAT': ('floating', 'float math'), - 'COMPLEX': ('imaginary', 'complex cmath'), - 'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'), - 'MAPPINGS': 'DICTIONARIES', - 'FUNCTIONS': ('typesfunctions', 'def TYPES'), - 'METHODS': ('typesmethods', 'class def CLASSES TYPES'), - 'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'), - 'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'), - 'FRAMEOBJECTS': 'TYPES', - 'TRACEBACKS': 'TYPES', - 'NONE': ('bltin-null-object', ''), - 'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'), - 
'SPECIALATTRIBUTES': ('specialattrs', ''), - 'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'), - 'MODULES': ('typesmodules', 'import'), - 'PACKAGES': 'import', - 'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN ' - 'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER ' - 'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES ' - 'LISTS DICTIONARIES'), - 'OPERATORS': 'EXPRESSIONS', - 'PRECEDENCE': 'EXPRESSIONS', - 'OBJECTS': ('objects', 'TYPES'), - 'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS ' - 'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS ' - 'NUMBERMETHODS CLASSES'), - 'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'), - 'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'), - 'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'), - 'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS ' - 'SPECIALMETHODS'), - 'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'), - 'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT ' - 'SPECIALMETHODS'), - 'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'), - 'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'), - 'DYNAMICFEATURES': ('dynamic-features', ''), - 'SCOPING': 'NAMESPACES', - 'FRAMES': 'NAMESPACES', - 'EXCEPTIONS': ('exceptions', 'try except finally raise'), - 'CONVERSIONS': ('conversions', ''), - 'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'), - 'SPECIALIDENTIFIERS': ('id-classes', ''), - 'PRIVATENAMES': ('atom-identifiers', ''), - 'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS ' - 'LISTLITERALS DICTIONARYLITERALS'), - 'TUPLES': 'SEQUENCES', - 'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'), - 'LISTS': ('typesseq-mutable', 'LISTLITERALS'), - 'LISTLITERALS': ('lists', 'LISTS LITERALS'), - 'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'), - 'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'), - 'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'), - 'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'), - 'SLICINGS': ('slicings', 'SEQUENCEMETHODS'), - 'CALLS': ('calls', 'EXPRESSIONS'), - 'POWER': ('power', 'EXPRESSIONS'), - 'UNARY': ('unary', 'EXPRESSIONS'), - 'BINARY': ('binary', 'EXPRESSIONS'), - 'SHIFTING': ('shifting', 'EXPRESSIONS'), - 'BITWISE': ('bitwise', 'EXPRESSIONS'), - 'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'), - 'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'), - 'ASSERTION': 'assert', - 'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'), - 'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'), - 'ASSIGNMENTEXPRESSIONS': ('assignment-expressions', ''), - 'DELETION': 'del', - 'RETURNING': 'return', - 'IMPORTING': 'import', - 'CONDITIONAL': 'if', - 'LOOPING': ('compound', 'for while break continue'), - 'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'), - 'DEBUGGING': ('debugger', 'pdb'), - 'CONTEXTMANAGERS': ('context-managers', 'with'), - } - - def __init__(self, input=None, output=None): - self._input = input - self._output = output - - @property - def input(self): - return self._input or sys.stdin - - @property - def output(self): - return self._output or sys.stdout - - def __repr__(self): - if inspect.stack()[1][3] == '?': - self() - return '' - return '<%s.%s instance>' % (self.__class__.__module__, - self.__class__.__qualname__) - - _GoInteractive = object() - def __call__(self, request=_GoInteractive): - if request is not self._GoInteractive: - try: - 
self.help(request) - except ImportError as err: - self.output.write(f'{err}\n') - else: - self.intro() - self.interact() - self.output.write(''' -You are now leaving help and returning to the Python interpreter. -If you want to ask for help on a particular object directly from the -interpreter, you can type "help(object)". Executing "help('string')" -has the same effect as typing a particular string at the help> prompt. -''') - - def interact(self): - self.output.write('\n') - while True: - try: - request = self.getline('help> ') - except (KeyboardInterrupt, EOFError): - break - request = request.strip() - if not request: - continue # back to the prompt - - # Make sure significant trailing quoting marks of literals don't - # get deleted while cleaning input - if (len(request) > 2 and request[0] == request[-1] in ("'", '"') - and request[0] not in request[1:-1]): - request = request[1:-1] - if request.lower() in ('q', 'quit', 'exit'): break - if request == 'help': - self.intro() - else: - self.help(request) - - def getline(self, prompt): - """Read one line, using input() when appropriate.""" - if self.input is sys.stdin: - return input(prompt) - else: - self.output.write(prompt) - self.output.flush() - return self.input.readline() - - def help(self, request, is_cli=False): - if isinstance(request, str): - request = request.strip() - if request == 'keywords': self.listkeywords() - elif request == 'symbols': self.listsymbols() - elif request == 'topics': self.listtopics() - elif request == 'modules': self.listmodules() - elif request[:8] == 'modules ': - self.listmodules(request.split()[1]) - elif request in self.symbols: self.showsymbol(request) - elif request in ['True', 'False', 'None']: - # special case these keywords since they are objects too - doc(eval(request), 'Help on %s:', output=self._output, is_cli=is_cli) - elif request in self.keywords: self.showtopic(request) - elif request in self.topics: self.showtopic(request) - elif request: doc(request, 'Help on %s:', output=self._output, is_cli=is_cli) - else: doc(str, 'Help on %s:', output=self._output, is_cli=is_cli) - elif isinstance(request, Helper): self() - else: doc(request, 'Help on %s:', output=self._output, is_cli=is_cli) - self.output.write('\n') - - def intro(self): - self.output.write('''\ -Welcome to Python {0}'s help utility! If this is your first time using -Python, you should definitely check out the tutorial at -https://docs.python.org/{0}/tutorial/. - -Enter the name of any module, keyword, or topic to get help on writing -Python programs and using Python modules. To get a list of available -modules, keywords, symbols, or topics, enter "modules", "keywords", -"symbols", or "topics". - -Each module also comes with a one-line summary of what it does; to list -the modules whose name or summary contain a given string such as "spam", -enter "modules spam". - -To quit this help utility and return to the interpreter, -enter "q", "quit" or "exit". -'''.format('%d.%d' % sys.version_info[:2])) - - def list(self, items, columns=4, width=80): - items = sorted(items) - colw = width // columns - rows = (len(items) + columns - 1) // columns - for row in range(rows): - for col in range(columns): - i = col * rows + row - if i < len(items): - self.output.write(items[i]) - if col < columns - 1: - self.output.write(' ' + ' ' * (colw - 1 - len(items[i]))) - self.output.write('\n') - - def listkeywords(self): - self.output.write(''' -Here is a list of the Python keywords. Enter any keyword to get more help. 
- -''') - self.list(self.keywords.keys()) - - def listsymbols(self): - self.output.write(''' -Here is a list of the punctuation symbols which Python assigns special meaning -to. Enter any symbol to get more help. - -''') - self.list(self.symbols.keys()) - - def listtopics(self): - self.output.write(''' -Here is a list of available topics. Enter any topic name to get more help. - -''') - self.list(self.topics.keys(), columns=3) - - def showtopic(self, topic, more_xrefs=''): - try: - import pydoc_data.topics - except ImportError: - self.output.write(''' -Sorry, topic and keyword documentation is not available because the -module "pydoc_data.topics" could not be found. -''') - return - target = self.topics.get(topic, self.keywords.get(topic)) - if not target: - self.output.write('no documentation found for %s\n' % repr(topic)) - return - if isinstance(target, str): - return self.showtopic(target, more_xrefs) - - label, xrefs = target - try: - doc = pydoc_data.topics.topics[label] - except KeyError: - self.output.write('no documentation found for %s\n' % repr(topic)) - return - doc = doc.strip() + '\n' - if more_xrefs: - xrefs = (xrefs or '') + ' ' + more_xrefs - if xrefs: - import textwrap - text = 'Related help topics: ' + ', '.join(xrefs.split()) + '\n' - wrapped_text = textwrap.wrap(text, 72) - doc += '\n%s\n' % '\n'.join(wrapped_text) - - if self._output is None: - pager(doc, f'Help on {topic!s}') - else: - self.output.write(doc) - - def _gettopic(self, topic, more_xrefs=''): - """Return unbuffered tuple of (topic, xrefs). - - If an error occurs here, the exception is caught and displayed by - the url handler. - - This function duplicates the showtopic method but returns its - result directly so it can be formatted for display in an html page. - """ - try: - import pydoc_data.topics - except ImportError: - return(''' -Sorry, topic and keyword documentation is not available because the -module "pydoc_data.topics" could not be found. -''' , '') - target = self.topics.get(topic, self.keywords.get(topic)) - if not target: - raise ValueError('could not find topic') - if isinstance(target, str): - return self._gettopic(target, more_xrefs) - label, xrefs = target - doc = pydoc_data.topics.topics[label] - if more_xrefs: - xrefs = (xrefs or '') + ' ' + more_xrefs - return doc, xrefs - - def showsymbol(self, symbol): - target = self.symbols[symbol] - topic, _, xrefs = target.partition(' ') - self.showtopic(topic, xrefs) - - def listmodules(self, key=''): - if key: - self.output.write(''' -Here is a list of modules whose name or summary contains '{}'. -If there are any, enter a module name to get more help. - -'''.format(key)) - apropos(key) - else: - self.output.write(''' -Please wait a moment while I gather a list of all available modules... - -''') - modules = {} - def callback(path, modname, desc, modules=modules): - if modname and modname[-9:] == '.__init__': - modname = modname[:-9] + ' (package)' - if modname.find('.') < 0: - modules[modname] = 1 - def onerror(modname): - callback(None, modname, None) - ModuleScanner().run(callback, onerror=onerror) - self.list(modules.keys()) - self.output.write(''' -Enter any module name to get more help. Or, type "modules spam" to search -for modules whose name or summary contain the string "spam". 
-''') - -help = Helper() - -class ModuleScanner: - """An interruptible scanner that searches module synopses.""" - - def run(self, callback, key=None, completer=None, onerror=None): - if key: key = key.lower() - self.quit = False - seen = {} - - for modname in sys.builtin_module_names: - if modname != '__main__': - seen[modname] = 1 - if key is None: - callback(None, modname, '') - else: - name = __import__(modname).__doc__ or '' - desc = name.split('\n')[0] - name = modname + ' - ' + desc - if name.lower().find(key) >= 0: - callback(None, modname, desc) - - for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror): - if self.quit: - break - - if key is None: - callback(None, modname, '') - else: - try: - spec = importer.find_spec(modname) - except SyntaxError: - # raised by tests for bad coding cookies or BOM - continue - loader = spec.loader - if hasattr(loader, 'get_source'): - try: - source = loader.get_source(modname) - except Exception: - if onerror: - onerror(modname) - continue - desc = source_synopsis(io.StringIO(source)) or '' - if hasattr(loader, 'get_filename'): - path = loader.get_filename(modname) - else: - path = None - else: - try: - module = importlib._bootstrap._load(spec) - except ImportError: - if onerror: - onerror(modname) - continue - desc = module.__doc__.splitlines()[0] if module.__doc__ else '' - path = getattr(module,'__file__',None) - name = modname + ' - ' + desc - if name.lower().find(key) >= 0: - callback(path, modname, desc) - - if completer: - completer() - -def apropos(key): - """Print all the one-line module summaries that contain a substring.""" - def callback(path, modname, desc): - if modname[-9:] == '.__init__': - modname = modname[:-9] + ' (package)' - print(modname, desc and '- ' + desc) - def onerror(modname): - pass - with warnings.catch_warnings(): - warnings.filterwarnings('ignore') # ignore problems during import - ModuleScanner().run(callback, key, onerror=onerror) - -# --------------------------------------- enhanced web browser interface - -def _start_server(urlhandler, hostname, port): - """Start an HTTP server thread on a specific port. - - Start an HTML/text server thread, so HTML or text documents can be - browsed dynamically and interactively with a web browser. Example use: - - >>> import time - >>> import pydoc - - Define a URL handler. To determine what the client is asking - for, check the URL and content_type. - - Then get or generate some text or HTML code and return it. - - >>> def my_url_handler(url, content_type): - ... text = 'the URL sent was: (%s, %s)' % (url, content_type) - ... return text - - Start server thread on port 0. - If you use port 0, the server will pick a random port number. - You can then use serverthread.port to get the port number. - - >>> port = 0 - >>> serverthread = pydoc._start_server(my_url_handler, port) - - Check that the server is really started. If it is, open browser - and get first page. Use serverthread.url as the starting page. - - >>> if serverthread.serving: - ... import webbrowser - - The next two lines are commented out so a browser doesn't open if - doctest is run on this module. - - #... webbrowser.open(serverthread.url) - #True - - Let the server do its thing. We just need to monitor its status. - Use time.sleep so the loop doesn't hog the CPU. - - >>> starttime = time.monotonic() - >>> timeout = 1 #seconds - - This is a short timeout for testing purposes. - - >>> while serverthread.serving: - ... time.sleep(.01) - ... 
if serverthread.serving and time.monotonic() - starttime > timeout:
-    ...          serverthread.stop()
-    ...          break
-
-    Print any errors that may have occurred.
-
-    >>> print(serverthread.error)
-    None
-    """
-    import http.server
-    import email.message
-    import select
-    import threading
-
-    class DocHandler(http.server.BaseHTTPRequestHandler):
-
-        def do_GET(self):
-            """Process a request from an HTML browser.
-
-            The URL received is in self.path.
-            Get an HTML page from self.urlhandler and send it.
-            """
-            if self.path.endswith('.css'):
-                content_type = 'text/css'
-            else:
-                content_type = 'text/html'
-            self.send_response(200)
-            self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
-            self.end_headers()
-            self.wfile.write(self.urlhandler(
-                self.path, content_type).encode('utf-8'))
-
-        def log_message(self, *args):
-            # Don't log messages.
-            pass
-
-    class DocServer(http.server.HTTPServer):
-
-        def __init__(self, host, port, callback):
-            self.host = host
-            self.address = (self.host, port)
-            self.callback = callback
-            self.base.__init__(self, self.address, self.handler)
-            self.quit = False
-
-        def serve_until_quit(self):
-            while not self.quit:
-                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
-                if rd:
-                    self.handle_request()
-            self.server_close()
-
-        def server_activate(self):
-            self.base.server_activate(self)
-            if self.callback:
-                self.callback(self)
-
-    class ServerThread(threading.Thread):
-
-        def __init__(self, urlhandler, host, port):
-            self.urlhandler = urlhandler
-            self.host = host
-            self.port = int(port)
-            threading.Thread.__init__(self)
-            self.serving = False
-            self.error = None
-            self.docserver = None
-
-        def run(self):
-            """Start the server."""
-            try:
-                DocServer.base = http.server.HTTPServer
-                DocServer.handler = DocHandler
-                DocHandler.MessageClass = email.message.Message
-                DocHandler.urlhandler = staticmethod(self.urlhandler)
-                docsvr = DocServer(self.host, self.port, self.ready)
-                self.docserver = docsvr
-                docsvr.serve_until_quit()
-            except Exception as err:
-                self.error = err
-
-        def ready(self, server):
-            self.serving = True
-            self.host = server.host
-            self.port = server.server_port
-            self.url = 'http://%s:%d/' % (self.host, self.port)
-
-        def stop(self):
-            """Stop the server and this thread nicely"""
-            self.docserver.quit = True
-            self.join()
-            # explicitly break a reference cycle: DocServer.callback
-            # has indirectly a reference to ServerThread.
-            self.docserver = None
-            self.serving = False
-            self.url = None
-
-    thread = ServerThread(urlhandler, hostname, port)
-    thread.start()
-    # Wait until thread.serving is True and thread.docserver is set
-    # to make sure we are really up before returning.
-    while not thread.error and not (thread.serving and thread.docserver):
-        time.sleep(.01)
-    return thread
-
-
-def _url_handler(url, content_type="text/html"):
-    """The pydoc url handler for use with the pydoc server.
-
-    If the content_type is 'text/css', the _pydoc.css style
-    sheet is read and returned if it exists.
-
-    If the content_type is 'text/html', then the result of
-    get_html_page(url) is returned.
-    """
-    class _HTMLDoc(HTMLDoc):
-
-        def page(self, title, contents):
-            """Format an HTML page."""
-            css_path = "pydoc_data/_pydoc.css"
-            css_link = (
-                '<link rel="stylesheet" type="text/css" href="%s">' %
-                css_path)
-            return '''\
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
-<html lang="en">
-<head>
-<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
-<title>Pydoc: %s</title>
-%s</head><body>%s<div style="clear:both;padding-top:.5em;">%s</div>
-</body></html>''' % (title, css_link, html_navbar(), contents)
-
-
-    html = _HTMLDoc()
-
-    def html_navbar():
-        version = html.escape("%s [%s, %s]" % (platform.python_version(),
-                                               platform.python_build()[0],
-                                               platform.python_compiler()))
-        return """
-            <div style='float:left'>
-                Python %s<br>%s
-            </div>
-            <div style='float:right'>
-                <div style='text-align:center'>
-                  <a href="index.html">Module Index</a>
-                  : <a href="topics.html">Topics</a>
-                  : <a href="keywords.html">Keywords</a>
-                </div>
-                <div>
-                    <form action="get" style='display:inline;'>
-                      <input class="input-search" type=text name=key size="15">
-                      <input class="submit-search" type=submit value="Get">
-                    </form>&nbsp;
-                    <form action="search" style='display:inline;'>
-                      <input class="input-search" type=text name=key size="15">
-                      <input class="submit-search" type=submit value="Search">
-                    </form>
-                </div>
-            </div>
-            """ % (version, html.escape(platform.platform(terse=True)))
-
-    def html_index():
-        """Module Index page."""
-
-        def bltinlink(name):
-            return '<a href="%s.html">%s</a>' % (name, name)
-
-        heading = html.heading(
-            '<strong class="title">Index of Modules</strong>'
-        )
-        names = [name for name in sys.builtin_module_names
-                 if name != '__main__']
-        contents = html.multicolumn(names, bltinlink)
-        contents = [heading, '<p>' + html.bigsection(
-            'Built-in Modules', 'index', contents)]
-
-        seen = {}
-        for dir in sys.path:
-            contents.append(html.index(dir, seen))
-
-        contents.append(
-            '<p align=right class="heading-text grey"><strong>pydoc</strong> by Ka-Ping Yee'
-            '&lt;ping@lfw.org&gt;</p>')
-        return 'Index of Modules', ''.join(contents)
-
-    def html_search(key):
-        """Search results page."""
-        # scan for modules
-        search_result = []
-
-        def callback(path, modname, desc):
-            if modname[-9:] == '.__init__':
-                modname = modname[:-9] + ' (package)'
-            search_result.append((modname, desc and '- ' + desc))
-
-        with warnings.catch_warnings():
-            warnings.filterwarnings('ignore')  # ignore problems during import
-            def onerror(modname):
-                pass
-            ModuleScanner().run(callback, key, onerror=onerror)
-
-        # format page
-        def bltinlink(name):
-            return '<a href="%s.html">%s</a>' % (name, name)
-
-        results = []
-        heading = html.heading(
-            '<strong class="title">Search Results</strong>',
-        )
-        for name, desc in search_result:
-            results.append(bltinlink(name) + desc)
-        contents = heading + html.bigsection(
-            'key = %s' % key, 'index', '<br>'.join(results))
-        return 'Search Results', contents
-
-    def html_topics():
-        """Index of topic texts available."""
-
-        def bltinlink(name):
-            return '<a href="topic?key=%s">%s</a>' % (name, name)
-
-        heading = html.heading(
-            '<strong class="title">INDEX</strong>',
-        )
-        names = sorted(Helper.topics.keys())
-
-        contents = html.multicolumn(names, bltinlink)
-        contents = heading + html.bigsection(
-            'Topics', 'index', contents)
-        return 'Topics', contents
-
-    def html_keywords():
-        """Index of keywords."""
-        heading = html.heading(
-            '<strong class="title">INDEX</strong>',
-        )
-        names = sorted(Helper.keywords.keys())
-
-        def bltinlink(name):
-            return '<a href="topic?key=%s">%s</a>' % (name, name)
-
-        contents = html.multicolumn(names, bltinlink)
-        contents = heading + html.bigsection(
-            'Keywords', 'index', contents)
-        return 'Keywords', contents
-
-    def html_topicpage(topic):
-        """Topic or keyword help page."""
-        buf = io.StringIO()
-        htmlhelp = Helper(buf, buf)
-        contents, xrefs = htmlhelp._gettopic(topic)
-        if topic in htmlhelp.keywords:
-            title = 'KEYWORD'
-        else:
-            title = 'TOPIC'
-        heading = html.heading(
-            '<strong class="title">%s</strong>' % title,
-        )
-        contents = '<pre>%s</pre>' % html.markup(contents)
-        contents = html.bigsection(topic, 'index', contents)
-        if xrefs:
-            xrefs = sorted(xrefs.split())
-
-            def bltinlink(name):
-                return '<a href="topic?key=%s">%s</a>' % (name, name)
-
-            xrefs = html.multicolumn(xrefs, bltinlink)
-            xrefs = html.section('Related help topics: ', 'index', xrefs)
-        return ('%s %s' % (title, topic),
-                ''.join((heading, contents, xrefs)))
-
-    def html_getobj(url):
-        obj = locate(url, forceload=1)
-        if obj is None and url != 'None':
-            raise ValueError('could not find object')
-        title = describe(obj)
-        content = html.document(obj, url)
-        return title, content
-
-    def html_error(url, exc):
-        heading = html.heading(
-            '<strong class="title">Error</strong>',
-        )
-        contents = '<br>'.join(html.escape(line) for line in
-                               format_exception_only(type(exc), exc))
-        contents = heading + html.bigsection(url, 'error', contents)
-        return "Error - %s" % url, contents
-
-    def get_html_page(url):
-        """Generate an HTML page for url."""
-        complete_url = url
-        if url.endswith('.html'):
-            url = url[:-5]
-        try:
-            if url in ("", "index"):
-                title, content = html_index()
-            elif url == "topics":
-                title, content = html_topics()
-            elif url == "keywords":
-                title, content = html_keywords()
-            elif '=' in url:
-                op, _, url = url.partition('=')
-                if op == "search?key":
-                    title, content = html_search(url)
-                elif op == "topic?key":
-                    # try topics first, then objects.
-                    try:
-                        title, content = html_topicpage(url)
-                    except ValueError:
-                        title, content = html_getobj(url)
-                elif op == "get?key":
-                    # try objects first, then topics.
-                    if url in ("", "index"):
-                        title, content = html_index()
-                    else:
-                        try:
-                            title, content = html_getobj(url)
-                        except ValueError:
-                            title, content = html_topicpage(url)
-                else:
-                    raise ValueError('bad pydoc url')
-            else:
-                title, content = html_getobj(url)
-        except Exception as exc:
-            # Catch any errors and display them in an error page.
-            title, content = html_error(complete_url, exc)
-        return html.page(title, content)
-
-    if url.startswith('/'):
-        url = url[1:]
-    if content_type == 'text/css':
-        path_here = os.path.dirname(os.path.realpath(__file__))
-        css_path = os.path.join(path_here, url)
-        with open(css_path) as fp:
-            return ''.join(fp.readlines())
-    elif content_type == 'text/html':
-        return get_html_page(url)
-    # Errors outside the url handler are caught by the server.
-    raise TypeError('unknown content type %r for url %s' % (content_type, url))
-
-
-def browse(port=0, *, open_browser=True, hostname='localhost'):
-    """Start the enhanced pydoc web server and open a web browser.
-
-    Use port '0' to start the server on an arbitrary port.
-    Set open_browser to False to suppress opening a browser.
-    """
-    import webbrowser
-    serverthread = _start_server(_url_handler, hostname, port)
-    if serverthread.error:
-        print(serverthread.error)
-        return
-    if serverthread.serving:
-        server_help_msg = 'Server commands: [b]rowser, [q]uit'
-        if open_browser:
-            webbrowser.open(serverthread.url)
-        try:
-            print('Server ready at', serverthread.url)
-            print(server_help_msg)
-            while serverthread.serving:
-                cmd = input('server> ')
-                cmd = cmd.lower()
-                if cmd == 'q':
-                    break
-                elif cmd == 'b':
-                    webbrowser.open(serverthread.url)
-                else:
-                    print(server_help_msg)
-        except (KeyboardInterrupt, EOFError):
-            print()
-        finally:
-            if serverthread.serving:
-                serverthread.stop()
-            print('Server stopped')
-
-
-# -------------------------------------------------- command-line interface
-
-def ispath(x):
-    return isinstance(x, str) and x.find(os.sep) >= 0
-
-def _get_revised_path(given_path, argv0):
-    """Ensures current directory is on returned path, and argv0 directory is not
-
-    Exception: argv0 dir is left alone if it's also pydoc's directory.
-
-    Returns a new path entry list, or None if no adjustment is needed.
-    """
-    # Scripts may get the current directory in their path by default if they're
-    # run with the -m switch, or directly from the current directory.
-    # The interactive prompt also allows imports from the current directory.
-
-    # Accordingly, if the current directory is already present, don't make
-    # any changes to the given_path
-    if '' in given_path or os.curdir in given_path or os.getcwd() in given_path:
-        return None
-
-    # Otherwise, add the current directory to the given path, and remove the
-    # script directory (as long as the latter isn't also pydoc's directory).
-    stdlib_dir = os.path.dirname(__file__)
-    script_dir = os.path.dirname(argv0)
-    revised_path = given_path.copy()
-    if script_dir in given_path and not os.path.samefile(script_dir, stdlib_dir):
-        revised_path.remove(script_dir)
-    revised_path.insert(0, os.getcwd())
-    return revised_path
-
-
-# Note: the tests only cover _get_revised_path, not _adjust_cli_sys_path itself
-def _adjust_cli_sys_path():
-    """Ensures current directory is on sys.path, and __main__ directory is not.
-
-    Exception: __main__ dir is left alone if it's also pydoc's directory.
-    """
-    revised_path = _get_revised_path(sys.path, sys.argv[0])
-    if revised_path is not None:
-        sys.path[:] = revised_path
-
-
-def cli():
-    """Command-line interface (looks at sys.argv to decide what to do)."""
-    import getopt
-    class BadUsage(Exception): pass
-
-    _adjust_cli_sys_path()
-
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], 'bk:n:p:w')
-        writing = False
-        start_server = False
-        open_browser = False
-        port = 0
-        hostname = 'localhost'
-        for opt, val in opts:
-            if opt == '-b':
-                start_server = True
-                open_browser = True
-            if opt == '-k':
-                apropos(val)
-                return
-            if opt == '-p':
-                start_server = True
-                port = val
-            if opt == '-w':
-                writing = True
-            if opt == '-n':
-                start_server = True
-                hostname = val
-
-        if start_server:
-            browse(port, hostname=hostname, open_browser=open_browser)
-            return
-
-        if not args: raise BadUsage
-        for arg in args:
-            if ispath(arg) and not os.path.exists(arg):
-                print('file %r does not exist' % arg)
-                sys.exit(1)
-            try:
-                if ispath(arg) and os.path.isfile(arg):
-                    arg = importfile(arg)
-                if writing:
-                    if ispath(arg) and os.path.isdir(arg):
-                        writedocs(arg)
-                    else:
-                        writedoc(arg)
-                else:
-                    help.help(arg, is_cli=True)
-            except (ImportError, ErrorDuringImport) as value:
-                print(value)
-                sys.exit(1)
-
-    except (getopt.error, BadUsage):
-        cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
-        print("""pydoc - the Python documentation tool
-
-{cmd} <name> ...
-    Show text documentation on something.  <name> may be the name of a
-    Python keyword, topic, function, module, or package, or a dotted
-    reference to a class or function within a module or module in a
-    package.  If <name> contains a '{sep}', it is used as the path to a
-    Python source file to document. If name is 'keywords', 'topics',
-    or 'modules', a listing of these things is displayed.
-
-{cmd} -k <keyword>
-    Search for a keyword in the synopsis lines of all available modules.
-
-{cmd} -n <hostname>
-    Start an HTTP server with the given hostname (default: localhost).
-
-{cmd} -p <port>
-    Start an HTTP server on the given port on the local machine.  Port
-    number 0 can be used to get an arbitrary unused port.
-
-{cmd} -b
-    Start an HTTP server on an arbitrary unused port and open a web browser
-    to interactively browse documentation.  This option can be used in
-    combination with -n and/or -p.
-
-{cmd} -w <name> ...
-    Write out the HTML documentation for a module to a file in the current
-    directory.  If <name> contains a '{sep}', it is treated as a filename; if
-    it names a directory, documentation is written for all the contents.
-""".format(cmd=cmd, sep=os.sep)) - -if __name__ == '__main__': - cli() diff --git a/Python313_13_x86_Template/Lib/pydoc_data/module_docs.py b/Python313_13_x86_Template/Lib/pydoc_data/module_docs.py deleted file mode 100644 index 8c401360..00000000 --- a/Python313_13_x86_Template/Lib/pydoc_data/module_docs.py +++ /dev/null @@ -1,314 +0,0 @@ -# Autogenerated by Sphinx on Tue Apr 7 20:18:56 2026 -# as part of the release process. - -module_docs = { - '__future__': '__future__#module-__future__', - '__main__': '__main__#module-__main__', - '_thread': '_thread#module-_thread', - '_tkinter': 'tkinter#module-_tkinter', - 'abc': 'abc#module-abc', - 'aifc': 'aifc#module-aifc', - 'argparse': 'argparse#module-argparse', - 'array': 'array#module-array', - 'ast': 'ast#module-ast', - 'asynchat': 'asynchat#module-asynchat', - 'asyncio': 'asyncio#module-asyncio', - 'asyncore': 'asyncore#module-asyncore', - 'atexit': 'atexit#module-atexit', - 'audioop': 'audioop#module-audioop', - 'base64': 'base64#module-base64', - 'bdb': 'bdb#module-bdb', - 'binascii': 'binascii#module-binascii', - 'bisect': 'bisect#module-bisect', - 'builtins': 'builtins#module-builtins', - 'bz2': 'bz2#module-bz2', - 'cProfile': 'profile#module-cProfile', - 'calendar': 'calendar#module-calendar', - 'cgi': 'cgi#module-cgi', - 'cgitb': 'cgitb#module-cgitb', - 'chunk': 'chunk#module-chunk', - 'cmath': 'cmath#module-cmath', - 'cmd': 'cmd#module-cmd', - 'code': 'code#module-code', - 'codecs': 'codecs#module-codecs', - 'codeop': 'codeop#module-codeop', - 'collections': 'collections#module-collections', - 'collections.abc': 'collections.abc#module-collections.abc', - 'colorsys': 'colorsys#module-colorsys', - 'compileall': 'compileall#module-compileall', - 'concurrent.futures': 'concurrent.futures#module-concurrent.futures', - 'configparser': 'configparser#module-configparser', - 'contextlib': 'contextlib#module-contextlib', - 'contextvars': 'contextvars#module-contextvars', - 'copy': 'copy#module-copy', - 'copyreg': 'copyreg#module-copyreg', - 'crypt': 'crypt#module-crypt', - 'csv': 'csv#module-csv', - 'ctypes': 'ctypes#module-ctypes', - 'curses': 'curses#module-curses', - 'curses.ascii': 'curses.ascii#module-curses.ascii', - 'curses.panel': 'curses.panel#module-curses.panel', - 'curses.textpad': 'curses#module-curses.textpad', - 'dataclasses': 'dataclasses#module-dataclasses', - 'datetime': 'datetime#module-datetime', - 'dbm': 'dbm#module-dbm', - 'dbm.dumb': 'dbm#module-dbm.dumb', - 'dbm.gnu': 'dbm#module-dbm.gnu', - 'dbm.ndbm': 'dbm#module-dbm.ndbm', - 'dbm.sqlite3': 'dbm#module-dbm.sqlite3', - 'decimal': 'decimal#module-decimal', - 'difflib': 'difflib#module-difflib', - 'dis': 'dis#module-dis', - 'distutils': 'distutils#module-distutils', - 'doctest': 'doctest#module-doctest', - 'email': 'email#module-email', - 'email.charset': 'email.charset#module-email.charset', - 'email.contentmanager': 'email.contentmanager#module-email.contentmanager', - 'email.encoders': 'email.encoders#module-email.encoders', - 'email.errors': 'email.errors#module-email.errors', - 'email.generator': 'email.generator#module-email.generator', - 'email.header': 'email.header#module-email.header', - 'email.headerregistry': 'email.headerregistry#module-email.headerregistry', - 'email.iterators': 'email.iterators#module-email.iterators', - 'email.message': 'email.message#module-email.message', - 'email.mime': 'email.mime#module-email.mime', - 'email.mime.application': 'email.mime#module-email.mime.application', - 'email.mime.audio': 'email.mime#module-email.mime.audio', 
- 'email.mime.base': 'email.mime#module-email.mime.base', - 'email.mime.image': 'email.mime#module-email.mime.image', - 'email.mime.message': 'email.mime#module-email.mime.message', - 'email.mime.multipart': 'email.mime#module-email.mime.multipart', - 'email.mime.nonmultipart': 'email.mime#module-email.mime.nonmultipart', - 'email.mime.text': 'email.mime#module-email.mime.text', - 'email.parser': 'email.parser#module-email.parser', - 'email.policy': 'email.policy#module-email.policy', - 'email.utils': 'email.utils#module-email.utils', - 'encodings': 'codecs#module-encodings', - 'encodings.idna': 'codecs#module-encodings.idna', - 'encodings.mbcs': 'codecs#module-encodings.mbcs', - 'encodings.utf_8_sig': 'codecs#module-encodings.utf_8_sig', - 'ensurepip': 'ensurepip#module-ensurepip', - 'enum': 'enum#module-enum', - 'errno': 'errno#module-errno', - 'faulthandler': 'faulthandler#module-faulthandler', - 'fcntl': 'fcntl#module-fcntl', - 'filecmp': 'filecmp#module-filecmp', - 'fileinput': 'fileinput#module-fileinput', - 'fnmatch': 'fnmatch#module-fnmatch', - 'fractions': 'fractions#module-fractions', - 'ftplib': 'ftplib#module-ftplib', - 'functools': 'functools#module-functools', - 'gc': 'gc#module-gc', - 'getopt': 'getopt#module-getopt', - 'getpass': 'getpass#module-getpass', - 'gettext': 'gettext#module-gettext', - 'glob': 'glob#module-glob', - 'graphlib': 'graphlib#module-graphlib', - 'grp': 'grp#module-grp', - 'gzip': 'gzip#module-gzip', - 'hashlib': 'hashlib#module-hashlib', - 'heapq': 'heapq#module-heapq', - 'hmac': 'hmac#module-hmac', - 'html': 'html#module-html', - 'html.entities': 'html.entities#module-html.entities', - 'html.parser': 'html.parser#module-html.parser', - 'http': 'http#module-http', - 'http.client': 'http.client#module-http.client', - 'http.cookiejar': 'http.cookiejar#module-http.cookiejar', - 'http.cookies': 'http.cookies#module-http.cookies', - 'http.server': 'http.server#module-http.server', - 'idlelib': 'idle#module-idlelib', - 'imaplib': 'imaplib#module-imaplib', - 'imghdr': 'imghdr#module-imghdr', - 'imp': 'imp#module-imp', - 'importlib': 'importlib#module-importlib', - 'importlib.abc': 'importlib#module-importlib.abc', - 'importlib.machinery': 'importlib#module-importlib.machinery', - 'importlib.metadata': 'importlib.metadata#module-importlib.metadata', - 'importlib.resources': 'importlib.resources#module-importlib.resources', - 'importlib.resources.abc': 'importlib.resources.abc#module-importlib.resources.abc', - 'importlib.util': 'importlib#module-importlib.util', - 'inspect': 'inspect#module-inspect', - 'io': 'io#module-io', - 'ipaddress': 'ipaddress#module-ipaddress', - 'itertools': 'itertools#module-itertools', - 'json': 'json#module-json', - 'json.tool': 'json#module-json.tool', - 'keyword': 'keyword#module-keyword', - 'linecache': 'linecache#module-linecache', - 'locale': 'locale#module-locale', - 'logging': 'logging#module-logging', - 'logging.config': 'logging.config#module-logging.config', - 'logging.handlers': 'logging.handlers#module-logging.handlers', - 'lzma': 'lzma#module-lzma', - 'mailbox': 'mailbox#module-mailbox', - 'mailcap': 'mailcap#module-mailcap', - 'marshal': 'marshal#module-marshal', - 'math': 'math#module-math', - 'mimetypes': 'mimetypes#module-mimetypes', - 'mmap': 'mmap#module-mmap', - 'modulefinder': 'modulefinder#module-modulefinder', - 'msilib': 'msilib#module-msilib', - 'msvcrt': 'msvcrt#module-msvcrt', - 'multiprocessing': 'multiprocessing#module-multiprocessing', - 'multiprocessing.connection': 
'multiprocessing#module-multiprocessing.connection', - 'multiprocessing.dummy': 'multiprocessing#module-multiprocessing.dummy', - 'multiprocessing.managers': 'multiprocessing#module-multiprocessing.managers', - 'multiprocessing.pool': 'multiprocessing#module-multiprocessing.pool', - 'multiprocessing.shared_memory': 'multiprocessing.shared_memory#module-multiprocessing.shared_memory', - 'multiprocessing.sharedctypes': 'multiprocessing#module-multiprocessing.sharedctypes', - 'netrc': 'netrc#module-netrc', - 'nis': 'nis#module-nis', - 'nntplib': 'nntplib#module-nntplib', - 'numbers': 'numbers#module-numbers', - 'operator': 'operator#module-operator', - 'optparse': 'optparse#module-optparse', - 'os': 'os#module-os', - 'os.path': 'os.path#module-os.path', - 'ossaudiodev': 'ossaudiodev#module-ossaudiodev', - 'pathlib': 'pathlib#module-pathlib', - 'pdb': 'pdb#module-pdb', - 'pickle': 'pickle#module-pickle', - 'pickletools': 'pickletools#module-pickletools', - 'pipes': 'pipes#module-pipes', - 'pkgutil': 'pkgutil#module-pkgutil', - 'platform': 'platform#module-platform', - 'plistlib': 'plistlib#module-plistlib', - 'poplib': 'poplib#module-poplib', - 'posix': 'posix#module-posix', - 'pprint': 'pprint#module-pprint', - 'profile': 'profile#module-profile', - 'pstats': 'profile#module-pstats', - 'pty': 'pty#module-pty', - 'pwd': 'pwd#module-pwd', - 'py_compile': 'py_compile#module-py_compile', - 'pyclbr': 'pyclbr#module-pyclbr', - 'pydoc': 'pydoc#module-pydoc', - 'queue': 'queue#module-queue', - 'quopri': 'quopri#module-quopri', - 'random': 'random#module-random', - 're': 're#module-re', - 'readline': 'readline#module-readline', - 'reprlib': 'reprlib#module-reprlib', - 'resource': 'resource#module-resource', - 'rlcompleter': 'rlcompleter#module-rlcompleter', - 'runpy': 'runpy#module-runpy', - 'sched': 'sched#module-sched', - 'secrets': 'secrets#module-secrets', - 'select': 'select#module-select', - 'selectors': 'selectors#module-selectors', - 'shelve': 'shelve#module-shelve', - 'shlex': 'shlex#module-shlex', - 'shutil': 'shutil#module-shutil', - 'signal': 'signal#module-signal', - 'site': 'site#module-site', - 'sitecustomize': 'site#module-sitecustomize', - 'smtpd': 'smtpd#module-smtpd', - 'smtplib': 'smtplib#module-smtplib', - 'sndhdr': 'sndhdr#module-sndhdr', - 'socket': 'socket#module-socket', - 'socketserver': 'socketserver#module-socketserver', - 'spwd': 'spwd#module-spwd', - 'sqlite3': 'sqlite3#module-sqlite3', - 'ssl': 'ssl#module-ssl', - 'stat': 'stat#module-stat', - 'statistics': 'statistics#module-statistics', - 'string': 'string#module-string', - 'stringprep': 'stringprep#module-stringprep', - 'struct': 'struct#module-struct', - 'subprocess': 'subprocess#module-subprocess', - 'sunau': 'sunau#module-sunau', - 'symtable': 'symtable#module-symtable', - 'sys': 'sys#module-sys', - 'sys.monitoring': 'sys.monitoring#module-sys.monitoring', - 'sysconfig': 'sysconfig#module-sysconfig', - 'syslog': 'syslog#module-syslog', - 'tabnanny': 'tabnanny#module-tabnanny', - 'tarfile': 'tarfile#module-tarfile', - 'telnetlib': 'telnetlib#module-telnetlib', - 'tempfile': 'tempfile#module-tempfile', - 'termios': 'termios#module-termios', - 'test': 'test#module-test', - 'test.regrtest': 'test#module-test.regrtest', - 'test.support': 'test#module-test.support', - 'test.support.bytecode_helper': 'test#module-test.support.bytecode_helper', - 'test.support.import_helper': 'test#module-test.support.import_helper', - 'test.support.os_helper': 'test#module-test.support.os_helper', - 'test.support.script_helper': 
'test#module-test.support.script_helper', - 'test.support.socket_helper': 'test#module-test.support.socket_helper', - 'test.support.threading_helper': 'test#module-test.support.threading_helper', - 'test.support.warnings_helper': 'test#module-test.support.warnings_helper', - 'textwrap': 'textwrap#module-textwrap', - 'threading': 'threading#module-threading', - 'time': 'time#module-time', - 'timeit': 'timeit#module-timeit', - 'tkinter': 'tkinter#module-tkinter', - 'tkinter.colorchooser': 'tkinter.colorchooser#module-tkinter.colorchooser', - 'tkinter.commondialog': 'dialog#module-tkinter.commondialog', - 'tkinter.dnd': 'tkinter.dnd#module-tkinter.dnd', - 'tkinter.filedialog': 'dialog#module-tkinter.filedialog', - 'tkinter.font': 'tkinter.font#module-tkinter.font', - 'tkinter.messagebox': 'tkinter.messagebox#module-tkinter.messagebox', - 'tkinter.scrolledtext': 'tkinter.scrolledtext#module-tkinter.scrolledtext', - 'tkinter.simpledialog': 'dialog#module-tkinter.simpledialog', - 'tkinter.ttk': 'tkinter.ttk#module-tkinter.ttk', - 'token': 'token#module-token', - 'tokenize': 'tokenize#module-tokenize', - 'tomllib': 'tomllib#module-tomllib', - 'trace': 'trace#module-trace', - 'traceback': 'traceback#module-traceback', - 'tracemalloc': 'tracemalloc#module-tracemalloc', - 'tty': 'tty#module-tty', - 'turtle': 'turtle#module-turtle', - 'turtledemo': 'turtle#module-turtledemo', - 'types': 'types#module-types', - 'typing': 'typing#module-typing', - 'unicodedata': 'unicodedata#module-unicodedata', - 'unittest': 'unittest#module-unittest', - 'unittest.mock': 'unittest.mock#module-unittest.mock', - 'urllib': 'urllib#module-urllib', - 'urllib.error': 'urllib.error#module-urllib.error', - 'urllib.parse': 'urllib.parse#module-urllib.parse', - 'urllib.request': 'urllib.request#module-urllib.request', - 'urllib.response': 'urllib.request#module-urllib.response', - 'urllib.robotparser': 'urllib.robotparser#module-urllib.robotparser', - 'usercustomize': 'site#module-usercustomize', - 'uu': 'uu#module-uu', - 'uuid': 'uuid#module-uuid', - 'venv': 'venv#module-venv', - 'warnings': 'warnings#module-warnings', - 'wave': 'wave#module-wave', - 'weakref': 'weakref#module-weakref', - 'webbrowser': 'webbrowser#module-webbrowser', - 'winreg': 'winreg#module-winreg', - 'winsound': 'winsound#module-winsound', - 'wsgiref': 'wsgiref#module-wsgiref', - 'wsgiref.handlers': 'wsgiref#module-wsgiref.handlers', - 'wsgiref.headers': 'wsgiref#module-wsgiref.headers', - 'wsgiref.simple_server': 'wsgiref#module-wsgiref.simple_server', - 'wsgiref.types': 'wsgiref#module-wsgiref.types', - 'wsgiref.util': 'wsgiref#module-wsgiref.util', - 'wsgiref.validate': 'wsgiref#module-wsgiref.validate', - 'xdrlib': 'xdrlib#module-xdrlib', - 'xml': 'xml#module-xml', - 'xml.dom': 'xml.dom#module-xml.dom', - 'xml.dom.minidom': 'xml.dom.minidom#module-xml.dom.minidom', - 'xml.dom.pulldom': 'xml.dom.pulldom#module-xml.dom.pulldom', - 'xml.etree.ElementInclude': 'xml.etree.elementtree#module-xml.etree.ElementInclude', - 'xml.etree.ElementTree': 'xml.etree.elementtree#module-xml.etree.ElementTree', - 'xml.parsers.expat': 'pyexpat#module-xml.parsers.expat', - 'xml.parsers.expat.errors': 'pyexpat#module-xml.parsers.expat.errors', - 'xml.parsers.expat.model': 'pyexpat#module-xml.parsers.expat.model', - 'xml.sax': 'xml.sax#module-xml.sax', - 'xml.sax.handler': 'xml.sax.handler#module-xml.sax.handler', - 'xml.sax.saxutils': 'xml.sax.utils#module-xml.sax.saxutils', - 'xml.sax.xmlreader': 'xml.sax.reader#module-xml.sax.xmlreader', - 'xmlrpc': 'xmlrpc#module-xmlrpc', 
- 'xmlrpc.client': 'xmlrpc.client#module-xmlrpc.client', - 'xmlrpc.server': 'xmlrpc.server#module-xmlrpc.server', - 'zipapp': 'zipapp#module-zipapp', - 'zipfile': 'zipfile#module-zipfile', - 'zipimport': 'zipimport#module-zipimport', - 'zlib': 'zlib#module-zlib', - 'zoneinfo': 'zoneinfo#module-zoneinfo', -} diff --git a/Python313_13_x86_Template/Lib/pydoc_data/topics.py b/Python313_13_x86_Template/Lib/pydoc_data/topics.py deleted file mode 100644 index bbbd6a3e..00000000 --- a/Python313_13_x86_Template/Lib/pydoc_data/topics.py +++ /dev/null @@ -1,13095 +0,0 @@ -# Autogenerated by Sphinx on Tue Apr 7 20:18:56 2026 -# as part of the release process. - -topics = { - 'assert': r'''The "assert" statement -********************** - -Assert statements are a convenient way to insert debugging assertions -into a program: - - assert_stmt ::= "assert" expression ["," expression] - -The simple form, "assert expression", is equivalent to - - if __debug__: - if not expression: raise AssertionError - -The extended form, "assert expression1, expression2", is equivalent to - - if __debug__: - if not expression1: raise AssertionError(expression2) - -These equivalences assume that "__debug__" and "AssertionError" refer -to the built-in variables with those names. In the current -implementation, the built-in variable "__debug__" is "True" under -normal circumstances, "False" when optimization is requested (command -line option "-O"). The current code generator emits no code for an -"assert" statement when optimization is requested at compile time. -Note that it is unnecessary to include the source code for the -expression that failed in the error message; it will be displayed as -part of the stack trace. - -Assignments to "__debug__" are illegal. The value for the built-in -variable is determined when the interpreter starts. -''', - 'assignment': r'''Assignment statements -********************* - -Assignment statements are used to (re)bind names to values and to -modify attributes or items of mutable objects: - - assignment_stmt ::= (target_list "=")+ (starred_expression | yield_expression) - target_list ::= target ("," target)* [","] - target ::= identifier - | "(" [target_list] ")" - | "[" [target_list] "]" - | attributeref - | subscription - | slicing - | "*" target - -(See section Primaries for the syntax definitions for *attributeref*, -*subscription*, and *slicing*.) - -An assignment statement evaluates the expression list (remember that -this can be a single expression or a comma-separated list, the latter -yielding a tuple) and assigns the single resulting object to each of -the target lists, from left to right. - -Assignment is defined recursively depending on the form of the target -(list). When a target is part of a mutable object (an attribute -reference, subscription or slicing), the mutable object must -ultimately perform the assignment and decide about its validity, and -may raise an exception if the assignment is unacceptable. The rules -observed by various types and the exceptions raised are given with the -definition of the object types (see section The standard type -hierarchy). - -Assignment of an object to a target list, optionally enclosed in -parentheses or square brackets, is recursively defined as follows. - -* If the target list is a single target with no trailing comma, - optionally in parentheses, the object is assigned to that target. 
- -* Else: - - * If the target list contains one target prefixed with an asterisk, - called a “starred” target: The object must be an iterable with at - least as many items as there are targets in the target list, minus - one. The first items of the iterable are assigned, from left to - right, to the targets before the starred target. The final items - of the iterable are assigned to the targets after the starred - target. A list of the remaining items in the iterable is then - assigned to the starred target (the list can be empty). - - * Else: The object must be an iterable with the same number of items - as there are targets in the target list, and the items are - assigned, from left to right, to the corresponding targets. - -Assignment of an object to a single target is recursively defined as -follows. - -* If the target is an identifier (name): - - * If the name does not occur in a "global" or "nonlocal" statement - in the current code block: the name is bound to the object in the - current local namespace. - - * Otherwise: the name is bound to the object in the global namespace - or the outer namespace determined by "nonlocal", respectively. - - The name is rebound if it was already bound. This may cause the - reference count for the object previously bound to the name to reach - zero, causing the object to be deallocated and its destructor (if it - has one) to be called. - -* If the target is an attribute reference: The primary expression in - the reference is evaluated. It should yield an object with - assignable attributes; if this is not the case, "TypeError" is - raised. That object is then asked to assign the assigned object to - the given attribute; if it cannot perform the assignment, it raises - an exception (usually but not necessarily "AttributeError"). - - Note: If the object is a class instance and the attribute reference - occurs on both sides of the assignment operator, the right-hand side - expression, "a.x" can access either an instance attribute or (if no - instance attribute exists) a class attribute. The left-hand side - target "a.x" is always set as an instance attribute, creating it if - necessary. Thus, the two occurrences of "a.x" do not necessarily - refer to the same attribute: if the right-hand side expression - refers to a class attribute, the left-hand side creates a new - instance attribute as the target of the assignment: - - class Cls: - x = 3 # class variable - inst = Cls() - inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3 - - This description does not necessarily apply to descriptor - attributes, such as properties created with "property()". - -* If the target is a subscription: The primary expression in the - reference is evaluated. It should yield either a mutable sequence - object (such as a list) or a mapping object (such as a dictionary). - Next, the subscript expression is evaluated. - - If the primary is a mutable sequence object (such as a list), the - subscript must yield an integer. If it is negative, the sequence’s - length is added to it. The resulting value must be a nonnegative - integer less than the sequence’s length, and the sequence is asked - to assign the assigned object to its item with that index. If the - index is out of range, "IndexError" is raised (assignment to a - subscripted sequence cannot add new items to a list). 
- - If the primary is a mapping object (such as a dictionary), the - subscript must have a type compatible with the mapping’s key type, - and the mapping is then asked to create a key/value pair which maps - the subscript to the assigned object. This can either replace an - existing key/value pair with the same key value, or insert a new - key/value pair (if no key with the same value existed). - - For user-defined objects, the "__setitem__()" method is called with - appropriate arguments. - -* If the target is a slicing: The primary expression in the reference - is evaluated. It should yield a mutable sequence object (such as a - list). The assigned object should be a sequence object of the same - type. Next, the lower and upper bound expressions are evaluated, - insofar they are present; defaults are zero and the sequence’s - length. The bounds should evaluate to integers. If either bound is - negative, the sequence’s length is added to it. The resulting - bounds are clipped to lie between zero and the sequence’s length, - inclusive. Finally, the sequence object is asked to replace the - slice with the items of the assigned sequence. The length of the - slice may be different from the length of the assigned sequence, - thus changing the length of the target sequence, if the target - sequence allows it. - -**CPython implementation detail:** In the current implementation, the -syntax for targets is taken to be the same as for expressions, and -invalid syntax is rejected during the code generation phase, causing -less detailed error messages. - -Although the definition of assignment implies that overlaps between -the left-hand side and the right-hand side are ‘simultaneous’ (for -example "a, b = b, a" swaps two variables), overlaps *within* the -collection of assigned-to variables occur left-to-right, sometimes -resulting in confusion. For instance, the following program prints -"[0, 2]": - - x = [0, 1] - i = 0 - i, x[i] = 1, 2 # i is updated, then x[i] is updated - print(x) - -See also: - - **PEP 3132** - Extended Iterable Unpacking - The specification for the "*target" feature. - - -Augmented assignment statements -=============================== - -Augmented assignment is the combination, in a single statement, of a -binary operation and an assignment statement: - - augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression) - augtarget ::= identifier | attributeref | subscription | slicing - augop ::= "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**=" - | ">>=" | "<<=" | "&=" | "^=" | "|=" - -(See section Primaries for the syntax definitions of the last three -symbols.) - -An augmented assignment evaluates the target (which, unlike normal -assignment statements, cannot be an unpacking) and the expression -list, performs the binary operation specific to the type of assignment -on the two operands, and assigns the result to the original target. -The target is only evaluated once. - -An augmented assignment statement like "x += 1" can be rewritten as "x -= x + 1" to achieve a similar, but not exactly equal effect. In the -augmented version, "x" is only evaluated once. Also, when possible, -the actual operation is performed *in-place*, meaning that rather than -creating a new object and assigning that to the target, the old object -is modified instead. - -Unlike normal assignments, augmented assignments evaluate the left- -hand side *before* evaluating the right-hand side. 
For example, "a[i] -+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs -the addition, and lastly, it writes the result back to "a[i]". - -With the exception of assigning to tuples and multiple targets in a -single statement, the assignment done by augmented assignment -statements is handled the same way as normal assignments. Similarly, -with the exception of the possible *in-place* behavior, the binary -operation performed by augmented assignment is the same as the normal -binary operations. - -For targets which are attribute references, the same caveat about -class and instance attributes applies as for regular assignments. - - -Annotated assignment statements -=============================== - -*Annotation* assignment is the combination, in a single statement, of -a variable or attribute annotation and an optional assignment -statement: - - annotated_assignment_stmt ::= augtarget ":" expression - ["=" (starred_expression | yield_expression)] - -The difference from normal Assignment statements is that only a single -target is allowed. - -The assignment target is considered “simple” if it consists of a -single name that is not enclosed in parentheses. For simple assignment -targets, if in class or module scope, the annotations are evaluated -and stored in a special class or module attribute "__annotations__" -that is a dictionary mapping from variable names (mangled if private) -to evaluated annotations. This attribute is writable and is -automatically created at the start of class or module body execution, -if annotations are found statically. - -If the assignment target is not simple (an attribute, subscript node, -or parenthesized name), the annotation is evaluated if in class or -module scope, but not stored. - -If a name is annotated in a function scope, then this name is local -for that scope. Annotations are never evaluated and stored in function -scopes. - -If the right hand side is present, an annotated assignment performs -the actual assignment before evaluating annotations (where -applicable). If the right hand side is not present for an expression -target, then the interpreter evaluates the target except for the last -"__setitem__()" or "__setattr__()" call. - -See also: - - **PEP 526** - Syntax for Variable Annotations - The proposal that added syntax for annotating the types of - variables (including class variables and instance variables), - instead of expressing them through comments. - - **PEP 484** - Type hints - The proposal that added the "typing" module to provide a standard - syntax for type annotations that can be used in static analysis - tools and IDEs. - -Changed in version 3.8: Now annotated assignments allow the same -expressions in the right hand side as regular assignments. Previously, -some expressions (like un-parenthesized tuple expressions) caused a -syntax error. -''', - 'assignment-expressions': r'''Assignment expressions -********************** - - assignment_expression ::= [identifier ":="] expression - -An assignment expression (sometimes also called a “named expression” -or “walrus”) assigns an "expression" to an "identifier", while also -returning the value of the "expression". 
- -One common use case is when handling matched regular expressions: - - if matching := pattern.search(data): - do_something(matching) - -Or, when processing a file stream in chunks: - - while chunk := file.read(9000): - process(chunk) - -Assignment expressions must be surrounded by parentheses when used as -expression statements and when used as sub-expressions in slicing, -conditional, lambda, keyword-argument, and comprehension-if -expressions and in "assert", "with", and "assignment" statements. In -all other places where they can be used, parentheses are not required, -including in "if" and "while" statements. - -Added in version 3.8: See **PEP 572** for more details about -assignment expressions. -''', - 'async': r'''Coroutines -********** - -Added in version 3.5. - - -Coroutine function definition -============================= - - async_funcdef ::= [decorators] "async" "def" funcname "(" [parameter_list] ")" - ["->" expression] ":" suite - -Execution of Python coroutines can be suspended and resumed at many -points (see *coroutine*). "await" expressions, "async for" and "async -with" can only be used in the body of a coroutine function. - -Functions defined with "async def" syntax are always coroutine -functions, even if they do not contain "await" or "async" keywords. - -It is a "SyntaxError" to use a "yield from" expression inside the body -of a coroutine function. - -An example of a coroutine function: - - async def func(param1, param2): - do_stuff() - await some_coroutine() - -Changed in version 3.7: "await" and "async" are now keywords; -previously they were only treated as such inside the body of a -coroutine function. - - -The "async for" statement -========================= - - async_for_stmt ::= "async" for_stmt - -An *asynchronous iterable* provides an "__aiter__" method that -directly returns an *asynchronous iterator*, which can call -asynchronous code in its "__anext__" method. - -The "async for" statement allows convenient iteration over -asynchronous iterables. - -The following code: - - async for TARGET in ITER: - SUITE - else: - SUITE2 - -Is semantically equivalent to: - - iter = (ITER).__aiter__() - running = True - - while running: - try: - TARGET = await iter.__anext__() - except StopAsyncIteration: - running = False - else: - SUITE - else: - SUITE2 - -except that implicit special method lookup is used for "__aiter__()" -and "__anext__()". - -It is a "SyntaxError" to use an "async for" statement outside the body -of a coroutine function. - - -The "async with" statement -========================== - - async_with_stmt ::= "async" with_stmt - -An *asynchronous context manager* is a *context manager* that is able -to suspend execution in its *enter* and *exit* methods. - -The following code: - - async with EXPRESSION as TARGET: - SUITE - -is semantically equivalent to: - - manager = (EXPRESSION) - aenter = manager.__aenter__ - aexit = manager.__aexit__ - value = await aenter() - hit_except = False - - try: - TARGET = value - SUITE - except: - hit_except = True - if not await aexit(*sys.exc_info()): - raise - finally: - if not hit_except: - await aexit(None, None, None) - -except that implicit special method lookup is used for "__aenter__()" -and "__aexit__()". - -It is a "SyntaxError" to use an "async with" statement outside the -body of a coroutine function. - -See also: - - **PEP 492** - Coroutines with async and await syntax - The proposal that made coroutines a proper standalone concept in - Python, and added supporting syntax. 
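The `async for` expansion documented in this deleted topic bottoms out in the `__aiter__`/`__anext__` protocol. A minimal runnable illustration of that protocol (the `Countdown` class is invented for this sketch, not part of the topic text):

```python
import asyncio

class Countdown:
    """Asynchronous iterable: __aiter__ returns an asynchronous iterator
    whose __anext__ coroutine raises StopAsyncIteration when exhausted."""

    def __init__(self, n):
        self.n = n

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self.n <= 0:
            raise StopAsyncIteration  # terminates the async for loop
        self.n -= 1
        return self.n + 1

async def main():
    async for i in Countdown(3):
        print(i)  # 3, 2, 1

asyncio.run(main())
```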
-''', - 'atom-identifiers': r'''Identifiers (Names) -******************* - -An identifier occurring as an atom is a name. See section Identifiers -and keywords for lexical definition and section Naming and binding for -documentation of naming and binding. - -When the name is bound to an object, evaluation of the atom yields -that object. When a name is not bound, an attempt to evaluate it -raises a "NameError" exception. - - -Private name mangling -===================== - -When an identifier that textually occurs in a class definition begins -with two or more underscore characters and does not end in two or more -underscores, it is considered a *private name* of that class. - -See also: The class specifications. - -More precisely, private names are transformed to a longer form before -code is generated for them. If the transformed name is longer than -255 characters, implementation-defined truncation may happen. - -The transformation is independent of the syntactical context in which -the identifier is used but only the following private identifiers are -mangled: - -* Any name used as the name of a variable that is assigned or read or - any name of an attribute being accessed. - - The "__name__" attribute of nested functions, classes, and type - aliases is however not mangled. - -* The name of imported modules, e.g., "__spam" in "import __spam". If - the module is part of a package (i.e., its name contains a dot), the - name is *not* mangled, e.g., the "__foo" in "import __foo.bar" is - not mangled. - -* The name of an imported member, e.g., "__f" in "from spam import - __f". - -The transformation rule is defined as follows: - -* The class name, with leading underscores removed and a single - leading underscore inserted, is inserted in front of the identifier, - e.g., the identifier "__spam" occurring in a class named "Foo", - "_Foo" or "__Foo" is transformed to "_Foo__spam". - -* If the class name consists only of underscores, the transformation - is the identity, e.g., the identifier "__spam" occurring in a class - named "_" or "__" is left as is. -''', - 'atom-literals': r'''Literals -******** - -Python supports string and bytes literals and various numeric -literals: - - literal ::= stringliteral | bytesliteral - | integer | floatnumber | imagnumber - -Evaluation of a literal yields an object of the given type (string, -bytes, integer, floating-point number, complex number) with the given -value. The value may be approximated in the case of floating-point -and imaginary (complex) literals. See section Literals for details. - -All literals correspond to immutable data types, and hence the -object’s identity is less important than its value. Multiple -evaluations of literals with the same value (either the same -occurrence in the program text or a different occurrence) may obtain -the same object or a different object with the same value. -''', - 'attribute-access': r'''Customizing attribute access -**************************** - -The following methods can be defined to customize the meaning of -attribute access (use of, assignment to, or deletion of "x.name") for -class instances. - -object.__getattr__(self, name) - - Called when the default attribute access fails with an - "AttributeError" (either "__getattribute__()" raises an - "AttributeError" because *name* is not an instance attribute or an - attribute in the class tree for "self"; or "__get__()" of a *name* - property raises "AttributeError"). 
This method should either - return the (computed) attribute value or raise an "AttributeError" - exception. The "object" class itself does not provide this method. - - Note that if the attribute is found through the normal mechanism, - "__getattr__()" is not called. (This is an intentional asymmetry - between "__getattr__()" and "__setattr__()".) This is done both for - efficiency reasons and because otherwise "__getattr__()" would have - no way to access other attributes of the instance. Note that at - least for instance variables, you can take total control by not - inserting any values in the instance attribute dictionary (but - instead inserting them in another object). See the - "__getattribute__()" method below for a way to actually get total - control over attribute access. - -object.__getattribute__(self, name) - - Called unconditionally to implement attribute accesses for - instances of the class. If the class also defines "__getattr__()", - the latter will not be called unless "__getattribute__()" either - calls it explicitly or raises an "AttributeError". This method - should return the (computed) attribute value or raise an - "AttributeError" exception. In order to avoid infinite recursion in - this method, its implementation should always call the base class - method with the same name to access any attributes it needs, for - example, "object.__getattribute__(self, name)". - - Note: - - This method may still be bypassed when looking up special methods - as the result of implicit invocation via language syntax or - built-in functions. See Special method lookup. - - For certain sensitive attribute accesses, raises an auditing event - "object.__getattr__" with arguments "obj" and "name". - -object.__setattr__(self, name, value) - - Called when an attribute assignment is attempted. This is called - instead of the normal mechanism (i.e. store the value in the - instance dictionary). *name* is the attribute name, *value* is the - value to be assigned to it. - - If "__setattr__()" wants to assign to an instance attribute, it - should call the base class method with the same name, for example, - "object.__setattr__(self, name, value)". - - For certain sensitive attribute assignments, raises an auditing - event "object.__setattr__" with arguments "obj", "name", "value". - -object.__delattr__(self, name) - - Like "__setattr__()" but for attribute deletion instead of - assignment. This should only be implemented if "del obj.name" is - meaningful for the object. - - For certain sensitive attribute deletions, raises an auditing event - "object.__delattr__" with arguments "obj" and "name". - -object.__dir__(self) - - Called when "dir()" is called on the object. An iterable must be - returned. "dir()" converts the returned iterable to a list and - sorts it. - - -Customizing module attribute access -=================================== - -module.__getattr__() -module.__dir__() - -Special names "__getattr__" and "__dir__" can be also used to -customize access to module attributes. The "__getattr__" function at -the module level should accept one argument which is the name of an -attribute and return the computed value or raise an "AttributeError". -If an attribute is not found on a module object through the normal -lookup, i.e. "object.__getattribute__()", then "__getattr__" is -searched in the module "__dict__" before raising an "AttributeError". -If found, it is called with the attribute name and the result is -returned. 
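A minimal sketch of a module body wiring up these two hooks as described above (the module and attribute names here are hypothetical, for illustration only):

```python
# spam.py -- hypothetical module demonstrating module-level __getattr__/__dir__
new_name = 42
_renamed = {"old_name": "new_name"}

def __getattr__(name):
    # Only reached when normal lookup in the module's globals fails.
    if name in _renamed:
        import warnings
        warnings.warn(f"{name} is deprecated; use {_renamed[name]}",
                      DeprecationWarning, stacklevel=2)
        return globals()[_renamed[name]]
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

def __dir__():
    # Overrides dir(spam): advertise both the real and the deprecated names.
    return sorted([*globals(), *_renamed])
```

With this in place, `import spam; spam.old_name` emits a DeprecationWarning and returns 42, while any other missing name raises AttributeError as usual.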
- -The "__dir__" function should accept no arguments, and return an -iterable of strings that represents the names accessible on module. If -present, this function overrides the standard "dir()" search on a -module. - -module.__class__ - -For a more fine grained customization of the module behavior (setting -attributes, properties, etc.), one can set the "__class__" attribute -of a module object to a subclass of "types.ModuleType". For example: - - import sys - from types import ModuleType - - class VerboseModule(ModuleType): - def __repr__(self): - return f'Verbose {self.__name__}' - - def __setattr__(self, attr, value): - print(f'Setting {attr}...') - super().__setattr__(attr, value) - - sys.modules[__name__].__class__ = VerboseModule - -Note: - - Defining module "__getattr__" and setting module "__class__" only - affect lookups made using the attribute access syntax – directly - accessing the module globals (whether by code within the module, or - via a reference to the module’s globals dictionary) is unaffected. - -Changed in version 3.5: "__class__" module attribute is now writable. - -Added in version 3.7: "__getattr__" and "__dir__" module attributes. - -See also: - - **PEP 562** - Module __getattr__ and __dir__ - Describes the "__getattr__" and "__dir__" functions on modules. - - -Implementing Descriptors -======================== - -The following methods only apply when an instance of the class -containing the method (a so-called *descriptor* class) appears in an -*owner* class (the descriptor must be in either the owner’s class -dictionary or in the class dictionary for one of its parents). In the -examples below, “the attribute” refers to the attribute whose name is -the key of the property in the owner class’ "__dict__". The "object" -class itself does not implement any of these protocols. - -object.__get__(self, instance, owner=None) - - Called to get the attribute of the owner class (class attribute - access) or of an instance of that class (instance attribute - access). The optional *owner* argument is the owner class, while - *instance* is the instance that the attribute was accessed through, - or "None" when the attribute is accessed through the *owner*. - - This method should return the computed attribute value or raise an - "AttributeError" exception. - - **PEP 252** specifies that "__get__()" is callable with one or two - arguments. Python’s own built-in descriptors support this - specification; however, it is likely that some third-party tools - have descriptors that require both arguments. Python’s own - "__getattribute__()" implementation always passes in both arguments - whether they are required or not. - -object.__set__(self, instance, value) - - Called to set the attribute on an instance *instance* of the owner - class to a new value, *value*. - - Note, adding "__set__()" or "__delete__()" changes the kind of - descriptor to a “data descriptor”. See Invoking Descriptors for - more details. - -object.__delete__(self, instance) - - Called to delete the attribute on an instance *instance* of the - owner class. - -Instances of descriptors may also have the "__objclass__" attribute -present: - -object.__objclass__ - - The attribute "__objclass__" is interpreted by the "inspect" module - as specifying the class where this object was defined (setting this - appropriately can assist in runtime introspection of dynamic class - attributes). 
For callables, it may indicate that an instance of the - given type (or a subclass) is expected or required as the first - positional argument (for example, CPython sets this attribute for - unbound methods that are implemented in C). - - -Invoking Descriptors -==================== - -In general, a descriptor is an object attribute with “binding -behavior”, one whose attribute access has been overridden by methods -in the descriptor protocol: "__get__()", "__set__()", and -"__delete__()". If any of those methods are defined for an object, it -is said to be a descriptor. - -The default behavior for attribute access is to get, set, or delete -the attribute from an object’s dictionary. For instance, "a.x" has a -lookup chain starting with "a.__dict__['x']", then -"type(a).__dict__['x']", and continuing through the base classes of -"type(a)" excluding metaclasses. - -However, if the looked-up value is an object defining one of the -descriptor methods, then Python may override the default behavior and -invoke the descriptor method instead. Where this occurs in the -precedence chain depends on which descriptor methods were defined and -how they were called. - -The starting point for descriptor invocation is a binding, "a.x". How -the arguments are assembled depends on "a": - -Direct Call - The simplest and least common call is when user code directly - invokes a descriptor method: "x.__get__(a)". - -Instance Binding - If binding to an object instance, "a.x" is transformed into the - call: "type(a).__dict__['x'].__get__(a, type(a))". - -Class Binding - If binding to a class, "A.x" is transformed into the call: - "A.__dict__['x'].__get__(None, A)". - -Super Binding - A dotted lookup such as "super(A, a).x" searches - "a.__class__.__mro__" for a base class "B" following "A" and then - returns "B.__dict__['x'].__get__(a, A)". If not a descriptor, "x" - is returned unchanged. - -For instance bindings, the precedence of descriptor invocation depends -on which descriptor methods are defined. A descriptor can define any -combination of "__get__()", "__set__()" and "__delete__()". If it -does not define "__get__()", then accessing the attribute will return -the descriptor object itself unless there is a value in the object’s -instance dictionary. If the descriptor defines "__set__()" and/or -"__delete__()", it is a data descriptor; if it defines neither, it is -a non-data descriptor. Normally, data descriptors define both -"__get__()" and "__set__()", while non-data descriptors have just the -"__get__()" method. Data descriptors with "__get__()" and "__set__()" -(and/or "__delete__()") defined always override a redefinition in an -instance dictionary. In contrast, non-data descriptors can be -overridden by instances. - -Python methods (including those decorated with "@staticmethod" and -"@classmethod") are implemented as non-data descriptors. Accordingly, -instances can redefine and override methods. This allows individual -instances to acquire behaviors that differ from other instances of the -same class. - -The "property()" function is implemented as a data descriptor. -Accordingly, instances cannot override the behavior of a property. - - -__slots__ -========= - -*__slots__* allow us to explicitly declare data members (like -properties) and deny the creation of "__dict__" and *__weakref__* -(unless explicitly declared in *__slots__* or available in a parent.) - -The space saved over using "__dict__" can be significant. Attribute -lookup speed can be significantly improved as well. 
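-
-As a short illustration (the class name is invented for this sketch),
-a class declaring *__slots__* rejects assignment to names it did not
-declare, because no per-instance "__dict__" is created:
-
-   >>> class Point:
-   ...     __slots__ = ('x', 'y')
-   ...
-   >>> p = Point()
-   >>> p.x = 1   # 'x' is declared in __slots__
-   >>> p.z = 3   # no slot and no __dict__, so this fails
-   Traceback (most recent call last):
-     ...
-   AttributeError: 'Point' object has no attribute 'z'
-
-(The exact "AttributeError" message can vary between Python versions.)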
- -object.__slots__ - - This class variable can be assigned a string, iterable, or sequence - of strings with variable names used by instances. *__slots__* - reserves space for the declared variables and prevents the - automatic creation of "__dict__" and *__weakref__* for each - instance. - -Notes on using *__slots__*: - -* When inheriting from a class without *__slots__*, the "__dict__" and - *__weakref__* attribute of the instances will always be accessible. - -* Without a "__dict__" variable, instances cannot be assigned new - variables not listed in the *__slots__* definition. Attempts to - assign to an unlisted variable name raises "AttributeError". If - dynamic assignment of new variables is desired, then add - "'__dict__'" to the sequence of strings in the *__slots__* - declaration. - -* Without a *__weakref__* variable for each instance, classes defining - *__slots__* do not support "weak references" to its instances. If - weak reference support is needed, then add "'__weakref__'" to the - sequence of strings in the *__slots__* declaration. - -* *__slots__* are implemented at the class level by creating - descriptors for each variable name. As a result, class attributes - cannot be used to set default values for instance variables defined - by *__slots__*; otherwise, the class attribute would overwrite the - descriptor assignment. - -* The action of a *__slots__* declaration is not limited to the class - where it is defined. *__slots__* declared in parents are available - in child classes. However, instances of a child subclass will get a - "__dict__" and *__weakref__* unless the subclass also defines - *__slots__* (which should only contain names of any *additional* - slots). - -* If a class defines a slot also defined in a base class, the instance - variable defined by the base class slot is inaccessible (except by - retrieving its descriptor directly from the base class). This - renders the meaning of the program undefined. In the future, a - check may be added to prevent this. - -* "TypeError" will be raised if nonempty *__slots__* are defined for a - class derived from a ""variable-length" built-in type" such as - "int", "bytes", and "tuple". - -* Any non-string *iterable* may be assigned to *__slots__*. - -* If a "dictionary" is used to assign *__slots__*, the dictionary keys - will be used as the slot names. The values of the dictionary can be - used to provide per-attribute docstrings that will be recognised by - "inspect.getdoc()" and displayed in the output of "help()". - -* "__class__" assignment works only if both classes have the same - *__slots__*. - -* Multiple inheritance with multiple slotted parent classes can be - used, but only one parent is allowed to have attributes created by - slots (the other bases must have empty slot layouts) - violations - raise "TypeError". - -* If an *iterator* is used for *__slots__* then a *descriptor* is - created for each of the iterator’s values. However, the *__slots__* - attribute will be an empty iterator. -''', - 'attribute-references': r'''Attribute references -******************** - -An attribute reference is a primary followed by a period and a name: - - attributeref ::= primary "." identifier - -The primary must evaluate to an object of a type that supports -attribute references, which most objects do. This object is then -asked to produce the attribute whose name is the identifier. The type -and value produced is determined by the object. Multiple evaluations -of the same attribute reference may yield different objects. 
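-
-For example (a minimal sketch; the class is invented for
-illustration), a property that builds a fresh object on each access
-shows two evaluations of the same attribute reference yielding
-distinct objects:
-
-   >>> class C:
-   ...     @property
-   ...     def attr(self):
-   ...         return object()   # a new object on every access
-   ...
-   >>> c = C()
-   >>> c.attr is c.attr
-   False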
- -This production can be customized by overriding the -"__getattribute__()" method or the "__getattr__()" method. The -"__getattribute__()" method is called first and either returns a value -or raises "AttributeError" if the attribute is not available. - -If an "AttributeError" is raised and the object has a "__getattr__()" -method, that method is called as a fallback. -''', - 'augassign': r'''Augmented assignment statements -******************************* - -Augmented assignment is the combination, in a single statement, of a -binary operation and an assignment statement: - - augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression) - augtarget ::= identifier | attributeref | subscription | slicing - augop ::= "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**=" - | ">>=" | "<<=" | "&=" | "^=" | "|=" - -(See section Primaries for the syntax definitions of the last three -symbols.) - -An augmented assignment evaluates the target (which, unlike normal -assignment statements, cannot be an unpacking) and the expression -list, performs the binary operation specific to the type of assignment -on the two operands, and assigns the result to the original target. -The target is only evaluated once. - -An augmented assignment statement like "x += 1" can be rewritten as "x -= x + 1" to achieve a similar, but not exactly equal effect. In the -augmented version, "x" is only evaluated once. Also, when possible, -the actual operation is performed *in-place*, meaning that rather than -creating a new object and assigning that to the target, the old object -is modified instead. - -Unlike normal assignments, augmented assignments evaluate the left- -hand side *before* evaluating the right-hand side. For example, "a[i] -+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs -the addition, and lastly, it writes the result back to "a[i]". - -With the exception of assigning to tuples and multiple targets in a -single statement, the assignment done by augmented assignment -statements is handled the same way as normal assignments. Similarly, -with the exception of the possible *in-place* behavior, the binary -operation performed by augmented assignment is the same as the normal -binary operations. - -For targets which are attribute references, the same caveat about -class and instance attributes applies as for regular assignments. -''', - 'await': r'''Await expression -**************** - -Suspend the execution of *coroutine* on an *awaitable* object. Can -only be used inside a *coroutine function*. - - await_expr ::= "await" primary - -Added in version 3.5. -''', - 'binary': r'''Binary arithmetic operations -**************************** - -The binary arithmetic operations have the conventional priority -levels. Note that some of these operations also apply to certain non- -numeric types. Apart from the power operator, there are only two -levels, one for multiplicative operators and one for additive -operators: - - m_expr ::= u_expr | m_expr "*" u_expr | m_expr "@" m_expr | - m_expr "//" u_expr | m_expr "/" u_expr | - m_expr "%" u_expr - a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr - -The "*" (multiplication) operator yields the product of its arguments. -The arguments must either both be numbers, or one argument must be an -integer and the other must be a sequence. In the former case, the -numbers are converted to a common type and then multiplied together. 
-In the latter case, sequence repetition is performed; a negative -repetition factor yields an empty sequence. - -This operation can be customized using the special "__mul__()" and -"__rmul__()" methods. - -The "@" (at) operator is intended to be used for matrix -multiplication. No builtin Python types implement this operator. - -This operation can be customized using the special "__matmul__()" and -"__rmatmul__()" methods. - -Added in version 3.5. - -The "/" (division) and "//" (floor division) operators yield the -quotient of their arguments. The numeric arguments are first -converted to a common type. Division of integers yields a float, while -floor division of integers results in an integer; the result is that -of mathematical division with the ‘floor’ function applied to the -result. Division by zero raises the "ZeroDivisionError" exception. - -The division operation can be customized using the special -"__truediv__()" and "__rtruediv__()" methods. The floor division -operation can be customized using the special "__floordiv__()" and -"__rfloordiv__()" methods. - -The "%" (modulo) operator yields the remainder from the division of -the first argument by the second. The numeric arguments are first -converted to a common type. A zero right argument raises the -"ZeroDivisionError" exception. The arguments may be floating-point -numbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 + -0.34".) The modulo operator always yields a result with the same sign -as its second operand (or zero); the absolute value of the result is -strictly smaller than the absolute value of the second operand [1]. - -The floor division and modulo operators are connected by the following -identity: "x == (x//y)*y + (x%y)". Floor division and modulo are also -connected with the built-in function "divmod()": "divmod(x, y) == -(x//y, x%y)". [2]. - -In addition to performing the modulo operation on numbers, the "%" -operator is also overloaded by string objects to perform old-style -string formatting (also known as interpolation). The syntax for -string formatting is described in the Python Library Reference, -section printf-style String Formatting. - -The *modulo* operation can be customized using the special "__mod__()" -and "__rmod__()" methods. - -The floor division operator, the modulo operator, and the "divmod()" -function are not defined for complex numbers. Instead, convert to a -floating-point number using the "abs()" function if appropriate. - -The "+" (addition) operator yields the sum of its arguments. The -arguments must either both be numbers or both be sequences of the same -type. In the former case, the numbers are converted to a common type -and then added together. In the latter case, the sequences are -concatenated. - -This operation can be customized using the special "__add__()" and -"__radd__()" methods. - -The "-" (subtraction) operator yields the difference of its arguments. -The numeric arguments are first converted to a common type. - -This operation can be customized using the special "__sub__()" and -"__rsub__()" methods. 
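-
-As a minimal sketch of such customization (the "Metres" class is
-invented for this example), defining "__add__()" customizes "+";
-returning "NotImplemented" lets Python try the reflected
-"__radd__()" of the other operand:
-
-   >>> class Metres:
-   ...     def __init__(self, value):
-   ...         self.value = value
-   ...     def __add__(self, other):
-   ...         if isinstance(other, Metres):
-   ...             return Metres(self.value + other.value)
-   ...         return NotImplemented
-   ...
-   >>> (Metres(2) + Metres(3)).value
-   5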
-''',
- 'bitwise': r'''Binary bitwise operations
-*************************
-
-Each of the three bitwise operations has a different priority level:
-
-   and_expr ::= shift_expr | and_expr "&" shift_expr
-   xor_expr ::= and_expr | xor_expr "^" and_expr
-   or_expr  ::= xor_expr | or_expr "|" xor_expr
-
-The "&" operator yields the bitwise AND of its arguments, which must
-be integers or one of them must be a custom object overriding
-"__and__()" or "__rand__()" special methods.
-
-The "^" operator yields the bitwise XOR (exclusive OR) of its
-arguments, which must be integers or one of them must be a custom
-object overriding "__xor__()" or "__rxor__()" special methods.
-
-The "|" operator yields the bitwise (inclusive) OR of its arguments,
-which must be integers or one of them must be a custom object
-overriding "__or__()" or "__ror__()" special methods.
-''',
- 'bltin-code-objects': r'''Code Objects
-************
-
-Code objects are used by the implementation to represent “pseudo-
-compiled” executable Python code such as a function body. They differ
-from function objects because they don’t contain a reference to their
-global execution environment. Code objects are returned by the built-
-in "compile()" function and can be extracted from function objects
-through their "__code__" attribute. See also the "code" module.
-
-Accessing "__code__" raises an auditing event "object.__getattr__"
-with arguments "obj" and ""__code__"".
-
-A code object can be executed or evaluated by passing it (instead of a
-source string) to the "exec()" or "eval()" built-in functions.
-
-See The standard type hierarchy for more information.
-''',
- 'bltin-ellipsis-object': r'''The Ellipsis Object
-*******************
-
-This object is commonly used to indicate that something is omitted. It
-supports no special operations. There is exactly one ellipsis object,
-named "Ellipsis" (a built-in name). "type(Ellipsis)()" produces the
-"Ellipsis" singleton.
-
-It is written as "Ellipsis" or "...".
-
-In typical use, "..." as the "Ellipsis" object appears in a few
-different places, for instance:
-
-* In type annotations, such as callable arguments or tuple elements.
-
-* As the body of a function instead of a pass statement.
-
-* In third-party libraries, such as NumPy’s slicing and striding.
-
-Python also uses three dots in ways that are not "Ellipsis" objects,
-for instance:
-
-* Doctest’s "ELLIPSIS", as a pattern for missing content.
-
-* The default Python prompt of the *interactive* shell when partial
-  input is incomplete.
-
-Lastly, the Python documentation often uses three dots in conventional
-English usage to mean omitted content, even in code examples that also
-use them as the "Ellipsis".
-''',
- 'bltin-null-object': r'''The Null Object
-***************
-
-This object is returned by functions that don’t explicitly return a
-value. It supports no special operations. There is exactly one null
-object, named "None" (a built-in name). "type(None)()" produces the
-same singleton.
-
-It is written as "None".
-''',
- 'bltin-type-objects': r'''Type Objects
-************
-
-Type objects represent the various object types. An object’s type is
-accessed by the built-in function "type()". There are no special
-operations on types. The standard module "types" defines names for
-all standard built-in types.
-
-Types are written like this: "<class 'int'>".
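-
-For example:
-
-   >>> type(1)
-   <class 'int'>
-   >>> import types
-   >>> type(types) is types.ModuleType
-   True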
-''', - 'booleans': r'''Boolean operations -****************** - - or_test ::= and_test | or_test "or" and_test - and_test ::= not_test | and_test "and" not_test - not_test ::= comparison | "not" not_test - -In the context of Boolean operations, and also when expressions are -used by control flow statements, the following values are interpreted -as false: "False", "None", numeric zero of all types, and empty -strings and containers (including strings, tuples, lists, -dictionaries, sets and frozensets). All other values are interpreted -as true. User-defined objects can customize their truth value by -providing a "__bool__()" method. - -The operator "not" yields "True" if its argument is false, "False" -otherwise. - -The expression "x and y" first evaluates *x*; if *x* is false, its -value is returned; otherwise, *y* is evaluated and the resulting value -is returned. - -The expression "x or y" first evaluates *x*; if *x* is true, its value -is returned; otherwise, *y* is evaluated and the resulting value is -returned. - -Note that neither "and" nor "or" restrict the value and type they -return to "False" and "True", but rather return the last evaluated -argument. This is sometimes useful, e.g., if "s" is a string that -should be replaced by a default value if it is empty, the expression -"s or 'foo'" yields the desired value. Because "not" has to create a -new value, it returns a boolean value regardless of the type of its -argument (for example, "not 'foo'" produces "False" rather than "''".) -''', - 'break': r'''The "break" statement -********************* - - break_stmt ::= "break" - -"break" may only occur syntactically nested in a "for" or "while" -loop, but not nested in a function or class definition within that -loop. - -It terminates the nearest enclosing loop, skipping the optional "else" -clause if the loop has one. - -If a "for" loop is terminated by "break", the loop control target -keeps its current value. - -When "break" passes control out of a "try" statement with a "finally" -clause, that "finally" clause is executed before really leaving the -loop. -''', - 'callable-types': r'''Emulating callable objects -************************** - -object.__call__(self[, args...]) - - Called when the instance is “called” as a function; if this method - is defined, "x(arg1, arg2, ...)" roughly translates to - "type(x).__call__(x, arg1, ...)". The "object" class itself does - not provide this method. -''', - 'calls': r'''Calls -***** - -A call calls a callable object (e.g., a *function*) with a possibly -empty series of *arguments*: - - call ::= primary "(" [argument_list [","] | comprehension] ")" - argument_list ::= positional_arguments ["," starred_and_keywords] - ["," keywords_arguments] - | starred_and_keywords ["," keywords_arguments] - | keywords_arguments - positional_arguments ::= positional_item ("," positional_item)* - positional_item ::= assignment_expression | "*" expression - starred_and_keywords ::= ("*" expression | keyword_item) - ("," "*" expression | "," keyword_item)* - keywords_arguments ::= (keyword_item | "**" expression) - ("," keyword_item | "," "**" expression)* - keyword_item ::= identifier "=" expression - -An optional trailing comma may be present after the positional and -keyword arguments but does not affect the semantics. - -The primary must evaluate to a callable object (user-defined -functions, built-in functions, methods of built-in objects, class -objects, methods of class instances, and all objects having a -"__call__()" method are callable). 
All argument expressions are
-evaluated before the call is attempted. Please refer to section
-Function definitions for the syntax of formal *parameter* lists.
-
-If keyword arguments are present, they are first converted to
-positional arguments, as follows. First, a list of unfilled slots is
-created for the formal parameters. If there are N positional
-arguments, they are placed in the first N slots. Next, for each
-keyword argument, the identifier is used to determine the
-corresponding slot (if the identifier is the same as the first formal
-parameter name, the first slot is used, and so on). If the slot is
-already filled, a "TypeError" exception is raised. Otherwise, the
-argument is placed in the slot, filling it (even if the expression is
-"None", it fills the slot). When all arguments have been processed,
-the slots that are still unfilled are filled with the corresponding
-default value from the function definition. (Default values are
-calculated, once, when the function is defined; thus, a mutable object
-such as a list or dictionary used as default value will be shared by
-all calls that don’t specify an argument value for the corresponding
-slot; this should usually be avoided.) If there are any unfilled
-slots for which no default value is specified, a "TypeError" exception
-is raised. Otherwise, the list of filled slots is used as the
-argument list for the call.
-
-**CPython implementation detail:** An implementation may provide
-built-in functions whose positional parameters do not have names, even
-if they are ‘named’ for the purpose of documentation, and which
-therefore cannot be supplied by keyword. In CPython, this is the case
-for functions implemented in C that use "PyArg_ParseTuple()" to parse
-their arguments.
-
-If there are more positional arguments than there are formal parameter
-slots, a "TypeError" exception is raised, unless a formal parameter
-using the syntax "*identifier" is present; in this case, that formal
-parameter receives a tuple containing the excess positional arguments
-(or an empty tuple if there were no excess positional arguments).
-
-If any keyword argument does not correspond to a formal parameter
-name, a "TypeError" exception is raised, unless a formal parameter
-using the syntax "**identifier" is present; in this case, that formal
-parameter receives a dictionary containing the excess keyword
-arguments (using the keywords as keys and the argument values as
-corresponding values), or a (new) empty dictionary if there were no
-excess keyword arguments.
-
-If the syntax "*expression" appears in the function call, "expression"
-must evaluate to an *iterable*. Elements from these iterables are
-treated as if they were additional positional arguments. For the call
-"f(x1, x2, *y, x3, x4)", if *y* evaluates to a sequence *y1*, …, *yM*,
-this is equivalent to a call with M+4 positional arguments *x1*, *x2*,
-*y1*, …, *yM*, *x3*, *x4*.
-
-A consequence of this is that although the "*expression" syntax may
-appear *after* explicit keyword arguments, it is processed *before*
-the keyword arguments (and any "**expression" arguments – see below).
-So:
-
-   >>> def f(a, b):
-   ...     print(a, b)
-   ...
-   >>> f(b=1, *(2,))
-   2 1
-   >>> f(a=1, *(2,))
-   Traceback (most recent call last):
-     File "<stdin>", line 1, in <module>
-   TypeError: f() got multiple values for keyword argument 'a'
-   >>> f(1, *(2,))
-   1 2
-
-It is unusual for both keyword arguments and the "*expression" syntax
-to be used in the same call, so in practice this confusion does not
-often arise.
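-
-A short doctest illustrating the earlier note that default values are
-evaluated once, at function definition time (the function name is
-invented for this sketch); the same default list object is shared by
-all calls that omit the argument:
-
-   >>> def append_to(item, seq=[]):
-   ...     seq.append(item)
-   ...     return seq
-   ...
-   >>> append_to(1)
-   [1]
-   >>> append_to(2)   # reuses the list from the first call
-   [1, 2]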
- -If the syntax "**expression" appears in the function call, -"expression" must evaluate to a *mapping*, the contents of which are -treated as additional keyword arguments. If a parameter matching a key -has already been given a value (by an explicit keyword argument, or -from another unpacking), a "TypeError" exception is raised. - -When "**expression" is used, each key in this mapping must be a -string. Each value from the mapping is assigned to the first formal -parameter eligible for keyword assignment whose name is equal to the -key. A key need not be a Python identifier (e.g. ""max-temp °F"" is -acceptable, although it will not match any formal parameter that could -be declared). If there is no match to a formal parameter the key-value -pair is collected by the "**" parameter, if there is one, or if there -is not, a "TypeError" exception is raised. - -Formal parameters using the syntax "*identifier" or "**identifier" -cannot be used as positional argument slots or as keyword argument -names. - -Changed in version 3.5: Function calls accept any number of "*" and -"**" unpackings, positional arguments may follow iterable unpackings -("*"), and keyword arguments may follow dictionary unpackings ("**"). -Originally proposed by **PEP 448**. - -A call always returns some value, possibly "None", unless it raises an -exception. How this value is computed depends on the type of the -callable object. - -If it is— - -a user-defined function: - The code block for the function is executed, passing it the - argument list. The first thing the code block will do is bind the - formal parameters to the arguments; this is described in section - Function definitions. When the code block executes a "return" - statement, this specifies the return value of the function call. - If execution reaches the end of the code block without executing a - "return" statement, the return value is "None". - -a built-in function or method: - The result is up to the interpreter; see Built-in Functions for the - descriptions of built-in functions and methods. - -a class object: - A new instance of that class is returned. - -a class instance method: - The corresponding user-defined function is called, with an argument - list that is one longer than the argument list of the call: the - instance becomes the first argument. - -a class instance: - The class must define a "__call__()" method; the effect is then the - same as if that method was called. -''', - 'class': r'''Class definitions -***************** - -A class definition defines a class object (see section The standard -type hierarchy): - - classdef ::= [decorators] "class" classname [type_params] [inheritance] ":" suite - inheritance ::= "(" [argument_list] ")" - classname ::= identifier - -A class definition is an executable statement. The inheritance list -usually gives a list of base classes (see Metaclasses for more -advanced uses), so each item in the list should evaluate to a class -object which allows subclassing. Classes without an inheritance list -inherit, by default, from the base class "object"; hence, - - class Foo: - pass - -is equivalent to - - class Foo(object): - pass - -The class’s suite is then executed in a new execution frame (see -Naming and binding), using a newly created local namespace and the -original global namespace. (Usually, the suite contains mostly -function definitions.) When the class’s suite finishes execution, its -execution frame is discarded but its local namespace is saved. 
[5] A -class object is then created using the inheritance list for the base -classes and the saved local namespace for the attribute dictionary. -The class name is bound to this class object in the original local -namespace. - -The order in which attributes are defined in the class body is -preserved in the new class’s "__dict__". Note that this is reliable -only right after the class is created and only for classes that were -defined using the definition syntax. - -Class creation can be customized heavily using metaclasses. - -Classes can also be decorated: just like when decorating functions, - - @f1(arg) - @f2 - class Foo: pass - -is roughly equivalent to - - class Foo: pass - Foo = f1(arg)(f2(Foo)) - -The evaluation rules for the decorator expressions are the same as for -function decorators. The result is then bound to the class name. - -Changed in version 3.9: Classes may be decorated with any valid -"assignment_expression". Previously, the grammar was much more -restrictive; see **PEP 614** for details. - -A list of type parameters may be given in square brackets immediately -after the class’s name. This indicates to static type checkers that -the class is generic. At runtime, the type parameters can be retrieved -from the class’s "__type_params__" attribute. See Generic classes for -more. - -Changed in version 3.12: Type parameter lists are new in Python 3.12. - -**Programmer’s note:** Variables defined in the class definition are -class attributes; they are shared by instances. Instance attributes -can be set in a method with "self.name = value". Both class and -instance attributes are accessible through the notation “"self.name"”, -and an instance attribute hides a class attribute with the same name -when accessed in this way. Class attributes can be used as defaults -for instance attributes, but using mutable values there can lead to -unexpected results. Descriptors can be used to create instance -variables with different implementation details. - -See also: - - **PEP 3115** - Metaclasses in Python 3000 - The proposal that changed the declaration of metaclasses to the - current syntax, and the semantics for how classes with - metaclasses are constructed. - - **PEP 3129** - Class Decorators - The proposal that added class decorators. Function and method - decorators were introduced in **PEP 318**. -''', - 'comparisons': r'''Comparisons -*********** - -Unlike C, all comparison operations in Python have the same priority, -which is lower than that of any arithmetic, shifting or bitwise -operation. Also unlike C, expressions like "a < b < c" have the -interpretation that is conventional in mathematics: - - comparison ::= or_expr (comp_operator or_expr)* - comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!=" - | "is" ["not"] | ["not"] "in" - -Comparisons yield boolean values: "True" or "False". Custom *rich -comparison methods* may return non-boolean values. In this case Python -will call "bool()" on such value in boolean contexts. - -Comparisons can be chained arbitrarily, e.g., "x < y <= z" is -equivalent to "x < y and y <= z", except that "y" is evaluated only -once (but in both cases "z" is not evaluated at all when "x < y" is -found to be false). - -Formally, if *a*, *b*, *c*, …, *y*, *z* are expressions and *op1*, -*op2*, …, *opN* are comparison operators, then "a op1 b op2 c ... y -opN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except -that each expression is evaluated at most once. 
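-
-A small doctest showing the single evaluation of the shared operand
-(the helper function is invented for this sketch):
-
-   >>> def middle():
-   ...     print('evaluating middle')
-   ...     return 5
-   ...
-   >>> 1 < middle() <= 10
-   evaluating middle
-   True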
- -Note that "a op1 b op2 c" doesn’t imply any kind of comparison between -*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though -perhaps not pretty). - - -Value comparisons -================= - -The operators "<", ">", "==", ">=", "<=", and "!=" compare the values -of two objects. The objects do not need to have the same type. - -Chapter Objects, values and types states that objects have a value (in -addition to type and identity). The value of an object is a rather -abstract notion in Python: For example, there is no canonical access -method for an object’s value. Also, there is no requirement that the -value of an object should be constructed in a particular way, e.g. -comprised of all its data attributes. Comparison operators implement a -particular notion of what the value of an object is. One can think of -them as defining the value of an object indirectly, by means of their -comparison implementation. - -Because all types are (direct or indirect) subtypes of "object", they -inherit the default comparison behavior from "object". Types can -customize their comparison behavior by implementing *rich comparison -methods* like "__lt__()", described in Basic customization. - -The default behavior for equality comparison ("==" and "!=") is based -on the identity of the objects. Hence, equality comparison of -instances with the same identity results in equality, and equality -comparison of instances with different identities results in -inequality. A motivation for this default behavior is the desire that -all objects should be reflexive (i.e. "x is y" implies "x == y"). - -A default order comparison ("<", ">", "<=", and ">=") is not provided; -an attempt raises "TypeError". A motivation for this default behavior -is the lack of a similar invariant as for equality. - -The behavior of the default equality comparison, that instances with -different identities are always unequal, may be in contrast to what -types will need that have a sensible definition of object value and -value-based equality. Such types will need to customize their -comparison behavior, and in fact, a number of built-in types have done -that. - -The following list describes the comparison behavior of the most -important built-in types. - -* Numbers of built-in numeric types (Numeric Types — int, float, - complex) and of the standard library types "fractions.Fraction" and - "decimal.Decimal" can be compared within and across their types, - with the restriction that complex numbers do not support order - comparison. Within the limits of the types involved, they compare - mathematically (algorithmically) correct without loss of precision. - - The not-a-number values "float('NaN')" and "decimal.Decimal('NaN')" - are special. Any ordered comparison of a number to a not-a-number - value is false. A counter-intuitive implication is that not-a-number - values are not equal to themselves. For example, if "x = - float('NaN')", "3 < x", "x < 3" and "x == x" are all false, while "x - != x" is true. This behavior is compliant with IEEE 754. - -* "None" and "NotImplemented" are singletons. **PEP 8** advises that - comparisons for singletons should always be done with "is" or "is - not", never the equality operators. - -* Binary sequences (instances of "bytes" or "bytearray") can be - compared within and across their types. They compare - lexicographically using the numeric values of their elements. 
- -* Strings (instances of "str") compare lexicographically using the - numerical Unicode code points (the result of the built-in function - "ord()") of their characters. [3] - - Strings and binary sequences cannot be directly compared. - -* Sequences (instances of "tuple", "list", or "range") can be compared - only within each of their types, with the restriction that ranges do - not support order comparison. Equality comparison across these - types results in inequality, and ordering comparison across these - types raises "TypeError". - - Sequences compare lexicographically using comparison of - corresponding elements. The built-in containers typically assume - identical objects are equal to themselves. That lets them bypass - equality tests for identical objects to improve performance and to - maintain their internal invariants. - - Lexicographical comparison between built-in collections works as - follows: - - * For two collections to compare equal, they must be of the same - type, have the same length, and each pair of corresponding - elements must compare equal (for example, "[1,2] == (1,2)" is - false because the type is not the same). - - * Collections that support order comparison are ordered the same as - their first unequal elements (for example, "[1,2,x] <= [1,2,y]" - has the same value as "x <= y"). If a corresponding element does - not exist, the shorter collection is ordered first (for example, - "[1,2] < [1,2,3]" is true). - -* Mappings (instances of "dict") compare equal if and only if they - have equal "(key, value)" pairs. Equality comparison of the keys and - values enforces reflexivity. - - Order comparisons ("<", ">", "<=", and ">=") raise "TypeError". - -* Sets (instances of "set" or "frozenset") can be compared within and - across their types. - - They define order comparison operators to mean subset and superset - tests. Those relations do not define total orderings (for example, - the two sets "{1,2}" and "{2,3}" are not equal, nor subsets of one - another, nor supersets of one another). Accordingly, sets are not - appropriate arguments for functions which depend on total ordering - (for example, "min()", "max()", and "sorted()" produce undefined - results given a list of sets as inputs). - - Comparison of sets enforces reflexivity of its elements. - -* Most other built-in types have no comparison methods implemented, so - they inherit the default comparison behavior. - -User-defined classes that customize their comparison behavior should -follow some consistency rules, if possible: - -* Equality comparison should be reflexive. In other words, identical - objects should compare equal: - - "x is y" implies "x == y" - -* Comparison should be symmetric. In other words, the following - expressions should have the same result: - - "x == y" and "y == x" - - "x != y" and "y != x" - - "x < y" and "y > x" - - "x <= y" and "y >= x" - -* Comparison should be transitive. The following (non-exhaustive) - examples illustrate that: - - "x > y and y > z" implies "x > z" - - "x < y and y <= z" implies "x < z" - -* Inverse comparison should result in the boolean negation. In other - words, the following expressions should have the same result: - - "x == y" and "not x != y" - - "x < y" and "not x >= y" (for total ordering) - - "x > y" and "not x <= y" (for total ordering) - - The last two expressions apply to totally ordered collections (e.g. - to sequences, but not to sets or mappings). See also the - "total_ordering()" decorator. 
-
-* The "hash()" result should be consistent with equality. Objects that
-  are equal should either have the same hash value, or be marked as
-  unhashable.
-
-Python does not enforce these consistency rules. In fact, the
-not-a-number values are an example for not following these rules.
-
-
-Membership test operations
-==========================
-
-The operators "in" and "not in" test for membership. "x in s"
-evaluates to "True" if *x* is a member of *s*, and "False" otherwise.
-"x not in s" returns the negation of "x in s". All built-in sequences
-and set types support this as well as dictionary, for which "in" tests
-whether the dictionary has a given key. For container types such as
-list, tuple, set, frozenset, dict, or collections.deque, the
-expression "x in y" is equivalent to "any(x is e or x == e for e in
-y)".
-
-For the string and bytes types, "x in y" is "True" if and only if *x*
-is a substring of *y*. An equivalent test is "y.find(x) != -1".
-Empty strings are always considered to be a substring of any other
-string, so """ in "abc"" will return "True".
-
-For user-defined classes which define the "__contains__()" method, "x
-in y" returns "True" if "y.__contains__(x)" returns a true value, and
-"False" otherwise.
-
-For user-defined classes which do not define "__contains__()" but do
-define "__iter__()", "x in y" is "True" if some value "z", for which
-the expression "x is z or x == z" is true, is produced while iterating
-over "y". If an exception is raised during the iteration, it is as if
-"in" raised that exception.
-
-Lastly, the old-style iteration protocol is tried: if a class defines
-"__getitem__()", "x in y" is "True" if and only if there is a non-
-negative integer index *i* such that "x is y[i] or x == y[i]", and no
-lower integer index raises the "IndexError" exception. (If any other
-exception is raised, it is as if "in" raised that exception).
-
-The operator "not in" is defined to have the inverse truth value of
-"in".
-
-
-Identity comparisons
-====================
-
-The operators "is" and "is not" test for an object’s identity: "x is
-y" is true if and only if *x* and *y* are the same object. An
-object’s identity is determined using the "id()" function. "x is not
-y" yields the inverse truth value. [4]
-''',
- 'compound': r'''Compound statements
-*******************
-
-Compound statements contain (groups of) other statements; they affect
-or control the execution of those other statements in some way. In
-general, compound statements span multiple lines, although in simple
-incarnations a whole compound statement may be contained in one line.
-
-The "if", "while" and "for" statements implement traditional control
-flow constructs. "try" specifies exception handlers and/or cleanup
-code for a group of statements, while the "with" statement allows the
-execution of initialization and finalization code around a block of
-code. Function and class definitions are also syntactically compound
-statements.
-
-A compound statement consists of one or more ‘clauses.’ A clause
-consists of a header and a ‘suite.’ The clause headers of a
-particular compound statement are all at the same indentation level.
-Each clause header begins with a uniquely identifying keyword and ends
-with a colon. A suite is a group of statements controlled by a
-clause. A suite can be one or more semicolon-separated simple
-statements on the same line as the header, following the header’s
-colon, or it can be one or more indented statements on subsequent
-lines.
Only the latter form of a suite can contain nested compound -statements; the following is illegal, mostly because it wouldn’t be -clear to which "if" clause a following "else" clause would belong: - - if test1: if test2: print(x) - -Also note that the semicolon binds tighter than the colon in this -context, so that in the following example, either all or none of the -"print()" calls are executed: - - if x < y < z: print(x); print(y); print(z) - -Summarizing: - - compound_stmt ::= if_stmt - | while_stmt - | for_stmt - | try_stmt - | with_stmt - | match_stmt - | funcdef - | classdef - | async_with_stmt - | async_for_stmt - | async_funcdef - suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT - statement ::= stmt_list NEWLINE | compound_stmt - stmt_list ::= simple_stmt (";" simple_stmt)* [";"] - -Note that statements always end in a "NEWLINE" possibly followed by a -"DEDENT". Also note that optional continuation clauses always begin -with a keyword that cannot start a statement, thus there are no -ambiguities (the ‘dangling "else"’ problem is solved in Python by -requiring nested "if" statements to be indented). - -The formatting of the grammar rules in the following sections places -each clause on a separate line for clarity. - - -The "if" statement -================== - -The "if" statement is used for conditional execution: - - if_stmt ::= "if" assignment_expression ":" suite - ("elif" assignment_expression ":" suite)* - ["else" ":" suite] - -It selects exactly one of the suites by evaluating the expressions one -by one until one is found to be true (see section Boolean operations -for the definition of true and false); then that suite is executed -(and no other part of the "if" statement is executed or evaluated). -If all expressions are false, the suite of the "else" clause, if -present, is executed. - - -The "while" statement -===================== - -The "while" statement is used for repeated execution as long as an -expression is true: - - while_stmt ::= "while" assignment_expression ":" suite - ["else" ":" suite] - -This repeatedly tests the expression and, if it is true, executes the -first suite; if the expression is false (which may be the first time -it is tested) the suite of the "else" clause, if present, is executed -and the loop terminates. - -A "break" statement executed in the first suite terminates the loop -without executing the "else" clause’s suite. A "continue" statement -executed in the first suite skips the rest of the suite and goes back -to testing the expression. - - -The "for" statement -=================== - -The "for" statement is used to iterate over the elements of a sequence -(such as a string, tuple or list) or other iterable object: - - for_stmt ::= "for" target_list "in" `!starred_list` ":" suite - ["else" ":" suite] - -The "starred_list" expression is evaluated once; it should yield an -*iterable* object. An *iterator* is created for that iterable. The -first item provided by the iterator is then assigned to the target -list using the standard rules for assignments (see Assignment -statements), and the suite is executed. This repeats for each item -provided by the iterator. When the iterator is exhausted, the suite -in the "else" clause, if present, is executed, and the loop -terminates. - -A "break" statement executed in the first suite terminates the loop -without executing the "else" clause’s suite. 
A "continue" statement -executed in the first suite skips the rest of the suite and continues -with the next item, or with the "else" clause if there is no next -item. - -The for-loop makes assignments to the variables in the target list. -This overwrites all previous assignments to those variables including -those made in the suite of the for-loop: - - for i in range(10): - print(i) - i = 5 # this will not affect the for-loop - # because i will be overwritten with the next - # index in the range - -Names in the target list are not deleted when the loop is finished, -but if the sequence is empty, they will not have been assigned to at -all by the loop. Hint: the built-in type "range()" represents -immutable arithmetic sequences of integers. For instance, iterating -"range(3)" successively yields 0, 1, and then 2. - -Changed in version 3.11: Starred elements are now allowed in the -expression list. - - -The "try" statement -=================== - -The "try" statement specifies exception handlers and/or cleanup code -for a group of statements: - - try_stmt ::= try1_stmt | try2_stmt | try3_stmt - try1_stmt ::= "try" ":" suite - ("except" [expression ["as" identifier]] ":" suite)+ - ["else" ":" suite] - ["finally" ":" suite] - try2_stmt ::= "try" ":" suite - ("except" "*" expression ["as" identifier] ":" suite)+ - ["else" ":" suite] - ["finally" ":" suite] - try3_stmt ::= "try" ":" suite - "finally" ":" suite - -Additional information on exceptions can be found in section -Exceptions, and information on using the "raise" statement to generate -exceptions may be found in section The raise statement. - - -"except" clause ---------------- - -The "except" clause(s) specify one or more exception handlers. When no -exception occurs in the "try" clause, no exception handler is -executed. When an exception occurs in the "try" suite, a search for an -exception handler is started. This search inspects the "except" -clauses in turn until one is found that matches the exception. An -expression-less "except" clause, if present, must be last; it matches -any exception. - -For an "except" clause with an expression, the expression must -evaluate to an exception type or a tuple of exception types. The -raised exception matches an "except" clause whose expression evaluates -to the class or a *non-virtual base class* of the exception object, or -to a tuple that contains such a class. - -If no "except" clause matches the exception, the search for an -exception handler continues in the surrounding code and on the -invocation stack. [1] - -If the evaluation of an expression in the header of an "except" clause -raises an exception, the original search for a handler is canceled and -a search starts for the new exception in the surrounding code and on -the call stack (it is treated as if the entire "try" statement raised -the exception). - -When a matching "except" clause is found, the exception is assigned to -the target specified after the "as" keyword in that "except" clause, -if present, and the "except" clause’s suite is executed. All "except" -clauses must have an executable block. When the end of this block is -reached, execution continues normally after the entire "try" -statement. (This means that if two nested handlers exist for the same -exception, and the exception occurs in the "try" clause of the inner -handler, the outer handler will not handle the exception.) - -When an exception has been assigned using "as target", it is cleared -at the end of the "except" clause. 
This is as if
-
-   except E as N:
-       foo
-
-was translated to
-
-   except E as N:
-       try:
-           foo
-       finally:
-           del N
-
-This means the exception must be assigned to a different name to be
-able to refer to it after the "except" clause. Exceptions are cleared
-because with the traceback attached to them, they form a reference
-cycle with the stack frame, keeping all locals in that frame alive
-until the next garbage collection occurs.
-
-Before an "except" clause’s suite is executed, the exception is stored
-in the "sys" module, where it can be accessed from within the body of
-the "except" clause by calling "sys.exception()". When leaving an
-exception handler, the exception stored in the "sys" module is reset
-to its previous value:
-
-   >>> print(sys.exception())
-   None
-   >>> try:
-   ...     raise TypeError
-   ... except:
-   ...     print(repr(sys.exception()))
-   ...     try:
-   ...         raise ValueError
-   ...     except:
-   ...         print(repr(sys.exception()))
-   ...     print(repr(sys.exception()))
-   ...
-   TypeError()
-   ValueError()
-   TypeError()
-   >>> print(sys.exception())
-   None
-
-
-"except*" clause
-----------------
-
-The "except*" clause(s) specify one or more handlers for groups of
-exceptions ("BaseExceptionGroup" instances). A "try" statement can
-have either "except" or "except*" clauses, but not both. The exception
-type for matching is mandatory in the case of "except*", so "except*:"
-is a syntax error. The type is interpreted as in the case of "except",
-but matching is performed on the exceptions contained in the group
-that is being handled. A "TypeError" is raised if a matching type is
-a subclass of "BaseExceptionGroup", because that would have ambiguous
-semantics.
-
-When an exception group is raised in the try block, each "except*"
-clause splits (see "split()") it into the subgroups of matching and
-non-matching exceptions. If the matching subgroup is not empty, it
-becomes the handled exception (the value returned from
-"sys.exception()") and assigned to the target of the "except*" clause
-(if there is one). Then, the body of the "except*" clause executes. If
-the non-matching subgroup is not empty, it is processed by the next
-"except*" in the same manner. This continues until all exceptions in
-the group have been matched, or the last "except*" clause has run.
-
-After all "except*" clauses execute, the group of unhandled exceptions
-is merged with any exceptions that were raised or re-raised from
-within "except*" clauses. This merged exception group propagates on:
-
-   >>> try:
-   ...     raise ExceptionGroup("eg",
-   ...         [ValueError(1), TypeError(2), OSError(3), OSError(4)])
-   ... except* TypeError as e:
-   ...     print(f'caught {type(e)} with nested {e.exceptions}')
-   ... except* OSError as e:
-   ...     print(f'caught {type(e)} with nested {e.exceptions}')
-   ...
-   caught <class 'ExceptionGroup'> with nested (TypeError(2),)
-   caught <class 'ExceptionGroup'> with nested (OSError(3), OSError(4))
-     + Exception Group Traceback (most recent call last):
-     |   File "<stdin>", line 2, in <module>
-     |     raise ExceptionGroup("eg",
-     |         [ValueError(1), TypeError(2), OSError(3), OSError(4)])
-     | ExceptionGroup: eg (1 sub-exception)
-     +-+---------------- 1 ----------------
-       | ValueError: 1
-       +------------------------------------
-
-If the exception raised from the "try" block is not an exception group
-and its type matches one of the "except*" clauses, it is caught and
-wrapped by an exception group with an empty message string. This
-ensures that the type of the target "e" is consistently
-"BaseExceptionGroup":
-
-   >>> try:
-   ...     raise BlockingIOError
-   ...
except* BlockingIOError as e: - ... print(repr(e)) - ... - ExceptionGroup('', (BlockingIOError(),)) - -"break", "continue" and "return" cannot appear in an "except*" clause. - - -"else" clause -------------- - -The optional "else" clause is executed if the control flow leaves the -"try" suite, no exception was raised, and no "return", "continue", or -"break" statement was executed. Exceptions in the "else" clause are -not handled by the preceding "except" clauses. - - -"finally" clause ----------------- - -If "finally" is present, it specifies a ‘cleanup’ handler. The "try" -clause is executed, including any "except" and "else" clauses. If an -exception occurs in any of the clauses and is not handled, the -exception is temporarily saved. The "finally" clause is executed. If -there is a saved exception it is re-raised at the end of the "finally" -clause. If the "finally" clause raises another exception, the saved -exception is set as the context of the new exception. If the "finally" -clause executes a "return", "break" or "continue" statement, the saved -exception is discarded: - - >>> def f(): - ... try: - ... 1/0 - ... finally: - ... return 42 - ... - >>> f() - 42 - -The exception information is not available to the program during -execution of the "finally" clause. - -When a "return", "break" or "continue" statement is executed in the -"try" suite of a "try"…"finally" statement, the "finally" clause is -also executed ‘on the way out.’ - -The return value of a function is determined by the last "return" -statement executed. Since the "finally" clause always executes, a -"return" statement executed in the "finally" clause will always be the -last one executed: - - >>> def foo(): - ... try: - ... return 'try' - ... finally: - ... return 'finally' - ... - >>> foo() - 'finally' - -Changed in version 3.8: Prior to Python 3.8, a "continue" statement -was illegal in the "finally" clause due to a problem with the -implementation. - - -The "with" statement -==================== - -The "with" statement is used to wrap the execution of a block with -methods defined by a context manager (see section With Statement -Context Managers). This allows common "try"…"except"…"finally" usage -patterns to be encapsulated for convenient reuse. - - with_stmt ::= "with" ( "(" with_stmt_contents ","? ")" | with_stmt_contents ) ":" suite - with_stmt_contents ::= with_item ("," with_item)* - with_item ::= expression ["as" target] - -The execution of the "with" statement with one “item” proceeds as -follows: - -1. The context expression (the expression given in the "with_item") is - evaluated to obtain a context manager. - -2. The context manager’s "__enter__()" is loaded for later use. - -3. The context manager’s "__exit__()" is loaded for later use. - -4. The context manager’s "__enter__()" method is invoked. - -5. If a target was included in the "with" statement, the return value - from "__enter__()" is assigned to it. - - Note: - - The "with" statement guarantees that if the "__enter__()" method - returns without an error, then "__exit__()" will always be - called. Thus, if an error occurs during the assignment to the - target list, it will be treated the same as an error occurring - within the suite would be. See step 7 below. - -6. The suite is executed. - -7. The context manager’s "__exit__()" method is invoked. If an - exception caused the suite to be exited, its type, value, and - traceback are passed as arguments to "__exit__()". Otherwise, three - "None" arguments are supplied. 
- - If the suite was exited due to an exception, and the return value - from the "__exit__()" method was false, the exception is reraised. - If the return value was true, the exception is suppressed, and - execution continues with the statement following the "with" - statement. - - If the suite was exited for any reason other than an exception, the - return value from "__exit__()" is ignored, and execution proceeds - at the normal location for the kind of exit that was taken. - -The following code: - - with EXPRESSION as TARGET: - SUITE - -is semantically equivalent to: - - manager = (EXPRESSION) - enter = manager.__enter__ - exit = manager.__exit__ - value = enter() - hit_except = False - - try: - TARGET = value - SUITE - except: - hit_except = True - if not exit(*sys.exc_info()): - raise - finally: - if not hit_except: - exit(None, None, None) - -except that implicit special method lookup is used for "__enter__()" -and "__exit__()". - -With more than one item, the context managers are processed as if -multiple "with" statements were nested: - - with A() as a, B() as b: - SUITE - -is semantically equivalent to: - - with A() as a: - with B() as b: - SUITE - -You can also write multi-item context managers in multiple lines if -the items are surrounded by parentheses. For example: - - with ( - A() as a, - B() as b, - ): - SUITE - -Changed in version 3.1: Support for multiple context expressions. - -Changed in version 3.10: Support for using grouping parentheses to -break the statement in multiple lines. - -See also: - - **PEP 343** - The “with” statement - The specification, background, and examples for the Python "with" - statement. - - -The "match" statement -===================== - -Added in version 3.10. - -The match statement is used for pattern matching. Syntax: - - match_stmt ::= 'match' subject_expr ":" NEWLINE INDENT case_block+ DEDENT - subject_expr ::= `!star_named_expression` "," `!star_named_expressions`? - | `!named_expression` - case_block ::= 'case' patterns [guard] ":" `!block` - -Note: - - This section uses single quotes to denote soft keywords. - -Pattern matching takes a pattern as input (following "case") and a -subject value (following "match"). The pattern (which may contain -subpatterns) is matched against the subject value. The outcomes are: - -* A match success or failure (also termed a pattern success or - failure). - -* Possible binding of matched values to a name. The prerequisites for - this are further discussed below. - -The "match" and "case" keywords are soft keywords. - -See also: - - * **PEP 634** – Structural Pattern Matching: Specification - - * **PEP 636** – Structural Pattern Matching: Tutorial - - -Overview --------- - -Here’s an overview of the logical flow of a match statement: - -1. The subject expression "subject_expr" is evaluated and a resulting - subject value obtained. If the subject expression contains a comma, - a tuple is constructed using the standard rules. - -2. Each pattern in a "case_block" is attempted to match with the - subject value. The specific rules for success or failure are - described below. The match attempt can also bind some or all of the - standalone names within the pattern. The precise pattern binding - rules vary per pattern type and are specified below. **Name - bindings made during a successful pattern match outlive the - executed block and can be used after the match statement**. - - Note: - - During failed pattern matches, some subpatterns may succeed. Do - not rely on bindings being made for a failed match. 
Conversely, - do not rely on variables remaining unchanged after a failed - match. The exact behavior is dependent on implementation and may - vary. This is an intentional decision made to allow different - implementations to add optimizations. - -3. If the pattern succeeds, the corresponding guard (if present) is - evaluated. In this case all name bindings are guaranteed to have - happened. - - * If the guard evaluates as true or is missing, the "block" inside - "case_block" is executed. - - * Otherwise, the next "case_block" is attempted as described above. - - * If there are no further case blocks, the match statement is - completed. - -Note: - - Users should generally never rely on a pattern being evaluated. - Depending on implementation, the interpreter may cache values or use - other optimizations which skip repeated evaluations. - -A sample match statement: - - >>> flag = False - >>> match (100, 200): - ... case (100, 300): # Mismatch: 200 != 300 - ... print('Case 1') - ... case (100, 200) if flag: # Successful match, but guard fails - ... print('Case 2') - ... case (100, y): # Matches and binds y to 200 - ... print(f'Case 3, y: {y}') - ... case _: # Pattern not attempted - ... print('Case 4, I match anything!') - ... - Case 3, y: 200 - -In this case, "if flag" is a guard. Read more about that in the next -section. - - -Guards ------- - - guard ::= "if" `!named_expression` - -A "guard" (which is part of the "case") must succeed for code inside -the "case" block to execute. It takes the form: "if" followed by an -expression. - -The logical flow of a "case" block with a "guard" follows: - -1. Check that the pattern in the "case" block succeeded. If the - pattern failed, the "guard" is not evaluated and the next "case" - block is checked. - -2. If the pattern succeeded, evaluate the "guard". - - * If the "guard" condition evaluates as true, the case block is - selected. - - * If the "guard" condition evaluates as false, the case block is - not selected. - - * If the "guard" raises an exception during evaluation, the - exception bubbles up. - -Guards are allowed to have side effects as they are expressions. -Guard evaluation must proceed from the first to the last case block, -one at a time, skipping case blocks whose pattern(s) don’t all -succeed. (I.e., guard evaluation must happen in order.) Guard -evaluation must stop once a case block is selected. - - -Irrefutable Case Blocks ------------------------ - -An irrefutable case block is a match-all case block. A match -statement may have at most one irrefutable case block, and it must be -last. - -A case block is considered irrefutable if it has no guard and its -pattern is irrefutable. A pattern is considered irrefutable if we can -prove from its syntax alone that it will always succeed. 
Only the
-following patterns are irrefutable:
-
-* AS Patterns whose left-hand side is irrefutable
-
-* OR Patterns containing at least one irrefutable pattern
-
-* Capture Patterns
-
-* Wildcard Patterns
-
-* parenthesized irrefutable patterns
-
-
-Patterns
---------
-
-Note:
-
-  This section uses grammar notations beyond standard EBNF:
-
-  * the notation "SEP.RULE+" is shorthand for "RULE (SEP RULE)*"
-
-  * the notation "!RULE" is shorthand for a negative lookahead
-    assertion
-
-The top-level syntax for "patterns" is:
-
-   patterns       ::= open_sequence_pattern | pattern
-   pattern        ::= as_pattern | or_pattern
-   closed_pattern ::= | literal_pattern
-                      | capture_pattern
-                      | wildcard_pattern
-                      | value_pattern
-                      | group_pattern
-                      | sequence_pattern
-                      | mapping_pattern
-                      | class_pattern
-
-The descriptions below will include a description “in simple terms” of
-what a pattern does for illustration purposes (credits to Raymond
-Hettinger for a document that inspired most of the descriptions). Note
-that these descriptions are purely for illustration purposes and **may
-not** reflect the underlying implementation. Furthermore, they do not
-cover all valid forms.
-
-
-OR Patterns
-~~~~~~~~~~~
-
-An OR pattern is two or more patterns separated by vertical bars "|".
-Syntax:
-
-   or_pattern ::= "|".closed_pattern+
-
-Only the final subpattern may be irrefutable, and each subpattern must
-bind the same set of names to avoid ambiguity.
-
-An OR pattern matches each of its subpatterns in turn to the subject
-value, until one succeeds. The OR pattern is then considered
-successful. Otherwise, if none of the subpatterns succeed, the OR
-pattern fails.
-
-In simple terms, "P1 | P2 | ..." will try to match "P1", if it fails
-it will try to match "P2", succeeding immediately if any succeeds,
-failing otherwise.
-
-
-AS Patterns
-~~~~~~~~~~~
-
-An AS pattern matches an OR pattern on the left of the "as" keyword
-against a subject. Syntax:
-
-   as_pattern ::= or_pattern "as" capture_pattern
-
-If the OR pattern fails, the AS pattern fails. Otherwise, the AS
-pattern binds the subject to the name on the right of the as keyword
-and succeeds. "capture_pattern" cannot be a "_".
-
-In simple terms "P as NAME" will match with "P", and on success it
-will set "NAME = <subject>".
-
-
-Literal Patterns
-~~~~~~~~~~~~~~~~
-
-A literal pattern corresponds to most literals in Python. Syntax:
-
-   literal_pattern ::= signed_number
-                       | signed_number "+" NUMBER
-                       | signed_number "-" NUMBER
-                       | `!strings`
-                       | "None"
-                       | "True"
-                       | "False"
-   signed_number   ::= ["-"] NUMBER
-
-The rule "strings" and the token "NUMBER" are defined in the standard
-Python grammar. Triple-quoted strings are supported. Raw strings and
-byte strings are supported. f-strings are not supported.
-
-The forms "signed_number '+' NUMBER" and "signed_number '-' NUMBER"
-are for expressing complex numbers; they require a real number on the
-left and an imaginary number on the right. E.g. "3 + 4j".
-
-In simple terms, "LITERAL" will succeed only if "<subject> ==
-LITERAL". For the singletons "None", "True" and "False", the "is"
-operator is used.
-
-
-Capture Patterns
-~~~~~~~~~~~~~~~~
-
-A capture pattern binds the subject value to a name. Syntax:
-
-   capture_pattern ::= !'_' NAME
-
-A single underscore "_" is not a capture pattern (this is what "!'_'"
-expresses). It is instead treated as a "wildcard_pattern".
-
-In a given pattern, a given name can only be bound once. E.g. "case
-x, x: ..." is invalid while "case [x] | x: ..." is allowed.
-
-Capture patterns always succeed. The binding follows scoping rules
-established by the assignment expression operator in **PEP 572**; the
-name becomes a local variable in the closest containing function scope
-unless there’s an applicable "global" or "nonlocal" statement.
-
-In simple terms "NAME" will always succeed and it will set "NAME =
-<subject>".
-
-
-Wildcard Patterns
-~~~~~~~~~~~~~~~~~
-
-A wildcard pattern always succeeds (matches anything) and binds no
-name. Syntax:
-
-   wildcard_pattern ::= '_'
-
-"_" is a soft keyword within any pattern, but only within patterns.
-It is an identifier, as usual, even within "match" subject
-expressions, "guard"s, and "case" blocks.
-
-In simple terms, "_" will always succeed.
-
-
-Value Patterns
-~~~~~~~~~~~~~~
-
-A value pattern represents a named value in Python. Syntax:
-
-   value_pattern ::= attr
-   attr          ::= name_or_attr "." NAME
-   name_or_attr  ::= attr | NAME
-
-The dotted name in the pattern is looked up using standard Python name
-resolution rules. The pattern succeeds if the value found compares
-equal to the subject value (using the "==" equality operator).
-
-In simple terms "NAME1.NAME2" will succeed only if "<subject> ==
-NAME1.NAME2"
-
-Note:
-
-  If the same value occurs multiple times in the same match statement,
-  the interpreter may cache the first value found and reuse it rather
-  than repeat the same lookup. This cache is strictly tied to a given
-  execution of a given match statement.
-
-
-Group Patterns
-~~~~~~~~~~~~~~
-
-A group pattern allows users to add parentheses around patterns to
-emphasize the intended grouping. Otherwise, it has no additional
-syntax. Syntax:
-
-   group_pattern ::= "(" pattern ")"
-
-In simple terms "(P)" has the same effect as "P".
-
-
-Sequence Patterns
-~~~~~~~~~~~~~~~~~
-
-A sequence pattern contains several subpatterns to be matched against
-sequence elements. The syntax is similar to the unpacking of a list or
-tuple.
-
-   sequence_pattern       ::= "[" [maybe_sequence_pattern] "]"
-                              | "(" [open_sequence_pattern] ")"
-   open_sequence_pattern  ::= maybe_star_pattern "," [maybe_sequence_pattern]
-   maybe_sequence_pattern ::= ",".maybe_star_pattern+ ","?
-   maybe_star_pattern     ::= star_pattern | pattern
-   star_pattern           ::= "*" (capture_pattern | wildcard_pattern)
-
-There is no difference if parentheses or square brackets are used for
-sequence patterns (i.e. "(...)" vs "[...]" ).
-
-Note:
-
-  A single pattern enclosed in parentheses without a trailing comma
-  (e.g. "(3 | 4)") is a group pattern. While a single pattern enclosed
-  in square brackets (e.g. "[3 | 4]") is still a sequence pattern.
-
-At most one star subpattern may be in a sequence pattern. The star
-subpattern may occur in any position. If no star subpattern is
-present, the sequence pattern is a fixed-length sequence pattern;
-otherwise it is a variable-length sequence pattern.
-
-The following is the logical flow for matching a sequence pattern
-against a subject value:
-
-1. If the subject value is not a sequence [2], the sequence pattern
-   fails.
-
-2. If the subject value is an instance of "str", "bytes" or
-   "bytearray" the sequence pattern fails.
-
-3. The subsequent steps depend on whether the sequence pattern is
-   fixed or variable-length.
-
-   If the sequence pattern is fixed-length:
-
-   1. If the length of the subject sequence is not equal to the number
-      of subpatterns, the sequence pattern fails
-
-   2. Subpatterns in the sequence pattern are matched to their
-      corresponding items in the subject sequence from left to right.
-      Matching stops as soon as a subpattern fails.
-      If all subpatterns succeed in matching their corresponding
-      item, the sequence pattern succeeds.
-
-   Otherwise, if the sequence pattern is variable-length:
-
-   1. If the length of the subject sequence is less than the number of
-      non-star subpatterns, the sequence pattern fails.
-
-   2. The leading non-star subpatterns are matched to their
-      corresponding items as for fixed-length sequences.
-
-   3. If the previous step succeeds, the star subpattern matches a
-      list formed of the remaining subject items, excluding the
-      remaining items corresponding to non-star subpatterns following
-      the star subpattern.
-
-   4. Remaining non-star subpatterns are matched to their
-      corresponding subject items, as for a fixed-length sequence.
-
-   Note:
-
-     The length of the subject sequence is obtained via "len()" (i.e.
-     via the "__len__()" protocol). This length may be cached by the
-     interpreter in a similar manner as value patterns.
-
-In simple terms "[P1, P2, P3," … ", P<N>]" matches only if all the
-following happens:
-
-* check "<subject>" is a sequence
-
-* "len(subject) == <N>"
-
-* "P1" matches "<subject>[0]" (note that this match can also bind
-  names)
-
-* "P2" matches "<subject>[1]" (note that this match can also bind
-  names)
-
-* … and so on for the corresponding pattern/element.
-
-
-Mapping Patterns
-~~~~~~~~~~~~~~~~
-
-A mapping pattern contains one or more key-value patterns. The syntax
-is similar to the construction of a dictionary. Syntax:
-
-   mapping_pattern     ::= "{" [items_pattern] "}"
-   items_pattern       ::= ",".key_value_pattern+ ","?
-   key_value_pattern   ::= (literal_pattern | value_pattern) ":" pattern
-                           | double_star_pattern
-   double_star_pattern ::= "**" capture_pattern
-
-At most one double star pattern may be in a mapping pattern. The
-double star pattern must be the last subpattern in the mapping
-pattern.
-
-Duplicate keys in mapping patterns are disallowed. Duplicate literal
-keys will raise a "SyntaxError". Two keys that otherwise have the same
-value will raise a "ValueError" at runtime.
-
-The following is the logical flow for matching a mapping pattern
-against a subject value:
-
-1. If the subject value is not a mapping [3], the mapping pattern
-   fails.
-
-2. If every key given in the mapping pattern is present in the subject
-   mapping, and the pattern for each key matches the corresponding
-   item of the subject mapping, the mapping pattern succeeds.
-
-3. If duplicate keys are detected in the mapping pattern, the pattern
-   is considered invalid. A "SyntaxError" is raised for duplicate
-   literal values; or a "ValueError" for named keys of the same value.
-
-Note:
-
-  Key-value pairs are matched using the two-argument form of the
-  mapping subject’s "get()" method. Matched key-value pairs must
-  already be present in the mapping, and not created on-the-fly via
-  "__missing__()" or "__getitem__()".
-
-In simple terms "{KEY1: P1, KEY2: P2, ... }" matches only if all the
-following happens:
-
-* check "<subject>" is a mapping
-
-* "KEY1 in <subject>"
-
-* "P1" matches "<subject>[KEY1]"
-
-* … and so on for the corresponding KEY/pattern pair.
-
-
-Class Patterns
-~~~~~~~~~~~~~~
-
-A class pattern represents a class and its positional and keyword
-arguments (if any). Syntax:
-
-   class_pattern       ::= name_or_attr "(" [pattern_arguments ","?] ")"
-   pattern_arguments   ::= positional_patterns ["," keyword_patterns]
-                           | keyword_patterns
-   positional_patterns ::= ",".pattern+
-   keyword_patterns    ::= ",".keyword_pattern+
-   keyword_pattern     ::= NAME "=" pattern
-
-The same keyword should not be repeated in class patterns.
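For orientation before the detailed matching logic below, here is a
brief sketch of class patterns in use (the "Point" class and the
"point" variable are hypothetical examples, not part of the
specification):

   class Point:
       __match_args__ = ("x", "y")

       def __init__(self, x, y):
           self.x = x
           self.y = y

   point = Point(x=0, y=3)
   match point:
       case Point(0, 0):
           print("Origin")
       case Point(x=0, y=y):
           print(f"On the y-axis at y={y}")
       case Point(x, y):
           print(f"Somewhere else, at ({x}, {y})")

Here the positional patterns in "Point(0, 0)" and "Point(x, y)" rely
on "__match_args__" to be converted to keyword patterns, as described
next.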
-
-The following is the logical flow for matching a class pattern against
-a subject value:
-
-1. If "name_or_attr" is not an instance of the builtin "type", raise
-   "TypeError".
-
-2. If the subject value is not an instance of "name_or_attr" (tested
-   via "isinstance()"), the class pattern fails.
-
-3. If no pattern arguments are present, the pattern succeeds.
-   Otherwise, the subsequent steps depend on whether keyword or
-   positional argument patterns are present.
-
-   For a number of built-in types (specified below), a single
-   positional subpattern is accepted which will match the entire
-   subject; for these types keyword patterns also work as for other
-   types.
-
-   If only keyword patterns are present, they are processed as
-   follows, one by one:
-
-   1. The keyword is looked up as an attribute on the subject.
-
-      * If this raises an exception other than "AttributeError", the
-        exception bubbles up.
-
-      * If this raises "AttributeError", the class pattern has failed.
-
-      * Else, the subpattern associated with the keyword pattern is
-        matched against the subject’s attribute value. If this fails,
-        the class pattern fails; if this succeeds, the match proceeds
-        to the next keyword.
-
-   2. If all keyword patterns succeed, the class pattern succeeds.
-
-   If any positional patterns are present, they are converted to
-   keyword patterns using the "__match_args__" attribute on the class
-   "name_or_attr" before matching:
-
-   1. The equivalent of "getattr(cls, "__match_args__", ())" is
-      called.
-
-      * If this raises an exception, the exception bubbles up.
-
-      * If the returned value is not a tuple, the conversion fails and
-        "TypeError" is raised.
-
-      * If there are more positional patterns than
-        "len(cls.__match_args__)", "TypeError" is raised.
-
-      * Otherwise, positional pattern "i" is converted to a keyword
-        pattern using "__match_args__[i]" as the keyword.
-        "__match_args__[i]" must be a string; if not "TypeError" is
-        raised.
-
-      * If there are duplicate keywords, "TypeError" is raised.
-
-      See also:
-
-        Customizing positional arguments in class pattern matching
-
-   2. Once all positional patterns have been converted to keyword
-      patterns, the match proceeds as if there were only keyword
-      patterns.
-
-   For the following built-in types the handling of positional
-   subpatterns is different:
-
-   * "bool"
-
-   * "bytearray"
-
-   * "bytes"
-
-   * "dict"
-
-   * "float"
-
-   * "frozenset"
-
-   * "int"
-
-   * "list"
-
-   * "set"
-
-   * "str"
-
-   * "tuple"
-
-   These classes accept a single positional argument, and the pattern
-   there is matched against the whole object rather than an attribute.
-   For example "int(0|1)" matches the value "0", but not the value
-   "0.0".
-
-In simple terms "CLS(P1, attr=P2)" matches only if the following
-happens:
-
-* "isinstance(<subject>, CLS)"
-
-* convert "P1" to a keyword pattern using "CLS.__match_args__"
-
-* For each keyword argument "attr=P2":
-
-  * "hasattr(<subject>, "attr")"
-
-  * "P2" matches "<subject>.attr"
-
-* … and so on for the corresponding keyword argument/pattern pair.
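A short sketch of the special built-in behavior described above (the
"value" name is a hypothetical example): the single positional
subpattern is matched against the whole subject, so the "int" pattern
below matches "0" but not "0.0":

   value = 0
   match value:
       case int(0 | 1):
           print("an int equal to 0 or 1")    # matches 0, not 0.0
       case float() | complex():
           print("some other numeric type")
       case str(s):
           print(f"a string: {s!r}")          # s binds the whole subject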
- -See also: - - * **PEP 634** – Structural Pattern Matching: Specification - - * **PEP 636** – Structural Pattern Matching: Tutorial - - -Function definitions -==================== - -A function definition defines a user-defined function object (see -section The standard type hierarchy): - - funcdef ::= [decorators] "def" funcname [type_params] "(" [parameter_list] ")" - ["->" expression] ":" suite - decorators ::= decorator+ - decorator ::= "@" assignment_expression NEWLINE - parameter_list ::= defparameter ("," defparameter)* "," "/" ["," [parameter_list_no_posonly]] - | parameter_list_no_posonly - parameter_list_no_posonly ::= defparameter ("," defparameter)* ["," [parameter_list_starargs]] - | parameter_list_starargs - parameter_list_starargs ::= "*" [star_parameter] ("," defparameter)* ["," [parameter_star_kwargs]] - | "*" ("," defparameter)+ ["," [parameter_star_kwargs]] - | parameter_star_kwargs - parameter_star_kwargs ::= "**" parameter [","] - parameter ::= identifier [":" expression] - star_parameter ::= identifier [":" ["*"] expression] - defparameter ::= parameter ["=" expression] - funcname ::= identifier - -A function definition is an executable statement. Its execution binds -the function name in the current local namespace to a function object -(a wrapper around the executable code for the function). This -function object contains a reference to the current global namespace -as the global namespace to be used when the function is called. - -The function definition does not execute the function body; this gets -executed only when the function is called. [4] - -A function definition may be wrapped by one or more *decorator* -expressions. Decorator expressions are evaluated when the function is -defined, in the scope that contains the function definition. The -result must be a callable, which is invoked with the function object -as the only argument. The returned value is bound to the function name -instead of the function object. Multiple decorators are applied in -nested fashion. For example, the following code - - @f1(arg) - @f2 - def func(): pass - -is roughly equivalent to - - def func(): pass - func = f1(arg)(f2(func)) - -except that the original function is not temporarily bound to the name -"func". - -Changed in version 3.9: Functions may be decorated with any valid -"assignment_expression". Previously, the grammar was much more -restrictive; see **PEP 614** for details. - -A list of type parameters may be given in square brackets between the -function’s name and the opening parenthesis for its parameter list. -This indicates to static type checkers that the function is generic. -At runtime, the type parameters can be retrieved from the function’s -"__type_params__" attribute. See Generic functions for more. - -Changed in version 3.12: Type parameter lists are new in Python 3.12. - -When one or more *parameters* have the form *parameter* "=" -*expression*, the function is said to have “default parameter values.” -For a parameter with a default value, the corresponding *argument* may -be omitted from a call, in which case the parameter’s default value is -substituted. If a parameter has a default value, all following -parameters up until the “"*"” must also have a default value — this is -a syntactic restriction that is not expressed by the grammar. 
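As a sketch of the restriction just described (the function names here
are hypothetical), a parameter without a default may not follow a
parameter with one, unless it is keyword-only:

   def ok(a, b=1, *, c):        # allowed: "c" is keyword-only
       return a, b, c

   def broken(a=1, b):          # SyntaxError: a parameter without a
       return a, b              # default follows one with a default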
- -**Default parameter values are evaluated from left to right when the -function definition is executed.** This means that the expression is -evaluated once, when the function is defined, and that the same “pre- -computed” value is used for each call. This is especially important -to understand when a default parameter value is a mutable object, such -as a list or a dictionary: if the function modifies the object (e.g. -by appending an item to a list), the default parameter value is in -effect modified. This is generally not what was intended. A way -around this is to use "None" as the default, and explicitly test for -it in the body of the function, e.g.: - - def whats_on_the_telly(penguin=None): - if penguin is None: - penguin = [] - penguin.append("property of the zoo") - return penguin - -Function call semantics are described in more detail in section Calls. -A function call always assigns values to all parameters mentioned in -the parameter list, either from positional arguments, from keyword -arguments, or from default values. If the form “"*identifier"” is -present, it is initialized to a tuple receiving any excess positional -parameters, defaulting to the empty tuple. If the form -“"**identifier"” is present, it is initialized to a new ordered -mapping receiving any excess keyword arguments, defaulting to a new -empty mapping of the same type. Parameters after “"*"” or -“"*identifier"” are keyword-only parameters and may only be passed by -keyword arguments. Parameters before “"/"” are positional-only -parameters and may only be passed by positional arguments. - -Changed in version 3.8: The "/" function parameter syntax may be used -to indicate positional-only parameters. See **PEP 570** for details. - -Parameters may have an *annotation* of the form “": expression"” -following the parameter name. Any parameter may have an annotation, -even those of the form "*identifier" or "**identifier". (As a special -case, parameters of the form "*identifier" may have an annotation “": -*expression"”.) Functions may have “return” annotation of the form -“"-> expression"” after the parameter list. These annotations can be -any valid Python expression. The presence of annotations does not -change the semantics of a function. The annotation values are -available as values of a dictionary keyed by the parameters’ names in -the "__annotations__" attribute of the function object. If the -"annotations" import from "__future__" is used, annotations are -preserved as strings at runtime which enables postponed evaluation. -Otherwise, they are evaluated when the function definition is -executed. In this case annotations may be evaluated in a different -order than they appear in the source code. - -Changed in version 3.11: Parameters of the form “"*identifier"” may -have an annotation “": *expression"”. See **PEP 646**. - -It is also possible to create anonymous functions (functions not bound -to a name), for immediate use in expressions. This uses lambda -expressions, described in section Lambdas. Note that the lambda -expression is merely a shorthand for a simplified function definition; -a function defined in a “"def"” statement can be passed around or -assigned to another name just like a function defined by a lambda -expression. The “"def"” form is actually more powerful since it -allows the execution of multiple statements and annotations. - -**Programmer’s note:** Functions are first-class objects. 
A “"def"” -statement executed inside a function definition defines a local -function that can be returned or passed around. Free variables used -in the nested function can access the local variables of the function -containing the def. See section Naming and binding for details. - -See also: - - **PEP 3107** - Function Annotations - The original specification for function annotations. - - **PEP 484** - Type Hints - Definition of a standard meaning for annotations: type hints. - - **PEP 526** - Syntax for Variable Annotations - Ability to type hint variable declarations, including class - variables and instance variables. - - **PEP 563** - Postponed Evaluation of Annotations - Support for forward references within annotations by preserving - annotations in a string form at runtime instead of eager - evaluation. - - **PEP 318** - Decorators for Functions and Methods - Function and method decorators were introduced. Class decorators - were introduced in **PEP 3129**. - - -Class definitions -================= - -A class definition defines a class object (see section The standard -type hierarchy): - - classdef ::= [decorators] "class" classname [type_params] [inheritance] ":" suite - inheritance ::= "(" [argument_list] ")" - classname ::= identifier - -A class definition is an executable statement. The inheritance list -usually gives a list of base classes (see Metaclasses for more -advanced uses), so each item in the list should evaluate to a class -object which allows subclassing. Classes without an inheritance list -inherit, by default, from the base class "object"; hence, - - class Foo: - pass - -is equivalent to - - class Foo(object): - pass - -The class’s suite is then executed in a new execution frame (see -Naming and binding), using a newly created local namespace and the -original global namespace. (Usually, the suite contains mostly -function definitions.) When the class’s suite finishes execution, its -execution frame is discarded but its local namespace is saved. [5] A -class object is then created using the inheritance list for the base -classes and the saved local namespace for the attribute dictionary. -The class name is bound to this class object in the original local -namespace. - -The order in which attributes are defined in the class body is -preserved in the new class’s "__dict__". Note that this is reliable -only right after the class is created and only for classes that were -defined using the definition syntax. - -Class creation can be customized heavily using metaclasses. - -Classes can also be decorated: just like when decorating functions, - - @f1(arg) - @f2 - class Foo: pass - -is roughly equivalent to - - class Foo: pass - Foo = f1(arg)(f2(Foo)) - -The evaluation rules for the decorator expressions are the same as for -function decorators. The result is then bound to the class name. - -Changed in version 3.9: Classes may be decorated with any valid -"assignment_expression". Previously, the grammar was much more -restrictive; see **PEP 614** for details. - -A list of type parameters may be given in square brackets immediately -after the class’s name. This indicates to static type checkers that -the class is generic. At runtime, the type parameters can be retrieved -from the class’s "__type_params__" attribute. See Generic classes for -more. - -Changed in version 3.12: Type parameter lists are new in Python 3.12. - -**Programmer’s note:** Variables defined in the class definition are -class attributes; they are shared by instances. 
Instance attributes -can be set in a method with "self.name = value". Both class and -instance attributes are accessible through the notation “"self.name"”, -and an instance attribute hides a class attribute with the same name -when accessed in this way. Class attributes can be used as defaults -for instance attributes, but using mutable values there can lead to -unexpected results. Descriptors can be used to create instance -variables with different implementation details. - -See also: - - **PEP 3115** - Metaclasses in Python 3000 - The proposal that changed the declaration of metaclasses to the - current syntax, and the semantics for how classes with - metaclasses are constructed. - - **PEP 3129** - Class Decorators - The proposal that added class decorators. Function and method - decorators were introduced in **PEP 318**. - - -Coroutines -========== - -Added in version 3.5. - - -Coroutine function definition ------------------------------ - - async_funcdef ::= [decorators] "async" "def" funcname "(" [parameter_list] ")" - ["->" expression] ":" suite - -Execution of Python coroutines can be suspended and resumed at many -points (see *coroutine*). "await" expressions, "async for" and "async -with" can only be used in the body of a coroutine function. - -Functions defined with "async def" syntax are always coroutine -functions, even if they do not contain "await" or "async" keywords. - -It is a "SyntaxError" to use a "yield from" expression inside the body -of a coroutine function. - -An example of a coroutine function: - - async def func(param1, param2): - do_stuff() - await some_coroutine() - -Changed in version 3.7: "await" and "async" are now keywords; -previously they were only treated as such inside the body of a -coroutine function. - - -The "async for" statement -------------------------- - - async_for_stmt ::= "async" for_stmt - -An *asynchronous iterable* provides an "__aiter__" method that -directly returns an *asynchronous iterator*, which can call -asynchronous code in its "__anext__" method. - -The "async for" statement allows convenient iteration over -asynchronous iterables. - -The following code: - - async for TARGET in ITER: - SUITE - else: - SUITE2 - -Is semantically equivalent to: - - iter = (ITER).__aiter__() - running = True - - while running: - try: - TARGET = await iter.__anext__() - except StopAsyncIteration: - running = False - else: - SUITE - else: - SUITE2 - -except that implicit special method lookup is used for "__aiter__()" -and "__anext__()". - -It is a "SyntaxError" to use an "async for" statement outside the body -of a coroutine function. - - -The "async with" statement --------------------------- - - async_with_stmt ::= "async" with_stmt - -An *asynchronous context manager* is a *context manager* that is able -to suspend execution in its *enter* and *exit* methods. - -The following code: - - async with EXPRESSION as TARGET: - SUITE - -is semantically equivalent to: - - manager = (EXPRESSION) - aenter = manager.__aenter__ - aexit = manager.__aexit__ - value = await aenter() - hit_except = False - - try: - TARGET = value - SUITE - except: - hit_except = True - if not await aexit(*sys.exc_info()): - raise - finally: - if not hit_except: - await aexit(None, None, None) - -except that implicit special method lookup is used for "__aenter__()" -and "__aexit__()". - -It is a "SyntaxError" to use an "async with" statement outside the -body of a coroutine function. 
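As an illustrative sketch of the protocol above (the "Resource" class
and the use of "asyncio" are assumptions for the example, not part of
the statement’s semantics):

   import asyncio

   class Resource:
       async def __aenter__(self):
           await asyncio.sleep(0)    # stand-in for an async acquire
           return self

       async def __aexit__(self, exc_type, exc_value, traceback):
           await asyncio.sleep(0)    # stand-in for an async release
           return False              # do not suppress exceptions

   async def main():
       async with Resource() as res:
           print("suite runs with", res)

   asyncio.run(main())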
- -See also: - - **PEP 492** - Coroutines with async and await syntax - The proposal that made coroutines a proper standalone concept in - Python, and added supporting syntax. - - -Type parameter lists -==================== - -Added in version 3.12. - -Changed in version 3.13: Support for default values was added (see -**PEP 696**). - - type_params ::= "[" type_param ("," type_param)* "]" - type_param ::= typevar | typevartuple | paramspec - typevar ::= identifier (":" expression)? ("=" expression)? - typevartuple ::= "*" identifier ("=" expression)? - paramspec ::= "**" identifier ("=" expression)? - -Functions (including coroutines), classes and type aliases may contain -a type parameter list: - - def max[T](args: list[T]) -> T: - ... - - async def amax[T](args: list[T]) -> T: - ... - - class Bag[T]: - def __iter__(self) -> Iterator[T]: - ... - - def add(self, arg: T) -> None: - ... - - type ListOrSet[T] = list[T] | set[T] - -Semantically, this indicates that the function, class, or type alias -is generic over a type variable. This information is primarily used by -static type checkers, and at runtime, generic objects behave much like -their non-generic counterparts. - -Type parameters are declared in square brackets ("[]") immediately -after the name of the function, class, or type alias. The type -parameters are accessible within the scope of the generic object, but -not elsewhere. Thus, after a declaration "def func[T](): pass", the -name "T" is not available in the module scope. Below, the semantics of -generic objects are described with more precision. The scope of type -parameters is modeled with a special function (technically, an -annotation scope) that wraps the creation of the generic object. - -Generic functions, classes, and type aliases have a "__type_params__" -attribute listing their type parameters. - -Type parameters come in three kinds: - -* "typing.TypeVar", introduced by a plain name (e.g., "T"). - Semantically, this represents a single type to a type checker. - -* "typing.TypeVarTuple", introduced by a name prefixed with a single - asterisk (e.g., "*Ts"). Semantically, this stands for a tuple of any - number of types. - -* "typing.ParamSpec", introduced by a name prefixed with two asterisks - (e.g., "**P"). Semantically, this stands for the parameters of a - callable. - -"typing.TypeVar" declarations can define *bounds* and *constraints* -with a colon (":") followed by an expression. A single expression -after the colon indicates a bound (e.g. "T: int"). Semantically, this -means that the "typing.TypeVar" can only represent types that are a -subtype of this bound. A parenthesized tuple of expressions after the -colon indicates a set of constraints (e.g. "T: (str, bytes)"). Each -member of the tuple should be a type (again, this is not enforced at -runtime). Constrained type variables can only take on one of the types -in the list of constraints. - -For "typing.TypeVar"s declared using the type parameter list syntax, -the bound and constraints are not evaluated when the generic object is -created, but only when the value is explicitly accessed through the -attributes "__bound__" and "__constraints__". To accomplish this, the -bounds or constraints are evaluated in a separate annotation scope. - -"typing.TypeVarTuple"s and "typing.ParamSpec"s cannot have bounds or -constraints. - -All three flavors of type parameters can also have a *default value*, -which is used when the type parameter is not explicitly provided. 
This -is added by appending a single equals sign ("=") followed by an -expression. Like the bounds and constraints of type variables, the -default value is not evaluated when the object is created, but only -when the type parameter’s "__default__" attribute is accessed. To this -end, the default value is evaluated in a separate annotation scope. If -no default value is specified for a type parameter, the "__default__" -attribute is set to the special sentinel object "typing.NoDefault". - -The following example indicates the full set of allowed type parameter -declarations: - - def overly_generic[ - SimpleTypeVar, - TypeVarWithDefault = int, - TypeVarWithBound: int, - TypeVarWithConstraints: (str, bytes), - *SimpleTypeVarTuple = (int, float), - **SimpleParamSpec = (str, bytearray), - ]( - a: SimpleTypeVar, - b: TypeVarWithDefault, - c: TypeVarWithBound, - d: Callable[SimpleParamSpec, TypeVarWithConstraints], - *e: SimpleTypeVarTuple, - ): ... - - -Generic functions ------------------ - -Generic functions are declared as follows: - - def func[T](arg: T): ... - -This syntax is equivalent to: - - annotation-def TYPE_PARAMS_OF_func(): - T = typing.TypeVar("T") - def func(arg: T): ... - func.__type_params__ = (T,) - return func - func = TYPE_PARAMS_OF_func() - -Here "annotation-def" indicates an annotation scope, which is not -actually bound to any name at runtime. (One other liberty is taken in -the translation: the syntax does not go through attribute access on -the "typing" module, but creates an instance of "typing.TypeVar" -directly.) - -The annotations of generic functions are evaluated within the -annotation scope used for declaring the type parameters, but the -function’s defaults and decorators are not. - -The following example illustrates the scoping rules for these cases, -as well as for additional flavors of type parameters: - - @decorator - def func[T: int, *Ts, **P](*args: *Ts, arg: Callable[P, T] = some_default): - ... - -Except for the lazy evaluation of the "TypeVar" bound, this is -equivalent to: - - DEFAULT_OF_arg = some_default - - annotation-def TYPE_PARAMS_OF_func(): - - annotation-def BOUND_OF_T(): - return int - # In reality, BOUND_OF_T() is evaluated only on demand. - T = typing.TypeVar("T", bound=BOUND_OF_T()) - - Ts = typing.TypeVarTuple("Ts") - P = typing.ParamSpec("P") - - def func(*args: *Ts, arg: Callable[P, T] = DEFAULT_OF_arg): - ... - - func.__type_params__ = (T, Ts, P) - return func - func = decorator(TYPE_PARAMS_OF_func()) - -The capitalized names like "DEFAULT_OF_arg" are not actually bound at -runtime. - - -Generic classes ---------------- - -Generic classes are declared as follows: - - class Bag[T]: ... - -This syntax is equivalent to: - - annotation-def TYPE_PARAMS_OF_Bag(): - T = typing.TypeVar("T") - class Bag(typing.Generic[T]): - __type_params__ = (T,) - ... - return Bag - Bag = TYPE_PARAMS_OF_Bag() - -Here again "annotation-def" (not a real keyword) indicates an -annotation scope, and the name "TYPE_PARAMS_OF_Bag" is not actually -bound at runtime. - -Generic classes implicitly inherit from "typing.Generic". The base -classes and keyword arguments of generic classes are evaluated within -the type scope for the type parameters, and decorators are evaluated -outside that scope. This is illustrated by this example: - - @decorator - class Bag(Base[T], arg=T): ... - -This is equivalent to: - - annotation-def TYPE_PARAMS_OF_Bag(): - T = typing.TypeVar("T") - class Bag(Base[T], typing.Generic[T], arg=T): - __type_params__ = (T,) - ... 
- return Bag - Bag = decorator(TYPE_PARAMS_OF_Bag()) - - -Generic type aliases --------------------- - -The "type" statement can also be used to create a generic type alias: - - type ListOrSet[T] = list[T] | set[T] - -Except for the lazy evaluation of the value, this is equivalent to: - - annotation-def TYPE_PARAMS_OF_ListOrSet(): - T = typing.TypeVar("T") - - annotation-def VALUE_OF_ListOrSet(): - return list[T] | set[T] - # In reality, the value is lazily evaluated - return typing.TypeAliasType("ListOrSet", VALUE_OF_ListOrSet(), type_params=(T,)) - ListOrSet = TYPE_PARAMS_OF_ListOrSet() - -Here, "annotation-def" (not a real keyword) indicates an annotation -scope. The capitalized names like "TYPE_PARAMS_OF_ListOrSet" are not -actually bound at runtime. - --[ Footnotes ]- - -[1] The exception is propagated to the invocation stack unless there - is a "finally" clause which happens to raise another exception. - That new exception causes the old one to be lost. - -[2] In pattern matching, a sequence is defined as one of the - following: - - * a class that inherits from "collections.abc.Sequence" - - * a Python class that has been registered as - "collections.abc.Sequence" - - * a builtin class that has its (CPython) "Py_TPFLAGS_SEQUENCE" bit - set - - * a class that inherits from any of the above - - The following standard library classes are sequences: - - * "array.array" - - * "collections.deque" - - * "list" - - * "memoryview" - - * "range" - - * "tuple" - - Note: - - Subject values of type "str", "bytes", and "bytearray" do not - match sequence patterns. - -[3] In pattern matching, a mapping is defined as one of the following: - - * a class that inherits from "collections.abc.Mapping" - - * a Python class that has been registered as - "collections.abc.Mapping" - - * a builtin class that has its (CPython) "Py_TPFLAGS_MAPPING" bit - set - - * a class that inherits from any of the above - - The standard library classes "dict" and "types.MappingProxyType" - are mappings. - -[4] A string literal appearing as the first statement in the function - body is transformed into the function’s "__doc__" attribute and - therefore the function’s *docstring*. - -[5] A string literal appearing as the first statement in the class - body is transformed into the namespace’s "__doc__" item and - therefore the class’s *docstring*. -''', - 'context-managers': r'''With Statement Context Managers -******************************* - -A *context manager* is an object that defines the runtime context to -be established when executing a "with" statement. The context manager -handles the entry into, and the exit from, the desired runtime context -for the execution of the block of code. Context managers are normally -invoked using the "with" statement (described in section The with -statement), but can also be used by directly invoking their methods. - -Typical uses of context managers include saving and restoring various -kinds of global state, locking and unlocking resources, closing opened -files, etc. - -For more information on context managers, see Context Manager Types. -The "object" class itself does not provide the context manager -methods. - -object.__enter__(self) - - Enter the runtime context related to this object. The "with" - statement will bind this method’s return value to the target(s) - specified in the "as" clause of the statement, if any. - -object.__exit__(self, exc_type, exc_value, traceback) - - Exit the runtime context related to this object. 
The parameters - describe the exception that caused the context to be exited. If the - context was exited without an exception, all three arguments will - be "None". - - If an exception is supplied, and the method wishes to suppress the - exception (i.e., prevent it from being propagated), it should - return a true value. Otherwise, the exception will be processed - normally upon exit from this method. - - Note that "__exit__()" methods should not reraise the passed-in - exception; this is the caller’s responsibility. - -See also: - - **PEP 343** - The “with” statement - The specification, background, and examples for the Python "with" - statement. -''', - 'continue': r'''The "continue" statement -************************ - - continue_stmt ::= "continue" - -"continue" may only occur syntactically nested in a "for" or "while" -loop, but not nested in a function or class definition within that -loop. It continues with the next cycle of the nearest enclosing loop. - -When "continue" passes control out of a "try" statement with a -"finally" clause, that "finally" clause is executed before really -starting the next loop cycle. -''', - 'conversions': r'''Arithmetic conversions -********************** - -When a description of an arithmetic operator below uses the phrase -“the numeric arguments are converted to a common type”, this means -that the operator implementation for built-in types works as follows: - -* If either argument is a complex number, the other is converted to - complex; - -* otherwise, if either argument is a floating-point number, the other - is converted to floating point; - -* otherwise, both must be integers and no conversion is necessary. - -Some additional rules apply for certain operators (e.g., a string as a -left argument to the ‘%’ operator). Extensions must define their own -conversion behavior. -''', - 'customization': r'''Basic customization -******************* - -object.__new__(cls[, ...]) - - Called to create a new instance of class *cls*. "__new__()" is a - static method (special-cased so you need not declare it as such) - that takes the class of which an instance was requested as its - first argument. The remaining arguments are those passed to the - object constructor expression (the call to the class). The return - value of "__new__()" should be the new object instance (usually an - instance of *cls*). - - Typical implementations create a new instance of the class by - invoking the superclass’s "__new__()" method using - "super().__new__(cls[, ...])" with appropriate arguments and then - modifying the newly created instance as necessary before returning - it. - - If "__new__()" is invoked during object construction and it returns - an instance of *cls*, then the new instance’s "__init__()" method - will be invoked like "__init__(self[, ...])", where *self* is the - new instance and the remaining arguments are the same as were - passed to the object constructor. - - If "__new__()" does not return an instance of *cls*, then the new - instance’s "__init__()" method will not be invoked. - - "__new__()" is intended mainly to allow subclasses of immutable - types (like int, str, or tuple) to customize instance creation. It - is also commonly overridden in custom metaclasses in order to - customize class creation. - -object.__init__(self[, ...]) - - Called after the instance has been created (by "__new__()"), but - before it is returned to the caller. The arguments are those - passed to the class constructor expression. 
If a base class has an - "__init__()" method, the derived class’s "__init__()" method, if - any, must explicitly call it to ensure proper initialization of the - base class part of the instance; for example: - "super().__init__([args...])". - - Because "__new__()" and "__init__()" work together in constructing - objects ("__new__()" to create it, and "__init__()" to customize - it), no non-"None" value may be returned by "__init__()"; doing so - will cause a "TypeError" to be raised at runtime. - -object.__del__(self) - - Called when the instance is about to be destroyed. This is also - called a finalizer or (improperly) a destructor. If a base class - has a "__del__()" method, the derived class’s "__del__()" method, - if any, must explicitly call it to ensure proper deletion of the - base class part of the instance. - - It is possible (though not recommended!) for the "__del__()" method - to postpone destruction of the instance by creating a new reference - to it. This is called object *resurrection*. It is - implementation-dependent whether "__del__()" is called a second - time when a resurrected object is about to be destroyed; the - current *CPython* implementation only calls it once. - - It is not guaranteed that "__del__()" methods are called for - objects that still exist when the interpreter exits. - "weakref.finalize" provides a straightforward way to register a - cleanup function to be called when an object is garbage collected. - - Note: - - "del x" doesn’t directly call "x.__del__()" — the former - decrements the reference count for "x" by one, and the latter is - only called when "x"’s reference count reaches zero. - - **CPython implementation detail:** It is possible for a reference - cycle to prevent the reference count of an object from going to - zero. In this case, the cycle will be later detected and deleted - by the *cyclic garbage collector*. A common cause of reference - cycles is when an exception has been caught in a local variable. - The frame’s locals then reference the exception, which references - its own traceback, which references the locals of all frames caught - in the traceback. - - See also: Documentation for the "gc" module. - - Warning: - - Due to the precarious circumstances under which "__del__()" - methods are invoked, exceptions that occur during their execution - are ignored, and a warning is printed to "sys.stderr" instead. - In particular: - - * "__del__()" can be invoked when arbitrary code is being - executed, including from any arbitrary thread. If "__del__()" - needs to take a lock or invoke any other blocking resource, it - may deadlock as the resource may already be taken by the code - that gets interrupted to execute "__del__()". - - * "__del__()" can be executed during interpreter shutdown. As a - consequence, the global variables it needs to access (including - other modules) may already have been deleted or set to "None". - Python guarantees that globals whose name begins with a single - underscore are deleted from their module before other globals - are deleted; if no other references to such globals exist, this - may help in assuring that imported modules are still available - at the time when the "__del__()" method is called. - -object.__repr__(self) - - Called by the "repr()" built-in function to compute the “official” - string representation of an object. If at all possible, this - should look like a valid Python expression that could be used to - recreate an object with the same value (given an appropriate - environment). 
If this is not possible, a string of the form
-   "<...some useful description...>" should be returned. The return
-   value must be a string object. If a class defines "__repr__()" but
-   not "__str__()", then "__repr__()" is also used when an “informal”
-   string representation of instances of that class is required.
-
-   This is typically used for debugging, so it is important that the
-   representation is information-rich and unambiguous. A default
-   implementation is provided by the "object" class itself.
-
-object.__str__(self)
-
-   Called by "str(object)", the default "__format__()" implementation,
-   and the built-in function "print()", to compute the “informal” or
-   nicely printable string representation of an object. The return
-   value must be a str object.
-
-   This method differs from "object.__repr__()" in that there is no
-   expectation that "__str__()" return a valid Python expression: a
-   more convenient or concise representation can be used.
-
-   The default implementation defined by the built-in type "object"
-   calls "object.__repr__()".
-
-object.__bytes__(self)
-
-   Called by bytes to compute a byte-string representation of an
-   object. This should return a "bytes" object. The "object" class
-   itself does not provide this method.
-
-object.__format__(self, format_spec)
-
-   Called by the "format()" built-in function, and by extension,
-   evaluation of formatted string literals and the "str.format()"
-   method, to produce a “formatted” string representation of an
-   object. The *format_spec* argument is a string that contains a
-   description of the formatting options desired. The interpretation
-   of the *format_spec* argument is up to the type implementing
-   "__format__()", however most classes will either delegate
-   formatting to one of the built-in types, or use a similar
-   formatting option syntax.
-
-   See Format specification mini-language for a description of the
-   standard formatting syntax.
-
-   The return value must be a string object.
-
-   The default implementation by the "object" class should be given an
-   empty *format_spec* string. It delegates to "__str__()".
-
-   Changed in version 3.4: The __format__ method of "object" itself
-   raises a "TypeError" if passed any non-empty string.
-
-   Changed in version 3.7: "object.__format__(x, '')" is now
-   equivalent to "str(x)" rather than "format(str(x), '')".
-
-object.__lt__(self, other)
-object.__le__(self, other)
-object.__eq__(self, other)
-object.__ne__(self, other)
-object.__gt__(self, other)
-object.__ge__(self, other)
-
-   These are the so-called “rich comparison” methods. The
-   correspondence between operator symbols and method names is as
-   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",
-   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls
-   "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".
-
-   A rich comparison method may return the singleton "NotImplemented"
-   if it does not implement the operation for a given pair of
-   arguments. By convention, "False" and "True" are returned for a
-   successful comparison. However, these methods can return any value,
-   so if the comparison operator is used in a Boolean context (e.g.,
-   in the condition of an "if" statement), Python will call "bool()"
-   on the value to determine if the result is true or false.
-
-   By default, "object" implements "__eq__()" by using "is", returning
-   "NotImplemented" in the case of a false comparison: "True if x is y
-   else NotImplemented". For "__ne__()", by default it delegates to
-   "__eq__()" and inverts the result unless it is "NotImplemented".
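A minimal sketch of the convention described above (the "Version"
class is a hypothetical example): returning "NotImplemented" for
unsupported operand types lets Python try the reflected operation on
the other operand before giving up:

   class Version:
       def __init__(self, *parts):
           self.parts = parts

       def __eq__(self, other):
           if not isinstance(other, Version):
               return NotImplemented     # let the other operand try
           return self.parts == other.parts

       def __lt__(self, other):
           if not isinstance(other, Version):
               return NotImplemented
           return self.parts < other.parts

   assert Version(1, 2) < Version(1, 3)
   assert Version(1, 2) != "1.2"         # falls back to "is not"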
-   There are no other implied relationships among the comparison
-   operators or default implementations; for example, the truth of
-   "(x<y or x==y)" does not imply "x<=y". To automatically generate
-   ordering operations from a single root operation, see
-   "functools.total_ordering()".
-
-   There are no swapped-argument versions of these methods (to be used
-   when the left argument does not support the operation but the right
-   argument does); rather, "__lt__()" and "__gt__()" are each other’s
-   reflection, and so are "__le__()" and "__ge__()". If the operands
-   are of different types, and the right operand’s type is a direct or
-   indirect subclass of the left operand’s type, the reflected method
-   of the right operand has priority, otherwise the left operand’s
-   method has priority. Virtual subclassing is not considered.
-
-   When no appropriate method returns any value other than
-   "NotImplemented", the "==" and "!=" operators will fall back to
-   "is" and "is not", respectively.
-
-object.__hash__(self)
-
-   Called by built-in function "hash()" and for operations on members
-   of hashed collections including "set", "frozenset", and "dict".
-   The "__hash__()" method should return an integer. The only required
-   property is that objects which compare equal have the same hash
-   value; it is advised to mix together the hash values of the
-   components of the object that also play a part in comparison of
-   objects by packing them into a tuple and hashing the tuple.
-   Example:
-
-      def __hash__(self):
-          return hash((self.name, self.nick, self.color))
-
-   If a class does not define an "__eq__()" method it should not
-   define a "__hash__()" operation either; if it defines "__eq__()"
-   but not "__hash__()", its instances will not be usable as items in
-   hashable collections. If a class defines mutable objects and
-   implements an "__eq__()" method, it should not implement
-   "__hash__()", since the implementation of hashable collections
-   requires that a key’s hash value is immutable (if the object’s hash
-   value changes, it will be in the wrong hash bucket).
-
-   A class that overrides "__eq__()" and does not define "__hash__()"
-   will have its "__hash__()" implicitly set to "None". When the
-   "__hash__()" method of a class is "None", instances of the class
-   will raise an appropriate "TypeError" when a program attempts to
-   retrieve their hash value, and will also be correctly identified as
-   unhashable when checking "isinstance(obj,
-   collections.abc.Hashable)".
-
-   If a class that overrides "__eq__()" needs to retain the hash
-   implementation from a parent class, the interpreter can be told
-   this explicitly by setting "__hash__ = <ParentClass>.__hash__".
-
-   If a class that does not override "__eq__()" wishes to suppress
-   hash support, it should include "__hash__ = None" in the class
-   definition. A class which defines its own "__hash__()" that
-   explicitly raises a "TypeError" would be incorrectly identified as
-   hashable by an "isinstance(obj, collections.abc.Hashable)" call.
-
-   Note:
-
-     By default, the "__hash__()" values of str and bytes objects are
-     “salted” with an unpredictable random value. Although they
-     remain constant within an individual Python process, they are not
-     predictable between repeated invocations of Python.
-
-     This is intended to provide protection against a denial-of-
-     service caused by carefully chosen inputs that exploit the worst
-     case performance of a dict insertion, *O*(*n*^2) complexity. See
-     https://ocert.org/advisories/ocert-2011-003.html for details.
-
-     Changing hash values affects the iteration order of sets. Python
-     has never made guarantees about this ordering (and it typically
-     varies between 32-bit and 64-bit builds).
-
-     See also "PYTHONHASHSEED".
-
-   Changed in version 3.3: Hash randomization is enabled by default.
-
-object.__bool__(self)
-
-   Called to implement truth value testing and the built-in operation
-   "bool()"; should return "False" or "True". When this method is not
-   defined, "__len__()" is called, if it is defined, and the object is
-   considered true if its result is nonzero. If a class defines
-   neither "__len__()" nor "__bool__()" (which is true of the "object"
-   class itself), all its instances are considered true.
-''',
- 'debugger': r'''"pdb" — The Python Debugger
-***************************
-
-**Source code:** Lib/pdb.py
-
-======================================================================
-
-The module "pdb" defines an interactive source code debugger for
-Python programs. It supports setting (conditional) breakpoints and
-single stepping at the source line level, inspection of stack frames,
-source code listing, and evaluation of arbitrary Python code in the
-context of any stack frame. It also supports post-mortem debugging
-and can be called under program control.
-
-The debugger is extensible – it is actually defined as the class
-"Pdb". This is currently undocumented but easily understood by reading
-the source. The extension interface uses the modules "bdb" and "cmd".
-
-See also:
-
-  Module "faulthandler"
-     Used to dump Python tracebacks explicitly, on a fault, after a
-     timeout, or on a user signal.
-
-  Module "traceback"
-     Standard interface to extract, format and print stack traces of
-     Python programs.
-
-The typical usage to break into the debugger is to insert:
-
-   import pdb; pdb.set_trace()
-
-Or:
-
-   breakpoint()
-
-at the location you want to break into the debugger, and then run the
-program. You can then step through the code following this statement,
-and continue running without the debugger using the "continue"
-command.
-
-Changed in version 3.7: The built-in "breakpoint()", when called with
-defaults, can be used instead of "import pdb; pdb.set_trace()".
-
-   def double(x):
-      breakpoint()
-      return x * 2
-   val = 3
-   print(f"{val} * 2 is {double(val)}")
-
-The debugger’s prompt is "(Pdb)", which is the indicator that you are
-in debug mode:
-
-   > ...(2)double()
-   -> breakpoint()
-   (Pdb) p x
-   3
-   (Pdb) continue
-   3 * 2 is 6
-
-Changed in version 3.3: Tab-completion via the "readline" module is
-available for commands and command arguments, e.g. the current global
-and local names are offered as arguments of the "p" command.
-
-
-Command-line interface
-======================
-
-You can also invoke "pdb" from the command line to debug other
-scripts. For example:
-
-   python -m pdb [-c command] (-m module | pyfile) [args ...]
-
-When invoked as a module, pdb will automatically enter post-mortem
-debugging if the program being debugged exits abnormally. After post-
-mortem debugging (or after normal exit of the program), pdb will
-restart the program. Automatic restarting preserves pdb’s state (such
-as breakpoints) and in most cases is more useful than quitting the
-debugger upon program’s exit.
-
--c, --command
-
-   To execute commands as if given in a ".pdbrc" file; see Debugger
-   commands.
-
-   Changed in version 3.2: Added the "-c" option.
-
--m
-
-   To execute modules similar to the way "python -m" does. As with a
-   script, the debugger will pause execution just before the first
-   line of the module.
-
-   Changed in version 3.7: Added the "-m" option.
-
-Typical usage to execute a statement under control of the debugger is:
-
-   >>> import pdb
-   >>> def f(x):
-   ...     print(1 / x)
-   >>> pdb.run("f(2)")
-   > <string>(1)<module>()
-   (Pdb) continue
-   0.5
-   >>>
-
-The typical usage to inspect a crashed program is:
-
-   >>> import pdb
-   >>> def f(x):
-   ...     print(1 / x)
-   ...
-   >>> f(0)
-   Traceback (most recent call last):
-     File "<stdin>", line 1, in <module>
-     File "<stdin>", line 2, in f
-   ZeroDivisionError: division by zero
-   >>> pdb.pm()
-   > <stdin>(2)f()
-   (Pdb) p x
-   0
-   (Pdb)
-
-Changed in version 3.13: The implementation of **PEP 667** means that
-name assignments made via "pdb" will immediately affect the active
-scope, even when running inside an *optimized scope*.
-
-The module defines the following functions; each enters the debugger
-in a slightly different way:
-
-pdb.run(statement, globals=None, locals=None)
-
-   Execute the *statement* (given as a string or a code object) under
-   debugger control. The debugger prompt appears before any code is
-   executed; you can set breakpoints and type "continue", or you can
-   step through the statement using "step" or "next" (all these
-   commands are explained below). The optional *globals* and *locals*
-   arguments specify the environment in which the code is executed; by
-   default the dictionary of the module "__main__" is used. (See the
-   explanation of the built-in "exec()" or "eval()" functions.)
-
-pdb.runeval(expression, globals=None, locals=None)
-
-   Evaluate the *expression* (given as a string or a code object)
-   under debugger control. When "runeval()" returns, it returns the
-   value of the *expression*. Otherwise this function is similar to
-   "run()".
-
-pdb.runcall(function, *args, **kwds)
-
-   Call the *function* (a function or method object, not a string)
-   with the given arguments. When "runcall()" returns, it returns
-   whatever the function call returned. The debugger prompt appears
-   as soon as the function is entered.
-
-pdb.set_trace(*, header=None)
-
-   Enter the debugger at the calling stack frame. This is useful to
-   hard-code a breakpoint at a given point in a program, even if the
-   code is not otherwise being debugged (e.g.
when an assertion - fails). If given, *header* is printed to the console just before - debugging begins. - - Changed in version 3.7: The keyword-only argument *header*. - - Changed in version 3.13: "set_trace()" will enter the debugger - immediately, rather than on the next line of code to be executed. - -pdb.post_mortem(t=None) - - Enter post-mortem debugging of the given exception or traceback - object. If no value is given, it uses the exception that is - currently being handled, or raises "ValueError" if there isn’t one. - - Changed in version 3.13: Support for exception objects was added. - -pdb.pm() - - Enter post-mortem debugging of the exception found in - "sys.last_exc". - -The "run*" functions and "set_trace()" are aliases for instantiating -the "Pdb" class and calling the method of the same name. If you want -to access further features, you have to do this yourself: - -class pdb.Pdb(completekey='tab', stdin=None, stdout=None, skip=None, nosigint=False, readrc=True) - - "Pdb" is the debugger class. - - The *completekey*, *stdin* and *stdout* arguments are passed to the - underlying "cmd.Cmd" class; see the description there. - - The *skip* argument, if given, must be an iterable of glob-style - module name patterns. The debugger will not step into frames that - originate in a module that matches one of these patterns. [1] - - By default, Pdb sets a handler for the SIGINT signal (which is sent - when the user presses "Ctrl"-"C" on the console) when you give a - "continue" command. This allows you to break into the debugger - again by pressing "Ctrl"-"C". If you want Pdb not to touch the - SIGINT handler, set *nosigint* to true. - - The *readrc* argument defaults to true and controls whether Pdb - will load .pdbrc files from the filesystem. - - Example call to enable tracing with *skip*: - - import pdb; pdb.Pdb(skip=['django.*']).set_trace() - - Raises an auditing event "pdb.Pdb" with no arguments. - - Changed in version 3.1: Added the *skip* parameter. - - Changed in version 3.2: Added the *nosigint* parameter. Previously, - a SIGINT handler was never set by Pdb. - - Changed in version 3.6: The *readrc* argument. - - run(statement, globals=None, locals=None) - runeval(expression, globals=None, locals=None) - runcall(function, *args, **kwds) - set_trace() - - See the documentation for the functions explained above. - - -Debugger commands -================= - -The commands recognized by the debugger are listed below. Most -commands can be abbreviated to one or two letters as indicated; e.g. -"h(elp)" means that either "h" or "help" can be used to enter the help -command (but not "he" or "hel", nor "H" or "Help" or "HELP"). -Arguments to commands must be separated by whitespace (spaces or -tabs). Optional arguments are enclosed in square brackets ("[]") in -the command syntax; the square brackets must not be typed. -Alternatives in the command syntax are separated by a vertical bar -("|"). - -Entering a blank line repeats the last command entered. Exception: if -the last command was a "list" command, the next 11 lines are listed. - -Commands that the debugger doesn’t recognize are assumed to be Python -statements and are executed in the context of the program being -debugged. Python statements can also be prefixed with an exclamation -point ("!"). This is a powerful way to inspect the program being -debugged; it is even possible to change a variable or call a function. 
-When an exception occurs in such a statement, the exception name is
-printed but the debugger’s state is not changed.
-
-Changed in version 3.13: Expressions/Statements whose prefix is a pdb
-command are now correctly identified and executed.
-
-The debugger supports aliases. Aliases can have parameters, which
-allows a certain level of adaptability to the context under
-examination.
-
-Multiple commands may be entered on a single line, separated by ";;".
-(A single ";" is not used as it is the separator for multiple commands
-in a line that is passed to the Python parser.) No intelligence is
-applied to separating the commands; the input is split at the first
-";;" pair, even if it is in the middle of a quoted string. A
-workaround for strings with double semicolons is to use implicit
-string concatenation "';'';'" or "";"";"".
-
-To set a temporary global variable, use a *convenience variable*. A
-*convenience variable* is a variable whose name starts with "$". For
-example, "$foo = 1" sets a global variable "$foo" which you can use in
-the debugger session. The *convenience variables* are cleared when
-the program resumes execution so it’s less likely to interfere with
-your program compared to using normal variables like "foo = 1".
-
-There are three preset *convenience variables*:
-
-* "$_frame": the current frame you are debugging
-
-* "$_retval": the return value if the frame is returning
-
-* "$_exception": the exception if the frame is raising an exception
-
-Added in version 3.12: Added the *convenience variable* feature.
-
-If a file ".pdbrc" exists in the user’s home directory or in the
-current directory, it is read with "'utf-8'" encoding and executed as
-if it had been typed at the debugger prompt, with the exception that
-empty lines and lines starting with "#" are ignored. This is
-particularly useful for aliases. If both files exist, the one in the
-home directory is read first and aliases defined there can be
-overridden by the local file.
-
-Changed in version 3.2: ".pdbrc" can now contain commands that
-continue debugging, such as "continue" or "next". Previously, these
-commands had no effect.
-
-Changed in version 3.11: ".pdbrc" is now read with "'utf-8'" encoding.
-Previously, it was read with the system locale encoding.
-
-h(elp) [command]
-
-   Without argument, print the list of available commands. With a
-   *command* as argument, print help about that command. "help pdb"
-   displays the full documentation (the docstring of the "pdb"
-   module). Since the *command* argument must be an identifier, "help
-   exec" must be entered to get help on the "!" command.
-
-w(here)
-
-   Print a stack trace, with the most recent frame at the bottom. An
-   arrow (">") indicates the current frame, which determines the
-   context of most commands.
-
-d(own) [count]
-
-   Move the current frame *count* (default one) levels down in the
-   stack trace (to a newer frame).
-
-u(p) [count]
-
-   Move the current frame *count* (default one) levels up in the stack
-   trace (to an older frame).
-
-b(reak) [([filename:]lineno | function) [, condition]]
-
-   With a *lineno* argument, set a break at line *lineno* in the
-   current file. The line number may be prefixed with a *filename* and
-   a colon, to specify a breakpoint in another file (possibly one that
-   hasn’t been loaded yet). The file is searched on "sys.path".
-   Acceptable forms of *filename* are "/abspath/to/file.py",
-   "relpath/file.py", "module" and "package.module".
- - With a *function* argument, set a break at the first executable - statement within that function. *function* can be any expression - that evaluates to a function in the current namespace. - - If a second argument is present, it is an expression which must - evaluate to true before the breakpoint is honored. - - Without argument, list all breaks, including for each breakpoint, - the number of times that breakpoint has been hit, the current - ignore count, and the associated condition if any. - - Each breakpoint is assigned a number to which all the other - breakpoint commands refer. - -tbreak [([filename:]lineno | function) [, condition]] - - Temporary breakpoint, which is removed automatically when it is - first hit. The arguments are the same as for "break". - -cl(ear) [filename:lineno | bpnumber ...] - - With a *filename:lineno* argument, clear all the breakpoints at - this line. With a space separated list of breakpoint numbers, clear - those breakpoints. Without argument, clear all breaks (but first - ask confirmation). - -disable bpnumber [bpnumber ...] - - Disable the breakpoints given as a space separated list of - breakpoint numbers. Disabling a breakpoint means it cannot cause - the program to stop execution, but unlike clearing a breakpoint, it - remains in the list of breakpoints and can be (re-)enabled. - -enable bpnumber [bpnumber ...] - - Enable the breakpoints specified. - -ignore bpnumber [count] - - Set the ignore count for the given breakpoint number. If *count* - is omitted, the ignore count is set to 0. A breakpoint becomes - active when the ignore count is zero. When non-zero, the *count* - is decremented each time the breakpoint is reached and the - breakpoint is not disabled and any associated condition evaluates - to true. - -condition bpnumber [condition] - - Set a new *condition* for the breakpoint, an expression which must - evaluate to true before the breakpoint is honored. If *condition* - is absent, any existing condition is removed; i.e., the breakpoint - is made unconditional. - -commands [bpnumber] - - Specify a list of commands for breakpoint number *bpnumber*. The - commands themselves appear on the following lines. Type a line - containing just "end" to terminate the commands. An example: - - (Pdb) commands 1 - (com) p some_variable - (com) end - (Pdb) - - To remove all commands from a breakpoint, type "commands" and - follow it immediately with "end"; that is, give no commands. - - With no *bpnumber* argument, "commands" refers to the last - breakpoint set. - - You can use breakpoint commands to start your program up again. - Simply use the "continue" command, or "step", or any other command - that resumes execution. - - Specifying any command resuming execution (currently "continue", - "step", "next", "return", "jump", "quit" and their abbreviations) - terminates the command list (as if that command was immediately - followed by end). This is because any time you resume execution - (even with a simple next or step), you may encounter another - breakpoint—which could have its own command list, leading to - ambiguities about which list to execute. - - If you use the "silent" command in the command list, the usual - message about stopping at a breakpoint is not printed. This may be - desirable for breakpoints that are to print a specific message and - then continue. If none of the other commands print anything, you - see no sign that the breakpoint was reached. 
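-
-   For illustration, a hypothetical session (an editor's sketch, not
-   part of the original documentation) that silently prints a value,
-   assuming the debugged program defines "x", and resumes each time
-   breakpoint 1 is hit:
-
-      (Pdb) commands 1
-      (com) silent
-      (com) p f"x is now {x}"
-      (com) continue
-      (Pdb)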
- -s(tep) - - Execute the current line, stop at the first possible occasion - (either in a function that is called or on the next line in the - current function). - -n(ext) - - Continue execution until the next line in the current function is - reached or it returns. (The difference between "next" and "step" - is that "step" stops inside a called function, while "next" - executes called functions at (nearly) full speed, only stopping at - the next line in the current function.) - -unt(il) [lineno] - - Without argument, continue execution until the line with a number - greater than the current one is reached. - - With *lineno*, continue execution until a line with a number - greater or equal to *lineno* is reached. In both cases, also stop - when the current frame returns. - - Changed in version 3.2: Allow giving an explicit line number. - -r(eturn) - - Continue execution until the current function returns. - -c(ont(inue)) - - Continue execution, only stop when a breakpoint is encountered. - -j(ump) lineno - - Set the next line that will be executed. Only available in the - bottom-most frame. This lets you jump back and execute code again, - or jump forward to skip code that you don’t want to run. - - It should be noted that not all jumps are allowed – for instance it - is not possible to jump into the middle of a "for" loop or out of a - "finally" clause. - -l(ist) [first[, last]] - - List source code for the current file. Without arguments, list 11 - lines around the current line or continue the previous listing. - With "." as argument, list 11 lines around the current line. With - one argument, list 11 lines around at that line. With two - arguments, list the given range; if the second argument is less - than the first, it is interpreted as a count. - - The current line in the current frame is indicated by "->". If an - exception is being debugged, the line where the exception was - originally raised or propagated is indicated by ">>", if it differs - from the current line. - - Changed in version 3.2: Added the ">>" marker. - -ll | longlist - - List all source code for the current function or frame. - Interesting lines are marked as for "list". - - Added in version 3.2. - -a(rgs) - - Print the arguments of the current function and their current - values. - -p expression - - Evaluate *expression* in the current context and print its value. - - Note: - - "print()" can also be used, but is not a debugger command — this - executes the Python "print()" function. - -pp expression - - Like the "p" command, except the value of *expression* is pretty- - printed using the "pprint" module. - -whatis expression - - Print the type of *expression*. - -source expression - - Try to get source code of *expression* and display it. - - Added in version 3.2. - -display [expression] - - Display the value of *expression* if it changed, each time - execution stops in the current frame. - - Without *expression*, list all display expressions for the current - frame. - - Note: - - Display evaluates *expression* and compares to the result of the - previous evaluation of *expression*, so when the result is - mutable, display may not be able to pick up the changes. 
-
-   Example:
-
-      lst = []
-      breakpoint()
-      pass
-      lst.append(1)
-      print(lst)
-
-   Display won’t realize "lst" has been changed because the result of
-   evaluation is modified in place by "lst.append(1)" before being
-   compared:
-
-      > example.py(3)<module>()
-      -> pass
-      (Pdb) display lst
-      display lst: []
-      (Pdb) n
-      > example.py(4)<module>()
-      -> lst.append(1)
-      (Pdb) n
-      > example.py(5)<module>()
-      -> print(lst)
-      (Pdb)
-
-   You can do some tricks with the copy mechanism to make it work:
-
-      > example.py(3)<module>()
-      -> pass
-      (Pdb) display lst[:]
-      display lst[:]: []
-      (Pdb) n
-      > example.py(4)<module>()
-      -> lst.append(1)
-      (Pdb) n
-      > example.py(5)<module>()
-      -> print(lst)
-      display lst[:]: [1]  [old: []]
-      (Pdb)
-
-   Added in version 3.2.
-
-undisplay [expression]
-
-   Do not display *expression* anymore in the current frame. Without
-   *expression*, clear all display expressions for the current frame.
-
-   Added in version 3.2.
-
-interact
-
-   Start an interactive interpreter (using the "code" module) in a new
-   global namespace initialised from the local and global namespaces
-   for the current scope. Use "exit()" or "quit()" to exit the
-   interpreter and return to the debugger.
-
-   Note:
-
-     As "interact" creates a new dedicated namespace for code
-     execution, assignments to variables will not affect the original
-     namespaces. However, modifications to any referenced mutable
-     objects will be reflected in the original namespaces as usual.
-
-   Added in version 3.2.
-
-   Changed in version 3.13: "exit()" and "quit()" can be used to exit
-   the "interact" command.
-
-   Changed in version 3.13: "interact" directs its output to the
-   debugger’s output channel rather than "sys.stderr".
-
-alias [name [command]]
-
-   Create an alias called *name* that executes *command*. The
-   *command* must *not* be enclosed in quotes. Replaceable parameters
-   can be indicated by "%1", "%2", … and "%9", while "%*" is replaced
-   by all the parameters. If *command* is omitted, the current alias
-   for *name* is shown. If no arguments are given, all aliases are
-   listed.
-
-   Aliases may be nested and can contain anything that can be legally
-   typed at the pdb prompt. Note that internal pdb commands *can* be
-   overridden by aliases. Such a command is then hidden until the
-   alias is removed. Aliasing is recursively applied to the first
-   word of the command line; all other words in the line are left
-   alone.
-
-   As an example, here are two useful aliases (especially when placed
-   in the ".pdbrc" file):
-
-      # Print instance variables (usage "pi classInst")
-      alias pi for k in %1.__dict__.keys(): print(f"%1.{k} = {%1.__dict__[k]}")
-      # Print instance variables in self
-      alias ps pi self
-
-unalias name
-
-   Delete the specified alias *name*.
-
-! statement
-
-   Execute the (one-line) *statement* in the context of the current
-   stack frame. The exclamation point can be omitted unless the first
-   word of the statement resembles a debugger command, e.g.:
-
-      (Pdb) ! n=42
-      (Pdb)
-
-   To set a global variable, you can prefix the assignment command
-   with a "global" statement on the same line, e.g.:
-
-      (Pdb) global list_options; list_options = ['-l']
-      (Pdb)
-
-run [args ...]
-restart [args ...]
-
-   Restart the debugged Python program. If *args* is supplied, it is
-   split with "shlex" and the result is used as the new "sys.argv".
-   History, breakpoints, actions and debugger options are preserved.
-   "restart" is an alias for "run".
-
-q(uit)
-
-   Quit from the debugger. The program being executed is aborted.
-
-debug code
-
-   Enter a recursive debugger that steps through *code* (which is an
-   arbitrary expression or statement to be executed in the current
-   environment).
-
-retval
-
-   Print the return value for the last return of the current function.
-
-exceptions [excnumber]
-
-   List or jump between chained exceptions.
-
-   When using "pdb.pm()" or "Pdb.post_mortem(...)" with a chained
-   exception instead of a traceback, it allows the user to move
-   between the chained exceptions using the "exceptions" command to
-   list exceptions, and "exceptions <number>" to switch to that
-   exception.
-
-   Example:
-
-      def out():
-          try:
-              middle()
-          except Exception as e:
-              raise ValueError("reraise middle() error") from e
-
-      def middle():
-          try:
-              return inner(0)
-          except Exception as e:
-              raise ValueError("Middle fail")
-
-      def inner(x):
-          1 / x
-
-      out()
-
-   Calling "pdb.pm()" will allow you to move between exceptions:
-
-      > example.py(5)out()
-      -> raise ValueError("reraise middle() error") from e
-
-      (Pdb) exceptions
-        0 ZeroDivisionError('division by zero')
-        1 ValueError('Middle fail')
-      > 2 ValueError('reraise middle() error')
-
-      (Pdb) exceptions 0
-      > example.py(16)inner()
-      -> 1 / x
-
-      (Pdb) up
-      > example.py(10)middle()
-      -> return inner(0)
-
-   Added in version 3.13.
-
--[ Footnotes ]-
-
-[1] Whether a frame is considered to originate in a certain module is
-    determined by the "__name__" in the frame globals.
-''',
- 'del': r'''The "del" statement
-*******************
-
-   del_stmt ::= "del" target_list
-
-Deletion is recursively defined very similarly to the way assignment
-is defined. Rather than spelling it out in full detail, here are some
-hints.
-
-Deletion of a target list recursively deletes each target, from left
-to right.
-
-Deletion of a name removes the binding of that name from the local or
-global namespace, depending on whether the name occurs in a "global"
-statement in the same code block. Trying to delete an unbound name
-raises a "NameError" exception.
-
-Deletion of attribute references, subscriptions and slicings is passed
-to the primary object involved; deletion of a slicing is in general
-equivalent to assignment of an empty slice of the right type (but even
-this is determined by the sliced object).
-
-Changed in version 3.2: Previously it was illegal to delete a name
-from the local namespace if it occurs as a free variable in a nested
-block.
-''',
- 'dict': r'''Dictionary displays
-*******************
-
-A dictionary display is a possibly empty series of dict items
-(key/value pairs) enclosed in curly braces:
-
-   dict_display       ::= "{" [dict_item_list | dict_comprehension] "}"
-   dict_item_list     ::= dict_item ("," dict_item)* [","]
-   dict_item          ::= expression ":" expression | "**" or_expr
-   dict_comprehension ::= expression ":" expression comp_for
-
-A dictionary display yields a new dictionary object.
-
-If a comma-separated sequence of dict items is given, they are
-evaluated from left to right to define the entries of the dictionary:
-each key object is used as a key into the dictionary to store the
-corresponding value. This means that you can specify the same key
-multiple times in the dict item list, and the final dictionary’s value
-for that key will be the last one given.
-
-A double asterisk "**" denotes *dictionary unpacking*. Its operand
-must be a *mapping*. Each mapping item is added to the new
-dictionary. Later values replace values already set by earlier dict
-items and earlier dictionary unpackings.
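-
-For illustration, a small sketch (an editor's addition, not part of
-the original text) showing that the textually rightmost value for a
-key wins, whether it comes from a dict item or an unpacking:
-
-   defaults = {"host": "localhost", "port": 80}
-   config = {**defaults, "port": 8080}
-   # config == {'host': 'localhost', 'port': 8080}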
- -Added in version 3.5: Unpacking into dictionary displays, originally -proposed by **PEP 448**. - -A dict comprehension, in contrast to list and set comprehensions, -needs two expressions separated with a colon followed by the usual -“for” and “if” clauses. When the comprehension is run, the resulting -key and value elements are inserted in the new dictionary in the order -they are produced. - -Restrictions on the types of the key values are listed earlier in -section The standard type hierarchy. (To summarize, the key type -should be *hashable*, which excludes all mutable objects.) Clashes -between duplicate keys are not detected; the last value (textually -rightmost in the display) stored for a given key value prevails. - -Changed in version 3.8: Prior to Python 3.8, in dict comprehensions, -the evaluation order of key and value was not well-defined. In -CPython, the value was evaluated before the key. Starting with 3.8, -the key is evaluated before the value, as proposed by **PEP 572**. -''', - 'dynamic-features': r'''Interaction with dynamic features -********************************* - -Name resolution of free variables occurs at runtime, not at compile -time. This means that the following code will print 42: - - i = 10 - def f(): - print(i) - i = 42 - f() - -The "eval()" and "exec()" functions do not have access to the full -environment for resolving names. Names may be resolved in the local -and global namespaces of the caller. Free variables are not resolved -in the nearest enclosing namespace, but in the global namespace. [1] -The "exec()" and "eval()" functions have optional arguments to -override the global and local namespace. If only one namespace is -specified, it is used for both. -''', - 'else': r'''The "if" statement -****************** - -The "if" statement is used for conditional execution: - - if_stmt ::= "if" assignment_expression ":" suite - ("elif" assignment_expression ":" suite)* - ["else" ":" suite] - -It selects exactly one of the suites by evaluating the expressions one -by one until one is found to be true (see section Boolean operations -for the definition of true and false); then that suite is executed -(and no other part of the "if" statement is executed or evaluated). -If all expressions are false, the suite of the "else" clause, if -present, is executed. -''', - 'exceptions': r'''Exceptions -********** - -Exceptions are a means of breaking out of the normal flow of control -of a code block in order to handle errors or other exceptional -conditions. An exception is *raised* at the point where the error is -detected; it may be *handled* by the surrounding code block or by any -code block that directly or indirectly invoked the code block where -the error occurred. - -The Python interpreter raises an exception when it detects a run-time -error (such as division by zero). A Python program can also -explicitly raise an exception with the "raise" statement. Exception -handlers are specified with the "try" … "except" statement. The -"finally" clause of such a statement can be used to specify cleanup -code which does not handle the exception, but is executed whether an -exception occurred or not in the preceding code. - -Python uses the “termination” model of error handling: an exception -handler can find out what happened and continue execution at an outer -level, but it cannot repair the cause of the error and retry the -failing operation (except by re-entering the offending piece of code -from the top). 
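-
-For illustration, a brief sketch (an editor's addition, not part of
-the original text) showing a handler together with a "finally" clause
-that runs whether or not the exception occurs:
-
-   divisor = 0
-   try:
-       result = 1 / divisor
-   except ZeroDivisionError:
-       result = 0  # handle the error at this level
-   finally:
-       print("cleanup runs either way")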
- -When an exception is not handled at all, the interpreter terminates -execution of the program, or returns to its interactive main loop. In -either case, it prints a stack traceback, except when the exception is -"SystemExit". - -Exceptions are identified by class instances. The "except" clause is -selected depending on the class of the instance: it must reference the -class of the instance or a *non-virtual base class* thereof. The -instance can be received by the handler and can carry additional -information about the exceptional condition. - -Note: - - Exception messages are not part of the Python API. Their contents - may change from one version of Python to the next without warning - and should not be relied on by code which will run under multiple - versions of the interpreter. - -See also the description of the "try" statement in section The try -statement and "raise" statement in section The raise statement. - --[ Footnotes ]- - -[1] This limitation occurs because the code that is executed by these - operations is not available at the time the module is compiled. -''', - 'execmodel': r'''Execution model -*************** - - -Structure of a program -====================== - -A Python program is constructed from code blocks. A *block* is a piece -of Python program text that is executed as a unit. The following are -blocks: a module, a function body, and a class definition. Each -command typed interactively is a block. A script file (a file given -as standard input to the interpreter or specified as a command line -argument to the interpreter) is a code block. A script command (a -command specified on the interpreter command line with the "-c" -option) is a code block. A module run as a top level script (as module -"__main__") from the command line using a "-m" argument is also a code -block. The string argument passed to the built-in functions "eval()" -and "exec()" is a code block. - -A code block is executed in an *execution frame*. A frame contains -some administrative information (used for debugging) and determines -where and how execution continues after the code block’s execution has -completed. - - -Naming and binding -================== - - -Binding of names ----------------- - -*Names* refer to objects. Names are introduced by name binding -operations. - -The following constructs bind names: - -* formal parameters to functions, - -* class definitions, - -* function definitions, - -* assignment expressions, - -* targets that are identifiers if occurring in an assignment: - - * "for" loop header, - - * after "as" in a "with" statement, "except" clause, "except*" - clause, or in the as-pattern in structural pattern matching, - - * in a capture pattern in structural pattern matching - -* "import" statements. - -* "type" statements. - -* type parameter lists. - -The "import" statement of the form "from ... import *" binds all names -defined in the imported module, except those beginning with an -underscore. This form may only be used at the module level. - -A target occurring in a "del" statement is also considered bound for -this purpose (though the actual semantics are to unbind the name). - -Each assignment or import statement occurs within a block defined by a -class or function definition or at the module level (the top-level -code block). - -If a name is bound in a block, it is a local variable of that block, -unless declared as "nonlocal" or "global". If a name is bound at the -module level, it is a global variable. 
(The variables of the module -code block are local and global.) If a variable is used in a code -block but not defined there, it is a *free variable*. - -Each occurrence of a name in the program text refers to the *binding* -of that name established by the following name resolution rules. - - -Resolution of names -------------------- - -A *scope* defines the visibility of a name within a block. If a local -variable is defined in a block, its scope includes that block. If the -definition occurs in a function block, the scope extends to any blocks -contained within the defining one, unless a contained block introduces -a different binding for the name. - -When a name is used in a code block, it is resolved using the nearest -enclosing scope. The set of all such scopes visible to a code block -is called the block’s *environment*. - -When a name is not found at all, a "NameError" exception is raised. If -the current scope is a function scope, and the name refers to a local -variable that has not yet been bound to a value at the point where the -name is used, an "UnboundLocalError" exception is raised. -"UnboundLocalError" is a subclass of "NameError". - -If a name binding operation occurs anywhere within a code block, all -uses of the name within the block are treated as references to the -current block. This can lead to errors when a name is used within a -block before it is bound. This rule is subtle. Python lacks -declarations and allows name binding operations to occur anywhere -within a code block. The local variables of a code block can be -determined by scanning the entire text of the block for name binding -operations. See the FAQ entry on UnboundLocalError for examples. - -If the "global" statement occurs within a block, all uses of the names -specified in the statement refer to the bindings of those names in the -top-level namespace. Names are resolved in the top-level namespace by -searching the global namespace, i.e. the namespace of the module -containing the code block, and the builtins namespace, the namespace -of the module "builtins". The global namespace is searched first. If -the names are not found there, the builtins namespace is searched -next. If the names are also not found in the builtins namespace, new -variables are created in the global namespace. The global statement -must precede all uses of the listed names. - -The "global" statement has the same scope as a name binding operation -in the same block. If the nearest enclosing scope for a free variable -contains a global statement, the free variable is treated as a global. - -The "nonlocal" statement causes corresponding names to refer to -previously bound variables in the nearest enclosing function scope. -"SyntaxError" is raised at compile time if the given name does not -exist in any enclosing function scope. Type parameters cannot be -rebound with the "nonlocal" statement. - -The namespace for a module is automatically created the first time a -module is imported. The main module for a script is always called -"__main__". - -Class definition blocks and arguments to "exec()" and "eval()" are -special in the context of name resolution. A class definition is an -executable statement that may use and define names. These references -follow the normal rules for name resolution with an exception that -unbound local variables are looked up in the global namespace. The -namespace of the class definition becomes the attribute dictionary of -the class. 
The scope of names defined in a class block is limited to
-the class block; it does not extend to the code blocks of methods.
-This includes comprehensions and generator expressions, but it does
-not include annotation scopes, which have access to their enclosing
-class scopes. This means that the following will fail:
-
-   class A:
-       a = 42
-       b = list(a + i for i in range(10))
-
-However, the following will succeed:
-
-   class A:
-       type Alias = Nested
-       class Nested: pass
-
-   print(A.Alias.__value__)  # <class 'A.Nested'>
-
-
-Annotation scopes
------------------
-
-Type parameter lists and "type" statements introduce *annotation
-scopes*, which behave mostly like function scopes, but with some
-exceptions discussed below. *Annotations* currently do not use
-annotation scopes, but they are expected to use annotation scopes in
-Python 3.13 when **PEP 649** is implemented.
-
-Annotation scopes are used in the following contexts:
-
-* Type parameter lists for generic type aliases.
-
-* Type parameter lists for generic functions. A generic function’s
-  annotations are executed within the annotation scope, but its
-  defaults and decorators are not.
-
-* Type parameter lists for generic classes. A generic class’s base
-  classes and keyword arguments are executed within the annotation
-  scope, but its decorators are not.
-
-* The bounds, constraints, and default values for type parameters
-  (lazily evaluated).
-
-* The value of type aliases (lazily evaluated).
-
-Annotation scopes differ from function scopes in the following ways:
-
-* Annotation scopes have access to their enclosing class namespace. If
-  an annotation scope is immediately within a class scope, or within
-  another annotation scope that is immediately within a class scope,
-  the code in the annotation scope can use names defined in the class
-  scope as if it were executed directly within the class body. This
-  contrasts with regular functions defined within classes, which
-  cannot access names defined in the class scope.
-
-* Expressions in annotation scopes cannot contain "yield", "yield
-  from", "await", or ":=" expressions. (These expressions are allowed
-  in other scopes contained within the annotation scope.)
-
-* Names defined in annotation scopes cannot be rebound with "nonlocal"
-  statements in inner scopes. This includes only type parameters, as
-  no other syntactic elements that can appear within annotation scopes
-  can introduce new names.
-
-* While annotation scopes have an internal name, that name is not
-  reflected in the *qualified name* of objects defined within the
-  scope. Instead, the "__qualname__" of such objects is as if the
-  object were defined in the enclosing scope.
-
-Added in version 3.12: Annotation scopes were introduced in Python
-3.12 as part of **PEP 695**.
-
-Changed in version 3.13: Annotation scopes are also used for type
-parameter defaults, as introduced by **PEP 696**.
-
-
-Lazy evaluation
----------------
-
-The values of type aliases created through the "type" statement are
-*lazily evaluated*. The same applies to the bounds, constraints, and
-default values of type variables created through the type parameter
-syntax. This means that they are not evaluated when the type alias or
-type variable is created. Instead, they are only evaluated when doing
-so is necessary to resolve an attribute access.
-
-Example:
-
-   >>> type Alias = 1/0
-   >>> Alias.__value__
-   Traceback (most recent call last):
-     ...
- ZeroDivisionError: division by zero - >>> def func[T: 1/0](): pass - >>> T = func.__type_params__[0] - >>> T.__bound__ - Traceback (most recent call last): - ... - ZeroDivisionError: division by zero - -Here the exception is raised only when the "__value__" attribute of -the type alias or the "__bound__" attribute of the type variable is -accessed. - -This behavior is primarily useful for references to types that have -not yet been defined when the type alias or type variable is created. -For example, lazy evaluation enables creation of mutually recursive -type aliases: - - from typing import Literal - - type SimpleExpr = int | Parenthesized - type Parenthesized = tuple[Literal["("], Expr, Literal[")"]] - type Expr = SimpleExpr | tuple[SimpleExpr, Literal["+", "-"], Expr] - -Lazily evaluated values are evaluated in annotation scope, which means -that names that appear inside the lazily evaluated value are looked up -as if they were used in the immediately enclosing scope. - -Added in version 3.12. - - -Builtins and restricted execution ---------------------------------- - -**CPython implementation detail:** Users should not touch -"__builtins__"; it is strictly an implementation detail. Users -wanting to override values in the builtins namespace should "import" -the "builtins" module and modify its attributes appropriately. - -The builtins namespace associated with the execution of a code block -is actually found by looking up the name "__builtins__" in its global -namespace; this should be a dictionary or a module (in the latter case -the module’s dictionary is used). By default, when in the "__main__" -module, "__builtins__" is the built-in module "builtins"; when in any -other module, "__builtins__" is an alias for the dictionary of the -"builtins" module itself. - - -Interaction with dynamic features ---------------------------------- - -Name resolution of free variables occurs at runtime, not at compile -time. This means that the following code will print 42: - - i = 10 - def f(): - print(i) - i = 42 - f() - -The "eval()" and "exec()" functions do not have access to the full -environment for resolving names. Names may be resolved in the local -and global namespaces of the caller. Free variables are not resolved -in the nearest enclosing namespace, but in the global namespace. [1] -The "exec()" and "eval()" functions have optional arguments to -override the global and local namespace. If only one namespace is -specified, it is used for both. - - -Exceptions -========== - -Exceptions are a means of breaking out of the normal flow of control -of a code block in order to handle errors or other exceptional -conditions. An exception is *raised* at the point where the error is -detected; it may be *handled* by the surrounding code block or by any -code block that directly or indirectly invoked the code block where -the error occurred. - -The Python interpreter raises an exception when it detects a run-time -error (such as division by zero). A Python program can also -explicitly raise an exception with the "raise" statement. Exception -handlers are specified with the "try" … "except" statement. The -"finally" clause of such a statement can be used to specify cleanup -code which does not handle the exception, but is executed whether an -exception occurred or not in the preceding code. 
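-
-For illustration, a minimal sketch (an editor's addition, not part of
-the original text) of passing one explicit namespace to "exec()",
-which is then used as both the global and the local namespace:
-
-   namespace = {}
-   exec("x = 40 + 2", namespace)
-   print(namespace["x"])  # prints 42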
- -Python uses the “termination” model of error handling: an exception -handler can find out what happened and continue execution at an outer -level, but it cannot repair the cause of the error and retry the -failing operation (except by re-entering the offending piece of code -from the top). - -When an exception is not handled at all, the interpreter terminates -execution of the program, or returns to its interactive main loop. In -either case, it prints a stack traceback, except when the exception is -"SystemExit". - -Exceptions are identified by class instances. The "except" clause is -selected depending on the class of the instance: it must reference the -class of the instance or a *non-virtual base class* thereof. The -instance can be received by the handler and can carry additional -information about the exceptional condition. - -Note: - - Exception messages are not part of the Python API. Their contents - may change from one version of Python to the next without warning - and should not be relied on by code which will run under multiple - versions of the interpreter. - -See also the description of the "try" statement in section The try -statement and "raise" statement in section The raise statement. - --[ Footnotes ]- - -[1] This limitation occurs because the code that is executed by these - operations is not available at the time the module is compiled. -''', - 'exprlists': r'''Expression lists -**************** - - starred_expression ::= ["*"] or_expr - flexible_expression ::= assignment_expression | starred_expression - flexible_expression_list ::= flexible_expression ("," flexible_expression)* [","] - starred_expression_list ::= starred_expression ("," starred_expression)* [","] - expression_list ::= expression ("," expression)* [","] - yield_list ::= expression_list | starred_expression "," [starred_expression_list] - -Except when part of a list or set display, an expression list -containing at least one comma yields a tuple. The length of the tuple -is the number of expressions in the list. The expressions are -evaluated from left to right. - -An asterisk "*" denotes *iterable unpacking*. Its operand must be an -*iterable*. The iterable is expanded into a sequence of items, which -are included in the new tuple, list, or set, at the site of the -unpacking. - -Added in version 3.5: Iterable unpacking in expression lists, -originally proposed by **PEP 448**. - -Added in version 3.11: Any item in an expression list may be starred. -See **PEP 646**. - -A trailing comma is required only to create a one-item tuple, such as -"1,"; it is optional in all other cases. A single expression without a -trailing comma doesn’t create a tuple, but rather yields the value of -that expression. (To create an empty tuple, use an empty pair of -parentheses: "()".) -''', - 'floating': r'''Floating-point literals -*********************** - -Floating-point literals are described by the following lexical -definitions: - - floatnumber ::= pointfloat | exponentfloat - pointfloat ::= [digitpart] fraction | digitpart "." - exponentfloat ::= (digitpart | pointfloat) exponent - digitpart ::= digit (["_"] digit)* - fraction ::= "." digitpart - exponent ::= ("e" | "E") ["+" | "-"] digitpart - -Note that the integer and exponent parts are always interpreted using -radix 10. For example, "077e010" is legal, and denotes the same number -as "77e10". The allowed range of floating-point literals is -implementation-dependent. As in integer literals, underscores are -supported for digit grouping. 
-
-Some examples of floating-point literals:
-
-   3.14    10.    .001    1e100    3.14e-10    0e0    3.14_15_93
-
-Changed in version 3.6: Underscores are now allowed for grouping
-purposes in literals.
-''',
- 'for': r'''The "for" statement
-*******************
-
-The "for" statement is used to iterate over the elements of a sequence
-(such as a string, tuple or list) or other iterable object:
-
-   for_stmt ::= "for" target_list "in" starred_list ":" suite
-                ["else" ":" suite]
-
-The "starred_list" expression is evaluated once; it should yield an
-*iterable* object. An *iterator* is created for that iterable. The
-first item provided by the iterator is then assigned to the target
-list using the standard rules for assignments (see Assignment
-statements), and the suite is executed. This repeats for each item
-provided by the iterator. When the iterator is exhausted, the suite
-in the "else" clause, if present, is executed, and the loop
-terminates.
-
-A "break" statement executed in the first suite terminates the loop
-without executing the "else" clause’s suite. A "continue" statement
-executed in the first suite skips the rest of the suite and continues
-with the next item, or with the "else" clause if there is no next
-item.
-
-The for-loop makes assignments to the variables in the target list.
-This overwrites all previous assignments to those variables including
-those made in the suite of the for-loop:
-
-   for i in range(10):
-       print(i)
-       i = 5    # this will not affect the for-loop
-                # because i will be overwritten with the next
-                # index in the range
-
-Names in the target list are not deleted when the loop is finished,
-but if the sequence is empty, they will not have been assigned to at
-all by the loop. Hint: the built-in type "range()" represents
-immutable arithmetic sequences of integers. For instance, iterating
-"range(3)" successively yields 0, 1, and then 2.
-
-Changed in version 3.11: Starred elements are now allowed in the
-expression list.
-''',
- 'formatstrings': r'''Format string syntax
-********************
-
-The "str.format()" method and the "Formatter" class share the same
-syntax for format strings (although in the case of "Formatter",
-subclasses can define their own format string syntax). The syntax is
-related to that of formatted string literals, but it is less
-sophisticated and, in particular, does not support arbitrary
-expressions.
-
-Format strings contain “replacement fields” surrounded by curly braces
-"{}". Anything that is not contained in braces is considered literal
-text, which is copied unchanged to the output. If you need to include
-a brace character in the literal text, it can be escaped by doubling:
-"{{" and "}}".
-
-The grammar for a replacement field is as follows:
-
-   replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"
-   field_name        ::= arg_name ("." attribute_name | "[" element_index "]")*
-   arg_name          ::= [identifier | digit+]
-   attribute_name    ::= identifier
-   element_index     ::= digit+ | index_string
-   index_string      ::= <any source character except "]"> +
-   conversion        ::= "r" | "s" | "a"
-   format_spec       ::= format-spec:format_spec
-
-In less formal terms, the replacement field can start with a
-*field_name* that specifies the object whose value is to be formatted
-and inserted into the output instead of the replacement field. The
-*field_name* is optionally followed by a *conversion* field, which is
-preceded by an exclamation point "'!'", and a *format_spec*, which is
-preceded by a colon "':'". These specify a non-default format for the
-replacement value.
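-
-For illustration, a short sketch (an editor's addition, not part of
-the original text) combining a *field_name*, a *conversion* and a
-*format_spec* in one replacement field:
-
-   >>> "{0!r:>12}".format("hi")
-   "        'hi'"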
- -See also the Format specification mini-language section. - -The *field_name* itself begins with an *arg_name* that is either a -number or a keyword. If it’s a number, it refers to a positional -argument, and if it’s a keyword, it refers to a named keyword -argument. An *arg_name* is treated as a number if a call to -"str.isdecimal()" on the string would return true. If the numerical -arg_names in a format string are 0, 1, 2, … in sequence, they can all -be omitted (not just some) and the numbers 0, 1, 2, … will be -automatically inserted in that order. Because *arg_name* is not quote- -delimited, it is not possible to specify arbitrary dictionary keys -(e.g., the strings "'10'" or "':-]'") within a format string. The -*arg_name* can be followed by any number of index or attribute -expressions. An expression of the form "'.name'" selects the named -attribute using "getattr()", while an expression of the form -"'[index]'" does an index lookup using "__getitem__()". - -Changed in version 3.1: The positional argument specifiers can be -omitted for "str.format()", so "'{} {}'.format(a, b)" is equivalent to -"'{0} {1}'.format(a, b)". - -Changed in version 3.4: The positional argument specifiers can be -omitted for "Formatter". - -Some simple format string examples: - - "First, thou shalt count to {0}" # References first positional argument - "Bring me a {}" # Implicitly references the first positional argument - "From {} to {}" # Same as "From {0} to {1}" - "My quest is {name}" # References keyword argument 'name' - "Weight in tons {0.weight}" # 'weight' attribute of first positional arg - "Units destroyed: {players[0]}" # First element of keyword argument 'players'. - -The *conversion* field causes a type coercion before formatting. -Normally, the job of formatting a value is done by the "__format__()" -method of the value itself. However, in some cases it is desirable to -force a type to be formatted as a string, overriding its own -definition of formatting. By converting the value to a string before -calling "__format__()", the normal formatting logic is bypassed. - -Three conversion flags are currently supported: "'!s'" which calls -"str()" on the value, "'!r'" which calls "repr()" and "'!a'" which -calls "ascii()". - -Some examples: - - "Harold's a clever {0!s}" # Calls str() on the argument first - "Bring out the holy {name!r}" # Calls repr() on the argument first - "More {!a}" # Calls ascii() on the argument first - -The *format_spec* field contains a specification of how the value -should be presented, including such details as field width, alignment, -padding, decimal precision and so on. Each value type can define its -own “formatting mini-language” or interpretation of the *format_spec*. - -Most built-in types support a common formatting mini-language, which -is described in the next section. - -A *format_spec* field can also include nested replacement fields -within it. These nested replacement fields may contain a field name, -conversion flag and format specification, but deeper nesting is not -allowed. The replacement fields within the format_spec are -substituted before the *format_spec* string is interpreted. This -allows the formatting of a value to be dynamically specified. - -See the Format examples section for some examples. 
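-
-For illustration, a short sketch (an editor's addition, not part of
-the original text) of a nested replacement field supplying the field
-width at run time:
-
-   >>> "{0:{width}}".format("pi", width=6)
-   'pi    '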
-
-
-Format specification mini-language
-==================================
-
-“Format specifications” are used within replacement fields contained
-within a format string to define how individual values are presented
-(see Format string syntax and f-strings). They can also be passed
-directly to the built-in "format()" function. Each formattable type
-may define how the format specification is to be interpreted.
-
-Most built-in types implement the following options for format
-specifications, although some of the formatting options are only
-supported by the numeric types.
-
-A general convention is that an empty format specification produces
-the same result as if you had called "str()" on the value. A non-empty
-format specification typically modifies the result.
-
-The general form of a *standard format specifier* is:
-
-   format_spec ::= [options][width][grouping]["." precision][type]
-   options     ::= [[fill]align][sign]["z"]["#"]["0"]
-   fill        ::= <any character>
-   align       ::= "<" | ">" | "=" | "^"
-   sign        ::= "+" | "-" | " "
-   width       ::= digit+
-   grouping    ::= "," | "_"
-   precision   ::= digit+
-   type        ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g"
-                   | "G" | "n" | "o" | "s" | "x" | "X" | "%"
-
-If a valid *align* value is specified, it can be preceded by a *fill*
-character that can be any character and defaults to a space if
-omitted. It is not possible to use a literal curly brace (”"{"” or
-“"}"”) as the *fill* character in a formatted string literal or when
-using the "str.format()" method. However, it is possible to insert a
-curly brace with a nested replacement field. This limitation doesn’t
-affect the "format()" function.
-
-The meaning of the various alignment options is as follows:
-
-+-----------+------------------------------------------------------------+
-| Option    | Meaning                                                    |
-|===========|============================================================|
-| "'<'"     | Forces the field to be left-aligned within the available   |
-|           | space (this is the default for most objects).              |
-+-----------+------------------------------------------------------------+
-| "'>'"     | Forces the field to be right-aligned within the available  |
-|           | space (this is the default for numbers).                   |
-+-----------+------------------------------------------------------------+
-| "'='"     | Forces the padding to be placed after the sign (if any)    |
-|           | but before the digits. This is used for printing fields    |
-|           | in the form ‘+000000120’. This alignment option is only    |
-|           | valid for numeric types, excluding "complex". It becomes   |
-|           | the default for numbers when ‘0’ immediately precedes the  |
-|           | field width.                                               |
-+-----------+------------------------------------------------------------+
-| "'^'"     | Forces the field to be centered within the available       |
-|           | space.                                                     |
-+-----------+------------------------------------------------------------+
-
-Note that unless a minimum field width is defined, the field width
-will always be the same size as the data to fill it, so that the
-alignment option has no meaning in this case.
-
-The *sign* option is only valid for number types, and can be one of
-the following:
-
-+-----------+------------------------------------------------------------+
-| Option    | Meaning                                                    |
-|===========|============================================================|
-| "'+'"     | Indicates that a sign should be used for both positive as  |
-|           | well as negative numbers.
| -+-----------+------------------------------------------------------------+ -| "'-'" | Indicates that a sign should be used only for negative | -| | numbers (this is the default behavior). | -+-----------+------------------------------------------------------------+ -| space | Indicates that a leading space should be used on positive | -| | numbers, and a minus sign on negative numbers. | -+-----------+------------------------------------------------------------+ - -The "'z'" option coerces negative zero floating-point values to -positive zero after rounding to the format precision. This option is -only valid for floating-point presentation types. - -Changed in version 3.11: Added the "'z'" option (see also **PEP -682**). - -The "'#'" option causes the “alternate form” to be used for the -conversion. The alternate form is defined differently for different -types. This option is only valid for integer, float and complex -types. For integers, when binary, octal, or hexadecimal output is -used, this option adds the respective prefix "'0b'", "'0o'", "'0x'", -or "'0X'" to the output value. For float and complex the alternate -form causes the result of the conversion to always contain a decimal- -point character, even if no digits follow it. Normally, a decimal- -point character appears in the result of these conversions only if a -digit follows it. In addition, for "'g'" and "'G'" conversions, -trailing zeros are not removed from the result. - -The *width* is a decimal integer defining the minimum total field -width, including any prefixes, separators, and other formatting -characters. If not specified, then the field width will be determined -by the content. - -When no explicit alignment is given, preceding the *width* field by a -zero ("'0'") character enables sign-aware zero-padding for numeric -types, excluding "complex". This is equivalent to a *fill* character -of "'0'" with an *alignment* type of "'='". - -Changed in version 3.10: Preceding the *width* field by "'0'" no -longer affects the default alignment for strings. - -The *grouping* option after the *width* field specifies a digit group -separator for the integral part of a number. It can be one of the -following: - -+-----------+------------------------------------------------------------+ -| Option | Meaning | -|===========|============================================================| -| "','" | Inserts a comma every 3 digits for integer presentation | -| | type "'d'" and floating-point presentation types, | -| | excluding "'n'". For other presentation types, this option | -| | is not supported. | -+-----------+------------------------------------------------------------+ -| "'_'" | Inserts an underscore every 3 digits for integer | -| | presentation type "'d'" and floating-point presentation | -| | types, excluding "'n'". For integer presentation types | -| | "'b'", "'o'", "'x'", and "'X'", underscores are inserted | -| | every 4 digits. For other presentation types, this option | -| | is not supported. | -+-----------+------------------------------------------------------------+ - -For a locale aware separator, use the "'n'" presentation type instead. - -Changed in version 3.1: Added the "','" option (see also **PEP 378**). - -Changed in version 3.6: Added the "'_'" option (see also **PEP 515**). - -The *precision* is a decimal integer indicating how many digits should -be displayed after the decimal point for presentation types "'f'" and -"'F'", or before and after the decimal point for presentation types -"'g'" or "'G'". 
For string presentation types the field indicates the -maximum field size - in other words, how many characters will be used -from the field content. The *precision* is not allowed for integer -presentation types. - -Finally, the *type* determines how the data should be presented. - -The available string presentation types are: - - +-----------+------------------------------------------------------------+ - | Type | Meaning | - |===========|============================================================| - | "'s'" | String format. This is the default type for strings and | - | | may be omitted. | - +-----------+------------------------------------------------------------+ - | None | The same as "'s'". | - +-----------+------------------------------------------------------------+ - -The available integer presentation types are: - - +-----------+------------------------------------------------------------+ - | Type | Meaning | - |===========|============================================================| - | "'b'" | Binary format. Outputs the number in base 2. | - +-----------+------------------------------------------------------------+ - | "'c'" | Character. Converts the integer to the corresponding | - | | unicode character before printing. | - +-----------+------------------------------------------------------------+ - | "'d'" | Decimal Integer. Outputs the number in base 10. | - +-----------+------------------------------------------------------------+ - | "'o'" | Octal format. Outputs the number in base 8. | - +-----------+------------------------------------------------------------+ - | "'x'" | Hex format. Outputs the number in base 16, using lower- | - | | case letters for the digits above 9. | - +-----------+------------------------------------------------------------+ - | "'X'" | Hex format. Outputs the number in base 16, using upper- | - | | case letters for the digits above 9. In case "'#'" is | - | | specified, the prefix "'0x'" will be upper-cased to "'0X'" | - | | as well. | - +-----------+------------------------------------------------------------+ - | "'n'" | Number. This is the same as "'d'", except that it uses the | - | | current locale setting to insert the appropriate digit | - | | group separators. | - +-----------+------------------------------------------------------------+ - | None | The same as "'d'". | - +-----------+------------------------------------------------------------+ - -In addition to the above presentation types, integers can be formatted -with the floating-point presentation types listed below (except "'n'" -and "None"). When doing so, "float()" is used to convert the integer -to a floating-point number before formatting. - -The available presentation types for "float" and "Decimal" values are: - - +-----------+------------------------------------------------------------+ - | Type | Meaning | - |===========|============================================================| - | "'e'" | Scientific notation. For a given precision "p", formats | - | | the number in scientific notation with the letter ‘e’ | - | | separating the coefficient from the exponent. The | - | | coefficient has one digit before and "p" digits after the | - | | decimal point, for a total of "p + 1" significant digits. | - | | With no precision given, uses a precision of "6" digits | - | | after the decimal point for "float", and shows all | - | | coefficient digits for "Decimal". If "p=0", the decimal | - | | point is omitted unless the "#" option is used. 
For | - | | "float", the exponent always contains at least two digits, | - | | and is zero if the value is zero. | - +-----------+------------------------------------------------------------+ - | "'E'" | Scientific notation. Same as "'e'" except it uses an upper | - | | case ‘E’ as the separator character. | - +-----------+------------------------------------------------------------+ - | "'f'" | Fixed-point notation. For a given precision "p", formats | - | | the number as a decimal number with exactly "p" digits | - | | following the decimal point. With no precision given, uses | - | | a precision of "6" digits after the decimal point for | - | | "float", and uses a precision large enough to show all | - | | coefficient digits for "Decimal". If "p=0", the decimal | - | | point is omitted unless the "#" option is used. | - +-----------+------------------------------------------------------------+ - | "'F'" | Fixed-point notation. Same as "'f'", but converts "nan" to | - | | "NAN" and "inf" to "INF". | - +-----------+------------------------------------------------------------+ - | "'g'" | General format. For a given precision "p >= 1", this | - | | rounds the number to "p" significant digits and then | - | | formats the result in either fixed-point format or in | - | | scientific notation, depending on its magnitude. A | - | | precision of "0" is treated as equivalent to a precision | - | | of "1". The precise rules are as follows: suppose that | - | | the result formatted with presentation type "'e'" and | - | | precision "p-1" would have exponent "exp". Then, if "m <= | - | | exp < p", where "m" is -4 for floats and -6 for | - | | "Decimals", the number is formatted with presentation type | - | | "'f'" and precision "p-1-exp". Otherwise, the number is | - | | formatted with presentation type "'e'" and precision | - | | "p-1". In both cases insignificant trailing zeros are | - | | removed from the significand, and the decimal point is | - | | also removed if there are no remaining digits following | - | | it, unless the "'#'" option is used. With no precision | - | | given, uses a precision of "6" significant digits for | - | | "float". For "Decimal", the coefficient of the result is | - | | formed from the coefficient digits of the value; | - | | scientific notation is used for values smaller than "1e-6" | - | | in absolute value and values where the place value of the | - | | least significant digit is larger than 1, and fixed-point | - | | notation is used otherwise. Positive and negative | - | | infinity, positive and negative zero, and nans, are | - | | formatted as "inf", "-inf", "0", "-0" and "nan" | - | | respectively, regardless of the precision. | - +-----------+------------------------------------------------------------+ - | "'G'" | General format. Same as "'g'" except switches to "'E'" if | - | | the number gets too large. The representations of infinity | - | | and NaN are uppercased, too. | - +-----------+------------------------------------------------------------+ - | "'n'" | Number. This is the same as "'g'", except that it uses the | - | | current locale setting to insert the appropriate digit | - | | group separators for the integral part of a number. | - +-----------+------------------------------------------------------------+ - | "'%'" | Percentage. Multiplies the number by 100 and displays in | - | | fixed ("'f'") format, followed by a percent sign. 
| - +-----------+------------------------------------------------------------+ - | None | For "float" this is like the "'g'" type, except that when | - | | fixed- point notation is used to format the result, it | - | | always includes at least one digit past the decimal point, | - | | and switches to the scientific notation when "exp >= p - | - | | 1". When the precision is not specified, the latter will | - | | be as large as needed to represent the given value | - | | faithfully. For "Decimal", this is the same as either | - | | "'g'" or "'G'" depending on the value of | - | | "context.capitals" for the current decimal context. The | - | | overall effect is to match the output of "str()" as | - | | altered by the other format modifiers. | - +-----------+------------------------------------------------------------+ - -The result should be correctly rounded to a given precision "p" of -digits after the decimal point. The rounding mode for "float" matches -that of the "round()" builtin. For "Decimal", the rounding mode of -the current context will be used. - -The available presentation types for "complex" are the same as those -for "float" ("'%'" is not allowed). Both the real and imaginary -components of a complex number are formatted as floating-point -numbers, according to the specified presentation type. They are -separated by the mandatory sign of the imaginary part, the latter -being terminated by a "j" suffix. If the presentation type is -missing, the result will match the output of "str()" (complex numbers -with a non-zero real part are also surrounded by parentheses), -possibly altered by other format modifiers. - - -Format examples -=============== - -This section contains examples of the "str.format()" syntax and -comparison with the old "%"-formatting. - -In most of the cases the syntax is similar to the old "%"-formatting, -with the addition of the "{}" and with ":" used instead of "%". For -example, "'%03.2f'" can be translated to "'{:03.2f}'". - -The new format syntax also supports new and different options, shown -in the following examples. - -Accessing arguments by position: - - >>> '{0}, {1}, {2}'.format('a', 'b', 'c') - 'a, b, c' - >>> '{}, {}, {}'.format('a', 'b', 'c') # 3.1+ only - 'a, b, c' - >>> '{2}, {1}, {0}'.format('a', 'b', 'c') - 'c, b, a' - >>> '{2}, {1}, {0}'.format(*'abc') # unpacking argument sequence - 'c, b, a' - >>> '{0}{1}{0}'.format('abra', 'cad') # arguments' indices can be repeated - 'abracadabra' - -Accessing arguments by name: - - >>> 'Coordinates: {latitude}, {longitude}'.format(latitude='37.24N', longitude='-115.81W') - 'Coordinates: 37.24N, -115.81W' - >>> coord = {'latitude': '37.24N', 'longitude': '-115.81W'} - >>> 'Coordinates: {latitude}, {longitude}'.format(**coord) - 'Coordinates: 37.24N, -115.81W' - -Accessing arguments’ attributes: - - >>> c = 3-5j - >>> ('The complex number {0} is formed from the real part {0.real} ' - ... 'and the imaginary part {0.imag}.').format(c) - 'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.' - >>> class Point: - ... def __init__(self, x, y): - ... self.x, self.y = x, y - ... def __str__(self): - ... return 'Point({self.x}, {self.y})'.format(self=self) - ... 
- >>> str(Point(4, 2)) - 'Point(4, 2)' - -Accessing arguments’ items: - - >>> coord = (3, 5) - >>> 'X: {0[0]}; Y: {0[1]}'.format(coord) - 'X: 3; Y: 5' - -Replacing "%s" and "%r": - - >>> "repr() shows quotes: {!r}; str() doesn't: {!s}".format('test1', 'test2') - "repr() shows quotes: 'test1'; str() doesn't: test2" - -Aligning the text and specifying a width: - - >>> '{:<30}'.format('left aligned') - 'left aligned ' - >>> '{:>30}'.format('right aligned') - ' right aligned' - >>> '{:^30}'.format('centered') - ' centered ' - >>> '{:*^30}'.format('centered') # use '*' as a fill char - '***********centered***********' - -Replacing "%+f", "%-f", and "% f" and specifying a sign: - - >>> '{:+f}; {:+f}'.format(3.14, -3.14) # show it always - '+3.140000; -3.140000' - >>> '{: f}; {: f}'.format(3.14, -3.14) # show a space for positive numbers - ' 3.140000; -3.140000' - >>> '{:-f}; {:-f}'.format(3.14, -3.14) # show only the minus -- same as '{:f}; {:f}' - '3.140000; -3.140000' - -Replacing "%x" and "%o" and converting the value to different bases: - - >>> # format also supports binary numbers - >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42) - 'int: 42; hex: 2a; oct: 52; bin: 101010' - >>> # with 0x, 0o, or 0b as prefix: - >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42) - 'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010' - -Using the comma or the underscore as a digit group separator: - - >>> '{:,}'.format(1234567890) - '1,234,567,890' - >>> '{:_}'.format(1234567890) - '1_234_567_890' - >>> '{:_b}'.format(1234567890) - '100_1001_1001_0110_0000_0010_1101_0010' - >>> '{:_x}'.format(1234567890) - '4996_02d2' - -Expressing a percentage: - - >>> points = 19 - >>> total = 22 - >>> 'Correct answers: {:.2%}'.format(points/total) - 'Correct answers: 86.36%' - -Using type-specific formatting: - - >>> import datetime as dt - >>> d = dt.datetime(2010, 7, 4, 12, 15, 58) - >>> '{:%Y-%m-%d %H:%M:%S}'.format(d) - '2010-07-04 12:15:58' - -Nesting arguments and more complex examples: - - >>> for align, text in zip('<^>', ['left', 'center', 'right']): - ... '{0:{fill}{align}16}'.format(text, fill=align, align=align) - ... - 'left<<<<<<<<<<<<' - '^^^^^center^^^^^' - '>>>>>>>>>>>right' - >>> - >>> octets = [192, 168, 0, 1] - >>> '{:02X}{:02X}{:02X}{:02X}'.format(*octets) - 'C0A80001' - >>> int(_, 16) - 3232235521 - >>> - >>> width = 5 - >>> for num in range(5,12): - ... for base in 'dXob': - ... print('{0:{width}{base}}'.format(num, base=base, width=width), end=' ') - ... print() - ... 
- 5 5 5 101 - 6 6 6 110 - 7 7 7 111 - 8 8 10 1000 - 9 9 11 1001 - 10 A 12 1010 - 11 B 13 1011 -''', - 'function': r'''Function definitions -******************** - -A function definition defines a user-defined function object (see -section The standard type hierarchy): - - funcdef ::= [decorators] "def" funcname [type_params] "(" [parameter_list] ")" - ["->" expression] ":" suite - decorators ::= decorator+ - decorator ::= "@" assignment_expression NEWLINE - parameter_list ::= defparameter ("," defparameter)* "," "/" ["," [parameter_list_no_posonly]] - | parameter_list_no_posonly - parameter_list_no_posonly ::= defparameter ("," defparameter)* ["," [parameter_list_starargs]] - | parameter_list_starargs - parameter_list_starargs ::= "*" [star_parameter] ("," defparameter)* ["," [parameter_star_kwargs]] - | "*" ("," defparameter)+ ["," [parameter_star_kwargs]] - | parameter_star_kwargs - parameter_star_kwargs ::= "**" parameter [","] - parameter ::= identifier [":" expression] - star_parameter ::= identifier [":" ["*"] expression] - defparameter ::= parameter ["=" expression] - funcname ::= identifier - -A function definition is an executable statement. Its execution binds -the function name in the current local namespace to a function object -(a wrapper around the executable code for the function). This -function object contains a reference to the current global namespace -as the global namespace to be used when the function is called. - -The function definition does not execute the function body; this gets -executed only when the function is called. [4] - -A function definition may be wrapped by one or more *decorator* -expressions. Decorator expressions are evaluated when the function is -defined, in the scope that contains the function definition. The -result must be a callable, which is invoked with the function object -as the only argument. The returned value is bound to the function name -instead of the function object. Multiple decorators are applied in -nested fashion. For example, the following code - - @f1(arg) - @f2 - def func(): pass - -is roughly equivalent to - - def func(): pass - func = f1(arg)(f2(func)) - -except that the original function is not temporarily bound to the name -"func". - -Changed in version 3.9: Functions may be decorated with any valid -"assignment_expression". Previously, the grammar was much more -restrictive; see **PEP 614** for details. - -A list of type parameters may be given in square brackets between the -function’s name and the opening parenthesis for its parameter list. -This indicates to static type checkers that the function is generic. -At runtime, the type parameters can be retrieved from the function’s -"__type_params__" attribute. See Generic functions for more. - -Changed in version 3.12: Type parameter lists are new in Python 3.12. - -When one or more *parameters* have the form *parameter* "=" -*expression*, the function is said to have “default parameter values.” -For a parameter with a default value, the corresponding *argument* may -be omitted from a call, in which case the parameter’s default value is -substituted. If a parameter has a default value, all following -parameters up until the “"*"” must also have a default value — this is -a syntactic restriction that is not expressed by the grammar. 
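-
-For illustration, here is a short sketch (not taken from the
-specification above) of this restriction in practice; the first
-definition is rejected at compile time, while the keyword-only form
-is accepted because parameters after "*" need no default:
-
-   def f(a=1, b):       # SyntaxError: non-default follows default
-       pass
-
-   def g(a=1, *, b):    # valid: "b" is keyword-only
-       pass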
- -**Default parameter values are evaluated from left to right when the -function definition is executed.** This means that the expression is -evaluated once, when the function is defined, and that the same “pre- -computed” value is used for each call. This is especially important -to understand when a default parameter value is a mutable object, such -as a list or a dictionary: if the function modifies the object (e.g. -by appending an item to a list), the default parameter value is in -effect modified. This is generally not what was intended. A way -around this is to use "None" as the default, and explicitly test for -it in the body of the function, e.g.: - - def whats_on_the_telly(penguin=None): - if penguin is None: - penguin = [] - penguin.append("property of the zoo") - return penguin - -Function call semantics are described in more detail in section Calls. -A function call always assigns values to all parameters mentioned in -the parameter list, either from positional arguments, from keyword -arguments, or from default values. If the form “"*identifier"” is -present, it is initialized to a tuple receiving any excess positional -parameters, defaulting to the empty tuple. If the form -“"**identifier"” is present, it is initialized to a new ordered -mapping receiving any excess keyword arguments, defaulting to a new -empty mapping of the same type. Parameters after “"*"” or -“"*identifier"” are keyword-only parameters and may only be passed by -keyword arguments. Parameters before “"/"” are positional-only -parameters and may only be passed by positional arguments. - -Changed in version 3.8: The "/" function parameter syntax may be used -to indicate positional-only parameters. See **PEP 570** for details. - -Parameters may have an *annotation* of the form “": expression"” -following the parameter name. Any parameter may have an annotation, -even those of the form "*identifier" or "**identifier". (As a special -case, parameters of the form "*identifier" may have an annotation “": -*expression"”.) Functions may have “return” annotation of the form -“"-> expression"” after the parameter list. These annotations can be -any valid Python expression. The presence of annotations does not -change the semantics of a function. The annotation values are -available as values of a dictionary keyed by the parameters’ names in -the "__annotations__" attribute of the function object. If the -"annotations" import from "__future__" is used, annotations are -preserved as strings at runtime which enables postponed evaluation. -Otherwise, they are evaluated when the function definition is -executed. In this case annotations may be evaluated in a different -order than they appear in the source code. - -Changed in version 3.11: Parameters of the form “"*identifier"” may -have an annotation “": *expression"”. See **PEP 646**. - -It is also possible to create anonymous functions (functions not bound -to a name), for immediate use in expressions. This uses lambda -expressions, described in section Lambdas. Note that the lambda -expression is merely a shorthand for a simplified function definition; -a function defined in a “"def"” statement can be passed around or -assigned to another name just like a function defined by a lambda -expression. The “"def"” form is actually more powerful since it -allows the execution of multiple statements and annotations. - -**Programmer’s note:** Functions are first-class objects. 
A “"def"” -statement executed inside a function definition defines a local -function that can be returned or passed around. Free variables used -in the nested function can access the local variables of the function -containing the def. See section Naming and binding for details. - -See also: - - **PEP 3107** - Function Annotations - The original specification for function annotations. - - **PEP 484** - Type Hints - Definition of a standard meaning for annotations: type hints. - - **PEP 526** - Syntax for Variable Annotations - Ability to type hint variable declarations, including class - variables and instance variables. - - **PEP 563** - Postponed Evaluation of Annotations - Support for forward references within annotations by preserving - annotations in a string form at runtime instead of eager - evaluation. - - **PEP 318** - Decorators for Functions and Methods - Function and method decorators were introduced. Class decorators - were introduced in **PEP 3129**. -''', - 'global': r'''The "global" statement -********************** - - global_stmt ::= "global" identifier ("," identifier)* - -The "global" statement causes the listed identifiers to be interpreted -as globals. It would be impossible to assign to a global variable -without "global", although free variables may refer to globals without -being declared global. - -The "global" statement applies to the entire current scope (module, -function body or class definition). A "SyntaxError" is raised if a -variable is used or assigned to prior to its global declaration in the -scope. - -At the module level, all variables are global, so a "global" statement -has no effect. However, variables must still not be used or assigned -to prior to their "global" declaration. This requirement is relaxed in -the interactive prompt (*REPL*). - -**Programmer’s note:** "global" is a directive to the parser. It -applies only to code parsed at the same time as the "global" -statement. In particular, a "global" statement contained in a string -or code object supplied to the built-in "exec()" function does not -affect the code block *containing* the function call, and code -contained in such a string is unaffected by "global" statements in the -code containing the function call. The same applies to the "eval()" -and "compile()" functions. -''', - 'id-classes': r'''Reserved classes of identifiers -******************************* - -Certain classes of identifiers (besides keywords) have special -meanings. These classes are identified by the patterns of leading and -trailing underscore characters: - -"_*" - Not imported by "from module import *". - -"_" - In a "case" pattern within a "match" statement, "_" is a soft - keyword that denotes a wildcard. - - Separately, the interactive interpreter makes the result of the - last evaluation available in the variable "_". (It is stored in the - "builtins" module, alongside built-in functions like "print".) - - Elsewhere, "_" is a regular identifier. It is often used to name - “special” items, but it is not special to Python itself. - - Note: - - The name "_" is often used in conjunction with - internationalization; refer to the documentation for the - "gettext" module for more information on this convention.It is - also commonly used for unused variables. - -"__*__" - System-defined names, informally known as “dunder” names. These - names are defined by the interpreter and its implementation - (including the standard library). Current system names are - discussed in the Special method names section and elsewhere. 
More - will likely be defined in future versions of Python. *Any* use of - "__*__" names, in any context, that does not follow explicitly - documented use, is subject to breakage without warning. - -"__*" - Class-private names. Names in this category, when used within the - context of a class definition, are re-written to use a mangled form - to help avoid name clashes between “private” attributes of base and - derived classes. See section Identifiers (Names). -''', - 'identifiers': r'''Identifiers and keywords -************************ - -Identifiers (also referred to as *names*) are described by the -following lexical definitions. - -The syntax of identifiers in Python is based on the Unicode standard -annex UAX-31, with elaboration and changes as defined below; see also -**PEP 3131** for further details. - -Within the ASCII range (U+0001..U+007F), the valid characters for -identifiers include the uppercase and lowercase letters "A" through -"Z", the underscore "_" and, except for the first character, the -digits "0" through "9". Python 3.0 introduced additional characters -from outside the ASCII range (see **PEP 3131**). For these -characters, the classification uses the version of the Unicode -Character Database as included in the "unicodedata" module. - -Identifiers are unlimited in length. Case is significant. - - identifier ::= xid_start xid_continue* - id_start ::= <all characters in general categories Lu, Ll, Lt, Lm, Lo, Nl, the underscore, and characters with the Other_ID_Start property> - id_continue ::= <all characters in id_start, plus characters in the categories Mn, Mc, Nd, Pc and others with the Other_ID_Continue property> - xid_start ::= <all characters in id_start whose NFKC normalization is in "id_start xid_continue*"> - xid_continue ::= <all characters in id_continue whose NFKC normalization is in "id_continue*"> - -The Unicode category codes mentioned above stand for: - -* *Lu* - uppercase letters - -* *Ll* - lowercase letters - -* *Lt* - titlecase letters - -* *Lm* - modifier letters - -* *Lo* - other letters - -* *Nl* - letter numbers - -* *Mn* - nonspacing marks - -* *Mc* - spacing combining marks - -* *Nd* - decimal numbers - -* *Pc* - connector punctuations - -* *Other_ID_Start* - explicit list of characters in PropList.txt to - support backwards compatibility - -* *Other_ID_Continue* - likewise - -All identifiers are converted into the normal form NFKC while parsing; -comparison of identifiers is based on NFKC. - -A non-normative text file listing all valid identifier characters for -Unicode 15.1.0 can be found at -https://www.unicode.org/Public/15.1.0/ucd/DerivedCoreProperties.txt - - -Keywords -======== - -The following identifiers are used as reserved words, or *keywords* of -the language, and cannot be used as ordinary identifiers. They must -be spelled exactly as written here: - - False await else import pass - None break except in raise - True class finally is return - and continue for lambda try - as def from nonlocal while - assert del global not with - async elif if or yield - - -Soft Keywords -============= - -Added in version 3.10. - -Some identifiers are only reserved under specific contexts. These are -known as *soft keywords*. The identifiers "match", "case", "type" and -"_" can syntactically act as keywords in certain contexts, but this -distinction is done at the parser level, not when tokenizing. - -As soft keywords, their use in the grammar is possible while still -preserving compatibility with existing code that uses these names as -identifier names. - -"match", "case", and "_" are used in the "match" statement. "type" is -used in the "type" statement. - -Changed in version 3.12: "type" is now a soft keyword. - - -Reserved classes of identifiers -=============================== - -Certain classes of identifiers (besides keywords) have special -meanings.
These classes are identified by the patterns of leading and -trailing underscore characters: - -"_*" - Not imported by "from module import *". - -"_" - In a "case" pattern within a "match" statement, "_" is a soft - keyword that denotes a wildcard. - - Separately, the interactive interpreter makes the result of the - last evaluation available in the variable "_". (It is stored in the - "builtins" module, alongside built-in functions like "print".) - - Elsewhere, "_" is a regular identifier. It is often used to name - “special” items, but it is not special to Python itself. - - Note: - - The name "_" is often used in conjunction with - internationalization; refer to the documentation for the - "gettext" module for more information on this convention. It is - also commonly used for unused variables. - -"__*__" - System-defined names, informally known as “dunder” names. These - names are defined by the interpreter and its implementation - (including the standard library). Current system names are - discussed in the Special method names section and elsewhere. More - will likely be defined in future versions of Python. *Any* use of - "__*__" names, in any context, that does not follow explicitly - documented use, is subject to breakage without warning. - -"__*" - Class-private names. Names in this category, when used within the - context of a class definition, are re-written to use a mangled form - to help avoid name clashes between “private” attributes of base and - derived classes. See section Identifiers (Names). -''', - 'if': r'''The "if" statement -****************** - -The "if" statement is used for conditional execution: - - if_stmt ::= "if" assignment_expression ":" suite - ("elif" assignment_expression ":" suite)* - ["else" ":" suite] - -It selects exactly one of the suites by evaluating the expressions one -by one until one is found to be true (see section Boolean operations -for the definition of true and false); then that suite is executed -(and no other part of the "if" statement is executed or evaluated). -If all expressions are false, the suite of the "else" clause, if -present, is executed. -''', - 'imaginary': r'''Imaginary literals -****************** - -Imaginary literals are described by the following lexical definitions: - - imagnumber ::= (floatnumber | digitpart) ("j" | "J") - -An imaginary literal yields a complex number with a real part of 0.0. -Complex numbers are represented as a pair of floating-point numbers -and have the same restrictions on their range. To create a complex -number with a nonzero real part, add a floating-point number to it, -e.g., "(3+4j)". Some examples of imaginary literals: - - 3.14j 10.j 10j .001j 1e100j 3.14e-10j 3.14_15_93j -''', - 'import': r'''The "import" statement -********************** - - import_stmt ::= "import" module ["as" identifier] ("," module ["as" identifier])* - | "from" relative_module "import" identifier ["as" identifier] - ("," identifier ["as" identifier])* - | "from" relative_module "import" "(" identifier ["as" identifier] - ("," identifier ["as" identifier])* [","] ")" - | "from" relative_module "import" "*" - module ::= (identifier ".")* identifier - relative_module ::= "."* module | "."+ - -The basic import statement (no "from" clause) is executed in two -steps: - -1. find a module, loading and initializing it if necessary - -2. define a name or names in the local namespace for the scope where - the "import" statement occurs.
- -When the statement contains multiple clauses (separated by commas) the -two steps are carried out separately for each clause, just as though -the clauses had been separated out into individual import statements. - -The details of the first step, finding and loading modules, are -described in greater detail in the section on the import system, which -also describes the various types of packages and modules that can be -imported, as well as all the hooks that can be used to customize the -import system. Note that failures in this step may indicate either -that the module could not be located, *or* that an error occurred -while initializing the module, which includes execution of the -module’s code. - -If the requested module is retrieved successfully, it will be made -available in the local namespace in one of three ways: - -* If the module name is followed by "as", then the name following "as" - is bound directly to the imported module. - -* If no other name is specified, and the module being imported is a - top level module, the module’s name is bound in the local namespace - as a reference to the imported module - -* If the module being imported is *not* a top level module, then the - name of the top level package that contains the module is bound in - the local namespace as a reference to the top level package. The - imported module must be accessed using its full qualified name - rather than directly - -The "from" form uses a slightly more complex process: - -1. find the module specified in the "from" clause, loading and - initializing it if necessary; - -2. for each of the identifiers specified in the "import" clauses: - - 1. check if the imported module has an attribute by that name - - 2. if not, attempt to import a submodule with that name and then - check the imported module again for that attribute - - 3. if the attribute is not found, "ImportError" is raised. - - 4. otherwise, a reference to that value is stored in the local - namespace, using the name in the "as" clause if it is present, - otherwise using the attribute name - -Examples: - - import foo # foo imported and bound locally - import foo.bar.baz # foo, foo.bar, and foo.bar.baz imported, foo bound locally - import foo.bar.baz as fbb # foo, foo.bar, and foo.bar.baz imported, foo.bar.baz bound as fbb - from foo.bar import baz # foo, foo.bar, and foo.bar.baz imported, foo.bar.baz bound as baz - from foo import attr # foo imported and foo.attr bound as attr - -If the list of identifiers is replaced by a star ("'*'"), all public -names defined in the module are bound in the local namespace for the -scope where the "import" statement occurs. - -The *public names* defined by a module are determined by checking the -module’s namespace for a variable named "__all__"; if defined, it must -be a sequence of strings which are names defined or imported by that -module. Names containing non-ASCII characters must be in the -normalization form NFKC. The names given in "__all__" are all -considered public and are required to exist. If "__all__" is not -defined, the set of public names includes all names found in the -module’s namespace which do not begin with an underscore character -("'_'"). "__all__" should contain the entire public API. It is -intended to avoid accidentally exporting items that are not part of -the API (such as library modules which were imported and used within -the module). - -The wild card form of import — "from module import *" — is only -allowed at the module level. 
Attempting to use it in class or -function definitions will raise a "SyntaxError". - -When specifying what module to import you do not have to specify the -absolute name of the module. When a module or package is contained -within another package it is possible to make a relative import within -the same top package without having to mention the package name. By -using leading dots in the specified module or package after "from" you -can specify how high to traverse up the current package hierarchy -without specifying exact names. One leading dot means the current -package where the module making the import exists. Two dots means up -one package level. Three dots is up two levels, etc. So if you execute -"from . import mod" from a module in the "pkg" package then you will -end up importing "pkg.mod". If you execute "from ..subpkg2 import mod" -from within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The -specification for relative imports is contained in the Package -Relative Imports section. - -"importlib.import_module()" is provided to support applications that -determine dynamically the modules to be loaded. - -Raises an auditing event "import" with arguments "module", "filename", -"sys.path", "sys.meta_path", "sys.path_hooks". - - -Future statements -================= - -A *future statement* is a directive to the compiler that a particular -module should be compiled using syntax or semantics that will be -available in a specified future release of Python where the feature -becomes standard. - -The future statement is intended to ease migration to future versions -of Python that introduce incompatible changes to the language. It -allows use of the new features on a per-module basis before the -release in which the feature becomes standard. - - future_stmt ::= "from" "__future__" "import" feature ["as" identifier] - ("," feature ["as" identifier])* - | "from" "__future__" "import" "(" feature ["as" identifier] - ("," feature ["as" identifier])* [","] ")" - feature ::= identifier - -A future statement must appear near the top of the module. The only -lines that can appear before a future statement are: - -* the module docstring (if any), - -* comments, - -* blank lines, and - -* other future statements. - -The only feature that requires using the future statement is -"annotations" (see **PEP 563**). - -All historical features enabled by the future statement are still -recognized by Python 3. The list includes "absolute_import", -"division", "generators", "generator_stop", "unicode_literals", -"print_function", "nested_scopes" and "with_statement". They are all -redundant because they are always enabled, and only kept for backwards -compatibility. - -A future statement is recognized and treated specially at compile -time: Changes to the semantics of core constructs are often -implemented by generating different code. It may even be the case -that a new feature introduces new incompatible syntax (such as a new -reserved word), in which case the compiler may need to parse the -module differently. Such decisions cannot be pushed off until -runtime. - -For any given release, the compiler knows which feature names have -been defined, and raises a compile-time error if a future statement -contains a feature not known to it. - -The direct runtime semantics are the same as for any import statement: -there is a standard module "__future__", described later, and it will -be imported in the usual way at the time the future statement is -executed. 
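-
-As a hedged illustration of the "annotations" feature mentioned above
-(the name "Undefined" is a deliberately unbound placeholder; with the
-future statement the annotation is kept as a plain string and never
-evaluated):
-
-   from __future__ import annotations
-
-   def greet(name: Undefined) -> None:
-       pass
-
-   print(greet.__annotations__["name"])   # outputs: Undefined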
- -The interesting runtime semantics depend on the specific feature -enabled by the future statement. - -Note that there is nothing special about the statement: - - import __future__ [as name] - -That is not a future statement; it’s an ordinary import statement with -no special semantics or syntax restrictions. - -Code compiled by calls to the built-in functions "exec()" and -"compile()" that occur in a module "M" containing a future statement -will, by default, use the new syntax or semantics associated with the -future statement. This can be controlled by optional arguments to -"compile()" — see the documentation of that function for details. - -A future statement typed at an interactive interpreter prompt will -take effect for the rest of the interpreter session. If an -interpreter is started with the "-i" option, is passed a script name -to execute, and the script includes a future statement, it will be in -effect in the interactive session started after the script is -executed. - -See also: - - **PEP 236** - Back to the __future__ - The original proposal for the __future__ mechanism. -''', - 'in': r'''Membership test operations -************************** - -The operators "in" and "not in" test for membership. "x in s" -evaluates to "True" if *x* is a member of *s*, and "False" otherwise. -"x not in s" returns the negation of "x in s". All built-in sequences -and set types support this as well as dictionary, for which "in" tests -whether the dictionary has a given key. For container types such as -list, tuple, set, frozenset, dict, or collections.deque, the -expression "x in y" is equivalent to "any(x is e or x == e for e in -y)". - -For the string and bytes types, "x in y" is "True" if and only if *x* -is a substring of *y*. An equivalent test is "y.find(x) != -1". -Empty strings are always considered to be a substring of any other -string, so """ in "abc"" will return "True". - -For user-defined classes which define the "__contains__()" method, "x -in y" returns "True" if "y.__contains__(x)" returns a true value, and -"False" otherwise. - -For user-defined classes which do not define "__contains__()" but do -define "__iter__()", "x in y" is "True" if some value "z", for which -the expression "x is z or x == z" is true, is produced while iterating -over "y". If an exception is raised during the iteration, it is as if -"in" raised that exception. - -Lastly, the old-style iteration protocol is tried: if a class defines -"__getitem__()", "x in y" is "True" if and only if there is a non- -negative integer index *i* such that "x is y[i] or x == y[i]", and no -lower integer index raises the "IndexError" exception. (If any other -exception is raised, it is as if "in" raised that exception). - -The operator "not in" is defined to have the inverse truth value of -"in". -''', - 'integers': r'''Integer literals -**************** - -Integer literals are described by the following lexical definitions: - - integer ::= decinteger | bininteger | octinteger | hexinteger - decinteger ::= nonzerodigit (["_"] digit)* | "0"+ (["_"] "0")* - bininteger ::= "0" ("b" | "B") (["_"] bindigit)+ - octinteger ::= "0" ("o" | "O") (["_"] octdigit)+ - hexinteger ::= "0" ("x" | "X") (["_"] hexdigit)+ - nonzerodigit ::= "1"..."9" - digit ::= "0"..."9" - bindigit ::= "0" | "1" - octdigit ::= "0"..."7" - hexdigit ::= digit | "a"..."f" | "A"..."F" - -There is no limit for the length of integer literals apart from what -can be stored in available memory. 
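-
-A brief illustrative session (not part of the lexical grammar above);
-arbitrarily long literals evaluate exactly:
-
-   >>> 123456789012345678901234567890 + 1
-   123456789012345678901234567891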
- -Underscores are ignored for determining the numeric value of the -literal. They can be used to group digits for enhanced readability. -One underscore can occur between digits, and after base specifiers -like "0x". - -Note that leading zeros in a non-zero decimal number are not allowed. -This is for disambiguation with C-style octal literals, which Python -used before version 3.0. - -Some examples of integer literals: - - 7 2147483647 0o177 0b100110111 - 3 79228162514264337593543950336 0o377 0xdeadbeef - 100_000_000_000 0b_1110_0101 - -Changed in version 3.6: Underscores are now allowed for grouping -purposes in literals. -''', - 'lambda': r'''Lambdas -******* - - lambda_expr ::= "lambda" [parameter_list] ":" expression - -Lambda expressions (sometimes called lambda forms) are used to create -anonymous functions. The expression "lambda parameters: expression" -yields a function object. The unnamed object behaves like a function -object defined with: - - def (parameters): - return expression - -See section Function definitions for the syntax of parameter lists. -Note that functions created with lambda expressions cannot contain -statements or annotations. -''', - 'lists': r'''List displays -************* - -A list display is a possibly empty series of expressions enclosed in -square brackets: - - list_display ::= "[" [flexible_expression_list | comprehension] "]" - -A list display yields a new list object, the contents being specified -by either a list of expressions or a comprehension. When a comma- -separated list of expressions is supplied, its elements are evaluated -from left to right and placed into the list object in that order. -When a comprehension is supplied, the list is constructed from the -elements resulting from the comprehension. -''', - 'naming': r'''Naming and binding -****************** - - -Binding of names -================ - -*Names* refer to objects. Names are introduced by name binding -operations. - -The following constructs bind names: - -* formal parameters to functions, - -* class definitions, - -* function definitions, - -* assignment expressions, - -* targets that are identifiers if occurring in an assignment: - - * "for" loop header, - - * after "as" in a "with" statement, "except" clause, "except*" - clause, or in the as-pattern in structural pattern matching, - - * in a capture pattern in structural pattern matching - -* "import" statements. - -* "type" statements. - -* type parameter lists. - -The "import" statement of the form "from ... import *" binds all names -defined in the imported module, except those beginning with an -underscore. This form may only be used at the module level. - -A target occurring in a "del" statement is also considered bound for -this purpose (though the actual semantics are to unbind the name). - -Each assignment or import statement occurs within a block defined by a -class or function definition or at the module level (the top-level -code block). - -If a name is bound in a block, it is a local variable of that block, -unless declared as "nonlocal" or "global". If a name is bound at the -module level, it is a global variable. (The variables of the module -code block are local and global.) If a variable is used in a code -block but not defined there, it is a *free variable*. - -Each occurrence of a name in the program text refers to the *binding* -of that name established by the following name resolution rules. - - -Resolution of names -=================== - -A *scope* defines the visibility of a name within a block. 
If a local -variable is defined in a block, its scope includes that block. If the -definition occurs in a function block, the scope extends to any blocks -contained within the defining one, unless a contained block introduces -a different binding for the name. - -When a name is used in a code block, it is resolved using the nearest -enclosing scope. The set of all such scopes visible to a code block -is called the block’s *environment*. - -When a name is not found at all, a "NameError" exception is raised. If -the current scope is a function scope, and the name refers to a local -variable that has not yet been bound to a value at the point where the -name is used, an "UnboundLocalError" exception is raised. -"UnboundLocalError" is a subclass of "NameError". - -If a name binding operation occurs anywhere within a code block, all -uses of the name within the block are treated as references to the -current block. This can lead to errors when a name is used within a -block before it is bound. This rule is subtle. Python lacks -declarations and allows name binding operations to occur anywhere -within a code block. The local variables of a code block can be -determined by scanning the entire text of the block for name binding -operations. See the FAQ entry on UnboundLocalError for examples. - -If the "global" statement occurs within a block, all uses of the names -specified in the statement refer to the bindings of those names in the -top-level namespace. Names are resolved in the top-level namespace by -searching the global namespace, i.e. the namespace of the module -containing the code block, and the builtins namespace, the namespace -of the module "builtins". The global namespace is searched first. If -the names are not found there, the builtins namespace is searched -next. If the names are also not found in the builtins namespace, new -variables are created in the global namespace. The global statement -must precede all uses of the listed names. - -The "global" statement has the same scope as a name binding operation -in the same block. If the nearest enclosing scope for a free variable -contains a global statement, the free variable is treated as a global. - -The "nonlocal" statement causes corresponding names to refer to -previously bound variables in the nearest enclosing function scope. -"SyntaxError" is raised at compile time if the given name does not -exist in any enclosing function scope. Type parameters cannot be -rebound with the "nonlocal" statement. - -The namespace for a module is automatically created the first time a -module is imported. The main module for a script is always called -"__main__". - -Class definition blocks and arguments to "exec()" and "eval()" are -special in the context of name resolution. A class definition is an -executable statement that may use and define names. These references -follow the normal rules for name resolution with an exception that -unbound local variables are looked up in the global namespace. The -namespace of the class definition becomes the attribute dictionary of -the class. The scope of names defined in a class block is limited to -the class block; it does not extend to the code blocks of methods. -This includes comprehensions and generator expressions, but it does -not include annotation scopes, which have access to their enclosing -class scopes. 
This means that the following will fail: - - class A: - a = 42 - b = list(a + i for i in range(10)) - -However, the following will succeed: - - class A: - type Alias = Nested - class Nested: pass - - print(A.Alias.__value__) # <class 'A.Nested'> - - -Annotation scopes -================= - -Type parameter lists and "type" statements introduce *annotation -scopes*, which behave mostly like function scopes, but with some -exceptions discussed below. *Annotations* currently do not use -annotation scopes, but they are expected to use annotation scopes in -Python 3.14 when **PEP 649** is implemented. - -Annotation scopes are used in the following contexts: - -* Type parameter lists for generic type aliases. - -* Type parameter lists for generic functions. A generic function’s - annotations are executed within the annotation scope, but its - defaults and decorators are not. - -* Type parameter lists for generic classes. A generic class’s base - classes and keyword arguments are executed within the annotation - scope, but its decorators are not. - -* The bounds, constraints, and default values for type parameters - (lazily evaluated). - -* The value of type aliases (lazily evaluated). - -Annotation scopes differ from function scopes in the following ways: - -* Annotation scopes have access to their enclosing class namespace. If - an annotation scope is immediately within a class scope, or within - another annotation scope that is immediately within a class scope, - the code in the annotation scope can use names defined in the class - scope as if it were executed directly within the class body. This - contrasts with regular functions defined within classes, which - cannot access names defined in the class scope. - -* Expressions in annotation scopes cannot contain "yield", "yield - from", "await", or ":=" expressions. (These expressions are allowed - in other scopes contained within the annotation scope.) - -* Names defined in annotation scopes cannot be rebound with "nonlocal" - statements in inner scopes. This includes only type parameters, as - no other syntactic elements that can appear within annotation scopes - can introduce new names. - -* While annotation scopes have an internal name, that name is not - reflected in the *qualified name* of objects defined within the - scope. Instead, the "__qualname__" of such objects is as if the - object were defined in the enclosing scope. - -Added in version 3.12: Annotation scopes were introduced in Python -3.12 as part of **PEP 695**. - -Changed in version 3.13: Annotation scopes are also used for type -parameter defaults, as introduced by **PEP 696**. - - -Lazy evaluation -=============== - -The values of type aliases created through the "type" statement are -*lazily evaluated*. The same applies to the bounds, constraints, and -default values of type variables created through the type parameter -syntax. This means that they are not evaluated when the type alias or -type variable is created. Instead, they are only evaluated when doing -so is necessary to resolve an attribute access. - -Example: - - >>> type Alias = 1/0 - >>> Alias.__value__ - Traceback (most recent call last): - ... - ZeroDivisionError: division by zero - >>> def func[T: 1/0](): pass - >>> T = func.__type_params__[0] - >>> T.__bound__ - Traceback (most recent call last): - ... - ZeroDivisionError: division by zero - -Here the exception is raised only when the "__value__" attribute of -the type alias or the "__bound__" attribute of the type variable is -accessed.
- -This behavior is primarily useful for references to types that have -not yet been defined when the type alias or type variable is created. -For example, lazy evaluation enables creation of mutually recursive -type aliases: - - from typing import Literal - - type SimpleExpr = int | Parenthesized - type Parenthesized = tuple[Literal["("], Expr, Literal[")"]] - type Expr = SimpleExpr | tuple[SimpleExpr, Literal["+", "-"], Expr] - -Lazily evaluated values are evaluated in annotation scope, which means -that names that appear inside the lazily evaluated value are looked up -as if they were used in the immediately enclosing scope. - -Added in version 3.12. - - -Builtins and restricted execution -================================= - -**CPython implementation detail:** Users should not touch -"__builtins__"; it is strictly an implementation detail. Users -wanting to override values in the builtins namespace should "import" -the "builtins" module and modify its attributes appropriately. - -The builtins namespace associated with the execution of a code block -is actually found by looking up the name "__builtins__" in its global -namespace; this should be a dictionary or a module (in the latter case -the module’s dictionary is used). By default, when in the "__main__" -module, "__builtins__" is the built-in module "builtins"; when in any -other module, "__builtins__" is an alias for the dictionary of the -"builtins" module itself. - - -Interaction with dynamic features -================================= - -Name resolution of free variables occurs at runtime, not at compile -time. This means that the following code will print 42: - - i = 10 - def f(): - print(i) - i = 42 - f() - -The "eval()" and "exec()" functions do not have access to the full -environment for resolving names. Names may be resolved in the local -and global namespaces of the caller. Free variables are not resolved -in the nearest enclosing namespace, but in the global namespace. [1] -The "exec()" and "eval()" functions have optional arguments to -override the global and local namespace. If only one namespace is -specified, it is used for both. -''', - 'nonlocal': r'''The "nonlocal" statement -************************ - - nonlocal_stmt ::= "nonlocal" identifier ("," identifier)* - -When the definition of a function or class is nested (enclosed) within -the definitions of other functions, its nonlocal scopes are the local -scopes of the enclosing functions. The "nonlocal" statement causes the -listed identifiers to refer to names previously bound in nonlocal -scopes. It allows encapsulated code to rebind such nonlocal -identifiers. If a name is bound in more than one nonlocal scope, the -nearest binding is used. If a name is not bound in any nonlocal scope, -or if there is no nonlocal scope, a "SyntaxError" is raised. - -The "nonlocal" statement applies to the entire scope of a function or -class body. A "SyntaxError" is raised if a variable is used or -assigned to prior to its nonlocal declaration in the scope. - -See also: - - **PEP 3104** - Access to Names in Outer Scopes - The specification for the "nonlocal" statement. - -**Programmer’s note:** "nonlocal" is a directive to the parser and -applies only to code parsed along with it. See the note for the -"global" statement. -''', - 'numbers': r'''Numeric literals -**************** - -There are three types of numeric literals: integers, floating-point -numbers, and imaginary numbers. 
There are no complex literals -(complex numbers can be formed by adding a real number and an -imaginary number). - -Note that numeric literals do not include a sign; a phrase like "-1" -is actually an expression composed of the unary operator ‘"-"’ and the -literal "1". -''', - 'numeric-types': r'''Emulating numeric types -*********************** - -The following methods can be defined to emulate numeric objects. -Methods corresponding to operations that are not supported by the -particular kind of number implemented (e.g., bitwise operations for -non-integral numbers) should be left undefined. - -object.__add__(self, other) -object.__sub__(self, other) -object.__mul__(self, other) -object.__matmul__(self, other) -object.__truediv__(self, other) -object.__floordiv__(self, other) -object.__mod__(self, other) -object.__divmod__(self, other) -object.__pow__(self, other[, modulo]) -object.__lshift__(self, other) -object.__rshift__(self, other) -object.__and__(self, other) -object.__xor__(self, other) -object.__or__(self, other) - - These methods are called to implement the binary arithmetic - operations ("+", "-", "*", "@", "/", "//", "%", "divmod()", - "pow()", "**", "<<", ">>", "&", "^", "|"). For instance, to - evaluate the expression "x + y", where *x* is an instance of a - class that has an "__add__()" method, "type(x).__add__(x, y)" is - called. The "__divmod__()" method should be the equivalent to - using "__floordiv__()" and "__mod__()"; it should not be related to - "__truediv__()". Note that "__pow__()" should be defined to accept - an optional third argument if the ternary version of the built-in - "pow()" function is to be supported. - - If one of those methods does not support the operation with the - supplied arguments, it should return "NotImplemented". - -object.__radd__(self, other) -object.__rsub__(self, other) -object.__rmul__(self, other) -object.__rmatmul__(self, other) -object.__rtruediv__(self, other) -object.__rfloordiv__(self, other) -object.__rmod__(self, other) -object.__rdivmod__(self, other) -object.__rpow__(self, other[, modulo]) -object.__rlshift__(self, other) -object.__rrshift__(self, other) -object.__rand__(self, other) -object.__rxor__(self, other) -object.__ror__(self, other) - - These methods are called to implement the binary arithmetic - operations ("+", "-", "*", "@", "/", "//", "%", "divmod()", - "pow()", "**", "<<", ">>", "&", "^", "|") with reflected (swapped) - operands. These functions are only called if the left operand does - not support the corresponding operation [3] and the operands are of - different types. [4] For instance, to evaluate the expression "x - - y", where *y* is an instance of a class that has an "__rsub__()" - method, "type(y).__rsub__(y, x)" is called if "type(x).__sub__(x, - y)" returns "NotImplemented". - - Note that ternary "pow()" will not try calling "__rpow__()" (the - coercion rules would become too complicated). - - Note: - - If the right operand’s type is a subclass of the left operand’s - type and that subclass provides a different implementation of the - reflected method for the operation, this method will be called - before the left operand’s non-reflected method. This behavior - allows subclasses to override their ancestors’ operations. 
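-
-For illustration, a minimal sketch (the class name "Metres" is
-hypothetical, not from the reference text) of a type that supports
-"+" with built-in numbers from either side:
-
-   class Metres:
-       def __init__(self, value):
-           self.value = value
-
-       def __add__(self, other):
-           if isinstance(other, (int, float)):
-               return Metres(self.value + other)
-           return NotImplemented         # let Python try other.__radd__()
-
-       def __radd__(self, other):
-           # called for "other + self" once type(other).__add__ gives up
-           return self.__add__(other)
-
-   print((Metres(2) + 3).value)   # 5, via Metres.__add__
-   print((3 + Metres(2)).value)   # 5, via Metres.__radd__, because
-                                  # int.__add__ returned NotImplemented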
- -object.__iadd__(self, other) -object.__isub__(self, other) -object.__imul__(self, other) -object.__imatmul__(self, other) -object.__itruediv__(self, other) -object.__ifloordiv__(self, other) -object.__imod__(self, other) -object.__ipow__(self, other[, modulo]) -object.__ilshift__(self, other) -object.__irshift__(self, other) -object.__iand__(self, other) -object.__ixor__(self, other) -object.__ior__(self, other) - - These methods are called to implement the augmented arithmetic - assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", "**=", - "<<=", ">>=", "&=", "^=", "|="). These methods should attempt to - do the operation in-place (modifying *self*) and return the result - (which could be, but does not have to be, *self*). If a specific - method is not defined, or if that method returns "NotImplemented", - the augmented assignment falls back to the normal methods. For - instance, if *x* is an instance of a class with an "__iadd__()" - method, "x += y" is equivalent to "x = x.__iadd__(y)" . If - "__iadd__()" does not exist, or if "x.__iadd__(y)" returns - "NotImplemented", "x.__add__(y)" and "y.__radd__(x)" are - considered, as with the evaluation of "x + y". In certain - situations, augmented assignment can result in unexpected errors - (see Why does a_tuple[i] += [‘item’] raise an exception when the - addition works?), but this behavior is in fact part of the data - model. - -object.__neg__(self) -object.__pos__(self) -object.__abs__(self) -object.__invert__(self) - - Called to implement the unary arithmetic operations ("-", "+", - "abs()" and "~"). - -object.__complex__(self) -object.__int__(self) -object.__float__(self) - - Called to implement the built-in functions "complex()", "int()" and - "float()". Should return a value of the appropriate type. - -object.__index__(self) - - Called to implement "operator.index()", and whenever Python needs - to losslessly convert the numeric object to an integer object (such - as in slicing, or in the built-in "bin()", "hex()" and "oct()" - functions). Presence of this method indicates that the numeric - object is an integer type. Must return an integer. - - If "__int__()", "__float__()" and "__complex__()" are not defined - then corresponding built-in functions "int()", "float()" and - "complex()" fall back to "__index__()". - -object.__round__(self[, ndigits]) -object.__trunc__(self) -object.__floor__(self) -object.__ceil__(self) - - Called to implement the built-in function "round()" and "math" - functions "trunc()", "floor()" and "ceil()". Unless *ndigits* is - passed to "__round__()" all these methods should return the value - of the object truncated to an "Integral" (typically an "int"). - - The built-in function "int()" falls back to "__trunc__()" if - neither "__int__()" nor "__index__()" is defined. - - Changed in version 3.11: The delegation of "int()" to "__trunc__()" - is deprecated. -''', - 'objects': r'''Objects, values and types -************************* - -*Objects* are Python’s abstraction for data. All data in a Python -program is represented by objects or by relations between objects. -Even code is represented by objects. - -Every object has an identity, a type and a value. An object’s -*identity* never changes once it has been created; you may think of it -as the object’s address in memory. The "is" operator compares the -identity of two objects; the "id()" function returns an integer -representing its identity. - -**CPython implementation detail:** For CPython, "id(x)" is the memory -address where "x" is stored. 
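-
-For example (a small illustrative snippet; the integers returned by
-"id()" vary from run to run):
-
-   a = object()
-   b = a              # a second reference to the same object
-   c = object()       # a distinct object
-
-   print(a is b)          # True: identical objects
-   print(a is c)          # False: different identities
-   print(id(a) == id(b))  # True: "id()" reflects identity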
- -An object’s type determines the operations that the object supports -(e.g., “does it have a length?”) and also defines the possible values -for objects of that type. The "type()" function returns an object’s -type (which is an object itself). Like its identity, an object’s -*type* is also unchangeable. [1] - -The *value* of some objects can change. Objects whose value can -change are said to be *mutable*; objects whose value is unchangeable -once they are created are called *immutable*. (The value of an -immutable container object that contains a reference to a mutable -object can change when the latter’s value is changed; however the -container is still considered immutable, because the collection of -objects it contains cannot be changed. So, immutability is not -strictly the same as having an unchangeable value, it is more subtle.) -An object’s mutability is determined by its type; for instance, -numbers, strings and tuples are immutable, while dictionaries and -lists are mutable. - -Objects are never explicitly destroyed; however, when they become -unreachable they may be garbage-collected. An implementation is -allowed to postpone garbage collection or omit it altogether — it is a -matter of implementation quality how garbage collection is -implemented, as long as no objects are collected that are still -reachable. - -**CPython implementation detail:** CPython currently uses a reference- -counting scheme with (optional) delayed detection of cyclically linked -garbage, which collects most objects as soon as they become -unreachable, but is not guaranteed to collect garbage containing -circular references. See the documentation of the "gc" module for -information on controlling the collection of cyclic garbage. Other -implementations act differently and CPython may change. Do not depend -on immediate finalization of objects when they become unreachable (so -you should always close files explicitly). - -Note that the use of the implementation’s tracing or debugging -facilities may keep objects alive that would normally be collectable. -Also note that catching an exception with a "try"…"except" statement -may keep objects alive. - -Some objects contain references to “external” resources such as open -files or windows. It is understood that these resources are freed -when the object is garbage-collected, but since garbage collection is -not guaranteed to happen, such objects also provide an explicit way to -release the external resource, usually a "close()" method. Programs -are strongly recommended to explicitly close such objects. The -"try"…"finally" statement and the "with" statement provide convenient -ways to do this. - -Some objects contain references to other objects; these are called -*containers*. Examples of containers are tuples, lists and -dictionaries. The references are part of a container’s value. In -most cases, when we talk about the value of a container, we imply the -values, not the identities of the contained objects; however, when we -talk about the mutability of a container, only the identities of the -immediately contained objects are implied. So, if an immutable -container (like a tuple) contains a reference to a mutable object, its -value changes if that mutable object is changed. - -Types affect almost all aspects of object behavior. 
Even the -importance of object identity is affected in some sense: for immutable -types, operations that compute new values may actually return a -reference to any existing object with the same type and value, while -for mutable objects this is not allowed. For example, after "a = 1; b -= 1", *a* and *b* may or may not refer to the same object with the -value one, depending on the implementation. This is because "int" is -an immutable type, so the reference to "1" can be reused. This -behaviour depends on the implementation used, so should not be relied -upon, but is something to be aware of when making use of object -identity tests. However, after "c = []; d = []", *c* and *d* are -guaranteed to refer to two different, unique, newly created empty -lists. (Note that "e = f = []" assigns the *same* object to both *e* -and *f*.) -''', - 'operator-summary': r'''Operator precedence -******************* - -The following table summarizes the operator precedence in Python, from -highest precedence (most binding) to lowest precedence (least -binding). Operators in the same box have the same precedence. Unless -the syntax is explicitly given, operators are binary. Operators in -the same box group left to right (except for exponentiation and -conditional expressions, which group from right to left). - -Note that comparisons, membership tests, and identity tests, all have -the same precedence and have a left-to-right chaining feature as -described in the Comparisons section. - -+-------------------------------------------------+---------------------------------------+ -| Operator | Description | -|=================================================|=======================================| -| "(expressions...)", "[expressions...]", "{key: | Binding or parenthesized expression, | -| value...}", "{expressions...}" | list display, dictionary display, set | -| | display | -+-------------------------------------------------+---------------------------------------+ -| "x[index]", "x[index:index]", | Subscription, slicing, call, | -| "x(arguments...)", "x.attribute" | attribute reference | -+-------------------------------------------------+---------------------------------------+ -| "await x" | Await expression | -+-------------------------------------------------+---------------------------------------+ -| "**" | Exponentiation [5] | -+-------------------------------------------------+---------------------------------------+ -| "+x", "-x", "~x" | Positive, negative, bitwise NOT | -+-------------------------------------------------+---------------------------------------+ -| "*", "@", "/", "//", "%" | Multiplication, matrix | -| | multiplication, division, floor | -| | division, remainder [6] | -+-------------------------------------------------+---------------------------------------+ -| "+", "-" | Addition and subtraction | -+-------------------------------------------------+---------------------------------------+ -| "<<", ">>" | Shifts | -+-------------------------------------------------+---------------------------------------+ -| "&" | Bitwise AND | -+-------------------------------------------------+---------------------------------------+ -| "^" | Bitwise XOR | -+-------------------------------------------------+---------------------------------------+ -| "|" | Bitwise OR | -+-------------------------------------------------+---------------------------------------+ -| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership | -| ">=", "!=", "==" | tests and identity tests | 
-+-------------------------------------------------+---------------------------------------+ -| "not x" | Boolean NOT | -+-------------------------------------------------+---------------------------------------+ -| "and" | Boolean AND | -+-------------------------------------------------+---------------------------------------+ -| "or" | Boolean OR | -+-------------------------------------------------+---------------------------------------+ -| "if" – "else" | Conditional expression | -+-------------------------------------------------+---------------------------------------+ -| "lambda" | Lambda expression | -+-------------------------------------------------+---------------------------------------+ -| ":=" | Assignment expression | -+-------------------------------------------------+---------------------------------------+ - --[ Footnotes ]- - -[1] While "abs(x%y) < abs(y)" is true mathematically, for floats it - may not be true numerically due to roundoff. For example, and - assuming a platform on which a Python float is an IEEE 754 double- - precision number, in order that "-1e-100 % 1e100" have the same - sign as "1e100", the computed result is "-1e-100 + 1e100", which - is numerically exactly equal to "1e100". The function - "math.fmod()" returns a result whose sign matches the sign of the - first argument instead, and so returns "-1e-100" in this case. - Which approach is more appropriate depends on the application. - -[2] If x is very close to an exact integer multiple of y, it’s - possible for "x//y" to be one larger than "(x-x%y)//y" due to - rounding. In such cases, Python returns the latter result, in - order to preserve that "divmod(x,y)[0] * y + x % y" be very close - to "x". - -[3] The Unicode standard distinguishes between *code points* (e.g. - U+0041) and *abstract characters* (e.g. “LATIN CAPITAL LETTER A”). - While most abstract characters in Unicode are only represented - using one code point, there is a number of abstract characters - that can in addition be represented using a sequence of more than - one code point. For example, the abstract character “LATIN - CAPITAL LETTER C WITH CEDILLA” can be represented as a single - *precomposed character* at code position U+00C7, or as a sequence - of a *base character* at code position U+0043 (LATIN CAPITAL - LETTER C), followed by a *combining character* at code position - U+0327 (COMBINING CEDILLA). - - The comparison operators on strings compare at the level of - Unicode code points. This may be counter-intuitive to humans. For - example, ""\u00C7" == "\u0043\u0327"" is "False", even though both - strings represent the same abstract character “LATIN CAPITAL - LETTER C WITH CEDILLA”. - - To compare strings at the level of abstract characters (that is, - in a way intuitive to humans), use "unicodedata.normalize()". - -[4] Due to automatic garbage-collection, free lists, and the dynamic - nature of descriptors, you may notice seemingly unusual behaviour - in certain uses of the "is" operator, like those involving - comparisons between instance methods, or constants. Check their - documentation for more info. - -[5] The power operator "**" binds less tightly than an arithmetic or - bitwise unary operator on its right, that is, "2**-1" is "0.5". - -[6] The "%" operator is also used for string formatting; the same - precedence applies. -''', - 'pass': r'''The "pass" statement -******************** - - pass_stmt ::= "pass" - -"pass" is a null operation — when it is executed, nothing happens. 
It -is useful as a placeholder when a statement is required syntactically, -but no code needs to be executed, for example: - - def f(arg): pass # a function that does nothing (yet) - - class C: pass # a class with no methods (yet) -''', - 'power': r'''The power operator -****************** - -The power operator binds more tightly than unary operators on its -left; it binds less tightly than unary operators on its right. The -syntax is: - - power ::= (await_expr | primary) ["**" u_expr] - -Thus, in an unparenthesized sequence of power and unary operators, the -operators are evaluated from right to left (this does not constrain -the evaluation order for the operands): "-1**2" results in "-1". - -The power operator has the same semantics as the built-in "pow()" -function, when called with two arguments: it yields its left argument -raised to the power of its right argument. The numeric arguments are -first converted to a common type, and the result is of that type. - -For int operands, the result has the same type as the operands unless -the second argument is negative; in that case, all arguments are -converted to float and a float result is delivered. For example, -"10**2" returns "100", but "10**-2" returns "0.01". - -Raising "0.0" to a negative power results in a "ZeroDivisionError". -Raising a negative number to a fractional power results in a "complex" -number. (In earlier versions it raised a "ValueError".) - -This operation can be customized using the special "__pow__()" and -"__rpow__()" methods. -''', - 'raise': r'''The "raise" statement -********************* - - raise_stmt ::= "raise" [expression ["from" expression]] - -If no expressions are present, "raise" re-raises the exception that is -currently being handled, which is also known as the *active -exception*. If there isn’t currently an active exception, a -"RuntimeError" exception is raised indicating that this is an error. - -Otherwise, "raise" evaluates the first expression as the exception -object. It must be either a subclass or an instance of -"BaseException". If it is a class, the exception instance will be -obtained when needed by instantiating the class with no arguments. - -The *type* of the exception is the exception instance’s class, the -*value* is the instance itself. - -A traceback object is normally created automatically when an exception -is raised and attached to it as the "__traceback__" attribute. You can -create an exception and set your own traceback in one step using the -"with_traceback()" exception method (which returns the same exception -instance, with its traceback set to its argument), like so: - - raise Exception("foo occurred").with_traceback(tracebackobj) - -The "from" clause is used for exception chaining: if given, the second -*expression* must be another exception class or instance. If the -second expression is an exception instance, it will be attached to the -raised exception as the "__cause__" attribute (which is writable). If -the expression is an exception class, the class will be instantiated -and the resulting exception instance will be attached to the raised -exception as the "__cause__" attribute. If the raised exception is not -handled, both exceptions will be printed: - - >>> try: - ... print(1 / 0) - ... except Exception as exc: - ... raise RuntimeError("Something bad happened") from exc - ... 
-   Traceback (most recent call last):
-     File "<stdin>", line 2, in <module>
-       print(1 / 0)
-       ~~^~~
-   ZeroDivisionError: division by zero
-
-   The above exception was the direct cause of the following exception:
-
-   Traceback (most recent call last):
-     File "<stdin>", line 4, in <module>
-       raise RuntimeError("Something bad happened") from exc
-   RuntimeError: Something bad happened
-
-A similar mechanism works implicitly if a new exception is raised when
-an exception is already being handled. An exception may be handled
-when an "except" or "finally" clause, or a "with" statement, is used.
-The previous exception is then attached as the new exception’s
-"__context__" attribute:
-
-   >>> try:
-   ...     print(1 / 0)
-   ... except:
-   ...     raise RuntimeError("Something bad happened")
-   ...
-   Traceback (most recent call last):
-     File "<stdin>", line 2, in <module>
-       print(1 / 0)
-       ~~^~~
-   ZeroDivisionError: division by zero
-
-   During handling of the above exception, another exception occurred:
-
-   Traceback (most recent call last):
-     File "<stdin>", line 4, in <module>
-       raise RuntimeError("Something bad happened")
-   RuntimeError: Something bad happened
-
-Exception chaining can be explicitly suppressed by specifying "None"
-in the "from" clause:
-
-   >>> try:
-   ...     print(1 / 0)
-   ... except:
-   ...     raise RuntimeError("Something bad happened") from None
-   ...
-   Traceback (most recent call last):
-     File "<stdin>", line 4, in <module>
-   RuntimeError: Something bad happened
-
-Additional information on exceptions can be found in section
-Exceptions, and information about handling exceptions is in section
-The try statement.
-
-Changed in version 3.3: "None" is now permitted as "Y" in "raise X
-from Y". Added the "__suppress_context__" attribute to suppress
-automatic display of the exception context.
-
-Changed in version 3.11: If the traceback of the active exception is
-modified in an "except" clause, a subsequent "raise" statement re-
-raises the exception with the modified traceback. Previously, the
-exception was re-raised with the traceback it had when it was caught.
-''',
- 'return': r'''The "return" statement
-**********************
-
-   return_stmt ::= "return" [expression_list]
-
-"return" may only occur syntactically nested in a function definition,
-not within a nested class definition.
-
-If an expression list is present, it is evaluated, else "None" is
-substituted.
-
-"return" leaves the current function call with the expression list (or
-"None") as return value.
-
-When "return" passes control out of a "try" statement with a "finally"
-clause, that "finally" clause is executed before really leaving the
-function.
-
-In a generator function, the "return" statement indicates that the
-generator is done and will cause "StopIteration" to be raised. The
-returned value (if any) is used as an argument to construct
-"StopIteration" and becomes the "StopIteration.value" attribute.
-
-In an asynchronous generator function, an empty "return" statement
-indicates that the asynchronous generator is done and will cause
-"StopAsyncIteration" to be raised. A non-empty "return" statement is
-a syntax error in an asynchronous generator function.
-''',
- 'sequence-types': r'''Emulating container types
-*************************
-
-The following methods can be defined to implement container objects.
-None of them are provided by the "object" class itself. Containers
-usually are *sequences* (such as "lists" or "tuples") or *mappings*
-(like *dictionaries*), but can represent other containers as well.
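-
-As a motivating sketch, a minimal immutable sequence can be built from
-"__len__()" and "__getitem__()" alone (the "Squares" class below is
-invented for this example):
-
-   class Squares:
-       def __init__(self, n):
-           self._n = n
-
-       def __len__(self):
-           return self._n
-
-       def __getitem__(self, index):
-           # Raise IndexError for out-of-range keys so that
-           # iteration knows where the sequence ends.
-           if not 0 <= index < self._n:
-               raise IndexError(index)
-           return index * index
-
-   s = Squares(4)
-   print(len(s))    # 4
-   print(list(s))   # [0, 1, 4, 9]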
-The first set of methods is used either to emulate a sequence or to -emulate a mapping; the difference is that for a sequence, the -allowable keys should be the integers *k* for which "0 <= k < N" where -*N* is the length of the sequence, or "slice" objects, which define a -range of items. It is also recommended that mappings provide the -methods "keys()", "values()", "items()", "get()", "clear()", -"setdefault()", "pop()", "popitem()", "copy()", and "update()" -behaving similar to those for Python’s standard "dictionary" objects. -The "collections.abc" module provides a "MutableMapping" *abstract -base class* to help create those methods from a base set of -"__getitem__()", "__setitem__()", "__delitem__()", and "keys()". - -Mutable sequences should provide methods "append()", "clear()", -"count()", "extend()", "index()", "insert()", "pop()", "remove()", and -"reverse()", like Python standard "list" objects. Finally, sequence -types should implement addition (meaning concatenation) and -multiplication (meaning repetition) by defining the methods -"__add__()", "__radd__()", "__iadd__()", "__mul__()", "__rmul__()" and -"__imul__()" described below; they should not define other numerical -operators. - -It is recommended that both mappings and sequences implement the -"__contains__()" method to allow efficient use of the "in" operator; -for mappings, "in" should search the mapping’s keys; for sequences, it -should search through the values. It is further recommended that both -mappings and sequences implement the "__iter__()" method to allow -efficient iteration through the container; for mappings, "__iter__()" -should iterate through the object’s keys; for sequences, it should -iterate through the values. - -object.__len__(self) - - Called to implement the built-in function "len()". Should return - the length of the object, an integer ">=" 0. Also, an object that - doesn’t define a "__bool__()" method and whose "__len__()" method - returns zero is considered to be false in a Boolean context. - - **CPython implementation detail:** In CPython, the length is - required to be at most "sys.maxsize". If the length is larger than - "sys.maxsize" some features (such as "len()") may raise - "OverflowError". To prevent raising "OverflowError" by truth value - testing, an object must define a "__bool__()" method. - -object.__length_hint__(self) - - Called to implement "operator.length_hint()". Should return an - estimated length for the object (which may be greater or less than - the actual length). The length must be an integer ">=" 0. The - return value may also be "NotImplemented", which is treated the - same as if the "__length_hint__" method didn’t exist at all. This - method is purely an optimization and is never required for - correctness. - - Added in version 3.4. - -Note: - - Slicing is done exclusively with the following three methods. A - call like - - a[1:2] = b - - is translated to - - a[slice(1, 2, None)] = b - - and so forth. Missing slice items are always filled in with "None". - -object.__getitem__(self, key) - - Called to implement evaluation of "self[key]". For *sequence* - types, the accepted keys should be integers. Optionally, they may - support "slice" objects as well. Negative index support is also - optional. If *key* is of an inappropriate type, "TypeError" may be - raised; if *key* is a value outside the set of indexes for the - sequence (after any special interpretation of negative values), - "IndexError" should be raised. 
For *mapping* types, if *key* is - missing (not in the container), "KeyError" should be raised. - - Note: - - "for" loops expect that an "IndexError" will be raised for - illegal indexes to allow proper detection of the end of the - sequence. - - Note: - - When subscripting a *class*, the special class method - "__class_getitem__()" may be called instead of "__getitem__()". - See __class_getitem__ versus __getitem__ for more details. - -object.__setitem__(self, key, value) - - Called to implement assignment to "self[key]". Same note as for - "__getitem__()". This should only be implemented for mappings if - the objects support changes to the values for keys, or if new keys - can be added, or for sequences if elements can be replaced. The - same exceptions should be raised for improper *key* values as for - the "__getitem__()" method. - -object.__delitem__(self, key) - - Called to implement deletion of "self[key]". Same note as for - "__getitem__()". This should only be implemented for mappings if - the objects support removal of keys, or for sequences if elements - can be removed from the sequence. The same exceptions should be - raised for improper *key* values as for the "__getitem__()" method. - -object.__missing__(self, key) - - Called by "dict"."__getitem__()" to implement "self[key]" for dict - subclasses when key is not in the dictionary. - -object.__iter__(self) - - This method is called when an *iterator* is required for a - container. This method should return a new iterator object that can - iterate over all the objects in the container. For mappings, it - should iterate over the keys of the container. - -object.__reversed__(self) - - Called (if present) by the "reversed()" built-in to implement - reverse iteration. It should return a new iterator object that - iterates over all the objects in the container in reverse order. - - If the "__reversed__()" method is not provided, the "reversed()" - built-in will fall back to using the sequence protocol ("__len__()" - and "__getitem__()"). Objects that support the sequence protocol - should only provide "__reversed__()" if they can provide an - implementation that is more efficient than the one provided by - "reversed()". - -The membership test operators ("in" and "not in") are normally -implemented as an iteration through a container. However, container -objects can supply the following special method with a more efficient -implementation, which also does not require the object be iterable. - -object.__contains__(self, item) - - Called to implement membership test operators. Should return true - if *item* is in *self*, false otherwise. For mapping objects, this - should consider the keys of the mapping rather than the values or - the key-item pairs. - - For objects that don’t define "__contains__()", the membership test - first tries iteration via "__iter__()", then the old sequence - iteration protocol via "__getitem__()", see this section in the - language reference. -''', - 'shifting': r'''Shifting operations -******************* - -The shifting operations have lower priority than the arithmetic -operations: - - shift_expr ::= a_expr | shift_expr ("<<" | ">>") a_expr - -These operators accept integers as arguments. They shift the first -argument to the left or right by the number of bits given by the -second argument. - -The left shift operation can be customized using the special -"__lshift__()" and "__rlshift__()" methods. 
The right shift operation -can be customized using the special "__rshift__()" and "__rrshift__()" -methods. - -A right shift by *n* bits is defined as floor division by "pow(2,n)". -A left shift by *n* bits is defined as multiplication with "pow(2,n)". -''', - 'slicings': r'''Slicings -******** - -A slicing selects a range of items in a sequence object (e.g., a -string, tuple or list). Slicings may be used as expressions or as -targets in assignment or "del" statements. The syntax for a slicing: - - slicing ::= primary "[" slice_list "]" - slice_list ::= slice_item ("," slice_item)* [","] - slice_item ::= expression | proper_slice - proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ] - lower_bound ::= expression - upper_bound ::= expression - stride ::= expression - -There is ambiguity in the formal syntax here: anything that looks like -an expression list also looks like a slice list, so any subscription -can be interpreted as a slicing. Rather than further complicating the -syntax, this is disambiguated by defining that in this case the -interpretation as a subscription takes priority over the -interpretation as a slicing (this is the case if the slice list -contains no proper slice). - -The semantics for a slicing are as follows. The primary is indexed -(using the same "__getitem__()" method as normal subscription) with a -key that is constructed from the slice list, as follows. If the slice -list contains at least one comma, the key is a tuple containing the -conversion of the slice items; otherwise, the conversion of the lone -slice item is the key. The conversion of a slice item that is an -expression is that expression. The conversion of a proper slice is a -slice object (see section The standard type hierarchy) whose "start", -"stop" and "step" attributes are the values of the expressions given -as lower bound, upper bound and stride, respectively, substituting -"None" for missing expressions. -''', - 'specialattrs': r'''Special Attributes -****************** - -The implementation adds a few special read-only attributes to several -object types, where they are relevant. Some of these are not reported -by the "dir()" built-in function. - -definition.__name__ - - The name of the class, function, method, descriptor, or generator - instance. - -definition.__qualname__ - - The *qualified name* of the class, function, method, descriptor, or - generator instance. - - Added in version 3.3. - -definition.__module__ - - The name of the module in which a class or function was defined. - -definition.__doc__ - - The documentation string of a class or function, or "None" if - undefined. - -definition.__type_params__ - - The type parameters of generic classes, functions, and type - aliases. For classes and functions that are not generic, this will - be an empty tuple. - - Added in version 3.12. -''', - 'specialnames': r'''Special method names -******************** - -A class can implement certain operations that are invoked by special -syntax (such as arithmetic operations or subscripting and slicing) by -defining methods with special names. This is Python’s approach to -*operator overloading*, allowing classes to define their own behavior -with respect to language operators. For instance, if a class defines -a method named "__getitem__()", and "x" is an instance of this class, -then "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)". 
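-
-For example (an illustrative snippet; the "Doubler" class is invented
-here):
-
-   class Doubler:
-       def __getitem__(self, i):
-           return 2 * i
-
-   x = Doubler()
-   print(x[21])                        # 42
-   print(type(x).__getitem__(x, 21))   # 42: the equivalent call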
-Except where mentioned, attempts to execute an operation raise an -exception when no appropriate method is defined (typically -"AttributeError" or "TypeError"). - -Setting a special method to "None" indicates that the corresponding -operation is not available. For example, if a class sets "__iter__()" -to "None", the class is not iterable, so calling "iter()" on its -instances will raise a "TypeError" (without falling back to -"__getitem__()"). [2] - -When implementing a class that emulates any built-in type, it is -important that the emulation only be implemented to the degree that it -makes sense for the object being modelled. For example, some -sequences may work well with retrieval of individual elements, but -extracting a slice may not make sense. (One example of this is the -NodeList interface in the W3C’s Document Object Model.) - - -Basic customization -=================== - -object.__new__(cls[, ...]) - - Called to create a new instance of class *cls*. "__new__()" is a - static method (special-cased so you need not declare it as such) - that takes the class of which an instance was requested as its - first argument. The remaining arguments are those passed to the - object constructor expression (the call to the class). The return - value of "__new__()" should be the new object instance (usually an - instance of *cls*). - - Typical implementations create a new instance of the class by - invoking the superclass’s "__new__()" method using - "super().__new__(cls[, ...])" with appropriate arguments and then - modifying the newly created instance as necessary before returning - it. - - If "__new__()" is invoked during object construction and it returns - an instance of *cls*, then the new instance’s "__init__()" method - will be invoked like "__init__(self[, ...])", where *self* is the - new instance and the remaining arguments are the same as were - passed to the object constructor. - - If "__new__()" does not return an instance of *cls*, then the new - instance’s "__init__()" method will not be invoked. - - "__new__()" is intended mainly to allow subclasses of immutable - types (like int, str, or tuple) to customize instance creation. It - is also commonly overridden in custom metaclasses in order to - customize class creation. - -object.__init__(self[, ...]) - - Called after the instance has been created (by "__new__()"), but - before it is returned to the caller. The arguments are those - passed to the class constructor expression. If a base class has an - "__init__()" method, the derived class’s "__init__()" method, if - any, must explicitly call it to ensure proper initialization of the - base class part of the instance; for example: - "super().__init__([args...])". - - Because "__new__()" and "__init__()" work together in constructing - objects ("__new__()" to create it, and "__init__()" to customize - it), no non-"None" value may be returned by "__init__()"; doing so - will cause a "TypeError" to be raised at runtime. - -object.__del__(self) - - Called when the instance is about to be destroyed. This is also - called a finalizer or (improperly) a destructor. If a base class - has a "__del__()" method, the derived class’s "__del__()" method, - if any, must explicitly call it to ensure proper deletion of the - base class part of the instance. - - It is possible (though not recommended!) for the "__del__()" method - to postpone destruction of the instance by creating a new reference - to it. This is called object *resurrection*. 
It is - implementation-dependent whether "__del__()" is called a second - time when a resurrected object is about to be destroyed; the - current *CPython* implementation only calls it once. - - It is not guaranteed that "__del__()" methods are called for - objects that still exist when the interpreter exits. - "weakref.finalize" provides a straightforward way to register a - cleanup function to be called when an object is garbage collected. - - Note: - - "del x" doesn’t directly call "x.__del__()" — the former - decrements the reference count for "x" by one, and the latter is - only called when "x"’s reference count reaches zero. - - **CPython implementation detail:** It is possible for a reference - cycle to prevent the reference count of an object from going to - zero. In this case, the cycle will be later detected and deleted - by the *cyclic garbage collector*. A common cause of reference - cycles is when an exception has been caught in a local variable. - The frame’s locals then reference the exception, which references - its own traceback, which references the locals of all frames caught - in the traceback. - - See also: Documentation for the "gc" module. - - Warning: - - Due to the precarious circumstances under which "__del__()" - methods are invoked, exceptions that occur during their execution - are ignored, and a warning is printed to "sys.stderr" instead. - In particular: - - * "__del__()" can be invoked when arbitrary code is being - executed, including from any arbitrary thread. If "__del__()" - needs to take a lock or invoke any other blocking resource, it - may deadlock as the resource may already be taken by the code - that gets interrupted to execute "__del__()". - - * "__del__()" can be executed during interpreter shutdown. As a - consequence, the global variables it needs to access (including - other modules) may already have been deleted or set to "None". - Python guarantees that globals whose name begins with a single - underscore are deleted from their module before other globals - are deleted; if no other references to such globals exist, this - may help in assuring that imported modules are still available - at the time when the "__del__()" method is called. - -object.__repr__(self) - - Called by the "repr()" built-in function to compute the “official” - string representation of an object. If at all possible, this - should look like a valid Python expression that could be used to - recreate an object with the same value (given an appropriate - environment). If this is not possible, a string of the form - "<...some useful description...>" should be returned. The return - value must be a string object. If a class defines "__repr__()" but - not "__str__()", then "__repr__()" is also used when an “informal” - string representation of instances of that class is required. - - This is typically used for debugging, so it is important that the - representation is information-rich and unambiguous. A default - implementation is provided by the "object" class itself. - -object.__str__(self) - - Called by "str(object)", the default "__format__()" implementation, - and the built-in function "print()", to compute the “informal” or - nicely printable string representation of an object. The return - value must be a str object. - - This method differs from "object.__repr__()" in that there is no - expectation that "__str__()" return a valid Python expression: a - more convenient or concise representation can be used. 
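-
-   For illustration, a class might implement both methods as follows
-   (a sketch; the "Point" class is invented here):
-
-      class Point:
-          def __init__(self, x, y):
-              self.x, self.y = x, y
-
-          def __repr__(self):
-              # Unambiguous and, ideally, eval()-able.
-              return f"Point({self.x!r}, {self.y!r})"
-
-          def __str__(self):
-              # Concise and human-oriented.
-              return f"({self.x}, {self.y})"
-
-      p = Point(1, 2)
-      print(repr(p))   # Point(1, 2)
-      print(str(p))    # (1, 2)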
-
-   The default implementation defined by the built-in type "object"
-   calls "object.__repr__()".
-
-object.__bytes__(self)
-
-   Called by bytes to compute a byte-string representation of an
-   object. This should return a "bytes" object. The "object" class
-   itself does not provide this method.
-
-object.__format__(self, format_spec)
-
-   Called by the "format()" built-in function, and by extension,
-   evaluation of formatted string literals and the "str.format()"
-   method, to produce a “formatted” string representation of an
-   object. The *format_spec* argument is a string that contains a
-   description of the formatting options desired. The interpretation
-   of the *format_spec* argument is up to the type implementing
-   "__format__()", however most classes will either delegate
-   formatting to one of the built-in types, or use a similar
-   formatting option syntax.
-
-   See Format specification mini-language for a description of the
-   standard formatting syntax.
-
-   The return value must be a string object.
-
-   The default implementation by the "object" class should be given an
-   empty *format_spec* string. It delegates to "__str__()".
-
-   Changed in version 3.4: The __format__ method of "object" itself
-   raises a "TypeError" if passed any non-empty string.
-
-   Changed in version 3.7: "object.__format__(x, '')" is now
-   equivalent to "str(x)" rather than "format(str(x), '')".
-
-object.__lt__(self, other)
-object.__le__(self, other)
-object.__eq__(self, other)
-object.__ne__(self, other)
-object.__gt__(self, other)
-object.__ge__(self, other)
-
-   These are the so-called “rich comparison” methods. The
-   correspondence between operator symbols and method names is as
-   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",
-   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls
-   "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".
-
-   A rich comparison method may return the singleton "NotImplemented"
-   if it does not implement the operation for a given pair of
-   arguments. By convention, "False" and "True" are returned for a
-   successful comparison. However, these methods can return any value,
-   so if the comparison operator is used in a Boolean context (e.g.,
-   in the condition of an "if" statement), Python will call "bool()"
-   on the value to determine if the result is true or false.
-
-   By default, "object" implements "__eq__()" by using "is", returning
-   "NotImplemented" in the case of a false comparison: "True if x is y
-   else NotImplemented". For "__ne__()", by default it delegates to
-   "__eq__()" and inverts the result unless it is "NotImplemented".
-   There are no other implied relationships among the comparison
-   operators or default implementations; for example, the truth of
-   "(x<y or x==y)" does not imply "x<=y". To automatically generate
-   ordering operations from a single root operation, see
-   "functools.total_ordering()".
-
-   There are no swapped-argument versions of these methods (to be used
-   when the left argument does not support the operation but the right
-   argument does); rather, "__lt__()" and "__gt__()" are each other’s
-   reflection, and so are "__le__()" and "__ge__()". If the operands
-   are of different types, and the right operand’s type is a direct or
-   indirect subclass of the left operand’s type, the reflected method
-   of the right operand has priority, otherwise the left operand’s
-   method has priority. Virtual subclassing is not considered.
-
-object.__hash__(self)
-
-   Called by built-in function "hash()" and for operations on members
-   of hashed collections including "set", "frozenset", and "dict".
-   The "__hash__()" method should return an integer. The only required
-   property is that objects which compare equal have the same hash
-   value; it is advised to mix together the hash values of the
-   components of the object that also play a part in comparison of
-   objects by packing them into a tuple and hashing the tuple.
-   Example:
-
-      def __hash__(self):
-          return hash((self.name, self.nick, self.color))
-
-   If a class does not define an "__eq__()" method it should not
-   define a "__hash__()" operation either; if it defines "__eq__()"
-   but not "__hash__()", its instances will not be usable as items in
-   hashable collections. If a class defines mutable objects and
-   implements an "__eq__()" method, it should not implement
-   "__hash__()", since the implementation of *hashable* collections
-   requires that a key’s hash value is immutable (if the object’s hash
-   value changes, it will be in the wrong hash bucket).
-
-   User-defined classes have "__eq__()" and "__hash__()" methods by
-   default; with them, all objects compare unequal (except with
-   themselves) and "x.__hash__()" returns an appropriate value such
-   that "x == y" implies both that "x is y" and "hash(x) == hash(y)".
-
-   A class that overrides "__eq__()" and does not define "__hash__()"
-   will have its "__hash__()" implicitly set to "None". When the
-   "__hash__()" method of a class is "None", instances of the class
-   will raise an appropriate "TypeError" when a program attempts to
-   retrieve their hash value.
-
-   If a class that overrides "__eq__()" needs to retain the hash
-   implementation from a parent class, the interpreter must be told
-   this explicitly by setting "__hash__ = <ParentClass>.__hash__".
-
-   If a class that does not override "__eq__()" wishes to suppress
-   hash support, it should include "__hash__ = None" in the class
-   definition. A class which defines its own "__hash__()" that
-   explicitly raises a "TypeError" would be incorrectly identified as
-   hashable by an "isinstance(obj, collections.abc.Hashable)" call.
-
-   Note:
-
-     By default, the "__hash__()" values of str and bytes objects are
-     “salted” with an unpredictable random value. Although they
-     remain constant within an individual Python process, they are not
-     predictable between repeated invocations of Python. This is
-     intended to provide protection against a denial-of-service caused
-     by carefully chosen inputs that exploit the worst case
-     performance of a dict insertion, *O*(*n*^2) complexity. See
-     https://ocert.org/advisories/ocert-2011-003.html for details.
-
-     Changing hash values affects the iteration order of sets.
-     Python has never made guarantees about this ordering (and it
-     typically varies between 32-bit and 64-bit builds).
-
-     See also "PYTHONHASHSEED".
-
-   Changed in version 3.3: Hash randomization is enabled by default.
-
-object.__bool__(self)
-
-   Called to implement truth value testing and the built-in operation
-   "bool()"; should return "False" or "True". When this method is not
-   defined, "__len__()" is called, if it is defined, and the object is
-   considered true if its result is nonzero. If a class defines
-   neither "__len__()" nor "__bool__()" (which is true of the "object"
-   class itself), all its instances are considered true.
-
-
-Customizing attribute access
-============================
-
-The following methods can be defined to customize the meaning of
-attribute access (use of, assignment to, or deletion of "x.name") for
-class instances.
-
-object.__getattr__(self, name)
-
-   Called when the default attribute access fails with an
-   "AttributeError" (either "__getattribute__()" raises an
-   "AttributeError" because *name* is not an instance attribute or an
-   attribute in the class tree for "self"; or "__get__()" of a *name*
-   property raises "AttributeError"). This method should either
-   return the (computed) attribute value or raise an "AttributeError"
-   exception. The "object" class itself does not provide this method.
-
-   Note that if the attribute is found through the normal mechanism,
-   "__getattr__()" is not called. (This is an intentional asymmetry
-   between "__getattr__()" and "__setattr__()".) This is done both for
-   efficiency reasons and because otherwise "__getattr__()" would have
-   no way to access other attributes of the instance. Note that at
-   least for instance variables, you can take total control by not
-   inserting any values in the instance attribute dictionary (but
-   instead inserting them in another object). See the
-   "__getattribute__()" method below for a way to actually get total
-   control over attribute access.
-
-object.__getattribute__(self, name)
-
-   Called unconditionally to implement attribute accesses for
-   instances of the class. If the class also defines "__getattr__()",
-   the latter will not be called unless "__getattribute__()" either
-   calls it explicitly or raises an "AttributeError". This method
-   should return the (computed) attribute value or raise an
-   "AttributeError" exception. In order to avoid infinite recursion in
-   this method, its implementation should always call the base class
-   method with the same name to access any attributes it needs, for
-   example, "object.__getattribute__(self, name)".
-
-   Note:
-
-     This method may still be bypassed when looking up special methods
-     as the result of implicit invocation via language syntax or
-     built-in functions. See Special method lookup.
-
-   For certain sensitive attribute accesses, raises an auditing event
-   "object.__getattr__" with arguments "obj" and "name".
-
-object.__setattr__(self, name, value)
-
-   Called when an attribute assignment is attempted. This is called
-   instead of the normal mechanism (i.e. store the value in the
-   instance dictionary). *name* is the attribute name, *value* is the
-   value to be assigned to it.
-
-   If "__setattr__()" wants to assign to an instance attribute, it
-   should call the base class method with the same name, for example,
-   "object.__setattr__(self, name, value)".
-
-   For certain sensitive attribute assignments, raises an auditing
-   event "object.__setattr__" with arguments "obj", "name", "value".
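-
-   For example, a sketch of a "__setattr__()" that validates an
-   assignment and then defers to the base class method (the "Celsius"
-   class is invented for this example):
-
-      class Celsius:
-          def __setattr__(self, name, value):
-              if name == "temperature" and value < -273.15:
-                  raise ValueError("below absolute zero")
-              # Store the value through the normal mechanism.
-              object.__setattr__(self, name, value)
-
-      c = Celsius()
-      c.temperature = 25      # stored in c.__dict__
-      c.temperature = -300    # raises ValueError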
- -object.__delattr__(self, name) - - Like "__setattr__()" but for attribute deletion instead of - assignment. This should only be implemented if "del obj.name" is - meaningful for the object. - - For certain sensitive attribute deletions, raises an auditing event - "object.__delattr__" with arguments "obj" and "name". - -object.__dir__(self) - - Called when "dir()" is called on the object. An iterable must be - returned. "dir()" converts the returned iterable to a list and - sorts it. - - -Customizing module attribute access ------------------------------------ - -module.__getattr__() -module.__dir__() - -Special names "__getattr__" and "__dir__" can be also used to -customize access to module attributes. The "__getattr__" function at -the module level should accept one argument which is the name of an -attribute and return the computed value or raise an "AttributeError". -If an attribute is not found on a module object through the normal -lookup, i.e. "object.__getattribute__()", then "__getattr__" is -searched in the module "__dict__" before raising an "AttributeError". -If found, it is called with the attribute name and the result is -returned. - -The "__dir__" function should accept no arguments, and return an -iterable of strings that represents the names accessible on module. If -present, this function overrides the standard "dir()" search on a -module. - -module.__class__ - -For a more fine grained customization of the module behavior (setting -attributes, properties, etc.), one can set the "__class__" attribute -of a module object to a subclass of "types.ModuleType". For example: - - import sys - from types import ModuleType - - class VerboseModule(ModuleType): - def __repr__(self): - return f'Verbose {self.__name__}' - - def __setattr__(self, attr, value): - print(f'Setting {attr}...') - super().__setattr__(attr, value) - - sys.modules[__name__].__class__ = VerboseModule - -Note: - - Defining module "__getattr__" and setting module "__class__" only - affect lookups made using the attribute access syntax – directly - accessing the module globals (whether by code within the module, or - via a reference to the module’s globals dictionary) is unaffected. - -Changed in version 3.5: "__class__" module attribute is now writable. - -Added in version 3.7: "__getattr__" and "__dir__" module attributes. - -See also: - - **PEP 562** - Module __getattr__ and __dir__ - Describes the "__getattr__" and "__dir__" functions on modules. - - -Implementing Descriptors ------------------------- - -The following methods only apply when an instance of the class -containing the method (a so-called *descriptor* class) appears in an -*owner* class (the descriptor must be in either the owner’s class -dictionary or in the class dictionary for one of its parents). In the -examples below, “the attribute” refers to the attribute whose name is -the key of the property in the owner class’ "__dict__". The "object" -class itself does not implement any of these protocols. - -object.__get__(self, instance, owner=None) - - Called to get the attribute of the owner class (class attribute - access) or of an instance of that class (instance attribute - access). The optional *owner* argument is the owner class, while - *instance* is the instance that the attribute was accessed through, - or "None" when the attribute is accessed through the *owner*. - - This method should return the computed attribute value or raise an - "AttributeError" exception. 
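-
-   For example, a minimal non-data descriptor that computes its value
-   on access (the "Ten" class is invented for this sketch):
-
-      class Ten:
-          def __get__(self, instance, owner=None):
-              # "instance" is None when accessed through the class.
-              return 10
-
-      class A:
-          x = Ten()
-
-      a = A()
-      print(a.x)   # 10: Ten.__get__(descriptor, a, A)
-      print(A.x)   # 10: Ten.__get__(descriptor, None, A)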
- - **PEP 252** specifies that "__get__()" is callable with one or two - arguments. Python’s own built-in descriptors support this - specification; however, it is likely that some third-party tools - have descriptors that require both arguments. Python’s own - "__getattribute__()" implementation always passes in both arguments - whether they are required or not. - -object.__set__(self, instance, value) - - Called to set the attribute on an instance *instance* of the owner - class to a new value, *value*. - - Note, adding "__set__()" or "__delete__()" changes the kind of - descriptor to a “data descriptor”. See Invoking Descriptors for - more details. - -object.__delete__(self, instance) - - Called to delete the attribute on an instance *instance* of the - owner class. - -Instances of descriptors may also have the "__objclass__" attribute -present: - -object.__objclass__ - - The attribute "__objclass__" is interpreted by the "inspect" module - as specifying the class where this object was defined (setting this - appropriately can assist in runtime introspection of dynamic class - attributes). For callables, it may indicate that an instance of the - given type (or a subclass) is expected or required as the first - positional argument (for example, CPython sets this attribute for - unbound methods that are implemented in C). - - -Invoking Descriptors --------------------- - -In general, a descriptor is an object attribute with “binding -behavior”, one whose attribute access has been overridden by methods -in the descriptor protocol: "__get__()", "__set__()", and -"__delete__()". If any of those methods are defined for an object, it -is said to be a descriptor. - -The default behavior for attribute access is to get, set, or delete -the attribute from an object’s dictionary. For instance, "a.x" has a -lookup chain starting with "a.__dict__['x']", then -"type(a).__dict__['x']", and continuing through the base classes of -"type(a)" excluding metaclasses. - -However, if the looked-up value is an object defining one of the -descriptor methods, then Python may override the default behavior and -invoke the descriptor method instead. Where this occurs in the -precedence chain depends on which descriptor methods were defined and -how they were called. - -The starting point for descriptor invocation is a binding, "a.x". How -the arguments are assembled depends on "a": - -Direct Call - The simplest and least common call is when user code directly - invokes a descriptor method: "x.__get__(a)". - -Instance Binding - If binding to an object instance, "a.x" is transformed into the - call: "type(a).__dict__['x'].__get__(a, type(a))". - -Class Binding - If binding to a class, "A.x" is transformed into the call: - "A.__dict__['x'].__get__(None, A)". - -Super Binding - A dotted lookup such as "super(A, a).x" searches - "a.__class__.__mro__" for a base class "B" following "A" and then - returns "B.__dict__['x'].__get__(a, A)". If not a descriptor, "x" - is returned unchanged. - -For instance bindings, the precedence of descriptor invocation depends -on which descriptor methods are defined. A descriptor can define any -combination of "__get__()", "__set__()" and "__delete__()". If it -does not define "__get__()", then accessing the attribute will return -the descriptor object itself unless there is a value in the object’s -instance dictionary. If the descriptor defines "__set__()" and/or -"__delete__()", it is a data descriptor; if it defines neither, it is -a non-data descriptor. 
Normally, data descriptors define both -"__get__()" and "__set__()", while non-data descriptors have just the -"__get__()" method. Data descriptors with "__get__()" and "__set__()" -(and/or "__delete__()") defined always override a redefinition in an -instance dictionary. In contrast, non-data descriptors can be -overridden by instances. - -Python methods (including those decorated with "@staticmethod" and -"@classmethod") are implemented as non-data descriptors. Accordingly, -instances can redefine and override methods. This allows individual -instances to acquire behaviors that differ from other instances of the -same class. - -The "property()" function is implemented as a data descriptor. -Accordingly, instances cannot override the behavior of a property. - - -__slots__ ---------- - -*__slots__* allow us to explicitly declare data members (like -properties) and deny the creation of "__dict__" and *__weakref__* -(unless explicitly declared in *__slots__* or available in a parent.) - -The space saved over using "__dict__" can be significant. Attribute -lookup speed can be significantly improved as well. - -object.__slots__ - - This class variable can be assigned a string, iterable, or sequence - of strings with variable names used by instances. *__slots__* - reserves space for the declared variables and prevents the - automatic creation of "__dict__" and *__weakref__* for each - instance. - -Notes on using *__slots__*: - -* When inheriting from a class without *__slots__*, the "__dict__" and - *__weakref__* attribute of the instances will always be accessible. - -* Without a "__dict__" variable, instances cannot be assigned new - variables not listed in the *__slots__* definition. Attempts to - assign to an unlisted variable name raises "AttributeError". If - dynamic assignment of new variables is desired, then add - "'__dict__'" to the sequence of strings in the *__slots__* - declaration. - -* Without a *__weakref__* variable for each instance, classes defining - *__slots__* do not support "weak references" to its instances. If - weak reference support is needed, then add "'__weakref__'" to the - sequence of strings in the *__slots__* declaration. - -* *__slots__* are implemented at the class level by creating - descriptors for each variable name. As a result, class attributes - cannot be used to set default values for instance variables defined - by *__slots__*; otherwise, the class attribute would overwrite the - descriptor assignment. - -* The action of a *__slots__* declaration is not limited to the class - where it is defined. *__slots__* declared in parents are available - in child classes. However, instances of a child subclass will get a - "__dict__" and *__weakref__* unless the subclass also defines - *__slots__* (which should only contain names of any *additional* - slots). - -* If a class defines a slot also defined in a base class, the instance - variable defined by the base class slot is inaccessible (except by - retrieving its descriptor directly from the base class). This - renders the meaning of the program undefined. In the future, a - check may be added to prevent this. - -* "TypeError" will be raised if nonempty *__slots__* are defined for a - class derived from a ""variable-length" built-in type" such as - "int", "bytes", and "tuple". - -* Any non-string *iterable* may be assigned to *__slots__*. - -* If a "dictionary" is used to assign *__slots__*, the dictionary keys - will be used as the slot names. 
The values of the dictionary can be - used to provide per-attribute docstrings that will be recognised by - "inspect.getdoc()" and displayed in the output of "help()". - -* "__class__" assignment works only if both classes have the same - *__slots__*. - -* Multiple inheritance with multiple slotted parent classes can be - used, but only one parent is allowed to have attributes created by - slots (the other bases must have empty slot layouts) - violations - raise "TypeError". - -* If an *iterator* is used for *__slots__* then a *descriptor* is - created for each of the iterator’s values. However, the *__slots__* - attribute will be an empty iterator. - - -Customizing class creation -========================== - -Whenever a class inherits from another class, "__init_subclass__()" is -called on the parent class. This way, it is possible to write classes -which change the behavior of subclasses. This is closely related to -class decorators, but where class decorators only affect the specific -class they’re applied to, "__init_subclass__" solely applies to future -subclasses of the class defining the method. - -classmethod object.__init_subclass__(cls) - - This method is called whenever the containing class is subclassed. - *cls* is then the new subclass. If defined as a normal instance - method, this method is implicitly converted to a class method. - - Keyword arguments which are given to a new class are passed to the - parent class’s "__init_subclass__". For compatibility with other - classes using "__init_subclass__", one should take out the needed - keyword arguments and pass the others over to the base class, as - in: - - class Philosopher: - def __init_subclass__(cls, /, default_name, **kwargs): - super().__init_subclass__(**kwargs) - cls.default_name = default_name - - class AustralianPhilosopher(Philosopher, default_name="Bruce"): - pass - - The default implementation "object.__init_subclass__" does nothing, - but raises an error if it is called with any arguments. - - Note: - - The metaclass hint "metaclass" is consumed by the rest of the - type machinery, and is never passed to "__init_subclass__" - implementations. The actual metaclass (rather than the explicit - hint) can be accessed as "type(cls)". - - Added in version 3.6. - -When a class is created, "type.__new__()" scans the class variables -and makes callbacks to those with a "__set_name__()" hook. - -object.__set_name__(self, owner, name) - - Automatically called at the time the owning class *owner* is - created. The object has been assigned to *name* in that class: - - class A: - x = C() # Automatically calls: x.__set_name__(A, 'x') - - If the class variable is assigned after the class is created, - "__set_name__()" will not be called automatically. If needed, - "__set_name__()" can be called directly: - - class A: - pass - - c = C() - A.x = c # The hook is not called - c.__set_name__(A, 'x') # Manually invoke the hook - - See Creating the class object for more details. - - Added in version 3.6. - - -Metaclasses ------------ - -By default, classes are constructed using "type()". The class body is -executed in a new namespace and the class name is bound locally to the -result of "type(name, bases, namespace)". - -The class creation process can be customized by passing the -"metaclass" keyword argument in the class definition line, or by -inheriting from an existing class that included such an argument. 
In -the following example, both "MyClass" and "MySubclass" are instances -of "Meta": - - class Meta(type): - pass - - class MyClass(metaclass=Meta): - pass - - class MySubclass(MyClass): - pass - -Any other keyword arguments that are specified in the class definition -are passed through to all metaclass operations described below. - -When a class definition is executed, the following steps occur: - -* MRO entries are resolved; - -* the appropriate metaclass is determined; - -* the class namespace is prepared; - -* the class body is executed; - -* the class object is created. - - -Resolving MRO entries ---------------------- - -object.__mro_entries__(self, bases) - - If a base that appears in a class definition is not an instance of - "type", then an "__mro_entries__()" method is searched on the base. - If an "__mro_entries__()" method is found, the base is substituted - with the result of a call to "__mro_entries__()" when creating the - class. The method is called with the original bases tuple passed to - the *bases* parameter, and must return a tuple of classes that will - be used instead of the base. The returned tuple may be empty: in - these cases, the original base is ignored. - -See also: - - "types.resolve_bases()" - Dynamically resolve bases that are not instances of "type". - - "types.get_original_bases()" - Retrieve a class’s “original bases” prior to modifications by - "__mro_entries__()". - - **PEP 560** - Core support for typing module and generic types. - - -Determining the appropriate metaclass -------------------------------------- - -The appropriate metaclass for a class definition is determined as -follows: - -* if no bases and no explicit metaclass are given, then "type()" is - used; - -* if an explicit metaclass is given and it is *not* an instance of - "type()", then it is used directly as the metaclass; - -* if an instance of "type()" is given as the explicit metaclass, or - bases are defined, then the most derived metaclass is used. - -The most derived metaclass is selected from the explicitly specified -metaclass (if any) and the metaclasses (i.e. "type(cls)") of all -specified base classes. The most derived metaclass is one which is a -subtype of *all* of these candidate metaclasses. If none of the -candidate metaclasses meets that criterion, then the class definition -will fail with "TypeError". - - -Preparing the class namespace ------------------------------ - -Once the appropriate metaclass has been identified, then the class -namespace is prepared. If the metaclass has a "__prepare__" attribute, -it is called as "namespace = metaclass.__prepare__(name, bases, -**kwds)" (where the additional keyword arguments, if any, come from -the class definition). The "__prepare__" method should be implemented -as a "classmethod". The namespace returned by "__prepare__" is passed -in to "__new__", but when the final class object is created the -namespace is copied into a new "dict". - -If the metaclass has no "__prepare__" attribute, then the class -namespace is initialised as an empty ordered mapping. - -See also: - - **PEP 3115** - Metaclasses in Python 3000 - Introduced the "__prepare__" namespace hook - - -Executing the class body ------------------------- - -The class body is executed (approximately) as "exec(body, globals(), -namespace)". The key difference from a normal call to "exec()" is that -lexical scoping allows the class body (including any methods) to -reference names from the current and outer scopes when the class -definition occurs inside a function. 
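-
-For instance (a minimal sketch; the names "make_class" and "greeting"
-are illustrative only), a class body nested inside a function can read
-that function's local names:
-
-   def make_class(greeting):
-       class C:
-           message = greeting   # the class body sees the enclosing scope
-       return C
-
-   print(make_class("hello").message)   # prints: hello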
- -However, even when the class definition occurs inside the function, -methods defined inside the class still cannot see names defined at the -class scope. Class variables must be accessed through the first -parameter of instance or class methods, or through the implicit -lexically scoped "__class__" reference described in the next section. - - -Creating the class object -------------------------- - -Once the class namespace has been populated by executing the class -body, the class object is created by calling "metaclass(name, bases, -namespace, **kwds)" (the additional keywords passed here are the same -as those passed to "__prepare__"). - -This class object is the one that will be referenced by the zero- -argument form of "super()". "__class__" is an implicit closure -reference created by the compiler if any methods in a class body refer -to either "__class__" or "super". This allows the zero argument form -of "super()" to correctly identify the class being defined based on -lexical scoping, while the class or instance that was used to make the -current call is identified based on the first argument passed to the -method. - -**CPython implementation detail:** In CPython 3.6 and later, the -"__class__" cell is passed to the metaclass as a "__classcell__" entry -in the class namespace. If present, this must be propagated up to the -"type.__new__" call in order for the class to be initialised -correctly. Failing to do so will result in a "RuntimeError" in Python -3.8. - -When using the default metaclass "type", or any metaclass that -ultimately calls "type.__new__", the following additional -customization steps are invoked after creating the class object: - -1. The "type.__new__" method collects all of the attributes in the - class namespace that define a "__set_name__()" method; - -2. Those "__set_name__" methods are called with the class being - defined and the assigned name of that particular attribute; - -3. The "__init_subclass__()" hook is called on the immediate parent of - the new class in its method resolution order. - -After the class object is created, it is passed to the class -decorators included in the class definition (if any) and the resulting -object is bound in the local namespace as the defined class. - -When a new class is created by "type.__new__", the object provided as -the namespace parameter is copied to a new ordered mapping and the -original object is discarded. The new copy is wrapped in a read-only -proxy, which becomes the "__dict__" attribute of the class object. - -See also: - - **PEP 3135** - New super - Describes the implicit "__class__" closure reference - - -Uses for metaclasses --------------------- - -The potential uses for metaclasses are boundless. Some ideas that have -been explored include enum, logging, interface checking, automatic -delegation, automatic property creation, proxies, frameworks, and -automatic resource locking/synchronization. - - -Customizing instance and subclass checks -======================================== - -The following methods are used to override the default behavior of the -"isinstance()" and "issubclass()" built-in functions. - -In particular, the metaclass "abc.ABCMeta" implements these methods in -order to allow the addition of Abstract Base Classes (ABCs) as -“virtual base classes” to any class or type (including built-in -types), including other ABCs. - -type.__instancecheck__(self, instance) - - Return true if *instance* should be considered a (direct or - indirect) instance of *class*. 
If defined, called to implement - "isinstance(instance, class)". - -type.__subclasscheck__(self, subclass) - - Return true if *subclass* should be considered a (direct or - indirect) subclass of *class*. If defined, called to implement - "issubclass(subclass, class)". - -Note that these methods are looked up on the type (metaclass) of a -class. They cannot be defined as class methods in the actual class. -This is consistent with the lookup of special methods that are called -on instances, only in this case the instance is itself a class. - -See also: - - **PEP 3119** - Introducing Abstract Base Classes - Includes the specification for customizing "isinstance()" and - "issubclass()" behavior through "__instancecheck__()" and - "__subclasscheck__()", with motivation for this functionality in - the context of adding Abstract Base Classes (see the "abc" - module) to the language. - - -Emulating generic types -======================= - -When using *type annotations*, it is often useful to *parameterize* a -*generic type* using Python’s square-brackets notation. For example, -the annotation "list[int]" might be used to signify a "list" in which -all the elements are of type "int". - -See also: - - **PEP 484** - Type Hints - Introducing Python’s framework for type annotations - - Generic Alias Types - Documentation for objects representing parameterized generic - classes - - Generics, user-defined generics and "typing.Generic" - Documentation on how to implement generic classes that can be - parameterized at runtime and understood by static type-checkers. - -A class can *generally* only be parameterized if it defines the -special class method "__class_getitem__()". - -classmethod object.__class_getitem__(cls, key) - - Return an object representing the specialization of a generic class - by type arguments found in *key*. - - When defined on a class, "__class_getitem__()" is automatically a - class method. As such, there is no need for it to be decorated with - "@classmethod" when it is defined. - - -The purpose of *__class_getitem__* ----------------------------------- - -The purpose of "__class_getitem__()" is to allow runtime -parameterization of standard-library generic classes in order to more -easily apply *type hints* to these classes. - -To implement custom generic classes that can be parameterized at -runtime and understood by static type-checkers, users should either -inherit from a standard library class that already implements -"__class_getitem__()", or inherit from "typing.Generic", which has its -own implementation of "__class_getitem__()". - -Custom implementations of "__class_getitem__()" on classes defined -outside of the standard library may not be understood by third-party -type-checkers such as mypy. Using "__class_getitem__()" on any class -for purposes other than type hinting is discouraged. - - -*__class_getitem__* versus *__getitem__* ----------------------------------------- - -Usually, the subscription of an object using square brackets will call -the "__getitem__()" instance method defined on the object’s class. -However, if the object being subscribed is itself a class, the class -method "__class_getitem__()" may be called instead. -"__class_getitem__()" should return a GenericAlias object if it is -properly defined. 
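-
-For example, a user-defined class could delegate to
-"types.GenericAlias" (a minimal sketch; the class "Stack" is
-hypothetical, not part of the standard library):
-
-   from types import GenericAlias
-
-   class Stack:
-       def __class_getitem__(cls, item):
-           # implicitly a classmethod; wrap the class and its argument
-           return GenericAlias(cls, item)
-
-   print(Stack[int])   # prints: __main__.Stack[int]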
-
-Presented with the *expression* "obj[x]", the Python interpreter
-follows something like the following process to decide whether
-"__getitem__()" or "__class_getitem__()" should be called:
-
-   from inspect import isclass
-
-   def subscribe(obj, x):
-       """Return the result of the expression 'obj[x]'"""
-
-       class_of_obj = type(obj)
-
-       # If the class of obj defines __getitem__,
-       # call class_of_obj.__getitem__(obj, x)
-       if hasattr(class_of_obj, '__getitem__'):
-           return class_of_obj.__getitem__(obj, x)
-
-       # Else, if obj is a class and defines __class_getitem__,
-       # call obj.__class_getitem__(x)
-       elif isclass(obj) and hasattr(obj, '__class_getitem__'):
-           return obj.__class_getitem__(x)
-
-       # Else, raise an exception
-       else:
-           raise TypeError(
-               f"'{class_of_obj.__name__}' object is not subscriptable"
-           )
-
-In Python, all classes are themselves instances of other classes. The
-class of a class is known as that class’s *metaclass*, and most
-classes have the "type" class as their metaclass. "type" does not
-define "__getitem__()", meaning that expressions such as "list[int]",
-"dict[str, float]" and "tuple[str, bytes]" all result in
-"__class_getitem__()" being called:
-
-   >>> # list has class "type" as its metaclass, like most classes:
-   >>> type(list)
-   <class 'type'>
-   >>> type(dict) == type(list) == type(tuple) == type(str) == type(bytes)
-   True
-   >>> # "list[int]" calls "list.__class_getitem__(int)"
-   >>> list[int]
-   list[int]
-   >>> # list.__class_getitem__ returns a GenericAlias object:
-   >>> type(list[int])
-   <class 'types.GenericAlias'>
-
-However, if a class has a custom metaclass that defines
-"__getitem__()", subscribing the class may result in different
-behaviour. An example of this can be found in the "enum" module:
-
-   >>> from enum import Enum
-   >>> class Menu(Enum):
-   ...     """A breakfast menu"""
-   ...     SPAM = 'spam'
-   ...     BACON = 'bacon'
-   ...
-   >>> # Enum classes have a custom metaclass:
-   >>> type(Menu)
-   <class 'enum.EnumMeta'>
-   >>> # EnumMeta defines __getitem__,
-   >>> # so __class_getitem__ is not called,
-   >>> # and the result is not a GenericAlias object:
-   >>> Menu['SPAM']
-   <Menu.SPAM: 'spam'>
-   >>> type(Menu['SPAM'])
-   <enum 'Menu'>
-
-See also:
-
-  **PEP 560** - Core Support for typing module and generic types
-     Introducing "__class_getitem__()", and outlining when a
-     subscription results in "__class_getitem__()" being called
-     instead of "__getitem__()"
-
-
-Emulating callable objects
-==========================
-
-object.__call__(self[, args...])
-
-   Called when the instance is “called” as a function; if this method
-   is defined, "x(arg1, arg2, ...)" roughly translates to
-   "type(x).__call__(x, arg1, ...)". The "object" class itself does
-   not provide this method.
-
-
-Emulating container types
-=========================
-
-The following methods can be defined to implement container objects.
-None of them are provided by the "object" class itself. Containers
-usually are *sequences* (such as "lists" or "tuples") or *mappings*
-(like *dictionaries*), but can represent other containers as well.
-The first set of methods is used either to emulate a sequence or to
-emulate a mapping; the difference is that for a sequence, the
-allowable keys should be the integers *k* for which "0 <= k < N" where
-*N* is the length of the sequence, or "slice" objects, which define a
-range of items. It is also recommended that mappings provide the
-methods "keys()", "values()", "items()", "get()", "clear()",
-"setdefault()", "pop()", "popitem()", "copy()", and "update()"
-behaving similar to those for Python’s standard "dictionary" objects.
-The "collections.abc" module provides a "MutableMapping" *abstract -base class* to help create those methods from a base set of -"__getitem__()", "__setitem__()", "__delitem__()", and "keys()". - -Mutable sequences should provide methods "append()", "clear()", -"count()", "extend()", "index()", "insert()", "pop()", "remove()", and -"reverse()", like Python standard "list" objects. Finally, sequence -types should implement addition (meaning concatenation) and -multiplication (meaning repetition) by defining the methods -"__add__()", "__radd__()", "__iadd__()", "__mul__()", "__rmul__()" and -"__imul__()" described below; they should not define other numerical -operators. - -It is recommended that both mappings and sequences implement the -"__contains__()" method to allow efficient use of the "in" operator; -for mappings, "in" should search the mapping’s keys; for sequences, it -should search through the values. It is further recommended that both -mappings and sequences implement the "__iter__()" method to allow -efficient iteration through the container; for mappings, "__iter__()" -should iterate through the object’s keys; for sequences, it should -iterate through the values. - -object.__len__(self) - - Called to implement the built-in function "len()". Should return - the length of the object, an integer ">=" 0. Also, an object that - doesn’t define a "__bool__()" method and whose "__len__()" method - returns zero is considered to be false in a Boolean context. - - **CPython implementation detail:** In CPython, the length is - required to be at most "sys.maxsize". If the length is larger than - "sys.maxsize" some features (such as "len()") may raise - "OverflowError". To prevent raising "OverflowError" by truth value - testing, an object must define a "__bool__()" method. - -object.__length_hint__(self) - - Called to implement "operator.length_hint()". Should return an - estimated length for the object (which may be greater or less than - the actual length). The length must be an integer ">=" 0. The - return value may also be "NotImplemented", which is treated the - same as if the "__length_hint__" method didn’t exist at all. This - method is purely an optimization and is never required for - correctness. - - Added in version 3.4. - -Note: - - Slicing is done exclusively with the following three methods. A - call like - - a[1:2] = b - - is translated to - - a[slice(1, 2, None)] = b - - and so forth. Missing slice items are always filled in with "None". - -object.__getitem__(self, key) - - Called to implement evaluation of "self[key]". For *sequence* - types, the accepted keys should be integers. Optionally, they may - support "slice" objects as well. Negative index support is also - optional. If *key* is of an inappropriate type, "TypeError" may be - raised; if *key* is a value outside the set of indexes for the - sequence (after any special interpretation of negative values), - "IndexError" should be raised. For *mapping* types, if *key* is - missing (not in the container), "KeyError" should be raised. - - Note: - - "for" loops expect that an "IndexError" will be raised for - illegal indexes to allow proper detection of the end of the - sequence. - - Note: - - When subscripting a *class*, the special class method - "__class_getitem__()" may be called instead of "__getitem__()". - See __class_getitem__ versus __getitem__ for more details. - -object.__setitem__(self, key, value) - - Called to implement assignment to "self[key]". Same note as for - "__getitem__()". 
This should only be implemented for mappings if - the objects support changes to the values for keys, or if new keys - can be added, or for sequences if elements can be replaced. The - same exceptions should be raised for improper *key* values as for - the "__getitem__()" method. - -object.__delitem__(self, key) - - Called to implement deletion of "self[key]". Same note as for - "__getitem__()". This should only be implemented for mappings if - the objects support removal of keys, or for sequences if elements - can be removed from the sequence. The same exceptions should be - raised for improper *key* values as for the "__getitem__()" method. - -object.__missing__(self, key) - - Called by "dict"."__getitem__()" to implement "self[key]" for dict - subclasses when key is not in the dictionary. - -object.__iter__(self) - - This method is called when an *iterator* is required for a - container. This method should return a new iterator object that can - iterate over all the objects in the container. For mappings, it - should iterate over the keys of the container. - -object.__reversed__(self) - - Called (if present) by the "reversed()" built-in to implement - reverse iteration. It should return a new iterator object that - iterates over all the objects in the container in reverse order. - - If the "__reversed__()" method is not provided, the "reversed()" - built-in will fall back to using the sequence protocol ("__len__()" - and "__getitem__()"). Objects that support the sequence protocol - should only provide "__reversed__()" if they can provide an - implementation that is more efficient than the one provided by - "reversed()". - -The membership test operators ("in" and "not in") are normally -implemented as an iteration through a container. However, container -objects can supply the following special method with a more efficient -implementation, which also does not require the object be iterable. - -object.__contains__(self, item) - - Called to implement membership test operators. Should return true - if *item* is in *self*, false otherwise. For mapping objects, this - should consider the keys of the mapping rather than the values or - the key-item pairs. - - For objects that don’t define "__contains__()", the membership test - first tries iteration via "__iter__()", then the old sequence - iteration protocol via "__getitem__()", see this section in the - language reference. - - -Emulating numeric types -======================= - -The following methods can be defined to emulate numeric objects. -Methods corresponding to operations that are not supported by the -particular kind of number implemented (e.g., bitwise operations for -non-integral numbers) should be left undefined. - -object.__add__(self, other) -object.__sub__(self, other) -object.__mul__(self, other) -object.__matmul__(self, other) -object.__truediv__(self, other) -object.__floordiv__(self, other) -object.__mod__(self, other) -object.__divmod__(self, other) -object.__pow__(self, other[, modulo]) -object.__lshift__(self, other) -object.__rshift__(self, other) -object.__and__(self, other) -object.__xor__(self, other) -object.__or__(self, other) - - These methods are called to implement the binary arithmetic - operations ("+", "-", "*", "@", "/", "//", "%", "divmod()", - "pow()", "**", "<<", ">>", "&", "^", "|"). For instance, to - evaluate the expression "x + y", where *x* is an instance of a - class that has an "__add__()" method, "type(x).__add__(x, y)" is - called. 
The "__divmod__()" method should be the equivalent to - using "__floordiv__()" and "__mod__()"; it should not be related to - "__truediv__()". Note that "__pow__()" should be defined to accept - an optional third argument if the ternary version of the built-in - "pow()" function is to be supported. - - If one of those methods does not support the operation with the - supplied arguments, it should return "NotImplemented". - -object.__radd__(self, other) -object.__rsub__(self, other) -object.__rmul__(self, other) -object.__rmatmul__(self, other) -object.__rtruediv__(self, other) -object.__rfloordiv__(self, other) -object.__rmod__(self, other) -object.__rdivmod__(self, other) -object.__rpow__(self, other[, modulo]) -object.__rlshift__(self, other) -object.__rrshift__(self, other) -object.__rand__(self, other) -object.__rxor__(self, other) -object.__ror__(self, other) - - These methods are called to implement the binary arithmetic - operations ("+", "-", "*", "@", "/", "//", "%", "divmod()", - "pow()", "**", "<<", ">>", "&", "^", "|") with reflected (swapped) - operands. These functions are only called if the left operand does - not support the corresponding operation [3] and the operands are of - different types. [4] For instance, to evaluate the expression "x - - y", where *y* is an instance of a class that has an "__rsub__()" - method, "type(y).__rsub__(y, x)" is called if "type(x).__sub__(x, - y)" returns "NotImplemented". - - Note that ternary "pow()" will not try calling "__rpow__()" (the - coercion rules would become too complicated). - - Note: - - If the right operand’s type is a subclass of the left operand’s - type and that subclass provides a different implementation of the - reflected method for the operation, this method will be called - before the left operand’s non-reflected method. This behavior - allows subclasses to override their ancestors’ operations. - -object.__iadd__(self, other) -object.__isub__(self, other) -object.__imul__(self, other) -object.__imatmul__(self, other) -object.__itruediv__(self, other) -object.__ifloordiv__(self, other) -object.__imod__(self, other) -object.__ipow__(self, other[, modulo]) -object.__ilshift__(self, other) -object.__irshift__(self, other) -object.__iand__(self, other) -object.__ixor__(self, other) -object.__ior__(self, other) - - These methods are called to implement the augmented arithmetic - assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", "**=", - "<<=", ">>=", "&=", "^=", "|="). These methods should attempt to - do the operation in-place (modifying *self*) and return the result - (which could be, but does not have to be, *self*). If a specific - method is not defined, or if that method returns "NotImplemented", - the augmented assignment falls back to the normal methods. For - instance, if *x* is an instance of a class with an "__iadd__()" - method, "x += y" is equivalent to "x = x.__iadd__(y)" . If - "__iadd__()" does not exist, or if "x.__iadd__(y)" returns - "NotImplemented", "x.__add__(y)" and "y.__radd__(x)" are - considered, as with the evaluation of "x + y". In certain - situations, augmented assignment can result in unexpected errors - (see Why does a_tuple[i] += [‘item’] raise an exception when the - addition works?), but this behavior is in fact part of the data - model. - -object.__neg__(self) -object.__pos__(self) -object.__abs__(self) -object.__invert__(self) - - Called to implement the unary arithmetic operations ("-", "+", - "abs()" and "~"). 
- -object.__complex__(self) -object.__int__(self) -object.__float__(self) - - Called to implement the built-in functions "complex()", "int()" and - "float()". Should return a value of the appropriate type. - -object.__index__(self) - - Called to implement "operator.index()", and whenever Python needs - to losslessly convert the numeric object to an integer object (such - as in slicing, or in the built-in "bin()", "hex()" and "oct()" - functions). Presence of this method indicates that the numeric - object is an integer type. Must return an integer. - - If "__int__()", "__float__()" and "__complex__()" are not defined - then corresponding built-in functions "int()", "float()" and - "complex()" fall back to "__index__()". - -object.__round__(self[, ndigits]) -object.__trunc__(self) -object.__floor__(self) -object.__ceil__(self) - - Called to implement the built-in function "round()" and "math" - functions "trunc()", "floor()" and "ceil()". Unless *ndigits* is - passed to "__round__()" all these methods should return the value - of the object truncated to an "Integral" (typically an "int"). - - The built-in function "int()" falls back to "__trunc__()" if - neither "__int__()" nor "__index__()" is defined. - - Changed in version 3.11: The delegation of "int()" to "__trunc__()" - is deprecated. - - -With Statement Context Managers -=============================== - -A *context manager* is an object that defines the runtime context to -be established when executing a "with" statement. The context manager -handles the entry into, and the exit from, the desired runtime context -for the execution of the block of code. Context managers are normally -invoked using the "with" statement (described in section The with -statement), but can also be used by directly invoking their methods. - -Typical uses of context managers include saving and restoring various -kinds of global state, locking and unlocking resources, closing opened -files, etc. - -For more information on context managers, see Context Manager Types. -The "object" class itself does not provide the context manager -methods. - -object.__enter__(self) - - Enter the runtime context related to this object. The "with" - statement will bind this method’s return value to the target(s) - specified in the "as" clause of the statement, if any. - -object.__exit__(self, exc_type, exc_value, traceback) - - Exit the runtime context related to this object. The parameters - describe the exception that caused the context to be exited. If the - context was exited without an exception, all three arguments will - be "None". - - If an exception is supplied, and the method wishes to suppress the - exception (i.e., prevent it from being propagated), it should - return a true value. Otherwise, the exception will be processed - normally upon exit from this method. - - Note that "__exit__()" methods should not reraise the passed-in - exception; this is the caller’s responsibility. - -See also: - - **PEP 343** - The “with” statement - The specification, background, and examples for the Python "with" - statement. - - -Customizing positional arguments in class pattern matching -========================================================== - -When using a class name in a pattern, positional arguments in the -pattern are not allowed by default, i.e. "case MyClass(x, y)" is -typically invalid without special support in "MyClass". To be able to -use that kind of pattern, the class needs to define a *__match_args__* -attribute. 
-
-object.__match_args__
-
-   This class variable can be assigned a tuple of strings. When this
-   class is used in a class pattern with positional arguments, each
-   positional argument will be converted into a keyword argument,
-   using the corresponding value in *__match_args__* as the keyword.
-   The absence of this attribute is equivalent to setting it to "()".
-
-For example, if "MyClass.__match_args__" is "("left", "center",
-"right")" that means that "case MyClass(x, y)" is equivalent to "case
-MyClass(left=x, center=y)". Note that the number of arguments in the
-pattern must be smaller than or equal to the number of elements in
-*__match_args__*; if it is larger, the pattern match attempt will
-raise a "TypeError".
-
-Added in version 3.10.
-
-See also:
-
-  **PEP 634** - Structural Pattern Matching
-     The specification for the Python "match" statement.
-
-
-Emulating buffer types
-======================
-
-The buffer protocol provides a way for Python objects to expose
-efficient access to a low-level memory array. This protocol is
-implemented by builtin types such as "bytes" and "memoryview", and
-third-party libraries may define additional buffer types.
-
-While buffer types are usually implemented in C, it is also possible
-to implement the protocol in Python.
-
-object.__buffer__(self, flags)
-
-   Called when a buffer is requested from *self* (for example, by the
-   "memoryview" constructor). The *flags* argument is an integer
-   representing the kind of buffer requested, affecting for example
-   whether the returned buffer is read-only or writable.
-   "inspect.BufferFlags" provides a convenient way to interpret the
-   flags. The method must return a "memoryview" object.
-
-object.__release_buffer__(self, buffer)
-
-   Called when a buffer is no longer needed. The *buffer* argument is
-   a "memoryview" object that was previously returned by
-   "__buffer__()". The method must release any resources associated
-   with the buffer. This method should return "None". Buffer objects
-   that do not need to perform any cleanup are not required to
-   implement this method.
-
-Added in version 3.12.
-
-See also:
-
-  **PEP 688** - Making the buffer protocol accessible in Python
-     Introduces the Python "__buffer__" and "__release_buffer__"
-     methods.
-
-  "collections.abc.Buffer"
-     ABC for buffer types.
-
-
-Special method lookup
-=====================
-
-For custom classes, implicit invocations of special methods are only
-guaranteed to work correctly if defined on an object’s type, not in
-the object’s instance dictionary. That behaviour is the reason why
-the following code raises an exception:
-
-   >>> class C:
-   ...     pass
-   ...
-   >>> c = C()
-   >>> c.__len__ = lambda: 5
-   >>> len(c)
-   Traceback (most recent call last):
-     File "<stdin>", line 1, in <module>
-   TypeError: object of type 'C' has no len()
-
-The rationale behind this behaviour lies with a number of special
-methods such as "__hash__()" and "__repr__()" that are implemented by
-all objects, including type objects.
-If the implicit lookup of these
-methods used the conventional lookup process, they would fail when
-invoked on the type object itself:
-
-   >>> 1 .__hash__() == hash(1)
-   True
-   >>> int.__hash__() == hash(int)
-   Traceback (most recent call last):
-     File "<stdin>", line 1, in <module>
-   TypeError: descriptor '__hash__' of 'int' object needs an argument
-
-Incorrectly attempting to invoke an unbound method of a class in this
-way is sometimes referred to as ‘metaclass confusion’, and is avoided
-by bypassing the instance when looking up special methods:
-
-   >>> type(1).__hash__(1) == hash(1)
-   True
-   >>> type(int).__hash__(int) == hash(int)
-   True
-
-In addition to bypassing any instance attributes in the interest of
-correctness, implicit special method lookup generally also bypasses
-the "__getattribute__()" method even of the object’s metaclass:
-
-   >>> class Meta(type):
-   ...     def __getattribute__(*args):
-   ...         print("Metaclass getattribute invoked")
-   ...         return type.__getattribute__(*args)
-   ...
-   >>> class C(object, metaclass=Meta):
-   ...     def __len__(self):
-   ...         return 10
-   ...     def __getattribute__(*args):
-   ...         print("Class getattribute invoked")
-   ...         return object.__getattribute__(*args)
-   ...
-   >>> c = C()
-   >>> c.__len__()                 # Explicit lookup via instance
-   Class getattribute invoked
-   10
-   >>> type(c).__len__(c)          # Explicit lookup via type
-   Metaclass getattribute invoked
-   10
-   >>> len(c)                      # Implicit lookup
-   10
-
-Bypassing the "__getattribute__()" machinery in this fashion provides
-significant scope for speed optimisations within the interpreter, at
-the cost of some flexibility in the handling of special methods (the
-special method *must* be set on the class object itself in order to be
-consistently invoked by the interpreter).
-''',
- 'string-methods': r'''String Methods
-**************
-
-Strings implement all of the common sequence operations, along with
-the additional methods described below.
-
-Strings also support two styles of string formatting, one providing a
-large degree of flexibility and customization (see "str.format()",
-Format string syntax and Custom string formatting) and the other based
-on C "printf" style formatting that handles a narrower range of types
-and is slightly harder to use correctly, but is often faster for the
-cases it can handle (printf-style String Formatting).
-
-The Text Processing Services section of the standard library covers a
-number of other modules that provide various text related utilities
-(including regular expression support in the "re" module).
-
-str.capitalize()
-
-   Return a copy of the string with its first character capitalized
-   and the rest lowercased.
-
-   Changed in version 3.8: The first character is now put into
-   titlecase rather than uppercase. This means that characters like
-   digraphs will only have their first letter capitalized, instead of
-   the full character.
-
-str.casefold()
-
-   Return a casefolded copy of the string. Casefolded strings may be
-   used for caseless matching.
-
-   Casefolding is similar to lowercasing but more aggressive because
-   it is intended to remove all case distinctions in a string. For
-   example, the German lowercase letter "'ß'" is equivalent to ""ss"".
-   Since it is already lowercase, "lower()" would do nothing to "'ß'";
-   "casefold()" converts it to ""ss"". For example:
-
-   >>> 'straße'.lower()
-   'straße'
-   >>> 'straße'.casefold()
-   'strasse'
-
-   The casefolding algorithm is described in section 3.13 ‘Default
-   Case Folding’ of the Unicode Standard.
-
-   Added in version 3.3.
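-
-   A common application (a short sketch; "caseless_equal" is an
-   illustrative helper, not a standard method) is caseless comparison:
-
-   >>> def caseless_equal(a, b):
-   ...     return a.casefold() == b.casefold()
-   ...
-   >>> caseless_equal('straße', 'STRASSE')
-   True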
-
-str.center(width, fillchar=' ', /)
-
-   Return the string centered in a string of length *width*. Padding
-   is done using the specified *fillchar* (default is an ASCII
-   space). The original string is returned if *width* is less than or
-   equal to "len(s)". For example:
-
-   >>> 'Python'.center(10)
-   '  Python  '
-   >>> 'Python'.center(10, '-')
-   '--Python--'
-   >>> 'Python'.center(4)
-   'Python'
-
-str.count(sub[, start[, end]])
-
-   Return the number of non-overlapping occurrences of substring *sub*
-   in the range [*start*, *end*]. Optional arguments *start* and
-   *end* are interpreted as in slice notation.
-
-   If *sub* is empty, returns the number of empty strings between
-   characters which is the length of the string plus one. For example:
-
-   >>> 'spam, spam, spam'.count('spam')
-   3
-   >>> 'spam, spam, spam'.count('spam', 5)
-   2
-   >>> 'spam, spam, spam'.count('spam', 5, 10)
-   1
-   >>> 'spam, spam, spam'.count('eggs')
-   0
-   >>> 'spam, spam, spam'.count('')
-   17
-
-str.encode(encoding='utf-8', errors='strict')
-
-   Return the string encoded to "bytes".
-
-   *encoding* defaults to "'utf-8'"; see Standard Encodings for
-   possible values.
-
-   *errors* controls how encoding errors are handled. If "'strict'"
-   (the default), a "UnicodeError" exception is raised. Other possible
-   values are "'ignore'", "'replace'", "'xmlcharrefreplace'",
-   "'backslashreplace'" and any other name registered via
-   "codecs.register_error()". See Error Handlers for details.
-
-   For performance reasons, the value of *errors* is not checked for
-   validity unless an encoding error actually occurs, Python
-   Development Mode is enabled or a debug build is used. For example:
-
-   >>> encoded_str_to_bytes = 'Python'.encode()
-   >>> type(encoded_str_to_bytes)
-   <class 'bytes'>
-   >>> encoded_str_to_bytes
-   b'Python'
-
-   Changed in version 3.1: Added support for keyword arguments.
-
-   Changed in version 3.9: The value of the *errors* argument is now
-   checked in Python Development Mode and in debug mode.
-
-str.endswith(suffix[, start[, end]])
-
-   Return "True" if the string ends with the specified *suffix*,
-   otherwise return "False". *suffix* can also be a tuple of suffixes
-   to look for. With optional *start*, test beginning at that
-   position. With optional *end*, stop comparing at that position.
-   Using *start* and *end* is equivalent to
-   "str[start:end].endswith(suffix)". For example:
-
-   >>> 'Python'.endswith('on')
-   True
-   >>> 'a tuple of suffixes'.endswith(('at', 'in'))
-   False
-   >>> 'a tuple of suffixes'.endswith(('at', 'es'))
-   True
-   >>> 'Python is amazing'.endswith('is', 0, 9)
-   True
-
-   See also "startswith()" and "removesuffix()".
-
-str.expandtabs(tabsize=8)
-
-   Return a copy of the string where all tab characters are replaced
-   by one or more spaces, depending on the current column and the
-   given tab size. Tab positions occur every *tabsize* characters
-   (default is 8, giving tab positions at columns 0, 8, 16 and so on).
-   To expand the string, the current column is set to zero and the
-   string is examined character by character. If the character is a
-   tab ("\t"), one or more space characters are inserted in the result
-   until the current column is equal to the next tab position. (The
-   tab character itself is not copied.) If the character is a newline
-   ("\n") or return ("\r"), it is copied and the current column is
-   reset to zero. Any other character is copied unchanged and the
-   current column is incremented by one regardless of how the
-   character is represented when printed.
-   For example:
-
-   >>> '01\t012\t0123\t01234'.expandtabs()
-   '01      012     0123    01234'
-   >>> '01\t012\t0123\t01234'.expandtabs(4)
-   '01  012 0123    01234'
-   >>> print('01\t012\n0123\t01234'.expandtabs(4))
-   01  012
-   0123    01234
-
-str.find(sub[, start[, end]])
-
-   Return the lowest index in the string where substring *sub* is
-   found within the slice "s[start:end]". Optional arguments *start*
-   and *end* are interpreted as in slice notation. Return "-1" if
-   *sub* is not found. For example:
-
-   >>> 'spam, spam, spam'.find('sp')
-   0
-   >>> 'spam, spam, spam'.find('sp', 5)
-   6
-
-   See also "rfind()" and "index()".
-
-   Note:
-
-     The "find()" method should be used only if you need to know the
-     position of *sub*. To check if *sub* is a substring or not, use
-     the "in" operator:
-
-        >>> 'Py' in 'Python'
-        True
-
-str.format(*args, **kwargs)
-
-   Perform a string formatting operation. The string on which this
-   method is called can contain literal text or replacement fields
-   delimited by braces "{}". Each replacement field contains either
-   the numeric index of a positional argument, or the name of a
-   keyword argument. Returns a copy of the string where each
-   replacement field is replaced with the string value of the
-   corresponding argument. For example:
-
-   >>> "The sum of 1 + 2 is {0}".format(1+2)
-   'The sum of 1 + 2 is 3'
-   >>> "The sum of {a} + {b} is {answer}".format(answer=1+2, a=1, b=2)
-   'The sum of 1 + 2 is 3'
-   >>> "{1} expects the {0} Inquisition!".format("Spanish", "Nobody")
-   'Nobody expects the Spanish Inquisition!'
-
-   See Format string syntax for a description of the various
-   formatting options that can be specified in format strings.
-
-   Note:
-
-     When formatting a number ("int", "float", "complex",
-     "decimal.Decimal" and subclasses) with the "n" type (ex:
-     "'{:n}'.format(1234)"), the function temporarily sets the
-     "LC_CTYPE" locale to the "LC_NUMERIC" locale to decode
-     "decimal_point" and "thousands_sep" fields of "localeconv()" if
-     they are non-ASCII or longer than 1 byte, and the "LC_NUMERIC"
-     locale is different than the "LC_CTYPE" locale. This temporary
-     change affects other threads.
-
-   Changed in version 3.7: When formatting a number with the "n" type,
-   the function sets temporarily the "LC_CTYPE" locale to the
-   "LC_NUMERIC" locale in some cases.
-
-str.format_map(mapping, /)
-
-   Similar to "str.format(**mapping)", except that "mapping" is used
-   directly and not copied to a "dict". This is useful if for example
-   "mapping" is a dict subclass:
-
-   >>> class Default(dict):
-   ...     def __missing__(self, key):
-   ...         return key
-   ...
-   >>> '{name} was born in {country}'.format_map(Default(name='Guido'))
-   'Guido was born in country'
-
-   Added in version 3.2.
-
-str.index(sub[, start[, end]])
-
-   Like "find()", but raise "ValueError" when the substring is not
-   found. For example:
-
-   >>> 'spam, spam, spam'.index('spam')
-   0
-   >>> 'spam, spam, spam'.index('eggs')
-   Traceback (most recent call last):
-     File "<stdin>", line 1, in <module>
-       'spam, spam, spam'.index('eggs')
-       ~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^
-   ValueError: substring not found
-
-   See also "rindex()".
-
-str.isalnum()
-
-   Return "True" if all characters in the string are alphanumeric and
-   there is at least one character, "False" otherwise. A character
-   "c" is alphanumeric if one of the following returns "True":
-   "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".
- For example: - - >>> 'abc123'.isalnum() - True - >>> 'abc123!@#'.isalnum() - False - >>> ''.isalnum() - False - >>> ' '.isalnum() - False - -str.isalpha() - - Return "True" if all characters in the string are alphabetic and - there is at least one character, "False" otherwise. Alphabetic - characters are those characters defined in the Unicode character - database as “Letter”, i.e., those with general category property - being one of “Lm”, “Lt”, “Lu”, “Ll”, or “Lo”. Note that this is - different from the Alphabetic property defined in the section 4.10 - ‘Letters, Alphabetic, and Ideographic’ of the Unicode Standard. For - example: - - >>> 'Letters and spaces'.isalpha() - False - >>> 'LettersOnly'.isalpha() - True - >>> 'µ'.isalpha() # non-ASCII characters can be considered alphabetical too - True - - See Unicode Properties. - -str.isascii() - - Return "True" if the string is empty or all characters in the - string are ASCII, "False" otherwise. ASCII characters have code - points in the range U+0000-U+007F. For example: - - >>> 'ASCII characters'.isascii() - True - >>> 'µ'.isascii() - False - - Added in version 3.7. - -str.isdecimal() - - Return "True" if all characters in the string are decimal - characters and there is at least one character, "False" otherwise. - Decimal characters are those that can be used to form numbers in - base 10, such as U+0660, ARABIC-INDIC DIGIT ZERO. Formally a - decimal character is a character in the Unicode General Category - “Nd”. For example: - - >>> '0123456789'.isdecimal() - True - >>> '٠١٢٣٤٥٦٧٨٩'.isdecimal() # Arabic-Indic digits zero to nine - True - >>> 'alphabetic'.isdecimal() - False - -str.isdigit() - - Return "True" if all characters in the string are digits and there - is at least one character, "False" otherwise. Digits include - decimal characters and digits that need special handling, such as - the compatibility superscript digits. This covers digits which - cannot be used to form numbers in base 10, like the Kharosthi - numbers. Formally, a digit is a character that has the property - value Numeric_Type=Digit or Numeric_Type=Decimal. - -str.isidentifier() - - Return "True" if the string is a valid identifier according to the - language definition, section Identifiers and keywords. - - "keyword.iskeyword()" can be used to test whether string "s" is a - reserved identifier, such as "def" and "class". - - Example: - - >>> from keyword import iskeyword - - >>> 'hello'.isidentifier(), iskeyword('hello') - (True, False) - >>> 'def'.isidentifier(), iskeyword('def') - (True, True) - -str.islower() - - Return "True" if all cased characters [4] in the string are - lowercase and there is at least one cased character, "False" - otherwise. - -str.isnumeric() - - Return "True" if all characters in the string are numeric - characters, and there is at least one character, "False" otherwise. - Numeric characters include digit characters, and all characters - that have the Unicode numeric value property, e.g. U+2155, VULGAR - FRACTION ONE FIFTH. Formally, numeric characters are those with - the property value Numeric_Type=Digit, Numeric_Type=Decimal or - Numeric_Type=Numeric. For example: - - >>> '0123456789'.isnumeric() - True - >>> '٠١٢٣٤٥٦٧٨٩'.isnumeric() # Arabic-indic digit zero to nine - True - >>> '⅕'.isnumeric() # Vulgar fraction one fifth - True - >>> '²'.isdecimal(), '²'.isdigit(), '²'.isnumeric() - (False, True, True) - - See also "isdecimal()" and "isdigit()". Numeric characters are a - superset of decimal numbers. 
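-
-   For illustration, the three checks can be compared side by side (a
-   small sketch; the sample characters are arbitrary), each row showing
-   "isdecimal()", "isdigit()" and "isnumeric()" in turn:
-
-   >>> for ch in '5', '²', '⅕':
-   ...     print(ch, ch.isdecimal(), ch.isdigit(), ch.isnumeric())
-   ...
-   5 True True True
-   ² False True True
-   ⅕ False False True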
-
-str.isprintable()
-
-   Return "True" if all characters in the string are printable,
-   "False" if it contains at least one non-printable character.
-
-   Here “printable” means the character is suitable for "repr()" to
-   use in its output; “non-printable” means that "repr()" on built-in
-   types will hex-escape the character. It has no bearing on the
-   handling of strings written to "sys.stdout" or "sys.stderr".
-
-   The printable characters are those which in the Unicode character
-   database (see "unicodedata") have a general category in group
-   Letter, Mark, Number, Punctuation, or Symbol (L, M, N, P, or S);
-   plus the ASCII space 0x20. Nonprintable characters are those in
-   group Separator or Other (Z or C), except the ASCII space.
-
-   For example:
-
-   >>> ''.isprintable(), ' '.isprintable()
-   (True, True)
-   >>> '\t'.isprintable(), '\n'.isprintable()
-   (False, False)
-
-   See also "isspace()".
-
-str.isspace()
-
-   Return "True" if there are only whitespace characters in the string
-   and there is at least one character, "False" otherwise.
-
-   For example:
-
-   >>> ''.isspace()
-   False
-   >>> ' '.isspace()
-   True
-   >>> '\t\n'.isspace()    # TAB and LINE FEED
-   True
-   >>> '\u3000'.isspace()  # IDEOGRAPHIC SPACE
-   True
-
-   A character is *whitespace* if in the Unicode character database
-   (see "unicodedata"), either its general category is "Zs"
-   (“Separator, space”), or its bidirectional class is one of "WS",
-   "B", or "S".
-
-   See also "isprintable()".
-
-str.istitle()
-
-   Return "True" if the string is a titlecased string and there is at
-   least one character, for example uppercase characters may only
-   follow uncased characters and lowercase characters only cased ones.
-   Return "False" otherwise.
-
-   For example:
-
-   >>> 'Spam, Spam, Spam'.istitle()
-   True
-   >>> 'spam, spam, spam'.istitle()
-   False
-   >>> 'SPAM, SPAM, SPAM'.istitle()
-   False
-
-   See also "title()".
-
-str.isupper()
-
-   Return "True" if all cased characters [4] in the string are
-   uppercase and there is at least one cased character, "False"
-   otherwise.
-
-   >>> 'BANANA'.isupper()
-   True
-   >>> 'banana'.isupper()
-   False
-   >>> 'baNana'.isupper()
-   False
-   >>> ' '.isupper()
-   False
-
-str.join(iterable, /)
-
-   Return a string which is the concatenation of the strings in
-   *iterable*. A "TypeError" will be raised if there are any non-
-   string values in *iterable*, including "bytes" objects. The
-   separator between elements is the string providing this method. For
-   example:
-
-   >>> ', '.join(['spam', 'spam', 'spam'])
-   'spam, spam, spam'
-   >>> '-'.join('Python')
-   'P-y-t-h-o-n'
-
-   See also "split()".
-
-str.ljust(width, fillchar=' ', /)
-
-   Return the string left justified in a string of length *width*.
-   Padding is done using the specified *fillchar* (default is an ASCII
-   space). The original string is returned if *width* is less than or
-   equal to "len(s)".
-
-   For example:
-
-   >>> 'Python'.ljust(10)
-   'Python    '
-   >>> 'Python'.ljust(10, '.')
-   'Python....'
-   >>> 'Monty Python'.ljust(10, '.')
-   'Monty Python'
-
-   See also "rjust()".
-
-str.lower()
-
-   Return a copy of the string with all the cased characters [4]
-   converted to lowercase. For example:
-
-   >>> 'Lower Method Example'.lower()
-   'lower method example'
-
-   The lowercasing algorithm used is described in section 3.13
-   ‘Default Case Folding’ of the Unicode Standard.
-
-str.lstrip(chars=None, /)
-
-   Return a copy of the string with leading characters removed. The
-   *chars* argument is a string specifying the set of characters to be
-   removed.
If omitted or "None", the *chars* argument defaults to - removing whitespace. The *chars* argument is not a prefix; rather, - all combinations of its values are stripped: - - >>> ' spacious '.lstrip() - 'spacious ' - >>> 'www.example.com'.lstrip('cmowz.') - 'example.com' - - See "str.removeprefix()" for a method that will remove a single - prefix string rather than all of a set of characters. For example: - - >>> 'Arthur: three!'.lstrip('Arthur: ') - 'ee!' - >>> 'Arthur: three!'.removeprefix('Arthur: ') - 'three!' - -static str.maketrans(dict, /) -static str.maketrans(from, to, remove='', /) - - This static method returns a translation table usable for - "str.translate()". - - If there is only one argument, it must be a dictionary mapping - Unicode ordinals (integers) or characters (strings of length 1) to - Unicode ordinals, strings (of arbitrary lengths) or "None". - Character keys will then be converted to ordinals. - - If there are two arguments, they must be strings of equal length, - and in the resulting dictionary, each character in *from* will be - mapped to the character at the same position in *to*. If there is - a third argument, it must be a string, whose characters will be - mapped to "None" in the result. - -str.partition(sep, /) - - Split the string at the first occurrence of *sep*, and return a - 3-tuple containing the part before the separator, the separator - itself, and the part after the separator. If the separator is not - found, return a 3-tuple containing the string itself, followed by - two empty strings. - - For example: - - >>> 'Monty Python'.partition(' ') - ('Monty', ' ', 'Python') - >>> "Monty Python's Flying Circus".partition(' ') - ('Monty', ' ', "Python's Flying Circus") - >>> 'Monty Python'.partition('-') - ('Monty Python', '', '') - - See also "rpartition()". - -str.removeprefix(prefix, /) - - If the string starts with the *prefix* string, return - "string[len(prefix):]". Otherwise, return a copy of the original - string: - - >>> 'TestHook'.removeprefix('Test') - 'Hook' - >>> 'BaseTestCase'.removeprefix('Test') - 'BaseTestCase' - - Added in version 3.9. - - See also "removesuffix()" and "startswith()". - -str.removesuffix(suffix, /) - - If the string ends with the *suffix* string and that *suffix* is - not empty, return "string[:-len(suffix)]". Otherwise, return a copy - of the original string: - - >>> 'MiscTests'.removesuffix('Tests') - 'Misc' - >>> 'TmpDirMixin'.removesuffix('Tests') - 'TmpDirMixin' - - Added in version 3.9. - - See also "removeprefix()" and "endswith()". - -str.replace(old, new, /, count=-1) - - Return a copy of the string with all occurrences of substring *old* - replaced by *new*. If *count* is given, only the first *count* - occurrences are replaced. If *count* is not specified or "-1", then - all occurrences are replaced. For example: - - >>> 'spam, spam, spam'.replace('spam', 'eggs') - 'eggs, eggs, eggs' - >>> 'spam, spam, spam'.replace('spam', 'eggs', 1) - 'eggs, spam, spam' - - Changed in version 3.13: *count* is now supported as a keyword - argument. - -str.rfind(sub[, start[, end]]) - - Return the highest index in the string where substring *sub* is - found, such that *sub* is contained within "s[start:end]". - Optional arguments *start* and *end* are interpreted as in slice - notation. Return "-1" on failure. For example: - - >>> 'spam, spam, spam'.rfind('sp') - 12 - >>> 'spam, spam, spam'.rfind('sp', 0, 10) - 6 - - See also "find()" and "rindex()". 
-
-str.rindex(sub[, start[, end]])
-
-   Like "rfind()" but raises "ValueError" when the substring *sub* is
-   not found. For example:
-
-   >>> 'spam, spam, spam'.rindex('spam')
-   12
-   >>> 'spam, spam, spam'.rindex('eggs')
-   Traceback (most recent call last):
-     File "<stdin>", line 1, in <module>
-       'spam, spam, spam'.rindex('eggs')
-       ~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^
-   ValueError: substring not found
-
-   See also "index()" and "find()".
-
-str.rjust(width, fillchar=' ', /)
-
-   Return the string right justified in a string of length *width*.
-   Padding is done using the specified *fillchar* (default is an ASCII
-   space). The original string is returned if *width* is less than or
-   equal to "len(s)".
-
-   For example:
-
-   >>> 'Python'.rjust(10)
-   '    Python'
-   >>> 'Python'.rjust(10, '.')
-   '....Python'
-   >>> 'Monty Python'.rjust(10, '.')
-   'Monty Python'
-
-   See also "ljust()" and "zfill()".
-
-str.rpartition(sep, /)
-
-   Split the string at the last occurrence of *sep*, and return a
-   3-tuple containing the part before the separator, the separator
-   itself, and the part after the separator. If the separator is not
-   found, return a 3-tuple containing two empty strings, followed by
-   the string itself.
-
-   For example:
-
-   >>> 'Monty Python'.rpartition(' ')
-   ('Monty', ' ', 'Python')
-   >>> "Monty Python's Flying Circus".rpartition(' ')
-   ("Monty Python's Flying", ' ', 'Circus')
-   >>> 'Monty Python'.rpartition('-')
-   ('', '', 'Monty Python')
-
-   See also "partition()".
-
-str.rsplit(sep=None, maxsplit=-1)
-
-   Return a list of the words in the string, using *sep* as the
-   delimiter string. If *maxsplit* is given, at most *maxsplit* splits
-   are done, the *rightmost* ones. If *sep* is not specified or
-   "None", any whitespace string is a separator. Except for splitting
-   from the right, "rsplit()" behaves like "split()" which is
-   described in detail below.
-
-str.rstrip(chars=None, /)
-
-   Return a copy of the string with trailing characters removed. The
-   *chars* argument is a string specifying the set of characters to be
-   removed. If omitted or "None", the *chars* argument defaults to
-   removing whitespace. The *chars* argument is not a suffix; rather,
-   all combinations of its values are stripped. For example:
-
-   >>> '   spacious   '.rstrip()
-   '   spacious'
-   >>> 'mississippi'.rstrip('ipz')
-   'mississ'
-
-   See "removesuffix()" for a method that will remove a single suffix
-   string rather than all of a set of characters. For example:
-
-   >>> 'Monty Python'.rstrip(' Python')
-   'M'
-   >>> 'Monty Python'.removesuffix(' Python')
-   'Monty'
-
-   See also "strip()".
-
-str.split(sep=None, maxsplit=-1)
-
-   Return a list of the words in the string, using *sep* as the
-   delimiter string. If *maxsplit* is given, at most *maxsplit*
-   splits are done (thus, the list will have at most "maxsplit+1"
-   elements). If *maxsplit* is not specified or "-1", then there is
-   no limit on the number of splits (all possible splits are made).
-
-   If *sep* is given, consecutive delimiters are not grouped together
-   and are deemed to delimit empty strings (for example,
-   "'1,,2'.split(',')" returns "['1', '', '2']"). The *sep* argument
-   may consist of multiple characters as a single delimiter (to split
-   with multiple delimiters, use "re.split()"). Splitting an empty
-   string with a specified separator returns "['']".
-
-   For example:
-
-   >>> '1,2,3'.split(',')
-   ['1', '2', '3']
-   >>> '1,2,3'.split(',', maxsplit=1)
-   ['1', '2,3']
-   >>> '1,2,,3,'.split(',')
-   ['1', '2', '', '3', '']
-   >>> '1<>2<>3<4'.split('<>')
-   ['1', '2', '3<4']
-
-   If *sep* is not specified or is "None", a different splitting
-   algorithm is applied: runs of consecutive whitespace are regarded
-   as a single separator, and the result will contain no empty strings
-   at the start or end if the string has leading or trailing
-   whitespace. Consequently, splitting an empty string or a string
-   consisting of just whitespace with a "None" separator returns "[]".
-
-   For example:
-
-   >>> '1 2 3'.split()
-   ['1', '2', '3']
-   >>> '1 2 3'.split(maxsplit=1)
-   ['1', '2 3']
-   >>> '   1   2   3   '.split()
-   ['1', '2', '3']
-
-   If *sep* is not specified or is "None" and *maxsplit* is "0", only
-   leading runs of consecutive whitespace are considered.
-
-   For example:
-
-   >>> "".split(None, 0)
-   []
-   >>> "   ".split(None, 0)
-   []
-   >>> "   foo   ".split(maxsplit=0)
-   ['foo   ']
-
-   See also "join()".
-
-str.splitlines(keepends=False)
-
-   Return a list of the lines in the string, breaking at line
-   boundaries. Line breaks are not included in the resulting list
-   unless *keepends* is given and true.
-
-   This method splits on the following line boundaries. In
-   particular, the boundaries are a superset of *universal newlines*.
-
-   +-------------------------+-------------------------------+
-   | Representation          | Description                   |
-   |=========================|===============================|
-   | "\n"                    | Line Feed                     |
-   +-------------------------+-------------------------------+
-   | "\r"                    | Carriage Return               |
-   +-------------------------+-------------------------------+
-   | "\r\n"                  | Carriage Return + Line Feed   |
-   +-------------------------+-------------------------------+
-   | "\v" or "\x0b"          | Line Tabulation               |
-   +-------------------------+-------------------------------+
-   | "\f" or "\x0c"          | Form Feed                     |
-   +-------------------------+-------------------------------+
-   | "\x1c"                  | File Separator                |
-   +-------------------------+-------------------------------+
-   | "\x1d"                  | Group Separator               |
-   +-------------------------+-------------------------------+
-   | "\x1e"                  | Record Separator              |
-   +-------------------------+-------------------------------+
-   | "\x85"                  | Next Line (C1 Control Code)   |
-   +-------------------------+-------------------------------+
-   | "\u2028"                | Line Separator                |
-   +-------------------------+-------------------------------+
-   | "\u2029"                | Paragraph Separator           |
-   +-------------------------+-------------------------------+
-
-   Changed in version 3.2: "\v" and "\f" added to list of line
-   boundaries.
-
-   For example:
-
-   >>> 'ab c\n\nde fg\rkl\r\n'.splitlines()
-   ['ab c', '', 'de fg', 'kl']
-   >>> 'ab c\n\nde fg\rkl\r\n'.splitlines(keepends=True)
-   ['ab c\n', '\n', 'de fg\r', 'kl\r\n']
-
-   Unlike "split()" when a delimiter string *sep* is given, this
-   method returns an empty list for the empty string, and a terminal
-   line break does not result in an extra line:
-
-   >>> "".splitlines()
-   []
-   >>> "One line\n".splitlines()
-   ['One line']
-
-   For comparison, "split('\n')" gives:
-
-   >>> ''.split('\n')
-   ['']
-   >>> 'Two lines\n'.split('\n')
-   ['Two lines', '']
-
-str.startswith(prefix[, start[, end]])
-
-   Return "True" if string starts with the *prefix*, otherwise return
-   "False". *prefix* can also be a tuple of prefixes to look for.
-   With optional *start*, test string beginning at that position.
-   With optional *end*, stop comparing string at that position.
- - For example: - - >>> 'Python'.startswith('Py') - True - >>> 'a tuple of prefixes'.startswith(('at', 'a')) - True - >>> 'Python is amazing'.startswith('is', 7) - True - - See also "endswith()" and "removeprefix()". - -str.strip(chars=None, /) - - Return a copy of the string with the leading and trailing - characters removed. The *chars* argument is a string specifying the - set of characters to be removed. If omitted or "None", the *chars* - argument defaults to removing whitespace. The *chars* argument is - not a prefix or suffix; rather, all combinations of its values are - stripped. - - For example: - - >>> ' spacious '.strip() - 'spacious' - >>> 'www.example.com'.strip('cmowz.') - 'example' - - The outermost leading and trailing *chars* argument values are - stripped from the string. Characters are removed from the leading - end until reaching a string character that is not contained in the - set of characters in *chars*. A similar action takes place on the - trailing end. - - For example: - - >>> comment_string = '#....... Section 3.2.1 Issue #32 .......' - >>> comment_string.strip('.#! ') - 'Section 3.2.1 Issue #32' - - See also "rstrip()". - -str.swapcase() - - Return a copy of the string with uppercase characters converted to - lowercase and vice versa. Note that it is not necessarily true that - "s.swapcase().swapcase() == s". - -str.title() - - Return a titlecased version of the string where words start with an - uppercase character and the remaining characters are lowercase. - - For example: - - >>> 'Hello world'.title() - 'Hello World' - - The algorithm uses a simple language-independent definition of a - word as groups of consecutive letters. The definition works in - many contexts but it means that apostrophes in contractions and - possessives form word boundaries, which may not be the desired - result: - - >>> "they're bill's friends from the UK".title() - "They'Re Bill'S Friends From The Uk" - - The "string.capwords()" function does not have this problem, as it - splits words on spaces only. - - Alternatively, a workaround for apostrophes can be constructed - using regular expressions: - - >>> import re - >>> def titlecase(s): - ... return re.sub(r"[A-Za-z]+('[A-Za-z]+)?", - ... lambda mo: mo.group(0).capitalize(), - ... s) - ... - >>> titlecase("they're bill's friends.") - "They're Bill's Friends." - - See also "istitle()". - -str.translate(table, /) - - Return a copy of the string in which each character has been mapped - through the given translation table. The table must be an object - that implements indexing via "__getitem__()", typically a *mapping* - or *sequence*. When indexed by a Unicode ordinal (an integer), the - table object can do any of the following: return a Unicode ordinal - or a string, to map the character to one or more other characters; - return "None", to delete the character from the return string; or - raise a "LookupError" exception, to map the character to itself. - - You can use "str.maketrans()" to create a translation map from - character-to-character mappings in different formats. - - See also the "codecs" module for a more flexible approach to custom - character mappings. - -str.upper() - - Return a copy of the string with all the cased characters [4] - converted to uppercase. Note that "s.upper().isupper()" might be - "False" if "s" contains uncased characters or if the Unicode - category of the resulting character(s) is not “Lu” (Letter, - uppercase), but e.g. “Lt” (Letter, titlecase). 
-
-   The uppercasing algorithm used is described in section 3.13
-   ‘Default Case Folding’ of the Unicode Standard.
-
-str.zfill(width, /)
-
-   Return a copy of the string left filled with ASCII "'0'" digits to
-   make a string of length *width*. A leading sign prefix
-   ("'+'"/"'-'") is handled by inserting the padding *after* the sign
-   character rather than before. The original string is returned if
-   *width* is less than or equal to "len(s)".
-
-   For example:
-
-      >>> "42".zfill(5)
-      '00042'
-      >>> "-42".zfill(5)
-      '-0042'
-
-   See also "rjust()".
-''',
- 'strings': '''String and Bytes literals
-*************************
-
-String literals are described by the following lexical definitions:
-
-   stringliteral   ::= [stringprefix](shortstring | longstring)
-   stringprefix    ::= "r" | "u" | "R" | "U" | "f" | "F"
-                       | "fr" | "Fr" | "fR" | "FR" | "rf" | "rF" | "Rf" | "RF"
-   shortstring     ::= "'" shortstringitem* "'" | '"' shortstringitem* '"'
-   longstring      ::= "\'\'\'" longstringitem* "\'\'\'" | '"""' longstringitem* '"""'
-   shortstringitem ::= shortstringchar | stringescapeseq
-   longstringitem  ::= longstringchar | stringescapeseq
-   shortstringchar ::= <any source character except "\\" or newline or the quote>
-   longstringchar  ::= <any source character except "\\">
-   stringescapeseq ::= "\\" <any source character>
-
-   bytesliteral   ::= bytesprefix(shortbytes | longbytes)
-   bytesprefix    ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"
-   shortbytes     ::= "'" shortbytesitem* "'" | '"' shortbytesitem* '"'
-   longbytes      ::= "\'\'\'" longbytesitem* "\'\'\'" | '"""' longbytesitem* '"""'
-   shortbytesitem ::= shortbyteschar | bytesescapeseq
-   longbytesitem  ::= longbyteschar | bytesescapeseq
-   shortbyteschar ::= <any ASCII character except "\\" or newline or the quote>
-   longbyteschar  ::= <any ASCII character except "\\">
-   bytesescapeseq ::= "\\" <any ASCII character>
-
-One syntactic restriction not indicated by these productions is that
-whitespace is not allowed between the "stringprefix" or "bytesprefix"
-and the rest of the literal. The source character set is defined by
-the encoding declaration; it is UTF-8 if no encoding declaration is
-given in the source file; see section Encoding declarations.
-
-In plain English: Both types of literals can be enclosed in matching
-single quotes ("'") or double quotes ("""). They can also be enclosed
-in matching groups of three single or double quotes (these are
-generally referred to as *triple-quoted strings*). The backslash ("\\")
-character is used to give special meaning to otherwise ordinary
-characters like "n", which means ‘newline’ when escaped ("\\n"). It can
-also be used to escape characters that otherwise have a special
-meaning, such as newline, backslash itself, or the quote character.
-See escape sequences below for examples.
-
-Bytes literals are always prefixed with "'b'" or "'B'"; they produce
-an instance of the "bytes" type instead of the "str" type. They may
-only contain ASCII characters; bytes with a numeric value of 128 or
-greater must be expressed with escapes.
-
-Added in version 3.3: The "'rb'" prefix of raw bytes literals has been
-added as a synonym of "'br'".
-
-Added in version 3.3: Support for the unicode legacy literal
-("u'value'") was reintroduced to simplify the maintenance of dual
-Python 2.x and 3.x codebases. See **PEP 414** for more information.
-
-Both string and bytes literals may optionally be prefixed with a
-letter "'r'" or "'R'"; such constructs are called *raw string
-literals* and *raw bytes literals* respectively and treat backslashes
-as literal characters. As a result, in raw string literals, "'\\U'"
-and "'\\u'" escapes are not treated specially.
-
-A string literal with "'f'" or "'F'" in its prefix is a *formatted
-string literal*; see f-strings.
The "'f'" may be combined with "'r'", -but not with "'b'" or "'u'", therefore raw formatted strings are -possible, but formatted bytes literals are not. - -In triple-quoted literals, unescaped newlines and quotes are allowed -(and are retained), except that three unescaped quotes in a row -terminate the literal. (A “quote” is the character used to open the -literal, i.e. either "'" or """.) - - -Escape sequences -================ - -Unless an "'r'" or "'R'" prefix is present, escape sequences in string -and bytes literals are interpreted according to rules similar to those -used by Standard C. The recognized escape sequences are: - -+---------------------------+-----------------------------------+---------+ -| Escape Sequence | Meaning | Notes | -|===========================|===================================|=========| -| "\\" | Backslash and newline ignored | (1) | -+---------------------------+-----------------------------------+---------+ -| "\\\\" | Backslash ("\\") | | -+---------------------------+-----------------------------------+---------+ -| "\\'" | Single quote ("'") | | -+---------------------------+-----------------------------------+---------+ -| "\\"" | Double quote (""") | | -+---------------------------+-----------------------------------+---------+ -| "\\a" | ASCII Bell (BEL) | | -+---------------------------+-----------------------------------+---------+ -| "\\b" | ASCII Backspace (BS) | | -+---------------------------+-----------------------------------+---------+ -| "\\f" | ASCII Formfeed (FF) | | -+---------------------------+-----------------------------------+---------+ -| "\\n" | ASCII Linefeed (LF) | | -+---------------------------+-----------------------------------+---------+ -| "\\r" | ASCII Carriage Return (CR) | | -+---------------------------+-----------------------------------+---------+ -| "\\t" | ASCII Horizontal Tab (TAB) | | -+---------------------------+-----------------------------------+---------+ -| "\\v" | ASCII Vertical Tab (VT) | | -+---------------------------+-----------------------------------+---------+ -| "\\*ooo*" | Character with octal value *ooo* | (2,4) | -+---------------------------+-----------------------------------+---------+ -| "\\x*hh*" | Character with hex value *hh* | (3,4) | -+---------------------------+-----------------------------------+---------+ - -Escape sequences only recognized in string literals are: - -+---------------------------+-----------------------------------+---------+ -| Escape Sequence | Meaning | Notes | -|===========================|===================================|=========| -| "\\N{*name*}" | Character named *name* in the | (5) | -| | Unicode database | | -+---------------------------+-----------------------------------+---------+ -| "\\u*xxxx*" | Character with 16-bit hex value | (6) | -| | *xxxx* | | -+---------------------------+-----------------------------------+---------+ -| "\\U*xxxxxxxx*" | Character with 32-bit hex value | (7) | -| | *xxxxxxxx* | | -+---------------------------+-----------------------------------+---------+ - -Notes: - -1. A backslash can be added at the end of a line to ignore the - newline: - - >>> 'This string will not include \\ - ... backslashes or newline characters.' - 'This string will not include backslashes or newline characters.' - - The same result can be achieved using triple-quoted strings, or - parentheses and string literal concatenation. - -2. As in Standard C, up to three octal digits are accepted. 
-
-   Changed in version 3.11: Octal escapes with value larger than
-   "0o377" produce a "DeprecationWarning".
-
-   Changed in version 3.12: Octal escapes with value larger than
-   "0o377" produce a "SyntaxWarning". In a future Python version they
-   will eventually become a "SyntaxError".
-
-3. Unlike in Standard C, exactly two hex digits are required.
-
-4. In a bytes literal, hexadecimal and octal escapes denote the byte
-   with the given value. In a string literal, these escapes denote a
-   Unicode character with the given value.
-
-5. Changed in version 3.3: Support for name aliases [1] has been
-   added.
-
-6. Exactly four hex digits are required.
-
-7. Any Unicode character can be encoded this way. Exactly eight hex
-   digits are required.
-
-Unlike Standard C, all unrecognized escape sequences are left in the
-string unchanged, i.e., *the backslash is left in the result*. (This
-behavior is useful when debugging: if an escape sequence is mistyped,
-the resulting output is more easily recognized as broken.) It is also
-important to note that the escape sequences only recognized in string
-literals fall into the category of unrecognized escapes for bytes
-literals.
-
-Changed in version 3.6: Unrecognized escape sequences produce a
-"DeprecationWarning".
-
-Changed in version 3.12: Unrecognized escape sequences produce a
-"SyntaxWarning". In a future Python version they will eventually
-become a "SyntaxError".
-
-Even in a raw literal, quotes can be escaped with a backslash, but the
-backslash remains in the result; for example, "r"\\""" is a valid
-string literal consisting of two characters: a backslash and a double
-quote; "r"\\"" is not a valid string literal (even a raw string cannot
-end in an odd number of backslashes). Specifically, *a raw literal
-cannot end in a single backslash* (since the backslash would escape
-the following quote character). Note also that a single backslash
-followed by a newline is interpreted as those two characters as part
-of the literal, *not* as a line continuation.
-''',
- 'subscriptions': r'''Subscriptions
-*************
-
-The subscription of an instance of a container class will generally
-select an element from the container. The subscription of a *generic
-class* will generally return a GenericAlias object.
-
-   subscription ::= primary "[" flexible_expression_list "]"
-
-When an object is subscripted, the interpreter will evaluate the
-primary and the expression list.
-
-The primary must evaluate to an object that supports subscription. An
-object may support subscription through defining one or both of
-"__getitem__()" and "__class_getitem__()". When the primary is
-subscripted, the evaluated result of the expression list will be
-passed to one of these methods. For more details on when
-"__class_getitem__" is called instead of "__getitem__", see
-__class_getitem__ versus __getitem__.
-
-If the expression list contains at least one comma, or if any of the
-expressions are starred, the expression list will evaluate to a
-"tuple" containing the items of the expression list. Otherwise, the
-expression list will evaluate to the value of the list’s sole member.
-
-Changed in version 3.11: Expressions in an expression list may be
-starred. See **PEP 646**.
-
-For built-in objects, there are two types of objects that support
-subscription via "__getitem__()":
-
-1. Mappings.
If the primary is a *mapping*, the expression list must - evaluate to an object whose value is one of the keys of the - mapping, and the subscription selects the value in the mapping that - corresponds to that key. An example of a builtin mapping class is - the "dict" class. - -2. Sequences. If the primary is a *sequence*, the expression list must - evaluate to an "int" or a "slice" (as discussed in the following - section). Examples of builtin sequence classes include the "str", - "list" and "tuple" classes. - -The formal syntax makes no special provision for negative indices in -*sequences*. However, built-in sequences all provide a "__getitem__()" -method that interprets negative indices by adding the length of the -sequence to the index so that, for example, "x[-1]" selects the last -item of "x". The resulting value must be a nonnegative integer less -than the number of items in the sequence, and the subscription selects -the item whose index is that value (counting from zero). Since the -support for negative indices and slicing occurs in the object’s -"__getitem__()" method, subclasses overriding this method will need to -explicitly add that support. - -A "string" is a special kind of sequence whose items are *characters*. -A character is not a separate data type but a string of exactly one -character. -''', - 'truth': r'''Truth Value Testing -******************* - -Any object can be tested for truth value, for use in an "if" or -"while" condition or as operand of the Boolean operations below. - -By default, an object is considered true unless its class defines -either a "__bool__()" method that returns "False" or a "__len__()" -method that returns zero, when called with the object. [1] Here are -most of the built-in objects considered false: - -* constants defined to be false: "None" and "False" - -* zero of any numeric type: "0", "0.0", "0j", "Decimal(0)", - "Fraction(0, 1)" - -* empty sequences and collections: "''", "()", "[]", "{}", "set()", - "range(0)" - -Operations and built-in functions that have a Boolean result always -return "0" or "False" for false and "1" or "True" for true, unless -otherwise stated. (Important exception: the Boolean operations "or" -and "and" always return one of their operands.) -''', - 'try': r'''The "try" statement -******************* - -The "try" statement specifies exception handlers and/or cleanup code -for a group of statements: - - try_stmt ::= try1_stmt | try2_stmt | try3_stmt - try1_stmt ::= "try" ":" suite - ("except" [expression ["as" identifier]] ":" suite)+ - ["else" ":" suite] - ["finally" ":" suite] - try2_stmt ::= "try" ":" suite - ("except" "*" expression ["as" identifier] ":" suite)+ - ["else" ":" suite] - ["finally" ":" suite] - try3_stmt ::= "try" ":" suite - "finally" ":" suite - -Additional information on exceptions can be found in section -Exceptions, and information on using the "raise" statement to generate -exceptions may be found in section The raise statement. - - -"except" clause -=============== - -The "except" clause(s) specify one or more exception handlers. When no -exception occurs in the "try" clause, no exception handler is -executed. When an exception occurs in the "try" suite, a search for an -exception handler is started. This search inspects the "except" -clauses in turn until one is found that matches the exception. An -expression-less "except" clause, if present, must be last; it matches -any exception. 
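-
-As an illustration (a minimal sketch; the detailed matching rules
-follow below), the clauses are inspected in turn, so the first
-matching clause wins even when a later clause names the exception
-more precisely:
-
-   >>> try:
-   ...     raise ZeroDivisionError('boom')
-   ... except ValueError:
-   ...     print('not reached')
-   ... except ArithmeticError:
-   ...     print('first match wins')
-   ... except ZeroDivisionError:
-   ...     print('never reached: an earlier clause already matched')
-   ...
-   first match wins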
-
-For an "except" clause with an expression, the expression must
-evaluate to an exception type or a tuple of exception types. The
-raised exception matches an "except" clause whose expression evaluates
-to the class or a *non-virtual base class* of the exception object, or
-to a tuple that contains such a class.
-
-If no "except" clause matches the exception, the search for an
-exception handler continues in the surrounding code and on the
-invocation stack. [1]
-
-If the evaluation of an expression in the header of an "except" clause
-raises an exception, the original search for a handler is canceled and
-a search starts for the new exception in the surrounding code and on
-the call stack (it is treated as if the entire "try" statement raised
-the exception).
-
-When a matching "except" clause is found, the exception is assigned to
-the target specified after the "as" keyword in that "except" clause,
-if present, and the "except" clause’s suite is executed. All "except"
-clauses must have an executable block. When the end of this block is
-reached, execution continues normally after the entire "try"
-statement. (This means that if two nested handlers exist for the same
-exception, and the exception occurs in the "try" clause of the inner
-handler, the outer handler will not handle the exception.)
-
-When an exception has been assigned using "as target", it is cleared
-at the end of the "except" clause. This is as if
-
-   except E as N:
-       foo
-
-were translated to
-
-   except E as N:
-       try:
-           foo
-       finally:
-           del N
-
-This means the exception must be assigned to a different name to be
-able to refer to it after the "except" clause. Exceptions are cleared
-because with the traceback attached to them, they form a reference
-cycle with the stack frame, keeping all locals in that frame alive
-until the next garbage collection occurs.
-
-Before an "except" clause’s suite is executed, the exception is stored
-in the "sys" module, where it can be accessed from within the body of
-the "except" clause by calling "sys.exception()". When leaving an
-exception handler, the exception stored in the "sys" module is reset
-to its previous value:
-
-   >>> print(sys.exception())
-   None
-   >>> try:
-   ...     raise TypeError
-   ... except:
-   ...     print(repr(sys.exception()))
-   ...     try:
-   ...         raise ValueError
-   ...     except:
-   ...         print(repr(sys.exception()))
-   ...     print(repr(sys.exception()))
-   ...
-   TypeError()
-   ValueError()
-   TypeError()
-   >>> print(sys.exception())
-   None
-
-
-"except*" clause
-================
-
-The "except*" clause(s) specify one or more handlers for groups of
-exceptions ("BaseExceptionGroup" instances). A "try" statement can
-have either "except" or "except*" clauses, but not both. The exception
-type for matching is mandatory in the case of "except*", so "except*:"
-is a syntax error. The type is interpreted as in the case of "except",
-but matching is performed on the exceptions contained in the group
-that is being handled. A "TypeError" is raised if a matching type is
-a subclass of "BaseExceptionGroup", because that would have ambiguous
-semantics.
-
-When an exception group is raised in the try block, each "except*"
-clause splits (see "split()") it into the subgroups of matching and
-non-matching exceptions. If the matching subgroup is not empty, it
-becomes the handled exception (the value returned from
-"sys.exception()") and is assigned to the target of the "except*"
-clause (if there is one). Then, the body of the "except*" clause
-executes.
-If the non-matching subgroup is not empty, it is processed by the
-next "except*" in the same manner. This continues until all
-exceptions in the group have been matched, or the last "except*"
-clause has run.
-
-After all "except*" clauses execute, the group of unhandled exceptions
-is merged with any exceptions that were raised or re-raised from
-within "except*" clauses. This merged exception group propagates on:
-
-   >>> try:
-   ...     raise ExceptionGroup("eg",
-   ...         [ValueError(1), TypeError(2), OSError(3), OSError(4)])
-   ... except* TypeError as e:
-   ...     print(f'caught {type(e)} with nested {e.exceptions}')
-   ... except* OSError as e:
-   ...     print(f'caught {type(e)} with nested {e.exceptions}')
-   ...
-   caught <class 'ExceptionGroup'> with nested (TypeError(2),)
-   caught <class 'ExceptionGroup'> with nested (OSError(3), OSError(4))
-     + Exception Group Traceback (most recent call last):
-     |   File "<stdin>", line 2, in <module>
-     |     raise ExceptionGroup("eg",
-     |         [ValueError(1), TypeError(2), OSError(3), OSError(4)])
-     | ExceptionGroup: eg (1 sub-exception)
-     +-+---------------- 1 ----------------
-       | ValueError: 1
-       +------------------------------------
-
-If the exception raised from the "try" block is not an exception group
-and its type matches one of the "except*" clauses, it is caught and
-wrapped by an exception group with an empty message string. This
-ensures that the type of the target "e" is consistently
-"BaseExceptionGroup":
-
-   >>> try:
-   ...     raise BlockingIOError
-   ... except* BlockingIOError as e:
-   ...     print(repr(e))
-   ...
-   ExceptionGroup('', (BlockingIOError(),))
-
-"break", "continue" and "return" cannot appear in an "except*" clause.
-
-
-"else" clause
-=============
-
-The optional "else" clause is executed if the control flow leaves the
-"try" suite, no exception was raised, and no "return", "continue", or
-"break" statement was executed. Exceptions in the "else" clause are
-not handled by the preceding "except" clauses.
-
-
-"finally" clause
-================
-
-If "finally" is present, it specifies a ‘cleanup’ handler. The "try"
-clause is executed, including any "except" and "else" clauses. If an
-exception occurs in any of the clauses and is not handled, the
-exception is temporarily saved. The "finally" clause is executed. If
-there is a saved exception it is re-raised at the end of the "finally"
-clause. If the "finally" clause raises another exception, the saved
-exception is set as the context of the new exception. If the "finally"
-clause executes a "return", "break" or "continue" statement, the saved
-exception is discarded:
-
-   >>> def f():
-   ...     try:
-   ...         1/0
-   ...     finally:
-   ...         return 42
-   ...
-   >>> f()
-   42
-
-The exception information is not available to the program during
-execution of the "finally" clause.
-
-When a "return", "break" or "continue" statement is executed in the
-"try" suite of a "try"…"finally" statement, the "finally" clause is
-also executed ‘on the way out.’
-
-The return value of a function is determined by the last "return"
-statement executed. Since the "finally" clause always executes, a
-"return" statement executed in the "finally" clause will always be the
-last one executed:
-
-   >>> def foo():
-   ...     try:
-   ...         return 'try'
-   ...     finally:
-   ...         return 'finally'
-   ...
-   >>> foo()
-   'finally'
-
-Changed in version 3.8: Prior to Python 3.8, a "continue" statement
-was illegal in the "finally" clause due to a problem with the
-implementation.
-''',
- 'types': r'''The standard type hierarchy
-***************************
-
-Below is a list of the types that are built into Python.
Extension -modules (written in C, Java, or other languages, depending on the -implementation) can define additional types. Future versions of -Python may add types to the type hierarchy (e.g., rational numbers, -efficiently stored arrays of integers, etc.), although such additions -will often be provided via the standard library instead. - -Some of the type descriptions below contain a paragraph listing -‘special attributes.’ These are attributes that provide access to the -implementation and are not intended for general use. Their definition -may change in the future. - - -None -==== - -This type has a single value. There is a single object with this -value. This object is accessed through the built-in name "None". It is -used to signify the absence of a value in many situations, e.g., it is -returned from functions that don’t explicitly return anything. Its -truth value is false. - - -NotImplemented -============== - -This type has a single value. There is a single object with this -value. This object is accessed through the built-in name -"NotImplemented". Numeric methods and rich comparison methods should -return this value if they do not implement the operation for the -operands provided. (The interpreter will then try the reflected -operation, or some other fallback, depending on the operator.) It -should not be evaluated in a boolean context. - -See Implementing the arithmetic operations for more details. - -Changed in version 3.9: Evaluating "NotImplemented" in a boolean -context is deprecated. While it currently evaluates as true, it will -emit a "DeprecationWarning". It will raise a "TypeError" in a future -version of Python. - - -Ellipsis -======== - -This type has a single value. There is a single object with this -value. This object is accessed through the literal "..." or the built- -in name "Ellipsis". Its truth value is true. - - -"numbers.Number" -================ - -These are created by numeric literals and returned as results by -arithmetic operators and arithmetic built-in functions. Numeric -objects are immutable; once created their value never changes. Python -numbers are of course strongly related to mathematical numbers, but -subject to the limitations of numerical representation in computers. - -The string representations of the numeric classes, computed by -"__repr__()" and "__str__()", have the following properties: - -* They are valid numeric literals which, when passed to their class - constructor, produce an object having the value of the original - numeric. - -* The representation is in base 10, when possible. - -* Leading zeros, possibly excepting a single zero before a decimal - point, are not shown. - -* Trailing zeros, possibly excepting a single zero after a decimal - point, are not shown. - -* A sign is shown only when the number is negative. - -Python distinguishes between integers, floating-point numbers, and -complex numbers: - - -"numbers.Integral" ------------------- - -These represent elements from the mathematical set of integers -(positive and negative). - -Note: - - The rules for integer representation are intended to give the most - meaningful interpretation of shift and mask operations involving - negative integers. - -There are two types of integers: - -Integers ("int") - These represent numbers in an unlimited range, subject to available - (virtual) memory only. 
For the purpose of shift and mask - operations, a binary representation is assumed, and negative - numbers are represented in a variant of 2’s complement which gives - the illusion of an infinite string of sign bits extending to the - left. - -Booleans ("bool") - These represent the truth values False and True. The two objects - representing the values "False" and "True" are the only Boolean - objects. The Boolean type is a subtype of the integer type, and - Boolean values behave like the values 0 and 1, respectively, in - almost all contexts, the exception being that when converted to a - string, the strings ""False"" or ""True"" are returned, - respectively. - - -"numbers.Real" ("float") ------------------------- - -These represent machine-level double precision floating-point numbers. -You are at the mercy of the underlying machine architecture (and C or -Java implementation) for the accepted range and handling of overflow. -Python does not support single-precision floating-point numbers; the -savings in processor and memory usage that are usually the reason for -using these are dwarfed by the overhead of using objects in Python, so -there is no reason to complicate the language with two kinds of -floating-point numbers. - - -"numbers.Complex" ("complex") ------------------------------ - -These represent complex numbers as a pair of machine-level double -precision floating-point numbers. The same caveats apply as for -floating-point numbers. The real and imaginary parts of a complex -number "z" can be retrieved through the read-only attributes "z.real" -and "z.imag". - - -Sequences -========= - -These represent finite ordered sets indexed by non-negative numbers. -The built-in function "len()" returns the number of items of a -sequence. When the length of a sequence is *n*, the index set contains -the numbers 0, 1, …, *n*-1. Item *i* of sequence *a* is selected by -"a[i]". Some sequences, including built-in sequences, interpret -negative subscripts by adding the sequence length. For example, -"a[-2]" equals "a[n-2]", the second to last item of sequence a with -length "n". - -Sequences also support slicing: "a[i:j]" selects all items with index -*k* such that *i* "<=" *k* "<" *j*. When used as an expression, a -slice is a sequence of the same type. The comment above about negative -indexes also applies to negative slice positions. - -Some sequences also support “extended slicing” with a third “step” -parameter: "a[i:j:k]" selects all items of *a* with index *x* where "x -= i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*. - -Sequences are distinguished according to their mutability: - - -Immutable sequences -------------------- - -An object of an immutable sequence type cannot change once it is -created. (If the object contains references to other objects, these -other objects may be mutable and may be changed; however, the -collection of objects directly referenced by an immutable object -cannot change.) - -The following types are immutable sequences: - -Strings - A string is a sequence of values that represent Unicode code - points. All the code points in the range "U+0000 - U+10FFFF" can be - represented in a string. Python doesn’t have a char type; instead, - every code point in the string is represented as a string object - with length "1". The built-in function "ord()" converts a code - point from its string form to an integer in the range "0 - 10FFFF"; - "chr()" converts an integer in the range "0 - 10FFFF" to the - corresponding length "1" string object. 
"str.encode()" can be used - to convert a "str" to "bytes" using the given text encoding, and - "bytes.decode()" can be used to achieve the opposite. - -Tuples - The items of a tuple are arbitrary Python objects. Tuples of two or - more items are formed by comma-separated lists of expressions. A - tuple of one item (a ‘singleton’) can be formed by affixing a comma - to an expression (an expression by itself does not create a tuple, - since parentheses must be usable for grouping of expressions). An - empty tuple can be formed by an empty pair of parentheses. - -Bytes - A bytes object is an immutable array. The items are 8-bit bytes, - represented by integers in the range 0 <= x < 256. Bytes literals - (like "b'abc'") and the built-in "bytes()" constructor can be used - to create bytes objects. Also, bytes objects can be decoded to - strings via the "decode()" method. - - -Mutable sequences ------------------ - -Mutable sequences can be changed after they are created. The -subscription and slicing notations can be used as the target of -assignment and "del" (delete) statements. - -Note: - - The "collections" and "array" module provide additional examples of - mutable sequence types. - -There are currently two intrinsic mutable sequence types: - -Lists - The items of a list are arbitrary Python objects. Lists are formed - by placing a comma-separated list of expressions in square - brackets. (Note that there are no special cases needed to form - lists of length 0 or 1.) - -Byte Arrays - A bytearray object is a mutable array. They are created by the - built-in "bytearray()" constructor. Aside from being mutable (and - hence unhashable), byte arrays otherwise provide the same interface - and functionality as immutable "bytes" objects. - - -Set types -========= - -These represent unordered, finite sets of unique, immutable objects. -As such, they cannot be indexed by any subscript. However, they can be -iterated over, and the built-in function "len()" returns the number of -items in a set. Common uses for sets are fast membership testing, -removing duplicates from a sequence, and computing mathematical -operations such as intersection, union, difference, and symmetric -difference. - -For set elements, the same immutability rules apply as for dictionary -keys. Note that numeric types obey the normal rules for numeric -comparison: if two numbers compare equal (e.g., "1" and "1.0"), only -one of them can be contained in a set. - -There are currently two intrinsic set types: - -Sets - These represent a mutable set. They are created by the built-in - "set()" constructor and can be modified afterwards by several - methods, such as "add()". - -Frozen sets - These represent an immutable set. They are created by the built-in - "frozenset()" constructor. As a frozenset is immutable and - *hashable*, it can be used again as an element of another set, or - as a dictionary key. - - -Mappings -======== - -These represent finite sets of objects indexed by arbitrary index -sets. The subscript notation "a[k]" selects the item indexed by "k" -from the mapping "a"; this can be used in expressions and as the -target of assignments or "del" statements. The built-in function -"len()" returns the number of items in a mapping. - -There is currently a single intrinsic mapping type: - - -Dictionaries ------------- - -These represent finite sets of objects indexed by nearly arbitrary -values. 
The only types of values not acceptable as keys are values -containing lists or dictionaries or other mutable types that are -compared by value rather than by object identity, the reason being -that the efficient implementation of dictionaries requires a key’s -hash value to remain constant. Numeric types used for keys obey the -normal rules for numeric comparison: if two numbers compare equal -(e.g., "1" and "1.0") then they can be used interchangeably to index -the same dictionary entry. - -Dictionaries preserve insertion order, meaning that keys will be -produced in the same order they were added sequentially over the -dictionary. Replacing an existing key does not change the order, -however removing a key and re-inserting it will add it to the end -instead of keeping its old place. - -Dictionaries are mutable; they can be created by the "{}" notation -(see section Dictionary displays). - -The extension modules "dbm.ndbm" and "dbm.gnu" provide additional -examples of mapping types, as does the "collections" module. - -Changed in version 3.7: Dictionaries did not preserve insertion order -in versions of Python before 3.6. In CPython 3.6, insertion order was -preserved, but it was considered an implementation detail at that time -rather than a language guarantee. - - -Callable types -============== - -These are the types to which the function call operation (see section -Calls) can be applied: - - -User-defined functions ----------------------- - -A user-defined function object is created by a function definition -(see section Function definitions). It should be called with an -argument list containing the same number of items as the function’s -formal parameter list. - - -Special read-only attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -+----------------------------------------------------+----------------------------------------------------+ -| Attribute | Meaning | -|====================================================|====================================================| -| function.__builtins__ | A reference to the "dictionary" that holds the | -| | function’s builtins namespace. Added in version | -| | 3.10. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__globals__ | A reference to the "dictionary" that holds the | -| | function’s global variables – the global namespace | -| | of the module in which the function was defined. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__closure__ | "None" or a "tuple" of cells that contain bindings | -| | for the names specified in the "co_freevars" | -| | attribute of the function’s "code object". A cell | -| | object has the attribute "cell_contents". This can | -| | be used to get the value of the cell, as well as | -| | set the value. | -+----------------------------------------------------+----------------------------------------------------+ - - -Special writable attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Most of these attributes check the type of the assigned value: - -+----------------------------------------------------+----------------------------------------------------+ -| Attribute | Meaning | -|====================================================|====================================================| -| function.__doc__ | The function’s documentation string, or "None" if | -| | unavailable. 
| -+----------------------------------------------------+----------------------------------------------------+ -| function.__name__ | The function’s name. See also: "__name__ | -| | attributes". | -+----------------------------------------------------+----------------------------------------------------+ -| function.__qualname__ | The function’s *qualified name*. See also: | -| | "__qualname__ attributes". Added in version 3.3. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__module__ | The name of the module the function was defined | -| | in, or "None" if unavailable. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__defaults__ | A "tuple" containing default *parameter* values | -| | for those parameters that have defaults, or "None" | -| | if no parameters have a default value. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__code__ | The code object representing the compiled function | -| | body. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__dict__ | The namespace supporting arbitrary function | -| | attributes. See also: "__dict__ attributes". | -+----------------------------------------------------+----------------------------------------------------+ -| function.__annotations__ | A "dictionary" containing annotations of | -| | *parameters*. The keys of the dictionary are the | -| | parameter names, and "'return'" for the return | -| | annotation, if provided. See also: Annotations | -| | Best Practices. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__kwdefaults__ | A "dictionary" containing defaults for keyword- | -| | only *parameters*. | -+----------------------------------------------------+----------------------------------------------------+ -| function.__type_params__ | A "tuple" containing the type parameters of a | -| | generic function. Added in version 3.12. | -+----------------------------------------------------+----------------------------------------------------+ - -Function objects also support getting and setting arbitrary -attributes, which can be used, for example, to attach metadata to -functions. Regular attribute dot-notation is used to get and set such -attributes. - -**CPython implementation detail:** CPython’s current implementation -only supports function attributes on user-defined functions. Function -attributes on built-in functions may be supported in the future. - -Additional information about a function’s definition can be retrieved -from its code object (accessible via the "__code__" attribute). - - -Instance methods ----------------- - -An instance method object combines a class, a class instance and any -callable object (normally a user-defined function). 
- -Special read-only attributes: - -+----------------------------------------------------+----------------------------------------------------+ -| method.__self__ | Refers to the class instance object to which the | -| | method is bound | -+----------------------------------------------------+----------------------------------------------------+ -| method.__func__ | Refers to the original function object | -+----------------------------------------------------+----------------------------------------------------+ -| method.__doc__ | The method’s documentation (same as | -| | "method.__func__.__doc__"). A "string" if the | -| | original function had a docstring, else "None". | -+----------------------------------------------------+----------------------------------------------------+ -| method.__name__ | The name of the method (same as | -| | "method.__func__.__name__") | -+----------------------------------------------------+----------------------------------------------------+ -| method.__module__ | The name of the module the method was defined in, | -| | or "None" if unavailable. | -+----------------------------------------------------+----------------------------------------------------+ - -Methods also support accessing (but not setting) the arbitrary -function attributes on the underlying function object. - -User-defined method objects may be created when getting an attribute -of a class (perhaps via an instance of that class), if that attribute -is a user-defined function object or a "classmethod" object. - -When an instance method object is created by retrieving a user-defined -function object from a class via one of its instances, its "__self__" -attribute is the instance, and the method object is said to be -*bound*. The new method’s "__func__" attribute is the original -function object. - -When an instance method object is created by retrieving a -"classmethod" object from a class or instance, its "__self__" -attribute is the class itself, and its "__func__" attribute is the -function object underlying the class method. - -When an instance method object is called, the underlying function -("__func__") is called, inserting the class instance ("__self__") in -front of the argument list. For instance, when "C" is a class which -contains a definition for a function "f()", and "x" is an instance of -"C", calling "x.f(1)" is equivalent to calling "C.f(x, 1)". - -When an instance method object is derived from a "classmethod" object, -the “class instance” stored in "__self__" will actually be the class -itself, so that calling either "x.f(1)" or "C.f(1)" is equivalent to -calling "f(C,1)" where "f" is the underlying function. - -It is important to note that user-defined functions which are -attributes of a class instance are not converted to bound methods; -this *only* happens when the function is an attribute of the class. - - -Generator functions -------------------- - -A function or method which uses the "yield" statement (see section The -yield statement) is called a *generator function*. Such a function, -when called, always returns an *iterator* object which can be used to -execute the body of the function: calling the iterator’s -"iterator.__next__()" method will cause the function to execute until -it provides a value using the "yield" statement. When the function -executes a "return" statement or falls off the end, a "StopIteration" -exception is raised and the iterator will have reached the end of the -set of values to be returned. 
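-
-For example, a minimal sketch of this behavior (using a hypothetical
-"countdown" generator):
-
-   >>> def countdown(n):
-   ...     while n > 0:
-   ...         yield n
-   ...         n -= 1
-   ...
-   >>> it = countdown(2)
-   >>> next(it)
-   2
-   >>> next(it)
-   1
-   >>> next(it)
-   Traceback (most recent call last):
-     ...
-   StopIteration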
-
-
-Coroutine functions
--------------------
-
-A function or method which is defined using "async def" is called a
-*coroutine function*. Such a function, when called, returns a
-*coroutine* object. It may contain "await" expressions, as well as
-"async with" and "async for" statements. See also the Coroutine
-Objects section.
-
-
-Asynchronous generator functions
---------------------------------
-
-A function or method which is defined using "async def" and which uses
-the "yield" statement is called an *asynchronous generator function*.
-Such a function, when called, returns an *asynchronous iterator*
-object which can be used in an "async for" statement to execute the
-body of the function.
-
-Calling the asynchronous iterator’s "aiterator.__anext__" method will
-return an *awaitable* which, when awaited, will execute until it
-provides a value using the "yield" expression. When the function
-executes an empty "return" statement or falls off the end, a
-"StopAsyncIteration" exception is raised and the asynchronous iterator
-will have reached the end of the set of values to be yielded.
-
-
-Built-in functions
-------------------
-
-A built-in function object is a wrapper around a C function. Examples
-of built-in functions are "len()" and "math.sin()" ("math" is a
-standard built-in module). The number and type of the arguments are
-determined by the C function. Special read-only attributes:
-
-* "__doc__" is the function’s documentation string, or "None" if
-  unavailable. See "function.__doc__".
-
-* "__name__" is the function’s name. See "function.__name__".
-
-* "__self__" is set to "None" (but see the next item).
-
-* "__module__" is the name of the module the function was defined in
-  or "None" if unavailable. See "function.__module__".
-
-
-Built-in methods
-----------------
-
-This is really a different disguise of a built-in function, this time
-containing an object passed to the C function as an implicit extra
-argument. An example of a built-in method is "alist.append()",
-assuming *alist* is a list object. In this case, the special read-only
-attribute "__self__" is set to the object denoted by *alist*. (The
-attribute has the same semantics as it does with "other instance
-methods".)
-
-
-Classes
--------
-
-Classes are callable. These objects normally act as factories for new
-instances of themselves, but variations are possible for class types
-that override "__new__()". The arguments of the call are passed to
-"__new__()" and, in the typical case, to "__init__()" to initialize
-the new instance.
-
-
-Class Instances
----------------
-
-Instances of arbitrary classes can be made callable by defining a
-"__call__()" method in their class.
-
-
-Modules
-=======
-
-Modules are a basic organizational unit of Python code, and are
-created by the import system as invoked either by the "import"
-statement, or by calling functions such as "importlib.import_module()"
-and built-in "__import__()". A module object has a namespace
-implemented by a "dictionary" object (this is the dictionary
-referenced by the "__globals__" attribute of functions defined in the
-module). Attribute references are translated to lookups in this
-dictionary, e.g., "m.x" is equivalent to "m.__dict__["x"]". A module
-object does not contain the code object used to initialize the module
-(since it isn’t needed once the initialization is done).
-
-Attribute assignment updates the module’s namespace dictionary, e.g.,
-"m.x = 1" is equivalent to "m.__dict__["x"] = 1".
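-
-As an illustrative sketch (creating an empty module named "demo" with
-the "types.ModuleType" constructor discussed below):
-
-   >>> import types
-   >>> m = types.ModuleType('demo')
-   >>> m.x = 1
-   >>> m.__dict__['x']
-   1
-   >>> m.__dict__['y'] = 2
-   >>> m.y
-   2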
- - -Import-related attributes on module objects -------------------------------------------- - -Module objects have the following attributes that relate to the import -system. When a module is created using the machinery associated with -the import system, these attributes are filled in based on the -module’s *spec*, before the *loader* executes and loads the module. - -To create a module dynamically rather than using the import system, -it’s recommended to use "importlib.util.module_from_spec()", which -will set the various import-controlled attributes to appropriate -values. It’s also possible to use the "types.ModuleType" constructor -to create modules directly, but this technique is more error-prone, as -most attributes must be manually set on the module object after it has -been created when using this approach. - -Caution: - - With the exception of "__name__", it is **strongly** recommended - that you rely on "__spec__" and its attributes instead of any of the - other individual attributes listed in this subsection. Note that - updating an attribute on "__spec__" will not update the - corresponding attribute on the module itself: - - >>> import typing - >>> typing.__name__, typing.__spec__.name - ('typing', 'typing') - >>> typing.__spec__.name = 'spelling' - >>> typing.__name__, typing.__spec__.name - ('typing', 'spelling') - >>> typing.__name__ = 'keyboard_smashing' - >>> typing.__name__, typing.__spec__.name - ('keyboard_smashing', 'spelling') - -module.__name__ - - The name used to uniquely identify the module in the import system. - For a directly executed module, this will be set to ""__main__"". - - This attribute must be set to the fully qualified name of the - module. It is expected to match the value of - "module.__spec__.name". - -module.__spec__ - - A record of the module’s import-system-related state. - - Set to the "module spec" that was used when importing the module. - See Module specs for more details. - - Added in version 3.4. - -module.__package__ - - The *package* a module belongs to. - - If the module is top-level (that is, not a part of any specific - package) then the attribute should be set to "''" (the empty - string). Otherwise, it should be set to the name of the module’s - package (which can be equal to "module.__name__" if the module - itself is a package). See **PEP 366** for further details. - - This attribute is used instead of "__name__" to calculate explicit - relative imports for main modules. It defaults to "None" for - modules created dynamically using the "types.ModuleType" - constructor; use "importlib.util.module_from_spec()" instead to - ensure the attribute is set to a "str". - - It is **strongly** recommended that you use - "module.__spec__.parent" instead of "module.__package__". - "__package__" is now only used as a fallback if "__spec__.parent" - is not set, and this fallback path is deprecated. - - Changed in version 3.4: This attribute now defaults to "None" for - modules created dynamically using the "types.ModuleType" - constructor. Previously the attribute was optional. - - Changed in version 3.6: The value of "__package__" is expected to - be the same as "__spec__.parent". "__package__" is now only used as - a fallback during import resolution if "__spec__.parent" is not - defined. - - Changed in version 3.10: "ImportWarning" is raised if an import - resolution falls back to "__package__" instead of - "__spec__.parent". 
- - Changed in version 3.12: Raise "DeprecationWarning" instead of - "ImportWarning" when falling back to "__package__" during import - resolution. - - Deprecated since version 3.13, will be removed in version 3.15: - "__package__" will cease to be set or taken into consideration by - the import system or standard library. - -module.__loader__ - - The *loader* object that the import machinery used to load the - module. - - This attribute is mostly useful for introspection, but can be used - for additional loader-specific functionality, for example getting - data associated with a loader. - - "__loader__" defaults to "None" for modules created dynamically - using the "types.ModuleType" constructor; use - "importlib.util.module_from_spec()" instead to ensure the attribute - is set to a *loader* object. - - It is **strongly** recommended that you use - "module.__spec__.loader" instead of "module.__loader__". - - Changed in version 3.4: This attribute now defaults to "None" for - modules created dynamically using the "types.ModuleType" - constructor. Previously the attribute was optional. - - Deprecated since version 3.12, will be removed in version 3.16: - Setting "__loader__" on a module while failing to set - "__spec__.loader" is deprecated. In Python 3.16, "__loader__" will - cease to be set or taken into consideration by the import system or - the standard library. - -module.__path__ - - A (possibly empty) *sequence* of strings enumerating the locations - where the package’s submodules will be found. Non-package modules - should not have a "__path__" attribute. See __path__ attributes on - modules for more details. - - It is **strongly** recommended that you use - "module.__spec__.submodule_search_locations" instead of - "module.__path__". - -module.__file__ - -module.__cached__ - - "__file__" and "__cached__" are both optional attributes that may - or may not be set. Both attributes should be a "str" when they are - available. - - "__file__" indicates the pathname of the file from which the module - was loaded (if loaded from a file), or the pathname of the shared - library file for extension modules loaded dynamically from a shared - library. It might be missing for certain types of modules, such as - C modules that are statically linked into the interpreter, and the - import system may opt to leave it unset if it has no semantic - meaning (for example, a module loaded from a database). - - If "__file__" is set then the "__cached__" attribute might also be - set, which is the path to any compiled version of the code (for - example, a byte-compiled file). The file does not need to exist to - set this attribute; the path can simply point to where the compiled - file *would* exist (see **PEP 3147**). - - Note that "__cached__" may be set even if "__file__" is not set. - However, that scenario is quite atypical. Ultimately, the *loader* - is what makes use of the module spec provided by the *finder* (from - which "__file__" and "__cached__" are derived). So if a loader can - load from a cached module but otherwise does not load from a file, - that atypical scenario may be appropriate. - - It is **strongly** recommended that you use - "module.__spec__.cached" instead of "module.__cached__". - - Deprecated since version 3.13, will be removed in version 3.15: - Setting "__cached__" on a module while failing to set - "__spec__.cached" is deprecated. In Python 3.15, "__cached__" will - cease to be set or taken into consideration by the import system or - standard library. 
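-
-As a minimal sketch of the recommended
-"importlib.util.module_from_spec()" approach (the module name "demo"
-is hypothetical, and no real loader is attached):
-
-   >>> import importlib.util
-   >>> spec = importlib.util.spec_from_loader('demo', loader=None)
-   >>> mod = importlib.util.module_from_spec(spec)
-   >>> mod.__name__
-   'demo'
-   >>> mod.__spec__ is spec
-   True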
- - -Other writable attributes on module objects -------------------------------------------- - -As well as the import-related attributes listed above, module objects -also have the following writable attributes: - -module.__doc__ - - The module’s documentation string, or "None" if unavailable. See - also: "__doc__ attributes". - -module.__annotations__ - - A dictionary containing *variable annotations* collected during - module body execution. For best practices on working with - "__annotations__", please see Annotations Best Practices. - - -Module dictionaries -------------------- - -Module objects also have the following special read-only attribute: - -module.__dict__ - - The module’s namespace as a dictionary object. Uniquely among the - attributes listed here, "__dict__" cannot be accessed as a global - variable from within a module; it can only be accessed as an - attribute on module objects. - - **CPython implementation detail:** Because of the way CPython - clears module dictionaries, the module dictionary will be cleared - when the module falls out of scope even if the dictionary still has - live references. To avoid this, copy the dictionary or keep the - module around while using its dictionary directly. - - -Custom classes -============== - -Custom class types are typically created by class definitions (see -section Class definitions). A class has a namespace implemented by a -dictionary object. Class attribute references are translated to -lookups in this dictionary, e.g., "C.x" is translated to -"C.__dict__["x"]" (although there are a number of hooks which allow -for other means of locating attributes). When the attribute name is -not found there, the attribute search continues in the base classes. -This search of the base classes uses the C3 method resolution order -which behaves correctly even in the presence of ‘diamond’ inheritance -structures where there are multiple inheritance paths leading back to -a common ancestor. Additional details on the C3 MRO used by Python can -be found at The Python 2.3 Method Resolution Order. - -When a class attribute reference (for class "C", say) would yield a -class method object, it is transformed into an instance method object -whose "__self__" attribute is "C". When it would yield a -"staticmethod" object, it is transformed into the object wrapped by -the static method object. See section Implementing Descriptors for -another way in which attributes retrieved from a class may differ from -those actually contained in its "__dict__". - -Class attribute assignments update the class’s dictionary, never the -dictionary of a base class. - -A class object can be called (see above) to yield a class instance -(see below). - - -Special attributes ------------------- - -+----------------------------------------------------+----------------------------------------------------+ -| Attribute | Meaning | -|====================================================|====================================================| -| type.__name__ | The class’s name. See also: "__name__ attributes". | -+----------------------------------------------------+----------------------------------------------------+ -| type.__qualname__ | The class’s *qualified name*. See also: | -| | "__qualname__ attributes". | -+----------------------------------------------------+----------------------------------------------------+ -| type.__module__ | The name of the module in which the class was | -| | defined. 
| -+----------------------------------------------------+----------------------------------------------------+ -| type.__dict__ | A "mapping proxy" providing a read-only view of | -| | the class’s namespace. See also: "__dict__ | -| | attributes". | -+----------------------------------------------------+----------------------------------------------------+ -| type.__bases__ | A "tuple" containing the class’s bases. In most | -| | cases, for a class defined as "class X(A, B, C)", | -| | "X.__bases__" will be exactly equal to "(A, B, | -| | C)". | -+----------------------------------------------------+----------------------------------------------------+ -| type.__base__ | **CPython implementation detail:** The single base | -| | class in the inheritance chain that is responsible | -| | for the memory layout of instances. This attribute | -| | corresponds to "tp_base" at the C level. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__doc__ | The class’s documentation string, or "None" if | -| | undefined. Not inherited by subclasses. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__annotations__ | A dictionary containing *variable annotations* | -| | collected during class body execution. For best | -| | practices on working with "__annotations__", | -| | please see Annotations Best Practices. Caution: | -| | Accessing the "__annotations__" attribute of a | -| | class object directly may yield incorrect results | -| | in the presence of metaclasses. In addition, the | -| | attribute may not exist for some classes. Use | -| | "inspect.get_annotations()" to retrieve class | -| | annotations safely. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__type_params__ | A "tuple" containing the type parameters of a | -| | generic class. Added in version 3.12. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__static_attributes__ | A "tuple" containing names of attributes of this | -| | class which are assigned through "self.X" from any | -| | function in its body. Added in version 3.13. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__firstlineno__ | The line number of the first line of the class | -| | definition, including decorators. Setting the | -| | "__module__" attribute removes the | -| | "__firstlineno__" item from the type’s dictionary. | -| | Added in version 3.13. | -+----------------------------------------------------+----------------------------------------------------+ -| type.__mro__ | The "tuple" of classes that are considered when | -| | looking for base classes during method resolution. | -+----------------------------------------------------+----------------------------------------------------+ - - -Special methods ---------------- - -In addition to the special attributes described above, all Python -classes also have the following two methods available: - -type.mro() - - This method can be overridden by a metaclass to customize the - method resolution order for its instances. It is called at class - instantiation, and its result is stored in "__mro__". - -type.__subclasses__() - - Each class keeps a list of weak references to its immediate - subclasses. This method returns a list of all those references - still alive. 
The list is in definition order. Example: - - >>> class A: pass - >>> class B(A): pass - >>> A.__subclasses__() - [<class 'B'>] - - -Class instances -=============== - -A class instance is created by calling a class object (see above). A -class instance has a namespace implemented as a dictionary which is -the first place in which attribute references are searched. When an -attribute is not found there, and the instance’s class has an -attribute by that name, the search continues with the class -attributes. If a class attribute is found that is a user-defined -function object, it is transformed into an instance method object -whose "__self__" attribute is the instance. Static method and class -method objects are also transformed; see above under “Classes”. See -section Implementing Descriptors for another way in which attributes -of a class retrieved via its instances may differ from the objects -actually stored in the class’s "__dict__". If no class attribute is -found, and the object’s class has a "__getattr__()" method, that is -called to satisfy the lookup. - -Attribute assignments and deletions update the instance’s dictionary, -never a class’s dictionary. If the class has a "__setattr__()" or -"__delattr__()" method, this is called instead of updating the -instance dictionary directly. - -Class instances can pretend to be numbers, sequences, or mappings if -they have methods with certain special names. See section Special -method names. - - -Special attributes ------------------- - -object.__class__ - - The class to which a class instance belongs. - -object.__dict__ - - A dictionary or other mapping object used to store an object’s - (writable) attributes. Not all instances have a "__dict__" - attribute; see the section on __slots__ for more details. - - -I/O objects (also known as file objects) -======================================== - -A *file object* represents an open file. Various shortcuts are -available to create file objects: the "open()" built-in function, and -also "os.popen()", "os.fdopen()", and the "makefile()" method of -socket objects (and perhaps by other functions or methods provided by -extension modules). - -File objects implement common methods, listed below, to simplify usage -in generic code. They are expected to be With Statement Context -Managers. - -The objects "sys.stdin", "sys.stdout" and "sys.stderr" are initialized -to file objects corresponding to the interpreter’s standard input, -output and error streams; they are all open in text mode and therefore -follow the interface defined by the "io.TextIOBase" abstract class. - -file.read(size=-1, /) - - Retrieve up to *size* data from the file. As a convenience, if - *size* is unspecified or -1, all available data is retrieved. - -file.write(data, /) - - Store *data* to the file. - -file.close() - - Flush any buffers and close the underlying file. - - -Internal types -============== - -A few types used internally by the interpreter are exposed to the -user. Their definitions may change with future versions of the -interpreter, but they are mentioned here for completeness. - - -Code objects ------------- - -Code objects represent *byte-compiled* executable Python code, or -*bytecode*.
The difference between a code object and a function object -is that the function object contains an explicit reference to the -function’s globals (the module in which it was defined), while a code -object contains no context; also the default argument values are -stored in the function object, not in the code object (because they -represent values calculated at run-time). Unlike function objects, -code objects are immutable and contain no references (directly or -indirectly) to mutable objects. - - -Special read-only attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_name | The function name | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_qualname | The fully qualified function name Added in | -| | version 3.11. | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_argcount | The total number of positional *parameters* | -| | (including positional-only parameters and | -| | parameters with default values) that the function | -| | has | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_posonlyargcount | The number of positional-only *parameters* | -| | (including arguments with default values) that the | -| | function has | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_kwonlyargcount | The number of keyword-only *parameters* (including | -| | arguments with default values) that the function | -| | has | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_nlocals | The number of local variables used by the function | -| | (including parameters) | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_varnames | A "tuple" containing the names of the local | -| | variables in the function (starting with the | -| | parameter names) | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_cellvars | A "tuple" containing the names of local variables | -| | that are referenced from at least one *nested | -| | scope* inside the function | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_freevars | A "tuple" containing the names of *free (closure) | -| | variables* that a *nested scope* references in an | -| | outer scope. See also "function.__closure__". | -| | Note: references to global and builtin names are | -| | *not* included. 
| -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_code                                 | A string representing the sequence of *bytecode*   | -|                                                    | instructions in the function                       | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_consts                               | A "tuple" containing the literals used by the      | -|                                                    | *bytecode* in the function                         | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_names                                | A "tuple" containing the names used by the         | -|                                                    | *bytecode* in the function                         | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_filename                             | The name of the file from which the code was       | -|                                                    | compiled                                           | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_firstlineno                          | The line number of the first line of the function  | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_lnotab                               | A string encoding the mapping from *bytecode*      | -|                                                    | offsets to line numbers. For details, see the      | -|                                                    | source code of the interpreter. Deprecated since   | -|                                                    | version 3.12: This attribute of code objects is    | -|                                                    | deprecated, and may be removed in Python 3.15.     | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_stacksize                            | The required stack size of the code object         | -+----------------------------------------------------+----------------------------------------------------+ -| codeobject.co_flags                                | An "integer" encoding a number of flags for the    | -|                                                    | interpreter.                                       | -+----------------------------------------------------+----------------------------------------------------+ - -The following flag bits are defined for "co_flags": bit "0x04" is set -if the function uses the "*arguments" syntax to accept an arbitrary -number of positional arguments; bit "0x08" is set if the function uses -the "**keywords" syntax to accept arbitrary keyword arguments; bit -"0x20" is set if the function is a generator. See Code Objects Bit -Flags for details on the semantics of each flag that might be -present. - -Future feature declarations (for example, "from __future__ import -division") also use bits in "co_flags" to indicate whether a code -object was compiled with a particular feature enabled. See -"compiler_flag". - -Other bits in "co_flags" are reserved for internal use. - -If a code object represents a function, the first item in "co_consts" -is the documentation string of the function, or "None" if undefined. - - -Methods on code objects -~~~~~~~~~~~~~~~~~~~~~~~ - -codeobject.co_positions() - - Returns an iterable over the source code positions of each - *bytecode* instruction in the code object. - - The iterator returns "tuple"s containing the "(start_line, - end_line, start_column, end_column)". The *i-th* tuple corresponds - to the position of the source code that compiled to the *i-th* code - unit. Column information is 0-indexed utf-8 byte offsets on the - given source line. - - This positional information can be missing. A non-exhaustive list - of cases where this may happen: - - * Running the interpreter with "-X" "no_debug_ranges". - - * Loading a pyc file compiled while using "-X" "no_debug_ranges".
- - * Position tuples corresponding to artificial instructions. - - * Line and column numbers that can’t be represented due to - implementation specific limitations. - - When this occurs, some or all of the tuple elements can be "None". - - Added in version 3.11. - - Note: - - This feature requires storing column positions in code objects - which may result in a small increase of disk usage of compiled - Python files or interpreter memory usage. To avoid storing the - extra information and/or deactivate printing the extra traceback - information, the "-X" "no_debug_ranges" command line flag or the - "PYTHONNODEBUGRANGES" environment variable can be used. - -codeobject.co_lines() - - Returns an iterator that yields information about successive ranges - of *bytecode*s. Each item yielded is a "(start, end, lineno)" - "tuple": - - * "start" (an "int") represents the offset (inclusive) of the start - of the *bytecode* range - - * "end" (an "int") represents the offset (exclusive) of the end of - the *bytecode* range - - * "lineno" is an "int" representing the line number of the - *bytecode* range, or "None" if the bytecodes in the given range - have no line number - - The items yielded will have the following properties: - - * The first range yielded will have a "start" of 0. - - * The "(start, end)" ranges will be non-decreasing and consecutive. - That is, for any pair of "tuple"s, the "start" of the second will - be equal to the "end" of the first. - - * No range will be backwards: "end >= start" for all triples. - - * The last "tuple" yielded will have "end" equal to the size of the - *bytecode*. - - Zero-width ranges, where "start == end", are allowed. Zero-width - ranges are used for lines that are present in the source code, but - have been eliminated by the *bytecode* compiler. - - Added in version 3.10. - - See also: - - **PEP 626** - Precise line numbers for debugging and other tools. - The PEP that introduced the "co_lines()" method. - -codeobject.replace(**kwargs) - - Return a copy of the code object with new values for the specified - fields. - - Code objects are also supported by the generic function - "copy.replace()". - - Added in version 3.8. - - -Frame objects -------------- - -Frame objects represent execution frames. They may occur in traceback -objects, and are also passed to registered trace functions. - - -Special read-only attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_back | Points to the previous stack frame (towards the | -| | caller), or "None" if this is the bottom stack | -| | frame | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_code | The code object being executed in this frame. | -| | Accessing this attribute raises an auditing event | -| | "object.__getattr__" with arguments "obj" and | -| | ""f_code"". | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_locals | The mapping used by the frame to look up local | -| | variables. If the frame refers to an *optimized | -| | scope*, this may return a write-through proxy | -| | object. Changed in version 3.13: Return a proxy | -| | for optimized scopes. 
| -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_globals | The dictionary used by the frame to look up global | -| | variables | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_builtins | The dictionary used by the frame to look up built- | -| | in (intrinsic) names | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_lasti | The “precise instruction” of the frame object | -| | (this is an index into the *bytecode* string of | -| | the code object) | -+----------------------------------------------------+----------------------------------------------------+ - - -Special writable attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_trace | If not "None", this is a function called for | -| | various events during code execution (this is used | -| | by debuggers). Normally an event is triggered for | -| | each new source line (see "f_trace_lines"). | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_trace_lines | Set this attribute to "False" to disable | -| | triggering a tracing event for each source line. | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_trace_opcodes | Set this attribute to "True" to allow per-opcode | -| | events to be requested. Note that this may lead to | -| | undefined interpreter behaviour if exceptions | -| | raised by the trace function escape to the | -| | function being traced. | -+----------------------------------------------------+----------------------------------------------------+ -| frame.f_lineno | The current line number of the frame – writing to | -| | this from within a trace function jumps to the | -| | given line (only for the bottom-most frame). A | -| | debugger can implement a Jump command (aka Set | -| | Next Statement) by writing to this attribute. | -+----------------------------------------------------+----------------------------------------------------+ - - -Frame object methods -~~~~~~~~~~~~~~~~~~~~ - -Frame objects support one method: - -frame.clear() - - This method clears all references to local variables held by the - frame. Also, if the frame belonged to a *generator*, the generator - is finalized. This helps break reference cycles involving frame - objects (for example when catching an exception and storing its - traceback for later use). - - "RuntimeError" is raised if the frame is currently executing or - suspended. - - Added in version 3.4. - - Changed in version 3.13: Attempting to clear a suspended frame - raises "RuntimeError" (as has always been the case for executing - frames). - - -Traceback objects ------------------ - -Traceback objects represent the stack trace of an exception. A -traceback object is implicitly created when an exception occurs, and -may also be explicitly created by calling "types.TracebackType". - -Changed in version 3.7: Traceback objects can now be explicitly -instantiated from Python code. - -For implicitly created tracebacks, when the search for an exception -handler unwinds the execution stack, at each unwound level a traceback -object is inserted in front of the current traceback. 
When an -exception handler is entered, the stack trace is made available to the -program. (See section The try statement.) It is accessible as the -third item of the tuple returned by "sys.exc_info()", and as the -"__traceback__" attribute of the caught exception. - -When the program contains no suitable handler, the stack trace is -written (nicely formatted) to the standard error stream; if the -interpreter is interactive, it is also made available to the user as -"sys.last_traceback". - -For explicitly created tracebacks, it is up to the creator of the -traceback to determine how the "tb_next" attributes should be linked -to form a full stack trace. - -Special read-only attributes: - -+----------------------------------------------------+----------------------------------------------------+ -| traceback.tb_frame | Points to the execution frame of the current | -| | level. Accessing this attribute raises an | -| | auditing event "object.__getattr__" with arguments | -| | "obj" and ""tb_frame"". | -+----------------------------------------------------+----------------------------------------------------+ -| traceback.tb_lineno | Gives the line number where the exception occurred | -+----------------------------------------------------+----------------------------------------------------+ -| traceback.tb_lasti | Indicates the “precise instruction”. | -+----------------------------------------------------+----------------------------------------------------+ - -The line number and last instruction in the traceback may differ from -the line number of its frame object if the exception occurred in a -"try" statement with no matching except clause or with a "finally" -clause. - -traceback.tb_next - - The special writable attribute "tb_next" is the next level in the - stack trace (towards the frame where the exception occurred), or - "None" if there is no next level. - - Changed in version 3.7: This attribute is now writable - - -Slice objects -------------- - -Slice objects are used to represent slices for "__getitem__()" -methods. They are also created by the built-in "slice()" function. - -Special read-only attributes: "start" is the lower bound; "stop" is -the upper bound; "step" is the step value; each is "None" if omitted. -These attributes can have any type. - -Slice objects support one method: - -slice.indices(self, length) - - This method takes a single integer argument *length* and computes - information about the slice that the slice object would describe if - applied to a sequence of *length* items. It returns a tuple of - three integers; respectively these are the *start* and *stop* - indices and the *step* or stride length of the slice. Missing or - out-of-bounds indices are handled in a manner consistent with - regular slices. - - -Static method objects ---------------------- - -Static method objects provide a way of defeating the transformation of -function objects to method objects described above. A static method -object is a wrapper around any other object, usually a user-defined -method object. When a static method object is retrieved from a class -or a class instance, the object actually returned is the wrapped -object, which is not subject to any further transformation. Static -method objects are also callable. Static method objects are created by -the built-in "staticmethod()" constructor. 
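-   A minimal doctest sketch of this behaviour (the class and method
-   names here are illustrative only, not taken from the original
-   text):
-
-      >>> class C:
-      ...     @staticmethod
-      ...     def f(x):
-      ...         return x + 1
-      ...
-      >>> type(C.__dict__['f']) is staticmethod   # the stored wrapper
-      True
-      >>> C.f(1)    # retrieval returns the wrapped function; no self is added
-      2
-      >>> C().f(1)  # same when retrieved from an instance
-      2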
- - -Class method objects --------------------- - -A class method object, like a static method object, is a wrapper -around another object that alters the way in which that object is -retrieved from classes and class instances. The behaviour of class -method objects upon such retrieval is described above, under “instance -methods”. Class method objects are created by the built-in -"classmethod()" constructor. -''', - 'typesfunctions': r'''Functions -********* - -Function objects are created by function definitions. The only -operation on a function object is to call it: "func(argument-list)". - -There are really two flavors of function objects: built-in functions -and user-defined functions. Both support the same operation (to call -the function), but the implementation is different, hence the -different object types. - -See Function definitions for more information. -''', - 'typesmapping': r'''Mapping Types — "dict" -********************** - -A *mapping* object maps *hashable* values to arbitrary objects. -Mappings are mutable objects. There is currently only one standard -mapping type, the *dictionary*. (For other containers see the built- -in "list", "set", and "tuple" classes, and the "collections" module.) - -A dictionary’s keys are *almost* arbitrary values. Values that are -not *hashable*, that is, values containing lists, dictionaries or -other mutable types (that are compared by value rather than by object -identity) may not be used as keys. Values that compare equal (such as -"1", "1.0", and "True") can be used interchangeably to index the same -dictionary entry. - -class dict(**kwargs) -class dict(mapping, /, **kwargs) -class dict(iterable, /, **kwargs) - - Return a new dictionary initialized from an optional positional - argument and a possibly empty set of keyword arguments. - - Dictionaries can be created by several means: - - * Use a comma-separated list of "key: value" pairs within braces: - "{'jack': 4098, 'sjoerd': 4127}" or "{4098: 'jack', 4127: - 'sjoerd'}" - - * Use a dict comprehension: "{}", "{x: x ** 2 for x in range(10)}" - - * Use the type constructor: "dict()", "dict([('foo', 100), ('bar', - 200)])", "dict(foo=100, bar=200)" - - If no positional argument is given, an empty dictionary is created. - If a positional argument is given and it defines a "keys()" method, - a dictionary is created by calling "__getitem__()" on the argument - with each returned key from the method. Otherwise, the positional - argument must be an *iterable* object. Each item in the iterable - must itself be an iterable with exactly two elements. The first - element of each item becomes a key in the new dictionary, and the - second element the corresponding value. If a key occurs more than - once, the last value for that key becomes the corresponding value - in the new dictionary. - - If keyword arguments are given, the keyword arguments and their - values are added to the dictionary created from the positional - argument. If a key being added is already present, the value from - the keyword argument replaces the value from the positional - argument. - - Dictionaries compare equal if and only if they have the same "(key, - value)" pairs (regardless of ordering). Order comparisons (‘<’, - ‘<=’, ‘>=’, ‘>’) raise "TypeError". 
To illustrate dictionary - creation and equality, the following examples all return a - dictionary equal to "{"one": 1, "two": 2, "three": 3}": - - >>> a = dict(one=1, two=2, three=3) - >>> b = {'one': 1, 'two': 2, 'three': 3} - >>> c = dict(zip(['one', 'two', 'three'], [1, 2, 3])) - >>> d = dict([('two', 2), ('one', 1), ('three', 3)]) - >>> e = dict({'three': 3, 'one': 1, 'two': 2}) - >>> f = dict({'one': 1, 'three': 3}, two=2) - >>> a == b == c == d == e == f - True - - Providing keyword arguments as in the first example only works for - keys that are valid Python identifiers. Otherwise, any valid keys - can be used. - - Dictionaries preserve insertion order. Note that updating a key - does not affect the order. Keys added after deletion are inserted - at the end. - - >>> d = {"one": 1, "two": 2, "three": 3, "four": 4} - >>> d - {'one': 1, 'two': 2, 'three': 3, 'four': 4} - >>> list(d) - ['one', 'two', 'three', 'four'] - >>> list(d.values()) - [1, 2, 3, 4] - >>> d["one"] = 42 - >>> d - {'one': 42, 'two': 2, 'three': 3, 'four': 4} - >>> del d["two"] - >>> d["two"] = None - >>> d - {'one': 42, 'three': 3, 'four': 4, 'two': None} - - Changed in version 3.7: Dictionary order is guaranteed to be - insertion order. This behavior was an implementation detail of - CPython from 3.6. - - These are the operations that dictionaries support (and therefore, - custom mapping types should support too): - - list(d) - - Return a list of all the keys used in the dictionary *d*. - - len(d) - - Return the number of items in the dictionary *d*. - - d[key] - - Return the item of *d* with key *key*. Raises a "KeyError" if - *key* is not in the map. - - If a subclass of dict defines a method "__missing__()" and *key* - is not present, the "d[key]" operation calls that method with - the key *key* as argument. The "d[key]" operation then returns - or raises whatever is returned or raised by the - "__missing__(key)" call. No other operations or methods invoke - "__missing__()". If "__missing__()" is not defined, "KeyError" - is raised. "__missing__()" must be a method; it cannot be an - instance variable: - - >>> class Counter(dict): - ... def __missing__(self, key): - ... return 0 - ... - >>> c = Counter() - >>> c['red'] - 0 - >>> c['red'] += 1 - >>> c['red'] - 1 - - The example above shows part of the implementation of - "collections.Counter". A different "__missing__()" method is - used by "collections.defaultdict". - - d[key] = value - - Set "d[key]" to *value*. - - del d[key] - - Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not - in the map. - - key in d - - Return "True" if *d* has a key *key*, else "False". - - key not in d - - Equivalent to "not key in d". - - iter(d) - - Return an iterator over the keys of the dictionary. This is a - shortcut for "iter(d.keys())". - - clear() - - Remove all items from the dictionary. - - copy() - - Return a shallow copy of the dictionary. - - classmethod fromkeys(iterable, value=None, /) - - Create a new dictionary with keys from *iterable* and values set - to *value*. - - "fromkeys()" is a class method that returns a new dictionary. - *value* defaults to "None". All of the values refer to just a - single instance, so it generally doesn’t make sense for *value* - to be a mutable object such as an empty list. To get distinct - values, use a dict comprehension instead. - - get(key, default=None, /) - - Return the value for *key* if *key* is in the dictionary, else - *default*. 
If *default* is not given, it defaults to "None", so - that this method never raises a "KeyError". - - items() - - Return a new view of the dictionary’s items ("(key, value)" - pairs). See the documentation of view objects. - - keys() - - Return a new view of the dictionary’s keys. See the - documentation of view objects. - - pop(key, /) - pop(key, default, /) - - If *key* is in the dictionary, remove it and return its value, - else return *default*. If *default* is not given and *key* is - not in the dictionary, a "KeyError" is raised. - - popitem() - - Remove and return a "(key, value)" pair from the dictionary. - Pairs are returned in LIFO (last-in, first-out) order. - - "popitem()" is useful to destructively iterate over a - dictionary, as often used in set algorithms. If the dictionary - is empty, calling "popitem()" raises a "KeyError". - - Changed in version 3.7: LIFO order is now guaranteed. In prior - versions, "popitem()" would return an arbitrary key/value pair. - - reversed(d) - - Return a reverse iterator over the keys of the dictionary. This - is a shortcut for "reversed(d.keys())". - - Added in version 3.8. - - setdefault(key, default=None, /) - - If *key* is in the dictionary, return its value. If not, insert - *key* with a value of *default* and return *default*. *default* - defaults to "None". - - update(**kwargs) - update(mapping, /, **kwargs) - update(iterable, /, **kwargs) - - Update the dictionary with the key/value pairs from *mapping* or - *iterable* and *kwargs*, overwriting existing keys. Return - "None". - - "update()" accepts either another object with a "keys()" method - (in which case "__getitem__()" is called with every key returned - from the method) or an iterable of key/value pairs (as tuples or - other iterables of length two). If keyword arguments are - specified, the dictionary is then updated with those key/value - pairs: "d.update(red=1, blue=2)". - - values() - - Return a new view of the dictionary’s values. See the - documentation of view objects. - - An equality comparison between one "dict.values()" view and - another will always return "False". This also applies when - comparing "dict.values()" to itself: - - >>> d = {'a': 1} - >>> d.values() == d.values() - False - - d | other - - Create a new dictionary with the merged keys and values of *d* - and *other*, which must both be dictionaries. The values of - *other* take priority when *d* and *other* share keys. - - Added in version 3.9. - - d |= other - - Update the dictionary *d* with keys and values from *other*, - which may be either a *mapping* or an *iterable* of key/value - pairs. The values of *other* take priority when *d* and *other* - share keys. - - Added in version 3.9. - - Dictionaries and dictionary views are reversible. - - >>> d = {"one": 1, "two": 2, "three": 3, "four": 4} - >>> d - {'one': 1, 'two': 2, 'three': 3, 'four': 4} - >>> list(reversed(d)) - ['four', 'three', 'two', 'one'] - >>> list(reversed(d.values())) - [4, 3, 2, 1] - >>> list(reversed(d.items())) - [('four', 4), ('three', 3), ('two', 2), ('one', 1)] - - Changed in version 3.8: Dictionaries are now reversible. - -See also: - - "types.MappingProxyType" can be used to create a read-only view of a - "dict". - - -Dictionary view objects -======================= - -The objects returned by "dict.keys()", "dict.values()" and -"dict.items()" are *view objects*. They provide a dynamic view on the -dictionary’s entries, which means that when the dictionary changes, -the view reflects these changes. 
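-   As a brief illustrative sketch of this dynamic behaviour (a fuller
-   worked example appears later in this section):
-
-      >>> d = {'a': 1}
-      >>> keys = d.keys()
-      >>> d['b'] = 2      # the existing view sees the new key
-      >>> list(keys)
-      ['a', 'b']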
- -Dictionary views can be iterated over to yield their respective data, -and support membership tests: - -len(dictview) - - Return the number of entries in the dictionary. - -iter(dictview) - - Return an iterator over the keys, values or items (represented as - tuples of "(key, value)") in the dictionary. - - Keys and values are iterated over in insertion order. This allows - the creation of "(value, key)" pairs using "zip()": "pairs = - zip(d.values(), d.keys())". Another way to create the same list is - "pairs = [(v, k) for (k, v) in d.items()]". - - Iterating views while adding or deleting entries in the dictionary - may raise a "RuntimeError" or fail to iterate over all entries. - - Changed in version 3.7: Dictionary order is guaranteed to be - insertion order. - -x in dictview - - Return "True" if *x* is in the underlying dictionary’s keys, values - or items (in the latter case, *x* should be a "(key, value)" - tuple). - -reversed(dictview) - - Return a reverse iterator over the keys, values or items of the - dictionary. The view will be iterated in reverse order of the - insertion. - - Changed in version 3.8: Dictionary views are now reversible. - -dictview.mapping - - Return a "types.MappingProxyType" that wraps the original - dictionary to which the view refers. - - Added in version 3.10. - -Keys views are set-like since their entries are unique and *hashable*. -Items views also have set-like operations since the (key, value) pairs -are unique and the keys are hashable. If all values in an items view -are hashable as well, then the items view can interoperate with other -sets. (Values views are not treated as set-like since the entries are -generally not unique.) For set-like views, all of the operations -defined for the abstract base class "collections.abc.Set" are -available (for example, "==", "<", or "^"). While using set -operators, set-like views accept any iterable as the other operand, -unlike sets which only accept sets as the input. - -An example of dictionary view usage: - - >>> dishes = {'eggs': 2, 'sausage': 1, 'bacon': 1, 'spam': 500} - >>> keys = dishes.keys() - >>> values = dishes.values() - - >>> # iteration - >>> n = 0 - >>> for val in values: - ...     n += val - ... - >>> print(n) - 504 - - >>> # keys and values are iterated over in the same order (insertion order) - >>> list(keys) - ['eggs', 'sausage', 'bacon', 'spam'] - >>> list(values) - [2, 1, 1, 500] - - >>> # view objects are dynamic and reflect dict changes - >>> del dishes['eggs'] - >>> del dishes['sausage'] - >>> list(keys) - ['bacon', 'spam'] - - >>> # set operations - >>> keys & {'eggs', 'bacon', 'salad'} - {'bacon'} - >>> keys ^ {'sausage', 'juice'} == {'juice', 'sausage', 'bacon', 'spam'} - True - >>> keys | ['juice', 'juice', 'juice'] == {'bacon', 'spam', 'juice'} - True - - >>> # get back a read-only proxy for the original dictionary - >>> values.mapping - mappingproxy({'bacon': 1, 'spam': 500}) - >>> values.mapping['spam'] - 500 -''', - 'typesmethods': r'''Methods -******* - -Methods are functions that are called using the attribute notation. -There are two flavors: built-in methods (such as "append()" on lists) -and class instance methods. Built-in methods are described with the -types that support them. - -If you access a method (a function defined in a class namespace) -through an instance, you get a special object: a *bound method* (also -called instance method) object. When called, it will add the "self" -argument to the argument list.
Bound methods have two special read- -only attributes: "m.__self__" is the object on which the method -operates, and "m.__func__" is the function implementing the method. -Calling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to -calling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)". - -Like function objects, bound method objects support getting arbitrary -attributes. However, since method attributes are actually stored on -the underlying function object ("method.__func__"), setting method -attributes on bound methods is disallowed. Attempting to set an -attribute on a method results in an "AttributeError" being raised. In -order to set a method attribute, you need to explicitly set it on the -underlying function object: - - >>> class C: - ...     def method(self): - ...         pass - ... - >>> c = C() - >>> c.method.whoami = 'my name is method'  # can't set on the method - Traceback (most recent call last): -   File "<stdin>", line 1, in <module> - AttributeError: 'method' object has no attribute 'whoami' - >>> c.method.__func__.whoami = 'my name is method' - >>> c.method.whoami - 'my name is method' - -See Instance methods for more information. -''', - 'typesmodules': r'''Modules -******* - -The only special operation on a module is attribute access: "m.name", -where *m* is a module and *name* accesses a name defined in *m*’s -symbol table. Module attributes can be assigned to. (Note that the -"import" statement is not, strictly speaking, an operation on a module -object; "import foo" does not require a module object named *foo* to -exist, rather it requires an (external) *definition* for a module -named *foo* somewhere.) - -A special attribute of every module is "__dict__". This is the -dictionary containing the module’s symbol table. Modifying this -dictionary will actually change the module’s symbol table, but direct -assignment to the "__dict__" attribute is not possible (you can write -"m.__dict__['a'] = 1", which defines "m.a" to be "1", but you can’t -write "m.__dict__ = {}"). Modifying "__dict__" directly is not -recommended. - -Modules built into the interpreter are written like this: "<module -'sys' (built-in)>". If loaded from a file, they are written as -"<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>". -''', - 'typesseq': r'''Sequence Types — "list", "tuple", "range" -***************************************** - -There are three basic sequence types: lists, tuples, and range -objects. Additional sequence types tailored for processing of binary -data and text strings are described in dedicated sections. - - -Common Sequence Operations -========================== - -The operations in the following table are supported by most sequence -types, both mutable and immutable. The "collections.abc.Sequence" ABC -is provided to make it easier to correctly implement these operations -on custom sequence types. - -This table lists the sequence operations sorted in ascending priority. -In the table, *s* and *t* are sequences of the same type, *n*, *i*, -*j* and *k* are integers and *x* is an arbitrary object that meets any -type and value restrictions imposed by *s*. - -The "in" and "not in" operations have the same priorities as the -comparison operations. The "+" (concatenation) and "*" (repetition) -operations have the same priority as the corresponding numeric -operations.
[3] - -+----------------------------+----------------------------------+------------+ -| Operation | Result | Notes | -|============================|==================================|============| -| "x in s" | "True" if an item of *s* is | (1) | -| | equal to *x*, else "False" | | -+----------------------------+----------------------------------+------------+ -| "x not in s" | "False" if an item of *s* is | (1) | -| | equal to *x*, else "True" | | -+----------------------------+----------------------------------+------------+ -| "s + t" | the concatenation of *s* and *t* | (6)(7) | -+----------------------------+----------------------------------+------------+ -| "s * n" or "n * s" | equivalent to adding *s* to | (2)(7) | -| | itself *n* times | | -+----------------------------+----------------------------------+------------+ -| "s[i]" | *i*th item of *s*, origin 0 | (3)(8) | -+----------------------------+----------------------------------+------------+ -| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) | -+----------------------------+----------------------------------+------------+ -| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) | -| | with step *k* | | -+----------------------------+----------------------------------+------------+ -| "len(s)" | length of *s* | | -+----------------------------+----------------------------------+------------+ -| "min(s)" | smallest item of *s* | | -+----------------------------+----------------------------------+------------+ -| "max(s)" | largest item of *s* | | -+----------------------------+----------------------------------+------------+ - -Sequences of the same type also support comparisons. In particular, -tuples and lists are compared lexicographically by comparing -corresponding elements. This means that to compare equal, every -element must compare equal and the two sequences must be of the same -type and have the same length. (For full details see Comparisons in -the language reference.) - -Forward and reversed iterators over mutable sequences access values -using an index. That index will continue to march forward (or -backward) even if the underlying sequence is mutated. The iterator -terminates only when an "IndexError" or a "StopIteration" is -encountered (or when the index drops below zero). - -Notes: - -1. While the "in" and "not in" operations are used only for simple - containment testing in the general case, some specialised sequences - (such as "str", "bytes" and "bytearray") also use them for - subsequence testing: - - >>> "gg" in "eggs" - True - -2. Values of *n* less than "0" are treated as "0" (which yields an - empty sequence of the same type as *s*). Note that items in the - sequence *s* are not copied; they are referenced multiple times. - This often haunts new Python programmers; consider: - - >>> lists = [[]] * 3 - >>> lists - [[], [], []] - >>> lists[0].append(3) - >>> lists - [[3], [3], [3]] - - What has happened is that "[[]]" is a one-element list containing - an empty list, so all three elements of "[[]] * 3" are references - to this single empty list. Modifying any of the elements of - "lists" modifies this single list. You can create a list of - different lists this way: - - >>> lists = [[] for i in range(3)] - >>> lists[0].append(3) - >>> lists[1].append(5) - >>> lists[2].append(7) - >>> lists - [[3], [5], [7]] - - Further explanation is available in the FAQ entry How do I create a - multidimensional list?. - -3. 
If *i* or *j* is negative, the index is relative to the end of - sequence *s*: "len(s) + i" or "len(s) + j" is substituted. But - note that "-0" is still "0". - -4. The slice of *s* from *i* to *j* is defined as the sequence of - items with index *k* such that "i <= k < j". - - * If *i* is omitted or "None", use "0". - - * If *j* is omitted or "None", use "len(s)". - - * If *i* or *j* is less than "-len(s)", use "0". - - * If *i* or *j* is greater than "len(s)", use "len(s)". - - * If *i* is greater than or equal to *j*, the slice is empty. - -5. The slice of *s* from *i* to *j* with step *k* is defined as the - sequence of items with index "x = i + n*k" such that "0 <= n < - (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k", - "i+3*k" and so on, stopping when *j* is reached (but never - including *j*). When *k* is positive, *i* and *j* are reduced to - "len(s)" if they are greater. When *k* is negative, *i* and *j* are - reduced to "len(s) - 1" if they are greater. If *i* or *j* are - omitted or "None", they become “end” values (which end depends on - the sign of *k*). Note, *k* cannot be zero. If *k* is "None", it - is treated like "1". - -6. Concatenating immutable sequences always results in a new object. - This means that building up a sequence by repeated concatenation - will have a quadratic runtime cost in the total sequence length. - To get a linear runtime cost, you must switch to one of the - alternatives below: - - * if concatenating "str" objects, you can build a list and use - "str.join()" at the end or else write to an "io.StringIO" - instance and retrieve its value when complete - - * if concatenating "bytes" objects, you can similarly use - "bytes.join()" or "io.BytesIO", or you can do in-place - concatenation with a "bytearray" object. "bytearray" objects are - mutable and have an efficient overallocation mechanism - - * if concatenating "tuple" objects, extend a "list" instead - - * for other types, investigate the relevant class documentation - -7. Some sequence types (such as "range") only support item sequences - that follow specific patterns, and hence don’t support sequence - concatenation or repetition. - -8. An "IndexError" is raised if *i* is outside the sequence range. - --[ Sequence Methods ]- - -Sequence types also support the following methods: - -sequence.count(value, /) - - Return the total number of occurrences of *value* in *sequence*. - -sequence.index(value[, start[, stop]]) - - Return the index of the first occurrence of *value* in *sequence*. - - Raises "ValueError" if *value* is not found in *sequence*. - - The *start* or *stop* arguments allow for efficient searching of - subsections of the sequence, beginning at *start* and ending at - *stop*. This is roughly equivalent to "start + - sequence[start:stop].index(value)", only without copying any data. - - Caution: - - Not all sequence types support passing the *start* and *stop* - arguments. - - -Immutable Sequence Types -======================== - -The only operation that immutable sequence types generally implement -that is not also implemented by mutable sequence types is support for -the "hash()" built-in. - -This support allows immutable sequences, such as "tuple" instances, to -be used as "dict" keys and stored in "set" and "frozenset" instances. - -Attempting to hash an immutable sequence that contains unhashable -values will result in "TypeError". - - -Mutable Sequence Types -====================== - -The operations in the following table are defined on mutable sequence -types. 
The "collections.abc.MutableSequence" ABC is provided to make -it easier to correctly implement these operations on custom sequence -types. - -In the table *s* is an instance of a mutable sequence type, *t* is any -iterable object and *x* is an arbitrary object that meets any type and -value restrictions imposed by *s* (for example, "bytearray" only -accepts integers that meet the value restriction "0 <= x <= 255"). - -+--------------------------------+----------------------------------+-----------------------+ -| Operation | Result | Notes | -|================================|==================================|=======================| -| "s[i] = x" | item *i* of *s* is replaced by | | -| | *x* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i]" | removes item *i* of *s* | | -+--------------------------------+----------------------------------+-----------------------+ -| "s[i:j] = t" | slice of *s* from *i* to *j* is | | -| | replaced by the contents of the | | -| | iterable *t* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i:j]" | removes the elements of "s[i:j]" | | -| | from the list (same as "s[i:j] = | | -| | []") | | -+--------------------------------+----------------------------------+-----------------------+ -| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) | -| | replaced by those of *t* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i:j:k]" | removes the elements of | | -| | "s[i:j:k]" from the list | | -+--------------------------------+----------------------------------+-----------------------+ -| "s += t" | extends *s* with the contents of | | -| | *t* (for the most part the same | | -| | as "s[len(s):len(s)] = t") | | -+--------------------------------+----------------------------------+-----------------------+ -| "s *= n" | updates *s* with its contents | (2) | -| | repeated *n* times | | -+--------------------------------+----------------------------------+-----------------------+ - -Notes: - -1. If *k* is not equal to "1", *t* must have the same length as the - slice it is replacing. - -2. The value *n* is an integer, or an object implementing - "__index__()". Zero and negative values of *n* clear the sequence. - Items in the sequence are not copied; they are referenced multiple - times, as explained for "s * n" under Common Sequence Operations. - --[ Mutable Sequence Methods ]- - -Mutable sequence types also support the following methods: - -sequence.append(value, /) - - Append *value* to the end of the sequence. This is equivalent to - writing "seq[len(seq):len(seq)] = [value]". - -sequence.clear() - - Added in version 3.3. - - Remove all items from *sequence*. This is equivalent to writing - "del sequence[:]". - -sequence.copy() - - Added in version 3.3. - - Create a shallow copy of *sequence*. This is equivalent to writing - "sequence[:]". - - Hint: - - The "copy()" method is not part of the "MutableSequence" "ABC", - but most concrete mutable sequence types provide it. - -sequence.extend(iterable, /) - - Extend *sequence* with the contents of *iterable*. For the most - part, this is the same as writing "seq[len(seq):len(seq)] = - iterable". - -sequence.insert(index, value, /) - - Insert *value* into *sequence* at the given *index*. This is - equivalent to writing "sequence[index:index] = [value]". 
- -sequence.pop(index=-1, /) - - Retrieve the item at *index* and also remove it from *sequence*. - By default, the last item in *sequence* is removed and returned. - -sequence.remove(value, /) - - Remove the first item from *sequence* where "sequence[i] == value". - - Raises "ValueError" if *value* is not found in *sequence*. - -sequence.reverse() - - Reverse the items of *sequence* in place. This method maintains - economy of space when reversing a large sequence. To remind users - that it operates by side-effect, it returns "None". - - -Lists -===== - -Lists are mutable sequences, typically used to store collections of -homogeneous items (where the precise degree of similarity will vary by -application). - -class list(iterable=(), /) - - Lists may be constructed in several ways: - - * Using a pair of square brackets to denote the empty list: "[]" - - * Using square brackets, separating items with commas: "[a]", "[a, - b, c]" - - * Using a list comprehension: "[x for x in iterable]" - - * Using the type constructor: "list()" or "list(iterable)" - - The constructor builds a list whose items are the same and in the - same order as *iterable*’s items. *iterable* may be either a - sequence, a container that supports iteration, or an iterator - object. If *iterable* is already a list, a copy is made and - returned, similar to "iterable[:]". For example, "list('abc')" - returns "['a', 'b', 'c']" and "list( (1, 2, 3) )" returns "[1, 2, - 3]". If no argument is given, the constructor creates a new empty - list, "[]". - - Many other operations also produce lists, including the "sorted()" - built-in. - - Lists implement all of the common and mutable sequence operations. - Lists also provide the following additional method: - - sort(*, key=None, reverse=False) - - This method sorts the list in place, using only "<" comparisons - between items. Exceptions are not suppressed - if any comparison - operations fail, the entire sort operation will fail (and the - list will likely be left in a partially modified state). - - "sort()" accepts two arguments that can only be passed by - keyword (keyword-only arguments): - - *key* specifies a function of one argument that is used to - extract a comparison key from each list element (for example, - "key=str.lower"). The key corresponding to each item in the list - is calculated once and then used for the entire sorting process. - The default value of "None" means that list items are sorted - directly without calculating a separate key value. - - The "functools.cmp_to_key()" utility is available to convert a - 2.x style *cmp* function to a *key* function. - - *reverse* is a boolean value. If set to "True", then the list - elements are sorted as if each comparison were reversed. - - This method modifies the sequence in place for economy of space - when sorting a large sequence. To remind users that it operates - by side effect, it does not return the sorted sequence (use - "sorted()" to explicitly request a new sorted list instance). - - The "sort()" method is guaranteed to be stable. A sort is - stable if it guarantees not to change the relative order of - elements that compare equal — this is helpful for sorting in - multiple passes (for example, sort by department, then by salary - grade). - - For sorting examples and a brief sorting tutorial, see Sorting - Techniques. - - **CPython implementation detail:** While a list is being sorted, - the effect of attempting to mutate, or even inspect, the list is - undefined.
The C implementation of Python makes the list appear - empty for the duration, and raises "ValueError" if it can detect - that the list has been mutated during a sort. - - -Tuples -====== - -Tuples are immutable sequences, typically used to store collections of -heterogeneous data (such as the 2-tuples produced by the "enumerate()" -built-in). Tuples are also used for cases where an immutable sequence -of homogeneous data is needed (such as allowing storage in a "set" or -"dict" instance). - -class tuple(iterable=(), /) - - Tuples may be constructed in a number of ways: - - * Using a pair of parentheses to denote the empty tuple: "()" - - * Using a trailing comma for a singleton tuple: "a," or "(a,)" - - * Separating items with commas: "a, b, c" or "(a, b, c)" - - * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)" - - The constructor builds a tuple whose items are the same and in the - same order as *iterable*’s items. *iterable* may be either a - sequence, a container that supports iteration, or an iterator - object. If *iterable* is already a tuple, it is returned - unchanged. For example, "tuple('abc')" returns "('a', 'b', 'c')" - and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is - given, the constructor creates a new empty tuple, "()". - - Note that it is actually the comma which makes a tuple, not the - parentheses. The parentheses are optional, except in the empty - tuple case, or when they are needed to avoid syntactic ambiguity. - For example, "f(a, b, c)" is a function call with three arguments, - while "f((a, b, c))" is a function call with a 3-tuple as the sole - argument. - - Tuples implement all of the common sequence operations. - -For heterogeneous collections of data where access by name is clearer -than access by index, "collections.namedtuple()" may be a more -appropriate choice than a simple tuple object. - - -Ranges -====== - -The "range" type represents an immutable sequence of numbers and is -commonly used for looping a specific number of times in "for" loops. - -class range(stop, /) -class range(start, stop, step=1, /) - - The arguments to the range constructor must be integers (either - built-in "int" or any object that implements the "__index__()" - special method). If the *step* argument is omitted, it defaults to - "1". If the *start* argument is omitted, it defaults to "0". If - *step* is zero, "ValueError" is raised. - - For a positive *step*, the contents of a range "r" are determined - by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] < - stop". - - For a negative *step*, the contents of the range are still - determined by the formula "r[i] = start + step*i", but the - constraints are "i >= 0" and "r[i] > stop". - - A range object will be empty if "r[0]" does not meet the value - constraint. Ranges do support negative indices, but these are - interpreted as indexing from the end of the sequence determined by - the positive indices. - - Ranges containing absolute values larger than "sys.maxsize" are - permitted but some features (such as "len()") may raise - "OverflowError". 
- - Range examples: - - >>> list(range(10)) - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] - >>> list(range(1, 11)) - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - >>> list(range(0, 30, 5)) - [0, 5, 10, 15, 20, 25] - >>> list(range(0, 10, 3)) - [0, 3, 6, 9] - >>> list(range(0, -10, -1)) - [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] - >>> list(range(0)) - [] - >>> list(range(1, 0)) - [] - - Ranges implement all of the common sequence operations except - concatenation and repetition (due to the fact that range objects - can only represent sequences that follow a strict pattern and - repetition and concatenation will usually violate that pattern). - - start - - The value of the *start* parameter (or "0" if the parameter was - not supplied) - - stop - - The value of the *stop* parameter - - step - - The value of the *step* parameter (or "1" if the parameter was - not supplied) - -The advantage of the "range" type over a regular "list" or "tuple" is -that a "range" object will always take the same (small) amount of -memory, no matter the size of the range it represents (as it only -stores the "start", "stop" and "step" values, calculating individual -items and subranges as needed). - -Range objects implement the "collections.abc.Sequence" ABC, and -provide features such as containment tests, element index lookup, -slicing and support for negative indices (see Sequence Types — list, -tuple, range): - ->>> r = range(0, 20, 2) ->>> r -range(0, 20, 2) ->>> 11 in r -False ->>> 10 in r -True ->>> r.index(10) -5 ->>> r[5] -10 ->>> r[:5] -range(0, 10, 2) ->>> r[-1] -18 - -Testing range objects for equality with "==" and "!=" compares them as -sequences. That is, two range objects are considered equal if they -represent the same sequence of values. (Note that two range objects -that compare equal might have different "start", "stop" and "step" -attributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3, -2) == range(0, 4, 2)".) - -Changed in version 3.2: Implement the Sequence ABC. Support slicing -and negative indices. Test "int" objects for membership in constant -time instead of iterating through all items. - -Changed in version 3.3: Define ‘==’ and ‘!=’ to compare range objects -based on the sequence of values they define (instead of comparing -based on object identity).Added the "start", "stop" and "step" -attributes. - -See also: - - * The linspace recipe shows how to implement a lazy version of range - suitable for floating-point applications. -''', - 'typesseq-mutable': r'''Mutable Sequence Types -********************** - -The operations in the following table are defined on mutable sequence -types. The "collections.abc.MutableSequence" ABC is provided to make -it easier to correctly implement these operations on custom sequence -types. - -In the table *s* is an instance of a mutable sequence type, *t* is any -iterable object and *x* is an arbitrary object that meets any type and -value restrictions imposed by *s* (for example, "bytearray" only -accepts integers that meet the value restriction "0 <= x <= 255"). 
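A concrete doctest-style sketch of that "bytearray" value restriction (the ValueError text is CPython's, quoted from memory; treat it as illustrative):

   >>> ba = bytearray(b'abc')
   >>> ba[0] = 255        # satisfies 0 <= x <= 255
   >>> ba[0] = 256        # violates the value restriction
   Traceback (most recent call last):
     ...
   ValueError: byte must be in range(0, 256)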
- -+--------------------------------+----------------------------------+-----------------------+ -| Operation | Result | Notes | -|================================|==================================|=======================| -| "s[i] = x" | item *i* of *s* is replaced by | | -| | *x* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i]" | removes item *i* of *s* | | -+--------------------------------+----------------------------------+-----------------------+ -| "s[i:j] = t" | slice of *s* from *i* to *j* is | | -| | replaced by the contents of the | | -| | iterable *t* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i:j]" | removes the elements of "s[i:j]" | | -| | from the list (same as "s[i:j] = | | -| | []") | | -+--------------------------------+----------------------------------+-----------------------+ -| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) | -| | replaced by those of *t* | | -+--------------------------------+----------------------------------+-----------------------+ -| "del s[i:j:k]" | removes the elements of | | -| | "s[i:j:k]" from the list | | -+--------------------------------+----------------------------------+-----------------------+ -| "s += t" | extends *s* with the contents of | | -| | *t* (for the most part the same | | -| | as "s[len(s):len(s)] = t") | | -+--------------------------------+----------------------------------+-----------------------+ -| "s *= n" | updates *s* with its contents | (2) | -| | repeated *n* times | | -+--------------------------------+----------------------------------+-----------------------+ - -Notes: - -1. If *k* is not equal to "1", *t* must have the same length as the - slice it is replacing. - -2. The value *n* is an integer, or an object implementing - "__index__()". Zero and negative values of *n* clear the sequence. - Items in the sequence are not copied; they are referenced multiple - times, as explained for "s * n" under Common Sequence Operations. - --[ Mutable Sequence Methods ]- - -Mutable sequence types also support the following methods: - -sequence.append(value, /) - - Append *value* to the end of the sequence. This is equivalent to - writing "seq[len(seq):len(seq)] = [value]". - -sequence.clear() - - Added in version 3.3. - - Remove all items from *sequence*. This is equivalent to writing - "del sequence[:]". - -sequence.copy() - - Added in version 3.3. - - Create a shallow copy of *sequence*. This is equivalent to writing - "sequence[:]". - - Hint: - - The "copy()" method is not part of the "MutableSequence" "ABC", - but most concrete mutable sequence types provide it. - -sequence.extend(iterable, /) - - Extend *sequence* with the contents of *iterable*. For the most - part, this is the same as writing "seq[len(seq):len(seq)] = - iterable". - -sequence.insert(index, value, /) - - Insert *value* into *sequence* at the given *index*. This is - equivalent to writing "sequence[index:index] = [value]". - -sequence.pop(index=-1, /) - - Retrieve the item at *index* and also removes it from *sequence*. - By default, the last item in *sequence* is removed and returned. - -sequence.remove(value, /) - - Remove the first item from *sequence* where "sequence[i] == value". - - Raises "ValueError" if *value* is not found in *sequence*. - -sequence.reverse() - - Reverse the items of *sequence* in place. This method maintains - economy of space when reversing a large sequence. 
To remind users - that it operates by side-effect, it returns "None". -''', - 'unary': r'''Unary arithmetic and bitwise operations -*************************************** - -All unary arithmetic and bitwise operations have the same priority: - - u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr - -The unary "-" (minus) operator yields the negation of its numeric -argument; the operation can be overridden with the "__neg__()" special -method. - -The unary "+" (plus) operator yields its numeric argument unchanged; -the operation can be overridden with the "__pos__()" special method. - -The unary "~" (invert) operator yields the bitwise inversion of its -integer argument. The bitwise inversion of "x" is defined as -"-(x+1)". It only applies to integral numbers or to custom objects -that override the "__invert__()" special method. - -In all three cases, if the argument does not have the proper type, a -"TypeError" exception is raised. -''', - 'while': r'''The "while" statement -********************* - -The "while" statement is used for repeated execution as long as an -expression is true: - - while_stmt ::= "while" assignment_expression ":" suite - ["else" ":" suite] - -This repeatedly tests the expression and, if it is true, executes the -first suite; if the expression is false (which may be the first time -it is tested) the suite of the "else" clause, if present, is executed -and the loop terminates. - -A "break" statement executed in the first suite terminates the loop -without executing the "else" clause’s suite. A "continue" statement -executed in the first suite skips the rest of the suite and goes back -to testing the expression. -''', - 'with': r'''The "with" statement -******************** - -The "with" statement is used to wrap the execution of a block with -methods defined by a context manager (see section With Statement -Context Managers). This allows common "try"…"except"…"finally" usage -patterns to be encapsulated for convenient reuse. - - with_stmt ::= "with" ( "(" with_stmt_contents ","? ")" | with_stmt_contents ) ":" suite - with_stmt_contents ::= with_item ("," with_item)* - with_item ::= expression ["as" target] - -The execution of the "with" statement with one “item” proceeds as -follows: - -1. The context expression (the expression given in the "with_item") is - evaluated to obtain a context manager. - -2. The context manager’s "__enter__()" is loaded for later use. - -3. The context manager’s "__exit__()" is loaded for later use. - -4. The context manager’s "__enter__()" method is invoked. - -5. If a target was included in the "with" statement, the return value - from "__enter__()" is assigned to it. - - Note: - - The "with" statement guarantees that if the "__enter__()" method - returns without an error, then "__exit__()" will always be - called. Thus, if an error occurs during the assignment to the - target list, it will be treated the same as an error occurring - within the suite would be. See step 7 below. - -6. The suite is executed. - -7. The context manager’s "__exit__()" method is invoked. If an - exception caused the suite to be exited, its type, value, and - traceback are passed as arguments to "__exit__()". Otherwise, three - "None" arguments are supplied. - - If the suite was exited due to an exception, and the return value - from the "__exit__()" method was false, the exception is reraised. - If the return value was true, the exception is suppressed, and - execution continues with the statement following the "with" - statement. 
- - If the suite was exited for any reason other than an exception, the - return value from "__exit__()" is ignored, and execution proceeds - at the normal location for the kind of exit that was taken. - -The following code: - - with EXPRESSION as TARGET: - SUITE - -is semantically equivalent to: - - manager = (EXPRESSION) - enter = manager.__enter__ - exit = manager.__exit__ - value = enter() - hit_except = False - - try: - TARGET = value - SUITE - except: - hit_except = True - if not exit(*sys.exc_info()): - raise - finally: - if not hit_except: - exit(None, None, None) - -except that implicit special method lookup is used for "__enter__()" -and "__exit__()". - -With more than one item, the context managers are processed as if -multiple "with" statements were nested: - - with A() as a, B() as b: - SUITE - -is semantically equivalent to: - - with A() as a: - with B() as b: - SUITE - -You can also write multi-item context managers in multiple lines if -the items are surrounded by parentheses. For example: - - with ( - A() as a, - B() as b, - ): - SUITE - -Changed in version 3.1: Support for multiple context expressions. - -Changed in version 3.10: Support for using grouping parentheses to -break the statement in multiple lines. - -See also: - - **PEP 343** - The “with” statement - The specification, background, and examples for the Python "with" - statement. -''', - 'yield': r'''The "yield" statement -********************* - - yield_stmt ::= yield_expression - -A "yield" statement is semantically equivalent to a yield expression. -The "yield" statement can be used to omit the parentheses that would -otherwise be required in the equivalent yield expression statement. -For example, the yield statements - - yield - yield from - -are equivalent to the yield expression statements - - (yield ) - (yield from ) - -Yield expressions and statements are only used when defining a -*generator* function, and are only used in the body of the generator -function. Using "yield" in a function definition is sufficient to -cause that definition to create a generator function instead of a -normal function. - -For full details of "yield" semantics, refer to the Yield expressions -section. -''', -} diff --git a/Python313_13_x86_Template/Lib/quopri.py b/Python313_13_x86_Template/Lib/quopri.py deleted file mode 100644 index f36cf7b3..00000000 --- a/Python313_13_x86_Template/Lib/quopri.py +++ /dev/null @@ -1,237 +0,0 @@ -#! /usr/bin/env python3 - -"""Conversions to/from quoted-printable transport encoding as per RFC 1521.""" - -# (Dec 1991 version). - -__all__ = ["encode", "decode", "encodestring", "decodestring"] - -ESCAPE = b'=' -MAXLINESIZE = 76 -HEX = b'0123456789ABCDEF' -EMPTYSTRING = b'' - -try: - from binascii import a2b_qp, b2a_qp -except ImportError: - a2b_qp = None - b2a_qp = None - - -def needsquoting(c, quotetabs, header): - """Decide whether a particular byte ordinal needs to be quoted. - - The 'quotetabs' flag indicates whether embedded tabs and spaces should be - quoted. Note that line-ending tabs and spaces are always encoded, as per - RFC 1521. 
- """ - assert isinstance(c, bytes) - if c in b' \t': - return quotetabs - # if header, we have to escape _ because _ is used to escape space - if c == b'_': - return header - return c == ESCAPE or not (b' ' <= c <= b'~') - -def quote(c): - """Quote a single character.""" - assert isinstance(c, bytes) and len(c)==1 - c = ord(c) - return ESCAPE + bytes((HEX[c//16], HEX[c%16])) - - - -def encode(input, output, quotetabs, header=False): - """Read 'input', apply quoted-printable encoding, and write to 'output'. - - 'input' and 'output' are binary file objects. The 'quotetabs' flag - indicates whether embedded tabs and spaces should be quoted. Note that - line-ending tabs and spaces are always encoded, as per RFC 1521. - The 'header' flag indicates whether we are encoding spaces as _ as per RFC - 1522.""" - - if b2a_qp is not None: - data = input.read() - odata = b2a_qp(data, quotetabs=quotetabs, header=header) - output.write(odata) - return - - def write(s, output=output, lineEnd=b'\n'): - # RFC 1521 requires that the line ending in a space or tab must have - # that trailing character encoded. - if s and s[-1:] in b' \t': - output.write(s[:-1] + quote(s[-1:]) + lineEnd) - elif s == b'.': - output.write(quote(s) + lineEnd) - else: - output.write(s + lineEnd) - - prevline = None - while line := input.readline(): - outline = [] - # Strip off any readline induced trailing newline - stripped = b'' - if line[-1:] == b'\n': - line = line[:-1] - stripped = b'\n' - # Calculate the un-length-limited encoded line - for c in line: - c = bytes((c,)) - if needsquoting(c, quotetabs, header): - c = quote(c) - if header and c == b' ': - outline.append(b'_') - else: - outline.append(c) - # First, write out the previous line - if prevline is not None: - write(prevline) - # Now see if we need any soft line breaks because of RFC-imposed - # length limitations. Then do the thisline->prevline dance. - thisline = EMPTYSTRING.join(outline) - while len(thisline) > MAXLINESIZE: - # Don't forget to include the soft line break `=' sign in the - # length calculation! - write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n') - thisline = thisline[MAXLINESIZE-1:] - # Write out the current line - prevline = thisline - # Write out the last line, without a trailing newline - if prevline is not None: - write(prevline, lineEnd=stripped) - -def encodestring(s, quotetabs=False, header=False): - if b2a_qp is not None: - return b2a_qp(s, quotetabs=quotetabs, header=header) - from io import BytesIO - infp = BytesIO(s) - outfp = BytesIO() - encode(infp, outfp, quotetabs, header) - return outfp.getvalue() - - - -def decode(input, output, header=False): - """Read 'input', apply quoted-printable decoding, and write to 'output'. - 'input' and 'output' are binary file objects. 
- If 'header' is true, decode underscore as space (per RFC 1522).""" - - if a2b_qp is not None: - data = input.read() - odata = a2b_qp(data, header=header) - output.write(odata) - return - - new = b'' - while line := input.readline(): - i, n = 0, len(line) - if n > 0 and line[n-1:n] == b'\n': - partial = 0; n = n-1 - # Strip trailing whitespace - while n > 0 and line[n-1:n] in b" \t\r": - n = n-1 - else: - partial = 1 - while i < n: - c = line[i:i+1] - if c == b'_' and header: - new = new + b' '; i = i+1 - elif c != ESCAPE: - new = new + c; i = i+1 - elif i+1 == n and not partial: - partial = 1; break - elif i+1 < n and line[i+1:i+2] == ESCAPE: - new = new + ESCAPE; i = i+2 - elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]): - new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3 - else: # Bad escape sequence -- leave it in - new = new + c; i = i+1 - if not partial: - output.write(new + b'\n') - new = b'' - if new: - output.write(new) - -def decodestring(s, header=False): - if a2b_qp is not None: - return a2b_qp(s, header=header) - from io import BytesIO - infp = BytesIO(s) - outfp = BytesIO() - decode(infp, outfp, header=header) - return outfp.getvalue() - - - -# Other helper functions -def ishex(c): - """Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII.""" - assert isinstance(c, bytes) - return b'0' <= c <= b'9' or b'a' <= c <= b'f' or b'A' <= c <= b'F' - -def unhex(s): - """Get the integer value of a hexadecimal number.""" - bits = 0 - for c in s: - c = bytes((c,)) - if b'0' <= c <= b'9': - i = ord('0') - elif b'a' <= c <= b'f': - i = ord('a')-10 - elif b'A' <= c <= b'F': - i = ord(b'A')-10 - else: - assert False, "non-hex digit "+repr(c) - bits = bits*16 + (ord(c) - i) - return bits - - - -def main(): - import sys - import getopt - try: - opts, args = getopt.getopt(sys.argv[1:], 'td') - except getopt.error as msg: - sys.stdout = sys.stderr - print(msg) - print("usage: quopri [-t | -d] [file] ...") - print("-t: quote tabs") - print("-d: decode; default encode") - sys.exit(2) - deco = False - tabs = False - for o, a in opts: - if o == '-t': tabs = True - if o == '-d': deco = True - if tabs and deco: - sys.stdout = sys.stderr - print("-t and -d are mutually exclusive") - sys.exit(2) - if not args: args = ['-'] - sts = 0 - for file in args: - if file == '-': - fp = sys.stdin.buffer - else: - try: - fp = open(file, "rb") - except OSError as msg: - sys.stderr.write("%s: can't open (%s)\n" % (file, msg)) - sts = 1 - continue - try: - if deco: - decode(fp, sys.stdout.buffer) - else: - encode(fp, sys.stdout.buffer, tabs) - finally: - if file != '-': - fp.close() - if sts: - sys.exit(sts) - - - -if __name__ == '__main__': - main() diff --git a/Python313_13_x86_Template/Lib/random.py b/Python313_13_x86_Template/Lib/random.py deleted file mode 100644 index 1abcae77..00000000 --- a/Python313_13_x86_Template/Lib/random.py +++ /dev/null @@ -1,1070 +0,0 @@ -"""Random variable generators. 
- - bytes - ----- - uniform bytes (values between 0 and 255) - - integers - -------- - uniform within range - - sequences - --------- - pick random element - pick random sample - pick weighted random sample - generate random permutation - - distributions on the real line: - ------------------------------ - uniform - triangular - normal (Gaussian) - lognormal - negative exponential - gamma - beta - pareto - Weibull - - distributions on the circle (angles 0 to 2pi) - --------------------------------------------- - circular uniform - von Mises - - discrete distributions - ---------------------- - binomial - - -General notes on the underlying Mersenne Twister core generator: - -* The period is 2**19937-1. -* It is one of the most extensively tested generators in existence. -* The random() method is implemented in C, executes in a single Python step, - and is, therefore, threadsafe. - -""" - -# Translated by Guido van Rossum from C source provided by -# Adrian Baddeley. Adapted by Raymond Hettinger for use with -# the Mersenne Twister and os.urandom() core generators. - -from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil -from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin -from math import tau as TWOPI, floor as _floor, isfinite as _isfinite -from math import lgamma as _lgamma, fabs as _fabs, log2 as _log2 -from os import urandom as _urandom -from _collections_abc import Sequence as _Sequence -from operator import index as _index -from itertools import accumulate as _accumulate, repeat as _repeat -from bisect import bisect as _bisect -import os as _os -import _random - -__all__ = [ - "Random", - "SystemRandom", - "betavariate", - "binomialvariate", - "choice", - "choices", - "expovariate", - "gammavariate", - "gauss", - "getrandbits", - "getstate", - "lognormvariate", - "normalvariate", - "paretovariate", - "randbytes", - "randint", - "random", - "randrange", - "sample", - "seed", - "setstate", - "shuffle", - "triangular", - "uniform", - "vonmisesvariate", - "weibullvariate", -] - -NV_MAGICCONST = 4 * _exp(-0.5) / _sqrt(2.0) -LOG4 = _log(4.0) -SG_MAGICCONST = 1.0 + _log(4.5) -BPF = 53 # Number of bits in a float -RECIP_BPF = 2 ** -BPF -_ONE = 1 -_sha512 = None - - -class Random(_random.Random): - """Random number generator base class used by bound module functions. - - Used to instantiate instances of Random to get generators that don't - share state. - - Class Random can also be subclassed if you want to use a different basic - generator of your own devising: in that case, override the following - methods: random(), seed(), getstate(), and setstate(). - Optionally, implement a getrandbits() method so that randrange() - can cover arbitrarily large ranges. - - """ - - VERSION = 3 # used by getstate/setstate - - def __init__(self, x=None): - """Initialize an instance. - - Optional argument x controls seeding, as for Random.seed(). - """ - - self.seed(x) - self.gauss_next = None - - def seed(self, a=None, version=2): - """Initialize internal state from a seed. - - The only supported seed types are None, int, float, - str, bytes, and bytearray. - - None or no argument seeds from current time or from an operating - system specific randomness source if available. - - If *a* is an int, all bits are used. - - For version 2 (the default), all of the bits are used if *a* is a str, - bytes, or bytearray. 
For version 1 (provided for reproducing random - sequences from older versions of Python), the algorithm for str and - bytes generates a narrower range of seeds. - - """ - - if version == 1 and isinstance(a, (str, bytes)): - a = a.decode('latin-1') if isinstance(a, bytes) else a - x = ord(a[0]) << 7 if a else 0 - for c in map(ord, a): - x = ((1000003 * x) ^ c) & 0xFFFFFFFFFFFFFFFF - x ^= len(a) - a = -2 if x == -1 else x - - elif version == 2 and isinstance(a, (str, bytes, bytearray)): - global _sha512 - if _sha512 is None: - try: - # hashlib is pretty heavy to load, try lean internal - # module first - from _sha2 import sha512 as _sha512 - except ImportError: - # fallback to official implementation - from hashlib import sha512 as _sha512 - - if isinstance(a, str): - a = a.encode() - a = int.from_bytes(a + _sha512(a).digest()) - - elif not isinstance(a, (type(None), int, float, str, bytes, bytearray)): - raise TypeError('The only supported seed types are:\n' - 'None, int, float, str, bytes, and bytearray.') - - super().seed(a) - self.gauss_next = None - - def getstate(self): - """Return internal state; can be passed to setstate() later.""" - return self.VERSION, super().getstate(), self.gauss_next - - def setstate(self, state): - """Restore internal state from object returned by getstate().""" - version = state[0] - if version == 3: - version, internalstate, self.gauss_next = state - super().setstate(internalstate) - elif version == 2: - version, internalstate, self.gauss_next = state - # In version 2, the state was saved as signed ints, which causes - # inconsistencies between 32/64-bit systems. The state is - # really unsigned 32-bit ints, so we convert negative ints from - # version 2 to positive longs for version 3. - try: - internalstate = tuple(x % (2 ** 32) for x in internalstate) - except ValueError as e: - raise TypeError from e - super().setstate(internalstate) - else: - raise ValueError("state with version %s passed to " - "Random.setstate() of version %s" % - (version, self.VERSION)) - - - ## ------------------------------------------------------- - ## ---- Methods below this point do not need to be overridden or extended - ## ---- when subclassing for the purpose of using a different core generator. - - - ## -------------------- pickle support ------------------- - - # Issue 17489: Since __reduce__ was defined to fix #759889 this is no - # longer called; we leave it here because it has been here since random was - # rewritten back in 2001 and why risk breaking something. - def __getstate__(self): # for pickle - return self.getstate() - - def __setstate__(self, state): # for pickle - self.setstate(state) - - def __reduce__(self): - return self.__class__, (), self.getstate() - - - ## ---- internal support method for evenly distributed integers ---- - - def __init_subclass__(cls, /, **kwargs): - """Control how subclasses generate random integers. - - The algorithm a subclass can use depends on the random() and/or - getrandbits() implementation available to it and determines - whether it can generate random integers from arbitrarily large - ranges. - """ - - for c in cls.__mro__: - if '_randbelow' in c.__dict__: - # just inherit it - break - if 'getrandbits' in c.__dict__: - cls._randbelow = cls._randbelow_with_getrandbits - break - if 'random' in c.__dict__: - cls._randbelow = cls._randbelow_without_getrandbits - break - - def _randbelow_with_getrandbits(self, n): - "Return a random int in the range [0,n). Defined for n > 0." 
- - getrandbits = self.getrandbits - k = n.bit_length() - r = getrandbits(k) # 0 <= r < 2**k - while r >= n: - r = getrandbits(k) - return r - - def _randbelow_without_getrandbits(self, n, maxsize=1<<BPF): - """Return a random int in the range [0,n). Defined for n > 0. - - The implementation does not use getrandbits, but only random. - """ - - random = self.random - if n >= maxsize: - from warnings import warn - warn("Underlying random() generator does not supply \n" - "enough bits to choose from a population range this large.\n" - "To remove the range limitation, add a getrandbits() method.") - return _floor(random() * n) - rem = maxsize % n - limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0 - r = random() - while r >= limit: - r = random() - return _floor(r * maxsize) % n - - _randbelow = _randbelow_with_getrandbits - - - ## -------------------------------------------------------- - ## ---- Methods below this point generate custom distributions - ## ---- based on the methods defined above. They do not - ## ---- directly touch the underlying generator and only - ## ---- access randomness through the methods: random(), - ## ---- getrandbits(), or _randbelow(). - - - ## -------------------- bytes methods --------------------- - - def randbytes(self, n): - """Generate n random bytes.""" - return self.getrandbits(n * 8).to_bytes(n, 'little') - - - ## -------------------- integer methods ------------------- - - def randrange(self, start, stop=None, step=_ONE): - """Choose a random item from range(stop) or range(start, stop[, step]). - - Roughly equivalent to ``choice(range(start, stop, step))`` but - supports arbitrarily large ranges and is optimized for common cases. - - """ - - # This code is a bit messy to make it fast for the - # common case while still doing adequate error checking. - istart = _index(start) - if stop is None: - # We don't check for "step != 1" because it hasn't been - # type checked and converted to an integer yet. - if step is not _ONE: - raise TypeError("Missing a non-None stop argument") - if istart > 0: - return self._randbelow(istart) - raise ValueError("empty range for randrange()") - - # Stop argument supplied. - istop = _index(stop) - width = istop - istart - istep = _index(step) - # Fast path. - if istep == 1: - if width > 0: - return istart + self._randbelow(width) - raise ValueError(f"empty range in randrange({start}, {stop})") - - # Non-unit step argument supplied. - if istep > 0: - n = (width + istep - 1) // istep - elif istep < 0: - n = (width + istep + 1) // istep - else: - raise ValueError("zero step for randrange()") - if n <= 0: - raise ValueError(f"empty range in randrange({start}, {stop}, {step})") - return istart + istep * self._randbelow(n) - - def randint(self, a, b): - """Return random integer in range [a, b], including both end points. - """ - - return self.randrange(a, b+1) - - - ## -------------------- sequence methods ------------------- - - def choice(self, seq): - """Choose a random element from a non-empty sequence.""" - - # As an accommodation for NumPy, we don't use "if not seq" - # because bool(numpy.array()) raises a ValueError.
- if not len(seq): - raise IndexError('Cannot choose from an empty sequence') - return seq[self._randbelow(len(seq))] - - def shuffle(self, x): - """Shuffle list x in place, and return None.""" - - randbelow = self._randbelow - for i in reversed(range(1, len(x))): - # pick an element in x[:i+1] with which to exchange x[i] - j = randbelow(i + 1) - x[i], x[j] = x[j], x[i] - - def sample(self, population, k, *, counts=None): - """Chooses k unique random elements from a population sequence. - - Returns a new list containing elements from the population while - leaving the original population unchanged. The resulting list is - in selection order so that all sub-slices will also be valid random - samples. This allows raffle winners (the sample) to be partitioned - into grand prize and second place winners (the subslices). - - Members of the population need not be hashable or unique. If the - population contains repeats, then each occurrence is a possible - selection in the sample. - - Repeated elements can be specified one at a time or with the optional - counts parameter. For example: - - sample(['red', 'blue'], counts=[4, 2], k=5) - - is equivalent to: - - sample(['red', 'red', 'red', 'red', 'blue', 'blue'], k=5) - - To choose a sample from a range of integers, use range() for the - population argument. This is especially fast and space efficient - for sampling from a large population: - - sample(range(10000000), 60) - - """ - - # Sampling without replacement entails tracking either potential - # selections (the pool) in a list or previous selections in a set. - - # When the number of selections is small compared to the - # population, then tracking selections is efficient, requiring - # only a small set and an occasional reselection. For - # a larger number of selections, the pool tracking method is - # preferred since the list takes less space than the - # set and it doesn't suffer from frequent reselections. - - # The number of calls to _randbelow() is kept at or near k, the - # theoretical minimum. This is important because running time - # is dominated by _randbelow() and because it extracts the - # least entropy from the underlying random number generators. - - # Memory requirements are kept to the smaller of a k-length - # set or an n-length list. - - # There are other sampling algorithms that do not require - # auxiliary memory, but they were rejected because they made - # too many calls to _randbelow(), making them slower and - # causing them to eat more entropy than necessary. - - if not isinstance(population, _Sequence): - raise TypeError("Population must be a sequence. " - "For dicts or sets, use sorted(d).") - n = len(population) - if counts is not None: - cum_counts = list(_accumulate(counts)) - if len(cum_counts) != n: - raise ValueError('The number of counts does not match the population') - total = cum_counts.pop() if cum_counts else 0 - if not isinstance(total, int): - raise TypeError('Counts must be integers') - if total < 0: - raise ValueError('Counts must be non-negative') - selections = self.sample(range(total), k=k) - bisect = _bisect - return [population[bisect(cum_counts, s)] for s in selections] - randbelow = self._randbelow - if not 0 <= k <= n: - raise ValueError("Sample larger than population or is negative") - result = [None] * k - setsize = 21 # size of a small set minus size of an empty list - if k > 5: - setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets - if n <= setsize: - # An n-length list is smaller than a k-length set. 
- # Invariant: non-selected at pool[0 : n-i] - pool = list(population) - for i in range(k): - j = randbelow(n - i) - result[i] = pool[j] - pool[j] = pool[n - i - 1] # move non-selected item into vacancy - else: - selected = set() - selected_add = selected.add - for i in range(k): - j = randbelow(n) - while j in selected: - j = randbelow(n) - selected_add(j) - result[i] = population[j] - return result - - def choices(self, population, weights=None, *, cum_weights=None, k=1): - """Return a k sized list of population elements chosen with replacement. - - If the relative weights or cumulative weights are not specified, - the selections are made with equal probability. - - """ - random = self.random - n = len(population) - if cum_weights is None: - if weights is None: - floor = _floor - n += 0.0 # convert to float for a small speed improvement - return [population[floor(random() * n)] for i in _repeat(None, k)] - try: - cum_weights = list(_accumulate(weights)) - except TypeError: - if not isinstance(weights, int): - raise - k = weights - raise TypeError( - f'The number of choices must be a keyword argument: {k=}' - ) from None - elif weights is not None: - raise TypeError('Cannot specify both weights and cumulative weights') - if len(cum_weights) != n: - raise ValueError('The number of weights does not match the population') - total = cum_weights[-1] + 0.0 # convert to float - if total <= 0.0: - raise ValueError('Total of weights must be greater than zero') - if not _isfinite(total): - raise ValueError('Total of weights must be finite') - bisect = _bisect - hi = n - 1 - return [population[bisect(cum_weights, random() * total, 0, hi)] - for i in _repeat(None, k)] - - - ## -------------------- real-valued distributions ------------------- - - def uniform(self, a, b): - """Get a random number in the range [a, b) or [a, b] depending on rounding. - - The mean (expected value) and variance of the random variable are: - - E[X] = (a + b) / 2 - Var[X] = (b - a) ** 2 / 12 - - """ - return a + (b - a) * self.random() - - def triangular(self, low=0.0, high=1.0, mode=None): - """Triangular distribution. - - Continuous distribution bounded by given lower and upper limits, - and having a given mode value in-between. - - http://en.wikipedia.org/wiki/Triangular_distribution - - The mean (expected value) and variance of the random variable are: - - E[X] = (low + high + mode) / 3 - Var[X] = (low**2 + high**2 + mode**2 - low*high - low*mode - high*mode) / 18 - - """ - u = self.random() - try: - c = 0.5 if mode is None else (mode - low) / (high - low) - except ZeroDivisionError: - return low - if u > c: - u = 1.0 - u - c = 1.0 - c - low, high = high, low - return low + (high - low) * _sqrt(u * c) - - def normalvariate(self, mu=0.0, sigma=1.0): - """Normal distribution. - - mu is the mean, and sigma is the standard deviation. - - """ - # Uses Kinderman and Monahan method. Reference: Kinderman, - # A.J. and Monahan, J.F., "Computer generation of random - # variables using the ratio of uniform deviates", ACM Trans - # Math Software, 3, (1977), pp257-260. - - random = self.random - while True: - u1 = random() - u2 = 1.0 - random() - z = NV_MAGICCONST * (u1 - 0.5) / u2 - zz = z * z / 4.0 - if zz <= -_log(u2): - break - return mu + z * sigma - - def gauss(self, mu=0.0, sigma=1.0): - """Gaussian distribution. - - mu is the mean, and sigma is the standard deviation. This is - slightly faster than the normalvariate() function. - - Not thread-safe without a lock around calls. 
- - """ - # When x and y are two variables from [0, 1), uniformly - # distributed, then - # - # cos(2*pi*x)*sqrt(-2*log(1-y)) - # sin(2*pi*x)*sqrt(-2*log(1-y)) - # - # are two *independent* variables with normal distribution - # (mu = 0, sigma = 1). - # (Lambert Meertens) - # (corrected version; bug discovered by Mike Miller, fixed by LM) - - # Multithreading note: When two threads call this function - # simultaneously, it is possible that they will receive the - # same return value. The window is very small though. To - # avoid this, you have to use a lock around all calls. (I - # didn't want to slow this down in the serial case by using a - # lock here.) - - random = self.random - z = self.gauss_next - self.gauss_next = None - if z is None: - x2pi = random() * TWOPI - g2rad = _sqrt(-2.0 * _log(1.0 - random())) - z = _cos(x2pi) * g2rad - self.gauss_next = _sin(x2pi) * g2rad - - return mu + z * sigma - - def lognormvariate(self, mu, sigma): - """Log normal distribution. - - If you take the natural logarithm of this distribution, you'll get a - normal distribution with mean mu and standard deviation sigma. - mu can have any value, and sigma must be greater than zero. - - """ - return _exp(self.normalvariate(mu, sigma)) - - def expovariate(self, lambd=1.0): - """Exponential distribution. - - lambd is 1.0 divided by the desired mean. It should be - nonzero. (The parameter would be called "lambda", but that is - a reserved word in Python.) Returned values range from 0 to - positive infinity if lambd is positive, and from negative - infinity to 0 if lambd is negative. - - The mean (expected value) and variance of the random variable are: - - E[X] = 1 / lambd - Var[X] = 1 / lambd ** 2 - - """ - # we use 1-random() instead of random() to preclude the - # possibility of taking the log of zero. - - return -_log(1.0 - self.random()) / lambd - - def vonmisesvariate(self, mu, kappa): - """Circular data distribution. - - mu is the mean angle, expressed in radians between 0 and 2*pi, and - kappa is the concentration parameter, which must be greater than or - equal to zero. If kappa is equal to zero, this distribution reduces - to a uniform random angle over the range 0 to 2*pi. - - """ - # Based upon an algorithm published in: Fisher, N.I., - # "Statistical Analysis of Circular Data", Cambridge - # University Press, 1993. - - # Thanks to Magnus Kessler for a correction to the - # implementation of step 4. - - random = self.random - if kappa <= 1e-6: - return TWOPI * random() - - s = 0.5 / kappa - r = s + _sqrt(1.0 + s * s) - - while True: - u1 = random() - z = _cos(_pi * u1) - - d = z / (r + z) - u2 = random() - if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): - break - - q = 1.0 / r - f = (q + z) / (1.0 + q * z) - u3 = random() - if u3 > 0.5: - theta = (mu + _acos(f)) % TWOPI - else: - theta = (mu - _acos(f)) % TWOPI - - return theta - - def gammavariate(self, alpha, beta): - """Gamma distribution. Not the gamma function! - - Conditions on the parameters are alpha > 0 and beta > 0. 
- - The probability distribution function is: - - x ** (alpha - 1) * math.exp(-x / beta) - pdf(x) = -------------------------------------- - math.gamma(alpha) * beta ** alpha - - The mean (expected value) and variance of the random variable are: - - E[X] = alpha * beta - Var[X] = alpha * beta ** 2 - - """ - - # Warning: a few older sources define the gamma distribution in terms - # of alpha > -1.0 - if alpha <= 0.0 or beta <= 0.0: - raise ValueError('gammavariate: alpha and beta must be > 0.0') - - random = self.random - if alpha > 1.0: - - # Uses R.C.H. Cheng, "The generation of Gamma - # variables with non-integral shape parameters", - # Applied Statistics, (1977), 26, No. 1, p71-74 - - ainv = _sqrt(2.0 * alpha - 1.0) - bbb = alpha - LOG4 - ccc = alpha + ainv - - while True: - u1 = random() - if not 1e-7 < u1 < 0.9999999: - continue - u2 = 1.0 - random() - v = _log(u1 / (1.0 - u1)) / ainv - x = alpha * _exp(v) - z = u1 * u1 * u2 - r = bbb + ccc * v - x - if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z): - return x * beta - - elif alpha == 1.0: - # expovariate(1/beta) - return -_log(1.0 - random()) * beta - - else: - # alpha is between 0 and 1 (exclusive) - # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle - while True: - u = random() - b = (_e + alpha) / _e - p = b * u - if p <= 1.0: - x = p ** (1.0 / alpha) - else: - x = -_log((b - p) / alpha) - u1 = random() - if p > 1.0: - if u1 <= x ** (alpha - 1.0): - break - elif u1 <= _exp(-x): - break - return x * beta - - def betavariate(self, alpha, beta): - """Beta distribution. - - Conditions on the parameters are alpha > 0 and beta > 0. - Returned values range between 0 and 1. - - The mean (expected value) and variance of the random variable are: - - E[X] = alpha / (alpha + beta) - Var[X] = alpha * beta / ((alpha + beta)**2 * (alpha + beta + 1)) - - """ - ## See - ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html - ## for Ivan Frohne's insightful analysis of why the original implementation: - ## - ## def betavariate(self, alpha, beta): - ## # Discrete Event Simulation in C, pp 87-88. - ## - ## y = self.expovariate(alpha) - ## z = self.expovariate(1.0/beta) - ## return z/(y+z) - ## - ## was dead wrong, and how it probably got that way. - - # This version due to Janne Sinkkonen, and matches all the std - # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). - y = self.gammavariate(alpha, 1.0) - if y: - return y / (y + self.gammavariate(beta, 1.0)) - return 0.0 - - def paretovariate(self, alpha): - """Pareto distribution. alpha is the shape parameter.""" - # Jain, pg. 495 - - u = 1.0 - self.random() - return u ** (-1.0 / alpha) - - def weibullvariate(self, alpha, beta): - """Weibull distribution. - - alpha is the scale parameter and beta is the shape parameter. - - """ - # Jain, pg. 499; bug fix courtesy Bill Arms - - u = 1.0 - self.random() - return alpha * (-_log(u)) ** (1.0 / beta) - - - ## -------------------- discrete distributions --------------------- - - def binomialvariate(self, n=1, p=0.5): - """Binomial random variable. 
- - Gives the number of successes for *n* independent trials - with the probability of success in each trial being *p*: - - sum(random() < p for i in range(n)) - - Returns an integer in the range: 0 <= X <= n - - The mean (expected value) and variance of the random variable are: - - E[X] = n * p - Var[x] = n * p * (1 - p) - - """ - # Error check inputs and handle edge cases - if n < 0: - raise ValueError("n must be non-negative") - if p <= 0.0 or p >= 1.0: - if p == 0.0: - return 0 - if p == 1.0: - return n - raise ValueError("p must be in the range 0.0 <= p <= 1.0") - - random = self.random - - # Fast path for a common case - if n == 1: - return _index(random() < p) - - # Exploit symmetry to establish: p <= 0.5 - if p > 0.5: - return n - self.binomialvariate(n, 1.0 - p) - - if n * p < 10.0: - # BG: Geometric method by Devroye with running time of O(np). - # https://dl.acm.org/doi/pdf/10.1145/42372.42381 - x = y = 0 - c = _log2(1.0 - p) - if not c: - return x - while True: - y += _floor(_log2(random()) / c) + 1 - if y > n: - return x - x += 1 - - # BTRS: Transformed rejection with squeeze method by Wolfgang Hörmann - # https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.47.8407&rep=rep1&type=pdf - assert n*p >= 10.0 and p <= 0.5 - setup_complete = False - - spq = _sqrt(n * p * (1.0 - p)) # Standard deviation of the distribution - b = 1.15 + 2.53 * spq - a = -0.0873 + 0.0248 * b + 0.01 * p - c = n * p + 0.5 - vr = 0.92 - 4.2 / b - - while True: - - u = random() - u -= 0.5 - us = 0.5 - _fabs(u) - k = _floor((2.0 * a / us + b) * u + c) - if k < 0 or k > n: - continue - - # The early-out "squeeze" test substantially reduces - # the number of acceptance condition evaluations. - v = random() - if us >= 0.07 and v <= vr: - return k - - # Acceptance-rejection test. - # Note, the original paper erroneously omits the call to log(v) - # when comparing to the log of the rescaled binomial distribution. - if not setup_complete: - alpha = (2.83 + 5.1 / b) * spq - lpq = _log(p / (1.0 - p)) - m = _floor((n + 1) * p) # Mode of the distribution - h = _lgamma(m + 1) + _lgamma(n - m + 1) - setup_complete = True # Only needs to be done once - v *= alpha / (a / (us * us) + b) - if _log(v) <= h - _lgamma(k + 1) - _lgamma(n - k + 1) + (k - m) * lpq: - return k - - -## ------------------------------------------------------------------ -## --------------- Operating System Random Source ------------------ - - -class SystemRandom(Random): - """Alternate random number generator using sources provided - by the operating system (such as /dev/urandom on Unix or - CryptGenRandom on Windows). - - Not available on all systems (see os.urandom() for details). - - """ - - def random(self): - """Get the next random number in the range 0.0 <= X < 1.0.""" - return (int.from_bytes(_urandom(7)) >> 3) * RECIP_BPF - - def getrandbits(self, k): - """getrandbits(k) -> x. Generates an int with k random bits.""" - if k < 0: - raise ValueError('number of bits must be non-negative') - numbytes = (k + 7) // 8 # bits / 8 and rounded up - x = int.from_bytes(_urandom(numbytes)) - return x >> (numbytes * 8 - k) # trim excess bits - - def randbytes(self, n): - """Generate n random bytes.""" - # os.urandom(n) fails with ValueError for n < 0 - # and returns an empty bytes string for n == 0. - return _urandom(n) - - def seed(self, *args, **kwds): - "Stub method. Not used for a system random number generator." - return None - - def _notimplemented(self, *args, **kwds): - "Method should not be called for a system random number generator." 
- raise NotImplementedError('System entropy source does not have state.') - getstate = setstate = _notimplemented - - -# ---------------------------------------------------------------------- -# Create one instance, seeded from current time, and export its methods -# as module-level functions. The functions share state across all uses -# (both in the user's code and in the Python libraries), but that's fine -# for most programs and is easier for the casual user than making them -# instantiate their own Random() instance. - -_inst = Random() -seed = _inst.seed -random = _inst.random -uniform = _inst.uniform -triangular = _inst.triangular -randint = _inst.randint -choice = _inst.choice -randrange = _inst.randrange -sample = _inst.sample -shuffle = _inst.shuffle -choices = _inst.choices -normalvariate = _inst.normalvariate -lognormvariate = _inst.lognormvariate -expovariate = _inst.expovariate -vonmisesvariate = _inst.vonmisesvariate -gammavariate = _inst.gammavariate -gauss = _inst.gauss -betavariate = _inst.betavariate -binomialvariate = _inst.binomialvariate -paretovariate = _inst.paretovariate -weibullvariate = _inst.weibullvariate -getstate = _inst.getstate -setstate = _inst.setstate -getrandbits = _inst.getrandbits -randbytes = _inst.randbytes - - -## ------------------------------------------------------ -## ----------------- test program ----------------------- - -def _test_generator(n, func, args): - from statistics import stdev, fmean as mean - from time import perf_counter - - t0 = perf_counter() - data = [func(*args) for i in _repeat(None, n)] - t1 = perf_counter() - - xbar = mean(data) - sigma = stdev(data, xbar) - low = min(data) - high = max(data) - - print(f'{t1 - t0:.3f} sec, {n} times {func.__name__}{args!r}') - print('avg %g, stddev %g, min %g, max %g\n' % (xbar, sigma, low, high)) - - -def _test(N=10_000): - _test_generator(N, random, ()) - _test_generator(N, normalvariate, (0.0, 1.0)) - _test_generator(N, lognormvariate, (0.0, 1.0)) - _test_generator(N, vonmisesvariate, (0.0, 1.0)) - _test_generator(N, binomialvariate, (15, 0.60)) - _test_generator(N, binomialvariate, (100, 0.75)) - _test_generator(N, gammavariate, (0.01, 1.0)) - _test_generator(N, gammavariate, (0.1, 1.0)) - _test_generator(N, gammavariate, (0.1, 2.0)) - _test_generator(N, gammavariate, (0.5, 1.0)) - _test_generator(N, gammavariate, (0.9, 1.0)) - _test_generator(N, gammavariate, (1.0, 1.0)) - _test_generator(N, gammavariate, (2.0, 1.0)) - _test_generator(N, gammavariate, (20.0, 1.0)) - _test_generator(N, gammavariate, (200.0, 1.0)) - _test_generator(N, gauss, (0.0, 1.0)) - _test_generator(N, betavariate, (3.0, 3.0)) - _test_generator(N, triangular, (0.0, 1.0, 1.0 / 3.0)) - - -## ------------------------------------------------------ -## ------------------ fork support --------------------- - -if hasattr(_os, "fork"): - _os.register_at_fork(after_in_child=_inst.seed) - - -# ------------------------------------------------------ -# -------------- command-line interface ---------------- - - -def _parse_args(arg_list: list[str] | None): - import argparse - parser = argparse.ArgumentParser( - formatter_class=argparse.RawTextHelpFormatter) - group = parser.add_mutually_exclusive_group() - group.add_argument( - "-c", "--choice", nargs="+", - help="print a random choice") - group.add_argument( - "-i", "--integer", type=int, metavar="N", - help="print a random integer between 1 and N inclusive") - group.add_argument( - "-f", "--float", type=float, metavar="N", - help="print a random floating-point number between 
0 and N inclusive") - group.add_argument( - "--test", type=int, const=10_000, nargs="?", - help=argparse.SUPPRESS) - parser.add_argument("input", nargs="*", - help="""\ -if no options given, output depends on the input - string or multiple: same as --choice - integer: same as --integer - float: same as --float""") - args = parser.parse_args(arg_list) - return args, parser.format_help() - - -def main(arg_list: list[str] | None = None) -> int | str: - args, help_text = _parse_args(arg_list) - - # Explicit arguments - if args.choice: - return choice(args.choice) - - if args.integer is not None: - return randint(1, args.integer) - - if args.float is not None: - return uniform(0, args.float) - - if args.test: - _test(args.test) - return "" - - # No explicit argument, select based on input - if len(args.input) == 1: - val = args.input[0] - try: - # Is it an integer? - val = int(val) - return randint(1, val) - except ValueError: - try: - # Is it a float? - val = float(val) - return uniform(0, val) - except ValueError: - # Split in case of space-separated string: "a b c" - return choice(val.split()) - - if len(args.input) >= 2: - return choice(args.input) - - return help_text - - -if __name__ == '__main__': - print(main()) diff --git a/Python313_13_x86_Template/Lib/re/__init__.py b/Python313_13_x86_Template/Lib/re/__init__.py deleted file mode 100644 index 7e8abbf6..00000000 --- a/Python313_13_x86_Template/Lib/re/__init__.py +++ /dev/null @@ -1,428 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# re-compatible interface for the sre matching engine -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# This version of the SRE library can be redistributed under CNRI's -# Python 1.6 license. For any other use, please contact Secret Labs -# AB (info@pythonware.com). -# -# Portions of this engine have been developed in cooperation with -# CNRI. Hewlett-Packard provided funding for 1.6 integration and -# other compatibility work. -# - -r"""Support for regular expressions (RE). - -This module provides regular expression matching operations similar to -those found in Perl. It supports both 8-bit and Unicode strings; both -the pattern and the strings being processed can contain null bytes and -characters outside the US ASCII range. - -Regular expressions can contain both special and ordinary characters. -Most ordinary characters, like "A", "a", or "0", are the simplest -regular expressions; they simply match themselves. You can -concatenate ordinary characters, so last matches the string 'last'. - -The special characters are: - "." Matches any character except a newline. - "^" Matches the start of the string. - "$" Matches the end of the string or just before the newline at - the end of the string. - "*" Matches 0 or more (greedy) repetitions of the preceding RE. - Greedy means that it will match as many repetitions as possible. - "+" Matches 1 or more (greedy) repetitions of the preceding RE. - "?" Matches 0 or 1 (greedy) of the preceding RE. - *?,+?,?? Non-greedy versions of the previous three special characters. - {m,n} Matches from m to n repetitions of the preceding RE. - {m,n}? Non-greedy version of the above. - "\\" Either escapes special characters or signals a special sequence. - [] Indicates a set of characters. - A "^" as the first character indicates a complementing set. - "|" A|B, creates an RE that will match either A or B. - (...) Matches the RE inside the parentheses. - The contents can be retrieved or matched later in the string. 
- (?aiLmsux) The letters set the corresponding flags defined below. - (?:...) Non-grouping version of regular parentheses. - (?P<name>...) The substring matched by the group is accessible by name. - (?P=name) Matches the text matched earlier by the group named name. - (?#...) A comment; ignored. - (?=...) Matches if ... matches next, but doesn't consume the string. - (?!...) Matches if ... doesn't match next. - (?<=...) Matches if preceded by ... (must be fixed length). - (?<!...) Matches if not preceded by ... (must be fixed length). [...] - if len(_cache) >= _MAXCACHE: - # Drop the least recently used item. - # next(iter(_cache)) is known to have linear amortized time, - # but it is used here to avoid a dependency from using OrderedDict. - # For the small _MAXCACHE value it doesn't make much of a difference. - try: - del _cache[next(iter(_cache))] - except (StopIteration, RuntimeError, KeyError): - pass - # Append to the end. - _cache[key] = p - - if len(_cache2) >= _MAXCACHE2: - # Drop the oldest item. - try: - del _cache2[next(iter(_cache2))] - except (StopIteration, RuntimeError, KeyError): - pass - _cache2[key] = p - return p - -@functools.lru_cache(_MAXCACHE) -def _compile_template(pattern, repl): - # internal: compile replacement pattern - return _sre.template(pattern, _parser.parse_template(repl, pattern)) - -# register myself for pickling - -import copyreg - -def _pickle(p): - return _compile, (p.pattern, p.flags) - -copyreg.pickle(Pattern, _pickle, _compile) - -# -------------------------------------------------------------------- -# experimental stuff (see python-dev discussions for details) - -class Scanner: - def __init__(self, lexicon, flags=0): - from ._constants import BRANCH, SUBPATTERN - if isinstance(flags, RegexFlag): - flags = flags.value - self.lexicon = lexicon - # combine phrases into a compound pattern - p = [] - s = _parser.State() - s.flags = flags - for phrase, action in lexicon: - gid = s.opengroup() - p.append(_parser.SubPattern(s, [ - (SUBPATTERN, (gid, 0, 0, _parser.parse(phrase, flags))), - ])) - s.closegroup(gid, p[-1]) - p = _parser.SubPattern(s, [(BRANCH, (None, p))]) - self.scanner = _compiler.compile(p) - def scan(self, string): - result = [] - append = result.append - match = self.scanner.scanner(string).match - i = 0 - while True: - m = match() - if not m: - break - j = m.end() - if i == j: - break - action = self.lexicon[m.lastindex-1][1] - if callable(action): - self.match = m - action = action(self, m.group()) - if action is not None: - append(action) - i = j - return result, string[i:] diff --git a/Python313_13_x86_Template/Lib/re/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/re/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 4ca50469..00000000 Binary files a/Python313_13_x86_Template/Lib/re/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/re/__pycache__/_casefix.cpython-313.pyc b/Python313_13_x86_Template/Lib/re/__pycache__/_casefix.cpython-313.pyc deleted file mode 100644 index d6b92328..00000000 Binary files a/Python313_13_x86_Template/Lib/re/__pycache__/_casefix.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/re/__pycache__/_compiler.cpython-313.pyc b/Python313_13_x86_Template/Lib/re/__pycache__/_compiler.cpython-313.pyc deleted file mode 100644 index c9008307..00000000 Binary files a/Python313_13_x86_Template/Lib/re/__pycache__/_compiler.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/re/__pycache__/_constants.cpython-313.pyc
b/Python313_13_x86_Template/Lib/re/__pycache__/_constants.cpython-313.pyc deleted file mode 100644 index f7a439f8..00000000 Binary files a/Python313_13_x86_Template/Lib/re/__pycache__/_constants.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/re/__pycache__/_parser.cpython-313.pyc b/Python313_13_x86_Template/Lib/re/__pycache__/_parser.cpython-313.pyc deleted file mode 100644 index 33ab59a1..00000000 Binary files a/Python313_13_x86_Template/Lib/re/__pycache__/_parser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/re/_compiler.py b/Python313_13_x86_Template/Lib/re/_compiler.py deleted file mode 100644 index 1b1aaa77..00000000 --- a/Python313_13_x86_Template/Lib/re/_compiler.py +++ /dev/null @@ -1,768 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# convert template to internal format -# -# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. -# -# See the __init__.py file for information on usage and redistribution. -# - -"""Internal support module for sre""" - -import _sre -from . import _parser -from ._constants import * -from ._casefix import _EXTRA_CASES - -assert _sre.MAGIC == MAGIC, "SRE module mismatch" - -_LITERAL_CODES = {LITERAL, NOT_LITERAL} -_SUCCESS_CODES = {SUCCESS, FAILURE} -_ASSERT_CODES = {ASSERT, ASSERT_NOT} -_UNIT_CODES = _LITERAL_CODES | {ANY, IN} - -_REPEATING_CODES = { - MIN_REPEAT: (REPEAT, MIN_UNTIL, MIN_REPEAT_ONE), - MAX_REPEAT: (REPEAT, MAX_UNTIL, REPEAT_ONE), - POSSESSIVE_REPEAT: (POSSESSIVE_REPEAT, SUCCESS, POSSESSIVE_REPEAT_ONE), -} - -def _combine_flags(flags, add_flags, del_flags, - TYPE_FLAGS=_parser.TYPE_FLAGS): - if add_flags & TYPE_FLAGS: - flags &= ~TYPE_FLAGS - return (flags | add_flags) & ~del_flags - -def _compile(code, pattern, flags): - # internal: compile a (sub)pattern - emit = code.append - _len = len - LITERAL_CODES = _LITERAL_CODES - REPEATING_CODES = _REPEATING_CODES - SUCCESS_CODES = _SUCCESS_CODES - ASSERT_CODES = _ASSERT_CODES - iscased = None - tolower = None - fixes = None - if flags & SRE_FLAG_IGNORECASE and not flags & SRE_FLAG_LOCALE: - if flags & SRE_FLAG_UNICODE: - iscased = _sre.unicode_iscased - tolower = _sre.unicode_tolower - fixes = _EXTRA_CASES - else: - iscased = _sre.ascii_iscased - tolower = _sre.ascii_tolower - for op, av in pattern: - if op in LITERAL_CODES: - if not flags & SRE_FLAG_IGNORECASE: - emit(op) - emit(av) - elif flags & SRE_FLAG_LOCALE: - emit(OP_LOCALE_IGNORE[op]) - emit(av) - elif not iscased(av): - emit(op) - emit(av) - else: - lo = tolower(av) - if not fixes: # ascii - emit(OP_IGNORE[op]) - emit(lo) - elif lo not in fixes: - emit(OP_UNICODE_IGNORE[op]) - emit(lo) - else: - emit(IN_UNI_IGNORE) - skip = _len(code); emit(0) - if op is NOT_LITERAL: - emit(NEGATE) - for k in (lo,) + fixes[lo]: - emit(LITERAL) - emit(k) - emit(FAILURE) - code[skip] = _len(code) - skip - elif op is IN: - charset, hascased = _optimize_charset(av, iscased, tolower, fixes) - if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE: - emit(IN_LOC_IGNORE) - elif not hascased: - emit(IN) - elif not fixes: # ascii - emit(IN_IGNORE) - else: - emit(IN_UNI_IGNORE) - skip = _len(code); emit(0) - _compile_charset(charset, flags, code) - code[skip] = _len(code) - skip - elif op is ANY: - if flags & SRE_FLAG_DOTALL: - emit(ANY_ALL) - else: - emit(ANY) - elif op in REPEATING_CODES: - if _simple(av[2]): - emit(REPEATING_CODES[op][2]) - skip = _len(code); emit(0) - emit(av[0]) - emit(av[1]) - _compile(code, av[2], flags) - emit(SUCCESS) - code[skip] = _len(code) 
- skip - else: - emit(REPEATING_CODES[op][0]) - skip = _len(code); emit(0) - emit(av[0]) - emit(av[1]) - _compile(code, av[2], flags) - code[skip] = _len(code) - skip - emit(REPEATING_CODES[op][1]) - elif op is SUBPATTERN: - group, add_flags, del_flags, p = av - if group: - emit(MARK) - emit((group-1)*2) - # _compile_info(code, p, _combine_flags(flags, add_flags, del_flags)) - _compile(code, p, _combine_flags(flags, add_flags, del_flags)) - if group: - emit(MARK) - emit((group-1)*2+1) - elif op is ATOMIC_GROUP: - # Atomic Groups are handled by starting with an Atomic - # Group op code, then putting in the atomic group pattern - # and finally a success op code to tell any repeat - # operations within the Atomic Group to stop eating and - # pop their stack if they reach it - emit(ATOMIC_GROUP) - skip = _len(code); emit(0) - _compile(code, av, flags) - emit(SUCCESS) - code[skip] = _len(code) - skip - elif op in SUCCESS_CODES: - emit(op) - elif op in ASSERT_CODES: - emit(op) - skip = _len(code); emit(0) - if av[0] >= 0: - emit(0) # look ahead - else: - lo, hi = av[1].getwidth() - if lo > MAXCODE: - raise error("looks too much behind") - if lo != hi: - raise PatternError("look-behind requires fixed-width pattern") - emit(lo) # look behind - _compile(code, av[1], flags) - emit(SUCCESS) - code[skip] = _len(code) - skip - elif op is AT: - emit(op) - if flags & SRE_FLAG_MULTILINE: - av = AT_MULTILINE.get(av, av) - if flags & SRE_FLAG_LOCALE: - av = AT_LOCALE.get(av, av) - elif flags & SRE_FLAG_UNICODE: - av = AT_UNICODE.get(av, av) - emit(av) - elif op is BRANCH: - emit(op) - tail = [] - tailappend = tail.append - for av in av[1]: - skip = _len(code); emit(0) - # _compile_info(code, av, flags) - _compile(code, av, flags) - emit(JUMP) - tailappend(_len(code)); emit(0) - code[skip] = _len(code) - skip - emit(FAILURE) # end of branch - for tail in tail: - code[tail] = _len(code) - tail - elif op is CATEGORY: - emit(op) - if flags & SRE_FLAG_LOCALE: - av = CH_LOCALE[av] - elif flags & SRE_FLAG_UNICODE: - av = CH_UNICODE[av] - emit(av) - elif op is GROUPREF: - if not flags & SRE_FLAG_IGNORECASE: - emit(op) - elif flags & SRE_FLAG_LOCALE: - emit(GROUPREF_LOC_IGNORE) - elif not fixes: # ascii - emit(GROUPREF_IGNORE) - else: - emit(GROUPREF_UNI_IGNORE) - emit(av-1) - elif op is GROUPREF_EXISTS: - emit(op) - emit(av[0]-1) - skipyes = _len(code); emit(0) - _compile(code, av[1], flags) - if av[2]: - emit(JUMP) - skipno = _len(code); emit(0) - code[skipyes] = _len(code) - skipyes + 1 - _compile(code, av[2], flags) - code[skipno] = _len(code) - skipno - else: - code[skipyes] = _len(code) - skipyes + 1 - else: - raise PatternError(f"internal: unsupported operand type {op!r}") - -def _compile_charset(charset, flags, code): - # compile charset subprogram - emit = code.append - for op, av in charset: - emit(op) - if op is NEGATE: - pass - elif op is LITERAL: - emit(av) - elif op is RANGE or op is RANGE_UNI_IGNORE: - emit(av[0]) - emit(av[1]) - elif op is CHARSET: - code.extend(av) - elif op is BIGCHARSET: - code.extend(av) - elif op is CATEGORY: - if flags & SRE_FLAG_LOCALE: - emit(CH_LOCALE[av]) - elif flags & SRE_FLAG_UNICODE: - emit(CH_UNICODE[av]) - else: - emit(av) - else: - raise PatternError(f"internal: unsupported set operator {op!r}") - emit(FAILURE) - -def _optimize_charset(charset, iscased=None, fixup=None, fixes=None): - # internal: optimize character set - out = [] - tail = [] - charmap = bytearray(256) - hascased = False - for op, av in charset: - while True: - try: - if op is LITERAL: - if fixup: # 
IGNORECASE and not LOCALE - av = fixup(av) - charmap[av] = 1 - if fixes and av in fixes: - for k in fixes[av]: - charmap[k] = 1 - if not hascased and iscased(av): - hascased = True - else: - charmap[av] = 1 - elif op is RANGE: - r = range(av[0], av[1]+1) - if fixup: # IGNORECASE and not LOCALE - if fixes: - for i in map(fixup, r): - charmap[i] = 1 - if i in fixes: - for k in fixes[i]: - charmap[k] = 1 - else: - for i in map(fixup, r): - charmap[i] = 1 - if not hascased: - hascased = any(map(iscased, r)) - else: - for i in r: - charmap[i] = 1 - elif op is NEGATE: - out.append((op, av)) - else: - tail.append((op, av)) - except IndexError: - if len(charmap) == 256: - # character set contains non-UCS1 character codes - charmap += b'\0' * 0xff00 - continue - # Character set contains non-BMP character codes. - # For range, all BMP characters in the range are already - # proceeded. - if fixup: # IGNORECASE and not LOCALE - # For now, IN_UNI_IGNORE+LITERAL and - # IN_UNI_IGNORE+RANGE_UNI_IGNORE work for all non-BMP - # characters, because two characters (at least one of - # which is not in the BMP) match case-insensitively - # if and only if: - # 1) c1.lower() == c2.lower() - # 2) c1.lower() == c2 or c1.lower().upper() == c2 - # Also, both c.lower() and c.lower().upper() are single - # characters for every non-BMP character. - if op is RANGE: - if fixes: # not ASCII - op = RANGE_UNI_IGNORE - hascased = True - else: - assert op is LITERAL - if not hascased and iscased(av): - hascased = True - tail.append((op, av)) - break - - # compress character map - runs = [] - q = 0 - while True: - p = charmap.find(1, q) - if p < 0: - break - if len(runs) >= 2: - runs = None - break - q = charmap.find(0, p) - if q < 0: - runs.append((p, len(charmap))) - break - runs.append((p, q)) - if runs is not None: - # use literal/range - for p, q in runs: - if q - p == 1: - out.append((LITERAL, p)) - else: - out.append((RANGE, (p, q - 1))) - out += tail - # if the case was changed or new representation is more compact - if hascased or len(out) < len(charset): - return out, hascased - # else original character set is good enough - return charset, hascased - - # use bitmap - if len(charmap) == 256: - data = _mk_bitmap(charmap) - out.append((CHARSET, data)) - out += tail - return out, hascased - - # To represent a big charset, first a bitmap of all characters in the - # set is constructed. Then, this bitmap is sliced into chunks of 256 - # characters, duplicate chunks are eliminated, and each chunk is - # given a number. In the compiled expression, the charset is - # represented by a 32-bit word sequence, consisting of one word for - # the number of different chunks, a sequence of 256 bytes (64 words) - # of chunk numbers indexed by their original chunk position, and a - # sequence of 256-bit chunks (8 words each). - - # Compression is normally good: in a typical charset, large ranges of - # Unicode will be either completely excluded (e.g. if only cyrillic - # letters are to be matched), or completely included (e.g. if large - # subranges of Kanji match). These ranges will be represented by - # chunks of all one-bits or all zero-bits. - - # Matching can be also done efficiently: the more significant byte of - # the Unicode character is an index into the chunk number, and the - # less significant byte is a bit index in the chunk (just like the - # CHARSET matching). 
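A minimal, self-contained sketch of the chunk-deduplication scheme the comment above describes, assuming a 65536-byte membership bitmap (one 0/1 byte per BMP code point). This is illustrative only: the module being deleted additionally bit-packs each chunk through _mk_bitmap, and the names big_charset_layout and big_charset_contains are hypothetical, not part of that module.

def big_charset_layout(charmap):
    # charmap: 65536 bytes, one 0/1 membership byte per BMP code point
    assert len(charmap) == 65536
    comps = {}                # chunk bytes -> chunk number
    mapping = bytearray(256)  # high byte of code point -> chunk number
    data = bytearray()        # unique 256-byte chunks, concatenated
    block = 0
    for i in range(0, 65536, 256):
        chunk = charmap[i:i + 256]
        if chunk in comps:
            # duplicate chunk: reuse its number
            mapping[i // 256] = comps[chunk]
        else:
            # new chunk: assign the next number and store it
            mapping[i // 256] = comps[chunk] = block
            block += 1
            data += chunk
    return block, bytes(mapping), bytes(data)

def big_charset_contains(layout, codepoint):
    # Matching as described above: the more significant byte selects a
    # chunk via the mapping, the less significant byte indexes into it.
    _, mapping, data = layout
    chunk_no = mapping[codepoint >> 8]
    return bool(data[chunk_no * 256 + (codepoint & 0xFF)])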
- - charmap = bytes(charmap) # should be hashable - comps = {} - mapping = bytearray(256) - block = 0 - data = bytearray() - for i in range(0, 65536, 256): - chunk = charmap[i: i + 256] - if chunk in comps: - mapping[i // 256] = comps[chunk] - else: - mapping[i // 256] = comps[chunk] = block - block += 1 - data += chunk - data = _mk_bitmap(data) - data[0:0] = [block] + _bytes_to_codes(mapping) - out.append((BIGCHARSET, data)) - out += tail - return out, hascased - -_CODEBITS = _sre.CODESIZE * 8 -MAXCODE = (1 << _CODEBITS) - 1 -_BITS_TRANS = b'0' + b'1' * 255 -def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int): - s = bits.translate(_BITS_TRANS)[::-1] - return [_int(s[i - _CODEBITS: i], 2) - for i in range(len(s), 0, -_CODEBITS)] - -def _bytes_to_codes(b): - # Convert block indices to word array - a = memoryview(b).cast('I') - assert a.itemsize == _sre.CODESIZE - assert len(a) * a.itemsize == len(b) - return a.tolist() - -def _simple(p): - # check if this subpattern is a "simple" operator - if len(p) != 1: - return False - op, av = p[0] - if op is SUBPATTERN: - return av[0] is None and _simple(av[-1]) - return op in _UNIT_CODES - -def _generate_overlap_table(prefix): - """ - Generate an overlap table for the following prefix. - An overlap table is a table of the same size as the prefix which - informs about the potential self-overlap for each index in the prefix: - - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...] - - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with - prefix[0:k] - """ - table = [0] * len(prefix) - for i in range(1, len(prefix)): - idx = table[i - 1] - while prefix[i] != prefix[idx]: - if idx == 0: - table[i] = 0 - break - idx = table[idx - 1] - else: - table[i] = idx + 1 - return table - -def _get_iscased(flags): - if not flags & SRE_FLAG_IGNORECASE: - return None - elif flags & SRE_FLAG_UNICODE: - return _sre.unicode_iscased - else: - return _sre.ascii_iscased - -def _get_literal_prefix(pattern, flags): - # look for literal prefix - prefix = [] - prefixappend = prefix.append - prefix_skip = None - iscased = _get_iscased(flags) - for op, av in pattern.data: - if op is LITERAL: - if iscased and iscased(av): - break - prefixappend(av) - elif op is SUBPATTERN: - group, add_flags, del_flags, p = av - flags1 = _combine_flags(flags, add_flags, del_flags) - if flags1 & SRE_FLAG_IGNORECASE and flags1 & SRE_FLAG_LOCALE: - break - prefix1, prefix_skip1, got_all = _get_literal_prefix(p, flags1) - if prefix_skip is None: - if group is not None: - prefix_skip = len(prefix) - elif prefix_skip1 is not None: - prefix_skip = len(prefix) + prefix_skip1 - prefix.extend(prefix1) - if not got_all: - break - else: - break - else: - return prefix, prefix_skip, True - return prefix, prefix_skip, False - -def _get_charset_prefix(pattern, flags): - while True: - if not pattern.data: - return None - op, av = pattern.data[0] - if op is not SUBPATTERN: - break - group, add_flags, del_flags, pattern = av - flags = _combine_flags(flags, add_flags, del_flags) - if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE: - return None - - iscased = _get_iscased(flags) - if op is LITERAL: - if iscased and iscased(av): - return None - return [(op, av)] - elif op is BRANCH: - charset = [] - charsetappend = charset.append - for p in av[1]: - if not p: - return None - op, av = p[0] - if op is LITERAL and not (iscased and iscased(av)): - charsetappend((op, av)) - else: - return None - return charset - elif op is IN: - charset = av - if iscased: - for op, av in charset: - if op is 
LITERAL: - if iscased(av): - return None - elif op is RANGE: - if av[1] > 0xffff: - return None - if any(map(iscased, range(av[0], av[1]+1))): - return None - return charset - return None - -def _compile_info(code, pattern, flags): - # internal: compile an info block. in the current version, - # this contains min/max pattern width, and an optional literal - # prefix or a character map - lo, hi = pattern.getwidth() - if hi > MAXCODE: - hi = MAXCODE - if lo == 0: - code.extend([INFO, 4, 0, lo, hi]) - return - # look for a literal prefix - prefix = [] - prefix_skip = 0 - charset = [] # not used - if not (flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE): - # look for literal prefix - prefix, prefix_skip, got_all = _get_literal_prefix(pattern, flags) - # if no prefix, look for charset prefix - if not prefix: - charset = _get_charset_prefix(pattern, flags) -## if prefix: -## print("*** PREFIX", prefix, prefix_skip) -## if charset: -## print("*** CHARSET", charset) - # add an info block - emit = code.append - emit(INFO) - skip = len(code); emit(0) - # literal flag - mask = 0 - if prefix: - mask = SRE_INFO_PREFIX - if prefix_skip is None and got_all: - mask = mask | SRE_INFO_LITERAL - elif charset: - mask = mask | SRE_INFO_CHARSET - emit(mask) - # pattern length - if lo < MAXCODE: - emit(lo) - else: - emit(MAXCODE) - prefix = prefix[:MAXCODE] - emit(hi) - # add literal prefix - if prefix: - emit(len(prefix)) # length - if prefix_skip is None: - prefix_skip = len(prefix) - emit(prefix_skip) # skip - code.extend(prefix) - # generate overlap table - code.extend(_generate_overlap_table(prefix)) - elif charset: - charset, hascased = _optimize_charset(charset) - assert not hascased - _compile_charset(charset, flags, code) - code[skip] = len(code) - skip - -def isstring(obj): - return isinstance(obj, (str, bytes)) - -def _code(p, flags): - - flags = p.state.flags | flags - code = [] - - # compile info block - _compile_info(code, p, flags) - - # compile the pattern - _compile(code, p.data, flags) - - code.append(SUCCESS) - - return code - -def _hex_code(code): - return '[%s]' % ', '.join('%#0*x' % (_sre.CODESIZE*2+2, x) for x in code) - -def dis(code): - import sys - - labels = set() - level = 0 - offset_width = len(str(len(code) - 1)) - - def dis_(start, end): - def print_(*args, to=None): - if to is not None: - labels.add(to) - args += ('(to %d)' % (to,),) - print('%*d%s ' % (offset_width, start, ':' if start in labels else '.'), - end=' '*(level-1)) - print(*args) - - def print_2(*args): - print(end=' '*(offset_width + 2*level)) - print(*args) - - nonlocal level - level += 1 - i = start - while i < end: - start = i - op = code[i] - i += 1 - op = OPCODES[op] - if op in (SUCCESS, FAILURE, ANY, ANY_ALL, - MAX_UNTIL, MIN_UNTIL, NEGATE): - print_(op) - elif op in (LITERAL, NOT_LITERAL, - LITERAL_IGNORE, NOT_LITERAL_IGNORE, - LITERAL_UNI_IGNORE, NOT_LITERAL_UNI_IGNORE, - LITERAL_LOC_IGNORE, NOT_LITERAL_LOC_IGNORE): - arg = code[i] - i += 1 - print_(op, '%#02x (%r)' % (arg, chr(arg))) - elif op is AT: - arg = code[i] - i += 1 - arg = str(ATCODES[arg]) - assert arg[:3] == 'AT_' - print_(op, arg[3:]) - elif op is CATEGORY: - arg = code[i] - i += 1 - arg = str(CHCODES[arg]) - assert arg[:9] == 'CATEGORY_' - print_(op, arg[9:]) - elif op in (IN, IN_IGNORE, IN_UNI_IGNORE, IN_LOC_IGNORE): - skip = code[i] - print_(op, skip, to=i+skip) - dis_(i+1, i+skip) - i += skip - elif op in (RANGE, RANGE_UNI_IGNORE): - lo, hi = code[i: i+2] - i += 2 - print_(op, '%#02x %#02x (%r-%r)' % (lo, hi, chr(lo), chr(hi))) - 
elif op is CHARSET: - print_(op, _hex_code(code[i: i + 256//_CODEBITS])) - i += 256//_CODEBITS - elif op is BIGCHARSET: - arg = code[i] - i += 1 - mapping = list(b''.join(x.to_bytes(_sre.CODESIZE, sys.byteorder) - for x in code[i: i + 256//_sre.CODESIZE])) - print_(op, arg, mapping) - i += 256//_sre.CODESIZE - level += 1 - for j in range(arg): - print_2(_hex_code(code[i: i + 256//_CODEBITS])) - i += 256//_CODEBITS - level -= 1 - elif op in (MARK, GROUPREF, GROUPREF_IGNORE, GROUPREF_UNI_IGNORE, - GROUPREF_LOC_IGNORE): - arg = code[i] - i += 1 - print_(op, arg) - elif op is JUMP: - skip = code[i] - print_(op, skip, to=i+skip) - i += 1 - elif op is BRANCH: - skip = code[i] - print_(op, skip, to=i+skip) - while skip: - dis_(i+1, i+skip) - i += skip - start = i - skip = code[i] - if skip: - print_('branch', skip, to=i+skip) - else: - print_(FAILURE) - i += 1 - elif op in (REPEAT, REPEAT_ONE, MIN_REPEAT_ONE, - POSSESSIVE_REPEAT, POSSESSIVE_REPEAT_ONE): - skip, min, max = code[i: i+3] - if max == MAXREPEAT: - max = 'MAXREPEAT' - print_(op, skip, min, max, to=i+skip) - dis_(i+3, i+skip) - i += skip - elif op is GROUPREF_EXISTS: - arg, skip = code[i: i+2] - print_(op, arg, skip, to=i+skip) - i += 2 - elif op in (ASSERT, ASSERT_NOT): - skip, arg = code[i: i+2] - print_(op, skip, arg, to=i+skip) - dis_(i+2, i+skip) - i += skip - elif op is ATOMIC_GROUP: - skip = code[i] - print_(op, skip, to=i+skip) - dis_(i+1, i+skip) - i += skip - elif op is INFO: - skip, flags, min, max = code[i: i+4] - if max == MAXREPEAT: - max = 'MAXREPEAT' - print_(op, skip, bin(flags), min, max, to=i+skip) - start = i+4 - if flags & SRE_INFO_PREFIX: - prefix_len, prefix_skip = code[i+4: i+6] - print_2(' prefix_skip', prefix_skip) - start = i + 6 - prefix = code[start: start+prefix_len] - print_2(' prefix', - '[%s]' % ', '.join('%#02x' % x for x in prefix), - '(%r)' % ''.join(map(chr, prefix))) - start += prefix_len - print_2(' overlap', code[start: start+prefix_len]) - start += prefix_len - if flags & SRE_INFO_CHARSET: - level += 1 - print_2('in') - dis_(start, i+skip) - level -= 1 - i += skip - else: - raise ValueError(op) - - level -= 1 - - dis_(0, len(code)) - - -def compile(p, flags=0): - # internal: convert pattern list to internal format - - if isstring(p): - pattern = p - p = _parser.parse(p, flags) - else: - pattern = None - - code = _code(p, flags) - - if flags & SRE_FLAG_DEBUG: - print() - dis(code) - - # map in either direction - groupindex = p.state.groupdict - indexgroup = [None] * p.state.groups - for k, i in groupindex.items(): - indexgroup[i] = k - - return _sre.compile( - pattern, flags | p.state.flags, code, - p.state.groups-1, - groupindex, tuple(indexgroup) - ) diff --git a/Python313_13_x86_Template/Lib/re/_constants.py b/Python313_13_x86_Template/Lib/re/_constants.py deleted file mode 100644 index 9c3c294b..00000000 --- a/Python313_13_x86_Template/Lib/re/_constants.py +++ /dev/null @@ -1,222 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# various symbols used by the regular expression engine. -# run this script to update the _sre include files! -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# See the __init__.py file for information on usage and redistribution. -# - -"""Internal support module for sre""" - -# update when constants are added or removed - -MAGIC = 20230612 - -from _sre import MAXREPEAT, MAXGROUPS - -# SRE standard exception (access as sre.error) -# should this really be here? 
- -class PatternError(Exception): - """Exception raised for invalid regular expressions. - - Attributes: - - msg: The unformatted error message - pattern: The regular expression pattern - pos: The index in the pattern where compilation failed (may be None) - lineno: The line corresponding to pos (may be None) - colno: The column corresponding to pos (may be None) - """ - - __module__ = 're' - - def __init__(self, msg, pattern=None, pos=None): - self.msg = msg - self.pattern = pattern - self.pos = pos - if pattern is not None and pos is not None: - msg = '%s at position %d' % (msg, pos) - if isinstance(pattern, str): - newline = '\n' - else: - newline = b'\n' - self.lineno = pattern.count(newline, 0, pos) + 1 - self.colno = pos - pattern.rfind(newline, 0, pos) - if newline in pattern: - msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno) - else: - self.lineno = self.colno = None - super().__init__(msg) - - -# Backward compatibility after renaming in 3.13 -error = PatternError - -class _NamedIntConstant(int): - def __new__(cls, value, name): - self = super(_NamedIntConstant, cls).__new__(cls, value) - self.name = name - return self - - def __repr__(self): - return self.name - - __reduce__ = None - -MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT') - -def _makecodes(*names): - items = [_NamedIntConstant(i, name) for i, name in enumerate(names)] - globals().update({item.name: item for item in items}) - return items - -# operators -OPCODES = _makecodes( - # failure=0 success=1 (just because it looks better that way :-) - 'FAILURE', 'SUCCESS', - - 'ANY', 'ANY_ALL', - 'ASSERT', 'ASSERT_NOT', - 'AT', - 'BRANCH', - 'CATEGORY', - 'CHARSET', 'BIGCHARSET', - 'GROUPREF', 'GROUPREF_EXISTS', - 'IN', - 'INFO', - 'JUMP', - 'LITERAL', - 'MARK', - 'MAX_UNTIL', - 'MIN_UNTIL', - 'NOT_LITERAL', - 'NEGATE', - 'RANGE', - 'REPEAT', - 'REPEAT_ONE', - 'SUBPATTERN', - 'MIN_REPEAT_ONE', - 'ATOMIC_GROUP', - 'POSSESSIVE_REPEAT', - 'POSSESSIVE_REPEAT_ONE', - - 'GROUPREF_IGNORE', - 'IN_IGNORE', - 'LITERAL_IGNORE', - 'NOT_LITERAL_IGNORE', - - 'GROUPREF_LOC_IGNORE', - 'IN_LOC_IGNORE', - 'LITERAL_LOC_IGNORE', - 'NOT_LITERAL_LOC_IGNORE', - - 'GROUPREF_UNI_IGNORE', - 'IN_UNI_IGNORE', - 'LITERAL_UNI_IGNORE', - 'NOT_LITERAL_UNI_IGNORE', - 'RANGE_UNI_IGNORE', - - # The following opcodes are only occurred in the parser output, - # but not in the compiled code. 
- 'MIN_REPEAT', 'MAX_REPEAT', -) -del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT - -# positions -ATCODES = _makecodes( - 'AT_BEGINNING', 'AT_BEGINNING_LINE', 'AT_BEGINNING_STRING', - 'AT_BOUNDARY', 'AT_NON_BOUNDARY', - 'AT_END', 'AT_END_LINE', 'AT_END_STRING', - - 'AT_LOC_BOUNDARY', 'AT_LOC_NON_BOUNDARY', - - 'AT_UNI_BOUNDARY', 'AT_UNI_NON_BOUNDARY', -) - -# categories -CHCODES = _makecodes( - 'CATEGORY_DIGIT', 'CATEGORY_NOT_DIGIT', - 'CATEGORY_SPACE', 'CATEGORY_NOT_SPACE', - 'CATEGORY_WORD', 'CATEGORY_NOT_WORD', - 'CATEGORY_LINEBREAK', 'CATEGORY_NOT_LINEBREAK', - - 'CATEGORY_LOC_WORD', 'CATEGORY_LOC_NOT_WORD', - - 'CATEGORY_UNI_DIGIT', 'CATEGORY_UNI_NOT_DIGIT', - 'CATEGORY_UNI_SPACE', 'CATEGORY_UNI_NOT_SPACE', - 'CATEGORY_UNI_WORD', 'CATEGORY_UNI_NOT_WORD', - 'CATEGORY_UNI_LINEBREAK', 'CATEGORY_UNI_NOT_LINEBREAK', -) - - -# replacement operations for "ignore case" mode -OP_IGNORE = { - LITERAL: LITERAL_IGNORE, - NOT_LITERAL: NOT_LITERAL_IGNORE, -} - -OP_LOCALE_IGNORE = { - LITERAL: LITERAL_LOC_IGNORE, - NOT_LITERAL: NOT_LITERAL_LOC_IGNORE, -} - -OP_UNICODE_IGNORE = { - LITERAL: LITERAL_UNI_IGNORE, - NOT_LITERAL: NOT_LITERAL_UNI_IGNORE, -} - -AT_MULTILINE = { - AT_BEGINNING: AT_BEGINNING_LINE, - AT_END: AT_END_LINE -} - -AT_LOCALE = { - AT_BOUNDARY: AT_LOC_BOUNDARY, - AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY -} - -AT_UNICODE = { - AT_BOUNDARY: AT_UNI_BOUNDARY, - AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY -} - -CH_LOCALE = { - CATEGORY_DIGIT: CATEGORY_DIGIT, - CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, - CATEGORY_SPACE: CATEGORY_SPACE, - CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, - CATEGORY_WORD: CATEGORY_LOC_WORD, - CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, - CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, - CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK -} - -CH_UNICODE = { - CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, - CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT, - CATEGORY_SPACE: CATEGORY_UNI_SPACE, - CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, - CATEGORY_WORD: CATEGORY_UNI_WORD, - CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, - CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, - CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK -} - -# flags -SRE_FLAG_IGNORECASE = 2 # case insensitive -SRE_FLAG_LOCALE = 4 # honour system locale -SRE_FLAG_MULTILINE = 8 # treat target as multiline string -SRE_FLAG_DOTALL = 16 # treat target as a single string -SRE_FLAG_UNICODE = 32 # use unicode "locale" -SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments -SRE_FLAG_DEBUG = 128 # debugging -SRE_FLAG_ASCII = 256 # use ascii "locale" - -# flags for INFO primitive -SRE_INFO_PREFIX = 1 # has prefix -SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) -SRE_INFO_CHARSET = 4 # pattern starts with character from given set diff --git a/Python313_13_x86_Template/Lib/re/_parser.py b/Python313_13_x86_Template/Lib/re/_parser.py deleted file mode 100644 index f3c77934..00000000 --- a/Python313_13_x86_Template/Lib/re/_parser.py +++ /dev/null @@ -1,1081 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# convert re-style regular expression to sre pattern -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# See the __init__.py file for information on usage and redistribution. 
-# - -"""Internal support module for sre""" - -# XXX: show string offset and offending character for all errors - -from ._constants import * - -SPECIAL_CHARS = ".\\[{()*+?^$|" -REPEAT_CHARS = "*+?{" - -DIGITS = frozenset("0123456789") - -OCTDIGITS = frozenset("01234567") -HEXDIGITS = frozenset("0123456789abcdefABCDEF") -ASCIILETTERS = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -WHITESPACE = frozenset(" \t\n\r\v\f") - -_REPEATCODES = frozenset({MIN_REPEAT, MAX_REPEAT, POSSESSIVE_REPEAT}) -_UNITCODES = frozenset({ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY}) - -ESCAPES = { - r"\a": (LITERAL, ord("\a")), - r"\b": (LITERAL, ord("\b")), - r"\f": (LITERAL, ord("\f")), - r"\n": (LITERAL, ord("\n")), - r"\r": (LITERAL, ord("\r")), - r"\t": (LITERAL, ord("\t")), - r"\v": (LITERAL, ord("\v")), - r"\\": (LITERAL, ord("\\")) -} - -CATEGORIES = { - r"\A": (AT, AT_BEGINNING_STRING), # start of string - r"\b": (AT, AT_BOUNDARY), - r"\B": (AT, AT_NON_BOUNDARY), - r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]), - r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]), - r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]), - r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]), - r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]), - r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]), - r"\Z": (AT, AT_END_STRING), # end of string -} - -FLAGS = { - # standard flags - "i": SRE_FLAG_IGNORECASE, - "L": SRE_FLAG_LOCALE, - "m": SRE_FLAG_MULTILINE, - "s": SRE_FLAG_DOTALL, - "x": SRE_FLAG_VERBOSE, - # extensions - "a": SRE_FLAG_ASCII, - "u": SRE_FLAG_UNICODE, -} - -TYPE_FLAGS = SRE_FLAG_ASCII | SRE_FLAG_LOCALE | SRE_FLAG_UNICODE -GLOBAL_FLAGS = SRE_FLAG_DEBUG - -# Maximal value returned by SubPattern.getwidth(). -# Must be larger than MAXREPEAT, MAXCODE and sys.maxsize. -MAXWIDTH = 1 << 64 - -class State: - # keeps track of state for parsing - def __init__(self): - self.flags = 0 - self.groupdict = {} - self.groupwidths = [None] # group 0 - self.lookbehindgroups = None - self.grouprefpos = {} - @property - def groups(self): - return len(self.groupwidths) - def opengroup(self, name=None): - gid = self.groups - self.groupwidths.append(None) - if self.groups > MAXGROUPS: - raise error("too many groups") - if name is not None: - ogid = self.groupdict.get(name, None) - if ogid is not None: - raise error("redefinition of group name %r as group %d; " - "was group %d" % (name, gid, ogid)) - self.groupdict[name] = gid - return gid - def closegroup(self, gid, p): - self.groupwidths[gid] = p.getwidth() - def checkgroup(self, gid): - return gid < self.groups and self.groupwidths[gid] is not None - - def checklookbehindgroup(self, gid, source): - if self.lookbehindgroups is not None: - if not self.checkgroup(gid): - raise source.error('cannot refer to an open group') - if gid >= self.lookbehindgroups: - raise source.error('cannot refer to group defined in the same ' - 'lookbehind subpattern') - -class SubPattern: - # a subpattern, in intermediate form - def __init__(self, state, data=None): - self.state = state - if data is None: - data = [] - self.data = data - self.width = None - - def dump(self, level=0): - seqtypes = (tuple, list) - for op, av in self.data: - print(level*" " + str(op), end='') - if op is IN: - # member sublanguage - print() - for op, a in av: - print((level+1)*" " + str(op), a) - elif op is BRANCH: - print() - for i, a in enumerate(av[1]): - if i: - print(level*" " + "OR") - a.dump(level+1) - elif op is GROUPREF_EXISTS: - condgroup, item_yes, item_no = av - print('', condgroup) - item_yes.dump(level+1) - if item_no: - 
print(level*" " + "ELSE") - item_no.dump(level+1) - elif isinstance(av, SubPattern): - print() - av.dump(level+1) - elif isinstance(av, seqtypes): - nl = False - for a in av: - if isinstance(a, SubPattern): - if not nl: - print() - a.dump(level+1) - nl = True - else: - if not nl: - print(' ', end='') - print(a, end='') - nl = False - if not nl: - print() - else: - print('', av) - def __repr__(self): - return repr(self.data) - def __len__(self): - return len(self.data) - def __delitem__(self, index): - del self.data[index] - def __getitem__(self, index): - if isinstance(index, slice): - return SubPattern(self.state, self.data[index]) - return self.data[index] - def __setitem__(self, index, code): - self.data[index] = code - def insert(self, index, code): - self.data.insert(index, code) - def append(self, code): - self.data.append(code) - def getwidth(self): - # determine the width (min, max) for this subpattern - if self.width is not None: - return self.width - lo = hi = 0 - for op, av in self.data: - if op is BRANCH: - i = MAXWIDTH - j = 0 - for av in av[1]: - l, h = av.getwidth() - i = min(i, l) - j = max(j, h) - lo = lo + i - hi = hi + j - elif op is ATOMIC_GROUP: - i, j = av.getwidth() - lo = lo + i - hi = hi + j - elif op is SUBPATTERN: - i, j = av[-1].getwidth() - lo = lo + i - hi = hi + j - elif op in _REPEATCODES: - i, j = av[2].getwidth() - lo = lo + i * av[0] - if av[1] == MAXREPEAT and j: - hi = MAXWIDTH - else: - hi = hi + j * av[1] - elif op in _UNITCODES: - lo = lo + 1 - hi = hi + 1 - elif op is GROUPREF: - i, j = self.state.groupwidths[av] - lo = lo + i - hi = hi + j - elif op is GROUPREF_EXISTS: - i, j = av[1].getwidth() - if av[2] is not None: - l, h = av[2].getwidth() - i = min(i, l) - j = max(j, h) - else: - i = 0 - lo = lo + i - hi = hi + j - elif op is SUCCESS: - break - self.width = min(lo, MAXWIDTH), min(hi, MAXWIDTH) - return self.width - -class Tokenizer: - def __init__(self, string): - self.istext = isinstance(string, str) - self.string = string - if not self.istext: - string = str(string, 'latin1') - self.decoded_string = string - self.index = 0 - self.next = None - self.__next() - def __next(self): - index = self.index - try: - char = self.decoded_string[index] - except IndexError: - self.next = None - return - if char == "\\": - index += 1 - try: - char += self.decoded_string[index] - except IndexError: - raise error("bad escape (end of pattern)", - self.string, len(self.string) - 1) from None - self.index = index + 1 - self.next = char - def match(self, char): - if char == self.next: - self.__next() - return True - return False - def get(self): - this = self.next - self.__next() - return this - def getwhile(self, n, charset): - result = '' - for _ in range(n): - c = self.next - if c not in charset: - break - result += c - self.__next() - return result - def getuntil(self, terminator, name): - result = '' - while True: - c = self.next - self.__next() - if c is None: - if not result: - raise self.error("missing " + name) - raise self.error("missing %s, unterminated name" % terminator, - len(result)) - if c == terminator: - if not result: - raise self.error("missing " + name, 1) - break - result += c - return result - @property - def pos(self): - return self.index - len(self.next or '') - def tell(self): - return self.index - len(self.next or '') - def seek(self, index): - self.index = index - self.__next() - - def error(self, msg, offset=0): - if not self.istext: - msg = msg.encode('ascii', 'backslashreplace').decode('ascii') - return error(msg, self.string, 
self.tell() - offset) - - def checkgroupname(self, name, offset): - if not (self.istext or name.isascii()): - msg = "bad character in group name %a" % name - raise self.error(msg, len(name) + offset) - if not name.isidentifier(): - msg = "bad character in group name %r" % name - raise self.error(msg, len(name) + offset) - -def _class_escape(source, escape): - # handle escape code inside character class - code = ESCAPES.get(escape) - if code: - return code - code = CATEGORIES.get(escape) - if code and code[0] is IN: - return code - try: - c = escape[1:2] - if c == "x": - # hexadecimal escape (exactly two digits) - escape += source.getwhile(2, HEXDIGITS) - if len(escape) != 4: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "u" and source.istext: - # unicode escape (exactly four digits) - escape += source.getwhile(4, HEXDIGITS) - if len(escape) != 6: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "U" and source.istext: - # unicode escape (exactly eight digits) - escape += source.getwhile(8, HEXDIGITS) - if len(escape) != 10: - raise source.error("incomplete escape %s" % escape, len(escape)) - c = int(escape[2:], 16) - chr(c) # raise ValueError for invalid code - return LITERAL, c - elif c == "N" and source.istext: - import unicodedata - # named unicode escape e.g. \N{EM DASH} - if not source.match('{'): - raise source.error("missing {") - charname = source.getuntil('}', 'character name') - try: - c = ord(unicodedata.lookup(charname)) - except (KeyError, TypeError): - raise source.error("undefined character name %r" % charname, - len(charname) + len(r'\N{}')) from None - return LITERAL, c - elif c in OCTDIGITS: - # octal escape (up to three digits) - escape += source.getwhile(2, OCTDIGITS) - c = int(escape[1:], 8) - if c > 0o377: - raise source.error('octal escape value %s outside of ' - 'range 0-0o377' % escape, len(escape)) - return LITERAL, c - elif c in DIGITS: - raise ValueError - if len(escape) == 2: - if c in ASCIILETTERS: - raise source.error('bad escape %s' % escape, len(escape)) - return LITERAL, ord(escape[1]) - except ValueError: - pass - raise source.error("bad escape %s" % escape, len(escape)) - -def _escape(source, escape, state): - # handle escape code in expression - code = CATEGORIES.get(escape) - if code: - return code - code = ESCAPES.get(escape) - if code: - return code - try: - c = escape[1:2] - if c == "x": - # hexadecimal escape - escape += source.getwhile(2, HEXDIGITS) - if len(escape) != 4: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "u" and source.istext: - # unicode escape (exactly four digits) - escape += source.getwhile(4, HEXDIGITS) - if len(escape) != 6: - raise source.error("incomplete escape %s" % escape, len(escape)) - return LITERAL, int(escape[2:], 16) - elif c == "U" and source.istext: - # unicode escape (exactly eight digits) - escape += source.getwhile(8, HEXDIGITS) - if len(escape) != 10: - raise source.error("incomplete escape %s" % escape, len(escape)) - c = int(escape[2:], 16) - chr(c) # raise ValueError for invalid code - return LITERAL, c - elif c == "N" and source.istext: - import unicodedata - # named unicode escape e.g. 
\N{EM DASH} - if not source.match('{'): - raise source.error("missing {") - charname = source.getuntil('}', 'character name') - try: - c = ord(unicodedata.lookup(charname)) - except (KeyError, TypeError): - raise source.error("undefined character name %r" % charname, - len(charname) + len(r'\N{}')) from None - return LITERAL, c - elif c == "0": - # octal escape - escape += source.getwhile(2, OCTDIGITS) - return LITERAL, int(escape[1:], 8) - elif c in DIGITS: - # octal escape *or* decimal group reference (sigh) - if source.next in DIGITS: - escape += source.get() - if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and - source.next in OCTDIGITS): - # got three octal digits; this is an octal escape - escape += source.get() - c = int(escape[1:], 8) - if c > 0o377: - raise source.error('octal escape value %s outside of ' - 'range 0-0o377' % escape, - len(escape)) - return LITERAL, c - # not an octal escape, so this is a group reference - group = int(escape[1:]) - if group < state.groups: - if not state.checkgroup(group): - raise source.error("cannot refer to an open group", - len(escape)) - state.checklookbehindgroup(group, source) - return GROUPREF, group - raise source.error("invalid group reference %d" % group, len(escape) - 1) - if len(escape) == 2: - if c in ASCIILETTERS: - raise source.error("bad escape %s" % escape, len(escape)) - return LITERAL, ord(escape[1]) - except ValueError: - pass - raise source.error("bad escape %s" % escape, len(escape)) - -def _uniq(items): - return list(dict.fromkeys(items)) - -def _parse_sub(source, state, verbose, nested): - # parse an alternation: a|b|c - - items = [] - itemsappend = items.append - sourcematch = source.match - start = source.tell() - while True: - itemsappend(_parse(source, state, verbose, nested + 1, - not nested and not items)) - if not sourcematch("|"): - break - if not nested: - verbose = state.flags & SRE_FLAG_VERBOSE - - if len(items) == 1: - return items[0] - - subpattern = SubPattern(state) - - # check if all items share a common prefix - while True: - prefix = None - for item in items: - if not item: - break - if prefix is None: - prefix = item[0] - elif item[0] != prefix: - break - else: - # all subitems start with a common "prefix". 
- # move it out of the branch - for item in items: - del item[0] - subpattern.append(prefix) - continue # check next one - break - - # check if the branch can be replaced by a character set - set = [] - for item in items: - if len(item) != 1: - break - op, av = item[0] - if op is LITERAL: - set.append((op, av)) - elif op is IN and av[0][0] is not NEGATE: - set.extend(av) - else: - break - else: - # we can store this as a character set instead of a - # branch (the compiler may optimize this even more) - subpattern.append((IN, _uniq(set))) - return subpattern - - subpattern.append((BRANCH, (None, items))) - return subpattern - -def _parse(source, state, verbose, nested, first=False): - # parse a simple pattern - subpattern = SubPattern(state) - - # precompute constants into local variables - subpatternappend = subpattern.append - sourceget = source.get - sourcematch = source.match - _len = len - _ord = ord - - while True: - - this = source.next - if this is None: - break # end of pattern - if this in "|)": - break # end of subpattern - sourceget() - - if verbose: - # skip whitespace and comments - if this in WHITESPACE: - continue - if this == "#": - while True: - this = sourceget() - if this is None or this == "\n": - break - continue - - if this[0] == "\\": - code = _escape(source, this, state) - subpatternappend(code) - - elif this not in SPECIAL_CHARS: - subpatternappend((LITERAL, _ord(this))) - - elif this == "[": - here = source.tell() - 1 - # character set - set = [] - setappend = set.append -## if sourcematch(":"): -## pass # handle character classes - if source.next == '[': - import warnings - warnings.warn( - 'Possible nested set at position %d' % source.tell(), - FutureWarning, stacklevel=nested + 6 - ) - negate = sourcematch("^") - # check remaining characters - while True: - this = sourceget() - if this is None: - raise source.error("unterminated character set", - source.tell() - here) - if this == "]" and set: - break - elif this[0] == "\\": - code1 = _class_escape(source, this) - else: - if set and this in '-&~|' and source.next == this: - import warnings - warnings.warn( - 'Possible set %s at position %d' % ( - 'difference' if this == '-' else - 'intersection' if this == '&' else - 'symmetric difference' if this == '~' else - 'union', - source.tell() - 1), - FutureWarning, stacklevel=nested + 6 - ) - code1 = LITERAL, _ord(this) - if sourcematch("-"): - # potential range - that = sourceget() - if that is None: - raise source.error("unterminated character set", - source.tell() - here) - if that == "]": - if code1[0] is IN: - code1 = code1[1][0] - setappend(code1) - setappend((LITERAL, _ord("-"))) - break - if that[0] == "\\": - code2 = _class_escape(source, that) - else: - if that == '-': - import warnings - warnings.warn( - 'Possible set difference at position %d' % ( - source.tell() - 2), - FutureWarning, stacklevel=nested + 6 - ) - code2 = LITERAL, _ord(that) - if code1[0] != LITERAL or code2[0] != LITERAL: - msg = "bad character range %s-%s" % (this, that) - raise source.error(msg, len(this) + 1 + len(that)) - lo = code1[1] - hi = code2[1] - if hi < lo: - msg = "bad character range %s-%s" % (this, that) - raise source.error(msg, len(this) + 1 + len(that)) - setappend((RANGE, (lo, hi))) - else: - if code1[0] is IN: - code1 = code1[1][0] - setappend(code1) - - set = _uniq(set) - # XXX: should move set optimization to compiler! 
- if _len(set) == 1 and set[0][0] is LITERAL: - # optimization - if negate: - subpatternappend((NOT_LITERAL, set[0][1])) - else: - subpatternappend(set[0]) - else: - if negate: - set.insert(0, (NEGATE, None)) - # charmap optimization can't be added here because - # global flags still are not known - subpatternappend((IN, set)) - - elif this in REPEAT_CHARS: - # repeat previous item - here = source.tell() - if this == "?": - min, max = 0, 1 - elif this == "*": - min, max = 0, MAXREPEAT - - elif this == "+": - min, max = 1, MAXREPEAT - elif this == "{": - if source.next == "}": - subpatternappend((LITERAL, _ord(this))) - continue - - min, max = 0, MAXREPEAT - lo = hi = "" - while source.next in DIGITS: - lo += sourceget() - if sourcematch(","): - while source.next in DIGITS: - hi += sourceget() - else: - hi = lo - if not sourcematch("}"): - subpatternappend((LITERAL, _ord(this))) - source.seek(here) - continue - - if lo: - min = int(lo) - if min >= MAXREPEAT: - raise OverflowError("the repetition number is too large") - if hi: - max = int(hi) - if max >= MAXREPEAT: - raise OverflowError("the repetition number is too large") - if max < min: - raise source.error("min repeat greater than max repeat", - source.tell() - here) - else: - raise AssertionError("unsupported quantifier %r" % (char,)) - # figure out which item to repeat - if subpattern: - item = subpattern[-1:] - else: - item = None - if not item or item[0][0] is AT: - raise source.error("nothing to repeat", - source.tell() - here + len(this)) - if item[0][0] in _REPEATCODES: - raise source.error("multiple repeat", - source.tell() - here + len(this)) - if item[0][0] is SUBPATTERN: - group, add_flags, del_flags, p = item[0][1] - if group is None and not add_flags and not del_flags: - item = p - if sourcematch("?"): - # Non-Greedy Match - subpattern[-1] = (MIN_REPEAT, (min, max, item)) - elif sourcematch("+"): - # Possessive Match (Always Greedy) - subpattern[-1] = (POSSESSIVE_REPEAT, (min, max, item)) - else: - # Greedy Match - subpattern[-1] = (MAX_REPEAT, (min, max, item)) - - elif this == ".": - subpatternappend((ANY, None)) - - elif this == "(": - start = source.tell() - 1 - capture = True - atomic = False - name = None - add_flags = 0 - del_flags = 0 - if sourcematch("?"): - # options - char = sourceget() - if char is None: - raise source.error("unexpected end of pattern") - if char == "P": - # python extensions - if sourcematch("<"): - # named group: skip forward to end of name - name = source.getuntil(">", "group name") - source.checkgroupname(name, 1) - elif sourcematch("="): - # named backreference - name = source.getuntil(")", "group name") - source.checkgroupname(name, 1) - gid = state.groupdict.get(name) - if gid is None: - msg = "unknown group name %r" % name - raise source.error(msg, len(name) + 1) - if not state.checkgroup(gid): - raise source.error("cannot refer to an open group", - len(name) + 1) - state.checklookbehindgroup(gid, source) - subpatternappend((GROUPREF, gid)) - continue - - else: - char = sourceget() - if char is None: - raise source.error("unexpected end of pattern") - raise source.error("unknown extension ?P" + char, - len(char) + 2) - elif char == ":": - # non-capturing group - capture = False - elif char == "#": - # comment - while True: - if source.next is None: - raise source.error("missing ), unterminated comment", - source.tell() - start) - if sourceget() == ")": - break - continue - - elif char in "=!<": - # lookahead assertions - dir = 1 - if char == "<": - char = sourceget() - if char is None: - 
raise source.error("unexpected end of pattern") - if char not in "=!": - raise source.error("unknown extension ?<" + char, - len(char) + 2) - dir = -1 # lookbehind - lookbehindgroups = state.lookbehindgroups - if lookbehindgroups is None: - state.lookbehindgroups = state.groups - p = _parse_sub(source, state, verbose, nested + 1) - if dir < 0: - if lookbehindgroups is None: - state.lookbehindgroups = None - if not sourcematch(")"): - raise source.error("missing ), unterminated subpattern", - source.tell() - start) - if char == "=": - subpatternappend((ASSERT, (dir, p))) - elif p: - subpatternappend((ASSERT_NOT, (dir, p))) - else: - subpatternappend((FAILURE, ())) - continue - - elif char == "(": - # conditional backreference group - condname = source.getuntil(")", "group name") - if not (condname.isdecimal() and condname.isascii()): - source.checkgroupname(condname, 1) - condgroup = state.groupdict.get(condname) - if condgroup is None: - msg = "unknown group name %r" % condname - raise source.error(msg, len(condname) + 1) - else: - condgroup = int(condname) - if not condgroup: - raise source.error("bad group number", - len(condname) + 1) - if condgroup >= MAXGROUPS: - msg = "invalid group reference %d" % condgroup - raise source.error(msg, len(condname) + 1) - if condgroup not in state.grouprefpos: - state.grouprefpos[condgroup] = ( - source.tell() - len(condname) - 1 - ) - if not (condname.isdecimal() and condname.isascii()): - import warnings - warnings.warn( - "bad character in group name %s at position %d" % - (repr(condname) if source.istext else ascii(condname), - source.tell() - len(condname) - 1), - DeprecationWarning, stacklevel=nested + 6 - ) - state.checklookbehindgroup(condgroup, source) - item_yes = _parse(source, state, verbose, nested + 1) - if source.match("|"): - item_no = _parse(source, state, verbose, nested + 1) - if source.next == "|": - raise source.error("conditional backref with more than two branches") - else: - item_no = None - if not source.match(")"): - raise source.error("missing ), unterminated subpattern", - source.tell() - start) - subpatternappend((GROUPREF_EXISTS, (condgroup, item_yes, item_no))) - continue - - elif char == ">": - # non-capturing, atomic group - capture = False - atomic = True - elif char in FLAGS or char == "-": - # flags - flags = _parse_flags(source, state, char) - if flags is None: # global flags - if not first or subpattern: - raise source.error('global flags not at the start ' - 'of the expression', - source.tell() - start) - verbose = state.flags & SRE_FLAG_VERBOSE - continue - - add_flags, del_flags = flags - capture = False - else: - raise source.error("unknown extension ?" 
+ char, - len(char) + 1) - - # parse group contents - if capture: - try: - group = state.opengroup(name) - except error as err: - raise source.error(err.msg, len(name) + 1) from None - else: - group = None - sub_verbose = ((verbose or (add_flags & SRE_FLAG_VERBOSE)) and - not (del_flags & SRE_FLAG_VERBOSE)) - p = _parse_sub(source, state, sub_verbose, nested + 1) - if not source.match(")"): - raise source.error("missing ), unterminated subpattern", - source.tell() - start) - if group is not None: - state.closegroup(group, p) - if atomic: - assert group is None - subpatternappend((ATOMIC_GROUP, p)) - else: - subpatternappend((SUBPATTERN, (group, add_flags, del_flags, p))) - - elif this == "^": - subpatternappend((AT, AT_BEGINNING)) - - elif this == "$": - subpatternappend((AT, AT_END)) - - else: - raise AssertionError("unsupported special character %r" % (char,)) - - # unpack non-capturing groups - for i in range(len(subpattern))[::-1]: - op, av = subpattern[i] - if op is SUBPATTERN: - group, add_flags, del_flags, p = av - if group is None and not add_flags and not del_flags: - subpattern[i: i+1] = p - - return subpattern - -def _parse_flags(source, state, char): - sourceget = source.get - add_flags = 0 - del_flags = 0 - if char != "-": - while True: - flag = FLAGS[char] - if source.istext: - if char == 'L': - msg = "bad inline flags: cannot use 'L' flag with a str pattern" - raise source.error(msg) - else: - if char == 'u': - msg = "bad inline flags: cannot use 'u' flag with a bytes pattern" - raise source.error(msg) - add_flags |= flag - if (flag & TYPE_FLAGS) and (add_flags & TYPE_FLAGS) != flag: - msg = "bad inline flags: flags 'a', 'u' and 'L' are incompatible" - raise source.error(msg) - char = sourceget() - if char is None: - raise source.error("missing -, : or )") - if char in ")-:": - break - if char not in FLAGS: - msg = "unknown flag" if char.isalpha() else "missing -, : or )" - raise source.error(msg, len(char)) - if char == ")": - state.flags |= add_flags - return None - if add_flags & GLOBAL_FLAGS: - raise source.error("bad inline flags: cannot turn on global flag", 1) - if char == "-": - char = sourceget() - if char is None: - raise source.error("missing flag") - if char not in FLAGS: - msg = "unknown flag" if char.isalpha() else "missing flag" - raise source.error(msg, len(char)) - while True: - flag = FLAGS[char] - if flag & TYPE_FLAGS: - msg = "bad inline flags: cannot turn off flags 'a', 'u' and 'L'" - raise source.error(msg) - del_flags |= flag - char = sourceget() - if char is None: - raise source.error("missing :") - if char == ":": - break - if char not in FLAGS: - msg = "unknown flag" if char.isalpha() else "missing :" - raise source.error(msg, len(char)) - assert char == ":" - if del_flags & GLOBAL_FLAGS: - raise source.error("bad inline flags: cannot turn off global flag", 1) - if add_flags & del_flags: - raise source.error("bad inline flags: flag turned on and off", 1) - return add_flags, del_flags - -def fix_flags(src, flags): - # Check and fix flags according to the type of pattern (str or bytes) - if isinstance(src, str): - if flags & SRE_FLAG_LOCALE: - raise ValueError("cannot use LOCALE flag with a str pattern") - if not flags & SRE_FLAG_ASCII: - flags |= SRE_FLAG_UNICODE - elif flags & SRE_FLAG_UNICODE: - raise ValueError("ASCII and UNICODE flags are incompatible") - else: - if flags & SRE_FLAG_UNICODE: - raise ValueError("cannot use UNICODE flag with a bytes pattern") - if flags & SRE_FLAG_LOCALE and flags & SRE_FLAG_ASCII: - raise ValueError("ASCII and 
LOCALE flags are incompatible") - return flags - -def parse(str, flags=0, state=None): - # parse 're' pattern into list of (opcode, argument) tuples - - source = Tokenizer(str) - - if state is None: - state = State() - state.flags = flags - state.str = str - - p = _parse_sub(source, state, flags & SRE_FLAG_VERBOSE, 0) - p.state.flags = fix_flags(str, p.state.flags) - - if source.next is not None: - assert source.next == ")" - raise source.error("unbalanced parenthesis") - - for g in p.state.grouprefpos: - if g >= p.state.groups: - msg = "invalid group reference %d" % g - raise error(msg, str, p.state.grouprefpos[g]) - - if flags & SRE_FLAG_DEBUG: - p.dump() - - return p - -def parse_template(source, pattern): - # parse 're' replacement string into list of literals and - # group references - s = Tokenizer(source) - sget = s.get - result = [] - literal = [] - lappend = literal.append - def addliteral(): - if s.istext: - result.append(''.join(literal)) - else: - # The tokenizer implicitly decodes bytes objects as latin-1, we must - # therefore re-encode the final representation. - result.append(''.join(literal).encode('latin-1')) - del literal[:] - def addgroup(index, pos): - if index > pattern.groups: - raise s.error("invalid group reference %d" % index, pos) - addliteral() - result.append(index) - groupindex = pattern.groupindex - while True: - this = sget() - if this is None: - break # end of replacement string - if this[0] == "\\": - # group - c = this[1] - if c == "g": - if not s.match("<"): - raise s.error("missing <") - name = s.getuntil(">", "group name") - if not (name.isdecimal() and name.isascii()): - s.checkgroupname(name, 1) - try: - index = groupindex[name] - except KeyError: - raise IndexError("unknown group name %r" % name) from None - else: - index = int(name) - if index >= MAXGROUPS: - raise s.error("invalid group reference %d" % index, - len(name) + 1) - if not (name.isdecimal() and name.isascii()): - import warnings - warnings.warn( - "bad character in group name %s at position %d" % - (repr(name) if s.istext else ascii(name), - s.tell() - len(name) - 1), - DeprecationWarning, stacklevel=5 - ) - addgroup(index, len(name) + 1) - elif c == "0": - if s.next in OCTDIGITS: - this += sget() - if s.next in OCTDIGITS: - this += sget() - lappend(chr(int(this[1:], 8) & 0xff)) - elif c in DIGITS: - isoctal = False - if s.next in DIGITS: - this += sget() - if (c in OCTDIGITS and this[2] in OCTDIGITS and - s.next in OCTDIGITS): - this += sget() - isoctal = True - c = int(this[1:], 8) - if c > 0o377: - raise s.error('octal escape value %s outside of ' - 'range 0-0o377' % this, len(this)) - lappend(chr(c)) - if not isoctal: - addgroup(int(this[1:]), len(this) - 1) - else: - try: - this = chr(ESCAPES[this][1]) - except KeyError: - if c in ASCIILETTERS: - raise s.error('bad escape %s' % this, len(this)) from None - lappend(this) - else: - lappend(this) - addliteral() - return result diff --git a/Python313_13_x86_Template/Lib/reprlib.py b/Python313_13_x86_Template/Lib/reprlib.py deleted file mode 100644 index f6831850..00000000 --- a/Python313_13_x86_Template/Lib/reprlib.py +++ /dev/null @@ -1,230 +0,0 @@ -"""Redo the builtin repr() (representation) but with limits on most sizes.""" - -__all__ = ["Repr", "repr", "recursive_repr"] - -import builtins -from itertools import islice -from _thread import get_ident - -def recursive_repr(fillvalue='...'): - 'Decorator to make a repr function return fillvalue for a recursive call' - - def decorating_function(user_function): - repr_running = set() - 
- def wrapper(self): - key = id(self), get_ident() - if key in repr_running: - return fillvalue - repr_running.add(key) - try: - result = user_function(self) - finally: - repr_running.discard(key) - return result - - # Can't use functools.wraps() here because of bootstrap issues - wrapper.__module__ = getattr(user_function, '__module__') - wrapper.__doc__ = getattr(user_function, '__doc__') - wrapper.__name__ = getattr(user_function, '__name__') - wrapper.__qualname__ = getattr(user_function, '__qualname__') - wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) - wrapper.__type_params__ = getattr(user_function, '__type_params__', ()) - wrapper.__wrapped__ = user_function - return wrapper - - return decorating_function - -class Repr: - _lookup = { - 'tuple': 'builtins', - 'list': 'builtins', - 'array': 'array', - 'set': 'builtins', - 'frozenset': 'builtins', - 'deque': 'collections', - 'dict': 'builtins', - 'str': 'builtins', - 'int': 'builtins' - } - - def __init__( - self, *, maxlevel=6, maxtuple=6, maxlist=6, maxarray=5, maxdict=4, - maxset=6, maxfrozenset=6, maxdeque=6, maxstring=30, maxlong=40, - maxother=30, fillvalue='...', indent=None, - ): - self.maxlevel = maxlevel - self.maxtuple = maxtuple - self.maxlist = maxlist - self.maxarray = maxarray - self.maxdict = maxdict - self.maxset = maxset - self.maxfrozenset = maxfrozenset - self.maxdeque = maxdeque - self.maxstring = maxstring - self.maxlong = maxlong - self.maxother = maxother - self.fillvalue = fillvalue - self.indent = indent - - def repr(self, x): - return self.repr1(x, self.maxlevel) - - def repr1(self, x, level): - cls = type(x) - typename = cls.__name__ - - if ' ' in typename: - parts = typename.split() - typename = '_'.join(parts) - - method = getattr(self, 'repr_' + typename, None) - if method: - # not defined in this class - if typename not in self._lookup: - return method(x, level) - module = getattr(cls, '__module__', None) - # defined in this class and is the module intended - if module == self._lookup[typename]: - return method(x, level) - - return self.repr_instance(x, level) - - def _join(self, pieces, level): - if self.indent is None: - return ', '.join(pieces) - if not pieces: - return '' - indent = self.indent - if isinstance(indent, int): - if indent < 0: - raise ValueError( - f'Repr.indent cannot be negative int (was {indent!r})' - ) - indent *= ' ' - try: - sep = ',\n' + (self.maxlevel - level + 1) * indent - except TypeError as error: - raise TypeError( - f'Repr.indent must be a str, int or None, not {type(indent)}' - ) from error - return sep.join(('', *pieces, ''))[1:-len(indent) or None] - - def _repr_iterable(self, x, level, left, right, maxiter, trail=''): - n = len(x) - if level <= 0 and n: - s = self.fillvalue - else: - newlevel = level - 1 - repr1 = self.repr1 - pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)] - if n > maxiter: - pieces.append(self.fillvalue) - s = self._join(pieces, level) - if n == 1 and trail and self.indent is None: - right = trail + right - return '%s%s%s' % (left, s, right) - - def repr_tuple(self, x, level): - return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',') - - def repr_list(self, x, level): - return self._repr_iterable(x, level, '[', ']', self.maxlist) - - def repr_array(self, x, level): - if not x: - return "array('%s')" % x.typecode - header = "array('%s', [" % x.typecode - return self._repr_iterable(x, level, header, '])', self.maxarray) - - def repr_set(self, x, level): - if not x: - return 'set()' - x = 
_possibly_sorted(x) - return self._repr_iterable(x, level, '{', '}', self.maxset) - - def repr_frozenset(self, x, level): - if not x: - return 'frozenset()' - x = _possibly_sorted(x) - return self._repr_iterable(x, level, 'frozenset({', '})', - self.maxfrozenset) - - def repr_deque(self, x, level): - return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque) - - def repr_dict(self, x, level): - n = len(x) - if n == 0: - return '{}' - if level <= 0: - return '{' + self.fillvalue + '}' - newlevel = level - 1 - repr1 = self.repr1 - pieces = [] - for key in islice(_possibly_sorted(x), self.maxdict): - keyrepr = repr1(key, newlevel) - valrepr = repr1(x[key], newlevel) - pieces.append('%s: %s' % (keyrepr, valrepr)) - if n > self.maxdict: - pieces.append(self.fillvalue) - s = self._join(pieces, level) - return '{%s}' % (s,) - - def repr_str(self, x, level): - s = builtins.repr(x[:self.maxstring]) - if len(s) > self.maxstring: - i = max(0, (self.maxstring-3)//2) - j = max(0, self.maxstring-3-i) - s = builtins.repr(x[:i] + x[len(x)-j:]) - s = s[:i] + self.fillvalue + s[len(s)-j:] - return s - - def repr_int(self, x, level): - try: - s = builtins.repr(x) - except ValueError as exc: - assert 'sys.set_int_max_str_digits()' in str(exc) - # Those imports must be deferred due to Python's build system - # where the reprlib module is imported before the math module. - import math, sys - # Integers with more than sys.get_int_max_str_digits() digits - # are rendered differently as their repr() raises a ValueError. - # See https://github.com/python/cpython/issues/135487. - k = 1 + int(math.log10(abs(x))) - # Note: math.log10(abs(x)) may be overestimated or underestimated, - # but for simplicity, we do not compute the exact number of digits. - max_digits = sys.get_int_max_str_digits() - return (f'<{x.__class__.__name__} instance with roughly {k} ' - f'digits (limit at {max_digits}) at 0x{id(x):x}>') - if len(s) > self.maxlong: - i = max(0, (self.maxlong-3)//2) - j = max(0, self.maxlong-3-i) - s = s[:i] + self.fillvalue + s[len(s)-j:] - return s - - def repr_instance(self, x, level): - try: - s = builtins.repr(x) - # Bugs in x.__repr__() can cause arbitrary - # exceptions -- then make up something - except Exception: - return '<%s instance at %#x>' % (x.__class__.__name__, id(x)) - if len(s) > self.maxother: - i = max(0, (self.maxother-3)//2) - j = max(0, self.maxother-3-i) - s = s[:i] + self.fillvalue + s[len(s)-j:] - return s - - -def _possibly_sorted(x): - # Since not all sequences of items can be sorted and comparison - # functions may raise arbitrary exceptions, return an unsorted - # sequence in that case. - try: - return sorted(x) - except Exception: - return list(x) - -aRepr = Repr() -repr = aRepr.repr diff --git a/Python313_13_x86_Template/Lib/shlex.py b/Python313_13_x86_Template/Lib/shlex.py deleted file mode 100644 index f4821616..00000000 --- a/Python313_13_x86_Template/Lib/shlex.py +++ /dev/null @@ -1,345 +0,0 @@ -"""A lexical analyzer class for simple shell-like syntaxes.""" - -# Module and documentation by Eric S. Raymond, 21 Dec 1998 -# Input stacking and error message cleanup added by ESR, March 2000 -# push_source() and pop_source() made explicit by ESR, January 2001. -# Posix compliance, split(), string arguments, and -# iterator interface by Gustavo Niemeyer, April 2003. -# changes to tokenize more like Posix shells by Vinay Sajip, July 2016. 
- -import os -import re -import sys -from collections import deque - -from io import StringIO - -__all__ = ["shlex", "split", "quote", "join"] - -class shlex: - "A lexical analyzer class for simple shell-like syntaxes." - def __init__(self, instream=None, infile=None, posix=False, - punctuation_chars=False): - if isinstance(instream, str): - instream = StringIO(instream) - if instream is not None: - self.instream = instream - self.infile = infile - else: - self.instream = sys.stdin - self.infile = None - self.posix = posix - if posix: - self.eof = None - else: - self.eof = '' - self.commenters = '#' - self.wordchars = ('abcdfeghijklmnopqrstuvwxyz' - 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_') - if self.posix: - self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ' - 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ') - self.whitespace = ' \t\r\n' - self.whitespace_split = False - self.quotes = '\'"' - self.escape = '\\' - self.escapedquotes = '"' - self.state = ' ' - self.pushback = deque() - self.lineno = 1 - self.debug = 0 - self.token = '' - self.filestack = deque() - self.source = None - if not punctuation_chars: - punctuation_chars = '' - elif punctuation_chars is True: - punctuation_chars = '();<>|&' - self._punctuation_chars = punctuation_chars - if punctuation_chars: - # _pushback_chars is a push back queue used by lookahead logic - self._pushback_chars = deque() - # these chars added because allowed in file names, args, wildcards - self.wordchars += '~-./*?=' - #remove any punctuation chars from wordchars - t = self.wordchars.maketrans(dict.fromkeys(punctuation_chars)) - self.wordchars = self.wordchars.translate(t) - - @property - def punctuation_chars(self): - return self._punctuation_chars - - def push_token(self, tok): - "Push a token onto the stack popped by the get_token method" - if self.debug >= 1: - print("shlex: pushing token " + repr(tok)) - self.pushback.appendleft(tok) - - def push_source(self, newstream, newfile=None): - "Push an input source onto the lexer's input source stack." - if isinstance(newstream, str): - newstream = StringIO(newstream) - self.filestack.appendleft((self.infile, self.instream, self.lineno)) - self.infile = newfile - self.instream = newstream - self.lineno = 1 - if self.debug: - if newfile is not None: - print('shlex: pushing to file %s' % (self.infile,)) - else: - print('shlex: pushing to stream %s' % (self.instream,)) - - def pop_source(self): - "Pop the input source stack." - self.instream.close() - (self.infile, self.instream, self.lineno) = self.filestack.popleft() - if self.debug: - print('shlex: popping to %s, line %d' \ - % (self.instream, self.lineno)) - self.state = ' ' - - def get_token(self): - "Get a token from the input stream (or from stack if it's nonempty)" - if self.pushback: - tok = self.pushback.popleft() - if self.debug >= 1: - print("shlex: popping token " + repr(tok)) - return tok - # No pushback. Get a token. - raw = self.read_token() - # Handle inclusions - if self.source is not None: - while raw == self.source: - spec = self.sourcehook(self.read_token()) - if spec: - (newfile, newstream) = spec - self.push_source(newstream, newfile) - raw = self.get_token() - # Maybe we got EOF instead? 
- while raw == self.eof: - if not self.filestack: - return self.eof - else: - self.pop_source() - raw = self.get_token() - # Neither inclusion nor EOF - if self.debug >= 1: - if raw != self.eof: - print("shlex: token=" + repr(raw)) - else: - print("shlex: token=EOF") - return raw - - def read_token(self): - quoted = False - escapedstate = ' ' - while True: - if self.punctuation_chars and self._pushback_chars: - nextchar = self._pushback_chars.pop() - else: - nextchar = self.instream.read(1) - if nextchar == '\n': - self.lineno += 1 - if self.debug >= 3: - print("shlex: in state %r I see character: %r" % (self.state, - nextchar)) - if self.state is None: - self.token = '' # past end of file - break - elif self.state == ' ': - if not nextchar: - self.state = None # end of file - break - elif nextchar in self.whitespace: - if self.debug >= 2: - print("shlex: I see whitespace in whitespace state") - if self.token or (self.posix and quoted): - break # emit current token - else: - continue - elif nextchar in self.commenters: - self.instream.readline() - self.lineno += 1 - elif self.posix and nextchar in self.escape: - escapedstate = 'a' - self.state = nextchar - elif nextchar in self.wordchars: - self.token = nextchar - self.state = 'a' - elif nextchar in self.punctuation_chars: - self.token = nextchar - self.state = 'c' - elif nextchar in self.quotes: - if not self.posix: - self.token = nextchar - self.state = nextchar - elif self.whitespace_split: - self.token = nextchar - self.state = 'a' - else: - self.token = nextchar - if self.token or (self.posix and quoted): - break # emit current token - else: - continue - elif self.state in self.quotes: - quoted = True - if not nextchar: # end of file - if self.debug >= 2: - print("shlex: I see EOF in quotes state") - # XXX what error should be raised here? - raise ValueError("No closing quotation") - if nextchar == self.state: - if not self.posix: - self.token += nextchar - self.state = ' ' - break - else: - self.state = 'a' - elif (self.posix and nextchar in self.escape and self.state - in self.escapedquotes): - escapedstate = self.state - self.state = nextchar - else: - self.token += nextchar - elif self.state in self.escape: - if not nextchar: # end of file - if self.debug >= 2: - print("shlex: I see EOF in escape state") - # XXX what error should be raised here? - raise ValueError("No escaped character") - # In posix shells, only the quote itself or the escape - # character may be escaped within quotes. 
- if (escapedstate in self.quotes and - nextchar != self.state and nextchar != escapedstate): - self.token += self.state - self.token += nextchar - self.state = escapedstate - elif self.state in ('a', 'c'): - if not nextchar: - self.state = None # end of file - break - elif nextchar in self.whitespace: - if self.debug >= 2: - print("shlex: I see whitespace in word state") - self.state = ' ' - if self.token or (self.posix and quoted): - break # emit current token - else: - continue - elif nextchar in self.commenters: - self.instream.readline() - self.lineno += 1 - if self.posix: - self.state = ' ' - if self.token or (self.posix and quoted): - break # emit current token - else: - continue - elif self.state == 'c': - if nextchar in self.punctuation_chars: - self.token += nextchar - else: - if nextchar not in self.whitespace: - self._pushback_chars.append(nextchar) - self.state = ' ' - break - elif self.posix and nextchar in self.quotes: - self.state = nextchar - elif self.posix and nextchar in self.escape: - escapedstate = 'a' - self.state = nextchar - elif (nextchar in self.wordchars or nextchar in self.quotes - or (self.whitespace_split and - nextchar not in self.punctuation_chars)): - self.token += nextchar - else: - if self.punctuation_chars: - self._pushback_chars.append(nextchar) - else: - self.pushback.appendleft(nextchar) - if self.debug >= 2: - print("shlex: I see punctuation in word state") - self.state = ' ' - if self.token or (self.posix and quoted): - break # emit current token - else: - continue - result = self.token - self.token = '' - if self.posix and not quoted and result == '': - result = None - if self.debug > 1: - if result: - print("shlex: raw token=" + repr(result)) - else: - print("shlex: raw token=EOF") - return result - - def sourcehook(self, newfile): - "Hook called on a filename to be sourced." - if newfile[0] == '"': - newfile = newfile[1:-1] - # This implements cpp-like semantics for relative-path inclusion. - if isinstance(self.infile, str) and not os.path.isabs(newfile): - newfile = os.path.join(os.path.dirname(self.infile), newfile) - return (newfile, open(newfile, "r")) - - def error_leader(self, infile=None, lineno=None): - "Emit a C-compiler-like, Emacs-friendly error-message leader." 
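The read_token() state machine above is rarely driven by hand; the usual entry points are the iterator protocol and shlex.split(), both defined further down in this hunk. A minimal sketch, assuming a POSIX-style command line:

    import shlex

    # punctuation_chars=True tokenizes shell operators such as '|' separately.
    lexer = shlex.shlex('ls -l | wc -l', posix=True, punctuation_chars=True)
    lexer.whitespace_split = True
    print(list(lexer))                  # ['ls', '-l', '|', 'wc', '-l']

    print(shlex.split('grep "a b" f'))  # ['grep', 'a b', 'f']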
- if infile is None: - infile = self.infile - if lineno is None: - lineno = self.lineno - return "\"%s\", line %d: " % (infile, lineno) - - def __iter__(self): - return self - - def __next__(self): - token = self.get_token() - if token == self.eof: - raise StopIteration - return token - -def split(s, comments=False, posix=True): - """Split the string *s* using shell-like syntax.""" - if s is None: - raise ValueError("s argument must not be None") - lex = shlex(s, posix=posix) - lex.whitespace_split = True - if not comments: - lex.commenters = '' - return list(lex) - - -def join(split_command): - """Return a shell-escaped string from *split_command*.""" - return ' '.join(quote(arg) for arg in split_command) - - -_find_unsafe = re.compile(r'[^\w@%+=:,./-]', re.ASCII).search - -def quote(s): - """Return a shell-escaped version of the string *s*.""" - if not s: - return "''" - if _find_unsafe(s) is None: - return s - - # use single quotes, and put single quotes into double quotes - # the string $'b is then quoted as '$'"'"'b' - return "'" + s.replace("'", "'\"'\"'") + "'" - - -def _print_tokens(lexer): - while tt := lexer.get_token(): - print("Token: " + repr(tt)) - -if __name__ == '__main__': - if len(sys.argv) == 1: - _print_tokens(shlex()) - else: - fn = sys.argv[1] - with open(fn) as f: - _print_tokens(shlex(f, fn)) diff --git a/Python313_13_x86_Template/Lib/shutil.py b/Python313_13_x86_Template/Lib/shutil.py deleted file mode 100644 index 7df97201..00000000 --- a/Python313_13_x86_Template/Lib/shutil.py +++ /dev/null @@ -1,1583 +0,0 @@ -"""Utility functions for copying and archiving files and directory trees. - -XXX The functions here don't copy the resource fork or other metadata on Mac. - -""" - -import os -import sys -import stat -import fnmatch -import collections -import errno - -try: - import zlib - del zlib - _ZLIB_SUPPORTED = True -except ImportError: - _ZLIB_SUPPORTED = False - -try: - import bz2 - del bz2 - _BZ2_SUPPORTED = True -except ImportError: - _BZ2_SUPPORTED = False - -try: - import lzma - del lzma - _LZMA_SUPPORTED = True -except ImportError: - _LZMA_SUPPORTED = False - -_WINDOWS = os.name == 'nt' -posix = nt = None -if os.name == 'posix': - import posix -elif _WINDOWS: - import nt - -if sys.platform == 'win32': - import _winapi -else: - _winapi = None - -COPY_BUFSIZE = 1024 * 1024 if _WINDOWS else 64 * 1024 -# This should never be removed, see rationale in: -# https://bugs.python.org/issue43743#msg393429 -_USE_CP_SENDFILE = (hasattr(os, "sendfile") - and sys.platform.startswith(("linux", "android"))) -_HAS_FCOPYFILE = posix and hasattr(posix, "_fcopyfile") # macOS - -# CMD defaults in Windows 10 -_WIN_DEFAULT_PATHEXT = ".COM;.EXE;.BAT;.CMD;.VBS;.JS;.WS;.MSC" - -__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", - "copytree", "move", "rmtree", "Error", "SpecialFileError", - "ExecError", "make_archive", "get_archive_formats", - "register_archive_format", "unregister_archive_format", - "get_unpack_formats", "register_unpack_format", - "unregister_unpack_format", "unpack_archive", - "ignore_patterns", "chown", "which", "get_terminal_size", - "SameFileError"] - # disk_usage is added later, if available on the platform - -class Error(OSError): - pass - -class SameFileError(Error): - """Raised when source and destination are the same file.""" - -class SpecialFileError(OSError): - """Raised when trying to do a kind of operation (e.g. copying) which is - not supported on a special file (e.g. 
a named pipe)""" - -class ExecError(OSError): - """Raised when a command could not be executed""" - -class ReadError(OSError): - """Raised when an archive cannot be read""" - -class RegistryError(Exception): - """Raised when a registry operation with the archiving - and unpacking registries fails""" - -class _GiveupOnFastCopy(Exception): - """Raised as a signal to fallback on using raw read()/write() - file copy when fast-copy functions fail to do so. - """ - -def _fastcopy_fcopyfile(fsrc, fdst, flags): - """Copy a regular file content or metadata by using high-performance - fcopyfile(3) syscall (macOS). - """ - try: - infd = fsrc.fileno() - outfd = fdst.fileno() - except Exception as err: - raise _GiveupOnFastCopy(err) # not a regular file - - try: - posix._fcopyfile(infd, outfd, flags) - except OSError as err: - err.filename = fsrc.name - err.filename2 = fdst.name - if err.errno in {errno.EINVAL, errno.ENOTSUP}: - raise _GiveupOnFastCopy(err) - else: - raise err from None - -def _fastcopy_sendfile(fsrc, fdst): - """Copy data from one regular mmap-like fd to another by using - high-performance sendfile(2) syscall. - This should work on Linux >= 2.6.33 only. - """ - # Note: copyfileobj() is left alone in order to not introduce any - # unexpected breakage. Possible risks by using zero-copy calls - # in copyfileobj() are: - # - fdst cannot be open in "a"(ppend) mode - # - fsrc and fdst may be open in "t"(ext) mode - # - fsrc may be a BufferedReader (which hides unread data in a buffer), - # GzipFile (which decompresses data), HTTPResponse (which decodes - # chunks). - # - possibly others (e.g. encrypted fs/partition?) - global _USE_CP_SENDFILE - try: - infd = fsrc.fileno() - outfd = fdst.fileno() - except Exception as err: - raise _GiveupOnFastCopy(err) # not a regular file - - # Hopefully the whole file will be copied in a single call. - # sendfile() is called in a loop 'till EOF is reached (0 return) - # so a bufsize smaller or bigger than the actual file size - # should not make any difference, also in case the file content - # changes while being copied. - try: - blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8MiB - except OSError: - blocksize = 2 ** 27 # 128MiB - # On 32-bit architectures truncate to 1GiB to avoid OverflowError, - # see bpo-38319. - if sys.maxsize < 2 ** 32: - blocksize = min(blocksize, 2 ** 30) - - offset = 0 - while True: - try: - sent = os.sendfile(outfd, infd, offset, blocksize) - except OSError as err: - # ...in oder to have a more informative exception. - err.filename = fsrc.name - err.filename2 = fdst.name - - if err.errno == errno.ENOTSOCK: - # sendfile() on this platform (probably Linux < 2.6.33) - # does not support copies between regular files (only - # sockets). - _USE_CP_SENDFILE = False - raise _GiveupOnFastCopy(err) - - if err.errno == errno.ENOSPC: # filesystem is full - raise err from None - - # Give up on first call and if no data was copied. - if offset == 0 and os.lseek(outfd, 0, os.SEEK_CUR) == 0: - raise _GiveupOnFastCopy(err) - - raise err - else: - if sent == 0: - break # EOF - offset += sent - -def _copyfileobj_readinto(fsrc, fdst, length=COPY_BUFSIZE): - """readinto()/memoryview() based variant of copyfileobj(). - *fsrc* must support readinto() method and both files must be - open in binary mode. - """ - # Localize variable access to minimize overhead. 
- fsrc_readinto = fsrc.readinto - fdst_write = fdst.write - with memoryview(bytearray(length)) as mv: - while True: - n = fsrc_readinto(mv) - if not n: - break - elif n < length: - with mv[:n] as smv: - fdst_write(smv) - break - else: - fdst_write(mv) - -def copyfileobj(fsrc, fdst, length=0): - """copy data from file-like object fsrc to file-like object fdst""" - if not length: - length = COPY_BUFSIZE - # Localize variable access to minimize overhead. - fsrc_read = fsrc.read - fdst_write = fdst.write - while buf := fsrc_read(length): - fdst_write(buf) - -def _samefile(src, dst): - # Macintosh, Unix. - if isinstance(src, os.DirEntry) and hasattr(os.path, 'samestat'): - try: - return os.path.samestat(src.stat(), os.stat(dst)) - except OSError: - return False - - if hasattr(os.path, 'samefile'): - try: - return os.path.samefile(src, dst) - except OSError: - return False - - # All other platforms: check for same pathname. - return (os.path.normcase(os.path.abspath(src)) == - os.path.normcase(os.path.abspath(dst))) - -def _stat(fn): - return fn.stat() if isinstance(fn, os.DirEntry) else os.stat(fn) - -def _islink(fn): - return fn.is_symlink() if isinstance(fn, os.DirEntry) else os.path.islink(fn) - -def copyfile(src, dst, *, follow_symlinks=True): - """Copy data from src to dst in the most efficient way possible. - - If follow_symlinks is not set and src is a symbolic link, a new - symlink will be created instead of copying the file it points to. - - """ - sys.audit("shutil.copyfile", src, dst) - - if _samefile(src, dst): - raise SameFileError("{!r} and {!r} are the same file".format(src, dst)) - - file_size = 0 - for i, fn in enumerate([src, dst]): - try: - st = _stat(fn) - except OSError: - # File most likely does not exist - pass - else: - # XXX What about other special files? (sockets, devices...) - if stat.S_ISFIFO(st.st_mode): - fn = fn.path if isinstance(fn, os.DirEntry) else fn - raise SpecialFileError("`%s` is a named pipe" % fn) - if _WINDOWS and i == 0: - file_size = st.st_size - - if not follow_symlinks and _islink(src): - os.symlink(os.readlink(src), dst) - else: - with open(src, 'rb') as fsrc: - try: - with open(dst, 'wb') as fdst: - # macOS - if _HAS_FCOPYFILE: - try: - _fastcopy_fcopyfile(fsrc, fdst, posix._COPYFILE_DATA) - return dst - except _GiveupOnFastCopy: - pass - # Linux - elif _USE_CP_SENDFILE: - try: - _fastcopy_sendfile(fsrc, fdst) - return dst - except _GiveupOnFastCopy: - pass - # Windows, see: - # https://github.com/python/cpython/pull/7160#discussion_r195405230 - elif _WINDOWS and file_size > 0: - _copyfileobj_readinto(fsrc, fdst, min(file_size, COPY_BUFSIZE)) - return dst - - copyfileobj(fsrc, fdst) - - # Issue 43219, raise a less confusing exception - except IsADirectoryError as e: - if not os.path.exists(dst): - raise FileNotFoundError(f'Directory does not exist: {dst}') from e - else: - raise - - return dst - -def copymode(src, dst, *, follow_symlinks=True): - """Copy mode bits from src to dst. - - If follow_symlinks is not set, symlinks aren't followed if and only - if both `src` and `dst` are symlinks. If `lchmod` isn't available - (e.g. Linux) this method does nothing. 
- - """ - sys.audit("shutil.copymode", src, dst) - - if not follow_symlinks and _islink(src) and os.path.islink(dst): - if hasattr(os, 'lchmod'): - stat_func, chmod_func = os.lstat, os.lchmod - else: - return - else: - stat_func = _stat - if os.name == 'nt' and os.path.islink(dst): - def chmod_func(*args): - os.chmod(*args, follow_symlinks=True) - else: - chmod_func = os.chmod - - st = stat_func(src) - chmod_func(dst, stat.S_IMODE(st.st_mode)) - -if hasattr(os, 'listxattr'): - def _copyxattr(src, dst, *, follow_symlinks=True): - """Copy extended filesystem attributes from `src` to `dst`. - - Overwrite existing attributes. - - If `follow_symlinks` is false, symlinks won't be followed. - - """ - - try: - names = os.listxattr(src, follow_symlinks=follow_symlinks) - except OSError as e: - if e.errno not in (errno.ENOTSUP, errno.ENODATA, errno.EINVAL): - raise - return - for name in names: - try: - value = os.getxattr(src, name, follow_symlinks=follow_symlinks) - os.setxattr(dst, name, value, follow_symlinks=follow_symlinks) - except OSError as e: - if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA, - errno.EINVAL, errno.EACCES): - raise -else: - def _copyxattr(*args, **kwargs): - pass - -def copystat(src, dst, *, follow_symlinks=True): - """Copy file metadata - - Copy the permission bits, last access time, last modification time, and - flags from `src` to `dst`. On Linux, copystat() also copies the "extended - attributes" where possible. The file contents, owner, and group are - unaffected. `src` and `dst` are path-like objects or path names given as - strings. - - If the optional flag `follow_symlinks` is not set, symlinks aren't - followed if and only if both `src` and `dst` are symlinks. - """ - sys.audit("shutil.copystat", src, dst) - - def _nop(*args, ns=None, follow_symlinks=None): - pass - - # follow symlinks (aka don't not follow symlinks) - follow = follow_symlinks or not (_islink(src) and os.path.islink(dst)) - if follow: - # use the real function if it exists - def lookup(name): - return getattr(os, name, _nop) - else: - # use the real function only if it exists - # *and* it supports follow_symlinks - def lookup(name): - fn = getattr(os, name, _nop) - if fn in os.supports_follow_symlinks: - return fn - return _nop - - if isinstance(src, os.DirEntry): - st = src.stat(follow_symlinks=follow) - else: - st = lookup("stat")(src, follow_symlinks=follow) - mode = stat.S_IMODE(st.st_mode) - lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns), - follow_symlinks=follow) - # We must copy extended attributes before the file is (potentially) - # chmod()'ed read-only, otherwise setxattr() will error with -EACCES. - _copyxattr(src, dst, follow_symlinks=follow) - try: - lookup("chmod")(dst, mode, follow_symlinks=follow) - except NotImplementedError: - # if we got a NotImplementedError, it's because - # * follow_symlinks=False, - # * lchown() is unavailable, and - # * either - # * fchownat() is unavailable or - # * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW. - # (it returned ENOSUP.) - # therefore we're out of options--we simply cannot chown the - # symlink. give up, suppress the error. - # (which is what shutil always did in this circumstance.) 
- pass - if hasattr(st, 'st_flags'): - try: - lookup("chflags")(dst, st.st_flags, follow_symlinks=follow) - except OSError as why: - for err in 'EOPNOTSUPP', 'ENOTSUP': - if hasattr(errno, err) and why.errno == getattr(errno, err): - break - else: - raise - -def copy(src, dst, *, follow_symlinks=True): - """Copy data and mode bits ("cp src dst"). Return the file's destination. - - The destination may be a directory. - - If follow_symlinks is false, symlinks won't be followed. This - resembles GNU's "cp -P src dst". - - If source and destination are the same file, a SameFileError will be - raised. - - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - copyfile(src, dst, follow_symlinks=follow_symlinks) - copymode(src, dst, follow_symlinks=follow_symlinks) - return dst - -def copy2(src, dst, *, follow_symlinks=True): - """Copy data and metadata. Return the file's destination. - - Metadata is copied with copystat(). Please see the copystat function - for more information. - - The destination may be a directory. - - If follow_symlinks is false, symlinks won't be followed. This - resembles GNU's "cp -P src dst". - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - - if hasattr(_winapi, "CopyFile2"): - src_ = os.fsdecode(src) - dst_ = os.fsdecode(dst) - flags = _winapi.COPY_FILE_ALLOW_DECRYPTED_DESTINATION # for compat - if not follow_symlinks: - flags |= _winapi.COPY_FILE_COPY_SYMLINK - try: - _winapi.CopyFile2(src_, dst_, flags) - return dst - except OSError as exc: - if (exc.winerror == _winapi.ERROR_PRIVILEGE_NOT_HELD - and not follow_symlinks): - # Likely encountered a symlink we aren't allowed to create. - # Fall back on the old code - pass - elif exc.winerror == _winapi.ERROR_ACCESS_DENIED: - # Possibly encountered a hidden or readonly file we can't - # overwrite. Fall back on old code - pass - else: - raise - - copyfile(src, dst, follow_symlinks=follow_symlinks) - copystat(src, dst, follow_symlinks=follow_symlinks) - return dst - -def ignore_patterns(*patterns): - """Function that can be used as copytree() ignore parameter. - - Patterns is a sequence of glob-style patterns - that are used to exclude files""" - def _ignore_patterns(path, names): - ignored_names = [] - for pattern in patterns: - ignored_names.extend(fnmatch.filter(names, pattern)) - return set(ignored_names) - return _ignore_patterns - -def _copytree(entries, src, dst, symlinks, ignore, copy_function, - ignore_dangling_symlinks, dirs_exist_ok=False): - if ignore is not None: - ignored_names = ignore(os.fspath(src), [x.name for x in entries]) - else: - ignored_names = () - - os.makedirs(dst, exist_ok=dirs_exist_ok) - errors = [] - use_srcentry = copy_function is copy2 or copy_function is copy - - for srcentry in entries: - if srcentry.name in ignored_names: - continue - srcname = os.path.join(src, srcentry.name) - dstname = os.path.join(dst, srcentry.name) - srcobj = srcentry if use_srcentry else srcname - try: - is_symlink = srcentry.is_symlink() - if is_symlink and os.name == 'nt': - # Special check for directory junctions, which appear as - # symlinks but we want to recurse. - lstat = srcentry.stat(follow_symlinks=False) - if lstat.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT: - is_symlink = False - if is_symlink: - linkto = os.readlink(srcname) - if symlinks: - # We can't just leave it to `copy_function` because legacy - # code with a custom `copy_function` may rely on copytree - # doing the right thing. 
- os.symlink(linkto, dstname) - copystat(srcobj, dstname, follow_symlinks=not symlinks) - else: - # ignore dangling symlink if the flag is on - if not os.path.exists(linkto) and ignore_dangling_symlinks: - continue - # otherwise let the copy occur. copy2 will raise an error - if srcentry.is_dir(): - copytree(srcobj, dstname, symlinks, ignore, - copy_function, ignore_dangling_symlinks, - dirs_exist_ok) - else: - copy_function(srcobj, dstname) - elif srcentry.is_dir(): - copytree(srcobj, dstname, symlinks, ignore, copy_function, - ignore_dangling_symlinks, dirs_exist_ok) - else: - # Will raise a SpecialFileError for unsupported file types - copy_function(srcobj, dstname) - # catch the Error from the recursive copytree so that we can - # continue with other files - except Error as err: - errors.extend(err.args[0]) - except OSError as why: - errors.append((srcname, dstname, str(why))) - try: - copystat(src, dst) - except OSError as why: - # Copying file access times may fail on Windows - if getattr(why, 'winerror', None) is None: - errors.append((src, dst, str(why))) - if errors: - raise Error(errors) - return dst - -def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, - ignore_dangling_symlinks=False, dirs_exist_ok=False): - """Recursively copy a directory tree and return the destination directory. - - If exception(s) occur, an Error is raised with a list of reasons. - - If the optional symlinks flag is true, symbolic links in the - source tree result in symbolic links in the destination tree; if - it is false, the contents of the files pointed to by symbolic - links are copied. If the file pointed to by the symlink doesn't - exist, an exception will be added in the list of errors raised in - an Error exception at the end of the copy process. - - You can set the optional ignore_dangling_symlinks flag to true if you - want to silence this exception. Notice that this has no effect on - platforms that don't support os.symlink. - - The optional ignore argument is a callable. If given, it - is called with the `src` parameter, which is the directory - being visited by copytree(), and `names` which is the list of - `src` contents, as returned by os.listdir(): - - callable(src, names) -> ignored_names - - Since copytree() is called recursively, the callable will be - called once for each directory that is copied. It returns a - list of names relative to the `src` directory that should - not be copied. - - The optional copy_function argument is a callable that will be used - to copy each file. It will be called with the source path and the - destination path as arguments. By default, copy2() is used, but any - function that supports the same signature (like copy()) can be used. - - If dirs_exist_ok is false (the default) and `dst` already exists, a - `FileExistsError` is raised. If `dirs_exist_ok` is true, the copying - operation will continue if it encounters existing directories, and files - within the `dst` tree will be overwritten by corresponding files from the - `src` tree. 
- """ - sys.audit("shutil.copytree", src, dst) - with os.scandir(src) as itr: - entries = list(itr) - return _copytree(entries=entries, src=src, dst=dst, symlinks=symlinks, - ignore=ignore, copy_function=copy_function, - ignore_dangling_symlinks=ignore_dangling_symlinks, - dirs_exist_ok=dirs_exist_ok) - -if hasattr(os.stat_result, 'st_file_attributes'): - def _rmtree_islink(st): - return (stat.S_ISLNK(st.st_mode) or - (st.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT - and st.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT)) -else: - def _rmtree_islink(st): - return stat.S_ISLNK(st.st_mode) - -# version vulnerable to race conditions -def _rmtree_unsafe(path, onexc): - def onerror(err): - if not isinstance(err, FileNotFoundError): - onexc(os.scandir, err.filename, err) - results = os.walk(path, topdown=False, onerror=onerror, followlinks=os._walk_symlinks_as_files) - for dirpath, dirnames, filenames in results: - for name in dirnames: - fullname = os.path.join(dirpath, name) - try: - os.rmdir(fullname) - except FileNotFoundError: - continue - except OSError as err: - onexc(os.rmdir, fullname, err) - for name in filenames: - fullname = os.path.join(dirpath, name) - try: - os.unlink(fullname) - except FileNotFoundError: - continue - except OSError as err: - onexc(os.unlink, fullname, err) - try: - os.rmdir(path) - except FileNotFoundError: - pass - except OSError as err: - onexc(os.rmdir, path, err) - -# Version using fd-based APIs to protect against races -def _rmtree_safe_fd(stack, onexc): - # Each stack item has four elements: - # * func: The first operation to perform: os.lstat, os.close or os.rmdir. - # Walking a directory starts with an os.lstat() to detect symlinks; in - # this case, func is updated before subsequent operations and passed to - # onexc() if an error occurs. - # * dirfd: Open file descriptor, or None if we're processing the top-level - # directory given to rmtree() and the user didn't supply dir_fd. - # * path: Path of file to operate upon. This is passed to onexc() if an - # error occurs. - # * orig_entry: os.DirEntry, or None if we're processing the top-level - # directory given to rmtree(). We used the cached stat() of the entry to - # save a call to os.lstat() when walking subdirectories. - func, dirfd, path, orig_entry = stack.pop() - name = path if orig_entry is None else orig_entry.name - try: - if func is os.close: - os.close(dirfd) - return - if func is os.rmdir: - os.rmdir(name, dir_fd=dirfd) - return - - # Note: To guard against symlink races, we use the standard - # lstat()/open()/fstat() trick. - assert func is os.lstat - if orig_entry is None: - orig_st = os.lstat(name, dir_fd=dirfd) - else: - orig_st = orig_entry.stat(follow_symlinks=False) - - func = os.open # For error reporting. - topfd = os.open(name, os.O_RDONLY | os.O_NONBLOCK, dir_fd=dirfd) - - func = os.path.islink # For error reporting. - try: - if not os.path.samestat(orig_st, os.fstat(topfd)): - # Symlinks to directories are forbidden, see GH-46010. - raise OSError("Cannot call rmtree on a symbolic link") - stack.append((os.rmdir, dirfd, path, orig_entry)) - finally: - stack.append((os.close, topfd, path, orig_entry)) - - func = os.scandir # For error reporting. - with os.scandir(topfd) as scandir_it: - entries = list(scandir_it) - for entry in entries: - fullname = os.path.join(path, entry.name) - try: - if entry.is_dir(follow_symlinks=False): - # Traverse into sub-directory. 
- stack.append((os.lstat, topfd, fullname, entry)) - continue - except FileNotFoundError: - continue - except OSError: - pass - try: - os.unlink(entry.name, dir_fd=topfd) - except FileNotFoundError: - continue - except OSError as err: - onexc(os.unlink, fullname, err) - except FileNotFoundError as err: - if orig_entry is None or func is os.close: - err.filename = path - onexc(func, path, err) - except OSError as err: - err.filename = path - onexc(func, path, err) - -_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <= - os.supports_dir_fd and - os.scandir in os.supports_fd and - os.stat in os.supports_follow_symlinks) - -def rmtree(path, ignore_errors=False, onerror=None, *, onexc=None, dir_fd=None): - """Recursively delete a directory tree. - - If dir_fd is not None, it should be a file descriptor open to a directory; - path will then be relative to that directory. - dir_fd may not be implemented on your platform. - If it is unavailable, using it will raise a NotImplementedError. - - If ignore_errors is set, errors are ignored; otherwise, if onexc or - onerror is set, it is called to handle the error with arguments (func, - path, exc_info) where func is platform and implementation dependent; - path is the argument to that function that caused it to fail; and - the value of exc_info describes the exception. For onexc it is the - exception instance, and for onerror it is a tuple as returned by - sys.exc_info(). If ignore_errors is false and both onexc and - onerror are None, the exception is reraised. - - onerror is deprecated and only remains for backwards compatibility. - If both onerror and onexc are set, onerror is ignored and onexc is used. - """ - - sys.audit("shutil.rmtree", path, dir_fd) - if ignore_errors: - def onexc(*args): - pass - elif onerror is None and onexc is None: - def onexc(*args): - raise - elif onexc is None: - if onerror is None: - def onexc(*args): - raise - else: - # delegate to onerror - def onexc(*args): - func, path, exc = args - if exc is None: - exc_info = None, None, None - else: - exc_info = type(exc), exc, exc.__traceback__ - return onerror(func, path, exc_info) - - if _use_fd_functions: - # While the unsafe rmtree works fine on bytes, the fd based does not. - if isinstance(path, bytes): - path = os.fsdecode(path) - stack = [(os.lstat, dir_fd, path, None)] - try: - while stack: - _rmtree_safe_fd(stack, onexc) - finally: - # Close any file descriptors still on the stack. - while stack: - func, fd, path, entry = stack.pop() - if func is not os.close: - continue - try: - os.close(fd) - except OSError as err: - onexc(os.close, path, err) - else: - if dir_fd is not None: - raise NotImplementedError("dir_fd unavailable on this platform") - try: - st = os.lstat(path) - except OSError as err: - onexc(os.lstat, path, err) - return - try: - if _rmtree_islink(st): - # symlinks to directories are forbidden, see bug #1669 - raise OSError("Cannot call rmtree on a symbolic link") - except OSError as err: - onexc(os.path.islink, path, err) - # can't continue even if onexc hook returns - return - return _rmtree_unsafe(path, onexc) - -# Allow introspection of whether or not the hardening against symlink -# attacks is supported on the current platform -rmtree.avoids_symlink_attacks = _use_fd_functions - -def _basename(path): - """A basename() variant which first strips the trailing slash, if present. - Thus we always get the last component of the path, even for directories. - - path: Union[PathLike, str] - - e.g. 
- >>> os.path.basename('/bar/foo') - 'foo' - >>> os.path.basename('/bar/foo/') - '' - >>> _basename('/bar/foo/') - 'foo' - """ - path = os.fspath(path) - sep = os.path.sep + (os.path.altsep or '') - return os.path.basename(path.rstrip(sep)) - -def move(src, dst, copy_function=copy2): - """Recursively move a file or directory to another location. This is - similar to the Unix "mv" command. Return the file or directory's - destination. - - If dst is an existing directory or a symlink to a directory, then src is - moved inside that directory. The destination path in that directory must - not already exist. - - If dst already exists but is not a directory, it may be overwritten - depending on os.rename() semantics. - - If the destination is on our current filesystem, then rename() is used. - Otherwise, src is copied to the destination and then removed. Symlinks are - recreated under the new name if os.rename() fails because of cross - filesystem renames. - - The optional `copy_function` argument is a callable that will be used - to copy the source or it will be delegated to `copytree`. - By default, copy2() is used, but any function that supports the same - signature (like copy()) can be used. - - A lot more could be done here... A look at a mv.c shows a lot of - the issues this implementation glosses over. - - """ - sys.audit("shutil.move", src, dst) - real_dst = dst - if os.path.isdir(dst): - if _samefile(src, dst) and not os.path.islink(src): - # We might be on a case insensitive filesystem, - # perform the rename anyway. - os.rename(src, dst) - return - - # Using _basename instead of os.path.basename is important, as we must - # ignore any trailing slash to avoid the basename returning '' - real_dst = os.path.join(dst, _basename(src)) - - if os.path.exists(real_dst): - raise Error("Destination path '%s' already exists" % real_dst) - try: - os.rename(src, real_dst) - except OSError: - if os.path.islink(src): - linkto = os.readlink(src) - os.symlink(linkto, real_dst) - os.unlink(src) - elif os.path.isdir(src): - if _destinsrc(src, dst): - raise Error("Cannot move a directory '%s' into itself" - " '%s'." % (src, dst)) - if (_is_immutable(src) - or (not os.access(src, os.W_OK) and os.listdir(src) - and sys.platform == 'darwin')): - raise PermissionError("Cannot move the non-empty directory " - "'%s': Lacking write permission to '%s'." 
- % (src, src)) - copytree(src, real_dst, copy_function=copy_function, - symlinks=True) - rmtree(src) - else: - copy_function(src, real_dst) - os.unlink(src) - return real_dst - -def _destinsrc(src, dst): - src = os.path.abspath(src) - dst = os.path.abspath(dst) - if not src.endswith(os.path.sep): - src += os.path.sep - if not dst.endswith(os.path.sep): - dst += os.path.sep - return dst.startswith(src) - -def _is_immutable(src): - st = _stat(src) - immutable_states = [stat.UF_IMMUTABLE, stat.SF_IMMUTABLE] - return hasattr(st, 'st_flags') and st.st_flags in immutable_states - -def _get_gid(name): - """Returns a gid, given a group name.""" - if name is None: - return None - - try: - from grp import getgrnam - except ImportError: - return None - - try: - result = getgrnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _get_uid(name): - """Returns an uid, given a user name.""" - if name is None: - return None - - try: - from pwd import getpwnam - except ImportError: - return None - - try: - result = getpwnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, - owner=None, group=None, logger=None, root_dir=None): - """Create a (possibly compressed) tar file from all the files under - 'base_dir'. - - 'compress' must be "gzip" (the default), "bzip2", "xz", or None. - - 'owner' and 'group' can be used to define an owner and a group for the - archive that is being built. If not provided, the current owner and group - will be used. - - The output tar file will be named 'base_name' + ".tar", possibly plus - the appropriate compression extension (".gz", ".bz2", or ".xz"). - - Returns the output filename. - """ - if compress is None: - tar_compression = '' - elif _ZLIB_SUPPORTED and compress == 'gzip': - tar_compression = 'gz' - elif _BZ2_SUPPORTED and compress == 'bzip2': - tar_compression = 'bz2' - elif _LZMA_SUPPORTED and compress == 'xz': - tar_compression = 'xz' - else: - raise ValueError("bad value for 'compress', or compression format not " - "supported : {0}".format(compress)) - - import tarfile # late import for breaking circular dependency - - compress_ext = '.' + tar_compression if compress else '' - archive_name = base_name + '.tar' + compress_ext - archive_dir = os.path.dirname(archive_name) - - if archive_dir and not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - # creating the tarball - if logger is not None: - logger.info('Creating tar archive') - - uid = _get_uid(owner) - gid = _get_gid(group) - - def _set_uid_gid(tarinfo): - if gid is not None: - tarinfo.gid = gid - tarinfo.gname = group - if uid is not None: - tarinfo.uid = uid - tarinfo.uname = owner - return tarinfo - - if not dry_run: - tar = tarfile.open(archive_name, 'w|%s' % tar_compression) - arcname = base_dir - if root_dir is not None: - base_dir = os.path.join(root_dir, base_dir) - try: - tar.add(base_dir, arcname, filter=_set_uid_gid) - finally: - tar.close() - - if root_dir is not None: - archive_name = os.path.abspath(archive_name) - return archive_name - -def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, - logger=None, owner=None, group=None, root_dir=None): - """Create a zip file from all the files under 'base_dir'. - - The output zip file will be named 'base_name' + ".zip". Returns the - name of the output zip file. 
- """ - import zipfile # late import for breaking circular dependency - - zip_filename = base_name + ".zip" - archive_dir = os.path.dirname(base_name) - - if archive_dir and not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - if logger is not None: - logger.info("creating '%s' and adding '%s' to it", - zip_filename, base_dir) - - if not dry_run: - with zipfile.ZipFile(zip_filename, "w", - compression=zipfile.ZIP_DEFLATED) as zf: - arcname = os.path.normpath(base_dir) - if root_dir is not None: - base_dir = os.path.join(root_dir, base_dir) - base_dir = os.path.normpath(base_dir) - if arcname != os.curdir: - zf.write(base_dir, arcname) - if logger is not None: - logger.info("adding '%s'", base_dir) - for dirpath, dirnames, filenames in os.walk(base_dir): - arcdirpath = dirpath - if root_dir is not None: - arcdirpath = os.path.relpath(arcdirpath, root_dir) - arcdirpath = os.path.normpath(arcdirpath) - for name in sorted(dirnames): - path = os.path.join(dirpath, name) - arcname = os.path.join(arcdirpath, name) - zf.write(path, arcname) - if logger is not None: - logger.info("adding '%s'", path) - for name in filenames: - path = os.path.join(dirpath, name) - path = os.path.normpath(path) - if os.path.isfile(path): - arcname = os.path.join(arcdirpath, name) - zf.write(path, arcname) - if logger is not None: - logger.info("adding '%s'", path) - - if root_dir is not None: - zip_filename = os.path.abspath(zip_filename) - return zip_filename - -_make_tarball.supports_root_dir = True -_make_zipfile.supports_root_dir = True - -# Maps the name of the archive format to a tuple containing: -# * the archiving function -# * extra keyword arguments -# * description -_ARCHIVE_FORMATS = { - 'tar': (_make_tarball, [('compress', None)], - "uncompressed tar file"), -} - -if _ZLIB_SUPPORTED: - _ARCHIVE_FORMATS['gztar'] = (_make_tarball, [('compress', 'gzip')], - "gzip'ed tar-file") - _ARCHIVE_FORMATS['zip'] = (_make_zipfile, [], "ZIP file") - -if _BZ2_SUPPORTED: - _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], - "bzip2'ed tar-file") - -if _LZMA_SUPPORTED: - _ARCHIVE_FORMATS['xztar'] = (_make_tarball, [('compress', 'xz')], - "xz'ed tar-file") - -def get_archive_formats(): - """Returns a list of supported formats for archiving and unarchiving. - - Each element of the returned sequence is a tuple (name, description) - """ - formats = [(name, registry[2]) for name, registry in - _ARCHIVE_FORMATS.items()] - formats.sort() - return formats - -def register_archive_format(name, function, extra_args=None, description=''): - """Registers an archive format. - - name is the name of the format. function is the callable that will be - used to create archives. If provided, extra_args is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_archive_formats() function. 
- """ - if extra_args is None: - extra_args = [] - if not callable(function): - raise TypeError('The %s object is not callable' % function) - if not isinstance(extra_args, (tuple, list)): - raise TypeError('extra_args needs to be a sequence') - for element in extra_args: - if not isinstance(element, (tuple, list)) or len(element) !=2: - raise TypeError('extra_args elements are : (arg_name, value)') - - _ARCHIVE_FORMATS[name] = (function, extra_args, description) - -def unregister_archive_format(name): - del _ARCHIVE_FORMATS[name] - -def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, - dry_run=0, owner=None, group=None, logger=None): - """Create an archive file (eg. zip or tar). - - 'base_name' is the name of the file to create, minus any format-specific - extension; 'format' is the archive format: one of "zip", "tar", "gztar", - "bztar", or "xztar". Or any other registered format. - - 'root_dir' is a directory that will be the root directory of the - archive; ie. we typically chdir into 'root_dir' before creating the - archive. 'base_dir' is the directory where we start archiving from; - ie. 'base_dir' will be the common prefix of all files and - directories in the archive. 'root_dir' and 'base_dir' both default - to the current directory. Returns the name of the archive file. - - 'owner' and 'group' are used when creating a tar archive. By default, - uses the current owner and group. - """ - sys.audit("shutil.make_archive", base_name, format, root_dir, base_dir) - try: - format_info = _ARCHIVE_FORMATS[format] - except KeyError: - raise ValueError("unknown archive format '%s'" % format) from None - - kwargs = {'dry_run': dry_run, 'logger': logger, - 'owner': owner, 'group': group} - - func = format_info[0] - for arg, val in format_info[1]: - kwargs[arg] = val - - if base_dir is None: - base_dir = os.curdir - - supports_root_dir = getattr(func, 'supports_root_dir', False) - save_cwd = None - if root_dir is not None: - stmd = os.stat(root_dir).st_mode - if not stat.S_ISDIR(stmd): - raise NotADirectoryError(errno.ENOTDIR, 'Not a directory', root_dir) - - if supports_root_dir: - # Support path-like base_name here for backwards-compatibility. - base_name = os.fspath(base_name) - kwargs['root_dir'] = root_dir - else: - save_cwd = os.getcwd() - if logger is not None: - logger.debug("changing into '%s'", root_dir) - base_name = os.path.abspath(base_name) - if not dry_run: - os.chdir(root_dir) - - try: - filename = func(base_name, base_dir, **kwargs) - finally: - if save_cwd is not None: - if logger is not None: - logger.debug("changing back to '%s'", save_cwd) - os.chdir(save_cwd) - - return filename - - -def get_unpack_formats(): - """Returns a list of supported formats for unpacking. 
- - Each element of the returned sequence is a tuple - (name, extensions, description) - """ - formats = [(name, info[0], info[3]) for name, info in - _UNPACK_FORMATS.items()] - formats.sort() - return formats - -def _check_unpack_options(extensions, function, extra_args): - """Checks what gets registered as an unpacker.""" - # first make sure no other unpacker is registered for this extension - existing_extensions = {} - for name, info in _UNPACK_FORMATS.items(): - for ext in info[0]: - existing_extensions[ext] = name - - for extension in extensions: - if extension in existing_extensions: - msg = '%s is already registered for "%s"' - raise RegistryError(msg % (extension, - existing_extensions[extension])) - - if not callable(function): - raise TypeError('The registered function must be a callable') - - -def register_unpack_format(name, extensions, function, extra_args=None, - description=''): - """Registers an unpack format. - - `name` is the name of the format. `extensions` is a list of extensions - corresponding to the format. - - `function` is the callable that will be - used to unpack archives. The callable will receive archives to unpack. - If it's unable to handle an archive, it needs to raise a ReadError - exception. - - If provided, `extra_args` is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_unpack_formats() function. - """ - if extra_args is None: - extra_args = [] - _check_unpack_options(extensions, function, extra_args) - _UNPACK_FORMATS[name] = extensions, function, extra_args, description - -def unregister_unpack_format(name): - """Removes the pack format from the registry.""" - del _UNPACK_FORMATS[name] - -def _ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - if not os.path.isdir(dirname): - os.makedirs(dirname) - -def _unpack_zipfile(filename, extract_dir): - """Unpack zip `filename` to `extract_dir` - """ - import zipfile # late import for breaking circular dependency - - if not zipfile.is_zipfile(filename): - raise ReadError("%s is not a zip file" % filename) - - zip = zipfile.ZipFile(filename) - try: - for info in zip.infolist(): - name = info.filename - - # don't extract absolute paths or ones with .. in them - if name.startswith('/') or '..' 
in name: - continue - - targetpath = os.path.join(extract_dir, *name.split('/')) - if not targetpath: - continue - - _ensure_directory(targetpath) - if not name.endswith('/'): - # file - with zip.open(name, 'r') as source, \ - open(targetpath, 'wb') as target: - copyfileobj(source, target) - finally: - zip.close() - -def _unpack_tarfile(filename, extract_dir, *, filter=None): - """Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir` - """ - import tarfile # late import for breaking circular dependency - try: - tarobj = tarfile.open(filename) - except tarfile.TarError: - raise ReadError( - "%s is not a compressed or uncompressed tar file" % filename) - try: - tarobj.extractall(extract_dir, filter=filter) - finally: - tarobj.close() - -# Maps the name of the unpack format to a tuple containing: -# * extensions -# * the unpacking function -# * extra keyword arguments -# * description -_UNPACK_FORMATS = { - 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), - 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file"), -} - -if _ZLIB_SUPPORTED: - _UNPACK_FORMATS['gztar'] = (['.tar.gz', '.tgz'], _unpack_tarfile, [], - "gzip'ed tar-file") - -if _BZ2_SUPPORTED: - _UNPACK_FORMATS['bztar'] = (['.tar.bz2', '.tbz2'], _unpack_tarfile, [], - "bzip2'ed tar-file") - -if _LZMA_SUPPORTED: - _UNPACK_FORMATS['xztar'] = (['.tar.xz', '.txz'], _unpack_tarfile, [], - "xz'ed tar-file") - -def _find_unpack_format(filename): - for name, info in _UNPACK_FORMATS.items(): - for extension in info[0]: - if filename.endswith(extension): - return name - return None - -def unpack_archive(filename, extract_dir=None, format=None, *, filter=None): - """Unpack an archive. - - `filename` is the name of the archive. - - `extract_dir` is the name of the target directory, where the archive - is unpacked. If not provided, the current working directory is used. - - `format` is the archive format: one of "zip", "tar", "gztar", "bztar", - or "xztar". Or any other registered format. If not provided, - unpack_archive will use the filename extension and see if an unpacker - was registered for that extension. - - In case none is found, a ValueError is raised. - - If `filter` is given, it is passed to the underlying - extraction function. - """ - sys.audit("shutil.unpack_archive", filename, extract_dir, format) - - if extract_dir is None: - extract_dir = os.getcwd() - - extract_dir = os.fspath(extract_dir) - filename = os.fspath(filename) - - if filter is None: - filter_kwargs = {} - else: - filter_kwargs = {'filter': filter} - if format is not None: - try: - format_info = _UNPACK_FORMATS[format] - except KeyError: - raise ValueError("Unknown unpack format '{0}'".format(format)) from None - - func = format_info[1] - func(filename, extract_dir, **dict(format_info[2]), **filter_kwargs) - else: - # we need to look at the registered unpackers supported extensions - format = _find_unpack_format(filename) - if format is None: - raise ReadError("Unknown archive format '{0}'".format(filename)) - - func = _UNPACK_FORMATS[format][1] - kwargs = dict(_UNPACK_FORMATS[format][2]) | filter_kwargs - func(filename, extract_dir, **kwargs) - - -if hasattr(os, 'statvfs'): - - __all__.append('disk_usage') - _ntuple_diskusage = collections.namedtuple('usage', 'total used free') - _ntuple_diskusage.total.__doc__ = 'Total space in bytes' - _ntuple_diskusage.used.__doc__ = 'Used space in bytes' - _ntuple_diskusage.free.__doc__ = 'Free space in bytes' - - def disk_usage(path): - """Return disk usage statistics about the given path. 
- - Returned value is a named tuple with attributes 'total', 'used' and - 'free', which are the amount of total, used and free space, in bytes. - """ - st = os.statvfs(path) - free = st.f_bavail * st.f_frsize - total = st.f_blocks * st.f_frsize - used = (st.f_blocks - st.f_bfree) * st.f_frsize - return _ntuple_diskusage(total, used, free) - -elif _WINDOWS: - - __all__.append('disk_usage') - _ntuple_diskusage = collections.namedtuple('usage', 'total used free') - - def disk_usage(path): - """Return disk usage statistics about the given path. - - Returned values is a named tuple with attributes 'total', 'used' and - 'free', which are the amount of total, used and free space, in bytes. - """ - total, free = nt._getdiskusage(path) - used = total - free - return _ntuple_diskusage(total, used, free) - - -def chown(path, user=None, group=None, *, dir_fd=None, follow_symlinks=True): - """Change owner user and group of the given path. - - user and group can be the uid/gid or the user/group names, and in that case, - they are converted to their respective uid/gid. - - If dir_fd is set, it should be an open file descriptor to the directory to - be used as the root of *path* if it is relative. - - If follow_symlinks is set to False and the last element of the path is a - symbolic link, chown will modify the link itself and not the file being - referenced by the link. - """ - sys.audit('shutil.chown', path, user, group) - - if user is None and group is None: - raise ValueError("user and/or group must be set") - - _user = user - _group = group - - # -1 means don't change it - if user is None: - _user = -1 - # user can either be an int (the uid) or a string (the system username) - elif isinstance(user, str): - _user = _get_uid(user) - if _user is None: - raise LookupError("no such user: {!r}".format(user)) - - if group is None: - _group = -1 - elif not isinstance(group, int): - _group = _get_gid(group) - if _group is None: - raise LookupError("no such group: {!r}".format(group)) - - os.chown(path, _user, _group, dir_fd=dir_fd, - follow_symlinks=follow_symlinks) - -def get_terminal_size(fallback=(80, 24)): - """Get the size of the terminal window. - - For each of the two dimensions, the environment variable, COLUMNS - and LINES respectively, is checked. If the variable is defined and - the value is a positive integer, it is used. - - When COLUMNS or LINES is not defined, which is the common case, - the terminal connected to sys.__stdout__ is queried - by invoking os.get_terminal_size. - - If the terminal size cannot be successfully queried, either because - the system doesn't support querying, or because we are not - connected to a terminal, the value given in fallback parameter - is used. Fallback defaults to (80, 24) which is the default - size used by many terminal emulators. - - The value returned is a named tuple of type os.terminal_size. 
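Both disk_usage() variants return the same named triple, and get_terminal_size(), whose body follows, falls back to (80, 24) when no terminal can be queried. A minimal sketch (the path is a placeholder):

    import shutil

    total, used, free = shutil.disk_usage('/')   # all values in bytes
    print(f'{free / 2**30:.1f} GiB free of {total / 2**30:.1f} GiB')

    cols, rows = shutil.get_terminal_size(fallback=(80, 24))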
- """ - # columns, lines are the working values - try: - columns = int(os.environ['COLUMNS']) - except (KeyError, ValueError): - columns = 0 - - try: - lines = int(os.environ['LINES']) - except (KeyError, ValueError): - lines = 0 - - # only query if necessary - if columns <= 0 or lines <= 0: - try: - size = os.get_terminal_size(sys.__stdout__.fileno()) - except (AttributeError, ValueError, OSError): - # stdout is None, closed, detached, or not a terminal, or - # os.get_terminal_size() is unsupported - size = os.terminal_size(fallback) - if columns <= 0: - columns = size.columns or fallback[0] - if lines <= 0: - lines = size.lines or fallback[1] - - return os.terminal_size((columns, lines)) - - -# Check that a given file can be accessed with the correct mode. -# Additionally check that `file` is not a directory, as on Windows -# directories pass the os.access check. -def _access_check(fn, mode): - return (os.path.exists(fn) and os.access(fn, mode) - and not os.path.isdir(fn)) - - -def _win_path_needs_curdir(cmd, mode): - """ - On Windows, we can use NeedCurrentDirectoryForExePath to figure out - if we should add the cwd to PATH when searching for executables if - the mode is executable. - """ - return (not (mode & os.X_OK)) or _winapi.NeedCurrentDirectoryForExePath( - os.fsdecode(cmd)) - - -def which(cmd, mode=os.F_OK | os.X_OK, path=None): - """Given a command, mode, and a PATH string, return the path which - conforms to the given mode on the PATH, or None if there is no such - file. - - `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result - of os.environ.get("PATH"), or can be overridden with a custom search - path. - - """ - use_bytes = isinstance(cmd, bytes) - - # If we're given a path with a directory part, look it up directly rather - # than referring to PATH directories. This includes checking relative to - # the current directory, e.g. ./script - dirname, cmd = os.path.split(cmd) - if dirname: - path = [dirname] - else: - if path is None: - path = os.environ.get("PATH", None) - if path is None: - try: - path = os.confstr("CS_PATH") - except (AttributeError, ValueError): - # os.confstr() or CS_PATH is not available - path = os.defpath - # bpo-35755: Don't use os.defpath if the PATH environment variable - # is set to an empty string - - # PATH='' doesn't match, whereas PATH=':' looks in the current - # directory - if not path: - return None - - if use_bytes: - path = os.fsencode(path) - path = path.split(os.fsencode(os.pathsep)) - else: - path = os.fsdecode(path) - path = path.split(os.pathsep) - - if sys.platform == "win32" and _win_path_needs_curdir(cmd, mode): - curdir = os.curdir - if use_bytes: - curdir = os.fsencode(curdir) - path.insert(0, curdir) - - if sys.platform == "win32": - # PATHEXT is necessary to check on Windows. - pathext_source = os.getenv("PATHEXT") or _WIN_DEFAULT_PATHEXT - pathext = pathext_source.split(os.pathsep) - pathext = [ext.rstrip('.') for ext in pathext if ext] - - if use_bytes: - pathext = [os.fsencode(ext) for ext in pathext] - - files = [cmd + ext for ext in pathext] - - # If X_OK in mode, simulate the cmd.exe behavior: look at direct - # match if and only if the extension is in PATHEXT. - # If X_OK not in mode, simulate the first result of where.exe: - # always look at direct match before a PATHEXT match. 
- normcmd = cmd.upper() - if not (mode & os.X_OK) or any(normcmd.endswith(ext.upper()) for ext in pathext): - files.insert(0, cmd) - else: - # On other platforms you don't have things like PATHEXT to tell you - # what file suffixes are executable, so just pass on cmd as-is. - files = [cmd] - - seen = set() - for dir in path: - normdir = os.path.normcase(dir) - if normdir not in seen: - seen.add(normdir) - for thefile in files: - name = os.path.join(dir, thefile) - if _access_check(name, mode): - return name - return None diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD b/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD deleted file mode 100644 index 91e38227..00000000 --- a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD +++ /dev/null @@ -1,878 +0,0 @@ -../../Scripts/pip.exe,sha256=PsvvMmU404Rl6dIEd9Gi8PCIkom4dqzmnawoCBmzkB4,98088 -../../Scripts/pip3.13.exe,sha256=PsvvMmU404Rl6dIEd9Gi8PCIkom4dqzmnawoCBmzkB4,98088 -../../Scripts/pip3.exe,sha256=PsvvMmU404Rl6dIEd9Gi8PCIkom4dqzmnawoCBmzkB4,98088 -pip-26.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -pip-26.0.1.dist-info/METADATA,sha256=ZqIZuNGsG6l2gHiKlQjVQghFQhgSWfhEDHuCVPW3aN8,4675 -pip-26.0.1.dist-info/RECORD,, -pip-26.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip-26.0.1.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 -pip-26.0.1.dist-info/entry_points.txt,sha256=Vhf8s0IYgX37mtd4vGL73BPcxdKnqeCFPzB5-d30x8o,84 -pip-26.0.1.dist-info/licenses/AUTHORS.txt,sha256=grSl9YDNOpOFFJTX8ZYKSdgfouXi_DzlRyYGE2-u5aI,11731 -pip-26.0.1.dist-info/licenses/LICENSE.txt,sha256=Y0MApmnUmurmWxLGxIySTFGkzfPR_whtw0VtyLyqIQQ,1093 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt,sha256=hu7uh74qQ_P_H1ZJb0UfaSQ5JvAl_tuwM2ZsMExMFhs,558 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt,sha256=GrNuPipLqGMWJThPh-ngkdsfrtA0xbIzJbMjmr8sxSU,1099 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt,sha256=gI4QyKarjesUn_mz-xn0R6gICUYG1xKpylf-rTVSWZ0,14531 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md,sha256=t6M2q_OwThgOwGXN0W5wXQeeHMehT5EKpukYfza5zYc,1541 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING,sha256=SS3tuoXaWHL3jmCRvNH-pHTWYNNay03ulkuKqz8AdCc,614 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE,sha256=qdZvHVJt8C4p3Oc0NtNOVuhjL0bCdbvf_HBWnogvnxc,1331 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE,sha256=GyKwSbUmfW38I6Z79KhNjsBLn9-xpR02DkK0NCyLQVQ,1081 
-pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE,sha256=84j9OMrRMRLB3A9mm76A5_hFQe26-3LzAw0sp2QsPJ0,751 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE,sha256=3u18F6QxgVgZCj6iOcyHmlpQJxzruYrnAl9I--WNyhU,1056 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE,sha256=M757fo-k_Rmxdg4ajtimaL2rhSyRtpLdQUJLy3Jan8o,1086 -pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115 -pip/__init__.py,sha256=3EhKF2588Ab15tmBszgD3Bp0N26sJx7VhS2Akn_qY38,355 -pip/__main__.py,sha256=WzbhHXTbSE6gBY19mNN9m4s5o_365LOvTYSgqgbdBhE,854 -pip/__pip-runner__.py,sha256=JOoEZTwrtv7jRaXBkgSQKAE04yNyfFmGHxqpHiGHvL0,1450 -pip/__pycache__/__init__.cpython-313.pyc,, -pip/__pycache__/__main__.cpython-313.pyc,, -pip/__pycache__/__pip-runner__.cpython-313.pyc,, -pip/_internal/__init__.py,sha256=S7i9Dn9aSZS0MG-2Wrve3dV9TImPzvQn5jjhp9t_uf0,511 -pip/_internal/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/__pycache__/build_env.cpython-313.pyc,, -pip/_internal/__pycache__/cache.cpython-313.pyc,, -pip/_internal/__pycache__/configuration.cpython-313.pyc,, -pip/_internal/__pycache__/exceptions.cpython-313.pyc,, -pip/_internal/__pycache__/main.cpython-313.pyc,, -pip/_internal/__pycache__/pyproject.cpython-313.pyc,, -pip/_internal/__pycache__/self_outdated_check.cpython-313.pyc,, -pip/_internal/__pycache__/wheel_builder.cpython-313.pyc,, -pip/_internal/build_env.py,sha256=XpgOIlTQLgz3PvDT2n7j2NzX_rVFZLCIG7t7b2ddhcM,21911 -pip/_internal/cache.py,sha256=nMh48Yv3yu1HS1yCdscouu6B6B5zYBWdV6bhqs7gL-E,10345 -pip/_internal/cli/__init__.py,sha256=Iqg_tKA771XuMO1P4t_sDHnSKPzkUb9D0DqunAmw_ko,131 -pip/_internal/cli/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/cli/__pycache__/autocompletion.cpython-313.pyc,, -pip/_internal/cli/__pycache__/base_command.cpython-313.pyc,, -pip/_internal/cli/__pycache__/cmdoptions.cpython-313.pyc,, -pip/_internal/cli/__pycache__/command_context.cpython-313.pyc,, -pip/_internal/cli/__pycache__/index_command.cpython-313.pyc,, -pip/_internal/cli/__pycache__/main.cpython-313.pyc,, -pip/_internal/cli/__pycache__/main_parser.cpython-313.pyc,, -pip/_internal/cli/__pycache__/parser.cpython-313.pyc,, -pip/_internal/cli/__pycache__/progress_bars.cpython-313.pyc,, -pip/_internal/cli/__pycache__/req_command.cpython-313.pyc,, -pip/_internal/cli/__pycache__/spinners.cpython-313.pyc,, -pip/_internal/cli/__pycache__/status_codes.cpython-313.pyc,, -pip/_internal/cli/autocompletion.py,sha256=ZG2cM03nlcNrs-WG_SFTW46isx9s2Go5lUD_8-iv70o,7193 -pip/_internal/cli/base_command.py,sha256=6OW75PSGzkH8Fz761WZ3OSz1TsuO3-suc6iap-sQjTM,9168 -pip/_internal/cli/cmdoptions.py,sha256=hfA9B29Nnq2vYMWhFVg7EcWjdlfdPBPU4WwWT2Lkq4A,36164 -pip/_internal/cli/command_context.py,sha256=kmu3EWZbfBega1oDamnGJTA_UaejhIQNuMj2CVmMXu0,817 -pip/_internal/cli/index_command.py,sha256=s3x75lpDXWJtCkBacTQ3qAAprldHMJCniEQ5qkQ0FiI,6484 -pip/_internal/cli/main.py,sha256=ljDQBkvBtC8xTjOdb6rDJzJUNi1s-PnVR_W5C-Mq0Dk,3137 -pip/_internal/cli/main_parser.py,sha256=YjzJAjqf78ARNsLlnJT9l6fNbpyDPJA-arOIXYsK5Ik,4403 -pip/_internal/cli/parser.py,sha256=EIFExrWX_1nrl1Ib--GOor70WYqLtduHByenb1u9xH4,13827 
-pip/_internal/cli/progress_bars.py,sha256=IW1PH5n2FPqUBTP7ULQ5Yu-wyNNO9XGY3g1PT4RMu44,4706 -pip/_internal/cli/req_command.py,sha256=QjDXId0hFdopwE8hNx2eustumxUNbnOCvG_ORmUC7vM,16482 -pip/_internal/cli/spinners.py,sha256=EJzZIZNyUtJljp3-WjcsyIrqxW-HUsfWzhuW84n_Tqw,7362 -pip/_internal/cli/status_codes.py,sha256=sEFHUaUJbqv8iArL3HAtcztWZmGOFX01hTesSytDEh0,116 -pip/_internal/commands/__init__.py,sha256=aNeCbQurGWihfhQq7BqaLXHqWDQ0i3I04OS7kxK6plQ,4026 -pip/_internal/commands/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/commands/__pycache__/cache.cpython-313.pyc,, -pip/_internal/commands/__pycache__/check.cpython-313.pyc,, -pip/_internal/commands/__pycache__/completion.cpython-313.pyc,, -pip/_internal/commands/__pycache__/configuration.cpython-313.pyc,, -pip/_internal/commands/__pycache__/debug.cpython-313.pyc,, -pip/_internal/commands/__pycache__/download.cpython-313.pyc,, -pip/_internal/commands/__pycache__/freeze.cpython-313.pyc,, -pip/_internal/commands/__pycache__/hash.cpython-313.pyc,, -pip/_internal/commands/__pycache__/help.cpython-313.pyc,, -pip/_internal/commands/__pycache__/index.cpython-313.pyc,, -pip/_internal/commands/__pycache__/inspect.cpython-313.pyc,, -pip/_internal/commands/__pycache__/install.cpython-313.pyc,, -pip/_internal/commands/__pycache__/list.cpython-313.pyc,, -pip/_internal/commands/__pycache__/lock.cpython-313.pyc,, -pip/_internal/commands/__pycache__/search.cpython-313.pyc,, -pip/_internal/commands/__pycache__/show.cpython-313.pyc,, -pip/_internal/commands/__pycache__/uninstall.cpython-313.pyc,, -pip/_internal/commands/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/commands/cache.py,sha256=XjT7kjY8GSISMksFHsLvjS9Ogfi5extNlUUv-dUoWCM,9142 -pip/_internal/commands/check.py,sha256=hVFBQezQ3zj4EydoWbFQj_afPUppMt7r9JPAlY22U6Y,2244 -pip/_internal/commands/completion.py,sha256=LjvRIZ6QUiDXJL3IOMFeD-_J97HfjMGgEk0j2tWGu1U,4565 -pip/_internal/commands/configuration.py,sha256=6gNOGrVWnOLU15zUnAiNuOMhf76RRIZvCdVD0degPRk,10105 -pip/_internal/commands/debug.py,sha256=_8IqM8Fx1_lY2STu_qspr63tufF7zyFJCyYAXtxz0N4,6805 -pip/_internal/commands/download.py,sha256=LUNVobuvCdagjLBuPBaxHeBiHEiIe03fTO2m6ahC8qw,5178 -pip/_internal/commands/freeze.py,sha256=fxoW8AAc-bAqB_fXdNq2VnZ3JfWkFMg-bR6LcdDVO7A,3099 -pip/_internal/commands/hash.py,sha256=GO9pRN3wXC2kQaovK57TaLYBMc3IltOH92O6QEw6YE0,1679 -pip/_internal/commands/help.py,sha256=Bz3LcjNQXkz4Cu__pL4CZ86o4-HNLZj1NZWdlJhjuu0,1108 -pip/_internal/commands/index.py,sha256=kDpx2MO6ZxTt5PpeY4jqcssVbYhzxpkpreDe_6PPhks,5520 -pip/_internal/commands/inspect.py,sha256=ogm4UT7LRo8bIQcWUS1IiA25QdD4VHLa7JaPAodDttM,3177 -pip/_internal/commands/install.py,sha256=L6X1qi49ROVTGABhwwxDgBBTijlOpVn6XSDVZ7QW1Kc,30588 -pip/_internal/commands/list.py,sha256=L5nWuwawqSrBNsuxfyHLAagfz7XJP86tC9nK3L9YiI8,13497 -pip/_internal/commands/lock.py,sha256=145ihjUK_-7gP8O65XPDi_xMhlh5hne1ptkHdfnbAnQ,6027 -pip/_internal/commands/search.py,sha256=zbMsX_YASj6kXA6XIBgTDv0bGK51xG-CV3IynZJcE-c,5782 -pip/_internal/commands/show.py,sha256=oLVJIfKWmDKm0SsQGEi3pozNiqrXjTras_fbBSYKpBA,8066 -pip/_internal/commands/uninstall.py,sha256=CsOihqvb6ZA6O67L70oXeoLHeOfNzMM88H9g-9aocgw,3868 -pip/_internal/commands/wheel.py,sha256=L9vEzJ_E42scF_Hgh5X4Hk39nqJDKxGg4u7glDYbNWc,5880 -pip/_internal/configuration.py,sha256=WxwwSwY_Bm6QzDgf32BsujEyO8dgRedegCpgbUfDvM8,14568 -pip/_internal/distributions/__init__.py,sha256=Hq6kt6gXBgjNit5hTTWLAzeCNOKoB-N0pGYSqehrli8,858 -pip/_internal/distributions/__pycache__/__init__.cpython-313.pyc,, 
-pip/_internal/distributions/__pycache__/base.cpython-313.pyc,, -pip/_internal/distributions/__pycache__/installed.cpython-313.pyc,, -pip/_internal/distributions/__pycache__/sdist.cpython-313.pyc,, -pip/_internal/distributions/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/distributions/base.py,sha256=l-OTCAIs25lsapejA6IYpPZxSM5-BET4sdZDkql8jiY,1830 -pip/_internal/distributions/installed.py,sha256=kgIEE_1NzjZxLBSC-v5s64uOFZlVEt3aPrjTtL6x2XY,929 -pip/_internal/distributions/sdist.py,sha256=RYwQIbuxpKy6OjlBZCAefxpMDaoocUQ4dFtheGsiTOQ,6627 -pip/_internal/distributions/wheel.py,sha256=_HbG0OehF8dwj4UX-xV__tXLwgPus9OjMEf2NTRqBbE,1364 -pip/_internal/exceptions.py,sha256=JdPCrQ9iTLvE-GBebzBEeGP3hoTffWEKqbYEsa6cEZc,32165 -pip/_internal/index/__init__.py,sha256=tzwMH_fhQeubwMqHdSivasg1cRgTSbNg2CiMVnzMmyU,29 -pip/_internal/index/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/index/__pycache__/collector.cpython-313.pyc,, -pip/_internal/index/__pycache__/package_finder.cpython-313.pyc,, -pip/_internal/index/__pycache__/sources.cpython-313.pyc,, -pip/_internal/index/collector.py,sha256=R7Gcx_4GEoSEI-iazfAZVEPG3Lp6mbZT4lbAD6NjAc0,16144 -pip/_internal/index/package_finder.py,sha256=a3_L4FDNsuDf3y8Af9J7sfsHR1ahs8o13Ths-WYwFh0,41776 -pip/_internal/index/sources.py,sha256=nXJkOjhLy-O2FsrKU9RIqCOqgY2PsoKWybtZjjRgqU0,8639 -pip/_internal/locations/__init__.py,sha256=Sd67ap1LIemvXArUDFqm8U-HuZvj9i3ApEuiIwUc9UE,14157 -pip/_internal/locations/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/locations/__pycache__/_distutils.cpython-313.pyc,, -pip/_internal/locations/__pycache__/_sysconfig.cpython-313.pyc,, -pip/_internal/locations/__pycache__/base.cpython-313.pyc,, -pip/_internal/locations/_distutils.py,sha256=jpFj4V00rD9IR3vA9TqrGkwcdNVFc58LsChZavge9JY,5975 -pip/_internal/locations/_sysconfig.py,sha256=8CpTjtxaCzHSCrKpaxWnHE7aKcJrRJRmntR1ZLVysLk,7779 -pip/_internal/locations/base.py,sha256=AImjYJWxOtDkc0KKc6Y4Gz677cg91caMA4L94B9FZEg,2550 -pip/_internal/main.py,sha256=1cHqjsfFCrMFf3B5twzocxTJUdHMLoXUpy5lJoFqUi8,338 -pip/_internal/metadata/__init__.py,sha256=vp-JAxiWg_-l5F8AT0Jcey72uUnh8CDwwol9-KktHZ8,5824 -pip/_internal/metadata/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/metadata/__pycache__/_json.cpython-313.pyc,, -pip/_internal/metadata/__pycache__/base.cpython-313.pyc,, -pip/_internal/metadata/__pycache__/pkg_resources.cpython-313.pyc,, -pip/_internal/metadata/_json.py,sha256=hNvnMHOXLAyNlzirWhPL9Nx2CvCqa1iRma6Osq1YfV8,2711 -pip/_internal/metadata/base.py,sha256=BGuMenlcQT8i7j9iclrfdC3vSwgvhr8gjn955cCy16s,25420 -pip/_internal/metadata/importlib/__init__.py,sha256=jUUidoxnHcfITHHaAWG1G2i5fdBYklv_uJcjo2x7VYE,135 -pip/_internal/metadata/importlib/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/metadata/importlib/__pycache__/_compat.cpython-313.pyc,, -pip/_internal/metadata/importlib/__pycache__/_dists.cpython-313.pyc,, -pip/_internal/metadata/importlib/__pycache__/_envs.cpython-313.pyc,, -pip/_internal/metadata/importlib/_compat.py,sha256=sneVh4_6WxQZK4ljdl3ylVuP-q0ttSqbgl9mWt0HnOg,2804 -pip/_internal/metadata/importlib/_dists.py,sha256=znZD7MN4RC73-87KXAn6tKZv9lAQRI0AxxK2bubDvPw,8420 -pip/_internal/metadata/importlib/_envs.py,sha256=H3qVLXVh4LWvrPvu_ekXf3dfbtwnlhNJQP2pxXpccfU,5333 -pip/_internal/metadata/pkg_resources.py,sha256=NO76ZrfR2-LKJTyaXrmQoGhmJMArALvacrlZHViSDT8,10544 -pip/_internal/models/__init__.py,sha256=AjmCEBxX_MH9f_jVjIGNCFJKYCYeSEe18yyvNx4uRKQ,62 -pip/_internal/models/__pycache__/__init__.cpython-313.pyc,, 
-pip/_internal/models/__pycache__/candidate.cpython-313.pyc,, -pip/_internal/models/__pycache__/direct_url.cpython-313.pyc,, -pip/_internal/models/__pycache__/format_control.cpython-313.pyc,, -pip/_internal/models/__pycache__/index.cpython-313.pyc,, -pip/_internal/models/__pycache__/installation_report.cpython-313.pyc,, -pip/_internal/models/__pycache__/link.cpython-313.pyc,, -pip/_internal/models/__pycache__/release_control.cpython-313.pyc,, -pip/_internal/models/__pycache__/scheme.cpython-313.pyc,, -pip/_internal/models/__pycache__/search_scope.cpython-313.pyc,, -pip/_internal/models/__pycache__/selection_prefs.cpython-313.pyc,, -pip/_internal/models/__pycache__/target_python.cpython-313.pyc,, -pip/_internal/models/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/models/candidate.py,sha256=zzgFRuw_kWPjKpGw7LC0ZUMD2CQ2EberUIYs8izjdCA,753 -pip/_internal/models/direct_url.py,sha256=4NMWacu_QzPPWREC1te7v6Wfv-2HkI4tvSJF-CBgLh4,6555 -pip/_internal/models/format_control.py,sha256=PwemYG1L27BM0f1KP61rm24wShENFyxqlD1TWu34alc,2471 -pip/_internal/models/index.py,sha256=tYnL8oxGi4aSNWur0mG8DAP7rC6yuha_MwJO8xw0crI,1030 -pip/_internal/models/installation_report.py,sha256=cqfWJ93ThCxjcacqSWryOCD2XtIn1CZrgzZxAv5FQZ0,2839 -pip/_internal/models/link.py,sha256=zti5UCx1hT03etYqm6MCqFd714clmTgX8rTZT9CKZDQ,21992 -pip/_internal/models/release_control.py,sha256=XD14Hy_XLh9xWR1p7JHqPZPEv3Nnb1BZGMpClk76sLs,3403 -pip/_internal/models/scheme.py,sha256=PakmHJM3e8OOWSZFtfz1Az7f1meONJnkGuQxFlt3wBE,575 -pip/_internal/models/search_scope.py,sha256=1hxU2IVsAaLZVjp0CbzJbYaYzCxv72_Qbg3JL0qhXo0,4507 -pip/_internal/models/selection_prefs.py,sha256=IDOA3euRtyqWUyIK7lX2bzIZasYiEvunKA6H3Mngk-M,2221 -pip/_internal/models/target_python.py,sha256=I0eFS-eia3kwhrOvgsphFZtNAB2IwXZ9Sr9fp6IjBP4,4243 -pip/_internal/models/wheel.py,sha256=1SdfDvN7ALTsbyZ9EOsNy1GPirP1n6EjHyzPrZyLSh8,2920 -pip/_internal/network/__init__.py,sha256=FMy06P__y6jMjUc8z3ZcQdKF-pmZ2zM14_vBeHPGhUI,49 -pip/_internal/network/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/network/__pycache__/auth.cpython-313.pyc,, -pip/_internal/network/__pycache__/cache.cpython-313.pyc,, -pip/_internal/network/__pycache__/download.cpython-313.pyc,, -pip/_internal/network/__pycache__/lazy_wheel.cpython-313.pyc,, -pip/_internal/network/__pycache__/session.cpython-313.pyc,, -pip/_internal/network/__pycache__/utils.cpython-313.pyc,, -pip/_internal/network/__pycache__/xmlrpc.cpython-313.pyc,, -pip/_internal/network/auth.py,sha256=azFp14I9cyWAAzkxF2VM0Q_xtHnbNz3_NQXszy87KQo,20806 -pip/_internal/network/cache.py,sha256=kmRXKQrG9E26xQRj211LHeEGpDg_SlYU9Dn1fJ-AMeI,4862 -pip/_internal/network/download.py,sha256=8sVwIc9MWwpGlMPYCkO1S9U-FD8TA2utw42tj00skjM,12667 -pip/_internal/network/lazy_wheel.py,sha256=y9gVksdJCSjnLfYzs_m3DYUAtl3hc_k-xFPDBd9DgOs,7646 -pip/_internal/network/session.py,sha256=7zK7EeQCSRFipu4ZzcWl1V3AMKkiXdtGqFr7GvU2LrY,19555 -pip/_internal/network/utils.py,sha256=ACsXd1msqNCidHVXsu7LHUSr8NgaypcOKQ4KG-Z_wJM,4091 -pip/_internal/network/xmlrpc.py,sha256=_-Rnk3vOff8uF9hAGmT6SLALflY1gMBcbGwS12fb_Y4,1830 -pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_internal/operations/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/operations/__pycache__/check.cpython-313.pyc,, -pip/_internal/operations/__pycache__/freeze.cpython-313.pyc,, -pip/_internal/operations/__pycache__/prepare.cpython-313.pyc,, -pip/_internal/operations/build/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
-pip/_internal/operations/build/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/operations/build/__pycache__/build_tracker.cpython-313.pyc,, -pip/_internal/operations/build/__pycache__/metadata.cpython-313.pyc,, -pip/_internal/operations/build/__pycache__/metadata_editable.cpython-313.pyc,, -pip/_internal/operations/build/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/operations/build/__pycache__/wheel_editable.cpython-313.pyc,, -pip/_internal/operations/build/build_tracker.py,sha256=W3b5cmkMWPaE6QIwfzsTayJo7-OlxFHWDxfPuax1KcE,4771 -pip/_internal/operations/build/metadata.py,sha256=INHaeiRfOiLYCXApfDNRo9Cw2xI4VwTc0KItvfdfOjk,1421 -pip/_internal/operations/build/metadata_editable.py,sha256=oWudMsnjy4loO_Jy7g4N9nxsnaEX_iDlVRgCy7pu1rs,1509 -pip/_internal/operations/build/wheel.py,sha256=3bP-nNiJ4S8JvMaBnyessXQUBhxTqt1GBx6DQ1iPJDY,1136 -pip/_internal/operations/build/wheel_editable.py,sha256=q3kfElclM6FutVbFwE87JOTpVWt5ixDf3_UkHAIVfz4,1478 -pip/_internal/operations/check.py,sha256=yC2XWth6iehGGE_fj7XRJLjVKBsTIG3ZoWRkFi3rOwc,5894 -pip/_internal/operations/freeze.py,sha256=PDdY-y_ZtZZJLAKcaWPIGRKAGW7DXR48f0aMRU0j7BA,9854 -pip/_internal/operations/install/__init__.py,sha256=ak-UETcQPKlFZaWoYKWu5QVXbpFBvg0sXc3i0O4vSYY,50 -pip/_internal/operations/install/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/operations/install/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/operations/install/wheel.py,sha256=FQIl2AnNadHV5YGGOVEmOHtUUNO8lpzj3Icoo4S2xis,27923 -pip/_internal/operations/prepare.py,sha256=ptVsmQf0Mo6jirk1Q5Djdse_wJw5Zdh1Fla2iL9HAJM,28830 -pip/_internal/pyproject.py,sha256=J-sTWqC-XfsKQgz9m1bypMWZPHItsSHzIN_NWeIRmhM,4555 -pip/_internal/req/__init__.py,sha256=WcY9z7D3rlIKX1QY8_tRnAsS_poebiGGdtQ7EJ5JQQo,3041 -pip/_internal/req/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/req/__pycache__/constructors.cpython-313.pyc,, -pip/_internal/req/__pycache__/pep723.cpython-313.pyc,, -pip/_internal/req/__pycache__/req_dependency_group.cpython-313.pyc,, -pip/_internal/req/__pycache__/req_file.cpython-313.pyc,, -pip/_internal/req/__pycache__/req_install.cpython-313.pyc,, -pip/_internal/req/__pycache__/req_set.cpython-313.pyc,, -pip/_internal/req/__pycache__/req_uninstall.cpython-313.pyc,, -pip/_internal/req/constructors.py,sha256=R-6n8irjnaa2DMMXlR4YMouXzykFBlzUFjhOZ1NcUUg,18688 -pip/_internal/req/pep723.py,sha256=olZL3tLmHWJhyLNfbD6U9UuikuzTcLDB06qd9WavTjs,1225 -pip/_internal/req/req_dependency_group.py,sha256=0yEQCUaO5Bza66Y3D5o9JRf0qII5QgCRugn1x5aRivA,2618 -pip/_internal/req/req_file.py,sha256=e32ZQ3kJaL_Sdtf32twGKqIau_AqR43MeSycl0iS2Mw,20685 -pip/_internal/req/req_install.py,sha256=vv5cbs3P5gf43e_1v72gwSQ2N_D_qpsfuXOyerMhDuI,31273 -pip/_internal/req/req_set.py,sha256=awkqIXnYA4Prmsj0Qb3zhqdbYUmXd-1o0P-KZ3mvRQs,2828 -pip/_internal/req/req_uninstall.py,sha256=dCmOHt-9RaJBq921L4tMH3PmIBDetGplnbjRKXmGt00,24099 -pip/_internal/resolution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_internal/resolution/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/resolution/__pycache__/base.cpython-313.pyc,, -pip/_internal/resolution/base.py,sha256=RIsqSP79olPdOgtPKW-oOQ364ICVopehA6RfGkRfe2s,577 -pip/_internal/resolution/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_internal/resolution/legacy/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/resolution/legacy/__pycache__/resolver.cpython-313.pyc,, -pip/_internal/resolution/legacy/resolver.py,sha256=bwUqE66etz2bcPabqxed18-iyqqb-kx3Er2aT6GeUJY,24060 
-pip/_internal/resolution/resolvelib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/base.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-313.pyc,, -pip/_internal/resolution/resolvelib/base.py,sha256=_AoP0ZWlaSct8CRDn2ol3CbNn4zDtnh_0zQGjXASDKI,5047 -pip/_internal/resolution/resolvelib/candidates.py,sha256=50AN7BfB-pCfEmbKNlFZSXtdC0C8ms1waJrF2arknQE,20454 -pip/_internal/resolution/resolvelib/factory.py,sha256=82mLwnPlig37mMrDwcgKHJTE9mPczVuJIxeaUb7CQ0Y,34028 -pip/_internal/resolution/resolvelib/found_candidates.py,sha256=8bZYDCZLXSdLHy_s1o5f4r15HmKvqFUhzBUQOF21Lr4,6018 -pip/_internal/resolution/resolvelib/provider.py,sha256=tbVPfFv4Vg780yZ2_XGoGFP5LVo0U2bFnZov3jpSAIk,11441 -pip/_internal/resolution/resolvelib/reporter.py,sha256=faSgjqme0k_uzv1fvM5T0ZatPQ2eEktNvKBqfvXeGjc,3909 -pip/_internal/resolution/resolvelib/requirements.py,sha256=Izl9n8nc188lA1BSPS8QxfudfDQPHgngw-ij6hXt0nQ,8239 -pip/_internal/resolution/resolvelib/resolver.py,sha256=wQ94Hkep-7kWEHAc-NbMJhmzeEzgEAtxeBxyKVzZoeo,13437 -pip/_internal/self_outdated_check.py,sha256=zDKsyLMufFHuEZY16WRu129FBbBp-ADuxyWMIN4ihPE,8284 -pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_internal/utils/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/utils/__pycache__/_jaraco_text.cpython-313.pyc,, -pip/_internal/utils/__pycache__/_log.cpython-313.pyc,, -pip/_internal/utils/__pycache__/appdirs.cpython-313.pyc,, -pip/_internal/utils/__pycache__/compat.cpython-313.pyc,, -pip/_internal/utils/__pycache__/compatibility_tags.cpython-313.pyc,, -pip/_internal/utils/__pycache__/datetime.cpython-313.pyc,, -pip/_internal/utils/__pycache__/deprecation.cpython-313.pyc,, -pip/_internal/utils/__pycache__/direct_url_helpers.cpython-313.pyc,, -pip/_internal/utils/__pycache__/egg_link.cpython-313.pyc,, -pip/_internal/utils/__pycache__/entrypoints.cpython-313.pyc,, -pip/_internal/utils/__pycache__/filesystem.cpython-313.pyc,, -pip/_internal/utils/__pycache__/filetypes.cpython-313.pyc,, -pip/_internal/utils/__pycache__/glibc.cpython-313.pyc,, -pip/_internal/utils/__pycache__/hashes.cpython-313.pyc,, -pip/_internal/utils/__pycache__/logging.cpython-313.pyc,, -pip/_internal/utils/__pycache__/misc.cpython-313.pyc,, -pip/_internal/utils/__pycache__/packaging.cpython-313.pyc,, -pip/_internal/utils/__pycache__/pylock.cpython-313.pyc,, -pip/_internal/utils/__pycache__/retry.cpython-313.pyc,, -pip/_internal/utils/__pycache__/subprocess.cpython-313.pyc,, -pip/_internal/utils/__pycache__/temp_dir.cpython-313.pyc,, -pip/_internal/utils/__pycache__/unpacking.cpython-313.pyc,, -pip/_internal/utils/__pycache__/urls.cpython-313.pyc,, -pip/_internal/utils/__pycache__/virtualenv.cpython-313.pyc,, -pip/_internal/utils/__pycache__/wheel.cpython-313.pyc,, -pip/_internal/utils/_jaraco_text.py,sha256=M15uUPIh5NpP1tdUGBxRau6q1ZAEtI8-XyLEETscFfE,3350 
-pip/_internal/utils/_log.py,sha256=-jHLOE_THaZz5BFcCnoSL9EYAtJ0nXem49s9of4jvKw,1015 -pip/_internal/utils/appdirs.py,sha256=LrzDPZMKVh0rubtCx9vu3XlZbLCSug6VSj4Qsvt66BA,1681 -pip/_internal/utils/compat.py,sha256=C9LHXJAKkwAH8Hn3nPkz9EYK3rqPBeO_IXkOG2zzsdQ,2514 -pip/_internal/utils/compatibility_tags.py,sha256=DiNSLqpuruXUamGQwOJ2WZByDGLTGaXi9O-Xf8fOi34,6630 -pip/_internal/utils/datetime.py,sha256=kuJOf1mW8G5tRFN6jWardddS-9qSaR53lK1jmx3NTZY,868 -pip/_internal/utils/deprecation.py,sha256=HVhvyO5qiRFcG88PhZlp_87qdKQNwPTUIIHWtsTR2yI,3696 -pip/_internal/utils/direct_url_helpers.py,sha256=ttKv4GMUqlRwPPog9_CUopy6SDgoxVILzeBJzgfn2tg,3200 -pip/_internal/utils/egg_link.py,sha256=YWfsrbmfcrfWgqQYy6OuIjsyb9IfL1q_2v4zsms1WjI,2459 -pip/_internal/utils/entrypoints.py,sha256=uPjAyShKObdotjQjJUzprQ6r3xQvDIZwUYfHHqZ7Dok,3324 -pip/_internal/utils/filesystem.py,sha256=mJ_PP8z1V1x4HMhydWIWDyEmWikLX0f-NXPCXEcjiLo,6892 -pip/_internal/utils/filetypes.py,sha256=sEMa38qaqjvx1Zid3OCAUja31BOBU-USuSMPBvU3yjo,689 -pip/_internal/utils/glibc.py,sha256=sEh8RJJLYSdRvTqAO4THVPPA-YSDVLD4SI9So-bxX1U,3726 -pip/_internal/utils/hashes.py,sha256=d32UI1en8nyqZzdZQvxUVdfeBoe4ADWx7HtrIM4-XQ4,4998 -pip/_internal/utils/logging.py,sha256=6lJWMC6c7_aD_i4sdgaaeb-Tm3kWpYg0hba_V1-OLnE,13414 -pip/_internal/utils/misc.py,sha256=phFIbHm2kmliHDXJ0eNPxgGP423ZpvZoMKKtJ1_Zvjs,23722 -pip/_internal/utils/packaging.py,sha256=s5tpUmFumwV0H9JSTzryrIY4JwQM8paGt7Sm7eNwt2Y,1601 -pip/_internal/utils/pylock.py,sha256=nKQknZgyswWgzi--hRQX_DLUYQ3g5wGTCwVNQNdoJ54,3817 -pip/_internal/utils/retry.py,sha256=83wReEB2rcntMZ5VLd7ascaYSjn_kLdlQCqxILxWkPM,1461 -pip/_internal/utils/subprocess.py,sha256=r4-Ba_Yc3uZXQpi0K4pZFsCT_QqdSvtF3XJ-204QWaA,8983 -pip/_internal/utils/temp_dir.py,sha256=D9c8D7WOProOO8GGDqpBeVSj10NGFmunG0o2TodjjIU,9307 -pip/_internal/utils/unpacking.py,sha256=4hNg6dqHOn_KzGCzSC76nChG97d_UjtF9AnLSof672o,12972 -pip/_internal/utils/urls.py,sha256=aF_eg9ul5d8bMCxfSSSxQcfs-OpJdbStYqZHoy2K1RE,1601 -pip/_internal/utils/virtualenv.py,sha256=mX-UPyw1MPxhwUxKhbqWWX70J6PHXAJjVVrRnG0h9mc,3455 -pip/_internal/utils/wheel.py,sha256=YdRuj6MicG-Q9Mg03FbUv1WTLam6Lc7AgijY4voVyis,4468 -pip/_internal/vcs/__init__.py,sha256=UAqvzpbi0VbZo3Ub6skEeZAw-ooIZR-zX_WpCbxyCoU,596 -pip/_internal/vcs/__pycache__/__init__.cpython-313.pyc,, -pip/_internal/vcs/__pycache__/bazaar.cpython-313.pyc,, -pip/_internal/vcs/__pycache__/git.cpython-313.pyc,, -pip/_internal/vcs/__pycache__/mercurial.cpython-313.pyc,, -pip/_internal/vcs/__pycache__/subversion.cpython-313.pyc,, -pip/_internal/vcs/__pycache__/versioncontrol.cpython-313.pyc,, -pip/_internal/vcs/bazaar.py,sha256=3W1eHjkYx2vc6boeb2NBh4I_rlGAXM-vrzfNhLm1Rxg,3734 -pip/_internal/vcs/git.py,sha256=TTeqDuzS-_BFSNuUStVWmE2nGDpKuvUhBBJk_CCQXV0,19144 -pip/_internal/vcs/mercurial.py,sha256=w1ZJWLKqNP1onEjkfjlwBVnMqPZNSIER8ayjQcnTq4w,5575 -pip/_internal/vcs/subversion.py,sha256=uUgdPvxmvEB8Qwtjr0Hc0XgFjbiNi5cbvI4vARLOJXo,11787 -pip/_internal/vcs/versioncontrol.py,sha256=Ma_HMZBVveSkeYvxacvqeujnkSIaF1XjxTsS3BwcJ8E,22599 -pip/_internal/wheel_builder.py,sha256=yvEULStZtty9Kplp89tDis3hGdyKQ-2BUbFLmJ_5ink,9010 -pip/_vendor/README.rst,sha256=pKKBwCWhu3M3qQ9dDnsmxb3KdsRr-nWmMq2srbH_Bi0,9394 -pip/_vendor/__init__.py,sha256=WzusPTGWIMeQQWSVJ0h2rafGkVTa9WKJ2HT-2-EoZrU,4907 -pip/_vendor/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/cachecontrol/LICENSE.txt,sha256=hu7uh74qQ_P_H1ZJb0UfaSQ5JvAl_tuwM2ZsMExMFhs,558 -pip/_vendor/cachecontrol/__init__.py,sha256=GxwRkm_TQBtPZpfpVK9r6S9dAy2DVnVgDVHJKTiPZ1k,820 
-pip/_vendor/cachecontrol/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/adapter.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/cache.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/controller.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/serialize.cpython-313.pyc,, -pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-313.pyc,, -pip/_vendor/cachecontrol/_cmd.py,sha256=iist2EpzJvDVIhMAxXq8iFnTBsiZAd6iplxfmNboNyk,1737 -pip/_vendor/cachecontrol/adapter.py,sha256=W-HW-l01gyCsnxkOyCbqx7sxrWYoBbKrDsKkVVQN6NE,6586 -pip/_vendor/cachecontrol/cache.py,sha256=OXwv7Fn2AwnKNiahJHnjtvaKLndvVLv_-zO-ltlV9qI,1953 -pip/_vendor/cachecontrol/caches/__init__.py,sha256=dtrrroK5BnADR1GWjCZ19aZ0tFsMfvFBtLQQU1sp_ag,303 -pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-313.pyc,, -pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-313.pyc,, -pip/_vendor/cachecontrol/caches/file_cache.py,sha256=d8upFmy_zwaCmlbWEVBlLXFddt8Zw8c5SFpxeOZsdfw,4117 -pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=9rmqwtYu_ljVkW6_oLqbC7EaX_a8YT_yLuna-eS0dgo,1386 -pip/_vendor/cachecontrol/controller.py,sha256=xBauC-vUSu5GsJsxD4-W-JaKqqbBz0MN6Zv8PA2N8hI,19102 -pip/_vendor/cachecontrol/filewrapper.py,sha256=DhxC_rSk-beKdbsYhfvBUDovQHX9r3gHH_jP9-q_mKk,4354 -pip/_vendor/cachecontrol/heuristics.py,sha256=gqMXU8w0gQuEQiSdu3Yg-0vd9kW7nrWKbLca75rheGE,4881 -pip/_vendor/cachecontrol/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/cachecontrol/serialize.py,sha256=HQd2IllQ05HzPkVLMXTF2uX5mjEQjDBkxCqUJUODpZk,5163 -pip/_vendor/cachecontrol/wrapper.py,sha256=hsGc7g8QGQTT-4f8tgz3AM5qwScg6FO0BSdLSRdEvpU,1417 -pip/_vendor/certifi/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989 -pip/_vendor/certifi/__init__.py,sha256=969deMMS7Uchipr0oO4dbRBUvRi0uNYCn07VmG1aTrg,94 -pip/_vendor/certifi/__main__.py,sha256=1k3Cr95vCxxGRGDljrW3wMdpZdL3Nhf0u1n-k2qdsCY,255 -pip/_vendor/certifi/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/certifi/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/certifi/__pycache__/core.cpython-313.pyc,, -pip/_vendor/certifi/cacert.pem,sha256=Tzl1_zCrvzVEO0hgZK6Ly0Hf9wf_31dsdtKS-0WKoKk,270954 -pip/_vendor/certifi/core.py,sha256=gu_ECVI1m3Rq0ytpsNE61hgQGcKaOAt9Rs9G8KsTCOI,3442 -pip/_vendor/certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/dependency_groups/LICENSE.txt,sha256=GrNuPipLqGMWJThPh-ngkdsfrtA0xbIzJbMjmr8sxSU,1099 -pip/_vendor/dependency_groups/__init__.py,sha256=C3OFu0NGwDzQ4LOmmSOFPsRSvkbBn-mdd4j_5YqJw-s,250 -pip/_vendor/dependency_groups/__main__.py,sha256=UNTM7P5mfVtT7wDi9kOTXWgV3fu3e8bTrt1Qp1jvjKo,1709 -pip/_vendor/dependency_groups/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/dependency_groups/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-313.pyc,, -pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-313.pyc,, -pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-313.pyc,, -pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-313.pyc,, -pip/_vendor/dependency_groups/_implementation.py,sha256=Gqb2DlQELRakeHlKf6QtQSW0M-bcEomxHw4JsvID1ls,8041 
-pip/_vendor/dependency_groups/_lint_dependency_groups.py,sha256=yp-DDqKXtbkDTNa0ifa-FmOA8ra24lPZEXftW-R5AuI,1710 -pip/_vendor/dependency_groups/_pip_wrapper.py,sha256=nuVW_w_ntVxpE26ELEvngMY0N04sFLsijXRyZZROFG8,1865 -pip/_vendor/dependency_groups/_toml_compat.py,sha256=BHnXnFacm3DeolsA35GjI6qkDApvua-1F20kv3BfZWE,285 -pip/_vendor/dependency_groups/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/distlib/LICENSE.txt,sha256=gI4QyKarjesUn_mz-xn0R6gICUYG1xKpylf-rTVSWZ0,14531 -pip/_vendor/distlib/__init__.py,sha256=Deo3uo98aUyIfdKJNqofeSEFWwDzrV2QeGLXLsgq0Ag,625 -pip/_vendor/distlib/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/distlib/__pycache__/compat.cpython-313.pyc,, -pip/_vendor/distlib/__pycache__/resources.cpython-313.pyc,, -pip/_vendor/distlib/__pycache__/scripts.cpython-313.pyc,, -pip/_vendor/distlib/__pycache__/util.cpython-313.pyc,, -pip/_vendor/distlib/compat.py,sha256=2jRSjRI4o-vlXeTK2BCGIUhkc6e9ZGhSsacRM5oseTw,41467 -pip/_vendor/distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820 -pip/_vendor/distlib/scripts.py,sha256=Qvp76E9Jc3IgyYubnpqI9fS7eseGOe4FjpeVKqKt9Iw,18612 -pip/_vendor/distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792 -pip/_vendor/distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784 -pip/_vendor/distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032 -pip/_vendor/distlib/util.py,sha256=vMPGvsS4j9hF6Y9k3Tyom1aaHLb0rFmZAEyzeAdel9w,66682 -pip/_vendor/distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648 -pip/_vendor/distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448 -pip/_vendor/distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888 -pip/_vendor/distro/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325 -pip/_vendor/distro/__init__.py,sha256=2fHjF-SfgPvjyNZ1iHh_wjqWdR_Yo5ODHwZC0jLBPhc,981 -pip/_vendor/distro/__main__.py,sha256=bu9d3TifoKciZFcqRBuygV3GSuThnVD_m2IK4cz96Vs,64 -pip/_vendor/distro/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/distro/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/distro/__pycache__/distro.cpython-313.pyc,, -pip/_vendor/distro/distro.py,sha256=XqbefacAhDT4zr_trnbA15eY8vdK4GTghgmvUGrEM_4,49430 -pip/_vendor/distro/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/idna/LICENSE.md,sha256=t6M2q_OwThgOwGXN0W5wXQeeHMehT5EKpukYfza5zYc,1541 -pip/_vendor/idna/__init__.py,sha256=MPqNDLZbXqGaNdXxAFhiqFPKEQXju2jNQhCey6-5eJM,868 -pip/_vendor/idna/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/codec.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/compat.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/core.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/idnadata.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/intranges.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/package_data.cpython-313.pyc,, -pip/_vendor/idna/__pycache__/uts46data.cpython-313.pyc,, -pip/_vendor/idna/codec.py,sha256=M2SGWN7cs_6B32QmKTyTN6xQGZeYQgQ2wiX3_DR6loE,3438 -pip/_vendor/idna/compat.py,sha256=RzLy6QQCdl9784aFhb2EX9EKGCJjg0P3PilGdeXXcx8,316 -pip/_vendor/idna/core.py,sha256=P26_XVycuMTZ1R2mNK1ZREVzM5mvTzdabBXfyZVU1Lc,13246 -pip/_vendor/idna/idnadata.py,sha256=SG8jhaGE53iiD6B49pt2pwTv_UvClciWE-N54oR2p4U,79623 -pip/_vendor/idna/intranges.py,sha256=amUtkdhYcQG8Zr-CoMM_kVRacxkivC1WgxN1b63KKdU,1898 -pip/_vendor/idna/package_data.py,sha256=_CUavOxobnbyNG2FLyHoN8QHP3QM9W1tKuw7eq9QwBk,21 
-pip/_vendor/idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/idna/uts46data.py,sha256=H9J35VkD0F9L9mKOqjeNGd2A-Va6FlPoz6Jz4K7h-ps,243725 -pip/_vendor/msgpack/COPYING,sha256=SS3tuoXaWHL3jmCRvNH-pHTWYNNay03ulkuKqz8AdCc,614 -pip/_vendor/msgpack/__init__.py,sha256=RA8gcqK17YpkxBnNwXJVa1oa2LygWDgfF1nA1NPw3mo,1109 -pip/_vendor/msgpack/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/msgpack/__pycache__/exceptions.cpython-313.pyc,, -pip/_vendor/msgpack/__pycache__/ext.cpython-313.pyc,, -pip/_vendor/msgpack/__pycache__/fallback.cpython-313.pyc,, -pip/_vendor/msgpack/exceptions.py,sha256=dCTWei8dpkrMsQDcjQk74ATl9HsIBH0ybt8zOPNqMYc,1081 -pip/_vendor/msgpack/ext.py,sha256=kteJv03n9tYzd5oo3xYopVTo4vRaAxonBQQJhXohZZo,5726 -pip/_vendor/msgpack/fallback.py,sha256=0g1Pzp0vtmBEmJ5w9F3s_-JMVURP8RS4G1cc5TRaAsI,32390 -pip/_vendor/packaging/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 -pip/_vendor/packaging/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 -pip/_vendor/packaging/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 -pip/_vendor/packaging/__init__.py,sha256=y4lVbpeBzCGk-IPDw5BGBZ_b0P3ukEEJZAbGYc6Ey8c,494 -pip/_vendor/packaging/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_elffile.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_manylinux.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_musllinux.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_parser.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_structures.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/_tokenizer.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/markers.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/metadata.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/pylock.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/requirements.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/specifiers.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/tags.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/utils.cpython-313.pyc,, -pip/_vendor/packaging/__pycache__/version.cpython-313.pyc,, -pip/_vendor/packaging/_elffile.py,sha256=-sKkptYqzYw2-x3QByJa5mB4rfPWu1pxkZHRx1WAFCY,3211 -pip/_vendor/packaging/_manylinux.py,sha256=Hf6nB0cOrayEs96-p3oIXAgGnFquv20DO5l-o2_Xnv0,9559 -pip/_vendor/packaging/_musllinux.py,sha256=Z6swjH3MA7XS3qXnmMN7QPhqP3fnoYI0eQ18e9-HgAE,2707 -pip/_vendor/packaging/_parser.py,sha256=U_DajsEx2VoC_F46fSVV3hDKNCWoQYkPkasO3dld0ig,10518 -pip/_vendor/packaging/_structures.py,sha256=Hn49Ta8zV9Wo8GiCL8Nl2ARZY983Un3pruZGVNldPwE,1514 -pip/_vendor/packaging/_tokenizer.py,sha256=M8EwNIdXeL9NMFuFrQtiOKwjka_xFx8KjRQnfE8O_z8,5421 -pip/_vendor/packaging/licenses/__init__.py,sha256=TwXLHZCXwSgdFwRLPxW602T6mSieunSFHM6fp8pgW78,5819 -pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-313.pyc,, -pip/_vendor/packaging/licenses/_spdx.py,sha256=WW7DXiyg68up_YND_wpRYlr1SHhiV4FfJLQffghhMxQ,51122 -pip/_vendor/packaging/markers.py,sha256=ZX-cLvW1S3cZcEc0fHI4z7zSx5U2T19yMpDP_mE-CYw,12771 -pip/_vendor/packaging/metadata.py,sha256=CWVZpN_HfoYMSSDuCP7igOvGgqA9AOmpW8f3qTisfnc,39360 -pip/_vendor/packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/packaging/pylock.py,sha256=-R1uNfJ4PaLto7Mg62YsGOHgvskuiIEqPwxOywl42Jk,22537 -pip/_vendor/packaging/requirements.py,sha256=PMCAWD8aNMnVD-6uZMedhBuAVX2573eZ4yPBLXmz04I,2870 
-pip/_vendor/packaging/specifiers.py,sha256=tF2nC-jwW94FYe6So9dNGenQx1Hdif7ErmWlVp1QiXE,40821 -pip/_vendor/packaging/tags.py,sha256=cXLV1pJD3UtJlDg7Wz3zrfdQhRZqr8jumSAKKAAd2xE,22856 -pip/_vendor/packaging/utils.py,sha256=N4c6oZzFJy6klTZ3AnkNz7sSkJesuFWPp68LA3B5dAo,5040 -pip/_vendor/packaging/version.py,sha256=RVRKq8_GD5Bcak6E1kGG8K7siNZYW9n_XK8M2ZLl0H8,23284 -pip/_vendor/pkg_resources/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 -pip/_vendor/pkg_resources/__init__.py,sha256=vbTJ0_ruUgGxQjlEqsruFmiNPVyh2t9q-zyTDT053xI,124451 -pip/_vendor/pkg_resources/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/platformdirs/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089 -pip/_vendor/platformdirs/__init__.py,sha256=UfeSHWl8AeTtbOBOoHAxK4dODOWkZtfy-m_i7cWdJ8c,22344 -pip/_vendor/platformdirs/__main__.py,sha256=jBJ8zb7Mpx5ebcqF83xrpO94MaeCpNGHVf9cvDN2JLg,1505 -pip/_vendor/platformdirs/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/android.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/api.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/macos.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/unix.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/version.cpython-313.pyc,, -pip/_vendor/platformdirs/__pycache__/windows.cpython-313.pyc,, -pip/_vendor/platformdirs/android.py,sha256=r0DshVBf-RO1jXJGX8C4Til7F1XWt-bkdWMgmvEiaYg,9013 -pip/_vendor/platformdirs/api.py,sha256=wPHOlwOsfz2oqQZ6A2FcCu5kEAj-JondzoNOHYFQ0h8,9281 -pip/_vendor/platformdirs/macos.py,sha256=0XoOgin1NK7Qki7iskD-oS8xKxw6bXgoKEgdqpCRAFQ,6322 -pip/_vendor/platformdirs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/platformdirs/unix.py,sha256=WZmkUA--L3JNRGmz32s35YfoD3ica6xKIPdCV_HhLcs,10458 -pip/_vendor/platformdirs/version.py,sha256=BI_dKLSMwlkl57vlxZnT8oVjPiUC2W_sdx_8_h99HeQ,704 -pip/_vendor/platformdirs/windows.py,sha256=XvCfklGUMVxJbXit51jpYMN-lNeScPB82qS1CAeplL0,10362 -pip/_vendor/pygments/LICENSE,sha256=qdZvHVJt8C4p3Oc0NtNOVuhjL0bCdbvf_HBWnogvnxc,1331 -pip/_vendor/pygments/__init__.py,sha256=8uNqJCCwXqbEx5aSsBr0FykUQOBDKBihO5mPqiw1aqo,2983 -pip/_vendor/pygments/__main__.py,sha256=WrndpSe6i1ckX_SQ1KaxD9CTKGzD0EuCOFxcbwFpoLU,353 -pip/_vendor/pygments/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/console.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/filter.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/formatter.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/lexer.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/modeline.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/plugin.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/regexopt.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/scanner.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/sphinxext.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/style.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/token.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/unistring.cpython-313.pyc,, -pip/_vendor/pygments/__pycache__/util.cpython-313.pyc,, -pip/_vendor/pygments/console.py,sha256=AagDWqwea2yBWf10KC9ptBgMpMjxKp8yABAmh-NQOVk,1718 -pip/_vendor/pygments/filter.py,sha256=YLtpTnZiu07nY3oK9nfR6E9Y1FBHhP5PX8gvkJWcfag,1910 -pip/_vendor/pygments/filters/__init__.py,sha256=4U4jtA0X3iP83uQnB9-TI-HDSw8E8y8zMYHa0UjbbaI,40392 -pip/_vendor/pygments/filters/__pycache__/__init__.cpython-313.pyc,, 
-pip/_vendor/pygments/formatter.py,sha256=KZQMmyo_xkOIkQG8g66LYEkBh1bx7a0HyGCBcvhI9Ew,4390 -pip/_vendor/pygments/formatters/__init__.py,sha256=KTwBmnXlaopJhQDOemVHYHskiDghuq-08YtP6xPNJPg,5385 -pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-313.pyc,, -pip/_vendor/pygments/formatters/_mapping.py,sha256=1Cw37FuQlNacnxRKmtlPX4nyLoX9_ttko5ZwscNUZZ4,4176 -pip/_vendor/pygments/lexer.py,sha256=_kBrOJ_NT5Tl0IVM0rA9c8eysP6_yrlGzEQI0eVYB-A,35349 -pip/_vendor/pygments/lexers/__init__.py,sha256=wbIME35GH7bI1B9rNPJFqWT-ij_RApZDYPUlZycaLzA,12115 -pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-313.pyc,, -pip/_vendor/pygments/lexers/__pycache__/python.cpython-313.pyc,, -pip/_vendor/pygments/lexers/_mapping.py,sha256=l4tCXM8e9aPC2BD6sjIr0deT-J-z5tHgCwL-p1fS0PE,77602 -pip/_vendor/pygments/lexers/python.py,sha256=vxjn1cOHclIKJKxoyiBsQTY65GHbkZtZRuKQ2AVCKaw,53853 -pip/_vendor/pygments/modeline.py,sha256=K5eSkR8GS1r5OkXXTHOcV0aM_6xpk9eWNEIAW-OOJ2g,1005 -pip/_vendor/pygments/plugin.py,sha256=tPx0rJCTIZ9ioRgLNYG4pifCbAwTRUZddvLw-NfAk2w,1891 -pip/_vendor/pygments/regexopt.py,sha256=wXaP9Gjp_hKAdnICqoDkRxAOQJSc4v3X6mcxx3z-TNs,3072 -pip/_vendor/pygments/scanner.py,sha256=nNcETRR1tRuiTaHmHSTTECVYFPcLf6mDZu1e4u91A9E,3092 -pip/_vendor/pygments/sphinxext.py,sha256=5x7Zh9YlU6ISJ31dMwduiaanb5dWZnKg3MyEQsseNnQ,7981 -pip/_vendor/pygments/style.py,sha256=PlOZqlsnTVd58RGy50vkA2cXQ_lP5bF5EGMEBTno6DA,6420 -pip/_vendor/pygments/styles/__init__.py,sha256=x9ebctfyvCAFpMTlMJ5YxwcNYBzjgq6zJaKkNm78r4M,2042 -pip/_vendor/pygments/styles/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-313.pyc,, -pip/_vendor/pygments/styles/_mapping.py,sha256=6lovFUE29tz6EsV3XYY4hgozJ7q1JL7cfO3UOlgnS8w,3312 -pip/_vendor/pygments/token.py,sha256=WbdWGhYm_Vosb0DDxW9lHNPgITXfWTsQmHt6cy9RbcM,6226 -pip/_vendor/pygments/unistring.py,sha256=al-_rBemRuGvinsrM6atNsHTmJ6DUbw24q2O2Ru1cBc,63208 -pip/_vendor/pygments/util.py,sha256=oRtSpiAo5jM9ulntkvVbgXUdiAW57jnuYGB7t9fYuhc,10031 -pip/_vendor/pyproject_hooks/LICENSE,sha256=GyKwSbUmfW38I6Z79KhNjsBLn9-xpR02DkK0NCyLQVQ,1081 -pip/_vendor/pyproject_hooks/__init__.py,sha256=cPB_a9LXz5xvsRbX1o2qyAdjLatZJdQ_Lc5McNX-X7Y,691 -pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-313.pyc,, -pip/_vendor/pyproject_hooks/_impl.py,sha256=jY-raxnmyRyB57ruAitrJRUzEexuAhGTpgMygqx67Z4,14936 -pip/_vendor/pyproject_hooks/_in_process/__init__.py,sha256=MJNPpfIxcO-FghxpBbxkG1rFiQf6HOUbV4U5mq0HFns,557 -pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-313.pyc,, -pip/_vendor/pyproject_hooks/_in_process/_in_process.py,sha256=qcXMhmx__MIJq10gGHW3mA4Tl8dy8YzHMccwnNoKlw0,12216 -pip/_vendor/pyproject_hooks/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/requests/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 -pip/_vendor/requests/__init__.py,sha256=HlB_HzhrzGtfD_aaYUwUh1zWXLZ75_YCLyit75d0Vz8,5057 -pip/_vendor/requests/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/__version__.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/_internal_utils.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/adapters.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/api.cpython-313.pyc,, 
-pip/_vendor/requests/__pycache__/auth.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/certs.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/compat.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/cookies.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/exceptions.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/help.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/hooks.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/models.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/packages.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/sessions.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/status_codes.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/structures.cpython-313.pyc,, -pip/_vendor/requests/__pycache__/utils.cpython-313.pyc,, -pip/_vendor/requests/__version__.py,sha256=QKDceK8K_ujqwDDc3oYrR0odOBYgKVOQQ5vFap_G_cg,435 -pip/_vendor/requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495 -pip/_vendor/requests/adapters.py,sha256=2MLFOK9GpYNhiTd6zLDUrAgSkIB-76i6pmSuUJjHC2w,26429 -pip/_vendor/requests/api.py,sha256=_Zb9Oa7tzVIizTKwFrPjDEY9ejtm_OnSRERnADxGsQs,6449 -pip/_vendor/requests/auth.py,sha256=kF75tqnLctZ9Mf_hm9TZIj4cQWnN5uxRz8oWsx5wmR0,10186 -pip/_vendor/requests/certs.py,sha256=kHDlkK_beuHXeMPc5jta2wgl8gdKeUWt5f2nTDVrvt8,441 -pip/_vendor/requests/compat.py,sha256=QfbmdTFiZzjSHMXiMrd4joCRU6RabtQ9zIcPoVaHIus,1822 -pip/_vendor/requests/cookies.py,sha256=bNi-iqEj4NPZ00-ob-rHvzkvObzN3lEpgw3g6paS3Xw,18590 -pip/_vendor/requests/exceptions.py,sha256=D1wqzYWne1mS2rU43tP9CeN1G7QAy7eqL9o1god6Ejw,4272 -pip/_vendor/requests/help.py,sha256=hRKaf9u0G7fdwrqMHtF3oG16RKktRf6KiwtSq2Fo1_0,3813 -pip/_vendor/requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733 -pip/_vendor/requests/models.py,sha256=taljlg6vJ4b-xMu2TaMNFFkaiwMex_VsEQ6qUTN3wzY,35575 -pip/_vendor/requests/packages.py,sha256=_ZQDCJTJ8SP3kVWunSqBsRZNPzj2c1WFVqbdr08pz3U,1057 -pip/_vendor/requests/sessions.py,sha256=Cl1dpEnOfwrzzPbku-emepNeN4Rt_0_58Iy2x-JGTm8,30503 -pip/_vendor/requests/status_codes.py,sha256=iJUAeA25baTdw-6PfD0eF4qhpINDJRJI-yaMqxs4LEI,4322 -pip/_vendor/requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912 -pip/_vendor/requests/utils.py,sha256=WS3wHSQaaEfceu1syiFo5jf4e_CWKUTep_IabOVI_J0,33225 -pip/_vendor/resolvelib/LICENSE,sha256=84j9OMrRMRLB3A9mm76A5_hFQe26-3LzAw0sp2QsPJ0,751 -pip/_vendor/resolvelib/__init__.py,sha256=yoX-d4STvwGGCiQRE5cJC9Cter69SgVgqClxOCvSP7M,541 -pip/_vendor/resolvelib/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/resolvelib/__pycache__/providers.cpython-313.pyc,, -pip/_vendor/resolvelib/__pycache__/reporters.cpython-313.pyc,, -pip/_vendor/resolvelib/__pycache__/structs.cpython-313.pyc,, -pip/_vendor/resolvelib/providers.py,sha256=pIWJbIdJJ9GFtNbtwTH0Ia43Vj6hYCEJj2DOLue15FM,8914 -pip/_vendor/resolvelib/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/resolvelib/reporters.py,sha256=pNJf4nFxLpAeKxlBUi2GEj0a2Ij1nikY0UabTKXesT4,2037 -pip/_vendor/resolvelib/resolvers/__init__.py,sha256=728M3EvmnPbVXS7ExXlv2kMu6b7wEsoPutEfl-uVk_I,640 -pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-313.pyc,, -pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-313.pyc,, -pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-313.pyc,, -pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-313.pyc,, 
-pip/_vendor/resolvelib/resolvers/abstract.py,sha256=CNeQPnpAudY77nmzOkONSmAgRlzIf06X-X9mvRYODms,1543 -pip/_vendor/resolvelib/resolvers/criterion.py,sha256=lcmZGv5sKHOnFD_RzZwvlGSj19MeA-5rCMpdf2Sgw7Y,1768 -pip/_vendor/resolvelib/resolvers/exceptions.py,sha256=ln_jaQtgLlRUSFY627yiHG2gD7AgaXzRKaElFVh7fDQ,1768 -pip/_vendor/resolvelib/resolvers/resolution.py,sha256=3J_zkW-sD3EY-BlNXjyln__njpyH5n0UZJT6uV7CheA,24212 -pip/_vendor/resolvelib/structs.py,sha256=pu-EJiR2IBITr2SQeNPRa0rXhjlStfmO_GEgAhr3004,6420 -pip/_vendor/rich/LICENSE,sha256=3u18F6QxgVgZCj6iOcyHmlpQJxzruYrnAl9I--WNyhU,1056 -pip/_vendor/rich/__init__.py,sha256=dRxjIL-SbFVY0q3IjSMrfgBTHrm1LZDgLOygVBwiYZc,6090 -pip/_vendor/rich/__main__.py,sha256=e_aVC-tDzarWQW9SuZMuCgBr6ODV_iDNV2Wh2xkxOlw,7896 -pip/_vendor/rich/__pycache__/__init__.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/__main__.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_cell_widths.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_emoji_codes.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_emoji_replace.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_export_format.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_extension.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_fileno.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_inspect.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_log_render.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_loop.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_null_file.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_palettes.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_pick.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_ratio.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_spinners.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_stack.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_timer.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_win32_console.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_windows.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_windows_renderer.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/_wrap.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/abc.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/align.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/ansi.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/bar.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/box.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/cells.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/color.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/color_triplet.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/columns.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/console.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/constrain.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/containers.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/control.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/default_styles.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/diagnose.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/emoji.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/errors.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/file_proxy.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/filesize.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/highlighter.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/json.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/jupyter.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/layout.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/live.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/live_render.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/logging.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/markup.cpython-313.pyc,, 
-pip/_vendor/rich/__pycache__/measure.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/padding.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/pager.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/palette.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/panel.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/pretty.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/progress.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/progress_bar.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/prompt.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/protocol.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/region.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/repr.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/rule.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/scope.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/screen.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/segment.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/spinner.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/status.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/style.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/styled.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/syntax.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/table.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/terminal_theme.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/text.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/theme.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/themes.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/traceback.cpython-313.pyc,, -pip/_vendor/rich/__pycache__/tree.cpython-313.pyc,, -pip/_vendor/rich/_cell_widths.py,sha256=fbmeyetEdHjzE_Vx2l1uK7tnPOhMs2X1lJfO3vsKDpA,10209 -pip/_vendor/rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235 -pip/_vendor/rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064 -pip/_vendor/rich/_export_format.py,sha256=RI08pSrm5tBSzPMvnbTqbD9WIalaOoN5d4M1RTmLq1Y,2128 -pip/_vendor/rich/_extension.py,sha256=Xt47QacCKwYruzjDi-gOBq724JReDj9Cm9xUi5fr-34,265 -pip/_vendor/rich/_fileno.py,sha256=HWZxP5C2ajMbHryvAQZseflVfQoGzsKOHzKGsLD8ynQ,799 -pip/_vendor/rich/_inspect.py,sha256=ROT0PLC2GMWialWZkqJIjmYq7INRijQQkoSokWTaAiI,9656 -pip/_vendor/rich/_log_render.py,sha256=1ByI0PA1ZpxZY3CGJOK54hjlq4X-Bz_boIjIqCd8Kns,3225 -pip/_vendor/rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236 -pip/_vendor/rich/_null_file.py,sha256=ADGKp1yt-k70FMKV6tnqCqecB-rSJzp-WQsD7LPL-kg,1394 -pip/_vendor/rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063 -pip/_vendor/rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423 -pip/_vendor/rich/_ratio.py,sha256=IOtl78sQCYZsmHyxhe45krkb68u9xVz7zFsXVJD-b2Y,5325 -pip/_vendor/rich/_spinners.py,sha256=U2r1_g_1zSjsjiUdAESc2iAMc3i4ri_S8PYP6kQ5z1I,19919 -pip/_vendor/rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351 -pip/_vendor/rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417 -pip/_vendor/rich/_win32_console.py,sha256=BSaDRIMwBLITn_m0mTRLPqME5q-quGdSMuYMpYeYJwc,22755 -pip/_vendor/rich/_windows.py,sha256=aBwaD_S56SbgopIvayVmpk0Y28uwY2C5Bab1wl3Bp-I,1925 -pip/_vendor/rich/_windows_renderer.py,sha256=t74ZL3xuDCP3nmTp9pH1L5LiI2cakJuQRQleHCJerlk,2783 -pip/_vendor/rich/_wrap.py,sha256=FlSsom5EX0LVkA3KWy34yHnCfLtqX-ZIepXKh-70rpc,3404 -pip/_vendor/rich/abc.py,sha256=ON-E-ZqSSheZ88VrKX2M3PXpFbGEUUZPMa_Af0l-4f0,890 -pip/_vendor/rich/align.py,sha256=dg-7uY0ukMLLlUEsBDRLva22_sQgIJD4BK0dmZHFHug,10324 -pip/_vendor/rich/ansi.py,sha256=Avs1LHbSdcyOvDOdpELZUoULcBiYewY76eNBp6uFBhs,6921 
-pip/_vendor/rich/bar.py,sha256=ldbVHOzKJOnflVNuv1xS7g6dLX2E3wMnXkdPbpzJTcs,3263 -pip/_vendor/rich/box.py,sha256=kmavBc_dn73L_g_8vxWSwYJD2uzBXOUFTtJOfpbczcM,10686 -pip/_vendor/rich/cells.py,sha256=KrQkj5-LghCCpJLSNQIyAZjndc4bnEqOEmi5YuZ9UCY,5130 -pip/_vendor/rich/color.py,sha256=3HSULVDj7qQkXUdFWv78JOiSZzfy5y1nkcYhna296V0,18211 -pip/_vendor/rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054 -pip/_vendor/rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131 -pip/_vendor/rich/console.py,sha256=t9azZpmRMVU5cphVBZSShNsmBxd2-IAWcTTlhor-E1s,100849 -pip/_vendor/rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288 -pip/_vendor/rich/containers.py,sha256=c_56TxcedGYqDepHBMTuZdUIijitAQgnox-Qde0Z1qo,5502 -pip/_vendor/rich/control.py,sha256=EUTSUFLQbxY6Zmo_sdM-5Ls323vIHTBfN8TPulqeHUY,6487 -pip/_vendor/rich/default_styles.py,sha256=khQFqqaoDs3bprMqWpHw8nO5UpG2DN6QtuTd6LzZwYc,8257 -pip/_vendor/rich/diagnose.py,sha256=fJl1TItRn19gGwouqTg-8zPUW3YqQBqGltrfPQs1H9w,1025 -pip/_vendor/rich/emoji.py,sha256=Wd4bQubZdSy6-PyrRQNuMHtn2VkljK9uPZPVlu2cmx0,2367 -pip/_vendor/rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642 -pip/_vendor/rich/file_proxy.py,sha256=Tl9THMDZ-Pk5Wm8sI1gGg_U5DhusmxD-FZ0fUbcU0W0,1683 -pip/_vendor/rich/filesize.py,sha256=_iz9lIpRgvW7MNSeCZnLg-HwzbP4GETg543WqD8SFs0,2484 -pip/_vendor/rich/highlighter.py,sha256=G_sn-8DKjM1sEjLG_oc4ovkWmiUpWvj8bXi0yed2LnY,9586 -pip/_vendor/rich/json.py,sha256=vVEoKdawoJRjAFayPwXkMBPLy7RSTs-f44wSQDR2nJ0,5031 -pip/_vendor/rich/jupyter.py,sha256=QyoKoE_8IdCbrtiSHp9TsTSNyTHY0FO5whE7jOTd9UE,3252 -pip/_vendor/rich/layout.py,sha256=ajkSFAtEVv9EFTcFs-w4uZfft7nEXhNzL7ZVdgrT5rI,14004 -pip/_vendor/rich/live.py,sha256=tF3ukAAJZ_N2ZbGclqZ-iwLoIoZ8f0HHUz79jAyJqj8,15180 -pip/_vendor/rich/live_render.py,sha256=It_39YdzrBm8o3LL0kaGorPFg-BfZWAcrBjLjFokbx4,3521 -pip/_vendor/rich/logging.py,sha256=5KaPPSMP9FxcXPBcKM4cGd_zW78PMgf-YbMVnvfSw0o,12468 -pip/_vendor/rich/markup.py,sha256=3euGKP5s41NCQwaSjTnJxus5iZMHjxpIM0W6fCxra38,8451 -pip/_vendor/rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305 -pip/_vendor/rich/padding.py,sha256=KVEI3tOwo9sgK1YNSuH__M1_jUWmLZwRVV_KmOtVzyM,4908 -pip/_vendor/rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828 -pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396 -pip/_vendor/rich/panel.py,sha256=9sQl00hPIqH5G2gALQo4NepFwpP0k9wT-s_gOms5pIc,11157 -pip/_vendor/rich/pretty.py,sha256=gy3S72u4FRg2ytoo7N1ZDWDIvB4unbzd5iUGdgm-8fc,36391 -pip/_vendor/rich/progress.py,sha256=CUc2lkU-X59mVdGfjMCBkZeiGPL3uxdONjhNJF2T7wY,60408 -pip/_vendor/rich/progress_bar.py,sha256=mZTPpJUwcfcdgQCTTz3kyY-fc79ddLwtx6Ghhxfo064,8162 -pip/_vendor/rich/prompt.py,sha256=l0RhQU-0UVTV9e08xW1BbIj0Jq2IXyChX4lC0lFNzt4,12447 -pip/_vendor/rich/protocol.py,sha256=5hHHDDNHckdk8iWH5zEbi-zuIVSF5hbU2jIo47R7lTE,1391 -pip/_vendor/rich/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166 -pip/_vendor/rich/repr.py,sha256=5MZJZmONgC6kud-QW-_m1okXwL2aR6u6y-pUcUCJz28,4431 -pip/_vendor/rich/rule.py,sha256=0fNaS_aERa3UMRc3T5WMpN_sumtDxfaor2y3of1ftBk,4602 -pip/_vendor/rich/scope.py,sha256=TMUU8qo17thyqQCPqjDLYpg_UU1k5qVd-WwiJvnJVas,2843 -pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591 -pip/_vendor/rich/segment.py,sha256=otnKeKGEV-WRlQVosfJVeFDcDxAKHpvJ_hLzSu5lumM,24743 
-pip/_vendor/rich/spinner.py,sha256=onIhpKlljRHppTZasxO8kXgtYyCHUkpSgKglRJ3o51g,4214
-pip/_vendor/rich/status.py,sha256=kkPph3YeAZBo-X-4wPp8gTqZyU466NLwZBA4PZTTewo,4424
-pip/_vendor/rich/style.py,sha256=W9Ccy8Py8lNICtlfcp-ryzMTuQaGxAU3av7-g5fHu0s,26990
-pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258
-pip/_vendor/rich/syntax.py,sha256=eDKIRwl--eZ0Lwo2da2RRtfutXGavrJO61Cl5OkS59U,36371
-pip/_vendor/rich/table.py,sha256=ZmT7V7MMCOYKw7TGY9SZLyYDf6JdM-WVf07FdVuVhTI,40049
-pip/_vendor/rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370
-pip/_vendor/rich/text.py,sha256=AO7JPCz6-gaN1thVLXMBntEmDPVYFgFNG1oM61_sanU,47552
-pip/_vendor/rich/theme.py,sha256=oNyhXhGagtDlbDye3tVu3esWOWk0vNkuxFw-_unlaK0,3771
-pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102
-pip/_vendor/rich/traceback.py,sha256=c0WmB_L04_UfZbLaoH982_U_s7eosxKMUiAVmDPdRYU,35861
-pip/_vendor/rich/tree.py,sha256=yWnQ6rAvRGJ3qZGqBrxS2SW2TKBTNrP0SdY8QxOFPuw,9451
-pip/_vendor/tomli/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072
-pip/_vendor/tomli/__init__.py,sha256=qzEGl8QHhqgQPCuLzfKyPIuH3KKPspf-UVPbZ0ppBD4,314
-pip/_vendor/tomli/__pycache__/__init__.cpython-313.pyc,,
-pip/_vendor/tomli/__pycache__/_parser.cpython-313.pyc,,
-pip/_vendor/tomli/__pycache__/_re.cpython-313.pyc,,
-pip/_vendor/tomli/__pycache__/_types.cpython-313.pyc,,
-pip/_vendor/tomli/_parser.py,sha256=bO8tUYmnyA2K6m4TnbQbfUqmIFcDv7mG1KuC9gqRVmA,25778
-pip/_vendor/tomli/_re.py,sha256=n8-Io8ZK1U-F6jzlg7Pabc40hLFJsawE2uNLKH9w7iU,3235
-pip/_vendor/tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254
-pip/_vendor/tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26
-pip/_vendor/tomli_w/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072
-pip/_vendor/tomli_w/__init__.py,sha256=0F8yDtXx3Uunhm874KrAcP76srsM98y7WyHQwCulZbo,169
-pip/_vendor/tomli_w/__pycache__/__init__.cpython-313.pyc,,
-pip/_vendor/tomli_w/__pycache__/_writer.cpython-313.pyc,,
-pip/_vendor/tomli_w/_writer.py,sha256=dsifFS2xYf1i76mmRyfz9y125xC7Z_HQ845ZKhJsYXs,6961
-pip/_vendor/tomli_w/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26
-pip/_vendor/truststore/LICENSE,sha256=M757fo-k_Rmxdg4ajtimaL2rhSyRtpLdQUJLy3Jan8o,1086
-pip/_vendor/truststore/__init__.py,sha256=Bu7kqkmpunhLsj5xCu8gT_25ktoPXcSnwe8VHk1GmJo,1320
-pip/_vendor/truststore/__pycache__/__init__.cpython-313.pyc,,
-pip/_vendor/truststore/__pycache__/_api.cpython-313.pyc,,
-pip/_vendor/truststore/__pycache__/_macos.cpython-313.pyc,,
-pip/_vendor/truststore/__pycache__/_openssl.cpython-313.pyc,,
-pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-313.pyc,,
-pip/_vendor/truststore/__pycache__/_windows.cpython-313.pyc,,
-pip/_vendor/truststore/_api.py,sha256=CYJCV5BTfttZYfqY3movdMBE-8az7uhET_LYbKT2Nn4,11413
-pip/_vendor/truststore/_macos.py,sha256=nZlLkOmszUE0g6ryRwBVGY5COzPyudcsiJtDWarM5LQ,20503
-pip/_vendor/truststore/_openssl.py,sha256=zB-SQvJydks7tQ0yIwrP6GD3fQNSSaPiq7zw4yF5T40,2412
-pip/_vendor/truststore/_ssl_constants.py,sha256=NUD4fVKdSD02ri7-db0tnO0VqLP9aHuzmStcW7tAl08,1130
-pip/_vendor/truststore/_windows.py,sha256=rAHyKYD8M7t-bXfG8VgOVa3TpfhVhbt4rZQlO45YuP8,17993
-pip/_vendor/truststore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/urllib3/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115
-pip/_vendor/urllib3/__init__.py,sha256=iXLcYiJySn0GNbWOOZDDApgBL1JgP44EZ8i1760S8Mc,3333
-pip/_vendor/urllib3/__pycache__/__init__.cpython-313.pyc,,
-pip/_vendor/urllib3/__pycache__/_collections.cpython-313.pyc,,
-pip/_vendor/urllib3/__pycache__/_version.cpython-313.pyc,,
-pip/_vendor/urllib3/__pycache__/connection.cpython-313.pyc,,
-pip/_vendor/urllib3/__pycache__/connectionpool.cpython-313.pyc,,
-pip/_vendor/urllib3/__pycache__/exceptions.cpython-313.pyc,,
-pip/_vendor/urllib3/__pycache__/fields.cpython-313.pyc,,
-pip/_vendor/urllib3/__pycache__/filepost.cpython-313.pyc,,
-pip/_vendor/urllib3/__pycache__/poolmanager.cpython-313.pyc,,
-pip/_vendor/urllib3/__pycache__/request.cpython-313.pyc,,
-pip/_vendor/urllib3/__pycache__/response.cpython-313.pyc,,
-pip/_vendor/urllib3/_collections.py,sha256=pyASJJhW7wdOpqJj9QJA8FyGRfr8E8uUUhqUvhF0728,11372
-pip/_vendor/urllib3/_version.py,sha256=t9wGB6ooOTXXgiY66K1m6BZS1CJyXHAU8EoWDTe6Shk,64
-pip/_vendor/urllib3/connection.py,sha256=ttIA909BrbTUzwkqEe_TzZVh4JOOj7g61Ysei2mrwGg,20314
-pip/_vendor/urllib3/connectionpool.py,sha256=e2eiAwNbFNCKxj4bwDKNK-w7HIdSz3OmMxU_TIt-evQ,40408
-pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-313.pyc,,
-pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-313.pyc,,
-pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-313.pyc,,
-pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-313.pyc,,
-pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-313.pyc,,
-pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-313.pyc,,
-pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-313.pyc,,
-pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957
-pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-313.pyc,,
-pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-313.pyc,,
-pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-313.pyc,,
-pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632
-pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922
-pip/_vendor/urllib3/contrib/appengine.py,sha256=VR68eAVE137lxTgjBDwCna5UiBZTOKa01Aj_-5BaCz4,11036
-pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=NlfkW7WMdW8ziqudopjHoW299og1BTWi0IeIibquFwk,4528
-pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=hDJh4MhyY_p-oKlFcYcQaVQRDv6GMmBGuW9yjxyeejM,17081
-pip/_vendor/urllib3/contrib/securetransport.py,sha256=Fef1IIUUFHqpevzXiDPbIGkDKchY2FVKeVeLGR1Qq3g,34446
-pip/_vendor/urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097
-pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217
-pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579
-pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440
-pip/_vendor/urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-313.pyc,,
-pip/_vendor/urllib3/packages/__pycache__/six.cpython-313.pyc,,
-pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-313.pyc,,
-pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-313.pyc,,
-pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-313.pyc,,
-pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417
-pip/_vendor/urllib3/packages/backports/weakref_finalize.py,sha256=tRCal5OAhNSRyb0DhHp-38AtIlCsRP8BxF3NX-6rqIA,5343
-pip/_vendor/urllib3/packages/six.py,sha256=b9LM0wBXv7E7SrbCjAm4wwN-hrH-iNxv18LgWNMMKPo,34665
-pip/_vendor/urllib3/poolmanager.py,sha256=aWyhXRtNO4JUnCSVVqKTKQd8EXTvUm1VN9pgs2bcONo,19990
-pip/_vendor/urllib3/request.py,sha256=YTWFNr7QIwh7E1W9dde9LM77v2VWTJ5V78XuTTw7D1A,6691
-pip/_vendor/urllib3/response.py,sha256=fmDJAFkG71uFTn-sVSTh2Iw0WmcXQYqkbRjihvwBjU8,30641
-pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155
-pip/_vendor/urllib3/util/__pycache__/__init__.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/connection.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/proxy.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/queue.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/request.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/response.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/retry.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/timeout.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/url.cpython-313.pyc,,
-pip/_vendor/urllib3/util/__pycache__/wait.cpython-313.pyc,,
-pip/_vendor/urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901
-pip/_vendor/urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605
-pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498
-pip/_vendor/urllib3/util/request.py,sha256=C0OUt2tcU6LRiQJ7YYNP9GvPrSvl7ziIBekQ-5nlBZk,3997
-pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510
-pip/_vendor/urllib3/util/retry.py,sha256=6ENvOZ8PBDzh8kgixpql9lIrb2dxH-k7ZmBanJF2Ng4,22050
-pip/_vendor/urllib3/util/ssl_.py,sha256=QDuuTxPSCj1rYtZ4xpD7Ux-r20TD50aHyqKyhQ7Bq4A,17460
-pip/_vendor/urllib3/util/ssl_match_hostname.py,sha256=Ir4cZVEjmAk8gUAIHWSi7wtOO83UCYABY2xFD1Ql_WA,5758
-pip/_vendor/urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895
-pip/_vendor/urllib3/util/timeout.py,sha256=cwq4dMk87mJHSBktK1miYJ-85G-3T3RmT20v7SFCpno,10168
-pip/_vendor/urllib3/util/url.py,sha256=lCAE7M5myA8EDdW0sJuyyZhVB9K_j38ljWhHAnFaWoE,14296
-pip/_vendor/urllib3/util/wait.py,sha256=fOX0_faozG2P7iVojQoE1mbydweNyTcm-hXEfFrTtLI,5403
-pip/_vendor/vendor.txt,sha256=f2msFLZ-chXWIZSKW31NLGyMWmt_-Vfy7sY5dHYgmnw,342
-pip/py.typed,sha256=EBVvvPRTn_eIpz5e5QztSCdrMX7Qwd7VP93RSoIlZ2I,286
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 194698d9..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-313.pyc
deleted file mode 100644
index e979e28c..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-313.pyc
deleted file mode 100644
index d44998d4..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 71af37db..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-313.pyc
deleted file mode 100644
index 29192933..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-313.pyc
deleted file mode 100644
index ecc92de4..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-313.pyc
deleted file mode 100644
index fbc3de60..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-313.pyc
deleted file mode 100644
index 27fb194a..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-313.pyc
deleted file mode 100644
index 227b5e59..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-313.pyc
deleted file mode 100644
index 8d58fb6f..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-313.pyc
deleted file mode 100644
index 84ba3a7f..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-313.pyc
deleted file mode 100644
index e8694559..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 0ff73179..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-313.pyc
deleted file mode 100644
index 3777b8f1..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-313.pyc
deleted file mode 100644
index 55d5601f..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-313.pyc
deleted file mode 100644
index 10bb3435..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-313.pyc
deleted file mode 100644
index 8e8f5870..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-313.pyc
deleted file mode 100644
index 476c1e34..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-313.pyc
deleted file mode 100644
index abf89570..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-313.pyc
deleted file mode 100644
index 685978db..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-313.pyc
deleted file mode 100644
index dea23563..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-313.pyc
deleted file mode 100644
index d21db21d..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-313.pyc
deleted file mode 100644
index 6534342b..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-313.pyc
deleted file mode 100644
index e2f892b3..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-313.pyc
deleted file mode 100644
index 05e381a5..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 78a3025c..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-313.pyc
deleted file mode 100644
index 0ea0aa11..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-313.pyc
deleted file mode 100644
index 12a2a629..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-313.pyc
deleted file mode 100644
index cf5212e1..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-313.pyc
deleted file mode 100644
index 17420043..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-313.pyc
deleted file mode 100644
index 7a3891a1..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-313.pyc
deleted file mode 100644
index 1d3ee3bd..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-313.pyc
deleted file mode 100644
index d2df6711..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-313.pyc
deleted file mode 100644
index bca2ce20..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-313.pyc
deleted file mode 100644
index acc04dde..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-313.pyc
deleted file mode 100644
index 3f0d727a..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-313.pyc
deleted file mode 100644
index adc1c153..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-313.pyc
deleted file mode 100644
index b9dcb33f..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-313.pyc
deleted file mode 100644
index a2120725..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-313.pyc
deleted file mode 100644
index 880d6224..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-313.pyc
deleted file mode 100644
index 6c481565..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-313.pyc
deleted file mode 100644
index de91e025..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-313.pyc
deleted file mode 100644
index 90e63676..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-313.pyc
deleted file mode 100644
index ca2e4f73..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 67393c82..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-313.pyc
deleted file mode 100644
index 1d1819a5..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-313.pyc
deleted file mode 100644
index 6244e77d..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-313.pyc
deleted file mode 100644
index f3f8c23a..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-313.pyc
deleted file mode 100644
index 23d11876..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index d1d83f3d..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-313.pyc
deleted file mode 100644
index f8d44ed9..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-313.pyc
deleted file mode 100644
index 8871665f..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-313.pyc
deleted file mode 100644
index 2d7948ac..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 02994876..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-313.pyc
deleted file mode 100644
index 9d901ca8..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-313.pyc
deleted file mode 100644
index a2331401..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-313.pyc
deleted file mode 100644
index cbcd2843..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 33d40f9b..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-313.pyc
deleted file mode 100644
index d5856455..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-313.pyc
deleted file mode 100644
index c6a36a33..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-313.pyc
deleted file mode 100644
index d7c14916..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 09ecce0c..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-313.pyc
deleted file mode 100644
index 7adfa169..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-313.pyc
deleted file mode 100644
index f57d4a89..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-313.pyc
deleted file mode 100644
index 7b91150c..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 5a2b9c33..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-313.pyc
deleted file mode 100644
index 4aceaba1..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-313.pyc
deleted file mode 100644
index 3fd935c3..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-313.pyc
deleted file mode 100644
index 7a615392..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-313.pyc
deleted file mode 100644
index 986f9d8e..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-313.pyc
deleted file mode 100644
index da491138..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-313.pyc
deleted file mode 100644
index 7bf2aa51..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-313.pyc
deleted file mode 100644
index a8bf3edf..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-313.pyc
deleted file mode 100644
index d117f643..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-313.pyc
deleted file mode 100644
index 48430680..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-313.pyc
deleted file mode 100644
index aff5552f..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-313.pyc
deleted file mode 100644
index 66fd6294..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-313.pyc
deleted file mode 100644
index e3afdd65..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index f4ddff5d..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-313.pyc
deleted file mode 100644
index ee6cf048..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-313.pyc
deleted file mode 100644
index 141fcddc..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-313.pyc
deleted file mode 100644
index 2c24a54f..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-313.pyc
deleted file mode 100644
index 06b4cab0..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-313.pyc
deleted file mode 100644
index e9404075..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-313.pyc
deleted file mode 100644
index bf9c4654..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-313.pyc
deleted file mode 100644
index 4735a5ca..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 607b8f49..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-313.pyc
deleted file mode 100644
index 62a34c8f..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-313.pyc
deleted file mode 100644
index b2f45b28..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-313.pyc
deleted file mode 100644
index 9a0bc6f3..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index b1b6584d..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-313.pyc
deleted file mode 100644
index e2d4a21a..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-313.pyc
deleted file mode 100644
index 77c05646..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-313.pyc
deleted file mode 100644
index d2e8d094..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-313.pyc
deleted file mode 100644
index 08344570..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-313.pyc
deleted file mode 100644
index 7b1876c7..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 2a13c410..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-313.pyc
deleted file mode 100644
index 8e921d5b..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 41f86a8d..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-313.pyc
deleted file mode 100644
index cd09521c..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-313.pyc
deleted file mode 100644
index d864384d..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-313.pyc
deleted file mode 100644
index f1bdc187..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-313.pyc
deleted file mode 100644
index 2ef1bf0d..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-313.pyc
deleted file mode 100644
index 58278719..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-313.pyc
deleted file mode 100644
index 785336cc..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-313.pyc
deleted file mode 100644
index caf3d1d8..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 2677517f..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-313.pyc
deleted file mode 100644
index 48ccce54..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index a493c514..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-313.pyc
deleted file mode 100644
index a1649d8f..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 36f92542..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-313.pyc
deleted file mode 100644
index c4c971e9..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-313.pyc
deleted file mode 100644
index 3e46bc16..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-313.pyc
deleted file mode 100644
index 01ff97c0..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-313.pyc
deleted file mode 100644
index 076e682e..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-313.pyc
deleted file mode 100644
index 779f1d3e..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-313.pyc
deleted file mode 100644
index e167eebc..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-313.pyc
deleted file mode 100644
index de15322d..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-313.pyc
deleted file mode 100644
index 3b806af3..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 27cf8452..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-313.pyc
deleted file mode 100644
index 81263733..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_log.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_log.cpython-313.pyc
deleted file mode 100644
index 31c4fa63..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_log.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-313.pyc
deleted file mode 100644
index fa9159e2..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-313.pyc
deleted file mode 100644
index 27c592ec..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-313.pyc
deleted file mode 100644
index f5451be8..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-313.pyc
deleted file mode 100644
index 1ac271bf..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-313.pyc
deleted file mode 100644
index 181c2631..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-313.pyc
deleted file mode 100644
index a9cce392..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-313.pyc
deleted file mode 100644
index 9ae9cc59..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-313.pyc
deleted file mode 100644
index 3dcca860..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-313.pyc
deleted file mode 100644
index 87a02605..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-313.pyc
deleted file mode 100644
index 7293a9ee..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-313.pyc
deleted file mode 100644
index 9ea72801..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-313.pyc
deleted file mode 100644
index de41eecb..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-313.pyc
deleted file mode 100644
index 6cd9468e..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-313.pyc
deleted file mode 100644
index bf13b4f7..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-313.pyc
deleted file mode 100644
index 5d7bc771..00000000
Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-313.pyc and /dev/null differ
diff --git
a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/pylock.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/pylock.cpython-313.pyc deleted file mode 100644 index ba10019e..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/pylock.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/retry.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/retry.cpython-313.pyc deleted file mode 100644 index ed4eabc2..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/retry.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-313.pyc deleted file mode 100644 index 96989962..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-313.pyc deleted file mode 100644 index 660c1e54..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-313.pyc deleted file mode 100644 index 34eacbb1..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-313.pyc deleted file mode 100644 index 5180ddf2..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-313.pyc deleted file mode 100644 index bd0a42c0..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-313.pyc deleted file mode 100644 index 0a6e6b50..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index f46eafae..00000000 Binary files 
a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-313.pyc deleted file mode 100644 index 4508e8b9..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-313.pyc deleted file mode 100644 index 9657c032..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-313.pyc deleted file mode 100644 index 83d19366..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-313.pyc deleted file mode 100644 index 7287d8b3..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-313.pyc deleted file mode 100644 index 3b9c99df..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index aff134f5..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 8f1a11a0..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-313.pyc deleted file mode 100644 index f6b6ce1a..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-313.pyc 
deleted file mode 100644 index 5ec71f37..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-313.pyc deleted file mode 100644 index 81a3ad8b..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-313.pyc deleted file mode 100644 index 2bf2040c..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-313.pyc deleted file mode 100644 index d235445e..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-313.pyc deleted file mode 100644 index 6f20b186..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-313.pyc deleted file mode 100644 index 4f73b730..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-313.pyc deleted file mode 100644 index 84f08bd7..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index e4639cb7..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-313.pyc deleted file mode 100644 index 75335552..00000000 Binary files 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-313.pyc deleted file mode 100644 index dbbf92e6..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 97168eff..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-313.pyc deleted file mode 100644 index 0af72a9e..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-313.pyc deleted file mode 100644 index 24b91a21..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index c139e1b0..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-313.pyc deleted file mode 100644 index 7e251701..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-313.pyc deleted file mode 100644 index 1964b904..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-313.pyc deleted file mode 100644 index 0c565244..00000000 Binary files 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-313.pyc deleted file mode 100644 index d143319f..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-313.pyc deleted file mode 100644 index 4e51bd03..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 693057dd..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-313.pyc deleted file mode 100644 index 5e99510d..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-313.pyc deleted file mode 100644 index f288536f..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-313.pyc deleted file mode 100644 index 86902bb5..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-313.pyc deleted file mode 100644 index c87377ee..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 92be9d34..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-313.pyc deleted file mode 100644 index 14e633f7..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-313.pyc deleted file mode 100644 index b7963e11..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 938139e9..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-313.pyc deleted file mode 100644 index 03b2603d..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-313.pyc deleted file mode 100644 index 406dfbdc..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-313.pyc deleted file mode 100644 index 3d4a4757..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-313.pyc deleted file mode 100644 index b72c01a4..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-313.pyc deleted file mode 100644 index a97f9f98..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-313.pyc deleted file mode 100644 index 698df08a..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-313.pyc and /dev/null differ diff --git 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-313.pyc deleted file mode 100644 index 2a94a87e..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index d431fd00..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-313.pyc deleted file mode 100644 index 121ae2bc..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-313.pyc deleted file mode 100644 index 015a9770..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-313.pyc deleted file mode 100644 index 1bc7efc5..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index d37d934c..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-313.pyc deleted file mode 100644 index 461623e7..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-313.pyc deleted file mode 100644 index 61fd3587..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-313.pyc deleted file mode 100644 index a479008b..00000000 Binary files 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-313.pyc deleted file mode 100644 index 4d7c7072..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-313.pyc deleted file mode 100644 index 69b13c70..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-313.pyc deleted file mode 100644 index f1a159f9..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-313.pyc deleted file mode 100644 index 501c3e7b..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-313.pyc deleted file mode 100644 index 0ab05ece..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/pylock.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/pylock.cpython-313.pyc deleted file mode 100644 index fe449d39..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/pylock.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-313.pyc deleted file mode 100644 index 514309e1..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-313.pyc deleted file mode 100644 index ba085f95..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-313.pyc 
b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-313.pyc deleted file mode 100644 index 40b2bf50..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-313.pyc deleted file mode 100644 index d62968d6..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-313.pyc deleted file mode 100644 index 4a4c8774..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 9d6dc320..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-313.pyc deleted file mode 100644 index 8667eeec..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 5a781407..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index e4a616a3..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-313.pyc deleted file mode 100644 index 99b4d0ea..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-313.pyc deleted file mode 100644 index 7990ba45..00000000 Binary files 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-313.pyc deleted file mode 100644 index 13b1825d..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-313.pyc deleted file mode 100644 index bf368f6c..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-313.pyc deleted file mode 100644 index f4e476f2..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-313.pyc deleted file mode 100644 index 090613c0..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-313.pyc deleted file mode 100644 index acf97a97..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 057983ce..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-313.pyc deleted file mode 100644 index 1f5f9f5c..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-313.pyc deleted file mode 100644 index e3a175d5..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-313.pyc 
b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-313.pyc deleted file mode 100644 index 1a9ab636..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-313.pyc deleted file mode 100644 index 2bb7d220..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-313.pyc deleted file mode 100644 index 5f901595..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-313.pyc deleted file mode 100644 index 16f6b386..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-313.pyc deleted file mode 100644 index c7bf331f..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-313.pyc deleted file mode 100644 index 70c6503f..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-313.pyc deleted file mode 100644 index 65a1dd3b..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-313.pyc deleted file mode 100644 index dba40e4e..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-313.pyc deleted file mode 100644 index a8b80f7f..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-313.pyc and /dev/null differ diff --git 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-313.pyc deleted file mode 100644 index 7d844420..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-313.pyc deleted file mode 100644 index 37963d9b..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-313.pyc deleted file mode 100644 index e4ec80c6..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 498225c7..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 9d93e0bc..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-313.pyc deleted file mode 100644 index 87fb1777..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 77004cdc..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-313.pyc deleted file mode 100644 index 75e9a2c8..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-313.pyc 
b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-313.pyc deleted file mode 100644 index 0fe98265..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index bc5fc9e0..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-313.pyc deleted file mode 100644 index f7c6f77a..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 259b866d..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-313.pyc deleted file mode 100644 index 671597e9..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index b8fed385..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-313.pyc deleted file mode 100644 index 74347341..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index e03cd1e3..00000000 Binary files a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-313.pyc 
[... roughly 160 further binary deletions elided: every cpython-313 .pyc under
Python313_13_x86_Template/Lib/site-packages/pip/_vendor/{requests, resolvelib,
rich, tomli, tomli_w, truststore, urllib3}/**/__pycache__/, each recorded as
"deleted file mode 100644" / "Binary files a/<path> and /dev/null differ" ...]
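The __pycache__ entries removed above are derived artifacts: the interpreter
rebuilds them on first import, and they can also be pre-built explicitly. A
minimal sketch using only the stdlib compileall module, assuming the template
layout used throughout this diff (the path is illustrative):

    # Hypothetical rebuild of the byte-code caches deleted above.
    import compileall

    compileall.compile_dir(
        r"Python313_13_x86_Template\Lib\site-packages",  # template path from this diff
        quiet=1,  # report errors only
    )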
diff --git a/Python313_13_x86_Template/Lib/site.py b/Python313_13_x86_Template/Lib/site.py
deleted file mode 100644
index 041dca11..00000000
--- a/Python313_13_x86_Template/Lib/site.py
+++ /dev/null
@@ -1,773 +0,0 @@
-"""Append module search paths for third-party packages to sys.path.
-
-****************************************************************
-* This module is automatically imported during initialization. *
-****************************************************************
-
-This will append site-specific paths to the module search path.  On
-Unix (including Mac OSX), it starts with sys.prefix and
-sys.exec_prefix (if different) and appends
-lib/python<version>/site-packages.
-On other platforms (such as Windows), it tries each of the
-prefixes directly, as well as with lib/site-packages appended.  The
-resulting directories, if they exist, are appended to sys.path, and
-also inspected for path configuration files.
-
-If a file named "pyvenv.cfg" exists one directory above sys.executable,
-sys.prefix and sys.exec_prefix are set to that directory and
-it is also checked for site-packages (sys.base_prefix and
-sys.base_exec_prefix will always be the "real" prefixes of the Python
-installation). If "pyvenv.cfg" (a bootstrap configuration file) contains
-the key "include-system-site-packages" set to anything other than "false"
-(case-insensitive), the system-level prefixes will still also be
-searched for site-packages; otherwise they won't.
-
-All of the resulting site-specific directories, if they exist, are
-appended to sys.path, and also inspected for path configuration
-files.
-
-A path configuration file is a file whose name has the form
-<package>.pth; its contents are additional directories (one per line)
-to be added to sys.path.  Non-existing directories (or
-non-directories) are never added to sys.path; no directory is added to
-sys.path more than once.  Blank lines and lines beginning with
-'#' are skipped. Lines starting with 'import' are executed.
-
-For example, suppose sys.prefix and sys.exec_prefix are set to
-/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
-with three subdirectories, foo, bar and spam, and two path
-configuration files, foo.pth and bar.pth.  Assume foo.pth contains the
-following:
-
-  # foo package configuration
-  foo
-  bar
-  bletch
-
-and bar.pth contains:
-
-  # bar package configuration
-  bar
-
-Then the following directories are added to sys.path, in this order:
-
-  /usr/local/lib/python2.5/site-packages/bar
-  /usr/local/lib/python2.5/site-packages/foo
-
-Note that bletch is omitted because it doesn't exist; bar precedes foo
-because bar.pth comes alphabetically before foo.pth; and spam is
-omitted because it is not mentioned in either path configuration file.
-
-The readline module is also automatically configured to enable
-completion for systems that support it.  This can be overridden in
-sitecustomize, usercustomize or PYTHONSTARTUP.  Starting Python in
-isolated mode (-I) disables automatic readline configuration.
-
-After these operations, an attempt is made to import a module
-named sitecustomize, which can perform arbitrary additional
-site-specific customizations.  If this import fails with an
-ImportError exception, it is silently ignored.
-"""
-
-import sys
-import os
-import builtins
-import _sitebuiltins
-import io
-import stat
-import errno
-
-# Prefixes for site-packages; add additional prefixes like /usr/local here
-PREFIXES = [sys.prefix, sys.exec_prefix]
-# Enable per user site-packages directory
-# set it to False to disable the feature or True to force the feature
-ENABLE_USER_SITE = None
-
-# for distutils.commands.install
-# These values are initialized by the getuserbase() and getusersitepackages()
-# functions, through the main() function when Python starts.
-USER_SITE = None -USER_BASE = None - - -def _trace(message): - if sys.flags.verbose: - print(message, file=sys.stderr) - - -def makepath(*paths): - dir = os.path.join(*paths) - try: - dir = os.path.abspath(dir) - except OSError: - pass - return dir, os.path.normcase(dir) - - -def abs_paths(): - """Set all module __file__ and __cached__ attributes to an absolute path""" - for m in set(sys.modules.values()): - loader_module = None - try: - loader_module = m.__loader__.__module__ - except AttributeError: - try: - loader_module = m.__spec__.loader.__module__ - except AttributeError: - pass - if loader_module not in {'_frozen_importlib', '_frozen_importlib_external'}: - continue # don't mess with a PEP 302-supplied __file__ - try: - m.__file__ = os.path.abspath(m.__file__) - except (AttributeError, OSError, TypeError): - pass - try: - m.__cached__ = os.path.abspath(m.__cached__) - except (AttributeError, OSError, TypeError): - pass - - -def removeduppaths(): - """ Remove duplicate entries from sys.path along with making them - absolute""" - # This ensures that the initial path provided by the interpreter contains - # only absolute pathnames, even if we're running from the build directory. - L = [] - known_paths = set() - for dir in sys.path: - # Filter out duplicate paths (on case-insensitive file systems also - # if they only differ in case); turn relative paths into absolute - # paths. - dir, dircase = makepath(dir) - if dircase not in known_paths: - L.append(dir) - known_paths.add(dircase) - sys.path[:] = L - return known_paths - - -def _init_pathinfo(): - """Return a set containing all existing file system items from sys.path.""" - d = set() - for item in sys.path: - try: - if os.path.exists(item): - _, itemcase = makepath(item) - d.add(itemcase) - except TypeError: - continue - return d - - -def addpackage(sitedir, name, known_paths): - """Process a .pth file within the site-packages directory: - For each line in the file, either combine it with sitedir to a path - and add that to known_paths, or execute it if it starts with 'import '. - """ - if known_paths is None: - known_paths = _init_pathinfo() - reset = True - else: - reset = False - fullname = os.path.join(sitedir, name) - try: - st = os.lstat(fullname) - except OSError: - return - if ((getattr(st, 'st_flags', 0) & stat.UF_HIDDEN) or - (getattr(st, 'st_file_attributes', 0) & stat.FILE_ATTRIBUTE_HIDDEN)): - _trace(f"Skipping hidden .pth file: {fullname!r}") - return - _trace(f"Processing .pth file: {fullname!r}") - try: - with io.open_code(fullname) as f: - pth_content = f.read() - except OSError: - return - - try: - # Accept BOM markers in .pth files as we do in source files - # (Windows PowerShell 5.1 makes it hard to emit UTF-8 files without a BOM) - pth_content = pth_content.decode("utf-8-sig") - except UnicodeDecodeError: - # Fallback to locale encoding for backward compatibility. - # We will deprecate this fallback in the future. - import locale - pth_content = pth_content.decode(locale.getencoding()) - _trace(f"Cannot read {fullname!r} as UTF-8. 
" - f"Using fallback encoding {locale.getencoding()!r}") - - for n, line in enumerate(pth_content.splitlines(), 1): - if line.startswith("#"): - continue - if line.strip() == "": - continue - try: - if line.startswith(("import ", "import\t")): - exec(line) - continue - line = line.rstrip() - dir, dircase = makepath(sitedir, line) - if dircase not in known_paths and os.path.exists(dir): - sys.path.append(dir) - known_paths.add(dircase) - except Exception as exc: - print(f"Error processing line {n:d} of {fullname}:\n", - file=sys.stderr) - import traceback - for record in traceback.format_exception(exc): - for line in record.splitlines(): - print(' '+line, file=sys.stderr) - print("\nRemainder of file ignored", file=sys.stderr) - break - if reset: - known_paths = None - return known_paths - - -def addsitedir(sitedir, known_paths=None): - """Add 'sitedir' argument to sys.path if missing and handle .pth files in - 'sitedir'""" - _trace(f"Adding directory: {sitedir!r}") - if known_paths is None: - known_paths = _init_pathinfo() - reset = True - else: - reset = False - sitedir, sitedircase = makepath(sitedir) - if not sitedircase in known_paths: - sys.path.append(sitedir) # Add path component - known_paths.add(sitedircase) - try: - names = os.listdir(sitedir) - except OSError: - return - names = [name for name in names - if name.endswith(".pth") and not name.startswith(".")] - for name in sorted(names): - addpackage(sitedir, name, known_paths) - if reset: - known_paths = None - return known_paths - - -def check_enableusersite(): - """Check if user site directory is safe for inclusion - - The function tests for the command line flag (including environment var), - process uid/gid equal to effective uid/gid. - - None: Disabled for security reasons - False: Disabled by user (command line option) - True: Safe and enabled - """ - if sys.flags.no_user_site: - return False - - if hasattr(os, "getuid") and hasattr(os, "geteuid"): - # check process uid == effective uid - if os.geteuid() != os.getuid(): - return None - if hasattr(os, "getgid") and hasattr(os, "getegid"): - # check process gid == effective gid - if os.getegid() != os.getgid(): - return None - - return True - - -# NOTE: sysconfig and it's dependencies are relatively large but site module -# needs very limited part of them. -# To speedup startup time, we have copy of them. 
-# -# See https://bugs.python.org/issue29585 - -# Copy of sysconfig._get_implementation() -def _get_implementation(): - return 'Python' - -# Copy of sysconfig._getuserbase() -def _getuserbase(): - env_base = os.environ.get("PYTHONUSERBASE", None) - if env_base: - return env_base - - # Emscripten, iOS, tvOS, VxWorks, WASI, and watchOS have no home directories - if sys.platform in {"emscripten", "ios", "tvos", "vxworks", "wasi", "watchos"}: - return None - - def joinuser(*args): - return os.path.expanduser(os.path.join(*args)) - - if os.name == "nt": - base = os.environ.get("APPDATA") or "~" - return joinuser(base, _get_implementation()) - - if sys.platform == "darwin" and sys._framework: - return joinuser("~", "Library", sys._framework, - "%d.%d" % sys.version_info[:2]) - - return joinuser("~", ".local") - - -# Same to sysconfig.get_path('purelib', os.name+'_user') -def _get_path(userbase): - version = sys.version_info - if hasattr(sys, 'abiflags') and 't' in sys.abiflags: - abi_thread = 't' - else: - abi_thread = '' - - implementation = _get_implementation() - implementation_lower = implementation.lower() - if os.name == 'nt': - ver_nodot = sys.winver.replace('.', '') - return f'{userbase}\\{implementation}{ver_nodot}\\site-packages' - - if sys.platform == 'darwin' and sys._framework: - return f'{userbase}/lib/{implementation_lower}/site-packages' - - return f'{userbase}/lib/python{version[0]}.{version[1]}{abi_thread}/site-packages' - - -def getuserbase(): - """Returns the `user base` directory path. - - The `user base` directory can be used to store data. If the global - variable ``USER_BASE`` is not initialized yet, this function will also set - it. - """ - global USER_BASE - if USER_BASE is None: - USER_BASE = _getuserbase() - return USER_BASE - - -def getusersitepackages(): - """Returns the user-specific site-packages directory path. - - If the global variable ``USER_SITE`` is not initialized yet, this - function will also set it. - """ - global USER_SITE, ENABLE_USER_SITE - userbase = getuserbase() # this will also set USER_BASE - - if USER_SITE is None: - if userbase is None: - ENABLE_USER_SITE = False # disable user site and return None - else: - USER_SITE = _get_path(userbase) - - return USER_SITE - -def addusersitepackages(known_paths): - """Add a per user site-package to sys.path - - Each user has its own python directory with site-packages in the - home directory. - """ - # get the per user site-package path - # this call will also make sure USER_BASE and USER_SITE are set - _trace("Processing user site-packages") - user_site = getusersitepackages() - - if ENABLE_USER_SITE and os.path.isdir(user_site): - addsitedir(user_site, known_paths) - return known_paths - -def getsitepackages(prefixes=None): - """Returns a list containing all global site-packages directories. - - For each directory present in ``prefixes`` (or the global ``PREFIXES``), - this function will find its `site-packages` subdirectory depending on the - system environment, and will return a list of full paths. 
- """ - sitepackages = [] - seen = set() - - if prefixes is None: - prefixes = PREFIXES - - for prefix in prefixes: - if not prefix or prefix in seen: - continue - seen.add(prefix) - - implementation = _get_implementation().lower() - ver = sys.version_info - if hasattr(sys, 'abiflags') and 't' in sys.abiflags: - abi_thread = 't' - else: - abi_thread = '' - if os.sep == '/': - libdirs = [sys.platlibdir] - if sys.platlibdir != "lib": - libdirs.append("lib") - - for libdir in libdirs: - path = os.path.join(prefix, libdir, - f"{implementation}{ver[0]}.{ver[1]}{abi_thread}", - "site-packages") - sitepackages.append(path) - else: - sitepackages.append(prefix) - sitepackages.append(os.path.join(prefix, "Lib", "site-packages")) - return sitepackages - -def addsitepackages(known_paths, prefixes=None): - """Add site-packages to sys.path""" - _trace("Processing global site-packages") - for sitedir in getsitepackages(prefixes): - if os.path.isdir(sitedir): - addsitedir(sitedir, known_paths) - - return known_paths - -def setquit(): - """Define new builtins 'quit' and 'exit'. - - These are objects which make the interpreter exit when called. - The repr of each object contains a hint at how it works. - - """ - if os.sep == '\\': - eof = 'Ctrl-Z plus Return' - else: - eof = 'Ctrl-D (i.e. EOF)' - - builtins.quit = _sitebuiltins.Quitter('quit', eof) - builtins.exit = _sitebuiltins.Quitter('exit', eof) - - -def setcopyright(): - """Set 'copyright' and 'credits' in builtins""" - builtins.copyright = _sitebuiltins._Printer("copyright", sys.copyright) - builtins.credits = _sitebuiltins._Printer("credits", """\ -Thanks to CWI, CNRI, BeOpen, Zope Corporation, the Python Software -Foundation, and a cast of thousands for supporting Python -development. See www.python.org for more information.""") - files, dirs = [], [] - # Not all modules are required to have a __file__ attribute. See - # PEP 420 for more details. - here = getattr(sys, '_stdlib_dir', None) - if not here and hasattr(os, '__file__'): - here = os.path.dirname(os.__file__) - if here: - files.extend(["LICENSE.txt", "LICENSE"]) - dirs.extend([os.path.join(here, os.pardir), here, os.curdir]) - builtins.license = _sitebuiltins._Printer( - "license", - "See https://www.python.org/psf/license/", - files, dirs) - - -def sethelper(): - builtins.help = _sitebuiltins._Helper() - - -def gethistoryfile(): - """Check if the PYTHON_HISTORY environment variable is set and define - it as the .python_history file. If PYTHON_HISTORY is not set, use the - default .python_history file. - """ - if not sys.flags.ignore_environment: - history = os.environ.get("PYTHON_HISTORY") - if history: - return history - return os.path.join(os.path.expanduser('~'), - '.python_history') - - -def enablerlcompleter(): - """Enable default readline configuration on interactive prompts, by - registering a sys.__interactivehook__. - """ - sys.__interactivehook__ = register_readline - - -def register_readline(): - """Configure readline completion on interactive prompts. - - If the readline module can be imported, the hook will set the Tab key - as completion key and register ~/.python_history as history file. - This can be overridden in the sitecustomize or usercustomize module, - or in a PYTHONSTARTUP file. 
- """ - if not sys.flags.ignore_environment: - PYTHON_BASIC_REPL = os.getenv("PYTHON_BASIC_REPL") - else: - PYTHON_BASIC_REPL = False - - import atexit - - try: - try: - import readline - except ImportError: - readline = None - else: - import rlcompleter # noqa: F401 - except ImportError: - return - - try: - if PYTHON_BASIC_REPL: - CAN_USE_PYREPL = False - else: - original_path = sys.path - sys.path = [p for p in original_path if p != ''] - try: - import _pyrepl.readline - if os.name == "nt": - import _pyrepl.windows_console - console_errors = (_pyrepl.windows_console._error,) - else: - import _pyrepl.unix_console - console_errors = _pyrepl.unix_console._error - from _pyrepl.main import CAN_USE_PYREPL - finally: - sys.path = original_path - except ImportError: - return - - if readline is not None: - # Reading the initialization (config) file may not be enough to set a - # completion key, so we set one first and then read the file. - if readline.backend == 'editline': - readline.parse_and_bind('bind ^I rl_complete') - else: - readline.parse_and_bind('tab: complete') - - try: - readline.read_init_file() - except OSError: - # An OSError here could have many causes, but the most likely one - # is that there's no .inputrc file (or .editrc file in the case of - # Mac OS X + libedit) in the expected location. In that case, we - # want to ignore the exception. - pass - - if readline is None or readline.get_current_history_length() == 0: - # If no history was loaded, default to .python_history, - # or PYTHON_HISTORY. - # The guard is necessary to avoid doubling history size at - # each interpreter exit when readline was already configured - # through a PYTHONSTARTUP hook, see: - # http://bugs.python.org/issue5845#msg198636 - history = gethistoryfile() - - if CAN_USE_PYREPL: - readline_module = _pyrepl.readline - exceptions = (OSError, *console_errors) - else: - if readline is None: - return - readline_module = readline - exceptions = OSError - - try: - readline_module.read_history_file(history) - except exceptions: - pass - - def write_history(): - try: - readline_module.write_history_file(history) - except (FileNotFoundError, PermissionError): - # home directory does not exist or is not writable - # https://bugs.python.org/issue19891 - pass - except OSError: - if errno.EROFS: - pass # gh-128066: read-only file system - else: - raise - - atexit.register(write_history) - - -def venv(known_paths): - global PREFIXES, ENABLE_USER_SITE - - env = os.environ - if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env: - executable = sys._base_executable = os.environ['__PYVENV_LAUNCHER__'] - else: - executable = sys.executable - exe_dir = os.path.dirname(os.path.abspath(executable)) - site_prefix = os.path.dirname(exe_dir) - sys._home = None - conf_basename = 'pyvenv.cfg' - candidate_conf = next( - ( - conffile for conffile in ( - os.path.join(exe_dir, conf_basename), - os.path.join(site_prefix, conf_basename) - ) - if os.path.isfile(conffile) - ), - None - ) - - if candidate_conf: - virtual_conf = candidate_conf - system_site = "true" - # Issue 25185: Use UTF-8, as that's what the venv module uses when - # writing the file. 
- with open(virtual_conf, encoding='utf-8') as f: - for line in f: - if '=' in line: - key, _, value = line.partition('=') - key = key.strip().lower() - value = value.strip() - if key == 'include-system-site-packages': - system_site = value.lower() - elif key == 'home': - sys._home = value - - sys.prefix = sys.exec_prefix = site_prefix - - # Doing this here ensures venv takes precedence over user-site - addsitepackages(known_paths, [sys.prefix]) - - # addsitepackages will process site_prefix again if its in PREFIXES, - # but that's ok; known_paths will prevent anything being added twice - if system_site == "true": - PREFIXES.insert(0, sys.prefix) - else: - PREFIXES = [sys.prefix] - ENABLE_USER_SITE = False - - return known_paths - - -def execsitecustomize(): - """Run custom site specific code, if available.""" - try: - try: - import sitecustomize - except ImportError as exc: - if exc.name == 'sitecustomize': - pass - else: - raise - except Exception as err: - if sys.flags.verbose: - sys.excepthook(*sys.exc_info()) - else: - sys.stderr.write( - "Error in sitecustomize; set PYTHONVERBOSE for traceback:\n" - "%s: %s\n" % - (err.__class__.__name__, err)) - - -def execusercustomize(): - """Run custom user specific code, if available.""" - try: - try: - import usercustomize - except ImportError as exc: - if exc.name == 'usercustomize': - pass - else: - raise - except Exception as err: - if sys.flags.verbose: - sys.excepthook(*sys.exc_info()) - else: - sys.stderr.write( - "Error in usercustomize; set PYTHONVERBOSE for traceback:\n" - "%s: %s\n" % - (err.__class__.__name__, err)) - - -def main(): - """Add standard site-specific directories to the module search path. - - This function is called automatically when this module is imported, - unless the python interpreter was started with the -S flag. - """ - global ENABLE_USER_SITE - - orig_path = sys.path[:] - known_paths = removeduppaths() - if orig_path != sys.path: - # removeduppaths() might make sys.path absolute. - # fix __file__ and __cached__ of already imported modules too. - abs_paths() - - known_paths = venv(known_paths) - if ENABLE_USER_SITE is None: - ENABLE_USER_SITE = check_enableusersite() - known_paths = addusersitepackages(known_paths) - known_paths = addsitepackages(known_paths) - setquit() - setcopyright() - sethelper() - if not sys.flags.isolated: - enablerlcompleter() - execsitecustomize() - if ENABLE_USER_SITE: - execusercustomize() - -# Prevent extending of sys.path when python was started with -S and -# site is imported later. -if not sys.flags.no_site: - main() - -def _script(): - help = """\ - %s [--user-base] [--user-site] - - Without arguments print some useful information - With arguments print the value of USER_BASE and/or USER_SITE separated - by '%s'. 
- - Exit codes with --user-base or --user-site: - 0 - user site directory is enabled - 1 - user site directory is disabled by user - 2 - user site directory is disabled by super user - or for security reasons - >2 - unknown error - """ - args = sys.argv[1:] - if not args: - user_base = getuserbase() - user_site = getusersitepackages() - print("sys.path = [") - for dir in sys.path: - print(" %r," % (dir,)) - print("]") - def exists(path): - if path is not None and os.path.isdir(path): - return "exists" - else: - return "doesn't exist" - print(f"USER_BASE: {user_base!r} ({exists(user_base)})") - print(f"USER_SITE: {user_site!r} ({exists(user_site)})") - print(f"ENABLE_USER_SITE: {ENABLE_USER_SITE!r}") - sys.exit(0) - - buffer = [] - if '--user-base' in args: - buffer.append(USER_BASE) - if '--user-site' in args: - buffer.append(USER_SITE) - - if buffer: - print(os.pathsep.join(buffer)) - if ENABLE_USER_SITE: - sys.exit(0) - elif ENABLE_USER_SITE is False: - sys.exit(1) - elif ENABLE_USER_SITE is None: - sys.exit(2) - else: - sys.exit(3) - else: - import textwrap - print(textwrap.dedent(help % (sys.argv[0], os.pathsep))) - sys.exit(10) - -if __name__ == '__main__': - _script() diff --git a/Python313_13_x86_Template/Lib/smtplib.py b/Python313_13_x86_Template/Lib/smtplib.py deleted file mode 100644 index 9bedcc5f..00000000 --- a/Python313_13_x86_Template/Lib/smtplib.py +++ /dev/null @@ -1,1123 +0,0 @@ -#! /usr/bin/env python3 - -'''SMTP/ESMTP client class. - -This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP -Authentication) and RFC 2487 (Secure SMTP over TLS). - -Notes: - -Please remember, when doing ESMTP, that the names of the SMTP service -extensions are NOT the same thing as the option keywords for the RCPT -and MAIL commands! - -Example: - - >>> import smtplib - >>> s=smtplib.SMTP("localhost") - >>> print(s.help()) - This is Sendmail version 8.8.4 - Topics: - HELO EHLO MAIL RCPT DATA - RSET NOOP QUIT HELP VRFY - EXPN VERB ETRN DSN - For more info use "HELP <topic>". - To report bugs in the implementation send email to - sendmail-bugs@sendmail.org. - For local information send email to Postmaster at your site. - End of HELP info - >>> s.putcmd("vrfy","someone@here") - >>> s.getreply() - (250, "Somebody OverHere <somebody@here.my.org>") - >>> s.quit() -''' - -# Author: The Dragon De Monsyne <dragondm@integral.org> -# ESMTP support, test code and doc fixes added by -# Eric S. Raymond <esr@thyrsus.com> -# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data) -# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers. -# RFC 2554 (authentication) support by Gerhard Haering <gh@ghaering.de>. -# -# This was modified from the Python 1.5 library HTTP lib. - -import socket -import io -import re -import email.utils -import email.message -import email.generator -import base64 -import hmac -import copy -import datetime -import sys -from email.base64mime import body_encode as encode_base64 - -__all__ = ["SMTPException", "SMTPNotSupportedError", "SMTPServerDisconnected", "SMTPResponseException", - "SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError", - "SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError", - "quoteaddr", "quotedata", "SMTP"] - -SMTP_PORT = 25 -SMTP_SSL_PORT = 465 -CRLF = "\r\n" -bCRLF = b"\r\n" -_MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3 -_MAXCHALLENGE = 5 # Maximum number of AUTH challenges sent - -OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I) - -# Exception classes used by this module.
-class SMTPException(OSError): - """Base class for all exceptions raised by this module.""" - -class SMTPNotSupportedError(SMTPException): - """The command or option is not supported by the SMTP server. - - This exception is raised when an attempt is made to run a command or a - command with an option which is not supported by the server. - """ - -class SMTPServerDisconnected(SMTPException): - """Not connected to any SMTP server. - - This exception is raised when the server unexpectedly disconnects, - or when an attempt is made to use the SMTP instance before - connecting it to a server. - """ - -class SMTPResponseException(SMTPException): - """Base class for all exceptions that include an SMTP error code. - - These exceptions are generated in some instances when the SMTP - server returns an error code. The error code is stored in the - `smtp_code' attribute of the error, and the `smtp_error' attribute - is set to the error message. - """ - - def __init__(self, code, msg): - self.smtp_code = code - self.smtp_error = msg - self.args = (code, msg) - -class SMTPSenderRefused(SMTPResponseException): - """Sender address refused. - - In addition to the attributes set by on all SMTPResponseException - exceptions, this sets `sender' to the string that the SMTP refused. - """ - - def __init__(self, code, msg, sender): - self.smtp_code = code - self.smtp_error = msg - self.sender = sender - self.args = (code, msg, sender) - -class SMTPRecipientsRefused(SMTPException): - """All recipient addresses refused. - - The errors for each recipient are accessible through the attribute - 'recipients', which is a dictionary of exactly the same sort as - SMTP.sendmail() returns. - """ - - def __init__(self, recipients): - self.recipients = recipients - self.args = (recipients,) - - -class SMTPDataError(SMTPResponseException): - """The SMTP server didn't accept the data.""" - -class SMTPConnectError(SMTPResponseException): - """Error during connection establishment.""" - -class SMTPHeloError(SMTPResponseException): - """The server refused our HELO reply.""" - -class SMTPAuthenticationError(SMTPResponseException): - """Authentication error. - - Most probably the server didn't accept the username/password - combination provided. - """ - -def quoteaddr(addrstring): - """Quote a subset of the email addresses defined by RFC 821. - - Should be able to handle anything email.utils.parseaddr can handle. - """ - displayname, addr = email.utils.parseaddr(addrstring) - if (displayname, addr) == ('', ''): - # parseaddr couldn't parse it, use it as is and hope for the best. - if addrstring.strip().startswith('<'): - return addrstring - return "<%s>" % addrstring - return "<%s>" % addr - -def _addr_only(addrstring): - displayname, addr = email.utils.parseaddr(addrstring) - if (displayname, addr) == ('', ''): - # parseaddr couldn't parse it, so use it as is. - return addrstring - return addr - -# Legacy method kept for backward compatibility. -def quotedata(data): - """Quote data for email. - - Double leading '.', and change Unix newline '\\n', or Mac '\\r' into - internet CRLF end-of-line. 
- """ - return re.sub(r'(?m)^\.', '..', - re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data)) - -def _quote_periods(bindata): - return re.sub(br'(?m)^\.', b'..', bindata) - -def _fix_eols(data): - return re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data) - - -try: - hmac.digest(b'', b'', 'md5') -except ValueError: - _have_cram_md5_support = False -else: - _have_cram_md5_support = True - - -try: - import ssl -except ImportError: - _have_ssl = False -else: - _have_ssl = True - - -class SMTP: - """This class manages a connection to an SMTP or ESMTP server. - SMTP Objects: - SMTP objects have the following attributes: - helo_resp - This is the message given by the server in response to the - most recent HELO command. - - ehlo_resp - This is the message given by the server in response to the - most recent EHLO command. This is usually multiline. - - does_esmtp - This is a True value _after you do an EHLO command_, if the - server supports ESMTP. - - esmtp_features - This is a dictionary, which, if the server supports ESMTP, - will _after you do an EHLO command_, contain the names of the - SMTP service extensions this server supports, and their - parameters (if any). - - Note, all extension names are mapped to lower case in the - dictionary. - - See each method's docstrings for details. In general, there is a - method of the same name to perform each SMTP command. There is also a - method called 'sendmail' that will do an entire mail transaction. - """ - debuglevel = 0 - - sock = None - file = None - helo_resp = None - ehlo_msg = "ehlo" - ehlo_resp = None - does_esmtp = False - default_port = SMTP_PORT - - def __init__(self, host='', port=0, local_hostname=None, - timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None): - """Initialize a new instance. - - If specified, `host` is the name of the remote host to which to - connect. If specified, `port` specifies the port to which to connect. - By default, smtplib.SMTP_PORT is used. If a host is specified the - connect method is called, and if it returns anything other than a - success code an SMTPConnectError is raised. If specified, - `local_hostname` is used as the FQDN of the local host in the HELO/EHLO - command. Otherwise, the local hostname is found using - socket.getfqdn(). The `source_address` parameter takes a 2-tuple (host, - port) for the socket to bind to as its source address before - connecting. If the host is '' and port is 0, the OS default behavior - will be used. - - """ - self._host = host - self.timeout = timeout - self.esmtp_features = {} - self.command_encoding = 'ascii' - self.source_address = source_address - self._auth_challenge_count = 0 - - if host: - (code, msg) = self.connect(host, port) - if code != 220: - self.close() - raise SMTPConnectError(code, msg) - if local_hostname is not None: - self.local_hostname = local_hostname - else: - # RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and - # if that can't be calculated, that we should use a domain literal - # instead (essentially an encoded IP address like [A.B.C.D]). - fqdn = socket.getfqdn() - if '.' 
in fqdn: - self.local_hostname = fqdn - else: - # We can't find an fqdn hostname, so use a domain literal - addr = '127.0.0.1' - try: - addr = socket.gethostbyname(socket.gethostname()) - except socket.gaierror: - pass - self.local_hostname = '[%s]' % addr - - def __enter__(self): - return self - - def __exit__(self, *args): - try: - code, message = self.docmd("QUIT") - if code != 221: - raise SMTPResponseException(code, message) - except SMTPServerDisconnected: - pass - finally: - self.close() - - def set_debuglevel(self, debuglevel): - """Set the debug output level. - - A non-false value results in debug messages for connection and for all - messages sent to and received from the server. - - """ - self.debuglevel = debuglevel - - def _print_debug(self, *args): - if self.debuglevel > 1: - print(datetime.datetime.now().time(), *args, file=sys.stderr) - else: - print(*args, file=sys.stderr) - - def _get_socket(self, host, port, timeout): - # This makes it simpler for SMTP_SSL to use the SMTP connect code - # and just alter the socket connection bit. - if timeout is not None and not timeout: - raise ValueError('Non-blocking socket (timeout=0) is not supported') - if self.debuglevel > 0: - self._print_debug('connect: to', (host, port), self.source_address) - return socket.create_connection((host, port), timeout, - self.source_address) - - def connect(self, host='localhost', port=0, source_address=None): - """Connect to a host on a given port. - - If the hostname ends with a colon (`:') followed by a number, and - there is no port specified, that suffix will be stripped off and the - number interpreted as the port number to use. - - Note: This method is automatically invoked by __init__, if a host is - specified during instantiation. - - """ - - if source_address: - self.source_address = source_address - - if not port and (host.find(':') == host.rfind(':')): - i = host.rfind(':') - if i >= 0: - host, port = host[:i], host[i + 1:] - try: - port = int(port) - except ValueError: - raise OSError("nonnumeric port") - if not port: - port = self.default_port - sys.audit("smtplib.connect", self, host, port) - self.sock = self._get_socket(host, port, self.timeout) - self.file = None - (code, msg) = self.getreply() - if self.debuglevel > 0: - self._print_debug('connect:', repr(msg)) - return (code, msg) - - def send(self, s): - """Send `s' to the server.""" - if self.debuglevel > 0: - self._print_debug('send:', repr(s)) - if self.sock: - if isinstance(s, str): - # send is used by the 'data' command, where command_encoding - # should not be used, but 'data' needs to convert the string to - # binary itself anyway, so that's not a problem. - s = s.encode(self.command_encoding) - sys.audit("smtplib.send", self, s) - try: - self.sock.sendall(s) - except OSError: - self.close() - raise SMTPServerDisconnected('Server not connected') - else: - raise SMTPServerDisconnected('please run connect() first') - - def putcmd(self, cmd, args=""): - """Send a command to the server.""" - if args == "": - s = cmd - else: - s = f'{cmd} {args}' - if '\r' in s or '\n' in s: - s = s.replace('\n', '\\n').replace('\r', '\\r') - raise ValueError( - f'command and arguments contain prohibited newline characters: {s}' - ) - self.send(f'{s}{CRLF}') - - def getreply(self): - """Get a reply from the server. - - Returns a tuple consisting of: - - - server response code (e.g. '250', or such, if all goes well) - Note: returns -1 if it can't read response code. 
- - - server response string corresponding to response code (multiline - responses are converted to a single, multiline string). - - Raises SMTPServerDisconnected if end-of-file is reached. - """ - resp = [] - if self.file is None: - self.file = self.sock.makefile('rb') - while 1: - try: - line = self.file.readline(_MAXLINE + 1) - except OSError as e: - self.close() - raise SMTPServerDisconnected("Connection unexpectedly closed: " - + str(e)) - if not line: - self.close() - raise SMTPServerDisconnected("Connection unexpectedly closed") - if self.debuglevel > 0: - self._print_debug('reply:', repr(line)) - if len(line) > _MAXLINE: - self.close() - raise SMTPResponseException(500, "Line too long.") - resp.append(line[4:].strip(b' \t\r\n')) - code = line[:3] - # Check that the error code is syntactically correct. - # Don't attempt to read a continuation line if it is broken. - try: - errcode = int(code) - except ValueError: - errcode = -1 - break - # Check if multiline response. - if line[3:4] != b"-": - break - - errmsg = b"\n".join(resp) - if self.debuglevel > 0: - self._print_debug('reply: retcode (%s); Msg: %a' % (errcode, errmsg)) - return errcode, errmsg - - def docmd(self, cmd, args=""): - """Send a command, and return its response code.""" - self.putcmd(cmd, args) - return self.getreply() - - # std smtp commands - def helo(self, name=''): - """SMTP 'helo' command. - Hostname to send for this command defaults to the FQDN of the local - host. - """ - self.putcmd("helo", name or self.local_hostname) - (code, msg) = self.getreply() - self.helo_resp = msg - return (code, msg) - - def ehlo(self, name=''): - """ SMTP 'ehlo' command. - Hostname to send for this command defaults to the FQDN of the local - host. - """ - self.esmtp_features = {} - self.putcmd(self.ehlo_msg, name or self.local_hostname) - (code, msg) = self.getreply() - # According to RFC1869 some (badly written) - # MTA's will disconnect on an ehlo. Toss an exception if - # that happens -ddm - if code == -1 and len(msg) == 0: - self.close() - raise SMTPServerDisconnected("Server not connected") - self.ehlo_resp = msg - if code != 250: - return (code, msg) - self.does_esmtp = True - #parse the ehlo response -ddm - assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp) - resp = self.ehlo_resp.decode("latin-1").split('\n') - del resp[0] - for each in resp: - # To be able to communicate with as many SMTP servers as possible, - # we have to take the old-style auth advertisement into account, - # because: - # 1) Else our SMTP feature parser gets confused. - # 2) There are some servers that only advertise the auth methods we - # support using the old style. - auth_match = OLDSTYLE_AUTH.match(each) - if auth_match: - # This doesn't remove duplicates, but that's no problem - self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \ - + " " + auth_match.groups(0)[0] - continue - - # RFC 1869 requires a space between ehlo keyword and parameters. - # It's actually stricter, in that only spaces are allowed between - # parameters, but were not going to check for that here. Note - # that the space isn't present if there are no parameters. 
- m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each) - if m: - feature = m.group("feature").lower() - params = m.string[m.end("feature"):].strip() - if feature == "auth": - self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \ - + " " + params - else: - self.esmtp_features[feature] = params - return (code, msg) - - def has_extn(self, opt): - """Does the server support a given SMTP service extension?""" - return opt.lower() in self.esmtp_features - - def help(self, args=''): - """SMTP 'help' command. - Returns help text from server.""" - self.putcmd("help", args) - return self.getreply()[1] - - def rset(self): - """SMTP 'rset' command -- resets session.""" - self.command_encoding = 'ascii' - return self.docmd("rset") - - def _rset(self): - """Internal 'rset' command which ignores any SMTPServerDisconnected error. - - Used internally in the library, since the server disconnected error - should appear to the application when the *next* command is issued, if - we are doing an internal "safety" reset. - """ - try: - self.rset() - except SMTPServerDisconnected: - pass - - def noop(self): - """SMTP 'noop' command -- doesn't do anything :>""" - return self.docmd("noop") - - def mail(self, sender, options=()): - """SMTP 'mail' command -- begins mail xfer session. - - This method may raise the following exceptions: - - SMTPNotSupportedError The options parameter includes 'SMTPUTF8' - but the SMTPUTF8 extension is not supported by - the server. - """ - optionlist = '' - if options and self.does_esmtp: - if any(x.lower()=='smtputf8' for x in options): - if self.has_extn('smtputf8'): - self.command_encoding = 'utf-8' - else: - raise SMTPNotSupportedError( - 'SMTPUTF8 not supported by server') - optionlist = ' ' + ' '.join(options) - self.putcmd("mail", "from:%s%s" % (quoteaddr(sender), optionlist)) - return self.getreply() - - def rcpt(self, recip, options=()): - """SMTP 'rcpt' command -- indicates 1 recipient for this mail.""" - optionlist = '' - if options and self.does_esmtp: - optionlist = ' ' + ' '.join(options) - self.putcmd("rcpt", "to:%s%s" % (quoteaddr(recip), optionlist)) - return self.getreply() - - def data(self, msg): - """SMTP 'DATA' command -- sends message data to server. - - Automatically quotes lines beginning with a period per rfc821. - Raises SMTPDataError if there is an unexpected reply to the - DATA command; the return value from this method is the final - response code received when the all data is sent. If msg - is a string, lone '\\r' and '\\n' characters are converted to - '\\r\\n' characters. If msg is bytes, it is transmitted as is. - """ - self.putcmd("data") - (code, repl) = self.getreply() - if self.debuglevel > 0: - self._print_debug('data:', (code, repl)) - if code != 354: - raise SMTPDataError(code, repl) - else: - if isinstance(msg, str): - msg = _fix_eols(msg).encode('ascii') - q = _quote_periods(msg) - if q[-2:] != bCRLF: - q = q + bCRLF - q = q + b"." + bCRLF - self.send(q) - (code, msg) = self.getreply() - if self.debuglevel > 0: - self._print_debug('data:', (code, msg)) - return (code, msg) - - def verify(self, address): - """SMTP 'verify' command -- checks for address validity.""" - self.putcmd("vrfy", _addr_only(address)) - return self.getreply() - # a.k.a. - vrfy = verify - - def expn(self, address): - """SMTP 'expn' command -- expands a mailing list.""" - self.putcmd("expn", _addr_only(address)) - return self.getreply() - - # some useful methods - - def ehlo_or_helo_if_needed(self): - """Call self.ehlo() and/or self.helo() if needed.
- - If there has been no previous EHLO or HELO command this session, this - method tries ESMTP EHLO first. - - This method may raise the following exceptions: - - SMTPHeloError The server didn't reply properly to - the helo greeting. - """ - if self.helo_resp is None and self.ehlo_resp is None: - if not (200 <= self.ehlo()[0] <= 299): - (code, resp) = self.helo() - if not (200 <= code <= 299): - raise SMTPHeloError(code, resp) - - def auth(self, mechanism, authobject, *, initial_response_ok=True): - """Authentication command - requires response processing. - - 'mechanism' specifies which authentication mechanism is to - be used - the valid values are those listed in the 'auth' - element of 'esmtp_features'. - - 'authobject' must be a callable object taking a single argument: - - data = authobject(challenge) - - It will be called to process the server's challenge response; the - challenge argument it is passed will be a bytes. It should return - an ASCII string that will be base64 encoded and sent to the server. - - Keyword arguments: - - initial_response_ok: Allow sending the RFC 4954 initial-response - to the AUTH command, if the authentication methods supports it. - """ - # RFC 4954 allows auth methods to provide an initial response. Not all - # methods support it. By definition, if they return something other - # than None when challenge is None, then they do. See issue #15014. - mechanism = mechanism.upper() - initial_response = (authobject() if initial_response_ok else None) - if initial_response is not None: - response = encode_base64(initial_response.encode('ascii'), eol='') - (code, resp) = self.docmd("AUTH", mechanism + " " + response) - self._auth_challenge_count = 1 - else: - (code, resp) = self.docmd("AUTH", mechanism) - self._auth_challenge_count = 0 - # If server responds with a challenge, send the response. - while code == 334: - self._auth_challenge_count += 1 - challenge = base64.decodebytes(resp) - response = encode_base64( - authobject(challenge).encode('ascii'), eol='') - (code, resp) = self.docmd(response) - # If server keeps sending challenges, something is wrong. - if self._auth_challenge_count > _MAXCHALLENGE: - raise SMTPException( - "Server AUTH mechanism infinite loop. Last response: " - + repr((code, resp)) - ) - if code in (235, 503): - return (code, resp) - raise SMTPAuthenticationError(code, resp) - - def auth_cram_md5(self, challenge=None): - """ Authobject to use with CRAM-MD5 authentication. Requires self.user - and self.password to be set.""" - # CRAM-MD5 does not support initial-response. - if challenge is None: - return None - if not _have_cram_md5_support: - raise SMTPException("CRAM-MD5 is not supported") - password = self.password.encode('ascii') - authcode = hmac.HMAC(password, challenge, 'md5') - return f"{self.user} {authcode.hexdigest()}" - - def auth_plain(self, challenge=None): - """ Authobject to use with PLAIN authentication. Requires self.user and - self.password to be set.""" - return "\0%s\0%s" % (self.user, self.password) - - def auth_login(self, challenge=None): - """ Authobject to use with LOGIN authentication. Requires self.user and - self.password to be set.""" - if challenge is None or self._auth_challenge_count < 2: - return self.user - else: - return self.password - - def login(self, user, password, *, initial_response_ok=True): - """Log in on an SMTP server that requires authentication. - - The arguments are: - - user: The user name to authenticate with. - - password: The password for the authentication. 
- - Keyword arguments: - - initial_response_ok: Allow sending the RFC 4954 initial-response - to the AUTH command, if the authentication methods supports it. - - If there has been no previous EHLO or HELO command this session, this - method tries ESMTP EHLO first. - - This method will return normally if the authentication was successful. - - This method may raise the following exceptions: - - SMTPHeloError The server didn't reply properly to - the helo greeting. - SMTPAuthenticationError The server didn't accept the username/ - password combination. - SMTPNotSupportedError The AUTH command is not supported by the - server. - SMTPException No suitable authentication method was - found. - """ - - self.ehlo_or_helo_if_needed() - if not self.has_extn("auth"): - raise SMTPNotSupportedError( - "SMTP AUTH extension not supported by server.") - - # Authentication methods the server claims to support - advertised_authlist = self.esmtp_features["auth"].split() - - # Authentication methods we can handle in our preferred order: - if _have_cram_md5_support: - preferred_auths = ['CRAM-MD5', 'PLAIN', 'LOGIN'] - else: - preferred_auths = ['PLAIN', 'LOGIN'] - # We try the supported authentications in our preferred order, if - # the server supports them. - authlist = [auth for auth in preferred_auths - if auth in advertised_authlist] - if not authlist: - raise SMTPException("No suitable authentication method found.") - - # Some servers advertise authentication methods they don't really - # support, so if authentication fails, we continue until we've tried - # all methods. - self.user, self.password = user, password - for authmethod in authlist: - method_name = 'auth_' + authmethod.lower().replace('-', '_') - try: - (code, resp) = self.auth( - authmethod, getattr(self, method_name), - initial_response_ok=initial_response_ok) - # 235 == 'Authentication successful' - # 503 == 'Error: already authenticated' - if code in (235, 503): - return (code, resp) - except SMTPAuthenticationError as e: - last_exception = e - - # We could not login successfully. Return result of last attempt. - raise last_exception - - def starttls(self, *, context=None): - """Puts the connection to the SMTP server into TLS mode. - - If there has been no previous EHLO or HELO command this session, this - method tries ESMTP EHLO first. - - If the server supports TLS, this will encrypt the rest of the SMTP - session. If you provide the context parameter, - the identity of the SMTP server and client can be checked. This, - however, depends on whether the socket module really checks the - certificates. - - This method may raise the following exceptions: - - SMTPHeloError The server didn't reply properly to - the helo greeting. - """ - self.ehlo_or_helo_if_needed() - if not self.has_extn("starttls"): - raise SMTPNotSupportedError( - "STARTTLS extension not supported by server.") - (resp, reply) = self.docmd("STARTTLS") - if resp == 220: - if not _have_ssl: - raise RuntimeError("No SSL support included in this Python") - if context is None: - context = ssl._create_stdlib_context() - self.sock = context.wrap_socket(self.sock, - server_hostname=self._host) - self.file = None - # RFC 3207: - # The client MUST discard any knowledge obtained from - # the server, such as the list of SMTP service extensions, - # which was not obtained from the TLS negotiation itself. 
- self.helo_resp = None - self.ehlo_resp = None - self.esmtp_features = {} - self.does_esmtp = False - else: - # RFC 3207: - # 501 Syntax error (no parameters allowed) - # 454 TLS not available due to temporary reason - raise SMTPResponseException(resp, reply) - return (resp, reply) - - def sendmail(self, from_addr, to_addrs, msg, mail_options=(), - rcpt_options=()): - """This command performs an entire mail transaction. - - The arguments are: - - from_addr : The address sending this mail. - - to_addrs : A list of addresses to send this mail to. A bare - string will be treated as a list with 1 address. - - msg : The message to send. - - mail_options : List of ESMTP options (such as 8bitmime) for the - mail command. - - rcpt_options : List of ESMTP options (such as DSN commands) for - all the rcpt commands. - - msg may be a string containing characters in the ASCII range, or a byte - string. A string is encoded to bytes using the ascii codec, and lone - \\r and \\n characters are converted to \\r\\n characters. - - If there has been no previous EHLO or HELO command this session, this - method tries ESMTP EHLO first. If the server does ESMTP, message size - and each of the specified options will be passed to it. If EHLO - fails, HELO will be tried and ESMTP options suppressed. - - This method will return normally if the mail is accepted for at least - one recipient. It returns a dictionary, with one entry for each - recipient that was refused. Each entry contains a tuple of the SMTP - error code and the accompanying error message sent by the server. - - This method may raise the following exceptions: - - SMTPHeloError The server didn't reply properly to - the helo greeting. - SMTPRecipientsRefused The server rejected ALL recipients - (no mail was sent). - SMTPSenderRefused The server didn't accept the from_addr. - SMTPDataError The server replied with an unexpected - error code (other than a refusal of - a recipient). - SMTPNotSupportedError The mail_options parameter includes 'SMTPUTF8' - but the SMTPUTF8 extension is not supported by - the server. - - Note: the connection will be open even after an exception is raised. - - Example: - - >>> import smtplib - >>> s=smtplib.SMTP("localhost") - >>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"] - >>> msg = '''\\ - ... From: Me@my.org - ... Subject: testin'... - ... - ... This is a test ''' - >>> s.sendmail("me@my.org",tolist,msg) - { "three@three.org" : ( 550 ,"User unknown" ) } - >>> s.quit() - - In the above example, the message was accepted for delivery to three - of the four addresses, and one was rejected, with the error code - 550. If all addresses are accepted, then the method will return an - empty dictionary. 
- - """ - self.ehlo_or_helo_if_needed() - esmtp_opts = [] - if isinstance(msg, str): - msg = _fix_eols(msg).encode('ascii') - if self.does_esmtp: - if self.has_extn('size'): - esmtp_opts.append("size=%d" % len(msg)) - for option in mail_options: - esmtp_opts.append(option) - (code, resp) = self.mail(from_addr, esmtp_opts) - if code != 250: - if code == 421: - self.close() - else: - self._rset() - raise SMTPSenderRefused(code, resp, from_addr) - senderrs = {} - if isinstance(to_addrs, str): - to_addrs = [to_addrs] - for each in to_addrs: - (code, resp) = self.rcpt(each, rcpt_options) - if (code != 250) and (code != 251): - senderrs[each] = (code, resp) - if code == 421: - self.close() - raise SMTPRecipientsRefused(senderrs) - if len(senderrs) == len(to_addrs): - # the server refused all our recipients - self._rset() - raise SMTPRecipientsRefused(senderrs) - (code, resp) = self.data(msg) - if code != 250: - if code == 421: - self.close() - else: - self._rset() - raise SMTPDataError(code, resp) - #if we got here then somebody got our mail - return senderrs - - def send_message(self, msg, from_addr=None, to_addrs=None, - mail_options=(), rcpt_options=()): - """Converts message to a bytestring and passes it to sendmail. - - The arguments are as for sendmail, except that msg is an - email.message.Message object. If from_addr is None or to_addrs is - None, these arguments are taken from the headers of the Message as - described in RFC 5322 (a ValueError is raised if there is more than - one set of 'Resent-' headers). Regardless of the values of from_addr and - to_addr, any Bcc field (or Resent-Bcc field, when the Message is a - resent) of the Message object won't be transmitted. The Message - object is then serialized using email.generator.BytesGenerator and - sendmail is called to transmit the message. If the sender or any of - the recipient addresses contain non-ASCII and the server advertises the - SMTPUTF8 capability, the policy is cloned with utf8 set to True for the - serialization, and SMTPUTF8 and BODY=8BITMIME are asserted on the send. - If the server does not support SMTPUTF8, an SMTPNotSupported error is - raised. Otherwise the generator is called without modifying the - policy. - - """ - # 'Resent-Date' is a mandatory field if the Message is resent (RFC 5322 - # Section 3.6.6). In such a case, we use the 'Resent-*' fields. However, - # if there is more than one 'Resent-' block there's no way to - # unambiguously determine which one is the most recent in all cases, - # so rather than guess we raise a ValueError in that case. - # - # TODO implement heuristics to guess the correct Resent-* block with an - # option allowing the user to enable the heuristics. (It should be - # possible to guess correctly almost all of the time.) - - self.ehlo_or_helo_if_needed() - resent = msg.get_all('Resent-Date') - if resent is None: - header_prefix = '' - elif len(resent) == 1: - header_prefix = 'Resent-' - else: - raise ValueError("message has more than one 'Resent-' header block") - if from_addr is None: - # Prefer the sender field per RFC 5322 section 3.6.2. 
- from_addr = (msg[header_prefix + 'Sender'] - if (header_prefix + 'Sender') in msg - else msg[header_prefix + 'From']) - from_addr = email.utils.getaddresses([from_addr])[0][1] - if to_addrs is None: - addr_fields = [f for f in (msg[header_prefix + 'To'], - msg[header_prefix + 'Bcc'], - msg[header_prefix + 'Cc']) - if f is not None] - to_addrs = [a[1] for a in email.utils.getaddresses(addr_fields)] - # Make a local copy so we can delete the bcc headers. - msg_copy = copy.copy(msg) - del msg_copy['Bcc'] - del msg_copy['Resent-Bcc'] - international = False - try: - ''.join([from_addr, *to_addrs]).encode('ascii') - except UnicodeEncodeError: - if not self.has_extn('smtputf8'): - raise SMTPNotSupportedError( - "One or more source or delivery addresses require" - " internationalized email support, but the server" - " does not advertise the required SMTPUTF8 capability") - international = True - with io.BytesIO() as bytesmsg: - if international: - g = email.generator.BytesGenerator( - bytesmsg, policy=msg.policy.clone(utf8=True)) - mail_options = (*mail_options, 'SMTPUTF8', 'BODY=8BITMIME') - else: - g = email.generator.BytesGenerator(bytesmsg) - g.flatten(msg_copy, linesep='\r\n') - flatmsg = bytesmsg.getvalue() - return self.sendmail(from_addr, to_addrs, flatmsg, mail_options, - rcpt_options) - - def close(self): - """Close the connection to the SMTP server.""" - try: - file = self.file - self.file = None - if file: - file.close() - finally: - sock = self.sock - self.sock = None - if sock: - sock.close() - - def quit(self): - """Terminate the SMTP session.""" - res = self.docmd("quit") - # A new EHLO is required after reconnecting with connect() - self.ehlo_resp = self.helo_resp = None - self.esmtp_features = {} - self.does_esmtp = False - self.close() - return res - -if _have_ssl: - - class SMTP_SSL(SMTP): - """ This is a subclass derived from SMTP that connects over an SSL - encrypted socket (to use this class you need a socket module that was - compiled with SSL support). If host is not specified, '' (the local - host) is used. If port is omitted, the standard SMTP-over-SSL port - (465) is used. local_hostname and source_address have the same meaning - as they do in the SMTP class. context also optional, can contain a - SSLContext. - - """ - - default_port = SMTP_SSL_PORT - - def __init__(self, host='', port=0, local_hostname=None, - *, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None, context=None): - if context is None: - context = ssl._create_stdlib_context() - self.context = context - SMTP.__init__(self, host, port, local_hostname, timeout, - source_address) - - def _get_socket(self, host, port, timeout): - if self.debuglevel > 0: - self._print_debug('connect:', (host, port)) - new_socket = super()._get_socket(host, port, timeout) - new_socket = self.context.wrap_socket(new_socket, - server_hostname=self._host) - return new_socket - - __all__.append("SMTP_SSL") - -# -# LMTP extension -# -LMTP_PORT = 2003 - -class LMTP(SMTP): - """LMTP - Local Mail Transfer Protocol - - The LMTP protocol, which is very similar to ESMTP, is heavily based - on the standard SMTP client. It's common to use Unix sockets for - LMTP, so our connect() method must support that as well as a regular - host:port server. local_hostname and source_address have the same - meaning as they do in the SMTP class. To specify a Unix socket, - you must use an absolute path as the host, starting with a '/'. - - Authentication is supported, using the regular SMTP mechanism. 
When - using a Unix socket, LMTP generally don't support or require any - authentication, but your mileage might vary.""" - - ehlo_msg = "lhlo" - - def __init__(self, host='', port=LMTP_PORT, local_hostname=None, - source_address=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): - """Initialize a new instance.""" - super().__init__(host, port, local_hostname=local_hostname, - source_address=source_address, timeout=timeout) - - def connect(self, host='localhost', port=0, source_address=None): - """Connect to the LMTP daemon, on either a Unix or a TCP socket.""" - if host[0] != '/': - return super().connect(host, port, source_address=source_address) - - if self.timeout is not None and not self.timeout: - raise ValueError('Non-blocking socket (timeout=0) is not supported') - - # Handle Unix-domain sockets. - try: - self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: - self.sock.settimeout(self.timeout) - self.file = None - self.sock.connect(host) - except OSError: - if self.debuglevel > 0: - self._print_debug('connect fail:', host) - if self.sock: - self.sock.close() - self.sock = None - raise - (code, msg) = self.getreply() - if self.debuglevel > 0: - self._print_debug('connect:', msg) - return (code, msg) - - -# Test the sendmail method, which tests most of the others. -# Note: This always sends to localhost. -if __name__ == '__main__': - def prompt(prompt): - sys.stdout.write(prompt + ": ") - sys.stdout.flush() - return sys.stdin.readline().strip() - - fromaddr = prompt("From") - toaddrs = prompt("To").split(',') - print("Enter message, end with ^D:") - msg = '' - while line := sys.stdin.readline(): - msg = msg + line - print("Message length is %d" % len(msg)) - - server = SMTP('localhost') - server.set_debuglevel(1) - server.sendmail(fromaddr, toaddrs, msg) - server.quit() diff --git a/Python313_13_x86_Template/Lib/socket.py b/Python313_13_x86_Template/Lib/socket.py deleted file mode 100644 index 35d87eff..00000000 --- a/Python313_13_x86_Template/Lib/socket.py +++ /dev/null @@ -1,982 +0,0 @@ -# Wrapper module for _socket, providing some additional facilities -# implemented in Python. - -"""\ -This module provides socket operations and some related functions. -On Unix, it supports IP (Internet Protocol) and Unix domain sockets. -On other systems, it only supports IP. Functions specific for a -socket are available as methods of the socket object. - -Functions: - -socket() -- create a new socket object -socketpair() -- create a pair of new socket objects [*] -fromfd() -- create a socket object from an open file descriptor [*] -send_fds() -- Send file descriptor to the socket. -recv_fds() -- Receive file descriptors from the socket. -fromshare() -- create a socket object from data received from socket.share() [*] -gethostname() -- return the current hostname -gethostbyname() -- map a hostname to its IP number -gethostbyaddr() -- map an IP number or hostname to DNS info -getservbyname() -- map a service name and a protocol name to a port number -getprotobyname() -- map a protocol name (e.g.
'tcp') to a number -ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order -htons(), htonl() -- convert 16, 32 bit int from host to network byte order -inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format -inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) -socket.getdefaulttimeout() -- get the default timeout value -socket.setdefaulttimeout() -- set the default timeout value -create_connection() -- connects to an address, with an optional timeout and - optional source address. -create_server() -- create a TCP socket and bind it to a specified address. - - [*] not available on all platforms! - -Special objects: - -SocketType -- type object for socket objects -error -- exception raised for I/O errors -has_ipv6 -- boolean value indicating if IPv6 is supported - -IntEnum constants: - -AF_INET, AF_UNIX -- socket domains (first argument to socket() call) -SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) - -Integer constants: - -Many other constants may be defined; these may be used in calls to -the setsockopt() and getsockopt() methods. -""" - -import _socket -from _socket import * - -import os, sys, io, selectors -from enum import IntEnum, IntFlag - -try: - import errno -except ImportError: - errno = None -EBADF = getattr(errno, 'EBADF', 9) -EAGAIN = getattr(errno, 'EAGAIN', 11) -EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11) - -__all__ = ["fromfd", "getfqdn", "create_connection", "create_server", - "has_dualstack_ipv6", "AddressFamily", "SocketKind"] -__all__.extend(os._get_exports_list(_socket)) - -# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for -# nicer string representations. -# Note that _socket only knows about the integer values. The public interface -# in this module understands the enums and translates them back from integers -# where needed (e.g. .family property of a socket object). - -IntEnum._convert_( - 'AddressFamily', - __name__, - lambda C: C.isupper() and C.startswith('AF_')) - -IntEnum._convert_( - 'SocketKind', - __name__, - lambda C: C.isupper() and C.startswith('SOCK_')) - -IntFlag._convert_( - 'MsgFlag', - __name__, - lambda C: C.isupper() and C.startswith('MSG_')) - -IntFlag._convert_( - 'AddressInfo', - __name__, - lambda C: C.isupper() and C.startswith('AI_')) - -_LOCALHOST = '127.0.0.1' -_LOCALHOST_V6 = '::1' - - -def _intenum_converter(value, enum_klass): - """Convert a numeric family value to an IntEnum member. - - If it's not a known member, return the numeric value itself. - """ - try: - return enum_klass(value) - except ValueError: - return value - - -# WSA error codes -if sys.platform.lower().startswith("win"): - errorTab = {} - errorTab[6] = "Specified event object handle is invalid." - errorTab[8] = "Insufficient memory available." - errorTab[87] = "One or more parameters are invalid." - errorTab[995] = "Overlapped operation aborted." - errorTab[996] = "Overlapped I/O event object not in signaled state." - errorTab[997] = "Overlapped operation will complete later." - errorTab[10004] = "The operation was interrupted." - errorTab[10009] = "A bad file handle was passed." - errorTab[10013] = "Permission denied." - errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT - errorTab[10022] = "An invalid operation was attempted." - errorTab[10024] = "Too many open files." - errorTab[10035] = "The socket operation would block." - errorTab[10036] = "A blocking operation is already in progress." - errorTab[10037] = "Operation already in progress." 
- errorTab[10038] = "Socket operation on nonsocket." - errorTab[10039] = "Destination address required." - errorTab[10040] = "Message too long." - errorTab[10041] = "Protocol wrong type for socket." - errorTab[10042] = "Bad protocol option." - errorTab[10043] = "Protocol not supported." - errorTab[10044] = "Socket type not supported." - errorTab[10045] = "Operation not supported." - errorTab[10046] = "Protocol family not supported." - errorTab[10047] = "Address family not supported by protocol family." - errorTab[10048] = "The network address is in use." - errorTab[10049] = "Cannot assign requested address." - errorTab[10050] = "Network is down." - errorTab[10051] = "Network is unreachable." - errorTab[10052] = "Network dropped connection on reset." - errorTab[10053] = "Software caused connection abort." - errorTab[10054] = "The connection has been reset." - errorTab[10055] = "No buffer space available." - errorTab[10056] = "Socket is already connected." - errorTab[10057] = "Socket is not connected." - errorTab[10058] = "The network has been shut down." - errorTab[10059] = "Too many references." - errorTab[10060] = "The operation timed out." - errorTab[10061] = "Connection refused." - errorTab[10062] = "Cannot translate name." - errorTab[10063] = "The name is too long." - errorTab[10064] = "The host is down." - errorTab[10065] = "The host is unreachable." - errorTab[10066] = "Directory not empty." - errorTab[10067] = "Too many processes." - errorTab[10068] = "User quota exceeded." - errorTab[10069] = "Disk quota exceeded." - errorTab[10070] = "Stale file handle reference." - errorTab[10071] = "Item is remote." - errorTab[10091] = "Network subsystem is unavailable." - errorTab[10092] = "Winsock.dll version out of range." - errorTab[10093] = "Successful WSAStartup not yet performed." - errorTab[10101] = "Graceful shutdown in progress." - errorTab[10102] = "No more results from WSALookupServiceNext." - errorTab[10103] = "Call has been canceled." - errorTab[10104] = "Procedure call table is invalid." - errorTab[10105] = "Service provider is invalid." - errorTab[10106] = "Service provider failed to initialize." - errorTab[10107] = "System call failure." - errorTab[10108] = "Service not found." - errorTab[10109] = "Class type not found." - errorTab[10110] = "No more results from WSALookupServiceNext." - errorTab[10111] = "Call was canceled." - errorTab[10112] = "Database query was refused." - errorTab[11001] = "Host not found." - errorTab[11002] = "Nonauthoritative host not found." - errorTab[11003] = "This is a nonrecoverable error." - errorTab[11004] = "Valid name, no data record requested type." - errorTab[11005] = "QoS receivers." - errorTab[11006] = "QoS senders." - errorTab[11007] = "No QoS senders." - errorTab[11008] = "QoS no receivers." - errorTab[11009] = "QoS request confirmed." - errorTab[11010] = "QoS admission error." - errorTab[11011] = "QoS policy failure." - errorTab[11012] = "QoS bad style." - errorTab[11013] = "QoS bad object." - errorTab[11014] = "QoS traffic control error." - errorTab[11015] = "QoS generic error." - errorTab[11016] = "QoS service type error." - errorTab[11017] = "QoS flowspec error." - errorTab[11018] = "Invalid QoS provider buffer." - errorTab[11019] = "Invalid QoS filter style." - errorTab[11020] = "Invalid QoS filter style." - errorTab[11021] = "Incorrect QoS filter count." - errorTab[11022] = "Invalid QoS object length." - errorTab[11023] = "Incorrect QoS flow count." - errorTab[11024] = "Unrecognized QoS object." 
- errorTab[11025] = "Invalid QoS policy object." - errorTab[11026] = "Invalid QoS flow descriptor." - errorTab[11027] = "Invalid QoS provider-specific flowspec." - errorTab[11028] = "Invalid QoS provider-specific filterspec." - errorTab[11029] = "Invalid QoS shape discard mode object." - errorTab[11030] = "Invalid QoS shaping rate object." - errorTab[11031] = "Reserved policy QoS element type." - __all__.append("errorTab") - - -class _GiveupOnSendfile(Exception): pass - - -class socket(_socket.socket): - - """A subclass of _socket.socket adding the makefile() method.""" - - __slots__ = ["__weakref__", "_io_refs", "_closed"] - - def __init__(self, family=-1, type=-1, proto=-1, fileno=None): - # For user code address family and type values are IntEnum members, but - # for the underlying _socket.socket they're just integers. The - # constructor of _socket.socket converts the given argument to an - # integer automatically. - if fileno is None: - if family == -1: - family = AF_INET - if type == -1: - type = SOCK_STREAM - if proto == -1: - proto = 0 - _socket.socket.__init__(self, family, type, proto, fileno) - self._io_refs = 0 - self._closed = False - - def __enter__(self): - return self - - def __exit__(self, *args): - if not self._closed: - self.close() - - def __repr__(self): - """Wrap __repr__() to reveal the real class name and socket - address(es). - """ - closed = getattr(self, '_closed', False) - s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \ - % (self.__class__.__module__, - self.__class__.__qualname__, - " [closed]" if closed else "", - self.fileno(), - self.family, - self.type, - self.proto) - if not closed: - # getsockname and getpeername may not be available on WASI. - try: - laddr = self.getsockname() - if laddr: - s += ", laddr=%s" % str(laddr) - except (error, AttributeError): - pass - try: - raddr = self.getpeername() - if raddr: - s += ", raddr=%s" % str(raddr) - except (error, AttributeError): - pass - s += '>' - return s - - def __getstate__(self): - raise TypeError(f"cannot pickle {self.__class__.__name__!r} object") - - def dup(self): - """dup() -> socket object - - Duplicate the socket. Return a new socket object connected to the same - system resource. The new socket is non-inheritable. - """ - fd = dup(self.fileno()) - sock = self.__class__(self.family, self.type, self.proto, fileno=fd) - sock.settimeout(self.gettimeout()) - return sock - - def accept(self): - """accept() -> (socket object, address info) - - Wait for an incoming connection. Return a new socket - representing the connection, and the address of the client. - For IP sockets, the address info is a pair (hostaddr, port). - """ - fd, addr = self._accept() - sock = socket(self.family, self.type, self.proto, fileno=fd) - # Issue #7995: if no default timeout is set and the listening - # socket had a (non-zero) timeout, force the new socket in blocking - # mode to override platform-specific socket flags inheritance. - if getdefaulttimeout() is None and self.gettimeout(): - sock.setblocking(True) - return sock, addr - - def makefile(self, mode="r", buffering=None, *, - encoding=None, errors=None, newline=None): - """makefile(...) -> an I/O stream connected to the socket - - The arguments are as for io.open() after the filename, except the only - supported mode values are 'r' (default), 'w', 'b', or a combination of - those. - """ - # XXX refactor to share code? 
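# A hedged sketch of the makefile() API described above (not part of the
# original file): wrap both ends of a socketpair in buffered binary file
# objects; mode letters are limited to "r", "w" and "b".
import socket

a, b = socket.socketpair()
wfile = a.makefile("wb")          # BufferedWriter over a SocketIO raw object
rfile = b.makefile("rb")          # BufferedReader over the peer socket
wfile.write(b"ping\n")
wfile.flush()                     # buffered: flush before the peer can read
assert rfile.readline() == b"ping\n"
for obj in (wfile, rfile, a, b):
    obj.close()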
- if not set(mode) <= {"r", "w", "b"}: - raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,)) - writing = "w" in mode - reading = "r" in mode or not writing - assert reading or writing - binary = "b" in mode - rawmode = "" - if reading: - rawmode += "r" - if writing: - rawmode += "w" - raw = SocketIO(self, rawmode) - self._io_refs += 1 - if buffering is None: - buffering = -1 - if buffering < 0: - buffering = io.DEFAULT_BUFFER_SIZE - if buffering == 0: - if not binary: - raise ValueError("unbuffered streams must be binary") - return raw - if reading and writing: - buffer = io.BufferedRWPair(raw, raw, buffering) - elif reading: - buffer = io.BufferedReader(raw, buffering) - else: - assert writing - buffer = io.BufferedWriter(raw, buffering) - if binary: - return buffer - encoding = io.text_encoding(encoding) - text = io.TextIOWrapper(buffer, encoding, errors, newline) - text.mode = mode - return text - - if hasattr(os, 'sendfile'): - - def _sendfile_use_sendfile(self, file, offset=0, count=None): - self._check_sendfile_params(file, offset, count) - sockno = self.fileno() - try: - fileno = file.fileno() - except (AttributeError, io.UnsupportedOperation) as err: - raise _GiveupOnSendfile(err) # not a regular file - try: - fsize = os.fstat(fileno).st_size - except OSError as err: - raise _GiveupOnSendfile(err) # not a regular file - if not fsize: - return 0 # empty file - # Truncate to 1GiB to avoid OverflowError, see bpo-38319. - blocksize = min(count or fsize, 2 ** 30) - timeout = self.gettimeout() - if timeout == 0: - raise ValueError("non-blocking sockets are not supported") - # poll/select have the advantage of not requiring any - # extra file descriptor, contrarily to epoll/kqueue - # (also, they require a single syscall). - if hasattr(selectors, 'PollSelector'): - selector = selectors.PollSelector() - else: - selector = selectors.SelectSelector() - selector.register(sockno, selectors.EVENT_WRITE) - - total_sent = 0 - # localize variable access to minimize overhead - selector_select = selector.select - os_sendfile = os.sendfile - try: - while True: - if timeout and not selector_select(timeout): - raise TimeoutError('timed out') - if count: - blocksize = min(count - total_sent, blocksize) - if blocksize <= 0: - break - try: - sent = os_sendfile(sockno, fileno, offset, blocksize) - except BlockingIOError: - if not timeout: - # Block until the socket is ready to send some - # data; avoids hogging CPU resources. - selector_select() - continue - except OSError as err: - if total_sent == 0: - # We can get here for different reasons, the main - # one being 'file' is not a regular mmap(2)-like - # file, in which case we'll fall back on using - # plain send(). 
- raise _GiveupOnSendfile(err) - raise err from None - else: - if sent == 0: - break # EOF - offset += sent - total_sent += sent - return total_sent - finally: - if total_sent > 0 and hasattr(file, 'seek'): - file.seek(offset) - else: - def _sendfile_use_sendfile(self, file, offset=0, count=None): - raise _GiveupOnSendfile( - "os.sendfile() not available on this platform") - - def _sendfile_use_send(self, file, offset=0, count=None): - self._check_sendfile_params(file, offset, count) - if self.gettimeout() == 0: - raise ValueError("non-blocking sockets are not supported") - if offset: - file.seek(offset) - blocksize = min(count, 8192) if count else 8192 - total_sent = 0 - # localize variable access to minimize overhead - file_read = file.read - sock_send = self.send - try: - while True: - if count: - blocksize = min(count - total_sent, blocksize) - if blocksize <= 0: - break - data = memoryview(file_read(blocksize)) - if not data: - break # EOF - while True: - try: - sent = sock_send(data) - except BlockingIOError: - continue - else: - total_sent += sent - if sent < len(data): - data = data[sent:] - else: - break - return total_sent - finally: - if total_sent > 0 and hasattr(file, 'seek'): - file.seek(offset + total_sent) - - def _check_sendfile_params(self, file, offset, count): - if 'b' not in getattr(file, 'mode', 'b'): - raise ValueError("file should be opened in binary mode") - if not self.type & SOCK_STREAM: - raise ValueError("only SOCK_STREAM type sockets are supported") - if count is not None: - if not isinstance(count, int): - raise TypeError( - "count must be a positive integer (got {!r})".format(count)) - if count <= 0: - raise ValueError( - "count must be a positive integer (got {!r})".format(count)) - - def sendfile(self, file, offset=0, count=None): - """sendfile(file[, offset[, count]]) -> sent - - Send a file until EOF is reached by using high-performance - os.sendfile() and return the total number of bytes which - were sent. - *file* must be a regular file object opened in binary mode. - If os.sendfile() is not available (e.g. Windows) or file is - not a regular file socket.send() will be used instead. - *offset* tells from where to start reading the file. - If specified, *count* is the total number of bytes to transmit - as opposed to sending the file until EOF is reached. - File position is updated on return or also in case of error in - which case file.tell() can be used to figure out the number of - bytes which were sent. - The socket must be of SOCK_STREAM type. - Non-blocking sockets are not supported. - """ - try: - return self._sendfile_use_sendfile(file, offset, count) - except _GiveupOnSendfile: - return self._sendfile_use_send(file, offset, count) - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self, _ss=_socket.socket): - # This function should not reference any globals. See issue #808164. - _ss.close(self) - - def close(self): - # This function should not reference any globals. See issue #808164. - self._closed = True - if self._io_refs <= 0: - self._real_close() - - def detach(self): - """detach() -> file descriptor - - Close the socket object without closing the underlying file descriptor. - The object cannot be used after this call, but the file descriptor - can be reused for other purposes. The file descriptor is returned. - """ - self._closed = True - return super().detach() - - @property - def family(self): - """Read-only access to the address family for this socket. 
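# A minimal sketch of the sendfile() method documented above: stream a binary
# file over a connected SOCK_STREAM socket, falling back to plain send() when
# os.sendfile() is unavailable (e.g. Windows). The address and file path are
# illustrative assumptions.
import socket

with socket.create_connection(("127.0.0.1", 9000)) as conn:
    with open("payload.bin", "rb") as f:      # must be opened in binary mode
        sent = conn.sendfile(f)               # count=None -> send until EOF
        print("sent", sent, "bytes")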
- """ - return _intenum_converter(super().family, AddressFamily) - - @property - def type(self): - """Read-only access to the socket type. - """ - return _intenum_converter(super().type, SocketKind) - - if os.name == 'nt': - def get_inheritable(self): - return os.get_handle_inheritable(self.fileno()) - def set_inheritable(self, inheritable): - os.set_handle_inheritable(self.fileno(), inheritable) - else: - def get_inheritable(self): - return os.get_inheritable(self.fileno()) - def set_inheritable(self, inheritable): - os.set_inheritable(self.fileno(), inheritable) - get_inheritable.__doc__ = "Get the inheritable flag of the socket" - set_inheritable.__doc__ = "Set the inheritable flag of the socket" - -def fromfd(fd, family, type, proto=0): - """ fromfd(fd, family, type[, proto]) -> socket object - - Create a socket object from a duplicate of the given file - descriptor. The remaining arguments are the same as for socket(). - """ - nfd = dup(fd) - return socket(family, type, proto, nfd) - -if hasattr(_socket.socket, "sendmsg"): - import array - - def send_fds(sock, buffers, fds, flags=0, address=None): - """ send_fds(sock, buffers, fds[, flags[, address]]) -> integer - - Send the list of file descriptors fds over an AF_UNIX socket. - """ - return sock.sendmsg(buffers, [(_socket.SOL_SOCKET, - _socket.SCM_RIGHTS, array.array("i", fds))]) - __all__.append("send_fds") - -if hasattr(_socket.socket, "recvmsg"): - import array - - def recv_fds(sock, bufsize, maxfds, flags=0): - """ recv_fds(sock, bufsize, maxfds[, flags]) -> (data, list of file - descriptors, msg_flags, address) - - Receive up to maxfds file descriptors returning the message - data and a list containing the descriptors. - """ - # Array of ints - fds = array.array("i") - msg, ancdata, flags, addr = sock.recvmsg(bufsize, - _socket.CMSG_LEN(maxfds * fds.itemsize)) - for cmsg_level, cmsg_type, cmsg_data in ancdata: - if (cmsg_level == _socket.SOL_SOCKET and cmsg_type == _socket.SCM_RIGHTS): - fds.frombytes(cmsg_data[: - len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) - - return msg, list(fds), flags, addr - __all__.append("recv_fds") - -if hasattr(_socket.socket, "share"): - def fromshare(info): - """ fromshare(info) -> socket object - - Create a socket object from the bytes object returned by - socket.share(pid). - """ - return socket(0, 0, 0, info) - __all__.append("fromshare") - -# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. -# This is used if _socket doesn't natively provide socketpair. It's -# always defined so that it can be patched in for testing purposes. -def _fallback_socketpair(family=AF_INET, type=SOCK_STREAM, proto=0): - if family == AF_INET: - host = _LOCALHOST - elif family == AF_INET6: - host = _LOCALHOST_V6 - else: - raise ValueError("Only AF_INET and AF_INET6 socket address families " - "are supported") - if type != SOCK_STREAM: - raise ValueError("Only SOCK_STREAM socket type is supported") - if proto != 0: - raise ValueError("Only protocol zero is supported") - - # We create a connected TCP socket. Note the trick with - # setblocking(False) that prevents us from having to create a thread. 
- lsock = socket(family, type, proto) - try: - lsock.bind((host, 0)) - lsock.listen() - # On IPv6, ignore flow_info and scope_id - addr, port = lsock.getsockname()[:2] - csock = socket(family, type, proto) - try: - csock.setblocking(False) - try: - csock.connect((addr, port)) - except (BlockingIOError, InterruptedError): - pass - csock.setblocking(True) - ssock, _ = lsock.accept() - except: - csock.close() - raise - finally: - lsock.close() - - # Authenticating avoids using a connection from something else - # able to connect to {host}:{port} instead of us. - # We expect only AF_INET and AF_INET6 families. - try: - if ( - ssock.getsockname() != csock.getpeername() - or csock.getsockname() != ssock.getpeername() - ): - raise ConnectionError("Unexpected peer connection") - except: - # getsockname() and getpeername() can fail - # if either socket isn't connected. - ssock.close() - csock.close() - raise - - return (ssock, csock) - -if hasattr(_socket, "socketpair"): - def socketpair(family=None, type=SOCK_STREAM, proto=0): - if family is None: - try: - family = AF_UNIX - except NameError: - family = AF_INET - a, b = _socket.socketpair(family, type, proto) - a = socket(family, type, proto, a.detach()) - b = socket(family, type, proto, b.detach()) - return a, b - -else: - socketpair = _fallback_socketpair - __all__.append("socketpair") - -socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object) -Create a pair of socket objects from the sockets returned by the platform -socketpair() function. -The arguments are the same as for socket() except the default family is AF_UNIX -if defined on the platform; otherwise, the default is AF_INET. -""" - -_blocking_errnos = { EAGAIN, EWOULDBLOCK } - -class SocketIO(io.RawIOBase): - - """Raw I/O implementation for stream sockets. - - This class supports the makefile() method on sockets. It provides - the raw I/O interface on top of a socket object. - """ - - # One might wonder why not let FileIO do the job instead. There are two - # main reasons why FileIO is not adapted: - # - it wouldn't work under Windows (where you can't used read() and - # write() on a socket handle) - # - it wouldn't work with socket timeouts (FileIO would ignore the - # timeout and consider the socket non-blocking) - - # XXX More docs - - def __init__(self, sock, mode): - if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): - raise ValueError("invalid mode: %r" % mode) - io.RawIOBase.__init__(self) - self._sock = sock - if "b" not in mode: - mode += "b" - self._mode = mode - self._reading = "r" in mode - self._writing = "w" in mode - self._timeout_occurred = False - - def readinto(self, b): - """Read up to len(b) bytes into the writable buffer *b* and return - the number of bytes read. If the socket is non-blocking and no bytes - are available, None is returned. - - If *b* is non-empty, a 0 return value indicates that the connection - was shutdown at the other end. - """ - self._checkClosed() - self._checkReadable() - if self._timeout_occurred: - raise OSError("cannot read from timed out object") - try: - return self._sock.recv_into(b) - except timeout: - self._timeout_occurred = True - raise - except error as e: - if e.errno in _blocking_errnos: - return None - raise - - def write(self, b): - """Write the given bytes or bytearray object *b* to the socket - and return the number of bytes written. This can be less than - len(b) if not all data could be written. If the socket is - non-blocking and no bytes could be written None is returned. 
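# Illustrates the readinto() contract stated above: on a non-blocking socket,
# SocketIO returns None when no data is available. A sketch only; makefile()
# is normally used with blocking sockets.
import socket, time

a, b = socket.socketpair()
b.setblocking(False)
f = b.makefile("rb")
buf = bytearray(16)
print(f.raw.readinto(buf))                   # None: EAGAIN/EWOULDBLOCK mapped to None
a.sendall(b"hi")
time.sleep(0.1)
print(f.raw.readinto(buf), bytes(buf[:2]))   # 2 b'hi'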
- """ - self._checkClosed() - self._checkWritable() - try: - return self._sock.send(b) - except error as e: - # XXX what about EINTR? - if e.errno in _blocking_errnos: - return None - raise - - def readable(self): - """True if the SocketIO is open for reading. - """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return self._reading - - def writable(self): - """True if the SocketIO is open for writing. - """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return self._writing - - def seekable(self): - """True if the SocketIO is open for seeking. - """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return super().seekable() - - def fileno(self): - """Return the file descriptor of the underlying socket. - """ - self._checkClosed() - return self._sock.fileno() - - @property - def name(self): - if not self.closed: - return self.fileno() - else: - return -1 - - @property - def mode(self): - return self._mode - - def close(self): - """Close the SocketIO object. This doesn't close the underlying - socket, except if all references to it have disappeared. - """ - if self.closed: - return - io.RawIOBase.close(self) - self._sock._decref_socketios() - self._sock = None - - -def getfqdn(name=''): - """Get fully qualified domain name from name. - - An empty argument is interpreted as meaning the local host. - - First the hostname returned by gethostbyaddr() is checked, then - possibly existing aliases. In case no FQDN is available and `name` - was given, it is returned unchanged. If `name` was empty, '0.0.0.0' or '::', - hostname from gethostname() is returned. - """ - name = name.strip() - if not name or name in ('0.0.0.0', '::'): - name = gethostname() - try: - hostname, aliases, ipaddrs = gethostbyaddr(name) - except error: - pass - else: - aliases.insert(0, hostname) - for name in aliases: - if '.' in name: - break - else: - name = hostname - return name - - -_GLOBAL_DEFAULT_TIMEOUT = object() - -def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, - source_address=None, *, all_errors=False): - """Connect to *address* and return the socket object. - - Convenience function. Connect to *address* (a 2-tuple ``(host, - port)``) and return the socket object. Passing the optional - *timeout* parameter will set the timeout on the socket instance - before attempting to connect. If no *timeout* is supplied, the - global default timeout setting returned by :func:`getdefaulttimeout` - is used. If *source_address* is set it must be a tuple of (host, port) - for the socket to bind as a source address before making the connection. - A host of '' or port 0 tells the OS to use the default. When a connection - cannot be created, raises the last error if *all_errors* is False, - and an ExceptionGroup of all errors if *all_errors* is True. 
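# A sketch of create_connection() per the docstring above: connect with a
# timeout, and let all_errors=True surface every per-address failure as an
# ExceptionGroup (Python 3.11+). Host and port are illustrative assumptions.
import socket

try:
    with socket.create_connection(("example.org", 80), timeout=5.0,
                                  all_errors=True) as sock:
        sock.sendall(b"HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n")
        print(sock.recv(120))
except* OSError as eg:
    print("connect failed:", eg.exceptions)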
- """ - - host, port = address - exceptions = [] - for res in getaddrinfo(host, port, 0, SOCK_STREAM): - af, socktype, proto, canonname, sa = res - sock = None - try: - sock = socket(af, socktype, proto) - if timeout is not _GLOBAL_DEFAULT_TIMEOUT: - sock.settimeout(timeout) - if source_address: - sock.bind(source_address) - sock.connect(sa) - # Break explicitly a reference cycle - exceptions.clear() - return sock - - except error as exc: - if not all_errors: - exceptions.clear() # raise only the last error - exceptions.append(exc) - if sock is not None: - sock.close() - - if len(exceptions): - try: - if not all_errors: - raise exceptions[0] - raise ExceptionGroup("create_connection failed", exceptions) - finally: - # Break explicitly a reference cycle - exceptions.clear() - else: - raise error("getaddrinfo returns an empty list") - - -def has_dualstack_ipv6(): - """Return True if the platform supports creating a SOCK_STREAM socket - which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections. - """ - if not has_ipv6 \ - or not hasattr(_socket, 'IPPROTO_IPV6') \ - or not hasattr(_socket, 'IPV6_V6ONLY'): - return False - try: - with socket(AF_INET6, SOCK_STREAM) as sock: - sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) - return True - except error: - return False - - -def create_server(address, *, family=AF_INET, backlog=None, reuse_port=False, - dualstack_ipv6=False): - """Convenience function which creates a SOCK_STREAM type socket - bound to *address* (a 2-tuple (host, port)) and return the socket - object. - - *family* should be either AF_INET or AF_INET6. - *backlog* is the queue size passed to socket.listen(). - *reuse_port* dictates whether to use the SO_REUSEPORT socket option. - *dualstack_ipv6*: if true and the platform supports it, it will - create an AF_INET6 socket able to accept both IPv4 or IPv6 - connections. When false it will explicitly disable this option on - platforms that enable it by default (e.g. Linux). - - >>> with create_server(('', 8000)) as server: - ... while True: - ... conn, addr = server.accept() - ... # handle new connection - """ - if reuse_port and not hasattr(_socket, "SO_REUSEPORT"): - raise ValueError("SO_REUSEPORT not supported on this platform") - if dualstack_ipv6: - if not has_dualstack_ipv6(): - raise ValueError("dualstack_ipv6 not supported on this platform") - if family != AF_INET6: - raise ValueError("dualstack_ipv6 requires AF_INET6 family") - sock = socket(family, SOCK_STREAM) - try: - # Note about Windows. We don't set SO_REUSEADDR because: - # 1) It's unnecessary: bind() will succeed even in case of a - # previous closed socket on the same address and still in - # TIME_WAIT state. - # 2) If set, another socket is free to bind() on the same - # address, effectively preventing this one from accepting - # connections. Also, it may set the process in a state where - # it'll no longer respond to any signals or graceful kills. - # See: https://learn.microsoft.com/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse - if os.name not in ('nt', 'cygwin') and \ - hasattr(_socket, 'SO_REUSEADDR'): - try: - sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) - except error: - # Fail later on bind(), for platforms which may not - # support this option. - pass - # Since Linux 6.12.9, SO_REUSEPORT is not allowed - # on other address families than AF_INET/AF_INET6. 
- if reuse_port and family in (AF_INET, AF_INET6): - sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1) - if has_ipv6 and family == AF_INET6: - if dualstack_ipv6: - sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) - elif hasattr(_socket, "IPV6_V6ONLY") and \ - hasattr(_socket, "IPPROTO_IPV6"): - sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1) - try: - sock.bind(address) - except error as err: - msg = '%s (while attempting to bind on address %r)' % \ - (err.strerror, address) - raise error(err.errno, msg) from None - if backlog is None: - sock.listen() - else: - sock.listen(backlog) - return sock - except error: - sock.close() - raise - - -def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): - """Resolve host and port into list of address info entries. - - Translate the host/port argument into a sequence of 5-tuples that contain - all the necessary arguments for creating a socket connected to that service. - host is a domain name, a string representation of an IPv4/v6 address or - None. port is a string service name such as 'http', a numeric port number or - None. By passing None as the value of host and port, you can pass NULL to - the underlying C API. - - The family, type and proto arguments can be optionally specified in order to - narrow the list of addresses returned. Passing zero as a value for each of - these arguments selects the full range of results. - """ - # We override this function since we want to translate the numeric family - # and socket type values to enum constants. - addrlist = [] - for res in _socket.getaddrinfo(host, port, family, type, proto, flags): - af, socktype, proto, canonname, sa = res - addrlist.append((_intenum_converter(af, AddressFamily), - _intenum_converter(socktype, SocketKind), - proto, canonname, sa)) - return addrlist diff --git a/Python313_13_x86_Template/Lib/sqlite3/__init__.py b/Python313_13_x86_Template/Lib/sqlite3/__init__.py deleted file mode 100644 index e3c81ffc..00000000 --- a/Python313_13_x86_Template/Lib/sqlite3/__init__.py +++ /dev/null @@ -1,70 +0,0 @@ -# pysqlite2/__init__.py: the pysqlite2 package. -# -# Copyright (C) 2005 Gerhard Häring -# -# This file is part of pysqlite. -# -# This software is provided 'as-is', without any express or implied -# warranty. In no event will the authors be held liable for any damages -# arising from the use of this software. -# -# Permission is granted to anyone to use this software for any purpose, -# including commercial applications, and to alter it and redistribute it -# freely, subject to the following restrictions: -# -# 1. The origin of this software must not be misrepresented; you must not -# claim that you wrote the original software. If you use this software -# in a product, an acknowledgment in the product documentation would be -# appreciated but is not required. -# 2. Altered source versions must be plainly marked as such, and must not be -# misrepresented as being the original software. -# 3. This notice may not be removed or altered from any source distribution. - -""" -The sqlite3 extension module provides a DB-API 2.0 (PEP 249) compliant -interface to the SQLite library, and requires SQLite 3.15.2 or newer. 
- -To use the module, start by creating a database Connection object: - - import sqlite3 - cx = sqlite3.connect("test.db") # test.db will be created or opened - -The special path name ":memory:" can be provided to connect to a transient -in-memory database: - - cx = sqlite3.connect(":memory:") # connect to a database in RAM - -Once a connection has been established, create a Cursor object and call -its execute() method to perform SQL queries: - - cu = cx.cursor() - - # create a table - cu.execute("create table lang(name, first_appeared)") - - # insert values into a table - cu.execute("insert into lang values (?, ?)", ("C", 1972)) - - # execute a query and iterate over the result - for row in cu.execute("select * from lang"): - print(row) - - cx.close() - -The sqlite3 module is written by Gerhard Häring . -""" - -from sqlite3.dbapi2 import * -from sqlite3.dbapi2 import (_deprecated_names, - _deprecated_version_info, - _deprecated_version) - - -def __getattr__(name): - if name in _deprecated_names: - from warnings import warn - - warn(f"{name} is deprecated and will be removed in Python 3.14", - DeprecationWarning, stacklevel=2) - return globals()[f"_deprecated_{name}"] - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/Python313_13_x86_Template/Lib/sqlite3/__main__.py b/Python313_13_x86_Template/Lib/sqlite3/__main__.py deleted file mode 100644 index 87a80a6f..00000000 --- a/Python313_13_x86_Template/Lib/sqlite3/__main__.py +++ /dev/null @@ -1,139 +0,0 @@ -"""A simple SQLite CLI for the sqlite3 module. - -Apart from using 'argparse' for the command-line interface, -this module implements the REPL as a thin wrapper around -the InteractiveConsole class from the 'code' stdlib module. -""" -import sqlite3 -import sys - -from argparse import ArgumentParser -from code import InteractiveConsole -from textwrap import dedent - - -def execute(c, sql, suppress_errors=True): - """Helper that wraps execution of SQL code. - - This is used both by the REPL and by direct execution from the CLI. - - 'c' may be a cursor or a connection. - 'sql' is the SQL string to execute. - """ - - try: - for row in c.execute(sql): - print(row) - except sqlite3.Error as e: - tp = type(e).__name__ - try: - print(f"{tp} ({e.sqlite_errorname}): {e}", file=sys.stderr) - except AttributeError: - print(f"{tp}: {e}", file=sys.stderr) - if not suppress_errors: - sys.exit(1) - - -class SqliteInteractiveConsole(InteractiveConsole): - """A simple SQLite REPL.""" - - def __init__(self, connection): - super().__init__() - self._con = connection - self._cur = connection.cursor() - - def runsource(self, source, filename="", symbol="single"): - """Override runsource, the core of the InteractiveConsole REPL. - - Return True if more input is needed; buffering is done automatically. - Return False if input is a complete statement ready for execution.
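# The module-level __getattr__ hook above emits DeprecationWarning lazily for
# the legacy names. A sketch of observing this (not part of the original
# module), assuming an interpreter where the deprecated 'version' alias still
# exists, i.e. before Python 3.14:
import warnings
import sqlite3

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = sqlite3.version               # resolved through __getattr__
print(caught[0].category is DeprecationWarning)   # True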
- """ - if not source or source.isspace(): - return False - if source[0] == ".": - match source[1:].strip(): - case "version": - print(f"{sqlite3.sqlite_version}") - case "help": - print("Enter SQL code and press enter.") - case "quit": - sys.exit(0) - case "": - pass - case _ as unknown: - self.write("Error: unknown command or invalid arguments:" - f' "{unknown}".\n') - else: - if not sqlite3.complete_statement(source): - return True - execute(self._cur, source) - return False - - -def main(*args): - parser = ArgumentParser( - description="Python sqlite3 CLI", - prog="python -m sqlite3", - ) - parser.add_argument( - "filename", type=str, default=":memory:", nargs="?", - help=( - "SQLite database to open (defaults to ':memory:'). " - "A new database is created if the file does not previously exist." - ), - ) - parser.add_argument( - "sql", type=str, nargs="?", - help=( - "An SQL query to execute. " - "Any returned rows are printed to stdout." - ), - ) - parser.add_argument( - "-v", "--version", action="version", - version=f"SQLite version {sqlite3.sqlite_version}", - help="Print underlying SQLite library version", - ) - args = parser.parse_args(*args) - - if args.filename == ":memory:": - db_name = "a transient in-memory database" - else: - db_name = repr(args.filename) - - # Prepare REPL banner and prompts. - if sys.platform == "win32" and "idlelib.run" not in sys.modules: - eofkey = "CTRL-Z" - else: - eofkey = "CTRL-D" - banner = dedent(f""" - sqlite3 shell, running on SQLite version {sqlite3.sqlite_version} - Connected to {db_name} - - Each command will be run using execute() on the cursor. - Type ".help" for more information; type ".quit" or {eofkey} to quit. - """).strip() - sys.ps1 = "sqlite> " - sys.ps2 = " ... " - - con = sqlite3.connect(args.filename, isolation_level=None) - try: - if args.sql: - # SQL statement provided on the command-line; execute it directly. - execute(con, args.sql, suppress_errors=False) - else: - # No SQL provided; start the REPL. - console = SqliteInteractiveConsole(con) - try: - import readline - except ImportError: - pass - console.interact(banner, exitmsg="") - finally: - con.close() - - sys.exit(0) - - -if __name__ == "__main__": - main(sys.argv[1:]) diff --git a/Python313_13_x86_Template/Lib/sqlite3/dbapi2.py b/Python313_13_x86_Template/Lib/sqlite3/dbapi2.py deleted file mode 100644 index 56fc0461..00000000 --- a/Python313_13_x86_Template/Lib/sqlite3/dbapi2.py +++ /dev/null @@ -1,108 +0,0 @@ -# pysqlite2/dbapi2.py: the DB-API 2.0 interface -# -# Copyright (C) 2004-2005 Gerhard Häring -# -# This file is part of pysqlite. -# -# This software is provided 'as-is', without any express or implied -# warranty. In no event will the authors be held liable for any damages -# arising from the use of this software. -# -# Permission is granted to anyone to use this software for any purpose, -# including commercial applications, and to alter it and redistribute it -# freely, subject to the following restrictions: -# -# 1. The origin of this software must not be misrepresented; you must not -# claim that you wrote the original software. If you use this software -# in a product, an acknowledgment in the product documentation would be -# appreciated but is not required. -# 2. Altered source versions must be plainly marked as such, and must not be -# misrepresented as being the original software. -# 3. This notice may not be removed or altered from any source distribution. 
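# The deprecated default adapters/converters registered below point readers
# at replacement recipes; a hedged sketch of explicit ISO-8601 date handling
# instead of those defaults:
import datetime
import sqlite3

sqlite3.register_adapter(datetime.date, lambda d: d.isoformat())
sqlite3.register_converter(
    "date", lambda b: datetime.date.fromisoformat(b.decode()))

con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
con.execute("create table t(d date)")
con.execute("insert into t values (?)", (datetime.date(2024, 1, 2),))
print(con.execute("select d from t").fetchone()[0])   # datetime.date(2024, 1, 2)
con.close()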
- -import datetime -import time -import collections.abc - -from _sqlite3 import * -from _sqlite3 import _deprecated_version - -_deprecated_names = frozenset({"version", "version_info"}) - -paramstyle = "qmark" - -apilevel = "2.0" - -Date = datetime.date - -Time = datetime.time - -Timestamp = datetime.datetime - -def DateFromTicks(ticks): - return Date(*time.localtime(ticks)[:3]) - -def TimeFromTicks(ticks): - return Time(*time.localtime(ticks)[3:6]) - -def TimestampFromTicks(ticks): - return Timestamp(*time.localtime(ticks)[:6]) - -_deprecated_version_info = tuple(map(int, _deprecated_version.split("."))) -sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")]) - -Binary = memoryview -collections.abc.Sequence.register(Row) - -def register_adapters_and_converters(): - from warnings import warn - - msg = ("The default {what} is deprecated as of Python 3.12; " - "see the sqlite3 documentation for suggested replacement recipes") - - def adapt_date(val): - warn(msg.format(what="date adapter"), DeprecationWarning, stacklevel=2) - return val.isoformat() - - def adapt_datetime(val): - warn(msg.format(what="datetime adapter"), DeprecationWarning, stacklevel=2) - return val.isoformat(" ") - - def convert_date(val): - warn(msg.format(what="date converter"), DeprecationWarning, stacklevel=2) - return datetime.date(*map(int, val.split(b"-"))) - - def convert_timestamp(val): - warn(msg.format(what="timestamp converter"), DeprecationWarning, stacklevel=2) - datepart, timepart = val.split(b" ") - year, month, day = map(int, datepart.split(b"-")) - timepart_full = timepart.split(b".") - hours, minutes, seconds = map(int, timepart_full[0].split(b":")) - if len(timepart_full) == 2: - microseconds = int('{:0<6.6}'.format(timepart_full[1].decode())) - else: - microseconds = 0 - - val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds) - return val - - - register_adapter(datetime.date, adapt_date) - register_adapter(datetime.datetime, adapt_datetime) - register_converter("date", convert_date) - register_converter("timestamp", convert_timestamp) - -register_adapters_and_converters() - -# Clean up namespace - -del(register_adapters_and_converters) - -def __getattr__(name): - if name in _deprecated_names: - from warnings import warn - - warn(f"{name} is deprecated and will be removed in Python 3.14", - DeprecationWarning, stacklevel=2) - return globals()[f"_deprecated_{name}"] - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/Python313_13_x86_Template/Lib/ssl.py b/Python313_13_x86_Template/Lib/ssl.py deleted file mode 100644 index 7508e4f2..00000000 --- a/Python313_13_x86_Template/Lib/ssl.py +++ /dev/null @@ -1,1529 +0,0 @@ -# Wrapper module for _ssl, providing some additional facilities -# implemented in Python. Written by Bill Janssen. - -"""This module provides some more Pythonic support for SSL. 
- -Object types: - - SSLSocket -- subtype of socket.socket which does SSL over the socket - -Exceptions: - - SSLError -- exception raised for I/O errors - -Functions: - - cert_time_to_seconds -- convert time string used for certificate - notBefore and notAfter functions to integer - seconds past the Epoch (the time values - returned from time.time()) - - get_server_certificate (addr, ssl_version, ca_certs, timeout) -- Retrieve the - certificate from the server at the specified - address and return it as a PEM-encoded string - - -Integer constants: - -SSL_ERROR_ZERO_RETURN -SSL_ERROR_WANT_READ -SSL_ERROR_WANT_WRITE -SSL_ERROR_WANT_X509_LOOKUP -SSL_ERROR_SYSCALL -SSL_ERROR_SSL -SSL_ERROR_WANT_CONNECT - -SSL_ERROR_EOF -SSL_ERROR_INVALID_ERROR_CODE - -The following group define certificate requirements that one side is -allowing/requiring from the other side: - -CERT_NONE - no certificates from the other side are required (or will - be looked at if provided) -CERT_OPTIONAL - certificates are not required, but if provided will be - validated, and if validation fails, the connection will - also fail -CERT_REQUIRED - certificates are required, and will be validated, and - if validation fails, the connection will also fail - -The following constants identify various SSL protocol variants: - -PROTOCOL_SSLv2 -PROTOCOL_SSLv3 -PROTOCOL_SSLv23 -PROTOCOL_TLS -PROTOCOL_TLS_CLIENT -PROTOCOL_TLS_SERVER -PROTOCOL_TLSv1 -PROTOCOL_TLSv1_1 -PROTOCOL_TLSv1_2 - -The following constants identify various SSL alert message descriptions as per -http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-6 - -ALERT_DESCRIPTION_CLOSE_NOTIFY -ALERT_DESCRIPTION_UNEXPECTED_MESSAGE -ALERT_DESCRIPTION_BAD_RECORD_MAC -ALERT_DESCRIPTION_RECORD_OVERFLOW -ALERT_DESCRIPTION_DECOMPRESSION_FAILURE -ALERT_DESCRIPTION_HANDSHAKE_FAILURE -ALERT_DESCRIPTION_BAD_CERTIFICATE -ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE -ALERT_DESCRIPTION_CERTIFICATE_REVOKED -ALERT_DESCRIPTION_CERTIFICATE_EXPIRED -ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN -ALERT_DESCRIPTION_ILLEGAL_PARAMETER -ALERT_DESCRIPTION_UNKNOWN_CA -ALERT_DESCRIPTION_ACCESS_DENIED -ALERT_DESCRIPTION_DECODE_ERROR -ALERT_DESCRIPTION_DECRYPT_ERROR -ALERT_DESCRIPTION_PROTOCOL_VERSION -ALERT_DESCRIPTION_INSUFFICIENT_SECURITY -ALERT_DESCRIPTION_INTERNAL_ERROR -ALERT_DESCRIPTION_USER_CANCELLED -ALERT_DESCRIPTION_NO_RENEGOTIATION -ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION -ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE -ALERT_DESCRIPTION_UNRECOGNIZED_NAME -ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE -ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE -ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY -""" - -import sys -import os -from collections import namedtuple -from enum import Enum as _Enum, IntEnum as _IntEnum, IntFlag as _IntFlag -from enum import _simple_enum - -import _ssl # if we can't import it, let the error propagate - -from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION -from _ssl import _SSLContext, MemoryBIO, SSLSession -from _ssl import ( - SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError, - SSLSyscallError, SSLEOFError, SSLCertVerificationError - ) -from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj -from _ssl import RAND_status, RAND_add, RAND_bytes -try: - from _ssl import RAND_egd -except ImportError: - # RAND_egd is not supported on some platforms - pass - - -from _ssl import ( - HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN, HAS_SSLv2, HAS_SSLv3, HAS_TLSv1, - HAS_TLSv1_1, HAS_TLSv1_2, HAS_TLSv1_3, HAS_PSK -) -from _ssl import 
_DEFAULT_CIPHERS, _OPENSSL_API_VERSION - -_IntEnum._convert_( - '_SSLMethod', __name__, - lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23', - source=_ssl) - -_IntFlag._convert_( - 'Options', __name__, - lambda name: name.startswith('OP_'), - source=_ssl) - -_IntEnum._convert_( - 'AlertDescription', __name__, - lambda name: name.startswith('ALERT_DESCRIPTION_'), - source=_ssl) - -_IntEnum._convert_( - 'SSLErrorNumber', __name__, - lambda name: name.startswith('SSL_ERROR_'), - source=_ssl) - -_IntFlag._convert_( - 'VerifyFlags', __name__, - lambda name: name.startswith('VERIFY_'), - source=_ssl) - -_IntEnum._convert_( - 'VerifyMode', __name__, - lambda name: name.startswith('CERT_'), - source=_ssl) - -PROTOCOL_SSLv23 = _SSLMethod.PROTOCOL_SSLv23 = _SSLMethod.PROTOCOL_TLS -_PROTOCOL_NAMES = {value: name for name, value in _SSLMethod.__members__.items()} - -_SSLv2_IF_EXISTS = getattr(_SSLMethod, 'PROTOCOL_SSLv2', None) - - -@_simple_enum(_IntEnum) -class TLSVersion: - MINIMUM_SUPPORTED = _ssl.PROTO_MINIMUM_SUPPORTED - SSLv3 = _ssl.PROTO_SSLv3 - TLSv1 = _ssl.PROTO_TLSv1 - TLSv1_1 = _ssl.PROTO_TLSv1_1 - TLSv1_2 = _ssl.PROTO_TLSv1_2 - TLSv1_3 = _ssl.PROTO_TLSv1_3 - MAXIMUM_SUPPORTED = _ssl.PROTO_MAXIMUM_SUPPORTED - - -@_simple_enum(_IntEnum) -class _TLSContentType: - """Content types (record layer) - - See RFC 8446, section B.1 - """ - CHANGE_CIPHER_SPEC = 20 - ALERT = 21 - HANDSHAKE = 22 - APPLICATION_DATA = 23 - # pseudo content types - HEADER = 0x100 - INNER_CONTENT_TYPE = 0x101 - - -@_simple_enum(_IntEnum) -class _TLSAlertType: - """Alert types for TLSContentType.ALERT messages - - See RFC 8446, section B.2 - """ - CLOSE_NOTIFY = 0 - UNEXPECTED_MESSAGE = 10 - BAD_RECORD_MAC = 20 - DECRYPTION_FAILED = 21 - RECORD_OVERFLOW = 22 - DECOMPRESSION_FAILURE = 30 - HANDSHAKE_FAILURE = 40 - NO_CERTIFICATE = 41 - BAD_CERTIFICATE = 42 - UNSUPPORTED_CERTIFICATE = 43 - CERTIFICATE_REVOKED = 44 - CERTIFICATE_EXPIRED = 45 - CERTIFICATE_UNKNOWN = 46 - ILLEGAL_PARAMETER = 47 - UNKNOWN_CA = 48 - ACCESS_DENIED = 49 - DECODE_ERROR = 50 - DECRYPT_ERROR = 51 - EXPORT_RESTRICTION = 60 - PROTOCOL_VERSION = 70 - INSUFFICIENT_SECURITY = 71 - INTERNAL_ERROR = 80 - INAPPROPRIATE_FALLBACK = 86 - USER_CANCELED = 90 - NO_RENEGOTIATION = 100 - MISSING_EXTENSION = 109 - UNSUPPORTED_EXTENSION = 110 - CERTIFICATE_UNOBTAINABLE = 111 - UNRECOGNIZED_NAME = 112 - BAD_CERTIFICATE_STATUS_RESPONSE = 113 - BAD_CERTIFICATE_HASH_VALUE = 114 - UNKNOWN_PSK_IDENTITY = 115 - CERTIFICATE_REQUIRED = 116 - NO_APPLICATION_PROTOCOL = 120 - - -@_simple_enum(_IntEnum) -class _TLSMessageType: - """Message types (handshake protocol) - - See RFC 8446, section B.3 - """ - HELLO_REQUEST = 0 - CLIENT_HELLO = 1 - SERVER_HELLO = 2 - HELLO_VERIFY_REQUEST = 3 - NEWSESSION_TICKET = 4 - END_OF_EARLY_DATA = 5 - HELLO_RETRY_REQUEST = 6 - ENCRYPTED_EXTENSIONS = 8 - CERTIFICATE = 11 - SERVER_KEY_EXCHANGE = 12 - CERTIFICATE_REQUEST = 13 - SERVER_DONE = 14 - CERTIFICATE_VERIFY = 15 - CLIENT_KEY_EXCHANGE = 16 - FINISHED = 20 - CERTIFICATE_URL = 21 - CERTIFICATE_STATUS = 22 - SUPPLEMENTAL_DATA = 23 - KEY_UPDATE = 24 - NEXT_PROTO = 67 - MESSAGE_HASH = 254 - CHANGE_CIPHER_SPEC = 0x0101 - - -if sys.platform == "win32": - from _ssl import enum_certificates, enum_crls - -from socket import socket, SOCK_STREAM, create_connection -from socket import SOL_SOCKET, SO_TYPE, _GLOBAL_DEFAULT_TIMEOUT -import socket as _socket -import base64 # for DER-to-PEM translation -import errno -import warnings - - -socket_error = OSError # keep that public name in 
module namespace - -CHANNEL_BINDING_TYPES = ['tls-unique'] - -HAS_NEVER_CHECK_COMMON_NAME = hasattr(_ssl, 'HOSTFLAG_NEVER_CHECK_SUBJECT') - - -_RESTRICTED_SERVER_CIPHERS = _DEFAULT_CIPHERS - -CertificateError = SSLCertVerificationError - - -def _dnsname_match(dn, hostname): - """Matching according to RFC 6125, section 6.4.3 - - - Hostnames are compared lower-case. - - For IDNA, both dn and hostname must be encoded as IDN A-label (ACE). - - Partial wildcards like 'www*.example.org', multiple wildcards, sole - wildcard or wildcards in labels other then the left-most label are not - supported and a CertificateError is raised. - - A wildcard must match at least one character. - """ - if not dn: - return False - - wildcards = dn.count('*') - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - if wildcards > 1: - raise CertificateError( - "too many wildcards in certificate DNS name: {!r}.".format(dn)) - - dn_leftmost, sep, dn_remainder = dn.partition('.') - - if '*' in dn_remainder: - # Only match wildcard in leftmost segment. - raise CertificateError( - "wildcard can only be present in the leftmost label: " - "{!r}.".format(dn)) - - if not sep: - # no right side - raise CertificateError( - "sole wildcard without additional labels are not support: " - "{!r}.".format(dn)) - - if dn_leftmost != '*': - # no partial wildcard matching - raise CertificateError( - "partial wildcards in leftmost label are not supported: " - "{!r}.".format(dn)) - - hostname_leftmost, sep, hostname_remainder = hostname.partition('.') - if not hostname_leftmost or not sep: - # wildcard must match at least one char - return False - return dn_remainder.lower() == hostname_remainder.lower() - - -def _inet_paton(ipname): - """Try to convert an IP address to packed binary form - - Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6 - support. - """ - # inet_aton() also accepts strings like '1', '127.1', some also trailing - # data like '127.0.0.1 whatever'. - try: - addr = _socket.inet_aton(ipname) - except OSError: - # not an IPv4 address - pass - else: - if _socket.inet_ntoa(addr) == ipname: - # only accept injective ipnames - return addr - else: - # refuse for short IPv4 notation and additional trailing data - raise ValueError( - "{!r} is not a quad-dotted IPv4 address.".format(ipname) - ) - - try: - return _socket.inet_pton(_socket.AF_INET6, ipname) - except OSError: - raise ValueError("{!r} is neither an IPv4 nor an IP6 " - "address.".format(ipname)) - except AttributeError: - # AF_INET6 not available - pass - - raise ValueError("{!r} is not an IPv4 address.".format(ipname)) - - -def _ipaddress_match(cert_ipaddress, host_ip): - """Exact matching of IP addresses. - - RFC 6125 explicitly doesn't define an algorithm for this - (section 1.7.2 - "Out of Scope"). - """ - # OpenSSL may add a trailing newline to a subjectAltName's IP address, - # commonly with IPv6 addresses. Strip off trailing \n. - ip = _inet_paton(cert_ipaddress.rstrip()) - return ip == host_ip - - -DefaultVerifyPaths = namedtuple("DefaultVerifyPaths", - "cafile capath openssl_cafile_env openssl_cafile openssl_capath_env " - "openssl_capath") - -def get_default_verify_paths(): - """Return paths to default cafile and capath. 
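# Illustration of the RFC 6125 wildcard rules implemented above, via the
# module-private helper (an internal API, shown only to clarify the rules):
import ssl

print(ssl._dnsname_match("*.example.org", "www.example.org"))   # True
print(ssl._dnsname_match("*.example.org", "example.org"))       # False: '*' must match a label
# Partial ('www*.example.org'), multiple, or non-leftmost wildcards raise
# ssl.CertificateError.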
- """ - parts = _ssl.get_default_verify_paths() - - # environment vars shadow paths - cafile = os.environ.get(parts[0], parts[1]) - capath = os.environ.get(parts[2], parts[3]) - - return DefaultVerifyPaths(cafile if os.path.isfile(cafile) else None, - capath if os.path.isdir(capath) else None, - *parts) - - -class _ASN1Object(namedtuple("_ASN1Object", "nid shortname longname oid")): - """ASN.1 object identifier lookup - """ - __slots__ = () - - def __new__(cls, oid): - return super().__new__(cls, *_txt2obj(oid, name=False)) - - @classmethod - def fromnid(cls, nid): - """Create _ASN1Object from OpenSSL numeric ID - """ - return super().__new__(cls, *_nid2obj(nid)) - - @classmethod - def fromname(cls, name): - """Create _ASN1Object from short name, long name or OID - """ - return super().__new__(cls, *_txt2obj(name, name=True)) - - -class Purpose(_ASN1Object, _Enum): - """SSLContext purpose flags with X509v3 Extended Key Usage objects - """ - SERVER_AUTH = '1.3.6.1.5.5.7.3.1' - CLIENT_AUTH = '1.3.6.1.5.5.7.3.2' - - -class SSLContext(_SSLContext): - """An SSLContext holds various SSL-related configuration options and - data, such as certificates and possibly a private key.""" - _windows_cert_stores = ("CA", "ROOT") - - sslsocket_class = None # SSLSocket is assigned later. - sslobject_class = None # SSLObject is assigned later. - - def __new__(cls, protocol=None, *args, **kwargs): - if protocol is None: - warnings.warn( - "ssl.SSLContext() without protocol argument is deprecated.", - category=DeprecationWarning, - stacklevel=2 - ) - protocol = PROTOCOL_TLS - self = _SSLContext.__new__(cls, protocol) - return self - - def _encode_hostname(self, hostname): - if hostname is None: - return None - elif isinstance(hostname, str): - return hostname.encode('idna').decode('ascii') - else: - return hostname.decode('ascii') - - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, - suppress_ragged_eofs=True, - server_hostname=None, session=None): - # SSLSocket class handles server_hostname encoding before it calls - # ctx._wrap_socket() - return self.sslsocket_class._create( - sock=sock, - server_side=server_side, - do_handshake_on_connect=do_handshake_on_connect, - suppress_ragged_eofs=suppress_ragged_eofs, - server_hostname=server_hostname, - context=self, - session=session - ) - - def wrap_bio(self, incoming, outgoing, server_side=False, - server_hostname=None, session=None): - # Need to encode server_hostname here because _wrap_bio() can only - # handle ASCII str. 
- return self.sslobject_class._create( - incoming, outgoing, server_side=server_side, - server_hostname=self._encode_hostname(server_hostname), - session=session, context=self, - ) - - def set_npn_protocols(self, npn_protocols): - warnings.warn( - "ssl NPN is deprecated, use ALPN instead", - DeprecationWarning, - stacklevel=2 - ) - protos = bytearray() - for protocol in npn_protocols: - b = bytes(protocol, 'ascii') - if len(b) == 0 or len(b) > 255: - raise SSLError('NPN protocols must be 1 to 255 in length') - protos.append(len(b)) - protos.extend(b) - - self._set_npn_protocols(protos) - - def set_servername_callback(self, server_name_callback): - if server_name_callback is None: - self.sni_callback = None - else: - if not callable(server_name_callback): - raise TypeError("not a callable object") - - def shim_cb(sslobj, servername, sslctx): - servername = self._encode_hostname(servername) - return server_name_callback(sslobj, servername, sslctx) - - self.sni_callback = shim_cb - - def set_alpn_protocols(self, alpn_protocols): - protos = bytearray() - for protocol in alpn_protocols: - b = bytes(protocol, 'ascii') - if len(b) == 0 or len(b) > 255: - raise SSLError('ALPN protocols must be 1 to 255 in length') - protos.append(len(b)) - protos.extend(b) - - self._set_alpn_protocols(protos) - - def _load_windows_store_certs(self, storename, purpose): - try: - for cert, encoding, trust in enum_certificates(storename): - # CA certs are never PKCS#7 encoded - if encoding == "x509_asn": - if trust is True or purpose.oid in trust: - try: - self.load_verify_locations(cadata=cert) - except SSLError as exc: - warnings.warn(f"Bad certificate in Windows certificate store: {exc!s}") - except PermissionError: - warnings.warn("unable to enumerate Windows certificate store") - - def load_default_certs(self, purpose=Purpose.SERVER_AUTH): - if not isinstance(purpose, _ASN1Object): - raise TypeError(purpose) - if sys.platform == "win32": - for storename in self._windows_cert_stores: - self._load_windows_store_certs(storename, purpose) - self.set_default_verify_paths() - - if hasattr(_SSLContext, 'minimum_version'): - @property - def minimum_version(self): - return TLSVersion(super().minimum_version) - - @minimum_version.setter - def minimum_version(self, value): - if value == TLSVersion.SSLv3: - self.options &= ~Options.OP_NO_SSLv3 - super(SSLContext, SSLContext).minimum_version.__set__(self, value) - - @property - def maximum_version(self): - return TLSVersion(super().maximum_version) - - @maximum_version.setter - def maximum_version(self, value): - super(SSLContext, SSLContext).maximum_version.__set__(self, value) - - @property - def options(self): - return Options(super().options) - - @options.setter - def options(self, value): - super(SSLContext, SSLContext).options.__set__(self, value) - - if hasattr(_ssl, 'HOSTFLAG_NEVER_CHECK_SUBJECT'): - @property - def hostname_checks_common_name(self): - ncs = self._host_flags & _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT - return ncs != _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT - - @hostname_checks_common_name.setter - def hostname_checks_common_name(self, value): - if value: - self._host_flags &= ~_ssl.HOSTFLAG_NEVER_CHECK_SUBJECT - else: - self._host_flags |= _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT - else: - @property - def hostname_checks_common_name(self): - return True - - @property - def _msg_callback(self): - """TLS message callback - - The message callback provides a debugging hook to analyze TLS - connections. 
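# A short sketch of set_alpn_protocols() as implemented above: each protocol
# name is length-prefixed on the wire and must be 1 to 255 bytes long. The
# protocol names are illustrative.
import ssl

ctx = ssl.create_default_context()
ctx.set_alpn_protocols(["h2", "http/1.1"])    # client preference order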
The callback is called for any TLS protocol message - (header, handshake, alert, and more), but not for application data. - Due to technical limitations, the callback can't be used to filter - traffic or to abort a connection. Any exception raised in the - callback is delayed until the handshake, read, or write operation - has been performed. - - def msg_cb(conn, direction, version, content_type, msg_type, data): - pass - - conn - :class:`SSLSocket` or :class:`SSLObject` instance - direction - ``read`` or ``write`` - version - :class:`TLSVersion` enum member or int for unknown version. For a - frame header, it's the header version. - content_type - :class:`_TLSContentType` enum member or int for unsupported - content type. - msg_type - Either a :class:`_TLSContentType` enum number for a header - message, a :class:`_TLSAlertType` enum member for an alert - message, a :class:`_TLSMessageType` enum member for other - messages, or int for unsupported message types. - data - Raw, decrypted message content as bytes - """ - inner = super()._msg_callback - if inner is not None: - return inner.user_function - else: - return None - - @_msg_callback.setter - def _msg_callback(self, callback): - if callback is None: - super(SSLContext, SSLContext)._msg_callback.__set__(self, None) - return - - if not hasattr(callback, '__call__'): - raise TypeError(f"{callback} is not callable.") - - def inner(conn, direction, version, content_type, msg_type, data): - try: - version = TLSVersion(version) - except ValueError: - pass - - try: - content_type = _TLSContentType(content_type) - except ValueError: - pass - - if content_type == _TLSContentType.HEADER: - msg_enum = _TLSContentType - elif content_type == _TLSContentType.ALERT: - msg_enum = _TLSAlertType - else: - msg_enum = _TLSMessageType - try: - msg_type = msg_enum(msg_type) - except ValueError: - pass - - return callback(conn, direction, version, - content_type, msg_type, data) - - inner.user_function = callback - - super(SSLContext, SSLContext)._msg_callback.__set__(self, inner) - - @property - def protocol(self): - return _SSLMethod(super().protocol) - - @property - def verify_flags(self): - return VerifyFlags(super().verify_flags) - - @verify_flags.setter - def verify_flags(self, value): - super(SSLContext, SSLContext).verify_flags.__set__(self, value) - - @property - def verify_mode(self): - value = super().verify_mode - try: - return VerifyMode(value) - except ValueError: - return value - - @verify_mode.setter - def verify_mode(self, value): - super(SSLContext, SSLContext).verify_mode.__set__(self, value) - - -def create_default_context(purpose=Purpose.SERVER_AUTH, *, cafile=None, - capath=None, cadata=None): - """Create a SSLContext object with default settings. - - NOTE: The protocol and settings may change anytime without prior - deprecation. The values represent a fair balance between maximum - compatibility and security. - """ - if not isinstance(purpose, _ASN1Object): - raise TypeError(purpose) - - # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION, - # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE - # by default. 
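# Sketch of the TLSVersion plumbing defined above: the minimum_version /
# maximum_version properties accept TLSVersion members and read back as the
# enum.
import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.minimum_version = ssl.TLSVersion.TLSv1_2   # refuse TLS 1.1 and older
print(ctx.minimum_version.name)                # TLSv1_2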
- if purpose == Purpose.SERVER_AUTH: - # verify certs and host name in client mode - context = SSLContext(PROTOCOL_TLS_CLIENT) - context.verify_mode = CERT_REQUIRED - context.check_hostname = True - elif purpose == Purpose.CLIENT_AUTH: - context = SSLContext(PROTOCOL_TLS_SERVER) - else: - raise ValueError(purpose) - - # `VERIFY_X509_PARTIAL_CHAIN` makes OpenSSL's chain building behave more - # like RFC 3280 and 5280, which specify that chain building stops with the - # first trust anchor, even if that anchor is not self-signed. - # - # `VERIFY_X509_STRICT` makes OpenSSL more conservative about the - # certificates it accepts, including "disabling workarounds for - # some broken certificates." - context.verify_flags |= (_ssl.VERIFY_X509_PARTIAL_CHAIN | - _ssl.VERIFY_X509_STRICT) - - if cafile or capath or cadata: - context.load_verify_locations(cafile, capath, cadata) - elif context.verify_mode != CERT_NONE: - # no explicit cafile, capath or cadata but the verify mode is - # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system - # root CA certificates for the given purpose. This may fail silently. - context.load_default_certs(purpose) - # OpenSSL 1.1.1 keylog file - if hasattr(context, 'keylog_filename'): - keylogfile = os.environ.get('SSLKEYLOGFILE') - if keylogfile and not sys.flags.ignore_environment: - context.keylog_filename = keylogfile - return context - -def _create_unverified_context(protocol=None, *, cert_reqs=CERT_NONE, - check_hostname=False, purpose=Purpose.SERVER_AUTH, - certfile=None, keyfile=None, - cafile=None, capath=None, cadata=None): - """Create a SSLContext object for Python stdlib modules - - All Python stdlib modules shall use this function to create SSLContext - objects in order to keep common settings in one place. The configuration - is less restrict than create_default_context()'s to increase backward - compatibility. - """ - if not isinstance(purpose, _ASN1Object): - raise TypeError(purpose) - - # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION, - # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE - # by default. - if purpose == Purpose.SERVER_AUTH: - # verify certs and host name in client mode - if protocol is None: - protocol = PROTOCOL_TLS_CLIENT - elif purpose == Purpose.CLIENT_AUTH: - if protocol is None: - protocol = PROTOCOL_TLS_SERVER - else: - raise ValueError(purpose) - - context = SSLContext(protocol) - context.check_hostname = check_hostname - if cert_reqs is not None: - context.verify_mode = cert_reqs - if check_hostname: - context.check_hostname = True - - if keyfile and not certfile: - raise ValueError("certfile must be specified") - if certfile or keyfile: - context.load_cert_chain(certfile, keyfile) - - # load CA root certs - if cafile or capath or cadata: - context.load_verify_locations(cafile, capath, cadata) - elif context.verify_mode != CERT_NONE: - # no explicit cafile, capath or cadata but the verify mode is - # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system - # root CA certificates for the given purpose. This may fail silently. - context.load_default_certs(purpose) - # OpenSSL 1.1.1 keylog file - if hasattr(context, 'keylog_filename'): - keylogfile = os.environ.get('SSLKEYLOGFILE') - if keylogfile and not sys.flags.ignore_environment: - context.keylog_filename = keylogfile - return context - -# Used by http.client if no context is explicitly passed. -_create_default_https_context = create_default_context - - -# Backwards compatibility alias, even though it's not a public name. 
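For reference, a minimal client-side sketch of create_default_context() as defined above; the host name and port are illustrative only:

import socket
import ssl

ctx = ssl.create_default_context()   # CERT_REQUIRED and check_hostname enabled
with socket.create_connection(("example.com", 443)) as sock:
    with ctx.wrap_socket(sock, server_hostname="example.com") as tls:
        print(tls.version(), tls.cipher())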
-_create_stdlib_context = _create_unverified_context - - -class SSLObject: - """This class implements an interface on top of a low-level SSL object as - implemented by OpenSSL. This object captures the state of an SSL connection - but does not provide any network IO itself. IO needs to be performed - through separate "BIO" objects which are OpenSSL's IO abstraction layer. - - This class does not have a public constructor. Instances are returned by - ``SSLContext.wrap_bio``. This class is typically used by framework authors - that want to implement asynchronous IO for SSL through memory buffers. - - When compared to ``SSLSocket``, this object lacks the following features: - - * Any form of network IO, including methods such as ``recv`` and ``send``. - * The ``do_handshake_on_connect`` and ``suppress_ragged_eofs`` machinery. - """ - def __init__(self, *args, **kwargs): - raise TypeError( - f"{self.__class__.__name__} does not have a public " - f"constructor. Instances are returned by SSLContext.wrap_bio()." - ) - - @classmethod - def _create(cls, incoming, outgoing, server_side=False, - server_hostname=None, session=None, context=None): - self = cls.__new__(cls) - sslobj = context._wrap_bio( - incoming, outgoing, server_side=server_side, - server_hostname=server_hostname, - owner=self, session=session - ) - self._sslobj = sslobj - return self - - @property - def context(self): - """The SSLContext that is currently in use.""" - return self._sslobj.context - - @context.setter - def context(self, ctx): - self._sslobj.context = ctx - - @property - def session(self): - """The SSLSession for client socket.""" - return self._sslobj.session - - @session.setter - def session(self, session): - self._sslobj.session = session - - @property - def session_reused(self): - """Was the client session reused during handshake""" - return self._sslobj.session_reused - - @property - def server_side(self): - """Whether this is a server-side socket.""" - return self._sslobj.server_side - - @property - def server_hostname(self): - """The currently set server hostname (for SNI), or ``None`` if no - server hostname is set.""" - return self._sslobj.server_hostname - - def read(self, len=1024, buffer=None): - """Read up to 'len' bytes from the SSL object and return them. - - If 'buffer' is provided, read into this buffer and return the number of - bytes read. - """ - if buffer is not None: - v = self._sslobj.read(len, buffer) - else: - v = self._sslobj.read(len) - return v - - def write(self, data): - """Write 'data' to the SSL object and return the number of bytes - written. - - The 'data' argument must support the buffer interface. - """ - return self._sslobj.write(data) - - def getpeercert(self, binary_form=False): - """Returns a formatted version of the data in the certificate provided - by the other end of the SSL channel. - - Return None if no certificate was provided, {} if a certificate was - provided, but not validated. - """ - return self._sslobj.getpeercert(binary_form) - - def get_verified_chain(self): - """Returns verified certificate chain provided by the other - end of the SSL channel as a list of DER-encoded bytes. - - If certificate verification was disabled method acts the same as - ``SSLSocket.get_unverified_chain``. 
- """ - chain = self._sslobj.get_verified_chain() - - if chain is None: - return [] - - return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] - - def get_unverified_chain(self): - """Returns raw certificate chain provided by the other - end of the SSL channel as a list of DER-encoded bytes. - """ - chain = self._sslobj.get_unverified_chain() - - if chain is None: - return [] - - return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] - - def selected_npn_protocol(self): - """Return the currently selected NPN protocol as a string, or ``None`` - if a next protocol was not negotiated or if NPN is not supported by one - of the peers.""" - warnings.warn( - "ssl NPN is deprecated, use ALPN instead", - DeprecationWarning, - stacklevel=2 - ) - - def selected_alpn_protocol(self): - """Return the currently selected ALPN protocol as a string, or ``None`` - if a next protocol was not negotiated or if ALPN is not supported by one - of the peers.""" - return self._sslobj.selected_alpn_protocol() - - def cipher(self): - """Return the currently selected cipher as a 3-tuple ``(name, - ssl_version, secret_bits)``.""" - return self._sslobj.cipher() - - def shared_ciphers(self): - """Return a list of ciphers shared by the client during the handshake or - None if this is not a valid server connection. - """ - return self._sslobj.shared_ciphers() - - def compression(self): - """Return the current compression algorithm in use, or ``None`` if - compression was not negotiated or not supported by one of the peers.""" - return self._sslobj.compression() - - def pending(self): - """Return the number of bytes that can be read immediately.""" - return self._sslobj.pending() - - def do_handshake(self): - """Start the SSL/TLS handshake.""" - self._sslobj.do_handshake() - - def unwrap(self): - """Start the SSL shutdown handshake.""" - return self._sslobj.shutdown() - - def get_channel_binding(self, cb_type="tls-unique"): - """Get channel binding data for current connection. Raise ValueError - if the requested `cb_type` is not supported. Return bytes of the data - or None if the data is not available (e.g. before the handshake).""" - return self._sslobj.get_channel_binding(cb_type) - - def version(self): - """Return a string identifying the protocol version used by the - current SSL channel. """ - return self._sslobj.version() - - def verify_client_post_handshake(self): - return self._sslobj.verify_client_post_handshake() - - -def _sslcopydoc(func): - """Copy docstring from SSLObject to SSLSocket""" - func.__doc__ = getattr(SSLObject, func.__name__).__doc__ - return func - - -class SSLSocket(socket): - """This class implements a subtype of socket.socket that wraps - the underlying OS socket in an SSL context when necessary, and - provides read and write methods over that channel. """ - - def __init__(self, *args, **kwargs): - raise TypeError( - f"{self.__class__.__name__} does not have a public " - f"constructor. Instances are returned by " - f"SSLContext.wrap_socket()." 
- ) - - @classmethod - def _create(cls, sock, server_side=False, do_handshake_on_connect=True, - suppress_ragged_eofs=True, server_hostname=None, - context=None, session=None): - if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM: - raise NotImplementedError("only stream sockets are supported") - if server_side: - if server_hostname: - raise ValueError("server_hostname can only be specified " - "in client mode") - if session is not None: - raise ValueError("session can only be specified in " - "client mode") - if context.check_hostname and not server_hostname: - raise ValueError("check_hostname requires server_hostname") - - sock_timeout = sock.gettimeout() - kwargs = dict( - family=sock.family, type=sock.type, proto=sock.proto, - fileno=sock.fileno() - ) - self = cls.__new__(cls, **kwargs) - super(SSLSocket, self).__init__(**kwargs) - sock.detach() - # Now SSLSocket is responsible for closing the file descriptor. - try: - self._context = context - self._session = session - self._closed = False - self._sslobj = None - self.server_side = server_side - self.server_hostname = context._encode_hostname(server_hostname) - self.do_handshake_on_connect = do_handshake_on_connect - self.suppress_ragged_eofs = suppress_ragged_eofs - - # See if we are connected - try: - self.getpeername() - except OSError as e: - if e.errno != errno.ENOTCONN: - raise - connected = False - blocking = self.getblocking() - self.setblocking(False) - try: - # We are not connected so this is not supposed to block, but - # testing revealed otherwise on macOS and Windows so we do - # the non-blocking dance regardless. Our raise when any data - # is found means consuming the data is harmless. - notconn_pre_handshake_data = self.recv(1) - except OSError as e: - # EINVAL occurs for recv(1) on non-connected on unix sockets. - if e.errno not in (errno.ENOTCONN, errno.EINVAL): - raise - notconn_pre_handshake_data = b'' - self.setblocking(blocking) - if notconn_pre_handshake_data: - # This prevents pending data sent to the socket before it was - # closed from escaping to the caller who could otherwise - # presume it came through a successful TLS connection. - reason = "Closed before TLS handshake with data in recv buffer." - notconn_pre_handshake_data_error = SSLError(e.errno, reason) - # Add the SSLError attributes that _ssl.c always adds. - notconn_pre_handshake_data_error.reason = reason - notconn_pre_handshake_data_error.library = None - try: - raise notconn_pre_handshake_data_error - finally: - # Explicitly break the reference cycle. - notconn_pre_handshake_data_error = None - else: - connected = True - - self.settimeout(sock_timeout) # Must come after setblocking() calls. 
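A hedged sketch of the memory-BIO pattern that SSLObject (returned by SSLContext.wrap_bio above) is designed for; the hostname is illustrative and the network transport is deliberately left out:

import ssl

ctx = ssl.create_default_context()
incoming = ssl.MemoryBIO()    # bytes received from the network, fed to OpenSSL
outgoing = ssl.MemoryBIO()    # bytes produced by OpenSSL, to be sent by the caller
obj = ctx.wrap_bio(incoming, outgoing, server_hostname="example.com")
try:
    obj.do_handshake()
except ssl.SSLWantReadError:
    client_hello = outgoing.read()   # hand these bytes to your own transport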
- self._connected = connected - if connected: - # create the SSL object - self._sslobj = self._context._wrap_socket( - self, server_side, self.server_hostname, - owner=self, session=self._session, - ) - if do_handshake_on_connect: - timeout = self.gettimeout() - if timeout == 0.0: - # non-blocking - raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets") - self.do_handshake() - except: - try: - self.close() - except OSError: - pass - raise - return self - - @property - @_sslcopydoc - def context(self): - return self._context - - @context.setter - def context(self, ctx): - self._context = ctx - self._sslobj.context = ctx - - @property - @_sslcopydoc - def session(self): - if self._sslobj is not None: - return self._sslobj.session - - @session.setter - def session(self, session): - self._session = session - if self._sslobj is not None: - self._sslobj.session = session - - @property - @_sslcopydoc - def session_reused(self): - if self._sslobj is not None: - return self._sslobj.session_reused - - def dup(self): - raise NotImplementedError("Can't dup() %s instances" % - self.__class__.__name__) - - def _checkClosed(self, msg=None): - # raise an exception here if you wish to check for spurious closes - pass - - def _check_connected(self): - if not self._connected: - # getpeername() will raise ENOTCONN if the socket is really - # not connected; note that we can be connected even without - # _connected being set, e.g. if connect() first returned - # EAGAIN. - self.getpeername() - - def read(self, len=1024, buffer=None): - """Read up to LEN bytes and return them. - Return zero-length string on EOF.""" - - self._checkClosed() - if self._sslobj is None: - raise ValueError("Read on closed or unwrapped SSL socket.") - try: - if buffer is not None: - return self._sslobj.read(len, buffer) - else: - return self._sslobj.read(len) - except SSLError as x: - if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs: - if buffer is not None: - return 0 - else: - return b'' - else: - raise - - def write(self, data): - """Write DATA to the underlying SSL channel. 
Returns - number of bytes of DATA actually transmitted.""" - - self._checkClosed() - if self._sslobj is None: - raise ValueError("Write on closed or unwrapped SSL socket.") - return self._sslobj.write(data) - - @_sslcopydoc - def getpeercert(self, binary_form=False): - self._checkClosed() - self._check_connected() - return self._sslobj.getpeercert(binary_form) - - @_sslcopydoc - def get_verified_chain(self): - chain = self._sslobj.get_verified_chain() - - if chain is None: - return [] - - return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] - - @_sslcopydoc - def get_unverified_chain(self): - chain = self._sslobj.get_unverified_chain() - - if chain is None: - return [] - - return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] - - @_sslcopydoc - def selected_npn_protocol(self): - self._checkClosed() - warnings.warn( - "ssl NPN is deprecated, use ALPN instead", - DeprecationWarning, - stacklevel=2 - ) - return None - - @_sslcopydoc - def selected_alpn_protocol(self): - self._checkClosed() - if self._sslobj is None or not _ssl.HAS_ALPN: - return None - else: - return self._sslobj.selected_alpn_protocol() - - @_sslcopydoc - def cipher(self): - self._checkClosed() - if self._sslobj is None: - return None - else: - return self._sslobj.cipher() - - @_sslcopydoc - def shared_ciphers(self): - self._checkClosed() - if self._sslobj is None: - return None - else: - return self._sslobj.shared_ciphers() - - @_sslcopydoc - def compression(self): - self._checkClosed() - if self._sslobj is None: - return None - else: - return self._sslobj.compression() - - def send(self, data, flags=0): - self._checkClosed() - if self._sslobj is not None: - if flags != 0: - raise ValueError( - "non-zero flags not allowed in calls to send() on %s" % - self.__class__) - return self._sslobj.write(data) - else: - return super().send(data, flags) - - def sendto(self, data, flags_or_addr, addr=None): - self._checkClosed() - if self._sslobj is not None: - raise ValueError("sendto not allowed on instances of %s" % - self.__class__) - elif addr is None: - return super().sendto(data, flags_or_addr) - else: - return super().sendto(data, flags_or_addr, addr) - - def sendmsg(self, *args, **kwargs): - # Ensure programs don't send data unencrypted if they try to - # use this method. - raise NotImplementedError("sendmsg not allowed on instances of %s" % - self.__class__) - - def sendall(self, data, flags=0): - self._checkClosed() - if self._sslobj is not None: - if flags != 0: - raise ValueError( - "non-zero flags not allowed in calls to sendall() on %s" % - self.__class__) - count = 0 - with memoryview(data) as view, view.cast("B") as byte_view: - amount = len(byte_view) - while count < amount: - v = self.send(byte_view[count:]) - count += v - else: - return super().sendall(data, flags) - - def sendfile(self, file, offset=0, count=None): - """Send a file, possibly by using os.sendfile() if this is a - clear-text socket. Return the total number of bytes sent. 
- """ - if self._sslobj is not None: - return self._sendfile_use_send(file, offset, count) - else: - # os.sendfile() works with plain sockets only - return super().sendfile(file, offset, count) - - def recv(self, buflen=1024, flags=0): - self._checkClosed() - if self._sslobj is not None: - if flags != 0: - raise ValueError( - "non-zero flags not allowed in calls to recv() on %s" % - self.__class__) - return self.read(buflen) - else: - return super().recv(buflen, flags) - - def recv_into(self, buffer, nbytes=None, flags=0): - self._checkClosed() - if nbytes is None: - if buffer is not None: - with memoryview(buffer) as view: - nbytes = view.nbytes - if not nbytes: - nbytes = 1024 - else: - nbytes = 1024 - if self._sslobj is not None: - if flags != 0: - raise ValueError( - "non-zero flags not allowed in calls to recv_into() on %s" % - self.__class__) - return self.read(nbytes, buffer) - else: - return super().recv_into(buffer, nbytes, flags) - - def recvfrom(self, buflen=1024, flags=0): - self._checkClosed() - if self._sslobj is not None: - raise ValueError("recvfrom not allowed on instances of %s" % - self.__class__) - else: - return super().recvfrom(buflen, flags) - - def recvfrom_into(self, buffer, nbytes=None, flags=0): - self._checkClosed() - if self._sslobj is not None: - raise ValueError("recvfrom_into not allowed on instances of %s" % - self.__class__) - else: - return super().recvfrom_into(buffer, nbytes, flags) - - def recvmsg(self, *args, **kwargs): - raise NotImplementedError("recvmsg not allowed on instances of %s" % - self.__class__) - - def recvmsg_into(self, *args, **kwargs): - raise NotImplementedError("recvmsg_into not allowed on instances of " - "%s" % self.__class__) - - @_sslcopydoc - def pending(self): - self._checkClosed() - if self._sslobj is not None: - return self._sslobj.pending() - else: - return 0 - - def shutdown(self, how): - self._checkClosed() - self._sslobj = None - super().shutdown(how) - - @_sslcopydoc - def unwrap(self): - if self._sslobj: - s = self._sslobj.shutdown() - self._sslobj = None - return s - else: - raise ValueError("No SSL wrapper around " + str(self)) - - @_sslcopydoc - def verify_client_post_handshake(self): - if self._sslobj: - return self._sslobj.verify_client_post_handshake() - else: - raise ValueError("No SSL wrapper around " + str(self)) - - def _real_close(self): - self._sslobj = None - super()._real_close() - - @_sslcopydoc - def do_handshake(self, block=False): - self._check_connected() - timeout = self.gettimeout() - try: - if timeout == 0.0 and block: - self.settimeout(None) - self._sslobj.do_handshake() - finally: - self.settimeout(timeout) - - def _real_connect(self, addr, connect_ex): - if self.server_side: - raise ValueError("can't connect in server-side mode") - # Here we assume that the socket is client-side, and not - # connected at the time of the call. We connect it, then wrap it. 
- if self._connected or self._sslobj is not None: - raise ValueError("attempt to connect already-connected SSLSocket!") - self._sslobj = self.context._wrap_socket( - self, False, self.server_hostname, - owner=self, session=self._session - ) - try: - if connect_ex: - rc = super().connect_ex(addr) - else: - rc = None - super().connect(addr) - if not rc: - self._connected = True - if self.do_handshake_on_connect: - self.do_handshake() - return rc - except (OSError, ValueError): - self._sslobj = None - raise - - def connect(self, addr): - """Connects to remote ADDR, and then wraps the connection in - an SSL channel.""" - self._real_connect(addr, False) - - def connect_ex(self, addr): - """Connects to remote ADDR, and then wraps the connection in - an SSL channel.""" - return self._real_connect(addr, True) - - def accept(self): - """Accepts a new connection from a remote client, and returns - a tuple containing that new connection wrapped with a server-side - SSL channel, and the address of the remote client.""" - - newsock, addr = super().accept() - newsock = self.context.wrap_socket(newsock, - do_handshake_on_connect=self.do_handshake_on_connect, - suppress_ragged_eofs=self.suppress_ragged_eofs, - server_side=True) - return newsock, addr - - @_sslcopydoc - def get_channel_binding(self, cb_type="tls-unique"): - if self._sslobj is not None: - return self._sslobj.get_channel_binding(cb_type) - else: - if cb_type not in CHANNEL_BINDING_TYPES: - raise ValueError( - "{0} channel binding type not implemented".format(cb_type) - ) - return None - - @_sslcopydoc - def version(self): - if self._sslobj is not None: - return self._sslobj.version() - else: - return None - - -# Python does not support forward declaration of types. -SSLContext.sslsocket_class = SSLSocket -SSLContext.sslobject_class = SSLObject - - -# some utility functions - -def cert_time_to_seconds(cert_time): - """Return the time in seconds since the Epoch, given the timestring - representing the "notBefore" or "notAfter" date from a certificate - in ``"%b %d %H:%M:%S %Y %Z"`` strptime format (C locale). - - "notBefore" or "notAfter" dates must use UTC (RFC 5280). - - Month is one of: Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec - UTC should be specified as GMT (see ASN1_TIME_print()) - """ - from time import strptime - from calendar import timegm - - months = ( - "Jan","Feb","Mar","Apr","May","Jun", - "Jul","Aug","Sep","Oct","Nov","Dec" - ) - time_format = ' %d %H:%M:%S %Y GMT' # NOTE: no month, fixed GMT - try: - month_number = months.index(cert_time[:3].title()) + 1 - except ValueError: - raise ValueError('time data %r does not match ' - 'format "%%b%s"' % (cert_time, time_format)) - else: - # found valid month - tt = strptime(cert_time[3:], time_format) - # return an integer, the previous mktime()-based implementation - # returned a float (fractional seconds are always zero here). 
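A quick sanity check for the cert_time_to_seconds() helper above, using the example timestamp from the Python documentation:

import ssl

ssl.cert_time_to_seconds("Jan  5 09:34:43 2018 GMT")   # 1515144883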
- return timegm((tt[0], month_number) + tt[2:6]) - -PEM_HEADER = "-----BEGIN CERTIFICATE-----" -PEM_FOOTER = "-----END CERTIFICATE-----" - -def DER_cert_to_PEM_cert(der_cert_bytes): - """Takes a certificate in binary DER format and returns the - PEM version of it as a string.""" - - f = str(base64.standard_b64encode(der_cert_bytes), 'ASCII', 'strict') - ss = [PEM_HEADER] - ss += [f[i:i+64] for i in range(0, len(f), 64)] - ss.append(PEM_FOOTER + '\n') - return '\n'.join(ss) - -def PEM_cert_to_DER_cert(pem_cert_string): - """Takes a certificate in ASCII PEM format and returns the - DER-encoded version of it as a byte sequence""" - - if not pem_cert_string.startswith(PEM_HEADER): - raise ValueError("Invalid PEM encoding; must start with %s" - % PEM_HEADER) - if not pem_cert_string.strip().endswith(PEM_FOOTER): - raise ValueError("Invalid PEM encoding; must end with %s" - % PEM_FOOTER) - d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)] - return base64.decodebytes(d.encode('ASCII', 'strict')) - -def get_server_certificate(addr, ssl_version=PROTOCOL_TLS_CLIENT, - ca_certs=None, timeout=_GLOBAL_DEFAULT_TIMEOUT): - """Retrieve the certificate from the server at the specified address, - and return it as a PEM-encoded string. - If 'ca_certs' is specified, validate the server cert against it. - If 'ssl_version' is specified, use it in the connection attempt. - If 'timeout' is specified, use it in the connection attempt. - """ - - host, port = addr - if ca_certs is not None: - cert_reqs = CERT_REQUIRED - else: - cert_reqs = CERT_NONE - context = _create_stdlib_context(ssl_version, - cert_reqs=cert_reqs, - cafile=ca_certs) - with create_connection(addr, timeout=timeout) as sock: - with context.wrap_socket(sock, server_hostname=host) as sslsock: - dercert = sslsock.getpeercert(True) - return DER_cert_to_PEM_cert(dercert) - -def get_protocol_name(protocol_code): - return _PROTOCOL_NAMES.get(protocol_code, '') diff --git a/Python313_13_x86_Template/Lib/statistics.py b/Python313_13_x86_Template/Lib/statistics.py deleted file mode 100644 index c71e83aa..00000000 --- a/Python313_13_x86_Template/Lib/statistics.py +++ /dev/null @@ -1,1817 +0,0 @@ -""" -Basic statistics module. - -This module provides functions for calculating statistics of data, including -averages, variance, and standard deviation. - -Calculating averages --------------------- - -================== ================================================== -Function Description -================== ================================================== -mean Arithmetic mean (average) of data. -fmean Fast, floating-point arithmetic mean. -geometric_mean Geometric mean of data. -harmonic_mean Harmonic mean of data. -median Median (middle value) of data. -median_low Low median of data. -median_high High median of data. -median_grouped Median, or 50th percentile, of grouped data. -mode Mode (most common value) of data. -multimode List of modes (most common values of data). -quantiles Divide data into intervals with equal probability. -================== ================================================== - -Calculate the arithmetic mean ("the average") of data: - ->>> mean([-1.0, 2.5, 3.25, 5.75]) -2.625 - - -Calculate the standard median of discrete data: - ->>> median([2, 3, 4, 5]) -3.5 - - -Calculate the median, or 50th percentile, of data grouped into class intervals -centred on the data values provided. E.g. 
if your data points are rounded to
-the nearest whole number:
-
->>> median_grouped([2, 2, 3, 3, 3, 4]) #doctest: +ELLIPSIS
-2.8333333333...
-
-This should be interpreted in this way: you have two data points in the class
-interval 1.5-2.5, three data points in the class interval 2.5-3.5, and one in
-the class interval 3.5-4.5. The median of these data points is 2.8333...
-
-
-Calculating variability or spread
----------------------------------
-
-==================  =============================================
-Function            Description
-==================  =============================================
-pvariance           Population variance of data.
-variance            Sample variance of data.
-pstdev              Population standard deviation of data.
-stdev               Sample standard deviation of data.
-==================  =============================================
-
-Calculate the standard deviation of sample data:
-
->>> stdev([2.5, 3.25, 5.5, 11.25, 11.75]) #doctest: +ELLIPSIS
-4.38961843444...
-
-If you have previously calculated the mean, you can pass it as the optional
-second argument to the four "spread" functions to avoid recalculating it:
-
->>> data = [1, 2, 2, 4, 4, 4, 5, 6]
->>> mu = mean(data)
->>> pvariance(data, mu)
-2.5
-
-
-Statistics for relations between two inputs
--------------------------------------------
-
-==================  ====================================================
-Function            Description
-==================  ====================================================
-covariance          Sample covariance for two variables.
-correlation         Pearson's correlation coefficient for two variables.
-linear_regression   Intercept and slope for simple linear regression.
-==================  ====================================================
-
-Calculate covariance, Pearson's correlation, and simple linear regression
-for two inputs:
-
->>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
->>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3]
->>> covariance(x, y)
-0.75
->>> correlation(x, y) #doctest: +ELLIPSIS
-0.31622776601...
->>> linear_regression(x, y) #doctest:
-LinearRegression(slope=0.1, intercept=1.5)
-
-
-Exceptions
-----------
-
-A single exception is defined: StatisticsError is a subclass of ValueError.
-
-"""
-
-__all__ = [
- 'NormalDist',
- 'StatisticsError',
- 'correlation',
- 'covariance',
- 'fmean',
- 'geometric_mean',
- 'harmonic_mean',
- 'kde',
- 'kde_random',
- 'linear_regression',
- 'mean',
- 'median',
- 'median_grouped',
- 'median_high',
- 'median_low',
- 'mode',
- 'multimode',
- 'pstdev',
- 'pvariance',
- 'quantiles',
- 'stdev',
- 'variance',
-]
-
-import math
-import numbers
-import random
-import sys
-
-from fractions import Fraction
-from decimal import Decimal
-from itertools import count, groupby, repeat
-from bisect import bisect_left, bisect_right
-from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum, sumprod
-from math import isfinite, isinf, pi, cos, sin, tan, cosh, asin, atan, acos
-from functools import reduce
-from operator import itemgetter
-from collections import Counter, namedtuple, defaultdict
-
-_SQRT2 = sqrt(2.0)
-_random = random
-
-# === Exceptions ===
-
-class StatisticsError(ValueError):
- pass
-
-
-# === Private utilities ===
-
-def _sum(data):
- """_sum(data) -> (type, sum, count)
-
- Return a high-precision sum of the given numeric data as a fraction,
- together with the type to be converted to and the count of items.
-
- Examples
- --------
-
- >>> _sum([3, 2.25, 4.5, -0.5, 0.25])
- (<class 'float'>, Fraction(19, 2), 5)
-
- Some sources of round-off error will be avoided:
-
- # Built-in sum returns zero.
- >>> _sum([1e50, 1, -1e50] * 1000)
- (<class 'float'>, Fraction(1000, 1), 3000)
-
- Fractions and Decimals are also supported:
-
- >>> from fractions import Fraction as F
- >>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)])
- (<class 'fractions.Fraction'>, Fraction(63, 20), 4)
-
- >>> from decimal import Decimal as D
- >>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")]
- >>> _sum(data)
- (<class 'decimal.Decimal'>, Fraction(6963, 10000), 4)
-
- Mixed types are currently treated as an error, except that int is
- allowed.
- """
- count = 0
- types = set()
- types_add = types.add
- partials = {}
- partials_get = partials.get
- for typ, values in groupby(data, type):
- types_add(typ)
- for n, d in map(_exact_ratio, values):
- count += 1
- partials[d] = partials_get(d, 0) + n
- if None in partials:
- # The sum will be a NAN or INF. We can ignore all the finite
- # partials, and just look at this special one.
- total = partials[None]
- assert not _isfinite(total)
- else:
- # Sum all the partial sums using builtin sum.
- total = sum(Fraction(n, d) for d, n in partials.items())
- T = reduce(_coerce, types, int) # or raise TypeError
- return (T, total, count)
-
-
-def _ss(data, c=None):
- """Return the exact mean and sum of square deviations of sequence data.
-
- Calculations are done in a single pass, allowing the input to be an iterator.
-
- If given *c* is used the mean; otherwise, it is calculated from the data.
- Use the *c* argument with care, as it can lead to garbage results.
-
- """
- if c is not None:
- T, ssd, count = _sum((d := x - c) * d for x in data)
- return (T, ssd, c, count)
- count = 0
- types = set()
- types_add = types.add
- sx_partials = defaultdict(int)
- sxx_partials = defaultdict(int)
- for typ, values in groupby(data, type):
- types_add(typ)
- for n, d in map(_exact_ratio, values):
- count += 1
- sx_partials[d] += n
- sxx_partials[d] += n * n
- if not count:
- ssd = c = Fraction(0)
- elif None in sx_partials:
- # The sum will be a NAN or INF. We can ignore all the finite
- # partials, and just look at this special one.
- ssd = c = sx_partials[None]
- assert not _isfinite(ssd)
- else:
- sx = sum(Fraction(n, d) for d, n in sx_partials.items())
- sxx = sum(Fraction(n, d*d) for d, n in sxx_partials.items())
- # This formula has poor numeric properties for floats,
- # but with fractions it is exact.
- ssd = (count * sxx - sx * sx) / count
- c = sx / count
- T = reduce(_coerce, types, int) # or raise TypeError
- return (T, ssd, c, count)
-
-
-def _isfinite(x):
- try:
- return x.is_finite() # Likely a Decimal.
- except AttributeError:
- return math.isfinite(x) # Coerces to float first.
-
-
-def _coerce(T, S):
- """Coerce types T and S to a common type, or raise TypeError.
-
- Coercion rules are currently an implementation detail. See the CoerceTest
- test class in test_statistics for details.
- """
- # See http://bugs.python.org/issue24068.
- assert T is not bool, "initial type T is bool"
- # If the types are the same, no need to coerce anything. Put this
- # first, so that the usual case (no coercion needed) happens as soon
- # as possible.
- if T is S: return T
- # Mixed int & other coerce to the other type.
- if S is int or S is bool: return T
- if T is int: return S
- # If one is a (strict) subclass of the other, coerce to the subclass.
- if issubclass(S, T): return S
- if issubclass(T, S): return T
- # Ints coerce to the other type.
- if issubclass(T, int): return S
- if issubclass(S, int): return T
- # Mixed fraction & float coerces to float (or float subclass).
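The exactness bought by _sum()'s Fraction accumulation is easy to demonstrate: built-in sum() loses the ones to round-off, while mean() (which goes through _sum) does not:

from statistics import mean

data = [1e50, 1, -1e50] * 1000
print(sum(data))    # 0.0 -- float round-off swallows every 1
print(mean(data))   # 0.3333333333333333 -- computed exactly as Fraction(1000)/3000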
- if issubclass(T, Fraction) and issubclass(S, float): - return S - if issubclass(T, float) and issubclass(S, Fraction): - return T - # Any other combination is disallowed. - msg = "don't know how to coerce %s and %s" - raise TypeError(msg % (T.__name__, S.__name__)) - - -def _exact_ratio(x): - """Return Real number x to exact (numerator, denominator) pair. - - >>> _exact_ratio(0.25) - (1, 4) - - x is expected to be an int, Fraction, Decimal or float. - """ - - # XXX We should revisit whether using fractions to accumulate exact - # ratios is the right way to go. - - # The integer ratios for binary floats can have numerators or - # denominators with over 300 decimal digits. The problem is more - # acute with decimal floats where the default decimal context - # supports a huge range of exponents from Emin=-999999 to - # Emax=999999. When expanded with as_integer_ratio(), numbers like - # Decimal('3.14E+5000') and Decimal('3.14E-5000') have large - # numerators or denominators that will slow computation. - - # When the integer ratios are accumulated as fractions, the size - # grows to cover the full range from the smallest magnitude to the - # largest. For example, Fraction(3.14E+300) + Fraction(3.14E-300), - # has a 616 digit numerator. Likewise, - # Fraction(Decimal('3.14E+5000')) + Fraction(Decimal('3.14E-5000')) - # has 10,003 digit numerator. - - # This doesn't seem to have been problem in practice, but it is a - # potential pitfall. - - try: - return x.as_integer_ratio() - except AttributeError: - pass - except (OverflowError, ValueError): - # float NAN or INF. - assert not _isfinite(x) - return (x, None) - try: - # x may be an Integral ABC. - return (x.numerator, x.denominator) - except AttributeError: - msg = f"can't convert type '{type(x).__name__}' to numerator/denominator" - raise TypeError(msg) - - -def _convert(value, T): - """Convert value to given numeric type T.""" - if type(value) is T: - # This covers the cases where T is Fraction, or where value is - # a NAN or INF (Decimal or float). - return value - if issubclass(T, int) and value.denominator != 1: - T = float - try: - # FIXME: what do we do if this overflows? - return T(value) - except TypeError: - if issubclass(T, Decimal): - return T(value.numerator) / T(value.denominator) - else: - raise - - -def _fail_neg(values, errmsg='negative value'): - """Iterate over values, failing if any are less than zero.""" - for x in values: - if x < 0: - raise StatisticsError(errmsg) - yield x - - -def _rank(data, /, *, key=None, reverse=False, ties='average', start=1) -> list[float]: - """Rank order a dataset. The lowest value has rank 1. - - Ties are averaged so that equal values receive the same rank: - - >>> data = [31, 56, 31, 25, 75, 18] - >>> _rank(data) - [3.5, 5.0, 3.5, 2.0, 6.0, 1.0] - - The operation is idempotent: - - >>> _rank([3.5, 5.0, 3.5, 2.0, 6.0, 1.0]) - [3.5, 5.0, 3.5, 2.0, 6.0, 1.0] - - It is possible to rank the data in reverse order so that the - highest value has rank 1. 
Also, a key-function can extract - the field to be ranked: - - >>> goals = [('eagles', 45), ('bears', 48), ('lions', 44)] - >>> _rank(goals, key=itemgetter(1), reverse=True) - [2.0, 1.0, 3.0] - - Ranks are conventionally numbered starting from one; however, - setting *start* to zero allows the ranks to be used as array indices: - - >>> prize = ['Gold', 'Silver', 'Bronze', 'Certificate'] - >>> scores = [8.1, 7.3, 9.4, 8.3] - >>> [prize[int(i)] for i in _rank(scores, start=0, reverse=True)] - ['Bronze', 'Certificate', 'Gold', 'Silver'] - - """ - # If this function becomes public at some point, more thought - # needs to be given to the signature. A list of ints is - # plausible when ties is "min" or "max". When ties is "average", - # either list[float] or list[Fraction] is plausible. - - # Default handling of ties matches scipy.stats.mstats.spearmanr. - if ties != 'average': - raise ValueError(f'Unknown tie resolution method: {ties!r}') - if key is not None: - data = map(key, data) - val_pos = sorted(zip(data, count()), reverse=reverse) - i = start - 1 - result = [0] * len(val_pos) - for _, g in groupby(val_pos, key=itemgetter(0)): - group = list(g) - size = len(group) - rank = i + (size + 1) / 2 - for value, orig_pos in group: - result[orig_pos] = rank - i += size - return result - - -def _integer_sqrt_of_frac_rto(n: int, m: int) -> int: - """Square root of n/m, rounded to the nearest integer using round-to-odd.""" - # Reference: https://www.lri.fr/~melquion/doc/05-imacs17_1-expose.pdf - a = math.isqrt(n // m) - return a | (a*a*m != n) - - -# For 53 bit precision floats, the bit width used in -# _float_sqrt_of_frac() is 109. -_sqrt_bit_width: int = 2 * sys.float_info.mant_dig + 3 - - -def _float_sqrt_of_frac(n: int, m: int) -> float: - """Square root of n/m as a float, correctly rounded.""" - # See principle and proof sketch at: https://bugs.python.org/msg407078 - q = (n.bit_length() - m.bit_length() - _sqrt_bit_width) // 2 - if q >= 0: - numerator = _integer_sqrt_of_frac_rto(n, m << 2 * q) << q - denominator = 1 - else: - numerator = _integer_sqrt_of_frac_rto(n << -2 * q, m) - denominator = 1 << -q - return numerator / denominator # Convert to float - - -def _decimal_sqrt_of_frac(n: int, m: int) -> Decimal: - """Square root of n/m as a Decimal, correctly rounded.""" - # Premise: For decimal, computing (n/m).sqrt() can be off - # by 1 ulp from the correctly rounded result. - # Method: Check the result, moving up or down a step if needed. - if n <= 0: - if not n: - return Decimal('0.0') - n, m = -n, -m - - root = (Decimal(n) / Decimal(m)).sqrt() - nr, dr = root.as_integer_ratio() - - plus = root.next_plus() - np, dp = plus.as_integer_ratio() - # test: n / m > ((root + plus) / 2) ** 2 - if 4 * n * (dr*dp)**2 > m * (dr*np + dp*nr)**2: - return plus - - minus = root.next_minus() - nm, dm = minus.as_integer_ratio() - # test: n / m < ((root + minus) / 2) ** 2 - if 4 * n * (dr*dm)**2 < m * (dr*nm + dm*nr)**2: - return minus - - return root - - -# === Measures of central tendency (averages) === - -def mean(data): - """Return the sample arithmetic mean of data. - - >>> mean([1, 2, 3, 4, 4]) - 2.8 - - >>> from fractions import Fraction as F - >>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)]) - Fraction(13, 21) - - >>> from decimal import Decimal as D - >>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")]) - Decimal('0.5625') - - If ``data`` is empty, StatisticsError will be raised. 
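fmean(), defined a little further below, complements mean() with a fast float-only path and optional weights; an illustrative weighted average (the numbers are made up):

from statistics import fmean

grades  = [85, 92, 83, 91]
weights = [0.20, 0.20, 0.30, 0.30]
print(fmean(grades, weights=weights))   # 87.6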
- """ - T, total, n = _sum(data) - if n < 1: - raise StatisticsError('mean requires at least one data point') - return _convert(total / n, T) - - -def fmean(data, weights=None): - """Convert data to floats and compute the arithmetic mean. - - This runs faster than the mean() function and it always returns a float. - If the input dataset is empty, it raises a StatisticsError. - - >>> fmean([3.5, 4.0, 5.25]) - 4.25 - """ - if weights is None: - try: - n = len(data) - except TypeError: - # Handle iterators that do not define __len__(). - n = 0 - def count(iterable): - nonlocal n - for n, x in enumerate(iterable, start=1): - yield x - data = count(data) - total = fsum(data) - if not n: - raise StatisticsError('fmean requires at least one data point') - return total / n - if not isinstance(weights, (list, tuple)): - weights = list(weights) - try: - num = sumprod(data, weights) - except ValueError: - raise StatisticsError('data and weights must be the same length') - den = fsum(weights) - if not den: - raise StatisticsError('sum of weights must be non-zero') - return num / den - - -def geometric_mean(data): - """Convert data to floats and compute the geometric mean. - - Raises a StatisticsError if the input dataset is empty - or if it contains a negative value. - - Returns zero if the product of inputs is zero. - - No special efforts are made to achieve exact results. - (However, this may change in the future.) - - >>> round(geometric_mean([54, 24, 36]), 9) - 36.0 - """ - n = 0 - found_zero = False - def count_positive(iterable): - nonlocal n, found_zero - for n, x in enumerate(iterable, start=1): - if x > 0.0 or math.isnan(x): - yield x - elif x == 0.0: - found_zero = True - else: - raise StatisticsError('No negative inputs allowed', x) - total = fsum(map(log, count_positive(data))) - if not n: - raise StatisticsError('Must have a non-empty dataset') - if math.isnan(total): - return math.nan - if found_zero: - return math.nan if total == math.inf else 0.0 - return exp(total / n) - - -def harmonic_mean(data, weights=None): - """Return the harmonic mean of data. - - The harmonic mean is the reciprocal of the arithmetic mean of the - reciprocals of the data. It can be used for averaging ratios or - rates, for example speeds. - - Suppose a car travels 40 km/hr for 5 km and then speeds-up to - 60 km/hr for another 5 km. What is the average speed? - - >>> harmonic_mean([40, 60]) - 48.0 - - Suppose a car travels 40 km/hr for 5 km, and when traffic clears, - speeds-up to 60 km/hr for the remaining 30 km of the journey. What - is the average speed? - - >>> harmonic_mean([40, 60], weights=[5, 30]) - 56.0 - - If ``data`` is empty, or any element is less than zero, - ``harmonic_mean`` will raise ``StatisticsError``. 
- """ - if iter(data) is data: - data = list(data) - errmsg = 'harmonic mean does not support negative values' - n = len(data) - if n < 1: - raise StatisticsError('harmonic_mean requires at least one data point') - elif n == 1 and weights is None: - x = data[0] - if isinstance(x, (numbers.Real, Decimal)): - if x < 0: - raise StatisticsError(errmsg) - return x - else: - raise TypeError('unsupported type') - if weights is None: - weights = repeat(1, n) - sum_weights = n - else: - if iter(weights) is weights: - weights = list(weights) - if len(weights) != n: - raise StatisticsError('Number of weights does not match data size') - _, sum_weights, _ = _sum(w for w in _fail_neg(weights, errmsg)) - try: - data = _fail_neg(data, errmsg) - T, total, count = _sum(w / x if w else 0 for w, x in zip(weights, data)) - except ZeroDivisionError: - return 0 - if total <= 0: - raise StatisticsError('Weighted sum must be positive') - return _convert(sum_weights / total, T) - -# FIXME: investigate ways to calculate medians without sorting? Quickselect? -def median(data): - """Return the median (middle value) of numeric data. - - When the number of data points is odd, return the middle data point. - When the number of data points is even, the median is interpolated by - taking the average of the two middle values: - - >>> median([1, 3, 5]) - 3 - >>> median([1, 3, 5, 7]) - 4.0 - - """ - data = sorted(data) - n = len(data) - if n == 0: - raise StatisticsError("no median for empty data") - if n % 2 == 1: - return data[n // 2] - else: - i = n // 2 - return (data[i - 1] + data[i]) / 2 - - -def median_low(data): - """Return the low median of numeric data. - - When the number of data points is odd, the middle value is returned. - When it is even, the smaller of the two middle values is returned. - - >>> median_low([1, 3, 5]) - 3 - >>> median_low([1, 3, 5, 7]) - 3 - - """ - data = sorted(data) - n = len(data) - if n == 0: - raise StatisticsError("no median for empty data") - if n % 2 == 1: - return data[n // 2] - else: - return data[n // 2 - 1] - - -def median_high(data): - """Return the high median of data. - - When the number of data points is odd, the middle value is returned. - When it is even, the larger of the two middle values is returned. - - >>> median_high([1, 3, 5]) - 3 - >>> median_high([1, 3, 5, 7]) - 5 - - """ - data = sorted(data) - n = len(data) - if n == 0: - raise StatisticsError("no median for empty data") - return data[n // 2] - - -def median_grouped(data, interval=1.0): - """Estimates the median for numeric data binned around the midpoints - of consecutive, fixed-width intervals. - - The *data* can be any iterable of numeric data with each value being - exactly the midpoint of a bin. At least one value must be present. - - The *interval* is width of each bin. - - For example, demographic information may have been summarized into - consecutive ten-year age groups with each group being represented - by the 5-year midpoints of the intervals: - - >>> demographics = Counter({ - ... 25: 172, # 20 to 30 years old - ... 35: 484, # 30 to 40 years old - ... 45: 387, # 40 to 50 years old - ... 55: 22, # 50 to 60 years old - ... 65: 6, # 60 to 70 years old - ... }) - - The 50th percentile (median) is the 536th person out of the 1071 - member cohort. That person is in the 30 to 40 year old age group. - - The regular median() function would assume that everyone in the - tricenarian age group was exactly 35 years old. 
A more tenable - assumption is that the 484 members of that age group are evenly - distributed between 30 and 40. For that, we use median_grouped(). - - >>> data = list(demographics.elements()) - >>> median(data) - 35 - >>> round(median_grouped(data, interval=10), 1) - 37.5 - - The caller is responsible for making sure the data points are separated - by exact multiples of *interval*. This is essential for getting a - correct result. The function does not check this precondition. - - Inputs may be any numeric type that can be coerced to a float during - the interpolation step. - - """ - data = sorted(data) - n = len(data) - if not n: - raise StatisticsError("no median for empty data") - - # Find the value at the midpoint. Remember this corresponds to the - # midpoint of the class interval. - x = data[n // 2] - - # Using O(log n) bisection, find where all the x values occur in the data. - # All x will lie within data[i:j]. - i = bisect_left(data, x) - j = bisect_right(data, x, lo=i) - - # Coerce to floats, raising a TypeError if not possible - try: - interval = float(interval) - x = float(x) - except ValueError: - raise TypeError(f'Value cannot be converted to a float') - - # Interpolate the median using the formula found at: - # https://www.cuemath.com/data/median-of-grouped-data/ - L = x - interval / 2.0 # Lower limit of the median interval - cf = i # Cumulative frequency of the preceding interval - f = j - i # Number of elements in the median internal - return L + interval * (n / 2 - cf) / f - - -def mode(data): - """Return the most common data point from discrete or nominal data. - - ``mode`` assumes discrete data, and returns a single value. This is the - standard treatment of the mode as commonly taught in schools: - - >>> mode([1, 1, 2, 3, 3, 3, 3, 4]) - 3 - - This also works with nominal (non-numeric) data: - - >>> mode(["red", "blue", "blue", "red", "green", "red", "red"]) - 'red' - - If there are multiple modes with same frequency, return the first one - encountered: - - >>> mode(['red', 'red', 'green', 'blue', 'blue']) - 'red' - - If *data* is empty, ``mode``, raises StatisticsError. - - """ - pairs = Counter(iter(data)).most_common(1) - try: - return pairs[0][0] - except IndexError: - raise StatisticsError('no mode for empty data') from None - - -def multimode(data): - """Return a list of the most frequently occurring values. - - Will return more than one result if there are multiple modes - or an empty list if *data* is empty. - - >>> multimode('aabbbbbbbbcc') - ['b'] - >>> multimode('aabbbbccddddeeffffgg') - ['b', 'd', 'f'] - >>> multimode('') - [] - """ - counts = Counter(iter(data)) - if not counts: - return [] - maxcount = max(counts.values()) - return [value for value, count in counts.items() if count == maxcount] - - -def kde(data, h, kernel='normal', *, cumulative=False): - """Kernel Density Estimation: Create a continuous probability density - function or cumulative distribution function from discrete samples. - - The basic idea is to smooth the data using a kernel function - to help draw inferences about a population from a sample. - - The degree of smoothing is controlled by the scaling parameter h - which is called the bandwidth. Smaller values emphasize local - features while larger values give smoother results. - - The kernel determines the relative weights of the sample data - points. Generally, the choice of kernel shape does not matter - as much as the more influential bandwidth smoothing parameter. 
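Pulling the docstring's own sample into one runnable snippet (bandwidth h=1.5 as above):

from statistics import kde

sample = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2]
f_hat = kde(sample, h=1.5)                  # probability density estimate
cdf = kde(sample, h=1.5, cumulative=True)   # cumulative distribution estimate
print(round(cdf(7.5) - cdf(4.5), 2))        # 0.22 = P(4.5 < X <= 7.5)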
- - Kernels that give some weight to every sample point: - - normal (gauss) - logistic - sigmoid - - Kernels that only give weight to sample points within - the bandwidth: - - rectangular (uniform) - triangular - parabolic (epanechnikov) - quartic (biweight) - triweight - cosine - - If *cumulative* is true, will return a cumulative distribution function. - - A StatisticsError will be raised if the data sequence is empty. - - Example - ------- - - Given a sample of six data points, construct a continuous - function that estimates the underlying probability density: - - >>> sample = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2] - >>> f_hat = kde(sample, h=1.5) - - Compute the area under the curve: - - >>> area = sum(f_hat(x) for x in range(-20, 20)) - >>> round(area, 4) - 1.0 - - Plot the estimated probability density function at - evenly spaced points from -6 to 10: - - >>> for x in range(-6, 11): - ... density = f_hat(x) - ... plot = ' ' * int(density * 400) + 'x' - ... print(f'{x:2}: {density:.3f} {plot}') - ... - -6: 0.002 x - -5: 0.009 x - -4: 0.031 x - -3: 0.070 x - -2: 0.111 x - -1: 0.125 x - 0: 0.110 x - 1: 0.086 x - 2: 0.068 x - 3: 0.059 x - 4: 0.066 x - 5: 0.082 x - 6: 0.082 x - 7: 0.058 x - 8: 0.028 x - 9: 0.009 x - 10: 0.002 x - - Estimate P(4.5 < X <= 7.5), the probability that a new sample value - will be between 4.5 and 7.5: - - >>> cdf = kde(sample, h=1.5, cumulative=True) - >>> round(cdf(7.5) - cdf(4.5), 2) - 0.22 - - References - ---------- - - Kernel density estimation and its application: - https://www.itm-conferences.org/articles/itmconf/pdf/2018/08/itmconf_sam2018_00037.pdf - - Kernel functions in common use: - https://en.wikipedia.org/wiki/Kernel_(statistics)#kernel_functions_in_common_use - - Interactive graphical demonstration and exploration: - https://demonstrations.wolfram.com/KernelDensityEstimation/ - - Kernel estimation of cumulative distribution function of a random variable with bounded support - https://www.econstor.eu/bitstream/10419/207829/1/10.21307_stattrans-2016-037.pdf - - """ - - n = len(data) - if not n: - raise StatisticsError('Empty data sequence') - - if not isinstance(data[0], (int, float)): - raise TypeError('Data sequence must contain ints or floats') - - if h <= 0.0: - raise StatisticsError(f'Bandwidth h must be positive, not {h=!r}') - - match kernel: - - case 'normal' | 'gauss': - sqrt2pi = sqrt(2 * pi) - sqrt2 = sqrt(2) - K = lambda t: exp(-1/2 * t * t) / sqrt2pi - W = lambda t: 1/2 * (1.0 + erf(t / sqrt2)) - support = None - - case 'logistic': - # 1.0 / (exp(t) + 2.0 + exp(-t)) - K = lambda t: 1/2 / (1.0 + cosh(t)) - W = lambda t: 1.0 - 1.0 / (exp(t) + 1.0) - support = None - - case 'sigmoid': - # (2/pi) / (exp(t) + exp(-t)) - c1 = 1 / pi - c2 = 2 / pi - K = lambda t: c1 / cosh(t) - W = lambda t: c2 * atan(exp(t)) - support = None - - case 'rectangular' | 'uniform': - K = lambda t: 1/2 - W = lambda t: 1/2 * t + 1/2 - support = 1.0 - - case 'triangular': - K = lambda t: 1.0 - abs(t) - W = lambda t: t*t * (1/2 if t < 0.0 else -1/2) + t + 1/2 - support = 1.0 - - case 'parabolic' | 'epanechnikov': - K = lambda t: 3/4 * (1.0 - t * t) - W = lambda t: -1/4 * t**3 + 3/4 * t + 1/2 - support = 1.0 - - case 'quartic' | 'biweight': - K = lambda t: 15/16 * (1.0 - t * t) ** 2 - W = lambda t: 3/16 * t**5 - 5/8 * t**3 + 15/16 * t + 1/2 - support = 1.0 - - case 'triweight': - K = lambda t: 35/32 * (1.0 - t * t) ** 3 - W = lambda t: 35/32 * (-1/7*t**7 + 3/5*t**5 - t**3 + t) + 1/2 - support = 1.0 - - case 'cosine': - c1 = pi / 4 - c2 = pi / 2 - K = lambda t: c1 * cos(c2 
* t) - W = lambda t: 1/2 * sin(c2 * t) + 1/2 - support = 1.0 - - case _: - raise StatisticsError(f'Unknown kernel name: {kernel!r}') - - if support is None: - - def pdf(x): - n = len(data) - return sum(K((x - x_i) / h) for x_i in data) / (n * h) - - def cdf(x): - n = len(data) - return sum(W((x - x_i) / h) for x_i in data) / n - - else: - - sample = sorted(data) - bandwidth = h * support - - def pdf(x): - nonlocal n, sample - if len(data) != n: - sample = sorted(data) - n = len(data) - i = bisect_left(sample, x - bandwidth) - j = bisect_right(sample, x + bandwidth) - supported = sample[i : j] - return sum(K((x - x_i) / h) for x_i in supported) / (n * h) - - def cdf(x): - nonlocal n, sample - if len(data) != n: - sample = sorted(data) - n = len(data) - i = bisect_left(sample, x - bandwidth) - j = bisect_right(sample, x + bandwidth) - supported = sample[i : j] - return sum((W((x - x_i) / h) for x_i in supported), i) / n - - if cumulative: - cdf.__doc__ = f'CDF estimate with {h=!r} and {kernel=!r}' - return cdf - - else: - pdf.__doc__ = f'PDF estimate with {h=!r} and {kernel=!r}' - return pdf - - -# Notes on methods for computing quantiles -# ---------------------------------------- -# -# There is no one perfect way to compute quantiles. Here we offer -# two methods that serve common needs. Most other packages -# surveyed offered at least one or both of these two, making them -# "standard" in the sense of "widely-adopted and reproducible". -# They are also easy to explain, easy to compute manually, and have -# straight-forward interpretations that aren't surprising. - -# The default method is known as "R6", "PERCENTILE.EXC", or "expected -# value of rank order statistics". The alternative method is known as -# "R7", "PERCENTILE.INC", or "mode of rank order statistics". - -# For sample data where there is a positive probability for values -# beyond the range of the data, the R6 exclusive method is a -# reasonable choice. Consider a random sample of nine values from a -# population with a uniform distribution from 0.0 to 1.0. The -# distribution of the third ranked sample point is described by -# betavariate(alpha=3, beta=7) which has mode=0.250, median=0.286, and -# mean=0.300. Only the latter (which corresponds with R6) gives the -# desired cut point with 30% of the population falling below that -# value, making it comparable to a result from an inv_cdf() function. -# The R6 exclusive method is also idempotent. - -# For describing population data where the end points are known to -# be included in the data, the R7 inclusive method is a reasonable -# choice. Instead of the mean, it uses the mode of the beta -# distribution for the interior points. Per Hyndman & Fan, "One nice -# property is that the vertices of Q7(p) divide the range into n - 1 -# intervals, and exactly 100p% of the intervals lie to the left of -# Q7(p) and 100(1 - p)% of the intervals lie to the right of Q7(p)." - -# If needed, other methods could be added. However, for now, the -# position is that fewer options make for easier choices and that -# external packages can be used for anything more advanced. - -def quantiles(data, *, n=4, method='exclusive'): - """Divide *data* into *n* continuous intervals with equal probability. - - Returns a list of (n - 1) cut points separating the intervals. - - Set *n* to 4 for quartiles (the default). Set *n* to 10 for deciles. - Set *n* to 100 for percentiles which gives the 99 cuts points that - separate *data* in to 100 equal sized groups. 
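The contrast between the two methods discussed above is easiest to see on a tiny dataset; the values below follow directly from the interpolation formulas that follow:

from statistics import quantiles

data = [1, 2, 3, 4, 5]
print(quantiles(data))                        # [1.5, 3.0, 4.5]  exclusive (R6)
print(quantiles(data, method='inclusive'))    # [2.0, 3.0, 4.0]  inclusive (R7)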
- - The *data* can be any iterable containing sample data. - The cut points are linearly interpolated between data points. - - If *method* is set to *inclusive*, *data* is treated as population - data. The minimum value is treated as the 0th percentile and the - maximum value is treated as the 100th percentile. - """ - if n < 1: - raise StatisticsError('n must be at least 1') - data = sorted(data) - ld = len(data) - if ld < 2: - if ld == 1: - return data * (n - 1) - raise StatisticsError('must have at least one data point') - - if method == 'inclusive': - m = ld - 1 - result = [] - for i in range(1, n): - j, delta = divmod(i * m, n) - interpolated = (data[j] * (n - delta) + data[j + 1] * delta) / n - result.append(interpolated) - return result - - if method == 'exclusive': - m = ld + 1 - result = [] - for i in range(1, n): - j = i * m // n # rescale i to m/n - j = 1 if j < 1 else ld-1 if j > ld-1 else j # clamp to 1 .. ld-1 - delta = i*m - j*n # exact integer math - interpolated = (data[j - 1] * (n - delta) + data[j] * delta) / n - result.append(interpolated) - return result - - raise ValueError(f'Unknown method: {method!r}') - - -# === Measures of spread === - -# See http://mathworld.wolfram.com/Variance.html -# http://mathworld.wolfram.com/SampleVariance.html - - -def variance(data, xbar=None): - """Return the sample variance of data. - - data should be an iterable of Real-valued numbers, with at least two - values. The optional argument xbar, if given, should be the mean of - the data. If it is missing or None, the mean is automatically calculated. - - Use this function when your data is a sample from a population. To - calculate the variance from the entire population, see ``pvariance``. - - Examples: - - >>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5] - >>> variance(data) - 1.3720238095238095 - - If you have already calculated the mean of your data, you can pass it as - the optional second argument ``xbar`` to avoid recalculating it: - - >>> m = mean(data) - >>> variance(data, m) - 1.3720238095238095 - - This function does not check that ``xbar`` is actually the mean of - ``data``. Giving arbitrary values for ``xbar`` may lead to invalid or - impossible results. - - Decimals and Fractions are supported: - - >>> from decimal import Decimal as D - >>> variance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")]) - Decimal('31.01875') - - >>> from fractions import Fraction as F - >>> variance([F(1, 6), F(1, 2), F(5, 3)]) - Fraction(67, 108) - - """ - T, ss, c, n = _ss(data, xbar) - if n < 2: - raise StatisticsError('variance requires at least two data points') - return _convert(ss / (n - 1), T) - - -def pvariance(data, mu=None): - """Return the population variance of ``data``. - - data should be a sequence or iterable of Real-valued numbers, with at least one - value. The optional argument mu, if given, should be the mean of - the data. If it is missing or None, the mean is automatically calculated. - - Use this function to calculate the variance from the entire population. - To estimate the variance from a sample, the ``variance`` function is - usually a better choice. 
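For reference, a minimal usage sketch of the kde() and quantiles() functions removed above (assumes the stock CPython 3.13 statistics module; the expected density value echoes the kde() docstring plot):

from statistics import kde, quantiles

sample = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2]
f_hat = kde(sample, h=1.5)                    # density estimate, default normal kernel
print(round(f_hat(0), 3))                     # about 0.110, matching the docstring plot
print(quantiles(sample))                      # three quartile cut points (R6 exclusive)
print(quantiles(sample, method='inclusive'))  # R7 inclusive variant for population data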
- - Examples: - - >>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25] - >>> pvariance(data) - 1.25 - - If you have already calculated the mean of the data, you can pass it as - the optional second argument to avoid recalculating it: - - >>> mu = mean(data) - >>> pvariance(data, mu) - 1.25 - - Decimals and Fractions are supported: - - >>> from decimal import Decimal as D - >>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")]) - Decimal('24.815') - - >>> from fractions import Fraction as F - >>> pvariance([F(1, 4), F(5, 4), F(1, 2)]) - Fraction(13, 72) - - """ - T, ss, c, n = _ss(data, mu) - if n < 1: - raise StatisticsError('pvariance requires at least one data point') - return _convert(ss / n, T) - - -def stdev(data, xbar=None): - """Return the square root of the sample variance. - - See ``variance`` for arguments and other details. - - >>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75]) - 1.0810874155219827 - - """ - T, ss, c, n = _ss(data, xbar) - if n < 2: - raise StatisticsError('stdev requires at least two data points') - mss = ss / (n - 1) - try: - mss_numerator = mss.numerator - mss_denominator = mss.denominator - except AttributeError: - raise ValueError('inf or nan encountered in data') - if issubclass(T, Decimal): - return _decimal_sqrt_of_frac(mss_numerator, mss_denominator) - return _float_sqrt_of_frac(mss_numerator, mss_denominator) - - -def pstdev(data, mu=None): - """Return the square root of the population variance. - - See ``pvariance`` for arguments and other details. - - >>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75]) - 0.986893273527251 - - """ - T, ss, c, n = _ss(data, mu) - if n < 1: - raise StatisticsError('pstdev requires at least one data point') - mss = ss / n - try: - mss_numerator = mss.numerator - mss_denominator = mss.denominator - except AttributeError: - raise ValueError('inf or nan encountered in data') - if issubclass(T, Decimal): - return _decimal_sqrt_of_frac(mss_numerator, mss_denominator) - return _float_sqrt_of_frac(mss_numerator, mss_denominator) - - -def _mean_stdev(data): - """In one pass, compute the mean and sample standard deviation as floats.""" - T, ss, xbar, n = _ss(data) - if n < 2: - raise StatisticsError('stdev requires at least two data points') - mss = ss / (n - 1) - try: - return float(xbar), _float_sqrt_of_frac(mss.numerator, mss.denominator) - except AttributeError: - # Handle Nans and Infs gracefully - return float(xbar), float(xbar) / float(ss) - -def _sqrtprod(x: float, y: float) -> float: - "Return sqrt(x * y) computed with improved accuracy and without overflow/underflow." - h = sqrt(x * y) - if not isfinite(h): - if isinf(h) and not isinf(x) and not isinf(y): - # Finite inputs overflowed, so scale down, and recompute. - scale = 2.0 ** -512 # sqrt(1 / sys.float_info.max) - return _sqrtprod(scale * x, scale * y) / scale - return h - if not h: - if x and y: - # Non-zero inputs underflowed, so scale up, and recompute. - # Scale: 1 / sqrt(sys.float_info.min * sys.float_info.epsilon) - scale = 2.0 ** 537 - return _sqrtprod(scale * x, scale * y) / scale - return h - # Improve accuracy with a differential correction. 
- # https://www.wolframalpha.com/input/?i=Maclaurin+series+sqrt%28h**2+%2B+x%29+at+x%3D0 - d = sumprod((x, h), (y, -h)) - return h + d / (2.0 * h) - - -# === Statistics for relations between two inputs === - -# See https://en.wikipedia.org/wiki/Covariance -# https://en.wikipedia.org/wiki/Pearson_correlation_coefficient -# https://en.wikipedia.org/wiki/Simple_linear_regression - - -def covariance(x, y, /): - """Covariance - - Return the sample covariance of two inputs *x* and *y*. Covariance - is a measure of the joint variability of two inputs. - - >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9] - >>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3] - >>> covariance(x, y) - 0.75 - >>> z = [9, 8, 7, 6, 5, 4, 3, 2, 1] - >>> covariance(x, z) - -7.5 - >>> covariance(z, x) - -7.5 - - """ - n = len(x) - if len(y) != n: - raise StatisticsError('covariance requires that both inputs have same number of data points') - if n < 2: - raise StatisticsError('covariance requires at least two data points') - xbar = fsum(x) / n - ybar = fsum(y) / n - sxy = sumprod((xi - xbar for xi in x), (yi - ybar for yi in y)) - return sxy / (n - 1) - - -def correlation(x, y, /, *, method='linear'): - """Pearson's correlation coefficient - - Return the Pearson's correlation coefficient for two inputs. Pearson's - correlation coefficient *r* takes values between -1 and +1. It measures - the strength and direction of a linear relationship. - - >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9] - >>> y = [9, 8, 7, 6, 5, 4, 3, 2, 1] - >>> correlation(x, x) - 1.0 - >>> correlation(x, y) - -1.0 - - If *method* is "ranked", computes Spearman's rank correlation coefficient - for two inputs. The data is replaced by ranks. Ties are averaged - so that equal values receive the same rank. The resulting coefficient - measures the strength of a monotonic relationship. - - Spearman's rank correlation coefficient is appropriate for ordinal - data or for continuous data that doesn't meet the linear proportion - requirement for Pearson's correlation coefficient. - """ - n = len(x) - if len(y) != n: - raise StatisticsError('correlation requires that both inputs have same number of data points') - if n < 2: - raise StatisticsError('correlation requires at least two data points') - if method not in {'linear', 'ranked'}: - raise ValueError(f'Unknown method: {method!r}') - if method == 'ranked': - start = (n - 1) / -2 # Center rankings around zero - x = _rank(x, start=start) - y = _rank(y, start=start) - else: - xbar = fsum(x) / n - ybar = fsum(y) / n - x = [xi - xbar for xi in x] - y = [yi - ybar for yi in y] - sxy = sumprod(x, y) - sxx = sumprod(x, x) - syy = sumprod(y, y) - try: - return sxy / _sqrtprod(sxx, syy) - except ZeroDivisionError: - raise StatisticsError('at least one of the inputs is constant') - - -LinearRegression = namedtuple('LinearRegression', ('slope', 'intercept')) - - -def linear_regression(x, y, /, *, proportional=False): - """Slope and intercept for simple linear regression. - - Return the slope and intercept of simple linear regression - parameters estimated using ordinary least squares. Simple linear - regression describes the relationship between an independent variable - *x* and a dependent variable *y* in terms of a linear function: - - y = slope * x + intercept + noise - - where *slope* and *intercept* are the regression parameters that are - estimated, and noise represents the variability of the data that was - not explained by the linear regression (it is equal to the - difference between predicted and actual values of the dependent - variable). 
- - The parameters are returned as a named tuple. - - >>> x = [1, 2, 3, 4, 5] - >>> noise = NormalDist().samples(5, seed=42) - >>> y = [3 * x[i] + 2 + noise[i] for i in range(5)] - >>> linear_regression(x, y) #doctest: +ELLIPSIS - LinearRegression(slope=3.17495..., intercept=1.00925...) - - If *proportional* is true, the independent variable *x* and the - dependent variable *y* are assumed to be directly proportional. - The data is fit to a line passing through the origin. - - Since the *intercept* will always be 0.0, the underlying linear - function simplifies to: - - y = slope * x + noise - - >>> y = [3 * x[i] + noise[i] for i in range(5)] - >>> linear_regression(x, y, proportional=True) #doctest: +ELLIPSIS - LinearRegression(slope=2.90475..., intercept=0.0) - - """ - n = len(x) - if len(y) != n: - raise StatisticsError('linear regression requires that both inputs have same number of data points') - if n < 2: - raise StatisticsError('linear regression requires at least two data points') - if not proportional: - xbar = fsum(x) / n - ybar = fsum(y) / n - x = [xi - xbar for xi in x] # List because used three times below - y = (yi - ybar for yi in y) # Generator because only used once below - sxy = sumprod(x, y) + 0.0 # Add zero to coerce result to a float - sxx = sumprod(x, x) - try: - slope = sxy / sxx # equivalent to: covariance(x, y) / variance(x) - except ZeroDivisionError: - raise StatisticsError('x is constant') - intercept = 0.0 if proportional else ybar - slope * xbar - return LinearRegression(slope=slope, intercept=intercept) - - -## Normal Distribution ##################################################### - - -def _normal_dist_inv_cdf(p, mu, sigma): - # There is no closed-form solution to the inverse CDF for the normal - # distribution, so we use a rational approximation instead: - # Wichura, M.J. (1988). "Algorithm AS241: The Percentage Points of the - # Normal Distribution". Applied Statistics. Blackwell Publishing. 37 - # (3): 477–484. doi:10.2307/2347330. JSTOR 2347330. 
- q = p - 0.5 - if fabs(q) <= 0.425: - r = 0.180625 - q * q - # Hash sum: 55.88319_28806_14901_4439 - num = (((((((2.50908_09287_30122_6727e+3 * r + - 3.34305_75583_58812_8105e+4) * r + - 6.72657_70927_00870_0853e+4) * r + - 4.59219_53931_54987_1457e+4) * r + - 1.37316_93765_50946_1125e+4) * r + - 1.97159_09503_06551_4427e+3) * r + - 1.33141_66789_17843_7745e+2) * r + - 3.38713_28727_96366_6080e+0) * q - den = (((((((5.22649_52788_52854_5610e+3 * r + - 2.87290_85735_72194_2674e+4) * r + - 3.93078_95800_09271_0610e+4) * r + - 2.12137_94301_58659_5867e+4) * r + - 5.39419_60214_24751_1077e+3) * r + - 6.87187_00749_20579_0830e+2) * r + - 4.23133_30701_60091_1252e+1) * r + - 1.0) - x = num / den - return mu + (x * sigma) - r = p if q <= 0.0 else 1.0 - p - r = sqrt(-log(r)) - if r <= 5.0: - r = r - 1.6 - # Hash sum: 49.33206_50330_16102_89036 - num = (((((((7.74545_01427_83414_07640e-4 * r + - 2.27238_44989_26918_45833e-2) * r + - 2.41780_72517_74506_11770e-1) * r + - 1.27045_82524_52368_38258e+0) * r + - 3.64784_83247_63204_60504e+0) * r + - 5.76949_72214_60691_40550e+0) * r + - 4.63033_78461_56545_29590e+0) * r + - 1.42343_71107_49683_57734e+0) - den = (((((((1.05075_00716_44416_84324e-9 * r + - 5.47593_80849_95344_94600e-4) * r + - 1.51986_66563_61645_71966e-2) * r + - 1.48103_97642_74800_74590e-1) * r + - 6.89767_33498_51000_04550e-1) * r + - 1.67638_48301_83803_84940e+0) * r + - 2.05319_16266_37758_82187e+0) * r + - 1.0) - else: - r = r - 5.0 - # Hash sum: 47.52583_31754_92896_71629 - num = (((((((2.01033_43992_92288_13265e-7 * r + - 2.71155_55687_43487_57815e-5) * r + - 1.24266_09473_88078_43860e-3) * r + - 2.65321_89526_57612_30930e-2) * r + - 2.96560_57182_85048_91230e-1) * r + - 1.78482_65399_17291_33580e+0) * r + - 5.46378_49111_64114_36990e+0) * r + - 6.65790_46435_01103_77720e+0) - den = (((((((2.04426_31033_89939_78564e-15 * r + - 1.42151_17583_16445_88870e-7) * r + - 1.84631_83175_10054_68180e-5) * r + - 7.86869_13114_56132_59100e-4) * r + - 1.48753_61290_85061_48525e-2) * r + - 1.36929_88092_27358_05310e-1) * r + - 5.99832_20655_58879_37690e-1) * r + - 1.0) - x = num / den - if q < 0.0: - x = -x - return mu + (x * sigma) - - -# If available, use C implementation -try: - from _statistics import _normal_dist_inv_cdf -except ImportError: - pass - - -class NormalDist: - "Normal distribution of a random variable" - # https://en.wikipedia.org/wiki/Normal_distribution - # https://en.wikipedia.org/wiki/Variance#Properties - - __slots__ = { - '_mu': 'Arithmetic mean of a normal distribution', - '_sigma': 'Standard deviation of a normal distribution', - } - - def __init__(self, mu=0.0, sigma=1.0): - "NormalDist where mu is the mean and sigma is the standard deviation." - if sigma < 0.0: - raise StatisticsError('sigma must be non-negative') - self._mu = float(mu) - self._sigma = float(sigma) - - @classmethod - def from_samples(cls, data): - "Make a normal distribution instance from sample data." - return cls(*_mean_stdev(data)) - - def samples(self, n, *, seed=None): - "Generate *n* samples for a given mean and standard deviation." - rnd = random.random if seed is None else random.Random(seed).random - inv_cdf = _normal_dist_inv_cdf - mu = self._mu - sigma = self._sigma - return [inv_cdf(rnd(), mu, sigma) for _ in repeat(None, n)] - - def pdf(self, x): - "Probability density function. 
P(x <= X < x+dx) / dx" - variance = self._sigma * self._sigma - if not variance: - raise StatisticsError('pdf() not defined when sigma is zero') - diff = x - self._mu - return exp(diff * diff / (-2.0 * variance)) / sqrt(tau * variance) - - def cdf(self, x): - "Cumulative distribution function. P(X <= x)" - if not self._sigma: - raise StatisticsError('cdf() not defined when sigma is zero') - return 0.5 * (1.0 + erf((x - self._mu) / (self._sigma * _SQRT2))) - - def inv_cdf(self, p): - """Inverse cumulative distribution function. x : P(X <= x) = p - - Finds the value of the random variable such that the probability of - the variable being less than or equal to that value equals the given - probability. - - This function is also called the percent point function or quantile - function. - """ - if p <= 0.0 or p >= 1.0: - raise StatisticsError('p must be in the range 0.0 < p < 1.0') - return _normal_dist_inv_cdf(p, self._mu, self._sigma) - - def quantiles(self, n=4): - """Divide into *n* continuous intervals with equal probability. - - Returns a list of (n - 1) cut points separating the intervals. - - Set *n* to 4 for quartiles (the default). Set *n* to 10 for deciles. - Set *n* to 100 for percentiles which gives the 99 cut points that - separate the normal distribution into 100 equal sized groups. - """ - return [self.inv_cdf(i / n) for i in range(1, n)] - - def overlap(self, other): - """Compute the overlapping coefficient (OVL) between two normal distributions. - - Measures the agreement between two normal probability distributions. - Returns a value between 0.0 and 1.0 giving the overlapping area in - the two underlying probability density functions. - - >>> N1 = NormalDist(2.4, 1.6) - >>> N2 = NormalDist(3.2, 2.0) - >>> N1.overlap(N2) - 0.8035050657330205 - """ - # See: "The overlapping coefficient as a measure of agreement between - # probability distributions and point estimation of the overlap of two - # normal densities" -- Henry F. Inman and Edwin L. Bradley Jr - # http://dx.doi.org/10.1080/03610928908830127 - if not isinstance(other, NormalDist): - raise TypeError('Expected another NormalDist instance') - X, Y = self, other - if (Y._sigma, Y._mu) < (X._sigma, X._mu): # sort to assure commutativity - X, Y = Y, X - X_var, Y_var = X.variance, Y.variance - if not X_var or not Y_var: - raise StatisticsError('overlap() not defined when sigma is zero') - dv = Y_var - X_var - dm = fabs(Y._mu - X._mu) - if not dv: - return 1.0 - erf(dm / (2.0 * X._sigma * _SQRT2)) - a = X._mu * Y_var - Y._mu * X_var - b = X._sigma * Y._sigma * sqrt(dm * dm + dv * log(Y_var / X_var)) - x1 = (a + b) / dv - x2 = (a - b) / dv - return 1.0 - (fabs(Y.cdf(x1) - X.cdf(x1)) + fabs(Y.cdf(x2) - X.cdf(x2))) - - def zscore(self, x): - """Compute the Standard Score. (x - mean) / stdev - - Describes *x* in terms of the number of standard deviations - above or below the mean of the normal distribution. - """ - # https://www.statisticshowto.com/probability-and-statistics/z-score/ - if not self._sigma: - raise StatisticsError('zscore() not defined when sigma is zero') - return (x - self._mu) / self._sigma - - @property - def mean(self): - "Arithmetic mean of the normal distribution." - return self._mu - - @property - def median(self): - "Return the median of the normal distribution" - return self._mu - - @property - def mode(self): - """Return the mode of the normal distribution - - The mode is the value x at which the probability density - function (pdf) takes its maximum value. 
- """ - return self._mu - - @property - def stdev(self): - "Standard deviation of the normal distribution." - return self._sigma - - @property - def variance(self): - "Square of the standard deviation." - return self._sigma * self._sigma - - def __add__(x1, x2): - """Add a constant or another NormalDist instance. - - If *other* is a constant, translate mu by the constant, - leaving sigma unchanged. - - If *other* is a NormalDist, add both the means and the variances. - Mathematically, this works only if the two distributions are - independent or if they are jointly normally distributed. - """ - if isinstance(x2, NormalDist): - return NormalDist(x1._mu + x2._mu, hypot(x1._sigma, x2._sigma)) - return NormalDist(x1._mu + x2, x1._sigma) - - def __sub__(x1, x2): - """Subtract a constant or another NormalDist instance. - - If *other* is a constant, translate by the constant mu, - leaving sigma unchanged. - - If *other* is a NormalDist, subtract the means and add the variances. - Mathematically, this works only if the two distributions are - independent or if they are jointly normally distributed. - """ - if isinstance(x2, NormalDist): - return NormalDist(x1._mu - x2._mu, hypot(x1._sigma, x2._sigma)) - return NormalDist(x1._mu - x2, x1._sigma) - - def __mul__(x1, x2): - """Multiply both mu and sigma by a constant. - - Used for rescaling, perhaps to change measurement units. - Sigma is scaled with the absolute value of the constant. - """ - return NormalDist(x1._mu * x2, x1._sigma * fabs(x2)) - - def __truediv__(x1, x2): - """Divide both mu and sigma by a constant. - - Used for rescaling, perhaps to change measurement units. - Sigma is scaled with the absolute value of the constant. - """ - return NormalDist(x1._mu / x2, x1._sigma / fabs(x2)) - - def __pos__(x1): - "Return a copy of the instance." - return NormalDist(x1._mu, x1._sigma) - - def __neg__(x1): - "Negates mu while keeping sigma the same." - return NormalDist(-x1._mu, x1._sigma) - - __radd__ = __add__ - - def __rsub__(x1, x2): - "Subtract a NormalDist from a constant or another NormalDist." - return -(x1 - x2) - - __rmul__ = __mul__ - - def __eq__(x1, x2): - "Two NormalDist objects are equal if their mu and sigma are both equal." - if not isinstance(x2, NormalDist): - return NotImplemented - return x1._mu == x2._mu and x1._sigma == x2._sigma - - def __hash__(self): - "NormalDist objects hash equal if their mu and sigma are both equal." - return hash((self._mu, self._sigma)) - - def __repr__(self): - return f'{type(self).__name__}(mu={self._mu!r}, sigma={self._sigma!r})' - - def __getstate__(self): - return self._mu, self._sigma - - def __setstate__(self, state): - self._mu, self._sigma = state - - -## kde_random() ############################################################## - -def _newton_raphson(f_inv_estimate, f, f_prime, tolerance=1e-12): - def f_inv(y): - "Return x such that f(x) ≈ y within the specified tolerance." 
- x = f_inv_estimate(y) - while abs(diff := f(x) - y) > tolerance: - x -= diff / f_prime(x) - return x - return f_inv - -def _quartic_invcdf_estimate(p): - sign, p = (1.0, p) if p <= 1/2 else (-1.0, 1.0 - p) - x = (2.0 * p) ** 0.4258865685331 - 1.0 - if 0.004 <= p < 0.499: - x += 0.026818732 * sin(7.101753784 * p + 2.73230839482953) - return x * sign - -_quartic_invcdf = _newton_raphson( - f_inv_estimate = _quartic_invcdf_estimate, - f = lambda t: 3/16 * t**5 - 5/8 * t**3 + 15/16 * t + 1/2, - f_prime = lambda t: 15/16 * (1.0 - t * t) ** 2) - -def _triweight_invcdf_estimate(p): - sign, p = (1.0, p) if p <= 1/2 else (-1.0, 1.0 - p) - x = (2.0 * p) ** 0.3400218741872791 - 1.0 - return x * sign - -_triweight_invcdf = _newton_raphson( - f_inv_estimate = _triweight_invcdf_estimate, - f = lambda t: 35/32 * (-1/7*t**7 + 3/5*t**5 - t**3 + t) + 1/2, - f_prime = lambda t: 35/32 * (1.0 - t * t) ** 3) - -_kernel_invcdfs = { - 'normal': NormalDist().inv_cdf, - 'logistic': lambda p: log(p / (1 - p)), - 'sigmoid': lambda p: log(tan(p * pi/2)), - 'rectangular': lambda p: 2*p - 1, - 'parabolic': lambda p: 2 * cos((acos(2*p-1) + pi) / 3), - 'quartic': _quartic_invcdf, - 'triweight': _triweight_invcdf, - 'triangular': lambda p: sqrt(2*p) - 1 if p < 1/2 else 1 - sqrt(2 - 2*p), - 'cosine': lambda p: 2 * asin(2*p - 1) / pi, -} -_kernel_invcdfs['gauss'] = _kernel_invcdfs['normal'] -_kernel_invcdfs['uniform'] = _kernel_invcdfs['rectangular'] -_kernel_invcdfs['epanechnikov'] = _kernel_invcdfs['parabolic'] -_kernel_invcdfs['biweight'] = _kernel_invcdfs['quartic'] - -def kde_random(data, h, kernel='normal', *, seed=None): - """Return a function that makes a random selection from the estimated - probability density function created by kde(data, h, kernel). - - Providing a *seed* allows reproducible selections within a single - thread. The seed may be an integer, float, str, or bytes. - - A StatisticsError will be raised if the *data* sequence is empty. - - Example: - - >>> data = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2] - >>> rand = kde_random(data, h=1.5, seed=8675309) - >>> new_selections = [rand() for i in range(10)] - >>> [round(x, 1) for x in new_selections] - [0.7, 6.2, 1.2, 6.9, 7.0, 1.8, 2.5, -0.5, -1.8, 5.6] - - """ - n = len(data) - if not n: - raise StatisticsError('Empty data sequence') - - if not isinstance(data[0], (int, float)): - raise TypeError('Data sequence must contain ints or floats') - - if h <= 0.0: - raise StatisticsError(f'Bandwidth h must be positive, not {h=!r}') - - kernel_invcdf = _kernel_invcdfs.get(kernel) - if kernel_invcdf is None: - raise StatisticsError(f'Unknown kernel name: {kernel!r}') - - prng = _random.Random(seed) - random = prng.random - choice = prng.choice - - def rand(): - return choice(data) + h * kernel_invcdf(random()) - - rand.__doc__ = f'Random KDE selection with {h=!r} and {kernel=!r}' - - return rand diff --git a/Python313_13_x86_Template/Lib/string.py b/Python313_13_x86_Template/Lib/string.py deleted file mode 100644 index 2eab6d4f..00000000 --- a/Python313_13_x86_Template/Lib/string.py +++ /dev/null @@ -1,309 +0,0 @@ -"""A collection of string constants. 
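For reference, a minimal sketch of kde_random() as removed above; the seed and the expected first selections come from its docstring example:

from statistics import kde_random

data = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2]
rand = kde_random(data, h=1.5, seed=8675309)
print([round(rand(), 1) for _ in range(3)])  # [0.7, 6.2, 1.2] per the docstring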
- -Public module variables: - -whitespace -- a string containing all ASCII whitespace -ascii_lowercase -- a string containing all ASCII lowercase letters -ascii_uppercase -- a string containing all ASCII uppercase letters -ascii_letters -- a string containing all ASCII letters -digits -- a string containing all ASCII decimal digits -hexdigits -- a string containing all ASCII hexadecimal digits -octdigits -- a string containing all ASCII octal digits -punctuation -- a string containing all ASCII punctuation characters -printable -- a string containing all ASCII characters considered printable - -""" - -__all__ = ["ascii_letters", "ascii_lowercase", "ascii_uppercase", "capwords", - "digits", "hexdigits", "octdigits", "printable", "punctuation", - "whitespace", "Formatter", "Template"] - -import _string - -# Some strings for ctype-style character classification -whitespace = ' \t\n\r\v\f' -ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz' -ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' -ascii_letters = ascii_lowercase + ascii_uppercase -digits = '0123456789' -hexdigits = digits + 'abcdef' + 'ABCDEF' -octdigits = '01234567' -punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" -printable = digits + ascii_letters + punctuation + whitespace - -# Functions which aren't available as string methods. - -# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def". -def capwords(s, sep=None): - """capwords(s [,sep]) -> string - - Split the argument into words using split, capitalize each - word using capitalize, and join the capitalized words using - join. If the optional second argument sep is absent or None, - runs of whitespace characters are replaced by a single space - and leading and trailing whitespace are removed, otherwise - sep is used to split and join the words. - - """ - return (sep or ' ').join(map(str.capitalize, s.split(sep))) - - -#################################################################### -import re as _re -from collections import ChainMap as _ChainMap - -_sentinel_dict = {} - -class Template: - """A string class for supporting $-substitutions.""" - - delimiter = '$' - # r'[a-z]' matches to non-ASCII letters when used with IGNORECASE, but - # without the ASCII flag. We can't add re.ASCII to flags because of - # backward compatibility. So we use the ?a local flag and [a-z] pattern. 
- # See https://bugs.python.org/issue31672 - idpattern = r'(?a:[_a-z][_a-z0-9]*)' - braceidpattern = None - flags = _re.IGNORECASE - - def __init_subclass__(cls): - super().__init_subclass__() - if 'pattern' in cls.__dict__: - pattern = cls.pattern - else: - delim = _re.escape(cls.delimiter) - id = cls.idpattern - bid = cls.braceidpattern or cls.idpattern - pattern = fr""" - {delim}(?: - (?P<escaped>{delim}) | # Escape sequence of two delimiters - (?P<named>{id}) | # delimiter and a Python identifier - {{(?P<braced>{bid})}} | # delimiter and a braced identifier - (?P<invalid>) # Other ill-formed delimiter exprs - ) - """ - cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE) - - def __init__(self, template): - self.template = template - - # Search for $$, $identifier, ${identifier}, and any bare $'s - - def _invalid(self, mo): - i = mo.start('invalid') - lines = self.template[:i].splitlines(keepends=True) - if not lines: - colno = 1 - lineno = 1 - else: - colno = i - len(''.join(lines[:-1])) - lineno = len(lines) - raise ValueError('Invalid placeholder in string: line %d, col %d' % - (lineno, colno)) - - def substitute(self, mapping=_sentinel_dict, /, **kws): - if mapping is _sentinel_dict: - mapping = kws - elif kws: - mapping = _ChainMap(kws, mapping) - # Helper function for .sub() - def convert(mo): - # Check the most common path first. - named = mo.group('named') or mo.group('braced') - if named is not None: - return str(mapping[named]) - if mo.group('escaped') is not None: - return self.delimiter - if mo.group('invalid') is not None: - self._invalid(mo) - raise ValueError('Unrecognized named group in pattern', - self.pattern) - return self.pattern.sub(convert, self.template) - - def safe_substitute(self, mapping=_sentinel_dict, /, **kws): - if mapping is _sentinel_dict: - mapping = kws - elif kws: - mapping = _ChainMap(kws, mapping) - # Helper function for .sub() - def convert(mo): - named = mo.group('named') or mo.group('braced') - if named is not None: - try: - return str(mapping[named]) - except KeyError: - return mo.group() - if mo.group('escaped') is not None: - return self.delimiter - if mo.group('invalid') is not None: - return mo.group() - raise ValueError('Unrecognized named group in pattern', - self.pattern) - return self.pattern.sub(convert, self.template) - - def is_valid(self): - for mo in self.pattern.finditer(self.template): - if mo.group('invalid') is not None: - return False - if (mo.group('named') is None - and mo.group('braced') is None - and mo.group('escaped') is None): - # If all the groups are None, there must be - # another group we're not expecting - raise ValueError('Unrecognized named group in pattern', - self.pattern) - return True - - def get_identifiers(self): - ids = [] - for mo in self.pattern.finditer(self.template): - named = mo.group('named') or mo.group('braced') - if named is not None and named not in ids: - # add a named group only the first time it appears - ids.append(named) - elif (named is None - and mo.group('invalid') is None - and mo.group('escaped') is None): - # If all the groups are None, there must be - # another group we're not expecting - raise ValueError('Unrecognized named group in pattern', - self.pattern) - return ids - -# Initialize Template.pattern. __init_subclass__() is automatically called -# only for subclasses, not for the Template class itself. 
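For reference, a minimal sketch of the Template class above; substitute() resolves the 'named' and 'braced' groups, '$$' escapes through 'escaped', and is_valid() reports whether any 'invalid' match occurs:

from string import Template

t = Template('$who owes $$5 to ${whom}')
print(t.substitute(who='Alice', whom='Bob'))  # Alice owes $5 to Bob
print(t.get_identifiers())                    # ['who', 'whom']
print(t.is_valid())                           # True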
-Template.__init_subclass__() - - -######################################################################## -# the Formatter class -# see PEP 3101 for details and purpose of this class - -# The hard parts are reused from the C implementation. They're exposed as "_" -# prefixed methods of str. - -# The overall parser is implemented in _string.formatter_parser. -# The field name parser is implemented in _string.formatter_field_name_split - -class Formatter: - def format(self, format_string, /, *args, **kwargs): - return self.vformat(format_string, args, kwargs) - - def vformat(self, format_string, args, kwargs): - used_args = set() - result, _ = self._vformat(format_string, args, kwargs, used_args, 2) - self.check_unused_args(used_args, args, kwargs) - return result - - def _vformat(self, format_string, args, kwargs, used_args, recursion_depth, - auto_arg_index=0): - if recursion_depth < 0: - raise ValueError('Max string recursion exceeded') - result = [] - for literal_text, field_name, format_spec, conversion in \ - self.parse(format_string): - - # output the literal text - if literal_text: - result.append(literal_text) - - # if there's a field, output it - if field_name is not None: - # this is some markup, find the object and do - # the formatting - - # handle arg indexing when empty field_names are given. - if field_name == '': - if auto_arg_index is False: - raise ValueError('cannot switch from manual field ' - 'specification to automatic field ' - 'numbering') - field_name = str(auto_arg_index) - auto_arg_index += 1 - elif field_name.isdigit(): - if auto_arg_index: - raise ValueError('cannot switch from manual field ' - 'specification to automatic field ' - 'numbering') - # disable auto arg incrementing, if it gets - # used later on, then an exception will be raised - auto_arg_index = False - - # given the field_name, find the object it references - # and the argument it came from - obj, arg_used = self.get_field(field_name, args, kwargs) - used_args.add(arg_used) - - # do any conversion on the resulting object - obj = self.convert_field(obj, conversion) - - # expand the format spec, if needed - format_spec, auto_arg_index = self._vformat( - format_spec, args, kwargs, - used_args, recursion_depth-1, - auto_arg_index=auto_arg_index) - - # format the object and append to the result - result.append(self.format_field(obj, format_spec)) - - return ''.join(result), auto_arg_index - - - def get_value(self, key, args, kwargs): - if isinstance(key, int): - return args[key] - else: - return kwargs[key] - - - def check_unused_args(self, used_args, args, kwargs): - pass - - - def format_field(self, value, format_spec): - return format(value, format_spec) - - - def convert_field(self, value, conversion): - # do any conversion on the resulting object - if conversion is None: - return value - elif conversion == 's': - return str(value) - elif conversion == 'r': - return repr(value) - elif conversion == 'a': - return ascii(value) - raise ValueError("Unknown conversion specifier {0!s}".format(conversion)) - - - # returns an iterable that contains tuples of the form: - # (literal_text, field_name, format_spec, conversion) - # literal_text can be zero length - # field_name can be None, in which case there's no - # object to format and output - # if field_name is not None, it is looked up, formatted - # with format_spec and conversion and then used - def parse(self, format_string): - return _string.formatter_parser(format_string) - - - # given a field_name, find the object it references. 
- # field_name: the field being looked up, e.g. "0.name" - # or "lookup[3]" - # used_args: a set of which args have been used - # args, kwargs: as passed in to vformat - def get_field(self, field_name, args, kwargs): - first, rest = _string.formatter_field_name_split(field_name) - - obj = self.get_value(first, args, kwargs) - - # loop through the rest of the field_name, doing - # getattr or getitem as needed - for is_attr, i in rest: - if is_attr: - obj = getattr(obj, i) - else: - obj = obj[i] - - return obj, first diff --git a/Python313_13_x86_Template/Lib/struct.py b/Python313_13_x86_Template/Lib/struct.py deleted file mode 100644 index d6bba588..00000000 --- a/Python313_13_x86_Template/Lib/struct.py +++ /dev/null @@ -1,15 +0,0 @@ -__all__ = [ - # Functions - 'calcsize', 'pack', 'pack_into', 'unpack', 'unpack_from', - 'iter_unpack', - - # Classes - 'Struct', - - # Exceptions - 'error' - ] - -from _struct import * -from _struct import _clearcache -from _struct import __doc__ diff --git a/Python313_13_x86_Template/Lib/subprocess.py b/Python313_13_x86_Template/Lib/subprocess.py deleted file mode 100644 index 3a8c7434..00000000 --- a/Python313_13_x86_Template/Lib/subprocess.py +++ /dev/null @@ -1,2258 +0,0 @@ -# subprocess - Subprocesses with accessible I/O streams -# -# For more information about this module, see PEP 324. -# -# Copyright (c) 2003-2005 by Peter Astrand -# -# Licensed to PSF under a Contributor Agreement. - -r"""Subprocesses with accessible I/O streams - -This module allows you to spawn processes, connect to their -input/output/error pipes, and obtain their return codes. - -For a complete description of this module see the Python documentation. - -Main API -======== -run(...): Runs a command, waits for it to complete, then returns a - CompletedProcess instance. -Popen(...): A class for flexibly executing a command in a new process - -Constants ---------- -DEVNULL: Special value that indicates that os.devnull should be used -PIPE: Special value that indicates a pipe should be created -STDOUT: Special value that indicates that stderr should go to stdout - - -Older API -========= -call(...): Runs a command, waits for it to complete, then returns - the return code. -check_call(...): Same as call() but raises CalledProcessError() - if return code is not 0 -check_output(...): Same as check_call() but returns the contents of - stdout instead of a return code -getoutput(...): Runs a command in the shell, waits for it to complete, - then returns the output -getstatusoutput(...): Runs a command in the shell, waits for it to complete, - then returns a (exitcode, output) tuple -""" - -import builtins -import errno -import io -import locale -import os -import time -import signal -import sys -import threading -import warnings -import contextlib -from time import monotonic as _time -import types - -try: - import fcntl -except ImportError: - fcntl = None - - -__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput", - "getoutput", "check_output", "run", "CalledProcessError", "DEVNULL", - "SubprocessError", "TimeoutExpired", "CompletedProcess"] - # NOTE: We intentionally exclude list2cmdline as it is - # considered an internal implementation detail. issue10838. 
- -# use presence of msvcrt to detect Windows-like platforms (see bpo-8110) -try: - import msvcrt -except ModuleNotFoundError: - _mswindows = False -else: - _mswindows = True - -# some platforms do not support subprocesses -_can_fork_exec = sys.platform not in {"emscripten", "wasi", "ios", "tvos", "watchos"} - -if _mswindows: - import _winapi - from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP, - STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, - STD_ERROR_HANDLE, SW_HIDE, - STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW, - STARTF_FORCEONFEEDBACK, STARTF_FORCEOFFFEEDBACK, - ABOVE_NORMAL_PRIORITY_CLASS, BELOW_NORMAL_PRIORITY_CLASS, - HIGH_PRIORITY_CLASS, IDLE_PRIORITY_CLASS, - NORMAL_PRIORITY_CLASS, REALTIME_PRIORITY_CLASS, - CREATE_NO_WINDOW, DETACHED_PROCESS, - CREATE_DEFAULT_ERROR_MODE, CREATE_BREAKAWAY_FROM_JOB) - - __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP", - "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE", - "STD_ERROR_HANDLE", "SW_HIDE", - "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW", - "STARTF_FORCEONFEEDBACK", "STARTF_FORCEOFFFEEDBACK", - "STARTUPINFO", - "ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS", - "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS", - "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS", - "CREATE_NO_WINDOW", "DETACHED_PROCESS", - "CREATE_DEFAULT_ERROR_MODE", "CREATE_BREAKAWAY_FROM_JOB"]) -else: - if _can_fork_exec: - from _posixsubprocess import fork_exec as _fork_exec - # used in methods that are called by __del__ - class _del_safe: - waitpid = os.waitpid - waitstatus_to_exitcode = os.waitstatus_to_exitcode - WIFSTOPPED = os.WIFSTOPPED - WSTOPSIG = os.WSTOPSIG - WNOHANG = os.WNOHANG - ECHILD = errno.ECHILD - else: - class _del_safe: - waitpid = None - waitstatus_to_exitcode = None - WIFSTOPPED = None - WSTOPSIG = None - WNOHANG = None - ECHILD = errno.ECHILD - - import select - import selectors - - -# Exception classes used by this module. -class SubprocessError(Exception): pass - - -class CalledProcessError(SubprocessError): - """Raised when run() is called with check=True and the process - returns a non-zero exit status. - - Attributes: - cmd, returncode, stdout, stderr, output - """ - def __init__(self, returncode, cmd, output=None, stderr=None): - self.returncode = returncode - self.cmd = cmd - self.output = output - self.stderr = stderr - - def __str__(self): - if self.returncode and self.returncode < 0: - try: - return "Command '%s' died with %r." % ( - self.cmd, signal.Signals(-self.returncode)) - except ValueError: - return "Command '%s' died with unknown signal %d." % ( - self.cmd, -self.returncode) - else: - return "Command '%s' returned non-zero exit status %d." % ( - self.cmd, self.returncode) - - @property - def stdout(self): - """Alias for output attribute, to match stderr""" - return self.output - - @stdout.setter - def stdout(self, value): - # There's no obvious reason to set this, but allow it anyway so - # .stdout is a transparent alias for .output - self.output = value - - -class TimeoutExpired(SubprocessError): - """This exception is raised when the timeout expires while waiting for a - child process. 
- - Attributes: - cmd, output, stdout, stderr, timeout - """ - def __init__(self, cmd, timeout, output=None, stderr=None): - self.cmd = cmd - self.timeout = timeout - self.output = output - self.stderr = stderr - - def __str__(self): - return ("Command '%s' timed out after %s seconds" % - (self.cmd, self.timeout)) - - @property - def stdout(self): - return self.output - - @stdout.setter - def stdout(self, value): - # There's no obvious reason to set this, but allow it anyway so - # .stdout is a transparent alias for .output - self.output = value - - -if _mswindows: - class STARTUPINFO: - def __init__(self, *, dwFlags=0, hStdInput=None, hStdOutput=None, - hStdError=None, wShowWindow=0, lpAttributeList=None): - self.dwFlags = dwFlags - self.hStdInput = hStdInput - self.hStdOutput = hStdOutput - self.hStdError = hStdError - self.wShowWindow = wShowWindow - self.lpAttributeList = lpAttributeList or {"handle_list": []} - - def copy(self): - attr_list = self.lpAttributeList.copy() - if 'handle_list' in attr_list: - attr_list['handle_list'] = list(attr_list['handle_list']) - - return STARTUPINFO(dwFlags=self.dwFlags, - hStdInput=self.hStdInput, - hStdOutput=self.hStdOutput, - hStdError=self.hStdError, - wShowWindow=self.wShowWindow, - lpAttributeList=attr_list) - - - class Handle(int): - closed = False - - def Close(self, CloseHandle=_winapi.CloseHandle): - if not self.closed: - self.closed = True - CloseHandle(self) - - def Detach(self): - if not self.closed: - self.closed = True - return int(self) - raise ValueError("already closed") - - def __repr__(self): - return "%s(%d)" % (self.__class__.__name__, int(self)) - - __del__ = Close -else: - # When select or poll has indicated that the file is writable, - # we can write up to _PIPE_BUF bytes without risk of blocking. - # POSIX defines PIPE_BUF as >= 512. - _PIPE_BUF = getattr(select, 'PIPE_BUF', 512) - - # poll/select have the advantage of not requiring any extra file - # descriptor, unlike epoll/kqueue (also, they require a single - # syscall). - if hasattr(selectors, 'PollSelector'): - _PopenSelector = selectors.PollSelector - else: - _PopenSelector = selectors.SelectSelector - - -if _mswindows: - # On Windows we just need to close `Popen._handle` when we no longer need - # it, so that the kernel can free it. `Popen._handle` gets closed - # implicitly when the `Popen` instance is finalized (see `Handle.__del__`, - # which is calling `CloseHandle` as requested in [1]), so there is nothing - # for `_cleanup` to do. - # - # [1] https://docs.microsoft.com/en-us/windows/desktop/ProcThread/ - # creating-processes - _active = None - - def _cleanup(): - pass -else: - # This list holds Popen instances for which the underlying process had not - # exited at the time its __del__ method got called: those processes are - # wait()ed for synchronously from _cleanup() when a new Popen object is - # created, to avoid zombie processes. - _active = [] - - def _cleanup(): - if _active is None: - return - for inst in _active[:]: - res = inst._internal_poll(_deadstate=sys.maxsize) - if res is not None: - try: - _active.remove(inst) - except ValueError: - # This can happen if two threads create a new Popen instance. - # It's harmless that it was already removed, so ignore. - pass - -PIPE = -1 -STDOUT = -2 -DEVNULL = -3 - - -# XXX This function is only used by multiprocessing and the test suite, -# but it's here so that it can be imported when Python is compiled without -# threads. 
- -def _optim_args_from_interpreter_flags(): - """Return a list of command-line arguments reproducing the current - optimization settings in sys.flags.""" - args = [] - value = sys.flags.optimize - if value > 0: - args.append('-' + 'O' * value) - return args - - -def _args_from_interpreter_flags(): - """Return a list of command-line arguments reproducing the current - settings in sys.flags, sys.warnoptions and sys._xoptions.""" - flag_opt_map = { - 'debug': 'd', - # 'inspect': 'i', - # 'interactive': 'i', - 'dont_write_bytecode': 'B', - 'no_site': 'S', - 'verbose': 'v', - 'bytes_warning': 'b', - 'quiet': 'q', - # -O is handled in _optim_args_from_interpreter_flags() - } - args = _optim_args_from_interpreter_flags() - for flag, opt in flag_opt_map.items(): - v = getattr(sys.flags, flag) - if v > 0: - args.append('-' + opt * v) - - if sys.flags.isolated: - args.append('-I') - else: - if sys.flags.ignore_environment: - args.append('-E') - if sys.flags.no_user_site: - args.append('-s') - if sys.flags.safe_path: - args.append('-P') - - # -W options - warnopts = sys.warnoptions[:] - xoptions = getattr(sys, '_xoptions', {}) - bytes_warning = sys.flags.bytes_warning - dev_mode = sys.flags.dev_mode - - if bytes_warning > 1: - warnopts.remove("error::BytesWarning") - elif bytes_warning: - warnopts.remove("default::BytesWarning") - if dev_mode: - warnopts.remove('default') - for opt in warnopts: - args.append('-W' + opt) - - # -X options - if dev_mode: - args.extend(('-X', 'dev')) - for opt in ('faulthandler', 'tracemalloc', 'importtime', - 'frozen_modules', 'showrefcount', 'utf8', 'gil'): - if opt in xoptions: - value = xoptions[opt] - if value is True: - arg = opt - else: - arg = '%s=%s' % (opt, value) - args.extend(('-X', arg)) - - return args - - -def _text_encoding(): - # Return default text encoding and emit EncodingWarning if - # sys.flags.warn_default_encoding is true. - if sys.flags.warn_default_encoding: - f = sys._getframe() - filename = f.f_code.co_filename - stacklevel = 2 - while f := f.f_back: - if f.f_code.co_filename != filename: - break - stacklevel += 1 - warnings.warn("'encoding' argument not specified.", - EncodingWarning, stacklevel) - - if sys.flags.utf8_mode: - return "utf-8" - else: - return locale.getencoding() - - -def call(*popenargs, timeout=None, **kwargs): - """Run command with arguments. Wait for command to complete or - for timeout seconds, then return the returncode attribute. - - The arguments are the same as for the Popen constructor. Example: - - retcode = call(["ls", "-l"]) - """ - with Popen(*popenargs, **kwargs) as p: - try: - return p.wait(timeout=timeout) - except: # Including KeyboardInterrupt, wait handled that. - p.kill() - # We don't call p.wait() again as p.__exit__ does that for us. - raise - - -def check_call(*popenargs, **kwargs): - """Run command with arguments. Wait for command to complete. If - the exit code was zero then return, otherwise raise - CalledProcessError. The CalledProcessError object will have the - return code in the returncode attribute. - - The arguments are the same as for the call function. Example: - - check_call(["ls", "-l"]) - """ - retcode = call(*popenargs, **kwargs) - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise CalledProcessError(retcode, cmd) - return 0 - - -def check_output(*popenargs, timeout=None, **kwargs): - r"""Run command with arguments and return its output. - - If the exit code was non-zero it raises a CalledProcessError. 
The - CalledProcessError object will have the return code in the returncode - attribute and output in the output attribute. - - The arguments are the same as for the Popen constructor. Example: - - >>> check_output(["ls", "-l", "/dev/null"]) - b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' - - The stdout argument is not allowed as it is used internally. - To capture standard error in the result, use stderr=STDOUT. - - >>> check_output(["/bin/sh", "-c", - ... "ls -l non_existent_file ; exit 0"], - ... stderr=STDOUT) - b'ls: non_existent_file: No such file or directory\n' - - There is an additional optional argument, "input", allowing you to - pass a string to the subprocess's stdin. If you use this argument - you may not also use the Popen constructor's "stdin" argument, as - it too will be used internally. Example: - - >>> check_output(["sed", "-e", "s/foo/bar/"], - ... input=b"when in the course of fooman events\n") - b'when in the course of barman events\n' - - By default, all communication is in bytes, and therefore any "input" - should be bytes, and the return value will be bytes. If in text mode, - any "input" should be a string, and the return value will be a string - decoded according to locale encoding, or by "encoding" if set. Text mode - is triggered by setting any of text, encoding, errors or universal_newlines. - """ - for kw in ('stdout', 'check'): - if kw in kwargs: - raise ValueError(f'{kw} argument not allowed, it will be overridden.') - - if 'input' in kwargs and kwargs['input'] is None: - # Explicitly passing input=None was previously equivalent to passing an - # empty string. That is maintained here for backwards compatibility. - if kwargs.get('universal_newlines') or kwargs.get('text') or kwargs.get('encoding') \ - or kwargs.get('errors'): - empty = '' - else: - empty = b'' - kwargs['input'] = empty - - return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, - **kwargs).stdout - - -class CompletedProcess(object): - """A process that has finished running. - - This is returned by run(). - - Attributes: - args: The list or str args passed to run(). - returncode: The exit code of the process, negative for signals. - stdout: The standard output (None if not captured). - stderr: The standard error (None if not captured). - """ - def __init__(self, args, returncode, stdout=None, stderr=None): - self.args = args - self.returncode = returncode - self.stdout = stdout - self.stderr = stderr - - def __repr__(self): - args = ['args={!r}'.format(self.args), - 'returncode={!r}'.format(self.returncode)] - if self.stdout is not None: - args.append('stdout={!r}'.format(self.stdout)) - if self.stderr is not None: - args.append('stderr={!r}'.format(self.stderr)) - return "{}({})".format(type(self).__name__, ', '.join(args)) - - __class_getitem__ = classmethod(types.GenericAlias) - - - def check_returncode(self): - """Raise CalledProcessError if the exit code is non-zero.""" - if self.returncode: - raise CalledProcessError(self.returncode, self.args, self.stdout, - self.stderr) - - -def run(*popenargs, - input=None, capture_output=False, timeout=None, check=False, **kwargs): - """Run command with arguments and return a CompletedProcess instance. - - The returned instance will have attributes args, returncode, stdout and - stderr. By default, stdout and stderr are not captured, and those attributes - will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them, - or pass capture_output=True to capture both. 
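For reference, a minimal sketch of the older API above (call() and check_output()); sys.executable keeps the commands portable, and the trailing newline differs by platform:

import subprocess
import sys

rc = subprocess.call([sys.executable, '-c', 'print("hi")'])  # prints hi, returns 0
out = subprocess.check_output([sys.executable, '-c', 'print("hi")'])
print(rc, out)  # 0 b'hi\n' (b'hi\r\n' on Windows)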
- - If check is True and the exit code was non-zero, it raises a - CalledProcessError. The CalledProcessError object will have the return code - in the returncode attribute, and output & stderr attributes if those streams - were captured. - - If timeout (seconds) is given and the process takes too long, - a TimeoutExpired exception will be raised. - - There is an optional argument "input", allowing you to - pass bytes or a string to the subprocess's stdin. If you use this argument - you may not also use the Popen constructor's "stdin" argument, as - it will be used internally. - - By default, all communication is in bytes, and therefore any "input" should - be bytes, and the stdout and stderr will be bytes. If in text mode, any - "input" should be a string, and stdout and stderr will be strings decoded - according to locale encoding, or by "encoding" if set. Text mode is - triggered by setting any of text, encoding, errors or universal_newlines. - - The other arguments are the same as for the Popen constructor. - """ - if input is not None: - if kwargs.get('stdin') is not None: - raise ValueError('stdin and input arguments may not both be used.') - kwargs['stdin'] = PIPE - - if capture_output: - if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None: - raise ValueError('stdout and stderr arguments may not be used ' - 'with capture_output.') - kwargs['stdout'] = PIPE - kwargs['stderr'] = PIPE - - with Popen(*popenargs, **kwargs) as process: - try: - stdout, stderr = process.communicate(input, timeout=timeout) - except TimeoutExpired as exc: - process.kill() - if _mswindows: - # Windows accumulates the output in a single blocking - # read() call run on child threads, with the timeout - # being done in a join() on those threads. communicate() - # _after_ kill() is required to collect that and add it - # to the exception. - exc.stdout, exc.stderr = process.communicate() - else: - # POSIX _communicate already populated the output so - # far into the TimeoutExpired exception. - process.wait() - raise - except: # Including KeyboardInterrupt, communicate handled that. - process.kill() - # We don't call process.wait() as .__exit__ does that for us. - raise - retcode = process.poll() - if check and retcode: - raise CalledProcessError(retcode, process.args, - output=stdout, stderr=stderr) - return CompletedProcess(process.args, retcode, stdout, stderr) - - -def list2cmdline(seq): - """ - Translate a sequence of arguments into a command line - string, using the same rules as the MS C runtime: - - 1) Arguments are delimited by white space, which is either a - space or a tab. - - 2) A string surrounded by double quotation marks is - interpreted as a single argument, regardless of white space - contained within. A quoted string can be embedded in an - argument. - - 3) A double quotation mark preceded by a backslash is - interpreted as a literal double quotation mark. - - 4) Backslashes are interpreted literally, unless they - immediately precede a double quotation mark. - - 5) If backslashes immediately precede a double quotation mark, - every pair of backslashes is interpreted as a literal - backslash. If the number of backslashes is odd, the last - backslash escapes the next double quotation mark as - described in rule 3. 
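For reference, a minimal sketch of run() as removed above, showing capture_output=True and check_returncode() raising CalledProcessError with the exit code:

import subprocess
import sys

cp = subprocess.run([sys.executable, '-c', 'import sys; sys.exit(3)'],
                    capture_output=True)
print(cp.returncode)          # 3
try:
    cp.check_returncode()
except subprocess.CalledProcessError as exc:
    print(exc.returncode)     # 3, carried on the exception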
- """ - - # See - # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx - # or search http://msdn.microsoft.com for - # "Parsing C++ Command-Line Arguments" - result = [] - needquote = False - for arg in map(os.fsdecode, seq): - bs_buf = [] - - # Add a space to separate this argument from the others - if result: - result.append(' ') - - needquote = (" " in arg) or ("\t" in arg) or not arg - if needquote: - result.append('"') - - for c in arg: - if c == '\\': - # Don't know if we need to double yet. - bs_buf.append(c) - elif c == '"': - # Double backslashes. - result.append('\\' * len(bs_buf)*2) - bs_buf = [] - result.append('\\"') - else: - # Normal char - if bs_buf: - result.extend(bs_buf) - bs_buf = [] - result.append(c) - - # Add remaining backslashes, if any. - if bs_buf: - result.extend(bs_buf) - - if needquote: - result.extend(bs_buf) - result.append('"') - - return ''.join(result) - - -# Various tools for executing commands and looking at their output and status. -# - -def getstatusoutput(cmd, *, encoding=None, errors=None): - """Return (exitcode, output) of executing cmd in a shell. - - Execute the string 'cmd' in a shell with 'check_output' and - return a 2-tuple (status, output). The locale encoding is used - to decode the output and process newlines. - - A trailing newline is stripped from the output. - The exit status for the command can be interpreted - according to the rules for the function 'wait'. Example: - - >>> import subprocess - >>> subprocess.getstatusoutput('ls /bin/ls') - (0, '/bin/ls') - >>> subprocess.getstatusoutput('cat /bin/junk') - (1, 'cat: /bin/junk: No such file or directory') - >>> subprocess.getstatusoutput('/bin/junk') - (127, 'sh: /bin/junk: not found') - >>> subprocess.getstatusoutput('/bin/kill $$') - (-15, '') - """ - try: - data = check_output(cmd, shell=True, text=True, stderr=STDOUT, - encoding=encoding, errors=errors) - exitcode = 0 - except CalledProcessError as ex: - data = ex.output - exitcode = ex.returncode - if data[-1:] == '\n': - data = data[:-1] - return exitcode, data - -def getoutput(cmd, *, encoding=None, errors=None): - """Return output (stdout or stderr) of executing cmd in a shell. - - Like getstatusoutput(), except the exit status is ignored and the return - value is a string containing the command's output. Example: - - >>> import subprocess - >>> subprocess.getoutput('ls /bin/ls') - '/bin/ls' - """ - return getstatusoutput(cmd, encoding=encoding, errors=errors)[1] - - - -def _use_posix_spawn(): - """Check if posix_spawn() can be used for subprocess. - - subprocess requires a posix_spawn() implementation that properly reports - errors to the parent process, & sets errno on the following failures: - - * Process attribute actions failed. - * File actions failed. - * exec() failed. - - Prefer an implementation which can use vfork() in some cases for best - performance. 
- """ - if _mswindows or not hasattr(os, 'posix_spawn'): - # os.posix_spawn() is not available - return False - - if ((_env := os.environ.get('_PYTHON_SUBPROCESS_USE_POSIX_SPAWN')) in ('0', '1')): - return bool(int(_env)) - - if sys.platform in ('darwin', 'sunos5'): - # posix_spawn() is a syscall on both macOS and Solaris, - # and properly reports errors - return True - - # Check libc name and runtime libc version - try: - ver = os.confstr('CS_GNU_LIBC_VERSION') - # parse 'glibc 2.28' as ('glibc', (2, 28)) - parts = ver.split(maxsplit=1) - if len(parts) != 2: - # reject unknown format - raise ValueError - libc = parts[0] - version = tuple(map(int, parts[1].split('.'))) - - if sys.platform == 'linux' and libc == 'glibc' and version >= (2, 24): - # glibc 2.24 has a new Linux posix_spawn implementation using vfork - # which properly reports errors to the parent process. - return True - # Note: Don't use the implementation in earlier glibc because it doesn't - # use vfork (even if glibc 2.26 added a pipe to properly report errors - # to the parent process). - except (AttributeError, ValueError, OSError): - # os.confstr() or CS_GNU_LIBC_VERSION value not available - pass - - # By default, assume that posix_spawn() does not properly report errors. - return False - - -# These are primarily fail-safe knobs for negatives. A True value does not -# guarantee the given libc/syscall API will be used. -_USE_POSIX_SPAWN = _use_posix_spawn() -_USE_VFORK = True -_HAVE_POSIX_SPAWN_CLOSEFROM = hasattr(os, 'POSIX_SPAWN_CLOSEFROM') - - -class Popen: - """ Execute a child program in a new process. - - For a complete description of the arguments see the Python documentation. - - Arguments: - args: A string, or a sequence of program arguments. - - bufsize: supplied as the buffering argument to the open() function when - creating the stdin/stdout/stderr pipe file objects - - executable: A replacement program to execute. - - stdin, stdout and stderr: These specify the executed programs' standard - input, standard output and standard error file handles, respectively. - - preexec_fn: (POSIX only) An object to be called in the child process - just before the child is executed. - - close_fds: Controls closing or inheriting of file descriptors. - - shell: If true, the command will be executed through the shell. - - cwd: Sets the current directory before the child is executed. - - env: Defines the environment variables for the new process. - - text: If true, decode stdin, stdout and stderr using the given encoding - (if set) or the system default otherwise. - - universal_newlines: Alias of text, provided for backwards compatibility. - - startupinfo and creationflags (Windows only) - - restore_signals (POSIX only) - - start_new_session (POSIX only) - - process_group (POSIX only) - - group (POSIX only) - - extra_groups (POSIX only) - - user (POSIX only) - - umask (POSIX only) - - pass_fds (POSIX only) - - encoding and errors: Text mode encoding and error handling to use for - file objects stdin, stdout and stderr. 
- - Attributes: - stdin, stdout, stderr, pid, returncode - """ - _child_created = False # Set here since __del__ checks it - - def __init__(self, args, bufsize=-1, executable=None, - stdin=None, stdout=None, stderr=None, - preexec_fn=None, close_fds=True, - shell=False, cwd=None, env=None, universal_newlines=None, - startupinfo=None, creationflags=0, - restore_signals=True, start_new_session=False, - pass_fds=(), *, user=None, group=None, extra_groups=None, - encoding=None, errors=None, text=None, umask=-1, pipesize=-1, - process_group=None): - """Create new Popen instance.""" - if not _can_fork_exec: - raise OSError( - errno.ENOTSUP, f"{sys.platform} does not support processes." - ) - - _cleanup() - # Held while anything is calling waitpid before returncode has been - # updated to prevent clobbering returncode if wait() or poll() are - # called from multiple threads at once. After acquiring the lock, - # code must re-check self.returncode to see if another thread just - # finished a waitpid() call. - self._waitpid_lock = threading.Lock() - - self._input = None - self._communication_started = False - if bufsize is None: - bufsize = -1 # Restore default - if not isinstance(bufsize, int): - raise TypeError("bufsize must be an integer") - - if stdout is STDOUT: - raise ValueError("STDOUT can only be used for stderr") - - if pipesize is None: - pipesize = -1 # Restore default - if not isinstance(pipesize, int): - raise TypeError("pipesize must be an integer") - - if _mswindows: - if preexec_fn is not None: - raise ValueError("preexec_fn is not supported on Windows " - "platforms") - else: - # POSIX - if pass_fds and not close_fds: - warnings.warn("pass_fds overriding close_fds.", RuntimeWarning) - close_fds = True - if startupinfo is not None: - raise ValueError("startupinfo is only supported on Windows " - "platforms") - if creationflags != 0: - raise ValueError("creationflags is only supported on Windows " - "platforms") - - self.args = args - self.stdin = None - self.stdout = None - self.stderr = None - self.pid = None - self.returncode = None - self.encoding = encoding - self.errors = errors - self.pipesize = pipesize - - # Validate the combinations of text and universal_newlines - if (text is not None and universal_newlines is not None - and bool(universal_newlines) != bool(text)): - raise SubprocessError('Cannot disambiguate when both text ' - 'and universal_newlines are supplied but ' - 'different. Pass one or the other.') - - self.text_mode = encoding or errors or text or universal_newlines - if self.text_mode and encoding is None: - self.encoding = encoding = _text_encoding() - - # How long to resume waiting on a child after the first ^C. - # There is no right value for this. The purpose is to be polite - # yet remain good for interactive users trying to exit a tool. - self._sigint_wait_secs = 0.25 # 1/xkcd221.getRandomNumber() - - self._closed_child_pipe_fds = False - - if self.text_mode: - if bufsize == 1: - line_buffering = True - # Use the default buffer size for the underlying binary streams - # since they don't support line buffering. 
- bufsize = -1 - else: - line_buffering = False - - if process_group is None: - process_group = -1 # The internal APIs are int-only - - gid = None - if group is not None: - if not hasattr(os, 'setregid'): - raise ValueError("The 'group' parameter is not supported on the " - "current platform") - - elif isinstance(group, str): - try: - import grp - except ImportError: - raise ValueError("The group parameter cannot be a string " - "on systems without the grp module") - - gid = grp.getgrnam(group).gr_gid - elif isinstance(group, int): - gid = group - else: - raise TypeError("Group must be a string or an integer, not {}" - .format(type(group))) - - if gid < 0: - raise ValueError(f"Group ID cannot be negative, got {gid}") - - gids = None - if extra_groups is not None: - if not hasattr(os, 'setgroups'): - raise ValueError("The 'extra_groups' parameter is not " - "supported on the current platform") - - elif isinstance(extra_groups, str): - raise ValueError("Groups must be a list, not a string") - - gids = [] - for extra_group in extra_groups: - if isinstance(extra_group, str): - try: - import grp - except ImportError: - raise ValueError("Items in extra_groups cannot be " - "strings on systems without the " - "grp module") - - gids.append(grp.getgrnam(extra_group).gr_gid) - elif isinstance(extra_group, int): - gids.append(extra_group) - else: - raise TypeError("Items in extra_groups must be a string " - "or integer, not {}" - .format(type(extra_group))) - - # make sure that the gids are all positive here so we can do less - # checking in the C code - for gid_check in gids: - if gid_check < 0: - raise ValueError(f"Group ID cannot be negative, got {gid_check}") - - uid = None - if user is not None: - if not hasattr(os, 'setreuid'): - raise ValueError("The 'user' parameter is not supported on " - "the current platform") - - elif isinstance(user, str): - try: - import pwd - except ImportError: - raise ValueError("The user parameter cannot be a string " - "on systems without the pwd module") - uid = pwd.getpwnam(user).pw_uid - elif isinstance(user, int): - uid = user - else: - raise TypeError("User must be a string or an integer") - - if uid < 0: - raise ValueError(f"User ID cannot be negative, got {uid}") - - # Input and output objects. The general principle is like - # this: - # - # Parent Child - # ------ ----- - # p2cwrite ---stdin---> p2cread - # c2pread <--stdout--- c2pwrite - # errread <--stderr--- errwrite - # - # On POSIX, the child objects are file descriptors. On - # Windows, these are Windows file handles. The parent objects - # are file descriptors on both platforms. The parent objects - # are -1 when not using PIPEs. The child objects are -1 - # when not redirecting. - - (p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) = self._get_handles(stdin, stdout, stderr) - - # From here on, raising exceptions may cause file descriptor leakage - - # We wrap OS handles *before* launching the child, otherwise a - # quickly terminating child could make our fds unwrappable - # (see #8458). 
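
        # (Clarification: "wrapping" means converting the raw OS handles
        # returned by _get_handles() into CRT file descriptors that io.open()
        # can consume; on Windows this is done with msvcrt.open_osfhandle()
        # just below, while on POSIX the values are already file descriptors.)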
- - if _mswindows: - if p2cwrite != -1: - p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0) - if c2pread != -1: - c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0) - if errread != -1: - errread = msvcrt.open_osfhandle(errread.Detach(), 0) - - try: - if p2cwrite != -1: - self.stdin = io.open(p2cwrite, 'wb', bufsize) - if self.text_mode: - self.stdin = io.TextIOWrapper(self.stdin, write_through=True, - line_buffering=line_buffering, - encoding=encoding, errors=errors) - if c2pread != -1: - self.stdout = io.open(c2pread, 'rb', bufsize) - if self.text_mode: - self.stdout = io.TextIOWrapper(self.stdout, - encoding=encoding, errors=errors) - if errread != -1: - self.stderr = io.open(errread, 'rb', bufsize) - if self.text_mode: - self.stderr = io.TextIOWrapper(self.stderr, - encoding=encoding, errors=errors) - - self._execute_child(args, executable, preexec_fn, close_fds, - pass_fds, cwd, env, - startupinfo, creationflags, shell, - p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite, - restore_signals, - gid, gids, uid, umask, - start_new_session, process_group) - except: - # Cleanup if the child failed starting. - for f in filter(None, (self.stdin, self.stdout, self.stderr)): - try: - f.close() - except OSError: - pass # Ignore EBADF or other errors. - - if not self._closed_child_pipe_fds: - to_close = [] - if stdin == PIPE: - to_close.append(p2cread) - if stdout == PIPE: - to_close.append(c2pwrite) - if stderr == PIPE: - to_close.append(errwrite) - if hasattr(self, '_devnull'): - to_close.append(self._devnull) - for fd in to_close: - try: - if _mswindows and isinstance(fd, Handle): - fd.Close() - else: - os.close(fd) - except OSError: - pass - - raise - - def __repr__(self): - obj_repr = ( - f"<{self.__class__.__name__}: " - f"returncode: {self.returncode} args: {self.args!r}>" - ) - if len(obj_repr) > 80: - obj_repr = obj_repr[:76] + "...>" - return obj_repr - - __class_getitem__ = classmethod(types.GenericAlias) - - @property - def universal_newlines(self): - # universal_newlines as retained as an alias of text_mode for API - # compatibility. bpo-31756 - return self.text_mode - - @universal_newlines.setter - def universal_newlines(self, universal_newlines): - self.text_mode = bool(universal_newlines) - - def _translate_newlines(self, data, encoding, errors): - data = data.decode(encoding, errors) - return data.replace("\r\n", "\n").replace("\r", "\n") - - def __enter__(self): - return self - - def __exit__(self, exc_type, value, traceback): - if self.stdout: - self.stdout.close() - if self.stderr: - self.stderr.close() - try: # Flushing a BufferedWriter may raise an error - if self.stdin: - self.stdin.close() - finally: - if exc_type == KeyboardInterrupt: - # https://bugs.python.org/issue25942 - # In the case of a KeyboardInterrupt we assume the SIGINT - # was also already sent to our child processes. We can't - # block indefinitely as that is not user friendly. - # If we have not already waited a brief amount of time in - # an interrupted .wait() or .communicate() call, do so here - # for consistency. - if self._sigint_wait_secs > 0: - try: - self._wait(timeout=self._sigint_wait_secs) - except TimeoutExpired: - pass - self._sigint_wait_secs = 0 # Note that this has been done. - return # resume the KeyboardInterrupt - - # Wait for the process to terminate, to avoid zombies. - self.wait() - - def __del__(self, _maxsize=sys.maxsize, _warn=warnings.warn): - if not self._child_created: - # We didn't get to successfully create a child process. 
- return - if self.returncode is None: - # Not reading subprocess exit status creates a zombie process which - # is only destroyed at the parent python process exit - _warn("subprocess %s is still running" % self.pid, - ResourceWarning, source=self) - # In case the child hasn't been waited on, check if it's done. - self._internal_poll(_deadstate=_maxsize) - if self.returncode is None and _active is not None: - # Child is still running, keep us alive until we can wait on it. - _active.append(self) - - def _get_devnull(self): - if not hasattr(self, '_devnull'): - self._devnull = os.open(os.devnull, os.O_RDWR) - return self._devnull - - def _stdin_write(self, input): - if input: - try: - self.stdin.write(input) - except BrokenPipeError: - pass # communicate() must ignore broken pipe errors. - except OSError as exc: - if exc.errno == errno.EINVAL: - # bpo-19612, bpo-30418: On Windows, stdin.write() fails - # with EINVAL if the child process exited or if the child - # process is still running but closed the pipe. - pass - else: - raise - - try: - self.stdin.close() - except BrokenPipeError: - pass # communicate() must ignore broken pipe errors. - except OSError as exc: - if exc.errno == errno.EINVAL: - pass - else: - raise - - def communicate(self, input=None, timeout=None): - """Interact with process: Send data to stdin and close it. - Read data from stdout and stderr, until end-of-file is - reached. Wait for process to terminate. - - The optional "input" argument should be data to be sent to the - child process, or None, if no data should be sent to the child. - communicate() returns a tuple (stdout, stderr). - - By default, all communication is in bytes, and therefore any - "input" should be bytes, and the (stdout, stderr) will be bytes. - If in text mode (indicated by self.text_mode), any "input" should - be a string, and (stdout, stderr) will be strings decoded - according to locale encoding, or by "encoding" if set. Text mode - is triggered by setting any of text, encoding, errors or - universal_newlines. - """ - - if self._communication_started and input: - raise ValueError("Cannot send input after starting communication") - - # Optimization: If we are not worried about timeouts, we haven't - # started communicating, and we have one or zero pipes, using select() - # or threads is unnecessary. - if (timeout is None and not self._communication_started and - [self.stdin, self.stdout, self.stderr].count(None) >= 2): - stdout = None - stderr = None - if self.stdin: - self._stdin_write(input) - elif self.stdout: - stdout = self.stdout.read() - self.stdout.close() - elif self.stderr: - stderr = self.stderr.read() - self.stderr.close() - self.wait() - else: - if timeout is not None: - endtime = _time() + timeout - else: - endtime = None - - try: - stdout, stderr = self._communicate(input, endtime, timeout) - except KeyboardInterrupt: - # https://bugs.python.org/issue25942 - # See the detailed comment in .wait(). - if timeout is not None: - sigint_timeout = min(self._sigint_wait_secs, - self._remaining_time(endtime)) - else: - sigint_timeout = self._sigint_wait_secs - self._sigint_wait_secs = 0 # nothing else should wait. 
- try: - self._wait(timeout=sigint_timeout) - except TimeoutExpired: - pass - raise # resume the KeyboardInterrupt - - finally: - self._communication_started = True - try: - sts = self.wait(timeout=self._remaining_time(endtime)) - except TimeoutExpired as exc: - exc.timeout = timeout - raise - - return (stdout, stderr) - - - def poll(self): - """Check if child process has terminated. Set and return returncode - attribute.""" - return self._internal_poll() - - - def _remaining_time(self, endtime): - """Convenience for _communicate when computing timeouts.""" - if endtime is None: - return None - else: - return endtime - _time() - - - def _check_timeout(self, endtime, orig_timeout, stdout_seq, stderr_seq, - skip_check_and_raise=False): - """Convenience for checking if a timeout has expired.""" - if endtime is None: - return - if skip_check_and_raise or _time() > endtime: - raise TimeoutExpired( - self.args, orig_timeout, - output=b''.join(stdout_seq) if stdout_seq else None, - stderr=b''.join(stderr_seq) if stderr_seq else None) - - - def wait(self, timeout=None): - """Wait for child process to terminate; returns self.returncode.""" - if timeout is not None: - endtime = _time() + timeout - try: - return self._wait(timeout=timeout) - except KeyboardInterrupt: - # https://bugs.python.org/issue25942 - # The first keyboard interrupt waits briefly for the child to - # exit under the common assumption that it also received the ^C - # generated SIGINT and will exit rapidly. - if timeout is not None: - sigint_timeout = min(self._sigint_wait_secs, - self._remaining_time(endtime)) - else: - sigint_timeout = self._sigint_wait_secs - self._sigint_wait_secs = 0 # nothing else should wait. - try: - self._wait(timeout=sigint_timeout) - except TimeoutExpired: - pass - raise # resume the KeyboardInterrupt - - def _close_pipe_fds(self, - p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite): - # self._devnull is not always defined. - devnull_fd = getattr(self, '_devnull', None) - - with contextlib.ExitStack() as stack: - if _mswindows: - if p2cread != -1: - stack.callback(p2cread.Close) - if c2pwrite != -1: - stack.callback(c2pwrite.Close) - if errwrite != -1: - stack.callback(errwrite.Close) - else: - if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd: - stack.callback(os.close, p2cread) - if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd: - stack.callback(os.close, c2pwrite) - if errwrite != -1 and errread != -1 and errwrite != devnull_fd: - stack.callback(os.close, errwrite) - - if devnull_fd is not None: - stack.callback(os.close, devnull_fd) - - # Prevent a double close of these handles/fds from __init__ on error. 
- self._closed_child_pipe_fds = True - - @contextlib.contextmanager - def _on_error_fd_closer(self): - """Helper to ensure file descriptors opened in _get_handles are closed""" - to_close = [] - try: - yield to_close - except: - if hasattr(self, '_devnull'): - to_close.append(self._devnull) - del self._devnull - for fd in to_close: - try: - if _mswindows and isinstance(fd, Handle): - fd.Close() - else: - os.close(fd) - except OSError: - pass - raise - - if _mswindows: - # - # Windows methods - # - def _get_handles(self, stdin, stdout, stderr): - """Construct and return tuple with IO objects: - p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite - """ - if stdin is None and stdout is None and stderr is None: - return (-1, -1, -1, -1, -1, -1) - - p2cread, p2cwrite = -1, -1 - c2pread, c2pwrite = -1, -1 - errread, errwrite = -1, -1 - - with self._on_error_fd_closer() as err_close_fds: - if stdin is None: - p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE) - if p2cread is None: - p2cread, _ = _winapi.CreatePipe(None, 0) - p2cread = Handle(p2cread) - err_close_fds.append(p2cread) - _winapi.CloseHandle(_) - elif stdin == PIPE: - p2cread, p2cwrite = _winapi.CreatePipe(None, 0) - p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite) - err_close_fds.extend((p2cread, p2cwrite)) - elif stdin == DEVNULL: - p2cread = msvcrt.get_osfhandle(self._get_devnull()) - elif isinstance(stdin, int): - p2cread = msvcrt.get_osfhandle(stdin) - else: - # Assuming file-like object - p2cread = msvcrt.get_osfhandle(stdin.fileno()) - p2cread = self._make_inheritable(p2cread) - - if stdout is None: - c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE) - if c2pwrite is None: - _, c2pwrite = _winapi.CreatePipe(None, 0) - c2pwrite = Handle(c2pwrite) - err_close_fds.append(c2pwrite) - _winapi.CloseHandle(_) - elif stdout == PIPE: - c2pread, c2pwrite = _winapi.CreatePipe(None, 0) - c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite) - err_close_fds.extend((c2pread, c2pwrite)) - elif stdout == DEVNULL: - c2pwrite = msvcrt.get_osfhandle(self._get_devnull()) - elif isinstance(stdout, int): - c2pwrite = msvcrt.get_osfhandle(stdout) - else: - # Assuming file-like object - c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) - c2pwrite = self._make_inheritable(c2pwrite) - - if stderr is None: - errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE) - if errwrite is None: - _, errwrite = _winapi.CreatePipe(None, 0) - errwrite = Handle(errwrite) - err_close_fds.append(errwrite) - _winapi.CloseHandle(_) - elif stderr == PIPE: - errread, errwrite = _winapi.CreatePipe(None, 0) - errread, errwrite = Handle(errread), Handle(errwrite) - err_close_fds.extend((errread, errwrite)) - elif stderr == STDOUT: - errwrite = c2pwrite - elif stderr == DEVNULL: - errwrite = msvcrt.get_osfhandle(self._get_devnull()) - elif isinstance(stderr, int): - errwrite = msvcrt.get_osfhandle(stderr) - else: - # Assuming file-like object - errwrite = msvcrt.get_osfhandle(stderr.fileno()) - errwrite = self._make_inheritable(errwrite) - - return (p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - - - def _make_inheritable(self, handle): - """Return a duplicate of handle, which is inheritable""" - h = _winapi.DuplicateHandle( - _winapi.GetCurrentProcess(), handle, - _winapi.GetCurrentProcess(), 0, 1, - _winapi.DUPLICATE_SAME_ACCESS) - return Handle(h) - - - def _filter_handle_list(self, handle_list): - """Filter out console handles that can't be used - in lpAttributeList["handle_list"] and make sure the list - isn't empty. 
This also removes duplicate handles."""
-            # A handle with its lowest two bits set might be a special console
-            # handle that, if passed in lpAttributeList["handle_list"], will
-            # cause it to fail.
-            return list({handle for handle in handle_list
-                         if handle & 0x3 != 0x3
-                         or _winapi.GetFileType(handle) !=
-                            _winapi.FILE_TYPE_CHAR})
-
-
-        def _execute_child(self, args, executable, preexec_fn, close_fds,
-                           pass_fds, cwd, env,
-                           startupinfo, creationflags, shell,
-                           p2cread, p2cwrite,
-                           c2pread, c2pwrite,
-                           errread, errwrite,
-                           unused_restore_signals,
-                           unused_gid, unused_gids, unused_uid,
-                           unused_umask,
-                           unused_start_new_session, unused_process_group):
-            """Execute program (MS Windows version)"""
-
-            assert not pass_fds, "pass_fds not supported on Windows."
-
-            if isinstance(args, str):
-                pass
-            elif isinstance(args, bytes):
-                if shell:
-                    raise TypeError('bytes args is not allowed on Windows')
-                args = list2cmdline([args])
-            elif isinstance(args, os.PathLike):
-                if shell:
-                    raise TypeError('path-like args is not allowed when '
-                                    'shell is true')
-                args = list2cmdline([args])
-            else:
-                args = list2cmdline(args)
-
-            if executable is not None:
-                executable = os.fsdecode(executable)
-
-            # Process startup details
-            if startupinfo is None:
-                startupinfo = STARTUPINFO()
-            else:
-                # bpo-34044: Copy STARTUPINFO since it is modified above,
-                # so the caller can reuse it multiple times.
-                startupinfo = startupinfo.copy()
-
-            use_std_handles = -1 not in (p2cread, c2pwrite, errwrite)
-            if use_std_handles:
-                startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES
-                startupinfo.hStdInput = p2cread
-                startupinfo.hStdOutput = c2pwrite
-                startupinfo.hStdError = errwrite
-
-            attribute_list = startupinfo.lpAttributeList
-            have_handle_list = bool(attribute_list and
-                                    "handle_list" in attribute_list and
-                                    attribute_list["handle_list"])
-
-            # If we were given a handle_list or need to create one
-            if have_handle_list or (use_std_handles and close_fds):
-                if attribute_list is None:
-                    attribute_list = startupinfo.lpAttributeList = {}
-                handle_list = attribute_list["handle_list"] = \
-                    list(attribute_list.get("handle_list", []))
-
-                if use_std_handles:
-                    handle_list += [int(p2cread), int(c2pwrite), int(errwrite)]
-
-                handle_list[:] = self._filter_handle_list(handle_list)
-
-                if handle_list:
-                    if not close_fds:
-                        warnings.warn("startupinfo.lpAttributeList['handle_list'] "
-                                      "overriding close_fds", RuntimeWarning)
-
-                    # When using the handle_list we always request to inherit
-                    # handles but the only handles that will be inherited are
-                    # the ones in the handle_list
-                    close_fds = False
-
-            if shell:
-                startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
-                startupinfo.wShowWindow = _winapi.SW_HIDE
-                if not executable:
-                    # gh-101283: without a fully-qualified path, before Windows
-                    # checks the system directories, it first looks in the
-                    # application directory, and also the current directory if
-                    # NeedCurrentDirectoryForExePathW(ExeName) is true, so try
-                    # to avoid executing unqualified "cmd.exe".
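                    # Illustration with hypothetical values: with ComSpec set
                    # to C:\Windows\System32\cmd.exe and args='dir /b', the
                    # command line built below becomes:
                    #
                    #     C:\Windows\System32\cmd.exe /c "dir /b"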
- comspec = os.environ.get('ComSpec') - if not comspec: - system_root = os.environ.get('SystemRoot', '') - comspec = os.path.join(system_root, 'System32', 'cmd.exe') - if not os.path.isabs(comspec): - raise FileNotFoundError('shell not found: neither %ComSpec% nor %SystemRoot% is set') - if os.path.isabs(comspec): - executable = comspec - else: - comspec = executable - - args = '{} /c "{}"'.format (comspec, args) - - if cwd is not None: - cwd = os.fsdecode(cwd) - - sys.audit("subprocess.Popen", executable, args, cwd, env) - - # Start the process - try: - hp, ht, pid, tid = _winapi.CreateProcess(executable, args, - # no special security - None, None, - int(not close_fds), - creationflags, - env, - cwd, - startupinfo) - finally: - # Child is launched. Close the parent's copy of those pipe - # handles that only the child should have open. You need - # to make sure that no handles to the write end of the - # output pipe are maintained in this process or else the - # pipe will not close when the child process exits and the - # ReadFile will hang. - self._close_pipe_fds(p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - - # Retain the process handle, but close the thread handle - self._child_created = True - self._handle = Handle(hp) - self.pid = pid - _winapi.CloseHandle(ht) - - def _internal_poll(self, _deadstate=None, - _WaitForSingleObject=_winapi.WaitForSingleObject, - _WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0, - _GetExitCodeProcess=_winapi.GetExitCodeProcess): - """Check if child process has terminated. Returns returncode - attribute. - - This method is called by __del__, so it can only refer to objects - in its local scope. - - """ - if self.returncode is None: - if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0: - self.returncode = _GetExitCodeProcess(self._handle) - return self.returncode - - - def _wait(self, timeout): - """Internal implementation of wait() on Windows.""" - if timeout is None: - timeout_millis = _winapi.INFINITE - elif timeout <= 0: - timeout_millis = 0 - else: - timeout_millis = int(timeout * 1000) - if self.returncode is None: - # API note: Returns immediately if timeout_millis == 0. - result = _winapi.WaitForSingleObject(self._handle, - timeout_millis) - if result == _winapi.WAIT_TIMEOUT: - raise TimeoutExpired(self.args, timeout) - self.returncode = _winapi.GetExitCodeProcess(self._handle) - return self.returncode - - - def _readerthread(self, fh, buffer): - buffer.append(fh.read()) - fh.close() - - - def _writerthread(self, input): - self._stdin_write(input) - - - def _communicate(self, input, endtime, orig_timeout): - # Start reader threads feeding into a list hanging off of this - # object, unless they've already been started. - if self.stdout and not hasattr(self, "_stdout_buff"): - self._stdout_buff = [] - self.stdout_thread = \ - threading.Thread(target=self._readerthread, - args=(self.stdout, self._stdout_buff)) - self.stdout_thread.daemon = True - self.stdout_thread.start() - if self.stderr and not hasattr(self, "_stderr_buff"): - self._stderr_buff = [] - self.stderr_thread = \ - threading.Thread(target=self._readerthread, - args=(self.stderr, self._stderr_buff)) - self.stderr_thread.daemon = True - self.stderr_thread.start() - - # Start writer thread to send input to stdin, unless already - # started. The thread writes input and closes stdin when done, - # or continues in the background on timeout. 
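
            # Sketch of the resulting thread layout (names as used above):
            # one daemon thread per captured stream plus one for stdin, each
            # draining or feeding its pipe; the parent thread only join()s
            # with the remaining time, so a timeout surfaces here as
            # TimeoutExpired while the threads keep running in the background.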
- if self.stdin and not hasattr(self, "_stdin_thread"): - self._stdin_thread = \ - threading.Thread(target=self._writerthread, - args=(input,)) - self._stdin_thread.daemon = True - self._stdin_thread.start() - - # Wait for the writer thread, or time out. If we time out, the - # thread remains writing and the fd left open in case the user - # calls communicate again. - if hasattr(self, "_stdin_thread"): - self._stdin_thread.join(self._remaining_time(endtime)) - if self._stdin_thread.is_alive(): - raise TimeoutExpired(self.args, orig_timeout) - - # Wait for the reader threads, or time out. If we time out, the - # threads remain reading and the fds left open in case the user - # calls communicate again. - if self.stdout is not None: - self.stdout_thread.join(self._remaining_time(endtime)) - if self.stdout_thread.is_alive(): - raise TimeoutExpired(self.args, orig_timeout) - if self.stderr is not None: - self.stderr_thread.join(self._remaining_time(endtime)) - if self.stderr_thread.is_alive(): - raise TimeoutExpired(self.args, orig_timeout) - - # Collect the output from and close both pipes, now that we know - # both have been read successfully. - stdout = None - stderr = None - if self.stdout: - stdout = self._stdout_buff - self.stdout.close() - if self.stderr: - stderr = self._stderr_buff - self.stderr.close() - - # All data exchanged. Translate lists into strings. - stdout = stdout[0] if stdout else None - stderr = stderr[0] if stderr else None - - return (stdout, stderr) - - def send_signal(self, sig): - """Send a signal to the process.""" - # Don't signal a process that we know has already died. - if self.returncode is not None: - return - if sig == signal.SIGTERM: - self.terminate() - elif sig == signal.CTRL_C_EVENT: - os.kill(self.pid, signal.CTRL_C_EVENT) - elif sig == signal.CTRL_BREAK_EVENT: - os.kill(self.pid, signal.CTRL_BREAK_EVENT) - else: - raise ValueError("Unsupported signal: {}".format(sig)) - - def terminate(self): - """Terminates the process.""" - # Don't terminate a process that we know has already died. - if self.returncode is not None: - return - try: - _winapi.TerminateProcess(self._handle, 1) - except PermissionError: - # ERROR_ACCESS_DENIED (winerror 5) is received when the - # process already died. 
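                # (GetExitCodeProcess() reports the sentinel STILL_ACTIVE, 259,
                # while the process is running; if the access-denied error was
                # not caused by the process having already exited, the original
                # PermissionError is re-raised below.)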
- rc = _winapi.GetExitCodeProcess(self._handle) - if rc == _winapi.STILL_ACTIVE: - raise - self.returncode = rc - - kill = terminate - - else: - # - # POSIX methods - # - def _get_handles(self, stdin, stdout, stderr): - """Construct and return tuple with IO objects: - p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite - """ - p2cread, p2cwrite = -1, -1 - c2pread, c2pwrite = -1, -1 - errread, errwrite = -1, -1 - - with self._on_error_fd_closer() as err_close_fds: - if stdin is None: - pass - elif stdin == PIPE: - p2cread, p2cwrite = os.pipe() - err_close_fds.extend((p2cread, p2cwrite)) - if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): - fcntl.fcntl(p2cwrite, fcntl.F_SETPIPE_SZ, self.pipesize) - elif stdin == DEVNULL: - p2cread = self._get_devnull() - elif isinstance(stdin, int): - p2cread = stdin - else: - # Assuming file-like object - p2cread = stdin.fileno() - - if stdout is None: - pass - elif stdout == PIPE: - c2pread, c2pwrite = os.pipe() - err_close_fds.extend((c2pread, c2pwrite)) - if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): - fcntl.fcntl(c2pwrite, fcntl.F_SETPIPE_SZ, self.pipesize) - elif stdout == DEVNULL: - c2pwrite = self._get_devnull() - elif isinstance(stdout, int): - c2pwrite = stdout - else: - # Assuming file-like object - c2pwrite = stdout.fileno() - - if stderr is None: - pass - elif stderr == PIPE: - errread, errwrite = os.pipe() - err_close_fds.extend((errread, errwrite)) - if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): - fcntl.fcntl(errwrite, fcntl.F_SETPIPE_SZ, self.pipesize) - elif stderr == STDOUT: - if c2pwrite != -1: - errwrite = c2pwrite - else: # child's stdout is not set, use parent's stdout - errwrite = sys.__stdout__.fileno() - elif stderr == DEVNULL: - errwrite = self._get_devnull() - elif isinstance(stderr, int): - errwrite = stderr - else: - # Assuming file-like object - errwrite = stderr.fileno() - - return (p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - - - def _posix_spawn(self, args, executable, env, restore_signals, close_fds, - p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite): - """Execute program using os.posix_spawn().""" - kwargs = {} - if restore_signals: - # See _Py_RestoreSignals() in Python/pylifecycle.c - sigset = [] - for signame in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'): - signum = getattr(signal, signame, None) - if signum is not None: - sigset.append(signum) - kwargs['setsigdef'] = sigset - - file_actions = [] - for fd in (p2cwrite, c2pread, errread): - if fd != -1: - file_actions.append((os.POSIX_SPAWN_CLOSE, fd)) - for fd, fd2 in ( - (p2cread, 0), - (c2pwrite, 1), - (errwrite, 2), - ): - if fd != -1: - file_actions.append((os.POSIX_SPAWN_DUP2, fd, fd2)) - - if close_fds: - file_actions.append((os.POSIX_SPAWN_CLOSEFROM, 3)) - - if file_actions: - kwargs['file_actions'] = file_actions - - self.pid = os.posix_spawn(executable, args, env, **kwargs) - self._child_created = True - - self._close_pipe_fds(p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - - def _execute_child(self, args, executable, preexec_fn, close_fds, - pass_fds, cwd, env, - startupinfo, creationflags, shell, - p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite, - restore_signals, - gid, gids, uid, umask, - start_new_session, process_group): - """Execute program (POSIX version)""" - - if isinstance(args, (str, bytes)): - args = [args] - elif isinstance(args, os.PathLike): - if shell: - raise TypeError('path-like args is not allowed when ' - 'shell is true') - args = [args] - else: - args = 
list(args) - - if shell: - # On Android the default shell is at '/system/bin/sh'. - unix_shell = ('/system/bin/sh' if - hasattr(sys, 'getandroidapilevel') else '/bin/sh') - args = [unix_shell, "-c"] + args - if executable: - args[0] = executable - - if executable is None: - executable = args[0] - - sys.audit("subprocess.Popen", executable, args, cwd, env) - - if (_USE_POSIX_SPAWN - and os.path.dirname(executable) - and preexec_fn is None - and (not close_fds or _HAVE_POSIX_SPAWN_CLOSEFROM) - and not pass_fds - and cwd is None - and (p2cread == -1 or p2cread > 2) - and (c2pwrite == -1 or c2pwrite > 2) - and (errwrite == -1 or errwrite > 2) - and not start_new_session - and process_group == -1 - and gid is None - and gids is None - and uid is None - and umask < 0): - self._posix_spawn(args, executable, env, restore_signals, close_fds, - p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - return - - orig_executable = executable - - # For transferring possible exec failure from child to parent. - # Data format: "exception name:hex errno:description" - # Pickle is not used; it is complex and involves memory allocation. - errpipe_read, errpipe_write = os.pipe() - # errpipe_write must not be in the standard io 0, 1, or 2 fd range. - low_fds_to_close = [] - while errpipe_write < 3: - low_fds_to_close.append(errpipe_write) - errpipe_write = os.dup(errpipe_write) - for low_fd in low_fds_to_close: - os.close(low_fd) - try: - try: - # We must avoid complex work that could involve - # malloc or free in the child process to avoid - # potential deadlocks, thus we do all this here. - # and pass it to fork_exec() - - if env is not None: - env_list = [] - for k, v in env.items(): - k = os.fsencode(k) - if b'=' in k: - raise ValueError("illegal environment variable name") - env_list.append(k + b'=' + os.fsencode(v)) - else: - env_list = None # Use execv instead of execve. - executable = os.fsencode(executable) - if os.path.dirname(executable): - executable_list = (executable,) - else: - # This matches the behavior of os._execvpe(). 
- executable_list = tuple( - os.path.join(os.fsencode(dir), executable) - for dir in os.get_exec_path(env)) - fds_to_keep = set(pass_fds) - fds_to_keep.add(errpipe_write) - self.pid = _fork_exec( - args, executable_list, - close_fds, tuple(sorted(map(int, fds_to_keep))), - cwd, env_list, - p2cread, p2cwrite, c2pread, c2pwrite, - errread, errwrite, - errpipe_read, errpipe_write, - restore_signals, start_new_session, - process_group, gid, gids, uid, umask, - preexec_fn, _USE_VFORK) - self._child_created = True - finally: - # be sure the FD is closed no matter what - os.close(errpipe_write) - - self._close_pipe_fds(p2cread, p2cwrite, - c2pread, c2pwrite, - errread, errwrite) - - # Wait for exec to fail or succeed; possibly raising an - # exception (limited in size) - errpipe_data = bytearray() - while True: - part = os.read(errpipe_read, 50000) - errpipe_data += part - if not part or len(errpipe_data) > 50000: - break - finally: - # be sure the FD is closed no matter what - os.close(errpipe_read) - - if errpipe_data: - try: - pid, sts = os.waitpid(self.pid, 0) - if pid == self.pid: - self._handle_exitstatus(sts) - else: - self.returncode = sys.maxsize - except ChildProcessError: - pass - - try: - exception_name, hex_errno, err_msg = ( - errpipe_data.split(b':', 2)) - # The encoding here should match the encoding - # written in by the subprocess implementations - # like _posixsubprocess - err_msg = err_msg.decode() - except ValueError: - exception_name = b'SubprocessError' - hex_errno = b'0' - err_msg = 'Bad exception data from child: {!r}'.format( - bytes(errpipe_data)) - child_exception_type = getattr( - builtins, exception_name.decode('ascii'), - SubprocessError) - if issubclass(child_exception_type, OSError) and hex_errno: - errno_num = int(hex_errno, 16) - if err_msg == "noexec:chdir": - err_msg = "" - # The error must be from chdir(cwd). - err_filename = cwd - elif err_msg == "noexec": - err_msg = "" - err_filename = None - else: - err_filename = orig_executable - if errno_num != 0: - err_msg = os.strerror(errno_num) - if err_filename is not None: - raise child_exception_type(errno_num, err_msg, err_filename) - else: - raise child_exception_type(errno_num, err_msg) - raise child_exception_type(err_msg) - - - def _handle_exitstatus(self, sts, _del_safe=_del_safe): - """All callers to this function MUST hold self._waitpid_lock.""" - # This method is called (indirectly) by __del__, so it cannot - # refer to anything outside of its local scope. - if _del_safe.WIFSTOPPED(sts): - self.returncode = -_del_safe.WSTOPSIG(sts) - else: - self.returncode = _del_safe.waitstatus_to_exitcode(sts) - - def _internal_poll(self, _deadstate=None, _del_safe=_del_safe): - """Check if child process has terminated. Returns returncode - attribute. - - This method is called by __del__, so it cannot reference anything - outside of the local scope (nor can any methods it calls). - - """ - if self.returncode is None: - if not self._waitpid_lock.acquire(False): - # Something else is busy calling waitpid. Don't allow two - # at once. We know nothing yet. - return None - try: - if self.returncode is not None: - return self.returncode # Another thread waited. - pid, sts = _del_safe.waitpid(self.pid, _del_safe.WNOHANG) - if pid == self.pid: - self._handle_exitstatus(sts) - except OSError as e: - if _deadstate is not None: - self.returncode = _deadstate - elif e.errno == _del_safe.ECHILD: - # This happens if SIGCLD is set to be ignored or - # waiting for child processes has otherwise been - # disabled for our process. 
This child is dead, we - # can't get the status. - # http://bugs.python.org/issue15756 - self.returncode = 0 - finally: - self._waitpid_lock.release() - return self.returncode - - - def _try_wait(self, wait_flags): - """All callers to this function MUST hold self._waitpid_lock.""" - try: - (pid, sts) = os.waitpid(self.pid, wait_flags) - except ChildProcessError: - # This happens if SIGCLD is set to be ignored or waiting - # for child processes has otherwise been disabled for our - # process. This child is dead, we can't get the status. - pid = self.pid - sts = 0 - return (pid, sts) - - - def _wait(self, timeout): - """Internal implementation of wait() on POSIX.""" - if self.returncode is not None: - return self.returncode - - if timeout is not None: - endtime = _time() + timeout - # Enter a busy loop if we have a timeout. This busy loop was - # cribbed from Lib/threading.py in Thread.wait() at r71065. - delay = 0.0005 # 500 us -> initial delay of 1 ms - while True: - if self._waitpid_lock.acquire(False): - try: - if self.returncode is not None: - break # Another thread waited. - (pid, sts) = self._try_wait(os.WNOHANG) - assert pid == self.pid or pid == 0 - if pid == self.pid: - self._handle_exitstatus(sts) - break - finally: - self._waitpid_lock.release() - remaining = self._remaining_time(endtime) - if remaining <= 0: - raise TimeoutExpired(self.args, timeout) - delay = min(delay * 2, remaining, .05) - time.sleep(delay) - else: - while self.returncode is None: - with self._waitpid_lock: - if self.returncode is not None: - break # Another thread waited. - (pid, sts) = self._try_wait(0) - # Check the pid and loop as waitpid has been known to - # return 0 even without WNOHANG in odd situations. - # http://bugs.python.org/issue14396. - if pid == self.pid: - self._handle_exitstatus(sts) - return self.returncode - - - def _communicate(self, input, endtime, orig_timeout): - if self.stdin and not self._communication_started: - # Flush stdio buffer. This might block, if the user has - # been writing to .stdin in an uncontrolled fashion. - try: - self.stdin.flush() - except BrokenPipeError: - pass # communicate() must ignore BrokenPipeError. - except ValueError: - # ignore ValueError: I/O operation on closed file. - if not self.stdin.closed: - raise - if not input: - try: - self.stdin.close() - except BrokenPipeError: - pass # communicate() must ignore BrokenPipeError. - - stdout = None - stderr = None - - # Only create this mapping if we haven't already. 
-            if not self._communication_started:
-                self._fileobj2output = {}
-                if self.stdout:
-                    self._fileobj2output[self.stdout] = []
-                if self.stderr:
-                    self._fileobj2output[self.stderr] = []
-
-            if self.stdout:
-                stdout = self._fileobj2output[self.stdout]
-            if self.stderr:
-                stderr = self._fileobj2output[self.stderr]
-
-            self._save_input(input)
-
-            if self._input:
-                if not isinstance(self._input, memoryview):
-                    input_view = memoryview(self._input)
-                else:
-                    input_view = self._input.cast("b")  # byte input required
-
-            with _PopenSelector() as selector:
-                if self.stdin and not self.stdin.closed and self._input:
-                    selector.register(self.stdin, selectors.EVENT_WRITE)
-                if self.stdout and not self.stdout.closed:
-                    selector.register(self.stdout, selectors.EVENT_READ)
-                if self.stderr and not self.stderr.closed:
-                    selector.register(self.stderr, selectors.EVENT_READ)
-
-                while selector.get_map():
-                    timeout = self._remaining_time(endtime)
-                    if timeout is not None and timeout <= 0:
-                        self._check_timeout(endtime, orig_timeout,
-                                            stdout, stderr,
-                                            skip_check_and_raise=True)
-                        raise RuntimeError(  # Impossible :)
-                            '_check_timeout(..., skip_check_and_raise=True) '
-                            'failed to raise TimeoutExpired.')
-
-                    ready = selector.select(timeout)
-                    self._check_timeout(endtime, orig_timeout, stdout, stderr)
-
-                    # XXX Rewrite these to use non-blocking I/O on the file
-                    # objects; they are no longer using C stdio!
-
-                    for key, events in ready:
-                        if key.fileobj is self.stdin:
-                            chunk = input_view[self._input_offset :
-                                               self._input_offset + _PIPE_BUF]
-                            try:
-                                self._input_offset += os.write(key.fd, chunk)
-                            except BrokenPipeError:
-                                selector.unregister(key.fileobj)
-                                key.fileobj.close()
-                            else:
-                                if self._input_offset >= len(input_view):
-                                    selector.unregister(key.fileobj)
-                                    key.fileobj.close()
-                        elif key.fileobj in (self.stdout, self.stderr):
-                            data = os.read(key.fd, 32768)
-                            if not data:
-                                selector.unregister(key.fileobj)
-                                key.fileobj.close()
-                            self._fileobj2output[key.fileobj].append(data)
-            try:
-                self.wait(timeout=self._remaining_time(endtime))
-            except TimeoutExpired as exc:
-                exc.timeout = orig_timeout
-                raise
-
-            # All data exchanged.  Translate lists into strings.
-            if stdout is not None:
-                stdout = b''.join(stdout)
-            if stderr is not None:
-                stderr = b''.join(stderr)
-
-            # Translate newlines, if requested.
-            # This also turns bytes into strings.
-            if self.text_mode:
-                if stdout is not None:
-                    stdout = self._translate_newlines(stdout,
-                                                      self.stdout.encoding,
-                                                      self.stdout.errors)
-                if stderr is not None:
-                    stderr = self._translate_newlines(stderr,
-                                                      self.stderr.encoding,
-                                                      self.stderr.errors)
-
-            return (stdout, stderr)
-
-
-        def _save_input(self, input):
-            # This method is called from the _communicate_with_*() methods
-            # so that if we time out while communicating, we can continue
-            # sending input if we retry.
-            if self.stdin and self._input is None:
-                self._input_offset = 0
-                self._input = input
-                if input is not None and self.text_mode:
-                    self._input = self._input.encode(self.stdin.encoding,
-                                                     self.stdin.errors)
-
-
-        def send_signal(self, sig):
-            """Send a signal to the process."""
-            # bpo-38630: Polling reduces the risk of sending a signal to the
-            # wrong process if the process completed, the Popen.returncode
-            # attribute is still None, and the pid has been reassigned
-            # (recycled) to a different process. This race condition can
-            # happen in two cases.
-            #
-            # Case 1. Thread A calls Popen.poll(), thread B calls
-            # Popen.send_signal(). In thread A, waitpid() succeeds and returns
-            # the exit status. 
Thread B calls kill() because poll() in thread A
-            # did not set returncode yet. Calling poll() in thread B prevents
-            # the race condition thanks to Popen._waitpid_lock.
-            #
-            # Case 2. waitpid(pid, 0) has been called directly, without
-            # using Popen methods: returncode is still None in this case.
-            # Calling Popen.poll() will set returncode to a default value,
-            # since waitpid() fails with ProcessLookupError.
-            self.poll()
-            if self.returncode is not None:
-                # Skip signalling a process that we know has already died.
-                return
-
-            # The race condition can still occur if the process exits
-            # between the returncode test
-            # and the kill() call.
-            try:
-                os.kill(self.pid, sig)
-            except ProcessLookupError:
-                # Suppress the race condition error; bpo-40550.
-                pass
-
-        def terminate(self):
-            """Terminate the process with SIGTERM.
-            """
-            self.send_signal(signal.SIGTERM)
-
-        def kill(self):
-            """Kill the process with SIGKILL.
-            """
-            self.send_signal(signal.SIGKILL)
diff --git a/Python313_13_x86_Template/Lib/symtable.py b/Python313_13_x86_Template/Lib/symtable.py
deleted file mode 100644
index 672ec0ce..00000000
--- a/Python313_13_x86_Template/Lib/symtable.py
+++ /dev/null
@@ -1,414 +0,0 @@
-"""Interface to the compiler's internal symbol tables"""
-
-import _symtable
-from _symtable import (USE, DEF_GLOBAL, DEF_NONLOCAL, DEF_LOCAL, DEF_PARAM,
-     DEF_IMPORT, DEF_BOUND, DEF_ANNOT, SCOPE_OFF, SCOPE_MASK, FREE,
-     LOCAL, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL)
-
-import weakref
-from enum import StrEnum
-
-__all__ = ["symtable", "SymbolTableType", "SymbolTable", "Class", "Function", "Symbol"]
-
-def symtable(code, filename, compile_type):
-    """ Return the toplevel *SymbolTable* for the source code.
-
-    *filename* is the name of the file with the code
-    and *compile_type* is the *compile()* mode argument.
-    """
-    top = _symtable.symtable(code, filename, compile_type)
-    return _newSymbolTable(top, filename)
-
-class SymbolTableFactory:
-    def __init__(self):
-        self.__memo = weakref.WeakValueDictionary()
-
-    def new(self, table, filename):
-        if table.type == _symtable.TYPE_FUNCTION:
-            return Function(table, filename)
-        if table.type == _symtable.TYPE_CLASS:
-            return Class(table, filename)
-        return SymbolTable(table, filename)
-
-    def __call__(self, table, filename):
-        key = table, filename
-        obj = self.__memo.get(key, None)
-        if obj is None:
-            obj = self.__memo[key] = self.new(table, filename)
-        return obj
-
-_newSymbolTable = SymbolTableFactory()
-
-
-class SymbolTableType(StrEnum):
-    MODULE = "module"
-    FUNCTION = "function"
-    CLASS = "class"
-    ANNOTATION = "annotation"
-    TYPE_ALIAS = "type alias"
-    TYPE_PARAMETERS = "type parameters"
-    TYPE_VARIABLE = "type variable"
-
-
-class SymbolTable:
-
-    def __init__(self, raw_table, filename):
-        self._table = raw_table
-        self._filename = filename
-        self._symbols = {}
-
-    def __repr__(self):
-        if self.__class__ == SymbolTable:
-            kind = ""
-        else:
-            kind = "%s " % self.__class__.__name__
-
-        if self._table.name == "top":
-            return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
-        else:
-            return "<{0}SymbolTable for {1} in {2}>".format(kind,
-                                                            self._table.name,
-                                                            self._filename)
-
-    def get_type(self):
-        """Return the type of the symbol table.
-
-        The value returned is one of the values in
-        the ``SymbolTableType`` enumeration.
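
        For example (illustrative):

        >>> import symtable
        >>> symtable.symtable("x = 1", "example.py", "exec").get_type()
        <SymbolTableType.MODULE: 'module'>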
- """ - if self._table.type == _symtable.TYPE_MODULE: - return SymbolTableType.MODULE - if self._table.type == _symtable.TYPE_FUNCTION: - return SymbolTableType.FUNCTION - if self._table.type == _symtable.TYPE_CLASS: - return SymbolTableType.CLASS - if self._table.type == _symtable.TYPE_ANNOTATION: - return SymbolTableType.ANNOTATION - if self._table.type == _symtable.TYPE_TYPE_ALIAS: - return SymbolTableType.TYPE_ALIAS - if self._table.type == _symtable.TYPE_TYPE_PARAMETERS: - return SymbolTableType.TYPE_PARAMETERS - if self._table.type == _symtable.TYPE_TYPE_VARIABLE: - return SymbolTableType.TYPE_VARIABLE - assert False, f"unexpected type: {self._table.type}" - - def get_id(self): - """Return an identifier for the table. - """ - return self._table.id - - def get_name(self): - """Return the table's name. - - This corresponds to the name of the class, function - or 'top' if the table is for a class, function or - global respectively. - """ - return self._table.name - - def get_lineno(self): - """Return the number of the first line in the - block for the table. - """ - return self._table.lineno - - def is_optimized(self): - """Return *True* if the locals in the table - are optimizable. - """ - return bool(self._table.type == _symtable.TYPE_FUNCTION) - - def is_nested(self): - """Return *True* if the block is a nested class - or function.""" - return bool(self._table.nested) - - def has_children(self): - """Return *True* if the block has nested namespaces. - """ - return bool(self._table.children) - - def get_identifiers(self): - """Return a view object containing the names of symbols in the table. - """ - return self._table.symbols.keys() - - def lookup(self, name): - """Lookup a *name* in the table. - - Returns a *Symbol* instance. - """ - sym = self._symbols.get(name) - if sym is None: - flags = self._table.symbols[name] - namespaces = self.__check_children(name) - module_scope = (self._table.name == "top") - sym = self._symbols[name] = Symbol(name, flags, namespaces, - module_scope=module_scope) - return sym - - def get_symbols(self): - """Return a list of *Symbol* instances for - names in the table. - """ - return [self.lookup(ident) for ident in self.get_identifiers()] - - def __check_children(self, name): - return [_newSymbolTable(st, self._filename) - for st in self._table.children - if st.name == name] - - def get_children(self): - """Return a list of the nested symbol tables. - """ - return [_newSymbolTable(st, self._filename) - for st in self._table.children] - - -class Function(SymbolTable): - - # Default values for instance variables - __params = None - __locals = None - __frees = None - __globals = None - __nonlocals = None - - def __idents_matching(self, test_func): - return tuple(ident for ident in self.get_identifiers() - if test_func(self._table.symbols[ident])) - - def get_parameters(self): - """Return a tuple of parameters to the function. - """ - if self.__params is None: - self.__params = self.__idents_matching(lambda x:x & DEF_PARAM) - return self.__params - - def get_locals(self): - """Return a tuple of locals in the function. - """ - if self.__locals is None: - locs = (LOCAL, CELL) - test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs - self.__locals = self.__idents_matching(test) - return self.__locals - - def get_globals(self): - """Return a tuple of globals in the function. 
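
        For example (illustrative), a name that is referenced but never
        bound in the function is reported as a global:

        >>> import symtable
        >>> mod = symtable.symtable("def f():\n    return x", "example.py", "exec")
        >>> mod.get_children()[0].get_globals()
        ('x',)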
-        """
-        if self.__globals is None:
-            glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
-            test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob
-            self.__globals = self.__idents_matching(test)
-        return self.__globals
-
-    def get_nonlocals(self):
-        """Return a tuple of nonlocals in the function.
-        """
-        if self.__nonlocals is None:
-            self.__nonlocals = self.__idents_matching(lambda x:x & DEF_NONLOCAL)
-        return self.__nonlocals
-
-    def get_frees(self):
-        """Return a tuple of free variables in the function.
-        """
-        if self.__frees is None:
-            is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE
-            self.__frees = self.__idents_matching(is_free)
-        return self.__frees
-
-
-class Class(SymbolTable):
-
-    __methods = None
-
-    def get_methods(self):
-        """Return a tuple of methods declared in the class.
-        """
-        if self.__methods is None:
-            d = {}
-
-            def is_local_symbol(ident):
-                flags = self._table.symbols.get(ident, 0)
-                return ((flags >> SCOPE_OFF) & SCOPE_MASK) == LOCAL
-
-            for st in self._table.children:
-                # pick the function-like symbols that are local identifiers
-                if is_local_symbol(st.name):
-                    match st.type:
-                        case _symtable.TYPE_FUNCTION:
-                            # generators are of type TYPE_FUNCTION with a ".0"
-                            # parameter as a first parameter (which makes them
-                            # distinguishable from a function named 'genexpr')
-                            if st.name == 'genexpr' and '.0' in st.varnames:
-                                continue
-                            d[st.name] = 1
-                        case _symtable.TYPE_TYPE_PARAMETERS:
-                            # Get the function-def block in the annotation
-                            # scope 'st' with the same identifier, if any.
-                            scope_name = st.name
-                            for c in st.children:
-                                if c.name == scope_name and c.type == _symtable.TYPE_FUNCTION:
-                                    # A generic generator of type TYPE_FUNCTION
-                                    # cannot be a direct child of 'st' (but it
-                                    # can be a descendant), e.g.:
-                                    #
-                                    # class A:
-                                    #     type genexpr[genexpr] = (x for x in [])
-                                    assert scope_name != 'genexpr' or '.0' not in c.varnames
-                                    d[scope_name] = 1
-                                    break
-            self.__methods = tuple(d)
-        return self.__methods
-
-
-class Symbol:
-
-    def __init__(self, name, flags, namespaces=None, *, module_scope=False):
-        self.__name = name
-        self.__flags = flags
-        self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK  # like PyST_GetScope()
-        self.__namespaces = namespaces or ()
-        self.__module_scope = module_scope
-
-    def __repr__(self):
-        flags_str = '|'.join(self._flags_str())
-        return f'<symbol {self.__name!r}: {self._scope_str()}, {flags_str}>'
-
-    def _scope_str(self):
-        return _scopes_value_to_name.get(self.__scope) or str(self.__scope)
-
-    def _flags_str(self):
-        for flagname, flagvalue in _flags:
-            if self.__flags & flagvalue == flagvalue:
-                yield flagname
-
-    def get_name(self):
-        """Return a name of a symbol.
-        """
-        return self.__name
-
-    def is_referenced(self):
-        """Return *True* if the symbol is used in
-        its block.
-        """
-        return bool(self.__flags & _symtable.USE)
-
-    def is_parameter(self):
-        """Return *True* if the symbol is a parameter.
-        """
-        return bool(self.__flags & DEF_PARAM)
-
-    def is_global(self):
-        """Return *True* if the symbol is global.
-        """
-        return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
-                    or (self.__module_scope and self.__flags & DEF_BOUND))
-
-    def is_nonlocal(self):
-        """Return *True* if the symbol is nonlocal."""
-        return bool(self.__flags & DEF_NONLOCAL)
-
-    def is_declared_global(self):
-        """Return *True* if the symbol is declared global
-        with a global statement."""
-        return bool(self.__scope == GLOBAL_EXPLICIT)
-
-    def is_local(self):
-        """Return *True* if the symbol is local.
-        """
-        return bool(self.__scope in (LOCAL, CELL)
-                    or (self.__module_scope and self.__flags & DEF_BOUND))
-
-    def is_annotated(self):
-        """Return *True* if the symbol is annotated.
-        """
-        return bool(self.__flags & DEF_ANNOT)
-
-    def is_free(self):
-        """Return *True* if a referenced symbol is
-        not assigned to.
-        """
-        return bool(self.__scope == FREE)
-
-    def is_imported(self):
-        """Return *True* if the symbol is created from
-        an import statement.
-        """
-        return bool(self.__flags & DEF_IMPORT)
-
-    def is_assigned(self):
-        """Return *True* if a symbol is assigned to."""
-        return bool(self.__flags & DEF_LOCAL)
-
-    def is_namespace(self):
-        """Returns *True* if name binding introduces new namespace.
-
-        If the name is used as the target of a function or class
-        statement, this will be true.
-
-        Note that a single name can be bound to multiple objects. If
-        is_namespace() is true, the name may also be bound to other
-        objects, like an int or list, that does not introduce a new
-        namespace.
-        """
-        return bool(self.__namespaces)
-
-    def get_namespaces(self):
-        """Return a list of namespaces bound to this name"""
-        return self.__namespaces
-
-    def get_namespace(self):
-        """Return the single namespace bound to this name.
-
-        Raises ValueError if the name is bound to multiple namespaces
-        or no namespace.
-        """
-        if len(self.__namespaces) == 0:
-            raise ValueError("name is not bound to any namespaces")
-        elif len(self.__namespaces) > 1:
-            raise ValueError("name is bound to multiple namespaces")
-        else:
-            return self.__namespaces[0]
-
-
-_flags = [('USE', USE)]
-_flags.extend(kv for kv in globals().items() if kv[0].startswith('DEF_'))
-_scopes_names = ('FREE', 'LOCAL', 'GLOBAL_IMPLICIT', 'GLOBAL_EXPLICIT', 'CELL')
-_scopes_value_to_name = {globals()[n]: n for n in _scopes_names}
-
-
-def main(args):
-    import sys
-    def print_symbols(table, level=0):
-        indent = '    ' * level
-        nested = "nested " if table.is_nested() else ""
-        if table.get_type() == 'module':
-            what = f'from file {table._filename!r}'
-        else:
-            what = f'{table.get_name()!r}'
-        print(f'{indent}symbol table for {nested}{table.get_type()} {what}:')
-        for ident in table.get_identifiers():
-            symbol = table.lookup(ident)
-            flags = ', '.join(symbol._flags_str()).lower()
-            print(f'    {indent}{symbol._scope_str().lower()} symbol {symbol.get_name()!r}: {flags}')
-        print()
-
-        for table2 in table.get_children():
-            print_symbols(table2, level + 1)
-
-    for filename in args or ['-']:
-        if filename == '-':
-            src = sys.stdin.read()
-            filename = '<stdin>'
-        else:
-            with open(filename, 'rb') as f:
-                src = f.read()
-        mod = symtable(src, filename, 'exec')
-        print_symbols(mod)
-
-
-if __name__ == "__main__":
-    import sys
-    main(sys.argv[1:])
diff --git a/Python313_13_x86_Template/Lib/sysconfig/__init__.py b/Python313_13_x86_Template/Lib/sysconfig/__init__.py
deleted file mode 100644
index 43edebce..00000000
--- a/Python313_13_x86_Template/Lib/sysconfig/__init__.py
+++ /dev/null
@@ -1,734 +0,0 @@
-"""Access to Python's configuration information."""
-
-import os
-import sys
-import threading
-from os.path import realpath
-
-__all__ = [
-    'get_config_h_filename',
-    'get_config_var',
-    'get_config_vars',
-    'get_makefile_filename',
-    'get_path',
-    'get_path_names',
-    'get_paths',
-    'get_platform',
-    'get_python_version',
-    'get_scheme_names',
-    'parse_config_h',
-]
-
-# Keys for get_config_var() that are never converted to Python integers.
-_ALWAYS_STR = { - 'IPHONEOS_DEPLOYMENT_TARGET', - 'MACOSX_DEPLOYMENT_TARGET', -} - -_INSTALL_SCHEMES = { - 'posix_prefix': { - 'stdlib': '{installed_base}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'platstdlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'purelib': '{base}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'include': - '{installed_base}/include/{implementation_lower}{py_version_short}{abiflags}', - 'platinclude': - '{installed_platbase}/include/{implementation_lower}{py_version_short}{abiflags}', - 'scripts': '{base}/bin', - 'data': '{base}', - }, - 'posix_home': { - 'stdlib': '{installed_base}/lib/{implementation_lower}', - 'platstdlib': '{base}/lib/{implementation_lower}', - 'purelib': '{base}/lib/{implementation_lower}', - 'platlib': '{base}/lib/{implementation_lower}', - 'include': '{installed_base}/include/{implementation_lower}', - 'platinclude': '{installed_base}/include/{implementation_lower}', - 'scripts': '{base}/bin', - 'data': '{base}', - }, - 'nt': { - 'stdlib': '{installed_base}/Lib', - 'platstdlib': '{base}/Lib', - 'purelib': '{base}/Lib/site-packages', - 'platlib': '{base}/Lib/site-packages', - 'include': '{installed_base}/Include', - 'platinclude': '{installed_base}/Include', - 'scripts': '{base}/Scripts', - 'data': '{base}', - }, - - # Downstream distributors can overwrite the default install scheme. - # This is done to support downstream modifications where distributors change - # the installation layout (eg. different site-packages directory). - # So, distributors will change the default scheme to one that correctly - # represents their layout. - # This presents an issue for projects/people that need to bootstrap virtual - # environments, like virtualenv. As distributors might now be customizing - # the default install scheme, there is no guarantee that the information - # returned by sysconfig.get_default_scheme/get_paths is correct for - # a virtual environment, the only guarantee we have is that it is correct - # for the *current* environment. When bootstrapping a virtual environment, - # we need to know its layout, so that we can place the files in the - # correct locations. - # The "*_venv" install scheme is a scheme to bootstrap virtual environments, - # essentially identical to the default posix_prefix/nt schemes. 
- # Downstream distributors who patch posix_prefix/nt scheme are encouraged to - # leave the following schemes unchanged - 'posix_venv': { - 'stdlib': '{installed_base}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'platstdlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'purelib': '{base}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'include': - '{installed_base}/include/{implementation_lower}{py_version_short}{abiflags}', - 'platinclude': - '{installed_platbase}/include/{implementation_lower}{py_version_short}{abiflags}', - 'scripts': '{base}/bin', - 'data': '{base}', - }, - 'nt_venv': { - 'stdlib': '{installed_base}/Lib', - 'platstdlib': '{base}/Lib', - 'purelib': '{base}/Lib/site-packages', - 'platlib': '{base}/Lib/site-packages', - 'include': '{installed_base}/Include', - 'platinclude': '{installed_base}/Include', - 'scripts': '{base}/Scripts', - 'data': '{base}', - }, - } - -# For the OS-native venv scheme, we essentially provide an alias: -if os.name == 'nt': - _INSTALL_SCHEMES['venv'] = _INSTALL_SCHEMES['nt_venv'] -else: - _INSTALL_SCHEMES['venv'] = _INSTALL_SCHEMES['posix_venv'] - -def _get_implementation(): - return 'Python' - -# NOTE: site.py has copy of this function. -# Sync it when modify this function. -def _getuserbase(): - env_base = os.environ.get("PYTHONUSERBASE", None) - if env_base: - return env_base - - # Emscripten, iOS, tvOS, VxWorks, WASI, and watchOS have no home directories - if sys.platform in {"emscripten", "ios", "tvos", "vxworks", "wasi", "watchos"}: - return None - - def joinuser(*args): - return os.path.expanduser(os.path.join(*args)) - - if os.name == "nt": - base = os.environ.get("APPDATA") or "~" - return joinuser(base, _get_implementation()) - - if sys.platform == "darwin" and sys._framework: - return joinuser("~", "Library", sys._framework, - f"{sys.version_info[0]}.{sys.version_info[1]}") - - return joinuser("~", ".local") - -_HAS_USER_BASE = (_getuserbase() is not None) - -if _HAS_USER_BASE: - _INSTALL_SCHEMES |= { - # NOTE: When modifying "purelib" scheme, update site._get_path() too. 
- 'nt_user': { - 'stdlib': '{userbase}/{implementation}{py_version_nodot_plat}', - 'platstdlib': '{userbase}/{implementation}{py_version_nodot_plat}', - 'purelib': '{userbase}/{implementation}{py_version_nodot_plat}/site-packages', - 'platlib': '{userbase}/{implementation}{py_version_nodot_plat}/site-packages', - 'include': '{userbase}/{implementation}{py_version_nodot_plat}/Include', - 'scripts': '{userbase}/{implementation}{py_version_nodot_plat}/Scripts', - 'data': '{userbase}', - }, - 'posix_user': { - 'stdlib': '{userbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'platstdlib': '{userbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', - 'purelib': '{userbase}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'platlib': '{userbase}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', - 'include': '{userbase}/include/{implementation_lower}{py_version_short}{abi_thread}', - 'scripts': '{userbase}/bin', - 'data': '{userbase}', - }, - 'osx_framework_user': { - 'stdlib': '{userbase}/lib/{implementation_lower}', - 'platstdlib': '{userbase}/lib/{implementation_lower}', - 'purelib': '{userbase}/lib/{implementation_lower}/site-packages', - 'platlib': '{userbase}/lib/{implementation_lower}/site-packages', - 'include': '{userbase}/include/{implementation_lower}{py_version_short}', - 'scripts': '{userbase}/bin', - 'data': '{userbase}', - }, - } - -_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include', - 'scripts', 'data') - -_PY_VERSION = sys.version.split()[0] -_PY_VERSION_SHORT = f'{sys.version_info[0]}.{sys.version_info[1]}' -_PY_VERSION_SHORT_NO_DOT = f'{sys.version_info[0]}{sys.version_info[1]}' -_BASE_PREFIX = os.path.normpath(sys.base_prefix) -_BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) -# Mutex guarding initialization of _CONFIG_VARS. -_CONFIG_VARS_LOCK = threading.RLock() -_CONFIG_VARS = None -# True iff _CONFIG_VARS has been fully initialized. -_CONFIG_VARS_INITIALIZED = False -_USER_BASE = None - - -def _safe_realpath(path): - try: - return realpath(path) - except OSError: - return path - -if sys.executable: - _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) -else: - # sys.executable can be empty if argv[0] has been changed and Python is - # unable to retrieve the real program name - _PROJECT_BASE = _safe_realpath(os.getcwd()) - -# In a virtual environment, `sys._home` gives us the target directory -# `_PROJECT_BASE` for the executable that created it when the virtual -# python is an actual executable ('venv --copies' or Windows). -_sys_home = getattr(sys, '_home', None) -if _sys_home: - _PROJECT_BASE = _sys_home - -if os.name == 'nt': - # In a source build, the executable is in a subdirectory of the root - # that we want (\PCbuild\). - # `_BASE_PREFIX` is used as the base installation is where the source - # will be. The realpath is needed to prevent mount point confusion - # that can occur with just string comparisons. - if _safe_realpath(_PROJECT_BASE).startswith( - _safe_realpath(f'{_BASE_PREFIX}\\PCbuild')): - _PROJECT_BASE = _BASE_PREFIX - -# set for cross builds -if "_PYTHON_PROJECT_BASE" in os.environ: - _PROJECT_BASE = _safe_realpath(os.environ["_PYTHON_PROJECT_BASE"]) - -def is_python_build(check_home=None): - if check_home is not None: - import warnings - warnings.warn( - ( - 'The check_home argument of sysconfig.is_python_build is ' - 'deprecated and its value is ignored. ' - 'It will be removed in Python 3.15.' 
- ), - DeprecationWarning, - stacklevel=2, - ) - for fn in ("Setup", "Setup.local"): - if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): - return True - return False - -_PYTHON_BUILD = is_python_build() - -if _PYTHON_BUILD: - for scheme in ('posix_prefix', 'posix_home'): - # On POSIX-y platforms, Python will: - # - Build from .h files in 'headers' (which is only added to the - # scheme when building CPython) - # - Install .h files to 'include' - scheme = _INSTALL_SCHEMES[scheme] - scheme['headers'] = scheme['include'] - scheme['include'] = '{srcdir}/Include' - scheme['platinclude'] = '{projectbase}/.' - del scheme - - -def _subst_vars(s, local_vars): - try: - return s.format(**local_vars) - except KeyError as var: - try: - return s.format(**os.environ) - except KeyError: - raise AttributeError(f'{var}') from None - -def _extend_dict(target_dict, other_dict): - target_keys = target_dict.keys() - for key, value in other_dict.items(): - if key in target_keys: - continue - target_dict[key] = value - - -def _expand_vars(scheme, vars): - res = {} - if vars is None: - vars = {} - _extend_dict(vars, get_config_vars()) - if os.name == 'nt': - # On Windows we want to substitute 'lib' for schemes rather - # than the native value (without modifying vars, in case it - # was passed in) - vars = vars | {'platlibdir': 'lib'} - - for key, value in _INSTALL_SCHEMES[scheme].items(): - if os.name in ('posix', 'nt'): - value = os.path.expanduser(value) - res[key] = os.path.normpath(_subst_vars(value, vars)) - return res - - -def _get_preferred_schemes(): - if os.name == 'nt': - return { - 'prefix': 'nt', - 'home': 'posix_home', - 'user': 'nt_user', - } - if sys.platform == 'darwin' and sys._framework: - return { - 'prefix': 'posix_prefix', - 'home': 'posix_home', - 'user': 'osx_framework_user', - } - - return { - 'prefix': 'posix_prefix', - 'home': 'posix_home', - 'user': 'posix_user', - } - - -def get_preferred_scheme(key): - if key == 'prefix' and sys.prefix != sys.base_prefix: - return 'venv' - scheme = _get_preferred_schemes()[key] - if scheme not in _INSTALL_SCHEMES: - raise ValueError( - f"{key!r} returned {scheme!r}, which is not a valid scheme " - f"on this platform" - ) - return scheme - - -def get_default_scheme(): - return get_preferred_scheme('prefix') - - -def get_makefile_filename(): - """Return the path of the Makefile.""" - if _PYTHON_BUILD: - return os.path.join(_PROJECT_BASE, "Makefile") - if hasattr(sys, 'abiflags'): - config_dir_name = f'config-{_PY_VERSION_SHORT}{sys.abiflags}' - else: - config_dir_name = 'config' - if hasattr(sys.implementation, '_multiarch'): - config_dir_name += f'-{sys.implementation._multiarch}' - return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') - - -def _get_sysconfigdata_name(): - multiarch = getattr(sys.implementation, '_multiarch', '') - return os.environ.get( - '_PYTHON_SYSCONFIGDATA_NAME', - f'_sysconfigdata_{sys.abiflags}_{sys.platform}_{multiarch}', - ) - -def _init_posix(vars): - """Initialize the module as appropriate for POSIX systems.""" - # _sysconfigdata is generated at build time, see _generate_posix_vars() - name = _get_sysconfigdata_name() - - # For cross builds, the path to the target's sysconfigdata must be specified - # so it can be imported. It cannot be in PYTHONPATH, as foreign modules in - # sys.path can cause crashes when loaded by the host interpreter. - # Rely on truthiness as a valueless env variable is still an empty string. - # See OS X note in _generate_posix_vars re _sysconfigdata. 
- if (path := os.environ.get('_PYTHON_SYSCONFIGDATA_PATH')): - from importlib.machinery import FileFinder, SourceFileLoader, SOURCE_SUFFIXES - from importlib.util import module_from_spec - spec = FileFinder(path, (SourceFileLoader, SOURCE_SUFFIXES)).find_spec(name) - _temp = module_from_spec(spec) - spec.loader.exec_module(_temp) - else: - _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0) - build_time_vars = _temp.build_time_vars - vars.update(build_time_vars) - -def _init_non_posix(vars): - """Initialize the module as appropriate for NT""" - # set basic install directories - import _winapi - import _sysconfig - vars['LIBDEST'] = get_path('stdlib') - vars['BINLIBDEST'] = get_path('platstdlib') - vars['INCLUDEPY'] = get_path('include') - - # Add EXT_SUFFIX, SOABI, and Py_GIL_DISABLED - vars.update(_sysconfig.config_vars()) - - vars['LIBDIR'] = _safe_realpath(os.path.join(get_config_var('installed_base'), 'libs')) - if hasattr(sys, 'dllhandle'): - dllhandle = _winapi.GetModuleFileName(sys.dllhandle) - vars['LIBRARY'] = os.path.basename(_safe_realpath(dllhandle)) - vars['LDLIBRARY'] = vars['LIBRARY'] - vars['EXE'] = '.exe' - vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT - vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) - vars['TZPATH'] = '' - -# -# public APIs -# - - -def parse_config_h(fp, vars=None): - """Parse a config.h-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. - """ - if vars is None: - vars = {} - import re - define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") - undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") - - while True: - line = fp.readline() - if not line: - break - m = define_rx.match(line) - if m: - n, v = m.group(1, 2) - try: - if n in _ALWAYS_STR: - raise ValueError - v = int(v) - except ValueError: - pass - vars[n] = v - else: - m = undef_rx.match(line) - if m: - vars[m.group(1)] = 0 - return vars - - -def get_config_h_filename(): - """Return the path of pyconfig.h.""" - if _PYTHON_BUILD: - if os.name == "nt": - inc_dir = os.path.dirname(sys._base_executable) - else: - inc_dir = _PROJECT_BASE - else: - inc_dir = get_path('platinclude') - return os.path.join(inc_dir, 'pyconfig.h') - - -def get_scheme_names(): - """Return a tuple containing the schemes names.""" - return tuple(sorted(_INSTALL_SCHEMES)) - - -def get_path_names(): - """Return a tuple containing the paths names.""" - return _SCHEME_KEYS - - -def get_paths(scheme=get_default_scheme(), vars=None, expand=True): - """Return a mapping containing an install scheme. - - ``scheme`` is the install scheme name. If not provided, it will - return the default scheme for the current platform. - """ - if expand: - return _expand_vars(scheme, vars) - else: - return _INSTALL_SCHEMES[scheme] - - -def get_path(name, scheme=get_default_scheme(), vars=None, expand=True): - """Return a path corresponding to the scheme. - - ``scheme`` is the install scheme name. - """ - return get_paths(scheme, vars, expand)[name] - - -def _init_config_vars(): - global _CONFIG_VARS - _CONFIG_VARS = {} - # Normalized versions of prefix and exec_prefix are handy to have; - # in fact, these are the standard versions used most places in the - # Distutils. - _PREFIX = os.path.normpath(sys.prefix) - _EXEC_PREFIX = os.path.normpath(sys.exec_prefix) - _CONFIG_VARS['prefix'] = _PREFIX # FIXME: This gets overwriten by _init_posix. 
- _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX # FIXME: This gets overwriten by _init_posix. - _CONFIG_VARS['py_version'] = _PY_VERSION - _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT - _CONFIG_VARS['py_version_nodot'] = _PY_VERSION_SHORT_NO_DOT - _CONFIG_VARS['installed_base'] = _BASE_PREFIX - _CONFIG_VARS['base'] = _PREFIX - _CONFIG_VARS['installed_platbase'] = _BASE_EXEC_PREFIX - _CONFIG_VARS['platbase'] = _EXEC_PREFIX - _CONFIG_VARS['projectbase'] = _PROJECT_BASE - _CONFIG_VARS['platlibdir'] = sys.platlibdir - _CONFIG_VARS['implementation'] = _get_implementation() - _CONFIG_VARS['implementation_lower'] = _get_implementation().lower() - try: - _CONFIG_VARS['abiflags'] = sys.abiflags - except AttributeError: - # sys.abiflags may not be defined on all platforms. - _CONFIG_VARS['abiflags'] = '' - try: - _CONFIG_VARS['py_version_nodot_plat'] = sys.winver.replace('.', '') - except AttributeError: - _CONFIG_VARS['py_version_nodot_plat'] = '' - - if os.name == 'nt': - _init_non_posix(_CONFIG_VARS) - _CONFIG_VARS['VPATH'] = sys._vpath - if os.name == 'posix': - _init_posix(_CONFIG_VARS) - if _HAS_USER_BASE: - # Setting 'userbase' is done below the call to the - # init function to enable using 'get_config_var' in - # the init-function. - _CONFIG_VARS['userbase'] = _getuserbase() - - # e.g., 't' for free-threaded or '' for default build - _CONFIG_VARS['abi_thread'] = 't' if _CONFIG_VARS.get('Py_GIL_DISABLED') else '' - - # Always convert srcdir to an absolute path - srcdir = _CONFIG_VARS.get('srcdir', _PROJECT_BASE) - if os.name == 'posix': - if _PYTHON_BUILD: - # If srcdir is a relative path (typically '.' or '..') - # then it should be interpreted relative to the directory - # containing Makefile. - base = os.path.dirname(get_makefile_filename()) - srcdir = os.path.join(base, srcdir) - else: - # srcdir is not meaningful since the installation is - # spread about the filesystem. We choose the - # directory containing the Makefile since we know it - # exists. - srcdir = os.path.dirname(get_makefile_filename()) - _CONFIG_VARS['srcdir'] = _safe_realpath(srcdir) - - # OS X platforms require special customization to handle - # multi-architecture, multi-os-version installers - if sys.platform == 'darwin': - import _osx_support - _osx_support.customize_config_vars(_CONFIG_VARS) - - global _CONFIG_VARS_INITIALIZED - _CONFIG_VARS_INITIALIZED = True - - -def get_config_vars(*args): - """With no arguments, return a dictionary of all configuration - variables relevant for the current platform. - - On Unix, this means every variable defined in Python's installed Makefile; - On Windows it's a much smaller set. - - With arguments, return a list of values that result from looking up - each argument in the configuration variable dictionary. - """ - global _CONFIG_VARS_INITIALIZED - - # Avoid claiming the lock once initialization is complete. - if not _CONFIG_VARS_INITIALIZED: - with _CONFIG_VARS_LOCK: - # Test again with the lock held to avoid races. Note that - # we test _CONFIG_VARS here, not _CONFIG_VARS_INITIALIZED, - # to ensure that recursive calls to get_config_vars() - # don't re-enter init_config_vars(). - if _CONFIG_VARS is None: - _init_config_vars() - else: - # If the site module initialization happened after _CONFIG_VARS was - # initialized, a virtual environment might have been activated, resulting in - # variables like sys.prefix changing their value, so we need to re-init the - # config vars (see GH-126789). 
- if _CONFIG_VARS['base'] != os.path.normpath(sys.prefix): - with _CONFIG_VARS_LOCK: - _CONFIG_VARS_INITIALIZED = False - _init_config_vars() - - if args: - vals = [] - for name in args: - vals.append(_CONFIG_VARS.get(name)) - return vals - else: - return _CONFIG_VARS - - -def get_config_var(name): - """Return the value of a single variable using the dictionary returned by - 'get_config_vars()'. - - Equivalent to get_config_vars().get(name) - """ - return get_config_vars().get(name) - - -def get_platform(): - """Return a string that identifies the current platform. - - This is used mainly to distinguish platform-specific build directories and - platform-specific built distributions. Typically includes the OS name and - version and the architecture (as supplied by 'os.uname()'), although the - exact information included depends on the OS; on Linux, the kernel version - isn't particularly important. - - Examples of returned values: - - - Windows: - - - win-amd64 (64-bit Windows on AMD64, aka x86_64, Intel64, and EM64T) - - win-arm64 (64-bit Windows on ARM64, aka AArch64) - - win32 (all others - specifically, sys.platform is returned) - - POSIX based OS: - - - linux-x86_64 - - macosx-15.5-arm64 - - macosx-26.0-universal2 (macOS on Apple Silicon or Intel) - - android-24-arm64_v8a - - For other non-POSIX platforms, currently just returns :data:`sys.platform`.""" - if os.name == 'nt': - if 'amd64' in sys.version.lower(): - return 'win-amd64' - if '(arm)' in sys.version.lower(): - return 'win-arm32' - if '(arm64)' in sys.version.lower(): - return 'win-arm64' - return sys.platform - - if os.name != "posix" or not hasattr(os, 'uname'): - # XXX what about the architecture? NT is Intel or Alpha - return sys.platform - - # Set for cross builds explicitly - if "_PYTHON_HOST_PLATFORM" in os.environ: - return os.environ["_PYTHON_HOST_PLATFORM"] - - # Try to distinguish various flavours of Unix - osname, host, release, version, machine = os.uname() - - # Convert the OS name to lowercase, remove '/' characters, and translate - # spaces (for "Power Macintosh") - osname = osname.lower().replace('/', '') - machine = machine.replace(' ', '_') - machine = machine.replace('/', '-') - - if osname[:5] == "linux": - if sys.platform == "android": - osname = "android" - release = get_config_var("ANDROID_API_LEVEL") - - # Wheel tags use the ABI names from Android's own tools. - # When Python is running on 32-bit ARM Android on a 64-bit ARM kernel, - # 'os.uname().machine' is 'armv8l'. Such devices run the same userspace - # code as 'armv7l' devices. - # During the build process of the Android testbed when targeting 32-bit ARM, - # '_PYTHON_HOST_PLATFORM' is 'arm-linux-androideabi', so 'machine' becomes - # 'arm'. - machine = { - "aarch64": "arm64_v8a", - "arm": "armeabi_v7a", - "armv7l": "armeabi_v7a", - "armv8l": "armeabi_v7a", - "i686": "x86", - "x86_64": "x86_64", - }[machine] - else: - # At least on Linux/Intel, 'machine' is the processor -- - # i386, etc. - # XXX what about Alpha, SPARC, etc? - return f"{osname}-{machine}" - elif osname[:5] == "sunos": - if release[0] >= "5": # SunOS 5 == Solaris 2 - osname = "solaris" - release = f"{int(release[0]) - 3}.{release[2:]}" - # We can't use "platform.architecture()[0]" because a - # bootstrap problem. We use a dict to get an error - # if some suspicious happens. 
- bitness = {2147483647:"32bit", 9223372036854775807:"64bit"} - machine += f".{bitness[sys.maxsize]}" - # fall through to standard osname-release-machine representation - elif osname[:3] == "aix": - from _aix_support import aix_platform - return aix_platform() - elif osname[:6] == "cygwin": - osname = "cygwin" - import re - rel_re = re.compile(r'[\d.]+') - m = rel_re.match(release) - if m: - release = m.group() - elif osname[:6] == "darwin": - if sys.platform == "ios": - release = get_config_vars().get("IPHONEOS_DEPLOYMENT_TARGET", "13.0") - osname = sys.platform - machine = sys.implementation._multiarch - else: - import _osx_support - osname, release, machine = _osx_support.get_platform_osx( - get_config_vars(), - osname, release, machine) - - return f"{osname}-{release}-{machine}" - - -def get_python_version(): - return _PY_VERSION_SHORT - - -def _get_python_version_abi(): - return _PY_VERSION_SHORT + get_config_var("abi_thread") - - -def expand_makefile_vars(s, vars): - """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in - 'string' according to 'vars' (a dictionary mapping variable names to - values). Variables not present in 'vars' are silently expanded to the - empty string. The variable values in 'vars' should not contain further - variable expansions; if 'vars' is the output of 'parse_makefile()', - you're fine. Returns a variable-expanded version of 's'. - """ - import re - - _findvar1_rx = r"\$\(([A-Za-z][A-Za-z0-9_]*)\)" - _findvar2_rx = r"\${([A-Za-z][A-Za-z0-9_]*)}" - - # This algorithm does multiple expansion, so if vars['foo'] contains - # "${bar}", it will expand ${foo} to ${bar}, and then expand - # ${bar}... and so forth. This is fine as long as 'vars' comes from - # 'parse_makefile()', which takes care of such expansions eagerly, - # according to make's variable expansion semantics. - - while True: - m = re.search(_findvar1_rx, s) or re.search(_findvar2_rx, s) - if m: - (beg, end) = m.span() - s = s[0:beg] + vars.get(m.group(1)) + s[end:] - else: - break - return s diff --git a/Python313_13_x86_Template/Lib/sysconfig/__main__.py b/Python313_13_x86_Template/Lib/sysconfig/__main__.py deleted file mode 100644 index d7257b9d..00000000 --- a/Python313_13_x86_Template/Lib/sysconfig/__main__.py +++ /dev/null @@ -1,248 +0,0 @@ -import os -import sys -from sysconfig import ( - _ALWAYS_STR, - _PYTHON_BUILD, - _get_sysconfigdata_name, - get_config_h_filename, - get_config_vars, - get_default_scheme, - get_makefile_filename, - get_paths, - get_platform, - get_python_version, - parse_config_h, -) - - -# Regexes needed for parsing Makefile (and similar syntaxes, -# like old-style Setup files). -_variable_rx = r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)" -_findvar1_rx = r"\$\(([A-Za-z][A-Za-z0-9_]*)\)" -_findvar2_rx = r"\${([A-Za-z][A-Za-z0-9_]*)}" - - -def _parse_makefile(filename, vars=None, keep_unresolved=True): - """Parse a Makefile-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. 
- """ - import re - - if vars is None: - vars = {} - done = {} - notdone = {} - - with open(filename, encoding=sys.getfilesystemencoding(), - errors="surrogateescape") as f: - lines = f.readlines() - - for line in lines: - if line.startswith('#') or line.strip() == '': - continue - m = re.match(_variable_rx, line) - if m: - n, v = m.group(1, 2) - v = v.strip() - # `$$' is a literal `$' in make - tmpv = v.replace('$$', '') - - if "$" in tmpv: - notdone[n] = v - else: - try: - if n in _ALWAYS_STR: - raise ValueError - - v = int(v) - except ValueError: - # insert literal `$' - done[n] = v.replace('$$', '$') - else: - done[n] = v - - # do variable interpolation here - variables = list(notdone.keys()) - - # Variables with a 'PY_' prefix in the makefile. These need to - # be made available without that prefix through sysconfig. - # Special care is needed to ensure that variable expansion works, even - # if the expansion uses the name without a prefix. - renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') - - while len(variables) > 0: - for name in tuple(variables): - value = notdone[name] - m1 = re.search(_findvar1_rx, value) - m2 = re.search(_findvar2_rx, value) - if m1 and m2: - m = m1 if m1.start() < m2.start() else m2 - else: - m = m1 if m1 else m2 - if m is not None: - n = m.group(1) - found = True - if n in done: - item = str(done[n]) - elif n in notdone: - # get it on a subsequent round - found = False - elif n in os.environ: - # do it like make: fall back to environment - item = os.environ[n] - - elif n in renamed_variables: - if (name.startswith('PY_') and - name[3:] in renamed_variables): - item = "" - - elif 'PY_' + n in notdone: - found = False - - else: - item = str(done['PY_' + n]) - - else: - done[n] = item = "" - - if found: - after = value[m.end():] - value = value[:m.start()] + item + after - if "$" in after: - notdone[name] = value - else: - try: - if name in _ALWAYS_STR: - raise ValueError - value = int(value) - except ValueError: - done[name] = value.strip() - else: - done[name] = value - variables.remove(name) - - if name.startswith('PY_') \ - and name[3:] in renamed_variables: - - name = name[3:] - if name not in done: - done[name] = value - - else: - # Adds unresolved variables to the done dict. - # This is disabled when called from distutils.sysconfig - if keep_unresolved: - done[name] = value - # bogus variable reference (e.g. 
"prefix=$/opt/python"); - # just drop it since we can't deal - variables.remove(name) - - # strip spurious spaces - for k, v in done.items(): - if isinstance(v, str): - done[k] = v.strip() - - # save the results in the global dictionary - vars.update(done) - return vars - - -def _print_config_dict(d, stream): - print ("{", file=stream) - for k, v in sorted(d.items()): - print(f" {k!r}: {v!r},", file=stream) - print ("}", file=stream) - - -def _generate_posix_vars(): - """Generate the Python module containing build-time variables.""" - vars = {} - # load the installed Makefile: - makefile = get_makefile_filename() - try: - _parse_makefile(makefile, vars) - except OSError as e: - msg = f"invalid Python installation: unable to open {makefile}" - if hasattr(e, "strerror"): - msg = f"{msg} ({e.strerror})" - raise OSError(msg) - # load the installed pyconfig.h: - config_h = get_config_h_filename() - try: - with open(config_h, encoding="utf-8") as f: - parse_config_h(f, vars) - except OSError as e: - msg = f"invalid Python installation: unable to open {config_h}" - if hasattr(e, "strerror"): - msg = f"{msg} ({e.strerror})" - raise OSError(msg) - # On AIX, there are wrong paths to the linker scripts in the Makefile - # -- these paths are relative to the Python source, but when installed - # the scripts are in another directory. - if _PYTHON_BUILD: - vars['BLDSHARED'] = vars['LDSHARED'] - - # There's a chicken-and-egg situation on OS X with regards to the - # _sysconfigdata module after the changes introduced by #15298: - # get_config_vars() is called by get_platform() as part of the - # `make pybuilddir.txt` target -- which is a precursor to the - # _sysconfigdata.py module being constructed. Unfortunately, - # get_config_vars() eventually calls _init_posix(), which attempts - # to import _sysconfigdata, which we won't have built yet. In order - # for _init_posix() to work, if we're on Darwin, just mock up the - # _sysconfigdata module manually and populate it with the build vars. - # This is more than sufficient for ensuring the subsequent call to - # get_platform() succeeds. 
- name = _get_sysconfigdata_name() - if 'darwin' in sys.platform: - import types - module = types.ModuleType(name) - module.build_time_vars = vars - sys.modules[name] = module - - pybuilddir = f'build/lib.{get_platform()}-{get_python_version()}' - if hasattr(sys, "gettotalrefcount"): - pybuilddir += '-pydebug' - os.makedirs(pybuilddir, exist_ok=True) - destfile = os.path.join(pybuilddir, name + '.py') - - with open(destfile, 'w', encoding='utf8') as f: - f.write('# system configuration generated and used by' - ' the sysconfig module\n') - f.write('build_time_vars = ') - _print_config_dict(vars, stream=f) - - # Create file used for sys.path fixup -- see Modules/getpath.c - with open('pybuilddir.txt', 'w', encoding='utf8') as f: - f.write(pybuilddir) - - -def _print_dict(title, data): - for index, (key, value) in enumerate(sorted(data.items())): - if index == 0: - print(f'{title}: ') - print(f'\t{key} = "{value}"') - - -def _main(): - """Display all information sysconfig detains.""" - if '--generate-posix-vars' in sys.argv: - _generate_posix_vars() - return - print(f'Platform: "{get_platform()}"') - print(f'Python version: "{get_python_version()}"') - print(f'Current installation scheme: "{get_default_scheme()}"') - print() - _print_dict('Paths', get_paths()) - print() - _print_dict('Variables', get_config_vars()) - - -if __name__ == '__main__': - try: - _main() - except BrokenPipeError: - pass diff --git a/Python313_13_x86_Template/Lib/sysconfig/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/sysconfig/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 1855cff1..00000000 Binary files a/Python313_13_x86_Template/Lib/sysconfig/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/tabnanny.py b/Python313_13_x86_Template/Lib/tabnanny.py deleted file mode 100644 index d06c4c22..00000000 --- a/Python313_13_x86_Template/Lib/tabnanny.py +++ /dev/null @@ -1,340 +0,0 @@ -#! /usr/bin/env python3 - -"""The Tab Nanny despises ambiguous indentation. She knows no mercy. - -tabnanny -- Detection of ambiguous indentation - -For the time being this module is intended to be called as a script. -However it is possible to import it into an IDE and use the function -check() described below. - -Warning: The API provided by this module is likely to change in future -releases; such changes may not be backward compatible. -""" - -# Released to the public domain, by Tim Peters, 15 April 1998. - -# XXX Note: this is now a standard library module. -# XXX The API needs to undergo changes however; the current code is too -# XXX script-like. This will be addressed later. - -__version__ = "6" - -import os -import sys -import tokenize - -__all__ = ["check", "NannyNag", "process_tokens"] - -verbose = 0 -filename_only = 0 - -def errprint(*args): - sep = "" - for arg in args: - sys.stderr.write(sep + str(arg)) - sep = " " - sys.stderr.write("\n") - sys.exit(1) - -def main(): - import getopt - - global verbose, filename_only - try: - opts, args = getopt.getopt(sys.argv[1:], "qv") - except getopt.error as msg: - errprint(msg) - for o, a in opts: - if o == '-q': - filename_only = filename_only + 1 - if o == '-v': - verbose = verbose + 1 - if not args: - errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...") - for arg in args: - check(arg) - -class NannyNag(Exception): - """ - Raised by process_tokens() if detecting an ambiguous indent. - Captured and handled in check(). 
- """ - def __init__(self, lineno, msg, line): - self.lineno, self.msg, self.line = lineno, msg, line - def get_lineno(self): - return self.lineno - def get_msg(self): - return self.msg - def get_line(self): - return self.line - -def check(file): - """check(file_or_dir) - - If file_or_dir is a directory and not a symbolic link, then recursively - descend the directory tree named by file_or_dir, checking all .py files - along the way. If file_or_dir is an ordinary Python source file, it is - checked for whitespace related problems. The diagnostic messages are - written to standard output using the print statement. - """ - - if os.path.isdir(file) and not os.path.islink(file): - if verbose: - print("%r: listing directory" % (file,)) - names = os.listdir(file) - for name in names: - fullname = os.path.join(file, name) - if (os.path.isdir(fullname) and - not os.path.islink(fullname) or - os.path.normcase(name[-3:]) == ".py"): - check(fullname) - return - - try: - f = tokenize.open(file) - except OSError as msg: - errprint("%r: I/O Error: %s" % (file, msg)) - return - - if verbose > 1: - print("checking %r ..." % file) - - try: - process_tokens(tokenize.generate_tokens(f.readline)) - - except tokenize.TokenError as msg: - errprint("%r: Token Error: %s" % (file, msg)) - return - - except IndentationError as msg: - errprint("%r: Indentation Error: %s" % (file, msg)) - return - - except SyntaxError as msg: - errprint("%r: Syntax Error: %s" % (file, msg)) - return - - except NannyNag as nag: - badline = nag.get_lineno() - line = nag.get_line() - if verbose: - print("%r: *** Line %d: trouble in tab city! ***" % (file, badline)) - print("offending line: %r" % (line,)) - print(nag.get_msg()) - else: - if ' ' in file: file = '"' + file + '"' - if filename_only: print(file) - else: print(file, badline, repr(line)) - return - - finally: - f.close() - - if verbose: - print("%r: Clean bill of health." % (file,)) - -class Whitespace: - # the characters used for space and tab - S, T = ' \t' - - # members: - # raw - # the original string - # n - # the number of leading whitespace characters in raw - # nt - # the number of tabs in raw[:n] - # norm - # the normal form as a pair (count, trailing), where: - # count - # a tuple such that raw[:n] contains count[i] - # instances of S * i + T - # trailing - # the number of trailing spaces in raw[:n] - # It's A Theorem that m.indent_level(t) == - # n.indent_level(t) for all t >= 1 iff m.norm == n.norm. 
- # is_simple - # true iff raw[:n] is of the form (T*)(S*) - - def __init__(self, ws): - self.raw = ws - S, T = Whitespace.S, Whitespace.T - count = [] - b = n = nt = 0 - for ch in self.raw: - if ch == S: - n = n + 1 - b = b + 1 - elif ch == T: - n = n + 1 - nt = nt + 1 - if b >= len(count): - count = count + [0] * (b - len(count) + 1) - count[b] = count[b] + 1 - b = 0 - else: - break - self.n = n - self.nt = nt - self.norm = tuple(count), b - self.is_simple = len(count) <= 1 - - # return length of longest contiguous run of spaces (whether or not - # preceding a tab) - def longest_run_of_spaces(self): - count, trailing = self.norm - return max(len(count)-1, trailing) - - def indent_level(self, tabsize): - # count, il = self.norm - # for i in range(len(count)): - # if count[i]: - # il = il + (i//tabsize + 1)*tabsize * count[i] - # return il - - # quicker: - # il = trailing + sum (i//ts + 1)*ts*count[i] = - # trailing + ts * sum (i//ts + 1)*count[i] = - # trailing + ts * sum i//ts*count[i] + count[i] = - # trailing + ts * [(sum i//ts*count[i]) + (sum count[i])] = - # trailing + ts * [(sum i//ts*count[i]) + num_tabs] - # and note that i//ts*count[i] is 0 when i < ts - - count, trailing = self.norm - il = 0 - for i in range(tabsize, len(count)): - il = il + i//tabsize * count[i] - return trailing + tabsize * (il + self.nt) - - # return true iff self.indent_level(t) == other.indent_level(t) - # for all t >= 1 - def equal(self, other): - return self.norm == other.norm - - # return a list of tuples (ts, i1, i2) such that - # i1 == self.indent_level(ts) != other.indent_level(ts) == i2. - # Intended to be used after not self.equal(other) is known, in which - # case it will return at least one witnessing tab size. - def not_equal_witness(self, other): - n = max(self.longest_run_of_spaces(), - other.longest_run_of_spaces()) + 1 - a = [] - for ts in range(1, n+1): - if self.indent_level(ts) != other.indent_level(ts): - a.append( (ts, - self.indent_level(ts), - other.indent_level(ts)) ) - return a - - # Return True iff self.indent_level(t) < other.indent_level(t) - # for all t >= 1. - # The algorithm is due to Vincent Broman. - # Easy to prove it's correct. - # XXXpost that. - # Trivial to prove n is sharp (consider T vs ST). - # Unknown whether there's a faster general way. I suspected so at - # first, but no longer. - # For the special (but common!) case where M and N are both of the - # form (T*)(S*), M.less(N) iff M.len() < N.len() and - # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded. - # XXXwrite that up. - # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1. - def less(self, other): - if self.n >= other.n: - return False - if self.is_simple and other.is_simple: - return self.nt <= other.nt - n = max(self.longest_run_of_spaces(), - other.longest_run_of_spaces()) + 1 - # the self.n >= other.n test already did it for ts=1 - for ts in range(2, n+1): - if self.indent_level(ts) >= other.indent_level(ts): - return False - return True - - # return a list of tuples (ts, i1, i2) such that - # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2. - # Intended to be used after not self.less(other) is known, in which - # case it will return at least one witnessing tab size. 
- def not_less_witness(self, other): - n = max(self.longest_run_of_spaces(), - other.longest_run_of_spaces()) + 1 - a = [] - for ts in range(1, n+1): - if self.indent_level(ts) >= other.indent_level(ts): - a.append( (ts, - self.indent_level(ts), - other.indent_level(ts)) ) - return a - -def format_witnesses(w): - firsts = (str(tup[0]) for tup in w) - prefix = "at tab size" - if len(w) > 1: - prefix = prefix + "s" - return prefix + " " + ', '.join(firsts) - -def process_tokens(tokens): - try: - _process_tokens(tokens) - except TabError as e: - raise NannyNag(e.lineno, e.msg, e.text) - -def _process_tokens(tokens): - INDENT = tokenize.INDENT - DEDENT = tokenize.DEDENT - NEWLINE = tokenize.NEWLINE - JUNK = tokenize.COMMENT, tokenize.NL - indents = [Whitespace("")] - check_equal = 0 - - for (type, token, start, end, line) in tokens: - if type == NEWLINE: - # a program statement, or ENDMARKER, will eventually follow, - # after some (possibly empty) run of tokens of the form - # (NL | COMMENT)* (INDENT | DEDENT+)? - # If an INDENT appears, setting check_equal is wrong, and will - # be undone when we see the INDENT. - check_equal = 1 - - elif type == INDENT: - check_equal = 0 - thisguy = Whitespace(token) - if not indents[-1].less(thisguy): - witness = indents[-1].not_less_witness(thisguy) - msg = "indent not greater e.g. " + format_witnesses(witness) - raise NannyNag(start[0], msg, line) - indents.append(thisguy) - - elif type == DEDENT: - # there's nothing we need to check here! what's important is - # that when the run of DEDENTs ends, the indentation of the - # program statement (or ENDMARKER) that triggered the run is - # equal to what's left at the top of the indents stack - - # Ouch! This assert triggers if the last line of the source - # is indented *and* lacks a newline -- then DEDENTs pop out - # of thin air. - # assert check_equal # else no earlier NEWLINE, or an earlier INDENT - check_equal = 1 - - del indents[-1] - - elif check_equal and type not in JUNK: - # this is the first "real token" following a NEWLINE, so it - # must be the first token of the next program statement, or an - # ENDMARKER; the "line" argument exposes the leading whitespace - # for this statement; in the case of ENDMARKER, line is an empty - # string, so will properly match the empty string with which the - # "indents" stack was seeded - check_equal = 0 - thisguy = Whitespace(line) - if not indents[-1].equal(thisguy): - witness = indents[-1].not_equal_witness(thisguy) - msg = "indent not equal e.g. " + format_witnesses(witness) - raise NannyNag(start[0], msg, line) - - -if __name__ == '__main__': - main() diff --git a/Python313_13_x86_Template/Lib/tarfile.py b/Python313_13_x86_Template/Lib/tarfile.py deleted file mode 100644 index 533c0cc8..00000000 --- a/Python313_13_x86_Template/Lib/tarfile.py +++ /dev/null @@ -1,3091 +0,0 @@ -#!/usr/bin/env python3 -#------------------------------------------------------------------- -# tarfile.py -#------------------------------------------------------------------- -# Copyright (C) 2002 Lars Gustaebel -# All rights reserved. 
-# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation -# files (the "Software"), to deal in the Software without -# restriction, including without limitation the rights to use, -# copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following -# conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -"""Read from and write to tar format archives. -""" - -version = "0.9.0" -__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" -__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." - -#--------- -# Imports -#--------- -from builtins import open as bltn_open -import sys -import os -import io -import shutil -import stat -import time -import struct -import copy -import re - -try: - import pwd -except ImportError: - pwd = None -try: - import grp -except ImportError: - grp = None - -# os.symlink on Windows prior to 6.0 raises NotImplementedError -# OSError (winerror=1314) will be raised if the caller does not hold the -# SeCreateSymbolicLinkPrivilege privilege -symlink_exception = (AttributeError, NotImplementedError, OSError) - -# from tarfile import * -__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError", "ReadError", - "CompressionError", "StreamError", "ExtractError", "HeaderError", - "ENCODING", "USTAR_FORMAT", "GNU_FORMAT", "PAX_FORMAT", - "DEFAULT_FORMAT", "open","fully_trusted_filter", "data_filter", - "tar_filter", "FilterError", "AbsoluteLinkError", - "OutsideDestinationError", "SpecialFileError", "AbsolutePathError", - "LinkOutsideDestinationError", "LinkFallbackError"] - - -#--------------------------------------------------------- -# tar constants -#--------------------------------------------------------- -NUL = b"\0" # the null character -BLOCKSIZE = 512 # length of processing blocks -RECORDSIZE = BLOCKSIZE * 20 # length of records -GNU_MAGIC = b"ustar \0" # magic gnu tar string -POSIX_MAGIC = b"ustar\x0000" # magic posix tar string - -LENGTH_NAME = 100 # maximum length of a filename -LENGTH_LINK = 100 # maximum length of a linkname -LENGTH_PREFIX = 155 # maximum length of the prefix field - -REGTYPE = b"0" # regular file -AREGTYPE = b"\0" # regular file -LNKTYPE = b"1" # link (inside tarfile) -SYMTYPE = b"2" # symbolic link -CHRTYPE = b"3" # character special device -BLKTYPE = b"4" # block special device -DIRTYPE = b"5" # directory -FIFOTYPE = b"6" # fifo special device -CONTTYPE = b"7" # contiguous file - -GNUTYPE_LONGNAME = b"L" # GNU tar longname -GNUTYPE_LONGLINK = b"K" # GNU tar longlink -GNUTYPE_SPARSE = b"S" # GNU tar sparse file - -XHDTYPE = b"x" # POSIX.1-2001 extended header -XGLTYPE = b"g" # POSIX.1-2001 global header -SOLARIS_XHDTYPE = b"X" # Solaris extended header - -USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format -GNU_FORMAT = 1 # GNU tar format -PAX_FORMAT = 2 # 
POSIX.1-2001 (pax) format -DEFAULT_FORMAT = PAX_FORMAT - -#--------------------------------------------------------- -# tarfile constants -#--------------------------------------------------------- -# File types that tarfile supports: -SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, - SYMTYPE, DIRTYPE, FIFOTYPE, - CONTTYPE, CHRTYPE, BLKTYPE, - GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# File types that will be treated as a regular file. -REGULAR_TYPES = (REGTYPE, AREGTYPE, - CONTTYPE, GNUTYPE_SPARSE) - -# File types that are part of the GNU tar format. -GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# Fields from a pax header that override a TarInfo attribute. -PAX_FIELDS = ("path", "linkpath", "size", "mtime", - "uid", "gid", "uname", "gname") - -# Fields from a pax header that are affected by hdrcharset. -PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"} - -# Fields in a pax header that are numbers, all other fields -# are treated as strings. -PAX_NUMBER_FIELDS = { - "atime": float, - "ctime": float, - "mtime": float, - "uid": int, - "gid": int, - "size": int -} - -#--------------------------------------------------------- -# initialization -#--------------------------------------------------------- -if os.name == "nt": - ENCODING = "utf-8" -else: - ENCODING = sys.getfilesystemencoding() - -#--------------------------------------------------------- -# Some useful functions -#--------------------------------------------------------- - -def stn(s, length, encoding, errors): - """Convert a string to a null-terminated bytes object. - """ - if s is None: - raise ValueError("metadata cannot contain None") - s = s.encode(encoding, errors) - return s[:length] + (length - len(s)) * NUL - -def nts(s, encoding, errors): - """Convert a null-terminated bytes object to a string. - """ - p = s.find(b"\0") - if p != -1: - s = s[:p] - return s.decode(encoding, errors) - -def nti(s): - """Convert a number field to a python number. - """ - # There are two possible encodings for a number field, see - # itn() below. - if s[0] in (0o200, 0o377): - n = 0 - for i in range(len(s) - 1): - n <<= 8 - n += s[i + 1] - if s[0] == 0o377: - n = -(256 ** (len(s) - 1) - n) - else: - try: - s = nts(s, "ascii", "strict") - n = int(s.strip() or "0", 8) - except ValueError: - raise InvalidHeaderError("invalid header") - return n - -def itn(n, digits=8, format=DEFAULT_FORMAT): - """Convert a python number to a number field. - """ - # POSIX 1003.1-1988 requires numbers to be encoded as a string of - # octal digits followed by a null-byte, this allows values up to - # (8**(digits-1))-1. GNU tar allows storing numbers greater than - # that if necessary. A leading 0o200 or 0o377 byte indicate this - # particular encoding, the following digits-1 bytes are a big-endian - # base-256 representation. This allows values up to (256**(digits-1))-1. - # A 0o200 byte indicates a positive number, a 0o377 byte a negative - # number. 
- original_n = n - n = int(n) - if 0 <= n < 8 ** (digits - 1): - s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL - elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1): - if n >= 0: - s = bytearray([0o200]) - else: - s = bytearray([0o377]) - n = 256 ** digits + n - - for i in range(digits - 1): - s.insert(1, n & 0o377) - n >>= 8 - else: - raise ValueError("overflow in number field") - - return s - -def calc_chksums(buf): - """Calculate the checksum for a member's header by summing up all - characters except for the chksum field which is treated as if - it was filled with spaces. According to the GNU tar sources, - some tars (Sun and NeXT) calculate chksum with signed char, - which will be different if there are chars in the buffer with - the high bit set. So we calculate two checksums, unsigned and - signed. - """ - unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf)) - signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf)) - return unsigned_chksum, signed_chksum - -def copyfileobj(src, dst, length=None, exception=OSError, bufsize=None): - """Copy length bytes from fileobj src to fileobj dst. - If length is None, copy the entire content. - """ - bufsize = bufsize or 16 * 1024 - if length == 0: - return - if length is None: - shutil.copyfileobj(src, dst, bufsize) - return - - blocks, remainder = divmod(length, bufsize) - for b in range(blocks): - buf = src.read(bufsize) - if len(buf) < bufsize: - raise exception("unexpected end of data") - dst.write(buf) - - if remainder != 0: - buf = src.read(remainder) - if len(buf) < remainder: - raise exception("unexpected end of data") - dst.write(buf) - return - -def _safe_print(s): - encoding = getattr(sys.stdout, 'encoding', None) - if encoding is not None: - s = s.encode(encoding, 'backslashreplace').decode(encoding) - print(s, end=' ') - - -class TarError(Exception): - """Base exception.""" - pass -class ExtractError(TarError): - """General exception for extract errors.""" - pass -class ReadError(TarError): - """Exception for unreadable tar archives.""" - pass -class CompressionError(TarError): - """Exception for unavailable compression methods.""" - pass -class StreamError(TarError): - """Exception for unsupported operations on stream-like TarFiles.""" - pass -class HeaderError(TarError): - """Base exception for header errors.""" - pass -class EmptyHeaderError(HeaderError): - """Exception for empty headers.""" - pass -class TruncatedHeaderError(HeaderError): - """Exception for truncated headers.""" - pass -class EOFHeaderError(HeaderError): - """Exception for end of file headers.""" - pass -class InvalidHeaderError(HeaderError): - """Exception for invalid headers.""" - pass -class SubsequentHeaderError(HeaderError): - """Exception for missing and invalid extended headers.""" - pass - -#--------------------------- -# internal stream interface -#--------------------------- -class _LowLevelFile: - """Low-level file object. Supports reading and writing. - It is used instead of a regular file object for streaming - access. - """ - - def __init__(self, name, mode): - mode = { - "r": os.O_RDONLY, - "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, - }[mode] - if hasattr(os, "O_BINARY"): - mode |= os.O_BINARY - self.fd = os.open(name, mode, 0o666) - - def close(self): - os.close(self.fd) - - def read(self, size): - return os.read(self.fd, size) - - def write(self, s): - os.write(self.fd, s) - -class _Stream: - """Class that serves as an adapter between TarFile and - a stream-like object. 
The stream-like object only - needs to have a read() or write() method that works with bytes, - and the method is accessed blockwise. - Use of gzip or bzip2 compression is possible. - A stream-like object could be for example: sys.stdin.buffer, - sys.stdout.buffer, a socket, a tape device etc. - - _Stream is intended to be used only internally. - """ - - def __init__(self, name, mode, comptype, fileobj, bufsize, - compresslevel): - """Construct a _Stream object. - """ - self._extfileobj = True - if fileobj is None: - fileobj = _LowLevelFile(name, mode) - self._extfileobj = False - - if comptype == '*': - # Enable transparent compression detection for the - # stream interface - fileobj = _StreamProxy(fileobj) - comptype = fileobj.getcomptype() - - self.name = os.fspath(name) if name is not None else "" - self.mode = mode - self.comptype = comptype - self.fileobj = fileobj - self.bufsize = bufsize - self.buf = b"" - self.pos = 0 - self.closed = False - - try: - if comptype == "gz": - try: - import zlib - except ImportError: - raise CompressionError("zlib module is not available") from None - self.zlib = zlib - self.crc = zlib.crc32(b"") - if mode == "r": - self.exception = zlib.error - self._init_read_gz() - else: - self._init_write_gz(compresslevel) - - elif comptype == "bz2": - try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") from None - if mode == "r": - self.dbuf = b"" - self.cmp = bz2.BZ2Decompressor() - self.exception = OSError - else: - self.cmp = bz2.BZ2Compressor(compresslevel) - - elif comptype == "xz": - try: - import lzma - except ImportError: - raise CompressionError("lzma module is not available") from None - if mode == "r": - self.dbuf = b"" - self.cmp = lzma.LZMADecompressor() - self.exception = lzma.LZMAError - else: - self.cmp = lzma.LZMACompressor() - - elif comptype != "tar": - raise CompressionError("unknown compression type %r" % comptype) - - except: - if not self._extfileobj: - self.fileobj.close() - self.closed = True - raise - - def __del__(self): - if hasattr(self, "closed") and not self.closed: - self.close() - - def _init_write_gz(self, compresslevel): - """Initialize for writing with gzip compression. - """ - self.cmp = self.zlib.compressobj(compresslevel, - self.zlib.DEFLATED, - -self.zlib.MAX_WBITS, - self.zlib.DEF_MEM_LEVEL, - 0) - timestamp = struct.pack(" self.bufsize: - self.fileobj.write(self.buf[:self.bufsize]) - self.buf = self.buf[self.bufsize:] - - def close(self): - """Close the _Stream object. No operation should be - done on it afterwards. - """ - if self.closed: - return - - self.closed = True - try: - if self.mode == "w" and self.comptype != "tar": - self.buf += self.cmp.flush() - - if self.mode == "w" and self.buf: - self.fileobj.write(self.buf) - self.buf = b"" - if self.comptype == "gz": - self.fileobj.write(struct.pack("= 0: - blocks, remainder = divmod(pos - self.pos, self.bufsize) - for i in range(blocks): - self.read(self.bufsize) - self.read(remainder) - else: - raise StreamError("seeking backwards is not allowed") - return self.pos - - def read(self, size): - """Return the next size number of bytes from the stream.""" - assert size is not None - buf = self._read(size) - self.pos += len(buf) - return buf - - def _read(self, size): - """Return size bytes from the stream. - """ - if self.comptype == "tar": - return self.__read(size) - - c = len(self.dbuf) - t = [self.dbuf] - while c < size: - # Skip underlying buffer to avoid unaligned double buffering. 
- if self.buf: - buf = self.buf - self.buf = b"" - else: - buf = self.fileobj.read(self.bufsize) - if not buf: - break - try: - buf = self.cmp.decompress(buf) - except self.exception as e: - raise ReadError("invalid compressed data") from e - t.append(buf) - c += len(buf) - t = b"".join(t) - self.dbuf = t[size:] - return t[:size] - - def __read(self, size): - """Return size bytes from stream. If internal buffer is empty, - read another block from the stream. - """ - c = len(self.buf) - t = [self.buf] - while c < size: - buf = self.fileobj.read(self.bufsize) - if not buf: - break - t.append(buf) - c += len(buf) - t = b"".join(t) - self.buf = t[size:] - return t[:size] -# class _Stream - -class _StreamProxy(object): - """Small proxy class that enables transparent compression - detection for the Stream interface (mode 'r|*'). - """ - - def __init__(self, fileobj): - self.fileobj = fileobj - self.buf = self.fileobj.read(BLOCKSIZE) - - def read(self, size): - self.read = self.fileobj.read - return self.buf - - def getcomptype(self): - if self.buf.startswith(b"\x1f\x8b\x08"): - return "gz" - elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY": - return "bz2" - elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")): - return "xz" - else: - return "tar" - - def close(self): - self.fileobj.close() -# class StreamProxy - -#------------------------ -# Extraction file object -#------------------------ -class _FileInFile(object): - """A thin wrapper around an existing file object that - provides a part of its data as an individual file - object. - """ - - def __init__(self, fileobj, offset, size, name, blockinfo=None): - self.fileobj = fileobj - self.offset = offset - self.size = size - self.position = 0 - self.name = name - self.closed = False - - if blockinfo is None: - blockinfo = [(0, size)] - - # Construct a map with data and zero blocks. - self.map_index = 0 - self.map = [] - lastpos = 0 - realpos = self.offset - for offset, size in blockinfo: - if offset > lastpos: - self.map.append((False, lastpos, offset, None)) - self.map.append((True, offset, offset + size, realpos)) - realpos += size - lastpos = offset + size - if lastpos < self.size: - self.map.append((False, lastpos, self.size, None)) - - def flush(self): - pass - - @property - def mode(self): - return 'rb' - - def readable(self): - return True - - def writable(self): - return False - - def seekable(self): - return self.fileobj.seekable() - - def tell(self): - """Return the current file position. - """ - return self.position - - def seek(self, position, whence=io.SEEK_SET): - """Seek to a position in the file. - """ - if whence == io.SEEK_SET: - self.position = min(max(position, 0), self.size) - elif whence == io.SEEK_CUR: - if position < 0: - self.position = max(self.position + position, 0) - else: - self.position = min(self.position + position, self.size) - elif whence == io.SEEK_END: - self.position = max(min(self.size + position, self.size), 0) - else: - raise ValueError("Invalid argument") - return self.position - - def read(self, size=None): - """Read data from the file. 
- """ - if size is None: - size = self.size - self.position - else: - size = min(size, self.size - self.position) - - buf = b"" - while size > 0: - while True: - data, start, stop, offset = self.map[self.map_index] - if start <= self.position < stop: - break - else: - self.map_index += 1 - if self.map_index == len(self.map): - self.map_index = 0 - length = min(size, stop - self.position) - if data: - self.fileobj.seek(offset + (self.position - start)) - b = self.fileobj.read(length) - if len(b) != length: - raise ReadError("unexpected end of data") - buf += b - else: - buf += NUL * length - size -= length - self.position += length - return buf - - def readinto(self, b): - buf = self.read(len(b)) - b[:len(buf)] = buf - return len(buf) - - def close(self): - self.closed = True -#class _FileInFile - -class ExFileObject(io.BufferedReader): - - def __init__(self, tarfile, tarinfo): - fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, - tarinfo.size, tarinfo.name, tarinfo.sparse) - super().__init__(fileobj) -#class ExFileObject - - -#----------------------------- -# extraction filters (PEP 706) -#----------------------------- - -class FilterError(TarError): - pass - -class AbsolutePathError(FilterError): - def __init__(self, tarinfo): - self.tarinfo = tarinfo - super().__init__(f'member {tarinfo.name!r} has an absolute path') - -class OutsideDestinationError(FilterError): - def __init__(self, tarinfo, path): - self.tarinfo = tarinfo - self._path = path - super().__init__(f'{tarinfo.name!r} would be extracted to {path!r}, ' - + 'which is outside the destination') - -class SpecialFileError(FilterError): - def __init__(self, tarinfo): - self.tarinfo = tarinfo - super().__init__(f'{tarinfo.name!r} is a special file') - -class AbsoluteLinkError(FilterError): - def __init__(self, tarinfo): - self.tarinfo = tarinfo - super().__init__(f'{tarinfo.name!r} is a link to an absolute path') - -class LinkOutsideDestinationError(FilterError): - def __init__(self, tarinfo, path): - self.tarinfo = tarinfo - self._path = path - super().__init__(f'{tarinfo.name!r} would link to {path!r}, ' - + 'which is outside the destination') - -class LinkFallbackError(FilterError): - def __init__(self, tarinfo, path): - self.tarinfo = tarinfo - self._path = path - super().__init__(f'link {tarinfo.name!r} would be extracted as a ' - + f'copy of {path!r}, which was rejected') - -# Errors caused by filters -- both "fatal" and "non-fatal" -- that -# we consider to be issues with the argument, rather than a bug in the -# filter function -_FILTER_ERRORS = (FilterError, OSError, ExtractError) - -def _get_filtered_attrs(member, dest_path, for_data=True): - new_attrs = {} - name = member.name - dest_path = os.path.realpath(dest_path, strict=os.path.ALLOW_MISSING) - # Strip leading / (tar's directory separator) from filenames. - # Include os.sep (target OS directory separator) as well. - if name.startswith(('/', os.sep)): - name = new_attrs['name'] = member.path.lstrip('/' + os.sep) - if os.path.isabs(name): - # Path is absolute even after stripping. - # For example, 'C:/foo' on Windows. 
- raise AbsolutePathError(member) - # Ensure we stay in the destination - target_path = os.path.realpath(os.path.join(dest_path, name), - strict=os.path.ALLOW_MISSING) - if os.path.commonpath([target_path, dest_path]) != dest_path: - raise OutsideDestinationError(member, target_path) - # Limit permissions (no high bits, and go-w) - mode = member.mode - if mode is not None: - # Strip high bits & group/other write bits - mode = mode & 0o755 - if for_data: - # For data, handle permissions & file types - if member.isreg() or member.islnk(): - if not mode & 0o100: - # Clear executable bits if not executable by user - mode &= ~0o111 - # Ensure owner can read & write - mode |= 0o600 - elif member.isdir() or member.issym(): - # Ignore mode for directories & symlinks - mode = None - else: - # Reject special files - raise SpecialFileError(member) - if mode != member.mode: - new_attrs['mode'] = mode - if for_data: - # Ignore ownership for 'data' - if member.uid is not None: - new_attrs['uid'] = None - if member.gid is not None: - new_attrs['gid'] = None - if member.uname is not None: - new_attrs['uname'] = None - if member.gname is not None: - new_attrs['gname'] = None - # Check link destination for 'data' - if member.islnk() or member.issym(): - if os.path.isabs(member.linkname): - raise AbsoluteLinkError(member) - normalized = os.path.normpath(member.linkname) - if normalized != member.linkname: - new_attrs['linkname'] = normalized - if member.issym(): - target_path = os.path.join(dest_path, - os.path.dirname(name), - member.linkname) - else: - target_path = os.path.join(dest_path, - member.linkname) - target_path = os.path.realpath(target_path, - strict=os.path.ALLOW_MISSING) - if os.path.commonpath([target_path, dest_path]) != dest_path: - raise LinkOutsideDestinationError(member, target_path) - return new_attrs - -def fully_trusted_filter(member, dest_path): - return member - -def tar_filter(member, dest_path): - new_attrs = _get_filtered_attrs(member, dest_path, False) - if new_attrs: - return member.replace(**new_attrs, deep=False) - return member - -def data_filter(member, dest_path): - new_attrs = _get_filtered_attrs(member, dest_path, True) - if new_attrs: - return member.replace(**new_attrs, deep=False) - return member - -_NAMED_FILTERS = { - "fully_trusted": fully_trusted_filter, - "tar": tar_filter, - "data": data_filter, -} - -#------------------ -# Exported Classes -#------------------ - -# Sentinel for replace() defaults, meaning "don't change the attribute" -_KEEP = object() - -# Header length is digits followed by a space. -_header_length_prefix_re = re.compile(br"([0-9]{1,20}) ") - -class TarInfo(object): - """Informational class which holds the details about an - archive member given by a tar header block. - TarInfo objects are returned by TarFile.getmember(), - TarFile.getmembers() and TarFile.gettarinfo() and are - usually created internally. - """ - - __slots__ = dict( - name = 'Name of the archive member.', - mode = 'Permission bits.', - uid = 'User ID of the user who originally stored this member.', - gid = 'Group ID of the user who originally stored this member.', - size = 'Size in bytes.', - mtime = 'Time of last modification.', - chksum = 'Header checksum.', - type = ('File type. 
type is usually one of these constants: ' - 'REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, ' - 'CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_SPARSE.'), - linkname = ('Name of the target file name, which is only present ' - 'in TarInfo objects of type LNKTYPE and SYMTYPE.'), - uname = 'User name.', - gname = 'Group name.', - devmajor = 'Device major number.', - devminor = 'Device minor number.', - offset = 'The tar header starts here.', - offset_data = "The file's data starts here.", - pax_headers = ('A dictionary containing key-value pairs of an ' - 'associated pax extended header.'), - sparse = 'Sparse member information.', - _tarfile = None, - _sparse_structs = None, - _link_target = None, - ) - - def __init__(self, name=""): - """Construct a TarInfo object. name is the optional name - of the member. - """ - self.name = name # member name - self.mode = 0o644 # file permissions - self.uid = 0 # user id - self.gid = 0 # group id - self.size = 0 # file size - self.mtime = 0 # modification time - self.chksum = 0 # header checksum - self.type = REGTYPE # member type - self.linkname = "" # link name - self.uname = "" # user name - self.gname = "" # group name - self.devmajor = 0 # device major number - self.devminor = 0 # device minor number - - self.offset = 0 # the tar header starts here - self.offset_data = 0 # the file's data starts here - - self.sparse = None # sparse member information - self.pax_headers = {} # pax header information - - @property - def tarfile(self): - import warnings - warnings.warn( - 'The undocumented "tarfile" attribute of TarInfo objects ' - + 'is deprecated and will be removed in Python 3.16', - DeprecationWarning, stacklevel=2) - return self._tarfile - - @tarfile.setter - def tarfile(self, tarfile): - import warnings - warnings.warn( - 'The undocumented "tarfile" attribute of TarInfo objects ' - + 'is deprecated and will be removed in Python 3.16', - DeprecationWarning, stacklevel=2) - self._tarfile = tarfile - - @property - def path(self): - 'In pax headers, "name" is called "path".' - return self.name - - @path.setter - def path(self, name): - self.name = name - - @property - def linkpath(self): - 'In pax headers, "linkname" is called "linkpath".' - return self.linkname - - @linkpath.setter - def linkpath(self, linkname): - self.linkname = linkname - - def __repr__(self): - return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) - - def replace(self, *, - name=_KEEP, mtime=_KEEP, mode=_KEEP, linkname=_KEEP, - uid=_KEEP, gid=_KEEP, uname=_KEEP, gname=_KEEP, - deep=True, _KEEP=_KEEP): - """Return a deep copy of self with the given attributes replaced. - """ - if deep: - result = copy.deepcopy(self) - else: - result = copy.copy(self) - if name is not _KEEP: - result.name = name - if mtime is not _KEEP: - result.mtime = mtime - if mode is not _KEEP: - result.mode = mode - if linkname is not _KEEP: - result.linkname = linkname - if uid is not _KEEP: - result.uid = uid - if gid is not _KEEP: - result.gid = gid - if uname is not _KEEP: - result.uname = uname - if gname is not _KEEP: - result.gname = gname - return result - - def get_info(self): - """Return the TarInfo's attributes as a dictionary. 
- """ - if self.mode is None: - mode = None - else: - mode = self.mode & 0o7777 - info = { - "name": self.name, - "mode": mode, - "uid": self.uid, - "gid": self.gid, - "size": self.size, - "mtime": self.mtime, - "chksum": self.chksum, - "type": self.type, - "linkname": self.linkname, - "uname": self.uname, - "gname": self.gname, - "devmajor": self.devmajor, - "devminor": self.devminor - } - - if info["type"] == DIRTYPE and not info["name"].endswith("/"): - info["name"] += "/" - - return info - - def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): - """Return a tar header as a string of 512 byte blocks. - """ - info = self.get_info() - for name, value in info.items(): - if value is None: - raise ValueError("%s may not be None" % name) - - if format == USTAR_FORMAT: - return self.create_ustar_header(info, encoding, errors) - elif format == GNU_FORMAT: - return self.create_gnu_header(info, encoding, errors) - elif format == PAX_FORMAT: - return self.create_pax_header(info, encoding) - else: - raise ValueError("invalid format") - - def create_ustar_header(self, info, encoding, errors): - """Return the object as a ustar header block. - """ - info["magic"] = POSIX_MAGIC - - if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK: - raise ValueError("linkname is too long") - - if len(info["name"].encode(encoding, errors)) > LENGTH_NAME: - info["prefix"], info["name"] = self._posix_split_name(info["name"], encoding, errors) - - return self._create_header(info, USTAR_FORMAT, encoding, errors) - - def create_gnu_header(self, info, encoding, errors): - """Return the object as a GNU header block sequence. - """ - info["magic"] = GNU_MAGIC - - buf = b"" - if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK: - buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) - - if len(info["name"].encode(encoding, errors)) > LENGTH_NAME: - buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) - - return buf + self._create_header(info, GNU_FORMAT, encoding, errors) - - def create_pax_header(self, info, encoding): - """Return the object as a ustar header block. If it cannot be - represented this way, prepend a pax extended header sequence - with supplement information. - """ - info["magic"] = POSIX_MAGIC - pax_headers = self.pax_headers.copy() - - # Test string fields for values that exceed the field length or cannot - # be represented in ASCII encoding. - for name, hname, length in ( - ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), - ("uname", "uname", 32), ("gname", "gname", 32)): - - if hname in pax_headers: - # The pax header has priority. - continue - - # Try to encode the string as ASCII. - try: - info[name].encode("ascii", "strict") - except UnicodeEncodeError: - pax_headers[hname] = info[name] - continue - - if len(info[name]) > length: - pax_headers[hname] = info[name] - - # Test number fields for values that exceed the field limit or values - # that like to be stored as float. - for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): - needs_pax = False - - val = info[name] - val_is_float = isinstance(val, float) - val_int = round(val) if val_is_float else val - if not 0 <= val_int < 8 ** (digits - 1): - # Avoid overflow. - info[name] = 0 - needs_pax = True - elif val_is_float: - # Put rounded value in ustar header, and full - # precision value in pax header. - info[name] = val_int - needs_pax = True - - # The existing pax header has priority. 
- if needs_pax and name not in pax_headers: - pax_headers[name] = str(val) - - # Create a pax extended header if necessary. - if pax_headers: - buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) - else: - buf = b"" - - return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") - - @classmethod - def create_pax_global_header(cls, pax_headers): - """Return the object as a pax global header block sequence. - """ - return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8") - - def _posix_split_name(self, name, encoding, errors): - """Split a name longer than 100 chars into a prefix - and a name part. - """ - components = name.split("/") - for i in range(1, len(components)): - prefix = "/".join(components[:i]) - name = "/".join(components[i:]) - if len(prefix.encode(encoding, errors)) <= LENGTH_PREFIX and \ - len(name.encode(encoding, errors)) <= LENGTH_NAME: - break - else: - raise ValueError("name is too long") - - return prefix, name - - @staticmethod - def _create_header(info, format, encoding, errors): - """Return a header block. info is a dictionary with file - information, format must be one of the *_FORMAT constants. - """ - has_device_fields = info.get("type") in (CHRTYPE, BLKTYPE) - if has_device_fields: - devmajor = itn(info.get("devmajor", 0), 8, format) - devminor = itn(info.get("devminor", 0), 8, format) - else: - devmajor = stn("", 8, encoding, errors) - devminor = stn("", 8, encoding, errors) - - # None values in metadata should cause ValueError. - # itn()/stn() do this for all fields except type. - filetype = info.get("type", REGTYPE) - if filetype is None: - raise ValueError("TarInfo.type must not be None") - - parts = [ - stn(info.get("name", ""), 100, encoding, errors), - itn(info.get("mode", 0) & 0o7777, 8, format), - itn(info.get("uid", 0), 8, format), - itn(info.get("gid", 0), 8, format), - itn(info.get("size", 0), 12, format), - itn(info.get("mtime", 0), 12, format), - b" ", # checksum field - filetype, - stn(info.get("linkname", ""), 100, encoding, errors), - info.get("magic", POSIX_MAGIC), - stn(info.get("uname", ""), 32, encoding, errors), - stn(info.get("gname", ""), 32, encoding, errors), - devmajor, - devminor, - stn(info.get("prefix", ""), 155, encoding, errors) - ] - - buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) - chksum = calc_chksums(buf[-BLOCKSIZE:])[0] - buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:] - return buf - - @staticmethod - def _create_payload(payload): - """Return the string payload filled with zero bytes - up to the next 512 byte border. - """ - blocks, remainder = divmod(len(payload), BLOCKSIZE) - if remainder > 0: - payload += (BLOCKSIZE - remainder) * NUL - return payload - - @classmethod - def _create_gnu_long_header(cls, name, type, encoding, errors): - """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence - for name. - """ - name = name.encode(encoding, errors) + NUL - - info = {} - info["name"] = "././@LongLink" - info["type"] = type - info["size"] = len(name) - info["magic"] = GNU_MAGIC - - # create extended header + name blocks. - return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ - cls._create_payload(name) - - @classmethod - def _create_pax_generic_header(cls, pax_headers, type, encoding): - """Return a POSIX.1-2008 extended or global header sequence - that contains a list of keyword, value pairs. The values - must be strings. 
- """ - # Check if one of the fields contains surrogate characters and thereby - # forces hdrcharset=BINARY, see _proc_pax() for more information. - binary = False - for keyword, value in pax_headers.items(): - try: - value.encode("utf-8", "strict") - except UnicodeEncodeError: - binary = True - break - - records = b"" - if binary: - # Put the hdrcharset field at the beginning of the header. - records += b"21 hdrcharset=BINARY\n" - - for keyword, value in pax_headers.items(): - keyword = keyword.encode("utf-8") - if binary: - # Try to restore the original byte representation of `value'. - # Needless to say, that the encoding must match the string. - value = value.encode(encoding, "surrogateescape") - else: - value = value.encode("utf-8") - - l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' - n = p = 0 - while True: - n = l + len(str(p)) - if n == p: - break - p = n - records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" - - # We use a hardcoded "././@PaxHeader" name like star does - # instead of the one that POSIX recommends. - info = {} - info["name"] = "././@PaxHeader" - info["type"] = type - info["size"] = len(records) - info["magic"] = POSIX_MAGIC - - # Create pax header + record blocks. - return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ - cls._create_payload(records) - - @classmethod - def frombuf(cls, buf, encoding, errors): - """Construct a TarInfo object from a 512 byte bytes object. - - To support the old v7 tar format AREGTYPE headers are - transformed to DIRTYPE headers if their name ends in '/'. - """ - return cls._frombuf(buf, encoding, errors) - - @classmethod - def _frombuf(cls, buf, encoding, errors, *, dircheck=True): - """Construct a TarInfo object from a 512 byte bytes object. - - If ``dircheck`` is set to ``True`` then ``AREGTYPE`` headers will - be normalized to ``DIRTYPE`` if the name ends in a trailing slash. - ``dircheck`` must be set to ``False`` if this function is called - on a follow-up header such as ``GNUTYPE_LONGNAME``. - """ - if len(buf) == 0: - raise EmptyHeaderError("empty header") - if len(buf) != BLOCKSIZE: - raise TruncatedHeaderError("truncated header") - if buf.count(NUL) == BLOCKSIZE: - raise EOFHeaderError("end of file header") - - chksum = nti(buf[148:156]) - if chksum not in calc_chksums(buf): - raise InvalidHeaderError("bad checksum") - - obj = cls() - obj.name = nts(buf[0:100], encoding, errors) - obj.mode = nti(buf[100:108]) - obj.uid = nti(buf[108:116]) - obj.gid = nti(buf[116:124]) - obj.size = nti(buf[124:136]) - obj.mtime = nti(buf[136:148]) - obj.chksum = chksum - obj.type = buf[156:157] - obj.linkname = nts(buf[157:257], encoding, errors) - obj.uname = nts(buf[265:297], encoding, errors) - obj.gname = nts(buf[297:329], encoding, errors) - obj.devmajor = nti(buf[329:337]) - obj.devminor = nti(buf[337:345]) - prefix = nts(buf[345:500], encoding, errors) - - # Old V7 tar format represents a directory as a regular - # file with a trailing slash. - if dircheck and obj.type == AREGTYPE and obj.name.endswith("/"): - obj.type = DIRTYPE - - # The old GNU sparse format occupies some of the unused - # space in the buffer for up to 4 sparse structures. - # Save them for later processing in _proc_sparse(). 
- if obj.type == GNUTYPE_SPARSE: - pos = 386 - structs = [] - for i in range(4): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[482]) - origsize = nti(buf[483:495]) - obj._sparse_structs = (structs, isextended, origsize) - - # Remove redundant slashes from directories. - if obj.isdir(): - obj.name = obj.name.rstrip("/") - - # Reconstruct a ustar longname. - if prefix and obj.type not in GNU_TYPES: - obj.name = prefix + "/" + obj.name - return obj - - @classmethod - def fromtarfile(cls, tarfile): - """Return the next TarInfo object from TarFile object - tarfile. - """ - return cls._fromtarfile(tarfile) - - @classmethod - def _fromtarfile(cls, tarfile, *, dircheck=True): - """ - See dircheck documentation in _frombuf(). - """ - buf = tarfile.fileobj.read(BLOCKSIZE) - obj = cls._frombuf(buf, tarfile.encoding, tarfile.errors, dircheck=dircheck) - obj.offset = tarfile.fileobj.tell() - BLOCKSIZE - return obj._proc_member(tarfile) - - #-------------------------------------------------------------------------- - # The following are methods that are called depending on the type of a - # member. The entry point is _proc_member() which can be overridden in a - # subclass to add custom _proc_*() methods. A _proc_*() method MUST - # implement the following - # operations: - # 1. Set self.offset_data to the position where the data blocks begin, - # if there is data that follows. - # 2. Set tarfile.offset to the position where the next member's header will - # begin. - # 3. Return self or another valid TarInfo object. - def _proc_member(self, tarfile): - """Choose the right processing method depending on - the type and call it. - """ - if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): - return self._proc_gnulong(tarfile) - elif self.type == GNUTYPE_SPARSE: - return self._proc_sparse(tarfile) - elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): - return self._proc_pax(tarfile) - else: - return self._proc_builtin(tarfile) - - def _proc_builtin(self, tarfile): - """Process a builtin type or an unknown type which - will be treated as a regular file. - """ - self.offset_data = tarfile.fileobj.tell() - offset = self.offset_data - if self.isreg() or self.type not in SUPPORTED_TYPES: - # Skip the following data blocks. - offset += self._block(self.size) - tarfile.offset = offset - - # Patch the TarInfo object with saved global - # header information. - self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) - - # Remove redundant slashes from directories. This is to be consistent - # with frombuf(). - if self.isdir(): - self.name = self.name.rstrip("/") - - return self - - def _proc_gnulong(self, tarfile): - """Process the blocks that hold a GNU longname - or longlink member. - """ - buf = tarfile.fileobj.read(self._block(self.size)) - - # Fetch the next header and process it. - try: - next = self._fromtarfile(tarfile, dircheck=False) - except HeaderError as e: - raise SubsequentHeaderError(str(e)) from None - - # Patch the TarInfo object from the next header with - # the longname information. - next.offset = self.offset - if self.type == GNUTYPE_LONGNAME: - next.name = nts(buf, tarfile.encoding, tarfile.errors) - elif self.type == GNUTYPE_LONGLINK: - next.linkname = nts(buf, tarfile.encoding, tarfile.errors) - - # Remove redundant slashes from directories. This is to be consistent - # with frombuf(). 
- if next.isdir(): - next.name = next.name.removesuffix("/") - - return next - - def _proc_sparse(self, tarfile): - """Process a GNU sparse header plus extra headers. - """ - # We already collected some sparse structures in frombuf(). - structs, isextended, origsize = self._sparse_structs - del self._sparse_structs - - # Collect sparse structures from extended header blocks. - while isextended: - buf = tarfile.fileobj.read(BLOCKSIZE) - pos = 0 - for i in range(21): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - if offset and numbytes: - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[504]) - self.sparse = structs - - self.offset_data = tarfile.fileobj.tell() - tarfile.offset = self.offset_data + self._block(self.size) - self.size = origsize - return self - - def _proc_pax(self, tarfile): - """Process an extended or global header as described in - POSIX.1-2008. - """ - # Read the header information. - buf = tarfile.fileobj.read(self._block(self.size)) - - # A pax header stores supplemental information for either - # the following file (extended) or all following files - # (global). - if self.type == XGLTYPE: - pax_headers = tarfile.pax_headers - else: - pax_headers = tarfile.pax_headers.copy() - - # Parse pax header information. A record looks like that: - # "%d %s=%s\n" % (length, keyword, value). length is the size - # of the complete record including the length field itself and - # the newline. - pos = 0 - encoding = None - raw_headers = [] - while len(buf) > pos and buf[pos] != 0x00: - if not (match := _header_length_prefix_re.match(buf, pos)): - raise InvalidHeaderError("invalid header") - try: - length = int(match.group(1)) - except ValueError: - raise InvalidHeaderError("invalid header") - # Headers must be at least 5 bytes, shortest being '5 x=\n'. - # Value is allowed to be empty. - if length < 5: - raise InvalidHeaderError("invalid header") - if pos + length > len(buf): - raise InvalidHeaderError("invalid header") - - header_value_end_offset = match.start(1) + length - 1 # Last byte of the header - keyword_and_value = buf[match.end(1) + 1:header_value_end_offset] - raw_keyword, equals, raw_value = keyword_and_value.partition(b"=") - - # Check the framing of the header. The last character must be '\n' (0x0A) - if not raw_keyword or equals != b"=" or buf[header_value_end_offset] != 0x0A: - raise InvalidHeaderError("invalid header") - raw_headers.append((length, raw_keyword, raw_value)) - - # Check if the pax header contains a hdrcharset field. This tells us - # the encoding of the path, linkpath, uname and gname fields. Normally, - # these fields are UTF-8 encoded but since POSIX.1-2008 tar - # implementations are allowed to store them as raw binary strings if - # the translation to UTF-8 fails. For the time being, we don't care about - # anything other than "BINARY". The only other value that is currently - # allowed by the standard is "ISO-IR 10646 2000 UTF-8" in other words UTF-8. - # Note that we only follow the initial 'hdrcharset' setting to preserve - # the initial behavior of the 'tarfile' module. - if raw_keyword == b"hdrcharset" and encoding is None: - if raw_value == b"BINARY": - encoding = tarfile.encoding - else: # This branch ensures only the first 'hdrcharset' header is used. - encoding = "utf-8" - - pos += length - - # If no explicit hdrcharset is set, we use UTF-8 as a default. 
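
# The reading direction of the "%d %s=%s\n" records parsed above,
# assuming well-formed input (parse_pax_records() is an illustrative
# helper; real parsing also validates framing, as _proc_pax does):
import re

_len_re = re.compile(br"([0-9]{1,20}) ")

def parse_pax_records(buf):
    pos = 0
    while pos < len(buf) and buf[pos] != 0x00:
        match = _len_re.match(buf, pos)
        length = int(match.group(1))
        keyword, _, value = buf[match.end():pos + length - 1].partition(b"=")
        yield keyword, value
        pos += length

assert list(parse_pax_records(b"23 path=some/long/name\n")) == \
    [(b"path", b"some/long/name")]
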
- if encoding is None: - encoding = "utf-8" - - # After parsing the raw headers we can decode them to text. - for length, raw_keyword, raw_value in raw_headers: - # Normally, we could just use "utf-8" as the encoding and "strict" - # as the error handler, but we better not take the risk. For - # example, GNU tar <= 1.23 is known to store filenames it cannot - # translate to UTF-8 as raw strings (unfortunately without a - # hdrcharset=BINARY header). - # We first try the strict standard encoding, and if that fails we - # fall back on the user's encoding and error handler. - keyword = self._decode_pax_field(raw_keyword, "utf-8", "utf-8", - tarfile.errors) - if keyword in PAX_NAME_FIELDS: - value = self._decode_pax_field(raw_value, encoding, tarfile.encoding, - tarfile.errors) - else: - value = self._decode_pax_field(raw_value, "utf-8", "utf-8", - tarfile.errors) - - pax_headers[keyword] = value - - # Fetch the next header. - try: - next = self._fromtarfile(tarfile, dircheck=False) - except HeaderError as e: - raise SubsequentHeaderError(str(e)) from None - - # Process GNU sparse information. - if "GNU.sparse.map" in pax_headers: - # GNU extended sparse format version 0.1. - self._proc_gnusparse_01(next, pax_headers) - - elif "GNU.sparse.size" in pax_headers: - # GNU extended sparse format version 0.0. - self._proc_gnusparse_00(next, raw_headers) - - elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": - # GNU extended sparse format version 1.0. - self._proc_gnusparse_10(next, pax_headers, tarfile) - - if self.type in (XHDTYPE, SOLARIS_XHDTYPE): - # Patch the TarInfo object with the extended header info. - next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) - next.offset = self.offset - - if "size" in pax_headers: - # If the extended header replaces the size field, - # we need to recalculate the offset where the next - # header starts. - offset = next.offset_data - if next.isreg() or next.type not in SUPPORTED_TYPES: - offset += next._block(next.size) - tarfile.offset = offset - - return next - - def _proc_gnusparse_00(self, next, raw_headers): - """Process a GNU tar extended sparse header, version 0.0. - """ - offsets = [] - numbytes = [] - for _, keyword, value in raw_headers: - if keyword == b"GNU.sparse.offset": - try: - offsets.append(int(value.decode())) - except ValueError: - raise InvalidHeaderError("invalid header") - - elif keyword == b"GNU.sparse.numbytes": - try: - numbytes.append(int(value.decode())) - except ValueError: - raise InvalidHeaderError("invalid header") - - next.sparse = list(zip(offsets, numbytes)) - - def _proc_gnusparse_01(self, next, pax_headers): - """Process a GNU tar extended sparse header, version 0.1. - """ - sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _proc_gnusparse_10(self, next, pax_headers, tarfile): - """Process a GNU tar extended sparse header, version 1.0. - """ - fields = None - sparse = [] - buf = tarfile.fileobj.read(BLOCKSIZE) - fields, buf = buf.split(b"\n", 1) - fields = int(fields) - while len(sparse) < fields * 2: - if b"\n" not in buf: - buf += tarfile.fileobj.read(BLOCKSIZE) - number, buf = buf.split(b"\n", 1) - sparse.append(int(number)) - next.offset_data = tarfile.fileobj.tell() - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _apply_pax_info(self, pax_headers, encoding, errors): - """Replace fields with supplemental information from a previous - pax extended or global header. 
- """ - for keyword, value in pax_headers.items(): - if keyword == "GNU.sparse.name": - setattr(self, "path", value) - elif keyword == "GNU.sparse.size": - setattr(self, "size", int(value)) - elif keyword == "GNU.sparse.realsize": - setattr(self, "size", int(value)) - elif keyword in PAX_FIELDS: - if keyword in PAX_NUMBER_FIELDS: - try: - value = PAX_NUMBER_FIELDS[keyword](value) - except ValueError: - value = 0 - if keyword == "path": - value = value.rstrip("/") - setattr(self, keyword, value) - - self.pax_headers = pax_headers.copy() - - def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): - """Decode a single field from a pax record. - """ - try: - return value.decode(encoding, "strict") - except UnicodeDecodeError: - return value.decode(fallback_encoding, fallback_errors) - - def _block(self, count): - """Round up a byte count by BLOCKSIZE and return it, - e.g. _block(834) => 1024. - """ - # Only non-negative offsets are allowed - if count < 0: - raise InvalidHeaderError("invalid offset") - blocks, remainder = divmod(count, BLOCKSIZE) - if remainder: - blocks += 1 - return blocks * BLOCKSIZE - - def isreg(self): - 'Return True if the Tarinfo object is a regular file.' - return self.type in REGULAR_TYPES - - def isfile(self): - 'Return True if the Tarinfo object is a regular file.' - return self.isreg() - - def isdir(self): - 'Return True if it is a directory.' - return self.type == DIRTYPE - - def issym(self): - 'Return True if it is a symbolic link.' - return self.type == SYMTYPE - - def islnk(self): - 'Return True if it is a hard link.' - return self.type == LNKTYPE - - def ischr(self): - 'Return True if it is a character device.' - return self.type == CHRTYPE - - def isblk(self): - 'Return True if it is a block device.' - return self.type == BLKTYPE - - def isfifo(self): - 'Return True if it is a FIFO.' - return self.type == FIFOTYPE - - def issparse(self): - return self.sparse is not None - - def isdev(self): - 'Return True if it is one of character device, block device or FIFO.' - return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) -# class TarInfo - -class TarFile(object): - """The TarFile Class provides an interface to tar archives. - """ - - debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) - - dereference = False # If true, add content of linked file to the - # tar file, else the link. - - ignore_zeros = False # If true, skips empty or invalid blocks and - # continues processing. - - errorlevel = 1 # If 0, fatal errors only appear in debug - # messages (if debug >= 0). If > 0, errors - # are passed to the caller as exceptions. - - format = DEFAULT_FORMAT # The format to use when creating an archive. - - encoding = ENCODING # Encoding for 8-bit character strings. - - errors = None # Error handler for unicode conversion. - - tarinfo = TarInfo # The default TarInfo class to use. - - fileobject = ExFileObject # The file-object for extractfile(). - - extraction_filter = None # The default filter for extraction. - - def __init__(self, name=None, mode="r", fileobj=None, format=None, - tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, - errors="surrogateescape", pax_headers=None, debug=None, - errorlevel=None, copybufsize=None, stream=False): - """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to - read from an existing archive, 'a' to append data to an existing - file or 'w' to create a new file overwriting an existing one. `mode' - defaults to 'r'. - If `fileobj' is given, it is used for reading or writing data. 
If it - can be determined, `mode' is overridden by `fileobj's mode. - `fileobj' is not closed, when TarFile is closed. - """ - modes = {"r": "rb", "a": "r+b", "w": "wb", "x": "xb"} - if mode not in modes: - raise ValueError("mode must be 'r', 'a', 'w' or 'x'") - self.mode = mode - self._mode = modes[mode] - - if not fileobj: - if self.mode == "a" and not os.path.exists(name): - # Create nonexistent files in append mode. - self.mode = "w" - self._mode = "wb" - fileobj = bltn_open(name, self._mode) - self._extfileobj = False - else: - if (name is None and hasattr(fileobj, "name") and - isinstance(fileobj.name, (str, bytes))): - name = fileobj.name - if hasattr(fileobj, "mode"): - self._mode = fileobj.mode - self._extfileobj = True - self.name = os.path.abspath(name) if name else None - self.fileobj = fileobj - - self.stream = stream - - # Init attributes. - if format is not None: - self.format = format - if tarinfo is not None: - self.tarinfo = tarinfo - if dereference is not None: - self.dereference = dereference - if ignore_zeros is not None: - self.ignore_zeros = ignore_zeros - if encoding is not None: - self.encoding = encoding - self.errors = errors - - if pax_headers is not None and self.format == PAX_FORMAT: - self.pax_headers = pax_headers - else: - self.pax_headers = {} - - if debug is not None: - self.debug = debug - if errorlevel is not None: - self.errorlevel = errorlevel - - # Init datastructures. - self.copybufsize = copybufsize - self.closed = False - self.members = [] # list of members as TarInfo objects - self._loaded = False # flag if all members have been read - self.offset = self.fileobj.tell() - # current position in the archive file - self.inodes = {} # dictionary caching the inodes of - # archive members already added - - try: - if self.mode == "r": - self.firstmember = None - self.firstmember = self.next() - - if self.mode == "a": - # Move to the end of the archive, - # before the first empty block. - while True: - self.fileobj.seek(self.offset) - try: - tarinfo = self.tarinfo.fromtarfile(self) - self.members.append(tarinfo) - except EOFHeaderError: - self.fileobj.seek(self.offset) - break - except HeaderError as e: - raise ReadError(str(e)) from None - - if self.mode in ("a", "w", "x"): - self._loaded = True - - if self.pax_headers: - buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) - self.fileobj.write(buf) - self.offset += len(buf) - except: - if not self._extfileobj: - self.fileobj.close() - self.closed = True - raise - - #-------------------------------------------------------------------------- - # Below are the classmethods which act as alternate constructors to the - # TarFile class. The open() method is the only one that is needed for - # public use; it is the "super"-constructor and is able to select an - # adequate "sub"-constructor for a particular compression using the mapping - # from OPEN_METH. - # - # This concept allows one to subclass TarFile without losing the comfort of - # the super-constructor. A sub-constructor is registered and made available - # by adding it to the mapping in OPEN_METH. - - @classmethod - def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): - """Open a tar archive for reading, writing or appending. Return - an appropriate TarFile class. 
- - mode: - 'r' or 'r:*' open for reading with transparent compression - 'r:' open for reading exclusively uncompressed - 'r:gz' open for reading with gzip compression - 'r:bz2' open for reading with bzip2 compression - 'r:xz' open for reading with lzma compression - 'a' or 'a:' open for appending, creating the file if necessary - 'w' or 'w:' open for writing without compression - 'w:gz' open for writing with gzip compression - 'w:bz2' open for writing with bzip2 compression - 'w:xz' open for writing with lzma compression - - 'x' or 'x:' create a tarfile exclusively without compression, raise - an exception if the file is already created - 'x:gz' create a gzip compressed tarfile, raise an exception - if the file is already created - 'x:bz2' create a bzip2 compressed tarfile, raise an exception - if the file is already created - 'x:xz' create an lzma compressed tarfile, raise an exception - if the file is already created - - 'r|*' open a stream of tar blocks with transparent compression - 'r|' open an uncompressed stream of tar blocks for reading - 'r|gz' open a gzip compressed stream of tar blocks - 'r|bz2' open a bzip2 compressed stream of tar blocks - 'r|xz' open an lzma compressed stream of tar blocks - 'w|' open an uncompressed stream for writing - 'w|gz' open a gzip compressed stream for writing - 'w|bz2' open a bzip2 compressed stream for writing - 'w|xz' open an lzma compressed stream for writing - """ - - if not name and not fileobj: - raise ValueError("nothing to open") - - if mode in ("r", "r:*"): - # Find out which *open() is appropriate for opening the file. - def not_compressed(comptype): - return cls.OPEN_METH[comptype] == 'taropen' - error_msgs = [] - for comptype in sorted(cls.OPEN_METH, key=not_compressed): - func = getattr(cls, cls.OPEN_METH[comptype]) - if fileobj is not None: - saved_pos = fileobj.tell() - try: - return func(name, "r", fileobj, **kwargs) - except (ReadError, CompressionError) as e: - error_msgs.append(f'- method {comptype}: {e!r}') - if fileobj is not None: - fileobj.seek(saved_pos) - continue - error_msgs_summary = '\n'.join(error_msgs) - raise ReadError(f"file could not be opened successfully:\n{error_msgs_summary}") - - elif ":" in mode: - filemode, comptype = mode.split(":", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - # Select the *open() function according to - # given compression. - if comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - else: - raise CompressionError("unknown compression type %r" % comptype) - return func(name, filemode, fileobj, **kwargs) - - elif "|" in mode: - filemode, comptype = mode.split("|", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - if filemode not in ("r", "w"): - raise ValueError("mode must be 'r' or 'w'") - - compresslevel = kwargs.pop("compresslevel", 9) - stream = _Stream(name, filemode, comptype, fileobj, bufsize, - compresslevel) - try: - t = cls(name, filemode, stream, **kwargs) - except: - stream.close() - raise - t._extfileobj = False - return t - - elif mode in ("a", "w", "x"): - return cls.taropen(name, mode, fileobj, **kwargs) - - raise ValueError("undiscernible mode") - - @classmethod - def taropen(cls, name, mode="r", fileobj=None, **kwargs): - """Open uncompressed tar archive name for reading or writing. 
- """ - if mode not in ("r", "a", "w", "x"): - raise ValueError("mode must be 'r', 'a', 'w' or 'x'") - return cls(name, mode, fileobj, **kwargs) - - @classmethod - def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open gzip compressed tar archive name for reading or writing. - Appending is not allowed. - """ - if mode not in ("r", "w", "x"): - raise ValueError("mode must be 'r', 'w' or 'x'") - - try: - from gzip import GzipFile - except ImportError: - raise CompressionError("gzip module is not available") from None - - try: - fileobj = GzipFile(name, mode + "b", compresslevel, fileobj) - except OSError as e: - if fileobj is not None and mode == 'r': - raise ReadError("not a gzip file") from e - raise - - try: - t = cls.taropen(name, mode, fileobj, **kwargs) - except OSError as e: - fileobj.close() - if mode == 'r': - raise ReadError("not a gzip file") from e - raise - except: - fileobj.close() - raise - t._extfileobj = False - return t - - @classmethod - def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open bzip2 compressed tar archive name for reading or writing. - Appending is not allowed. - """ - if mode not in ("r", "w", "x"): - raise ValueError("mode must be 'r', 'w' or 'x'") - - try: - from bz2 import BZ2File - except ImportError: - raise CompressionError("bz2 module is not available") from None - - fileobj = BZ2File(fileobj or name, mode, compresslevel=compresslevel) - - try: - t = cls.taropen(name, mode, fileobj, **kwargs) - except (OSError, EOFError) as e: - fileobj.close() - if mode == 'r': - raise ReadError("not a bzip2 file") from e - raise - except: - fileobj.close() - raise - t._extfileobj = False - return t - - @classmethod - def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs): - """Open lzma compressed tar archive name for reading or writing. - Appending is not allowed. - """ - if mode not in ("r", "w", "x"): - raise ValueError("mode must be 'r', 'w' or 'x'") - - try: - from lzma import LZMAFile, LZMAError - except ImportError: - raise CompressionError("lzma module is not available") from None - - fileobj = LZMAFile(fileobj or name, mode, preset=preset) - - try: - t = cls.taropen(name, mode, fileobj, **kwargs) - except (LZMAError, EOFError) as e: - fileobj.close() - if mode == 'r': - raise ReadError("not an lzma file") from e - raise - except: - fileobj.close() - raise - t._extfileobj = False - return t - - # All *open() methods are registered here. - OPEN_METH = { - "tar": "taropen", # uncompressed tar - "gz": "gzopen", # gzip compressed tar - "bz2": "bz2open", # bzip2 compressed tar - "xz": "xzopen" # lzma compressed tar - } - - #-------------------------------------------------------------------------- - # The public methods which TarFile provides: - - def close(self): - """Close the TarFile. In write-mode, two finishing zero blocks are - appended to the archive. - """ - if self.closed: - return - - self.closed = True - try: - if self.mode in ("a", "w", "x"): - self.fileobj.write(NUL * (BLOCKSIZE * 2)) - self.offset += (BLOCKSIZE * 2) - # fill up the end with zero-blocks - # (like option -b20 for tar does) - blocks, remainder = divmod(self.offset, RECORDSIZE) - if remainder > 0: - self.fileobj.write(NUL * (RECORDSIZE - remainder)) - finally: - if not self._extfileobj: - self.fileobj.close() - - def getmember(self, name): - """Return a TarInfo object for member `name'. If `name' can not be - found in the archive, KeyError is raised. 
If a member occurs more - than once in the archive, its last occurrence is assumed to be the - most up-to-date version. - """ - tarinfo = self._getmember(name.rstrip('/')) - if tarinfo is None: - raise KeyError("filename %r not found" % name) - return tarinfo - - def getmembers(self): - """Return the members of the archive as a list of TarInfo objects. The - list has the same order as the members in the archive. - """ - self._check() - if not self._loaded: # if we want to obtain a list of - self._load() # all members, we first have to - # scan the whole archive. - return self.members - - def getnames(self): - """Return the members of the archive as a list of their names. It has - the same order as the list returned by getmembers(). - """ - return [tarinfo.name for tarinfo in self.getmembers()] - - def gettarinfo(self, name=None, arcname=None, fileobj=None): - """Create a TarInfo object from the result of os.stat or equivalent - on an existing file. The file is either named by `name', or - specified as a file object `fileobj' with a file descriptor. If - given, `arcname' specifies an alternative name for the file in the - archive, otherwise, the name is taken from the 'name' attribute of - 'fileobj', or the 'name' argument. The name should be a text - string. - """ - self._check("awx") - - # When fileobj is given, replace name by - # fileobj's real name. - if fileobj is not None: - name = fileobj.name - - # Building the name of the member in the archive. - # Backward slashes are converted to forward slashes, - # Absolute paths are turned to relative paths. - if arcname is None: - arcname = name - drv, arcname = os.path.splitdrive(arcname) - arcname = arcname.replace(os.sep, "/") - arcname = arcname.lstrip("/") - - # Now, fill the TarInfo object with - # information specific for the file. - tarinfo = self.tarinfo() - tarinfo._tarfile = self # To be removed in 3.16. - - # Use os.stat or os.lstat, depending on if symlinks shall be resolved. - if fileobj is None: - if not self.dereference: - statres = os.lstat(name) - else: - statres = os.stat(name) - else: - statres = os.fstat(fileobj.fileno()) - linkname = "" - - stmd = statres.st_mode - if stat.S_ISREG(stmd): - inode = (statres.st_ino, statres.st_dev) - if not self.dereference and statres.st_nlink > 1 and \ - inode in self.inodes and arcname != self.inodes[inode]: - # Is it a hardlink to an already - # archived file? - type = LNKTYPE - linkname = self.inodes[inode] - else: - # The inode is added only if its valid. - # For win32 it is always 0. - type = REGTYPE - if inode[0]: - self.inodes[inode] = arcname - elif stat.S_ISDIR(stmd): - type = DIRTYPE - elif stat.S_ISFIFO(stmd): - type = FIFOTYPE - elif stat.S_ISLNK(stmd): - type = SYMTYPE - linkname = os.readlink(name) - elif stat.S_ISCHR(stmd): - type = CHRTYPE - elif stat.S_ISBLK(stmd): - type = BLKTYPE - else: - return None - - # Fill the TarInfo object with all - # information we can get. 
- tarinfo.name = arcname - tarinfo.mode = stmd - tarinfo.uid = statres.st_uid - tarinfo.gid = statres.st_gid - if type == REGTYPE: - tarinfo.size = statres.st_size - else: - tarinfo.size = 0 - tarinfo.mtime = statres.st_mtime - tarinfo.type = type - tarinfo.linkname = linkname - if pwd: - try: - tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] - except KeyError: - pass - if grp: - try: - tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] - except KeyError: - pass - - if type in (CHRTYPE, BLKTYPE): - if hasattr(os, "major") and hasattr(os, "minor"): - tarinfo.devmajor = os.major(statres.st_rdev) - tarinfo.devminor = os.minor(statres.st_rdev) - return tarinfo - - def list(self, verbose=True, *, members=None): - """Print a table of contents to sys.stdout. If `verbose' is False, only - the names of the members are printed. If it is True, an `ls -l'-like - output is produced. `members' is optional and must be a subset of the - list returned by getmembers(). - """ - # Convert tarinfo type to stat type. - type2mode = {REGTYPE: stat.S_IFREG, SYMTYPE: stat.S_IFLNK, - FIFOTYPE: stat.S_IFIFO, CHRTYPE: stat.S_IFCHR, - DIRTYPE: stat.S_IFDIR, BLKTYPE: stat.S_IFBLK} - self._check() - - if members is None: - members = self - for tarinfo in members: - if verbose: - if tarinfo.mode is None: - _safe_print("??????????") - else: - modetype = type2mode.get(tarinfo.type, 0) - _safe_print(stat.filemode(modetype | tarinfo.mode)) - _safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid, - tarinfo.gname or tarinfo.gid)) - if tarinfo.ischr() or tarinfo.isblk(): - _safe_print("%10s" % - ("%d,%d" % (tarinfo.devmajor, tarinfo.devminor))) - else: - _safe_print("%10d" % tarinfo.size) - if tarinfo.mtime is None: - _safe_print("????-??-?? ??:??:??") - else: - _safe_print("%d-%02d-%02d %02d:%02d:%02d" \ - % time.localtime(tarinfo.mtime)[:6]) - - _safe_print(tarinfo.name + ("/" if tarinfo.isdir() else "")) - - if verbose: - if tarinfo.issym(): - _safe_print("-> " + tarinfo.linkname) - if tarinfo.islnk(): - _safe_print("link to " + tarinfo.linkname) - print() - - def add(self, name, arcname=None, recursive=True, *, filter=None): - """Add the file `name' to the archive. `name' may be any type of file - (directory, fifo, symbolic link, etc.). If given, `arcname' - specifies an alternative name for the file in the archive. - Directories are added recursively by default. This can be avoided by - setting `recursive' to False. `filter' is a function - that expects a TarInfo object argument and returns the changed - TarInfo object, if it returns None the TarInfo object will be - excluded from the archive. - """ - self._check("awx") - - if arcname is None: - arcname = name - - # Skip if somebody tries to archive the archive... - if self.name is not None and os.path.abspath(name) == self.name: - self._dbg(2, "tarfile: Skipped %r" % name) - return - - self._dbg(1, name) - - # Create a TarInfo object from the file. - tarinfo = self.gettarinfo(name, arcname) - - if tarinfo is None: - self._dbg(1, "tarfile: Unsupported type %r" % name) - return - - # Change or exclude the TarInfo object. - if filter is not None: - tarinfo = filter(tarinfo) - if tarinfo is None: - self._dbg(2, "tarfile: Excluded %r" % name) - return - - # Append the tar header and data to the archive. 
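
# addfile(), defined below, also takes fully synthetic members built
# from an in-memory buffer; a common pattern ("out.tar" and "hello.txt"
# are placeholder names):
import io
import tarfile
import time

data = b"hello\n"
info = tarfile.TarInfo(name="hello.txt")
info.size = len(data)
info.mtime = int(time.time())
with tarfile.open("out.tar", "w") as tf:
    tf.addfile(info, io.BytesIO(data))
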
- if tarinfo.isreg(): - with bltn_open(name, "rb") as f: - self.addfile(tarinfo, f) - - elif tarinfo.isdir(): - self.addfile(tarinfo) - if recursive: - for f in sorted(os.listdir(name)): - self.add(os.path.join(name, f), os.path.join(arcname, f), - recursive, filter=filter) - - else: - self.addfile(tarinfo) - - def addfile(self, tarinfo, fileobj=None): - """Add the TarInfo object `tarinfo' to the archive. If `tarinfo' represents - a non zero-size regular file, the `fileobj' argument should be a binary file, - and tarinfo.size bytes are read from it and added to the archive. - You can create TarInfo objects directly, or by using gettarinfo(). - """ - self._check("awx") - - if fileobj is None and tarinfo.isreg() and tarinfo.size != 0: - raise ValueError("fileobj not provided for non zero-size regular file") - - tarinfo = copy.copy(tarinfo) - - buf = tarinfo.tobuf(self.format, self.encoding, self.errors) - self.fileobj.write(buf) - self.offset += len(buf) - bufsize=self.copybufsize - # If there's data to follow, append it. - if fileobj is not None: - copyfileobj(fileobj, self.fileobj, tarinfo.size, bufsize=bufsize) - blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) - if remainder > 0: - self.fileobj.write(NUL * (BLOCKSIZE - remainder)) - blocks += 1 - self.offset += blocks * BLOCKSIZE - - self.members.append(tarinfo) - - def _get_filter_function(self, filter): - if filter is None: - filter = self.extraction_filter - if filter is None: - import warnings - warnings.warn( - 'Python 3.14 will, by default, filter extracted tar ' - + 'archives and reject files or modify their metadata. ' - + 'Use the filter argument to control this behavior.', - DeprecationWarning, stacklevel=3) - return fully_trusted_filter - if isinstance(filter, str): - raise TypeError( - 'String names are not supported for ' - + 'TarFile.extraction_filter. Use a function such as ' - + 'tarfile.data_filter directly.') - return filter - if callable(filter): - return filter - try: - return _NAMED_FILTERS[filter] - except KeyError: - raise ValueError(f"filter {filter!r} not found") from None - - def extractall(self, path=".", members=None, *, numeric_owner=False, - filter=None): - """Extract all members from the archive to the current working - directory and set owner, modification time and permissions on - directories afterwards. `path' specifies a different directory - to extract to. `members' is optional and must be a subset of the - list returned by getmembers(). If `numeric_owner` is True, only - the numbers for user/group names are used and not the names. - - The `filter` function will be called on each member just - before extraction. - It can return a changed TarInfo or None to skip the member. - String names of common filters are accepted. - """ - directories = [] - - filter_function = self._get_filter_function(filter) - if members is None: - members = self - - for member in members: - tarinfo, unfiltered = self._get_extract_tarinfo( - member, filter_function, path) - if tarinfo is None: - continue - if tarinfo.isdir(): - # For directories, delay setting attributes until later, - # since permissions can interfere with extraction and - # extracting contents can reset mtime. - directories.append(unfiltered) - self._extract_one(tarinfo, path, set_attrs=not tarinfo.isdir(), - numeric_owner=numeric_owner, - filter_function=filter_function) - - # Reverse sort directories. - directories.sort(key=lambda a: a.name, reverse=True) - - - # Set correct owner, mtime and filemode on directories. 
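
# A typical call into the filter machinery above: string names resolve
# through _NAMED_FILTERS, so extraction can be hardened in one argument
# ("archive.tar" and "dest" are placeholder names):
import tarfile

with tarfile.open("archive.tar") as tf:
    tf.extractall(path="dest", filter="data")   # or filter=tarfile.data_filter
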
- for unfiltered in directories: - try: - # Need to re-apply any filter, to take the *current* filesystem - # state into account. - try: - tarinfo = filter_function(unfiltered, path) - except _FILTER_ERRORS as exc: - self._log_no_directory_fixup(unfiltered, repr(exc)) - continue - if tarinfo is None: - self._log_no_directory_fixup(unfiltered, - 'excluded by filter') - continue - dirpath = os.path.join(path, tarinfo.name) - try: - lstat = os.lstat(dirpath) - except FileNotFoundError: - self._log_no_directory_fixup(tarinfo, 'missing') - continue - if not stat.S_ISDIR(lstat.st_mode): - # This is no longer a directory; presumably a later - # member overwrote the entry. - self._log_no_directory_fixup(tarinfo, 'not a directory') - continue - self.chown(tarinfo, dirpath, numeric_owner=numeric_owner) - self.utime(tarinfo, dirpath) - self.chmod(tarinfo, dirpath) - except ExtractError as e: - self._handle_nonfatal_error(e) - - def _log_no_directory_fixup(self, member, reason): - self._dbg(2, "tarfile: Not fixing up directory %r (%s)" % - (member.name, reason)) - - def extract(self, member, path="", set_attrs=True, *, numeric_owner=False, - filter=None): - """Extract a member from the archive to the current working directory, - using its full name. Its file information is extracted as accurately - as possible. `member' may be a filename or a TarInfo object. You can - specify a different directory using `path'. File attributes (owner, - mtime, mode) are set unless `set_attrs' is False. If `numeric_owner` - is True, only the numbers for user/group names are used and not - the names. - - The `filter` function will be called before extraction. - It can return a changed TarInfo or None to skip the member. - String names of common filters are accepted. - """ - filter_function = self._get_filter_function(filter) - tarinfo, unfiltered = self._get_extract_tarinfo( - member, filter_function, path) - if tarinfo is not None: - self._extract_one(tarinfo, path, set_attrs, numeric_owner) - - def _get_extract_tarinfo(self, member, filter_function, path): - """Get (filtered, unfiltered) TarInfos from *member* - - *member* might be a string. - - Return (None, None) if not found. - """ - - if isinstance(member, str): - unfiltered = self.getmember(member) - else: - unfiltered = member - - filtered = None - try: - filtered = filter_function(unfiltered, path) - except (OSError, UnicodeEncodeError, FilterError) as e: - self._handle_fatal_error(e) - except ExtractError as e: - self._handle_nonfatal_error(e) - if filtered is None: - self._dbg(2, "tarfile: Excluded %r" % unfiltered.name) - return None, None - - # Prepare the link target for makelink(). - if filtered.islnk(): - filtered = copy.copy(filtered) - filtered._link_target = os.path.join(path, filtered.linkname) - return filtered, unfiltered - - def _extract_one(self, tarinfo, path, set_attrs, numeric_owner, - filter_function=None): - """Extract from filtered tarinfo to disk. - - filter_function is only used when extracting a *different* - member (e.g. 
as fallback to creating a symlink) - """ - self._check("r") - - try: - self._extract_member(tarinfo, os.path.join(path, tarinfo.name), - set_attrs=set_attrs, - numeric_owner=numeric_owner, - filter_function=filter_function, - extraction_root=path) - except (OSError, UnicodeEncodeError) as e: - self._handle_fatal_error(e) - except ExtractError as e: - self._handle_nonfatal_error(e) - - def _handle_nonfatal_error(self, e): - """Handle non-fatal error (ExtractError) according to errorlevel""" - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - def _handle_fatal_error(self, e): - """Handle "fatal" error according to self.errorlevel""" - if self.errorlevel > 0: - raise - elif isinstance(e, OSError): - if e.filename is None: - self._dbg(1, "tarfile: %s" % e.strerror) - else: - self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) - else: - self._dbg(1, "tarfile: %s %s" % (type(e).__name__, e)) - - def extractfile(self, member): - """Extract a member from the archive as a file object. `member' may be - a filename or a TarInfo object. If `member' is a regular file or - a link, an io.BufferedReader object is returned. For all other - existing members, None is returned. If `member' does not appear - in the archive, KeyError is raised. - """ - self._check("r") - - if isinstance(member, str): - tarinfo = self.getmember(member) - else: - tarinfo = member - - if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES: - # Members with unknown types are treated as regular files. - return self.fileobject(self, tarinfo) - - elif tarinfo.islnk() or tarinfo.issym(): - if isinstance(self.fileobj, _Stream): - # A small but ugly workaround for the case that someone tries - # to extract a (sym)link as a file-object from a non-seekable - # stream of tar blocks. - raise StreamError("cannot extract (sym)link as file object") - else: - # A (sym)link's file object is its target's file object. - return self.extractfile(self._find_link_target(tarinfo)) - else: - # If there's no data associated with the member (directory, chrdev, - # blkdev, etc.), return None instead of a file object. - return None - - def _extract_member(self, tarinfo, targetpath, set_attrs=True, - numeric_owner=False, *, filter_function=None, - extraction_root=None): - """Extract the filtered TarInfo object tarinfo to a physical - file called targetpath. - - filter_function is only used when extracting a *different* - member (e.g. as fallback to creating a symlink) - """ - # Fetch the TarInfo object for the given name - # and build the destination pathname, replacing - # forward slashes to platform specific separators. - targetpath = targetpath.rstrip("/") - targetpath = targetpath.replace("/", os.sep) - - # Create all upper directories. - upperdirs = os.path.dirname(targetpath) - if upperdirs and not os.path.exists(upperdirs): - # Create directories that are not part of the archive with - # default permissions. 
- os.makedirs(upperdirs, exist_ok=True) - - if tarinfo.islnk() or tarinfo.issym(): - self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) - else: - self._dbg(1, tarinfo.name) - - if tarinfo.isreg(): - self.makefile(tarinfo, targetpath) - elif tarinfo.isdir(): - self.makedir(tarinfo, targetpath) - elif tarinfo.isfifo(): - self.makefifo(tarinfo, targetpath) - elif tarinfo.ischr() or tarinfo.isblk(): - self.makedev(tarinfo, targetpath) - elif tarinfo.islnk() or tarinfo.issym(): - self.makelink_with_filter( - tarinfo, targetpath, - filter_function=filter_function, - extraction_root=extraction_root) - elif tarinfo.type not in SUPPORTED_TYPES: - self.makeunknown(tarinfo, targetpath) - else: - self.makefile(tarinfo, targetpath) - - if set_attrs: - self.chown(tarinfo, targetpath, numeric_owner) - if not tarinfo.issym(): - self.chmod(tarinfo, targetpath) - self.utime(tarinfo, targetpath) - - #-------------------------------------------------------------------------- - # Below are the different file methods. They are called via - # _extract_member() when extract() is called. They can be replaced in a - # subclass to implement other functionality. - - def makedir(self, tarinfo, targetpath): - """Make a directory called targetpath. - """ - try: - if tarinfo.mode is None: - # Use the system's default mode - os.mkdir(targetpath) - else: - # Use a safe mode for the directory, the real mode is set - # later in _extract_member(). - os.mkdir(targetpath, 0o700) - except FileExistsError: - if not os.path.isdir(targetpath): - raise - - def makefile(self, tarinfo, targetpath): - """Make a file called targetpath. - """ - source = self.fileobj - source.seek(tarinfo.offset_data) - bufsize = self.copybufsize - with bltn_open(targetpath, "wb") as target: - if tarinfo.sparse is not None: - for offset, size in tarinfo.sparse: - target.seek(offset) - copyfileobj(source, target, size, ReadError, bufsize) - target.seek(tarinfo.size) - target.truncate() - else: - copyfileobj(source, target, tarinfo.size, ReadError, bufsize) - - def makeunknown(self, tarinfo, targetpath): - """Make a file from a TarInfo object with an unknown type - at targetpath. - """ - self.makefile(tarinfo, targetpath) - self._dbg(1, "tarfile: Unknown file type %r, " \ - "extracted as regular file." % tarinfo.type) - - def makefifo(self, tarinfo, targetpath): - """Make a fifo called targetpath. - """ - if hasattr(os, "mkfifo"): - os.mkfifo(targetpath) - else: - raise ExtractError("fifo not supported by system") - - def makedev(self, tarinfo, targetpath): - """Make a character or block device called targetpath. - """ - if not hasattr(os, "mknod") or not hasattr(os, "makedev"): - raise ExtractError("special devices not supported by system") - - mode = tarinfo.mode - if mode is None: - # Use mknod's default - mode = 0o600 - if tarinfo.isblk(): - mode |= stat.S_IFBLK - else: - mode |= stat.S_IFCHR - - os.mknod(targetpath, mode, - os.makedev(tarinfo.devmajor, tarinfo.devminor)) - - def makelink(self, tarinfo, targetpath): - return self.makelink_with_filter(tarinfo, targetpath, None, None) - - def makelink_with_filter(self, tarinfo, targetpath, - filter_function, extraction_root): - """Make a (symbolic) link called targetpath. If it cannot be created - (platform limitation), we try to make a copy of the referenced file - instead of a link. - - filter_function is only used when extracting a *different* - member (e.g. as fallback to creating a link). - """ - keyerror_to_extracterror = False - try: - # For systems that support symbolic and hard links. 
- if tarinfo.issym(): - if os.path.lexists(targetpath): - # Avoid FileExistsError on following os.symlink. - os.unlink(targetpath) - os.symlink(tarinfo.linkname, targetpath) - return - else: - if os.path.exists(tarinfo._link_target): - if os.path.lexists(targetpath): - # Avoid FileExistsError on following os.link. - os.unlink(targetpath) - os.link(tarinfo._link_target, targetpath) - return - except symlink_exception: - keyerror_to_extracterror = True - - try: - unfiltered = self._find_link_target(tarinfo) - except KeyError: - if keyerror_to_extracterror: - raise ExtractError( - "unable to resolve link inside archive") from None - else: - raise - - if filter_function is None: - filtered = unfiltered - else: - if extraction_root is None: - raise ExtractError( - "makelink_with_filter: if filter_function is not None, " - + "extraction_root must also not be None") - try: - filtered = filter_function(unfiltered, extraction_root) - except _FILTER_ERRORS as cause: - raise LinkFallbackError(tarinfo, unfiltered.name) from cause - if filtered is not None: - self._extract_member(filtered, targetpath, - filter_function=filter_function, - extraction_root=extraction_root) - - def chown(self, tarinfo, targetpath, numeric_owner): - """Set owner of targetpath according to tarinfo. If numeric_owner - is True, use .gid/.uid instead of .gname/.uname. If numeric_owner - is False, fall back to .gid/.uid when the search based on name - fails. - """ - if hasattr(os, "geteuid") and os.geteuid() == 0: - # We have to be root to do so. - g = tarinfo.gid - u = tarinfo.uid - if not numeric_owner: - try: - if grp and tarinfo.gname: - g = grp.getgrnam(tarinfo.gname)[2] - except KeyError: - pass - try: - if pwd and tarinfo.uname: - u = pwd.getpwnam(tarinfo.uname)[2] - except KeyError: - pass - if g is None: - g = -1 - if u is None: - u = -1 - try: - if tarinfo.issym() and hasattr(os, "lchown"): - os.lchown(targetpath, u, g) - else: - os.chown(targetpath, u, g) - except (OSError, OverflowError) as e: - # OverflowError can be raised if an ID doesn't fit in `id_t` - raise ExtractError("could not change owner") from e - - def chmod(self, tarinfo, targetpath): - """Set file permissions of targetpath according to tarinfo. - """ - if tarinfo.mode is None: - return - try: - os.chmod(targetpath, tarinfo.mode) - except OSError as e: - raise ExtractError("could not change mode") from e - - def utime(self, tarinfo, targetpath): - """Set modification time of targetpath according to tarinfo. - """ - mtime = tarinfo.mtime - if mtime is None: - return - if not hasattr(os, 'utime'): - return - try: - os.utime(targetpath, (mtime, mtime)) - except OSError as e: - raise ExtractError("could not change modification time") from e - - #-------------------------------------------------------------------------- - def next(self): - """Return the next member of the archive as a TarInfo object, when - TarFile is opened for reading. Return None if there is no more - available. - """ - self._check("ra") - if self.firstmember is not None: - m = self.firstmember - self.firstmember = None - return m - - # Advance the file pointer. - if self.offset != self.fileobj.tell(): - if self.offset == 0: - return None - self.fileobj.seek(self.offset - 1) - if not self.fileobj.read(1): - raise ReadError("unexpected end of data") - - # Read the next block. 
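
The errorlevel tests in _handle_nonfatal_error() and _handle_fatal_error() above correspond to the public errorlevel attribute: 0 logs and suppresses everything, 1 (the default) re-raises fatal OSError-style failures, and 2 also re-raises ExtractError. A short sketch of the observable effect, again with a hypothetical archive.tar:

    import tarfile

    with tarfile.open("archive.tar", errorlevel=2) as tf:  # hypothetical
        try:
            tf.extractall(path="out", filter="data")
        except tarfile.ExtractError as exc:
            # With errorlevel=2, problems such as unsupported FIFOs or
            # chown failures escalate instead of being logged.
            print("non-fatal extraction problem escalated:", exc)
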
- tarinfo = None - while True: - try: - tarinfo = self.tarinfo.fromtarfile(self) - except EOFHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - except InvalidHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - elif self.offset == 0: - raise ReadError(str(e)) from None - except EmptyHeaderError: - if self.offset == 0: - raise ReadError("empty file") from None - except TruncatedHeaderError as e: - if self.offset == 0: - raise ReadError(str(e)) from None - except SubsequentHeaderError as e: - raise ReadError(str(e)) from None - except Exception as e: - try: - import zlib - if isinstance(e, zlib.error): - raise ReadError(f'zlib error: {e}') from None - else: - raise e - except ImportError: - raise e - break - - if tarinfo is not None: - # if streaming the file we do not want to cache the tarinfo - if not self.stream: - self.members.append(tarinfo) - else: - self._loaded = True - - return tarinfo - - #-------------------------------------------------------------------------- - # Little helper methods: - - def _getmember(self, name, tarinfo=None, normalize=False): - """Find an archive member by name from bottom to top. - If tarinfo is given, it is used as the starting point. - """ - # Ensure that all members have been loaded. - members = self.getmembers() - - # Limit the member search list up to tarinfo. - skipping = False - if tarinfo is not None: - try: - index = members.index(tarinfo) - except ValueError: - # The given starting point might be a (modified) copy. - # We'll later skip members until we find an equivalent. - skipping = True - else: - # Happy fast path - members = members[:index] - - if normalize: - name = os.path.normpath(name) - - for member in reversed(members): - if skipping: - if tarinfo.offset == member.offset: - skipping = False - continue - if normalize: - member_name = os.path.normpath(member.name) - else: - member_name = member.name - - if name == member_name: - return member - - if skipping: - # Starting point was not found - raise ValueError(tarinfo) - - def _load(self): - """Read through the entire archive file and look for readable - members. This should not run if the file is set to stream. - """ - if not self.stream: - while self.next() is not None: - pass - self._loaded = True - - def _check(self, mode=None): - """Check if TarFile is still open, and if the operation's mode - corresponds to TarFile's mode. - """ - if self.closed: - raise OSError("%s is closed" % self.__class__.__name__) - if mode is not None and self.mode not in mode: - raise OSError("bad operation for mode %r" % self.mode) - - def _find_link_target(self, tarinfo): - """Find the target member of a symlink or hardlink member in the - archive. - """ - if tarinfo.issym(): - # Always search the entire archive. - linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname))) - limit = None - else: - # Search the archive before the link, because a hard link is - # just a reference to an already archived file. - linkname = tarinfo.linkname - limit = tarinfo - - member = self._getmember(linkname, tarinfo=limit, normalize=True) - if member is None: - raise KeyError("linkname %r not found" % linkname) - return member - - def __iter__(self): - """Provide an iterator object. - """ - if self._loaded: - yield from self.members - return - - # Yield items using TarFile's next() method. 
- # When all members have been read, set TarFile as _loaded. - index = 0 - # Fix for SF #1100429: Under rare circumstances it can - # happen that getmembers() is called during iteration, - # which will have already exhausted the next() method. - if self.firstmember is not None: - tarinfo = self.next() - index += 1 - yield tarinfo - - while True: - if index < len(self.members): - tarinfo = self.members[index] - elif not self._loaded: - tarinfo = self.next() - if not tarinfo: - self._loaded = True - return - else: - return - index += 1 - yield tarinfo - - def _dbg(self, level, msg): - """Write debugging output to sys.stderr. - """ - if level <= self.debug: - print(msg, file=sys.stderr) - - def __enter__(self): - self._check() - return self - - def __exit__(self, type, value, traceback): - if type is None: - self.close() - else: - # An exception occurred. We must not call close() because - # it would try to write end-of-archive blocks and padding. - if not self._extfileobj: - self.fileobj.close() - self.closed = True - -#-------------------- -# exported functions -#-------------------- - -def is_tarfile(name): - """Return True if name points to a tar archive that we - are able to handle, else return False. - - 'name' should be a string, file, or file-like object. - """ - try: - if hasattr(name, "read"): - pos = name.tell() - t = open(fileobj=name) - name.seek(pos) - else: - t = open(name) - t.close() - return True - except TarError: - return False - -open = TarFile.open - - -def main(): - import argparse - - description = 'A simple command-line interface for tarfile module.' - parser = argparse.ArgumentParser(description=description) - parser.add_argument('-v', '--verbose', action='store_true', default=False, - help='Verbose output') - parser.add_argument('--filter', metavar='<filtername>', - choices=_NAMED_FILTERS, - help='Filter for extraction') - - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('-l', '--list', metavar='<tarfile>', - help='Show listing of a tarfile') - group.add_argument('-e', '--extract', nargs='+', - metavar=('<tarfile>', '<output_dir>'), - help='Extract tarfile into target dir') - group.add_argument('-c', '--create', nargs='+', - metavar=('<name>', '<file>'), - help='Create tarfile from sources') - group.add_argument('-t', '--test', metavar='<tarfile>', - help='Test if a tarfile is valid') - - args = parser.parse_args() - - if args.filter and args.extract is None: - parser.exit(1, '--filter is only valid for extraction\n') - - if args.test is not None: - src = args.test - if is_tarfile(src): - with open(src, 'r') as tar: - tar.getmembers() - print(tar.getmembers(), file=sys.stderr) - if args.verbose: - print('{!r} is a tar archive.'.format(src)) - else: - parser.exit(1, '{!r} is not a tar archive.\n'.format(src)) - - elif args.list is not None: - src = args.list - if is_tarfile(src): - with TarFile.open(src, 'r:*') as tf: - tf.list(verbose=args.verbose) - else: - parser.exit(1, '{!r} is not a tar archive.\n'.format(src)) - - elif args.extract is not None: - if len(args.extract) == 1: - src = args.extract[0] - curdir = os.curdir - elif len(args.extract) == 2: - src, curdir = args.extract - else: - parser.exit(1, parser.format_help()) - - if is_tarfile(src): - with TarFile.open(src, 'r:*') as tf: - tf.extractall(path=curdir, filter=args.filter) - if args.verbose: - if curdir == '.': - msg = '{!r} file is extracted.'.format(src) - else: - msg = ('{!r} file is extracted ' - 'into {!r} directory.').format(src, curdir) - print(msg) - else: - parser.exit(1, '{!r} is not a tar archive.\n'.format(src)) - - 
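
Since is_tarfile() above restores the stream position when given a file-like object, it can safely probe an already-open stream. A self-contained sketch using an in-memory archive:

    import io
    import tarfile

    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w") as tf:
        data = b"hello"
        info = tarfile.TarInfo(name="hello.txt")
        info.size = len(data)
        tf.addfile(info, io.BytesIO(data))

    buf.seek(0)
    print(tarfile.is_tarfile(buf))                       # True, position preserved
    print(tarfile.is_tarfile(io.BytesIO(b"not a tar")))  # False
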
elif args.create is not None: - tar_name = args.create.pop(0) - _, ext = os.path.splitext(tar_name) - compressions = { - # gz - '.gz': 'gz', - '.tgz': 'gz', - # xz - '.xz': 'xz', - '.txz': 'xz', - # bz2 - '.bz2': 'bz2', - '.tbz': 'bz2', - '.tbz2': 'bz2', - '.tb2': 'bz2', - } - tar_mode = 'w:' + compressions[ext] if ext in compressions else 'w' - tar_files = args.create - - with TarFile.open(tar_name, tar_mode) as tf: - for file_name in tar_files: - tf.add(file_name) - - if args.verbose: - print('{!r} file created.'.format(tar_name)) - -if __name__ == '__main__': - main() diff --git a/Python313_13_x86_Template/Lib/tempfile.py b/Python313_13_x86_Template/Lib/tempfile.py deleted file mode 100644 index 609ef487..00000000 --- a/Python313_13_x86_Template/Lib/tempfile.py +++ /dev/null @@ -1,957 +0,0 @@ -"""Temporary files. - -This module provides generic, low- and high-level interfaces for -creating temporary files and directories. All of the interfaces -provided by this module can be used without fear of race conditions -except for 'mktemp'. 'mktemp' is subject to race conditions and -should not be used; it is provided for backward compatibility only. - -The default path names are returned as str. If you supply bytes as -input, all return values will be in bytes. Ex: - - >>> tempfile.mkstemp() - (4, '/tmp/tmptpu9nin8') - >>> tempfile.mkdtemp(suffix=b'') - b'/tmp/tmppbi8f0hy' - -This module also provides some data items to the user: - - TMP_MAX - maximum number of names that will be tried before - giving up. - tempdir - If this is set to a string before the first use of - any routine from this module, it will be considered as - another candidate location to store temporary files. -""" - -__all__ = [ - "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces - "SpooledTemporaryFile", "TemporaryDirectory", - "mkstemp", "mkdtemp", # low level safe interfaces - "mktemp", # deprecated unsafe interface - "TMP_MAX", "gettempprefix", # constants - "tempdir", "gettempdir", - "gettempprefixb", "gettempdirb", - ] - - -# Imports. - -import functools as _functools -import warnings as _warnings -import io as _io -import os as _os -import shutil as _shutil -import errno as _errno -from random import Random as _Random -import sys as _sys -import types as _types -import weakref as _weakref -import _thread -_allocate_lock = _thread.allocate_lock - -_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL -if hasattr(_os, 'O_NOFOLLOW'): - _text_openflags |= _os.O_NOFOLLOW - -_bin_openflags = _text_openflags -if hasattr(_os, 'O_BINARY'): - _bin_openflags |= _os.O_BINARY - -# This is more than enough. -# Each name contains over 40 random bits. Even with a million temporary -# files, the chance of a conflict is less than 1 in a million, and with -# 20 attempts, it is less than 1e-120. -TMP_MAX = 20 - -# This variable _was_ unused for legacy reasons, see issue 10354. -# But as of 3.5 we actually use it at runtime so changing it would -# have a possibly desirable side effect... But we do not want to support -# that as an API. It is undocumented on purpose. Do not depend on this. -template = "tmp" - -# Internal routines. 
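
As the module docstring above notes, the return type follows the argument types: str arguments (or none) yield str paths, while bytes arguments yield bytes. A quick sketch:

    import os
    import tempfile

    fd, path = tempfile.mkstemp(suffix=".txt")   # str in, str out
    os.close(fd)
    os.unlink(path)

    bdir = tempfile.mkdtemp(suffix=b"")          # bytes in, bytes out
    print(type(bdir))                            # <class 'bytes'>
    os.rmdir(bdir)
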
- -_once_lock = _allocate_lock() - - -def _exists(fn): - try: - _os.lstat(fn) - except OSError: - return False - else: - return True - - -def _infer_return_type(*args): - """Look at the type of all args and divine their implied return type.""" - return_type = None - for arg in args: - if arg is None: - continue - - if isinstance(arg, _os.PathLike): - arg = _os.fspath(arg) - - if isinstance(arg, bytes): - if return_type is str: - raise TypeError("Can't mix bytes and non-bytes in " - "path components.") - return_type = bytes - else: - if return_type is bytes: - raise TypeError("Can't mix bytes and non-bytes in " - "path components.") - return_type = str - if return_type is None: - if tempdir is None or isinstance(tempdir, str): - return str # tempfile APIs return a str by default. - else: - # we could check for bytes but it'll fail later on anyway - return bytes - return return_type - - -def _sanitize_params(prefix, suffix, dir): - """Common parameter processing for most APIs in this module.""" - output_type = _infer_return_type(prefix, suffix, dir) - if suffix is None: - suffix = output_type() - if prefix is None: - if output_type is str: - prefix = template - else: - prefix = _os.fsencode(template) - if dir is None: - if output_type is str: - dir = gettempdir() - else: - dir = gettempdirb() - return prefix, suffix, dir, output_type - - -class _RandomNameSequence: - """An instance of _RandomNameSequence generates an endless - sequence of unpredictable strings which can safely be incorporated - into file names. Each string is eight characters long. Multiple - threads can safely use the same instance at the same time. - - _RandomNameSequence is an iterator.""" - - characters = "abcdefghijklmnopqrstuvwxyz0123456789_" - - @property - def rng(self): - cur_pid = _os.getpid() - if cur_pid != getattr(self, '_rng_pid', None): - self._rng = _Random() - self._rng_pid = cur_pid - return self._rng - - def __iter__(self): - return self - - def __next__(self): - return ''.join(self.rng.choices(self.characters, k=8)) - -def _candidate_tempdir_list(): - """Generate a list of candidate temporary directories which - _get_default_tempdir will try.""" - - dirlist = [] - - # First, try the environment. - for envname in 'TMPDIR', 'TEMP', 'TMP': - dirname = _os.getenv(envname) - if dirname: dirlist.append(dirname) - - # Failing that, try OS-specific locations. - if _os.name == 'nt': - dirlist.extend([ _os.path.expanduser(r'~\AppData\Local\Temp'), - _os.path.expandvars(r'%SYSTEMROOT%\Temp'), - r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ]) - else: - dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ]) - - # As a last resort, the current directory. - try: - dirlist.append(_os.getcwd()) - except (AttributeError, OSError): - dirlist.append(_os.curdir) - - return dirlist - -def _get_default_tempdir(dirlist=None): - """Calculate the default directory to use for temporary files. - This routine should be called exactly once. - - We determine whether or not a candidate temp dir is usable by - trying to create and write to a file in that directory. If this - is successful, the test file is deleted. 
To prevent denial of - service, the name of the test file must be randomized.""" - - namer = _RandomNameSequence() - if dirlist is None: - dirlist = _candidate_tempdir_list() - - for dir in dirlist: - if dir != _os.curdir: - dir = _os.path.abspath(dir) - for seq in range(TMP_MAX): - name = next(namer) - filename = _os.path.join(dir, name) - try: - fd = _os.open(filename, _bin_openflags, 0o600) - try: - try: - _os.write(fd, b'blat') - finally: - _os.close(fd) - finally: - _os.unlink(filename) - return dir - except FileExistsError: - pass - except PermissionError: - # See the comment in mkdtemp(). - if _os.name == 'nt' and _os.path.isdir(dir): - continue - break # no point trying more names in this directory - except OSError: - break # no point trying more names in this directory - raise FileNotFoundError(_errno.ENOENT, - "No usable temporary directory found in %s" % - dirlist) - -_name_sequence = None - -def _get_candidate_names(): - """Common setup sequence for all user-callable interfaces.""" - - global _name_sequence - if _name_sequence is None: - _once_lock.acquire() - try: - if _name_sequence is None: - _name_sequence = _RandomNameSequence() - finally: - _once_lock.release() - return _name_sequence - - -def _mkstemp_inner(dir, pre, suf, flags, output_type): - """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.""" - - dir = _os.path.abspath(dir) - names = _get_candidate_names() - if output_type is bytes: - names = map(_os.fsencode, names) - - for seq in range(TMP_MAX): - name = next(names) - file = _os.path.join(dir, pre + name + suf) - _sys.audit("tempfile.mkstemp", file) - try: - fd = _os.open(file, flags, 0o600) - except FileExistsError: - continue # try again - except PermissionError: - # See the comment in mkdtemp(). - if _os.name == 'nt' and _os.path.isdir(dir) and seq < TMP_MAX - 1: - continue - else: - raise - return fd, file - - raise FileExistsError(_errno.EEXIST, - "No usable temporary file name found") - -def _dont_follow_symlinks(func, path, *args): - # Pass follow_symlinks=False, unless not supported on this platform. - if func in _os.supports_follow_symlinks: - func(path, *args, follow_symlinks=False) - elif not _os.path.islink(path): - func(path, *args) - -def _resetperms(path): - try: - chflags = _os.chflags - except AttributeError: - pass - else: - _dont_follow_symlinks(chflags, path, 0) - _dont_follow_symlinks(_os.chmod, path, 0o700) - - -# User visible interfaces. - -def gettempprefix(): - """The default prefix for temporary directories as string.""" - return _os.fsdecode(template) - -def gettempprefixb(): - """The default prefix for temporary directories as bytes.""" - return _os.fsencode(template) - -tempdir = None - -def _gettempdir(): - """Private accessor for tempfile.tempdir.""" - global tempdir - if tempdir is None: - _once_lock.acquire() - try: - if tempdir is None: - tempdir = _get_default_tempdir() - finally: - _once_lock.release() - return tempdir - -def gettempdir(): - """Returns tempfile.tempdir as str.""" - return _os.fsdecode(_gettempdir()) - -def gettempdirb(): - """Returns tempfile.tempdir as bytes.""" - return _os.fsencode(_gettempdir()) - -def mkstemp(suffix=None, prefix=None, dir=None, text=False): - """User-callable function to create and return a unique temporary - file. The return value is a pair (fd, name) where fd is the - file descriptor returned by os.open, and name is the filename. - - If 'suffix' is not None, the file name will end with that suffix, - otherwise there will be no suffix. 
- - If 'prefix' is not None, the file name will begin with that prefix, - otherwise a default prefix is used. - - If 'dir' is not None, the file will be created in that directory, - otherwise a default directory is used. - - If 'text' is specified and true, the file is opened in text - mode. Else (the default) the file is opened in binary mode. - - If any of 'suffix', 'prefix' and 'dir' are not None, they must be the - same type. If they are bytes, the returned name will be bytes; str - otherwise. - - The file is readable and writable only by the creating user ID. - If the operating system uses permission bits to indicate whether a - file is executable, the file is executable by no one. The file - descriptor is not inherited by children of this process. - - Caller is responsible for deleting the file when done with it. - """ - - prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) - - if text: - flags = _text_openflags - else: - flags = _bin_openflags - - return _mkstemp_inner(dir, prefix, suffix, flags, output_type) - - -def mkdtemp(suffix=None, prefix=None, dir=None): - """User-callable function to create and return a unique temporary - directory. The return value is the pathname of the directory. - - Arguments are as for mkstemp, except that the 'text' argument is - not accepted. - - The directory is readable, writable, and searchable only by the - creating user. - - Caller is responsible for deleting the directory when done with it. - """ - - prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) - - names = _get_candidate_names() - if output_type is bytes: - names = map(_os.fsencode, names) - - for seq in range(TMP_MAX): - name = next(names) - file = _os.path.join(dir, prefix + name + suffix) - _sys.audit("tempfile.mkdtemp", file) - try: - _os.mkdir(file, 0o700) - except FileExistsError: - continue # try again - except PermissionError: - # On Posix, this exception is raised when the user has no - # write access to the parent directory. - # On Windows, it is also raised when a directory with - # the chosen name already exists, or if the parent directory - # is not a directory. - # We cannot distinguish between "directory-exists-error" and - # "access-denied-error". - if _os.name == 'nt' and _os.path.isdir(dir) and seq < TMP_MAX - 1: - continue - else: - raise - return _os.path.abspath(file) - - raise FileExistsError(_errno.EEXIST, - "No usable temporary directory name found") - -def mktemp(suffix="", prefix=template, dir=None): - """User-callable function to return a unique temporary file name. The - file is not created. - - Arguments are similar to mkstemp, except that the 'text' argument is - not accepted, and suffix=None, prefix=None and bytes file names are not - supported. - - THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may - refer to a file that did not exist at some point, but by the time - you get around to creating it, someone else may have beaten you to - the punch. 
- """ - -## from warnings import warn as _warn -## _warn("mktemp is a potential security risk to your program", -## RuntimeWarning, stacklevel=2) - - if dir is None: - dir = gettempdir() - - names = _get_candidate_names() - for seq in range(TMP_MAX): - name = next(names) - file = _os.path.join(dir, prefix + name + suffix) - if not _exists(file): - return file - - raise FileExistsError(_errno.EEXIST, - "No usable temporary filename found") - - -class _TemporaryFileCloser: - """A separate object allowing proper closing of a temporary file's - underlying file object, without adding a __del__ method to the - temporary file.""" - - cleanup_called = False - close_called = False - - def __init__(self, file, name, delete=True, delete_on_close=True): - self.file = file - self.name = name - self.delete = delete - self.delete_on_close = delete_on_close - - def cleanup(self, windows=(_os.name == 'nt'), unlink=_os.unlink): - if not self.cleanup_called: - self.cleanup_called = True - try: - if not self.close_called: - self.close_called = True - self.file.close() - finally: - # Windows provides delete-on-close as a primitive, in which - # case the file was deleted by self.file.close(). - if self.delete and not (windows and self.delete_on_close): - try: - unlink(self.name) - except FileNotFoundError: - pass - - def close(self): - if not self.close_called: - self.close_called = True - try: - self.file.close() - finally: - if self.delete and self.delete_on_close: - self.cleanup() - - def __del__(self): - self.cleanup() - - -class _TemporaryFileWrapper: - """Temporary file wrapper - - This class provides a wrapper around files opened for - temporary use. In particular, it seeks to automatically - remove the file when it is no longer needed. - """ - - def __init__(self, file, name, delete=True, delete_on_close=True): - self.file = file - self.name = name - self._closer = _TemporaryFileCloser(file, name, delete, - delete_on_close) - - def __getattr__(self, name): - # Attribute lookups are delegated to the underlying file - # and cached for non-numeric results - # (i.e. methods are cached, closed and friends are not) - file = self.__dict__['file'] - a = getattr(file, name) - if hasattr(a, '__call__'): - func = a - @_functools.wraps(func) - def func_wrapper(*args, **kwargs): - return func(*args, **kwargs) - # Avoid closing the file as long as the wrapper is alive, - # see issue #18879. - func_wrapper._closer = self._closer - a = func_wrapper - if not isinstance(a, int): - setattr(self, name, a) - return a - - # The underlying __enter__ method returns the wrong object - # (self.file) so override it to return the wrapper - def __enter__(self): - self.file.__enter__() - return self - - # Need to trap __exit__ as well to ensure the file gets - # deleted when used in a with statement - def __exit__(self, exc, value, tb): - result = self.file.__exit__(exc, value, tb) - self._closer.cleanup() - return result - - def close(self): - """ - Close the temporary file, possibly deleting it. - """ - self._closer.close() - - # iter() doesn't use __getattr__ to find the __iter__ method - def __iter__(self): - # Don't return iter(self.file), but yield from it to avoid closing - # file as long as it's being used as iterator (see issue #23700). We - # can't use 'yield from' here because iter(file) returns the file - # object itself, which has a close method, and thus the file would get - # closed when the generator is finalized, due to PEP380 semantics. 
- for line in self.file: - yield line - -def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None, - newline=None, suffix=None, prefix=None, - dir=None, delete=True, *, errors=None, - delete_on_close=True): - """Create and return a temporary file. - Arguments: - 'prefix', 'suffix', 'dir' -- as for mkstemp. - 'mode' -- the mode argument to io.open (default "w+b"). - 'buffering' -- the buffer size argument to io.open (default -1). - 'encoding' -- the encoding argument to io.open (default None) - 'newline' -- the newline argument to io.open (default None) - 'delete' -- whether the file is automatically deleted (default True). - 'delete_on_close' -- if 'delete', whether the file is deleted on close - (default True) or otherwise either on context manager exit - (if context manager was used) or on object finalization. . - 'errors' -- the errors argument to io.open (default None) - The file is created as mkstemp() would do it. - - Returns an object with a file-like interface; the name of the file - is accessible as its 'name' attribute. The file will be automatically - deleted when it is closed unless the 'delete' argument is set to False. - - On POSIX, NamedTemporaryFiles cannot be automatically deleted if - the creating process is terminated abruptly with a SIGKILL signal. - Windows can delete the file even in this case. - """ - - prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) - - flags = _bin_openflags - - # Setting O_TEMPORARY in the flags causes the OS to delete - # the file when it is closed. This is only supported by Windows. - if _os.name == 'nt' and delete and delete_on_close: - flags |= _os.O_TEMPORARY - - if "b" not in mode: - encoding = _io.text_encoding(encoding) - - name = None - def opener(*args): - nonlocal name - fd, name = _mkstemp_inner(dir, prefix, suffix, flags, output_type) - return fd - try: - file = _io.open(dir, mode, buffering=buffering, - newline=newline, encoding=encoding, errors=errors, - opener=opener) - try: - raw = getattr(file, 'buffer', file) - raw = getattr(raw, 'raw', raw) - raw.name = name - return _TemporaryFileWrapper(file, name, delete, delete_on_close) - except: - file.close() - raise - except: - if name is not None and not ( - _os.name == 'nt' and delete and delete_on_close): - _os.unlink(name) - raise - -if _os.name != 'posix' or _sys.platform == 'cygwin': - # On non-POSIX and Cygwin systems, assume that we cannot unlink a file - # while it is open. - TemporaryFile = NamedTemporaryFile - -else: - # Is the O_TMPFILE flag available and does it work? - # The flag is set to False if os.open(dir, os.O_TMPFILE) raises an - # IsADirectoryError exception - _O_TMPFILE_WORKS = hasattr(_os, 'O_TMPFILE') - - def TemporaryFile(mode='w+b', buffering=-1, encoding=None, - newline=None, suffix=None, prefix=None, - dir=None, *, errors=None): - """Create and return a temporary file. - Arguments: - 'prefix', 'suffix', 'dir' -- as for mkstemp. - 'mode' -- the mode argument to io.open (default "w+b"). - 'buffering' -- the buffer size argument to io.open (default -1). - 'encoding' -- the encoding argument to io.open (default None) - 'newline' -- the newline argument to io.open (default None) - 'errors' -- the errors argument to io.open (default None) - The file is created as mkstemp() would do it. - - Returns an object with a file-like interface. The file has no - name, and will cease to exist when it is closed. 
- """ - global _O_TMPFILE_WORKS - - if "b" not in mode: - encoding = _io.text_encoding(encoding) - - prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) - - flags = _bin_openflags - if _O_TMPFILE_WORKS: - fd = None - def opener(*args): - nonlocal fd - flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT - fd = _os.open(dir, flags2, 0o600) - return fd - try: - file = _io.open(dir, mode, buffering=buffering, - newline=newline, encoding=encoding, - errors=errors, opener=opener) - raw = getattr(file, 'buffer', file) - raw = getattr(raw, 'raw', raw) - raw.name = fd - return file - except IsADirectoryError: - # Linux kernel older than 3.11 ignores the O_TMPFILE flag: - # O_TMPFILE is read as O_DIRECTORY. Trying to open a directory - # with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a - # directory cannot be open to write. Set flag to False to not - # try again. - _O_TMPFILE_WORKS = False - except OSError: - # The filesystem of the directory does not support O_TMPFILE. - # For example, OSError(95, 'Operation not supported'). - # - # On Linux kernel older than 3.11, trying to open a regular - # file (or a symbolic link to a regular file) with O_TMPFILE - # fails with NotADirectoryError, because O_TMPFILE is read as - # O_DIRECTORY. - pass - # Fallback to _mkstemp_inner(). - - fd = None - def opener(*args): - nonlocal fd - fd, name = _mkstemp_inner(dir, prefix, suffix, flags, output_type) - try: - _os.unlink(name) - except BaseException as e: - _os.close(fd) - raise - return fd - file = _io.open(dir, mode, buffering=buffering, - newline=newline, encoding=encoding, errors=errors, - opener=opener) - raw = getattr(file, 'buffer', file) - raw = getattr(raw, 'raw', raw) - raw.name = fd - return file - -class SpooledTemporaryFile(_io.IOBase): - """Temporary file wrapper, specialized to switch from BytesIO - or StringIO to a real file when it exceeds a certain size or - when a fileno is needed. - """ - _rolled = False - - def __init__(self, max_size=0, mode='w+b', buffering=-1, - encoding=None, newline=None, - suffix=None, prefix=None, dir=None, *, errors=None): - if 'b' in mode: - self._file = _io.BytesIO() - else: - encoding = _io.text_encoding(encoding) - self._file = _io.TextIOWrapper(_io.BytesIO(), - encoding=encoding, errors=errors, - newline=newline) - self._max_size = max_size - self._rolled = False - self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering, - 'suffix': suffix, 'prefix': prefix, - 'encoding': encoding, 'newline': newline, - 'dir': dir, 'errors': errors} - - __class_getitem__ = classmethod(_types.GenericAlias) - - def _check(self, file): - if self._rolled: return - max_size = self._max_size - if max_size and file.tell() > max_size: - self.rollover() - - def rollover(self): - if self._rolled: return - file = self._file - newfile = self._file = TemporaryFile(**self._TemporaryFileArgs) - del self._TemporaryFileArgs - - pos = file.tell() - if hasattr(newfile, 'buffer'): - newfile.buffer.write(file.detach().getvalue()) - else: - newfile.write(file.getvalue()) - newfile.seek(pos, 0) - - self._rolled = True - - # The method caching trick from NamedTemporaryFile - # won't work here, because _file may change from a - # BytesIO/StringIO instance to a real file. So we list - # all the methods directly. 
- - # Context management protocol - def __enter__(self): - if self._file.closed: - raise ValueError("Cannot enter context with closed file") - return self - - def __exit__(self, exc, value, tb): - self._file.close() - - # file protocol - def __iter__(self): - return self._file.__iter__() - - def __del__(self): - if not self.closed: - _warnings.warn( - "Unclosed file {!r}".format(self), - ResourceWarning, - stacklevel=2, - source=self - ) - self.close() - - def close(self): - self._file.close() - - @property - def closed(self): - return self._file.closed - - @property - def encoding(self): - return self._file.encoding - - @property - def errors(self): - return self._file.errors - - def fileno(self): - self.rollover() - return self._file.fileno() - - def flush(self): - self._file.flush() - - def isatty(self): - return self._file.isatty() - - @property - def mode(self): - try: - return self._file.mode - except AttributeError: - return self._TemporaryFileArgs['mode'] - - @property - def name(self): - try: - return self._file.name - except AttributeError: - return None - - @property - def newlines(self): - return self._file.newlines - - def readable(self): - return self._file.readable() - - def read(self, *args): - return self._file.read(*args) - - def read1(self, *args): - return self._file.read1(*args) - - def readinto(self, b): - return self._file.readinto(b) - - def readinto1(self, b): - return self._file.readinto1(b) - - def readline(self, *args): - return self._file.readline(*args) - - def readlines(self, *args): - return self._file.readlines(*args) - - def seekable(self): - return self._file.seekable() - - def seek(self, *args): - return self._file.seek(*args) - - def tell(self): - return self._file.tell() - - def truncate(self, size=None): - if size is None: - return self._file.truncate() - else: - if size > self._max_size: - self.rollover() - return self._file.truncate(size) - - def writable(self): - return self._file.writable() - - def write(self, s): - file = self._file - rv = file.write(s) - self._check(file) - return rv - - def writelines(self, iterable): - if self._max_size == 0 or self._rolled: - return self._file.writelines(iterable) - - it = iter(iterable) - for line in it: - self.write(line) - if self._rolled: - return self._file.writelines(it) - - def detach(self): - return self._file.detach() - - -class TemporaryDirectory: - """Create and return a temporary directory. This has the same - behavior as mkdtemp but can be used as a context manager. For - example: - - with TemporaryDirectory() as tmpdir: - ... - - Upon exiting the context, the directory and everything contained - in it are removed (unless delete=False is passed or an exception - is raised during cleanup and ignore_cleanup_errors is not True). - - Optional Arguments: - suffix - A str suffix for the directory name. (see mkdtemp) - prefix - A str prefix for the directory name. (see mkdtemp) - dir - A directory to create this temp dir in. (see mkdtemp) - ignore_cleanup_errors - False; ignore exceptions during cleanup? - delete - True; whether the directory is automatically deleted. 
- """ - - def __init__(self, suffix=None, prefix=None, dir=None, - ignore_cleanup_errors=False, *, delete=True): - self.name = mkdtemp(suffix, prefix, dir) - self._ignore_cleanup_errors = ignore_cleanup_errors - self._delete = delete - self._finalizer = _weakref.finalize( - self, self._cleanup, self.name, - warn_message="Implicitly cleaning up {!r}".format(self), - ignore_errors=self._ignore_cleanup_errors, delete=self._delete) - - @classmethod - def _rmtree(cls, name, ignore_errors=False, repeated=False): - def onexc(func, path, exc): - if isinstance(exc, PermissionError): - if repeated and path == name: - if ignore_errors: - return - raise - - try: - if path != name: - _resetperms(_os.path.dirname(path)) - _resetperms(path) - - try: - _os.unlink(path) - except IsADirectoryError: - cls._rmtree(path, ignore_errors=ignore_errors) - except PermissionError: - # The PermissionError handler was originally added for - # FreeBSD in directories, but it seems that it is raised - # on Windows too. - # bpo-43153: Calling _rmtree again may - # raise NotADirectoryError and mask the PermissionError. - # So we must re-raise the current PermissionError if - # path is not a directory. - if not _os.path.isdir(path) or _os.path.isjunction(path): - if ignore_errors: - return - raise - cls._rmtree(path, ignore_errors=ignore_errors, - repeated=(path == name)) - except FileNotFoundError: - pass - elif isinstance(exc, FileNotFoundError): - pass - else: - if not ignore_errors: - raise - - _shutil.rmtree(name, onexc=onexc) - - @classmethod - def _cleanup(cls, name, warn_message, ignore_errors=False, delete=True): - if delete: - cls._rmtree(name, ignore_errors=ignore_errors) - _warnings.warn(warn_message, ResourceWarning) - - def __repr__(self): - return "<{} {!r}>".format(self.__class__.__name__, self.name) - - def __enter__(self): - return self.name - - def __exit__(self, exc, value, tb): - if self._delete: - self.cleanup() - - def cleanup(self): - if self._finalizer.detach() or _os.path.exists(self.name): - self._rmtree(self.name, ignore_errors=self._ignore_cleanup_errors) - - __class_getitem__ = classmethod(_types.GenericAlias) diff --git a/Python313_13_x86_Template/Lib/textwrap.py b/Python313_13_x86_Template/Lib/textwrap.py deleted file mode 100644 index 686c9eb8..00000000 --- a/Python313_13_x86_Template/Lib/textwrap.py +++ /dev/null @@ -1,497 +0,0 @@ -"""Text wrapping and filling. -""" - -# Copyright (C) 1999-2001 Gregory P. Ward. -# Copyright (C) 2002, 2003 Python Software Foundation. -# Written by Greg Ward - -import re - -__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent', 'shorten'] - -# Hardcode the recognized whitespace characters to the US-ASCII -# whitespace characters. The main reason for doing this is that -# some Unicode spaces (like \u00a0) are non-breaking whitespaces. -_whitespace = '\t\n\x0b\x0c\r ' - -class TextWrapper: - """ - Object for wrapping/filling text. The public interface consists of - the wrap() and fill() methods; the other methods are just there for - subclasses to override in order to tweak the default behaviour. - If you want to completely replace the main wrapping algorithm, - you'll probably have to override _wrap_chunks(). - - Several instance attributes control various aspects of wrapping: - width (default: 70) - the maximum width of wrapped lines (unless break_long_words - is false) - initial_indent (default: "") - string that will be prepended to the first line of wrapped - output. Counts towards the line's width. 
- subsequent_indent (default: "") - string that will be prepended to all lines save the first - of wrapped output; also counts towards each line's width. - expand_tabs (default: true) - Expand tabs in input text to spaces before further processing. - Each tab will become 0 .. 'tabsize' spaces, depending on its position - in its line. If false, each tab is treated as a single character. - tabsize (default: 8) - Expand tabs in input text to 0 .. 'tabsize' spaces, unless - 'expand_tabs' is false. - replace_whitespace (default: true) - Replace all whitespace characters in the input text by spaces - after tab expansion. Note that if expand_tabs is false and - replace_whitespace is true, every tab will be converted to a - single space! - fix_sentence_endings (default: false) - Ensure that sentence-ending punctuation is always followed - by two spaces. Off by default because the algorithm is - (unavoidably) imperfect. - break_long_words (default: true) - Break words longer than 'width'. If false, those words will not - be broken, and some lines might be longer than 'width'. - break_on_hyphens (default: true) - Allow breaking hyphenated words. If true, wrapping will occur - preferably on whitespaces and right after hyphens part of - compound words. - drop_whitespace (default: true) - Drop leading and trailing whitespace from lines. - max_lines (default: None) - Truncate wrapped lines. - placeholder (default: ' [...]') - Append to the last line of truncated text. - """ - - unicode_whitespace_trans = dict.fromkeys(map(ord, _whitespace), ord(' ')) - - # This funky little regex is just the trick for splitting - # text up into word-wrappable chunks. E.g. - # "Hello there -- you goof-ball, use the -b option!" - # splits into - # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! - # (after stripping out empty strings). - word_punct = r'[\w!"\'&.,?]' - letter = r'[^\d\W]' - whitespace = r'[%s]' % re.escape(_whitespace) - nowhitespace = '[^' + whitespace[1:] - wordsep_re = re.compile(r''' - ( # any whitespace - %(ws)s+ - | # em-dash between words - (?<=%(wp)s) -{2,} (?=\w) - | # word, possibly hyphenated - %(nws)s+? (?: - # hyphenated word - -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-)) - (?= %(lt)s -? %(lt)s) - | # end of word - (?=%(ws)s|\Z) - | # em-dash - (?<=%(wp)s) (?=-{2,}\w) - ) - )''' % {'wp': word_punct, 'lt': letter, - 'ws': whitespace, 'nws': nowhitespace}, - re.VERBOSE) - del word_punct, letter, nowhitespace - - # This less funky little regex just split on recognized spaces. E.g. - # "Hello there -- you goof-ball, use the -b option!" - # splits into - # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/ - wordsep_simple_re = re.compile(r'(%s+)' % whitespace) - del whitespace - - # XXX this is not locale- or charset-aware -- string.lowercase - # is US-ASCII only (and therefore English-only) - sentence_end_re = re.compile(r'[a-z]' # lowercase letter - r'[\.\!\?]' # sentence-ending punct. - r'[\"\']?' 
# optional end-of-quote - r'\Z') # end of chunk - - def __init__(self, - width=70, - initial_indent="", - subsequent_indent="", - expand_tabs=True, - replace_whitespace=True, - fix_sentence_endings=False, - break_long_words=True, - drop_whitespace=True, - break_on_hyphens=True, - tabsize=8, - *, - max_lines=None, - placeholder=' [...]'): - self.width = width - self.initial_indent = initial_indent - self.subsequent_indent = subsequent_indent - self.expand_tabs = expand_tabs - self.replace_whitespace = replace_whitespace - self.fix_sentence_endings = fix_sentence_endings - self.break_long_words = break_long_words - self.drop_whitespace = drop_whitespace - self.break_on_hyphens = break_on_hyphens - self.tabsize = tabsize - self.max_lines = max_lines - self.placeholder = placeholder - - - # -- Private methods ----------------------------------------------- - # (possibly useful for subclasses to override) - - def _munge_whitespace(self, text): - """_munge_whitespace(text : string) -> string - - Munge whitespace in text: expand tabs and convert all other - whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz" - becomes " foo bar baz". - """ - if self.expand_tabs: - text = text.expandtabs(self.tabsize) - if self.replace_whitespace: - text = text.translate(self.unicode_whitespace_trans) - return text - - - def _split(self, text): - """_split(text : string) -> [string] - - Split the text to wrap into indivisible chunks. Chunks are - not quite the same as words; see _wrap_chunks() for full - details. As an example, the text - Look, goof-ball -- use the -b option! - breaks into the following chunks: - 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', - 'use', ' ', 'the', ' ', '-b', ' ', 'option!' - if break_on_hyphens is True, or in: - 'Look,', ' ', 'goof-ball', ' ', '--', ' ', - 'use', ' ', 'the', ' ', '-b', ' ', option!' - otherwise. - """ - if self.break_on_hyphens is True: - chunks = self.wordsep_re.split(text) - else: - chunks = self.wordsep_simple_re.split(text) - chunks = [c for c in chunks if c] - return chunks - - def _fix_sentence_endings(self, chunks): - """_fix_sentence_endings(chunks : [string]) - - Correct for sentence endings buried in 'chunks'. Eg. when the - original text contains "... foo.\\nBar ...", munge_whitespace() - and split() will convert that to [..., "foo.", " ", "Bar", ...] - which has one too few spaces; this method simply changes the one - space to two. - """ - i = 0 - patsearch = self.sentence_end_re.search - while i < len(chunks)-1: - if chunks[i+1] == " " and patsearch(chunks[i]): - chunks[i+1] = " " - i += 2 - else: - i += 1 - - def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): - """_handle_long_word(chunks : [string], - cur_line : [string], - cur_len : int, width : int) - - Handle a chunk of text (most likely a word, not whitespace) that - is too long to fit in any line. - """ - # Figure out when indent is larger than the specified width, and make - # sure at least one character is stripped off on every pass - if width < 1: - space_left = 1 - else: - space_left = width - cur_len - - # If we're allowed to break long words, then do so: put as much - # of the next chunk onto the current line as will fit. 
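
The chunking performed by _split() above is what the public wrap() ultimately consumes; break_on_hyphens selects between the two regexes. A small demonstration:

    import textwrap

    text = "Look, goof-ball -- use the -b option!"
    print(textwrap.wrap(text, width=12))
    print(textwrap.wrap(text, width=12, break_on_hyphens=False))
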
- if self.break_long_words and space_left > 0: - end = space_left - chunk = reversed_chunks[-1] - if self.break_on_hyphens and len(chunk) > space_left: - # break after last hyphen, but only if there are - # non-hyphens before it - hyphen = chunk.rfind('-', 0, space_left) - if hyphen > 0 and any(c != '-' for c in chunk[:hyphen]): - end = hyphen + 1 - cur_line.append(chunk[:end]) - reversed_chunks[-1] = chunk[end:] - - # Otherwise, we have to preserve the long word intact. Only add - # it to the current line if there's nothing already there -- - # that minimizes how much we violate the width constraint. - elif not cur_line: - cur_line.append(reversed_chunks.pop()) - - # If we're not allowed to break long words, and there's already - # text on the current line, do nothing. Next time through the - # main loop of _wrap_chunks(), we'll wind up here again, but - # cur_len will be zero, so the next line will be entirely - # devoted to the long word that we can't handle right now. - - def _wrap_chunks(self, chunks): - """_wrap_chunks(chunks : [string]) -> [string] - - Wrap a sequence of text chunks and return a list of lines of - length 'self.width' or less. (If 'break_long_words' is false, - some lines may be longer than this.) Chunks correspond roughly - to words and the whitespace between them: each chunk is - indivisible (modulo 'break_long_words'), but a line break can - come between any two chunks. Chunks should not have internal - whitespace; ie. a chunk is either all whitespace or a "word". - Whitespace chunks will be removed from the beginning and end of - lines, but apart from that whitespace is preserved. - """ - lines = [] - if self.width <= 0: - raise ValueError("invalid width %r (must be > 0)" % self.width) - if self.max_lines is not None: - if self.max_lines > 1: - indent = self.subsequent_indent - else: - indent = self.initial_indent - if len(indent) + len(self.placeholder.lstrip()) > self.width: - raise ValueError("placeholder too large for max width") - - # Arrange in reverse order so items can be efficiently popped - # from a stack of chucks. - chunks.reverse() - - while chunks: - - # Start the list of chunks that will make up the current line. - # cur_len is just the length of all the chunks in cur_line. - cur_line = [] - cur_len = 0 - - # Figure out which static string will prefix this line. - if lines: - indent = self.subsequent_indent - else: - indent = self.initial_indent - - # Maximum width for this line. - width = self.width - len(indent) - - # First chunk on line is whitespace -- drop it, unless this - # is the very beginning of the text (ie. no lines started yet). - if self.drop_whitespace and chunks[-1].strip() == '' and lines: - del chunks[-1] - - while chunks: - l = len(chunks[-1]) - - # Can at least squeeze this chunk onto the current line. - if cur_len + l <= width: - cur_line.append(chunks.pop()) - cur_len += l - - # Nope, this line is full. - else: - break - - # The current line is full, and the next chunk is too big to - # fit on *any* line (not just this one). - if chunks and len(chunks[-1]) > width: - self._handle_long_word(chunks, cur_line, cur_len, width) - cur_len = sum(map(len, cur_line)) - - # If the last chunk on this line is all whitespace, drop it. 
- if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': - cur_len -= len(cur_line[-1]) - del cur_line[-1] - - if cur_line: - if (self.max_lines is None or - len(lines) + 1 < self.max_lines or - (not chunks or - self.drop_whitespace and - len(chunks) == 1 and - not chunks[0].strip()) and cur_len <= width): - # Convert current line back to a string and store it in - # list of all lines (return value). - lines.append(indent + ''.join(cur_line)) - else: - while cur_line: - if (cur_line[-1].strip() and - cur_len + len(self.placeholder) <= width): - cur_line.append(self.placeholder) - lines.append(indent + ''.join(cur_line)) - break - cur_len -= len(cur_line[-1]) - del cur_line[-1] - else: - if lines: - prev_line = lines[-1].rstrip() - if (len(prev_line) + len(self.placeholder) <= - self.width): - lines[-1] = prev_line + self.placeholder - break - lines.append(indent + self.placeholder.lstrip()) - break - - return lines - - def _split_chunks(self, text): - text = self._munge_whitespace(text) - return self._split(text) - - # -- Public interface ---------------------------------------------- - - def wrap(self, text): - """wrap(text : string) -> [string] - - Reformat the single paragraph in 'text' so it fits in lines of - no more than 'self.width' columns, and return a list of wrapped - lines. Tabs in 'text' are expanded with string.expandtabs(), - and all other whitespace characters (including newline) are - converted to space. - """ - chunks = self._split_chunks(text) - if self.fix_sentence_endings: - self._fix_sentence_endings(chunks) - return self._wrap_chunks(chunks) - - def fill(self, text): - """fill(text : string) -> string - - Reformat the single paragraph in 'text' to fit in lines of no - more than 'self.width' columns, and return a new string - containing the entire wrapped paragraph. - """ - return "\n".join(self.wrap(text)) - - -# -- Convenience interface --------------------------------------------- - -def wrap(text, width=70, **kwargs): - """Wrap a single paragraph of text, returning a list of wrapped lines. - - Reformat the single paragraph in 'text' so it fits in lines of no - more than 'width' columns, and return a list of wrapped lines. By - default, tabs in 'text' are expanded with string.expandtabs(), and - all other whitespace characters (including newline) are converted to - space. See TextWrapper class for available keyword args to customize - wrapping behaviour. - """ - w = TextWrapper(width=width, **kwargs) - return w.wrap(text) - -def fill(text, width=70, **kwargs): - """Fill a single paragraph of text, returning a new string. - - Reformat the single paragraph in 'text' to fit in lines of no more - than 'width' columns, and return a new string containing the entire - wrapped paragraph. As with wrap(), tabs are expanded and other - whitespace characters converted to space. See TextWrapper class for - available keyword args to customize wrapping behaviour. - """ - w = TextWrapper(width=width, **kwargs) - return w.fill(text) - -def shorten(text, width, **kwargs): - """Collapse and truncate the given text to fit in the given width. - - The text first has its whitespace collapsed. If it then fits in - the *width*, it is returned as is. Otherwise, as many words - as possible are joined and then the placeholder is appended:: - - >>> textwrap.shorten("Hello world!", width=12) - 'Hello world!' 
- >>> textwrap.shorten("Hello world!", width=11) - 'Hello [...]' - """ - w = TextWrapper(width=width, max_lines=1, **kwargs) - return w.fill(' '.join(text.strip().split())) - - -# -- Loosely related functionality ------------------------------------- - -_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE) -_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE) - -def dedent(text): - """Remove any common leading whitespace from every line in `text`. - - This can be used to make triple-quoted strings line up with the left - edge of the display, while still presenting them in the source code - in indented form. - - Note that tabs and spaces are both treated as whitespace, but they - are not equal: the lines " hello" and "\\thello" are - considered to have no common leading whitespace. - - Entirely blank lines are normalized to a newline character. - """ - # Look for the longest leading string of spaces and tabs common to - # all lines. - margin = None - text = _whitespace_only_re.sub('', text) - indents = _leading_whitespace_re.findall(text) - for indent in indents: - if margin is None: - margin = indent - - # Current line more deeply indented than previous winner: - # no change (previous winner is still on top). - elif indent.startswith(margin): - pass - - # Current line consistent with and no deeper than previous winner: - # it's the new winner. - elif margin.startswith(indent): - margin = indent - - # Find the largest common whitespace between current line and previous - # winner. - else: - for i, (x, y) in enumerate(zip(margin, indent)): - if x != y: - margin = margin[:i] - break - - # sanity check (testing/debugging only) - if 0 and margin: - for line in text.split("\n"): - assert not line or line.startswith(margin), \ - "line = %r, margin = %r" % (line, margin) - - if margin: - text = re.sub(r'(?m)^' + margin, '', text) - return text - - -def indent(text, prefix, predicate=None): - """Adds 'prefix' to the beginning of selected lines in 'text'. - - If 'predicate' is provided, 'prefix' will only be added to the lines - where 'predicate(line)' is True. If 'predicate' is not provided, - it will default to adding 'prefix' to all non-empty lines that do not - consist solely of whitespace characters. - """ - if predicate is None: - # str.splitlines(True) doesn't produce empty string. - # ''.splitlines(True) => [] - # 'foo\n'.splitlines(True) => ['foo\n'] - # So we can use just `not s.isspace()` here. 
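
A quick sketch of dedent() above together with indent(), whose body continues below; note the default predicate leaves whitespace-only lines unprefixed:

    import textwrap

    block = """\
        def f():
            return 42
    """
    print(textwrap.dedent(block))             # common margin removed
    print(textwrap.indent("a\n\nb\n", "> "))  # blank line left alone
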
- predicate = lambda s: not s.isspace() - - prefixed_lines = [] - for line in text.splitlines(True): - if predicate(line): - prefixed_lines.append(prefix) - prefixed_lines.append(line) - - return ''.join(prefixed_lines) - - -if __name__ == "__main__": - #print dedent("\tfoo\n\tbar") - #print dedent(" \thello there\n \t how are you?") - print(dedent("Hello there.\n This is indented.")) diff --git a/Python313_13_x86_Template/Lib/threading.py b/Python313_13_x86_Template/Lib/threading.py deleted file mode 100644 index 15bf786a..00000000 --- a/Python313_13_x86_Template/Lib/threading.py +++ /dev/null @@ -1,1602 +0,0 @@ -"""Thread module emulating a subset of Java's threading model.""" - -import os as _os -import sys as _sys -import _thread -import warnings - -from time import monotonic as _time -from _weakrefset import WeakSet -from itertools import count as _count -try: - from _collections import deque as _deque -except ImportError: - from collections import deque as _deque - -# Note regarding PEP 8 compliant names -# This threading model was originally inspired by Java, and inherited -# the convention of camelCase function and method names from that -# language. Those original names are not in any imminent danger of -# being deprecated (even for Py3k),so this module provides them as an -# alias for the PEP 8 compliant names -# Note that using the new PEP 8 compliant names facilitates substitution -# with the multiprocessing module, which doesn't provide the old -# Java inspired names. - -__all__ = ['get_ident', 'active_count', 'Condition', 'current_thread', - 'enumerate', 'main_thread', 'TIMEOUT_MAX', - 'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', - 'Barrier', 'BrokenBarrierError', 'Timer', 'ThreadError', - 'setprofile', 'settrace', 'local', 'stack_size', - 'excepthook', 'ExceptHookArgs', 'gettrace', 'getprofile', - 'setprofile_all_threads','settrace_all_threads'] - -# Rename some stuff so "from threading import *" is safe -_start_joinable_thread = _thread.start_joinable_thread -_daemon_threads_allowed = _thread.daemon_threads_allowed -_allocate_lock = _thread.allocate_lock -_LockType = _thread.LockType -_thread_shutdown = _thread._shutdown -_make_thread_handle = _thread._make_thread_handle -_ThreadHandle = _thread._ThreadHandle -get_ident = _thread.get_ident -_get_main_thread_ident = _thread._get_main_thread_ident -_is_main_interpreter = _thread._is_main_interpreter -try: - get_native_id = _thread.get_native_id - _HAVE_THREAD_NATIVE_ID = True - __all__.append('get_native_id') -except AttributeError: - _HAVE_THREAD_NATIVE_ID = False -ThreadError = _thread.error -try: - _CRLock = _thread.RLock -except AttributeError: - _CRLock = None -TIMEOUT_MAX = _thread.TIMEOUT_MAX -del _thread - -# get thread-local implementation, either from the thread -# module, or from the python fallback - -try: - from _thread import _local as local -except ImportError: - from _threading_local import local - -# Support for profile and trace hooks - -_profile_hook = None -_trace_hook = None - -def setprofile(func): - """Set a profile function for all threads started from the threading module. - - The func will be passed to sys.setprofile() for each thread, before its - run() method is called. - """ - global _profile_hook - _profile_hook = func - -def setprofile_all_threads(func): - """Set a profile function for all threads started from the threading module - and all Python threads that are currently executing. 
- - The func will be passed to sys.setprofile() for each thread, before its - run() method is called. - """ - setprofile(func) - _sys._setprofileallthreads(func) - -def getprofile(): - """Get the profiler function as set by threading.setprofile().""" - return _profile_hook - -def settrace(func): - """Set a trace function for all threads started from the threading module. - - The func will be passed to sys.settrace() for each thread, before its run() - method is called. - """ - global _trace_hook - _trace_hook = func - -def settrace_all_threads(func): - """Set a trace function for all threads started from the threading module - and all Python threads that are currently executing. - - The func will be passed to sys.settrace() for each thread, before its run() - method is called. - """ - settrace(func) - _sys._settraceallthreads(func) - -def gettrace(): - """Get the trace function as set by threading.settrace().""" - return _trace_hook - -# Synchronization classes - -Lock = _LockType - -def RLock(*args, **kwargs): - """Factory function that returns a new reentrant lock. - - A reentrant lock must be released by the thread that acquired it. Once a - thread has acquired a reentrant lock, the same thread may acquire it again - without blocking; the thread must release it once for each time it has - acquired it. - - """ - if args or kwargs: - warnings.warn( - 'Passing arguments to RLock is deprecated and will be removed in 3.15', - DeprecationWarning, - stacklevel=2, - ) - if _CRLock is None: - return _PyRLock(*args, **kwargs) - return _CRLock(*args, **kwargs) - -class _RLock: - """This class implements reentrant lock objects. - - A reentrant lock must be released by the thread that acquired it. Once a - thread has acquired a reentrant lock, the same thread may acquire it - again without blocking; the thread must release it once for each time it - has acquired it. - - """ - - def __init__(self): - self._block = _allocate_lock() - self._owner = None - self._count = 0 - - def __repr__(self): - owner = self._owner - try: - owner = _active[owner].name - except KeyError: - pass - return "<%s %s.%s object owner=%r count=%d at %s>" % ( - "locked" if self._block.locked() else "unlocked", - self.__class__.__module__, - self.__class__.__qualname__, - owner, - self._count, - hex(id(self)) - ) - - def _at_fork_reinit(self): - self._block._at_fork_reinit() - self._owner = None - self._count = 0 - - def acquire(self, blocking=True, timeout=-1): - """Acquire a lock, blocking or non-blocking. - - When invoked without arguments: if this thread already owns the lock, - increment the recursion level by one, and return immediately. Otherwise, - if another thread owns the lock, block until the lock is unlocked. Once - the lock is unlocked (not owned by any thread), then grab ownership, set - the recursion level to one, and return. If more than one thread is - blocked waiting until the lock is unlocked, only one at a time will be - able to grab ownership of the lock. There is no return value in this - case. - - When invoked with the blocking argument set to true, do the same thing - as when called without arguments, and return true. - - When invoked with the blocking argument set to false, do not block. If a - call without an argument would block, return false immediately; - otherwise, do the same thing as when called without arguments, and - return true. 
- - When invoked with the floating-point timeout argument set to a positive - value, block for at most the number of seconds specified by timeout - and as long as the lock cannot be acquired. Return true if the lock has - been acquired, false if the timeout has elapsed. - - """ - me = get_ident() - if self._owner == me: - self._count += 1 - return 1 - rc = self._block.acquire(blocking, timeout) - if rc: - self._owner = me - self._count = 1 - return rc - - __enter__ = acquire - - def release(self): - """Release a lock, decrementing the recursion level. - - If after the decrement it is zero, reset the lock to unlocked (not owned - by any thread), and if any other threads are blocked waiting for the - lock to become unlocked, allow exactly one of them to proceed. If after - the decrement the recursion level is still nonzero, the lock remains - locked and owned by the calling thread. - - Only call this method when the calling thread owns the lock. A - RuntimeError is raised if this method is called when the lock is - unlocked. - - There is no return value. - - """ - if self._owner != get_ident(): - raise RuntimeError("cannot release un-acquired lock") - self._count = count = self._count - 1 - if not count: - self._owner = None - self._block.release() - - def __exit__(self, t, v, tb): - self.release() - - # Internal methods used by condition variables - - def _acquire_restore(self, state): - self._block.acquire() - self._count, self._owner = state - - def _release_save(self): - if self._count == 0: - raise RuntimeError("cannot release un-acquired lock") - count = self._count - self._count = 0 - owner = self._owner - self._owner = None - self._block.release() - return (count, owner) - - def _is_owned(self): - return self._owner == get_ident() - - # Internal method used for reentrancy checks - - def _recursion_count(self): - if self._owner != get_ident(): - return 0 - return self._count - -_PyRLock = _RLock - - -class Condition: - """Class that implements a condition variable. - - A condition variable allows one or more threads to wait until they are - notified by another thread. - - If the lock argument is given and not None, it must be a Lock or RLock - object, and it is used as the underlying lock. Otherwise, a new RLock object - is created and used as the underlying lock. - - """ - - def __init__(self, lock=None): - if lock is None: - lock = RLock() - self._lock = lock - # Export the lock's acquire() and release() methods - self.acquire = lock.acquire - self.release = lock.release - # If the lock defines _release_save() and/or _acquire_restore(), - # these override the default implementations (which just call - # release() and acquire() on the lock). Ditto for _is_owned(). - if hasattr(lock, '_release_save'): - self._release_save = lock._release_save - if hasattr(lock, '_acquire_restore'): - self._acquire_restore = lock._acquire_restore - if hasattr(lock, '_is_owned'): - self._is_owned = lock._is_owned - self._waiters = _deque() - - def _at_fork_reinit(self): - self._lock._at_fork_reinit() - self._waiters.clear() - - def __enter__(self): - return self._lock.__enter__() - - def __exit__(self, *args): - return self._lock.__exit__(*args) - - def __repr__(self): - return "<Condition(%s, %d)>" % (self._lock, len(self._waiters)) - - def _release_save(self): - self._lock.release() # No state to save - - def _acquire_restore(self, x): - self._lock.acquire() # Ignore saved state - - def _is_owned(self): - # Return True if lock is owned by current_thread. - # This method is called only if _lock doesn't have _is_owned().
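# A minimal sketch of the wait()/notify() protocol implemented below
# (standard threading API only; Condition() wraps a fresh RLock by default):
import threading
cond = threading.Condition()
items = []
def consumer():
    with cond:
        while not items:          # re-check the predicate after each wakeup
            cond.wait()
        print(items.pop())
threading.Thread(target=consumer).start()
with cond:
    items.append("ready")
    cond.notify()                 # wakes the waiting consumer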
- if self._lock.acquire(False): - self._lock.release() - return False - else: - return True - - def wait(self, timeout=None): - """Wait until notified or until a timeout occurs. - - If the calling thread has not acquired the lock when this method is - called, a RuntimeError is raised. - - This method releases the underlying lock, and then blocks until it is - awakened by a notify() or notify_all() call for the same condition - variable in another thread, or until the optional timeout occurs. Once - awakened or timed out, it re-acquires the lock and returns. - - When the timeout argument is present and not None, it should be a - floating-point number specifying a timeout for the operation in seconds - (or fractions thereof). - - When the underlying lock is an RLock, it is not released using its - release() method, since this may not actually unlock the lock when it - was acquired multiple times recursively. Instead, an internal interface - of the RLock class is used, which really unlocks it even when it has - been recursively acquired several times. Another internal interface is - then used to restore the recursion level when the lock is reacquired. - - """ - if not self._is_owned(): - raise RuntimeError("cannot wait on un-acquired lock") - waiter = _allocate_lock() - waiter.acquire() - self._waiters.append(waiter) - saved_state = self._release_save() - gotit = False - try: # restore state no matter what (e.g., KeyboardInterrupt) - if timeout is None: - waiter.acquire() - gotit = True - else: - if timeout > 0: - gotit = waiter.acquire(True, timeout) - else: - gotit = waiter.acquire(False) - return gotit - finally: - self._acquire_restore(saved_state) - if not gotit: - try: - self._waiters.remove(waiter) - except ValueError: - pass - - def wait_for(self, predicate, timeout=None): - """Wait until a condition evaluates to True. - - predicate should be a callable which result will be interpreted as a - boolean value. A timeout may be provided giving the maximum time to - wait. - - """ - endtime = None - waittime = timeout - result = predicate() - while not result: - if waittime is not None: - if endtime is None: - endtime = _time() + waittime - else: - waittime = endtime - _time() - if waittime <= 0: - break - self.wait(waittime) - result = predicate() - return result - - def notify(self, n=1): - """Wake up one or more threads waiting on this condition, if any. - - If the calling thread has not acquired the lock when this method is - called, a RuntimeError is raised. - - This method wakes up at most n of the threads waiting for the condition - variable; it is a no-op if no threads are waiting. - - """ - if not self._is_owned(): - raise RuntimeError("cannot notify on un-acquired lock") - waiters = self._waiters - while waiters and n > 0: - waiter = waiters[0] - try: - waiter.release() - except RuntimeError: - # gh-92530: The previous call of notify() released the lock, - # but was interrupted before removing it from the queue. - # It can happen if a signal handler raises an exception, - # like CTRL+C which raises KeyboardInterrupt. - pass - else: - n -= 1 - try: - waiters.remove(waiter) - except ValueError: - pass - - def notify_all(self): - """Wake up all threads waiting on this condition. - - If the calling thread has not acquired the lock when this method - is called, a RuntimeError is raised. - - """ - self.notify(len(self._waiters)) - - def notifyAll(self): - """Wake up all threads waiting on this condition. - - This method is deprecated, use notify_all() instead. 
- - """ - import warnings - warnings.warn('notifyAll() is deprecated, use notify_all() instead', - DeprecationWarning, stacklevel=2) - self.notify_all() - - -class Semaphore: - """This class implements semaphore objects. - - Semaphores manage a counter representing the number of release() calls minus - the number of acquire() calls, plus an initial value. The acquire() method - blocks if necessary until it can return without making the counter - negative. If not given, value defaults to 1. - - """ - - # After Tim Peters' semaphore class, but not quite the same (no maximum) - - def __init__(self, value=1): - if value < 0: - raise ValueError("semaphore initial value must be >= 0") - self._cond = Condition(Lock()) - self._value = value - - def __repr__(self): - cls = self.__class__ - return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" - f" value={self._value}>") - - def acquire(self, blocking=True, timeout=None): - """Acquire a semaphore, decrementing the internal counter by one. - - When invoked without arguments: if the internal counter is larger than - zero on entry, decrement it by one and return immediately. If it is zero - on entry, block, waiting until some other thread has called release() to - make it larger than zero. This is done with proper interlocking so that - if multiple acquire() calls are blocked, release() will wake exactly one - of them up. The implementation may pick one at random, so the order in - which blocked threads are awakened should not be relied on. There is no - return value in this case. - - When invoked with blocking set to true, do the same thing as when called - without arguments, and return true. - - When invoked with blocking set to false, do not block. If a call without - an argument would block, return false immediately; otherwise, do the - same thing as when called without arguments, and return true. - - When invoked with a timeout other than None, it will block for at - most timeout seconds. If acquire does not complete successfully in - that interval, return false. Return true otherwise. - - """ - if not blocking and timeout is not None: - raise ValueError("can't specify timeout for non-blocking acquire") - rc = False - endtime = None - with self._cond: - while self._value == 0: - if not blocking: - break - if timeout is not None: - if endtime is None: - endtime = _time() + timeout - else: - timeout = endtime - _time() - if timeout <= 0: - break - self._cond.wait(timeout) - else: - self._value -= 1 - rc = True - return rc - - __enter__ = acquire - - def release(self, n=1): - """Release a semaphore, incrementing the internal counter by one or more. - - When the counter is zero on entry and another thread is waiting for it - to become larger than zero again, wake up that thread. - - """ - if n < 1: - raise ValueError('n must be one or more') - with self._cond: - self._value += n - self._cond.notify(n) - - def __exit__(self, t, v, tb): - self.release() - - -class BoundedSemaphore(Semaphore): - """Implements a bounded semaphore. - - A bounded semaphore checks to make sure its current value doesn't exceed its - initial value. If it does, ValueError is raised. In most situations - semaphores are used to guard resources with limited capacity. - - If the semaphore is released too many times it's a sign of a bug. If not - given, value defaults to 1. - - Like regular semaphores, bounded semaphores manage a counter representing - the number of release() calls minus the number of acquire() calls, plus an - initial value. 
The acquire() method blocks if necessary until it can return - without making the counter negative. If not given, value defaults to 1. - - """ - - def __init__(self, value=1): - super().__init__(value) - self._initial_value = value - - def __repr__(self): - cls = self.__class__ - return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" - f" value={self._value}/{self._initial_value}>") - - def release(self, n=1): - """Release a semaphore, incrementing the internal counter by one or more. - - When the counter is zero on entry and another thread is waiting for it - to become larger than zero again, wake up that thread. - - If the number of releases exceeds the number of acquires, - raise a ValueError. - - """ - if n < 1: - raise ValueError('n must be one or more') - with self._cond: - if self._value + n > self._initial_value: - raise ValueError("Semaphore released too many times") - self._value += n - self._cond.notify(n) - - -class Event: - """Class implementing event objects. - - Events manage a flag that can be set to true with the set() method and reset - to false with the clear() method. The wait() method blocks until the flag is - true. The flag is initially false. - - """ - - # After Tim Peters' event class (without is_posted()) - - def __init__(self): - self._cond = Condition(Lock()) - self._flag = False - - def __repr__(self): - cls = self.__class__ - status = 'set' if self._flag else 'unset' - return f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}: {status}>" - - def _at_fork_reinit(self): - # Private method called by Thread._after_fork() - self._cond._at_fork_reinit() - - def is_set(self): - """Return true if and only if the internal flag is true.""" - return self._flag - - def isSet(self): - """Return true if and only if the internal flag is true. - - This method is deprecated, use is_set() instead. - - """ - import warnings - warnings.warn('isSet() is deprecated, use is_set() instead', - DeprecationWarning, stacklevel=2) - return self.is_set() - - def set(self): - """Set the internal flag to true. - - All threads waiting for it to become true are awakened. Threads - that call wait() once the flag is true will not block at all. - - """ - with self._cond: - self._flag = True - self._cond.notify_all() - - def clear(self): - """Reset the internal flag to false. - - Subsequently, threads calling wait() will block until set() is called to - set the internal flag to true again. - - """ - with self._cond: - self._flag = False - - def wait(self, timeout=None): - """Block until the internal flag is true. - - If the internal flag is true on entry, return immediately. Otherwise, - block until another thread calls set() to set the flag to true, or until - the optional timeout occurs. - - When the timeout argument is present and not None, it should be a - floating-point number specifying a timeout for the operation in seconds - (or fractions thereof). - - This method returns the internal flag on exit, so it will always return - ``True`` except if a timeout is given and the operation times out, when - it will return ``False``. - - """ - with self._cond: - signaled = self._flag - if not signaled: - signaled = self._cond.wait(timeout) - return signaled - - -# A barrier class. Inspired in part by the pthread_barrier_* api and -# the CyclicBarrier class from Java. See -# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and -# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/ -# CyclicBarrier.html -# for information. 
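# A quick sketch of the Event flag described above (set() wakes every
# waiter; standard threading API only):
import threading
ready = threading.Event()
def waiter():
    if ready.wait(timeout=5.0):   # True once set(), False on timeout
        print("go")
threading.Thread(target=waiter).start()
ready.set()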
-# We maintain two main states, 'filling' and 'draining' enabling the barrier -# to be cyclic. Threads are not allowed into it until it has fully drained -# since the previous cycle. In addition, a 'resetting' state exists which is -# similar to 'draining' except that threads leave with a BrokenBarrierError, -# and a 'broken' state in which all threads get the exception. -class Barrier: - """Implements a Barrier. - - Useful for synchronizing a fixed number of threads at known synchronization - points. Threads block on 'wait()' and are simultaneously awoken once they - have all made that call. - - """ - - def __init__(self, parties, action=None, timeout=None): - """Create a barrier, initialised to 'parties' threads. - - 'action' is a callable which, when supplied, will be called by one of - the threads after they have all entered the barrier and just prior to - releasing them all. If a 'timeout' is provided, it is used as the - default for all subsequent 'wait()' calls. - - """ - if parties < 1: - raise ValueError("parties must be >= 1") - self._cond = Condition(Lock()) - self._action = action - self._timeout = timeout - self._parties = parties - self._state = 0 # 0 filling, 1 draining, -1 resetting, -2 broken - self._count = 0 - - def __repr__(self): - cls = self.__class__ - if self.broken: - return f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}: broken>" - return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" - f" waiters={self.n_waiting}/{self.parties}>") - - def wait(self, timeout=None): - """Wait for the barrier. - - When the specified number of threads have started waiting, they are all - simultaneously awoken. If an 'action' was provided for the barrier, one - of the threads will have executed that callback prior to returning. - Returns an individual index number from 0 to 'parties-1'. - - """ - if timeout is None: - timeout = self._timeout - with self._cond: - self._enter() # Block while the barrier drains. - index = self._count - self._count += 1 - try: - if index + 1 == self._parties: - # We release the barrier - self._release() - else: - # We wait until someone releases us - self._wait(timeout) - return index - finally: - self._count -= 1 - # Wake up any threads waiting for barrier to drain. - self._exit() - - # Block until the barrier is ready for us, or raise an exception - # if it is broken. - def _enter(self): - while self._state in (-1, 1): - # It is draining or resetting, wait until done - self._cond.wait() - #see if the barrier is in a broken state - if self._state < 0: - raise BrokenBarrierError - assert self._state == 0 - - # Optionally run the 'action' and release the threads waiting - # in the barrier. - def _release(self): - try: - if self._action: - self._action() - # enter draining state - self._state = 1 - self._cond.notify_all() - except: - #an exception during the _action handler. Break and reraise - self._break() - raise - - # Wait in the barrier until we are released. Raise an exception - # if the barrier is reset or broken. - def _wait(self, timeout): - if not self._cond.wait_for(lambda : self._state != 0, timeout): - #timed out. Break the barrier - self._break() - raise BrokenBarrierError - if self._state < 0: - raise BrokenBarrierError - assert self._state == 1 - - # If we are the last thread to exit the barrier, signal any threads - # waiting for the barrier to drain. 
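# A minimal sketch of the wait()/parties contract described above
# (standard threading API only):
import threading
barrier = threading.Barrier(3)
def worker():
    index = barrier.wait()        # blocks until all 3 parties arrive
    if index == 0:                # exactly one thread per cycle gets index 0
        print("all three arrived")
for _ in range(3):
    threading.Thread(target=worker).start()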
- def _exit(self): - if self._count == 0: - if self._state in (-1, 1): - #resetting or draining - self._state = 0 - self._cond.notify_all() - - def reset(self): - """Reset the barrier to the initial state. - - Any threads currently waiting will get the BrokenBarrier exception - raised. - - """ - with self._cond: - if self._count > 0: - if self._state == 0: - #reset the barrier, waking up threads - self._state = -1 - elif self._state == -2: - #was broken, set it to reset state - #which clears when the last thread exits - self._state = -1 - else: - self._state = 0 - self._cond.notify_all() - - def abort(self): - """Place the barrier into a 'broken' state. - - Useful in case of error. Any currently waiting threads and threads - attempting to 'wait()' will have BrokenBarrierError raised. - - """ - with self._cond: - self._break() - - def _break(self): - # An internal error was detected. The barrier is set to - # a broken state all parties awakened. - self._state = -2 - self._cond.notify_all() - - @property - def parties(self): - """Return the number of threads required to trip the barrier.""" - return self._parties - - @property - def n_waiting(self): - """Return the number of threads currently waiting at the barrier.""" - # We don't need synchronization here since this is an ephemeral result - # anyway. It returns the correct value in the steady state. - if self._state == 0: - return self._count - return 0 - - @property - def broken(self): - """Return True if the barrier is in a broken state.""" - return self._state == -2 - -# exception raised by the Barrier class -class BrokenBarrierError(RuntimeError): - pass - - -# Helper to generate new thread names -_counter = _count(1).__next__ -def _newname(name_template): - return name_template % _counter() - -# Active thread administration. -# -# bpo-44422: Use a reentrant lock to allow reentrant calls to functions like -# threading.enumerate(). -_active_limbo_lock = RLock() -_active = {} # maps thread id to Thread object -_limbo = {} -_dangling = WeakSet() - - -# Main class for threads - -class Thread: - """A class that represents a thread of control. - - This class can be safely subclassed in a limited fashion. There are two ways - to specify the activity: by passing a callable object to the constructor, or - by overriding the run() method in a subclass. - - """ - - _initialized = False - - def __init__(self, group=None, target=None, name=None, - args=(), kwargs=None, *, daemon=None): - """This constructor should always be called with keyword arguments. Arguments are: - - *group* should be None; reserved for future extension when a ThreadGroup - class is implemented. - - *target* is the callable object to be invoked by the run() - method. Defaults to None, meaning nothing is called. - - *name* is the thread name. By default, a unique name is constructed of - the form "Thread-N" where N is a small decimal number. - - *args* is a list or tuple of arguments for the target invocation. Defaults to (). - - *kwargs* is a dictionary of keyword arguments for the target - invocation. Defaults to {}. - - If a subclass overrides the constructor, it must make sure to invoke - the base class constructor (Thread.__init__()) before doing anything - else to the thread. 
- - """ - assert group is None, "group argument must be None for now" - if kwargs is None: - kwargs = {} - if name: - name = str(name) - else: - name = _newname("Thread-%d") - if target is not None: - try: - target_name = target.__name__ - name += f" ({target_name})" - except AttributeError: - pass - - self._target = target - self._name = name - self._args = args - self._kwargs = kwargs - if daemon is not None: - if daemon and not _daemon_threads_allowed(): - raise RuntimeError('daemon threads are disabled in this (sub)interpreter') - self._daemonic = daemon - else: - self._daemonic = current_thread().daemon - self._ident = None - if _HAVE_THREAD_NATIVE_ID: - self._native_id = None - self._handle = _ThreadHandle() - self._started = Event() - self._initialized = True - # Copy of sys.stderr used by self._invoke_excepthook() - self._stderr = _sys.stderr - self._invoke_excepthook = _make_invoke_excepthook() - # For debugging and _after_fork() - _dangling.add(self) - - def _after_fork(self, new_ident=None): - # Private! Called by threading._after_fork(). - self._started._at_fork_reinit() - if new_ident is not None: - # This thread is alive. - self._ident = new_ident - assert self._handle.ident == new_ident - if _HAVE_THREAD_NATIVE_ID: - self._set_native_id() - else: - # Otherwise, the thread is dead, Jim. _PyThread_AfterFork() - # already marked our handle done. - pass - - def __repr__(self): - assert self._initialized, "Thread.__init__() was not called" - status = "initial" - if self._started.is_set(): - status = "started" - if self._handle.is_done(): - status = "stopped" - if self._daemonic: - status += " daemon" - if self._ident is not None: - status += " %s" % self._ident - return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status) - - def start(self): - """Start the thread's activity. - - It must be called at most once per thread object. It arranges for the - object's run() method to be invoked in a separate thread of control. - - This method will raise a RuntimeError if called more than once on the - same thread object. - - """ - if not self._initialized: - raise RuntimeError("thread.__init__() not called") - - if self._started.is_set(): - raise RuntimeError("threads can only be started once") - - with _active_limbo_lock: - _limbo[self] = self - try: - # Start joinable thread - _start_joinable_thread(self._bootstrap, handle=self._handle, - daemon=self.daemon) - except Exception: - with _active_limbo_lock: - del _limbo[self] - raise - self._started.wait() # Will set ident and native_id - - def run(self): - """Method representing the thread's activity. - - You may override this method in a subclass. The standard run() method - invokes the callable object passed to the object's constructor as the - target argument, if any, with sequential and keyword arguments taken - from the args and kwargs arguments, respectively. - - """ - try: - if self._target is not None: - self._target(*self._args, **self._kwargs) - finally: - # Avoid a refcycle if the thread is running a function with - # an argument that has a member that points to the thread. - del self._target, self._args, self._kwargs - - def _bootstrap(self): - # Wrapper around the real bootstrap code that ignores - # exceptions during interpreter cleanup. Those typically - # happen when a daemon thread wakes up at an unfortunate - # moment, finds the world around it destroyed, and raises some - # random exception *** while trying to report the exception in - # _bootstrap_inner() below ***. 
Those random exceptions - # don't help anybody, and they confuse users, so we suppress - # them. We suppress them only when it appears that the world - # indeed has already been destroyed, so that exceptions in - # _bootstrap_inner() during normal business hours are properly - # reported. Also, we only suppress them for daemonic threads; - # if a non-daemonic encounters this, something else is wrong. - try: - self._bootstrap_inner() - except: - if self._daemonic and _sys is None: - return - raise - - def _set_ident(self): - self._ident = get_ident() - - if _HAVE_THREAD_NATIVE_ID: - def _set_native_id(self): - self._native_id = get_native_id() - - def _bootstrap_inner(self): - try: - self._set_ident() - if _HAVE_THREAD_NATIVE_ID: - self._set_native_id() - self._started.set() - with _active_limbo_lock: - _active[self._ident] = self - del _limbo[self] - - if _trace_hook: - _sys.settrace(_trace_hook) - if _profile_hook: - _sys.setprofile(_profile_hook) - - try: - self.run() - except: - self._invoke_excepthook(self) - finally: - self._delete() - - def _delete(self): - "Remove current thread from the dict of currently running threads." - with _active_limbo_lock: - del _active[get_ident()] - # There must not be any python code between the previous line - # and after the lock is released. Otherwise a tracing function - # could try to acquire the lock again in the same thread, (in - # current_thread()), and would block. - - def join(self, timeout=None): - """Wait until the thread terminates. - - This blocks the calling thread until the thread whose join() method is - called terminates -- either normally or through an unhandled exception - or until the optional timeout occurs. - - When the timeout argument is present and not None, it should be a - floating-point number specifying a timeout for the operation in seconds - (or fractions thereof). As join() always returns None, you must call - is_alive() after join() to decide whether a timeout happened -- if the - thread is still alive, the join() call timed out. - - When the timeout argument is not present or None, the operation will - block until the thread terminates. - - A thread can be join()ed many times. - - join() raises a RuntimeError if an attempt is made to join the current - thread as that would cause a deadlock. It is also an error to join() a - thread before it has been started and attempts to do so raises the same - exception. - - """ - if not self._initialized: - raise RuntimeError("Thread.__init__() not called") - if not self._started.is_set(): - raise RuntimeError("cannot join thread before it is started") - if self is current_thread(): - raise RuntimeError("cannot join current thread") - - # the behavior of a negative timeout isn't documented, but - # historically .join(timeout=x) for x<0 has acted as if timeout=0 - if timeout is not None: - timeout = max(timeout, 0) - - self._handle.join(timeout) - - @property - def name(self): - """A string used for identification purposes only. - - It has no semantics. Multiple threads may be given the same name. The - initial name is set by the constructor. - - """ - assert self._initialized, "Thread.__init__() not called" - return self._name - - @name.setter - def name(self, name): - assert self._initialized, "Thread.__init__() not called" - self._name = str(name) - - @property - def ident(self): - """Thread identifier of this thread or None if it has not been started. - - This is a nonzero integer. See the get_ident() function. 
Thread - identifiers may be recycled when a thread exits and another thread is - created. The identifier is available even after the thread has exited. - - """ - assert self._initialized, "Thread.__init__() not called" - return self._ident - - if _HAVE_THREAD_NATIVE_ID: - @property - def native_id(self): - """Native integral thread ID of this thread, or None if it has not been started. - - This is a non-negative integer. See the get_native_id() function. - This represents the Thread ID as reported by the kernel. - - """ - assert self._initialized, "Thread.__init__() not called" - return self._native_id - - def is_alive(self): - """Return whether the thread is alive. - - This method returns True just before the run() method starts until just - after the run() method terminates. See also the module function - enumerate(). - - """ - assert self._initialized, "Thread.__init__() not called" - return self._started.is_set() and not self._handle.is_done() - - @property - def daemon(self): - """A boolean value indicating whether this thread is a daemon thread. - - This must be set before start() is called, otherwise RuntimeError is - raised. Its initial value is inherited from the creating thread; the - main thread is not a daemon thread and therefore all threads created in - the main thread default to daemon = False. - - The entire Python program exits when only daemon threads are left. - - """ - assert self._initialized, "Thread.__init__() not called" - return self._daemonic - - @daemon.setter - def daemon(self, daemonic): - if not self._initialized: - raise RuntimeError("Thread.__init__() not called") - if daemonic and not _daemon_threads_allowed(): - raise RuntimeError('daemon threads are disabled in this interpreter') - if self._started.is_set(): - raise RuntimeError("cannot set daemon status of active thread") - self._daemonic = daemonic - - def isDaemon(self): - """Return whether this thread is a daemon. - - This method is deprecated, use the daemon attribute instead. - - """ - import warnings - warnings.warn('isDaemon() is deprecated, get the daemon attribute instead', - DeprecationWarning, stacklevel=2) - return self.daemon - - def setDaemon(self, daemonic): - """Set whether this thread is a daemon. - - This method is deprecated, use the .daemon property instead. - - """ - import warnings - warnings.warn('setDaemon() is deprecated, set the daemon attribute instead', - DeprecationWarning, stacklevel=2) - self.daemon = daemonic - - def getName(self): - """Return a string used for identification purposes only. - - This method is deprecated, use the name attribute instead. - - """ - import warnings - warnings.warn('getName() is deprecated, get the name attribute instead', - DeprecationWarning, stacklevel=2) - return self.name - - def setName(self, name): - """Set the name string for this thread. - - This method is deprecated, use the name attribute instead. 
- - """ - import warnings - warnings.warn('setName() is deprecated, set the name attribute instead', - DeprecationWarning, stacklevel=2) - self.name = name - - -try: - from _thread import (_excepthook as excepthook, - _ExceptHookArgs as ExceptHookArgs) -except ImportError: - # Simple Python implementation if _thread._excepthook() is not available - from traceback import print_exception as _print_exception - from collections import namedtuple - - _ExceptHookArgs = namedtuple( - 'ExceptHookArgs', - 'exc_type exc_value exc_traceback thread') - - def ExceptHookArgs(args): - return _ExceptHookArgs(*args) - - def excepthook(args, /): - """ - Handle uncaught Thread.run() exception. - """ - if args.exc_type == SystemExit: - # silently ignore SystemExit - return - - if _sys is not None and _sys.stderr is not None: - stderr = _sys.stderr - elif args.thread is not None: - stderr = args.thread._stderr - if stderr is None: - # do nothing if sys.stderr is None and sys.stderr was None - # when the thread was created - return - else: - # do nothing if sys.stderr is None and args.thread is None - return - - if args.thread is not None: - name = args.thread.name - else: - name = get_ident() - print(f"Exception in thread {name}:", - file=stderr, flush=True) - _print_exception(args.exc_type, args.exc_value, args.exc_traceback, - file=stderr) - stderr.flush() - - -# Original value of threading.excepthook -__excepthook__ = excepthook - - -def _make_invoke_excepthook(): - # Create a local namespace to ensure that variables remain alive - # when _invoke_excepthook() is called, even if it is called late during - # Python shutdown. It is mostly needed for daemon threads. - - old_excepthook = excepthook - old_sys_excepthook = _sys.excepthook - if old_excepthook is None: - raise RuntimeError("threading.excepthook is None") - if old_sys_excepthook is None: - raise RuntimeError("sys.excepthook is None") - - sys_exc_info = _sys.exc_info - local_print = print - local_sys = _sys - - def invoke_excepthook(thread): - global excepthook - try: - hook = excepthook - if hook is None: - hook = old_excepthook - - args = ExceptHookArgs([*sys_exc_info(), thread]) - - hook(args) - except Exception as exc: - exc.__suppress_context__ = True - del exc - - if local_sys is not None and local_sys.stderr is not None: - stderr = local_sys.stderr - else: - stderr = thread._stderr - - local_print("Exception in threading.excepthook:", - file=stderr, flush=True) - - if local_sys is not None and local_sys.excepthook is not None: - sys_excepthook = local_sys.excepthook - else: - sys_excepthook = old_sys_excepthook - - sys_excepthook(*sys_exc_info()) - finally: - # Break reference cycle (exception stored in a variable) - args = None - - return invoke_excepthook - - -# The timer class was contributed by Itamar Shtull-Trauring - -class Timer(Thread): - """Call a function after a specified number of seconds: - - t = Timer(30.0, f, args=None, kwargs=None) - t.start() - t.cancel() # stop the timer's action if it's still waiting - - """ - - def __init__(self, interval, function, args=None, kwargs=None): - Thread.__init__(self) - self.interval = interval - self.function = function - self.args = args if args is not None else [] - self.kwargs = kwargs if kwargs is not None else {} - self.finished = Event() - - def cancel(self): - """Stop the timer if it hasn't finished yet.""" - self.finished.set() - - def run(self): - self.finished.wait(self.interval) - if not self.finished.is_set(): - self.function(*self.args, **self.kwargs) - self.finished.set() - - -# 
Special thread class to represent the main thread - -class _MainThread(Thread): - - def __init__(self): - Thread.__init__(self, name="MainThread", daemon=False) - self._started.set() - self._ident = _get_main_thread_ident() - self._handle = _make_thread_handle(self._ident) - if _HAVE_THREAD_NATIVE_ID: - self._set_native_id() - with _active_limbo_lock: - _active[self._ident] = self - - -# Helper thread-local instance to detect when a _DummyThread -# is collected. Not a part of the public API. -_thread_local_info = local() - - -class _DeleteDummyThreadOnDel: - ''' - Helper class to remove a dummy thread from threading._active on __del__. - ''' - - def __init__(self, dummy_thread): - self._dummy_thread = dummy_thread - self._tident = dummy_thread.ident - # Put the thread on a thread local variable so that when - # the related thread finishes this instance is collected. - # - # Note: no other references to this instance may be created. - # If any client code creates a reference to this instance, - # the related _DummyThread will be kept forever! - _thread_local_info._track_dummy_thread_ref = self - - def __del__(self, _active_limbo_lock=_active_limbo_lock, _active=_active): - with _active_limbo_lock: - if _active.get(self._tident) is self._dummy_thread: - _active.pop(self._tident, None) - - -# Dummy thread class to represent threads not started here. -# These should be added to `_active` and removed automatically -# when they die, although they can't be waited for. -# Their purpose is to return *something* from current_thread(). -# They are marked as daemon threads so we won't wait for them -# when we exit (conform previous semantics). - -class _DummyThread(Thread): - - def __init__(self): - Thread.__init__(self, name=_newname("Dummy-%d"), - daemon=_daemon_threads_allowed()) - self._started.set() - self._set_ident() - self._handle = _make_thread_handle(self._ident) - if _HAVE_THREAD_NATIVE_ID: - self._set_native_id() - with _active_limbo_lock: - _active[self._ident] = self - _DeleteDummyThreadOnDel(self) - - def is_alive(self): - if not self._handle.is_done() and self._started.is_set(): - return True - raise RuntimeError("thread is not alive") - - def join(self, timeout=None): - raise RuntimeError("cannot join a dummy thread") - - def _after_fork(self, new_ident=None): - if new_ident is not None: - self.__class__ = _MainThread - self._name = 'MainThread' - self._daemonic = False - Thread._after_fork(self, new_ident=new_ident) - - -# Global API functions - -def current_thread(): - """Return the current Thread object, corresponding to the caller's thread of control. - - If the caller's thread of control was not created through the threading - module, a dummy thread object with limited functionality is returned. - - """ - try: - return _active[get_ident()] - except KeyError: - return _DummyThread() - -def currentThread(): - """Return the current Thread object, corresponding to the caller's thread of control. - - This function is deprecated, use current_thread() instead. - - """ - import warnings - warnings.warn('currentThread() is deprecated, use current_thread() instead', - DeprecationWarning, stacklevel=2) - return current_thread() - -def active_count(): - """Return the number of Thread objects currently alive. - - The returned count is equal to the length of the list returned by - enumerate(). - - """ - # NOTE: if the logic in here ever changes, update Modules/posixmodule.c - # warn_about_fork_with_threads() to match. 
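# A short sketch of the Thread lifecycle and the introspection helpers
# defined above (standard threading API only):
import threading
t = threading.Thread(target=lambda: None, name="worker")
t.start()
print(threading.current_thread().name)    # "MainThread" in the main thread
print(threading.active_count() >= 1)      # the main thread is always counted
t.join()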
- with _active_limbo_lock: - return len(_active) + len(_limbo) - -def activeCount(): - """Return the number of Thread objects currently alive. - - This function is deprecated, use active_count() instead. - - """ - import warnings - warnings.warn('activeCount() is deprecated, use active_count() instead', - DeprecationWarning, stacklevel=2) - return active_count() - -def _enumerate(): - # Same as enumerate(), but without the lock. Internal use only. - return list(_active.values()) + list(_limbo.values()) - -def enumerate(): - """Return a list of all Thread objects currently alive. - - The list includes daemonic threads, dummy thread objects created by - current_thread(), and the main thread. It excludes terminated threads and - threads that have not yet been started. - - """ - with _active_limbo_lock: - return list(_active.values()) + list(_limbo.values()) - - -_threading_atexits = [] -_SHUTTING_DOWN = False - -def _register_atexit(func, *arg, **kwargs): - """CPython internal: register *func* to be called before joining threads. - - The registered *func* is called with its arguments just before all - non-daemon threads are joined in `_shutdown()`. It provides a similar - purpose to `atexit.register()`, but its functions are called prior to - threading shutdown instead of interpreter shutdown. - - For similarity to atexit, the registered functions are called in reverse. - """ - if _SHUTTING_DOWN: - raise RuntimeError("can't register atexit after shutdown") - - _threading_atexits.append(lambda: func(*arg, **kwargs)) - - -from _thread import stack_size - -# Create the main thread object, -# and make it available for the interpreter -# (Py_Main) as threading._shutdown. - -_main_thread = _MainThread() - -def _shutdown(): - """ - Wait until the Python thread state of all non-daemon threads get deleted. - """ - # Obscure: other threads may be waiting to join _main_thread. That's - # dubious, but some code does it. We can't wait for it to be marked as done - # normally - that won't happen until the interpreter is nearly dead. So - # mark it done here. - if _main_thread._handle.is_done() and _is_main_interpreter(): - # _shutdown() was already called - return - - global _SHUTTING_DOWN - _SHUTTING_DOWN = True - - # Call registered threading atexit functions before threads are joined. - # Order is reversed, similar to atexit. - for atexit_call in reversed(_threading_atexits): - atexit_call() - - if _is_main_interpreter(): - _main_thread._handle._set_done() - - # Wait for all non-daemon threads to exit. - _thread_shutdown() - - -def main_thread(): - """Return the main thread object. - - In normal conditions, the main thread is the thread from which the - Python interpreter was started. - """ - # XXX Figure this out for subinterpreters. (See gh-75698.) - return _main_thread - - -def _after_fork(): - """ - Cleanup threading module state that should not exist after a fork. - """ - # Reset _active_limbo_lock, in case we forked while the lock was held - # by another (non-forked) thread. http://bugs.python.org/issue874900 - global _active_limbo_lock, _main_thread - _active_limbo_lock = RLock() - - # fork() only copied the current thread; clear references to others. - new_active = {} - - try: - current = _active[get_ident()] - except KeyError: - # fork() was called in a thread which was not spawned - # by threading.Thread. For example, a thread spawned - # by thread.start_new_thread(). 
- current = _MainThread() - - _main_thread = current - - with _active_limbo_lock: - # Dangling thread instances must still have their locks reset, - # because someone may join() them. - threads = set(_enumerate()) - threads.update(_dangling) - for thread in threads: - # Any lock/condition variable may be currently locked or in an - # invalid state, so we reinitialize them. - if thread is current: - # This is the one and only active thread. - ident = get_ident() - thread._after_fork(new_ident=ident) - new_active[ident] = thread - else: - # All the others are already stopped. - thread._after_fork() - - _limbo.clear() - _active.clear() - _active.update(new_active) - assert len(_active) == 1 - - -if hasattr(_os, "register_at_fork"): - _os.register_at_fork(after_in_child=_after_fork) diff --git a/Python313_13_x86_Template/Lib/timeit.py b/Python313_13_x86_Template/Lib/timeit.py deleted file mode 100644 index 02cfafaf..00000000 --- a/Python313_13_x86_Template/Lib/timeit.py +++ /dev/null @@ -1,381 +0,0 @@ -#! /usr/bin/env python3 - -"""Tool for measuring execution time of small code snippets. - -This module avoids a number of common traps for measuring execution -times. See also Tim Peters' introduction to the Algorithms chapter in -the Python Cookbook, published by O'Reilly. - -Library usage: see the Timer class. - -Command line usage: - python timeit.py [-n N] [-r N] [-s S] [-p] [-h] [--] [statement] - -Options: - -n/--number N: how many times to execute 'statement' (default: see below) - -r/--repeat N: how many times to repeat the timer (default 5) - -s/--setup S: statement to be executed once initially (default 'pass'). - Execution time of this setup statement is NOT timed. - -p/--process: use time.process_time() (default is time.perf_counter()) - -v/--verbose: print raw timing results; repeat for more digits precision - -u/--unit: set the output time unit (nsec, usec, msec, or sec) - -h/--help: print this usage message and exit - --: separate options from statement, use when statement starts with - - statement: statement to be timed (default 'pass') - -A multi-line statement may be given by specifying each line as a -separate argument; indented lines are possible by enclosing an -argument in quotes and using leading spaces. Multiple -s options are -treated similarly. - -If -n is not given, a suitable number of loops is calculated by trying -increasing numbers from the sequence 1, 2, 5, 10, 20, 50, ... until the -total time is at least 0.2 seconds. - -Note: there is a certain baseline overhead associated with executing a -pass statement. It differs between versions. The code here doesn't try -to hide it, but you should be aware of it. The baseline overhead can be -measured by invoking the program without arguments. - -Classes: - - Timer - -Functions: - - timeit(string, string) -> float - repeat(string, string) -> list - default_timer() -> float - -""" - -import gc -import itertools -import sys -import time - -__all__ = ["Timer", "timeit", "repeat", "default_timer"] - -dummy_src_name = "<timeit-src>" -default_number = 1000000 -default_repeat = 5 -default_timer = time.perf_counter - -_globals = globals - -# Don't change the indentation of the template; the reindent() calls -# in Timer.__init__() depend on setup being indented 4 spaces and stmt -# being indented 8 spaces.
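# A minimal sketch of the library usage documented in the module docstring
# above (standard timeit API only):
import timeit
secs = timeit.timeit("sorted(data)", setup="data = list(range(100))",
                     number=1000)
print(f"{secs:.6f}s for 1000 runs")        # total time, not per-loop time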
-template = """ -def inner(_it, _timer{init}): - {setup} - _t0 = _timer() - for _i in _it: - {stmt} - pass - _t1 = _timer() - return _t1 - _t0 -""" - - -def reindent(src, indent): - """Helper to reindent a multi-line statement.""" - return src.replace("\n", "\n" + " " * indent) - - -class Timer: - """Class for timing execution speed of small code snippets. - - The constructor takes a statement to be timed, an additional - statement used for setup, and a timer function. Both statements - default to 'pass'; the timer function is platform-dependent (see - module doc string). If 'globals' is specified, the code will be - executed within that namespace (as opposed to inside timeit's - namespace). - - To measure the execution time of the first statement, use the - timeit() method. The repeat() method is a convenience to call - timeit() multiple times and return a list of results. - - The statements may contain newlines, as long as they don't contain - multi-line string literals. - """ - - def __init__(self, stmt="pass", setup="pass", timer=default_timer, - globals=None): - """Constructor. See class doc string.""" - self.timer = timer - local_ns = {} - global_ns = _globals() if globals is None else globals - init = '' - if isinstance(setup, str): - # Check that the code can be compiled outside a function - compile(setup, dummy_src_name, "exec") - stmtprefix = setup + '\n' - setup = reindent(setup, 4) - elif callable(setup): - local_ns['_setup'] = setup - init += ', _setup=_setup' - stmtprefix = '' - setup = '_setup()' - else: - raise ValueError("setup is neither a string nor callable") - if isinstance(stmt, str): - # Check that the code can be compiled outside a function - compile(stmtprefix + stmt, dummy_src_name, "exec") - stmt = reindent(stmt, 8) - elif callable(stmt): - local_ns['_stmt'] = stmt - init += ', _stmt=_stmt' - stmt = '_stmt()' - else: - raise ValueError("stmt is neither a string nor callable") - src = template.format(stmt=stmt, setup=setup, init=init) - self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec(code, global_ns, local_ns) - self.inner = local_ns["inner"] - - def print_exc(self, file=None): - """Helper to print a traceback from the timed code. - - Typical use: - - t = Timer(...) # outside the try/except - try: - t.timeit(...) # or t.repeat(...) - except: - t.print_exc() - - The advantage over the standard traceback is that source lines - in the compiled template will be displayed. - - The optional file argument directs where the traceback is - sent; it defaults to sys.stderr. - """ - import linecache, traceback - if self.src is not None: - linecache.cache[dummy_src_name] = (len(self.src), - None, - self.src.split("\n"), - dummy_src_name) - # else the source is already stored somewhere else - - traceback.print_exc(file=file) - - def timeit(self, number=default_number): - """Time 'number' executions of the main statement. - - To be precise, this executes the setup statement once, and - then returns the time it takes to execute the main statement - a number of times, as float seconds if using the default timer. The - argument is the number of times through the loop, defaulting - to one million. The main statement, the setup statement and - the timer function to be used are passed to the constructor. 
- """ - it = itertools.repeat(None, number) - gcold = gc.isenabled() - gc.disable() - try: - timing = self.inner(it, self.timer) - finally: - if gcold: - gc.enable() - return timing - - def repeat(self, repeat=default_repeat, number=default_number): - """Call timeit() a few times. - - This is a convenience function that calls the timeit() - repeatedly, returning a list of results. The first argument - specifies how many times to call timeit(), defaulting to 5; - the second argument specifies the timer argument, defaulting - to one million. - - Note: it's tempting to calculate mean and standard deviation - from the result vector and report these. However, this is not - very useful. In a typical case, the lowest value gives a - lower bound for how fast your machine can run the given code - snippet; higher values in the result vector are typically not - caused by variability in Python's speed, but by other - processes interfering with your timing accuracy. So the min() - of the result is probably the only number you should be - interested in. After that, you should look at the entire - vector and apply common sense rather than statistics. - """ - r = [] - for i in range(repeat): - t = self.timeit(number) - r.append(t) - return r - - def autorange(self, callback=None): - """Return the number of loops and time taken so that total time >= 0.2. - - Calls the timeit method with increasing numbers from the sequence - 1, 2, 5, 10, 20, 50, ... until the time taken is at least 0.2 - second. Returns (number, time_taken). - - If *callback* is given and is not None, it will be called after - each trial with two arguments: ``callback(number, time_taken)``. - """ - i = 1 - while True: - for j in 1, 2, 5: - number = i * j - time_taken = self.timeit(number) - if callback: - callback(number, time_taken) - if time_taken >= 0.2: - return (number, time_taken) - i *= 10 - - -def timeit(stmt="pass", setup="pass", timer=default_timer, - number=default_number, globals=None): - """Convenience function to create Timer object and call timeit method.""" - return Timer(stmt, setup, timer, globals).timeit(number) - - -def repeat(stmt="pass", setup="pass", timer=default_timer, - repeat=default_repeat, number=default_number, globals=None): - """Convenience function to create Timer object and call repeat method.""" - return Timer(stmt, setup, timer, globals).repeat(repeat, number) - - -def main(args=None, *, _wrap_timer=None): - """Main program, used when run as a script. - - The optional 'args' argument specifies the command line to be parsed, - defaulting to sys.argv[1:]. - - The return value is an exit code to be passed to sys.exit(); it - may be None to indicate success. - - When an exception happens during timing, a traceback is printed to - stderr and the return value is 1. Exceptions at other times - (including the template compilation) are not caught. - - '_wrap_timer' is an internal interface used for unit testing. If it - is not None, it must be a callable that accepts a timer function - and returns another timer function (used for unit testing). 
- """ - if args is None: - args = sys.argv[1:] - import getopt - try: - opts, args = getopt.getopt(args, "n:u:s:r:pvh", - ["number=", "setup=", "repeat=", - "process", "verbose", "unit=", "help"]) - except getopt.error as err: - print(err) - print("use -h/--help for command line help") - return 2 - - timer = default_timer - stmt = "\n".join(args) or "pass" - number = 0 # auto-determine - setup = [] - repeat = default_repeat - verbose = 0 - time_unit = None - units = {"nsec": 1e-9, "usec": 1e-6, "msec": 1e-3, "sec": 1.0} - precision = 3 - for o, a in opts: - if o in ("-n", "--number"): - number = int(a) - if o in ("-s", "--setup"): - setup.append(a) - if o in ("-u", "--unit"): - if a in units: - time_unit = a - else: - print("Unrecognized unit. Please select nsec, usec, msec, or sec.", - file=sys.stderr) - return 2 - if o in ("-r", "--repeat"): - repeat = int(a) - if repeat <= 0: - repeat = 1 - if o in ("-p", "--process"): - timer = time.process_time - if o in ("-v", "--verbose"): - if verbose: - precision += 1 - verbose += 1 - if o in ("-h", "--help"): - print(__doc__, end=' ') - return 0 - setup = "\n".join(setup) or "pass" - - # Include the current directory, so that local imports work (sys.path - # contains the directory of this script, rather than the current - # directory) - import os - sys.path.insert(0, os.curdir) - if _wrap_timer is not None: - timer = _wrap_timer(timer) - - t = Timer(stmt, setup, timer) - if number == 0: - # determine number so that 0.2 <= total time < 2.0 - callback = None - if verbose: - def callback(number, time_taken): - msg = "{num} loop{s} -> {secs:.{prec}g} secs" - plural = (number != 1) - print(msg.format(num=number, s='s' if plural else '', - secs=time_taken, prec=precision)) - try: - number, _ = t.autorange(callback) - except: - t.print_exc() - return 1 - - if verbose: - print() - - try: - raw_timings = t.repeat(repeat, number) - except: - t.print_exc() - return 1 - - def format_time(dt): - unit = time_unit - - if unit is not None: - scale = units[unit] - else: - scales = [(scale, unit) for unit, scale in units.items()] - scales.sort(reverse=True) - for scale, unit in scales: - if dt >= scale: - break - - return "%.*g %s" % (precision, dt / scale, unit) - - if verbose: - print("raw times: %s" % ", ".join(map(format_time, raw_timings))) - print() - timings = [dt / number for dt in raw_timings] - - best = min(timings) - print("%d loop%s, best of %d: %s per loop" - % (number, 's' if number != 1 else '', - repeat, format_time(best))) - - best = min(timings) - worst = max(timings) - if worst >= best * 4: - import warnings - warnings.warn_explicit("The test results are likely unreliable. " - "The worst time (%s) was more than four times " - "slower than the best time (%s)." 
- % (format_time(worst), format_time(best)), - UserWarning, '', 0) - return None - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/Python313_13_x86_Template/Lib/token.py b/Python313_13_x86_Template/Lib/token.py deleted file mode 100644 index 54d7cdcc..00000000 --- a/Python313_13_x86_Template/Lib/token.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Token constants.""" -# Auto-generated by Tools/build/generate_token.py - -__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF', - 'EXACT_TOKEN_TYPES'] - -ENDMARKER = 0 -NAME = 1 -NUMBER = 2 -STRING = 3 -NEWLINE = 4 -INDENT = 5 -DEDENT = 6 -LPAR = 7 -RPAR = 8 -LSQB = 9 -RSQB = 10 -COLON = 11 -COMMA = 12 -SEMI = 13 -PLUS = 14 -MINUS = 15 -STAR = 16 -SLASH = 17 -VBAR = 18 -AMPER = 19 -LESS = 20 -GREATER = 21 -EQUAL = 22 -DOT = 23 -PERCENT = 24 -LBRACE = 25 -RBRACE = 26 -EQEQUAL = 27 -NOTEQUAL = 28 -LESSEQUAL = 29 -GREATEREQUAL = 30 -TILDE = 31 -CIRCUMFLEX = 32 -LEFTSHIFT = 33 -RIGHTSHIFT = 34 -DOUBLESTAR = 35 -PLUSEQUAL = 36 -MINEQUAL = 37 -STAREQUAL = 38 -SLASHEQUAL = 39 -PERCENTEQUAL = 40 -AMPEREQUAL = 41 -VBAREQUAL = 42 -CIRCUMFLEXEQUAL = 43 -LEFTSHIFTEQUAL = 44 -RIGHTSHIFTEQUAL = 45 -DOUBLESTAREQUAL = 46 -DOUBLESLASH = 47 -DOUBLESLASHEQUAL = 48 -AT = 49 -ATEQUAL = 50 -RARROW = 51 -ELLIPSIS = 52 -COLONEQUAL = 53 -EXCLAMATION = 54 -OP = 55 -TYPE_IGNORE = 56 -TYPE_COMMENT = 57 -SOFT_KEYWORD = 58 -FSTRING_START = 59 -FSTRING_MIDDLE = 60 -FSTRING_END = 61 -COMMENT = 62 -NL = 63 -# These aren't used by the C tokenizer but are needed for tokenize.py -ERRORTOKEN = 64 -ENCODING = 65 -N_TOKENS = 66 -# Special definitions for cooperation with parser -NT_OFFSET = 256 - -tok_name = {value: name - for name, value in globals().items() - if isinstance(value, int) and not name.startswith('_')} -__all__.extend(tok_name.values()) - -EXACT_TOKEN_TYPES = { - '!': EXCLAMATION, - '!=': NOTEQUAL, - '%': PERCENT, - '%=': PERCENTEQUAL, - '&': AMPER, - '&=': AMPEREQUAL, - '(': LPAR, - ')': RPAR, - '*': STAR, - '**': DOUBLESTAR, - '**=': DOUBLESTAREQUAL, - '*=': STAREQUAL, - '+': PLUS, - '+=': PLUSEQUAL, - ',': COMMA, - '-': MINUS, - '-=': MINEQUAL, - '->': RARROW, - '.': DOT, - '...': ELLIPSIS, - '/': SLASH, - '//': DOUBLESLASH, - '//=': DOUBLESLASHEQUAL, - '/=': SLASHEQUAL, - ':': COLON, - ':=': COLONEQUAL, - ';': SEMI, - '<': LESS, - '<<': LEFTSHIFT, - '<<=': LEFTSHIFTEQUAL, - '<=': LESSEQUAL, - '=': EQUAL, - '==': EQEQUAL, - '>': GREATER, - '>=': GREATEREQUAL, - '>>': RIGHTSHIFT, - '>>=': RIGHTSHIFTEQUAL, - '@': AT, - '@=': ATEQUAL, - '[': LSQB, - ']': RSQB, - '^': CIRCUMFLEX, - '^=': CIRCUMFLEXEQUAL, - '{': LBRACE, - '|': VBAR, - '|=': VBAREQUAL, - '}': RBRACE, - '~': TILDE, -} - -def ISTERMINAL(x): - return x < NT_OFFSET - -def ISNONTERMINAL(x): - return x >= NT_OFFSET - -def ISEOF(x): - return x == ENDMARKER diff --git a/Python313_13_x86_Template/Lib/tokenize.py b/Python313_13_x86_Template/Lib/tokenize.py deleted file mode 100644 index 7ca552c4..00000000 --- a/Python313_13_x86_Template/Lib/tokenize.py +++ /dev/null @@ -1,592 +0,0 @@ -"""Tokenization help for Python programs. - -tokenize(readline) is a generator that breaks a stream of bytes into -Python tokens. It decodes the bytes according to PEP-0263 for -determining source file encoding. - -It accepts a readline-like method which is called repeatedly to get the -next line of input (or b"" for EOF). 
It generates 5-tuples with these
-members:
-
-    the token type (see token.py)
-    the token (a string)
-    the starting (row, column) indices of the token (a 2-tuple of ints)
-    the ending (row, column) indices of the token (a 2-tuple of ints)
-    the original line (string)
-
-It is designed to match the working of the Python tokenizer exactly, except
-that it produces COMMENT tokens for comments and gives type OP for all
-operators. Additionally, all token lists start with an ENCODING token
-which tells you which encoding was used to decode the bytes stream.
-"""
-
-__author__ = 'Ka-Ping Yee <ping@lfw.org>'
-__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
-               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
-               'Michael Foord')
-from builtins import open as _builtin_open
-from codecs import lookup, BOM_UTF8
-import collections
-import functools
-from io import TextIOWrapper
-import itertools as _itertools
-import re
-import sys
-from token import *
-from token import EXACT_TOKEN_TYPES
-import _tokenize
-
-cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
-blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
-
-import token
-__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
-                           "untokenize", "TokenInfo", "open", "TokenError"]
-del token
-
-class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
-    def __repr__(self):
-        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
-        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
-                self._replace(type=annotated_type))
-
-    @property
-    def exact_type(self):
-        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
-            return EXACT_TOKEN_TYPES[self.string]
-        else:
-            return self.type
-
-def group(*choices): return '(' + '|'.join(choices) + ')'
-def any(*choices): return group(*choices) + '*'
-def maybe(*choices): return group(*choices) + '?'
-
-# Note: we use unicode matching for names ("\w") but ascii matching for
-# number literals.
-Whitespace = r'[ \f\t]*'
-Comment = r'#[^\r\n]*'
-Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
-Name = r'\w+'
-
-Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
-Binnumber = r'0[bB](?:_?[01])+'
-Octnumber = r'0[oO](?:_?[0-7])+'
-Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
-Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
-Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
-Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
-                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
-Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
-Floatnumber = group(Pointfloat, Expfloat)
-Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
-Number = group(Imagnumber, Floatnumber, Intnumber)
-
-# Return the empty string, plus all of the valid string prefixes.
-def _all_string_prefixes():
-    # The valid string prefixes. Only contain the lower case versions,
-    # and don't contain any permutations (include 'fr', but not
-    # 'rf'). The various permutations will be generated.
- _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr'] - # if we add binary f-strings, add: ['fb', 'fbr'] - result = {''} - for prefix in _valid_string_prefixes: - for t in _itertools.permutations(prefix): - # create a list with upper and lower versions of each - # character - for u in _itertools.product(*[(c, c.upper()) for c in t]): - result.add(''.join(u)) - return result - -@functools.lru_cache -def _compile(expr): - return re.compile(expr, re.UNICODE) - -# Note that since _all_string_prefixes includes the empty string, -# StringPrefix can be the empty string (making it optional). -StringPrefix = group(*_all_string_prefixes()) - -# Tail end of ' string. -Single = r"[^'\\]*(?:\\.[^'\\]*)*'" -# Tail end of " string. -Double = r'[^"\\]*(?:\\.[^"\\]*)*"' -# Tail end of ''' string. -Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" -# Tail end of """ string. -Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' -Triple = group(StringPrefix + "'''", StringPrefix + '"""') -# Single-line ' or " string. -String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", - StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') - -# Sorting in reverse order puts the long operators before their prefixes. -# Otherwise if = came before ==, == would get recognized as two instances -# of =. -Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True))) -Funny = group(r'\r?\n', Special) - -PlainToken = group(Number, Funny, String, Name) -Token = Ignore + PlainToken - -# First (or only) line of ' or " string. -ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + - group("'", r'\\\r?\n'), - StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + - group('"', r'\\\r?\n')) -PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple) -PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) - -# For a given string prefix plus quotes, endpats maps it to a regex -# to match the remainder of that string. _prefix can be empty, for -# a normal single or triple quoted string (with no prefix). -endpats = {} -for _prefix in _all_string_prefixes(): - endpats[_prefix + "'"] = Single - endpats[_prefix + '"'] = Double - endpats[_prefix + "'''"] = Single3 - endpats[_prefix + '"""'] = Double3 -del _prefix - -# A set of all of the single and triple quoted string prefixes, -# including the opening quotes. -single_quoted = set() -triple_quoted = set() -for t in _all_string_prefixes(): - for u in (t + '"', t + "'"): - single_quoted.add(u) - for u in (t + '"""', t + "'''"): - triple_quoted.add(u) -del t, u - -tabsize = 8 - -class TokenError(Exception): pass - - -class Untokenizer: - - def __init__(self): - self.tokens = [] - self.prev_row = 1 - self.prev_col = 0 - self.prev_type = None - self.prev_line = "" - self.encoding = None - - def add_whitespace(self, start): - row, col = start - if row < self.prev_row or row == self.prev_row and col < self.prev_col: - raise ValueError("start ({},{}) precedes previous end ({},{})" - .format(row, col, self.prev_row, self.prev_col)) - self.add_backslash_continuation(start) - col_offset = col - self.prev_col - if col_offset: - self.tokens.append(" " * col_offset) - - def add_backslash_continuation(self, start): - """Add backslash continuation characters if the row has increased - without encountering a newline token. - - This also inserts the correct amount of whitespace before the backslash. 
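The _all_string_prefixes() helper above builds every legal ordering and casing of each lowercase prefix with itertools.permutations() and itertools.product(). A standalone sketch of the same expansion (case_variants is an illustrative name, not part of the module):

    import itertools

    def case_variants(prefix):
        # 'br' -> {'br', 'bR', 'Br', 'BR', 'rb', 'rB', 'Rb', 'RB'}
        out = set()
        for perm in itertools.permutations(prefix):
            for combo in itertools.product(*[(c, c.upper()) for c in perm]):
                out.add("".join(combo))
        return out

    print(sorted(case_variants("fr")))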
- """ - row = start[0] - row_offset = row - self.prev_row - if row_offset == 0: - return - - newline = '\r\n' if self.prev_line.endswith('\r\n') else '\n' - line = self.prev_line.rstrip('\\\r\n') - ws = ''.join(_itertools.takewhile(str.isspace, reversed(line))) - self.tokens.append(ws + f"\\{newline}" * row_offset) - self.prev_col = 0 - - def escape_brackets(self, token): - characters = [] - consume_until_next_bracket = False - for character in token: - if character == "}": - if consume_until_next_bracket: - consume_until_next_bracket = False - else: - characters.append(character) - if character == "{": - n_backslashes = sum( - 1 for char in _itertools.takewhile( - "\\".__eq__, - characters[-2::-1] - ) - ) - if n_backslashes % 2 == 0 or characters[-1] != "N": - characters.append(character) - else: - consume_until_next_bracket = True - characters.append(character) - return "".join(characters) - - def untokenize(self, iterable): - it = iter(iterable) - indents = [] - startline = False - for t in it: - if len(t) == 2: - self.compat(t, it) - break - tok_type, token, start, end, line = t - if tok_type == ENCODING: - self.encoding = token - continue - if tok_type == ENDMARKER: - break - if tok_type == INDENT: - indents.append(token) - continue - elif tok_type == DEDENT: - indents.pop() - self.prev_row, self.prev_col = end - continue - elif tok_type in (NEWLINE, NL): - startline = True - elif startline and indents: - indent = indents[-1] - if start[1] >= len(indent): - self.tokens.append(indent) - self.prev_col = len(indent) - startline = False - elif tok_type == FSTRING_MIDDLE: - if '{' in token or '}' in token: - token = self.escape_brackets(token) - last_line = token.splitlines()[-1] - end_line, end_col = end - extra_chars = last_line.count("{{") + last_line.count("}}") - end = (end_line, end_col + extra_chars) - - self.add_whitespace(start) - self.tokens.append(token) - self.prev_row, self.prev_col = end - if tok_type in (NEWLINE, NL): - self.prev_row += 1 - self.prev_col = 0 - self.prev_type = tok_type - self.prev_line = line - return "".join(self.tokens) - - def compat(self, token, iterable): - indents = [] - toks_append = self.tokens.append - startline = token[0] in (NEWLINE, NL) - prevstring = False - in_fstring = 0 - - for tok in _itertools.chain([token], iterable): - toknum, tokval = tok[:2] - if toknum == ENCODING: - self.encoding = tokval - continue - - if toknum in (NAME, NUMBER): - tokval += ' ' - - # Insert a space between two consecutive strings - if toknum == STRING: - if prevstring: - tokval = ' ' + tokval - prevstring = True - else: - prevstring = False - - if toknum == FSTRING_START: - in_fstring += 1 - elif toknum == FSTRING_END: - in_fstring -= 1 - if toknum == INDENT: - indents.append(tokval) - continue - elif toknum == DEDENT: - indents.pop() - continue - elif toknum in (NEWLINE, NL): - startline = True - elif startline and indents: - toks_append(indents[-1]) - startline = False - elif toknum == FSTRING_MIDDLE: - tokval = self.escape_brackets(tokval) - - # Insert a space between two consecutive brackets if we are in an f-string - if tokval in {"{", "}"} and self.tokens and self.tokens[-1] == tokval and in_fstring: - tokval = ' ' + tokval - - # Insert a space between two consecutive f-strings - if toknum in (STRING, FSTRING_START) and self.prev_type in (STRING, FSTRING_END): - self.tokens.append(" ") - - toks_append(tokval) - self.prev_type = toknum - - -def untokenize(iterable): - """Transform tokens back into Python source code. 
- It returns a bytes object, encoded using the ENCODING - token, which is the first token sequence output by tokenize. - - Each element returned by the iterable must be a token sequence - with at least two elements, a token number and token value. If - only two tokens are passed, the resulting output is poor. - - The result is guaranteed to tokenize back to match the input so - that the conversion is lossless and round-trips are assured. - The guarantee applies only to the token type and token string as - the spacing between tokens (column positions) may change. - """ - ut = Untokenizer() - out = ut.untokenize(iterable) - if ut.encoding is not None: - out = out.encode(ut.encoding) - return out - - -def _get_normal_name(orig_enc): - """Imitates get_normal_name in Parser/tokenizer/helpers.c.""" - # Only care about the first 12 characters. - enc = orig_enc[:12].lower().replace("_", "-") - if enc == "utf-8" or enc.startswith("utf-8-"): - return "utf-8" - if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ - enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): - return "iso-8859-1" - return orig_enc - -def detect_encoding(readline): - """ - The detect_encoding() function is used to detect the encoding that should - be used to decode a Python source file. It requires one argument, readline, - in the same way as the tokenize() generator. - - It will call readline a maximum of twice, and return the encoding used - (as a string) and a list of any lines (left as bytes) it has read in. - - It detects the encoding from the presence of a utf-8 bom or an encoding - cookie as specified in pep-0263. If both a bom and a cookie are present, - but disagree, a SyntaxError will be raised. If the encoding cookie is an - invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, - 'utf-8-sig' is returned. - - If no encoding is specified, then the default of 'utf-8' will be returned. - """ - try: - filename = readline.__self__.name - except AttributeError: - filename = None - bom_found = False - encoding = None - default = 'utf-8' - def read_or_stop(): - try: - return readline() - except StopIteration: - return b'' - - def find_cookie(line): - try: - # Decode as UTF-8. Either the line is an encoding declaration, - # in which case it should be pure ASCII, or it must be UTF-8 - # per default encoding. 
- line_string = line.decode('utf-8') - except UnicodeDecodeError: - msg = "invalid or missing encoding declaration" - if filename is not None: - msg = '{} for {!r}'.format(msg, filename) - raise SyntaxError(msg) - - match = cookie_re.match(line_string) - if not match: - return None - encoding = _get_normal_name(match.group(1)) - try: - codec = lookup(encoding) - except LookupError: - # This behaviour mimics the Python interpreter - if filename is None: - msg = "unknown encoding: " + encoding - else: - msg = "unknown encoding for {!r}: {}".format(filename, - encoding) - raise SyntaxError(msg) - - if bom_found: - if encoding != 'utf-8': - # This behaviour mimics the Python interpreter - if filename is None: - msg = 'encoding problem: utf-8' - else: - msg = 'encoding problem for {!r}: utf-8'.format(filename) - raise SyntaxError(msg) - encoding += '-sig' - return encoding - - first = read_or_stop() - if first.startswith(BOM_UTF8): - bom_found = True - first = first[3:] - default = 'utf-8-sig' - if not first: - return default, [] - - encoding = find_cookie(first) - if encoding: - return encoding, [first] - if not blank_re.match(first): - return default, [first] - - second = read_or_stop() - if not second: - return default, [first] - - encoding = find_cookie(second) - if encoding: - return encoding, [first, second] - - return default, [first, second] - - -def open(filename): - """Open a file in read only mode using the encoding detected by - detect_encoding(). - """ - buffer = _builtin_open(filename, 'rb') - try: - encoding, lines = detect_encoding(buffer.readline) - buffer.seek(0) - text = TextIOWrapper(buffer, encoding, line_buffering=True) - text.mode = 'r' - return text - except: - buffer.close() - raise - -def tokenize(readline): - """ - The tokenize() generator requires one argument, readline, which - must be a callable object which provides the same interface as the - readline() method of built-in file objects. Each call to the function - should return one line of input as bytes. Alternatively, readline - can be a callable function terminating with StopIteration: - readline = open(myfile, 'rb').__next__ # Example of alternate readline - - The generator produces 5-tuples with these members: the token type; the - token string; a 2-tuple (srow, scol) of ints specifying the row and - column where the token begins in the source; a 2-tuple (erow, ecol) of - ints specifying the row and column where the token ends in the source; - and the line on which the token was found. The line passed is the - physical line. - - The first token sequence will always be an ENCODING token - which tells you which encoding was used to decode the bytes stream. - """ - encoding, consumed = detect_encoding(readline) - rl_gen = _itertools.chain(consumed, iter(readline, b"")) - if encoding is not None: - if encoding == "utf-8-sig": - # BOM will already have been stripped. - encoding = "utf-8" - yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') - yield from _generate_tokens_from_c_tokenizer(rl_gen.__next__, encoding, extra_tokens=True) - -def generate_tokens(readline): - """Tokenize a source reading Python code as unicode strings. - - This has the same API as tokenize(), except that it expects the *readline* - callable to return str objects instead of bytes. 
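untokenize() and tokenize() above guarantee a lossless round trip at the level of token types and strings (column positions may shift). A small sketch exercising that guarantee:

    import io
    import tokenize

    src = b"def f(a, b):\n    return a + b\n"
    toks = list(tokenize.tokenize(io.BytesIO(src).readline))
    rebuilt = tokenize.untokenize(toks)   # bytes, encoded per the ENCODING token
    again = list(tokenize.tokenize(io.BytesIO(rebuilt).readline))
    assert [(t.type, t.string) for t in toks] == \
           [(t.type, t.string) for t in again]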
- """ - return _generate_tokens_from_c_tokenizer(readline, extra_tokens=True) - -def main(): - import argparse - - # Helper error handling routines - def perror(message): - sys.stderr.write(message) - sys.stderr.write('\n') - - def error(message, filename=None, location=None): - if location: - args = (filename,) + location + (message,) - perror("%s:%d:%d: error: %s" % args) - elif filename: - perror("%s: error: %s" % (filename, message)) - else: - perror("error: %s" % message) - sys.exit(1) - - # Parse the arguments and options - parser = argparse.ArgumentParser(prog='python -m tokenize') - parser.add_argument(dest='filename', nargs='?', - metavar='filename.py', - help='the file to tokenize; defaults to stdin') - parser.add_argument('-e', '--exact', dest='exact', action='store_true', - help='display token names using the exact type') - args = parser.parse_args() - - try: - # Tokenize the input - if args.filename: - filename = args.filename - with _builtin_open(filename, 'rb') as f: - tokens = list(tokenize(f.readline)) - else: - filename = "" - tokens = _generate_tokens_from_c_tokenizer( - sys.stdin.readline, extra_tokens=True) - - - # Output the tokenization - for token in tokens: - token_type = token.type - if args.exact: - token_type = token.exact_type - token_range = "%d,%d-%d,%d:" % (token.start + token.end) - print("%-20s%-15s%-15r" % - (token_range, tok_name[token_type], token.string)) - except IndentationError as err: - line, column = err.args[1][1:3] - error(err.args[0], filename, (line, column)) - except TokenError as err: - line, column = err.args[1] - error(err.args[0], filename, (line, column)) - except SyntaxError as err: - error(err, filename) - except OSError as err: - error(err) - except KeyboardInterrupt: - print("interrupted\n") - except Exception as err: - perror("unexpected error: %s" % err) - raise - -def _transform_msg(msg): - """Transform error messages from the C tokenizer into the Python tokenize - - The C tokenizer is more picky than the Python one, so we need to massage - the error messages a bit for backwards compatibility. 
- """ - if "unterminated triple-quoted string literal" in msg: - return "EOF in multi-line string" - return msg - -def _generate_tokens_from_c_tokenizer(source, encoding=None, extra_tokens=False): - """Tokenize a source reading Python code as unicode strings using the internal C tokenizer""" - if encoding is None: - it = _tokenize.TokenizerIter(source, extra_tokens=extra_tokens) - else: - it = _tokenize.TokenizerIter(source, encoding=encoding, extra_tokens=extra_tokens) - try: - for info in it: - yield TokenInfo._make(info) - except SyntaxError as e: - if type(e) != SyntaxError: - raise e from None - msg = _transform_msg(e.msg) - raise TokenError(msg, (e.lineno, e.offset)) from None - - -if __name__ == "__main__": - main() diff --git a/Python313_13_x86_Template/Lib/tomllib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/tomllib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 0124a69e..00000000 Binary files a/Python313_13_x86_Template/Lib/tomllib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/tomllib/__pycache__/_parser.cpython-313.pyc b/Python313_13_x86_Template/Lib/tomllib/__pycache__/_parser.cpython-313.pyc deleted file mode 100644 index c2a2b285..00000000 Binary files a/Python313_13_x86_Template/Lib/tomllib/__pycache__/_parser.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/tomllib/__pycache__/_re.cpython-313.pyc b/Python313_13_x86_Template/Lib/tomllib/__pycache__/_re.cpython-313.pyc deleted file mode 100644 index 9537e096..00000000 Binary files a/Python313_13_x86_Template/Lib/tomllib/__pycache__/_re.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/tomllib/__pycache__/_types.cpython-313.pyc b/Python313_13_x86_Template/Lib/tomllib/__pycache__/_types.cpython-313.pyc deleted file mode 100644 index 9602c8af..00000000 Binary files a/Python313_13_x86_Template/Lib/tomllib/__pycache__/_types.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/tomllib/_parser.py b/Python313_13_x86_Template/Lib/tomllib/_parser.py deleted file mode 100644 index 9c80a6a5..00000000 --- a/Python313_13_x86_Template/Lib/tomllib/_parser.py +++ /dev/null @@ -1,691 +0,0 @@ -# SPDX-License-Identifier: MIT -# SPDX-FileCopyrightText: 2021 Taneli Hukkinen -# Licensed to PSF under a Contributor Agreement. - -from __future__ import annotations - -from collections.abc import Iterable -import string -from types import MappingProxyType -from typing import Any, BinaryIO, NamedTuple - -from ._re import ( - RE_DATETIME, - RE_LOCALTIME, - RE_NUMBER, - match_to_datetime, - match_to_localtime, - match_to_number, -) -from ._types import Key, ParseFloat, Pos - -ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) - -# Neither of these sets include quotation mark or backslash. They are -# currently handled as separate cases in the parser functions. 
-ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t") -ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n") - -ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS -ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS - -ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS - -TOML_WS = frozenset(" \t") -TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n") -BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_") -KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'") -HEXDIGIT_CHARS = frozenset(string.hexdigits) - -BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( - { - "\\b": "\u0008", # backspace - "\\t": "\u0009", # tab - "\\n": "\u000A", # linefeed - "\\f": "\u000C", # form feed - "\\r": "\u000D", # carriage return - '\\"': "\u0022", # quote - "\\\\": "\u005C", # backslash - } -) - - -class TOMLDecodeError(ValueError): - """An error raised if a document is not valid TOML.""" - - -def load(fp: BinaryIO, /, *, parse_float: ParseFloat = float) -> dict[str, Any]: - """Parse TOML from a binary file object.""" - b = fp.read() - try: - s = b.decode() - except AttributeError: - raise TypeError( - "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`" - ) from None - return loads(s, parse_float=parse_float) - - -def loads(s: str, /, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901 - """Parse TOML from a string.""" - - # The spec allows converting "\r\n" to "\n", even in string - # literals. Let's do so to simplify parsing. - src = s.replace("\r\n", "\n") - pos = 0 - out = Output(NestedDict(), Flags()) - header: Key = () - parse_float = make_safe_parse_float(parse_float) - - # Parse one statement at a time - # (typically means one line in TOML source) - while True: - # 1. Skip line leading whitespace - pos = skip_chars(src, pos, TOML_WS) - - # 2. Parse rules. Expect one of the following: - # - end of file - # - end of line - # - comment - # - key/value pair - # - append dict to list (and move to its namespace) - # - create dict (and move to its namespace) - # Skip trailing whitespace when applicable. - try: - char = src[pos] - except IndexError: - break - if char == "\n": - pos += 1 - continue - if char in KEY_INITIAL_CHARS: - pos = key_value_rule(src, pos, out, header, parse_float) - pos = skip_chars(src, pos, TOML_WS) - elif char == "[": - try: - second_char: str | None = src[pos + 1] - except IndexError: - second_char = None - out.flags.finalize_pending() - if second_char == "[": - pos, header = create_list_rule(src, pos, out) - else: - pos, header = create_dict_rule(src, pos, out) - pos = skip_chars(src, pos, TOML_WS) - elif char != "#": - raise suffixed_err(src, pos, "Invalid statement") - - # 3. Skip comment - pos = skip_comment(src, pos) - - # 4. Expect end of line or end of file - try: - char = src[pos] - except IndexError: - break - if char != "\n": - raise suffixed_err( - src, pos, "Expected newline or end of document after a statement" - ) - pos += 1 - - return out.data.dict - - -class Flags: - """Flags that map to parsed keys/namespaces.""" - - # Marks an immutable namespace (inline array or inline table). - FROZEN = 0 - # Marks a nest that has been explicitly created and can no longer - # be opened using the "[table]" syntax. 
- EXPLICIT_NEST = 1 - - def __init__(self) -> None: - self._flags: dict[str, dict[Any, Any]] = {} - self._pending_flags: set[tuple[Key, int]] = set() - - def add_pending(self, key: Key, flag: int) -> None: - self._pending_flags.add((key, flag)) - - def finalize_pending(self) -> None: - for key, flag in self._pending_flags: - self.set(key, flag, recursive=False) - self._pending_flags.clear() - - def unset_all(self, key: Key) -> None: - cont = self._flags - for k in key[:-1]: - if k not in cont: - return - cont = cont[k]["nested"] - cont.pop(key[-1], None) - - def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 - cont = self._flags - key_parent, key_stem = key[:-1], key[-1] - for k in key_parent: - if k not in cont: - cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} - cont = cont[k]["nested"] - if key_stem not in cont: - cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}} - cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag) - - def is_(self, key: Key, flag: int) -> bool: - if not key: - return False # document root has no flags - cont = self._flags - for k in key[:-1]: - if k not in cont: - return False - inner_cont = cont[k] - if flag in inner_cont["recursive_flags"]: - return True - cont = inner_cont["nested"] - key_stem = key[-1] - if key_stem in cont: - cont = cont[key_stem] - return flag in cont["flags"] or flag in cont["recursive_flags"] - return False - - -class NestedDict: - def __init__(self) -> None: - # The parsed content of the TOML document - self.dict: dict[str, Any] = {} - - def get_or_create_nest( - self, - key: Key, - *, - access_lists: bool = True, - ) -> dict[str, Any]: - cont: Any = self.dict - for k in key: - if k not in cont: - cont[k] = {} - cont = cont[k] - if access_lists and isinstance(cont, list): - cont = cont[-1] - if not isinstance(cont, dict): - raise KeyError("There is no nest behind this key") - return cont # type: ignore[no-any-return] - - def append_nest_to_list(self, key: Key) -> None: - cont = self.get_or_create_nest(key[:-1]) - last_key = key[-1] - if last_key in cont: - list_ = cont[last_key] - if not isinstance(list_, list): - raise KeyError("An object other than list found behind this key") - list_.append({}) - else: - cont[last_key] = [{}] - - -class Output(NamedTuple): - data: NestedDict - flags: Flags - - -def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: - try: - while src[pos] in chars: - pos += 1 - except IndexError: - pass - return pos - - -def skip_until( - src: str, - pos: Pos, - expect: str, - *, - error_on: frozenset[str], - error_on_eof: bool, -) -> Pos: - try: - new_pos = src.index(expect, pos) - except ValueError: - new_pos = len(src) - if error_on_eof: - raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None - - if not error_on.isdisjoint(src[pos:new_pos]): - while src[pos] not in error_on: - pos += 1 - raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}") - return new_pos - - -def skip_comment(src: str, pos: Pos) -> Pos: - try: - char: str | None = src[pos] - except IndexError: - char = None - if char == "#": - return skip_until( - src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False - ) - return pos - - -def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: - while True: - pos_before_skip = pos - pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) - pos = skip_comment(src, pos) - if pos == pos_before_skip: - return pos - - -def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, 
Key]: - pos += 1 # Skip "[" - pos = skip_chars(src, pos, TOML_WS) - pos, key = parse_key(src, pos) - - if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot declare {key} twice") - out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) - try: - out.data.get_or_create_nest(key) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - - if not src.startswith("]", pos): - raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration") - return pos + 1, key - - -def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: - pos += 2 # Skip "[[" - pos = skip_chars(src, pos, TOML_WS) - pos, key = parse_key(src, pos) - - if out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") - # Free the namespace now that it points to another empty list item... - out.flags.unset_all(key) - # ...but this key precisely is still prohibited from table declaration - out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) - try: - out.data.append_nest_to_list(key) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - - if not src.startswith("]]", pos): - raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration") - return pos + 2, key - - -def key_value_rule( - src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat -) -> Pos: - pos, key, value = parse_key_value_pair(src, pos, parse_float) - key_parent, key_stem = key[:-1], key[-1] - abs_key_parent = header + key_parent - - relative_path_cont_keys = (header + key[:i] for i in range(1, len(key))) - for cont_key in relative_path_cont_keys: - # Check that dotted key syntax does not redefine an existing table - if out.flags.is_(cont_key, Flags.EXPLICIT_NEST): - raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}") - # Containers in the relative path can't be opened with the table syntax or - # dotted key/value syntax in following table sections. 
- out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST) - - if out.flags.is_(abs_key_parent, Flags.FROZEN): - raise suffixed_err( - src, pos, f"Cannot mutate immutable namespace {abs_key_parent}" - ) - - try: - nest = out.data.get_or_create_nest(abs_key_parent) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - if key_stem in nest: - raise suffixed_err(src, pos, "Cannot overwrite a value") - # Mark inline table and array namespaces recursively immutable - if isinstance(value, (dict, list)): - out.flags.set(header + key, Flags.FROZEN, recursive=True) - nest[key_stem] = value - return pos - - -def parse_key_value_pair( - src: str, pos: Pos, parse_float: ParseFloat -) -> tuple[Pos, Key, Any]: - pos, key = parse_key(src, pos) - try: - char: str | None = src[pos] - except IndexError: - char = None - if char != "=": - raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair") - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - pos, value = parse_value(src, pos, parse_float) - return pos, key, value - - -def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]: - pos, key_part = parse_key_part(src, pos) - key: Key = (key_part,) - pos = skip_chars(src, pos, TOML_WS) - while True: - try: - char: str | None = src[pos] - except IndexError: - char = None - if char != ".": - return pos, key - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - pos, key_part = parse_key_part(src, pos) - key += (key_part,) - pos = skip_chars(src, pos, TOML_WS) - - -def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: - try: - char: str | None = src[pos] - except IndexError: - char = None - if char in BARE_KEY_CHARS: - start_pos = pos - pos = skip_chars(src, pos, BARE_KEY_CHARS) - return pos, src[start_pos:pos] - if char == "'": - return parse_literal_str(src, pos) - if char == '"': - return parse_one_line_basic_str(src, pos) - raise suffixed_err(src, pos, "Invalid initial character for a key part") - - -def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]: - pos += 1 - return parse_basic_str(src, pos, multiline=False) - - -def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list[Any]]: - pos += 1 - array: list[Any] = [] - - pos = skip_comments_and_array_ws(src, pos) - if src.startswith("]", pos): - return pos + 1, array - while True: - pos, val = parse_value(src, pos, parse_float) - array.append(val) - pos = skip_comments_and_array_ws(src, pos) - - c = src[pos : pos + 1] - if c == "]": - return pos + 1, array - if c != ",": - raise suffixed_err(src, pos, "Unclosed array") - pos += 1 - - pos = skip_comments_and_array_ws(src, pos) - if src.startswith("]", pos): - return pos + 1, array - - -def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict[str, Any]]: - pos += 1 - nested_dict = NestedDict() - flags = Flags() - - pos = skip_chars(src, pos, TOML_WS) - if src.startswith("}", pos): - return pos + 1, nested_dict.dict - while True: - pos, key, value = parse_key_value_pair(src, pos, parse_float) - key_parent, key_stem = key[:-1], key[-1] - if flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") - try: - nest = nested_dict.get_or_create_nest(key_parent, access_lists=False) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - if key_stem in nest: - raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}") - nest[key_stem] = value - pos = skip_chars(src, pos, TOML_WS) - c = src[pos : pos + 1] - if c == 
"}": - return pos + 1, nested_dict.dict - if c != ",": - raise suffixed_err(src, pos, "Unclosed inline table") - if isinstance(value, (dict, list)): - flags.set(key, Flags.FROZEN, recursive=True) - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - - -def parse_basic_str_escape( - src: str, pos: Pos, *, multiline: bool = False -) -> tuple[Pos, str]: - escape_id = src[pos : pos + 2] - pos += 2 - if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: - # Skip whitespace until next non-whitespace character or end of - # the doc. Error if non-whitespace is found before newline. - if escape_id != "\\\n": - pos = skip_chars(src, pos, TOML_WS) - try: - char = src[pos] - except IndexError: - return pos, "" - if char != "\n": - raise suffixed_err(src, pos, "Unescaped '\\' in a string") - pos += 1 - pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) - return pos, "" - if escape_id == "\\u": - return parse_hex_char(src, pos, 4) - if escape_id == "\\U": - return parse_hex_char(src, pos, 8) - try: - return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] - except KeyError: - raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None - - -def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]: - return parse_basic_str_escape(src, pos, multiline=True) - - -def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: - hex_str = src[pos : pos + hex_len] - if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): - raise suffixed_err(src, pos, "Invalid hex value") - pos += hex_len - hex_int = int(hex_str, 16) - if not is_unicode_scalar_value(hex_int): - raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value") - return pos, chr(hex_int) - - -def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]: - pos += 1 # Skip starting apostrophe - start_pos = pos - pos = skip_until( - src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True - ) - return pos + 1, src[start_pos:pos] # Skip ending apostrophe - - -def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]: - pos += 3 - if src.startswith("\n", pos): - pos += 1 - - if literal: - delim = "'" - end_pos = skip_until( - src, - pos, - "'''", - error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS, - error_on_eof=True, - ) - result = src[pos:end_pos] - pos = end_pos + 3 - else: - delim = '"' - pos, result = parse_basic_str(src, pos, multiline=True) - - # Add at maximum two extra apostrophes/quotes if the end sequence - # is 4 or 5 chars long instead of just 3. 
- if not src.startswith(delim, pos): - return pos, result - pos += 1 - if not src.startswith(delim, pos): - return pos, result + delim - pos += 1 - return pos, result + (delim * 2) - - -def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: - if multiline: - error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS - parse_escapes = parse_basic_str_escape_multiline - else: - error_on = ILLEGAL_BASIC_STR_CHARS - parse_escapes = parse_basic_str_escape - result = "" - start_pos = pos - while True: - try: - char = src[pos] - except IndexError: - raise suffixed_err(src, pos, "Unterminated string") from None - if char == '"': - if not multiline: - return pos + 1, result + src[start_pos:pos] - if src.startswith('"""', pos): - return pos + 3, result + src[start_pos:pos] - pos += 1 - continue - if char == "\\": - result += src[start_pos:pos] - pos, parsed_escape = parse_escapes(src, pos) - result += parsed_escape - start_pos = pos - continue - if char in error_on: - raise suffixed_err(src, pos, f"Illegal character {char!r}") - pos += 1 - - -def parse_value( # noqa: C901 - src: str, pos: Pos, parse_float: ParseFloat -) -> tuple[Pos, Any]: - try: - char: str | None = src[pos] - except IndexError: - char = None - - # IMPORTANT: order conditions based on speed of checking and likelihood - - # Basic strings - if char == '"': - if src.startswith('"""', pos): - return parse_multiline_str(src, pos, literal=False) - return parse_one_line_basic_str(src, pos) - - # Literal strings - if char == "'": - if src.startswith("'''", pos): - return parse_multiline_str(src, pos, literal=True) - return parse_literal_str(src, pos) - - # Booleans - if char == "t": - if src.startswith("true", pos): - return pos + 4, True - if char == "f": - if src.startswith("false", pos): - return pos + 5, False - - # Arrays - if char == "[": - return parse_array(src, pos, parse_float) - - # Inline tables - if char == "{": - return parse_inline_table(src, pos, parse_float) - - # Dates and times - datetime_match = RE_DATETIME.match(src, pos) - if datetime_match: - try: - datetime_obj = match_to_datetime(datetime_match) - except ValueError as e: - raise suffixed_err(src, pos, "Invalid date or datetime") from e - return datetime_match.end(), datetime_obj - localtime_match = RE_LOCALTIME.match(src, pos) - if localtime_match: - return localtime_match.end(), match_to_localtime(localtime_match) - - # Integers and "normal" floats. - # The regex will greedily match any type starting with a decimal - # char, so needs to be located after handling of dates and times. 
-    number_match = RE_NUMBER.match(src, pos)
-    if number_match:
-        return number_match.end(), match_to_number(number_match, parse_float)
-
-    # Special floats
-    first_three = src[pos : pos + 3]
-    if first_three in {"inf", "nan"}:
-        return pos + 3, parse_float(first_three)
-    first_four = src[pos : pos + 4]
-    if first_four in {"-inf", "+inf", "-nan", "+nan"}:
-        return pos + 4, parse_float(first_four)
-
-    raise suffixed_err(src, pos, "Invalid value")
-
-
-def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
-    """Return a `TOMLDecodeError` where error message is suffixed with
-    coordinates in source."""
-
-    def coord_repr(src: str, pos: Pos) -> str:
-        if pos >= len(src):
-            return "end of document"
-        line = src.count("\n", 0, pos) + 1
-        if line == 1:
-            column = pos + 1
-        else:
-            column = pos - src.rindex("\n", 0, pos)
-        return f"line {line}, column {column}"
-
-    return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")
-
-
-def is_unicode_scalar_value(codepoint: int) -> bool:
-    return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)
-
-
-def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat:
-    """A decorator to make `parse_float` safe.
-
-    `parse_float` must not return dicts or lists, because these types
-    would be mixed with parsed TOML tables and arrays, thus confusing
-    the parser. The returned decorated callable raises `ValueError`
-    instead of returning illegal types.
-    """
-    # The default `float` callable never returns illegal types. Optimize it.
-    if parse_float is float:
-        return float
-
-    def safe_parse_float(float_str: str) -> Any:
-        float_value = parse_float(float_str)
-        if isinstance(float_value, (dict, list)):
-            raise ValueError("parse_float must not return dicts or lists")
-        return float_value
-
-    return safe_parse_float
diff --git a/Python313_13_x86_Template/Lib/tomllib/_re.py b/Python313_13_x86_Template/Lib/tomllib/_re.py
deleted file mode 100644
index a97cab2f..00000000
--- a/Python313_13_x86_Template/Lib/tomllib/_re.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# SPDX-License-Identifier: MIT
-# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
-# Licensed to PSF under a Contributor Agreement.
-
-from __future__ import annotations
-
-from datetime import date, datetime, time, timedelta, timezone, tzinfo
-from functools import lru_cache
-import re
-from typing import Any
-
-from ._types import ParseFloat
-
-# E.g.
-# - 00:32:00.999999
-# - 00:32:00
-_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
-
-RE_NUMBER = re.compile(
-    r"""
-0
-(?:
-    x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex
-    |
-    b[01](?:_?[01])* # bin
-    |
-    o[0-7](?:_?[0-7])* # oct
-)
-|
-[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part
-(?P<floatpart>
-    (?:\.[0-9](?:_?[0-9])*)? # optional fractional part
-    (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part
-)
-""",
-    flags=re.VERBOSE,
-)
-RE_LOCALTIME = re.compile(_TIME_RE_STR)
-RE_DATETIME = re.compile(
-    rf"""
-([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27
-(?:
-    [Tt ]
-    {_TIME_RE_STR}
-    (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset
-)?
-""",
-    flags=re.VERBOSE,
-)
-
-
-def match_to_datetime(match: re.Match[str]) -> datetime | date:
-    """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
-
-    Raises ValueError if the match does not correspond to a valid date
-    or datetime.
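Taken together, the parser removed above is the stdlib tomllib surface. A usage sketch covering tables, arrays of tables, datetimes, and a custom parse_float (config.toml is a hypothetical path):

    import tomllib
    from decimal import Decimal

    doc = '''
    title = "example"
    ratio = 0.25
    when = 1988-10-27T12:30:00+02:00

    [[fruit]]
    name = "apple"

    [[fruit]]
    name = "orange"
    '''
    data = tomllib.loads(doc, parse_float=Decimal)
    print(type(data["ratio"]))   # Decimal, via make_safe_parse_float
    print(type(data["when"]))    # timezone-aware datetime.datetime
    print([f["name"] for f in data["fruit"]])   # ['apple', 'orange']

    with open("config.toml", "rb") as f:   # load() requires binary mode
        data = tomllib.load(f)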
- """ - ( - year_str, - month_str, - day_str, - hour_str, - minute_str, - sec_str, - micros_str, - zulu_time, - offset_sign_str, - offset_hour_str, - offset_minute_str, - ) = match.groups() - year, month, day = int(year_str), int(month_str), int(day_str) - if hour_str is None: - return date(year, month, day) - hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) - micros = int(micros_str.ljust(6, "0")) if micros_str else 0 - if offset_sign_str: - tz: tzinfo | None = cached_tz( - offset_hour_str, offset_minute_str, offset_sign_str - ) - elif zulu_time: - tz = timezone.utc - else: # local date-time - tz = None - return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) - - -@lru_cache(maxsize=None) -def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: - sign = 1 if sign_str == "+" else -1 - return timezone( - timedelta( - hours=sign * int(hour_str), - minutes=sign * int(minute_str), - ) - ) - - -def match_to_localtime(match: re.Match[str]) -> time: - hour_str, minute_str, sec_str, micros_str = match.groups() - micros = int(micros_str.ljust(6, "0")) if micros_str else 0 - return time(int(hour_str), int(minute_str), int(sec_str), micros) - - -def match_to_number(match: re.Match[str], parse_float: ParseFloat) -> Any: - if match.group("floatpart"): - return parse_float(match.group()) - return int(match.group(), 0) diff --git a/Python313_13_x86_Template/Lib/trace.py b/Python313_13_x86_Template/Lib/trace.py deleted file mode 100644 index 64fc8037..00000000 --- a/Python313_13_x86_Template/Lib/trace.py +++ /dev/null @@ -1,754 +0,0 @@ -#!/usr/bin/env python3 - -# portions copyright 2001, Autonomous Zones Industries, Inc., all rights... -# err... reserved and offered to the public under the terms of the -# Python 2.2 license. -# Author: Zooko O'Whielacronx -# http://zooko.com/ -# mailto:zooko@zooko.com -# -# Copyright 2000, Mojam Media, Inc., all rights reserved. -# Author: Skip Montanaro -# -# Copyright 1999, Bioreason, Inc., all rights reserved. -# Author: Andrew Dalke -# -# Copyright 1995-1997, Automatrix, Inc., all rights reserved. -# Author: Skip Montanaro -# -# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved. -# -# -# Permission to use, copy, modify, and distribute this Python software and -# its associated documentation for any purpose without fee is hereby -# granted, provided that the above copyright notice appears in all copies, -# and that both that copyright notice and this permission notice appear in -# supporting documentation, and that the name of neither Automatrix, -# Bioreason or Mojam Media be used in advertising or publicity pertaining to -# distribution of the software without specific, written prior permission. -# -"""program/module to trace Python program or function execution - -Sample use, command line: - trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs - trace.py -t --ignore-dir '$prefix' spam.py eggs - trace.py --trackcalls spam.py eggs - -Sample use, programmatically - import sys - - # create a Trace object, telling it what to ignore, and whether to - # do tracing or line-counting or both. 
- tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,], - trace=0, count=1) - # run the new command using the given tracer - tracer.run('main()') - # make a report, placing output in /tmp - r = tracer.results() - r.write_results(show_missing=True, coverdir="/tmp") -""" -__all__ = ['Trace', 'CoverageResults'] - -import io -import linecache -import os -import sys -import sysconfig -import token -import tokenize -import inspect -import gc -import dis -import pickle -from time import monotonic as _time - -import threading - -PRAGMA_NOCOVER = "#pragma NO COVER" - -class _Ignore: - def __init__(self, modules=None, dirs=None): - self._mods = set() if not modules else set(modules) - self._dirs = [] if not dirs else [os.path.normpath(d) - for d in dirs] - self._ignore = { '': 1 } - - def names(self, filename, modulename): - if modulename in self._ignore: - return self._ignore[modulename] - - # haven't seen this one before, so see if the module name is - # on the ignore list. - if modulename in self._mods: # Identical names, so ignore - self._ignore[modulename] = 1 - return 1 - - # check if the module is a proper submodule of something on - # the ignore list - for mod in self._mods: - # Need to take some care since ignoring - # "cmp" mustn't mean ignoring "cmpcache" but ignoring - # "Spam" must also mean ignoring "Spam.Eggs". - if modulename.startswith(mod + '.'): - self._ignore[modulename] = 1 - return 1 - - # Now check that filename isn't in one of the directories - if filename is None: - # must be a built-in, so we must ignore - self._ignore[modulename] = 1 - return 1 - - # Ignore a file when it contains one of the ignorable paths - for d in self._dirs: - # The '+ os.sep' is to ensure that d is a parent directory, - # as compared to cases like: - # d = "/usr/local" - # filename = "/usr/local.py" - # or - # d = "/usr/local.py" - # filename = "/usr/local.py" - if filename.startswith(d + os.sep): - self._ignore[modulename] = 1 - return 1 - - # Tried the different ways, so we don't ignore this module - self._ignore[modulename] = 0 - return 0 - -def _modname(path): - """Return a plausible module name for the path.""" - - base = os.path.basename(path) - filename, ext = os.path.splitext(base) - return filename - -def _fullmodname(path): - """Return a plausible module name for the path.""" - - # If the file 'path' is part of a package, then the filename isn't - # enough to uniquely identify it. Try to do the right thing by - # looking in sys.path for the longest matching prefix. We'll - # assume that the rest is the package name. 
- - comparepath = os.path.normcase(path) - longest = "" - for dir in sys.path: - dir = os.path.normcase(dir) - if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep: - if len(dir) > len(longest): - longest = dir - - if longest: - base = path[len(longest) + 1:] - else: - base = path - # the drive letter is never part of the module name - drive, base = os.path.splitdrive(base) - base = base.replace(os.sep, ".") - if os.altsep: - base = base.replace(os.altsep, ".") - filename, ext = os.path.splitext(base) - return filename.lstrip(".") - -class CoverageResults: - def __init__(self, counts=None, calledfuncs=None, infile=None, - callers=None, outfile=None): - self.counts = counts - if self.counts is None: - self.counts = {} - self.counter = self.counts.copy() # map (filename, lineno) to count - self.calledfuncs = calledfuncs - if self.calledfuncs is None: - self.calledfuncs = {} - self.calledfuncs = self.calledfuncs.copy() - self.callers = callers - if self.callers is None: - self.callers = {} - self.callers = self.callers.copy() - self.infile = infile - self.outfile = outfile - if self.infile: - # Try to merge existing counts file. - try: - with open(self.infile, 'rb') as f: - counts, calledfuncs, callers = pickle.load(f) - self.update(self.__class__(counts, calledfuncs, callers=callers)) - except (OSError, EOFError, ValueError) as err: - print(("Skipping counts file %r: %s" - % (self.infile, err)), file=sys.stderr) - - def is_ignored_filename(self, filename): - """Return True if the filename does not refer to a file - we want to have reported. - """ - return filename.startswith('<') and filename.endswith('>') - - def update(self, other): - """Merge in the data from another CoverageResults""" - counts = self.counts - calledfuncs = self.calledfuncs - callers = self.callers - other_counts = other.counts - other_calledfuncs = other.calledfuncs - other_callers = other.callers - - for key in other_counts: - counts[key] = counts.get(key, 0) + other_counts[key] - - for key in other_calledfuncs: - calledfuncs[key] = 1 - - for key in other_callers: - callers[key] = 1 - - def write_results(self, show_missing=True, summary=False, coverdir=None, *, - ignore_missing_files=False): - """ - Write the coverage results. - - :param show_missing: Show lines that had no hits. - :param summary: Include coverage summary per module. - :param coverdir: If None, the results of each module are placed in its - directory, otherwise it is included in the directory - specified. - :param ignore_missing_files: If True, counts for files that no longer - exist are silently ignored. Otherwise, a missing file - will raise a FileNotFoundError. 
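CoverageResults above is normally produced and consumed through trace.Trace, as in the deleted module docstring's sample use. A compact sketch (fizz and the /tmp coverdir are illustrative):

    import sys
    import trace

    def fizz(n):
        return ["fizz" if i % 3 == 0 else str(i) for i in range(n)]

    tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix],
                         trace=0, count=1)
    tracer.runfunc(fizz, 10)
    results = tracer.results()
    # Writes one annotated .cover file per traced module under coverdir.
    results.write_results(show_missing=True, summary=True, coverdir="/tmp")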
- """ - if self.calledfuncs: - print() - print("functions called:") - calls = self.calledfuncs - for filename, modulename, funcname in sorted(calls): - print(("filename: %s, modulename: %s, funcname: %s" - % (filename, modulename, funcname))) - - if self.callers: - print() - print("calling relationships:") - lastfile = lastcfile = "" - for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) \ - in sorted(self.callers): - if pfile != lastfile: - print() - print("***", pfile, "***") - lastfile = pfile - lastcfile = "" - if cfile != pfile and lastcfile != cfile: - print(" -->", cfile) - lastcfile = cfile - print(" %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)) - - # turn the counts data ("(filename, lineno) = count") into something - # accessible on a per-file basis - per_file = {} - for filename, lineno in self.counts: - lines_hit = per_file[filename] = per_file.get(filename, {}) - lines_hit[lineno] = self.counts[(filename, lineno)] - - # accumulate summary info, if needed - sums = {} - - for filename, count in per_file.items(): - if self.is_ignored_filename(filename): - continue - - if filename.endswith(".pyc"): - filename = filename[:-1] - - if ignore_missing_files and not os.path.isfile(filename): - continue - - if coverdir is None: - dir = os.path.dirname(os.path.abspath(filename)) - modulename = _modname(filename) - else: - dir = coverdir - os.makedirs(dir, exist_ok=True) - modulename = _fullmodname(filename) - - # If desired, get a list of the line numbers which represent - # executable content (returned as a dict for better lookup speed) - if show_missing: - lnotab = _find_executable_linenos(filename) - else: - lnotab = {} - source = linecache.getlines(filename) - coverpath = os.path.join(dir, modulename + ".cover") - with open(filename, 'rb') as fp: - encoding, _ = tokenize.detect_encoding(fp.readline) - n_hits, n_lines = self.write_results_file(coverpath, source, - lnotab, count, encoding) - if summary and n_lines: - percent = int(100 * n_hits / n_lines) - sums[modulename] = n_lines, percent, modulename, filename - - if summary and sums: - print("lines cov% module (path)") - for m in sorted(sums): - n_lines, percent, modulename, filename = sums[m] - print("%5d %3d%% %s (%s)" % sums[m]) - - if self.outfile: - # try and store counts and module info into self.outfile - try: - with open(self.outfile, 'wb') as f: - pickle.dump((self.counts, self.calledfuncs, self.callers), - f, 1) - except OSError as err: - print("Can't save counts files because %s" % err, file=sys.stderr) - - def write_results_file(self, path, lines, lnotab, lines_hit, encoding=None): - """Return a coverage results file in path.""" - # ``lnotab`` is a dict of executable lines, or a line number "table" - - try: - outfile = open(path, "w", encoding=encoding) - except OSError as err: - print(("trace: Could not open %r for writing: %s " - "- skipping" % (path, err)), file=sys.stderr) - return 0, 0 - - n_lines = 0 - n_hits = 0 - with outfile: - for lineno, line in enumerate(lines, 1): - # do the blank/comment match to try to mark more lines - # (help the reader find stuff that hasn't been covered) - if lineno in lines_hit: - outfile.write("%5d: " % lines_hit[lineno]) - n_hits += 1 - n_lines += 1 - elif lineno in lnotab and not PRAGMA_NOCOVER in line: - # Highlight never-executed lines, unless the line contains - # #pragma: NO COVER - outfile.write(">>>>>> ") - n_lines += 1 - else: - outfile.write(" ") - outfile.write(line.expandtabs(8)) - - return n_hits, n_lines - -def _find_lines_from_code(code, strs): - """Return dict where keys 
are lines in the line number table.""" - linenos = {} - - for _, lineno in dis.findlinestarts(code): - if lineno not in strs: - linenos[lineno] = 1 - - return linenos - -def _find_lines(code, strs): - """Return lineno dict for all code objects reachable from code.""" - # get all of the lineno information from the code of this scope level - linenos = _find_lines_from_code(code, strs) - - # and check the constants for references to other code objects - for c in code.co_consts: - if inspect.iscode(c): - # find another code object, so recurse into it - linenos.update(_find_lines(c, strs)) - return linenos - -def _find_strings(filename, encoding=None): - """Return a dict of possible docstring positions. - - The dict maps line numbers to strings. There is an entry for - line that contains only a string or a part of a triple-quoted - string. - """ - d = {} - # If the first token is a string, then it's the module docstring. - # Add this special case so that the test in the loop passes. - prev_ttype = token.INDENT - with open(filename, encoding=encoding) as f: - tok = tokenize.generate_tokens(f.readline) - for ttype, tstr, start, end, line in tok: - if ttype == token.STRING: - if prev_ttype == token.INDENT: - sline, scol = start - eline, ecol = end - for i in range(sline, eline + 1): - d[i] = 1 - prev_ttype = ttype - return d - -def _find_executable_linenos(filename): - """Return dict where keys are line numbers in the line number table.""" - try: - with tokenize.open(filename) as f: - prog = f.read() - encoding = f.encoding - except OSError as err: - print(("Not printing coverage data for %r: %s" - % (filename, err)), file=sys.stderr) - return {} - code = compile(prog, filename, "exec") - strs = _find_strings(filename, encoding) - return _find_lines(code, strs) - -class Trace: - def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0, - ignoremods=(), ignoredirs=(), infile=None, outfile=None, - timing=False): - """ - @param count true iff it should count number of times each - line is executed - @param trace true iff it should print out each line that is - being counted - @param countfuncs true iff it should just output a list of - (filename, modulename, funcname,) for functions - that were called at least once; This overrides - `count' and `trace' - @param ignoremods a list of the names of modules to ignore - @param ignoredirs a list of the names of directories to ignore - all of the (recursive) contents of - @param infile file from which to read stored counts to be - added into the results - @param outfile file in which to write the results - @param timing true iff timing information be displayed - """ - self.infile = infile - self.outfile = outfile - self.ignore = _Ignore(ignoremods, ignoredirs) - self.counts = {} # keys are (filename, linenumber) - self.pathtobasename = {} # for memoizing os.path.basename - self.donothing = 0 - self.trace = trace - self._calledfuncs = {} - self._callers = {} - self._caller_cache = {} - self.start_time = None - if timing: - self.start_time = _time() - if countcallers: - self.globaltrace = self.globaltrace_trackcallers - elif countfuncs: - self.globaltrace = self.globaltrace_countfuncs - elif trace and count: - self.globaltrace = self.globaltrace_lt - self.localtrace = self.localtrace_trace_and_count - elif trace: - self.globaltrace = self.globaltrace_lt - self.localtrace = self.localtrace_trace - elif count: - self.globaltrace = self.globaltrace_lt - self.localtrace = self.localtrace_count - else: - # Ahem -- do nothing? Okay. 
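For reference, the counting mode being configured above can be exercised end to end through the public API of this module; a minimal sketch, run as a script (the `demo()` function and the `coverage_report` directory name are illustrative):

```python
# Minimal sketch of trace.Trace in counting mode (stdlib only).
import trace

def demo(n):
    total = 0
    for i in range(n):
        if i % 2:                 # the odd branch
            total += i
    return total

tracer = trace.Trace(count=1, trace=0)   # count line hits, don't echo lines
tracer.runfunc(demo, 10)
results = tracer.results()
# Writes a .cover file per traced module into coverage_report/, marking
# never-executed executable lines with ">>>>>>" when show_missing is true.
results.write_results(show_missing=True, summary=True, coverdir="coverage_report")
```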
- self.donothing = 1 - - def run(self, cmd): - import __main__ - dict = __main__.__dict__ - self.runctx(cmd, dict, dict) - - def runctx(self, cmd, globals=None, locals=None): - if globals is None: globals = {} - if locals is None: locals = {} - if not self.donothing: - threading.settrace(self.globaltrace) - sys.settrace(self.globaltrace) - try: - exec(cmd, globals, locals) - finally: - if not self.donothing: - sys.settrace(None) - threading.settrace(None) - - def runfunc(self, func, /, *args, **kw): - result = None - if not self.donothing: - sys.settrace(self.globaltrace) - try: - result = func(*args, **kw) - finally: - if not self.donothing: - sys.settrace(None) - return result - - def file_module_function_of(self, frame): - code = frame.f_code - filename = code.co_filename - if filename: - modulename = _modname(filename) - else: - modulename = None - - funcname = code.co_name - clsname = None - if code in self._caller_cache: - if self._caller_cache[code] is not None: - clsname = self._caller_cache[code] - else: - self._caller_cache[code] = None - ## use of gc.get_referrers() was suggested by Michael Hudson - # all functions which refer to this code object - funcs = [f for f in gc.get_referrers(code) - if inspect.isfunction(f)] - # require len(func) == 1 to avoid ambiguity caused by calls to - # new.function(): "In the face of ambiguity, refuse the - # temptation to guess." - if len(funcs) == 1: - dicts = [d for d in gc.get_referrers(funcs[0]) - if isinstance(d, dict)] - if len(dicts) == 1: - classes = [c for c in gc.get_referrers(dicts[0]) - if hasattr(c, "__bases__")] - if len(classes) == 1: - # ditto for new.classobj() - clsname = classes[0].__name__ - # cache the result - assumption is that new.* is - # not called later to disturb this relationship - # _caller_cache could be flushed if functions in - # the new module get called. - self._caller_cache[code] = clsname - if clsname is not None: - funcname = "%s.%s" % (clsname, funcname) - - return filename, modulename, funcname - - def globaltrace_trackcallers(self, frame, why, arg): - """Handler for call events. - - Adds information about who called who to the self._callers dict. - """ - if why == 'call': - # XXX Should do a better job of identifying methods - this_func = self.file_module_function_of(frame) - parent_func = self.file_module_function_of(frame.f_back) - self._callers[(parent_func, this_func)] = 1 - - def globaltrace_countfuncs(self, frame, why, arg): - """Handler for call events. - - Adds (filename, modulename, funcname) to the self._calledfuncs dict. - """ - if why == 'call': - this_func = self.file_module_function_of(frame) - self._calledfuncs[this_func] = 1 - - def globaltrace_lt(self, frame, why, arg): - """Handler for call events. - - If the code block being entered is to be ignored, returns `None', - else returns self.localtrace. 
- """ - if why == 'call': - code = frame.f_code - filename = frame.f_globals.get('__file__', None) - if filename: - # XXX _modname() doesn't work right for packages, so - # the ignore support won't work right for packages - modulename = _modname(filename) - if modulename is not None: - ignore_it = self.ignore.names(filename, modulename) - if not ignore_it: - if self.trace: - print((" --- modulename: %s, funcname: %s" - % (modulename, code.co_name))) - return self.localtrace - else: - return None - - def localtrace_trace_and_count(self, frame, why, arg): - if why == "line": - # record the file name and line number of every trace - filename = frame.f_code.co_filename - lineno = frame.f_lineno - key = filename, lineno - self.counts[key] = self.counts.get(key, 0) + 1 - - if self.start_time: - print('%.2f' % (_time() - self.start_time), end=' ') - bname = os.path.basename(filename) - line = linecache.getline(filename, lineno) - print("%s(%d)" % (bname, lineno), end='') - if line: - print(": ", line, end='') - else: - print() - return self.localtrace - - def localtrace_trace(self, frame, why, arg): - if why == "line": - # record the file name and line number of every trace - filename = frame.f_code.co_filename - lineno = frame.f_lineno - - if self.start_time: - print('%.2f' % (_time() - self.start_time), end=' ') - bname = os.path.basename(filename) - line = linecache.getline(filename, lineno) - print("%s(%d)" % (bname, lineno), end='') - if line: - print(": ", line, end='') - else: - print() - return self.localtrace - - def localtrace_count(self, frame, why, arg): - if why == "line": - filename = frame.f_code.co_filename - lineno = frame.f_lineno - key = filename, lineno - self.counts[key] = self.counts.get(key, 0) + 1 - return self.localtrace - - def results(self): - return CoverageResults(self.counts, infile=self.infile, - outfile=self.outfile, - calledfuncs=self._calledfuncs, - callers=self._callers) - -def main(): - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument('--version', action='version', version='trace 2.0') - - grp = parser.add_argument_group('Main options', - 'One of these (or --report) must be given') - - grp.add_argument('-c', '--count', action='store_true', - help='Count the number of times each line is executed and write ' - 'the counts to .cover for each module executed, in ' - 'the module\'s directory. See also --coverdir, --file, ' - '--no-report below.') - grp.add_argument('-t', '--trace', action='store_true', - help='Print each line to sys.stdout before it is executed') - grp.add_argument('-l', '--listfuncs', action='store_true', - help='Keep track of which functions are executed at least once ' - 'and write the results to sys.stdout after the program exits. ' - 'Cannot be specified alongside --trace or --count.') - grp.add_argument('-T', '--trackcalls', action='store_true', - help='Keep track of caller/called pairs and write the results to ' - 'sys.stdout after the program exits.') - - grp = parser.add_argument_group('Modifiers') - - _grp = grp.add_mutually_exclusive_group() - _grp.add_argument('-r', '--report', action='store_true', - help='Generate a report from a counts file; does not execute any ' - 'code. --file must specify the results file to read, which ' - 'must have been created in a previous run with --count ' - '--file=FILE') - _grp.add_argument('-R', '--no-report', action='store_true', - help='Do not generate the coverage report files. 
' - 'Useful if you want to accumulate over several runs.') - - grp.add_argument('-f', '--file', - help='File to accumulate counts over several runs') - grp.add_argument('-C', '--coverdir', - help='Directory where the report files go. The coverage report ' - 'for <package>.<module> will be written to file ' - '<dir>/<package>/<module>.cover') - grp.add_argument('-m', '--missing', action='store_true', - help='Annotate executable lines that were not executed with ' - '">>>>>> "') - grp.add_argument('-s', '--summary', action='store_true', - help='Write a brief summary for each file to sys.stdout. ' - 'Can only be used with --count or --report') - grp.add_argument('-g', '--timing', action='store_true', - help='Prefix each line with the time since the program started. ' - 'Only used while tracing') - - grp = parser.add_argument_group('Filters', - 'Can be specified multiple times') - grp.add_argument('--ignore-module', action='append', default=[], - help='Ignore the given module(s) and its submodules ' - '(if it is a package). Accepts comma separated list of ' - 'module names.') - grp.add_argument('--ignore-dir', action='append', default=[], - help='Ignore files in the given directory ' - '(multiple directories can be joined by os.pathsep).') - - parser.add_argument('--module', action='store_true', default=False, - help='Trace a module. ') - parser.add_argument('progname', nargs='?', - help='file to run as main program') - parser.add_argument('arguments', nargs=argparse.REMAINDER, - help='arguments to the program') - - opts = parser.parse_args() - - if opts.ignore_dir: - _prefix = sysconfig.get_path("stdlib") - _exec_prefix = sysconfig.get_path("platstdlib") - - def parse_ignore_dir(s): - s = os.path.expanduser(os.path.expandvars(s)) - s = s.replace('$prefix', _prefix).replace('$exec_prefix', _exec_prefix) - return os.path.normpath(s) - - opts.ignore_module = [mod.strip() - for i in opts.ignore_module for mod in i.split(',')] - opts.ignore_dir = [parse_ignore_dir(s) - for i in opts.ignore_dir for s in i.split(os.pathsep)] - - if opts.report: - if not opts.file: - parser.error('-r/--report requires -f/--file') - results = CoverageResults(infile=opts.file, outfile=opts.file) - return results.write_results(opts.missing, opts.summary, opts.coverdir) - - if not any([opts.trace, opts.count, opts.listfuncs, opts.trackcalls]): - parser.error('must specify one of --trace, --count, --report, ' - '--listfuncs, or --trackcalls') - - if opts.listfuncs and (opts.count or opts.trace): - parser.error('cannot specify both --listfuncs and (--trace or --count)') - - if opts.summary and not opts.count: - parser.error('--summary can only be used with --count or --report') - - if opts.progname is None: - parser.error('progname is missing: required with the main options') - - t = Trace(opts.count, opts.trace, countfuncs=opts.listfuncs, - countcallers=opts.trackcalls, ignoremods=opts.ignore_module, - ignoredirs=opts.ignore_dir, infile=opts.file, - outfile=opts.file, timing=opts.timing) - try: - if opts.module: - import runpy - module_name = opts.progname - mod_name, mod_spec, code = runpy._get_module_details(module_name) - sys.argv = [code.co_filename, *opts.arguments] - globs = { - '__name__': '__main__', - '__file__': code.co_filename, - '__package__': mod_spec.parent, - '__loader__': mod_spec.loader, - '__spec__': mod_spec, - '__cached__': None, - } - else: - sys.argv = [opts.progname, *opts.arguments] - sys.path[0] = os.path.dirname(opts.progname) - - with io.open_code(opts.progname) as fp: - code = compile(fp.read(), opts.progname, 'exec') - # try to 
emulate __main__ namespace as much as possible - globs = { - '__file__': opts.progname, - '__name__': '__main__', - '__package__': None, - '__cached__': None, - } - t.runctx(code, globs, globs) - except OSError as err: - sys.exit("Cannot run file %r because: %s" % (sys.argv[0], err)) - except SystemExit: - pass - - results = t.results() - - if not opts.no_report: - results.write_results(opts.missing, opts.summary, opts.coverdir) - -if __name__=='__main__': - main() diff --git a/Python313_13_x86_Template/Lib/traceback.py b/Python313_13_x86_Template/Lib/traceback.py deleted file mode 100644 index b412954b..00000000 --- a/Python313_13_x86_Template/Lib/traceback.py +++ /dev/null @@ -1,1640 +0,0 @@ -"""Extract, format and print information about Python stack traces.""" - -import collections.abc -import itertools -import linecache -import sys -import textwrap -import warnings -from contextlib import suppress -import _colorize -from _colorize import ANSIColors - -__all__ = ['extract_stack', 'extract_tb', 'format_exception', - 'format_exception_only', 'format_list', 'format_stack', - 'format_tb', 'print_exc', 'format_exc', 'print_exception', - 'print_last', 'print_stack', 'print_tb', 'clear_frames', - 'FrameSummary', 'StackSummary', 'TracebackException', - 'walk_stack', 'walk_tb'] - -# -# Formatting and printing lists of traceback lines. -# - - -def print_list(extracted_list, file=None): - """Print the list of tuples as returned by extract_tb() or - extract_stack() as a formatted stack trace to the given file.""" - if file is None: - file = sys.stderr - for item in StackSummary.from_list(extracted_list).format(): - print(item, file=file, end="") - -def format_list(extracted_list): - """Format a list of tuples or FrameSummary objects for printing. - - Given a list of tuples or FrameSummary objects as returned by - extract_tb() or extract_stack(), return a list of strings ready - for printing. - - Each string in the resulting list corresponds to the item with the - same index in the argument list. Each string ends in a newline; - the strings may contain internal newlines as well, for those items - whose source text line is not None. - """ - return StackSummary.from_list(extracted_list).format() - -# -# Printing and Extracting Tracebacks. -# - -def print_tb(tb, limit=None, file=None): - """Print up to 'limit' stack trace entries from the traceback 'tb'. - - If 'limit' is omitted or None, all entries are printed. If 'file' - is omitted or None, the output goes to sys.stderr; otherwise - 'file' should be an open file or file-like object with a write() - method. - """ - print_list(extract_tb(tb, limit=limit), file=file) - -def format_tb(tb, limit=None): - """A shorthand for 'format_list(extract_tb(tb, limit))'.""" - return extract_tb(tb, limit=limit).format() - -def extract_tb(tb, limit=None): - """ - Return a StackSummary object representing a list of - pre-processed entries from traceback. - - This is useful for alternate formatting of stack traces. If - 'limit' is omitted or None, all entries are extracted. A - pre-processed stack trace entry is a FrameSummary object - containing attributes filename, lineno, name, and line - representing the information that is usually printed for a stack - trace. The line is a string with leading and trailing - whitespace stripped; if the source is not available it is None. - """ - return StackSummary._extract_from_extended_frame_gen( - _walk_tb_with_full_positions(tb), limit=limit) - -# -# Exception formatting and output. 
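As a usage note for the extraction helpers removed above, a minimal sketch of `extract_tb()` and `format_tb()` on a caught exception (stdlib only, the division error is illustrative):

```python
import traceback

try:
    1 / 0
except ZeroDivisionError as exc:
    stack = traceback.extract_tb(exc.__traceback__)   # a StackSummary
    for frame in stack:                               # FrameSummary entries
        print(frame.filename, frame.lineno, frame.name, frame.line)
    # Same frames, pre-rendered as printable strings:
    print("".join(traceback.format_tb(exc.__traceback__)), end="")
```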
-# - -_cause_message = ( - "\nThe above exception was the direct cause " - "of the following exception:\n\n") - -_context_message = ( - "\nDuring handling of the above exception, " - "another exception occurred:\n\n") - - -class _Sentinel: - def __repr__(self): - return "<implicit>" - -_sentinel = _Sentinel() - -def _parse_value_tb(exc, value, tb): - if (value is _sentinel) != (tb is _sentinel): - raise ValueError("Both or neither of value and tb must be given") - if value is tb is _sentinel: - if exc is not None: - if isinstance(exc, BaseException): - return exc, exc.__traceback__ - - raise TypeError(f'Exception expected for value, ' - f'{type(exc).__name__} found') - else: - return None, None - return value, tb - - -def print_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \ - file=None, chain=True, **kwargs): - """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. - - This differs from print_tb() in the following ways: (1) if - traceback is not None, it prints a header "Traceback (most recent - call last):"; (2) it prints the exception type and value after the - stack trace; (3) if type is SyntaxError and value has the - appropriate format, it prints the line where the syntax error - occurred with a caret on the next line indicating the approximate - position of the error. - """ - colorize = kwargs.get("colorize", False) - value, tb = _parse_value_tb(exc, value, tb) - te = TracebackException(type(value), value, tb, limit=limit, compact=True) - te.print(file=file, chain=chain, colorize=colorize) - - -BUILTIN_EXCEPTION_LIMIT = object() - - -def _print_exception_bltin(exc, /): - file = sys.stderr if sys.stderr is not None else sys.__stderr__ - colorize = _colorize.can_colorize(file=file) - return print_exception(exc, limit=BUILTIN_EXCEPTION_LIMIT, file=file, colorize=colorize) - - -def format_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \ - chain=True, **kwargs): - """Format a stack trace and the exception information. - - The arguments have the same meaning as the corresponding arguments - to print_exception(). The return value is a list of strings, each - ending in a newline and some containing internal newlines. When - these lines are concatenated and printed, exactly the same text is - printed as does print_exception(). - """ - colorize = kwargs.get("colorize", False) - value, tb = _parse_value_tb(exc, value, tb) - te = TracebackException(type(value), value, tb, limit=limit, compact=True) - return list(te.format(chain=chain, colorize=colorize)) - - -def format_exception_only(exc, /, value=_sentinel, *, show_group=False, **kwargs): - """Format the exception part of a traceback. - - The return value is a list of strings, each ending in a newline. - - The list contains the exception's message, which is - normally a single string; however, for :exc:`SyntaxError` exceptions, it - contains several lines that (when printed) display detailed information - about where the syntax error occurred. Following the message, the list - contains the exception's ``__notes__``. - - When *show_group* is ``True``, and the exception is an instance of - :exc:`BaseExceptionGroup`, the nested exceptions are included as - well, recursively, with indentation relative to their nesting depth. 
- """ - colorize = kwargs.get("colorize", False) - if value is _sentinel: - value = exc - te = TracebackException(type(value), value, None, compact=True) - return list(te.format_exception_only(show_group=show_group, colorize=colorize)) - - -# -- not official API but folk probably use these two functions. - -def _format_final_exc_line(etype, value, *, insert_final_newline=True, colorize=False): - valuestr = _safe_string(value, 'exception') - end_char = "\n" if insert_final_newline else "" - if colorize: - if value is None or not valuestr: - line = f"{ANSIColors.BOLD_MAGENTA}{etype}{ANSIColors.RESET}{end_char}" - else: - line = f"{ANSIColors.BOLD_MAGENTA}{etype}{ANSIColors.RESET}: {ANSIColors.MAGENTA}{valuestr}{ANSIColors.RESET}{end_char}" - else: - if value is None or not valuestr: - line = f"{etype}{end_char}" - else: - line = f"{etype}: {valuestr}{end_char}" - return line - - -def _safe_string(value, what, func=str): - try: - return func(value) - except: - return f'<{what} {func.__name__}() failed>' - -# -- - -def print_exc(limit=None, file=None, chain=True): - """Shorthand for 'print_exception(sys.exception(), limit=limit, file=file, chain=chain)'.""" - print_exception(sys.exception(), limit=limit, file=file, chain=chain) - -def format_exc(limit=None, chain=True): - """Like print_exc() but return a string.""" - return "".join(format_exception(sys.exception(), limit=limit, chain=chain)) - -def print_last(limit=None, file=None, chain=True): - """This is a shorthand for 'print_exception(sys.last_exc, limit=limit, file=file, chain=chain)'.""" - if not hasattr(sys, "last_exc") and not hasattr(sys, "last_type"): - raise ValueError("no last exception") - - if hasattr(sys, "last_exc"): - print_exception(sys.last_exc, limit=limit, file=file, chain=chain) - else: - print_exception(sys.last_type, sys.last_value, sys.last_traceback, - limit=limit, file=file, chain=chain) - - -# -# Printing and Extracting Stacks. -# - -def print_stack(f=None, limit=None, file=None): - """Print a stack trace from its invocation point. - - The optional 'f' argument can be used to specify an alternate - stack frame at which to start. The optional 'limit' and 'file' - arguments have the same meaning as for print_exception(). - """ - if f is None: - f = sys._getframe().f_back - print_list(extract_stack(f, limit=limit), file=file) - - -def format_stack(f=None, limit=None): - """Shorthand for 'format_list(extract_stack(f, limit))'.""" - if f is None: - f = sys._getframe().f_back - return format_list(extract_stack(f, limit=limit)) - - -def extract_stack(f=None, limit=None): - """Extract the raw traceback from the current stack frame. - - The return value has the same format as for extract_tb(). The - optional 'f' and 'limit' arguments have the same meaning as for - print_stack(). Each item in the list is a quadruple (filename, - line number, function name, text), and the entries are in order - from oldest to newest stack frame. - """ - if f is None: - f = sys._getframe().f_back - stack = StackSummary.extract(walk_stack(f), limit=limit) - stack.reverse() - return stack - - -def clear_frames(tb): - "Clear all references to local variables in the frames of a traceback." - while tb is not None: - try: - tb.tb_frame.clear() - except RuntimeError: - # Ignore the exception raised if the frame is still executing. - pass - tb = tb.tb_next - - -class FrameSummary: - """Information about a single frame from a traceback. - - - :attr:`filename` The filename for the frame. 
- - :attr:`lineno` The line within filename for the frame that was - active when the frame was captured. - - :attr:`name` The name of the function or method that was executing - when the frame was captured. - - :attr:`line` The text from the linecache module for the - of code that was running when the frame was captured. - - :attr:`locals` Either None if locals were not supplied, or a dict - mapping the name to the repr() of the variable. - """ - - __slots__ = ('filename', 'lineno', 'end_lineno', 'colno', 'end_colno', - 'name', '_lines', '_lines_dedented', 'locals', '_code') - - def __init__(self, filename, lineno, name, *, lookup_line=True, - locals=None, line=None, - end_lineno=None, colno=None, end_colno=None, **kwargs): - """Construct a FrameSummary. - - :param lookup_line: If True, `linecache` is consulted for the source - code line. Otherwise, the line will be looked up when first needed. - :param locals: If supplied the frame locals, which will be captured as - object representations. - :param line: If provided, use this instead of looking up the line in - the linecache. - """ - self.filename = filename - self.lineno = lineno - self.end_lineno = lineno if end_lineno is None else end_lineno - self.colno = colno - self.end_colno = end_colno - self.name = name - self._code = kwargs.get("_code") - self._lines = line - self._lines_dedented = None - if lookup_line: - self.line - self.locals = {k: _safe_string(v, 'local', func=repr) - for k, v in locals.items()} if locals else None - - def __eq__(self, other): - if isinstance(other, FrameSummary): - return (self.filename == other.filename and - self.lineno == other.lineno and - self.name == other.name and - self.locals == other.locals) - if isinstance(other, tuple): - return (self.filename, self.lineno, self.name, self.line) == other - return NotImplemented - - def __getitem__(self, pos): - return (self.filename, self.lineno, self.name, self.line)[pos] - - def __iter__(self): - return iter([self.filename, self.lineno, self.name, self.line]) - - def __repr__(self): - return "<FrameSummary file {filename}, line {lineno} in {name}>".format( - filename=self.filename, lineno=self.lineno, name=self.name) - - def __len__(self): - return 4 - - def _set_lines(self): - if ( - self._lines is None - and self.lineno is not None - and self.end_lineno is not None - ): - lines = [] - for lineno in range(self.lineno, self.end_lineno + 1): - # treat errors (empty string) and empty lines (newline) as the same - line = linecache.getline(self.filename, lineno).rstrip() - if not line and self._code is not None and self.filename.startswith("<"): - line = linecache._getline_from_code(self._code, lineno).rstrip() - lines.append(line) - self._lines = "\n".join(lines) + "\n" - - @property - def _original_lines(self): - # Returns the line as-is from the source, without modifying whitespace. - self._set_lines() - return self._lines - - @property - def _dedented_lines(self): - # Returns _original_lines, but dedented - self._set_lines() - if self._lines_dedented is None and self._lines is not None: - self._lines_dedented = textwrap.dedent(self._lines) - return self._lines_dedented - - @property - def line(self): - self._set_lines() - if self._lines is None: - return None - # return only the first line, stripped - return self._lines.partition("\n")[0].strip() - - -def walk_stack(f): - """Walk a stack yielding the frame and line number for each frame. - - This will follow f.f_back from the given frame. If no frame is given, the - current stack is used. Usually used with StackSummary.extract. 
- """ - if f is None: - f = sys._getframe().f_back.f_back.f_back.f_back - while f is not None: - yield f, f.f_lineno - f = f.f_back - - -def walk_tb(tb): - """Walk a traceback yielding the frame and line number for each frame. - - This will follow tb.tb_next (and thus is in the opposite order to - walk_stack). Usually used with StackSummary.extract. - """ - while tb is not None: - yield tb.tb_frame, tb.tb_lineno - tb = tb.tb_next - - -def _walk_tb_with_full_positions(tb): - # Internal version of walk_tb that yields full code positions including - # end line and column information. - while tb is not None: - positions = _get_code_position(tb.tb_frame.f_code, tb.tb_lasti) - # Yield tb_lineno when co_positions does not have a line number to - # maintain behavior with walk_tb. - if positions[0] is None: - yield tb.tb_frame, (tb.tb_lineno, ) + positions[1:] - else: - yield tb.tb_frame, positions - tb = tb.tb_next - - -def _get_code_position(code, instruction_index): - if instruction_index < 0: - return (None, None, None, None) - positions_gen = code.co_positions() - return next(itertools.islice(positions_gen, instruction_index // 2, None)) - - -_RECURSIVE_CUTOFF = 3 # Also hardcoded in traceback.c. - - -class StackSummary(list): - """A list of FrameSummary objects, representing a stack of frames.""" - - @classmethod - def extract(klass, frame_gen, *, limit=None, lookup_lines=True, - capture_locals=False): - """Create a StackSummary from a traceback or stack object. - - :param frame_gen: A generator that yields (frame, lineno) tuples - whose summaries are to be included in the stack. - :param limit: None to include all frames or the number of frames to - include. - :param lookup_lines: If True, lookup lines for each frame immediately, - otherwise lookup is deferred until the frame is rendered. - :param capture_locals: If True, the local variables from each frame will - be captured as object representations into the FrameSummary. - """ - def extended_frame_gen(): - for f, lineno in frame_gen: - yield f, (lineno, None, None, None) - - return klass._extract_from_extended_frame_gen( - extended_frame_gen(), limit=limit, lookup_lines=lookup_lines, - capture_locals=capture_locals) - - @classmethod - def _extract_from_extended_frame_gen(klass, frame_gen, *, limit=None, - lookup_lines=True, capture_locals=False): - # Same as extract but operates on a frame generator that yields - # (frame, (lineno, end_lineno, colno, end_colno)) in the stack. - # Only lineno is required, the remaining fields can be None if the - # information is not available. - builtin_limit = limit is BUILTIN_EXCEPTION_LIMIT - if limit is None or builtin_limit: - limit = getattr(sys, 'tracebacklimit', None) - if limit is not None and limit < 0: - limit = 0 - if limit is not None: - if builtin_limit: - frame_gen = tuple(frame_gen) - frame_gen = frame_gen[len(frame_gen) - limit:] - elif limit >= 0: - frame_gen = itertools.islice(frame_gen, limit) - else: - frame_gen = collections.deque(frame_gen, maxlen=-limit) - - result = klass() - fnames = set() - for f, (lineno, end_lineno, colno, end_colno) in frame_gen: - co = f.f_code - filename = co.co_filename - name = co.co_name - fnames.add(filename) - linecache.lazycache(filename, f.f_globals) - # Must defer line lookups until we have called checkcache. 
- if capture_locals: - f_locals = f.f_locals - else: - f_locals = None - result.append( - FrameSummary(filename, lineno, name, - lookup_line=False, locals=f_locals, - end_lineno=end_lineno, colno=colno, end_colno=end_colno, - _code=f.f_code, - ) - ) - for filename in fnames: - linecache.checkcache(filename) - - # If immediate lookup was desired, trigger lookups now. - if lookup_lines: - for f in result: - f.line - return result - - @classmethod - def from_list(klass, a_list): - """ - Create a StackSummary object from a supplied list of - FrameSummary objects or old-style list of tuples. - """ - # While doing a fast-path check for isinstance(a_list, StackSummary) is - # appealing, idlelib.run.cleanup_traceback and other similar code may - # break this by making arbitrary frames plain tuples, so we need to - # check on a frame by frame basis. - result = StackSummary() - for frame in a_list: - if isinstance(frame, FrameSummary): - result.append(frame) - else: - filename, lineno, name, line = frame - result.append(FrameSummary(filename, lineno, name, line=line)) - return result - - def format_frame_summary(self, frame_summary, **kwargs): - """Format the lines for a single FrameSummary. - - Returns a string representing one frame involved in the stack. This - gets called for every frame to be printed in the stack summary. - """ - colorize = kwargs.get("colorize", False) - row = [] - filename = frame_summary.filename - if frame_summary.filename.startswith("<python-input") or frame_summary.filename.startswith('<stdin>'): - filename = "<stdin>" - if colorize: - row.append(' File {}"{}"{}, line {}{}{}, in {}{}{}\n'.format( - ANSIColors.MAGENTA, - filename, - ANSIColors.RESET, - ANSIColors.MAGENTA, - frame_summary.lineno, - ANSIColors.RESET, - ANSIColors.MAGENTA, - frame_summary.name, - ANSIColors.RESET, - ) - ) - else: - row.append(' File "{}", line {}, in {}\n'.format( - filename, frame_summary.lineno, frame_summary.name)) - if frame_summary._dedented_lines and frame_summary._dedented_lines.strip(): - if ( - frame_summary.colno is None or - frame_summary.end_colno is None - ): - # only output first line if column information is missing - row.append(textwrap.indent(frame_summary.line, ' ') + "\n") - else: - # get first and last line - all_lines_original = frame_summary._original_lines.splitlines() - first_line = all_lines_original[0] - # assume all_lines_original has enough lines (since we constructed it) - last_line = all_lines_original[frame_summary.end_lineno - frame_summary.lineno] - - # character index of the start/end of the instruction - start_offset = _byte_offset_to_character_offset(first_line, frame_summary.colno) - end_offset = _byte_offset_to_character_offset(last_line, frame_summary.end_colno) - - all_lines = frame_summary._dedented_lines.splitlines()[ - :frame_summary.end_lineno - frame_summary.lineno + 1 - ] - - # adjust start/end offset based on dedent - dedent_characters = len(first_line) - len(all_lines[0]) - start_offset = max(0, start_offset - dedent_characters) - end_offset = max(0, end_offset - dedent_characters) - - # When showing this on a terminal, some of the non-ASCII characters - # might be rendered as double-width characters, so we need to take - # that into account when calculating the length of the line. 
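The user-visible effect of the column and anchor bookkeeping being set up here is easiest to see by formatting an error raised inside a larger expression; a minimal sketch (the `data` dict is illustrative):

```python
import traceback

try:
    data = {"a": 1}
    total = data["a"] + data["b"]   # KeyError raised by the second subscript
except KeyError as exc:
    # The rendered frame includes the source line plus a marker line with
    # "~" and "^" anchored under the failing subscript:
    print("".join(traceback.format_exception(exc)), end="")
```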
- dp_start_offset = _display_width(all_lines[0], offset=start_offset) - dp_end_offset = _display_width(all_lines[-1], offset=end_offset) - - # get exact code segment corresponding to the instruction - segment = "\n".join(all_lines) - segment = segment[start_offset:len(segment) - (len(all_lines[-1]) - end_offset)] - - # attempt to parse for anchors - anchors = None - show_carets = False - with suppress(Exception): - anchors = _extract_caret_anchors_from_line_segment(segment) - show_carets = self._should_show_carets(start_offset, end_offset, all_lines, anchors) - - result = [] - - # only display first line, last line, and lines around anchor start/end - significant_lines = {0, len(all_lines) - 1} - - anchors_left_end_offset = 0 - anchors_right_start_offset = 0 - primary_char = "^" - secondary_char = "^" - if anchors: - anchors_left_end_offset = anchors.left_end_offset - anchors_right_start_offset = anchors.right_start_offset - # computed anchor positions do not take start_offset into account, - # so account for it here - if anchors.left_end_lineno == 0: - anchors_left_end_offset += start_offset - if anchors.right_start_lineno == 0: - anchors_right_start_offset += start_offset - - # account for display width - anchors_left_end_offset = _display_width( - all_lines[anchors.left_end_lineno], offset=anchors_left_end_offset - ) - anchors_right_start_offset = _display_width( - all_lines[anchors.right_start_lineno], offset=anchors_right_start_offset - ) - - primary_char = anchors.primary_char - secondary_char = anchors.secondary_char - significant_lines.update( - range(anchors.left_end_lineno - 1, anchors.left_end_lineno + 2) - ) - significant_lines.update( - range(anchors.right_start_lineno - 1, anchors.right_start_lineno + 2) - ) - - # remove bad line numbers - significant_lines.discard(-1) - significant_lines.discard(len(all_lines)) - - def output_line(lineno): - """output all_lines[lineno] along with carets""" - result.append(all_lines[lineno] + "\n") - if not show_carets: - return - num_spaces = len(all_lines[lineno]) - len(all_lines[lineno].lstrip()) - carets = [] - num_carets = dp_end_offset if lineno == len(all_lines) - 1 else _display_width(all_lines[lineno]) - # compute caret character for each position - for col in range(num_carets): - if col < num_spaces or (lineno == 0 and col < dp_start_offset): - # before first non-ws char of the line, or before start of instruction - carets.append(' ') - elif anchors and ( - lineno > anchors.left_end_lineno or - (lineno == anchors.left_end_lineno and col >= anchors_left_end_offset) - ) and ( - lineno < anchors.right_start_lineno or - (lineno == anchors.right_start_lineno and col < anchors_right_start_offset) - ): - # within anchors - carets.append(secondary_char) - else: - carets.append(primary_char) - if colorize: - # Replace the previous line with a red version of it only in the parts covered - # by the carets. 
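The `colorize` flag threaded through this rendering is normally decided by `_colorize.can_colorize()`; one documented external knob is the `PYTHON_COLORS` environment variable (3.13+), shown here from a parent process (a minimal sketch):

```python
import os
import subprocess
import sys

# PYTHON_COLORS=0 suppresses the ANSI sequences the colorize branch above
# would emit; PYTHON_COLORS=1 forces them even when stderr is not a tty.
env = dict(os.environ, PYTHON_COLORS="0")
subprocess.run([sys.executable, "-c", "1/0"], env=env)   # plain traceback
```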
- line = result[-1] - colorized_line_parts = [] - colorized_carets_parts = [] - - for color, group in itertools.groupby(itertools.zip_longest(line, carets, fillvalue=""), key=lambda x: x[1]): - caret_group = list(group) - if color == "^": - colorized_line_parts.append(ANSIColors.BOLD_RED + "".join(char for char, _ in caret_group) + ANSIColors.RESET) - colorized_carets_parts.append(ANSIColors.BOLD_RED + "".join(caret for _, caret in caret_group) + ANSIColors.RESET) - elif color == "~": - colorized_line_parts.append(ANSIColors.RED + "".join(char for char, _ in caret_group) + ANSIColors.RESET) - colorized_carets_parts.append(ANSIColors.RED + "".join(caret for _, caret in caret_group) + ANSIColors.RESET) - else: - colorized_line_parts.append("".join(char for char, _ in caret_group)) - colorized_carets_parts.append("".join(caret for _, caret in caret_group)) - - colorized_line = "".join(colorized_line_parts) - colorized_carets = "".join(colorized_carets_parts) - result[-1] = colorized_line - result.append(colorized_carets + "\n") - else: - result.append("".join(carets) + "\n") - - # display significant lines - sig_lines_list = sorted(significant_lines) - for i, lineno in enumerate(sig_lines_list): - if i: - linediff = lineno - sig_lines_list[i - 1] - if linediff == 2: - # 1 line in between - just output it - output_line(lineno - 1) - elif linediff > 2: - # > 1 line in between - abbreviate - result.append(f"...<{linediff - 1} lines>...\n") - output_line(lineno) - - row.append( - textwrap.indent(textwrap.dedent("".join(result)), ' ', lambda line: True) - ) - if frame_summary.locals: - for name, value in sorted(frame_summary.locals.items()): - row.append(' {name} = {value}\n'.format(name=name, value=value)) - - return ''.join(row) - - def _should_show_carets(self, start_offset, end_offset, all_lines, anchors): - with suppress(SyntaxError, ImportError): - import ast - tree = ast.parse('\n'.join(all_lines)) - if not tree.body: - return False - statement = tree.body[0] - value = None - def _spawns_full_line(value): - return ( - value.lineno == 1 - and value.end_lineno == len(all_lines) - and value.col_offset == start_offset - and value.end_col_offset == end_offset - ) - match statement: - case ast.Return(value=ast.Call()): - if isinstance(statement.value.func, ast.Name): - value = statement.value - case ast.Assign(value=ast.Call()): - if ( - len(statement.targets) == 1 and - isinstance(statement.targets[0], ast.Name) - ): - value = statement.value - if value is not None and _spawns_full_line(value): - return False - if anchors: - return True - if all_lines[0][:start_offset].lstrip() or all_lines[-1][end_offset:].rstrip(): - return True - return False - - def format(self, **kwargs): - """Format the stack ready for printing. - - Returns a list of strings ready for printing. Each string in the - resulting list corresponds to a single frame from the stack. - Each string ends in a newline; the strings may contain internal - newlines as well, for those items with source text lines. - - For long sequences of the same frame and line, the first few - repetitions are shown, followed by a summary line stating the exact - number of further repetitions. 
- """ - colorize = kwargs.get("colorize", False) - result = [] - last_file = None - last_line = None - last_name = None - count = 0 - for frame_summary in self: - formatted_frame = self.format_frame_summary(frame_summary, colorize=colorize) - if formatted_frame is None: - continue - if (last_file is None or last_file != frame_summary.filename or - last_line is None or last_line != frame_summary.lineno or - last_name is None or last_name != frame_summary.name): - if count > _RECURSIVE_CUTOFF: - count -= _RECURSIVE_CUTOFF - result.append( - f' [Previous line repeated {count} more ' - f'time{"s" if count > 1 else ""}]\n' - ) - last_file = frame_summary.filename - last_line = frame_summary.lineno - last_name = frame_summary.name - count = 0 - count += 1 - if count > _RECURSIVE_CUTOFF: - continue - result.append(formatted_frame) - - if count > _RECURSIVE_CUTOFF: - count -= _RECURSIVE_CUTOFF - result.append( - f' [Previous line repeated {count} more ' - f'time{"s" if count > 1 else ""}]\n' - ) - return result - - -def _byte_offset_to_character_offset(str, offset): - as_utf8 = str.encode('utf-8') - return len(as_utf8[:offset].decode("utf-8", errors="replace")) - - -_Anchors = collections.namedtuple( - "_Anchors", - [ - "left_end_lineno", - "left_end_offset", - "right_start_lineno", - "right_start_offset", - "primary_char", - "secondary_char", - ], - defaults=["~", "^"] -) - -def _extract_caret_anchors_from_line_segment(segment): - """ - Given source code `segment` corresponding to a FrameSummary, determine: - - for binary ops, the location of the binary op - - for indexing and function calls, the location of the brackets. - `segment` is expected to be a valid Python expression. - """ - import ast - - try: - # Without parentheses, `segment` is parsed as a statement. - # Binary ops, subscripts, and calls are expressions, so - # we can wrap them with parentheses to parse them as - # (possibly multi-line) expressions. - # e.g. if we try to highlight the addition in - # x = ( - # a + - # b - # ) - # then we would ast.parse - # a + - # b - # which is not a valid statement because of the newline. - # Adding brackets makes it a valid expression. - # ( - # a + - # b - # ) - # Line locations will be different than the original, - # which is taken into account later on. - tree = ast.parse(f"(\n{segment}\n)") - except SyntaxError: - return None - - if len(tree.body) != 1: - return None - - lines = segment.splitlines() - - def normalize(lineno, offset): - """Get character index given byte offset""" - return _byte_offset_to_character_offset(lines[lineno], offset) - - def next_valid_char(lineno, col): - """Gets the next valid character index in `lines`, if - the current location is not valid. Handles empty lines. 
- """ - while lineno < len(lines) and col >= len(lines[lineno]): - col = 0 - lineno += 1 - assert lineno < len(lines) and col < len(lines[lineno]) - return lineno, col - - def increment(lineno, col): - """Get the next valid character index in `lines`.""" - col += 1 - lineno, col = next_valid_char(lineno, col) - return lineno, col - - def nextline(lineno, col): - """Get the next valid character at least on the next line""" - col = 0 - lineno += 1 - lineno, col = next_valid_char(lineno, col) - return lineno, col - - def increment_until(lineno, col, stop): - """Get the next valid non-"\\#" character that satisfies the `stop` predicate""" - while True: - ch = lines[lineno][col] - if ch in "\\#": - lineno, col = nextline(lineno, col) - elif not stop(ch): - lineno, col = increment(lineno, col) - else: - break - return lineno, col - - def setup_positions(expr, force_valid=True): - """Get the lineno/col position of the end of `expr`. If `force_valid` is True, - forces the position to be a valid character (e.g. if the position is beyond the - end of the line, move to the next line) - """ - # -2 since end_lineno is 1-indexed and because we added an extra - # bracket + newline to `segment` when calling ast.parse - lineno = expr.end_lineno - 2 - col = normalize(lineno, expr.end_col_offset) - return next_valid_char(lineno, col) if force_valid else (lineno, col) - - statement = tree.body[0] - match statement: - case ast.Expr(expr): - match expr: - case ast.BinOp(): - # ast gives these locations for BinOp subexpressions - # ( left_expr ) + ( right_expr ) - # left^^^^^ right^^^^^ - lineno, col = setup_positions(expr.left) - - # First operator character is the first non-space/')' character - lineno, col = increment_until(lineno, col, lambda x: not x.isspace() and x != ')') - - # binary op is 1 or 2 characters long, on the same line, - # before the right subexpression - right_col = col + 1 - if ( - right_col < len(lines[lineno]) - and ( - # operator char should not be in the right subexpression - expr.right.lineno - 2 > lineno or - right_col < normalize(expr.right.lineno - 2, expr.right.col_offset) - ) - and not (ch := lines[lineno][right_col]).isspace() - and ch not in "\\#" - ): - right_col += 1 - - # right_col can be invalid since it is exclusive - return _Anchors(lineno, col, lineno, right_col) - case ast.Subscript(): - # ast gives these locations for value and slice subexpressions - # ( value_expr ) [ slice_expr ] - # value^^^^^ slice^^^^^ - # subscript^^^^^^^^^^^^^^^^^^^^ - - # find left bracket - left_lineno, left_col = setup_positions(expr.value) - left_lineno, left_col = increment_until(left_lineno, left_col, lambda x: x == '[') - # find right bracket (final character of expression) - right_lineno, right_col = setup_positions(expr, force_valid=False) - return _Anchors(left_lineno, left_col, right_lineno, right_col) - case ast.Call(): - # ast gives these locations for function call expressions - # ( func_expr ) (args, kwargs) - # func^^^^^ - # call^^^^^^^^^^^^^^^^^^^^^^^^ - - # find left bracket - left_lineno, left_col = setup_positions(expr.func) - left_lineno, left_col = increment_until(left_lineno, left_col, lambda x: x == '(') - # find right bracket (final character of expression) - right_lineno, right_col = setup_positions(expr, force_valid=False) - return _Anchors(left_lineno, left_col, right_lineno, right_col) - - return None - -_WIDE_CHAR_SPECIFIERS = "WF" - -def _display_width(line, offset=None): - """Calculate the extra amount of width space the given source - code segment might take if it 
were to be displayed on a fixed - width output device. Supports wide unicode characters and emojis.""" - - if offset is None: - offset = len(line) - - # Fast track for ASCII-only strings - if line.isascii(): - return offset - - import unicodedata - - return sum( - 2 if unicodedata.east_asian_width(char) in _WIDE_CHAR_SPECIFIERS else 1 - for char in line[:offset] - ) - - - -class _ExceptionPrintContext: - def __init__(self): - self.seen = set() - self.exception_group_depth = 0 - self.need_close = False - - def indent(self): - return ' ' * (2 * self.exception_group_depth) - - def emit(self, text_gen, margin_char=None): - if margin_char is None: - margin_char = '|' - indent_str = self.indent() - if self.exception_group_depth: - indent_str += margin_char + ' ' - - if isinstance(text_gen, str): - yield textwrap.indent(text_gen, indent_str, lambda line: True) - else: - for text in text_gen: - yield textwrap.indent(text, indent_str, lambda line: True) - - -class TracebackException: - """An exception ready for rendering. - - The traceback module captures enough attributes from the original exception - to this intermediary form to ensure that no references are held, while - still being able to fully print or format it. - - max_group_width and max_group_depth control the formatting of exception - groups. The depth refers to the nesting level of the group, and the width - refers to the size of a single exception group's exceptions array. The - formatted output is truncated when either limit is exceeded. - - Use `from_exception` to create TracebackException instances from exception - objects, or the constructor to create TracebackException instances from - individual components. - - - :attr:`__cause__` A TracebackException of the original *__cause__*. - - :attr:`__context__` A TracebackException of the original *__context__*. - - :attr:`exceptions` For exception groups - a list of TracebackException - instances for the nested *exceptions*. ``None`` for other exceptions. - - :attr:`__suppress_context__` The *__suppress_context__* value from the - original exception. - - :attr:`stack` A `StackSummary` representing the traceback. - - :attr:`exc_type` (deprecated) The class of the original traceback. - - :attr:`exc_type_str` String display of exc_type - - :attr:`filename` For syntax errors - the filename where the error - occurred. - - :attr:`lineno` For syntax errors - the linenumber where the error - occurred. - - :attr:`end_lineno` For syntax errors - the end linenumber where the error - occurred. Can be `None` if not present. - - :attr:`text` For syntax errors - the text where the error - occurred. - - :attr:`offset` For syntax errors - the offset into the text where the - error occurred. - - :attr:`end_offset` For syntax errors - the end offset into the text where - the error occurred. Can be `None` if not present. - - :attr:`msg` For syntax errors - the compiler error message. - """ - - def __init__(self, exc_type, exc_value, exc_traceback, *, limit=None, - lookup_lines=True, capture_locals=False, compact=False, - max_group_width=15, max_group_depth=10, save_exc_type=True, _seen=None): - # NB: we need to accept exc_traceback, exc_value, exc_traceback to - # permit backwards compat with the existing API, otherwise we - # need stub thunk objects just to glue it together. - # Handle loops in __cause__ or __context__. 
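As an illustration of the width rule `_display_width` applies above, a standalone re-statement under the same East Asian width convention (`display_width` is a hypothetical name for this sketch):

```python
import unicodedata

def display_width(line, offset=None):
    # East Asian Wide ("W") and Fullwidth ("F") characters take two columns.
    if offset is None:
        offset = len(line)
    if line.isascii():                     # same fast path as the module
        return offset
    return sum(2 if unicodedata.east_asian_width(ch) in "WF" else 1
               for ch in line[:offset])

print(display_width("abc"))        # 3
print(display_width("ｗｉｄｅ"))   # 8: fullwidth letters count double
```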
- is_recursive_call = _seen is not None - if _seen is None: - _seen = set() - _seen.add(id(exc_value)) - - self.max_group_width = max_group_width - self.max_group_depth = max_group_depth - - self.stack = StackSummary._extract_from_extended_frame_gen( - _walk_tb_with_full_positions(exc_traceback), - limit=limit, lookup_lines=lookup_lines, - capture_locals=capture_locals) - - self._exc_type = exc_type if save_exc_type else None - - # Capture now to permit freeing resources: only complication is in the - # unofficial API _format_final_exc_line - self._str = _safe_string(exc_value, 'exception') - try: - self.__notes__ = getattr(exc_value, '__notes__', None) - except Exception as e: - self.__notes__ = [ - f'Ignored error getting __notes__: {_safe_string(e, '__notes__', repr)}'] - - self._is_syntax_error = False - self._have_exc_type = exc_type is not None - if exc_type is not None: - self.exc_type_qualname = exc_type.__qualname__ - self.exc_type_module = exc_type.__module__ - else: - self.exc_type_qualname = None - self.exc_type_module = None - - if exc_type and issubclass(exc_type, SyntaxError): - # Handle SyntaxError's specially - self.filename = exc_value.filename - lno = exc_value.lineno - self.lineno = str(lno) if lno is not None else None - end_lno = exc_value.end_lineno - self.end_lineno = str(end_lno) if end_lno is not None else None - self.text = exc_value.text - self.offset = exc_value.offset - self.end_offset = exc_value.end_offset - self.msg = exc_value.msg - self._is_syntax_error = True - elif exc_type and issubclass(exc_type, ImportError) and \ - getattr(exc_value, "name_from", None) is not None: - wrong_name = getattr(exc_value, "name_from", None) - suggestion = _compute_suggestion_error(exc_value, exc_traceback, wrong_name) - if suggestion: - self._str += f". Did you mean: '{suggestion}'?" - elif exc_type and issubclass(exc_type, (NameError, AttributeError)) and \ - getattr(exc_value, "name", None) is not None: - wrong_name = getattr(exc_value, "name", None) - suggestion = _compute_suggestion_error(exc_value, exc_traceback, wrong_name) - if suggestion: - self._str += f". Did you mean: '{suggestion}'?" - if issubclass(exc_type, NameError): - wrong_name = getattr(exc_value, "name", None) - if wrong_name is not None and wrong_name in sys.stdlib_module_names: - if suggestion: - self._str += f" Or did you forget to import '{wrong_name}'?" - else: - self._str += f". Did you forget to import '{wrong_name}'?" 
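The suggestion wiring just above is what produces the familiar "Did you mean" suffix; a minimal sketch of triggering it through the public API:

```python
import traceback

try:
    "".startswih("x")                  # misspelled str method
except AttributeError as exc:
    # The computed suggestion is appended to the rendered message, e.g.
    # AttributeError: 'str' object has no attribute 'startswih'.
    # Did you mean: 'startswith'?
    print("".join(traceback.format_exception_only(exc)), end="")
```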
- if lookup_lines: - self._load_lines() - self.__suppress_context__ = \ - exc_value.__suppress_context__ if exc_value is not None else False - - # Convert __cause__ and __context__ to `TracebackExceptions`s, use a - # queue to avoid recursion (only the top-level call gets _seen == None) - if not is_recursive_call: - queue = [(self, exc_value)] - while queue: - te, e = queue.pop() - if (e is not None and e.__cause__ is not None - and id(e.__cause__) not in _seen): - cause = TracebackException( - type(e.__cause__), - e.__cause__, - e.__cause__.__traceback__, - limit=limit, - lookup_lines=lookup_lines, - capture_locals=capture_locals, - max_group_width=max_group_width, - max_group_depth=max_group_depth, - _seen=_seen) - else: - cause = None - - if compact: - need_context = (cause is None and - e is not None and - not e.__suppress_context__) - else: - need_context = True - if (e is not None and e.__context__ is not None - and need_context and id(e.__context__) not in _seen): - context = TracebackException( - type(e.__context__), - e.__context__, - e.__context__.__traceback__, - limit=limit, - lookup_lines=lookup_lines, - capture_locals=capture_locals, - max_group_width=max_group_width, - max_group_depth=max_group_depth, - _seen=_seen) - else: - context = None - - if e is not None and isinstance(e, BaseExceptionGroup): - exceptions = [] - for exc in e.exceptions: - texc = TracebackException( - type(exc), - exc, - exc.__traceback__, - limit=limit, - lookup_lines=lookup_lines, - capture_locals=capture_locals, - max_group_width=max_group_width, - max_group_depth=max_group_depth, - _seen=_seen) - exceptions.append(texc) - else: - exceptions = None - - te.__cause__ = cause - te.__context__ = context - te.exceptions = exceptions - if cause: - queue.append((te.__cause__, e.__cause__)) - if context: - queue.append((te.__context__, e.__context__)) - if exceptions: - queue.extend(zip(te.exceptions, e.exceptions)) - - @classmethod - def from_exception(cls, exc, *args, **kwargs): - """Create a TracebackException from an exception.""" - return cls(type(exc), exc, exc.__traceback__, *args, **kwargs) - - @property - def exc_type(self): - warnings.warn('Deprecated in 3.13. Use exc_type_str instead.', - DeprecationWarning, stacklevel=2) - return self._exc_type - - @property - def exc_type_str(self): - if not self._have_exc_type: - return None - stype = self.exc_type_qualname - smod = self.exc_type_module - if smod not in ("__main__", "builtins"): - if not isinstance(smod, str): - smod = "<unknown>" - stype = smod + '.' + stype - return stype - - def _load_lines(self): - """Private API. force all lines in the stack to be loaded.""" - for frame in self.stack: - frame.line - - def __eq__(self, other): - if isinstance(other, TracebackException): - return self.__dict__ == other.__dict__ - return NotImplemented - - def __str__(self): - return self._str - - def format_exception_only(self, *, show_group=False, _depth=0, **kwargs): - """Format the exception part of the traceback. - - The return value is a generator of strings, each ending in a newline. - - Generator yields the exception message. - For :exc:`SyntaxError` exceptions, it - also yields (before the exception message) - several lines that (when printed) - display detailed information about where the syntax error occurred. - Following the message, generator also yields - all the exception's ``__notes__``. 
- - When *show_group* is ``True``, and the exception is an instance of - :exc:`BaseExceptionGroup`, the nested exceptions are included as - well, recursively, with indentation relative to their nesting depth. - """ - colorize = kwargs.get("colorize", False) - - indent = 3 * _depth * ' ' - if not self._have_exc_type: - yield indent + _format_final_exc_line(None, self._str, colorize=colorize) - return - - stype = self.exc_type_str - if not self._is_syntax_error: - if _depth > 0: - # Nested exceptions needs correct handling of multiline messages. - formatted = _format_final_exc_line( - stype, self._str, insert_final_newline=False, colorize=colorize - ).split('\n') - yield from [ - indent + l + '\n' - for l in formatted - ] - else: - yield _format_final_exc_line(stype, self._str, colorize=colorize) - else: - yield from [indent + l for l in self._format_syntax_error(stype, colorize=colorize)] - - if ( - isinstance(self.__notes__, collections.abc.Sequence) - and not isinstance(self.__notes__, (str, bytes)) - ): - for note in self.__notes__: - note = _safe_string(note, 'note') - yield from [indent + l + '\n' for l in note.split('\n')] - elif self.__notes__ is not None: - yield indent + "{}\n".format(_safe_string(self.__notes__, '__notes__', func=repr)) - - if self.exceptions and show_group: - for ex in self.exceptions: - yield from ex.format_exception_only(show_group=show_group, _depth=_depth+1, colorize=colorize) - - def _format_syntax_error(self, stype, **kwargs): - """Format SyntaxError exceptions (internal helper).""" - # Show exactly where the problem was found. - colorize = kwargs.get("colorize", False) - filename_suffix = '' - if self.lineno is not None: - if colorize: - yield ' File {}"{}"{}, line {}{}{}\n'.format( - ANSIColors.MAGENTA, - self.filename or "", - ANSIColors.RESET, - ANSIColors.MAGENTA, - self.lineno, - ANSIColors.RESET, - ) - else: - yield ' File "{}", line {}\n'.format( - self.filename or "", self.lineno) - elif self.filename is not None: - filename_suffix = ' ({})'.format(self.filename) - - text = self.text - if isinstance(text, str): - # text = " foo\n" - # rtext = " foo" - # ltext = "foo" - rtext = text.rstrip('\n') - ltext = rtext.lstrip(' \n\f') - spaces = len(rtext) - len(ltext) - if self.offset is None: - yield ' {}\n'.format(ltext) - elif isinstance(self.offset, int): - offset = self.offset - if self.lineno == self.end_lineno: - end_offset = ( - self.end_offset - if ( - isinstance(self.end_offset, int) - and self.end_offset != 0 - ) - else offset - ) - else: - end_offset = len(rtext) + 1 - - if self.text and offset > len(self.text): - offset = len(rtext) + 1 - if self.text and end_offset > len(self.text): - end_offset = len(rtext) + 1 - if offset >= end_offset or end_offset < 0: - end_offset = offset + 1 - - # Convert 1-based column offset to 0-based index into stripped text - colno = offset - 1 - spaces - end_colno = end_offset - 1 - spaces - caretspace = ' ' - if colno >= 0: - # non-space whitespace (likes tabs) must be kept for alignment - caretspace = ((c if c.isspace() else ' ') for c in ltext[:colno]) - start_color = end_color = "" - if colorize: - # colorize from colno to end_colno - ltext = ( - ltext[:colno] + - ANSIColors.BOLD_RED + ltext[colno:end_colno] + ANSIColors.RESET + - ltext[end_colno:] - ) - start_color = ANSIColors.BOLD_RED - end_color = ANSIColors.RESET - yield ' {}\n'.format(ltext) - yield ' {}{}{}{}\n'.format( - "".join(caretspace), - start_color, - ('^' * (end_colno - colno)), - end_color, - ) - else: - yield ' {}\n'.format(ltext) - msg = 
self.msg or "" - if colorize: - yield "{}{}{}: {}{}{}{}\n".format( - ANSIColors.BOLD_MAGENTA, - stype, - ANSIColors.RESET, - ANSIColors.MAGENTA, - msg, - ANSIColors.RESET, - filename_suffix) - else: - yield "{}: {}{}\n".format(stype, msg, filename_suffix) - - def format(self, *, chain=True, _ctx=None, **kwargs): - """Format the exception. - - If chain is not *True*, *__cause__* and *__context__* will not be formatted. - - The return value is a generator of strings, each ending in a newline and - some containing internal newlines. `print_exception` is a wrapper around - this method which just prints the lines to a file. - - The message indicating which exception occurred is always the last - string in the output. - """ - colorize = kwargs.get("colorize", False) - if _ctx is None: - _ctx = _ExceptionPrintContext() - - output = [] - exc = self - if chain: - while exc: - if exc.__cause__ is not None: - chained_msg = _cause_message - chained_exc = exc.__cause__ - elif (exc.__context__ is not None and - not exc.__suppress_context__): - chained_msg = _context_message - chained_exc = exc.__context__ - else: - chained_msg = None - chained_exc = None - - output.append((chained_msg, exc)) - exc = chained_exc - else: - output.append((None, exc)) - - for msg, exc in reversed(output): - if msg is not None: - yield from _ctx.emit(msg) - if exc.exceptions is None: - if exc.stack: - yield from _ctx.emit('Traceback (most recent call last):\n') - yield from _ctx.emit(exc.stack.format(colorize=colorize)) - yield from _ctx.emit(exc.format_exception_only(colorize=colorize)) - elif _ctx.exception_group_depth > self.max_group_depth: - # exception group, but depth exceeds limit - yield from _ctx.emit( - f"... (max_group_depth is {self.max_group_depth})\n") - else: - # format exception group - is_toplevel = (_ctx.exception_group_depth == 0) - if is_toplevel: - _ctx.exception_group_depth += 1 - - if exc.stack: - yield from _ctx.emit( - 'Exception Group Traceback (most recent call last):\n', - margin_char = '+' if is_toplevel else None) - yield from _ctx.emit(exc.stack.format(colorize=colorize)) - - yield from _ctx.emit(exc.format_exception_only(colorize=colorize)) - num_excs = len(exc.exceptions) - if num_excs <= self.max_group_width: - n = num_excs - else: - n = self.max_group_width + 1 - _ctx.need_close = False - for i in range(n): - last_exc = (i == n-1) - if last_exc: - # The closing frame may be added by a recursive call - _ctx.need_close = True - - if self.max_group_width is not None: - truncated = (i >= self.max_group_width) - else: - truncated = False - title = f'{i+1}' if not truncated else '...' 
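# A minimal sketch of the group formatting above: max_group_width caps how
# many child exceptions are rendered before the remainder collapses into
# an "and N more exceptions" line (all names here are stdlib APIs).
import traceback

eg = ExceptionGroup("batch failed",
                    [ValueError("a"), KeyError("b"), TypeError("c")])
te = traceback.TracebackException.from_exception(eg, max_group_width=1)
report = "".join(te.format())
assert "and 2 more exceptions" in report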
- yield (_ctx.indent() + - ('+-' if i==0 else ' ') + - f'+---------------- {title} ----------------\n') - _ctx.exception_group_depth += 1 - if not truncated: - yield from exc.exceptions[i].format(chain=chain, _ctx=_ctx, colorize=colorize) - else: - remaining = num_excs - self.max_group_width - plural = 's' if remaining > 1 else '' - yield from _ctx.emit( - f"and {remaining} more exception{plural}\n") - - if last_exc and _ctx.need_close: - yield (_ctx.indent() + - "+------------------------------------\n") - _ctx.need_close = False - _ctx.exception_group_depth -= 1 - - if is_toplevel: - assert _ctx.exception_group_depth == 1 - _ctx.exception_group_depth = 0 - - - def print(self, *, file=None, chain=True, **kwargs): - """Print the result of self.format(chain=chain) to 'file'.""" - colorize = kwargs.get("colorize", False) - if file is None: - file = sys.stderr - for line in self.format(chain=chain, colorize=colorize): - print(line, file=file, end="") - - -_MAX_CANDIDATE_ITEMS = 750 -_MAX_STRING_SIZE = 40 -_MOVE_COST = 2 -_CASE_COST = 1 - - -def _substitution_cost(ch_a, ch_b): - if ch_a == ch_b: - return 0 - if ch_a.lower() == ch_b.lower(): - return _CASE_COST - return _MOVE_COST - - -def _get_safe___dir__(obj): - # Use obj.__dir__() to avoid a TypeError when calling dir(obj). - # See gh-131001 and gh-139933. - try: - d = obj.__dir__() - except TypeError: # when obj is a class - d = type(obj).__dir__(obj) - return sorted(x for x in d if isinstance(x, str)) - - -def _compute_suggestion_error(exc_value, tb, wrong_name): - if wrong_name is None or not isinstance(wrong_name, str): - return None - if isinstance(exc_value, AttributeError): - obj = exc_value.obj - try: - d = _get_safe___dir__(obj) - hide_underscored = (wrong_name[:1] != '_') - if hide_underscored and tb is not None: - while tb.tb_next is not None: - tb = tb.tb_next - frame = tb.tb_frame - if 'self' in frame.f_locals and frame.f_locals['self'] is obj: - hide_underscored = False - if hide_underscored: - d = [x for x in d if x[:1] != '_'] - except Exception: - return None - elif isinstance(exc_value, ImportError): - try: - mod = __import__(exc_value.name) - d = _get_safe___dir__(mod) - if wrong_name[:1] != '_': - d = [x for x in d if x[:1] != '_'] - except Exception: - return None - else: - assert isinstance(exc_value, NameError) - # find most recent frame - if tb is None: - return None - while tb.tb_next is not None: - tb = tb.tb_next - frame = tb.tb_frame - d = ( - list(frame.f_locals) - + list(frame.f_globals) - + list(frame.f_builtins) - ) - d = [x for x in d if isinstance(x, str)] - - # Check first if we are in a method and the instance - # has the wrong name as attribute - if 'self' in frame.f_locals: - self = frame.f_locals['self'] - try: - has_wrong_name = hasattr(self, wrong_name) - except Exception: - has_wrong_name = False - if has_wrong_name: - return f"self.{wrong_name}" - - try: - import _suggestions - except ImportError: - pass - else: - return _suggestions._generate_suggestions(d, wrong_name) - - # Compute closest match - - if len(d) > _MAX_CANDIDATE_ITEMS: - return None - wrong_name_len = len(wrong_name) - if wrong_name_len > _MAX_STRING_SIZE: - return None - best_distance = wrong_name_len - suggestion = None - for possible_name in d: - if possible_name == wrong_name: - # A missing attribute is "found". Don't suggest it (see GH-88821). - continue - # No more than 1/3 of the involved characters should need changed. 
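# A sketch of the suggestion machinery above as end users see it: a close
# misspelling in a NameError gains a "Did you mean" hint, selected by the
# case-aware edit distance defined next (a case flip costs less than a
# full substitution or insertion/deletion).
import traceback

try:
    prnt("hello")            # NameError: name 'prnt' is not defined
except NameError as exc:
    msg = "".join(traceback.format_exception_only(exc))
assert "Did you mean: 'print'?" in msg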
- max_distance = (len(possible_name) + wrong_name_len + 3) * _MOVE_COST // 6 - # Don't take matches we've already beaten. - max_distance = min(max_distance, best_distance - 1) - current_distance = _levenshtein_distance(wrong_name, possible_name, max_distance) - if current_distance > max_distance: - continue - if not suggestion or current_distance < best_distance: - suggestion = possible_name - best_distance = current_distance - return suggestion - - -def _levenshtein_distance(a, b, max_cost): - # A Python implementation of Python/suggestions.c:levenshtein_distance. - - # Both strings are the same - if a == b: - return 0 - - # Trim away common affixes - pre = 0 - while a[pre:] and b[pre:] and a[pre] == b[pre]: - pre += 1 - a = a[pre:] - b = b[pre:] - post = 0 - while a[:post or None] and b[:post or None] and a[post-1] == b[post-1]: - post -= 1 - a = a[:post or None] - b = b[:post or None] - if not a or not b: - return _MOVE_COST * (len(a) + len(b)) - if len(a) > _MAX_STRING_SIZE or len(b) > _MAX_STRING_SIZE: - return max_cost + 1 - - # Prefer shorter buffer - if len(b) < len(a): - a, b = b, a - - # Quick fail when a match is impossible - if (len(b) - len(a)) * _MOVE_COST > max_cost: - return max_cost + 1 - - # Instead of producing the whole traditional len(a)-by-len(b) - # matrix, we can update just one row in place. - # Initialize the buffer row - row = list(range(_MOVE_COST, _MOVE_COST * (len(a) + 1), _MOVE_COST)) - - result = 0 - for bindex in range(len(b)): - bchar = b[bindex] - distance = result = bindex * _MOVE_COST - minimum = sys.maxsize - for index in range(len(a)): - # 1) Previous distance in this row is cost(b[:b_index], a[:index]) - substitute = distance + _substitution_cost(bchar, a[index]) - # 2) cost(b[:b_index], a[:index+1]) from previous row - distance = row[index] - # 3) existing result is cost(b[:b_index+1], a[index]) - - insert_delete = min(result, distance) + _MOVE_COST - result = min(insert_delete, substitute) - - # cost(b[:b_index+1], a[:index+1]) - row[index] = result - if result < minimum: - minimum = result - if minimum > max_cost: - # Everything in this row is too big, so bail early. - return max_cost + 1 - return result diff --git a/Python313_13_x86_Template/Lib/turtle.py b/Python313_13_x86_Template/Lib/turtle.py deleted file mode 100644 index ff2002cc..00000000 --- a/Python313_13_x86_Template/Lib/turtle.py +++ /dev/null @@ -1,4199 +0,0 @@ -# -# turtle.py: a Tkinter based turtle graphics module for Python -# Version 1.1b - 4. 5. 2009 -# -# Copyright (C) 2006 - 2010 Gregor Lingl -# email: glingl@aon.at -# -# This software is provided 'as-is', without any express or implied -# warranty. In no event will the authors be held liable for any damages -# arising from the use of this software. -# -# Permission is granted to anyone to use this software for any purpose, -# including commercial applications, and to alter it and redistribute it -# freely, subject to the following restrictions: -# -# 1. The origin of this software must not be misrepresented; you must not -# claim that you wrote the original software. If you use this software -# in a product, an acknowledgment in the product documentation would be -# appreciated but is not required. -# 2. Altered source versions must be plainly marked as such, and must not be -# misrepresented as being the original software. -# 3. This notice may not be removed or altered from any source distribution. - -""" -Turtle graphics is a popular way for introducing programming to -kids. 
It was part of the original Logo programming language developed -by Wally Feurzig and Seymour Papert in 1966. - -Imagine a robotic turtle starting at (0, 0) in the x-y plane. After an ``import turtle``, give it -the command turtle.forward(15), and it moves (on-screen!) 15 pixels in -the direction it is facing, drawing a line as it moves. Give it the -command turtle.right(25), and it rotates in-place 25 degrees clockwise. - -By combining together these and similar commands, intricate shapes and -pictures can easily be drawn. - ------ turtle.py - -This module is an extended reimplementation of turtle.py from the -Python standard distribution up to Python 2.5. (See: https://www.python.org) - -It tries to keep the merits of turtle.py and to be (nearly) 100% -compatible with it. This means in the first place to enable the -learning programmer to use all the commands, classes and methods -interactively when using the module from within IDLE run with -the -n switch. - -Roughly it has the following features added: - -- Better animation of the turtle movements, especially of turning the - turtle. So the turtles can more easily be used as a visual feedback - instrument by the (beginning) programmer. - -- Different turtle shapes, gif-images as turtle shapes, user defined - and user controllable turtle shapes, among them compound - (multicolored) shapes. Turtle shapes can be stretched and tilted, which - makes turtles very versatile geometrical objects. - -- Fine control over turtle movement and screen updates via delay(), - and enhanced tracer() and speed() methods. - -- Aliases for the most commonly used commands, like fd for forward etc., - following the early Logo traditions. This reduces the boring work of - typing long sequences of commands, which often occur in a natural way - when kids try to program fancy pictures on their first encounter with - turtle graphics. - -- Turtles now have an undo()-method with configurable undo-buffer. - -- Some simple commands/methods for creating event driven programs - (mouse-, key-, timer-events). Especially useful for programming games. - -- A scrollable Canvas class. The default scrollable Canvas can be - extended interactively as needed while playing around with the turtle(s). - -- A TurtleScreen class with methods controlling background color or - background image, window and canvas size and other properties of the - TurtleScreen. - -- There is a method, setworldcoordinates(), to install a user defined - coordinate-system for the TurtleScreen. - -- The implementation uses a 2-vector class named Vec2D, derived from tuple. - This class is public, so it can be imported by the application programmer, - which makes certain types of computations very natural and compact. - -- Appearance of the TurtleScreen and the Turtles at startup/import can be - configured by means of a turtle.cfg configuration file. - The default configuration mimics the appearance of the old turtle module. - -- If configured appropriately the module reads in docstrings from a docstring - dictionary in some different language, supplied separately and replaces - the English ones by those read in. There is a utility function - write_docstringdict() to write a dictionary with the original (English) - docstrings to disc, so it can serve as a template for translations. - -Behind the scenes there are some features included with possible -extensions in mind. These will be commented and documented elsewhere. 
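# A short sketch of the Vec2D conveniences mentioned above: a tuple
# subclass supporting +, -, scalar/inner products, abs() and rotate().
from turtle import Vec2D

a, b = Vec2D(3, 4), Vec2D(1, 0)
assert a + b == (4, 4)
assert a - b == (2, 4)
assert a * b == 3              # inner product
assert abs(a) == 5.0
print(b.rotate(90))            # (0.00,1.00) -- 90 deg counterclockwise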
-""" - -import tkinter as TK -import types -import math -import time -import inspect -import sys - -from os.path import isfile, split, join -from copy import deepcopy -from tkinter import simpledialog - -_tg_classes = ['ScrolledCanvas', 'TurtleScreen', 'Screen', - 'RawTurtle', 'Turtle', 'RawPen', 'Pen', 'Shape', 'Vec2D'] -_tg_screen_functions = ['addshape', 'bgcolor', 'bgpic', 'bye', - 'clearscreen', 'colormode', 'delay', 'exitonclick', 'getcanvas', - 'getshapes', 'listen', 'mainloop', 'mode', 'numinput', - 'onkey', 'onkeypress', 'onkeyrelease', 'onscreenclick', 'ontimer', - 'register_shape', 'resetscreen', 'screensize', 'setup', - 'setworldcoordinates', 'textinput', 'title', 'tracer', 'turtles', 'update', - 'window_height', 'window_width'] -_tg_turtle_functions = ['back', 'backward', 'begin_fill', 'begin_poly', 'bk', - 'circle', 'clear', 'clearstamp', 'clearstamps', 'clone', 'color', - 'degrees', 'distance', 'dot', 'down', 'end_fill', 'end_poly', 'fd', - 'fillcolor', 'filling', 'forward', 'get_poly', 'getpen', 'getscreen', 'get_shapepoly', - 'getturtle', 'goto', 'heading', 'hideturtle', 'home', 'ht', 'isdown', - 'isvisible', 'left', 'lt', 'onclick', 'ondrag', 'onrelease', 'pd', - 'pen', 'pencolor', 'pendown', 'pensize', 'penup', 'pos', 'position', - 'pu', 'radians', 'right', 'reset', 'resizemode', 'rt', - 'seth', 'setheading', 'setpos', 'setposition', - 'setundobuffer', 'setx', 'sety', 'shape', 'shapesize', 'shapetransform', 'shearfactor', 'showturtle', - 'speed', 'st', 'stamp', 'teleport', 'tilt', 'tiltangle', 'towards', - 'turtlesize', 'undo', 'undobufferentries', 'up', 'width', - 'write', 'xcor', 'ycor'] -_tg_utilities = ['write_docstringdict', 'done'] - -__all__ = (_tg_classes + _tg_screen_functions + _tg_turtle_functions + - _tg_utilities + ['Terminator']) - -_alias_list = ['addshape', 'backward', 'bk', 'fd', 'ht', 'lt', 'pd', 'pos', - 'pu', 'rt', 'seth', 'setpos', 'setposition', 'st', - 'turtlesize', 'up', 'width'] - -_CFG = {"width" : 0.5, # Screen - "height" : 0.75, - "canvwidth" : 400, - "canvheight": 300, - "leftright": None, - "topbottom": None, - "mode": "standard", # TurtleScreen - "colormode": 1.0, - "delay": 10, - "undobuffersize": 1000, # RawTurtle - "shape": "classic", - "pencolor" : "black", - "fillcolor" : "black", - "resizemode" : "noresize", - "visible" : True, - "language": "english", # docstrings - "exampleturtle": "turtle", - "examplescreen": "screen", - "title": "Python Turtle Graphics", - "using_IDLE": False - } - -def config_dict(filename): - """Convert content of config-file into dictionary.""" - with open(filename, "r") as f: - cfglines = f.readlines() - cfgdict = {} - for line in cfglines: - line = line.strip() - if not line or line.startswith("#"): - continue - try: - key, value = line.split("=") - except ValueError: - print("Bad line in config-file %s:\n%s" % (filename,line)) - continue - key = key.strip() - value = value.strip() - if value in ["True", "False", "None", "''", '""']: - value = eval(value) - else: - try: - if "." in value: - value = float(value) - else: - value = int(value) - except ValueError: - pass # value need not be converted - cfgdict[key] = value - return cfgdict - -def readconfig(cfgdict): - """Read config-files, change configuration-dict accordingly. - - If there is a turtle.cfg file in the current working directory, - read it from there. If this contains an importconfig-value, - say 'myway', construct filename turtle_mayway.cfg else use - turtle.cfg and read it from the import-directory, where - turtle.py is located. 
- Update configuration dictionary first according to config-file, - in the import directory, then according to config-file in the - current working directory. - If no config-file is found, the default configuration is used. - """ - default_cfg = "turtle.cfg" - cfgdict1 = {} - cfgdict2 = {} - if isfile(default_cfg): - cfgdict1 = config_dict(default_cfg) - if "importconfig" in cfgdict1: - default_cfg = "turtle_%s.cfg" % cfgdict1["importconfig"] - try: - head, tail = split(__file__) - cfg_file2 = join(head, default_cfg) - except Exception: - cfg_file2 = "" - if isfile(cfg_file2): - cfgdict2 = config_dict(cfg_file2) - _CFG.update(cfgdict2) - _CFG.update(cfgdict1) - -try: - readconfig(_CFG) -except Exception: - print ("No configfile read, reason unknown") - - -class Vec2D(tuple): - """A 2 dimensional vector class, used as a helper class - for implementing turtle graphics. - May be useful for turtle graphics programs also. - Derived from tuple, so a vector is a tuple! - - Provides (for a, b vectors, k number): - a+b vector addition - a-b vector subtraction - a*b inner product - k*a and a*k multiplication with scalar - |a| absolute value of a - a.rotate(angle) rotation - """ - def __new__(cls, x, y): - return tuple.__new__(cls, (x, y)) - def __add__(self, other): - return Vec2D(self[0]+other[0], self[1]+other[1]) - def __mul__(self, other): - if isinstance(other, Vec2D): - return self[0]*other[0]+self[1]*other[1] - return Vec2D(self[0]*other, self[1]*other) - def __rmul__(self, other): - if isinstance(other, int) or isinstance(other, float): - return Vec2D(self[0]*other, self[1]*other) - return NotImplemented - def __sub__(self, other): - return Vec2D(self[0]-other[0], self[1]-other[1]) - def __neg__(self): - return Vec2D(-self[0], -self[1]) - def __abs__(self): - return math.hypot(*self) - def rotate(self, angle): - """rotate self counterclockwise by angle - """ - perp = Vec2D(-self[1], self[0]) - angle = math.radians(angle) - c, s = math.cos(angle), math.sin(angle) - return Vec2D(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s) - def __getnewargs__(self): - return (self[0], self[1]) - def __repr__(self): - return "(%.2f,%.2f)" % self - - -############################################################################## -### From here up to line : Tkinter - Interface for turtle.py ### -### May be replaced by an interface to some different graphics toolkit ### -############################################################################## - -## helper functions for Scrolled Canvas, to forward Canvas-methods -## to ScrolledCanvas class - -def __methodDict(cls, _dict): - """helper function for Scrolled Canvas""" - baseList = list(cls.__bases__) - baseList.reverse() - for _super in baseList: - __methodDict(_super, _dict) - for key, value in cls.__dict__.items(): - if type(value) == types.FunctionType: - _dict[key] = value - -def __methods(cls): - """helper function for Scrolled Canvas""" - _dict = {} - __methodDict(cls, _dict) - return _dict.keys() - -__stringBody = ( - 'def %(method)s(self, *args, **kw): return ' + - 'self.%(attribute)s.%(method)s(*args, **kw)') - -def __forwardmethods(fromClass, toClass, toPart, exclude = ()): - ### MANY CHANGES ### - _dict_1 = {} - __methodDict(toClass, _dict_1) - _dict = {} - mfc = __methods(fromClass) - for ex in _dict_1.keys(): - if ex[:1] == '_' or ex[-1:] == '_' or ex in exclude or ex in mfc: - pass - else: - _dict[ex] = _dict_1[ex] - - for method, func in _dict.items(): - d = {'method': method, 'func': func} - if isinstance(toPart, str): - execString = \ - 
__stringBody % {'method' : method, 'attribute' : toPart} - exec(execString, d) - setattr(fromClass, method, d[method]) ### NEWU! - - -class ScrolledCanvas(TK.Frame): - """Modeled after the scrolled canvas class from Grayons's Tkinter book. - - Used as the default canvas, which pops up automatically when - using turtle graphics functions or the Turtle class. - """ - def __init__(self, master, width=500, height=350, - canvwidth=600, canvheight=500): - TK.Frame.__init__(self, master, width=width, height=height) - self._rootwindow = self.winfo_toplevel() - self.width, self.height = width, height - self.canvwidth, self.canvheight = canvwidth, canvheight - self.bg = "white" - self._canvas = TK.Canvas(master, width=width, height=height, - bg=self.bg, relief=TK.SUNKEN, borderwidth=2) - self.hscroll = TK.Scrollbar(master, command=self._canvas.xview, - orient=TK.HORIZONTAL) - self.vscroll = TK.Scrollbar(master, command=self._canvas.yview) - self._canvas.configure(xscrollcommand=self.hscroll.set, - yscrollcommand=self.vscroll.set) - self.rowconfigure(0, weight=1, minsize=0) - self.columnconfigure(0, weight=1, minsize=0) - self._canvas.grid(padx=1, in_ = self, pady=1, row=0, - column=0, rowspan=1, columnspan=1, sticky='news') - self.vscroll.grid(padx=1, in_ = self, pady=1, row=0, - column=1, rowspan=1, columnspan=1, sticky='news') - self.hscroll.grid(padx=1, in_ = self, pady=1, row=1, - column=0, rowspan=1, columnspan=1, sticky='news') - self.reset() - self._rootwindow.bind('', self.onResize) - - def reset(self, canvwidth=None, canvheight=None, bg = None): - """Adjust canvas and scrollbars according to given canvas size.""" - if canvwidth: - self.canvwidth = canvwidth - if canvheight: - self.canvheight = canvheight - if bg: - self.bg = bg - self._canvas.config(bg=bg, - scrollregion=(-self.canvwidth//2, -self.canvheight//2, - self.canvwidth//2, self.canvheight//2)) - self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) / - self.canvwidth) - self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) / - self.canvheight) - self.adjustScrolls() - - - def adjustScrolls(self): - """ Adjust scrollbars according to window- and canvas-size. - """ - cwidth = self._canvas.winfo_width() - cheight = self._canvas.winfo_height() - self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth) - self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight) - if cwidth < self.canvwidth or cheight < self.canvheight: - self.hscroll.grid(padx=1, in_ = self, pady=1, row=1, - column=0, rowspan=1, columnspan=1, sticky='news') - self.vscroll.grid(padx=1, in_ = self, pady=1, row=0, - column=1, rowspan=1, columnspan=1, sticky='news') - else: - self.hscroll.grid_forget() - self.vscroll.grid_forget() - - def onResize(self, event): - """self-explanatory""" - self.adjustScrolls() - - def bbox(self, *args): - """ 'forward' method, which canvas itself has inherited... - """ - return self._canvas.bbox(*args) - - def cget(self, *args, **kwargs): - """ 'forward' method, which canvas itself has inherited... - """ - return self._canvas.cget(*args, **kwargs) - - def config(self, *args, **kwargs): - """ 'forward' method, which canvas itself has inherited... - """ - self._canvas.config(*args, **kwargs) - - def bind(self, *args, **kwargs): - """ 'forward' method, which canvas itself has inherited... - """ - self._canvas.bind(*args, **kwargs) - - def unbind(self, *args, **kwargs): - """ 'forward' method, which canvas itself has inherited... 
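# A self-contained sketch of the exec-based forwarding performed by
# __forwardmethods above: render a method body from the template string,
# exec it into a namespace, and attach the result to the outer class.
# (ScrolledCanvas itself also binds '<Configure>' on the root window so
# that resizes trigger adjustScrolls via onResize.)
_body = ('def %(method)s(self, *args, **kw): '
         'return self.%(attribute)s.%(method)s(*args, **kw)')

class Inner:
    def area(self):
        return 42

class Outer:
    def __init__(self):
        self._inner = Inner()

ns = {}
exec(_body % {'method': 'area', 'attribute': '_inner'}, ns)
setattr(Outer, 'area', ns['area'])
assert Outer().area() == 42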
- """ - self._canvas.unbind(*args, **kwargs) - - def focus_force(self): - """ 'forward' method, which canvas itself has inherited... - """ - self._canvas.focus_force() - -__forwardmethods(ScrolledCanvas, TK.Canvas, '_canvas') - - -class _Root(TK.Tk): - """Root class for Screen based on Tkinter.""" - def __init__(self): - TK.Tk.__init__(self) - - def setupcanvas(self, width, height, cwidth, cheight): - self._canvas = ScrolledCanvas(self, width, height, cwidth, cheight) - self._canvas.pack(expand=1, fill="both") - - def _getcanvas(self): - return self._canvas - - def set_geometry(self, width, height, startx, starty): - self.geometry("%dx%d%+d%+d"%(width, height, startx, starty)) - - def ondestroy(self, destroy): - self.wm_protocol("WM_DELETE_WINDOW", destroy) - - def win_width(self): - return self.winfo_screenwidth() - - def win_height(self): - return self.winfo_screenheight() - -Canvas = TK.Canvas - - -class TurtleScreenBase(object): - """Provide the basic graphics functionality. - Interface between Tkinter and turtle.py. - - To port turtle.py to some different graphics toolkit - a corresponding TurtleScreenBase class has to be implemented. - """ - - def _blankimage(self): - """return a blank image object - """ - img = TK.PhotoImage(width=1, height=1, master=self.cv) - img.blank() - return img - - def _image(self, filename): - """return an image object containing the - imagedata from a gif-file named filename. - """ - return TK.PhotoImage(file=filename, master=self.cv) - - def __init__(self, cv): - self.cv = cv - if isinstance(cv, ScrolledCanvas): - w = self.cv.canvwidth - h = self.cv.canvheight - else: # expected: ordinary TK.Canvas - w = int(self.cv.cget("width")) - h = int(self.cv.cget("height")) - self.cv.config(scrollregion = (-w//2, -h//2, w//2, h//2 )) - self.canvwidth = w - self.canvheight = h - self.xscale = self.yscale = 1.0 - - def _createpoly(self): - """Create an invisible polygon item on canvas self.cv) - """ - return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="") - - def _drawpoly(self, polyitem, coordlist, fill=None, - outline=None, width=None, top=False): - """Configure polygonitem polyitem according to provided - arguments: - coordlist is sequence of coordinates - fill is filling color - outline is outline color - top is a boolean value, which specifies if polyitem - will be put on top of the canvas' displaylist so it - will not be covered by other items. - """ - cl = [] - for x, y in coordlist: - cl.append(x * self.xscale) - cl.append(-y * self.yscale) - self.cv.coords(polyitem, *cl) - if fill is not None: - self.cv.itemconfigure(polyitem, fill=fill) - if outline is not None: - self.cv.itemconfigure(polyitem, outline=outline) - if width is not None: - self.cv.itemconfigure(polyitem, width=width) - if top: - self.cv.tag_raise(polyitem) - - def _createline(self): - """Create an invisible line item on canvas self.cv) - """ - return self.cv.create_line(0, 0, 0, 0, fill="", width=2, - capstyle = TK.ROUND) - - def _drawline(self, lineitem, coordlist=None, - fill=None, width=None, top=False): - """Configure lineitem according to provided arguments: - coordlist is sequence of coordinates - fill is drawing color - width is width of drawn line. - top is a boolean value, which specifies if polyitem - will be put on top of the canvas' displaylist so it - will not be covered by other items. 
- """ - if coordlist is not None: - cl = [] - for x, y in coordlist: - cl.append(x * self.xscale) - cl.append(-y * self.yscale) - self.cv.coords(lineitem, *cl) - if fill is not None: - self.cv.itemconfigure(lineitem, fill=fill) - if width is not None: - self.cv.itemconfigure(lineitem, width=width) - if top: - self.cv.tag_raise(lineitem) - - def _delete(self, item): - """Delete graphics item from canvas. - If item is"all" delete all graphics items. - """ - self.cv.delete(item) - - def _update(self): - """Redraw graphics items on canvas - """ - self.cv.update() - - def _delay(self, delay): - """Delay subsequent canvas actions for delay ms.""" - self.cv.after(delay) - - def _iscolorstring(self, color): - """Check if the string color is a legal Tkinter color string. - """ - try: - rgb = self.cv.winfo_rgb(color) - ok = True - except TK.TclError: - ok = False - return ok - - def _bgcolor(self, color=None): - """Set canvas' backgroundcolor if color is not None, - else return backgroundcolor.""" - if color is not None: - self.cv.config(bg = color) - self._update() - else: - return self.cv.cget("bg") - - def _write(self, pos, txt, align, font, pencolor): - """Write txt at pos in canvas with specified font - and color. - Return text item and x-coord of right bottom corner - of text's bounding box.""" - x, y = pos - x = x * self.xscale - y = y * self.yscale - anchor = {"left":"sw", "center":"s", "right":"se" } - item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align], - fill = pencolor, font = font) - x0, y0, x1, y1 = self.cv.bbox(item) - return item, x1-1 - - def _onclick(self, item, fun, num=1, add=None): - """Bind fun to mouse-click event on turtle. - fun must be a function with two arguments, the coordinates - of the clicked point on the canvas. - num, the number of the mouse-button defaults to 1 - """ - if fun is None: - self.cv.tag_unbind(item, "" % num) - else: - def eventfun(event): - x, y = (self.cv.canvasx(event.x)/self.xscale, - -self.cv.canvasy(event.y)/self.yscale) - fun(x, y) - self.cv.tag_bind(item, "" % num, eventfun, add) - - def _onrelease(self, item, fun, num=1, add=None): - """Bind fun to mouse-button-release event on turtle. - fun must be a function with two arguments, the coordinates - of the point on the canvas where mouse button is released. - num, the number of the mouse-button defaults to 1 - - If a turtle is clicked, first _onclick-event will be performed, - then _onscreensclick-event. - """ - if fun is None: - self.cv.tag_unbind(item, "" % num) - else: - def eventfun(event): - x, y = (self.cv.canvasx(event.x)/self.xscale, - -self.cv.canvasy(event.y)/self.yscale) - fun(x, y) - self.cv.tag_bind(item, "" % num, - eventfun, add) - - def _ondrag(self, item, fun, num=1, add=None): - """Bind fun to mouse-move-event (with pressed mouse button) on turtle. - fun must be a function with two arguments, the coordinates of the - actual mouse position on the canvas. - num, the number of the mouse-button defaults to 1 - - Every sequence of mouse-move-events on a turtle is preceded by a - mouse-click event on that turtle. - """ - if fun is None: - self.cv.tag_unbind(item, "" % num) - else: - def eventfun(event): - try: - x, y = (self.cv.canvasx(event.x)/self.xscale, - -self.cv.canvasy(event.y)/self.yscale) - fun(x, y) - except Exception: - pass - self.cv.tag_bind(item, "" % num, eventfun, add) - - def _onscreenclick(self, fun, num=1, add=None): - """Bind fun to mouse-click event on canvas. 
- fun must be a function with two arguments, the coordinates - of the clicked point on the canvas. - num, the number of the mouse-button defaults to 1 - - If a turtle is clicked, first _onclick-event will be performed, - then _onscreensclick-event. - """ - if fun is None: - self.cv.unbind("" % num) - else: - def eventfun(event): - x, y = (self.cv.canvasx(event.x)/self.xscale, - -self.cv.canvasy(event.y)/self.yscale) - fun(x, y) - self.cv.bind("" % num, eventfun, add) - - def _onkeyrelease(self, fun, key): - """Bind fun to key-release event of key. - Canvas must have focus. See method listen - """ - if fun is None: - self.cv.unbind("" % key, None) - else: - def eventfun(event): - fun() - self.cv.bind("" % key, eventfun) - - def _onkeypress(self, fun, key=None): - """If key is given, bind fun to key-press event of key. - Otherwise bind fun to any key-press. - Canvas must have focus. See method listen. - """ - if fun is None: - if key is None: - self.cv.unbind("", None) - else: - self.cv.unbind("" % key, None) - else: - def eventfun(event): - fun() - if key is None: - self.cv.bind("", eventfun) - else: - self.cv.bind("" % key, eventfun) - - def _listen(self): - """Set focus on canvas (in order to collect key-events) - """ - self.cv.focus_force() - - def _ontimer(self, fun, t): - """Install a timer, which calls fun after t milliseconds. - """ - if t == 0: - self.cv.after_idle(fun) - else: - self.cv.after(t, fun) - - def _createimage(self, image): - """Create and return image item on canvas. - """ - return self.cv.create_image(0, 0, image=image) - - def _drawimage(self, item, pos, image): - """Configure image item as to draw image object - at position (x,y) on canvas) - """ - x, y = pos - self.cv.coords(item, (x * self.xscale, -y * self.yscale)) - self.cv.itemconfig(item, image=image) - - def _setbgpic(self, item, image): - """Configure image item as to draw image object - at center of canvas. Set item to the first item - in the displaylist, so it will be drawn below - any other item .""" - self.cv.itemconfig(item, image=image) - self.cv.tag_lower(item) - - def _type(self, item): - """Return 'line' or 'polygon' or 'image' depending on - type of item. - """ - return self.cv.type(item) - - def _pointlist(self, item): - """returns list of coordinate-pairs of points of item - Example (for insiders): - >>> from turtle import * - >>> getscreen()._pointlist(getturtle().turtle._item) - [(0.0, 9.9999999999999982), (0.0, -9.9999999999999982), - (9.9999999999999982, 0.0)] - >>> """ - cl = self.cv.coords(item) - pl = [(cl[i], -cl[i+1]) for i in range(0, len(cl), 2)] - return pl - - def _setscrollregion(self, srx1, sry1, srx2, sry2): - self.cv.config(scrollregion=(srx1, sry1, srx2, sry2)) - - def _rescale(self, xscalefactor, yscalefactor): - items = self.cv.find_all() - for item in items: - coordinates = list(self.cv.coords(item)) - newcoordlist = [] - while coordinates: - x, y = coordinates[:2] - newcoordlist.append(x * xscalefactor) - newcoordlist.append(y * yscalefactor) - coordinates = coordinates[2:] - self.cv.coords(item, *newcoordlist) - - def _resize(self, canvwidth=None, canvheight=None, bg=None): - """Resize the canvas the turtles are drawing on. Does - not alter the drawing window. 
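# A sketch of the key-event side: _onkeyrelease binds "<KeyRelease-%s>"
# for a named key, while _onkeypress binds "<KeyPress-%s>" or a bare
# "<KeyPress>" to catch any key; the canvas must hold focus first
# (see listen()). bind_key is an illustrative helper.
def bind_key(canvas, fun, key=None):
    pattern = "<KeyPress>" if key is None else "<KeyPress-%s>" % key
    canvas.bind(pattern, lambda event: fun())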
- """ - # needs amendment - if not isinstance(self.cv, ScrolledCanvas): - return self.canvwidth, self.canvheight - if canvwidth is canvheight is bg is None: - return self.cv.canvwidth, self.cv.canvheight - if canvwidth is not None: - self.canvwidth = canvwidth - if canvheight is not None: - self.canvheight = canvheight - self.cv.reset(canvwidth, canvheight, bg) - - def _window_size(self): - """ Return the width and height of the turtle window. - """ - width = self.cv.winfo_width() - if width <= 1: # the window isn't managed by a geometry manager - width = self.cv['width'] - height = self.cv.winfo_height() - if height <= 1: # the window isn't managed by a geometry manager - height = self.cv['height'] - return width, height - - def mainloop(self): - """Starts event loop - calling Tkinter's mainloop function. - - No argument. - - Must be last statement in a turtle graphics program. - Must NOT be used if a script is run from within IDLE in -n mode - (No subprocess) - for interactive use of turtle graphics. - - Example (for a TurtleScreen instance named screen): - >>> screen.mainloop() - - """ - self.cv.tk.mainloop() - - def textinput(self, title, prompt): - """Pop up a dialog window for input of a string. - - Arguments: title is the title of the dialog window, - prompt is a text mostly describing what information to input. - - Return the string input - If the dialog is canceled, return None. - - Example (for a TurtleScreen instance named screen): - >>> screen.textinput("NIM", "Name of first player:") - - """ - return simpledialog.askstring(title, prompt, parent=self.cv) - - def numinput(self, title, prompt, default=None, minval=None, maxval=None): - """Pop up a dialog window for input of a number. - - Arguments: title is the title of the dialog window, - prompt is a text mostly describing what numerical information to input. - default: default value - minval: minimum value for input - maxval: maximum value for input - - The number input must be in the range minval .. maxval if these are - given. If not, a hint is issued and the dialog remains open for - correction. Return the number input. - If the dialog is canceled, return None. - - Example (for a TurtleScreen instance named screen): - >>> screen.numinput("Poker", "Your stakes:", 1000, minval=10, maxval=10000) - - """ - return simpledialog.askfloat(title, prompt, initialvalue=default, - minvalue=minval, maxvalue=maxval, - parent=self.cv) - - -############################################################################## -### End of Tkinter - interface ### -############################################################################## - - -class Terminator (Exception): - """Will be raised in TurtleScreen.update, if _RUNNING becomes False. - - This stops execution of a turtle graphics script. - Main purpose: use in the Demo-Viewer turtle.Demo.py. - """ - pass - - -class TurtleGraphicsError(Exception): - """Some TurtleGraphics Error - """ - - -class Shape(object): - """Data structure modeling shapes. - - attribute _type is one of "polygon", "image", "compound" - attribute _data is - depending on _type a poygon-tuple, - an image or a list constructed using the addcomponent method. 
- """ - def __init__(self, type_, data=None): - self._type = type_ - if type_ == "polygon": - if isinstance(data, list): - data = tuple(data) - elif type_ == "image": - if isinstance(data, str): - if data.lower().endswith(".gif") and isfile(data): - data = TurtleScreen._image(data) - # else data assumed to be PhotoImage - elif type_ == "compound": - data = [] - else: - raise TurtleGraphicsError("There is no shape type %s" % type_) - self._data = data - - def addcomponent(self, poly, fill, outline=None): - """Add component to a shape of type compound. - - Arguments: poly is a polygon, i. e. a tuple of number pairs. - fill is the fillcolor of the component, - outline is the outline color of the component. - - call (for a Shapeobject namend s): - -- s.addcomponent(((0,0), (10,10), (-10,10)), "red", "blue") - - Example: - >>> poly = ((0,0),(10,-5),(0,10),(-10,-5)) - >>> s = Shape("compound") - >>> s.addcomponent(poly, "red", "blue") - >>> # .. add more components and then use register_shape() - """ - if self._type != "compound": - raise TurtleGraphicsError("Cannot add component to %s Shape" - % self._type) - if outline is None: - outline = fill - self._data.append([poly, fill, outline]) - - -class Tbuffer(object): - """Ring buffer used as undobuffer for RawTurtle objects.""" - def __init__(self, bufsize=10): - self.bufsize = bufsize - self.buffer = [[None]] * bufsize - self.ptr = -1 - self.cumulate = False - def reset(self, bufsize=None): - if bufsize is None: - for i in range(self.bufsize): - self.buffer[i] = [None] - else: - self.bufsize = bufsize - self.buffer = [[None]] * bufsize - self.ptr = -1 - def push(self, item): - if self.bufsize > 0: - if not self.cumulate: - self.ptr = (self.ptr + 1) % self.bufsize - self.buffer[self.ptr] = item - else: - self.buffer[self.ptr].append(item) - def pop(self): - if self.bufsize > 0: - item = self.buffer[self.ptr] - if item is None: - return None - else: - self.buffer[self.ptr] = [None] - self.ptr = (self.ptr - 1) % self.bufsize - return (item) - def nr_of_items(self): - return self.bufsize - self.buffer.count([None]) - def __repr__(self): - return str(self.buffer) + " " + str(self.ptr) - - - -class TurtleScreen(TurtleScreenBase): - """Provides screen oriented methods like bgcolor etc. - - Only relies upon the methods of TurtleScreenBase and NOT - upon components of the underlying graphics toolkit - - which is Tkinter in this case. 
- """ - _RUNNING = True - - def __init__(self, cv, mode=_CFG["mode"], - colormode=_CFG["colormode"], delay=_CFG["delay"]): - TurtleScreenBase.__init__(self, cv) - - self._shapes = { - "arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))), - "turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7), - (-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6), - (-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6), - (5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10), - (2,14))), - "circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88), - (5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51), - (-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0), - (-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09), - (-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51), - (5.88,-8.09), (8.09,-5.88), (9.51,-3.09))), - "square" : Shape("polygon", ((10,-10), (10,10), (-10,10), - (-10,-10))), - "triangle" : Shape("polygon", ((10,-5.77), (0,11.55), - (-10,-5.77))), - "classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))), - "blank" : Shape("image", self._blankimage()) - } - - self._bgpics = {"nopic" : ""} - - self._mode = mode - self._delayvalue = delay - self._colormode = _CFG["colormode"] - self._keys = [] - self.clear() - if sys.platform == 'darwin': - # Force Turtle window to the front on OS X. This is needed because - # the Turtle window will show behind the Terminal window when you - # start the demo from the command line. - rootwindow = cv.winfo_toplevel() - rootwindow.call('wm', 'attributes', '.', '-topmost', '1') - rootwindow.call('wm', 'attributes', '.', '-topmost', '0') - - def clear(self): - """Delete all drawings and all turtles from the TurtleScreen. - - No argument. - - Reset empty TurtleScreen to its initial state: white background, - no backgroundimage, no eventbindings and tracing on. - - Example (for a TurtleScreen instance named screen): - >>> screen.clear() - - Note: this method is not available as function. - """ - self._delayvalue = _CFG["delay"] - self._colormode = _CFG["colormode"] - self._delete("all") - self._bgpic = self._createimage("") - self._bgpicname = "nopic" - self._tracing = 1 - self._updatecounter = 0 - self._turtles = [] - self.bgcolor("white") - for btn in 1, 2, 3: - self.onclick(None, btn) - self.onkeypress(None) - for key in self._keys[:]: - self.onkey(None, key) - self.onkeypress(None, key) - Turtle._pen = None - - def mode(self, mode=None): - """Set turtle-mode ('standard', 'logo' or 'world') and perform reset. - - Optional argument: - mode -- one of the strings 'standard', 'logo' or 'world' - - Mode 'standard' is compatible with turtle.py. - Mode 'logo' is compatible with most Logo-Turtle-Graphics. - Mode 'world' uses userdefined 'worldcoordinates'. *Attention*: in - this mode angles appear distorted if x/y unit-ratio doesn't equal 1. - If mode is not given, return the current mode. 
- - Mode Initial turtle heading positive angles - ------------|-------------------------|------------------- - 'standard' to the right (east) counterclockwise - 'logo' upward (north) clockwise - - Examples: - >>> mode('logo') # resets turtle heading to north - >>> mode() - 'logo' - """ - if mode is None: - return self._mode - mode = mode.lower() - if mode not in ["standard", "logo", "world"]: - raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode) - self._mode = mode - if mode in ["standard", "logo"]: - self._setscrollregion(-self.canvwidth//2, -self.canvheight//2, - self.canvwidth//2, self.canvheight//2) - self.xscale = self.yscale = 1.0 - self.reset() - - def setworldcoordinates(self, llx, lly, urx, ury): - """Set up a user defined coordinate-system. - - Arguments: - llx -- a number, x-coordinate of lower left corner of canvas - lly -- a number, y-coordinate of lower left corner of canvas - urx -- a number, x-coordinate of upper right corner of canvas - ury -- a number, y-coordinate of upper right corner of canvas - - Set up user coodinat-system and switch to mode 'world' if necessary. - This performs a screen.reset. If mode 'world' is already active, - all drawings are redrawn according to the new coordinates. - - But ATTENTION: in user-defined coordinatesystems angles may appear - distorted. (see Screen.mode()) - - Example (for a TurtleScreen instance named screen): - >>> screen.setworldcoordinates(-10,-0.5,50,1.5) - >>> for _ in range(36): - ... left(10) - ... forward(0.5) - """ - if self.mode() != "world": - self.mode("world") - xspan = float(urx - llx) - yspan = float(ury - lly) - wx, wy = self._window_size() - self.screensize(wx-20, wy-20) - oldxscale, oldyscale = self.xscale, self.yscale - self.xscale = self.canvwidth / xspan - self.yscale = self.canvheight / yspan - srx1 = llx * self.xscale - sry1 = -ury * self.yscale - srx2 = self.canvwidth + srx1 - sry2 = self.canvheight + sry1 - self._setscrollregion(srx1, sry1, srx2, sry2) - self._rescale(self.xscale/oldxscale, self.yscale/oldyscale) - self.update() - - def register_shape(self, name, shape=None): - """Adds a turtle shape to TurtleScreen's shapelist. - - Arguments: - (1) name is the name of a gif-file and shape is None. - Installs the corresponding image shape. - !! Image-shapes DO NOT rotate when turning the turtle, - !! so they do not display the heading of the turtle! - (2) name is an arbitrary string and shape is a tuple - of pairs of coordinates. Installs the corresponding - polygon shape - (3) name is an arbitrary string and shape is a - (compound) Shape object. Installs the corresponding - compound shape. - To use a shape, you have to issue the command shape(shapename). - - call: register_shape("turtle.gif") - --or: register_shape("tri", ((0,0), (10,10), (-10,10))) - - Example (for a TurtleScreen instance named screen): - >>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3))) - - """ - if shape is None: - # image - if name.lower().endswith(".gif"): - shape = Shape("image", self._image(name)) - else: - raise TurtleGraphicsError("Bad arguments for register_shape.\n" - + "Use help(register_shape)" ) - elif isinstance(shape, tuple): - shape = Shape("polygon", shape) - ## else shape assumed to be Shape-instance - self._shapes[name] = shape - - def _colorstr(self, color): - """Return color string corresponding to args. - - Argument may be a string or a tuple of three - numbers corresponding to actual colormode, - i.e. in the range 0<=n<=colormode. 
- - If the argument doesn't represent a color, - an error is raised. - """ - if len(color) == 1: - color = color[0] - if isinstance(color, str): - if self._iscolorstring(color) or color == "": - return color - else: - raise TurtleGraphicsError("bad color string: %s" % str(color)) - try: - r, g, b = color - except (TypeError, ValueError): - raise TurtleGraphicsError("bad color arguments: %s" % str(color)) - if self._colormode == 1.0: - r, g, b = [round(255.0*x) for x in (r, g, b)] - if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)): - raise TurtleGraphicsError("bad color sequence: %s" % str(color)) - return "#%02x%02x%02x" % (r, g, b) - - def _color(self, cstr): - if not cstr.startswith("#"): - return cstr - if len(cstr) == 7: - cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)] - elif len(cstr) == 4: - cl = [16*int(cstr[h], 16) for h in cstr[1:]] - else: - raise TurtleGraphicsError("bad colorstring: %s" % cstr) - return tuple(c * self._colormode/255 for c in cl) - - def colormode(self, cmode=None): - """Return the colormode or set it to 1.0 or 255. - - Optional argument: - cmode -- one of the values 1.0 or 255 - - r, g, b values of colortriples have to be in range 0..cmode. - - Example (for a TurtleScreen instance named screen): - >>> screen.colormode() - 1.0 - >>> screen.colormode(255) - >>> pencolor(240,160,80) - """ - if cmode is None: - return self._colormode - if cmode == 1.0: - self._colormode = float(cmode) - elif cmode == 255: - self._colormode = int(cmode) - - def reset(self): - """Reset all Turtles on the Screen to their initial state. - - No argument. - - Example (for a TurtleScreen instance named screen): - >>> screen.reset() - """ - for turtle in self._turtles: - turtle._setmode(self._mode) - turtle.reset() - - def turtles(self): - """Return the list of turtles on the screen. - - Example (for a TurtleScreen instance named screen): - >>> screen.turtles() - [] - """ - return self._turtles - - def bgcolor(self, *args): - """Set or return backgroundcolor of the TurtleScreen. - - Four input formats are allowed: - - bgcolor() - Return the current background color as color specification - string or as a tuple (see example). May be used as input - to another color/pencolor/fillcolor/bgcolor call. - - bgcolor(colorstring) - Set the background color to colorstring, which is a Tk color - specification string, such as "red", "yellow", or "#33cc8c". - - bgcolor((r, g, b)) - Set the background color to the RGB color represented by - the tuple of r, g, and b. Each of r, g, and b must be in - the range 0..colormode, where colormode is either 1.0 or 255 - (see colormode()). - - bgcolor(r, g, b) - Set the background color to the RGB color represented by - r, g, and b. Each of r, g, and b must be in the range - 0..colormode. - - Example (for a TurtleScreen instance named screen): - >>> screen.bgcolor("orange") - >>> screen.bgcolor() - 'orange' - >>> colormode(255) - >>> screen.bgcolor('#800080') - >>> screen.bgcolor() - (128.0, 0.0, 128.0) - """ - if args: - color = self._colorstr(args) - else: - color = None - color = self._bgcolor(color) - if color is not None: - color = self._color(color) - return color - - def tracer(self, n=None, delay=None): - """Turns turtle animation on/off and set delay for update drawings. - - Optional arguments: - n -- nonnegative integer - delay -- nonnegative integer - - If n is given, only each n-th regular screen update is really performed. - (Can be used to accelerate the drawing of complex graphics.) 
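# A standalone sketch of the colormode handling in _colorstr above: in
# colormode 1.0 each channel is scaled up to 0..255 before being
# formatted as a Tk "#rrggbb" string. (colorstr here is an illustrative
# re-statement of that arithmetic, not the module's API.)
def colorstr(color, colormode=1.0):
    r, g, b = color
    if colormode == 1.0:
        r, g, b = (round(255.0 * x) for x in (r, g, b))
    return "#%02x%02x%02x" % (r, g, b)

assert colorstr((1.0, 0.8, 0.0)) == "#ffcc00"
assert colorstr((255, 204, 0), colormode=255) == "#ffcc00"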
- Second arguments sets delay value (see RawTurtle.delay()) - - Example (for a TurtleScreen instance named screen): - >>> screen.tracer(8, 25) - >>> dist = 2 - >>> for i in range(200): - ... fd(dist) - ... rt(90) - ... dist += 2 - """ - if n is None: - return self._tracing - self._tracing = int(n) - self._updatecounter = 0 - if delay is not None: - self._delayvalue = int(delay) - if self._tracing: - self.update() - - def delay(self, delay=None): - """ Return or set the drawing delay in milliseconds. - - Optional argument: - delay -- positive integer - - Example (for a TurtleScreen instance named screen): - >>> screen.delay(15) - >>> screen.delay() - 15 - """ - if delay is None: - return self._delayvalue - self._delayvalue = int(delay) - - def _incrementudc(self): - """Increment update counter.""" - if not TurtleScreen._RUNNING: - TurtleScreen._RUNNING = True - raise Terminator - if self._tracing > 0: - self._updatecounter += 1 - self._updatecounter %= self._tracing - - def update(self): - """Perform a TurtleScreen update. - """ - tracing = self._tracing - self._tracing = True - for t in self.turtles(): - t._update_data() - t._drawturtle() - self._tracing = tracing - self._update() - - def window_width(self): - """ Return the width of the turtle window. - - Example (for a TurtleScreen instance named screen): - >>> screen.window_width() - 640 - """ - return self._window_size()[0] - - def window_height(self): - """ Return the height of the turtle window. - - Example (for a TurtleScreen instance named screen): - >>> screen.window_height() - 480 - """ - return self._window_size()[1] - - def getcanvas(self): - """Return the Canvas of this TurtleScreen. - - No argument. - - Example (for a Screen instance named screen): - >>> cv = screen.getcanvas() - >>> cv - - """ - return self.cv - - def getshapes(self): - """Return a list of names of all currently available turtle shapes. - - No argument. - - Example (for a TurtleScreen instance named screen): - >>> screen.getshapes() - ['arrow', 'blank', 'circle', ... , 'turtle'] - """ - return sorted(self._shapes.keys()) - - def onclick(self, fun, btn=1, add=None): - """Bind fun to mouse-click event on canvas. - - Arguments: - fun -- a function with two arguments, the coordinates of the - clicked point on the canvas. - btn -- the number of the mouse-button, defaults to 1 - - Example (for a TurtleScreen instance named screen) - - >>> screen.onclick(goto) - >>> # Subsequently clicking into the TurtleScreen will - >>> # make the turtle move to the clicked point. - >>> screen.onclick(None) - """ - self._onscreenclick(fun, btn, add) - - def onkey(self, fun, key): - """Bind fun to key-release event of key. - - Arguments: - fun -- a function with no arguments - key -- a string: key (e.g. "a") or key-symbol (e.g. "space") - - In order to be able to register key-events, TurtleScreen - must have focus. (See method listen.) - - Example (for a TurtleScreen instance named screen): - - >>> def f(): - ... fd(50) - ... lt(60) - ... - >>> screen.onkey(f, "Up") - >>> screen.listen() - - Subsequently the turtle can be moved by repeatedly pressing - the up-arrow key, consequently drawing a hexagon - - """ - if fun is None: - if key in self._keys: - self._keys.remove(key) - elif key not in self._keys: - self._keys.append(key) - self._onkeyrelease(fun, key) - - def onkeypress(self, fun, key=None): - """Bind fun to key-press event of key if key is given, - or to any key-press-event if no key is given. 
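# Typical use of the tracer()/update() pair documented above: disable
# animation, draw in bulk, then render a single final frame. (Requires
# a Tk display; this is ordinary public turtle API.)
import turtle

screen = turtle.Screen()
screen.tracer(0)                # no intermediate frames
t = turtle.Turtle()
for _ in range(36):
    t.forward(100)
    t.backward(100)
    t.left(10)
screen.update()                 # draw everything at once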
- - Arguments: - fun -- a function with no arguments - key -- a string: key (e.g. "a") or key-symbol (e.g. "space") - - In order to be able to register key-events, TurtleScreen - must have focus. (See method listen.) - - Example (for a TurtleScreen instance named screen - and a Turtle instance named turtle): - - >>> def f(): - ... fd(50) - ... lt(60) - ... - >>> screen.onkeypress(f, "Up") - >>> screen.listen() - - Subsequently the turtle can be moved by repeatedly pressing - the up-arrow key, or by keeping pressed the up-arrow key. - consequently drawing a hexagon. - """ - if fun is None: - if key in self._keys: - self._keys.remove(key) - elif key is not None and key not in self._keys: - self._keys.append(key) - self._onkeypress(fun, key) - - def listen(self, xdummy=None, ydummy=None): - """Set focus on TurtleScreen (in order to collect key-events) - - No arguments. - Dummy arguments are provided in order - to be able to pass listen to the onclick method. - - Example (for a TurtleScreen instance named screen): - >>> screen.listen() - """ - self._listen() - - def ontimer(self, fun, t=0): - """Install a timer, which calls fun after t milliseconds. - - Arguments: - fun -- a function with no arguments. - t -- a number >= 0 - - Example (for a TurtleScreen instance named screen): - - >>> running = True - >>> def f(): - ... if running: - ... fd(50) - ... lt(60) - ... screen.ontimer(f, 250) - ... - >>> f() # makes the turtle marching around - >>> running = False - """ - self._ontimer(fun, t) - - def bgpic(self, picname=None): - """Set background image or return name of current backgroundimage. - - Optional argument: - picname -- a string, name of a gif-file or "nopic". - - If picname is a filename, set the corresponding image as background. - If picname is "nopic", delete backgroundimage, if present. - If picname is None, return the filename of the current backgroundimage. - - Example (for a TurtleScreen instance named screen): - >>> screen.bgpic() - 'nopic' - >>> screen.bgpic("landscape.gif") - >>> screen.bgpic() - 'landscape.gif' - """ - if picname is None: - return self._bgpicname - if picname not in self._bgpics: - self._bgpics[picname] = self._image(picname) - self._setbgpic(self._bgpic, self._bgpics[picname]) - self._bgpicname = picname - - def screensize(self, canvwidth=None, canvheight=None, bg=None): - """Resize the canvas the turtles are drawing on. - - Optional arguments: - canvwidth -- positive integer, new width of canvas in pixels - canvheight -- positive integer, new height of canvas in pixels - bg -- colorstring or color-tuple, new backgroundcolor - If no arguments are given, return current (canvaswidth, canvasheight) - - Do not alter the drawing window. To observe hidden parts of - the canvas use the scrollbars. (Can make visible those parts - of a drawing, which were outside the canvas before!) - - Example (for a Turtle instance named turtle): - >>> turtle.screensize(2000,1500) - >>> # e.g. to search for an erroneously escaped turtle ;-) - """ - return self._resize(canvwidth, canvheight, bg) - - onscreenclick = onclick - resetscreen = reset - clearscreen = clear - addshape = register_shape - onkeyrelease = onkey - -class TNavigator(object): - """Navigation part of the RawTurtle. - Implements methods for turtle movement. 
- """ - START_ORIENTATION = { - "standard": Vec2D(1.0, 0.0), - "world" : Vec2D(1.0, 0.0), - "logo" : Vec2D(0.0, 1.0) } - DEFAULT_MODE = "standard" - DEFAULT_ANGLEOFFSET = 0 - DEFAULT_ANGLEORIENT = 1 - - def __init__(self, mode=DEFAULT_MODE): - self._angleOffset = self.DEFAULT_ANGLEOFFSET - self._angleOrient = self.DEFAULT_ANGLEORIENT - self._mode = mode - self.undobuffer = None - self.degrees() - self._mode = None - self._setmode(mode) - TNavigator.reset(self) - - def reset(self): - """reset turtle to its initial values - - Will be overwritten by parent class - """ - self._position = Vec2D(0.0, 0.0) - self._orient = TNavigator.START_ORIENTATION[self._mode] - - def _setmode(self, mode=None): - """Set turtle-mode to 'standard', 'world' or 'logo'. - """ - if mode is None: - return self._mode - if mode not in ["standard", "logo", "world"]: - return - self._mode = mode - if mode in ["standard", "world"]: - self._angleOffset = 0 - self._angleOrient = 1 - else: # mode == "logo": - self._angleOffset = self._fullcircle/4. - self._angleOrient = -1 - - def _setDegreesPerAU(self, fullcircle): - """Helper function for degrees() and radians()""" - self._fullcircle = fullcircle - self._degreesPerAU = 360/fullcircle - if self._mode == "standard": - self._angleOffset = 0 - else: - self._angleOffset = fullcircle/4. - - def degrees(self, fullcircle=360.0): - """ Set angle measurement units to degrees. - - Optional argument: - fullcircle - a number - - Set angle measurement units, i. e. set number - of 'degrees' for a full circle. Default value is - 360 degrees. - - Example (for a Turtle instance named turtle): - >>> turtle.left(90) - >>> turtle.heading() - 90 - - Change angle measurement unit to grad (also known as gon, - grade, or gradian and equals 1/100-th of the right angle.) - >>> turtle.degrees(400.0) - >>> turtle.heading() - 100 - - """ - self._setDegreesPerAU(fullcircle) - - def radians(self): - """ Set the angle measurement units to radians. - - No arguments. - - Example (for a Turtle instance named turtle): - >>> turtle.heading() - 90 - >>> turtle.radians() - >>> turtle.heading() - 1.5707963267948966 - """ - self._setDegreesPerAU(math.tau) - - def _go(self, distance): - """move turtle forward by specified distance""" - ende = self._position + self._orient * distance - self._goto(ende) - - def _rotate(self, angle): - """Turn turtle counterclockwise by specified angle if angle > 0.""" - angle *= self._degreesPerAU - self._orient = self._orient.rotate(angle) - - def _goto(self, end): - """move turtle to position end.""" - self._position = end - - def teleport(self, x=None, y=None, *, fill_gap: bool = False) -> None: - """To be overwritten by child class RawTurtle. - Includes no TPen references.""" - new_x = x if x is not None else self._position[0] - new_y = y if y is not None else self._position[1] - self._position = Vec2D(new_x, new_y) - - def forward(self, distance): - """Move the turtle forward by the specified distance. - - Aliases: forward | fd - - Argument: - distance -- a number (integer or float) - - Move the turtle forward by the specified distance, in the direction - the turtle is headed. - - Example (for a Turtle instance named turtle): - >>> turtle.position() - (0.00,0.00) - >>> turtle.forward(25) - >>> turtle.position() - (25.00,0.00) - >>> turtle.forward(-75) - >>> turtle.position() - (-50.00,0.00) - """ - self._go(distance) - - def back(self, distance): - """Move the turtle backward by distance. 
- - Aliases: back | backward | bk - - Argument: - distance -- a number - - Move the turtle backward by distance, opposite to the direction the - turtle is headed. Do not change the turtle's heading. - - Example (for a Turtle instance named turtle): - >>> turtle.position() - (0.00,0.00) - >>> turtle.backward(30) - >>> turtle.position() - (-30.00,0.00) - """ - self._go(-distance) - - def right(self, angle): - """Turn turtle right by angle units. - - Aliases: right | rt - - Argument: - angle -- a number (integer or float) - - Turn turtle right by angle units. (Units are by default degrees, - but can be set via the degrees() and radians() functions.) - Angle orientation depends on mode. (See this.) - - Example (for a Turtle instance named turtle): - >>> turtle.heading() - 22.0 - >>> turtle.right(45) - >>> turtle.heading() - 337.0 - """ - self._rotate(-angle) - - def left(self, angle): - """Turn turtle left by angle units. - - Aliases: left | lt - - Argument: - angle -- a number (integer or float) - - Turn turtle left by angle units. (Units are by default degrees, - but can be set via the degrees() and radians() functions.) - Angle orientation depends on mode. (See this.) - - Example (for a Turtle instance named turtle): - >>> turtle.heading() - 22.0 - >>> turtle.left(45) - >>> turtle.heading() - 67.0 - """ - self._rotate(angle) - - def pos(self): - """Return the turtle's current location (x,y), as a Vec2D-vector. - - Aliases: pos | position - - No arguments. - - Example (for a Turtle instance named turtle): - >>> turtle.pos() - (0.00, 240.00) - """ - return self._position - - def xcor(self): - """ Return the turtle's x coordinate. - - No arguments. - - Example (for a Turtle instance named turtle): - >>> reset() - >>> turtle.left(60) - >>> turtle.forward(100) - >>> print(turtle.xcor()) - 50.0 - """ - return self._position[0] - - def ycor(self): - """ Return the turtle's y coordinate - --- - No arguments. - - Example (for a Turtle instance named turtle): - >>> reset() - >>> turtle.left(60) - >>> turtle.forward(100) - >>> print(turtle.ycor()) - 86.6025403784 - """ - return self._position[1] - - - def goto(self, x, y=None): - """Move turtle to an absolute position. - - Aliases: setpos | setposition | goto: - - Arguments: - x -- a number or a pair/vector of numbers - y -- a number None - - call: goto(x, y) # two coordinates - --or: goto((x, y)) # a pair (tuple) of coordinates - --or: goto(vec) # e.g. as returned by pos() - - Move turtle to an absolute position. If the pen is down, - a line will be drawn. The turtle's orientation does not change. - - Example (for a Turtle instance named turtle): - >>> tp = turtle.pos() - >>> tp - (0.00,0.00) - >>> turtle.setpos(60,30) - >>> turtle.pos() - (60.00,30.00) - >>> turtle.setpos((20,80)) - >>> turtle.pos() - (20.00,80.00) - >>> turtle.setpos(tp) - >>> turtle.pos() - (0.00,0.00) - """ - if y is None: - self._goto(Vec2D(*x)) - else: - self._goto(Vec2D(x, y)) - - def home(self): - """Move turtle to the origin - coordinates (0,0). - - No arguments. - - Move turtle to the origin - coordinates (0,0) and set its - heading to its start-orientation (which depends on mode). - - Example (for a Turtle instance named turtle): - >>> turtle.home() - """ - self.goto(0, 0) - self.setheading(0) - - def setx(self, x): - """Set the turtle's first coordinate to x - - Argument: - x -- a number (integer or float) - - Set the turtle's first coordinate to x, leave second coordinate - unchanged. 
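# The xcor()/ycor() examples above are plain trigonometry: after
# left(60); forward(100) the position is (100*cos 60°, 100*sin 60°).
# A screen-free mirror of that arithmetic:

import math

angle = math.radians(60)
print(100 * math.cos(angle))   # 50.000...  (cf. the xcor() example)
print(100 * math.sin(angle))   # 86.602...  (cf. the ycor() example)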
-
-        Example (for a Turtle instance named turtle):
-        >>> turtle.position()
-        (0.00, 240.00)
-        >>> turtle.setx(10)
-        >>> turtle.position()
-        (10.00, 240.00)
-        """
-        self._goto(Vec2D(x, self._position[1]))
-
-    def sety(self, y):
-        """Set the turtle's second coordinate to y
-
-        Argument:
-        y -- a number (integer or float)
-
-        Set the turtle's second coordinate to y, first coordinate remains
-        unchanged.
-
-        Example (for a Turtle instance named turtle):
-        >>> turtle.position()
-        (0.00, 40.00)
-        >>> turtle.sety(-10)
-        >>> turtle.position()
-        (0.00, -10.00)
-        """
-        self._goto(Vec2D(self._position[0], y))
-
-    def distance(self, x, y=None):
-        """Return the distance from the turtle to (x,y) in turtle step units.
-
-        Arguments:
-        x -- a number   or  a pair/vector of numbers   or   a turtle instance
-        y -- a number       None                            None
-
-        call: distance(x, y)         # two coordinates
-        --or: distance((x, y))       # a pair (tuple) of coordinates
-        --or: distance(vec)          # e.g. as returned by pos()
-        --or: distance(mypen)        # where mypen is another turtle
-
-        Example (for a Turtle instance named turtle):
-        >>> turtle.pos()
-        (0.00,0.00)
-        >>> turtle.distance(30,40)
-        50.0
-        >>> pen = Turtle()
-        >>> pen.forward(77)
-        >>> turtle.distance(pen)
-        77.0
-        """
-        if y is not None:
-            pos = Vec2D(x, y)
-        if isinstance(x, Vec2D):
-            pos = x
-        elif isinstance(x, tuple):
-            pos = Vec2D(*x)
-        elif isinstance(x, TNavigator):
-            pos = x._position
-        return abs(pos - self._position)
-
-    def towards(self, x, y=None):
-        """Return the angle of the line from the turtle's position to (x, y).
-
-        Arguments:
-        x -- a number   or  a pair/vector of numbers   or   a turtle instance
-        y -- a number       None                            None
-
-        call: towards(x, y)         # two coordinates
-        --or: towards((x, y))       # a pair (tuple) of coordinates
-        --or: towards(vec)          # e.g. as returned by pos()
-        --or: towards(mypen)        # where mypen is another turtle
-
-        Return the angle, between the line from turtle-position to position
-        specified by x, y and the turtle's start orientation. (Depends on
-        modes - "standard" or "logo")
-
-        Example (for a Turtle instance named turtle):
-        >>> turtle.pos()
-        (10.00, 10.00)
-        >>> turtle.towards(0,0)
-        225.0
-        """
-        if y is not None:
-            pos = Vec2D(x, y)
-        if isinstance(x, Vec2D):
-            pos = x
-        elif isinstance(x, tuple):
-            pos = Vec2D(*x)
-        elif isinstance(x, TNavigator):
-            pos = x._position
-        x, y = pos - self._position
-        result = round(math.degrees(math.atan2(y, x)), 10) % 360.0
-        result /= self._degreesPerAU
-        return (self._angleOffset + self._angleOrient*result) % self._fullcircle
-
-    def heading(self):
-        """ Return the turtle's current heading.
-
-        No arguments.
-
-        Example (for a Turtle instance named turtle):
-        >>> turtle.left(67)
-        >>> turtle.heading()
-        67.0
-        """
-        x, y = self._orient
-        result = round(math.degrees(math.atan2(y, x)), 10) % 360.0
-        result /= self._degreesPerAU
-        return (self._angleOffset + self._angleOrient*result) % self._fullcircle
-
-    def setheading(self, to_angle):
-        """Set the orientation of the turtle to to_angle.
-
-        Aliases: setheading | seth
-
-        Argument:
-        to_angle -- a number (integer or float)
-
-        Set the orientation of the turtle to to_angle.
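# A screen-free mirror of distance() and towards() for the default
# "standard" mode (angle offset 0, orientation 1), matching the
# docstring examples above; `p` and `q` are (x, y) pairs.

import math

def distance(p, q):
    return math.hypot(q[0] - p[0], q[1] - p[1])

def towards(p, q):
    return math.degrees(math.atan2(q[1] - p[1], q[0] - p[0])) % 360.0

print(distance((0, 0), (30, 40)))   # 50.0
print(towards((10, 10), (0, 0)))    # 225.0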
- Here are some common directions in degrees: - - standard - mode: logo-mode: - -------------------|-------------------- - 0 - east 0 - north - 90 - north 90 - east - 180 - west 180 - south - 270 - south 270 - west - - Example (for a Turtle instance named turtle): - >>> turtle.setheading(90) - >>> turtle.heading() - 90 - """ - angle = (to_angle - self.heading())*self._angleOrient - full = self._fullcircle - angle = (angle+full/2.)%full - full/2. - self._rotate(angle) - - def circle(self, radius, extent = None, steps = None): - """ Draw a circle with given radius. - - Arguments: - radius -- a number - extent (optional) -- a number - steps (optional) -- an integer - - Draw a circle with given radius. The center is radius units left - of the turtle; extent - an angle - determines which part of the - circle is drawn. If extent is not given, draw the entire circle. - If extent is not a full circle, one endpoint of the arc is the - current pen position. Draw the arc in counterclockwise direction - if radius is positive, otherwise in clockwise direction. Finally - the direction of the turtle is changed by the amount of extent. - - As the circle is approximated by an inscribed regular polygon, - steps determines the number of steps to use. If not given, - it will be calculated automatically. Maybe used to draw regular - polygons. - - call: circle(radius) # full circle - --or: circle(radius, extent) # arc - --or: circle(radius, extent, steps) - --or: circle(radius, steps=6) # 6-sided polygon - - Example (for a Turtle instance named turtle): - >>> turtle.circle(50) - >>> turtle.circle(120, 180) # semicircle - """ - if self.undobuffer: - self.undobuffer.push(["seq"]) - self.undobuffer.cumulate = True - speed = self.speed() - if extent is None: - extent = self._fullcircle - if steps is None: - frac = abs(extent)/self._fullcircle - steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac) - w = 1.0 * extent / steps - w2 = 0.5 * w - l = 2.0 * radius * math.sin(math.radians(w2)*self._degreesPerAU) - if radius < 0: - l, w, w2 = -l, -w, -w2 - tr = self._tracer() - dl = self._delay() - if speed == 0: - self._tracer(0, 0) - else: - self.speed(0) - self._rotate(w2) - for i in range(steps): - self.speed(speed) - self._go(l) - self.speed(0) - self._rotate(w) - self._rotate(-w2) - if speed == 0: - self._tracer(tr, dl) - self.speed(speed) - if self.undobuffer: - self.undobuffer.cumulate = False - -## three dummy methods to be implemented by child class: - - def speed(self, s=0): - """dummy method - to be overwritten by child class""" - def _tracer(self, a=None, b=None): - """dummy method - to be overwritten by child class""" - def _delay(self, n=None): - """dummy method - to be overwritten by child class""" - - fd = forward - bk = back - backward = back - rt = right - lt = left - position = pos - setpos = goto - setposition = goto - seth = setheading - - -class TPen(object): - """Drawing part of the RawTurtle. - Implements drawing properties. - """ - def __init__(self, resizemode=_CFG["resizemode"]): - self._resizemode = resizemode # or "user" or "noresize" - self.undobuffer = None - TPen._reset(self) - - def _reset(self, pencolor=_CFG["pencolor"], - fillcolor=_CFG["fillcolor"]): - self._pensize = 1 - self._shown = True - self._pencolor = pencolor - self._fillcolor = fillcolor - self._drawing = True - self._speed = 3 - self._stretchfactor = (1., 1.) - self._shearfactor = 0. - self._tilt = 0. - self._shapetrafo = (1., 0., 0., 1.) 
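# A sketch of the polygon approximation used by circle() above (in the
# default degree mode): the step count follows the heuristic in the
# source, and each segment is a chord of the inscribed polygon.

import math

def circle_steps(radius, extent, fullcircle=360.0):
    frac = abs(extent) / fullcircle
    return 1 + int(min(11 + abs(radius) / 6.0, 59.0) * frac)

def chord_length(radius, extent, steps):
    w2 = 0.5 * extent / steps          # half of the per-step turn
    return 2.0 * radius * math.sin(math.radians(w2))

steps = circle_steps(50, 360)          # 20 segments for radius 50
print(steps, chord_length(50, 360, steps))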
- self._outlinewidth = 1 - - def resizemode(self, rmode=None): - """Set resizemode to one of the values: "auto", "user", "noresize". - - (Optional) Argument: - rmode -- one of the strings "auto", "user", "noresize" - - Different resizemodes have the following effects: - - "auto" adapts the appearance of the turtle - corresponding to the value of pensize. - - "user" adapts the appearance of the turtle according to the - values of stretchfactor and outlinewidth (outline), - which are set by shapesize() - - "noresize" no adaption of the turtle's appearance takes place. - If no argument is given, return current resizemode. - resizemode("user") is called by a call of shapesize with arguments. - - - Examples (for a Turtle instance named turtle): - >>> turtle.resizemode("noresize") - >>> turtle.resizemode() - 'noresize' - """ - if rmode is None: - return self._resizemode - rmode = rmode.lower() - if rmode in ["auto", "user", "noresize"]: - self.pen(resizemode=rmode) - - def pensize(self, width=None): - """Set or return the line thickness. - - Aliases: pensize | width - - Argument: - width -- positive number - - Set the line thickness to width or return it. If resizemode is set - to "auto" and turtleshape is a polygon, that polygon is drawn with - the same line thickness. If no argument is given, current pensize - is returned. - - Example (for a Turtle instance named turtle): - >>> turtle.pensize() - 1 - >>> turtle.pensize(10) # from here on lines of width 10 are drawn - """ - if width is None: - return self._pensize - self.pen(pensize=width) - - - def penup(self): - """Pull the pen up -- no drawing when moving. - - Aliases: penup | pu | up - - No argument - - Example (for a Turtle instance named turtle): - >>> turtle.penup() - """ - if not self._drawing: - return - self.pen(pendown=False) - - def pendown(self): - """Pull the pen down -- drawing when moving. - - Aliases: pendown | pd | down - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.pendown() - """ - if self._drawing: - return - self.pen(pendown=True) - - def isdown(self): - """Return True if pen is down, False if it's up. - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.penup() - >>> turtle.isdown() - False - >>> turtle.pendown() - >>> turtle.isdown() - True - """ - return self._drawing - - def speed(self, speed=None): - """ Return or set the turtle's speed. - - Optional argument: - speed -- an integer in the range 0..10 or a speedstring (see below) - - Set the turtle's speed to an integer value in the range 0 .. 10. - If no argument is given: return current speed. - - If input is a number greater than 10 or smaller than 0.5, - speed is set to 0. - Speedstrings are mapped to speedvalues in the following way: - 'fastest' : 0 - 'fast' : 10 - 'normal' : 6 - 'slow' : 3 - 'slowest' : 1 - speeds from 1 to 10 enforce increasingly faster animation of - line drawing and turtle turning. - - Attention: - speed = 0 : *no* animation takes place. forward/back makes turtle jump - and likewise left/right make the turtle turn instantly. - - Example (for a Turtle instance named turtle): - >>> turtle.speed(3) - """ - speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 } - if speed is None: - return self._speed - if speed in speeds: - speed = speeds[speed] - elif 0.5 < speed < 10.5: - speed = int(round(speed)) - else: - speed = 0 - self.pen(speed=speed) - - def color(self, *args): - """Return or set the pencolor and fillcolor. - - Arguments: - Several input formats are allowed. 
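# A screen-free mirror of the speed() normalization described above:
# speed strings map to presets, and values outside (0.5, 10.5)
# collapse to 0, i.e. no animation.

speeds = {'fastest': 0, 'fast': 10, 'normal': 6, 'slow': 3, 'slowest': 1}

def normalize_speed(speed):
    if speed in speeds:
        return speeds[speed]
    if 0.5 < speed < 10.5:
        return int(round(speed))
    return 0

assert normalize_speed('normal') == 6
assert normalize_speed(3.7) == 4
assert normalize_speed(42) == 0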
- They use 0 to 3 arguments as follows: - - color() - Return the current pencolor and the current fillcolor as - a pair of color specification strings or tuples as returned - by pencolor() and fillcolor(). - - color(colorstring), color((r,g,b)), color(r,g,b) - Inputs as in pencolor(), set both, fillcolor and pencolor, - to the given value. - - color(colorstring1, colorstring2), color((r1,g1,b1), (r2,g2,b2)) - Equivalent to pencolor(colorstring1) and fillcolor(colorstring2) - and analogously if the other input format is used. - - If turtleshape is a polygon, outline and interior of that polygon - is drawn with the newly set colors. - For more info see: pencolor, fillcolor - - Example (for a Turtle instance named turtle): - >>> turtle.color('red', 'green') - >>> turtle.color() - ('red', 'green') - >>> colormode(255) - >>> color(('#285078', '#a0c8f0')) - >>> color() - ((40.0, 80.0, 120.0), (160.0, 200.0, 240.0)) - """ - if args: - l = len(args) - if l == 1: - pcolor = fcolor = args[0] - elif l == 2: - pcolor, fcolor = args - elif l == 3: - pcolor = fcolor = args - pcolor = self._colorstr(pcolor) - fcolor = self._colorstr(fcolor) - self.pen(pencolor=pcolor, fillcolor=fcolor) - else: - return self._color(self._pencolor), self._color(self._fillcolor) - - def pencolor(self, *args): - """ Return or set the pencolor. - - Arguments: - Four input formats are allowed: - - pencolor() - Return the current pencolor as color specification string or - as a tuple (see example). May be used as input to another - color/pencolor/fillcolor/bgcolor call. - - pencolor(colorstring) - Set pencolor to colorstring, which is a Tk color - specification string, such as "red", "yellow", or "#33cc8c". - - pencolor((r, g, b)) - Set pencolor to the RGB color represented by the tuple of - r, g, and b. Each of r, g, and b must be in the range - 0..colormode, where colormode is either 1.0 or 255 (see - colormode()). - - pencolor(r, g, b) - Set pencolor to the RGB color represented by r, g, and b. - Each of r, g, and b must be in the range 0..colormode. - - If turtleshape is a polygon, the outline of that polygon is drawn - with the newly set pencolor. - - Example (for a Turtle instance named turtle): - >>> turtle.pencolor('brown') - >>> turtle.pencolor() - 'brown' - >>> colormode(255) - >>> turtle.pencolor('#32c18f') - >>> turtle.pencolor() - (50.0, 193.0, 143.0) - """ - if args: - color = self._colorstr(args) - if color == self._pencolor: - return - self.pen(pencolor=color) - else: - return self._color(self._pencolor) - - def fillcolor(self, *args): - """ Return or set the fillcolor. - - Arguments: - Four input formats are allowed: - - fillcolor() - Return the current fillcolor as color specification string, - possibly in tuple format (see example). May be used as - input to another color/pencolor/fillcolor/bgcolor call. - - fillcolor(colorstring) - Set fillcolor to colorstring, which is a Tk color - specification string, such as "red", "yellow", or "#33cc8c". - - fillcolor((r, g, b)) - Set fillcolor to the RGB color represented by the tuple of - r, g, and b. Each of r, g, and b must be in the range - 0..colormode, where colormode is either 1.0 or 255 (see - colormode()). - - fillcolor(r, g, b) - Set fillcolor to the RGB color represented by r, g, and b. - Each of r, g, and b must be in the range 0..colormode. - - If turtleshape is a polygon, the interior of that polygon is drawn - with the newly set fillcolor. 
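# A screen-free sketch of the colortriple-to-hexstring conversion that
# backs pencolor()/fillcolor() (see _cc() further down): componentwise
# scaling by colormode, then "#rrggbb" formatting.

def to_hex(r, g, b, colormode=1.0):
    if colormode == 1.0:
        r, g, b = (round(255.0 * x) for x in (r, g, b))
    return "#%02x%02x%02x" % (r, g, b)

print(to_hex(40/255, 80/255, 120/255))       # '#285078'
print(to_hex(50, 193, 143, colormode=255))   # '#32c18f'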
- - Example (for a Turtle instance named turtle): - >>> turtle.fillcolor('violet') - >>> turtle.fillcolor() - 'violet' - >>> colormode(255) - >>> turtle.fillcolor('#ffffff') - >>> turtle.fillcolor() - (255.0, 255.0, 255.0) - """ - if args: - color = self._colorstr(args) - if color == self._fillcolor: - return - self.pen(fillcolor=color) - else: - return self._color(self._fillcolor) - - def teleport(self, x=None, y=None, *, fill_gap: bool = False) -> None: - """To be overwritten by child class RawTurtle. - Includes no TNavigator references. - """ - pendown = self.isdown() - if pendown: - self.pen(pendown=False) - self.pen(pendown=pendown) - - def showturtle(self): - """Makes the turtle visible. - - Aliases: showturtle | st - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.hideturtle() - >>> turtle.showturtle() - """ - self.pen(shown=True) - - def hideturtle(self): - """Makes the turtle invisible. - - Aliases: hideturtle | ht - - No argument. - - It's a good idea to do this while you're in the - middle of a complicated drawing, because hiding - the turtle speeds up the drawing observably. - - Example (for a Turtle instance named turtle): - >>> turtle.hideturtle() - """ - self.pen(shown=False) - - def isvisible(self): - """Return True if the Turtle is shown, False if it's hidden. - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.hideturtle() - >>> print(turtle.isvisible()) - False - """ - return self._shown - - def pen(self, pen=None, **pendict): - """Return or set the pen's attributes. - - Arguments: - pen -- a dictionary with some or all of the below listed keys. - **pendict -- one or more keyword-arguments with the below - listed keys as keywords. - - Return or set the pen's attributes in a 'pen-dictionary' - with the following key/value pairs: - "shown" : True/False - "pendown" : True/False - "pencolor" : color-string or color-tuple - "fillcolor" : color-string or color-tuple - "pensize" : positive number - "speed" : number in range 0..10 - "resizemode" : "auto" or "user" or "noresize" - "stretchfactor": (positive number, positive number) - "shearfactor": number - "outline" : positive number - "tilt" : number - - This dictionary can be used as argument for a subsequent - pen()-call to restore the former pen-state. Moreover one - or more of these attributes can be provided as keyword-arguments. - This can be used to set several pen attributes in one statement. 
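# A sketch of the pen() round-trip described above: snapshot the pen
# dictionary, change attributes, then restore the snapshot wholesale.
# Assumes a Tk-capable display.

import turtle

t = turtle.Turtle()
penstate = t.pen()                  # snapshot of all pen attributes
t.pen(fillcolor="black", pencolor="red", pensize=10)
t.pen(penstate)                     # restore the snapshot
assert t.pen()["pensize"] == penstate["pensize"]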
- - - Examples (for a Turtle instance named turtle): - >>> turtle.pen(fillcolor="black", pencolor="red", pensize=10) - >>> turtle.pen() - {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, - 'pencolor': 'red', 'pendown': True, 'fillcolor': 'black', - 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} - >>> penstate=turtle.pen() - >>> turtle.color("yellow","") - >>> turtle.penup() - >>> turtle.pen() - {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, - 'pencolor': 'yellow', 'pendown': False, 'fillcolor': '', - 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} - >>> p.pen(penstate, fillcolor="green") - >>> p.pen() - {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, - 'pencolor': 'red', 'pendown': True, 'fillcolor': 'green', - 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} - """ - _pd = {"shown" : self._shown, - "pendown" : self._drawing, - "pencolor" : self._pencolor, - "fillcolor" : self._fillcolor, - "pensize" : self._pensize, - "speed" : self._speed, - "resizemode" : self._resizemode, - "stretchfactor" : self._stretchfactor, - "shearfactor" : self._shearfactor, - "outline" : self._outlinewidth, - "tilt" : self._tilt - } - - if not (pen or pendict): - return _pd - - if isinstance(pen, dict): - p = pen - else: - p = {} - p.update(pendict) - - _p_buf = {} - for key in p: - _p_buf[key] = _pd[key] - - if self.undobuffer: - self.undobuffer.push(("pen", _p_buf)) - - newLine = False - if "pendown" in p: - if self._drawing != p["pendown"]: - newLine = True - if "pencolor" in p: - if isinstance(p["pencolor"], tuple): - p["pencolor"] = self._colorstr((p["pencolor"],)) - if self._pencolor != p["pencolor"]: - newLine = True - if "pensize" in p: - if self._pensize != p["pensize"]: - newLine = True - if newLine: - self._newLine() - if "pendown" in p: - self._drawing = p["pendown"] - if "pencolor" in p: - self._pencolor = p["pencolor"] - if "pensize" in p: - self._pensize = p["pensize"] - if "fillcolor" in p: - if isinstance(p["fillcolor"], tuple): - p["fillcolor"] = self._colorstr((p["fillcolor"],)) - self._fillcolor = p["fillcolor"] - if "speed" in p: - self._speed = p["speed"] - if "resizemode" in p: - self._resizemode = p["resizemode"] - if "stretchfactor" in p: - sf = p["stretchfactor"] - if isinstance(sf, (int, float)): - sf = (sf, sf) - self._stretchfactor = sf - if "shearfactor" in p: - self._shearfactor = p["shearfactor"] - if "outline" in p: - self._outlinewidth = p["outline"] - if "shown" in p: - self._shown = p["shown"] - if "tilt" in p: - self._tilt = p["tilt"] - if "stretchfactor" in p or "tilt" in p or "shearfactor" in p: - scx, scy = self._stretchfactor - shf = self._shearfactor - sa, ca = math.sin(self._tilt), math.cos(self._tilt) - self._shapetrafo = ( scx*ca, scy*(shf*ca + sa), - -scx*sa, scy*(ca - shf*sa)) - self._update() - -## three dummy methods to be implemented by child class: - - def _newLine(self, usePos = True): - """dummy method - to be overwritten by child class""" - def _update(self, count=True, forced=False): - """dummy method - to be overwritten by child class""" - def _color(self, args): - """dummy method - to be overwritten by child class""" - def _colorstr(self, args): - """dummy method - to be overwritten by child class""" - - width = pensize - up = penup - pu = penup - pd = pendown - down = pendown - st = showturtle - ht = hideturtle - - -class _TurtleImage(object): - """Helper class: Datatype to store Turtle attributes - """ - - def __init__(self, screen, shapeIndex): - self.screen = screen - 
self._type = None - self._setshape(shapeIndex) - - def _setshape(self, shapeIndex): - screen = self.screen - self.shapeIndex = shapeIndex - if self._type == "polygon" == screen._shapes[shapeIndex]._type: - return - if self._type == "image" == screen._shapes[shapeIndex]._type: - return - if self._type in ["image", "polygon"]: - screen._delete(self._item) - elif self._type == "compound": - for item in self._item: - screen._delete(item) - self._type = screen._shapes[shapeIndex]._type - if self._type == "polygon": - self._item = screen._createpoly() - elif self._type == "image": - self._item = screen._createimage(screen._shapes["blank"]._data) - elif self._type == "compound": - self._item = [screen._createpoly() for item in - screen._shapes[shapeIndex]._data] - - -class RawTurtle(TPen, TNavigator): - """Animation part of the RawTurtle. - Puts RawTurtle upon a TurtleScreen and provides tools for - its animation. - """ - screens = [] - - def __init__(self, canvas=None, - shape=_CFG["shape"], - undobuffersize=_CFG["undobuffersize"], - visible=_CFG["visible"]): - if isinstance(canvas, _Screen): - self.screen = canvas - elif isinstance(canvas, TurtleScreen): - if canvas not in RawTurtle.screens: - RawTurtle.screens.append(canvas) - self.screen = canvas - elif isinstance(canvas, (ScrolledCanvas, Canvas)): - for screen in RawTurtle.screens: - if screen.cv == canvas: - self.screen = screen - break - else: - self.screen = TurtleScreen(canvas) - RawTurtle.screens.append(self.screen) - else: - raise TurtleGraphicsError("bad canvas argument %s" % canvas) - - screen = self.screen - TNavigator.__init__(self, screen.mode()) - TPen.__init__(self) - screen._turtles.append(self) - self.drawingLineItem = screen._createline() - self.turtle = _TurtleImage(screen, shape) - self._poly = None - self._creatingPoly = False - self._fillitem = self._fillpath = None - self._shown = visible - self._hidden_from_screen = False - self.currentLineItem = screen._createline() - self.currentLine = [self._position] - self.items = [self.currentLineItem] - self.stampItems = [] - self._undobuffersize = undobuffersize - self.undobuffer = Tbuffer(undobuffersize) - self._update() - - def reset(self): - """Delete the turtle's drawings and restore its default values. - - No argument. - - Delete the turtle's drawings from the screen, re-center the turtle - and set variables to the default values. - - Example (for a Turtle instance named turtle): - >>> turtle.position() - (0.00,-22.00) - >>> turtle.heading() - 100.0 - >>> turtle.reset() - >>> turtle.position() - (0.00,0.00) - >>> turtle.heading() - 0.0 - """ - TNavigator.reset(self) - TPen._reset(self) - self._clear() - self._drawturtle() - self._update() - - def setundobuffer(self, size): - """Set or disable undobuffer. - - Argument: - size -- an integer or None - - If size is an integer an empty undobuffer of given size is installed. - Size gives the maximum number of turtle-actions that can be undone - by the undo() function. - If size is None, no undobuffer is present. - - Example (for a Turtle instance named turtle): - >>> turtle.setundobuffer(42) - """ - if size is None or size <= 0: - self.undobuffer = None - else: - self.undobuffer = Tbuffer(size) - - def undobufferentries(self): - """Return count of entries in the undobuffer. - - No argument. - - Example (for a Turtle instance named turtle): - >>> while undobufferentries(): - ... 
undo() - """ - if self.undobuffer is None: - return 0 - return self.undobuffer.nr_of_items() - - def _clear(self): - """Delete all of pen's drawings""" - self._fillitem = self._fillpath = None - for item in self.items: - self.screen._delete(item) - self.currentLineItem = self.screen._createline() - self.currentLine = [] - if self._drawing: - self.currentLine.append(self._position) - self.items = [self.currentLineItem] - self.clearstamps() - self.setundobuffer(self._undobuffersize) - - - def clear(self): - """Delete the turtle's drawings from the screen. Do not move turtle. - - No arguments. - - Delete the turtle's drawings from the screen. Do not move turtle. - State and position of the turtle as well as drawings of other - turtles are not affected. - - Examples (for a Turtle instance named turtle): - >>> turtle.clear() - """ - self._clear() - self._update() - - def _update_data(self): - self.screen._incrementudc() - if self.screen._updatecounter != 0: - return - if len(self.currentLine)>1: - self.screen._drawline(self.currentLineItem, self.currentLine, - self._pencolor, self._pensize) - - def _update(self): - """Perform a Turtle-data update. - """ - screen = self.screen - if screen._tracing == 0: - return - elif screen._tracing == 1: - self._update_data() - self._drawturtle() - screen._update() # TurtleScreenBase - screen._delay(screen._delayvalue) # TurtleScreenBase - else: - self._update_data() - if screen._updatecounter == 0: - for t in screen.turtles(): - t._drawturtle() - screen._update() - - def _tracer(self, flag=None, delay=None): - """Turns turtle animation on/off and set delay for update drawings. - - Optional arguments: - n -- nonnegative integer - delay -- nonnegative integer - - If n is given, only each n-th regular screen update is really performed. - (Can be used to accelerate the drawing of complex graphics.) - Second arguments sets delay value (see RawTurtle.delay()) - - Example (for a Turtle instance named turtle): - >>> turtle.tracer(8, 25) - >>> dist = 2 - >>> for i in range(200): - ... turtle.fd(dist) - ... turtle.rt(90) - ... dist += 2 - """ - return self.screen.tracer(flag, delay) - - def _color(self, args): - return self.screen._color(args) - - def _colorstr(self, args): - return self.screen._colorstr(args) - - def _cc(self, args): - """Convert colortriples to hexstrings. - """ - if isinstance(args, str): - return args - try: - r, g, b = args - except (TypeError, ValueError): - raise TurtleGraphicsError("bad color arguments: %s" % str(args)) - if self.screen._colormode == 1.0: - r, g, b = [round(255.0*x) for x in (r, g, b)] - if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)): - raise TurtleGraphicsError("bad color sequence: %s" % str(args)) - return "#%02x%02x%02x" % (r, g, b) - - def teleport(self, x=None, y=None, *, fill_gap: bool = False) -> None: - """Instantly move turtle to an absolute position. - - Arguments: - x -- a number or None - y -- a number None - fill_gap -- a boolean This argument must be specified by name. - - call: teleport(x, y) # two coordinates - --or: teleport(x) # teleport to x position, keeping y as is - --or: teleport(y=y) # teleport to y position, keeping x as is - --or: teleport(x, y, fill_gap=True) - # teleport but fill the gap in between - - Move turtle to an absolute position. Unlike goto(x, y), a line will not - be drawn. The turtle's orientation does not change. If currently - filling, the polygon(s) teleported from will be filled after leaving, - and filling will begin again after teleporting. 
This can be disabled - with fill_gap=True, which makes the imaginary line traveled during - teleporting act as a fill barrier like in goto(x, y). - - Example (for a Turtle instance named turtle): - >>> tp = turtle.pos() - >>> tp - (0.00,0.00) - >>> turtle.teleport(60) - >>> turtle.pos() - (60.00,0.00) - >>> turtle.teleport(y=10) - >>> turtle.pos() - (60.00,10.00) - >>> turtle.teleport(20, 30) - >>> turtle.pos() - (20.00,30.00) - """ - pendown = self.isdown() - was_filling = self.filling() - if pendown: - self.pen(pendown=False) - if was_filling and not fill_gap: - self.end_fill() - new_x = x if x is not None else self._position[0] - new_y = y if y is not None else self._position[1] - self._position = Vec2D(new_x, new_y) - self.pen(pendown=pendown) - if was_filling and not fill_gap: - self.begin_fill() - - def clone(self): - """Create and return a clone of the turtle. - - No argument. - - Create and return a clone of the turtle with same position, heading - and turtle properties. - - Example (for a Turtle instance named mick): - mick = Turtle() - joe = mick.clone() - """ - screen = self.screen - self._newLine(self._drawing) - - turtle = self.turtle - self.screen = None - self.turtle = None # too make self deepcopy-able - - q = deepcopy(self) - - self.screen = screen - self.turtle = turtle - - q.screen = screen - q.turtle = _TurtleImage(screen, self.turtle.shapeIndex) - - screen._turtles.append(q) - ttype = screen._shapes[self.turtle.shapeIndex]._type - if ttype == "polygon": - q.turtle._item = screen._createpoly() - elif ttype == "image": - q.turtle._item = screen._createimage(screen._shapes["blank"]._data) - elif ttype == "compound": - q.turtle._item = [screen._createpoly() for item in - screen._shapes[self.turtle.shapeIndex]._data] - q.currentLineItem = screen._createline() - q._update() - return q - - def shape(self, name=None): - """Set turtle shape to shape with given name / return current shapename. - - Optional argument: - name -- a string, which is a valid shapename - - Set turtle shape to shape with given name or, if name is not given, - return name of current shape. - Shape with name must exist in the TurtleScreen's shape dictionary. - Initially there are the following polygon shapes: - 'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'. - To learn about how to deal with shapes see Screen-method register_shape. - - Example (for a Turtle instance named turtle): - >>> turtle.shape() - 'arrow' - >>> turtle.shape("turtle") - >>> turtle.shape() - 'turtle' - """ - if name is None: - return self.turtle.shapeIndex - if not name in self.screen.getshapes(): - raise TurtleGraphicsError("There is no shape named %s" % name) - self.turtle._setshape(name) - self._update() - - def shapesize(self, stretch_wid=None, stretch_len=None, outline=None): - """Set/return turtle's stretchfactors/outline. Set resizemode to "user". - - Optional arguments: - stretch_wid : positive number - stretch_len : positive number - outline : positive number - - Return or set the pen's attributes x/y-stretchfactors and/or outline. - Set resizemode to "user". - If and only if resizemode is set to "user", the turtle will be displayed - stretched according to its stretchfactors: - stretch_wid is stretchfactor perpendicular to orientation - stretch_len is stretchfactor in direction of turtles orientation. - outline determines the width of the shapes's outline. 
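# A sketch, assuming Python 3.12+ and a Tk-capable display, combining
# teleport() and clone() from above: teleport moves without drawing,
# and the clone keeps position, heading and pen properties.

import turtle

mick = turtle.Turtle()
mick.teleport(60, 30)           # jump there; no line is drawn
joe = mick.clone()              # same position, heading, properties
joe.teleport(y=-30)             # keep x, change only y
print(mick.pos(), joe.pos())    # (60.00,30.00) (60.00,-30.00)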
- - Examples (for a Turtle instance named turtle): - >>> turtle.resizemode("user") - >>> turtle.shapesize(5, 5, 12) - >>> turtle.shapesize(outline=8) - """ - if stretch_wid is stretch_len is outline is None: - stretch_wid, stretch_len = self._stretchfactor - return stretch_wid, stretch_len, self._outlinewidth - if stretch_wid == 0 or stretch_len == 0: - raise TurtleGraphicsError("stretch_wid/stretch_len must not be zero") - if stretch_wid is not None: - if stretch_len is None: - stretchfactor = stretch_wid, stretch_wid - else: - stretchfactor = stretch_wid, stretch_len - elif stretch_len is not None: - stretchfactor = self._stretchfactor[0], stretch_len - else: - stretchfactor = self._stretchfactor - if outline is None: - outline = self._outlinewidth - self.pen(resizemode="user", - stretchfactor=stretchfactor, outline=outline) - - def shearfactor(self, shear=None): - """Set or return the current shearfactor. - - Optional argument: shear -- number, tangent of the shear angle - - Shear the turtleshape according to the given shearfactor shear, - which is the tangent of the shear angle. DO NOT change the - turtle's heading (direction of movement). - If shear is not given: return the current shearfactor, i. e. the - tangent of the shear angle, by which lines parallel to the - heading of the turtle are sheared. - - Examples (for a Turtle instance named turtle): - >>> turtle.shape("circle") - >>> turtle.shapesize(5,2) - >>> turtle.shearfactor(0.5) - >>> turtle.shearfactor() - >>> 0.5 - """ - if shear is None: - return self._shearfactor - self.pen(resizemode="user", shearfactor=shear) - - def tiltangle(self, angle=None): - """Set or return the current tilt-angle. - - Optional argument: angle -- number - - Rotate the turtleshape to point in the direction specified by angle, - regardless of its current tilt-angle. DO NOT change the turtle's - heading (direction of movement). - If angle is not given: return the current tilt-angle, i. e. the angle - between the orientation of the turtleshape and the heading of the - turtle (its direction of movement). - - Examples (for a Turtle instance named turtle): - >>> turtle.shape("circle") - >>> turtle.shapesize(5, 2) - >>> turtle.tiltangle() - 0.0 - >>> turtle.tiltangle(45) - >>> turtle.tiltangle() - 45.0 - >>> turtle.stamp() - >>> turtle.fd(50) - >>> turtle.tiltangle(-45) - >>> turtle.tiltangle() - 315.0 - >>> turtle.stamp() - >>> turtle.fd(50) - """ - if angle is None: - tilt = -math.degrees(self._tilt) * self._angleOrient - return (tilt / self._degreesPerAU) % self._fullcircle - else: - tilt = -angle * self._degreesPerAU * self._angleOrient - tilt = math.radians(tilt) % math.tau - self.pen(resizemode="user", tilt=tilt) - - def tilt(self, angle): - """Rotate the turtleshape by angle. - - Argument: - angle - a number - - Rotate the turtleshape by angle from its current tilt-angle, - but do NOT change the turtle's heading (direction of movement). - - Examples (for a Turtle instance named turtle): - >>> turtle.shape("circle") - >>> turtle.shapesize(5,2) - >>> turtle.tilt(30) - >>> turtle.fd(50) - >>> turtle.tilt(30) - >>> turtle.fd(50) - """ - self.tiltangle(angle + self.tiltangle()) - - def shapetransform(self, t11=None, t12=None, t21=None, t22=None): - """Set or return the current transformation matrix of the turtle shape. - - Optional arguments: t11, t12, t21, t22 -- numbers. - - If none of the matrix elements are given, return the transformation - matrix. 
-        Otherwise set the given elements and transform the turtleshape
-        according to the matrix consisting of first row t11, t12 and
-        second row t21, t22.
-        Modify stretchfactor, shearfactor and tiltangle according to the
-        given matrix.
-
-        Examples (for a Turtle instance named turtle):
-        >>> turtle.shape("square")
-        >>> turtle.shapesize(4,2)
-        >>> turtle.shearfactor(-0.5)
-        >>> turtle.shapetransform()
-        (4.0, -1.0, -0.0, 2.0)
-        """
-        if t11 is t12 is t21 is t22 is None:
-            return self._shapetrafo
-        m11, m12, m21, m22 = self._shapetrafo
-        if t11 is not None: m11 = t11
-        if t12 is not None: m12 = t12
-        if t21 is not None: m21 = t21
-        if t22 is not None: m22 = t22
-        # Check the resulting matrix, not the arguments, which may be None.
-        if m11 * m22 - m12 * m21 == 0:
-            raise TurtleGraphicsError("Bad shape transform matrix: must not be singular")
-        self._shapetrafo = (m11, m12, m21, m22)
-        alfa = math.atan2(-m21, m11) % math.tau
-        sa, ca = math.sin(alfa), math.cos(alfa)
-        a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22,
-                              sa*m11 + ca*m21, sa*m12 + ca*m22)
-        self._stretchfactor = a11, a22
-        self._shearfactor = a12/a22
-        self._tilt = alfa
-        self.pen(resizemode="user")
-
-
-    def _polytrafo(self, poly):
-        """Computes transformed polygon shapes from a shape
-        according to current position and heading.
-        """
-        screen = self.screen
-        p0, p1 = self._position
-        e0, e1 = self._orient
-        e = Vec2D(e0, e1 * screen.yscale / screen.xscale)
-        e0, e1 = (1.0 / abs(e)) * e
-        return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale)
-                for (x, y) in poly]
-
-    def get_shapepoly(self):
-        """Return the current shape polygon as tuple of coordinate pairs.
-
-        No argument.
-
-        Examples (for a Turtle instance named turtle):
-        >>> turtle.shape("square")
-        >>> turtle.shapetransform(4, -1, 0, 2)
-        >>> turtle.get_shapepoly()
-        ((50, -20), (30, 20), (-50, 20), (-30, -20))
-
-        """
-        shape = self.screen._shapes[self.turtle.shapeIndex]
-        if shape._type == "polygon":
-            return self._getshapepoly(shape._data, shape._type == "compound")
-        # else return None
-
-    def _getshapepoly(self, polygon, compound=False):
-        """Calculate transformed shape polygon according to resizemode
-        and shapetransform.
- """ - if self._resizemode == "user" or compound: - t11, t12, t21, t22 = self._shapetrafo - elif self._resizemode == "auto": - l = max(1, self._pensize/5.0) - t11, t12, t21, t22 = l, 0, 0, l - elif self._resizemode == "noresize": - return polygon - return tuple((t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon) - - def _drawturtle(self): - """Manages the correct rendering of the turtle with respect to - its shape, resizemode, stretch and tilt etc.""" - screen = self.screen - shape = screen._shapes[self.turtle.shapeIndex] - ttype = shape._type - titem = self.turtle._item - if self._shown and screen._updatecounter == 0 and screen._tracing > 0: - self._hidden_from_screen = False - tshape = shape._data - if ttype == "polygon": - if self._resizemode == "noresize": w = 1 - elif self._resizemode == "auto": w = self._pensize - else: w =self._outlinewidth - shape = self._polytrafo(self._getshapepoly(tshape)) - fc, oc = self._fillcolor, self._pencolor - screen._drawpoly(titem, shape, fill=fc, outline=oc, - width=w, top=True) - elif ttype == "image": - screen._drawimage(titem, self._position, tshape) - elif ttype == "compound": - for item, (poly, fc, oc) in zip(titem, tshape): - poly = self._polytrafo(self._getshapepoly(poly, True)) - screen._drawpoly(item, poly, fill=self._cc(fc), - outline=self._cc(oc), width=self._outlinewidth, top=True) - else: - if self._hidden_from_screen: - return - if ttype == "polygon": - screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), "", "") - elif ttype == "image": - screen._drawimage(titem, self._position, - screen._shapes["blank"]._data) - elif ttype == "compound": - for item in titem: - screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), "", "") - self._hidden_from_screen = True - -############################## stamp stuff ############################### - - def stamp(self): - """Stamp a copy of the turtleshape onto the canvas and return its id. - - No argument. - - Stamp a copy of the turtle shape onto the canvas at the current - turtle position. Return a stamp_id for that stamp, which can be - used to delete it by calling clearstamp(stamp_id). 
- - Example (for a Turtle instance named turtle): - >>> turtle.color("blue") - >>> turtle.stamp() - 13 - >>> turtle.fd(50) - """ - screen = self.screen - shape = screen._shapes[self.turtle.shapeIndex] - ttype = shape._type - tshape = shape._data - if ttype == "polygon": - stitem = screen._createpoly() - if self._resizemode == "noresize": w = 1 - elif self._resizemode == "auto": w = self._pensize - else: w =self._outlinewidth - shape = self._polytrafo(self._getshapepoly(tshape)) - fc, oc = self._fillcolor, self._pencolor - screen._drawpoly(stitem, shape, fill=fc, outline=oc, - width=w, top=True) - elif ttype == "image": - stitem = screen._createimage("") - screen._drawimage(stitem, self._position, tshape) - elif ttype == "compound": - stitem = [] - for element in tshape: - item = screen._createpoly() - stitem.append(item) - stitem = tuple(stitem) - for item, (poly, fc, oc) in zip(stitem, tshape): - poly = self._polytrafo(self._getshapepoly(poly, True)) - screen._drawpoly(item, poly, fill=self._cc(fc), - outline=self._cc(oc), width=self._outlinewidth, top=True) - self.stampItems.append(stitem) - self.undobuffer.push(("stamp", stitem)) - return stitem - - def _clearstamp(self, stampid): - """does the work for clearstamp() and clearstamps() - """ - if stampid in self.stampItems: - if isinstance(stampid, tuple): - for subitem in stampid: - self.screen._delete(subitem) - else: - self.screen._delete(stampid) - self.stampItems.remove(stampid) - # Delete stampitem from undobuffer if necessary - # if clearstamp is called directly. - item = ("stamp", stampid) - buf = self.undobuffer - if item not in buf.buffer: - return - index = buf.buffer.index(item) - buf.buffer.remove(item) - if index <= buf.ptr: - buf.ptr = (buf.ptr - 1) % buf.bufsize - buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None]) - - def clearstamp(self, stampid): - """Delete stamp with given stampid - - Argument: - stampid - an integer, must be return value of previous stamp() call. - - Example (for a Turtle instance named turtle): - >>> turtle.color("blue") - >>> astamp = turtle.stamp() - >>> turtle.fd(50) - >>> turtle.clearstamp(astamp) - """ - self._clearstamp(stampid) - self._update() - - def clearstamps(self, n=None): - """Delete all or first/last n of turtle's stamps. - - Optional argument: - n -- an integer - - If n is None, delete all of pen's stamps, - else if n > 0 delete first n stamps - else if n < 0 delete last n stamps. - - Example (for a Turtle instance named turtle): - >>> for i in range(8): - ... turtle.stamp(); turtle.fd(30) - ... - >>> turtle.clearstamps(2) - >>> turtle.clearstamps(-2) - >>> turtle.clearstamps() - """ - if n is None: - toDelete = self.stampItems[:] - elif n >= 0: - toDelete = self.stampItems[:n] - else: - toDelete = self.stampItems[n:] - for item in toDelete: - self._clearstamp(item) - self._update() - - def _goto(self, end): - """Move the pen to the point end, thereby drawing a line - if pen is down. All other methods for turtle movement depend - on this one. 
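# A sketch of the stamp bookkeeping above, assuming a Tk-capable
# display: each stamp() returns an id kept in stampItems, and
# clearstamps(n) trims that list from either end.

import turtle

t = turtle.Turtle()
for _ in range(8):
    t.stamp()
    t.fd(30)
t.clearstamps(2)    # delete the first two stamps
t.clearstamps(-2)   # delete the last two
t.clearstamps()     # delete all remaining stamps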
- """ - ## Version with undo-stuff - go_modes = ( self._drawing, - self._pencolor, - self._pensize, - isinstance(self._fillpath, list)) - screen = self.screen - undo_entry = ("go", self._position, end, go_modes, - (self.currentLineItem, - self.currentLine[:], - screen._pointlist(self.currentLineItem), - self.items[:]) - ) - if self.undobuffer: - self.undobuffer.push(undo_entry) - start = self._position - if self._speed and screen._tracing == 1: - diff = (end-start) - diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2 - nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed)) - delta = diff * (1.0/nhops) - for n in range(1, nhops): - if n == 1: - top = True - else: - top = False - self._position = start + delta * n - if self._drawing: - screen._drawline(self.drawingLineItem, - (start, self._position), - self._pencolor, self._pensize, top) - self._update() - if self._drawing: - screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)), - fill="", width=self._pensize) - # Turtle now at end, - if self._drawing: # now update currentLine - self.currentLine.append(end) - if isinstance(self._fillpath, list): - self._fillpath.append(end) - ###### vererbung!!!!!!!!!!!!!!!!!!!!!! - self._position = end - if self._creatingPoly: - self._poly.append(end) - if len(self.currentLine) > 42: # 42! answer to the ultimate question - # of life, the universe and everything - self._newLine() - self._update() #count=True) - - def _undogoto(self, entry): - """Reverse a _goto. Used for undo() - """ - old, new, go_modes, coodata = entry - drawing, pc, ps, filling = go_modes - cLI, cL, pl, items = coodata - screen = self.screen - if abs(self._position - new) > 0.5: - print ("undogoto: HALLO-DA-STIMMT-WAS-NICHT!") - # restore former situation - self.currentLineItem = cLI - self.currentLine = cL - - if pl == [(0, 0), (0, 0)]: - usepc = "" - else: - usepc = pc - screen._drawline(cLI, pl, fill=usepc, width=ps) - - todelete = [i for i in self.items if (i not in items) and - (screen._type(i) == "line")] - for i in todelete: - screen._delete(i) - self.items.remove(i) - - start = old - if self._speed and screen._tracing == 1: - diff = old - new - diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2 - nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed)) - delta = diff * (1.0/nhops) - for n in range(1, nhops): - if n == 1: - top = True - else: - top = False - self._position = new + delta * n - if drawing: - screen._drawline(self.drawingLineItem, - (start, self._position), - pc, ps, top) - self._update() - if drawing: - screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)), - fill="", width=ps) - # Turtle now at position old, - self._position = old - ## if undo is done during creating a polygon, the last vertex - ## will be deleted. if the polygon is entirely deleted, - ## creatingPoly will be set to False. - ## Polygons created before the last one will not be affected by undo() - if self._creatingPoly: - if len(self._poly) > 0: - self._poly.pop() - if self._poly == []: - self._creatingPoly = False - self._poly = None - if filling: - if self._fillpath == []: - self._fillpath = None - print("Unwahrscheinlich in _undogoto!") - elif self._fillpath is not None: - self._fillpath.pop() - self._update() #count=True) - - def _rotate(self, angle): - """Turns pen clockwise by angle. 
- """ - if self.undobuffer: - self.undobuffer.push(("rot", angle, self._degreesPerAU)) - angle *= self._degreesPerAU - neworient = self._orient.rotate(angle) - tracing = self.screen._tracing - if tracing == 1 and self._speed > 0: - anglevel = 3.0 * self._speed - steps = 1 + int(abs(angle)/anglevel) - delta = 1.0*angle/steps - for _ in range(steps): - self._orient = self._orient.rotate(delta) - self._update() - self._orient = neworient - self._update() - - def _newLine(self, usePos=True): - """Closes current line item and starts a new one. - Remark: if current line became too long, animation - performance (via _drawline) slowed down considerably. - """ - if len(self.currentLine) > 1: - self.screen._drawline(self.currentLineItem, self.currentLine, - self._pencolor, self._pensize) - self.currentLineItem = self.screen._createline() - self.items.append(self.currentLineItem) - else: - self.screen._drawline(self.currentLineItem, top=True) - self.currentLine = [] - if usePos: - self.currentLine = [self._position] - - def filling(self): - """Return fillstate (True if filling, False else). - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.begin_fill() - >>> if turtle.filling(): - ... turtle.pensize(5) - ... else: - ... turtle.pensize(3) - """ - return isinstance(self._fillpath, list) - - def begin_fill(self): - """Called just before drawing a shape to be filled. - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.color("black", "red") - >>> turtle.begin_fill() - >>> turtle.circle(60) - >>> turtle.end_fill() - """ - if not self.filling(): - self._fillitem = self.screen._createpoly() - self.items.append(self._fillitem) - self._fillpath = [self._position] - self._newLine() - if self.undobuffer: - self.undobuffer.push(("beginfill", self._fillitem)) - self._update() - - - def end_fill(self): - """Fill the shape drawn after the call begin_fill(). - - No argument. - - Example (for a Turtle instance named turtle): - >>> turtle.color("black", "red") - >>> turtle.begin_fill() - >>> turtle.circle(60) - >>> turtle.end_fill() - """ - if self.filling(): - if len(self._fillpath) > 2: - self.screen._drawpoly(self._fillitem, self._fillpath, - fill=self._fillcolor) - if self.undobuffer: - self.undobuffer.push(("dofill", self._fillitem)) - self._fillitem = self._fillpath = None - self._update() - - def dot(self, size=None, *color): - """Draw a dot with diameter size, using color. - - Optional arguments: - size -- an integer >= 1 (if given) - color -- a colorstring or a numeric color tuple - - Draw a circular dot with diameter size, using color. - If size is not given, the maximum of pensize+4 and 2*pensize is used. - - Example (for a Turtle instance named turtle): - >>> turtle.dot() - >>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50) - """ - if not color: - if isinstance(size, (str, tuple)): - color = self._colorstr(size) - size = self._pensize + max(self._pensize, 4) - else: - color = self._pencolor - if not size: - size = self._pensize + max(self._pensize, 4) - else: - if size is None: - size = self._pensize + max(self._pensize, 4) - color = self._colorstr(color) - # If screen were to gain a dot function, see GH #104218. 
- pen = self.pen() - if self.undobuffer: - self.undobuffer.push(["seq"]) - self.undobuffer.cumulate = True - try: - if self.resizemode() == 'auto': - self.ht() - self.pendown() - self.pensize(size) - self.pencolor(color) - self.forward(0) - finally: - self.pen(pen) - if self.undobuffer: - self.undobuffer.cumulate = False - - def _write(self, txt, align, font): - """Performs the writing for write() - """ - item, end = self.screen._write(self._position, txt, align, font, - self._pencolor) - self._update() - self.items.append(item) - if self.undobuffer: - self.undobuffer.push(("wri", item)) - return end - - def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")): - """Write text at the current turtle position. - - Arguments: - arg -- info, which is to be written to the TurtleScreen - move (optional) -- True/False - align (optional) -- one of the strings "left", "center" or right" - font (optional) -- a triple (fontname, fontsize, fonttype) - - Write text - the string representation of arg - at the current - turtle position according to align ("left", "center" or right") - and with the given font. - If move is True, the pen is moved to the bottom-right corner - of the text. By default, move is False. - - Example (for a Turtle instance named turtle): - >>> turtle.write('Home = ', True, align="center") - >>> turtle.write((0,0), True) - """ - if self.undobuffer: - self.undobuffer.push(["seq"]) - self.undobuffer.cumulate = True - end = self._write(str(arg), align.lower(), font) - if move: - x, y = self.pos() - self.setpos(end, y) - if self.undobuffer: - self.undobuffer.cumulate = False - - def begin_poly(self): - """Start recording the vertices of a polygon. - - No argument. - - Start recording the vertices of a polygon. Current turtle position - is first point of polygon. - - Example (for a Turtle instance named turtle): - >>> turtle.begin_poly() - """ - self._poly = [self._position] - self._creatingPoly = True - - def end_poly(self): - """Stop recording the vertices of a polygon. - - No argument. - - Stop recording the vertices of a polygon. Current turtle position is - last point of polygon. This will be connected with the first point. - - Example (for a Turtle instance named turtle): - >>> turtle.end_poly() - """ - self._creatingPoly = False - - def get_poly(self): - """Return the lastly recorded polygon. - - No argument. - - Example (for a Turtle instance named turtle): - >>> p = turtle.get_poly() - >>> turtle.register_shape("myFavouriteShape", p) - """ - ## check if there is any poly? - if self._poly is not None: - return tuple(self._poly) - - def getscreen(self): - """Return the TurtleScreen object, the turtle is drawing on. - - No argument. - - Return the TurtleScreen object, the turtle is drawing on. - So TurtleScreen-methods can be called for that object. - - Example (for a Turtle instance named turtle): - >>> ts = turtle.getscreen() - >>> ts - - >>> ts.bgcolor("pink") - """ - return self.screen - - def getturtle(self): - """Return the Turtleobject itself. - - No argument. 
- - Only reasonable use: as a function to return the 'anonymous turtle': - - Example: - >>> pet = getturtle() - >>> pet.fd(50) - >>> pet - - >>> turtles() - [] - """ - return self - - getpen = getturtle - - - ################################################################ - ### screen oriented methods recurring to methods of TurtleScreen - ################################################################ - - def _delay(self, delay=None): - """Set delay value which determines speed of turtle animation. - """ - return self.screen.delay(delay) - - def onclick(self, fun, btn=1, add=None): - """Bind fun to mouse-click event on this turtle on canvas. - - Arguments: - fun -- a function with two arguments, to which will be assigned - the coordinates of the clicked point on the canvas. - btn -- number of the mouse-button defaults to 1 (left mouse button). - add -- True or False. If True, new binding will be added, otherwise - it will replace a former binding. - - Example for the anonymous turtle, i. e. the procedural way: - - >>> def turn(x, y): - ... left(360) - ... - >>> onclick(turn) # Now clicking into the turtle will turn it. - >>> onclick(None) # event-binding will be removed - """ - self.screen._onclick(self.turtle._item, fun, btn, add) - self._update() - - def onrelease(self, fun, btn=1, add=None): - """Bind fun to mouse-button-release event on this turtle on canvas. - - Arguments: - fun -- a function with two arguments, to which will be assigned - the coordinates of the clicked point on the canvas. - btn -- number of the mouse-button defaults to 1 (left mouse button). - - Example (for a MyTurtle instance named joe): - >>> class MyTurtle(Turtle): - ... def glow(self,x,y): - ... self.fillcolor("red") - ... def unglow(self,x,y): - ... self.fillcolor("") - ... - >>> joe = MyTurtle() - >>> joe.onclick(joe.glow) - >>> joe.onrelease(joe.unglow) - - Clicking on joe turns fillcolor red, unclicking turns it to - transparent. - """ - self.screen._onrelease(self.turtle._item, fun, btn, add) - self._update() - - def ondrag(self, fun, btn=1, add=None): - """Bind fun to mouse-move event on this turtle on canvas. - - Arguments: - fun -- a function with two arguments, to which will be assigned - the coordinates of the clicked point on the canvas. - btn -- number of the mouse-button defaults to 1 (left mouse button). - - Every sequence of mouse-move-events on a turtle is preceded by a - mouse-click event on that turtle. - - Example (for a Turtle instance named turtle): - >>> turtle.ondrag(turtle.goto) - - Subsequently clicking and dragging a Turtle will move it - across the screen thereby producing handdrawings (if pen is - down). 
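# A sketch, assuming a Tk-capable display, of the mouse bindings above:
# binding goto to ondrag() gives freehand drawing, and onclick() can
# trigger any two-argument callback.

import turtle

t = turtle.Turtle()
t.ondrag(t.goto)                        # drag the turtle to draw
t.onclick(lambda x, y: t.left(360))     # a click spins it in place
turtle.mainloop()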
- """ - self.screen._ondrag(self.turtle._item, fun, btn, add) - - - def _undo(self, action, data): - """Does the main part of the work for undo() - """ - if self.undobuffer is None: - return - if action == "rot": - angle, degPAU = data - self._rotate(-angle*degPAU/self._degreesPerAU) - dummy = self.undobuffer.pop() - elif action == "stamp": - stitem = data[0] - self.clearstamp(stitem) - elif action == "go": - self._undogoto(data) - elif action in ["wri", "dot"]: - item = data[0] - self.screen._delete(item) - self.items.remove(item) - elif action == "dofill": - item = data[0] - self.screen._drawpoly(item, ((0, 0),(0, 0),(0, 0)), - fill="", outline="") - elif action == "beginfill": - item = data[0] - self._fillitem = self._fillpath = None - if item in self.items: - self.screen._delete(item) - self.items.remove(item) - elif action == "pen": - TPen.pen(self, data[0]) - self.undobuffer.pop() - - def undo(self): - """undo (repeatedly) the last turtle action. - - No argument. - - undo (repeatedly) the last turtle action. - Number of available undo actions is determined by the size of - the undobuffer. - - Example (for a Turtle instance named turtle): - >>> for i in range(4): - ... turtle.fd(50); turtle.lt(80) - ... - >>> for i in range(8): - ... turtle.undo() - ... - """ - if self.undobuffer is None: - return - item = self.undobuffer.pop() - action = item[0] - data = item[1:] - if action == "seq": - while data: - item = data.pop() - self._undo(item[0], item[1:]) - else: - self._undo(action, data) - - turtlesize = shapesize - -RawPen = RawTurtle - -### Screen - Singleton ######################## - -def Screen(): - """Return the singleton screen object. - If none exists at the moment, create a new one and return it, - else return the existing one.""" - if Turtle._screen is None: - Turtle._screen = _Screen() - return Turtle._screen - -class _Screen(TurtleScreen): - - _root = None - _canvas = None - _title = _CFG["title"] - - def __init__(self): - if _Screen._root is None: - _Screen._root = self._root = _Root() - self._root.title(_Screen._title) - self._root.ondestroy(self._destroy) - if _Screen._canvas is None: - width = _CFG["width"] - height = _CFG["height"] - canvwidth = _CFG["canvwidth"] - canvheight = _CFG["canvheight"] - leftright = _CFG["leftright"] - topbottom = _CFG["topbottom"] - self._root.setupcanvas(width, height, canvwidth, canvheight) - _Screen._canvas = self._root._getcanvas() - TurtleScreen.__init__(self, _Screen._canvas) - self.setup(width, height, leftright, topbottom) - - def setup(self, width=_CFG["width"], height=_CFG["height"], - startx=_CFG["leftright"], starty=_CFG["topbottom"]): - """ Set the size and position of the main window. - - Arguments: - width: as integer a size in pixels, as float a fraction of the screen. - Default is 50% of screen. - height: as integer the height in pixels, as float a fraction of the - screen. Default is 75% of screen. - startx: if positive, starting position in pixels from the left - edge of the screen, if negative from the right edge - Default, startx=None is to center window horizontally. - starty: if positive, starting position in pixels from the top - edge of the screen, if negative from the bottom edge - Default, starty=None is to center window vertically. 
-
-        Examples (for a Screen instance named screen):
-        >>> screen.setup(width=200, height=200, startx=0, starty=0)
-
-        sets window to 200x200 pixels, in upper left of screen
-
-        >>> screen.setup(width=.75, height=0.5, startx=None, starty=None)
-
-        sets window to 75% of screen by 50% of screen and centers it
-        """
-        if not hasattr(self._root, "set_geometry"):
-            return
-        sw = self._root.win_width()
-        sh = self._root.win_height()
-        if isinstance(width, float) and 0 <= width <= 1:
-            width = sw*width
-        if startx is None:
-            startx = (sw - width) / 2
-        if isinstance(height, float) and 0 <= height <= 1:
-            height = sh*height
-        if starty is None:
-            starty = (sh - height) / 2
-        self._root.set_geometry(width, height, startx, starty)
-        self.update()
-
-    def title(self, titlestring):
-        """Set title of turtle-window
-
-        Argument:
-        titlestring -- a string, to appear in the titlebar of the
-                       turtle graphics window.
-
-        This is a method of Screen-class. Not available for TurtleScreen-
-        objects.
-
-        Example (for a Screen instance named screen):
-        >>> screen.title("Welcome to the turtle-zoo!")
-        """
-        if _Screen._root is not None:
-            _Screen._root.title(titlestring)
-        _Screen._title = titlestring
-
-    def _destroy(self):
-        root = self._root
-        if root is _Screen._root:
-            Turtle._pen = None
-            Turtle._screen = None
-            _Screen._root = None
-            _Screen._canvas = None
-        TurtleScreen._RUNNING = False
-        root.destroy()
-
-    def bye(self):
-        """Shut the turtlegraphics window.
-
-        Example (for a TurtleScreen instance named screen):
-        >>> screen.bye()
-        """
-        self._destroy()
-
-    def exitonclick(self):
-        """Go into mainloop until the mouse is clicked.
-
-        No arguments.
-
-        Bind the bye() method to a mouse click on the TurtleScreen.
-        If the "using_IDLE" value in the configuration dictionary is False
-        (the default), enter the mainloop.
-        If IDLE is used with the -n switch (no subprocess), this value should
-        be set to True in turtle.cfg; in that case IDLE's mainloop is active
-        for the client script as well.
-
-        This is a method of the Screen-class and not available for
-        TurtleScreen instances.
-
-        Example (for a Screen instance named screen):
-        >>> screen.exitonclick()
-
-        """
-        def exitGracefully(x, y):
-            """Screen.bye() with two dummy-parameters"""
-            self.bye()
-        self.onclick(exitGracefully)
-        if _CFG["using_IDLE"]:
-            return
-        try:
-            mainloop()
-        except AttributeError:
-            exit(0)
-
-class Turtle(RawTurtle):
-    """RawTurtle auto-creating (scrolled) canvas.
-
-    When a Turtle object is created or a function derived from some
-    Turtle method is called, a TurtleScreen object is automatically created.
-    """
-    _pen = None
-    _screen = None
-
-    def __init__(self,
-                 shape=_CFG["shape"],
-                 undobuffersize=_CFG["undobuffersize"],
-                 visible=_CFG["visible"]):
-        if Turtle._screen is None:
-            Turtle._screen = Screen()
-        RawTurtle.__init__(self, Turtle._screen,
-                           shape=shape,
-                           undobuffersize=undobuffersize,
-                           visible=visible)
-
-Pen = Turtle
-
-def write_docstringdict(filename="turtle_docstringdict"):
-    """Create and write docstring-dictionary to file.
-
-    Optional argument:
-    filename -- a string, used as filename
-                default value is turtle_docstringdict
-
-    Has to be called explicitly (it is not used by the turtle-graphics
-    classes). The docstring dictionary will be written to the Python script
-    <filename>.py. It is intended to serve as a template for translation
-    of the docstrings into different languages.
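# [editor's sketch] The Screen() singleton plus the window controls defined
# above, combined (needs a display):
import turtle

screen = turtle.Screen()              # creates the singleton on first call
assert screen is turtle.Screen()      # later calls return the same object
screen.setup(width=0.5, height=0.5)   # floats are fractions of the screen
screen.title("turtle demo")
screen.exitonclick()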
- """ - docsdict = {} - - for methodname in _tg_screen_functions: - key = "_Screen."+methodname - docsdict[key] = eval(key).__doc__ - for methodname in _tg_turtle_functions: - key = "Turtle."+methodname - docsdict[key] = eval(key).__doc__ - - with open("%s.py" % filename,"w") as f: - keys = sorted(x for x in docsdict - if x.split('.')[1] not in _alias_list) - f.write('docsdict = {\n\n') - for key in keys[:-1]: - f.write('%s :\n' % repr(key)) - f.write(' """%s\n""",\n\n' % docsdict[key]) - key = keys[-1] - f.write('%s :\n' % repr(key)) - f.write(' """%s\n"""\n\n' % docsdict[key]) - f.write("}\n") - f.close() - -def read_docstrings(lang): - """Read in docstrings from lang-specific docstring dictionary. - - Transfer docstrings, translated to lang, from a dictionary-file - to the methods of classes Screen and Turtle and - in revised form - - to the corresponding functions. - """ - modname = "turtle_docstringdict_%(language)s" % {'language':lang.lower()} - module = __import__(modname) - docsdict = module.docsdict - for key in docsdict: - try: -# eval(key).im_func.__doc__ = docsdict[key] - eval(key).__doc__ = docsdict[key] - except Exception: - print("Bad docstring-entry: %s" % key) - -_LANGUAGE = _CFG["language"] - -try: - if _LANGUAGE != "english": - read_docstrings(_LANGUAGE) -except ImportError: - print("Cannot find docsdict for", _LANGUAGE) -except Exception: - print ("Unknown Error when trying to import %s-docstring-dictionary" % - _LANGUAGE) - - -def getmethparlist(ob): - """Get strings describing the arguments for the given object - - Returns a pair of strings representing function parameter lists - including parenthesis. The first string is suitable for use in - function definition and the second is suitable for use in function - call. The "self" parameter is not included. - """ - orig_sig = inspect.signature(ob) - # bit of a hack for methods - turn it into a function - # but we drop the "self" param. - # Try and build one for Python defined functions - func_sig = orig_sig.replace( - parameters=list(orig_sig.parameters.values())[1:], - ) - - call_args = [] - for param in func_sig.parameters.values(): - match param.kind: - case ( - inspect.Parameter.POSITIONAL_ONLY - | inspect.Parameter.POSITIONAL_OR_KEYWORD - ): - call_args.append(param.name) - case inspect.Parameter.VAR_POSITIONAL: - call_args.append(f'*{param.name}') - case inspect.Parameter.KEYWORD_ONLY: - call_args.append(f'{param.name}={param.name}') - case inspect.Parameter.VAR_KEYWORD: - call_args.append(f'**{param.name}') - case _: - raise RuntimeError('Unsupported parameter kind', param.kind) - call_text = f'({', '.join(call_args)})' - - return str(func_sig), call_text - -def _turtle_docrevise(docstr): - """To reduce docstrings from RawTurtle class for functions - """ - import re - if docstr is None: - return None - turtlename = _CFG["exampleturtle"] - newdocstr = docstr.replace("%s." % turtlename,"") - parexp = re.compile(r' \(.+ %s\):' % turtlename) - newdocstr = parexp.sub(":", newdocstr) - return newdocstr - -def _screen_docrevise(docstr): - """To reduce docstrings from TurtleScreen class for functions - """ - import re - if docstr is None: - return None - screenname = _CFG["examplescreen"] - newdocstr = docstr.replace("%s." % screenname,"") - parexp = re.compile(r' \(.+ %s\):' % screenname) - newdocstr = parexp.sub(":", newdocstr) - return newdocstr - -## The following mechanism makes all methods of RawTurtle and Turtle available -## as functions. 
So we can enhance, change, add, delete methods to these -## classes and do not need to change anything here. - -__func_body = """\ -def {name}{paramslist}: - if {obj} is None: - if not TurtleScreen._RUNNING: - TurtleScreen._RUNNING = True - raise Terminator - {obj} = {init} - try: - return {obj}.{name}{argslist} - except TK.TclError: - if not TurtleScreen._RUNNING: - TurtleScreen._RUNNING = True - raise Terminator - raise -""" - -def _make_global_funcs(functions, cls, obj, init, docrevise): - for methodname in functions: - method = getattr(cls, methodname) - pl1, pl2 = getmethparlist(method) - if pl1 == "": - print(">>>>>>", pl1, pl2) - continue - defstr = __func_body.format(obj=obj, init=init, name=methodname, - paramslist=pl1, argslist=pl2) - exec(defstr, globals()) - globals()[methodname].__doc__ = docrevise(method.__doc__) - -_make_global_funcs(_tg_screen_functions, _Screen, - 'Turtle._screen', 'Screen()', _screen_docrevise) -_make_global_funcs(_tg_turtle_functions, Turtle, - 'Turtle._pen', 'Turtle()', _turtle_docrevise) - - -done = mainloop - -if __name__ == "__main__": - def switchpen(): - if isdown(): - pu() - else: - pd() - - def demo1(): - """Demo of old turtle.py - module""" - reset() - tracer(True) - up() - backward(100) - down() - # draw 3 squares; the last filled - width(3) - for i in range(3): - if i == 2: - begin_fill() - for _ in range(4): - forward(20) - left(90) - if i == 2: - color("maroon") - end_fill() - up() - forward(30) - down() - width(1) - color("black") - # move out of the way - tracer(False) - up() - right(90) - forward(100) - right(90) - forward(100) - right(180) - down() - # some text - write("startstart", 1) - write("start", 1) - color("red") - # staircase - for i in range(5): - forward(20) - left(90) - forward(20) - right(90) - # filled staircase - tracer(True) - begin_fill() - for i in range(5): - forward(20) - left(90) - forward(20) - right(90) - end_fill() - # more text - - def demo2(): - """Demo of some new features.""" - speed(1) - st() - pensize(3) - setheading(towards(0, 0)) - radius = distance(0, 0)/2.0 - rt(90) - for _ in range(18): - switchpen() - circle(radius, 10) - write("wait a moment...") - while undobufferentries(): - undo() - reset() - lt(90) - colormode(255) - laenge = 10 - pencolor("green") - pensize(3) - lt(180) - for i in range(-2, 16): - if i > 0: - begin_fill() - fillcolor(255-15*i, 0, 15*i) - for _ in range(3): - fd(laenge) - lt(120) - end_fill() - laenge += 10 - lt(15) - speed((speed()+1)%12) - #end_fill() - - lt(120) - pu() - fd(70) - rt(30) - pd() - color("red","yellow") - speed(0) - begin_fill() - for _ in range(4): - circle(50, 90) - rt(90) - fd(30) - rt(90) - end_fill() - lt(90) - pu() - fd(30) - pd() - shape("turtle") - - tri = getturtle() - tri.resizemode("auto") - turtle = Turtle() - turtle.resizemode("auto") - turtle.shape("turtle") - turtle.reset() - turtle.left(90) - turtle.speed(0) - turtle.up() - turtle.goto(280, 40) - turtle.lt(30) - turtle.down() - turtle.speed(6) - turtle.color("blue","orange") - turtle.pensize(2) - tri.speed(6) - setheading(towards(turtle)) - count = 1 - while tri.distance(turtle) > 4: - turtle.fd(3.5) - turtle.lt(0.6) - tri.setheading(tri.towards(turtle)) - tri.fd(4) - if count % 20 == 0: - turtle.stamp() - tri.stamp() - switchpen() - count += 1 - tri.write("CAUGHT! 
", font=("Arial", 16, "bold"), align="right") - tri.pencolor("black") - tri.pencolor("red") - - def baba(xdummy, ydummy): - clearscreen() - bye() - - time.sleep(2) - - while undobufferentries(): - tri.undo() - turtle.undo() - tri.fd(50) - tri.write(" Click me!", font = ("Courier", 12, "bold") ) - tri.onclick(baba, 1) - - demo1() - demo2() - exitonclick() diff --git a/Python313_13_x86_Template/Lib/types.py b/Python313_13_x86_Template/Lib/types.py deleted file mode 100644 index ff474c14..00000000 --- a/Python313_13_x86_Template/Lib/types.py +++ /dev/null @@ -1,345 +0,0 @@ -""" -Define names for built-in types that aren't directly accessible as a builtin. -""" - -import sys - -# Iterators in Python aren't a matter of type but of protocol. A large -# and changing number of builtin types implement *some* flavor of -# iterator. Don't check the type! Use hasattr to check for both -# "__iter__" and "__next__" attributes instead. - -def _f(): pass -FunctionType = type(_f) -LambdaType = type(lambda: None) # Same as FunctionType -CodeType = type(_f.__code__) -MappingProxyType = type(type.__dict__) -SimpleNamespace = type(sys.implementation) - -def _cell_factory(): - a = 1 - def f(): - nonlocal a - return f.__closure__[0] -CellType = type(_cell_factory()) - -def _g(): - yield 1 -GeneratorType = type(_g()) - -async def _c(): pass -_c = _c() -CoroutineType = type(_c) -_c.close() # Prevent ResourceWarning - -async def _ag(): - yield -_ag = _ag() -AsyncGeneratorType = type(_ag) - -class _C: - def _m(self): pass -MethodType = type(_C()._m) - -BuiltinFunctionType = type(len) -BuiltinMethodType = type([].append) # Same as BuiltinFunctionType - -WrapperDescriptorType = type(object.__init__) -MethodWrapperType = type(object().__str__) -MethodDescriptorType = type(str.join) -ClassMethodDescriptorType = type(dict.__dict__['fromkeys']) - -ModuleType = type(sys) - -try: - raise TypeError -except TypeError as exc: - TracebackType = type(exc.__traceback__) - FrameType = type(exc.__traceback__.tb_frame) - -GetSetDescriptorType = type(FunctionType.__code__) -MemberDescriptorType = type(FunctionType.__globals__) - -del sys, _f, _g, _C, _c, _ag, _cell_factory # Not for export - - -# Provide a PEP 3115 compliant mechanism for class creation -def new_class(name, bases=(), kwds=None, exec_body=None): - """Create a class object dynamically using the appropriate metaclass.""" - resolved_bases = resolve_bases(bases) - meta, ns, kwds = prepare_class(name, resolved_bases, kwds) - if exec_body is not None: - exec_body(ns) - if resolved_bases is not bases: - ns['__orig_bases__'] = bases - return meta(name, resolved_bases, ns, **kwds) - -def resolve_bases(bases): - """Resolve MRO entries dynamically as specified by PEP 560.""" - new_bases = list(bases) - updated = False - shift = 0 - for i, base in enumerate(bases): - if isinstance(base, type): - continue - if not hasattr(base, "__mro_entries__"): - continue - new_base = base.__mro_entries__(bases) - updated = True - if not isinstance(new_base, tuple): - raise TypeError("__mro_entries__ must return a tuple") - else: - new_bases[i+shift:i+shift+1] = new_base - shift += len(new_base) - 1 - if not updated: - return bases - return tuple(new_bases) - -def prepare_class(name, bases=(), kwds=None): - """Call the __prepare__ method of the appropriate metaclass. 
- - Returns (metaclass, namespace, kwds) as a 3-tuple - - *metaclass* is the appropriate metaclass - *namespace* is the prepared class namespace - *kwds* is an updated copy of the passed in kwds argument with any - 'metaclass' entry removed. If no kwds argument is passed in, this will - be an empty dict. - """ - if kwds is None: - kwds = {} - else: - kwds = dict(kwds) # Don't alter the provided mapping - if 'metaclass' in kwds: - meta = kwds.pop('metaclass') - else: - if bases: - meta = type(bases[0]) - else: - meta = type - if isinstance(meta, type): - # when meta is a type, we first determine the most-derived metaclass - # instead of invoking the initial candidate directly - meta = _calculate_meta(meta, bases) - if hasattr(meta, '__prepare__'): - ns = meta.__prepare__(name, bases, **kwds) - else: - ns = {} - return meta, ns, kwds - -def _calculate_meta(meta, bases): - """Calculate the most derived metaclass.""" - winner = meta - for base in bases: - base_meta = type(base) - if issubclass(winner, base_meta): - continue - if issubclass(base_meta, winner): - winner = base_meta - continue - # else: - raise TypeError("metaclass conflict: " - "the metaclass of a derived class " - "must be a (non-strict) subclass " - "of the metaclasses of all its bases") - return winner - - -def get_original_bases(cls, /): - """Return the class's "original" bases prior to modification by `__mro_entries__`. - - Examples:: - - from typing import TypeVar, Generic, NamedTuple, TypedDict - - T = TypeVar("T") - class Foo(Generic[T]): ... - class Bar(Foo[int], float): ... - class Baz(list[str]): ... - Eggs = NamedTuple("Eggs", [("a", int), ("b", str)]) - Spam = TypedDict("Spam", {"a": int, "b": str}) - - assert get_original_bases(Bar) == (Foo[int], float) - assert get_original_bases(Baz) == (list[str],) - assert get_original_bases(Eggs) == (NamedTuple,) - assert get_original_bases(Spam) == (TypedDict,) - assert get_original_bases(int) == (object,) - """ - try: - return cls.__dict__.get("__orig_bases__", cls.__bases__) - except AttributeError: - raise TypeError( - f"Expected an instance of type, not {type(cls).__name__!r}" - ) from None - - -class DynamicClassAttribute: - """Route attribute access on a class to __getattr__. - - This is a descriptor, used to define attributes that act differently when - accessed through an instance and through a class. Instance access remains - normal, but access to an attribute through a class will be routed to the - class's __getattr__ method; this is done by raising AttributeError. - - This allows one to have properties active on an instance, and have virtual - attributes on the class with the same name. (Enum used this between Python - versions 3.4 - 3.9 .) - - Subclass from this to use a different method of accessing virtual attributes - and still be treated properly by the inspect module. (Enum uses this since - Python 3.10 .) 
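# [editor's sketch] get_original_bases() in action; __mro_entries__ rewrites
# list[str] in __bases__, but the original spelling is preserved:
import types

class Baz(list[str]):
    pass

print(Baz.__bases__)                  # (<class 'list'>,)
print(types.get_original_bases(Baz))  # (list[str],)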
- - """ - def __init__(self, fget=None, fset=None, fdel=None, doc=None): - self.fget = fget - self.fset = fset - self.fdel = fdel - # next two lines make DynamicClassAttribute act the same as property - self.__doc__ = doc or fget.__doc__ - self.overwrite_doc = doc is None - # support for abstract methods - self.__isabstractmethod__ = bool(getattr(fget, '__isabstractmethod__', False)) - - def __get__(self, instance, ownerclass=None): - if instance is None: - if self.__isabstractmethod__: - return self - raise AttributeError() - elif self.fget is None: - raise AttributeError("unreadable attribute") - return self.fget(instance) - - def __set__(self, instance, value): - if self.fset is None: - raise AttributeError("can't set attribute") - self.fset(instance, value) - - def __delete__(self, instance): - if self.fdel is None: - raise AttributeError("can't delete attribute") - self.fdel(instance) - - def getter(self, fget): - fdoc = fget.__doc__ if self.overwrite_doc else None - result = type(self)(fget, self.fset, self.fdel, fdoc or self.__doc__) - result.overwrite_doc = self.overwrite_doc - return result - - def setter(self, fset): - result = type(self)(self.fget, fset, self.fdel, self.__doc__) - result.overwrite_doc = self.overwrite_doc - return result - - def deleter(self, fdel): - result = type(self)(self.fget, self.fset, fdel, self.__doc__) - result.overwrite_doc = self.overwrite_doc - return result - - -class _GeneratorWrapper: - # TODO: Implement this in C. - def __init__(self, gen): - self.__wrapped = gen - self.__isgen = gen.__class__ is GeneratorType - self.__name__ = getattr(gen, '__name__', None) - self.__qualname__ = getattr(gen, '__qualname__', None) - def send(self, val): - return self.__wrapped.send(val) - def throw(self, tp, *rest): - return self.__wrapped.throw(tp, *rest) - def close(self): - return self.__wrapped.close() - @property - def gi_code(self): - return self.__wrapped.gi_code - @property - def gi_frame(self): - return self.__wrapped.gi_frame - @property - def gi_running(self): - return self.__wrapped.gi_running - @property - def gi_yieldfrom(self): - return self.__wrapped.gi_yieldfrom - @property - def gi_suspended(self): - return self.__wrapped.gi_suspended - cr_code = gi_code - cr_frame = gi_frame - cr_running = gi_running - cr_await = gi_yieldfrom - cr_suspended = gi_suspended - def __next__(self): - return next(self.__wrapped) - def __iter__(self): - if self.__isgen: - return self.__wrapped - return self - __await__ = __iter__ - -def coroutine(func): - """Convert regular generator function to a coroutine.""" - - if not callable(func): - raise TypeError('types.coroutine() expects a callable') - - if (func.__class__ is FunctionType and - getattr(func, '__code__', None).__class__ is CodeType): - - co_flags = func.__code__.co_flags - - # Check if 'func' is a coroutine function. - # (0x180 == CO_COROUTINE | CO_ITERABLE_COROUTINE) - if co_flags & 0x180: - return func - - # Check if 'func' is a generator function. - # (0x20 == CO_GENERATOR) - if co_flags & 0x20: - # TODO: Implement this in C. - co = func.__code__ - # 0x100 == CO_ITERABLE_COROUTINE - func.__code__ = co.replace(co_flags=co.co_flags | 0x100) - return func - - # The following code is primarily to support functions that - # return generator-like objects (for instance generators - # compiled with Cython). - - # Delay functools and _collections_abc import for speeding up types import. 
- import functools - import _collections_abc - @functools.wraps(func) - def wrapped(*args, **kwargs): - coro = func(*args, **kwargs) - if (coro.__class__ is CoroutineType or - coro.__class__ is GeneratorType and coro.gi_code.co_flags & 0x100): - # 'coro' is a native coroutine object or an iterable coroutine - return coro - if (isinstance(coro, _collections_abc.Generator) and - not isinstance(coro, _collections_abc.Coroutine)): - # 'coro' is either a pure Python generator iterator, or it - # implements collections.abc.Generator (and does not implement - # collections.abc.Coroutine). - return _GeneratorWrapper(coro) - # 'coro' is either an instance of collections.abc.Coroutine or - # some other object -- pass it through. - return coro - - return wrapped - -GenericAlias = type(list[int]) -UnionType = type(int | str) - -EllipsisType = type(Ellipsis) -NoneType = type(None) -NotImplementedType = type(NotImplemented) - -def __getattr__(name): - if name == 'CapsuleType': - import _socket - return type(_socket.CAPI) - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") - -__all__ = [n for n in globals() if n[:1] != '_'] -__all__ += ['CapsuleType'] diff --git a/Python313_13_x86_Template/Lib/typing.py b/Python313_13_x86_Template/Lib/typing.py deleted file mode 100644 index cbc6d90e..00000000 --- a/Python313_13_x86_Template/Lib/typing.py +++ /dev/null @@ -1,3834 +0,0 @@ -""" -The typing module: Support for gradual typing as defined by PEP 484 and subsequent PEPs. - -Among other things, the module includes the following: -* Generic, Protocol, and internal machinery to support generic aliases. - All subscripted types like X[int], Union[int, str] are generic aliases. -* Various "special forms" that have unique meanings in type annotations: - NoReturn, Never, ClassVar, Self, Concatenate, Unpack, and others. -* Classes whose instances can be type arguments to generic classes and functions: - TypeVar, ParamSpec, TypeVarTuple. -* Public helper functions: get_type_hints, overload, cast, final, and others. -* Several protocols to support duck-typing: - SupportsFloat, SupportsIndex, SupportsAbs, and others. -* Special types: NewType, NamedTuple, TypedDict. -* Deprecated aliases for builtin types and collections.abc ABCs. - -Any name not present in __all__ is an implementation detail -that may be changed without notice. Use at your own risk! -""" - -from abc import abstractmethod, ABCMeta -import collections -from collections import defaultdict -import collections.abc -import copyreg -import functools -import operator -import sys -import types -from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias - -from _typing import ( - _idfunc, - TypeVar, - ParamSpec, - TypeVarTuple, - ParamSpecArgs, - ParamSpecKwargs, - TypeAliasType, - Generic, - NoDefault, -) - -# Please keep __all__ alphabetized within each category. -__all__ = [ - # Super-special typing primitives. - 'Annotated', - 'Any', - 'Callable', - 'ClassVar', - 'Concatenate', - 'Final', - 'ForwardRef', - 'Generic', - 'Literal', - 'Optional', - 'ParamSpec', - 'Protocol', - 'Tuple', - 'Type', - 'TypeVar', - 'TypeVarTuple', - 'Union', - - # ABCs (from collections.abc). - 'AbstractSet', # collections.abc.Set. 
- 'ByteString', - 'Container', - 'ContextManager', - 'Hashable', - 'ItemsView', - 'Iterable', - 'Iterator', - 'KeysView', - 'Mapping', - 'MappingView', - 'MutableMapping', - 'MutableSequence', - 'MutableSet', - 'Sequence', - 'Sized', - 'ValuesView', - 'Awaitable', - 'AsyncIterator', - 'AsyncIterable', - 'Coroutine', - 'Collection', - 'AsyncGenerator', - 'AsyncContextManager', - - # Structural checks, a.k.a. protocols. - 'Reversible', - 'SupportsAbs', - 'SupportsBytes', - 'SupportsComplex', - 'SupportsFloat', - 'SupportsIndex', - 'SupportsInt', - 'SupportsRound', - - # Concrete collection types. - 'ChainMap', - 'Counter', - 'Deque', - 'Dict', - 'DefaultDict', - 'List', - 'OrderedDict', - 'Set', - 'FrozenSet', - 'NamedTuple', # Not really a type. - 'TypedDict', # Not really a type. - 'Generator', - - # Other concrete types. - 'BinaryIO', - 'IO', - 'Match', - 'Pattern', - 'TextIO', - - # One-off things. - 'AnyStr', - 'assert_type', - 'assert_never', - 'cast', - 'clear_overloads', - 'dataclass_transform', - 'final', - 'get_args', - 'get_origin', - 'get_overloads', - 'get_protocol_members', - 'get_type_hints', - 'is_protocol', - 'is_typeddict', - 'LiteralString', - 'Never', - 'NewType', - 'no_type_check', - 'no_type_check_decorator', - 'NoDefault', - 'NoReturn', - 'NotRequired', - 'overload', - 'override', - 'ParamSpecArgs', - 'ParamSpecKwargs', - 'ReadOnly', - 'Required', - 'reveal_type', - 'runtime_checkable', - 'Self', - 'Text', - 'TYPE_CHECKING', - 'TypeAlias', - 'TypeGuard', - 'TypeIs', - 'TypeAliasType', - 'Unpack', -] - - -def _type_convert(arg, module=None, *, allow_special_forms=False): - """For converting None to type(None), and strings to ForwardRef.""" - if arg is None: - return type(None) - if isinstance(arg, str): - return ForwardRef(arg, module=module, is_class=allow_special_forms) - return arg - - -def _type_check(arg, msg, is_argument=True, module=None, *, allow_special_forms=False): - """Check that the argument is a type, and return it (internal helper). - - As a special case, accept None and return type(None) instead. Also wrap strings - into ForwardRef instances. Consider several corner cases, for example plain - special forms like Union are not valid, while Union[int, str] is OK, etc. - The msg argument is a human-readable error message, e.g.:: - - "Union[arg, ...]: arg should be a type." - - We append the repr() of the actual value (truncated to 100 chars). - """ - invalid_generic_forms = (Generic, Protocol) - if not allow_special_forms: - invalid_generic_forms += (ClassVar,) - if is_argument: - invalid_generic_forms += (Final,) - - arg = _type_convert(arg, module=module, allow_special_forms=allow_special_forms) - if (isinstance(arg, _GenericAlias) and - arg.__origin__ in invalid_generic_forms): - raise TypeError(f"{arg} is not valid as type argument") - if arg in (Any, LiteralString, NoReturn, Never, Self, TypeAlias): - return arg - if allow_special_forms and arg in (ClassVar, Final): - return arg - if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol): - raise TypeError(f"Plain {arg} is not valid as type argument") - if type(arg) is tuple: - raise TypeError(f"{msg} Got {arg!r:.100}.") - return arg - - -def _is_param_expr(arg): - return arg is ... or isinstance(arg, - (tuple, list, ParamSpec, _ConcatenateGenericAlias)) - - -def _should_unflatten_callable_args(typ, args): - """Internal helper for munging collections.abc.Callable's __args__. 
- - The canonical representation for a Callable's __args__ flattens the - argument types, see https://github.com/python/cpython/issues/86361. - - For example:: - - >>> import collections.abc - >>> P = ParamSpec('P') - >>> collections.abc.Callable[[int, int], str].__args__ == (int, int, str) - True - >>> collections.abc.Callable[P, str].__args__ == (P, str) - True - - As a result, if we need to reconstruct the Callable from its __args__, - we need to unflatten it. - """ - return ( - typ.__origin__ is collections.abc.Callable - and not (len(args) == 2 and _is_param_expr(args[0])) - ) - - -def _type_repr(obj): - """Return the repr() of an object, special-casing types (internal helper). - - If obj is a type, we return a shorter version than the default - type.__repr__, based on the module and qualified name, which is - typically enough to uniquely identify a type. For everything - else, we fall back on repr(obj). - """ - # When changing this function, don't forget about - # `_collections_abc._type_repr`, which does the same thing - # and must be consistent with this one. - if isinstance(obj, type): - if obj.__module__ == 'builtins': - return obj.__qualname__ - return f'{obj.__module__}.{obj.__qualname__}' - if obj is ...: - return '...' - if isinstance(obj, types.FunctionType): - return obj.__name__ - if isinstance(obj, tuple): - # Special case for `repr` of types with `ParamSpec`: - return '[' + ', '.join(_type_repr(t) for t in obj) + ']' - return repr(obj) - - -def _collect_type_parameters(args, *, enforce_default_ordering: bool = True): - """Collect all type parameters in args - in order of first appearance (lexicographic order). - - For example:: - - >>> P = ParamSpec('P') - >>> T = TypeVar('T') - >>> _collect_type_parameters((T, Callable[P, T])) - (~T, ~P) - """ - # required type parameter cannot appear after parameter with default - default_encountered = False - # or after TypeVarTuple - type_var_tuple_encountered = False - parameters = [] - for t in args: - if isinstance(t, type): - # We don't want __parameters__ descriptor of a bare Python class. - pass - elif isinstance(t, tuple): - # `t` might be a tuple, when `ParamSpec` is substituted with - # `[T, int]`, or `[int, *Ts]`, etc. - for x in t: - for collected in _collect_type_parameters([x]): - if collected not in parameters: - parameters.append(collected) - elif hasattr(t, '__typing_subst__'): - if t not in parameters: - if enforce_default_ordering: - if type_var_tuple_encountered and t.has_default(): - raise TypeError('Type parameter with a default' - ' follows TypeVarTuple') - - if t.has_default(): - default_encountered = True - elif default_encountered: - raise TypeError(f'Type parameter {t!r} without a default' - ' follows type parameter with a default') - - parameters.append(t) - else: - if _is_unpacked_typevartuple(t): - type_var_tuple_encountered = True - for x in getattr(t, '__parameters__', ()): - if x not in parameters: - parameters.append(x) - return tuple(parameters) - - -def _check_generic_specialization(cls, arguments): - """Check correct count for parameters of a generic cls (internal helper). - - This gives a nice error message in case of count mismatch. 
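# [editor's sketch] The flattened Callable.__args__ convention that
# _should_unflatten_callable_args() compensates for, checked directly:
import collections.abc
from typing import ParamSpec

P = ParamSpec("P")
assert collections.abc.Callable[[int, int], str].__args__ == (int, int, str)
assert collections.abc.Callable[P, str].__args__ == (P, str)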
- """ - expected_len = len(cls.__parameters__) - if not expected_len: - raise TypeError(f"{cls} is not a generic class") - actual_len = len(arguments) - if actual_len != expected_len: - # deal with defaults - if actual_len < expected_len: - # If the parameter at index `actual_len` in the parameters list - # has a default, then all parameters after it must also have - # one, because we validated as much in _collect_type_parameters(). - # That means that no error needs to be raised here, despite - # the number of arguments being passed not matching the number - # of parameters: all parameters that aren't explicitly - # specialized in this call are parameters with default values. - if cls.__parameters__[actual_len].has_default(): - return - - expected_len -= sum(p.has_default() for p in cls.__parameters__) - expect_val = f"at least {expected_len}" - else: - expect_val = expected_len - - raise TypeError(f"Too {'many' if actual_len > expected_len else 'few'} arguments" - f" for {cls}; actual {actual_len}, expected {expect_val}") - - -def _unpack_args(*args): - newargs = [] - for arg in args: - subargs = getattr(arg, '__typing_unpacked_tuple_args__', None) - if subargs is not None and not (subargs and subargs[-1] is ...): - newargs.extend(subargs) - else: - newargs.append(arg) - return newargs - -def _deduplicate(params, *, unhashable_fallback=False): - # Weed out strict duplicates, preserving the first of each occurrence. - try: - return dict.fromkeys(params) - except TypeError: - if not unhashable_fallback: - raise - # Happens for cases like `Annotated[dict, {'x': IntValidator()}]` - return _deduplicate_unhashable(params) - -def _deduplicate_unhashable(unhashable_params): - new_unhashable = [] - for t in unhashable_params: - if t not in new_unhashable: - new_unhashable.append(t) - return new_unhashable - -def _compare_args_orderless(first_args, second_args): - first_unhashable = _deduplicate_unhashable(first_args) - second_unhashable = _deduplicate_unhashable(second_args) - t = list(second_unhashable) - try: - for elem in first_unhashable: - t.remove(elem) - except ValueError: - return False - return not t - -def _remove_dups_flatten(parameters): - """Internal helper for Union creation and substitution. - - Flatten Unions among parameters, then remove duplicates. - """ - # Flatten out Union[Union[...], ...]. - params = [] - for p in parameters: - if isinstance(p, (_UnionGenericAlias, types.UnionType)): - params.extend(p.__args__) - else: - params.append(p) - - return tuple(_deduplicate(params, unhashable_fallback=True)) - - -def _flatten_literal_params(parameters): - """Internal helper for Literal creation: flatten Literals among parameters.""" - params = [] - for p in parameters: - if isinstance(p, _LiteralGenericAlias): - params.extend(p.__args__) - else: - params.append(p) - return tuple(params) - - -_cleanups = [] -_caches = {} - - -def _tp_cache(func=None, /, *, typed=False): - """Internal wrapper caching __getitem__ of generic types. - - For non-hashable arguments, the original function is used as a fallback. - """ - def decorator(func): - # The callback 'inner' references the newly created lru_cache - # indirectly by performing a lookup in the global '_caches' dictionary. - # This breaks a reference that can be problematic when combined with - # C API extensions that leak references to types. See GH-98253. 
-
-        cache = functools.lru_cache(typed=typed)(func)
-        _caches[func] = cache
-        _cleanups.append(cache.cache_clear)
-        del cache
-
-        @functools.wraps(func)
-        def inner(*args, **kwds):
-            try:
-                return _caches[func](*args, **kwds)
-            except TypeError:
-                pass  # All real errors (not unhashable args) are raised below.
-            return func(*args, **kwds)
-        return inner
-
-    if func is not None:
-        return decorator(func)
-
-    return decorator
-
-
-def _deprecation_warning_for_no_type_params_passed(funcname: str) -> None:
-    import warnings
-
-    depr_message = (
-        f"Failing to pass a value to the 'type_params' parameter "
-        f"of {funcname!r} is deprecated, as it leads to incorrect behaviour "
-        f"when calling {funcname} on a stringified annotation "
-        f"that references a PEP 695 type parameter. "
-        f"It will be disallowed in Python 3.15."
-    )
-    warnings.warn(depr_message, category=DeprecationWarning, stacklevel=3)
-
-
-class _Sentinel:
-    __slots__ = ()
-    def __repr__(self):
-        return '<sentinel>'
-
-
-_sentinel = _Sentinel()
-
-
-def _eval_type(t, globalns, localns, type_params=_sentinel, *, recursive_guard=frozenset()):
-    """Evaluate all forward references in the given type t.
-
-    For use of globalns and localns see the docstring for get_type_hints().
-    recursive_guard is used to prevent infinite recursion with a recursive
-    ForwardRef.
-    """
-    if type_params is _sentinel:
-        _deprecation_warning_for_no_type_params_passed("typing._eval_type")
-        type_params = ()
-    if isinstance(t, ForwardRef):
-        return t._evaluate(globalns, localns, type_params, recursive_guard=recursive_guard)
-    if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
-        if isinstance(t, GenericAlias):
-            args = tuple(
-                ForwardRef(arg) if isinstance(arg, str) else arg
-                for arg in t.__args__
-            )
-            is_unpacked = t.__unpacked__
-            if _should_unflatten_callable_args(t, args):
-                t = t.__origin__[(args[:-1], args[-1])]
-            else:
-                t = t.__origin__[args]
-            if is_unpacked:
-                t = Unpack[t]
-
-        ev_args = tuple(
-            _eval_type(
-                a, globalns, localns, type_params, recursive_guard=recursive_guard
-            )
-            for a in t.__args__
-        )
-        if ev_args == t.__args__:
-            return t
-        if isinstance(t, GenericAlias):
-            return GenericAlias(t.__origin__, ev_args)
-        if isinstance(t, types.UnionType):
-            return functools.reduce(operator.or_, ev_args)
-        else:
-            return t.copy_with(ev_args)
-    return t
-
-
-class _Final:
-    """Mixin to prohibit subclassing."""
-
-    __slots__ = ('__weakref__',)
-
-    def __init_subclass__(cls, /, *args, **kwds):
-        if '_root' not in kwds:
-            raise TypeError("Cannot subclass special typing classes")
-
-
-class _NotIterable:
-    """Mixin to prevent iteration, without being compatible with Iterable.
-
-    That is, we could do::
-
-        def __iter__(self): raise TypeError()
-
-    But this would make users of this mixin duck type-compatible with
-    collections.abc.Iterable - isinstance(foo, Iterable) would be True.
-
-    Luckily, we can instead prevent iteration by setting __iter__ to None,
-    which is treated specially.
-    """
-
-    __slots__ = ()
-    __iter__ = None
-
-
-# Internal indicator of special typing constructs.
-# See __doc__ instance attribute for specific docs.
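# [editor's sketch] Observable behaviour of the special-form machinery defined
# below: plain forms cannot be instantiated or used with isinstance():
import typing

try:
    typing.NoReturn()
except TypeError as exc:
    print(exc)                 # Cannot instantiate typing.NoReturn
try:
    isinstance(1, typing.Any)
except TypeError as exc:
    print(exc)                 # typing.Any cannot be used with isinstance()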
-class _SpecialForm(_Final, _NotIterable, _root=True): - __slots__ = ('_name', '__doc__', '_getitem') - - def __init__(self, getitem): - self._getitem = getitem - self._name = getitem.__name__ - self.__doc__ = getitem.__doc__ - - def __getattr__(self, item): - if item in {'__name__', '__qualname__'}: - return self._name - - raise AttributeError(item) - - def __mro_entries__(self, bases): - raise TypeError(f"Cannot subclass {self!r}") - - def __repr__(self): - return 'typing.' + self._name - - def __reduce__(self): - return self._name - - def __call__(self, *args, **kwds): - raise TypeError(f"Cannot instantiate {self!r}") - - def __or__(self, other): - return Union[self, other] - - def __ror__(self, other): - return Union[other, self] - - def __instancecheck__(self, obj): - raise TypeError(f"{self} cannot be used with isinstance()") - - def __subclasscheck__(self, cls): - raise TypeError(f"{self} cannot be used with issubclass()") - - @_tp_cache - def __getitem__(self, parameters): - return self._getitem(self, parameters) - - -class _TypedCacheSpecialForm(_SpecialForm, _root=True): - def __getitem__(self, parameters): - if not isinstance(parameters, tuple): - parameters = (parameters,) - return self._getitem(self, *parameters) - - -class _AnyMeta(type): - def __instancecheck__(self, obj): - if self is Any: - raise TypeError("typing.Any cannot be used with isinstance()") - return super().__instancecheck__(obj) - - def __repr__(self): - if self is Any: - return "typing.Any" - return super().__repr__() # respect to subclasses - - -class Any(metaclass=_AnyMeta): - """Special type indicating an unconstrained type. - - - Any is compatible with every type. - - Any assumed to have all methods. - - All values assumed to be instances of Any. - - Note that all the above statements are true from the point of view of - static type checkers. At runtime, Any should not be used with instance - checks. - """ - - def __new__(cls, *args, **kwargs): - if cls is Any: - raise TypeError("Any cannot be instantiated") - return super().__new__(cls) - - -@_SpecialForm -def NoReturn(self, parameters): - """Special type indicating functions that never return. - - Example:: - - from typing import NoReturn - - def stop() -> NoReturn: - raise Exception('no way') - - NoReturn can also be used as a bottom type, a type that - has no values. Starting in Python 3.11, the Never type should - be used for this concept instead. Type checkers should treat the two - equivalently. - """ - raise TypeError(f"{self} is not subscriptable") - -# This is semantically identical to NoReturn, but it is implemented -# separately so that type checkers can distinguish between the two -# if they want. -@_SpecialForm -def Never(self, parameters): - """The bottom type, a type that has no members. - - This can be used to define a function that should never be - called, or a function that never returns:: - - from typing import Never - - def never_call_me(arg: Never) -> None: - pass - - def int_or_str(arg: int | str) -> None: - never_call_me(arg) # type checker error - match arg: - case int(): - print("It's an int") - case str(): - print("It's a str") - case _: - never_call_me(arg) # OK, arg is of type Never - """ - raise TypeError(f"{self} is not subscriptable") - - -@_SpecialForm -def Self(self, parameters): - """Used to spell the type of "self" in classes. - - Example:: - - from typing import Self - - class Foo: - def return_self(self) -> Self: - ... 
- return self - - This is especially useful for: - - classmethods that are used as alternative constructors - - annotating an `__enter__` method which returns self - """ - raise TypeError(f"{self} is not subscriptable") - - -@_SpecialForm -def LiteralString(self, parameters): - """Represents an arbitrary literal string. - - Example:: - - from typing import LiteralString - - def run_query(sql: LiteralString) -> None: - ... - - def caller(arbitrary_string: str, literal_string: LiteralString) -> None: - run_query("SELECT * FROM students") # OK - run_query(literal_string) # OK - run_query("SELECT * FROM " + literal_string) # OK - run_query(arbitrary_string) # type checker error - run_query( # type checker error - f"SELECT * FROM students WHERE name = {arbitrary_string}" - ) - - Only string literals and other LiteralStrings are compatible - with LiteralString. This provides a tool to help prevent - security issues such as SQL injection. - """ - raise TypeError(f"{self} is not subscriptable") - - -@_SpecialForm -def ClassVar(self, parameters): - """Special type construct to mark class variables. - - An annotation wrapped in ClassVar indicates that a given - attribute is intended to be used as a class variable and - should not be set on instances of that class. - - Usage:: - - class Starship: - stats: ClassVar[dict[str, int]] = {} # class variable - damage: int = 10 # instance variable - - ClassVar accepts only types and cannot be further subscribed. - - Note that ClassVar is not a class itself, and should not - be used with isinstance() or issubclass(). - """ - item = _type_check(parameters, f'{self} accepts only single type.', allow_special_forms=True) - return _GenericAlias(self, (item,)) - -@_SpecialForm -def Final(self, parameters): - """Special typing construct to indicate final names to type checkers. - - A final name cannot be re-assigned or overridden in a subclass. - - For example:: - - MAX_SIZE: Final = 9000 - MAX_SIZE += 1 # Error reported by type checker - - class Connection: - TIMEOUT: Final[int] = 10 - - class FastConnector(Connection): - TIMEOUT = 1 # Error reported by type checker - - There is no runtime checking of these properties. - """ - item = _type_check(parameters, f'{self} accepts only single type.', allow_special_forms=True) - return _GenericAlias(self, (item,)) - -@_SpecialForm -def Union(self, parameters): - """Union type; Union[X, Y] means either X or Y. - - On Python 3.10 and higher, the | operator - can also be used to denote unions; - X | Y means the same thing to the type checker as Union[X, Y]. - - To define a union, use e.g. Union[int, str]. Details: - - The arguments must be types and there must be at least one. - - None as an argument is a special case and is replaced by - type(None). - - Unions of unions are flattened, e.g.:: - - assert Union[Union[int, str], float] == Union[int, str, float] - - - Unions of a single argument vanish, e.g.:: - - assert Union[int] == int # The constructor actually returns int - - - Redundant arguments are skipped, e.g.:: - - assert Union[int, str, int] == Union[int, str] - - - When comparing unions, the argument order is ignored, e.g.:: - - assert Union[int, str] == Union[str, int] - - - You cannot subclass or instantiate a union. - - You can use Optional[X] as a shorthand for Union[X, None]. - """ - if parameters == (): - raise TypeError("Cannot take a Union of no types.") - if not isinstance(parameters, tuple): - parameters = (parameters,) - msg = "Union[arg, ...]: each arg must be a type." 
- parameters = tuple(_type_check(p, msg) for p in parameters) - parameters = _remove_dups_flatten(parameters) - if len(parameters) == 1: - return parameters[0] - if len(parameters) == 2 and type(None) in parameters: - return _UnionGenericAlias(self, parameters, name="Optional") - return _UnionGenericAlias(self, parameters) - -def _make_union(left, right): - """Used from the C implementation of TypeVar. - - TypeVar.__or__ calls this instead of returning types.UnionType - because we want to allow unions between TypeVars and strings - (forward references). - """ - return Union[left, right] - -@_SpecialForm -def Optional(self, parameters): - """Optional[X] is equivalent to Union[X, None].""" - arg = _type_check(parameters, f"{self} requires a single type.") - return Union[arg, type(None)] - -@_TypedCacheSpecialForm -@_tp_cache(typed=True) -def Literal(self, *parameters): - """Special typing form to define literal types (a.k.a. value types). - - This form can be used to indicate to type checkers that the corresponding - variable or function parameter has a value equivalent to the provided - literal (or one of several literals):: - - def validate_simple(data: Any) -> Literal[True]: # always returns True - ... - - MODE = Literal['r', 'rb', 'w', 'wb'] - def open_helper(file: str, mode: MODE) -> str: - ... - - open_helper('/some/path', 'r') # Passes type check - open_helper('/other/path', 'typo') # Error in type checker - - Literal[...] cannot be subclassed. At runtime, an arbitrary value - is allowed as type argument to Literal[...], but type checkers may - impose restrictions. - """ - # There is no '_type_check' call because arguments to Literal[...] are - # values, not types. - parameters = _flatten_literal_params(parameters) - - try: - parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters)))) - except TypeError: # unhashable parameters - pass - - return _LiteralGenericAlias(self, parameters) - - -@_SpecialForm -def TypeAlias(self, parameters): - """Special form for marking type aliases. - - Use TypeAlias to indicate that an assignment should - be recognized as a proper type alias definition by type - checkers. - - For example:: - - Predicate: TypeAlias = Callable[..., bool] - - It's invalid when used anywhere except as in the example above. - """ - raise TypeError(f"{self} is not subscriptable") - - -@_SpecialForm -def Concatenate(self, parameters): - """Special form for annotating higher-order functions. - - ``Concatenate`` can be used in conjunction with ``ParamSpec`` and - ``Callable`` to represent a higher-order function which adds, removes or - transforms the parameters of a callable. - - For example:: - - Callable[Concatenate[int, P], int] - - See PEP 612 for detailed information. - """ - if parameters == (): - raise TypeError("Cannot take a Concatenate of no types.") - if not isinstance(parameters, tuple): - parameters = (parameters,) - if not (parameters[-1] is ... or isinstance(parameters[-1], ParamSpec)): - raise TypeError("The last parameter to Concatenate should be a " - "ParamSpec variable or ellipsis.") - msg = "Concatenate[arg, ...]: each arg must be a type." - parameters = (*(_type_check(p, msg) for p in parameters[:-1]), parameters[-1]) - return _ConcatenateGenericAlias(self, parameters) - - -@_SpecialForm -def TypeGuard(self, parameters): - """Special typing construct for marking user-defined type predicate functions. - - ``TypeGuard`` can be used to annotate the return type of a user-defined - type predicate function. 
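# [editor's sketch] The documented Union/Optional/Literal identities from the
# forms above, verified at runtime:
from typing import Literal, Optional, Union

assert Union[int] is int                        # single-arg unions collapse
assert Union[int, str, int] == Union[int, str]  # duplicates are removed
assert Optional[int] == Union[int, None]
assert Literal[1, 2, 1].__args__ == (1, 2)      # Literal de-duplicates values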
``TypeGuard`` only accepts a single type argument. - At runtime, functions marked this way should return a boolean. - - ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static - type checkers to determine a more precise type of an expression within a - program's code flow. Usually type narrowing is done by analyzing - conditional code flow and applying the narrowing to a block of code. The - conditional expression here is sometimes referred to as a "type predicate". - - Sometimes it would be convenient to use a user-defined boolean function - as a type predicate. Such a function should use ``TypeGuard[...]`` or - ``TypeIs[...]`` as its return type to alert static type checkers to - this intention. ``TypeGuard`` should be used over ``TypeIs`` when narrowing - from an incompatible type (e.g., ``list[object]`` to ``list[int]``) or when - the function does not return ``True`` for all instances of the narrowed type. - - Using ``-> TypeGuard[NarrowedType]`` tells the static type checker that - for a given function: - - 1. The return value is a boolean. - 2. If the return value is ``True``, the type of its argument - is ``NarrowedType``. - - For example:: - - def is_str_list(val: list[object]) -> TypeGuard[list[str]]: - '''Determines whether all objects in the list are strings''' - return all(isinstance(x, str) for x in val) - - def func1(val: list[object]): - if is_str_list(val): - # Type of ``val`` is narrowed to ``list[str]``. - print(" ".join(val)) - else: - # Type of ``val`` remains as ``list[object]``. - print("Not a list of strings!") - - Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower - form of ``TypeA`` (it can even be a wider form) and this may lead to - type-unsafe results. The main reason is to allow for things like - narrowing ``list[object]`` to ``list[str]`` even though the latter is not - a subtype of the former, since ``list`` is invariant. The responsibility of - writing type-safe type predicates is left to the user. - - ``TypeGuard`` also works with type variables. For more information, see - PEP 647 (User-Defined Type Guards). - """ - item = _type_check(parameters, f'{self} accepts only single type.') - return _GenericAlias(self, (item,)) - - -@_SpecialForm -def TypeIs(self, parameters): - """Special typing construct for marking user-defined type predicate functions. - - ``TypeIs`` can be used to annotate the return type of a user-defined - type predicate function. ``TypeIs`` only accepts a single type argument. - At runtime, functions marked this way should return a boolean and accept - at least one argument. - - ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static - type checkers to determine a more precise type of an expression within a - program's code flow. Usually type narrowing is done by analyzing - conditional code flow and applying the narrowing to a block of code. The - conditional expression here is sometimes referred to as a "type predicate". - - Sometimes it would be convenient to use a user-defined boolean function - as a type predicate. Such a function should use ``TypeIs[...]`` or - ``TypeGuard[...]`` as its return type to alert static type checkers to - this intention. ``TypeIs`` usually has more intuitive behavior than - ``TypeGuard``, but it cannot be used when the input and output types - are incompatible (e.g., ``list[object]`` to ``list[int]``) or when the - function does not return ``True`` for all instances of the narrowed type. 
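# [editor's sketch] The is_str_list() predicate from the TypeGuard docstring
# above, runnable; at runtime it is just a bool-returning function:
from typing import TypeGuard

def is_str_list(val: list[object]) -> TypeGuard[list[str]]:
    return all(isinstance(x, str) for x in val)

print(is_str_list(["a", "b"]), is_str_list([1, "b"]))   # True False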
-
-    Using ``-> TypeIs[NarrowedType]`` tells the static type checker that for
-    a given function:
-
-    1. The return value is a boolean.
-    2. If the return value is ``True``, the type of its argument
-       is the intersection of the argument's original type and
-       ``NarrowedType``.
-    3. If the return value is ``False``, the type of its argument
-       is narrowed to exclude ``NarrowedType``.
-
-    For example::
-
-        from typing import assert_type, final, TypeIs
-
-        class Parent: pass
-        class Child(Parent): pass
-        @final
-        class Unrelated: pass
-
-        def is_parent(val: object) -> TypeIs[Parent]:
-            return isinstance(val, Parent)
-
-        def run(arg: Child | Unrelated):
-            if is_parent(arg):
-                # Type of ``arg`` is narrowed to the intersection
-                # of ``Parent`` and ``Child``, which is equivalent to
-                # ``Child``.
-                assert_type(arg, Child)
-            else:
-                # Type of ``arg`` is narrowed to exclude ``Parent``,
-                # so only ``Unrelated`` is left.
-                assert_type(arg, Unrelated)
-
-    The type inside ``TypeIs`` must be consistent with the type of the
-    function's argument; if it is not, static type checkers will raise
-    an error. An incorrectly written ``TypeIs`` function can lead to
-    unsound behavior in the type system; it is the user's responsibility
-    to write such functions in a type-safe manner.
-
-    ``TypeIs`` also works with type variables. For more information, see
-    PEP 742 (Narrowing types with ``TypeIs``).
-    """
-    item = _type_check(parameters, f'{self} accepts only single type.')
-    return _GenericAlias(self, (item,))
-
-
-class ForwardRef(_Final, _root=True):
-    """Internal wrapper to hold a forward reference."""
-
-    __slots__ = ('__forward_arg__', '__forward_code__',
-                 '__forward_evaluated__', '__forward_value__',
-                 '__forward_is_argument__', '__forward_is_class__',
-                 '__forward_module__')
-
-    def __init__(self, arg, is_argument=True, module=None, *, is_class=False):
-        if not isinstance(arg, str):
-            raise TypeError(f"Forward reference must be a string -- got {arg!r}")
-
-        try:
-            code = compile(_rewrite_star_unpack(arg), '<string>', 'eval')
-        except SyntaxError:
-            raise SyntaxError(f"Forward reference must be an expression -- got {arg!r}")
-
-        self.__forward_arg__ = arg
-        self.__forward_code__ = code
-        self.__forward_evaluated__ = False
-        self.__forward_value__ = None
-        self.__forward_is_argument__ = is_argument
-        self.__forward_is_class__ = is_class
-        self.__forward_module__ = module
-
-    def _evaluate(self, globalns, localns, type_params=_sentinel, *, recursive_guard):
-        if type_params is _sentinel:
-            _deprecation_warning_for_no_type_params_passed("typing.ForwardRef._evaluate")
-            type_params = ()
-        if self.__forward_arg__ in recursive_guard:
-            return self
-        if not self.__forward_evaluated__ or localns is not globalns:
-            if globalns is None and localns is None:
-                globalns = localns = {}
-            elif globalns is None:
-                globalns = localns
-            elif localns is None:
-                localns = globalns
-            if self.__forward_module__ is not None:
-                globalns = getattr(
-                    sys.modules.get(self.__forward_module__, None), '__dict__', globalns
-                )
-
-            # Type parameters require some special handling,
-            # as they exist in their own scope
-            # but `eval()` does not have a dedicated parameter for that scope.
-            # For classes, names in type parameter scopes should override
-            # names in the global scope (which here are called `localns`!),
-            # but should in turn be overridden by names in the class scope
-            # (which here are called `globalns`!)
- if type_params: - globalns, localns = dict(globalns), dict(localns) - for param in type_params: - param_name = param.__name__ - if not self.__forward_is_class__ or param_name not in globalns: - globalns[param_name] = param - localns.pop(param_name, None) - - type_ = _type_check( - eval(self.__forward_code__, globalns, localns), - "Forward references must evaluate to types.", - is_argument=self.__forward_is_argument__, - allow_special_forms=self.__forward_is_class__, - ) - self.__forward_value__ = _eval_type( - type_, - globalns, - localns, - type_params, - recursive_guard=(recursive_guard | {self.__forward_arg__}), - ) - self.__forward_evaluated__ = True - return self.__forward_value__ - - def __eq__(self, other): - if not isinstance(other, ForwardRef): - return NotImplemented - if self.__forward_evaluated__ and other.__forward_evaluated__: - return (self.__forward_arg__ == other.__forward_arg__ and - self.__forward_value__ == other.__forward_value__) - return (self.__forward_arg__ == other.__forward_arg__ and - self.__forward_module__ == other.__forward_module__) - - def __hash__(self): - return hash((self.__forward_arg__, self.__forward_module__)) - - def __or__(self, other): - return Union[self, other] - - def __ror__(self, other): - return Union[other, self] - - def __repr__(self): - if self.__forward_module__ is None: - module_repr = '' - else: - module_repr = f', module={self.__forward_module__!r}' - return f'ForwardRef({self.__forward_arg__!r}{module_repr})' - - -def _rewrite_star_unpack(arg): - """If the given argument annotation expression is a star unpack e.g. `'*Ts'` - rewrite it to a valid expression. - """ - if arg.startswith("*"): - return f"({arg},)[0]" # E.g. (*Ts,)[0] or (*tuple[int, int],)[0] - else: - return arg - - -def _is_unpacked_typevartuple(x: Any) -> bool: - return ((not isinstance(x, type)) and - getattr(x, '__typing_is_unpacked_typevartuple__', False)) - - -def _is_typevar_like(x: Any) -> bool: - return isinstance(x, (TypeVar, ParamSpec)) or _is_unpacked_typevartuple(x) - - -def _typevar_subst(self, arg): - msg = "Parameters to generic types must be types." 
-    arg = _type_check(arg, msg, is_argument=True)
-    if ((isinstance(arg, _GenericAlias) and arg.__origin__ is Unpack) or
-            (isinstance(arg, GenericAlias) and getattr(arg, '__unpacked__', False))):
-        raise TypeError(f"{arg} is not valid as type argument")
-    return arg
-
-
-def _typevartuple_prepare_subst(self, alias, args):
-    params = alias.__parameters__
-    typevartuple_index = params.index(self)
-    for param in params[typevartuple_index + 1:]:
-        if isinstance(param, TypeVarTuple):
-            raise TypeError(f"More than one TypeVarTuple parameter in {alias}")
-
-    alen = len(args)
-    plen = len(params)
-    left = typevartuple_index
-    right = plen - typevartuple_index - 1
-    var_tuple_index = None
-    fillarg = None
-    for k, arg in enumerate(args):
-        if not isinstance(arg, type):
-            subargs = getattr(arg, '__typing_unpacked_tuple_args__', None)
-            if subargs and len(subargs) == 2 and subargs[-1] is ...:
-                if var_tuple_index is not None:
-                    raise TypeError("More than one unpacked arbitrary-length tuple argument")
-                var_tuple_index = k
-                fillarg = subargs[0]
-    if var_tuple_index is not None:
-        left = min(left, var_tuple_index)
-        right = min(right, alen - var_tuple_index - 1)
-    elif left + right > alen:
-        raise TypeError(f"Too few arguments for {alias};"
-                        f" actual {alen}, expected at least {plen-1}")
-    if left == alen - right and self.has_default():
-        replacement = _unpack_args(self.__default__)
-    else:
-        replacement = args[left: alen - right]
-
-    return (
-        *args[:left],
-        *([fillarg]*(typevartuple_index - left)),
-        replacement,
-        *([fillarg]*(plen - right - left - typevartuple_index - 1)),
-        *args[alen - right:],
-    )
-
-
-def _paramspec_subst(self, arg):
-    if isinstance(arg, (list, tuple)):
-        arg = tuple(_type_check(a, "Expected a type.") for a in arg)
-    elif not _is_param_expr(arg):
-        raise TypeError(f"Expected a list of types, an ellipsis, "
-                        f"ParamSpec, or Concatenate. Got {arg}")
-    return arg
-
-
-def _paramspec_prepare_subst(self, alias, args):
-    params = alias.__parameters__
-    i = params.index(self)
-    if i == len(args) and self.has_default():
-        args = (*args, self.__default__)
-    if i >= len(args):
-        raise TypeError(f"Too few arguments for {alias}")
-    # Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.
-    if len(params) == 1 and not _is_param_expr(args[0]):
-        assert i == 0
-        args = (args,)
-    # Convert lists to tuples to help other libraries cache the results.
-    elif isinstance(args[i], list):
-        args = (*args[:i], tuple(args[i]), *args[i+1:])
-    return args
-
-
-@_tp_cache
-def _generic_class_getitem(cls, args):
-    """Parameterizes a generic class.
-
-    At least, parameterizing a generic class is the *main* thing this method
-    does. For example, for some generic class `Foo`, this is called when we
-    do `Foo[int]` - there, with `cls=Foo` and `args=int`.
-
-    However, note that this method is also called when defining generic
-    classes in the first place with `class Foo(Generic[T]): ...`.
-    """
-    if not isinstance(args, tuple):
-        args = (args,)
-
-    args = tuple(_type_convert(p) for p in args)
-    is_generic_or_protocol = cls in (Generic, Protocol)
-
-    if is_generic_or_protocol:
-        # Generic and Protocol can only be subscripted with unique type variables.
-        if not args:
-            raise TypeError(
-                f"Parameter list to {cls.__qualname__}[...] cannot be empty"
-            )
-        if not all(_is_typevar_like(p) for p in args):
-            raise TypeError(
-                f"Parameters to {cls.__name__}[...] must all be type variables "
-                f"or parameter specification variables.")
-        if len(set(args)) != len(args):
-            raise TypeError(
-                f"Parameters to {cls.__name__}[...] must all be unique")
-    else:
-        # Subscripting a regular Generic subclass.
-        try:
-            parameters = cls.__parameters__
-        except AttributeError as e:
-            init_subclass = getattr(cls, '__init_subclass__', None)
-            if init_subclass not in {None, Generic.__init_subclass__}:
-                e.add_note(
-                    f"Note: this exception may have been caused by "
-                    f"{init_subclass.__qualname__!r} (or the "
-                    f"'__init_subclass__' method on a superclass) not "
-                    f"calling 'super().__init_subclass__()'"
-                )
-            raise
-        for param in parameters:
-            prepare = getattr(param, '__typing_prepare_subst__', None)
-            if prepare is not None:
-                args = prepare(cls, args)
-        _check_generic_specialization(cls, args)
-
-        new_args = []
-        for param, new_arg in zip(parameters, args):
-            if isinstance(param, TypeVarTuple):
-                new_args.extend(new_arg)
-            else:
-                new_args.append(new_arg)
-        args = tuple(new_args)
-
-    return _GenericAlias(cls, args)
-
-
-def _generic_init_subclass(cls, *args, **kwargs):
-    super(Generic, cls).__init_subclass__(*args, **kwargs)
-    tvars = []
-    if '__orig_bases__' in cls.__dict__:
-        error = Generic in cls.__orig_bases__
-    else:
-        error = (Generic in cls.__bases__ and
-                 cls.__name__ != 'Protocol' and
-                 type(cls) != _TypedDictMeta)
-    if error:
-        raise TypeError("Cannot inherit from plain Generic")
-    if '__orig_bases__' in cls.__dict__:
-        tvars = _collect_type_parameters(cls.__orig_bases__)
-        # Look for Generic[T1, ..., Tn].
-        # If found, tvars must be a subset of it.
-        # If not found, tvars is it.
-        # Also check for and reject plain Generic,
-        # and reject multiple Generic[...].
-        gvars = None
-        for base in cls.__orig_bases__:
-            if (isinstance(base, _GenericAlias) and
-                    base.__origin__ is Generic):
-                if gvars is not None:
-                    raise TypeError(
-                        "Cannot inherit from Generic[...] multiple times.")
-                gvars = base.__parameters__
-        if gvars is not None:
-            tvarset = set(tvars)
-            gvarset = set(gvars)
-            if not tvarset <= gvarset:
-                s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
-                s_args = ', '.join(str(g) for g in gvars)
-                raise TypeError(f"Some type variables ({s_vars}) are"
-                                f" not listed in Generic[{s_args}]")
-            tvars = gvars
-    cls.__parameters__ = tuple(tvars)
-
-
-def _is_dunder(attr):
-    return attr.startswith('__') and attr.endswith('__')
-
-class _BaseGenericAlias(_Final, _root=True):
-    """The central part of the internal API.
-
-    This represents a generic version of type 'origin' with type arguments 'params'.
-    There are two kind of these aliases: user defined and special. The special ones
-    are wrappers around builtin collections and ABCs in collections.abc. These must
-    have 'name' always set. If 'inst' is False, then the alias can't be instantiated;
-    this is used by e.g. typing.List and typing.Dict.
-    """
-
-    def __init__(self, origin, *, inst=True, name=None):
-        self._inst = inst
-        self._name = name
-        self.__origin__ = origin
-        self.__slots__ = None  # This is not documented.
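    # A minimal editorial sketch (not part of the original file): the runtime
    # effect of the 'inst' and '__origin__' attributes set above, shown with
    # the public typing.List alias defined further down in this module.
    #
    #   >>> import typing
    #   >>> typing.List.__origin__
    #   <class 'list'>
    #   >>> typing.List()
    #   Traceback (most recent call last):
    #       ...
    #   TypeError: Type List cannot be instantiated; use list() instead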
- - def __call__(self, *args, **kwargs): - if not self._inst: - raise TypeError(f"Type {self._name} cannot be instantiated; " - f"use {self.__origin__.__name__}() instead") - result = self.__origin__(*args, **kwargs) - try: - result.__orig_class__ = self - # Some objects raise TypeError (or something even more exotic) - # if you try to set attributes on them; we guard against that here - except Exception: - pass - return result - - def __mro_entries__(self, bases): - res = [] - if self.__origin__ not in bases: - res.append(self.__origin__) - - # Check if any base that occurs after us in `bases` is either itself a - # subclass of Generic, or something which will add a subclass of Generic - # to `__bases__` via its `__mro_entries__`. If not, add Generic - # ourselves. The goal is to ensure that Generic (or a subclass) will - # appear exactly once in the final bases tuple. If we let it appear - # multiple times, we risk "can't form a consistent MRO" errors. - i = bases.index(self) - for b in bases[i+1:]: - if isinstance(b, _BaseGenericAlias): - break - if not isinstance(b, type): - meth = getattr(b, "__mro_entries__", None) - new_bases = meth(bases) if meth else None - if ( - isinstance(new_bases, tuple) and - any( - isinstance(b2, type) and issubclass(b2, Generic) - for b2 in new_bases - ) - ): - break - elif issubclass(b, Generic): - break - else: - res.append(Generic) - return tuple(res) - - def __getattr__(self, attr): - if attr in {'__name__', '__qualname__'}: - return self._name or self.__origin__.__name__ - - # We are careful for copy and pickle. - # Also for simplicity we don't relay any dunder names - if '__origin__' in self.__dict__ and not _is_dunder(attr): - return getattr(self.__origin__, attr) - raise AttributeError(attr) - - def __setattr__(self, attr, val): - if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams', '_defaults'}: - super().__setattr__(attr, val) - else: - setattr(self.__origin__, attr, val) - - def __instancecheck__(self, obj): - return self.__subclasscheck__(type(obj)) - - def __subclasscheck__(self, cls): - raise TypeError("Subscripted generics cannot be used with" - " class and instance checks") - - def __dir__(self): - return list(set(super().__dir__() - + [attr for attr in dir(self.__origin__) if not _is_dunder(attr)])) - - -# Special typing constructs Union, Optional, Generic, Callable and Tuple -# use three special attributes for internal bookkeeping of generic types: -# * __parameters__ is a tuple of unique free type parameters of a generic -# type, for example, Dict[T, T].__parameters__ == (T,); -# * __origin__ keeps a reference to a type that was subscripted, -# e.g., Union[T, int].__origin__ == Union, or the non-generic version of -# the type. -# * __args__ is a tuple of all arguments used in subscripting, -# e.g., Dict[T, int].__args__ == (T, int). - - -class _GenericAlias(_BaseGenericAlias, _root=True): - # The type of parameterized generics. - # - # That is, for example, `type(List[int])` is `_GenericAlias`. - # - # Objects which are instances of this class include: - # * Parameterized container types, e.g. `Tuple[int]`, `List[int]`. - # * Note that native container types, e.g. `tuple`, `list`, use - # `types.GenericAlias` instead. - # * Parameterized classes: - # class C[T]: pass - # # C[int] is a _GenericAlias - # * `Callable` aliases, generic `Callable` aliases, and - # parameterized `Callable` aliases: - # T = TypeVar('T') - # # _CallableGenericAlias inherits from _GenericAlias. 
- # A = Callable[[], None] # _CallableGenericAlias - # B = Callable[[T], None] # _CallableGenericAlias - # C = B[int] # _CallableGenericAlias - # * Parameterized `Final`, `ClassVar`, `TypeGuard`, and `TypeIs`: - # # All _GenericAlias - # Final[int] - # ClassVar[float] - # TypeGuard[bool] - # TypeIs[range] - - def __init__(self, origin, args, *, inst=True, name=None): - super().__init__(origin, inst=inst, name=name) - if not isinstance(args, tuple): - args = (args,) - self.__args__ = tuple(... if a is _TypingEllipsis else - a for a in args) - enforce_default_ordering = origin in (Generic, Protocol) - self.__parameters__ = _collect_type_parameters( - args, - enforce_default_ordering=enforce_default_ordering, - ) - if not name: - self.__module__ = origin.__module__ - - def __eq__(self, other): - if not isinstance(other, _GenericAlias): - return NotImplemented - return (self.__origin__ == other.__origin__ - and self.__args__ == other.__args__) - - def __hash__(self): - return hash((self.__origin__, self.__args__)) - - def __or__(self, right): - return Union[self, right] - - def __ror__(self, left): - return Union[left, self] - - @_tp_cache - def __getitem__(self, args): - # Parameterizes an already-parameterized object. - # - # For example, we arrive here doing something like: - # T1 = TypeVar('T1') - # T2 = TypeVar('T2') - # T3 = TypeVar('T3') - # class A(Generic[T1]): pass - # B = A[T2] # B is a _GenericAlias - # C = B[T3] # Invokes _GenericAlias.__getitem__ - # - # We also arrive here when parameterizing a generic `Callable` alias: - # T = TypeVar('T') - # C = Callable[[T], None] - # C[int] # Invokes _GenericAlias.__getitem__ - - if self.__origin__ in (Generic, Protocol): - # Can't subscript Generic[...] or Protocol[...]. - raise TypeError(f"Cannot subscript already-subscripted {self}") - if not self.__parameters__: - raise TypeError(f"{self} is not a generic class") - - # Preprocess `args`. - if not isinstance(args, tuple): - args = (args,) - args = _unpack_args(*(_type_convert(p) for p in args)) - new_args = self._determine_new_args(args) - r = self.copy_with(new_args) - return r - - def _determine_new_args(self, args): - # Determines new __args__ for __getitem__. - # - # For example, suppose we had: - # T1 = TypeVar('T1') - # T2 = TypeVar('T2') - # class A(Generic[T1, T2]): pass - # T3 = TypeVar('T3') - # B = A[int, T3] - # C = B[str] - # `B.__args__` is `(int, T3)`, so `C.__args__` should be `(int, str)`. - # Unfortunately, this is harder than it looks, because if `T3` is - # anything more exotic than a plain `TypeVar`, we need to consider - # edge cases. 
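        # A minimal editorial sketch of such an edge case (illustration only,
        # not part of the original file): a TypeVarTuple surviving one
        # substitution and being bound by the next.
        #
        #   >>> from typing import Generic, TypeVar, TypeVarTuple
        #   >>> T = TypeVar('T'); Ts = TypeVarTuple('Ts')
        #   >>> class A(Generic[T, *Ts]): pass
        #   >>> B = A[int, *Ts]        # partial substitution keeps Ts free
        #   >>> B[str, bytes].__args__ == (int, str, bytes)
        #   True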
- - params = self.__parameters__ - # In the example above, this would be {T3: str} - for param in params: - prepare = getattr(param, '__typing_prepare_subst__', None) - if prepare is not None: - args = prepare(self, args) - alen = len(args) - plen = len(params) - if alen != plen: - raise TypeError(f"Too {'many' if alen > plen else 'few'} arguments for {self};" - f" actual {alen}, expected {plen}") - new_arg_by_param = dict(zip(params, args)) - return tuple(self._make_substitution(self.__args__, new_arg_by_param)) - - def _make_substitution(self, args, new_arg_by_param): - """Create a list of new type arguments.""" - new_args = [] - for old_arg in args: - if isinstance(old_arg, type): - new_args.append(old_arg) - continue - - substfunc = getattr(old_arg, '__typing_subst__', None) - if substfunc: - new_arg = substfunc(new_arg_by_param[old_arg]) - else: - subparams = getattr(old_arg, '__parameters__', ()) - if not subparams: - new_arg = old_arg - else: - subargs = [] - for x in subparams: - if isinstance(x, TypeVarTuple): - subargs.extend(new_arg_by_param[x]) - else: - subargs.append(new_arg_by_param[x]) - new_arg = old_arg[tuple(subargs)] - - if self.__origin__ == collections.abc.Callable and isinstance(new_arg, tuple): - # Consider the following `Callable`. - # C = Callable[[int], str] - # Here, `C.__args__` should be (int, str) - NOT ([int], str). - # That means that if we had something like... - # P = ParamSpec('P') - # T = TypeVar('T') - # C = Callable[P, T] - # D = C[[int, str], float] - # ...we need to be careful; `new_args` should end up as - # `(int, str, float)` rather than `([int, str], float)`. - new_args.extend(new_arg) - elif _is_unpacked_typevartuple(old_arg): - # Consider the following `_GenericAlias`, `B`: - # class A(Generic[*Ts]): ... - # B = A[T, *Ts] - # If we then do: - # B[float, int, str] - # The `new_arg` corresponding to `T` will be `float`, and the - # `new_arg` corresponding to `*Ts` will be `(int, str)`. We - # should join all these types together in a flat list - # `(float, int, str)` - so again, we should `extend`. - new_args.extend(new_arg) - elif isinstance(old_arg, tuple): - # Corner case: - # P = ParamSpec('P') - # T = TypeVar('T') - # class Base(Generic[P]): ... - # Can be substituted like this: - # X = Base[[int, T]] - # In this case, `old_arg` will be a tuple: - new_args.append( - tuple(self._make_substitution(old_arg, new_arg_by_param)), - ) - else: - new_args.append(new_arg) - return new_args - - def copy_with(self, args): - return self.__class__(self.__origin__, args, name=self._name, inst=self._inst) - - def __repr__(self): - if self._name: - name = 'typing.' + self._name - else: - name = _type_repr(self.__origin__) - if self.__args__: - args = ", ".join([_type_repr(a) for a in self.__args__]) - else: - # To ensure the repr is eval-able. 
-            args = "()"
-        return f'{name}[{args}]'
-
-    def __reduce__(self):
-        if self._name:
-            origin = globals()[self._name]
-        else:
-            origin = self.__origin__
-        args = tuple(self.__args__)
-        if len(args) == 1 and not isinstance(args[0], tuple):
-            args, = args
-        return operator.getitem, (origin, args)
-
-    def __mro_entries__(self, bases):
-        if isinstance(self.__origin__, _SpecialForm):
-            raise TypeError(f"Cannot subclass {self!r}")
-
-        if self._name:  # generic version of an ABC or built-in class
-            return super().__mro_entries__(bases)
-        if self.__origin__ is Generic:
-            if Protocol in bases:
-                return ()
-            i = bases.index(self)
-            for b in bases[i+1:]:
-                if isinstance(b, _BaseGenericAlias) and b is not self:
-                    return ()
-        return (self.__origin__,)
-
-    def __iter__(self):
-        yield Unpack[self]
-
-
-# _nparams is the number of accepted parameters, e.g. 0 for Hashable,
-# 1 for List and 2 for Dict.  It may be -1 if variable number of
-# parameters are accepted (needs custom __getitem__).
-
-class _SpecialGenericAlias(_NotIterable, _BaseGenericAlias, _root=True):
-    def __init__(self, origin, nparams, *, inst=True, name=None, defaults=()):
-        if name is None:
-            name = origin.__name__
-        super().__init__(origin, inst=inst, name=name)
-        self._nparams = nparams
-        self._defaults = defaults
-        if origin.__module__ == 'builtins':
-            self.__doc__ = f'Deprecated alias to {origin.__qualname__}.'
-        else:
-            self.__doc__ = f'Deprecated alias to {origin.__module__}.{origin.__qualname__}.'
-
-    @_tp_cache
-    def __getitem__(self, params):
-        if not isinstance(params, tuple):
-            params = (params,)
-        msg = "Parameters to generic types must be types."
-        params = tuple(_type_check(p, msg) for p in params)
-        if (self._defaults
-            and len(params) < self._nparams
-            and len(params) + len(self._defaults) >= self._nparams
-        ):
-            params = (*params, *self._defaults[len(params) - self._nparams:])
-        actual_len = len(params)
-
-        if actual_len != self._nparams:
-            if self._defaults:
-                expected = f"at least {self._nparams - len(self._defaults)}"
-            else:
-                expected = str(self._nparams)
-            if not self._nparams:
-                raise TypeError(f"{self} is not a generic class")
-            raise TypeError(f"Too {'many' if actual_len > self._nparams else 'few'} arguments for {self};"
-                            f" actual {actual_len}, expected {expected}")
-        return self.copy_with(params)
-
-    def copy_with(self, params):
-        return _GenericAlias(self.__origin__, params,
-                             name=self._name, inst=self._inst)
-
-    def __repr__(self):
-        return 'typing.' + self._name
-
-    def __subclasscheck__(self, cls):
-        if isinstance(cls, _SpecialGenericAlias):
-            return issubclass(cls.__origin__, self.__origin__)
-        if not isinstance(cls, _GenericAlias):
-            return issubclass(cls, self.__origin__)
-        return super().__subclasscheck__(cls)
-
-    def __reduce__(self):
-        return self._name
-
-    def __or__(self, right):
-        return Union[self, right]
-
-    def __ror__(self, left):
-        return Union[left, self]
-
-
-class _DeprecatedGenericAlias(_SpecialGenericAlias, _root=True):
-    def __init__(
-        self, origin, nparams, *, removal_version, inst=True, name=None
-    ):
-        super().__init__(origin, nparams, inst=inst, name=name)
-        self._removal_version = removal_version
-
-    def __instancecheck__(self, inst):
-        import warnings
-        warnings._deprecated(
-            f"{self.__module__}.{self._name}", remove=self._removal_version
-        )
-        return super().__instancecheck__(inst)
-
-
-class _CallableGenericAlias(_NotIterable, _GenericAlias, _root=True):
-    def __repr__(self):
-        assert self._name == 'Callable'
-        args = self.__args__
-        if len(args) == 2 and _is_param_expr(args[0]):
-            return super().__repr__()
-        return (f'typing.Callable'
-                f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], '
-                f'{_type_repr(args[-1])}]')
-
-    def __reduce__(self):
-        args = self.__args__
-        if not (len(args) == 2 and _is_param_expr(args[0])):
-            args = list(args[:-1]), args[-1]
-        return operator.getitem, (Callable, args)
-
-
-class _CallableType(_SpecialGenericAlias, _root=True):
-    def copy_with(self, params):
-        return _CallableGenericAlias(self.__origin__, params,
-                                     name=self._name, inst=self._inst)
-
-    def __getitem__(self, params):
-        if not isinstance(params, tuple) or len(params) != 2:
-            raise TypeError("Callable must be used as "
-                            "Callable[[arg, ...], result].")
-        args, result = params
-        # This relaxes what args can be on purpose to allow things like
-        # PEP 612 ParamSpec.  Responsibility for whether a user is using
-        # Callable[...] properly is deferred to static type checkers.
-        if isinstance(args, list):
-            params = (tuple(args), result)
-        else:
-            params = (args, result)
-        return self.__getitem_inner__(params)
-
-    @_tp_cache
-    def __getitem_inner__(self, params):
-        args, result = params
-        msg = "Callable[args, result]: result must be a type."
-        result = _type_check(result, msg)
-        if args is Ellipsis:
-            return self.copy_with((_TypingEllipsis, result))
-        if not isinstance(args, tuple):
-            args = (args,)
-        args = tuple(_type_convert(arg) for arg in args)
-        params = args + (result,)
-        return self.copy_with(params)
-
-
-class _TupleType(_SpecialGenericAlias, _root=True):
-    @_tp_cache
-    def __getitem__(self, params):
-        if not isinstance(params, tuple):
-            params = (params,)
-        if len(params) >= 2 and params[-1] is ...:
-            msg = "Tuple[t, ...]: t must be a type."
-            params = tuple(_type_check(p, msg) for p in params[:-1])
-            return self.copy_with((*params, _TypingEllipsis))
-        msg = "Tuple[t0, t1, ...]: each t must be a type."
- params = tuple(_type_check(p, msg) for p in params) - return self.copy_with(params) - - -class _UnionGenericAlias(_NotIterable, _GenericAlias, _root=True): - def copy_with(self, params): - return Union[params] - - def __eq__(self, other): - if not isinstance(other, (_UnionGenericAlias, types.UnionType)): - return NotImplemented - try: # fast path - return set(self.__args__) == set(other.__args__) - except TypeError: # not hashable, slow path - return _compare_args_orderless(self.__args__, other.__args__) - - def __hash__(self): - return hash(frozenset(self.__args__)) - - def __repr__(self): - args = self.__args__ - if len(args) == 2: - if args[0] is type(None): - return f'typing.Optional[{_type_repr(args[1])}]' - elif args[1] is type(None): - return f'typing.Optional[{_type_repr(args[0])}]' - return super().__repr__() - - def __instancecheck__(self, obj): - for arg in self.__args__: - if isinstance(obj, arg): - return True - return False - - def __subclasscheck__(self, cls): - for arg in self.__args__: - if issubclass(cls, arg): - return True - return False - - def __reduce__(self): - func, (origin, args) = super().__reduce__() - return func, (Union, args) - - -def _value_and_type_iter(parameters): - return ((p, type(p)) for p in parameters) - - -class _LiteralGenericAlias(_GenericAlias, _root=True): - def __eq__(self, other): - if not isinstance(other, _LiteralGenericAlias): - return NotImplemented - - return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__)) - - def __hash__(self): - return hash(frozenset(_value_and_type_iter(self.__args__))) - - -class _ConcatenateGenericAlias(_GenericAlias, _root=True): - def copy_with(self, params): - if isinstance(params[-1], (list, tuple)): - return (*params[:-1], *params[-1]) - if isinstance(params[-1], _ConcatenateGenericAlias): - params = (*params[:-1], *params[-1].__args__) - return super().copy_with(params) - - -@_SpecialForm -def Unpack(self, parameters): - """Type unpack operator. - - The type unpack operator takes the child types from some container type, - such as `tuple[int, str]` or a `TypeVarTuple`, and 'pulls them out'. - - For example:: - - # For some generic class `Foo`: - Foo[Unpack[tuple[int, str]]] # Equivalent to Foo[int, str] - - Ts = TypeVarTuple('Ts') - # Specifies that `Bar` is generic in an arbitrary number of types. - # (Think of `Ts` as a tuple of an arbitrary number of individual - # `TypeVar`s, which the `Unpack` is 'pulling out' directly into the - # `Generic[]`.) - class Bar(Generic[Unpack[Ts]]): ... - Bar[int] # Valid - Bar[int, str] # Also valid - - From Python 3.11, this can also be done using the `*` operator:: - - Foo[*tuple[int, str]] - class Bar(Generic[*Ts]): ... - - And from Python 3.12, it can be done using built-in syntax for generics:: - - Foo[*tuple[int, str]] - class Bar[*Ts]: ... - - The operator can also be used along with a `TypedDict` to annotate - `**kwargs` in a function signature:: - - class Movie(TypedDict): - name: str - year: int - - # This function expects two keyword arguments - *name* of type `str` and - # *year* of type `int`. - def foo(**kwargs: Unpack[Movie]): ... - - Note that there is only some runtime checking of this operator. Not - everything the runtime allows may be accepted by static type checkers. - - For more information, see PEPs 646 and 692. 
- """ - item = _type_check(parameters, f'{self} accepts only single type.') - return _UnpackGenericAlias(origin=self, args=(item,)) - - -class _UnpackGenericAlias(_GenericAlias, _root=True): - def __repr__(self): - # `Unpack` only takes one argument, so __args__ should contain only - # a single item. - return f'typing.Unpack[{_type_repr(self.__args__[0])}]' - - def __getitem__(self, args): - if self.__typing_is_unpacked_typevartuple__: - return args - return super().__getitem__(args) - - @property - def __typing_unpacked_tuple_args__(self): - assert self.__origin__ is Unpack - assert len(self.__args__) == 1 - arg, = self.__args__ - if isinstance(arg, (_GenericAlias, types.GenericAlias)): - if arg.__origin__ is not tuple: - raise TypeError("Unpack[...] must be used with a tuple type") - return arg.__args__ - return None - - @property - def __typing_is_unpacked_typevartuple__(self): - assert self.__origin__ is Unpack - assert len(self.__args__) == 1 - return isinstance(self.__args__[0], TypeVarTuple) - - -class _TypingEllipsis: - """Internal placeholder for ... (ellipsis).""" - - -_TYPING_INTERNALS = frozenset({ - '__parameters__', '__orig_bases__', '__orig_class__', - '_is_protocol', '_is_runtime_protocol', '__protocol_attrs__', - '__non_callable_proto_members__', '__type_params__', -}) - -_SPECIAL_NAMES = frozenset({ - '__abstractmethods__', '__annotations__', '__dict__', '__doc__', - '__init__', '__module__', '__new__', '__slots__', - '__subclasshook__', '__weakref__', '__class_getitem__', - '__match_args__', '__static_attributes__', '__firstlineno__', -}) - -# These special attributes will be not collected as protocol members. -EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS | _SPECIAL_NAMES | {'_MutableMapping__marker'} - - -def _get_protocol_attrs(cls): - """Collect protocol members from a protocol class objects. - - This includes names actually defined in the class dictionary, as well - as names that appear in annotations. Special names (above) are skipped. - """ - attrs = set() - for base in cls.__mro__[:-1]: # without object - if base.__name__ in {'Protocol', 'Generic'}: - continue - annotations = getattr(base, '__annotations__', {}) - for attr in (*base.__dict__, *annotations): - if not attr.startswith('_abc_') and attr not in EXCLUDED_ATTRIBUTES: - attrs.add(attr) - return attrs - - -def _no_init_or_replace_init(self, *args, **kwargs): - cls = type(self) - - if cls._is_protocol: - raise TypeError('Protocols cannot be instantiated') - - # Already using a custom `__init__`. No need to calculate correct - # `__init__` to call. This can lead to RecursionError. See bpo-45121. - if cls.__init__ is not _no_init_or_replace_init: - return - - # Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`. - # The first instantiation of the subclass will call `_no_init_or_replace_init` which - # searches for a proper new `__init__` in the MRO. The new `__init__` - # replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent - # instantiation of the protocol subclass will thus use the new - # `__init__` and no longer call `_no_init_or_replace_init`. 
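    # A minimal editorial sketch of the effect described above (illustration
    # only, not part of the original file):
    #
    #   >>> from typing import Protocol
    #   >>> class Proto(Protocol):
    #   ...     def meth(self) -> int: ...
    #   >>> Proto()
    #   Traceback (most recent call last):
    #       ...
    #   TypeError: Protocols cannot be instantiated
    #   >>> class Impl(Proto):
    #   ...     def meth(self) -> int:
    #   ...         return 0
    #   >>> Impl().meth()    # first call installs a real __init__ on Impl
    #   0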
- for base in cls.__mro__: - init = base.__dict__.get('__init__', _no_init_or_replace_init) - if init is not _no_init_or_replace_init: - cls.__init__ = init - break - else: - # should not happen - cls.__init__ = object.__init__ - - cls.__init__(self, *args, **kwargs) - - -def _caller(depth=1, default='__main__'): - try: - return sys._getframemodulename(depth + 1) or default - except AttributeError: # For platforms without _getframemodulename() - pass - try: - return sys._getframe(depth + 1).f_globals.get('__name__', default) - except (AttributeError, ValueError): # For platforms without _getframe() - pass - return None - -def _allow_reckless_class_checks(depth=2): - """Allow instance and class checks for special stdlib modules. - - The abc and functools modules indiscriminately call isinstance() and - issubclass() on the whole MRO of a user class, which may contain protocols. - """ - return _caller(depth) in {'abc', 'functools', None} - - -_PROTO_ALLOWLIST = { - 'collections.abc': [ - 'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable', - 'AsyncIterator', 'Hashable', 'Sized', 'Container', 'Collection', - 'Reversible', 'Buffer', - ], - 'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'], -} - - -@functools.cache -def _lazy_load_getattr_static(): - # Import getattr_static lazily so as not to slow down the import of typing.py - # Cache the result so we don't slow down _ProtocolMeta.__instancecheck__ unnecessarily - from inspect import getattr_static - return getattr_static - - -_cleanups.append(_lazy_load_getattr_static.cache_clear) - -def _pickle_psargs(psargs): - return ParamSpecArgs, (psargs.__origin__,) - -copyreg.pickle(ParamSpecArgs, _pickle_psargs) - -def _pickle_pskwargs(pskwargs): - return ParamSpecKwargs, (pskwargs.__origin__,) - -copyreg.pickle(ParamSpecKwargs, _pickle_pskwargs) - -del _pickle_psargs, _pickle_pskwargs - - -# Preload these once, as globals, as a micro-optimisation. -# This makes a significant difference to the time it takes -# to do `isinstance()`/`issubclass()` checks -# against runtime-checkable protocols with only one callable member. -_abc_instancecheck = ABCMeta.__instancecheck__ -_abc_subclasscheck = ABCMeta.__subclasscheck__ - - -def _type_check_issubclass_arg_1(arg): - """Raise TypeError if `arg` is not an instance of `type` - in `issubclass(arg, )`. - - In most cases, this is verified by type.__subclasscheck__. - Checking it again unnecessarily would slow down issubclass() checks, - so, we don't perform this check unless we absolutely have to. - - For various error paths, however, - we want to ensure that *this* error message is shown to the user - where relevant, rather than a typing.py-specific error message. - """ - if not isinstance(arg, type): - # Same error message as for issubclass(1, int). - raise TypeError('issubclass() arg 1 must be a class') - - -class _ProtocolMeta(ABCMeta): - # This metaclass is somewhat unfortunate, - # but is necessary for several reasons... 
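    # A minimal editorial sketch of the checks this metaclass enforces
    # (illustration only, not part of the original file):
    #
    #   >>> from typing import Protocol, runtime_checkable
    #   >>> class HasLen(Protocol):
    #   ...     def __len__(self) -> int: ...
    #   >>> isinstance([], HasLen)
    #   Traceback (most recent call last):
    #       ...
    #   TypeError: Instance and class checks can only be used with @runtime_checkable protocols
    #   >>> @runtime_checkable
    #   ... class RuntimeHasLen(Protocol):
    #   ...     def __len__(self) -> int: ...
    #   >>> isinstance([], RuntimeHasLen)    # structural: list defines __len__
    #   True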
- def __new__(mcls, name, bases, namespace, /, **kwargs): - if name == "Protocol" and bases == (Generic,): - pass - elif Protocol in bases: - for base in bases: - if not ( - base in {object, Generic} - or base.__name__ in _PROTO_ALLOWLIST.get(base.__module__, []) - or ( - issubclass(base, Generic) - and getattr(base, "_is_protocol", False) - ) - ): - raise TypeError( - f"Protocols can only inherit from other protocols, " - f"got {base!r}" - ) - return super().__new__(mcls, name, bases, namespace, **kwargs) - - def __init__(cls, *args, **kwargs): - super().__init__(*args, **kwargs) - if getattr(cls, "_is_protocol", False): - cls.__protocol_attrs__ = _get_protocol_attrs(cls) - - def __subclasscheck__(cls, other): - if cls is Protocol: - return type.__subclasscheck__(cls, other) - if ( - getattr(cls, '_is_protocol', False) - and not _allow_reckless_class_checks() - ): - if not getattr(cls, '_is_runtime_protocol', False): - _type_check_issubclass_arg_1(other) - raise TypeError( - "Instance and class checks can only be used with " - "@runtime_checkable protocols" - ) - if ( - # this attribute is set by @runtime_checkable: - cls.__non_callable_proto_members__ - and cls.__dict__.get("__subclasshook__") is _proto_hook - ): - _type_check_issubclass_arg_1(other) - non_method_attrs = sorted(cls.__non_callable_proto_members__) - raise TypeError( - "Protocols with non-method members don't support issubclass()." - f" Non-method members: {str(non_method_attrs)[1:-1]}." - ) - return _abc_subclasscheck(cls, other) - - def __instancecheck__(cls, instance): - # We need this method for situations where attributes are - # assigned in __init__. - if cls is Protocol: - return type.__instancecheck__(cls, instance) - if not getattr(cls, "_is_protocol", False): - # i.e., it's a concrete subclass of a protocol - return _abc_instancecheck(cls, instance) - - if ( - not getattr(cls, '_is_runtime_protocol', False) and - not _allow_reckless_class_checks() - ): - raise TypeError("Instance and class checks can only be used with" - " @runtime_checkable protocols") - - if _abc_instancecheck(cls, instance): - return True - - getattr_static = _lazy_load_getattr_static() - for attr in cls.__protocol_attrs__: - try: - val = getattr_static(instance, attr) - except AttributeError: - break - # this attribute is set by @runtime_checkable: - if val is None and attr not in cls.__non_callable_proto_members__: - break - else: - return True - - return False - - -@classmethod -def _proto_hook(cls, other): - if not cls.__dict__.get('_is_protocol', False): - return NotImplemented - - for attr in cls.__protocol_attrs__: - for base in other.__mro__: - # Check if the members appears in the class dictionary... - if attr in base.__dict__: - if base.__dict__[attr] is None: - return NotImplemented - break - - # ...or in annotations, if it is a sub-protocol. - annotations = getattr(base, '__annotations__', {}) - if (isinstance(annotations, collections.abc.Mapping) and - attr in annotations and - issubclass(other, Generic) and getattr(other, '_is_protocol', False)): - break - else: - return NotImplemented - return True - - -class Protocol(Generic, metaclass=_ProtocolMeta): - """Base class for protocol classes. - - Protocol classes are defined as:: - - class Proto(Protocol): - def meth(self) -> int: - ... - - Such classes are primarily used with static type checkers that recognize - structural subtyping (static duck-typing). 
- - For example:: - - class C: - def meth(self) -> int: - return 0 - - def func(x: Proto) -> int: - return x.meth() - - func(C()) # Passes static type check - - See PEP 544 for details. Protocol classes decorated with - @typing.runtime_checkable act as simple-minded runtime protocols that check - only the presence of given attributes, ignoring their type signatures. - Protocol classes can be generic, they are defined as:: - - class GenProto[T](Protocol): - def meth(self) -> T: - ... - """ - - __slots__ = () - _is_protocol = True - _is_runtime_protocol = False - - def __init_subclass__(cls, *args, **kwargs): - super().__init_subclass__(*args, **kwargs) - - # Determine if this is a protocol or a concrete subclass. - if not cls.__dict__.get('_is_protocol', False): - cls._is_protocol = any(b is Protocol for b in cls.__bases__) - - # Set (or override) the protocol subclass hook. - if '__subclasshook__' not in cls.__dict__: - cls.__subclasshook__ = _proto_hook - - # Prohibit instantiation for protocol classes - if cls._is_protocol and cls.__init__ is Protocol.__init__: - cls.__init__ = _no_init_or_replace_init - - -class _AnnotatedAlias(_NotIterable, _GenericAlias, _root=True): - """Runtime representation of an annotated type. - - At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' - with extra annotations. The alias behaves like a normal typing alias. - Instantiating is the same as instantiating the underlying type; binding - it to types is also the same. - - The metadata itself is stored in a '__metadata__' attribute as a tuple. - """ - - def __init__(self, origin, metadata): - if isinstance(origin, _AnnotatedAlias): - metadata = origin.__metadata__ + metadata - origin = origin.__origin__ - super().__init__(origin, origin, name='Annotated') - self.__metadata__ = metadata - - def copy_with(self, params): - assert len(params) == 1 - new_type = params[0] - return _AnnotatedAlias(new_type, self.__metadata__) - - def __repr__(self): - return "typing.Annotated[{}, {}]".format( - _type_repr(self.__origin__), - ", ".join(repr(a) for a in self.__metadata__) - ) - - def __reduce__(self): - return operator.getitem, ( - Annotated, (self.__origin__,) + self.__metadata__ - ) - - def __eq__(self, other): - if not isinstance(other, _AnnotatedAlias): - return NotImplemented - return (self.__origin__ == other.__origin__ - and self.__metadata__ == other.__metadata__) - - def __hash__(self): - return hash((self.__origin__, self.__metadata__)) - - def __getattr__(self, attr): - if attr in {'__name__', '__qualname__'}: - return 'Annotated' - return super().__getattr__(attr) - - def __mro_entries__(self, bases): - return (self.__origin__,) - - -@_TypedCacheSpecialForm -@_tp_cache(typed=True) -def Annotated(self, *params): - """Add context-specific metadata to a type. - - Example: Annotated[int, runtime_check.Unsigned] indicates to the - hypothetical runtime_check module that this type is an unsigned int. - Every other consumer of this type can ignore this metadata and treat - this type as int. - - The first argument to Annotated must be a valid type. - - Details: - - - It's an error to call `Annotated` with less than two arguments. 
- - Access the metadata via the ``__metadata__`` attribute:: - - assert Annotated[int, '$'].__metadata__ == ('$',) - - - Nested Annotated types are flattened:: - - assert Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] - - - Instantiating an annotated type is equivalent to instantiating the - underlying type:: - - assert Annotated[C, Ann1](5) == C(5) - - - Annotated can be used as a generic type alias:: - - type Optimized[T] = Annotated[T, runtime.Optimize()] - # type checker will treat Optimized[int] - # as equivalent to Annotated[int, runtime.Optimize()] - - type OptimizedList[T] = Annotated[list[T], runtime.Optimize()] - # type checker will treat OptimizedList[int] - # as equivalent to Annotated[list[int], runtime.Optimize()] - - - Annotated cannot be used with an unpacked TypeVarTuple:: - - type Variadic[*Ts] = Annotated[*Ts, Ann1] # NOT valid - - This would be equivalent to:: - - Annotated[T1, T2, T3, ..., Ann1] - - where T1, T2 etc. are TypeVars, which would be invalid, because - only one type should be passed to Annotated. - """ - if len(params) < 2: - raise TypeError("Annotated[...] should be used " - "with at least two arguments (a type and an " - "annotation).") - if _is_unpacked_typevartuple(params[0]): - raise TypeError("Annotated[...] should not be used with an " - "unpacked TypeVarTuple") - msg = "Annotated[t, ...]: t must be a type." - origin = _type_check(params[0], msg, allow_special_forms=True) - metadata = tuple(params[1:]) - return _AnnotatedAlias(origin, metadata) - - -def runtime_checkable(cls): - """Mark a protocol class as a runtime protocol. - - Such protocol can be used with isinstance() and issubclass(). - Raise TypeError if applied to a non-protocol class. - This allows a simple-minded structural check very similar to - one trick ponies in collections.abc such as Iterable. - - For example:: - - @runtime_checkable - class Closable(Protocol): - def close(self): ... - - assert isinstance(open('/some/file'), Closable) - - Warning: this will check only the presence of the required methods, - not their type signatures! - """ - if not issubclass(cls, Generic) or not getattr(cls, '_is_protocol', False): - raise TypeError('@runtime_checkable can be only applied to protocol classes,' - ' got %r' % cls) - cls._is_runtime_protocol = True - # PEP 544 prohibits using issubclass() - # with protocols that have non-method members. - # See gh-113320 for why we compute this attribute here, - # rather than in `_ProtocolMeta.__init__` - cls.__non_callable_proto_members__ = set() - for attr in cls.__protocol_attrs__: - try: - is_callable = callable(getattr(cls, attr, None)) - except Exception as e: - raise TypeError( - f"Failed to determine whether protocol member {attr!r} " - "is a method member" - ) from e - else: - if not is_callable: - cls.__non_callable_proto_members__.add(attr) - return cls - - -def cast(typ, val): - """Cast a value to a type. - - This returns the value unchanged. To the type checker this - signals that the return value has the designated type, but at - runtime we intentionally don't check anything (we want this - to be as fast as possible). - """ - return val - - -def assert_type(val, typ, /): - """Ask a static type checker to confirm that the value is of the given type. - - At runtime this does nothing: it returns the first argument unchanged with no - checks or side effects, no matter the actual type of the argument. 
- - When a static type checker encounters a call to assert_type(), it - emits an error if the value is not of the specified type:: - - def greet(name: str) -> None: - assert_type(name, str) # OK - assert_type(name, int) # type checker error - """ - return val - - -_allowed_types = (types.FunctionType, types.BuiltinFunctionType, - types.MethodType, types.ModuleType, - WrapperDescriptorType, MethodWrapperType, MethodDescriptorType) - - -def get_type_hints(obj, globalns=None, localns=None, include_extras=False): - """Return type hints for an object. - - This is often the same as obj.__annotations__, but it handles - forward references encoded as string literals and recursively replaces all - 'Annotated[T, ...]' with 'T' (unless 'include_extras=True'). - - The argument may be a module, class, method, or function. The annotations - are returned as a dictionary. For classes, annotations include also - inherited members. - - TypeError is raised if the argument is not of a type that can contain - annotations, and an empty dictionary is returned if no annotations are - present. - - BEWARE -- the behavior of globalns and localns is counterintuitive - (unless you are familiar with how eval() and exec() work). The - search order is locals first, then globals. - - - If no dict arguments are passed, an attempt is made to use the - globals from obj (or the respective module's globals for classes), - and these are also used as the locals. If the object does not appear - to have globals, an empty dictionary is used. For classes, the search - order is globals first then locals. - - - If one dict argument is passed, it is used for both globals and - locals. - - - If two dict arguments are passed, they specify globals and - locals, respectively. - """ - if getattr(obj, '__no_type_check__', None): - return {} - # Classes require a special treatment. - if isinstance(obj, type): - hints = {} - for base in reversed(obj.__mro__): - if globalns is None: - base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {}) - else: - base_globals = globalns - ann = base.__dict__.get('__annotations__', {}) - if isinstance(ann, types.GetSetDescriptorType): - ann = {} - base_locals = dict(vars(base)) if localns is None else localns - if localns is None and globalns is None: - # This is surprising, but required. Before Python 3.10, - # get_type_hints only evaluated the globalns of - # a class. To maintain backwards compatibility, we reverse - # the globalns and localns order so that eval() looks into - # *base_globals* first rather than *base_locals*. - # This only affects ForwardRefs. - base_globals, base_locals = base_locals, base_globals - for name, value in ann.items(): - if value is None: - value = type(None) - if isinstance(value, str): - value = ForwardRef(value, is_argument=False, is_class=True) - value = _eval_type(value, base_globals, base_locals, base.__type_params__) - hints[name] = value - return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()} - - if globalns is None: - if isinstance(obj, types.ModuleType): - globalns = obj.__dict__ - else: - nsobj = obj - # Find globalns for the unwrapped object. - while hasattr(nsobj, '__wrapped__'): - nsobj = nsobj.__wrapped__ - globalns = getattr(nsobj, '__globals__', {}) - if localns is None: - localns = globalns - elif localns is None: - localns = globalns - hints = getattr(obj, '__annotations__', None) - if hints is None: - # Return empty annotations for something that _could_ have them. 
- if isinstance(obj, _allowed_types): - return {} - else: - raise TypeError('{!r} is not a module, class, method, ' - 'or function.'.format(obj)) - hints = dict(hints) - type_params = getattr(obj, "__type_params__", ()) - for name, value in hints.items(): - if value is None: - value = type(None) - if isinstance(value, str): - # class-level forward refs were handled above, this must be either - # a module-level annotation or a function argument annotation - value = ForwardRef( - value, - is_argument=not isinstance(obj, types.ModuleType), - is_class=False, - ) - hints[name] = _eval_type(value, globalns, localns, type_params) - return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()} - - -def _strip_annotations(t): - """Strip the annotations from a given type.""" - if isinstance(t, _AnnotatedAlias): - return _strip_annotations(t.__origin__) - if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired, ReadOnly): - return _strip_annotations(t.__args__[0]) - if isinstance(t, _GenericAlias): - stripped_args = tuple(_strip_annotations(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - return t.copy_with(stripped_args) - if isinstance(t, GenericAlias): - stripped_args = tuple(_strip_annotations(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - return GenericAlias(t.__origin__, stripped_args) - if isinstance(t, types.UnionType): - stripped_args = tuple(_strip_annotations(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - return functools.reduce(operator.or_, stripped_args) - - return t - - -def get_origin(tp): - """Get the unsubscripted version of a type. - - This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar, - Annotated, and others. Return None for unsupported types. - - Examples:: - - >>> P = ParamSpec('P') - >>> assert get_origin(Literal[42]) is Literal - >>> assert get_origin(int) is None - >>> assert get_origin(ClassVar[int]) is ClassVar - >>> assert get_origin(Generic) is Generic - >>> assert get_origin(Generic[T]) is Generic - >>> assert get_origin(Union[T, int]) is Union - >>> assert get_origin(List[Tuple[T, T]][int]) is list - >>> assert get_origin(P.args) is P - """ - if isinstance(tp, _AnnotatedAlias): - return Annotated - if isinstance(tp, (_BaseGenericAlias, GenericAlias, - ParamSpecArgs, ParamSpecKwargs)): - return tp.__origin__ - if tp is Generic: - return Generic - if isinstance(tp, types.UnionType): - return types.UnionType - return None - - -def get_args(tp): - """Get type arguments with all substitutions performed. - - For unions, basic simplifications used by Union constructor are performed. - - Examples:: - - >>> T = TypeVar('T') - >>> assert get_args(Dict[str, int]) == (str, int) - >>> assert get_args(int) == () - >>> assert get_args(Union[int, Union[T, int], str][int]) == (int, str) - >>> assert get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) - >>> assert get_args(Callable[[], T][int]) == ([], int) - """ - if isinstance(tp, _AnnotatedAlias): - return (tp.__origin__,) + tp.__metadata__ - if isinstance(tp, (_GenericAlias, GenericAlias)): - res = tp.__args__ - if _should_unflatten_callable_args(tp, res): - res = (list(res[:-1]), res[-1]) - return res - if isinstance(tp, types.UnionType): - return tp.__args__ - return () - - -def is_typeddict(tp): - """Check if an annotation is a TypedDict class. - - For example:: - - >>> from typing import TypedDict - >>> class Film(TypedDict): - ... title: str - ... year: int - ... 
- >>> is_typeddict(Film) - True - >>> is_typeddict(dict) - False - """ - return isinstance(tp, _TypedDictMeta) - - -_ASSERT_NEVER_REPR_MAX_LENGTH = 100 - - -def assert_never(arg: Never, /) -> Never: - """Statically assert that a line of code is unreachable. - - Example:: - - def int_or_str(arg: int | str) -> None: - match arg: - case int(): - print("It's an int") - case str(): - print("It's a str") - case _: - assert_never(arg) - - If a type checker finds that a call to assert_never() is - reachable, it will emit an error. - - At runtime, this throws an exception when called. - """ - value = repr(arg) - if len(value) > _ASSERT_NEVER_REPR_MAX_LENGTH: - value = value[:_ASSERT_NEVER_REPR_MAX_LENGTH] + '...' - raise AssertionError(f"Expected code to be unreachable, but got: {value}") - - -def no_type_check(arg): - """Decorator to indicate that annotations are not type hints. - - The argument must be a class or function; if it is a class, it - applies recursively to all methods and classes defined in that class - (but not to methods defined in its superclasses or subclasses). - - This mutates the function(s) or class(es) in place. - """ - if isinstance(arg, type): - for key in dir(arg): - obj = getattr(arg, key) - if ( - not hasattr(obj, '__qualname__') - or obj.__qualname__ != f'{arg.__qualname__}.{obj.__name__}' - or getattr(obj, '__module__', None) != arg.__module__ - ): - # We only modify objects that are defined in this type directly. - # If classes / methods are nested in multiple layers, - # we will modify them when processing their direct holders. - continue - # Instance, class, and static methods: - if isinstance(obj, types.FunctionType): - obj.__no_type_check__ = True - if isinstance(obj, types.MethodType): - obj.__func__.__no_type_check__ = True - # Nested types: - if isinstance(obj, type): - no_type_check(obj) - try: - arg.__no_type_check__ = True - except TypeError: # built-in classes - pass - return arg - - -def no_type_check_decorator(decorator): - """Decorator to give another decorator the @no_type_check effect. - - This wraps the decorator with something that wraps the decorated - function in @no_type_check. - """ - import warnings - warnings._deprecated("typing.no_type_check_decorator", remove=(3, 15)) - @functools.wraps(decorator) - def wrapped_decorator(*args, **kwds): - func = decorator(*args, **kwds) - func = no_type_check(func) - return func - - return wrapped_decorator - - -def _overload_dummy(*args, **kwds): - """Helper for @overload to raise when called.""" - raise NotImplementedError( - "You should not call an overloaded function. " - "A series of @overload-decorated functions " - "outside a stub module should always be followed " - "by an implementation that is not @overload-ed.") - - -# {module: {qualname: {firstlineno: func}}} -_overload_registry = defaultdict(functools.partial(defaultdict, dict)) - - -def overload(func): - """Decorator for overloaded functions/methods. - - In a stub file, place two or more stub definitions for the same - function in a row, each decorated with @overload. - - For example:: - - @overload - def utf8(value: None) -> None: ... - @overload - def utf8(value: bytes) -> bytes: ... - @overload - def utf8(value: str) -> bytes: ... - - In a non-stub file (i.e. a regular .py file), do the same but - follow it with an implementation. The implementation should *not* - be decorated with @overload:: - - @overload - def utf8(value: None) -> None: ... - @overload - def utf8(value: bytes) -> bytes: ... - @overload - def utf8(value: str) -> bytes: ... 
- def utf8(value): - ... # implementation goes here - - The overloads for a function can be retrieved at runtime using the - get_overloads() function. - """ - # classmethod and staticmethod - f = getattr(func, "__func__", func) - try: - _overload_registry[f.__module__][f.__qualname__][f.__code__.co_firstlineno] = func - except AttributeError: - # Not a normal function; ignore. - pass - return _overload_dummy - - -def get_overloads(func): - """Return all defined overloads for *func* as a sequence.""" - # classmethod and staticmethod - f = getattr(func, "__func__", func) - if f.__module__ not in _overload_registry: - return [] - mod_dict = _overload_registry[f.__module__] - if f.__qualname__ not in mod_dict: - return [] - return list(mod_dict[f.__qualname__].values()) - - -def clear_overloads(): - """Clear all overloads in the registry.""" - _overload_registry.clear() - - -def final(f): - """Decorator to indicate final methods and final classes. - - Use this decorator to indicate to type checkers that the decorated - method cannot be overridden, and decorated class cannot be subclassed. - - For example:: - - class Base: - @final - def done(self) -> None: - ... - class Sub(Base): - def done(self) -> None: # Error reported by type checker - ... - - @final - class Leaf: - ... - class Other(Leaf): # Error reported by type checker - ... - - There is no runtime checking of these properties. The decorator - attempts to set the ``__final__`` attribute to ``True`` on the decorated - object to allow runtime introspection. - """ - try: - f.__final__ = True - except (AttributeError, TypeError): - # Skip the attribute silently if it is not writable. - # AttributeError happens if the object has __slots__ or a - # read-only property, TypeError if it's a builtin class. - pass - return f - - -# Some unconstrained type variables. These were initially used by the container types. -# They were never meant for export and are now unused, but we keep them around to -# avoid breaking compatibility with users who import them. -T = TypeVar('T') # Any type. -KT = TypeVar('KT') # Key type. -VT = TypeVar('VT') # Value type. -T_co = TypeVar('T_co', covariant=True) # Any type covariant containers. -V_co = TypeVar('V_co', covariant=True) # Any type covariant containers. -VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers. -T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant. -# Internal type variable used for Type[]. -CT_co = TypeVar('CT_co', covariant=True, bound=type) - - -# A useful type variable with constraints. This represents string types. -# (This one *is* for export!) -AnyStr = TypeVar('AnyStr', bytes, str) - - -# Various ABCs mimicking those in collections.abc. -_alias = _SpecialGenericAlias - -Hashable = _alias(collections.abc.Hashable, 0) # Not generic. -Awaitable = _alias(collections.abc.Awaitable, 1) -Coroutine = _alias(collections.abc.Coroutine, 3) -AsyncIterable = _alias(collections.abc.AsyncIterable, 1) -AsyncIterator = _alias(collections.abc.AsyncIterator, 1) -Iterable = _alias(collections.abc.Iterable, 1) -Iterator = _alias(collections.abc.Iterator, 1) -Reversible = _alias(collections.abc.Reversible, 1) -Sized = _alias(collections.abc.Sized, 0) # Not generic. -Container = _alias(collections.abc.Container, 1) -Collection = _alias(collections.abc.Collection, 1) -Callable = _CallableType(collections.abc.Callable, 2) -Callable.__doc__ = \ - """Deprecated alias to collections.abc.Callable. 
- - Callable[[int], str] signifies a function that takes a single - parameter of type int and returns a str. - - The subscription syntax must always be used with exactly two - values: the argument list and the return type. - The argument list must be a list of types, a ParamSpec, - Concatenate or ellipsis. The return type must be a single type. - - There is no syntax to indicate optional or keyword arguments; - such function types are rarely used as callback types. - """ -AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet') -MutableSet = _alias(collections.abc.MutableSet, 1) -# NOTE: Mapping is only covariant in the value type. -Mapping = _alias(collections.abc.Mapping, 2) -MutableMapping = _alias(collections.abc.MutableMapping, 2) -Sequence = _alias(collections.abc.Sequence, 1) -MutableSequence = _alias(collections.abc.MutableSequence, 1) -ByteString = _DeprecatedGenericAlias( - collections.abc.ByteString, 0, removal_version=(3, 17) # Not generic. -) -# Tuple accepts variable number of parameters. -Tuple = _TupleType(tuple, -1, inst=False, name='Tuple') -Tuple.__doc__ = \ - """Deprecated alias to builtins.tuple. - - Tuple[X, Y] is the cross-product type of X and Y. - - Example: Tuple[T1, T2] is a tuple of two elements corresponding - to type variables T1 and T2. Tuple[int, float, str] is a tuple - of an int, a float and a string. - - To specify a variable-length tuple of homogeneous type, use Tuple[T, ...]. - """ -List = _alias(list, 1, inst=False, name='List') -Deque = _alias(collections.deque, 1, name='Deque') -Set = _alias(set, 1, inst=False, name='Set') -FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet') -MappingView = _alias(collections.abc.MappingView, 1) -KeysView = _alias(collections.abc.KeysView, 1) -ItemsView = _alias(collections.abc.ItemsView, 2) -ValuesView = _alias(collections.abc.ValuesView, 1) -Dict = _alias(dict, 2, inst=False, name='Dict') -DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict') -OrderedDict = _alias(collections.OrderedDict, 2) -Counter = _alias(collections.Counter, 1) -ChainMap = _alias(collections.ChainMap, 2) -Generator = _alias(collections.abc.Generator, 3, defaults=(types.NoneType, types.NoneType)) -AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2, defaults=(types.NoneType,)) -Type = _alias(type, 1, inst=False, name='Type') -Type.__doc__ = \ - """Deprecated alias to builtins.type. - - builtins.type or typing.Type can be used to annotate class objects. - For example, suppose we have the following classes:: - - class User: ... # Abstract base for User classes - class BasicUser(User): ... - class ProUser(User): ... - class TeamUser(User): ... - - And a function that takes a class argument that's a subclass of - User and returns an instance of the corresponding class:: - - def new_user[U](user_class: Type[U]) -> U: - user = user_class() - # (Here we could write the user object to a database) - return user - - joe = new_user(BasicUser) - - At this point the type checker knows that joe has type BasicUser. 
- """ - - -@runtime_checkable -class SupportsInt(Protocol): - """An ABC with one abstract method __int__.""" - - __slots__ = () - - @abstractmethod - def __int__(self) -> int: - pass - - -@runtime_checkable -class SupportsFloat(Protocol): - """An ABC with one abstract method __float__.""" - - __slots__ = () - - @abstractmethod - def __float__(self) -> float: - pass - - -@runtime_checkable -class SupportsComplex(Protocol): - """An ABC with one abstract method __complex__.""" - - __slots__ = () - - @abstractmethod - def __complex__(self) -> complex: - pass - - -@runtime_checkable -class SupportsBytes(Protocol): - """An ABC with one abstract method __bytes__.""" - - __slots__ = () - - @abstractmethod - def __bytes__(self) -> bytes: - pass - - -@runtime_checkable -class SupportsIndex(Protocol): - """An ABC with one abstract method __index__.""" - - __slots__ = () - - @abstractmethod - def __index__(self) -> int: - pass - - -@runtime_checkable -class SupportsAbs[T](Protocol): - """An ABC with one abstract method __abs__ that is covariant in its return type.""" - - __slots__ = () - - @abstractmethod - def __abs__(self) -> T: - pass - - -@runtime_checkable -class SupportsRound[T](Protocol): - """An ABC with one abstract method __round__ that is covariant in its return type.""" - - __slots__ = () - - @abstractmethod - def __round__(self, ndigits: int = 0) -> T: - pass - - -def _make_nmtuple(name, types, module, defaults = ()): - fields = [n for n, t in types] - types = {n: _type_check(t, f"field {n} annotation must be a type") - for n, t in types} - nm_tpl = collections.namedtuple(name, fields, - defaults=defaults, module=module) - nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = types - return nm_tpl - - -# attributes prohibited to set in NamedTuple class syntax -_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__', - '_fields', '_field_defaults', - '_make', '_replace', '_asdict', '_source'}) - -_special = frozenset({'__module__', '__name__', '__annotations__'}) - - -class NamedTupleMeta(type): - def __new__(cls, typename, bases, ns): - assert _NamedTuple in bases - for base in bases: - if base is not _NamedTuple and base is not Generic: - raise TypeError( - 'can only inherit from a NamedTuple type and Generic') - bases = tuple(tuple if base is _NamedTuple else base for base in bases) - types = ns.get('__annotations__', {}) - default_names = [] - for field_name in types: - if field_name in ns: - default_names.append(field_name) - elif default_names: - raise TypeError(f"Non-default namedtuple field {field_name} " - f"cannot follow default field" - f"{'s' if len(default_names) > 1 else ''} " - f"{', '.join(default_names)}") - nm_tpl = _make_nmtuple(typename, types.items(), - defaults=[ns[n] for n in default_names], - module=ns['__module__']) - nm_tpl.__bases__ = bases - if Generic in bases: - class_getitem = _generic_class_getitem - nm_tpl.__class_getitem__ = classmethod(class_getitem) - # update from user namespace without overriding special namedtuple attributes - for key, val in ns.items(): - if key in _prohibited: - raise AttributeError("Cannot overwrite NamedTuple attribute " + key) - elif key not in _special: - if key not in nm_tpl._fields: - setattr(nm_tpl, key, val) - try: - set_name = type(val).__set_name__ - except AttributeError: - pass - else: - try: - set_name(val, nm_tpl, key) - except BaseException as e: - e.add_note( - f"Error calling __set_name__ on {type(val).__name__!r} " - f"instance {key!r} in {typename!r}" - ) - raise - - if Generic in bases: 
- nm_tpl.__init_subclass__() - return nm_tpl - - -def NamedTuple(typename, fields=_sentinel, /, **kwargs): - """Typed version of namedtuple. - - Usage:: - - class Employee(NamedTuple): - name: str - id: int - - This is equivalent to:: - - Employee = collections.namedtuple('Employee', ['name', 'id']) - - The resulting class has an extra __annotations__ attribute, giving a - dict that maps field names to types. (The field names are also in - the _fields attribute, which is part of the namedtuple API.) - An alternative equivalent functional syntax is also accepted:: - - Employee = NamedTuple('Employee', [('name', str), ('id', int)]) - """ - if fields is _sentinel: - if kwargs: - deprecated_thing = "Creating NamedTuple classes using keyword arguments" - deprecation_msg = ( - "{name} is deprecated and will be disallowed in Python {remove}. " - "Use the class-based or functional syntax instead." - ) - else: - deprecated_thing = "Failing to pass a value for the 'fields' parameter" - example = f"`{typename} = NamedTuple({typename!r}, [])`" - deprecation_msg = ( - "{name} is deprecated and will be disallowed in Python {remove}. " - "To create a NamedTuple class with 0 fields " - "using the functional syntax, " - "pass an empty list, e.g. " - ) + example + "." - elif fields is None: - if kwargs: - raise TypeError( - "Cannot pass `None` as the 'fields' parameter " - "and also specify fields using keyword arguments" - ) - else: - deprecated_thing = "Passing `None` as the 'fields' parameter" - example = f"`{typename} = NamedTuple({typename!r}, [])`" - deprecation_msg = ( - "{name} is deprecated and will be disallowed in Python {remove}. " - "To create a NamedTuple class with 0 fields " - "using the functional syntax, " - "pass an empty list, e.g. " - ) + example + "." - elif kwargs: - raise TypeError("Either list of fields or keywords" - " can be provided to NamedTuple, not both") - if fields is _sentinel or fields is None: - import warnings - warnings._deprecated(deprecated_thing, message=deprecation_msg, remove=(3, 15)) - fields = kwargs.items() - nt = _make_nmtuple(typename, fields, module=_caller()) - nt.__orig_bases__ = (NamedTuple,) - return nt - -_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {}) - -def _namedtuple_mro_entries(bases): - assert NamedTuple in bases - return (_NamedTuple,) - -NamedTuple.__mro_entries__ = _namedtuple_mro_entries - - -def _get_typeddict_qualifiers(annotation_type): - while True: - annotation_origin = get_origin(annotation_type) - if annotation_origin is Annotated: - annotation_args = get_args(annotation_type) - if annotation_args: - annotation_type = annotation_args[0] - else: - break - elif annotation_origin is Required: - yield Required - (annotation_type,) = get_args(annotation_type) - elif annotation_origin is NotRequired: - yield NotRequired - (annotation_type,) = get_args(annotation_type) - elif annotation_origin is ReadOnly: - yield ReadOnly - (annotation_type,) = get_args(annotation_type) - else: - break - - -class _TypedDictMeta(type): - def __new__(cls, name, bases, ns, total=True): - """Create a new typed dict class object. - - This method is called when TypedDict is subclassed, - or when TypedDict is instantiated. This way - TypedDict supports all three syntax forms described in its docstring. - Subclasses and instances of TypedDict return actual dictionaries. 
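# Illustrative sketch: the two equivalent NamedTuple spellings handled above,
# plus the ordering rule NamedTupleMeta enforces (no non-default field may
# follow a field with a default).
from typing import NamedTuple

class Employee(NamedTuple):
    name: str
    id: int = 0             # fields with defaults must come last

Point = NamedTuple('Point', [('x', int), ('y', int)])   # functional form

e = Employee('alice')
assert e.id == 0 and e._fields == ('name', 'id')
assert Employee.__annotations__ == {'name': str, 'id': int}
assert Point(1, 2) == (1, 2)    # still a plain tuple underneath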
- """ - for base in bases: - if type(base) is not _TypedDictMeta and base is not Generic: - raise TypeError('cannot inherit from both a TypedDict type ' - 'and a non-TypedDict base class') - - if any(issubclass(b, Generic) for b in bases): - generic_base = (Generic,) - else: - generic_base = () - - tp_dict = type.__new__(_TypedDictMeta, name, (*generic_base, dict), ns) - - if not hasattr(tp_dict, '__orig_bases__'): - tp_dict.__orig_bases__ = bases - - annotations = {} - own_annotations = ns.get('__annotations__', {}) - msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" - own_annotations = { - n: _type_check(tp, msg, module=tp_dict.__module__) - for n, tp in own_annotations.items() - } - required_keys = set() - optional_keys = set() - readonly_keys = set() - mutable_keys = set() - - for base in bases: - annotations.update(base.__dict__.get('__annotations__', {})) - - base_required = base.__dict__.get('__required_keys__', set()) - required_keys |= base_required - optional_keys -= base_required - - base_optional = base.__dict__.get('__optional_keys__', set()) - required_keys -= base_optional - optional_keys |= base_optional - - readonly_keys.update(base.__dict__.get('__readonly_keys__', ())) - mutable_keys.update(base.__dict__.get('__mutable_keys__', ())) - - annotations.update(own_annotations) - for annotation_key, annotation_type in own_annotations.items(): - qualifiers = set(_get_typeddict_qualifiers(annotation_type)) - if Required in qualifiers: - is_required = True - elif NotRequired in qualifiers: - is_required = False - else: - is_required = total - - if is_required: - required_keys.add(annotation_key) - optional_keys.discard(annotation_key) - else: - optional_keys.add(annotation_key) - required_keys.discard(annotation_key) - - if ReadOnly in qualifiers: - if annotation_key in mutable_keys: - raise TypeError( - f"Cannot override mutable key {annotation_key!r}" - " with read-only key" - ) - readonly_keys.add(annotation_key) - else: - mutable_keys.add(annotation_key) - readonly_keys.discard(annotation_key) - - assert required_keys.isdisjoint(optional_keys), ( - f"Required keys overlap with optional keys in {name}:" - f" {required_keys=}, {optional_keys=}" - ) - tp_dict.__annotations__ = annotations - tp_dict.__required_keys__ = frozenset(required_keys) - tp_dict.__optional_keys__ = frozenset(optional_keys) - tp_dict.__readonly_keys__ = frozenset(readonly_keys) - tp_dict.__mutable_keys__ = frozenset(mutable_keys) - tp_dict.__total__ = total - return tp_dict - - __call__ = dict # static method - - def __subclasscheck__(cls, other): - # Typed dicts are only for static structural subtyping. - raise TypeError('TypedDict does not support instance and class checks') - - __instancecheck__ = __subclasscheck__ - - -def TypedDict(typename, fields=_sentinel, /, *, total=True): - """A simple typed namespace. At runtime it is equivalent to a plain dict. - - TypedDict creates a dictionary type such that a type checker will expect all - instances to have a certain set of keys, where each key is - associated with a value of a consistent type. This expectation - is not checked at runtime. - - Usage:: - - >>> class Point2D(TypedDict): - ... x: int - ... y: int - ... label: str - ... 
- >>> a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK - >>> b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check - >>> Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') - True - - The type info can be accessed via the Point2D.__annotations__ dict, and - the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. - TypedDict supports an additional equivalent form:: - - Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) - - By default, all keys must be present in a TypedDict. It is possible - to override this by specifying totality:: - - class Point2D(TypedDict, total=False): - x: int - y: int - - This means that a Point2D TypedDict can have any of the keys omitted. A type - checker is only expected to support a literal False or True as the value of - the total argument. True is the default, and makes all items defined in the - class body be required. - - The Required and NotRequired special forms can also be used to mark - individual keys as being required or not required:: - - class Point2D(TypedDict): - x: int # the "x" key must always be present (Required is the default) - y: NotRequired[int] # the "y" key can be omitted - - See PEP 655 for more details on Required and NotRequired. - - The ReadOnly special form can be used - to mark individual keys as immutable for type checkers:: - - class DatabaseUser(TypedDict): - id: ReadOnly[int] # the "id" key must not be modified - username: str # the "username" key can be changed - - """ - if fields is _sentinel or fields is None: - import warnings - - if fields is _sentinel: - deprecated_thing = "Failing to pass a value for the 'fields' parameter" - else: - deprecated_thing = "Passing `None` as the 'fields' parameter" - - example = f"`{typename} = TypedDict({typename!r}, {{{{}}}})`" - deprecation_msg = ( - "{name} is deprecated and will be disallowed in Python {remove}. " - "To create a TypedDict class with 0 fields " - "using the functional syntax, " - "pass an empty dictionary, e.g. " - ) + example + "." - warnings._deprecated(deprecated_thing, message=deprecation_msg, remove=(3, 15)) - fields = {} - - ns = {'__annotations__': dict(fields)} - module = _caller() - if module is not None: - # Setting correct module is necessary to make typed dict classes pickleable. - ns['__module__'] = module - - td = _TypedDictMeta(typename, (), ns, total=total) - td.__orig_bases__ = (TypedDict,) - return td - -_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {}) -TypedDict.__mro_entries__ = lambda bases: (_TypedDict,) - - -@_SpecialForm -def Required(self, parameters): - """Special typing construct to mark a TypedDict key as required. - - This is mainly useful for total=False TypedDicts. - - For example:: - - class Movie(TypedDict, total=False): - title: Required[str] - year: int - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - - There is no runtime checking that a required key is actually provided - when instantiating a related TypedDict. - """ - item = _type_check(parameters, f'{self._name} accepts only a single type.') - return _GenericAlias(self, (item,)) - - -@_SpecialForm -def NotRequired(self, parameters): - """Special typing construct to mark a TypedDict key as potentially missing. 
- - For example:: - - class Movie(TypedDict): - title: str - year: NotRequired[int] - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - """ - item = _type_check(parameters, f'{self._name} accepts only a single type.') - return _GenericAlias(self, (item,)) - - -@_SpecialForm -def ReadOnly(self, parameters): - """A special typing construct to mark an item of a TypedDict as read-only. - - For example:: - - class Movie(TypedDict): - title: ReadOnly[str] - year: int - - def mutate_movie(m: Movie) -> None: - m["year"] = 1992 # allowed - m["title"] = "The Matrix" # typechecker error - - There is no runtime checking for this property. - """ - item = _type_check(parameters, f'{self._name} accepts only a single type.') - return _GenericAlias(self, (item,)) - - -class NewType: - """NewType creates simple unique types with almost zero runtime overhead. - - NewType(name, tp) is considered a subtype of tp - by static type checkers. At runtime, NewType(name, tp) returns - a dummy callable that simply returns its argument. - - Usage:: - - UserId = NewType('UserId', int) - - def name_by_id(user_id: UserId) -> str: - ... - - UserId('user') # Fails type check - - name_by_id(42) # Fails type check - name_by_id(UserId(42)) # OK - - num = UserId(5) + 1 # type: int - """ - - __call__ = _idfunc - - def __init__(self, name, tp): - self.__qualname__ = name - if '.' in name: - name = name.rpartition('.')[-1] - self.__name__ = name - self.__supertype__ = tp - def_mod = _caller() - if def_mod != 'typing': - self.__module__ = def_mod - - def __mro_entries__(self, bases): - # We defined __mro_entries__ to get a better error message - # if a user attempts to subclass a NewType instance. bpo-46170 - superclass_name = self.__name__ - - class Dummy: - def __init_subclass__(cls): - subclass_name = cls.__name__ - raise TypeError( - f"Cannot subclass an instance of NewType. Perhaps you were looking for: " - f"`{subclass_name} = NewType({subclass_name!r}, {superclass_name})`" - ) - - return (Dummy,) - - def __repr__(self): - return f'{self.__module__}.{self.__qualname__}' - - def __reduce__(self): - return self.__qualname__ - - def __or__(self, other): - return Union[self, other] - - def __ror__(self, other): - return Union[other, self] - - -# Python-version-specific alias (Python 2: unicode; Python 3: str) -Text = str - - -# Constant that's True when type checking, but False here. -TYPE_CHECKING = False - - -class IO(Generic[AnyStr]): - """Generic base class for TextIO and BinaryIO. - - This is an abstract, generic version of the return of open(). - - NOTE: This does not distinguish between the different possible - classes (text vs. binary, read vs. write vs. read/write, - append-only, unbuffered). The TextIO and BinaryIO subclasses - below capture the distinctions between text vs. binary, which is - pervasive in the interface; however we currently do not offer a - way to track the other distinctions in the type system. 
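# Illustrative sketch: NewType objects are identity callables with no runtime
# wrapping; they also support | for unions and refuse subclassing, as
# implemented above.
from typing import NewType, get_args

UserId = NewType('UserId', int)

uid = UserId(42)
assert uid == 42 and type(uid) is int   # __call__ is the identity function
assert UserId.__supertype__ is int

MaybeId = UserId | None                 # __or__ builds a typing.Union
assert get_args(MaybeId) == (UserId, type(None))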
- """ - - __slots__ = () - - @property - @abstractmethod - def mode(self) -> str: - pass - - @property - @abstractmethod - def name(self) -> str: - pass - - @abstractmethod - def close(self) -> None: - pass - - @property - @abstractmethod - def closed(self) -> bool: - pass - - @abstractmethod - def fileno(self) -> int: - pass - - @abstractmethod - def flush(self) -> None: - pass - - @abstractmethod - def isatty(self) -> bool: - pass - - @abstractmethod - def read(self, n: int = -1) -> AnyStr: - pass - - @abstractmethod - def readable(self) -> bool: - pass - - @abstractmethod - def readline(self, limit: int = -1) -> AnyStr: - pass - - @abstractmethod - def readlines(self, hint: int = -1) -> List[AnyStr]: - pass - - @abstractmethod - def seek(self, offset: int, whence: int = 0) -> int: - pass - - @abstractmethod - def seekable(self) -> bool: - pass - - @abstractmethod - def tell(self) -> int: - pass - - @abstractmethod - def truncate(self, size: int = None) -> int: - pass - - @abstractmethod - def writable(self) -> bool: - pass - - @abstractmethod - def write(self, s: AnyStr) -> int: - pass - - @abstractmethod - def writelines(self, lines: List[AnyStr]) -> None: - pass - - @abstractmethod - def __enter__(self) -> 'IO[AnyStr]': - pass - - @abstractmethod - def __exit__(self, type, value, traceback) -> None: - pass - - -class BinaryIO(IO[bytes]): - """Typed version of the return of open() in binary mode.""" - - __slots__ = () - - @abstractmethod - def write(self, s: Union[bytes, bytearray]) -> int: - pass - - @abstractmethod - def __enter__(self) -> 'BinaryIO': - pass - - -class TextIO(IO[str]): - """Typed version of the return of open() in text mode.""" - - __slots__ = () - - @property - @abstractmethod - def buffer(self) -> BinaryIO: - pass - - @property - @abstractmethod - def encoding(self) -> str: - pass - - @property - @abstractmethod - def errors(self) -> Optional[str]: - pass - - @property - @abstractmethod - def line_buffering(self) -> bool: - pass - - @property - @abstractmethod - def newlines(self) -> Any: - pass - - @abstractmethod - def __enter__(self) -> 'TextIO': - pass - - -def reveal_type[T](obj: T, /) -> T: - """Ask a static type checker to reveal the inferred type of an expression. - - When a static type checker encounters a call to ``reveal_type()``, - it will emit the inferred type of the argument:: - - x: int = 1 - reveal_type(x) - - Running a static type checker (e.g., mypy) on this example - will produce output similar to 'Revealed type is "builtins.int"'. - - At runtime, the function prints the runtime type of the - argument and returns the argument unchanged. - """ - print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr) - return obj - - -class _IdentityCallable(Protocol): - def __call__[T](self, arg: T, /) -> T: - ... - - -def dataclass_transform( - *, - eq_default: bool = True, - order_default: bool = False, - kw_only_default: bool = False, - frozen_default: bool = False, - field_specifiers: tuple[type[Any] | Callable[..., Any], ...] = (), - **kwargs: Any, -) -> _IdentityCallable: - """Decorator to mark an object as providing dataclass-like behaviour. - - The decorator can be applied to a function, class, or metaclass. - - Example usage with a decorator function:: - - @dataclass_transform() - def create_model[T](cls: type[T]) -> type[T]: - ... - return cls - - @create_model - class CustomerModel: - id: int - name: str - - On a base class:: - - @dataclass_transform() - class ModelBase: ... 
- - class CustomerModel(ModelBase): - id: int - name: str - - On a metaclass:: - - @dataclass_transform() - class ModelMeta(type): ... - - class ModelBase(metaclass=ModelMeta): ... - - class CustomerModel(ModelBase): - id: int - name: str - - The ``CustomerModel`` classes defined above will - be treated by type checkers similarly to classes created with - ``@dataclasses.dataclass``. - For example, type checkers will assume these classes have - ``__init__`` methods that accept ``id`` and ``name``. - - The arguments to this decorator can be used to customize this behavior: - - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be - ``True`` or ``False`` if it is omitted by the caller. - - ``order_default`` indicates whether the ``order`` parameter is - assumed to be True or False if it is omitted by the caller. - - ``kw_only_default`` indicates whether the ``kw_only`` parameter is - assumed to be True or False if it is omitted by the caller. - - ``frozen_default`` indicates whether the ``frozen`` parameter is - assumed to be True or False if it is omitted by the caller. - - ``field_specifiers`` specifies a static list of supported classes - or functions that describe fields, similar to ``dataclasses.field()``. - - Arbitrary other keyword arguments are accepted in order to allow for - possible future extensions. - - At runtime, this decorator records its arguments in the - ``__dataclass_transform__`` attribute on the decorated object. - It has no other runtime effect. - - See PEP 681 for more details. - """ - def decorator(cls_or_fn): - cls_or_fn.__dataclass_transform__ = { - "eq_default": eq_default, - "order_default": order_default, - "kw_only_default": kw_only_default, - "frozen_default": frozen_default, - "field_specifiers": field_specifiers, - "kwargs": kwargs, - } - return cls_or_fn - return decorator - - -type _Func = Callable[..., Any] - - -def override[F: _Func](method: F, /) -> F: - """Indicate that a method is intended to override a method in a base class. - - Usage:: - - class Base: - def method(self) -> None: - pass - - class Child(Base): - @override - def method(self) -> None: - super().method() - - When this decorator is applied to a method, the type checker will - validate that it overrides a method or attribute with the same name on a - base class. This helps prevent bugs that may occur when a base class is - changed without an equivalent change to a child class. - - There is no runtime checking of this property. The decorator attempts to - set the ``__override__`` attribute to ``True`` on the decorated object to - allow runtime introspection. - - See PEP 698 for details. - """ - try: - method.__override__ = True - except (AttributeError, TypeError): - # Skip the attribute silently if it is not writable. - # AttributeError happens if the object has __slots__ or a - # read-only property, TypeError if it's a builtin class. - pass - return method - - -def is_protocol(tp: type, /) -> bool: - """Return True if the given type is a Protocol. - - Example:: - - >>> from typing import Protocol, is_protocol - >>> class P(Protocol): - ... def a(self) -> str: ... - ... b: int - >>> is_protocol(P) - True - >>> is_protocol(int) - False - """ - return ( - isinstance(tp, type) - and getattr(tp, '_is_protocol', False) - and tp != Protocol - ) - - -def get_protocol_members(tp: type, /) -> frozenset[str]: - """Return the set of members defined in a Protocol. - - Example:: - - >>> from typing import Protocol, get_protocol_members - >>> class P(Protocol): - ... 
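# Illustrative sketch: both decorators above are introspection-only at
# runtime -- @override sets __override__, and @dataclass_transform() stores
# its arguments in __dataclass_transform__ for tools to read.
from typing import dataclass_transform, override

class Base:
    def ping(self) -> str:
        return 'base'

class Child(Base):
    @override                 # a type checker verifies Base.ping exists
    def ping(self) -> str:
        return 'child'

assert Child.ping.__override__ is True

@dataclass_transform(kw_only_default=True)
def model(cls):               # stand-in for a real model factory
    return cls

assert model.__dataclass_transform__['kw_only_default'] is True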
def a(self) -> str: ... - ... b: int - >>> get_protocol_members(P) == frozenset({'a', 'b'}) - True - - Raise a TypeError for arguments that are not Protocols. - """ - if not is_protocol(tp): - raise TypeError(f'{tp!r} is not a Protocol') - return frozenset(tp.__protocol_attrs__) - - -def __getattr__(attr): - """Improve the import time of the typing module. - - Soft-deprecated objects which are costly to create - are only created on-demand here. - """ - if attr in {"Pattern", "Match"}: - import re - obj = _alias(getattr(re, attr), 1) - elif attr in {"ContextManager", "AsyncContextManager"}: - import contextlib - obj = _alias(getattr(contextlib, f"Abstract{attr}"), 2, name=attr, defaults=(bool | None,)) - elif attr == "_collect_parameters": - import warnings - - depr_message = ( - "The private _collect_parameters function is deprecated and will be" - " removed in a future version of Python. Any use of private functions" - " is discouraged and may break in the future." - ) - warnings.warn(depr_message, category=DeprecationWarning, stacklevel=2) - obj = _collect_type_parameters - else: - raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") - globals()[attr] = obj - return obj diff --git a/Python313_13_x86_Template/Lib/unittest/__init__.py b/Python313_13_x86_Template/Lib/unittest/__init__.py deleted file mode 100644 index f1f6c911..00000000 --- a/Python313_13_x86_Template/Lib/unittest/__init__.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's -Smalltalk testing framework (used with permission). - -This module contains the core framework classes that form the basis of -specific test cases and suites (TestCase, TestSuite etc.), and also a -text-based utility class for running the tests and reporting the results - (TextTestRunner). - -Simple usage: - - import unittest - - class IntegerArithmeticTestCase(unittest.TestCase): - def testAdd(self): # test method names begin with 'test' - self.assertEqual((1 + 2), 3) - self.assertEqual(0 + 1, 1) - def testMultiply(self): - self.assertEqual((0 * 10), 0) - self.assertEqual((5 * 8), 40) - - if __name__ == '__main__': - unittest.main() - -Further information is available in the bundled documentation, and from - - http://docs.python.org/library/unittest.html - -Copyright (c) 1999-2003 Steve Purcell -Copyright (c) 2003-2010 Python Software Foundation -This module is free software, and you may redistribute it and/or modify -it under the same terms as Python itself, so long as this copyright message -and disclaimer are retained in their original form. - -IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, -SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF -THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. - -THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, -AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, -SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
-""" - -__all__ = ['TestResult', 'TestCase', 'IsolatedAsyncioTestCase', 'TestSuite', - 'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main', - 'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless', - 'expectedFailure', 'TextTestResult', 'installHandler', - 'registerResult', 'removeResult', 'removeHandler', - 'addModuleCleanup', 'doModuleCleanups', 'enterModuleContext'] - -__unittest = True - -from .result import TestResult -from .case import (addModuleCleanup, TestCase, FunctionTestCase, SkipTest, skip, - skipIf, skipUnless, expectedFailure, doModuleCleanups, - enterModuleContext) -from .suite import BaseTestSuite, TestSuite -from .loader import TestLoader, defaultTestLoader -from .main import TestProgram, main -from .runner import TextTestRunner, TextTestResult -from .signals import installHandler, registerResult, removeResult, removeHandler -# IsolatedAsyncioTestCase will be imported lazily. - - -# Lazy import of IsolatedAsyncioTestCase from .async_case -# It imports asyncio, which is relatively heavy, but most tests -# do not need it. - -def __dir__(): - return globals().keys() | {'IsolatedAsyncioTestCase'} - -def __getattr__(name): - if name == 'IsolatedAsyncioTestCase': - global IsolatedAsyncioTestCase - from .async_case import IsolatedAsyncioTestCase - return IsolatedAsyncioTestCase - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/Python313_13_x86_Template/Lib/unittest/async_case.py b/Python313_13_x86_Template/Lib/unittest/async_case.py deleted file mode 100644 index e761ba7e..00000000 --- a/Python313_13_x86_Template/Lib/unittest/async_case.py +++ /dev/null @@ -1,146 +0,0 @@ -import asyncio -import contextvars -import inspect -import warnings - -from .case import TestCase - -__unittest = True - -class IsolatedAsyncioTestCase(TestCase): - # Names intentionally have a long prefix - # to reduce a chance of clashing with user-defined attributes - # from inherited test case - # - # The class doesn't call loop.run_until_complete(self.setUp()) and family - # but uses a different approach: - # 1. create a long-running task that reads self.setUp() - # awaitable from queue along with a future - # 2. await the awaitable object passing in and set the result - # into the future object - # 3. Outer code puts the awaitable and the future object into a queue - # with waiting for the future - # The trick is necessary because every run_until_complete() call - # creates a new task with embedded ContextVar context. - # To share contextvars between setUp(), test and tearDown() we need to execute - # them inside the same task. - - # Note: the test case modifies event loop policy if the policy was not instantiated - # yet, unless loop_factory=asyncio.EventLoop is set. 
- # asyncio.get_event_loop_policy() creates a default policy on demand but never - # returns None - # I believe this is not an issue in user level tests but python itself for testing - # should reset a policy in every test module - # by calling asyncio.set_event_loop_policy(None) in tearDownModule() - # or set loop_factory=asyncio.EventLoop - - loop_factory = None - - def __init__(self, methodName='runTest'): - super().__init__(methodName) - self._asyncioRunner = None - self._asyncioTestContext = contextvars.copy_context() - - async def asyncSetUp(self): - pass - - async def asyncTearDown(self): - pass - - def addAsyncCleanup(self, func, /, *args, **kwargs): - # A trivial trampoline to addCleanup() - # the function exists because it has a different semantics - # and signature: - # addCleanup() accepts regular functions - # but addAsyncCleanup() accepts coroutines - # - # We intentionally don't add inspect.iscoroutinefunction() check - # for func argument because there is no way - # to check for async function reliably: - # 1. It can be "async def func()" itself - # 2. Class can implement "async def __call__()" method - # 3. Regular "def func()" that returns awaitable object - self.addCleanup(*(func, *args), **kwargs) - - async def enterAsyncContext(self, cm): - """Enters the supplied asynchronous context manager. - - If successful, also adds its __aexit__ method as a cleanup - function and returns the result of the __aenter__ method. - """ - # We look up the special methods on the type to match the with - # statement. - cls = type(cm) - try: - enter = cls.__aenter__ - exit = cls.__aexit__ - except AttributeError: - raise TypeError(f"'{cls.__module__}.{cls.__qualname__}' object does " - f"not support the asynchronous context manager protocol" - ) from None - result = await enter(cm) - self.addAsyncCleanup(exit, cm, None, None, None) - return result - - def _callSetUp(self): - # Force loop to be initialized and set as the current loop - # so that setUp functions can use get_event_loop() and get the - # correct loop instance. 
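# Illustrative sketch: typical use of the hooks defined above. asyncSetUp,
# the test coroutine, and asyncTearDown all run inside one task, sharing a
# ContextVar context; setting loop_factory (Python 3.13+) avoids touching
# the global event loop policy, as the comment above recommends.
import asyncio
import unittest

class TestQueue(unittest.IsolatedAsyncioTestCase):
    loop_factory = asyncio.EventLoop

    async def asyncSetUp(self):
        self.queue = asyncio.Queue()

    async def test_roundtrip(self):
        await self.queue.put('item')
        self.assertEqual(await self.queue.get(), 'item')

if __name__ == '__main__':
    unittest.main()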
- self._asyncioRunner.get_loop() - self._asyncioTestContext.run(self.setUp) - self._callAsync(self.asyncSetUp) - - def _callTestMethod(self, method): - if self._callMaybeAsync(method) is not None: - warnings.warn(f'It is deprecated to return a value that is not None from a ' - f'test case ({method})', DeprecationWarning, stacklevel=4) - - def _callTearDown(self): - self._callAsync(self.asyncTearDown) - self._asyncioTestContext.run(self.tearDown) - - def _callCleanup(self, function, *args, **kwargs): - self._callMaybeAsync(function, *args, **kwargs) - - def _callAsync(self, func, /, *args, **kwargs): - assert self._asyncioRunner is not None, 'asyncio runner is not initialized' - assert inspect.iscoroutinefunction(func), f'{func!r} is not an async function' - return self._asyncioRunner.run( - func(*args, **kwargs), - context=self._asyncioTestContext - ) - - def _callMaybeAsync(self, func, /, *args, **kwargs): - assert self._asyncioRunner is not None, 'asyncio runner is not initialized' - if inspect.iscoroutinefunction(func): - return self._asyncioRunner.run( - func(*args, **kwargs), - context=self._asyncioTestContext, - ) - else: - return self._asyncioTestContext.run(func, *args, **kwargs) - - def _setupAsyncioRunner(self): - assert self._asyncioRunner is None, 'asyncio runner is already initialized' - runner = asyncio.Runner(debug=True, loop_factory=self.loop_factory) - self._asyncioRunner = runner - - def _tearDownAsyncioRunner(self): - runner = self._asyncioRunner - runner.close() - - def run(self, result=None): - self._setupAsyncioRunner() - try: - return super().run(result) - finally: - self._tearDownAsyncioRunner() - - def debug(self): - self._setupAsyncioRunner() - super().debug() - self._tearDownAsyncioRunner() - - def __del__(self): - if self._asyncioRunner is not None: - self._tearDownAsyncioRunner() diff --git a/Python313_13_x86_Template/Lib/unittest/case.py b/Python313_13_x86_Template/Lib/unittest/case.py deleted file mode 100644 index 36daa61f..00000000 --- a/Python313_13_x86_Template/Lib/unittest/case.py +++ /dev/null @@ -1,1478 +0,0 @@ -"""Test case implementation""" - -import sys -import functools -import difflib -import pprint -import re -import warnings -import collections -import contextlib -import traceback -import time -import types - -from . import result -from .util import (strclass, safe_repr, _count_diff_all_purpose, - _count_diff_hashable, _common_shorten_repr) - -__unittest = True - -_subtest_msg_sentinel = object() - -DIFF_OMITTED = ('\nDiff is %s characters long. ' - 'Set self.maxDiff to None to see it.') - -class SkipTest(Exception): - """ - Raise this exception in a test to skip it. - - Usually you can use TestCase.skipTest() or one of the skipping decorators - instead of raising this directly. - """ - -class _ShouldStop(Exception): - """ - The test should stop. - """ - -class _UnexpectedSuccess(Exception): - """ - The test was supposed to fail, but it didn't! 
- """ - - -class _Outcome(object): - def __init__(self, result=None): - self.expecting_failure = False - self.result = result - self.result_supports_subtests = hasattr(result, "addSubTest") - self.success = True - self.expectedFailure = None - - @contextlib.contextmanager - def testPartExecutor(self, test_case, subTest=False): - old_success = self.success - self.success = True - try: - yield - except KeyboardInterrupt: - raise - except SkipTest as e: - self.success = False - _addSkip(self.result, test_case, str(e)) - except _ShouldStop: - pass - except: - exc_info = sys.exc_info() - if self.expecting_failure: - self.expectedFailure = exc_info - else: - self.success = False - if subTest: - self.result.addSubTest(test_case.test_case, test_case, exc_info) - else: - _addError(self.result, test_case, exc_info) - # explicitly break a reference cycle: - # exc_info -> frame -> exc_info - exc_info = None - else: - if subTest and self.success: - self.result.addSubTest(test_case.test_case, test_case, None) - finally: - self.success = self.success and old_success - - -def _addSkip(result, test_case, reason): - addSkip = getattr(result, 'addSkip', None) - if addSkip is not None: - addSkip(test_case, reason) - else: - warnings.warn("TestResult has no addSkip method, skips not reported", - RuntimeWarning, 2) - result.addSuccess(test_case) - -def _addError(result, test, exc_info): - if result is not None and exc_info is not None: - if issubclass(exc_info[0], test.failureException): - result.addFailure(test, exc_info) - else: - result.addError(test, exc_info) - -def _id(obj): - return obj - - -def _enter_context(cm, addcleanup): - # We look up the special methods on the type to match the with - # statement. - cls = type(cm) - try: - enter = cls.__enter__ - exit = cls.__exit__ - except AttributeError: - raise TypeError(f"'{cls.__module__}.{cls.__qualname__}' object does " - f"not support the context manager protocol") from None - result = enter(cm) - addcleanup(exit, cm, None, None, None) - return result - - -_module_cleanups = [] -def addModuleCleanup(function, /, *args, **kwargs): - """Same as addCleanup, except the cleanup items are called even if - setUpModule fails (unlike tearDownModule).""" - _module_cleanups.append((function, args, kwargs)) - -def enterModuleContext(cm): - """Same as enterContext, but module-wide.""" - return _enter_context(cm, addModuleCleanup) - - -def doModuleCleanups(): - """Execute all module cleanup functions. Normally called for you after - tearDownModule.""" - exceptions = [] - while _module_cleanups: - function, args, kwargs = _module_cleanups.pop() - try: - function(*args, **kwargs) - except Exception as exc: - exceptions.append(exc) - if exceptions: - # Swallows all but first exception. If a multi-exception handler - # gets written we should use that here instead. - raise exceptions[0] - - -def skip(reason): - """ - Unconditionally skip a test. - """ - def decorator(test_item): - if not isinstance(test_item, type): - @functools.wraps(test_item) - def skip_wrapper(*args, **kwargs): - raise SkipTest(reason) - test_item = skip_wrapper - - test_item.__unittest_skip__ = True - test_item.__unittest_skip_why__ = reason - return test_item - if isinstance(reason, types.FunctionType): - test_item = reason - reason = '' - return decorator(test_item) - return decorator - -def skipIf(condition, reason): - """ - Skip a test if the condition is true. 
- """ - if condition: - return skip(reason) - return _id - -def skipUnless(condition, reason): - """ - Skip a test unless the condition is true. - """ - if not condition: - return skip(reason) - return _id - -def expectedFailure(test_item): - test_item.__unittest_expecting_failure__ = True - return test_item - -def _is_subtype(expected, basetype): - if isinstance(expected, tuple): - return all(_is_subtype(e, basetype) for e in expected) - return isinstance(expected, type) and issubclass(expected, basetype) - -class _BaseTestCaseContext: - - def __init__(self, test_case): - self.test_case = test_case - - def _raiseFailure(self, standardMsg): - msg = self.test_case._formatMessage(self.msg, standardMsg) - raise self.test_case.failureException(msg) - -class _AssertRaisesBaseContext(_BaseTestCaseContext): - - def __init__(self, expected, test_case, expected_regex=None): - _BaseTestCaseContext.__init__(self, test_case) - self.expected = expected - self.test_case = test_case - if expected_regex is not None: - expected_regex = re.compile(expected_regex) - self.expected_regex = expected_regex - self.obj_name = None - self.msg = None - - def handle(self, name, args, kwargs): - """ - If args is empty, assertRaises/Warns is being used as a - context manager, so check for a 'msg' kwarg and return self. - If args is not empty, call a callable passing positional and keyword - arguments. - """ - try: - if not _is_subtype(self.expected, self._base_type): - raise TypeError('%s() arg 1 must be %s' % - (name, self._base_type_str)) - if not args: - self.msg = kwargs.pop('msg', None) - if kwargs: - raise TypeError('%r is an invalid keyword argument for ' - 'this function' % (next(iter(kwargs)),)) - return self - - callable_obj, *args = args - try: - self.obj_name = callable_obj.__name__ - except AttributeError: - self.obj_name = str(callable_obj) - with self: - callable_obj(*args, **kwargs) - finally: - # bpo-23890: manually break a reference cycle - self = None - - -class _AssertRaisesContext(_AssertRaisesBaseContext): - """A context manager used to implement TestCase.assertRaises* methods.""" - - _base_type = BaseException - _base_type_str = 'an exception type or tuple of exception types' - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - if exc_type is None: - try: - exc_name = self.expected.__name__ - except AttributeError: - exc_name = str(self.expected) - if self.obj_name: - self._raiseFailure("{} not raised by {}".format(exc_name, - self.obj_name)) - else: - self._raiseFailure("{} not raised".format(exc_name)) - else: - traceback.clear_frames(tb) - if not issubclass(exc_type, self.expected): - # let unexpected exceptions pass through - return False - # store exception, without traceback, for later retrieval - self.exception = exc_value.with_traceback(None) - if self.expected_regex is None: - return True - - expected_regex = self.expected_regex - if not expected_regex.search(str(exc_value)): - self._raiseFailure('"{}" does not match "{}"'.format( - expected_regex.pattern, str(exc_value))) - return True - - __class_getitem__ = classmethod(types.GenericAlias) - - -class _AssertWarnsContext(_AssertRaisesBaseContext): - """A context manager used to implement TestCase.assertWarns* methods.""" - - _base_type = Warning - _base_type_str = 'a warning type or tuple of warning types' - - def __enter__(self): - # The __warningregistry__'s need to be in a pristine state for tests - # to work properly. 
- for v in list(sys.modules.values()): - if getattr(v, '__warningregistry__', None): - v.__warningregistry__ = {} - self.warnings_manager = warnings.catch_warnings(record=True) - self.warnings = self.warnings_manager.__enter__() - warnings.simplefilter("always", self.expected) - return self - - def __exit__(self, exc_type, exc_value, tb): - self.warnings_manager.__exit__(exc_type, exc_value, tb) - if exc_type is not None: - # let unexpected exceptions pass through - return - try: - exc_name = self.expected.__name__ - except AttributeError: - exc_name = str(self.expected) - first_matching = None - for m in self.warnings: - w = m.message - if not isinstance(w, self.expected): - continue - if first_matching is None: - first_matching = w - if (self.expected_regex is not None and - not self.expected_regex.search(str(w))): - continue - # store warning for later retrieval - self.warning = w - self.filename = m.filename - self.lineno = m.lineno - return - # Now we simply try to choose a helpful failure message - if first_matching is not None: - self._raiseFailure('"{}" does not match "{}"'.format( - self.expected_regex.pattern, str(first_matching))) - if self.obj_name: - self._raiseFailure("{} not triggered by {}".format(exc_name, - self.obj_name)) - else: - self._raiseFailure("{} not triggered".format(exc_name)) - - -class _AssertNotWarnsContext(_AssertWarnsContext): - - def __exit__(self, exc_type, exc_value, tb): - self.warnings_manager.__exit__(exc_type, exc_value, tb) - if exc_type is not None: - # let unexpected exceptions pass through - return - try: - exc_name = self.expected.__name__ - except AttributeError: - exc_name = str(self.expected) - for m in self.warnings: - w = m.message - if isinstance(w, self.expected): - self._raiseFailure(f"{exc_name} triggered") - - -class _OrderedChainMap(collections.ChainMap): - def __iter__(self): - seen = set() - for mapping in self.maps: - for k in mapping: - if k not in seen: - seen.add(k) - yield k - - -class TestCase(object): - """A class whose instances are single test cases. - - By default, the test code itself should be placed in a method named - 'runTest'. - - If the fixture may be used for many test cases, create as - many test methods as are needed. When instantiating such a TestCase - subclass, specify in the constructor arguments the name of the test method - that the instance is to execute. - - Test authors should subclass TestCase for their own tests. Construction - and deconstruction of the test's environment ('fixture') can be - implemented by overriding the 'setUp' and 'tearDown' methods respectively. - - If it is necessary to override the __init__ method, the base class - __init__ method must always be called. It is important that subclasses - should not change the signature of their __init__ method, since instances - of the classes are instantiated automatically by parts of the framework - in order to be run. - - When subclassing TestCase, you can set these attributes: - * failureException: determines which exception will be raised when - the instance's assertion methods fail; test methods raising this - exception will be deemed to have 'failed' rather than 'errored'. - * longMessage: determines whether long messages (including repr of - objects used in assert methods) will be printed on failure in *addition* - to any explicit message passed. - * maxDiff: sets the maximum length of a diff in failure messages - by assert methods using difflib. 
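# Illustrative sketch: _AssertWarnsContext above clears every module's
# __warningregistry__ and installs an "always" filter, so the expected
# warning fires even if it was already emitted earlier in the process.
import unittest
import warnings

def legacy():
    warnings.warn('use new() instead', DeprecationWarning)

class TestWarns(unittest.TestCase):
    def test_warns(self):
        with self.assertWarns(DeprecationWarning) as cm:
            legacy()
        self.assertIn('new()', str(cm.warning))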
It is looked up as an instance - attribute so can be configured by individual tests if required. - """ - - failureException = AssertionError - - longMessage = True - - maxDiff = 80*8 - - # If a string is longer than _diffThreshold, use normal comparison instead - # of difflib. See #11763. - _diffThreshold = 2**16 - - def __init_subclass__(cls, *args, **kwargs): - # Attribute used by TestSuite for classSetUp - cls._classSetupFailed = False - cls._class_cleanups = [] - super().__init_subclass__(*args, **kwargs) - - def __init__(self, methodName='runTest'): - """Create an instance of the class that will use the named test - method when executed. Raises a ValueError if the instance does - not have a method with the specified name. - """ - self._testMethodName = methodName - self._outcome = None - self._testMethodDoc = 'No test' - try: - testMethod = getattr(self, methodName) - except AttributeError: - if methodName != 'runTest': - # we allow instantiation with no explicit method name - # but not an *incorrect* or missing method name - raise ValueError("no such test method in %s: %s" % - (self.__class__, methodName)) - else: - self._testMethodDoc = testMethod.__doc__ - self._cleanups = [] - self._subtest = None - - # Map types to custom assertEqual functions that will compare - # instances of said type in more detail to generate a more useful - # error message. - self._type_equality_funcs = {} - self.addTypeEqualityFunc(dict, 'assertDictEqual') - self.addTypeEqualityFunc(list, 'assertListEqual') - self.addTypeEqualityFunc(tuple, 'assertTupleEqual') - self.addTypeEqualityFunc(set, 'assertSetEqual') - self.addTypeEqualityFunc(frozenset, 'assertSetEqual') - self.addTypeEqualityFunc(str, 'assertMultiLineEqual') - - def addTypeEqualityFunc(self, typeobj, function): - """Add a type specific assertEqual style function to compare a type. - - This method is for use by TestCase subclasses that need to register - their own type equality functions to provide nicer error messages. - - Args: - typeobj: The data type to call this function on when both values - are of the same type in assertEqual(). - function: The callable taking two arguments and an optional - msg= argument that raises self.failureException with a - useful error message when the two arguments are not equal. - """ - self._type_equality_funcs[typeobj] = function - - def addCleanup(self, function, /, *args, **kwargs): - """Add a function, with arguments, to be called when the test is - completed. Functions added are called on a LIFO basis and are - called after tearDown on test failure or success. - - Cleanup items are called even if setUp fails (unlike tearDown).""" - self._cleanups.append((function, args, kwargs)) - - def enterContext(self, cm): - """Enters the supplied context manager. - - If successful, also adds its __exit__ method as a cleanup - function and returns the result of the __enter__ method. - """ - return _enter_context(cm, self.addCleanup) - - @classmethod - def addClassCleanup(cls, function, /, *args, **kwargs): - """Same as addCleanup, except the cleanup items are called even if - setUpClass fails (unlike tearDownClass).""" - cls._class_cleanups.append((function, args, kwargs)) - - @classmethod - def enterClassContext(cls, cm): - """Same as enterContext, but class-wide.""" - return _enter_context(cm, cls.addClassCleanup) - - def setUp(self): - "Hook method for setting up the test fixture before exercising it." - pass - - def tearDown(self): - "Hook method for deconstructing the test fixture after testing it." 
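# Illustrative sketch: cleanups registered above run LIFO after tearDown and
# run even when setUp fails; enterContext() is addCleanup() applied to a
# context manager's __exit__.
import os
import tempfile
import unittest

class TestWorkdir(unittest.TestCase):
    def setUp(self):
        # TemporaryDirectory.__exit__ is registered as a cleanup
        self.tmp = self.enterContext(tempfile.TemporaryDirectory())

    def test_exists(self):
        self.assertTrue(os.path.isdir(self.tmp))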
- pass - - @classmethod - def setUpClass(cls): - "Hook method for setting up class fixture before running tests in the class." - - @classmethod - def tearDownClass(cls): - "Hook method for deconstructing the class fixture after running all tests in the class." - - def countTestCases(self): - return 1 - - def defaultTestResult(self): - return result.TestResult() - - def shortDescription(self): - """Returns a one-line description of the test, or None if no - description has been provided. - - The default implementation of this method returns the first line of - the specified test method's docstring. - """ - doc = self._testMethodDoc - return doc.strip().split("\n")[0].strip() if doc else None - - - def id(self): - return "%s.%s" % (strclass(self.__class__), self._testMethodName) - - def __eq__(self, other): - if type(self) is not type(other): - return NotImplemented - - return self._testMethodName == other._testMethodName - - def __hash__(self): - return hash((type(self), self._testMethodName)) - - def __str__(self): - return "%s (%s.%s)" % (self._testMethodName, strclass(self.__class__), self._testMethodName) - - def __repr__(self): - return "<%s testMethod=%s>" % \ - (strclass(self.__class__), self._testMethodName) - - @contextlib.contextmanager - def subTest(self, msg=_subtest_msg_sentinel, **params): - """Return a context manager that will return the enclosed block - of code in a subtest identified by the optional message and - keyword parameters. A failure in the subtest marks the test - case as failed but resumes execution at the end of the enclosed - block, allowing further test code to be executed. - """ - if self._outcome is None or not self._outcome.result_supports_subtests: - yield - return - parent = self._subtest - if parent is None: - params_map = _OrderedChainMap(params) - else: - params_map = parent.params.new_child(params) - self._subtest = _SubTest(self, msg, params_map) - try: - with self._outcome.testPartExecutor(self._subtest, subTest=True): - yield - if not self._outcome.success: - result = self._outcome.result - if result is not None and result.failfast: - raise _ShouldStop - elif self._outcome.expectedFailure: - # If the test is expecting a failure, we really want to - # stop now and register the expected failure. - raise _ShouldStop - finally: - self._subtest = parent - - def _addExpectedFailure(self, result, exc_info): - try: - addExpectedFailure = result.addExpectedFailure - except AttributeError: - warnings.warn("TestResult has no addExpectedFailure method, reporting as passes", - RuntimeWarning) - result.addSuccess(self) - else: - addExpectedFailure(self, exc_info) - - def _addUnexpectedSuccess(self, result): - try: - addUnexpectedSuccess = result.addUnexpectedSuccess - except AttributeError: - warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failure", - RuntimeWarning) - # We need to pass an actual exception and traceback to addFailure, - # otherwise the legacy result can choke. 
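# Illustrative sketch: subTest() above lets one test method report each
# failing parameter set separately instead of stopping at the first one.
import unittest

class TestEven(unittest.TestCase):
    def test_even(self):
        for n in (0, 2, 4, 5):    # 5 is reported as a single subtest failure
            with self.subTest(n=n):
                self.assertEqual(n % 2, 0)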
- try: - raise _UnexpectedSuccess from None - except _UnexpectedSuccess: - result.addFailure(self, sys.exc_info()) - else: - addUnexpectedSuccess(self) - - def _addDuration(self, result, elapsed): - try: - addDuration = result.addDuration - except AttributeError: - warnings.warn("TestResult has no addDuration method", - RuntimeWarning) - else: - addDuration(self, elapsed) - - def _callSetUp(self): - self.setUp() - - def _callTestMethod(self, method): - if method() is not None: - warnings.warn(f'It is deprecated to return a value that is not None from a ' - f'test case ({method})', DeprecationWarning, stacklevel=3) - - def _callTearDown(self): - self.tearDown() - - def _callCleanup(self, function, /, *args, **kwargs): - function(*args, **kwargs) - - def run(self, result=None): - if result is None: - result = self.defaultTestResult() - startTestRun = getattr(result, 'startTestRun', None) - stopTestRun = getattr(result, 'stopTestRun', None) - if startTestRun is not None: - startTestRun() - else: - stopTestRun = None - - result.startTest(self) - try: - testMethod = getattr(self, self._testMethodName) - if (getattr(self.__class__, "__unittest_skip__", False) or - getattr(testMethod, "__unittest_skip__", False)): - # If the class or method was skipped. - skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') - or getattr(testMethod, '__unittest_skip_why__', '')) - _addSkip(result, self, skip_why) - return result - - expecting_failure = ( - getattr(self, "__unittest_expecting_failure__", False) or - getattr(testMethod, "__unittest_expecting_failure__", False) - ) - outcome = _Outcome(result) - start_time = time.perf_counter() - try: - self._outcome = outcome - - with outcome.testPartExecutor(self): - self._callSetUp() - if outcome.success: - outcome.expecting_failure = expecting_failure - with outcome.testPartExecutor(self): - self._callTestMethod(testMethod) - outcome.expecting_failure = False - with outcome.testPartExecutor(self): - self._callTearDown() - self.doCleanups() - self._addDuration(result, (time.perf_counter() - start_time)) - - if outcome.success: - if expecting_failure: - if outcome.expectedFailure: - self._addExpectedFailure(result, outcome.expectedFailure) - else: - self._addUnexpectedSuccess(result) - else: - result.addSuccess(self) - return result - finally: - # explicitly break reference cycle: - # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure - outcome.expectedFailure = None - outcome = None - - # clear the outcome, no more needed - self._outcome = None - - finally: - result.stopTest(self) - if stopTestRun is not None: - stopTestRun() - - def doCleanups(self): - """Execute all cleanup functions. Normally called for you after - tearDown.""" - outcome = self._outcome or _Outcome() - while self._cleanups: - function, args, kwargs = self._cleanups.pop() - with outcome.testPartExecutor(self): - self._callCleanup(function, *args, **kwargs) - - # return this for backwards compatibility - # even though we no longer use it internally - return outcome.success - - @classmethod - def doClassCleanups(cls): - """Execute all class cleanup functions. 
Normally called for you after - tearDownClass.""" - cls.tearDown_exceptions = [] - while cls._class_cleanups: - function, args, kwargs = cls._class_cleanups.pop() - try: - function(*args, **kwargs) - except Exception: - cls.tearDown_exceptions.append(sys.exc_info()) - - def __call__(self, *args, **kwds): - return self.run(*args, **kwds) - - def debug(self): - """Run the test without collecting errors in a TestResult""" - testMethod = getattr(self, self._testMethodName) - if (getattr(self.__class__, "__unittest_skip__", False) or - getattr(testMethod, "__unittest_skip__", False)): - # If the class or method was skipped. - skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') - or getattr(testMethod, '__unittest_skip_why__', '')) - raise SkipTest(skip_why) - - self._callSetUp() - self._callTestMethod(testMethod) - self._callTearDown() - while self._cleanups: - function, args, kwargs = self._cleanups.pop() - self._callCleanup(function, *args, **kwargs) - - def skipTest(self, reason): - """Skip this test.""" - raise SkipTest(reason) - - def fail(self, msg=None): - """Fail immediately, with the given message.""" - raise self.failureException(msg) - - def assertFalse(self, expr, msg=None): - """Check that the expression is false.""" - if expr: - msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr)) - raise self.failureException(msg) - - def assertTrue(self, expr, msg=None): - """Check that the expression is true.""" - if not expr: - msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr)) - raise self.failureException(msg) - - def _formatMessage(self, msg, standardMsg): - """Honour the longMessage attribute when generating failure messages. - If longMessage is False this means: - * Use only an explicit message if it is provided - * Otherwise use the standard message for the assert - - If longMessage is True: - * Use the standard message - * If an explicit message is provided, plus ' : ' and the explicit message - """ - if not self.longMessage: - return msg or standardMsg - if msg is None: - return standardMsg - try: - # don't switch to '{}' formatting in Python 2.X - # it changes the way unicode input is handled - return '%s : %s' % (standardMsg, msg) - except UnicodeDecodeError: - return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg)) - - def assertRaises(self, expected_exception, *args, **kwargs): - """Fail unless an exception of class expected_exception is raised - by the callable when invoked with specified positional and - keyword arguments. If a different type of exception is - raised, it will not be caught, and the test case will be - deemed to have suffered an error, exactly as for an - unexpected exception. - - If called with the callable and arguments omitted, will return a - context object used like this:: - - with self.assertRaises(SomeException): - do_something() - - An optional keyword argument 'msg' can be provided when assertRaises - is used as a context object. - - The context manager keeps a reference to the exception as - the 'exception' attribute. 
This allows you to inspect the - exception after the assertion:: - - with self.assertRaises(SomeException) as cm: - do_something() - the_exception = cm.exception - self.assertEqual(the_exception.error_code, 3) - """ - context = _AssertRaisesContext(expected_exception, self) - try: - return context.handle('assertRaises', args, kwargs) - finally: - # bpo-23890: manually break a reference cycle - context = None - - def assertWarns(self, expected_warning, *args, **kwargs): - """Fail unless a warning of class warnClass is triggered - by the callable when invoked with specified positional and - keyword arguments. If a different type of warning is - triggered, it will not be handled: depending on the other - warning filtering rules in effect, it might be silenced, printed - out, or raised as an exception. - - If called with the callable and arguments omitted, will return a - context object used like this:: - - with self.assertWarns(SomeWarning): - do_something() - - An optional keyword argument 'msg' can be provided when assertWarns - is used as a context object. - - The context manager keeps a reference to the first matching - warning as the 'warning' attribute; similarly, the 'filename' - and 'lineno' attributes give you information about the line - of Python code from which the warning was triggered. - This allows you to inspect the warning after the assertion:: - - with self.assertWarns(SomeWarning) as cm: - do_something() - the_warning = cm.warning - self.assertEqual(the_warning.some_attribute, 147) - """ - context = _AssertWarnsContext(expected_warning, self) - return context.handle('assertWarns', args, kwargs) - - def _assertNotWarns(self, expected_warning, *args, **kwargs): - """The opposite of assertWarns. Private due to low demand.""" - context = _AssertNotWarnsContext(expected_warning, self) - return context.handle('_assertNotWarns', args, kwargs) - - def assertLogs(self, logger=None, level=None): - """Fail unless a log message of level *level* or higher is emitted - on *logger_name* or its children. If omitted, *level* defaults to - INFO and *logger* defaults to the root logger. - - This method must be used as a context manager, and will yield - a recording object with two attributes: `output` and `records`. - At the end of the context manager, the `output` attribute will - be a list of the matching formatted log messages and the - `records` attribute will be a list of the corresponding LogRecord - objects. - - Example:: - - with self.assertLogs('foo', level='INFO') as cm: - logging.getLogger('foo').info('first message') - logging.getLogger('foo.bar').error('second message') - self.assertEqual(cm.output, ['INFO:foo:first message', - 'ERROR:foo.bar:second message']) - """ - # Lazy import to avoid importing logging if it is not needed. - from ._log import _AssertLogsContext - return _AssertLogsContext(self, logger, level, no_logs=False) - - def assertNoLogs(self, logger=None, level=None): - """ Fail unless no log messages of level *level* or higher are emitted - on *logger_name* or its children. - - This method must be used as a context manager. - """ - from ._log import _AssertLogsContext - return _AssertLogsContext(self, logger, level, no_logs=True) - - def _getAssertEqualityFunc(self, first, second): - """Get a detailed comparison function for the types of the two args. - - Returns: A callable accepting (first, second, msg=None) that will - raise a failure exception if first != second with a useful human - readable error message for those types. 
- """ - # - # NOTE(gregory.p.smith): I considered isinstance(first, type(second)) - # and vice versa. I opted for the conservative approach in case - # subclasses are not intended to be compared in detail to their super - # class instances using a type equality func. This means testing - # subtypes won't automagically use the detailed comparison. Callers - # should use their type specific assertSpamEqual method to compare - # subclasses if the detailed comparison is desired and appropriate. - # See the discussion in http://bugs.python.org/issue2578. - # - if type(first) is type(second): - asserter = self._type_equality_funcs.get(type(first)) - if asserter is not None: - if isinstance(asserter, str): - asserter = getattr(self, asserter) - return asserter - - return self._baseAssertEqual - - def _baseAssertEqual(self, first, second, msg=None): - """The default assertEqual implementation, not type specific.""" - if not first == second: - standardMsg = '%s != %s' % _common_shorten_repr(first, second) - msg = self._formatMessage(msg, standardMsg) - raise self.failureException(msg) - - def assertEqual(self, first, second, msg=None): - """Fail if the two objects are unequal as determined by the '==' - operator. - """ - assertion_func = self._getAssertEqualityFunc(first, second) - assertion_func(first, second, msg=msg) - - def assertNotEqual(self, first, second, msg=None): - """Fail if the two objects are equal as determined by the '!=' - operator. - """ - if not first != second: - msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first), - safe_repr(second))) - raise self.failureException(msg) - - def assertAlmostEqual(self, first, second, places=None, msg=None, - delta=None): - """Fail if the two objects are unequal as determined by their - difference rounded to the given number of decimal places - (default 7) and comparing to zero, or by comparing that the - difference between the two objects is more than the given - delta. - - Note that decimal places (from zero) are usually not the same - as significant digits (measured from the most significant digit). - - If the two objects compare equal then they will automatically - compare almost equal. - """ - if first == second: - # shortcut - return - if delta is not None and places is not None: - raise TypeError("specify delta or places not both") - - diff = abs(first - second) - if delta is not None: - if diff <= delta: - return - - standardMsg = '%s != %s within %s delta (%s difference)' % ( - safe_repr(first), - safe_repr(second), - safe_repr(delta), - safe_repr(diff)) - else: - if places is None: - places = 7 - - if round(diff, places) == 0: - return - - standardMsg = '%s != %s within %r places (%s difference)' % ( - safe_repr(first), - safe_repr(second), - places, - safe_repr(diff)) - msg = self._formatMessage(msg, standardMsg) - raise self.failureException(msg) - - def assertNotAlmostEqual(self, first, second, places=None, msg=None, - delta=None): - """Fail if the two objects are equal as determined by their - difference rounded to the given number of decimal places - (default 7) and comparing to zero, or by comparing that the - difference between the two objects is less than the given delta. - - Note that decimal places (from zero) are usually not the same - as significant digits (measured from the most significant digit). - - Objects that are equal automatically fail. 
- """ - if delta is not None and places is not None: - raise TypeError("specify delta or places not both") - diff = abs(first - second) - if delta is not None: - if not (first == second) and diff > delta: - return - standardMsg = '%s == %s within %s delta (%s difference)' % ( - safe_repr(first), - safe_repr(second), - safe_repr(delta), - safe_repr(diff)) - else: - if places is None: - places = 7 - if not (first == second) and round(diff, places) != 0: - return - standardMsg = '%s == %s within %r places' % (safe_repr(first), - safe_repr(second), - places) - - msg = self._formatMessage(msg, standardMsg) - raise self.failureException(msg) - - def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None): - """An equality assertion for ordered sequences (like lists and tuples). - - For the purposes of this function, a valid ordered sequence type is one - which can be indexed, has a length, and has an equality operator. - - Args: - seq1: The first sequence to compare. - seq2: The second sequence to compare. - seq_type: The expected datatype of the sequences, or None if no - datatype should be enforced. - msg: Optional message to use on failure instead of a list of - differences. - """ - if seq_type is not None: - seq_type_name = seq_type.__name__ - if not isinstance(seq1, seq_type): - raise self.failureException('First sequence is not a %s: %s' - % (seq_type_name, safe_repr(seq1))) - if not isinstance(seq2, seq_type): - raise self.failureException('Second sequence is not a %s: %s' - % (seq_type_name, safe_repr(seq2))) - else: - seq_type_name = "sequence" - - differing = None - try: - len1 = len(seq1) - except (TypeError, NotImplementedError): - differing = 'First %s has no length. Non-sequence?' % ( - seq_type_name) - - if differing is None: - try: - len2 = len(seq2) - except (TypeError, NotImplementedError): - differing = 'Second %s has no length. Non-sequence?' % ( - seq_type_name) - - if differing is None: - if seq1 == seq2: - return - - differing = '%ss differ: %s != %s\n' % ( - (seq_type_name.capitalize(),) + - _common_shorten_repr(seq1, seq2)) - - for i in range(min(len1, len2)): - try: - item1 = seq1[i] - except (TypeError, IndexError, NotImplementedError): - differing += ('\nUnable to index element %d of first %s\n' % - (i, seq_type_name)) - break - - try: - item2 = seq2[i] - except (TypeError, IndexError, NotImplementedError): - differing += ('\nUnable to index element %d of second %s\n' % - (i, seq_type_name)) - break - - if item1 != item2: - differing += ('\nFirst differing element %d:\n%s\n%s\n' % - ((i,) + _common_shorten_repr(item1, item2))) - break - else: - if (len1 == len2 and seq_type is None and - type(seq1) != type(seq2)): - # The sequences are the same, but have differing types. 
- return - - if len1 > len2: - differing += ('\nFirst %s contains %d additional ' - 'elements.\n' % (seq_type_name, len1 - len2)) - try: - differing += ('First extra element %d:\n%s\n' % - (len2, safe_repr(seq1[len2]))) - except (TypeError, IndexError, NotImplementedError): - differing += ('Unable to index element %d ' - 'of first %s\n' % (len2, seq_type_name)) - elif len1 < len2: - differing += ('\nSecond %s contains %d additional ' - 'elements.\n' % (seq_type_name, len2 - len1)) - try: - differing += ('First extra element %d:\n%s\n' % - (len1, safe_repr(seq2[len1]))) - except (TypeError, IndexError, NotImplementedError): - differing += ('Unable to index element %d ' - 'of second %s\n' % (len1, seq_type_name)) - standardMsg = differing - diffMsg = '\n' + '\n'.join( - difflib.ndiff(pprint.pformat(seq1).splitlines(), - pprint.pformat(seq2).splitlines())) - - standardMsg = self._truncateMessage(standardMsg, diffMsg) - msg = self._formatMessage(msg, standardMsg) - self.fail(msg) - - def _truncateMessage(self, message, diff): - max_diff = self.maxDiff - if max_diff is None or len(diff) <= max_diff: - return message + diff - return message + (DIFF_OMITTED % len(diff)) - - def assertListEqual(self, list1, list2, msg=None): - """A list-specific equality assertion. - - Args: - list1: The first list to compare. - list2: The second list to compare. - msg: Optional message to use on failure instead of a list of - differences. - - """ - self.assertSequenceEqual(list1, list2, msg, seq_type=list) - - def assertTupleEqual(self, tuple1, tuple2, msg=None): - """A tuple-specific equality assertion. - - Args: - tuple1: The first tuple to compare. - tuple2: The second tuple to compare. - msg: Optional message to use on failure instead of a list of - differences. - """ - self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple) - - def assertSetEqual(self, set1, set2, msg=None): - """A set-specific equality assertion. - - Args: - set1: The first set to compare. - set2: The second set to compare. - msg: Optional message to use on failure instead of a list of - differences. - - assertSetEqual uses ducktyping to support different types of sets, and - is optimized for sets specifically (parameters must support a - difference method). 
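The dispatch above only fires when both operands share the exact same type; addTypeEqualityFunc is the public hook that populates _type_equality_funcs. A sketch, with the Point class and assertPointEqual helper invented for illustration:

import unittest

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

class TypeDispatchExamples(unittest.TestCase):
    def setUp(self):
        # Register a detailed comparator used by assertEqual whenever
        # both operands are exactly of type Point; subclasses fall back
        # to the default '==' comparison, per _getAssertEqualityFunc.
        self.addTypeEqualityFunc(Point, self.assertPointEqual)

    def assertPointEqual(self, first, second, msg=None):
        if (first.x, first.y) != (second.x, second.y):
            standardMsg = "Points differ: (%s, %s) != (%s, %s)" % (
                first.x, first.y, second.x, second.y)
            self.fail(self._formatMessage(msg, standardMsg))

    def test_dispatch(self):
        self.assertEqual(Point(1, 2), Point(1, 2))  # assertPointEqual
        self.assertEqual([1, 2], [1, 2])            # assertListEqual

if __name__ == "__main__":
    unittest.main()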
- """ - try: - difference1 = set1.difference(set2) - except TypeError as e: - self.fail('invalid type when attempting set difference: %s' % e) - except AttributeError as e: - self.fail('first argument does not support set difference: %s' % e) - - try: - difference2 = set2.difference(set1) - except TypeError as e: - self.fail('invalid type when attempting set difference: %s' % e) - except AttributeError as e: - self.fail('second argument does not support set difference: %s' % e) - - if not (difference1 or difference2): - return - - lines = [] - if difference1: - lines.append('Items in the first set but not the second:') - for item in difference1: - lines.append(repr(item)) - if difference2: - lines.append('Items in the second set but not the first:') - for item in difference2: - lines.append(repr(item)) - - standardMsg = '\n'.join(lines) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIn(self, member, container, msg=None): - """Just like self.assertTrue(a in b), but with a nicer default message.""" - if member not in container: - standardMsg = '%s not found in %s' % (safe_repr(member), - safe_repr(container)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertNotIn(self, member, container, msg=None): - """Just like self.assertTrue(a not in b), but with a nicer default message.""" - if member in container: - standardMsg = '%s unexpectedly found in %s' % (safe_repr(member), - safe_repr(container)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIs(self, expr1, expr2, msg=None): - """Just like self.assertTrue(a is b), but with a nicer default message.""" - if expr1 is not expr2: - standardMsg = '%s is not %s' % (safe_repr(expr1), - safe_repr(expr2)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIsNot(self, expr1, expr2, msg=None): - """Just like self.assertTrue(a is not b), but with a nicer default message.""" - if expr1 is expr2: - standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertDictEqual(self, d1, d2, msg=None): - self.assertIsInstance(d1, dict, 'First argument is not a dictionary') - self.assertIsInstance(d2, dict, 'Second argument is not a dictionary') - - if d1 != d2: - standardMsg = '%s != %s' % _common_shorten_repr(d1, d2) - diff = ('\n' + '\n'.join(difflib.ndiff( - pprint.pformat(d1).splitlines(), - pprint.pformat(d2).splitlines()))) - standardMsg = self._truncateMessage(standardMsg, diff) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertCountEqual(self, first, second, msg=None): - """Asserts that two iterables have the same elements, the same number of - times, without regard to order. - - self.assertEqual(Counter(list(first)), - Counter(list(second))) - - Example: - - [0, 1, 1] and [1, 0, 1] compare equal. - - [0, 0, 1] and [0, 1] compare unequal. 
- - """ - first_seq, second_seq = list(first), list(second) - try: - first = collections.Counter(first_seq) - second = collections.Counter(second_seq) - except TypeError: - # Handle case with unhashable elements - differences = _count_diff_all_purpose(first_seq, second_seq) - else: - if first == second: - return - differences = _count_diff_hashable(first_seq, second_seq) - - if differences: - standardMsg = 'Element counts were not equal:\n' - lines = ['First has %d, Second has %d: %r' % diff for diff in differences] - diffMsg = '\n'.join(lines) - standardMsg = self._truncateMessage(standardMsg, diffMsg) - msg = self._formatMessage(msg, standardMsg) - self.fail(msg) - - def assertMultiLineEqual(self, first, second, msg=None): - """Assert that two multi-line strings are equal.""" - self.assertIsInstance(first, str, "First argument is not a string") - self.assertIsInstance(second, str, "Second argument is not a string") - - if first != second: - # Don't use difflib if the strings are too long - if (len(first) > self._diffThreshold or - len(second) > self._diffThreshold): - self._baseAssertEqual(first, second, msg) - - # Append \n to both strings if either is missing the \n. - # This allows the final ndiff to show the \n difference. The - # exception here is if the string is empty, in which case no - # \n should be added - first_presplit = first - second_presplit = second - if first and second: - if first[-1] != '\n' or second[-1] != '\n': - first_presplit += '\n' - second_presplit += '\n' - elif second and second[-1] != '\n': - second_presplit += '\n' - elif first and first[-1] != '\n': - first_presplit += '\n' - - firstlines = first_presplit.splitlines(keepends=True) - secondlines = second_presplit.splitlines(keepends=True) - - # Generate the message and diff, then raise the exception - standardMsg = '%s != %s' % _common_shorten_repr(first, second) - diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines)) - standardMsg = self._truncateMessage(standardMsg, diff) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertLess(self, a, b, msg=None): - """Just like self.assertTrue(a < b), but with a nicer default message.""" - if not a < b: - standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertLessEqual(self, a, b, msg=None): - """Just like self.assertTrue(a <= b), but with a nicer default message.""" - if not a <= b: - standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertGreater(self, a, b, msg=None): - """Just like self.assertTrue(a > b), but with a nicer default message.""" - if not a > b: - standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertGreaterEqual(self, a, b, msg=None): - """Just like self.assertTrue(a >= b), but with a nicer default message.""" - if not a >= b: - standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b)) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIsNone(self, obj, msg=None): - """Same as self.assertTrue(obj is None), with a nicer default message.""" - if obj is not None: - standardMsg = '%s is not None' % (safe_repr(obj),) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIsNotNone(self, obj, msg=None): - """Included for symmetry with assertIsNone.""" - if obj is None: - standardMsg = 'unexpectedly None' - self.fail(self._formatMessage(msg, 
standardMsg)) - - def assertIsInstance(self, obj, cls, msg=None): - """Same as self.assertTrue(isinstance(obj, cls)), with a nicer - default message.""" - if not isinstance(obj, cls): - standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertNotIsInstance(self, obj, cls, msg=None): - """Included for symmetry with assertIsInstance.""" - if isinstance(obj, cls): - standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertRaisesRegex(self, expected_exception, expected_regex, - *args, **kwargs): - """Asserts that the message in a raised exception matches a regex. - - Args: - expected_exception: Exception class expected to be raised. - expected_regex: Regex (re.Pattern object or string) expected - to be found in error message. - args: Function to be called and extra positional args. - kwargs: Extra kwargs. - msg: Optional message used in case of failure. Can only be used - when assertRaisesRegex is used as a context manager. - """ - context = _AssertRaisesContext(expected_exception, self, expected_regex) - return context.handle('assertRaisesRegex', args, kwargs) - - def assertWarnsRegex(self, expected_warning, expected_regex, - *args, **kwargs): - """Asserts that the message in a triggered warning matches a regexp. - Basic functioning is similar to assertWarns() with the addition - that only warnings whose messages also match the regular expression - are considered successful matches. - - Args: - expected_warning: Warning class expected to be triggered. - expected_regex: Regex (re.Pattern object or string) expected - to be found in error message. - args: Function to be called and extra positional args. - kwargs: Extra kwargs. - msg: Optional message used in case of failure. Can only be used - when assertWarnsRegex is used as a context manager. - """ - context = _AssertWarnsContext(expected_warning, self, expected_regex) - return context.handle('assertWarnsRegex', args, kwargs) - - def assertRegex(self, text, expected_regex, msg=None): - """Fail the test unless the text matches the regular expression.""" - if isinstance(expected_regex, (str, bytes)): - assert expected_regex, "expected_regex must not be empty." - expected_regex = re.compile(expected_regex) - if not expected_regex.search(text): - standardMsg = "Regex didn't match: %r not found in %r" % ( - expected_regex.pattern, text) - # _formatMessage ensures the longMessage option is respected - msg = self._formatMessage(msg, standardMsg) - raise self.failureException(msg) - - def assertNotRegex(self, text, unexpected_regex, msg=None): - """Fail the test if the text matches the regular expression.""" - if isinstance(unexpected_regex, (str, bytes)): - unexpected_regex = re.compile(unexpected_regex) - match = unexpected_regex.search(text) - if match: - standardMsg = 'Regex matched: %r matches %r in %r' % ( - text[match.start() : match.end()], - unexpected_regex.pattern, - text) - # _formatMessage ensures the longMessage option is respected - msg = self._formatMessage(msg, standardMsg) - raise self.failureException(msg) - - - -class FunctionTestCase(TestCase): - """A test case that wraps a test function. - - This is useful for slipping pre-existing test functions into the - unittest framework. Optionally, set-up and tidy-up functions can be - supplied. As with TestCase, the tidy-up ('tearDown') function will - always be called if the set-up ('setUp') function ran successfully. 
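A minimal sketch of slipping a pre-existing function into the framework with FunctionTestCase, as the docstring above describes; every function name here is invented for illustration:

import unittest

def check_division():
    assert 10 / 2 == 5

def setup_fixture():
    print("setUp ran")

def teardown_fixture():
    # Runs whenever setup_fixture succeeded, exactly as for TestCase.
    print("tearDown ran")

case = unittest.FunctionTestCase(
    check_division,
    setUp=setup_fixture,
    tearDown=teardown_fixture,
    description="plain function wrapped as a test case",
)

if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite([case]))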
- """ - - def __init__(self, testFunc, setUp=None, tearDown=None, description=None): - super(FunctionTestCase, self).__init__() - self._setUpFunc = setUp - self._tearDownFunc = tearDown - self._testFunc = testFunc - self._description = description - - def setUp(self): - if self._setUpFunc is not None: - self._setUpFunc() - - def tearDown(self): - if self._tearDownFunc is not None: - self._tearDownFunc() - - def runTest(self): - self._testFunc() - - def id(self): - return self._testFunc.__name__ - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - - return self._setUpFunc == other._setUpFunc and \ - self._tearDownFunc == other._tearDownFunc and \ - self._testFunc == other._testFunc and \ - self._description == other._description - - def __hash__(self): - return hash((type(self), self._setUpFunc, self._tearDownFunc, - self._testFunc, self._description)) - - def __str__(self): - return "%s (%s)" % (strclass(self.__class__), - self._testFunc.__name__) - - def __repr__(self): - return "<%s tec=%s>" % (strclass(self.__class__), - self._testFunc) - - def shortDescription(self): - if self._description is not None: - return self._description - doc = self._testFunc.__doc__ - return doc and doc.split("\n")[0].strip() or None - - -class _SubTest(TestCase): - - def __init__(self, test_case, message, params): - super().__init__() - self._message = message - self.test_case = test_case - self.params = params - self.failureException = test_case.failureException - - def runTest(self): - raise NotImplementedError("subtests cannot be run directly") - - def _subDescription(self): - parts = [] - if self._message is not _subtest_msg_sentinel: - parts.append("[{}]".format(self._message)) - if self.params: - params_desc = ', '.join( - "{}={!r}".format(k, v) - for (k, v) in self.params.items()) - parts.append("({})".format(params_desc)) - return " ".join(parts) or '()' - - def id(self): - return "{} {}".format(self.test_case.id(), self._subDescription()) - - def shortDescription(self): - """Returns a one-line description of the subtest, or None if no - description has been provided. - """ - return self.test_case.shortDescription() - - def __str__(self): - return "{} {}".format(self.test_case, self._subDescription()) diff --git a/Python313_13_x86_Template/Lib/unittest/loader.py b/Python313_13_x86_Template/Lib/unittest/loader.py deleted file mode 100644 index 22797b83..00000000 --- a/Python313_13_x86_Template/Lib/unittest/loader.py +++ /dev/null @@ -1,453 +0,0 @@ -"""Loading unittests.""" - -import os -import re -import sys -import traceback -import types -import functools - -from fnmatch import fnmatch, fnmatchcase - -from . 
import case, suite, util - -__unittest = True - -# what about .pyc (etc) -# we would need to avoid loading the same tests multiple times -# from '.py', *and* '.pyc' -VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE) - - -class _FailedTest(case.TestCase): - _testMethodName = None - - def __init__(self, method_name, exception): - self._exception = exception - super(_FailedTest, self).__init__(method_name) - - def __getattr__(self, name): - if name != self._testMethodName: - return super(_FailedTest, self).__getattr__(name) - def testFailure(): - raise self._exception - return testFailure - - -def _make_failed_import_test(name, suiteClass): - message = 'Failed to import test module: %s\n%s' % ( - name, traceback.format_exc()) - return _make_failed_test(name, ImportError(message), suiteClass, message) - -def _make_failed_load_tests(name, exception, suiteClass): - message = 'Failed to call load_tests:\n%s' % (traceback.format_exc(),) - return _make_failed_test( - name, exception, suiteClass, message) - -def _make_failed_test(methodname, exception, suiteClass, message): - test = _FailedTest(methodname, exception) - return suiteClass((test,)), message - -def _make_skipped_test(methodname, exception, suiteClass): - @case.skip(str(exception)) - def testSkipped(self): - pass - attrs = {methodname: testSkipped} - TestClass = type("ModuleSkipped", (case.TestCase,), attrs) - return suiteClass((TestClass(methodname),)) - -def _splitext(path): - return os.path.splitext(path)[0] - - -class TestLoader(object): - """ - This class is responsible for loading tests according to various criteria - and returning them wrapped in a TestSuite - """ - testMethodPrefix = 'test' - sortTestMethodsUsing = staticmethod(util.three_way_cmp) - testNamePatterns = None - suiteClass = suite.TestSuite - _top_level_dir = None - - def __init__(self): - super(TestLoader, self).__init__() - self.errors = [] - # Tracks packages which we have called into via load_tests, to - # avoid infinite re-entrancy. - self._loading_packages = set() - - def loadTestsFromTestCase(self, testCaseClass): - """Return a suite of all test cases contained in testCaseClass""" - if issubclass(testCaseClass, suite.TestSuite): - raise TypeError("Test cases should not be derived from " - "TestSuite. Maybe you meant to derive from " - "TestCase?") - if testCaseClass in (case.TestCase, case.FunctionTestCase): - # We don't load any tests from base types that should not be loaded. 
- testCaseNames = [] - else: - testCaseNames = self.getTestCaseNames(testCaseClass) - if not testCaseNames and hasattr(testCaseClass, 'runTest'): - testCaseNames = ['runTest'] - loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames)) - return loaded_suite - - def loadTestsFromModule(self, module, *, pattern=None): - """Return a suite of all test cases contained in the given module""" - tests = [] - for name in dir(module): - obj = getattr(module, name) - if ( - isinstance(obj, type) - and issubclass(obj, case.TestCase) - and obj not in (case.TestCase, case.FunctionTestCase) - ): - tests.append(self.loadTestsFromTestCase(obj)) - - load_tests = getattr(module, 'load_tests', None) - tests = self.suiteClass(tests) - if load_tests is not None: - try: - return load_tests(self, tests, pattern) - except Exception as e: - error_case, error_message = _make_failed_load_tests( - module.__name__, e, self.suiteClass) - self.errors.append(error_message) - return error_case - return tests - - def loadTestsFromName(self, name, module=None): - """Return a suite of all test cases given a string specifier. - - The name may resolve either to a module, a test case class, a - test method within a test case class, or a callable object which - returns a TestCase or TestSuite instance. - - The method optionally resolves the names relative to a given module. - """ - parts = name.split('.') - error_case, error_message = None, None - if module is None: - parts_copy = parts[:] - while parts_copy: - try: - module_name = '.'.join(parts_copy) - module = __import__(module_name) - break - except ImportError: - next_attribute = parts_copy.pop() - # Last error so we can give it to the user if needed. - error_case, error_message = _make_failed_import_test( - next_attribute, self.suiteClass) - if not parts_copy: - # Even the top level import failed: report that error. - self.errors.append(error_message) - return error_case - parts = parts[1:] - obj = module - for part in parts: - try: - parent, obj = obj, getattr(obj, part) - except AttributeError as e: - # We can't traverse some part of the name. - if (getattr(obj, '__path__', None) is not None - and error_case is not None): - # This is a package (no __path__ per importlib docs), and we - # encountered an error importing something. We cannot tell - # the difference between package.WrongNameTestClass and - # package.wrong_module_name so we just report the - # ImportError - it is more informative. - self.errors.append(error_message) - return error_case - else: - # Otherwise, we signal that an AttributeError has occurred. 
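A sketch of the module-level load_tests hook that loadTestsFromModule honors above; the test class and suite contents are illustrative:

import unittest

class MathTests(unittest.TestCase):
    def test_add(self):
        self.assertEqual(1 + 1, 2)

def load_tests(loader, standard_tests, pattern):
    # Called with the already-collected suite; the return value
    # replaces the default suite entirely.
    suite = unittest.TestSuite()
    suite.addTest(MathTests("test_add"))
    return suite

if __name__ == "__main__":
    unittest.main()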
- error_case, error_message = _make_failed_test( - part, e, self.suiteClass, - 'Failed to access attribute:\n%s' % ( - traceback.format_exc(),)) - self.errors.append(error_message) - return error_case - - if isinstance(obj, types.ModuleType): - return self.loadTestsFromModule(obj) - elif ( - isinstance(obj, type) - and issubclass(obj, case.TestCase) - and obj not in (case.TestCase, case.FunctionTestCase) - ): - return self.loadTestsFromTestCase(obj) - elif (isinstance(obj, types.FunctionType) and - isinstance(parent, type) and - issubclass(parent, case.TestCase)): - name = parts[-1] - inst = parent(name) - # static methods follow a different path - if not isinstance(getattr(inst, name), types.FunctionType): - return self.suiteClass([inst]) - elif isinstance(obj, suite.TestSuite): - return obj - if callable(obj): - test = obj() - if isinstance(test, suite.TestSuite): - return test - elif isinstance(test, case.TestCase): - return self.suiteClass([test]) - else: - raise TypeError("calling %s returned %s, not a test" % - (obj, test)) - else: - raise TypeError("don't know how to make test from: %s" % obj) - - def loadTestsFromNames(self, names, module=None): - """Return a suite of all test cases found using the given sequence - of string specifiers. See 'loadTestsFromName()'. - """ - suites = [self.loadTestsFromName(name, module) for name in names] - return self.suiteClass(suites) - - def getTestCaseNames(self, testCaseClass): - """Return a sorted sequence of method names found within testCaseClass - """ - def shouldIncludeMethod(attrname): - if not attrname.startswith(self.testMethodPrefix): - return False - testFunc = getattr(testCaseClass, attrname) - if not callable(testFunc): - return False - fullName = f'%s.%s.%s' % ( - testCaseClass.__module__, testCaseClass.__qualname__, attrname - ) - return self.testNamePatterns is None or \ - any(fnmatchcase(fullName, pattern) for pattern in self.testNamePatterns) - testFnNames = list(filter(shouldIncludeMethod, dir(testCaseClass))) - if self.sortTestMethodsUsing: - testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing)) - return testFnNames - - def discover(self, start_dir, pattern='test*.py', top_level_dir=None): - """Find and return all test modules from the specified start - directory, recursing into subdirectories to find them and return all - tests found within them. Only test files that match the pattern will - be loaded. (Using shell style pattern matching.) - - All test modules must be importable from the top level of the project. - If the start directory is not the top level directory then the top - level directory must be specified separately. - - If a test package name (directory with '__init__.py') matches the - pattern then the package will be checked for a 'load_tests' function. If - this exists then it will be called with (loader, tests, pattern) unless - the package has already had load_tests called from the same discovery - invocation, in which case the package module object is not scanned for - tests - this ensures that when a package uses discover to further - discover child tests that infinite recursion does not happen. - - If load_tests exists then discovery does *not* recurse into the package, - load_tests is responsible for loading all tests in the package. - - The pattern is deliberately not stored as a loader attribute so that - packages can continue discovery themselves. top_level_dir is stored so - load_tests does not need to pass this argument in to loader.discover(). 
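Programmatic discovery looks like this, assuming a hypothetical tests/ package that is importable from the top-level directory as the docstring requires:

import unittest

loader = unittest.TestLoader()
# Imports every module under tests/ matching the pattern and
# collects their TestCase subclasses into one suite.
suite = loader.discover(start_dir="tests", pattern="test_*.py",
                        top_level_dir=".")
unittest.TextTestRunner(verbosity=2).run(suite)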
- - Paths are sorted before being imported to ensure reproducible execution - order even on filesystems with non-alphabetical ordering like ext3/4. - """ - original_top_level_dir = self._top_level_dir - set_implicit_top = False - if top_level_dir is None and self._top_level_dir is not None: - # make top_level_dir optional if called from load_tests in a package - top_level_dir = self._top_level_dir - elif top_level_dir is None: - set_implicit_top = True - top_level_dir = start_dir - - top_level_dir = os.path.abspath(top_level_dir) - - if not top_level_dir in sys.path: - # all test modules must be importable from the top level directory - # should we *unconditionally* put the start directory in first - # in sys.path to minimise likelihood of conflicts between installed - # modules and development versions? - sys.path.insert(0, top_level_dir) - self._top_level_dir = top_level_dir - - is_not_importable = False - if os.path.isdir(os.path.abspath(start_dir)): - start_dir = os.path.abspath(start_dir) - if start_dir != top_level_dir: - is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py')) - else: - # support for discovery from dotted module names - try: - __import__(start_dir) - except ImportError: - is_not_importable = True - else: - the_module = sys.modules[start_dir] - top_part = start_dir.split('.')[0] - try: - start_dir = os.path.abspath( - os.path.dirname((the_module.__file__))) - except AttributeError: - if the_module.__name__ in sys.builtin_module_names: - # builtin module - raise TypeError('Can not use builtin modules ' - 'as dotted module names') from None - else: - raise TypeError( - f"don't know how to discover from {the_module!r}" - ) from None - - if set_implicit_top: - self._top_level_dir = self._get_directory_containing_module(top_part) - sys.path.remove(top_level_dir) - - if is_not_importable: - raise ImportError('Start directory is not importable: %r' % start_dir) - - tests = list(self._find_tests(start_dir, pattern)) - self._top_level_dir = original_top_level_dir - return self.suiteClass(tests) - - def _get_directory_containing_module(self, module_name): - module = sys.modules[module_name] - full_path = os.path.abspath(module.__file__) - - if os.path.basename(full_path).lower().startswith('__init__.py'): - return os.path.dirname(os.path.dirname(full_path)) - else: - # here we have been given a module rather than a package - so - # all we can do is search the *same* directory the module is in - # should an exception be raised instead - return os.path.dirname(full_path) - - def _get_name_from_path(self, path): - if path == self._top_level_dir: - return '.' - path = _splitext(os.path.normpath(path)) - - _relpath = os.path.relpath(path, self._top_level_dir) - assert not os.path.isabs(_relpath), "Path must be within the project" - assert not _relpath.startswith('..'), "Path must be within the project" - - name = _relpath.replace(os.path.sep, '.') - return name - - def _get_module_from_name(self, name): - __import__(name) - return sys.modules[name] - - def _match_path(self, path, full_path, pattern): - # override this method to use alternative matching strategy - return fnmatch(path, pattern) - - def _find_tests(self, start_dir, pattern): - """Used by discovery. Yields test suites it loads.""" - # Handle the __init__ in this package - name = self._get_name_from_path(start_dir) - # name is '.' when start_dir == top_level_dir (and top_level_dir is by - # definition not a package). - if name != '.' 
and name not in self._loading_packages: - # name is in self._loading_packages while we have called into - # loadTestsFromModule with name. - tests, should_recurse = self._find_test_path(start_dir, pattern) - if tests is not None: - yield tests - if not should_recurse: - # Either an error occurred, or load_tests was used by the - # package. - return - # Handle the contents. - paths = sorted(os.listdir(start_dir)) - for path in paths: - full_path = os.path.join(start_dir, path) - tests, should_recurse = self._find_test_path(full_path, pattern) - if tests is not None: - yield tests - if should_recurse: - # we found a package that didn't use load_tests. - name = self._get_name_from_path(full_path) - self._loading_packages.add(name) - try: - yield from self._find_tests(full_path, pattern) - finally: - self._loading_packages.discard(name) - - def _find_test_path(self, full_path, pattern): - """Used by discovery. - - Loads tests from a single file, or a directories' __init__.py when - passed the directory. - - Returns a tuple (None_or_tests_from_file, should_recurse). - """ - basename = os.path.basename(full_path) - if os.path.isfile(full_path): - if not VALID_MODULE_NAME.match(basename): - # valid Python identifiers only - return None, False - if not self._match_path(basename, full_path, pattern): - return None, False - # if the test file matches, load it - name = self._get_name_from_path(full_path) - try: - module = self._get_module_from_name(name) - except case.SkipTest as e: - return _make_skipped_test(name, e, self.suiteClass), False - except: - error_case, error_message = \ - _make_failed_import_test(name, self.suiteClass) - self.errors.append(error_message) - return error_case, False - else: - mod_file = os.path.abspath( - getattr(module, '__file__', full_path)) - realpath = _splitext( - os.path.realpath(mod_file)) - fullpath_noext = _splitext( - os.path.realpath(full_path)) - if realpath.lower() != fullpath_noext.lower(): - module_dir = os.path.dirname(realpath) - mod_name = _splitext( - os.path.basename(full_path)) - expected_dir = os.path.dirname(full_path) - msg = ("%r module incorrectly imported from %r. Expected " - "%r. Is this module globally installed?") - raise ImportError( - msg % (mod_name, module_dir, expected_dir)) - return self.loadTestsFromModule(module, pattern=pattern), False - elif os.path.isdir(full_path): - if not os.path.isfile(os.path.join(full_path, '__init__.py')): - return None, False - - load_tests = None - tests = None - name = self._get_name_from_path(full_path) - try: - package = self._get_module_from_name(name) - except case.SkipTest as e: - return _make_skipped_test(name, e, self.suiteClass), False - except: - error_case, error_message = \ - _make_failed_import_test(name, self.suiteClass) - self.errors.append(error_message) - return error_case, False - else: - load_tests = getattr(package, 'load_tests', None) - # Mark this package as being in load_tests (possibly ;)) - self._loading_packages.add(name) - try: - tests = self.loadTestsFromModule(package, pattern=pattern) - if load_tests is not None: - # loadTestsFromModule(package) has loaded tests for us. 
- return tests, False - return tests, True - finally: - self._loading_packages.discard(name) - else: - return None, False - - -defaultTestLoader = TestLoader() diff --git a/Python313_13_x86_Template/Lib/unittest/main.py b/Python313_13_x86_Template/Lib/unittest/main.py deleted file mode 100644 index a0cd8a9f..00000000 --- a/Python313_13_x86_Template/Lib/unittest/main.py +++ /dev/null @@ -1,280 +0,0 @@ -"""Unittest main program""" - -import sys -import argparse -import os - -from . import loader, runner -from .signals import installHandler - -__unittest = True -_NO_TESTS_EXITCODE = 5 - -MAIN_EXAMPLES = """\ -Examples: - %(prog)s test_module - run tests from test_module - %(prog)s module.TestClass - run tests from module.TestClass - %(prog)s module.Class.test_method - run specified test method - %(prog)s path/to/test_file.py - run tests from test_file.py -""" - -MODULE_EXAMPLES = """\ -Examples: - %(prog)s - run default set of tests - %(prog)s MyTestSuite - run suite 'MyTestSuite' - %(prog)s MyTestCase.testSomething - run MyTestCase.testSomething - %(prog)s MyTestCase - run all 'test*' test methods - in MyTestCase -""" - -def _convert_name(name): - # on Linux / Mac OS X 'foo.PY' is not importable, but on - # Windows it is. Simpler to do a case insensitive match - # a better check would be to check that the name is a - # valid Python module name. - if os.path.isfile(name) and name.lower().endswith('.py'): - if os.path.isabs(name): - rel_path = os.path.relpath(name, os.getcwd()) - if os.path.isabs(rel_path) or rel_path.startswith(os.pardir): - return name - name = rel_path - # on Windows both '\' and '/' are used as path - # separators. Better to replace both than rely on os.path.sep - return os.path.normpath(name)[:-3].replace('\\', '.').replace('/', '.') - return name - -def _convert_names(names): - return [_convert_name(name) for name in names] - - -def _convert_select_pattern(pattern): - if not '*' in pattern: - pattern = '*%s*' % pattern - return pattern - - -class TestProgram(object): - """A command-line program that runs a set of tests; this is primarily - for making test modules conveniently executable. - """ - # defaults for testing - module=None - verbosity = 1 - failfast = catchbreak = buffer = progName = warnings = testNamePatterns = None - _discovery_parser = None - - def __init__(self, module='__main__', defaultTest=None, argv=None, - testRunner=None, testLoader=loader.defaultTestLoader, - exit=True, verbosity=1, failfast=None, catchbreak=None, - buffer=None, warnings=None, *, tb_locals=False, - durations=None): - if isinstance(module, str): - self.module = __import__(module) - for part in module.split('.')[1:]: - self.module = getattr(self.module, part) - else: - self.module = module - if argv is None: - argv = sys.argv - - self.exit = exit - self.failfast = failfast - self.catchbreak = catchbreak - self.verbosity = verbosity - self.buffer = buffer - self.tb_locals = tb_locals - self.durations = durations - if warnings is None and not sys.warnoptions: - # even if DeprecationWarnings are ignored by default - # print them anyway unless other warnings settings are - # specified by the warnings arg or the -W python flag - self.warnings = 'default' - else: - # here self.warnings is set either to the value passed - # to the warnings args or to None. - # If the user didn't pass a value self.warnings will - # be None. This means that the behavior is unchanged - # and depends on the values passed to -W. 
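A sketch of driving TestProgram through the unittest.main alias; exit=False (accepted by the __init__ above) skips sys.exit so the result object can be inspected. The test class is illustrative:

import unittest

class SmokeTests(unittest.TestCase):
    def test_truth(self):
        self.assertTrue(True)

if __name__ == "__main__":
    # parseArgs + runTests happen inside __init__; the instance is
    # returned so self.result is available afterwards.
    program = unittest.main(module="__main__", exit=False,
                            verbosity=2, failfast=True)
    print("successful:", program.result.wasSuccessful())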
- self.warnings = warnings - self.defaultTest = defaultTest - self.testRunner = testRunner - self.testLoader = testLoader - self.progName = os.path.basename(argv[0]) - self.parseArgs(argv) - self.runTests() - - def _print_help(self, *args, **kwargs): - if self.module is None: - print(self._main_parser.format_help()) - print(MAIN_EXAMPLES % {'prog': self.progName}) - self._discovery_parser.print_help() - else: - print(self._main_parser.format_help()) - print(MODULE_EXAMPLES % {'prog': self.progName}) - - def parseArgs(self, argv): - self._initArgParsers() - if self.module is None: - if len(argv) > 1 and argv[1].lower() == 'discover': - self._do_discovery(argv[2:]) - return - self._main_parser.parse_args(argv[1:], self) - if not self.tests: - # this allows "python -m unittest -v" to still work for - # test discovery. - self._do_discovery([]) - return - else: - self._main_parser.parse_args(argv[1:], self) - - if self.tests: - self.testNames = _convert_names(self.tests) - if __name__ == '__main__': - # to support python -m unittest ... - self.module = None - elif self.defaultTest is None: - # createTests will load tests from self.module - self.testNames = None - elif isinstance(self.defaultTest, str): - self.testNames = (self.defaultTest,) - else: - self.testNames = list(self.defaultTest) - self.createTests() - - def createTests(self, from_discovery=False, Loader=None): - if self.testNamePatterns: - self.testLoader.testNamePatterns = self.testNamePatterns - if from_discovery: - loader = self.testLoader if Loader is None else Loader() - self.test = loader.discover(self.start, self.pattern, self.top) - elif self.testNames is None: - self.test = self.testLoader.loadTestsFromModule(self.module) - else: - self.test = self.testLoader.loadTestsFromNames(self.testNames, - self.module) - - def _initArgParsers(self): - parent_parser = self._getParentArgParser() - self._main_parser = self._getMainArgParser(parent_parser) - self._discovery_parser = self._getDiscoveryArgParser(parent_parser) - - def _getParentArgParser(self): - parser = argparse.ArgumentParser(add_help=False) - - parser.add_argument('-v', '--verbose', dest='verbosity', - action='store_const', const=2, - help='Verbose output') - parser.add_argument('-q', '--quiet', dest='verbosity', - action='store_const', const=0, - help='Quiet output') - parser.add_argument('--locals', dest='tb_locals', - action='store_true', - help='Show local variables in tracebacks') - parser.add_argument('--durations', dest='durations', type=int, - default=None, metavar="N", - help='Show the N slowest test cases (N=0 for all)') - if self.failfast is None: - parser.add_argument('-f', '--failfast', dest='failfast', - action='store_true', - help='Stop on first fail or error') - self.failfast = False - if self.catchbreak is None: - parser.add_argument('-c', '--catch', dest='catchbreak', - action='store_true', - help='Catch Ctrl-C and display results so far') - self.catchbreak = False - if self.buffer is None: - parser.add_argument('-b', '--buffer', dest='buffer', - action='store_true', - help='Buffer stdout and stderr during tests') - self.buffer = False - if self.testNamePatterns is None: - parser.add_argument('-k', dest='testNamePatterns', - action='append', type=_convert_select_pattern, - help='Only run tests which match the given substring') - self.testNamePatterns = [] - - return parser - - def _getMainArgParser(self, parent): - parser = argparse.ArgumentParser(parents=[parent]) - parser.prog = self.progName - parser.print_help = self._print_help - - 
parser.add_argument('tests', nargs='*', - help='a list of any number of test modules, ' - 'classes and test methods.') - - return parser - - def _getDiscoveryArgParser(self, parent): - parser = argparse.ArgumentParser(parents=[parent]) - parser.prog = '%s discover' % self.progName - parser.epilog = ('For test discovery all test modules must be ' - 'importable from the top level directory of the ' - 'project.') - - parser.add_argument('-s', '--start-directory', dest='start', - help="Directory to start discovery ('.' default)") - parser.add_argument('-p', '--pattern', dest='pattern', - help="Pattern to match tests ('test*.py' default)") - parser.add_argument('-t', '--top-level-directory', dest='top', - help='Top level directory of project (defaults to ' - 'start directory)') - for arg in ('start', 'pattern', 'top'): - parser.add_argument(arg, nargs='?', - default=argparse.SUPPRESS, - help=argparse.SUPPRESS) - - return parser - - def _do_discovery(self, argv, Loader=None): - self.start = '.' - self.pattern = 'test*.py' - self.top = None - if argv is not None: - # handle command line args for test discovery - if self._discovery_parser is None: - # for testing - self._initArgParsers() - self._discovery_parser.parse_args(argv, self) - - self.createTests(from_discovery=True, Loader=Loader) - - def runTests(self): - if self.catchbreak: - installHandler() - if self.testRunner is None: - self.testRunner = runner.TextTestRunner - if isinstance(self.testRunner, type): - try: - try: - testRunner = self.testRunner(verbosity=self.verbosity, - failfast=self.failfast, - buffer=self.buffer, - warnings=self.warnings, - tb_locals=self.tb_locals, - durations=self.durations) - except TypeError: - # didn't accept the tb_locals or durations argument - testRunner = self.testRunner(verbosity=self.verbosity, - failfast=self.failfast, - buffer=self.buffer, - warnings=self.warnings) - except TypeError: - # didn't accept the verbosity, buffer or failfast arguments - testRunner = self.testRunner() - else: - # it is assumed to be a TestRunner instance - testRunner = self.testRunner - self.result = testRunner.run(self.test) - if self.exit: - if not self.result.wasSuccessful(): - sys.exit(1) - elif self.result.testsRun == 0 and len(self.result.skipped) == 0: - sys.exit(_NO_TESTS_EXITCODE) - else: - sys.exit(0) - - -main = TestProgram diff --git a/Python313_13_x86_Template/Lib/unittest/mock.py b/Python313_13_x86_Template/Lib/unittest/mock.py deleted file mode 100644 index b6dd1c27..00000000 --- a/Python313_13_x86_Template/Lib/unittest/mock.py +++ /dev/null @@ -1,3185 +0,0 @@ -# mock.py -# Test tools for mocking and patching. 
-# Maintained by Michael Foord -# Backport for other versions of Python available from -# https://pypi.org/project/mock - -__all__ = ( - 'Mock', - 'MagicMock', - 'patch', - 'sentinel', - 'DEFAULT', - 'ANY', - 'call', - 'create_autospec', - 'AsyncMock', - 'ThreadingMock', - 'FILTER_DIR', - 'NonCallableMock', - 'NonCallableMagicMock', - 'mock_open', - 'PropertyMock', - 'seal', -) - - -import asyncio -import contextlib -import io -import inspect -import pprint -import sys -import builtins -import pkgutil -from asyncio import iscoroutinefunction -import threading -from types import CodeType, ModuleType, MethodType -from unittest.util import safe_repr -from functools import wraps, partial -from threading import RLock - - -class InvalidSpecError(Exception): - """Indicates that an invalid value was used as a mock spec.""" - - -_builtins = {name for name in dir(builtins) if not name.startswith('_')} - -FILTER_DIR = True - -# Workaround for issue #12370 -# Without this, the __class__ properties wouldn't be set correctly -_safe_super = super - -def _is_async_obj(obj): - if _is_instance_mock(obj) and not isinstance(obj, AsyncMock): - return False - if hasattr(obj, '__func__'): - obj = getattr(obj, '__func__') - return iscoroutinefunction(obj) or inspect.isawaitable(obj) - - -def _is_async_func(func): - if getattr(func, '__code__', None): - return iscoroutinefunction(func) - else: - return False - - -def _is_instance_mock(obj): - # can't use isinstance on Mock objects because they override __class__ - # The base class for all mocks is NonCallableMock - return issubclass(type(obj), NonCallableMock) - - -def _is_exception(obj): - return ( - isinstance(obj, BaseException) or - isinstance(obj, type) and issubclass(obj, BaseException) - ) - - -def _extract_mock(obj): - # Autospecced functions will return a FunctionType with "mock" attribute - # which is the actual mock object that needs to be used. - if isinstance(obj, FunctionTypes) and hasattr(obj, 'mock'): - return obj.mock - else: - return obj - - -def _get_signature_object(func, as_instance, eat_self): - """ - Given an arbitrary, possibly callable object, try to create a suitable - signature object. - Return a (reduced func, signature) tuple, or None. - """ - if isinstance(func, type) and not as_instance: - # If it's a type and should be modelled as a type, use __init__. - func = func.__init__ - # Skip the `self` argument in __init__ - eat_self = True - elif isinstance(func, (classmethod, staticmethod)): - if isinstance(func, classmethod): - # Skip the `cls` argument of a class method - eat_self = True - # Use the original decorated method to extract the correct function signature - func = func.__func__ - elif not isinstance(func, FunctionTypes): - # If we really want to model an instance of the passed type, - # __call__ should be looked up, not __init__. 
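The signature helpers above are what make create_autospec calls type-safe; a minimal sketch, with transfer being an invented example function:

from unittest import mock

def transfer(source, dest, amount=0):
    raise NotImplementedError

# The autospecced mock carries the original's inspect.signature, so
# mismatched calls fail immediately instead of being recorded silently.
fake = mock.create_autospec(transfer, return_value="ok")
print(fake("a", "b", amount=3))          # -> ok
fake.assert_called_once_with("a", "b", amount=3)

try:
    fake("a")                            # missing required 'dest'
except TypeError as exc:
    print("rejected:", exc)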
- try: - func = func.__call__ - except AttributeError: - return None - if eat_self: - sig_func = partial(func, None) - else: - sig_func = func - try: - return func, inspect.signature(sig_func) - except ValueError: - # Certain callable types are not supported by inspect.signature() - return None - - -def _check_signature(func, mock, skipfirst, instance=False): - sig = _get_signature_object(func, instance, skipfirst) - if sig is None: - return - func, sig = sig - def checksig(self, /, *args, **kwargs): - sig.bind(*args, **kwargs) - _copy_func_details(func, checksig) - type(mock)._mock_check_sig = checksig - type(mock).__signature__ = sig - - -def _copy_func_details(func, funcopy): - # we explicitly don't copy func.__dict__ into this copy as it would - # expose original attributes that should be mocked - for attribute in ( - '__name__', '__doc__', '__text_signature__', - '__module__', '__defaults__', '__kwdefaults__', - ): - try: - setattr(funcopy, attribute, getattr(func, attribute)) - except AttributeError: - pass - - -def _callable(obj): - if isinstance(obj, type): - return True - if isinstance(obj, (staticmethod, classmethod, MethodType)): - return _callable(obj.__func__) - if getattr(obj, '__call__', None) is not None: - return True - return False - - -def _is_list(obj): - # checks for list or tuples - # XXXX badly named! - return type(obj) in (list, tuple) - - -def _instance_callable(obj): - """Given an object, return True if the object is callable. - For classes, return True if instances would be callable.""" - if not isinstance(obj, type): - # already an instance - return getattr(obj, '__call__', None) is not None - - # *could* be broken by a class overriding __mro__ or __dict__ via - # a metaclass - for base in (obj,) + obj.__mro__: - if base.__dict__.get('__call__') is not None: - return True - return False - - -def _set_signature(mock, original, instance=False): - # creates a function with signature (*args, **kwargs) that delegates to a - # mock. It still does signature checking by calling a lambda with the same - # signature as the original. - - skipfirst = isinstance(original, type) - result = _get_signature_object(original, instance, skipfirst) - if result is None: - return mock - func, sig = result - def checksig(*args, **kwargs): - sig.bind(*args, **kwargs) - _copy_func_details(func, checksig) - - name = original.__name__ - if not name.isidentifier(): - name = 'funcopy' - context = {'_checksig_': checksig, 'mock': mock} - src = """def %s(*args, **kwargs): - _checksig_(*args, **kwargs) - return mock(*args, **kwargs)""" % name - exec (src, context) - funcopy = context[name] - _setup_func(funcopy, mock, sig) - return funcopy - -def _set_async_signature(mock, original, instance=False, is_async_mock=False): - # creates an async function with signature (*args, **kwargs) that delegates to a - # mock. It still does signature checking by calling a lambda with the same - # signature as the original. 
- - skipfirst = isinstance(original, type) - func, sig = _get_signature_object(original, instance, skipfirst) - def checksig(*args, **kwargs): - sig.bind(*args, **kwargs) - _copy_func_details(func, checksig) - - name = original.__name__ - context = {'_checksig_': checksig, 'mock': mock} - src = """async def %s(*args, **kwargs): - _checksig_(*args, **kwargs) - return await mock(*args, **kwargs)""" % name - exec (src, context) - funcopy = context[name] - _setup_func(funcopy, mock, sig) - _setup_async_mock(funcopy) - return funcopy - - -def _setup_func(funcopy, mock, sig): - funcopy.mock = mock - - def assert_called_with(*args, **kwargs): - return mock.assert_called_with(*args, **kwargs) - def assert_called(*args, **kwargs): - return mock.assert_called(*args, **kwargs) - def assert_not_called(*args, **kwargs): - return mock.assert_not_called(*args, **kwargs) - def assert_called_once(*args, **kwargs): - return mock.assert_called_once(*args, **kwargs) - def assert_called_once_with(*args, **kwargs): - return mock.assert_called_once_with(*args, **kwargs) - def assert_has_calls(*args, **kwargs): - return mock.assert_has_calls(*args, **kwargs) - def assert_any_call(*args, **kwargs): - return mock.assert_any_call(*args, **kwargs) - def reset_mock(): - funcopy.method_calls = _CallList() - funcopy.mock_calls = _CallList() - mock.reset_mock() - ret = funcopy.return_value - if _is_instance_mock(ret) and not ret is mock: - ret.reset_mock() - - funcopy.called = False - funcopy.call_count = 0 - funcopy.call_args = None - funcopy.call_args_list = _CallList() - funcopy.method_calls = _CallList() - funcopy.mock_calls = _CallList() - - funcopy.return_value = mock.return_value - funcopy.side_effect = mock.side_effect - funcopy._mock_children = mock._mock_children - - funcopy.assert_called_with = assert_called_with - funcopy.assert_called_once_with = assert_called_once_with - funcopy.assert_has_calls = assert_has_calls - funcopy.assert_any_call = assert_any_call - funcopy.reset_mock = reset_mock - funcopy.assert_called = assert_called - funcopy.assert_not_called = assert_not_called - funcopy.assert_called_once = assert_called_once - funcopy.__signature__ = sig - - mock._mock_delegate = funcopy - - -def _setup_async_mock(mock): - mock._is_coroutine = asyncio.coroutines._is_coroutine - mock.await_count = 0 - mock.await_args = None - mock.await_args_list = _CallList() - - # Mock is not configured yet so the attributes are set - # to a function and then the corresponding mock helper function - # is called when the helper is accessed similar to _setup_func. - def wrapper(attr, /, *args, **kwargs): - return getattr(mock.mock, attr)(*args, **kwargs) - - for attribute in ('assert_awaited', - 'assert_awaited_once', - 'assert_awaited_with', - 'assert_awaited_once_with', - 'assert_any_await', - 'assert_has_awaits', - 'assert_not_awaited'): - - # setattr(mock, attribute, wrapper) causes late binding - # hence attribute will always be the last value in the loop - # Use partial(wrapper, attribute) to ensure the attribute is bound - # correctly. - setattr(mock, attribute, partial(wrapper, attribute)) - - -def _is_magic(name): - return '__%s__' % name[2:-2] == name - - -class _SentinelObject(object): - "A unique, named, sentinel object." 
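A sketch of the await bookkeeping that _setup_async_mock wires up; the coroutine and URL are illustrative:

import asyncio
from unittest import mock

async def main(fetch):
    return await fetch("https://example.com")

# AsyncMock records awaits separately from plain calls.
fetch = mock.AsyncMock(return_value=b"payload")
result = asyncio.run(main(fetch))

assert result == b"payload"
fetch.assert_awaited_once_with("https://example.com")
print("await_count:", fetch.await_count)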
- def __init__(self, name): - self.name = name - - def __repr__(self): - return 'sentinel.%s' % self.name - - def __reduce__(self): - return 'sentinel.%s' % self.name - - -class _Sentinel(object): - """Access attributes to return a named object, usable as a sentinel.""" - def __init__(self): - self._sentinels = {} - - def __getattr__(self, name): - if name == '__bases__': - # Without this help(unittest.mock) raises an exception - raise AttributeError - return self._sentinels.setdefault(name, _SentinelObject(name)) - - def __reduce__(self): - return 'sentinel' - - -sentinel = _Sentinel() - -DEFAULT = sentinel.DEFAULT -_missing = sentinel.MISSING -_deleted = sentinel.DELETED - - -_allowed_names = { - 'return_value', '_mock_return_value', 'side_effect', - '_mock_side_effect', '_mock_parent', '_mock_new_parent', - '_mock_name', '_mock_new_name' -} - - -def _delegating_property(name): - _allowed_names.add(name) - _the_name = '_mock_' + name - def _get(self, name=name, _the_name=_the_name): - sig = self._mock_delegate - if sig is None: - return getattr(self, _the_name) - return getattr(sig, name) - def _set(self, value, name=name, _the_name=_the_name): - sig = self._mock_delegate - if sig is None: - self.__dict__[_the_name] = value - else: - setattr(sig, name, value) - - return property(_get, _set) - - - -class _CallList(list): - - def __contains__(self, value): - if not isinstance(value, list): - return list.__contains__(self, value) - len_value = len(value) - len_self = len(self) - if len_value > len_self: - return False - - for i in range(0, len_self - len_value + 1): - sub_list = self[i:i+len_value] - if sub_list == value: - return True - return False - - def __repr__(self): - return pprint.pformat(list(self)) - - -def _check_and_set_parent(parent, value, name, new_name): - value = _extract_mock(value) - - if not _is_instance_mock(value): - return False - if ((value._mock_name or value._mock_new_name) or - (value._mock_parent is not None) or - (value._mock_new_parent is not None)): - return False - - _parent = parent - while _parent is not None: - # setting a mock (value) as a child or return value of itself - # should not modify the mock - if _parent is value: - return False - _parent = _parent._mock_new_parent - - if new_name: - value._mock_new_parent = parent - value._mock_new_name = new_name - if name: - value._mock_parent = parent - value._mock_name = name - return True - -# Internal class to identify if we wrapped an iterator object or not. -class _MockIter(object): - def __init__(self, obj): - self.obj = iter(obj) - def __next__(self): - return next(self.obj) - -class Base(object): - _mock_return_value = DEFAULT - _mock_side_effect = None - def __init__(self, /, *args, **kwargs): - pass - - - -class NonCallableMock(Base): - """A non-callable version of `Mock`""" - - # Store a mutex as a class attribute in order to protect concurrent access - # to mock attributes. Using a class attribute allows all NonCallableMock - # instances to share the mutex for simplicity. - # - # See https://github.com/python/cpython/issues/98624 for why this is - # necessary. 
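A minimal sketch of sentinel and of the side_effect handling above; attribute names are illustrative:

from unittest import mock

# Every attribute of `sentinel` is a unique named object, useful as a
# "this exact object came back" marker.
marker = mock.sentinel.db_handle
assert mock.sentinel.db_handle is marker

m = mock.Mock(return_value=mock.sentinel.db_handle)
assert m() is mock.sentinel.db_handle

# side_effect accepts an exception, a callable, or an iterable; an
# iterable yields one result per call (wrapped internally by _MockIter),
# and exception members are raised.
m.side_effect = [1, 2, ValueError("boom")]
assert m() == 1 and m() == 2
try:
    m()
except ValueError as exc:
    print("third call raised:", exc)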
- _lock = RLock() - - def __new__( - cls, spec=None, wraps=None, name=None, spec_set=None, - parent=None, _spec_state=None, _new_name='', _new_parent=None, - _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs - ): - # every instance has its own class - # so we can create magic methods on the - # class without stomping on other mocks - bases = (cls,) - if not issubclass(cls, AsyncMockMixin): - # Check if spec is an async object or function - spec_arg = spec_set or spec - if spec_arg is not None and _is_async_obj(spec_arg): - bases = (AsyncMockMixin, cls) - new = type(cls.__name__, bases, {'__doc__': cls.__doc__}) - instance = _safe_super(NonCallableMock, cls).__new__(new) - return instance - - - def __init__( - self, spec=None, wraps=None, name=None, spec_set=None, - parent=None, _spec_state=None, _new_name='', _new_parent=None, - _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs - ): - if _new_parent is None: - _new_parent = parent - - __dict__ = self.__dict__ - __dict__['_mock_parent'] = parent - __dict__['_mock_name'] = name - __dict__['_mock_new_name'] = _new_name - __dict__['_mock_new_parent'] = _new_parent - __dict__['_mock_sealed'] = False - - if spec_set is not None: - spec = spec_set - spec_set = True - if _eat_self is None: - _eat_self = parent is not None - - self._mock_add_spec(spec, spec_set, _spec_as_instance, _eat_self) - - __dict__['_mock_children'] = {} - __dict__['_mock_wraps'] = wraps - __dict__['_mock_delegate'] = None - - __dict__['_mock_called'] = False - __dict__['_mock_call_args'] = None - __dict__['_mock_call_count'] = 0 - __dict__['_mock_call_args_list'] = _CallList() - __dict__['_mock_mock_calls'] = _CallList() - - __dict__['method_calls'] = _CallList() - __dict__['_mock_unsafe'] = unsafe - - if kwargs: - self.configure_mock(**kwargs) - - _safe_super(NonCallableMock, self).__init__( - spec, wraps, name, spec_set, parent, - _spec_state - ) - - - def attach_mock(self, mock, attribute): - """ - Attach a mock as an attribute of this one, replacing its name and - parent. Calls to the attached mock will be recorded in the - `method_calls` and `mock_calls` attributes of this one.""" - inner_mock = _extract_mock(mock) - - inner_mock._mock_parent = None - inner_mock._mock_new_parent = None - inner_mock._mock_name = '' - inner_mock._mock_new_name = None - - setattr(self, attribute, mock) - - - def mock_add_spec(self, spec, spec_set=False): - """Add a spec to a mock. `spec` can either be an object or a - list of strings. Only attributes on the `spec` can be fetched as - attributes from the mock. - - If `spec_set` is True then only attributes on the spec can be set.""" - self._mock_add_spec(spec, spec_set) - - - def _mock_add_spec(self, spec, spec_set, _spec_as_instance=False, - _eat_self=False): - if _is_instance_mock(spec): - raise InvalidSpecError(f'Cannot spec a Mock object. 
[object={spec!r}]') - - _spec_class = None - _spec_signature = None - _spec_asyncs = [] - - if spec is not None and not _is_list(spec): - if isinstance(spec, type): - _spec_class = spec - else: - _spec_class = type(spec) - res = _get_signature_object(spec, - _spec_as_instance, _eat_self) - _spec_signature = res and res[1] - - spec_list = dir(spec) - - for attr in spec_list: - static_attr = inspect.getattr_static(spec, attr, None) - unwrapped_attr = static_attr - try: - unwrapped_attr = inspect.unwrap(unwrapped_attr) - except ValueError: - pass - if iscoroutinefunction(unwrapped_attr): - _spec_asyncs.append(attr) - - spec = spec_list - - __dict__ = self.__dict__ - __dict__['_spec_class'] = _spec_class - __dict__['_spec_set'] = spec_set - __dict__['_spec_signature'] = _spec_signature - __dict__['_mock_methods'] = spec - __dict__['_spec_asyncs'] = _spec_asyncs - - def __get_return_value(self): - ret = self._mock_return_value - if self._mock_delegate is not None: - ret = self._mock_delegate.return_value - - if ret is DEFAULT and self._mock_wraps is None: - ret = self._get_child_mock( - _new_parent=self, _new_name='()' - ) - self.return_value = ret - return ret - - - def __set_return_value(self, value): - if self._mock_delegate is not None: - self._mock_delegate.return_value = value - else: - self._mock_return_value = value - _check_and_set_parent(self, value, None, '()') - - __return_value_doc = "The value to be returned when the mock is called." - return_value = property(__get_return_value, __set_return_value, - __return_value_doc) - - - @property - def __class__(self): - if self._spec_class is None: - return type(self) - return self._spec_class - - called = _delegating_property('called') - call_count = _delegating_property('call_count') - call_args = _delegating_property('call_args') - call_args_list = _delegating_property('call_args_list') - mock_calls = _delegating_property('mock_calls') - - - def __get_side_effect(self): - delegated = self._mock_delegate - if delegated is None: - return self._mock_side_effect - sf = delegated.side_effect - if (sf is not None and not callable(sf) - and not isinstance(sf, _MockIter) and not _is_exception(sf)): - sf = _MockIter(sf) - delegated.side_effect = sf - return sf - - def __set_side_effect(self, value): - value = _try_iter(value) - delegated = self._mock_delegate - if delegated is None: - self._mock_side_effect = value - else: - delegated.side_effect = value - - side_effect = property(__get_side_effect, __set_side_effect) - - - def reset_mock(self, visited=None, *, - return_value: bool = False, - side_effect: bool = False): - "Restore the mock object to its initial state." - if visited is None: - visited = [] - if id(self) in visited: - return - visited.append(id(self)) - - self.called = False - self.call_args = None - self.call_count = 0 - self.mock_calls = _CallList() - self.call_args_list = _CallList() - self.method_calls = _CallList() - - if return_value: - self._mock_return_value = DEFAULT - if side_effect: - self._mock_side_effect = None - - for child in self._mock_children.values(): - if isinstance(child, _SpecState) or child is _deleted: - continue - child.reset_mock(visited, return_value=return_value, side_effect=side_effect) - - ret = self._mock_return_value - if _is_instance_mock(ret) and ret is not self: - ret.reset_mock(visited) - - - def configure_mock(self, /, **kwargs): - """Set attributes on the mock through keyword arguments. 
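The `_mock_add_spec`/`__class__` plumbing above is what lets a specced mock pass `isinstance` checks while rejecting attributes the spec does not define, and `reset_mock` recurses into child mocks. An illustrative sketch (the `Service` class is hypothetical):

    from unittest.mock import Mock

    class Service:
        def fetch(self, url): ...

    m = Mock(spec=Service)
    assert isinstance(m, Service)   # __class__ reports the spec class
    m.fetch("https://example.com")
    m.fetch.assert_called_once_with("https://example.com")
    # m.push() would raise AttributeError: 'push' is not on the spec

    m.reset_mock()                  # also resets children such as m.fetch
    m.fetch.assert_not_called()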
- - Attributes plus return values and side effects can be set on child - mocks using standard dot notation and unpacking a dictionary in the - method call: - - >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError} - >>> mock.configure_mock(**attrs)""" - for arg, val in sorted(kwargs.items(), - # we sort on the number of dots so that - # attributes are set before we set attributes on - # attributes - key=lambda entry: entry[0].count('.')): - args = arg.split('.') - final = args.pop() - obj = self - for entry in args: - obj = getattr(obj, entry) - setattr(obj, final, val) - - - def __getattr__(self, name): - if name in {'_mock_methods', '_mock_unsafe'}: - raise AttributeError(name) - elif self._mock_methods is not None: - if name not in self._mock_methods or name in _all_magics: - raise AttributeError("Mock object has no attribute %r" % name) - elif _is_magic(name): - raise AttributeError(name) - if not self._mock_unsafe and (not self._mock_methods or name not in self._mock_methods): - if name.startswith(('assert', 'assret', 'asert', 'aseert', 'assrt')) or name in _ATTRIB_DENY_LIST: - raise AttributeError( - f"{name!r} is not a valid assertion. Use a spec " - f"for the mock if {name!r} is meant to be an attribute.") - - with NonCallableMock._lock: - result = self._mock_children.get(name) - if result is _deleted: - raise AttributeError(name) - elif result is None: - wraps = None - if self._mock_wraps is not None: - # XXXX should we get the attribute without triggering code - # execution? - wraps = getattr(self._mock_wraps, name) - - result = self._get_child_mock( - parent=self, name=name, wraps=wraps, _new_name=name, - _new_parent=self - ) - self._mock_children[name] = result - - elif isinstance(result, _SpecState): - try: - result = create_autospec( - result.spec, result.spec_set, result.instance, - result.parent, result.name - ) - except InvalidSpecError: - target_name = self.__dict__['_mock_name'] or self - raise InvalidSpecError( - f'Cannot autospec attr {name!r} from target ' - f'{target_name!r} as it has already been mocked out. ' - f'[target={self!r}, attr={result.spec!r}]') - self._mock_children[name] = result - - return result - - - def _extract_mock_name(self): - _name_list = [self._mock_new_name] - _parent = self._mock_new_parent - last = self - - dot = '.' - if _name_list == ['()']: - dot = '' - - while _parent is not None: - last = _parent - - _name_list.append(_parent._mock_new_name + dot) - dot = '.' - if _parent._mock_new_name == '()': - dot = '' - - _parent = _parent._mock_new_parent - - _name_list = list(reversed(_name_list)) - _first = last._mock_name or 'mock' - if len(_name_list) > 1: - if _name_list[1] not in ('()', '().'): - _first += '.' 
- _name_list[0] = _first - return ''.join(_name_list) - - def __repr__(self): - name = self._extract_mock_name() - - name_string = '' - if name not in ('mock', 'mock.'): - name_string = ' name=%r' % name - - spec_string = '' - if self._spec_class is not None: - spec_string = ' spec=%r' - if self._spec_set: - spec_string = ' spec_set=%r' - spec_string = spec_string % self._spec_class.__name__ - return "<%s%s%s id='%s'>" % ( - type(self).__name__, - name_string, - spec_string, - id(self) - ) - - - def __dir__(self): - """Filter the output of `dir(mock)` to only useful members.""" - if not FILTER_DIR: - return object.__dir__(self) - - extras = self._mock_methods or [] - from_type = dir(type(self)) - from_dict = list(self.__dict__) - from_child_mocks = [ - m_name for m_name, m_value in self._mock_children.items() - if m_value is not _deleted] - - from_type = [e for e in from_type if not e.startswith('_')] - from_dict = [e for e in from_dict if not e.startswith('_') or - _is_magic(e)] - return sorted(set(extras + from_type + from_dict + from_child_mocks)) - - - def __setattr__(self, name, value): - if name in _allowed_names: - # property setters go through here - return object.__setattr__(self, name, value) - elif (self._spec_set and self._mock_methods is not None and - name not in self._mock_methods and - name not in self.__dict__): - raise AttributeError("Mock object has no attribute '%s'" % name) - elif name in _unsupported_magics: - msg = 'Attempting to set unsupported magic method %r.' % name - raise AttributeError(msg) - elif name in _all_magics: - if self._mock_methods is not None and name not in self._mock_methods: - raise AttributeError("Mock object has no attribute '%s'" % name) - - if not _is_instance_mock(value): - setattr(type(self), name, _get_method(name, value)) - original = value - value = lambda *args, **kw: original(self, *args, **kw) - else: - # only set _new_name and not name so that mock_calls is tracked - # but not method calls - _check_and_set_parent(self, value, None, name) - setattr(type(self), name, value) - self._mock_children[name] = value - elif name == '__class__': - self._spec_class = value - return - else: - if _check_and_set_parent(self, value, name, name): - self._mock_children[name] = value - - if self._mock_sealed and not hasattr(self, name): - mock_name = f'{self._extract_mock_name()}.{name}' - raise AttributeError(f'Cannot set {mock_name}') - - if isinstance(value, PropertyMock): - self.__dict__[name] = value - return - return object.__setattr__(self, name, value) - - - def __delattr__(self, name): - if name in _all_magics and name in type(self).__dict__: - delattr(type(self), name) - if name not in self.__dict__: - # for magic methods that are still MagicProxy objects and - # not set on the instance itself - return - - obj = self._mock_children.get(name, _missing) - if name in self.__dict__: - _safe_super(NonCallableMock, self).__delattr__(name) - elif obj is _deleted: - raise AttributeError(name) - if obj is not _missing: - del self._mock_children[name] - self._mock_children[name] = _deleted - - - def _format_mock_call_signature(self, args, kwargs): - name = self._mock_name or 'mock' - return _format_call_signature(name, args, kwargs) - - - def _format_mock_failure_message(self, args, kwargs, action='call'): - message = 'expected %s not found.\nExpected: %s\n Actual: %s' - expected_string = self._format_mock_call_signature(args, kwargs) - call_args = self.call_args - actual_string = self._format_mock_call_signature(*call_args) - return message % (action, 
expected_string, actual_string) - - - def _get_call_signature_from_name(self, name): - """ - * If call objects are asserted against a method/function like obj.meth1 - then there could be no name for the call object to lookup. Hence just - return the spec_signature of the method/function being asserted against. - * If the name is not empty then remove () and split by '.' to get - list of names to iterate through the children until a potential - match is found. A child mock is created only during attribute access - so if we get a _SpecState then no attributes of the spec were accessed - and can be safely exited. - """ - if not name: - return self._spec_signature - - sig = None - names = name.replace('()', '').split('.') - children = self._mock_children - - for name in names: - child = children.get(name) - if child is None or isinstance(child, _SpecState): - break - else: - # If an autospecced object is attached using attach_mock the - # child would be a function with mock object as attribute from - # which signature has to be derived. - child = _extract_mock(child) - children = child._mock_children - sig = child._spec_signature - - return sig - - - def _call_matcher(self, _call): - """ - Given a call (or simply an (args, kwargs) tuple), return a - comparison key suitable for matching with other calls. - This is a best effort method which relies on the spec's signature, - if available, or falls back on the arguments themselves. - """ - - if isinstance(_call, tuple) and len(_call) > 2: - sig = self._get_call_signature_from_name(_call[0]) - else: - sig = self._spec_signature - - if sig is not None: - if len(_call) == 2: - name = '' - args, kwargs = _call - else: - name, args, kwargs = _call - try: - bound_call = sig.bind(*args, **kwargs) - return call(name, bound_call.args, bound_call.kwargs) - except TypeError as e: - return e.with_traceback(None) - else: - return _call - - def assert_not_called(self): - """assert that the mock was never called. - """ - if self.call_count != 0: - msg = ("Expected '%s' to not have been called. Called %s times.%s" - % (self._mock_name or 'mock', - self.call_count, - self._calls_repr())) - raise AssertionError(msg) - - def assert_called(self): - """assert that the mock was called at least once - """ - if self.call_count == 0: - msg = ("Expected '%s' to have been called." % - (self._mock_name or 'mock')) - raise AssertionError(msg) - - def assert_called_once(self): - """assert that the mock was called only once. - """ - if not self.call_count == 1: - msg = ("Expected '%s' to have been called once. Called %s times.%s" - % (self._mock_name or 'mock', - self.call_count, - self._calls_repr())) - raise AssertionError(msg) - - def assert_called_with(self, /, *args, **kwargs): - """assert that the last call was made with the specified arguments. - - Raises an AssertionError if the args and keyword args passed in are - different to the last call to the mock.""" - if self.call_args is None: - expected = self._format_mock_call_signature(args, kwargs) - actual = 'not called.' 
- error_message = ('expected call not found.\nExpected: %s\n Actual: %s' - % (expected, actual)) - raise AssertionError(error_message) - - def _error_message(): - msg = self._format_mock_failure_message(args, kwargs) - return msg - expected = self._call_matcher(_Call((args, kwargs), two=True)) - actual = self._call_matcher(self.call_args) - if actual != expected: - cause = expected if isinstance(expected, Exception) else None - raise AssertionError(_error_message()) from cause - - - def assert_called_once_with(self, /, *args, **kwargs): - """assert that the mock was called exactly once and that that call was - with the specified arguments.""" - if not self.call_count == 1: - msg = ("Expected '%s' to be called once. Called %s times.%s" - % (self._mock_name or 'mock', - self.call_count, - self._calls_repr())) - raise AssertionError(msg) - return self.assert_called_with(*args, **kwargs) - - - def assert_has_calls(self, calls, any_order=False): - """assert the mock has been called with the specified calls. - The `mock_calls` list is checked for the calls. - - If `any_order` is False (the default) then the calls must be - sequential. There can be extra calls before or after the - specified calls. - - If `any_order` is True then the calls can be in any order, but - they must all appear in `mock_calls`.""" - expected = [self._call_matcher(c) for c in calls] - cause = next((e for e in expected if isinstance(e, Exception)), None) - all_calls = _CallList(self._call_matcher(c) for c in self.mock_calls) - if not any_order: - if expected not in all_calls: - if cause is None: - problem = 'Calls not found.' - else: - problem = ('Error processing expected calls.\n' - 'Errors: {}').format( - [e if isinstance(e, Exception) else None - for e in expected]) - raise AssertionError( - f'{problem}\n' - f'Expected: {_CallList(calls)}\n' - f' Actual: {safe_repr(self.mock_calls)}' - ) from cause - return - - all_calls = list(all_calls) - - not_found = [] - for kall in expected: - try: - all_calls.remove(kall) - except ValueError: - not_found.append(kall) - if not_found: - raise AssertionError( - '%r does not contain all of %r in its call list, ' - 'found %r instead' % (self._mock_name or 'mock', - tuple(not_found), all_calls) - ) from cause - - - def assert_any_call(self, /, *args, **kwargs): - """assert the mock has been called with the specified arguments. - - The assert passes if the mock has *ever* been called, unlike - `assert_called_with` and `assert_called_once_with` that only pass if - the call is the most recent one.""" - expected = self._call_matcher(_Call((args, kwargs), two=True)) - cause = expected if isinstance(expected, Exception) else None - actual = [self._call_matcher(c) for c in self.call_args_list] - if cause or expected not in _AnyComparer(actual): - expected_string = self._format_mock_call_signature(args, kwargs) - raise AssertionError( - '%s call not found' % expected_string - ) from cause - - - def _get_child_mock(self, /, **kw): - """Create the child mocks for attributes and return value. - By default child mocks will be the same type as the parent. - Subclasses of Mock may want to override this to customize the way - child mocks are made. 
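The assertion helpers above differ mainly in how much of the call history they inspect; roughly, in a sketch:

    from unittest.mock import Mock, call

    m = Mock()
    m(1); m(2); m(3)

    m.assert_called_with(3)                       # most recent call only
    m.assert_any_call(2)                          # anywhere in the history
    m.assert_has_calls([call(1), call(2)])        # consecutive subsequence
    m.assert_has_calls([call(3), call(1)], any_order=True)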
- - For non-callable mocks the callable variant will be used (rather than - any custom subclass).""" - if self._mock_sealed: - attribute = f".{kw['name']}" if "name" in kw else "()" - mock_name = self._extract_mock_name() + attribute - raise AttributeError(mock_name) - - _new_name = kw.get("_new_name") - if _new_name in self.__dict__['_spec_asyncs']: - return AsyncMock(**kw) - - _type = type(self) - if issubclass(_type, MagicMock) and _new_name in _async_method_magics: - # Any asynchronous magic becomes an AsyncMock - klass = AsyncMock - elif issubclass(_type, AsyncMockMixin): - if (_new_name in _all_sync_magics or - self._mock_methods and _new_name in self._mock_methods): - # Any synchronous method on AsyncMock becomes a MagicMock - klass = MagicMock - else: - klass = AsyncMock - elif not issubclass(_type, CallableMixin): - if issubclass(_type, NonCallableMagicMock): - klass = MagicMock - elif issubclass(_type, NonCallableMock): - klass = Mock - else: - klass = _type.__mro__[1] - return klass(**kw) - - - def _calls_repr(self): - """Renders self.mock_calls as a string. - - Example: "\nCalls: [call(1), call(2)]." - - If self.mock_calls is empty, an empty string is returned. The - output will be truncated if very long. - """ - if not self.mock_calls: - return "" - return f"\nCalls: {safe_repr(self.mock_calls)}." - - -# Denylist for forbidden attribute names in safe mode -_ATTRIB_DENY_LIST = frozenset({ - name.removeprefix("assert_") - for name in dir(NonCallableMock) - if name.startswith("assert_") -}) - - -class _AnyComparer(list): - """A list which checks if it contains a call which may have an - argument of ANY, flipping the components of item and self from - their traditional locations so that ANY is guaranteed to be on - the left.""" - def __contains__(self, item): - for _call in self: - assert len(item) == len(_call) - if all([ - expected == actual - for expected, actual in zip(item, _call) - ]): - return True - return False - - -def _try_iter(obj): - if obj is None: - return obj - if _is_exception(obj): - return obj - if _callable(obj): - return obj - try: - return iter(obj) - except TypeError: - # XXXX backwards compatibility - # but this will blow up on first call - so maybe we should fail early? 
- return obj - - -class CallableMixin(Base): - - def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, - wraps=None, name=None, spec_set=None, parent=None, - _spec_state=None, _new_name='', _new_parent=None, **kwargs): - self.__dict__['_mock_return_value'] = return_value - _safe_super(CallableMixin, self).__init__( - spec, wraps, name, spec_set, parent, - _spec_state, _new_name, _new_parent, **kwargs - ) - - self.side_effect = side_effect - - - def _mock_check_sig(self, /, *args, **kwargs): - # stub method that can be replaced with one with a specific signature - pass - - - def __call__(self, /, *args, **kwargs): - # can't use self in-case a function / method we are mocking uses self - # in the signature - self._mock_check_sig(*args, **kwargs) - self._increment_mock_call(*args, **kwargs) - return self._mock_call(*args, **kwargs) - - - def _mock_call(self, /, *args, **kwargs): - return self._execute_mock_call(*args, **kwargs) - - def _increment_mock_call(self, /, *args, **kwargs): - self.called = True - - # handle call_args - # needs to be set here so assertions on call arguments pass before - # execution in the case of awaited calls - with NonCallableMock._lock: - # Lock is used here so that call_args_list and call_count are - # set atomically otherwise it is possible that by the time call_count - # is set another thread may have appended to call_args_list. - # The rest of this function relies on list.append being atomic and - # skips locking. - _call = _Call((args, kwargs), two=True) - self.call_args = _call - self.call_args_list.append(_call) - self.call_count = len(self.call_args_list) - - # initial stuff for method_calls: - do_method_calls = self._mock_parent is not None - method_call_name = self._mock_name - - # initial stuff for mock_calls: - mock_call_name = self._mock_new_name - is_a_call = mock_call_name == '()' - self.mock_calls.append(_Call(('', args, kwargs))) - - # follow up the chain of mocks: - _new_parent = self._mock_new_parent - while _new_parent is not None: - - # handle method_calls: - if do_method_calls: - _new_parent.method_calls.append(_Call((method_call_name, args, kwargs))) - do_method_calls = _new_parent._mock_parent is not None - if do_method_calls: - method_call_name = _new_parent._mock_name + '.' + method_call_name - - # handle mock_calls: - this_mock_call = _Call((mock_call_name, args, kwargs)) - _new_parent.mock_calls.append(this_mock_call) - - if _new_parent._mock_new_name: - if is_a_call: - dot = '' - else: - dot = '.' 
- is_a_call = _new_parent._mock_new_name == '()' - mock_call_name = _new_parent._mock_new_name + dot + mock_call_name - - # follow the parental chain: - _new_parent = _new_parent._mock_new_parent - - def _execute_mock_call(self, /, *args, **kwargs): - # separate from _increment_mock_call so that awaited functions are - # executed separately from their call, also AsyncMock overrides this method - - effect = self.side_effect - if effect is not None: - if _is_exception(effect): - raise effect - elif not _callable(effect): - result = next(effect) - if _is_exception(result): - raise result - else: - result = effect(*args, **kwargs) - - if result is not DEFAULT: - return result - - if self._mock_return_value is not DEFAULT: - return self.return_value - - if self._mock_delegate and self._mock_delegate.return_value is not DEFAULT: - return self.return_value - - if self._mock_wraps is not None: - return self._mock_wraps(*args, **kwargs) - - return self.return_value - - - -class Mock(CallableMixin, NonCallableMock): - """ - Create a new `Mock` object. `Mock` takes several optional arguments - that specify the behaviour of the Mock object: - - * `spec`: This can be either a list of strings or an existing object (a - class or instance) that acts as the specification for the mock object. If - you pass in an object then a list of strings is formed by calling dir on - the object (excluding unsupported magic attributes and methods). Accessing - any attribute not in this list will raise an `AttributeError`. - - If `spec` is an object (rather than a list of strings) then - `mock.__class__` returns the class of the spec object. This allows mocks - to pass `isinstance` tests. - - * `spec_set`: A stricter variant of `spec`. If used, attempting to *set* - or get an attribute on the mock that isn't on the object passed as - `spec_set` will raise an `AttributeError`. - - * `side_effect`: A function to be called whenever the Mock is called. See - the `side_effect` attribute. Useful for raising exceptions or - dynamically changing return values. The function is called with the same - arguments as the mock, and unless it returns `DEFAULT`, the return - value of this function is used as the return value. - - If `side_effect` is an iterable then each call to the mock will return - the next value from the iterable. If any of the members of the iterable - are exceptions they will be raised instead of returned. - - * `return_value`: The value returned when the mock is called. By default - this is a new Mock (created on first access). See the - `return_value` attribute. - - * `unsafe`: By default, accessing any attribute whose name starts with - *assert*, *assret*, *asert*, *aseert*, or *assrt* raises an AttributeError. - Additionally, an AttributeError is raised when accessing - attributes that match the name of an assertion method without the prefix - `assert_`, e.g. accessing `called_once` instead of `assert_called_once`. - Passing `unsafe=True` will allow access to these attributes. - - * `wraps`: Item for the mock object to wrap. If `wraps` is not None then - calling the Mock will pass the call through to the wrapped object - (returning the real result). Attribute access on the mock will return a - Mock object that wraps the corresponding attribute of the wrapped object - (so attempting to access an attribute that doesn't exist will raise an - `AttributeError`). - - If the mock has an explicit `return_value` set then calls are not passed - to the wrapped object and the `return_value` is returned instead. 
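`_execute_mock_call` above fixes the precedence between the three result sources: `side_effect` first, then an explicitly set `return_value`, then the `wraps` target. A small sketch of that ordering:

    from unittest.mock import Mock

    m = Mock(wraps=lambda x: x * 2)
    assert m(3) == 6        # nothing else set: the call falls through to wraps

    m.return_value = 99
    assert m(3) == 99       # an explicit return_value beats wraps

    m.side_effect = [1, ValueError("boom")]
    assert m() == 1         # iterable side_effects yield successive results...
    try:
        m()
    except ValueError:
        pass                # ...and exception members are raised instead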
- - * `name`: If the mock has a name then it will be used in the repr of the - mock. This can be useful for debugging. The name is propagated to child - mocks. - - Mocks can also be called with arbitrary keyword arguments. These will be - used to set attributes on the mock after it is created. - """ - - -# _check_spec_arg_typos takes kwargs from commands like patch and checks that -# they don't contain common misspellings of arguments related to autospeccing. -def _check_spec_arg_typos(kwargs_to_check): - typos = ("autospect", "auto_spec", "set_spec") - for typo in typos: - if typo in kwargs_to_check: - raise RuntimeError( - f"{typo!r} might be a typo; use unsafe=True if this is intended" - ) - - -class _patch(object): - - attribute_name = None - _active_patches = [] - - def __init__( - self, getter, attribute, new, spec, create, - spec_set, autospec, new_callable, kwargs, *, unsafe=False - ): - if new_callable is not None: - if new is not DEFAULT: - raise ValueError( - "Cannot use 'new' and 'new_callable' together" - ) - if autospec is not None: - raise ValueError( - "Cannot use 'autospec' and 'new_callable' together" - ) - if not unsafe: - _check_spec_arg_typos(kwargs) - if _is_instance_mock(spec): - raise InvalidSpecError( - f'Cannot spec attr {attribute!r} as the spec ' - f'has already been mocked out. [spec={spec!r}]') - if _is_instance_mock(spec_set): - raise InvalidSpecError( - f'Cannot spec attr {attribute!r} as the spec_set ' - f'target has already been mocked out. [spec_set={spec_set!r}]') - - self.getter = getter - self.attribute = attribute - self.new = new - self.new_callable = new_callable - self.spec = spec - self.create = create - self.has_local = False - self.spec_set = spec_set - self.autospec = autospec - self.kwargs = kwargs - self.additional_patchers = [] - self.is_started = False - - - def copy(self): - patcher = _patch( - self.getter, self.attribute, self.new, self.spec, - self.create, self.spec_set, - self.autospec, self.new_callable, self.kwargs - ) - patcher.attribute_name = self.attribute_name - patcher.additional_patchers = [ - p.copy() for p in self.additional_patchers - ] - return patcher - - - def __call__(self, func): - if isinstance(func, type): - return self.decorate_class(func) - if inspect.iscoroutinefunction(func): - return self.decorate_async_callable(func) - return self.decorate_callable(func) - - - def decorate_class(self, klass): - for attr in dir(klass): - if not attr.startswith(patch.TEST_PREFIX): - continue - - attr_value = getattr(klass, attr) - if not hasattr(attr_value, "__call__"): - continue - - patcher = self.copy() - setattr(klass, attr, patcher(attr_value)) - return klass - - - @contextlib.contextmanager - def decoration_helper(self, patched, args, keywargs): - extra_args = [] - with contextlib.ExitStack() as exit_stack: - for patching in patched.patchings: - arg = exit_stack.enter_context(patching) - if patching.attribute_name is not None: - keywargs.update(arg) - elif patching.new is DEFAULT: - extra_args.append(arg) - - args += tuple(extra_args) - yield (args, keywargs) - - - def decorate_callable(self, func): - # NB. Keep the method in sync with decorate_async_callable() - if hasattr(func, 'patchings'): - func.patchings.append(self) - return func - - @wraps(func) - def patched(*args, **keywargs): - with self.decoration_helper(patched, - args, - keywargs) as (newargs, newkeywargs): - return func(*newargs, **newkeywargs) - - patched.patchings = [self] - return patched - - - def decorate_async_callable(self, func): - # NB. 
Keep the method in sync with decorate_callable() - if hasattr(func, 'patchings'): - func.patchings.append(self) - return func - - @wraps(func) - async def patched(*args, **keywargs): - with self.decoration_helper(patched, - args, - keywargs) as (newargs, newkeywargs): - return await func(*newargs, **newkeywargs) - - patched.patchings = [self] - return patched - - - def get_original(self): - target = self.getter() - name = self.attribute - - original = DEFAULT - local = False - - try: - original = target.__dict__[name] - except (AttributeError, KeyError): - original = getattr(target, name, DEFAULT) - else: - local = True - - if name in _builtins and isinstance(target, ModuleType): - self.create = True - - if not self.create and original is DEFAULT: - raise AttributeError( - "%s does not have the attribute %r" % (target, name) - ) - return original, local - - - def __enter__(self): - """Perform the patch.""" - if self.is_started: - raise RuntimeError("Patch is already started") - - new, spec, spec_set = self.new, self.spec, self.spec_set - autospec, kwargs = self.autospec, self.kwargs - new_callable = self.new_callable - self.target = self.getter() - - # normalise False to None - if spec is False: - spec = None - if spec_set is False: - spec_set = None - if autospec is False: - autospec = None - - if spec is not None and autospec is not None: - raise TypeError("Can't specify spec and autospec") - if ((spec is not None or autospec is not None) and - spec_set not in (True, None)): - raise TypeError("Can't provide explicit spec_set *and* spec or autospec") - - original, local = self.get_original() - - if new is DEFAULT and autospec is None: - inherit = False - if spec is True: - # set spec to the object we are replacing - spec = original - if spec_set is True: - spec_set = original - spec = None - elif spec is not None: - if spec_set is True: - spec_set = spec - spec = None - elif spec_set is True: - spec_set = original - - if spec is not None or spec_set is not None: - if original is DEFAULT: - raise TypeError("Can't use 'spec' with create=True") - if isinstance(original, type): - # If we're patching out a class and there is a spec - inherit = True - - # Determine the Klass to use - if new_callable is not None: - Klass = new_callable - elif spec is None and _is_async_obj(original): - Klass = AsyncMock - elif spec is not None or spec_set is not None: - this_spec = spec - if spec_set is not None: - this_spec = spec_set - if _is_list(this_spec): - not_callable = '__call__' not in this_spec - else: - not_callable = not callable(this_spec) - if _is_async_obj(this_spec): - Klass = AsyncMock - elif not_callable: - Klass = NonCallableMagicMock - else: - Klass = MagicMock - else: - Klass = MagicMock - - _kwargs = {} - if spec is not None: - _kwargs['spec'] = spec - if spec_set is not None: - _kwargs['spec_set'] = spec_set - - # add a name to mocks - if (isinstance(Klass, type) and - issubclass(Klass, NonCallableMock) and self.attribute): - _kwargs['name'] = self.attribute - - _kwargs.update(kwargs) - new = Klass(**_kwargs) - - if inherit and _is_instance_mock(new): - # we can only tell if the instance should be callable if the - # spec is not a list - this_spec = spec - if spec_set is not None: - this_spec = spec_set - if (not _is_list(this_spec) and not - _instance_callable(this_spec)): - Klass = NonCallableMagicMock - - _kwargs.pop('name') - new.return_value = Klass(_new_parent=new, _new_name='()', - **_kwargs) - elif autospec is not None: - # spec is ignored, new *must* be default, spec_set is 
treated - # as a boolean. Should we check spec is not None and that spec_set - # is a bool? - if new is not DEFAULT: - raise TypeError( - "autospec creates the mock for you. Can't specify " - "autospec and new." - ) - if original is DEFAULT: - raise TypeError("Can't use 'autospec' with create=True") - spec_set = bool(spec_set) - if autospec is True: - autospec = original - - if _is_instance_mock(self.target): - raise InvalidSpecError( - f'Cannot autospec attr {self.attribute!r} as the patch ' - f'target has already been mocked out. ' - f'[target={self.target!r}, attr={autospec!r}]') - if _is_instance_mock(autospec): - target_name = getattr(self.target, '__name__', self.target) - raise InvalidSpecError( - f'Cannot autospec attr {self.attribute!r} from target ' - f'{target_name!r} as it has already been mocked out. ' - f'[target={self.target!r}, attr={autospec!r}]') - - new = create_autospec(autospec, spec_set=spec_set, - _name=self.attribute, **kwargs) - elif kwargs: - # can't set keyword args when we aren't creating the mock - # XXXX If new is a Mock we could call new.configure_mock(**kwargs) - raise TypeError("Can't pass kwargs to a mock we aren't creating") - - new_attr = new - - self.temp_original = original - self.is_local = local - self._exit_stack = contextlib.ExitStack() - self.is_started = True - try: - setattr(self.target, self.attribute, new_attr) - if self.attribute_name is not None: - extra_args = {} - if self.new is DEFAULT: - extra_args[self.attribute_name] = new - for patching in self.additional_patchers: - arg = self._exit_stack.enter_context(patching) - if patching.new is DEFAULT: - extra_args.update(arg) - return extra_args - - return new - except: - if not self.__exit__(*sys.exc_info()): - raise - - def __exit__(self, *exc_info): - """Undo the patch.""" - if not self.is_started: - return - - if self.is_local and self.temp_original is not DEFAULT: - setattr(self.target, self.attribute, self.temp_original) - else: - delattr(self.target, self.attribute) - if not self.create and (not hasattr(self.target, self.attribute) or - self.attribute in ('__doc__', '__module__', - '__defaults__', '__annotations__', - '__kwdefaults__')): - # needed for proxy objects like django settings - setattr(self.target, self.attribute, self.temp_original) - - del self.temp_original - del self.is_local - del self.target - exit_stack = self._exit_stack - del self._exit_stack - self.is_started = False - return exit_stack.__exit__(*exc_info) - - - def start(self): - """Activate a patch, returning any created mock.""" - result = self.__enter__() - self._active_patches.append(self) - return result - - - def stop(self): - """Stop an active patch.""" - try: - self._active_patches.remove(self) - except ValueError: - # If the patch hasn't been started this will fail - return None - - return self.__exit__(None, None, None) - - - -def _get_target(target): - try: - target, attribute = target.rsplit('.', 1) - except (TypeError, ValueError, AttributeError): - raise TypeError( - f"Need a valid target to patch. You supplied: {target!r}") - return partial(pkgutil.resolve_name, target), attribute - - -def _patch_object( - target, attribute, new=DEFAULT, spec=None, - create=False, spec_set=None, autospec=None, - new_callable=None, *, unsafe=False, **kwargs - ): - """ - patch the named member (`attribute`) on an object (`target`) with a mock - object. - - `patch.object` can be used as a decorator, class decorator or a context - manager. 
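`start()`/`stop()` above are the imperative counterparts of the context-manager protocol, registering the patcher in `_active_patches` so `patch.stopall()` can unwind it later. A usage sketch (the `os.getcwd` target is just an example):

    from unittest.mock import patch

    patcher = patch("os.getcwd", return_value="/tmp")
    mock_cwd = patcher.start()      # __enter__ plus _active_patches bookkeeping
    try:
        import os
        assert os.getcwd() == "/tmp"
    finally:
        patcher.stop()              # undoes the patch and deregisters it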
Arguments `new`, `spec`, `create`, `spec_set`, - `autospec` and `new_callable` have the same meaning as for `patch`. Like - `patch`, `patch.object` takes arbitrary keyword arguments for configuring - the mock object it creates. - - When used as a class decorator `patch.object` honours `patch.TEST_PREFIX` - for choosing which methods to wrap. - """ - if type(target) is str: - raise TypeError( - f"{target!r} must be the actual object to be patched, not a str" - ) - getter = lambda: target - return _patch( - getter, attribute, new, spec, create, - spec_set, autospec, new_callable, kwargs, unsafe=unsafe - ) - - -def _patch_multiple(target, spec=None, create=False, spec_set=None, - autospec=None, new_callable=None, **kwargs): - """Perform multiple patches in a single call. It takes the object to be - patched (either as an object or a string to fetch the object by importing) - and keyword arguments for the patches:: - - with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'): - ... - - Use `DEFAULT` as the value if you want `patch.multiple` to create - mocks for you. In this case the created mocks are passed into a decorated - function by keyword, and a dictionary is returned when `patch.multiple` is - used as a context manager. - - `patch.multiple` can be used as a decorator, class decorator or a context - manager. The arguments `spec`, `spec_set`, `create`, - `autospec` and `new_callable` have the same meaning as for `patch`. These - arguments will be applied to *all* patches done by `patch.multiple`. - - When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX` - for choosing which methods to wrap. - """ - if type(target) is str: - getter = partial(pkgutil.resolve_name, target) - else: - getter = lambda: target - - if not kwargs: - raise ValueError( - 'Must supply at least one keyword argument with patch.multiple' - ) - # need to wrap in a list for python 3, where items is a view - items = list(kwargs.items()) - attribute, new = items[0] - patcher = _patch( - getter, attribute, new, spec, create, spec_set, - autospec, new_callable, {} - ) - patcher.attribute_name = attribute - for attribute, new in items[1:]: - this_patcher = _patch( - getter, attribute, new, spec, create, spec_set, - autospec, new_callable, {} - ) - this_patcher.attribute_name = attribute - patcher.additional_patchers.append(this_patcher) - return patcher - - -def patch( - target, new=DEFAULT, spec=None, create=False, - spec_set=None, autospec=None, new_callable=None, *, unsafe=False, **kwargs - ): - """ - `patch` acts as a function decorator, class decorator or a context - manager. Inside the body of the function or with statement, the `target` - is patched with a `new` object. When the function/with statement exits - the patch is undone. - - If `new` is omitted, then the target is replaced with an - `AsyncMock if the patched object is an async function or a - `MagicMock` otherwise. If `patch` is used as a decorator and `new` is - omitted, the created mock is passed in as an extra argument to the - decorated function. If `patch` is used as a context manager the created - mock is returned by the context manager. - - `target` should be a string in the form `'package.module.ClassName'`. The - `target` is imported and the specified object replaced with the `new` - object, so the `target` must be importable from the environment you are - calling `patch` from. The target is imported when the decorated function - is executed, not at decoration time. 
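As the `_patch_multiple` docstring above notes, passing `DEFAULT` asks `patch.multiple` to create the mocks itself; used as a context manager it then returns them as a dict. A brief sketch (patching `os.path` purely for illustration):

    from unittest.mock import DEFAULT, patch

    with patch.multiple("os.path", exists=DEFAULT, isdir=DEFAULT) as mocks:
        mocks["exists"].return_value = True
        import os.path
        assert os.path.exists("/nowhere")   # answered by the created mock
        mocks["isdir"].assert_not_called()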
- - The `spec` and `spec_set` keyword arguments are passed to the `MagicMock` - if patch is creating one for you. - - In addition you can pass `spec=True` or `spec_set=True`, which causes - patch to pass in the object being mocked as the spec/spec_set object. - - `new_callable` allows you to specify a different class, or callable object, - that will be called to create the `new` object. By default `AsyncMock` is - used for async functions and `MagicMock` for the rest. - - A more powerful form of `spec` is `autospec`. If you set `autospec=True` - then the mock will be created with a spec from the object being replaced. - All attributes of the mock will also have the spec of the corresponding - attribute of the object being replaced. Methods and functions being - mocked will have their arguments checked and will raise a `TypeError` if - they are called with the wrong signature. For mocks replacing a class, - their return value (the 'instance') will have the same spec as the class. - - Instead of `autospec=True` you can pass `autospec=some_object` to use an - arbitrary object as the spec instead of the one being replaced. - - By default `patch` will fail to replace attributes that don't exist. If - you pass in `create=True`, and the attribute doesn't exist, patch will - create the attribute for you when the patched function is called, and - delete it again afterwards. This is useful for writing tests against - attributes that your production code creates at runtime. It is off by - default because it can be dangerous. With it switched on you can write - passing tests against APIs that don't actually exist! - - Patch can be used as a `TestCase` class decorator. It works by - decorating each test method in the class. This reduces the boilerplate - code when your test methods share a common patchings set. `patch` finds - tests by looking for method names that start with `patch.TEST_PREFIX`. - By default this is `test`, which matches the way `unittest` finds tests. - You can specify an alternative prefix by setting `patch.TEST_PREFIX`. - - Patch can be used as a context manager, with the with statement. Here the - patching applies to the indented block after the with statement. If you - use "as" then the patched object will be bound to the name after the - "as"; very useful if `patch` is creating a mock object for you. - - Patch will raise a `RuntimeError` if passed some common misspellings of - the arguments autospec and spec_set. Pass the argument `unsafe` with the - value True to disable that check. - - `patch` takes arbitrary keyword arguments. These will be passed to - `AsyncMock` if the patched object is asynchronous, to `MagicMock` - otherwise or to `new_callable` if specified. - - `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are - available for alternate use-cases. - """ - getter, attribute = _get_target(target) - return _patch( - getter, attribute, new, spec, create, - spec_set, autospec, new_callable, kwargs, unsafe=unsafe - ) - - -class _patch_dict(object): - """ - Patch a dictionary, or dictionary like object, and restore the dictionary - to its original state after the test. - - `in_dict` can be a dictionary or a mapping like container. If it is a - mapping then it must at least support getting, setting and deleting items - plus iterating over keys. - - `in_dict` can also be a string specifying the name of the dictionary, which - will then be fetched by importing it. - - `values` can be a dictionary of values to set in the dictionary. 
`values` - can also be an iterable of `(key, value)` pairs. - - If `clear` is True then the dictionary will be cleared before the new - values are set. - - `patch.dict` can also be called with arbitrary keyword arguments to set - values in the dictionary:: - - with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()): - ... - - `patch.dict` can be used as a context manager, decorator or class - decorator. When used as a class decorator `patch.dict` honours - `patch.TEST_PREFIX` for choosing which methods to wrap. - """ - - def __init__(self, in_dict, values=(), clear=False, **kwargs): - self.in_dict = in_dict - # support any argument supported by dict(...) constructor - self.values = dict(values) - self.values.update(kwargs) - self.clear = clear - self._original = None - - - def __call__(self, f): - if isinstance(f, type): - return self.decorate_class(f) - if inspect.iscoroutinefunction(f): - return self.decorate_async_callable(f) - return self.decorate_callable(f) - - - def decorate_callable(self, f): - @wraps(f) - def _inner(*args, **kw): - self._patch_dict() - try: - return f(*args, **kw) - finally: - self._unpatch_dict() - - return _inner - - - def decorate_async_callable(self, f): - @wraps(f) - async def _inner(*args, **kw): - self._patch_dict() - try: - return await f(*args, **kw) - finally: - self._unpatch_dict() - - return _inner - - - def decorate_class(self, klass): - for attr in dir(klass): - attr_value = getattr(klass, attr) - if (attr.startswith(patch.TEST_PREFIX) and - hasattr(attr_value, "__call__")): - decorator = _patch_dict(self.in_dict, self.values, self.clear) - decorated = decorator(attr_value) - setattr(klass, attr, decorated) - return klass - - - def __enter__(self): - """Patch the dict.""" - self._patch_dict() - return self.in_dict - - - def _patch_dict(self): - values = self.values - if isinstance(self.in_dict, str): - self.in_dict = pkgutil.resolve_name(self.in_dict) - in_dict = self.in_dict - clear = self.clear - - try: - original = in_dict.copy() - except AttributeError: - # dict like object with no copy method - # must support iteration over keys - original = {} - for key in in_dict: - original[key] = in_dict[key] - self._original = original - - if clear: - _clear_dict(in_dict) - - try: - in_dict.update(values) - except AttributeError: - # dict like object with no update method - for key in values: - in_dict[key] = values[key] - - - def _unpatch_dict(self): - in_dict = self.in_dict - original = self._original - - _clear_dict(in_dict) - - try: - in_dict.update(original) - except AttributeError: - for key in original: - in_dict[key] = original[key] - - - def __exit__(self, *args): - """Unpatch the dict.""" - if self._original is not None: - self._unpatch_dict() - return False - - - def start(self): - """Activate a patch, returning any created mock.""" - result = self.__enter__() - _patch._active_patches.append(self) - return result - - - def stop(self): - """Stop an active patch.""" - try: - _patch._active_patches.remove(self) - except ValueError: - # If the patch hasn't been started this will fail - return None - - return self.__exit__(None, None, None) - - -def _clear_dict(in_dict): - try: - in_dict.clear() - except AttributeError: - keys = list(in_dict) - for key in keys: - del in_dict[key] - - -def _patch_stopall(): - """Stop all active patches. 
LIFO to unroll nested patches.""" - for patch in reversed(_patch._active_patches): - patch.stop() - - -patch.object = _patch_object -patch.dict = _patch_dict -patch.multiple = _patch_multiple -patch.stopall = _patch_stopall -patch.TEST_PREFIX = 'test' - -magic_methods = ( - "lt le gt ge eq ne " - "getitem setitem delitem " - "len contains iter " - "hash str sizeof " - "enter exit " - # we added divmod and rdivmod here instead of numerics - # because there is no idivmod - "divmod rdivmod neg pos abs invert " - "complex int float index " - "round trunc floor ceil " - "bool next " - "fspath " - "aiter " -) - -numerics = ( - "add sub mul matmul truediv floordiv mod lshift rshift and xor or pow" -) -inplace = ' '.join('i%s' % n for n in numerics.split()) -right = ' '.join('r%s' % n for n in numerics.split()) - -# not including __prepare__, __instancecheck__, __subclasscheck__ -# (as they are metaclass methods) -# __del__ is not supported at all as it causes problems if it exists - -_non_defaults = { - '__get__', '__set__', '__delete__', '__reversed__', '__missing__', - '__reduce__', '__reduce_ex__', '__getinitargs__', '__getnewargs__', - '__getstate__', '__setstate__', '__getformat__', - '__repr__', '__dir__', '__subclasses__', '__format__', - '__getnewargs_ex__', -} - - -def _get_method(name, func): - "Turns a callable object (like a mock) into a real function" - def method(self, /, *args, **kw): - return func(self, *args, **kw) - method.__name__ = name - return method - - -_magics = { - '__%s__' % method for method in - ' '.join([magic_methods, numerics, inplace, right]).split() -} - -# Magic methods used for async `with` statements -_async_method_magics = {"__aenter__", "__aexit__", "__anext__"} -# Magic methods that are only used with async calls but are synchronous functions themselves -_sync_async_magics = {"__aiter__"} -_async_magics = _async_method_magics | _sync_async_magics - -_all_sync_magics = _magics | _non_defaults -_all_magics = _all_sync_magics | _async_magics - -_unsupported_magics = { - '__getattr__', '__setattr__', - '__init__', '__new__', '__prepare__', - '__instancecheck__', '__subclasscheck__', - '__del__' -} - -_calculate_return_value = { - '__hash__': lambda self: object.__hash__(self), - '__str__': lambda self: object.__str__(self), - '__sizeof__': lambda self: object.__sizeof__(self), - '__fspath__': lambda self: f"{type(self).__name__}/{self._extract_mock_name()}/{id(self)}", -} - -_return_values = { - '__lt__': NotImplemented, - '__gt__': NotImplemented, - '__le__': NotImplemented, - '__ge__': NotImplemented, - '__int__': 1, - '__contains__': False, - '__len__': 0, - '__exit__': False, - '__complex__': 1j, - '__float__': 1.0, - '__bool__': True, - '__index__': 1, - '__aexit__': False, -} - - -def _get_eq(self): - def __eq__(other): - ret_val = self.__eq__._mock_return_value - if ret_val is not DEFAULT: - return ret_val - if self is other: - return True - return NotImplemented - return __eq__ - -def _get_ne(self): - def __ne__(other): - if self.__ne__._mock_return_value is not DEFAULT: - return DEFAULT - if self is other: - return False - return NotImplemented - return __ne__ - -def _get_iter(self): - def __iter__(): - ret_val = self.__iter__._mock_return_value - if ret_val is DEFAULT: - return iter([]) - # if ret_val was already an iterator, then calling iter on it should - # return the iterator unchanged - return iter(ret_val) - return __iter__ - -def _get_async_iter(self): - def __aiter__(): - ret_val = self.__aiter__._mock_return_value - if ret_val is DEFAULT: - 
return _AsyncIterator(iter([])) - return _AsyncIterator(iter(ret_val)) - return __aiter__ - -_side_effect_methods = { - '__eq__': _get_eq, - '__ne__': _get_ne, - '__iter__': _get_iter, - '__aiter__': _get_async_iter -} - - - -def _set_return_value(mock, method, name): - fixed = _return_values.get(name, DEFAULT) - if fixed is not DEFAULT: - method.return_value = fixed - return - - return_calculator = _calculate_return_value.get(name) - if return_calculator is not None: - return_value = return_calculator(mock) - method.return_value = return_value - return - - side_effector = _side_effect_methods.get(name) - if side_effector is not None: - method.side_effect = side_effector(mock) - - - -class MagicMixin(Base): - def __init__(self, /, *args, **kw): - self._mock_set_magics() # make magic work for kwargs in init - _safe_super(MagicMixin, self).__init__(*args, **kw) - self._mock_set_magics() # fix magic broken by upper level init - - - def _mock_set_magics(self): - orig_magics = _magics | _async_method_magics - these_magics = orig_magics - - if getattr(self, "_mock_methods", None) is not None: - these_magics = orig_magics.intersection(self._mock_methods) - - remove_magics = set() - remove_magics = orig_magics - these_magics - - for entry in remove_magics: - if entry in type(self).__dict__: - # remove unneeded magic methods - delattr(self, entry) - - # don't overwrite existing attributes if called a second time - these_magics = these_magics - set(type(self).__dict__) - - _type = type(self) - for entry in these_magics: - setattr(_type, entry, MagicProxy(entry, self)) - - - -class NonCallableMagicMock(MagicMixin, NonCallableMock): - """A version of `MagicMock` that isn't callable.""" - def mock_add_spec(self, spec, spec_set=False): - """Add a spec to a mock. `spec` can either be an object or a - list of strings. Only attributes on the `spec` can be fetched as - attributes from the mock. - - If `spec_set` is True then only attributes on the spec can be set.""" - self._mock_add_spec(spec, spec_set) - self._mock_set_magics() - - -class AsyncMagicMixin(MagicMixin): - pass - - -class MagicMock(MagicMixin, Mock): - """ - MagicMock is a subclass of Mock with default implementations - of most of the magic methods. You can use MagicMock without having to - configure the magic methods yourself. - - If you use the `spec` or `spec_set` arguments then *only* magic - methods that exist in the spec will be created. - - Attributes and the return value of a `MagicMock` will also be `MagicMocks`. - """ - def mock_add_spec(self, spec, spec_set=False): - """Add a spec to a mock. `spec` can either be an object or a - list of strings. Only attributes on the `spec` can be fetched as - attributes from the mock. - - If `spec_set` is True then only attributes on the spec can be set.""" - self._mock_add_spec(spec, spec_set) - self._mock_set_magics() - - def reset_mock(self, /, *args, return_value: bool = False, **kwargs): - if ( - return_value - and self._mock_name - and _is_magic(self._mock_name) - ): - # Don't reset return values for magic methods, - # otherwise `m.__str__` will start - # to return `MagicMock` instances, instead of `str` instances. 
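`_set_return_value` and the tables above give `MagicMock` its out-of-the-box magic-method defaults; configured values are re-wrapped by helpers such as `_get_iter`. A sketch of the resulting behaviour:

    from unittest.mock import MagicMock

    m = MagicMock()
    assert len(m) == 0               # __len__ is preconfigured to 0
    assert int(m) == 1 and bool(m)   # __int__ -> 1, __bool__ -> True
    assert list(m) == []             # __iter__ defaults to iter([])

    m.__iter__.return_value = [1, 2]
    assert list(m) == [1, 2]         # _get_iter re-wraps the configured value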
- return_value = False - super().reset_mock(*args, return_value=return_value, **kwargs) - - -class MagicProxy(Base): - def __init__(self, name, parent): - self.name = name - self.parent = parent - - def create_mock(self): - entry = self.name - parent = self.parent - m = parent._get_child_mock(name=entry, _new_name=entry, - _new_parent=parent) - setattr(parent, entry, m) - _set_return_value(parent, m, entry) - return m - - def __get__(self, obj, _type=None): - return self.create_mock() - - -try: - _CODE_SIG = inspect.signature(partial(CodeType.__init__, None)) - _CODE_ATTRS = dir(CodeType) -except ValueError: - _CODE_SIG = None - - -class AsyncMockMixin(Base): - await_count = _delegating_property('await_count') - await_args = _delegating_property('await_args') - await_args_list = _delegating_property('await_args_list') - - def __init__(self, /, *args, **kwargs): - super().__init__(*args, **kwargs) - # iscoroutinefunction() checks _is_coroutine property to say if an - # object is a coroutine. Without this check it looks to see if it is a - # function/method, which in this case it is not (since it is an - # AsyncMock). - # It is set through __dict__ because when spec_set is True, this - # attribute is likely undefined. - self.__dict__['_is_coroutine'] = asyncio.coroutines._is_coroutine - self.__dict__['_mock_await_count'] = 0 - self.__dict__['_mock_await_args'] = None - self.__dict__['_mock_await_args_list'] = _CallList() - if _CODE_SIG: - code_mock = NonCallableMock(spec_set=_CODE_ATTRS) - code_mock.__dict__["_spec_class"] = CodeType - code_mock.__dict__["_spec_signature"] = _CODE_SIG - else: - code_mock = NonCallableMock(spec_set=CodeType) - code_mock.co_flags = ( - inspect.CO_COROUTINE - + inspect.CO_VARARGS - + inspect.CO_VARKEYWORDS - ) - code_mock.co_argcount = 0 - code_mock.co_varnames = ('args', 'kwargs') - code_mock.co_posonlyargcount = 0 - code_mock.co_kwonlyargcount = 0 - self.__dict__['__code__'] = code_mock - self.__dict__['__name__'] = 'AsyncMock' - self.__dict__['__defaults__'] = tuple() - self.__dict__['__kwdefaults__'] = {} - self.__dict__['__annotations__'] = None - - async def _execute_mock_call(self, /, *args, **kwargs): - # This is nearly just like super(), except for special handling - # of coroutines - - _call = _Call((args, kwargs), two=True) - self.await_count += 1 - self.await_args = _call - self.await_args_list.append(_call) - - effect = self.side_effect - if effect is not None: - if _is_exception(effect): - raise effect - elif not _callable(effect): - try: - result = next(effect) - except StopIteration: - # It is impossible to propagate a StopIteration - # through coroutines because of PEP 479 - raise StopAsyncIteration - if _is_exception(result): - raise result - elif iscoroutinefunction(effect): - result = await effect(*args, **kwargs) - else: - result = effect(*args, **kwargs) - - if result is not DEFAULT: - return result - - if self._mock_return_value is not DEFAULT: - return self.return_value - - if self._mock_wraps is not None: - if iscoroutinefunction(self._mock_wraps): - return await self._mock_wraps(*args, **kwargs) - return self._mock_wraps(*args, **kwargs) - - return self.return_value - - def assert_awaited(self): - """ - Assert that the mock was awaited at least once. - """ - if self.await_count == 0: - msg = f"Expected {self._mock_name or 'mock'} to have been awaited." - raise AssertionError(msg) - - def assert_awaited_once(self): - """ - Assert that the mock was awaited exactly once. 
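The await-tracking attributes above mirror the call-tracking ones, so the `assert_awaited*` family works like `assert_called*` but counts actual awaits. A minimal sketch:

    import asyncio
    from unittest.mock import AsyncMock

    async def main():
        m = AsyncMock(return_value=7)
        assert await m(1, x=2) == 7
        m.assert_awaited_once_with(1, x=2)
        assert m.await_count == 1

    asyncio.run(main())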
- """ - if not self.await_count == 1: - msg = (f"Expected {self._mock_name or 'mock'} to have been awaited once." - f" Awaited {self.await_count} times.") - raise AssertionError(msg) - - def assert_awaited_with(self, /, *args, **kwargs): - """ - Assert that the last await was with the specified arguments. - """ - if self.await_args is None: - expected = self._format_mock_call_signature(args, kwargs) - raise AssertionError(f'Expected await: {expected}\nNot awaited') - - def _error_message(): - msg = self._format_mock_failure_message(args, kwargs, action='await') - return msg - - expected = self._call_matcher(_Call((args, kwargs), two=True)) - actual = self._call_matcher(self.await_args) - if actual != expected: - cause = expected if isinstance(expected, Exception) else None - raise AssertionError(_error_message()) from cause - - def assert_awaited_once_with(self, /, *args, **kwargs): - """ - Assert that the mock was awaited exactly once and with the specified - arguments. - """ - if not self.await_count == 1: - msg = (f"Expected {self._mock_name or 'mock'} to have been awaited once." - f" Awaited {self.await_count} times.") - raise AssertionError(msg) - return self.assert_awaited_with(*args, **kwargs) - - def assert_any_await(self, /, *args, **kwargs): - """ - Assert the mock has ever been awaited with the specified arguments. - """ - expected = self._call_matcher(_Call((args, kwargs), two=True)) - cause = expected if isinstance(expected, Exception) else None - actual = [self._call_matcher(c) for c in self.await_args_list] - if cause or expected not in _AnyComparer(actual): - expected_string = self._format_mock_call_signature(args, kwargs) - raise AssertionError( - '%s await not found' % expected_string - ) from cause - - def assert_has_awaits(self, calls, any_order=False): - """ - Assert the mock has been awaited with the specified calls. - The :attr:`await_args_list` list is checked for the awaits. - - If `any_order` is False (the default) then the awaits must be - sequential. There can be extra calls before or after the - specified awaits. - - If `any_order` is True then the awaits can be in any order, but - they must all appear in :attr:`await_args_list`. - """ - expected = [self._call_matcher(c) for c in calls] - cause = next((e for e in expected if isinstance(e, Exception)), None) - all_awaits = _CallList(self._call_matcher(c) for c in self.await_args_list) - if not any_order: - if expected not in all_awaits: - if cause is None: - problem = 'Awaits not found.' - else: - problem = ('Error processing expected awaits.\n' - 'Errors: {}').format( - [e if isinstance(e, Exception) else None - for e in expected]) - raise AssertionError( - f'{problem}\n' - f'Expected: {_CallList(calls)}\n' - f'Actual: {self.await_args_list}' - ) from cause - return - - all_awaits = list(all_awaits) - - not_found = [] - for kall in expected: - try: - all_awaits.remove(kall) - except ValueError: - not_found.append(kall) - if not_found: - raise AssertionError( - '%r not all found in await list' % (tuple(not_found),) - ) from cause - - def assert_not_awaited(self): - """ - Assert that the mock was never awaited. - """ - if self.await_count != 0: - msg = (f"Expected {self._mock_name or 'mock'} to not have been awaited." 
- f" Awaited {self.await_count} times.") - raise AssertionError(msg) - - def reset_mock(self, /, *args, **kwargs): - """ - See :func:`.Mock.reset_mock()` - """ - super().reset_mock(*args, **kwargs) - self.await_count = 0 - self.await_args = None - self.await_args_list = _CallList() - - -class AsyncMock(AsyncMockMixin, AsyncMagicMixin, Mock): - """ - Enhance :class:`Mock` with features allowing to mock - an async function. - - The :class:`AsyncMock` object will behave so the object is - recognized as an async function, and the result of a call is an awaitable: - - >>> mock = AsyncMock() - >>> iscoroutinefunction(mock) - True - >>> inspect.isawaitable(mock()) - True - - - The result of ``mock()`` is an async function which will have the outcome - of ``side_effect`` or ``return_value``: - - - if ``side_effect`` is a function, the async function will return the - result of that function, - - if ``side_effect`` is an exception, the async function will raise the - exception, - - if ``side_effect`` is an iterable, the async function will return the - next value of the iterable, however, if the sequence of result is - exhausted, ``StopIteration`` is raised immediately, - - if ``side_effect`` is not defined, the async function will return the - value defined by ``return_value``, hence, by default, the async function - returns a new :class:`AsyncMock` object. - - If the outcome of ``side_effect`` or ``return_value`` is an async function, - the mock async function obtained when the mock object is called will be this - async function itself (and not an async function returning an async - function). - - The test author can also specify a wrapped object with ``wraps``. In this - case, the :class:`Mock` object behavior is the same as with an - :class:`.Mock` object: the wrapped object may have methods - defined as async function functions. - - Based on Martin Richard's asynctest project. - """ - - -class _ANY(object): - "A helper object that compares equal to everything." - - def __eq__(self, other): - return True - - def __ne__(self, other): - return False - - def __repr__(self): - return '' - -ANY = _ANY() - - - -def _format_call_signature(name, args, kwargs): - message = '%s(%%s)' % name - formatted_args = '' - args_string = ', '.join([repr(arg) for arg in args]) - kwargs_string = ', '.join([ - '%s=%r' % (key, value) for key, value in kwargs.items() - ]) - if args_string: - formatted_args = args_string - if kwargs_string: - if formatted_args: - formatted_args += ', ' - formatted_args += kwargs_string - - return message % formatted_args - - - -class _Call(tuple): - """ - A tuple for holding the results of a call to a mock, either in the form - `(args, kwargs)` or `(name, args, kwargs)`. - - If args or kwargs are empty then a call tuple will compare equal to - a tuple without those values. This makes comparisons less verbose:: - - _Call(('name', (), {})) == ('name',) - _Call(('name', (1,), {})) == ('name', (1,)) - _Call(((), {'a': 'b'})) == ({'a': 'b'},) - - The `_Call` object provides a useful shortcut for comparing with call:: - - _Call(((1, 2), {'a': 3})) == call(1, 2, a=3) - _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3) - - If the _Call has no name then it will match any name. 
- """ - def __new__(cls, value=(), name='', parent=None, two=False, - from_kall=True): - args = () - kwargs = {} - _len = len(value) - if _len == 3: - name, args, kwargs = value - elif _len == 2: - first, second = value - if isinstance(first, str): - name = first - if isinstance(second, tuple): - args = second - else: - kwargs = second - else: - args, kwargs = first, second - elif _len == 1: - value, = value - if isinstance(value, str): - name = value - elif isinstance(value, tuple): - args = value - else: - kwargs = value - - if two: - return tuple.__new__(cls, (args, kwargs)) - - return tuple.__new__(cls, (name, args, kwargs)) - - - def __init__(self, value=(), name=None, parent=None, two=False, - from_kall=True): - self._mock_name = name - self._mock_parent = parent - self._mock_from_kall = from_kall - - - def __eq__(self, other): - try: - len_other = len(other) - except TypeError: - return NotImplemented - - self_name = '' - if len(self) == 2: - self_args, self_kwargs = self - else: - self_name, self_args, self_kwargs = self - - if (getattr(self, '_mock_parent', None) and getattr(other, '_mock_parent', None) - and self._mock_parent != other._mock_parent): - return False - - other_name = '' - if len_other == 0: - other_args, other_kwargs = (), {} - elif len_other == 3: - other_name, other_args, other_kwargs = other - elif len_other == 1: - value, = other - if isinstance(value, tuple): - other_args = value - other_kwargs = {} - elif isinstance(value, str): - other_name = value - other_args, other_kwargs = (), {} - else: - other_args = () - other_kwargs = value - elif len_other == 2: - # could be (name, args) or (name, kwargs) or (args, kwargs) - first, second = other - if isinstance(first, str): - other_name = first - if isinstance(second, tuple): - other_args, other_kwargs = second, {} - else: - other_args, other_kwargs = (), second - else: - other_args, other_kwargs = first, second - else: - return False - - if self_name and other_name != self_name: - return False - - # this order is important for ANY to work! 
- return (other_args, other_kwargs) == (self_args, self_kwargs) - - - __ne__ = object.__ne__ - - - def __call__(self, /, *args, **kwargs): - if self._mock_name is None: - return _Call(('', args, kwargs), name='()') - - name = self._mock_name + '()' - return _Call((self._mock_name, args, kwargs), name=name, parent=self) - - - def __getattr__(self, attr): - if self._mock_name is None: - return _Call(name=attr, from_kall=False) - name = '%s.%s' % (self._mock_name, attr) - return _Call(name=name, parent=self, from_kall=False) - - - def __getattribute__(self, attr): - if attr in tuple.__dict__: - raise AttributeError - return tuple.__getattribute__(self, attr) - - - def _get_call_arguments(self): - if len(self) == 2: - args, kwargs = self - else: - name, args, kwargs = self - - return args, kwargs - - @property - def args(self): - return self._get_call_arguments()[0] - - @property - def kwargs(self): - return self._get_call_arguments()[1] - - def __repr__(self): - if not self._mock_from_kall: - name = self._mock_name or 'call' - if name.startswith('()'): - name = 'call%s' % name - return name - - if len(self) == 2: - name = 'call' - args, kwargs = self - else: - name, args, kwargs = self - if not name: - name = 'call' - elif not name.startswith('()'): - name = 'call.%s' % name - else: - name = 'call%s' % name - return _format_call_signature(name, args, kwargs) - - - def call_list(self): - """For a call object that represents multiple calls, `call_list` - returns a list of all the intermediate calls as well as the - final call.""" - vals = [] - thing = self - while thing is not None: - if thing._mock_from_kall: - vals.append(thing) - thing = thing._mock_parent - return _CallList(reversed(vals)) - - -call = _Call(from_kall=False) - - -def create_autospec(spec, spec_set=False, instance=False, _parent=None, - _name=None, *, unsafe=False, **kwargs): - """Create a mock object using another object as a spec. Attributes on the - mock will use the corresponding attribute on the `spec` object as their - spec. - - Functions or methods being mocked will have their arguments checked - to check that they are called with the correct signature. - - If `spec_set` is True then attempting to set attributes that don't exist - on the spec object will raise an `AttributeError`. - - If a class is used as a spec then the return value of the mock (the - instance of the class) will have the same spec. You can use a class as the - spec for an instance object by passing `instance=True`. The returned mock - will only be callable if instances of the mock are callable. - - `create_autospec` will raise a `RuntimeError` if passed some common - misspellings of the arguments autospec and spec_set. Pass the argument - `unsafe` with the value True to disable that check. - - `create_autospec` also takes arbitrary keyword arguments that are passed to - the constructor of the created mock.""" - if _is_list(spec): - # can't pass a list instance to the mock constructor as it will be - # interpreted as a list of strings - spec = type(spec) - - is_type = isinstance(spec, type) - if _is_instance_mock(spec): - raise InvalidSpecError(f'Cannot autospec a Mock object. 
' - f'[object={spec!r}]') - is_async_func = _is_async_func(spec) - _kwargs = {'spec': spec} - if spec_set: - _kwargs = {'spec_set': spec} - elif spec is None: - # None we mock with a normal mock without a spec - _kwargs = {} - if _kwargs and instance: - _kwargs['_spec_as_instance'] = True - if not unsafe: - _check_spec_arg_typos(kwargs) - - _name = kwargs.pop('name', _name) - _new_name = _name - if _parent is None: - # for a top level object no _new_name should be set - _new_name = '' - - _kwargs.update(kwargs) - - Klass = MagicMock - if inspect.isdatadescriptor(spec): - # descriptors don't have a spec - # because we don't know what type they return - _kwargs = {} - elif is_async_func: - if instance: - raise RuntimeError("Instance can not be True when create_autospec " - "is mocking an async function") - Klass = AsyncMock - elif not _callable(spec): - Klass = NonCallableMagicMock - elif is_type and instance and not _instance_callable(spec): - Klass = NonCallableMagicMock - - mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name, - name=_name, **_kwargs) - - if isinstance(spec, FunctionTypes): - # should only happen at the top level because we don't - # recurse for functions - if is_async_func: - mock = _set_async_signature(mock, spec) - else: - mock = _set_signature(mock, spec) - else: - _check_signature(spec, mock, is_type, instance) - - if _parent is not None and not instance: - _parent._mock_children[_name] = mock - - # Pop wraps from kwargs because it must not be passed to configure_mock. - wrapped = kwargs.pop('wraps', None) - if is_type and not instance and 'return_value' not in kwargs: - mock.return_value = create_autospec(spec, spec_set, instance=True, - _name='()', _parent=mock, - wraps=wrapped) - - for entry in dir(spec): - if _is_magic(entry): - # MagicMock already does the useful magic methods for us - continue - - # XXXX do we need a better way of getting attributes without - # triggering code execution (?) Probably not - we need the actual - # object to mock it so we would rather trigger a property than mock - # the property descriptor. Likewise we want to mock out dynamically - # provided attributes. - # XXXX what about attributes that raise exceptions other than - # AttributeError on being fetched? - # we could be resilient against it, or catch and propagate the - # exception when the attribute is fetched from the mock - try: - original = getattr(spec, entry) - except AttributeError: - continue - - child_kwargs = {'spec': original} - # Wrap child attributes also. - if wrapped and hasattr(wrapped, entry): - child_kwargs.update(wraps=original) - if spec_set: - child_kwargs = {'spec_set': original} - - if not isinstance(original, FunctionTypes): - new = _SpecState(original, spec_set, mock, entry, instance) - mock._mock_children[entry] = new - else: - parent = mock - if isinstance(spec, FunctionTypes): - parent = mock.mock - - skipfirst = _must_skip(spec, entry, is_type) - child_kwargs['_eat_self'] = skipfirst - if iscoroutinefunction(original): - child_klass = AsyncMock - else: - child_klass = MagicMock - new = child_klass(parent=parent, name=entry, _new_name=entry, - _new_parent=parent, **child_kwargs) - mock._mock_children[entry] = new - new.return_value = child_klass() - _check_signature(original, new, skipfirst=skipfirst) - - # so functions created with _set_signature become instance attributes, - # *plus* their underlying mock exists in _mock_children of the parent - # mock. 
Adding to _mock_children may be unnecessary where we are also - # setting as an instance attribute? - if isinstance(new, FunctionTypes): - setattr(mock, entry, new) - # kwargs are passed with respect to the parent mock so, they are not used - # for creating return_value of the parent mock. So, this condition - # should be true only for the parent mock if kwargs are given. - if _is_instance_mock(mock) and kwargs: - mock.configure_mock(**kwargs) - - return mock - - -def _must_skip(spec, entry, is_type): - """ - Return whether we should skip the first argument on spec's `entry` - attribute. - """ - if not isinstance(spec, type): - if entry in getattr(spec, '__dict__', {}): - # instance attribute - shouldn't skip - return False - spec = spec.__class__ - - for klass in spec.__mro__: - result = klass.__dict__.get(entry, DEFAULT) - if result is DEFAULT: - continue - if isinstance(result, (staticmethod, classmethod)): - return False - elif isinstance(result, FunctionTypes): - # Normal method => skip if looked up on type - # (if looked up on instance, self is already skipped) - return is_type - else: - return False - - # function is a dynamically provided attribute - return is_type - - -class _SpecState(object): - - def __init__(self, spec, spec_set=False, parent=None, - name=None, ids=None, instance=False): - self.spec = spec - self.ids = ids - self.spec_set = spec_set - self.parent = parent - self.instance = instance - self.name = name - - -FunctionTypes = ( - # python function - type(create_autospec), - # instance method - type(ANY.__eq__), -) - - -file_spec = None -open_spec = None - - -def _to_stream(read_data): - if isinstance(read_data, bytes): - return io.BytesIO(read_data) - else: - return io.StringIO(read_data) - - -def mock_open(mock=None, read_data=''): - """ - A helper function to create a mock to replace the use of `open`. It works - for `open` called directly or used as a context manager. - - The `mock` argument is the mock object to configure. If `None` (the - default) then a `MagicMock` will be created for you, with the API limited - to methods or attributes available on standard file handles. - - `read_data` is a string for the `read`, `readline` and `readlines` of the - file handle to return. This is an empty string by default. 
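A hedged usage sketch for this helper (not part of the module; the file name is illustrative):

    from unittest.mock import mock_open, patch

    m = mock_open(read_data="line1\nline2\n")
    with patch("builtins.open", m):          # replace the built-in open
        with open("fake.txt") as f:          # returns the mock handle
            assert f.readline() == "line1\n"
            assert f.read() == "line2\n"
    m.assert_called_once_with("fake.txt")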
- """ - _read_data = _to_stream(read_data) - _state = [_read_data, None] - - def _readlines_side_effect(*args, **kwargs): - if handle.readlines.return_value is not None: - return handle.readlines.return_value - return _state[0].readlines(*args, **kwargs) - - def _read_side_effect(*args, **kwargs): - if handle.read.return_value is not None: - return handle.read.return_value - return _state[0].read(*args, **kwargs) - - def _readline_side_effect(*args, **kwargs): - yield from _iter_side_effect() - while True: - yield _state[0].readline(*args, **kwargs) - - def _iter_side_effect(): - if handle.readline.return_value is not None: - while True: - yield handle.readline.return_value - for line in _state[0]: - yield line - - def _next_side_effect(): - if handle.readline.return_value is not None: - return handle.readline.return_value - return next(_state[0]) - - def _exit_side_effect(exctype, excinst, exctb): - handle.close() - - global file_spec - if file_spec is None: - import _io - file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) - - global open_spec - if open_spec is None: - import _io - open_spec = list(set(dir(_io.open))) - if mock is None: - mock = MagicMock(name='open', spec=open_spec) - - handle = MagicMock(spec=file_spec) - handle.__enter__.return_value = handle - - handle.write.return_value = None - handle.read.return_value = None - handle.readline.return_value = None - handle.readlines.return_value = None - - handle.read.side_effect = _read_side_effect - _state[1] = _readline_side_effect() - handle.readline.side_effect = _state[1] - handle.readlines.side_effect = _readlines_side_effect - handle.__iter__.side_effect = _iter_side_effect - handle.__next__.side_effect = _next_side_effect - handle.__exit__.side_effect = _exit_side_effect - - def reset_data(*args, **kwargs): - _state[0] = _to_stream(read_data) - if handle.readline.side_effect == _state[1]: - # Only reset the side effect if the user hasn't overridden it. - _state[1] = _readline_side_effect() - handle.readline.side_effect = _state[1] - return DEFAULT - - mock.side_effect = reset_data - mock.return_value = handle - return mock - - -class PropertyMock(Mock): - """ - A mock intended to be used as a property, or other descriptor, on a class. - `PropertyMock` provides `__get__` and `__set__` methods so you can specify - a return value when it is fetched. - - Fetching a `PropertyMock` instance from an object calls the mock, with - no args. Setting it calls the mock with the value being set. 
- """ - def _get_child_mock(self, /, **kwargs): - return MagicMock(**kwargs) - - def __get__(self, obj, obj_type=None): - return self() - def __set__(self, obj, val): - self(val) - - -_timeout_unset = sentinel.TIMEOUT_UNSET - -class ThreadingMixin(Base): - - DEFAULT_TIMEOUT = None - - def _get_child_mock(self, /, **kw): - if isinstance(kw.get("parent"), ThreadingMixin): - kw["timeout"] = kw["parent"]._mock_wait_timeout - elif isinstance(kw.get("_new_parent"), ThreadingMixin): - kw["timeout"] = kw["_new_parent"]._mock_wait_timeout - return super()._get_child_mock(**kw) - - def __init__(self, *args, timeout=_timeout_unset, **kwargs): - super().__init__(*args, **kwargs) - if timeout is _timeout_unset: - timeout = self.DEFAULT_TIMEOUT - self.__dict__["_mock_event"] = threading.Event() # Event for any call - self.__dict__["_mock_calls_events"] = [] # Events for each of the calls - self.__dict__["_mock_calls_events_lock"] = threading.Lock() - self.__dict__["_mock_wait_timeout"] = timeout - - def reset_mock(self, /, *args, **kwargs): - """ - See :func:`.Mock.reset_mock()` - """ - super().reset_mock(*args, **kwargs) - self.__dict__["_mock_event"] = threading.Event() - self.__dict__["_mock_calls_events"] = [] - - def __get_event(self, expected_args, expected_kwargs): - with self._mock_calls_events_lock: - for args, kwargs, event in self._mock_calls_events: - if (args, kwargs) == (expected_args, expected_kwargs): - return event - new_event = threading.Event() - self._mock_calls_events.append((expected_args, expected_kwargs, new_event)) - return new_event - - def _mock_call(self, *args, **kwargs): - ret_value = super()._mock_call(*args, **kwargs) - - call_event = self.__get_event(args, kwargs) - call_event.set() - - self._mock_event.set() - - return ret_value - - def wait_until_called(self, *, timeout=_timeout_unset): - """Wait until the mock object is called. - - `timeout` - time to wait for in seconds, waits forever otherwise. - Defaults to the constructor provided timeout. - Use None to block undefinetively. - """ - if timeout is _timeout_unset: - timeout = self._mock_wait_timeout - if not self._mock_event.wait(timeout=timeout): - msg = (f"{self._mock_name or 'mock'} was not called before" - f" timeout({timeout}).") - raise AssertionError(msg) - - def wait_until_any_call_with(self, *args, **kwargs): - """Wait until the mock object is called with given args. - - Waits for the timeout in seconds provided in the constructor. - """ - event = self.__get_event(args, kwargs) - if not event.wait(timeout=self._mock_wait_timeout): - expected_string = self._format_mock_call_signature(args, kwargs) - raise AssertionError(f'{expected_string} call not found') - - -class ThreadingMock(ThreadingMixin, MagicMixin, Mock): - """ - A mock that can be used to wait until on calls happening - in a different thread. - - The constructor can take a `timeout` argument which - controls the timeout in seconds for all `wait` calls of the mock. - - You can change the default timeout of all instances via the - `ThreadingMock.DEFAULT_TIMEOUT` attribute. - - If no timeout is set, it will block undefinetively. - """ - pass - - -def seal(mock): - """Disable the automatic generation of child mocks. - - Given an input Mock, seals it to ensure no further mocks will be generated - when accessing an attribute that was not already defined. 
- - The operation recursively seals the mock passed in, meaning that - the mock itself, any mocks generated by accessing one of its attributes, - and all assigned mocks without a name or spec will be sealed. - """ - mock._mock_sealed = True - for attr in dir(mock): - try: - m = getattr(mock, attr) - except AttributeError: - continue - if not isinstance(m, NonCallableMock): - continue - if isinstance(m._mock_children.get(attr), _SpecState): - continue - if m._mock_new_parent is mock: - seal(m) - - -class _AsyncIterator: - """ - Wraps an iterator in an asynchronous iterator. - """ - def __init__(self, iterator): - self.iterator = iterator - code_mock = NonCallableMock(spec_set=CodeType) - code_mock.co_flags = inspect.CO_ITERABLE_COROUTINE - self.__dict__['__code__'] = code_mock - - async def __anext__(self): - try: - return next(self.iterator) - except StopIteration: - pass - raise StopAsyncIteration diff --git a/Python313_13_x86_Template/Lib/unittest/result.py b/Python313_13_x86_Template/Lib/unittest/result.py deleted file mode 100644 index 3ace0a5b..00000000 --- a/Python313_13_x86_Template/Lib/unittest/result.py +++ /dev/null @@ -1,256 +0,0 @@ -"""Test result object""" - -import io -import sys -import traceback - -from . import util -from functools import wraps - -__unittest = True - -def failfast(method): - @wraps(method) - def inner(self, *args, **kw): - if getattr(self, 'failfast', False): - self.stop() - return method(self, *args, **kw) - return inner - -STDOUT_LINE = '\nStdout:\n%s' -STDERR_LINE = '\nStderr:\n%s' - - -class TestResult(object): - """Holder for test result information. - - Test results are automatically managed by the TestCase and TestSuite - classes, and do not need to be explicitly manipulated by writers of tests. - - Each instance holds the total number of tests run, and collections of - failures and errors that occurred among those test runs. The collections - contain tuples of (testcase, exceptioninfo), where exceptioninfo is the - formatted traceback of the error that occurred. - """ - _previousTestClass = None - _testRunEntered = False - _moduleSetUpFailed = False - def __init__(self, stream=None, descriptions=None, verbosity=None): - self.failfast = False - self.failures = [] - self.errors = [] - self.testsRun = 0 - self.skipped = [] - self.expectedFailures = [] - self.unexpectedSuccesses = [] - self.collectedDurations = [] - self.shouldStop = False - self.buffer = False - self.tb_locals = False - self._stdout_buffer = None - self._stderr_buffer = None - self._original_stdout = sys.stdout - self._original_stderr = sys.stderr - self._mirrorOutput = False - - def printErrors(self): - "Called by TestRunner after test run" - - def startTest(self, test): - "Called when the given test is about to be run" - self.testsRun += 1 - self._mirrorOutput = False - self._setupStdout() - - def _setupStdout(self): - if self.buffer: - if self._stderr_buffer is None: - self._stderr_buffer = io.StringIO() - self._stdout_buffer = io.StringIO() - sys.stdout = self._stdout_buffer - sys.stderr = self._stderr_buffer - - def startTestRun(self): - """Called once before any tests are executed. - - See startTest for a method called before each test. 
- """ - - def stopTest(self, test): - """Called when the given test has been run""" - self._restoreStdout() - self._mirrorOutput = False - - def _restoreStdout(self): - if self.buffer: - if self._mirrorOutput: - output = sys.stdout.getvalue() - error = sys.stderr.getvalue() - if output: - if not output.endswith('\n'): - output += '\n' - self._original_stdout.write(STDOUT_LINE % output) - if error: - if not error.endswith('\n'): - error += '\n' - self._original_stderr.write(STDERR_LINE % error) - - sys.stdout = self._original_stdout - sys.stderr = self._original_stderr - self._stdout_buffer.seek(0) - self._stdout_buffer.truncate() - self._stderr_buffer.seek(0) - self._stderr_buffer.truncate() - - def stopTestRun(self): - """Called once after all tests are executed. - - See stopTest for a method called after each test. - """ - - @failfast - def addError(self, test, err): - """Called when an error has occurred. 'err' is a tuple of values as - returned by sys.exc_info(). - """ - self.errors.append((test, self._exc_info_to_string(err, test))) - self._mirrorOutput = True - - @failfast - def addFailure(self, test, err): - """Called when an error has occurred. 'err' is a tuple of values as - returned by sys.exc_info().""" - self.failures.append((test, self._exc_info_to_string(err, test))) - self._mirrorOutput = True - - def addSubTest(self, test, subtest, err): - """Called at the end of a subtest. - 'err' is None if the subtest ended successfully, otherwise it's a - tuple of values as returned by sys.exc_info(). - """ - # By default, we don't do anything with successful subtests, but - # more sophisticated test results might want to record them. - if err is not None: - if getattr(self, 'failfast', False): - self.stop() - if issubclass(err[0], test.failureException): - errors = self.failures - else: - errors = self.errors - errors.append((subtest, self._exc_info_to_string(err, test))) - self._mirrorOutput = True - - def addSuccess(self, test): - "Called when a test has completed successfully" - pass - - def addSkip(self, test, reason): - """Called when a test is skipped.""" - self.skipped.append((test, reason)) - - def addExpectedFailure(self, test, err): - """Called when an expected failure/error occurred.""" - self.expectedFailures.append( - (test, self._exc_info_to_string(err, test))) - - @failfast - def addUnexpectedSuccess(self, test): - """Called when a test was expected to fail, but succeed.""" - self.unexpectedSuccesses.append(test) - - def addDuration(self, test, elapsed): - """Called when a test finished to run, regardless of its outcome. - *test* is the test case corresponding to the test method. - *elapsed* is the time represented in seconds, and it includes the - execution of cleanup functions. - """ - # support for a TextTestRunner using an old TestResult class - if hasattr(self, "collectedDurations"): - # Pass test repr and not the test object itself to avoid resources leak - self.collectedDurations.append((str(test), elapsed)) - - def wasSuccessful(self): - """Tells whether or not this result was a success.""" - # The hasattr check is for test_result's OldResult test. That - # way this method works on objects that lack the attribute. - # (where would such result instances come from? old stored pickles?) 
- return ((len(self.failures) == len(self.errors) == 0) and - (not hasattr(self, 'unexpectedSuccesses') or - len(self.unexpectedSuccesses) == 0)) - - def stop(self): - """Indicates that the tests should be aborted.""" - self.shouldStop = True - - def _exc_info_to_string(self, err, test): - """Converts a sys.exc_info()-style tuple of values into a string.""" - exctype, value, tb = err - tb = self._clean_tracebacks(exctype, value, tb, test) - tb_e = traceback.TracebackException( - exctype, value, tb, - capture_locals=self.tb_locals, compact=True) - msgLines = list(tb_e.format()) - - if self.buffer: - output = sys.stdout.getvalue() - error = sys.stderr.getvalue() - if output: - if not output.endswith('\n'): - output += '\n' - msgLines.append(STDOUT_LINE % output) - if error: - if not error.endswith('\n'): - error += '\n' - msgLines.append(STDERR_LINE % error) - return ''.join(msgLines) - - def _clean_tracebacks(self, exctype, value, tb, test): - ret = None - first = True - excs = [(exctype, value, tb)] - seen = {id(value)} # Detect loops in chained exceptions. - while excs: - (exctype, value, tb) = excs.pop() - # Skip test runner traceback levels - while tb and self._is_relevant_tb_level(tb): - tb = tb.tb_next - - # Skip assert*() traceback levels - if exctype is test.failureException: - self._remove_unittest_tb_frames(tb) - - if first: - ret = tb - first = False - else: - value.__traceback__ = tb - - if value is not None: - for c in (value.__cause__, value.__context__): - if c is not None and id(c) not in seen: - excs.append((type(c), c, c.__traceback__)) - seen.add(id(c)) - return ret - - def _is_relevant_tb_level(self, tb): - return '__unittest' in tb.tb_frame.f_globals - - def _remove_unittest_tb_frames(self, tb): - '''Truncates usercode tb at the first unittest frame. - - If the first frame of the traceback is in user code, - the prefix up to the first unittest frame is returned. - If the first frame is already in the unittest module, - the traceback is not modified. - ''' - prev = None - while tb and not self._is_relevant_tb_level(tb): - prev = tb - tb = tb.tb_next - if prev is not None: - prev.tb_next = None - - def __repr__(self): - return ("<%s run=%i errors=%i failures=%i>" % - (util.strclass(self.__class__), self.testsRun, len(self.errors), - len(self.failures))) diff --git a/Python313_13_x86_Template/Lib/unittest/runner.py b/Python313_13_x86_Template/Lib/unittest/runner.py deleted file mode 100644 index 2bcadf0c..00000000 --- a/Python313_13_x86_Template/Lib/unittest/runner.py +++ /dev/null @@ -1,292 +0,0 @@ -"""Running tests""" - -import sys -import time -import warnings - -from . import result -from .case import _SubTest -from .signals import registerResult - -__unittest = True - - -class _WritelnDecorator(object): - """Used to decorate file-like objects with a handy 'writeln' method""" - def __init__(self,stream): - self.stream = stream - - def __getattr__(self, attr): - if attr in ('stream', '__getstate__'): - raise AttributeError(attr) - return getattr(self.stream,attr) - - def writeln(self, arg=None): - if arg: - self.write(arg) - self.write('\n') # text-mode streams translate to \r\n if needed - - -class TextTestResult(result.TestResult): - """A test result class that can print formatted text results to a stream. - - Used by TextTestRunner. - """ - separator1 = '=' * 70 - separator2 = '-' * 70 - - def __init__(self, stream, descriptions, verbosity, *, durations=None): - """Construct a TextTestResult. 
Subclasses should accept **kwargs - to ensure compatibility as the interface changes.""" - super(TextTestResult, self).__init__(stream, descriptions, verbosity) - self.stream = stream - self.showAll = verbosity > 1 - self.dots = verbosity == 1 - self.descriptions = descriptions - self._newline = True - self.durations = durations - - def getDescription(self, test): - doc_first_line = test.shortDescription() - if self.descriptions and doc_first_line: - return '\n'.join((str(test), doc_first_line)) - else: - return str(test) - - def startTest(self, test): - super(TextTestResult, self).startTest(test) - if self.showAll: - self.stream.write(self.getDescription(test)) - self.stream.write(" ... ") - self.stream.flush() - self._newline = False - - def _write_status(self, test, status): - is_subtest = isinstance(test, _SubTest) - if is_subtest or self._newline: - if not self._newline: - self.stream.writeln() - if is_subtest: - self.stream.write(" ") - self.stream.write(self.getDescription(test)) - self.stream.write(" ... ") - self.stream.writeln(status) - self.stream.flush() - self._newline = True - - def addSubTest(self, test, subtest, err): - if err is not None: - if self.showAll: - if issubclass(err[0], subtest.failureException): - self._write_status(subtest, "FAIL") - else: - self._write_status(subtest, "ERROR") - elif self.dots: - if issubclass(err[0], subtest.failureException): - self.stream.write('F') - else: - self.stream.write('E') - self.stream.flush() - super(TextTestResult, self).addSubTest(test, subtest, err) - - def addSuccess(self, test): - super(TextTestResult, self).addSuccess(test) - if self.showAll: - self._write_status(test, "ok") - elif self.dots: - self.stream.write('.') - self.stream.flush() - - def addError(self, test, err): - super(TextTestResult, self).addError(test, err) - if self.showAll: - self._write_status(test, "ERROR") - elif self.dots: - self.stream.write('E') - self.stream.flush() - - def addFailure(self, test, err): - super(TextTestResult, self).addFailure(test, err) - if self.showAll: - self._write_status(test, "FAIL") - elif self.dots: - self.stream.write('F') - self.stream.flush() - - def addSkip(self, test, reason): - super(TextTestResult, self).addSkip(test, reason) - if self.showAll: - self._write_status(test, "skipped {0!r}".format(reason)) - elif self.dots: - self.stream.write("s") - self.stream.flush() - - def addExpectedFailure(self, test, err): - super(TextTestResult, self).addExpectedFailure(test, err) - if self.showAll: - self.stream.writeln("expected failure") - self.stream.flush() - elif self.dots: - self.stream.write("x") - self.stream.flush() - - def addUnexpectedSuccess(self, test): - super(TextTestResult, self).addUnexpectedSuccess(test) - if self.showAll: - self.stream.writeln("unexpected success") - self.stream.flush() - elif self.dots: - self.stream.write("u") - self.stream.flush() - - def printErrors(self): - if self.dots or self.showAll: - self.stream.writeln() - self.stream.flush() - self.printErrorList('ERROR', self.errors) - self.printErrorList('FAIL', self.failures) - unexpectedSuccesses = getattr(self, 'unexpectedSuccesses', ()) - if unexpectedSuccesses: - self.stream.writeln(self.separator1) - for test in unexpectedSuccesses: - self.stream.writeln(f"UNEXPECTED SUCCESS: {self.getDescription(test)}") - self.stream.flush() - - def printErrorList(self, flavour, errors): - for test, err in errors: - self.stream.writeln(self.separator1) - self.stream.writeln("%s: %s" % (flavour,self.getDescription(test))) - 
self.stream.writeln(self.separator2) - self.stream.writeln("%s" % err) - self.stream.flush() - - -class TextTestRunner(object): - """A test runner class that displays results in textual form. - - It prints out the names of tests as they are run, errors as they - occur, and a summary of the results at the end of the test run. - """ - resultclass = TextTestResult - - def __init__(self, stream=None, descriptions=True, verbosity=1, - failfast=False, buffer=False, resultclass=None, warnings=None, - *, tb_locals=False, durations=None): - """Construct a TextTestRunner. - - Subclasses should accept **kwargs to ensure compatibility as the - interface changes. - """ - if stream is None: - stream = sys.stderr - self.stream = _WritelnDecorator(stream) - self.descriptions = descriptions - self.verbosity = verbosity - self.failfast = failfast - self.buffer = buffer - self.tb_locals = tb_locals - self.durations = durations - self.warnings = warnings - if resultclass is not None: - self.resultclass = resultclass - - def _makeResult(self): - try: - return self.resultclass(self.stream, self.descriptions, - self.verbosity, durations=self.durations) - except TypeError: - # didn't accept the durations argument - return self.resultclass(self.stream, self.descriptions, - self.verbosity) - - def _printDurations(self, result): - if not result.collectedDurations: - return - ls = sorted(result.collectedDurations, key=lambda x: x[1], - reverse=True) - if self.durations > 0: - ls = ls[:self.durations] - self.stream.writeln("Slowest test durations") - if hasattr(result, 'separator2'): - self.stream.writeln(result.separator2) - hidden = False - for test, elapsed in ls: - if self.verbosity < 2 and elapsed < 0.001: - hidden = True - continue - self.stream.writeln("%-10s %s" % ("%.3fs" % elapsed, test)) - if hidden: - self.stream.writeln("\n(durations < 0.001s were hidden; " - "use -v to show these durations)") - else: - self.stream.writeln("") - - def run(self, test): - "Run the given test case or test suite." 
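A short sketch of driving the runner directly (not part of the module; uses only the documented unittest API, and the test case is illustrative):

    import unittest

    class Demo(unittest.TestCase):
        def test_addition(self):
            self.assertEqual(1 + 1, 2)

    suite = unittest.TestLoader().loadTestsFromTestCase(Demo)
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    assert result.wasSuccessful()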
- result = self._makeResult() - registerResult(result) - result.failfast = self.failfast - result.buffer = self.buffer - result.tb_locals = self.tb_locals - with warnings.catch_warnings(): - if self.warnings: - # if self.warnings is set, use it to filter all the warnings - warnings.simplefilter(self.warnings) - startTime = time.perf_counter() - startTestRun = getattr(result, 'startTestRun', None) - if startTestRun is not None: - startTestRun() - try: - test(result) - finally: - stopTestRun = getattr(result, 'stopTestRun', None) - if stopTestRun is not None: - stopTestRun() - stopTime = time.perf_counter() - timeTaken = stopTime - startTime - result.printErrors() - if self.durations is not None: - self._printDurations(result) - - if hasattr(result, 'separator2'): - self.stream.writeln(result.separator2) - - run = result.testsRun - self.stream.writeln("Ran %d test%s in %.3fs" % - (run, run != 1 and "s" or "", timeTaken)) - self.stream.writeln() - - expectedFails = unexpectedSuccesses = skipped = 0 - try: - results = map(len, (result.expectedFailures, - result.unexpectedSuccesses, - result.skipped)) - except AttributeError: - pass - else: - expectedFails, unexpectedSuccesses, skipped = results - - infos = [] - if not result.wasSuccessful(): - self.stream.write("FAILED") - failed, errored = len(result.failures), len(result.errors) - if failed: - infos.append("failures=%d" % failed) - if errored: - infos.append("errors=%d" % errored) - elif run == 0 and not skipped: - self.stream.write("NO TESTS RAN") - else: - self.stream.write("OK") - if skipped: - infos.append("skipped=%d" % skipped) - if expectedFails: - infos.append("expected failures=%d" % expectedFails) - if unexpectedSuccesses: - infos.append("unexpected successes=%d" % unexpectedSuccesses) - if infos: - self.stream.writeln(" (%s)" % (", ".join(infos),)) - else: - self.stream.write("\n") - self.stream.flush() - return result diff --git a/Python313_13_x86_Template/Lib/urllib/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/urllib/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 9fb4e40d..00000000 Binary files a/Python313_13_x86_Template/Lib/urllib/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/urllib/__pycache__/error.cpython-313.pyc b/Python313_13_x86_Template/Lib/urllib/__pycache__/error.cpython-313.pyc deleted file mode 100644 index 4577b77f..00000000 Binary files a/Python313_13_x86_Template/Lib/urllib/__pycache__/error.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/urllib/__pycache__/parse.cpython-313.pyc b/Python313_13_x86_Template/Lib/urllib/__pycache__/parse.cpython-313.pyc deleted file mode 100644 index 5886cffd..00000000 Binary files a/Python313_13_x86_Template/Lib/urllib/__pycache__/parse.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/urllib/__pycache__/request.cpython-313.pyc b/Python313_13_x86_Template/Lib/urllib/__pycache__/request.cpython-313.pyc deleted file mode 100644 index 5f1f6c64..00000000 Binary files a/Python313_13_x86_Template/Lib/urllib/__pycache__/request.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/urllib/__pycache__/response.cpython-313.pyc b/Python313_13_x86_Template/Lib/urllib/__pycache__/response.cpython-313.pyc deleted file mode 100644 index 2a77b186..00000000 Binary files a/Python313_13_x86_Template/Lib/urllib/__pycache__/response.cpython-313.pyc and /dev/null differ diff --git 
a/Python313_13_x86_Template/Lib/urllib/parse.py b/Python313_13_x86_Template/Lib/urllib/parse.py deleted file mode 100644 index 14f66c5a..00000000 --- a/Python313_13_x86_Template/Lib/urllib/parse.py +++ /dev/null @@ -1,1264 +0,0 @@ -"""Parse (absolute and relative) URLs. - -urllib.parse module is based upon the following RFC specifications. - -RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding -and L. Masinter, January 2005. - -RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter -and L.Masinter, December 1999. - -RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. -Berners-Lee, R. Fielding, and L. Masinter, August 1998. - -RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. - -RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June -1995. - -RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. -McCahill, December 1994 - -RFC 3986 is considered the current standard and any future changes to -urllib.parse module should conform with it. The urllib.parse module is -currently not entirely compliant with this RFC due to defacto -scenarios for parsing, and for backward compatibility purposes, some -parsing quirks from older RFCs are retained. The testcases in -test_urlparse.py provides a good indicator of parsing behavior. - -The WHATWG URL Parser spec should also be considered. We are not compliant with -it either due to existing user code API behavior expectations (Hyrum's Law). -It serves as a useful guide when making changes. -""" - -from collections import namedtuple -import functools -import math -import re -import types -import warnings -import ipaddress - -__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", - "urlsplit", "urlunsplit", "urlencode", "parse_qs", - "parse_qsl", "quote", "quote_plus", "quote_from_bytes", - "unquote", "unquote_plus", "unquote_to_bytes", - "DefragResult", "ParseResult", "SplitResult", - "DefragResultBytes", "ParseResultBytes", "SplitResultBytes"] - -# A classification of schemes. -# The empty string classifies URLs with no scheme specified, -# being the default value returned by “urlsplit” and “urlparse”. - -uses_relative = ['', 'ftp', 'http', 'gopher', 'nntp', 'imap', - 'wais', 'file', 'https', 'shttp', 'mms', - 'prospero', 'rtsp', 'rtsps', 'rtspu', 'sftp', - 'svn', 'svn+ssh', 'ws', 'wss'] - -uses_netloc = ['', 'ftp', 'http', 'gopher', 'nntp', 'telnet', - 'imap', 'wais', 'file', 'mms', 'https', 'shttp', - 'snews', 'prospero', 'rtsp', 'rtsps', 'rtspu', 'rsync', - 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh', - 'ws', 'wss', 'itms-services'] - -uses_params = ['', 'ftp', 'hdl', 'prospero', 'http', 'imap', - 'https', 'shttp', 'rtsp', 'rtsps', 'rtspu', 'sip', - 'sips', 'mms', 'sftp', 'tel'] - -# These are not actually used anymore, but should stay for backwards -# compatibility. (They are undocumented, but have a public-looking name.) 
- -non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', - 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] - -uses_query = ['', 'http', 'wais', 'imap', 'https', 'shttp', 'mms', - 'gopher', 'rtsp', 'rtsps', 'rtspu', 'sip', 'sips'] - -uses_fragment = ['', 'ftp', 'hdl', 'http', 'gopher', 'news', - 'nntp', 'wais', 'https', 'shttp', 'snews', - 'file', 'prospero'] - -# Characters valid in scheme names -scheme_chars = ('abcdefghijklmnopqrstuvwxyz' - 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - '0123456789' - '+-.') - -# Leading and trailing C0 control and space to be stripped per WHATWG spec. -# == "".join([chr(i) for i in range(0, 0x20 + 1)]) -_WHATWG_C0_CONTROL_OR_SPACE = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f ' - -# Unsafe bytes to be removed per WHATWG spec -_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n'] - -def clear_cache(): - """Clear internal performance caches. Undocumented; some tests want it.""" - urlsplit.cache_clear() - _byte_quoter_factory.cache_clear() - -# Helpers for bytes handling -# For 3.2, we deliberately require applications that -# handle improperly quoted URLs to do their own -# decoding and encoding. If valid use cases are -# presented, we may relax this by using latin-1 -# decoding internally for 3.3 -_implicit_encoding = 'ascii' -_implicit_errors = 'strict' - -def _noop(obj): - return obj - -def _encode_result(obj, encoding=_implicit_encoding, - errors=_implicit_errors): - return obj.encode(encoding, errors) - -def _decode_args(args, encoding=_implicit_encoding, - errors=_implicit_errors): - return tuple(x.decode(encoding, errors) if x else '' for x in args) - -def _coerce_args(*args): - # Invokes decode if necessary to create str args - # and returns the coerced inputs along with - # an appropriate result coercion function - # - noop for str inputs - # - encoding function otherwise - str_input = isinstance(args[0], str) - for arg in args[1:]: - # We special-case the empty string to support the - # "scheme=''" default argument to some functions - if arg and isinstance(arg, str) != str_input: - raise TypeError("Cannot mix str and non-str arguments") - if str_input: - return args + (_noop,) - return _decode_args(args) + (_encode_result,) - -# Result objects are more helpful than simple tuples -class _ResultMixinStr(object): - """Standard approach to encoding parsed results from str to bytes""" - __slots__ = () - - def encode(self, encoding='ascii', errors='strict'): - return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self)) - - -class _ResultMixinBytes(object): - """Standard approach to decoding parsed results from bytes to str""" - __slots__ = () - - def decode(self, encoding='ascii', errors='strict'): - return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self)) - - -class _NetlocResultMixinBase(object): - """Shared methods for the parsed result objects containing a netloc element""" - __slots__ = () - - @property - def username(self): - return self._userinfo[0] - - @property - def password(self): - return self._userinfo[1] - - @property - def hostname(self): - hostname = self._hostinfo[0] - if not hostname: - return None - # Scoped IPv6 address may have zone info, which must not be lowercased - # like http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys - separator = '%' if isinstance(hostname, str) else b'%' - hostname, percent, zone = hostname.partition(separator) - return hostname.lower() + percent + zone - - @property - def port(self): - port = 
self._hostinfo[1] - if port is not None: - if port.isdigit() and port.isascii(): - port = int(port) - else: - raise ValueError(f"Port could not be cast to integer value as {port!r}") - if not (0 <= port <= 65535): - raise ValueError("Port out of range 0-65535") - return port - - __class_getitem__ = classmethod(types.GenericAlias) - - -class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr): - __slots__ = () - - @property - def _userinfo(self): - netloc = self.netloc - userinfo, have_info, hostinfo = netloc.rpartition('@') - if have_info: - username, have_password, password = userinfo.partition(':') - if not have_password: - password = None - else: - username = password = None - return username, password - - @property - def _hostinfo(self): - netloc = self.netloc - _, _, hostinfo = netloc.rpartition('@') - _, have_open_br, bracketed = hostinfo.partition('[') - if have_open_br: - hostname, _, port = bracketed.partition(']') - _, _, port = port.partition(':') - else: - hostname, _, port = hostinfo.partition(':') - if not port: - port = None - return hostname, port - - -class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes): - __slots__ = () - - @property - def _userinfo(self): - netloc = self.netloc - userinfo, have_info, hostinfo = netloc.rpartition(b'@') - if have_info: - username, have_password, password = userinfo.partition(b':') - if not have_password: - password = None - else: - username = password = None - return username, password - - @property - def _hostinfo(self): - netloc = self.netloc - _, _, hostinfo = netloc.rpartition(b'@') - _, have_open_br, bracketed = hostinfo.partition(b'[') - if have_open_br: - hostname, _, port = bracketed.partition(b']') - _, _, port = port.partition(b':') - else: - hostname, _, port = hostinfo.partition(b':') - if not port: - port = None - return hostname, port - - -_DefragResultBase = namedtuple('DefragResult', 'url fragment') -_SplitResultBase = namedtuple( - 'SplitResult', 'scheme netloc path query fragment') -_ParseResultBase = namedtuple( - 'ParseResult', 'scheme netloc path params query fragment') - -_DefragResultBase.__doc__ = """ -DefragResult(url, fragment) - -A 2-tuple that contains the url without fragment identifier and the fragment -identifier as a separate argument. -""" - -_DefragResultBase.url.__doc__ = """The URL with no fragment identifier.""" - -_DefragResultBase.fragment.__doc__ = """ -Fragment identifier separated from URL, that allows indirect identification of a -secondary resource by reference to a primary resource and additional identifying -information. -""" - -_SplitResultBase.__doc__ = """ -SplitResult(scheme, netloc, path, query, fragment) - -A 5-tuple that contains the different components of a URL. Similar to -ParseResult, but does not split params. -""" - -_SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request.""" - -_SplitResultBase.netloc.__doc__ = """ -Network location where the request is made to. -""" - -_SplitResultBase.path.__doc__ = """ -The hierarchical path, such as the path to a file to download. -""" - -_SplitResultBase.query.__doc__ = """ -The query component, that contains non-hierarchical data, that along with data -in path component, identifies a resource in the scope of URI's scheme and -network location. -""" - -_SplitResultBase.fragment.__doc__ = """ -Fragment identifier, that allows indirect identification of a secondary resource -by reference to a primary resource and additional identifying information. 
-""" - -_ParseResultBase.__doc__ = """ -ParseResult(scheme, netloc, path, params, query, fragment) - -A 6-tuple that contains components of a parsed URL. -""" - -_ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__ -_ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__ -_ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__ -_ParseResultBase.params.__doc__ = """ -Parameters for last path element used to dereference the URI in order to provide -access to perform some operation on the resource. -""" - -_ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__ -_ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__ - - -# For backwards compatibility, alias _NetlocResultMixinStr -# ResultBase is no longer part of the documented API, but it is -# retained since deprecating it isn't worth the hassle -ResultBase = _NetlocResultMixinStr - -# Structured result objects for string data -class DefragResult(_DefragResultBase, _ResultMixinStr): - __slots__ = () - def geturl(self): - if self.fragment: - return self.url + '#' + self.fragment - else: - return self.url - -class SplitResult(_SplitResultBase, _NetlocResultMixinStr): - __slots__ = () - def geturl(self): - return urlunsplit(self) - -class ParseResult(_ParseResultBase, _NetlocResultMixinStr): - __slots__ = () - def geturl(self): - return urlunparse(self) - -# Structured result objects for bytes data -class DefragResultBytes(_DefragResultBase, _ResultMixinBytes): - __slots__ = () - def geturl(self): - if self.fragment: - return self.url + b'#' + self.fragment - else: - return self.url - -class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes): - __slots__ = () - def geturl(self): - return urlunsplit(self) - -class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes): - __slots__ = () - def geturl(self): - return urlunparse(self) - -# Set up the encode/decode result pairs -def _fix_result_transcoding(): - _result_pairs = ( - (DefragResult, DefragResultBytes), - (SplitResult, SplitResultBytes), - (ParseResult, ParseResultBytes), - ) - for _decoded, _encoded in _result_pairs: - _decoded._encoded_counterpart = _encoded - _encoded._decoded_counterpart = _decoded - -_fix_result_transcoding() -del _fix_result_transcoding - -def urlparse(url, scheme='', allow_fragments=True): - """Parse a URL into 6 components: - :///;?# - - The result is a named 6-tuple with fields corresponding to the - above. It is either a ParseResult or ParseResultBytes object, - depending on the type of the url parameter. - - The username, password, hostname, and port sub-components of netloc - can also be accessed as attributes of the returned object. - - The scheme argument provides the default value of the scheme - component when no scheme is found in url. - - If allow_fragments is False, no attempt is made to separate the - fragment component from the previous component, which can be either - path or query. - - Note that % escapes are not expanded. - - urlsplit() should generally be used instead of urlparse(). 
- """ - url, scheme, _coerce_result = _coerce_args(url, scheme) - splitresult = urlsplit(url, scheme, allow_fragments) - scheme, netloc, url, query, fragment = splitresult - if scheme in uses_params and ';' in url: - url, params = _splitparams(url) - else: - params = '' - result = ParseResult(scheme, netloc, url, params, query, fragment) - return _coerce_result(result) - -def _splitparams(url): - if '/' in url: - i = url.find(';', url.rfind('/')) - if i < 0: - return url, '' - else: - i = url.find(';') - return url[:i], url[i+1:] - -def _splitnetloc(url, start=0): - delim = len(url) # position of end of domain part of url, default is end - for c in '/?#': # look for delimiters; the order is NOT important - wdelim = url.find(c, start) # find first of this delim - if wdelim >= 0: # if found - delim = min(delim, wdelim) # use earliest delim position - return url[start:delim], url[delim:] # return (domain, rest) - -def _checknetloc(netloc): - if not netloc or netloc.isascii(): - return - # looking for characters like \u2100 that expand to 'a/c' - # IDNA uses NFKC equivalence, so normalize for this check - import unicodedata - n = netloc.replace('@', '') # ignore characters already included - n = n.replace(':', '') # but not the surrounding text - n = n.replace('#', '') - n = n.replace('?', '') - netloc2 = unicodedata.normalize('NFKC', n) - if n == netloc2: - return - for c in '/?#@:': - if c in netloc2: - raise ValueError("netloc '" + netloc + "' contains invalid " + - "characters under NFKC normalization") - -def _check_bracketed_netloc(netloc): - # Note that this function must mirror the splitting - # done in NetlocResultMixins._hostinfo(). - hostname_and_port = netloc.rpartition('@')[2] - before_bracket, have_open_br, bracketed = hostname_and_port.partition('[') - if have_open_br: - # No data is allowed before a bracket. - if before_bracket: - raise ValueError("Invalid IPv6 URL") - hostname, _, port = bracketed.partition(']') - # No data is allowed after the bracket but before the port delimiter. - if port and not port.startswith(":"): - raise ValueError("Invalid IPv6 URL") - else: - hostname, _, port = hostname_and_port.partition(':') - _check_bracketed_host(hostname) - -# Valid bracketed hosts are defined in -# https://www.rfc-editor.org/rfc/rfc3986#page-49 and https://url.spec.whatwg.org/ -def _check_bracketed_host(hostname): - if hostname.startswith('v'): - if not re.match(r"\Av[a-fA-F0-9]+\..+\Z", hostname): - raise ValueError(f"IPvFuture address is invalid") - else: - ip = ipaddress.ip_address(hostname) # Throws Value Error if not IPv6 or IPv4 - if isinstance(ip, ipaddress.IPv4Address): - raise ValueError(f"An IPv4 address cannot be in brackets") - -# typed=True avoids BytesWarnings being emitted during cache key -# comparison since this API supports both bytes and str input. -@functools.lru_cache(typed=True) -def urlsplit(url, scheme='', allow_fragments=True): - """Parse a URL into 5 components: - :///?# - - The result is a named 5-tuple with fields corresponding to the - above. It is either a SplitResult or SplitResultBytes object, - depending on the type of the url parameter. - - The username, password, hostname, and port sub-components of netloc - can also be accessed as attributes of the returned object. - - The scheme argument provides the default value of the scheme - component when no scheme is found in url. - - If allow_fragments is False, no attempt is made to separate the - fragment component from the previous component, which can be either - path or query. 
- - Note that % escapes are not expanded. - """ - - url, scheme, _coerce_result = _coerce_args(url, scheme) - # Only lstrip url as some applications rely on preserving trailing space. - # (https://url.spec.whatwg.org/#concept-basic-url-parser would strip both) - url = url.lstrip(_WHATWG_C0_CONTROL_OR_SPACE) - scheme = scheme.strip(_WHATWG_C0_CONTROL_OR_SPACE) - - for b in _UNSAFE_URL_BYTES_TO_REMOVE: - url = url.replace(b, "") - scheme = scheme.replace(b, "") - - allow_fragments = bool(allow_fragments) - netloc = query = fragment = '' - i = url.find(':') - if i > 0 and url[0].isascii() and url[0].isalpha(): - for c in url[:i]: - if c not in scheme_chars: - break - else: - scheme, url = url[:i].lower(), url[i+1:] - if url[:2] == '//': - netloc, url = _splitnetloc(url, 2) - if (('[' in netloc and ']' not in netloc) or - (']' in netloc and '[' not in netloc)): - raise ValueError("Invalid IPv6 URL") - if '[' in netloc and ']' in netloc: - _check_bracketed_netloc(netloc) - if allow_fragments and '#' in url: - url, fragment = url.split('#', 1) - if '?' in url: - url, query = url.split('?', 1) - _checknetloc(netloc) - v = SplitResult(scheme, netloc, url, query, fragment) - return _coerce_result(v) - -def urlunparse(components): - """Put a parsed URL back together again. This may result in a - slightly different, but equivalent URL, if the URL that was parsed - originally had redundant delimiters, e.g. a ? with an empty query - (the draft states that these are equivalent).""" - scheme, netloc, url, params, query, fragment, _coerce_result = ( - _coerce_args(*components)) - if params: - url = "%s;%s" % (url, params) - return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment))) - -def urlunsplit(components): - """Combine the elements of a tuple as returned by urlsplit() into a - complete URL as a string. The data argument can be any five-item iterable. - This may result in a slightly different, but equivalent URL, if the URL that - was parsed originally had unnecessary delimiters (for example, a ? with an - empty query; the RFC states that these are equivalent).""" - scheme, netloc, url, query, fragment, _coerce_result = ( - _coerce_args(*components)) - if netloc: - if url and url[:1] != '/': url = '/' + url - url = '//' + netloc + url - elif url[:2] == '//': - url = '//' + url - elif scheme and scheme in uses_netloc and (not url or url[:1] == '/'): - url = '//' + url - if scheme: - url = scheme + ':' + url - if query: - url = url + '?' 
+ query - if fragment: - url = url + '#' + fragment - return _coerce_result(url) - -def urljoin(base, url, allow_fragments=True): - """Join a base URL and a possibly relative URL to form an absolute - interpretation of the latter.""" - if not base: - return url - if not url: - return base - - base, url, _coerce_result = _coerce_args(base, url) - bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ - urlparse(base, '', allow_fragments) - scheme, netloc, path, params, query, fragment = \ - urlparse(url, bscheme, allow_fragments) - - if scheme != bscheme or scheme not in uses_relative: - return _coerce_result(url) - if scheme in uses_netloc: - if netloc: - return _coerce_result(urlunparse((scheme, netloc, path, - params, query, fragment))) - netloc = bnetloc - - if not path and not params: - path = bpath - params = bparams - if not query: - query = bquery - return _coerce_result(urlunparse((scheme, netloc, path, - params, query, fragment))) - - base_parts = bpath.split('/') - if base_parts[-1] != '': - # the last item is not a directory, so will not be taken into account - # in resolving the relative path - del base_parts[-1] - - # for rfc3986, ignore all base path should the first character be root. - if path[:1] == '/': - segments = path.split('/') - else: - segments = base_parts + path.split('/') - # filter out elements that would cause redundant slashes on re-joining - # the resolved_path - segments[1:-1] = filter(None, segments[1:-1]) - - resolved_path = [] - - for seg in segments: - if seg == '..': - try: - resolved_path.pop() - except IndexError: - # ignore any .. segments that would otherwise cause an IndexError - # when popped from resolved_path if resolving for rfc3986 - pass - elif seg == '.': - continue - else: - resolved_path.append(seg) - - if segments[-1] in ('.', '..'): - # do some post-processing here. if the last segment was a relative dir, - # then we need to append the trailing '/' - resolved_path.append('') - - return _coerce_result(urlunparse((scheme, netloc, '/'.join( - resolved_path) or '/', params, query, fragment))) - - -def urldefrag(url): - """Removes any existing fragment from URL. - - Returns a tuple of the defragmented URL and the fragment. If - the URL contained no fragments, the second element is the - empty string. - """ - url, _coerce_result = _coerce_args(url) - if '#' in url: - s, n, p, a, q, frag = urlparse(url) - defrag = urlunparse((s, n, p, a, q, '')) - else: - frag = '' - defrag = url - return _coerce_result(DefragResult(defrag, frag)) - -_hexdig = '0123456789ABCDEFabcdef' -_hextobyte = None - -def unquote_to_bytes(string): - """unquote_to_bytes('abc%20def') -> b'abc def'.""" - return bytes(_unquote_impl(string)) - -def _unquote_impl(string: bytes | bytearray | str) -> bytes | bytearray: - # Note: strings are encoded as UTF-8. This is only an issue if it contains - # unescaped non-ASCII characters, which URIs should not. - if not string: - # Is it a string-like object? 
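-        # (the attribute access below raises AttributeError for non-string
-        # input instead of silently returning an empty result)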
- string.split - return b'' - if isinstance(string, str): - string = string.encode('utf-8') - bits = string.split(b'%') - if len(bits) == 1: - return string - res = bytearray(bits[0]) - append = res.extend - # Delay the initialization of the table to not waste memory - # if the function is never called - global _hextobyte - if _hextobyte is None: - _hextobyte = {(a + b).encode(): bytes.fromhex(a + b) - for a in _hexdig for b in _hexdig} - for item in bits[1:]: - try: - append(_hextobyte[item[:2]]) - append(item[2:]) - except KeyError: - append(b'%') - append(item) - return res - -_asciire = re.compile('([\x00-\x7f]+)') - -def _generate_unquoted_parts(string, encoding, errors): - previous_match_end = 0 - for ascii_match in _asciire.finditer(string): - start, end = ascii_match.span() - yield string[previous_match_end:start] # Non-ASCII - # The ascii_match[1] group == string[start:end]. - yield _unquote_impl(ascii_match[1]).decode(encoding, errors) - previous_match_end = end - yield string[previous_match_end:] # Non-ASCII tail - -def unquote(string, encoding='utf-8', errors='replace'): - """Replace %xx escapes by their single-character equivalent. The optional - encoding and errors parameters specify how to decode percent-encoded - sequences into Unicode characters, as accepted by the bytes.decode() - method. - By default, percent-encoded sequences are decoded with UTF-8, and invalid - sequences are replaced by a placeholder character. - - unquote('abc%20def') -> 'abc def'. - """ - if isinstance(string, bytes): - return _unquote_impl(string).decode(encoding, errors) - if '%' not in string: - # Is it a string-like object? - string.split - return string - if encoding is None: - encoding = 'utf-8' - if errors is None: - errors = 'replace' - return ''.join(_generate_unquoted_parts(string, encoding, errors)) - - -def parse_qs(qs, keep_blank_values=False, strict_parsing=False, - encoding='utf-8', errors='replace', max_num_fields=None, separator='&'): - """Parse a query given as a string argument. - - Arguments: - - qs: percent-encoded query string to be parsed - - keep_blank_values: flag indicating whether blank values in - percent-encoded queries should be treated as blank strings. - A true value indicates that blanks should be retained as - blank strings. The default false value indicates that - blank values are to be ignored and treated as if they were - not included. - - strict_parsing: flag indicating what to do with parsing errors. - If false (the default), errors are silently ignored. - If true, errors raise a ValueError exception. - - encoding and errors: specify how to decode percent-encoded sequences - into Unicode characters, as accepted by the bytes.decode() method. - - max_num_fields: int. If set, then throws a ValueError if there - are more than n fields read by parse_qsl(). - - separator: str. The symbol to use for separating the query arguments. - Defaults to &. - - Returns a dictionary. - """ - parsed_result = {} - pairs = parse_qsl(qs, keep_blank_values, strict_parsing, - encoding=encoding, errors=errors, - max_num_fields=max_num_fields, separator=separator) - for name, value in pairs: - if name in parsed_result: - parsed_result[name].append(value) - else: - parsed_result[name] = [value] - return parsed_result - - -def parse_qsl(qs, keep_blank_values=False, strict_parsing=False, - encoding='utf-8', errors='replace', max_num_fields=None, separator='&'): - """Parse a query given as a string argument. 
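-
-    Example (illustrative): with the defaults, blank values are dropped
-    and repeated names are kept in order:
-
-        >>> parse_qsl('a=1&a=2&b=')
-        [('a', '1'), ('a', '2')]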
- - Arguments: - - qs: percent-encoded query string to be parsed - - keep_blank_values: flag indicating whether blank values in - percent-encoded queries should be treated as blank strings. - A true value indicates that blanks should be retained as blank - strings. The default false value indicates that blank values - are to be ignored and treated as if they were not included. - - strict_parsing: flag indicating what to do with parsing errors. If - false (the default), errors are silently ignored. If true, - errors raise a ValueError exception. - - encoding and errors: specify how to decode percent-encoded sequences - into Unicode characters, as accepted by the bytes.decode() method. - - max_num_fields: int. If set, then throws a ValueError - if there are more than n fields read by parse_qsl(). - - separator: str. The symbol to use for separating the query arguments. - Defaults to &. - - Returns a list, as G-d intended. - """ - - if not separator or not isinstance(separator, (str, bytes)): - raise ValueError("Separator must be of type string or bytes.") - if isinstance(qs, str): - if not isinstance(separator, str): - separator = str(separator, 'ascii') - eq = '=' - def _unquote(s): - return unquote_plus(s, encoding=encoding, errors=errors) - else: - if not qs: - return [] - # Use memoryview() to reject integers and iterables, - # acceptable by the bytes constructor. - qs = bytes(memoryview(qs)) - if isinstance(separator, str): - separator = bytes(separator, 'ascii') - eq = b'=' - def _unquote(s): - return unquote_to_bytes(s.replace(b'+', b' ')) - - if not qs: - return [] - - # If max_num_fields is defined then check that the number of fields - # is less than max_num_fields. This prevents a memory exhaustion DOS - # attack via post bodies with many fields. - if max_num_fields is not None: - num_fields = 1 + qs.count(separator) - if max_num_fields < num_fields: - raise ValueError('Max number of fields exceeded') - - r = [] - for name_value in qs.split(separator): - if name_value or strict_parsing: - name, has_eq, value = name_value.partition(eq) - if not has_eq and strict_parsing: - raise ValueError("bad query field: %r" % (name_value,)) - if value or keep_blank_values: - name = _unquote(name) - value = _unquote(value) - r.append((name, value)) - return r - -def unquote_plus(string, encoding='utf-8', errors='replace'): - """Like unquote(), but also replace plus signs by spaces, as required for - unquoting HTML form values. - - unquote_plus('%7e/abc+def') -> '~/abc def' - """ - string = string.replace('+', ' ') - return unquote(string, encoding, errors) - -_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - b'abcdefghijklmnopqrstuvwxyz' - b'0123456789' - b'_.-~') -_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE) - -def __getattr__(name): - if name == 'Quoter': - warnings.warn('Deprecated in 3.11. ' - 'urllib.parse.Quoter will be removed in Python 3.14. ' - 'It was not intended to be a public API.', - DeprecationWarning, stacklevel=2) - return _Quoter - raise AttributeError(f'module {__name__!r} has no attribute {name!r}') - -class _Quoter(dict): - """A mapping from bytes numbers (in range(0,256)) to strings. - - String values are percent-encoded byte values, unless the key < 128, and - in either of the specified safe set, or the always safe set. - """ - # Keeps a cache internally, via __missing__, for efficiency (lookups - # of cached keys don't call Python code at all). 
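-    # A sketch of the same idiom outside this module (illustrative only;
-    # Memo and compute() are hypothetical names):
-    #
-    #     class Memo(dict):
-    #         def __missing__(self, key):
-    #             value = compute(key)  # computed once per key
-    #             self[key] = value     # cached; later hits stay in C code
-    #             return value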
- def __init__(self, safe): - """safe: bytes object.""" - self.safe = _ALWAYS_SAFE.union(safe) - - def __repr__(self): - return f"<Quoter {dict(self)!r}>" - - def __missing__(self, b): - # Handle a cache miss. Store quoted string in cache and return. - res = chr(b) if b in self.safe else '%{:02X}'.format(b) - self[b] = res - return res - -def quote(string, safe='/', encoding=None, errors=None): - """quote('abc def') -> 'abc%20def' - - Each part of a URL, e.g. the path info, the query, etc., has a - different set of reserved characters that must be quoted. The - quote function offers a cautious (not minimal) way to quote a - string for most of these parts. - - RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists - the following (un)reserved characters. - - unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" - reserved = gen-delims / sub-delims - gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" - sub-delims = "!" / "$" / "&" / "'" / "(" / ")" - / "*" / "+" / "," / ";" / "=" - - Each of the reserved characters is reserved in some component of a URL, - but not necessarily in all of them. - - The quote function %-escapes all characters that are neither in the - unreserved chars ("always safe") nor the additional chars set via the - safe arg. - - The default for the safe arg is '/'. The character is reserved, but in - typical usage the quote function is being called on a path where the - existing slash characters are to be preserved. - - Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings. - Now, "~" is included in the set of unreserved characters. - - string and safe may be either str or bytes objects. encoding and errors - must not be specified if string is a bytes object. - - The optional encoding and errors parameters specify how to deal with - non-ASCII characters, as accepted by the str.encode method. - By default, encoding='utf-8' (characters are encoded with UTF-8), and - errors='strict' (unsupported characters raise a UnicodeEncodeError). - """ - if isinstance(string, str): - if not string: - return string - if encoding is None: - encoding = 'utf-8' - if errors is None: - errors = 'strict' - string = string.encode(encoding, errors) - else: - if encoding is not None: - raise TypeError("quote() doesn't support 'encoding' for bytes") - if errors is not None: - raise TypeError("quote() doesn't support 'errors' for bytes") - return quote_from_bytes(string, safe) - -def quote_plus(string, safe='', encoding=None, errors=None): - """Like quote(), but also replace ' ' with '+', as required for quoting - HTML form values. Plus signs in the original string are escaped unless - they are included in safe. It also does not have safe default to '/'. - """ - # Check if ' ' in string, where string may either be a str or bytes. If - # there are no spaces, the regular quote will produce the right answer. - if ((isinstance(string, str) and ' ' not in string) or - (isinstance(string, bytes) and b' ' not in string)): - return quote(string, safe, encoding, errors) - if isinstance(safe, str): - space = ' ' - else: - space = b' ' - string = quote(string, safe + space, encoding, errors) - return string.replace(' ', '+') - -# Expectation: A typical program is unlikely to create more than 5 of these. -@functools.lru_cache -def _byte_quoter_factory(safe): - return _Quoter(safe).__getitem__ - -def quote_from_bytes(bs, safe='/'): - """Like quote(), but accepts a bytes object rather than a str, and does - not perform string-to-bytes encoding. It always returns an ASCII string.
- quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f' - """ - if not isinstance(bs, (bytes, bytearray)): - raise TypeError("quote_from_bytes() expected bytes") - if not bs: - return '' - if isinstance(safe, str): - # Normalize 'safe' by converting to bytes and removing non-ASCII chars - safe = safe.encode('ascii', 'ignore') - else: - # List comprehensions are faster than generator expressions. - safe = bytes([c for c in safe if c < 128]) - if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe): - return bs.decode() - quoter = _byte_quoter_factory(safe) - if (bs_len := len(bs)) < 200_000: - return ''.join(map(quoter, bs)) - else: - # This saves memory - https://github.com/python/cpython/issues/95865 - chunk_size = math.isqrt(bs_len) - chunks = [''.join(map(quoter, bs[i:i+chunk_size])) - for i in range(0, bs_len, chunk_size)] - return ''.join(chunks) - -def urlencode(query, doseq=False, safe='', encoding=None, errors=None, - quote_via=quote_plus): - """Encode a dict or sequence of two-element tuples into a URL query string. - - If any values in the query arg are sequences and doseq is true, each - sequence element is converted to a separate parameter. - - If the query arg is a sequence of two-element tuples, the order of the - parameters in the output will match the order of parameters in the - input. - - The components of a query arg may each be either a string or a bytes type. - - The safe, encoding, and errors parameters are passed down to the function - specified by quote_via (encoding and errors only if a component is a str). - """ - - if hasattr(query, "items"): - query = query.items() - else: - # It's a bother at times that strings and string-like objects are - # sequences. - try: - # non-sequence items should not work with len() - # non-empty strings will fail this - if len(query) and not isinstance(query[0], tuple): - raise TypeError - # Zero-length sequences of all types will get here and succeed, - # but that's a minor nit. Since the original implementation - # allowed empty dicts that type of behavior probably should be - # preserved for consistency - except TypeError as err: - raise TypeError("not a valid non-string sequence " - "or mapping object") from err - - l = [] - if not doseq: - for k, v in query: - if isinstance(k, bytes): - k = quote_via(k, safe) - else: - k = quote_via(str(k), safe, encoding, errors) - - if isinstance(v, bytes): - v = quote_via(v, safe) - else: - v = quote_via(str(v), safe, encoding, errors) - l.append(k + '=' + v) - else: - for k, v in query: - if isinstance(k, bytes): - k = quote_via(k, safe) - else: - k = quote_via(str(k), safe, encoding, errors) - - if isinstance(v, bytes): - v = quote_via(v, safe) - l.append(k + '=' + v) - elif isinstance(v, str): - v = quote_via(v, safe, encoding, errors) - l.append(k + '=' + v) - else: - try: - # Is this a sufficient test for sequence-ness? - x = len(v) - except TypeError: - # not a sequence - v = quote_via(str(v), safe, encoding, errors) - l.append(k + '=' + v) - else: - # loop over the sequence - for elt in v: - if isinstance(elt, bytes): - elt = quote_via(elt, safe) - else: - elt = quote_via(str(elt), safe, encoding, errors) - l.append(k + '=' + elt) - return '&'.join(l) - - -def to_bytes(url): - warnings.warn("urllib.parse.to_bytes() is deprecated as of 3.8", - DeprecationWarning, stacklevel=2) - return _to_bytes(url) - - -def _to_bytes(url): - """to_bytes(u"URL") --> 'URL'.""" - # Most URL schemes require ASCII. If that changes, the conversion - # can be relaxed. 
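-    # e.g. (illustrative): _to_bytes('http://example.com/') returns the
-    # string unchanged, while a URL containing 'é' raises UnicodeError.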
- # XXX get rid of to_bytes() - if isinstance(url, str): - try: - url = url.encode("ASCII").decode() - except UnicodeError: - raise UnicodeError("URL " + repr(url) + - " contains non-ASCII characters") - return url - - -def unwrap(url): - """Transform a string like '<URL:scheme://host/path>' into 'scheme://host/path'. - - The string is returned unchanged if it's not a wrapped URL. - """ - url = str(url).strip() - if url[:1] == '<' and url[-1:] == '>': - url = url[1:-1].strip() - if url[:4] == 'URL:': - url = url[4:].strip() - return url - - -def splittype(url): - warnings.warn("urllib.parse.splittype() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splittype(url) - - -_typeprog = None -def _splittype(url): - """splittype('type:opaquestring') --> 'type', 'opaquestring'.""" - global _typeprog - if _typeprog is None: - _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL) - - match = _typeprog.match(url) - if match: - scheme, data = match.groups() - return scheme.lower(), data - return None, url - - -def splithost(url): - warnings.warn("urllib.parse.splithost() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splithost(url) - - -_hostprog = None -def _splithost(url): - """splithost('//host[:port]/path') --> 'host[:port]', '/path'.""" - global _hostprog - if _hostprog is None: - _hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL) - - match = _hostprog.match(url) - if match: - host_port, path = match.groups() - if path and path[0] != '/': - path = '/' + path - return host_port, path - return None, url - - -def splituser(host): - warnings.warn("urllib.parse.splituser() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splituser(host) - - -def _splituser(host): - """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" - user, delim, host = host.rpartition('@') - return (user if delim else None), host - - -def splitpasswd(user): - warnings.warn("urllib.parse.splitpasswd() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splitpasswd(user) - - -def _splitpasswd(user): - """splitpasswd('user:passwd') -> 'user', 'passwd'.""" - user, delim, passwd = user.partition(':') - return user, (passwd if delim else None) - - -def splitport(host): - warnings.warn("urllib.parse.splitport() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splitport(host) - - -# splittag('/path#tag') --> '/path', 'tag' -_portprog = None -def _splitport(host): - """splitport('host:port') --> 'host', 'port'.""" - global _portprog - if _portprog is None: - _portprog = re.compile('(.*):([0-9]*)', re.DOTALL) - - match = _portprog.fullmatch(host) - if match: - host, port = match.groups() - if port: - return host, port - return host, None - - -def splitnport(host, defport=-1): - warnings.warn("urllib.parse.splitnport() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splitnport(host, defport) - - -def _splitnport(host, defport=-1): - """Split host and port, returning numeric port. - Return given default port if no ':' found; defaults to -1. - Return numerical port if a valid number is found after ':'.
- Return None if ':' but not a valid number.""" - host, delim, port = host.rpartition(':') - if not delim: - host = port - elif port: - if port.isdigit() and port.isascii(): - nport = int(port) - else: - nport = None - return host, nport - return host, defport - - -def splitquery(url): - warnings.warn("urllib.parse.splitquery() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splitquery(url) - - -def _splitquery(url): - """splitquery('/path?query') --> '/path', 'query'.""" - path, delim, query = url.rpartition('?') - if delim: - return path, query - return url, None - - -def splittag(url): - warnings.warn("urllib.parse.splittag() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splittag(url) - - -def _splittag(url): - """splittag('/path#tag') --> '/path', 'tag'.""" - path, delim, tag = url.rpartition('#') - if delim: - return path, tag - return url, None - - -def splitattr(url): - warnings.warn("urllib.parse.splitattr() is deprecated as of 3.8, " - "use urllib.parse.urlparse() instead", - DeprecationWarning, stacklevel=2) - return _splitattr(url) - - -def _splitattr(url): - """splitattr('/path;attr1=value1;attr2=value2;...') -> - '/path', ['attr1=value1', 'attr2=value2', ...].""" - words = url.split(';') - return words[0], words[1:] - - -def splitvalue(attr): - warnings.warn("urllib.parse.splitvalue() is deprecated as of 3.8, " - "use urllib.parse.parse_qsl() instead", - DeprecationWarning, stacklevel=2) - return _splitvalue(attr) - - -def _splitvalue(attr): - """splitvalue('attr=value') --> 'attr', 'value'.""" - attr, delim, value = attr.partition('=') - return attr, (value if delim else None) diff --git a/Python313_13_x86_Template/Lib/urllib/request.py b/Python313_13_x86_Template/Lib/urllib/request.py deleted file mode 100644 index 3d864f1d..00000000 --- a/Python313_13_x86_Template/Lib/urllib/request.py +++ /dev/null @@ -1,2797 +0,0 @@ -"""An extensible library for opening URLs using a variety of protocols - -The simplest way to use this module is to call the urlopen function, -which accepts a string containing a URL or a Request object (described -below). It opens the URL and returns the results as file-like -object; the returned object has some extra methods described below. - -The OpenerDirector manages a collection of Handler objects that do -all the actual work. Each Handler implements a particular protocol or -option. The OpenerDirector is a composite object that invokes the -Handlers needed to open the requested URL. For example, the -HTTPHandler performs HTTP GET and POST requests and deals with -non-error returns. The HTTPRedirectHandler automatically deals with -HTTP 301, 302, 303, 307, and 308 redirect errors, and the -HTTPDigestAuthHandler deals with digest authentication. - -urlopen(url, data=None) -- Basic usage is the same as original -urllib. pass the url and optionally data to post to an HTTP URL, and -get a file-like object back. One difference is that you can also pass -a Request instance instead of URL. Raises a URLError (subclass of -OSError); for HTTP errors, raises an HTTPError, which can also be -treated as a valid response. - -build_opener -- Function that creates a new OpenerDirector instance. -Will install the default handlers. Accepts one or more Handlers as -arguments, either instances or Handler classes that it will -instantiate. 
If one of the argument is a subclass of the default -handler, the argument will be installed instead of the default. - -install_opener -- Installs a new opener as the default opener. - -objects of interest: - -OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages -the Handler classes, while dealing with requests and responses. - -Request -- An object that encapsulates the state of a request. The -state can be as simple as the URL. It can also include extra HTTP -headers, e.g. a User-Agent. - -BaseHandler -- - -internals: -BaseHandler and parent -_call_chain conventions - -Example usage: - -import urllib.request - -# set up authentication info -authinfo = urllib.request.HTTPBasicAuthHandler() -authinfo.add_password(realm='PDQ Application', - uri='https://mahler:8092/site-updates.py', - user='klem', - passwd='geheim$parole') - -proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"}) - -# build a new opener that adds authentication and caching FTP handlers -opener = urllib.request.build_opener(proxy_support, authinfo, - urllib.request.CacheFTPHandler) - -# install it -urllib.request.install_opener(opener) - -f = urllib.request.urlopen('https://www.python.org/') -""" - -# XXX issues: -# If an authentication error handler that tries to perform -# authentication for some reason but fails, how should the error be -# signalled? The client needs to know the HTTP error code. But if -# the handler knows that the problem was, e.g., that it didn't know -# that hash algo that requested in the challenge, it would be good to -# pass that information along to the client, too. -# ftp errors aren't handled cleanly -# check digest against correct (i.e. non-apache) implementation - -# Possible extensions: -# complex proxies XXX not sure what exactly was meant by this -# abstract factory for opener - -import base64 -import bisect -import email -import hashlib -import http.client -import io -import os -import re -import socket -import string -import sys -import time -import tempfile -import contextlib -import warnings - - -from urllib.error import URLError, HTTPError, ContentTooShortError -from urllib.parse import ( - urlparse, urlsplit, urljoin, unwrap, quote, unquote, - _splittype, _splithost, _splitport, _splituser, _splitpasswd, - _splitattr, _splitquery, _splitvalue, _splittag, _to_bytes, - unquote_to_bytes, urlunparse) -from urllib.response import addinfourl, addclosehook - -# check for SSL -try: - import ssl -except ImportError: - _have_ssl = False -else: - _have_ssl = True - -__all__ = [ - # Classes - 'Request', 'OpenerDirector', 'BaseHandler', 'HTTPDefaultErrorHandler', - 'HTTPRedirectHandler', 'HTTPCookieProcessor', 'ProxyHandler', - 'HTTPPasswordMgr', 'HTTPPasswordMgrWithDefaultRealm', - 'HTTPPasswordMgrWithPriorAuth', 'AbstractBasicAuthHandler', - 'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler', 'AbstractDigestAuthHandler', - 'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler', 'HTTPHandler', - 'FileHandler', 'FTPHandler', 'CacheFTPHandler', 'DataHandler', - 'UnknownHandler', 'HTTPErrorProcessor', - # Functions - 'urlopen', 'install_opener', 'build_opener', - 'pathname2url', 'url2pathname', 'getproxies', - # Legacy interface - 'urlretrieve', 'urlcleanup', 'URLopener', 'FancyURLopener', -] - -# used in User-Agent header sent -__version__ = '%d.%d' % sys.version_info[:2] - -_opener = None -def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - *, context=None): - '''Open the URL url, which can be either a string or a Request object. 
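-
-    A minimal illustrative use (example.com stands in for any reachable URL):
-
-        with urlopen('http://example.com/') as resp:
-            body = resp.read()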
- - *data* must be an object specifying additional data to be sent to - the server, or None if no such data is needed. See Request for - details. - - urllib.request module uses HTTP/1.1 and includes a "Connection:close" - header in its HTTP requests. - - The optional *timeout* parameter specifies a timeout in seconds for - blocking operations like the connection attempt (if not specified, the - global default timeout setting will be used). This only works for HTTP, - HTTPS and FTP connections. - - If *context* is specified, it must be a ssl.SSLContext instance describing - the various SSL options. See HTTPSConnection for more details. - - - This function always returns an object which can work as a - context manager and has the properties url, headers, and status. - See urllib.response.addinfourl for more detail on these properties. - - For HTTP and HTTPS URLs, this function returns a http.client.HTTPResponse - object slightly modified. In addition to the three new methods above, the - msg attribute contains the same information as the reason attribute --- - the reason phrase returned by the server --- instead of the response - headers as it is specified in the documentation for HTTPResponse. - - For FTP, file, and data URLs and requests explicitly handled by legacy - URLopener and FancyURLopener classes, this function returns a - urllib.response.addinfourl object. - - Note that None may be returned if no handler handles the request (though - the default installed global OpenerDirector uses UnknownHandler to ensure - this never happens). - - In addition, if proxy settings are detected (for example, when a *_proxy - environment variable like http_proxy is set), ProxyHandler is default - installed and makes sure the requests are handled through the proxy. - - ''' - global _opener - if context: - https_handler = HTTPSHandler(context=context) - opener = build_opener(https_handler) - elif _opener is None: - _opener = opener = build_opener() - else: - opener = _opener - return opener.open(url, data, timeout) - -def install_opener(opener): - global _opener - _opener = opener - -_url_tempfiles = [] -def urlretrieve(url, filename=None, reporthook=None, data=None): - """ - Retrieve a URL into a temporary location on disk. - - Requires a URL argument. If a filename is passed, it is used as - the temporary file location. The reporthook argument should be - a callable that accepts a block number, a read size, and the - total file size of the URL target. The data argument should be - valid URL encoded data. - - If a filename is passed and the URL points to a local resource, - the result is a copy from local file to new file. - - Returns a tuple containing the path to the newly created - data file as well as the resulting HTTPMessage object. - """ - url_type, path = _splittype(url) - - with contextlib.closing(urlopen(url, data)) as fp: - headers = fp.info() - - # Just return the local path and the "headers" for file:// - # URLs. No sense in performing a copy unless requested. - if url_type == "file" and not filename: - return os.path.normpath(path), headers - - # Handle temporary file setup. 
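-        # If the caller supplied a filename it is opened directly; otherwise
-        # a NamedTemporaryFile(delete=False) is created and remembered in
-        # _url_tempfiles so a later urlcleanup() call can unlink it.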
- if filename: - tfp = open(filename, 'wb') - else: - tfp = tempfile.NamedTemporaryFile(delete=False) - filename = tfp.name - _url_tempfiles.append(filename) - - with tfp: - result = filename, headers - bs = 1024*8 - size = -1 - read = 0 - blocknum = 0 - if "content-length" in headers: - size = int(headers["Content-Length"]) - - if reporthook: - reporthook(blocknum, bs, size) - - while block := fp.read(bs): - read += len(block) - tfp.write(block) - blocknum += 1 - if reporthook: - reporthook(blocknum, bs, size) - - if size >= 0 and read < size: - raise ContentTooShortError( - "retrieval incomplete: got only %i out of %i bytes" - % (read, size), result) - - return result - -def urlcleanup(): - """Clean up temporary files from urlretrieve calls.""" - for temp_file in _url_tempfiles: - try: - os.unlink(temp_file) - except OSError: - pass - - del _url_tempfiles[:] - global _opener - if _opener: - _opener = None - -# copied from cookielib.py -_cut_port_re = re.compile(r":\d+$", re.ASCII) -def request_host(request): - """Return request-host, as defined by RFC 2965. - - Variation from RFC: returned value is lowercased, for convenient - comparison. - - """ - url = request.full_url - host = urlparse(url)[1] - if host == "": - host = request.get_header("Host", "") - - # remove port, if present - host = _cut_port_re.sub("", host, 1) - return host.lower() - -class Request: - - def __init__(self, url, data=None, headers={}, - origin_req_host=None, unverifiable=False, - method=None): - self.full_url = url - self.headers = {} - self.unredirected_hdrs = {} - self._data = None - self.data = data - self._tunnel_host = None - for key, value in headers.items(): - self.add_header(key, value) - if origin_req_host is None: - origin_req_host = request_host(self) - self.origin_req_host = origin_req_host - self.unverifiable = unverifiable - if method: - self.method = method - - @property - def full_url(self): - if self.fragment: - return '{}#{}'.format(self._full_url, self.fragment) - return self._full_url - - @full_url.setter - def full_url(self, url): - # unwrap('<URL:type://host/path>') --> 'type://host/path' - self._full_url = unwrap(url) - self._full_url, self.fragment = _splittag(self._full_url) - self._parse() - - @full_url.deleter - def full_url(self): - self._full_url = None - self.fragment = None - self.selector = '' - - @property - def data(self): - return self._data - - @data.setter - def data(self, data): - if data != self._data: - self._data = data - # issue 16464 - # if we change data we need to remove content-length header - # (cause it's most probably calculated for previous value) - if self.has_header("Content-length"): - self.remove_header("Content-length") - - @data.deleter - def data(self): - self.data = None - - def _parse(self): - self.type, rest = _splittype(self._full_url) - if self.type is None: - raise ValueError("unknown url type: %r" % self.full_url) - self.host, self.selector = _splithost(rest) - if self.host: - self.host = unquote(self.host) - - def get_method(self): - """Return a string indicating the HTTP request method.""" - default_method = "POST" if self.data is not None else "GET" - return getattr(self, 'method', default_method) - - def get_full_url(self): - return self.full_url - - def set_proxy(self, host, type): - if self.type == 'https' and not self._tunnel_host: - self._tunnel_host = self.host - else: - self.type = type - self.selector = self.full_url - self.host = host - - def has_proxy(self): - return self.selector == self.full_url - - def add_header(self, key, val): - # useful for something
like authentication - self.headers[key.capitalize()] = val - - def add_unredirected_header(self, key, val): - # will not be added to a redirected request - self.unredirected_hdrs[key.capitalize()] = val - - def has_header(self, header_name): - return (header_name in self.headers or - header_name in self.unredirected_hdrs) - - def get_header(self, header_name, default=None): - return self.headers.get( - header_name, - self.unredirected_hdrs.get(header_name, default)) - - def remove_header(self, header_name): - self.headers.pop(header_name, None) - self.unredirected_hdrs.pop(header_name, None) - - def header_items(self): - hdrs = {**self.unredirected_hdrs, **self.headers} - return list(hdrs.items()) - -class OpenerDirector: - def __init__(self): - client_version = "Python-urllib/%s" % __version__ - self.addheaders = [('User-agent', client_version)] - # self.handlers is retained only for backward compatibility - self.handlers = [] - # manage the individual handlers - self.handle_open = {} - self.handle_error = {} - self.process_response = {} - self.process_request = {} - - def add_handler(self, handler): - if not hasattr(handler, "add_parent"): - raise TypeError("expected BaseHandler instance, got %r" % - type(handler)) - - added = False - for meth in dir(handler): - if meth in ["redirect_request", "do_open", "proxy_open"]: - # oops, coincidental match - continue - - i = meth.find("_") - protocol = meth[:i] - condition = meth[i+1:] - - if condition.startswith("error"): - j = condition.find("_") + i + 1 - kind = meth[j+1:] - try: - kind = int(kind) - except ValueError: - pass - lookup = self.handle_error.get(protocol, {}) - self.handle_error[protocol] = lookup - elif condition == "open": - kind = protocol - lookup = self.handle_open - elif condition == "response": - kind = protocol - lookup = self.process_response - elif condition == "request": - kind = protocol - lookup = self.process_request - else: - continue - - handlers = lookup.setdefault(kind, []) - if handlers: - bisect.insort(handlers, handler) - else: - handlers.append(handler) - added = True - - if added: - bisect.insort(self.handlers, handler) - handler.add_parent(self) - - def close(self): - # Only exists for backwards compatibility. - pass - - def _call_chain(self, chain, kind, meth_name, *args): - # Handlers raise an exception if no one else should try to handle - # the request, or return None if they can't but another handler - # could. Otherwise, they return the response. 
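-        # e.g. (illustrative) for kind 'http' and meth_name 'http_open',
-        # each registered handler's http_open(req) is tried in sorted
-        # order until one returns a non-None result.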
- handlers = chain.get(kind, ()) - for handler in handlers: - func = getattr(handler, meth_name) - result = func(*args) - if result is not None: - return result - - def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): - # accept a URL or a Request object - if isinstance(fullurl, str): - req = Request(fullurl, data) - else: - req = fullurl - if data is not None: - req.data = data - - req.timeout = timeout - protocol = req.type - - # pre-process request - meth_name = protocol+"_request" - for processor in self.process_request.get(protocol, []): - meth = getattr(processor, meth_name) - req = meth(req) - - sys.audit('urllib.Request', req.full_url, req.data, req.headers, req.get_method()) - response = self._open(req, data) - - # post-process response - meth_name = protocol+"_response" - for processor in self.process_response.get(protocol, []): - meth = getattr(processor, meth_name) - response = meth(req, response) - - return response - - def _open(self, req, data=None): - result = self._call_chain(self.handle_open, 'default', - 'default_open', req) - if result: - return result - - protocol = req.type - result = self._call_chain(self.handle_open, protocol, protocol + - '_open', req) - if result: - return result - - return self._call_chain(self.handle_open, 'unknown', - 'unknown_open', req) - - def error(self, proto, *args): - if proto in ('http', 'https'): - # XXX http[s] protocols are special-cased - dict = self.handle_error['http'] # https is not different than http - proto = args[2] # YUCK! - meth_name = 'http_error_%s' % proto - http_err = 1 - orig_args = args - else: - dict = self.handle_error - meth_name = proto + '_error' - http_err = 0 - args = (dict, proto, meth_name) + args - result = self._call_chain(*args) - if result: - return result - - if http_err: - args = (dict, 'default', 'http_error_default') + orig_args - return self._call_chain(*args) - -# XXX probably also want an abstract factory that knows when it makes -# sense to skip a superclass in favor of a subclass and when it might -# make sense to include both - -def build_opener(*handlers): - """Create an opener object from a list of handlers. - - The opener will use several default handlers, including support - for HTTP, FTP and when applicable HTTPS. - - If any of the handlers passed as arguments are subclasses of the - default handlers, the default handlers will not be used. - """ - opener = OpenerDirector() - default_classes = [ProxyHandler, UnknownHandler, HTTPHandler, - HTTPDefaultErrorHandler, HTTPRedirectHandler, - FTPHandler, FileHandler, HTTPErrorProcessor, - DataHandler] - if hasattr(http.client, "HTTPSConnection"): - default_classes.append(HTTPSHandler) - skip = set() - for klass in default_classes: - for check in handlers: - if isinstance(check, type): - if issubclass(check, klass): - skip.add(klass) - elif isinstance(check, klass): - skip.add(klass) - for klass in skip: - default_classes.remove(klass) - - for klass in default_classes: - opener.add_handler(klass()) - - for h in handlers: - if isinstance(h, type): - h = h() - opener.add_handler(h) - return opener - -class BaseHandler: - handler_order = 500 - - def add_parent(self, parent): - self.parent = parent - - def close(self): - # Only exists for backwards compatibility - pass - - def __lt__(self, other): - if not hasattr(other, "handler_order"): - # Try to preserve the old behavior of having custom classes - # inserted after default ones (works only for custom user - # classes which are not aware of handler_order). 
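-            # (a handler lacking handler_order compares as greater, so
-            # bisect.insort places it after this one)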
- return True - return self.handler_order < other.handler_order - - -class HTTPErrorProcessor(BaseHandler): - """Process HTTP error responses.""" - handler_order = 1000 # after all other processing - - def http_response(self, request, response): - code, msg, hdrs = response.code, response.msg, response.info() - - # According to RFC 2616, "2xx" code indicates that the client's - # request was successfully received, understood, and accepted. - if not (200 <= code < 300): - response = self.parent.error( - 'http', request, response, code, msg, hdrs) - - return response - - https_response = http_response - -class HTTPDefaultErrorHandler(BaseHandler): - def http_error_default(self, req, fp, code, msg, hdrs): - raise HTTPError(req.full_url, code, msg, hdrs, fp) - -class HTTPRedirectHandler(BaseHandler): - # maximum number of redirections to any single URL - # this is needed because of the state that cookies introduce - max_repeats = 4 - # maximum total number of redirections (regardless of URL) before - # assuming we're in a loop - max_redirections = 10 - - def redirect_request(self, req, fp, code, msg, headers, newurl): - """Return a Request or None in response to a redirect. - - This is called by the http_error_30x methods when a - redirection response is received. If a redirection should - take place, return a new Request to allow http_error_30x to - perform the redirect. Otherwise, raise HTTPError if no-one - else should try to handle this url. Return None if you can't - but another Handler might. - """ - m = req.get_method() - if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD") - or code in (301, 302, 303) and m == "POST")): - raise HTTPError(req.full_url, code, msg, headers, fp) - - # Strictly (according to RFC 2616), 301 or 302 in response to - # a POST MUST NOT cause a redirection without confirmation - # from the user (of urllib.request, in this case). In practice, - # essentially all clients do redirect in this case, so we do - # the same. - - # Be conciliant with URIs containing a space. This is mainly - # redundant with the more complete encoding done in http_error_302(), - # but it is kept for compatibility with other callers. - newurl = newurl.replace(' ', '%20') - - CONTENT_HEADERS = ("content-length", "content-type") - newheaders = {k: v for k, v in req.headers.items() - if k.lower() not in CONTENT_HEADERS} - return Request(newurl, - method="HEAD" if m == "HEAD" else "GET", - headers=newheaders, - origin_req_host=req.origin_req_host, - unverifiable=True) - - # Implementation note: To avoid the server sending us into an - # infinite loop, the request object needs to track what URLs we - # have already seen. Do this by adding a handler-specific - # attribute to the Request object. - def http_error_302(self, req, fp, code, msg, headers): - # Some servers (incorrectly) return multiple Location headers - # (so probably same goes for URI). Use first header. - if "location" in headers: - newurl = headers["location"] - elif "uri" in headers: - newurl = headers["uri"] - else: - return - - # fix a possible malformed URL - urlparts = urlparse(newurl) - - # For security reasons we don't allow redirection to anything other - # than http, https or ftp. 
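-        # e.g. (illustrative) a Location header of file:///etc/passwd or
-        # javascript:alert(1) is rejected by the check below.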
- - if urlparts.scheme not in ('http', 'https', 'ftp', ''): - raise HTTPError( - newurl, code, - "%s - Redirection to url '%s' is not allowed" % (msg, newurl), - headers, fp) - - if not urlparts.path and urlparts.netloc: - urlparts = list(urlparts) - urlparts[2] = "/" - newurl = urlunparse(urlparts) - - # http.client.parse_headers() decodes as ISO-8859-1. Recover the - # original bytes and percent-encode non-ASCII bytes, and any special - # characters such as the space. - newurl = quote( - newurl, encoding="iso-8859-1", safe=string.punctuation) - newurl = urljoin(req.full_url, newurl) - - # XXX Probably want to forget about the state of the current - # request, although that might interact poorly with other - # handlers that also use handler-specific request attributes - new = self.redirect_request(req, fp, code, msg, headers, newurl) - if new is None: - return - - # loop detection - # .redirect_dict has a key url if url was previously visited. - if hasattr(req, 'redirect_dict'): - visited = new.redirect_dict = req.redirect_dict - if (visited.get(newurl, 0) >= self.max_repeats or - len(visited) >= self.max_redirections): - raise HTTPError(req.full_url, code, - self.inf_msg + msg, headers, fp) - else: - visited = new.redirect_dict = req.redirect_dict = {} - visited[newurl] = visited.get(newurl, 0) + 1 - - # Don't close the fp until we are sure that we won't use it - # with HTTPError. - fp.read() - fp.close() - - return self.parent.open(new, timeout=req.timeout) - - http_error_301 = http_error_303 = http_error_307 = http_error_308 = http_error_302 - - inf_msg = "The HTTP server returned a redirect error that would " \ - "lead to an infinite loop.\n" \ - "The last 30x error message was:\n" - - -def _parse_proxy(proxy): - """Return (scheme, user, password, host/port) given a URL or an authority. - - If a URL is supplied, it must have an authority (host:port) component. - According to RFC 3986, having an authority component means the URL must - have two slashes after the scheme. - """ - scheme, r_scheme = _splittype(proxy) - if not r_scheme.startswith("/"): - # authority - scheme = None - authority = proxy - else: - # URL - if not r_scheme.startswith("//"): - raise ValueError("proxy URL with no authority: %r" % proxy) - # We have an authority, so for RFC 3986-compliant URLs (by ss 3. 
- # and 3.3.), path is empty or starts with '/' - if '@' in r_scheme: - host_separator = r_scheme.find('@') - end = r_scheme.find("/", host_separator) - else: - end = r_scheme.find("/", 2) - if end == -1: - end = None - authority = r_scheme[2:end] - userinfo, hostport = _splituser(authority) - if userinfo is not None: - user, password = _splitpasswd(userinfo) - else: - user = password = None - return scheme, user, password, hostport - -class ProxyHandler(BaseHandler): - # Proxies must be in front - handler_order = 100 - - def __init__(self, proxies=None): - if proxies is None: - proxies = getproxies() - assert hasattr(proxies, 'keys'), "proxies must be a mapping" - self.proxies = proxies - for type, url in proxies.items(): - type = type.lower() - setattr(self, '%s_open' % type, - lambda r, proxy=url, type=type, meth=self.proxy_open: - meth(r, proxy, type)) - - def proxy_open(self, req, proxy, type): - orig_type = req.type - proxy_type, user, password, hostport = _parse_proxy(proxy) - if proxy_type is None: - proxy_type = orig_type - - if req.host and proxy_bypass(req.host): - return None - - if user and password: - user_pass = '%s:%s' % (unquote(user), - unquote(password)) - creds = base64.b64encode(user_pass.encode()).decode("ascii") - req.add_header('Proxy-authorization', 'Basic ' + creds) - hostport = unquote(hostport) - req.set_proxy(hostport, proxy_type) - if orig_type == proxy_type or orig_type == 'https': - # let other handlers take care of it - return None - else: - # need to start over, because the other handlers don't - # grok the proxy's URL type - # e.g. if we have a constructor arg proxies like so: - # {'http': 'ftp://proxy.example.com'}, we may end up turning - # a request for http://acme.example.com/a into one for - # ftp://proxy.example.com/a - return self.parent.open(req, timeout=req.timeout) - -class HTTPPasswordMgr: - - def __init__(self): - self.passwd = {} - - def add_password(self, realm, uri, user, passwd): - # uri could be a single URI or a sequence - if isinstance(uri, str): - uri = [uri] - if realm not in self.passwd: - self.passwd[realm] = {} - for default_port in True, False: - reduced_uri = tuple( - self.reduce_uri(u, default_port) for u in uri) - self.passwd[realm][reduced_uri] = (user, passwd) - - def find_user_password(self, realm, authuri): - domains = self.passwd.get(realm, {}) - for default_port in True, False: - reduced_authuri = self.reduce_uri(authuri, default_port) - for uris, authinfo in domains.items(): - for uri in uris: - if self.is_suburi(uri, reduced_authuri): - return authinfo - return None, None - - def reduce_uri(self, uri, default_port=True): - """Accept authority or URI and extract only the authority and path.""" - # note HTTP URLs do not have a userinfo component - parts = urlsplit(uri) - if parts[1]: - # URI - scheme = parts[0] - authority = parts[1] - path = parts[2] or '/' - else: - # host or host:port - scheme = None - authority = uri - path = '/' - host, port = _splitport(authority) - if default_port and port is None and scheme is not None: - dport = {"http": 80, - "https": 443, - }.get(scheme) - if dport is not None: - authority = "%s:%d" % (host, dport) - return authority, path - - def is_suburi(self, base, test): - """Check if test is below base in a URI tree - - Both args must be URIs in reduced form. 
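-
-        e.g. (illustrative): base ('example.com:80', '/a/') covers
-        test ('example.com:80', '/a/b.html') but not ('example.com:80', '/c.html').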
- """ - if base == test: - return True - if base[0] != test[0]: - return False - prefix = base[1] - if prefix[-1:] != '/': - prefix += '/' - return test[1].startswith(prefix) - - -class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr): - - def find_user_password(self, realm, authuri): - user, password = HTTPPasswordMgr.find_user_password(self, realm, - authuri) - if user is not None: - return user, password - return HTTPPasswordMgr.find_user_password(self, None, authuri) - - -class HTTPPasswordMgrWithPriorAuth(HTTPPasswordMgrWithDefaultRealm): - - def __init__(self): - self.authenticated = {} - super().__init__() - - def add_password(self, realm, uri, user, passwd, is_authenticated=False): - self.update_authenticated(uri, is_authenticated) - # Add a default for prior auth requests - if realm is not None: - super().add_password(None, uri, user, passwd) - super().add_password(realm, uri, user, passwd) - - def update_authenticated(self, uri, is_authenticated=False): - # uri could be a single URI or a sequence - if isinstance(uri, str): - uri = [uri] - - for default_port in True, False: - for u in uri: - reduced_uri = self.reduce_uri(u, default_port) - self.authenticated[reduced_uri] = is_authenticated - - def is_authenticated(self, authuri): - for default_port in True, False: - reduced_authuri = self.reduce_uri(authuri, default_port) - for uri in self.authenticated: - if self.is_suburi(uri, reduced_authuri): - return self.authenticated[uri] - - -class AbstractBasicAuthHandler: - - # XXX this allows for multiple auth-schemes, but will stupidly pick - # the last one with a realm specified. - - # allow for double- and single-quoted realm values - # (single quotes are a violation of the RFC, but appear in the wild) - rx = re.compile('(?:^|,)' # start of the string or ',' - '[ \t]*' # optional whitespaces - '([^ \t,]+)' # scheme like "Basic" - '[ \t]+' # mandatory whitespaces - # realm=xxx - # realm='xxx' - # realm="xxx" - 'realm=(["\']?)([^"\']*)\\2', - re.I) - - # XXX could pre-emptively send auth info already accepted (RFC 2617, - # end of section 2, and section 1.2 immediately after "credentials" - # production). - - def __init__(self, password_mgr=None): - if password_mgr is None: - password_mgr = HTTPPasswordMgr() - self.passwd = password_mgr - self.add_password = self.passwd.add_password - - def _parse_realm(self, header): - # parse WWW-Authenticate header: accept multiple challenges per header - found_challenge = False - for mo in AbstractBasicAuthHandler.rx.finditer(header): - scheme, quote, realm = mo.groups() - if quote not in ['"', "'"]: - warnings.warn("Basic Auth Realm was unquoted", - UserWarning, 3) - - yield (scheme, realm) - - found_challenge = True - - if not found_challenge: - if header: - scheme = header.split()[0] - else: - scheme = '' - yield (scheme, None) - - def http_error_auth_reqed(self, authreq, host, req, headers): - # host may be an authority (without userinfo) or a URL with an - # authority - headers = headers.get_all(authreq) - if not headers: - # no header found - return - - unsupported = None - for header in headers: - for scheme, realm in self._parse_realm(header): - if scheme.lower() != 'basic': - unsupported = scheme - continue - - if realm is not None: - # Use the first matching Basic challenge. - # Ignore following challenges even if they use the Basic - # scheme. 
- return self.retry_http_basic_auth(host, req, realm) - - if unsupported is not None: - raise ValueError("AbstractBasicAuthHandler does not " - "support the following scheme: %r" - % (scheme,)) - - def retry_http_basic_auth(self, host, req, realm): - user, pw = self.passwd.find_user_password(realm, host) - if pw is not None: - raw = "%s:%s" % (user, pw) - auth = "Basic " + base64.b64encode(raw.encode()).decode("ascii") - if req.get_header(self.auth_header, None) == auth: - return None - req.add_unredirected_header(self.auth_header, auth) - return self.parent.open(req, timeout=req.timeout) - else: - return None - - def http_request(self, req): - if (not hasattr(self.passwd, 'is_authenticated') or - not self.passwd.is_authenticated(req.full_url)): - return req - - if not req.has_header('Authorization'): - user, passwd = self.passwd.find_user_password(None, req.full_url) - credentials = '{0}:{1}'.format(user, passwd).encode() - auth_str = base64.standard_b64encode(credentials).decode() - req.add_unredirected_header('Authorization', - 'Basic {}'.format(auth_str.strip())) - return req - - def http_response(self, req, response): - if hasattr(self.passwd, 'is_authenticated'): - if 200 <= response.code < 300: - self.passwd.update_authenticated(req.full_url, True) - else: - self.passwd.update_authenticated(req.full_url, False) - return response - - https_request = http_request - https_response = http_response - - - -class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): - - auth_header = 'Authorization' - - def http_error_401(self, req, fp, code, msg, headers): - url = req.full_url - response = self.http_error_auth_reqed('www-authenticate', - url, req, headers) - return response - - -class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): - - auth_header = 'Proxy-authorization' - - def http_error_407(self, req, fp, code, msg, headers): - # http_error_auth_reqed requires that there is no userinfo component in - # authority. Assume there isn't one, since urllib.request does not (and - # should not, RFC 3986 s. 3.2.1) support requests for URLs containing - # userinfo. - authority = req.host - response = self.http_error_auth_reqed('proxy-authenticate', - authority, req, headers) - return response - - -# Return n random bytes. -_randombytes = os.urandom - - -class AbstractDigestAuthHandler: - # Digest authentication is specified in RFC 2617. - - # XXX The client does not inspect the Authentication-Info header - # in a successful response. - - # XXX It should be possible to test this implementation against - # a mock server that just generates a static set of challenges. - - # XXX qop="auth-int" supports is shaky - - def __init__(self, passwd=None): - if passwd is None: - passwd = HTTPPasswordMgr() - self.passwd = passwd - self.add_password = self.passwd.add_password - self.retried = 0 - self.nonce_count = 0 - self.last_nonce = None - - def reset_retry_count(self): - self.retried = 0 - - def http_error_auth_reqed(self, auth_header, host, req, headers): - authreq = headers.get(auth_header, None) - if self.retried > 5: - # Don't fail endlessly - if we failed once, we'll probably - # fail a second time. Hm. Unless the Password Manager is - # prompting for the information. Crap. 
This isn't great - # but it's better than the current 'repeat until recursion - # depth exceeded' approach - raise HTTPError(req.full_url, 401, "digest auth failed", - headers, None) - else: - self.retried += 1 - if authreq: - scheme = authreq.split()[0] - if scheme.lower() == 'digest': - return self.retry_http_digest_auth(req, authreq) - elif scheme.lower() != 'basic': - raise ValueError("AbstractDigestAuthHandler does not support" - " the following scheme: '%s'" % scheme) - - def retry_http_digest_auth(self, req, auth): - token, challenge = auth.split(' ', 1) - chal = parse_keqv_list(filter(None, parse_http_list(challenge))) - auth = self.get_authorization(req, chal) - if auth: - auth_val = 'Digest %s' % auth - if req.headers.get(self.auth_header, None) == auth_val: - return None - req.add_unredirected_header(self.auth_header, auth_val) - resp = self.parent.open(req, timeout=req.timeout) - return resp - - def get_cnonce(self, nonce): - # The cnonce-value is an opaque - # quoted string value provided by the client and used by both client - # and server to avoid chosen plaintext attacks, to provide mutual - # authentication, and to provide some message integrity protection. - # This isn't a fabulous effort, but it's probably Good Enough. - s = "%s:%s:%s:" % (self.nonce_count, nonce, time.ctime()) - b = s.encode("ascii") + _randombytes(8) - dig = hashlib.sha1(b).hexdigest() - return dig[:16] - - def get_authorization(self, req, chal): - try: - realm = chal['realm'] - nonce = chal['nonce'] - qop = chal.get('qop') - algorithm = chal.get('algorithm', 'MD5') - # mod_digest doesn't send an opaque, even though it isn't - # supposed to be optional - opaque = chal.get('opaque', None) - except KeyError: - return None - - H, KD = self.get_algorithm_impls(algorithm) - if H is None: - return None - - user, pw = self.passwd.find_user_password(realm, req.full_url) - if user is None: - return None - - # XXX not implemented yet - if req.data is not None: - entdig = self.get_entity_digest(req.data, chal) - else: - entdig = None - - A1 = "%s:%s:%s" % (user, realm, pw) - A2 = "%s:%s" % (req.get_method(), - # XXX selector: what about proxies and full urls - req.selector) - # NOTE: As per RFC 2617, when server sends "auth,auth-int", the client could use either `auth` - # or `auth-int` to the response back. we use `auth` to send the response back. - if qop is None: - respdig = KD(H(A1), "%s:%s" % (nonce, H(A2))) - elif 'auth' in qop.split(','): - if nonce == self.last_nonce: - self.nonce_count += 1 - else: - self.nonce_count = 1 - self.last_nonce = nonce - ncvalue = '%08x' % self.nonce_count - cnonce = self.get_cnonce(nonce) - noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, 'auth', H(A2)) - respdig = KD(H(A1), noncebit) - else: - # XXX handle auth-int. - raise URLError("qop '%s' is not supported." % qop) - - # XXX should the partial digests be encoded too? 
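# A minimal, self-contained sketch of the qop="auth" case computed above
# (RFC 2617); the credentials, nonce, and cnonce values are illustrative.
import hashlib

H = lambda x: hashlib.md5(x.encode("ascii")).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))

A1 = "user:example.com:secret"   # username:realm:password
A2 = "GET:/index.html"           # method:selector
noncebit = "%s:%s:%s:%s:%s" % ("abc123", "00000001", "f00dcafe", "auth", H(A2))
respdig = KD(H(A1), noncebit)    # the value sent back as response="..."
print(respdig)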
- - base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ - 'response="%s"' % (user, realm, nonce, req.selector, - respdig) - if opaque: - base += ', opaque="%s"' % opaque - if entdig: - base += ', digest="%s"' % entdig - base += ', algorithm="%s"' % algorithm - if qop: - base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce) - return base - - def get_algorithm_impls(self, algorithm): - # lambdas assume digest modules are imported at the top level - if algorithm == 'MD5': - H = lambda x: hashlib.md5(x.encode("ascii")).hexdigest() - elif algorithm == 'SHA': - H = lambda x: hashlib.sha1(x.encode("ascii")).hexdigest() - # XXX MD5-sess - else: - raise ValueError("Unsupported digest authentication " - "algorithm %r" % algorithm) - KD = lambda s, d: H("%s:%s" % (s, d)) - return H, KD - - def get_entity_digest(self, data, chal): - # XXX not implemented yet - return None - - -class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): - """An authentication protocol defined by RFC 2069 - - Digest authentication improves on basic authentication because it - does not transmit passwords in the clear. - """ - - auth_header = 'Authorization' - handler_order = 490 # before Basic auth - - def http_error_401(self, req, fp, code, msg, headers): - host = urlparse(req.full_url)[1] - retry = self.http_error_auth_reqed('www-authenticate', - host, req, headers) - self.reset_retry_count() - return retry - - -class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): - - auth_header = 'Proxy-Authorization' - handler_order = 490 # before Basic auth - - def http_error_407(self, req, fp, code, msg, headers): - host = req.host - retry = self.http_error_auth_reqed('proxy-authenticate', - host, req, headers) - self.reset_retry_count() - return retry - -class AbstractHTTPHandler(BaseHandler): - - def __init__(self, debuglevel=None): - self._debuglevel = debuglevel if debuglevel is not None else http.client.HTTPConnection.debuglevel - - def set_http_debuglevel(self, level): - self._debuglevel = level - - def _get_content_length(self, request): - return http.client.HTTPConnection._get_content_length( - request.data, - request.get_method()) - - def do_request_(self, request): - host = request.host - if not host: - raise URLError('no host given') - - if request.data is not None: # POST - data = request.data - if isinstance(data, str): - msg = "POST data should be bytes, an iterable of bytes, " \ - "or a file object. It cannot be of type str." - raise TypeError(msg) - if not request.has_header('Content-type'): - request.add_unredirected_header( - 'Content-type', - 'application/x-www-form-urlencoded') - if (not request.has_header('Content-length') - and not request.has_header('Transfer-encoding')): - content_length = self._get_content_length(request) - if content_length is not None: - request.add_unredirected_header( - 'Content-length', str(content_length)) - else: - request.add_unredirected_header( - 'Transfer-encoding', 'chunked') - - sel_host = host - if request.has_proxy(): - scheme, sel = _splittype(request.selector) - sel_host, sel_path = _splithost(sel) - if not request.has_header('Host'): - request.add_unredirected_header('Host', sel_host) - for name, value in self.parent.addheaders: - name = name.capitalize() - if not request.has_header(name): - request.add_unredirected_header(name, value) - - return request - - def do_open(self, http_class, req, **http_conn_args): - """Return an HTTPResponse object for the request, using http_class. 
- - http_class must implement the HTTPConnection API from http.client. - """ - host = req.host - if not host: - raise URLError('no host given') - - # will parse host:port - h = http_class(host, timeout=req.timeout, **http_conn_args) - h.set_debuglevel(self._debuglevel) - - headers = dict(req.unredirected_hdrs) - headers.update({k: v for k, v in req.headers.items() - if k not in headers}) - - # TODO(jhylton): Should this be redesigned to handle - # persistent connections? - - # We want to make an HTTP/1.1 request, but the addinfourl - # class isn't prepared to deal with a persistent connection. - # It will try to read all remaining data from the socket, - # which will block while the server waits for the next request. - # So make sure the connection gets closed after the (only) - # request. - headers["Connection"] = "close" - headers = {name.title(): val for name, val in headers.items()} - - if req._tunnel_host: - tunnel_headers = {} - proxy_auth_hdr = "Proxy-Authorization" - if proxy_auth_hdr in headers: - tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr] - # Proxy-Authorization should not be sent to origin - # server. - del headers[proxy_auth_hdr] - h.set_tunnel(req._tunnel_host, headers=tunnel_headers) - - try: - try: - h.request(req.get_method(), req.selector, req.data, headers, - encode_chunked=req.has_header('Transfer-encoding')) - except OSError as err: # timeout error - raise URLError(err) - r = h.getresponse() - except: - h.close() - raise - - # If the server does not send us a 'Connection: close' header, - # HTTPConnection assumes the socket should be left open. Manually - # mark the socket to be closed when this response object goes away. - if h.sock: - h.sock.close() - h.sock = None - - r.url = req.get_full_url() - # This line replaces the .msg attribute of the HTTPResponse - # with .headers, because urllib clients expect the response to - # have the reason in .msg. It would be good to mark this - # attribute is deprecated and get then to use info() or - # .headers. 
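# For context, do_open() above is normally reached through an opener chain;
# a minimal sketch (the URL is illustrative and the request is commented out):
#
#     import urllib.request
#     opener = urllib.request.build_opener(urllib.request.HTTPHandler(debuglevel=1))
#     response = opener.open("http://example.com/")   # ends up in do_open()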
- r.msg = r.reason - return r - - -class HTTPHandler(AbstractHTTPHandler): - - def http_open(self, req): - return self.do_open(http.client.HTTPConnection, req) - - http_request = AbstractHTTPHandler.do_request_ - -if hasattr(http.client, 'HTTPSConnection'): - - class HTTPSHandler(AbstractHTTPHandler): - - def __init__(self, debuglevel=None, context=None, check_hostname=None): - debuglevel = debuglevel if debuglevel is not None else http.client.HTTPSConnection.debuglevel - AbstractHTTPHandler.__init__(self, debuglevel) - if context is None: - http_version = http.client.HTTPSConnection._http_vsn - context = http.client._create_https_context(http_version) - if check_hostname is not None: - context.check_hostname = check_hostname - self._context = context - - def https_open(self, req): - return self.do_open(http.client.HTTPSConnection, req, - context=self._context) - - https_request = AbstractHTTPHandler.do_request_ - - __all__.append('HTTPSHandler') - -class HTTPCookieProcessor(BaseHandler): - def __init__(self, cookiejar=None): - import http.cookiejar - if cookiejar is None: - cookiejar = http.cookiejar.CookieJar() - self.cookiejar = cookiejar - - def http_request(self, request): - self.cookiejar.add_cookie_header(request) - return request - - def http_response(self, request, response): - self.cookiejar.extract_cookies(response, request) - return response - - https_request = http_request - https_response = http_response - -class UnknownHandler(BaseHandler): - def unknown_open(self, req): - type = req.type - raise URLError('unknown url type: %s' % type) - -def parse_keqv_list(l): - """Parse list of key=value strings where keys are not duplicated.""" - parsed = {} - for elt in l: - k, v = elt.split('=', 1) - if v[0] == '"' and v[-1] == '"': - v = v[1:-1] - parsed[k] = v - return parsed - -def parse_http_list(s): - """Parse lists as described by RFC 2068 Section 2. - - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Neither commas nor quotes count if they are escaped. - Only double-quotes count, not single-quotes. 
- """ - res = [] - part = '' - - escape = quote = False - for cur in s: - if escape: - part += cur - escape = False - continue - if quote: - if cur == '\\': - escape = True - continue - elif cur == '"': - quote = False - part += cur - continue - - if cur == ',': - res.append(part) - part = '' - continue - - if cur == '"': - quote = True - - part += cur - - # append last part - if part: - res.append(part) - - return [part.strip() for part in res] - -class FileHandler(BaseHandler): - # Use local file or FTP depending on form of URL - def file_open(self, req): - url = req.selector - if url[:2] == '//' and url[2:3] != '/' and (req.host and - req.host != 'localhost'): - if not req.host in self.get_names(): - raise URLError("file:// scheme is supported only on localhost") - else: - return self.open_local_file(req) - - # names for the localhost - names = None - def get_names(self): - if FileHandler.names is None: - try: - FileHandler.names = tuple( - socket.gethostbyname_ex('localhost')[2] + - socket.gethostbyname_ex(socket.gethostname())[2]) - except socket.gaierror: - FileHandler.names = (socket.gethostbyname('localhost'),) - return FileHandler.names - - # not entirely sure what the rules are here - def open_local_file(self, req): - import email.utils - import mimetypes - host = req.host - filename = req.selector - localfile = url2pathname(filename) - try: - stats = os.stat(localfile) - size = stats.st_size - modified = email.utils.formatdate(stats.st_mtime, usegmt=True) - mtype = mimetypes.guess_type(filename)[0] - headers = email.message_from_string( - 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' % - (mtype or 'text/plain', size, modified)) - if host: - host, port = _splitport(host) - if not host or \ - (not port and _safe_gethostbyname(host) in self.get_names()): - if host: - origurl = 'file://' + host + filename - else: - origurl = 'file://' + filename - return addinfourl(open(localfile, 'rb'), headers, origurl) - except OSError as exp: - raise URLError(exp) - raise URLError('file not on local host') - -def _safe_gethostbyname(host): - try: - return socket.gethostbyname(host) - except socket.gaierror: - return None - -class FTPHandler(BaseHandler): - def ftp_open(self, req): - import ftplib - import mimetypes - host = req.host - if not host: - raise URLError('ftp error: no host given') - host, port = _splitport(host) - if port is None: - port = ftplib.FTP_PORT - else: - port = int(port) - - # username/password handling - user, host = _splituser(host) - if user: - user, passwd = _splitpasswd(user) - else: - passwd = None - host = unquote(host) - user = user or '' - passwd = passwd or '' - - try: - host = socket.gethostbyname(host) - except OSError as msg: - raise URLError(msg) - path, attrs = _splitattr(req.selector) - dirs = path.split('/') - dirs = list(map(unquote, dirs)) - dirs, file = dirs[:-1], dirs[-1] - if dirs and not dirs[0]: - dirs = dirs[1:] - fw = None - try: - fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout) - type = file and 'I' or 'D' - for attr in attrs: - attr, value = _splitvalue(attr) - if attr.lower() == 'type' and \ - value in ('a', 'A', 'i', 'I', 'd', 'D'): - type = value.upper() - fp, retrlen = fw.retrfile(file, type) - headers = "" - mtype = mimetypes.guess_type(req.full_url)[0] - if mtype: - headers += "Content-type: %s\n" % mtype - if retrlen is not None and retrlen >= 0: - headers += "Content-length: %d\n" % retrlen - headers = email.message_from_string(headers) - return addinfourl(fp, headers, req.full_url) - except Exception as 
exp: - if fw is not None and not fw.keepalive: - fw.close() - if isinstance(exp, ftplib.all_errors): - raise URLError(exp) from exp - raise - - def connect_ftp(self, user, passwd, host, port, dirs, timeout): - return ftpwrapper(user, passwd, host, port, dirs, timeout, - persistent=False) - -class CacheFTPHandler(FTPHandler): - # XXX would be nice to have pluggable cache strategies - # XXX this stuff is definitely not thread safe - def __init__(self): - self.cache = {} - self.timeout = {} - self.soonest = 0 - self.delay = 60 - self.max_conns = 16 - - def setTimeout(self, t): - self.delay = t - - def setMaxConns(self, m): - self.max_conns = m - - def connect_ftp(self, user, passwd, host, port, dirs, timeout): - key = user, host, port, '/'.join(dirs), timeout - conn = self.cache.get(key) - if conn is None or not conn.keepalive: - if conn is not None: - conn.close() - conn = self.cache[key] = ftpwrapper(user, passwd, host, port, - dirs, timeout) - self.timeout[key] = time.time() + self.delay - self.check_cache() - return conn - - def check_cache(self): - # first check for old ones - t = time.time() - if self.soonest <= t: - for k, v in list(self.timeout.items()): - if v < t: - self.cache[k].close() - del self.cache[k] - del self.timeout[k] - self.soonest = min(list(self.timeout.values())) - - # then check the size - if len(self.cache) == self.max_conns: - for k, v in list(self.timeout.items()): - if v == self.soonest: - del self.cache[k] - del self.timeout[k] - break - self.soonest = min(list(self.timeout.values())) - - def clear_cache(self): - for conn in self.cache.values(): - conn.close() - self.cache.clear() - self.timeout.clear() - -class DataHandler(BaseHandler): - def data_open(self, req): - # data URLs as specified in RFC 2397. - # - # ignores POSTed data - # - # syntax: - # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data - # mediatype := [ type "/" subtype ] *( ";" parameter ) - # data := *urlchar - # parameter := attribute "=" value - url = req.full_url - - scheme, data = url.split(":",1) - mediatype, data = data.split(",",1) - - # Disallow control characters within mediatype. - if re.search(r"[\x00-\x1F\x7F]", mediatype): - raise ValueError( - "Control characters not allowed in data: mediatype") - - # even base64 encoded data URLs might be quoted so unquote in any case: - data = unquote_to_bytes(data) - if mediatype.endswith(";base64"): - data = base64.decodebytes(data) - mediatype = mediatype[:-7] - - if not mediatype: - mediatype = "text/plain;charset=US-ASCII" - - headers = email.message_from_string("Content-type: %s\nContent-length: %d\n" % - (mediatype, len(data))) - - return addinfourl(io.BytesIO(data), headers, url) - - -# Code move from the old urllib module - -MAXFTPCACHE = 10 # Trim the ftp cache beyond this size - -# Helper for non-unix systems -if os.name == 'nt': - from nturl2path import url2pathname, pathname2url -else: - def url2pathname(pathname): - """OS-specific conversion from a relative URL of the 'file' scheme - to a file system path; not recommended for general use.""" - if pathname[:3] == '///': - # URL has an empty authority section, so the path begins on the - # third character. - pathname = pathname[2:] - elif pathname[:12] == '//localhost/': - # Skip past 'localhost' authority. 
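# (The slice on the following line drops the 11-character '//localhost'
# prefix while keeping the leading slash of the path. For example, on POSIX:
#     '///etc/hosts'           -> '/etc/hosts'
#     '//localhost/etc/hosts'  -> '/etc/hosts'
# The example paths are illustrative.)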
- pathname = pathname[11:] - encoding = sys.getfilesystemencoding() - errors = sys.getfilesystemencodeerrors() - return unquote(pathname, encoding=encoding, errors=errors) - - def pathname2url(pathname): - """OS-specific conversion from a file system path to a relative URL - of the 'file' scheme; not recommended for general use.""" - if pathname[:2] == '//': - # Add explicitly empty authority to avoid interpreting the path - # as authority. - pathname = '//' + pathname - encoding = sys.getfilesystemencoding() - errors = sys.getfilesystemencodeerrors() - return quote(pathname, encoding=encoding, errors=errors) - - -ftpcache = {} - - -class URLopener: - """Class to open URLs. - This is a class rather than just a subroutine because we may need - more than one set of global protocol-specific options. - Note -- this is a base class for those who don't want the - automatic handling of errors type 302 (relocated) and 401 - (authorization needed).""" - - __tempfiles = None - - version = "Python-urllib/%s" % __version__ - - # Constructor - def __init__(self, proxies=None, **x509): - msg = "%(class)s style of invoking requests is deprecated. " \ - "Use newer urlopen functions/methods" % {'class': self.__class__.__name__} - warnings.warn(msg, DeprecationWarning, stacklevel=3) - if proxies is None: - proxies = getproxies() - assert hasattr(proxies, 'keys'), "proxies must be a mapping" - self.proxies = proxies - self.key_file = x509.get('key_file') - self.cert_file = x509.get('cert_file') - self.addheaders = [('User-Agent', self.version), ('Accept', '*/*')] - self.__tempfiles = [] - self.__unlink = os.unlink # See cleanup() - self.tempcache = None - # Undocumented feature: if you assign {} to tempcache, - # it is used to cache files retrieved with - # self.retrieve(). This is not enabled by default - # since it does not work for changing documents (and I - # haven't got the logic to check expiration headers - # yet). - self.ftpcache = ftpcache - # Undocumented feature: you can use a different - # ftp cache by assigning to the .ftpcache member; - # in case you want logically independent URL openers - # XXX This is not threadsafe. Bah. - - def __del__(self): - self.close() - - def close(self): - self.cleanup() - - def cleanup(self): - # This code sometimes runs when the rest of this module - # has already been deleted, so it can't use any globals - # or import anything. - if self.__tempfiles: - for file in self.__tempfiles: - try: - self.__unlink(file) - except OSError: - pass - del self.__tempfiles[:] - if self.tempcache: - self.tempcache.clear() - - def addheader(self, *args): - """Add a header to be used by the HTTP interface only - e.g. 
u.addheader('Accept', 'sound/basic')""" - self.addheaders.append(args) - - # External interface - def open(self, fullurl, data=None): - """Use URLopener().open(file) instead of open(file, 'r').""" - fullurl = unwrap(_to_bytes(fullurl)) - fullurl = quote(fullurl, safe="%/:=&?~#+!$,;'@()*[]|") - if self.tempcache and fullurl in self.tempcache: - filename, headers = self.tempcache[fullurl] - fp = open(filename, 'rb') - return addinfourl(fp, headers, fullurl) - urltype, url = _splittype(fullurl) - if not urltype: - urltype = 'file' - if urltype in self.proxies: - proxy = self.proxies[urltype] - urltype, proxyhost = _splittype(proxy) - host, selector = _splithost(proxyhost) - url = (host, fullurl) # Signal special case to open_*() - else: - proxy = None - name = 'open_' + urltype - self.type = urltype - name = name.replace('-', '_') - if not hasattr(self, name) or name == 'open_local_file': - if proxy: - return self.open_unknown_proxy(proxy, fullurl, data) - else: - return self.open_unknown(fullurl, data) - try: - if data is None: - return getattr(self, name)(url) - else: - return getattr(self, name)(url, data) - except (HTTPError, URLError): - raise - except OSError as msg: - raise OSError('socket error', msg) from msg - - def open_unknown(self, fullurl, data=None): - """Overridable interface to open unknown URL type.""" - type, url = _splittype(fullurl) - raise OSError('url error', 'unknown url type', type) - - def open_unknown_proxy(self, proxy, fullurl, data=None): - """Overridable interface to open unknown URL type.""" - type, url = _splittype(fullurl) - raise OSError('url error', 'invalid proxy for %s' % type, proxy) - - # External interface - def retrieve(self, url, filename=None, reporthook=None, data=None): - """retrieve(url) returns (filename, headers) for a local object - or (tempfilename, headers) for a remote object.""" - url = unwrap(_to_bytes(url)) - if self.tempcache and url in self.tempcache: - return self.tempcache[url] - type, url1 = _splittype(url) - if filename is None and (not type or type == 'file'): - try: - fp = self.open_local_file(url1) - hdrs = fp.info() - fp.close() - return url2pathname(_splithost(url1)[1]), hdrs - except OSError: - pass - fp = self.open(url, data) - try: - headers = fp.info() - if filename: - tfp = open(filename, 'wb') - else: - garbage, path = _splittype(url) - garbage, path = _splithost(path or "") - path, garbage = _splitquery(path or "") - path, garbage = _splitattr(path or "") - suffix = os.path.splitext(path)[1] - (fd, filename) = tempfile.mkstemp(suffix) - self.__tempfiles.append(filename) - tfp = os.fdopen(fd, 'wb') - try: - result = filename, headers - if self.tempcache is not None: - self.tempcache[url] = result - bs = 1024*8 - size = -1 - read = 0 - blocknum = 0 - if "content-length" in headers: - size = int(headers["Content-Length"]) - if reporthook: - reporthook(blocknum, bs, size) - while block := fp.read(bs): - read += len(block) - tfp.write(block) - blocknum += 1 - if reporthook: - reporthook(blocknum, bs, size) - finally: - tfp.close() - finally: - fp.close() - - # raise exception if actual size does not match content-length header - if size >= 0 and read < size: - raise ContentTooShortError( - "retrieval incomplete: got only %i out of %i bytes" - % (read, size), result) - - return result - - # Each method named open_ knows how to open that type of URL - - def _open_generic_http(self, connection_factory, url, data): - """Make an HTTP connection using connection_class. 
- - This is an internal method that should be called from - open_http() or open_https(). - - Arguments: - - connection_factory should take a host name and return an - HTTPConnection instance. - - url is the url to retrieval or a host, relative-path pair. - - data is payload for a POST request or None. - """ - - user_passwd = None - proxy_passwd= None - if isinstance(url, str): - host, selector = _splithost(url) - if host: - user_passwd, host = _splituser(host) - host = unquote(host) - realhost = host - else: - host, selector = url - # check whether the proxy contains authorization information - proxy_passwd, host = _splituser(host) - # now we proceed with the url we want to obtain - urltype, rest = _splittype(selector) - url = rest - user_passwd = None - if urltype.lower() != 'http': - realhost = None - else: - realhost, rest = _splithost(rest) - if realhost: - user_passwd, realhost = _splituser(realhost) - if user_passwd: - selector = "%s://%s%s" % (urltype, realhost, rest) - if proxy_bypass(realhost): - host = realhost - - if not host: raise OSError('http error', 'no host given') - - if proxy_passwd: - proxy_passwd = unquote(proxy_passwd) - proxy_auth = base64.b64encode(proxy_passwd.encode()).decode('ascii') - else: - proxy_auth = None - - if user_passwd: - user_passwd = unquote(user_passwd) - auth = base64.b64encode(user_passwd.encode()).decode('ascii') - else: - auth = None - http_conn = connection_factory(host) - headers = {} - if proxy_auth: - headers["Proxy-Authorization"] = "Basic %s" % proxy_auth - if auth: - headers["Authorization"] = "Basic %s" % auth - if realhost: - headers["Host"] = realhost - - # Add Connection:close as we don't support persistent connections yet. - # This helps in closing the socket and avoiding ResourceWarning - - headers["Connection"] = "close" - - for header, value in self.addheaders: - headers[header] = value - - if data is not None: - headers["Content-Type"] = "application/x-www-form-urlencoded" - http_conn.request("POST", selector, data, headers) - else: - http_conn.request("GET", selector, headers=headers) - - try: - response = http_conn.getresponse() - except http.client.BadStatusLine: - # something went wrong with the HTTP status line - raise URLError("http protocol error: bad status line") - - # According to RFC 2616, "2xx" code indicates that the client's - # request was successfully received, understood, and accepted. - if 200 <= response.status < 300: - return addinfourl(response, response.msg, "http:" + url, - response.status) - else: - return self.http_error( - url, response.fp, - response.status, response.reason, response.msg, data) - - def open_http(self, url, data=None): - """Use HTTP protocol.""" - return self._open_generic_http(http.client.HTTPConnection, url, data) - - def http_error(self, url, fp, errcode, errmsg, headers, data=None): - """Handle http errors. 
- - Derived class can override this, or provide specific handlers - named http_error_DDD where DDD is the 3-digit error code.""" - # First check if there's a specific handler for this error - name = 'http_error_%d' % errcode - if hasattr(self, name): - method = getattr(self, name) - if data is None: - result = method(url, fp, errcode, errmsg, headers) - else: - result = method(url, fp, errcode, errmsg, headers, data) - if result: return result - return self.http_error_default(url, fp, errcode, errmsg, headers) - - def http_error_default(self, url, fp, errcode, errmsg, headers): - """Default error handler: close the connection and raise OSError.""" - fp.close() - raise HTTPError(url, errcode, errmsg, headers, None) - - if _have_ssl: - def _https_connection(self, host): - if self.key_file or self.cert_file: - http_version = http.client.HTTPSConnection._http_vsn - context = http.client._create_https_context(http_version) - context.load_cert_chain(self.cert_file, self.key_file) - # cert and key file means the user wants to authenticate. - # enable TLS 1.3 PHA implicitly even for custom contexts. - if context.post_handshake_auth is not None: - context.post_handshake_auth = True - else: - context = None - return http.client.HTTPSConnection(host, context=context) - - def open_https(self, url, data=None): - """Use HTTPS protocol.""" - return self._open_generic_http(self._https_connection, url, data) - - def open_file(self, url): - """Use local file or FTP depending on form of URL.""" - if not isinstance(url, str): - raise URLError('file error: proxy support for file protocol currently not implemented') - if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/': - raise ValueError("file:// scheme is supported only on localhost") - else: - return self.open_local_file(url) - - def open_local_file(self, url): - """Use local file.""" - import email.utils - import mimetypes - host, file = _splithost(url) - localname = url2pathname(file) - try: - stats = os.stat(localname) - except OSError as e: - raise URLError(e.strerror, e.filename) - size = stats.st_size - modified = email.utils.formatdate(stats.st_mtime, usegmt=True) - mtype = mimetypes.guess_type(url)[0] - headers = email.message_from_string( - 'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' % - (mtype or 'text/plain', size, modified)) - if not host: - urlfile = file - if file[:1] == '/': - urlfile = 'file://' + file - return addinfourl(open(localname, 'rb'), headers, urlfile) - host, port = _splitport(host) - if (not port - and socket.gethostbyname(host) in ((localhost(),) + thishost())): - urlfile = file - if file[:1] == '/': - urlfile = 'file://' + file - elif file[:2] == './': - raise ValueError("local file url may start with / or file:. 
Unknown url of type: %s" % url) - return addinfourl(open(localname, 'rb'), headers, urlfile) - raise URLError('local file error: not on local host') - - def open_ftp(self, url): - """Use FTP protocol.""" - if not isinstance(url, str): - raise URLError('ftp error: proxy support for ftp protocol currently not implemented') - import mimetypes - host, path = _splithost(url) - if not host: raise URLError('ftp error: no host given') - host, port = _splitport(host) - user, host = _splituser(host) - if user: user, passwd = _splitpasswd(user) - else: passwd = None - host = unquote(host) - user = unquote(user or '') - passwd = unquote(passwd or '') - host = socket.gethostbyname(host) - if not port: - import ftplib - port = ftplib.FTP_PORT - else: - port = int(port) - path, attrs = _splitattr(path) - path = unquote(path) - dirs = path.split('/') - dirs, file = dirs[:-1], dirs[-1] - if dirs and not dirs[0]: dirs = dirs[1:] - if dirs and not dirs[0]: dirs[0] = '/' - key = user, host, port, '/'.join(dirs) - # XXX thread unsafe! - if len(self.ftpcache) > MAXFTPCACHE: - # Prune the cache, rather arbitrarily - for k in list(self.ftpcache): - if k != key: - v = self.ftpcache[k] - del self.ftpcache[k] - v.close() - try: - if key not in self.ftpcache: - self.ftpcache[key] = \ - ftpwrapper(user, passwd, host, port, dirs) - if not file: type = 'D' - else: type = 'I' - for attr in attrs: - attr, value = _splitvalue(attr) - if attr.lower() == 'type' and \ - value in ('a', 'A', 'i', 'I', 'd', 'D'): - type = value.upper() - (fp, retrlen) = self.ftpcache[key].retrfile(file, type) - mtype = mimetypes.guess_type("ftp:" + url)[0] - headers = "" - if mtype: - headers += "Content-Type: %s\n" % mtype - if retrlen is not None and retrlen >= 0: - headers += "Content-Length: %d\n" % retrlen - headers = email.message_from_string(headers) - return addinfourl(fp, headers, "ftp:" + url) - except ftperrors() as exp: - raise URLError(f'ftp error: {exp}') from exp - - def open_data(self, url, data=None): - """Use "data" URL.""" - if not isinstance(url, str): - raise URLError('data error: proxy support for data protocol currently not implemented') - # ignore POSTed data - # - # syntax of data URLs: - # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data - # mediatype := [ type "/" subtype ] *( ";" parameter ) - # data := *urlchar - # parameter := attribute "=" value - try: - [type, data] = url.split(',', 1) - except ValueError: - raise OSError('data error', 'bad data URL') - if not type: - type = 'text/plain;charset=US-ASCII' - semi = type.rfind(';') - if semi >= 0 and '=' not in type[semi:]: - encoding = type[semi+1:] - type = type[:semi] - else: - encoding = '' - msg = [] - msg.append('Date: %s'%time.strftime('%a, %d %b %Y %H:%M:%S GMT', - time.gmtime(time.time()))) - msg.append('Content-type: %s' % type) - if encoding == 'base64': - # XXX is this encoding/decoding ok? 
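# The base64 branch below, shown in isolation with an illustrative payload:
#     import base64
#     base64.decodebytes(b"SGVsbG8=").decode("latin-1")   # -> 'Hello'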
- data = base64.decodebytes(data.encode('ascii')).decode('latin-1') - else: - data = unquote(data) - msg.append('Content-Length: %d' % len(data)) - msg.append('') - msg.append(data) - msg = '\n'.join(msg) - headers = email.message_from_string(msg) - f = io.StringIO(msg) - #f.fileno = None # needed for addinfourl - return addinfourl(f, headers, url) - - -class FancyURLopener(URLopener): - """Derived class with handlers for errors we can handle (perhaps).""" - - def __init__(self, *args, **kwargs): - URLopener.__init__(self, *args, **kwargs) - self.auth_cache = {} - self.tries = 0 - self.maxtries = 10 - - def http_error_default(self, url, fp, errcode, errmsg, headers): - """Default error handling -- don't raise an exception.""" - return addinfourl(fp, headers, "http:" + url, errcode) - - def http_error_302(self, url, fp, errcode, errmsg, headers, data=None): - """Error 302 -- relocated (temporarily).""" - self.tries += 1 - try: - if self.maxtries and self.tries >= self.maxtries: - if hasattr(self, "http_error_500"): - meth = self.http_error_500 - else: - meth = self.http_error_default - return meth(url, fp, 500, - "Internal Server Error: Redirect Recursion", - headers) - result = self.redirect_internal(url, fp, errcode, errmsg, - headers, data) - return result - finally: - self.tries = 0 - - def redirect_internal(self, url, fp, errcode, errmsg, headers, data): - if 'location' in headers: - newurl = headers['location'] - elif 'uri' in headers: - newurl = headers['uri'] - else: - return - fp.close() - - # In case the server sent a relative URL, join with original: - newurl = urljoin(self.type + ":" + url, newurl) - - urlparts = urlparse(newurl) - - # For security reasons, we don't allow redirection to anything other - # than http, https and ftp. - - # We are using newer HTTPError with older redirect_internal method - # This older method will get deprecated in 3.3 - - if urlparts.scheme not in ('http', 'https', 'ftp', ''): - raise HTTPError(newurl, errcode, - errmsg + - " Redirection to url '%s' is not allowed." % newurl, - headers, fp) - - return self.open(newurl) - - def http_error_301(self, url, fp, errcode, errmsg, headers, data=None): - """Error 301 -- also relocated (permanently).""" - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - - def http_error_303(self, url, fp, errcode, errmsg, headers, data=None): - """Error 303 -- also relocated (essentially identical to 302).""" - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - - def http_error_307(self, url, fp, errcode, errmsg, headers, data=None): - """Error 307 -- relocated, but turn POST into error.""" - if data is None: - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - else: - return self.http_error_default(url, fp, errcode, errmsg, headers) - - def http_error_308(self, url, fp, errcode, errmsg, headers, data=None): - """Error 308 -- relocated, but turn POST into error.""" - if data is None: - return self.http_error_301(url, fp, errcode, errmsg, headers, data) - else: - return self.http_error_default(url, fp, errcode, errmsg, headers) - - def http_error_401(self, url, fp, errcode, errmsg, headers, data=None, - retry=False): - """Error 401 -- authentication required. 
- This function supports Basic authentication only.""" - if 'www-authenticate' not in headers: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - stuff = headers['www-authenticate'] - match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) - if not match: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - scheme, realm = match.groups() - if scheme.lower() != 'basic': - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - if not retry: - URLopener.http_error_default(self, url, fp, errcode, errmsg, - headers) - name = 'retry_' + self.type + '_basic_auth' - if data is None: - return getattr(self,name)(url, realm) - else: - return getattr(self,name)(url, realm, data) - - def http_error_407(self, url, fp, errcode, errmsg, headers, data=None, - retry=False): - """Error 407 -- proxy authentication required. - This function supports Basic authentication only.""" - if 'proxy-authenticate' not in headers: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - stuff = headers['proxy-authenticate'] - match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) - if not match: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - scheme, realm = match.groups() - if scheme.lower() != 'basic': - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - if not retry: - URLopener.http_error_default(self, url, fp, errcode, errmsg, - headers) - name = 'retry_proxy_' + self.type + '_basic_auth' - if data is None: - return getattr(self,name)(url, realm) - else: - return getattr(self,name)(url, realm, data) - - def retry_proxy_http_basic_auth(self, url, realm, data=None): - host, selector = _splithost(url) - newurl = 'http://' + host + selector - proxy = self.proxies['http'] - urltype, proxyhost = _splittype(proxy) - proxyhost, proxyselector = _splithost(proxyhost) - i = proxyhost.find('@') + 1 - proxyhost = proxyhost[i:] - user, passwd = self.get_user_passwd(proxyhost, realm, i) - if not (user or passwd): return None - proxyhost = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), proxyhost) - self.proxies['http'] = 'http://' + proxyhost + proxyselector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_proxy_https_basic_auth(self, url, realm, data=None): - host, selector = _splithost(url) - newurl = 'https://' + host + selector - proxy = self.proxies['https'] - urltype, proxyhost = _splittype(proxy) - proxyhost, proxyselector = _splithost(proxyhost) - i = proxyhost.find('@') + 1 - proxyhost = proxyhost[i:] - user, passwd = self.get_user_passwd(proxyhost, realm, i) - if not (user or passwd): return None - proxyhost = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), proxyhost) - self.proxies['https'] = 'https://' + proxyhost + proxyselector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_http_basic_auth(self, url, realm, data=None): - host, selector = _splithost(url) - i = host.find('@') + 1 - host = host[i:] - user, passwd = self.get_user_passwd(host, realm, i) - if not (user or passwd): return None - host = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), host) - newurl = 'http://' + host + selector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_https_basic_auth(self, url, realm, data=None): - host, selector = _splithost(url) - i = host.find('@') + 1 - host = host[i:] - user, 
passwd = self.get_user_passwd(host, realm, i) - if not (user or passwd): return None - host = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), host) - newurl = 'https://' + host + selector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def get_user_passwd(self, host, realm, clear_cache=0): - key = realm + '@' + host.lower() - if key in self.auth_cache: - if clear_cache: - del self.auth_cache[key] - else: - return self.auth_cache[key] - user, passwd = self.prompt_user_passwd(host, realm) - if user or passwd: self.auth_cache[key] = (user, passwd) - return user, passwd - - def prompt_user_passwd(self, host, realm): - """Override this in a GUI environment!""" - import getpass - try: - user = input("Enter username for %s at %s: " % (realm, host)) - passwd = getpass.getpass("Enter password for %s in %s at %s: " % - (user, realm, host)) - return user, passwd - except KeyboardInterrupt: - print() - return None, None - - -# Utility functions - -_localhost = None -def localhost(): - """Return the IP address of the magic hostname 'localhost'.""" - global _localhost - if _localhost is None: - _localhost = socket.gethostbyname('localhost') - return _localhost - -_thishost = None -def thishost(): - """Return the IP addresses of the current host.""" - global _thishost - if _thishost is None: - try: - _thishost = tuple(socket.gethostbyname_ex(socket.gethostname())[2]) - except socket.gaierror: - _thishost = tuple(socket.gethostbyname_ex('localhost')[2]) - return _thishost - -_ftperrors = None -def ftperrors(): - """Return the set of errors raised by the FTP class.""" - global _ftperrors - if _ftperrors is None: - import ftplib - _ftperrors = ftplib.all_errors - return _ftperrors - -_noheaders = None -def noheaders(): - """Return an empty email Message object.""" - global _noheaders - if _noheaders is None: - _noheaders = email.message_from_string("") - return _noheaders - - -# Utility classes - -class ftpwrapper: - """Class used by open_ftp() for cache of open FTP connections.""" - - def __init__(self, user, passwd, host, port, dirs, timeout=None, - persistent=True): - self.user = user - self.passwd = passwd - self.host = host - self.port = port - self.dirs = dirs - self.timeout = timeout - self.refcount = 0 - self.keepalive = persistent - try: - self.init() - except: - self.close() - raise - - def init(self): - import ftplib - self.busy = 0 - self.ftp = ftplib.FTP() - self.ftp.connect(self.host, self.port, self.timeout) - self.ftp.login(self.user, self.passwd) - _target = '/'.join(self.dirs) - self.ftp.cwd(_target) - - def retrfile(self, file, type): - import ftplib - self.endtransfer() - if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1 - else: cmd = 'TYPE ' + type; isdir = 0 - try: - self.ftp.voidcmd(cmd) - except ftplib.all_errors: - self.init() - self.ftp.voidcmd(cmd) - conn = None - if file and not isdir: - # Try to retrieve as a file - try: - cmd = 'RETR ' + file - conn, retrlen = self.ftp.ntransfercmd(cmd) - except ftplib.error_perm as reason: - if str(reason)[:3] != '550': - raise URLError(f'ftp error: {reason}') from reason - if not conn: - # Set transfer mode to ASCII! - self.ftp.voidcmd('TYPE A') - # Try a directory listing. Verify that directory exists. 
- if file: - pwd = self.ftp.pwd() - try: - try: - self.ftp.cwd(file) - except ftplib.error_perm as reason: - raise URLError('ftp error: %r' % reason) from reason - finally: - self.ftp.cwd(pwd) - cmd = 'LIST ' + file - else: - cmd = 'LIST' - conn, retrlen = self.ftp.ntransfercmd(cmd) - self.busy = 1 - - ftpobj = addclosehook(conn.makefile('rb'), self.file_close) - self.refcount += 1 - conn.close() - # Pass back both a suitably decorated object and a retrieval length - return (ftpobj, retrlen) - - def endtransfer(self): - if not self.busy: - return - self.busy = 0 - try: - self.ftp.voidresp() - except ftperrors(): - pass - - def close(self): - self.keepalive = False - if self.refcount <= 0: - self.real_close() - - def file_close(self): - self.endtransfer() - self.refcount -= 1 - if self.refcount <= 0 and not self.keepalive: - self.real_close() - - def real_close(self): - self.endtransfer() - try: - self.ftp.close() - except ftperrors(): - pass - -# Proxy handling -def getproxies_environment(): - """Return a dictionary of scheme -> proxy server URL mappings. - - Scan the environment for variables named _proxy; - this seems to be the standard convention. If you need a - different way, you can pass a proxies dictionary to the - [Fancy]URLopener constructor. - """ - # in order to prefer lowercase variables, process environment in - # two passes: first matches any, second pass matches lowercase only - - # select only environment variables which end in (after making lowercase) _proxy - proxies = {} - environment = [] - for name in os.environ: - # fast screen underscore position before more expensive case-folding - if len(name) > 5 and name[-6] == "_" and name[-5:].lower() == "proxy": - value = os.environ[name] - proxy_name = name[:-6].lower() - environment.append((name, value, proxy_name)) - if value: - proxies[proxy_name] = value - # CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY - # (non-all-lowercase) as it may be set from the web server by a "Proxy:" - # header from the client - # If "proxy" is lowercase, it will still be used thanks to the next block - if 'REQUEST_METHOD' in os.environ: - proxies.pop('http', None) - for name, value, proxy_name in environment: - # not case-folded, checking here for lower-case env vars only - if name[-6:] == '_proxy': - if value: - proxies[proxy_name] = value - else: - proxies.pop(proxy_name, None) - return proxies - -def proxy_bypass_environment(host, proxies=None): - """Test if proxies should not be used for a particular host. - - Checks the proxy dict for the value of no_proxy, which should - be a list of comma separated DNS suffixes, or '*' for all hosts. - - """ - if proxies is None: - proxies = getproxies_environment() - # don't bypass, if no_proxy isn't specified - try: - no_proxy = proxies['no'] - except KeyError: - return False - # '*' is special case for always bypass - if no_proxy == '*': - return True - host = host.lower() - # strip port off host - hostonly, port = _splitport(host) - # check if the host ends with any of the DNS suffixes - for name in no_proxy.split(','): - name = name.strip() - if name: - name = name.lstrip('.') # ignore leading dots - name = name.lower() - if hostonly == name or host == name: - return True - name = '.' 
+ name - if hostonly.endswith(name) or host.endswith(name): - return True - # otherwise, don't bypass - return False - - -# This code tests an OSX specific data structure but is testable on all -# platforms -def _proxy_bypass_macosx_sysconf(host, proxy_settings): - """ - Return True iff this host shouldn't be accessed using a proxy - - This function uses the MacOSX framework SystemConfiguration - to fetch the proxy information. - - proxy_settings come from _scproxy._get_proxy_settings or get mocked ie: - { 'exclude_simple': bool, - 'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.1', '10.0/16'] - } - """ - from fnmatch import fnmatch - from ipaddress import AddressValueError, IPv4Address - - hostonly, port = _splitport(host) - - def ip2num(ipAddr): - parts = ipAddr.split('.') - parts = list(map(int, parts)) - if len(parts) != 4: - parts = (parts + [0, 0, 0, 0])[:4] - return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3] - - # Check for simple host names: - if '.' not in host: - if proxy_settings['exclude_simple']: - return True - - hostIP = None - try: - hostIP = int(IPv4Address(hostonly)) - except AddressValueError: - pass - - for value in proxy_settings.get('exceptions', ()): - # Items in the list are strings like these: *.local, 169.254/16 - if not value: continue - - m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value) - if m is not None and hostIP is not None: - base = ip2num(m.group(1)) - mask = m.group(2) - if mask is None: - mask = 8 * (m.group(1).count('.') + 1) - else: - mask = int(mask[1:]) - - if mask < 0 or mask > 32: - # System libraries ignore invalid prefix lengths - continue - - mask = 32 - mask - - if (hostIP >> mask) == (base >> mask): - return True - - elif fnmatch(host, value): - return True - - return False - - -# Same as _proxy_bypass_macosx_sysconf, testable on all platforms -def _proxy_bypass_winreg_override(host, override): - """Return True if the host should bypass the proxy server. - - The proxy override list is obtained from the Windows - Internet settings proxy override registry value. - - An example of a proxy override value is: - "www.example.com;*.example.net; 192.168.0.1" - """ - from fnmatch import fnmatch - - host, _ = _splitport(host) - proxy_override = override.split(';') - for test in proxy_override: - test = test.strip() - # "" should bypass the proxy server for all intranet addresses - if test == '': - if '.' not in host: - return True - elif fnmatch(host, test): - return True - return False - - -if sys.platform == 'darwin': - from _scproxy import _get_proxy_settings, _get_proxies - - def proxy_bypass_macosx_sysconf(host): - proxy_settings = _get_proxy_settings() - return _proxy_bypass_macosx_sysconf(host, proxy_settings) - - def getproxies_macosx_sysconf(): - """Return a dictionary of scheme -> proxy server URL mappings. - - This function uses the MacOSX framework SystemConfiguration - to fetch the proxy information. - """ - return _get_proxies() - - - - def proxy_bypass(host): - """Return True, if host should be bypassed. - - Checks proxy settings gathered from the environment, if specified, - or from the MacOSX framework SystemConfiguration. - - """ - proxies = getproxies_environment() - if proxies: - return proxy_bypass_environment(host, proxies) - else: - return proxy_bypass_macosx_sysconf(host) - - def getproxies(): - return getproxies_environment() or getproxies_macosx_sysconf() - - -elif os.name == 'nt': - def getproxies_registry(): - """Return a dictionary of scheme -> proxy server URL mappings. 
- - Win32 uses the registry to store proxies. - - """ - proxies = {} - try: - import winreg - except ImportError: - # Std module, so should be around - but you never know! - return proxies - try: - internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') - proxyEnable = winreg.QueryValueEx(internetSettings, - 'ProxyEnable')[0] - if proxyEnable: - # Returned as Unicode but problems if not converted to ASCII - proxyServer = str(winreg.QueryValueEx(internetSettings, - 'ProxyServer')[0]) - if '=' not in proxyServer and ';' not in proxyServer: - # Use one setting for all protocols. - proxyServer = 'http={0};https={0};ftp={0}'.format(proxyServer) - for p in proxyServer.split(';'): - protocol, address = p.split('=', 1) - # See if address has a type:// prefix - if not re.match('(?:[^/:]+)://', address): - # Add type:// prefix to address without specifying type - if protocol in ('http', 'https', 'ftp'): - # The default proxy type of Windows is HTTP - address = 'http://' + address - elif protocol == 'socks': - address = 'socks://' + address - proxies[protocol] = address - # Use SOCKS proxy for HTTP(S) protocols - if proxies.get('socks'): - # The default SOCKS proxy type of Windows is SOCKS4 - address = re.sub(r'^socks://', 'socks4://', proxies['socks']) - proxies['http'] = proxies.get('http') or address - proxies['https'] = proxies.get('https') or address - internetSettings.Close() - except (OSError, ValueError, TypeError): - # Either registry key not found etc, or the value in an - # unexpected format. - # proxies already set up to be empty so nothing to do - pass - return proxies - - def getproxies(): - """Return a dictionary of scheme -> proxy server URL mappings. - - Returns settings gathered from the environment, if specified, - or the registry. - - """ - return getproxies_environment() or getproxies_registry() - - def proxy_bypass_registry(host): - try: - import winreg - except ImportError: - # Std modules, so should be around - but you never know! - return False - try: - internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') - proxyEnable = winreg.QueryValueEx(internetSettings, - 'ProxyEnable')[0] - proxyOverride = str(winreg.QueryValueEx(internetSettings, - 'ProxyOverride')[0]) - # ^^^^ Returned as Unicode but problems if not converted to ASCII - except OSError: - return False - if not proxyEnable or not proxyOverride: - return False - return _proxy_bypass_winreg_override(host, proxyOverride) - - def proxy_bypass(host): - """Return True, if host should be bypassed. - - Checks proxy settings gathered from the environment, if specified, - or the registry. 
- - """ - proxies = getproxies_environment() - if proxies: - return proxy_bypass_environment(host, proxies) - else: - return proxy_bypass_registry(host) - -else: - # By default use environment variables - getproxies = getproxies_environment - proxy_bypass = proxy_bypass_environment diff --git a/Python313_13_x86_Template/Lib/urllib/robotparser.py b/Python313_13_x86_Template/Lib/urllib/robotparser.py deleted file mode 100644 index 63689816..00000000 --- a/Python313_13_x86_Template/Lib/urllib/robotparser.py +++ /dev/null @@ -1,286 +0,0 @@ -""" robotparser.py - - Copyright (C) 2000 Bastian Kleineidam - - You can choose between two licenses when using this package: - 1) GNU GPLv2 - 2) PSF license for Python 2.2 - - The robots.txt Exclusion Protocol is implemented as specified in - http://www.robotstxt.org/norobots-rfc.txt -""" - -import collections -import re -import urllib.error -import urllib.parse -import urllib.request - -__all__ = ["RobotFileParser"] - -RequestRate = collections.namedtuple("RequestRate", "requests seconds") - - -def normalize(path): - unquoted = urllib.parse.unquote(path, errors='surrogateescape') - return urllib.parse.quote(unquoted, errors='surrogateescape') - -def normalize_path(path): - path, sep, query = path.partition('?') - path = normalize(path) - if sep: - query = re.sub(r'[^=&]+', lambda m: normalize(m[0]), query) - path += '?' + query - return path - - -class RobotFileParser: - """ This class provides a set of methods to read, parse and answer - questions about a single robots.txt file. - - """ - - def __init__(self, url=''): - self.entries = [] - self.sitemaps = [] - self.default_entry = None - self.disallow_all = False - self.allow_all = False - self.set_url(url) - self.last_checked = 0 - - def mtime(self): - """Returns the time the robots.txt file was last fetched. - - This is useful for long-running web spiders that need to - check for new robots.txt files periodically. - - """ - return self.last_checked - - def modified(self): - """Sets the time the robots.txt file was last fetched to the - current time. - - """ - import time - self.last_checked = time.time() - - def set_url(self, url): - """Sets the URL referring to a robots.txt file.""" - self.url = url - self.host, self.path = urllib.parse.urlsplit(url)[1:3] - - def read(self): - """Reads the robots.txt URL and feeds it to the parser.""" - try: - f = urllib.request.urlopen(self.url) - except urllib.error.HTTPError as err: - if err.code in (401, 403): - self.disallow_all = True - elif err.code >= 400 and err.code < 500: - self.allow_all = True - err.close() - else: - raw = f.read() - self.parse(raw.decode("utf-8", "surrogateescape").splitlines()) - - def _add_entry(self, entry): - if "*" in entry.useragents: - # the default entry is considered last - if self.default_entry is None: - # the first default entry wins - self.default_entry = entry - else: - self.entries.append(entry) - - def parse(self, lines): - """Parse the input lines from a robots.txt file. - - We allow that a user-agent: line is not preceded by - one or more blank lines. 
- """ - # states: - # 0: start state - # 1: saw user-agent line - # 2: saw an allow or disallow line - state = 0 - entry = Entry() - - self.modified() - for line in lines: - if not line: - if state == 1: - entry = Entry() - state = 0 - elif state == 2: - self._add_entry(entry) - entry = Entry() - state = 0 - # remove optional comment and strip line - i = line.find('#') - if i >= 0: - line = line[:i] - line = line.strip() - if not line: - continue - line = line.split(':', 1) - if len(line) == 2: - line[0] = line[0].strip().lower() - line[1] = line[1].strip() - if line[0] == "user-agent": - if state == 2: - self._add_entry(entry) - entry = Entry() - entry.useragents.append(line[1]) - state = 1 - elif line[0] == "disallow": - if state != 0: - entry.rulelines.append(RuleLine(line[1], False)) - state = 2 - elif line[0] == "allow": - if state != 0: - entry.rulelines.append(RuleLine(line[1], True)) - state = 2 - elif line[0] == "crawl-delay": - if state != 0: - # before trying to convert to int we need to make - # sure that robots.txt has valid syntax otherwise - # it will crash - if line[1].strip().isdigit(): - entry.delay = int(line[1]) - state = 2 - elif line[0] == "request-rate": - if state != 0: - numbers = line[1].split('/') - # check if all values are sane - if (len(numbers) == 2 and numbers[0].strip().isdigit() - and numbers[1].strip().isdigit()): - entry.req_rate = RequestRate(int(numbers[0]), int(numbers[1])) - state = 2 - elif line[0] == "sitemap": - # According to http://www.sitemaps.org/protocol.html - # "This directive is independent of the user-agent line, - # so it doesn't matter where you place it in your file." - # Therefore we do not change the state of the parser. - self.sitemaps.append(line[1]) - if state == 2: - self._add_entry(entry) - - def can_fetch(self, useragent, url): - """using the parsed robots.txt decide if useragent can fetch url""" - if self.disallow_all: - return False - if self.allow_all: - return True - # Until the robots.txt file has been read or found not - # to exist, we must assume that no url is allowable. - # This prevents false positives when a user erroneously - # calls can_fetch() before calling read(). 
- if not self.last_checked: - return False - # search for given user agent matches - # the first match counts - parsed_url = urllib.parse.urlsplit(url) - url = urllib.parse.urlunsplit(('', '', *parsed_url[2:])) - url = normalize_path(url) - if not url: - url = "/" - for entry in self.entries: - if entry.applies_to(useragent): - return entry.allowance(url) - # try the default entry last - if self.default_entry: - return self.default_entry.allowance(url) - # agent not found ==> access granted - return True - - def crawl_delay(self, useragent): - if not self.mtime(): - return None - for entry in self.entries: - if entry.applies_to(useragent): - return entry.delay - if self.default_entry: - return self.default_entry.delay - return None - - def request_rate(self, useragent): - if not self.mtime(): - return None - for entry in self.entries: - if entry.applies_to(useragent): - return entry.req_rate - if self.default_entry: - return self.default_entry.req_rate - return None - - def site_maps(self): - if not self.sitemaps: - return None - return self.sitemaps - - def __str__(self): - entries = self.entries - if self.default_entry is not None: - entries = entries + [self.default_entry] - return '\n\n'.join(map(str, entries)) - -class RuleLine: - """A rule line is a single "Allow:" (allowance==True) or "Disallow:" - (allowance==False) followed by a path.""" - def __init__(self, path, allowance): - if path == '' and not allowance: - # an empty value means allow all - allowance = True - self.path = normalize_path(path) - self.allowance = allowance - - def applies_to(self, filename): - return self.path == "*" or filename.startswith(self.path) - - def __str__(self): - return ("Allow" if self.allowance else "Disallow") + ": " + self.path - - -class Entry: - """An entry has one or more user-agents and zero or more rulelines""" - def __init__(self): - self.useragents = [] - self.rulelines = [] - self.delay = None - self.req_rate = None - - def __str__(self): - ret = [] - for agent in self.useragents: - ret.append(f"User-agent: {agent}") - if self.delay is not None: - ret.append(f"Crawl-delay: {self.delay}") - if self.req_rate is not None: - rate = self.req_rate - ret.append(f"Request-rate: {rate.requests}/{rate.seconds}") - ret.extend(map(str, self.rulelines)) - return '\n'.join(ret) - - def applies_to(self, useragent): - """check if this entry applies to the specified agent""" - # split the name token and make it lower case - useragent = useragent.split("/")[0].lower() - for agent in self.useragents: - if agent == '*': - # we have the catch-all agent - return True - agent = agent.lower() - if agent in useragent: - return True - return False - - def allowance(self, filename): - """Preconditions: - - our agent applies to this entry - - filename is URL encoded""" - for line in self.rulelines: - if line.applies_to(filename): - return line.allowance - return True diff --git a/Python313_13_x86_Template/Lib/uuid.py b/Python313_13_x86_Template/Lib/uuid.py deleted file mode 100644 index 55f46eb5..00000000 --- a/Python313_13_x86_Template/Lib/uuid.py +++ /dev/null @@ -1,784 +0,0 @@ -r"""UUID objects (universally unique identifiers) according to RFC 4122. - -This module provides immutable UUID objects (class UUID) and the functions -uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5 -UUIDs as specified in RFC 4122. - -If all you want is a unique ID, you should probably call uuid1() or uuid4(). 
-Note that uuid1() may compromise privacy since it creates a UUID containing -the computer's network address. uuid4() creates a random UUID. - -Typical usage: - - >>> import uuid - - # make a UUID based on the host ID and current time - >>> uuid.uuid1() # doctest: +SKIP - UUID('a8098c1a-f86e-11da-bd1a-00112444be1e') - - # make a UUID using an MD5 hash of a namespace UUID and a name - >>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org') - UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e') - - # make a random UUID - >>> uuid.uuid4() # doctest: +SKIP - UUID('16fd2706-8baf-433b-82eb-8c7fada847da') - - # make a UUID using a SHA-1 hash of a namespace UUID and a name - >>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org') - UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d') - - # make a UUID from a string of hex digits (braces and hyphens ignored) - >>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}') - - # convert a UUID to a string of hex digits in standard form - >>> str(x) - '00010203-0405-0607-0809-0a0b0c0d0e0f' - - # get the raw 16 bytes of the UUID - >>> x.bytes - b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f' - - # make a UUID from a 16-byte string - >>> uuid.UUID(bytes=x.bytes) - UUID('00010203-0405-0607-0809-0a0b0c0d0e0f') -""" - -import os -import sys - -from enum import Enum, _simple_enum - - -__author__ = 'Ka-Ping Yee ' - -# The recognized platforms - known behaviors -if sys.platform in {'win32', 'darwin', 'emscripten', 'wasi'}: - _AIX = _LINUX = False -elif sys.platform == 'linux': - _LINUX = True - _AIX = False -else: - import platform - _platform_system = platform.system() - _AIX = _platform_system == 'AIX' - _LINUX = _platform_system in ('Linux', 'Android') - -_MAC_DELIM = b':' -_MAC_OMITS_LEADING_ZEROES = False -if _AIX: - _MAC_DELIM = b'.' - _MAC_OMITS_LEADING_ZEROES = True - -RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [ - 'reserved for NCS compatibility', 'specified in RFC 4122', - 'reserved for Microsoft compatibility', 'reserved for future definition'] - -int_ = int # The built-in int type -bytes_ = bytes # The built-in bytes type - - -@_simple_enum(Enum) -class SafeUUID: - safe = 0 - unsafe = -1 - unknown = None - - -class UUID: - """Instances of the UUID class represent UUIDs as specified in RFC 4122. - UUID objects are immutable, hashable, and usable as dictionary keys. - Converting a UUID to a string with str() yields something in the form - '12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts - five possible forms: a similar string of hexadecimal digits, or a tuple - of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and - 48-bit values respectively) as an argument named 'fields', or a string - of 16 bytes (with all the integer fields in big-endian order) as an - argument named 'bytes', or a string of 16 bytes (with the first three - fields in little-endian order) as an argument named 'bytes_le', or a - single 128-bit integer as an argument named 'int'. 
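# Illustrative sketch, not part of the deleted file: the five constructor
# forms just enumerated all denote the same UUID value. The literals below
# mirror the equivalence example given in the __init__ docstring further down.
import uuid

u = uuid.UUID('{12345678-1234-5678-1234-567812345678}')
assert u == uuid.UUID('12345678123456781234567812345678')
assert u == uuid.UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
assert u == uuid.UUID(bytes=b'\x12\x34\x56\x78' * 4)
assert u == uuid.UUID(fields=(0x12345678, 0x1234, 0x5678,
                              0x12, 0x34, 0x567812345678))
assert u == uuid.UUID(int=0x12345678123456781234567812345678)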
- - UUIDs have these read-only attributes: - - bytes the UUID as a 16-byte string (containing the six - integer fields in big-endian byte order) - - bytes_le the UUID as a 16-byte string (with time_low, time_mid, - and time_hi_version in little-endian byte order) - - fields a tuple of the six integer fields of the UUID, - which are also available as six individual attributes - and two derived attributes: - - time_low the first 32 bits of the UUID - time_mid the next 16 bits of the UUID - time_hi_version the next 16 bits of the UUID - clock_seq_hi_variant the next 8 bits of the UUID - clock_seq_low the next 8 bits of the UUID - node the last 48 bits of the UUID - - time the 60-bit timestamp - clock_seq the 14-bit sequence number - - hex the UUID as a 32-character hexadecimal string - - int the UUID as a 128-bit integer - - urn the UUID as a URN as specified in RFC 4122 - - variant the UUID variant (one of the constants RESERVED_NCS, - RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE) - - version the UUID version number (1 through 5, meaningful only - when the variant is RFC_4122) - - is_safe An enum indicating whether the UUID has been generated in - a way that is safe for multiprocessing applications, via - uuid_generate_time_safe(3). - """ - - __slots__ = ('int', 'is_safe', '__weakref__') - - def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, - int=None, version=None, - *, is_safe=SafeUUID.unknown): - r"""Create a UUID from either a string of 32 hexadecimal digits, - a string of 16 bytes as the 'bytes' argument, a string of 16 bytes - in little-endian order as the 'bytes_le' argument, a tuple of six - integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version, - 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as - the 'fields' argument, or a single 128-bit integer as the 'int' - argument. When a string of hex digits is given, curly braces, - hyphens, and a URN prefix are all optional. For example, these - expressions all yield the same UUID: - - UUID('{12345678-1234-5678-1234-567812345678}') - UUID('12345678123456781234567812345678') - UUID('urn:uuid:12345678-1234-5678-1234-567812345678') - UUID(bytes='\x12\x34\x56\x78'*4) - UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' + - '\x12\x34\x56\x78\x12\x34\x56\x78') - UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678)) - UUID(int=0x12345678123456781234567812345678) - - Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must - be given. The 'version' argument is optional; if given, the resulting - UUID will have its variant and version set according to RFC 4122, - overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. - - is_safe is an enum exposed as an attribute on the instance. It - indicates whether the UUID has been generated in a way that is safe - for multiprocessing applications, via uuid_generate_time_safe(3). 
- """ - - if [hex, bytes, bytes_le, fields, int].count(None) != 4: - raise TypeError('one of the hex, bytes, bytes_le, fields, ' - 'or int arguments must be given') - if hex is not None: - hex = hex.replace('urn:', '').replace('uuid:', '') - hex = hex.strip('{}').replace('-', '') - if len(hex) != 32: - raise ValueError('badly formed hexadecimal UUID string') - int = int_(hex, 16) - if bytes_le is not None: - if len(bytes_le) != 16: - raise ValueError('bytes_le is not a 16-char string') - bytes = (bytes_le[4-1::-1] + bytes_le[6-1:4-1:-1] + - bytes_le[8-1:6-1:-1] + bytes_le[8:]) - if bytes is not None: - if len(bytes) != 16: - raise ValueError('bytes is not a 16-char string') - assert isinstance(bytes, bytes_), repr(bytes) - int = int_.from_bytes(bytes) # big endian - if fields is not None: - if len(fields) != 6: - raise ValueError('fields is not a 6-tuple') - (time_low, time_mid, time_hi_version, - clock_seq_hi_variant, clock_seq_low, node) = fields - if not 0 <= time_low < 1<<32: - raise ValueError('field 1 out of range (need a 32-bit value)') - if not 0 <= time_mid < 1<<16: - raise ValueError('field 2 out of range (need a 16-bit value)') - if not 0 <= time_hi_version < 1<<16: - raise ValueError('field 3 out of range (need a 16-bit value)') - if not 0 <= clock_seq_hi_variant < 1<<8: - raise ValueError('field 4 out of range (need an 8-bit value)') - if not 0 <= clock_seq_low < 1<<8: - raise ValueError('field 5 out of range (need an 8-bit value)') - if not 0 <= node < 1<<48: - raise ValueError('field 6 out of range (need a 48-bit value)') - clock_seq = (clock_seq_hi_variant << 8) | clock_seq_low - int = ((time_low << 96) | (time_mid << 80) | - (time_hi_version << 64) | (clock_seq << 48) | node) - if int is not None: - if not 0 <= int < 1<<128: - raise ValueError('int is out of range (need a 128-bit value)') - if version is not None: - if not 1 <= version <= 5: - raise ValueError('illegal version number') - # Set the variant to RFC 4122. - int &= ~(0xc000 << 48) - int |= 0x8000 << 48 - # Set the version number. - int &= ~(0xf000 << 64) - int |= version << 76 - object.__setattr__(self, 'int', int) - object.__setattr__(self, 'is_safe', is_safe) - - def __getstate__(self): - d = {'int': self.int} - if self.is_safe != SafeUUID.unknown: - # is_safe is a SafeUUID instance. Return just its value, so that - # it can be un-pickled in older Python versions without SafeUUID. - d['is_safe'] = self.is_safe.value - return d - - def __setstate__(self, state): - object.__setattr__(self, 'int', state['int']) - # is_safe was added in 3.7; it is also omitted when it is "unknown" - object.__setattr__(self, 'is_safe', - SafeUUID(state['is_safe']) - if 'is_safe' in state else SafeUUID.unknown) - - def __eq__(self, other): - if isinstance(other, UUID): - return self.int == other.int - return NotImplemented - - # Q. What's the value of being able to sort UUIDs? - # A. Use them as keys in a B-Tree or similar mapping. 
- - def __lt__(self, other): - if isinstance(other, UUID): - return self.int < other.int - return NotImplemented - - def __gt__(self, other): - if isinstance(other, UUID): - return self.int > other.int - return NotImplemented - - def __le__(self, other): - if isinstance(other, UUID): - return self.int <= other.int - return NotImplemented - - def __ge__(self, other): - if isinstance(other, UUID): - return self.int >= other.int - return NotImplemented - - def __hash__(self): - return hash(self.int) - - def __int__(self): - return self.int - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, str(self)) - - def __setattr__(self, name, value): - raise TypeError('UUID objects are immutable') - - def __str__(self): - hex = '%032x' % self.int - return '%s-%s-%s-%s-%s' % ( - hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:]) - - @property - def bytes(self): - return self.int.to_bytes(16) # big endian - - @property - def bytes_le(self): - bytes = self.bytes - return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] + - bytes[8:]) - - @property - def fields(self): - return (self.time_low, self.time_mid, self.time_hi_version, - self.clock_seq_hi_variant, self.clock_seq_low, self.node) - - @property - def time_low(self): - return self.int >> 96 - - @property - def time_mid(self): - return (self.int >> 80) & 0xffff - - @property - def time_hi_version(self): - return (self.int >> 64) & 0xffff - - @property - def clock_seq_hi_variant(self): - return (self.int >> 56) & 0xff - - @property - def clock_seq_low(self): - return (self.int >> 48) & 0xff - - @property - def time(self): - return (((self.time_hi_version & 0x0fff) << 48) | - (self.time_mid << 32) | self.time_low) - - @property - def clock_seq(self): - return (((self.clock_seq_hi_variant & 0x3f) << 8) | - self.clock_seq_low) - - @property - def node(self): - return self.int & 0xffffffffffff - - @property - def hex(self): - return '%032x' % self.int - - @property - def urn(self): - return 'urn:uuid:' + str(self) - - @property - def variant(self): - if not self.int & (0x8000 << 48): - return RESERVED_NCS - elif not self.int & (0x4000 << 48): - return RFC_4122 - elif not self.int & (0x2000 << 48): - return RESERVED_MICROSOFT - else: - return RESERVED_FUTURE - - @property - def version(self): - # The version bits are only meaningful for RFC 4122 UUIDs. - if self.variant == RFC_4122: - return int((self.int >> 76) & 0xf) - - -def _get_command_stdout(command, *args): - import io, os, shutil, subprocess - - try: - path_dirs = os.environ.get('PATH', os.defpath).split(os.pathsep) - path_dirs.extend(['/sbin', '/usr/sbin']) - executable = shutil.which(command, path=os.pathsep.join(path_dirs)) - if executable is None: - return None - # LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output - # on stderr (Note: we don't have an example where the words we search - # for are actually localized, but in theory some system could do so.) - env = dict(os.environ) - env['LC_ALL'] = 'C' - # Empty strings will be quoted by popen so we should just ommit it - if args != ('',): - command = (executable, *args) - else: - command = (executable,) - proc = subprocess.Popen(command, - stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL, - env=env) - if not proc: - return None - stdout, stderr = proc.communicate() - return io.BytesIO(stdout) - except (OSError, subprocess.SubprocessError): - return None - - -# For MAC (a.k.a. 
IEEE 802, or EUI-48) addresses, the second least significant -# bit of the first octet signifies whether the MAC address is universally (0) -# or locally (1) administered. Network cards from hardware manufacturers will -# always be universally administered to guarantee global uniqueness of the MAC -# address, but any particular machine may have other interfaces which are -# locally administered. An example of the latter is the bridge interface to -# the Touch Bar on MacBook Pros. -# -# This bit works out to be the 42nd bit counting from 1 being the least -# significant, or 1<<41. We'll prefer universally administered MAC addresses -# over locally administered ones since the former are globally unique, but -# we'll return the first of the latter found if that's all the machine has. -# -# See https://en.wikipedia.org/wiki/MAC_address#Universal_vs._local_(U/L_bit) - -def _is_universal(mac): - return not (mac & (1 << 41)) - - -def _find_mac_near_keyword(command, args, keywords, get_word_index): - """Searches a command's output for a MAC address near a keyword. - - Each line of words in the output is case-insensitively searched for - any of the given keywords. Upon a match, get_word_index is invoked - to pick a word from the line, given the index of the match. For - example, lambda i: 0 would get the first word on the line, while - lambda i: i - 1 would get the word preceding the keyword. - """ - stdout = _get_command_stdout(command, args) - if stdout is None: - return None - - first_local_mac = None - for line in stdout: - words = line.lower().rstrip().split() - for i in range(len(words)): - if words[i] in keywords: - try: - word = words[get_word_index(i)] - mac = int(word.replace(_MAC_DELIM, b''), 16) - except (ValueError, IndexError): - # Virtual interfaces, such as those provided by - # VPNs, do not have a colon-delimited MAC address - # as expected, but a 16-byte HWAddr separated by - # dashes. These should be ignored in favor of a - # real MAC address - pass - else: - if _is_universal(mac): - return mac - first_local_mac = first_local_mac or mac - return first_local_mac or None - - -def _parse_mac(word): - # Accept 'HH:HH:HH:HH:HH:HH' MAC address (ex: '52:54:00:9d:0e:67'), - # but reject IPv6 address (ex: 'fe80::5054:ff:fe9' or '123:2:3:4:5:6:7:8'). - # - # Virtual interfaces, such as those provided by VPNs, do not have a - # colon-delimited MAC address as expected, but a 16-byte HWAddr separated - # by dashes. These should be ignored in favor of a real MAC address - parts = word.split(_MAC_DELIM) - if len(parts) != 6: - return - if _MAC_OMITS_LEADING_ZEROES: - # (Only) on AIX the macaddr value given is not prefixed by 0, e.g. - # en0 1500 link#2 fa.bc.de.f7.62.4 110854824 0 160133733 0 0 - # not - # en0 1500 link#2 fa.bc.de.f7.62.04 110854824 0 160133733 0 0 - if not all(1 <= len(part) <= 2 for part in parts): - return - hexstr = b''.join(part.rjust(2, b'0') for part in parts) - else: - if not all(len(part) == 2 for part in parts): - return - hexstr = b''.join(parts) - try: - return int(hexstr, 16) - except ValueError: - return - - -def _find_mac_under_heading(command, args, heading): - """Looks for a MAC address under a heading in a command's output. - - The first line of words in the output is searched for the given - heading. Words at the same word index as the heading in subsequent - lines are then examined to see if they look like MAC addresses. 
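# Illustrative sketch, not part of the deleted file: the universal/local test
# described above, applied to a MAC parsed the way _parse_mac parses it. The
# sample address is the one from _parse_mac's own comment.
mac = int(b'52:54:00:9d:0e:67'.replace(b':', b''), 16)
# 1 << 41 is the U/L bit: the second-least-significant bit of the first octet.
# 0x52 = 0b01010010, so the bit is set and this MAC is *locally* administered
# (52:54:00 is the default prefix QEMU/KVM assigns to virtual NICs).
assert mac & (1 << 41)                      # locally administered
assert not (0x001122334455 & (1 << 41))     # universally administered example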
- """ - stdout = _get_command_stdout(command, args) - if stdout is None: - return None - - keywords = stdout.readline().rstrip().split() - try: - column_index = keywords.index(heading) - except ValueError: - return None - - first_local_mac = None - for line in stdout: - words = line.rstrip().split() - try: - word = words[column_index] - except IndexError: - continue - - mac = _parse_mac(word) - if mac is None: - continue - if _is_universal(mac): - return mac - if first_local_mac is None: - first_local_mac = mac - - return first_local_mac - - -# The following functions call external programs to 'get' a macaddr value to -# be used as basis for an uuid -def _ifconfig_getnode(): - """Get the hardware address on Unix by running ifconfig.""" - # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes. - keywords = (b'hwaddr', b'ether', b'address:', b'lladdr') - for args in ('', '-a', '-av'): - mac = _find_mac_near_keyword('ifconfig', args, keywords, lambda i: i+1) - if mac: - return mac - return None - -def _ip_getnode(): - """Get the hardware address on Unix by running ip.""" - # This works on Linux with iproute2. - mac = _find_mac_near_keyword('ip', 'link', [b'link/ether'], lambda i: i+1) - if mac: - return mac - return None - -def _arp_getnode(): - """Get the hardware address on Unix by running arp.""" - import os, socket - if not hasattr(socket, "gethostbyname"): - return None - try: - ip_addr = socket.gethostbyname(socket.gethostname()) - except OSError: - return None - - # Try getting the MAC addr from arp based on our IP address (Solaris). - mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1) - if mac: - return mac - - # This works on OpenBSD - mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: i+1) - if mac: - return mac - - # This works on Linux, FreeBSD and NetBSD - mac = _find_mac_near_keyword('arp', '-an', [os.fsencode('(%s)' % ip_addr)], - lambda i: i+2) - # Return None instead of 0. - if mac: - return mac - return None - -def _lanscan_getnode(): - """Get the hardware address on Unix by running lanscan.""" - # This might work on HP-UX. - return _find_mac_near_keyword('lanscan', '-ai', [b'lan0'], lambda i: 0) - -def _netstat_getnode(): - """Get the hardware address on Unix by running netstat.""" - # This works on AIX and might work on Tru64 UNIX. - return _find_mac_under_heading('netstat', '-ian', b'Address') - - -# Import optional C extension at toplevel, to help disabling it when testing -try: - import _uuid - _generate_time_safe = getattr(_uuid, "generate_time_safe", None) - _has_stable_extractable_node = getattr(_uuid, "has_stable_extractable_node", False) - _UuidCreate = getattr(_uuid, "UuidCreate", None) -except ImportError: - _uuid = None - _generate_time_safe = None - _has_stable_extractable_node = False - _UuidCreate = None - - -def _unix_getnode(): - """Get the hardware address on Unix using the _uuid extension module.""" - if _generate_time_safe and _has_stable_extractable_node: - uuid_time, _ = _generate_time_safe() - return UUID(bytes=uuid_time).node - -def _windll_getnode(): - """Get the hardware address on Windows using the _uuid extension module.""" - if _UuidCreate and _has_stable_extractable_node: - uuid_bytes = _UuidCreate() - return UUID(bytes_le=uuid_bytes).node - -def _random_getnode(): - """Get a random node ID.""" - # RFC 9562, §6.10-3 says that - # - # Implementations MAY elect to obtain a 48-bit cryptographic-quality - # random number as per Section 6.9 to use as the Node ID. [...] 
[and] - # implementations MUST set the least significant bit of the first octet - # of the Node ID to 1. This bit is the unicast or multicast bit, which - # will never be set in IEEE 802 addresses obtained from network cards. - # - # The "multicast bit" of a MAC address is defined to be "the least - # significant bit of the first octet". This works out to be the 41st bit - # counting from 1 being the least significant bit, or 1<<40. - # - # See https://en.wikipedia.org/w/index.php?title=MAC_address&oldid=1128764812#Universal_vs._local_(U/L_bit) - return int.from_bytes(os.urandom(6)) | (1 << 40) - - -# _OS_GETTERS, when known, are targeted for a specific OS or platform. -# The order is by 'common practice' on the specified platform. -# Note: 'posix' and 'windows' _OS_GETTERS are prefixed by a dll/dlload() method -# which, when successful, means none of these "external" methods are called. -# _GETTERS is (also) used by test_uuid.py to SkipUnless(), e.g., -# @unittest.skipUnless(_uuid._ifconfig_getnode in _uuid._GETTERS, ...) -if _LINUX: - _OS_GETTERS = [_ip_getnode, _ifconfig_getnode] -elif sys.platform == 'darwin': - _OS_GETTERS = [_ifconfig_getnode, _arp_getnode, _netstat_getnode] -elif sys.platform == 'win32': - # bpo-40201: _windll_getnode will always succeed, so these are not needed - _OS_GETTERS = [] -elif _AIX: - _OS_GETTERS = [_netstat_getnode] -else: - _OS_GETTERS = [_ifconfig_getnode, _ip_getnode, _arp_getnode, - _netstat_getnode, _lanscan_getnode] -if os.name == 'posix': - _GETTERS = [_unix_getnode] + _OS_GETTERS -elif os.name == 'nt': - _GETTERS = [_windll_getnode] + _OS_GETTERS -else: - _GETTERS = _OS_GETTERS - -_node = None - -def getnode(): - """Get the hardware address as a 48-bit positive integer. - - The first time this runs, it may launch a separate program, which could - be quite slow. If all attempts to obtain the hardware address fail, we - choose a random 48-bit number with its eighth bit set to 1 as recommended - in RFC 4122. - """ - global _node - if _node is not None: - return _node - - for getter in _GETTERS + [_random_getnode]: - try: - _node = getter() - except: - continue - if (_node is not None) and (0 <= _node < (1 << 48)): - return _node - assert False, '_random_getnode() returned invalid value: {}'.format(_node) - - -_last_timestamp = None - -def uuid1(node=None, clock_seq=None): - """Generate a UUID from a host ID, sequence number, and the current time. - If 'node' is not given, getnode() is used to obtain the hardware - address. If 'clock_seq' is given, it is used as the sequence number; - otherwise a random 14-bit sequence number is chosen.""" - - # When the system provides a version-1 UUID generator, use it (but don't - # use UuidCreate here because its UUIDs don't conform to RFC 4122). - if _generate_time_safe is not None and node is clock_seq is None: - uuid_time, safely_generated = _generate_time_safe() - try: - is_safe = SafeUUID(safely_generated) - except ValueError: - is_safe = SafeUUID.unknown - return UUID(bytes=uuid_time, is_safe=is_safe) - - global _last_timestamp - import time - nanoseconds = time.time_ns() - # 0x01b21dd213814000 is the number of 100-ns intervals between the - # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. 
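# Quick verification, not part of the deleted file, of the constant used in
# the line below: the gap between the Gregorian epoch (1582-10-15) and the
# Unix epoch (1970-01-01), expressed in 100-nanosecond intervals.
from datetime import datetime

gap = datetime(1970, 1, 1) - datetime(1582, 10, 15)
assert gap.days * 86400 * 10**7 == 0x01b21dd213814000  # 122192928000000000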
- timestamp = nanoseconds // 100 + 0x01b21dd213814000 - if _last_timestamp is not None and timestamp <= _last_timestamp: - timestamp = _last_timestamp + 1 - _last_timestamp = timestamp - if clock_seq is None: - import random - clock_seq = random.getrandbits(14) # instead of stable storage - time_low = timestamp & 0xffffffff - time_mid = (timestamp >> 32) & 0xffff - time_hi_version = (timestamp >> 48) & 0x0fff - clock_seq_low = clock_seq & 0xff - clock_seq_hi_variant = (clock_seq >> 8) & 0x3f - if node is None: - node = getnode() - return UUID(fields=(time_low, time_mid, time_hi_version, - clock_seq_hi_variant, clock_seq_low, node), version=1) - -def uuid3(namespace, name): - """Generate a UUID from the MD5 hash of a namespace UUID and a name.""" - if isinstance(name, str): - name = bytes(name, "utf-8") - from hashlib import md5 - digest = md5( - namespace.bytes + name, - usedforsecurity=False - ).digest() - return UUID(bytes=digest[:16], version=3) - -def uuid4(): - """Generate a random UUID.""" - return UUID(bytes=os.urandom(16), version=4) - -def uuid5(namespace, name): - """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" - if isinstance(name, str): - name = bytes(name, "utf-8") - from hashlib import sha1 - hash = sha1(namespace.bytes + name).digest() - return UUID(bytes=hash[:16], version=5) - - -def main(): - """Run the uuid command line interface.""" - uuid_funcs = { - "uuid1": uuid1, - "uuid3": uuid3, - "uuid4": uuid4, - "uuid5": uuid5 - } - uuid_namespace_funcs = ("uuid3", "uuid5") - namespaces = { - "@dns": NAMESPACE_DNS, - "@url": NAMESPACE_URL, - "@oid": NAMESPACE_OID, - "@x500": NAMESPACE_X500 - } - - import argparse - parser = argparse.ArgumentParser( - description="Generates a uuid using the selected uuid function.") - parser.add_argument("-u", "--uuid", choices=uuid_funcs.keys(), default="uuid4", - help="The function to use to generate the uuid. " - "By default uuid4 function is used.") - parser.add_argument("-n", "--namespace", - help="The namespace is a UUID, or '@ns' where 'ns' is a " - "well-known predefined UUID addressed by namespace name. " - "Such as @dns, @url, @oid, and @x500. " - "Only required for uuid3/uuid5 functions.") - parser.add_argument("-N", "--name", - help="The name used as part of generating the uuid. " - "Only required for uuid3/uuid5 functions.") - - args = parser.parse_args() - uuid_func = uuid_funcs[args.uuid] - namespace = args.namespace - name = args.name - - if args.uuid in uuid_namespace_funcs: - if not namespace or not name: - parser.error( - "Incorrect number of arguments. " - f"{args.uuid} requires a namespace and a name. " - "Run 'python -m uuid -h' for more information." - ) - namespace = namespaces[namespace] if namespace in namespaces else UUID(namespace) - print(uuid_func(namespace, name)) - else: - print(uuid_func()) - - -# The following standard UUIDs are for use with uuid3() or uuid5(). - -NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8') -NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8') -NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8') -NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8') - -if __name__ == "__main__": - main() diff --git a/Python313_13_x86_Template/Lib/venv/__init__.py b/Python313_13_x86_Template/Lib/venv/__init__.py deleted file mode 100644 index c45cb2ee..00000000 --- a/Python313_13_x86_Template/Lib/venv/__init__.py +++ /dev/null @@ -1,687 +0,0 @@ -""" -Virtual environment (venv) package for Python. Based on PEP 405. 
- -Copyright (C) 2011-2014 Vinay Sajip. -Licensed to the PSF under a contributor agreement. -""" -import logging -import os -import shutil -import subprocess -import sys -import sysconfig -import types -import shlex - - -CORE_VENV_DEPS = ('pip',) -logger = logging.getLogger(__name__) - - -class EnvBuilder: - """ - This class exists to allow virtual environment creation to be - customized. The constructor parameters determine the builder's - behaviour when called upon to create a virtual environment. - - By default, the builder makes the system (global) site-packages dir - *un*available to the created environment. - - If invoked using the Python -m option, the default is to use copying - on Windows platforms but symlinks elsewhere. If instantiated some - other way, the default is to *not* use symlinks. - - :param system_site_packages: If True, the system (global) site-packages - dir is available to created environments. - :param clear: If True, delete the contents of the environment directory if - it already exists, before environment creation. - :param symlinks: If True, attempt to symlink rather than copy files into - virtual environment. - :param upgrade: If True, upgrade an existing virtual environment. - :param with_pip: If True, ensure pip is installed in the virtual - environment - :param prompt: Alternative terminal prefix for the environment. - :param upgrade_deps: Update the base venv modules to the latest on PyPI - :param scm_ignore_files: Create ignore files for the SCMs specified by the - iterable. - """ - - def __init__(self, system_site_packages=False, clear=False, - symlinks=False, upgrade=False, with_pip=False, prompt=None, - upgrade_deps=False, *, scm_ignore_files=frozenset()): - self.system_site_packages = system_site_packages - self.clear = clear - self.symlinks = symlinks - self.upgrade = upgrade - self.with_pip = with_pip - self.orig_prompt = prompt - if prompt == '.': # see bpo-38901 - prompt = os.path.basename(os.getcwd()) - self.prompt = prompt - self.upgrade_deps = upgrade_deps - self.scm_ignore_files = frozenset(map(str.lower, scm_ignore_files)) - - def create(self, env_dir): - """ - Create a virtual environment in a directory. - - :param env_dir: The target directory to create an environment in. - - """ - env_dir = os.path.abspath(env_dir) - context = self.ensure_directories(env_dir) - for scm in self.scm_ignore_files: - getattr(self, f"create_{scm}_ignore_file")(context) - # See issue 24875. We need system_site_packages to be False - # until after pip is installed. 
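# Minimal usage sketch, not part of the deleted file; the target directory is
# hypothetical. EnvBuilder runs the steps listed in create() above, including
# the two-phase system_site_packages handling this comment describes; the
# module-level venv.create() defined near the end of the file is the
# equivalent one-shot wrapper.
import venv

builder = venv.EnvBuilder(system_site_packages=True, with_pip=True)
builder.create('/tmp/demo-venv')
# one-shot form:
# venv.create('/tmp/demo-venv2', system_site_packages=True, with_pip=True)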
- true_system_site_packages = self.system_site_packages - self.system_site_packages = False - self.create_configuration(context) - self.setup_python(context) - if self.with_pip: - self._setup_pip(context) - if not self.upgrade: - self.setup_scripts(context) - self.post_setup(context) - if true_system_site_packages: - # We had set it to False before, now - # restore it and rewrite the configuration - self.system_site_packages = True - self.create_configuration(context) - if self.upgrade_deps: - self.upgrade_dependencies(context) - - def clear_directory(self, path): - for fn in os.listdir(path): - fn = os.path.join(path, fn) - if os.path.islink(fn) or os.path.isfile(fn): - os.remove(fn) - elif os.path.isdir(fn): - shutil.rmtree(fn) - - def _venv_path(self, env_dir, name): - vars = { - 'base': env_dir, - 'platbase': env_dir, - 'installed_base': env_dir, - 'installed_platbase': env_dir, - } - return sysconfig.get_path(name, scheme='venv', vars=vars) - - @classmethod - def _same_path(cls, path1, path2): - """Check whether two paths appear the same. - - Whether they refer to the same file is irrelevant; we're testing for - whether a human reader would look at the path string and easily tell - that they're the same file. - """ - if sys.platform == 'win32': - if os.path.normcase(path1) == os.path.normcase(path2): - return True - # gh-90329: Don't display a warning for short/long names - import _winapi - try: - path1 = _winapi.GetLongPathName(os.fsdecode(path1)) - except OSError: - pass - try: - path2 = _winapi.GetLongPathName(os.fsdecode(path2)) - except OSError: - pass - if os.path.normcase(path1) == os.path.normcase(path2): - return True - return False - else: - return path1 == path2 - - def ensure_directories(self, env_dir): - """ - Create the directories for the environment. - - Returns a context object which holds paths in the environment, - for use by subsequent logic. - """ - - def create_if_needed(d): - if not os.path.exists(d): - os.makedirs(d) - elif os.path.islink(d) or os.path.isfile(d): - raise ValueError('Unable to create directory %r' % d) - - if os.pathsep in os.fspath(env_dir): - raise ValueError(f'Refusing to create a venv in {env_dir} because ' - f'it contains the PATH separator {os.pathsep}.') - if os.path.exists(env_dir) and self.clear: - self.clear_directory(env_dir) - context = types.SimpleNamespace() - context.env_dir = env_dir - context.env_name = os.path.split(env_dir)[1] - context.prompt = self.prompt if self.prompt is not None else context.env_name - create_if_needed(env_dir) - executable = sys._base_executable - if not executable: # see gh-96861 - raise ValueError('Unable to determine path to the running ' - 'Python interpreter. Provide an explicit path or ' - 'check that your PATH environment variable is ' - 'correctly set.') - dirname, exename = os.path.split(os.path.abspath(executable)) - if sys.platform == 'win32': - # Always create the simplest name in the venv. 
It will either be a - # link back to executable, or a copy of the appropriate launcher - _d = '_d' if os.path.splitext(exename)[0].endswith('_d') else '' - exename = f'python{_d}.exe' - context.executable = executable - context.python_dir = dirname - context.python_exe = exename - binpath = self._venv_path(env_dir, 'scripts') - incpath = self._venv_path(env_dir, 'include') - libpath = self._venv_path(env_dir, 'purelib') - - context.inc_path = incpath - create_if_needed(incpath) - context.lib_path = libpath - create_if_needed(libpath) - # Issue 21197: create lib64 as a symlink to lib on 64-bit non-OS X POSIX - if ((sys.maxsize > 2**32) and (os.name == 'posix') and - (sys.platform != 'darwin')): - link_path = os.path.join(env_dir, 'lib64') - if not os.path.exists(link_path): # Issue #21643 - os.symlink('lib', link_path) - context.bin_path = binpath - context.bin_name = os.path.relpath(binpath, env_dir) - context.env_exe = os.path.join(binpath, exename) - create_if_needed(binpath) - # Assign and update the command to use when launching the newly created - # environment, in case it isn't simply the executable script (e.g. bpo-45337) - context.env_exec_cmd = context.env_exe - if sys.platform == 'win32': - # bpo-45337: Fix up env_exec_cmd to account for file system redirections. - # Some redirects only apply to CreateFile and not CreateProcess - real_env_exe = os.path.realpath(context.env_exe) - if not self._same_path(real_env_exe, context.env_exe): - logger.warning('Actual environment location may have moved due to ' - 'redirects, links or junctions.\n' - ' Requested location: "%s"\n' - ' Actual location: "%s"', - context.env_exe, real_env_exe) - context.env_exec_cmd = real_env_exe - return context - - def create_configuration(self, context): - """ - Create a configuration file indicating where the environment's Python - was copied from, and whether the system site-packages should be made - available in the environment. - - :param context: The information for the environment creation request - being processed. - """ - context.cfg_path = path = os.path.join(context.env_dir, 'pyvenv.cfg') - with open(path, 'w', encoding='utf-8') as f: - f.write('home = %s\n' % context.python_dir) - if self.system_site_packages: - incl = 'true' - else: - incl = 'false' - f.write('include-system-site-packages = %s\n' % incl) - f.write('version = %d.%d.%d\n' % sys.version_info[:3]) - if self.prompt is not None: - f.write(f'prompt = {self.prompt!r}\n') - f.write('executable = %s\n' % os.path.realpath(sys.executable)) - args = [] - nt = os.name == 'nt' - if nt and self.symlinks: - args.append('--symlinks') - if not nt and not self.symlinks: - args.append('--copies') - if not self.with_pip: - args.append('--without-pip') - if self.system_site_packages: - args.append('--system-site-packages') - if self.clear: - args.append('--clear') - if self.upgrade: - args.append('--upgrade') - if self.upgrade_deps: - args.append('--upgrade-deps') - if self.orig_prompt is not None: - args.append(f'--prompt="{self.orig_prompt}"') - if not self.scm_ignore_files: - args.append('--without-scm-ignore-files') - - args.append(context.env_dir) - args = ' '.join(args) - f.write(f'command = {sys.executable} -m venv {args}\n') - - def symlink_or_copy(self, src, dst, relative_symlinks_ok=False): - """ - Try symlinking a file, and if that fails, fall back to copying. - (Unused on Windows, because we can't just copy a failed symlink file: we - switch to a different set of files instead.) 
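# Not part of the deleted file: an example of the pyvenv.cfg that
# create_configuration() above writes. Paths and version are illustrative;
# the optional "prompt" line only appears when a prompt was requested.
#
#   home = /usr/local/bin
#   include-system-site-packages = false
#   version = 3.13.13
#   executable = /usr/local/bin/python3.13
#   command = /usr/local/bin/python3.13 -m venv /home/user/demo-venv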
- """ - assert os.name != 'nt' - force_copy = not self.symlinks - if not force_copy: - try: - if not os.path.islink(dst): # can't link to itself! - if relative_symlinks_ok: - assert os.path.dirname(src) == os.path.dirname(dst) - os.symlink(os.path.basename(src), dst) - else: - os.symlink(src, dst) - except Exception: # may need to use a more specific exception - logger.warning('Unable to symlink %r to %r', src, dst) - force_copy = True - if force_copy: - shutil.copyfile(src, dst) - - def create_git_ignore_file(self, context): - """ - Create a .gitignore file in the environment directory. - - The contents of the file cause the entire environment directory to be - ignored by git. - """ - gitignore_path = os.path.join(context.env_dir, '.gitignore') - with open(gitignore_path, 'w', encoding='utf-8') as file: - file.write('# Created by venv; ' - 'see https://docs.python.org/3/library/venv.html\n') - file.write('*\n') - - if os.name != 'nt': - def setup_python(self, context): - """ - Set up a Python executable in the environment. - - :param context: The information for the environment creation request - being processed. - """ - binpath = context.bin_path - path = context.env_exe - copier = self.symlink_or_copy - dirname = context.python_dir - copier(context.executable, path) - if not os.path.islink(path): - os.chmod(path, 0o755) - for suffix in ('python', 'python3', - f'python3.{sys.version_info[1]}'): - path = os.path.join(binpath, suffix) - if not os.path.exists(path): - # Issue 18807: make copies if - # symlinks are not wanted - copier(context.env_exe, path, relative_symlinks_ok=True) - if not os.path.islink(path): - os.chmod(path, 0o755) - - else: - def setup_python(self, context): - """ - Set up a Python executable in the environment. - - :param context: The information for the environment creation request - being processed. 
- """ - binpath = context.bin_path - dirname = context.python_dir - exename = os.path.basename(context.env_exe) - exe_stem = os.path.splitext(exename)[0] - exe_d = '_d' if os.path.normcase(exe_stem).endswith('_d') else '' - if sysconfig.is_python_build(): - scripts = dirname - else: - scripts = os.path.join(os.path.dirname(__file__), - 'scripts', 'nt') - if not sysconfig.get_config_var("Py_GIL_DISABLED"): - python_exe = os.path.join(dirname, f'python{exe_d}.exe') - pythonw_exe = os.path.join(dirname, f'pythonw{exe_d}.exe') - link_sources = { - 'python.exe': python_exe, - f'python{exe_d}.exe': python_exe, - 'pythonw.exe': pythonw_exe, - f'pythonw{exe_d}.exe': pythonw_exe, - } - python_exe = os.path.join(scripts, f'venvlauncher{exe_d}.exe') - pythonw_exe = os.path.join(scripts, f'venvwlauncher{exe_d}.exe') - copy_sources = { - 'python.exe': python_exe, - f'python{exe_d}.exe': python_exe, - 'pythonw.exe': pythonw_exe, - f'pythonw{exe_d}.exe': pythonw_exe, - } - else: - exe_t = f'3.{sys.version_info[1]}t' - python_exe = os.path.join(dirname, f'python{exe_t}{exe_d}.exe') - pythonw_exe = os.path.join(dirname, f'pythonw{exe_t}{exe_d}.exe') - link_sources = { - 'python.exe': python_exe, - f'python{exe_d}.exe': python_exe, - f'python{exe_t}.exe': python_exe, - f'python{exe_t}{exe_d}.exe': python_exe, - 'pythonw.exe': pythonw_exe, - f'pythonw{exe_d}.exe': pythonw_exe, - f'pythonw{exe_t}.exe': pythonw_exe, - f'pythonw{exe_t}{exe_d}.exe': pythonw_exe, - } - python_exe = os.path.join(scripts, f'venvlaunchert{exe_d}.exe') - pythonw_exe = os.path.join(scripts, f'venvwlaunchert{exe_d}.exe') - copy_sources = { - 'python.exe': python_exe, - f'python{exe_d}.exe': python_exe, - f'python{exe_t}.exe': python_exe, - f'python{exe_t}{exe_d}.exe': python_exe, - 'pythonw.exe': pythonw_exe, - f'pythonw{exe_d}.exe': pythonw_exe, - f'pythonw{exe_t}.exe': pythonw_exe, - f'pythonw{exe_t}{exe_d}.exe': pythonw_exe, - } - - do_copies = True - if self.symlinks: - do_copies = False - # For symlinking, we need all the DLLs to be available alongside - # the executables. - link_sources.update({ - f: os.path.join(dirname, f) for f in os.listdir(dirname) - if os.path.normcase(f).startswith(('python', 'vcruntime')) - and os.path.normcase(os.path.splitext(f)[1]) == '.dll' - }) - - to_unlink = [] - for dest, src in link_sources.items(): - dest = os.path.join(binpath, dest) - try: - os.symlink(src, dest) - to_unlink.append(dest) - except OSError: - logger.warning('Unable to symlink %r to %r', src, dest) - do_copies = True - for f in to_unlink: - try: - os.unlink(f) - except OSError: - logger.warning('Failed to clean up symlink %r', - f) - logger.warning('Retrying with copies') - break - - if do_copies: - for dest, src in copy_sources.items(): - dest = os.path.join(binpath, dest) - try: - shutil.copy2(src, dest) - except OSError: - logger.warning('Unable to copy %r to %r', src, dest) - - if sysconfig.is_python_build(): - # copy init.tcl - for root, dirs, files in os.walk(context.python_dir): - if 'init.tcl' in files: - tcldir = os.path.basename(root) - tcldir = os.path.join(context.env_dir, 'Lib', tcldir) - if not os.path.exists(tcldir): - os.makedirs(tcldir) - src = os.path.join(root, 'init.tcl') - dst = os.path.join(tcldir, 'init.tcl') - shutil.copyfile(src, dst) - break - - def _call_new_python(self, context, *py_args, **kwargs): - """Executes the newly created Python using safe-ish options""" - # gh-98251: We do not want to just use '-I' because that masks - # legitimate user preferences (such as not writing bytecode). 
All we - # really need is to ensure that the path variables do not overrule - # normal venv handling. - args = [context.env_exec_cmd, *py_args] - kwargs['env'] = env = os.environ.copy() - env['VIRTUAL_ENV'] = context.env_dir - env.pop('PYTHONHOME', None) - env.pop('PYTHONPATH', None) - kwargs['cwd'] = context.env_dir - kwargs['executable'] = context.env_exec_cmd - subprocess.check_output(args, **kwargs) - - def _setup_pip(self, context): - """Installs or upgrades pip in a virtual environment""" - self._call_new_python(context, '-m', 'ensurepip', '--upgrade', - '--default-pip', stderr=subprocess.STDOUT) - - def setup_scripts(self, context): - """ - Set up scripts into the created environment from a directory. - - This method installs the default scripts into the environment - being created. You can prevent the default installation by overriding - this method if you really need to, or if you need to specify - a different location for the scripts to install. By default, the - 'scripts' directory in the venv package is used as the source of - scripts to install. - """ - path = os.path.abspath(os.path.dirname(__file__)) - path = os.path.join(path, 'scripts') - self.install_scripts(context, path) - - def post_setup(self, context): - """ - Hook for post-setup modification of the venv. Subclasses may install - additional packages or scripts here, add activation shell scripts, etc. - - :param context: The information for the environment creation request - being processed. - """ - pass - - def replace_variables(self, text, context): - """ - Replace variable placeholders in script text with context-specific - variables. - - Return the text passed in , but with variables replaced. - - :param text: The text in which to replace placeholder variables. - :param context: The information for the environment creation request - being processed. - """ - replacements = { - '__VENV_DIR__': context.env_dir, - '__VENV_NAME__': context.env_name, - '__VENV_PROMPT__': context.prompt, - '__VENV_BIN_NAME__': context.bin_name, - '__VENV_PYTHON__': context.env_exe, - } - - def quote_ps1(s): - """ - This should satisfy PowerShell quoting rules [1], unless the quoted - string is passed directly to Windows native commands [2]. - [1]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_quoting_rules - [2]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_parsing#passing-arguments-that-contain-quote-characters - """ - s = s.replace("'", "''") - return f"'{s}'" - - def quote_bat(s): - return s - - # gh-124651: need to quote the template strings properly - quote = shlex.quote - script_path = context.script_path - if script_path.endswith('.ps1'): - quote = quote_ps1 - elif script_path.endswith('.bat'): - quote = quote_bat - else: - # fallbacks to POSIX shell compliant quote - quote = shlex.quote - - replacements = {key: quote(s) for key, s in replacements.items()} - for key, quoted in replacements.items(): - text = text.replace(key, quoted) - return text - - def install_scripts(self, context, path): - """ - Install scripts into the created environment from a directory. - - :param context: The information for the environment creation request - being processed. - :param path: Absolute pathname of a directory containing script. - Scripts in the 'common' subdirectory of this directory, - and those in the directory named for the platform - being run on, are installed in the created environment. 
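# Illustrative sketch, not part of the deleted file: the per-script-type
# quoting that replace_variables() above applies when substituting the
# __VENV_*__ placeholders. The sample path is hypothetical.
import shlex

def quote_ps1(s):
    # PowerShell single-quoting: double any embedded single quotes.
    return "'" + s.replace("'", "''") + "'"

text = 'VIRTUAL_ENV=__VENV_DIR__'
env_dir = "/home/user/my env"               # note the embedded space
assert text.replace('__VENV_DIR__', quote_ps1(env_dir)) \
    == "VIRTUAL_ENV='/home/user/my env'"
assert text.replace('__VENV_DIR__', shlex.quote(env_dir)) \
    == "VIRTUAL_ENV='/home/user/my env'"    # POSIX shell fallback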
- Placeholder variables are replaced with environment- - specific values. - """ - binpath = context.bin_path - plen = len(path) - if os.name == 'nt': - def skip_file(f): - f = os.path.normcase(f) - return (f.startswith(('python', 'venv')) - and f.endswith(('.exe', '.pdb'))) - else: - def skip_file(f): - return False - for root, dirs, files in os.walk(path): - if root == path: # at top-level, remove irrelevant dirs - for d in dirs[:]: - if d not in ('common', os.name): - dirs.remove(d) - continue # ignore files in top level - for f in files: - if skip_file(f): - continue - srcfile = os.path.join(root, f) - suffix = root[plen:].split(os.sep)[2:] - if not suffix: - dstdir = binpath - else: - dstdir = os.path.join(binpath, *suffix) - if not os.path.exists(dstdir): - os.makedirs(dstdir) - dstfile = os.path.join(dstdir, f) - if os.name == 'nt' and srcfile.endswith(('.exe', '.pdb')): - shutil.copy2(srcfile, dstfile) - continue - with open(srcfile, 'rb') as f: - data = f.read() - try: - context.script_path = srcfile - new_data = ( - self.replace_variables(data.decode('utf-8'), context) - .encode('utf-8') - ) - except UnicodeError as e: - logger.warning('unable to copy script %r, ' - 'may be binary: %s', srcfile, e) - continue - if new_data == data: - shutil.copy(srcfile, dstfile) - else: - with open(dstfile, 'wb') as f: - f.write(new_data) - shutil.copymode(srcfile, dstfile) - - def upgrade_dependencies(self, context): - logger.debug( - f'Upgrading {CORE_VENV_DEPS} packages in {context.bin_path}' - ) - self._call_new_python(context, '-m', 'pip', 'install', '--upgrade', - *CORE_VENV_DEPS) - - -def create(env_dir, system_site_packages=False, clear=False, - symlinks=False, with_pip=False, prompt=None, upgrade_deps=False, - *, scm_ignore_files=frozenset()): - """Create a virtual environment in a directory.""" - builder = EnvBuilder(system_site_packages=system_site_packages, - clear=clear, symlinks=symlinks, with_pip=with_pip, - prompt=prompt, upgrade_deps=upgrade_deps, - scm_ignore_files=scm_ignore_files) - builder.create(env_dir) - - -def main(args=None): - import argparse - - parser = argparse.ArgumentParser(prog=__name__, - description='Creates virtual Python ' - 'environments in one or ' - 'more target ' - 'directories.', - epilog='Once an environment has been ' - 'created, you may wish to ' - 'activate it, e.g. 
by ' - 'sourcing an activate script ' - 'in its bin directory.') - parser.add_argument('dirs', metavar='ENV_DIR', nargs='+', - help='A directory to create the environment in.') - parser.add_argument('--system-site-packages', default=False, - action='store_true', dest='system_site', - help='Give the virtual environment access to the ' - 'system site-packages dir.') - if os.name == 'nt': - use_symlinks = False - else: - use_symlinks = True - group = parser.add_mutually_exclusive_group() - group.add_argument('--symlinks', default=use_symlinks, - action='store_true', dest='symlinks', - help='Try to use symlinks rather than copies, ' - 'when symlinks are not the default for ' - 'the platform.') - group.add_argument('--copies', default=not use_symlinks, - action='store_false', dest='symlinks', - help='Try to use copies rather than symlinks, ' - 'even when symlinks are the default for ' - 'the platform.') - parser.add_argument('--clear', default=False, action='store_true', - dest='clear', help='Delete the contents of the ' - 'environment directory if it ' - 'already exists, before ' - 'environment creation.') - parser.add_argument('--upgrade', default=False, action='store_true', - dest='upgrade', help='Upgrade the environment ' - 'directory to use this version ' - 'of Python, assuming Python ' - 'has been upgraded in-place.') - parser.add_argument('--without-pip', dest='with_pip', - default=True, action='store_false', - help='Skips installing or upgrading pip in the ' - 'virtual environment (pip is bootstrapped ' - 'by default)') - parser.add_argument('--prompt', - help='Provides an alternative prompt prefix for ' - 'this environment.') - parser.add_argument('--upgrade-deps', default=False, action='store_true', - dest='upgrade_deps', - help=f'Upgrade core dependencies ({", ".join(CORE_VENV_DEPS)}) ' - 'to the latest version in PyPI') - parser.add_argument('--without-scm-ignore-files', dest='scm_ignore_files', - action='store_const', const=frozenset(), - default=frozenset(['git']), - help='Skips adding SCM ignore files to the environment ' - 'directory (Git is supported by default).') - options = parser.parse_args(args) - if options.upgrade and options.clear: - raise ValueError('you cannot supply --upgrade and --clear together.') - builder = EnvBuilder(system_site_packages=options.system_site, - clear=options.clear, - symlinks=options.symlinks, - upgrade=options.upgrade, - with_pip=options.with_pip, - prompt=options.prompt, - upgrade_deps=options.upgrade_deps, - scm_ignore_files=options.scm_ignore_files) - for d in options.dirs: - builder.create(d) - - -if __name__ == '__main__': - rc = 1 - try: - main() - rc = 0 - except Exception as e: - print('Error: %s' % e, file=sys.stderr) - sys.exit(rc) diff --git a/Python313_13_x86_Template/Lib/venv/scripts/common/Activate.ps1 b/Python313_13_x86_Template/Lib/venv/scripts/common/Activate.ps1 deleted file mode 100644 index f1460ba0..00000000 --- a/Python313_13_x86_Template/Lib/venv/scripts/common/Activate.ps1 +++ /dev/null @@ -1,547 +0,0 @@ -<# -.Synopsis -Activate a Python virtual environment for the current PowerShell session. - -.Description -Pushes the python executable for a virtual environment to the front of the -$Env:PATH environment variable and sets the prompt to signify that you are -in a Python virtual environment. Makes use of the command line switches as -well as the `pyvenv.cfg` file values present in the virtual environment. - -.Parameter VenvDir -Path to the directory that contains the virtual environment to activate. 
The -default value for this is the parent of the directory that the Activate.ps1 -script is located within. - -.Parameter Prompt -The prompt prefix to display when this virtual environment is activated. By -default, this prompt is the name of the virtual environment folder (VenvDir) -surrounded by parentheses and followed by a single space (ie. '(.venv) '). - -.Example -Activate.ps1 -Activates the Python virtual environment that contains the Activate.ps1 script. - -.Example -Activate.ps1 -Verbose -Activates the Python virtual environment that contains the Activate.ps1 script, -and shows extra information about the activation as it executes. - -.Example -Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv -Activates the Python virtual environment located in the specified location. - -.Example -Activate.ps1 -Prompt "MyPython" -Activates the Python virtual environment that contains the Activate.ps1 script, -and prefixes the current prompt with the specified string (surrounded in -parentheses) while the virtual environment is active. - -.Notes -On Windows, it may be required to enable this Activate.ps1 script by setting the -execution policy for the user. You can do this by issuing the following PowerShell -command: - -PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser - -For more information on Execution Policies: -https://go.microsoft.com/fwlink/?LinkID=135170 - -#> -Param( - [Parameter(Mandatory = $false)] - [String] - $VenvDir, - [Parameter(Mandatory = $false)] - [String] - $Prompt -) - -<# Function declarations --------------------------------------------------- #> - -<# -.Synopsis -Remove all shell session elements added by the Activate script, including the -addition of the virtual environment's Python executable from the beginning of -the PATH variable. - -.Parameter NonDestructive -If present, do not remove this function from the global namespace for the -session. - -#> -function global:deactivate ([switch]$NonDestructive) { - # Revert to original values - - # The prior prompt: - if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { - Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt - Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT - } - - # The prior PYTHONHOME: - if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { - Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME - Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME - } - - # The prior PATH: - if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { - Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH - Remove-Item -Path Env:_OLD_VIRTUAL_PATH - } - - # Just remove the VIRTUAL_ENV altogether: - if (Test-Path -Path Env:VIRTUAL_ENV) { - Remove-Item -Path env:VIRTUAL_ENV - } - - # Just remove VIRTUAL_ENV_PROMPT altogether. - if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { - Remove-Item -Path env:VIRTUAL_ENV_PROMPT - } - - # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: - if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { - Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force - } - - # Leave deactivate function in the global namespace if requested: - if (-not $NonDestructive) { - Remove-Item -Path function:deactivate - } -} - -<# -.Description -Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the -given folder, and returns them in a map. 
- -For each line in the pyvenv.cfg file, if that line can be parsed into exactly -two strings separated by `=` (with any amount of whitespace surrounding the =) -then it is considered a `key = value` line. The left hand string is the key, -the right hand is the value. - -If the value starts with a `'` or a `"` then the first and last character is -stripped from the value before being captured. - -.Parameter ConfigDir -Path to the directory that contains the `pyvenv.cfg` file. -#> -function Get-PyVenvConfig( - [String] - $ConfigDir -) { - Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" - - # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). - $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue - - # An empty map will be returned if no config file is found. - $pyvenvConfig = @{ } - - if ($pyvenvConfigPath) { - - Write-Verbose "File exists, parse `key = value` lines" - $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath - - $pyvenvConfigContent | ForEach-Object { - $keyval = $PSItem -split "\s*=\s*", 2 - if ($keyval[0] -and $keyval[1]) { - $val = $keyval[1] - - # Remove extraneous quotations around a string value. - if ("'""".Contains($val.Substring(0, 1))) { - $val = $val.Substring(1, $val.Length - 2) - } - - $pyvenvConfig[$keyval[0]] = $val - Write-Verbose "Adding Key: '$($keyval[0])'='$val'" - } - } - } - return $pyvenvConfig -} - - -<# Begin Activate script --------------------------------------------------- #> - -# Determine the containing directory of this script -$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition -$VenvExecDir = Get-Item -Path $VenvExecPath - -Write-Verbose "Activation script is located in path: '$VenvExecPath'" -Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" -Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" - -# Set values required in priority: CmdLine, ConfigFile, Default -# First, get the location of the virtual environment, it might not be -# VenvExecDir if specified on the command line. -if ($VenvDir) { - Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" -} -else { - Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." - $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") - Write-Verbose "VenvDir=$VenvDir" -} - -# Next, read the `pyvenv.cfg` file to determine any required value such -# as `prompt`. -$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir - -# Next, set the prompt from the command line, or the config file, or -# just use the name of the virtual environment folder. -if ($Prompt) { - Write-Verbose "Prompt specified as argument, using '$Prompt'" -} -else { - Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" - if ($pyvenvCfg -and $pyvenvCfg['prompt']) { - Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" - $Prompt = $pyvenvCfg['prompt']; - } - else { - Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)" - Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" - $Prompt = Split-Path -Path $venvDir -Leaf - } -} - -Write-Verbose "Prompt = '$Prompt'" -Write-Verbose "VenvDir='$VenvDir'" - -# Deactivate any currently active virtual environment, but leave the -# deactivate function in place. 
-deactivate -nondestructive
-
-# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
-# that there is an activated venv.
-$env:VIRTUAL_ENV = $VenvDir
-
-$env:VIRTUAL_ENV_PROMPT = $Prompt
-
-if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
-
-    Write-Verbose "Setting prompt to '$Prompt'"
-
-    # Set the prompt to include the env name
-    # Make sure _OLD_VIRTUAL_PROMPT is global
-    function global:_OLD_VIRTUAL_PROMPT { "" }
-    Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
-    New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
-
-    function global:prompt {
-        Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
-        _OLD_VIRTUAL_PROMPT
-    }
-}
-
-# Clear PYTHONHOME
-if (Test-Path -Path Env:PYTHONHOME) {
-    Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
-    Remove-Item -Path Env:PYTHONHOME
-}
-
-# Add the venv to the PATH
-Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
-$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
-
-# SIG # Begin signature block
-# [several hundred lines of base64-encoded Authenticode signature data elided]
-# SIG # End signature block
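For review convenience: the `key = value` parsing that Get-PyVenvConfig performs above is small enough to sketch outside PowerShell. A minimal Python equivalent (illustrative only; the sample file contents are invented, and the quote-stripping rule mirrors the script):

import re

def parse_pyvenv_cfg(text):
    """Parse `key = value` lines the way Activate.ps1 does."""
    config = {}
    for line in text.splitlines():
        # Split on the first '=', tolerating surrounding whitespace,
        # just like the script's `-split "\s*=\s*", 2`.
        parts = re.split(r"\s*=\s*", line, maxsplit=1)
        if len(parts) == 2 and parts[0] and parts[1]:
            key, val = parts
            if val[0] in ("'", '"'):
                # Strip one pair of surrounding quotes, as the script does.
                val = val[1:-1]
            config[key] = val
    return config

sample = 'home = C:\\Python314\nprompt = "my-env"\n'
print(parse_pyvenv_cfg(sample))  # {'home': 'C:\\Python314', 'prompt': 'my-env'}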
diff --git a/Python313_13_x86_Template/Lib/venv/scripts/nt/venvlauncher.exe b/Python313_13_x86_Template/Lib/venv/scripts/nt/venvlauncher.exe
deleted file mode 100644
index 45b59bd0..00000000
Binary files a/Python313_13_x86_Template/Lib/venv/scripts/nt/venvlauncher.exe and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/venv/scripts/nt/venvwlauncher.exe b/Python313_13_x86_Template/Lib/venv/scripts/nt/venvwlauncher.exe
deleted file mode 100644
index 017f5015..00000000
Binary files a/Python313_13_x86_Template/Lib/venv/scripts/nt/venvwlauncher.exe and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/warnings.py b/Python313_13_x86_Template/Lib/warnings.py
deleted file mode 100644
index 04320d47..00000000
--- a/Python313_13_x86_Template/Lib/warnings.py
+++ /dev/null
@@ -1,735 +0,0 @@
-"""Python part of the warnings subsystem."""
-
-import sys
-
-
-__all__ = ["warn", "warn_explicit", "showwarning",
-           "formatwarning", "filterwarnings", "simplefilter",
-           "resetwarnings", "catch_warnings", "deprecated"]
-
-def showwarning(message, category, filename, lineno, file=None, line=None):
-    """Hook to write a warning to a file; replace if you like."""
-    msg = WarningMessage(message, category, filename, lineno, file, line)
-    _showwarnmsg_impl(msg)
-
-def formatwarning(message, category, filename, lineno, line=None):
-    """Function to format a warning the standard way."""
-    msg = WarningMessage(message, category, filename, lineno, None, line)
-    return _formatwarnmsg_impl(msg)
-
-def _showwarnmsg_impl(msg):
-    file = msg.file
-    if file is None:
-        file = sys.stderr
-        if file is None:
-            # sys.stderr is None when run with pythonw.exe:
-            # warnings get lost
-            return
-    text = _formatwarnmsg(msg)
-    try:
-        file.write(text)
-    except OSError:
-        # the file (probably stderr) is invalid - this warning gets lost.
-        pass
-
-def _formatwarnmsg_impl(msg):
-    category = msg.category.__name__
-    s = f"{msg.filename}:{msg.lineno}: {category}: {msg.message}\n"
-
-    if msg.line is None:
-        try:
-            import linecache
-            line = linecache.getline(msg.filename, msg.lineno)
-        except Exception:
-            # When a warning is logged during Python shutdown, linecache
-            # and the import machinery don't work anymore
-            line = None
-            linecache = None
-    else:
-        line = msg.line
-    if line:
-        line = line.strip()
-        s += "  %s\n" % line
-
-    if msg.source is not None:
-        try:
-            import tracemalloc
-        # Logging a warning should not raise a new exception:
-        # catch Exception, not only ImportError and RecursionError.
- except Exception: - # don't suggest to enable tracemalloc if it's not available - suggest_tracemalloc = False - tb = None - else: - try: - suggest_tracemalloc = not tracemalloc.is_tracing() - tb = tracemalloc.get_object_traceback(msg.source) - except Exception: - # When a warning is logged during Python shutdown, tracemalloc - # and the import machinery don't work anymore - suggest_tracemalloc = False - tb = None - - if tb is not None: - s += 'Object allocated at (most recent call last):\n' - for frame in tb: - s += (' File "%s", lineno %s\n' - % (frame.filename, frame.lineno)) - - try: - if linecache is not None: - line = linecache.getline(frame.filename, frame.lineno) - else: - line = None - except Exception: - line = None - if line: - line = line.strip() - s += ' %s\n' % line - elif suggest_tracemalloc: - s += (f'{category}: Enable tracemalloc to get the object ' - f'allocation traceback\n') - return s - -# Keep a reference to check if the function was replaced -_showwarning_orig = showwarning - -def _showwarnmsg(msg): - """Hook to write a warning to a file; replace if you like.""" - try: - sw = showwarning - except NameError: - pass - else: - if sw is not _showwarning_orig: - # warnings.showwarning() was replaced - if not callable(sw): - raise TypeError("warnings.showwarning() must be set to a " - "function or method") - - sw(msg.message, msg.category, msg.filename, msg.lineno, - msg.file, msg.line) - return - _showwarnmsg_impl(msg) - -# Keep a reference to check if the function was replaced -_formatwarning_orig = formatwarning - -def _formatwarnmsg(msg): - """Function to format a warning the standard way.""" - try: - fw = formatwarning - except NameError: - pass - else: - if fw is not _formatwarning_orig: - # warnings.formatwarning() was replaced - return fw(msg.message, msg.category, - msg.filename, msg.lineno, msg.line) - return _formatwarnmsg_impl(msg) - -def filterwarnings(action, message="", category=Warning, module="", lineno=0, - append=False): - """Insert an entry into the list of warnings filters (at the front). - - 'action' -- one of "error", "ignore", "always", "default", "module", - or "once" - 'message' -- a regex that the warning message must match - 'category' -- a class that the warning must be a subclass of - 'module' -- a regex that the module name must match - 'lineno' -- an integer line number, 0 matches all warnings - 'append' -- if true, append to the list of filters - """ - if action not in {"error", "ignore", "always", "default", "module", "once"}: - raise ValueError(f"invalid action: {action!r}") - if not isinstance(message, str): - raise TypeError("message must be a string") - if not isinstance(category, type) or not issubclass(category, Warning): - raise TypeError("category must be a Warning subclass") - if not isinstance(module, str): - raise TypeError("module must be a string") - if not isinstance(lineno, int): - raise TypeError("lineno must be an int") - if lineno < 0: - raise ValueError("lineno must be an int >= 0") - - if message or module: - import re - - if message: - message = re.compile(message, re.I) - else: - message = None - if module: - module = re.compile(module) - else: - module = None - - _add_filter(action, message, category, module, lineno, append=append) - -def simplefilter(action, category=Warning, lineno=0, append=False): - """Insert a simple entry into the list of warnings filters (at the front). - - A simple filter matches all modules and messages. 
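The filter machinery defined above is easiest to read through a couple of concrete calls; a brief sketch using only this module's public API (the module patterns here are invented examples):

import warnings

# Escalate DeprecationWarning from modules matching the regex "legacy" into
# errors, but silence its vendored subpackage entirely. The second call is
# inserted at the front of the list, so it is checked first.
warnings.filterwarnings("error", category=DeprecationWarning, module="legacy")
warnings.filterwarnings("ignore", module=r"legacy\.vendored")

# A simple filter carries no message/module regexes, so it matches everywhere.
warnings.simplefilter("default", ResourceWarning)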
- 'action' -- one of "error", "ignore", "always", "default", "module", - or "once" - 'category' -- a class that the warning must be a subclass of - 'lineno' -- an integer line number, 0 matches all warnings - 'append' -- if true, append to the list of filters - """ - if action not in {"error", "ignore", "always", "default", "module", "once"}: - raise ValueError(f"invalid action: {action!r}") - if not isinstance(lineno, int): - raise TypeError("lineno must be an int") - if lineno < 0: - raise ValueError("lineno must be an int >= 0") - _add_filter(action, None, category, None, lineno, append=append) - -def _add_filter(*item, append): - # Remove possible duplicate filters, so new one will be placed - # in correct place. If append=True and duplicate exists, do nothing. - if not append: - try: - filters.remove(item) - except ValueError: - pass - filters.insert(0, item) - else: - if item not in filters: - filters.append(item) - _filters_mutated() - -def resetwarnings(): - """Clear the list of warning filters, so that no filters are active.""" - filters[:] = [] - _filters_mutated() - -class _OptionError(Exception): - """Exception used by option processing helpers.""" - pass - -# Helper to process -W options passed via sys.warnoptions -def _processoptions(args): - for arg in args: - try: - _setoption(arg) - except _OptionError as msg: - print("Invalid -W option ignored:", msg, file=sys.stderr) - -# Helper for _processoptions() -def _setoption(arg): - parts = arg.split(':') - if len(parts) > 5: - raise _OptionError("too many fields (max 5): %r" % (arg,)) - while len(parts) < 5: - parts.append('') - action, message, category, module, lineno = [s.strip() - for s in parts] - action = _getaction(action) - category = _getcategory(category) - if message or module: - import re - if message: - message = re.escape(message) - if module: - module = re.escape(module) + r'\Z' - if lineno: - try: - lineno = int(lineno) - if lineno < 0: - raise ValueError - except (ValueError, OverflowError): - raise _OptionError("invalid lineno %r" % (lineno,)) from None - else: - lineno = 0 - filterwarnings(action, message, category, module, lineno) - -# Helper for _setoption() -def _getaction(action): - if not action: - return "default" - if action == "all": return "always" # Alias - for a in ('default', 'always', 'ignore', 'module', 'once', 'error'): - if a.startswith(action): - return a - raise _OptionError("invalid action: %r" % (action,)) - -# Helper for _setoption() -def _getcategory(category): - if not category: - return Warning - if '.' 
not in category:
-        import builtins as m
-        klass = category
-    else:
-        module, _, klass = category.rpartition('.')
-        try:
-            m = __import__(module, None, None, [klass])
-        except ImportError:
-            raise _OptionError("invalid module name: %r" % (module,)) from None
-    try:
-        cat = getattr(m, klass)
-    except AttributeError:
-        raise _OptionError("unknown warning category: %r" % (category,)) from None
-    if not issubclass(cat, Warning):
-        raise _OptionError("invalid warning category: %r" % (category,))
-    return cat
-
-
-def _is_internal_filename(filename):
-    return 'importlib' in filename and '_bootstrap' in filename
-
-
-def _is_filename_to_skip(filename, skip_file_prefixes):
-    return any(filename.startswith(prefix) for prefix in skip_file_prefixes)
-
-
-def _is_internal_frame(frame):
-    """Signal whether the frame is an internal CPython implementation detail."""
-    return _is_internal_filename(frame.f_code.co_filename)
-
-
-def _next_external_frame(frame, skip_file_prefixes):
-    """Find the next frame that doesn't involve Python or user internals."""
-    frame = frame.f_back
-    while frame is not None and (
-            _is_internal_filename(filename := frame.f_code.co_filename) or
-            _is_filename_to_skip(filename, skip_file_prefixes)):
-        frame = frame.f_back
-    return frame
-
-
-# Code typically replaced by _warnings
-def warn(message, category=None, stacklevel=1, source=None,
-         *, skip_file_prefixes=()):
-    """Issue a warning, or maybe ignore it or raise an exception."""
-    # Check if message is already a Warning object
-    if isinstance(message, Warning):
-        category = message.__class__
-    # Check category argument
-    if category is None:
-        category = UserWarning
-    if not (isinstance(category, type) and issubclass(category, Warning)):
-        raise TypeError("category must be a Warning subclass, "
-                        "not '{:s}'".format(type(category).__name__))
-    if not isinstance(skip_file_prefixes, tuple):
-        # The C version demands a tuple for implementation performance.
-        raise TypeError('skip_file_prefixes must be a tuple of strs.')
-    if skip_file_prefixes:
-        stacklevel = max(2, stacklevel)
-    # Get context information
-    try:
-        if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)):
-            # If frame is too small to care or if the warning originated in
-            # internal code, then do not try to hide any frames.
-            frame = sys._getframe(stacklevel)
-        else:
-            frame = sys._getframe(1)
-            # Look for one frame less since the above line starts us off.
-            for x in range(stacklevel-1):
-                frame = _next_external_frame(frame, skip_file_prefixes)
-                if frame is None:
-                    raise ValueError
-    except ValueError:
-        globals = sys.__dict__
-        filename = "sys"
-        lineno = 1
-    else:
-        globals = frame.f_globals
-        filename = frame.f_code.co_filename
-        lineno = frame.f_lineno
-    if '__name__' in globals:
-        module = globals['__name__']
-    else:
-        module = "<string>"
-    registry = globals.setdefault("__warningregistry__", {})
-    warn_explicit(message, category, filename, lineno, module, registry,
-                  globals, source)
-
-def warn_explicit(message, category, filename, lineno,
-                  module=None, registry=None, module_globals=None,
-                  source=None):
-    lineno = int(lineno)
-    if module is None:
-        module = filename or "<unknown>"
-        if module[-3:].lower() == ".py":
-            module = module[:-3] # XXX What about leading pathname?
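            # e.g. a filename of "pkg/mod.py" yields the module name "pkg/mod";
            # as the XXX above notes, any leading path component is kept.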
- if registry is None: - registry = {} - if registry.get('version', 0) != _filters_version: - registry.clear() - registry['version'] = _filters_version - if isinstance(message, Warning): - text = str(message) - category = message.__class__ - else: - text = message - message = category(message) - key = (text, category, lineno) - # Quick test for common case - if registry.get(key): - return - # Search the filters - for item in filters: - action, msg, cat, mod, ln = item - if ((msg is None or msg.match(text)) and - issubclass(category, cat) and - (mod is None or mod.match(module)) and - (ln == 0 or lineno == ln)): - break - else: - action = defaultaction - # Early exit actions - if action == "ignore": - return - - # Prime the linecache for formatting, in case the - # "file" is actually in a zipfile or something. - import linecache - linecache.getlines(filename, module_globals) - - if action == "error": - raise message - # Other actions - if action == "once": - registry[key] = 1 - oncekey = (text, category) - if onceregistry.get(oncekey): - return - onceregistry[oncekey] = 1 - elif action == "always": - pass - elif action == "module": - registry[key] = 1 - altkey = (text, category, 0) - if registry.get(altkey): - return - registry[altkey] = 1 - elif action == "default": - registry[key] = 1 - else: - # Unrecognized actions are errors - raise RuntimeError( - "Unrecognized action (%r) in warnings.filters:\n %s" % - (action, item)) - # Print message and context - msg = WarningMessage(message, category, filename, lineno, source=source) - _showwarnmsg(msg) - - -class WarningMessage(object): - - _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file", - "line", "source") - - def __init__(self, message, category, filename, lineno, file=None, - line=None, source=None): - self.message = message - self.category = category - self.filename = filename - self.lineno = lineno - self.file = file - self.line = line - self.source = source - self._category_name = category.__name__ if category else None - - def __str__(self): - return ("{message : %r, category : %r, filename : %r, lineno : %s, " - "line : %r}" % (self.message, self._category_name, - self.filename, self.lineno, self.line)) - - -class catch_warnings(object): - - """A context manager that copies and restores the warnings filter upon - exiting the context. - - The 'record' argument specifies whether warnings should be captured by a - custom implementation of warnings.showwarning() and be appended to a list - returned by the context manager. Otherwise None is returned by the context - manager. The objects appended to the list are arguments whose attributes - mirror the arguments to showwarning(). - - The 'module' argument is to specify an alternative module to the module - named 'warnings' and imported under that name. This argument is only useful - when testing the warnings module itself. - - If the 'action' argument is not None, the remaining arguments are passed - to warnings.simplefilter() as if it were called immediately on entering the - context. - """ - - def __init__(self, *, record=False, module=None, - action=None, category=Warning, lineno=0, append=False): - """Specify whether to record warnings and if an alternative module - should be used other than sys.modules['warnings']. - - For compatibility with Python 3.0, please consider all arguments to be - keyword-only. 
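A minimal sketch of the recording pattern this context manager provides (standard usage of the API defined in this file; the warning text is arbitrary):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")   # make sure the warning is not filtered
    warnings.warn("old API", DeprecationWarning)

assert caught[0].category is DeprecationWarning
assert "old API" in str(caught[0].message)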
- - """ - self._record = record - self._module = sys.modules['warnings'] if module is None else module - self._entered = False - if action is None: - self._filter = None - else: - self._filter = (action, category, lineno, append) - - def __repr__(self): - args = [] - if self._record: - args.append("record=True") - if self._module is not sys.modules['warnings']: - args.append("module=%r" % self._module) - name = type(self).__name__ - return "%s(%s)" % (name, ", ".join(args)) - - def __enter__(self): - if self._entered: - raise RuntimeError("Cannot enter %r twice" % self) - self._entered = True - self._filters = self._module.filters - self._module.filters = self._filters[:] - self._module._filters_mutated() - self._showwarning = self._module.showwarning - self._showwarnmsg_impl = self._module._showwarnmsg_impl - if self._filter is not None: - simplefilter(*self._filter) - if self._record: - log = [] - self._module._showwarnmsg_impl = log.append - # Reset showwarning() to the default implementation to make sure - # that _showwarnmsg() calls _showwarnmsg_impl() - self._module.showwarning = self._module._showwarning_orig - return log - else: - return None - - def __exit__(self, *exc_info): - if not self._entered: - raise RuntimeError("Cannot exit %r without entering first" % self) - self._module.filters = self._filters - self._module._filters_mutated() - self._module.showwarning = self._showwarning - self._module._showwarnmsg_impl = self._showwarnmsg_impl - - -class deprecated: - """Indicate that a class, function or overload is deprecated. - - When this decorator is applied to an object, the type checker - will generate a diagnostic on usage of the deprecated object. - - Usage: - - @deprecated("Use B instead") - class A: - pass - - @deprecated("Use g instead") - def f(): - pass - - @overload - @deprecated("int support is deprecated") - def g(x: int) -> int: ... - @overload - def g(x: str) -> int: ... - - The warning specified by *category* will be emitted at runtime - on use of deprecated objects. For functions, that happens on calls; - for classes, on instantiation and on creation of subclasses. - If the *category* is ``None``, no warning is emitted at runtime. - The *stacklevel* determines where the - warning is emitted. If it is ``1`` (the default), the warning - is emitted at the direct caller of the deprecated object; if it - is higher, it is emitted further up the stack. - Static type checker behavior is not affected by the *category* - and *stacklevel* arguments. - - The deprecation message passed to the decorator is saved in the - ``__deprecated__`` attribute on the decorated object. - If applied to an overload, the decorator - must be after the ``@overload`` decorator for the attribute to - exist on the overload as returned by ``get_overloads()``. - - See PEP 702 for details. - - """ - def __init__( - self, - message: str, - /, - *, - category: type[Warning] | None = DeprecationWarning, - stacklevel: int = 1, - ) -> None: - if not isinstance(message, str): - raise TypeError( - f"Expected an object of type str for 'message', not {type(message).__name__!r}" - ) - self.message = message - self.category = category - self.stacklevel = stacklevel - - def __call__(self, arg, /): - # Make sure the inner functions created below don't - # retain a reference to self. 
- msg = self.message - category = self.category - stacklevel = self.stacklevel - if category is None: - arg.__deprecated__ = msg - return arg - elif isinstance(arg, type): - import functools - from types import MethodType - - original_new = arg.__new__ - - @functools.wraps(original_new) - def __new__(cls, /, *args, **kwargs): - if cls is arg: - warn(msg, category=category, stacklevel=stacklevel + 1) - if original_new is not object.__new__: - return original_new(cls, *args, **kwargs) - # Mirrors a similar check in object.__new__. - elif cls.__init__ is object.__init__ and (args or kwargs): - raise TypeError(f"{cls.__name__}() takes no arguments") - else: - return original_new(cls) - - arg.__new__ = staticmethod(__new__) - - if "__init_subclass__" in arg.__dict__: - # __init_subclass__ is directly present on the decorated class. - # Synthesize a wrapper that calls this method directly. - original_init_subclass = arg.__init_subclass__ - # We need slightly different behavior if __init_subclass__ - # is a bound method (likely if it was implemented in Python). - # Otherwise, it likely means it's a builtin such as - # object's implementation of __init_subclass__. - if isinstance(original_init_subclass, MethodType): - original_init_subclass = original_init_subclass.__func__ - - @functools.wraps(original_init_subclass) - def __init_subclass__(*args, **kwargs): - warn(msg, category=category, stacklevel=stacklevel + 1) - return original_init_subclass(*args, **kwargs) - else: - def __init_subclass__(cls, *args, **kwargs): - warn(msg, category=category, stacklevel=stacklevel + 1) - return super(arg, cls).__init_subclass__(*args, **kwargs) - - arg.__init_subclass__ = classmethod(__init_subclass__) - - arg.__deprecated__ = __new__.__deprecated__ = msg - __init_subclass__.__deprecated__ = msg - return arg - elif callable(arg): - import functools - import inspect - - @functools.wraps(arg) - def wrapper(*args, **kwargs): - warn(msg, category=category, stacklevel=stacklevel + 1) - return arg(*args, **kwargs) - - if inspect.iscoroutinefunction(arg): - wrapper = inspect.markcoroutinefunction(wrapper) - - arg.__deprecated__ = wrapper.__deprecated__ = msg - return wrapper - else: - raise TypeError( - "@deprecated decorator with non-None category must be applied to " - f"a class or callable, not {arg!r}" - ) - - -_DEPRECATED_MSG = "{name!r} is deprecated and slated for removal in Python {remove}" - -def _deprecated(name, message=_DEPRECATED_MSG, *, remove, _version=sys.version_info): - """Warn that *name* is deprecated or should be removed. - - RuntimeError is raised if *remove* specifies a major/minor tuple older than - the current Python version or the same version but past the alpha. - - The *message* argument is formatted with *name* and *remove* as a Python - version tuple (e.g. (3, 11)). 
-
-    """
-    remove_formatted = f"{remove[0]}.{remove[1]}"
-    if (_version[:2] > remove) or (_version[:2] == remove and _version[3] != "alpha"):
-        msg = f"{name!r} was slated for removal after Python {remove_formatted} alpha"
-        raise RuntimeError(msg)
-    else:
-        msg = message.format(name=name, remove=remove_formatted)
-        warn(msg, DeprecationWarning, stacklevel=3)
-
-
-# Private utility function called by _PyErr_WarnUnawaitedCoroutine
-def _warn_unawaited_coroutine(coro):
-    msg_lines = [
-        f"coroutine '{coro.__qualname__}' was never awaited\n"
-    ]
-    if coro.cr_origin is not None:
-        import linecache, traceback
-        def extract():
-            for filename, lineno, funcname in reversed(coro.cr_origin):
-                line = linecache.getline(filename, lineno)
-                yield (filename, lineno, funcname, line)
-        msg_lines.append("Coroutine created at (most recent call last)\n")
-        msg_lines += traceback.format_list(list(extract()))
-    msg = "".join(msg_lines).rstrip("\n")
-    # Passing source= here means that if the user happens to have tracemalloc
-    # enabled and tracking where the coroutine was created, the warning will
-    # contain that traceback. This does mean that if they have *both*
-    # coroutine origin tracking *and* tracemalloc enabled, they'll get two
-    # partially-redundant tracebacks. If we wanted to be clever we could
-    # probably detect this case and avoid it, but for now we don't bother.
-    warn(msg, category=RuntimeWarning, stacklevel=2, source=coro)
-
-
-# filters contains a sequence of filter 5-tuples
-# The components of the 5-tuple are:
-# - an action: error, ignore, always, default, module, or once
-# - a compiled regex that must match the warning message
-# - a class representing the warning category
-# - a compiled regex that must match the module that is being warned
-# - a line number for the line being warned about, or 0 to mean any line
-# If either of the compiled regexes is None, it matches anything.
-try:
-    from _warnings import (filters, _defaultaction, _onceregistry,
-                           warn, warn_explicit, _filters_mutated)
-    defaultaction = _defaultaction
-    onceregistry = _onceregistry
-    _warnings_defaults = True
-except ImportError:
-    filters = []
-    defaultaction = "default"
-    onceregistry = {}
-
-    _filters_version = 1
-
-    def _filters_mutated():
-        global _filters_version
-        _filters_version += 1
-
-    _warnings_defaults = False
-
-
-# Module initialization
-_processoptions(sys.warnoptions)
-if not _warnings_defaults:
-    # Several warning categories are ignored by default in regular builds
-    if not hasattr(sys, 'gettotalrefcount'):
-        filterwarnings("default", category=DeprecationWarning,
-                       module="__main__", append=1)
-        simplefilter("ignore", category=DeprecationWarning, append=1)
-        simplefilter("ignore", category=PendingDeprecationWarning, append=1)
-        simplefilter("ignore", category=ImportWarning, append=1)
-        simplefilter("ignore", category=ResourceWarning, append=1)
-
-del _warnings_defaults
diff --git a/Python313_13_x86_Template/Lib/weakref.py b/Python313_13_x86_Template/Lib/weakref.py
deleted file mode 100644
index 25b70927..00000000
--- a/Python313_13_x86_Template/Lib/weakref.py
+++ /dev/null
@@ -1,674 +0,0 @@
-"""Weak reference support for Python.
-
-This module is an implementation of PEP 205:
-
-https://peps.python.org/pep-0205/
-"""
-
-# Naming convention: Variables named "wr" are weak reference objects;
-# they are called this instead of "ref" to avoid name collisions with
-# the module-global ref() function imported from _weakref.
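Before the container classes below, a quick illustration of the primitive they build on: a weak reference returns its referent while it is alive and None afterwards, and WeakValueDictionary drops entries accordingly. A minimal sketch (the Node class is invented; immediate collection assumes CPython's reference counting):

import weakref

class Node:
    pass

n = Node()
r = weakref.ref(n)
assert r() is n                 # alive: calling the ref returns the object

cache = weakref.WeakValueDictionary()
cache["node"] = n
del n                           # drop the only strong reference
assert r() is None              # referent collected (immediately on CPython)
assert "node" not in cache      # ...and the entry went with it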
- -from _weakref import ( - getweakrefcount, - getweakrefs, - ref, - proxy, - CallableProxyType, - ProxyType, - ReferenceType, - _remove_dead_weakref) - -from _weakrefset import WeakSet, _IterationGuard - -import _collections_abc # Import after _weakref to avoid circular import. -import sys -import itertools - -ProxyTypes = (ProxyType, CallableProxyType) - -__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs", - "WeakKeyDictionary", "ReferenceType", "ProxyType", - "CallableProxyType", "ProxyTypes", "WeakValueDictionary", - "WeakSet", "WeakMethod", "finalize"] - - -_collections_abc.MutableSet.register(WeakSet) - -class WeakMethod(ref): - """ - A custom `weakref.ref` subclass which simulates a weak reference to - a bound method, working around the lifetime problem of bound methods. - """ - - __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__" - - def __new__(cls, meth, callback=None): - try: - obj = meth.__self__ - func = meth.__func__ - except AttributeError: - raise TypeError("argument should be a bound method, not {}" - .format(type(meth))) from None - def _cb(arg): - # The self-weakref trick is needed to avoid creating a reference - # cycle. - self = self_wr() - if self._alive: - self._alive = False - if callback is not None: - callback(self) - self = ref.__new__(cls, obj, _cb) - self._func_ref = ref(func, _cb) - self._meth_type = type(meth) - self._alive = True - self_wr = ref(self) - return self - - def __call__(self): - obj = super().__call__() - func = self._func_ref() - if obj is None or func is None: - return None - return self._meth_type(func, obj) - - def __eq__(self, other): - if isinstance(other, WeakMethod): - if not self._alive or not other._alive: - return self is other - return ref.__eq__(self, other) and self._func_ref == other._func_ref - return NotImplemented - - def __ne__(self, other): - if isinstance(other, WeakMethod): - if not self._alive or not other._alive: - return self is not other - return ref.__ne__(self, other) or self._func_ref != other._func_ref - return NotImplemented - - __hash__ = ref.__hash__ - - -class WeakValueDictionary(_collections_abc.MutableMapping): - """Mapping class that references values weakly. - - Entries in the dictionary will be discarded when no strong - reference to the value exists anymore - """ - # We inherit the constructor without worrying about the input - # dictionary; since it uses our .update() method, we get the right - # checks (if the other dictionary is a WeakValueDictionary, - # objects are unwrapped on the way out, and we always wrap on the - # way in). - - def __init__(self, other=(), /, **kw): - def remove(wr, selfref=ref(self), _atomic_removal=_remove_dead_weakref): - self = selfref() - if self is not None: - if self._iterating: - self._pending_removals.append(wr.key) - else: - # Atomic removal is necessary since this function - # can be called asynchronously by the GC - _atomic_removal(self.data, wr.key) - self._remove = remove - # A list of keys to be removed - self._pending_removals = [] - self._iterating = set() - self.data = {} - self.update(other, **kw) - - def _commit_removals(self, _atomic_removal=_remove_dead_weakref): - pop = self._pending_removals.pop - d = self.data - # We shouldn't encounter any KeyError, because this method should - # always be called *before* mutating the dict. 
- while True: - try: - key = pop() - except IndexError: - return - _atomic_removal(d, key) - - def __getitem__(self, key): - if self._pending_removals: - self._commit_removals() - o = self.data[key]() - if o is None: - raise KeyError(key) - else: - return o - - def __delitem__(self, key): - if self._pending_removals: - self._commit_removals() - del self.data[key] - - def __len__(self): - if self._pending_removals: - self._commit_removals() - return len(self.data) - - def __contains__(self, key): - if self._pending_removals: - self._commit_removals() - try: - o = self.data[key]() - except KeyError: - return False - return o is not None - - def __repr__(self): - return "<%s at %#x>" % (self.__class__.__name__, id(self)) - - def __setitem__(self, key, value): - if self._pending_removals: - self._commit_removals() - self.data[key] = KeyedRef(value, self._remove, key) - - def copy(self): - if self._pending_removals: - self._commit_removals() - new = WeakValueDictionary() - with _IterationGuard(self): - for key, wr in self.data.items(): - o = wr() - if o is not None: - new[key] = o - return new - - __copy__ = copy - - def __deepcopy__(self, memo): - from copy import deepcopy - if self._pending_removals: - self._commit_removals() - new = self.__class__() - with _IterationGuard(self): - for key, wr in self.data.items(): - o = wr() - if o is not None: - new[deepcopy(key, memo)] = o - return new - - def get(self, key, default=None): - if self._pending_removals: - self._commit_removals() - try: - wr = self.data[key] - except KeyError: - return default - else: - o = wr() - if o is None: - # This should only happen - return default - else: - return o - - def items(self): - if self._pending_removals: - self._commit_removals() - with _IterationGuard(self): - for k, wr in self.data.items(): - v = wr() - if v is not None: - yield k, v - - def keys(self): - if self._pending_removals: - self._commit_removals() - with _IterationGuard(self): - for k, wr in self.data.items(): - if wr() is not None: - yield k - - __iter__ = keys - - def itervaluerefs(self): - """Return an iterator that yields the weak references to the values. - - The references are not guaranteed to be 'live' at the time - they are used, so the result of calling the references needs - to be checked before being used. This can be used to avoid - creating references that will cause the garbage collector to - keep the values around longer than needed. 
- - """ - if self._pending_removals: - self._commit_removals() - with _IterationGuard(self): - yield from self.data.values() - - def values(self): - if self._pending_removals: - self._commit_removals() - with _IterationGuard(self): - for wr in self.data.values(): - obj = wr() - if obj is not None: - yield obj - - def popitem(self): - if self._pending_removals: - self._commit_removals() - while True: - key, wr = self.data.popitem() - o = wr() - if o is not None: - return key, o - - def pop(self, key, *args): - if self._pending_removals: - self._commit_removals() - try: - o = self.data.pop(key)() - except KeyError: - o = None - if o is None: - if args: - return args[0] - else: - raise KeyError(key) - else: - return o - - def setdefault(self, key, default=None): - try: - o = self.data[key]() - except KeyError: - o = None - if o is None: - if self._pending_removals: - self._commit_removals() - self.data[key] = KeyedRef(default, self._remove, key) - return default - else: - return o - - def update(self, other=None, /, **kwargs): - if self._pending_removals: - self._commit_removals() - d = self.data - if other is not None: - if not hasattr(other, "items"): - other = dict(other) - for key, o in other.items(): - d[key] = KeyedRef(o, self._remove, key) - for key, o in kwargs.items(): - d[key] = KeyedRef(o, self._remove, key) - - def valuerefs(self): - """Return a list of weak references to the values. - - The references are not guaranteed to be 'live' at the time - they are used, so the result of calling the references needs - to be checked before being used. This can be used to avoid - creating references that will cause the garbage collector to - keep the values around longer than needed. - - """ - if self._pending_removals: - self._commit_removals() - return list(self.data.values()) - - def __ior__(self, other): - self.update(other) - return self - - def __or__(self, other): - if isinstance(other, _collections_abc.Mapping): - c = self.copy() - c.update(other) - return c - return NotImplemented - - def __ror__(self, other): - if isinstance(other, _collections_abc.Mapping): - c = self.__class__() - c.update(other) - c.update(self) - return c - return NotImplemented - - -class KeyedRef(ref): - """Specialized reference that includes a key corresponding to the value. - - This is used in the WeakValueDictionary to avoid having to create - a function object for each key stored in the mapping. A shared - callback object can use the 'key' attribute of a KeyedRef instead - of getting a reference to the key from an enclosing scope. - - """ - - __slots__ = "key", - - def __new__(type, ob, callback, key): - self = ref.__new__(type, ob, callback) - self.key = key - return self - - def __init__(self, ob, callback, key): - super().__init__(ob, callback) - - -class WeakKeyDictionary(_collections_abc.MutableMapping): - """ Mapping class that references keys weakly. - - Entries in the dictionary will be discarded when there is no - longer a strong reference to the key. This can be used to - associate additional data with an object owned by other parts of - an application without adding attributes to those objects. This - can be especially useful with objects that override attribute - accesses. 
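A short sketch of the association pattern described above (the Widget class and payload are invented; immediate cleanup assumes CPython):

import weakref

class Widget:
    pass

extra = weakref.WeakKeyDictionary()
w = Widget()
extra[w] = {"clicks": 3}    # annotate without adding attributes to Widget
del w                       # once the widget is garbage...
assert len(extra) == 0      # ...its entry is discarded automatically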
- """ - - def __init__(self, dict=None): - self.data = {} - def remove(k, selfref=ref(self)): - self = selfref() - if self is not None: - if self._iterating: - self._pending_removals.append(k) - else: - try: - del self.data[k] - except KeyError: - pass - self._remove = remove - # A list of dead weakrefs (keys to be removed) - self._pending_removals = [] - self._iterating = set() - self._dirty_len = False - if dict is not None: - self.update(dict) - - def _commit_removals(self): - # NOTE: We don't need to call this method before mutating the dict, - # because a dead weakref never compares equal to a live weakref, - # even if they happened to refer to equal objects. - # However, it means keys may already have been removed. - pop = self._pending_removals.pop - d = self.data - while True: - try: - key = pop() - except IndexError: - return - - try: - del d[key] - except KeyError: - pass - - def _scrub_removals(self): - d = self.data - self._pending_removals = [k for k in self._pending_removals if k in d] - self._dirty_len = False - - def __delitem__(self, key): - self._dirty_len = True - del self.data[ref(key)] - - def __getitem__(self, key): - return self.data[ref(key)] - - def __len__(self): - if self._dirty_len and self._pending_removals: - # self._pending_removals may still contain keys which were - # explicitly removed, we have to scrub them (see issue #21173). - self._scrub_removals() - return len(self.data) - len(self._pending_removals) - - def __repr__(self): - return "<%s at %#x>" % (self.__class__.__name__, id(self)) - - def __setitem__(self, key, value): - self.data[ref(key, self._remove)] = value - - def copy(self): - new = WeakKeyDictionary() - with _IterationGuard(self): - for key, value in self.data.items(): - o = key() - if o is not None: - new[o] = value - return new - - __copy__ = copy - - def __deepcopy__(self, memo): - from copy import deepcopy - new = self.__class__() - with _IterationGuard(self): - for key, value in self.data.items(): - o = key() - if o is not None: - new[o] = deepcopy(value, memo) - return new - - def get(self, key, default=None): - return self.data.get(ref(key),default) - - def __contains__(self, key): - try: - wr = ref(key) - except TypeError: - return False - return wr in self.data - - def items(self): - with _IterationGuard(self): - for wr, value in self.data.items(): - key = wr() - if key is not None: - yield key, value - - def keys(self): - with _IterationGuard(self): - for wr in self.data: - obj = wr() - if obj is not None: - yield obj - - __iter__ = keys - - def values(self): - with _IterationGuard(self): - for wr, value in self.data.items(): - if wr() is not None: - yield value - - def keyrefs(self): - """Return a list of weak references to the keys. - - The references are not guaranteed to be 'live' at the time - they are used, so the result of calling the references needs - to be checked before being used. This can be used to avoid - creating references that will cause the garbage collector to - keep the keys around longer than needed. 
- - """ - return list(self.data) - - def popitem(self): - self._dirty_len = True - while True: - key, value = self.data.popitem() - o = key() - if o is not None: - return o, value - - def pop(self, key, *args): - self._dirty_len = True - return self.data.pop(ref(key), *args) - - def setdefault(self, key, default=None): - return self.data.setdefault(ref(key, self._remove),default) - - def update(self, dict=None, /, **kwargs): - d = self.data - if dict is not None: - if not hasattr(dict, "items"): - dict = type({})(dict) - for key, value in dict.items(): - d[ref(key, self._remove)] = value - if len(kwargs): - self.update(kwargs) - - def __ior__(self, other): - self.update(other) - return self - - def __or__(self, other): - if isinstance(other, _collections_abc.Mapping): - c = self.copy() - c.update(other) - return c - return NotImplemented - - def __ror__(self, other): - if isinstance(other, _collections_abc.Mapping): - c = self.__class__() - c.update(other) - c.update(self) - return c - return NotImplemented - - -class finalize: - """Class for finalization of weakrefable objects - - finalize(obj, func, *args, **kwargs) returns a callable finalizer - object which will be called when obj is garbage collected. The - first time the finalizer is called it evaluates func(*arg, **kwargs) - and returns the result. After this the finalizer is dead, and - calling it just returns None. - - When the program exits any remaining finalizers for which the - atexit attribute is true will be run in reverse order of creation. - By default atexit is true. - """ - - # Finalizer objects don't have any state of their own. They are - # just used as keys to lookup _Info objects in the registry. This - # ensures that they cannot be part of a ref-cycle. - - __slots__ = () - _registry = {} - _shutdown = False - _index_iter = itertools.count() - _dirty = False - _registered_with_atexit = False - - class _Info: - __slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index") - - def __init__(self, obj, func, /, *args, **kwargs): - if not self._registered_with_atexit: - # We may register the exit function more than once because - # of a thread race, but that is harmless - import atexit - atexit.register(self._exitfunc) - finalize._registered_with_atexit = True - info = self._Info() - info.weakref = ref(obj, self) - info.func = func - info.args = args - info.kwargs = kwargs or None - info.atexit = True - info.index = next(self._index_iter) - self._registry[self] = info - finalize._dirty = True - - def __call__(self, _=None): - """If alive then mark as dead and return func(*args, **kwargs); - otherwise return None""" - info = self._registry.pop(self, None) - if info and not self._shutdown: - return info.func(*info.args, **(info.kwargs or {})) - - def detach(self): - """If alive then mark as dead and return (obj, func, args, kwargs); - otherwise return None""" - info = self._registry.get(self) - obj = info and info.weakref() - if obj is not None and self._registry.pop(self, None): - return (obj, info.func, info.args, info.kwargs or {}) - - def peek(self): - """If alive then return (obj, func, args, kwargs); - otherwise return None""" - info = self._registry.get(self) - obj = info and info.weakref() - if obj is not None: - return (obj, info.func, info.args, info.kwargs or {}) - - @property - def alive(self): - """Whether finalizer is alive""" - return self in self._registry - - @property - def atexit(self): - """Whether finalizer should be called at exit""" - info = self._registry.get(self) - return bool(info) and 
info.atexit - - @atexit.setter - def atexit(self, value): - info = self._registry.get(self) - if info: - info.atexit = bool(value) - - def __repr__(self): - info = self._registry.get(self) - obj = info and info.weakref() - if obj is None: - return '<%s object at %#x; dead>' % (type(self).__name__, id(self)) - else: - return '<%s object at %#x; for %r at %#x>' % \ - (type(self).__name__, id(self), type(obj).__name__, id(obj)) - - @classmethod - def _select_for_exit(cls): - # Return live finalizers marked for exit, oldest first - L = [(f,i) for (f,i) in cls._registry.items() if i.atexit] - L.sort(key=lambda item:item[1].index) - return [f for (f,i) in L] - - @classmethod - def _exitfunc(cls): - # At shutdown invoke finalizers for which atexit is true. - # This is called once all other non-daemonic threads have been - # joined. - reenable_gc = False - try: - if cls._registry: - import gc - if gc.isenabled(): - reenable_gc = True - gc.disable() - pending = None - while True: - if pending is None or finalize._dirty: - pending = cls._select_for_exit() - finalize._dirty = False - if not pending: - break - f = pending.pop() - try: - # gc is disabled, so (assuming no daemonic - # threads) the following is the only line in - # this function which might trigger creation - # of a new finalizer - f() - except Exception: - sys.excepthook(*sys.exc_info()) - assert f not in cls._registry - finally: - # prevent any more finalizers from executing during shutdown - finalize._shutdown = True - if reenable_gc: - gc.enable() diff --git a/Python313_13_x86_Template/Lib/webbrowser.py b/Python313_13_x86_Template/Lib/webbrowser.py deleted file mode 100644 index ee582410..00000000 --- a/Python313_13_x86_Template/Lib/webbrowser.py +++ /dev/null @@ -1,723 +0,0 @@ -#! /usr/bin/env python3 -"""Interfaces for launching and remotely controlling web browsers.""" -# Maintained by Georg Brandl. - -import os -import shlex -import shutil -import sys -import subprocess -import threading - -__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"] - - -class Error(Exception): - pass - - -_lock = threading.RLock() -_browsers = {} # Dictionary of available browser controllers -_tryorder = None # Preference order of available browsers -_os_preferred_browser = None # The preferred browser - - -def register(name, klass, instance=None, *, preferred=False): - """Register a browser connector.""" - with _lock: - if _tryorder is None: - register_standard_browsers() - _browsers[name.lower()] = [klass, instance] - - # Preferred browsers go to the front of the list. - # Need to match to the default browser returned by xdg-settings, which - # may be of the form e.g. "firefox.desktop". - if preferred or (_os_preferred_browser and f'{name}.desktop' == _os_preferred_browser): - _tryorder.insert(0, name) - else: - _tryorder.append(name) - - -def get(using=None): - """Return a browser launcher instance appropriate for the environment.""" - if _tryorder is None: - with _lock: - if _tryorder is None: - register_standard_browsers() - if using is not None: - alternatives = [using] - else: - alternatives = _tryorder - for browser in alternatives: - if '%s' in browser: - # User gave us a command line, split it into name and args - browser = shlex.split(browser) - if browser[-1] == '&': - return BackgroundBrowser(browser[:-1]) - else: - return GenericBrowser(browser) - else: - # User gave us a browser name or path. 
- try: - command = _browsers[browser.lower()] - except KeyError: - command = _synthesize(browser) - if command[1] is not None: - return command[1] - elif command[0] is not None: - return command[0]() - raise Error("could not locate runnable browser") - - -# Please note: the following definition hides a builtin function. -# It is recommended one does "import webbrowser" and uses webbrowser.open(url) -# instead of "from webbrowser import *". - -def open(url, new=0, autoraise=True): - """Display url using the default browser. - - If possible, open url in a location determined by new. - - 0: the same browser window (the default). - - 1: a new browser window. - - 2: a new browser page ("tab"). - If possible, autoraise raises the window (the default) or not. - - If opening the browser succeeds, return True. - If there is a problem, return False. - """ - if _tryorder is None: - with _lock: - if _tryorder is None: - register_standard_browsers() - for name in _tryorder: - browser = get(name) - if browser.open(url, new, autoraise): - return True - return False - - -def open_new(url): - """Open url in a new window of the default browser. - - If not possible, then open url in the only browser window. - """ - return open(url, 1) - - -def open_new_tab(url): - """Open url in a new page ("tab") of the default browser. - - If not possible, then the behavior becomes equivalent to open_new(). - """ - return open(url, 2) - - -def _synthesize(browser, *, preferred=False): - """Attempt to synthesize a controller based on existing controllers. - - This is useful to create a controller when a user specifies a path to - an entry in the BROWSER environment variable -- we can copy a general - controller to operate using a specific installation of the desired - browser in this way. - - If we can't create a controller in this way, or if there is no - executable for the requested browser, return [None, None]. - - """ - cmd = browser.split()[0] - if not shutil.which(cmd): - return [None, None] - name = os.path.basename(cmd) - try: - command = _browsers[name.lower()] - except KeyError: - return [None, None] - # now attempt to clone to fit the new name: - controller = command[1] - if controller and name.lower() == controller.basename: - import copy - controller = copy.copy(controller) - controller.name = browser - controller.basename = os.path.basename(browser) - register(browser, None, instance=controller, preferred=preferred) - return [None, controller] - return [None, None] - - -# General parent classes - -class BaseBrowser: - """Parent class for all browsers. 
Do not use directly.""" - - args = ['%s'] - - def __init__(self, name=""): - self.name = name - self.basename = name - - def open(self, url, new=0, autoraise=True): - raise NotImplementedError - - def open_new(self, url): - return self.open(url, 1) - - def open_new_tab(self, url): - return self.open(url, 2) - - @staticmethod - def _check_url(url): - """Ensures that the URL is safe to pass to subprocesses as a parameter""" - if url and url.lstrip().startswith("-"): - raise ValueError(f"Invalid URL (leading dash disallowed): {url!r}") - - -class GenericBrowser(BaseBrowser): - """Class for all browsers started with a command - and without remote functionality.""" - - def __init__(self, name): - if isinstance(name, str): - self.name = name - self.args = ["%s"] - else: - # name should be a list with arguments - self.name = name[0] - self.args = name[1:] - self.basename = os.path.basename(self.name) - - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - cmdline = [self.name] + [arg.replace("%s", url) - for arg in self.args] - try: - if sys.platform[:3] == 'win': - p = subprocess.Popen(cmdline) - else: - p = subprocess.Popen(cmdline, close_fds=True) - return not p.wait() - except OSError: - return False - - -class BackgroundBrowser(GenericBrowser): - """Class for all browsers which are to be started in the - background.""" - - def open(self, url, new=0, autoraise=True): - cmdline = [self.name] + [arg.replace("%s", url) - for arg in self.args] - sys.audit("webbrowser.open", url) - self._check_url(url) - try: - if sys.platform[:3] == 'win': - p = subprocess.Popen(cmdline) - else: - p = subprocess.Popen(cmdline, close_fds=True, - start_new_session=True) - return p.poll() is None - except OSError: - return False - - -class UnixBrowser(BaseBrowser): - """Parent class for all Unix browsers with remote functionality.""" - - raise_opts = None - background = False - redirect_stdout = True - # In remote_args, %s will be replaced with the requested URL. %action will - # be replaced depending on the value of 'new' passed to open. - # remote_action is used for new=0 (open). If newwin is not None, it is - # used for new=1 (open_new). If newtab is not None, it is used for - # new=3 (open_new_tab). After both substitutions are made, any empty - # strings in the transformed remote_args list will be removed. - remote_args = ['%action', '%s'] - remote_action = None - remote_action_newwin = None - remote_action_newtab = None - - def _invoke(self, args, remote, autoraise, url=None): - raise_opt = [] - if remote and self.raise_opts: - # use autoraise argument only for remote invocation - autoraise = int(autoraise) - opt = self.raise_opts[autoraise] - if opt: - raise_opt = [opt] - - cmdline = [self.name] + raise_opt + args - - if remote or self.background: - inout = subprocess.DEVNULL - else: - # for TTY browsers, we need stdin/out - inout = None - p = subprocess.Popen(cmdline, close_fds=True, stdin=inout, - stdout=(self.redirect_stdout and inout or None), - stderr=inout, start_new_session=True) - if remote: - # wait at most five seconds. If the subprocess is not finished, the - # remote invocation has (hopefully) started a new instance. 
- try: - rc = p.wait(5) - # if remote call failed, open() will try direct invocation - return not rc - except subprocess.TimeoutExpired: - return True - elif self.background: - if p.poll() is None: - return True - else: - return False - else: - return not p.wait() - - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - if new == 0: - action = self.remote_action - elif new == 1: - action = self.remote_action_newwin - elif new == 2: - if self.remote_action_newtab is None: - action = self.remote_action_newwin - else: - action = self.remote_action_newtab - else: - raise Error("Bad 'new' parameter to open(); " - f"expected 0, 1, or 2, got {new}") - - args = [arg.replace("%s", url).replace("%action", action) - for arg in self.remote_args] - args = [arg for arg in args if arg] - success = self._invoke(args, True, autoraise, url) - if not success: - # remote invocation failed, try straight way - args = [arg.replace("%s", url) for arg in self.args] - return self._invoke(args, False, False) - else: - return True - - -class Mozilla(UnixBrowser): - """Launcher class for Mozilla browsers.""" - - remote_args = ['%action', '%s'] - remote_action = "" - remote_action_newwin = "-new-window" - remote_action_newtab = "-new-tab" - background = True - - -class Epiphany(UnixBrowser): - """Launcher class for Epiphany browser.""" - - raise_opts = ["-noraise", ""] - remote_args = ['%action', '%s'] - remote_action = "-n" - remote_action_newwin = "-w" - background = True - - -class Chrome(UnixBrowser): - """Launcher class for Google Chrome browser.""" - - remote_args = ['%action', '%s'] - remote_action = "" - remote_action_newwin = "--new-window" - remote_action_newtab = "" - background = True - - -Chromium = Chrome - - -class Opera(UnixBrowser): - """Launcher class for Opera browser.""" - - remote_args = ['%action', '%s'] - remote_action = "" - remote_action_newwin = "--new-window" - remote_action_newtab = "" - background = True - - -class Elinks(UnixBrowser): - """Launcher class for Elinks browsers.""" - - remote_args = ['-remote', 'openURL(%s%action)'] - remote_action = "" - remote_action_newwin = ",new-window" - remote_action_newtab = ",new-tab" - background = False - - # elinks doesn't like its stdout to be redirected - - # it uses redirected stdout as a signal to do -dump - redirect_stdout = False - - -class Konqueror(BaseBrowser): - """Controller for the KDE File Manager (kfm, or Konqueror). - - See the output of ``kfmclient --commands`` - for more information on the Konqueror remote-control interface. - """ - - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - # XXX Currently I know no way to prevent KFM from opening a new win. - if new == 2: - action = "newTab" - else: - action = "openURL" - - devnull = subprocess.DEVNULL - - try: - p = subprocess.Popen(["kfmclient", action, url], - close_fds=True, stdin=devnull, - stdout=devnull, stderr=devnull) - except OSError: - # fall through to next variant - pass - else: - p.wait() - # kfmclient's return code unfortunately has no meaning as it seems - return True - - try: - p = subprocess.Popen(["konqueror", "--silent", url], - close_fds=True, stdin=devnull, - stdout=devnull, stderr=devnull, - start_new_session=True) - except OSError: - # fall through to next variant - pass - else: - if p.poll() is None: - # Should be running now. 
- return True - - try: - p = subprocess.Popen(["kfm", "-d", url], - close_fds=True, stdin=devnull, - stdout=devnull, stderr=devnull, - start_new_session=True) - except OSError: - return False - else: - return p.poll() is None - - -class Edge(UnixBrowser): - """Launcher class for Microsoft Edge browser.""" - - remote_args = ['%action', '%s'] - remote_action = "" - remote_action_newwin = "--new-window" - remote_action_newtab = "" - background = True - - -# -# Platform support for Unix -# - -# These are the right tests because all these Unix browsers require either -# a console terminal or an X display to run. - -def register_X_browsers(): - - # use xdg-open if around - if shutil.which("xdg-open"): - register("xdg-open", None, BackgroundBrowser("xdg-open")) - - # Opens an appropriate browser for the URL scheme according to - # freedesktop.org settings (GNOME, KDE, XFCE, etc.) - if shutil.which("gio"): - register("gio", None, BackgroundBrowser(["gio", "open", "--", "%s"])) - - xdg_desktop = os.getenv("XDG_CURRENT_DESKTOP", "").split(":") - - # The default GNOME3 browser - if (("GNOME" in xdg_desktop or - "GNOME_DESKTOP_SESSION_ID" in os.environ) and - shutil.which("gvfs-open")): - register("gvfs-open", None, BackgroundBrowser("gvfs-open")) - - # The default KDE browser - if (("KDE" in xdg_desktop or - "KDE_FULL_SESSION" in os.environ) and - shutil.which("kfmclient")): - register("kfmclient", Konqueror, Konqueror("kfmclient")) - - # Common symbolic link for the default X11 browser - if shutil.which("x-www-browser"): - register("x-www-browser", None, BackgroundBrowser("x-www-browser")) - - # The Mozilla browsers - for browser in ("firefox", "iceweasel", "seamonkey", "mozilla-firefox", - "mozilla"): - if shutil.which(browser): - register(browser, None, Mozilla(browser)) - - # Konqueror/kfm, the KDE browser. - if shutil.which("kfm"): - register("kfm", Konqueror, Konqueror("kfm")) - elif shutil.which("konqueror"): - register("konqueror", Konqueror, Konqueror("konqueror")) - - # Gnome's Epiphany - if shutil.which("epiphany"): - register("epiphany", None, Epiphany("epiphany")) - - # Google Chrome/Chromium browsers - for browser in ("google-chrome", "chrome", "chromium", "chromium-browser"): - if shutil.which(browser): - register(browser, None, Chrome(browser)) - - # Opera, quite popular - if shutil.which("opera"): - register("opera", None, Opera("opera")) - - if shutil.which("microsoft-edge"): - register("microsoft-edge", None, Edge("microsoft-edge")) - - -def register_standard_browsers(): - global _tryorder - _tryorder = [] - - if sys.platform == 'darwin': - register("MacOSX", None, MacOSXOSAScript('default')) - register("chrome", None, MacOSXOSAScript('chrome')) - register("firefox", None, MacOSXOSAScript('firefox')) - register("safari", None, MacOSXOSAScript('safari')) - # OS X can use below Unix support (but we prefer using the OS X - # specific stuff) - - if sys.platform == "ios": - register("iosbrowser", None, IOSBrowser(), preferred=True) - - if sys.platform == "serenityos": - # SerenityOS webbrowser, simply called "Browser". 
- register("Browser", None, BackgroundBrowser("Browser")) - - if sys.platform[:3] == "win": - # First try to use the default Windows browser - register("windows-default", WindowsDefault) - - # Detect some common Windows browsers, fallback to Microsoft Edge - # location in 64-bit Windows - edge64 = os.path.join(os.environ.get("PROGRAMFILES(x86)", "C:\\Program Files (x86)"), - "Microsoft\\Edge\\Application\\msedge.exe") - # location in 32-bit Windows - edge32 = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"), - "Microsoft\\Edge\\Application\\msedge.exe") - for browser in ("firefox", "seamonkey", "mozilla", "chrome", - "opera", edge64, edge32): - if shutil.which(browser): - register(browser, None, BackgroundBrowser(browser)) - if shutil.which("MicrosoftEdge.exe"): - register("microsoft-edge", None, Edge("MicrosoftEdge.exe")) - else: - # Prefer X browsers if present - # - # NOTE: Do not check for X11 browser on macOS, - # XQuartz installation sets a DISPLAY environment variable and will - # autostart when someone tries to access the display. Mac users in - # general don't need an X11 browser. - if sys.platform != "darwin" and (os.environ.get("DISPLAY") or os.environ.get("WAYLAND_DISPLAY")): - try: - cmd = "xdg-settings get default-web-browser".split() - raw_result = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) - result = raw_result.decode().strip() - except (FileNotFoundError, subprocess.CalledProcessError, - PermissionError, NotADirectoryError): - pass - else: - global _os_preferred_browser - _os_preferred_browser = result - - register_X_browsers() - - # Also try console browsers - if os.environ.get("TERM"): - # Common symbolic link for the default text-based browser - if shutil.which("www-browser"): - register("www-browser", None, GenericBrowser("www-browser")) - # The Links/elinks browsers - if shutil.which("links"): - register("links", None, GenericBrowser("links")) - if shutil.which("elinks"): - register("elinks", None, Elinks("elinks")) - # The Lynx browser , - if shutil.which("lynx"): - register("lynx", None, GenericBrowser("lynx")) - # The w3m browser - if shutil.which("w3m"): - register("w3m", None, GenericBrowser("w3m")) - - # OK, now that we know what the default preference orders for each - # platform are, allow user to override them with the BROWSER variable. - if "BROWSER" in os.environ: - userchoices = os.environ["BROWSER"].split(os.pathsep) - userchoices.reverse() - - # Treat choices in same way as if passed into get() but do register - # and prepend to _tryorder - for cmdline in userchoices: - if cmdline != '': - cmd = _synthesize(cmdline, preferred=True) - if cmd[1] is None: - register(cmdline, None, GenericBrowser(cmdline), preferred=True) - - # what to do if _tryorder is now empty? 
- - -# -# Platform support for Windows -# - -if sys.platform[:3] == "win": - class WindowsDefault(BaseBrowser): - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - try: - os.startfile(url) - except OSError: - # [Error 22] No application is associated with the specified - # file for this operation: '' - return False - else: - return True - -# -# Platform support for macOS -# - -if sys.platform == 'darwin': - class MacOSXOSAScript(BaseBrowser): - def __init__(self, name='default'): - super().__init__(name) - - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - url = url.replace('"', '%22') - if self.name == 'default': - script = f'open location "{url}"' # opens in default browser - else: - script = f''' - tell application "{self.name}" - activate - open location "{url}" - end - ''' - - osapipe = os.popen("/usr/bin/osascript", "w") - if osapipe is None: - return False - - osapipe.write(script) - rc = osapipe.close() - return not rc - -# -# Platform support for iOS -# -if sys.platform == "ios": - from _ios_support import objc - if objc: - # If objc exists, we know ctypes is also importable. - from ctypes import c_void_p, c_char_p, c_ulong - - class IOSBrowser(BaseBrowser): - def open(self, url, new=0, autoraise=True): - sys.audit("webbrowser.open", url) - self._check_url(url) - # If ctypes isn't available, we can't open a browser - if objc is None: - return False - - # All the messages in this call return object references. - objc.objc_msgSend.restype = c_void_p - - # This is the equivalent of: - # NSString url_string = - # [NSString stringWithCString:url.encode("utf-8") - # encoding:NSUTF8StringEncoding]; - NSString = objc.objc_getClass(b"NSString") - constructor = objc.sel_registerName(b"stringWithCString:encoding:") - objc.objc_msgSend.argtypes = [c_void_p, c_void_p, c_char_p, c_ulong] - url_string = objc.objc_msgSend( - NSString, - constructor, - url.encode("utf-8"), - 4, # NSUTF8StringEncoding = 4 - ) - - # Create an NSURL object representing the URL - # This is the equivalent of: - # NSURL *nsurl = [NSURL URLWithString:url]; - NSURL = objc.objc_getClass(b"NSURL") - urlWithString_ = objc.sel_registerName(b"URLWithString:") - objc.objc_msgSend.argtypes = [c_void_p, c_void_p, c_void_p] - ns_url = objc.objc_msgSend(NSURL, urlWithString_, url_string) - - # Get the shared UIApplication instance - # This code is the equivalent of: - # UIApplication shared_app = [UIApplication sharedApplication] - UIApplication = objc.objc_getClass(b"UIApplication") - sharedApplication = objc.sel_registerName(b"sharedApplication") - objc.objc_msgSend.argtypes = [c_void_p, c_void_p] - shared_app = objc.objc_msgSend(UIApplication, sharedApplication) - - # Open the URL on the shared application - # This code is the equivalent of: - # [shared_app openURL:ns_url - # options:NIL - # completionHandler:NIL]; - openURL_ = objc.sel_registerName(b"openURL:options:completionHandler:") - objc.objc_msgSend.argtypes = [ - c_void_p, c_void_p, c_void_p, c_void_p, c_void_p - ] - # Method returns void - objc.objc_msgSend.restype = None - objc.objc_msgSend(shared_app, openURL_, ns_url, None, None) - - return True - - -def parse_args(arg_list: list[str] | None): - import argparse - parser = argparse.ArgumentParser(description="Open URL in a web browser.") - parser.add_argument("url", help="URL to open") - - group = parser.add_mutually_exclusive_group() - group.add_argument("-n", "--new-window", action="store_const", - 
const=1, default=0, dest="new_win", - help="open new window") - group.add_argument("-t", "--new-tab", action="store_const", - const=2, default=0, dest="new_win", - help="open new tab") - - args = parser.parse_args(arg_list) - - return args - - - def main(arg_list: list[str] | None = None): - args = parse_args(arg_list) - - open(args.url, args.new_win) - - print("\a") - - - if __name__ == "__main__": - main() diff --git a/Python313_13_x86_Template/Lib/wsgiref/headers.py b/Python313_13_x86_Template/Lib/wsgiref/headers.py deleted file mode 100644 index 17559b0a..00000000 --- a/Python313_13_x86_Template/Lib/wsgiref/headers.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Manage HTTP Response Headers - -Much of this module is red-handedly pilfered from email.message in the stdlib, -so portions are Copyright (C) 2001,2002 Python Software Foundation, and were -written by Barry Warsaw. -""" - -# Regular expression that matches `special' characters in parameters, the -# existence of which force quoting of the parameter value. -import re -tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]') -# Disallowed characters for headers and values. -# HTAB (\x09) is allowed in header values, but -# not in header names. (RFC 9110 Section 5.5) -_name_disallowed_re = re.compile(r'[\x00-\x1F\x7F]') -_value_disallowed_re = re.compile(r'[\x00-\x08\x0A-\x1F\x7F]') - -def _formatparam(param, value=None, quote=1): - """Convenience function to format and return a key=value pair. - - This will quote the value if needed or if quote is true. - """ - if value is not None and len(value) > 0: - if quote or tspecials.search(value): - value = value.replace('\\', '\\\\').replace('"', r'\"') - return '%s="%s"' % (param, value) - else: - return '%s=%s' % (param, value) - else: - return param - - -class Headers: - """Manage a collection of HTTP response headers""" - - def __init__(self, headers=None): - headers = headers if headers is not None else [] - if type(headers) is not list: - raise TypeError("Headers must be a list of name/value tuples") - self._headers = headers - if __debug__: - for k, v in headers: - self._convert_string_type(k, name=True) - self._convert_string_type(v, name=False) - - def _convert_string_type(self, value, *, name): - """Convert/check value type.""" - if type(value) is str: - regex = (_name_disallowed_re if name else _value_disallowed_re) - if regex.search(value): - raise ValueError("Control characters not allowed in headers") - return value - raise AssertionError("Header names/values must be" - " of type str (got {0})".format(repr(value))) - - def __len__(self): - """Return the total number of headers, including duplicates.""" - return len(self._headers) - - def __setitem__(self, name, val): - """Set the value of a header.""" - del self[name] - self._headers.append( - (self._convert_string_type(name, name=True), self._convert_string_type(val, name=False))) - - def __delitem__(self,name): - """Delete all occurrences of a header, if present. - - Does *not* raise an exception if the header is missing. - """ - name = self._convert_string_type(name.lower(), name=True) - self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name] - - def __getitem__(self,name): - """Get the first header value for 'name' - - Return None if the header is missing instead of raising an exception. - - Note that if the header appeared multiple times, exactly which - occurrence gets returned is undefined. Use getall() to get all - the values matching a header field name.
- """ - return self.get(name) - - def __contains__(self, name): - """Return true if the message contains the header.""" - return self.get(name) is not None - - - def get_all(self, name): - """Return a list of all the values for the named field. - - These will be sorted in the order they appeared in the original header - list or were added to this instance, and may contain duplicates. Any - fields deleted and re-inserted are always appended to the header list. - If no fields exist with the given name, returns an empty list. - """ - name = self._convert_string_type(name.lower(), name=True) - return [kv[1] for kv in self._headers if kv[0].lower()==name] - - - def get(self,name,default=None): - """Get the first header value for 'name', or return 'default'""" - name = self._convert_string_type(name.lower(), name=True) - for k,v in self._headers: - if k.lower()==name: - return v - return default - - - def keys(self): - """Return a list of all the header field names. - - These will be sorted in the order they appeared in the original header - list, or were added to this instance, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. - """ - return [k for k, v in self._headers] - - def values(self): - """Return a list of all header values. - - These will be sorted in the order they appeared in the original header - list, or were added to this instance, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. - """ - return [v for k, v in self._headers] - - def items(self): - """Get all the header fields and values. - - These will be sorted in the order they were in the original header - list, or were added to this instance, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. - """ - return self._headers[:] - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self._headers) - - def __str__(self): - """str() returns the formatted headers, complete with end line, - suitable for direct HTTP transmission.""" - return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['','']) - - def __bytes__(self): - return str(self).encode('iso-8859-1') - - def setdefault(self,name,value): - """Return first matching header value for 'name', or 'value' - - If there is no header named 'name', add a new header with name 'name' - and value 'value'.""" - result = self.get(name) - if result is None: - self._headers.append((self._convert_string_type(name, name=True), - self._convert_string_type(value, name=False))) - return value - else: - return result - - def add_header(self, _name, _value, **_params): - """Extended header setting. - - _name is the header field to add. keyword arguments can be used to set - additional parameters for the header field, with underscores converted - to dashes. Normally the parameter will be added as key="value" unless - value is None, in which case only the key will be added. - - Example: - - h.add_header('content-disposition', 'attachment', filename='bud.gif') - - Note that unlike the corresponding 'email.message' method, this does - *not* handle '(charset, language, value)' tuples: all values must be - strings or None. 
- """ - parts = [] - if _value is not None: - _value = self._convert_string_type(_value, name=False) - parts.append(_value) - for k, v in _params.items(): - k = self._convert_string_type(k, name=True) - if v is None: - parts.append(k.replace('_', '-')) - else: - v = self._convert_string_type(v, name=False) - parts.append(_formatparam(k.replace('_', '-'), v)) - self._headers.append((self._convert_string_type(_name, name=True), "; ".join(parts))) diff --git a/Python313_13_x86_Template/Lib/xml/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/xml/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index f8843579..00000000 Binary files a/Python313_13_x86_Template/Lib/xml/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/xml/dom/__init__.py b/Python313_13_x86_Template/Lib/xml/dom/__init__.py deleted file mode 100644 index 97cf9a64..00000000 --- a/Python313_13_x86_Template/Lib/xml/dom/__init__.py +++ /dev/null @@ -1,140 +0,0 @@ -"""W3C Document Object Model implementation for Python. - -The Python mapping of the Document Object Model is documented in the -Python Library Reference in the section on the xml.dom package. - -This package contains the following modules: - -minidom -- A simple implementation of the Level 1 DOM with namespace - support added (based on the Level 2 specification) and other - minor Level 2 functionality. - -pulldom -- DOM builder supporting on-demand tree-building for selected - subtrees of the document. - -""" - - -class Node: - """Class giving the NodeType constants.""" - __slots__ = () - - # DOM implementations may use this as a base class for their own - # Node implementations. If they don't, the constants defined here - # should still be used as the canonical definitions as they match - # the values given in the W3C recommendation. Client code can - # safely refer to these values in all tests of Node.nodeType - # values. - - ELEMENT_NODE = 1 - ATTRIBUTE_NODE = 2 - TEXT_NODE = 3 - CDATA_SECTION_NODE = 4 - ENTITY_REFERENCE_NODE = 5 - ENTITY_NODE = 6 - PROCESSING_INSTRUCTION_NODE = 7 - COMMENT_NODE = 8 - DOCUMENT_NODE = 9 - DOCUMENT_TYPE_NODE = 10 - DOCUMENT_FRAGMENT_NODE = 11 - NOTATION_NODE = 12 - - -#ExceptionCode -INDEX_SIZE_ERR = 1 -DOMSTRING_SIZE_ERR = 2 -HIERARCHY_REQUEST_ERR = 3 -WRONG_DOCUMENT_ERR = 4 -INVALID_CHARACTER_ERR = 5 -NO_DATA_ALLOWED_ERR = 6 -NO_MODIFICATION_ALLOWED_ERR = 7 -NOT_FOUND_ERR = 8 -NOT_SUPPORTED_ERR = 9 -INUSE_ATTRIBUTE_ERR = 10 -INVALID_STATE_ERR = 11 -SYNTAX_ERR = 12 -INVALID_MODIFICATION_ERR = 13 -NAMESPACE_ERR = 14 -INVALID_ACCESS_ERR = 15 -VALIDATION_ERR = 16 - - -class DOMException(Exception): - """Abstract base class for DOM exceptions. 
- Exceptions with specific codes are specializations of this class.""" - - def __init__(self, *args, **kw): - if self.__class__ is DOMException: - raise RuntimeError( - "DOMException should not be instantiated directly") - Exception.__init__(self, *args, **kw) - - def _get_code(self): - return self.code - - -class IndexSizeErr(DOMException): - code = INDEX_SIZE_ERR - -class DomstringSizeErr(DOMException): - code = DOMSTRING_SIZE_ERR - -class HierarchyRequestErr(DOMException): - code = HIERARCHY_REQUEST_ERR - -class WrongDocumentErr(DOMException): - code = WRONG_DOCUMENT_ERR - -class InvalidCharacterErr(DOMException): - code = INVALID_CHARACTER_ERR - -class NoDataAllowedErr(DOMException): - code = NO_DATA_ALLOWED_ERR - -class NoModificationAllowedErr(DOMException): - code = NO_MODIFICATION_ALLOWED_ERR - -class NotFoundErr(DOMException): - code = NOT_FOUND_ERR - -class NotSupportedErr(DOMException): - code = NOT_SUPPORTED_ERR - -class InuseAttributeErr(DOMException): - code = INUSE_ATTRIBUTE_ERR - -class InvalidStateErr(DOMException): - code = INVALID_STATE_ERR - -class SyntaxErr(DOMException): - code = SYNTAX_ERR - -class InvalidModificationErr(DOMException): - code = INVALID_MODIFICATION_ERR - -class NamespaceErr(DOMException): - code = NAMESPACE_ERR - -class InvalidAccessErr(DOMException): - code = INVALID_ACCESS_ERR - -class ValidationErr(DOMException): - code = VALIDATION_ERR - -class UserDataHandler: - """Class giving the operation constants for UserDataHandler.handle().""" - - # Based on DOM Level 3 (WD 9 April 2002) - - NODE_CLONED = 1 - NODE_IMPORTED = 2 - NODE_DELETED = 3 - NODE_RENAMED = 4 - -XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace" -XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/" -XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml" -EMPTY_NAMESPACE = None -EMPTY_PREFIX = None - -from .domreg import getDOMImplementation, registerDOMImplementation diff --git a/Python313_13_x86_Template/Lib/xml/etree/ElementTree.py b/Python313_13_x86_Template/Lib/xml/etree/ElementTree.py deleted file mode 100644 index 9bb09ab5..00000000 --- a/Python313_13_x86_Template/Lib/xml/etree/ElementTree.py +++ /dev/null @@ -1,2098 +0,0 @@ -"""Lightweight XML support for Python. - - XML is an inherently hierarchical data format, and the most natural way to - represent it is with a tree. This module has two classes for this purpose: - - 1. ElementTree represents the whole XML document as a tree and - - 2. Element represents a single node in this tree. - - Interactions with the whole document (reading and writing to/from files) are - usually done on the ElementTree level. Interactions with a single XML element - and its sub-elements are done on the Element level. - - Element is a flexible container object designed to store hierarchical data - structures in memory. It can be described as a cross between a list and a - dictionary. Each Element has a number of properties associated with it: - - 'tag' - a string containing the element's name. - - 'attributes' - a Python dictionary storing the element's attributes. - - 'text' - a string containing the element's text content. - - 'tail' - an optional string containing text after the element's end tag. - - And a number of child elements stored in a Python sequence. - - To create an element instance, use the Element constructor, - or the SubElement factory function. - - You can also use the ElementTree class to wrap an element structure - and convert it to and from XML. 
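The workflow the ElementTree module docstring describes, as a compact sketch (illustrative only, not part of the deleted file):

    import xml.etree.ElementTree as ET

    root = ET.Element("catalog", version="1.0")
    item = ET.SubElement(root, "item", name="widget")
    item.text = "in stock"

    data = ET.tostring(root)            # serialize the whole tree to bytes
    reparsed = ET.fromstring(data)      # and read it back
    assert reparsed.find("item").get("name") == "widget"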
- -""" - -#--------------------------------------------------------------------- -# Licensed to PSF under a Contributor Agreement. -# See https://www.python.org/psf/license for licensing details. -# -# ElementTree -# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved. -# -# fredrik@pythonware.com -# http://www.pythonware.com -# -------------------------------------------------------------------- -# The ElementTree toolkit is -# -# Copyright (c) 1999-2008 by Fredrik Lundh -# -# By obtaining, using, and/or copying this software and/or its -# associated documentation, you agree that you have read, understood, -# and will comply with the following terms and conditions: -# -# Permission to use, copy, modify, and distribute this software and -# its associated documentation for any purpose and without fee is -# hereby granted, provided that the above copyright notice appears in -# all copies, and that both that copyright notice and this permission -# notice appear in supporting documentation, and that the name of -# Secret Labs AB or the author not be used in advertising or publicity -# pertaining to distribution of the software without specific, written -# prior permission. -# -# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD -# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- -# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR -# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY -# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS -# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE -# OF THIS SOFTWARE. -# -------------------------------------------------------------------- - -__all__ = [ - # public symbols - "Comment", - "dump", - "Element", "ElementTree", - "fromstring", "fromstringlist", - "indent", "iselement", "iterparse", - "parse", "ParseError", - "PI", "ProcessingInstruction", - "QName", - "SubElement", - "tostring", "tostringlist", - "TreeBuilder", - "VERSION", - "XML", "XMLID", - "XMLParser", "XMLPullParser", - "register_namespace", - "canonicalize", "C14NWriterTarget", - ] - -VERSION = "1.3.0" - -import sys -import re -import warnings -import io -import collections -import collections.abc -import contextlib -import weakref - -from . import ElementPath - - -class ParseError(SyntaxError): - """An error when parsing an XML document. - - In addition to its exception value, a ParseError contains - two extra attributes: - 'code' - the specific exception code - 'position' - the line and column of the error - - """ - pass - -# -------------------------------------------------------------------- - - -def iselement(element): - """Return True if *element* appears to be an Element.""" - return hasattr(element, 'tag') - - -class Element: - """An XML element. - - This class is the reference implementation of the Element interface. - - An element's length is its number of subelements. That means if you - want to check if an element is truly empty, you should check BOTH - its length AND its text attribute. - - The element tag, attribute names, and attribute values can be either - bytes or strings. - - *tag* is the element name. *attrib* is an optional dictionary containing - element attributes. *extra* are additional element attributes given as - keyword arguments. 
- - Example form: - <tag attrib>text<child/>...</tag>tail - - """ - - tag = None - """The element's name.""" - - attrib = None - """Dictionary of the element's attributes.""" - - text = None - """ - Text before first subelement. This is either a string or the value None. - Note that if there is no text, this attribute may be either - None or the empty string, depending on the parser. - - """ - - tail = None - """ - Text after this element's end tag, but before the next sibling element's - start tag. This is either a string or the value None. Note that if there - was no text, this attribute may be either None or an empty string, - depending on the parser. - - """ - - def __init__(self, tag, attrib={}, **extra): - if not isinstance(attrib, dict): - raise TypeError("attrib must be dict, not %s" % ( - attrib.__class__.__name__,)) - self.tag = tag - self.attrib = {**attrib, **extra} - self._children = [] - - def __repr__(self): - return "<%s %r at %#x>" % (self.__class__.__name__, self.tag, id(self)) - - def makeelement(self, tag, attrib): - """Create a new element with the same type. - - *tag* is a string containing the element name. - *attrib* is a dictionary containing the element attributes. - - Do not call this method, use the SubElement factory function instead. - - """ - return self.__class__(tag, attrib) - - def __copy__(self): - elem = self.makeelement(self.tag, self.attrib) - elem.text = self.text - elem.tail = self.tail - elem[:] = self - return elem - - def __len__(self): - return len(self._children) - - def __bool__(self): - warnings.warn( - "Testing an element's truth value will always return True in " - "future versions. " - "Use specific 'len(elem)' or 'elem is not None' test instead.", - DeprecationWarning, stacklevel=2 - ) - return len(self._children) != 0 # emulate old behaviour, for now - - def __getitem__(self, index): - return self._children[index] - - def __setitem__(self, index, element): - if isinstance(index, slice): - for elt in element: - self._assert_is_element(elt) - else: - self._assert_is_element(element) - self._children[index] = element - - def __delitem__(self, index): - del self._children[index] - - def append(self, subelement): - """Add *subelement* to the end of this element. - - The new element will appear in document order after the last existing - subelement (or directly after the text, if it's the first subelement), - but before the end tag for this element. - - """ - self._assert_is_element(subelement) - self._children.append(subelement) - - def extend(self, elements): - """Append subelements from a sequence. - - *elements* is a sequence with zero or more elements. - - """ - for element in elements: - self._assert_is_element(element) - self._children.append(element) - - def insert(self, index, subelement): - """Insert *subelement* at position *index*.""" - self._assert_is_element(subelement) - self._children.insert(index, subelement) - - def _assert_is_element(self, e): - # Need to refer to the actual Python implementation, not the - # shadowing C implementation. - if not isinstance(e, _Element_Py): - raise TypeError('expected an Element, not %s' % type(e).__name__) - - def remove(self, subelement): - """Remove matching subelement. - - Unlike the find methods, this method compares elements based on - identity, NOT ON tag value or contents. To remove subelements by - other means, the easiest way is to use a list comprehension to - select what elements to keep, and then use slice assignment to update - the parent element.
- - ValueError is raised if a matching element could not be found. - - """ - # assert iselement(element) - self._children.remove(subelement) - - def find(self, path, namespaces=None): - """Find first matching element by tag name or path. - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return the first matching element, or None if no element was found. - - """ - return ElementPath.find(self, path, namespaces) - - def findtext(self, path, default=None, namespaces=None): - """Find text for first matching element by tag name or path. - - *path* is a string having either an element tag or an XPath, - *default* is the value to return if the element was not found, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return text content of first matching element, or default value if - none was found. Note that if an element is found having no text - content, the empty string is returned. - - """ - return ElementPath.findtext(self, path, default, namespaces) - - def findall(self, path, namespaces=None): - """Find all matching subelements by tag name or path. - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Returns list containing all matching elements in document order. - - """ - return ElementPath.findall(self, path, namespaces) - - def iterfind(self, path, namespaces=None): - """Find all matching subelements by tag name or path. - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return an iterable yielding all matching elements in document order. - - """ - return ElementPath.iterfind(self, path, namespaces) - - def clear(self): - """Reset element. - - This function removes all subelements, clears all attributes, and sets - the text and tail attributes to None. - - """ - self.attrib.clear() - self._children = [] - self.text = self.tail = None - - def get(self, key, default=None): - """Get element attribute. - - Equivalent to attrib.get, but some implementations may handle this a - bit more efficiently. *key* is what attribute to look for, and - *default* is what to return if the attribute was not found. - - Returns a string containing the attribute value, or the default if - attribute was not found. - - """ - return self.attrib.get(key, default) - - def set(self, key, value): - """Set element attribute. - - Equivalent to attrib[key] = value, but some implementations may handle - this a bit more efficiently. *key* is what attribute to set, and - *value* is the attribute value to set it to. - - """ - self.attrib[key] = value - - def keys(self): - """Get list of attribute names. - - Names are returned in an arbitrary order, just like an ordinary - Python dict. Equivalent to attrib.keys() - - """ - return self.attrib.keys() - - def items(self): - """Get element attributes as a sequence. - - The attributes are returned in arbitrary order. Equivalent to - attrib.items(). - - Return a list of (name, value) tuples. - - """ - return self.attrib.items() - - def iter(self, tag=None): - """Create tree iterator. - - The iterator loops over the element and all subelements in document - order, returning all elements with a matching tag. - - If the tree structure is modified during iteration, new or removed - elements may or may not be included. 
To get a stable set, use the - list() function on the iterator, and loop over the resulting list. - - *tag* is what tags to look for (default is to return all elements) - - Return an iterator containing all the matching elements. - - """ - if tag == "*": - tag = None - if tag is None or self.tag == tag: - yield self - for e in self._children: - yield from e.iter(tag) - - def itertext(self): - """Create text iterator. - - The iterator loops over the element and all subelements in document - order, returning all inner text. - - """ - tag = self.tag - if not isinstance(tag, str) and tag is not None: - return - t = self.text - if t: - yield t - for e in self: - yield from e.itertext() - t = e.tail - if t: - yield t - - -def SubElement(parent, tag, attrib={}, **extra): - """Subelement factory which creates an element instance, and appends it - to an existing parent. - - The element tag, attribute names, and attribute values can be either - bytes or Unicode strings. - - *parent* is the parent element, *tag* is the subelements name, *attrib* is - an optional directory containing element attributes, *extra* are - additional attributes given as keyword arguments. - - """ - attrib = {**attrib, **extra} - element = parent.makeelement(tag, attrib) - parent.append(element) - return element - - -def Comment(text=None): - """Comment element factory. - - This function creates a special element which the standard serializer - serializes as an XML comment. - - *text* is a string containing the comment string. - - """ - element = Element(Comment) - element.text = text - return element - - -def ProcessingInstruction(target, text=None): - """Processing Instruction element factory. - - This function creates a special element which the standard serializer - serializes as an XML comment. - - *target* is a string containing the processing instruction, *text* is a - string containing the processing instruction contents, if any. - - """ - element = Element(ProcessingInstruction) - element.text = target - if text: - element.text = element.text + " " + text - return element - -PI = ProcessingInstruction - - -class QName: - """Qualified name wrapper. - - This class can be used to wrap a QName attribute value in order to get - proper namespace handing on output. - - *text_or_uri* is a string containing the QName value either in the form - {uri}local, or if the tag argument is given, the URI part of a QName. - - *tag* is an optional argument which if given, will make the first - argument (text_or_uri) be interpreted as a URI, and this argument (tag) - be interpreted as a local name. 
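Both QName constructor forms described above, in a brief sketch (illustrative, not part of the deleted file; the namespace URI is made up):

    import xml.etree.ElementTree as ET
    from xml.etree.ElementTree import QName

    NS = "http://example.com/ns"     # hypothetical namespace URI
    q1 = QName("{%s}item" % NS)      # one-argument form: '{uri}local'
    q2 = QName(NS, "item")           # two-argument form: uri + local name
    assert str(q1) == str(q2)

    elem = ET.Element(q2)            # QName tags get a prefix on output,
    print(ET.tostring(elem))         # e.g. b'<ns0:item xmlns:ns0="http://example.com/ns" />'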
- - """ - def __init__(self, text_or_uri, tag=None): - if tag: - text_or_uri = "{%s}%s" % (text_or_uri, tag) - self.text = text_or_uri - def __str__(self): - return self.text - def __repr__(self): - return '<%s %r>' % (self.__class__.__name__, self.text) - def __hash__(self): - return hash(self.text) - def __le__(self, other): - if isinstance(other, QName): - return self.text <= other.text - return self.text <= other - def __lt__(self, other): - if isinstance(other, QName): - return self.text < other.text - return self.text < other - def __ge__(self, other): - if isinstance(other, QName): - return self.text >= other.text - return self.text >= other - def __gt__(self, other): - if isinstance(other, QName): - return self.text > other.text - return self.text > other - def __eq__(self, other): - if isinstance(other, QName): - return self.text == other.text - return self.text == other - -# -------------------------------------------------------------------- - - -class ElementTree: - """An XML element hierarchy. - - This class also provides support for serialization to and from - standard XML. - - *element* is an optional root element node, - *file* is an optional file handle or file name of an XML file whose - contents will be used to initialize the tree with. - - """ - def __init__(self, element=None, file=None): - if element is not None and not iselement(element): - raise TypeError('expected an Element, not %s' % - type(element).__name__) - self._root = element # first node - if file: - self.parse(file) - - def getroot(self): - """Return root element of this tree.""" - return self._root - - def _setroot(self, element): - """Replace root element of this tree. - - This will discard the current contents of the tree and replace it - with the given element. Use with care! - - """ - if not iselement(element): - raise TypeError('expected an Element, not %s' - % type(element).__name__) - self._root = element - - def parse(self, source, parser=None): - """Load external XML document into element tree. - - *source* is a file name or file object, *parser* is an optional parser - instance that defaults to XMLParser. - - ParseError is raised if the parser fails to parse the document. - - Returns the root element of the given source document. - - """ - close_source = False - if not hasattr(source, "read"): - source = open(source, "rb") - close_source = True - try: - if parser is None: - # If no parser was specified, create a default XMLParser - parser = XMLParser() - if hasattr(parser, '_parse_whole'): - # The default XMLParser, when it comes from an accelerator, - # can define an internal _parse_whole API for efficiency. - # It can be used to parse the whole source without feeding - # it with chunks. - self._root = parser._parse_whole(source) - return self._root - while data := source.read(65536): - parser.feed(data) - self._root = parser.close() - return self._root - finally: - if close_source: - source.close() - - def iter(self, tag=None): - """Create and return tree iterator for the root element. - - The iterator loops over all elements in this tree, in document order. - - *tag* is a string with the tag name to iterate over - (default is to return all elements). - - """ - # assert self._root is not None - return self._root.iter(tag) - - def find(self, path, namespaces=None): - """Find first matching element by tag name or path. 
- - Same as getroot().find(path), which is Element.find() - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return the first matching element, or None if no element was found. - - """ - # assert self._root is not None - if path[:1] == "/": - path = "." + path - warnings.warn( - "This search is broken in 1.3 and earlier, and will be " - "fixed in a future version. If you rely on the current " - "behaviour, change it to %r" % path, - FutureWarning, stacklevel=2 - ) - return self._root.find(path, namespaces) - - def findtext(self, path, default=None, namespaces=None): - """Find first matching element by tag name or path. - - Same as getroot().findtext(path), which is Element.findtext() - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return the first matching element, or None if no element was found. - - """ - # assert self._root is not None - if path[:1] == "/": - path = "." + path - warnings.warn( - "This search is broken in 1.3 and earlier, and will be " - "fixed in a future version. If you rely on the current " - "behaviour, change it to %r" % path, - FutureWarning, stacklevel=2 - ) - return self._root.findtext(path, default, namespaces) - - def findall(self, path, namespaces=None): - """Find all matching subelements by tag name or path. - - Same as getroot().findall(path), which is Element.findall(). - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return list containing all matching elements in document order. - - """ - # assert self._root is not None - if path[:1] == "/": - path = "." + path - warnings.warn( - "This search is broken in 1.3 and earlier, and will be " - "fixed in a future version. If you rely on the current " - "behaviour, change it to %r" % path, - FutureWarning, stacklevel=2 - ) - return self._root.findall(path, namespaces) - - def iterfind(self, path, namespaces=None): - """Find all matching subelements by tag name or path. - - Same as getroot().iterfind(path), which is element.iterfind() - - *path* is a string having either an element tag or an XPath, - *namespaces* is an optional mapping from namespace prefix to full name. - - Return an iterable yielding all matching elements in document order. - - """ - # assert self._root is not None - if path[:1] == "/": - path = "." + path - warnings.warn( - "This search is broken in 1.3 and earlier, and will be " - "fixed in a future version. If you rely on the current " - "behaviour, change it to %r" % path, - FutureWarning, stacklevel=2 - ) - return self._root.iterfind(path, namespaces) - - def write(self, file_or_filename, - encoding=None, - xml_declaration=None, - default_namespace=None, - method=None, *, - short_empty_elements=True): - """Write element tree to a file as XML. - - Arguments: - *file_or_filename* -- file name or a file object opened for writing - - *encoding* -- the output encoding (default: US-ASCII) - - *xml_declaration* -- bool indicating if an XML declaration should be - added to the output. If None, an XML declaration - is added if encoding IS NOT either of: - US-ASCII, UTF-8, or Unicode - - *default_namespace* -- sets the default XML namespace (for "xmlns") - - *method* -- either "xml" (default), "html, "text", or "c14n" - - *short_empty_elements* -- controls the formatting of elements - that contain no content. 
-                                  If True (default)
-                                  they are emitted as a single self-closed
-                                  tag, otherwise they are emitted as a pair
-                                  of start/end tags
-
-        """
-        if self._root is None:
-            raise TypeError('ElementTree not initialized')
-        if not method:
-            method = "xml"
-        elif method not in _serialize:
-            raise ValueError("unknown method %r" % method)
-        if not encoding:
-            if method == "c14n":
-                encoding = "utf-8"
-            else:
-                encoding = "us-ascii"
-        with _get_writer(file_or_filename, encoding) as (write, declared_encoding):
-            if method == "xml" and (xml_declaration or
-                    (xml_declaration is None and
-                     encoding.lower() != "unicode" and
-                     declared_encoding.lower() not in ("utf-8", "us-ascii"))):
-                write("<?xml version='1.0' encoding='%s'?>\n" % (
-                    declared_encoding,))
-            if method == "text":
-                _serialize_text(write, self._root)
-            else:
-                qnames, namespaces = _namespaces(self._root, default_namespace)
-                serialize = _serialize[method]
-                serialize(write, self._root, qnames, namespaces,
-                          short_empty_elements=short_empty_elements)
-
-    def write_c14n(self, file):
-        # lxml.etree compatibility. use output method instead
-        return self.write(file, method="c14n")
-
-# --------------------------------------------------------------------
-# serialization support
-
-@contextlib.contextmanager
-def _get_writer(file_or_filename, encoding):
-    # returns text write method and release all resources after using
-    try:
-        write = file_or_filename.write
-    except AttributeError:
-        # file_or_filename is a file name
-        if encoding.lower() == "unicode":
-            encoding="utf-8"
-        with open(file_or_filename, "w", encoding=encoding,
-                  errors="xmlcharrefreplace") as file:
-            yield file.write, encoding
-    else:
-        # file_or_filename is a file-like object
-        # encoding determines if it is a text or binary writer
-        if encoding.lower() == "unicode":
-            # use a text writer as is
-            yield write, getattr(file_or_filename, "encoding", None) or "utf-8"
-        else:
-            # wrap a binary writer with TextIOWrapper
-            with contextlib.ExitStack() as stack:
-                if isinstance(file_or_filename, io.BufferedIOBase):
-                    file = file_or_filename
-                elif isinstance(file_or_filename, io.RawIOBase):
-                    file = io.BufferedWriter(file_or_filename)
-                    # Keep the original file open when the BufferedWriter is
-                    # destroyed
-                    stack.callback(file.detach)
-                else:
-                    # This is to handle passed objects that aren't in the
-                    # IOBase hierarchy, but just have a write method
-                    file = io.BufferedIOBase()
-                    file.writable = lambda: True
-                    file.write = write
-                    try:
-                        # TextIOWrapper uses this methods to determine
-                        # if BOM (for UTF-16, etc) should be added
-                        file.seekable = file_or_filename.seekable
-                        file.tell = file_or_filename.tell
-                    except AttributeError:
-                        pass
-                file = io.TextIOWrapper(file,
-                                        encoding=encoding,
-                                        errors="xmlcharrefreplace",
-                                        newline="\n")
-                # Keep the original file open when the TextIOWrapper is
-                # destroyed
-                stack.callback(file.detach)
-                yield file.write, encoding
-
-def _namespaces(elem, default_namespace=None):
-    # identify namespaces used in this tree
-
-    # maps qnames to *encoded* prefix:local names
-    qnames = {None: None}
-
-    # maps uri:s to prefixes
-    namespaces = {}
-    if default_namespace:
-        namespaces[default_namespace] = ""
-
-    def add_qname(qname):
-        # calculate serialized qname representation
-        try:
-            if qname[:1] == "{":
-                uri, tag = qname[1:].rsplit("}", 1)
-                prefix = namespaces.get(uri)
-                if prefix is None:
-                    prefix = _namespace_map.get(uri)
-                    if prefix is None:
-                        prefix = "ns%d" % len(namespaces)
-                    if prefix != "xml":
-                        namespaces[uri] = prefix
-                if prefix:
-                    qnames[qname] = "%s:%s" % (prefix, tag)
-                else:
-                    qnames[qname] = tag # default element
-            else:
-                if default_namespace:
-                    # FIXME: can this be handled in XML 1.0?
-                    raise ValueError(
-                        "cannot use non-qualified names with "
-                        "default_namespace option"
-                        )
-                qnames[qname] = qname
-        except TypeError:
-            _raise_serialization_error(qname)
-
-    # populate qname and namespaces table
-    for elem in elem.iter():
-        tag = elem.tag
-        if isinstance(tag, QName):
-            if tag.text not in qnames:
-                add_qname(tag.text)
-        elif isinstance(tag, str):
-            if tag not in qnames:
-                add_qname(tag)
-        elif tag is not None and tag is not Comment and tag is not PI:
-            _raise_serialization_error(tag)
-        for key, value in elem.items():
-            if isinstance(key, QName):
-                key = key.text
-            if key not in qnames:
-                add_qname(key)
-            if isinstance(value, QName) and value.text not in qnames:
-                add_qname(value.text)
-        text = elem.text
-        if isinstance(text, QName) and text.text not in qnames:
-            add_qname(text.text)
-    return qnames, namespaces
-
-def _serialize_xml(write, elem, qnames, namespaces,
-                   short_empty_elements, **kwargs):
-    tag = elem.tag
-    text = elem.text
-    if tag is Comment:
-        write("<!--%s-->" % text)
-    elif tag is ProcessingInstruction:
-        write("<?%s?>" % text)
-    else:
-        tag = qnames[tag]
-        if tag is None:
-            if text:
-                write(_escape_cdata(text))
-            for e in elem:
-                _serialize_xml(write, e, qnames, None,
-                               short_empty_elements=short_empty_elements)
-        else:
-            write("<" + tag)
-            items = list(elem.items())
-            if items or namespaces:
-                if namespaces:
-                    for v, k in sorted(namespaces.items(),
-                                       key=lambda x: x[1]):  # sort on prefix
-                        if k:
-                            k = ":" + k
-                        write(" xmlns%s=\"%s\"" % (
-                            k,
-                            _escape_attrib(v)
-                            ))
-                for k, v in items:
-                    if isinstance(k, QName):
-                        k = k.text
-                    if isinstance(v, QName):
-                        v = qnames[v.text]
-                    else:
-                        v = _escape_attrib(v)
-                    write(" %s=\"%s\"" % (qnames[k], v))
-            if text or len(elem) or not short_empty_elements:
-                write(">")
-                if text:
-                    write(_escape_cdata(text))
-                for e in elem:
-                    _serialize_xml(write, e, qnames, None,
-                                   short_empty_elements=short_empty_elements)
-                write("</" + tag + ">")
-            else:
-                write(" />")
-    if elem.tail:
-        write(_escape_cdata(elem.tail))
-
-HTML_EMPTY = {"area", "base", "basefont", "br", "col", "embed", "frame", "hr",
-              "img", "input", "isindex", "link", "meta", "param", "source",
-              "track", "wbr"}
-
-def _serialize_html(write, elem, qnames, namespaces, **kwargs):
-    tag = elem.tag
-    text = elem.text
-    if tag is Comment:
-        write("<!--%s-->" % _escape_cdata(text))
-    elif tag is ProcessingInstruction:
-        write("<?%s?>" % _escape_cdata(text))
-    else:
-        tag = qnames[tag]
-        if tag is None:
-            if text:
-                write(_escape_cdata(text))
-            for e in elem:
-                _serialize_html(write, e, qnames, None)
-        else:
-            write("<" + tag)
-            items = list(elem.items())
-            if items or namespaces:
-                if namespaces:
-                    for v, k in sorted(namespaces.items(),
-                                       key=lambda x: x[1]):  # sort on prefix
-                        if k:
-                            k = ":" + k
-                        write(" xmlns%s=\"%s\"" % (
-                            k,
-                            _escape_attrib(v)
-                            ))
-                for k, v in items:
-                    if isinstance(k, QName):
-                        k = k.text
-                    if isinstance(v, QName):
-                        v = qnames[v.text]
-                    else:
-                        v = _escape_attrib_html(v)
-                    # FIXME: handle boolean attributes
-                    write(" %s=\"%s\"" % (qnames[k], v))
-            write(">")
-            ltag = tag.lower()
-            if text:
-                if ltag == "script" or ltag == "style":
-                    write(text)
-                else:
-                    write(_escape_cdata(text))
-            for e in elem:
-                _serialize_html(write, e, qnames, None)
-            if ltag not in HTML_EMPTY:
-                write("</" + tag + ">")
-    if elem.tail:
-        write(_escape_cdata(elem.tail))
-
-def _serialize_text(write, elem):
-    for part in elem.itertext():
-        write(part)
-    if elem.tail:
-        write(elem.tail)
-
-_serialize = {
-    "xml": _serialize_xml,
-    "html": _serialize_html,
-    "text": _serialize_text,
-# this optional method is imported at the end of the module
-#   "c14n": _serialize_c14n,
-}
-
-
-def register_namespace(prefix, uri):
-    """Register a namespace prefix.
-
-    The registry is global, and any existing mapping for either the
-    given prefix or the namespace URI will be removed.
-
-    *prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
-    attributes in this namespace will be serialized with prefix if possible.
-
-    ValueError is raised if prefix is reserved or is invalid.
-
-    """
-    if re.match(r"ns\d+$", prefix):
-        raise ValueError("Prefix format reserved for internal use")
-    for k, v in list(_namespace_map.items()):
-        if k == uri or v == prefix:
-            del _namespace_map[k]
-    _namespace_map[uri] = prefix
-
-_namespace_map = {
-    # "well-known" namespace prefixes
-    "http://www.w3.org/XML/1998/namespace": "xml",
-    "http://www.w3.org/1999/xhtml": "html",
-    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
-    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
-    # xml schema
-    "http://www.w3.org/2001/XMLSchema": "xs",
-    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
-    # dublin core
-    "http://purl.org/dc/elements/1.1/": "dc",
-}
-# For tests and troubleshooting
-register_namespace._namespace_map = _namespace_map
-
-def _raise_serialization_error(text):
-    raise TypeError(
-        "cannot serialize %r (type %s)" % (text, type(text).__name__)
-        )
-
-def _escape_cdata(text):
-    # escape character data
-    try:
-        # it's worth avoiding do-nothing calls for strings that are
-        # shorter than 500 characters, or so. assume that's, by far,
-        # the most common case in most applications.
-        if "&" in text:
-            text = text.replace("&", "&amp;")
-        if "<" in text:
-            text = text.replace("<", "&lt;")
-        if ">" in text:
-            text = text.replace(">", "&gt;")
-        return text
-    except (TypeError, AttributeError):
-        _raise_serialization_error(text)
-
-def _escape_attrib(text):
-    # escape attribute value
-    try:
-        if "&" in text:
-            text = text.replace("&", "&amp;")
-        if "<" in text:
-            text = text.replace("<", "&lt;")
-        if ">" in text:
-            text = text.replace(">", "&gt;")
-        if "\"" in text:
-            text = text.replace("\"", "&quot;")
-        # Although section 2.11 of the XML specification states that CR or
-        # CR LN should be replaced with just LN, it applies only to EOLNs
-        # which take part of organizing file into lines. Within attributes,
-        # we are replacing these with entity numbers, so they do not count.
-        # http://www.w3.org/TR/REC-xml/#sec-line-ends
-        # The current solution, contained in following six lines, was
-        # discussed in issue 17582 and 39011.
-        if "\r" in text:
-            text = text.replace("\r", "&#13;")
-        if "\n" in text:
-            text = text.replace("\n", "&#10;")
-        if "\t" in text:
-            text = text.replace("\t", "&#09;")
-        return text
-    except (TypeError, AttributeError):
-        _raise_serialization_error(text)
-
-def _escape_attrib_html(text):
-    # escape attribute value
-    try:
-        if "&" in text:
-            text = text.replace("&", "&amp;")
-        if ">" in text:
-            text = text.replace(">", "&gt;")
-        if "\"" in text:
-            text = text.replace("\"", "&quot;")
-        return text
-    except (TypeError, AttributeError):
-        _raise_serialization_error(text)
-
-# --------------------------------------------------------------------
-
-def tostring(element, encoding=None, method=None, *,
-             xml_declaration=None, default_namespace=None,
-             short_empty_elements=True):
-    """Generate string representation of XML element.
-
-    All subelements are included. If encoding is "unicode", a string
-    is returned. Otherwise a bytestring is returned.
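For reference, the serializer and the _escape_* helpers restored above are exercised end to end through the public API. A minimal sketch using only standard xml.etree.ElementTree names (the element names and attribute values are invented for illustration):

    from xml.etree.ElementTree import Element, SubElement, tostring

    root = Element("root")
    item = SubElement(root, "item", {"label": 'a "quoted" value'})
    item.text = "ampersand & angle <brackets>"

    # _escape_attrib() and _escape_cdata() run under the hood:
    print(tostring(root, encoding="unicode"))
    # <root><item label="a &quot;quoted&quot; value">ampersand &amp; angle &lt;brackets&gt;</item></root>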
- - *element* is an Element instance, *encoding* is an optional output - encoding defaulting to US-ASCII, *method* is an optional output which can - be one of "xml" (default), "html", "text" or "c14n", *default_namespace* - sets the default XML namespace (for "xmlns"). - - Returns an (optionally) encoded string containing the XML data. - - """ - stream = io.StringIO() if encoding == 'unicode' else io.BytesIO() - ElementTree(element).write(stream, encoding, - xml_declaration=xml_declaration, - default_namespace=default_namespace, - method=method, - short_empty_elements=short_empty_elements) - return stream.getvalue() - -class _ListDataStream(io.BufferedIOBase): - """An auxiliary stream accumulating into a list reference.""" - def __init__(self, lst): - self.lst = lst - - def writable(self): - return True - - def seekable(self): - return True - - def write(self, b): - self.lst.append(b) - - def tell(self): - return len(self.lst) - -def tostringlist(element, encoding=None, method=None, *, - xml_declaration=None, default_namespace=None, - short_empty_elements=True): - lst = [] - stream = _ListDataStream(lst) - ElementTree(element).write(stream, encoding, - xml_declaration=xml_declaration, - default_namespace=default_namespace, - method=method, - short_empty_elements=short_empty_elements) - return lst - - -def dump(elem): - """Write element tree or element structure to sys.stdout. - - This function should be used for debugging only. - - *elem* is either an ElementTree, or a single Element. The exact output - format is implementation dependent. In this version, it's written as an - ordinary XML file. - - """ - # debugging - if not isinstance(elem, ElementTree): - elem = ElementTree(elem) - elem.write(sys.stdout, encoding="unicode") - tail = elem.getroot().tail - if not tail or tail[-1] != "\n": - sys.stdout.write("\n") - - -def indent(tree, space=" ", level=0): - """Indent an XML document by inserting newlines and indentation space - after elements. - - *tree* is the ElementTree or Element to modify. The (root) element - itself will not be changed, but the tail text of all elements in its - subtree will be adapted. - - *space* is the whitespace to insert for each indentation level, two - space characters by default. - - *level* is the initial indentation level. Setting this to a higher - value than 0 can be used for indenting subtrees that are more deeply - nested inside of a document. - """ - if isinstance(tree, ElementTree): - tree = tree.getroot() - if level < 0: - raise ValueError(f"Initial indentation level must be >= 0, got {level}") - if not len(tree): - return - - # Reduce the memory consumption by reusing indentation strings. - indentations = ["\n" + level * space] - - def _indent_children(elem, level): - # Start a new indentation level for the first child. - child_level = level + 1 - try: - child_indentation = indentations[child_level] - except IndexError: - child_indentation = indentations[level] + space - indentations.append(child_indentation) - - if not elem.text or not elem.text.strip(): - elem.text = child_indentation - - for child in elem: - if len(child): - _indent_children(child, child_level) - if not child.tail or not child.tail.strip(): - child.tail = child_indentation - - # Dedent after the last child by overwriting the previous indentation. 
- if not child.tail.strip(): - child.tail = indentations[level] - - _indent_children(tree, 0) - - -# -------------------------------------------------------------------- -# parsing - - -def parse(source, parser=None): - """Parse XML document into element tree. - - *source* is a filename or file object containing XML data, - *parser* is an optional parser instance defaulting to XMLParser. - - Return an ElementTree instance. - - """ - tree = ElementTree() - tree.parse(source, parser) - return tree - - -def iterparse(source, events=None, parser=None): - """Incrementally parse XML document into ElementTree. - - This class also reports what's going on to the user based on the - *events* it is initialized with. The supported events are the strings - "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get - detailed namespace information). If *events* is omitted, only - "end" events are reported. - - *source* is a filename or file object containing XML data, *events* is - a list of events to report back, *parser* is an optional parser instance. - - Returns an iterator providing (event, elem) pairs. - - """ - # Use the internal, undocumented _parser argument for now; When the - # parser argument of iterparse is removed, this can be killed. - pullparser = XMLPullParser(events=events, _parser=parser) - - if not hasattr(source, "read"): - source = open(source, "rb") - close_source = True - else: - close_source = False - - def iterator(source): - try: - while True: - yield from pullparser.read_events() - # load event buffer - data = source.read(16 * 1024) - if not data: - break - pullparser.feed(data) - root = pullparser._close_and_return_root() - yield from pullparser.read_events() - it = wr() - if it is not None: - it.root = root - finally: - if close_source: - source.close() - - gen = iterator(source) - class IterParseIterator(collections.abc.Iterator): - __next__ = gen.__next__ - def close(self): - if close_source: - source.close() - gen.close() - - def __del__(self): - # TODO: Emit a ResourceWarning if it was not explicitly closed. - # (When the close() method will be supported in all maintained Python versions.) - if close_source: - source.close() - - it = IterParseIterator() - it.root = None - wr = weakref.ref(it) - return it - - -class XMLPullParser: - - def __init__(self, events=None, *, _parser=None): - # The _parser argument is for internal use only and must not be relied - # upon in user code. It will be removed in a future release. - # See https://bugs.python.org/issue17741 for more details. - - self._events_queue = collections.deque() - self._parser = _parser or XMLParser(target=TreeBuilder()) - # wire up the parser for event reporting - if events is None: - events = ("end",) - self._parser._setevents(self._events_queue, events) - - def feed(self, data): - """Feed encoded data to parser.""" - if self._parser is None: - raise ValueError("feed() called after end of stream") - if data: - try: - self._parser.feed(data) - except SyntaxError as exc: - self._events_queue.append(exc) - - def _close_and_return_root(self): - # iterparse needs this to set its root attribute properly :( - root = self._parser.close() - self._parser = None - return root - - def close(self): - """Finish feeding data to parser. - - Unlike XMLParser, does not return the root element. Use - read_events() to consume elements from XMLPullParser. - """ - self._close_and_return_root() - - def read_events(self): - """Return an iterator over currently available (event, elem) pairs. 
- - Events are consumed from the internal event queue as they are - retrieved from the iterator. - """ - events = self._events_queue - while events: - event = events.popleft() - if isinstance(event, Exception): - raise event - else: - yield event - - def flush(self): - if self._parser is None: - raise ValueError("flush() called after end of stream") - self._parser.flush() - - -def XML(text, parser=None): - """Parse XML document from string constant. - - This function can be used to embed "XML Literals" in Python code. - - *text* is a string containing XML data, *parser* is an - optional parser instance, defaulting to the standard XMLParser. - - Returns an Element instance. - - """ - if not parser: - parser = XMLParser(target=TreeBuilder()) - parser.feed(text) - return parser.close() - - -def XMLID(text, parser=None): - """Parse XML document from string constant for its IDs. - - *text* is a string containing XML data, *parser* is an - optional parser instance, defaulting to the standard XMLParser. - - Returns an (Element, dict) tuple, in which the - dict maps element id:s to elements. - - """ - if not parser: - parser = XMLParser(target=TreeBuilder()) - parser.feed(text) - tree = parser.close() - ids = {} - for elem in tree.iter(): - id = elem.get("id") - if id: - ids[id] = elem - return tree, ids - -# Parse XML document from string constant. Alias for XML(). -fromstring = XML - -def fromstringlist(sequence, parser=None): - """Parse XML document from sequence of string fragments. - - *sequence* is a list of other sequence, *parser* is an optional parser - instance, defaulting to the standard XMLParser. - - Returns an Element instance. - - """ - if not parser: - parser = XMLParser(target=TreeBuilder()) - for text in sequence: - parser.feed(text) - return parser.close() - -# -------------------------------------------------------------------- - - -class TreeBuilder: - """Generic element structure builder. - - This builder converts a sequence of start, data, and end method - calls to a well-formed element structure. - - You can use this class to build an element structure using a custom XML - parser, or a parser for some other XML-like format. - - *element_factory* is an optional element factory which is called - to create new Element instances, as necessary. - - *comment_factory* is a factory to create comments to be used instead of - the standard factory. If *insert_comments* is false (the default), - comments will not be inserted into the tree. - - *pi_factory* is a factory to create processing instructions to be used - instead of the standard factory. If *insert_pis* is false (the default), - processing instructions will not be inserted into the tree. 
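The non-blocking pattern that feed() and read_events() above are designed for looks like this in practice; a sketch where the chunk boundaries and document are invented (in real use the chunks would come from a socket or file):

    from xml.etree.ElementTree import XMLPullParser

    parser = XMLPullParser(events=("start", "end"))
    for chunk in ("<root><item>one</item>", "<item>two</item></root>"):
        parser.feed(chunk)              # data may arrive in arbitrary pieces
        for event, elem in parser.read_events():
            print(event, elem.tag)
    parser.close()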
- """ - def __init__(self, element_factory=None, *, - comment_factory=None, pi_factory=None, - insert_comments=False, insert_pis=False): - self._data = [] # data collector - self._elem = [] # element stack - self._last = None # last element - self._root = None # root element - self._tail = None # true if we're after an end tag - if comment_factory is None: - comment_factory = Comment - self._comment_factory = comment_factory - self.insert_comments = insert_comments - if pi_factory is None: - pi_factory = ProcessingInstruction - self._pi_factory = pi_factory - self.insert_pis = insert_pis - if element_factory is None: - element_factory = Element - self._factory = element_factory - - def close(self): - """Flush builder buffers and return toplevel document Element.""" - assert len(self._elem) == 0, "missing end tags" - assert self._root is not None, "missing toplevel element" - return self._root - - def _flush(self): - if self._data: - if self._last is not None: - text = "".join(self._data) - if self._tail: - assert self._last.tail is None, "internal error (tail)" - self._last.tail = text - else: - assert self._last.text is None, "internal error (text)" - self._last.text = text - self._data = [] - - def data(self, data): - """Add text to current element.""" - self._data.append(data) - - def start(self, tag, attrs): - """Open new element and return it. - - *tag* is the element name, *attrs* is a dict containing element - attributes. - - """ - self._flush() - self._last = elem = self._factory(tag, attrs) - if self._elem: - self._elem[-1].append(elem) - elif self._root is None: - self._root = elem - self._elem.append(elem) - self._tail = 0 - return elem - - def end(self, tag): - """Close and return current Element. - - *tag* is the element name. - - """ - self._flush() - self._last = self._elem.pop() - assert self._last.tag == tag,\ - "end tag mismatch (expected %s, got %s)" % ( - self._last.tag, tag) - self._tail = 1 - return self._last - - def comment(self, text): - """Create a comment using the comment_factory. - - *text* is the text of the comment. - """ - return self._handle_single( - self._comment_factory, self.insert_comments, text) - - def pi(self, target, text=None): - """Create a processing instruction using the pi_factory. - - *target* is the target name of the processing instruction. - *text* is the data of the processing instruction, or ''. - """ - return self._handle_single( - self._pi_factory, self.insert_pis, target, text) - - def _handle_single(self, factory, insert, *args): - elem = factory(*args) - if insert: - self._flush() - self._last = elem - if self._elem: - self._elem[-1].append(elem) - self._tail = 1 - return elem - - -# also see ElementTree and TreeBuilder -class XMLParser: - """Element structure builder for XML source data based on the expat parser. 
- - *target* is an optional target object which defaults to an instance of the - standard TreeBuilder class, *encoding* is an optional encoding string - which if given, overrides the encoding specified in the XML file: - http://www.iana.org/assignments/character-sets - - """ - - def __init__(self, *, target=None, encoding=None): - try: - from xml.parsers import expat - except ImportError: - try: - import pyexpat as expat - except ImportError: - raise ImportError( - "No module named expat; use SimpleXMLTreeBuilder instead" - ) - parser = expat.ParserCreate(encoding, "}") - if target is None: - target = TreeBuilder() - # underscored names are provided for compatibility only - self.parser = self._parser = parser - self.target = self._target = target - self._error = expat.error - self._names = {} # name memo cache - # main callbacks - parser.DefaultHandlerExpand = self._default - if hasattr(target, 'start'): - parser.StartElementHandler = self._start - if hasattr(target, 'end'): - parser.EndElementHandler = self._end - if hasattr(target, 'start_ns'): - parser.StartNamespaceDeclHandler = self._start_ns - if hasattr(target, 'end_ns'): - parser.EndNamespaceDeclHandler = self._end_ns - if hasattr(target, 'data'): - parser.CharacterDataHandler = target.data - # miscellaneous callbacks - if hasattr(target, 'comment'): - parser.CommentHandler = target.comment - if hasattr(target, 'pi'): - parser.ProcessingInstructionHandler = target.pi - # Configure pyexpat: buffering, new-style attribute handling. - parser.buffer_text = 1 - parser.ordered_attributes = 1 - self._doctype = None - self.entity = {} - try: - self.version = "Expat %d.%d.%d" % expat.version_info - except AttributeError: - pass # unknown - - def _setevents(self, events_queue, events_to_report): - # Internal API for XMLPullParser - # events_to_report: a list of events to report during parsing (same as - # the *events* of XMLPullParser's constructor. - # events_queue: a list of actual parsing events that will be populated - # by the underlying parser. 
- # - parser = self._parser - append = events_queue.append - for event_name in events_to_report: - if event_name == "start": - parser.ordered_attributes = 1 - def handler(tag, attrib_in, event=event_name, append=append, - start=self._start): - append((event, start(tag, attrib_in))) - parser.StartElementHandler = handler - elif event_name == "end": - def handler(tag, event=event_name, append=append, - end=self._end): - append((event, end(tag))) - parser.EndElementHandler = handler - elif event_name == "start-ns": - # TreeBuilder does not implement .start_ns() - if hasattr(self.target, "start_ns"): - def handler(prefix, uri, event=event_name, append=append, - start_ns=self._start_ns): - append((event, start_ns(prefix, uri))) - else: - def handler(prefix, uri, event=event_name, append=append): - append((event, (prefix or '', uri or ''))) - parser.StartNamespaceDeclHandler = handler - elif event_name == "end-ns": - # TreeBuilder does not implement .end_ns() - if hasattr(self.target, "end_ns"): - def handler(prefix, event=event_name, append=append, - end_ns=self._end_ns): - append((event, end_ns(prefix))) - else: - def handler(prefix, event=event_name, append=append): - append((event, None)) - parser.EndNamespaceDeclHandler = handler - elif event_name == 'comment': - def handler(text, event=event_name, append=append, self=self): - append((event, self.target.comment(text))) - parser.CommentHandler = handler - elif event_name == 'pi': - def handler(pi_target, data, event=event_name, append=append, - self=self): - append((event, self.target.pi(pi_target, data))) - parser.ProcessingInstructionHandler = handler - else: - raise ValueError("unknown event %r" % event_name) - - def _raiseerror(self, value): - err = ParseError(value) - err.code = value.code - err.position = value.lineno, value.offset - raise err - - def _fixname(self, key): - # expand qname, and convert name string to ascii, if possible - try: - name = self._names[key] - except KeyError: - name = key - if "}" in name: - name = "{" + name - self._names[key] = name - return name - - def _start_ns(self, prefix, uri): - return self.target.start_ns(prefix or '', uri or '') - - def _end_ns(self, prefix): - return self.target.end_ns(prefix or '') - - def _start(self, tag, attr_list): - # Handler for expat's StartElementHandler. Since ordered_attributes - # is set, the attributes are reported as a list of alternating - # attribute name,value. 
-        fixname = self._fixname
-        tag = fixname(tag)
-        attrib = {}
-        if attr_list:
-            for i in range(0, len(attr_list), 2):
-                attrib[fixname(attr_list[i])] = attr_list[i+1]
-        return self.target.start(tag, attrib)
-
-    def _end(self, tag):
-        return self.target.end(self._fixname(tag))
-
-    def _default(self, text):
-        prefix = text[:1]
-        if prefix == "&":
-            # deal with undefined entities
-            try:
-                data_handler = self.target.data
-            except AttributeError:
-                return
-            try:
-                data_handler(self.entity[text[1:-1]])
-            except KeyError:
-                from xml.parsers import expat
-                err = expat.error(
-                    "undefined entity %s: line %d, column %d" %
-                    (text, self.parser.ErrorLineNumber,
-                    self.parser.ErrorColumnNumber)
-                    )
-                err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
-                err.lineno = self.parser.ErrorLineNumber
-                err.offset = self.parser.ErrorColumnNumber
-                raise err
-        elif prefix == "<" and text[:9] == "<!DOCTYPE":
-            self._doctype = [] # inside a doctype declaration
-        elif self._doctype is not None:
-            # parse doctype contents
-            if prefix == ">":
-                self._doctype = None
-                return
-            text = text.strip()
-            if not text:
-                return
-            self._doctype.append(text)
-            n = len(self._doctype)
-            if n > 2:
-                type = self._doctype[1]
-                if type == "PUBLIC" and n == 4:
-                    name, type, pubid, system = self._doctype
-                    if pubid:
-                        pubid = pubid[1:-1]
-                elif type == "SYSTEM" and n == 3:
-                    name, type, system = self._doctype
-                    pubid = None
-                else:
-                    return
-                if hasattr(self.target, "doctype"):
-                    self.target.doctype(name, pubid, system[1:-1])
-                elif hasattr(self, "doctype"):
-                    warnings.warn(
-                        "The doctype() method of XMLParser is ignored. "
-                        "Define doctype() method on the TreeBuilder target.",
-                        RuntimeWarning)
-
-                self._doctype = None
-
-    def feed(self, data):
-        """Feed encoded data to parser."""
-        try:
-            self.parser.Parse(data, False)
-        except self._error as v:
-            self._raiseerror(v)
-
-    def close(self):
-        """Finish feeding data to parser and return element structure."""
-        try:
-            self.parser.Parse(b"", True) # end of data
-        except self._error as v:
-            self._raiseerror(v)
-        try:
-            close_handler = self.target.close
-        except AttributeError:
-            pass
-        else:
-            return close_handler()
-        finally:
-            # get rid of circular references
-            del self.parser, self._parser
-            del self.target, self._target
-
-    def flush(self):
-        was_enabled = self.parser.GetReparseDeferralEnabled()
-        try:
-            self.parser.SetReparseDeferralEnabled(False)
-            self.parser.Parse(b"", False)
-        except self._error as v:
-            self._raiseerror(v)
-        finally:
-            self.parser.SetReparseDeferralEnabled(was_enabled)
-
-# --------------------------------------------------------------------
-# C14N 2.0
-
-def canonicalize(xml_data=None, *, out=None, from_file=None, **options):
-    """Convert XML to its C14N 2.0 serialised form.
-
-    If *out* is provided, it must be a file or file-like object that receives
-    the serialised canonical XML output (text, not bytes) through its ``.write()``
-    method. To write to a file, open it in text mode with encoding "utf-8".
-    If *out* is not provided, this function returns the output as text string.
-
-    Either *xml_data* (an XML string) or *from_file* (a file path or
-    file-like object) must be provided as input.
-
-    The configuration options are the same as for the ``C14NWriterTarget``.
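Since canonicalize() is the convenience wrapper around C14NWriterTarget, one concrete call illustrates the normalisations it performs, namely attribute ordering and expanded empty elements (the input string is invented for the example):

    from xml.etree.ElementTree import canonicalize

    print(canonicalize('<root   b="2" a="1"><child/></root>'))
    # -> <root a="1" b="2"><child></child></root>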
- """ - if xml_data is None and from_file is None: - raise ValueError("Either 'xml_data' or 'from_file' must be provided as input") - sio = None - if out is None: - sio = out = io.StringIO() - - parser = XMLParser(target=C14NWriterTarget(out.write, **options)) - - if xml_data is not None: - parser.feed(xml_data) - parser.close() - elif from_file is not None: - parse(from_file, parser=parser) - - return sio.getvalue() if sio is not None else None - - -_looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match - - -class C14NWriterTarget: - """ - Canonicalization writer target for the XMLParser. - - Serialises parse events to XML C14N 2.0. - - The *write* function is used for writing out the resulting data stream - as text (not bytes). To write to a file, open it in text mode with encoding - "utf-8" and pass its ``.write`` method. - - Configuration options: - - - *with_comments*: set to true to include comments - - *strip_text*: set to true to strip whitespace before and after text content - - *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}" - - *qname_aware_tags*: a set of qname aware tag names in which prefixes - should be replaced in text content - - *qname_aware_attrs*: a set of qname aware attribute names in which prefixes - should be replaced in text content - - *exclude_attrs*: a set of attribute names that should not be serialised - - *exclude_tags*: a set of tag names that should not be serialised - """ - def __init__(self, write, *, - with_comments=False, strip_text=False, rewrite_prefixes=False, - qname_aware_tags=None, qname_aware_attrs=None, - exclude_attrs=None, exclude_tags=None): - self._write = write - self._data = [] - self._with_comments = with_comments - self._strip_text = strip_text - self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None - self._exclude_tags = set(exclude_tags) if exclude_tags else None - - self._rewrite_prefixes = rewrite_prefixes - if qname_aware_tags: - self._qname_aware_tags = set(qname_aware_tags) - else: - self._qname_aware_tags = None - if qname_aware_attrs: - self._find_qname_aware_attrs = set(qname_aware_attrs).intersection - else: - self._find_qname_aware_attrs = None - - # Stack with globally and newly declared namespaces as (uri, prefix) pairs. - self._declared_ns_stack = [[ - ("http://www.w3.org/XML/1998/namespace", "xml"), - ]] - # Stack with user declared namespace prefixes as (uri, prefix) pairs. 
- self._ns_stack = [] - if not rewrite_prefixes: - self._ns_stack.append(list(_namespace_map.items())) - self._ns_stack.append([]) - self._prefix_map = {} - self._preserve_space = [False] - self._pending_start = None - self._root_seen = False - self._root_done = False - self._ignored_depth = 0 - - def _iter_namespaces(self, ns_stack, _reversed=reversed): - for namespaces in _reversed(ns_stack): - if namespaces: # almost no element declares new namespaces - yield from namespaces - - def _resolve_prefix_name(self, prefixed_name): - prefix, name = prefixed_name.split(':', 1) - for uri, p in self._iter_namespaces(self._ns_stack): - if p == prefix: - return f'{{{uri}}}{name}' - raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope') - - def _qname(self, qname, uri=None): - if uri is None: - uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname) - else: - tag = qname - - prefixes_seen = set() - for u, prefix in self._iter_namespaces(self._declared_ns_stack): - if u == uri and prefix not in prefixes_seen: - return f'{prefix}:{tag}' if prefix else tag, tag, uri - prefixes_seen.add(prefix) - - # Not declared yet => add new declaration. - if self._rewrite_prefixes: - if uri in self._prefix_map: - prefix = self._prefix_map[uri] - else: - prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}' - self._declared_ns_stack[-1].append((uri, prefix)) - return f'{prefix}:{tag}', tag, uri - - if not uri and '' not in prefixes_seen: - # No default namespace declared => no prefix needed. - return tag, tag, uri - - for u, prefix in self._iter_namespaces(self._ns_stack): - if u == uri: - self._declared_ns_stack[-1].append((uri, prefix)) - return f'{prefix}:{tag}' if prefix else tag, tag, uri - - if not uri: - # As soon as a default namespace is defined, - # anything that has no namespace (and thus, no prefix) goes there. - return tag, tag, uri - - raise ValueError(f'Namespace "{uri}" is not declared in scope') - - def data(self, data): - if not self._ignored_depth: - self._data.append(data) - - def _flush(self, _join_text=''.join): - data = _join_text(self._data) - del self._data[:] - if self._strip_text and not self._preserve_space[-1]: - data = data.strip() - if self._pending_start is not None: - args, self._pending_start = self._pending_start, None - qname_text = data if data and _looks_like_prefix_name(data) else None - self._start(*args, qname_text) - if qname_text is not None: - return - if data and self._root_seen: - self._write(_escape_cdata_c14n(data)) - - def start_ns(self, prefix, uri): - if self._ignored_depth: - return - # we may have to resolve qnames in text content - if self._data: - self._flush() - self._ns_stack[-1].append((uri, prefix)) - - def start(self, tag, attrs): - if self._exclude_tags is not None and ( - self._ignored_depth or tag in self._exclude_tags): - self._ignored_depth += 1 - return - if self._data: - self._flush() - - new_namespaces = [] - self._declared_ns_stack.append(new_namespaces) - - if self._qname_aware_tags is not None and tag in self._qname_aware_tags: - # Need to parse text first to see if it requires a prefix declaration. - self._pending_start = (tag, attrs, new_namespaces) - return - self._start(tag, attrs, new_namespaces) - - def _start(self, tag, attrs, new_namespaces, qname_text=None): - if self._exclude_attrs is not None and attrs: - attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs} - - qnames = {tag, *attrs} - resolved_names = {} - - # Resolve prefixes in attribute and tag text. 
-        if qname_text is not None:
-            qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text)
-            qnames.add(qname)
-        if self._find_qname_aware_attrs is not None and attrs:
-            qattrs = self._find_qname_aware_attrs(attrs)
-            if qattrs:
-                for attr_name in qattrs:
-                    value = attrs[attr_name]
-                    if _looks_like_prefix_name(value):
-                        qname = resolved_names[value] = self._resolve_prefix_name(value)
-                        qnames.add(qname)
-            else:
-                qattrs = None
-        else:
-            qattrs = None
-
-        # Assign prefixes in lexicographical order of used URIs.
-        parse_qname = self._qname
-        parsed_qnames = {n: parse_qname(n) for n in sorted(
-            qnames, key=lambda n: n.split('}', 1))}
-
-        # Write namespace declarations in prefix order ...
-        if new_namespaces:
-            attr_list = [
-                ('xmlns:' + prefix if prefix else 'xmlns', uri)
-                for uri, prefix in new_namespaces
-            ]
-            attr_list.sort()
-        else:
-            # almost always empty
-            attr_list = []
-
-        # ... followed by attributes in URI+name order
-        if attrs:
-            for k, v in sorted(attrs.items()):
-                if qattrs is not None and k in qattrs and v in resolved_names:
-                    v = parsed_qnames[resolved_names[v]][0]
-                attr_qname, attr_name, uri = parsed_qnames[k]
-                # No prefix for attributes in default ('') namespace.
-                attr_list.append((attr_qname if uri else attr_name, v))
-
-        # Honour xml:space attributes.
-        space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space')
-        self._preserve_space.append(
-            space_behaviour == 'preserve' if space_behaviour
-            else self._preserve_space[-1])
-
-        # Write the tag.
-        write = self._write
-        write('<' + parsed_qnames[tag][0])
-        if attr_list:
-            write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list]))
-        write('>')
-
-        # Write the resolved qname text content.
-        if qname_text is not None:
-            write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0]))
-
-        self._root_seen = True
-        self._ns_stack.append([])
-
-    def end(self, tag):
-        if self._ignored_depth:
-            self._ignored_depth -= 1
-            return
-        if self._data:
-            self._flush()
-        self._write(f'</{self._qname(tag)[0]}>')
-        self._preserve_space.pop()
-        self._root_done = len(self._preserve_space) == 1
-        self._declared_ns_stack.pop()
-        self._ns_stack.pop()
-
-    def comment(self, text):
-        if not self._with_comments:
-            return
-        if self._ignored_depth:
-            return
-        if self._root_done:
-            self._write('\n')
-        elif self._root_seen and self._data:
-            self._flush()
-        self._write(f'<!--{_escape_cdata_c14n(text)}-->')
-        if not self._root_seen:
-            self._write('\n')
-
-    def pi(self, target, data):
-        if self._ignored_depth:
-            return
-        if self._root_done:
-            self._write('\n')
-        elif self._root_seen and self._data:
-            self._flush()
-        self._write(
-            f'<?{target} {_escape_cdata_c14n(data)}?>' if data
-            else f'<?{target}?>')
-        if not self._root_seen:
-            self._write('\n')
-
-
-def _escape_cdata_c14n(text):
-    # escape character data
-    try:
-        # it's worth avoiding do-nothing calls for strings that are
-        # shorter than 500 characters, or so. assume that's, by far,
-        # the most common case in most applications.
-        if '&' in text:
-            text = text.replace('&', '&amp;')
-        if '<' in text:
-            text = text.replace('<', '&lt;')
-        if '>' in text:
-            text = text.replace('>', '&gt;')
-        if '\r' in text:
-            text = text.replace('\r', '&#xD;')
-        return text
-    except (TypeError, AttributeError):
-        _raise_serialization_error(text)
-
-
-def _escape_attrib_c14n(text):
-    # escape attribute value
-    try:
-        if '&' in text:
-            text = text.replace('&', '&amp;')
-        if '<' in text:
-            text = text.replace('<', '&lt;')
-        if '"' in text:
-            text = text.replace('"', '&quot;')
-        if '\t' in text:
-            text = text.replace('\t', '&#x9;')
-        if '\n' in text:
-            text = text.replace('\n', '&#xA;')
-        if '\r' in text:
-            text = text.replace('\r', '&#xD;')
-        return text
-    except (TypeError, AttributeError):
-        _raise_serialization_error(text)
-
-
-# --------------------------------------------------------------------
-
-# Import the C accelerators
-try:
-    # Element is going to be shadowed by the C implementation. We need to keep
-    # the Python version of it accessible for some "creative" uses by external
-    # code (see tests)
-    _Element_Py = Element
-
-    # Element, SubElement, ParseError, TreeBuilder, XMLParser, _set_factories
-    from _elementtree import *
-    from _elementtree import _set_factories
-except ImportError:
-    pass
-else:
-    _set_factories(Comment, ProcessingInstruction)
diff --git a/Python313_13_x86_Template/Lib/xml/parsers/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/xml/parsers/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index bc21ea8f..00000000
Binary files a/Python313_13_x86_Template/Lib/xml/parsers/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/xml/parsers/__pycache__/expat.cpython-313.pyc b/Python313_13_x86_Template/Lib/xml/parsers/__pycache__/expat.cpython-313.pyc
deleted file mode 100644
index 449bbf5c..00000000
Binary files a/Python313_13_x86_Template/Lib/xml/parsers/__pycache__/expat.cpython-313.pyc and /dev/null differ
diff --git a/Python313_13_x86_Template/Lib/xml/sax/__init__.py b/Python313_13_x86_Template/Lib/xml/sax/__init__.py
deleted file mode 100644
index b6573102..00000000
--- a/Python313_13_x86_Template/Lib/xml/sax/__init__.py
+++ /dev/null
@@ -1,94 +0,0 @@
-"""Simple API for XML (SAX) implementation for Python.
-
-This module provides an implementation of the SAX 2 interface;
-information about the Java version of the interface can be found at
-http://www.megginson.com/SAX/. The Python version of the interface is
-documented at <...>.
-
-This package contains the following modules:
-
-handler -- Base classes and constants which define the SAX 2 API for
-           the 'client-side' of SAX for Python.
-
-saxutils -- Implementation of the convenience classes commonly used to
-            work with SAX.
-
-xmlreader -- Base classes and constants which define the SAX 2 API for
-             the parsers used with SAX for Python.
-
-expatreader -- Driver that allows use of the Expat parser with SAX.
-""" - -from .xmlreader import InputSource -from .handler import ContentHandler, ErrorHandler -from ._exceptions import SAXException, SAXNotRecognizedException, \ - SAXParseException, SAXNotSupportedException, \ - SAXReaderNotAvailable - - -def parse(source, handler, errorHandler=ErrorHandler()): - parser = make_parser() - parser.setContentHandler(handler) - parser.setErrorHandler(errorHandler) - parser.parse(source) - -def parseString(string, handler, errorHandler=ErrorHandler()): - import io - if errorHandler is None: - errorHandler = ErrorHandler() - parser = make_parser() - parser.setContentHandler(handler) - parser.setErrorHandler(errorHandler) - - inpsrc = InputSource() - if isinstance(string, str): - inpsrc.setCharacterStream(io.StringIO(string)) - else: - inpsrc.setByteStream(io.BytesIO(string)) - parser.parse(inpsrc) - -# this is the parser list used by the make_parser function if no -# alternatives are given as parameters to the function - -default_parser_list = ["xml.sax.expatreader"] - -# tell modulefinder that importing sax potentially imports expatreader -_false = 0 -if _false: - import xml.sax.expatreader - -import os, sys -if not sys.flags.ignore_environment and "PY_SAX_PARSER" in os.environ: - default_parser_list = os.environ["PY_SAX_PARSER"].split(",") -del os, sys - - -def make_parser(parser_list=()): - """Creates and returns a SAX parser. - - Creates the first parser it is able to instantiate of the ones - given in the iterable created by chaining parser_list and - default_parser_list. The iterables must contain the names of Python - modules containing both a SAX parser and a create_parser function.""" - - for parser_name in list(parser_list) + default_parser_list: - try: - return _create_parser(parser_name) - except ImportError: - import sys - if parser_name in sys.modules: - # The parser module was found, but importing it - # failed unexpectedly, pass this exception through - raise - except SAXReaderNotAvailable: - # The parser module detected that it won't work properly, - # so try the next one - pass - - raise SAXReaderNotAvailable("No parsers found", None) - -# --- Internal utility methods used by make_parser - -def _create_parser(parser_name): - drv_module = __import__(parser_name,{},{},['create_parser']) - return drv_module.create_parser() diff --git a/Python313_13_x86_Template/Lib/xml/sax/handler.py b/Python313_13_x86_Template/Lib/xml/sax/handler.py deleted file mode 100644 index e8d417e5..00000000 --- a/Python313_13_x86_Template/Lib/xml/sax/handler.py +++ /dev/null @@ -1,387 +0,0 @@ -""" -This module contains the core classes of version 2.0 of SAX for Python. -This file provides only default classes with absolutely minimum -functionality, from which drivers and applications can be subclassed. - -Many of these classes are empty and are included only as documentation -of the interfaces. - -$Id$ -""" - -version = '2.0beta' - -#============================================================================ -# -# HANDLER INTERFACES -# -#============================================================================ - -# ===== ERRORHANDLER ===== - -class ErrorHandler: - """Basic interface for SAX error handlers. - - If you create an object that implements this interface, then - register the object with your XMLReader, the parser will call the - methods in your object to report all warnings and errors. There - are three levels of errors available: warnings, (possibly) - recoverable errors, and unrecoverable errors. 
All methods take a - SAXParseException as the only parameter.""" - - def error(self, exception): - "Handle a recoverable error." - raise exception - - def fatalError(self, exception): - "Handle a non-recoverable error." - raise exception - - def warning(self, exception): - "Handle a warning." - print(exception) - - -# ===== CONTENTHANDLER ===== - -class ContentHandler: - """Interface for receiving logical document content events. - - This is the main callback interface in SAX, and the one most - important to applications. The order of events in this interface - mirrors the order of the information in the document.""" - - def __init__(self): - self._locator = None - - def setDocumentLocator(self, locator): - """Called by the parser to give the application a locator for - locating the origin of document events. - - SAX parsers are strongly encouraged (though not absolutely - required) to supply a locator: if it does so, it must supply - the locator to the application by invoking this method before - invoking any of the other methods in the DocumentHandler - interface. - - The locator allows the application to determine the end - position of any document-related event, even if the parser is - not reporting an error. Typically, the application will use - this information for reporting its own errors (such as - character content that does not match an application's - business rules). The information returned by the locator is - probably not sufficient for use with a search engine. - - Note that the locator will return correct information only - during the invocation of the events in this interface. The - application should not attempt to use it at any other time.""" - self._locator = locator - - def startDocument(self): - """Receive notification of the beginning of a document. - - The SAX parser will invoke this method only once, before any - other methods in this interface or in DTDHandler (except for - setDocumentLocator).""" - - def endDocument(self): - """Receive notification of the end of a document. - - The SAX parser will invoke this method only once, and it will - be the last method invoked during the parse. The parser shall - not invoke this method until it has either abandoned parsing - (because of an unrecoverable error) or reached the end of - input.""" - - def startPrefixMapping(self, prefix, uri): - """Begin the scope of a prefix-URI Namespace mapping. - - The information from this event is not necessary for normal - Namespace processing: the SAX XML reader will automatically - replace prefixes for element and attribute names when the - http://xml.org/sax/features/namespaces feature is true (the - default). - - There are cases, however, when applications need to use - prefixes in character data or in attribute values, where they - cannot safely be expanded automatically; the - start/endPrefixMapping event supplies the information to the - application to expand prefixes in those contexts itself, if - necessary. - - Note that start/endPrefixMapping events are not guaranteed to - be properly nested relative to each-other: all - startPrefixMapping events will occur before the corresponding - startElement event, and all endPrefixMapping events will occur - after the corresponding endElement event, but their order is - not guaranteed.""" - - def endPrefixMapping(self, prefix): - """End the scope of a prefix-URI mapping. - - See startPrefixMapping for details. 
This event will always - occur after the corresponding endElement event, but the order - of endPrefixMapping events is not otherwise guaranteed.""" - - def startElement(self, name, attrs): - """Signals the start of an element in non-namespace mode. - - The name parameter contains the raw XML 1.0 name of the - element type as a string and the attrs parameter holds an - instance of the Attributes class containing the attributes of - the element.""" - - def endElement(self, name): - """Signals the end of an element in non-namespace mode. - - The name parameter contains the name of the element type, just - as with the startElement event.""" - - def startElementNS(self, name, qname, attrs): - """Signals the start of an element in namespace mode. - - The name parameter contains the name of the element type as a - (uri, localname) tuple, the qname parameter the raw XML 1.0 - name used in the source document, and the attrs parameter - holds an instance of the Attributes class containing the - attributes of the element. - - The uri part of the name tuple is None for elements which have - no namespace.""" - - def endElementNS(self, name, qname): - """Signals the end of an element in namespace mode. - - The name parameter contains the name of the element type, just - as with the startElementNS event.""" - - def characters(self, content): - """Receive notification of character data. - - The Parser will call this method to report each chunk of - character data. SAX parsers may return all contiguous - character data in a single chunk, or they may split it into - several chunks; however, all of the characters in any single - event must come from the same external entity so that the - Locator provides useful information.""" - - def ignorableWhitespace(self, whitespace): - """Receive notification of ignorable whitespace in element content. - - Validating Parsers must use this method to report each chunk - of ignorable whitespace (see the W3C XML 1.0 recommendation, - section 2.10): non-validating parsers may also use this method - if they are capable of parsing and using content models. - - SAX parsers may return all contiguous whitespace in a single - chunk, or they may split it into several chunks; however, all - of the characters in any single event must come from the same - external entity, so that the Locator provides useful - information.""" - - def processingInstruction(self, target, data): - """Receive notification of a processing instruction. - - The Parser will invoke this method once for each processing - instruction found: note that processing instructions may occur - before or after the main document element. - - A SAX parser should never report an XML declaration (XML 1.0, - section 2.8) or a text declaration (XML 1.0, section 4.3.1) - using this method.""" - - def skippedEntity(self, name): - """Receive notification of a skipped entity. - - The Parser will invoke this method once for each entity - skipped. Non-validating processors may skip entities if they - have not seen the declarations (because, for example, the - entity was declared in an external DTD subset). All processors - may skip external entities, depending on the values of the - http://xml.org/sax/features/external-general-entities and the - http://xml.org/sax/features/external-parameter-entities - properties.""" - - -# ===== DTDHandler ===== - -class DTDHandler: - """Handle DTD events. 
- - This interface specifies only those DTD events required for basic - parsing (unparsed entities and attributes).""" - - def notationDecl(self, name, publicId, systemId): - "Handle a notation declaration event." - - def unparsedEntityDecl(self, name, publicId, systemId, ndata): - "Handle an unparsed entity declaration event." - - -# ===== ENTITYRESOLVER ===== - -class EntityResolver: - """Basic interface for resolving entities. If you create an object - implementing this interface, then register the object with your - Parser, the parser will call the method in your object to - resolve all external entities. Note that DefaultHandler implements - this interface with the default behaviour.""" - - def resolveEntity(self, publicId, systemId): - """Resolve the system identifier of an entity and return either - the system identifier to read from as a string, or an InputSource - to read from.""" - return systemId - - -#============================================================================ -# -# CORE FEATURES -# -#============================================================================ - -feature_namespaces = "http://xml.org/sax/features/namespaces" -# true: Perform Namespace processing (default). -# false: Optionally do not perform Namespace processing -# (implies namespace-prefixes). -# access: (parsing) read-only; (not parsing) read/write - -feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes" -# true: Report the original prefixed names and attributes used for Namespace -# declarations. -# false: Do not report attributes used for Namespace declarations, and -# optionally do not report original prefixed names (default). -# access: (parsing) read-only; (not parsing) read/write - -feature_string_interning = "http://xml.org/sax/features/string-interning" -# true: All element names, prefixes, attribute names, Namespace URIs, and -# local names are interned using the built-in intern function. -# false: Names are not necessarily interned, although they may be (default). -# access: (parsing) read-only; (not parsing) read/write - -feature_validation = "http://xml.org/sax/features/validation" -# true: Report all validation errors (implies external-general-entities and -# external-parameter-entities). -# false: Do not report validation errors. -# access: (parsing) read-only; (not parsing) read/write - -feature_external_ges = "http://xml.org/sax/features/external-general-entities" -# true: Include all external general (text) entities. -# false: Do not include external general entities. -# access: (parsing) read-only; (not parsing) read/write - -feature_external_pes = "http://xml.org/sax/features/external-parameter-entities" -# true: Include all external parameter entities, including the external -# DTD subset. -# false: Do not include any external parameter entities, even the external -# DTD subset. -# access: (parsing) read-only; (not parsing) read/write - -all_features = [feature_namespaces, - feature_namespace_prefixes, - feature_string_interning, - feature_validation, - feature_external_ges, - feature_external_pes] - - -#============================================================================ -# -# CORE PROPERTIES -# -#============================================================================ - -property_lexical_handler = "http://xml.org/sax/properties/lexical-handler" -# data type: xml.sax.sax2lib.LexicalHandler -# description: An optional extension handler for lexical events like comments. 
-# access: read/write - -property_declaration_handler = "http://xml.org/sax/properties/declaration-handler" -# data type: xml.sax.sax2lib.DeclHandler -# description: An optional extension handler for DTD-related events other -# than notations and unparsed entities. -# access: read/write - -property_dom_node = "http://xml.org/sax/properties/dom-node" -# data type: org.w3c.dom.Node -# description: When parsing, the current DOM node being visited if this is -# a DOM iterator; when not parsing, the root DOM node for -# iteration. -# access: (parsing) read-only; (not parsing) read/write - -property_xml_string = "http://xml.org/sax/properties/xml-string" -# data type: String -# description: The literal string of characters that was the source for -# the current event. -# access: read-only - -property_encoding = "http://www.python.org/sax/properties/encoding" -# data type: String -# description: The name of the encoding to assume for input data. -# access: write: set the encoding, e.g. established by a higher-level -# protocol. May change during parsing (e.g. after -# processing a META tag) -# read: return the current encoding (possibly established through -# auto-detection. -# initial value: UTF-8 -# - -property_interning_dict = "http://www.python.org/sax/properties/interning-dict" -# data type: Dictionary -# description: The dictionary used to intern common strings in the document -# access: write: Request that the parser uses a specific dictionary, to -# allow interning across different documents -# read: return the current interning dictionary, or None -# - -all_properties = [property_lexical_handler, - property_dom_node, - property_declaration_handler, - property_xml_string, - property_encoding, - property_interning_dict] - - -class LexicalHandler: - """Optional SAX2 handler for lexical events. - - This handler is used to obtain lexical information about an XML - document, that is, information about how the document was encoded - (as opposed to what it contains, which is reported to the - ContentHandler), such as comments and CDATA marked section - boundaries. - - To set the LexicalHandler of an XMLReader, use the setProperty - method with the property identifier - 'http://xml.org/sax/properties/lexical-handler'.""" - - def comment(self, content): - """Reports a comment anywhere in the document (including the - DTD and outside the document element). - - content is a string that holds the contents of the comment.""" - - def startDTD(self, name, public_id, system_id): - """Report the start of the DTD declarations, if the document - has an associated DTD. - - A startEntity event will be reported before declaration events - from the external DTD subset are reported, and this can be - used to infer from which subset DTD declarations derive. - - name is the name of the document element type, public_id the - public identifier of the DTD (or None if none were supplied) - and system_id the system identfier of the external subset (or - None if none were supplied).""" - - def endDTD(self): - """Signals the end of DTD declarations.""" - - def startCDATA(self): - """Reports the beginning of a CDATA marked section. 
- - The contents of the CDATA marked section will be reported - through the characters event.""" - - def endCDATA(self): - """Reports the end of a CDATA marked section.""" diff --git a/Python313_13_x86_Template/Lib/xmlrpc/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/xmlrpc/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 730a86da..00000000 Binary files a/Python313_13_x86_Template/Lib/xmlrpc/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/xmlrpc/__pycache__/client.cpython-313.pyc b/Python313_13_x86_Template/Lib/xmlrpc/__pycache__/client.cpython-313.pyc deleted file mode 100644 index a907d4d3..00000000 Binary files a/Python313_13_x86_Template/Lib/xmlrpc/__pycache__/client.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/xmlrpc/server.py b/Python313_13_x86_Template/Lib/xmlrpc/server.py deleted file mode 100644 index 4dddb1d1..00000000 --- a/Python313_13_x86_Template/Lib/xmlrpc/server.py +++ /dev/null @@ -1,1002 +0,0 @@ -r"""XML-RPC Servers. - -This module can be used to create simple XML-RPC servers -by creating a server and either installing functions, a -class instance, or by extending the SimpleXMLRPCServer -class. - -It can also be used to handle XML-RPC requests in a CGI -environment using CGIXMLRPCRequestHandler. - -The Doc* classes can be used to create XML-RPC servers that -serve pydoc-style documentation in response to HTTP -GET requests. This documentation is dynamically generated -based on the functions and methods registered with the -server. - -A list of possible usage patterns follows: - -1. Install functions: - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_function(pow) -server.register_function(lambda x,y: x+y, 'add') -server.serve_forever() - -2. Install an instance: - -class MyFuncs: - def __init__(self): - # make all of the sys functions available through sys.func_name - import sys - self.sys = sys - def _listMethods(self): - # implement this method so that system.listMethods - # knows to advertise the sys methods - return list_public_methods(self) + \ - ['sys.' + method for method in list_public_methods(self.sys)] - def pow(self, x, y): return pow(x, y) - def add(self, x, y) : return x + y - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_introspection_functions() -server.register_instance(MyFuncs()) -server.serve_forever() - -3. Install an instance with custom dispatch method: - -class Math: - def _listMethods(self): - # this method must be present for system.listMethods - # to work - return ['add', 'pow'] - def _methodHelp(self, method): - # this method must be present for system.methodHelp - # to work - if method == 'add': - return "add(2,3) => 5" - elif method == 'pow': - return "pow(x, y[, z]) => number" - else: - # By convention, return empty - # string if no help is available - return "" - def _dispatch(self, method, params): - if method == 'pow': - return pow(*params) - elif method == 'add': - return params[0] + params[1] - else: - raise ValueError('bad method') - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_introspection_functions() -server.register_instance(Math()) -server.serve_forever() - -4. 
Subclass SimpleXMLRPCServer: - -class MathServer(SimpleXMLRPCServer): - def _dispatch(self, method, params): - try: - # We are forcing the 'export_' prefix on methods that are - # callable through XML-RPC to prevent potential security - # problems - func = getattr(self, 'export_' + method) - except AttributeError: - raise Exception('method "%s" is not supported' % method) - else: - return func(*params) - - def export_add(self, x, y): - return x + y - -server = MathServer(("localhost", 8000)) -server.serve_forever() - -5. CGI script: - -server = CGIXMLRPCRequestHandler() -server.register_function(pow) -server.handle_request() -""" - -# Written by Brian Quinlan (brian@sweetapp.com). -# Based on code written by Fredrik Lundh. - -from xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode -from http.server import BaseHTTPRequestHandler -from functools import partial -from inspect import signature -import html -import http.server -import socketserver -import sys -import os -import re -import pydoc -import traceback -try: - import fcntl -except ImportError: - fcntl = None - -def resolve_dotted_attribute(obj, attr, allow_dotted_names=True): - """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d - - Resolves a dotted attribute name to an object. Raises - an AttributeError if any attribute in the chain starts with a '_'. - - If the optional allow_dotted_names argument is false, dots are not - supported and this function operates similar to getattr(obj, attr). - """ - - if allow_dotted_names: - attrs = attr.split('.') - else: - attrs = [attr] - - for i in attrs: - if i.startswith('_'): - raise AttributeError( - 'attempt to access private attribute "%s"' % i - ) - else: - obj = getattr(obj,i) - return obj - -def list_public_methods(obj): - """Returns a list of attribute strings, found in the specified - object, which represent callable attributes""" - - return [member for member in dir(obj) - if not member.startswith('_') and - callable(getattr(obj, member))] - -class SimpleXMLRPCDispatcher: - """Mix-in class that dispatches XML-RPC requests. - - This class is used to register XML-RPC method handlers - and then to dispatch them. This class doesn't need to be - instanced directly when used by SimpleXMLRPCServer but it - can be instanced when used by the MultiPathXMLRPCServer - """ - - def __init__(self, allow_none=False, encoding=None, - use_builtin_types=False): - self.funcs = {} - self.instance = None - self.allow_none = allow_none - self.encoding = encoding or 'utf-8' - self.use_builtin_types = use_builtin_types - - def register_instance(self, instance, allow_dotted_names=False): - """Registers an instance to respond to XML-RPC requests. - - Only one instance can be installed at a time. - - If the registered instance has a _dispatch method then that - method will be called with the name of the XML-RPC method and - its parameters as a tuple - e.g. instance._dispatch('add',(2,3)) - - If the registered instance does not have a _dispatch method - then the instance will be searched to find a matching method - and, if found, will be called. Methods beginning with an '_' - are considered private and will not be called by - SimpleXMLRPCServer. - - If a registered function matches an XML-RPC request, then it - will be called instead of the registered instance. - - If the optional allow_dotted_names argument is true and the - instance does not have a _dispatch method, method names - containing dots are supported and resolved, as long as none of - the name segments start with an '_'. 
- - *** SECURITY WARNING: *** - - Enabling the allow_dotted_names options allows intruders - to access your module's global variables and may allow - intruders to execute arbitrary code on your machine. Only - use this option on a secure, closed network. - - """ - - self.instance = instance - self.allow_dotted_names = allow_dotted_names - - def register_function(self, function=None, name=None): - """Registers a function to respond to XML-RPC requests. - - The optional name argument can be used to set a Unicode name - for the function. - """ - # decorator factory - if function is None: - return partial(self.register_function, name=name) - - if name is None: - name = function.__name__ - self.funcs[name] = function - - return function - - def register_introspection_functions(self): - """Registers the XML-RPC introspection methods in the system - namespace. - - see http://xmlrpc.usefulinc.com/doc/reserved.html - """ - - self.funcs.update({'system.listMethods' : self.system_listMethods, - 'system.methodSignature' : self.system_methodSignature, - 'system.methodHelp' : self.system_methodHelp}) - - def register_multicall_functions(self): - """Registers the XML-RPC multicall method in the system - namespace. - - see http://www.xmlrpc.com/discuss/msgReader$1208""" - - self.funcs.update({'system.multicall' : self.system_multicall}) - - def _marshaled_dispatch(self, data, dispatch_method = None, path = None): - """Dispatches an XML-RPC method from marshalled (XML) data. - - XML-RPC methods are dispatched from the marshalled (XML) data - using the _dispatch method and the result is returned as - marshalled data. For backwards compatibility, a dispatch - function can be provided as an argument (see comment in - SimpleXMLRPCRequestHandler.do_POST) but overriding the - existing method through subclassing is the preferred means - of changing method dispatch behavior. - """ - - try: - params, method = loads(data, use_builtin_types=self.use_builtin_types) - - # generate response - if dispatch_method is not None: - response = dispatch_method(method, params) - else: - response = self._dispatch(method, params) - # wrap response in a singleton tuple - response = (response,) - response = dumps(response, methodresponse=1, - allow_none=self.allow_none, encoding=self.encoding) - except Fault as fault: - response = dumps(fault, allow_none=self.allow_none, - encoding=self.encoding) - except BaseException as exc: - response = dumps( - Fault(1, "%s:%s" % (type(exc), exc)), - encoding=self.encoding, allow_none=self.allow_none, - ) - - return response.encode(self.encoding, 'xmlcharrefreplace') - - def system_listMethods(self): - """system.listMethods() => ['add', 'subtract', 'multiple'] - - Returns a list of the methods supported by the server.""" - - methods = set(self.funcs.keys()) - if self.instance is not None: - # Instance can implement _listMethod to return a list of - # methods - if hasattr(self.instance, '_listMethods'): - methods |= set(self.instance._listMethods()) - # if the instance has a _dispatch method then we - # don't have enough information to provide a list - # of methods - elif not hasattr(self.instance, '_dispatch'): - methods |= set(list_public_methods(self.instance)) - return sorted(methods) - - def system_methodSignature(self, method_name): - """system.methodSignature('add') => [double, int, int] - - Returns a list describing the signature of the method. In the - above example, the add method takes two integers as arguments - and returns a double result. 
- - This server does NOT support system.methodSignature.""" - - # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html - - return 'signatures not supported' - - def system_methodHelp(self, method_name): - """system.methodHelp('add') => "Adds two integers together" - - Returns a string containing documentation for the specified method.""" - - method = None - if method_name in self.funcs: - method = self.funcs[method_name] - elif self.instance is not None: - # Instance can implement _methodHelp to return help for a method - if hasattr(self.instance, '_methodHelp'): - return self.instance._methodHelp(method_name) - # if the instance has a _dispatch method then we - # don't have enough information to provide help - elif not hasattr(self.instance, '_dispatch'): - try: - method = resolve_dotted_attribute( - self.instance, - method_name, - self.allow_dotted_names - ) - except AttributeError: - pass - - # Note that we aren't checking that the method actually - # be a callable object of some kind - if method is None: - return "" - else: - return pydoc.getdoc(method) - - def system_multicall(self, call_list): - """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \ -[[4], ...] - - Allows the caller to package multiple XML-RPC calls into a single - request. - - See http://www.xmlrpc.com/discuss/msgReader$1208 - """ - - results = [] - for call in call_list: - method_name = call['methodName'] - params = call['params'] - - try: - # XXX A marshalling error in any response will fail the entire - # multicall. If someone cares they should fix this. - results.append([self._dispatch(method_name, params)]) - except Fault as fault: - results.append( - {'faultCode' : fault.faultCode, - 'faultString' : fault.faultString} - ) - except BaseException as exc: - results.append( - {'faultCode' : 1, - 'faultString' : "%s:%s" % (type(exc), exc)} - ) - return results - - def _dispatch(self, method, params): - """Dispatches the XML-RPC method. - - XML-RPC calls are forwarded to a registered function that - matches the called XML-RPC method name. If no such function - exists then the call is forwarded to the registered instance, - if available. - - If the registered instance has a _dispatch method then that - method will be called with the name of the XML-RPC method and - its parameters as a tuple - e.g. instance._dispatch('add',(2,3)) - - If the registered instance does not have a _dispatch method - then the instance will be searched to find a matching method - and, if found, will be called. - - Methods beginning with an '_' are considered private and will - not be called. - """ - - try: - # call the matching registered function - func = self.funcs[method] - except KeyError: - pass - else: - if func is not None: - return func(*params) - raise Exception('method "%s" is not supported' % method) - - if self.instance is not None: - if hasattr(self.instance, '_dispatch'): - # call the `_dispatch` method on the instance - return self.instance._dispatch(method, params) - - # call the instance's method directly - try: - func = resolve_dotted_attribute( - self.instance, - method, - self.allow_dotted_names - ) - except AttributeError: - pass - else: - if func is not None: - return func(*params) - - raise Exception('method "%s" is not supported' % method) - -class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler): - """Simple XML-RPC request handler class. - - Handles all HTTP POST requests and attempts to decode them as - XML-RPC requests. 
- """ - - # Class attribute listing the accessible path components; - # paths not on this list will result in a 404 error. - rpc_paths = ('/', '/RPC2', '/pydoc.css') - - #if not None, encode responses larger than this, if possible - encode_threshold = 1400 #a common MTU - - #Override form StreamRequestHandler: full buffering of output - #and no Nagle. - wbufsize = -1 - disable_nagle_algorithm = True - - # a re to match a gzip Accept-Encoding - aepattern = re.compile(r""" - \s* ([^\s;]+) \s* #content-coding - (;\s* q \s*=\s* ([0-9\.]+))? #q - """, re.VERBOSE | re.IGNORECASE) - - def accept_encodings(self): - r = {} - ae = self.headers.get("Accept-Encoding", "") - for e in ae.split(","): - match = self.aepattern.match(e) - if match: - v = match.group(3) - v = float(v) if v else 1.0 - r[match.group(1)] = v - return r - - def is_rpc_path_valid(self): - if self.rpc_paths: - return self.path in self.rpc_paths - else: - # If .rpc_paths is empty, just assume all paths are legal - return True - - def do_POST(self): - """Handles the HTTP POST request. - - Attempts to interpret all HTTP POST requests as XML-RPC calls, - which are forwarded to the server's _dispatch method for handling. - """ - - # Check that the path is legal - if not self.is_rpc_path_valid(): - self.report_404() - return - - try: - # Get arguments by reading body of request. - # We read this in chunks to avoid straining - # socket.read(); around the 10 or 15Mb mark, some platforms - # begin to have problems (bug #792570). - max_chunk_size = 10*1024*1024 - size_remaining = int(self.headers["content-length"]) - L = [] - while size_remaining: - chunk_size = min(size_remaining, max_chunk_size) - chunk = self.rfile.read(chunk_size) - if not chunk: - break - L.append(chunk) - size_remaining -= len(L[-1]) - data = b''.join(L) - - data = self.decode_request_content(data) - if data is None: - return #response has been sent - - # In previous versions of SimpleXMLRPCServer, _dispatch - # could be overridden in this class, instead of in - # SimpleXMLRPCDispatcher. To maintain backwards compatibility, - # check to see if a subclass implements _dispatch and dispatch - # using that method if present. 
- response = self.server._marshaled_dispatch( - data, getattr(self, '_dispatch', None), self.path - ) - except Exception as e: # This should only happen if the module is buggy - # internal error, report as HTTP server error - self.send_response(500) - - # Send information about the exception if requested - if hasattr(self.server, '_send_traceback_header') and \ - self.server._send_traceback_header: - self.send_header("X-exception", str(e)) - trace = traceback.format_exc() - trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII') - self.send_header("X-traceback", trace) - - self.send_header("Content-length", "0") - self.end_headers() - else: - self.send_response(200) - self.send_header("Content-type", "text/xml") - if self.encode_threshold is not None: - if len(response) > self.encode_threshold: - q = self.accept_encodings().get("gzip", 0) - if q: - try: - response = gzip_encode(response) - self.send_header("Content-Encoding", "gzip") - except NotImplementedError: - pass - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - - def decode_request_content(self, data): - #support gzip encoding of request - encoding = self.headers.get("content-encoding", "identity").lower() - if encoding == "identity": - return data - if encoding == "gzip": - try: - return gzip_decode(data) - except NotImplementedError: - self.send_response(501, "encoding %r not supported" % encoding) - except ValueError: - self.send_response(400, "error decoding gzip content") - else: - self.send_response(501, "encoding %r not supported" % encoding) - self.send_header("Content-length", "0") - self.end_headers() - - def report_404 (self): - # Report a 404 error - self.send_response(404) - response = b'No such page' - self.send_header("Content-type", "text/plain") - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - - def log_request(self, code='-', size='-'): - """Selectively log an accepted request.""" - - if self.server.logRequests: - BaseHTTPRequestHandler.log_request(self, code, size) - -class SimpleXMLRPCServer(socketserver.TCPServer, - SimpleXMLRPCDispatcher): - """Simple XML-RPC server. - - Simple XML-RPC server that allows functions and a single instance - to be installed to handle requests. The default implementation - attempts to dispatch XML-RPC calls to the functions or instance - installed in the server. Override the _dispatch method inherited - from SimpleXMLRPCDispatcher to change this behavior. - """ - - allow_reuse_address = True - - # Warning: this is for debugging purposes only! Never set this to True in - # production code, as will be sending out sensitive information (exception - # and stack trace details) when exceptions are raised inside - # SimpleXMLRPCRequestHandler.do_POST - _send_traceback_header = False - - def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, - logRequests=True, allow_none=False, encoding=None, - bind_and_activate=True, use_builtin_types=False): - self.logRequests = logRequests - - SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types) - socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate) - - -class MultiPathXMLRPCServer(SimpleXMLRPCServer): - """Multipath XML-RPC Server - This specialization of SimpleXMLRPCServer allows the user to create - multiple Dispatcher instances and assign them to different - HTTP request paths. 
This makes it possible to run two or more - 'virtual XML-RPC servers' at the same port. - Make sure that the requestHandler accepts the paths in question. - """ - def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, - logRequests=True, allow_none=False, encoding=None, - bind_and_activate=True, use_builtin_types=False): - - SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none, - encoding, bind_and_activate, use_builtin_types) - self.dispatchers = {} - self.allow_none = allow_none - self.encoding = encoding or 'utf-8' - - def add_dispatcher(self, path, dispatcher): - self.dispatchers[path] = dispatcher - return dispatcher - - def get_dispatcher(self, path): - return self.dispatchers[path] - - def _marshaled_dispatch(self, data, dispatch_method = None, path = None): - try: - response = self.dispatchers[path]._marshaled_dispatch( - data, dispatch_method, path) - except BaseException as exc: - # report low level exception back to server - # (each dispatcher should have handled their own - # exceptions) - response = dumps( - Fault(1, "%s:%s" % (type(exc), exc)), - encoding=self.encoding, allow_none=self.allow_none) - response = response.encode(self.encoding, 'xmlcharrefreplace') - return response - -class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher): - """Simple handler for XML-RPC data passed through CGI.""" - - def __init__(self, allow_none=False, encoding=None, use_builtin_types=False): - SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types) - - def handle_xmlrpc(self, request_text): - """Handle a single XML-RPC request""" - - response = self._marshaled_dispatch(request_text) - - print('Content-Type: text/xml') - print('Content-Length: %d' % len(response)) - print() - sys.stdout.flush() - sys.stdout.buffer.write(response) - sys.stdout.buffer.flush() - - def handle_get(self): - """Handle a single HTTP GET request. - - Default implementation indicates an error because - XML-RPC uses the POST method. - """ - - code = 400 - message, explain = BaseHTTPRequestHandler.responses[code] - - response = http.server.DEFAULT_ERROR_MESSAGE % \ - { - 'code' : code, - 'message' : message, - 'explain' : explain - } - response = response.encode('utf-8') - print('Status: %d %s' % (code, message)) - print('Content-Type: %s' % http.server.DEFAULT_ERROR_CONTENT_TYPE) - print('Content-Length: %d' % len(response)) - print() - sys.stdout.flush() - sys.stdout.buffer.write(response) - sys.stdout.buffer.flush() - - def handle_request(self, request_text=None): - """Handle a single XML-RPC request passed through a CGI post method. - - If no XML data is given then it is read from stdin. The resulting - XML-RPC response is printed to stdout along with the correct HTTP - headers. - """ - - if request_text is None and \ - os.environ.get('REQUEST_METHOD', None) == 'GET': - self.handle_get() - else: - # POST data is normally available through stdin - try: - length = int(os.environ.get('CONTENT_LENGTH', None)) - except (ValueError, TypeError): - length = -1 - if request_text is None: - request_text = sys.stdin.read(length) - - self.handle_xmlrpc(request_text) - - -# ----------------------------------------------------------------------------- -# Self documenting XML-RPC Server. - -class ServerHTMLDoc(pydoc.HTMLDoc): - """Class used to generate pydoc HTML document for a server""" - - def markup(self, text, escape=None, funcs={}, classes={}, methods={}): - """Mark up some plain text, given a context of symbols to look for. 
-        Each context dictionary maps object names to anchor names."""
-        escape = escape or self.escape
-        results = []
-        here = 0
-
-        # XXX Note that this regular expression does not allow for the
-        # hyperlinking of arbitrary strings being used as method
-        # names. Only methods with names consisting of word characters
-        # and '.'s are hyperlinked.
-        pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|'
-                                r'RFC[- ]?(\d+)|'
-                                r'PEP[- ]?(\d+)|'
-                                r'(self\.)?((?:\w|\.)+))\b')
-        while match := pattern.search(text, here):
-            start, end = match.span()
-            results.append(escape(text[here:start]))
-
-            all, scheme, rfc, pep, selfdot, name = match.groups()
-            if scheme:
-                url = escape(all).replace('"', '&quot;')
-                results.append('<a href="%s">%s</a>' % (url, url))
-            elif rfc:
-                url = 'https://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
-                results.append('<a href="%s">%s</a>' % (url, escape(all)))
-            elif pep:
-                url = 'https://peps.python.org/pep-%04d/' % int(pep)
-                results.append('<a href="%s">%s</a>' % (url, escape(all)))
-            elif text[end:end+1] == '(':
-                results.append(self.namelink(name, methods, funcs, classes))
-            elif selfdot:
-                results.append('self.<strong>%s</strong>' % name)
-            else:
-                results.append(self.namelink(name, classes))
-            here = end
-        results.append(escape(text[here:]))
-        return ''.join(results)
-
-    def docroutine(self, object, name, mod=None,
-                   funcs={}, classes={}, methods={}, cl=None):
-        """Produce HTML documentation for a function or method object."""
-
-        anchor = (cl and cl.__name__ or '') + '-' + name
-        note = ''
-
-        title = '<a name="%s"><strong>%s</strong></a>' % (
-            self.escape(anchor), self.escape(name))
-
-        if callable(object):
-            argspec = str(signature(object))
-        else:
-            argspec = '(...)'
-
-        if isinstance(object, tuple):
-            argspec = object[0] or argspec
-            docstring = object[1] or ""
-        else:
-            docstring = pydoc.getdoc(object)
-
-        decl = title + argspec + (note and self.grey(
-               '<font face="helvetica, arial">%s</font>' % note))
-
-        doc = self.markup(
-            docstring, self.preformat, funcs, classes, methods)
-        doc = doc and '<dd><tt>%s</tt></dd>' % doc
-        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
-
-    def docserver(self, server_name, package_documentation, methods):
-        """Produce HTML documentation for an XML-RPC server."""
-
-        fdict = {}
-        for key, value in methods.items():
-            fdict[key] = '#-' + key
-            fdict[value] = fdict[key]
-
-        server_name = self.escape(server_name)
-        head = '<big><big><strong>%s</strong></big></big>' % server_name
-        result = self.heading(head)
-
-        doc = self.markup(package_documentation, self.preformat, fdict)
-        doc = doc and '<tt>%s</tt>' % doc
-        result = result + '<p>%s</p>\n' % doc
-
-        contents = []
-        method_items = sorted(methods.items())
-        for key, value in method_items:
-            contents.append(self.docroutine(value, key, funcs=fdict))
-        result = result + self.bigsection(
-            'Methods', 'functions', ''.join(contents))
-
-        return result
-
-
-    def page(self, title, contents):
-        """Format an HTML page."""
-        css_path = "/pydoc.css"
-        css_link = (
-            '<link rel="stylesheet" type="text/css" href="%s">' %
-            css_path)
-        return '''\
-<!DOCTYPE html>
-<html lang="en">
-<head>
-<meta charset="utf-8">
-<title>Python: %s</title>
-%s</head><body>%s</body></html>''' % (title, css_link, contents)
-
-class XMLRPCDocGenerator:
-    """Generates documentation for an XML-RPC server.
-
-    This class is designed as mix-in and should not
-    be constructed directly.
-    """
-
-    def __init__(self):
-        # setup variables used for HTML documentation
-        self.server_name = 'XML-RPC Server Documentation'
-        self.server_documentation = \
-            "This server exports the following methods through the XML-RPC "\
-            "protocol."
-        self.server_title = 'XML-RPC Server Documentation'
-
-    def set_server_title(self, server_title):
-        """Set the HTML title of the generated server documentation"""
-
-        self.server_title = server_title
-
-    def set_server_name(self, server_name):
-        """Set the name of the generated HTML server documentation"""
-
-        self.server_name = server_name
-
-    def set_server_documentation(self, server_documentation):
-        """Set the documentation string for the entire server."""
-
-        self.server_documentation = server_documentation
-
-    def generate_html_documentation(self):
-        """generate_html_documentation() => html documentation for the server
-
-        Generates HTML documentation for the server using introspection for
-        installed functions and instances that do not implement the
-        _dispatch method. Alternatively, instances can choose to implement
-        the _get_method_argstring(method_name) method to provide the
-        argument string used in the documentation and the
-        _methodHelp(method_name) method to provide the help text used
-        in the documentation."""
-
-        methods = {}
-
-        for method_name in self.system_listMethods():
-            if method_name in self.funcs:
-                method = self.funcs[method_name]
-            elif self.instance is not None:
-                method_info = [None, None] # argspec, documentation
-                if hasattr(self.instance, '_get_method_argstring'):
-                    method_info[0] = self.instance._get_method_argstring(method_name)
-                if hasattr(self.instance, '_methodHelp'):
-                    method_info[1] = self.instance._methodHelp(method_name)
-
-                method_info = tuple(method_info)
-                if method_info != (None, None):
-                    method = method_info
-                elif not hasattr(self.instance, '_dispatch'):
-                    try:
-                        method = resolve_dotted_attribute(
-                                    self.instance,
-                                    method_name
-                                    )
-                    except AttributeError:
-                        method = method_info
-                else:
-                    method = method_info
-            else:
-                assert 0, "Could not find method in self.functions and no "\
-                          "instance installed"
-
-            methods[method_name] = method
-
-        documenter = ServerHTMLDoc()
-        documentation = documenter.docserver(
-                                self.server_name,
-                                self.server_documentation,
-                                methods
-                            )
-
-        return documenter.page(html.escape(self.server_title), documentation)
-
-class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
-    """XML-RPC and documentation request handler class.
-
-    Handles all HTTP POST requests and attempts to decode them as
-    XML-RPC requests.
-
-    Handles all HTTP GET requests and interprets them as requests
-    for documentation.
- """ - - def _get_css(self, url): - path_here = os.path.dirname(os.path.realpath(__file__)) - css_path = os.path.join(path_here, "..", "pydoc_data", "_pydoc.css") - with open(css_path, mode="rb") as fp: - return fp.read() - - def do_GET(self): - """Handles the HTTP GET request. - - Interpret all HTTP GET requests as requests for server - documentation. - """ - # Check that the path is legal - if not self.is_rpc_path_valid(): - self.report_404() - return - - if self.path.endswith('.css'): - content_type = 'text/css' - response = self._get_css(self.path) - else: - content_type = 'text/html' - response = self.server.generate_html_documentation().encode('utf-8') - - self.send_response(200) - self.send_header('Content-Type', '%s; charset=UTF-8' % content_type) - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - -class DocXMLRPCServer( SimpleXMLRPCServer, - XMLRPCDocGenerator): - """XML-RPC and HTML documentation server. - - Adds the ability to serve server documentation to the capabilities - of SimpleXMLRPCServer. - """ - - def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler, - logRequests=True, allow_none=False, encoding=None, - bind_and_activate=True, use_builtin_types=False): - SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, - allow_none, encoding, bind_and_activate, - use_builtin_types) - XMLRPCDocGenerator.__init__(self) - -class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler, - XMLRPCDocGenerator): - """Handler for XML-RPC data and documentation requests passed through - CGI""" - - def handle_get(self): - """Handles the HTTP GET request. - - Interpret all HTTP GET requests as requests for server - documentation. - """ - - response = self.generate_html_documentation().encode('utf-8') - - print('Content-Type: text/html') - print('Content-Length: %d' % len(response)) - print() - sys.stdout.flush() - sys.stdout.buffer.write(response) - sys.stdout.buffer.flush() - - def __init__(self): - CGIXMLRPCRequestHandler.__init__(self) - XMLRPCDocGenerator.__init__(self) - - -if __name__ == '__main__': - import datetime - - class ExampleService: - def getData(self): - return '42' - - class currentTime: - @staticmethod - def getCurrentTime(): - return datetime.datetime.now() - - with SimpleXMLRPCServer(("localhost", 8000)) as server: - server.register_function(pow) - server.register_function(lambda x,y: x+y, 'add') - server.register_instance(ExampleService(), allow_dotted_names=True) - server.register_multicall_functions() - print('Serving XML-RPC on localhost port 8000') - print('It is advisable to run this example server within a secure, closed network.') - try: - server.serve_forever() - except KeyboardInterrupt: - print("\nKeyboard interrupt received, exiting.") - sys.exit(0) diff --git a/Python313_13_x86_Template/Lib/zipapp.py b/Python313_13_x86_Template/Lib/zipapp.py deleted file mode 100644 index 4ffacc49..00000000 --- a/Python313_13_x86_Template/Lib/zipapp.py +++ /dev/null @@ -1,229 +0,0 @@ -import contextlib -import os -import pathlib -import shutil -import stat -import sys -import zipfile - -__all__ = ['ZipAppError', 'create_archive', 'get_interpreter'] - - -# The __main__.py used if the users specifies "-m module:fn". -# Note that this will always be written as UTF-8 (module and -# function names can be non-ASCII in Python 3). -# We add a coding cookie even though UTF-8 is the default in Python 3 -# because the resulting archive may be intended to be run under Python 2. 
-MAIN_TEMPLATE = """\ -# -*- coding: utf-8 -*- -import {module} -{module}.{fn}() -""" - - -# The Windows launcher defaults to UTF-8 when parsing shebang lines if the -# file has no BOM. So use UTF-8 on Windows. -# On Unix, use the filesystem encoding. -if sys.platform.startswith('win'): - shebang_encoding = 'utf-8' -else: - shebang_encoding = sys.getfilesystemencoding() - - -class ZipAppError(ValueError): - pass - - -@contextlib.contextmanager -def _maybe_open(archive, mode): - if isinstance(archive, (str, os.PathLike)): - with open(archive, mode) as f: - yield f - else: - yield archive - - -def _write_file_prefix(f, interpreter): - """Write a shebang line.""" - if interpreter: - shebang = b'#!' + interpreter.encode(shebang_encoding) + b'\n' - f.write(shebang) - - -def _copy_archive(archive, new_archive, interpreter=None): - """Copy an application archive, modifying the shebang line.""" - with _maybe_open(archive, 'rb') as src: - # Skip the shebang line from the source. - # Read 2 bytes of the source and check if they are #!. - first_2 = src.read(2) - if first_2 == b'#!': - # Discard the initial 2 bytes and the rest of the shebang line. - first_2 = b'' - src.readline() - - with _maybe_open(new_archive, 'wb') as dst: - _write_file_prefix(dst, interpreter) - # If there was no shebang, "first_2" contains the first 2 bytes - # of the source file, so write them before copying the rest - # of the file. - dst.write(first_2) - shutil.copyfileobj(src, dst) - - if interpreter and isinstance(new_archive, str): - os.chmod(new_archive, os.stat(new_archive).st_mode | stat.S_IEXEC) - - -def create_archive(source, target=None, interpreter=None, main=None, - filter=None, compressed=False): - """Create an application archive from SOURCE. - - The SOURCE can be the name of a directory, or a filename or a file-like - object referring to an existing archive. - - The content of SOURCE is packed into an application archive in TARGET, - which can be a filename or a file-like object. If SOURCE is a directory, - TARGET can be omitted and will default to the name of SOURCE with .pyz - appended. - - The created application archive will have a shebang line specifying - that it should run with INTERPRETER (there will be no shebang line if - INTERPRETER is None), and a __main__.py which runs MAIN (if MAIN is - not specified, an existing __main__.py will be used). It is an error - to specify MAIN for anything other than a directory source with no - __main__.py, and it is an error to omit MAIN if the directory has no - __main__.py. - """ - # Are we copying an existing archive? - source_is_file = False - if hasattr(source, 'read') and hasattr(source, 'readline'): - source_is_file = True - else: - source = pathlib.Path(source) - if source.is_file(): - source_is_file = True - - if source_is_file: - _copy_archive(source, target, interpreter) - return - - # We are creating a new archive from a directory. - if not source.exists(): - raise ZipAppError("Source does not exist") - has_main = (source / '__main__.py').is_file() - if main and has_main: - raise ZipAppError( - "Cannot specify entry point if the source has __main__.py") - if not (main or has_main): - raise ZipAppError("Archive has no entry point") - - main_py = None - if main: - # Check that main has the right format. 
- mod, sep, fn = main.partition(':') - mod_ok = all(part.isidentifier() for part in mod.split('.')) - fn_ok = all(part.isidentifier() for part in fn.split('.')) - if not (sep == ':' and mod_ok and fn_ok): - raise ZipAppError("Invalid entry point: " + main) - main_py = MAIN_TEMPLATE.format(module=mod, fn=fn) - - if target is None: - target = source.with_suffix('.pyz') - elif not hasattr(target, 'write'): - target = pathlib.Path(target) - - # Create the list of files to add to the archive now, in case - # the target is being created in the source directory - we - # don't want the target being added to itself - files_to_add = sorted(source.rglob('*')) - - # The target cannot be in the list of files to add. If it were, we'd - # end up overwriting the source file and writing the archive into - # itself, which is an error. We therefore check for that case and - # provide a helpful message for the user. - - # Note that we only do a simple path equality check. This won't - # catch every case, but it will catch the common case where the - # source is the CWD and the target is a file in the CWD. More - # thorough checks don't provide enough value to justify the extra - # cost. - - # If target is a file-like object, it will simply fail to compare - # equal to any of the entries in files_to_add, so there's no need - # to add a special check for that. - if target in files_to_add: - raise ZipAppError( - f"The target archive {target} overwrites one of the source files.") - - with _maybe_open(target, 'wb') as fd: - _write_file_prefix(fd, interpreter) - compression = (zipfile.ZIP_DEFLATED if compressed else - zipfile.ZIP_STORED) - with zipfile.ZipFile(fd, 'w', compression=compression) as z: - for child in files_to_add: - arcname = child.relative_to(source) - if filter is None or filter(arcname): - z.write(child, arcname.as_posix()) - if main_py: - z.writestr('__main__.py', main_py.encode('utf-8')) - - if interpreter and not hasattr(target, 'write'): - target.chmod(target.stat().st_mode | stat.S_IEXEC) - - -def get_interpreter(archive): - with _maybe_open(archive, 'rb') as f: - if f.read(2) == b'#!': - return f.readline().strip().decode(shebang_encoding) - - -def main(args=None): - """Run the zipapp command line interface. - - The ARGS parameter lets you specify the argument list directly. - Omitting ARGS (or setting it to None) works as for argparse, using - sys.argv[1:] as the argument list. - """ - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument('--output', '-o', default=None, - help="The name of the output archive. " - "Required if SOURCE is an archive.") - parser.add_argument('--python', '-p', default=None, - help="The name of the Python interpreter to use " - "(default: no shebang line).") - parser.add_argument('--main', '-m', default=None, - help="The main function of the application " - "(default: use an existing __main__.py).") - parser.add_argument('--compress', '-c', action='store_true', - help="Compress files with the deflate method. " - "Files are stored uncompressed by default.") - parser.add_argument('--info', default=False, action='store_true', - help="Display the interpreter from the archive.") - parser.add_argument('source', - help="Source directory (or existing archive).") - - args = parser.parse_args(args) - - # Handle `python -m zipapp archive.pyz --info`. 
- if args.info: - if not os.path.isfile(args.source): - raise SystemExit("Can only get info for an archive file") - interpreter = get_interpreter(args.source) - print("Interpreter: {}".format(interpreter or "")) - sys.exit(0) - - if os.path.isfile(args.source): - if args.output is None or (os.path.exists(args.output) and - os.path.samefile(args.source, args.output)): - raise SystemExit("In-place editing of archives is not supported") - if args.main: - raise SystemExit("Cannot change the main function when copying") - - create_archive(args.source, args.output, - interpreter=args.python, main=args.main, - compressed=args.compress) - - -if __name__ == '__main__': - main() diff --git a/Python313_13_x86_Template/Lib/zipfile/__init__.py b/Python313_13_x86_Template/Lib/zipfile/__init__.py deleted file mode 100644 index 3d889e9c..00000000 --- a/Python313_13_x86_Template/Lib/zipfile/__init__.py +++ /dev/null @@ -1,2375 +0,0 @@ -""" -Read and write ZIP files. - -XXX references to utf-8 need further investigation. -""" -import binascii -import importlib.util -import io -import os -import shutil -import stat -import struct -import sys -import threading -import time - -try: - import zlib # We may need its compression method - crc32 = zlib.crc32 -except ImportError: - zlib = None - crc32 = binascii.crc32 - -try: - import bz2 # We may need its compression method -except ImportError: - bz2 = None - -try: - import lzma # We may need its compression method -except ImportError: - lzma = None - -__all__ = ["BadZipFile", "BadZipfile", "error", - "ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA", - "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile", - "Path"] - -class BadZipFile(Exception): - pass - - -class LargeZipFile(Exception): - """ - Raised when writing a zipfile, the zipfile requires ZIP64 extensions - and those extensions are disabled. - """ - -error = BadZipfile = BadZipFile # Pre-3.2 compatibility names - - -ZIP64_LIMIT = (1 << 31) - 1 -ZIP_FILECOUNT_LIMIT = (1 << 16) - 1 -ZIP_MAX_COMMENT = (1 << 16) - 1 - -# constants for Zip file compression methods -ZIP_STORED = 0 -ZIP_DEFLATED = 8 -ZIP_BZIP2 = 12 -ZIP_LZMA = 14 -# Other ZIP compression methods not supported - -DEFAULT_VERSION = 20 -ZIP64_VERSION = 45 -BZIP2_VERSION = 46 -LZMA_VERSION = 63 -# we recognize (but not necessarily support) all features up to that version -MAX_EXTRACT_VERSION = 63 - -# Below are some formats and associated data for reading/writing headers using -# the struct module. 
The names and structures of headers/records are those used -# in the PKWARE description of the ZIP file format: -# http://www.pkware.com/documents/casestudies/APPNOTE.TXT -# (URL valid as of January 2008) - -# The "end of central directory" structure, magic number, size, and indices -# (section V.I in the format document) -structEndArchive = b"<4s4H2LH" -stringEndArchive = b"PK\005\006" -sizeEndCentDir = struct.calcsize(structEndArchive) - -_ECD_SIGNATURE = 0 -_ECD_DISK_NUMBER = 1 -_ECD_DISK_START = 2 -_ECD_ENTRIES_THIS_DISK = 3 -_ECD_ENTRIES_TOTAL = 4 -_ECD_SIZE = 5 -_ECD_OFFSET = 6 -_ECD_COMMENT_SIZE = 7 -# These last two indices are not part of the structure as defined in the -# spec, but they are used internally by this module as a convenience -_ECD_COMMENT = 8 -_ECD_LOCATION = 9 - -# The "central directory" structure, magic number, size, and indices -# of entries in the structure (section V.F in the format document) -structCentralDir = "<4s4B4HL2L5H2L" -stringCentralDir = b"PK\001\002" -sizeCentralDir = struct.calcsize(structCentralDir) - -# indexes of entries in the central directory structure -_CD_SIGNATURE = 0 -_CD_CREATE_VERSION = 1 -_CD_CREATE_SYSTEM = 2 -_CD_EXTRACT_VERSION = 3 -_CD_EXTRACT_SYSTEM = 4 -_CD_FLAG_BITS = 5 -_CD_COMPRESS_TYPE = 6 -_CD_TIME = 7 -_CD_DATE = 8 -_CD_CRC = 9 -_CD_COMPRESSED_SIZE = 10 -_CD_UNCOMPRESSED_SIZE = 11 -_CD_FILENAME_LENGTH = 12 -_CD_EXTRA_FIELD_LENGTH = 13 -_CD_COMMENT_LENGTH = 14 -_CD_DISK_NUMBER_START = 15 -_CD_INTERNAL_FILE_ATTRIBUTES = 16 -_CD_EXTERNAL_FILE_ATTRIBUTES = 17 -_CD_LOCAL_HEADER_OFFSET = 18 - -# General purpose bit flags -# Zip Appnote: 4.4.4 general purpose bit flag: (2 bytes) -_MASK_ENCRYPTED = 1 << 0 -# Bits 1 and 2 have different meanings depending on the compression used. -_MASK_COMPRESS_OPTION_1 = 1 << 1 -# _MASK_COMPRESS_OPTION_2 = 1 << 2 -# _MASK_USE_DATA_DESCRIPTOR: If set, crc-32, compressed size and uncompressed -# size are zero in the local header and the real values are written in the data -# descriptor immediately following the compressed data. -_MASK_USE_DATA_DESCRIPTOR = 1 << 3 -# Bit 4: Reserved for use with compression method 8, for enhanced deflating. -# _MASK_RESERVED_BIT_4 = 1 << 4 -_MASK_COMPRESSED_PATCH = 1 << 5 -_MASK_STRONG_ENCRYPTION = 1 << 6 -# _MASK_UNUSED_BIT_7 = 1 << 7 -# _MASK_UNUSED_BIT_8 = 1 << 8 -# _MASK_UNUSED_BIT_9 = 1 << 9 -# _MASK_UNUSED_BIT_10 = 1 << 10 -_MASK_UTF_FILENAME = 1 << 11 -# Bit 12: Reserved by PKWARE for enhanced compression. 
-# _MASK_RESERVED_BIT_12 = 1 << 12
-# _MASK_ENCRYPTED_CENTRAL_DIR = 1 << 13
-# Bit 14, 15: Reserved by PKWARE
-# _MASK_RESERVED_BIT_14 = 1 << 14
-# _MASK_RESERVED_BIT_15 = 1 << 15
-
-# The "local file header" structure, magic number, size, and indices
-# (section V.A in the format document)
-structFileHeader = "<4s2B4HL2L2H"
-stringFileHeader = b"PK\003\004"
-sizeFileHeader = struct.calcsize(structFileHeader)
-
-_FH_SIGNATURE = 0
-_FH_EXTRACT_VERSION = 1
-_FH_EXTRACT_SYSTEM = 2
-_FH_GENERAL_PURPOSE_FLAG_BITS = 3
-_FH_COMPRESSION_METHOD = 4
-_FH_LAST_MOD_TIME = 5
-_FH_LAST_MOD_DATE = 6
-_FH_CRC = 7
-_FH_COMPRESSED_SIZE = 8
-_FH_UNCOMPRESSED_SIZE = 9
-_FH_FILENAME_LENGTH = 10
-_FH_EXTRA_FIELD_LENGTH = 11
-
-# The "Zip64 end of central directory locator" structure, magic number, and size
-structEndArchive64Locator = "<4sLQL"
-stringEndArchive64Locator = b"PK\x06\x07"
-sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
-
-# The "Zip64 end of central directory" record, magic number, size, and indices
-# (section V.G in the format document)
-structEndArchive64 = "<4sQ2H2L4Q"
-stringEndArchive64 = b"PK\x06\x06"
-sizeEndCentDir64 = struct.calcsize(structEndArchive64)
-
-_CD64_SIGNATURE = 0
-_CD64_DIRECTORY_RECSIZE = 1
-_CD64_CREATE_VERSION = 2
-_CD64_EXTRACT_VERSION = 3
-_CD64_DISK_NUMBER = 4
-_CD64_DISK_NUMBER_START = 5
-_CD64_NUMBER_ENTRIES_THIS_DISK = 6
-_CD64_NUMBER_ENTRIES_TOTAL = 7
-_CD64_DIRECTORY_SIZE = 8
-_CD64_OFFSET_START_CENTDIR = 9
-
-_DD_SIGNATURE = 0x08074b50
-
-
-class _Extra(bytes):
-    FIELD_STRUCT = struct.Struct('<HH')
-
-    def __new__(cls, val, id=None):
-        self = super().__new__(cls, val)
-        self.id = id
-        return self
-
-    @classmethod
-    def read_one(cls, raw):
-        try:
-            id, size = cls.FIELD_STRUCT.unpack(raw[:4])
-        except struct.error:
-            return None, raw
-        return cls(raw[4:4+size], id=id), raw[4+size:]
-
-    @classmethod
-    def split(cls, data):
-        # use memoryview for zero-copy slices
-        rest = memoryview(data)
-        while rest:
-            extra, rest = cls.read_one(rest)
-            if extra is None:
-                return
-            yield extra
-
-    @classmethod
-    def strip(cls, data, xids):
-        """Remove Extra fields with specified IDs."""
-        return b''.join(
-            bytes(field) for field in cls.split(data)
-            if field.id not in xids
-        )
-
-
-def _check_zipfile(fp):
-    try:
-        if _EndRecData(fp):
-            return True         # file has correct magic number
-    except OSError:
-        pass
-    return False
-
-def is_zipfile(filename):
-    """Quickly see if a file is a ZIP file by checking the magic number.
-
-    The filename argument may be a file or file-like object too.
-    """
-    result = False
-    try:
-        if hasattr(filename, "read"):
-            result = _check_zipfile(fp=filename)
-        else:
-            with open(filename, "rb") as fp:
-                result = _check_zipfile(fp)
-    except OSError:
-        pass
-    return result
-
-def _EndRecData64(fpin, offset, endrec):
-    """
-    Read the ZIP64 end-of-archive records and use that to update endrec
-    """
-    offset -= sizeEndCentDir64Locator
-    if offset < 0:
-        # The file is not large enough to contain a ZIP64
-        # end-of-archive record, so just return the end record we were given.
-        return endrec
-    fpin.seek(offset)
-    data = fpin.read(sizeEndCentDir64Locator)
-    if len(data) != sizeEndCentDir64Locator:
-        return endrec
-    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
-    if sig != stringEndArchive64Locator:
-        return endrec
-
-    if diskno != 0 or disks > 1:
-        raise BadZipFile("zipfiles that span multiple disks are not supported")
-
-    offset -= sizeEndCentDir64
-    if reloff > offset:
-        raise BadZipFile("Corrupt zip64 end of central directory locator")
-    # First, check the assumption that there is no prepended data.
-    fpin.seek(reloff)
-    extrasz = offset - reloff
-    data = fpin.read(sizeEndCentDir64)
-    if len(data) != sizeEndCentDir64:
-        raise OSError("Unknown I/O error")
-    if not data.startswith(stringEndArchive64) and reloff != offset:
-        # Since we already have seen the Zip64 EOCD Locator, it's
-        # possible we got here because there is prepended data.
-        # Assume no 'zip64 extensible data'
-        fpin.seek(offset)
-        extrasz = 0
-        data = fpin.read(sizeEndCentDir64)
-        if len(data) != sizeEndCentDir64:
-            raise OSError("Unknown I/O error")
-    if not data.startswith(stringEndArchive64):
-        raise BadZipFile("Zip64 end of central directory record not found")
-
-    sig, sz, create_version, read_version, disk_num, disk_dir, \
-        dircount, dircount2, dirsize, diroffset = \
-        struct.unpack(structEndArchive64, data)
-    if (diroffset + dirsize != reloff or
-        sz + 12 != sizeEndCentDir64 + extrasz):
-        raise BadZipFile("Corrupt zip64 end of central directory record")
-
-    # Update the original endrec using data from the ZIP64 record
-    endrec[_ECD_SIGNATURE] = sig
-    endrec[_ECD_DISK_NUMBER] = disk_num
-    endrec[_ECD_DISK_START] = disk_dir
-    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
-    endrec[_ECD_ENTRIES_TOTAL] = dircount2
-    endrec[_ECD_SIZE] = dirsize
-    endrec[_ECD_OFFSET] = diroffset
-    endrec[_ECD_LOCATION] = offset - extrasz
-    return endrec
-
-
-def _EndRecData(fpin):
-    """Return data from the "End of Central Directory" record, or None.
- - The data is a list of the nine items in the ZIP "End of central dir" - record followed by a tenth item, the file seek offset of this record.""" - - # Determine file size - fpin.seek(0, 2) - filesize = fpin.tell() - - # Check to see if this is ZIP file with no archive comment (the - # "end of central directory" structure should be the last item in the - # file if this is the case). - try: - fpin.seek(-sizeEndCentDir, 2) - except OSError: - return None - data = fpin.read(sizeEndCentDir) - if (len(data) == sizeEndCentDir and - data[0:4] == stringEndArchive and - data[-2:] == b"\000\000"): - # the signature is correct and there's no comment, unpack structure - endrec = struct.unpack(structEndArchive, data) - endrec=list(endrec) - - # Append a blank comment and record start offset - endrec.append(b"") - endrec.append(filesize - sizeEndCentDir) - - # Try to read the "Zip64 end of central directory" structure - return _EndRecData64(fpin, filesize - sizeEndCentDir, endrec) - - # Either this is not a ZIP file, or it is a ZIP file with an archive - # comment. Search the end of the file for the "end of central directory" - # record signature. The comment is the last item in the ZIP file and may be - # up to 64K long. It is assumed that the "end of central directory" magic - # number does not appear in the comment. - maxCommentStart = max(filesize - ZIP_MAX_COMMENT - sizeEndCentDir, 0) - fpin.seek(maxCommentStart, 0) - data = fpin.read(ZIP_MAX_COMMENT + sizeEndCentDir) - start = data.rfind(stringEndArchive) - if start >= 0: - # found the magic number; attempt to unpack and interpret - recData = data[start:start+sizeEndCentDir] - if len(recData) != sizeEndCentDir: - # Zip file is corrupted. - return None - endrec = list(struct.unpack(structEndArchive, recData)) - commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file - comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize] - endrec.append(comment) - endrec.append(maxCommentStart + start) - - # Try to read the "Zip64 end of central directory" structure - return _EndRecData64(fpin, maxCommentStart + start, endrec) - - # Unable to find a valid end of central directory structure - return None - -def _sanitize_filename(filename): - """Terminate the file name at the first null byte and - ensure paths always use forward slashes as the directory separator.""" - - # Terminate the file name at the first null byte. Null bytes in file - # names are used as tricks by viruses in archives. - null_byte = filename.find(chr(0)) - if null_byte >= 0: - filename = filename[0:null_byte] - # This is used to ensure paths in generated ZIP files always use - # forward slashes as the directory separator, as required by the - # ZIP format specification. 
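    # For illustration: a hypothetical member name "dir\\sub\\file.txt"
    # written by a Windows tool leaves this function as "dir/sub/file.txt".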
- if os.sep != "/" and os.sep in filename: - filename = filename.replace(os.sep, "/") - if os.altsep and os.altsep != "/" and os.altsep in filename: - filename = filename.replace(os.altsep, "/") - return filename - - -class ZipInfo: - """Class with attributes describing each file in the ZIP archive.""" - - __slots__ = ( - 'orig_filename', - 'filename', - 'date_time', - 'compress_type', - 'compress_level', - 'comment', - 'extra', - 'create_system', - 'create_version', - 'extract_version', - 'reserved', - 'flag_bits', - 'volume', - 'internal_attr', - 'external_attr', - 'header_offset', - 'CRC', - 'compress_size', - 'file_size', - '_raw_time', - '_end_offset', - ) - - def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)): - self.orig_filename = filename # Original file name in archive - - # Terminate the file name at the first null byte and - # ensure paths always use forward slashes as the directory separator. - filename = _sanitize_filename(filename) - - self.filename = filename # Normalized file name - self.date_time = date_time # year, month, day, hour, min, sec - - if date_time[0] < 1980: - raise ValueError('ZIP does not support timestamps before 1980') - - # Standard values: - self.compress_type = ZIP_STORED # Type of compression for the file - self.compress_level = None # Level for the compressor - self.comment = b"" # Comment for each file - self.extra = b"" # ZIP extra data - if sys.platform == 'win32': - self.create_system = 0 # System which created ZIP archive - else: - # Assume everything else is unix-y - self.create_system = 3 # System which created ZIP archive - self.create_version = DEFAULT_VERSION # Version which created ZIP archive - self.extract_version = DEFAULT_VERSION # Version needed to extract archive - self.reserved = 0 # Must be zero - self.flag_bits = 0 # ZIP flag bits - self.volume = 0 # Volume number of file header - self.internal_attr = 0 # Internal attributes - self.external_attr = 0 # External file attributes - self.compress_size = 0 # Size of the compressed file - self.file_size = 0 # Size of the uncompressed file - self._end_offset = None # Start of the next local header or central directory - # Other attributes are set by class ZipFile: - # header_offset Byte offset to the file header - # CRC CRC-32 of the uncompressed file - - # Maintain backward compatibility with the old protected attribute name. - @property - def _compresslevel(self): - return self.compress_level - - @_compresslevel.setter - def _compresslevel(self, value): - self.compress_level = value - - def __repr__(self): - result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)] - if self.compress_type != ZIP_STORED: - result.append(' compress_type=%s' % - compressor_names.get(self.compress_type, - self.compress_type)) - hi = self.external_attr >> 16 - lo = self.external_attr & 0xFFFF - if hi: - result.append(' filemode=%r' % stat.filemode(hi)) - if lo: - result.append(' external_attr=%#x' % lo) - isdir = self.is_dir() - if not isdir or self.file_size: - result.append(' file_size=%r' % self.file_size) - if ((not isdir or self.compress_size) and - (self.compress_type != ZIP_STORED or - self.file_size != self.compress_size)): - result.append(' compress_size=%r' % self.compress_size) - result.append('>') - return ''.join(result) - - def FileHeader(self, zip64=None): - """Return the per-file header as a bytes object. - - When the optional zip64 arg is None rather than a bool, we will - decide based upon the file_size and compress_size, if known, - False otherwise. 
- """ - dt = self.date_time - dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] - dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) - if self.flag_bits & _MASK_USE_DATA_DESCRIPTOR: - # Set these to zero because we write them after the file data - CRC = compress_size = file_size = 0 - else: - CRC = self.CRC - compress_size = self.compress_size - file_size = self.file_size - - extra = self.extra - - min_version = 0 - if zip64 is None: - # We always explicitly pass zip64 within this module.... This - # remains for anyone using ZipInfo.FileHeader as a public API. - zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT - if zip64: - fmt = '= 4: - tp, ln = unpack(' len(extra): - raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln)) - if tp == 0x0001: - data = extra[4:ln+4] - # ZIP64 extension (large files and/or large archives) - try: - if self.file_size in (0xFFFF_FFFF_FFFF_FFFF, 0xFFFF_FFFF): - field = "File size" - self.file_size, = unpack(' 2107: - date_time = (2107, 12, 31, 23, 59, 59) - # Create ZipInfo instance to store file information - if arcname is None: - arcname = filename - arcname = os.path.normpath(os.path.splitdrive(arcname)[1]) - while arcname[0] in (os.sep, os.altsep): - arcname = arcname[1:] - if isdir: - arcname += '/' - zinfo = cls(arcname, date_time) - zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes - if isdir: - zinfo.file_size = 0 - zinfo.external_attr |= 0x10 # MS-DOS directory flag - else: - zinfo.file_size = st.st_size - - return zinfo - - def is_dir(self): - """Return True if this archive member is a directory.""" - if self.filename.endswith('/'): - return True - # The ZIP format specification requires to use forward slashes - # as the directory separator, but in practice some ZIP files - # created on Windows can use backward slashes. For compatibility - # with the extraction code which already handles this: - if os.path.altsep: - return self.filename.endswith((os.path.sep, os.path.altsep)) - return False - - -# ZIP encryption uses the CRC32 one-byte primitive for scrambling some -# internal keys. We noticed that a direct implementation is faster than -# relying on binascii.crc32(). - -_crctable = None -def _gen_crc(crc): - for j in range(8): - if crc & 1: - crc = (crc >> 1) ^ 0xEDB88320 - else: - crc >>= 1 - return crc - -# ZIP supports a password-based form of encryption. Even though known -# plaintext attacks have been found against it, it is still useful -# to be able to get data out of such a file. 
-#
-# Usage:
-#     zd = _ZipDecrypter(mypwd)
-#     plain_bytes = zd(cypher_bytes)
-
-def _ZipDecrypter(pwd):
-    key0 = 305419896
-    key1 = 591751049
-    key2 = 878082192
-
-    global _crctable
-    if _crctable is None:
-        _crctable = list(map(_gen_crc, range(256)))
-    crctable = _crctable
-
-    def crc32(ch, crc):
-        """Compute the CRC32 primitive on one byte."""
-        return (crc >> 8) ^ crctable[(crc ^ ch) & 0xFF]
-
-    def update_keys(c):
-        nonlocal key0, key1, key2
-        key0 = crc32(c, key0)
-        key1 = (key1 + (key0 & 0xFF)) & 0xFFFFFFFF
-        key1 = (key1 * 134775813 + 1) & 0xFFFFFFFF
-        key2 = crc32(key1 >> 24, key2)
-
-    for p in pwd:
-        update_keys(p)
-
-    def decrypter(data):
-        """Decrypt a bytes object."""
-        result = bytearray()
-        append = result.append
-        for c in data:
-            k = key2 | 2
-            c ^= ((k * (k^1)) >> 8) & 0xFF
-            update_keys(c)
-            append(c)
-        return bytes(result)
-
-    return decrypter
-
-
-class LZMACompressor:
-
-    def __init__(self):
-        self._comp = None
-
-    def _init(self):
-        props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
-        self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
-            lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
-        ])
-        return struct.pack('<BBH', 9, 4, len(props)) + props
-
-    def compress(self, data):
-        if self._comp is None:
-            return self._init() + self._comp.compress(data)
-        return self._comp.compress(data)
-
-    def flush(self):
-        if self._comp is None:
-            return self._init() + self._comp.flush()
-        return self._comp.flush()
-
-
-class LZMADecompressor:
-
-    def __init__(self):
-        self._decomp = None
-        self._unconsumed = b''
-        self.eof = False
-
-    def decompress(self, data):
-        if self._decomp is None:
-            self._unconsumed += data
-            if len(self._unconsumed) <= 4:
-                return b''
-            psize, = struct.unpack('<H', self._unconsumed[2:4])
-            if len(self._unconsumed) <= 4 + psize:
-                return b''
-
-            self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
-                lzma._decode_filter_properties(lzma.FILTER_LZMA1,
-                                               self._unconsumed[4:4 + psize])
-            ])
-            data = self._unconsumed[4 + psize:]
-            del self._unconsumed
-
-        result = self._decomp.decompress(data)
-        self.eof = self._decomp.eof
-        return result
-
-
-compressor_names = {
-    0: 'store',
-    1: 'shrink',
-    2: 'reduce',
-    3: 'reduce',
-    4: 'reduce',
-    5: 'reduce',
-    6: 'implode',
-    7: 'tokenize',
-    8: 'deflate',
-    9: 'deflate64',
-    10: 'implode',
-    12: 'bzip2',
-    14: 'lzma',
-    18: 'terse',
-    19: 'lz77',
-    97: 'wavpack',
-    98: 'ppmd',
-}
-
-def _check_compression(compression):
-    if compression == ZIP_STORED:
-        pass
-    elif compression == ZIP_DEFLATED:
-        if not zlib:
-            raise RuntimeError(
-                "Compression requires the (missing) zlib module")
-    elif compression == ZIP_BZIP2:
-        if not bz2:
-            raise RuntimeError(
-                "Compression requires the (missing) bz2 module")
-    elif compression == ZIP_LZMA:
-        if not lzma:
-            raise RuntimeError(
-                "Compression requires the (missing) lzma module")
-    else:
-        raise NotImplementedError("That compression method is not supported")
-
-
-def _get_compressor(compress_type, compresslevel=None):
-    if compress_type == ZIP_DEFLATED:
-        if compresslevel is not None:
-            return zlib.compressobj(compresslevel, zlib.DEFLATED, -15)
-        return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
-    elif compress_type == ZIP_BZIP2:
-        if compresslevel is not None:
-            return bz2.BZ2Compressor(compresslevel)
-        return bz2.BZ2Compressor()
-    # compresslevel is ignored for ZIP_LZMA
-    elif compress_type == ZIP_LZMA:
-        return LZMACompressor()
-    else:
-        return None
-
-
-def _get_decompressor(compress_type):
-    _check_compression(compress_type)
-    if compress_type == ZIP_STORED:
-        return None
-    elif compress_type == ZIP_DEFLATED:
-        return zlib.decompressobj(-15)
-    elif compress_type == ZIP_BZIP2:
-        return bz2.BZ2Decompressor()
-    elif compress_type == ZIP_LZMA:
-        return LZMADecompressor()
-    else:
-        descr = compressor_names.get(compress_type)
-        if descr:
-            raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
-        else:
-            raise NotImplementedError("compression type %d" % (compress_type,))
-
-
-class _SharedFile:
-    def __init__(self, file, pos, close, lock, writing):
-        self._file = file
-        self._pos = pos
-        self._close = close
-        self._lock = lock
-        self._writing = writing
-        self.seekable = file.seekable
-
-    def tell(self):
-        return self._pos
-
-    def seek(self, offset, whence=0):
-        with self._lock:
-            if self._writing():
-                raise ValueError("Can't reposition in the ZIP file while "
-                        "there is an open writing handle on it. "
-                        "Close the writing handle before trying to read.")
-            self._file.seek(offset, whence)
-            self._pos = self._file.tell()
-            return self._pos
-
-    def read(self, n=-1):
-        with self._lock:
-            if self._writing():
-                raise ValueError("Can't read from the ZIP file while there "
-                        "is an open writing handle on it. "
-                        "Close the writing handle before trying to read.")
-            self._file.seek(self._pos)
-            data = self._file.read(n)
-            self._pos = self._file.tell()
-            return data
-
-    def close(self):
-        if self._file is not None:
-            fileobj = self._file
-            self._file = None
-            self._close(fileobj)
-
-# Provide the tell method for unseekable stream
-class _Tellable:
-    def __init__(self, fp):
-        self.fp = fp
-        self.offset = 0
-
-    def write(self, data):
-        n = self.fp.write(data)
-        self.offset += n
-        return n
-
-    def tell(self):
-        return self.offset
-
-    def flush(self):
-        self.fp.flush()
-
-    def close(self):
-        self.fp.close()
-
-
-class ZipExtFile(io.BufferedIOBase):
-    """File-like object for reading an archive member.
-       Is returned by ZipFile.open().
-    """
-
-    # Max size supported by decompressor.
-    MAX_N = 1 << 31 - 1
-
-    # Read from compressed files in 4k blocks.
-    MIN_READ_SIZE = 4096
-
-    # Chunk size to read during seek
-    MAX_SEEK_READ = 1 << 24
-
-    def __init__(self, fileobj, mode, zipinfo, pwd=None,
-                 close_fileobj=False):
-        self._fileobj = fileobj
-        self._pwd = pwd
-        self._close_fileobj = close_fileobj
-
-        self._compress_type = zipinfo.compress_type
-        self._compress_left = zipinfo.compress_size
-        self._left = zipinfo.file_size
-
-        self._decompressor = _get_decompressor(self._compress_type)
-
-        self._eof = False
-        self._readbuffer = b''
-        self._offset = 0
-
-        self.newlines = None
-
-        self.mode = mode
-        self.name = zipinfo.filename
-
-        if hasattr(zipinfo, 'CRC'):
-            self._expected_crc = zipinfo.CRC
-            self._running_crc = crc32(b'')
-        else:
-            self._expected_crc = None
-
-        self._seekable = False
-        try:
-            if fileobj.seekable():
-                self._orig_compress_start = fileobj.tell()
-                self._orig_compress_size = zipinfo.compress_size
-                self._orig_file_size = zipinfo.file_size
-                self._orig_start = self._orig_compress_start
-                self._seekable = True
-        except AttributeError:
-            pass
-
-        self._decrypter = None
-        if pwd:
-            if zipinfo.flag_bits & _MASK_USE_DATA_DESCRIPTOR:
-                # compare against the file type from extended local headers
-                check_byte = (zipinfo._raw_time >> 8) & 0xff
-            else:
-                # compare against the CRC otherwise
-                check_byte = (zipinfo.CRC >> 24) & 0xff
-            h = self._init_decrypter()
-            if h != check_byte:
-                raise RuntimeError("Bad password for file %r" % zipinfo.orig_filename)
-
-
-    def _init_decrypter(self):
-        self._decrypter = _ZipDecrypter(self._pwd)
-        # The first 12 bytes in the cypher stream is an encryption header
-        # used to strengthen the algorithm. The first 11 bytes are
-        # completely random, while the 12th contains the MSB of the CRC,
-        # or the MSB of the file time depending on the header type
-        # and is used to check the correctness of the password.
-        header = self._fileobj.read(12)
-        self._compress_left -= 12
-        return self._decrypter(header)[11]
-
-    def __repr__(self):
-        result = ['<%s.%s' % (self.__class__.__module__,
-                              self.__class__.__qualname__)]
-        if not self.closed:
-            result.append(' name=%r' % (self.name,))
-            if self._compress_type != ZIP_STORED:
-                result.append(' compress_type=%s' %
-                              compressor_names.get(self._compress_type,
-                                                   self._compress_type))
-        else:
-            result.append(' [closed]')
-        result.append('>')
-        return ''.join(result)
-
-    def readline(self, limit=-1):
-        """Read and return a line from the stream.
-
-        If limit is specified, at most limit bytes will be read.
-        """
-
-        if limit < 0:
-            # Shortcut common case - newline found in buffer.
-            i = self._readbuffer.find(b'\n', self._offset) + 1
-            if i > 0:
-                line = self._readbuffer[self._offset: i]
-                self._offset = i
-                return line
-
-        return io.BufferedIOBase.readline(self, limit)
-
-    def peek(self, n=1):
-        """Returns buffered bytes without advancing the position."""
-        if n > len(self._readbuffer) - self._offset:
-            chunk = self.read(n)
-            if len(chunk) > self._offset:
-                self._readbuffer = chunk + self._readbuffer[self._offset:]
-                self._offset = 0
-            else:
-                self._offset -= len(chunk)
-
-        # Return up to 512 bytes to reduce allocation overhead for tight loops.
-        return self._readbuffer[self._offset: self._offset + 512]
-
-    def readable(self):
-        if self.closed:
-            raise ValueError("I/O operation on closed file.")
-        return True
-
-    def read(self, n=-1):
-        """Read and return up to n bytes.
-        If the argument is omitted, None, or negative, data is read and
-        returned until EOF is reached.
- """ - if self.closed: - raise ValueError("read from closed file.") - if n is None or n < 0: - buf = self._readbuffer[self._offset:] - self._readbuffer = b'' - self._offset = 0 - while not self._eof: - buf += self._read1(self.MAX_N) - return buf - - end = n + self._offset - if end < len(self._readbuffer): - buf = self._readbuffer[self._offset:end] - self._offset = end - return buf - - n = end - len(self._readbuffer) - buf = self._readbuffer[self._offset:] - self._readbuffer = b'' - self._offset = 0 - while n > 0 and not self._eof: - data = self._read1(n) - if n < len(data): - self._readbuffer = data - self._offset = n - buf += data[:n] - break - buf += data - n -= len(data) - return buf - - def _update_crc(self, newdata): - # Update the CRC using the given data. - if self._expected_crc is None: - # No need to compute the CRC if we don't have a reference value - return - self._running_crc = crc32(newdata, self._running_crc) - # Check the CRC if we're at the end of the file - if self._eof and self._running_crc != self._expected_crc: - raise BadZipFile("Bad CRC-32 for file %r" % self.name) - - def read1(self, n): - """Read up to n bytes with at most one read() system call.""" - - if n is None or n < 0: - buf = self._readbuffer[self._offset:] - self._readbuffer = b'' - self._offset = 0 - while not self._eof: - data = self._read1(self.MAX_N) - if data: - buf += data - break - return buf - - end = n + self._offset - if end < len(self._readbuffer): - buf = self._readbuffer[self._offset:end] - self._offset = end - return buf - - n = end - len(self._readbuffer) - buf = self._readbuffer[self._offset:] - self._readbuffer = b'' - self._offset = 0 - if n > 0: - while not self._eof: - data = self._read1(n) - if n < len(data): - self._readbuffer = data - self._offset = n - buf += data[:n] - break - if data: - buf += data - break - return buf - - def _read1(self, n): - # Read up to n compressed bytes with at most one read() system call, - # decrypt and decompress them. - if self._eof or n <= 0: - return b'' - - # Read from file. - if self._compress_type == ZIP_DEFLATED: - ## Handle unconsumed data. 
- data = self._decompressor.unconsumed_tail - if n > len(data): - data += self._read2(n - len(data)) - else: - data = self._read2(n) - - if self._compress_type == ZIP_STORED: - self._eof = self._compress_left <= 0 - elif self._compress_type == ZIP_DEFLATED: - n = max(n, self.MIN_READ_SIZE) - data = self._decompressor.decompress(data, n) - self._eof = (self._decompressor.eof or - self._compress_left <= 0 and - not self._decompressor.unconsumed_tail) - if self._eof: - data += self._decompressor.flush() - else: - data = self._decompressor.decompress(data) - self._eof = self._decompressor.eof or self._compress_left <= 0 - - data = data[:self._left] - self._left -= len(data) - if self._left <= 0: - self._eof = True - self._update_crc(data) - return data - - def _read2(self, n): - if self._compress_left <= 0: - return b'' - - n = max(n, self.MIN_READ_SIZE) - n = min(n, self._compress_left) - - data = self._fileobj.read(n) - self._compress_left -= len(data) - if not data: - raise EOFError - - if self._decrypter is not None: - data = self._decrypter(data) - return data - - def close(self): - try: - if self._close_fileobj: - self._fileobj.close() - finally: - super().close() - - def seekable(self): - if self.closed: - raise ValueError("I/O operation on closed file.") - return self._seekable - - def seek(self, offset, whence=os.SEEK_SET): - if self.closed: - raise ValueError("seek on closed file.") - if not self._seekable: - raise io.UnsupportedOperation("underlying stream is not seekable") - curr_pos = self.tell() - if whence == os.SEEK_SET: - new_pos = offset - elif whence == os.SEEK_CUR: - new_pos = curr_pos + offset - elif whence == os.SEEK_END: - new_pos = self._orig_file_size + offset - else: - raise ValueError("whence must be os.SEEK_SET (0), " - "os.SEEK_CUR (1), or os.SEEK_END (2)") - - if new_pos > self._orig_file_size: - new_pos = self._orig_file_size - - if new_pos < 0: - new_pos = 0 - - read_offset = new_pos - curr_pos - buff_offset = read_offset + self._offset - - if buff_offset >= 0 and buff_offset < len(self._readbuffer): - # Just move the _offset index if the new position is in the _readbuffer - self._offset = buff_offset - read_offset = 0 - # Fast seek uncompressed unencrypted file - elif self._compress_type == ZIP_STORED and self._decrypter is None and read_offset != 0: - # disable CRC checking after first seeking - it would be invalid - self._expected_crc = None - # seek actual file taking already buffered data into account - read_offset -= len(self._readbuffer) - self._offset - self._fileobj.seek(read_offset, os.SEEK_CUR) - self._left -= read_offset - self._compress_left -= read_offset - self._eof = self._left <= 0 - read_offset = 0 - # flush read buffer - self._readbuffer = b'' - self._offset = 0 - elif read_offset < 0: - # Position is before the current position. 
Reset the ZipExtFile
-            self._fileobj.seek(self._orig_compress_start)
-            self._running_crc = self._orig_start_crc
-            self._expected_crc = self._orig_crc
-            self._compress_left = self._orig_compress_size
-            self._left = self._orig_file_size
-            self._readbuffer = b''
-            self._offset = 0
-            self._decompressor = _get_decompressor(self._compress_type)
-            self._eof = False
-            read_offset = new_pos
-            if self._decrypter is not None:
-                self._init_decrypter()
-
-        while read_offset > 0:
-            read_len = min(self.MAX_SEEK_READ, read_offset)
-            self.read(read_len)
-            read_offset -= read_len
-
-        return self.tell()
-
-    def tell(self):
-        if self.closed:
-            raise ValueError("tell on closed file.")
-        if not self._seekable:
-            raise io.UnsupportedOperation("underlying stream is not seekable")
-        filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset
-        return filepos
-
-
-class _ZipWriteFile(io.BufferedIOBase):
-    def __init__(self, zf, zinfo, zip64):
-        self._zinfo = zinfo
-        self._zip64 = zip64
-        self._zipfile = zf
-        self._compressor = _get_compressor(zinfo.compress_type,
-                                           zinfo.compress_level)
-        self._file_size = 0
-        self._compress_size = 0
-        self._crc = 0
-
-    @property
-    def _fileobj(self):
-        return self._zipfile.fp
-
-    @property
-    def name(self):
-        return self._zinfo.filename
-
-    @property
-    def mode(self):
-        return 'wb'
-
-    def writable(self):
-        return True
-
-    def write(self, data):
-        if self.closed:
-            raise ValueError('I/O operation on closed file.')
-
-        # Accept any data that supports the buffer protocol
-        if isinstance(data, (bytes, bytearray)):
-            nbytes = len(data)
-        else:
-            data = memoryview(data)
-            nbytes = data.nbytes
-        self._file_size += nbytes
-
-        self._crc = crc32(data, self._crc)
-        if self._compressor:
-            data = self._compressor.compress(data)
-            self._compress_size += len(data)
-        self._fileobj.write(data)
-        return nbytes
-
-    def close(self):
-        if self.closed:
-            return
-        try:
-            super().close()
-            # Flush any data from the compressor, and update header info
-            if self._compressor:
-                buf = self._compressor.flush()
-                self._compress_size += len(buf)
-                self._fileobj.write(buf)
-                self._zinfo.compress_size = self._compress_size
-            else:
-                self._zinfo.compress_size = self._file_size
-            self._zinfo.CRC = self._crc
-            self._zinfo.file_size = self._file_size
-
-            if not self._zip64:
-                if self._file_size > ZIP64_LIMIT:
-                    raise RuntimeError("File size too large, try using force_zip64")
-                if self._compress_size > ZIP64_LIMIT:
-                    raise RuntimeError("Compressed size too large, try using force_zip64")
-
-            # Write updated header info
-            if self._zinfo.flag_bits & _MASK_USE_DATA_DESCRIPTOR:
-                # Write CRC and file sizes after the file data
-                fmt = '<LLQQ' if self._zip64 else '<LLLL'
-                self._fileobj.write(struct.pack(fmt, _DD_SIGNATURE, self._zinfo.CRC,
-                    self._zinfo.compress_size, self._zinfo.file_size))
-                self._zipfile.start_dir = self._fileobj.tell()
-            else:
-                # Seek backwards and write file header (which will now include
-                # correct CRC and file sizes)
-
-                # Preserve current position in file
-                self._zipfile.start_dir = self._fileobj.tell()
-                self._fileobj.seek(self._zinfo.header_offset)
-                self._fileobj.write(self._zinfo.FileHeader(self._zip64))
-                self._fileobj.seek(self._zipfile.start_dir)
-
-            # Successfully written: Add file to our caches
-            self._zipfile.filelist.append(self._zinfo)
-            self._zipfile.NameToInfo[self._zinfo.filename] = self._zinfo
-        finally:
-            self._zipfile._writing = False
-
-
-class ZipFile:
-    """ Class with methods to open, read, write, close, list zip files.
-
-    z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True,
-                compresslevel=None)
-
-    file: Either the path to the file, or a file-like object.
-          If it is a path, the file will be opened and closed by ZipFile.
-    mode: The mode can be either read 'r', write 'w', exclusive create 'x',
-          or append 'a'.
-    compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
-                 ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
-    allowZip64: if True ZipFile will create files with ZIP64 extensions when
-                needed, otherwise it will raise an exception when this would
-                be necessary.
-    compresslevel: None (default for the given compression type) or an integer
-                   specifying the level to pass to the compressor.
-                   When using ZIP_STORED or ZIP_LZMA this keyword has no effect.
-                   When using ZIP_DEFLATED integers 0 through 9 are accepted.
-                   When using ZIP_BZIP2 integers 1 through 9 are accepted.
-
-    """
-
-    fp = None                   # Set here since __del__ checks it
-    _windows_illegal_name_trans_table = None
-
-    def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True,
-                 compresslevel=None, *, strict_timestamps=True, metadata_encoding=None):
-        """Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
-        or append 'a'."""
-        if mode not in ('r', 'w', 'x', 'a'):
-            raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
-
-        _check_compression(compression)
-
-        self._allowZip64 = allowZip64
-        self._didModify = False
-        self.debug = 0  # Level of printing: 0 through 3
-        self.NameToInfo = {}    # Find file info given name
-        self.filelist = []      # List of ZipInfo instances for archive
-        self.compression = compression  # Method of compression
-        self.compresslevel = compresslevel
-        self.mode = mode
-        self.pwd = None
-        self._comment = b''
-        self._strict_timestamps = strict_timestamps
-        self.metadata_encoding = metadata_encoding
-
-        # Check that we don't try to write with nonconforming codecs
-        if self.metadata_encoding and mode != 'r':
-            raise ValueError(
-                "metadata_encoding is only supported for reading files")
-
-        # Check if we were passed a file-like object
-        if isinstance(file, os.PathLike):
-            file = os.fspath(file)
-        if isinstance(file, str):
-            # No, it's a filename
-            self._filePassed = 0
-            self.filename = file
-            modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b',
-                        'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
-            filemode = modeDict[mode]
-            while True:
-                try:
-                    self.fp = io.open(file, filemode)
-                except OSError:
-                    if filemode in modeDict:
-                        filemode = modeDict[filemode]
-                        continue
-                    raise
-                break
-        else:
-            self._filePassed = 1
-            self.fp = file
-            self.filename = getattr(file, 'name', None)
-        self._fileRefCnt = 1
-        self._lock = threading.RLock()
-        self._seekable = True
-        self._writing = False
-
-        try:
-            if mode == 'r':
-                self._RealGetContents()
-            elif mode in ('w', 'x'):
-                # set the modified flag so central directory gets written
-                # even if no files are added to the archive
-                self._didModify = True
-                try:
-                    self.start_dir = self.fp.tell()
-                except (AttributeError, OSError):
-                    self.fp = _Tellable(self.fp)
-                    self.start_dir = 0
-                    self._seekable = False
-                else:
-                    # Some file-like objects can provide tell() but not seek()
-                    try:
-                        self.fp.seek(self.start_dir)
-                    except (AttributeError, OSError):
-                        self._seekable = False
-            elif mode == 'a':
-                try:
-                    # See if file is a zip file
-                    self._RealGetContents()
-                    # seek to start of directory and overwrite
-                    self.fp.seek(self.start_dir)
-                except BadZipFile:
-                    # file is not a zip file, just append
-                    self.fp.seek(0, 2)
-
-                    # set the modified flag so central directory gets written
-                    # even if no files are added to the archive
-                    self._didModify = True
-                    self.start_dir = self.fp.tell()
-            else:
-                raise ValueError("Mode must be 'r', 'w', 'x', or 'a'")
-        except:
-            fp = self.fp
-            self.fp = None
-            self._fpclose(fp)
-            raise
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, type, value, traceback):
-        self.close()
-
-    def __repr__(self):
-        result = ['<%s.%s' % (self.__class__.__module__,
-                              self.__class__.__qualname__)]
-        if self.fp is not None:
-            if self._filePassed:
-                result.append(' file=%r' % self.fp)
-            elif self.filename is not None:
-                result.append(' filename=%r' % self.filename)
-            result.append(' mode=%r' % self.mode)
-        else:
-            result.append(' [closed]')
-        result.append('>')
-        return ''.join(result)
-
-    def _RealGetContents(self):
-        """Read in the table of contents for the ZIP file."""
-        fp = self.fp
-        try:
-            endrec = _EndRecData(fp)
-        except OSError:
-            raise BadZipFile("File is not a zip file")
-        if not endrec:
-            raise BadZipFile("File is not a zip file")
-        if self.debug > 1:
-            print(endrec)
-        size_cd = endrec[_ECD_SIZE]             # bytes in central directory
-        offset_cd = endrec[_ECD_OFFSET]         # offset of central directory
-        self._comment = endrec[_ECD_COMMENT]    # archive comment
-
-        # "concat" is zero, unless zip was concatenated to another file
-        concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
-
-        if self.debug > 2:
-            inferred = concat + offset_cd
-            print("given, inferred, offset", offset_cd, inferred, concat)
-        # self.start_dir:  Position of start of central directory
-        self.start_dir = offset_cd + concat
-        if self.start_dir < 0:
-            raise
BadZipFile("Bad offset for central directory") - fp.seek(self.start_dir, 0) - data = fp.read(size_cd) - fp = io.BytesIO(data) - total = 0 - while total < size_cd: - centdir = fp.read(sizeCentralDir) - if len(centdir) != sizeCentralDir: - raise BadZipFile("Truncated central directory") - centdir = struct.unpack(structCentralDir, centdir) - if centdir[_CD_SIGNATURE] != stringCentralDir: - raise BadZipFile("Bad magic number for central directory") - if self.debug > 2: - print(centdir) - filename = fp.read(centdir[_CD_FILENAME_LENGTH]) - orig_filename_crc = crc32(filename) - flags = centdir[_CD_FLAG_BITS] - if flags & _MASK_UTF_FILENAME: - # UTF-8 file names extension - filename = filename.decode('utf-8') - else: - # Historical ZIP filename encoding - filename = filename.decode(self.metadata_encoding or 'cp437') - # Create ZipInfo instance to store file information - x = ZipInfo(filename) - x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) - x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) - x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] - (x.create_version, x.create_system, x.extract_version, x.reserved, - x.flag_bits, x.compress_type, t, d, - x.CRC, x.compress_size, x.file_size) = centdir[1:12] - if x.extract_version > MAX_EXTRACT_VERSION: - raise NotImplementedError("zip file version %.1f" % - (x.extract_version / 10)) - x.volume, x.internal_attr, x.external_attr = centdir[15:18] - # Convert date/time code to (year, month, day, hour, min, sec) - x._raw_time = t - x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, - t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) - x._decodeExtra(orig_filename_crc) - x.header_offset = x.header_offset + concat - self.filelist.append(x) - self.NameToInfo[x.filename] = x - - # update total bytes read from central directory - total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] - + centdir[_CD_EXTRA_FIELD_LENGTH] - + centdir[_CD_COMMENT_LENGTH]) - - if self.debug > 2: - print("total", total) - - end_offset = self.start_dir - for zinfo in reversed(sorted(self.filelist, - key=lambda zinfo: zinfo.header_offset)): - zinfo._end_offset = end_offset - end_offset = zinfo.header_offset - - def namelist(self): - """Return a list of file names in the archive.""" - return [data.filename for data in self.filelist] - - def infolist(self): - """Return a list of class ZipInfo instances for files in the - archive.""" - return self.filelist - - def printdir(self, file=None): - """Print a table of contents for the zip file.""" - print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"), - file=file) - for zinfo in self.filelist: - date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6] - print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size), - file=file) - - def testzip(self): - """Read all the files and check the CRC. - - Return None if all files could be read successfully, or the name - of the offending file otherwise.""" - chunk_size = 2 ** 20 - for zinfo in self.filelist: - try: - # Read by chunks, to avoid an OverflowError or a - # MemoryError with very large embedded files. 
- with self.open(zinfo.filename, "r") as f: - while f.read(chunk_size): # Check CRC-32 - pass - except BadZipFile: - return zinfo.filename - - def getinfo(self, name): - """Return the instance of ZipInfo given 'name'.""" - info = self.NameToInfo.get(name) - if info is None: - raise KeyError( - 'There is no item named %r in the archive' % name) - - return info - - def setpassword(self, pwd): - """Set default password for encrypted files.""" - if pwd and not isinstance(pwd, bytes): - raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) - if pwd: - self.pwd = pwd - else: - self.pwd = None - - @property - def comment(self): - """The comment text associated with the ZIP file.""" - return self._comment - - @comment.setter - def comment(self, comment): - if not isinstance(comment, bytes): - raise TypeError("comment: expected bytes, got %s" % type(comment).__name__) - # check for valid comment length - if len(comment) > ZIP_MAX_COMMENT: - import warnings - warnings.warn('Archive comment is too long; truncating to %d bytes' - % ZIP_MAX_COMMENT, stacklevel=2) - comment = comment[:ZIP_MAX_COMMENT] - self._comment = comment - self._didModify = True - - def read(self, name, pwd=None): - """Return file bytes for name. 'pwd' is the password to decrypt - encrypted files.""" - with self.open(name, "r", pwd) as fp: - return fp.read() - - def open(self, name, mode="r", pwd=None, *, force_zip64=False): - """Return file-like object for 'name'. - - name is a string for the file name within the ZIP file, or a ZipInfo - object. - - mode should be 'r' to read a file already in the ZIP file, or 'w' to - write to a file newly added to the archive. - - pwd is the password to decrypt files (only used for reading). - - When writing, if the file size is not known in advance but may exceed - 2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large - files. If the size is known in advance, it is best to pass a ZipInfo - instance for name, with zinfo.file_size set. - """ - if mode not in {"r", "w"}: - raise ValueError('open() requires mode "r" or "w"') - if pwd and (mode == "w"): - raise ValueError("pwd is only supported for reading files") - if not self.fp: - raise ValueError( - "Attempt to use ZIP archive that was already closed") - - # Make sure we have an info object - if isinstance(name, ZipInfo): - # 'name' is already an info object - zinfo = name - elif mode == 'w': - zinfo = ZipInfo(name) - zinfo.compress_type = self.compression - zinfo.compress_level = self.compresslevel - else: - # Get info object for name - zinfo = self.getinfo(name) - - if mode == 'w': - return self._open_to_write(zinfo, force_zip64=force_zip64) - - if self._writing: - raise ValueError("Can't read from the ZIP file while there " - "is an open writing handle on it. 
" - "Close the writing handle before trying to read.") - - # Open for reading: - self._fileRefCnt += 1 - zef_file = _SharedFile(self.fp, zinfo.header_offset, - self._fpclose, self._lock, lambda: self._writing) - try: - # Skip the file header: - fheader = zef_file.read(sizeFileHeader) - if len(fheader) != sizeFileHeader: - raise BadZipFile("Truncated file header") - fheader = struct.unpack(structFileHeader, fheader) - if fheader[_FH_SIGNATURE] != stringFileHeader: - raise BadZipFile("Bad magic number for file header") - - fname = zef_file.read(fheader[_FH_FILENAME_LENGTH]) - if fheader[_FH_EXTRA_FIELD_LENGTH]: - zef_file.seek(fheader[_FH_EXTRA_FIELD_LENGTH], whence=1) - - if zinfo.flag_bits & _MASK_COMPRESSED_PATCH: - # Zip 2.7: compressed patched data - raise NotImplementedError("compressed patched data (flag bit 5)") - - if zinfo.flag_bits & _MASK_STRONG_ENCRYPTION: - # strong encryption - raise NotImplementedError("strong encryption (flag bit 6)") - - if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & _MASK_UTF_FILENAME: - # UTF-8 filename - fname_str = fname.decode("utf-8") - else: - fname_str = fname.decode(self.metadata_encoding or "cp437") - - if fname_str != zinfo.orig_filename: - raise BadZipFile( - 'File name in directory %r and header %r differ.' - % (zinfo.orig_filename, fname)) - - if (zinfo._end_offset is not None and - zef_file.tell() + zinfo.compress_size > zinfo._end_offset): - if zinfo._end_offset == zinfo.header_offset: - import warnings - warnings.warn( - f"Overlapped entries: {zinfo.orig_filename!r} " - f"(possible zip bomb)", - skip_file_prefixes=(os.path.dirname(__file__),)) - else: - raise BadZipFile( - f"Overlapped entries: {zinfo.orig_filename!r} " - f"(possible zip bomb)") - - # check for encrypted flag & handle password - is_encrypted = zinfo.flag_bits & _MASK_ENCRYPTED - if is_encrypted: - if not pwd: - pwd = self.pwd - if pwd and not isinstance(pwd, bytes): - raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) - if not pwd: - raise RuntimeError("File %r is encrypted, password " - "required for extraction" % name) - else: - pwd = None - - return ZipExtFile(zef_file, mode + 'b', zinfo, pwd, True) - except: - zef_file.close() - raise - - def _open_to_write(self, zinfo, force_zip64=False): - if force_zip64 and not self._allowZip64: - raise ValueError( - "force_zip64 is True, but allowZip64 was False when opening " - "the ZIP file." - ) - if self._writing: - raise ValueError("Can't write to the ZIP file while there is " - "another write handle open on it. 
" - "Close the first handle before opening another.") - - # Size and CRC are overwritten with correct data after processing the file - zinfo.compress_size = 0 - zinfo.CRC = 0 - - zinfo.flag_bits = 0x00 - if zinfo.compress_type == ZIP_LZMA: - # Compressed data includes an end-of-stream (EOS) marker - zinfo.flag_bits |= _MASK_COMPRESS_OPTION_1 - if not self._seekable: - zinfo.flag_bits |= _MASK_USE_DATA_DESCRIPTOR - - if not zinfo.external_attr: - zinfo.external_attr = 0o600 << 16 # permissions: ?rw------- - - # Compressed size can be larger than uncompressed size - zip64 = force_zip64 or (zinfo.file_size * 1.05 > ZIP64_LIMIT) - if not self._allowZip64 and zip64: - raise LargeZipFile("Filesize would require ZIP64 extensions") - - if self._seekable: - self.fp.seek(self.start_dir) - zinfo.header_offset = self.fp.tell() - - self._writecheck(zinfo) - self._didModify = True - - self.fp.write(zinfo.FileHeader(zip64)) - - self._writing = True - return _ZipWriteFile(self, zinfo, zip64) - - def extract(self, member, path=None, pwd=None): - """Extract a member from the archive to the current working directory, - using its full name. Its file information is extracted as accurately - as possible. `member' may be a filename or a ZipInfo object. You can - specify a different directory using `path'. You can specify the - password to decrypt the file using 'pwd'. - """ - if path is None: - path = os.getcwd() - else: - path = os.fspath(path) - - return self._extract_member(member, path, pwd) - - def extractall(self, path=None, members=None, pwd=None): - """Extract all members from the archive to the current working - directory. `path' specifies a different directory to extract to. - `members' is optional and must be a subset of the list returned - by namelist(). You can specify the password to decrypt all files - using 'pwd'. - """ - if members is None: - members = self.namelist() - - if path is None: - path = os.getcwd() - else: - path = os.fspath(path) - - for zipinfo in members: - self._extract_member(zipinfo, path, pwd) - - @classmethod - def _sanitize_windows_name(cls, arcname, pathsep): - """Replace bad characters and remove trailing dots from parts.""" - table = cls._windows_illegal_name_trans_table - if not table: - illegal = ':<>|"?*' - table = str.maketrans(illegal, '_' * len(illegal)) - cls._windows_illegal_name_trans_table = table - arcname = arcname.translate(table) - # remove trailing dots and spaces - arcname = (x.rstrip(' .') for x in arcname.split(pathsep)) - # rejoin, removing empty parts. - arcname = pathsep.join(x for x in arcname if x) - return arcname - - def _extract_member(self, member, targetpath, pwd): - """Extract the ZipInfo object 'member' to a physical - file on the path targetpath. - """ - if not isinstance(member, ZipInfo): - member = self.getinfo(member) - - # build the destination pathname, replacing - # forward slashes to platform specific separators. - arcname = member.filename.replace('/', os.path.sep) - - if os.path.altsep: - arcname = arcname.replace(os.path.altsep, os.path.sep) - # interpret absolute pathname as relative, remove drive letter or - # UNC path, redundant separators, "." and ".." components. 
- arcname = os.path.splitdrive(arcname)[1] - invalid_path_parts = ('', os.path.curdir, os.path.pardir) - arcname = os.path.sep.join(x for x in arcname.split(os.path.sep) - if x not in invalid_path_parts) - if os.path.sep == '\\': - # filter illegal characters on Windows - arcname = self._sanitize_windows_name(arcname, os.path.sep) - - if not arcname and not member.is_dir(): - raise ValueError("Empty filename.") - - targetpath = os.path.join(targetpath, arcname) - targetpath = os.path.normpath(targetpath) - - # Create all upper directories if necessary. - upperdirs = os.path.dirname(targetpath) - if upperdirs and not os.path.exists(upperdirs): - os.makedirs(upperdirs, exist_ok=True) - - if member.is_dir(): - if not os.path.isdir(targetpath): - try: - os.mkdir(targetpath) - except FileExistsError: - if not os.path.isdir(targetpath): - raise - return targetpath - - with self.open(member, pwd=pwd) as source, \ - open(targetpath, "wb") as target: - shutil.copyfileobj(source, target) - - return targetpath - - def _writecheck(self, zinfo): - """Check for errors before writing a file to the archive.""" - if zinfo.filename in self.NameToInfo: - import warnings - warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3) - if self.mode not in ('w', 'x', 'a'): - raise ValueError("write() requires mode 'w', 'x', or 'a'") - if not self.fp: - raise ValueError( - "Attempt to write ZIP archive that was already closed") - _check_compression(zinfo.compress_type) - if not self._allowZip64: - requires_zip64 = None - if len(self.filelist) >= ZIP_FILECOUNT_LIMIT: - requires_zip64 = "Files count" - elif zinfo.file_size > ZIP64_LIMIT: - requires_zip64 = "Filesize" - elif zinfo.header_offset > ZIP64_LIMIT: - requires_zip64 = "Zipfile size" - if requires_zip64: - raise LargeZipFile(requires_zip64 + - " would require ZIP64 extensions") - - def write(self, filename, arcname=None, - compress_type=None, compresslevel=None): - """Put the bytes from filename into the archive under the name - arcname.""" - if not self.fp: - raise ValueError( - "Attempt to write to ZIP archive that was already closed") - if self._writing: - raise ValueError( - "Can't write to ZIP archive while an open writing handle exists" - ) - - zinfo = ZipInfo.from_file(filename, arcname, - strict_timestamps=self._strict_timestamps) - - if zinfo.is_dir(): - zinfo.compress_size = 0 - zinfo.CRC = 0 - self.mkdir(zinfo) - else: - if compress_type is not None: - zinfo.compress_type = compress_type - else: - zinfo.compress_type = self.compression - - if compresslevel is not None: - zinfo.compress_level = compresslevel - else: - zinfo.compress_level = self.compresslevel - - with open(filename, "rb") as src, self.open(zinfo, 'w') as dest: - shutil.copyfileobj(src, dest, 1024*8) - - def writestr(self, zinfo_or_arcname, data, - compress_type=None, compresslevel=None): - """Write a file into the archive. The contents is 'data', which - may be either a 'str' or a 'bytes' instance; if it is a 'str', - it is encoded as UTF-8 first. 
- 'zinfo_or_arcname' is either a ZipInfo instance or - the name of the file in the archive.""" - if isinstance(data, str): - data = data.encode("utf-8") - if not isinstance(zinfo_or_arcname, ZipInfo): - zinfo = ZipInfo(filename=zinfo_or_arcname, - date_time=time.localtime(time.time())[:6]) - zinfo.compress_type = self.compression - zinfo.compress_level = self.compresslevel - if zinfo.filename.endswith('/'): - zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x - zinfo.external_attr |= 0x10 # MS-DOS directory flag - else: - zinfo.external_attr = 0o600 << 16 # ?rw------- - else: - zinfo = zinfo_or_arcname - - if not self.fp: - raise ValueError( - "Attempt to write to ZIP archive that was already closed") - if self._writing: - raise ValueError( - "Can't write to ZIP archive while an open writing handle exists." - ) - - if compress_type is not None: - zinfo.compress_type = compress_type - - if compresslevel is not None: - zinfo.compress_level = compresslevel - - zinfo.file_size = len(data) # Uncompressed size - with self._lock: - with self.open(zinfo, mode='w') as dest: - dest.write(data) - - def mkdir(self, zinfo_or_directory_name, mode=511): - """Creates a directory inside the zip archive.""" - if isinstance(zinfo_or_directory_name, ZipInfo): - zinfo = zinfo_or_directory_name - if not zinfo.is_dir(): - raise ValueError("The given ZipInfo does not describe a directory") - elif isinstance(zinfo_or_directory_name, str): - directory_name = zinfo_or_directory_name - if not directory_name.endswith("/"): - directory_name += "/" - zinfo = ZipInfo(directory_name) - zinfo.compress_size = 0 - zinfo.CRC = 0 - zinfo.external_attr = ((0o40000 | mode) & 0xFFFF) << 16 - zinfo.file_size = 0 - zinfo.external_attr |= 0x10 - else: - raise TypeError("Expected type str or ZipInfo") - - with self._lock: - if self._seekable: - self.fp.seek(self.start_dir) - zinfo.header_offset = self.fp.tell() # Start of header bytes - if zinfo.compress_type == ZIP_LZMA: - # Compressed data includes an end-of-stream (EOS) marker - zinfo.flag_bits |= _MASK_COMPRESS_OPTION_1 - - self._writecheck(zinfo) - self._didModify = True - - self.filelist.append(zinfo) - self.NameToInfo[zinfo.filename] = zinfo - self.fp.write(zinfo.FileHeader(False)) - self.start_dir = self.fp.tell() - - def __del__(self): - """Call the "close()" method in case the user forgot.""" - self.close() - - def close(self): - """Close the file, and for mode 'w', 'x' and 'a' write the ending - records.""" - if self.fp is None: - return - - if self._writing: - raise ValueError("Can't close the ZIP file while there is " - "an open writing handle on it. 
" - "Close the writing handle before closing the zip.") - - try: - if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records - with self._lock: - if self._seekable: - self.fp.seek(self.start_dir) - self._write_end_record() - finally: - fp = self.fp - self.fp = None - self._fpclose(fp) - - def _write_end_record(self): - for zinfo in self.filelist: # write central directory - dt = zinfo.date_time - dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] - dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) - extra = [] - if zinfo.file_size > ZIP64_LIMIT \ - or zinfo.compress_size > ZIP64_LIMIT: - extra.append(zinfo.file_size) - extra.append(zinfo.compress_size) - file_size = 0xffffffff - compress_size = 0xffffffff - else: - file_size = zinfo.file_size - compress_size = zinfo.compress_size - - if zinfo.header_offset > ZIP64_LIMIT: - extra.append(zinfo.header_offset) - header_offset = 0xffffffff - else: - header_offset = zinfo.header_offset - - extra_data = zinfo.extra - min_version = 0 - if extra: - # Append a ZIP64 field to the extra's - extra_data = _Extra.strip(extra_data, (1,)) - extra_data = struct.pack( - ' ZIP_FILECOUNT_LIMIT: - requires_zip64 = "Files count" - elif centDirOffset > ZIP64_LIMIT: - requires_zip64 = "Central directory offset" - elif centDirSize > ZIP64_LIMIT: - requires_zip64 = "Central directory size" - if requires_zip64: - # Need to write the ZIP64 end-of-archive records - if not self._allowZip64: - raise LargeZipFile(requires_zip64 + - " would require ZIP64 extensions") - zip64endrec = struct.pack( - structEndArchive64, stringEndArchive64, - sizeEndCentDir64 - 12, 45, 45, 0, 0, centDirCount, centDirCount, - centDirSize, centDirOffset) - self.fp.write(zip64endrec) - - zip64locrec = struct.pack( - structEndArchive64Locator, - stringEndArchive64Locator, 0, pos2, 1) - self.fp.write(zip64locrec) - centDirCount = min(centDirCount, 0xFFFF) - centDirSize = min(centDirSize, 0xFFFFFFFF) - centDirOffset = min(centDirOffset, 0xFFFFFFFF) - - endrec = struct.pack(structEndArchive, stringEndArchive, - 0, 0, centDirCount, centDirCount, - centDirSize, centDirOffset, len(self._comment)) - self.fp.write(endrec) - self.fp.write(self._comment) - if self.mode == "a": - self.fp.truncate() - self.fp.flush() - - def _fpclose(self, fp): - assert self._fileRefCnt > 0 - self._fileRefCnt -= 1 - if not self._fileRefCnt and not self._filePassed: - fp.close() - - -class PyZipFile(ZipFile): - """Class to create ZIP archives with Python library files and packages.""" - - def __init__(self, file, mode="r", compression=ZIP_STORED, - allowZip64=True, optimize=-1): - ZipFile.__init__(self, file, mode=mode, compression=compression, - allowZip64=allowZip64) - self._optimize = optimize - - def writepy(self, pathname, basename="", filterfunc=None): - """Add all files from "pathname" to the ZIP archive. - - If pathname is a package directory, search the directory and - all package subdirectories recursively for all *.py and enter - the modules into the archive. If pathname is a plain - directory, listdir *.py and enter all modules. Else, pathname - must be a Python *.py file and the module will be put into the - archive. Added modules are always module.pyc. - This method will compile the module.py into module.pyc if - necessary. - If filterfunc(pathname) is given, it is called with every argument. - When it is False, the file or directory is skipped. 
- """ - pathname = os.fspath(pathname) - if filterfunc and not filterfunc(pathname): - if self.debug: - label = 'path' if os.path.isdir(pathname) else 'file' - print('%s %r skipped by filterfunc' % (label, pathname)) - return - dir, name = os.path.split(pathname) - if os.path.isdir(pathname): - initname = os.path.join(pathname, "__init__.py") - if os.path.isfile(initname): - # This is a package directory, add it - if basename: - basename = "%s/%s" % (basename, name) - else: - basename = name - if self.debug: - print("Adding package in", pathname, "as", basename) - fname, arcname = self._get_codename(initname[0:-3], basename) - if self.debug: - print("Adding", arcname) - self.write(fname, arcname) - dirlist = sorted(os.listdir(pathname)) - dirlist.remove("__init__.py") - # Add all *.py files and package subdirectories - for filename in dirlist: - path = os.path.join(pathname, filename) - root, ext = os.path.splitext(filename) - if os.path.isdir(path): - if os.path.isfile(os.path.join(path, "__init__.py")): - # This is a package directory, add it - self.writepy(path, basename, - filterfunc=filterfunc) # Recursive call - elif ext == ".py": - if filterfunc and not filterfunc(path): - if self.debug: - print('file %r skipped by filterfunc' % path) - continue - fname, arcname = self._get_codename(path[0:-3], - basename) - if self.debug: - print("Adding", arcname) - self.write(fname, arcname) - else: - # This is NOT a package directory, add its files at top level - if self.debug: - print("Adding files from directory", pathname) - for filename in sorted(os.listdir(pathname)): - path = os.path.join(pathname, filename) - root, ext = os.path.splitext(filename) - if ext == ".py": - if filterfunc and not filterfunc(path): - if self.debug: - print('file %r skipped by filterfunc' % path) - continue - fname, arcname = self._get_codename(path[0:-3], - basename) - if self.debug: - print("Adding", arcname) - self.write(fname, arcname) - else: - if pathname[-3:] != ".py": - raise RuntimeError( - 'Files added with writepy() must end with ".py"') - fname, arcname = self._get_codename(pathname[0:-3], basename) - if self.debug: - print("Adding file", arcname) - self.write(fname, arcname) - - def _get_codename(self, pathname, basename): - """Return (filename, archivename) for the path. - - Given a module name path, return the correct file path and - archive name, compiling if necessary. For example, given - /python/lib/string, return (/python/lib/string.pyc, string). - """ - def _compile(file, optimize=-1): - import py_compile - if self.debug: - print("Compiling", file) - try: - py_compile.compile(file, doraise=True, optimize=optimize) - except py_compile.PyCompileError as err: - print(err.msg) - return False - return True - - file_py = pathname + ".py" - file_pyc = pathname + ".pyc" - pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='') - pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1) - pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2) - if self._optimize == -1: - # legacy mode: use whatever file is present - if (os.path.isfile(file_pyc) and - os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime): - # Use .pyc file. - arcname = fname = file_pyc - elif (os.path.isfile(pycache_opt0) and - os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime): - # Use the __pycache__/*.pyc file, but write it to the legacy pyc - # file name in the archive. 
-                fname = pycache_opt0
-                arcname = file_pyc
-            elif (os.path.isfile(pycache_opt1) and
-                  os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
-                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
-                # file name in the archive.
-                fname = pycache_opt1
-                arcname = file_pyc
-            elif (os.path.isfile(pycache_opt2) and
-                  os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
-                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
-                # file name in the archive.
-                fname = pycache_opt2
-                arcname = file_pyc
-            else:
-                # Compile py into PEP 3147 pyc file.
-                if _compile(file_py):
-                    if sys.flags.optimize == 0:
-                        fname = pycache_opt0
-                    elif sys.flags.optimize == 1:
-                        fname = pycache_opt1
-                    else:
-                        fname = pycache_opt2
-                    arcname = file_pyc
-                else:
-                    fname = arcname = file_py
-        else:
-            # new mode: use given optimization level
-            if self._optimize == 0:
-                fname = pycache_opt0
-                arcname = file_pyc
-            else:
-                arcname = file_pyc
-                if self._optimize == 1:
-                    fname = pycache_opt1
-                elif self._optimize == 2:
-                    fname = pycache_opt2
-                else:
-                    msg = "invalid value for 'optimize': {!r}".format(self._optimize)
-                    raise ValueError(msg)
-            if not (os.path.isfile(fname) and
-                    os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
-                if not _compile(file_py, optimize=self._optimize):
-                    fname = arcname = file_py
-        archivename = os.path.split(arcname)[1]
-        if basename:
-            archivename = "%s/%s" % (basename, archivename)
-        return (fname, archivename)
-
-
-def main(args=None):
-    import argparse
-
-    description = 'A simple command-line interface for zipfile module.'
-    parser = argparse.ArgumentParser(description=description)
-    group = parser.add_mutually_exclusive_group(required=True)
-    group.add_argument('-l', '--list', metavar='<zipfile>',
-                       help='Show listing of a zipfile')
-    group.add_argument('-e', '--extract', nargs=2,
-                       metavar=('<zipfile>', '<output_dir>'),
-                       help='Extract zipfile into target dir')
-    group.add_argument('-c', '--create', nargs='+',
-                       metavar=('<name>', '<file>'),
-                       help='Create zipfile from sources')
-    group.add_argument('-t', '--test', metavar='<zipfile>',
-                       help='Test if a zipfile is valid')
-    parser.add_argument('--metadata-encoding', metavar='<encoding>',
-                        help='Specify encoding of member names for -l, -e and -t')
-    args = parser.parse_args(args)
-
-    encoding = args.metadata_encoding
-
-    if args.test is not None:
-        src = args.test
-        with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
-            badfile = zf.testzip()
-        if badfile:
-            print("The following enclosed file is corrupted: {!r}".format(badfile))
-        print("Done testing")
-
-    elif args.list is not None:
-        src = args.list
-        with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
-            zf.printdir()
-
-    elif args.extract is not None:
-        src, curdir = args.extract
-        with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
-            zf.extractall(curdir)
-
-    elif args.create is not None:
-        if encoding:
-            print("Non-conforming encodings not supported with -c.",
-                  file=sys.stderr)
-            sys.exit(1)
-
-        zip_name = args.create.pop(0)
-        files = args.create
-
-        def addToZip(zf, path, zippath):
-            if os.path.isfile(path):
-                zf.write(path, zippath, ZIP_DEFLATED)
-            elif os.path.isdir(path):
-                if zippath:
-                    zf.write(path, zippath)
-                for nm in sorted(os.listdir(path)):
-                    addToZip(zf,
-                             os.path.join(path, nm), os.path.join(zippath, nm))
-            # else: ignore
-
-        with ZipFile(zip_name, 'w') as zf:
-            for path in files:
-                zippath = os.path.basename(path)
-                if not zippath:
-                    zippath = os.path.basename(os.path.dirname(path))
-                if zippath in ('', os.curdir, os.pardir):
-                    zippath = ''
-                addToZip(zf, path, zippath)
-
-
-from
._path import ( # noqa: E402 - Path, - - # used privately for tests - CompleteDirs, # noqa: F401 -) diff --git a/Python313_13_x86_Template/Lib/zipfile/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/zipfile/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 6e1ee558..00000000 Binary files a/Python313_13_x86_Template/Lib/zipfile/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/zipfile/_path/__init__.py b/Python313_13_x86_Template/Lib/zipfile/_path/__init__.py deleted file mode 100644 index 02f81171..00000000 --- a/Python313_13_x86_Template/Lib/zipfile/_path/__init__.py +++ /dev/null @@ -1,452 +0,0 @@ -""" -A Path-like interface for zipfiles. - -This codebase is shared between zipfile.Path in the stdlib -and zipp in PyPI. See -https://github.com/python/importlib_metadata/wiki/Development-Methodology -for more detail. -""" - -import contextlib -import io -import itertools -import pathlib -import posixpath -import re -import stat -import sys -import zipfile - -from .glob import Translator - -__all__ = ['Path'] - - -def _parents(path): - """ - Given a path with elements separated by - posixpath.sep, generate all parents of that path. - - >>> list(_parents('b/d')) - ['b'] - >>> list(_parents('/b/d/')) - ['/b'] - >>> list(_parents('b/d/f/')) - ['b/d', 'b'] - >>> list(_parents('b')) - [] - >>> list(_parents('')) - [] - """ - return itertools.islice(_ancestry(path), 1, None) - - -def _ancestry(path): - """ - Given a path with elements separated by - posixpath.sep, generate all elements of that path. - - >>> list(_ancestry('b/d')) - ['b/d', 'b'] - >>> list(_ancestry('/b/d/')) - ['/b/d', '/b'] - >>> list(_ancestry('b/d/f/')) - ['b/d/f', 'b/d', 'b'] - >>> list(_ancestry('b')) - ['b'] - >>> list(_ancestry('')) - [] - - Multiple separators are treated like a single. - - >>> list(_ancestry('//b//d///f//')) - ['//b//d///f', '//b//d', '//b'] - """ - path = path.rstrip(posixpath.sep) - while path.rstrip(posixpath.sep): - yield path - path, tail = posixpath.split(path) - - -_dedupe = dict.fromkeys -"""Deduplicate an iterable in original order""" - - -def _difference(minuend, subtrahend): - """ - Return items in minuend not in subtrahend, retaining order - with O(1) lookup. - """ - return itertools.filterfalse(set(subtrahend).__contains__, minuend) - - -class InitializedState: - """ - Mix-in to save the initialization state for pickling. - """ - - def __init__(self, *args, **kwargs): - self.__args = args - self.__kwargs = kwargs - super().__init__(*args, **kwargs) - - def __getstate__(self): - return self.__args, self.__kwargs - - def __setstate__(self, state): - args, kwargs = state - super().__init__(*args, **kwargs) - - -class CompleteDirs(InitializedState, zipfile.ZipFile): - """ - A ZipFile subclass that ensures that implied directories - are always included in the namelist. 
- - >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt'])) - ['foo/', 'foo/bar/'] - >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt', 'foo/bar/'])) - ['foo/'] - """ - - @staticmethod - def _implied_dirs(names): - parents = itertools.chain.from_iterable(map(_parents, names)) - as_dirs = (p + posixpath.sep for p in parents) - return _dedupe(_difference(as_dirs, names)) - - def namelist(self): - names = super().namelist() - return names + list(self._implied_dirs(names)) - - def _name_set(self): - return set(self.namelist()) - - def resolve_dir(self, name): - """ - If the name represents a directory, return that name - as a directory (with the trailing slash). - """ - names = self._name_set() - dirname = name + '/' - dir_match = name not in names and dirname in names - return dirname if dir_match else name - - def getinfo(self, name): - """ - Supplement getinfo for implied dirs. - """ - try: - return super().getinfo(name) - except KeyError: - if not name.endswith('/') or name not in self._name_set(): - raise - return zipfile.ZipInfo(filename=name) - - @classmethod - def make(cls, source): - """ - Given a source (filename or zipfile), return an - appropriate CompleteDirs subclass. - """ - if isinstance(source, CompleteDirs): - return source - - if not isinstance(source, zipfile.ZipFile): - return cls(source) - - # Only allow for FastLookup when supplied zipfile is read-only - if 'r' not in source.mode: - cls = CompleteDirs - - source.__class__ = cls - return source - - @classmethod - def inject(cls, zf: zipfile.ZipFile) -> zipfile.ZipFile: - """ - Given a writable zip file zf, inject directory entries for - any directories implied by the presence of children. - """ - for name in cls._implied_dirs(zf.namelist()): - zf.writestr(name, b"") - return zf - - -class FastLookup(CompleteDirs): - """ - ZipFile subclass to ensure implicit - dirs exist and are resolved rapidly. - """ - - def namelist(self): - with contextlib.suppress(AttributeError): - return self.__names - self.__names = super().namelist() - return self.__names - - def _name_set(self): - with contextlib.suppress(AttributeError): - return self.__lookup - self.__lookup = super()._name_set() - return self.__lookup - -def _extract_text_encoding(encoding=None, *args, **kwargs): - # compute stack level so that the caller of the caller sees any warning. - is_pypy = sys.implementation.name == 'pypy' - # PyPy no longer special cased after 7.3.19 (or maybe 7.3.18) - # See jaraco/zipp#143 - is_old_pypi = is_pypy and sys.pypy_version_info < (7, 3, 19) - stack_level = 3 + is_old_pypi - return io.text_encoding(encoding, stack_level), args, kwargs - - -class Path: - """ - A :class:`importlib.resources.abc.Traversable` interface for zip files. - - Implements many of the features users enjoy from - :class:`pathlib.Path`. - - Consider a zip file with this structure:: - - . - ├── a.txt - └── b - ├── c.txt - └── d - └── e.txt - - >>> data = io.BytesIO() - >>> zf = ZipFile(data, 'w') - >>> zf.writestr('a.txt', 'content of a') - >>> zf.writestr('b/c.txt', 'content of c') - >>> zf.writestr('b/d/e.txt', 'content of e') - >>> zf.filename = 'mem/abcde.zip' - - Path accepts the zipfile object itself or a filename - - >>> path = Path(zf) - - From there, several path operations are available. 
- - Directory iteration (including the zip file itself): - - >>> a, b = path.iterdir() - >>> a - Path('mem/abcde.zip', 'a.txt') - >>> b - Path('mem/abcde.zip', 'b/') - - name property: - - >>> b.name - 'b' - - join with divide operator: - - >>> c = b / 'c.txt' - >>> c - Path('mem/abcde.zip', 'b/c.txt') - >>> c.name - 'c.txt' - - Read text: - - >>> c.read_text(encoding='utf-8') - 'content of c' - - existence: - - >>> c.exists() - True - >>> (b / 'missing.txt').exists() - False - - Coercion to string: - - >>> import os - >>> str(c).replace(os.sep, posixpath.sep) - 'mem/abcde.zip/b/c.txt' - - At the root, ``name``, ``filename``, and ``parent`` - resolve to the zipfile. - - >>> str(path) - 'mem/abcde.zip/' - >>> path.name - 'abcde.zip' - >>> path.filename == pathlib.Path('mem/abcde.zip') - True - >>> str(path.parent) - 'mem' - - If the zipfile has no filename, such attributes are not - valid and accessing them will raise an Exception. - - >>> zf.filename = None - >>> path.name - Traceback (most recent call last): - ... - TypeError: ... - - >>> path.filename - Traceback (most recent call last): - ... - TypeError: ... - - >>> path.parent - Traceback (most recent call last): - ... - TypeError: ... - - # workaround python/cpython#106763 - >>> pass - """ - - __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" - - def __init__(self, root, at=""): - """ - Construct a Path from a ZipFile or filename. - - Note: When the source is an existing ZipFile object, - its type (__class__) will be mutated to a - specialized type. If the caller wishes to retain the - original type, the caller should either create a - separate ZipFile object or pass a filename. - """ - self.root = FastLookup.make(root) - self.at = at - - def __eq__(self, other): - """ - >>> Path(zipfile.ZipFile(io.BytesIO(), 'w')) == 'foo' - False - """ - if self.__class__ is not other.__class__: - return NotImplemented - return (self.root, self.at) == (other.root, other.at) - - def __hash__(self): - return hash((self.root, self.at)) - - def open(self, mode='r', *args, pwd=None, **kwargs): - """ - Open this entry as text or binary following the semantics - of ``pathlib.Path.open()`` by passing arguments through - to io.TextIOWrapper(). 
- """ - if self.is_dir(): - raise IsADirectoryError(self) - zip_mode = mode[0] - if zip_mode == 'r' and not self.exists(): - raise FileNotFoundError(self) - stream = self.root.open(self.at, zip_mode, pwd=pwd) - if 'b' in mode: - if args or kwargs: - raise ValueError("encoding args invalid for binary operation") - return stream - # Text mode: - encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) - return io.TextIOWrapper(stream, encoding, *args, **kwargs) - - def _base(self): - return pathlib.PurePosixPath(self.at) if self.at else self.filename - - @property - def name(self): - return self._base().name - - @property - def suffix(self): - return self._base().suffix - - @property - def suffixes(self): - return self._base().suffixes - - @property - def stem(self): - return self._base().stem - - @property - def filename(self): - return pathlib.Path(self.root.filename).joinpath(self.at) - - def read_text(self, *args, **kwargs): - encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) - with self.open('r', encoding, *args, **kwargs) as strm: - return strm.read() - - def read_bytes(self): - with self.open('rb') as strm: - return strm.read() - - def _is_child(self, path): - return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") - - def _next(self, at): - return self.__class__(self.root, at) - - def is_dir(self): - return not self.at or self.at.endswith("/") - - def is_file(self): - return self.exists() and not self.is_dir() - - def exists(self): - return self.at in self.root._name_set() - - def iterdir(self): - if not self.is_dir(): - raise ValueError("Can't listdir a file") - subs = map(self._next, self.root.namelist()) - return filter(self._is_child, subs) - - def match(self, path_pattern): - return pathlib.PurePosixPath(self.at).match(path_pattern) - - def is_symlink(self): - """ - Return whether this path is a symlink. 
- """ - info = self.root.getinfo(self.at) - mode = info.external_attr >> 16 - return stat.S_ISLNK(mode) - - def glob(self, pattern): - if not pattern: - raise ValueError(f"Unacceptable pattern: {pattern!r}") - - prefix = re.escape(self.at) - tr = Translator(seps='/') - matches = re.compile(prefix + tr.translate(pattern)).fullmatch - return map(self._next, filter(matches, self.root.namelist())) - - def rglob(self, pattern): - return self.glob(f'**/{pattern}') - - def relative_to(self, other, *extra): - return posixpath.relpath(str(self), str(other.joinpath(*extra))) - - def __str__(self): - return posixpath.join(self.root.filename, self.at) - - def __repr__(self): - return self.__repr.format(self=self) - - def joinpath(self, *other): - next = posixpath.join(self.at, *other) - return self._next(self.root.resolve_dir(next)) - - __truediv__ = joinpath - - @property - def parent(self): - if not self.at: - return self.filename.parent - parent_at = posixpath.dirname(self.at.rstrip('/')) - if parent_at: - parent_at += '/' - return self._next(parent_at) diff --git a/Python313_13_x86_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-313.pyc b/Python313_13_x86_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index f32d8612..00000000 Binary files a/Python313_13_x86_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/zipfile/_path/__pycache__/glob.cpython-313.pyc b/Python313_13_x86_Template/Lib/zipfile/_path/__pycache__/glob.cpython-313.pyc deleted file mode 100644 index b7a9dca7..00000000 Binary files a/Python313_13_x86_Template/Lib/zipfile/_path/__pycache__/glob.cpython-313.pyc and /dev/null differ diff --git a/Python313_13_x86_Template/Lib/zipfile/_path/glob.py b/Python313_13_x86_Template/Lib/zipfile/_path/glob.py deleted file mode 100644 index 4ed74cc4..00000000 --- a/Python313_13_x86_Template/Lib/zipfile/_path/glob.py +++ /dev/null @@ -1,113 +0,0 @@ -import os -import re - -_default_seps = os.sep + str(os.altsep) * bool(os.altsep) - - -class Translator: - """ - >>> Translator('xyz') - Traceback (most recent call last): - ... - AssertionError: Invalid separators - - >>> Translator('') - Traceback (most recent call last): - ... - AssertionError: Invalid separators - """ - - seps: str - - def __init__(self, seps: str = _default_seps): - assert seps and set(seps) <= set(_default_seps), "Invalid separators" - self.seps = seps - - def translate(self, pattern): - """ - Given a glob pattern, produce a regex that matches it. - """ - return self.extend(self.match_dirs(self.translate_core(pattern))) - - def extend(self, pattern): - r""" - Extend regex for pattern-wide concerns. - - Apply '(?s:)' to create a non-matching group that - matches newlines (valid on Unix). - - Append '\Z' to imply fullmatch even when match is used. - """ - return rf'(?s:{pattern})\Z' - - def match_dirs(self, pattern): - """ - Ensure that zipfile.Path directory names are matched. - - zipfile.Path directory names always end in a slash. - """ - return rf'{pattern}[/]?' - - def translate_core(self, pattern): - r""" - Given a glob pattern, produce a regex that matches it. 
-
-        >>> t = Translator()
-        >>> t.translate_core('*.txt').replace('\\\\', '')
-        '[^/]*\\.txt'
-        >>> t.translate_core('a?txt')
-        'a[^/]txt'
-        >>> t.translate_core('**/*').replace('\\\\', '')
-        '.*/[^/][^/]*'
-        """
-        self.restrict_rglob(pattern)
-        return ''.join(map(self.replace, separate(self.star_not_empty(pattern))))
-
-    def replace(self, match):
-        """
-        Perform the replacements for a match from :func:`separate`.
-        """
-        return match.group('set') or (
-            re.escape(match.group(0))
-            .replace('\\*\\*', r'.*')
-            .replace('\\*', rf'[^{re.escape(self.seps)}]*')
-            .replace('\\?', r'[^/]')
-        )
-
-    def restrict_rglob(self, pattern):
-        """
-        Raise ValueError if ** appears in anything but a full path segment.
-
-        >>> Translator().translate('**foo')
-        Traceback (most recent call last):
-        ...
-        ValueError: ** must appear alone in a path segment
-        """
-        seps_pattern = rf'[{re.escape(self.seps)}]+'
-        segments = re.split(seps_pattern, pattern)
-        if any('**' in segment and segment != '**' for segment in segments):
-            raise ValueError("** must appear alone in a path segment")
-
-    def star_not_empty(self, pattern):
-        """
-        Ensure that * will not match an empty segment.
-        """
-
-        def handle_segment(match):
-            segment = match.group(0)
-            return '?*' if segment == '*' else segment
-
-        not_seps_pattern = rf'[^{re.escape(self.seps)}]+'
-        return re.sub(not_seps_pattern, handle_segment, pattern)
-
-
-def separate(pattern):
-    """
-    Separate out character sets to avoid translating their contents.
-
-    >>> [m.group(0) for m in separate('*.txt')]
-    ['*.txt']
-    >>> [m.group(0) for m in separate('a[?]txt')]
-    ['a', '[?]', 'txt']
-    """
-    return re.finditer(r'([^\[]+)|(?P<set>[\[].*?[\]])|([\[][^\]]*$)', pattern)
diff --git a/Python313_13_x86_Template/Lib/zipimport.py b/Python313_13_x86_Template/Lib/zipimport.py
deleted file mode 100644
index fb312be1..00000000
--- a/Python313_13_x86_Template/Lib/zipimport.py
+++ /dev/null
@@ -1,803 +0,0 @@
-"""zipimport provides support for importing Python modules from Zip archives.
-
-This module exports two objects:
-- zipimporter: a class; its constructor takes a path to a Zip archive.
-- ZipImportError: exception raised by zipimporter objects. It's a
-  subclass of ImportError, so it can be caught as ImportError, too.
-
-It is usually not needed to use the zipimport module explicitly; it is
-used by the builtin import mechanism for sys.path items that are paths
-to Zip archives.
-""" - -#from importlib import _bootstrap_external -#from importlib import _bootstrap # for _verbose_message -import _frozen_importlib_external as _bootstrap_external -from _frozen_importlib_external import _unpack_uint16, _unpack_uint32, _unpack_uint64 -import _frozen_importlib as _bootstrap # for _verbose_message -import _imp # for check_hash_based_pycs -import _io # for open -import marshal # for loads -import sys # for modules -import time # for mktime -import _warnings # For warn() - -__all__ = ['ZipImportError', 'zipimporter'] - - -path_sep = _bootstrap_external.path_sep -alt_path_sep = _bootstrap_external.path_separators[1:] - - -class ZipImportError(ImportError): - pass - -# _read_directory() cache -_zip_directory_cache = {} - -_module_type = type(sys) - -END_CENTRAL_DIR_SIZE = 22 -END_CENTRAL_DIR_SIZE_64 = 56 -END_CENTRAL_DIR_LOCATOR_SIZE_64 = 20 -STRING_END_ARCHIVE = b'PK\x05\x06' # standard EOCD signature -STRING_END_LOCATOR_64 = b'PK\x06\x07' # Zip64 EOCD Locator signature -STRING_END_ZIP_64 = b'PK\x06\x06' # Zip64 EOCD signature -MAX_COMMENT_LEN = (1 << 16) - 1 -MAX_UINT32 = 0xffffffff -ZIP64_EXTRA_TAG = 0x1 - -class zipimporter(_bootstrap_external._LoaderBasics): - """zipimporter(archivepath) -> zipimporter object - - Create a new zipimporter instance. 'archivepath' must be a path to - a zipfile, or to a specific path inside a zipfile. For example, it can be - '/tmp/myimport.zip', or '/tmp/myimport.zip/mydirectory', if mydirectory is a - valid directory inside the archive. - - 'ZipImportError is raised if 'archivepath' doesn't point to a valid Zip - archive. - - The 'archive' attribute of zipimporter objects contains the name of the - zipfile targeted. - """ - - # Split the "subdirectory" from the Zip archive path, lookup a matching - # entry in sys.path_importer_cache, fetch the file directory from there - # if found, or else read it from the archive. - def __init__(self, path): - if not isinstance(path, str): - raise TypeError(f"expected str, not {type(path)!r}") - if not path: - raise ZipImportError('archive path is empty', path=path) - if alt_path_sep: - path = path.replace(alt_path_sep, path_sep) - - prefix = [] - while True: - try: - st = _bootstrap_external._path_stat(path) - except (OSError, ValueError): - # On Windows a ValueError is raised for too long paths. - # Back up one path element. - dirname, basename = _bootstrap_external._path_split(path) - if dirname == path: - raise ZipImportError('not a Zip file', path=path) - path = dirname - prefix.append(basename) - else: - # it exists - if (st.st_mode & 0o170000) != 0o100000: # stat.S_ISREG - # it's a not file - raise ZipImportError('not a Zip file', path=path) - break - - if path not in _zip_directory_cache: - _zip_directory_cache[path] = _read_directory(path) - self.archive = path - # a prefix directory following the ZIP file path. - self.prefix = _bootstrap_external._path_join(*prefix[::-1]) - if self.prefix: - self.prefix += path_sep - - - def find_spec(self, fullname, target=None): - """Create a ModuleSpec for the specified module. - - Returns None if the module cannot be found. - """ - module_info = _get_module_info(self, fullname) - if module_info is not None: - return _bootstrap.spec_from_loader(fullname, self, is_package=module_info) - else: - # Not a module or regular package. See if this is a directory, and - # therefore possibly a portion of a namespace package. - - # We're only interested in the last path component of fullname - # earlier components are recorded in self.prefix. 
- modpath = _get_module_path(self, fullname) - if _is_dir(self, modpath): - # This is possibly a portion of a namespace - # package. Return the string representing its path, - # without a trailing separator. - path = f'{self.archive}{path_sep}{modpath}' - spec = _bootstrap.ModuleSpec(name=fullname, loader=None, - is_package=True) - spec.submodule_search_locations.append(path) - return spec - else: - return None - - def get_code(self, fullname): - """get_code(fullname) -> code object. - - Return the code object for the specified module. Raise ZipImportError - if the module couldn't be imported. - """ - code, ispackage, modpath = _get_module_code(self, fullname) - return code - - - def get_data(self, pathname): - """get_data(pathname) -> string with file data. - - Return the data associated with 'pathname'. Raise OSError if - the file wasn't found. - """ - if alt_path_sep: - pathname = pathname.replace(alt_path_sep, path_sep) - - key = pathname - if pathname.startswith(self.archive + path_sep): - key = pathname[len(self.archive + path_sep):] - - try: - toc_entry = self._get_files()[key] - except KeyError: - raise OSError(0, '', key) - return _get_data(self.archive, toc_entry) - - - # Return a string matching __file__ for the named module - def get_filename(self, fullname): - """get_filename(fullname) -> filename string. - - Return the filename for the specified module or raise ZipImportError - if it couldn't be imported. - """ - # Deciding the filename requires working out where the code - # would come from if the module was actually loaded - code, ispackage, modpath = _get_module_code(self, fullname) - return modpath - - - def get_source(self, fullname): - """get_source(fullname) -> source string. - - Return the source code for the specified module. Raise ZipImportError - if the module couldn't be found, return None if the archive does - contain the module, but has no source for it. - """ - mi = _get_module_info(self, fullname) - if mi is None: - raise ZipImportError(f"can't find module {fullname!r}", name=fullname) - - path = _get_module_path(self, fullname) - if mi: - fullpath = _bootstrap_external._path_join(path, '__init__.py') - else: - fullpath = f'{path}.py' - - try: - toc_entry = self._get_files()[fullpath] - except KeyError: - # we have the module, but no source - return None - return _get_data(self.archive, toc_entry).decode() - - - # Return a bool signifying whether the module is a package or not. - def is_package(self, fullname): - """is_package(fullname) -> bool. - - Return True if the module specified by fullname is a package. - Raise ZipImportError if the module couldn't be found. - """ - mi = _get_module_info(self, fullname) - if mi is None: - raise ZipImportError(f"can't find module {fullname!r}", name=fullname) - return mi - - - # Load and return the module named by 'fullname'. - def load_module(self, fullname): - """load_module(fullname) -> module. - - Load the module specified by 'fullname'. 'fullname' must be the - fully qualified (dotted) module name. It returns the imported - module, or raises ZipImportError if it could not be imported. - - Deprecated since Python 3.10. Use exec_module() instead. 
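The deprecation note above points at the spec-based replacement. A minimal sketch of that path, assuming a hypothetical archive 'bundle.zip' containing a top-level module 'mod' (both names are placeholders, not from this diff):

    import importlib.util
    import zipimport

    zi = zipimport.zipimporter('bundle.zip')   # hypothetical archive on disk
    spec = zi.find_spec('mod')                 # returns a ModuleSpec, or None if absent
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)            # replaces the deprecated zi.load_module('mod')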
- """ - msg = ("zipimport.zipimporter.load_module() is deprecated and slated for " - "removal in Python 3.12; use exec_module() instead") - _warnings.warn(msg, DeprecationWarning) - code, ispackage, modpath = _get_module_code(self, fullname) - mod = sys.modules.get(fullname) - if mod is None or not isinstance(mod, _module_type): - mod = _module_type(fullname) - sys.modules[fullname] = mod - mod.__loader__ = self - - try: - if ispackage: - # add __path__ to the module *before* the code gets - # executed - path = _get_module_path(self, fullname) - fullpath = _bootstrap_external._path_join(self.archive, path) - mod.__path__ = [fullpath] - - if not hasattr(mod, '__builtins__'): - mod.__builtins__ = __builtins__ - _bootstrap_external._fix_up_module(mod.__dict__, fullname, modpath) - exec(code, mod.__dict__) - except: - del sys.modules[fullname] - raise - - try: - mod = sys.modules[fullname] - except KeyError: - raise ImportError(f'Loaded module {fullname!r} not found in sys.modules') - _bootstrap._verbose_message('import {} # loaded from Zip {}', fullname, modpath) - return mod - - - def get_resource_reader(self, fullname): - """Return the ResourceReader for a module in a zip file.""" - from importlib.readers import ZipReader - - return ZipReader(self, fullname) - - - def _get_files(self): - """Return the files within the archive path.""" - try: - files = _zip_directory_cache[self.archive] - except KeyError: - try: - files = _zip_directory_cache[self.archive] = _read_directory(self.archive) - except ZipImportError: - files = {} - - return files - - - def invalidate_caches(self): - """Invalidates the cache of file data of the archive path.""" - _zip_directory_cache.pop(self.archive, None) - - - def __repr__(self): - return f'' - - -# _zip_searchorder defines how we search for a module in the Zip -# archive: we first search for a package __init__, then for -# non-package .pyc, and .py entries. The .pyc entries -# are swapped by initzipimport() if we run in optimized mode. Also, -# '/' is replaced by path_sep there. -_zip_searchorder = ( - (path_sep + '__init__.pyc', True, True), - (path_sep + '__init__.py', False, True), - ('.pyc', True, False), - ('.py', False, False), -) - -# Given a module name, return the potential file path in the -# archive (without extension). -def _get_module_path(self, fullname): - return self.prefix + fullname.rpartition('.')[2] - -# Does this path represent a directory? -def _is_dir(self, path): - # See if this is a "directory". If so, it's eligible to be part - # of a namespace package. We test by seeing if the name, with an - # appended path separator, exists. - dirpath = path + path_sep - # If dirpath is present in self._get_files(), we have a directory. - return dirpath in self._get_files() - -# Return some information about a module. -def _get_module_info(self, fullname): - path = _get_module_path(self, fullname) - for suffix, isbytecode, ispackage in _zip_searchorder: - fullpath = path + suffix - if fullpath in self._get_files(): - return ispackage - return None - - -# implementation - -# _read_directory(archive) -> files dict (new reference) -# -# Given a path to a Zip archive, build a dict, mapping file names -# (local to the archive, using SEP as a separator) to toc entries. 
-# - # A toc_entry is a tuple: - # - # (__file__, # value to use for __file__, available for all files, - # # encoded to the filesystem encoding - # compress, # compression kind; 0 for uncompressed - # data_size, # size of compressed data on disk - # file_size, # size of decompressed data - # file_offset, # offset of file header from start of archive - # time, # mod time of file (in dos format) - # date, # mod date of file (in dos format) - # crc, # crc checksum of the data - # ) - # - # Directories can be recognized by the trailing path_sep in the name, - # data_size and file_offset are 0. - def _read_directory(archive): - try: - fp = _io.open_code(archive) - except OSError: - raise ZipImportError(f"can't open Zip file: {archive!r}", path=archive) - - with fp: - # GH-87235: On macOS all file descriptors for /dev/fd/N share the same - # file offset, reset the file offset after scanning the zipfile directory - # to not cause problems when some runs 'python3 /dev/fd/9 9<myscript.zip' - start_offset = fp.tell() - try: - # Check if there's a comment. - try: - fp.seek(0, 2) - file_size = fp.tell() - except OSError: - raise ZipImportError(f"can't read Zip file: {archive!r}", - path=archive) - max_comment_plus_dirs_size = ( - MAX_COMMENT_LEN + END_CENTRAL_DIR_SIZE + - END_CENTRAL_DIR_SIZE_64 + END_CENTRAL_DIR_LOCATOR_SIZE_64) - max_comment_start = max(file_size - max_comment_plus_dirs_size, 0) - try: - fp.seek(max_comment_start) - data = fp.read(max_comment_plus_dirs_size) - except OSError: - raise ZipImportError(f"can't read Zip file: {archive!r}", - path=archive) - pos = data.rfind(STRING_END_ARCHIVE) - pos64 = data.rfind(STRING_END_ZIP_64) - - if (pos64 >= 0 and pos64+END_CENTRAL_DIR_SIZE_64+END_CENTRAL_DIR_LOCATOR_SIZE_64==pos): - # Zip64 at "correct" offset from standard EOCD - buffer = data[pos64:pos64 + END_CENTRAL_DIR_SIZE_64] - if len(buffer) != END_CENTRAL_DIR_SIZE_64: - raise ZipImportError( - f"corrupt Zip64 file: Expected {END_CENTRAL_DIR_SIZE_64} byte " - f"zip64 central directory, but read {len(buffer)} bytes.", - path=archive) - header_position = file_size - len(data) + pos64 - - central_directory_size = _unpack_uint64(buffer[40:48]) - central_directory_position = _unpack_uint64(buffer[48:56]) - num_entries = _unpack_uint64(buffer[24:32]) - elif pos >= 0: - buffer = data[pos:pos+END_CENTRAL_DIR_SIZE] - if len(buffer) != END_CENTRAL_DIR_SIZE: - raise ZipImportError(f"corrupt Zip file: {archive!r}", - path=archive) - - header_position = file_size - len(data) + pos - - # Buffer now contains a valid EOCD, and header_position gives the - # starting position of it. - central_directory_size = _unpack_uint32(buffer[12:16]) - central_directory_position = _unpack_uint32(buffer[16:20]) - num_entries = _unpack_uint16(buffer[8:10]) - - # N.b. if someday you want to prefer the standard (non-zip64) EOCD, - # you need to adjust position by 76 for arc to be 0. - else: - raise ZipImportError(f'not a Zip file: {archive!r}', - path=archive) - - # Buffer now contains a valid EOCD, and header_position gives the - # starting position of it. - # XXX: These are cursory checks but are not as exact or strict as they - # could be. Checking the arc-adjusted value is probably good too. - if header_position < central_directory_size: - raise ZipImportError(f'bad central directory size: {archive!r}', path=archive) - if header_position < central_directory_position: - raise ZipImportError(f'bad central directory offset: {archive!r}', path=archive) - header_position -= central_directory_size - # On just-a-zipfile these values are the same and arc_offset is zero; if - # the file has some bytes prepended, `arc_offset` is the number of such - # bytes. This is used for pex as well as self-extracting .exe.
- arc_offset = header_position - central_directory_position - if arc_offset < 0: - raise ZipImportError(f'bad central directory size or offset: {archive!r}', path=archive) - - files = {} - # Start of Central Directory - count = 0 - try: - fp.seek(header_position) - except OSError: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - while True: - buffer = fp.read(46) - if len(buffer) < 4: - raise EOFError('EOF read where not expected') - # Start of file header - if buffer[:4] != b'PK\x01\x02': - if count != num_entries: - raise ZipImportError( - f"mismatched num_entries: {count} should be {num_entries} in {archive!r}", - path=archive, - ) - break # Bad: Central Dir File Header - if len(buffer) != 46: - raise EOFError('EOF read where not expected') - flags = _unpack_uint16(buffer[8:10]) - compress = _unpack_uint16(buffer[10:12]) - time = _unpack_uint16(buffer[12:14]) - date = _unpack_uint16(buffer[14:16]) - crc = _unpack_uint32(buffer[16:20]) - data_size = _unpack_uint32(buffer[20:24]) - file_size = _unpack_uint32(buffer[24:28]) - name_size = _unpack_uint16(buffer[28:30]) - extra_size = _unpack_uint16(buffer[30:32]) - comment_size = _unpack_uint16(buffer[32:34]) - file_offset = _unpack_uint32(buffer[42:46]) - header_size = name_size + extra_size + comment_size - - try: - name = fp.read(name_size) - except OSError: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - if len(name) != name_size: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - # On Windows, calling fseek to skip over the fields we don't use is - # slower than reading the data because fseek flushes stdio's - # internal buffers. See issue #8745. - try: - extra_data_len = header_size - name_size - extra_data = memoryview(fp.read(extra_data_len)) - - if len(extra_data) != extra_data_len: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - except OSError: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - - if flags & 0x800: - # UTF-8 file names extension - name = name.decode() - else: - # Historical ZIP filename encoding - try: - name = name.decode('ascii') - except UnicodeDecodeError: - name = name.decode('latin1').translate(cp437_table) - - name = name.replace('/', path_sep) - path = _bootstrap_external._path_join(archive, name) - - # Ordering matches unpacking below. - if ( - file_size == MAX_UINT32 or - data_size == MAX_UINT32 or - file_offset == MAX_UINT32 - ): - # need to decode extra_data looking for a zip64 extra (which might not - # be present) - while extra_data: - if len(extra_data) < 4: - raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) - tag = _unpack_uint16(extra_data[:2]) - size = _unpack_uint16(extra_data[2:4]) - if len(extra_data) < 4 + size: - raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) - if tag == ZIP64_EXTRA_TAG: - if (len(extra_data) - 4) % 8 != 0: - raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) - num_extra_values = (len(extra_data) - 4) // 8 - if num_extra_values > 3: - raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) - import struct - values = list(struct.unpack_from(f"<{min(num_extra_values, 3)}Q", - extra_data, offset=4)) - - # N.b. 
Here be dragons: the ordering of these is different than - # the header fields, and it's really easy to get it wrong since - # naturally-occurring zips that use all 3 are >4GB - if file_size == MAX_UINT32: - file_size = values.pop(0) - if data_size == MAX_UINT32: - data_size = values.pop(0) - if file_offset == MAX_UINT32: - file_offset = values.pop(0) - - break - - # For a typical zip, this bytes-slicing only happens 2-3 times, on - # small data like timestamps and filesizes. - extra_data = extra_data[4+size:] - else: - _bootstrap._verbose_message( - "zipimport: suspected zip64 but no zip64 extra for {!r}", - path, - ) - # XXX These two statements seem swapped because `central_directory_position` - # is a position within the actual file, but `file_offset` (when compared) is - # as encoded in the entry, not adjusted for this file. - # N.b. this must be after we've potentially read the zip64 extra which can - # change `file_offset`. - if file_offset > central_directory_position: - raise ZipImportError(f'bad local header offset: {archive!r}', path=archive) - file_offset += arc_offset - - t = (path, compress, data_size, file_size, file_offset, time, date, crc) - files[name] = t - count += 1 - finally: - fp.seek(start_offset) - _bootstrap._verbose_message('zipimport: found {} names in {!r}', count, archive) - return files - -# During bootstrap, we may need to load the encodings -# package from a ZIP file. But the cp437 encoding is implemented -# in Python in the encodings package. -# -# Break out of this dependency by using the translation table for -# the cp437 encoding. -cp437_table = ( - # ASCII part, 8 rows x 16 chars - '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f' - '\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f' - ' !"#$%&\'()*+,-./' - '0123456789:;<=>?' - '@ABCDEFGHIJKLMNO' - 'PQRSTUVWXYZ[\\]^_' - '`abcdefghijklmno' - 'pqrstuvwxyz{|}~\x7f' - # non-ASCII part, 16 rows x 8 chars - '\xc7\xfc\xe9\xe2\xe4\xe0\xe5\xe7' - '\xea\xeb\xe8\xef\xee\xec\xc4\xc5' - '\xc9\xe6\xc6\xf4\xf6\xf2\xfb\xf9' - '\xff\xd6\xdc\xa2\xa3\xa5\u20a7\u0192' - '\xe1\xed\xf3\xfa\xf1\xd1\xaa\xba' - '\xbf\u2310\xac\xbd\xbc\xa1\xab\xbb' - '\u2591\u2592\u2593\u2502\u2524\u2561\u2562\u2556' - '\u2555\u2563\u2551\u2557\u255d\u255c\u255b\u2510' - '\u2514\u2534\u252c\u251c\u2500\u253c\u255e\u255f' - '\u255a\u2554\u2569\u2566\u2560\u2550\u256c\u2567' - '\u2568\u2564\u2565\u2559\u2558\u2552\u2553\u256b' - '\u256a\u2518\u250c\u2588\u2584\u258c\u2590\u2580' - '\u03b1\xdf\u0393\u03c0\u03a3\u03c3\xb5\u03c4' - '\u03a6\u0398\u03a9\u03b4\u221e\u03c6\u03b5\u2229' - '\u2261\xb1\u2265\u2264\u2320\u2321\xf7\u2248' - '\xb0\u2219\xb7\u221a\u207f\xb2\u25a0\xa0' -) - -_importing_zlib = False - -# Return the zlib.decompress function object; raise ZipImportError if zlib -# cannot be imported. The function is cached when found, so subsequent calls -# don't import zlib again. -def _get_decompress_func(): - global _importing_zlib - if _importing_zlib: - # Someone has a zlib.py[co] in their Zip file - # let's avoid a stack overflow.
- _bootstrap._verbose_message('zipimport: zlib UNAVAILABLE') - raise ZipImportError("can't decompress data; zlib not available") - - _importing_zlib = True - try: - from zlib import decompress - except Exception: - _bootstrap._verbose_message('zipimport: zlib UNAVAILABLE') - raise ZipImportError("can't decompress data; zlib not available") - finally: - _importing_zlib = False - - _bootstrap._verbose_message('zipimport: zlib available') - return decompress - -# Given a path to a Zip file and a toc_entry, return the (uncompressed) data. -def _get_data(archive, toc_entry): - datapath, compress, data_size, file_size, file_offset, time, date, crc = toc_entry - if data_size < 0: - raise ZipImportError('negative data size') - - with _io.open_code(archive) as fp: - # Check to make sure the local file header is correct - try: - fp.seek(file_offset) - except OSError: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - buffer = fp.read(30) - if len(buffer) != 30: - raise EOFError('EOF read where not expected') - - if buffer[:4] != b'PK\x03\x04': - # Bad: Local File Header - raise ZipImportError(f'bad local file header: {archive!r}', path=archive) - - name_size = _unpack_uint16(buffer[26:28]) - extra_size = _unpack_uint16(buffer[28:30]) - header_size = 30 + name_size + extra_size - file_offset += header_size # Start of file data - try: - fp.seek(file_offset) - except OSError: - raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) - raw_data = fp.read(data_size) - if len(raw_data) != data_size: - raise OSError("zipimport: can't read data") - - if compress == 0: - # data is not compressed - return raw_data - - # Decompress with zlib - try: - decompress = _get_decompress_func() - except Exception: - raise ZipImportError("can't decompress data; zlib not available") - return decompress(raw_data, -15) - - -# Lenient date/time comparison function. The precision of the mtime -# in the archive is lower than the mtime stored in a .pyc: we -# must allow a difference of at most one second. -def _eq_mtime(t1, t2): - # dostime only stores even seconds, so be lenient - return abs(t1 - t2) <= 1 - - -# Given the contents of a .py[co] file, unmarshal the data -# and return the code object. Raises ImportError if the magic word doesn't -# match, or if the recorded .py[co] metadata does not match the source. -def _unmarshal_code(self, pathname, fullpath, fullname, data): - exc_details = { - 'name': fullname, - 'path': fullpath, - } - - flags = _bootstrap_external._classify_pyc(data, fullname, exc_details) - - hash_based = flags & 0b1 != 0 - if hash_based: - check_source = flags & 0b10 != 0 - if (_imp.check_hash_based_pycs != 'never' and - (check_source or _imp.check_hash_based_pycs == 'always')): - source_bytes = _get_pyc_source(self, fullpath) - if source_bytes is not None: - source_hash = _imp.source_hash( - _bootstrap_external._RAW_MAGIC_NUMBER, - source_bytes, - ) - - _bootstrap_external._validate_hash_pyc( - data, source_hash, fullname, exc_details) - else: - source_mtime, source_size = \ - _get_mtime_and_size_of_source(self, fullpath) - - if source_mtime: - # We don't use _bootstrap_external._validate_timestamp_pyc - # to allow for a more lenient timestamp check.
- if (not _eq_mtime(_unpack_uint32(data[8:12]), source_mtime) or - _unpack_uint32(data[12:16]) != source_size): - _bootstrap._verbose_message( - f'bytecode is stale for {fullname!r}') - return None - - code = marshal.loads(data[16:]) - if not isinstance(code, _code_type): - raise TypeError(f'compiled module {pathname!r} is not a code object') - return code - -_code_type = type(_unmarshal_code.__code__) - - -# Replace any occurrences of '\r\n?' in the input string with '\n'. -# This converts DOS and Mac line endings to Unix line endings. -def _normalize_line_endings(source): - source = source.replace(b'\r\n', b'\n') - source = source.replace(b'\r', b'\n') - return source - -# Given a string buffer containing Python source code, compile it -# and return a code object. -def _compile_source(pathname, source): - source = _normalize_line_endings(source) - return compile(source, pathname, 'exec', dont_inherit=True) - -# Convert the date/time values found in the Zip archive to a value -# that's compatible with the time stamp stored in .pyc files. -def _parse_dostime(d, t): - return time.mktime(( - (d >> 9) + 1980, # bits 9..15: year - (d >> 5) & 0xF, # bits 5..8: month - d & 0x1F, # bits 0..4: day - t >> 11, # bits 11..15: hours - (t >> 5) & 0x3F, # bits 5..10: minutes - (t & 0x1F) * 2, # bits 0..4: seconds / 2 - -1, -1, -1)) - -# Given a path to a .pyc file in the archive, return the -# modification time of the matching .py file and its size, -# or (0, 0) if no source is available. -def _get_mtime_and_size_of_source(self, path): - try: - # strip 'c' or 'o' from *.py[co] - assert path[-1:] in ('c', 'o') - path = path[:-1] - toc_entry = self._get_files()[path] - # fetch the time stamp of the .py file for comparison - # with an embedded pyc time stamp - time = toc_entry[5] - date = toc_entry[6] - uncompressed_size = toc_entry[3] - return _parse_dostime(date, time), uncompressed_size - except (KeyError, IndexError, TypeError): - return 0, 0 - - -# Given a path to a .pyc file in the archive, return the -# contents of the matching .py file, or None if no source -# is available. -def _get_pyc_source(self, path): - # strip 'c' or 'o' from *.py[co] - assert path[-1:] in ('c', 'o') - path = path[:-1] - - try: - toc_entry = self._get_files()[path] - except KeyError: - return None - else: - return _get_data(self.archive, toc_entry) - - -# Get the code object associated with the module specified by -# 'fullname'.
-def _get_module_code(self, fullname): - path = _get_module_path(self, fullname) - import_error = None - for suffix, isbytecode, ispackage in _zip_searchorder: - fullpath = path + suffix - _bootstrap._verbose_message('trying {}{}{}', self.archive, path_sep, fullpath, verbosity=2) - try: - toc_entry = self._get_files()[fullpath] - except KeyError: - pass - else: - modpath = toc_entry[0] - data = _get_data(self.archive, toc_entry) - code = None - if isbytecode: - try: - code = _unmarshal_code(self, modpath, fullpath, fullname, data) - except ImportError as exc: - import_error = exc - else: - code = _compile_source(modpath, data) - if code is None: - # bad magic number or non-matching mtime - # in byte code, try next - continue - modpath = toc_entry[0] - return code, ispackage, modpath - else: - if import_error: - msg = f"module load failed: {import_error}" - raise ZipImportError(msg, name=fullname) from import_error - else: - raise ZipImportError(f"can't find module {fullname!r}", name=fullname) diff --git a/Python313_13_x86_Template/NEWS.txt b/Python313_13_x86_Template/NEWS.txt deleted file mode 100644 index 3c37b7d3..00000000 --- a/Python313_13_x86_Template/NEWS.txt +++ /dev/null @@ -1,51043 +0,0 @@ -+++++++++++ -Python News -+++++++++++ - -What's New in Python 3.13.13 final? -=================================== - -*Release date: 2026-04-07* - -macOS ------ - -- gh-144551: Update macOS installer to use OpenSSL 3.0.19. - -- gh-137586: Invoke :program:`osascript` with absolute path in - :mod:`webbrowser` and :mod:`!turtledemo`. - -Windows -------- - -- gh-144551: Updated bundled version of OpenSSL to 3.0.19. - -- gh-140131: Fix REPL cursor position on Windows when module completion - suggestion line hits console width. - -Tests ------ - -- gh-144418: The Android testbed's emulator RAM has been increased from 2 GB - to 4 GB. - -- gh-146202: Fix a race condition in regrtest: make sure that the temporary - directory is created in the worker process. Previously, temp_cwd() could - fail on Windows if the "build" directory was not created. Patch by Victor - Stinner. - -- gh-144739: When Python was compiled with system expat older than 2.7.2 but - tests run with newer expat, still skip - :class:`!test.test_pyexpat.MemoryProtectionTest`. - -Security --------- - -- gh-145986: :mod:`xml.parsers.expat`: Fixed a crash caused by unbounded C - recursion when converting deeply nested XML content models with - :meth:`~xml.parsers.expat.xmlparser.ElementDeclHandler`. This addresses - :cve:`2026-4224`. - -- gh-145599: Reject control characters in :class:`http.cookies.Morsel` - :meth:`~http.cookies.Morsel.update` and - :meth:`~http.cookies.BaseCookie.js_output`. This addresses - :cve:`2026-3644`. - -- gh-145506: Fixes :cve:`2026-2297` by ensuring that - ``SourcelessFileLoader`` uses :func:`io.open_code` when opening ``.pyc`` - files. - -- gh-144370: Disallow usage of control characters in status in - :mod:`wsgiref.handlers` to prevent HTTP header injections. Patch by - Benedikt Johannes. - -- gh-143930: Reject leading dashes in URLs passed to - :func:`webbrowser.open`. - -Library ------- - -- gh-144503: Fix a regression introduced in 3.14.3 and 3.13.12 where the - :mod:`multiprocessing` ``forkserver`` start method would fail with - :exc:`BrokenPipeError` when the parent process had a very large - :data:`sys.argv`. The argv is now passed to the forkserver as separate - command-line arguments rather than being embedded in the ``-c`` command - string, avoiding the operating system's per-argument length limit.
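[Editor's note] The gh-144503 entry above concerns the ``forkserver`` start method. A minimal way to exercise that start method, should you want to verify the fix locally (illustrative only; ``forkserver`` is available on POSIX platforms, and the pool size and inputs here are arbitrary):

    import multiprocessing as mp

    def square(x):
        return x * x

    if __name__ == '__main__':
        # Select the forkserver start method via an explicit context.
        ctx = mp.get_context('forkserver')
        with ctx.Pool(2) as pool:
            print(pool.map(square, range(4)))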
- -- gh-146613: :mod:`itertools`: Fix a crash in :func:`itertools.groupby` when - the grouper iterator is concurrently mutated. - -- gh-146080: :mod:`ssl`: fix a crash when an SNI callback tries to use an - SSL object that has already been garbage-collected. Patch by Bénédikt - Tran. - -- gh-146090: :mod:`sqlite3`: fix a crash when - :meth:`sqlite3.Connection.create_collation` fails with `SQLITE_BUSY - <https://www.sqlite.org/rescode.html#busy>`__. Patch by Bénédikt Tran. - -- gh-146090: :mod:`sqlite3`: properly raise :exc:`MemoryError` instead of - :exc:`SystemError` when a context callback fails to be allocated. Patch by - Bénédikt Tran. - -- gh-145633: Fix ``struct.pack('f', float)``: use :c:func:`PyFloat_Pack4` to - raise :exc:`OverflowError`. Patch by Sergey B Kirpichev and Victor - Stinner. - -- gh-146310: The :mod:`ensurepip` module no longer looks for ``pip-*.whl`` - wheel packages in the current directory. - -- gh-146083: Update bundled `libexpat <https://github.com/libexpat/libexpat>`_ to - version 2.7.5. - -- gh-146076: :mod:`zoneinfo`: fix crashes when deleting ``_weak_cache`` from - a :class:`zoneinfo.ZoneInfo` subclass. - -- gh-146054: Limit the size of :func:`encodings.search_function` cache. - Found by OSS Fuzz in :oss-fuzz:`493449985`. - -- gh-145883: :mod:`zoneinfo`: Fix heap buffer overflow reads from malformed - TZif data. Found by OSS Fuzz, issues :oss-fuzz:`492245058` and - :oss-fuzz:`492230068`. - -- gh-145750: Avoid undefined behaviour from signed integer overflow when - parsing format strings in the :mod:`struct` module. Found by OSS Fuzz in - :oss-fuzz:`488466741`. - -- gh-145492: Fix infinite recursion in :class:`collections.defaultdict` - ``__repr__`` when a ``defaultdict`` contains itself. Based on analysis by - KowalskiThomas in :gh:`145492`. - -- gh-145623: Fix crash in :mod:`struct` when calling :func:`repr` or - ``__sizeof__()`` on an uninitialized :class:`struct.Struct` object created - via ``Struct.__new__()`` without calling ``__init__()``. - -- gh-145616: Detect Android sysconfig ABI correctly on 32-bit ARM Android on - 64-bit ARM kernels. - -- gh-145376: Fix null pointer dereference in unusual error scenario in - :mod:`hashlib`. - -- gh-145551: Fix InvalidStateError when cancelling a process created by - :func:`asyncio.create_subprocess_exec` or - :func:`asyncio.create_subprocess_shell`. Patch by Daan De Meyer. - -- gh-145417: :mod:`venv`: Prevent incorrect preservation of SELinux context - when copying the ``Activate.ps1`` script. The script inherited the SELinux - security context of the system template directory, rather than the - destination project directory. - -- gh-145301: :mod:`hashlib`: fix a crash when the initialization of the - underlying C extension module fails. - -- gh-145264: Base64 decoder (see :func:`binascii.a2b_base64`, - :func:`base64.b64decode`, etc) no longer ignores excess data after the - first padded quad in non-strict (default) mode. Instead, in conformance - with :rfc:`4648`, section 3.3, it now ignores the pad character, "=", if - it is present before the end of the encoded data. - -- gh-145158: Avoid undefined behaviour from signed integer overflow when - parsing format strings in the :mod:`struct` module. - -- gh-144984: Fix crash in - :meth:`xml.parsers.expat.xmlparser.ExternalEntityParserCreate` when an - allocation fails. The error paths could dereference NULL ``handlers`` and - double-decrement the parent parser's reference count. - -- gh-88091: Fix :func:`unicodedata.decomposition` for Hangul characters.
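[Editor's note] For the gh-88091 entry above: :func:`unicodedata.decomposition` reports a character's canonical decomposition as space-separated code points. A sketch of what a Hangul lookup would be expected to report once the fix applies (hedged: the exact return value is an assumption based on the Unicode decomposition of U+AC00, not taken from the patch):

    import unicodedata

    # U+AC00 ('가') canonically decomposes to U+1100 (choseong) + U+1161 (jungseong).
    print(unicodedata.decomposition('\uac00'))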
- -- gh-144835: Added missing explanations for some parameters in - :func:`glob.glob` and :func:`glob.iglob`. - -- gh-144833: Fixed a use-after-free in :mod:`ssl` when ``SSL_new()`` returns - NULL in ``newPySSLSocket()``. The error was reported via a dangling - pointer after the object had already been freed. - -- gh-144259: Fix inconsistent display of long multiline pasted content in - the REPL. - -- gh-144156: Fix the folding of headers by the :mod:`email` library when - :rfc:`2047` encoded words are used. Now whitespace is correctly preserved - and also correctly added between adjacent encoded words. The latter - property was broken by the fix for gh-92081, which mostly fixed previous - failures to preserve whitespace. - -- gh-66305: Fixed a hang on Windows in the :mod:`tempfile` module when - trying to create a temporary file or subdirectory in a non-writable - directory. - -- gh-140814: :func:`multiprocessing.freeze_support` no longer sets the - default start method as a side effect, which previously caused a - subsequent :func:`multiprocessing.set_start_method` call to raise - :exc:`RuntimeError`. - -- gh-144475: Calling :func:`repr` on :func:`functools.partial` is now safer - when the partial object's internal attributes are replaced while the - string representation is being generated. - -- gh-144538: Bump the version of pip bundled in ensurepip to version 26.0.1. - -- gh-144363: Update bundled `libexpat <https://github.com/libexpat/libexpat>`_ to - 2.7.4 - -- gh-143637: Fixed a crash in socket.sendmsg() that could occur if ancillary - data is mutated re-entrantly during argument parsing. - -- gh-143880: Fix data race in :func:`functools.partial` in the :term:`free - threading` build. - -- gh-143543: Fix a crash in itertools.groupby that could occur when a - user-defined :meth:`~object.__eq__` method re-enters the iterator during - key comparison. - -- gh-140652: Fix a crash in :func:`!_interpchannels.list_all` after closing - a channel. - -- gh-143698: Allow *scheduler* and *setpgroup* arguments to be explicitly - :const:`None` when calling :func:`os.posix_spawn` or - :func:`os.posix_spawnp`. Patch by Bénédikt Tran. - -- gh-143698: Raise :exc:`TypeError` instead of :exc:`SystemError` when the - *scheduler* in :func:`os.posix_spawn` or :func:`os.posix_spawnp` is not a - tuple. Patch by Bénédikt Tran. - -- gh-143304: Fix :class:`ctypes.CDLL` to honor the ``handle`` parameter on - POSIX systems. - -- gh-142781: :mod:`zoneinfo`: fix a crash when instantiating - :class:`~zoneinfo.ZoneInfo` objects for which the internal class-level - cache is inconsistent. - -- gh-142763: Fix a race condition between :class:`zoneinfo.ZoneInfo` - creation and :func:`zoneinfo.ZoneInfo.clear_cache` that could raise - :exc:`KeyError`. - -- gh-142787: Fix assertion failure in :mod:`sqlite3` blob subscript when - slicing with indices that result in an empty slice. - -- gh-142352: Fix :meth:`asyncio.StreamWriter.start_tls` to transfer buffered - data from :class:`~asyncio.StreamReader` to the SSL layer, preventing data - loss when upgrading a connection to TLS mid-stream (e.g., when - implementing PROXY protocol support). - -- gh-141707: Don't change :class:`tarfile.TarInfo` type from ``AREGTYPE`` to - ``DIRTYPE`` when parsing GNU long name or link headers. - -- gh-139933: Improve :exc:`AttributeError` suggestions for classes with a - custom :meth:`~object.__dir__` method returning a list of unsortable - values. Patch by Bénédikt Tran.
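[Editor's note] A minimal reproducer for the gh-139933 scenario above (the class and attribute names are invented for illustration):

    class Config:
        def __dir__(self):
            # A mix of str and int cannot be sorted, which previously
            # broke the AttributeError suggestion machinery.
            return ['timeout', 42]

    try:
        Config().timeot  # deliberate typo to trigger a suggestion
    except AttributeError as exc:
        print(exc)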
- -- gh-138891: Fix ``SyntaxError`` when ``inspect.get_annotations(f, - eval_str=True)`` is called on a function annotated with a :pep:`646` - ``star_expression`` - -- gh-137335: Get rid of any possibility of a name conflict for named pipes - in :mod:`multiprocessing` and :mod:`asyncio` on Windows, no matter how - small. - -- gh-80667: Support lookup for Tangut Ideographs in :mod:`unicodedata`. - -- bpo-40243: Fix :meth:`!unicodedata.ucd_3_2_0.numeric` for non-decimal - values. - -Documentation ------------- - -- gh-126676: Expand :mod:`argparse` documentation for ``type=bool`` with a - demonstration of the surprising behavior and pointers to common - alternatives. - -- gh-145450: Document missing public :class:`wave.Wave_write` getter - methods. - -Core and Builtins ----------------- - -- gh-148157: Fix an unlikely crash when parsing invalid type comments for - function parameters. Found by OSS Fuzz in :oss-fuzz:`492782951`. - -- gh-146615: Fix a crash in :meth:`~object.__get__` for - :c:expr:`METH_METHOD` descriptors when an invalid (non-type) object is - passed as the second argument. Patch by Steven Sun. - -- gh-146128: Fix a bug which could cause constant values to be partially - corrupted in AArch64 JIT code. This issue is theoretical, and hasn't - actually been observed in unmodified Python interpreters. - -- gh-146250: Fixed a memory leak in :exc:`SyntaxError` when re-initializing - it. - -- gh-146245: Fixed reference leaks in :mod:`socket` when audit hooks raise - exceptions in :func:`socket.getaddrinfo` and :meth:`!socket.sendto`. - -- gh-146227: Fix wrong type in ``_Py_atomic_load_uint16`` in the C11 atomics - backend (``pyatomic_std.h``), which used a 32-bit atomic load instead of - 16-bit. Found by Mohammed Zuhaib. - -- gh-146056: Fix :func:`repr` for lists containing ``NULL``\ s. - -- gh-145990: ``python --help-env`` sections are now sorted by environment - variable name. - -- gh-145376: Fix GC tracking in ``structseq.__replace__()``. - -- gh-142183: Avoid a pathological case where repeated calls at a specific - stack depth could be significantly slower. - -- gh-145783: Fix an unlikely crash in the parser when certain errors were - erroneously not propagated. Found by OSS Fuzz in :oss-fuzz:`491369109`. - -- gh-145701: Fix :exc:`SystemError` when ``__classdict__`` or - ``__conditional_annotations__`` is in a class-scope inlined comprehension. - Found by OSS Fuzz in :oss-fuzz:`491105000`. - -- gh-145335: Fix a crash in :func:`os.pathconf` when called with ``-1`` as - the path argument. - -- gh-145234: Fixed a ``SystemError`` in the parser when an encoding cookie - (for example, UTF-7) decodes to carriage returns (``\r``). Newlines are - now normalized after decoding in the string tokenizer. - - Patch by Pablo Galindo. - -- gh-130555: Fix use-after-free in :meth:`dict.clear` when the dictionary - values are embedded in an object and a destructor causes re-entrant - mutation of the dictionary. - -- gh-145008: Fix a bug when calling certain methods at the recursion limit - which manifested as a corruption of Python's operand stack. Patch by Ken - Jin. - -- gh-144872: Fix heap buffer overflow in the parser found by OSS-Fuzz. - -- gh-144766: Fix a crash in fork child process when perf support is enabled. - -- gh-144759: Fix undefined behavior in the lexer when ``start`` and - ``multi_line_start`` pointers are ``NULL`` in - ``_PyLexer_remember_fstring_buffers()`` and - ``_PyLexer_restore_fstring_buffers()``.
The ``NULL`` pointer arithmetic - (``NULL - valid_pointer``) is now guarded with explicit ``NULL`` checks. - -- gh-144601: Fix crash when importing a module whose ``PyInit`` function - raises an exception from a subinterpreter. - -- gh-143636: Fix a crash when calling :meth:`SimpleNamespace.__replace__() - <types.SimpleNamespace.__replace__>` on non-namespace instances. Patch by Bénédikt - Tran. - -- gh-143650: Fix race condition in :mod:`importlib` where a thread could - receive a stale module reference when another thread's import fails. - -- gh-140594: Fix an out of bounds read when a single NUL character is read - from the standard input. Patch by Shamil Abdulaev. - -- gh-91636: While performing garbage collection, clear weakrefs to - unreachable objects that are created during the running of finalizers. If - those weakrefs are not cleared, they could reveal unreachable - objects. - -- gh-130327: Fix erroneous clearing of an object's :attr:`~object.__dict__` - if overwritten at runtime. - -- gh-80667: Literals using the ``\N{name}`` escape syntax can now construct - CJK ideographs and Hangul syllables using case-insensitive names. - -Build ----- - -- gh-146541: The Android testbed can now be built for 32-bit ARM and x86 - targets. - -- gh-146450: The Android build script was modified to improve parity with - other platform build scripts. - -- gh-145801: When Python build is optimized with GCC using PGO, use - ``-fprofile-update=atomic`` option to use atomic operations when updating - profile information. This option reduces the risk of gcov Data Files - (.gcda) corruption which can cause random GCC crashes. Patch by Victor - Stinner. - -- gh-129259: Fix AIX build failures caused by incorrect struct alignment in - ``_Py_CODEUNIT`` and ``_Py_BackoffCounter`` by adding AIX-specific - ``#pragma pack`` directives. - - -What's New in Python 3.13.12 final? -=================================== - -*Release date: 2026-02-03* - -Windows ------- - -- gh-128067: Fix a bug in PyREPL on Windows where output without a trailing - newline was overwritten by the next prompt. - -Tools/Demos ----------- - -- gh-142095: Make gdb 'py-bt' command use frame from thread local state when - available. Patch by Sam Gross and Victor Stinner. - -Tests ----- - -- gh-144415: The Android testbed now distinguishes between stdout/stderr - messages which were triggered by a newline, and those triggered by a - manual call to ``flush``. This fixes logging of progress indicators and - similar content. - -- gh-65784: Add support for parametrized resource ``wantobjects`` in - regrtests, which allows to run Tkinter tests with the specified value of - :data:`!tkinter.wantobjects`, for example ``-u wantobjects=0``. - -- gh-143553: Add support for parametrized resources, such as ``-u - xpickle=2.7``. - -- gh-142836: Accommodated Solaris in - ``test_pdb.test_script_target_anonymous_pipe``. - -- gh-129401: Fix a flaky test in ``test_repr_rlock`` that checks the - representation of :class:`multiprocessing.RLock`. - -- bpo-31391: Forward-port test_xpickle from Python 2 to Python 3 and add the - resource back to test's command line. - -Security -------- - -- gh-144125: :mod:`~email.generator.BytesGenerator` will now refuse to - serialize (write) headers that are unsafely folded or delimited; see - :attr:`~email.policy.Policy.verify_generated_headers`. (Contributed by Bas - Bloemsaat and Petr Viktorin in :gh:`121650`). - -- gh-143935: Fixed a bug in the folding of comments when flattening an email - message using a modern email policy.
Comments consisting of a very long - sequence of non-foldable characters could trigger a forced line wrap that - omitted the required leading space on the continuation line, causing the - remainder of the comment to be interpreted as a new header field. This - enabled header injection with carefully crafted inputs. - -- gh-143925: Reject control characters in ``data:`` URL media types. - -- gh-143919: Reject control characters in :class:`http.cookies.Morsel` - fields and values. - -- gh-143916: Reject C0 control characters within wsgiref.headers.Headers - fields, values, and parameters. - -Library ------- - -- gh-144380: Improve performance of :class:`io.BufferedReader` line - iteration by ~49%. - -- gh-144169: Fix three crashes when non-string keyword arguments are - supplied to objects in the :mod:`ast` module. - -- gh-144100: Fixed a crash in ctypes when using a deprecated - ``POINTER(str)`` type in ``argtypes``. Instead of aborting, ctypes now - raises a proper Python exception when the pointer target type is - unresolved. - -- gh-144050: Fix :func:`stat.filemode` in the pure-Python implementation to - avoid misclassifying invalid mode values as block devices. - -- gh-144023: Fixed validation of file descriptor 0 in posix functions when - used with the follow_symlinks parameter. - -- gh-143999: Fix an issue where :func:`inspect.getgeneratorstate` and - :func:`inspect.getcoroutinestate` could fail for generators wrapped by - :func:`types.coroutine` in the suspended state. - -- gh-143706: Fix :mod:`multiprocessing` forkserver so that :data:`sys.argv` - is correctly set before ``__main__`` is preloaded. Previously, - :data:`sys.argv` was empty during main module import in forkserver child - processes. This fixes a regression introduced in 3.13.8 and 3.14.1. Root - caused by Aaron Wieczorek, test provided by Thomas Watson, thanks! - -- gh-143638: Forbid reentrant calls of the :class:`pickle.Pickler` and - :class:`pickle.Unpickler` methods for the C implementation. Previously, - this could cause crash or data corruption, now concurrent calls of methods - of the same object raise :exc:`RuntimeError`. - -- gh-78724: Raise :exc:`RuntimeError` when a user attempts to call methods - on half-initialized :class:`~struct.Struct` objects, for example ones created - by ``Struct.__new__(Struct)``. Patch by Sergey B Kirpichev. - -- gh-143602: Fix an inconsistency in :meth:`~io.RawIOBase.write` that - could lead to an unexpected buffer overwrite; the buffer exports are now - deduplicated. - -- gh-143547: Fix :func:`sys.unraisablehook` when the hook raises an - exception and changes :func:`sys.unraisablehook`: hold a strong reference - to the old hook. Patch by Victor Stinner. - -- gh-143378: Fix use-after-free crashes when a :class:`~io.BytesIO` object - is concurrently mutated during :meth:`~io.RawIOBase.write` or - :meth:`~io.IOBase.writelines`. - -- gh-143346: Fix incorrect wrapping of the Base64 data in - :class:`!plistlib._PlistWriter` when the indent contains a mix of tabs and - spaces. - -- gh-143310: :mod:`tkinter`: fix a crash when a Python :class:`list` is - mutated during the conversion to a Tcl object (e.g., when setting a Tcl - variable). Patch by Bénédikt Tran. - -- gh-143309: Fix a crash in :func:`os.execve` on non-Windows platforms when - given a custom environment mapping which is then mutated during parsing. - Patch by Bénédikt Tran. - -- gh-143308: :mod:`pickle`: fix use-after-free crashes when a - :class:`~pickle.PickleBuffer` is concurrently mutated by a custom buffer - callback during pickling.
Patch by Bénédikt Tran and Aaron Wieczorek. - -- gh-143237: Fix support for named pipes in the rotating :mod:`logging` - handlers. - -- gh-143249: Fix possible buffer leaks in Windows overlapped I/O during error - handling. - -- gh-143241: :mod:`zoneinfo`: fix infinite loop in :meth:`ZoneInfo.from_file - <zoneinfo.ZoneInfo.from_file>` when parsing a malformed TZif file. Patch - by Fatih Celik. - -- gh-142830: :mod:`sqlite3`: fix use-after-free crashes when the - connection's callbacks are mutated during a callback execution. Patch by - Bénédikt Tran. - -- gh-143200: :mod:`xml.etree.ElementTree`: fix use-after-free crashes in - :meth:`~object.__getitem__` and :meth:`~object.__setitem__` methods of - :class:`~xml.etree.ElementTree.Element` when the element is concurrently - mutated. Patch by Bénédikt Tran. - -- gh-142195: Updated timeout evaluation logic in :mod:`subprocess` to be - compatible with deterministic environments like Shadow where time moves - exactly as requested. - -- gh-143145: Fixed a possible reference leak in ctypes when constructing - results with multiple output parameters on error. - -- gh-122431: Corrected the error message in - :func:`readline.append_history_file` to state that ``nelements`` must be - non-negative instead of positive. - -- gh-143004: Fix a potential use-after-free in - :meth:`collections.Counter.update` when user code mutates the Counter - during an update. - -- gh-143046: The :mod:`asyncio` REPL no longer prints copyright and version - messages in the quiet mode (:option:`-q`). Patch by Bartosz Sławecki. - -- gh-140648: The :mod:`asyncio` REPL now respects the :option:`-I` flag - (isolated mode). Previously, it would load and execute - :envvar:`PYTHONSTARTUP` even if the flag was set. Contributed by Bartosz - Sławecki. - -- gh-142991: Fixed socket operations such as recvfrom() and sendto() for - FreeBSD divert(4) socket. - -- gh-143010: Fixed a bug in :mod:`mailbox` where the precise timing of an - external event could result in the library opening an existing file - instead of a file it expected to create. - -- gh-142881: Fix concurrent and reentrant call of :func:`atexit.unregister`. - -- gh-112127: Fix possible use-after-free in :func:`atexit.unregister` when - the callback is unregistered during comparison. - -- gh-142783: Fix zoneinfo use-after-free with descriptor _weak_cache. A - descriptor used as ``_weak_cache`` could cause crashes during object creation. The - fix ensures proper reference counting for descriptor-provided objects. - -- gh-142754: Add the *ownerDocument* attribute to :mod:`xml.dom.minidom` - elements and attributes created by directly instantiating the ``Element`` - or ``Attr`` class. Note that this way of creating nodes is not supported; - creator functions like :py:meth:`xml.dom.Document.documentElement` should - be used instead. - -- gh-142784: The :mod:`asyncio` REPL now properly closes the loop upon the - end of interactive session. Previously, it could cause surprising - warnings. Contributed by Bartosz Sławecki. - -- gh-142555: :mod:`array`: fix a crash in ``a[i] = v`` when converting *i* - to an index via :meth:`i.__index__ <object.__index__>` or - :meth:`i.__float__ <object.__float__>` mutates the array. - -- gh-142594: Fix crash in ``TextIOWrapper.close()`` when the underlying - buffer's ``closed`` property calls :meth:`~io.TextIOBase.detach`. - -- gh-142451: :mod:`hmac`: Ensure that the :attr:`HMAC.block_size - <hmac.HMAC.block_size>` attribute is correctly copied by :meth:`HMAC.copy - <hmac.HMAC.copy>`. Patch by Bénédikt Tran.
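[Editor's note] The gh-142451 entry above can be checked directly (illustrative; key, message, and digest are arbitrary):

    import hashlib
    import hmac

    mac = hmac.new(b'secret', b'message', hashlib.sha256)
    clone = mac.copy()
    # After the fix, the copy reports the same block size (64 for SHA-256).
    assert clone.block_size == mac.block_size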
- -- gh-142495: :class:`collections.defaultdict` now prioritizes - :meth:`~object.__setitem__` when inserting default values from - ``default_factory``. This prevents race conditions where a default value - would overwrite a value set before ``default_factory`` returns. - -- gh-142651: :mod:`unittest.mock`: fix a thread safety issue where - :attr:`Mock.call_count <unittest.mock.Mock.call_count>` may return - inaccurate values when the mock is called concurrently from multiple - threads. - -- gh-142595: Added type check during initialization of the :mod:`decimal` - module to prevent a crash in case of broken stdlib. Patch by Sergey B - Kirpichev. - -- gh-142517: The non-``compat32`` :mod:`email` policies now correctly handle - refolding encoded words that contain bytes that cannot be decoded in - their specified character set. Previously this resulted in an encoding - exception during folding. - -- gh-112527: The help text for required options in :mod:`argparse` is no longer - extended with " (default: None)". - -- gh-142315: Pdb can now run scripts from anonymous pipes used in process - substitution. Patch by Bartosz Sławecki. - -- gh-142282: Fix :func:`winreg.QueryValueEx` to not accidentally read - garbage buffer under race condition. - -- gh-75949: Fix :mod:`argparse` to preserve ``|`` separators in mutually - exclusive groups when the usage line wraps due to length. - -- gh-68552: ``MisplacedEnvelopeHeaderDefect`` and ``Missing header name`` - defects are now correctly passed to the ``handle_defect`` method of - ``policy`` in :class:`~email.parser.FeedParser`. - -- gh-142006: Fix a bug in the :mod:`email.policy.default` folding algorithm - which incorrectly resulted in a doubled newline when a line ending at - exactly max_line_length was followed by an unfoldable token. - -- gh-105836: Fix :func:`asyncio.run_coroutine_threadsafe` leaving underlying - cancelled asyncio task running. - -- gh-139971: :mod:`pydoc`: Ensure that the link to the online documentation - of a :term:`stdlib` module is correct. - -- gh-139262: Some keystrokes could be swallowed in the new ``PyREPL`` on - Windows, especially when used together with the ALT key. Fix by Chris - Eibl. - -- gh-138897: Improved :data:`license`/:data:`copyright`/:data:`credits` - display in the :term:`REPL`: now uses a pager. - -- gh-79986: Add parsing for ``References`` and ``In-Reply-To`` headers to - the :mod:`email` library that parses the header content as lists of - message id tokens. This prevents them from being folded incorrectly. - -- gh-109263: Starting a process from spawn context in :mod:`multiprocessing` - no longer sets the start method globally. - -- gh-90871: Fixed an off-by-one error concerning the backlog parameter in - :meth:`~asyncio.loop.create_unix_server`. Contributed by Christian - Harries. - -- gh-133253: Fix thread-safety issues in :mod:`linecache`. - -- gh-132715: Skip writing objects during marshalling once a failure has - occurred. - -- gh-127529: Correct behavior of - :func:`!asyncio.selector_events.BaseSelectorEventLoop._accept_connection` - in handling :exc:`ConnectionAbortedError` in a loop. This improves - performance on OpenBSD. - -IDLE ---- - -- gh-143774: Better explain the operation of Format / Format Paragraph. - -Documentation ------------- - -- gh-140806: Add documentation for :func:`enum.bin`. - -Core and Builtins ----------------- - -- gh-144307: Prevent a reference leak in module teardown at interpreter - finalization. - -- gh-144194: Fix error handling in perf jitdump initialization on memory - allocation failure.
- -- gh-141805: Fix crash in :class:`set` when objects with the same hash are - concurrently added to the set after removing an element with the same hash - while the set still contains elements with the same hash. - -- gh-143670: Fixes a crash in ``ga_repr_items_list`` function. - -- gh-143377: Fix a crash in :func:`!_interpreters.capture_exception` when - the exception is incorrectly formatted. Patch by Bénédikt Tran. - -- gh-143189: Fix crash when inserting a non-:class:`str` key into a split - table dictionary when the key matches an existing key in the split table - but has no corresponding value in the dict. - -- gh-143228: Fix use-after-free in perf trampoline when toggling profiling - while threads are running or during interpreter finalization with daemon - threads active. The fix uses reference counting to ensure trampolines are - not freed while any code object could still reference them. Patch by Pablo - Galindo. - -- gh-142664: Fix a use-after-free crash in ``memoryview.__hash__`` - when the ``__hash__`` method of the referenced object - mutates that object or the view. Patch by Bénédikt Tran. - -- gh-142557: Fix a use-after-free crash in ``bytearray.__mod__`` - when the :class:`!bytearray` is mutated while - formatting the ``%``-style arguments. Patch by Bénédikt Tran. - -- gh-143195: Fix use-after-free crashes in :meth:`bytearray.hex` and - :meth:`memoryview.hex` when the separator's :meth:`~object.__len__` - mutates the original object. Patch by Bénédikt Tran. - -- gh-143135: Set :data:`sys.flags.inspect` to ``1`` when - :envvar:`PYTHONINSPECT` is ``0``. Previously, it was set to ``0`` in this - case. - -- gh-143003: Fix an overflow of the shared empty buffer in - :meth:`bytearray.extend` when ``__length_hint__()`` returns 0 for - non-empty iterator. - -- gh-143006: Fix a possible assertion error when comparing negative - non-integer ``float`` and ``int`` with the same number of bits in the - integer part. - -- gh-142776: Fix a file descriptor leak in import.c - -- gh-142829: Fix a use-after-free crash in :class:`contextvars.Context` - comparison when a custom ``__eq__`` method modifies the context via - :meth:`~contextvars.ContextVar.set`. - -- gh-142766: Clear the frame of a generator when :meth:`generator.close` is - called. - -- gh-142737: Tracebacks will be displayed in fallback mode even if - :func:`io.open` is lost. Previously, this would crash the interpreter. - Patch by Bartosz Sławecki. - -- gh-142554: Fix a crash in :func:`divmod` when :func:`!_pylong.int_divmod` - does not return a tuple of length two exactly. Patch by Bénédikt Tran. - -- gh-142560: Fix use-after-free in :class:`bytearray` search-like methods - (:meth:`~bytearray.find`, :meth:`~bytearray.count`, - :meth:`~bytearray.index`, :meth:`~bytearray.rindex`, and - :meth:`~bytearray.rfind`) by marking the storage as exported which causes - reallocation attempts to raise :exc:`BufferError`. For - :func:`~operator.contains`, :meth:`~bytearray.split`, and - :meth:`~bytearray.rsplit` the :ref:`buffer protocol <bufferobjects>` is - used for this. - -- gh-142343: Fix SIGILL crash on m68k due to incorrect assembly constraint. - -- gh-141732: Ensure the :meth:`~object.__repr__` for :exc:`ExceptionGroup` - and :exc:`BaseExceptionGroup` does not change when the exception sequence - that was originally passed in to its constructor is subsequently mutated - (see the snippet after these entries). - -- gh-100964: Fix reference cycle in exhausted generator frames. Patch by - Savannah Ostrowski.
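[Editor's note] A sketch of the repr stability that gh-141732 above guarantees (illustrative values; requires Python 3.11+ for ExceptionGroup):

    excs = [ValueError('a'), TypeError('b')]
    group = ExceptionGroup('demo', excs)
    before = repr(group)
    excs.append(OSError('c'))  # mutate the originally passed-in sequence
    # The repr of the group must be unaffected by the mutation.
    assert repr(group) == before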
- -- gh-140373: Correctly emit ``PY_UNWIND`` event when generator object is - closed. Patch by Mikhail Efimov. - -- gh-138568: Adjusted the built-in :func:`help` function so that empty - inputs are ignored in interactive mode. - -- gh-127773: Do not use the type attribute cache for types with incompatible - :term:`MRO`. - -C API ----- - -- gh-142571: :c:func:`!PyUnstable_CopyPerfMapFile` now checks that opening - the file succeeded before flushing. - -Build ----- - -- gh-142454: When calculating the digest of the JIT stencils input, sort the - hashed files by filenames before adding their content to the hasher. This - ensures deterministic hash input and hence deterministic hash, independent - on filesystem order. - -- gh-141808: When running ``make clean-retain-profile``, keep the generated - JIT stencils. That way, the stencils are not generated twice when - Profile-guided optimization (PGO) is used. It also allows distributors to - supply their own pre-built JIT stencils. - -- gh-138061: Ensure reproducible builds by making JIT stencil header - generation deterministic. - - -What's New in Python 3.13.11 final? -=================================== - -*Release date: 2025-12-05* - -Security -------- - -- gh-142145: Remove quadratic behavior in ``xml.minidom`` node ID cache - clearing. - -- gh-119451: Fix a potential memory denial of service in the - :mod:`http.client` module. When connecting to a malicious server, it could - cause an arbitrary amount of memory to be allocated. This could have led - to symptoms including a :exc:`MemoryError`, swapping, out of memory (OOM) - killed processes or containers, or even system crashes. - -- gh-119452: Fix a potential memory denial of service in the - :mod:`http.server` module. When a malicious user is connected to the CGI - server on Windows, it could cause an arbitrary amount of memory to be - allocated. This could have led to symptoms including a :exc:`MemoryError`, - swapping, out of memory (OOM) killed processes or containers, or even - system crashes. - -Library ------- - -- gh-140797: Revert changes to the undocumented :class:`!re.Scanner` class. - Capturing groups are still allowed for backward compatibility, although - using them can lead to incorrect results. They will be forbidden in future - Python versions. - -- gh-142206: The resource tracker in the :mod:`multiprocessing` module now - uses the original communication protocol, as in Python 3.14.0 and below, - by default. This avoids issues with upgrading Python while it is running. - (Note that such 'in-place' upgrades are not tested.) The tracker remains - compatible with subprocesses that use the new protocol (that is, subprocesses - using Python 3.13.10, 3.14.1 and 3.15). - -Core and Builtins ----------------- - -- gh-142218: Fix crash when inserting into a split table dictionary with a - non :class:`str` key that matches an existing key. - - -What's New in Python 3.13.10 final? -=================================== - -*Release date: 2025-12-02* - -Tools/Demos ----------- - -- gh-141442: The iOS testbed now correctly handles test arguments that - contain spaces. - -Tests ----- - -- gh-140482: Preserve and restore the state of ``stty echo`` as part of the - test environment. - -- gh-140082: Update ``python -m test`` to set ``FORCE_COLOR=1`` when being - run with color enabled so that :mod:`unittest` which is run by it with - redirected output will output in color.
- -- gh-136442: Use exitcode ``1`` instead of ``5`` if - :meth:`unittest.TestCase.setUpClass` raises an exception. - -Security -------- - -- gh-139700: Check consistency of the zip64 end of central directory record. - Support records with "zip64 extensible data" if there are no bytes - prepended to the ZIP file. - -- gh-137836: Add support for the "plaintext" element, RAWTEXT elements "xmp", - "iframe", "noembed" and "noframes", and optionally RAWTEXT element - "noscript" in :class:`html.parser.HTMLParser`. - -- gh-136063: :mod:`email.message`: ensure linear complexity for legacy HTTP - parameters parsing. Patch by Bénédikt Tran. - -- gh-136065: Fix quadratic complexity in :func:`os.path.expandvars`. - -- gh-119342: Fix a potential memory denial of service in the :mod:`plistlib` - module. When reading a Plist file received from untrusted source, it could - cause an arbitrary amount of memory to be allocated. This could have led - to symptoms including a :exc:`MemoryError`, swapping, out of memory (OOM) - killed processes or containers, or even system crashes. - -Library ------- - -- gh-74389: When the stdin being used by a :class:`subprocess.Popen` - instance is closed, this is now ignored in - :meth:`subprocess.Popen.communicate` instead of leaving the class in an - inconsistent state. - -- gh-87512: Fix :func:`subprocess.Popen.communicate` timeout handling on - Windows when writing large input. Previously, the timeout was ignored - during stdin writing, causing the method to block indefinitely if the - child process did not consume input quickly. The stdin write is now - performed in a background thread, allowing the timeout to be properly - enforced. - -- gh-141473: When :meth:`subprocess.Popen.communicate` was called with - *input* and a *timeout* and is called for a second time after a - :exc:`~subprocess.TimeoutExpired` exception before the process has died, - it should no longer hang. - -- gh-59000: Fix :mod:`pdb` breakpoint resolution for class methods when the - module defining the class is not imported. - -- gh-141570: Support :term:`file-like object` raising :exc:`OSError` from - :meth:`~io.IOBase.fileno` in color detection - (``_colorize.can_colorize()``). This can occur when ``sys.stdout`` is - redirected. - -- gh-141659: Fix bad file descriptor errors from ``_posixsubprocess`` on - AIX. - -- gh-141497: :mod:`ipaddress`: ensure that the methods - :meth:`IPv4Network.hosts() <ipaddress.IPv4Network.hosts>` and - :meth:`IPv6Network.hosts() <ipaddress.IPv6Network.hosts>` always return an - iterator. - -- gh-140938: The :func:`statistics.stdev` and :func:`statistics.pstdev` - functions now raise a :exc:`ValueError` when the input contains an - infinity or a NaN. - -- gh-124111: Updated Tcl threading configuration in :mod:`_tkinter` to - assume that threads are always available in Tcl 9 and later. - -- gh-137109: The :mod:`os.fork` and related forking APIs will no longer warn - in the common case where Linux or macOS platform APIs return the number of - threads in a process and find the answer to be 1 even when a - :func:`os.register_at_fork` ``after_in_parent=`` callback (re)starts a - thread. - -- gh-141314: Fix assertion failure in :meth:`io.TextIOWrapper.tell` when - reading files with standalone carriage return (``\r``) line endings. - -- gh-141311: Fix assertion failure in :func:`!io.BytesIO.readinto` and - undefined behavior arising when read position is above capacity in - :class:`io.BytesIO`. - -- gh-141141: Fix a thread safety issue with :func:`base64.b85decode`. - Contributed by Benel Tayar.
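[Editor's note] One way to exercise the gh-141141 fix above from several threads (illustrative; the payload and thread count are arbitrary):

    import base64
    from concurrent.futures import ThreadPoolExecutor

    encoded = base64.b85encode(b'payload' * 64)

    def decode(_):
        return base64.b85decode(encoded)

    # Concurrent decoding must always produce the same bytes.
    with ThreadPoolExecutor(max_workers=8) as pool:
        assert len({bytes(r) for r in pool.map(decode, range(100))}) == 1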
- -- gh-140911: :mod:`collections`: Ensure that the methods - ``UserString.rindex()`` and ``UserString.index()`` accept - :class:`collections.UserString` instances as the *sub* argument. - -- gh-140797: The undocumented :class:`!re.Scanner` class now forbids regular - expressions containing capturing groups in its lexicon patterns. Patterns - using capturing groups could previously lead to segmentation-fault - crashes. Use non-capturing groups ``(?:...)`` instead. - -- gh-140815: :mod:`faulthandler` now detects if a frame or a code object is - invalid or freed. Patch by Victor Stinner. - -- gh-100218: Correctly set :attr:`~OSError.errno` when - :func:`socket.if_nametoindex` or :func:`socket.if_indextoname` raise an - :exc:`OSError`. Patch by Bénédikt Tran. - -- gh-140875: Fix handling of unclosed character references (named and - numerical) followed by the end of file in :class:`html.parser.HTMLParser` - with ``convert_charrefs=False``. - -- gh-140734: :mod:`multiprocessing`: fix off-by-one error when checking the - length of a temporary socket file path. Patch by Bénédikt Tran. - -- gh-140874: Bump the version of pip bundled in :mod:`ensurepip` to version - 25.3. - -- gh-140691: In :mod:`urllib.request`, when opening a FTP URL fails because - a data connection cannot be made, the control connection's socket is now - closed to avoid a :exc:`ResourceWarning`. - -- gh-103847: Fix hang when cancelling a process created by - :func:`asyncio.create_subprocess_exec` or - :func:`asyncio.create_subprocess_shell`. Patch by Kumar Aditya. - -- gh-140590: Fix arguments checking for the - :meth:`!functools.partial.__setstate__` that may lead to internal state - corruption and crash. Patch by Sergey Miryanov. - -- gh-140634: Fix a reference counting bug in - :meth:`!os.sched_param.__reduce__`. - -- gh-140633: Ignore :exc:`AttributeError` when setting a module's - ``__file__`` attribute when loading an extension module packaged as Apple - Framework. - -- gh-140593: :mod:`xml.parsers.expat`: Fix a memory leak that could affect - users with :meth:`~xml.parsers.expat.xmlparser.ElementDeclHandler` set to - a custom element declaration handler. Patch by Sebastian Pipping. - -- gh-140607: Inside :meth:`io.RawIOBase.read`, validate that the count of - bytes returned by :meth:`io.RawIOBase.readinto` is valid (inside the - provided buffer). - -- gh-138162: Fix :class:`logging.LoggerAdapter` with ``merge_extra=True`` - and without the *extra* argument. - -- gh-140474: Fix memory leak in :class:`array.array` when creating arrays - from an empty :class:`str` and the ``u`` type code. - -- gh-140272: Fix memory leak in the :meth:`!clear` method of the - :mod:`dbm.gnu` database. - -- gh-140041: Fix import of :mod:`ctypes` on Android and Cygwin when ABI - flags are present. - -- gh-139905: Add suggestion to error message for :class:`typing.Generic` - subclasses when ``cls.__parameters__`` is missing due to a parent class - failing to call :meth:`super().__init_subclass__() - <object.__init_subclass__>` in its ``__init_subclass__``. - -- gh-139845: Fix the default asyncio REPL so that KeyboardInterrupt is no - longer printed twice. - -- gh-139783: Fix :func:`inspect.getsourcelines` for the case when a - decorator is followed by a comment or an empty line (a reproducer follows - below). - -- gh-70765: :mod:`http.server`: fix default handling of HTTP/0.9 requests in - :class:`~http.server.BaseHTTPRequestHandler`. Previously, - :meth:`!BaseHTTPRequestHandler.parse_request` incorrectly waited for - headers in the request although those are not supported in HTTP/0.9. Patch - by Bénédikt Tran.
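[Editor's note] A reproducer for the gh-139783 case referenced above (illustrative names):

    import functools
    import inspect

    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper

    @deco
    # a comment between the decorator and the definition used to break this
    def greet():
        return 'hi'

    lines, lineno = inspect.getsourcelines(greet)
    print(''.join(lines), lineno)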
-
-- gh-139391: Fix an issue when, on non-Windows platforms, it was not
-  possible to gracefully exit a ``python -m asyncio`` process suspended by
-  Ctrl+Z and later resumed by :manpage:`fg` other than with :manpage:`kill`.
-
-- gh-101828: Fix ``'shift_jisx0213'``, ``'shift_jis_2004'``,
-  ``'euc_jisx0213'`` and ``'euc_jis_2004'`` codecs truncating null chars as
-  they were treated as part of multi-character sequences.
-
-- gh-139246: Fix wrong width computation when pasting zero-width characters
-  into the default REPL.
-
-- gh-90949: Add
-  :meth:`~xml.parsers.expat.xmlparser.SetAllocTrackerActivationThreshold`
-  and
-  :meth:`~xml.parsers.expat.xmlparser.SetAllocTrackerMaximumAmplification`
-  to :ref:`xmlparser <xmlparser-objects>` objects to prevent use of
-  disproportional amounts of dynamic memory from within an Expat parser.
-  Patch by Bénédikt Tran.
-
-- gh-139065: Fix trailing space before a wrapped long word if the line
-  length is exactly *width* in :mod:`textwrap`.
-
-- gh-138993: Dedent :data:`credits` text.
-
-- gh-138859: Fix generic type parameterization raising a :exc:`TypeError`
-  when omitting a :class:`ParamSpec` that has a default which is not a list
-  of types.
-
-- gh-138775: Use of ``python -m`` with :mod:`base64` has been fixed to
-  detect input from a terminal so that it properly notices EOF.
-
-- gh-98896: Fix a failure in the multiprocessing ``resource_tracker`` when
-  SharedMemory names contain colons. Patch by Rani Pinchuk.
-
-- gh-75989: :func:`tarfile.TarFile.extractall` and
-  :func:`tarfile.TarFile.extract` now overwrite symlinks when extracting
-  hardlinks. (Contributed by Alexander Enrique Urieles Nieto in
-  :gh:`75989`.)
-
-- gh-83424: Allow creating a :class:`ctypes.CDLL` without a name when
-  passing a handle as an argument.
-
-- gh-136234: Fix :meth:`asyncio.WriteTransport.writelines` to be robust to
-  connection failure, by using the same behavior as
-  :meth:`~asyncio.WriteTransport.write`.
-
-- gh-136057: Fixed the bug in :mod:`pdb` and :mod:`bdb` where ``next`` and
-  ``step`` could not go over the line if a loop exists in the line.
-
-- gh-135307: :mod:`email`: Fix exception in ``set_content()`` when encoding
-  text and *max_line_length* is set to ``0`` or ``None`` (unlimited).
-
-- gh-134453: Fixed :func:`subprocess.Popen.communicate` ``input=`` handling
-  of :class:`memoryview` instances that were non-byte shaped on POSIX
-  platforms. Those are now properly cast to a byte shaped view instead of
-  truncating the input. Windows platforms did not have this bug.
-
-- gh-102431: Clarify constraints for "logical" arguments in methods of
-  :class:`decimal.Context`.
-
-IDLE
-----
-
-- gh-96491: Deduplicate the version number in the IDLE shell title bar after
-  saving to a file.
-
-Documentation
--------------
-
-- gh-141994: :mod:`xml.sax.handler`: Make the documentation of
-  :data:`xml.sax.handler.feature_external_ges` warn of opening up to
-  `external entity attacks
-  <https://en.wikipedia.org/wiki/XML_external_entity_attack>`_. Patch by
-  Sebastian Pipping.
-
-- gh-140578: Remove an outdated sentence in the documentation for
-  :mod:`multiprocessing` that implied that
-  :class:`concurrent.futures.ThreadPoolExecutor` did not exist.
-
-Core and Builtins
------------------
-
-- gh-142048: Fix quadratically increasing garbage collection delays in the
-  free-threaded build.
-
-- gh-141930: When importing a module, use Python's regular file object to
-  ensure that writes to ``.pyc`` files are complete or an appropriate error
-  is raised.
-
-- gh-120158: Fix inconsistent state when enabling or disabling monitoring
-  events too many times.
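A sketch of the gh-134453 scenario above, assuming a POSIX build with the fix: a non-byte-shaped :class:`memoryview` passed as *input* is cast to bytes, so all 16 bytes reach the child instead of a truncated 4::

    import array
    import subprocess
    import sys

    buf = memoryview(array.array("I", [0x41414141] * 4))  # four 4-byte items
    result = subprocess.run(
        [sys.executable, "-c",
         "import sys; print(len(sys.stdin.buffer.read()))"],
        input=buf, capture_output=True,
    )
    print(result.stdout)  # b'16\n' once the view is cast to a byte shape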
-
-- gh-141579: Fix :func:`sys.activate_stack_trampoline` to properly support
-  the ``perf_jit`` backend. Patch by Pablo Galindo.
-
-- gh-141312: Fix an assertion failure in the ``__setstate__`` method of the
-  range iterator when a non-integer argument is passed. Patch by Sergey
-  Miryanov.
-
-- gh-140939: Fix a memory leak when :class:`bytearray` or :class:`bytes` is
-  formatted with the ``%*b`` format with a large width that results in a
-  :exc:`MemoryError`.
-
-- gh-140530: Fix a reference leak when ``raise exc from cause`` fails. Patch
-  by Bénédikt Tran.
-
-- gh-140576: Fixed a crash in :func:`tokenize.generate_tokens` for certain
-  incorrect input. Patch by Mikhail Efimov.
-
-- gh-140551: Fixed a crash in :class:`dict` if :meth:`dict.clear` is called
-  at the lookup stage. Patch by Mikhail Efimov and Inada Naoki.
-
-- gh-140471: Fix a potential buffer overflow in :class:`ast.AST` node
-  initialization when encountering a malformed :attr:`~ast.AST._fields`
-  containing non-:class:`str` entries.
-
-- gh-140406: Fix a memory leak when an object's :meth:`~object.__hash__`
-  method returns an object that isn't an :class:`int`.
-
-- gh-140306: Fix memory leaks in cross-interpreter channel operations and
-  shared namespace handling.
-
-- gh-140301: Fix a memory leak of ``PyConfig`` in subinterpreters.
-
-- gh-140000: Fix a potential memory leak when a reference cycle exists
-  between an instance of :class:`typing.TypeAliasType`,
-  :class:`typing.TypeVar`, :class:`typing.ParamSpec`, or
-  :class:`typing.TypeVarTuple` and its ``__name__`` attribute. Patch by
-  Mikhail Efimov.
-
-- gh-139748: Fix reference leaks in error branches of functions accepting
-  path strings or bytes, such as :func:`compile` and :func:`os.system`.
-  Patch by Bénédikt Tran.
-
-- gh-139516: Fix the tokenizer erroneously treating a lambda's colon as the
-  start of a format spec in an f-string.
-
-- gh-139640: Fix swallowing of some syntax warnings in different modules if
-  they accidentally have the same message and are emitted from the same
-  line. Fix duplicated warnings in the ``finally`` block.
-
-- gh-137400: Fix a crash in the :term:`free threading` build when disabling
-  profiling or tracing across all threads with
-  :c:func:`PyEval_SetProfileAllThreads` or
-  :c:func:`PyEval_SetTraceAllThreads` or their Python equivalents
-  :func:`threading.settrace_all_threads` and
-  :func:`threading.setprofile_all_threads`.
-
-- gh-133400: Fixed Ctrl+D (^D) behavior in the _pyrepl module to match the
-  old pre-3.13 REPL behavior.
-
-C API
------
-
-- gh-140042: Removed the ``sqlite3_shutdown()`` call that could close
-  connections in :mod:`sqlite3` when used with multiple subinterpreters.
-
-- gh-140487: Fix :c:macro:`Py_RETURN_NOTIMPLEMENTED` in the limited C API
-  3.11 and older: don't treat ``Py_NotImplemented`` as immortal. Patch by
-  Victor Stinner.
-
-
-What's New in Python 3.13.9 final?
-==================================
-
-*Release date: 2025-10-14*
-
-Library
--------
-
-- gh-139783: Fix :func:`inspect.getsourcelines` for the case when a
-  decorator is followed by a comment or an empty line.
-
-
-What's New in Python 3.13.8 final?
-==================================
-
-*Release date: 2025-10-07*
-
-macOS
------
-
-- gh-124111: Update macOS installer to use Tcl/Tk 8.6.17.
-
-- gh-139573: Updated bundled version of OpenSSL to 3.0.18.
-
-Windows
--------
-
-- gh-139573: Updated bundled version of OpenSSL to 3.0.18.
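An illustrative reproduction of the gh-139783 case fixed in 3.13.9: a comment sits between the decorator and the function it decorates (the function body is arbitrary); run it as a script so the source file is available to :mod:`inspect`::

    import functools
    import inspect

    @functools.lru_cache(maxsize=None)
    # this comment previously confused inspect.getsourcelines
    def answer():
        return 42

    lines, lineno = inspect.getsourcelines(answer)
    print(lineno, lines[0].rstrip())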
-
-- gh-138896: Fix an error installing the C runtime on non-updated Windows
-  machines.
-
-Tools/Demos
------------
-
-- gh-139330: The SBOM generation tool didn't cross-check the version and
-  checksum values against the ``Modules/expat/refresh.sh`` script, leading
-  to the values becoming out-of-date during routine updates.
-
-- gh-137873: The iOS test runner has been simplified, resolving some issues
-  that have been observed using the runner in GitHub Actions and Azure
-  Pipelines test environments.
-
-Tests
------
-
-- gh-139208: Fix regrtest ``--fast-ci --verbose``: don't ignore the
-  ``--verbose`` option anymore. Patch by Victor Stinner.
-
-Security
---------
-
-- gh-139400: :mod:`xml.parsers.expat`: Make sure that parent Expat parsers
-  are only garbage-collected once they are no longer referenced by
-  subparsers created by
-  :meth:`~xml.parsers.expat.xmlparser.ExternalEntityParserCreate`. Patch by
-  Sebastian Pipping.
-
-- gh-139283: :mod:`sqlite3`: correctly handle the maximum number of rows to
-  fetch in :meth:`Cursor.fetchmany <sqlite3.Cursor.fetchmany>` and reject
-  negative values for :attr:`Cursor.arraysize <sqlite3.Cursor.arraysize>`.
-  Patch by Bénédikt Tran.
-
-- gh-135661: Fix CDATA section parsing in :class:`html.parser.HTMLParser`
-  according to the HTML5 standard: ``] ]>`` and ``]] >`` no longer end the
-  CDATA section. Add a private method ``_set_support_cdata()`` which can be
-  used to specify how to parse ``<![CDATA[`` --- as a CDATA section in
-  foreign content (SVG or MathML) or as a bogus comment in the HTML
-  namespace.
-
-Library
--------
-
-- gh-139312: Upgrade bundled libexpat to 2.7.3.
-
-- gh-139289: Do a real lazy import of :mod:`rlcompleter` in :mod:`pdb` and
-  restore the existing completer after importing :mod:`rlcompleter`.
-
-- gh-139210: Fix a use-after-free when reporting an unknown event in
-  :func:`xml.etree.ElementTree.iterparse`. Patch by Ken Jin.
-
-- gh-138860: Lazily import :mod:`rlcompleter` in :mod:`pdb` to avoid a
-  deadlock in subprocess.
-
-- gh-112729: Fix a crash when calling ``_interpreters.create`` when the
-  process is out of memory.
-
-- gh-139076: Fix a bug in the :mod:`pydoc` module that was hiding functions
-  in a Python module if they were implemented in an extension module and the
-  module did not have ``__all__``.
-
-- gh-138998: Update bundled libexpat to 2.7.2.
-
-- gh-130567: Fix a possible crash in :func:`locale.strxfrm` due to a
-  platform bug on macOS.
-
-- gh-138779: Support device numbers larger than ``2**63-1`` for the
-  :attr:`~os.stat_result.st_rdev` field of the :class:`os.stat_result`
-  structure.
-
-- gh-128636: Fix a crash in PyREPL when ``os.environ`` is overwritten with
-  an invalid value on macOS.
-
-- gh-88375: Fix normalization of the ``robots.txt`` rules and URLs in the
-  :mod:`urllib.robotparser` module. No longer ignore trailing ``?``.
-  Distinguish raw special characters ``?``, ``=`` and ``&`` from the
-  percent-encoded ones.
-
-- gh-138515: :mod:`email` is added to the Emscripten build.
-
-- gh-111788: Fix parsing errors in the :mod:`urllib.robotparser` module.
-  Don't fail trying to parse weird paths. Don't fail trying to decode
-  non-UTF-8 ``robots.txt`` files.
-
-- gh-138432: :func:`zoneinfo.reset_tzpath` will now convert any
-  :class:`os.PathLike` objects it receives into strings before adding them
-  to ``TZPATH``. It will raise :exc:`TypeError` if anything other than a
-  string is found after this conversion. If given an :class:`os.PathLike`
-  object that represents a relative path, it will now raise
-  :exc:`ValueError` instead of :exc:`TypeError`, and present a more
-  informative error message.
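A small sketch of the gh-138432 behavior above; the absolute search path is only an example::

    import zoneinfo
    from pathlib import Path

    zoneinfo.reset_tzpath(to=[Path("/usr/share/zoneinfo")])
    print(zoneinfo.TZPATH)  # entries are converted to plain strings

    try:
        zoneinfo.reset_tzpath(to=[Path("relative/zoneinfo")])
    except ValueError as exc:  # relative paths now raise ValueError
        print("rejected:", exc)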
-
-- gh-138008: Fix segmentation faults in the :mod:`ctypes` module due to
-  invalid :attr:`~ctypes._CFuncPtr.argtypes`. Patch by Dung Nguyen.
-
-- gh-60462: Fix :func:`locale.strxfrm` on Solaris (and possibly other
-  platforms).
-
-- gh-138204: Forbid expansion of shared anonymous :mod:`memory maps <mmap>`
-  on Linux, which caused a bus error.
-
-- gh-138010: Fix an issue where defining a class with a
-  :func:`warnings.deprecated`-decorated base class may not invoke the
-  correct :meth:`~object.__init_subclass__` method in cases involving
-  multiple inheritance. Patch by Brian Schubert.
-
-- gh-138133: Prevent an infinite traceback loop when sending Ctrl-C to
-  Python through ``strace``.
-
-- gh-134869: Fix an issue where pressing Ctrl+C during tab completion in the
-  REPL would leave the autocompletion menu in a corrupted state.
-
-- gh-137317: :func:`inspect.signature` now correctly handles classes that
-  use a descriptor on a wrapped :meth:`!__init__` or :meth:`!__new__`
-  method. Contributed by Yongyu Yan.
-
-- gh-137754: Fix import of the :mod:`zoneinfo` module if the C
-  implementation of the :mod:`datetime` module is not available.
-
-- gh-137490: Handle :data:`~errno.ECANCELED` in the same way as
-  :data:`~errno.EINTR` in :func:`signal.sigwaitinfo` on NetBSD.
-
-- gh-137477: Fix :func:`!inspect.getblock`, :func:`inspect.getsourcelines`
-  and :func:`inspect.getsource` for generator expressions.
-
-- gh-137017: Fix :obj:`threading.Thread.is_alive` to remain ``True`` until
-  the underlying OS thread is fully cleaned up. This avoids false negatives
-  in edge cases involving thread monitoring or premature
-  :obj:`threading.Thread.is_alive` calls.
-
-- gh-136134: :meth:`!SMTP.auth_cram_md5` now raises an
-  :exc:`~smtplib.SMTPException` instead of a :exc:`ValueError` if Python has
-  been built without MD5 support. In particular, :class:`~smtplib.SMTP`
-  clients will not attempt to use this method even if the remote server is
-  assumed to support it. Patch by Bénédikt Tran.
-
-- gh-136134: :meth:`IMAP4.login_cram_md5 <imaplib.IMAP4.login_cram_md5>` now
-  raises an :exc:`IMAP4.error <imaplib.IMAP4.error>` if CRAM-MD5
-  authentication is not supported. Patch by Bénédikt Tran.
-
-- gh-135386: Fix opening a :mod:`dbm.sqlite3` database for reading from a
-  read-only file or directory.
-
-- gh-126631: Fix a :mod:`multiprocessing` ``forkserver`` bug which prevented
-  ``__main__`` from being preloaded.
-
-- gh-123085: In a bare call to :func:`importlib.resources.files`, ensure the
-  caller's frame is properly detected when ``importlib.resources`` is itself
-  available as a compiled module only (no source).
-
-- gh-118981: Fix a potential hang in ``multiprocessing.popen_spawn_posix``
-  that can happen when the child process dies early, by closing the child
-  fds right away.
-
-- gh-78319: UTF-8 support for the IMAP APPEND command has been made RFC
-  compliant.
-
-- bpo-38735: Fix a failure when importing a module from the root directory
-  on Unix-like platforms with ``sys.pycache_prefix`` set.
-
-- bpo-41839: Allow negative priority values from the
-  :func:`os.sched_get_priority_min` and :func:`os.sched_get_priority_max`
-  functions.
-
-Core and Builtins
------------------
-
-- gh-134466: Don't run PyREPL in a degraded environment where setting
-  termios attributes is not allowed.
-
-- gh-71810: Raise :exc:`OverflowError` for ``(-1).to_bytes()`` for signed
-  conversions when the byte count is zero. Patch by Sergey B Kirpichev.
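The gh-71810 change above in two lines: a signed conversion that cannot represent the value in zero bytes now fails loudly::

    try:
        (-1).to_bytes(0, "big", signed=True)
    except OverflowError as exc:  # raised instead of silently succeeding
        print("rejected:", exc)

    print((-1).to_bytes(1, "big", signed=True))  # b'\xff' still works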
- -- gh-105487: Remove non-existent :meth:`~object.__copy__`, - :meth:`~object.__deepcopy__`, and :attr:`~type.__bases__` from the - :meth:`~object.__dir__` entries of :class:`types.GenericAlias`. - -- gh-134163: Fix a hang when the process is out of memory inside an - exception handler. - -- gh-138479: Fix a crash when a generic object's ``__typing_subst__`` - returns an object that isn't a :class:`tuple`. - -- gh-137576: Fix for incorrect source code being shown in tracebacks from - the Basic REPL when :envvar:`PYTHONSTARTUP` is given. Patch by Adam Hartz. - -- gh-132744: Certain calls now check for runaway recursion and respect the - system recursion limit. - -C API ------ - -- gh-87135: Attempting to acquire the GIL after runtime finalization has - begun in a different thread now causes the thread to hang rather than - terminate, which avoids potential crashes or memory corruption caused by - attempting to terminate a thread that is running code not specifically - designed to support termination. In most cases this hanging is harmless - since the process will soon exit anyway. - - While not officially marked deprecated until 3.14, - ``PyThread_exit_thread`` is no longer called internally and remains solely - for interface compatibility. Its behavior is inconsistent across - platforms, and it can only be used safely in the unlikely case that every - function in the entire call stack has been designed to support the - platform-dependent termination mechanism. It is recommended that users of - this function change their design to not require thread termination. In - the unlikely case that thread termination is needed and can be done - safely, users may migrate to calling platform-specific APIs such as - ``pthread_exit`` (POSIX) or ``_endthreadex`` (Windows) directly. - -Build ------ - -- gh-135734: Python can correctly be configured and built with ``./configure - --enable-optimizations --disable-test-modules``. Previously, the profile - data generation step failed due to PGO tests where immortalization - couldn't be properly suppressed. Patch by Bénédikt Tran. - - -What's New in Python 3.13.7 final? -================================== - -*Release date: 2025-08-14* - -Library -------- - -- gh-137583: Fix a deadlock introduced in 3.13.6 when a call to - :meth:`ssl.SSLSocket.recv ` was blocked in one thread, - and then another method on the object (such as :meth:`ssl.SSLSocket.send - `) was subsequently called in another thread. - -- gh-137044: Return large limit values as positive integers instead of - negative integers in :func:`resource.getrlimit`. Accept large values and - reject negative values (except :data:`~resource.RLIM_INFINITY`) for limits - in :func:`resource.setrlimit`. - -- gh-136914: Fix retrieval of :attr:`doctest.DocTest.lineno` for objects - decorated with :func:`functools.cache` or - :class:`functools.cached_property`. - -- gh-131788: Make ``ResourceTracker.send`` from :mod:`multiprocessing` - re-entrant safe - -Documentation -------------- - -- gh-136155: We are now checking for fatal errors in EPUB builds in CI. - -Core and Builtins ------------------ - -- gh-137400: Fix a crash in the :term:`free threading` build when disabling - profiling or tracing across all threads with - :c:func:`PyEval_SetProfileAllThreads` or - :c:func:`PyEval_SetTraceAllThreads` or their Python equivalents - :func:`threading.settrace_all_threads` and - :func:`threading.setprofile_all_threads`. - - -What's New in Python 3.13.6 final? 
-==================================
-
-*Release date: 2025-08-06*
-
-macOS
------
-
-- gh-137450: macOS installer shell path management improvements: separate
-  the installer ``Shell profile updater`` postinstall script from the
-  ``Update Shell Profile.command`` to enable more robust error handling.
-
-- gh-137134: Update macOS installer to ship with SQLite version 3.50.4.
-
-Windows
--------
-
-- gh-137134: Update Windows installer to ship with SQLite 3.50.4.
-
-Tools/Demos
------------
-
-- gh-135968: Stubs for ``strip`` are now provided as part of an iOS install.
-
-Tests
------
-
-- gh-135966: The iOS testbed now handles the ``app_packages`` folder as a
-  site directory.
-
-- gh-135494: Fix regrtest to support excluding tests from ``--pgo`` tests.
-  Patch by Victor Stinner.
-
-- gh-135489: Show verbose output for failing tests during the PGO profiling
-  step with --enable-optimizations.
-
-Security
---------
-
-- gh-135661: Fix parsing start and end tags in
-  :class:`html.parser.HTMLParser` according to the HTML5 standard.
-
-  * Whitespace is no longer accepted between ``</`` and the tag name. E.g.
-    ``</ script>`` does not end the script section.
-
-  * Vertical tabulation (``\v``) and non-ASCII whitespace are no longer
-    recognized as whitespace. The only whitespace characters are
-    ``\t\n\r\f`` and space.
-
-  * The null character (U+0000) no longer ends the tag name.
-
-  * Attributes and slashes after the tag name in end tags are now ignored,
-    instead of terminating after the first ``>`` in a quoted attribute
-    value. E.g. ``</script/foo=">"/>``.
-
-  * Multiple slashes and whitespace between the last attribute and the
-    closing ``>`` are now ignored in both start and end tags. E.g.
-    ``<a foo=bar/ //>``.
-
-  * Multiple ``=`` between attribute name and value are no longer collapsed.
-    E.g. ``<a foo==bar>`` produces attribute "foo" with value "=bar".
-
-- gh-102555: Fix comment parsing in :class:`html.parser.HTMLParser`
-  according to the HTML5 standard. ``--!>`` now ends the comment. ``-- >``
-  no longer ends the comment. Support abnormally ended empty comments
-  ``<!-->`` and ``<!--->``.
-
-- gh-135462: Fix quadratic complexity in processing specially crafted input
-  in :class:`html.parser.HTMLParser`. End-of-file errors are now handled
-  according to the HTML5 specs -- comments and declarations are
-  automatically closed, tags are ignored.
-
-- gh-118350: Fix support of escapable raw text mode (elements "textarea" and
-  "title") in :class:`html.parser.HTMLParser`.
-
-Library
--------
-
-- gh-132710: If possible, ensure that :func:`uuid.getnode` returns the same
-  result even across different processes. Previously, the result was
-  constant only within the same process. Patch by Bénédikt Tran.
-
-- gh-137273: Fix a debug assertion failure in :func:`locale.setlocale` on
-  Windows.
-
-- gh-137257: Bump the version of pip bundled in ensurepip to version 25.2.
-
-- gh-81325: :class:`tarfile.TarFile` now accepts a :term:`path-like object`
-  when working on a tar archive. (Contributed by Alexander Enrique Urieles
-  Nieto in :gh:`81325`.)
-
-- gh-130522: Fix an unraisable :exc:`TypeError` raised during
-  :term:`interpreter shutdown` in the :mod:`threading` module.
-
-- gh-130577: :mod:`tarfile` now validates archives to ensure member offsets
-  are non-negative. (Contributed by Alexander Enrique Urieles Nieto in
-  :gh:`130577`.)
-
-- gh-136549: Fix the signature of :func:`threading.excepthook`.
-
-- gh-136523: Fix :class:`wave.Wave_write` emitting an unraisable when open
-  raises.
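A small demonstration of the HTML5 comment rules referenced by the gh-102555 entry above; the inputs are the edge cases that entry names::

    from html.parser import HTMLParser

    class CommentPrinter(HTMLParser):
        def handle_comment(self, data):
            print("comment:", repr(data))

    p = CommentPrinter()
    p.feed("<!--a--!>")  # '--!>' now terminates the comment
    p.feed("<!-->")      # abnormally ended empty comment
    p.close()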
- -- gh-52876: Add missing ``keepends`` (default ``True``) parameter to - :meth:`!codecs.StreamReaderWriter.readline` and - :meth:`!codecs.StreamReaderWriter.readlines`. - -- gh-85702: If ``zoneinfo._common.load_tzdata`` is given a package without a - resource a :exc:`zoneinfo.ZoneInfoNotFoundError` is raised rather than a - :exc:`PermissionError`. Patch by Victor Stinner. - -- gh-134759: Fix :exc:`UnboundLocalError` in - :func:`email.message.Message.get_payload` when the payload to decode is a - :class:`bytes` object. Patch by Kliment Lamonov. - -- gh-136028: Fix parsing month names containing "İ" (U+0130, LATIN CAPITAL - LETTER I WITH DOT ABOVE) in :func:`time.strptime`. This affects locales - az_AZ, ber_DZ, ber_MA and crh_UA. - -- gh-135995: In the palmos encoding, make byte ``0x9b`` decode to ``›`` - (U+203A - SINGLE RIGHT-POINTING ANGLE QUOTATION MARK). - -- gh-53203: Fix :func:`time.strptime` for ``%c`` and ``%x`` formats on - locales byn_ER, wal_ET and lzh_TW, and for ``%X`` format on locales ar_SA, - bg_BG and lzh_TW. - -- gh-91555: An earlier change, which was introduced in 3.13.4, has been - reverted. It disabled logging for a logger during handling of log messages - for that logger. Since the reversion, the behaviour should be as it was - before 3.13.4. - -- gh-135878: Fixes a crash of :class:`types.SimpleNamespace` on :term:`free - threading` builds, when several threads were calling its - :meth:`~object.__repr__` method at the same time. - -- gh-135836: Fix :exc:`IndexError` in :meth:`asyncio.loop.create_connection` - that could occur when non-\ :exc:`OSError` exception is raised during - connection and socket's ``close()`` raises :exc:`!OSError`. - -- gh-135836: Fix :exc:`IndexError` in :meth:`asyncio.loop.create_connection` - that could occur when the Happy Eyeballs algorithm resulted in an empty - exceptions list during connection attempts. - -- gh-135855: Raise :exc:`TypeError` instead of :exc:`SystemError` when - :func:`!_interpreters.set___main___attrs` is passed a non-dict object. - Patch by Brian Schubert. - -- gh-135815: :mod:`netrc`: skip security checks if :func:`os.getuid` is - missing. Patch by Bénédikt Tran. - -- gh-135640: Address bug where it was possible to call - :func:`xml.etree.ElementTree.ElementTree.write` on an ElementTree object - with an invalid root element. This behavior blanked the file passed to - ``write`` if it already existed. - -- gh-135444: Fix :meth:`asyncio.DatagramTransport.sendto` to account for - datagram header size when data cannot be sent. - -- gh-135497: Fix :func:`os.getlogin` failing for longer usernames on - BSD-based platforms. - -- gh-135487: Fix :meth:`!reprlib.Repr.repr_int` when given integers with - more than :func:`sys.get_int_max_str_digits` digits. Patch by Bénédikt - Tran. - -- gh-135335: :mod:`multiprocessing`: Flush ``stdout`` and ``stderr`` after - preloading modules in the ``forkserver``. - -- gh-135244: :mod:`uuid`: when the MAC address cannot be determined, the - 48-bit node ID is now generated with a cryptographically-secure - pseudo-random number generator (CSPRNG) as per :rfc:`RFC 9562, §6.10.3 - <9562#section-6.10-3>`. This affects :func:`~uuid.uuid1`. - -- gh-135069: Fix the "Invalid error handling" exception in - :class:`!encodings.idna.IncrementalDecoder` to correctly replace the - 'errors' parameter. - -- gh-134698: Fix a crash when calling methods of :class:`ssl.SSLContext` or - :class:`ssl.SSLSocket` across multiple threads. 
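For the gh-135487 entry above, a sketch of abbreviating an integer whose decimal form exceeds the int-to-str conversion limit; the exponent is arbitrary::

    import reprlib

    big = 1 << 200_000   # roughly 60,000 decimal digits
    r = reprlib.Repr()
    r.maxlong = 40       # cap the repr length for integers
    print(r.repr(big))   # abbreviated form, no conversion-limit error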
-
-- gh-132124: On POSIX-compliant systems,
-  :func:`!multiprocessing.util.get_temp_dir` now ignores :envvar:`TMPDIR`
-  (and similar environment variables) if the path length of ``AF_UNIX``
-  socket files exceeds the platform-specific maximum length when using the
-  *forkserver* start method. Patch by Bénédikt Tran.
-
-- gh-133439: Fix dot commands with trailing spaces being mistaken for
-  multi-line SQL statements in the sqlite3 command-line interface.
-
-- gh-132969: Prevent the :class:`~concurrent.futures.ProcessPoolExecutor`
-  executor thread, which remains running when :meth:`shutdown(wait=False)
-  <concurrent.futures.Executor.shutdown>` is used, from attempting to
-  adjust the pool's worker processes after the object state has already
-  been reset during shutdown. A combination of conditions, including a
-  worker process having terminated abnormally, resulted in an exception and
-  a potential hang when the still-running executor thread attempted to
-  replace dead workers within the pool.
-
-- gh-130664: Support the ``'_'`` digit separator in formatting of the
-  integral part of :class:`~decimal.Decimal` values. Patch by Sergey B
-  Kirpichev.
-
-- gh-85702: If ``zoneinfo._common.load_tzdata`` is given a package without
-  a resource, a ``ZoneInfoNotFoundError`` is raised rather than an
-  :exc:`IsADirectoryError`.
-
-- gh-130664: Handle a corner case of :class:`~fractions.Fraction`
-  formatting: treat zero-padding (preceding the width field by a zero
-  (``'0'``) character) as equivalent to a fill character of ``'0'`` with an
-  alignment type of ``'='``, just as in the case of :class:`float`.
-
-Documentation
--------------
-
-- gh-135171: Document that the :term:`iterator` for the leftmost
-  :keyword:`!for` clause in a generator expression is created immediately.
-
-Core and Builtins
------------------
-
-- gh-58124: Fix the name of the Python encoding in Unicode errors of the
-  code page codec: use "cp65000" and "cp65001" instead of "CP_UTF7" and
-  "CP_UTF8", which are not valid Python codec names. Patch by Victor
-  Stinner.
-
-- gh-137314: Fixed a regression where raw f-strings incorrectly interpreted
-  escape sequences in format specifications. Raw f-strings now properly
-  preserve literal backslashes in format specs, matching the behavior from
-  Python 3.11. For example, ``rf"{obj:\xFF}"`` now correctly produces
-  ``'\\xFF'`` instead of ``'ÿ'``. Patch by Pablo Galindo.
-
-- gh-136541: Fix some issues with the perf trampolines on x86-64 and
-  aarch64. The trampolines were not being generated correctly for some
-  cases, which could lead to the perf integration not working correctly.
-  Patch by Pablo Galindo.
-
-- gh-109700: Fix memory error handling in :c:func:`PyDict_SetDefault`.
-
-- gh-78465: Fix the error message for ``cls.__new__(cls, ...)`` where
-  ``cls`` is not an instantiable builtin or extension type (with ``tp_new``
-  set to ``NULL``).
-
-- gh-135871: Non-blocking mutex lock attempts now return immediately when
-  the lock is busy instead of briefly spinning in the :term:`free
-  threading` build.
-
-- gh-135607: Fix potential :mod:`weakref` races in an object's destructor
-  on the :term:`free threaded <free threading>` build.
-
-- gh-135496: Fix a typo in the f-string conversion type error
-  ("exclamanation" -> "exclamation").
-
-- gh-130077: Properly raise custom syntax errors when incorrect syntax
-  containing names that are prefixes of soft keywords is encountered. Patch
-  by Pablo Galindo.
-
-- gh-135148: Fixed a bug where f-string debug expressions (using =) would
-  incorrectly strip out parts of strings containing escaped quotes and #
-  characters.
Patch by Pablo Galindo. - -- gh-133136: Limit excess memory usage in the :term:`free threading` build - when a large dictionary or list is resized and accessed by multiple - threads. - -- gh-132617: Fix :meth:`dict.update` modification check that could - incorrectly raise a "dict mutated during update" error when a different - dictionary was modified that happens to share the same underlying keys - object. - -- gh-91153: Fix a crash when a :class:`bytearray` is concurrently mutated - during item assignment. - -- gh-127971: Fix off-by-one read beyond the end of a string in string - search. - -- gh-125723: Fix crash with ``gi_frame.f_locals`` when generator frames - outlive their generator. Patch by Mikhail Efimov. - -Build ------ - -- gh-135497: Fix the detection of ``MAXLOGNAME`` in the ``configure.ac`` - script. - - -What's New in Python 3.13.5 final? -================================== - -*Release date: 2025-06-11* - -Windows -------- - -- gh-135151: Avoid distributing modified :file:`pyconfig.h` in the - traditional installer. Extension module builds must always specify - ``Py_GIL_DISABLED`` when targeting the free-threaded runtime. - -Tests ------ - -- gh-135120: Add :func:`!test.support.subTests`. - -Library -------- - -- gh-133967: Do not normalize :mod:`locale` name 'C.UTF-8' to 'en_US.UTF-8'. - -- gh-135326: Restore support of integer-like objects with :meth:`!__index__` - in :func:`random.getrandbits`. - -- gh-135321: Raise a correct exception for values greater than 0x7fffffff - for the ``BINSTRING`` opcode in the C implementation of :mod:`pickle`. - -- gh-135276: Backported bugfixes in zipfile.Path from zipp 3.23. Fixed - ``.name``, ``.stem`` and other basename-based properties on Windows when - working with a zipfile on disk. - -- gh-134151: :mod:`email`: Fix :exc:`TypeError` in - :func:`email.utils.decode_params` when sorting :rfc:`2231` continuations - that contain an unnumbered section. - -- gh-134152: :mod:`email`: Fix parsing of email message ID with invalid - domain. - -- gh-127081: Fix libc thread safety issues with :mod:`os` by replacing - ``getlogin`` with ``getlogin_r`` re-entrant version. - -- gh-131884: Fix formatting issues in :func:`json.dump` when both *indent* - and *skipkeys* are used. - -Core and Builtins ------------------ - -- gh-135171: Roll back changes to generator and list comprehensions that - went into 3.13.4 to fix GH-127682, but which involved semantic and - bytecode changes not appropriate for a bugfix release. - -C API ------ - -- gh-134989: Fix ``Py_RETURN_NONE``, ``Py_RETURN_TRUE`` and - ``Py_RETURN_FALSE`` macros in the limited C API 3.11 and older: don't - treat ``Py_None``, ``Py_True`` and ``Py_False`` as immortal. Patch by - Victor Stinner. - -- gh-134989: Implement :c:func:`PyObject_DelAttr` and - :c:func:`PyObject_DelAttrString` as macros in the limited C API 3.12 and - older. Patch by Victor Stinner. - - -What's New in Python 3.13.4 final? -================================== - -*Release date: 2025-06-03* - -Windows -------- - -- gh-130727: Fix a race in internal calls into WMI that can result in an - "invalid handle" exception under high load. Patch by Chris Eibl. - -- gh-76023: Make :func:`os.path.realpath` ignore Windows error 1005 when in - non-strict mode. - -- gh-133626: Ensures packages are not accidentally bundled into the - traditional installer. - -- gh-133512: Add warnings to :ref:`launcher` about use of subcommands - belonging to the Python install manager. - -Tests ------ - -- gh-133744: Fix multiprocessing interrupt test. 
Add an event to synchronize
-  the parent process with the child process: wait until the child process
-  starts sleeping. Patch by Victor Stinner.
-
-- gh-133639: Fix ``TestPyReplAutoindent.test_auto_indent_default()`` not
-  running ``input_code``.
-
-- gh-133131: The iOS testbed will now select the most recently released
-  "SE-class" device for testing if a device isn't explicitly specified.
-
-- gh-109981: The test helper that counts the list of open file descriptors
-  now uses the optimised ``/dev/fd`` approach on all Apple platforms, not
-  just macOS. This avoids crashes caused by guarded file descriptors.
-
-Security
---------
-
-- gh-135034: Fixes multiple issues that allowed ``tarfile`` extraction
-  filters (``filter="data"`` and ``filter="tar"``) to be bypassed using
-  crafted symlinks and hard links.
-
-  Addresses :cve:`2024-12718`, :cve:`2025-4138`, :cve:`2025-4330`, and
-  :cve:`2025-4517`.
-
-- gh-133767: Fix use-after-free in the "unicode-escape" decoder with a
-  non-"strict" error handler.
-
-- gh-128840: Short-circuit the processing of long IPv6 addresses early in
-  :mod:`ipaddress` to prevent excessive memory consumption and a minor
-  denial-of-service.
-
-Library
--------
-
-- gh-134718: :func:`ast.dump` now only omits ``None`` and ``[]`` values if
-  they are default values.
-
-- gh-128840: Fix parsing long IPv6 addresses with an embedded IPv4 address.
-
-- gh-134696: Built-in HACL* and OpenSSL implementations of hash function
-  constructors now correctly accept the same *documented* named arguments.
-  For instance, :func:`~hashlib.md5` could previously be invoked as
-  ``md5(data=data)`` or ``md5(string=string)`` depending on the underlying
-  implementation, but these calls were not compatible. Patch by Bénédikt
-  Tran.
-
-- gh-134210: :func:`curses.window.getch` now correctly handles signals.
-  Patch by Bénédikt Tran.
-
-- gh-80334: :func:`multiprocessing.freeze_support` now checks for work on
-  any "spawn" start method platform rather than only on Windows.
-
-- gh-114177: Fix :mod:`asyncio` to not close subprocess pipes which would
-  otherwise error out when the event loop is already closed.
-
-- gh-134152: Fixed :exc:`UnboundLocalError` that could occur during
-  :mod:`email` header parsing if an expected trailing delimiter is missing
-  in some contexts.
-
-- gh-62184: Remove the import of the C implementation of :class:`io.FileIO`
-  from the Python implementation, which has its own implementation.
-
-- gh-133982: Emit :exc:`RuntimeWarning` in the Python implementation of
-  :mod:`io` when the :term:`file-like object <file object>` is not closed
-  explicitly in the presence of multiple I/O layers.
-
-- gh-133890: The :mod:`tarfile` module now handles :exc:`UnicodeEncodeError`
-  in the same way as :exc:`OSError` when it cannot extract a member.
-
-- gh-134097: Fix the interaction of the new :term:`REPL` and the :option:`-X
-  showrefcount <-X>` command line option.
-
-- gh-133889: The generated directory listing page in
-  :class:`http.server.SimpleHTTPRequestHandler` now only shows the decoded
-  path component of the requested URL, and not the query and fragment.
-
-- gh-134098: Fix handling of paths that end with a percent-encoded slash
-  (``%2f`` or ``%2F``) in :class:`http.server.SimpleHTTPRequestHandler`.
-
-- gh-134062: :mod:`ipaddress`: fix collisions in :meth:`~object.__hash__`
-  for :class:`~ipaddress.IPv4Network` and :class:`~ipaddress.IPv6Network`
-  objects.
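A hedged sketch of the gh-134718 distinction above: ``None`` stays in the dump when it is the actual value of a field rather than that field's default::

    import ast

    # value=None is real data here, so it is kept in the dump
    print(ast.dump(ast.Constant(value=None)))
    # a node whose optional fields hold their defaults stays compact
    print(ast.dump(ast.parse("pass").body[0]))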
-
-- gh-133745: In 3.13.3 we accidentally changed the signature of the asyncio
-  ``create_task()`` family of methods and how it calls a custom task factory
-  in a backwards incompatible way. Since some 3rd party libraries have
-  already made changes to work around the issue that might break if we
-  simply reverted the changes, we're instead changing things to be backwards
-  compatible with 3.13.2 while still supporting those workarounds for
-  3.13.3. In particular, the special-casing of ``name`` and ``context`` is
-  back (until 3.14) and consequently eager tasks may still find that their
-  name hasn't been set before they execute their first yielding await.
-
-- gh-71253: Raise :exc:`ValueError` in :func:`open` if *opener* returns a
-  negative file descriptor in the Python implementation of :mod:`io` to
-  match the C implementation.
-
-- gh-77057: Fix handling of invalid markup declarations in
-  :class:`html.parser.HTMLParser`.
-
-- gh-133489: :func:`random.getrandbits` can now generate more than 2\
-  :sup:`31` bits. :func:`random.randbytes` can now generate more than 256
-  MiB.
-
-- gh-133290: Fix an attribute caching issue when setting
-  :attr:`ctypes._Pointer._type_` in the undocumented and deprecated
-  :func:`!ctypes.SetPointerType` function and the undocumented
-  :meth:`!set_type` method.
-
-- gh-132876: ``ldexp()`` on Windows doesn't round subnormal results before
-  Windows 11, but should. Python's :func:`math.ldexp` wrapper now does
-  round them, so results may change slightly, in rare cases of very small
-  results, on Windows versions before 11.
-
-- gh-133089: Use the original timeout value for
-  :exc:`subprocess.TimeoutExpired` when :func:`subprocess.run` is called
-  with a timeout, instead of the sometimes confusing partial remaining
-  timeout value used internally on the final ``wait()``.
-
-- gh-133009: :mod:`xml.etree.ElementTree`: Fix a crash in
-  :meth:`!Element.__deepcopy__` when the element is concurrently mutated.
-  Patch by Bénédikt Tran.
-
-- gh-132995: Bump the version of pip bundled in ensurepip to version 25.1.1.
-
-- gh-132017: Fix an error when ``pyrepl`` is suspended, then resumed and
-  terminated.
-
-- gh-132673: Fix a crash when using ``_align_ = 0`` and ``_fields_ = []`` in
-  a :class:`ctypes.Structure`.
-
-- gh-132527: Include the valid typecode 'w' in the error message when an
-  invalid typecode is passed to :class:`array.array`.
-
-- gh-132439: Fix ``PyREPL`` on Windows: characters entered via AltGr are no
-  longer swallowed. Patch by Chris Eibl.
-
-- gh-132429: Fix support of Bluetooth sockets on NetBSD and DragonFly BSD.
-
-- gh-132106: :meth:`QueueListener.start
-  <logging.handlers.QueueListener.start>` now raises a :exc:`RuntimeError`
-  if the listener is already started.
-
-- gh-132417: Fix a ``NULL`` pointer dereference when a C function called
-  using :mod:`ctypes` with ``restype`` :class:`~ctypes.py_object` returns
-  ``NULL``.
-
-- gh-132385: Fix instance error suggestions triggering potential exceptions
-  in :meth:`object.__getattr__` in :mod:`traceback`.
-
-- gh-132308: A :class:`traceback.TracebackException` now correctly renders
-  the ``__context__`` and ``__cause__`` attributes from :ref:`falsey
-  <truth>` :class:`Exception` instances, and the ``exceptions`` attribute
-  from falsey :class:`ExceptionGroup` instances.
-
-- gh-132250: Fixed the :exc:`SystemError` in :mod:`cProfile` when locating
-  the actual C function of a method raises an exception.
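A minimal sketch of the gh-132106 guard above against double-starting a listener::

    import logging
    import logging.handlers
    import queue

    q = queue.Queue()
    listener = logging.handlers.QueueListener(q, logging.StreamHandler())
    listener.start()
    try:
        listener.start()  # now RuntimeError instead of a second thread
    except RuntimeError as exc:
        print("rejected:", exc)
    finally:
        listener.stop()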
-
-- gh-132063: Prevent exceptions that evaluate as falsey (namely, when their
-  ``__bool__`` method returns ``False`` or their ``__len__`` method returns
-  0) from being ignored by :class:`concurrent.futures.ProcessPoolExecutor`
-  and :class:`concurrent.futures.ThreadPoolExecutor`.
-
-- gh-119605: Respect ``follow_wrapped`` for :meth:`!__init__` and
-  :meth:`!__new__` methods when getting the class signature for a class
-  with :func:`inspect.signature`. Preserve the class signature after
-  wrapping with :func:`warnings.deprecated`. Patch by Xuehai Pan.
-
-- gh-91555: Ignore log messages generated during handling of log messages,
-  to avoid deadlock or infinite recursion. [NOTE: This change has since
-  been reverted.]
-
-- gh-131434: Improve error reporting for an incorrect format in
-  :func:`time.strptime`.
-
-- gh-131127: Systems using LibreSSL now successfully build.
-
-- gh-130999: Avoid exiting the new REPL and offer suggestions even if there
-  are non-string candidates when errors occur.
-
-- gh-130941: Fix :class:`configparser.ConfigParser` parsing empty
-  interpolation with ``allow_no_value`` set to ``True``.
-
-- gh-129098: Fix REPL traceback reporting when using :func:`compile` with a
-  nonexistent file. Patch by Bénédikt Tran.
-
-- gh-130631: :func:`!http.cookiejar.join_header_words` is now more similar
-  to the original Perl version. It now quotes the same set of characters
-  and always quotes values that end with ``"\n"``.
-
-- gh-129719: Fix the missing :data:`!socket.CAN_RAW_ERR_FILTER` constant in
-  the socket module on Linux systems. It was missing since Python 3.11.
-
-- gh-124096: Turn on virtual terminal mode and enable bracketed paste in the
-  REPL on the Windows console. (If the terminal does not support bracketed
-  paste, enabling it does nothing.)
-
-- gh-122559: Remove :meth:`!__reduce__` and :meth:`!__reduce_ex__` methods
-  that always raise :exc:`TypeError` in the C implementation of
-  :class:`io.FileIO`, :class:`io.BufferedReader`, :class:`io.BufferedWriter`
-  and :class:`io.BufferedRandom` and replace them with default
-  :meth:`!__getstate__` methods that raise :exc:`!TypeError`. This restores
-  fine details of the behavior of Python 3.11 and older versions.
-
-- gh-122179: :func:`hashlib.file_digest` now raises :exc:`BlockingIOError`
-  when no data is available during non-blocking I/O. Before, it added
-  spurious null bytes to the digest.
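For the gh-122559 entry above, a sketch of the restored refusal to pickle raw file objects; the file name is illustrative::

    import io
    import pickle

    with io.FileIO("example.txt", "w") as f:
        try:
            pickle.dumps(f)
        except TypeError as exc:  # matches the 3.11 behavior again
            print("rejected:", exc)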
-
-- gh-86155: :meth:`html.parser.HTMLParser.close` no longer loses data when
-  the ``<script>`` element is not closed. Patch by Waylan Limberg.
+    def js_output(self, attrs=None):
+        """Return a string suitable for JavaScript."""
+        output_string = self.OutputString(attrs)
+        return """
+        <script type="text/javascript">
+        <!-- begin hiding
+        document.cookie = \"%s\";
+        // end hiding -->
+        </script>
+        """ % (output_string.replace('"', r'\"'))
+
+    def OutputString(self, attrs=None):
+        # Build up our result
+        #
+        result = []
+        append = result.append
+
+        # First, the key=value pair
+        append("%s=%s" % (self.key, self.coded_value))
+
+        # Now add any defined attributes
+        if attrs is None:
+            attrs = self._reserved
+        items = sorted(self.items())
+        for key, value in items:
+            if value == "":
+                continue
+            if key not in attrs:
+                continue
+            if key == "expires" and isinstance(value, int):
+                append("%s=%s" % (self._reserved[key], _getdate(value)))
+            elif key == "max-age" and isinstance(value, int):
+                append("%s=%d" % (self._reserved[key], value))
+            elif key == "comment" and isinstance(value, str):
+                append("%s=%s" % (self._reserved[key], _quote(value)))
+            elif key in self._flags:
+                if value:
+                    append(str(self._reserved[key]))
+            else:
+                append("%s=%s" % (self._reserved[key], value))
+
+        # Return the result
+        return _semispacejoin(result)
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+
+#
+# Pattern for finding cookie
+#
+# This used to be strict parsing based on the RFC2109 and RFC2068
+# specifications.  I have since discovered that MSIE 3.0x doesn't
+# follow the character rules outlined in those specs.  As a
+# result, the parsing rules here are less strict.
+#
+
+_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
+_LegalValueChars = _LegalKeyChars + r'\[\]'
+_CookiePattern = re.compile(r"""
+    \s*                            # Optional whitespace at start of cookie
+    (?P<key>                       # Start of group 'key'
+    [""" + _LegalKeyChars + r"""]+?   # Any word of at least one letter
+    )                              # End of group 'key'
+    (                              # Optional group: there may not be a value.
+    \s*=\s*                        # Equal Sign
+    (?P<val>                       # Start of group 'val'
+    "(?:[^\\"]|\\.)*"              # Any double-quoted string
+    |                              # or
+    # Special case for "expires" attr
+    (\w{3,6}day|\w{3}),\s          # Day of the week or abbreviated day
+    [\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Date and time in specific format
+    |                              # or
+    [""" + _LegalValueChars + r"""]*  # Any word or empty string
+    )                              # End of group 'val'
+    )?                             # End of optional value group
+    \s*                            # Any number of spaces.
+    (\s+|;|$)                      # Ending either at space, semicolon, or EOS.
+    """, re.ASCII | re.VERBOSE)    # re.ASCII may be removed if safe.
+
+
+# At long last, here is the cookie class.  Using this class is almost just like
+# using a dictionary.  See this module's docstring for example usage.
+#
+class BaseCookie(dict):
+    """A container class for a set of Morsels."""
+
+    def value_decode(self, val):
+        """real_value, coded_value = value_decode(STRING)
+        Called prior to setting a cookie's value from the network
+        representation.  The VALUE is the value read from HTTP
+        header.
+        Override this function to modify the behavior of cookies.
+        """
+        return val, val
+
+    def value_encode(self, val):
+        """real_value, coded_value = value_encode(VALUE)
+        Called prior to setting a cookie's value from the dictionary
+        representation.  The VALUE is the value being assigned.
+        Override this function to modify the behavior of cookies.
+ """ + strval = str(val) + return strval, strval + + def __init__(self, input=None): + if input: + self.load(input) + + def __set(self, key, real_value, coded_value): + """Private method for setting a cookie's value""" + M = self.get(key, Morsel()) + M.set(key, real_value, coded_value) + dict.__setitem__(self, key, M) + + def __setitem__(self, key, value): + """Dictionary style assignment.""" + if isinstance(value, Morsel): + # allow assignment of constructed Morsels (e.g. for pickling) + dict.__setitem__(self, key, value) + else: + rval, cval = self.value_encode(value) + self.__set(key, rval, cval) + + def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"): + """Return a string suitable for HTTP.""" + result = [] + items = sorted(self.items()) + for key, value in items: + value_output = value.output(attrs, header) + if _has_control_character(value_output): + raise CookieError("Control characters are not allowed in cookies") + result.append(value_output) + return sep.join(result) + + __str__ = output + + def __repr__(self): + l = [] + items = sorted(self.items()) + for key, value in items: + l.append('%s=%s' % (key, repr(value.value))) + return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l)) + + def js_output(self, attrs=None): + """Return a string suitable for JavaScript.""" + result = [] + items = sorted(self.items()) + for key, value in items: + result.append(value.js_output(attrs)) + return _nulljoin(result) + + def load(self, rawdata): + """Load cookies from a string (presumably HTTP_COOKIE) or + from a dictionary. Loading cookies from a dictionary 'd' + is equivalent to calling: + map(Cookie.__setitem__, d.keys(), d.values()) + """ + if isinstance(rawdata, str): + self.__parse_string(rawdata) + else: + # self.update() wouldn't call our custom __setitem__ + for key, value in rawdata.items(): + self[key] = value + return + + def __parse_string(self, str, patt=_CookiePattern): + i = 0 # Our starting point + n = len(str) # Length of string + parsed_items = [] # Parsed (type, key, value) triples + morsel_seen = False # A key=value pair was previously encountered + + TYPE_ATTRIBUTE = 1 + TYPE_KEYVALUE = 2 + + # We first parse the whole cookie string and reject it if it's + # syntactically invalid (this helps avoid some classes of injection + # attacks). + while 0 <= i < n: + # Start looking for a cookie + match = patt.match(str, i) + if not match: + # No more cookies + break + + key, value = match.group("key"), match.group("val") + i = match.end(0) + + if key[0] == "$": + if not morsel_seen: + # We ignore attributes which pertain to the cookie + # mechanism as a whole, such as "$Version". + # See RFC 2965. (Does anyone care?) + continue + parsed_items.append((TYPE_ATTRIBUTE, key[1:], value)) + elif key.lower() in Morsel._reserved: + if not morsel_seen: + # Invalid cookie string + return + if value is None: + if key.lower() in Morsel._flags: + parsed_items.append((TYPE_ATTRIBUTE, key, True)) + else: + # Invalid cookie string + return + else: + parsed_items.append((TYPE_ATTRIBUTE, key, _unquote(value))) + elif value is not None: + parsed_items.append((TYPE_KEYVALUE, key, self.value_decode(value))) + morsel_seen = True + else: + # Invalid cookie string + return + + # The cookie string is valid, apply it. 
+ M = None # current morsel + for tp, key, value in parsed_items: + if tp == TYPE_ATTRIBUTE: + assert M is not None + M[key] = value + else: + assert tp == TYPE_KEYVALUE + rval, cval = value + self.__set(key, rval, cval) + M = self[key] + + +class SimpleCookie(BaseCookie): + """ + SimpleCookie supports strings as cookie values. When setting + the value using the dictionary assignment notation, SimpleCookie + calls the builtin str() to convert the value to a string. Values + received from HTTP are kept as strings. + """ + def value_decode(self, val): + return _unquote(val), val + + def value_encode(self, val): + strval = str(val) + return strval, _quote(strval) diff --git a/Python314_4_x64_Template/Lib/http/server.py b/Python314_4_x64_Template/Lib/http/server.py new file mode 100644 index 00000000..ac1f57c2 --- /dev/null +++ b/Python314_4_x64_Template/Lib/http/server.py @@ -0,0 +1,1441 @@ +"""HTTP server classes. + +Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see +SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST, +and (deprecated) CGIHTTPRequestHandler for CGI scripts. + +It does, however, optionally implement HTTP/1.1 persistent connections. + +Notes on CGIHTTPRequestHandler +------------------------------ + +This class is deprecated. It implements GET and POST requests to cgi-bin scripts. + +If the os.fork() function is not present (Windows), subprocess.Popen() is used, +with slightly altered but never documented semantics. Use from a threaded +process is likely to trigger a warning at os.fork() time. + +In all cases, the implementation is intentionally naive -- all +requests are executed synchronously. + +SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL +-- it may execute arbitrary Python code or external programs. + +Note that status code 200 is sent prior to execution of a CGI script, so +scripts cannot send other status codes such as 302 (redirect). + +XXX To do: + +- log requests even later (to capture byte count) +- log user-agent header and other interesting goodies +- send error log to separate file +""" + + +# See also: +# +# HTTP Working Group T. Berners-Lee +# INTERNET-DRAFT R. T. Fielding +# H. Frystyk Nielsen +# Expires September 8, 1995 March 8, 1995 +# +# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt +# +# and +# +# Network Working Group R. Fielding +# Request for Comments: 2616 et al +# Obsoletes: 2068 June 1999 +# Category: Standards Track +# +# URL: http://www.faqs.org/rfcs/rfc2616.html + +# Log files +# --------- +# +# Here's a quote from the NCSA httpd docs about log file format. +# +# | The logfile format is as follows. Each line consists of: +# | +# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb +# | +# | host: Either the DNS name or the IP number of the remote client +# | rfc931: Any information returned by identd for this person, +# | - otherwise. +# | authuser: If user sent a userid for authentication, the user name, +# | - otherwise. +# | DD: Day +# | Mon: Month (calendar name) +# | YYYY: Year +# | hh: hour (24-hour format, the machine's timezone) +# | mm: minutes +# | ss: seconds +# | request: The first line of the HTTP request as sent by the client. +# | ddd: the status code returned by the server, - if not available. +# | bbbb: the total number of bytes sent, +# | *not including the HTTP/1.0 header*, - if not available +# | +# | You can determine the name of the file accessed through request. 
+#
+# (Actually, the latter is only true if you know the server configuration
+# at the time the request was made!)
+
+__version__ = "0.6"
+
+__all__ = [
+    "HTTPServer", "ThreadingHTTPServer",
+    "HTTPSServer", "ThreadingHTTPSServer",
+    "BaseHTTPRequestHandler", "SimpleHTTPRequestHandler",
+    "CGIHTTPRequestHandler",
+]
+
+import copy
+import datetime
+import email.utils
+import html
+import http.client
+import io
+import itertools
+import mimetypes
+import os
+import posixpath
+import select
+import shutil
+import socket
+import socketserver
+import sys
+import time
+import urllib.parse
+
+from http import HTTPStatus
+
+
+# Default error message template
+DEFAULT_ERROR_MESSAGE = """\
+<!DOCTYPE HTML>
+<html lang="en">
+    <head>
+        <meta charset="utf-8">
+        <title>Error response</title>
+    </head>
+    <body>
+        <h1>Error response</h1>
+        <p>Error code: %(code)d</p>
+        <p>Message: %(message)s.</p>
+        <p>Error code explanation: %(code)s - %(explain)s.</p>
+    </body>
+</html>
+"""
+
+DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8"
+
+# Data larger than this will be read in chunks, to prevent extreme
+# overallocation.
+_MIN_READ_BUF_SIZE = 1 << 20
+
+class HTTPServer(socketserver.TCPServer):
+
+    allow_reuse_address = True    # Seems to make sense in testing environment
+    allow_reuse_port = False
+
+    def server_bind(self):
+        """Override server_bind to store the server name."""
+        socketserver.TCPServer.server_bind(self)
+        host, port = self.server_address[:2]
+        self.server_name = socket.getfqdn(host)
+        self.server_port = port
+
+
+class ThreadingHTTPServer(socketserver.ThreadingMixIn, HTTPServer):
+    daemon_threads = True
+
+
+class HTTPSServer(HTTPServer):
+    def __init__(self, server_address, RequestHandlerClass,
+                 bind_and_activate=True, *, certfile, keyfile=None,
+                 password=None, alpn_protocols=None):
+        try:
+            import ssl
+        except ImportError:
+            raise RuntimeError("SSL module is missing; "
+                               "HTTPS support is unavailable")
+
+        self.ssl = ssl
+        self.certfile = certfile
+        self.keyfile = keyfile
+        self.password = password
+        # Support by default HTTP/1.1
+        self.alpn_protocols = (
+            ["http/1.1"] if alpn_protocols is None else alpn_protocols
+        )
+
+        super().__init__(server_address,
+                         RequestHandlerClass,
+                         bind_and_activate)
+
+    def server_activate(self):
+        """Wrap the socket in SSLSocket."""
+        super().server_activate()
+        context = self._create_context()
+        self.socket = context.wrap_socket(self.socket, server_side=True)
+
+    def _create_context(self):
+        """Create a secure SSL context."""
+        context = self.ssl.create_default_context(self.ssl.Purpose.CLIENT_AUTH)
+        context.load_cert_chain(self.certfile, self.keyfile, self.password)
+        context.set_alpn_protocols(self.alpn_protocols)
+        return context
+
+
+class ThreadingHTTPSServer(socketserver.ThreadingMixIn, HTTPSServer):
+    daemon_threads = True
+
+
+class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
+
+    """HTTP request handler base class.
+
+    The following explanation of HTTP serves to guide you through the
+    code as well as to expose any misunderstandings I may have about
+    HTTP (so you don't need to read the code to figure out I'm wrong
+    :-).
+
+    HTTP (HyperText Transfer Protocol) is an extensible protocol on
+    top of a reliable stream transport (e.g. TCP/IP).  The protocol
+    recognizes three parts to a request:
+
+    1. One line identifying the request type and path
+    2. An optional set of RFC-822-style headers
+    3. An optional data part
+
+    The headers and data are separated by a blank line.
+
+    The first line of the request has the form
+
+    <command> <path> <version>
+
+    where <command> is a (case-sensitive) keyword such as GET or POST,
+    <path> is a string containing path information for the request,
+    and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
+    <path> is encoded using the URL encoding scheme (using %xx to signify
+    the ASCII character with hex code xx).
+
+    The specification specifies that lines are separated by CRLF but
+    for compatibility with the widest range of clients recommends
+    servers also handle LF.  Similarly, whitespace in the request line
+    is treated sensibly (allowing multiple spaces between components
+    and allowing trailing whitespace).
+
+    Similarly, for output, lines ought to be separated by CRLF pairs
+    but most clients grok LF characters just fine.
+
+    If the first line of the request has the form
+
+    <command> <path>
+
+    (i.e. <version> is left out) then this is assumed to be an HTTP
+    0.9 request; this form has no optional headers and data part and
+    the reply consists of just the data.
+
+    The reply form of the HTTP 1.x protocol again has three parts:
+
+    1. One line giving the response code
+    2. An optional set of RFC-822-style headers
+    3. The data
+
+    Again, the headers and data are separated by a blank line.
+
+    The response code line has the form
+
+    <version> <responsecode> <responsestring>
+
+    where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
+    <responsecode> is a 3-digit response code indicating success or
+    failure of the request, and <responsestring> is an optional
+    human-readable string explaining what the response code means.
+
+    This server parses the request and the headers, and then calls a
+    function specific to the request type (<command>).  Specifically,
+    a request SPAM will be handled by a method do_SPAM().  If no
+    such method exists the server sends an error response to the
+    client.  If it exists, it is called with no arguments:
+
+    do_SPAM()
+
+    Note that the request name is case sensitive (i.e. SPAM and spam
+    are different requests).
+
+    The various request details are stored in instance variables:
+
+    - client_address is the client IP address in the form (host,
+      port);
+
+    - command, path and version are the broken-down request line;
+
+    - headers is an instance of email.message.Message (or a derived
+      class) containing the header information;
+
+    - rfile is a file object open for reading positioned at the
+      start of the optional input data part;
+
+    - wfile is a file object open for writing.
+
+    IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
+
+    The first thing to be written must be the response line.  Then
+    follow 0 or more header lines, then a blank line, and then the
+    actual data (if any).  The meaning of the header lines depends on
+    the command executed by the server; in most cases, when data is
+    returned, there should be at least one header line of the form
+
+    Content-type: <type>/<subtype>
+
+    where <type> and <subtype> should be registered MIME types,
+    e.g. "text/html" or "text/plain".
+
+    """
+
+    # The Python system version, truncated to its first component.
+    sys_version = "Python/" + sys.version.split()[0]
+
+    # The server software version.  You may want to override this.
+    # The format is multiple whitespace-separated strings,
+    # where each string is of the form name[/version].
+    server_version = "BaseHTTP/" + __version__
+
+    error_message_format = DEFAULT_ERROR_MESSAGE
+    error_content_type = DEFAULT_ERROR_CONTENT_TYPE
+
+    # The default request version.  This only affects responses up until
+    # the point where the request line is parsed, so it mainly decides what
+    # the client gets back when sending a malformed request line.
+    # Most web servers default to HTTP 0.9, i.e. don't send a status line.
+    default_request_version = "HTTP/0.9"
+
+    def parse_request(self):
+        """Parse a request (internal).
+
+        The request should be stored in self.raw_requestline; the results
+        are in self.command, self.path, self.request_version and
+        self.headers.
+
+        Return True for success, False for failure; on failure, any relevant
+        error response has already been sent back.
+ + """ + is_http_0_9 = False + self.command = None # set in case of error on the first line + self.request_version = version = self.default_request_version + self.close_connection = True + requestline = str(self.raw_requestline, 'iso-8859-1') + requestline = requestline.rstrip('\r\n') + self.requestline = requestline + words = requestline.split() + if len(words) == 0: + return False + + if len(words) >= 3: # Enough to determine protocol version + version = words[-1] + try: + if not version.startswith('HTTP/'): + raise ValueError + base_version_number = version.split('/', 1)[1] + version_number = base_version_number.split(".") + # RFC 2145 section 3.1 says there can be only one "." and + # - major and minor numbers MUST be treated as + # separate integers; + # - HTTP/2.4 is a lower version than HTTP/2.13, which in + # turn is lower than HTTP/12.3; + # - Leading zeros MUST be ignored by recipients. + if len(version_number) != 2: + raise ValueError + if any(not component.isdigit() for component in version_number): + raise ValueError("non digit in http version") + if any(len(component) > 10 for component in version_number): + raise ValueError("unreasonable length http version") + version_number = int(version_number[0]), int(version_number[1]) + except (ValueError, IndexError): + self.send_error( + HTTPStatus.BAD_REQUEST, + "Bad request version (%r)" % version) + return False + if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": + self.close_connection = False + if version_number >= (2, 0): + self.send_error( + HTTPStatus.HTTP_VERSION_NOT_SUPPORTED, + "Invalid HTTP version (%s)" % base_version_number) + return False + self.request_version = version + + if not 2 <= len(words) <= 3: + self.send_error( + HTTPStatus.BAD_REQUEST, + "Bad request syntax (%r)" % requestline) + return False + command, path = words[:2] + if len(words) == 2: + self.close_connection = True + if command != 'GET': + self.send_error( + HTTPStatus.BAD_REQUEST, + "Bad HTTP/0.9 request type (%r)" % command) + return False + is_http_0_9 = True + self.command, self.path = command, path + + # gh-87389: The purpose of replacing '//' with '/' is to protect + # against open redirect attacks possibly triggered if the path starts + # with '//' because http clients treat //path as an absolute URI + # without scheme (similar to http://path) rather than a path. + if self.path.startswith('//'): + self.path = '/' + self.path.lstrip('/') # Reduce to a single / + + # For HTTP/0.9, headers are not expected at all. + if is_http_0_9: + self.headers = {} + return True + + # Examine the headers and look for a Connection directive. 
+ try: + self.headers = http.client.parse_headers(self.rfile, + _class=self.MessageClass) + except http.client.LineTooLong as err: + self.send_error( + HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE, + "Line too long", + str(err)) + return False + except http.client.HTTPException as err: + self.send_error( + HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE, + "Too many headers", + str(err) + ) + return False + + conntype = self.headers.get('Connection', "") + if conntype.lower() == 'close': + self.close_connection = True + elif (conntype.lower() == 'keep-alive' and + self.protocol_version >= "HTTP/1.1"): + self.close_connection = False + # Examine the headers and look for an Expect directive + expect = self.headers.get('Expect', "") + if (expect.lower() == "100-continue" and + self.protocol_version >= "HTTP/1.1" and + self.request_version >= "HTTP/1.1"): + if not self.handle_expect_100(): + return False + return True + + def handle_expect_100(self): + """Decide what to do with an "Expect: 100-continue" header. + + If the client is expecting a 100 Continue response, we must + respond with either a 100 Continue or a final response before + waiting for the request body. The default is to always respond + with a 100 Continue. You can behave differently (for example, + reject unauthorized requests) by overriding this method. + + This method should either return True (possibly after sending + a 100 Continue response) or send an error response and return + False. + + """ + self.send_response_only(HTTPStatus.CONTINUE) + self.end_headers() + return True + + def handle_one_request(self): + """Handle a single HTTP request. + + You normally don't need to override this method; see the class + __doc__ string for information on how to handle specific HTTP + commands such as GET and POST. + + """ + try: + self.raw_requestline = self.rfile.readline(65537) + if len(self.raw_requestline) > 65536: + self.requestline = '' + self.request_version = '' + self.command = '' + self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG) + return + if not self.raw_requestline: + self.close_connection = True + return + if not self.parse_request(): + # An error code has been sent, just exit + return + mname = 'do_' + self.command + if not hasattr(self, mname): + self.send_error( + HTTPStatus.NOT_IMPLEMENTED, + "Unsupported method (%r)" % self.command) + return + method = getattr(self, mname) + method() + self.wfile.flush() #actually send the response if not already done. + except TimeoutError as e: + #a read or a write timed out. Discard this connection + self.log_error("Request timed out: %r", e) + self.close_connection = True + return + + def handle(self): + """Handle multiple requests if necessary.""" + self.close_connection = True + + self.handle_one_request() + while not self.close_connection: + self.handle_one_request() + + def send_error(self, code, message=None, explain=None): + """Send and log an error reply. + + Arguments are + * code: an HTTP error code + 3 digits + * message: a simple optional 1 line reason phrase. + *( HTAB / SP / VCHAR / %x80-FF ) + defaults to short entry matching the response code + * explain: a detailed message defaults to the long entry + matching the response code. + + This sends an error response (so it must be called before any + output has been generated), logs the error, and finally sends + a piece of HTML explaining the error to the user. + + """ + + try: + shortmsg, longmsg = self.responses[code] + except KeyError: + shortmsg, longmsg = '???', '???' 
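+        # An unregistered code falls back to placeholder phrases, so the
+        # error path itself cannot fail on an unknown status code.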
+ if message is None: + message = shortmsg + if explain is None: + explain = longmsg + self.log_error("code %d, message %s", code, message) + self.send_response(code, message) + self.send_header('Connection', 'close') + + # Message body is omitted for cases described in: + # - RFC7230: 3.3. 1xx, 204(No Content), 304(Not Modified) + # - RFC7231: 6.3.6. 205(Reset Content) + body = None + if (code >= 200 and + code not in (HTTPStatus.NO_CONTENT, + HTTPStatus.RESET_CONTENT, + HTTPStatus.NOT_MODIFIED)): + # HTML encode to prevent Cross Site Scripting attacks + # (see bug #1100201) + content = (self.error_message_format % { + 'code': code, + 'message': html.escape(message, quote=False), + 'explain': html.escape(explain, quote=False) + }) + body = content.encode('UTF-8', 'replace') + self.send_header("Content-Type", self.error_content_type) + self.send_header('Content-Length', str(len(body))) + self.end_headers() + + if self.command != 'HEAD' and body: + self.wfile.write(body) + + def send_response(self, code, message=None): + """Add the response header to the headers buffer and log the + response code. + + Also send two standard headers with the server software + version and the current date. + + """ + self.log_request(code) + self.send_response_only(code, message) + self.send_header('Server', self.version_string()) + self.send_header('Date', self.date_time_string()) + + def send_response_only(self, code, message=None): + """Send the response header only.""" + if self.request_version != 'HTTP/0.9': + if message is None: + if code in self.responses: + message = self.responses[code][0] + else: + message = '' + if not hasattr(self, '_headers_buffer'): + self._headers_buffer = [] + self._headers_buffer.append(("%s %d %s\r\n" % + (self.protocol_version, code, message)).encode( + 'latin-1', 'strict')) + + def send_header(self, keyword, value): + """Send a MIME header to the headers buffer.""" + if self.request_version != 'HTTP/0.9': + if not hasattr(self, '_headers_buffer'): + self._headers_buffer = [] + self._headers_buffer.append( + ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict')) + + if keyword.lower() == 'connection': + if value.lower() == 'close': + self.close_connection = True + elif value.lower() == 'keep-alive': + self.close_connection = False + + def end_headers(self): + """Send the blank line ending the MIME headers.""" + if self.request_version != 'HTTP/0.9': + self._headers_buffer.append(b"\r\n") + self.flush_headers() + + def flush_headers(self): + if hasattr(self, '_headers_buffer'): + self.wfile.write(b"".join(self._headers_buffer)) + self._headers_buffer = [] + + def log_request(self, code='-', size='-'): + """Log an accepted request. + + This is called by send_response(). + + """ + if isinstance(code, HTTPStatus): + code = code.value + self.log_message('"%s" %s %s', + self.requestline, str(code), str(size)) + + def log_error(self, format, *args): + """Log an error. + + This is called when a request cannot be fulfilled. By + default it passes the message on to log_message(). + + Arguments are the same as for log_message(). + + XXX This should go to the separate error log. + + """ + + self.log_message(format, *args) + + # https://en.wikipedia.org/wiki/List_of_Unicode_characters#Control_codes + _control_char_table = str.maketrans( + {c: fr'\x{c:02x}' for c in itertools.chain(range(0x20), range(0x7f,0xa0))}) + _control_char_table[ord('\\')] = r'\\' + + def log_message(self, format, *args): + """Log an arbitrary message. + + This is used by all other logging functions. 
Override + it if you have specific logging wishes. + + The first argument, FORMAT, is a format string for the + message to be logged. If the format string contains + any % escapes requiring parameters, they should be + specified as subsequent arguments (it's just like + printf!). + + The client ip and current date/time are prefixed to + every message. + + Unicode control characters are replaced with escaped hex + before writing the output to stderr. + + """ + + message = format % args + sys.stderr.write("%s - - [%s] %s\n" % + (self.address_string(), + self.log_date_time_string(), + message.translate(self._control_char_table))) + + def version_string(self): + """Return the server software version string.""" + return self.server_version + ' ' + self.sys_version + + def date_time_string(self, timestamp=None): + """Return the current date and time formatted for a message header.""" + if timestamp is None: + timestamp = time.time() + return email.utils.formatdate(timestamp, usegmt=True) + + def log_date_time_string(self): + """Return the current time formatted for logging.""" + now = time.time() + year, month, day, hh, mm, ss, x, y, z = time.localtime(now) + s = "%02d/%3s/%04d %02d:%02d:%02d" % ( + day, self.monthname[month], year, hh, mm, ss) + return s + + weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + + monthname = [None, + 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', + 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + + def address_string(self): + """Return the client address.""" + + return self.client_address[0] + + # Essentially static class variables + + # The version of the HTTP protocol we support. + # Set this to HTTP/1.1 to enable automatic keepalive + protocol_version = "HTTP/1.0" + + # MessageClass used to parse headers + MessageClass = http.client.HTTPMessage + + # hack to maintain backwards compatibility + responses = { + v: (v.phrase, v.description) + for v in HTTPStatus.__members__.values() + } + + +class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): + + """Simple HTTP request handler with GET and HEAD commands. + + This serves files from the current directory and any of its + subdirectories. The MIME type for files is determined by + calling the .guess_type() method. + + The GET and HEAD requests are identical except that the HEAD + request omits the actual contents of the file. + + """ + + server_version = "SimpleHTTP/" + __version__ + index_pages = ("index.html", "index.htm") + extensions_map = _encodings_map_default = { + '.gz': 'application/gzip', + '.Z': 'application/octet-stream', + '.bz2': 'application/x-bzip2', + '.xz': 'application/x-xz', + } + + def __init__(self, *args, directory=None, **kwargs): + if directory is None: + directory = os.getcwd() + self.directory = os.fspath(directory) + super().__init__(*args, **kwargs) + + def do_GET(self): + """Serve a GET request.""" + f = self.send_head() + if f: + try: + self.copyfile(f, self.wfile) + finally: + f.close() + + def do_HEAD(self): + """Serve a HEAD request.""" + f = self.send_head() + if f: + f.close() + + def send_head(self): + """Common code for GET and HEAD commands. + + This sends the response code and MIME headers. + + Return value is either a file object (which has to be copied + to the outputfile by the caller unless the command was HEAD, + and must be closed by the caller under all circumstances), or + None, in which case the caller has nothing further to do. 
+ + """ + path = self.translate_path(self.path) + f = None + if os.path.isdir(path): + parts = urllib.parse.urlsplit(self.path) + if not parts.path.endswith(('/', '%2f', '%2F')): + # redirect browser - doing basically what apache does + self.send_response(HTTPStatus.MOVED_PERMANENTLY) + new_parts = (parts[0], parts[1], parts[2] + '/', + parts[3], parts[4]) + new_url = urllib.parse.urlunsplit(new_parts) + self.send_header("Location", new_url) + self.send_header("Content-Length", "0") + self.end_headers() + return None + for index in self.index_pages: + index = os.path.join(path, index) + if os.path.isfile(index): + path = index + break + else: + return self.list_directory(path) + ctype = self.guess_type(path) + # check for trailing "/" which should return 404. See Issue17324 + # The test for this was added in test_httpserver.py + # However, some OS platforms accept a trailingSlash as a filename + # See discussion on python-dev and Issue34711 regarding + # parsing and rejection of filenames with a trailing slash + if path.endswith("/"): + self.send_error(HTTPStatus.NOT_FOUND, "File not found") + return None + try: + f = open(path, 'rb') + except OSError: + self.send_error(HTTPStatus.NOT_FOUND, "File not found") + return None + + try: + fs = os.fstat(f.fileno()) + # Use browser cache if possible + if ("If-Modified-Since" in self.headers + and "If-None-Match" not in self.headers): + # compare If-Modified-Since and time of last file modification + try: + ims = email.utils.parsedate_to_datetime( + self.headers["If-Modified-Since"]) + except (TypeError, IndexError, OverflowError, ValueError): + # ignore ill-formed values + pass + else: + if ims.tzinfo is None: + # obsolete format with no timezone, cf. + # https://tools.ietf.org/html/rfc7231#section-7.1.1.1 + ims = ims.replace(tzinfo=datetime.timezone.utc) + if ims.tzinfo is datetime.timezone.utc: + # compare to UTC datetime of last modification + last_modif = datetime.datetime.fromtimestamp( + fs.st_mtime, datetime.timezone.utc) + # remove microseconds, like in If-Modified-Since + last_modif = last_modif.replace(microsecond=0) + + if last_modif <= ims: + self.send_response(HTTPStatus.NOT_MODIFIED) + self.end_headers() + f.close() + return None + + self.send_response(HTTPStatus.OK) + self.send_header("Content-type", ctype) + self.send_header("Content-Length", str(fs[6])) + self.send_header("Last-Modified", + self.date_time_string(fs.st_mtime)) + self.end_headers() + return f + except: + f.close() + raise + + def list_directory(self, path): + """Helper to produce a directory listing (absent index.html). + + Return value is either a file object, or None (indicating an + error). In either case, the headers are sent, making the + interface the same as for send_head(). + + """ + try: + list = os.listdir(path) + except OSError: + self.send_error( + HTTPStatus.NOT_FOUND, + "No permission to list directory") + return None + list.sort(key=lambda a: a.lower()) + r = [] + displaypath = self.path + displaypath = displaypath.split('#', 1)[0] + displaypath = displaypath.split('?', 1)[0] + try: + displaypath = urllib.parse.unquote(displaypath, + errors='surrogatepass') + except UnicodeDecodeError: + displaypath = urllib.parse.unquote(displaypath) + displaypath = html.escape(displaypath, quote=False) + enc = sys.getfilesystemencoding() + title = f'Directory listing for {displaypath}' + r.append('') + r.append('') + r.append('') + r.append(f'') + r.append('') + r.append(f'{title}\n') + r.append(f'\n

{title}

') + r.append('
\n
\n
\n\n\n') + encoded = '\n'.join(r).encode(enc, 'surrogateescape') + f = io.BytesIO() + f.write(encoded) + f.seek(0) + self.send_response(HTTPStatus.OK) + self.send_header("Content-type", "text/html; charset=%s" % enc) + self.send_header("Content-Length", str(len(encoded))) + self.end_headers() + return f + + def translate_path(self, path): + """Translate a /-separated PATH to the local filename syntax. + + Components that mean special things to the local file system + (e.g. drive or directory names) are ignored. (XXX They should + probably be diagnosed.) + + """ + # abandon query parameters + path = path.split('#', 1)[0] + path = path.split('?', 1)[0] + # Don't forget explicit trailing slash when normalizing. Issue17324 + try: + path = urllib.parse.unquote(path, errors='surrogatepass') + except UnicodeDecodeError: + path = urllib.parse.unquote(path) + trailing_slash = path.endswith('/') + path = posixpath.normpath(path) + words = path.split('/') + words = filter(None, words) + path = self.directory + for word in words: + if os.path.dirname(word) or word in (os.curdir, os.pardir): + # Ignore components that are not a simple file/directory name + continue + path = os.path.join(path, word) + if trailing_slash: + path += '/' + return path + + def copyfile(self, source, outputfile): + """Copy all data between two file objects. + + The SOURCE argument is a file object open for reading + (or anything with a read() method) and the DESTINATION + argument is a file object open for writing (or + anything with a write() method). + + The only reason for overriding this would be to change + the block size or perhaps to replace newlines by CRLF + -- note however that this the default server uses this + to copy binary data as well. + + """ + shutil.copyfileobj(source, outputfile) + + def guess_type(self, path): + """Guess the type of a file. + + Argument is a PATH (a filename). + + Return value is a string of the form type/subtype, + usable for a MIME Content-type header. + + The default implementation looks the file's extension + up in the table self.extensions_map, using application/octet-stream + as a default; however it would be permissible (if + slow) to look inside the data to make a better guess. + + """ + base, ext = posixpath.splitext(path) + if ext in self.extensions_map: + return self.extensions_map[ext] + ext = ext.lower() + if ext in self.extensions_map: + return self.extensions_map[ext] + guess, _ = mimetypes.guess_file_type(path) + if guess: + return guess + return 'application/octet-stream' + + +# Utilities for CGIHTTPRequestHandler + +def _url_collapse_path(path): + """ + Given a URL path, remove extra '/'s and '.' path elements and collapse + any '..' references and returns a collapsed path. + + Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. + The utility of this function is limited to is_cgi method and helps + preventing some security attacks. + + Returns: The reconstituted URL, which will always start with a '/'. + + Raises: IndexError if too many '..' occur within the path. + + """ + # Query component should not be involved. + path, _, query = path.partition('?') + path = urllib.parse.unquote(path) + + # Similar to os.path.split(os.path.normpath(path)) but specific to URL + # path semantics rather than local operating system semantics. + path_parts = path.split('/') + head_parts = [] + for part in path_parts[:-1]: + if part == '..': + head_parts.pop() # IndexError if more '..' 
than prior parts + elif part and part != '.': + head_parts.append( part ) + if path_parts: + tail_part = path_parts.pop() + if tail_part: + if tail_part == '..': + head_parts.pop() + tail_part = '' + elif tail_part == '.': + tail_part = '' + else: + tail_part = '' + + if query: + tail_part = '?'.join((tail_part, query)) + + splitpath = ('/' + '/'.join(head_parts), tail_part) + collapsed_path = "/".join(splitpath) + + return collapsed_path + + + +nobody = None + +def nobody_uid(): + """Internal routine to get nobody's uid""" + global nobody + if nobody: + return nobody + try: + import pwd + except ImportError: + return -1 + try: + nobody = pwd.getpwnam('nobody')[2] + except KeyError: + nobody = 1 + max(x[2] for x in pwd.getpwall()) + return nobody + + +def executable(path): + """Test for executable file.""" + return os.access(path, os.X_OK) + + +class CGIHTTPRequestHandler(SimpleHTTPRequestHandler): + + """Complete HTTP server with GET, HEAD and POST commands. + + GET and HEAD also support running CGI scripts. + + The POST command is *only* implemented for CGI scripts. + + """ + + def __init__(self, *args, **kwargs): + import warnings + warnings._deprecated("http.server.CGIHTTPRequestHandler", + remove=(3, 15)) + super().__init__(*args, **kwargs) + + # Determine platform specifics + have_fork = hasattr(os, 'fork') + + # Make rfile unbuffered -- we need to read one line and then pass + # the rest to a subprocess, so we can't use buffered input. + rbufsize = 0 + + def do_POST(self): + """Serve a POST request. + + This is only implemented for CGI scripts. + + """ + + if self.is_cgi(): + self.run_cgi() + else: + self.send_error( + HTTPStatus.NOT_IMPLEMENTED, + "Can only POST to CGI scripts") + + def send_head(self): + """Version of send_head that support CGI scripts""" + if self.is_cgi(): + return self.run_cgi() + else: + return SimpleHTTPRequestHandler.send_head(self) + + def is_cgi(self): + """Test whether self.path corresponds to a CGI script. + + Returns True and updates the cgi_info attribute to the tuple + (dir, rest) if self.path requires running a CGI script. + Returns False otherwise. + + If any exception is raised, the caller should assume that + self.path was rejected as invalid and act accordingly. + + The default implementation tests whether the normalized url + path begins with one of the strings in self.cgi_directories + (and the next character is a '/' or the end of the string). + + """ + collapsed_path = _url_collapse_path(self.path) + dir_sep = collapsed_path.find('/', 1) + while dir_sep > 0 and not collapsed_path[:dir_sep] in self.cgi_directories: + dir_sep = collapsed_path.find('/', dir_sep+1) + if dir_sep > 0: + head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] + self.cgi_info = head, tail + return True + return False + + + cgi_directories = ['/cgi-bin', '/htbin'] + + def is_executable(self, path): + """Test whether argument path is an executable file.""" + return executable(path) + + def is_python(self, path): + """Test whether argument path is a Python script.""" + head, tail = os.path.splitext(path) + return tail.lower() in (".py", ".pyw") + + def run_cgi(self): + """Execute a CGI script.""" + dir, rest = self.cgi_info + path = dir + '/' + rest + i = path.find('/', len(dir)+1) + while i >= 0: + nextdir = path[:i] + nextrest = path[i+1:] + + scriptdir = self.translate_path(nextdir) + if os.path.isdir(scriptdir): + dir, rest = nextdir, nextrest + i = path.find('/', len(dir)+1) + else: + break + + # find an explicit query string, if present. 
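+        # (Illustrative: for a request of "/cgi-bin/script.py/extra?x=1",
+        # 'rest' here is "script.py/extra?x=1"; partitioning leaves
+        # rest="script.py/extra" and query="x=1".)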
+ rest, _, query = rest.partition('?') + + # dissect the part after the directory name into a script name & + # a possible additional path, to be stored in PATH_INFO. + i = rest.find('/') + if i >= 0: + script, rest = rest[:i], rest[i:] + else: + script, rest = rest, '' + + scriptname = dir + '/' + script + scriptfile = self.translate_path(scriptname) + if not os.path.exists(scriptfile): + self.send_error( + HTTPStatus.NOT_FOUND, + "No such CGI script (%r)" % scriptname) + return + if not os.path.isfile(scriptfile): + self.send_error( + HTTPStatus.FORBIDDEN, + "CGI script is not a plain file (%r)" % scriptname) + return + ispy = self.is_python(scriptname) + if self.have_fork or not ispy: + if not self.is_executable(scriptfile): + self.send_error( + HTTPStatus.FORBIDDEN, + "CGI script is not executable (%r)" % scriptname) + return + + # Reference: https://www6.uniovi.es/~antonio/ncsa_httpd/cgi/env.html + # XXX Much of the following could be prepared ahead of time! + env = copy.deepcopy(os.environ) + env['SERVER_SOFTWARE'] = self.version_string() + env['SERVER_NAME'] = self.server.server_name + env['GATEWAY_INTERFACE'] = 'CGI/1.1' + env['SERVER_PROTOCOL'] = self.protocol_version + env['SERVER_PORT'] = str(self.server.server_port) + env['REQUEST_METHOD'] = self.command + uqrest = urllib.parse.unquote(rest) + env['PATH_INFO'] = uqrest + env['PATH_TRANSLATED'] = self.translate_path(uqrest) + env['SCRIPT_NAME'] = scriptname + env['QUERY_STRING'] = query + env['REMOTE_ADDR'] = self.client_address[0] + authorization = self.headers.get("authorization") + if authorization: + authorization = authorization.split() + if len(authorization) == 2: + import base64, binascii + env['AUTH_TYPE'] = authorization[0] + if authorization[0].lower() == "basic": + try: + authorization = authorization[1].encode('ascii') + authorization = base64.decodebytes(authorization).\ + decode('ascii') + except (binascii.Error, UnicodeError): + pass + else: + authorization = authorization.split(':') + if len(authorization) == 2: + env['REMOTE_USER'] = authorization[0] + # XXX REMOTE_IDENT + if self.headers.get('content-type') is None: + env['CONTENT_TYPE'] = self.headers.get_content_type() + else: + env['CONTENT_TYPE'] = self.headers['content-type'] + length = self.headers.get('content-length') + if length: + env['CONTENT_LENGTH'] = length + referer = self.headers.get('referer') + if referer: + env['HTTP_REFERER'] = referer + accept = self.headers.get_all('accept', ()) + env['HTTP_ACCEPT'] = ','.join(accept) + ua = self.headers.get('user-agent') + if ua: + env['HTTP_USER_AGENT'] = ua + co = filter(None, self.headers.get_all('cookie', [])) + cookie_str = ', '.join(co) + if cookie_str: + env['HTTP_COOKIE'] = cookie_str + # XXX Other HTTP_* headers + # Since we're setting the env in the parent, provide empty + # values to override previously set values + for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH', + 'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'): + env.setdefault(k, "") + + self.send_response(HTTPStatus.OK, "Script output follows") + self.flush_headers() + + decoded_query = query.replace('+', ' ') + + if self.have_fork: + # Unix -- fork as we should + args = [script] + if '=' not in decoded_query: + args.append(decoded_query) + nobody = nobody_uid() + self.wfile.flush() # Always flush before forking + pid = os.fork() + if pid != 0: + # Parent + pid, sts = os.waitpid(pid, 0) + # throw away additional data [see bug #427345] + while select.select([self.rfile], [], [], 0)[0]: + if not self.rfile.read(1): + break + 
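+                # os.waitstatus_to_exitcode() turns the raw waitpid() status
+                # into a plain exit code (negative means killed by a signal).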
exitcode = os.waitstatus_to_exitcode(sts) + if exitcode: + self.log_error(f"CGI script exit code {exitcode}") + return + # Child + try: + try: + os.setuid(nobody) + except OSError: + pass + os.dup2(self.rfile.fileno(), 0) + os.dup2(self.wfile.fileno(), 1) + os.execve(scriptfile, args, env) + except: + self.server.handle_error(self.request, self.client_address) + os._exit(127) + + else: + # Non-Unix -- use subprocess + import subprocess + cmdline = [scriptfile] + if self.is_python(scriptfile): + interp = sys.executable + if interp.lower().endswith("w.exe"): + # On Windows, use python.exe, not pythonw.exe + interp = interp[:-5] + interp[-4:] + cmdline = [interp, '-u'] + cmdline + if '=' not in query: + cmdline.append(query) + self.log_message("command: %s", subprocess.list2cmdline(cmdline)) + try: + nbytes = int(length) + except (TypeError, ValueError): + nbytes = 0 + p = subprocess.Popen(cmdline, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env = env + ) + if self.command.lower() == "post" and nbytes > 0: + cursize = 0 + data = self.rfile.read(min(nbytes, _MIN_READ_BUF_SIZE)) + while len(data) < nbytes and len(data) != cursize: + cursize = len(data) + # This is a geometric increase in read size (never more + # than doubling out the current length of data per loop + # iteration). + delta = min(cursize, nbytes - cursize) + try: + data += self.rfile.read(delta) + except TimeoutError: + break + else: + data = None + # throw away additional data [see bug #427345] + while select.select([self.rfile._sock], [], [], 0)[0]: + if not self.rfile._sock.recv(1): + break + stdout, stderr = p.communicate(data) + self.wfile.write(stdout) + if stderr: + self.log_error('%s', stderr) + p.stderr.close() + p.stdout.close() + status = p.returncode + if status: + self.log_error("CGI script exit status %#x", status) + else: + self.log_message("CGI script exited OK") + + +def _get_best_family(*address): + infos = socket.getaddrinfo( + *address, + type=socket.SOCK_STREAM, + flags=socket.AI_PASSIVE, + ) + family, type, proto, canonname, sockaddr = next(iter(infos)) + return family, sockaddr + + +def test(HandlerClass=BaseHTTPRequestHandler, + ServerClass=ThreadingHTTPServer, + protocol="HTTP/1.0", port=8000, bind=None, + tls_cert=None, tls_key=None, tls_password=None): + """Test the HTTP request handler class. + + This runs an HTTP server on port 8000 (or the port argument). + + """ + ServerClass.address_family, addr = _get_best_family(bind, port) + HandlerClass.protocol_version = protocol + + if tls_cert: + server = ServerClass(addr, HandlerClass, certfile=tls_cert, + keyfile=tls_key, password=tls_password) + else: + server = ServerClass(addr, HandlerClass) + + with server as httpd: + host, port = httpd.socket.getsockname()[:2] + url_host = f'[{host}]' if ':' in host else host + protocol = 'HTTPS' if tls_cert else 'HTTP' + print( + f"Serving {protocol} on {host} port {port} " + f"({protocol.lower()}://{url_host}:{port}/) ..." 
+ ) + try: + httpd.serve_forever() + except KeyboardInterrupt: + print("\nKeyboard interrupt received, exiting.") + sys.exit(0) + +if __name__ == '__main__': + import argparse + import contextlib + + parser = argparse.ArgumentParser(color=True) + parser.add_argument('--cgi', action='store_true', + help='run as CGI server') + parser.add_argument('-b', '--bind', metavar='ADDRESS', + help='bind to this address ' + '(default: all interfaces)') + parser.add_argument('-d', '--directory', default=os.getcwd(), + help='serve this directory ' + '(default: current directory)') + parser.add_argument('-p', '--protocol', metavar='VERSION', + default='HTTP/1.0', + help='conform to this HTTP version ' + '(default: %(default)s)') + parser.add_argument('--tls-cert', metavar='PATH', + help='path to the TLS certificate chain file') + parser.add_argument('--tls-key', metavar='PATH', + help='path to the TLS key file') + parser.add_argument('--tls-password-file', metavar='PATH', + help='path to the password file for the TLS key') + parser.add_argument('port', default=8000, type=int, nargs='?', + help='bind to this port ' + '(default: %(default)s)') + args = parser.parse_args() + + if not args.tls_cert and args.tls_key: + parser.error("--tls-key requires --tls-cert to be set") + + tls_key_password = None + if args.tls_password_file: + if not args.tls_cert: + parser.error("--tls-password-file requires --tls-cert to be set") + + try: + with open(args.tls_password_file, "r", encoding="utf-8") as f: + tls_key_password = f.read().strip() + except OSError as e: + parser.error(f"Failed to read TLS password file: {e}") + + if args.cgi: + handler_class = CGIHTTPRequestHandler + else: + handler_class = SimpleHTTPRequestHandler + + # ensure dual-stack is not disabled; ref #38907 + class DualStackServerMixin: + + def server_bind(self): + # suppress exception when protocol is IPv4 + with contextlib.suppress(Exception): + self.socket.setsockopt( + socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) + return super().server_bind() + + def finish_request(self, request, client_address): + self.RequestHandlerClass(request, client_address, self, + directory=args.directory) + + class HTTPDualStackServer(DualStackServerMixin, ThreadingHTTPServer): + pass + class HTTPSDualStackServer(DualStackServerMixin, ThreadingHTTPSServer): + pass + + ServerClass = HTTPSDualStackServer if args.tls_cert else HTTPDualStackServer + + test( + HandlerClass=handler_class, + ServerClass=ServerClass, + port=args.port, + bind=args.bind, + protocol=args.protocol, + tls_cert=args.tls_cert, + tls_key=args.tls_key, + tls_password=tls_key_password, + ) diff --git a/Python314_4_x64_Template/Lib/imaplib.py b/Python314_4_x64_Template/Lib/imaplib.py new file mode 100644 index 00000000..cbe129b3 --- /dev/null +++ b/Python314_4_x64_Template/Lib/imaplib.py @@ -0,0 +1,1967 @@ +"""IMAP4 client. + +Based on RFC 2060. + +Public class: IMAP4 +Public variable: Debug +Public functions: Internaldate2tuple + Int2AP + ParseFlags + Time2Internaldate +""" + +# Author: Piers Lauder December 1997. +# +# Authentication code contributed by Donn Cave June 1998. +# String method conversion by ESR, February 2001. +# GET/SETACL contributed by Anthony Baxter April 2001. +# IMAP4_SSL contributed by Tino Lange March 2002. +# GET/SETQUOTA contributed by Andreas Zeidler June 2002. +# PROXYAUTH contributed by Rick Holbert November 2002. +# GET/SETANNOTATION contributed by Tomas Lindroos June 2005. +# IDLE contributed by Forest August 2024. 
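+#
+# A minimal usage sketch (illustrative only -- the host name and the
+# credentials below are hypothetical placeholders, not part of this module):
+#
+#     import imaplib
+#     with imaplib.IMAP4_SSL('imap.example.com') as M:
+#         M.login('user', 'password')
+#         M.select('INBOX', readonly=True)
+#         typ, data = M.search(None, 'UNSEEN')
+#         for num in data[0].split():
+#             typ, msg_data = M.fetch(num, '(RFC822)')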
+
+__version__ = "2.60"
+
+import binascii, errno, random, re, socket, subprocess, sys, time, calendar
+from datetime import datetime, timezone, timedelta
+from io import DEFAULT_BUFFER_SIZE
+
+try:
+    import ssl
+    HAVE_SSL = True
+except ImportError:
+    HAVE_SSL = False
+
+__all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple",
+           "Int2AP", "ParseFlags", "Time2Internaldate"]
+
+#       Globals
+
+CRLF = b'\r\n'
+Debug = 0
+IMAP4_PORT = 143
+IMAP4_SSL_PORT = 993
+AllowedVersions = ('IMAP4REV1', 'IMAP4')        # Most recent first
+
+# Maximal line length when calling readline(). This is to prevent
+# reading arbitrary length lines. RFC 3501 and 2060 (IMAP 4rev1)
+# don't specify a line length. RFC 2683 suggests limiting client
+# command lines to 1000 octets and that servers should be prepared
+# to accept command lines up to 8000 octets, so we used to use 10K here.
+# In the modern world (eg: gmail) the response to, for example, a
+# search command can be quite large, so we now use 1M.
+_MAXLINE = 1000000
+
+
+#       Commands
+
+Commands = {
+        # name            valid states
+        'APPEND':       ('AUTH', 'SELECTED'),
+        'AUTHENTICATE': ('NONAUTH',),
+        'CAPABILITY':   ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
+        'CHECK':        ('SELECTED',),
+        'CLOSE':        ('SELECTED',),
+        'COPY':         ('SELECTED',),
+        'CREATE':       ('AUTH', 'SELECTED'),
+        'DELETE':       ('AUTH', 'SELECTED'),
+        'DELETEACL':    ('AUTH', 'SELECTED'),
+        'ENABLE':       ('AUTH', ),
+        'EXAMINE':      ('AUTH', 'SELECTED'),
+        'EXPUNGE':      ('SELECTED',),
+        'FETCH':        ('SELECTED',),
+        'GETACL':       ('AUTH', 'SELECTED'),
+        'GETANNOTATION':('AUTH', 'SELECTED'),
+        'GETQUOTA':     ('AUTH', 'SELECTED'),
+        'GETQUOTAROOT': ('AUTH', 'SELECTED'),
+        'IDLE':         ('AUTH', 'SELECTED'),
+        'MYRIGHTS':     ('AUTH', 'SELECTED'),
+        'LIST':         ('AUTH', 'SELECTED'),
+        'LOGIN':        ('NONAUTH',),
+        'LOGOUT':       ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
+        'LSUB':         ('AUTH', 'SELECTED'),
+        'MOVE':         ('SELECTED',),
+        'NAMESPACE':    ('AUTH', 'SELECTED'),
+        'NOOP':         ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
+        'PARTIAL':      ('SELECTED',),          # NB: obsolete
+        'PROXYAUTH':    ('AUTH',),
+        'RENAME':       ('AUTH', 'SELECTED'),
+        'SEARCH':       ('SELECTED',),
+        'SELECT':       ('AUTH', 'SELECTED'),
+        'SETACL':       ('AUTH', 'SELECTED'),
+        'SETANNOTATION':('AUTH', 'SELECTED'),
+        'SETQUOTA':     ('AUTH', 'SELECTED'),
+        'SORT':         ('SELECTED',),
+        'STARTTLS':     ('NONAUTH',),
+        'STATUS':       ('AUTH', 'SELECTED'),
+        'STORE':        ('SELECTED',),
+        'SUBSCRIBE':    ('AUTH', 'SELECTED'),
+        'THREAD':       ('SELECTED',),
+        'UID':          ('SELECTED',),
+        'UNSUBSCRIBE':  ('AUTH', 'SELECTED'),
+        'UNSELECT':     ('SELECTED',),
+        }
+
+#       Patterns to match server responses
+
+Continuation = re.compile(br'\+( (?P<data>.*))?')
+Flags = re.compile(br'.*FLAGS \((?P<flags>[^\)]*)\)')
+InternalDate = re.compile(br'.*INTERNALDATE "'
+        br'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
+        br' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
+        br' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
+        br'"')
+# Literal is no longer used; kept for backward compatibility.
+Literal = re.compile(br'.*{(?P<size>\d+)}$', re.ASCII)
+MapCRLF = re.compile(br'\r\n|\r|\n')
+# We no longer exclude the ']' character from the data portion of the response
+# code, even though it violates the RFC.  Popular IMAP servers such as Gmail
+# allow flags with ']', and there are programs (including imaplib!) that can
+# produce them.  The problem with this is if the 'text' portion of the response
+# includes a ']' we'll parse the response wrong (which is the point of the RFC
+# restriction).  However, that seems less likely to be a problem in practice
+# than being unable to correctly parse flags that include ']' chars, which
+# was reported as a real-world problem in issue #21815.
+Response_code = re.compile(br'\[(?P<type>[A-Z-]+)( (?P<data>.*))?\]')
+Untagged_response = re.compile(br'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
+# Untagged_status is no longer used; kept for backward compatibility
+Untagged_status = re.compile(
+    br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?', re.ASCII)
+# We compile these in _mode_xxx.
+_Literal = br'.*{(?P<size>\d+)}$'
+_Untagged_status = br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?'
+
+
+
+class IMAP4:
+
+    r"""IMAP4 client class.
+
+    Instantiate with: IMAP4([host[, port[, timeout=None]]])
+
+            host - host's name (default: localhost);
+            port - port number (default: standard IMAP4 port).
+            timeout - socket timeout (default: None)
+                      If timeout is not given or is None,
+                      the global default socket timeout is used
+
+    All IMAP4rev1 commands are supported by methods of the same
+    name (in lowercase).
+
+    All arguments to commands are converted to strings, except for
+    AUTHENTICATE, and the last argument to APPEND which is passed as
+    an IMAP4 literal.  If necessary (the string contains any
+    non-printing characters or white-space and isn't enclosed with
+    either parentheses or double quotes) each string is quoted.
+    However, the 'password' argument to the LOGIN command is always
+    quoted.  If you want to avoid having an argument string quoted
+    (eg: the 'flags' argument to STORE) then enclose the string in
+    parentheses (eg: "(\Deleted)").
+
+    Each command returns a tuple: (type, [data, ...]) where 'type'
+    is usually 'OK' or 'NO', and 'data' is either the text from the
+    tagged response, or untagged results from command. Each 'data'
+    is either a string, or a tuple. If a tuple, then the first part
+    is the header of the response, and the second part contains
+    the data (ie: 'literal' value).
+
+    Errors raise the exception class <instance>.error("<reason>").
+    IMAP4 server errors raise <instance>.abort("<reason>"),
+    which is a sub-class of 'error'. Mailbox status changes
+    from READ-WRITE to READ-ONLY raise the exception class
+    <instance>.readonly("<reason>"), which is a sub-class of 'abort'.
+
+    "error" exceptions imply a program error.
+    "abort" exceptions imply the connection should be reset, and
+    the command re-tried.
+    "readonly" exceptions imply the command should be re-tried.
+
+    Note: to use this module, you must read the RFCs pertaining to the
+    IMAP4 protocol, as the semantics of the arguments to each IMAP4
+    command are left to the invoker, not to mention the results. Also,
+    most IMAP servers implement a sub-set of the commands available here.
+ """ + + class error(Exception): pass # Logical errors - debug required + class abort(error): pass # Service errors - close and retry + class readonly(abort): pass # Mailbox status changed to READ-ONLY + class _responsetimeout(TimeoutError): pass # No response during IDLE + + def __init__(self, host='', port=IMAP4_PORT, timeout=None): + self.debug = Debug + self.state = 'LOGOUT' + self.literal = None # A literal argument to a command + self.tagged_commands = {} # Tagged commands awaiting response + self.untagged_responses = {} # {typ: [data, ...], ...} + self.continuation_response = '' # Last continuation response + self._idle_responses = [] # Response queue for idle iteration + self._idle_capture = False # Whether to queue responses for idle + self.is_readonly = False # READ-ONLY desired state + self.tagnum = 0 + self._tls_established = False + self._mode_ascii() + self._readbuf = [] + + # Open socket to server. + + self.open(host, port, timeout) + + try: + self._connect() + except Exception: + try: + self.shutdown() + except OSError: + pass + raise + + def _mode_ascii(self): + self.utf8_enabled = False + self._encoding = 'ascii' + self.Literal = re.compile(_Literal, re.ASCII) + self.Untagged_status = re.compile(_Untagged_status, re.ASCII) + + + def _mode_utf8(self): + self.utf8_enabled = True + self._encoding = 'utf-8' + self.Literal = re.compile(_Literal) + self.Untagged_status = re.compile(_Untagged_status) + + + def _connect(self): + # Create unique tag for this session, + # and compile tagged response matcher. + + self.tagpre = Int2AP(random.randint(4096, 65535)) + self.tagre = re.compile(br'(?P' + + self.tagpre + + br'\d+) (?P[A-Z]+) (?P.*)', re.ASCII) + + # Get server welcome message, + # request and store CAPABILITY response. + + if __debug__: + self._cmd_log_len = 10 + self._cmd_log_idx = 0 + self._cmd_log = {} # Last '_cmd_log_len' interactions + if self.debug >= 1: + self._mesg('imaplib version %s' % __version__) + self._mesg('new IMAP4 connection, tag=%s' % self.tagpre) + + self.welcome = self._get_response() + if 'PREAUTH' in self.untagged_responses: + self.state = 'AUTH' + elif 'OK' in self.untagged_responses: + self.state = 'NONAUTH' + else: + raise self.error(self.welcome) + + self._get_capabilities() + if __debug__: + if self.debug >= 3: + self._mesg('CAPABILITIES: %r' % (self.capabilities,)) + + for version in AllowedVersions: + if not version in self.capabilities: + continue + self.PROTOCOL_VERSION = version + return + + raise self.error('server not IMAP4 compliant') + + + def __getattr__(self, attr): + # Allow UPPERCASE variants of IMAP4 command methods. + if attr in Commands: + return getattr(self, attr.lower()) + raise AttributeError("Unknown IMAP4 command: '%s'" % attr) + + def __enter__(self): + return self + + def __exit__(self, *args): + if self.state == "LOGOUT": + return + + try: + self.logout() + except OSError: + pass + + + # Overridable methods + + + def _create_socket(self, timeout): + # Default value of IMAP4.host is '', but socket.getaddrinfo() + # (which is used by socket.create_connection()) expects None + # as a default value for host. 
+ if timeout is not None and not timeout: + raise ValueError('Non-blocking socket (timeout=0) is not supported') + host = None if not self.host else self.host + sys.audit("imaplib.open", self, self.host, self.port) + address = (host, self.port) + if timeout is not None: + return socket.create_connection(address, timeout) + return socket.create_connection(address) + + def open(self, host='', port=IMAP4_PORT, timeout=None): + """Setup connection to remote server on "host:port" + (default: localhost:standard IMAP4 port). + This connection will be used by the routines: + read, readline, send, shutdown. + """ + self.host = host + self.port = port + self.sock = self._create_socket(timeout) + self._file = self.sock.makefile('rb') + + + @property + def file(self): + # The old 'file' attribute is no longer used now that we do our own + # read() and readline() buffering, with which it conflicts. + # As an undocumented interface, it should never have been accessed by + # external code, and therefore does not warrant deprecation. + # Nevertheless, we provide this property for now, to avoid suddenly + # breaking any code in the wild that might have been using it in a + # harmless way. + import warnings + warnings.warn( + 'IMAP4.file is unsupported, can cause errors, and may be removed.', + RuntimeWarning, + stacklevel=2) + return self._file + + + def read(self, size): + """Read 'size' bytes from remote.""" + # We need buffered read() to continue working after socket timeouts, + # since we use them during IDLE. Unfortunately, the standard library's + # SocketIO implementation makes this impossible, by setting a permanent + # error condition instead of letting the caller decide how to handle a + # timeout. We therefore implement our own buffered read(). + # https://github.com/python/cpython/issues/51571 + # + # Reading in chunks instead of delegating to a single + # BufferedReader.read() call also means we avoid its preallocation + # of an unreasonably large memory block if a malicious server claims + # it will send a huge literal without actually sending one. + # https://github.com/python/cpython/issues/119511 + + parts = [] + + while size > 0: + + if len(parts) < len(self._readbuf): + buf = self._readbuf[len(parts)] + else: + try: + buf = self.sock.recv(DEFAULT_BUFFER_SIZE) + except ConnectionError: + break + if not buf: + break + self._readbuf.append(buf) + + if len(buf) >= size: + parts.append(buf[:size]) + self._readbuf = [buf[size:]] + self._readbuf[len(parts):] + break + parts.append(buf) + size -= len(buf) + + return b''.join(parts) + + + def readline(self): + """Read line from remote.""" + # The comment in read() explains why we implement our own readline(). 
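+        # (Design note: self._readbuf keeps every fragment already received,
+        # and 'parts' below only mirrors its prefix, so a socket timeout
+        # mid-line leaves the buffer intact for a later retry.)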
+
+        LF = b'\n'
+        parts = []
+        length = 0
+
+        while length < _MAXLINE:
+
+            if len(parts) < len(self._readbuf):
+                buf = self._readbuf[len(parts)]
+            else:
+                try:
+                    buf = self.sock.recv(DEFAULT_BUFFER_SIZE)
+                except ConnectionError:
+                    break
+                if not buf:
+                    break
+                self._readbuf.append(buf)
+
+            pos = buf.find(LF)
+            if pos != -1:
+                pos += 1
+                parts.append(buf[:pos])
+                self._readbuf = [buf[pos:]] + self._readbuf[len(parts):]
+                break
+            parts.append(buf)
+            length += len(buf)
+
+        line = b''.join(parts)
+        if len(line) > _MAXLINE:
+            raise self.error("got more than %d bytes" % _MAXLINE)
+        return line
+
+
+    def send(self, data):
+        """Send data to remote."""
+        sys.audit("imaplib.send", self, data)
+        self.sock.sendall(data)
+
+
+    def shutdown(self):
+        """Close I/O established in "open"."""
+        self._file.close()
+        try:
+            self.sock.shutdown(socket.SHUT_RDWR)
+        except OSError as exc:
+            # The server might already have closed the connection.
+            # On Windows, this may result in WSAEINVAL (error 10022):
+            # An invalid operation was attempted.
+            if (exc.errno != errno.ENOTCONN
+                    and getattr(exc, 'winerror', 0) != 10022):
+                raise
+        finally:
+            self.sock.close()
+
+
+    def socket(self):
+        """Return socket instance used to connect to IMAP4 server.
+
+        socket = <instance>.socket()
+        """
+        return self.sock
+
+
+
+    #       Utility methods
+
+
+    def recent(self):
+        """Return most recent 'RECENT' responses if any exist,
+        else prompt server for an update using the 'NOOP' command.
+
+        (typ, [data]) = <instance>.recent()
+
+        'data' is None if no new messages,
+        else list of RECENT responses, most recent last.
+        """
+        name = 'RECENT'
+        typ, dat = self._untagged_response('OK', [None], name)
+        if dat[-1]:
+            return typ, dat
+        typ, dat = self.noop()  # Prod server for response
+        return self._untagged_response(typ, dat, name)
+
+
+    def response(self, code):
+        """Return data for response 'code' if received, or None.
+
+        Old value for response 'code' is cleared.
+
+        (code, [data]) = <instance>.response(code)
+        """
+        return self._untagged_response(code, [None], code.upper())
+
+
+
+    #       IMAP4 commands
+
+
+    def append(self, mailbox, flags, date_time, message):
+        """Append message to named mailbox.
+
+        (typ, [data]) = <instance>.append(mailbox, flags, date_time, message)
+
+        All args except 'message' can be None.
+        """
+        name = 'APPEND'
+        if not mailbox:
+            mailbox = 'INBOX'
+        if flags:
+            if (flags[0],flags[-1]) != ('(',')'):
+                flags = '(%s)' % flags
+        else:
+            flags = None
+        if date_time:
+            date_time = Time2Internaldate(date_time)
+        else:
+            date_time = None
+        literal = MapCRLF.sub(CRLF, message)
+        self.literal = literal
+        return self._simple_command(name, mailbox, flags, date_time)
+
+
+    def authenticate(self, mechanism, authobject):
+        """Authenticate command - requires response processing.
+
+        'mechanism' specifies which authentication mechanism is to
+        be used - it must appear in <instance>.capabilities in the
+        form AUTH=<mechanism>.
+
+        'authobject' must be a callable object:
+
+            data = authobject(response)
+
+        It will be called to process server continuation responses; the
+        response argument it is passed will be a bytes.  It should return bytes
+        data that will be base64 encoded and sent to the server.  It should
+        return None if the client abort response '*' should be sent instead.
+        """
+        mech = mechanism.upper()
+        # XXX: shouldn't this code be removed, not commented out?
+        #cap = 'AUTH=%s' % mech
+        #if not cap in self.capabilities:       # Let the server decide!
+        #    raise self.error("Server doesn't allow %s authentication."
+        #            % mech)
+        self.literal = _Authenticator(authobject).process
+        typ, dat = self._simple_command('AUTHENTICATE', mech)
+        if typ != 'OK':
+            raise self.error(dat[-1].decode('utf-8', 'replace'))
+        self.state = 'AUTH'
+        return typ, dat
+
+
+    def capability(self):
+        """(typ, [data]) = <instance>.capability()
+        Fetch capabilities list from server."""
+
+        name = 'CAPABILITY'
+        typ, dat = self._simple_command(name)
+        return self._untagged_response(typ, dat, name)
+
+
+    def check(self):
+        """Checkpoint mailbox on server.
+
+        (typ, [data]) = <instance>.check()
+        """
+        return self._simple_command('CHECK')
+
+
+    def close(self):
+        """Close currently selected mailbox.
+
+        Deleted messages are removed from writable mailbox.
+        This is the recommended command before 'LOGOUT'.
+
+        (typ, [data]) = <instance>.close()
+        """
+        try:
+            typ, dat = self._simple_command('CLOSE')
+        finally:
+            self.state = 'AUTH'
+        return typ, dat
+
+
+    def copy(self, message_set, new_mailbox):
+        """Copy 'message_set' messages onto end of 'new_mailbox'.
+
+        (typ, [data]) = <instance>.copy(message_set, new_mailbox)
+        """
+        return self._simple_command('COPY', message_set, new_mailbox)
+
+
+    def create(self, mailbox):
+        """Create new mailbox.
+
+        (typ, [data]) = <instance>.create(mailbox)
+        """
+        return self._simple_command('CREATE', mailbox)
+
+
+    def delete(self, mailbox):
+        """Delete old mailbox.
+
+        (typ, [data]) = <instance>.delete(mailbox)
+        """
+        return self._simple_command('DELETE', mailbox)
+
+    def deleteacl(self, mailbox, who):
+        """Delete the ACLs (remove any rights) set for who on mailbox.
+
+        (typ, [data]) = <instance>.deleteacl(mailbox, who)
+        """
+        return self._simple_command('DELETEACL', mailbox, who)
+
+    def enable(self, capability):
+        """Send an RFC5161 enable string to the server.
+
+        (typ, [data]) = <instance>.enable(capability)
+        """
+        if 'ENABLE' not in self.capabilities:
+            raise IMAP4.error("Server does not support ENABLE")
+        typ, data = self._simple_command('ENABLE', capability)
+        if typ == 'OK' and 'UTF8=ACCEPT' in capability.upper():
+            self._mode_utf8()
+        return typ, data
+
+    def expunge(self):
+        """Permanently remove deleted items from selected mailbox.
+
+        Generates 'EXPUNGE' response for each deleted message.
+
+        (typ, [data]) = <instance>.expunge()
+
+        'data' is list of 'EXPUNGE'd message numbers in order received.
+        """
+        name = 'EXPUNGE'
+        typ, dat = self._simple_command(name)
+        return self._untagged_response(typ, dat, name)
+
+
+    def fetch(self, message_set, message_parts):
+        """Fetch (parts of) messages.
+
+        (typ, [data, ...]) = <instance>.fetch(message_set, message_parts)
+
+        'message_parts' should be a string of selected parts
+        enclosed in parentheses, eg: "(UID BODY[TEXT])".
+
+        'data' are tuples of message part envelope and data.
+        """
+        name = 'FETCH'
+        typ, dat = self._simple_command(name, message_set, message_parts)
+        return self._untagged_response(typ, dat, name)
+
+
+    def getacl(self, mailbox):
+        """Get the ACLs for a mailbox.
+
+        (typ, [data]) = <instance>.getacl(mailbox)
+        """
+        typ, dat = self._simple_command('GETACL', mailbox)
+        return self._untagged_response(typ, dat, 'ACL')
+
+
+    def getannotation(self, mailbox, entry, attribute):
+        """(typ, [data]) = <instance>.getannotation(mailbox, entry, attribute)
+        Retrieve ANNOTATIONs."""
+
+        typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute)
+        return self._untagged_response(typ, dat, 'ANNOTATION')
+
+
+    def getquota(self, root):
+        """Get the quota root's resource usage and limits.
+
+        Part of the IMAP4 QUOTA extension defined in rfc2087.
+
+        (typ, [data]) = <instance>.getquota(root)
+        """
+        typ, dat = self._simple_command('GETQUOTA', root)
+        return self._untagged_response(typ, dat, 'QUOTA')
+
+
+    def getquotaroot(self, mailbox):
+        """Get the list of quota roots for the named mailbox.
+
+        (typ, [[QUOTAROOT responses...], [QUOTA responses]]) = <instance>.getquotaroot(mailbox)
+        """
+        typ, dat = self._simple_command('GETQUOTAROOT', mailbox)
+        typ, quota = self._untagged_response(typ, dat, 'QUOTA')
+        typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT')
+        return typ, [quotaroot, quota]
+
+
+    def idle(self, duration=None):
+        """Return an iterable IDLE context manager producing untagged responses.
+        If the argument is not None, limit iteration to 'duration' seconds.
+
+        with M.idle(duration=29 * 60) as idler:
+            for typ, data in idler:
+                print(typ, data)
+
+        Note: 'duration' requires a socket connection (not IMAP4_stream).
+        """
+        return Idler(self, duration)
+
+
+    def list(self, directory='""', pattern='*'):
+        """List mailbox names in directory matching pattern.
+
+        (typ, [data]) = <instance>.list(directory='""', pattern='*')
+
+        'data' is list of LIST responses.
+        """
+        name = 'LIST'
+        typ, dat = self._simple_command(name, directory, pattern)
+        return self._untagged_response(typ, dat, name)
+
+
+    def login(self, user, password):
+        """Identify client using plaintext password.
+
+        (typ, [data]) = <instance>.login(user, password)
+
+        NB: 'password' will be quoted.
+        """
+        typ, dat = self._simple_command('LOGIN', user, self._quote(password))
+        if typ != 'OK':
+            raise self.error(dat[-1])
+        self.state = 'AUTH'
+        return typ, dat
+
+
+    def login_cram_md5(self, user, password):
+        """ Force use of CRAM-MD5 authentication.
+
+        (typ, [data]) = <instance>.login_cram_md5(user, password)
+        """
+        self.user, self.password = user, password
+        return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH)
+
+
+    def _CRAM_MD5_AUTH(self, challenge):
+        """ Authobject to use with CRAM-MD5 authentication. """
+        import hmac
+
+        if isinstance(self.password, str):
+            password = self.password.encode('utf-8')
+        else:
+            password = self.password
+
+        try:
+            authcode = hmac.HMAC(password, challenge, 'md5')
+        except ValueError:  # HMAC-MD5 is not available
+            raise self.error("CRAM-MD5 authentication is not supported")
+        return f"{self.user} {authcode.hexdigest()}"
+
+
+    def logout(self):
+        """Shutdown connection to server.
+
+        (typ, [data]) = <instance>.logout()
+
+        Returns server 'BYE' response.
+        """
+        self.state = 'LOGOUT'
+        typ, dat = self._simple_command('LOGOUT')
+        self.shutdown()
+        return typ, dat
+
+
+    def lsub(self, directory='""', pattern='*'):
+        """List 'subscribed' mailbox names in directory matching pattern.
+
+        (typ, [data, ...]) = <instance>.lsub(directory='""', pattern='*')
+
+        'data' are tuples of message part envelope and data.
+        """
+        name = 'LSUB'
+        typ, dat = self._simple_command(name, directory, pattern)
+        return self._untagged_response(typ, dat, name)
+
+    def myrights(self, mailbox):
+        """Show my ACLs for a mailbox (i.e. the rights that I have on mailbox).
+
+        (typ, [data]) = <instance>.myrights(mailbox)
+        """
+        typ,dat = self._simple_command('MYRIGHTS', mailbox)
+        return self._untagged_response(typ, dat, 'MYRIGHTS')
+
+    def namespace(self):
+        """ Returns IMAP namespaces ala rfc2342
+
+        (typ, [data, ...]) = <instance>.namespace()
+        """
+        name = 'NAMESPACE'
+        typ, dat = self._simple_command(name)
+        return self._untagged_response(typ, dat, name)
+
+
+    def noop(self):
+        """Send NOOP command.
+
+        (typ, [data]) = <instance>.noop()
+        """
+        if __debug__:
+            if self.debug >= 3:
+                self._dump_ur(self.untagged_responses)
+        return self._simple_command('NOOP')
+
+
+    def partial(self, message_num, message_part, start, length):
+        """Fetch truncated part of a message.
+
+        (typ, [data, ...]) = <instance>.partial(message_num, message_part, start, length)
+
+        'data' is tuple of message part envelope and data.
+        """
+        name = 'PARTIAL'
+        typ, dat = self._simple_command(name, message_num, message_part, start, length)
+        return self._untagged_response(typ, dat, 'FETCH')
+
+
+    def proxyauth(self, user):
+        """Assume authentication as "user".
+
+        Allows an authorised administrator to proxy into any user's
+        mailbox.
+
+        (typ, [data]) = <instance>.proxyauth(user)
+        """
+
+        name = 'PROXYAUTH'
+        return self._simple_command('PROXYAUTH', user)
+
+
+    def rename(self, oldmailbox, newmailbox):
+        """Rename old mailbox name to new.
+
+        (typ, [data]) = <instance>.rename(oldmailbox, newmailbox)
+        """
+        return self._simple_command('RENAME', oldmailbox, newmailbox)
+
+
+    def search(self, charset, *criteria):
+        """Search mailbox for matching messages.
+
+        (typ, [data]) = <instance>.search(charset, criterion, ...)
+
+        'data' is space separated list of matching message numbers.
+        If UTF8 is enabled, charset MUST be None.
+        """
+        name = 'SEARCH'
+        if charset:
+            if self.utf8_enabled:
+                raise IMAP4.error("Non-None charset not valid in UTF8 mode")
+            typ, dat = self._simple_command(name, 'CHARSET', charset, *criteria)
+        else:
+            typ, dat = self._simple_command(name, *criteria)
+        return self._untagged_response(typ, dat, name)
+
+
+    def select(self, mailbox='INBOX', readonly=False):
+        """Select a mailbox.
+
+        Flush all untagged responses.
+
+        (typ, [data]) = <instance>.select(mailbox='INBOX', readonly=False)
+
+        'data' is count of messages in mailbox ('EXISTS' response).
+
+        Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
+        other responses should be obtained via <instance>.response('FLAGS') etc.
+        """
+        self.untagged_responses = {}    # Flush old responses.
+        self.is_readonly = readonly
+        if readonly:
+            name = 'EXAMINE'
+        else:
+            name = 'SELECT'
+        typ, dat = self._simple_command(name, mailbox)
+        if typ != 'OK':
+            self.state = 'AUTH'     # Might have been 'SELECTED'
+            return typ, dat
+        self.state = 'SELECTED'
+        if 'READ-ONLY' in self.untagged_responses \
+                and not readonly:
+            if __debug__:
+                if self.debug >= 1:
+                    self._dump_ur(self.untagged_responses)
+            raise self.readonly('%s is not writable' % mailbox)
+        return typ, self.untagged_responses.get('EXISTS', [None])
+
+
+    def setacl(self, mailbox, who, what):
+        """Set a mailbox acl.
+
+        (typ, [data]) = <instance>.setacl(mailbox, who, what)
+        """
+        return self._simple_command('SETACL', mailbox, who, what)
+
+
+    def setannotation(self, *args):
+        """(typ, [data]) = <instance>.setannotation(mailbox[, entry, attribute]+)
+        Set ANNOTATIONs."""
+
+        typ, dat = self._simple_command('SETANNOTATION', *args)
+        return self._untagged_response(typ, dat, 'ANNOTATION')
+
+
+    def setquota(self, root, limits):
+        """Set the quota root's resource limits.
+
+        (typ, [data]) = <instance>.setquota(root, limits)
+        """
+        typ, dat = self._simple_command('SETQUOTA', root, limits)
+        return self._untagged_response(typ, dat, 'QUOTA')
+
+
+    def sort(self, sort_criteria, charset, *search_criteria):
+        """IMAP4rev1 extension SORT command.
+
+        (typ, [data]) = <instance>.sort(sort_criteria, charset, search_criteria, ...)
+        """
+        name = 'SORT'
+        #if not name in self.capabilities:      # Let the server decide!
+        #       raise self.error('unimplemented extension command: %s' % name)
+        if (sort_criteria[0],sort_criteria[-1]) != ('(',')'):
+            sort_criteria = '(%s)' % sort_criteria
+        typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria)
+        return self._untagged_response(typ, dat, name)
+
+
+    def starttls(self, ssl_context=None):
+        name = 'STARTTLS'
+        if not HAVE_SSL:
+            raise self.error('SSL support missing')
+        if self._tls_established:
+            raise self.abort('TLS session already established')
+        if name not in self.capabilities:
+            raise self.abort('TLS not supported by server')
+        # Generate a default SSL context if none was passed.
+        if ssl_context is None:
+            ssl_context = ssl._create_stdlib_context()
+        typ, dat = self._simple_command(name)
+        if typ == 'OK':
+            self.sock = ssl_context.wrap_socket(self.sock,
+                                                server_hostname=self.host)
+            self._file = self.sock.makefile('rb')
+            self._tls_established = True
+            self._get_capabilities()
+        else:
+            raise self.error("Couldn't establish TLS session")
+        return self._untagged_response(typ, dat, name)
+
+
+    def status(self, mailbox, names):
+        """Request named status conditions for mailbox.
+
+        (typ, [data]) = .status(mailbox, names)
+        """
+        name = 'STATUS'
+        #if self.PROTOCOL_VERSION == 'IMAP4':   # Let the server decide!
+        #    raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name)
+        typ, dat = self._simple_command(name, mailbox, names)
+        return self._untagged_response(typ, dat, name)
+
+
+    def store(self, message_set, command, flags):
+        """Alters flag dispositions for messages in mailbox.
+
+        (typ, [data]) = .store(message_set, command, flags)
+        """
+        if (flags[0],flags[-1]) != ('(',')'):
+            flags = '(%s)' % flags  # Avoid quoting the flags
+        typ, dat = self._simple_command('STORE', message_set, command, flags)
+        return self._untagged_response(typ, dat, 'FETCH')
+
+
+    def subscribe(self, mailbox):
+        """Subscribe to new mailbox.
+
+        (typ, [data]) = .subscribe(mailbox)
+        """
+        return self._simple_command('SUBSCRIBE', mailbox)
+
+
+    def thread(self, threading_algorithm, charset, *search_criteria):
+        """IMAP4rev1 extension THREAD command.
+
+        (typ, [data]) = .thread(threading_algorithm, charset, search_criteria, ...)
+        """
+        name = 'THREAD'
+        typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria)
+        return self._untagged_response(typ, dat, name)
+
+
+    def uid(self, command, *args):
+        """Execute "command arg ..." with messages identified by UID,
+                rather than message number.
+
+        (typ, [data]) = .uid(command, arg1, arg2, ...)
+
+        Returns response appropriate to 'command'.
+        """
+        command = command.upper()
+        if not command in Commands:
+            raise self.error("Unknown IMAP4 UID command: %s" % command)
+        if self.state not in Commands[command]:
+            raise self.error("command %s illegal in state %s, "
+                             "only allowed in states %s" %
+                             (command, self.state,
+                              ', '.join(Commands[command])))
+        name = 'UID'
+        typ, dat = self._simple_command(name, command, *args)
+        if command in ('SEARCH', 'SORT', 'THREAD'):
+            name = command
+        else:
+            name = 'FETCH'
+        return self._untagged_response(typ, dat, name)
+
+
+    def unsubscribe(self, mailbox):
+        """Unsubscribe from old mailbox.
+
+        (typ, [data]) = .unsubscribe(mailbox)
+        """
+        return self._simple_command('UNSUBSCRIBE', mailbox)
+
+
+    def unselect(self):
+        """Free server's resources associated with the selected mailbox
+        and returns the server to the authenticated state.
+ This command performs the same actions as CLOSE, except + that no messages are permanently removed from the currently + selected mailbox. + + (typ, [data]) = .unselect() + """ + try: + typ, data = self._simple_command('UNSELECT') + finally: + self.state = 'AUTH' + return typ, data + + + def xatom(self, name, *args): + """Allow simple extension commands + notified by server in CAPABILITY response. + + Assumes command is legal in current state. + + (typ, [data]) = .xatom(name, arg, ...) + + Returns response appropriate to extension command 'name'. + """ + name = name.upper() + #if not name in self.capabilities: # Let the server decide! + # raise self.error('unknown extension command: %s' % name) + if not name in Commands: + Commands[name] = (self.state,) + return self._simple_command(name, *args) + + + + # Private methods + + + def _append_untagged(self, typ, dat): + if dat is None: + dat = b'' + + # During idle, queue untagged responses for delivery via iteration + if self._idle_capture: + # Responses containing literal strings are passed to us one data + # fragment at a time, while others arrive in a single call. + if (not self._idle_responses or + isinstance(self._idle_responses[-1][1][-1], bytes)): + # We are not continuing a fragmented response; start a new one + self._idle_responses.append((typ, [dat])) + else: + # We are continuing a fragmented response; append the fragment + response = self._idle_responses[-1] + assert response[0] == typ + response[1].append(dat) + if __debug__ and self.debug >= 5: + self._mesg(f'idle: queue untagged {typ} {dat!r}') + return + + ur = self.untagged_responses + if __debug__: + if self.debug >= 5: + self._mesg('untagged_responses[%s] %s += ["%r"]' % + (typ, len(ur.get(typ,'')), dat)) + if typ in ur: + ur[typ].append(dat) + else: + ur[typ] = [dat] + + + def _check_bye(self): + bye = self.untagged_responses.get('BYE') + if bye: + raise self.abort(bye[-1].decode(self._encoding, 'replace')) + + + def _command(self, name, *args): + + if self.state not in Commands[name]: + self.literal = None + raise self.error("command %s illegal in state %s, " + "only allowed in states %s" % + (name, self.state, + ', '.join(Commands[name]))) + + for typ in ('OK', 'NO', 'BAD'): + if typ in self.untagged_responses: + del self.untagged_responses[typ] + + if 'READ-ONLY' in self.untagged_responses \ + and not self.is_readonly: + raise self.readonly('mailbox status changed to READ-ONLY') + + tag = self._new_tag() + name = bytes(name, self._encoding) + data = tag + b' ' + name + for arg in args: + if arg is None: continue + if isinstance(arg, str): + arg = bytes(arg, self._encoding) + data = data + b' ' + arg + + literal = self.literal + if literal is not None: + self.literal = None + if type(literal) is type(self._command): + literator = literal + else: + literator = None + if self.utf8_enabled: + data = data + bytes(' UTF8 (~{%s}' % len(literal), self._encoding) + literal = literal + b')' + else: + data = data + bytes(' {%s}' % len(literal), self._encoding) + + if __debug__: + if self.debug >= 4: + self._mesg('> %r' % data) + else: + self._log('> %r' % data) + + try: + self.send(data + CRLF) + except OSError as val: + raise self.abort('socket error: %s' % val) + + if literal is None: + return tag + + while 1: + # Wait for continuation response + + while self._get_response(): + if self.tagged_commands[tag]: # BAD/NO? 
+ return tag + + # Send literal + + if literator: + literal = literator(self.continuation_response) + + if __debug__: + if self.debug >= 4: + self._mesg('write literal size %s' % len(literal)) + + try: + self.send(literal) + self.send(CRLF) + except OSError as val: + raise self.abort('socket error: %s' % val) + + if not literator: + break + + return tag + + + def _command_complete(self, name, tag): + logout = (name == 'LOGOUT') + # BYE is expected after LOGOUT + if not logout: + self._check_bye() + try: + typ, data = self._get_tagged_response(tag, expect_bye=logout) + except self.abort as val: + raise self.abort('command: %s => %s' % (name, val)) + except self.error as val: + raise self.error('command: %s => %s' % (name, val)) + if not logout: + self._check_bye() + if typ == 'BAD': + raise self.error('%s command error: %s %s' % (name, typ, data)) + return typ, data + + + def _get_capabilities(self): + typ, dat = self.capability() + if dat == [None]: + raise self.error('no CAPABILITY response from server') + dat = str(dat[-1], self._encoding) + dat = dat.upper() + self.capabilities = tuple(dat.split()) + + + def _get_response(self, start_timeout=False): + + # Read response and store. + # + # Returns None for continuation responses, + # otherwise first response line received. + # + # If start_timeout is given, temporarily uses it as a socket + # timeout while waiting for the start of a response, raising + # _responsetimeout if one doesn't arrive. (Used by Idler.) + + if start_timeout is not False and self.sock: + assert start_timeout is None or start_timeout > 0 + saved_timeout = self.sock.gettimeout() + self.sock.settimeout(start_timeout) + try: + resp = self._get_line() + except TimeoutError as err: + raise self._responsetimeout from err + finally: + self.sock.settimeout(saved_timeout) + else: + resp = self._get_line() + + # Command completion response? + + if self._match(self.tagre, resp): + tag = self.mo.group('tag') + if not tag in self.tagged_commands: + raise self.abort('unexpected tagged response: %r' % resp) + + typ = self.mo.group('type') + typ = str(typ, self._encoding) + dat = self.mo.group('data') + self.tagged_commands[tag] = (typ, [dat]) + else: + dat2 = None + + # '*' (untagged) responses? + + if not self._match(Untagged_response, resp): + if self._match(self.Untagged_status, resp): + dat2 = self.mo.group('data2') + + if self.mo is None: + # Only other possibility is '+' (continuation) response... + + if self._match(Continuation, resp): + self.continuation_response = self.mo.group('data') + return None # NB: indicates continuation + + raise self.abort("unexpected response: %r" % resp) + + typ = self.mo.group('type') + typ = str(typ, self._encoding) + dat = self.mo.group('data') + if dat is None: dat = b'' # Null untagged response + if dat2: dat = dat + b' ' + dat2 + + # Is there a literal to come? + + while self._match(self.Literal, dat): + + # Read literal direct from connection. + + size = int(self.mo.group('size')) + if __debug__: + if self.debug >= 4: + self._mesg('read literal size %s' % size) + data = self.read(size) + + # Store response with literal as tuple + + self._append_untagged(typ, (dat, data)) + + # Read trailer - possibly containing another literal + + dat = self._get_line() + + self._append_untagged(typ, dat) + + # Bracketed response information? 
+ + if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat): + typ = self.mo.group('type') + typ = str(typ, self._encoding) + self._append_untagged(typ, self.mo.group('data')) + + if __debug__: + if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'): + self._mesg('%s response: %r' % (typ, dat)) + + return resp + + + def _get_tagged_response(self, tag, expect_bye=False): + + while 1: + result = self.tagged_commands[tag] + if result is not None: + del self.tagged_commands[tag] + return result + + if expect_bye: + typ = 'BYE' + bye = self.untagged_responses.pop(typ, None) + if bye is not None: + # Server replies to the "LOGOUT" command with "BYE" + return (typ, bye) + + # If we've seen a BYE at this point, the socket will be + # closed, so report the BYE now. + self._check_bye() + + # Some have reported "unexpected response" exceptions. + # Note that ignoring them here causes loops. + # Instead, send me details of the unexpected response and + # I'll update the code in '_get_response()'. + + try: + self._get_response() + except self.abort as val: + if __debug__: + if self.debug >= 1: + self.print_log() + raise + + + def _get_line(self): + + line = self.readline() + if not line: + raise self.abort('socket error: EOF') + + # Protocol mandates all lines terminated by CRLF + if not line.endswith(b'\r\n'): + raise self.abort('socket error: unterminated line: %r' % line) + + line = line[:-2] + if __debug__: + if self.debug >= 4: + self._mesg('< %r' % line) + else: + self._log('< %r' % line) + return line + + + def _match(self, cre, s): + + # Run compiled regular expression match method on 's'. + # Save result, return success. + + self.mo = cre.match(s) + if __debug__: + if self.mo is not None and self.debug >= 5: + self._mesg("\tmatched %r => %r" % (cre.pattern, self.mo.groups())) + return self.mo is not None + + + def _new_tag(self): + + tag = self.tagpre + bytes(str(self.tagnum), self._encoding) + self.tagnum = self.tagnum + 1 + self.tagged_commands[tag] = None + return tag + + + def _quote(self, arg): + + arg = arg.replace('\\', '\\\\') + arg = arg.replace('"', '\\"') + + return '"' + arg + '"' + + + def _simple_command(self, name, *args): + + return self._command_complete(name, self._command(name, *args)) + + + def _untagged_response(self, typ, dat, name): + if typ == 'NO': + return typ, dat + if not name in self.untagged_responses: + return typ, [None] + data = self.untagged_responses.pop(name) + if __debug__: + if self.debug >= 5: + self._mesg('untagged_responses[%s] => %s' % (name, data)) + return typ, data + + + if __debug__: + + def _mesg(self, s, secs=None): + if secs is None: + secs = time.time() + tm = time.strftime('%M:%S', time.localtime(secs)) + sys.stderr.write(' %s.%02d %s\n' % (tm, (secs*100)%100, s)) + sys.stderr.flush() + + def _dump_ur(self, untagged_resp_dict): + if not untagged_resp_dict: + return + items = (f'{key}: {value!r}' + for key, value in untagged_resp_dict.items()) + self._mesg('untagged responses dump:' + '\n\t\t'.join(items)) + + def _log(self, line): + # Keep log of last '_cmd_log_len' interactions for debugging. 
+ self._cmd_log[self._cmd_log_idx] = (line, time.time()) + self._cmd_log_idx += 1 + if self._cmd_log_idx >= self._cmd_log_len: + self._cmd_log_idx = 0 + + def print_log(self): + self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log)) + i, n = self._cmd_log_idx, self._cmd_log_len + while n: + try: + self._mesg(*self._cmd_log[i]) + except: + pass + i += 1 + if i >= self._cmd_log_len: + i = 0 + n -= 1 + + +class Idler: + """Iterable IDLE context manager: start IDLE & produce untagged responses. + + An object of this type is returned by the IMAP4.idle() method. + + Note: The name and structure of this class are subject to change. + """ + + def __init__(self, imap, duration=None): + if 'IDLE' not in imap.capabilities: + raise imap.error("Server does not support IMAP4 IDLE") + if duration is not None and not imap.sock: + # IMAP4_stream pipes don't support timeouts + raise imap.error('duration requires a socket connection') + self._duration = duration + self._deadline = None + self._imap = imap + self._tag = None + self._saved_state = None + + def __enter__(self): + imap = self._imap + assert not imap._idle_responses + assert not imap._idle_capture + + if __debug__ and imap.debug >= 4: + imap._mesg(f'idle start duration={self._duration}') + + # Start capturing untagged responses before sending IDLE, + # so we can deliver via iteration any that arrive while + # the IDLE command continuation request is still pending. + imap._idle_capture = True + + try: + self._tag = imap._command('IDLE') + # As with any command, the server is allowed to send us unrelated, + # untagged responses before acting on IDLE. These lines will be + # returned by _get_response(). When the server is ready, it will + # send an IDLE continuation request, indicated by _get_response() + # returning None. We therefore process responses in a loop until + # this occurs. + while resp := imap._get_response(): + if imap.tagged_commands[self._tag]: + typ, data = imap.tagged_commands.pop(self._tag) + if typ == 'NO': + raise imap.error(f'idle denied: {data}') + raise imap.abort(f'unexpected status response: {resp}') + + if __debug__ and imap.debug >= 4: + prompt = imap.continuation_response + imap._mesg(f'idle continuation prompt: {prompt}') + except BaseException: + imap._idle_capture = False + raise + + if self._duration is not None: + self._deadline = time.monotonic() + self._duration + + self._saved_state = imap.state + imap.state = 'IDLING' + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + imap = self._imap + + if __debug__ and imap.debug >= 4: + imap._mesg('idle done') + imap.state = self._saved_state + + # Stop intercepting untagged responses before sending DONE, + # since we can no longer deliver them via iteration. + imap._idle_capture = False + + # If we captured untagged responses while the IDLE command + # continuation request was still pending, but the user did not + # iterate over them before exiting IDLE, we must put them + # someplace where the user can retrieve them. The only + # sensible place for this is the untagged_responses dict, + # despite its unfortunate inability to preserve the relative + # order of different response types. 
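+        # For example (hypothetical client code, not upstream text): responses
+        # that were never iterated can still be read back afterwards via the
+        # usual response() accessor:
+        #
+        #     with M.idle(duration=5) as idler:
+        #         pass                          # exit without iterating
+        #     typ, data = M.response('EXISTS')  # leftover lands here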
+        if leftovers := len(imap._idle_responses):
+            if __debug__ and imap.debug >= 4:
+                imap._mesg(f'idle quit with {leftovers} leftover responses')
+            while imap._idle_responses:
+                typ, data = imap._idle_responses.pop(0)
+                # Append one fragment at a time, just as _get_response() does
+                for datum in data:
+                    imap._append_untagged(typ, datum)
+
+        try:
+            imap.send(b'DONE' + CRLF)
+            status, [msg] = imap._command_complete('IDLE', self._tag)
+            if __debug__ and imap.debug >= 4:
+                imap._mesg(f'idle status: {status} {msg!r}')
+        except OSError:
+            if not exc_type:
+                raise
+
+        return False  # Do not suppress context body exceptions
+
+    def __iter__(self):
+        return self
+
+    def _pop(self, timeout, default=('', None)):
+        # Get the next response, or a default value on timeout.
+        # The timeout arg can be an int or float, or None for no timeout.
+        # Timeouts require a socket connection (not IMAP4_stream).
+        # This method ignores self._duration.
+
+        # Historical Note:
+        # The timeout was originally implemented using select() after
+        # checking for the presence of already-buffered data.
+        # That allowed timeouts on pipe connections like IMAP4_stream.
+        # However, it seemed possible that SSL data arriving without any
+        # IMAP data afterward could cause select() to indicate available
+        # application data when there was none, leading to a read() call
+        # that would block with no timeout. It was unclear under what
+        # conditions this would happen in practice. Our implementation was
+        # changed to use socket timeouts instead of select(), just to be
+        # safe.
+
+        imap = self._imap
+        if imap.state != 'IDLING':
+            raise imap.error('_pop() only works during IDLE')
+
+        if imap._idle_responses:
+            # Response is ready to return to the user
+            resp = imap._idle_responses.pop(0)
+            if __debug__ and imap.debug >= 4:
+                imap._mesg(f'idle _pop({timeout}) de-queued {resp[0]}')
+            return resp
+
+        if __debug__ and imap.debug >= 4:
+            imap._mesg(f'idle _pop({timeout}) reading')
+
+        if timeout is not None:
+            if timeout <= 0:
+                return default
+            timeout = float(timeout)  # Required by socket.settimeout()
+
+        try:
+            imap._get_response(timeout)  # Reads line, calls _append_untagged()
+        except IMAP4._responsetimeout:
+            if __debug__ and imap.debug >= 4:
+                imap._mesg(f'idle _pop({timeout}) done')
+            return default
+
+        resp = imap._idle_responses.pop(0)
+
+        if __debug__ and imap.debug >= 4:
+            imap._mesg(f'idle _pop({timeout}) read {resp[0]}')
+        return resp
+
+    def __next__(self):
+        imap = self._imap
+
+        if self._duration is None:
+            timeout = None
+        else:
+            timeout = self._deadline - time.monotonic()
+        typ, data = self._pop(timeout)
+
+        if not typ:
+            if __debug__ and imap.debug >= 4:
+                imap._mesg('idle iterator exhausted')
+            raise StopIteration
+
+        return typ, data
+
+    def burst(self, interval=0.1):
+        """Yield a burst of responses no more than 'interval' seconds apart.
+
+        with M.idle() as idler:
+            # get a response and any others following by < 0.1 seconds
+            batch = list(idler.burst())
+            print(f'processing {len(batch)} responses...')
+            print(batch)
+
+        Note: This generator requires a socket connection (not IMAP4_stream).
+ """ + if not self._imap.sock: + raise self._imap.error('burst() requires a socket connection') + + try: + yield next(self) + except StopIteration: + return + + while response := self._pop(interval, None): + yield response + + +if HAVE_SSL: + + class IMAP4_SSL(IMAP4): + + """IMAP4 client class over SSL connection + + Instantiate with: IMAP4_SSL([host[, port[, ssl_context[, timeout=None]]]]) + + host - host's name (default: localhost); + port - port number (default: standard IMAP4 SSL port); + ssl_context - a SSLContext object that contains your certificate chain + and private key (default: None) + timeout - socket timeout (default: None) If timeout is not given or is None, + the global default socket timeout is used + + for more documentation see the docstring of the parent class IMAP4. + """ + + + def __init__(self, host='', port=IMAP4_SSL_PORT, + *, ssl_context=None, timeout=None): + if ssl_context is None: + ssl_context = ssl._create_stdlib_context() + self.ssl_context = ssl_context + IMAP4.__init__(self, host, port, timeout) + + def _create_socket(self, timeout): + sock = IMAP4._create_socket(self, timeout) + return self.ssl_context.wrap_socket(sock, + server_hostname=self.host) + + def open(self, host='', port=IMAP4_SSL_PORT, timeout=None): + """Setup connection to remote server on "host:port". + (default: localhost:standard IMAP4 SSL port). + This connection will be used by the routines: + read, readline, send, shutdown. + """ + IMAP4.open(self, host, port, timeout) + + __all__.append("IMAP4_SSL") + + +class IMAP4_stream(IMAP4): + + """IMAP4 client class over a stream + + Instantiate with: IMAP4_stream(command) + + "command" - a string that can be passed to subprocess.Popen() + + for more documentation see the docstring of the parent class IMAP4. + """ + + + def __init__(self, command): + self.command = command + IMAP4.__init__(self) + + + def open(self, host=None, port=None, timeout=None): + """Setup a stream connection. + This connection will be used by the routines: + read, readline, send, shutdown. + """ + self.host = None # For compatibility with parent class + self.port = None + self.sock = None + self._file = None + self.process = subprocess.Popen(self.command, + bufsize=DEFAULT_BUFFER_SIZE, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + shell=True, close_fds=True) + self.writefile = self.process.stdin + self.readfile = self.process.stdout + + def read(self, size): + """Read 'size' bytes from remote.""" + return self.readfile.read(size) + + + def readline(self): + """Read line from remote.""" + return self.readfile.readline() + + + def send(self, data): + """Send data to remote.""" + self.writefile.write(data) + self.writefile.flush() + + + def shutdown(self): + """Close I/O established in "open".""" + self.readfile.close() + self.writefile.close() + self.process.wait() + + + +class _Authenticator: + + """Private class to provide en/decoding + for base64-based authentication conversation. + """ + + def __init__(self, mechinst): + self.mech = mechinst # Callable object to provide/process data + + def process(self, data): + ret = self.mech(self.decode(data)) + if ret is None: + return b'*' # Abort conversation + return self.encode(ret) + + def encode(self, inp): + # + # Invoke binascii.b2a_base64 iteratively with + # short even length buffers, strip the trailing + # line feed from the result and append. "Even" + # means a number that factors to both 6 and 8, + # so when it gets to the end of the 8-bit input + # there's no partial 6-bit output. 
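+        # As a concrete check (illustrative, not upstream text): 48 is
+        # divisible by both 6 and 8, so each 48-byte chunk encodes to
+        # exactly 64 base64 characters with no '=' padding, letting the
+        # chunks be concatenated into one valid base64 string.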
+ # + oup = b'' + if isinstance(inp, str): + inp = inp.encode('utf-8') + while inp: + if len(inp) > 48: + t = inp[:48] + inp = inp[48:] + else: + t = inp + inp = b'' + e = binascii.b2a_base64(t) + if e: + oup = oup + e[:-1] + return oup + + def decode(self, inp): + if not inp: + return b'' + return binascii.a2b_base64(inp) + +Months = ' Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split(' ') +Mon2num = {s.encode():n+1 for n, s in enumerate(Months[1:])} + +def Internaldate2tuple(resp): + """Parse an IMAP4 INTERNALDATE string. + + Return corresponding local time. The return value is a + time.struct_time tuple or None if the string has wrong format. + """ + + mo = InternalDate.match(resp) + if not mo: + return None + + mon = Mon2num[mo.group('mon')] + zonen = mo.group('zonen') + + day = int(mo.group('day')) + year = int(mo.group('year')) + hour = int(mo.group('hour')) + min = int(mo.group('min')) + sec = int(mo.group('sec')) + zoneh = int(mo.group('zoneh')) + zonem = int(mo.group('zonem')) + + # INTERNALDATE timezone must be subtracted to get UT + + zone = (zoneh*60 + zonem)*60 + if zonen == b'-': + zone = -zone + + tt = (year, mon, day, hour, min, sec, -1, -1, -1) + utc = calendar.timegm(tt) - zone + + return time.localtime(utc) + + + +def Int2AP(num): + + """Convert integer to A-P string representation.""" + + val = b''; AP = b'ABCDEFGHIJKLMNOP' + num = int(abs(num)) + while num: + num, mod = divmod(num, 16) + val = AP[mod:mod+1] + val + return val + + + +def ParseFlags(resp): + + """Convert IMAP4 flags response to python tuple.""" + + mo = Flags.match(resp) + if not mo: + return () + + return tuple(mo.group('flags').split()) + + +def Time2Internaldate(date_time): + + """Convert date_time to IMAP4 INTERNALDATE representation. + + Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The + date_time argument can be a number (int or float) representing + seconds since epoch (as returned by time.time()), a 9-tuple + representing local time, an instance of time.struct_time (as + returned by time.localtime()), an aware datetime instance or a + double-quoted string. In the last case, it is assumed to already + be in the correct format. 
+ """ + if isinstance(date_time, (int, float)): + dt = datetime.fromtimestamp(date_time, + timezone.utc).astimezone() + elif isinstance(date_time, tuple): + try: + gmtoff = date_time.tm_gmtoff + except AttributeError: + if time.daylight: + dst = date_time[8] + if dst == -1: + dst = time.localtime(time.mktime(date_time))[8] + gmtoff = -(time.timezone, time.altzone)[dst] + else: + gmtoff = -time.timezone + delta = timedelta(seconds=gmtoff) + dt = datetime(*date_time[:6], tzinfo=timezone(delta)) + elif isinstance(date_time, datetime): + if date_time.tzinfo is None: + raise ValueError("date_time must be aware") + dt = date_time + elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'): + return date_time # Assume in correct format + else: + raise ValueError("date_time not of a known type") + fmt = '"%d-{}-%Y %H:%M:%S %z"'.format(Months[dt.month]) + return dt.strftime(fmt) + + + +if __name__ == '__main__': + + # To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]' + # or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"' + # to test the IMAP4_stream class + + import getopt, getpass + + try: + optlist, args = getopt.getopt(sys.argv[1:], 'd:s:') + except getopt.error as val: + optlist, args = (), () + + stream_command = None + for opt,val in optlist: + if opt == '-d': + Debug = int(val) + elif opt == '-s': + stream_command = val + if not args: args = (stream_command,) + + if not args: args = ('',) + + host = args[0] + + USER = getpass.getuser() + PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost")) + + test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'} + test_seq1 = ( + ('login', (USER, PASSWD)), + ('create', ('/tmp/xxx 1',)), + ('rename', ('/tmp/xxx 1', '/tmp/yyy')), + ('CREATE', ('/tmp/yyz 2',)), + ('append', ('/tmp/yyz 2', None, None, test_mesg)), + ('list', ('/tmp', 'yy*')), + ('select', ('/tmp/yyz 2',)), + ('search', (None, 'SUBJECT', 'test')), + ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')), + ('store', ('1', 'FLAGS', r'(\Deleted)')), + ('namespace', ()), + ('expunge', ()), + ('recent', ()), + ('close', ()), + ) + + test_seq2 = ( + ('select', ()), + ('response',('UIDVALIDITY',)), + ('uid', ('SEARCH', 'ALL')), + ('response', ('EXISTS',)), + ('append', (None, None, None, test_mesg)), + ('recent', ()), + ('logout', ()), + ) + + def run(cmd, args): + M._mesg('%s %s' % (cmd, args)) + typ, dat = getattr(M, cmd)(*args) + M._mesg('%s => %s %s' % (cmd, typ, dat)) + if typ == 'NO': raise dat[0] + return dat + + try: + if stream_command: + M = IMAP4_stream(stream_command) + else: + M = IMAP4(host) + if M.state == 'AUTH': + test_seq1 = test_seq1[1:] # Login not needed + M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION) + M._mesg('CAPABILITIES = %r' % (M.capabilities,)) + + for cmd,args in test_seq1: + run(cmd, args) + + for ml in run('list', ('/tmp/', 'yy%')): + mo = re.match(r'.*"([^"]+)"$', ml) + if mo: path = mo.group(1) + else: path = ml.split()[-1] + run('delete', (path,)) + + for cmd,args in test_seq2: + dat = run(cmd, args) + + if (cmd,args) != ('uid', ('SEARCH', 'ALL')): + continue + + uid = dat[-1].split() + if not uid: continue + run('uid', ('FETCH', '%s' % uid[-1], + '(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)')) + + print('\nAll tests OK.') + + except: + print('\nTests failed.') + + if not Debug: + print(''' +If you would like to see debugging output, +try: %s -d5 +''' % sys.argv[0]) + + raise diff --git 
a/Python313_13_x64_Template/Lib/importlib/__init__.py b/Python314_4_x64_Template/Lib/importlib/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/importlib/__init__.py
rename to Python314_4_x64_Template/Lib/importlib/__init__.py
diff --git a/Python314_4_x64_Template/Lib/importlib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..5da3dcf7
Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/importlib/__pycache__/_abc.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/__pycache__/_abc.cpython-314.pyc
new file mode 100644
index 00000000..2780ad45
Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/__pycache__/_abc.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/importlib/__pycache__/abc.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/__pycache__/abc.cpython-314.pyc
new file mode 100644
index 00000000..75ac4258
Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/__pycache__/abc.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/importlib/__pycache__/readers.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/__pycache__/readers.cpython-314.pyc
new file mode 100644
index 00000000..62c734aa
Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/__pycache__/readers.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/importlib/_abc.py b/Python314_4_x64_Template/Lib/importlib/_abc.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/importlib/_abc.py
rename to Python314_4_x64_Template/Lib/importlib/_abc.py
diff --git a/Python314_4_x64_Template/Lib/importlib/_bootstrap.py b/Python314_4_x64_Template/Lib/importlib/_bootstrap.py
new file mode 100644
index 00000000..9d911e1d
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/importlib/_bootstrap.py
@@ -0,0 +1,1570 @@
+"""Core implementation of import.
+
+This module is NOT meant to be directly imported! It has been designed such
+that it can be bootstrapped into Python as the implementation of import. As
+such it requires the injection of specific modules and attributes in order to
+work. One should use importlib as the public-facing version of this module.
+
+"""
+#
+# IMPORTANT: Whenever making changes to this module, be sure to run a top-level
+# `make regen-importlib` followed by `make` in order to get the frozen version
+# of the module updated. Not doing so will result in the Makefile failing for
+# all others who don't have a ./python around to freeze the module
+# in the early stages of compilation.
+#
+
+# See importlib._setup() for what is injected into the global namespace.
+
+# When editing this code be aware that code executed at import time CANNOT
+# reference any injected objects! This includes not only global code but also
+# anything specified at the class level.
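+#
+# A rough illustration of that constraint (a sketch, not part of the upstream
+# module): the injected names below start out as None and only become usable
+# after _setup() runs, so module-level code must not touch them:
+#
+#     _weakref = None                  # replaced by _setup() at runtime
+#
+#     def _make_ref(obj):              # fine: called only after _setup()
+#         return _weakref.ref(obj)     # hypothetical helper, for illustration
+#
+#     _early = _weakref.ref(object())  # broken: executes at import time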
+
+def _object_name(obj):
+    try:
+        return obj.__qualname__
+    except AttributeError:
+        return type(obj).__qualname__
+
+# Bootstrap-related code ######################################################
+
+# Modules injected manually by _setup()
+_thread = None
+_warnings = None
+_weakref = None
+
+# Import done by _install_external_importers()
+_bootstrap_external = None
+
+
+def _wrap(new, old):
+    """Simple substitute for functools.update_wrapper."""
+    for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
+        if hasattr(old, replace):
+            setattr(new, replace, getattr(old, replace))
+    new.__dict__.update(old.__dict__)
+
+
+def _new_module(name):
+    return type(sys)(name)
+
+
+# Module-level locking ########################################################
+
+# For a list that can have a weakref to it.
+class _List(list):
+    __slots__ = ("__weakref__",)
+
+
+# Copied from weakref.py with some simplifications and modifications unique to
+# bootstrapping importlib. Many methods were simply deleted for simplicity, so if they
+# are needed in the future they may work if simply copied back in.
+class _WeakValueDictionary:
+
+    def __init__(self):
+        self_weakref = _weakref.ref(self)
+
+        # Inlined to avoid issues with inheriting from _weakref.ref before _weakref is
+        # set by _setup(). Since there's only one instance of this class, this is
+        # not expensive.
+        class KeyedRef(_weakref.ref):
+
+            __slots__ = "key",
+
+            def __new__(type, ob, key):
+                self = super().__new__(type, ob, type.remove)
+                self.key = key
+                return self
+
+            def __init__(self, ob, key):
+                super().__init__(ob, self.remove)
+
+            @staticmethod
+            def remove(wr):
+                nonlocal self_weakref
+
+                self = self_weakref()
+                if self is not None:
+                    if self._iterating:
+                        self._pending_removals.append(wr.key)
+                    else:
+                        _weakref._remove_dead_weakref(self.data, wr.key)
+
+        self._KeyedRef = KeyedRef
+        self.clear()
+
+    def clear(self):
+        self._pending_removals = []
+        self._iterating = set()
+        self.data = {}
+
+    def _commit_removals(self):
+        pop = self._pending_removals.pop
+        d = self.data
+        while True:
+            try:
+                key = pop()
+            except IndexError:
+                return
+            _weakref._remove_dead_weakref(d, key)
+
+    def get(self, key, default=None):
+        if self._pending_removals:
+            self._commit_removals()
+        try:
+            wr = self.data[key]
+        except KeyError:
+            return default
+        else:
+            if (o := wr()) is None:
+                return default
+            else:
+                return o
+
+    def setdefault(self, key, default=None):
+        try:
+            o = self.data[key]()
+        except KeyError:
+            o = None
+        if o is None:
+            if self._pending_removals:
+                self._commit_removals()
+            self.data[key] = self._KeyedRef(default, key)
+            return default
+        else:
+            return o
+
+
+# A dict mapping module names to weakrefs of _ModuleLock instances.
+# Dictionary protected by the global import lock.
+_module_locks = {}
+
+# A dict mapping thread IDs to weakref'ed lists of _ModuleLock instances.
+# This maps a thread to the module locks it is blocking on acquiring.  The
+# values are lists because a single thread could perform a re-entrant import
+# and be "in the process" of blocking on locks for more than one module.  A
+# thread can be "in the process" because a thread cannot actually block on
+# acquiring more than one lock but it can have set up bookkeeping that reflects
+# that it intends to block on acquiring more than one lock.
+#
+# The dictionary uses a WeakValueDictionary to avoid keeping unnecessary
+# lists around, regardless of GC runs. This way there's no memory leak if
+# the list is no longer needed (GH-106176).
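+#
+# Illustrative sketch (hypothetical values, not upstream text): if thread A
+# holds module foo's lock while blocking on bar's lock, and thread B holds
+# bar's lock while blocking on foo's lock, the graph looks like
+#
+#     _blocking_on = {tid_a: [bar_lock], tid_b: [foo_lock]}
+#
+# with bar_lock.owner == tid_b and foo_lock.owner == tid_a; that cycle is
+# exactly what _has_deadlocked() below walks the graph to find.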
+_blocking_on = None
+
+
+class _BlockingOnManager:
+    """A context manager responsible for updating ``_blocking_on``."""
+    def __init__(self, thread_id, lock):
+        self.thread_id = thread_id
+        self.lock = lock
+
+    def __enter__(self):
+        """Mark the running thread as waiting for self.lock via _blocking_on."""
+        # Interactions with _blocking_on are *not* protected by the global
+        # import lock here because each thread only touches the state that it
+        # owns (state keyed on its thread id).  The global import lock is
+        # re-entrant (i.e., a single thread may take it more than once) so it
+        # wouldn't help us be correct in the face of re-entrancy either.
+
+        self.blocked_on = _blocking_on.setdefault(self.thread_id, _List())
+        self.blocked_on.append(self.lock)
+
+    def __exit__(self, *args, **kwargs):
+        """Remove self.lock from this thread's _blocking_on list."""
+        self.blocked_on.remove(self.lock)
+
+
+class _DeadlockError(RuntimeError):
+    pass
+
+
+
+def _has_deadlocked(target_id, *, seen_ids, candidate_ids, blocking_on):
+    """Check if 'target_id' is holding the same lock as other threads.
+
+    The search within 'blocking_on' starts with the threads listed in
+    'candidate_ids'.  'seen_ids' contains any threads that are considered
+    already traversed in the search.
+
+    Keyword arguments:
+    target_id -- The thread id to try to reach.
+    seen_ids -- A set of threads that have already been visited.
+    candidate_ids -- The thread ids from which to begin.
+    blocking_on -- A dict representing the thread/blocking-on graph. This may
+                   be the same object as the global '_blocking_on' but it is
+                   a parameter to reduce the impact that global mutable
+                   state has on the result of this function.
+    """
+    if target_id in candidate_ids:
+        # If we have already reached the target_id, we're done - signal that it
+        # is reachable.
+        return True
+
+    # Otherwise, try to reach the target_id from each of the given candidate_ids.
+    for tid in candidate_ids:
+        if not (candidate_blocking_on := blocking_on.get(tid)):
+            # There are no edges out from this node, skip it.
+            continue
+        elif tid in seen_ids:
+            # bpo 38091: the chain of tid's we encounter here eventually leads
+            # to a fixed point or a cycle, but does not reach target_id.
+            # This means we would not actually deadlock. This can happen if
+            # other threads are at the beginning of acquire() below.
+            return False
+        seen_ids.add(tid)
+
+        # Follow the edges out from this thread.
+        edges = [lock.owner for lock in candidate_blocking_on]
+        if _has_deadlocked(target_id, seen_ids=seen_ids, candidate_ids=edges,
+                           blocking_on=blocking_on):
+            return True
+
+    return False
+
+
+class _ModuleLock:
+    """A recursive lock implementation which is able to detect deadlocks
+    (e.g. thread 1 trying to take locks A then B, and thread 2 trying to
+    take locks B then A).
+    """
+
+    def __init__(self, name):
+        # Create an RLock for protecting the import process for the
+        # corresponding module. Since it is an RLock, a single thread will be
+        # able to take it more than once. This is necessary to support
+        # re-entrancy in the import system that arises from (at least) signal
+        # handlers and the garbage collector. Consider the case of:
+        #
+        #  import foo
+        #  -> ...
+        #  -> importlib._bootstrap._ModuleLock.acquire
+        #  -> ...
+        #  -> <garbage collector>
+        #  -> __del__
+        #  -> import foo
+        #  -> ...
+ # -> importlib._bootstrap._ModuleLock.acquire + # -> _BlockingOnManager.__enter__ + # + # If a different thread than the running one holds the lock then the + # thread will have to block on taking the lock, which is what we want + # for thread safety. + self.lock = _thread.RLock() + self.wakeup = _thread.allocate_lock() + + # The name of the module for which this is a lock. + self.name = name + + # Can end up being set to None if this lock is not owned by any thread + # or the thread identifier for the owning thread. + self.owner = None + + # Represent the number of times the owning thread has acquired this lock + # via a list of True. This supports RLock-like ("re-entrant lock") + # behavior, necessary in case a single thread is following a circular + # import dependency and needs to take the lock for a single module + # more than once. + # + # Counts are represented as a list of True because list.append(True) + # and list.pop() are both atomic and thread-safe in CPython and it's hard + # to find another primitive with the same properties. + self.count = [] + + # This is a count of the number of threads that are blocking on + # self.wakeup.acquire() awaiting to get their turn holding this module + # lock. When the module lock is released, if this is greater than + # zero, it is decremented and `self.wakeup` is released one time. The + # intent is that this will let one other thread make more progress on + # acquiring this module lock. This repeats until all the threads have + # gotten a turn. + # + # This is incremented in self.acquire() when a thread notices it is + # going to have to wait for another thread to finish. + # + # See the comment above count for explanation of the representation. + self.waiters = [] + + def has_deadlock(self): + # To avoid deadlocks for concurrent or re-entrant circular imports, + # look at _blocking_on to see if any threads are blocking + # on getting the import lock for any module for which the import lock + # is held by this thread. + return _has_deadlocked( + # Try to find this thread. + target_id=_thread.get_ident(), + seen_ids=set(), + # Start from the thread that holds the import lock for this + # module. + candidate_ids=[self.owner], + # Use the global "blocking on" state. + blocking_on=_blocking_on, + ) + + def acquire(self): + """ + Acquire the module lock. If a potential deadlock is detected, + a _DeadlockError is raised. + Otherwise, the lock is always acquired and True is returned. + """ + tid = _thread.get_ident() + with _BlockingOnManager(tid, self): + while True: + # Protect interaction with state on self with a per-module + # lock. This makes it safe for more than one thread to try to + # acquire the lock for a single module at the same time. + with self.lock: + if self.count == [] or self.owner == tid: + # If the lock for this module is unowned then we can + # take the lock immediately and succeed. If the lock + # for this module is owned by the running thread then + # we can also allow the acquire to succeed. This + # supports circular imports (thread T imports module A + # which imports module B which imports module A). + self.owner = tid + self.count.append(True) + return True + + # At this point we know the lock is held (because count != + # 0) by another thread (because owner != tid). We'll have + # to get in line to take the module lock. + + # But first, check to see if this thread would create a + # deadlock by acquiring this module lock. If it would + # then just stop with an error. 
+ # + # It's not clear who is expected to handle this error. + # There is one handler in _lock_unlock_module but many + # times this method is called when entering the context + # manager _ModuleLockManager instead - so _DeadlockError + # will just propagate up to application code. + # + # This seems to be more than just a hypothetical - + # https://stackoverflow.com/questions/59509154 + # https://github.com/encode/django-rest-framework/issues/7078 + if self.has_deadlock(): + raise _DeadlockError(f'deadlock detected by {self!r}') + + # Check to see if we're going to be able to acquire the + # lock. If we are going to have to wait then increment + # the waiters so `self.release` will know to unblock us + # later on. We do this part non-blockingly so we don't + # get stuck here before we increment waiters. We have + # this extra acquire call (in addition to the one below, + # outside the self.lock context manager) to make sure + # self.wakeup is held when the next acquire is called (so + # we block). This is probably needlessly complex and we + # should just take self.wakeup in the return codepath + # above. + if self.wakeup.acquire(False): + self.waiters.append(None) + + # Now take the lock in a blocking fashion. This won't + # complete until the thread holding this lock + # (self.owner) calls self.release. + self.wakeup.acquire() + + # Taking the lock has served its purpose (making us wait), so we can + # give it up now. We'll take it w/o blocking again on the + # next iteration around this 'while' loop. + self.wakeup.release() + + def release(self): + tid = _thread.get_ident() + with self.lock: + if self.owner != tid: + raise RuntimeError('cannot release un-acquired lock') + assert len(self.count) > 0 + self.count.pop() + if not len(self.count): + self.owner = None + if len(self.waiters) > 0: + self.waiters.pop() + self.wakeup.release() + + def locked(self): + return bool(self.count) + + def __repr__(self): + return f'_ModuleLock({self.name!r}) at {id(self)}' + + +class _DummyModuleLock: + """A simple _ModuleLock equivalent for Python builds without + multi-threading support.""" + + def __init__(self, name): + self.name = name + self.count = 0 + + def acquire(self): + self.count += 1 + return True + + def release(self): + if self.count == 0: + raise RuntimeError('cannot release un-acquired lock') + self.count -= 1 + + def __repr__(self): + return f'_DummyModuleLock({self.name!r}) at {id(self)}' + + +class _ModuleLockManager: + + def __init__(self, name): + self._name = name + self._lock = None + + def __enter__(self): + self._lock = _get_module_lock(self._name) + self._lock.acquire() + + def __exit__(self, *args, **kwargs): + self._lock.release() + + +# The following two functions are for consumption by Python/import.c. + +def _get_module_lock(name): + """Get or create the module lock for a given module name. + + Acquire/release internally the global import lock to protect + _module_locks.""" + + _imp.acquire_lock() + try: + try: + lock = _module_locks[name]() + except KeyError: + lock = None + + if lock is None: + if _thread is None: + lock = _DummyModuleLock(name) + else: + lock = _ModuleLock(name) + + def cb(ref, name=name): + _imp.acquire_lock() + try: + # bpo-31070: Check if another thread created a new lock + # after the previous lock was destroyed + # but before the weakref callback was called. 
+                    if _module_locks.get(name) is ref:
+                        del _module_locks[name]
+                finally:
+                    _imp.release_lock()
+
+            _module_locks[name] = _weakref.ref(lock, cb)
+    finally:
+        _imp.release_lock()
+
+    return lock
+
+
+def _lock_unlock_module(name):
+    """Acquires then releases the module lock for a given module name.
+
+    This is used to ensure a module is completely initialized, in the
+    event it is being imported by another thread.
+    """
+    lock = _get_module_lock(name)
+    try:
+        lock.acquire()
+    except _DeadlockError:
+        # Concurrent circular import, we'll accept a partially initialized
+        # module object.
+        pass
+    else:
+        lock.release()
+
+# Frame stripping magic ###############################################
+def _call_with_frames_removed(f, *args, **kwds):
+    """remove_importlib_frames in import.c will always remove sequences
+    of importlib frames that end with a call to this function
+
+    Use it instead of a normal call in places where including the importlib
+    frames introduces unwanted noise into the traceback (e.g. when executing
+    module code)
+    """
+    return f(*args, **kwds)
+
+
+def _verbose_message(message, *args, verbosity=1):
+    """Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
+    if sys.flags.verbose >= verbosity:
+        if not message.startswith(('#', 'import ')):
+            message = '# ' + message
+        print(message.format(*args), file=sys.stderr)
+
+
+def _requires_builtin(fxn):
+    """Decorator to verify the named module is built-in."""
+    def _requires_builtin_wrapper(self, fullname):
+        if fullname not in sys.builtin_module_names:
+            raise ImportError(f'{fullname!r} is not a built-in module',
+                              name=fullname)
+        return fxn(self, fullname)
+    _wrap(_requires_builtin_wrapper, fxn)
+    return _requires_builtin_wrapper
+
+
+def _requires_frozen(fxn):
+    """Decorator to verify the named module is frozen."""
+    def _requires_frozen_wrapper(self, fullname):
+        if not _imp.is_frozen(fullname):
+            raise ImportError(f'{fullname!r} is not a frozen module',
+                              name=fullname)
+        return fxn(self, fullname)
+    _wrap(_requires_frozen_wrapper, fxn)
+    return _requires_frozen_wrapper
+
+
+# Typically used by loader classes as a method replacement.
+def _load_module_shim(self, fullname):
+    """Load the specified module into sys.modules and return it.
+
+    This method is deprecated. Use loader.exec_module() instead.
+
+    """
+    msg = ("the load_module() method is deprecated and slated for removal in "
+           "Python 3.15; use exec_module() instead")
+    _warnings.warn(msg, DeprecationWarning)
+    spec = spec_from_loader(fullname, self)
+    if fullname in sys.modules:
+        module = sys.modules[fullname]
+        _exec(spec, module)
+        return sys.modules[fullname]
+    else:
+        return _load(spec)
+
+# Module specifications #######################################################
+
+def _module_repr(module):
+    """The implementation of ModuleType.__repr__()."""
+    loader = getattr(module, '__loader__', None)
+    if spec := getattr(module, "__spec__", None):
+        return _module_repr_from_spec(spec)
+    # Fall through to a catch-all which always succeeds.
+    try:
+        name = module.__name__
+    except AttributeError:
+        name = '?'
+    try:
+        filename = module.__file__
+    except AttributeError:
+        if loader is None:
+            return f'<module {name!r}>'
+        else:
+            return f'<module {name!r} ({loader!r})>'
+    else:
+        return f'<module {name!r} from {filename!r}>'
+
+
+class ModuleSpec:
+    """The specification for a module, used for loading.
+
+    A module's spec is the source for information about the module.  For
+    data associated with the module, including source, use the spec's
+    loader.
+
+    `name` is the absolute name of the module.
`loader` is the loader + to use when loading the module. `parent` is the name of the + package the module is in. The parent is derived from the name. + + `is_package` determines if the module is considered a package or + not. On modules this is reflected by the `__path__` attribute. + + `origin` is the specific location used by the loader from which to + load the module, if that information is available. When filename is + set, origin will match. + + `has_location` indicates that a spec's "origin" reflects a location. + When this is True, `__file__` attribute of the module is set. + + `cached` is the location of the cached bytecode file, if any. It + corresponds to the `__cached__` attribute. + + `submodule_search_locations` is the sequence of path entries to + search when importing submodules. If set, is_package should be + True--and False otherwise. + + Packages are simply modules that (may) have submodules. If a spec + has a non-None value in `submodule_search_locations`, the import + system will consider modules loaded from the spec as packages. + + Only finders (see importlib.abc.MetaPathFinder and + importlib.abc.PathEntryFinder) should modify ModuleSpec instances. + + """ + + def __init__(self, name, loader, *, origin=None, loader_state=None, + is_package=None): + self.name = name + self.loader = loader + self.origin = origin + self.loader_state = loader_state + self.submodule_search_locations = [] if is_package else None + self._uninitialized_submodules = [] + + # file-location attributes + self._set_fileattr = False + self._cached = None + + def __repr__(self): + args = [f'name={self.name!r}', f'loader={self.loader!r}'] + if self.origin is not None: + args.append(f'origin={self.origin!r}') + if self.submodule_search_locations is not None: + args.append(f'submodule_search_locations={self.submodule_search_locations}') + return f'{self.__class__.__name__}({", ".join(args)})' + + def __eq__(self, other): + smsl = self.submodule_search_locations + try: + return (self.name == other.name and + self.loader == other.loader and + self.origin == other.origin and + smsl == other.submodule_search_locations and + self.cached == other.cached and + self.has_location == other.has_location) + except AttributeError: + return NotImplemented + + @property + def cached(self): + if self._cached is None: + if self.origin is not None and self._set_fileattr: + if _bootstrap_external is None: + raise NotImplementedError + self._cached = _bootstrap_external._get_cached(self.origin) + return self._cached + + @cached.setter + def cached(self, cached): + self._cached = cached + + @property + def parent(self): + """The name of the module's parent.""" + if self.submodule_search_locations is None: + return self.name.rpartition('.')[0] + else: + return self.name + + @property + def has_location(self): + return self._set_fileattr + + @has_location.setter + def has_location(self, value): + self._set_fileattr = bool(value) + + +def spec_from_loader(name, loader, *, origin=None, is_package=None): + """Return a module spec based on various loader methods.""" + if origin is None: + origin = getattr(loader, '_ORIGIN', None) + + if not origin and hasattr(loader, 'get_filename'): + if _bootstrap_external is None: + raise NotImplementedError + spec_from_file_location = _bootstrap_external.spec_from_file_location + + if is_package is None: + return spec_from_file_location(name, loader=loader) + search = [] if is_package else None + return spec_from_file_location(name, loader=loader, + submodule_search_locations=search) + + if 
is_package is None:
+        if hasattr(loader, 'is_package'):
+            try:
+                is_package = loader.is_package(name)
+            except ImportError:
+                is_package = None  # aka, undefined
+        else:
+            # the default
+            is_package = False
+
+    return ModuleSpec(name, loader, origin=origin, is_package=is_package)
+
+
+def _spec_from_module(module, loader=None, origin=None):
+    # This function is meant for use in _setup().
+    try:
+        spec = module.__spec__
+    except AttributeError:
+        pass
+    else:
+        if spec is not None:
+            return spec
+
+    name = module.__name__
+    if loader is None:
+        try:
+            loader = module.__loader__
+        except AttributeError:
+            # loader will stay None.
+            pass
+    try:
+        location = module.__file__
+    except AttributeError:
+        location = None
+    if origin is None:
+        if loader is not None:
+            origin = getattr(loader, '_ORIGIN', None)
+        if not origin and location is not None:
+            origin = location
+    try:
+        cached = module.__cached__
+    except AttributeError:
+        cached = None
+    try:
+        submodule_search_locations = list(module.__path__)
+    except AttributeError:
+        submodule_search_locations = None
+
+    spec = ModuleSpec(name, loader, origin=origin)
+    spec._set_fileattr = False if location is None else (origin == location)
+    spec.cached = cached
+    spec.submodule_search_locations = submodule_search_locations
+    return spec
+
+
+def _init_module_attrs(spec, module, *, override=False):
+    # The passed-in module may not support attribute assignment,
+    # in which case we simply don't set the attributes.
+    # __name__
+    if (override or getattr(module, '__name__', None) is None):
+        try:
+            module.__name__ = spec.name
+        except AttributeError:
+            pass
+    # __loader__
+    if override or getattr(module, '__loader__', None) is None:
+        loader = spec.loader
+        if loader is None:
+            # A backward compatibility hack.
+            if spec.submodule_search_locations is not None:
+                if _bootstrap_external is None:
+                    raise NotImplementedError
+                NamespaceLoader = _bootstrap_external.NamespaceLoader
+
+                loader = NamespaceLoader.__new__(NamespaceLoader)
+                loader._path = spec.submodule_search_locations
+                spec.loader = loader
+                # While the docs say that module.__file__ is not set for
+                # built-in modules, and the code below will avoid setting it if
+                # spec.has_location is false, this is incorrect for namespace
+                # packages. Namespace packages have no location, but their
+                # __spec__.origin is None, and thus their module.__file__
+                # should also be None for consistency. While a bit of a hack,
+                # this is the best place to ensure this consistency.
+                #
+                # See # https://docs.python.org/3/library/importlib.html#importlib.abc.Loader.load_module
+                # and bpo-32305
+                module.__file__ = None
+        try:
+            module.__loader__ = loader
+        except AttributeError:
+            pass
+    # __package__
+    if override or getattr(module, '__package__', None) is None:
+        try:
+            module.__package__ = spec.parent
+        except AttributeError:
+            pass
+    # __spec__
+    try:
+        module.__spec__ = spec
+    except AttributeError:
+        pass
+    # __path__
+    if override or getattr(module, '__path__', None) is None:
+        if spec.submodule_search_locations is not None:
+            # XXX We should extend __path__ if it's already a list.
+
+            try:
+                module.__path__ = spec.submodule_search_locations
+            except AttributeError:
+                pass
+    # __file__/__cached__
+    if spec.has_location:
+        if override or getattr(module, '__file__', None) is None:
+            try:
+                module.__file__ = spec.origin
+            except AttributeError:
+                pass
+
+        if override or getattr(module, '__cached__', None) is None:
+            if spec.cached is not None:
+                try:
+                    module.__cached__ = spec.cached
+                except AttributeError:
+                    pass
+    return module
+
+
+def module_from_spec(spec):
+    """Create a module based on the provided spec."""
+    # Typically loaders will not implement create_module().
+    module = None
+    if hasattr(spec.loader, 'create_module'):
+        # If create_module() returns `None` then it means default
+        # module creation should be used.
+        module = spec.loader.create_module(spec)
+    elif hasattr(spec.loader, 'exec_module'):
+        raise ImportError('loaders that define exec_module() '
+                          'must also define create_module()')
+    if module is None:
+        module = _new_module(spec.name)
+    _init_module_attrs(spec, module)
+    return module
+
+
+def _module_repr_from_spec(spec):
+    """Return the repr to use for the module."""
+    name = '?' if spec.name is None else spec.name
+    if spec.origin is None:
+        loader = spec.loader
+        if loader is None:
+            return f'<module {name!r}>'
+        elif (
+            _bootstrap_external is not None
+            and isinstance(loader, _bootstrap_external.NamespaceLoader)
+        ):
+            return f'<module {name!r} (namespace) from {list(loader._path)}>'
+        else:
+            return f'<module {name!r} ({loader!r})>'
+    else:
+        if spec.has_location:
+            return f'<module {name!r} from {spec.origin!r}>'
+        else:
+            return f'<module {name!r} ({spec.origin})>'
+
+
+# Used by importlib.reload() and _load_module_shim().
+def _exec(spec, module):
+    """Execute the spec's specified module in an existing module's namespace."""
+    name = spec.name
+    with _ModuleLockManager(name):
+        if sys.modules.get(name) is not module:
+            msg = f'module {name!r} not in sys.modules'
+            raise ImportError(msg, name=name)
+        try:
+            if spec.loader is None:
+                if spec.submodule_search_locations is None:
+                    raise ImportError('missing loader', name=spec.name)
+                # Namespace package.
+                _init_module_attrs(spec, module, override=True)
+            else:
+                _init_module_attrs(spec, module, override=True)
+                if not hasattr(spec.loader, 'exec_module'):
+                    msg = (f"{_object_name(spec.loader)}.exec_module() not found; "
+                           "falling back to load_module()")
+                    _warnings.warn(msg, ImportWarning)
+                    spec.loader.load_module(name)
+                else:
+                    spec.loader.exec_module(module)
+        finally:
+            # Update the order of insertion into sys.modules for module
+            # clean-up at shutdown.
+            module = sys.modules.pop(spec.name)
+            sys.modules[spec.name] = module
+    return module
+
+
+def _load_backward_compatible(spec):
+    # It is assumed that all callers have been warned about using load_module()
+    # appropriately before calling this function.
+    try:
+        spec.loader.load_module(spec.name)
+    except:
+        if spec.name in sys.modules:
+            module = sys.modules.pop(spec.name)
+            sys.modules[spec.name] = module
+        raise
+    # The module must be in sys.modules at this point!
+    # Move it to the end of sys.modules.
+    module = sys.modules.pop(spec.name)
+    sys.modules[spec.name] = module
+    if getattr(module, '__loader__', None) is None:
+        try:
+            module.__loader__ = spec.loader
+        except AttributeError:
+            pass
+    if getattr(module, '__package__', None) is None:
+        try:
+            # Since module.__path__ may not line up with
+            # spec.submodule_search_paths, we can't necessarily rely
+            # on spec.parent here.
+ module.__package__ = module.__name__ + if not hasattr(module, '__path__'): + module.__package__ = spec.name.rpartition('.')[0] + except AttributeError: + pass + if getattr(module, '__spec__', None) is None: + try: + module.__spec__ = spec + except AttributeError: + pass + return module + +def _load_unlocked(spec): + # A helper for direct use by the import system. + if spec.loader is not None: + # Not a namespace package. + if not hasattr(spec.loader, 'exec_module'): + msg = (f"{_object_name(spec.loader)}.exec_module() not found; " + "falling back to load_module()") + _warnings.warn(msg, ImportWarning) + return _load_backward_compatible(spec) + + module = module_from_spec(spec) + + # This must be done before putting the module in sys.modules + # (otherwise an optimization shortcut in import.c becomes + # wrong). + spec._initializing = True + try: + sys.modules[spec.name] = module + try: + if spec.loader is None: + if spec.submodule_search_locations is None: + raise ImportError('missing loader', name=spec.name) + # A namespace package so do nothing. + else: + spec.loader.exec_module(module) + except: + try: + del sys.modules[spec.name] + except KeyError: + pass + raise + # Move the module to the end of sys.modules. + # We don't ensure that the import-related module attributes get + # set in the sys.modules replacement case. Such modules are on + # their own. + module = sys.modules.pop(spec.name) + sys.modules[spec.name] = module + _verbose_message('import {!r} # {!r}', spec.name, spec.loader) + finally: + spec._initializing = False + + return module + +# A method used during testing of _load_unlocked() and by +# _load_module_shim(). +def _load(spec): + """Return a new module object, loaded by the spec's loader. + + The module is not added to its parent. + + If a module is already in sys.modules, that existing module gets + clobbered. + + """ + with _ModuleLockManager(spec.name): + return _load_unlocked(spec) + + +# Loaders ##################################################################### + +class BuiltinImporter: + + """Meta path import for built-in modules. + + All methods are either class or static methods to avoid the need to + instantiate the class. + + """ + + _ORIGIN = "built-in" + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + if _imp.is_builtin(fullname): + return spec_from_loader(fullname, cls, origin=cls._ORIGIN) + else: + return None + + @staticmethod + def create_module(spec): + """Create a built-in module""" + if spec.name not in sys.builtin_module_names: + raise ImportError(f'{spec.name!r} is not a built-in module', + name=spec.name) + return _call_with_frames_removed(_imp.create_builtin, spec) + + @staticmethod + def exec_module(module): + """Exec a built-in module""" + _call_with_frames_removed(_imp.exec_builtin, module) + + @classmethod + @_requires_builtin + def get_code(cls, fullname): + """Return None as built-in modules do not have code objects.""" + return None + + @classmethod + @_requires_builtin + def get_source(cls, fullname): + """Return None as built-in modules do not have source code.""" + return None + + @classmethod + @_requires_builtin + def is_package(cls, fullname): + """Return False as built-in modules are never packages.""" + return False + + load_module = classmethod(_load_module_shim) + + +class FrozenImporter: + + """Meta path import for frozen modules. + + All methods are either class or static methods to avoid the need to + instantiate the class. 
+ + """ + + _ORIGIN = "frozen" + + @classmethod + def _fix_up_module(cls, module): + spec = module.__spec__ + state = spec.loader_state + if state is None: + # The module is missing FrozenImporter-specific values. + + # Fix up the spec attrs. + origname = vars(module).pop('__origname__', None) + assert origname, 'see PyImport_ImportFrozenModuleObject()' + ispkg = hasattr(module, '__path__') + assert _imp.is_frozen_package(module.__name__) == ispkg, ispkg + filename, pkgdir = cls._resolve_filename(origname, spec.name, ispkg) + spec.loader_state = type(sys.implementation)( + filename=filename, + origname=origname, + ) + __path__ = spec.submodule_search_locations + if ispkg: + assert __path__ == [], __path__ + if pkgdir: + spec.submodule_search_locations.insert(0, pkgdir) + else: + assert __path__ is None, __path__ + + # Fix up the module attrs (the bare minimum). + assert not hasattr(module, '__file__'), module.__file__ + if filename: + try: + module.__file__ = filename + except AttributeError: + pass + if ispkg: + if module.__path__ != __path__: + assert module.__path__ == [], module.__path__ + module.__path__.extend(__path__) + else: + # These checks ensure that _fix_up_module() is only called + # in the right places. + __path__ = spec.submodule_search_locations + ispkg = __path__ is not None + # Check the loader state. + assert sorted(vars(state)) == ['filename', 'origname'], state + if state.origname: + # The only frozen modules with "origname" set are stdlib modules. + (__file__, pkgdir, + ) = cls._resolve_filename(state.origname, spec.name, ispkg) + assert state.filename == __file__, (state.filename, __file__) + if pkgdir: + assert __path__ == [pkgdir], (__path__, pkgdir) + else: + assert __path__ == ([] if ispkg else None), __path__ + else: + __file__ = None + assert state.filename is None, state.filename + assert __path__ == ([] if ispkg else None), __path__ + # Check the file attrs. + if __file__: + assert hasattr(module, '__file__') + assert module.__file__ == __file__, (module.__file__, __file__) + else: + assert not hasattr(module, '__file__'), module.__file__ + if ispkg: + assert hasattr(module, '__path__') + assert module.__path__ == __path__, (module.__path__, __path__) + else: + assert not hasattr(module, '__path__'), module.__path__ + assert not spec.has_location + + @classmethod + def _resolve_filename(cls, fullname, alias=None, ispkg=False): + if not fullname or not getattr(sys, '_stdlib_dir', None): + return None, None + try: + sep = cls._SEP + except AttributeError: + sep = cls._SEP = '\\' if sys.platform == 'win32' else '/' + + if fullname != alias: + if fullname.startswith('<'): + fullname = fullname[1:] + if not ispkg: + fullname = f'{fullname}.__init__' + else: + ispkg = False + relfile = fullname.replace('.', sep) + if ispkg: + pkgdir = f'{sys._stdlib_dir}{sep}{relfile}' + filename = f'{pkgdir}{sep}__init__.py' + else: + pkgdir = None + filename = f'{sys._stdlib_dir}{sep}{relfile}.py' + return filename, pkgdir + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + info = _call_with_frames_removed(_imp.find_frozen, fullname) + if info is None: + return None + # We get the marshaled data in exec_module() (the loader + # part of the importer), instead of here (the finder part). + # The loader is the usual place to get the data that will + # be loaded into the module. (For example, see _LoaderBasics + # in _bootstrap_external.py.) Most importantly, this importer + # is simpler if we wait to get the data. 
+ # However, getting as much data in the finder as possible + # to later load the module is okay, and sometimes important. + # (That's why ModuleSpec.loader_state exists.) This is + # especially true if it avoids throwing away expensive data + # the loader would otherwise duplicate later and can be done + # efficiently. In this case it isn't worth it. + _, ispkg, origname = info + spec = spec_from_loader(fullname, cls, + origin=cls._ORIGIN, + is_package=ispkg) + filename, pkgdir = cls._resolve_filename(origname, fullname, ispkg) + spec.loader_state = type(sys.implementation)( + filename=filename, + origname=origname, + ) + if pkgdir: + spec.submodule_search_locations.insert(0, pkgdir) + return spec + + @staticmethod + def create_module(spec): + """Set __file__, if able.""" + module = _new_module(spec.name) + try: + filename = spec.loader_state.filename + except AttributeError: + pass + else: + if filename: + module.__file__ = filename + return module + + @staticmethod + def exec_module(module): + spec = module.__spec__ + name = spec.name + code = _call_with_frames_removed(_imp.get_frozen_object, name) + exec(code, module.__dict__) + + @classmethod + def load_module(cls, fullname): + """Load a frozen module. + + This method is deprecated. Use exec_module() instead. + + """ + # Warning about deprecation implemented in _load_module_shim(). + module = _load_module_shim(cls, fullname) + info = _imp.find_frozen(fullname) + assert info is not None + _, ispkg, origname = info + module.__origname__ = origname + vars(module).pop('__file__', None) + if ispkg: + module.__path__ = [] + cls._fix_up_module(module) + return module + + @classmethod + @_requires_frozen + def get_code(cls, fullname): + """Return the code object for the frozen module.""" + return _imp.get_frozen_object(fullname) + + @classmethod + @_requires_frozen + def get_source(cls, fullname): + """Return None as frozen modules do not have source code.""" + return None + + @classmethod + @_requires_frozen + def is_package(cls, fullname): + """Return True if the frozen module is a package.""" + return _imp.is_frozen_package(fullname) + + +# Import itself ############################################################### + +class _ImportLockContext: + + """Context manager for the import lock.""" + + def __enter__(self): + """Acquire the import lock.""" + _imp.acquire_lock() + + def __exit__(self, exc_type, exc_value, exc_traceback): + """Release the import lock regardless of any raised exceptions.""" + _imp.release_lock() + + +def _resolve_name(name, package, level): + """Resolve a relative module name to an absolute one.""" + bits = package.rsplit('.', level - 1) + if len(bits) < level: + raise ImportError('attempted relative import beyond top-level package') + base = bits[0] + return f'{base}.{name}' if name else base + + +def _find_spec(name, path, target=None): + """Find a module's spec.""" + meta_path = sys.meta_path + if meta_path is None: + raise ImportError("sys.meta_path is None, Python is likely " + "shutting down") + + # gh-130094: Copy sys.meta_path so that we have a consistent view of the + # list while iterating over it. + meta_path = list(meta_path) + if not meta_path: + _warnings.warn('sys.meta_path is empty', ImportWarning) + + # We check sys.modules here for the reload case. While a passed-in + # target will usually indicate a reload there is no guarantee, whereas + # sys.modules provides one. 
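+    # For example, importlib.reload() guarantees the module is already in
+    # sys.modules (it raises otherwise), so the sys.modules check below is a
+    # reliable reload signal even when no target was passed in.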
+ is_reload = name in sys.modules + for finder in meta_path: + with _ImportLockContext(): + try: + find_spec = finder.find_spec + except AttributeError: + continue + else: + spec = find_spec(name, path, target) + if spec is not None: + # The parent import may have already imported this module. + if not is_reload and name in sys.modules: + module = sys.modules[name] + try: + __spec__ = module.__spec__ + except AttributeError: + # We use the found spec since that is the one that + # we would have used if the parent module hadn't + # beaten us to the punch. + return spec + else: + if __spec__ is None: + return spec + else: + return __spec__ + else: + return spec + else: + return None + + +def _sanity_check(name, package, level): + """Verify arguments are "sane".""" + if not isinstance(name, str): + raise TypeError(f'module name must be str, not {type(name)}') + if level < 0: + raise ValueError('level must be >= 0') + if level > 0: + if not isinstance(package, str): + raise TypeError('__package__ not set to a string') + elif not package: + raise ImportError('attempted relative import with no known parent ' + 'package') + if not name and level == 0: + raise ValueError('Empty module name') + + +_ERR_MSG_PREFIX = 'No module named ' + +def _find_and_load_unlocked(name, import_): + path = None + parent = name.rpartition('.')[0] + parent_spec = None + if parent: + if parent not in sys.modules: + _call_with_frames_removed(import_, parent) + # Crazy side-effects! + module = sys.modules.get(name) + if module is not None: + return module + parent_module = sys.modules[parent] + try: + path = parent_module.__path__ + except AttributeError: + msg = f'{_ERR_MSG_PREFIX}{name!r}; {parent!r} is not a package' + raise ModuleNotFoundError(msg, name=name) from None + parent_spec = parent_module.__spec__ + if getattr(parent_spec, '_initializing', False): + _call_with_frames_removed(import_, parent) + # Crazy side-effects (again)! + module = sys.modules.get(name) + if module is not None: + return module + child = name.rpartition('.')[2] + spec = _find_spec(name, path) + if spec is None: + raise ModuleNotFoundError(f'{_ERR_MSG_PREFIX}{name!r}', name=name) + else: + if parent_spec: + # Temporarily add child we are currently importing to parent's + # _uninitialized_submodules for circular import tracking. + parent_spec._uninitialized_submodules.append(child) + try: + module = _load_unlocked(spec) + finally: + if parent_spec: + parent_spec._uninitialized_submodules.pop() + if parent: + # Set the module as an attribute on its parent. + parent_module = sys.modules[parent] + try: + setattr(parent_module, child, module) + except AttributeError: + msg = f"Cannot set an attribute on {parent!r} for child module {child!r}" + _warnings.warn(msg, ImportWarning) + return module + + +_NEEDS_LOADING = object() + + +def _find_and_load(name, import_): + """Find and load the module.""" + + # Optimization: we avoid unneeded module locking if the module + # already exists in sys.modules and is fully initialized. + module = sys.modules.get(name, _NEEDS_LOADING) + if (module is _NEEDS_LOADING or + getattr(getattr(module, "__spec__", None), "_initializing", False)): + with _ModuleLockManager(name): + module = sys.modules.get(name, _NEEDS_LOADING) + if module is _NEEDS_LOADING: + return _find_and_load_unlocked(name, import_) + + # Optimization: only call _bootstrap._lock_unlock_module() if + # module.__spec__._initializing is True. + # NOTE: because of this, initializing must be set *before* + # putting the new module in sys.modules. 
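+            # _lock_unlock_module() simply acquires and releases the
+            # per-module lock, blocking until whichever thread is mid-import
+            # either finishes initializing the module or fails.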
+            _lock_unlock_module(name)
+    else:
+        # Verify the module is still in sys.modules. Another thread may have
+        # removed it (due to import failure) between our sys.modules.get()
+        # above and the _initializing check. If removed, we retry the import
+        # to preserve normal semantics: the caller gets the exception from
+        # the actual import failure rather than a synthetic error.
+        if sys.modules.get(name) is not module:
+            return _find_and_load(name, import_)
+
+    if module is None:
+        message = f'import of {name} halted; None in sys.modules'
+        raise ModuleNotFoundError(message, name=name)
+
+    return module
+
+
+def _gcd_import(name, package=None, level=0):
+    """Import and return the module based on its name, the package the call is
+    being made from, and the level adjustment.
+
+    This function represents the greatest common denominator of functionality
+    between import_module and __import__. This includes setting __package__ if
+    the loader did not.
+
+    """
+    _sanity_check(name, package, level)
+    if level > 0:
+        name = _resolve_name(name, package, level)
+    return _find_and_load(name, _gcd_import)
+
+
+def _handle_fromlist(module, fromlist, import_, *, recursive=False):
+    """Figure out what __import__ should return.
+
+    The import_ parameter is a callable which takes the name of module to
+    import. It is required to decouple the function from assuming importlib's
+    import implementation is desired.
+
+    """
+    # The hell that is fromlist ...
+    # If a package was imported, try to import stuff from fromlist.
+    for x in fromlist:
+        if not isinstance(x, str):
+            if recursive:
+                where = module.__name__ + '.__all__'
+            else:
+                where = "``from list''"
+            raise TypeError(f"Item in {where} must be str, "
+                            f"not {type(x).__name__}")
+        elif x == '*':
+            if not recursive and hasattr(module, '__all__'):
+                _handle_fromlist(module, module.__all__, import_,
+                                 recursive=True)
+        elif not hasattr(module, x):
+            from_name = f'{module.__name__}.{x}'
+            try:
+                _call_with_frames_removed(import_, from_name)
+            except ModuleNotFoundError as exc:
+                # Backwards-compatibility dictates we ignore failed
+                # imports triggered by fromlist for modules that don't
+                # exist.
+                if (exc.name == from_name and
+                        sys.modules.get(from_name, _NEEDS_LOADING) is not None):
+                    continue
+                raise
+    return module
+
+
+def _calc___package__(globals):
+    """Calculate what __package__ should be.
+
+    __package__ is not guaranteed to be defined or could be set to None
+    to represent that its proper value is unknown.
+
+    """
+    package = globals.get('__package__')
+    spec = globals.get('__spec__')
+    if package is not None:
+        if spec is not None and package != spec.parent:
+            _warnings.warn("__package__ != __spec__.parent "
+                           f"({package!r} != {spec.parent!r})",
+                           DeprecationWarning, stacklevel=3)
+        return package
+    elif spec is not None:
+        return spec.parent
+    else:
+        _warnings.warn("can't resolve package from __spec__ or __package__, "
+                       "falling back on __name__ and __path__",
+                       ImportWarning, stacklevel=3)
+        package = globals['__name__']
+        if '__path__' not in globals:
+            package = package.rpartition('.')[0]
+    return package
+
+
+def __import__(name, globals=None, locals=None, fromlist=(), level=0):
+    """Import a module.
+
+    The 'globals' argument is used to infer where the import is occurring from
+    to handle relative imports. The 'locals' argument is ignored. The
+    'fromlist' argument specifies what should exist as attributes on the module
+    being imported (e.g. ``from module import <fromlist>``).
The 'level' + argument represents the package location to import from in a relative + import (e.g. ``from ..pkg import mod`` would have a 'level' of 2). + + """ + if level == 0: + module = _gcd_import(name) + else: + globals_ = globals if globals is not None else {} + package = _calc___package__(globals_) + module = _gcd_import(name, package, level) + if not fromlist: + # Return up to the first dot in 'name'. This is complicated by the fact + # that 'name' may be relative. + if level == 0: + return _gcd_import(name.partition('.')[0]) + elif not name: + return module + else: + # Figure out where to slice the module's name up to the first dot + # in 'name'. + cut_off = len(name) - len(name.partition('.')[0]) + # Slice end needs to be positive to alleviate need to special-case + # when ``'.' not in name``. + return sys.modules[module.__name__[:len(module.__name__)-cut_off]] + elif hasattr(module, '__path__'): + return _handle_fromlist(module, fromlist, _gcd_import) + else: + return module + + +def _builtin_from_name(name): + spec = BuiltinImporter.find_spec(name) + if spec is None: + raise ImportError('no built-in module named ' + name) + return _load_unlocked(spec) + + +def _setup(sys_module, _imp_module): + """Setup importlib by importing needed built-in modules and injecting them + into the global namespace. + + As sys is needed for sys.modules access and _imp is needed to load built-in + modules, those two modules must be explicitly passed in. + + """ + global _imp, sys, _blocking_on + _imp = _imp_module + sys = sys_module + + # Set up the spec for existing builtin/frozen modules. + module_type = type(sys) + for name, module in sys.modules.items(): + if isinstance(module, module_type): + if name in sys.builtin_module_names: + loader = BuiltinImporter + elif _imp.is_frozen(name): + loader = FrozenImporter + else: + continue + spec = _spec_from_module(module, loader) + _init_module_attrs(spec, module) + if loader is FrozenImporter: + loader._fix_up_module(module) + + # Directly load built-in modules needed during bootstrap. + self_module = sys.modules[__name__] + for builtin_name in ('_thread', '_warnings', '_weakref'): + if builtin_name not in sys.modules: + builtin_module = _builtin_from_name(builtin_name) + else: + builtin_module = sys.modules[builtin_name] + setattr(self_module, builtin_name, builtin_module) + + # Instantiation requires _weakref to have been set. + _blocking_on = _WeakValueDictionary() + + +def _install(sys_module, _imp_module): + """Install importers for builtin and frozen modules""" + _setup(sys_module, _imp_module) + + sys.meta_path.append(BuiltinImporter) + sys.meta_path.append(FrozenImporter) + + +def _install_external_importers(): + """Install importers that require external filesystem access""" + global _bootstrap_external + import _frozen_importlib_external + _bootstrap_external = _frozen_importlib_external + _frozen_importlib_external._install(sys.modules[__name__]) diff --git a/Python314_4_x64_Template/Lib/importlib/_bootstrap_external.py b/Python314_4_x64_Template/Lib/importlib/_bootstrap_external.py new file mode 100644 index 00000000..6a828ae7 --- /dev/null +++ b/Python314_4_x64_Template/Lib/importlib/_bootstrap_external.py @@ -0,0 +1,1562 @@ +"""Core implementation of path-based import. + +This module is NOT meant to be directly imported! It has been designed such +that it can be bootstrapped into Python as the implementation of import. As +such it requires the injection of specific modules and attributes in order to +work. 
One should use importlib as the public-facing version of this module. + +""" +# IMPORTANT: Whenever making changes to this module, be sure to run a top-level +# `make regen-importlib` followed by `make` in order to get the frozen version +# of the module updated. Not doing so will result in the Makefile to fail for +# all others who don't have a ./python around to freeze the module in the early +# stages of compilation. +# + +# See importlib._setup() for what is injected into the global namespace. + +# When editing this code be aware that code executed at import time CANNOT +# reference any injected objects! This includes not only global code but also +# anything specified at the class level. + +# Module injected manually by _set_bootstrap_module() +_bootstrap = None + +# Import builtin modules +import _imp +import _io +import sys +import _warnings +import marshal + + +_MS_WINDOWS = (sys.platform == 'win32') +if _MS_WINDOWS: + import nt as _os + import winreg +else: + import posix as _os + + +if _MS_WINDOWS: + path_separators = ['\\', '/'] +else: + path_separators = ['/'] +# Assumption made in _path_join() +assert all(len(sep) == 1 for sep in path_separators) +path_sep = path_separators[0] +path_sep_tuple = tuple(path_separators) +path_separators = ''.join(path_separators) +_pathseps_with_colon = {f':{s}' for s in path_separators} + + +# Bootstrap-related code ###################################################### +_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win', +_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin', 'ios', 'tvos', 'watchos' +_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY + + _CASE_INSENSITIVE_PLATFORMS_STR_KEY) + + +def _make_relax_case(): + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY): + key = 'PYTHONCASEOK' + else: + key = b'PYTHONCASEOK' + + def _relax_case(): + """True if filenames must be checked case-insensitively and ignore environment flags are not set.""" + return not sys.flags.ignore_environment and key in _os.environ + else: + def _relax_case(): + """True if filenames must be checked case-insensitively.""" + return False + return _relax_case + +_relax_case = _make_relax_case() + + +def _pack_uint32(x): + """Convert a 32-bit integer to little-endian.""" + return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little') + + +def _unpack_uint64(data): + """Convert 8 bytes in little-endian to an integer.""" + assert len(data) == 8 + return int.from_bytes(data, 'little') + +def _unpack_uint32(data): + """Convert 4 bytes in little-endian to an integer.""" + assert len(data) == 4 + return int.from_bytes(data, 'little') + +def _unpack_uint16(data): + """Convert 2 bytes in little-endian to an integer.""" + assert len(data) == 2 + return int.from_bytes(data, 'little') + + +if _MS_WINDOWS: + def _path_join(*path_parts): + """Replacement for os.path.join().""" + if not path_parts: + return "" + if len(path_parts) == 1: + return path_parts[0] + root = "" + path = [] + for new_root, tail in map(_os._path_splitroot, path_parts): + if new_root.startswith(path_sep_tuple) or new_root.endswith(path_sep_tuple): + root = new_root.rstrip(path_separators) or root + path = [path_sep + tail] + elif new_root.endswith(':'): + if root.casefold() != new_root.casefold(): + # Drive relative paths have to be resolved by the OS, so we reset the + # tail but do not add a path_sep prefix. 
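+                    # Illustratively, _path_join('C:\\a', 'D:b') returns
+                    # 'D:b': only the OS knows what the current directory
+                    # on drive D: is, so no separator is inserted.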
+ root = new_root + path = [tail] + else: + path.append(tail) + else: + root = new_root or root + path.append(tail) + path = [p.rstrip(path_separators) for p in path if p] + if len(path) == 1 and not path[0]: + # Avoid losing the root's trailing separator when joining with nothing + return root + path_sep + return root + path_sep.join(path) + +else: + def _path_join(*path_parts): + """Replacement for os.path.join().""" + return path_sep.join([part.rstrip(path_separators) + for part in path_parts if part]) + + +def _path_split(path): + """Replacement for os.path.split().""" + i = max(path.rfind(p) for p in path_separators) + if i < 0: + return '', path + return path[:i], path[i + 1:] + + +def _path_stat(path): + """Stat the path. + + Made a separate function to make it easier to override in experiments + (e.g. cache stat results). + + """ + return _os.stat(path) + + +def _path_is_mode_type(path, mode): + """Test whether the path is the specified mode type.""" + try: + stat_info = _path_stat(path) + except OSError: + return False + return (stat_info.st_mode & 0o170000) == mode + + +def _path_isfile(path): + """Replacement for os.path.isfile.""" + return _path_is_mode_type(path, 0o100000) + + +def _path_isdir(path): + """Replacement for os.path.isdir.""" + if not path: + path = _os.getcwd() + return _path_is_mode_type(path, 0o040000) + + +if _MS_WINDOWS: + def _path_isabs(path): + """Replacement for os.path.isabs.""" + if not path: + return False + root = _os._path_splitroot(path)[0].replace('/', '\\') + return len(root) > 1 and (root.startswith('\\\\') or root.endswith('\\')) + +else: + def _path_isabs(path): + """Replacement for os.path.isabs.""" + return path.startswith(path_separators) + + +def _path_abspath(path): + """Replacement for os.path.abspath.""" + if not _path_isabs(path): + for sep in path_separators: + path = path.removeprefix(f".{sep}") + return _path_join(_os.getcwd(), path) + else: + return path + + +def _write_atomic(path, data, mode=0o666): + """Best-effort function to write data to a path atomically. + Be prepared to handle a FileExistsError if concurrent writing of the + temporary file is attempted.""" + # id() is used to generate a pseudo-random filename. + path_tmp = f'{path}.{id(path)}' + fd = _os.open(path_tmp, + _os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666) + try: + # We first write data to a temporary file, and then use os.replace() to + # perform an atomic rename. + with _io.open(fd, 'wb') as file: + file.write(data) + _os.replace(path_tmp, path) + except OSError: + try: + _os.unlink(path_tmp) + except OSError: + pass + raise + + +_code_type = type(_write_atomic.__code__) + +MAGIC_NUMBER = _imp.pyc_magic_number_token.to_bytes(4, 'little') + +_PYCACHE = '__pycache__' +_OPT = 'opt-' + +SOURCE_SUFFIXES = ['.py'] +if _MS_WINDOWS: + SOURCE_SUFFIXES.append('.pyw') + +EXTENSION_SUFFIXES = _imp.extension_suffixes() + +BYTECODE_SUFFIXES = ['.pyc'] +# Deprecated. +DEBUG_BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES = BYTECODE_SUFFIXES + +def cache_from_source(path, debug_override=None, *, optimization=None): + """Given the path to a .py file, return the path to its .pyc file. + + The .py file does not need to exist; this simply returns the path to the + .pyc file calculated as if the .py file were imported. + + The 'optimization' parameter controls the presumed optimization level of + the bytecode file. If 'optimization' is not None, the string representation + of the argument is taken and verified to be alphanumeric (else ValueError + is raised). 
+ + The debug_override parameter is deprecated. If debug_override is not None, + a True value is the same as setting 'optimization' to the empty string + while a False value is equivalent to setting 'optimization' to '1'. + + If sys.implementation.cache_tag is None then NotImplementedError is raised. + + """ + if debug_override is not None: + _warnings.warn('the debug_override parameter is deprecated; use ' + "'optimization' instead", DeprecationWarning) + if optimization is not None: + message = 'debug_override or optimization must be set to None' + raise TypeError(message) + optimization = '' if debug_override else 1 + path = _os.fspath(path) + head, tail = _path_split(path) + base, sep, rest = tail.rpartition('.') + tag = sys.implementation.cache_tag + if tag is None: + raise NotImplementedError('sys.implementation.cache_tag is None') + almost_filename = ''.join([(base if base else rest), sep, tag]) + if optimization is None: + if sys.flags.optimize == 0: + optimization = '' + else: + optimization = sys.flags.optimize + optimization = str(optimization) + if optimization != '': + if not optimization.isalnum(): + raise ValueError(f'{optimization!r} is not alphanumeric') + almost_filename = f'{almost_filename}.{_OPT}{optimization}' + filename = almost_filename + BYTECODE_SUFFIXES[0] + if sys.pycache_prefix is not None: + # We need an absolute path to the py file to avoid the possibility of + # collisions within sys.pycache_prefix, if someone has two different + # `foo/bar.py` on their system and they import both of them using the + # same sys.pycache_prefix. Let's say sys.pycache_prefix is + # `C:\Bytecode`; the idea here is that if we get `Foo\Bar`, we first + # make it absolute (`C:\Somewhere\Foo\Bar`), then make it root-relative + # (`Somewhere\Foo\Bar`), so we end up placing the bytecode file in an + # unambiguous `C:\Bytecode\Somewhere\Foo\Bar\`. + head = _path_abspath(head) + + # Strip initial drive from a Windows path. We know we have an absolute + # path here, so the second part of the check rules out a POSIX path that + # happens to contain a colon at the second character. + # Slicing avoids issues with an empty (or short) `head`. + if head[1:2] == ':' and head[0:1] not in path_separators: + head = head[2:] + + # Strip initial path separator from `head` to complete the conversion + # back to a root-relative path before joining. + return _path_join( + sys.pycache_prefix, + head.lstrip(path_separators), + filename, + ) + return _path_join(head, _PYCACHE, filename) + + +def source_from_cache(path): + """Given the path to a .pyc. file, return the path to its .py file. + + The .pyc file does not need to exist; this simply returns the path to + the .py file calculated to correspond to the .pyc file. If path does + not conform to PEP 3147/488 format, ValueError will be raised. If + sys.implementation.cache_tag is None then NotImplementedError is raised. 
+ + """ + if sys.implementation.cache_tag is None: + raise NotImplementedError('sys.implementation.cache_tag is None') + path = _os.fspath(path) + head, pycache_filename = _path_split(path) + found_in_pycache_prefix = False + if sys.pycache_prefix is not None: + stripped_path = sys.pycache_prefix.rstrip(path_separators) + if head.startswith(stripped_path + path_sep): + head = head[len(stripped_path):] + found_in_pycache_prefix = True + if not found_in_pycache_prefix: + head, pycache = _path_split(head) + if pycache != _PYCACHE: + raise ValueError(f'{_PYCACHE} not bottom-level directory in ' + f'{path!r}') + dot_count = pycache_filename.count('.') + if dot_count not in {2, 3}: + raise ValueError(f'expected only 2 or 3 dots in {pycache_filename!r}') + elif dot_count == 3: + optimization = pycache_filename.rsplit('.', 2)[-2] + if not optimization.startswith(_OPT): + raise ValueError("optimization portion of filename does not start " + f"with {_OPT!r}") + opt_level = optimization[len(_OPT):] + if not opt_level.isalnum(): + raise ValueError(f"optimization level {optimization!r} is not an " + "alphanumeric value") + base_filename = pycache_filename.partition('.')[0] + return _path_join(head, base_filename + SOURCE_SUFFIXES[0]) + + +def _get_sourcefile(bytecode_path): + """Convert a bytecode file path to a source path (if possible). + + This function exists purely for backwards-compatibility for + PyImport_ExecCodeModuleWithFilenames() in the C API. + + """ + if len(bytecode_path) == 0: + return None + rest, _, extension = bytecode_path.rpartition('.') + if not rest or extension.lower()[-3:-1] != 'py': + return bytecode_path + try: + source_path = source_from_cache(bytecode_path) + except (NotImplementedError, ValueError): + source_path = bytecode_path[:-1] + return source_path if _path_isfile(source_path) else bytecode_path + + +def _get_cached(filename): + if filename.endswith(tuple(SOURCE_SUFFIXES)): + try: + return cache_from_source(filename) + except NotImplementedError: + pass + elif filename.endswith(tuple(BYTECODE_SUFFIXES)): + return filename + else: + return None + + +def _calc_mode(path): + """Calculate the mode permissions for a bytecode file.""" + try: + mode = _path_stat(path).st_mode + except OSError: + mode = 0o666 + # We always ensure write access so we can update cached files + # later even when the source files are read-only on Windows (#6074) + mode |= 0o200 + return mode + + +def _check_name(method): + """Decorator to verify that the module being requested matches the one the + loader can handle. + + The first argument (self) must define _name which the second argument is + compared against. If the comparison fails then ImportError is raised. + + """ + def _check_name_wrapper(self, name=None, *args, **kwargs): + if name is None: + name = self.name + elif self.name != name: + raise ImportError('loader for %s cannot handle %s' % + (self.name, name), name=name) + return method(self, name, *args, **kwargs) + + # FIXME: @_check_name is used to define class methods before the + # _bootstrap module is set by _set_bootstrap_module(). 
+ if _bootstrap is not None: + _wrap = _bootstrap._wrap + else: + def _wrap(new, old): + for replace in ['__module__', '__name__', '__qualname__', '__doc__']: + if hasattr(old, replace): + setattr(new, replace, getattr(old, replace)) + new.__dict__.update(old.__dict__) + + _wrap(_check_name_wrapper, method) + return _check_name_wrapper + + +def _classify_pyc(data, name, exc_details): + """Perform basic validity checking of a pyc header and return the flags field, + which determines how the pyc should be further validated against the source. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required, though.) + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + ImportError is raised when the magic number is incorrect or when the flags + field is invalid. EOFError is raised when the data is found to be truncated. + + """ + magic = data[:4] + if magic != MAGIC_NUMBER: + message = f'bad magic number in {name!r}: {magic!r}' + _bootstrap._verbose_message('{}', message) + raise ImportError(message, **exc_details) + if len(data) < 16: + message = f'reached EOF while reading pyc header of {name!r}' + _bootstrap._verbose_message('{}', message) + raise EOFError(message) + flags = _unpack_uint32(data[4:8]) + # Only the first two flags are defined. + if flags & ~0b11: + message = f'invalid flags {flags!r} in {name!r}' + raise ImportError(message, **exc_details) + return flags + + +def _validate_timestamp_pyc(data, source_mtime, source_size, name, + exc_details): + """Validate a pyc against the source last-modified time. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_mtime* is the last modified timestamp of the source file. + + *source_size* is None or the size of the source file in bytes. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + """ + if _unpack_uint32(data[8:12]) != (source_mtime & 0xFFFFFFFF): + message = f'bytecode is stale for {name!r}' + _bootstrap._verbose_message('{}', message) + raise ImportError(message, **exc_details) + if (source_size is not None and + _unpack_uint32(data[12:16]) != (source_size & 0xFFFFFFFF)): + raise ImportError(f'bytecode is stale for {name!r}', **exc_details) + + +def _validate_hash_pyc(data, source_hash, name, exc_details): + """Validate a hash-based pyc by checking the real source hash against the one in + the pyc header. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_hash* is the importlib.util.source_hash() of the source file. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. 
+ + """ + if data[8:16] != source_hash: + raise ImportError( + f'hash in bytecode doesn\'t match hash of source {name!r}', + **exc_details, + ) + + +def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None): + """Compile bytecode as found in a pyc.""" + code = marshal.loads(data) + if isinstance(code, _code_type): + _bootstrap._verbose_message('code object from {!r}', bytecode_path) + if source_path is not None: + _imp._fix_co_filename(code, source_path) + return code + else: + raise ImportError(f'Non-code object in {bytecode_path!r}', + name=name, path=bytecode_path) + + +def _code_to_timestamp_pyc(code, mtime=0, source_size=0): + "Produce the data for a timestamp-based pyc." + data = bytearray(MAGIC_NUMBER) + data.extend(_pack_uint32(0)) + data.extend(_pack_uint32(mtime)) + data.extend(_pack_uint32(source_size)) + data.extend(marshal.dumps(code)) + return data + + +def _code_to_hash_pyc(code, source_hash, checked=True): + "Produce the data for a hash-based pyc." + data = bytearray(MAGIC_NUMBER) + flags = 0b1 | checked << 1 + data.extend(_pack_uint32(flags)) + assert len(source_hash) == 8 + data.extend(source_hash) + data.extend(marshal.dumps(code)) + return data + + +def decode_source(source_bytes): + """Decode bytes representing source code and return the string. + + Universal newline support is used in the decoding. + """ + import tokenize # To avoid bootstrap issues. + source_bytes_readline = _io.BytesIO(source_bytes).readline + encoding = tokenize.detect_encoding(source_bytes_readline) + newline_decoder = _io.IncrementalNewlineDecoder(None, True) + return newline_decoder.decode(source_bytes.decode(encoding[0])) + + +# Module specifications ####################################################### + +_POPULATE = object() + + +def spec_from_file_location(name, location=None, *, loader=None, + submodule_search_locations=_POPULATE): + """Return a module spec based on a file location. + + To indicate that the module is a package, set + submodule_search_locations to a list of directory paths. An + empty list is sufficient, though its not otherwise useful to the + import system. + + The loader must take a spec as its only __init__() arg. + + """ + if location is None: + # The caller may simply want a partially populated location- + # oriented spec. So we set the location to a bogus value and + # fill in as much as we can. + location = '' + if hasattr(loader, 'get_filename'): + # ExecutionLoader + try: + location = loader.get_filename(name) + except ImportError: + pass + else: + location = _os.fspath(location) + try: + location = _path_abspath(location) + except OSError: + pass + + # If the location is on the filesystem, but doesn't actually exist, + # we could return None here, indicating that the location is not + # valid. However, we don't have a good way of testing since an + # indirect location (e.g. a zip file or URL) will look like a + # non-existent file relative to the filesystem. + + spec = _bootstrap.ModuleSpec(name, loader, origin=location) + spec._set_fileattr = True + + # Pick a loader if one wasn't provided. + if loader is None: + for loader_class, suffixes in _get_supported_file_loaders(): + if location.endswith(tuple(suffixes)): + loader = loader_class(name, location) + spec.loader = loader + break + else: + return None + + # Set submodule_search_paths appropriately. + if submodule_search_locations is _POPULATE: + # Check the loader. 
+ if hasattr(loader, 'is_package'): + try: + is_package = loader.is_package(name) + except ImportError: + pass + else: + if is_package: + spec.submodule_search_locations = [] + else: + spec.submodule_search_locations = submodule_search_locations + if spec.submodule_search_locations == []: + if location: + dirname = _path_split(location)[0] + spec.submodule_search_locations.append(dirname) + + return spec + + +def _bless_my_loader(module_globals): + """Helper function for _warnings.c + + See GH#97850 for details. + """ + # 2022-10-06(warsaw): For now, this helper is only used in _warnings.c and + # that use case only has the module globals. This function could be + # extended to accept either that or a module object. However, in the + # latter case, it would be better to raise certain exceptions when looking + # at a module, which should have either a __loader__ or __spec__.loader. + # For backward compatibility, it is possible that we'll get an empty + # dictionary for the module globals, and that cannot raise an exception. + if not isinstance(module_globals, dict): + return None + + missing = object() + loader = module_globals.get('__loader__', None) + spec = module_globals.get('__spec__', missing) + + if loader is None: + if spec is missing: + # If working with a module: + # raise AttributeError('Module globals is missing a __spec__') + return None + elif spec is None: + raise ValueError('Module globals is missing a __spec__.loader') + + spec_loader = getattr(spec, 'loader', missing) + + if spec_loader in (missing, None): + if loader is None: + exc = AttributeError if spec_loader is missing else ValueError + raise exc('Module globals is missing a __spec__.loader') + _warnings.warn( + 'Module globals is missing a __spec__.loader', + DeprecationWarning) + spec_loader = loader + + assert spec_loader is not None + if loader is not None and loader != spec_loader: + _warnings.warn( + 'Module globals; __loader__ != __spec__.loader', + DeprecationWarning) + return loader + + return spec_loader + + +# Loaders ##################################################################### + +class WindowsRegistryFinder: + + """Meta path finder for modules declared in the Windows registry.""" + + REGISTRY_KEY = ( + 'Software\\Python\\PythonCore\\{sys_version}' + '\\Modules\\{fullname}') + REGISTRY_KEY_DEBUG = ( + 'Software\\Python\\PythonCore\\{sys_version}' + '\\Modules\\{fullname}\\Debug') + DEBUG_BUILD = (_MS_WINDOWS and '_d.pyd' in EXTENSION_SUFFIXES) + + @staticmethod + def _open_registry(key): + try: + return winreg.OpenKey(winreg.HKEY_CURRENT_USER, key) + except OSError: + return winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key) + + @classmethod + def _search_registry(cls, fullname): + if cls.DEBUG_BUILD: + registry_key = cls.REGISTRY_KEY_DEBUG + else: + registry_key = cls.REGISTRY_KEY + key = registry_key.format(fullname=fullname, + sys_version='%d.%d' % sys.version_info[:2]) + try: + with cls._open_registry(key) as hkey: + filepath = winreg.QueryValue(hkey, '') + except OSError: + return None + return filepath + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + _warnings.warn('importlib.machinery.WindowsRegistryFinder is ' + 'deprecated; use site configuration instead. 
' + 'Future versions of Python may not enable this ' + 'finder by default.', + DeprecationWarning, stacklevel=2) + + filepath = cls._search_registry(fullname) + if filepath is None: + return None + try: + _path_stat(filepath) + except OSError: + return None + for loader, suffixes in _get_supported_file_loaders(): + if filepath.endswith(tuple(suffixes)): + spec = _bootstrap.spec_from_loader(fullname, + loader(fullname, filepath), + origin=filepath) + return spec + + +class _LoaderBasics: + + """Base class of common code needed by both SourceLoader and + SourcelessFileLoader.""" + + def is_package(self, fullname): + """Concrete implementation of InspectLoader.is_package by checking if + the path returned by get_filename has a filename of '__init__.py'.""" + filename = _path_split(self.get_filename(fullname))[1] + filename_base = filename.rsplit('.', 1)[0] + tail_name = fullname.rpartition('.')[2] + return filename_base == '__init__' and tail_name != '__init__' + + def create_module(self, spec): + """Use default semantics for module creation.""" + + def exec_module(self, module): + """Execute the module.""" + code = self.get_code(module.__name__) + if code is None: + raise ImportError(f'cannot load module {module.__name__!r} when ' + 'get_code() returns None') + _bootstrap._call_with_frames_removed(exec, code, module.__dict__) + + def load_module(self, fullname): + """This method is deprecated.""" + # Warning implemented in _load_module_shim(). + return _bootstrap._load_module_shim(self, fullname) + + +class SourceLoader(_LoaderBasics): + + def path_mtime(self, path): + """Optional method that returns the modification time (an int) for the + specified path (a str). + + Raises OSError when the path cannot be handled. + """ + raise OSError + + def path_stats(self, path): + """Optional method returning a metadata dict for the specified + path (a str). + + Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. + + Implementing this method allows the loader to read bytecode files. + Raises OSError when the path cannot be handled. + """ + return {'mtime': self.path_mtime(path)} + + def _cache_bytecode(self, source_path, cache_path, data): + """Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + + The source path is needed in order to correctly transfer permissions + """ + # For backwards compatibility, we delegate to set_data() + return self.set_data(cache_path, data) + + def set_data(self, path, data): + """Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + """ + + + def get_source(self, fullname): + """Concrete implementation of InspectLoader.get_source.""" + path = self.get_filename(fullname) + try: + source_bytes = self.get_data(path) + except OSError as exc: + raise ImportError('source not available through get_data()', + name=fullname) from exc + return decode_source(source_bytes) + + def source_to_code(self, data, path, *, _optimize=-1): + """Return the code object compiled from source. + + The 'data' argument can be any object type that compile() supports. + """ + return _bootstrap._call_with_frames_removed(compile, data, path, 'exec', + dont_inherit=True, optimize=_optimize) + + def get_code(self, fullname): + """Concrete implementation of InspectLoader.get_code. 
+ + Reading of bytecode requires path_stats to be implemented. To write + bytecode, set_data must also be implemented. + + """ + source_path = self.get_filename(fullname) + source_mtime = None + source_bytes = None + source_hash = None + hash_based = False + check_source = True + try: + bytecode_path = cache_from_source(source_path) + except NotImplementedError: + bytecode_path = None + else: + try: + st = self.path_stats(source_path) + except OSError: + pass + else: + source_mtime = int(st['mtime']) + try: + data = self.get_data(bytecode_path) + except OSError: + pass + else: + exc_details = { + 'name': fullname, + 'path': bytecode_path, + } + try: + flags = _classify_pyc(data, fullname, exc_details) + bytes_data = memoryview(data)[16:] + hash_based = flags & 0b1 != 0 + if hash_based: + check_source = flags & 0b10 != 0 + if (_imp.check_hash_based_pycs != 'never' and + (check_source or + _imp.check_hash_based_pycs == 'always')): + source_bytes = self.get_data(source_path) + source_hash = _imp.source_hash( + _imp.pyc_magic_number_token, + source_bytes, + ) + _validate_hash_pyc(data, source_hash, fullname, + exc_details) + else: + _validate_timestamp_pyc( + data, + source_mtime, + st['size'], + fullname, + exc_details, + ) + except (ImportError, EOFError): + pass + else: + _bootstrap._verbose_message('{} matches {}', bytecode_path, + source_path) + return _compile_bytecode(bytes_data, name=fullname, + bytecode_path=bytecode_path, + source_path=source_path) + if source_bytes is None: + source_bytes = self.get_data(source_path) + code_object = self.source_to_code(source_bytes, source_path) + _bootstrap._verbose_message('code object from {}', source_path) + if (not sys.dont_write_bytecode and bytecode_path is not None and + source_mtime is not None): + if hash_based: + if source_hash is None: + source_hash = _imp.source_hash(_imp.pyc_magic_number_token, + source_bytes) + data = _code_to_hash_pyc(code_object, source_hash, check_source) + else: + data = _code_to_timestamp_pyc(code_object, source_mtime, + len(source_bytes)) + try: + self._cache_bytecode(source_path, bytecode_path, data) + except NotImplementedError: + pass + return code_object + + +class FileLoader: + + """Base file loader class which implements the loader protocol methods that + require file system usage.""" + + def __init__(self, fullname, path): + """Cache the module name and the path to the file found by the + finder.""" + self.name = fullname + self.path = path + + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self.__dict__ == other.__dict__) + + def __hash__(self): + return hash(self.name) ^ hash(self.path) + + @_check_name + def load_module(self, fullname): + """Load a module from a file. + + This method is deprecated. Use exec_module() instead. + + """ + # The only reason for this method is for the name check. + # Issue #14857: Avoid the zero-argument form of super so the implementation + # of that form can be updated without breaking the frozen module. 
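+        # Zero-argument super() is compiled to use the __class__ cell, which
+        # is baked into the frozen bytecode; the explicit two-argument form
+        # keeps this method patchable without regenerating the frozen module.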
+ return super(FileLoader, self).load_module(fullname) + + @_check_name + def get_filename(self, fullname): + """Return the path to the source file as found by the finder.""" + return self.path + + def get_data(self, path): + """Return the data from path as raw bytes.""" + if isinstance(self, (SourceLoader, SourcelessFileLoader, ExtensionFileLoader)): + with _io.open_code(str(path)) as file: + return file.read() + else: + with _io.FileIO(path, 'r') as file: + return file.read() + + @_check_name + def get_resource_reader(self, module): + from importlib.readers import FileReader + return FileReader(self) + + +class SourceFileLoader(FileLoader, SourceLoader): + + """Concrete implementation of SourceLoader using the file system.""" + + def path_stats(self, path): + """Return the metadata for the path.""" + st = _path_stat(path) + return {'mtime': st.st_mtime, 'size': st.st_size} + + def _cache_bytecode(self, source_path, bytecode_path, data): + # Adapt between the two APIs + mode = _calc_mode(source_path) + return self.set_data(bytecode_path, data, _mode=mode) + + def set_data(self, path, data, *, _mode=0o666): + """Write bytes data to a file.""" + parent, filename = _path_split(path) + path_parts = [] + # Figure out what directories are missing. + while parent and not _path_isdir(parent): + parent, part = _path_split(parent) + path_parts.append(part) + # Create needed directories. + for part in reversed(path_parts): + parent = _path_join(parent, part) + try: + _os.mkdir(parent) + except FileExistsError: + # Probably another Python process already created the dir. + continue + except OSError as exc: + # Could be a permission error, read-only filesystem: just forget + # about writing the data. + _bootstrap._verbose_message('could not create {!r}: {!r}', + parent, exc) + return + try: + _write_atomic(path, data, _mode) + _bootstrap._verbose_message('created {!r}', path) + except OSError as exc: + # Same as above: just don't write the bytecode. + _bootstrap._verbose_message('could not create {!r}: {!r}', path, + exc) + + +class SourcelessFileLoader(FileLoader, _LoaderBasics): + + """Loader which handles sourceless file imports.""" + + def get_code(self, fullname): + path = self.get_filename(fullname) + data = self.get_data(path) + # Call _classify_pyc to do basic validation of the pyc but ignore the + # result. There's no source to check against. + exc_details = { + 'name': fullname, + 'path': path, + } + _classify_pyc(data, fullname, exc_details) + return _compile_bytecode( + memoryview(data)[16:], + name=fullname, + bytecode_path=path, + ) + + def get_source(self, fullname): + """Return None as there is no source code.""" + return None + + +class ExtensionFileLoader(FileLoader, _LoaderBasics): + + """Loader for extension modules. + + The constructor is designed to work with FileFinder. 
+ + """ + + def __init__(self, name, path): + self.name = name + self.path = path + + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self.__dict__ == other.__dict__) + + def __hash__(self): + return hash(self.name) ^ hash(self.path) + + def create_module(self, spec): + """Create an uninitialized extension module""" + module = _bootstrap._call_with_frames_removed( + _imp.create_dynamic, spec) + _bootstrap._verbose_message('extension module {!r} loaded from {!r}', + spec.name, self.path) + return module + + def exec_module(self, module): + """Initialize an extension module""" + _bootstrap._call_with_frames_removed(_imp.exec_dynamic, module) + _bootstrap._verbose_message('extension module {!r} executed from {!r}', + self.name, self.path) + + def is_package(self, fullname): + """Return True if the extension module is a package.""" + file_name = _path_split(self.path)[1] + return any(file_name == '__init__' + suffix + for suffix in EXTENSION_SUFFIXES) + + def get_code(self, fullname): + """Return None as an extension module cannot create a code object.""" + return None + + def get_source(self, fullname): + """Return None as extension modules have no source code.""" + return None + + @_check_name + def get_filename(self, fullname): + """Return the path to the source file as found by the finder.""" + return self.path + + +class _NamespacePath: + """Represents a namespace package's path. It uses the module name + to find its parent module, and from there it looks up the parent's + __path__. When this changes, the module's own path is recomputed, + using path_finder. For top-level modules, the parent module's path + is sys.path.""" + + # When invalidate_caches() is called, this epoch is incremented + # https://bugs.python.org/issue45703 + _epoch = 0 + + def __init__(self, name, path, path_finder): + self._name = name + self._path = path + self._last_parent_path = tuple(self._get_parent_path()) + self._last_epoch = self._epoch + self._path_finder = path_finder + + def _find_parent_path_names(self): + """Returns a tuple of (parent-module-name, parent-path-attr-name)""" + parent, dot, me = self._name.rpartition('.') + if dot == '': + # This is a top-level module. sys.path contains the parent path. + return 'sys', 'path' + # Not a top-level module. parent-module.__path__ contains the + # parent path. 
+        return parent, '__path__'
+
+    def _get_parent_path(self):
+        parent_module_name, path_attr_name = self._find_parent_path_names()
+        return getattr(sys.modules[parent_module_name], path_attr_name)
+
+    def _recalculate(self):
+        # If the parent's path has changed, recalculate _path
+        parent_path = tuple(self._get_parent_path())  # Make a copy
+        if parent_path != self._last_parent_path or self._epoch != self._last_epoch:
+            spec = self._path_finder(self._name, parent_path)
+            # Note that no changes are made if a loader is returned, but we
+            # do remember the new parent path
+            if spec is not None and spec.loader is None:
+                if spec.submodule_search_locations:
+                    self._path = spec.submodule_search_locations
+            self._last_parent_path = parent_path  # Save the copy
+            self._last_epoch = self._epoch
+        return self._path
+
+    def __iter__(self):
+        return iter(self._recalculate())
+
+    def __getitem__(self, index):
+        return self._recalculate()[index]
+
+    def __setitem__(self, index, path):
+        self._path[index] = path
+
+    def __len__(self):
+        return len(self._recalculate())
+
+    def __repr__(self):
+        return f'_NamespacePath({self._path!r})'
+
+    def __contains__(self, item):
+        return item in self._recalculate()
+
+    def append(self, item):
+        self._path.append(item)
+
+
+# This class is actually exposed publicly in a namespace package's __loader__
+# attribute, so it should be available through a non-private name.
+# https://github.com/python/cpython/issues/92054
+class NamespaceLoader:
+    def __init__(self, name, path, path_finder):
+        self._path = _NamespacePath(name, path, path_finder)
+
+    def is_package(self, fullname):
+        return True
+
+    def get_source(self, fullname):
+        return ''
+
+    def get_code(self, fullname):
+        return compile('', '<string>', 'exec', dont_inherit=True)
+
+    def create_module(self, spec):
+        """Use default semantics for module creation."""
+
+    def exec_module(self, module):
+        pass
+
+    def load_module(self, fullname):
+        """Load a namespace module.
+
+        This method is deprecated. Use exec_module() instead.
+
+        """
+        # The import system never calls this method.
+        _bootstrap._verbose_message('namespace module loaded with path {!r}',
+                                    self._path)
+        # Warning implemented in _load_module_shim().
+        return _bootstrap._load_module_shim(self, fullname)
+
+    def get_resource_reader(self, module):
+        from importlib.readers import NamespaceReader
+        return NamespaceReader(self._path)
+
+
+# We use this exclusively in module_from_spec() for backward-compatibility.
+_NamespaceLoader = NamespaceLoader
+
+
+# Finders #####################################################################
+
+class PathFinder:
+
+    """Meta path finder for sys.path and package __path__ attributes."""
+
+    @staticmethod
+    def invalidate_caches():
+        """Call the invalidate_caches() method on all path entry finders
+        stored in sys.path_importer_cache (where implemented)."""
+        for name, finder in list(sys.path_importer_cache.items()):
+            # Drop entry if finder name is a relative path. The current
+            # working directory may have changed.
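+            # Dropping such entries forces a fresh sys.path_hooks search the
+            # next time that path entry is used, e.g. after an os.chdir().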
+ if finder is None or not _path_isabs(name): + del sys.path_importer_cache[name] + elif hasattr(finder, 'invalidate_caches'): + finder.invalidate_caches() + # Also invalidate the caches of _NamespacePaths + # https://bugs.python.org/issue45703 + _NamespacePath._epoch += 1 + + from importlib.metadata import MetadataPathFinder + MetadataPathFinder.invalidate_caches() + + @staticmethod + def _path_hooks(path): + """Search sys.path_hooks for a finder for 'path'.""" + if sys.path_hooks is not None and not sys.path_hooks: + _warnings.warn('sys.path_hooks is empty', ImportWarning) + for hook in sys.path_hooks: + try: + return hook(path) + except ImportError: + continue + else: + return None + + @classmethod + def _path_importer_cache(cls, path): + """Get the finder for the path entry from sys.path_importer_cache. + + If the path entry is not in the cache, find the appropriate finder + and cache it. If no finder is available, store None. + + """ + if path == '': + try: + path = _os.getcwd() + except (FileNotFoundError, PermissionError): + # Don't cache the failure as the cwd can easily change to + # a valid directory later on. + return None + try: + finder = sys.path_importer_cache[path] + except KeyError: + finder = cls._path_hooks(path) + sys.path_importer_cache[path] = finder + return finder + + @classmethod + def _get_spec(cls, fullname, path, target=None): + """Find the loader or namespace_path for this module/package name.""" + # If this ends up being a namespace package, namespace_path is + # the list of paths that will become its __path__ + namespace_path = [] + for entry in path: + if not isinstance(entry, str): + continue + finder = cls._path_importer_cache(entry) + if finder is not None: + spec = finder.find_spec(fullname, target) + if spec is None: + continue + if spec.loader is not None: + return spec + portions = spec.submodule_search_locations + if portions is None: + raise ImportError('spec missing loader') + # This is possibly part of a namespace package. + # Remember these path entries (if any) for when we + # create a namespace package, and continue iterating + # on path. + namespace_path.extend(portions) + else: + spec = _bootstrap.ModuleSpec(fullname, None) + spec.submodule_search_locations = namespace_path + return spec + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + """Try to find a spec for 'fullname' on sys.path or 'path'. + + The search is based on sys.path_hooks and sys.path_importer_cache. + """ + if path is None: + path = sys.path + spec = cls._get_spec(fullname, path, target) + if spec is None: + return None + elif spec.loader is None: + namespace_path = spec.submodule_search_locations + if namespace_path: + # We found at least one namespace path. Return a spec which + # can create the namespace package. + spec.origin = None + spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec) + return spec + else: + return None + else: + return spec + + @staticmethod + def find_distributions(*args, **kwargs): + """ + Find distributions. + + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching ``context.name`` + (or all names if ``None`` indicated) along the paths in the list + of directories ``context.path``. + """ + from importlib.metadata import MetadataPathFinder + return MetadataPathFinder.find_distributions(*args, **kwargs) + + +class FileFinder: + + """File-based finder. 
+ + Interactions with the file system are cached for performance, being + refreshed when the directory the finder is handling has been modified. + + """ + + def __init__(self, path, *loader_details): + """Initialize with the path to search on and a variable number of + 2-tuples containing the loader and the file suffixes the loader + recognizes.""" + loaders = [] + for loader, suffixes in loader_details: + loaders.extend((suffix, loader) for suffix in suffixes) + self._loaders = loaders + # Base (directory) path + if not path or path == '.': + self.path = _os.getcwd() + else: + self.path = _path_abspath(path) + self._path_mtime = -1 + self._path_cache = set() + self._relaxed_path_cache = set() + + def invalidate_caches(self): + """Invalidate the directory mtime.""" + self._path_mtime = -1 + + def _get_spec(self, loader_class, fullname, path, smsl, target): + loader = loader_class(fullname, path) + return spec_from_file_location(fullname, path, loader=loader, + submodule_search_locations=smsl) + + def find_spec(self, fullname, target=None): + """Try to find a spec for the specified module. + + Returns the matching spec, or None if not found. + """ + is_namespace = False + tail_module = fullname.rpartition('.')[2] + try: + mtime = _path_stat(self.path or _os.getcwd()).st_mtime + except OSError: + mtime = -1 + if mtime != self._path_mtime: + self._fill_cache() + self._path_mtime = mtime + # tail_module keeps the original casing, for __file__ and friends + if _relax_case(): + cache = self._relaxed_path_cache + cache_module = tail_module.lower() + else: + cache = self._path_cache + cache_module = tail_module + # Check if the module is the name of a directory (and thus a package). + if cache_module in cache: + base_path = _path_join(self.path, tail_module) + for suffix, loader_class in self._loaders: + init_filename = '__init__' + suffix + full_path = _path_join(base_path, init_filename) + if _path_isfile(full_path): + return self._get_spec(loader_class, fullname, full_path, [base_path], target) + else: + # If a namespace package, return the path if we don't + # find a module in the next section. + is_namespace = _path_isdir(base_path) + # Check whether a file with a proper suffix exists. + for suffix, loader_class in self._loaders: + try: + full_path = _path_join(self.path, tail_module + suffix) + except ValueError: + return None + _bootstrap._verbose_message('trying {}', full_path, verbosity=2) + if cache_module + suffix in cache: + if _path_isfile(full_path): + return self._get_spec(loader_class, fullname, full_path, + None, target) + if is_namespace: + _bootstrap._verbose_message('possible namespace for {}', base_path) + spec = _bootstrap.ModuleSpec(fullname, None) + spec.submodule_search_locations = [base_path] + return spec + return None + + def _fill_cache(self): + """Fill the cache of potential modules and packages for this directory.""" + path = self.path + try: + contents = _os.listdir(path or _os.getcwd()) + except (FileNotFoundError, PermissionError, NotADirectoryError): + # Directory has either been removed, turned into a file, or made + # unreadable. + contents = [] + # We store two cached versions, to handle runtime changes of the + # PYTHONCASEOK environment variable. + if not sys.platform.startswith('win'): + self._path_cache = set(contents) + else: + # Windows users can import modules with case-insensitive file + # suffixes (for legacy reasons). Make the suffix lowercase here + # so it's done once instead of for every import.
This is safe as + # the specified suffixes to check against are always specified in a + # case-sensitive manner. + lower_suffix_contents = set() + for item in contents: + name, dot, suffix = item.partition('.') + if dot: + new_name = f'{name}.{suffix.lower()}' + else: + new_name = name + lower_suffix_contents.add(new_name) + self._path_cache = lower_suffix_contents + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): + self._relaxed_path_cache = {fn.lower() for fn in contents} + + @classmethod + def path_hook(cls, *loader_details): + """A class method which returns a closure to use on sys.path_hooks + which will return an instance using the specified loaders and the path + called on the closure. + + If the path called on the closure is not a directory, ImportError is + raised. + + """ + def path_hook_for_FileFinder(path): + """Path hook for importlib.machinery.FileFinder.""" + if not _path_isdir(path): + raise ImportError('only directories are supported', path=path) + return cls(path, *loader_details) + + return path_hook_for_FileFinder + + def __repr__(self): + return f'FileFinder({self.path!r})' + + +class AppleFrameworkLoader(ExtensionFileLoader): + """A loader for modules that have been packaged as frameworks for + compatibility with Apple's iOS App Store policies. + """ + def create_module(self, spec): + # If the ModuleSpec has been created by the FileFinder, it will have + # been created with an origin pointing to the .fwork file. We need to + # redirect this to the location in the Frameworks folder, using the + # content of the .fwork file. + if spec.origin.endswith(".fwork"): + with _io.FileIO(spec.origin, 'r') as file: + framework_binary = file.read().decode().strip() + bundle_path = _path_split(sys.executable)[0] + spec.origin = _path_join(bundle_path, framework_binary) + + # If the loader is created based on the spec for a loaded module, the + # path will be pointing at the Framework location. If this occurs, + # get the original .fwork location to use as the module's __file__. + if self.path.endswith(".fwork"): + path = self.path + else: + with _io.FileIO(self.path + ".origin", 'r') as file: + origin = file.read().decode().strip() + bundle_path = _path_split(sys.executable)[0] + path = _path_join(bundle_path, origin) + + module = _bootstrap._call_with_frames_removed(_imp.create_dynamic, spec) + + _bootstrap._verbose_message( + "Apple framework extension module {!r} loaded from {!r} (path {!r})", + spec.name, + spec.origin, + path, + ) + + # Ensure that the __file__ points at the .fwork location + try: + module.__file__ = path + except AttributeError: + # Not important enough to report. + # (The error is also ignored in _bootstrap._init_module_attrs or + # import_run_extension in import.c) + pass + + return module + +# Import setup ############################################################### + +def _fix_up_module(ns, name, pathname, cpathname=None): + # This function is used by PyImport_ExecCodeModuleObject(). + loader = ns.get('__loader__') + spec = ns.get('__spec__') + if not loader: + if spec: + loader = spec.loader + elif pathname == cpathname: + loader = SourcelessFileLoader(name, pathname) + else: + loader = SourceFileLoader(name, pathname) + if not spec: + spec = spec_from_file_location(name, pathname, loader=loader) + if cpathname: + spec.cached = _path_abspath(cpathname) + try: + ns['__spec__'] = spec + ns['__loader__'] = loader + ns['__file__'] = pathname + ns['__cached__'] = cpathname + except Exception: + # Not important enough to report.
+ pass + + +def _get_supported_file_loaders(): + """Returns a list of file-based module loaders. + + Each item is a tuple (loader, suffixes). + """ + extension_loaders = [] + if hasattr(_imp, 'create_dynamic'): + if sys.platform in {"ios", "tvos", "watchos"}: + extension_loaders = [(AppleFrameworkLoader, [ + suffix.replace(".so", ".fwork") + for suffix in _imp.extension_suffixes() + ])] + extension_loaders.append((ExtensionFileLoader, _imp.extension_suffixes())) + source = SourceFileLoader, SOURCE_SUFFIXES + bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES + return extension_loaders + [source, bytecode] + + +def _set_bootstrap_module(_bootstrap_module): + global _bootstrap + _bootstrap = _bootstrap_module + + +def _install(_bootstrap_module): + """Install the path-based import components.""" + _set_bootstrap_module(_bootstrap_module) + supported_loaders = _get_supported_file_loaders() + sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)]) + sys.meta_path.append(PathFinder) diff --git a/Python314_4_x64_Template/Lib/importlib/abc.py b/Python314_4_x64_Template/Lib/importlib/abc.py new file mode 100644 index 00000000..1e47495f --- /dev/null +++ b/Python314_4_x64_Template/Lib/importlib/abc.py @@ -0,0 +1,234 @@ +"""Abstract base classes related to import.""" +from . import _bootstrap_external +from . import machinery +try: + import _frozen_importlib +except ImportError as exc: + if exc.name != '_frozen_importlib': + raise + _frozen_importlib = None +try: + import _frozen_importlib_external +except ImportError: + _frozen_importlib_external = _bootstrap_external +from ._abc import Loader +import abc + + +__all__ = [ + 'Loader', 'MetaPathFinder', 'PathEntryFinder', + 'ResourceLoader', 'InspectLoader', 'ExecutionLoader', + 'FileLoader', 'SourceLoader', +] + + +def _register(abstract_cls, *classes): + for cls in classes: + abstract_cls.register(cls) + if _frozen_importlib is not None: + try: + frozen_cls = getattr(_frozen_importlib, cls.__name__) + except AttributeError: + frozen_cls = getattr(_frozen_importlib_external, cls.__name__) + abstract_cls.register(frozen_cls) + + +class MetaPathFinder(metaclass=abc.ABCMeta): + + """Abstract base class for import finders on sys.meta_path.""" + + # We don't define find_spec() here since that would break + # hasattr checks we do to support backward compatibility. + + def invalidate_caches(self): + """An optional method for clearing the finder's cache, if any. + This method is used by importlib.invalidate_caches(). + """ + +_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter, + machinery.PathFinder, machinery.WindowsRegistryFinder) + + +class PathEntryFinder(metaclass=abc.ABCMeta): + + """Abstract base class for path entry finders used by PathFinder.""" + + def invalidate_caches(self): + """An optional method for clearing the finder's cache, if any. + This method is used by PathFinder.invalidate_caches(). + """ + +_register(PathEntryFinder, machinery.FileFinder) + + +class ResourceLoader(Loader): + + """Abstract base class for loaders which can return data from their + back-end storage to facilitate reading data to perform an import. + + This ABC represents one of the optional protocols specified by PEP 302. + + For directly loading resources, use TraversableResources instead. This class + primarily exists for backwards compatibility with other ABCs in this module. + + """ + + @abc.abstractmethod + def get_data(self, path): + """Abstract method which when implemented should return the bytes for + the specified path. 
The path must be a str.""" + raise OSError + + +class InspectLoader(Loader): + + """Abstract base class for loaders which support inspection about the + modules they can load. + + This ABC represents one of the optional protocols specified by PEP 302. + + """ + + def is_package(self, fullname): + """Optional method which when implemented should return whether the + module is a package. The fullname is a str. Returns a bool. + + Raises ImportError if the module cannot be found. + """ + raise ImportError + + def get_code(self, fullname): + """Method which returns the code object for the module. + + The fullname is a str. Returns a types.CodeType if possible, else + returns None if a code object does not make sense + (e.g. built-in module). Raises ImportError if the module cannot be + found. + """ + source = self.get_source(fullname) + if source is None: + return None + return self.source_to_code(source) + + @abc.abstractmethod + def get_source(self, fullname): + """Abstract method which should return the source code for the + module. The fullname is a str. Returns a str. + + Raises ImportError if the module cannot be found. + """ + raise ImportError + + @staticmethod + def source_to_code(data, path=''): + """Compile 'data' into a code object. + + The 'data' argument can be anything that compile() can handle. The 'path' + argument should be where the data was retrieved (when applicable).""" + return compile(data, path, 'exec', dont_inherit=True) + + exec_module = _bootstrap_external._LoaderBasics.exec_module + load_module = _bootstrap_external._LoaderBasics.load_module + +_register(InspectLoader, machinery.BuiltinImporter, machinery.FrozenImporter, machinery.NamespaceLoader) + + +class ExecutionLoader(InspectLoader): + + """Abstract base class for loaders that wish to support the execution of + modules as scripts. + + This ABC represents one of the optional protocols specified in PEP 302. + + """ + + @abc.abstractmethod + def get_filename(self, fullname): + """Abstract method which should return the value that __file__ is to be + set to. + + Raises ImportError if the module cannot be found. + """ + raise ImportError + + def get_code(self, fullname): + """Method to return the code object for fullname. + + Should return None if not applicable (e.g. built-in module). + Raise ImportError if the module cannot be found. + """ + source = self.get_source(fullname) + if source is None: + return None + try: + path = self.get_filename(fullname) + except ImportError: + return self.source_to_code(source) + else: + return self.source_to_code(source, path) + +_register( + ExecutionLoader, + machinery.ExtensionFileLoader, + machinery.AppleFrameworkLoader, +) + + +class FileLoader(_bootstrap_external.FileLoader, ResourceLoader, ExecutionLoader): + + """Abstract base class partially implementing the ResourceLoader and + ExecutionLoader ABCs.""" + +_register(FileLoader, machinery.SourceFileLoader, + machinery.SourcelessFileLoader) + + +class SourceLoader(_bootstrap_external.SourceLoader, ResourceLoader, ExecutionLoader): + + """Abstract base class for loading source code (and optionally any + corresponding bytecode). + + To support loading from source code, the abstractmethods inherited from + ResourceLoader and ExecutionLoader need to be implemented. To also support + loading from bytecode, the optional methods specified directly by this ABC + are required.
+ + Inherited abstractmethods not implemented in this ABC: + + * ResourceLoader.get_data + * ExecutionLoader.get_filename + + """ + + def path_mtime(self, path): + """Return the (int) modification time for the path (str).""" + import warnings + warnings.warn('SourceLoader.path_mtime is deprecated in favour of ' + 'SourceLoader.path_stats().', + DeprecationWarning, stacklevel=2) + if self.path_stats.__func__ is SourceLoader.path_stats: + raise OSError + return int(self.path_stats(path)['mtime']) + + def path_stats(self, path): + """Return a metadata dict for the source pointed to by the path (str). + Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. + """ + if self.path_mtime.__func__ is SourceLoader.path_mtime: + raise OSError + return {'mtime': self.path_mtime(path)} + + def set_data(self, path, data): + """Write the bytes to the path (if possible). + + Accepts a str path and data as bytes. + + Any needed intermediary directories are to be created. If for some + reason the file cannot be written because of permissions, fail + silently. + """ + +_register(SourceLoader, machinery.SourceFileLoader) diff --git a/Python314_4_x64_Template/Lib/importlib/machinery.py b/Python314_4_x64_Template/Lib/importlib/machinery.py new file mode 100644 index 00000000..63d72644 --- /dev/null +++ b/Python314_4_x64_Template/Lib/importlib/machinery.py @@ -0,0 +1,50 @@ +"""The machinery of importlib: finders, loaders, hooks, etc.""" + +from ._bootstrap import ModuleSpec +from ._bootstrap import BuiltinImporter +from ._bootstrap import FrozenImporter +from ._bootstrap_external import ( + SOURCE_SUFFIXES, BYTECODE_SUFFIXES, EXTENSION_SUFFIXES, + DEBUG_BYTECODE_SUFFIXES as _DEBUG_BYTECODE_SUFFIXES, + OPTIMIZED_BYTECODE_SUFFIXES as _OPTIMIZED_BYTECODE_SUFFIXES +) +from ._bootstrap_external import WindowsRegistryFinder +from ._bootstrap_external import PathFinder +from ._bootstrap_external import FileFinder +from ._bootstrap_external import SourceFileLoader +from ._bootstrap_external import SourcelessFileLoader +from ._bootstrap_external import ExtensionFileLoader +from ._bootstrap_external import AppleFrameworkLoader +from ._bootstrap_external import NamespaceLoader + + +def all_suffixes(): + """Returns a list of all recognized module suffixes for this process""" + return SOURCE_SUFFIXES + BYTECODE_SUFFIXES + EXTENSION_SUFFIXES + + +__all__ = ['AppleFrameworkLoader', 'BYTECODE_SUFFIXES', 'BuiltinImporter', + 'DEBUG_BYTECODE_SUFFIXES', 'EXTENSION_SUFFIXES', + 'ExtensionFileLoader', 'FileFinder', 'FrozenImporter', 'ModuleSpec', + 'NamespaceLoader', 'OPTIMIZED_BYTECODE_SUFFIXES', 'PathFinder', + 'SOURCE_SUFFIXES', 'SourceFileLoader', 'SourcelessFileLoader', + 'WindowsRegistryFinder', 'all_suffixes'] + + +def __getattr__(name): + import warnings + + if name == 'DEBUG_BYTECODE_SUFFIXES': + warnings.warn('importlib.machinery.DEBUG_BYTECODE_SUFFIXES is ' + 'deprecated; use importlib.machinery.BYTECODE_SUFFIXES ' + 'instead.', + DeprecationWarning, stacklevel=2) + return _DEBUG_BYTECODE_SUFFIXES + elif name == 'OPTIMIZED_BYTECODE_SUFFIXES': + warnings.warn('importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES is ' + 'deprecated; use importlib.machinery.BYTECODE_SUFFIXES ' + 'instead.', + DeprecationWarning, stacklevel=2) + return _OPTIMIZED_BYTECODE_SUFFIXES + + raise AttributeError(f'module {__name__!r} has no attribute {name!r}') diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/__init__.py 
b/Python314_4_x64_Template/Lib/importlib/metadata/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/metadata/__init__.py rename to Python314_4_x64_Template/Lib/importlib/metadata/__init__.py diff --git a/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..eafb0707 Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-314.pyc new file mode 100644 index 00000000..86a33230 Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-314.pyc new file mode 100644 index 00000000..ac44c18e Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-314.pyc new file mode 100644 index 00000000..b489c3b8 Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-314.pyc new file mode 100644 index 00000000..b94cd1a9 Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-314.pyc new file mode 100644 index 00000000..73385174 Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_text.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_text.cpython-314.pyc new file mode 100644 index 00000000..e9b02073 Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/metadata/__pycache__/_text.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/_adapters.py b/Python314_4_x64_Template/Lib/importlib/metadata/_adapters.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/metadata/_adapters.py rename to Python314_4_x64_Template/Lib/importlib/metadata/_adapters.py diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/_collections.py b/Python314_4_x64_Template/Lib/importlib/metadata/_collections.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/metadata/_collections.py rename to Python314_4_x64_Template/Lib/importlib/metadata/_collections.py diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/_functools.py b/Python314_4_x64_Template/Lib/importlib/metadata/_functools.py similarity index 100% rename from 
Python313_13_x64_Template/Lib/importlib/metadata/_functools.py rename to Python314_4_x64_Template/Lib/importlib/metadata/_functools.py diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/_itertools.py b/Python314_4_x64_Template/Lib/importlib/metadata/_itertools.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/metadata/_itertools.py rename to Python314_4_x64_Template/Lib/importlib/metadata/_itertools.py diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/_meta.py b/Python314_4_x64_Template/Lib/importlib/metadata/_meta.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/metadata/_meta.py rename to Python314_4_x64_Template/Lib/importlib/metadata/_meta.py diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/_text.py b/Python314_4_x64_Template/Lib/importlib/metadata/_text.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/metadata/_text.py rename to Python314_4_x64_Template/Lib/importlib/metadata/_text.py diff --git a/Python313_13_x64_Template/Lib/importlib/metadata/diagnose.py b/Python314_4_x64_Template/Lib/importlib/metadata/diagnose.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/metadata/diagnose.py rename to Python314_4_x64_Template/Lib/importlib/metadata/diagnose.py diff --git a/Python313_13_x64_Template/Lib/importlib/readers.py b/Python314_4_x64_Template/Lib/importlib/readers.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/readers.py rename to Python314_4_x64_Template/Lib/importlib/readers.py diff --git a/Python313_13_x64_Template/Lib/importlib/resources/__init__.py b/Python314_4_x64_Template/Lib/importlib/resources/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/resources/__init__.py rename to Python314_4_x64_Template/Lib/importlib/resources/__init__.py diff --git a/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..69ce4c3b Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-314.pyc new file mode 100644 index 00000000..17e3c272 Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_common.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_common.cpython-314.pyc new file mode 100644 index 00000000..2090b05c Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_common.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_functional.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_functional.cpython-314.pyc new file mode 100644 index 00000000..ef39ad2d Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_functional.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-314.pyc new file mode 100644 index 00000000..ad4bbf6d 
Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/abc.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/abc.cpython-314.pyc new file mode 100644 index 00000000..1627c2bc Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/abc.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/readers.cpython-314.pyc b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/readers.cpython-314.pyc new file mode 100644 index 00000000..e294fe47 Binary files /dev/null and b/Python314_4_x64_Template/Lib/importlib/resources/__pycache__/readers.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/importlib/resources/_adapters.py b/Python314_4_x64_Template/Lib/importlib/resources/_adapters.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/resources/_adapters.py rename to Python314_4_x64_Template/Lib/importlib/resources/_adapters.py diff --git a/Python314_4_x64_Template/Lib/importlib/resources/_common.py b/Python314_4_x64_Template/Lib/importlib/resources/_common.py new file mode 100644 index 00000000..4e9014c4 --- /dev/null +++ b/Python314_4_x64_Template/Lib/importlib/resources/_common.py @@ -0,0 +1,211 @@ +import os +import pathlib +import tempfile +import functools +import contextlib +import types +import importlib +import inspect +import warnings +import itertools + +from typing import Union, Optional, cast +from .abc import ResourceReader, Traversable + +Package = Union[types.ModuleType, str] +Anchor = Package + + +def package_to_anchor(func): + """ + Replace the 'package' parameter with 'anchor' and warn about the change. + + Other errors should fall through. + + >>> files('a', 'b') + Traceback (most recent call last): + TypeError: files() takes from 0 to 1 positional arguments but 2 were given + + Remove this compatibility in Python 3.14. + """ + undefined = object() + + @functools.wraps(func) + def wrapper(anchor=undefined, package=undefined): + if package is not undefined: + if anchor is not undefined: + return func(anchor, package) + warnings.warn( + "First parameter to files is renamed to 'anchor'", + DeprecationWarning, + stacklevel=2, + ) + return func(package) + elif anchor is undefined: + return func() + return func(anchor) + + return wrapper + + +@package_to_anchor +def files(anchor: Optional[Anchor] = None) -> Traversable: + """ + Get a Traversable resource for an anchor. + """ + return from_package(resolve(anchor)) + + +def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]: + """ + Return the package's loader if it's a ResourceReader. + """ + # We can't use + # an issubclass() check here because apparently abc.ABCMeta's __subclasscheck__() + # hook wants to create a weak reference to the object, but + # zipimport.zipimporter does not support weak references, resulting in a + # TypeError. That seems terrible.
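+    # When the loader provides the hook, it is expected to return a + # ResourceReader for the named module (zipimport.zipimporter is one + # well-known provider, for example).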
+ spec = package.__spec__ + reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore[union-attr] + if reader is None: + return None + return reader(spec.name) # type: ignore[union-attr] + + +@functools.singledispatch +def resolve(cand: Optional[Anchor]) -> types.ModuleType: + return cast(types.ModuleType, cand) + + +@resolve.register +def _(cand: str) -> types.ModuleType: + return importlib.import_module(cand) + + +@resolve.register +def _(cand: None) -> types.ModuleType: + return resolve(_infer_caller().f_globals['__name__']) + + +def _infer_caller(): + """ + Walk the stack and find the frame of the first caller not in this module. + """ + + def is_this_file(frame_info): + return frame_info.filename == stack[0].filename + + def is_wrapper(frame_info): + return frame_info.function == 'wrapper' + + stack = inspect.stack() + not_this_file = itertools.filterfalse(is_this_file, stack) + # also exclude 'wrapper' due to singledispatch in the call stack + callers = itertools.filterfalse(is_wrapper, not_this_file) + return next(callers).frame + + +def from_package(package: types.ModuleType): + """ + Return a Traversable object for the given package. + + """ + # deferred for performance (python/cpython#109829) + from ._adapters import wrap_spec + + spec = wrap_spec(package) + reader = spec.loader.get_resource_reader(spec.name) + return reader.files() + + +@contextlib.contextmanager +def _tempfile( + reader, + suffix='', + # gh-93353: Keep a reference to call os.remove() in late Python + # finalization. + *, + _os_remove=os.remove, +): + # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' + # blocks due to the need to close the temporary file to work on Windows + # properly. + fd, raw_path = tempfile.mkstemp(suffix=suffix) + try: + try: + os.write(fd, reader()) + finally: + os.close(fd) + del reader + yield pathlib.Path(raw_path) + finally: + try: + _os_remove(raw_path) + except FileNotFoundError: + pass + + +def _temp_file(path): + return _tempfile(path.read_bytes, suffix=path.name) + + +def _is_present_dir(path: Traversable) -> bool: + """ + Some Traversables implement ``is_dir()`` to raise an + exception (i.e. ``FileNotFoundError``) when the + directory doesn't exist. This function wraps that call + to always return a boolean and only return True + if there's a dir and it exists. + """ + with contextlib.suppress(FileNotFoundError): + return path.is_dir() + return False + + +@functools.singledispatch +def as_file(path): + """ + Given a Traversable object, return that object as a + path on the local file system in a context manager. + """ + return _temp_dir(path) if _is_present_dir(path) else _temp_file(path) + + +@as_file.register(pathlib.Path) +@contextlib.contextmanager +def _(path): + """ + Degenerate behavior for pathlib.Path objects. + """ + yield path + + +@contextlib.contextmanager +def _temp_path(dir: tempfile.TemporaryDirectory): + """ + Wrap tempfile.TemporaryDirectory to return a pathlib object. + """ + with dir as result: + yield pathlib.Path(result) + + +@contextlib.contextmanager +def _temp_dir(path): + """ + Given a traversable dir, recursively replicate the whole tree + to the file system in a context manager. 
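+    The copy lives in a temporary directory that is removed when the + context exits.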
+ """ + assert path.is_dir() + with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: + yield _write_contents(temp_dir, path) + + +def _write_contents(target, source): + child = target.joinpath(source.name) + if source.is_dir(): + child.mkdir() + for item in source.iterdir(): + _write_contents(child, item) + else: + child.write_bytes(source.read_bytes()) + return child diff --git a/Python313_13_x64_Template/Lib/importlib/resources/_functional.py b/Python314_4_x64_Template/Lib/importlib/resources/_functional.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/resources/_functional.py rename to Python314_4_x64_Template/Lib/importlib/resources/_functional.py diff --git a/Python313_13_x64_Template/Lib/importlib/resources/_itertools.py b/Python314_4_x64_Template/Lib/importlib/resources/_itertools.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/resources/_itertools.py rename to Python314_4_x64_Template/Lib/importlib/resources/_itertools.py diff --git a/Python313_13_x64_Template/Lib/importlib/resources/abc.py b/Python314_4_x64_Template/Lib/importlib/resources/abc.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/resources/abc.py rename to Python314_4_x64_Template/Lib/importlib/resources/abc.py diff --git a/Python313_13_x64_Template/Lib/importlib/resources/readers.py b/Python314_4_x64_Template/Lib/importlib/resources/readers.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/resources/readers.py rename to Python314_4_x64_Template/Lib/importlib/resources/readers.py diff --git a/Python313_13_x64_Template/Lib/importlib/resources/simple.py b/Python314_4_x64_Template/Lib/importlib/resources/simple.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/resources/simple.py rename to Python314_4_x64_Template/Lib/importlib/resources/simple.py diff --git a/Python313_13_x64_Template/Lib/importlib/simple.py b/Python314_4_x64_Template/Lib/importlib/simple.py similarity index 100% rename from Python313_13_x64_Template/Lib/importlib/simple.py rename to Python314_4_x64_Template/Lib/importlib/simple.py diff --git a/Python314_4_x64_Template/Lib/importlib/util.py b/Python314_4_x64_Template/Lib/importlib/util.py new file mode 100644 index 00000000..2b564e9b --- /dev/null +++ b/Python314_4_x64_Template/Lib/importlib/util.py @@ -0,0 +1,279 @@ +"""Utility code for constructing importers, etc.""" +from ._abc import Loader +from ._bootstrap import module_from_spec +from ._bootstrap import _resolve_name +from ._bootstrap import spec_from_loader +from ._bootstrap import _find_spec +from ._bootstrap_external import MAGIC_NUMBER +from ._bootstrap_external import cache_from_source +from ._bootstrap_external import decode_source +from ._bootstrap_external import source_from_cache +from ._bootstrap_external import spec_from_file_location + +import _imp +import sys +import types + + +def source_hash(source_bytes): + "Return the hash of *source_bytes* as used in hash-based pyc files." 
+ return _imp.source_hash(_imp.pyc_magic_number_token, source_bytes) + + +def resolve_name(name, package): + """Resolve a relative module name to an absolute one.""" + if not name.startswith('.'): + return name + elif not package: + raise ImportError(f'no package specified for {repr(name)} ' + '(required for relative module names)') + level = 0 + for character in name: + if character != '.': + break + level += 1 + return _resolve_name(name[level:], package, level) + + +def _find_spec_from_path(name, path=None): + """Return the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. + + Dotted names do not have their parent packages implicitly imported. You will + most likely need to explicitly import all parent packages in the proper + order for a submodule to get the correct spec. + + """ + if name not in sys.modules: + return _find_spec(name, path) + else: + module = sys.modules[name] + if module is None: + return None + try: + spec = module.__spec__ + except AttributeError: + raise ValueError(f'{name}.__spec__ is not set') from None + else: + if spec is None: + raise ValueError(f'{name}.__spec__ is None') + return spec + + +def find_spec(name, package=None): + """Return the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. + + If the name is for a submodule (contains a dot), the parent module is + automatically imported. + + The name and package arguments work the same as importlib.import_module(). + In other words, relative module names (with leading dots) work. + + """ + fullname = resolve_name(name, package) if name.startswith('.') else name + if fullname not in sys.modules: + parent_name = fullname.rpartition('.')[0] + if parent_name: + parent = __import__(parent_name, fromlist=['__path__']) + try: + parent_path = parent.__path__ + except AttributeError as e: + raise ModuleNotFoundError( + f"__path__ attribute not found on {parent_name!r} " + f"while trying to find {fullname!r}", name=fullname) from e + else: + parent_path = None + return _find_spec(fullname, parent_path) + else: + module = sys.modules[fullname] + if module is None: + return None + try: + spec = module.__spec__ + except AttributeError: + raise ValueError(f'{name}.__spec__ is not set') from None + else: + if spec is None: + raise ValueError(f'{name}.__spec__ is None') + return spec + + +# Normally we would use contextlib.contextmanager. However, this module +# is imported by runpy, which means we want to avoid any unnecessary +# dependencies. Thus we use a class. + +class _incompatible_extension_module_restrictions: + """A context manager that can temporarily skip the compatibility check. + + NOTE: This function is meant to accommodate an unusual case; one + which is likely to eventually go away. There is a pretty good + chance this is not what you were looking for.
+ + WARNING: Using this function to disable the check can lead to + unexpected behavior and even crashes. It should only be used during + extension module development. + + If "disable_check" is True then the compatibility check will not + happen while the context manager is active. Otherwise the check + *will* happen. + + Normally, extensions that do not support multiple interpreters + may not be imported in a subinterpreter. That implies modules + that do not implement multi-phase init or that explicitly opt out. + + Likewise for modules imported in a subinterpreter with its own GIL + when the extension does not support a per-interpreter GIL. This + implies the module does not have a Py_mod_multiple_interpreters slot + set to Py_MOD_PER_INTERPRETER_GIL_SUPPORTED. + + In both cases, this context manager may be used to temporarily + disable the check for compatible extension modules. + + You can get the same effect as this function by implementing the + basic interface of multi-phase init (PEP 489) and lying about + support for multiple interpreters (or per-interpreter GIL). + """ + + def __init__(self, *, disable_check): + self.disable_check = bool(disable_check) + + def __enter__(self): + self.old = _imp._override_multi_interp_extensions_check(self.override) + return self + + def __exit__(self, *args): + old = self.old + del self.old + _imp._override_multi_interp_extensions_check(old) + + @property + def override(self): + return -1 if self.disable_check else 1 + + +class _LazyModule(types.ModuleType): + + """A subclass of the module type which triggers loading upon attribute access.""" + + def __getattribute__(self, attr): + """Trigger the load of the module and return the attribute.""" + __spec__ = object.__getattribute__(self, '__spec__') + loader_state = __spec__.loader_state + with loader_state['lock']: + # Only the first thread to get the lock should trigger the load + # and reset the module's class. The rest can now getattr(). + if object.__getattribute__(self, '__class__') is _LazyModule: + __class__ = loader_state['__class__'] + + # Reentrant calls from the same thread must be allowed to proceed without + # triggering the load again. + # exec_module() and self-referential imports are the primary ways this can + # happen, but in any case we must return something to avoid deadlock. + if loader_state['is_loading']: + return __class__.__getattribute__(self, attr) + loader_state['is_loading'] = True + + __dict__ = __class__.__getattribute__(self, '__dict__') + + # All module metadata must be gathered from __spec__ in order to avoid + # using mutated values. + # Get the original name to make sure no object substitution occurred + # in sys.modules. + original_name = __spec__.name + # Figure out exactly what attributes were mutated between the creation + # of the module and now. + attrs_then = loader_state['__dict__'] + attrs_now = __dict__ + attrs_updated = {} + for key, value in attrs_now.items(): + # Code that set an attribute may have kept a reference to the + # assigned object, making identity more important than equality. + if key not in attrs_then: + attrs_updated[key] = value + elif id(attrs_now[key]) != id(attrs_then[key]): + attrs_updated[key] = value + __spec__.loader.exec_module(self) + # If exec_module() was used directly there is no guarantee the module + # object was put into sys.modules.
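+            # When it is present, verify that no other object was bound to the + # same name while exec_module() ran; a substituted module is an error.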
+ if original_name in sys.modules: + if id(self) != id(sys.modules[original_name]): + raise ValueError(f"module object for {original_name!r} " + "substituted in sys.modules during a lazy " + "load") + # Update after loading since that's what would happen in an eager + # loading situation. + __dict__.update(attrs_updated) + # Finally, stop triggering this method, if the module did not + # already update its own __class__. + if isinstance(self, _LazyModule): + object.__setattr__(self, '__class__', __class__) + + return getattr(self, attr) + + def __delattr__(self, attr): + """Trigger the load and then perform the deletion.""" + # To trigger the load and raise an exception if the attribute + # doesn't exist. + self.__getattribute__(attr) + delattr(self, attr) + + +class LazyLoader(Loader): + + """A loader that creates a module which defers loading until attribute access.""" + + @staticmethod + def __check_eager_loader(loader): + if not hasattr(loader, 'exec_module'): + raise TypeError('loader must define exec_module()') + + @classmethod + def factory(cls, loader): + """Construct a callable which returns the eager loader made lazy.""" + cls.__check_eager_loader(loader) + return lambda *args, **kwargs: cls(loader(*args, **kwargs)) + + def __init__(self, loader): + self.__check_eager_loader(loader) + self.loader = loader + + def create_module(self, spec): + return self.loader.create_module(spec) + + def exec_module(self, module): + """Make the module load lazily.""" + # Threading is only needed for lazy loading, and importlib.util can + # be pulled in at interpreter startup, so defer until needed. + import threading + module.__spec__.loader = self.loader + module.__loader__ = self.loader + # Don't need to worry about deep-copying as trying to set an attribute + # on an object would have triggered the load, + # e.g. ``module.__spec__.loader = None`` would trigger a load from + # trying to access module.__spec__. + loader_state = {} + loader_state['__dict__'] = module.__dict__.copy() + loader_state['__class__'] = module.__class__ + loader_state['lock'] = threading.RLock() + loader_state['is_loading'] = False + module.__spec__.loader_state = loader_state + module.__class__ = _LazyModule + + +__all__ = ['LazyLoader', 'Loader', 'MAGIC_NUMBER', + 'cache_from_source', 'decode_source', 'find_spec', + 'module_from_spec', 'resolve_name', 'source_from_cache', + 'source_hash', 'spec_from_file_location', 'spec_from_loader'] diff --git a/Python314_4_x64_Template/Lib/inspect.py b/Python314_4_x64_Template/Lib/inspect.py new file mode 100644 index 00000000..2d229051 --- /dev/null +++ b/Python314_4_x64_Template/Lib/inspect.py @@ -0,0 +1,3409 @@ +"""Get useful information from live Python objects. + +This module encapsulates the interface provided by the internal special +attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion. +It also provides some help for examining source code and class layout. 
+ + Here are some of the useful functions provided by this module: + + ismodule(), isclass(), ismethod(), ispackage(), isfunction(), + isgeneratorfunction(), isgenerator(), istraceback(), isframe(), + iscode(), isbuiltin(), isroutine() - check object types + getmembers() - get members of an object that satisfy a given condition + + getfile(), getsourcefile(), getsource() - find an object's source code + getdoc(), getcomments() - get documentation on an object + getmodule() - determine the module that an object came from + getclasstree() - arrange classes so as to represent their hierarchy + + getargvalues(), getcallargs() - get info about function arguments + getfullargspec() - same, with support for Python 3 features + formatargvalues() - format an argument spec + getouterframes(), getinnerframes() - get info about frames + currentframe() - get the current stack frame + stack(), trace() - get info about frames on the stack or in a traceback + + signature() - get a Signature object for the callable +""" + +# This module is in the public domain. No warranties. + +__author__ = ('Ka-Ping Yee <ping@lfw.org>', + 'Yury Selivanov <yselivanov@sprymix.com>') + +__all__ = [ + "AGEN_CLOSED", + "AGEN_CREATED", + "AGEN_RUNNING", + "AGEN_SUSPENDED", + "ArgInfo", + "Arguments", + "Attribute", + "BlockFinder", + "BoundArguments", + "BufferFlags", + "CORO_CLOSED", + "CORO_CREATED", + "CORO_RUNNING", + "CORO_SUSPENDED", + "CO_ASYNC_GENERATOR", + "CO_COROUTINE", + "CO_GENERATOR", + "CO_ITERABLE_COROUTINE", + "CO_NESTED", + "CO_NEWLOCALS", + "CO_NOFREE", + "CO_OPTIMIZED", + "CO_VARARGS", + "CO_VARKEYWORDS", + "CO_HAS_DOCSTRING", + "CO_METHOD", + "ClassFoundException", + "ClosureVars", + "EndOfBlock", + "FrameInfo", + "FullArgSpec", + "GEN_CLOSED", + "GEN_CREATED", + "GEN_RUNNING", + "GEN_SUSPENDED", + "Parameter", + "Signature", + "TPFLAGS_IS_ABSTRACT", + "Traceback", + "classify_class_attrs", + "cleandoc", + "currentframe", + "findsource", + "formatannotation", + "formatannotationrelativeto", + "formatargvalues", + "get_annotations", + "getabsfile", + "getargs", + "getargvalues", + "getasyncgenlocals", + "getasyncgenstate", + "getattr_static", + "getblock", + "getcallargs", + "getclasstree", + "getclosurevars", + "getcomments", + "getcoroutinelocals", + "getcoroutinestate", + "getdoc", + "getfile", + "getframeinfo", + "getfullargspec", + "getgeneratorlocals", + "getgeneratorstate", + "getinnerframes", + "getlineno", + "getmembers", + "getmembers_static", + "getmodule", + "getmodulename", + "getmro", + "getouterframes", + "getsource", + "getsourcefile", + "getsourcelines", + "indentsize", + "isabstract", + "isasyncgen", + "isasyncgenfunction", + "isawaitable", + "isbuiltin", + "isclass", + "iscode", + "iscoroutine", + "iscoroutinefunction", + "isdatadescriptor", + "isframe", + "isfunction", + "isgenerator", + "isgeneratorfunction", + "isgetsetdescriptor", + "ismemberdescriptor", + "ismethod", + "ismethoddescriptor", + "ismethodwrapper", + "ismodule", + "ispackage", + "isroutine", + "istraceback", + "markcoroutinefunction", + "signature", + "stack", + "trace", + "unwrap", + "walktree", +] + + +import abc +from annotationlib import Format, ForwardRef +from annotationlib import get_annotations # re-exported +import ast +import dis +import collections.abc +import enum +import importlib.machinery +import itertools +import linecache +import os +import re +import sys +import tokenize +import token +import types +import functools +import builtins +from keyword import iskeyword +from operator import attrgetter +from collections import namedtuple,
OrderedDict +from weakref import ref as make_weakref + +# Create constants for the compiler flags in Include/code.h +# We try to get them from dis to avoid duplication +mod_dict = globals() +for k, v in dis.COMPILER_FLAG_NAMES.items(): + mod_dict["CO_" + v] = k +del k, v, mod_dict + +# See Include/object.h +TPFLAGS_IS_ABSTRACT = 1 << 20 + + +# ----------------------------------------------------------- type-checking +def ismodule(object): + """Return true if the object is a module.""" + return isinstance(object, types.ModuleType) + +def isclass(object): + """Return true if the object is a class.""" + return isinstance(object, type) + +def ismethod(object): + """Return true if the object is an instance method.""" + return isinstance(object, types.MethodType) + +def ispackage(object): + """Return true if the object is a package.""" + return ismodule(object) and hasattr(object, "__path__") + +def ismethoddescriptor(object): + """Return true if the object is a method descriptor. + + But not if ismethod() or isclass() or isfunction() are true. + + This is new in Python 2.2, and, for example, is true of int.__add__. + An object passing this test has a __get__ attribute, but not a + __set__ attribute or a __delete__ attribute. Beyond that, the set + of attributes varies; __name__ is usually sensible, and __doc__ + often is. + + Methods implemented via descriptors that also pass one of the other + tests return false from the ismethoddescriptor() test, simply because + the other tests promise more -- you can, e.g., count on having the + __func__ attribute (etc) when an object passes ismethod().""" + if isclass(object) or ismethod(object) or isfunction(object): + # mutual exclusion + return False + tp = type(object) + return (hasattr(tp, "__get__") + and not hasattr(tp, "__set__") + and not hasattr(tp, "__delete__")) + +def isdatadescriptor(object): + """Return true if the object is a data descriptor. + + Data descriptors have a __set__ or a __delete__ attribute. Examples are + properties (defined in Python) and getsets and members (defined in C). + Typically, data descriptors will also have __name__ and __doc__ attributes + (properties, getsets, and members have both of these attributes), but this + is not guaranteed.""" + if isclass(object) or ismethod(object) or isfunction(object): + # mutual exclusion + return False + tp = type(object) + return hasattr(tp, "__set__") or hasattr(tp, "__delete__") + +if hasattr(types, 'MemberDescriptorType'): + # CPython and equivalent + def ismemberdescriptor(object): + """Return true if the object is a member descriptor. + + Member descriptors are specialized descriptors defined in extension + modules.""" + return isinstance(object, types.MemberDescriptorType) +else: + # Other implementations + def ismemberdescriptor(object): + """Return true if the object is a member descriptor. + + Member descriptors are specialized descriptors defined in extension + modules.""" + return False + +if hasattr(types, 'GetSetDescriptorType'): + # CPython and equivalent + def isgetsetdescriptor(object): + """Return true if the object is a getset descriptor. + + getset descriptors are specialized descriptors defined in extension + modules.""" + return isinstance(object, types.GetSetDescriptorType) +else: + # Other implementations + def isgetsetdescriptor(object): + """Return true if the object is a getset descriptor. 
+ + getset descriptors are specialized descriptors defined in extension + modules.""" + return False + +def isfunction(object): + """Return true if the object is a user-defined function. + + Function objects provide these attributes: + __doc__ documentation string + __name__ name with which this function was defined + __qualname__ qualified name of this function + __module__ name of the module the function was defined in or None + __code__ code object containing compiled function bytecode + __defaults__ tuple of any default values for arguments + __globals__ global namespace in which this function was defined + __annotations__ dict of parameter annotations + __kwdefaults__ dict of keyword only parameters with defaults + __dict__ namespace which is supporting arbitrary function attributes + __closure__ a tuple of cells or None + __type_params__ tuple of type parameters""" + return isinstance(object, types.FunctionType) + +def _has_code_flag(f, flag): + """Return true if ``f`` is a function (or a method or functools.partial + wrapper wrapping a function or a functools.partialmethod wrapping a + function) whose code object has the given ``flag`` + set in its flags.""" + f = functools._unwrap_partialmethod(f) + while ismethod(f): + f = f.__func__ + f = functools._unwrap_partial(f) + if not (isfunction(f) or _signature_is_functionlike(f)): + return False + return bool(f.__code__.co_flags & flag) + +def isgeneratorfunction(obj): + """Return true if the object is a user-defined generator function. + + Generator function objects provide the same attributes as functions. + See help(isfunction) for a list of attributes.""" + return _has_code_flag(obj, CO_GENERATOR) + +# A marker for markcoroutinefunction and iscoroutinefunction. +_is_coroutine_mark = object() + +def _has_coroutine_mark(f): + while ismethod(f): + f = f.__func__ + f = functools._unwrap_partial(f) + return getattr(f, "_is_coroutine_marker", None) is _is_coroutine_mark + +def markcoroutinefunction(func): + """ + Decorator to ensure callable is recognised as a coroutine function. + """ + if hasattr(func, '__func__'): + func = func.__func__ + func._is_coroutine_marker = _is_coroutine_mark + return func + +def iscoroutinefunction(obj): + """Return true if the object is a coroutine function. + + Coroutine functions are normally defined with "async def" syntax, but may + be marked via markcoroutinefunction. + """ + return _has_code_flag(obj, CO_COROUTINE) or _has_coroutine_mark(obj) + +def isasyncgenfunction(obj): + """Return true if the object is an asynchronous generator function. + + Asynchronous generator functions are defined with "async def" + syntax and have "yield" expressions in their body. + """ + return _has_code_flag(obj, CO_ASYNC_GENERATOR) + +def isasyncgen(object): + """Return true if the object is an asynchronous generator.""" + return isinstance(object, types.AsyncGeneratorType) + +def isgenerator(object): + """Return true if the object is a generator. 
+ + Generator objects provide these attributes: + gi_code code object + gi_frame frame object or possibly None once the generator has + been exhausted + gi_running set to 1 when generator is executing, 0 otherwise + gi_suspended set to 1 when the generator is suspended at a yield point, 0 otherwise + gi_yieldfrom object being iterated by yield from or None + + __iter__() defined to support iteration over container + close() raises a new GeneratorExit exception inside the + generator to terminate the iteration + send() resumes the generator and "sends" a value that becomes + the result of the current yield-expression + throw() used to raise an exception inside the generator""" + return isinstance(object, types.GeneratorType) + +def iscoroutine(object): + """Return true if the object is a coroutine.""" + return isinstance(object, types.CoroutineType) + +def isawaitable(object): + """Return true if object can be passed to an ``await`` expression.""" + return (isinstance(object, types.CoroutineType) or + isinstance(object, types.GeneratorType) and + bool(object.gi_code.co_flags & CO_ITERABLE_COROUTINE) or + isinstance(object, collections.abc.Awaitable)) + +def istraceback(object): + """Return true if the object is a traceback. + + Traceback objects provide these attributes: + tb_frame frame object at this level + tb_lasti index of last attempted instruction in bytecode + tb_lineno current line number in Python source code + tb_next next inner traceback object (called by this level)""" + return isinstance(object, types.TracebackType) + +def isframe(object): + """Return true if the object is a frame object. + + Frame objects provide these attributes: + f_back next outer frame object (this frame's caller) + f_builtins built-in namespace seen by this frame + f_code code object being executed in this frame + f_globals global namespace seen by this frame + f_lasti index of last attempted instruction in bytecode + f_lineno current line number in Python source code + f_locals local namespace seen by this frame + f_trace tracing function for this frame, or None + f_trace_lines is a tracing event triggered for each source line? + f_trace_opcodes are per-opcode events being requested? + + clear() used to clear all references to local variables""" + return isinstance(object, types.FrameType) + +def iscode(object): + """Return true if the object is a code object. 
+
+    Code objects provide these attributes:
+        co_argcount         number of arguments (not including *, ** args
+                            or keyword only arguments)
+        co_code             string of raw compiled bytecode
+        co_cellvars         tuple of names of cell variables
+        co_consts           tuple of constants used in the bytecode
+        co_filename         name of file in which this code object was created
+        co_firstlineno      number of first line in Python source code
+        co_flags            bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
+                            | 16=nested | 32=generator | 64=nofree | 128=coroutine
+                            | 256=iterable_coroutine | 512=async_generator
+                            | 0x4000000=has_docstring
+        co_freevars         tuple of names of free variables
+        co_posonlyargcount  number of positional only arguments
+        co_kwonlyargcount   number of keyword only arguments (not including ** arg)
+        co_lnotab           encoded mapping of line numbers to bytecode indices
+        co_name             name with which this code object was defined
+        co_names            tuple of names other than arguments and function locals
+        co_nlocals          number of local variables
+        co_stacksize        virtual machine stack space required
+        co_varnames         tuple of names of arguments and local variables
+        co_qualname         fully qualified function name
+
+        co_lines()          returns an iterator that yields successive bytecode ranges
+        co_positions()      returns an iterator of source code positions for each bytecode instruction
+        replace()           returns a copy of the code object with new values"""
+    return isinstance(object, types.CodeType)
+
+def isbuiltin(object):
+    """Return true if the object is a built-in function or method.
+
+    Built-in functions and methods provide these attributes:
+        __doc__         documentation string
+        __name__        original name of this function or method
+        __self__        instance to which a method is bound, or None"""
+    return isinstance(object, types.BuiltinFunctionType)
+
+def ismethodwrapper(object):
+    """Return true if the object is a method wrapper."""
+    return isinstance(object, types.MethodWrapperType)
+
+def isroutine(object):
+    """Return true if the object is any kind of function or method."""
+    return (isbuiltin(object)
+            or isfunction(object)
+            or ismethod(object)
+            or ismethoddescriptor(object)
+            or ismethodwrapper(object)
+            or isinstance(object, functools._singledispatchmethod_get))
+
+def isabstract(object):
+    """Return true if the object is an abstract base class (ABC)."""
+    if not isinstance(object, type):
+        return False
+    if object.__flags__ & TPFLAGS_IS_ABSTRACT:
+        return True
+    if not issubclass(type(object), abc.ABCMeta):
+        return False
+    if hasattr(object, '__abstractmethods__'):
+        # It looks like ABCMeta.__new__ has finished running;
+        # TPFLAGS_IS_ABSTRACT should have been accurate.
+        return False
+    # It looks like ABCMeta.__new__ has not finished running yet; we're
+    # probably in __init_subclass__. We'll look for abstractmethods manually.
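+    # Editorial sketch (not upstream code): this manual fallback matters for
+    # code such as
+    #     class A(abc.ABC):
+    #         @abc.abstractmethod
+    #         def f(self): ...
+    # when isabstract(A) is called from __init_subclass__, before ABCMeta has
+    # filled in __abstractmethods__; the loops below find the
+    # __isabstractmethod__ marker directly on the namespace and the bases.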
+ for name, value in object.__dict__.items(): + if getattr(value, "__isabstractmethod__", False): + return True + for base in object.__bases__: + for name in getattr(base, "__abstractmethods__", ()): + value = getattr(object, name, None) + if getattr(value, "__isabstractmethod__", False): + return True + return False + +def _getmembers(object, predicate, getter): + results = [] + processed = set() + names = dir(object) + if isclass(object): + mro = getmro(object) + # add any DynamicClassAttributes to the list of names if object is a class; + # this may result in duplicate entries if, for example, a virtual + # attribute with the same name as a DynamicClassAttribute exists + try: + for base in object.__bases__: + for k, v in base.__dict__.items(): + if isinstance(v, types.DynamicClassAttribute): + names.append(k) + except AttributeError: + pass + else: + mro = () + for key in names: + # First try to get the value via getattr. Some descriptors don't + # like calling their __get__ (see bug #1785), so fall back to + # looking in the __dict__. + try: + value = getter(object, key) + # handle the duplicate key + if key in processed: + raise AttributeError + except AttributeError: + for base in mro: + if key in base.__dict__: + value = base.__dict__[key] + break + else: + # could be a (currently) missing slot member, or a buggy + # __dir__; discard and move on + continue + if not predicate or predicate(value): + results.append((key, value)) + processed.add(key) + results.sort(key=lambda pair: pair[0]) + return results + +def getmembers(object, predicate=None): + """Return all members of an object as (name, value) pairs sorted by name. + Optionally, only return members that satisfy a given predicate.""" + return _getmembers(object, predicate, getattr) + +def getmembers_static(object, predicate=None): + """Return all members of an object as (name, value) pairs sorted by name + without triggering dynamic lookup via the descriptor protocol, + __getattr__ or __getattribute__. Optionally, only return members that + satisfy a given predicate. + + Note: this function may not be able to retrieve all members + that getmembers can fetch (like dynamically created attributes) + and may find members that getmembers can't (like descriptors + that raise AttributeError). It can also return descriptor objects + instead of instance members in some cases. + """ + return _getmembers(object, predicate, getattr_static) + +Attribute = namedtuple('Attribute', 'name kind defining_class object') + +def classify_class_attrs(cls): + """Return list of attribute-descriptor tuples. + + For each name in dir(cls), the return list contains a 4-tuple + with these elements: + + 0. The name (a string). + + 1. The kind of attribute this is, one of these strings: + 'class method' created via classmethod() + 'static method' created via staticmethod() + 'property' created via property() + 'method' any other flavor of method or descriptor + 'data' not a method + + 2. The class which defined this attribute (a class). + + 3. The object as obtained by calling getattr; if this fails, or if the + resulting object does not live anywhere in the class' mro (including + metaclasses) then the object is looked up in the defining class's + dict (found by walking the mro). + + If one of the items in dir(cls) is stored in the metaclass it will now + be discovered and not have None be listed as the class in which it was + defined. Any items whose home class cannot be discovered are skipped. 
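+
+    A brief illustrative sketch (hypothetical names, added in this document):
+
+        class Demo:
+            @classmethod
+            def cm(cls): ...
+            @property
+            def prop(self): ...
+            value = 42
+
+        # classify_class_attrs(Demo) yields, besides entries inherited from
+        # object, roughly:
+        #   Attribute(name='cm', kind='class method', defining_class=Demo, ...)
+        #   Attribute(name='prop', kind='property', defining_class=Demo, ...)
+        #   Attribute(name='value', kind='data', defining_class=Demo, ...)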
+    """
+
+    mro = getmro(cls)
+    metamro = getmro(type(cls)) # for attributes stored in the metaclass
+    metamro = tuple(cls for cls in metamro if cls not in (type, object))
+    class_bases = (cls,) + mro
+    all_bases = class_bases + metamro
+    names = dir(cls)
+    # Add any DynamicClassAttributes to the list of names;
+    # this may result in duplicate entries if, for example, a virtual
+    # attribute with the same name as a DynamicClassAttribute exists.
+    for base in mro:
+        for k, v in base.__dict__.items():
+            if isinstance(v, types.DynamicClassAttribute) and v.fget is not None:
+                names.append(k)
+    result = []
+    processed = set()
+
+    for name in names:
+        # Get the object associated with the name, and where it was defined.
+        # Normal objects will be looked up with both getattr and directly in
+        # its class' dict (in case getattr fails [bug #1785], and also to look
+        # for a docstring).
+        # For DynamicClassAttributes on the second pass we only look in the
+        # class's dict.
+        #
+        # Getting an obj from the __dict__ sometimes reveals more than
+        # using getattr.  Static and class methods are dramatic examples.
+        homecls = None
+        get_obj = None
+        dict_obj = None
+        if name not in processed:
+            try:
+                if name == '__dict__':
+                    raise Exception("__dict__ is special, don't want the proxy")
+                get_obj = getattr(cls, name)
+            except Exception:
+                pass
+            else:
+                homecls = getattr(get_obj, "__objclass__", homecls)
+                if homecls not in class_bases:
+                    # if the resulting object does not live somewhere in the
+                    # mro, drop it and search the mro manually
+                    homecls = None
+                    last_cls = None
+                    # first look in the classes
+                    for srch_cls in class_bases:
+                        srch_obj = getattr(srch_cls, name, None)
+                        if srch_obj is get_obj:
+                            last_cls = srch_cls
+                    # then check the metaclasses
+                    for srch_cls in metamro:
+                        try:
+                            srch_obj = srch_cls.__getattr__(cls, name)
+                        except AttributeError:
+                            continue
+                        if srch_obj is get_obj:
+                            last_cls = srch_cls
+                    if last_cls is not None:
+                        homecls = last_cls
+        for base in all_bases:
+            if name in base.__dict__:
+                dict_obj = base.__dict__[name]
+                if homecls not in metamro:
+                    homecls = base
+                break
+        if homecls is None:
+            # unable to locate the attribute anywhere, most likely due to
+            # buggy custom __dir__; discard and move on
+            continue
+        obj = get_obj if get_obj is not None else dict_obj
+        # Classify the object or its descriptor.
+        if isinstance(dict_obj, (staticmethod, types.BuiltinMethodType)):
+            kind = "static method"
+            obj = dict_obj
+        elif isinstance(dict_obj, (classmethod, types.ClassMethodDescriptorType)):
+            kind = "class method"
+            obj = dict_obj
+        elif isinstance(dict_obj, property):
+            kind = "property"
+            obj = dict_obj
+        elif isroutine(obj):
+            kind = "method"
+        else:
+            kind = "data"
+        result.append(Attribute(name, kind, homecls, obj))
+        processed.add(name)
+    return result
+
+# ----------------------------------------------------------- class helpers
+
+def getmro(cls):
+    "Return tuple of base classes (including cls) in method resolution order."
+    return cls.__mro__
+
+# -------------------------------------------------------- function helpers
+
+def unwrap(func, *, stop=None):
+    """Get the object wrapped by *func*.
+
+    Follows the chain of :attr:`__wrapped__` attributes returning the last
+    object in the chain.
+
+    *stop* is an optional callback accepting an object in the wrapper chain
+    as its sole argument that allows the unwrapping to be terminated early if
+    the callback returns a true value. If the callback never returns a true
+    value, the last object in the chain is returned as usual. For example,
+    :func:`signature` uses this to stop unwrapping if any object in the
+    chain has a ``__signature__`` attribute defined.
+
+    :exc:`ValueError` is raised if a cycle is encountered.
+
+    """
+    f = func  # remember the original func for error reporting
+    # Memoise by id to tolerate non-hashable objects, but store objects to
+    # ensure they aren't destroyed, which would allow their IDs to be reused.
+    memo = {id(f): f}
+    recursion_limit = sys.getrecursionlimit()
+    while not isinstance(func, type) and hasattr(func, '__wrapped__'):
+        if stop is not None and stop(func):
+            break
+        func = func.__wrapped__
+        id_func = id(func)
+        if (id_func in memo) or (len(memo) >= recursion_limit):
+            raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
+        memo[id_func] = func
+    return func
+
+# -------------------------------------------------- source code extraction
+def indentsize(line):
+    """Return the indent size, in spaces, at the start of a line of text."""
+    expline = line.expandtabs()
+    return len(expline) - len(expline.lstrip())
+
+def _findclass(func):
+    cls = sys.modules.get(func.__module__)
+    if cls is None:
+        return None
+    for name in func.__qualname__.split('.')[:-1]:
+        cls = getattr(cls, name)
+    if not isclass(cls):
+        return None
+    return cls
+
+def _finddoc(obj):
+    if isclass(obj):
+        for base in obj.__mro__:
+            if base is not object:
+                try:
+                    doc = base.__doc__
+                except AttributeError:
+                    continue
+                if doc is not None:
+                    return doc
+        return None
+
+    if ismethod(obj):
+        name = obj.__func__.__name__
+        self = obj.__self__
+        if (isclass(self) and
+            getattr(getattr(self, name, None), '__func__') is obj.__func__):
+            # classmethod
+            cls = self
+        else:
+            cls = self.__class__
+    elif isfunction(obj):
+        name = obj.__name__
+        cls = _findclass(obj)
+        if cls is None or getattr(cls, name) is not obj:
+            return None
+    elif isbuiltin(obj):
+        name = obj.__name__
+        self = obj.__self__
+        if (isclass(self) and
+            self.__qualname__ + '.' + name == obj.__qualname__):
+            # classmethod
+            cls = self
+        else:
+            cls = self.__class__
+    # Should be tested before isdatadescriptor().
+    elif isinstance(obj, property):
+        name = obj.__name__
+        cls = _findclass(obj.fget)
+        if cls is None or getattr(cls, name) is not obj:
+            return None
+    elif ismethoddescriptor(obj) or isdatadescriptor(obj):
+        name = obj.__name__
+        cls = obj.__objclass__
+        if getattr(cls, name) is not obj:
+            return None
+        if ismemberdescriptor(obj):
+            slots = getattr(cls, '__slots__', None)
+            if isinstance(slots, dict) and name in slots:
+                return slots[name]
+    else:
+        return None
+    for base in cls.__mro__:
+        try:
+            doc = getattr(base, name).__doc__
+        except AttributeError:
+            continue
+        if doc is not None:
+            return doc
+    return None
+
+def getdoc(object):
+    """Get the documentation string for an object.
+
+    All tabs are expanded to spaces.  To clean up docstrings that are
+    indented to line up with blocks of code, any whitespace that can be
+    uniformly removed from the second line onwards is removed."""
+    try:
+        doc = object.__doc__
+    except AttributeError:
+        return None
+    if doc is None:
+        try:
+            doc = _finddoc(object)
+        except (AttributeError, TypeError):
+            return None
+    if not isinstance(doc, str):
+        return None
+    return cleandoc(doc)
+
+def cleandoc(doc):
+    """Clean up indentation from docstrings.
+
+    Any whitespace that can be uniformly removed from the second line
+    onwards is removed."""
+    lines = doc.expandtabs().split('\n')
+
+    # Find minimum indentation of any non-blank lines after first line.
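+    # Editorial worked example: given lines ["Header", "    body", "      more"],
+    # the indents after the first line are 4 and 6, so margin ends up as 4 and
+    # four leading spaces are later stripped from every line but the first.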
+ margin = sys.maxsize + for line in lines[1:]: + content = len(line.lstrip(' ')) + if content: + indent = len(line) - content + margin = min(margin, indent) + # Remove indentation. + if lines: + lines[0] = lines[0].lstrip(' ') + if margin < sys.maxsize: + for i in range(1, len(lines)): + lines[i] = lines[i][margin:] + # Remove any trailing or leading blank lines. + while lines and not lines[-1]: + lines.pop() + while lines and not lines[0]: + lines.pop(0) + return '\n'.join(lines) + + +def getfile(object): + """Work out which source or compiled file an object was defined in.""" + if ismodule(object): + if getattr(object, '__file__', None): + return object.__file__ + raise TypeError('{!r} is a built-in module'.format(object)) + if isclass(object): + if hasattr(object, '__module__'): + module = sys.modules.get(object.__module__) + if getattr(module, '__file__', None): + return module.__file__ + if object.__module__ == '__main__': + raise OSError('source code not available') + raise TypeError('{!r} is a built-in class'.format(object)) + if ismethod(object): + object = object.__func__ + if isfunction(object): + object = object.__code__ + if istraceback(object): + object = object.tb_frame + if isframe(object): + object = object.f_code + if iscode(object): + return object.co_filename + raise TypeError('module, class, method, function, traceback, frame, or ' + 'code object was expected, got {}'.format( + type(object).__name__)) + +def getmodulename(path): + """Return the module name for a given file, or None.""" + fname = os.path.basename(path) + # Check for paths that look like an actual module file + suffixes = [(-len(suffix), suffix) + for suffix in importlib.machinery.all_suffixes()] + suffixes.sort() # try longest suffixes first, in case they overlap + for neglen, suffix in suffixes: + if fname.endswith(suffix): + return fname[:neglen] + return None + +def getsourcefile(object): + """Return the filename that can be used to locate an object's source. + Return None if no way can be identified to get the source. + """ + filename = getfile(object) + all_bytecode_suffixes = importlib.machinery.BYTECODE_SUFFIXES[:] + if any(filename.endswith(s) for s in all_bytecode_suffixes): + filename = (os.path.splitext(filename)[0] + + importlib.machinery.SOURCE_SUFFIXES[0]) + elif any(filename.endswith(s) for s in + importlib.machinery.EXTENSION_SUFFIXES): + return None + elif filename.endswith(".fwork"): + # Apple mobile framework markers are another type of non-source file + return None + + # return a filename found in the linecache even if it doesn't exist on disk + if filename in linecache.cache: + return filename + if os.path.exists(filename): + return filename + # only return a non-existent filename if the module has a PEP 302 loader + module = getmodule(object, filename) + if getattr(module, '__loader__', None) is not None: + return filename + elif getattr(getattr(module, "__spec__", None), "loader", None) is not None: + return filename + +def getabsfile(object, _filename=None): + """Return an absolute path to the source or compiled file for an object. 
+
+    The idea is for each object to have a unique origin, so this routine
+    normalizes the result as much as possible."""
+    if _filename is None:
+        _filename = getsourcefile(object) or getfile(object)
+    return os.path.normcase(os.path.abspath(_filename))
+
+modulesbyfile = {}
+_filesbymodname = {}
+
+def getmodule(object, _filename=None):
+    """Return the module an object was defined in, or None if not found."""
+    if ismodule(object):
+        return object
+    if hasattr(object, '__module__'):
+        return sys.modules.get(object.__module__)
+
+    # Try the filename to modulename cache
+    if _filename is not None and _filename in modulesbyfile:
+        return sys.modules.get(modulesbyfile[_filename])
+    # Try the cache again with the absolute file name
+    try:
+        file = getabsfile(object, _filename)
+    except (TypeError, FileNotFoundError):
+        return None
+    if file in modulesbyfile:
+        return sys.modules.get(modulesbyfile[file])
+    # Update the filename to module name cache and check yet again
+    # Copy sys.modules in order to cope with changes while iterating
+    for modname, module in sys.modules.copy().items():
+        if ismodule(module) and hasattr(module, '__file__'):
+            f = module.__file__
+            if f == _filesbymodname.get(modname, None):
+                # Have already mapped this module, so skip it
+                continue
+            _filesbymodname[modname] = f
+            f = getabsfile(module)
+            # Always map to the name the module knows itself by
+            modulesbyfile[f] = modulesbyfile[
+                os.path.realpath(f)] = module.__name__
+    if file in modulesbyfile:
+        return sys.modules.get(modulesbyfile[file])
+    # Check the main module
+    main = sys.modules['__main__']
+    if not hasattr(object, '__name__'):
+        return None
+    if hasattr(main, object.__name__):
+        mainobject = getattr(main, object.__name__)
+        if mainobject is object:
+            return main
+    # Check builtins
+    builtin = sys.modules['builtins']
+    if hasattr(builtin, object.__name__):
+        builtinobject = getattr(builtin, object.__name__)
+        if builtinobject is object:
+            return builtin
+
+
+class ClassFoundException(Exception):
+    pass
+
+
+def findsource(object):
+    """Return the entire source file and starting line number for an object.
+
+    The argument may be a module, class, method, function, traceback, frame,
+    or code object.  The source code is returned as a list of all the lines
+    in the file and the line number indexes a line in that list.  An OSError
+    is raised if the source code cannot be retrieved."""
+
+    file = getsourcefile(object)
+    if file:
+        # Invalidate cache if needed.
+        linecache.checkcache(file)
+    else:
+        file = getfile(object)
+        # Allow filenames in form of "<something>" to pass through.
+        # `doctest` monkeypatches `linecache` module to enable
+        # inspection, so let `linecache.getlines` to be called.
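+        # Editorial note: pseudo-filenames such as "<stdin>" or
+        # "<doctest ...>" pass the check below and are handed to linecache,
+        # which doctest may have monkeypatched to serve their lines.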
+ if (not (file.startswith('<') and file.endswith('>'))) or file.endswith('.fwork'): + raise OSError('source code not available') + + module = getmodule(object, file) + if module: + lines = linecache.getlines(file, module.__dict__) + if not lines and file.startswith('<') and hasattr(object, "__code__"): + lines = linecache._getlines_from_code(object.__code__) + else: + lines = linecache.getlines(file) + if not lines: + raise OSError('could not get source code') + + if ismodule(object): + return lines, 0 + + if isclass(object): + try: + lnum = vars(object)['__firstlineno__'] - 1 + except (TypeError, KeyError): + raise OSError('source code not available') + if lnum >= len(lines): + raise OSError('lineno is out of bounds') + return lines, lnum + + if ismethod(object): + object = object.__func__ + if isfunction(object): + object = object.__code__ + if istraceback(object): + object = object.tb_frame + if isframe(object): + object = object.f_code + if iscode(object): + if not hasattr(object, 'co_firstlineno'): + raise OSError('could not find function definition') + lnum = object.co_firstlineno - 1 + if lnum >= len(lines): + raise OSError('lineno is out of bounds') + return lines, lnum + raise OSError('could not find code object') + +def getcomments(object): + """Get lines of comments immediately preceding an object's source code. + + Returns None when source can't be found. + """ + try: + lines, lnum = findsource(object) + except (OSError, TypeError): + return None + + if ismodule(object): + # Look for a comment block at the top of the file. + start = 0 + if lines and lines[0][:2] == '#!': start = 1 + while start < len(lines) and lines[start].strip() in ('', '#'): + start = start + 1 + if start < len(lines) and lines[start][:1] == '#': + comments = [] + end = start + while end < len(lines) and lines[end][:1] == '#': + comments.append(lines[end].expandtabs()) + end = end + 1 + return ''.join(comments) + + # Look for a preceding block of comments at the same indentation. + elif lnum > 0: + indent = indentsize(lines[lnum]) + end = lnum - 1 + if end >= 0 and lines[end].lstrip()[:1] == '#' and \ + indentsize(lines[end]) == indent: + comments = [lines[end].expandtabs().lstrip()] + if end > 0: + end = end - 1 + comment = lines[end].expandtabs().lstrip() + while comment[:1] == '#' and indentsize(lines[end]) == indent: + comments[:0] = [comment] + end = end - 1 + if end < 0: break + comment = lines[end].expandtabs().lstrip() + while comments and comments[0].strip() == '#': + comments[:1] = [] + while comments and comments[-1].strip() == '#': + comments[-1:] = [] + return ''.join(comments) + +class EndOfBlock(Exception): pass + +class BlockFinder: + """Provide a tokeneater() method to detect the end of a code block.""" + def __init__(self): + self.indent = 0 + self.singleline = False + self.started = False + self.passline = False + self.indecorator = False + self.last = 1 + self.body_col0 = None + + def tokeneater(self, type, token, srowcol, erowcol, line): + if not self.started and not self.indecorator: + if type in (tokenize.INDENT, tokenize.COMMENT, tokenize.NL): + pass + elif token == "async": + pass + # skip any decorators + elif token == "@": + self.indecorator = True + else: + # For "def" and "class" scan to the end of the block. + # For "lambda" and generator expression scan to + # the end of the logical line. 
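+                # Editorial example: "f = lambda x: x + 1" is a single
+                # logical line, so the block ends at the first NEWLINE;
+                # a "def f():" block instead runs to its matching DEDENT.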
+                self.singleline = token not in ("def", "class")
+                self.started = True
+            self.passline = True    # skip to the end of the line
+        elif type == tokenize.NEWLINE:
+            self.passline = False   # stop skipping when a NEWLINE is seen
+            self.last = srowcol[0]
+            if self.singleline:
+                raise EndOfBlock
+            # hitting a NEWLINE when in a decorator without args
+            # ends the decorator
+            if self.indecorator:
+                self.indecorator = False
+        elif self.passline:
+            pass
+        elif type == tokenize.INDENT:
+            if self.body_col0 is None and self.started:
+                self.body_col0 = erowcol[1]
+            self.indent = self.indent + 1
+            self.passline = True
+        elif type == tokenize.DEDENT:
+            self.indent = self.indent - 1
+            # the end of matching indent/dedent pairs ends a block
+            # (note that this only works for "def"/"class" blocks,
+            #  not e.g. for "if: else:" or "try: finally:" blocks)
+            if self.indent <= 0:
+                raise EndOfBlock
+        elif type == tokenize.COMMENT:
+            if self.body_col0 is not None and srowcol[1] >= self.body_col0:
+                # Include comments if indented at least as much as the block
+                self.last = srowcol[0]
+        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
+            # any other token on the same indentation level ends the previous
+            # block as well, except the pseudo-tokens COMMENT and NL.
+            raise EndOfBlock
+
+def getblock(lines):
+    """Extract the block of code at the top of the given list of lines."""
+    blockfinder = BlockFinder()
+    try:
+        tokens = tokenize.generate_tokens(iter(lines).__next__)
+        for _token in tokens:
+            blockfinder.tokeneater(*_token)
+    except (EndOfBlock, IndentationError):
+        pass
+    except SyntaxError as e:
+        if "unmatched" not in e.msg:
+            raise e from None
+        _, *_token_info = _token
+        try:
+            blockfinder.tokeneater(tokenize.NEWLINE, *_token_info)
+        except (EndOfBlock, IndentationError):
+            pass
+    return lines[:blockfinder.last]
+
+def getsourcelines(object):
+    """Return a list of source lines and starting line number for an object.
+
+    The argument may be a module, class, method, function, traceback, frame,
+    or code object.  The source code is returned as a list of the lines
+    corresponding to the object and the line number indicates where in the
+    original source file the first line of code was found.  An OSError is
+    raised if the source code cannot be retrieved."""
+    object = unwrap(object)
+    lines, lnum = findsource(object)
+
+    if istraceback(object):
+        object = object.tb_frame
+
+    # for module or frame that corresponds to module, return all source lines
+    if (ismodule(object) or
+        (isframe(object) and object.f_code.co_name == "<module>")):
+        return lines, 0
+    else:
+        return getblock(lines[lnum:]), lnum + 1
+
+def getsource(object):
+    """Return the text of the source code for an object.
+
+    The argument may be a module, class, method, function, traceback, frame,
+    or code object.  The source code is returned as a single string.  An
+    OSError is raised if the source code cannot be retrieved."""
+    lines, lnum = getsourcelines(object)
+    return ''.join(lines)
+
+# --------------------------------------------------- class tree extraction
+def walktree(classes, children, parent):
+    """Recursive helper function for getclasstree()."""
+    results = []
+    classes.sort(key=attrgetter('__module__', '__name__'))
+    for c in classes:
+        results.append((c, c.__bases__))
+        if c in children:
+            results.append(walktree(children[c], children, c))
+    return results
+
+def getclasstree(classes, unique=False):
+    """Arrange the given list of classes into a hierarchy of nested lists.
+
+    Where a nested list appears, it contains classes derived from the class
+    whose entry immediately precedes the list.  Each entry is a 2-tuple
+    containing a class and a tuple of its base classes.  If the 'unique'
+    argument is true, exactly one entry appears in the returned structure
+    for each class in the given list.  Otherwise, classes using multiple
+    inheritance and their descendants will appear multiple times."""
+    children = {}
+    roots = []
+    for c in classes:
+        if c.__bases__:
+            for parent in c.__bases__:
+                if parent not in children:
+                    children[parent] = []
+                if c not in children[parent]:
+                    children[parent].append(c)
+                if unique and parent in classes: break
+        elif c not in roots:
+            roots.append(c)
+    for parent in children:
+        if parent not in classes:
+            roots.append(parent)
+    return walktree(roots, children, None)
+
+# ------------------------------------------------ argument list extraction
+Arguments = namedtuple('Arguments', 'args, varargs, varkw')
+
+def getargs(co):
+    """Get information about the arguments accepted by a code object.
+
+    Three things are returned: (args, varargs, varkw), where
+    'args' is the list of argument names.  Keyword-only arguments are
+    appended.  'varargs' and 'varkw' are the names of the * and **
+    arguments or None."""
+    if not iscode(co):
+        raise TypeError('{!r} is not a code object'.format(co))
+
+    names = co.co_varnames
+    nargs = co.co_argcount
+    nkwargs = co.co_kwonlyargcount
+    args = list(names[:nargs])
+    kwonlyargs = list(names[nargs:nargs+nkwargs])
+
+    nargs += nkwargs
+    varargs = None
+    if co.co_flags & CO_VARARGS:
+        varargs = co.co_varnames[nargs]
+        nargs = nargs + 1
+    varkw = None
+    if co.co_flags & CO_VARKEYWORDS:
+        varkw = co.co_varnames[nargs]
+    return Arguments(args + kwonlyargs, varargs, varkw)
+
+
+FullArgSpec = namedtuple('FullArgSpec',
+    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
+
+def getfullargspec(func):
+    """Get the names and default values of a callable object's parameters.
+
+    A tuple of seven things is returned:
+    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
+    'args' is a list of the parameter names.
+    'varargs' and 'varkw' are the names of the * and ** parameters or None.
+    'defaults' is an n-tuple of the default values of the last n parameters.
+    'kwonlyargs' is a list of keyword-only parameter names.
+    'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
+    'annotations' is a dictionary mapping parameter names to annotations.
+
+    Notable differences from inspect.signature():
+      - the "self" parameter is always reported, even for bound methods
+      - wrapper chains defined by __wrapped__ are *not* unwrapped automatically
+    """
+    try:
+        # Re: `skip_bound_arg=False`
+        #
+        # There is a notable difference in behaviour between getfullargspec
+        # and Signature: the former always returns 'self' parameter for bound
+        # methods, whereas the Signature always shows the actual calling
+        # signature of the passed object.
+        #
+        # To simulate this behaviour, we "unbind" bound methods, to trick
+        # inspect.signature to always return their first parameter ("self",
+        # usually)
+
+        # Re: `follow_wrapper_chains=False`
+        #
+        # getfullargspec() historically ignored __wrapped__ attributes,
+        # so we ensure that remains the case in 3.3+
+
+        sig = _signature_from_callable(func,
+                                       follow_wrapper_chains=False,
+                                       skip_bound_arg=False,
+                                       sigcls=Signature,
+                                       eval_str=False)
+    except Exception as ex:
+        # Most of the time 'signature' will raise ValueError.
+ # But, it can also raise AttributeError, and, maybe something + # else. So to be fully backwards compatible, we catch all + # possible exceptions here, and reraise a TypeError. + raise TypeError('unsupported callable') from ex + + args = [] + varargs = None + varkw = None + posonlyargs = [] + kwonlyargs = [] + annotations = {} + defaults = () + kwdefaults = {} + + if sig.return_annotation is not sig.empty: + annotations['return'] = sig.return_annotation + + for param in sig.parameters.values(): + kind = param.kind + name = param.name + + if kind is _POSITIONAL_ONLY: + posonlyargs.append(name) + if param.default is not param.empty: + defaults += (param.default,) + elif kind is _POSITIONAL_OR_KEYWORD: + args.append(name) + if param.default is not param.empty: + defaults += (param.default,) + elif kind is _VAR_POSITIONAL: + varargs = name + elif kind is _KEYWORD_ONLY: + kwonlyargs.append(name) + if param.default is not param.empty: + kwdefaults[name] = param.default + elif kind is _VAR_KEYWORD: + varkw = name + + if param.annotation is not param.empty: + annotations[name] = param.annotation + + if not kwdefaults: + # compatibility with 'func.__kwdefaults__' + kwdefaults = None + + if not defaults: + # compatibility with 'func.__defaults__' + defaults = None + + return FullArgSpec(posonlyargs + args, varargs, varkw, defaults, + kwonlyargs, kwdefaults, annotations) + + +ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals') + +def getargvalues(frame): + """Get information about arguments passed into a particular frame. + + A tuple of four things is returned: (args, varargs, varkw, locals). + 'args' is a list of the argument names. + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'locals' is the locals dictionary of the given frame.""" + args, varargs, varkw = getargs(frame.f_code) + return ArgInfo(args, varargs, varkw, frame.f_locals) + +def formatannotation(annotation, base_module=None, *, quote_annotation_strings=True): + if not quote_annotation_strings and isinstance(annotation, str): + return annotation + if getattr(annotation, '__module__', None) == 'typing': + def repl(match): + text = match.group() + return text.removeprefix('typing.') + return re.sub(r'[\w\.]+', repl, repr(annotation)) + if isinstance(annotation, types.GenericAlias): + return str(annotation) + if isinstance(annotation, type): + if annotation.__module__ in ('builtins', base_module): + return annotation.__qualname__ + return annotation.__module__+'.'+annotation.__qualname__ + if isinstance(annotation, ForwardRef): + return annotation.__forward_arg__ + return repr(annotation) + +def formatannotationrelativeto(object): + module = getattr(object, '__module__', None) + def _formatannotation(annotation): + return formatannotation(annotation, module) + return _formatannotation + + +def formatargvalues(args, varargs, varkw, locals, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value)): + """Format an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. 
The ninth + argument is an optional function to format the sequence of arguments.""" + def convert(name, locals=locals, + formatarg=formatarg, formatvalue=formatvalue): + return formatarg(name) + formatvalue(locals[name]) + specs = [] + for i in range(len(args)): + specs.append(convert(args[i])) + if varargs: + specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) + if varkw: + specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) + return '(' + ', '.join(specs) + ')' + +def _missing_arguments(f_name, argnames, pos, values): + names = [repr(name) for name in argnames if name not in values] + missing = len(names) + if missing == 1: + s = names[0] + elif missing == 2: + s = "{} and {}".format(*names) + else: + tail = ", {} and {}".format(*names[-2:]) + del names[-2:] + s = ", ".join(names) + tail + raise TypeError("%s() missing %i required %s argument%s: %s" % + (f_name, missing, + "positional" if pos else "keyword-only", + "" if missing == 1 else "s", s)) + +def _too_many(f_name, args, kwonly, varargs, defcount, given, values): + atleast = len(args) - defcount + kwonly_given = len([arg for arg in kwonly if arg in values]) + if varargs: + plural = atleast != 1 + sig = "at least %d" % (atleast,) + elif defcount: + plural = True + sig = "from %d to %d" % (atleast, len(args)) + else: + plural = len(args) != 1 + sig = str(len(args)) + kwonly_sig = "" + if kwonly_given: + msg = " positional argument%s (and %d keyword-only argument%s)" + kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given, + "s" if kwonly_given != 1 else "")) + raise TypeError("%s() takes %s positional argument%s but %d%s %s given" % + (f_name, sig, "s" if plural else "", given, kwonly_sig, + "was" if given == 1 and not kwonly_given else "were")) + +def getcallargs(func, /, *positional, **named): + """Get the mapping of arguments to values. 
+ + A dict is returned, with keys the function argument names (including the + names of the * and ** arguments, if any), and values the respective bound + values from 'positional' and 'named'.""" + spec = getfullargspec(func) + args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec + f_name = func.__name__ + arg2value = {} + + + if ismethod(func) and func.__self__ is not None: + # implicit 'self' (or 'cls' for classmethods) argument + positional = (func.__self__,) + positional + num_pos = len(positional) + num_args = len(args) + num_defaults = len(defaults) if defaults else 0 + + n = min(num_pos, num_args) + for i in range(n): + arg2value[args[i]] = positional[i] + if varargs: + arg2value[varargs] = tuple(positional[n:]) + possible_kwargs = set(args + kwonlyargs) + if varkw: + arg2value[varkw] = {} + for kw, value in named.items(): + if kw not in possible_kwargs: + if not varkw: + raise TypeError("%s() got an unexpected keyword argument %r" % + (f_name, kw)) + arg2value[varkw][kw] = value + continue + if kw in arg2value: + raise TypeError("%s() got multiple values for argument %r" % + (f_name, kw)) + arg2value[kw] = value + if num_pos > num_args and not varargs: + _too_many(f_name, args, kwonlyargs, varargs, num_defaults, + num_pos, arg2value) + if num_pos < num_args: + req = args[:num_args - num_defaults] + for arg in req: + if arg not in arg2value: + _missing_arguments(f_name, req, True, arg2value) + for i, arg in enumerate(args[num_args - num_defaults:]): + if arg not in arg2value: + arg2value[arg] = defaults[i] + missing = 0 + for kwarg in kwonlyargs: + if kwarg not in arg2value: + if kwonlydefaults and kwarg in kwonlydefaults: + arg2value[kwarg] = kwonlydefaults[kwarg] + else: + missing += 1 + if missing: + _missing_arguments(f_name, kwonlyargs, False, arg2value) + return arg2value + +ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound') + +def getclosurevars(func): + """ + Get the mapping of free variables to their current values. + + Returns a named tuple of dicts mapping the current nonlocal, global + and builtin references as seen by the body of the function. A final + set of unbound names that could not be resolved is also provided. 
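+
+    An illustrative sketch (hypothetical function, added editorially):
+
+        def outer():
+            x = 1
+            def inner():
+                return x + len("abc")
+            return inner
+
+        # getclosurevars(outer()) would report roughly:
+        #   nonlocals={'x': 1}, globals={},
+        #   builtins={'len': <built-in function len>}, unbound=set()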
+ """ + + if ismethod(func): + func = func.__func__ + + if not isfunction(func): + raise TypeError("{!r} is not a Python function".format(func)) + + code = func.__code__ + # Nonlocal references are named in co_freevars and resolved + # by looking them up in __closure__ by positional index + if func.__closure__ is None: + nonlocal_vars = {} + else: + nonlocal_vars = { + var : cell.cell_contents + for var, cell in zip(code.co_freevars, func.__closure__) + } + + # Global and builtin references are named in co_names and resolved + # by looking them up in __globals__ or __builtins__ + global_ns = func.__globals__ + builtin_ns = global_ns.get("__builtins__", builtins.__dict__) + if ismodule(builtin_ns): + builtin_ns = builtin_ns.__dict__ + global_vars = {} + builtin_vars = {} + unbound_names = set() + global_names = set() + for instruction in dis.get_instructions(code): + opname = instruction.opname + name = instruction.argval + if opname == "LOAD_ATTR": + unbound_names.add(name) + elif opname == "LOAD_GLOBAL": + global_names.add(name) + for name in global_names: + try: + global_vars[name] = global_ns[name] + except KeyError: + try: + builtin_vars[name] = builtin_ns[name] + except KeyError: + unbound_names.add(name) + + return ClosureVars(nonlocal_vars, global_vars, + builtin_vars, unbound_names) + +# -------------------------------------------------- stack frame extraction + +_Traceback = namedtuple('_Traceback', 'filename lineno function code_context index') + +class Traceback(_Traceback): + def __new__(cls, filename, lineno, function, code_context, index, *, positions=None): + instance = super().__new__(cls, filename, lineno, function, code_context, index) + instance.positions = positions + return instance + + def __repr__(self): + return ('Traceback(filename={!r}, lineno={!r}, function={!r}, ' + 'code_context={!r}, index={!r}, positions={!r})'.format( + self.filename, self.lineno, self.function, self.code_context, + self.index, self.positions)) + +def _get_code_position_from_tb(tb): + code, instruction_index = tb.tb_frame.f_code, tb.tb_lasti + return _get_code_position(code, instruction_index) + +def _get_code_position(code, instruction_index): + if instruction_index < 0: + return (None, None, None, None) + positions_gen = code.co_positions() + # The nth entry in code.co_positions() corresponds to instruction (2*n)th since Python 3.10+ + return next(itertools.islice(positions_gen, instruction_index // 2, None)) + +def getframeinfo(frame, context=1): + """Get information about a frame or traceback object. + + A tuple of five things is returned: the filename, the line number of + the current line, the function name, a list of lines of context from + the source code, and the index of the current line within that list. 
+ The optional second argument specifies the number of lines of context + to return, which are centered around the current line.""" + if istraceback(frame): + positions = _get_code_position_from_tb(frame) + lineno = frame.tb_lineno + frame = frame.tb_frame + else: + lineno = frame.f_lineno + positions = _get_code_position(frame.f_code, frame.f_lasti) + + if positions[0] is None: + frame, *positions = (frame, lineno, *positions[1:]) + else: + frame, *positions = (frame, *positions) + + lineno = positions[0] + + if not isframe(frame): + raise TypeError('{!r} is not a frame or traceback object'.format(frame)) + + filename = getsourcefile(frame) or getfile(frame) + if context > 0: + start = lineno - 1 - context//2 + try: + lines, lnum = findsource(frame) + except OSError: + lines = index = None + else: + start = max(0, min(start, len(lines) - context)) + lines = lines[start:start+context] + index = lineno - 1 - start + else: + lines = index = None + + return Traceback(filename, lineno, frame.f_code.co_name, lines, + index, positions=dis.Positions(*positions)) + +def getlineno(frame): + """Get the line number from a frame object, allowing for optimization.""" + # FrameType.f_lineno is now a descriptor that grovels co_lnotab + return frame.f_lineno + +_FrameInfo = namedtuple('_FrameInfo', ('frame',) + Traceback._fields) +class FrameInfo(_FrameInfo): + def __new__(cls, frame, filename, lineno, function, code_context, index, *, positions=None): + instance = super().__new__(cls, frame, filename, lineno, function, code_context, index) + instance.positions = positions + return instance + + def __repr__(self): + return ('FrameInfo(frame={!r}, filename={!r}, lineno={!r}, function={!r}, ' + 'code_context={!r}, index={!r}, positions={!r})'.format( + self.frame, self.filename, self.lineno, self.function, + self.code_context, self.index, self.positions)) + +def getouterframes(frame, context=1): + """Get a list of records for a frame and all higher (calling) frames. + + Each record contains a frame object, filename, line number, function + name, a list of lines of context, and index within the context.""" + framelist = [] + while frame: + traceback_info = getframeinfo(frame, context) + frameinfo = (frame,) + traceback_info + framelist.append(FrameInfo(*frameinfo, positions=traceback_info.positions)) + frame = frame.f_back + return framelist + +def getinnerframes(tb, context=1): + """Get a list of records for a traceback's frame and all lower frames. 
+ + Each record contains a frame object, filename, line number, function + name, a list of lines of context, and index within the context.""" + framelist = [] + while tb: + traceback_info = getframeinfo(tb, context) + frameinfo = (tb.tb_frame,) + traceback_info + framelist.append(FrameInfo(*frameinfo, positions=traceback_info.positions)) + tb = tb.tb_next + return framelist + +def currentframe(): + """Return the frame of the caller or None if this is not possible.""" + return sys._getframe(1) if hasattr(sys, "_getframe") else None + +def stack(context=1): + """Return a list of records for the stack above the caller's frame.""" + return getouterframes(sys._getframe(1), context) + +def trace(context=1): + """Return a list of records for the stack below the current exception.""" + exc = sys.exception() + tb = None if exc is None else exc.__traceback__ + return getinnerframes(tb, context) + + +# ------------------------------------------------ static version of getattr + +_sentinel = object() +_static_getmro = type.__dict__['__mro__'].__get__ +_get_dunder_dict_of_class = type.__dict__["__dict__"].__get__ + + +def _check_instance(obj, attr): + instance_dict = {} + try: + instance_dict = object.__getattribute__(obj, "__dict__") + except AttributeError: + pass + return dict.get(instance_dict, attr, _sentinel) + + +def _check_class(klass, attr): + for entry in _static_getmro(klass): + if _shadowed_dict(type(entry)) is _sentinel and attr in entry.__dict__: + return entry.__dict__[attr] + return _sentinel + + +@functools.lru_cache() +def _shadowed_dict_from_weakref_mro_tuple(*weakref_mro): + for weakref_entry in weakref_mro: + # Normally we'd have to check whether the result of weakref_entry() + # is None here, in case the object the weakref is pointing to has died. + # In this specific case, however, we know that the only caller of this + # function is `_shadowed_dict()`, and that therefore this weakref is + # guaranteed to point to an object that is still alive. + entry = weakref_entry() + dunder_dict = _get_dunder_dict_of_class(entry) + if '__dict__' in dunder_dict: + class_dict = dunder_dict['__dict__'] + if not (type(class_dict) is types.GetSetDescriptorType and + class_dict.__name__ == "__dict__" and + class_dict.__objclass__ is entry): + return class_dict + return _sentinel + + +def _shadowed_dict(klass): + # gh-118013: the inner function here is decorated with lru_cache for + # performance reasons, *but* make sure not to pass strong references + # to the items in the mro. Doing so can lead to unexpected memory + # consumption in cases where classes are dynamically created and + # destroyed, and the dynamically created classes happen to be the only + # objects that hold strong references to other objects that take up a + # significant amount of memory. + return _shadowed_dict_from_weakref_mro_tuple( + *[make_weakref(entry) for entry in _static_getmro(klass)] + ) + + +def getattr_static(obj, attr, default=_sentinel): + """Retrieve attributes without triggering dynamic lookup via the + descriptor protocol, __getattr__ or __getattribute__. + + Note: this function may not be able to retrieve all attributes + that getattr can fetch (like dynamically created attributes) + and may find attributes that getattr can't (like descriptors + that raise AttributeError). It can also return descriptor objects + instead of instance members in some cases. See the + documentation for details. 
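+
+    A minimal sketch (hypothetical class, added editorially):
+
+        class A:
+            @property
+            def p(self):
+                raise AttributeError("descriptor never invoked")
+
+        # getattr_static(A(), 'p') returns the property object itself;
+        # plain getattr(A(), 'p') would run __get__ and raise here.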
+ """ + instance_result = _sentinel + + objtype = type(obj) + if type not in _static_getmro(objtype): + klass = objtype + dict_attr = _shadowed_dict(klass) + if (dict_attr is _sentinel or + type(dict_attr) is types.MemberDescriptorType): + instance_result = _check_instance(obj, attr) + else: + klass = obj + + klass_result = _check_class(klass, attr) + + if instance_result is not _sentinel and klass_result is not _sentinel: + if _check_class(type(klass_result), "__get__") is not _sentinel and ( + _check_class(type(klass_result), "__set__") is not _sentinel + or _check_class(type(klass_result), "__delete__") is not _sentinel + ): + return klass_result + + if instance_result is not _sentinel: + return instance_result + if klass_result is not _sentinel: + return klass_result + + if obj is klass: + # for types we check the metaclass too + for entry in _static_getmro(type(klass)): + if ( + _shadowed_dict(type(entry)) is _sentinel + and attr in entry.__dict__ + ): + return entry.__dict__[attr] + if default is not _sentinel: + return default + raise AttributeError(attr) + + +# ------------------------------------------------ generator introspection + +GEN_CREATED = 'GEN_CREATED' +GEN_RUNNING = 'GEN_RUNNING' +GEN_SUSPENDED = 'GEN_SUSPENDED' +GEN_CLOSED = 'GEN_CLOSED' + +def getgeneratorstate(generator): + """Get current state of a generator-iterator. + + Possible states are: + GEN_CREATED: Waiting to start execution. + GEN_RUNNING: Currently being executed by the interpreter. + GEN_SUSPENDED: Currently suspended at a yield expression. + GEN_CLOSED: Execution has completed. + """ + if generator.gi_running: + return GEN_RUNNING + if generator.gi_suspended: + return GEN_SUSPENDED + if generator.gi_frame is None: + return GEN_CLOSED + return GEN_CREATED + + +def getgeneratorlocals(generator): + """ + Get the mapping of generator local variables to their current values. + + A dict is returned, with the keys the local variable names and values the + bound values.""" + + if not isgenerator(generator): + raise TypeError("{!r} is not a Python generator".format(generator)) + + frame = getattr(generator, "gi_frame", None) + if frame is not None: + return generator.gi_frame.f_locals + else: + return {} + + +# ------------------------------------------------ coroutine introspection + +CORO_CREATED = 'CORO_CREATED' +CORO_RUNNING = 'CORO_RUNNING' +CORO_SUSPENDED = 'CORO_SUSPENDED' +CORO_CLOSED = 'CORO_CLOSED' + +def getcoroutinestate(coroutine): + """Get current state of a coroutine object. + + Possible states are: + CORO_CREATED: Waiting to start execution. + CORO_RUNNING: Currently being executed by the interpreter. + CORO_SUSPENDED: Currently suspended at an await expression. + CORO_CLOSED: Execution has completed. + """ + if coroutine.cr_running: + return CORO_RUNNING + if coroutine.cr_suspended: + return CORO_SUSPENDED + if coroutine.cr_frame is None: + return CORO_CLOSED + return CORO_CREATED + + +def getcoroutinelocals(coroutine): + """ + Get the mapping of coroutine local variables to their current values. 
+
+    A dict is returned, with the keys the local variable names and values the
+    bound values."""
+    frame = getattr(coroutine, "cr_frame", None)
+    if frame is not None:
+        return frame.f_locals
+    else:
+        return {}
+
+
+# ----------------------------------- asynchronous generator introspection
+
+AGEN_CREATED = 'AGEN_CREATED'
+AGEN_RUNNING = 'AGEN_RUNNING'
+AGEN_SUSPENDED = 'AGEN_SUSPENDED'
+AGEN_CLOSED = 'AGEN_CLOSED'
+
+
+def getasyncgenstate(agen):
+    """Get current state of an asynchronous generator object.
+
+    Possible states are:
+      AGEN_CREATED: Waiting to start execution.
+      AGEN_RUNNING: Currently being executed by the interpreter.
+      AGEN_SUSPENDED: Currently suspended at a yield expression.
+      AGEN_CLOSED: Execution has completed.
+    """
+    if agen.ag_running:
+        return AGEN_RUNNING
+    if agen.ag_suspended:
+        return AGEN_SUSPENDED
+    if agen.ag_frame is None:
+        return AGEN_CLOSED
+    return AGEN_CREATED
+
+
+def getasyncgenlocals(agen):
+    """
+    Get the mapping of asynchronous generator local variables to their current
+    values.
+
+    A dict is returned, with the keys the local variable names and values the
+    bound values."""
+
+    if not isasyncgen(agen):
+        raise TypeError(f"{agen!r} is not a Python async generator")
+
+    frame = getattr(agen, "ag_frame", None)
+    if frame is not None:
+        return agen.ag_frame.f_locals
+    else:
+        return {}
+
+
+###############################################################################
+### Function Signature Object (PEP 362)
+###############################################################################
+
+
+_NonUserDefinedCallables = (types.WrapperDescriptorType,
+                            types.MethodWrapperType,
+                            types.ClassMethodDescriptorType,
+                            types.BuiltinFunctionType)
+
+
+def _signature_get_user_defined_method(cls, method_name, *, follow_wrapper_chains=True):
+    """Private helper. Checks if ``cls`` has an attribute
+    named ``method_name`` and returns it only if it is a
+    pure python function.
+    """
+    if method_name == '__new__':
+        meth = getattr(cls, method_name, None)
+    else:
+        meth = getattr_static(cls, method_name, None)
+    if meth is None:
+        return None
+
+    # NOTE: The meth may wrap a non-user-defined callable.
+    # In this case, we treat the meth as a non-user-defined callable too.
+    # (e.g. cls.__new__ generated by @warnings.deprecated)
+    unwrapped_meth = None
+    if follow_wrapper_chains:
+        unwrapped_meth = unwrap(meth, stop=(lambda m: hasattr(m, "__signature__")
+                                            or _signature_is_builtin(m)))
+
+    if (isinstance(meth, _NonUserDefinedCallables)
+            or isinstance(unwrapped_meth, _NonUserDefinedCallables)):
+        # Once '__signature__' will be added to 'C'-level
+        # callables, this check won't be necessary
+        return None
+    if method_name != '__new__':
+        meth = _descriptor_get(meth, cls)
+    return meth
+
+
+def _signature_get_partial(wrapped_sig, partial, extra_args=()):
+    """Private helper to calculate how 'wrapped_sig' signature will
+    look after applying a 'functools.partial' object (or alike)
+    on it.
+    """
+
+    old_params = wrapped_sig.parameters
+    new_params = OrderedDict(old_params.items())
+
+    partial_args = partial.args or ()
+    partial_keywords = partial.keywords or {}
+
+    if extra_args:
+        partial_args = extra_args + partial_args
+
+    try:
+        ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)
+    except TypeError as ex:
+        msg = 'partial object {!r} has incorrect arguments'.format(partial)
+        raise ValueError(msg) from ex
+
+
+    transform_to_kwonly = False
+    for param_name, param in old_params.items():
+        try:
+            arg_value = ba.arguments[param_name]
+        except KeyError:
+            pass
+        else:
+            if param.kind is _POSITIONAL_ONLY:
+                # If positional-only parameter is bound by partial,
+                # it effectively disappears from the signature.
+                # However, if it is a Placeholder it is not removed,
+                # and also loses its default value.
+                if arg_value is functools.Placeholder:
+                    new_params[param_name] = param.replace(default=_empty)
+                else:
+                    new_params.pop(param_name)
+                continue
+
+            if param.kind is _POSITIONAL_OR_KEYWORD:
+                if param_name in partial_keywords:
+                    # This means that this parameter, and all parameters
+                    # after it should be keyword-only (and var-positional
+                    # should be removed). Here's why. Consider the following
+                    # function:
+                    #     foo(a, b, *args, c):
+                    #         pass
+                    #
+                    # "partial(foo, a='spam')" will have the following
+                    # signature: "(*, a='spam', b, c)". Because attempting
+                    # to call that partial with "(10, 20)" arguments will
+                    # raise a TypeError, saying that "a" argument received
+                    # multiple values.
+                    transform_to_kwonly = True
+                    # Set the new default value
+                    new_params[param_name] = param.replace(default=arg_value)
+                else:
+                    # was passed as a positional argument
+                    # Do not pop if it is a Placeholder;
+                    # also change kind to positional only
+                    # and remove default
+                    if arg_value is functools.Placeholder:
+                        new_param = param.replace(
+                            kind=_POSITIONAL_ONLY,
+                            default=_empty
+                        )
+                        new_params[param_name] = new_param
+                    else:
+                        new_params.pop(param_name)
+                    continue
+
+            if param.kind is _KEYWORD_ONLY:
+                # Set the new default value
+                new_params[param_name] = param.replace(default=arg_value)
+
+        if transform_to_kwonly:
+            assert param.kind is not _POSITIONAL_ONLY
+
+            if param.kind is _POSITIONAL_OR_KEYWORD:
+                new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)
+                new_params[param_name] = new_param
+                new_params.move_to_end(param_name)
+            elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):
+                new_params.move_to_end(param_name)
+            elif param.kind is _VAR_POSITIONAL:
+                new_params.pop(param.name)
+
+    return wrapped_sig.replace(parameters=new_params.values())
+
+
+def _signature_bound_method(sig):
+    """Private helper to transform signatures for unbound
+    functions to bound methods.
+    """
+
+    params = tuple(sig.parameters.values())
+
+    if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
+        raise ValueError('invalid method signature')
+
+    kind = params[0].kind
+    if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
+        # Drop first parameter:
+        # '(p1, p2[, ...])' -> '(p2[, ...])'
+        params = params[1:]
+    else:
+        if kind is not _VAR_POSITIONAL:
+            # Unless we add a new parameter type we never
+            # get here
+            raise ValueError('invalid argument type')
+        # It's a var-positional parameter.
+        # Do nothing. '(*args[, ...])' -> '(*args[, ...])'
+
+    return sig.replace(parameters=params)
+
+
+def _signature_is_builtin(obj):
+    """Private helper to test if `obj` is a callable that might
+    support Argument Clinic's __text_signature__ protocol.
+ """ + return (isbuiltin(obj) or + ismethoddescriptor(obj) or + isinstance(obj, _NonUserDefinedCallables) or + # Can't test 'isinstance(type)' here, as it would + # also be True for regular python classes. + # Can't use the `in` operator here, as it would + # invoke the custom __eq__ method. + obj is type or obj is object) + + +def _signature_is_functionlike(obj): + """Private helper to test if `obj` is a duck type of FunctionType. + A good example of such objects are functions compiled with + Cython, which have all attributes that a pure Python function + would have, but have their code statically compiled. + """ + + if not callable(obj) or isclass(obj): + # All function-like objects are obviously callables, + # and not classes. + return False + + name = getattr(obj, '__name__', None) + code = getattr(obj, '__code__', None) + defaults = getattr(obj, '__defaults__', _void) # Important to use _void ... + kwdefaults = getattr(obj, '__kwdefaults__', _void) # ... and not None here + + return (isinstance(code, types.CodeType) and + isinstance(name, str) and + (defaults is None or isinstance(defaults, tuple)) and + (kwdefaults is None or isinstance(kwdefaults, dict))) + + +def _signature_strip_non_python_syntax(signature): + """ + Private helper function. Takes a signature in Argument Clinic's + extended signature format. + + Returns a tuple of two things: + * that signature re-rendered in standard Python syntax, and + * the index of the "self" parameter (generally 0), or None if + the function does not have a "self" parameter. + """ + + if not signature: + return signature, None + + self_parameter = None + + lines = [l.encode('ascii') for l in signature.split('\n') if l] + generator = iter(lines).__next__ + token_stream = tokenize.tokenize(generator) + + text = [] + add = text.append + + current_parameter = 0 + OP = token.OP + ERRORTOKEN = token.ERRORTOKEN + + # token stream always starts with ENCODING token, skip it + t = next(token_stream) + assert t.type == tokenize.ENCODING + + for t in token_stream: + type, string = t.type, t.string + + if type == OP: + if string == ',': + current_parameter += 1 + + if (type == OP) and (string == '$'): + assert self_parameter is None + self_parameter = current_parameter + continue + + add(string) + if (string == ','): + add(' ') + clean_signature = ''.join(text).strip().replace("\n", "") + return clean_signature, self_parameter + + +def _signature_fromstr(cls, obj, s, skip_bound_arg=True): + """Private helper to parse content of '__text_signature__' + and return a Signature based on it. 
+ """ + Parameter = cls._parameter_cls + + clean_signature, self_parameter = _signature_strip_non_python_syntax(s) + + program = "def foo" + clean_signature + ": pass" + + try: + module = ast.parse(program) + except SyntaxError: + module = None + + if not isinstance(module, ast.Module): + raise ValueError("{!r} builtin has invalid signature".format(obj)) + + f = module.body[0] + + parameters = [] + empty = Parameter.empty + + module = None + module_dict = {} + + module_name = getattr(obj, '__module__', None) + if not module_name: + objclass = getattr(obj, '__objclass__', None) + module_name = getattr(objclass, '__module__', None) + + if module_name: + module = sys.modules.get(module_name, None) + if module: + module_dict = module.__dict__ + sys_module_dict = sys.modules.copy() + + def parse_name(node): + assert isinstance(node, ast.arg) + if node.annotation is not None: + raise ValueError("Annotations are not currently supported") + return node.arg + + def wrap_value(s): + try: + value = eval(s, module_dict) + except NameError: + try: + value = eval(s, sys_module_dict) + except NameError: + raise ValueError + + if isinstance(value, (str, int, float, bytes, bool, type(None))): + return ast.Constant(value) + raise ValueError + + class RewriteSymbolics(ast.NodeTransformer): + def visit_Attribute(self, node): + a = [] + n = node + while isinstance(n, ast.Attribute): + a.append(n.attr) + n = n.value + if not isinstance(n, ast.Name): + raise ValueError + a.append(n.id) + value = ".".join(reversed(a)) + return wrap_value(value) + + def visit_Name(self, node): + if not isinstance(node.ctx, ast.Load): + raise ValueError() + return wrap_value(node.id) + + def visit_BinOp(self, node): + # Support constant folding of a couple simple binary operations + # commonly used to define default values in text signatures + left = self.visit(node.left) + right = self.visit(node.right) + if not isinstance(left, ast.Constant) or not isinstance(right, ast.Constant): + raise ValueError + if isinstance(node.op, ast.Add): + return ast.Constant(left.value + right.value) + elif isinstance(node.op, ast.Sub): + return ast.Constant(left.value - right.value) + elif isinstance(node.op, ast.BitOr): + return ast.Constant(left.value | right.value) + raise ValueError + + def p(name_node, default_node, default=empty): + name = parse_name(name_node) + if default_node and default_node is not _empty: + try: + default_node = RewriteSymbolics().visit(default_node) + default = ast.literal_eval(default_node) + except ValueError: + raise ValueError("{!r} builtin has invalid signature".format(obj)) from None + parameters.append(Parameter(name, kind, default=default, annotation=empty)) + + # non-keyword-only parameters + total_non_kw_args = len(f.args.posonlyargs) + len(f.args.args) + required_non_kw_args = total_non_kw_args - len(f.args.defaults) + defaults = itertools.chain(itertools.repeat(None, required_non_kw_args), f.args.defaults) + + kind = Parameter.POSITIONAL_ONLY + for (name, default) in zip(f.args.posonlyargs, defaults): + p(name, default) + + kind = Parameter.POSITIONAL_OR_KEYWORD + for (name, default) in zip(f.args.args, defaults): + p(name, default) + + # *args + if f.args.vararg: + kind = Parameter.VAR_POSITIONAL + p(f.args.vararg, empty) + + # keyword-only arguments + kind = Parameter.KEYWORD_ONLY + for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults): + p(name, default) + + # **kwargs + if f.args.kwarg: + kind = Parameter.VAR_KEYWORD + p(f.args.kwarg, empty) + + if self_parameter is not None: + # Possibly strip 
the bound argument: + # - We *always* strip first bound argument if + # it is a module. + # - We don't strip first bound argument if + # skip_bound_arg is False. + assert parameters + _self = getattr(obj, '__self__', None) + self_isbound = _self is not None + self_ismodule = ismodule(_self) + if self_isbound and (self_ismodule or skip_bound_arg): + parameters.pop(0) + else: + # for builtins, self parameter is always positional-only! + p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY) + parameters[0] = p + + return cls(parameters, return_annotation=cls.empty) + + +def _signature_from_builtin(cls, func, skip_bound_arg=True): + """Private helper function to get signature for + builtin callables. + """ + + if not _signature_is_builtin(func): + raise TypeError("{!r} is not a Python builtin " + "function".format(func)) + + s = getattr(func, "__text_signature__", None) + if not s: + raise ValueError("no signature found for builtin {!r}".format(func)) + + return _signature_fromstr(cls, func, s, skip_bound_arg) + + +def _signature_from_function(cls, func, skip_bound_arg=True, + globals=None, locals=None, eval_str=False, + *, annotation_format=Format.VALUE): + """Private helper: constructs Signature for the given python function.""" + + is_duck_function = False + if not isfunction(func): + if _signature_is_functionlike(func): + is_duck_function = True + else: + # If it's not a pure Python function, and not a duck type + # of pure function: + raise TypeError('{!r} is not a Python function'.format(func)) + + s = getattr(func, "__text_signature__", None) + if s: + return _signature_fromstr(cls, func, s, skip_bound_arg) + + Parameter = cls._parameter_cls + + # Parameter information. + func_code = func.__code__ + pos_count = func_code.co_argcount + arg_names = func_code.co_varnames + posonly_count = func_code.co_posonlyargcount + positional = arg_names[:pos_count] + keyword_only_count = func_code.co_kwonlyargcount + keyword_only = arg_names[pos_count:pos_count + keyword_only_count] + annotations = get_annotations(func, globals=globals, locals=locals, eval_str=eval_str, + format=annotation_format) + defaults = func.__defaults__ + kwdefaults = func.__kwdefaults__ + + if defaults: + pos_default_count = len(defaults) + else: + pos_default_count = 0 + + parameters = [] + + non_default_count = pos_count - pos_default_count + posonly_left = posonly_count + + # Non-keyword-only parameters w/o defaults. + for name in positional[:non_default_count]: + kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=kind)) + if posonly_left: + posonly_left -= 1 + + # ... w/ defaults. + for offset, name in enumerate(positional[non_default_count:]): + kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=kind, + default=defaults[offset])) + if posonly_left: + posonly_left -= 1 + + # *args + if func_code.co_flags & CO_VARARGS: + name = arg_names[pos_count + keyword_only_count] + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_VAR_POSITIONAL)) + + # Keyword-only parameters. 
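+    # (Defaults for keyword-only parameters live in __kwdefaults__, a dict
+    # keyed by parameter name, unlike positional defaults in the
+    # __defaults__ tuple.)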
+    for name in keyword_only:
+        default = _empty
+        if kwdefaults is not None:
+            default = kwdefaults.get(name, _empty)
+
+        annotation = annotations.get(name, _empty)
+        parameters.append(Parameter(name, annotation=annotation,
+                                    kind=_KEYWORD_ONLY,
+                                    default=default))
+    # **kwargs
+    if func_code.co_flags & CO_VARKEYWORDS:
+        index = pos_count + keyword_only_count
+        if func_code.co_flags & CO_VARARGS:
+            index += 1
+
+        name = arg_names[index]
+        annotation = annotations.get(name, _empty)
+        parameters.append(Parameter(name, annotation=annotation,
+                                    kind=_VAR_KEYWORD))
+
+    # If 'func' is a pure Python function, don't validate the
+    # parameters list (for correct order and defaults); it should be OK.
+    return cls(parameters,
+               return_annotation=annotations.get('return', _empty),
+               __validate_parameters__=is_duck_function)
+
+
+def _descriptor_get(descriptor, obj):
+    if isclass(descriptor):
+        return descriptor
+    get = getattr(type(descriptor), '__get__', _sentinel)
+    if get is _sentinel:
+        return descriptor
+    return get(descriptor, obj, type(obj))
+
+
+def _signature_from_callable(obj, *,
+                             follow_wrapper_chains=True,
+                             skip_bound_arg=True,
+                             globals=None,
+                             locals=None,
+                             eval_str=False,
+                             sigcls,
+                             annotation_format=Format.VALUE):
+
+    """Private helper function to get signature for arbitrary
+    callable objects.
+    """
+
+    _get_signature_of = functools.partial(_signature_from_callable,
+                                follow_wrapper_chains=follow_wrapper_chains,
+                                skip_bound_arg=skip_bound_arg,
+                                globals=globals,
+                                locals=locals,
+                                sigcls=sigcls,
+                                eval_str=eval_str,
+                                annotation_format=annotation_format)
+
+    if not callable(obj):
+        raise TypeError('{!r} is not a callable object'.format(obj))
+
+    if isinstance(obj, types.MethodType):
+        # In this case we skip the first parameter of the underlying
+        # function (usually `self` or `cls`).
+        sig = _get_signature_of(obj.__func__)
+
+        if skip_bound_arg:
+            return _signature_bound_method(sig)
+        else:
+            return sig
+
+    # Was this function wrapped by a decorator?
+    if follow_wrapper_chains:
+        # Unwrap until we find an explicit signature or a MethodType (which will be
+        # handled explicitly below).
+        obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")
+                                or isinstance(f, types.MethodType)))
+        if isinstance(obj, types.MethodType):
+            # If the unwrapped object is a *method*, we might want to
+            # skip its first parameter (self).
+            # See test_signature_wrapped_bound_method for details.
+            return _get_signature_of(obj)
+
+    try:
+        sig = obj.__signature__
+    except AttributeError:
+        pass
+    else:
+        if sig is not None:
+            if not isinstance(sig, Signature):
+                raise TypeError(
+                    'unexpected object {!r} in __signature__ '
+                    'attribute'.format(sig))
+            return sig
+
+    try:
+        partialmethod = obj.__partialmethod__
+    except AttributeError:
+        pass
+    else:
+        if isinstance(partialmethod, functools.partialmethod):
+            # Unbound partialmethod (see functools.partialmethod).
+            # This means that we need to calculate the signature as if
+            # it's a regular partial object, but taking into account that
+            # the first positional argument (usually `self` or `cls`)
+            # will not be passed automatically (as for bound methods).
+
+            wrapped_sig = _get_signature_of(partialmethod.func)
+
+            sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
+            first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
+            if first_wrapped_param.kind is Parameter.VAR_POSITIONAL:
+                # First argument of the wrapped callable is `*args`, as in
+                # `partialmethod(lambda *args)`.
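+                # Nothing needs to be re-prepended here: the leading
+                # `*args` of the wrapped callable absorbs the implicit
+                # first argument, so the signature is returned unchanged.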
+ return sig + else: + sig_params = tuple(sig.parameters.values()) + assert (not sig_params or + first_wrapped_param is not sig_params[0]) + # If there were placeholders set, + # first param is transformed to positional only + if partialmethod.args.count(functools.Placeholder): + first_wrapped_param = first_wrapped_param.replace( + kind=Parameter.POSITIONAL_ONLY) + new_params = (first_wrapped_param,) + sig_params + return sig.replace(parameters=new_params) + + if isinstance(obj, functools.partial): + wrapped_sig = _get_signature_of(obj.func) + return _signature_get_partial(wrapped_sig, obj) + + if isfunction(obj) or _signature_is_functionlike(obj): + # If it's a pure Python function, or an object that is duck type + # of a Python function (Cython functions, for instance), then: + return _signature_from_function(sigcls, obj, + skip_bound_arg=skip_bound_arg, + globals=globals, locals=locals, eval_str=eval_str, + annotation_format=annotation_format) + + if _signature_is_builtin(obj): + return _signature_from_builtin(sigcls, obj, + skip_bound_arg=skip_bound_arg) + + if isinstance(obj, type): + # obj is a class or a metaclass + + # First, let's see if it has an overloaded __call__ defined + # in its metaclass + call = _signature_get_user_defined_method( + type(obj), + '__call__', + follow_wrapper_chains=follow_wrapper_chains, + ) + if call is not None: + return _get_signature_of(call) + + # NOTE: The user-defined method can be a function with a thin wrapper + # around object.__new__ (e.g., generated by `@warnings.deprecated`) + new = _signature_get_user_defined_method( + obj, + '__new__', + follow_wrapper_chains=follow_wrapper_chains, + ) + init = _signature_get_user_defined_method( + obj, + '__init__', + follow_wrapper_chains=follow_wrapper_chains, + ) + + # Go through the MRO and see if any class has user-defined + # pure Python __new__ or __init__ method + for base in obj.__mro__: + # Now we check if the 'obj' class has an own '__new__' method + if new is not None and '__new__' in base.__dict__: + sig = _get_signature_of(new) + if skip_bound_arg: + sig = _signature_bound_method(sig) + return sig + # or an own '__init__' method + elif init is not None and '__init__' in base.__dict__: + return _get_signature_of(init) + + # At this point we know, that `obj` is a class, with no user- + # defined '__init__', '__new__', or class-level '__call__' + + for base in obj.__mro__[:-1]: + # Since '__text_signature__' is implemented as a + # descriptor that extracts text signature from the + # class docstring, if 'obj' is derived from a builtin + # class, its own '__text_signature__' may be 'None'. + # Therefore, we go through the MRO (except the last + # class in there, which is 'object') to find the first + # class with non-empty text signature. + try: + text_sig = base.__text_signature__ + except AttributeError: + pass + else: + if text_sig: + # If 'base' class has a __text_signature__ attribute: + # return a signature based on it + return _signature_fromstr(sigcls, base, text_sig) + + # No '__text_signature__' was found for the 'obj' class. + # Last option is to check if its '__init__' is + # object.__init__ or type.__init__. + if type not in obj.__mro__: + obj_init = obj.__init__ + obj_new = obj.__new__ + if follow_wrapper_chains: + obj_init = unwrap(obj_init) + obj_new = unwrap(obj_new) + # We have a class (not metaclass), but no user-defined + # __init__ or __new__ for it + if obj_init is object.__init__ and obj_new is object.__new__: + # Return a signature of 'object' builtin. 
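+                # (object's own text signature is "()"; calling object()
+                # takes no arguments.)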
+ return sigcls.from_callable(object) + else: + raise ValueError( + 'no signature found for builtin type {!r}'.format(obj)) + + else: + # An object with __call__ + call = getattr_static(type(obj), '__call__', None) + if call is not None: + try: + text_sig = obj.__text_signature__ + except AttributeError: + pass + else: + if text_sig: + return _signature_fromstr(sigcls, obj, text_sig) + call = _descriptor_get(call, obj) + return _get_signature_of(call) + + raise ValueError('callable {!r} is not supported by signature'.format(obj)) + + +class _void: + """A private marker - used in Parameter & Signature.""" + + +class _empty: + """Marker object for Signature.empty and Parameter.empty.""" + + +class _ParameterKind(enum.IntEnum): + POSITIONAL_ONLY = 'positional-only' + POSITIONAL_OR_KEYWORD = 'positional or keyword' + VAR_POSITIONAL = 'variadic positional' + KEYWORD_ONLY = 'keyword-only' + VAR_KEYWORD = 'variadic keyword' + + def __new__(cls, description): + value = len(cls.__members__) + member = int.__new__(cls, value) + member._value_ = value + member.description = description + return member + + def __str__(self): + return self.name + +_POSITIONAL_ONLY = _ParameterKind.POSITIONAL_ONLY +_POSITIONAL_OR_KEYWORD = _ParameterKind.POSITIONAL_OR_KEYWORD +_VAR_POSITIONAL = _ParameterKind.VAR_POSITIONAL +_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY +_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD + + +class Parameter: + """Represents a parameter in a function signature. + + Has the following public attributes: + + * name : str + The name of the parameter as a string. + * default : object + The default value for the parameter if specified. If the + parameter has no default value, this attribute is set to + `Parameter.empty`. + * annotation + The annotation for the parameter if specified. If the + parameter has no annotation, this attribute is set to + `Parameter.empty`. + * kind + Describes how argument values are bound to the parameter. + Possible values: `Parameter.POSITIONAL_ONLY`, + `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`, + `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`. + Every value has a `description` attribute describing meaning. + """ + + __slots__ = ('_name', '_kind', '_default', '_annotation') + + POSITIONAL_ONLY = _POSITIONAL_ONLY + POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD + VAR_POSITIONAL = _VAR_POSITIONAL + KEYWORD_ONLY = _KEYWORD_ONLY + VAR_KEYWORD = _VAR_KEYWORD + + empty = _empty + + def __init__(self, name, kind, *, default=_empty, annotation=_empty): + try: + self._kind = _ParameterKind(kind) + except ValueError: + raise ValueError(f'value {kind!r} is not a valid Parameter.kind') + if default is not _empty: + if self._kind in (_VAR_POSITIONAL, _VAR_KEYWORD): + msg = '{} parameters cannot have default values' + msg = msg.format(self._kind.description) + raise ValueError(msg) + self._default = default + self._annotation = annotation + + if name is _empty: + raise ValueError('name is a required attribute for Parameter') + + if not isinstance(name, str): + msg = 'name must be a str, not a {}'.format(type(name).__name__) + raise TypeError(msg) + + if name[0] == '.' and name[1:].isdigit(): + # These are implicit arguments generated by comprehensions. In + # order to provide a friendlier interface to users, we recast + # their name as "implicitN" and treat them as positional-only. + # See issue 19611. 
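+            # (For example, a comprehension's iterator argument named ".0"
+            # becomes "implicit0" below.)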
+ if self._kind != _POSITIONAL_OR_KEYWORD: + msg = ( + 'implicit arguments must be passed as ' + 'positional or keyword arguments, not {}' + ) + msg = msg.format(self._kind.description) + raise ValueError(msg) + self._kind = _POSITIONAL_ONLY + name = 'implicit{}'.format(name[1:]) + + # It's possible for C functions to have a positional-only parameter + # where the name is a keyword, so for compatibility we'll allow it. + is_keyword = iskeyword(name) and self._kind is not _POSITIONAL_ONLY + if is_keyword or not name.isidentifier(): + raise ValueError('{!r} is not a valid parameter name'.format(name)) + + self._name = name + + def __reduce__(self): + return (type(self), + (self._name, self._kind), + {'_default': self._default, + '_annotation': self._annotation}) + + def __setstate__(self, state): + self._default = state['_default'] + self._annotation = state['_annotation'] + + @property + def name(self): + return self._name + + @property + def default(self): + return self._default + + @property + def annotation(self): + return self._annotation + + @property + def kind(self): + return self._kind + + def replace(self, *, name=_void, kind=_void, + annotation=_void, default=_void): + """Creates a customized copy of the Parameter.""" + + if name is _void: + name = self._name + + if kind is _void: + kind = self._kind + + if annotation is _void: + annotation = self._annotation + + if default is _void: + default = self._default + + return type(self)(name, kind, default=default, annotation=annotation) + + def __str__(self): + return self._format() + + def _format(self, *, quote_annotation_strings=True): + kind = self.kind + formatted = self._name + + # Add annotation and default value + if self._annotation is not _empty: + annotation = formatannotation(self._annotation, + quote_annotation_strings=quote_annotation_strings) + formatted = '{}: {}'.format(formatted, annotation) + + if self._default is not _empty: + if self._annotation is not _empty: + formatted = '{} = {}'.format(formatted, repr(self._default)) + else: + formatted = '{}={}'.format(formatted, repr(self._default)) + + if kind == _VAR_POSITIONAL: + formatted = '*' + formatted + elif kind == _VAR_KEYWORD: + formatted = '**' + formatted + + return formatted + + __replace__ = replace + + def __repr__(self): + return '<{} "{}">'.format(self.__class__.__name__, self) + + def __hash__(self): + return hash((self._name, self._kind, self._annotation, self._default)) + + def __eq__(self, other): + if self is other: + return True + if not isinstance(other, Parameter): + return NotImplemented + return (self._name == other._name and + self._kind == other._kind and + self._default == other._default and + self._annotation == other._annotation) + + +class BoundArguments: + """Result of `Signature.bind` call. Holds the mapping of arguments + to the function's parameters. + + Has the following public attributes: + + * arguments : dict + An ordered mutable mapping of parameters' names to arguments' values. + Does not contain arguments' default values. + * signature : Signature + The Signature object that created this instance. + * args : tuple + Tuple of positional arguments values. + * kwargs : dict + Dict of keyword arguments values. 
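+
+    A minimal usage sketch (illustrative only; `signature` is defined
+    later in this module):
+
+        >>> def foo(a, b=10): pass
+        >>> ba = signature(foo).bind(5)
+        >>> ba.args, ba.kwargs
+        ((5,), {})
+        >>> ba.apply_defaults()
+        >>> ba.arguments
+        {'a': 5, 'b': 10}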
+ """ + + __slots__ = ('arguments', '_signature', '__weakref__') + + def __init__(self, signature, arguments): + self.arguments = arguments + self._signature = signature + + @property + def signature(self): + return self._signature + + @property + def args(self): + args = [] + for param_name, param in self._signature.parameters.items(): + if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + break + + try: + arg = self.arguments[param_name] + except KeyError: + # We're done here. Other arguments + # will be mapped in 'BoundArguments.kwargs' + break + else: + if param.kind == _VAR_POSITIONAL: + # *args + args.extend(arg) + else: + # plain argument + args.append(arg) + + return tuple(args) + + @property + def kwargs(self): + kwargs = {} + kwargs_started = False + for param_name, param in self._signature.parameters.items(): + if not kwargs_started: + if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + kwargs_started = True + else: + if param_name not in self.arguments: + kwargs_started = True + continue + + if not kwargs_started: + continue + + try: + arg = self.arguments[param_name] + except KeyError: + pass + else: + if param.kind == _VAR_KEYWORD: + # **kwargs + kwargs.update(arg) + else: + # plain keyword argument + kwargs[param_name] = arg + + return kwargs + + def apply_defaults(self): + """Set default values for missing arguments. + + For variable-positional arguments (*args) the default is an + empty tuple. + + For variable-keyword arguments (**kwargs) the default is an + empty dict. + """ + arguments = self.arguments + new_arguments = [] + for name, param in self._signature.parameters.items(): + try: + new_arguments.append((name, arguments[name])) + except KeyError: + if param.default is not _empty: + val = param.default + elif param.kind is _VAR_POSITIONAL: + val = () + elif param.kind is _VAR_KEYWORD: + val = {} + else: + # This BoundArguments was likely produced by + # Signature.bind_partial(). + continue + new_arguments.append((name, val)) + self.arguments = dict(new_arguments) + + def __eq__(self, other): + if self is other: + return True + if not isinstance(other, BoundArguments): + return NotImplemented + return (self.signature == other.signature and + self.arguments == other.arguments) + + def __setstate__(self, state): + self._signature = state['_signature'] + self.arguments = state['arguments'] + + def __getstate__(self): + return {'_signature': self._signature, 'arguments': self.arguments} + + def __repr__(self): + args = [] + for arg, value in self.arguments.items(): + args.append('{}={!r}'.format(arg, value)) + return '<{} ({})>'.format(self.__class__.__name__, ', '.join(args)) + + +class Signature: + """A Signature object represents the overall signature of a function. + It stores a Parameter object for each parameter accepted by the + function, as well as information specific to the function itself. + + A Signature object has the following public attributes and methods: + + * parameters : OrderedDict + An ordered mapping of parameters' names to the corresponding + Parameter objects (keyword-only arguments are in the same order + as listed in `code.co_varnames`). + * return_annotation : object + The annotation for the return type of the function if specified. + If the function has no annotation for its return type, this + attribute is set to `Signature.empty`. + * bind(*args, **kwargs) -> BoundArguments + Creates a mapping from positional and keyword arguments to + parameters. 
+    * bind_partial(*args, **kwargs) -> BoundArguments
+        Creates a partial mapping from positional and keyword arguments
+        to parameters (simulating 'functools.partial' behavior).
+    """
+
+    __slots__ = ('_return_annotation', '_parameters')
+
+    _parameter_cls = Parameter
+    _bound_arguments_cls = BoundArguments
+
+    empty = _empty
+
+    def __init__(self, parameters=None, *, return_annotation=_empty,
+                 __validate_parameters__=True):
+        """Constructs Signature from the given list of Parameter
+        objects and 'return_annotation'. All arguments are optional.
+        """
+
+        if parameters is None:
+            params = OrderedDict()
+        else:
+            if __validate_parameters__:
+                params = OrderedDict()
+                top_kind = _POSITIONAL_ONLY
+                seen_default = False
+                seen_var_parameters = set()
+
+                for param in parameters:
+                    kind = param.kind
+                    name = param.name
+
+                    if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
+                        if kind in seen_var_parameters:
+                            msg = f'more than one {kind.description} parameter'
+                            raise ValueError(msg)
+
+                        seen_var_parameters.add(kind)
+
+                    if kind < top_kind:
+                        msg = (
+                            'wrong parameter order: {} parameter before {} '
+                            'parameter'
+                        )
+                        msg = msg.format(top_kind.description,
+                                         kind.description)
+                        raise ValueError(msg)
+                    elif kind > top_kind:
+                        top_kind = kind
+
+                    if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):
+                        if param.default is _empty:
+                            if seen_default:
+                                # No default for this parameter, but the
+                                # previous parameter had a default
+                                msg = 'non-default argument follows default ' \
+                                      'argument'
+                                raise ValueError(msg)
+                        else:
+                            # There is a default for this parameter.
+                            seen_default = True
+
+                    if name in params:
+                        msg = 'duplicate parameter name: {!r}'.format(name)
+                        raise ValueError(msg)
+
+                    params[name] = param
+            else:
+                params = OrderedDict((param.name, param) for param in parameters)
+
+        self._parameters = types.MappingProxyType(params)
+        self._return_annotation = return_annotation
+
+    @classmethod
+    def from_callable(cls, obj, *,
+                      follow_wrapped=True, globals=None, locals=None, eval_str=False,
+                      annotation_format=Format.VALUE):
+        """Constructs Signature for the given callable object."""
+        return _signature_from_callable(obj, sigcls=cls,
+                                        follow_wrapper_chains=follow_wrapped,
+                                        globals=globals, locals=locals, eval_str=eval_str,
+                                        annotation_format=annotation_format)
+
+    @property
+    def parameters(self):
+        return self._parameters
+
+    @property
+    def return_annotation(self):
+        return self._return_annotation
+
+    def replace(self, *, parameters=_void, return_annotation=_void):
+        """Creates a customized copy of the Signature.
+        Pass 'parameters' and/or 'return_annotation' arguments
+        to override them in the new copy.
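+
+        For illustration (a minimal sketch):
+
+            >>> sig = Signature([Parameter('a', Parameter.POSITIONAL_OR_KEYWORD)])
+            >>> str(sig.replace(return_annotation=int))
+            '(a) -> int'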
+ """ + + if parameters is _void: + parameters = self.parameters.values() + + if return_annotation is _void: + return_annotation = self._return_annotation + + return type(self)(parameters, + return_annotation=return_annotation) + + __replace__ = replace + + def _hash_basis(self): + params = tuple(param for param in self.parameters.values() + if param.kind != _KEYWORD_ONLY) + + kwo_params = {param.name: param for param in self.parameters.values() + if param.kind == _KEYWORD_ONLY} + + return params, kwo_params, self.return_annotation + + def __hash__(self): + params, kwo_params, return_annotation = self._hash_basis() + kwo_params = frozenset(kwo_params.values()) + return hash((params, kwo_params, return_annotation)) + + def __eq__(self, other): + if self is other: + return True + if not isinstance(other, Signature): + return NotImplemented + return self._hash_basis() == other._hash_basis() + + def _bind(self, args, kwargs, *, partial=False): + """Private method. Don't use directly.""" + + arguments = {} + + parameters = iter(self.parameters.values()) + parameters_ex = () + arg_vals = iter(args) + + pos_only_param_in_kwargs = [] + + while True: + # Let's iterate through the positional arguments and corresponding + # parameters + try: + arg_val = next(arg_vals) + except StopIteration: + # No more positional arguments + try: + param = next(parameters) + except StopIteration: + # No more parameters. That's it. Just need to check that + # we have no `kwargs` after this while loop + break + else: + if param.kind == _VAR_POSITIONAL: + # That's OK, just empty *args. Let's start parsing + # kwargs + break + elif param.name in kwargs: + if param.kind == _POSITIONAL_ONLY: + if param.default is _empty: + msg = f'missing a required positional-only argument: {param.name!r}' + raise TypeError(msg) + # Raise a TypeError once we are sure there is no + # **kwargs param later. + pos_only_param_in_kwargs.append(param) + continue + parameters_ex = (param,) + break + elif (param.kind == _VAR_KEYWORD or + param.default is not _empty): + # That's fine too - we have a default value for this + # parameter. 
So, let's start parsing `kwargs`, starting
+                    # with the current parameter
+                    parameters_ex = (param,)
+                    break
+                else:
+                    # No default, not VAR_KEYWORD, not VAR_POSITIONAL,
+                    # not in `kwargs`
+                    if partial:
+                        parameters_ex = (param,)
+                        break
+                    else:
+                        if param.kind == _KEYWORD_ONLY:
+                            argtype = ' keyword-only'
+                        else:
+                            argtype = ''
+                        msg = 'missing a required{argtype} argument: {arg!r}'
+                        msg = msg.format(arg=param.name, argtype=argtype)
+                        raise TypeError(msg) from None
+            else:
+                # We have a positional argument to process
+                try:
+                    param = next(parameters)
+                except StopIteration:
+                    raise TypeError('too many positional arguments') from None
+                else:
+                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
+                        # Looks like we have no parameter for this positional
+                        # argument
+                        raise TypeError(
+                            'too many positional arguments') from None
+
+                    if param.kind == _VAR_POSITIONAL:
+                        # We have an '*args'-like argument, let's fill it with
+                        # all positional arguments we have left and move on to
+                        # the next phase
+                        values = [arg_val]
+                        values.extend(arg_vals)
+                        arguments[param.name] = tuple(values)
+                        break
+
+                    if param.name in kwargs and param.kind != _POSITIONAL_ONLY:
+                        raise TypeError(
+                            'multiple values for argument {arg!r}'.format(
+                                arg=param.name)) from None
+
+                    arguments[param.name] = arg_val
+
+        # Now we iterate through the remaining parameters to process
+        # keyword arguments
+        kwargs_param = None
+        for param in itertools.chain(parameters_ex, parameters):
+            if param.kind == _VAR_KEYWORD:
+                # Memorize that we have a '**kwargs'-like parameter
+                kwargs_param = param
+                continue
+
+            if param.kind == _VAR_POSITIONAL:
+                # Named arguments don't refer to '*args'-like parameters.
+                # We only arrive here if the positional arguments ended
+                # before reaching the last parameter before *args.
+                continue
+
+            param_name = param.name
+            try:
+                arg_val = kwargs.pop(param_name)
+            except KeyError:
+                # We have no value for this parameter. It's fine, though,
+                # if it has a default value, or it is an '*args'-like
+                # parameter, left alone by the processing of positional
+                # arguments.
+                if (not partial and param.kind != _VAR_POSITIONAL and
+                        param.default is _empty):
+                    raise TypeError('missing a required argument: {arg!r}'
+                                    .format(arg=param_name)) from None
+
+            else:
+                arguments[param_name] = arg_val
+
+        if kwargs:
+            if kwargs_param is not None:
+                # Process our '**kwargs'-like parameter
+                arguments[kwargs_param.name] = kwargs
+            elif pos_only_param_in_kwargs:
+                raise TypeError(
+                    'got some positional-only arguments passed as '
+                    'keyword arguments: {arg!r}'.format(
+                        arg=', '.join(
+                            param.name
+                            for param in pos_only_param_in_kwargs
+                        ),
+                    ),
+                )
+            else:
+                raise TypeError(
+                    'got an unexpected keyword argument {arg!r}'.format(
+                        arg=next(iter(kwargs))))
+
+        return self._bound_arguments_cls(self, arguments)
+
+    def bind(self, /, *args, **kwargs):
+        """Get a BoundArguments object that maps the passed `args`
+        and `kwargs` to the function's signature. Raises `TypeError`
+        if the passed arguments cannot be bound.
+        """
+        return self._bind(args, kwargs)
+
+    def bind_partial(self, /, *args, **kwargs):
+        """Get a BoundArguments object that partially maps the
+        passed `args` and `kwargs` to the function's signature.
+        Raises `TypeError` if the passed arguments cannot be bound.
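+
+        A rough sketch of typical use (illustrative only):
+
+            >>> def foo(a, b): pass
+            >>> signature(foo).bind_partial(1).arguments
+            {'a': 1}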
+ """ + return self._bind(args, kwargs, partial=True) + + def __reduce__(self): + return (type(self), + (tuple(self._parameters.values()),), + {'_return_annotation': self._return_annotation}) + + def __setstate__(self, state): + self._return_annotation = state['_return_annotation'] + + def __repr__(self): + return '<{} {}>'.format(self.__class__.__name__, self) + + def __str__(self): + return self.format() + + def format(self, *, max_width=None, quote_annotation_strings=True): + """Create a string representation of the Signature object. + + If *max_width* integer is passed, + signature will try to fit into the *max_width*. + If signature is longer than *max_width*, + all parameters will be on separate lines. + + If *quote_annotation_strings* is False, annotations + in the signature are displayed without opening and closing quotation + marks. This is useful when the signature was created with the + STRING format or when ``from __future__ import annotations`` was used. + """ + result = [] + render_pos_only_separator = False + render_kw_only_separator = True + for param in self.parameters.values(): + formatted = param._format(quote_annotation_strings=quote_annotation_strings) + + kind = param.kind + + if kind == _POSITIONAL_ONLY: + render_pos_only_separator = True + elif render_pos_only_separator: + # It's not a positional-only parameter, and the flag + # is set to 'True' (there were pos-only params before.) + result.append('/') + render_pos_only_separator = False + + if kind == _VAR_POSITIONAL: + # OK, we have an '*args'-like parameter, so we won't need + # a '*' to separate keyword-only arguments + render_kw_only_separator = False + elif kind == _KEYWORD_ONLY and render_kw_only_separator: + # We have a keyword-only parameter to render and we haven't + # rendered an '*args'-like parameter before, so add a '*' + # separator to the parameters list ("foo(arg1, *, arg2)" case) + result.append('*') + # This condition should be only triggered once, so + # reset the flag + render_kw_only_separator = False + + result.append(formatted) + + if render_pos_only_separator: + # There were only positional-only parameters, hence the + # flag was not reset to 'False' + result.append('/') + + rendered = '({})'.format(', '.join(result)) + if max_width is not None and len(rendered) > max_width: + rendered = '(\n {}\n)'.format(',\n '.join(result)) + + if self.return_annotation is not _empty: + anno = formatannotation(self.return_annotation, + quote_annotation_strings=quote_annotation_strings) + rendered += ' -> {}'.format(anno) + + return rendered + + +def signature(obj, *, follow_wrapped=True, globals=None, locals=None, eval_str=False, + annotation_format=Format.VALUE): + """Get a signature object for the passed callable.""" + return Signature.from_callable(obj, follow_wrapped=follow_wrapped, + globals=globals, locals=locals, eval_str=eval_str, + annotation_format=annotation_format) + + +class BufferFlags(enum.IntFlag): + SIMPLE = 0x0 + WRITABLE = 0x1 + FORMAT = 0x4 + ND = 0x8 + STRIDES = 0x10 | ND + C_CONTIGUOUS = 0x20 | STRIDES + F_CONTIGUOUS = 0x40 | STRIDES + ANY_CONTIGUOUS = 0x80 | STRIDES + INDIRECT = 0x100 | STRIDES + CONTIG = ND | WRITABLE + CONTIG_RO = ND + STRIDED = STRIDES | WRITABLE + STRIDED_RO = STRIDES + RECORDS = STRIDES | WRITABLE | FORMAT + RECORDS_RO = STRIDES | FORMAT + FULL = INDIRECT | WRITABLE | FORMAT + FULL_RO = INDIRECT | FORMAT + READ = 0x100 + WRITE = 0x200 + + +def _main(): + """ Logic for inspecting an object given at command line """ + import argparse + import importlib + + parser 
= argparse.ArgumentParser(color=True) + parser.add_argument( + 'object', + help="The object to be analysed. " + "It supports the 'module:qualname' syntax") + parser.add_argument( + '-d', '--details', action='store_true', + help='Display info about the module rather than its source code') + + args = parser.parse_args() + + target = args.object + mod_name, has_attrs, attrs = target.partition(":") + try: + obj = module = importlib.import_module(mod_name) + except Exception as exc: + msg = "Failed to import {} ({}: {})".format(mod_name, + type(exc).__name__, + exc) + print(msg, file=sys.stderr) + sys.exit(2) + + if has_attrs: + parts = attrs.split(".") + obj = module + for part in parts: + obj = getattr(obj, part) + + if module.__name__ in sys.builtin_module_names: + print("Can't get info for builtin modules.", file=sys.stderr) + sys.exit(1) + + if args.details: + print('Target: {}'.format(target)) + print('Origin: {}'.format(getsourcefile(module))) + print('Cached: {}'.format(module.__cached__)) + if obj is module: + print('Loader: {}'.format(repr(module.__loader__))) + if hasattr(module, '__path__'): + print('Submodule search path: {}'.format(module.__path__)) + else: + try: + __, lineno = findsource(obj) + except Exception: + pass + else: + print('Line: {}'.format(lineno)) + + print('\n') + else: + print(getsource(obj)) + + +if __name__ == "__main__": + _main() diff --git a/Python314_4_x64_Template/Lib/io.py b/Python314_4_x64_Template/Lib/io.py new file mode 100644 index 00000000..63ffadb1 --- /dev/null +++ b/Python314_4_x64_Template/Lib/io.py @@ -0,0 +1,150 @@ +"""The io module provides the Python interfaces to stream handling. The +builtin open function is defined in this module. + +At the top of the I/O hierarchy is the abstract base class IOBase. It +defines the basic interface to a stream. Note, however, that there is no +separation between reading and writing to streams; implementations are +allowed to raise an OSError if they do not support a given operation. + +Extending IOBase is RawIOBase which deals simply with the reading and +writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide +an interface to OS files. + +BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its +subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer +streams that are readable, writable, and both respectively. +BufferedRandom provides a buffered interface to random access +streams. BytesIO is a simple stream of in-memory bytes. + +Another IOBase subclass, TextIOBase, deals with the encoding and decoding +of streams into text. TextIOWrapper, which extends it, is a buffered text +interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO +is an in-memory stream for text. + +Argument names are not part of the specification, and only the arguments +of open() are intended to be used as keyword arguments. + +data: + +DEFAULT_BUFFER_SIZE + + An int containing the default buffer size used by the module's buffered + I/O classes. open() uses the file's blksize (as obtained by os.stat) if + possible. +""" +# New I/O library conforming to PEP 3116. 
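+#
+# A sketch of the hierarchy described above (illustrative, not executed):
+#
+#     with open("data.bin", "rb") as f:   # BufferedReader (BufferedIOBase)
+#         raw = f.raw                     # FileIO (RawIOBase)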
+
+__author__ = ("Guido van Rossum <guido@python.org>, "
+              "Mike Verdone <mike.verdone@gmail.com>, "
+              "Mark Russell <mark.russell@zen.co.uk>, "
+              "Antoine Pitrou <solipsis@pitrou.net>, "
+              "Amaury Forgeot d'Arc <amauryfa@gmail.com>, "
+              "Benjamin Peterson <benjamin@python.org>")
+
+__all__ = ["BlockingIOError", "open", "open_code", "IOBase", "RawIOBase",
+           "FileIO", "BytesIO", "StringIO", "BufferedIOBase",
+           "BufferedReader", "BufferedWriter", "BufferedRWPair",
+           "BufferedRandom", "TextIOBase", "TextIOWrapper",
+           "UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END",
+           "DEFAULT_BUFFER_SIZE", "text_encoding", "IncrementalNewlineDecoder",
+           "Reader", "Writer"]
+
+
+import _io
+import abc
+
+from _collections_abc import _check_methods
+from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation,
+                 open, open_code, FileIO, BytesIO, StringIO, BufferedReader,
+                 BufferedWriter, BufferedRWPair, BufferedRandom,
+                 IncrementalNewlineDecoder, text_encoding, TextIOWrapper)
+
+
+# for seek()
+SEEK_SET = 0
+SEEK_CUR = 1
+SEEK_END = 2
+
+# Declaring ABCs in C is tricky so we do it here.
+# Method descriptions and default implementations are inherited from the C
+# version however.
+class IOBase(_io._IOBase, metaclass=abc.ABCMeta):
+    __doc__ = _io._IOBase.__doc__
+
+class RawIOBase(_io._RawIOBase, IOBase):
+    __doc__ = _io._RawIOBase.__doc__
+
+class BufferedIOBase(_io._BufferedIOBase, IOBase):
+    __doc__ = _io._BufferedIOBase.__doc__
+
+class TextIOBase(_io._TextIOBase, IOBase):
+    __doc__ = _io._TextIOBase.__doc__
+
+RawIOBase.register(FileIO)
+
+for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom,
+              BufferedRWPair):
+    BufferedIOBase.register(klass)
+
+for klass in (StringIO, TextIOWrapper):
+    TextIOBase.register(klass)
+del klass
+
+try:
+    from _io import _WindowsConsoleIO
+except ImportError:
+    pass
+else:
+    RawIOBase.register(_WindowsConsoleIO)
+
+#
+# Static Typing Support
+#
+
+GenericAlias = type(list[int])
+
+
+class Reader(metaclass=abc.ABCMeta):
+    """Protocol for simple I/O reader instances.
+
+    This protocol only supports blocking I/O.
+    """
+
+    __slots__ = ()
+
+    @abc.abstractmethod
+    def read(self, size=..., /):
+        """Read data from the input stream and return it.
+
+        If *size* is specified, at most *size* items (bytes/characters) will be
+        read.
+        """
+
+    @classmethod
+    def __subclasshook__(cls, C):
+        if cls is Reader:
+            return _check_methods(C, "read")
+        return NotImplemented
+
+    __class_getitem__ = classmethod(GenericAlias)
+
+
+class Writer(metaclass=abc.ABCMeta):
+    """Protocol for simple I/O writer instances.
+
+    This protocol only supports blocking I/O.
+    """
+
+    __slots__ = ()
+
+    @abc.abstractmethod
+    def write(self, data, /):
+        """Write *data* to the output stream and return the number of items written."""
+
+    @classmethod
+    def __subclasshook__(cls, C):
+        if cls is Writer:
+            return _check_methods(C, "write")
+        return NotImplemented
+
+    __class_getitem__ = classmethod(GenericAlias)
diff --git a/Python314_4_x64_Template/Lib/ipaddress.py b/Python314_4_x64_Template/Lib/ipaddress.py
new file mode 100644
index 00000000..ca732e4f
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/ipaddress.py
@@ -0,0 +1,2417 @@
+# Copyright 2007 Google Inc.
+#  Licensed to PSF under a Contributor Agreement.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
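+
+A small taste (illustrative):
+
+    >>> ip_network('192.0.2.0/24').num_addresses
+    256
+    >>> ip_address('192.0.2.1') in ip_network('192.0.2.0/24')
+    True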
+ +""" + +__version__ = '1.0' + + +import functools + +IPV4LENGTH = 32 +IPV6LENGTH = 128 + + +class AddressValueError(ValueError): + """A Value Error related to the address.""" + + +class NetmaskValueError(ValueError): + """A Value Error related to the netmask.""" + + +def ip_address(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Address or IPv6Address object. + + Raises: + ValueError: if the *address* passed isn't either a v4 or a v6 + address + + """ + try: + return IPv4Address(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Address(address) + except (AddressValueError, NetmaskValueError): + pass + + raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 address') + + +def ip_network(address, strict=True): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP network. Either IPv4 or + IPv6 networks may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Network or IPv6Network object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. Or if the network has host bits set. + + """ + try: + return IPv4Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 network') + + +def ip_interface(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Interface or IPv6Interface object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. + + Notes: + The IPv?Interface classes describe an Address on a particular + Network, so they're basically a combination of both the Address + and Network classes. + + """ + try: + return IPv4Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 interface') + + +def v4_int_to_packed(address): + """Represent an address as 4 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv4 IP address. + + Returns: + The integer address packed as 4 bytes in network (big-endian) order. + + Raises: + ValueError: If the integer is negative or too large to be an + IPv4 IP address. + + """ + try: + return address.to_bytes(4) # big endian + except OverflowError: + raise ValueError("Address negative or too large for IPv4") + + +def v6_int_to_packed(address): + """Represent an address as 16 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv6 IP address. + + Returns: + The integer address packed as 16 bytes in network (big-endian) order. 
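+
+    For example (illustrative):
+
+        >>> v6_int_to_packed(1).hex()
+        '00000000000000000000000000000001'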
+ + """ + try: + return address.to_bytes(16) # big endian + except OverflowError: + raise ValueError("Address negative or too large for IPv6") + + +def _split_optional_netmask(address): + """Helper to split the netmask and raise AddressValueError if needed""" + addr = str(address).split('/') + if len(addr) > 2: + raise AddressValueError(f"Only one '/' permitted in {address!r}") + return addr + + +def _find_address_range(addresses): + """Find a sequence of sorted deduplicated IPv#Address. + + Args: + addresses: a list of IPv#Address objects. + + Yields: + A tuple containing the first and last IP addresses in the sequence. + + """ + it = iter(addresses) + first = last = next(it) + for ip in it: + if ip._ip != last._ip + 1: + yield first, last + first = ip + last = ip + yield first, last + + +def _count_righthand_zero_bits(number, bits): + """Count the number of zero bits on the right hand side. + + Args: + number: an integer. + bits: maximum number of bits to count. + + Returns: + The number of zero bits on the right hand side of the number. + + """ + if number == 0: + return bits + return min(bits, (~number & (number-1)).bit_length()) + + +def summarize_address_range(first, last): + """Summarize a network range given the first and last IP addresses. + + Example: + >>> list(summarize_address_range(IPv4Address('192.0.2.0'), + ... IPv4Address('192.0.2.130'))) + ... #doctest: +NORMALIZE_WHITESPACE + [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), + IPv4Network('192.0.2.130/32')] + + Args: + first: the first IPv4Address or IPv6Address in the range. + last: the last IPv4Address or IPv6Address in the range. + + Returns: + An iterator of the summarized IPv(4|6) network objects. + + Raise: + TypeError: + If the first and last objects are not IP addresses. + If the first and last objects are not the same version. + ValueError: + If the last object is not greater than the first. + If the version of the first address is not 4 or 6. + + """ + if (not (isinstance(first, _BaseAddress) and + isinstance(last, _BaseAddress))): + raise TypeError('first and last must be IP addresses, not networks') + if first.version != last.version: + raise TypeError("%s and %s are not of the same version" % ( + first, last)) + if first > last: + raise ValueError('last IP address must be greater than first') + + if first.version == 4: + ip = IPv4Network + elif first.version == 6: + ip = IPv6Network + else: + raise ValueError('unknown IP version') + + ip_bits = first.max_prefixlen + first_int = first._ip + last_int = last._ip + while first_int <= last_int: + nbits = min(_count_righthand_zero_bits(first_int, ip_bits), + (last_int - first_int + 1).bit_length() - 1) + net = ip((first_int, ip_bits - nbits)) + yield net + first_int += 1 << nbits + if first_int - 1 == ip._ALL_ONES: + break + + +def _collapse_addresses_internal(addresses): + """Loops through the addresses, collapsing concurrent netblocks. + + Example: + + ip1 = IPv4Network('192.0.2.0/26') + ip2 = IPv4Network('192.0.2.64/26') + ip3 = IPv4Network('192.0.2.128/26') + ip4 = IPv4Network('192.0.2.192/26') + + _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> + [IPv4Network('192.0.2.0/24')] + + This shouldn't be called directly; it is called via + collapse_addresses([]). + + Args: + addresses: A list of IPv4Network's or IPv6Network's + + Returns: + A list of IPv4Network's or IPv6Network's depending on what we were + passed. 
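+
+    (Despite the wording above, this is implemented as a generator and
+    yields the collapsed networks lazily.)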
+ + """ + # First merge + to_merge = list(addresses) + subnets = {} + while to_merge: + net = to_merge.pop() + supernet = net.supernet() + existing = subnets.get(supernet) + if existing is None: + subnets[supernet] = net + elif existing != net: + # Merge consecutive subnets + del subnets[supernet] + to_merge.append(supernet) + # Then iterate over resulting networks, skipping subsumed subnets + last = None + for net in sorted(subnets.values()): + if last is not None: + # Since they are sorted, last.network_address <= net.network_address + # is a given. + if last.broadcast_address >= net.broadcast_address: + continue + yield net + last = net + + +def collapse_addresses(addresses): + """Collapse a list of IP objects. + + Example: + collapse_addresses([IPv4Network('192.0.2.0/25'), + IPv4Network('192.0.2.128/25')]) -> + [IPv4Network('192.0.2.0/24')] + + Args: + addresses: An iterable of IPv4Network or IPv6Network objects. + + Returns: + An iterator of the collapsed IPv(4|6)Network objects. + + Raises: + TypeError: If passed a list of mixed version objects. + + """ + addrs = [] + ips = [] + nets = [] + + # split IP addresses and networks + for ip in addresses: + if isinstance(ip, _BaseAddress): + if ips and ips[-1].version != ip.version: + raise TypeError("%s and %s are not of the same version" % ( + ip, ips[-1])) + ips.append(ip) + elif ip._prefixlen == ip.max_prefixlen: + if ips and ips[-1].version != ip.version: + raise TypeError("%s and %s are not of the same version" % ( + ip, ips[-1])) + try: + ips.append(ip.ip) + except AttributeError: + ips.append(ip.network_address) + else: + if nets and nets[-1].version != ip.version: + raise TypeError("%s and %s are not of the same version" % ( + ip, nets[-1])) + nets.append(ip) + + # sort and dedup + ips = sorted(set(ips)) + + # find consecutive address ranges in the sorted sequence and summarize them + if ips: + for first, last in _find_address_range(ips): + addrs.extend(summarize_address_range(first, last)) + + return _collapse_addresses_internal(addrs + nets) + + +def get_mixed_type_key(obj): + """Return a key suitable for sorting between networks and addresses. + + Address and Network objects are not sortable by default; they're + fundamentally different so the expression + + IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') + + doesn't make any sense. There are some times however, where you may wish + to have ipaddress sort these for you anyway. If you need to do this, you + can use this function as the key= argument to sorted(). + + Args: + obj: either a Network or Address object. + Returns: + appropriate key. 
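+
+    For example (illustrative):
+
+        >>> sorted([IPv4Address('192.0.2.1'), IPv4Network('192.0.2.0/24')],
+        ...        key=get_mixed_type_key)
+        [IPv4Network('192.0.2.0/24'), IPv4Address('192.0.2.1')]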
+ + """ + if isinstance(obj, _BaseNetwork): + return obj._get_networks_key() + elif isinstance(obj, _BaseAddress): + return obj._get_address_key() + return NotImplemented + + +class _IPAddressBase: + + """The mother class.""" + + __slots__ = () + + @property + def exploded(self): + """Return the longhand version of the IP address as a string.""" + return self._explode_shorthand_ip_string() + + @property + def compressed(self): + """Return the shorthand version of the IP address as a string.""" + return str(self) + + @property + def reverse_pointer(self): + """The name of the reverse DNS pointer for the IP address, e.g.: + >>> ipaddress.ip_address("127.0.0.1").reverse_pointer + '1.0.0.127.in-addr.arpa' + >>> ipaddress.ip_address("2001:db8::1").reverse_pointer + '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' + + """ + return self._reverse_pointer() + + def _check_int_address(self, address): + if address < 0: + msg = "%d (< 0) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, self.version)) + if address > self._ALL_ONES: + msg = "%d (>= 2**%d) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, self.max_prefixlen, + self.version)) + + def _check_packed_address(self, address, expected_len): + address_len = len(address) + if address_len != expected_len: + msg = "%r (len %d != %d) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, address_len, + expected_len, self.version)) + + @classmethod + def _ip_int_from_prefix(cls, prefixlen): + """Turn the prefix length into a bitwise netmask + + Args: + prefixlen: An integer, the prefix length. + + Returns: + An integer. + + """ + return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) + + @classmethod + def _prefix_from_ip_int(cls, ip_int): + """Return prefix length from the bitwise netmask. + + Args: + ip_int: An integer, the netmask in expanded bitwise format + + Returns: + An integer, the prefix length. + + Raises: + ValueError: If the input intermingles zeroes & ones + """ + trailing_zeroes = _count_righthand_zero_bits(ip_int, + cls.max_prefixlen) + prefixlen = cls.max_prefixlen - trailing_zeroes + leading_ones = ip_int >> trailing_zeroes + all_ones = (1 << prefixlen) - 1 + if leading_ones != all_ones: + byteslen = cls.max_prefixlen // 8 + details = ip_int.to_bytes(byteslen, 'big') + msg = 'Netmask pattern %r mixes zeroes & ones' + raise ValueError(msg % details) + return prefixlen + + @classmethod + def _report_invalid_netmask(cls, netmask_str): + msg = '%r is not a valid netmask' % netmask_str + raise NetmaskValueError(msg) from None + + @classmethod + def _prefix_from_prefix_string(cls, prefixlen_str): + """Return prefix length from a numeric string + + Args: + prefixlen_str: The string to be converted + + Returns: + An integer, the prefix length. 
+ + Raises: + NetmaskValueError: If the input is not a valid netmask + """ + # int allows a leading +/- as well as surrounding whitespace, + # so we ensure that isn't the case + if not (prefixlen_str.isascii() and prefixlen_str.isdigit()): + cls._report_invalid_netmask(prefixlen_str) + try: + prefixlen = int(prefixlen_str) + except ValueError: + cls._report_invalid_netmask(prefixlen_str) + if not (0 <= prefixlen <= cls.max_prefixlen): + cls._report_invalid_netmask(prefixlen_str) + return prefixlen + + @classmethod + def _prefix_from_ip_string(cls, ip_str): + """Turn a netmask/hostmask string into a prefix length + + Args: + ip_str: The netmask/hostmask to be converted + + Returns: + An integer, the prefix length. + + Raises: + NetmaskValueError: If the input is not a valid netmask/hostmask + """ + # Parse the netmask/hostmask like an IP address. + try: + ip_int = cls._ip_int_from_string(ip_str) + except AddressValueError: + cls._report_invalid_netmask(ip_str) + + # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). + # Note that the two ambiguous cases (all-ones and all-zeroes) are + # treated as netmasks. + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + pass + + # Invert the bits, and try matching a /0+1+/ hostmask instead. + ip_int ^= cls._ALL_ONES + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + cls._report_invalid_netmask(ip_str) + + @classmethod + def _split_addr_prefix(cls, address): + """Helper function to parse address of Network/Interface. + + Arg: + address: Argument of Network/Interface. + + Returns: + (addr, prefix) tuple. + """ + # a packed address or integer + if isinstance(address, (bytes, int)): + return address, cls.max_prefixlen + + if not isinstance(address, tuple): + # Assume input argument to be string or any object representation + # which converts into a formatted IP prefix string. + address = _split_optional_netmask(address) + + # Constructing from a tuple (addr, [mask]) + if len(address) > 1: + return address + return address[0], cls.max_prefixlen + + def __reduce__(self): + return self.__class__, (str(self),) + + +_address_fmt_re = None + +@functools.total_ordering +class _BaseAddress(_IPAddressBase): + + """A generic IP object. + + This IP class contains the version independent methods which are + used by single IP addresses. + """ + + __slots__ = () + + def __int__(self): + return self._ip + + def __eq__(self, other): + try: + return (self._ip == other._ip + and self.version == other.version) + except AttributeError: + return NotImplemented + + def __lt__(self, other): + if not isinstance(other, _BaseAddress): + return NotImplemented + if self.version != other.version: + raise TypeError('%s and %s are not of the same version' % ( + self, other)) + if self._ip != other._ip: + return self._ip < other._ip + return False + + # Shorthand for Integer addition and subtraction. This is not + # meant to ever support addition/subtraction of addresses. 
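+    # For example (illustrative): IPv4Address('192.0.2.0') + 1 yields
+    # IPv4Address('192.0.2.1'); adding two addresses raises TypeError.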
+ def __add__(self, other): + if not isinstance(other, int): + return NotImplemented + return self.__class__(int(self) + other) + + def __sub__(self, other): + if not isinstance(other, int): + return NotImplemented + return self.__class__(int(self) - other) + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, str(self)) + + def __str__(self): + return str(self._string_from_ip_int(self._ip)) + + def __hash__(self): + return hash(hex(int(self._ip))) + + def _get_address_key(self): + return (self.version, self) + + def __reduce__(self): + return self.__class__, (self._ip,) + + def __format__(self, fmt): + """Returns an IP address as a formatted string. + + Supported presentation types are: + 's': returns the IP address as a string (default) + 'b': converts to binary and returns a zero-padded string + 'X' or 'x': converts to upper- or lower-case hex and returns a zero-padded string + 'n': the same as 'b' for IPv4 and 'x' for IPv6 + + For binary and hex presentation types, the alternate form specifier + '#' and the grouping option '_' are supported. + """ + + # Support string formatting + if not fmt or fmt[-1] == 's': + return format(str(self), fmt) + + # From here on down, support for 'bnXx' + global _address_fmt_re + if _address_fmt_re is None: + import re + _address_fmt_re = re.compile('(#?)(_?)([xbnX])') + + m = _address_fmt_re.fullmatch(fmt) + if not m: + return super().__format__(fmt) + + alternate, grouping, fmt_base = m.groups() + + # Set some defaults + if fmt_base == 'n': + if self.version == 4: + fmt_base = 'b' # Binary is default for ipv4 + else: + fmt_base = 'x' # Hex is default for ipv6 + + if fmt_base == 'b': + padlen = self.max_prefixlen + else: + padlen = self.max_prefixlen // 4 + + if grouping: + padlen += padlen // 4 - 1 + + if alternate: + padlen += 2 # 0b or 0x + + return format(int(self), f'{alternate}0{padlen}{grouping}{fmt_base}') + + +@functools.total_ordering +class _BaseNetwork(_IPAddressBase): + """A generic IP network object. + + This IP class contains the version independent methods which are + used by networks. + """ + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, str(self)) + + def __str__(self): + return '%s/%d' % (self.network_address, self.prefixlen) + + def hosts(self): + """Generate Iterator over usable hosts in a network. + + This is like __iter__ except it doesn't return the network + or broadcast addresses. 
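+
+        For example (an illustrative doctest, not in the upstream source):
+
+            >>> list(ip_network('192.0.2.0/30').hosts())
+            [IPv4Address('192.0.2.1'), IPv4Address('192.0.2.2')]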
+ + """ + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in range(network + 1, broadcast): + yield self._address_class(x) + + def __iter__(self): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in range(network, broadcast + 1): + yield self._address_class(x) + + def __getitem__(self, n): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + if n >= 0: + if network + n > broadcast: + raise IndexError('address out of range') + return self._address_class(network + n) + else: + n += 1 + if broadcast + n < network: + raise IndexError('address out of range') + return self._address_class(broadcast + n) + + def __lt__(self, other): + if not isinstance(other, _BaseNetwork): + return NotImplemented + if self.version != other.version: + raise TypeError('%s and %s are not of the same version' % ( + self, other)) + if self.network_address != other.network_address: + return self.network_address < other.network_address + if self.netmask != other.netmask: + return self.netmask < other.netmask + return False + + def __eq__(self, other): + try: + return (self.version == other.version and + self.network_address == other.network_address and + int(self.netmask) == int(other.netmask)) + except AttributeError: + return NotImplemented + + def __hash__(self): + return hash((int(self.network_address), int(self.netmask))) + + def __contains__(self, other): + # always false if one is v4 and the other is v6. + if self.version != other.version: + return False + # dealing with another network. + if isinstance(other, _BaseNetwork): + return False + # dealing with another address + else: + # address + return other._ip & self.netmask._ip == self.network_address._ip + + def overlaps(self, other): + """Tell if self is partly contained in other.""" + return self.network_address in other or ( + self.broadcast_address in other or ( + other.network_address in self or ( + other.broadcast_address in self))) + + @functools.cached_property + def broadcast_address(self): + return self._address_class(int(self.network_address) | + int(self.hostmask)) + + @functools.cached_property + def hostmask(self): + return self._address_class(int(self.netmask) ^ self._ALL_ONES) + + @property + def with_prefixlen(self): + return '%s/%d' % (self.network_address, self._prefixlen) + + @property + def with_netmask(self): + return '%s/%s' % (self.network_address, self.netmask) + + @property + def with_hostmask(self): + return '%s/%s' % (self.network_address, self.hostmask) + + @property + def num_addresses(self): + """Number of hosts in the current subnet.""" + return int(self.broadcast_address) - int(self.network_address) + 1 + + @property + def _address_class(self): + # Returning bare address objects (rather than interfaces) allows for + # more consistent behaviour across the network address, broadcast + # address and individual host addresses. + msg = '%200s has no associated address class' % (type(self),) + raise NotImplementedError(msg) + + @property + def prefixlen(self): + return self._prefixlen + + def address_exclude(self, other): + """Remove an address from a larger block. 
+
+        For example:
+
+            addr1 = ip_network('192.0.2.0/28')
+            addr2 = ip_network('192.0.2.1/32')
+            list(addr1.address_exclude(addr2)) =
+                [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
+                 IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
+
+        or IPv6:
+
+            addr1 = ip_network('2001:db8::/32')
+            addr2 = ip_network('2001:db8::1/128')
+            list(addr1.address_exclude(addr2)) =
+                [ip_network('2001:db8::/128'),
+                 ip_network('2001:db8::2/127'),
+                 ip_network('2001:db8::4/126'),
+                 ip_network('2001:db8::8/125'),
+                 ...
+                 ip_network('2001:db8:8000::/33')]
+
+        Args:
+            other: An IPv4Network or IPv6Network object of the same type.
+
+        Returns:
+            An iterator of the IPv(4|6)Network objects which is self
+            minus other.
+
+        Raises:
+            TypeError: If self and other are of differing address
+              versions, or if other is not a network object.
+            ValueError: If other is not completely contained by self.
+
+        """
+        if not self.version == other.version:
+            raise TypeError("%s and %s are not of the same version" % (
+                             self, other))
+
+        if not isinstance(other, _BaseNetwork):
+            raise TypeError("%s is not a network object" % other)
+
+        if not other.subnet_of(self):
+            raise ValueError('%s not contained in %s' % (other, self))
+        if other == self:
+            return
+
+        # Make sure we're comparing the network of other.
+        other = other.__class__('%s/%s' % (other.network_address,
+                                           other.prefixlen))
+
+        s1, s2 = self.subnets()
+        while s1 != other and s2 != other:
+            if other.subnet_of(s1):
+                yield s2
+                s1, s2 = s1.subnets()
+            elif other.subnet_of(s2):
+                yield s1
+                s1, s2 = s2.subnets()
+            else:
+                # If we got here, there's a bug somewhere.
+                raise AssertionError('Error performing exclusion: '
+                                     's1: %s s2: %s other: %s' %
+                                     (s1, s2, other))
+        if s1 == other:
+            yield s2
+        elif s2 == other:
+            yield s1
+        else:
+            # If we got here, there's a bug somewhere.
+            raise AssertionError('Error performing exclusion: '
+                                 's1: %s s2: %s other: %s' %
+                                 (s1, s2, other))
+
+    def compare_networks(self, other):
+        """Compare two IP objects.
+
+        This is only concerned about the comparison of the integer
+        representation of the network addresses. This means that the
+        host bits aren't considered at all in this method. If you want
+        to compare host bits, you can easily enough do a
+        'HostA._ip < HostB._ip'
+
+        Args:
+            other: An IP object.
+
+        Returns:
+            If the IP versions of self and other are the same, returns:
+
+            -1 if self < other:
+              eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
+              IPv6Network('2001:db8::1000/124') <
+                  IPv6Network('2001:db8::2000/124')
+            0 if self == other
+              eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
+              IPv6Network('2001:db8::1000/124') ==
+                  IPv6Network('2001:db8::1000/124')
+            1 if self > other
+              eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
+                  IPv6Network('2001:db8::2000/124') >
+                      IPv6Network('2001:db8::1000/124')
+
+        Raises:
+            TypeError if the IP versions are different.
+
+        """
+        # does this need to raise a ValueError?
+        if self.version != other.version:
+            raise TypeError('%s and %s are not of the same type' % (
+                             self, other))
+        # self.version == other.version below here:
+        if self.network_address < other.network_address:
+            return -1
+        if self.network_address > other.network_address:
+            return 1
+        # self.network_address == other.network_address below here:
+        if self.netmask < other.netmask:
+            return -1
+        if self.netmask > other.netmask:
+            return 1
+        return 0
+
+    def _get_networks_key(self):
+        """Network-only key function.
+ + Returns an object that identifies this address' network and + netmask. This function is a suitable "key" argument for sorted() + and list.sort(). + + """ + return (self.version, self.network_address, self.netmask) + + def subnets(self, prefixlen_diff=1, new_prefix=None): + """The subnets which join to make the current subnet. + + In the case that self contains only one IP + (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 + for IPv6), yield an iterator with just ourself. + + Args: + prefixlen_diff: An integer, the amount the prefix length + should be increased by. This should not be set if + new_prefix is also set. + new_prefix: The desired new prefix length. This must be a + larger number (smaller prefix) than the existing prefix. + This should not be set if prefixlen_diff is also set. + + Returns: + An iterator of IPv(4|6) objects. + + Raises: + ValueError: The prefixlen_diff is too small or too large. + OR + prefixlen_diff and new_prefix are both set or new_prefix + is a smaller number than the current prefix (smaller + number means a larger network) + + """ + if self._prefixlen == self.max_prefixlen: + yield self + return + + if new_prefix is not None: + if new_prefix < self._prefixlen: + raise ValueError('new prefix must be longer') + if prefixlen_diff != 1: + raise ValueError('cannot set prefixlen_diff and new_prefix') + prefixlen_diff = new_prefix - self._prefixlen + + if prefixlen_diff < 0: + raise ValueError('prefix length diff must be > 0') + new_prefixlen = self._prefixlen + prefixlen_diff + + if new_prefixlen > self.max_prefixlen: + raise ValueError( + 'prefix length diff %d is invalid for netblock %s' % ( + new_prefixlen, self)) + + start = int(self.network_address) + end = int(self.broadcast_address) + 1 + step = (int(self.hostmask) + 1) >> prefixlen_diff + for new_addr in range(start, end, step): + current = self.__class__((new_addr, new_prefixlen)) + yield current + + def supernet(self, prefixlen_diff=1, new_prefix=None): + """The supernet containing the current network. + + Args: + prefixlen_diff: An integer, the amount the prefix length of + the network should be decreased by. For example, given a + /24 network and a prefixlen_diff of 3, a supernet with a + /21 netmask is returned. + + Returns: + An IPv4 network object. + + Raises: + ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have + a negative prefix length. + OR + If prefixlen_diff and new_prefix are both set or new_prefix is a + larger number than the current prefix (larger number means a + smaller network) + + """ + if self._prefixlen == 0: + return self + + if new_prefix is not None: + if new_prefix > self._prefixlen: + raise ValueError('new prefix must be shorter') + if prefixlen_diff != 1: + raise ValueError('cannot set prefixlen_diff and new_prefix') + prefixlen_diff = self._prefixlen - new_prefix + + new_prefixlen = self.prefixlen - prefixlen_diff + if new_prefixlen < 0: + raise ValueError( + 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % + (self.prefixlen, prefixlen_diff)) + return self.__class__(( + int(self.network_address) & (int(self.netmask) << prefixlen_diff), + new_prefixlen + )) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is a multicast address. + See RFC 2373 2.7 for details. 
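+
+            For example (an illustrative doctest, not in the upstream source):
+
+            >>> ip_network('ff00::/8').is_multicast
+            True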
+ + """ + return (self.network_address.is_multicast and + self.broadcast_address.is_multicast) + + @staticmethod + def _is_subnet_of(a, b): + try: + # Always false if one is v4 and the other is v6. + if a.version != b.version: + raise TypeError(f"{a} and {b} are not of the same version") + return (b.network_address <= a.network_address and + b.broadcast_address >= a.broadcast_address) + except AttributeError: + raise TypeError(f"Unable to test subnet containment " + f"between {a} and {b}") + + def subnet_of(self, other): + """Return True if this network is a subnet of other.""" + return self._is_subnet_of(self, other) + + def supernet_of(self, other): + """Return True if this network is a supernet of other.""" + return self._is_subnet_of(other, self) + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within one of the + reserved IPv6 Network ranges. + + """ + return (self.network_address.is_reserved and + self.broadcast_address.is_reserved) + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is reserved per RFC 4291. + + """ + return (self.network_address.is_link_local and + self.broadcast_address.is_link_local) + + @property + def is_private(self): + """Test if this network belongs to a private range. + + Returns: + A boolean, True if the network is reserved per + iana-ipv4-special-registry or iana-ipv6-special-registry. + + """ + return any(self.network_address in priv_network and + self.broadcast_address in priv_network + for priv_network in self._constants._private_networks) and all( + self.network_address not in network and + self.broadcast_address not in network + for network in self._constants._private_networks_exceptions + ) + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, True if the address is not reserved per + iana-ipv4-special-registry or iana-ipv6-special-registry. + + """ + return not self.is_private + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 2373 2.5.2. + + """ + return (self.network_address.is_unspecified and + self.broadcast_address.is_unspecified) + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback address as defined in + RFC 2373 2.5.3. + + """ + return (self.network_address.is_loopback and + self.broadcast_address.is_loopback) + + +class _BaseConstants: + + _private_networks = [] + + +_BaseNetwork._constants = _BaseConstants + + +class _BaseV4: + + """Base IPv4 object. + + The following methods are used by IPv4 objects in both single IP + addresses and networks. + + """ + + __slots__ = () + version = 4 + # Equivalent to 255.255.255.255 or 32 bits of 1's. + _ALL_ONES = (2**IPV4LENGTH) - 1 + + max_prefixlen = IPV4LENGTH + # There are only a handful of valid v4 netmasks, so we cache them all + # when constructed (see _make_netmask()). + _netmask_cache = {} + + def _explode_shorthand_ip_string(self): + return str(self) + + @classmethod + def _make_netmask(cls, arg): + """Make a (netmask, prefix_len) tuple from the given argument. + + Argument can be: + - an integer (the prefix length) + - a string representing the prefix length (e.g. "24") + - a string representing the prefix netmask (e.g. 
"255.255.255.0") + """ + if arg not in cls._netmask_cache: + if isinstance(arg, int): + prefixlen = arg + if not (0 <= prefixlen <= cls.max_prefixlen): + cls._report_invalid_netmask(prefixlen) + else: + try: + # Check for a netmask in prefix length form + prefixlen = cls._prefix_from_prefix_string(arg) + except NetmaskValueError: + # Check for a netmask or hostmask in dotted-quad form. + # This may raise NetmaskValueError. + prefixlen = cls._prefix_from_ip_string(arg) + netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) + cls._netmask_cache[arg] = netmask, prefixlen + return cls._netmask_cache[arg] + + @classmethod + def _ip_int_from_string(cls, ip_str): + """Turn the given IP string into an integer for comparison. + + Args: + ip_str: A string, the IP ip_str. + + Returns: + The IP ip_str as an integer. + + Raises: + AddressValueError: if ip_str isn't a valid IPv4 Address. + + """ + if not ip_str: + raise AddressValueError('Address cannot be empty') + + octets = ip_str.split('.') + if len(octets) != 4: + raise AddressValueError("Expected 4 octets in %r" % ip_str) + + try: + return int.from_bytes(map(cls._parse_octet, octets), 'big') + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) from None + + @classmethod + def _parse_octet(cls, octet_str): + """Convert a decimal octet into an integer. + + Args: + octet_str: A string, the number to parse. + + Returns: + The octet as an integer. + + Raises: + ValueError: if the octet isn't strictly a decimal from [0..255]. + + """ + if not octet_str: + raise ValueError("Empty octet not permitted") + # Reject non-ASCII digits. + if not (octet_str.isascii() and octet_str.isdigit()): + msg = "Only decimal digits permitted in %r" + raise ValueError(msg % octet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(octet_str) > 3: + msg = "At most 3 characters permitted in %r" + raise ValueError(msg % octet_str) + # Handle leading zeros as strict as glibc's inet_pton() + # See security bug bpo-36384 + if octet_str != '0' and octet_str[0] == '0': + msg = "Leading zeros are not permitted in %r" + raise ValueError(msg % octet_str) + # Convert to integer (we know digits are legal) + octet_int = int(octet_str, 10) + if octet_int > 255: + raise ValueError("Octet %d (> 255) not permitted" % octet_int) + return octet_int + + @classmethod + def _string_from_ip_int(cls, ip_int): + """Turns a 32-bit integer into dotted decimal notation. + + Args: + ip_int: An integer, the IP address. + + Returns: + The IP address as a string in dotted decimal notation. + + """ + return '.'.join(map(str, ip_int.to_bytes(4, 'big'))) + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv4 address. + + This implements the method described in RFC1035 3.5. + + """ + reverse_octets = str(self).split('.')[::-1] + return '.'.join(reverse_octets) + '.in-addr.arpa' + +class IPv4Address(_BaseV4, _BaseAddress): + + """Represent and manipulate single IPv4 Addresses.""" + + __slots__ = ('_ip', '__weakref__') + + def __init__(self, address): + + """ + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv4Address('192.0.2.1') == IPv4Address(3221225985). + or, more generally + IPv4Address(int(IPv4Address('192.0.2.1'))) == + IPv4Address('192.0.2.1') + + Raises: + AddressValueError: If ipaddress isn't a valid IPv4 address. + + """ + # Efficient constructor from integer. 
+ if isinstance(address, int): + self._check_int_address(address) + self._ip = address + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 4) + self._ip = int.from_bytes(address) # big endian + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = str(address) + if '/' in addr_str: + raise AddressValueError(f"Unexpected '/' in {address!r}") + self._ip = self._ip_int_from_string(addr_str) + + @property + def packed(self): + """The binary representation of this address.""" + return v4_int_to_packed(self._ip) + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within the + reserved IPv4 Network range. + + """ + return self in self._constants._reserved_network + + @property + @functools.lru_cache() + def is_private(self): + """``True`` if the address is defined as not globally reachable by + iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ + (for IPv6) with the following exceptions: + + * ``is_private`` is ``False`` for ``100.64.0.0/10`` + * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the + semantics of the underlying IPv4 addresses and the following condition holds + (see :attr:`IPv6Address.ipv4_mapped`):: + + address.is_private == address.ipv4_mapped.is_private + + ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10`` + IPv4 range where they are both ``False``. + """ + return ( + any(self in net for net in self._constants._private_networks) + and all(self not in net for net in self._constants._private_networks_exceptions) + ) + + @property + @functools.lru_cache() + def is_global(self): + """``True`` if the address is defined as globally reachable by + iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ + (for IPv6) with the following exception: + + For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the + semantics of the underlying IPv4 addresses and the following condition holds + (see :attr:`IPv6Address.ipv4_mapped`):: + + address.is_global == address.ipv4_mapped.is_global + + ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10`` + IPv4 range where they are both ``False``. + """ + return self not in self._constants._public_network and not self.is_private + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is multicast. + See RFC 3171 for details. + + """ + return self in self._constants._multicast_network + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 5735 3. + + """ + return self == self._constants._unspecified_address + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback per RFC 3330. + + """ + return self in self._constants._loopback_network + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is link-local per RFC 3927. + + """ + return self in self._constants._linklocal_network + + @property + def ipv6_mapped(self): + """Return the IPv4-mapped IPv6 address. 
+
+        Returns:
+            The IPv4-mapped IPv6 address per RFC 4291.
+
+        """
+        return IPv6Address(f'::ffff:{self}')
+
+
+class IPv4Interface(IPv4Address):
+
+    def __init__(self, address):
+        addr, mask = self._split_addr_prefix(address)
+
+        IPv4Address.__init__(self, addr)
+        self.network = IPv4Network((addr, mask), strict=False)
+        self.netmask = self.network.netmask
+        self._prefixlen = self.network._prefixlen
+
+    @functools.cached_property
+    def hostmask(self):
+        return self.network.hostmask
+
+    def __str__(self):
+        return '%s/%d' % (self._string_from_ip_int(self._ip),
+                          self._prefixlen)
+
+    def __eq__(self, other):
+        address_equal = IPv4Address.__eq__(self, other)
+        if address_equal is NotImplemented or not address_equal:
+            return address_equal
+        try:
+            return self.network == other.network
+        except AttributeError:
+            # An interface with an associated network is NOT the
+            # same as an unassociated address. That's why the hash
+            # takes the extra info into account.
+            return False
+
+    def __lt__(self, other):
+        address_less = IPv4Address.__lt__(self, other)
+        if address_less is NotImplemented:
+            return NotImplemented
+        try:
+            return (self.network < other.network or
+                    self.network == other.network and address_less)
+        except AttributeError:
+            # We *do* allow addresses and interfaces to be sorted. The
+            # unassociated address is considered less than all interfaces.
+            return False
+
+    def __hash__(self):
+        return hash((self._ip, self._prefixlen, int(self.network.network_address)))
+
+    __reduce__ = _IPAddressBase.__reduce__
+
+    @property
+    def ip(self):
+        return IPv4Address(self._ip)
+
+    @property
+    def with_prefixlen(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self._prefixlen)
+
+    @property
+    def with_netmask(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self.netmask)
+
+    @property
+    def with_hostmask(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+    """This class represents and manipulates 32-bit IPv4 network
+    addresses.
+
+    Attributes: [examples for IPv4Network('192.0.2.0/27')]
+        .network_address: IPv4Address('192.0.2.0')
+        .hostmask: IPv4Address('0.0.0.31')
+        .broadcast_address: IPv4Address('192.0.2.31')
+        .netmask: IPv4Address('255.255.255.224')
+        .prefixlen: 27
+
+    """
+    # Class to use when creating address objects
+    _address_class = IPv4Address
+
+    def __init__(self, address, strict=True):
+        """Instantiate a new IPv4 network object.
+
+        Args:
+            address: A string or integer representing the IP [& network].
+              '192.0.2.0/24'
+              '192.0.2.0/255.255.255.0'
+              '192.0.2.0/0.0.0.255'
+              are all functionally the same in IPv4. Similarly,
+              '192.0.2.1'
+              '192.0.2.1/255.255.255.255'
+              '192.0.2.1/32'
+              are also functionally equivalent. That is to say, failing to
+              provide a subnetmask will create an object with a mask of /32.
+
+              If the mask (portion after the / in the argument) is given in
+              dotted quad form, it is treated as a netmask if it starts with a
+              non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+              starts with a zero field (e.g. 0.255.255.255 == /8), with the
+              single exception of an all-zero mask which is treated as a
+              netmask == /0. If no mask is given, a default of /32 is used.
+
+              Additionally, an integer can be passed, so
+              IPv4Network('192.0.2.1') == IPv4Network(3221225985)
+              or, more generally
+              IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
+                IPv4Interface('192.0.2.1')
+
+        Raises:
+            AddressValueError: If ipaddress isn't a valid IPv4 address.
+            NetmaskValueError: If the netmask isn't valid for
+              an IPv4 address.
+            ValueError: If strict is True and a network address is not
+              supplied.
+        """
+        addr, mask = self._split_addr_prefix(address)
+
+        self.network_address = IPv4Address(addr)
+        self.netmask, self._prefixlen = self._make_netmask(mask)
+        packed = int(self.network_address)
+        if packed & int(self.netmask) != packed:
+            if strict:
+                raise ValueError('%s has host bits set' % self)
+            else:
+                self.network_address = IPv4Address(packed &
+                                                   int(self.netmask))
+
+        if self._prefixlen == (self.max_prefixlen - 1):
+            self.hosts = self.__iter__
+        elif self._prefixlen == (self.max_prefixlen):
+            self.hosts = lambda: iter((IPv4Address(addr),))
+
+    @property
+    @functools.lru_cache()
+    def is_global(self):
+        """Test if this address is allocated for public networks.
+
+        Returns:
+            A boolean, True if the address is not reserved per
+            iana-ipv4-special-registry.
+
+        """
+        return (not (self.network_address in IPv4Network('100.64.0.0/10') and
+                     self.broadcast_address in IPv4Network('100.64.0.0/10')) and
+                not self.is_private)
+
+
+class _IPv4Constants:
+    _linklocal_network = IPv4Network('169.254.0.0/16')
+
+    _loopback_network = IPv4Network('127.0.0.0/8')
+
+    _multicast_network = IPv4Network('224.0.0.0/4')
+
+    _public_network = IPv4Network('100.64.0.0/10')
+
+    # Not globally reachable address blocks listed on
+    # https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
+    _private_networks = [
+        IPv4Network('0.0.0.0/8'),
+        IPv4Network('10.0.0.0/8'),
+        IPv4Network('127.0.0.0/8'),
+        IPv4Network('169.254.0.0/16'),
+        IPv4Network('172.16.0.0/12'),
+        IPv4Network('192.0.0.0/24'),
+        IPv4Network('192.0.0.170/31'),
+        IPv4Network('192.0.2.0/24'),
+        IPv4Network('192.168.0.0/16'),
+        IPv4Network('198.18.0.0/15'),
+        IPv4Network('198.51.100.0/24'),
+        IPv4Network('203.0.113.0/24'),
+        IPv4Network('240.0.0.0/4'),
+        IPv4Network('255.255.255.255/32'),
+    ]
+
+    _private_networks_exceptions = [
+        IPv4Network('192.0.0.9/32'),
+        IPv4Network('192.0.0.10/32'),
+    ]
+
+    _reserved_network = IPv4Network('240.0.0.0/4')
+
+    _unspecified_address = IPv4Address('0.0.0.0')
+
+
+IPv4Address._constants = _IPv4Constants
+IPv4Network._constants = _IPv4Constants
+
+
+class _BaseV6:
+
+    """Base IPv6 object.
+
+    The following methods are used by IPv6 objects in both single IP
+    addresses and networks.
+
+    """
+
+    __slots__ = ()
+    version = 6
+    _ALL_ONES = (2**IPV6LENGTH) - 1
+    _HEXTET_COUNT = 8
+    _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
+    max_prefixlen = IPV6LENGTH
+
+    # There are only a bunch of valid v6 netmasks, so we cache them all
+    # when constructed (see _make_netmask()).
+    _netmask_cache = {}
+
+    @classmethod
+    def _make_netmask(cls, arg):
+        """Make a (netmask, prefix_len) tuple from the given argument.
+
+        Argument can be:
+        - an integer (the prefix length)
+        - a string representing the prefix length (e.g. "64")
+
+        Unlike the IPv4 variant, netmask/hostmask strings are not accepted.
+        """
+        if arg not in cls._netmask_cache:
+            if isinstance(arg, int):
+                prefixlen = arg
+                if not (0 <= prefixlen <= cls.max_prefixlen):
+                    cls._report_invalid_netmask(prefixlen)
+            else:
+                prefixlen = cls._prefix_from_prefix_string(arg)
+            netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
+            cls._netmask_cache[arg] = netmask, prefixlen
+        return cls._netmask_cache[arg]
+
+    @classmethod
+    def _ip_int_from_string(cls, ip_str):
+        """Turn an IPv6 ip_str into an integer.
+
+        Args:
+            ip_str: A string, the IPv6 ip_str.
+ + Returns: + An int, the IPv6 address + + Raises: + AddressValueError: if ip_str isn't a valid IPv6 Address. + + """ + if not ip_str: + raise AddressValueError('Address cannot be empty') + if len(ip_str) > 45: + shorten = ip_str + if len(shorten) > 100: + shorten = f'{ip_str[:45]}({len(ip_str)-90} chars elided){ip_str[-45:]}' + raise AddressValueError(f"At most 45 characters expected in " + f"{shorten!r}") + + # We want to allow more parts than the max to be 'split' + # to preserve the correct error message when there are + # too many parts combined with '::' + _max_parts = cls._HEXTET_COUNT + 1 + parts = ip_str.split(':', maxsplit=_max_parts) + + # An IPv6 address needs at least 2 colons (3 parts). + _min_parts = 3 + if len(parts) < _min_parts: + msg = "At least %d parts expected in %r" % (_min_parts, ip_str) + raise AddressValueError(msg) + + # If the address has an IPv4-style suffix, convert it to hexadecimal. + if '.' in parts[-1]: + try: + ipv4_int = IPv4Address(parts.pop())._ip + except AddressValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) from None + parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) + parts.append('%x' % (ipv4_int & 0xFFFF)) + + # An IPv6 address can't have more than 8 colons (9 parts). + # The extra colon comes from using the "::" notation for a single + # leading or trailing zero part. + if len(parts) > _max_parts: + msg = "At most %d colons permitted in %r" % (_max_parts-1, ip_str) + raise AddressValueError(msg) + + # Disregarding the endpoints, find '::' with nothing in between. + # This indicates that a run of zeroes has been skipped. + skip_index = None + for i in range(1, len(parts) - 1): + if not parts[i]: + if skip_index is not None: + # Can't have more than one '::' + msg = "At most one '::' permitted in %r" % ip_str + raise AddressValueError(msg) + skip_index = i + + # parts_hi is the number of parts to copy from above/before the '::' + # parts_lo is the number of parts to copy from below/after the '::' + if skip_index is not None: + # If we found a '::', then check if it also covers the endpoints. + parts_hi = skip_index + parts_lo = len(parts) - skip_index - 1 + if not parts[0]: + parts_hi -= 1 + if parts_hi: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + parts_lo -= 1 + if parts_lo: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) + if parts_skipped < 1: + msg = "Expected at most %d other parts with '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT-1, ip_str)) + else: + # Otherwise, allocate the entire address to parts_hi. The + # endpoints could still be empty, but _parse_hextet() will check + # for that. + if len(parts) != cls._HEXTET_COUNT: + msg = "Exactly %d parts expected without '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) + if not parts[0]: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_hi = len(parts) + parts_lo = 0 + parts_skipped = 0 + + try: + # Now, parse the hextets into a 128-bit integer. 
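+            # For example (illustrative): for '1:2::7:8', parts_hi=2 and
+            # parts_lo=2, with parts_skipped=4 zero hextets in between.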
+ ip_int = 0 + for i in range(parts_hi): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + ip_int <<= 16 * parts_skipped + for i in range(-parts_lo, 0): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + return ip_int + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) from None + + @classmethod + def _parse_hextet(cls, hextet_str): + """Convert an IPv6 hextet string into an integer. + + Args: + hextet_str: A string, the number to parse. + + Returns: + The hextet as an integer. + + Raises: + ValueError: if the input isn't strictly a hex number from + [0..FFFF]. + + """ + # Reject non-ASCII digits. + if not cls._HEX_DIGITS.issuperset(hextet_str): + raise ValueError("Only hex digits permitted in %r" % hextet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(hextet_str) > 4: + msg = "At most 4 characters permitted in %r" + raise ValueError(msg % hextet_str) + # Length check means we can skip checking the integer value + return int(hextet_str, 16) + + @classmethod + def _compress_hextets(cls, hextets): + """Compresses a list of hextets. + + Compresses a list of strings, replacing the longest continuous + sequence of "0" in the list with "" and adding empty strings at + the beginning or at the end of the string such that subsequently + calling ":".join(hextets) will produce the compressed version of + the IPv6 address. + + Args: + hextets: A list of strings, the hextets to compress. + + Returns: + A list of strings. + + """ + best_doublecolon_start = -1 + best_doublecolon_len = 0 + doublecolon_start = -1 + doublecolon_len = 0 + for index, hextet in enumerate(hextets): + if hextet == '0': + doublecolon_len += 1 + if doublecolon_start == -1: + # Start of a sequence of zeros. + doublecolon_start = index + if doublecolon_len > best_doublecolon_len: + # This is the longest sequence of zeros so far. + best_doublecolon_len = doublecolon_len + best_doublecolon_start = doublecolon_start + else: + doublecolon_len = 0 + doublecolon_start = -1 + + if best_doublecolon_len > 1: + best_doublecolon_end = (best_doublecolon_start + + best_doublecolon_len) + # For zeros at the end of the address. + if best_doublecolon_end == len(hextets): + hextets += [''] + hextets[best_doublecolon_start:best_doublecolon_end] = [''] + # For zeros at the beginning of the address. + if best_doublecolon_start == 0: + hextets = [''] + hextets + + return hextets + + @classmethod + def _string_from_ip_int(cls, ip_int=None): + """Turns a 128-bit integer into hexadecimal notation. + + Args: + ip_int: An integer, the IP address. + + Returns: + A string, the hexadecimal representation of the address. + + Raises: + ValueError: The address is bigger than 128 bits of all ones. + + """ + if ip_int is None: + ip_int = int(cls._ip) + + if ip_int > cls._ALL_ONES: + raise ValueError('IPv6 address is too large') + + hex_str = '%032x' % ip_int + hextets = ['%x' % int(hex_str[x:x+4], 16) for x in range(0, 32, 4)] + + hextets = cls._compress_hextets(hextets) + return ':'.join(hextets) + + def _explode_shorthand_ip_string(self): + """Expand a shortened IPv6 address. + + Returns: + A string, the expanded IPv6 address. 
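+
+            For example (an illustrative doctest, not in the upstream source):
+
+            >>> IPv6Address('2001:db8::1').exploded
+            '2001:0db8:0000:0000:0000:0000:0000:0001'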
+ + """ + if isinstance(self, IPv6Network): + ip_str = str(self.network_address) + elif isinstance(self, IPv6Interface): + ip_str = str(self.ip) + else: + ip_str = str(self) + + ip_int = self._ip_int_from_string(ip_str) + hex_str = '%032x' % ip_int + parts = [hex_str[x:x+4] for x in range(0, 32, 4)] + if isinstance(self, (_BaseNetwork, IPv6Interface)): + return '%s/%d' % (':'.join(parts), self._prefixlen) + return ':'.join(parts) + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv6 address. + + This implements the method described in RFC3596 2.5. + + """ + reverse_chars = self.exploded[::-1].replace(':', '') + return '.'.join(reverse_chars) + '.ip6.arpa' + + @staticmethod + def _split_scope_id(ip_str): + """Helper function to parse IPv6 string address with scope id. + + See RFC 4007 for details. + + Args: + ip_str: A string, the IPv6 address. + + Returns: + (addr, scope_id) tuple. + + """ + addr, sep, scope_id = ip_str.partition('%') + if not sep: + scope_id = None + elif not scope_id or '%' in scope_id: + raise AddressValueError('Invalid IPv6 address: "%r"' % ip_str) + return addr, scope_id + +class IPv6Address(_BaseV6, _BaseAddress): + + """Represent and manipulate single IPv6 Addresses.""" + + __slots__ = ('_ip', '_scope_id', '__weakref__') + + def __init__(self, address): + """Instantiate a new IPv6 address object. + + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv6Address('2001:db8::') == + IPv6Address(42540766411282592856903984951653826560) + or, more generally + IPv6Address(int(IPv6Address('2001:db8::'))) == + IPv6Address('2001:db8::') + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + + """ + # Efficient constructor from integer. + if isinstance(address, int): + self._check_int_address(address) + self._ip = address + self._scope_id = None + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 16) + self._ip = int.from_bytes(address, 'big') + self._scope_id = None + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = str(address) + if '/' in addr_str: + raise AddressValueError(f"Unexpected '/' in {address!r}") + addr_str, self._scope_id = self._split_scope_id(addr_str) + + self._ip = self._ip_int_from_string(addr_str) + + def _explode_shorthand_ip_string(self): + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is None: + return super()._explode_shorthand_ip_string() + prefix_len = 30 + raw_exploded_str = super()._explode_shorthand_ip_string() + return f"{raw_exploded_str[:prefix_len]}{ipv4_mapped!s}" + + def _reverse_pointer(self): + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is None: + return super()._reverse_pointer() + prefix_len = 30 + raw_exploded_str = super()._explode_shorthand_ip_string()[:prefix_len] + # ipv4 encoded using hexadecimal nibbles instead of decimals + ipv4_int = ipv4_mapped._ip + reverse_chars = f"{raw_exploded_str}{ipv4_int:008x}"[::-1].replace(':', '') + return '.'.join(reverse_chars) + '.ip6.arpa' + + def _ipv4_mapped_ipv6_to_str(self): + """Return convenient text representation of IPv4-mapped IPv6 address + + See RFC 4291 2.5.5.2, 2.2 p.3 for details. 
+ + Returns: + A string, 'x:x:x:x:x:x:d.d.d.d', where the 'x's are the hexadecimal values of + the six high-order 16-bit pieces of the address, and the 'd's are + the decimal values of the four low-order 8-bit pieces of the + address (standard IPv4 representation) as defined in RFC 4291 2.2 p.3. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is None: + raise AddressValueError("Can not apply to non-IPv4-mapped IPv6 address %s" % str(self)) + high_order_bits = self._ip >> 32 + return "%s:%s" % (self._string_from_ip_int(high_order_bits), str(ipv4_mapped)) + + def __str__(self): + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is None: + ip_str = super().__str__() + else: + ip_str = self._ipv4_mapped_ipv6_to_str() + return ip_str + '%' + self._scope_id if self._scope_id else ip_str + + def __hash__(self): + return hash((self._ip, self._scope_id)) + + def __eq__(self, other): + address_equal = super().__eq__(other) + if address_equal is NotImplemented: + return NotImplemented + if not address_equal: + return False + return self._scope_id == getattr(other, '_scope_id', None) + + def __reduce__(self): + return (self.__class__, (str(self),)) + + @property + def scope_id(self): + """Identifier of a particular zone of the address's scope. + + See RFC 4007 for details. + + Returns: + A string identifying the zone of the address if specified, else None. + + """ + return self._scope_id + + @property + def packed(self): + """The binary representation of this address.""" + return v6_int_to_packed(self._ip) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is a multicast address. + See RFC 2373 2.7 for details. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_multicast + return self in self._constants._multicast_network + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within one of the + reserved IPv6 Network ranges. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_reserved + return any(self in x for x in self._constants._reserved_networks) + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is reserved per RFC 4291. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_link_local + return self in self._constants._linklocal_network + + @property + def is_site_local(self): + """Test if the address is reserved for site-local. + + Note that the site-local address space has been deprecated by RFC 3879. + Use is_private to test if this address is in the space of unique local + addresses as defined by RFC 4193. + + Returns: + A boolean, True if the address is reserved per RFC 3513 2.5.6. 
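+
+            For example (an illustrative doctest, not in the upstream source):
+
+            >>> IPv6Address('fec0::1').is_site_local
+            True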
+ + """ + return self in self._constants._sitelocal_network + + @property + @functools.lru_cache() + def is_private(self): + """``True`` if the address is defined as not globally reachable by + iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ + (for IPv6) with the following exceptions: + + * ``is_private`` is ``False`` for ``100.64.0.0/10`` + * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the + semantics of the underlying IPv4 addresses and the following condition holds + (see :attr:`IPv6Address.ipv4_mapped`):: + + address.is_private == address.ipv4_mapped.is_private + + ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10`` + IPv4 range where they are both ``False``. + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_private + return ( + any(self in net for net in self._constants._private_networks) + and all(self not in net for net in self._constants._private_networks_exceptions) + ) + + @property + def is_global(self): + """``True`` if the address is defined as globally reachable by + iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ + (for IPv6) with the following exception: + + For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the + semantics of the underlying IPv4 addresses and the following condition holds + (see :attr:`IPv6Address.ipv4_mapped`):: + + address.is_global == address.ipv4_mapped.is_global + + ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10`` + IPv4 range where they are both ``False``. + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_global + return not self.is_private + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 2373 2.5.2. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_unspecified + return self._ip == 0 + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback address as defined in + RFC 2373 2.5.3. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_loopback + return self._ip == 1 + + @property + def ipv4_mapped(self): + """Return the IPv4 mapped address. + + Returns: + If the IPv6 address is a v4 mapped address, return the + IPv4 mapped address. Return None otherwise. + + """ + if (self._ip >> 32) != 0xFFFF: + return None + return IPv4Address(self._ip & 0xFFFFFFFF) + + @property + def teredo(self): + """Tuple of embedded teredo IPs. + + Returns: + Tuple of the (server, client) IPs or None if the address + doesn't appear to be a teredo address (doesn't start with + 2001::/32) + + """ + if (self._ip >> 96) != 0x20010000: + return None + return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), + IPv4Address(~self._ip & 0xFFFFFFFF)) + + @property + def sixtofour(self): + """Return the IPv4 6to4 embedded address. + + Returns: + The IPv4 6to4-embedded address if present or None if the + address doesn't appear to contain a 6to4 embedded address. 
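+
+            For example (an illustrative doctest, not in the upstream source):
+
+            >>> IPv6Address('2002:c000:0204::').sixtofour
+            IPv4Address('192.0.2.4')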
+ + """ + if (self._ip >> 112) != 0x2002: + return None + return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) + + +class IPv6Interface(IPv6Address): + + def __init__(self, address): + addr, mask = self._split_addr_prefix(address) + + IPv6Address.__init__(self, addr) + self.network = IPv6Network((addr, mask), strict=False) + self.netmask = self.network.netmask + self._prefixlen = self.network._prefixlen + + @functools.cached_property + def hostmask(self): + return self.network.hostmask + + def __str__(self): + return '%s/%d' % (super().__str__(), + self._prefixlen) + + def __eq__(self, other): + address_equal = IPv6Address.__eq__(self, other) + if address_equal is NotImplemented or not address_equal: + return address_equal + try: + return self.network == other.network + except AttributeError: + # An interface with an associated network is NOT the + # same as an unassociated address. That's why the hash + # takes the extra info into account. + return False + + def __lt__(self, other): + address_less = IPv6Address.__lt__(self, other) + if address_less is NotImplemented: + return address_less + try: + return (self.network < other.network or + self.network == other.network and address_less) + except AttributeError: + # We *do* allow addresses and interfaces to be sorted. The + # unassociated address is considered less than all interfaces. + return False + + def __hash__(self): + return hash((self._ip, self._prefixlen, int(self.network.network_address))) + + __reduce__ = _IPAddressBase.__reduce__ + + @property + def ip(self): + return IPv6Address(self._ip) + + @property + def with_prefixlen(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self._prefixlen) + + @property + def with_netmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.netmask) + + @property + def with_hostmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.hostmask) + + @property + def is_unspecified(self): + return self._ip == 0 and self.network.is_unspecified + + @property + def is_loopback(self): + return super().is_loopback and self.network.is_loopback + + +class IPv6Network(_BaseV6, _BaseNetwork): + + """This class represents and manipulates 128-bit IPv6 networks. + + Attributes: [examples for IPv6('2001:db8::1000/124')] + .network_address: IPv6Address('2001:db8::1000') + .hostmask: IPv6Address('::f') + .broadcast_address: IPv6Address('2001:db8::100f') + .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') + .prefixlen: 124 + + """ + + # Class to use when creating address objects + _address_class = IPv6Address + + def __init__(self, address, strict=True): + """Instantiate a new IPv6 Network object. + + Args: + address: A string or integer representing the IPv6 network or the + IP and prefix/netmask. + '2001:db8::/128' + '2001:db8:0000:0000:0000:0000:0000:0000/128' + '2001:db8::' + are all functionally the same in IPv6. That is to say, + failing to provide a subnetmask will create an object with + a mask of /128. + + Additionally, an integer can be passed, so + IPv6Network('2001:db8::') == + IPv6Network(42540766411282592856903984951653826560) + or, more generally + IPv6Network(int(IPv6Network('2001:db8::'))) == + IPv6Network('2001:db8::') + + strict: A boolean. If true, ensure that we have been passed + A true network address, eg, 2001:db8::1000/124 and not an + IP address on a network, eg, 2001:db8::1/124. + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + NetmaskValueError: If the netmask isn't valid for + an IPv6 address. 
+ ValueError: If strict was True and a network address was not + supplied. + """ + addr, mask = self._split_addr_prefix(address) + + self.network_address = IPv6Address(addr) + self.netmask, self._prefixlen = self._make_netmask(mask) + packed = int(self.network_address) + if packed & int(self.netmask) != packed: + if strict: + raise ValueError('%s has host bits set' % self) + else: + self.network_address = IPv6Address(packed & + int(self.netmask)) + + if self._prefixlen == (self.max_prefixlen - 1): + self.hosts = self.__iter__ + elif self._prefixlen == self.max_prefixlen: + self.hosts = lambda: iter((IPv6Address(addr),)) + + def hosts(self): + """Generate Iterator over usable hosts in a network. + + This is like __iter__ except it doesn't return the + Subnet-Router anycast address. + + """ + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in range(network + 1, broadcast + 1): + yield self._address_class(x) + + @property + def is_site_local(self): + """Test if the address is reserved for site-local. + + Note that the site-local address space has been deprecated by RFC 3879. + Use is_private to test if this address is in the space of unique local + addresses as defined by RFC 4193. + + Returns: + A boolean, True if the address is reserved per RFC 3513 2.5.6. + + """ + return (self.network_address.is_site_local and + self.broadcast_address.is_site_local) + + +class _IPv6Constants: + + _linklocal_network = IPv6Network('fe80::/10') + + _multicast_network = IPv6Network('ff00::/8') + + # Not globally reachable address blocks listed on + # https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml + _private_networks = [ + IPv6Network('::1/128'), + IPv6Network('::/128'), + IPv6Network('::ffff:0:0/96'), + IPv6Network('64:ff9b:1::/48'), + IPv6Network('100::/64'), + IPv6Network('2001::/23'), + IPv6Network('2001:db8::/32'), + # IANA says N/A, let's consider it not globally reachable to be safe + IPv6Network('2002::/16'), + # RFC 9637: https://www.rfc-editor.org/rfc/rfc9637.html#section-6-2.2 + IPv6Network('3fff::/20'), + IPv6Network('fc00::/7'), + IPv6Network('fe80::/10'), + ] + + _private_networks_exceptions = [ + IPv6Network('2001:1::1/128'), + IPv6Network('2001:1::2/128'), + IPv6Network('2001:3::/32'), + IPv6Network('2001:4:112::/48'), + IPv6Network('2001:20::/28'), + IPv6Network('2001:30::/28'), + ] + + _reserved_networks = [ + IPv6Network('::/8'), IPv6Network('100::/8'), + IPv6Network('200::/7'), IPv6Network('400::/6'), + IPv6Network('800::/5'), IPv6Network('1000::/4'), + IPv6Network('4000::/3'), IPv6Network('6000::/3'), + IPv6Network('8000::/3'), IPv6Network('A000::/3'), + IPv6Network('C000::/3'), IPv6Network('E000::/4'), + IPv6Network('F000::/5'), IPv6Network('F800::/6'), + IPv6Network('FE00::/9'), + ] + + _sitelocal_network = IPv6Network('fec0::/10') + + +IPv6Address._constants = _IPv6Constants +IPv6Network._constants = _IPv6Constants diff --git a/Python314_4_x64_Template/Lib/json/__init__.py b/Python314_4_x64_Template/Lib/json/__init__.py new file mode 100644 index 00000000..9eaa4f3f --- /dev/null +++ b/Python314_4_x64_Template/Lib/json/__init__.py @@ -0,0 +1,365 @@ +r"""JSON (JavaScript Object Notation) is a subset of +JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data +interchange format. + +:mod:`json` exposes an API familiar to users of the standard library +:mod:`marshal` and :mod:`pickle` modules. It is derived from a +version of the externally maintained simplejson library. 
+
+Encoding basic Python object hierarchies::
+
+    >>> import json
+    >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
+    '["foo", {"bar": ["baz", null, 1.0, 2]}]'
+    >>> print(json.dumps("\"foo\bar"))
+    "\"foo\bar"
+    >>> print(json.dumps('\u1234'))
+    "\u1234"
+    >>> print(json.dumps('\\'))
+    "\\"
+    >>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
+    {"a": 0, "b": 0, "c": 0}
+    >>> from io import StringIO
+    >>> io = StringIO()
+    >>> json.dump(['streaming API'], io)
+    >>> io.getvalue()
+    '["streaming API"]'
+
+Compact encoding::
+
+    >>> import json
+    >>> mydict = {'4': 5, '6': 7}
+    >>> json.dumps([1,2,3,mydict], separators=(',', ':'))
+    '[1,2,3,{"4":5,"6":7}]'
+
+Pretty printing::
+
+    >>> import json
+    >>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4))
+    {
+        "4": 5,
+        "6": 7
+    }
+
+Decoding JSON::
+
+    >>> import json
+    >>> obj = ['foo', {'bar': ['baz', None, 1.0, 2]}]
+    >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
+    True
+    >>> json.loads('"\\"foo\\bar"') == '"foo\x08ar'
+    True
+    >>> from io import StringIO
+    >>> io = StringIO('["streaming API"]')
+    >>> json.load(io)[0] == 'streaming API'
+    True
+
+Specializing JSON object decoding::
+
+    >>> import json
+    >>> def as_complex(dct):
+    ...     if '__complex__' in dct:
+    ...         return complex(dct['real'], dct['imag'])
+    ...     return dct
+    ...
+    >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
+    ...     object_hook=as_complex)
+    (1+2j)
+    >>> from decimal import Decimal
+    >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
+    True
+
+Specializing JSON object encoding::
+
+    >>> import json
+    >>> def encode_complex(obj):
+    ...     if isinstance(obj, complex):
+    ...         return [obj.real, obj.imag]
+    ...     raise TypeError(f'Object of type {obj.__class__.__name__} '
+    ...                     f'is not JSON serializable')
+    ...
+    >>> json.dumps(2 + 1j, default=encode_complex)
+    '[2.0, 1.0]'
+    >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
+    '[2.0, 1.0]'
+    >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
+    '[2.0, 1.0]'
+
+
+Using json from the shell to validate and pretty-print::
+
+    $ echo '{"json":"obj"}' | python -m json
+    {
+        "json": "obj"
+    }
+    $ echo '{ 1.2:3.4}' | python -m json
+    Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
+"""
+__version__ = '2.0.9'
+__all__ = [
+    'dump', 'dumps', 'load', 'loads',
+    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
+]
+
+__author__ = 'Bob Ippolito <bob@redivi.com>'
+
+from .decoder import JSONDecoder, JSONDecodeError
+from .encoder import JSONEncoder
+import codecs
+
+_default_encoder = JSONEncoder(
+    skipkeys=False,
+    ensure_ascii=True,
+    check_circular=True,
+    allow_nan=True,
+    indent=None,
+    separators=None,
+    default=None,
+)
+
+def dump(obj, fp, *, skipkeys=False, ensure_ascii=True, check_circular=True,
+         allow_nan=True, cls=None, indent=None, separators=None,
+         default=None, sort_keys=False, **kw):
+    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
+    ``.write()``-supporting file-like object).
+
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
+    (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
+    instead of raising a ``TypeError``.
+
+    If ``ensure_ascii`` is false, then the strings written to ``fp`` can
+    contain non-ASCII and non-printable characters if they appear in strings
+    contained in ``obj``. Otherwise, all such characters are escaped in JSON
+    strings.
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in a ``RecursionError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
+    in strict compliance of the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If ``indent`` is a non-negative integer, then JSON array elements and
+    object members will be pretty-printed with that indent level. An indent
+    level of 0 will only insert newlines. ``None`` is the most compact
+    representation.
+
+    If specified, ``separators`` should be an ``(item_separator,
+    key_separator)`` tuple. The default is ``(', ', ': ')`` if *indent* is
+    ``None`` and ``(',', ': ')`` otherwise. To get the most compact JSON
+    representation, you should specify ``(',', ':')`` to eliminate
+    whitespace.
+
+    ``default(obj)`` is a function that should return a serializable version
+    of obj or raise TypeError. The default simply raises TypeError.
+
+    If *sort_keys* is true (default: ``False``), then the output of
+    dictionaries will be sorted by key.
+
+    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+    ``.default()`` method to serialize additional types), specify it with
+    the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
+
+    """
+    # cached encoder
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
+        cls is None and indent is None and separators is None and
+        default is None and not sort_keys and not kw):
+        iterable = _default_encoder.iterencode(obj)
+    else:
+        if cls is None:
+            cls = JSONEncoder
+        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+            separators=separators,
+            default=default, sort_keys=sort_keys, **kw).iterencode(obj)
+    # could accelerate with writelines in some versions of Python, at
+    # a debuggability cost
+    for chunk in iterable:
+        fp.write(chunk)
+
+
+def dumps(obj, *, skipkeys=False, ensure_ascii=True, check_circular=True,
+          allow_nan=True, cls=None, indent=None, separators=None,
+          default=None, sort_keys=False, **kw):
+    """Serialize ``obj`` to a JSON formatted ``str``.
+
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
+    (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
+    instead of raising a ``TypeError``.
+
+    If ``ensure_ascii`` is false, then the return value can contain
+    non-ASCII and non-printable characters if they appear in strings
+    contained in ``obj``. Otherwise, all such characters are escaped in
+    JSON strings.
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in a ``RecursionError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
+    strict compliance of the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If ``indent`` is a non-negative integer, then JSON array elements and
+    object members will be pretty-printed with that indent level. An indent
+    level of 0 will only insert newlines. ``None`` is the most compact
+    representation.
+
+    If specified, ``separators`` should be an ``(item_separator,
+    key_separator)`` tuple. The default is ``(', ', ': ')`` if *indent* is
+    ``None`` and ``(',', ': ')`` otherwise. To get the most compact JSON
+    representation, you should specify ``(',', ':')`` to eliminate
+    whitespace.
+
+    ``default(obj)`` is a function that should return a serializable version
+    of obj or raise TypeError. The default simply raises TypeError.
+
+    If *sort_keys* is true (default: ``False``), then the output of
+    dictionaries will be sorted by key.
+
+    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+    ``.default()`` method to serialize additional types), specify it with
+    the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
+
+    """
+    # cached encoder
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
+        cls is None and indent is None and separators is None and
+        default is None and not sort_keys and not kw):
+        return _default_encoder.encode(obj)
+    if cls is None:
+        cls = JSONEncoder
+    return cls(
+        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+        separators=separators, default=default, sort_keys=sort_keys,
+        **kw).encode(obj)
+
+
+_default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None)
+
+
+def detect_encoding(b):
+    bstartswith = b.startswith
+    if bstartswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)):
+        return 'utf-32'
+    if bstartswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)):
+        return 'utf-16'
+    if bstartswith(codecs.BOM_UTF8):
+        return 'utf-8-sig'
+
+    if len(b) >= 4:
+        if not b[0]:
+            # 00 00 -- -- - utf-32-be
+            # 00 XX -- -- - utf-16-be
+            return 'utf-16-be' if b[1] else 'utf-32-be'
+        if not b[1]:
+            # XX 00 00 00 - utf-32-le
+            # XX 00 00 XX - utf-16-le
+            # XX 00 XX -- - utf-16-le
+            return 'utf-16-le' if b[2] or b[3] else 'utf-32-le'
+    elif len(b) == 2:
+        if not b[0]:
+            # 00 XX - utf-16-be
+            return 'utf-16-be'
+        if not b[1]:
+            # XX 00 - utf-16-le
+            return 'utf-16-le'
+    # default
+    return 'utf-8'
+
+
+def load(fp, *, cls=None, object_hook=None, parse_float=None,
+        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
+    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
+    a JSON document) to a Python object.
+
+    ``object_hook`` is an optional function that will be called with the
+    result of any object literal decode (a ``dict``). The return value of
+    ``object_hook`` will be used instead of the ``dict``. This feature
+    can be used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+    ``object_pairs_hook`` is an optional function that will be called with
+    the result of any object literal decoded with an ordered list of pairs.
+    The return value of ``object_pairs_hook`` will be used instead of the
+    ``dict``. This feature can be used to implement custom decoders. If
+    ``object_hook`` is also defined, the ``object_pairs_hook`` takes
+    priority.
+
+    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+    kwarg; otherwise ``JSONDecoder`` is used.
+    """
+    return loads(fp.read(),
+        cls=cls, object_hook=object_hook,
+        parse_float=parse_float, parse_int=parse_int,
+        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw)
+
+
+def loads(s, *, cls=None, object_hook=None, parse_float=None,
+        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
+    """Deserialize ``s`` (a ``str``, ``bytes`` or ``bytearray`` instance
+    containing a JSON document) to a Python object.
+ + ``object_hook`` is an optional function that will be called with the + result of any object literal decode (a ``dict``). The return value of + ``object_hook`` will be used instead of the ``dict``. This feature + can be used to implement custom decoders (e.g. JSON-RPC class hinting). + + ``object_pairs_hook`` is an optional function that will be called with + the result of any object literal decoded with an ordered list of pairs. + The return value of ``object_pairs_hook`` will be used instead of the + ``dict``. This feature can be used to implement custom decoders. If + ``object_hook`` is also defined, the ``object_pairs_hook`` takes + priority. + + ``parse_float``, if specified, will be called with the string + of every JSON float to be decoded. By default this is equivalent to + float(num_str). This can be used to use another datatype or parser + for JSON floats (e.g. decimal.Decimal). + + ``parse_int``, if specified, will be called with the string + of every JSON int to be decoded. By default this is equivalent to + int(num_str). This can be used to use another datatype or parser + for JSON integers (e.g. float). + + ``parse_constant``, if specified, will be called with one of the + following strings: -Infinity, Infinity, NaN. + This can be used to raise an exception if invalid JSON numbers + are encountered. + + To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` + kwarg; otherwise ``JSONDecoder`` is used. + """ + if isinstance(s, str): + if s.startswith('\ufeff'): + raise JSONDecodeError("Unexpected UTF-8 BOM (decode using utf-8-sig)", + s, 0) + else: + if not isinstance(s, (bytes, bytearray)): + raise TypeError(f'the JSON object must be str, bytes or bytearray, ' + f'not {s.__class__.__name__}') + s = s.decode(detect_encoding(s), 'surrogatepass') + + if (cls is None and object_hook is None and + parse_int is None and parse_float is None and + parse_constant is None and object_pairs_hook is None and not kw): + return _default_decoder.decode(s) + if cls is None: + cls = JSONDecoder + if object_hook is not None: + kw['object_hook'] = object_hook + if object_pairs_hook is not None: + kw['object_pairs_hook'] = object_pairs_hook + if parse_float is not None: + kw['parse_float'] = parse_float + if parse_int is not None: + kw['parse_int'] = parse_int + if parse_constant is not None: + kw['parse_constant'] = parse_constant + return cls(**kw).decode(s) diff --git a/Python314_4_x64_Template/Lib/json/__main__.py b/Python314_4_x64_Template/Lib/json/__main__.py new file mode 100644 index 00000000..1808eadd --- /dev/null +++ b/Python314_4_x64_Template/Lib/json/__main__.py @@ -0,0 +1,20 @@ +"""Command-line tool to validate and pretty-print JSON + +Usage:: + + $ echo '{"json":"obj"}' | python -m json + { + "json": "obj" + } + $ echo '{ 1.2:3.4}' | python -m json + Expecting property name enclosed in double quotes: line 1 column 3 (char 2) + +""" +import json.tool + + +if __name__ == '__main__': + try: + json.tool.main() + except BrokenPipeError as exc: + raise SystemExit(exc.errno) diff --git a/Python314_4_x64_Template/Lib/json/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/json/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..865f33ba Binary files /dev/null and b/Python314_4_x64_Template/Lib/json/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/json/__pycache__/decoder.cpython-314.pyc b/Python314_4_x64_Template/Lib/json/__pycache__/decoder.cpython-314.pyc new file mode 100644 index 
00000000..387f6ea3 Binary files /dev/null and b/Python314_4_x64_Template/Lib/json/__pycache__/decoder.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/json/__pycache__/encoder.cpython-314.pyc b/Python314_4_x64_Template/Lib/json/__pycache__/encoder.cpython-314.pyc new file mode 100644 index 00000000..bf1c1ace Binary files /dev/null and b/Python314_4_x64_Template/Lib/json/__pycache__/encoder.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/json/__pycache__/scanner.cpython-314.pyc b/Python314_4_x64_Template/Lib/json/__pycache__/scanner.cpython-314.pyc new file mode 100644 index 00000000..765deeb9 Binary files /dev/null and b/Python314_4_x64_Template/Lib/json/__pycache__/scanner.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/json/decoder.py b/Python314_4_x64_Template/Lib/json/decoder.py similarity index 100% rename from Python313_13_x64_Template/Lib/json/decoder.py rename to Python314_4_x64_Template/Lib/json/decoder.py diff --git a/Python314_4_x64_Template/Lib/json/encoder.py b/Python314_4_x64_Template/Lib/json/encoder.py new file mode 100644 index 00000000..5cf6d64f --- /dev/null +++ b/Python314_4_x64_Template/Lib/json/encoder.py @@ -0,0 +1,461 @@ +"""Implementation of JSONEncoder +""" +import re + +try: + from _json import encode_basestring_ascii as c_encode_basestring_ascii +except ImportError: + c_encode_basestring_ascii = None +try: + from _json import encode_basestring as c_encode_basestring +except ImportError: + c_encode_basestring = None +try: + from _json import make_encoder as c_make_encoder +except ImportError: + c_make_encoder = None + +ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') +ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') +HAS_UTF8 = re.compile(b'[\x80-\xff]') +ESCAPE_DCT = { + '\\': '\\\\', + '"': '\\"', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +for i in range(0x20): + ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) + #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) +del i + +INFINITY = float('inf') + +def py_encode_basestring(s): + """Return a JSON representation of a Python string + + """ + def replace(match): + return ESCAPE_DCT[match.group(0)] + return '"' + ESCAPE.sub(replace, s) + '"' + + +encode_basestring = (c_encode_basestring or py_encode_basestring) + + +def py_encode_basestring_ascii(s): + """Return an ASCII-only JSON representation of a Python string + + """ + def replace(match): + s = match.group(0) + try: + return ESCAPE_DCT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + return '\\u{0:04x}'.format(n) + #return '\\u%04x' % (n,) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + s2 = 0xdc00 | (n & 0x3ff) + return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) + return '"' + ESCAPE_ASCII.sub(replace, s) + '"' + + +encode_basestring_ascii = ( + c_encode_basestring_ascii or py_encode_basestring_ascii) + +class JSONEncoder(object): + """Extensible JSON encoder for Python data structures. 
+ + Supports the following objects and types by default: + + +-------------------+---------------+ + | Python | JSON | + +===================+===============+ + | dict | object | + +-------------------+---------------+ + | list, tuple | array | + +-------------------+---------------+ + | str | string | + +-------------------+---------------+ + | int, float | number | + +-------------------+---------------+ + | True | true | + +-------------------+---------------+ + | False | false | + +-------------------+---------------+ + | None | null | + +-------------------+---------------+ + + To extend this to recognize other objects, subclass and implement a + ``.default()`` method with another method that returns a serializable + object for ``o`` if possible, otherwise it should call the superclass + implementation (to raise ``TypeError``). + + """ + item_separator = ', ' + key_separator = ': ' + def __init__(self, *, skipkeys=False, ensure_ascii=True, + check_circular=True, allow_nan=True, sort_keys=False, + indent=None, separators=None, default=None): + """Constructor for JSONEncoder, with sensible defaults. + + If skipkeys is false, then it is a TypeError to attempt + encoding of keys that are not str, int, float, bool or None. + If skipkeys is True, such items are simply skipped. + + If ensure_ascii is true, the output is guaranteed to be str objects + with all incoming non-ASCII and non-printable characters escaped. + If ensure_ascii is false, the output can contain non-ASCII and + non-printable characters. + + If check_circular is true, then lists, dicts, and custom encoded + objects will be checked for circular references during encoding to + prevent an infinite recursion (which would cause an RecursionError). + Otherwise, no such check takes place. + + If allow_nan is true, then NaN, Infinity, and -Infinity will be + encoded as such. This behavior is not JSON specification compliant, + but is consistent with most JavaScript based encoders and decoders. + Otherwise, it will be a ValueError to encode such floats. + + If sort_keys is true, then the output of dictionaries will be + sorted by key; this is useful for regression tests to ensure + that JSON serializations can be compared on a day-to-day basis. + + If indent is a non-negative integer, then JSON array + elements and object members will be pretty-printed with that + indent level. An indent level of 0 will only insert newlines. + None is the most compact representation. + + If specified, separators should be an (item_separator, + key_separator) tuple. The default is (', ', ': ') if *indent* is + ``None`` and (',', ': ') otherwise. To get the most compact JSON + representation, you should specify (',', ':') to eliminate + whitespace. + + If specified, default is a function that gets called for objects + that can't otherwise be serialized. It should return a JSON + encodable version of the object or raise a ``TypeError``. + + """ + + self.skipkeys = skipkeys + self.ensure_ascii = ensure_ascii + self.check_circular = check_circular + self.allow_nan = allow_nan + self.sort_keys = sort_keys + self.indent = indent + if separators is not None: + self.item_separator, self.key_separator = separators + elif indent is not None: + self.item_separator = ',' + if default is not None: + self.default = default + + def default(self, o): + """Implement this method in a subclass such that it returns + a serializable object for ``o``, or calls the base implementation + (to raise a ``TypeError``). 
+ + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + # Let the base class default method raise the TypeError + return super().default(o) + + """ + raise TypeError(f'Object of type {o.__class__.__name__} ' + f'is not JSON serializable') + + def encode(self, o): + """Return a JSON string representation of a Python data structure. + + >>> from json.encoder import JSONEncoder + >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) + '{"foo": ["bar", "baz"]}' + + """ + # This is for extremely simple cases and benchmarks. + if isinstance(o, str): + if self.ensure_ascii: + return encode_basestring_ascii(o) + else: + return encode_basestring(o) + # This doesn't pass the iterator directly to ''.join() because the + # exceptions aren't as detailed. The list call should be roughly + # equivalent to the PySequence_Fast that ''.join() would do. + chunks = self.iterencode(o, _one_shot=True) + if not isinstance(chunks, (list, tuple)): + chunks = list(chunks) + return ''.join(chunks) + + def iterencode(self, o, _one_shot=False): + """Encode the given object and yield each string + representation as available. + + For example:: + + for chunk in JSONEncoder().iterencode(bigobject): + mysocket.write(chunk) + + """ + if self.check_circular: + markers = {} + else: + markers = None + if self.ensure_ascii: + _encoder = encode_basestring_ascii + else: + _encoder = encode_basestring + + def floatstr(o, allow_nan=self.allow_nan, + _repr=float.__repr__, _inf=INFINITY, _neginf=-INFINITY): + # Check for specials. Note that this type of test is processor + # and/or platform-specific, so do tests which don't depend on the + # internals. 
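+            # (A NaN is the only float value that compares unequal to
+            # itself, so the `o != o` test below detects NaN without
+            # relying on math.isnan or platform internals.)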
+ + if o != o: + text = 'NaN' + elif o == _inf: + text = 'Infinity' + elif o == _neginf: + text = '-Infinity' + else: + return _repr(o) + + if not allow_nan: + raise ValueError( + "Out of range float values are not JSON compliant: " + + repr(o)) + + return text + + + if self.indent is None or isinstance(self.indent, str): + indent = self.indent + else: + indent = ' ' * self.indent + if _one_shot and c_make_encoder is not None: + _iterencode = c_make_encoder( + markers, self.default, _encoder, indent, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, self.allow_nan) + else: + _iterencode = _make_iterencode( + markers, self.default, _encoder, indent, floatstr, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, _one_shot) + return _iterencode(o, 0) + +def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, + _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, + ## HACK: hand-optimized bytecode; turn globals into locals + ValueError=ValueError, + dict=dict, + float=float, + id=id, + int=int, + isinstance=isinstance, + list=list, + str=str, + tuple=tuple, + _intstr=int.__repr__, + ): + + def _iterencode_list(lst, _current_indent_level): + if not lst: + yield '[]' + return + if markers is not None: + markerid = id(lst) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = lst + buf = '[' + if _indent is not None: + _current_indent_level += 1 + newline_indent = '\n' + _indent * _current_indent_level + separator = _item_separator + newline_indent + buf += newline_indent + else: + newline_indent = None + separator = _item_separator + for i, value in enumerate(lst): + if i: + buf = separator + try: + if isinstance(value, str): + yield buf + _encoder(value) + elif value is None: + yield buf + 'null' + elif value is True: + yield buf + 'true' + elif value is False: + yield buf + 'false' + elif isinstance(value, int): + # Subclasses of int/float may override __repr__, but we still + # want to encode them as integers/floats in JSON. One example + # within the standard library is IntEnum. 
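+                    # (Calling the unbound int.__repr__, bound to _intstr in
+                    # the _make_iterencode signature, sidesteps any __repr__
+                    # override on the subclass.)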
+ yield buf + _intstr(value) + elif isinstance(value, float): + # see comment above for int + yield buf + _floatstr(value) + else: + yield buf + if isinstance(value, (list, tuple)): + chunks = _iterencode_list(value, _current_indent_level) + elif isinstance(value, dict): + chunks = _iterencode_dict(value, _current_indent_level) + else: + chunks = _iterencode(value, _current_indent_level) + yield from chunks + except GeneratorExit: + raise + except BaseException as exc: + exc.add_note(f'when serializing {type(lst).__name__} item {i}') + raise + if newline_indent is not None: + _current_indent_level -= 1 + yield '\n' + _indent * _current_indent_level + yield ']' + if markers is not None: + del markers[markerid] + + def _iterencode_dict(dct, _current_indent_level): + if not dct: + yield '{}' + return + if markers is not None: + markerid = id(dct) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = dct + yield '{' + if _indent is not None: + _current_indent_level += 1 + newline_indent = '\n' + _indent * _current_indent_level + item_separator = _item_separator + newline_indent + else: + newline_indent = None + item_separator = _item_separator + first = True + if _sort_keys: + items = sorted(dct.items()) + else: + items = dct.items() + for key, value in items: + if isinstance(key, str): + pass + # JavaScript is weakly typed for these, so it makes sense to + # also allow them. Many encoders seem to do something like this. + elif isinstance(key, float): + # see comment for int/float in _make_iterencode + key = _floatstr(key) + elif key is True: + key = 'true' + elif key is False: + key = 'false' + elif key is None: + key = 'null' + elif isinstance(key, int): + # see comment for int/float in _make_iterencode + key = _intstr(key) + elif _skipkeys: + continue + else: + raise TypeError(f'keys must be str, int, float, bool or None, ' + f'not {key.__class__.__name__}') + if first: + first = False + if newline_indent is not None: + yield newline_indent + else: + yield item_separator + yield _encoder(key) + yield _key_separator + try: + if isinstance(value, str): + yield _encoder(value) + elif value is None: + yield 'null' + elif value is True: + yield 'true' + elif value is False: + yield 'false' + elif isinstance(value, int): + # see comment for int/float in _make_iterencode + yield _intstr(value) + elif isinstance(value, float): + # see comment for int/float in _make_iterencode + yield _floatstr(value) + else: + if isinstance(value, (list, tuple)): + chunks = _iterencode_list(value, _current_indent_level) + elif isinstance(value, dict): + chunks = _iterencode_dict(value, _current_indent_level) + else: + chunks = _iterencode(value, _current_indent_level) + yield from chunks + except GeneratorExit: + raise + except BaseException as exc: + exc.add_note(f'when serializing {type(dct).__name__} item {key!r}') + raise + if not first and newline_indent is not None: + _current_indent_level -= 1 + yield '\n' + _indent * _current_indent_level + yield '}' + if markers is not None: + del markers[markerid] + + def _iterencode(o, _current_indent_level): + if isinstance(o, str): + yield _encoder(o) + elif o is None: + yield 'null' + elif o is True: + yield 'true' + elif o is False: + yield 'false' + elif isinstance(o, int): + # see comment for int/float in _make_iterencode + yield _intstr(o) + elif isinstance(o, float): + # see comment for int/float in _make_iterencode + yield _floatstr(o) + elif isinstance(o, (list, tuple)): + yield from _iterencode_list(o, 
_current_indent_level)
+        elif isinstance(o, dict):
+            yield from _iterencode_dict(o, _current_indent_level)
+        else:
+            if markers is not None:
+                markerid = id(o)
+                if markerid in markers:
+                    raise ValueError("Circular reference detected")
+                markers[markerid] = o
+            newobj = _default(o)
+            try:
+                yield from _iterencode(newobj, _current_indent_level)
+            except GeneratorExit:
+                raise
+            except BaseException as exc:
+                exc.add_note(f'when serializing {type(o).__name__} object')
+                raise
+            if markers is not None:
+                del markers[markerid]
+    return _iterencode
diff --git a/Python313_13_x64_Template/Lib/json/scanner.py b/Python314_4_x64_Template/Lib/json/scanner.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/json/scanner.py
rename to Python314_4_x64_Template/Lib/json/scanner.py
diff --git a/Python314_4_x64_Template/Lib/json/tool.py b/Python314_4_x64_Template/Lib/json/tool.py
new file mode 100644
index 00000000..1967817a
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/json/tool.py
@@ -0,0 +1,121 @@
+"""Command-line tool to validate and pretty-print JSON
+
+See `json.__main__` for a usage example (invocation as
+`python -m json.tool` is supported for backwards compatibility).
+"""
+import argparse
+import json
+import re
+import sys
+from _colorize import get_theme, can_colorize
+
+
+# The string we are colorizing is valid JSON,
+# so we can use a looser but simpler regex to match
+# the various parts, most notably strings and numbers,
+# where the regex given by the spec is much more complex.
+_color_pattern = re.compile(r'''
+    (?P<key>"(\\.|[^"\\])*")(?=:) |
+    (?P<string>"(\\.|[^"\\])*") |
+    (?P<number>NaN|-?Infinity|[0-9\-+.Ee]+) |
+    (?P<boolean>true|false) |
+    (?P<null>null)
+''', re.VERBOSE)
+
+_group_to_theme_color = {
+    "key": "definition",
+    "string": "string",
+    "number": "number",
+    "boolean": "keyword",
+    "null": "keyword",
+}
+
+
+def _colorize_json(json_str, theme):
+    def _replace_match_callback(match):
+        for group, color in _group_to_theme_color.items():
+            if m := match.group(group):
+                return f"{theme[color]}{m}{theme.reset}"
+        return match.group()
+
+    return re.sub(_color_pattern, _replace_match_callback, json_str)
+
+
+def main():
+    description = ('A simple command line interface for json module '
+                   'to validate and pretty-print JSON objects.')
+    parser = argparse.ArgumentParser(description=description, color=True)
+    parser.add_argument('infile', nargs='?',
+                        help='a JSON file to be validated or pretty-printed',
+                        default='-')
+    parser.add_argument('outfile', nargs='?',
+                        help='write the output of infile to outfile',
+                        default=None)
+    parser.add_argument('--sort-keys', action='store_true', default=False,
+                        help='sort the output of dictionaries alphabetically by key')
+    parser.add_argument('--no-ensure-ascii', dest='ensure_ascii', action='store_false',
+                        help='disable escaping of non-ASCII characters')
+    parser.add_argument('--json-lines', action='store_true', default=False,
+                        help='parse input using the JSON Lines format. 
' + 'Use with --no-indent or --compact to produce valid JSON Lines output.') + group = parser.add_mutually_exclusive_group() + group.add_argument('--indent', default=4, type=int, + help='separate items with newlines and use this number ' + 'of spaces for indentation') + group.add_argument('--tab', action='store_const', dest='indent', + const='\t', help='separate items with newlines and use ' + 'tabs for indentation') + group.add_argument('--no-indent', action='store_const', dest='indent', + const=None, + help='separate items with spaces rather than newlines') + group.add_argument('--compact', action='store_true', + help='suppress all whitespace separation (most compact)') + options = parser.parse_args() + + dump_args = { + 'sort_keys': options.sort_keys, + 'indent': options.indent, + 'ensure_ascii': options.ensure_ascii, + } + if options.compact: + dump_args['indent'] = None + dump_args['separators'] = ',', ':' + + try: + if options.infile == '-': + infile = sys.stdin + else: + infile = open(options.infile, encoding='utf-8') + try: + if options.json_lines: + objs = (json.loads(line) for line in infile) + else: + objs = (json.load(infile),) + finally: + if infile is not sys.stdin: + infile.close() + + if options.outfile is None: + outfile = sys.stdout + else: + outfile = open(options.outfile, 'w', encoding='utf-8') + with outfile: + if can_colorize(file=outfile): + t = get_theme(tty_file=outfile).syntax + for obj in objs: + json_str = json.dumps(obj, **dump_args) + outfile.write(_colorize_json(json_str, t)) + outfile.write('\n') + else: + for obj in objs: + json.dump(obj, outfile, **dump_args) + outfile.write('\n') + except ValueError as e: + raise SystemExit(e) + + +if __name__ == '__main__': + try: + main() + except BrokenPipeError as exc: + raise SystemExit(exc.errno) diff --git a/Python313_13_x64_Template/Lib/keyword.py b/Python314_4_x64_Template/Lib/keyword.py similarity index 100% rename from Python313_13_x64_Template/Lib/keyword.py rename to Python314_4_x64_Template/Lib/keyword.py diff --git a/Python314_4_x64_Template/Lib/linecache.py b/Python314_4_x64_Template/Lib/linecache.py new file mode 100644 index 00000000..ef3b2d91 --- /dev/null +++ b/Python314_4_x64_Template/Lib/linecache.py @@ -0,0 +1,256 @@ +"""Cache lines from Python source files. + +This is intended to read lines from modules imported -- hence if a filename +is not found, it will look down the module search path for a file by +that name. +""" + +__all__ = ["getline", "clearcache", "checkcache", "lazycache"] + + +# The cache. Maps filenames to either a thunk which will provide source code, +# or a tuple (size, mtime, lines, fullname) once loaded. +cache = {} +_interactive_cache = {} + + +def clearcache(): + """Clear the cache entirely.""" + cache.clear() + + +def getline(filename, lineno, module_globals=None): + """Get a line for a Python source file from the cache. + Update the cache if it doesn't contain an entry for this file already.""" + + lines = getlines(filename, module_globals) + if 1 <= lineno <= len(lines): + return lines[lineno - 1] + return '' + + +def getlines(filename, module_globals=None): + """Get the lines for a Python source file from the cache. 
+    Update the cache if it doesn't contain an entry for this file already."""
+
+    entry = cache.get(filename, None)
+    if entry is not None and len(entry) != 1:
+        return entry[2]
+
+    try:
+        return updatecache(filename, module_globals)
+    except MemoryError:
+        clearcache()
+        return []
+
+
+def _getline_from_code(filename, lineno):
+    lines = _getlines_from_code(filename)
+    if 1 <= lineno <= len(lines):
+        return lines[lineno - 1]
+    return ''
+
+def _make_key(code):
+    return (code.co_filename, code.co_qualname, code.co_firstlineno)
+
+def _getlines_from_code(code):
+    code_id = _make_key(code)
+    entry = _interactive_cache.get(code_id, None)
+    if entry is not None and len(entry) != 1:
+        return entry[2]
+    return []
+
+
+def _source_unavailable(filename):
+    """Return True if the source code is unavailable for such file name."""
+    return (
+        not filename
+        or (filename.startswith('<')
+            and filename.endswith('>')
+            and not filename.startswith('<frozen ')))
+
+
+def _make_lazycache_entry(filename, module_globals):
+    if not filename or (filename.startswith('<') and filename.endswith('>')):
+        return None
+    # Try for a __loader__, if available
+    if module_globals and '__name__' in module_globals:
+        spec = module_globals.get('__spec__')
+        name = getattr(spec, 'name', None) or module_globals['__name__']
+        loader = getattr(spec, 'loader', None)
+        if loader is None:
+            loader = module_globals.get('__loader__')
+        get_source = getattr(loader, 'get_source', None)
+
+        if name and get_source:
+            def get_lines(name=name, *args, **kwargs):
+                return get_source(name, *args, **kwargs)
+            return (get_lines,)
+    return None
+
+
+def _register_code(code, string, name):
+    entry = (len(string),
+             None,
+             [line + '\n' for line in string.splitlines()],
+             name)
+    stack = [code]
+    while stack:
+        code = stack.pop()
+        for const in code.co_consts:
+            if isinstance(const, type(code)):
+                stack.append(const)
+        key = _make_key(code)
+        _interactive_cache[key] = entry
diff --git a/Python314_4_x64_Template/Lib/locale.py b/Python314_4_x64_Template/Lib/locale.py
new file mode 100644
index 00000000..dfedc638
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/locale.py
@@ -0,0 +1,1783 @@
+"""Locale support module.
+
+The module provides low-level access to the C lib's locale APIs and adds high
+level number formatting APIs as well as a locale aliasing engine to complement
+these.
+
+The aliasing engine includes support for many commonly used locale names and
+maps them to values suitable for passing to the C lib's setlocale() function. It
+also includes default encodings for all supported locale names.
+
+"""
+
+import sys
+import encodings
+import encodings.aliases
+import _collections_abc
+from builtins import str as _builtin_str
+import functools
+
+# Try importing the _locale module.
+#
+# If this fails, fall back on a basic 'C' locale emulation.
+
+# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
+# trying the import. So __all__ is also fiddled at the end of the file.
+__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error",
+           "setlocale", "localeconv", "strcoll", "strxfrm",
+           "str", "atof", "atoi", "format_string", "currency",
+           "normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY",
+           "LC_NUMERIC", "LC_ALL", "CHAR_MAX", "getencoding"]
+
+def _strcoll(a,b):
+    """ strcoll(string,string) -> int.
+        Compares two strings according to the locale.
+    """
+    return (a > b) - (a < b)
+
+def _strxfrm(s):
+    """ strxfrm(string) -> string.
+        Returns a string that behaves for cmp locale-aware.
+ """ + return s + +try: + + from _locale import * + +except ImportError: + + # Locale emulation + + CHAR_MAX = 127 + LC_ALL = 6 + LC_COLLATE = 3 + LC_CTYPE = 0 + LC_MESSAGES = 5 + LC_MONETARY = 4 + LC_NUMERIC = 1 + LC_TIME = 2 + Error = ValueError + + def localeconv(): + """ localeconv() -> dict. + Returns numeric and monetary locale-specific parameters. + """ + # 'C' locale default values + return {'grouping': [127], + 'currency_symbol': '', + 'n_sign_posn': 127, + 'p_cs_precedes': 127, + 'n_cs_precedes': 127, + 'mon_grouping': [], + 'n_sep_by_space': 127, + 'decimal_point': '.', + 'negative_sign': '', + 'positive_sign': '', + 'p_sep_by_space': 127, + 'int_curr_symbol': '', + 'p_sign_posn': 127, + 'thousands_sep': '', + 'mon_thousands_sep': '', + 'frac_digits': 127, + 'mon_decimal_point': '', + 'int_frac_digits': 127} + + def setlocale(category, value=None): + """ setlocale(integer,string=None) -> string. + Activates/queries locale processing. + """ + if value not in (None, '', 'C'): + raise Error('_locale emulation only supports "C" locale') + return 'C' + +# These may or may not exist in _locale, so be sure to set them. +if 'strxfrm' not in globals(): + strxfrm = _strxfrm +if 'strcoll' not in globals(): + strcoll = _strcoll + + +_localeconv = localeconv + +# With this dict, you can override some items of localeconv's return value. +# This is useful for testing purposes. +_override_localeconv = {} + +@functools.wraps(_localeconv) +def localeconv(): + d = _localeconv() + if _override_localeconv: + d.update(_override_localeconv) + return d + + +### Number formatting APIs + +# Author: Martin von Loewis +# improved by Georg Brandl + +# Iterate over grouping intervals +def _grouping_intervals(grouping): + last_interval = None + for interval in grouping: + # if grouping is -1, we are done + if interval == CHAR_MAX: + return + # 0: re-use last group ad infinitum + if interval == 0: + if last_interval is None: + raise ValueError("invalid grouping") + while True: + yield last_interval + yield interval + last_interval = interval + +#perform the grouping from right to left +def _group(s, monetary=False): + conv = localeconv() + thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep'] + grouping = conv[monetary and 'mon_grouping' or 'grouping'] + if not grouping: + return (s, 0) + if s[-1] == ' ': + stripped = s.rstrip() + right_spaces = s[len(stripped):] + s = stripped + else: + right_spaces = '' + left_spaces = '' + groups = [] + for interval in _grouping_intervals(grouping): + if not s or s[-1] not in "0123456789": + # only non-digit characters remain (sign, spaces) + left_spaces = s + s = '' + break + groups.append(s[-interval:]) + s = s[:-interval] + if s: + groups.append(s) + groups.reverse() + return ( + left_spaces + thousands_sep.join(groups) + right_spaces, + len(thousands_sep) * (len(groups) - 1) + ) + +# Strip a given amount of excess padding from the given string +def _strip_padding(s, amount): + lpos = 0 + while amount and s[lpos] == ' ': + lpos += 1 + amount -= 1 + rpos = len(s) - 1 + while amount and s[rpos] == ' ': + rpos -= 1 + amount -= 1 + return s[lpos:rpos+1] + +_percent_re = None + +def _format(percent, value, grouping=False, monetary=False, *additional): + if additional: + formatted = percent % ((value,) + additional) + else: + formatted = percent % value + if percent[-1] in 'eEfFgGdiu': + formatted = _localize(formatted, grouping, monetary) + return formatted + +# Transform formatted as locale number according to the locale settings +def 
_localize(formatted, grouping=False, monetary=False):
+    # floats and decimal ints need special action!
+    if '.' in formatted:
+        seps = 0
+        parts = formatted.split('.')
+        if grouping:
+            parts[0], seps = _group(parts[0], monetary=monetary)
+        decimal_point = localeconv()[monetary and 'mon_decimal_point'
+                                              or 'decimal_point']
+        formatted = decimal_point.join(parts)
+        if seps:
+            formatted = _strip_padding(formatted, seps)
+    else:
+        seps = 0
+        if grouping:
+            formatted, seps = _group(formatted, monetary=monetary)
+        if seps:
+            formatted = _strip_padding(formatted, seps)
+    return formatted
+
+def format_string(f, val, grouping=False, monetary=False):
+    """Formats a string in the same way that the % formatting would use,
+    but takes the current locale into account.
+
+    Grouping is applied if the third parameter is true.
+    Conversion uses monetary thousands separator and grouping strings if
+    forth parameter monetary is true."""
+    global _percent_re
+    if _percent_re is None:
+        import re
+
+        _percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
+
+    percents = list(_percent_re.finditer(f))
+    new_f = _percent_re.sub('%s', f)
+
+    if isinstance(val, _collections_abc.Mapping):
+        new_val = []
+        for perc in percents:
+            if perc.group()[-1]=='%':
+                new_val.append('%')
+            else:
+                new_val.append(_format(perc.group(), val, grouping, monetary))
+    else:
+        if not isinstance(val, tuple):
+            val = (val,)
+        new_val = []
+        i = 0
+        for perc in percents:
+            if perc.group()[-1]=='%':
+                new_val.append('%')
+            else:
+                starcount = perc.group('modifiers').count('*')
+                new_val.append(_format(perc.group(),
+                                       val[i],
+                                       grouping,
+                                       monetary,
+                                       *val[i+1:i+1+starcount]))
+                i += (1 + starcount)
+    val = tuple(new_val)
+
+    return new_f % val
+
+def currency(val, symbol=True, grouping=False, international=False):
+    """Formats val according to the currency settings
+    in the current locale."""
+    conv = localeconv()
+
+    # check for illegal values
+    digits = conv[international and 'int_frac_digits' or 'frac_digits']
+    if digits == 127:
+        raise ValueError("Currency formatting is not possible using "
+                         "the 'C' locale.")
+
+    s = _localize(f'{abs(val):.{digits}f}', grouping, monetary=True)
+    # '<' and '>' are markers if the sign must be inserted between symbol and value
+    s = '<' + s + '>'
+
+    if symbol:
+        smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
+        precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
+        separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
+
+        if precedes:
+            s = smb + (separated and ' ' or '') + s
+        else:
+            if international and smb[-1] == ' ':
+                smb = smb[:-1]
+            s = s + (separated and ' ' or '') + smb
+
+    sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
+    sign = conv[val<0 and 'negative_sign' or 'positive_sign']
+
+    if sign_pos == 0:
+        s = '(' + s + ')'
+    elif sign_pos == 1:
+        s = sign + s
+    elif sign_pos == 2:
+        s = s + sign
+    elif sign_pos == 3:
+        s = s.replace('<', sign)
+    elif sign_pos == 4:
+        s = s.replace('>', sign)
+    else:
+        # the default if nothing specified;
+        # this should be the most fitting sign position
+        s = sign + s
+
+    return s.replace('<', '').replace('>', '')
+
+def str(val):
+    """Convert float to string, taking the locale into account."""
+    return _format("%.12g", val)
+
+def delocalize(string):
+    "Parses a string as a normalized number according to the locale settings."
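+    # (e.g. with LC_NUMERIC set to a locale such as de_DE, where the
+    # thousands separator is '.' and the decimal point is ',',
+    # delocalize('1.234,56') returns '1234.56'.)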
+ + conv = localeconv() + + #First, get rid of the grouping + ts = conv['thousands_sep'] + if ts: + string = string.replace(ts, '') + + #next, replace the decimal point with a dot + dd = conv['decimal_point'] + if dd: + string = string.replace(dd, '.') + return string + +def localize(string, grouping=False, monetary=False): + """Parses a string as locale number according to the locale settings.""" + return _localize(string, grouping, monetary) + +def atof(string, func=float): + "Parses a string as a float according to the locale settings." + return func(delocalize(string)) + +def atoi(string): + "Converts a string to an integer according to the locale settings." + return int(delocalize(string)) + +def _test(): + setlocale(LC_ALL, "") + #do grouping + s1 = format_string("%d", 123456789,1) + print(s1, "is", atoi(s1)) + #standard formatting + s1 = str(3.14) + print(s1, "is", atof(s1)) + +### Locale name aliasing engine + +# Author: Marc-Andre Lemburg, mal@lemburg.com +# Various tweaks by Fredrik Lundh + +# store away the low-level version of setlocale (it's +# overridden below) +_setlocale = setlocale + +def _replace_encoding(code, encoding): + if '.' in code: + langname = code[:code.index('.')] + else: + langname = code + # Convert the encoding to a C lib compatible encoding string + norm_encoding = encodings.normalize_encoding(encoding) + #print('norm encoding: %r' % norm_encoding) + norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(), + norm_encoding) + #print('aliased encoding: %r' % norm_encoding) + encoding = norm_encoding + norm_encoding = norm_encoding.lower() + if norm_encoding in locale_encoding_alias: + encoding = locale_encoding_alias[norm_encoding] + else: + norm_encoding = norm_encoding.replace('_', '') + norm_encoding = norm_encoding.replace('-', '') + if norm_encoding in locale_encoding_alias: + encoding = locale_encoding_alias[norm_encoding] + #print('found encoding %r' % encoding) + return langname + '.' + encoding + +def _append_modifier(code, modifier): + if modifier == 'euro': + if '.' not in code: + return code + '.ISO8859-15' + _, _, encoding = code.partition('.') + if encoding in ('ISO8859-15', 'UTF-8'): + return code + if encoding == 'ISO8859-1': + return _replace_encoding(code, 'ISO8859-15') + return code + '@' + modifier + +def normalize(localename): + + """ Returns a normalized locale code for the given locale + name. + + The returned locale code is formatted for use with + setlocale(). + + If normalization fails, the original name is returned + unchanged. + + If the given encoding is not known, the function defaults to + the default encoding for the locale code just like setlocale() + does. + + """ + # Normalize the locale name and extract the encoding and modifier + code = localename.lower() + if ':' in code: + # ':' is sometimes used as encoding delimiter. + code = code.replace(':', '.') + if '@' in code: + code, modifier = code.split('@', 1) + else: + modifier = '' + if '.' in code: + langname, encoding = code.split('.')[:2] + else: + langname = code + encoding = '' + + # First lookup: fullname (possibly with encoding and modifier) + lang_enc = langname + if encoding: + norm_encoding = encoding.replace('-', '') + norm_encoding = norm_encoding.replace('_', '') + lang_enc += '.' 
+ norm_encoding + lookup_name = lang_enc + if modifier: + lookup_name += '@' + modifier + code = locale_alias.get(lookup_name, None) + if code is not None: + return code + #print('first lookup failed') + + if modifier: + # Second try: fullname without modifier (possibly with encoding) + code = locale_alias.get(lang_enc, None) + if code is not None: + #print('lookup without modifier succeeded') + if '@' not in code: + return _append_modifier(code, modifier) + if code.split('@', 1)[1].lower() == modifier: + return code + #print('second lookup failed') + + if encoding: + # Third try: langname (without encoding, possibly with modifier) + lookup_name = langname + if modifier: + lookup_name += '@' + modifier + code = locale_alias.get(lookup_name, None) + if code is not None: + #print('lookup without encoding succeeded') + if '@' not in code: + return _replace_encoding(code, encoding) + code, modifier = code.split('@', 1) + return _replace_encoding(code, encoding) + '@' + modifier + + if modifier: + # Fourth try: langname (without encoding and modifier) + code = locale_alias.get(langname, None) + if code is not None: + #print('lookup without modifier and encoding succeeded') + if '@' not in code: + code = _replace_encoding(code, encoding) + return _append_modifier(code, modifier) + code, defmod = code.split('@', 1) + if defmod.lower() == modifier: + return _replace_encoding(code, encoding) + '@' + defmod + + return localename + +def _parse_localename(localename): + + """ Parses the locale code for localename and returns the + result as tuple (language code, encoding). + + The localename is normalized and passed through the locale + alias engine. A ValueError is raised in case the locale name + cannot be parsed. + + The language code corresponds to RFC 1766. code and encoding + can be None in case the values cannot be determined or are + unknown to this implementation. + + """ + code = normalize(localename) + if '@' in code: + # Deal with locale modifiers + code, modifier = code.split('@', 1) + if modifier == 'euro' and '.' not in code: + # Assume Latin-9 for @euro locales. This is bogus, + # since some systems may use other encodings for these + # locales. Also, we ignore other modifiers. + return code, 'iso-8859-15' + + if '.' in code: + return tuple(code.split('.')[:2]) + elif code == 'C': + return None, None + elif code == 'UTF-8': + # On macOS "LC_CTYPE=UTF-8" is a valid locale setting + # for getting UTF-8 handling for text. + return None, 'UTF-8' + raise ValueError('unknown locale: %s' % localename) + +def _build_localename(localetuple): + + """ Builds a locale code from the given tuple (language code, + encoding). + + No aliasing or normalizing takes place. + + """ + try: + language, encoding = localetuple + + if language is None: + language = 'C' + if encoding is None: + return language + else: + return language + '.' + encoding + except (TypeError, ValueError): + raise TypeError('Locale must be None, a string, or an iterable of ' + 'two strings -- language code, encoding.') from None + +def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): + + """ Tries to determine the default locale settings and returns + them as tuple (language code, encoding). + + According to POSIX, a program which has not called + setlocale(LC_ALL, "") runs using the portable 'C' locale. + Calling setlocale(LC_ALL, "") lets it use the default locale as + defined by the LANG variable. 
Since we don't want to interfere + with the current locale setting we thus emulate the behavior + in the way described above. + + To maintain compatibility with other platforms, not only the + LANG variable is tested, but a list of variables given as + envvars parameter. The first found to be defined will be + used. envvars defaults to the search path used in GNU gettext; + it must always contain the variable name 'LANG'. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + """ + + import warnings + warnings._deprecated( + "locale.getdefaultlocale", + "{name!r} is deprecated and slated for removal in Python {remove}. " + "Use setlocale(), getencoding() and getlocale() instead.", + remove=(3, 15)) + return _getdefaultlocale(envvars) + + +def _getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): + try: + # check if it's supported by the _locale module + import _locale + code, encoding = _locale._getdefaultlocale() + except (ImportError, AttributeError): + pass + else: + # make sure the code/encoding values are valid + if sys.platform == "win32" and code and code[:2] == "0x": + # map windows language identifier to language name + code = windows_locale.get(int(code, 0)) + # ...add other platform-specific processing here, if + # necessary... + return code, encoding + + # fall back on POSIX behaviour + import os + lookup = os.environ.get + for variable in envvars: + localename = lookup(variable,None) + if localename: + if variable == 'LANGUAGE': + localename = localename.split(':')[0] + break + else: + localename = 'C' + return _parse_localename(localename) + + +def getlocale(category=LC_CTYPE): + + """ Returns the current setting for the given locale category as + tuple (language code, encoding). + + category may be one of the LC_* value except LC_ALL. It + defaults to LC_CTYPE. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + """ + localename = _setlocale(category) + if category == LC_ALL and ';' in localename: + raise TypeError('category LC_ALL is not supported') + return _parse_localename(localename) + +def setlocale(category, locale=None): + + """ Set the locale for the given category. The locale can be + a string, an iterable of two strings (language code and encoding), + or None. + + Iterables are converted to strings using the locale aliasing + engine. Locale strings are passed directly to the C lib. + + category may be given as one of the LC_* values. + + """ + if locale and not isinstance(locale, _builtin_str): + # convert to string + locale = normalize(_build_localename(locale)) + return _setlocale(category, locale) + + +try: + from _locale import getencoding +except ImportError: + # When _locale.getencoding() is missing, locale.getencoding() uses the + # Python filesystem encoding. + def getencoding(): + return sys.getfilesystemencoding() + + +try: + CODESET +except NameError: + def getpreferredencoding(do_setlocale=True): + """Return the charset that the user is likely using.""" + if sys.flags.warn_default_encoding: + import warnings + warnings.warn( + "UTF-8 Mode affects locale.getpreferredencoding(). Consider locale.getencoding() instead.", + EncodingWarning, 2) + if sys.flags.utf8_mode: + return 'utf-8' + return getencoding() +else: + # On Unix, if CODESET is available, use that. 
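+    # (CODESET is only exported by _locale on platforms that support it;
+    # getencoding() then reports the charset of the active LC_CTYPE
+    # locale, which is why the locale is temporarily switched below.)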
+ def getpreferredencoding(do_setlocale=True): + """Return the charset that the user is likely using, + according to the system configuration.""" + + if sys.flags.warn_default_encoding: + import warnings + warnings.warn( + "UTF-8 Mode affects locale.getpreferredencoding(). Consider locale.getencoding() instead.", + EncodingWarning, 2) + if sys.flags.utf8_mode: + return 'utf-8' + + if not do_setlocale: + return getencoding() + + old_loc = setlocale(LC_CTYPE) + try: + try: + setlocale(LC_CTYPE, "") + except Error: + pass + return getencoding() + finally: + setlocale(LC_CTYPE, old_loc) + + +### Database +# +# The following data was extracted from the locale.alias file which +# comes with X11 and then hand edited removing the explicit encoding +# definitions and adding some more aliases. The file is usually +# available as /usr/lib/X11/locale/locale.alias. +# + +# +# The local_encoding_alias table maps lowercase encoding alias names +# to C locale encoding names (case-sensitive). Note that normalize() +# first looks up the encoding in the encodings.aliases dictionary and +# then applies this mapping to find the correct C lib name for the +# encoding. +# +locale_encoding_alias = { + + # Mappings for non-standard encoding names used in locale names + '437': 'C', + 'c': 'C', + 'en': 'ISO8859-1', + 'jis': 'JIS7', + 'jis7': 'JIS7', + 'ajec': 'eucJP', + 'koi8c': 'KOI8-C', + 'microsoftcp1251': 'CP1251', + 'microsoftcp1255': 'CP1255', + 'microsoftcp1256': 'CP1256', + '88591': 'ISO8859-1', + '88592': 'ISO8859-2', + '88595': 'ISO8859-5', + '885915': 'ISO8859-15', + + # Mappings from Python codec names to C lib encoding names + 'ascii': 'ISO8859-1', + 'latin_1': 'ISO8859-1', + 'iso8859_1': 'ISO8859-1', + 'iso8859_10': 'ISO8859-10', + 'iso8859_11': 'ISO8859-11', + 'iso8859_13': 'ISO8859-13', + 'iso8859_14': 'ISO8859-14', + 'iso8859_15': 'ISO8859-15', + 'iso8859_16': 'ISO8859-16', + 'iso8859_2': 'ISO8859-2', + 'iso8859_3': 'ISO8859-3', + 'iso8859_4': 'ISO8859-4', + 'iso8859_5': 'ISO8859-5', + 'iso8859_6': 'ISO8859-6', + 'iso8859_7': 'ISO8859-7', + 'iso8859_8': 'ISO8859-8', + 'iso8859_9': 'ISO8859-9', + 'iso2022_jp': 'JIS7', + 'shift_jis': 'SJIS', + 'tactis': 'TACTIS', + 'euc_jp': 'eucJP', + 'euc_kr': 'eucKR', + 'utf_8': 'UTF-8', + 'koi8_r': 'KOI8-R', + 'koi8_t': 'KOI8-T', + 'koi8_u': 'KOI8-U', + 'kz1048': 'RK1048', + 'cp1251': 'CP1251', + 'cp1255': 'CP1255', + 'cp1256': 'CP1256', + + # XXX This list is still incomplete. If you know more + # mappings, please file a bug report. Thanks. +} + +for k, v in sorted(locale_encoding_alias.items()): + k = k.replace('_', '') + locale_encoding_alias.setdefault(k, v) +del k, v + +# +# The locale_alias table maps lowercase alias names to C locale names +# (case-sensitive). Encodings are always separated from the locale +# name using a dot ('.'); they should only be given in case the +# language name is needed to interpret the given encoding alias +# correctly (CJK codes often have this need). +# +# Note that the normalize() function which uses this tables +# removes '_' and '-' characters from the encoding part of the +# locale name before doing the lookup. This saves a lot of +# space in the table. +# +# MAL 2004-12-10: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. 
+# +# These are the differences compared to the old mapping (Python 2.4 +# and older): +# +# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1' +# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' +# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' +# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' +# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' +# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP' +# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13' +# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13' +# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' +# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' +# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11' +# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312' +# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5' +# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5' +# +# MAL 2008-05-30: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. +# +# These are the differences compared to the old mapping (Python 2.5 +# and older): +# +# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2' +# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2' +# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' +# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' +# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8' +# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# +# AP 2010-04-12: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. 
+# +# These are the differences compared to the old mapping (Python 2.6.5 +# and older): +# +# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' +# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' +# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin' +# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin' +# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8' +# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# +# SS 2013-12-20: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. +# +# These are the differences compared to the old mapping (Python 3.3.3 +# and older): +# +# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8' +# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8' +# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# +# SS 2014-10-01: +# Updated alias mapping with glibc 2.19 supported locales. +# +# SS 2018-05-05: +# Updated alias mapping with glibc 2.27 supported locales. +# +# These are the differences compared to the old mapping (Python 3.6.5 +# and older): +# +# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia' +# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154' +# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R' +# +# SS 2025-02-04: +# Updated alias mapping with glibc 2.41 supported locales and the latest +# X lib alias mapping. +# +# These are the differences compared to the old mapping (Python 3.13.1 +# and older): +# +# updated 'c.utf8' -> 'C.UTF-8' to 'en_US.UTF-8' +# updated 'de_it' -> 'de_IT.ISO8859-1' to 'de_IT.UTF-8' +# removed 'de_li.utf8' +# updated 'en_il' -> 'en_IL.UTF-8' to 'en_IL.ISO8859-1' +# removed 'english.iso88591' +# updated 'es_cu' -> 'es_CU.UTF-8' to 'es_CU.ISO8859-1' +# updated 'russian' -> 'ru_RU.KOI8-R' to 'ru_RU.ISO8859-5' +# updated 'sr@latn' -> 'sr_CS.UTF-8@latin' to 'sr_RS.UTF-8@latin' +# removed 'univ' +# removed 'universal' +# +# SS 2025-06-10: +# Remove 'c.utf8' -> 'en_US.UTF-8' because 'en_US.UTF-8' does not exist +# on all platforms. 
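+#
+# For example, normalize('de_DE.UTF-8@euro') lower-cases the name and
+# strips '-'/'_' from the encoding before the lookups here, trying
+# 'de_de.utf8@euro', then 'de_de.utf8', then 'de_de@euro', then 'de_de',
+# and finally re-attaching the encoding and modifier to the alias found.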
+ +locale_alias = { + 'a3': 'az_AZ.KOI8-C', + 'a3_az': 'az_AZ.KOI8-C', + 'a3_az.koic': 'az_AZ.KOI8-C', + 'aa_dj': 'aa_DJ.ISO8859-1', + 'aa_er': 'aa_ER.UTF-8', + 'aa_et': 'aa_ET.UTF-8', + 'af': 'af_ZA.ISO8859-1', + 'af_za': 'af_ZA.ISO8859-1', + 'agr_pe': 'agr_PE.UTF-8', + 'ak_gh': 'ak_GH.UTF-8', + 'am': 'am_ET.UTF-8', + 'am_et': 'am_ET.UTF-8', + 'american': 'en_US.ISO8859-1', + 'an_es': 'an_ES.ISO8859-15', + 'anp_in': 'anp_IN.UTF-8', + 'ar': 'ar_AA.ISO8859-6', + 'ar_aa': 'ar_AA.ISO8859-6', + 'ar_ae': 'ar_AE.ISO8859-6', + 'ar_bh': 'ar_BH.ISO8859-6', + 'ar_dz': 'ar_DZ.ISO8859-6', + 'ar_eg': 'ar_EG.ISO8859-6', + 'ar_in': 'ar_IN.UTF-8', + 'ar_iq': 'ar_IQ.ISO8859-6', + 'ar_jo': 'ar_JO.ISO8859-6', + 'ar_kw': 'ar_KW.ISO8859-6', + 'ar_lb': 'ar_LB.ISO8859-6', + 'ar_ly': 'ar_LY.ISO8859-6', + 'ar_ma': 'ar_MA.ISO8859-6', + 'ar_om': 'ar_OM.ISO8859-6', + 'ar_qa': 'ar_QA.ISO8859-6', + 'ar_sa': 'ar_SA.ISO8859-6', + 'ar_sd': 'ar_SD.ISO8859-6', + 'ar_ss': 'ar_SS.UTF-8', + 'ar_sy': 'ar_SY.ISO8859-6', + 'ar_tn': 'ar_TN.ISO8859-6', + 'ar_ye': 'ar_YE.ISO8859-6', + 'arabic': 'ar_AA.ISO8859-6', + 'as': 'as_IN.UTF-8', + 'as_in': 'as_IN.UTF-8', + 'ast_es': 'ast_ES.ISO8859-15', + 'ayc_pe': 'ayc_PE.UTF-8', + 'az': 'az_AZ.ISO8859-9E', + 'az_az': 'az_AZ.ISO8859-9E', + 'az_az.iso88599e': 'az_AZ.ISO8859-9E', + 'az_ir': 'az_IR.UTF-8', + 'be': 'be_BY.CP1251', + 'be@latin': 'be_BY.UTF-8@latin', + 'be_bg.utf8': 'bg_BG.UTF-8', + 'be_by': 'be_BY.CP1251', + 'be_by@latin': 'be_BY.UTF-8@latin', + 'bem_zm': 'bem_ZM.UTF-8', + 'ber_dz': 'ber_DZ.UTF-8', + 'ber_ma': 'ber_MA.UTF-8', + 'bg': 'bg_BG.CP1251', + 'bg_bg': 'bg_BG.CP1251', + 'bhb_in.utf8': 'bhb_IN.UTF-8', + 'bho_in': 'bho_IN.UTF-8', + 'bho_np': 'bho_NP.UTF-8', + 'bi_vu': 'bi_VU.UTF-8', + 'bn_bd': 'bn_BD.UTF-8', + 'bn_in': 'bn_IN.UTF-8', + 'bo_cn': 'bo_CN.UTF-8', + 'bo_in': 'bo_IN.UTF-8', + 'bokmal': 'nb_NO.ISO8859-1', + 'bokm\xe5l': 'nb_NO.ISO8859-1', + 'br': 'br_FR.ISO8859-1', + 'br_fr': 'br_FR.ISO8859-1', + 'brx_in': 'brx_IN.UTF-8', + 'bs': 'bs_BA.ISO8859-2', + 'bs_ba': 'bs_BA.ISO8859-2', + 'bulgarian': 'bg_BG.CP1251', + 'byn_er': 'byn_ER.UTF-8', + 'c': 'C', + 'c-french': 'fr_CA.ISO8859-1', + 'c.ascii': 'C', + 'c.en': 'C', + 'c.iso88591': 'en_US.ISO8859-1', + 'c_c': 'C', + 'c_c.c': 'C', + 'ca': 'ca_ES.ISO8859-1', + 'ca_ad': 'ca_AD.ISO8859-1', + 'ca_es': 'ca_ES.ISO8859-1', + 'ca_es@valencia': 'ca_ES.UTF-8@valencia', + 'ca_fr': 'ca_FR.ISO8859-1', + 'ca_it': 'ca_IT.ISO8859-1', + 'catalan': 'ca_ES.ISO8859-1', + 'ce_ru': 'ce_RU.UTF-8', + 'cextend': 'en_US.ISO8859-1', + 'chinese-s': 'zh_CN.eucCN', + 'chinese-t': 'zh_TW.eucTW', + 'chr_us': 'chr_US.UTF-8', + 'ckb_iq': 'ckb_IQ.UTF-8', + 'cmn_tw': 'cmn_TW.UTF-8', + 'crh_ru': 'crh_RU.UTF-8', + 'crh_ua': 'crh_UA.UTF-8', + 'croatian': 'hr_HR.ISO8859-2', + 'cs': 'cs_CZ.ISO8859-2', + 'cs_cs': 'cs_CZ.ISO8859-2', + 'cs_cz': 'cs_CZ.ISO8859-2', + 'csb_pl': 'csb_PL.UTF-8', + 'cv_ru': 'cv_RU.UTF-8', + 'cy': 'cy_GB.ISO8859-1', + 'cy_gb': 'cy_GB.ISO8859-1', + 'cz': 'cs_CZ.ISO8859-2', + 'cz_cz': 'cs_CZ.ISO8859-2', + 'czech': 'cs_CZ.ISO8859-2', + 'da': 'da_DK.ISO8859-1', + 'da_dk': 'da_DK.ISO8859-1', + 'danish': 'da_DK.ISO8859-1', + 'dansk': 'da_DK.ISO8859-1', + 'de': 'de_DE.ISO8859-1', + 'de_at': 'de_AT.ISO8859-1', + 'de_be': 'de_BE.ISO8859-1', + 'de_ch': 'de_CH.ISO8859-1', + 'de_de': 'de_DE.ISO8859-1', + 'de_it': 'de_IT.UTF-8', + 'de_li': 'de_LI.ISO8859-1', + 'de_lu': 'de_LU.ISO8859-1', + 'deutsch': 'de_DE.ISO8859-1', + 'doi_in': 'doi_IN.UTF-8', + 'dsb_de': 'dsb_DE.UTF-8', + 'dutch': 'nl_NL.ISO8859-1', + 'dutch.iso88591': 'nl_BE.ISO8859-1', + 
'dv_mv': 'dv_MV.UTF-8', + 'dz_bt': 'dz_BT.UTF-8', + 'ee': 'ee_EE.ISO8859-4', + 'ee_ee': 'ee_EE.ISO8859-4', + 'eesti': 'et_EE.ISO8859-1', + 'el': 'el_GR.ISO8859-7', + 'el_cy': 'el_CY.ISO8859-7', + 'el_gr': 'el_GR.ISO8859-7', + 'el_gr@euro': 'el_GR.ISO8859-15', + 'en': 'en_US.ISO8859-1', + 'en_ag': 'en_AG.UTF-8', + 'en_au': 'en_AU.ISO8859-1', + 'en_be': 'en_BE.ISO8859-1', + 'en_bw': 'en_BW.ISO8859-1', + 'en_ca': 'en_CA.ISO8859-1', + 'en_dk': 'en_DK.ISO8859-1', + 'en_dl.utf8': 'en_DL.UTF-8', + 'en_gb': 'en_GB.ISO8859-1', + 'en_hk': 'en_HK.ISO8859-1', + 'en_ie': 'en_IE.ISO8859-1', + 'en_il': 'en_IL.ISO8859-1', + 'en_in': 'en_IN.ISO8859-1', + 'en_ng': 'en_NG.UTF-8', + 'en_nz': 'en_NZ.ISO8859-1', + 'en_ph': 'en_PH.ISO8859-1', + 'en_sc.utf8': 'en_SC.UTF-8', + 'en_sg': 'en_SG.ISO8859-1', + 'en_uk': 'en_GB.ISO8859-1', + 'en_us': 'en_US.ISO8859-1', + 'en_us@euro@euro': 'en_US.ISO8859-15', + 'en_za': 'en_ZA.ISO8859-1', + 'en_zm': 'en_ZM.UTF-8', + 'en_zw': 'en_ZW.ISO8859-1', + 'en_zw.utf8': 'en_ZS.UTF-8', + 'eng_gb': 'en_GB.ISO8859-1', + 'english': 'en_EN.ISO8859-1', + 'english_uk': 'en_GB.ISO8859-1', + 'english_united-states': 'en_US.ISO8859-1', + 'english_united-states.437': 'C', + 'english_us': 'en_US.ISO8859-1', + 'eo': 'eo_XX.ISO8859-3', + 'eo.utf8': 'eo.UTF-8', + 'eo_eo': 'eo_EO.ISO8859-3', + 'eo_us.utf8': 'eo_US.UTF-8', + 'eo_xx': 'eo_XX.ISO8859-3', + 'es': 'es_ES.ISO8859-1', + 'es_ar': 'es_AR.ISO8859-1', + 'es_bo': 'es_BO.ISO8859-1', + 'es_cl': 'es_CL.ISO8859-1', + 'es_co': 'es_CO.ISO8859-1', + 'es_cr': 'es_CR.ISO8859-1', + 'es_cu': 'es_CU.ISO8859-1', + 'es_do': 'es_DO.ISO8859-1', + 'es_ec': 'es_EC.ISO8859-1', + 'es_es': 'es_ES.ISO8859-1', + 'es_gt': 'es_GT.ISO8859-1', + 'es_hn': 'es_HN.ISO8859-1', + 'es_mx': 'es_MX.ISO8859-1', + 'es_ni': 'es_NI.ISO8859-1', + 'es_pa': 'es_PA.ISO8859-1', + 'es_pe': 'es_PE.ISO8859-1', + 'es_pr': 'es_PR.ISO8859-1', + 'es_py': 'es_PY.ISO8859-1', + 'es_sv': 'es_SV.ISO8859-1', + 'es_us': 'es_US.ISO8859-1', + 'es_uy': 'es_UY.ISO8859-1', + 'es_ve': 'es_VE.ISO8859-1', + 'estonian': 'et_EE.ISO8859-1', + 'et': 'et_EE.ISO8859-15', + 'et_ee': 'et_EE.ISO8859-15', + 'eu': 'eu_ES.ISO8859-1', + 'eu_es': 'eu_ES.ISO8859-1', + 'eu_fr': 'eu_FR.ISO8859-1', + 'fa': 'fa_IR.UTF-8', + 'fa_ir': 'fa_IR.UTF-8', + 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342', + 'ff_sn': 'ff_SN.UTF-8', + 'fi': 'fi_FI.ISO8859-15', + 'fi_fi': 'fi_FI.ISO8859-15', + 'fil_ph': 'fil_PH.UTF-8', + 'finnish': 'fi_FI.ISO8859-1', + 'fo': 'fo_FO.ISO8859-1', + 'fo_fo': 'fo_FO.ISO8859-1', + 'fr': 'fr_FR.ISO8859-1', + 'fr_be': 'fr_BE.ISO8859-1', + 'fr_ca': 'fr_CA.ISO8859-1', + 'fr_ch': 'fr_CH.ISO8859-1', + 'fr_fr': 'fr_FR.ISO8859-1', + 'fr_lu': 'fr_LU.ISO8859-1', + 'fran\xe7ais': 'fr_FR.ISO8859-1', + 'fre_fr': 'fr_FR.ISO8859-1', + 'french': 'fr_FR.ISO8859-1', + 'french.iso88591': 'fr_CH.ISO8859-1', + 'french_france': 'fr_FR.ISO8859-1', + 'fur_it': 'fur_IT.UTF-8', + 'fy_de': 'fy_DE.UTF-8', + 'fy_nl': 'fy_NL.UTF-8', + 'ga': 'ga_IE.ISO8859-1', + 'ga_ie': 'ga_IE.ISO8859-1', + 'galego': 'gl_ES.ISO8859-1', + 'galician': 'gl_ES.ISO8859-1', + 'gbm_in': 'gbm_IN.UTF-8', + 'gd': 'gd_GB.ISO8859-1', + 'gd_gb': 'gd_GB.ISO8859-1', + 'ger_de': 'de_DE.ISO8859-1', + 'german': 'de_DE.ISO8859-1', + 'german.iso88591': 'de_CH.ISO8859-1', + 'german_germany': 'de_DE.ISO8859-1', + 'gez_er': 'gez_ER.UTF-8', + 'gez_et': 'gez_ET.UTF-8', + 'gl': 'gl_ES.ISO8859-1', + 'gl_es': 'gl_ES.ISO8859-1', + 'greek': 'el_GR.ISO8859-7', + 'gu_in': 'gu_IN.UTF-8', + 'gv': 'gv_GB.ISO8859-1', + 'gv_gb': 'gv_GB.ISO8859-1', + 'ha_ng': 'ha_NG.UTF-8', + 'hak_tw': 'hak_TW.UTF-8', 
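+ # 'iw' is the obsolete ISO 639 code for Hebrew; the 'iw*' entries further + # down keep the old spelling but map it to the modern 'he' locale names.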
+ 'he': 'he_IL.ISO8859-8', + 'he_il': 'he_IL.ISO8859-8', + 'hebrew': 'he_IL.ISO8859-8', + 'hi': 'hi_IN.ISCII-DEV', + 'hi_in': 'hi_IN.ISCII-DEV', + 'hi_in.isciidev': 'hi_IN.ISCII-DEV', + 'hif_fj': 'hif_FJ.UTF-8', + 'hne': 'hne_IN.UTF-8', + 'hne_in': 'hne_IN.UTF-8', + 'hr': 'hr_HR.ISO8859-2', + 'hr_hr': 'hr_HR.ISO8859-2', + 'hrvatski': 'hr_HR.ISO8859-2', + 'hsb_de': 'hsb_DE.ISO8859-2', + 'ht_ht': 'ht_HT.UTF-8', + 'hu': 'hu_HU.ISO8859-2', + 'hu_hu': 'hu_HU.ISO8859-2', + 'hungarian': 'hu_HU.ISO8859-2', + 'hy_am': 'hy_AM.UTF-8', + 'hy_am.armscii8': 'hy_AM.ARMSCII_8', + 'ia': 'ia.UTF-8', + 'ia_fr': 'ia_FR.UTF-8', + 'icelandic': 'is_IS.ISO8859-1', + 'id': 'id_ID.ISO8859-1', + 'id_id': 'id_ID.ISO8859-1', + 'ie': 'ie.UTF-8', + 'ig_ng': 'ig_NG.UTF-8', + 'ik_ca': 'ik_CA.UTF-8', + 'in': 'id_ID.ISO8859-1', + 'in_id': 'id_ID.ISO8859-1', + 'is': 'is_IS.ISO8859-1', + 'is_is': 'is_IS.ISO8859-1', + 'iso-8859-1': 'en_US.ISO8859-1', + 'iso-8859-15': 'en_US.ISO8859-15', + 'iso8859-1': 'en_US.ISO8859-1', + 'iso8859-15': 'en_US.ISO8859-15', + 'iso_8859_1': 'en_US.ISO8859-1', + 'iso_8859_15': 'en_US.ISO8859-15', + 'it': 'it_IT.ISO8859-1', + 'it_ch': 'it_CH.ISO8859-1', + 'it_it': 'it_IT.ISO8859-1', + 'italian': 'it_IT.ISO8859-1', + 'iu': 'iu_CA.NUNACOM-8', + 'iu_ca': 'iu_CA.NUNACOM-8', + 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8', + 'iw': 'he_IL.ISO8859-8', + 'iw_il': 'he_IL.ISO8859-8', + 'iw_il.utf8': 'iw_IL.UTF-8', + 'ja': 'ja_JP.eucJP', + 'ja_jp': 'ja_JP.eucJP', + 'ja_jp.euc': 'ja_JP.eucJP', + 'ja_jp.mscode': 'ja_JP.SJIS', + 'ja_jp.pck': 'ja_JP.SJIS', + 'japan': 'ja_JP.eucJP', + 'japanese': 'ja_JP.eucJP', + 'japanese-euc': 'ja_JP.eucJP', + 'japanese.euc': 'ja_JP.eucJP', + 'jp_jp': 'ja_JP.eucJP', + 'ka': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS', + 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY', + 'kab_dz': 'kab_DZ.UTF-8', + 'kk_kz': 'kk_KZ.ptcp154', + 'kl': 'kl_GL.ISO8859-1', + 'kl_gl': 'kl_GL.ISO8859-1', + 'km_kh': 'km_KH.UTF-8', + 'kn': 'kn_IN.UTF-8', + 'kn_in': 'kn_IN.UTF-8', + 'ko': 'ko_KR.eucKR', + 'ko_kr': 'ko_KR.eucKR', + 'ko_kr.euc': 'ko_KR.eucKR', + 'kok_in': 'kok_IN.UTF-8', + 'korean': 'ko_KR.eucKR', + 'korean.euc': 'ko_KR.eucKR', + 'ks': 'ks_IN.UTF-8', + 'ks_in': 'ks_IN.UTF-8', + 'ks_in@devanagari.utf8': 'ks_IN.UTF-8@devanagari', + 'ku_tr': 'ku_TR.ISO8859-9', + 'kv_ru': 'kv_RU.UTF-8', + 'kw': 'kw_GB.ISO8859-1', + 'kw_gb': 'kw_GB.ISO8859-1', + 'ky': 'ky_KG.UTF-8', + 'ky_kg': 'ky_KG.UTF-8', + 'lb_lu': 'lb_LU.UTF-8', + 'lg_ug': 'lg_UG.ISO8859-10', + 'li_be': 'li_BE.UTF-8', + 'li_nl': 'li_NL.UTF-8', + 'lij_it': 'lij_IT.UTF-8', + 'lithuanian': 'lt_LT.ISO8859-13', + 'ln_cd': 'ln_CD.UTF-8', + 'lo': 'lo_LA.MULELAO-1', + 'lo_la': 'lo_LA.MULELAO-1', + 'lo_la.cp1133': 'lo_LA.IBM-CP1133', + 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133', + 'lo_la.mulelao1': 'lo_LA.MULELAO-1', + 'lt': 'lt_LT.ISO8859-13', + 'lt_lt': 'lt_LT.ISO8859-13', + 'ltg_lv.utf8': 'ltg_LV.UTF-8', + 'lv': 'lv_LV.ISO8859-13', + 'lv_lv': 'lv_LV.ISO8859-13', + 'lzh_tw': 'lzh_TW.UTF-8', + 'mag_in': 'mag_IN.UTF-8', + 'mai': 'mai_IN.UTF-8', + 'mai_in': 'mai_IN.UTF-8', + 'mai_np': 'mai_NP.UTF-8', + 'mdf_ru': 'mdf_RU.UTF-8', + 'mfe_mu': 'mfe_MU.UTF-8', + 'mg_mg': 'mg_MG.ISO8859-15', + 'mhr_ru': 'mhr_RU.UTF-8', + 'mi': 'mi_NZ.ISO8859-1', + 'mi_nz': 'mi_NZ.ISO8859-1', + 'miq_ni': 'miq_NI.UTF-8', + 'mjw_in': 'mjw_IN.UTF-8', + 'mk': 'mk_MK.ISO8859-5', + 'mk_mk': 'mk_MK.ISO8859-5', + 'ml': 'ml_IN.UTF-8', + 'ml_in': 'ml_IN.UTF-8', + 'mn_mn': 'mn_MN.UTF-8', + 
'mni_in': 'mni_IN.UTF-8', + 'mnw_mm': 'mnw_MM.UTF-8', + 'mr': 'mr_IN.UTF-8', + 'mr_in': 'mr_IN.UTF-8', + 'ms': 'ms_MY.ISO8859-1', + 'ms_my': 'ms_MY.ISO8859-1', + 'mt': 'mt_MT.ISO8859-3', + 'mt_mt': 'mt_MT.ISO8859-3', + 'my_mm': 'my_MM.UTF-8', + 'nan_tw': 'nan_TW.UTF-8', + 'nb': 'nb_NO.ISO8859-1', + 'nb_no': 'nb_NO.ISO8859-1', + 'nds_de': 'nds_DE.UTF-8', + 'nds_nl': 'nds_NL.UTF-8', + 'ne_np': 'ne_NP.UTF-8', + 'nhn_mx': 'nhn_MX.UTF-8', + 'niu_nu': 'niu_NU.UTF-8', + 'niu_nz': 'niu_NZ.UTF-8', + 'nl': 'nl_NL.ISO8859-1', + 'nl_aw': 'nl_AW.UTF-8', + 'nl_be': 'nl_BE.ISO8859-1', + 'nl_nl': 'nl_NL.ISO8859-1', + 'nn': 'nn_NO.ISO8859-1', + 'nn_no': 'nn_NO.ISO8859-1', + 'no': 'no_NO.ISO8859-1', + 'no@nynorsk': 'ny_NO.ISO8859-1', + 'no_no': 'no_NO.ISO8859-1', + 'no_no.iso88591@bokmal': 'no_NO.ISO8859-1', + 'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1', + 'norwegian': 'no_NO.ISO8859-1', + 'nr': 'nr_ZA.ISO8859-1', + 'nr_za': 'nr_ZA.ISO8859-1', + 'nso': 'nso_ZA.ISO8859-15', + 'nso_za': 'nso_ZA.ISO8859-15', + 'ny': 'ny_NO.ISO8859-1', + 'ny_no': 'ny_NO.ISO8859-1', + 'nynorsk': 'nn_NO.ISO8859-1', + 'oc': 'oc_FR.ISO8859-1', + 'oc_fr': 'oc_FR.ISO8859-1', + 'om_et': 'om_ET.UTF-8', + 'om_ke': 'om_KE.ISO8859-1', + 'or': 'or_IN.UTF-8', + 'or_in': 'or_IN.UTF-8', + 'os_ru': 'os_RU.UTF-8', + 'pa': 'pa_IN.UTF-8', + 'pa_in': 'pa_IN.UTF-8', + 'pa_pk': 'pa_PK.UTF-8', + 'pap_an': 'pap_AN.UTF-8', + 'pap_aw': 'pap_AW.UTF-8', + 'pap_cw': 'pap_CW.UTF-8', + 'pd': 'pd_US.ISO8859-1', + 'pd_de': 'pd_DE.ISO8859-1', + 'pd_us': 'pd_US.ISO8859-1', + 'ph': 'ph_PH.ISO8859-1', + 'ph_ph': 'ph_PH.ISO8859-1', + 'pl': 'pl_PL.ISO8859-2', + 'pl_pl': 'pl_PL.ISO8859-2', + 'polish': 'pl_PL.ISO8859-2', + 'portuguese': 'pt_PT.ISO8859-1', + 'portuguese_brazil': 'pt_BR.ISO8859-1', + 'posix': 'C', + 'posix-utf2': 'C', + 'pp': 'pp_AN.ISO8859-1', + 'pp_an': 'pp_AN.ISO8859-1', + 'ps_af': 'ps_AF.UTF-8', + 'pt': 'pt_PT.ISO8859-1', + 'pt_br': 'pt_BR.ISO8859-1', + 'pt_pt': 'pt_PT.ISO8859-1', + 'quz_pe': 'quz_PE.UTF-8', + 'raj_in': 'raj_IN.UTF-8', + 'rif_ma': 'rif_MA.UTF-8', + 'ro': 'ro_RO.ISO8859-2', + 'ro_ro': 'ro_RO.ISO8859-2', + 'romanian': 'ro_RO.ISO8859-2', + 'ru': 'ru_RU.UTF-8', + 'ru_ru': 'ru_RU.UTF-8', + 'ru_ua': 'ru_UA.KOI8-U', + 'rumanian': 'ro_RO.ISO8859-2', + 'russian': 'ru_RU.ISO8859-5', + 'rw': 'rw_RW.ISO8859-1', + 'rw_rw': 'rw_RW.ISO8859-1', + 'sa_in': 'sa_IN.UTF-8', + 'sah_ru': 'sah_RU.UTF-8', + 'sat_in': 'sat_IN.UTF-8', + 'sc_it': 'sc_IT.UTF-8', + 'scn_it': 'scn_IT.UTF-8', + 'sd': 'sd_IN.UTF-8', + 'sd_in': 'sd_IN.UTF-8', + 'sd_in@devanagari.utf8': 'sd_IN.UTF-8@devanagari', + 'sd_pk': 'sd_PK.UTF-8', + 'se_no': 'se_NO.UTF-8', + 'serbocroatian': 'sr_RS.UTF-8@latin', + 'sgs_lt': 'sgs_LT.UTF-8', + 'sh': 'sr_RS.UTF-8@latin', + 'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2', + 'sh_hr': 'sh_HR.ISO8859-2', + 'sh_hr.iso88592': 'hr_HR.ISO8859-2', + 'sh_sp': 'sr_CS.ISO8859-2', + 'sh_yu': 'sr_RS.UTF-8@latin', + 'shn_mm': 'shn_MM.UTF-8', + 'shs_ca': 'shs_CA.UTF-8', + 'si': 'si_LK.UTF-8', + 'si_lk': 'si_LK.UTF-8', + 'sid_et': 'sid_ET.UTF-8', + 'sinhala': 'si_LK.UTF-8', + 'sk': 'sk_SK.ISO8859-2', + 'sk_sk': 'sk_SK.ISO8859-2', + 'sl': 'sl_SI.ISO8859-2', + 'sl_cs': 'sl_CS.ISO8859-2', + 'sl_si': 'sl_SI.ISO8859-2', + 'slovak': 'sk_SK.ISO8859-2', + 'slovene': 'sl_SI.ISO8859-2', + 'slovenian': 'sl_SI.ISO8859-2', + 'sm_ws': 'sm_WS.UTF-8', + 'so_dj': 'so_DJ.ISO8859-1', + 'so_et': 'so_ET.UTF-8', + 'so_ke': 'so_KE.ISO8859-1', + 'so_so': 'so_SO.ISO8859-1', + 'sp': 'sr_CS.ISO8859-5', + 'sp_yu': 'sr_CS.ISO8859-5', + 'spanish': 'es_ES.ISO8859-1', + 'spanish_spain': 
'es_ES.ISO8859-1', + 'sq': 'sq_AL.ISO8859-2', + 'sq_al': 'sq_AL.ISO8859-2', + 'sq_mk': 'sq_MK.UTF-8', + 'sr': 'sr_RS.UTF-8', + 'sr@cyrillic': 'sr_RS.UTF-8', + 'sr@latn': 'sr_RS.UTF-8@latin', + 'sr_cs': 'sr_CS.UTF-8', + 'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2', + 'sr_cs@latn': 'sr_CS.UTF-8@latin', + 'sr_me': 'sr_ME.UTF-8', + 'sr_rs': 'sr_RS.UTF-8', + 'sr_rs@latn': 'sr_RS.UTF-8@latin', + 'sr_sp': 'sr_CS.ISO8859-2', + 'sr_yu': 'sr_RS.UTF-8@latin', + 'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251', + 'sr_yu.iso88592': 'sr_CS.ISO8859-2', + 'sr_yu.iso88595': 'sr_CS.ISO8859-5', + 'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5', + 'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251', + 'sr_yu.utf8': 'sr_RS.UTF-8', + 'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8', + 'sr_yu@cyrillic': 'sr_RS.UTF-8', + 'ss': 'ss_ZA.ISO8859-1', + 'ss_za': 'ss_ZA.ISO8859-1', + 'ssy_er': 'ssy_ER.UTF-8', + 'st': 'st_ZA.ISO8859-1', + 'st_za': 'st_ZA.ISO8859-1', + 'su_id': 'su_ID.UTF-8', + 'sv': 'sv_SE.ISO8859-1', + 'sv_fi': 'sv_FI.ISO8859-1', + 'sv_se': 'sv_SE.ISO8859-1', + 'sw_ke': 'sw_KE.UTF-8', + 'sw_tz': 'sw_TZ.UTF-8', + 'swedish': 'sv_SE.ISO8859-1', + 'syr': 'syr.UTF-8', + 'szl_pl': 'szl_PL.UTF-8', + 'ta': 'ta_IN.TSCII-0', + 'ta_in': 'ta_IN.TSCII-0', + 'ta_in.tscii': 'ta_IN.TSCII-0', + 'ta_in.tscii0': 'ta_IN.TSCII-0', + 'ta_lk': 'ta_LK.UTF-8', + 'tcy_in.utf8': 'tcy_IN.UTF-8', + 'te': 'te_IN.UTF-8', + 'te_in': 'te_IN.UTF-8', + 'tg': 'tg_TJ.KOI8-C', + 'tg_tj': 'tg_TJ.KOI8-C', + 'th': 'th_TH.ISO8859-11', + 'th_th': 'th_TH.ISO8859-11', + 'th_th.tactis': 'th_TH.TIS620', + 'th_th.tis620': 'th_TH.TIS620', + 'thai': 'th_TH.ISO8859-11', + 'the_np': 'the_NP.UTF-8', + 'ti_er': 'ti_ER.UTF-8', + 'ti_et': 'ti_ET.UTF-8', + 'tig_er': 'tig_ER.UTF-8', + 'tk_tm': 'tk_TM.UTF-8', + 'tl': 'tl_PH.ISO8859-1', + 'tl_ph': 'tl_PH.ISO8859-1', + 'tn': 'tn_ZA.ISO8859-15', + 'tn_za': 'tn_ZA.ISO8859-15', + 'to_to': 'to_TO.UTF-8', + 'tok': 'tok.UTF-8', + 'tpi_pg': 'tpi_PG.UTF-8', + 'tr': 'tr_TR.ISO8859-9', + 'tr_cy': 'tr_CY.ISO8859-9', + 'tr_tr': 'tr_TR.ISO8859-9', + 'ts': 'ts_ZA.ISO8859-1', + 'ts_za': 'ts_ZA.ISO8859-1', + 'tt': 'tt_RU.TATAR-CYR', + 'tt_ru': 'tt_RU.TATAR-CYR', + 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR', + 'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif', + 'turkish': 'tr_TR.ISO8859-9', + 'ug_cn': 'ug_CN.UTF-8', + 'uk': 'uk_UA.KOI8-U', + 'uk_ua': 'uk_UA.KOI8-U', + 'univ.utf8': 'en_US.UTF-8', + 'universal.utf8@ucs4': 'en_US.UTF-8', + 'unm_us': 'unm_US.UTF-8', + 'ur': 'ur_PK.CP1256', + 'ur_in': 'ur_IN.UTF-8', + 'ur_pk': 'ur_PK.CP1256', + 'uz': 'uz_UZ.UTF-8', + 'uz_uz': 'uz_UZ.UTF-8', + 'uz_uz@cyrillic': 'uz_UZ.UTF-8', + 've': 've_ZA.UTF-8', + 've_za': 've_ZA.UTF-8', + 'vi': 'vi_VN.TCVN', + 'vi_vn': 'vi_VN.TCVN', + 'vi_vn.tcvn': 'vi_VN.TCVN', + 'vi_vn.tcvn5712': 'vi_VN.TCVN', + 'vi_vn.viscii': 'vi_VN.VISCII', + 'vi_vn.viscii111': 'vi_VN.VISCII', + 'wa': 'wa_BE.ISO8859-1', + 'wa_be': 'wa_BE.ISO8859-1', + 'wae_ch': 'wae_CH.UTF-8', + 'wal_et': 'wal_ET.UTF-8', + 'wo_sn': 'wo_SN.UTF-8', + 'xh': 'xh_ZA.ISO8859-1', + 'xh_za': 'xh_ZA.ISO8859-1', + 'yi': 'yi_US.CP1255', + 'yi_us': 'yi_US.CP1255', + 'yo_ng': 'yo_NG.UTF-8', + 'yue_hk': 'yue_HK.UTF-8', + 'yuw_pg': 'yuw_PG.UTF-8', + 'zgh_ma': 'zgh_MA.UTF-8', + 'zh': 'zh_CN.eucCN', + 'zh_cn': 'zh_CN.gb2312', + 'zh_cn.big5': 'zh_TW.big5', + 'zh_cn.euc': 'zh_CN.eucCN', + 'zh_hk': 'zh_HK.big5hkscs', + 'zh_hk.big5hk': 'zh_HK.big5hkscs', + 'zh_sg': 'zh_SG.GB2312', + 'zh_sg.gbk': 'zh_SG.GBK', + 'zh_tw': 'zh_TW.big5', + 'zh_tw.euc': 'zh_TW.eucTW', + 'zh_tw.euctw': 'zh_TW.eucTW', + 'zu': 'zu_ZA.ISO8859-1', + 'zu_za': 'zu_ZA.ISO8859-1', +} + +# +# 
This maps Windows language identifiers to locale strings. +# +# This list has been updated from +# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp +# to include every locale up to Windows Vista. +# +# NOTE: this mapping is incomplete. If your language is missing, please +# submit a bug report as detailed in the Python devguide at: +# https://devguide.python.org/triage/issue-tracker/ +# Make sure you include the missing language identifier and the suggested +# locale code. +# + +windows_locale = { + 0x0436: "af_ZA", # Afrikaans + 0x041c: "sq_AL", # Albanian + 0x0484: "gsw_FR",# Alsatian - France + 0x045e: "am_ET", # Amharic - Ethiopia + 0x0401: "ar_SA", # Arabic - Saudi Arabia + 0x0801: "ar_IQ", # Arabic - Iraq + 0x0c01: "ar_EG", # Arabic - Egypt + 0x1001: "ar_LY", # Arabic - Libya + 0x1401: "ar_DZ", # Arabic - Algeria + 0x1801: "ar_MA", # Arabic - Morocco + 0x1c01: "ar_TN", # Arabic - Tunisia + 0x2001: "ar_OM", # Arabic - Oman + 0x2401: "ar_YE", # Arabic - Yemen + 0x2801: "ar_SY", # Arabic - Syria + 0x2c01: "ar_JO", # Arabic - Jordan + 0x3001: "ar_LB", # Arabic - Lebanon + 0x3401: "ar_KW", # Arabic - Kuwait + 0x3801: "ar_AE", # Arabic - United Arab Emirates + 0x3c01: "ar_BH", # Arabic - Bahrain + 0x4001: "ar_QA", # Arabic - Qatar + 0x042b: "hy_AM", # Armenian + 0x044d: "as_IN", # Assamese - India + 0x042c: "az_AZ", # Azeri - Latin + 0x082c: "az_AZ", # Azeri - Cyrillic + 0x046d: "ba_RU", # Bashkir + 0x042d: "eu_ES", # Basque - Spain + 0x0423: "be_BY", # Belarusian + 0x0445: "bn_IN", # Bengali - India + 0x201a: "bs_BA", # Bosnian - Cyrillic + 0x141a: "bs_BA", # Bosnian - Latin + 0x047e: "br_FR", # Breton - France + 0x0402: "bg_BG", # Bulgarian +# 0x0455: "my_MM", # Burmese - Not supported + 0x0403: "ca_ES", # Catalan + 0x0004: "zh_CHS",# Chinese - Simplified + 0x0404: "zh_TW", # Chinese - Taiwan + 0x0804: "zh_CN", # Chinese - PRC + 0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R. + 0x1004: "zh_SG", # Chinese - Singapore + 0x1404: "zh_MO", # Chinese - Macao S.A.R.
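+# Illustrative use, not part of the module: third-party code often resolves +# the current Windows UI language through this table, for example: +# +#     import ctypes, locale +#     lang_id = ctypes.windll.kernel32.GetUserDefaultUILanguage() +#     name = locale.windows_locale.get(lang_id, "en_US")  # fallback is an arbitrary choice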
+ 0x7c04: "zh_CHT",# Chinese - Traditional + 0x0483: "co_FR", # Corsican - France + 0x041a: "hr_HR", # Croatian + 0x101a: "hr_BA", # Croatian - Bosnia + 0x0405: "cs_CZ", # Czech + 0x0406: "da_DK", # Danish + 0x048c: "gbz_AF",# Dari - Afghanistan + 0x0465: "div_MV",# Divehi - Maldives + 0x0413: "nl_NL", # Dutch - The Netherlands + 0x0813: "nl_BE", # Dutch - Belgium + 0x0409: "en_US", # English - United States + 0x0809: "en_GB", # English - United Kingdom + 0x0c09: "en_AU", # English - Australia + 0x1009: "en_CA", # English - Canada + 0x1409: "en_NZ", # English - New Zealand + 0x1809: "en_IE", # English - Ireland + 0x1c09: "en_ZA", # English - South Africa + 0x2009: "en_JA", # English - Jamaica + 0x2409: "en_CB", # English - Caribbean + 0x2809: "en_BZ", # English - Belize + 0x2c09: "en_TT", # English - Trinidad + 0x3009: "en_ZW", # English - Zimbabwe + 0x3409: "en_PH", # English - Philippines + 0x4009: "en_IN", # English - India + 0x4409: "en_MY", # English - Malaysia + 0x4809: "en_IN", # English - Singapore + 0x0425: "et_EE", # Estonian + 0x0438: "fo_FO", # Faroese + 0x0464: "fil_PH",# Filipino + 0x040b: "fi_FI", # Finnish + 0x040c: "fr_FR", # French - France + 0x080c: "fr_BE", # French - Belgium + 0x0c0c: "fr_CA", # French - Canada + 0x100c: "fr_CH", # French - Switzerland + 0x140c: "fr_LU", # French - Luxembourg + 0x180c: "fr_MC", # French - Monaco + 0x0462: "fy_NL", # Frisian - Netherlands + 0x0456: "gl_ES", # Galician + 0x0437: "ka_GE", # Georgian + 0x0407: "de_DE", # German - Germany + 0x0807: "de_CH", # German - Switzerland + 0x0c07: "de_AT", # German - Austria + 0x1007: "de_LU", # German - Luxembourg + 0x1407: "de_LI", # German - Liechtenstein + 0x0408: "el_GR", # Greek + 0x046f: "kl_GL", # Greenlandic - Greenland + 0x0447: "gu_IN", # Gujarati + 0x0468: "ha_NG", # Hausa - Latin + 0x040d: "he_IL", # Hebrew + 0x0439: "hi_IN", # Hindi + 0x040e: "hu_HU", # Hungarian + 0x040f: "is_IS", # Icelandic + 0x0421: "id_ID", # Indonesian + 0x045d: "iu_CA", # Inuktitut - Syllabics + 0x085d: "iu_CA", # Inuktitut - Latin + 0x083c: "ga_IE", # Irish - Ireland + 0x0410: "it_IT", # Italian - Italy + 0x0810: "it_CH", # Italian - Switzerland + 0x0411: "ja_JP", # Japanese + 0x044b: "kn_IN", # Kannada - India + 0x043f: "kk_KZ", # Kazakh + 0x0453: "kh_KH", # Khmer - Cambodia + 0x0486: "qut_GT",# K'iche - Guatemala + 0x0487: "rw_RW", # Kinyarwanda - Rwanda + 0x0457: "kok_IN",# Konkani + 0x0412: "ko_KR", # Korean + 0x0440: "ky_KG", # Kyrgyz + 0x0454: "lo_LA", # Lao - Lao PDR + 0x0426: "lv_LV", # Latvian + 0x0427: "lt_LT", # Lithuanian + 0x082e: "dsb_DE",# Lower Sorbian - Germany + 0x046e: "lb_LU", # Luxembourgish + 0x042f: "mk_MK", # FYROM Macedonian + 0x043e: "ms_MY", # Malay - Malaysia + 0x083e: "ms_BN", # Malay - Brunei Darussalam + 0x044c: "ml_IN", # Malayalam - India + 0x043a: "mt_MT", # Maltese + 0x0481: "mi_NZ", # Maori + 0x047a: "arn_CL",# Mapudungun + 0x044e: "mr_IN", # Marathi + 0x047c: "moh_CA",# Mohawk - Canada + 0x0450: "mn_MN", # Mongolian - Cyrillic + 0x0850: "mn_CN", # Mongolian - PRC + 0x0461: "ne_NP", # Nepali + 0x0414: "nb_NO", # Norwegian - Bokmal + 0x0814: "nn_NO", # Norwegian - Nynorsk + 0x0482: "oc_FR", # Occitan - France + 0x0448: "or_IN", # Oriya - India + 0x0463: "ps_AF", # Pashto - Afghanistan + 0x0429: "fa_IR", # Persian + 0x0415: "pl_PL", # Polish + 0x0416: "pt_BR", # Portuguese - Brazil + 0x0816: "pt_PT", # Portuguese - Portugal + 0x0446: "pa_IN", # Punjabi + 0x046b: "quz_BO",# Quechua (Bolivia) + 0x086b: "quz_EC",# Quechua (Ecuador) + 0x0c6b: "quz_PE",# Quechua (Peru) + 0x0418: 
"ro_RO", # Romanian - Romania + 0x0417: "rm_CH", # Romansh + 0x0419: "ru_RU", # Russian + 0x243b: "smn_FI",# Sami Finland + 0x103b: "smj_NO",# Sami Norway + 0x143b: "smj_SE",# Sami Sweden + 0x043b: "se_NO", # Sami Northern Norway + 0x083b: "se_SE", # Sami Northern Sweden + 0x0c3b: "se_FI", # Sami Northern Finland + 0x203b: "sms_FI",# Sami Skolt + 0x183b: "sma_NO",# Sami Southern Norway + 0x1c3b: "sma_SE",# Sami Southern Sweden + 0x044f: "sa_IN", # Sanskrit + 0x0c1a: "sr_SP", # Serbian - Cyrillic + 0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic + 0x081a: "sr_SP", # Serbian - Latin + 0x181a: "sr_BA", # Serbian - Bosnia Latin + 0x045b: "si_LK", # Sinhala - Sri Lanka + 0x046c: "ns_ZA", # Northern Sotho + 0x0432: "tn_ZA", # Setswana - Southern Africa + 0x041b: "sk_SK", # Slovak + 0x0424: "sl_SI", # Slovenian + 0x040a: "es_ES", # Spanish - Spain + 0x080a: "es_MX", # Spanish - Mexico + 0x0c0a: "es_ES", # Spanish - Spain (Modern) + 0x100a: "es_GT", # Spanish - Guatemala + 0x140a: "es_CR", # Spanish - Costa Rica + 0x180a: "es_PA", # Spanish - Panama + 0x1c0a: "es_DO", # Spanish - Dominican Republic + 0x200a: "es_VE", # Spanish - Venezuela + 0x240a: "es_CO", # Spanish - Colombia + 0x280a: "es_PE", # Spanish - Peru + 0x2c0a: "es_AR", # Spanish - Argentina + 0x300a: "es_EC", # Spanish - Ecuador + 0x340a: "es_CL", # Spanish - Chile + 0x380a: "es_UR", # Spanish - Uruguay + 0x3c0a: "es_PY", # Spanish - Paraguay + 0x400a: "es_BO", # Spanish - Bolivia + 0x440a: "es_SV", # Spanish - El Salvador + 0x480a: "es_HN", # Spanish - Honduras + 0x4c0a: "es_NI", # Spanish - Nicaragua + 0x500a: "es_PR", # Spanish - Puerto Rico + 0x540a: "es_US", # Spanish - United States +# 0x0430: "", # Sutu - Not supported + 0x0441: "sw_KE", # Swahili + 0x041d: "sv_SE", # Swedish - Sweden + 0x081d: "sv_FI", # Swedish - Finland + 0x045a: "syr_SY",# Syriac + 0x0428: "tg_TJ", # Tajik - Cyrillic + 0x085f: "tmz_DZ",# Tamazight - Latin + 0x0449: "ta_IN", # Tamil + 0x0444: "tt_RU", # Tatar + 0x044a: "te_IN", # Telugu + 0x041e: "th_TH", # Thai + 0x0851: "bo_BT", # Tibetan - Bhutan + 0x0451: "bo_CN", # Tibetan - PRC + 0x041f: "tr_TR", # Turkish + 0x0442: "tk_TM", # Turkmen - Cyrillic + 0x0480: "ug_CN", # Uighur - Arabic + 0x0422: "uk_UA", # Ukrainian + 0x042e: "wen_DE",# Upper Sorbian - Germany + 0x0420: "ur_PK", # Urdu + 0x0820: "ur_IN", # Urdu - India + 0x0443: "uz_UZ", # Uzbek - Latin + 0x0843: "uz_UZ", # Uzbek - Cyrillic + 0x042a: "vi_VN", # Vietnamese + 0x0452: "cy_GB", # Welsh + 0x0488: "wo_SN", # Wolof - Senegal + 0x0434: "xh_ZA", # Xhosa - South Africa + 0x0485: "sah_RU",# Yakut - Cyrillic + 0x0478: "ii_CN", # Yi - PRC + 0x046a: "yo_NG", # Yoruba - Nigeria + 0x0435: "zu_ZA", # Zulu +} + +def _print_locale(): + + """ Test function. 
+ """ + categories = {} + def _init_categories(categories=categories): + for k,v in globals().items(): + if k[:3] == 'LC_': + categories[k] = v + _init_categories() + del categories['LC_ALL'] + + print('Locale defaults as determined by getdefaultlocale():') + print('-'*72) + lang, enc = getdefaultlocale() + print('Language: ', lang or '(undefined)') + print('Encoding: ', enc or '(undefined)') + print() + + print('Locale settings on startup:') + print('-'*72) + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + + try: + setlocale(LC_ALL, "") + except: + print('NOTE:') + print('setlocale(LC_ALL, "") does not support the default locale') + print('given in the OS environment variables.') + else: + print() + print('Locale settings after calling setlocale(LC_ALL, ""):') + print('-'*72) + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + +### + +try: + LC_MESSAGES +except NameError: + pass +else: + __all__.append("LC_MESSAGES") + +if __name__=='__main__': + print('Locale aliasing:') + print() + _print_locale() + print() + print('Number formatting:') + print() + _test() diff --git a/Python313_13_x64_Template/Lib/logging/__init__.py b/Python314_4_x64_Template/Lib/logging/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/logging/__init__.py rename to Python314_4_x64_Template/Lib/logging/__init__.py diff --git a/Python314_4_x64_Template/Lib/logging/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/logging/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..d3d5ff00 Binary files /dev/null and b/Python314_4_x64_Template/Lib/logging/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/logging/__pycache__/config.cpython-314.pyc b/Python314_4_x64_Template/Lib/logging/__pycache__/config.cpython-314.pyc new file mode 100644 index 00000000..dc1df3fb Binary files /dev/null and b/Python314_4_x64_Template/Lib/logging/__pycache__/config.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/logging/__pycache__/handlers.cpython-314.pyc b/Python314_4_x64_Template/Lib/logging/__pycache__/handlers.cpython-314.pyc new file mode 100644 index 00000000..05752a2d Binary files /dev/null and b/Python314_4_x64_Template/Lib/logging/__pycache__/handlers.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/logging/config.py b/Python314_4_x64_Template/Lib/logging/config.py new file mode 100644 index 00000000..3d9aa00f --- /dev/null +++ b/Python314_4_x64_Template/Lib/logging/config.py @@ -0,0 +1,1077 @@ +# Copyright 2001-2023 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Configuration functions for the logging package for Python. The core package +is based on PEP 282 and comments thereto in comp.lang.python, and influenced +by Apache's log4j system. + +Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import errno +import functools +import io +import logging +import logging.handlers +import os +import queue +import re +import struct +import threading +import traceback + +from socketserver import ThreadingTCPServer, StreamRequestHandler + + +DEFAULT_LOGGING_CONFIG_PORT = 9030 + +RESET_ERROR = errno.ECONNRESET + +# +# The following code implements a socket listener for on-the-fly +# reconfiguration of logging. +# +# _listener holds the server object doing the listening +_listener = None + +def fileConfig(fname, defaults=None, disable_existing_loggers=True, encoding=None): + """ + Read the logging configuration from a ConfigParser-format file. + + This can be called several times from an application, allowing an end user + the ability to select from various pre-canned configurations (if the + developer provides a mechanism to present the choices and load the chosen + configuration). + """ + import configparser + + if isinstance(fname, str): + if not os.path.exists(fname): + raise FileNotFoundError(f"{fname} doesn't exist") + elif not os.path.getsize(fname): + raise RuntimeError(f'{fname} is an empty file') + + if isinstance(fname, configparser.RawConfigParser): + cp = fname + else: + try: + cp = configparser.ConfigParser(defaults) + if hasattr(fname, 'readline'): + cp.read_file(fname) + else: + encoding = io.text_encoding(encoding) + cp.read(fname, encoding=encoding) + except configparser.ParsingError as e: + raise RuntimeError(f'{fname} is invalid: {e}') + + formatters = _create_formatters(cp) + + # critical section + with logging._lock: + _clearExistingHandlers() + + # Handlers add themselves to logging._handlers + handlers = _install_handlers(cp, formatters) + _install_loggers(cp, handlers, disable_existing_loggers) + + +def _resolve(name): + """Resolve a dotted name to a global object.""" + name = name.split('.') + used = name.pop(0) + found = __import__(used) + for n in name: + used = used + '.' 
+ n + try: + found = getattr(found, n) + except AttributeError: + __import__(used) + found = getattr(found, n) + return found + +def _strip_spaces(alist): + return map(str.strip, alist) + +def _create_formatters(cp): + """Create and return formatters""" + flist = cp["formatters"]["keys"] + if not len(flist): + return {} + flist = flist.split(",") + flist = _strip_spaces(flist) + formatters = {} + for form in flist: + sectname = "formatter_%s" % form + fs = cp.get(sectname, "format", raw=True, fallback=None) + dfs = cp.get(sectname, "datefmt", raw=True, fallback=None) + stl = cp.get(sectname, "style", raw=True, fallback='%') + defaults = cp.get(sectname, "defaults", raw=True, fallback=None) + + c = logging.Formatter + class_name = cp[sectname].get("class") + if class_name: + c = _resolve(class_name) + + if defaults is not None: + defaults = eval(defaults, vars(logging)) + f = c(fs, dfs, stl, defaults=defaults) + else: + f = c(fs, dfs, stl) + formatters[form] = f + return formatters + + +def _install_handlers(cp, formatters): + """Install and return handlers""" + hlist = cp["handlers"]["keys"] + if not len(hlist): + return {} + hlist = hlist.split(",") + hlist = _strip_spaces(hlist) + handlers = {} + fixups = [] #for inter-handler references + for hand in hlist: + section = cp["handler_%s" % hand] + klass = section["class"] + fmt = section.get("formatter", "") + try: + klass = eval(klass, vars(logging)) + except (AttributeError, NameError): + klass = _resolve(klass) + args = section.get("args", '()') + args = eval(args, vars(logging)) + kwargs = section.get("kwargs", '{}') + kwargs = eval(kwargs, vars(logging)) + h = klass(*args, **kwargs) + h.name = hand + if "level" in section: + level = section["level"] + h.setLevel(level) + if len(fmt): + h.setFormatter(formatters[fmt]) + if issubclass(klass, logging.handlers.MemoryHandler): + target = section.get("target", "") + if len(target): #the target handler may not be loaded yet, so keep for later... + fixups.append((h, target)) + handlers[hand] = h + #now all handlers are loaded, fixup inter-handler references... + for h, t in fixups: + h.setTarget(handlers[t]) + return handlers + +def _handle_existing_loggers(existing, child_loggers, disable_existing): + """ + When (re)configuring logging, handle loggers which were in the previous + configuration but are not in the new configuration. There's no point + deleting them as other threads may continue to hold references to them; + and by disabling them, you stop them doing any logging. + + However, don't disable children of named loggers, as that's probably not + what was intended by the user. Also, allow existing loggers to NOT be + disabled if disable_existing is false. 
+ """ + root = logging.root + for log in existing: + logger = root.manager.loggerDict[log] + if log in child_loggers: + if not isinstance(logger, logging.PlaceHolder): + logger.setLevel(logging.NOTSET) + logger.handlers = [] + logger.propagate = True + else: + logger.disabled = disable_existing + +def _install_loggers(cp, handlers, disable_existing): + """Create and install loggers""" + + # configure the root first + llist = cp["loggers"]["keys"] + llist = llist.split(",") + llist = list(_strip_spaces(llist)) + llist.remove("root") + section = cp["logger_root"] + root = logging.root + log = root + if "level" in section: + level = section["level"] + log.setLevel(level) + for h in root.handlers[:]: + root.removeHandler(h) + hlist = section["handlers"] + if len(hlist): + hlist = hlist.split(",") + hlist = _strip_spaces(hlist) + for hand in hlist: + log.addHandler(handlers[hand]) + + #and now the others... + #we don't want to lose the existing loggers, + #since other threads may have pointers to them. + #existing is set to contain all existing loggers, + #and as we go through the new configuration we + #remove any which are configured. At the end, + #what's left in existing is the set of loggers + #which were in the previous configuration but + #which are not in the new configuration. + existing = list(root.manager.loggerDict.keys()) + #The list needs to be sorted so that we can + #avoid disabling child loggers of explicitly + #named loggers. With a sorted list it is easier + #to find the child loggers. + existing.sort() + #We'll keep the list of existing loggers + #which are children of named loggers here... + child_loggers = [] + #now set up the new ones... + for log in llist: + section = cp["logger_%s" % log] + qn = section["qualname"] + propagate = section.getint("propagate", fallback=1) + logger = logging.getLogger(qn) + if qn in existing: + i = existing.index(qn) + 1 # start with the entry after qn + prefixed = qn + "." + pflen = len(prefixed) + num_existing = len(existing) + while i < num_existing: + if existing[i][:pflen] == prefixed: + child_loggers.append(existing[i]) + i += 1 + existing.remove(qn) + if "level" in section: + level = section["level"] + logger.setLevel(level) + for h in logger.handlers[:]: + logger.removeHandler(h) + logger.propagate = propagate + logger.disabled = 0 + hlist = section["handlers"] + if len(hlist): + hlist = hlist.split(",") + hlist = _strip_spaces(hlist) + for hand in hlist: + logger.addHandler(handlers[hand]) + + #Disable any old loggers. There's no point deleting + #them as other threads may continue to hold references + #and by disabling them, you stop them doing any logging. + #However, don't disable children of named loggers, as that's + #probably not what was intended by the user. 
+ #for log in existing: + # logger = root.manager.loggerDict[log] + # if log in child_loggers: + # logger.level = logging.NOTSET + # logger.handlers = [] + # logger.propagate = 1 + # elif disable_existing_loggers: + # logger.disabled = 1 + _handle_existing_loggers(existing, child_loggers, disable_existing) + + +def _clearExistingHandlers(): + """Clear and close existing handlers""" + logging._handlers.clear() + logging.shutdown(logging._handlerList[:]) + del logging._handlerList[:] + + +IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) + + +def valid_ident(s): + m = IDENTIFIER.match(s) + if not m: + raise ValueError('Not a valid Python identifier: %r' % s) + return True + + +class ConvertingMixin(object): + """For ConvertingXXX's, this mixin class provides common functions""" + + def convert_with_key(self, key, value, replace=True): + result = self.configurator.convert(value) + #If the converted value is different, save for next time + if value is not result: + if replace: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def convert(self, value): + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + return result + + +# The ConvertingXXX classes are wrappers around standard Python containers, +# and they serve to convert any suitable values in the container. The +# conversion converts base dicts, lists and tuples to their wrapped +# equivalents, whereas strings which match a conversion format are converted +# appropriately. +# +# Each wrapper should have a configurator attribute holding the actual +# configurator to use for conversion. + +class ConvertingDict(dict, ConvertingMixin): + """A converting dictionary wrapper.""" + + def __getitem__(self, key): + value = dict.__getitem__(self, key) + return self.convert_with_key(key, value) + + def get(self, key, default=None): + value = dict.get(self, key, default) + return self.convert_with_key(key, value) + + def pop(self, key, default=None): + value = dict.pop(self, key, default) + return self.convert_with_key(key, value, replace=False) + +class ConvertingList(list, ConvertingMixin): + """A converting list wrapper.""" + def __getitem__(self, key): + value = list.__getitem__(self, key) + return self.convert_with_key(key, value) + + def pop(self, idx=-1): + value = list.pop(self, idx) + return self.convert(value) + +class ConvertingTuple(tuple, ConvertingMixin): + """A converting tuple wrapper.""" + def __getitem__(self, key): + value = tuple.__getitem__(self, key) + # Can't replace a tuple entry. + return self.convert_with_key(key, value, replace=False) + +class BaseConfigurator(object): + """ + The configurator base class which defines some useful defaults. + """ + + CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$') + + WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') + DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') + INDEX_PATTERN = re.compile(r'^\[([^\[\]]*)\]\s*') + DIGIT_PATTERN = re.compile(r'^\d+$') + + value_converters = { + 'ext' : 'ext_convert', + 'cfg' : 'cfg_convert', + } + + # We might want to use a different one, e.g. importlib + importer = staticmethod(__import__) + + def __init__(self, config): + self.config = ConvertingDict(config) + self.config.configurator = self + + def resolve(self, s): + """ + Resolve strings to objects using standard import and attribute + syntax.
+ """ + name = s.split('.') + used = name.pop(0) + try: + found = self.importer(used) + for frag in name: + used += '.' + frag + try: + found = getattr(found, frag) + except AttributeError: + self.importer(used) + found = getattr(found, frag) + return found + except ImportError as e: + v = ValueError('Cannot resolve %r: %s' % (s, e)) + raise v from e + + def ext_convert(self, value): + """Default converter for the ext:// protocol.""" + return self.resolve(value) + + def cfg_convert(self, value): + """Default converter for the cfg:// protocol.""" + rest = value + m = self.WORD_PATTERN.match(rest) + if m is None: + raise ValueError("Unable to convert %r" % value) + else: + rest = rest[m.end():] + d = self.config[m.groups()[0]] + #print d, rest + while rest: + m = self.DOT_PATTERN.match(rest) + if m: + d = d[m.groups()[0]] + else: + m = self.INDEX_PATTERN.match(rest) + if m: + idx = m.groups()[0] + if not self.DIGIT_PATTERN.match(idx): + d = d[idx] + else: + try: + n = int(idx) # try as number first (most likely) + d = d[n] + except TypeError: + d = d[idx] + if m: + rest = rest[m.end():] + else: + raise ValueError('Unable to convert ' + '%r at %r' % (value, rest)) + #rest should be empty + return d + + def convert(self, value): + """ + Convert values to an appropriate type. dicts, lists and tuples are + replaced by their converting alternatives. Strings are checked to + see if they have a conversion format and are converted if they do. + """ + if not isinstance(value, ConvertingDict) and isinstance(value, dict): + value = ConvertingDict(value) + value.configurator = self + elif not isinstance(value, ConvertingList) and isinstance(value, list): + value = ConvertingList(value) + value.configurator = self + elif not isinstance(value, ConvertingTuple) and\ + isinstance(value, tuple) and not hasattr(value, '_fields'): + value = ConvertingTuple(value) + value.configurator = self + elif isinstance(value, str): # str for py3k + m = self.CONVERT_PATTERN.match(value) + if m: + d = m.groupdict() + prefix = d['prefix'] + converter = self.value_converters.get(prefix, None) + if converter: + suffix = d['suffix'] + converter = getattr(self, converter) + value = converter(suffix) + return value + + def configure_custom(self, config): + """Configure an object with a user-supplied factory.""" + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + # Check for valid identifiers + kwargs = {k: config[k] for k in config if (k != '.' and valid_ident(k))} + result = c(**kwargs) + props = config.pop('.', None) + if props: + for name, value in props.items(): + setattr(result, name, value) + return result + + def as_tuple(self, value): + """Utility function which converts lists to tuples.""" + if isinstance(value, list): + value = tuple(value) + return value + +def _is_queue_like_object(obj): + """Check that *obj* implements the Queue API.""" + if isinstance(obj, (queue.Queue, queue.SimpleQueue)): + return True + # defer importing multiprocessing as much as possible + from multiprocessing.queues import Queue as MPQueue + if isinstance(obj, MPQueue): + return True + # Depending on the multiprocessing start context, we cannot create + # a multiprocessing.managers.BaseManager instance 'mm' to get the + # runtime type of mm.Queue() or mm.JoinableQueue() (see gh-119819). + # + # Since we only need an object implementing the Queue API, we only + # do a protocol check, but we do not use typing.runtime_checkable() + # and typing.Protocol to reduce import time (see gh-121723). 
+ # + # Ideally, we would have wanted to simply use strict type checking + # instead of a protocol-based type checking since the latter does + # not check the method signatures. + # + # Note that only 'put_nowait' and 'get' are required by the logging + # queue handler and queue listener (see gh-124653) and that other + # methods are either optional or unused. + minimal_queue_interface = ['put_nowait', 'get'] + return all(callable(getattr(obj, method, None)) + for method in minimal_queue_interface) + +class DictConfigurator(BaseConfigurator): + """ + Configure logging using a dictionary-like object to describe the + configuration. + """ + + def configure(self): + """Do the configuration.""" + + config = self.config + if 'version' not in config: + raise ValueError("dictionary doesn't specify a version") + if config['version'] != 1: + raise ValueError("Unsupported version: %s" % config['version']) + incremental = config.pop('incremental', False) + EMPTY_DICT = {} + with logging._lock: + if incremental: + handlers = config.get('handlers', EMPTY_DICT) + for name in handlers: + if name not in logging._handlers: + raise ValueError('No handler found with ' + 'name %r' % name) + else: + try: + handler = logging._handlers[name] + handler_config = handlers[name] + level = handler_config.get('level', None) + if level: + handler.setLevel(logging._checkLevel(level)) + except Exception as e: + raise ValueError('Unable to configure handler ' + '%r' % name) from e + loggers = config.get('loggers', EMPTY_DICT) + for name in loggers: + try: + self.configure_logger(name, loggers[name], True) + except Exception as e: + raise ValueError('Unable to configure logger ' + '%r' % name) from e + root = config.get('root', None) + if root: + try: + self.configure_root(root, True) + except Exception as e: + raise ValueError('Unable to configure root ' + 'logger') from e + else: + disable_existing = config.pop('disable_existing_loggers', True) + + _clearExistingHandlers() + + # Do formatters first - they don't refer to anything else + formatters = config.get('formatters', EMPTY_DICT) + for name in formatters: + try: + formatters[name] = self.configure_formatter( + formatters[name]) + except Exception as e: + raise ValueError('Unable to configure ' + 'formatter %r' % name) from e + # Next, do filters - they don't refer to anything else, either + filters = config.get('filters', EMPTY_DICT) + for name in filters: + try: + filters[name] = self.configure_filter(filters[name]) + except Exception as e: + raise ValueError('Unable to configure ' + 'filter %r' % name) from e + + # Next, do handlers - they refer to formatters and filters + # As handlers can refer to other handlers, sort the keys + # to allow a deterministic order of configuration + handlers = config.get('handlers', EMPTY_DICT) + deferred = [] + for name in sorted(handlers): + try: + handler = self.configure_handler(handlers[name]) + handler.name = name + handlers[name] = handler + except Exception as e: + if ' not configured yet' in str(e.__cause__): + deferred.append(name) + else: + raise ValueError('Unable to configure handler ' + '%r' % name) from e + + # Now do any that were deferred + for name in deferred: + try: + handler = self.configure_handler(handlers[name]) + handler.name = name + handlers[name] = handler + except Exception as e: + raise ValueError('Unable to configure handler ' + '%r' % name) from e + + # Next, do loggers - they refer to handlers and filters + + #we don't want to lose the existing loggers, + #since other threads may have pointers to 
them. + #existing is set to contain all existing loggers, + #and as we go through the new configuration we + #remove any which are configured. At the end, + #what's left in existing is the set of loggers + #which were in the previous configuration but + #which are not in the new configuration. + root = logging.root + existing = list(root.manager.loggerDict.keys()) + #The list needs to be sorted so that we can + #avoid disabling child loggers of explicitly + #named loggers. With a sorted list it is easier + #to find the child loggers. + existing.sort() + #We'll keep the list of existing loggers + #which are children of named loggers here... + child_loggers = [] + #now set up the new ones... + loggers = config.get('loggers', EMPTY_DICT) + for name in loggers: + if name in existing: + i = existing.index(name) + 1 # look after name + prefixed = name + "." + pflen = len(prefixed) + num_existing = len(existing) + while i < num_existing: + if existing[i][:pflen] == prefixed: + child_loggers.append(existing[i]) + i += 1 + existing.remove(name) + try: + self.configure_logger(name, loggers[name]) + except Exception as e: + raise ValueError('Unable to configure logger ' + '%r' % name) from e + + #Disable any old loggers. There's no point deleting + #them as other threads may continue to hold references + #and by disabling them, you stop them doing any logging. + #However, don't disable children of named loggers, as that's + #probably not what was intended by the user. + #for log in existing: + # logger = root.manager.loggerDict[log] + # if log in child_loggers: + # logger.level = logging.NOTSET + # logger.handlers = [] + # logger.propagate = True + # elif disable_existing: + # logger.disabled = True + _handle_existing_loggers(existing, child_loggers, + disable_existing) + + # And finally, do the root logger + root = config.get('root', None) + if root: + try: + self.configure_root(root) + except Exception as e: + raise ValueError('Unable to configure root ' + 'logger') from e + + def configure_formatter(self, config): + """Configure a formatter from a dictionary.""" + if '()' in config: + factory = config['()'] # for use in exception handler + try: + result = self.configure_custom(config) + except TypeError as te: + if "'format'" not in str(te): + raise + # logging.Formatter and its subclasses expect the `fmt` + # parameter instead of `format`. Retry passing configuration + # with `fmt`. + config['fmt'] = config.pop('format') + config['()'] = factory + result = self.configure_custom(config) + else: + fmt = config.get('format', None) + dfmt = config.get('datefmt', None) + style = config.get('style', '%') + cname = config.get('class', None) + defaults = config.get('defaults', None) + + if not cname: + c = logging.Formatter + else: + c = _resolve(cname) + + kwargs = {} + + # Add defaults only if it exists. + # Prevents TypeError in custom formatter callables that do not + # accept it. 
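+ # As an illustration, a dictConfig() formatter section that exercises + # this path might look like the following ("ip" is a user-chosen field + # name, not part of the API): + # + #     'formatters': {'fmt': {'format': '%(ip)s %(message)s', + #                            'defaults': {'ip': '-'}}}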
+ if defaults is not None: + kwargs['defaults'] = defaults + + # A TypeError would be raised if "validate" key is passed in with a formatter callable + # that does not accept "validate" as a parameter + if 'validate' in config: # if user hasn't mentioned it, the default will be fine + result = c(fmt, dfmt, style, config['validate'], **kwargs) + else: + result = c(fmt, dfmt, style, **kwargs) + + return result + + def configure_filter(self, config): + """Configure a filter from a dictionary.""" + if '()' in config: + result = self.configure_custom(config) + else: + name = config.get('name', '') + result = logging.Filter(name) + return result + + def add_filters(self, filterer, filters): + """Add filters to a filterer from a list of names.""" + for f in filters: + try: + if callable(f) or callable(getattr(f, 'filter', None)): + filter_ = f + else: + filter_ = self.config['filters'][f] + filterer.addFilter(filter_) + except Exception as e: + raise ValueError('Unable to add filter %r' % f) from e + + def _configure_queue_handler(self, klass, **kwargs): + if 'queue' in kwargs: + q = kwargs.pop('queue') + else: + q = queue.Queue() # unbounded + + rhl = kwargs.pop('respect_handler_level', False) + lklass = kwargs.pop('listener', logging.handlers.QueueListener) + handlers = kwargs.pop('handlers', []) + + listener = lklass(q, *handlers, respect_handler_level=rhl) + handler = klass(q, **kwargs) + handler.listener = listener + return handler + + def configure_handler(self, config): + """Configure a handler from a dictionary.""" + config_copy = dict(config) # for restoring in case of error + formatter = config.pop('formatter', None) + if formatter: + try: + formatter = self.config['formatters'][formatter] + except Exception as e: + raise ValueError('Unable to set formatter ' + '%r' % formatter) from e + level = config.pop('level', None) + filters = config.pop('filters', None) + if '()' in config: + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + factory = c + else: + cname = config.pop('class') + if callable(cname): + klass = cname + else: + klass = self.resolve(cname) + if issubclass(klass, logging.handlers.MemoryHandler): + if 'flushLevel' in config: + config['flushLevel'] = logging._checkLevel(config['flushLevel']) + if 'target' in config: + # Special case for handler which refers to another handler + try: + tn = config['target'] + th = self.config['handlers'][tn] + if not isinstance(th, logging.Handler): + config.update(config_copy) # restore for deferred cfg + raise TypeError('target not configured yet') + config['target'] = th + except Exception as e: + raise ValueError('Unable to set target handler %r' % tn) from e + elif issubclass(klass, logging.handlers.QueueHandler): + # Another special case for handler which refers to other handlers + # if 'handlers' not in config: + # raise ValueError('No handlers specified for a QueueHandler') + if 'queue' in config: + qspec = config['queue'] + + if isinstance(qspec, str): + q = self.resolve(qspec) + if not callable(q): + raise TypeError('Invalid queue specifier %r' % qspec) + config['queue'] = q() + elif isinstance(qspec, dict): + if '()' not in qspec: + raise TypeError('Invalid queue specifier %r' % qspec) + config['queue'] = self.configure_custom(dict(qspec)) + elif not _is_queue_like_object(qspec): + raise TypeError('Invalid queue specifier %r' % qspec) + + if 'listener' in config: + lspec = config['listener'] + if isinstance(lspec, type): + if not issubclass(lspec, logging.handlers.QueueListener): + raise TypeError('Invalid 
listener specifier %r' % lspec) + else: + if isinstance(lspec, str): + listener = self.resolve(lspec) + if isinstance(listener, type) and\ + not issubclass(listener, logging.handlers.QueueListener): + raise TypeError('Invalid listener specifier %r' % lspec) + elif isinstance(lspec, dict): + if '()' not in lspec: + raise TypeError('Invalid listener specifier %r' % lspec) + listener = self.configure_custom(dict(lspec)) + else: + raise TypeError('Invalid listener specifier %r' % lspec) + if not callable(listener): + raise TypeError('Invalid listener specifier %r' % lspec) + config['listener'] = listener + if 'handlers' in config: + hlist = [] + try: + for hn in config['handlers']: + h = self.config['handlers'][hn] + if not isinstance(h, logging.Handler): + config.update(config_copy) # restore for deferred cfg + raise TypeError('Required handler %r ' + 'is not configured yet' % hn) + hlist.append(h) + except Exception as e: + raise ValueError('Unable to set required handler %r' % hn) from e + config['handlers'] = hlist + elif issubclass(klass, logging.handlers.SMTPHandler) and\ + 'mailhost' in config: + config['mailhost'] = self.as_tuple(config['mailhost']) + elif issubclass(klass, logging.handlers.SysLogHandler) and\ + 'address' in config: + config['address'] = self.as_tuple(config['address']) + if issubclass(klass, logging.handlers.QueueHandler): + factory = functools.partial(self._configure_queue_handler, klass) + else: + factory = klass + kwargs = {k: config[k] for k in config if (k != '.' and valid_ident(k))} + # When deprecation ends for using the 'strm' parameter, remove the + # "except TypeError ..." + try: + result = factory(**kwargs) + except TypeError as te: + if "'stream'" not in str(te): + raise + #The argument name changed from strm to stream + #Retry with old name. + #This is so that code can be used with older Python versions + #(e.g. by Django) + kwargs['strm'] = kwargs.pop('stream') + result = factory(**kwargs) + + import warnings + warnings.warn( + "Support for custom logging handlers with the 'strm' argument " + "is deprecated and scheduled for removal in Python 3.16. " + "Define handlers with the 'stream' argument instead.", + DeprecationWarning, + stacklevel=2, + ) + if formatter: + result.setFormatter(formatter) + if level is not None: + result.setLevel(logging._checkLevel(level)) + if filters: + self.add_filters(result, filters) + props = config.pop('.', None) + if props: + for name, value in props.items(): + setattr(result, name, value) + return result + + def add_handlers(self, logger, handlers): + """Add handlers to a logger from a list of names.""" + for h in handlers: + try: + logger.addHandler(self.config['handlers'][h]) + except Exception as e: + raise ValueError('Unable to add handler %r' % h) from e + + def common_logger_config(self, logger, config, incremental=False): + """ + Perform configuration which is common to root and non-root loggers. 
+ """ + level = config.get('level', None) + if level is not None: + logger.setLevel(logging._checkLevel(level)) + if not incremental: + #Remove any existing handlers + for h in logger.handlers[:]: + logger.removeHandler(h) + handlers = config.get('handlers', None) + if handlers: + self.add_handlers(logger, handlers) + filters = config.get('filters', None) + if filters: + self.add_filters(logger, filters) + + def configure_logger(self, name, config, incremental=False): + """Configure a non-root logger from a dictionary.""" + logger = logging.getLogger(name) + self.common_logger_config(logger, config, incremental) + logger.disabled = False + propagate = config.get('propagate', None) + if propagate is not None: + logger.propagate = propagate + + def configure_root(self, config, incremental=False): + """Configure a root logger from a dictionary.""" + root = logging.getLogger() + self.common_logger_config(root, config, incremental) + +dictConfigClass = DictConfigurator + +def dictConfig(config): + """Configure logging using a dictionary.""" + dictConfigClass(config).configure() + + +def listen(port=DEFAULT_LOGGING_CONFIG_PORT, verify=None): + """ + Start up a socket server on the specified port, and listen for new + configurations. + + These will be sent as a file suitable for processing by fileConfig(). + Returns a Thread object on which you can call start() to start the server, + and which you can join() when appropriate. To stop the server, call + stopListening(). + + Use the ``verify`` argument to verify any bytes received across the wire + from a client. If specified, it should be a callable which receives a + single argument - the bytes of configuration data received across the + network - and it should return either ``None``, to indicate that the + passed in bytes could not be verified and should be discarded, or a + byte string which is then passed to the configuration machinery as + normal. Note that you can return transformed bytes, e.g. by decrypting + the bytes passed in. + """ + + class ConfigStreamHandler(StreamRequestHandler): + """ + Handler for a logging configuration request. + + It expects a completely new logging configuration and uses fileConfig + to install it. + """ + def handle(self): + """ + Handle a request. + + Each request is expected to be a 4-byte length, packed using + struct.pack(">L", n), followed by the config file. + Uses fileConfig() to do the grunt work. + """ + try: + conn = self.connection + chunk = conn.recv(4) + if len(chunk) == 4: + slen = struct.unpack(">L", chunk)[0] + chunk = self.connection.recv(slen) + while len(chunk) < slen: + chunk = chunk + conn.recv(slen - len(chunk)) + if self.server.verify is not None: + chunk = self.server.verify(chunk) + if chunk is not None: # verified, can process + chunk = chunk.decode("utf-8") + try: + import json + d =json.loads(chunk) + assert isinstance(d, dict) + dictConfig(d) + except Exception: + #Apply new configuration. + + file = io.StringIO(chunk) + try: + fileConfig(file) + except Exception: + traceback.print_exc() + if self.server.ready: + self.server.ready.set() + except OSError as e: + if e.errno != RESET_ERROR: + raise + + class ConfigSocketReceiver(ThreadingTCPServer): + """ + A simple TCP socket-based logging config receiver. 
+ """ + + allow_reuse_address = True + allow_reuse_port = False + + def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, + handler=None, ready=None, verify=None): + ThreadingTCPServer.__init__(self, (host, port), handler) + with logging._lock: + self.abort = 0 + self.timeout = 1 + self.ready = ready + self.verify = verify + + def serve_until_stopped(self): + import select + abort = 0 + while not abort: + rd, wr, ex = select.select([self.socket.fileno()], + [], [], + self.timeout) + if rd: + self.handle_request() + with logging._lock: + abort = self.abort + self.server_close() + + class Server(threading.Thread): + + def __init__(self, rcvr, hdlr, port, verify): + super(Server, self).__init__() + self.rcvr = rcvr + self.hdlr = hdlr + self.port = port + self.verify = verify + self.ready = threading.Event() + + def run(self): + server = self.rcvr(port=self.port, handler=self.hdlr, + ready=self.ready, + verify=self.verify) + if self.port == 0: + self.port = server.server_address[1] + self.ready.set() + global _listener + with logging._lock: + _listener = server + server.serve_until_stopped() + + return Server(ConfigSocketReceiver, ConfigStreamHandler, port, verify) + +def stopListening(): + """ + Stop the listening server which was created with a call to listen(). + """ + global _listener + with logging._lock: + if _listener: + _listener.abort = 1 + _listener = None diff --git a/Python314_4_x64_Template/Lib/logging/handlers.py b/Python314_4_x64_Template/Lib/logging/handlers.py new file mode 100644 index 00000000..4a07258f --- /dev/null +++ b/Python314_4_x64_Template/Lib/logging/handlers.py @@ -0,0 +1,1645 @@ +# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Additional handlers for the logging package for Python. The core package is +based on PEP 282 and comments thereto in comp.lang.python. + +Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging.handlers' and log away! +""" + +import copy +import io +import logging +import os +import pickle +import queue +import re +import socket +import struct +import threading +import time + +# +# Some constants... +# + +DEFAULT_TCP_LOGGING_PORT = 9020 +DEFAULT_UDP_LOGGING_PORT = 9021 +DEFAULT_HTTP_LOGGING_PORT = 9022 +DEFAULT_SOAP_LOGGING_PORT = 9023 +SYSLOG_UDP_PORT = 514 +SYSLOG_TCP_PORT = 514 + +_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day + +class BaseRotatingHandler(logging.FileHandler): + """ + Base class for handlers that rotate log files at a certain point. + Not meant to be instantiated directly. 
Instead, use RotatingFileHandler + or TimedRotatingFileHandler. + """ + namer = None + rotator = None + + def __init__(self, filename, mode, encoding=None, delay=False, errors=None): + """ + Use the specified filename for streamed logging + """ + logging.FileHandler.__init__(self, filename, mode=mode, + encoding=encoding, delay=delay, + errors=errors) + self.mode = mode + self.encoding = encoding + self.errors = errors + + def emit(self, record): + """ + Emit a record. + + Output the record to the file, catering for rollover as described + in doRollover(). + """ + try: + if self.shouldRollover(record): + self.doRollover() + logging.FileHandler.emit(self, record) + except Exception: + self.handleError(record) + + def rotation_filename(self, default_name): + """ + Modify the filename of a log file when rotating. + + This is provided so that a custom filename can be provided. + + The default implementation calls the 'namer' attribute of the + handler, if it's callable, passing the default name to + it. If the attribute isn't callable (the default is None), the name + is returned unchanged. + + :param default_name: The default name for the log file. + """ + if not callable(self.namer): + result = default_name + else: + result = self.namer(default_name) + return result + + def rotate(self, source, dest): + """ + When rotating, rotate the current log. + + The default implementation calls the 'rotator' attribute of the + handler, if it's callable, passing the source and dest arguments to + it. If the attribute isn't callable (the default is None), the source + is simply renamed to the destination. + + :param source: The source filename. This is normally the base + filename, e.g. 'test.log' + :param dest: The destination filename. This is normally + what the source is rotated to, e.g. 'test.log.1'. + """ + if not callable(self.rotator): + # Issue 18940: A file may not have been created if delay is True. + if os.path.exists(source): + os.rename(source, dest) + else: + self.rotator(source, dest) + +class RotatingFileHandler(BaseRotatingHandler): + """ + Handler for logging to a set of files, which switches from one file + to the next when the current file reaches a certain size. + """ + def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, + encoding=None, delay=False, errors=None): + """ + Open the specified file and use it as the stream for logging. + + By default, the file grows indefinitely. You can specify particular + values of maxBytes and backupCount to allow the file to rollover at + a predetermined size. + + Rollover occurs whenever the current log file is nearly maxBytes in + length. If backupCount is >= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + """ + # If rotation/rollover is wanted, it doesn't make sense to use another + # mode. 
If for example 'w' were specified, then if there were multiple
+        # runs of the calling application, the logs from previous runs would be
+        # lost if the 'w' is respected, because the log file would be truncated
+        # on each run.
+        if maxBytes > 0:
+            mode = 'a'
+        if "b" not in mode:
+            encoding = io.text_encoding(encoding)
+        BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
+                                     delay=delay, errors=errors)
+        self.maxBytes = maxBytes
+        self.backupCount = backupCount
+
+    def doRollover(self):
+        """
+        Do a rollover, as described in __init__().
+        """
+        if self.stream:
+            self.stream.close()
+            self.stream = None
+        if self.backupCount > 0:
+            for i in range(self.backupCount - 1, 0, -1):
+                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
+                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
+                                                        i + 1))
+                if os.path.exists(sfn):
+                    if os.path.exists(dfn):
+                        os.remove(dfn)
+                    os.rename(sfn, dfn)
+            dfn = self.rotation_filename(self.baseFilename + ".1")
+            if os.path.exists(dfn):
+                os.remove(dfn)
+            self.rotate(self.baseFilename, dfn)
+        if not self.delay:
+            self.stream = self._open()
+
+    def shouldRollover(self, record):
+        """
+        Determine if rollover should occur.
+
+        Basically, see if the supplied record would cause the file to exceed
+        the size limit we have.
+        """
+        if self.stream is None:                 # delay was set...
+            self.stream = self._open()
+        if self.maxBytes > 0:                   # are we rolling over?
+            try:
+                pos = self.stream.tell()
+            except io.UnsupportedOperation:
+                # gh-143237: Never rollover a named pipe.
+                return False
+            if not pos:
+                # gh-116263: Never rollover an empty file
+                return False
+            msg = "%s\n" % self.format(record)
+            if pos + len(msg) >= self.maxBytes:
+                # See bpo-45401: Never rollover anything other than regular files
+                if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
+                    return False
+                return True
+        return False
+
+class TimedRotatingFileHandler(BaseRotatingHandler):
+    """
+    Handler for logging to a file, rotating the log file at certain timed
+    intervals.
+
+    If backupCount is > 0, when rollover is done, no more than backupCount
+    files are kept - the oldest ones are deleted.
+    """
+    def __init__(self, filename, when='h', interval=1, backupCount=0,
+                 encoding=None, delay=False, utc=False, atTime=None,
+                 errors=None):
+        encoding = io.text_encoding(encoding)
+        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
+                                     delay=delay, errors=errors)
+        self.when = when.upper()
+        self.backupCount = backupCount
+        self.utc = utc
+        self.atTime = atTime
+        # Calculate the real rollover interval, which is just the number of
+        # seconds between rollovers.  Also set the filename suffix used when
+        # a rollover occurs.  Current 'when' events supported:
+        # S - Seconds
+        # M - Minutes
+        # H - Hours
+        # D - Days
+        # midnight - roll over at midnight
+        # W{0-6} - roll over on a certain day; 0 - Monday
+        #
+        # Case of the 'when' specifier is not important; lower or upper case
+        # will work.
+        if self.when == 'S':
+            self.interval = 1 # one second
+            self.suffix = "%Y-%m-%d_%H-%M-%S"
+            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(?!\d)"
+        elif self.when == 'M':
+            self.interval = 60 # one minute
+            self.suffix = "%Y-%m-%d_%H-%M"
+            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(?!\d)"
+        elif self.when == 'H':
+            self.interval = 60 * 60 # one hour
+            self.suffix = "%Y-%m-%d_%H"
+            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}(?!\d)"
+        elif self.when == 'D' or self.when == 'MIDNIGHT':
+            self.interval = 60 * 60 * 24 # one day
+            self.suffix = "%Y-%m-%d"
+            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}(?!\d)"
+        elif self.when.startswith('W'):
+            self.interval = 60 * 60 * 24 * 7 # one week
+            if len(self.when) != 2:
+                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
+            if self.when[1] < '0' or self.when[1] > '6':
+                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
+            self.dayOfWeek = int(self.when[1])
+            self.suffix = "%Y-%m-%d"
+            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}(?!\d)"
+        else:
+            raise ValueError("Invalid rollover interval specified: %s" % self.when)
+
+        self.extMatch = re.compile(extMatch, re.ASCII)
+        self.interval = self.interval * interval # multiply by units requested
+        # The following line added because the filename passed in could be a
+        # path object (see Issue #27493), but self.baseFilename will be a string
+        filename = self.baseFilename
+        if os.path.exists(filename):
+            t = int(os.stat(filename).st_mtime)
+        else:
+            t = int(time.time())
+        self.rolloverAt = self.computeRollover(t)
+
+    def computeRollover(self, currentTime):
+        """
+        Work out the rollover time based on the specified time.
+        """
+        result = currentTime + self.interval
+        # If we are rolling over at midnight or weekly, then the interval is already known.
+        # What we need to figure out is WHEN the next interval is.  In other words,
+        # if you are rolling over at midnight, then your base interval is 1 day,
+        # but you want to start that one day clock at midnight, not now.  So, we
+        # have to fudge the rollover information a bit.
+        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
+            # This could be done with less code, but I wanted it to be clear
+            if self.utc:
+                t = time.gmtime(currentTime)
+            else:
+                t = time.localtime(currentTime)
+            currentHour = t[3]
+            currentMinute = t[4]
+            currentSecond = t[5]
+            currentDay = t[6]
+            # r is the number of seconds left between now and the next rotation
+            if self.atTime is None:
+                rotate_ts = _MIDNIGHT
+            else:
+                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute) * 60 +
+                             self.atTime.second)
+
+            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
+                             currentSecond)
+            if r <= 0:
+                # Rotate time is before the current time (for example when
+                # self.rotateAt is 13:45 and it now 14:15), rotation is
+                # tomorrow.
+                r += _MIDNIGHT
+                currentDay = (currentDay + 1) % 7
+            result = currentTime + r
+            # If we are rolling over on a certain day, add in the number of days until
+            # the next rollover, but offset by 1 since we just calculated the time
+            # until the next day starts.  There are three cases:
+            # Case 1) The day to rollover is today; in this case, do nothing
+            # Case 2) The day to rollover is further in the interval (i.e., today is
+            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
+            #         next rollover is simply 6 - 2 - 1, or 3.
+            # Case 3) The day to rollover is behind us in the interval (i.e., today
+            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
+            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
+            #         number of days left in the current week (1) plus the number
+            #         of days in the next week until the rollover day (3).
+            if self.when.startswith('W'):
+                day = currentDay # 0 is Monday
+                if day != self.dayOfWeek:
+                    if day < self.dayOfWeek:
+                        daysToWait = self.dayOfWeek - day
+                    else:
+                        daysToWait = 6 - day + self.dayOfWeek + 1
+                    result += daysToWait * _MIDNIGHT
+                result += self.interval - _MIDNIGHT * 7
+            else:
+                result += self.interval - _MIDNIGHT
+            if not self.utc:
+                dstNow = t[-1]
+                dstAtRollover = time.localtime(result)[-1]
+                if dstNow != dstAtRollover:
+                    if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
+                        addend = -3600
+                    else:           # DST bows out before next rollover, so we need to add an hour
+                        addend = 3600
+                    result += addend
+        return result
+
+    def shouldRollover(self, record):
+        """
+        Determine if rollover should occur.
+
+        record is not used, as we are just comparing times, but it is needed so
+        the method signatures are the same
+        """
+        if self.stream is None:                 # delay was set...
+            self.stream = self._open()
+        t = int(time.time())
+        if t >= self.rolloverAt:
+            # See #89564: Never rollover anything other than regular files
+            if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
+                # The file is not a regular file, so do not rollover, but do
+                # set the next rollover time to avoid repeated checks.
+ self.rolloverAt = self.computeRollover(t) + return False + + return True + return False + + def getFilesToDelete(self): + """ + Determine the files to delete when rolling over. + + More specific than the earlier method, which just used glob.glob(). + """ + dirName, baseName = os.path.split(self.baseFilename) + fileNames = os.listdir(dirName) + result = [] + if self.namer is None: + prefix = baseName + '.' + plen = len(prefix) + for fileName in fileNames: + if fileName[:plen] == prefix: + suffix = fileName[plen:] + if self.extMatch.fullmatch(suffix): + result.append(os.path.join(dirName, fileName)) + else: + for fileName in fileNames: + # Our files could be just about anything after custom naming, + # but they should contain the datetime suffix. + # Try to find the datetime suffix in the file name and verify + # that the file name can be generated by this handler. + m = self.extMatch.search(fileName) + while m: + dfn = self.namer(self.baseFilename + "." + m[0]) + if os.path.basename(dfn) == fileName: + result.append(os.path.join(dirName, fileName)) + break + m = self.extMatch.search(fileName, m.start() + 1) + + if len(result) < self.backupCount: + result = [] + else: + result.sort() + result = result[:len(result) - self.backupCount] + return result + + def doRollover(self): + """ + do a rollover; in this case, a date/time stamp is appended to the filename + when the rollover happens. However, you want the file to be named for the + start of the interval, not the current time. If there is a backup count, + then we have to get a list of matching filenames, sort them and remove + the one with the oldest suffix. + """ + # get the time that this sequence started at and make it a TimeTuple + currentTime = int(time.time()) + t = self.rolloverAt - self.interval + if self.utc: + timeTuple = time.gmtime(t) + else: + timeTuple = time.localtime(t) + dstNow = time.localtime(currentTime)[-1] + dstThen = timeTuple[-1] + if dstNow != dstThen: + if dstNow: + addend = 3600 + else: + addend = -3600 + timeTuple = time.localtime(t + addend) + dfn = self.rotation_filename(self.baseFilename + "." + + time.strftime(self.suffix, timeTuple)) + if os.path.exists(dfn): + # Already rolled over. + return + + if self.stream: + self.stream.close() + self.stream = None + self.rotate(self.baseFilename, dfn) + if self.backupCount > 0: + for s in self.getFilesToDelete(): + os.remove(s) + if not self.delay: + self.stream = self._open() + self.rolloverAt = self.computeRollover(currentTime) + +class WatchedFileHandler(logging.FileHandler): + """ + A handler for logging to a file, which watches the file + to see if it has changed while in use. This can happen because of + usage of programs such as newsyslog and logrotate which perform + log file rotation. This handler, intended for use under Unix, + watches the file to see if it has changed since the last emit. + (A file has changed if its device or inode have changed.) + If it has changed, the old file stream is closed, and the file + opened to get a new stream. + + This handler is not appropriate for use under Windows, because + under Windows open files cannot be moved or renamed - logging + opens the files with exclusive locks - and so there is no need + for such a handler. + + This handler is based on a suggestion and patch by Chad J. + Schroeder. 
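+
+    A minimal usage sketch (the path below is an arbitrary example):
+
+        import logging
+        from logging.handlers import WatchedFileHandler
+
+        handler = WatchedFileHandler('/var/log/myapp.log')
+        handler.setFormatter(
+            logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
+        logging.getLogger().addHandler(handler)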
+ """ + def __init__(self, filename, mode='a', encoding=None, delay=False, + errors=None): + if "b" not in mode: + encoding = io.text_encoding(encoding) + logging.FileHandler.__init__(self, filename, mode=mode, + encoding=encoding, delay=delay, + errors=errors) + self.dev, self.ino = -1, -1 + self._statstream() + + def _statstream(self): + if self.stream is None: + return + sres = os.fstat(self.stream.fileno()) + self.dev = sres.st_dev + self.ino = sres.st_ino + + def reopenIfNeeded(self): + """ + Reopen log file if needed. + + Checks if the underlying file has changed, and if it + has, close the old stream and reopen the file to get the + current stream. + """ + if self.stream is None: + return + + # Reduce the chance of race conditions by stat'ing by path only + # once and then fstat'ing our new fd if we opened a new log stream. + # See issue #14632: Thanks to John Mulligan for the problem report + # and patch. + try: + # stat the file by path, checking for existence + sres = os.stat(self.baseFilename) + + # compare file system stat with that of our stream file handle + reopen = (sres.st_dev != self.dev or sres.st_ino != self.ino) + except FileNotFoundError: + reopen = True + + if not reopen: + return + + # we have an open file handle, clean it up + self.stream.flush() + self.stream.close() + self.stream = None # See Issue #21742: _open () might fail. + + # open a new file handle and get new stat info from that fd + self.stream = self._open() + self._statstream() + + def emit(self, record): + """ + Emit a record. + + If underlying file has changed, reopen the file before emitting the + record to it. + """ + self.reopenIfNeeded() + logging.FileHandler.emit(self, record) + + +class SocketHandler(logging.Handler): + """ + A handler class which writes logging records, in pickle format, to + a streaming socket. The socket is kept open across logging calls. + If the peer resets it, an attempt is made to reconnect on the next call. + The pickle which is sent is that of the LogRecord's attribute dictionary + (__dict__), so that the receiver does not need to have the logging module + installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + """ + + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + + When the attribute *closeOnError* is set to True - if a socket error + occurs, the socket is silently closed and then reopened on the next + logging call. + """ + logging.Handler.__init__(self) + self.host = host + self.port = port + if port is None: + self.address = host + else: + self.address = (host, port) + self.sock = None + self.closeOnError = False + self.retryTime = None + # + # Exponential backoff parameters. + # + self.retryStart = 1.0 + self.retryMax = 30.0 + self.retryFactor = 2.0 + + def makeSocket(self, timeout=1): + """ + A factory method which allows subclasses to define the precise + type of socket they want. + """ + if self.port is not None: + result = socket.create_connection(self.address, timeout=timeout) + else: + result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + result.settimeout(timeout) + try: + result.connect(self.address) + except OSError: + result.close() # Issue 19182 + raise + return result + + def createSocket(self): + """ + Try to create a socket, using an exponential backoff with + a max retry time. Thanks to Robert Olson for the original patch + (SF #815911) which has been slightly refactored. 
+ """ + now = time.time() + # Either retryTime is None, in which case this + # is the first time back after a disconnect, or + # we've waited long enough. + if self.retryTime is None: + attempt = True + else: + attempt = (now >= self.retryTime) + if attempt: + try: + self.sock = self.makeSocket() + self.retryTime = None # next time, no delay before trying + except OSError: + #Creation failed, so set the retry time and return. + if self.retryTime is None: + self.retryPeriod = self.retryStart + else: + self.retryPeriod = self.retryPeriod * self.retryFactor + if self.retryPeriod > self.retryMax: + self.retryPeriod = self.retryMax + self.retryTime = now + self.retryPeriod + + def send(self, s): + """ + Send a pickled string to the socket. + + This function allows for partial sends which can happen when the + network is busy. + """ + if self.sock is None: + self.createSocket() + #self.sock can be None either because we haven't reached the retry + #time yet, or because we have reached the retry time and retried, + #but are still unable to connect. + if self.sock: + try: + self.sock.sendall(s) + except OSError: #pragma: no cover + self.sock.close() + self.sock = None # so we can call createSocket next time + + def makePickle(self, record): + """ + Pickles the record in binary format with a length prefix, and + returns it ready for transmission across the socket. + """ + ei = record.exc_info + if ei: + # just to get traceback text into record.exc_text ... + dummy = self.format(record) + # See issue #14436: If msg or args are objects, they may not be + # available on the receiving end. So we convert the msg % args + # to a string, save it as msg and zap the args. + d = dict(record.__dict__) + d['msg'] = record.getMessage() + d['args'] = None + d['exc_info'] = None + # Issue #25685: delete 'message' if present: redundant with 'msg' + d.pop('message', None) + s = pickle.dumps(d, 1) + slen = struct.pack(">L", len(s)) + return slen + s + + def handleError(self, record): + """ + Handle an error during logging. + + An error has occurred during logging. Most likely cause - + connection lost. Close the socket so that we can retry on the + next event. + """ + if self.closeOnError and self.sock: + self.sock.close() + self.sock = None #try to reconnect next time + else: + logging.Handler.handleError(self, record) + + def emit(self, record): + """ + Emit a record. + + Pickles the record and writes it to the socket in binary format. + If there is an error with the socket, silently drop the packet. + If there was a problem with the socket, re-establishes the + socket. + """ + try: + s = self.makePickle(record) + self.send(s) + except Exception: + self.handleError(record) + + def close(self): + """ + Closes the socket. + """ + with self.lock: + sock = self.sock + if sock: + self.sock = None + sock.close() + logging.Handler.close(self) + +class DatagramHandler(SocketHandler): + """ + A handler class which writes logging records, in pickle format, to + a datagram socket. The pickle which is sent is that of the LogRecord's + attribute dictionary (__dict__), so that the receiver does not need to + have the logging module installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + """ + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. 
+ """ + SocketHandler.__init__(self, host, port) + self.closeOnError = False + + def makeSocket(self): + """ + The factory method of SocketHandler is here overridden to create + a UDP socket (SOCK_DGRAM). + """ + if self.port is None: + family = socket.AF_UNIX + else: + family = socket.AF_INET + s = socket.socket(family, socket.SOCK_DGRAM) + return s + + def send(self, s): + """ + Send a pickled string to a socket. + + This function no longer allows for partial sends which can happen + when the network is busy - UDP does not guarantee delivery and + can deliver packets out of sequence. + """ + if self.sock is None: + self.createSocket() + self.sock.sendto(s, self.address) + +class SysLogHandler(logging.Handler): + """ + A handler class which sends formatted logging records to a syslog + server. Based on Sam Rushing's syslog module: + http://www.nightmare.com/squirl/python-ext/misc/syslog.py + Contributed by Nicolas Untz (after which minor refactoring changes + have been made). + """ + + # from : + # ====================================================================== + # priorities/facilities are encoded into a single 32-bit quantity, where + # the bottom 3 bits are the priority (0-7) and the top 28 bits are the + # facility (0-big number). Both the priorities and the facilities map + # roughly one-to-one to strings in the syslogd(8) source code. This + # mapping is included in this file. + # + # priorities (these are ordered) + + LOG_EMERG = 0 # system is unusable + LOG_ALERT = 1 # action must be taken immediately + LOG_CRIT = 2 # critical conditions + LOG_ERR = 3 # error conditions + LOG_WARNING = 4 # warning conditions + LOG_NOTICE = 5 # normal but significant condition + LOG_INFO = 6 # informational + LOG_DEBUG = 7 # debug-level messages + + # facility codes + LOG_KERN = 0 # kernel messages + LOG_USER = 1 # random user-level messages + LOG_MAIL = 2 # mail system + LOG_DAEMON = 3 # system daemons + LOG_AUTH = 4 # security/authorization messages + LOG_SYSLOG = 5 # messages generated internally by syslogd + LOG_LPR = 6 # line printer subsystem + LOG_NEWS = 7 # network news subsystem + LOG_UUCP = 8 # UUCP subsystem + LOG_CRON = 9 # clock daemon + LOG_AUTHPRIV = 10 # security/authorization messages (private) + LOG_FTP = 11 # FTP daemon + LOG_NTP = 12 # NTP subsystem + LOG_SECURITY = 13 # Log audit + LOG_CONSOLE = 14 # Log alert + LOG_SOLCRON = 15 # Scheduling daemon (Solaris) + + # other codes through 15 reserved for system use + LOG_LOCAL0 = 16 # reserved for local use + LOG_LOCAL1 = 17 # reserved for local use + LOG_LOCAL2 = 18 # reserved for local use + LOG_LOCAL3 = 19 # reserved for local use + LOG_LOCAL4 = 20 # reserved for local use + LOG_LOCAL5 = 21 # reserved for local use + LOG_LOCAL6 = 22 # reserved for local use + LOG_LOCAL7 = 23 # reserved for local use + + priority_names = { + "alert": LOG_ALERT, + "crit": LOG_CRIT, + "critical": LOG_CRIT, + "debug": LOG_DEBUG, + "emerg": LOG_EMERG, + "err": LOG_ERR, + "error": LOG_ERR, # DEPRECATED + "info": LOG_INFO, + "notice": LOG_NOTICE, + "panic": LOG_EMERG, # DEPRECATED + "warn": LOG_WARNING, # DEPRECATED + "warning": LOG_WARNING, + } + + facility_names = { + "auth": LOG_AUTH, + "authpriv": LOG_AUTHPRIV, + "console": LOG_CONSOLE, + "cron": LOG_CRON, + "daemon": LOG_DAEMON, + "ftp": LOG_FTP, + "kern": LOG_KERN, + "lpr": LOG_LPR, + "mail": LOG_MAIL, + "news": LOG_NEWS, + "ntp": LOG_NTP, + "security": LOG_SECURITY, + "solaris-cron": LOG_SOLCRON, + "syslog": LOG_SYSLOG, + "user": LOG_USER, + "uucp": LOG_UUCP, + "local0": LOG_LOCAL0, + 
"local1": LOG_LOCAL1, + "local2": LOG_LOCAL2, + "local3": LOG_LOCAL3, + "local4": LOG_LOCAL4, + "local5": LOG_LOCAL5, + "local6": LOG_LOCAL6, + "local7": LOG_LOCAL7, + } + + # Originally added to work around GH-43683. Unnecessary since GH-50043 but kept + # for backwards compatibility. + priority_map = { + "DEBUG" : "debug", + "INFO" : "info", + "WARNING" : "warning", + "ERROR" : "error", + "CRITICAL" : "critical" + } + + def __init__(self, address=('localhost', SYSLOG_UDP_PORT), + facility=LOG_USER, socktype=None, timeout=None): + """ + Initialize a handler. + + If address is specified as a string, a UNIX socket is used. To log to a + local syslogd, "SysLogHandler(address="/dev/log")" can be used. + If facility is not specified, LOG_USER is used. If socktype is + specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific + socket type will be used. For Unix sockets, you can also specify a + socktype of None, in which case socket.SOCK_DGRAM will be used, falling + back to socket.SOCK_STREAM. + """ + logging.Handler.__init__(self) + + self.address = address + self.facility = facility + self.socktype = socktype + self.timeout = timeout + self.socket = None + self.createSocket() + + def _connect_unixsocket(self, address): + use_socktype = self.socktype + if use_socktype is None: + use_socktype = socket.SOCK_DGRAM + self.socket = socket.socket(socket.AF_UNIX, use_socktype) + try: + self.socket.connect(address) + # it worked, so set self.socktype to the used type + self.socktype = use_socktype + except OSError: + self.socket.close() + if self.socktype is not None: + # user didn't specify falling back, so fail + raise + use_socktype = socket.SOCK_STREAM + self.socket = socket.socket(socket.AF_UNIX, use_socktype) + try: + self.socket.connect(address) + # it worked, so set self.socktype to the used type + self.socktype = use_socktype + except OSError: + self.socket.close() + raise + + def createSocket(self): + """ + Try to create a socket and, if it's not a datagram socket, connect it + to the other end. This method is called during handler initialization, + but it's not regarded as an error if the other end isn't listening yet + --- the method will be called again when emitting an event, + if there is no socket at that point. + """ + address = self.address + socktype = self.socktype + + if isinstance(address, str): + self.unixsocket = True + # Syslog server may be unavailable during handler initialisation. + # C's openlog() function also ignores connection errors. + # Moreover, we ignore these errors while logging, so it's not worse + # to ignore it also here. + try: + self._connect_unixsocket(address) + except OSError: + pass + else: + self.unixsocket = False + if socktype is None: + socktype = socket.SOCK_DGRAM + host, port = address + ress = socket.getaddrinfo(host, port, 0, socktype) + if not ress: + raise OSError("getaddrinfo returns an empty list") + for res in ress: + af, socktype, proto, _, sa = res + err = sock = None + try: + sock = socket.socket(af, socktype, proto) + if self.timeout: + sock.settimeout(self.timeout) + if socktype == socket.SOCK_STREAM: + sock.connect(sa) + break + except OSError as exc: + err = exc + if sock is not None: + sock.close() + if err is not None: + raise err + self.socket = sock + self.socktype = socktype + + def encodePriority(self, facility, priority): + """ + Encode the facility and priority. 
You can pass in strings or
+        integers - if strings are passed, the facility_names and
+        priority_names mapping dictionaries are used to convert them to
+        integers.
+        """
+        if isinstance(facility, str):
+            facility = self.facility_names[facility]
+        if isinstance(priority, str):
+            priority = self.priority_names[priority]
+        return (facility << 3) | priority
+
+    def close(self):
+        """
+        Closes the socket.
+        """
+        with self.lock:
+            sock = self.socket
+            if sock:
+                self.socket = None
+                sock.close()
+        logging.Handler.close(self)
+
+    def mapPriority(self, levelName):
+        """
+        Map a logging level name to a key in the priority_names map.
+        This is useful in two scenarios: when custom levels are being
+        used, and in the case where you can't do a straightforward
+        mapping by lowercasing the logging level name because of locale-
+        specific issues (see SF #1524081).
+        """
+        return self.priority_map.get(levelName, "warning")
+
+    ident = ''          # prepended to all messages
+    append_nul = True   # some old syslog daemons expect a NUL terminator
+
+    def emit(self, record):
+        """
+        Emit a record.
+
+        The record is formatted, and then sent to the syslog server. If
+        exception information is present, it is NOT sent to the server.
+        """
+        try:
+            msg = self.format(record)
+            if self.ident:
+                msg = self.ident + msg
+            if self.append_nul:
+                msg += '\000'
+
+            # We need to convert record level to lowercase, maybe this will
+            # change in the future.
+            prio = '<%d>' % self.encodePriority(self.facility,
+                                                self.mapPriority(record.levelname))
+            prio = prio.encode('utf-8')
+            # Message is a string. Convert to bytes as required by RFC 5424
+            msg = msg.encode('utf-8')
+            msg = prio + msg
+
+            if not self.socket:
+                self.createSocket()
+
+            if self.unixsocket:
+                try:
+                    self.socket.send(msg)
+                except OSError:
+                    self.socket.close()
+                    self._connect_unixsocket(self.address)
+                    self.socket.send(msg)
+            elif self.socktype == socket.SOCK_DGRAM:
+                self.socket.sendto(msg, self.address)
+            else:
+                self.socket.sendall(msg)
+        except Exception:
+            self.handleError(record)
+
+class SMTPHandler(logging.Handler):
+    """
+    A handler class which sends an SMTP email for each logging event.
+    """
+    def __init__(self, mailhost, fromaddr, toaddrs, subject,
+                 credentials=None, secure=None, timeout=5.0):
+        """
+        Initialize the handler.
+
+        Initialize the instance with the from and to addresses and subject
+        line of the email. To specify a non-standard SMTP port, use the
+        (host, port) tuple format for the mailhost argument. To specify
+        authentication credentials, supply a (username, password) tuple
+        for the credentials argument. To specify the use of a secure
+        protocol (TLS), pass in a tuple for the secure argument. This will
+        only be used when authentication credentials are supplied. The tuple
+        will be either an empty tuple, or a single-value tuple with the name
+        of a keyfile, or a 2-value tuple with the names of the keyfile and
+        certificate file. (This tuple is passed to the
+        `ssl.SSLContext.load_cert_chain` method).
+        A timeout in seconds can be specified for the SMTP connection (the
+        default is 5 seconds, matching the ``timeout`` parameter above).
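+
+        A minimal usage sketch (all hosts, addresses and credentials below
+        are placeholders):
+
+            import logging
+            from logging.handlers import SMTPHandler
+
+            handler = SMTPHandler(mailhost=('smtp.example.com', 587),
+                                  fromaddr='app@example.com',
+                                  toaddrs=['ops@example.com'],
+                                  subject='Application error',
+                                  credentials=('user', 'secret'),
+                                  secure=())
+            handler.setLevel(logging.ERROR)
+            logging.getLogger().addHandler(handler)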
+ """ + logging.Handler.__init__(self) + if isinstance(mailhost, (list, tuple)): + self.mailhost, self.mailport = mailhost + else: + self.mailhost, self.mailport = mailhost, None + if isinstance(credentials, (list, tuple)): + self.username, self.password = credentials + else: + self.username = None + self.fromaddr = fromaddr + if isinstance(toaddrs, str): + toaddrs = [toaddrs] + self.toaddrs = toaddrs + self.subject = subject + self.secure = secure + self.timeout = timeout + + def getSubject(self, record): + """ + Determine the subject for the email. + + If you want to specify a subject line which is record-dependent, + override this method. + """ + return self.subject + + def emit(self, record): + """ + Emit a record. + + Format the record and send it to the specified addressees. + """ + try: + import smtplib + from email.message import EmailMessage + import email.utils + + port = self.mailport + if not port: + port = smtplib.SMTP_PORT + smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout) + msg = EmailMessage() + msg['From'] = self.fromaddr + msg['To'] = ','.join(self.toaddrs) + msg['Subject'] = self.getSubject(record) + msg['Date'] = email.utils.localtime() + msg.set_content(self.format(record)) + if self.username: + if self.secure is not None: + import ssl + + try: + keyfile = self.secure[0] + except IndexError: + keyfile = None + + try: + certfile = self.secure[1] + except IndexError: + certfile = None + + context = ssl._create_stdlib_context( + certfile=certfile, keyfile=keyfile + ) + smtp.ehlo() + smtp.starttls(context=context) + smtp.ehlo() + smtp.login(self.username, self.password) + smtp.send_message(msg) + smtp.quit() + except Exception: + self.handleError(record) + +class NTEventLogHandler(logging.Handler): + """ + A handler class which sends events to the NT Event Log. Adds a + registry entry for the specified application name. If no dllname is + provided, win32service.pyd (which contains some basic message + placeholders) is used. Note that use of these placeholders will make + your event logs big, as the entire message source is held in the log. + If you want slimmer logs, you have to pass in the name of your own DLL + which contains the message definitions you want to use in the event log. + """ + def __init__(self, appname, dllname=None, logtype="Application"): + logging.Handler.__init__(self) + try: + import win32evtlogutil, win32evtlog + self.appname = appname + self._welu = win32evtlogutil + if not dllname: + dllname = os.path.split(self._welu.__file__) + dllname = os.path.split(dllname[0]) + dllname = os.path.join(dllname[0], r'win32service.pyd') + self.dllname = dllname + self.logtype = logtype + # Administrative privileges are required to add a source to the registry. + # This may not be available for a user that just wants to add to an + # existing source - handle this specific case. + try: + self._welu.AddSourceToRegistry(appname, dllname, logtype) + except Exception as e: + # This will probably be a pywintypes.error. 
Only raise if it's not + # an "access denied" error, else let it pass + if getattr(e, 'winerror', None) != 5: # not access denied + raise + self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE + self.typemap = { + logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, + logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, + logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, + } + except ImportError: + print("The Python Win32 extensions for NT (service, event "\ + "logging) appear not to be available.") + self._welu = None + + def getMessageID(self, record): + """ + Return the message ID for the event record. If you are using your + own messages, you could do this by having the msg passed to the + logger being an ID rather than a formatting string. Then, in here, + you could use a dictionary lookup to get the message ID. This + version returns 1, which is the base message ID in win32service.pyd. + """ + return 1 + + def getEventCategory(self, record): + """ + Return the event category for the record. + + Override this if you want to specify your own categories. This version + returns 0. + """ + return 0 + + def getEventType(self, record): + """ + Return the event type for the record. + + Override this if you want to specify your own types. This version does + a mapping using the handler's typemap attribute, which is set up in + __init__() to a dictionary which contains mappings for DEBUG, INFO, + WARNING, ERROR and CRITICAL. If you are using your own levels you will + either need to override this method or place a suitable dictionary in + the handler's typemap attribute. + """ + return self.typemap.get(record.levelno, self.deftype) + + def emit(self, record): + """ + Emit a record. + + Determine the message ID, event category and event type. Then + log the message in the NT event log. + """ + if self._welu: + try: + id = self.getMessageID(record) + cat = self.getEventCategory(record) + type = self.getEventType(record) + msg = self.format(record) + self._welu.ReportEvent(self.appname, id, cat, type, [msg]) + except Exception: + self.handleError(record) + + def close(self): + """ + Clean up this handler. + + You can remove the application name from the registry as a + source of event log entries. However, if you do this, you will + not be able to see the events as you intended in the Event Log + Viewer - it needs to be able to access the registry to get the + DLL name. + """ + #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) + logging.Handler.close(self) + +class HTTPHandler(logging.Handler): + """ + A class which sends records to a web server, using either GET or + POST semantics. + """ + def __init__(self, host, url, method="GET", secure=False, credentials=None, + context=None): + """ + Initialize the instance with the host, the request URL, and the method + ("GET" or "POST") + """ + logging.Handler.__init__(self) + method = method.upper() + if method not in ["GET", "POST"]: + raise ValueError("method must be GET or POST") + if not secure and context is not None: + raise ValueError("context parameter only makes sense " + "with secure=True") + self.host = host + self.url = url + self.method = method + self.secure = secure + self.credentials = credentials + self.context = context + + def mapLogRecord(self, record): + """ + Default implementation of mapping the log record into a dict + that is sent as the CGI data. Overwrite in your class. + Contributed by Franz Glasner. 
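+
+        For example, a subclass might send only a few selected fields (the
+        attribute names used here are standard LogRecord attributes):
+
+            from logging.handlers import HTTPHandler
+
+            class SlimHTTPHandler(HTTPHandler):
+                def mapLogRecord(self, record):
+                    return {'name': record.name,
+                            'levelname': record.levelname,
+                            'message': record.getMessage()}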
+ """ + return record.__dict__ + + def getConnection(self, host, secure): + """ + get a HTTP[S]Connection. + + Override when a custom connection is required, for example if + there is a proxy. + """ + import http.client + if secure: + connection = http.client.HTTPSConnection(host, context=self.context) + else: + connection = http.client.HTTPConnection(host) + return connection + + def emit(self, record): + """ + Emit a record. + + Send the record to the web server as a percent-encoded dictionary + """ + try: + import urllib.parse + host = self.host + h = self.getConnection(host, self.secure) + url = self.url + data = urllib.parse.urlencode(self.mapLogRecord(record)) + if self.method == "GET": + if (url.find('?') >= 0): + sep = '&' + else: + sep = '?' + url = url + "%c%s" % (sep, data) + h.putrequest(self.method, url) + # support multiple hosts on one IP address... + # need to strip optional :port from host, if present + i = host.find(":") + if i >= 0: + host = host[:i] + # See issue #30904: putrequest call above already adds this header + # on Python 3.x. + # h.putheader("Host", host) + if self.method == "POST": + h.putheader("Content-type", + "application/x-www-form-urlencoded") + h.putheader("Content-length", str(len(data))) + if self.credentials: + import base64 + s = ('%s:%s' % self.credentials).encode('utf-8') + s = 'Basic ' + base64.b64encode(s).strip().decode('ascii') + h.putheader('Authorization', s) + h.endheaders() + if self.method == "POST": + h.send(data.encode('utf-8')) + h.getresponse() #can't do anything with the result + except Exception: + self.handleError(record) + +class BufferingHandler(logging.Handler): + """ + A handler class which buffers logging records in memory. Whenever each + record is added to the buffer, a check is made to see if the buffer should + be flushed. If it should, then flush() is expected to do what's needed. + """ + def __init__(self, capacity): + """ + Initialize the handler with the buffer size. + """ + logging.Handler.__init__(self) + self.capacity = capacity + self.buffer = [] + + def shouldFlush(self, record): + """ + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. This method can be + overridden to implement custom flushing strategies. + """ + return (len(self.buffer) >= self.capacity) + + def emit(self, record): + """ + Emit a record. + + Append the record. If shouldFlush() tells us to, call flush() to process + the buffer. + """ + self.buffer.append(record) + if self.shouldFlush(record): + self.flush() + + def flush(self): + """ + Override to implement custom flushing behaviour. + + This version just zaps the buffer to empty. + """ + with self.lock: + self.buffer.clear() + + def close(self): + """ + Close the handler. + + This version just flushes and chains to the parent class' close(). + """ + try: + self.flush() + finally: + logging.Handler.close(self) + +class MemoryHandler(BufferingHandler): + """ + A handler class which buffers logging records in memory, periodically + flushing them to a target handler. Flushing occurs whenever the buffer + is full, or when an event of a certain severity or greater is seen. + """ + def __init__(self, capacity, flushLevel=logging.ERROR, target=None, + flushOnClose=True): + """ + Initialize the handler with the buffer size, the level at which + flushing should occur and an optional target. + + Note that without a target being set either here or via setTarget(), + a MemoryHandler is no use to anyone! 
+ + The ``flushOnClose`` argument is ``True`` for backward compatibility + reasons - the old behaviour is that when the handler is closed, the + buffer is flushed, even if the flush level hasn't been exceeded nor the + capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``. + """ + BufferingHandler.__init__(self, capacity) + self.flushLevel = flushLevel + self.target = target + # See Issue #26559 for why this has been added + self.flushOnClose = flushOnClose + + def shouldFlush(self, record): + """ + Check for buffer full or a record at the flushLevel or higher. + """ + return (len(self.buffer) >= self.capacity) or \ + (record.levelno >= self.flushLevel) + + def setTarget(self, target): + """ + Set the target handler for this handler. + """ + with self.lock: + self.target = target + + def flush(self): + """ + For a MemoryHandler, flushing means just sending the buffered + records to the target, if there is one. Override if you want + different behaviour. + + The record buffer is only cleared if a target has been set. + """ + with self.lock: + if self.target: + for record in self.buffer: + self.target.handle(record) + self.buffer.clear() + + def close(self): + """ + Flush, if appropriately configured, set the target to None and lose the + buffer. + """ + try: + if self.flushOnClose: + self.flush() + finally: + with self.lock: + self.target = None + BufferingHandler.close(self) + + +class QueueHandler(logging.Handler): + """ + This handler sends events to a queue. Typically, it would be used together + with a multiprocessing Queue to centralise logging to file in one process + (in a multi-process application), so as to avoid file write contention + between processes. + + This code is new in Python 3.2, but this class can be copy pasted into + user code for use with earlier Python versions. + """ + + def __init__(self, queue): + """ + Initialise an instance, using the passed queue. + """ + logging.Handler.__init__(self) + self.queue = queue + self.listener = None # will be set to listener if configured via dictConfig() + + def enqueue(self, record): + """ + Enqueue a record. + + The base implementation uses put_nowait. You may want to override + this method if you want to use blocking, timeouts or custom queue + implementations. + """ + self.queue.put_nowait(record) + + def prepare(self, record): + """ + Prepare a record for queuing. The object returned by this method is + enqueued. + + The base implementation formats the record to merge the message and + arguments, and removes unpickleable items from the record in-place. + Specifically, it overwrites the record's `msg` and + `message` attributes with the merged message (obtained by + calling the handler's `format` method), and sets the `args`, + `exc_info` and `exc_text` attributes to None. + + You might want to override this method if you want to convert + the record to a dict or JSON string, or send a modified copy + of the record while leaving the original intact. + """ + # The format operation gets traceback text into record.exc_text + # (if there's exception data), and also returns the formatted + # message. We can then use this to replace the original + # msg + args, as these might be unpickleable. We also zap the + # exc_info, exc_text and stack_info attributes, as they are no longer + # needed and, if not None, will typically not be pickleable. + msg = self.format(record) + # bpo-35726: make copy of record to avoid affecting other handlers in the chain. 
+ record = copy.copy(record) + record.message = msg + record.msg = msg + record.args = None + record.exc_info = None + record.exc_text = None + record.stack_info = None + return record + + def emit(self, record): + """ + Emit a record. + + Writes the LogRecord to the queue, preparing it for pickling first. + """ + try: + self.enqueue(self.prepare(record)) + except Exception: + self.handleError(record) + + +class QueueListener(object): + """ + This class implements an internal threaded listener which watches for + LogRecords being added to a queue, removes them and passes them to a + list of handlers for processing. + """ + _sentinel = None + + def __init__(self, queue, *handlers, respect_handler_level=False): + """ + Initialise an instance with the specified queue and + handlers. + """ + self.queue = queue + self.handlers = handlers + self._thread = None + self.respect_handler_level = respect_handler_level + + def __enter__(self): + """ + For use as a context manager. Starts the listener. + """ + self.start() + return self + + def __exit__(self, *args): + """ + For use as a context manager. Stops the listener. + """ + self.stop() + + def dequeue(self, block): + """ + Dequeue a record and return it, optionally blocking. + + The base implementation uses get. You may want to override this method + if you want to use timeouts or work with custom queue implementations. + """ + return self.queue.get(block) + + def start(self): + """ + Start the listener. + + This starts up a background thread to monitor the queue for + LogRecords to process. + """ + if self._thread is not None: + raise RuntimeError("Listener already started") + + self._thread = t = threading.Thread(target=self._monitor) + t.daemon = True + t.start() + + def prepare(self, record): + """ + Prepare a record for handling. + + This method just returns the passed-in record. You may want to + override this method if you need to do any custom marshalling or + manipulation of the record before passing it to the handlers. + """ + return record + + def handle(self, record): + """ + Handle a record. + + This just loops through the handlers offering them the record + to handle. + """ + record = self.prepare(record) + for handler in self.handlers: + if not self.respect_handler_level: + process = True + else: + process = record.levelno >= handler.level + if process: + handler.handle(record) + + def _monitor(self): + """ + Monitor the queue for records, and ask the handler + to deal with them. + + This method runs on a separate, internal thread. + The thread will terminate if it sees a sentinel object in the queue. + """ + q = self.queue + has_task_done = hasattr(q, 'task_done') + while True: + try: + record = self.dequeue(True) + if record is self._sentinel: + if has_task_done: + q.task_done() + break + self.handle(record) + if has_task_done: + q.task_done() + except queue.Empty: + break + + def enqueue_sentinel(self): + """ + This is used to enqueue the sentinel record. + + The base implementation uses put_nowait. You may want to override this + method if you want to use timeouts or work with custom queue + implementations. + """ + self.queue.put_nowait(self._sentinel) + + def stop(self): + """ + Stop the listener. + + This asks the thread to terminate, and then waits for it to do so. + Note that if you don't call this before your application exits, there + may be some records still left on the queue, which won't be processed. 
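+
+        A typical end-to-end setup, as a sketch (the choice of handler is
+        illustrative):
+
+            import logging, queue
+            from logging.handlers import QueueHandler, QueueListener
+
+            q = queue.Queue()
+            logging.getLogger().addHandler(QueueHandler(q))
+            listener = QueueListener(q, logging.StreamHandler(),
+                                     respect_handler_level=True)
+            listener.start()
+            try:
+                logging.error('handled in the listener thread')
+            finally:
+                listener.stop()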
+ """ + if self._thread: # see gh-114706 - allow calling this more than once + self.enqueue_sentinel() + self._thread.join() + self._thread = None diff --git a/Python314_4_x64_Template/Lib/lzma.py b/Python314_4_x64_Template/Lib/lzma.py new file mode 100644 index 00000000..316066d0 --- /dev/null +++ b/Python314_4_x64_Template/Lib/lzma.py @@ -0,0 +1,364 @@ +"""Interface to the liblzma compression library. + +This module provides a class for reading and writing compressed files, +classes for incremental (de)compression, and convenience functions for +one-shot (de)compression. + +These classes and functions support both the XZ and legacy LZMA +container formats, as well as raw compressed data streams. +""" + +__all__ = [ + "CHECK_NONE", "CHECK_CRC32", "CHECK_CRC64", "CHECK_SHA256", + "CHECK_ID_MAX", "CHECK_UNKNOWN", + "FILTER_LZMA1", "FILTER_LZMA2", "FILTER_DELTA", "FILTER_X86", "FILTER_IA64", + "FILTER_ARM", "FILTER_ARMTHUMB", "FILTER_POWERPC", "FILTER_SPARC", + "FORMAT_AUTO", "FORMAT_XZ", "FORMAT_ALONE", "FORMAT_RAW", + "MF_HC3", "MF_HC4", "MF_BT2", "MF_BT3", "MF_BT4", + "MODE_FAST", "MODE_NORMAL", "PRESET_DEFAULT", "PRESET_EXTREME", + + "LZMACompressor", "LZMADecompressor", "LZMAFile", "LZMAError", + "open", "compress", "decompress", "is_check_supported", +] + +import builtins +import io +import os +from compression._common import _streams +from _lzma import * +from _lzma import _encode_filter_properties, _decode_filter_properties # noqa: F401 + + +# Value 0 no longer used +_MODE_READ = 1 +# Value 2 no longer used +_MODE_WRITE = 3 + + +class LZMAFile(_streams.BaseStream): + + """A file object providing transparent LZMA (de)compression. + + An LZMAFile can act as a wrapper for an existing file object, or + refer directly to a named file on disk. + + Note that LZMAFile provides a *binary* file interface - data read + is returned as bytes, and data to be written must be given as bytes. + """ + + def __init__(self, filename=None, mode="r", *, + format=None, check=-1, preset=None, filters=None): + """Open an LZMA-compressed file in binary mode. + + filename can be either an actual file name (given as a str, + bytes, or PathLike object), in which case the named file is + opened, or it can be an existing file object to read from or + write to. + + mode can be "r" for reading (default), "w" for (over)writing, + "x" for creating exclusively, or "a" for appending. These can + equivalently be given as "rb", "wb", "xb" and "ab" respectively. + + format specifies the container format to use for the file. + If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the + default is FORMAT_XZ. + + check specifies the integrity check to use. This argument can + only be used when opening a file for writing. For FORMAT_XZ, + the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not + support integrity checks - for these formats, check must be + omitted, or be CHECK_NONE. + + When opening a file for reading, the *preset* argument is not + meaningful, and should be omitted. The *filters* argument should + also be omitted, except when format is FORMAT_RAW (in which case + it is required). + + When opening a file for writing, the settings used by the + compressor can be specified either as a preset compression + level (with the *preset* argument), or in detail as a custom + filter chain (with the *filters* argument). For FORMAT_XZ and + FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset + level. 
For FORMAT_RAW, the caller must always specify a filter + chain; the raw compressor does not support preset compression + levels. + + preset (if provided) should be an integer in the range 0-9, + optionally OR-ed with the constant PRESET_EXTREME. + + filters (if provided) should be a sequence of dicts. Each dict + should have an entry for "id" indicating ID of the filter, plus + additional entries for options to the filter. + """ + self._fp = None + self._closefp = False + self._mode = None + + if mode in ("r", "rb"): + if check != -1: + raise ValueError("Cannot specify an integrity check " + "when opening a file for reading") + if preset is not None: + raise ValueError("Cannot specify a preset compression " + "level when opening a file for reading") + if format is None: + format = FORMAT_AUTO + mode_code = _MODE_READ + elif mode in ("w", "wb", "a", "ab", "x", "xb"): + if format is None: + format = FORMAT_XZ + mode_code = _MODE_WRITE + self._compressor = LZMACompressor(format=format, check=check, + preset=preset, filters=filters) + self._pos = 0 + else: + raise ValueError("Invalid mode: {!r}".format(mode)) + + if isinstance(filename, (str, bytes, os.PathLike)): + if "b" not in mode: + mode += "b" + self._fp = builtins.open(filename, mode) + self._closefp = True + self._mode = mode_code + elif hasattr(filename, "read") or hasattr(filename, "write"): + self._fp = filename + self._mode = mode_code + else: + raise TypeError("filename must be a str, bytes, file or PathLike object") + + if self._mode == _MODE_READ: + raw = _streams.DecompressReader(self._fp, LZMADecompressor, + trailing_error=LZMAError, format=format, filters=filters) + self._buffer = io.BufferedReader(raw) + + def close(self): + """Flush and close the file. + + May be called more than once without error. Once the file is + closed, any other operation on it will raise a ValueError. + """ + if self.closed: + return + try: + if self._mode == _MODE_READ: + self._buffer.close() + self._buffer = None + elif self._mode == _MODE_WRITE: + self._fp.write(self._compressor.flush()) + self._compressor = None + finally: + try: + if self._closefp: + self._fp.close() + finally: + self._fp = None + self._closefp = False + + @property + def closed(self): + """True if this file is closed.""" + return self._fp is None + + @property + def name(self): + self._check_not_closed() + return self._fp.name + + @property + def mode(self): + return 'wb' if self._mode == _MODE_WRITE else 'rb' + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() and self._buffer.seekable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode == _MODE_READ + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + def peek(self, size=-1): + """Return buffered data without advancing the file position. + + Always returns at least one byte of data, unless at EOF. + The exact number of bytes returned is unspecified. + """ + self._check_can_read() + # Relies on the undocumented fact that BufferedReader.peek() always + # returns at least one byte (except at EOF) + return self._buffer.peek(size) + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. 
+ Returns b"" if the file is already at EOF. + """ + self._check_can_read() + return self._buffer.read(size) + + def read1(self, size=-1): + """Read up to size uncompressed bytes, while trying to avoid + making multiple reads from the underlying stream. Reads up to a + buffer's worth of data if size is negative. + + Returns b"" if the file is at EOF. + """ + self._check_can_read() + if size < 0: + size = io.DEFAULT_BUFFER_SIZE + return self._buffer.read1(size) + + def readline(self, size=-1): + """Read a line of uncompressed bytes from the file. + + The terminating newline (if present) is retained. If size is + non-negative, no more than size bytes will be read (in which + case the line may be incomplete). Returns b'' if already at EOF. + """ + self._check_can_read() + return self._buffer.readline(size) + + def write(self, data): + """Write a bytes object to the file. + + Returns the number of uncompressed bytes written, which is + always the length of data in bytes. Note that due to buffering, + the file on disk may not reflect the data written until close() + is called. + """ + self._check_can_write() + if isinstance(data, (bytes, bytearray)): + length = len(data) + else: + # accept any data that supports the buffer protocol + data = memoryview(data) + length = data.nbytes + + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += length + return length + + def seek(self, offset, whence=io.SEEK_SET): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Possible values for whence are: + + 0: start of stream (default): offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + """ + self._check_can_seek() + return self._buffer.seek(offset, whence) + + def tell(self): + """Return the current file position.""" + self._check_not_closed() + if self._mode == _MODE_READ: + return self._buffer.tell() + return self._pos + + +def open(filename, mode="rb", *, + format=None, check=-1, preset=None, filters=None, + encoding=None, errors=None, newline=None): + """Open an LZMA-compressed file in binary or text mode. + + filename can be either an actual file name (given as a str, bytes, + or PathLike object), in which case the named file is opened, or it + can be an existing file object to read from or write to. + + The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb", + "a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text + mode. + + The format, check, preset and filters arguments specify the + compression settings, as for LZMACompressor, LZMADecompressor and + LZMAFile. + + For binary mode, this function is equivalent to the LZMAFile + constructor: LZMAFile(filename, mode, ...). In this case, the + encoding, errors and newline arguments must not be provided. + + For text mode, an LZMAFile object is created, and wrapped in an + io.TextIOWrapper instance with the specified encoding, error + handling behavior, and line ending(s). 
+ + """ + if "t" in mode: + if "b" in mode: + raise ValueError("Invalid mode: %r" % (mode,)) + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if errors is not None: + raise ValueError("Argument 'errors' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + lz_mode = mode.replace("t", "") + binary_file = LZMAFile(filename, lz_mode, format=format, check=check, + preset=preset, filters=filters) + + if "t" in mode: + encoding = io.text_encoding(encoding) + return io.TextIOWrapper(binary_file, encoding, errors, newline) + else: + return binary_file + + +def compress(data, format=FORMAT_XZ, check=-1, preset=None, filters=None): + """Compress a block of data. + + Refer to LZMACompressor's docstring for a description of the + optional arguments *format*, *check*, *preset* and *filters*. + + For incremental compression, use an LZMACompressor instead. + """ + comp = LZMACompressor(format, check, preset, filters) + return comp.compress(data) + comp.flush() + + +def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None): + """Decompress a block of data. + + Refer to LZMADecompressor's docstring for a description of the + optional arguments *format*, *check* and *filters*. + + For incremental decompression, use an LZMADecompressor instead. + """ + results = [] + while True: + decomp = LZMADecompressor(format, memlimit, filters) + try: + res = decomp.decompress(data) + except LZMAError: + if results: + break # Leftover data is not a valid LZMA/XZ stream; ignore it. + else: + raise # Error on the first iteration; bail out. + results.append(res) + if not decomp.eof: + raise LZMAError("Compressed data ended before the " + "end-of-stream marker was reached") + data = decomp.unused_data + if not data: + break + return b"".join(results) diff --git a/Python313_13_x64_Template/Lib/mailbox.py b/Python314_4_x64_Template/Lib/mailbox.py similarity index 100% rename from Python313_13_x64_Template/Lib/mailbox.py rename to Python314_4_x64_Template/Lib/mailbox.py diff --git a/Python314_4_x64_Template/Lib/mimetypes.py b/Python314_4_x64_Template/Lib/mimetypes.py new file mode 100644 index 00000000..7d0f4c1f --- /dev/null +++ b/Python314_4_x64_Template/Lib/mimetypes.py @@ -0,0 +1,747 @@ +"""Guess the MIME type of a file. + +This module defines two useful functions: + +guess_type(url, strict=True) -- guess the MIME type and encoding of a URL. + +guess_extension(type, strict=True) -- guess the extension for a given MIME type. 
+ +It also contains the following, for tuning the behavior: + +Data: + +knownfiles -- list of files to parse +inited -- flag set when init() has been called +suffix_map -- dictionary mapping suffixes to suffixes +encodings_map -- dictionary mapping suffixes to encodings +types_map -- dictionary mapping suffixes to types + +Functions: + +init([files]) -- parse a list of files, default knownfiles (on Windows, the + default values are taken from the registry) +read_mime_types(file) -- parse one file, return a dictionary or None +""" + +try: + from _winapi import _mimetypes_read_windows_registry +except ImportError: + _mimetypes_read_windows_registry = None + +try: + import winreg as _winreg +except ImportError: + _winreg = None + +__all__ = [ + "knownfiles", "inited", "MimeTypes", + "guess_type", "guess_file_type", "guess_all_extensions", "guess_extension", + "add_type", "init", "read_mime_types", + "suffix_map", "encodings_map", "types_map", "common_types" +] + +knownfiles = [ + "/etc/mime.types", + "/etc/httpd/mime.types", # Mac OS X + "/etc/httpd/conf/mime.types", # Apache + "/etc/apache/mime.types", # Apache 1 + "/etc/apache2/mime.types", # Apache 2 + "/usr/local/etc/httpd/conf/mime.types", + "/usr/local/lib/netscape/mime.types", + "/usr/local/etc/httpd/conf/mime.types", # Apache 1.2 + "/usr/local/etc/mime.types", # Apache 1.3 + ] + +inited = False +_db = None + + +class MimeTypes: + """MIME-types datastore. + + This datastore can handle information from mime.types-style files + and supports basic determination of MIME type from a filename or + URL, and can guess a reasonable extension given a MIME type. + """ + + def __init__(self, filenames=(), strict=True): + if not inited: + init() + self.encodings_map = _encodings_map_default.copy() + self.suffix_map = _suffix_map_default.copy() + self.types_map = ({}, {}) # dict for (non-strict, strict) + self.types_map_inv = ({}, {}) + for (ext, type) in _types_map_default.items(): + self.add_type(type, ext, True) + for (ext, type) in _common_types_default.items(): + self.add_type(type, ext, False) + for name in filenames: + self.read(name, strict) + + def add_type(self, type, ext, strict=True): + """Add a mapping between a type and an extension. + + When the extension is already known, the new + type will replace the old one. When the type + is already known the extension will be added + to the list of known extensions. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + + Valid extensions are empty or start with a '.'. + """ + if ext and not ext.startswith('.'): + from warnings import _deprecated + + _deprecated( + "Undotted extensions", + "Using undotted extensions is deprecated and " + "will raise a ValueError in Python {remove}", + remove=(3, 16), + ) + + if not type: + return + self.types_map[strict][ext] = type + exts = self.types_map_inv[strict].setdefault(type, []) + if ext not in exts: + exts.append(ext) + + def guess_type(self, url, strict=True): + """Guess the type of a file which is either a URL or a path-like object. + + Return value is a tuple (type, encoding) where type is None if + the type can't be guessed (no or unknown suffix) or a string + of the form type/subtype, usable for a MIME Content-type + header; and encoding is None for no encoding or the name of + the program used to encode (e.g. compress or gzip). The + mappings are table driven. Encoding suffixes are case + sensitive; type suffixes are first tried case sensitive, then + case insensitive. 
+ + The suffixes .tgz, .taz and .tz (case sensitive!) are all + mapped to '.tar.gz'. (This is table-driven too, using the + dictionary suffix_map.) + + Optional 'strict' argument when False adds a bunch of commonly found, + but non-standard types. + """ + # Lazy import to improve module import time + import os + import urllib.parse + + # TODO: Deprecate accepting file paths (in particular path-like objects). + url = os.fspath(url) + p = urllib.parse.urlparse(url) + if p.scheme and len(p.scheme) > 1: + scheme = p.scheme + url = p.path + else: + return self.guess_file_type(url, strict=strict) + if scheme == 'data': + # syntax of data URLs: + # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data + # mediatype := [ type "/" subtype ] *( ";" parameter ) + # data := *urlchar + # parameter := attribute "=" value + # type/subtype defaults to "text/plain" + comma = url.find(',') + if comma < 0: + # bad data URL + return None, None + semi = url.find(';', 0, comma) + if semi >= 0: + type = url[:semi] + else: + type = url[:comma] + if '=' in type or '/' not in type: + type = 'text/plain' + return type, None # never compressed, so encoding is None + + # Lazy import to improve module import time + import posixpath + + return self._guess_file_type(url, strict, posixpath.splitext) + + def guess_file_type(self, path, *, strict=True): + """Guess the type of a file based on its path. + + Similar to guess_type(), but takes file path instead of URL. + """ + # Lazy import to improve module import time + import os + + path = os.fsdecode(path) + path = os.path.splitdrive(path)[1] + return self._guess_file_type(path, strict, os.path.splitext) + + def _guess_file_type(self, path, strict, splitext): + base, ext = splitext(path) + while (ext_lower := ext.lower()) in self.suffix_map: + base, ext = splitext(base + self.suffix_map[ext_lower]) + # encodings_map is case sensitive + if ext in self.encodings_map: + encoding = self.encodings_map[ext] + base, ext = splitext(base) + else: + encoding = None + ext = ext.lower() + types_map = self.types_map[True] + if ext in types_map: + return types_map[ext], encoding + elif strict: + return None, encoding + types_map = self.types_map[False] + if ext in types_map: + return types_map[ext], encoding + else: + return None, encoding + + def guess_all_extensions(self, type, strict=True): + """Guess the extensions for a file based on its MIME type. + + Return value is a list of strings giving the possible filename + extensions, including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data stream, + but would be mapped to the MIME type 'type' by guess_type(). + + Optional 'strict' argument when false adds a bunch of commonly found, + but non-standard types. + """ + type = type.lower() + extensions = list(self.types_map_inv[True].get(type, [])) + if not strict: + for ext in self.types_map_inv[False].get(type, []): + if ext not in extensions: + extensions.append(ext) + return extensions + + def guess_extension(self, type, strict=True): + """Guess the extension for a file based on its MIME type. + + Return value is a string giving a filename extension, + including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data + stream, but would be mapped to the MIME type 'type' by + guess_type(). If no extension can be guessed for 'type', None + is returned. + + Optional 'strict' argument when false adds a bunch of commonly found, + but non-standard types. 
+ """ + extensions = self.guess_all_extensions(type, strict) + if not extensions: + return None + return extensions[0] + + def read(self, filename, strict=True): + """ + Read a single mime.types-format file, specified by pathname. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + """ + with open(filename, encoding='utf-8') as fp: + self.readfp(fp, strict) + + def readfp(self, fp, strict=True): + """ + Read a single mime.types-format file. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + """ + while line := fp.readline(): + words = line.split() + for i in range(len(words)): + if words[i][0] == '#': + del words[i:] + break + if not words: + continue + type, suffixes = words[0], words[1:] + for suff in suffixes: + self.add_type(type, '.' + suff, strict) + + def read_windows_registry(self, strict=True): + """ + Load the MIME types database from Windows registry. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + """ + + if not _mimetypes_read_windows_registry and not _winreg: + return + + add_type = self.add_type + if strict: + add_type = lambda type, ext: self.add_type(type, ext, True) + + # Accelerated function if it is available + if _mimetypes_read_windows_registry: + _mimetypes_read_windows_registry(add_type) + elif _winreg: + self._read_windows_registry(add_type) + + @classmethod + def _read_windows_registry(cls, add_type): + def enum_types(mimedb): + i = 0 + while True: + try: + ctype = _winreg.EnumKey(mimedb, i) + except OSError: + break + else: + if '\0' not in ctype: + yield ctype + i += 1 + + with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '') as hkcr: + for subkeyname in enum_types(hkcr): + try: + with _winreg.OpenKey(hkcr, subkeyname) as subkey: + # Only check file extensions + if not subkeyname.startswith("."): + continue + # raises OSError if no 'Content Type' value + mimetype, datatype = _winreg.QueryValueEx( + subkey, 'Content Type') + if datatype != _winreg.REG_SZ: + continue + add_type(mimetype, subkeyname) + except OSError: + continue + +def guess_type(url, strict=True): + """Guess the type of a file based on its URL. + + Return value is a tuple (type, encoding) where type is None if the + type can't be guessed (no or unknown suffix) or a string of the + form type/subtype, usable for a MIME Content-type header; and + encoding is None for no encoding or the name of the program used + to encode (e.g. compress or gzip). The mappings are table + driven. Encoding suffixes are case sensitive; type suffixes are + first tried case sensitive, then case insensitive. + + The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped + to ".tar.gz". (This is table-driven too, using the dictionary + suffix_map). + + Optional 'strict' argument when false adds a bunch of commonly found, but + non-standard types. + """ + if _db is None: + init() + return _db.guess_type(url, strict) + + +def guess_file_type(path, *, strict=True): + """Guess the type of a file based on its path. + + Similar to guess_type(), but takes file path instead of URL. + """ + if _db is None: + init() + return _db.guess_file_type(path, strict=strict) + + +def guess_all_extensions(type, strict=True): + """Guess the extensions for a file based on its MIME type. + + Return value is a list of strings giving the possible filename + extensions, including the leading dot ('.'). 
The extension is not
+ guaranteed to have been associated with any particular data
+ stream, but would be mapped to the MIME type 'type' by
+ guess_type(). If no extension can be guessed for 'type', an
+ empty list is returned.
+
+ Optional 'strict' argument when false adds a bunch of commonly found,
+ but non-standard types.
+ """
+ if _db is None:
+ init()
+ return _db.guess_all_extensions(type, strict)
+
+ def guess_extension(type, strict=True):
+ """Guess the extension for a file based on its MIME type.
+
+ Return value is a string giving a filename extension, including the
+ leading dot ('.'). The extension is not guaranteed to have been
+ associated with any particular data stream, but would be mapped to the
+ MIME type 'type' by guess_type(). If no extension can be guessed for
+ 'type', None is returned.
+
+ Optional 'strict' argument when false adds a bunch of commonly found,
+ but non-standard types.
+ """
+ if _db is None:
+ init()
+ return _db.guess_extension(type, strict)
+
+ def add_type(type, ext, strict=True):
+ """Add a mapping between a type and an extension.
+
+ When the extension is already known, the new
+ type will replace the old one. When the type
+ is already known the extension will be added
+ to the list of known extensions.
+
+ If strict is true, information will be added to
+ list of standard types, else to the list of non-standard
+ types.
+ """
+ if _db is None:
+ init()
+ return _db.add_type(type, ext, strict)
+
+
+ def init(files=None):
+ global suffix_map, types_map, encodings_map, common_types
+ global inited, _db
+ inited = True # so that MimeTypes.__init__() doesn't call us again
+
+ if files is None or _db is None:
+ db = MimeTypes()
+ # Quick return if not supported
+ db.read_windows_registry()
+
+ if files is None:
+ files = knownfiles
+ else:
+ files = knownfiles + list(files)
+ else:
+ db = _db
+
+ # Lazy import to improve module import time
+ import os
+
+ for file in files:
+ if os.path.isfile(file):
+ db.read(file)
+ encodings_map = db.encodings_map
+ suffix_map = db.suffix_map
+ types_map = db.types_map[True]
+ common_types = db.types_map[False]
+ # Make the DB a global variable now that it is fully initialized
+ _db = db
+
+
+ def read_mime_types(file):
+ try:
+ f = open(file, encoding='utf-8')
+ except OSError:
+ return None
+ with f:
+ db = MimeTypes()
+ db.readfp(f, True)
+ return db.types_map[True]
+
+
+ def _default_mime_types():
+ global suffix_map, _suffix_map_default
+ global encodings_map, _encodings_map_default
+ global types_map, _types_map_default
+ global common_types, _common_types_default
+
+ suffix_map = _suffix_map_default = {
+ '.svgz': '.svg.gz',
+ '.tgz': '.tar.gz',
+ '.taz': '.tar.gz',
+ '.tz': '.tar.gz',
+ '.tbz2': '.tar.bz2',
+ '.txz': '.tar.xz',
+ }
+
+ encodings_map = _encodings_map_default = {
+ '.gz': 'gzip',
+ '.Z': 'compress',
+ '.bz2': 'bzip2',
+ '.xz': 'xz',
+ '.br': 'br',
+ }
+
+ # Before adding new types, make sure they are either registered with IANA,
+ # at https://www.iana.org/assignments/media-types/media-types.xhtml
+ # or extensions, i.e. using the x- prefix
+
+ # If you add to these, please keep them sorted by mime type.
+ # Make sure the entry with the preferred file extension for a particular mime type
+ # appears before any others of the same mimetype.
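+ # Illustrative consequence of the ordering rule above (a sketch, assuming
+ # only the default maps below are loaded):
+ # >>> import mimetypes
+ # >>> mimetypes.guess_extension('text/javascript')
+ # '.js'
+ # ('.js' is registered before '.mjs' for the same type.)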
+ types_map = _types_map_default = { + '.js' : 'text/javascript', + '.mjs' : 'text/javascript', + '.epub' : 'application/epub+zip', + '.gz' : 'application/gzip', + '.json' : 'application/json', + '.webmanifest': 'application/manifest+json', + '.doc' : 'application/msword', + '.dot' : 'application/msword', + '.wiz' : 'application/msword', + '.nq' : 'application/n-quads', + '.nt' : 'application/n-triples', + '.bin' : 'application/octet-stream', + '.a' : 'application/octet-stream', + '.dll' : 'application/octet-stream', + '.exe' : 'application/octet-stream', + '.o' : 'application/octet-stream', + '.obj' : 'application/octet-stream', + '.so' : 'application/octet-stream', + '.oda' : 'application/oda', + '.ogx' : 'application/ogg', + '.pdf' : 'application/pdf', + '.p7c' : 'application/pkcs7-mime', + '.ps' : 'application/postscript', + '.ai' : 'application/postscript', + '.eps' : 'application/postscript', + '.trig' : 'application/trig', + '.m3u' : 'application/vnd.apple.mpegurl', + '.m3u8' : 'application/vnd.apple.mpegurl', + '.xls' : 'application/vnd.ms-excel', + '.xlb' : 'application/vnd.ms-excel', + '.eot' : 'application/vnd.ms-fontobject', + '.ppt' : 'application/vnd.ms-powerpoint', + '.pot' : 'application/vnd.ms-powerpoint', + '.ppa' : 'application/vnd.ms-powerpoint', + '.pps' : 'application/vnd.ms-powerpoint', + '.pwz' : 'application/vnd.ms-powerpoint', + '.odg' : 'application/vnd.oasis.opendocument.graphics', + '.odp' : 'application/vnd.oasis.opendocument.presentation', + '.ods' : 'application/vnd.oasis.opendocument.spreadsheet', + '.odt' : 'application/vnd.oasis.opendocument.text', + '.pptx' : 'application/vnd.openxmlformats-officedocument.presentationml.presentation', + '.xlsx' : 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + '.docx' : 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + '.rar' : 'application/vnd.rar', + '.wasm' : 'application/wasm', + '.7z' : 'application/x-7z-compressed', + '.bcpio' : 'application/x-bcpio', + '.cpio' : 'application/x-cpio', + '.csh' : 'application/x-csh', + '.deb' : 'application/x-debian-package', + '.dvi' : 'application/x-dvi', + '.gtar' : 'application/x-gtar', + '.hdf' : 'application/x-hdf', + '.h5' : 'application/x-hdf5', + '.latex' : 'application/x-latex', + '.mif' : 'application/x-mif', + '.cdf' : 'application/x-netcdf', + '.nc' : 'application/x-netcdf', + '.p12' : 'application/x-pkcs12', + '.php' : 'application/x-httpd-php', + '.pfx' : 'application/x-pkcs12', + '.ram' : 'application/x-pn-realaudio', + '.pyc' : 'application/x-python-code', + '.pyo' : 'application/x-python-code', + '.rpm' : 'application/x-rpm', + '.sh' : 'application/x-sh', + '.shar' : 'application/x-shar', + '.swf' : 'application/x-shockwave-flash', + '.sv4cpio': 'application/x-sv4cpio', + '.sv4crc' : 'application/x-sv4crc', + '.tar' : 'application/x-tar', + '.tcl' : 'application/x-tcl', + '.tex' : 'application/x-tex', + '.texi' : 'application/x-texinfo', + '.texinfo': 'application/x-texinfo', + '.roff' : 'application/x-troff', + '.t' : 'application/x-troff', + '.tr' : 'application/x-troff', + '.man' : 'application/x-troff-man', + '.me' : 'application/x-troff-me', + '.ms' : 'application/x-troff-ms', + '.ustar' : 'application/x-ustar', + '.src' : 'application/x-wais-source', + '.xsl' : 'application/xml', + '.rdf' : 'application/xml', + '.wsdl' : 'application/xml', + '.xpdl' : 'application/xml', + '.yaml' : 'application/yaml', + '.yml' : 'application/yaml', + '.zip' : 'application/zip', + '.3gp' : 'audio/3gpp', + '.3gpp' : 
'audio/3gpp', + '.3g2' : 'audio/3gpp2', + '.3gpp2' : 'audio/3gpp2', + '.aac' : 'audio/aac', + '.adts' : 'audio/aac', + '.loas' : 'audio/aac', + '.ass' : 'audio/aac', + '.au' : 'audio/basic', + '.snd' : 'audio/basic', + '.flac' : 'audio/flac', + '.mka' : 'audio/matroska', + '.m4a' : 'audio/mp4', + '.mp3' : 'audio/mpeg', + '.mp2' : 'audio/mpeg', + '.ogg' : 'audio/ogg', + '.opus' : 'audio/opus', + '.aif' : 'audio/x-aiff', + '.aifc' : 'audio/x-aiff', + '.aiff' : 'audio/x-aiff', + '.ra' : 'audio/x-pn-realaudio', + '.wav' : 'audio/vnd.wave', + '.otf' : 'font/otf', + '.ttf' : 'font/ttf', + '.weba' : 'audio/webm', + '.woff' : 'font/woff', + '.woff2' : 'font/woff2', + '.avif' : 'image/avif', + '.bmp' : 'image/bmp', + '.emf' : 'image/emf', + '.fits' : 'image/fits', + '.g3' : 'image/g3fax', + '.gif' : 'image/gif', + '.ief' : 'image/ief', + '.jp2' : 'image/jp2', + '.jpg' : 'image/jpeg', + '.jpe' : 'image/jpeg', + '.jpeg' : 'image/jpeg', + '.jpm' : 'image/jpm', + '.jpx' : 'image/jpx', + '.heic' : 'image/heic', + '.heif' : 'image/heif', + '.png' : 'image/png', + '.svg' : 'image/svg+xml', + '.t38' : 'image/t38', + '.tiff' : 'image/tiff', + '.tif' : 'image/tiff', + '.tfx' : 'image/tiff-fx', + '.ico' : 'image/vnd.microsoft.icon', + '.webp' : 'image/webp', + '.wmf' : 'image/wmf', + '.ras' : 'image/x-cmu-raster', + '.pnm' : 'image/x-portable-anymap', + '.pbm' : 'image/x-portable-bitmap', + '.pgm' : 'image/x-portable-graymap', + '.ppm' : 'image/x-portable-pixmap', + '.rgb' : 'image/x-rgb', + '.xbm' : 'image/x-xbitmap', + '.xpm' : 'image/x-xpixmap', + '.xwd' : 'image/x-xwindowdump', + '.eml' : 'message/rfc822', + '.mht' : 'message/rfc822', + '.mhtml' : 'message/rfc822', + '.nws' : 'message/rfc822', + '.gltf' : 'model/gltf+json', + '.glb' : 'model/gltf-binary', + '.stl' : 'model/stl', + '.css' : 'text/css', + '.csv' : 'text/csv', + '.html' : 'text/html', + '.htm' : 'text/html', + '.md' : 'text/markdown', + '.markdown': 'text/markdown', + '.n3' : 'text/n3', + '.txt' : 'text/plain', + '.bat' : 'text/plain', + '.c' : 'text/plain', + '.h' : 'text/plain', + '.ksh' : 'text/plain', + '.pl' : 'text/plain', + '.srt' : 'text/plain', + '.rtx' : 'text/richtext', + '.rtf' : 'text/rtf', + '.tsv' : 'text/tab-separated-values', + '.vtt' : 'text/vtt', + '.py' : 'text/x-python', + '.rst' : 'text/x-rst', + '.etx' : 'text/x-setext', + '.sgm' : 'text/x-sgml', + '.sgml' : 'text/x-sgml', + '.vcf' : 'text/x-vcard', + '.xml' : 'text/xml', + '.mkv' : 'video/matroska', + '.mk3d' : 'video/matroska-3d', + '.mp4' : 'video/mp4', + '.mpeg' : 'video/mpeg', + '.m1v' : 'video/mpeg', + '.mpa' : 'video/mpeg', + '.mpe' : 'video/mpeg', + '.mpg' : 'video/mpeg', + '.ogv' : 'video/ogg', + '.mov' : 'video/quicktime', + '.qt' : 'video/quicktime', + '.webm' : 'video/webm', + '.avi' : 'video/vnd.avi', + '.m4v' : 'video/x-m4v', + '.wmv' : 'video/x-ms-wmv', + '.movie' : 'video/x-sgi-movie', + } + + # These are non-standard types, commonly found in the wild. They will + # only match if strict=0 flag is given to the API methods. 
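+ # For instance (an illustrative sketch), '.pict' resolves only when
+ # strict checking is disabled:
+ # >>> mimetypes.guess_type('img.pict')
+ # (None, None)
+ # >>> mimetypes.guess_type('img.pict', strict=False)
+ # ('image/pict', None)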
+ + # Please sort these too + common_types = _common_types_default = { + '.rtf' : 'application/rtf', + '.apk' : 'application/vnd.android.package-archive', + '.midi': 'audio/midi', + '.mid' : 'audio/midi', + '.jpg' : 'image/jpg', + '.pict': 'image/pict', + '.pct' : 'image/pict', + '.pic' : 'image/pict', + '.xul' : 'text/xul', + } + + +_default_mime_types() + + +def _parse_args(args): + from argparse import ArgumentParser + + parser = ArgumentParser( + description='map filename extensions to MIME types', color=True + ) + parser.add_argument( + '-e', '--extension', + action='store_true', + help='guess extension instead of type' + ) + parser.add_argument( + '-l', '--lenient', + action='store_true', + help='additionally search for common but non-standard types' + ) + parser.add_argument('type', nargs='+', help='a type to search') + args = parser.parse_args(args) + return args, parser.format_help() + + +def _main(args=None): + """Run the mimetypes command-line interface and return a text to print.""" + args, help_text = _parse_args(args) + + results = [] + if args.extension: + for gtype in args.type: + guess = guess_extension(gtype, not args.lenient) + if guess: + results.append(str(guess)) + else: + results.append(f"error: unknown type {gtype}") + return results + else: + for gtype in args.type: + guess, encoding = guess_type(gtype, not args.lenient) + if guess: + results.append(f"type: {guess} encoding: {encoding}") + else: + results.append(f"error: media type unknown for {gtype}") + return results + + +if __name__ == '__main__': + import sys + + results = _main() + print("\n".join(results)) + sys.exit(any(result.startswith("error: ") for result in results)) diff --git a/Python313_13_x64_Template/Lib/modulefinder.py b/Python314_4_x64_Template/Lib/modulefinder.py similarity index 100% rename from Python313_13_x64_Template/Lib/modulefinder.py rename to Python314_4_x64_Template/Lib/modulefinder.py diff --git a/Python313_13_x64_Template/Lib/multiprocessing/__init__.py b/Python314_4_x64_Template/Lib/multiprocessing/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/multiprocessing/__init__.py rename to Python314_4_x64_Template/Lib/multiprocessing/__init__.py diff --git a/Python314_4_x64_Template/Lib/multiprocessing/connection.py b/Python314_4_x64_Template/Lib/multiprocessing/connection.py new file mode 100644 index 00000000..a6e1b0c7 --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/connection.py @@ -0,0 +1,1229 @@ +# +# A higher level module for using sockets (or Windows named pipes) +# +# multiprocessing/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] + +import errno +import io +import itertools +import os +import sys +import socket +import struct +import tempfile +import time + + +from . import util + +from . import AuthenticationError, BufferTooShort +from .context import reduction +_ForkingPickler = reduction.ForkingPickler + +try: + import _multiprocessing + import _winapi + from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE +except ImportError: + if sys.platform == 'win32': + raise + _winapi = None + +# +# +# + +# 64 KiB is the default PIPE buffer size of most POSIX platforms. +BUFSIZE = 64 * 1024 + +# A very generous timeout when it comes to local connections... +CONNECTION_TIMEOUT = 20. 
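+ # A minimal round-trip over this module's public API (an illustrative
+ # sketch; the address and authkey values are placeholders):
+ #
+ # from multiprocessing.connection import Listener, Client
+ # with Listener(('localhost', 6000), authkey=b'secret') as listener:
+ # # elsewhere: Client(('localhost', 6000), authkey=b'secret').send('ping')
+ # with listener.accept() as conn:
+ # print(conn.recv()) # -> 'ping'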
+ +_mmap_counter = itertools.count() +_MAX_PIPE_ATTEMPTS = 100 + +default_family = 'AF_INET' +families = ['AF_INET'] + +if hasattr(socket, 'AF_UNIX'): + default_family = 'AF_UNIX' + families += ['AF_UNIX'] + +if sys.platform == 'win32': + default_family = 'AF_PIPE' + families += ['AF_PIPE'] + + +def _init_timeout(timeout=CONNECTION_TIMEOUT): + return time.monotonic() + timeout + +def _check_timeout(t): + return time.monotonic() > t + +# +# +# + +def arbitrary_address(family): + ''' + Return an arbitrary free address for the given family + ''' + if family == 'AF_INET': + return ('localhost', 0) + elif family == 'AF_UNIX': + return tempfile.mktemp(prefix='sock-', dir=util.get_temp_dir()) + elif family == 'AF_PIPE': + return (r'\\.\pipe\pyc-%d-%d-%s' % + (os.getpid(), next(_mmap_counter), os.urandom(8).hex())) + else: + raise ValueError('unrecognized family') + +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + + if sys.platform == 'win32' and family == 'AF_UNIX': + # double check + if not hasattr(socket, family): + raise ValueError('Family %s is not recognized.' % family) + +def address_type(address): + ''' + Return the types of the address + + This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' + ''' + if type(address) == tuple: + return 'AF_INET' + elif type(address) is str and address.startswith('\\\\'): + return 'AF_PIPE' + elif type(address) is str or util.is_abstract_socket_namespace(address): + return 'AF_UNIX' + else: + raise ValueError('address type of %r unrecognized' % address) + +# +# Connection classes +# + +class _ConnectionBase: + _handle = None + + def __init__(self, handle, readable=True, writable=True): + handle = handle.__index__() + if handle < 0: + raise ValueError("invalid handle") + if not readable and not writable: + raise ValueError( + "at least one of `readable` and `writable` must be True") + self._handle = handle + self._readable = readable + self._writable = writable + + # XXX should we use util.Finalize instead of a __del__? 
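+ # __del__ below is only a last-resort safety net; deterministic cleanup
+ # is the context-manager form (an illustrative sketch; obj is a placeholder):
+ # with Client(address) as conn:
+ # conn.send(obj) # close() runs on block exit via __exit__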
+ + def __del__(self): + if self._handle is not None: + self._close() + + def _check_closed(self): + if self._handle is None: + raise OSError("handle is closed") + + def _check_readable(self): + if not self._readable: + raise OSError("connection is write-only") + + def _check_writable(self): + if not self._writable: + raise OSError("connection is read-only") + + def _bad_message_length(self): + if self._writable: + self._readable = False + else: + self.close() + raise OSError("bad message length") + + @property + def closed(self): + """True if the connection is closed""" + return self._handle is None + + @property + def readable(self): + """True if the connection is readable""" + return self._readable + + @property + def writable(self): + """True if the connection is writable""" + return self._writable + + def fileno(self): + """File descriptor or handle of the connection""" + self._check_closed() + return self._handle + + def close(self): + """Close the connection""" + if self._handle is not None: + try: + self._close() + finally: + self._handle = None + + def _detach(self): + """Stop managing the underlying file descriptor or handle.""" + self._handle = None + + def send_bytes(self, buf, offset=0, size=None): + """Send the bytes data from a bytes-like object""" + self._check_closed() + self._check_writable() + m = memoryview(buf) + if m.itemsize > 1: + m = m.cast('B') + n = m.nbytes + if offset < 0: + raise ValueError("offset is negative") + if n < offset: + raise ValueError("buffer length < offset") + if size is None: + size = n - offset + elif size < 0: + raise ValueError("size is negative") + elif offset + size > n: + raise ValueError("buffer length < offset + size") + self._send_bytes(m[offset:offset + size]) + + def send(self, obj): + """Send a (picklable) object""" + self._check_closed() + self._check_writable() + self._send_bytes(_ForkingPickler.dumps(obj)) + + def recv_bytes(self, maxlength=None): + """ + Receive bytes data as a bytes object. + """ + self._check_closed() + self._check_readable() + if maxlength is not None and maxlength < 0: + raise ValueError("negative maxlength") + buf = self._recv_bytes(maxlength) + if buf is None: + self._bad_message_length() + return buf.getvalue() + + def recv_bytes_into(self, buf, offset=0): + """ + Receive bytes data into a writeable bytes-like object. + Return the number of bytes read. + """ + self._check_closed() + self._check_readable() + with memoryview(buf) as m: + # Get bytesize of arbitrary buffer + itemsize = m.itemsize + bytesize = itemsize * len(m) + if offset < 0: + raise ValueError("negative offset") + elif offset > bytesize: + raise ValueError("offset too large") + result = self._recv_bytes() + size = result.tell() + if bytesize < offset + size: + raise BufferTooShort(result.getvalue()) + # Message can fit in dest + result.seek(0) + result.readinto(m[offset // itemsize : + (offset + size) // itemsize]) + return size + + def recv(self): + """Receive a (picklable) object""" + self._check_closed() + self._check_readable() + buf = self._recv_bytes() + return _ForkingPickler.loads(buf.getbuffer()) + + def poll(self, timeout=0.0): + """Whether there is any input available to be read""" + self._check_closed() + self._check_readable() + return self._poll(timeout) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +if _winapi: + + class PipeConnection(_ConnectionBase): + """ + Connection class based on a Windows named pipe. 
+ Overlapped I/O is used, so the handles must have been created + with FILE_FLAG_OVERLAPPED. + """ + _got_empty_message = False + _send_ov = None + + def _close(self, _CloseHandle=_winapi.CloseHandle): + ov = self._send_ov + if ov is not None: + # Interrupt WaitForMultipleObjects() in _send_bytes() + ov.cancel() + _CloseHandle(self._handle) + + def _send_bytes(self, buf): + if self._send_ov is not None: + # A connection should only be used by a single thread + raise ValueError("concurrent send_bytes() calls " + "are not supported") + ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) + self._send_ov = ov + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + self._send_ov = None + nwritten, err = ov.GetOverlappedResult(True) + if err == _winapi.ERROR_OPERATION_ABORTED: + # close() was called by another thread while + # WaitForMultipleObjects() was waiting for the overlapped + # operation. + raise OSError(errno.EPIPE, "handle is closed") + assert err == 0 + assert nwritten == len(buf) + + def _recv_bytes(self, maxsize=None): + if self._got_empty_message: + self._got_empty_message = False + return io.BytesIO() + else: + bsize = 128 if maxsize is None else min(maxsize, 128) + try: + ov, err = _winapi.ReadFile(self._handle, bsize, + overlapped=True) + + sentinel = object() + return_value = sentinel + try: + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nread, err = ov.GetOverlappedResult(True) + if err == 0: + f = io.BytesIO() + f.write(ov.getbuffer()) + return_value = f + elif err == _winapi.ERROR_MORE_DATA: + return_value = self._get_more_data(ov, maxsize) + except: + if return_value is sentinel: + raise + + if return_value is not sentinel: + return return_value + except OSError as e: + if e.winerror == _winapi.ERROR_BROKEN_PIPE: + raise EOFError + else: + raise + raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") + + def _poll(self, timeout): + if (self._got_empty_message or + _winapi.PeekNamedPipe(self._handle)[0] != 0): + return True + return bool(wait([self], timeout)) + + def _get_more_data(self, ov, maxsize): + buf = ov.getbuffer() + f = io.BytesIO() + f.write(buf) + left = _winapi.PeekNamedPipe(self._handle)[1] + assert left > 0 + if maxsize is not None and len(buf) + left > maxsize: + self._bad_message_length() + ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) + rbytes, err = ov.GetOverlappedResult(True) + assert err == 0 + assert rbytes == left + f.write(ov.getbuffer()) + return f + + +class Connection(_ConnectionBase): + """ + Connection class based on an arbitrary file descriptor (Unix only), or + a socket handle (Windows). 
+ """ + + if _winapi: + def _close(self, _close=_multiprocessing.closesocket): + _close(self._handle) + _write = _multiprocessing.send + _read = _multiprocessing.recv + else: + def _close(self, _close=os.close): + _close(self._handle) + _write = os.write + _read = os.read + + def _send(self, buf, write=_write): + remaining = len(buf) + while True: + n = write(self._handle, buf) + remaining -= n + if remaining == 0: + break + buf = buf[n:] + + def _recv(self, size, read=_read): + buf = io.BytesIO() + handle = self._handle + remaining = size + while remaining > 0: + to_read = min(BUFSIZE, remaining) + chunk = read(handle, to_read) + n = len(chunk) + if n == 0: + if remaining == size: + raise EOFError + else: + raise OSError("got end of file during message") + buf.write(chunk) + remaining -= n + return buf + + def _send_bytes(self, buf): + n = len(buf) + if n > 0x7fffffff: + pre_header = struct.pack("!i", -1) + header = struct.pack("!Q", n) + self._send(pre_header) + self._send(header) + self._send(buf) + else: + # For wire compatibility with 3.7 and lower + header = struct.pack("!i", n) + if n > 16384: + # The payload is large so Nagle's algorithm won't be triggered + # and we'd better avoid the cost of concatenation. + self._send(header) + self._send(buf) + else: + # Issue #20540: concatenate before sending, to avoid delays due + # to Nagle's algorithm on a TCP socket. + # Also note we want to avoid sending a 0-length buffer separately, + # to avoid "broken pipe" errors if the other end closed the pipe. + self._send(header + buf) + + def _recv_bytes(self, maxsize=None): + buf = self._recv(4) + size, = struct.unpack("!i", buf.getvalue()) + if size == -1: + buf = self._recv(8) + size, = struct.unpack("!Q", buf.getvalue()) + if maxsize is not None and size > maxsize: + return None + return self._recv(size) + + def _poll(self, timeout): + r = wait([self], timeout) + return bool(r) + + +# +# Public functions +# + +class Listener(object): + ''' + Returns a listener object. + + This is a wrapper for a bound socket which is 'listening' for + connections, or for a Windows named pipe. + ''' + def __init__(self, address=None, family=None, backlog=1, authkey=None): + family = family or (address and address_type(address)) \ + or default_family + _validate_family(family) + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + if family == 'AF_PIPE': + if address: + self._listener = PipeListener(address, backlog) + else: + for attempts in itertools.count(): + address = arbitrary_address(family) + try: + self._listener = PipeListener(address, backlog) + break + except OSError as e: + if attempts >= _MAX_PIPE_ATTEMPTS: + raise + if e.winerror not in (_winapi.ERROR_PIPE_BUSY, + _winapi.ERROR_ACCESS_DENIED): + raise + else: + address = address or arbitrary_address(family) + self._listener = SocketListener(address, family, backlog) + + self._authkey = authkey + + def accept(self): + ''' + Accept a connection on the bound socket or named pipe of `self`. + + Returns a `Connection` object. + ''' + if self._listener is None: + raise OSError('listener is closed') + + c = self._listener.accept() + if self._authkey is not None: + deliver_challenge(c, self._authkey) + answer_challenge(c, self._authkey) + return c + + def close(self): + ''' + Close the bound socket or named pipe of `self`. 
+ ''' + listener = self._listener + if listener is not None: + self._listener = None + listener.close() + + @property + def address(self): + return self._listener._address + + @property + def last_accepted(self): + return self._listener._last_accepted + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address, family=None, authkey=None): + ''' + Returns a connection to the address of a `Listener` + ''' + family = family or address_type(address) + _validate_family(family) + if family == 'AF_PIPE': + c = PipeClient(address) + else: + c = SocketClient(address) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + if authkey is not None: + answer_challenge(c, authkey) + deliver_challenge(c, authkey) + + return c + + +if sys.platform != 'win32': + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + if duplex: + s1, s2 = socket.socketpair() + s1.setblocking(True) + s2.setblocking(True) + c1 = Connection(s1.detach()) + c2 = Connection(s2.detach()) + else: + fd1, fd2 = os.pipe() + c1 = Connection(fd1, writable=False) + c2 = Connection(fd2, readable=False) + + return c1, c2 + +else: + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + if duplex: + openmode = _winapi.PIPE_ACCESS_DUPLEX + access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE + obsize, ibsize = BUFSIZE, BUFSIZE + else: + openmode = _winapi.PIPE_ACCESS_INBOUND + access = _winapi.GENERIC_WRITE + obsize, ibsize = 0, BUFSIZE + + for attempts in itertools.count(): + address = arbitrary_address('AF_PIPE') + try: + h1 = _winapi.CreateNamedPipe( + address, openmode | _winapi.FILE_FLAG_OVERLAPPED | + _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, + # default security descriptor: the handle cannot be inherited + _winapi.NULL + ) + break + except OSError as e: + if attempts >= _MAX_PIPE_ATTEMPTS: + raise + if e.winerror not in (_winapi.ERROR_PIPE_BUSY, + _winapi.ERROR_ACCESS_DENIED): + raise + h2 = _winapi.CreateFile( + address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + _winapi.SetNamedPipeHandleState( + h2, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + + overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) + _, err = overlapped.GetOverlappedResult(True) + assert err == 0 + + c1 = PipeConnection(h1, writable=duplex) + c2 = PipeConnection(h2, readable=duplex) + + return c1, c2 + +# +# Definitions for connections based on sockets +# + +class SocketListener(object): + ''' + Representation of a socket which is bound to an address and listening + ''' + def __init__(self, address, family, backlog=1): + self._socket = socket.socket(getattr(socket, family)) + try: + # SO_REUSEADDR has different semantics on Windows (issue #2550). 
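+ # Illustrative intent (a sketch): without SO_REUSEADDR, rebinding an
+ # address whose previous connections still linger in TIME_WAIT can
+ # fail on POSIX:
+ # Listener(('localhost', 6000)) # may raise "Address already in use"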
+ if os.name == 'posix': + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + self._socket.setblocking(True) + self._socket.bind(address) + self._socket.listen(backlog) + self._address = self._socket.getsockname() + except OSError: + self._socket.close() + raise + self._family = family + self._last_accepted = None + + if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): + # Linux abstract socket namespaces do not need to be explicitly unlinked + self._unlink = util.Finalize( + self, os.unlink, args=(address,), exitpriority=0 + ) + else: + self._unlink = None + + def accept(self): + s, self._last_accepted = self._socket.accept() + s.setblocking(True) + return Connection(s.detach()) + + def close(self): + try: + self._socket.close() + finally: + unlink = self._unlink + if unlink is not None: + self._unlink = None + unlink() + + +def SocketClient(address): + ''' + Return a connection object connected to the socket given by `address` + ''' + family = address_type(address) + with socket.socket( getattr(socket, family) ) as s: + s.setblocking(True) + s.connect(address) + return Connection(s.detach()) + +# +# Definitions for connections based on named pipes +# + +if sys.platform == 'win32': + + class PipeListener(object): + ''' + Representation of a named pipe + ''' + def __init__(self, address, backlog=None): + self._address = address + self._handle_queue = [self._new_handle(first=True)] + + self._last_accepted = None + util.sub_debug('listener created with address=%r', self._address) + self.close = util.Finalize( + self, PipeListener._finalize_pipe_listener, + args=(self._handle_queue, self._address), exitpriority=0 + ) + + def _new_handle(self, first=False): + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + return _winapi.CreateNamedPipe( + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL + ) + + def accept(self): + self._handle_queue.append(self._new_handle()) + handle = self._handle_queue.pop(0) + try: + ov = _winapi.ConnectNamedPipe(handle, overlapped=True) + except OSError as e: + if e.winerror != _winapi.ERROR_NO_DATA: + raise + # ERROR_NO_DATA can occur if a client has already connected, + # written data and then disconnected -- see Issue 14725. 
+ else: + try: + res = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + except: + ov.cancel() + _winapi.CloseHandle(handle) + raise + finally: + _, err = ov.GetOverlappedResult(True) + assert err == 0 + return PipeConnection(handle) + + @staticmethod + def _finalize_pipe_listener(queue, address): + util.sub_debug('closing listener with address=%r', address) + for handle in queue: + _winapi.CloseHandle(handle) + + def PipeClient(address): + ''' + Return a connection object connected to the pipe given by `address` + ''' + t = _init_timeout() + while 1: + try: + _winapi.WaitNamedPipe(address, 1000) + h = _winapi.CreateFile( + address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, + 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + except OSError as e: + if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): + raise + else: + break + else: + raise + + _winapi.SetNamedPipeHandleState( + h, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + return PipeConnection(h) + +# +# Authentication stuff +# + +MESSAGE_LENGTH = 40 # MUST be > 20 + +_CHALLENGE = b'#CHALLENGE#' +_WELCOME = b'#WELCOME#' +_FAILURE = b'#FAILURE#' + +# multiprocessing.connection Authentication Handshake Protocol Description +# (as documented for reference after reading the existing code) +# ============================================================================= +# +# On Windows: native pipes with "overlapped IO" are used to send the bytes, +# instead of the length prefix SIZE scheme described below. (ie: the OS deals +# with message sizes for us) +# +# Protocol error behaviors: +# +# On POSIX, any failure to receive the length prefix into SIZE, for SIZE greater +# than the requested maxsize to receive, or receiving fewer than SIZE bytes +# results in the connection being closed and auth to fail. +# +# On Windows, receiving too few bytes is never a low level _recv_bytes read +# error, receiving too many will trigger an error only if receive maxsize +# value was larger than 128 OR the if the data arrived in smaller pieces. +# +# Serving side Client side +# ------------------------------ --------------------------------------- +# 0. Open a connection on the pipe. +# 1. Accept connection. +# 2. Random 20+ bytes -> MESSAGE +# Modern servers always send +# more than 20 bytes and include +# a {digest} prefix on it with +# their preferred HMAC digest. +# Legacy ones send ==20 bytes. +# 3. send 4 byte length (net order) +# prefix followed by: +# b'#CHALLENGE#' + MESSAGE +# 4. Receive 4 bytes, parse as network byte +# order integer. If it is -1, receive an +# additional 8 bytes, parse that as network +# byte order. The result is the length of +# the data that follows -> SIZE. +# 5. Receive min(SIZE, 256) bytes -> M1 +# 6. Assert that M1 starts with: +# b'#CHALLENGE#' +# 7. Strip that prefix from M1 into -> M2 +# 7.1. Parse M2: if it is exactly 20 bytes in +# length this indicates a legacy server +# supporting only HMAC-MD5. Otherwise the +# 7.2. preferred digest is looked up from an +# expected "{digest}" prefix on M2. No prefix +# or unsupported digest? <- AuthenticationError +# 7.3. Put divined algorithm name in -> D_NAME +# 8. Compute HMAC-D_NAME of AUTHKEY, M2 -> C_DIGEST +# 9. Send 4 byte length prefix (net order) +# followed by C_DIGEST bytes. +# 10. Receive 4 or 4+8 byte length +# prefix (#4 dance) -> SIZE. +# 11. Receive min(SIZE, 256) -> C_D. +# 11.1. Parse C_D: legacy servers +# accept it as is, "md5" -> D_NAME +# 11.2. 
modern servers check the length +# of C_D, IF it is 16 bytes? +# 11.2.1. "md5" -> D_NAME +# and skip to step 12. +# 11.3. longer? expect and parse a "{digest}" +# prefix into -> D_NAME. +# Strip the prefix and store remaining +# bytes in -> C_D. +# 11.4. Don't like D_NAME? <- AuthenticationError +# 12. Compute HMAC-D_NAME of AUTHKEY, +# MESSAGE into -> M_DIGEST. +# 13. Compare M_DIGEST == C_D: +# 14a: Match? Send length prefix & +# b'#WELCOME#' +# <- RETURN +# 14b: Mismatch? Send len prefix & +# b'#FAILURE#' +# <- CLOSE & AuthenticationError +# 15. Receive 4 or 4+8 byte length prefix (net +# order) again as in #4 into -> SIZE. +# 16. Receive min(SIZE, 256) bytes -> M3. +# 17. Compare M3 == b'#WELCOME#': +# 17a. Match? <- RETURN +# 17b. Mismatch? <- CLOSE & AuthenticationError +# +# If this RETURNed, the connection remains open: it has been authenticated. +# +# Length prefixes are used consistently. Even on the legacy protocol, this +# was good fortune and allowed us to evolve the protocol by using the length +# of the opening challenge or length of the returned digest as a signal as +# to which protocol the other end supports. + +_ALLOWED_DIGESTS = frozenset( + {b'md5', b'sha256', b'sha384', b'sha3_256', b'sha3_384'}) +_MAX_DIGEST_LEN = max(len(_) for _ in _ALLOWED_DIGESTS) + +# Old hmac-md5 only server versions from Python <=3.11 sent a message of this +# length. It happens to not match the length of any supported digest so we can +# use a message of this length to indicate that we should work in backwards +# compatible md5-only mode without a {digest_name} prefix on our response. +_MD5ONLY_MESSAGE_LENGTH = 20 +_MD5_DIGEST_LEN = 16 +_LEGACY_LENGTHS = (_MD5ONLY_MESSAGE_LENGTH, _MD5_DIGEST_LEN) + + +def _get_digest_name_and_payload(message): # type: (bytes) -> tuple[str, bytes] + """Returns a digest name and the payload for a response hash. + + If a legacy protocol is detected based on the message length + or contents the digest name returned will be empty to indicate + legacy mode where MD5 and no digest prefix should be sent. + """ + # modern message format: b"{digest}payload" longer than 20 bytes + # legacy message format: 16 or 20 byte b"payload" + if len(message) in _LEGACY_LENGTHS: + # Either this was a legacy server challenge, or we're processing + # a reply from a legacy client that sent an unprefixed 16-byte + # HMAC-MD5 response. All messages using the modern protocol will + # be longer than either of these lengths. + return '', message + if (message.startswith(b'{') and + (curly := message.find(b'}', 1, _MAX_DIGEST_LEN+2)) > 0): + digest = message[1:curly] + if digest in _ALLOWED_DIGESTS: + payload = message[curly+1:] + return digest.decode('ascii'), payload + raise AuthenticationError( + 'unsupported message length, missing digest prefix, ' + f'or unsupported digest: {message=}') + + +def _create_response(authkey, message): + """Create a MAC based on authkey and message + + The MAC algorithm defaults to HMAC-MD5, unless MD5 is not available or + the message has a '{digest_name}' prefix. For legacy HMAC-MD5, the response + is the raw MAC, otherwise the response is prefixed with '{digest_name}', + e.g. b'{sha256}abcdefg...' + + Note: The MAC protects the entire message including the digest_name prefix. + """ + import hmac + digest_name = _get_digest_name_and_payload(message)[0] + # The MAC protects the entire message: digest header and payload. + if not digest_name: + # Legacy server without a {digest} prefix on message. + # Generate a legacy non-prefixed HMAC-MD5 reply. 
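+ # Shape of the two reply formats produced here (illustrative):
+ # legacy: hmac.new(authkey, message, 'md5').digest() -> 16 raw bytes
+ # modern: b'{sha256}' + hmac.new(authkey, message, 'sha256').digest()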
+ try: + return hmac.new(authkey, message, 'md5').digest() + except ValueError: + # HMAC-MD5 is not available (FIPS mode?), fall back to + # HMAC-SHA2-256 modern protocol. The legacy server probably + # doesn't support it and will reject us anyways. :shrug: + digest_name = 'sha256' + # Modern protocol, indicate the digest used in the reply. + response = hmac.new(authkey, message, digest_name).digest() + return b'{%s}%s' % (digest_name.encode('ascii'), response) + + +def _verify_challenge(authkey, message, response): + """Verify MAC challenge + + If our message did not include a digest_name prefix, the client is allowed + to select a stronger digest_name from _ALLOWED_DIGESTS. + + In case our message is prefixed, a client cannot downgrade to a weaker + algorithm, because the MAC is calculated over the entire message + including the '{digest_name}' prefix. + """ + import hmac + response_digest, response_mac = _get_digest_name_and_payload(response) + response_digest = response_digest or 'md5' + try: + expected = hmac.new(authkey, message, response_digest).digest() + except ValueError: + raise AuthenticationError(f'{response_digest=} unsupported') + if len(expected) != len(response_mac): + raise AuthenticationError( + f'expected {response_digest!r} of length {len(expected)} ' + f'got {len(response_mac)}') + if not hmac.compare_digest(expected, response_mac): + raise AuthenticationError('digest received was wrong') + + +def deliver_challenge(connection, authkey: bytes, digest_name='sha256'): + if not isinstance(authkey, bytes): + raise ValueError( + "Authkey must be bytes, not {0!s}".format(type(authkey))) + assert MESSAGE_LENGTH > _MD5ONLY_MESSAGE_LENGTH, "protocol constraint" + message = os.urandom(MESSAGE_LENGTH) + message = b'{%s}%s' % (digest_name.encode('ascii'), message) + # Even when sending a challenge to a legacy client that does not support + # digest prefixes, they'll take the entire thing as a challenge and + # respond to it with a raw HMAC-MD5. 
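+ # Wire format of the challenge sent below (illustrative, with the default
+ # digest_name='sha256' and MESSAGE_LENGTH=40):
+ # b'#CHALLENGE#' + b'{sha256}' + <40 random bytes>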
+ connection.send_bytes(_CHALLENGE + message)
+ response = connection.recv_bytes(256) # reject large message
+ try:
+ _verify_challenge(authkey, message, response)
+ except AuthenticationError:
+ connection.send_bytes(_FAILURE)
+ raise
+ else:
+ connection.send_bytes(_WELCOME)
+
+
+ def answer_challenge(connection, authkey: bytes):
+ if not isinstance(authkey, bytes):
+ raise ValueError(
+ "Authkey must be bytes, not {0!s}".format(type(authkey)))
+ message = connection.recv_bytes(256) # reject large message
+ if not message.startswith(_CHALLENGE):
+ raise AuthenticationError(
+ f'Protocol error, expected challenge: {message=}')
+ message = message[len(_CHALLENGE):]
+ if len(message) < _MD5ONLY_MESSAGE_LENGTH:
+ raise AuthenticationError(f'challenge too short: {len(message)} bytes')
+ digest = _create_response(authkey, message)
+ connection.send_bytes(digest)
+ response = connection.recv_bytes(256) # reject large message
+ if response != _WELCOME:
+ raise AuthenticationError('digest sent was rejected')
+
+ #
+ # Support for using xmlrpclib for serialization
+ #
+
+ class ConnectionWrapper(object):
+ def __init__(self, conn, dumps, loads):
+ self._conn = conn
+ self._dumps = dumps
+ self._loads = loads
+ for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
+ obj = getattr(conn, attr)
+ setattr(self, attr, obj)
+ def send(self, obj):
+ s = self._dumps(obj)
+ self._conn.send_bytes(s)
+ def recv(self):
+ s = self._conn.recv_bytes()
+ return self._loads(s)
+
+ def _xml_dumps(obj):
+ return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')
+
+ def _xml_loads(s):
+ (obj,), method = xmlrpclib.loads(s.decode('utf-8'))
+ return obj
+
+ class XmlListener(Listener):
+ def accept(self):
+ global xmlrpclib
+ import xmlrpc.client as xmlrpclib
+ obj = Listener.accept(self)
+ return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
+
+ def XmlClient(*args, **kwds):
+ global xmlrpclib
+ import xmlrpc.client as xmlrpclib
+ return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
+
+ #
+ # Wait
+ #
+
+ if sys.platform == 'win32':
+
+ def _exhaustive_wait(handles, timeout):
+ # Return ALL handles which are currently signalled. (Only
+ # returning the first signalled might create starvation issues.)
+ L = list(handles)
+ ready = []
+ # Windows limits WaitForMultipleObjects at 64 handles, and we use a
+ # few for synchronisation, so we switch to batched waits at 60.
+ if len(L) > 60:
+ try:
+ res = _winapi.BatchedWaitForMultipleObjects(L, False, timeout)
+ except TimeoutError:
+ return []
+ ready.extend(L[i] for i in res)
+ if res:
+ # Keep only the unsignalled handles that come after the
+ # first signalled one.
+ L = [h for i, h in enumerate(L) if i > res[0] and i not in res]
+ timeout = 0
+ while L:
+ short_L = L[:60] if len(L) > 60 else L
+ res = _winapi.WaitForMultipleObjects(short_L, False, timeout)
+ if res == WAIT_TIMEOUT:
+ break
+ elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
+ res -= WAIT_OBJECT_0
+ elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
+ res -= WAIT_ABANDONED_0
+ else:
+ raise RuntimeError('Should not get here')
+ ready.append(L[res])
+ L = L[res+1:]
+ timeout = 0
+ return ready
+
+ _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}
+
+ def wait(object_list, timeout=None):
+ '''
+ Wait till an object in object_list is ready/readable.
+
+ Returns list of those objects in object_list which are ready/readable.
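+
+ For example (an illustrative sketch):
+
+ r, w = Pipe(duplex=False)
+ w.send_bytes(b'x')
+ assert wait([r], timeout=1.0) == [r]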
+ ''' + if timeout is None: + timeout = INFINITE + elif timeout < 0: + timeout = 0 + else: + timeout = int(timeout * 1000 + 0.5) + + object_list = list(object_list) + waithandle_to_obj = {} + ov_list = [] + ready_objects = set() + ready_handles = set() + + try: + for o in object_list: + try: + fileno = getattr(o, 'fileno') + except AttributeError: + waithandle_to_obj[o.__index__()] = o + else: + # start an overlapped read of length zero + try: + ov, err = _winapi.ReadFile(fileno(), 0, True) + except OSError as e: + ov, err = None, e.winerror + if err not in _ready_errors: + raise + if err == _winapi.ERROR_IO_PENDING: + ov_list.append(ov) + waithandle_to_obj[ov.event] = o + else: + # If o.fileno() is an overlapped pipe handle and + # err == 0 then there is a zero length message + # in the pipe, but it HAS NOT been consumed... + if ov and sys.getwindowsversion()[:2] >= (6, 2): + # ... except on Windows 8 and later, where + # the message HAS been consumed. + try: + _, err = ov.GetOverlappedResult(False) + except OSError as e: + err = e.winerror + if not err and hasattr(o, '_got_empty_message'): + o._got_empty_message = True + ready_objects.add(o) + timeout = 0 + + ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) + finally: + # request that overlapped reads stop + for ov in ov_list: + ov.cancel() + + # wait for all overlapped reads to stop + for ov in ov_list: + try: + _, err = ov.GetOverlappedResult(True) + except OSError as e: + err = e.winerror + if err not in _ready_errors: + raise + if err != _winapi.ERROR_OPERATION_ABORTED: + o = waithandle_to_obj[ov.event] + ready_objects.add(o) + if err == 0: + # If o.fileno() is an overlapped pipe handle then + # a zero length message HAS been consumed. + if hasattr(o, '_got_empty_message'): + o._got_empty_message = True + + ready_objects.update(waithandle_to_obj[h] for h in ready_handles) + return [o for o in object_list if o in ready_objects] + +else: + + import selectors + + # poll/select have the advantage of not requiring any extra file + # descriptor, contrarily to epoll/kqueue (also, they require a single + # syscall). + if hasattr(selectors, 'PollSelector'): + _WaitSelector = selectors.PollSelector + else: + _WaitSelector = selectors.SelectSelector + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. + ''' + with _WaitSelector() as selector: + for obj in object_list: + selector.register(obj, selectors.EVENT_READ) + + if timeout is not None: + deadline = time.monotonic() + timeout + + while True: + ready = selector.select(timeout) + if ready: + return [key.fileobj for (key, events) in ready] + else: + if timeout is not None: + timeout = deadline - time.monotonic() + if timeout < 0: + return ready + +# +# Make connection and socket objects shareable if possible +# + +if sys.platform == 'win32': + def reduce_connection(conn): + handle = conn.fileno() + with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: + from . 
import resource_sharer + ds = resource_sharer.DupSocket(s) + return rebuild_connection, (ds, conn.readable, conn.writable) + def rebuild_connection(ds, readable, writable): + sock = ds.detach() + return Connection(sock.detach(), readable, writable) + reduction.register(Connection, reduce_connection) + + def reduce_pipe_connection(conn): + access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | + (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) + dh = reduction.DupHandle(conn.fileno(), access) + return rebuild_pipe_connection, (dh, conn.readable, conn.writable) + def rebuild_pipe_connection(dh, readable, writable): + handle = dh.detach() + return PipeConnection(handle, readable, writable) + reduction.register(PipeConnection, reduce_pipe_connection) + +else: + def reduce_connection(conn): + df = reduction.DupFd(conn.fileno()) + return rebuild_connection, (df, conn.readable, conn.writable) + def rebuild_connection(df, readable, writable): + fd = df.detach() + return Connection(fd, readable, writable) + reduction.register(Connection, reduce_connection) diff --git a/Python314_4_x64_Template/Lib/multiprocessing/context.py b/Python314_4_x64_Template/Lib/multiprocessing/context.py new file mode 100644 index 00000000..5fa6d7e4 --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/context.py @@ -0,0 +1,383 @@ +import os +import sys +import threading + +from . import process +from . import reduction + +__all__ = () + +# +# Exceptions +# + +class ProcessError(Exception): + pass + +class BufferTooShort(ProcessError): + pass + +class TimeoutError(ProcessError): + pass + +class AuthenticationError(ProcessError): + pass + +# +# Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py +# + +class BaseContext(object): + + ProcessError = ProcessError + BufferTooShort = BufferTooShort + TimeoutError = TimeoutError + AuthenticationError = AuthenticationError + + current_process = staticmethod(process.current_process) + parent_process = staticmethod(process.parent_process) + active_children = staticmethod(process.active_children) + + def cpu_count(self): + '''Returns the number of CPUs in the system''' + num = os.cpu_count() + if num is None: + raise NotImplementedError('cannot determine number of cpus') + else: + return num + + def Manager(self): + '''Returns a manager associated with a running server process + + The managers methods such as `Lock()`, `Condition()` and `Queue()` + can be used to create shared objects. 
+ ''' + from .managers import SyncManager + m = SyncManager(ctx=self.get_context()) + m.start() + return m + + def Pipe(self, duplex=True): + '''Returns two connection object connected by a pipe''' + from .connection import Pipe + return Pipe(duplex) + + def Lock(self): + '''Returns a non-recursive lock object''' + from .synchronize import Lock + return Lock(ctx=self.get_context()) + + def RLock(self): + '''Returns a recursive lock object''' + from .synchronize import RLock + return RLock(ctx=self.get_context()) + + def Condition(self, lock=None): + '''Returns a condition object''' + from .synchronize import Condition + return Condition(lock, ctx=self.get_context()) + + def Semaphore(self, value=1): + '''Returns a semaphore object''' + from .synchronize import Semaphore + return Semaphore(value, ctx=self.get_context()) + + def BoundedSemaphore(self, value=1): + '''Returns a bounded semaphore object''' + from .synchronize import BoundedSemaphore + return BoundedSemaphore(value, ctx=self.get_context()) + + def Event(self): + '''Returns an event object''' + from .synchronize import Event + return Event(ctx=self.get_context()) + + def Barrier(self, parties, action=None, timeout=None): + '''Returns a barrier object''' + from .synchronize import Barrier + return Barrier(parties, action, timeout, ctx=self.get_context()) + + def Queue(self, maxsize=0): + '''Returns a queue object''' + from .queues import Queue + return Queue(maxsize, ctx=self.get_context()) + + def JoinableQueue(self, maxsize=0): + '''Returns a queue object''' + from .queues import JoinableQueue + return JoinableQueue(maxsize, ctx=self.get_context()) + + def SimpleQueue(self): + '''Returns a queue object''' + from .queues import SimpleQueue + return SimpleQueue(ctx=self.get_context()) + + def Pool(self, processes=None, initializer=None, initargs=(), + maxtasksperchild=None): + '''Returns a process pool object''' + from .pool import Pool + return Pool(processes, initializer, initargs, maxtasksperchild, + context=self.get_context()) + + def RawValue(self, typecode_or_type, *args): + '''Returns a shared object''' + from .sharedctypes import RawValue + return RawValue(typecode_or_type, *args) + + def RawArray(self, typecode_or_type, size_or_initializer): + '''Returns a shared array''' + from .sharedctypes import RawArray + return RawArray(typecode_or_type, size_or_initializer) + + def Value(self, typecode_or_type, *args, lock=True): + '''Returns a synchronized shared object''' + from .sharedctypes import Value + return Value(typecode_or_type, *args, lock=lock, + ctx=self.get_context()) + + def Array(self, typecode_or_type, size_or_initializer, *, lock=True): + '''Returns a synchronized shared array''' + from .sharedctypes import Array + return Array(typecode_or_type, size_or_initializer, lock=lock, + ctx=self.get_context()) + + def freeze_support(self): + '''Check whether this is a fake forked process in a frozen executable. + If so then run code specified by commandline and exit. + ''' + # gh-140814: allow_none=True avoids locking in the default start + # method, which would cause a later set_start_method() to fail. + # None is safe to pass through: spawn.freeze_support() + # independently detects whether this process is a spawned + # child, so the start method check here is only an optimization. 
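> Note: every factory method on `BaseContext` above routes through `self.get_context()`, so all primitives created from one context stay bound to that context's start method. A minimal usage sketch with the public `multiprocessing` API:

```python
import multiprocessing as mp

def worker(q):
    q.put(mp.current_process().name)

if __name__ == '__main__':
    ctx = mp.get_context('spawn')   # a concrete BaseContext subclass
    q = ctx.Queue()                 # same code path as BaseContext.Queue above
    p = ctx.Process(target=worker, args=(q,))
    p.start()
    print(q.get())
    p.join()
```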
+ if (getattr(sys, 'frozen', False) + and self.get_start_method(allow_none=True) in ('spawn', None)): + from .spawn import freeze_support + freeze_support() + + def get_logger(self): + '''Return package logger -- if it does not already exist then + it is created. + ''' + from .util import get_logger + return get_logger() + + def log_to_stderr(self, level=None): + '''Turn on logging and add a handler which prints to stderr''' + from .util import log_to_stderr + return log_to_stderr(level) + + def allow_connection_pickling(self): + '''Install support for sending connections and sockets + between processes + ''' + # This is undocumented. In previous versions of multiprocessing + # its only effect was to make socket objects inheritable on Windows. + from . import connection # noqa: F401 + + def set_executable(self, executable): + '''Sets the path to a python.exe or pythonw.exe binary used to run + child processes instead of sys.executable when using the 'spawn' + start method. Useful for people embedding Python. + ''' + from .spawn import set_executable + set_executable(executable) + + def set_forkserver_preload(self, module_names): + '''Set list of module names to try to load in forkserver process. + This is really just a hint. + ''' + from .forkserver import set_forkserver_preload + set_forkserver_preload(module_names) + + def get_context(self, method=None): + if method is None: + return self + try: + ctx = _concrete_contexts[method] + except KeyError: + raise ValueError('cannot find context for %r' % method) from None + ctx._check_available() + return ctx + + def get_start_method(self, allow_none=False): + return self._name + + def set_start_method(self, method, force=False): + raise ValueError('cannot set start method of concrete context') + + @property + def reducer(self): + '''Controls how objects will be reduced to a form that can be + shared with other processes.''' + return globals().get('reduction') + + @reducer.setter + def reducer(self, reduction): + globals()['reduction'] = reduction + + def _check_available(self): + pass + +# +# Type of default context -- underlying context can be set at most once +# + +class Process(process.BaseProcess): + _start_method = None + @staticmethod + def _Popen(process_obj): + return _default_context.get_context().Process._Popen(process_obj) + + @staticmethod + def _after_fork(): + return _default_context.get_context().Process._after_fork() + +class DefaultContext(BaseContext): + Process = Process + + def __init__(self, context): + self._default_context = context + self._actual_context = None + + def get_context(self, method=None): + if method is None: + if self._actual_context is None: + self._actual_context = self._default_context + return self._actual_context + else: + return super().get_context(method) + + def set_start_method(self, method, force=False): + if self._actual_context is not None and not force: + raise RuntimeError('context has already been set') + if method is None and force: + self._actual_context = None + return + self._actual_context = self.get_context(method) + + def get_start_method(self, allow_none=False): + if self._actual_context is None: + if allow_none: + return None + self._actual_context = self._default_context + return self._actual_context._name + + def get_all_start_methods(self): + """Returns a list of the supported start methods, default first.""" + default = self._default_context.get_start_method() + start_method_names = [default] + start_method_names.extend( + name for name in _concrete_contexts if name != default + 
) + return start_method_names + + +# +# Context types for fixed start method +# + +if sys.platform != 'win32': + + class ForkProcess(process.BaseProcess): + _start_method = 'fork' + @staticmethod + def _Popen(process_obj): + from .popen_fork import Popen + return Popen(process_obj) + + class SpawnProcess(process.BaseProcess): + _start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_posix import Popen + return Popen(process_obj) + + @staticmethod + def _after_fork(): + # process is spawned, nothing to do + pass + + class ForkServerProcess(process.BaseProcess): + _start_method = 'forkserver' + @staticmethod + def _Popen(process_obj): + from .popen_forkserver import Popen + return Popen(process_obj) + + class ForkContext(BaseContext): + _name = 'fork' + Process = ForkProcess + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + class ForkServerContext(BaseContext): + _name = 'forkserver' + Process = ForkServerProcess + def _check_available(self): + if not reduction.HAVE_SEND_HANDLE: + raise ValueError('forkserver start method not available') + + _concrete_contexts = { + 'fork': ForkContext(), + 'spawn': SpawnContext(), + 'forkserver': ForkServerContext(), + } + # bpo-33725: running arbitrary code after fork() is no longer reliable + # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. + # gh-84559: We changed everyones default to a thread safeish one in 3.14. + if reduction.HAVE_SEND_HANDLE and sys.platform != 'darwin': + _default_context = DefaultContext(_concrete_contexts['forkserver']) + else: + _default_context = DefaultContext(_concrete_contexts['spawn']) + +else: # Windows + + class SpawnProcess(process.BaseProcess): + _start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_win32 import Popen + return Popen(process_obj) + + @staticmethod + def _after_fork(): + # process is spawned, nothing to do + pass + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + _concrete_contexts = { + 'spawn': SpawnContext(), + } + _default_context = DefaultContext(_concrete_contexts['spawn']) + +# +# Force the start method +# + +def _force_start_method(method): + _default_context._actual_context = _concrete_contexts[method] + +# +# Check that the current thread is spawning a child process +# + +_tls = threading.local() + +def get_spawning_popen(): + return getattr(_tls, 'spawning_popen', None) + +def set_spawning_popen(popen): + _tls.spawning_popen = popen + +def assert_spawning(obj): + if get_spawning_popen() is None: + raise RuntimeError( + '%s objects should only be shared between processes' + ' through inheritance' % type(obj).__name__ + ) diff --git a/Python314_4_x64_Template/Lib/multiprocessing/dummy/__init__.py b/Python314_4_x64_Template/Lib/multiprocessing/dummy/__init__.py new file mode 100644 index 00000000..7dc5d1c8 --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/dummy/__init__.py @@ -0,0 +1,126 @@ +# +# Support for the API of the multiprocessing package using threads +# +# multiprocessing/dummy/__init__.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
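> Note: `DefaultContext` in context.py above resolves its start method lazily: nothing is pinned until the first use or an explicit `set_start_method()`. A short sketch of that behavior (printed values depend on platform and Python version):

```python
import multiprocessing as mp

if __name__ == '__main__':
    print(mp.get_all_start_methods())            # default listed first
    print(mp.get_start_method(allow_none=True))  # None before first use
    mp.set_start_method('spawn')                 # pins DefaultContext._actual_context
    print(mp.get_start_method())                 # 'spawn'
```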
+# + +__all__ = [ + 'Process', 'current_process', 'active_children', 'freeze_support', + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', + 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' + ] + +# +# Imports +# + +import threading +import sys +import weakref +import array + +from .connection import Pipe +from threading import Lock, RLock, Semaphore, BoundedSemaphore +from threading import Event, Condition, Barrier +from queue import Queue + +# +# +# + +class DummyProcess(threading.Thread): + + def __init__(self, group=None, target=None, name=None, args=(), kwargs=None): + threading.Thread.__init__(self, group, target, name, args, kwargs) + self._pid = None + self._children = weakref.WeakKeyDictionary() + self._start_called = False + self._parent = current_process() + + def start(self): + if self._parent is not current_process(): + raise RuntimeError( + "Parent is {0!r} but current_process is {1!r}".format( + self._parent, current_process())) + self._start_called = True + if hasattr(self._parent, '_children'): + self._parent._children[self] = None + threading.Thread.start(self) + + @property + def exitcode(self): + if self._start_called and not self.is_alive(): + return 0 + else: + return None + +# +# +# + +Process = DummyProcess +current_process = threading.current_thread +current_process()._children = weakref.WeakKeyDictionary() + +def active_children(): + children = current_process()._children + for p in list(children): + if not p.is_alive(): + children.pop(p, None) + return list(children) + +def freeze_support(): + pass + +# +# +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +dict = dict +list = list + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + + @property + def value(self): + return self._value + + @value.setter + def value(self, value): + self._value = value + + def __repr__(self): + return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) + +def Manager(): + return sys.modules[__name__] + +def shutdown(): + pass + +def Pool(processes=None, initializer=None, initargs=()): + from ..pool import ThreadPool + return ThreadPool(processes, initializer, initargs) + +JoinableQueue = Queue diff --git a/Python313_13_x64_Template/Lib/multiprocessing/dummy/connection.py b/Python314_4_x64_Template/Lib/multiprocessing/dummy/connection.py similarity index 100% rename from Python313_13_x64_Template/Lib/multiprocessing/dummy/connection.py rename to Python314_4_x64_Template/Lib/multiprocessing/dummy/connection.py diff --git a/Python314_4_x64_Template/Lib/multiprocessing/forkserver.py b/Python314_4_x64_Template/Lib/multiprocessing/forkserver.py new file mode 100644 index 00000000..e431b3f1 --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/forkserver.py @@ -0,0 +1,429 @@ +import atexit +import errno +import os +import selectors +import signal +import socket +import struct +import sys +import threading +import warnings + +from . import AuthenticationError +from . import connection +from . import process +from .context import reduction +from . import resource_tracker +from . 
import spawn +from . import util + +__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', + 'set_forkserver_preload'] + +# +# +# + +MAXFDS_TO_SEND = 256 +SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t +_AUTHKEY_LEN = 32 # <= PIPEBUF so it fits a single write to an empty pipe. + +# +# Forkserver class +# + +class ForkServer(object): + + def __init__(self): + self._forkserver_authkey = None + self._forkserver_address = None + self._forkserver_alive_fd = None + self._forkserver_pid = None + self._inherited_fds = None + self._lock = threading.Lock() + self._preload_modules = ['__main__'] + + def _stop(self): + # Method used by unit tests to stop the server + with self._lock: + self._stop_unlocked() + + def _stop_unlocked(self): + if self._forkserver_pid is None: + return + + # close the "alive" file descriptor asks the server to stop + os.close(self._forkserver_alive_fd) + self._forkserver_alive_fd = None + + os.waitpid(self._forkserver_pid, 0) + self._forkserver_pid = None + + if not util.is_abstract_socket_namespace(self._forkserver_address): + os.unlink(self._forkserver_address) + self._forkserver_address = None + self._forkserver_authkey = None + + def set_forkserver_preload(self, modules_names): + '''Set list of module names to try to load in forkserver process.''' + if not all(type(mod) is str for mod in modules_names): + raise TypeError('module_names must be a list of strings') + self._preload_modules = modules_names + + def get_inherited_fds(self): + '''Return list of fds inherited from parent process. + + This returns None if the current process was not started by fork + server. + ''' + return self._inherited_fds + + def connect_to_new_process(self, fds): + '''Request forkserver to create a child process. + + Returns a pair of fds (status_r, data_w). The calling process can read + the child process's pid and (eventually) its returncode from status_r. + The calling process should write to data_w the pickled preparation and + process data. + ''' + self.ensure_running() + assert self._forkserver_authkey + if len(fds) + 4 >= MAXFDS_TO_SEND: + raise ValueError('too many fds') + with socket.socket(socket.AF_UNIX) as client: + client.connect(self._forkserver_address) + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + allfds = [child_r, child_w, self._forkserver_alive_fd, + resource_tracker.getfd()] + allfds += fds + try: + client.setblocking(True) + wrapped_client = connection.Connection(client.fileno()) + # The other side of this exchange happens in the child as + # implemented in main(). + try: + connection.answer_challenge( + wrapped_client, self._forkserver_authkey) + connection.deliver_challenge( + wrapped_client, self._forkserver_authkey) + finally: + wrapped_client._detach() + del wrapped_client + reduction.sendfds(client, allfds) + return parent_r, parent_w + except: + os.close(parent_r) + os.close(parent_w) + raise + finally: + os.close(child_r) + os.close(child_w) + + def ensure_running(self): + '''Make sure that a fork server is running. + + This can be called from any process. Note that usually a child + process will just reuse the forkserver started by its parent, so + ensure_running() will do nothing. + ''' + with self._lock: + resource_tracker.ensure_running() + if self._forkserver_pid is not None: + # forkserver was launched before, is it still running? 
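> Note: a hedged usage sketch of the forkserver machinery above, via the public wrappers exported at the bottom of this module. The 'forkserver' start method is POSIX-only; modules preloaded once in the server are inherited by every forked child.

```python
import multiprocessing as mp

def work(x):
    return x * x

if __name__ == '__main__':
    ctx = mp.get_context('forkserver')
    # Preload heavy modules once in the server process.
    ctx.set_forkserver_preload(['__main__'])
    with ctx.Pool(2) as pool:
        print(pool.map(work, range(4)))   # [0, 1, 4, 9]
```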
+ pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) + if not pid: + # still alive + return + # dead, launch it again + os.close(self._forkserver_alive_fd) + self._forkserver_authkey = None + self._forkserver_address = None + self._forkserver_alive_fd = None + self._forkserver_pid = None + + # gh-144503: sys_argv is passed as real argv elements after the + # ``-c cmd`` rather than repr'd into main_kws so that a large + # parent sys.argv cannot push the single ``-c`` command string + # over the OS per-argument length limit (MAX_ARG_STRLEN on Linux). + # The child sees them as sys.argv[1:]. + cmd = ('import sys; ' + 'from multiprocessing.forkserver import main; ' + 'main(%d, %d, %r, sys_argv=sys.argv[1:], **%r)') + + main_kws = {} + sys_argv = None + if self._preload_modules: + data = spawn.get_preparation_data('ignore') + if 'sys_path' in data: + main_kws['sys_path'] = data['sys_path'] + if 'init_main_from_path' in data: + main_kws['main_path'] = data['init_main_from_path'] + if 'sys_argv' in data: + sys_argv = data['sys_argv'] + + with socket.socket(socket.AF_UNIX) as listener: + address = connection.arbitrary_address('AF_UNIX') + listener.bind(address) + if not util.is_abstract_socket_namespace(address): + os.chmod(address, 0o600) + listener.listen() + + # all client processes own the write end of the "alive" pipe; + # when they all terminate the read end becomes ready. + alive_r, alive_w = os.pipe() + # A short lived pipe to initialize the forkserver authkey. + authkey_r, authkey_w = os.pipe() + try: + fds_to_pass = [listener.fileno(), alive_r, authkey_r] + main_kws['authkey_r'] = authkey_r + cmd %= (listener.fileno(), alive_r, self._preload_modules, + main_kws) + exe = spawn.get_executable() + args = [exe] + util._args_from_interpreter_flags() + args += ['-c', cmd] + if sys_argv is not None: + args += sys_argv + pid = util.spawnv_passfds(exe, args, fds_to_pass) + except: + os.close(alive_w) + os.close(authkey_w) + raise + finally: + os.close(alive_r) + os.close(authkey_r) + # Authenticate our control socket to prevent access from + # processes we have not shared this key with. 
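> Note: a minimal sketch (POSIX-only, since it uses `os.fork()`) of the "alive pipe" idiom used above: the server holds the read end and learns that every client is gone when `read()` returns EOF rather than data.

```python
import os

alive_r, alive_w = os.pipe()
pid = os.fork()
if pid == 0:                    # stand-in for the server process
    os.close(alive_w)
    data = os.read(alive_r, 1)  # blocks until all write ends are closed
    assert data == b''          # EOF, not a real message
    os._exit(0)
else:                           # stand-in for the client process
    os.close(alive_r)
    os.close(alive_w)           # last write end gone -> server sees EOF
    os.waitpid(pid, 0)
```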
+            try:
+                self._forkserver_authkey = os.urandom(_AUTHKEY_LEN)
+                os.write(authkey_w, self._forkserver_authkey)
+            finally:
+                os.close(authkey_w)
+            self._forkserver_address = address
+            self._forkserver_alive_fd = alive_w
+            self._forkserver_pid = pid
+
+#
+#
+#
+
+def main(listener_fd, alive_r, preload, main_path=None, sys_path=None,
+         *, sys_argv=None, authkey_r=None):
+    """Run forkserver."""
+    if authkey_r is not None:
+        try:
+            authkey = os.read(authkey_r, _AUTHKEY_LEN)
+            assert len(authkey) == _AUTHKEY_LEN, f'{len(authkey)} < {_AUTHKEY_LEN}'
+        finally:
+            os.close(authkey_r)
+    else:
+        authkey = b''
+
+    if preload:
+        if sys_argv is not None:
+            sys.argv[:] = sys_argv
+        if sys_path is not None:
+            sys.path[:] = sys_path
+        if '__main__' in preload and main_path is not None:
+            process.current_process()._inheriting = True
+            try:
+                spawn.import_main_path(main_path)
+            finally:
+                del process.current_process()._inheriting
+        for modname in preload:
+            try:
+                __import__(modname)
+            except ImportError:
+                pass
+
+    # gh-135335: flush stdout/stderr in case any of the preloaded modules
+    # wrote to them, otherwise children might inherit buffered data
+    util._flush_std_streams()
+
+    util._close_stdin()
+
+    sig_r, sig_w = os.pipe()
+    os.set_blocking(sig_r, False)
+    os.set_blocking(sig_w, False)
+
+    def sigchld_handler(*_unused):
+        # Dummy signal handler, doesn't do anything
+        pass
+
+    handlers = {
+        # unblocking SIGCHLD allows the wakeup fd to notify our event loop
+        signal.SIGCHLD: sigchld_handler,
+        # protect the process from ^C
+        signal.SIGINT: signal.SIG_IGN,
+    }
+    old_handlers = {sig: signal.signal(sig, val)
+                    for (sig, val) in handlers.items()}
+
+    # calling os.write() in the Python signal handler is racy
+    signal.set_wakeup_fd(sig_w)
+
+    # map child pids to client fds
+    pid_to_fd = {}
+
+    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
+            selectors.DefaultSelector() as selector:
+        _forkserver._forkserver_address = listener.getsockname()
+
+        selector.register(listener, selectors.EVENT_READ)
+        selector.register(alive_r, selectors.EVENT_READ)
+        selector.register(sig_r, selectors.EVENT_READ)
+
+        while True:
+            try:
+                while True:
+                    rfds = [key.fileobj for (key, events) in selector.select()]
+                    if rfds:
+                        break
+
+                if alive_r in rfds:
+                    # EOF because no more client processes left
+                    assert os.read(alive_r, 1) == b'', "Not at EOF?"
+                    raise SystemExit
+
+                if sig_r in rfds:
+                    # Got SIGCHLD
+                    os.read(sig_r, 65536)  # exhaust
+                    while True:
+                        # Scan for child processes
+                        try:
+                            pid, sts = os.waitpid(-1, os.WNOHANG)
+                        except ChildProcessError:
+                            break
+                        if pid == 0:
+                            break
+                        child_w = pid_to_fd.pop(pid, None)
+                        if child_w is not None:
+                            returncode = os.waitstatus_to_exitcode(sts)
+
+                            # Send exit code to client process
+                            try:
+                                write_signed(child_w, returncode)
+                            except BrokenPipeError:
+                                # client vanished
+                                pass
+                            os.close(child_w)
+                        else:
+                            # This shouldn't happen really
+                            warnings.warn('forkserver: waitpid returned '
+                                          'unexpected pid %d' % pid)
+
+                if listener in rfds:
+                    # Incoming fork request
+                    with listener.accept()[0] as s:
+                        try:
+                            if authkey:
+                                wrapped_s = connection.Connection(s.fileno())
+                                # The other side of this exchange happens
+                                # in connect_to_new_process().
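> Note: a POSIX-only sketch of the `signal.set_wakeup_fd()` pattern used in `main()` above: the kernel-delivered SIGCHLD makes a byte appear on the pipe, so a selector loop can wake up and reap children without racy writes from a Python signal handler.

```python
import os
import selectors
import signal

sig_r, sig_w = os.pipe()
os.set_blocking(sig_r, False)
os.set_blocking(sig_w, False)
signal.signal(signal.SIGCHLD, lambda *args: None)  # dummy handler unblocks SIGCHLD
signal.set_wakeup_fd(sig_w)                        # byte written on signal delivery

selector = selectors.DefaultSelector()
selector.register(sig_r, selectors.EVENT_READ)

pid = os.fork()
if pid == 0:
    os._exit(7)
selector.select()              # wakes when the SIGCHLD byte arrives
os.read(sig_r, 65536)          # exhaust wakeup bytes
_, sts = os.waitpid(pid, 0)
print(os.waitstatus_to_exitcode(sts))  # 7
```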
+ try: + connection.deliver_challenge( + wrapped_s, authkey) + connection.answer_challenge( + wrapped_s, authkey) + finally: + wrapped_s._detach() + del wrapped_s + # Receive fds from client + fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) + except (EOFError, BrokenPipeError, AuthenticationError): + s.close() + continue + if len(fds) > MAXFDS_TO_SEND: + raise RuntimeError( + "Too many ({0:n}) fds to send".format( + len(fds))) + child_r, child_w, *fds = fds + s.close() + pid = os.fork() + if pid == 0: + # Child + code = 1 + try: + listener.close() + selector.close() + unused_fds = [alive_r, child_w, sig_r, sig_w] + unused_fds.extend(pid_to_fd.values()) + atexit._clear() + atexit.register(util._exit_function) + code = _serve_one(child_r, fds, + unused_fds, + old_handlers) + except Exception: + sys.excepthook(*sys.exc_info()) + sys.stderr.flush() + finally: + atexit._run_exitfuncs() + os._exit(code) + else: + # Send pid to client process + try: + write_signed(child_w, pid) + except BrokenPipeError: + # client vanished + pass + pid_to_fd[pid] = child_w + os.close(child_r) + for fd in fds: + os.close(fd) + + except OSError as e: + if e.errno != errno.ECONNABORTED: + raise + + +def _serve_one(child_r, fds, unused_fds, handlers): + # close unnecessary stuff and reset signal handlers + signal.set_wakeup_fd(-1) + for sig, val in handlers.items(): + signal.signal(sig, val) + for fd in unused_fds: + os.close(fd) + + (_forkserver._forkserver_alive_fd, + resource_tracker._resource_tracker._fd, + *_forkserver._inherited_fds) = fds + + # Run process object received over pipe + parent_sentinel = os.dup(child_r) + code = spawn._main(child_r, parent_sentinel) + + return code + + +# +# Read and write signed numbers +# + +def read_signed(fd): + data = bytearray(SIGNED_STRUCT.size) + unread = memoryview(data) + while unread: + count = os.readinto(fd, unread) + if count == 0: + raise EOFError('unexpected EOF') + unread = unread[count:] + + return SIGNED_STRUCT.unpack(data)[0] + +def write_signed(fd, n): + msg = SIGNED_STRUCT.pack(n) + while msg: + nbytes = os.write(fd, msg) + if nbytes == 0: + raise RuntimeError('should not get here') + msg = msg[nbytes:] + +# +# +# + +_forkserver = ForkServer() +ensure_running = _forkserver.ensure_running +get_inherited_fds = _forkserver.get_inherited_fds +connect_to_new_process = _forkserver.connect_to_new_process +set_forkserver_preload = _forkserver.set_forkserver_preload diff --git a/Python313_13_x64_Template/Lib/multiprocessing/heap.py b/Python314_4_x64_Template/Lib/multiprocessing/heap.py similarity index 100% rename from Python313_13_x64_Template/Lib/multiprocessing/heap.py rename to Python314_4_x64_Template/Lib/multiprocessing/heap.py diff --git a/Python314_4_x64_Template/Lib/multiprocessing/managers.py b/Python314_4_x64_Template/Lib/multiprocessing/managers.py new file mode 100644 index 00000000..91bcf243 --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/managers.py @@ -0,0 +1,1438 @@ +# +# Module providing manager classes for dealing +# with shared objects +# +# multiprocessing/managers.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] + +# +# Imports +# + +import sys +import threading +import signal +import array +import collections.abc +import queue +import time +import types +import os +from os import getpid + +from traceback import format_exc + +from . 
import connection +from .context import reduction, get_spawning_popen, ProcessError +from . import pool +from . import process +from . import util +from . import get_context +try: + from . import shared_memory +except ImportError: + HAS_SHMEM = False +else: + HAS_SHMEM = True + __all__.append('SharedMemoryManager') + +# +# Register some things for pickling +# + +def reduce_array(a): + return array.array, (a.typecode, a.tobytes()) +reduction.register(array.array, reduce_array) + +view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] +def rebuild_as_list(obj): + return list, (list(obj),) +for view_type in view_types: + reduction.register(view_type, rebuild_as_list) +del view_type, view_types + +# +# Type for identifying shared objects +# + +class Token(object): + ''' + Type to uniquely identify a shared object + ''' + __slots__ = ('typeid', 'address', 'id') + + def __init__(self, typeid, address, id): + (self.typeid, self.address, self.id) = (typeid, address, id) + + def __getstate__(self): + return (self.typeid, self.address, self.id) + + def __setstate__(self, state): + (self.typeid, self.address, self.id) = state + + def __repr__(self): + return '%s(typeid=%r, address=%r, id=%r)' % \ + (self.__class__.__name__, self.typeid, self.address, self.id) + +# +# Function for communication with a manager's server process +# + +def dispatch(c, id, methodname, args=(), kwds={}): + ''' + Send a message to manager using connection `c` and return response + ''' + c.send((id, methodname, args, kwds)) + kind, result = c.recv() + if kind == '#RETURN': + return result + try: + raise convert_to_error(kind, result) + finally: + del result # break reference cycle + +def convert_to_error(kind, result): + if kind == '#ERROR': + return result + elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): + if not isinstance(result, str): + raise TypeError( + "Result {0!r} (kind '{1}') type is {2}, not str".format( + result, kind, type(result))) + if kind == '#UNSERIALIZABLE': + return RemoteError('Unserializable message: %s\n' % result) + else: + return RemoteError(result) + else: + return ValueError('Unrecognized message type {!r}'.format(kind)) + +class RemoteError(Exception): + def __str__(self): + return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) + +# +# Functions for finding the method names of an object +# + +def all_methods(obj): + ''' + Return a list of names of methods of `obj` + ''' + temp = [] + for name in dir(obj): + func = getattr(obj, name) + if callable(func): + temp.append(name) + return temp + +def public_methods(obj): + ''' + Return a list of names of methods of `obj` which do not start with '_' + ''' + return [name for name in all_methods(obj) if name[0] != '_'] + +# +# Server which is run in a process controlled by a manager +# + +class Server(object): + ''' + Server class which runs in a process controlled by a manager object + ''' + public = ['shutdown', 'create', 'accept_connection', 'get_methods', + 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] + + def __init__(self, registry, address, authkey, serializer): + if not isinstance(authkey, bytes): + raise TypeError( + "Authkey {0!r} is type {1!s}, not bytes".format( + authkey, type(authkey))) + self.registry = registry + self.authkey = process.AuthenticationString(authkey) + Listener, Client = listener_client[serializer] + + # do authentication later + self.listener = Listener(address=address, backlog=128) + self.address = self.listener.address + + self.id_to_obj = {'0': (None, ())} + 
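> Note: an illustrative sketch of the wire framing that `dispatch()` above assumes: each request is a `(ident, methodname, args, kwds)` tuple and each reply is `('#RETURN' | '#ERROR' | '#TRACEBACK', payload)`. This simulates both ends over a local pipe instead of a real manager server.

```python
from multiprocessing.connection import Pipe

server_end, client_end = Pipe()

# Client side: the tuple dispatch(c, None, 'dummy') would send.
client_end.send((None, 'dummy', (), {}))

# Server side: echo the reply shape Server._handle_request produces.
ident, methodname, args, kwds = server_end.recv()
server_end.send(('#RETURN', None))

kind, result = client_end.recv()
assert (kind, result) == ('#RETURN', None)
```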
self.id_to_refcount = {} + self.id_to_local_proxy_obj = {} + self.mutex = threading.Lock() + + def serve_forever(self): + ''' + Run the server forever + ''' + self.stop_event = threading.Event() + process.current_process()._manager_server = self + try: + accepter = threading.Thread(target=self.accepter) + accepter.daemon = True + accepter.start() + try: + while not self.stop_event.is_set(): + self.stop_event.wait(1) + except (KeyboardInterrupt, SystemExit): + pass + finally: + if sys.stdout != sys.__stdout__: # what about stderr? + util.debug('resetting stdout, stderr') + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + sys.exit(0) + + def accepter(self): + while True: + try: + c = self.listener.accept() + except OSError: + continue + t = threading.Thread(target=self.handle_request, args=(c,)) + t.daemon = True + t.start() + + def _handle_request(self, c): + request = None + try: + connection.deliver_challenge(c, self.authkey) + connection.answer_challenge(c, self.authkey) + request = c.recv() + ignore, funcname, args, kwds = request + assert funcname in self.public, '%r unrecognized' % funcname + func = getattr(self, funcname) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + try: + result = func(c, *args, **kwds) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + msg = ('#RETURN', result) + + try: + c.send(msg) + except Exception as e: + try: + c.send(('#TRACEBACK', format_exc())) + except Exception: + pass + util.info('Failure to send message: %r', msg) + util.info(' ... request was %r', request) + util.info(' ... exception was %r', e) + + def handle_request(self, conn): + ''' + Handle a new connection + ''' + try: + self._handle_request(conn) + except SystemExit: + # Server.serve_client() calls sys.exit(0) on EOF + pass + finally: + conn.close() + + def serve_client(self, conn): + ''' + Handle requests from the proxies in a particular process/thread + ''' + util.debug('starting server thread to service %r', + threading.current_thread().name) + + recv = conn.recv + send = conn.send + id_to_obj = self.id_to_obj + + while not self.stop_event.is_set(): + + try: + methodname = obj = None + request = recv() + ident, methodname, args, kwds = request + try: + obj, exposed, gettypeid = id_to_obj[ident] + except KeyError as ke: + try: + obj, exposed, gettypeid = \ + self.id_to_local_proxy_obj[ident] + except KeyError: + raise ke + + if methodname not in exposed: + raise AttributeError( + 'method %r of %r object is not in exposed=%r' % + (methodname, type(obj), exposed) + ) + + function = getattr(obj, methodname) + + try: + res = function(*args, **kwds) + except Exception as e: + msg = ('#ERROR', e) + else: + typeid = gettypeid and gettypeid.get(methodname, None) + if typeid: + rident, rexposed = self.create(conn, typeid, res) + token = Token(typeid, self.address, rident) + msg = ('#PROXY', (rexposed, token)) + else: + msg = ('#RETURN', res) + + except AttributeError: + if methodname is None: + msg = ('#TRACEBACK', format_exc()) + else: + try: + fallback_func = self.fallback_mapping[methodname] + result = fallback_func( + self, conn, ident, obj, *args, **kwds + ) + msg = ('#RETURN', result) + except Exception: + msg = ('#TRACEBACK', format_exc()) + + except EOFError: + util.debug('got EOF -- exiting thread serving %r', + threading.current_thread().name) + sys.exit(0) + + except Exception: + msg = ('#TRACEBACK', format_exc()) + + try: + try: + send(msg) + except Exception: + send(('#UNSERIALIZABLE', format_exc())) + except Exception as e: + 
util.info('exception in thread serving %r', + threading.current_thread().name) + util.info(' ... message was %r', msg) + util.info(' ... exception was %r', e) + conn.close() + sys.exit(1) + + def fallback_getvalue(self, conn, ident, obj): + return obj + + def fallback_str(self, conn, ident, obj): + return str(obj) + + def fallback_repr(self, conn, ident, obj): + return repr(obj) + + fallback_mapping = { + '__str__':fallback_str, + '__repr__':fallback_repr, + '#GETVALUE':fallback_getvalue + } + + def dummy(self, c): + pass + + def debug_info(self, c): + ''' + Return some info --- useful to spot problems with refcounting + ''' + # Perhaps include debug info about 'c'? + with self.mutex: + result = [] + keys = list(self.id_to_refcount.keys()) + keys.sort() + for ident in keys: + if ident != '0': + result.append(' %s: refcount=%s\n %s' % + (ident, self.id_to_refcount[ident], + str(self.id_to_obj[ident][0])[:75])) + return '\n'.join(result) + + def number_of_objects(self, c): + ''' + Number of shared objects + ''' + # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' + return len(self.id_to_refcount) + + def shutdown(self, c): + ''' + Shutdown this process + ''' + try: + util.debug('manager received shutdown message') + c.send(('#RETURN', None)) + except: + import traceback + traceback.print_exc() + finally: + self.stop_event.set() + + def create(self, c, typeid, /, *args, **kwds): + ''' + Create a new shared object and return its id + ''' + with self.mutex: + callable, exposed, method_to_typeid, proxytype = \ + self.registry[typeid] + + if callable is None: + if kwds or (len(args) != 1): + raise ValueError( + "Without callable, must have one non-keyword argument") + obj = args[0] + else: + obj = callable(*args, **kwds) + + if exposed is None: + exposed = public_methods(obj) + if method_to_typeid is not None: + if not isinstance(method_to_typeid, dict): + raise TypeError( + "Method_to_typeid {0!r}: type {1!s}, not dict".format( + method_to_typeid, type(method_to_typeid))) + exposed = list(exposed) + list(method_to_typeid) + + ident = '%x' % id(obj) # convert to string because xmlrpclib + # only has 32 bit signed integers + util.debug('%r callable returned object with id %r', typeid, ident) + + self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) + if ident not in self.id_to_refcount: + self.id_to_refcount[ident] = 0 + + self.incref(c, ident) + return ident, tuple(exposed) + + def get_methods(self, c, token): + ''' + Return the methods of the shared object indicated by token + ''' + return tuple(self.id_to_obj[token.id][1]) + + def accept_connection(self, c, name): + ''' + Spawn a new thread to serve this connection + ''' + threading.current_thread().name = name + c.send(('#RETURN', None)) + self.serve_client(c) + + def incref(self, c, ident): + with self.mutex: + try: + self.id_to_refcount[ident] += 1 + except KeyError as ke: + # If no external references exist but an internal (to the + # manager) still does and a new external reference is created + # from it, restore the manager's tracking of it from the + # previously stashed internal ref. 
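> Note: a hedged end-to-end sketch of the typeid machinery that `Server.create()` above implements; `MathServer` is a made-up example class, not part of the module.

```python
from multiprocessing.managers import BaseManager

class MathServer:
    def add(self, a, b):
        return a + b

class MathManager(BaseManager):
    pass

MathManager.register('Math', MathServer)   # records a typeid in _registry

if __name__ == '__main__':
    with MathManager() as mgr:
        math = mgr.Math()                  # Server.create() runs in the server
        print(math.add(2, 3))              # proxied via _callmethod('add', ...)
```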
+ if ident in self.id_to_local_proxy_obj: + self.id_to_refcount[ident] = 1 + self.id_to_obj[ident] = \ + self.id_to_local_proxy_obj[ident] + util.debug('Server re-enabled tracking & INCREF %r', ident) + else: + raise ke + + def decref(self, c, ident): + if ident not in self.id_to_refcount and \ + ident in self.id_to_local_proxy_obj: + util.debug('Server DECREF skipping %r', ident) + return + + with self.mutex: + if self.id_to_refcount[ident] <= 0: + raise AssertionError( + "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( + ident, self.id_to_obj[ident], + self.id_to_refcount[ident])) + self.id_to_refcount[ident] -= 1 + if self.id_to_refcount[ident] == 0: + del self.id_to_refcount[ident] + + if ident not in self.id_to_refcount: + # Two-step process in case the object turns out to contain other + # proxy objects (e.g. a managed list of managed lists). + # Otherwise, deleting self.id_to_obj[ident] would trigger the + # deleting of the stored value (another managed object) which would + # in turn attempt to acquire the mutex that is already held here. + self.id_to_obj[ident] = (None, (), None) # thread-safe + util.debug('disposing of obj with id %r', ident) + with self.mutex: + del self.id_to_obj[ident] + + +# +# Class to represent state of a manager +# + +class State(object): + __slots__ = ['value'] + INITIAL = 0 + STARTED = 1 + SHUTDOWN = 2 + +# +# Mapping from serializer name to Listener and Client types +# + +listener_client = { + 'pickle' : (connection.Listener, connection.Client), + 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) + } + +# +# Definition of BaseManager +# + +class BaseManager(object): + ''' + Base class for managers + ''' + _registry = {} + _Server = Server + + def __init__(self, address=None, authkey=None, serializer='pickle', + ctx=None, *, shutdown_timeout=1.0): + if authkey is None: + authkey = process.current_process().authkey + self._address = address # XXX not final address if eg ('', 0) + self._authkey = process.AuthenticationString(authkey) + self._state = State() + self._state.value = State.INITIAL + self._serializer = serializer + self._Listener, self._Client = listener_client[serializer] + self._ctx = ctx or get_context() + self._shutdown_timeout = shutdown_timeout + + def get_server(self): + ''' + Return server object with serve_forever() method and address attribute + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return Server(self._registry, self._address, + self._authkey, self._serializer) + + def connect(self): + ''' + Connect manager object to the server process + ''' + Listener, Client = listener_client[self._serializer] + conn = Client(self._address, authkey=self._authkey) + dispatch(conn, None, 'dummy') + self._state.value = State.STARTED + + def start(self, initializer=None, initargs=()): + ''' + Spawn a server process for this manager object + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + # pipe over which we 
will retrieve address of server + reader, writer = connection.Pipe(duplex=False) + + # spawn process which runs a server + self._process = self._ctx.Process( + target=type(self)._run_server, + args=(self._registry, self._address, self._authkey, + self._serializer, writer, initializer, initargs), + ) + ident = ':'.join(str(i) for i in self._process._identity) + self._process.name = type(self).__name__ + '-' + ident + self._process.start() + + # get address of server + writer.close() + self._address = reader.recv() + reader.close() + + # register a finalizer + self._state.value = State.STARTED + self.shutdown = util.Finalize( + self, type(self)._finalize_manager, + args=(self._process, self._address, self._authkey, self._state, + self._Client, self._shutdown_timeout), + exitpriority=0 + ) + + @classmethod + def _run_server(cls, registry, address, authkey, serializer, writer, + initializer=None, initargs=()): + ''' + Create a server, report its address and run it + ''' + # bpo-36368: protect server process from KeyboardInterrupt signals + signal.signal(signal.SIGINT, signal.SIG_IGN) + + if initializer is not None: + initializer(*initargs) + + # create server + server = cls._Server(registry, address, authkey, serializer) + + # inform parent process of the server's address + writer.send(server.address) + writer.close() + + # run the manager + util.info('manager serving at %r', server.address) + server.serve_forever() + + def _create(self, typeid, /, *args, **kwds): + ''' + Create a new shared object; return the token and exposed tuple + ''' + assert self._state.value == State.STARTED, 'server not yet started' + conn = self._Client(self._address, authkey=self._authkey) + try: + id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) + finally: + conn.close() + return Token(typeid, self._address, id), exposed + + def join(self, timeout=None): + ''' + Join the manager process (if it has been spawned) + ''' + if self._process is not None: + self._process.join(timeout) + if not self._process.is_alive(): + self._process = None + + def _debug_info(self): + ''' + Return some info about the servers shared objects and connections + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'debug_info') + finally: + conn.close() + + def _number_of_objects(self): + ''' + Return the number of shared objects + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'number_of_objects') + finally: + conn.close() + + def __enter__(self): + if self._state.value == State.INITIAL: + self.start() + if self._state.value != State.STARTED: + if self._state.value == State.INITIAL: + raise ProcessError("Unable to start server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown() + + @staticmethod + def _finalize_manager(process, address, authkey, state, _Client, + shutdown_timeout): + ''' + Shutdown the manager process; will be registered as a finalizer + ''' + if process.is_alive(): + util.info('sending shutdown message to manager') + try: + conn = _Client(address, authkey=authkey) + try: + dispatch(conn, None, 'shutdown') + finally: + conn.close() + except Exception: + pass + + process.join(timeout=shutdown_timeout) + if process.is_alive(): + util.info('manager still alive') + if hasattr(process, 'terminate'): + 
util.info('trying to `terminate()` manager process') + process.terminate() + process.join(timeout=shutdown_timeout) + if process.is_alive(): + util.info('manager still alive after terminate') + process.kill() + process.join() + + state.value = State.SHUTDOWN + try: + del BaseProxy._address_to_local[address] + except KeyError: + pass + + @property + def address(self): + return self._address + + @classmethod + def register(cls, typeid, callable=None, proxytype=None, exposed=None, + method_to_typeid=None, create_method=True): + ''' + Register a typeid with the manager type + ''' + if '_registry' not in cls.__dict__: + cls._registry = cls._registry.copy() + + if proxytype is None: + proxytype = AutoProxy + + exposed = exposed or getattr(proxytype, '_exposed_', None) + + method_to_typeid = method_to_typeid or \ + getattr(proxytype, '_method_to_typeid_', None) + + if method_to_typeid: + for key, value in list(method_to_typeid.items()): # isinstance? + assert type(key) is str, '%r is not a string' % key + assert type(value) is str, '%r is not a string' % value + + cls._registry[typeid] = ( + callable, exposed, method_to_typeid, proxytype + ) + + if create_method: + def temp(self, /, *args, **kwds): + util.debug('requesting creation of a shared %r object', typeid) + token, exp = self._create(typeid, *args, **kwds) + proxy = proxytype( + token, self._serializer, manager=self, + authkey=self._authkey, exposed=exp + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + temp.__name__ = typeid + setattr(cls, typeid, temp) + +# +# Subclass of set which get cleared after a fork +# + +class ProcessLocalSet(set): + def __init__(self): + util.register_after_fork(self, lambda obj: obj.clear()) + def __reduce__(self): + return type(self), () + +# +# Definition of BaseProxy +# + +class BaseProxy(object): + ''' + A base for proxies of shared objects + ''' + _address_to_local = {} + _mutex = util.ForkAwareThreadLock() + + # Each instance gets a `_serial` number. Unlike `id(...)`, this number + # is never reused. + _next_serial = 1 + + def __init__(self, token, serializer, manager=None, + authkey=None, exposed=None, incref=True, manager_owned=False): + with BaseProxy._mutex: + tls_serials = BaseProxy._address_to_local.get(token.address, None) + if tls_serials is None: + tls_serials = util.ForkAwareLocal(), ProcessLocalSet() + BaseProxy._address_to_local[token.address] = tls_serials + + self._serial = BaseProxy._next_serial + BaseProxy._next_serial += 1 + + # self._tls is used to record the connection used by this + # thread to communicate with the manager at token.address + self._tls = tls_serials[0] + + # self._all_serials is a set used to record the identities of all + # shared objects for which the current process owns references and + # which are in the manager at token.address + self._all_serials = tls_serials[1] + + self._token = token + self._id = self._token.id + self._manager = manager + self._serializer = serializer + self._Client = listener_client[serializer][1] + + # Should be set to True only when a proxy object is being created + # on the manager server; primary use case: nested proxy objects. + # RebuildProxy detects when a proxy is being created on the manager + # and sets this value appropriately. 
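> Note: a small sketch poking at the `BaseProxy` plumbing described above; `_token` and `_callmethod` are internals shown only for study, not public API.

```python
from multiprocessing import Manager

if __name__ == '__main__':
    with Manager() as mgr:
        d = mgr.dict()                 # a DictProxy, i.e. a BaseProxy subclass
        d['k'] = 1
        print(d._token)                # Token(typeid='dict', address=..., id=...)
        print(d._callmethod('get', ('k',)))  # 1, the same path __getitem__ takes
```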
+ self._owned_by_manager = manager_owned + + if authkey is not None: + self._authkey = process.AuthenticationString(authkey) + elif self._manager is not None: + self._authkey = self._manager._authkey + else: + self._authkey = process.current_process().authkey + + if incref: + self._incref() + + util.register_after_fork(self, BaseProxy._after_fork) + + def _connect(self): + util.debug('making connection to manager') + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'accept_connection', (name,)) + self._tls.connection = conn + + def _callmethod(self, methodname, args=(), kwds={}): + ''' + Try to call a method of the referent and return a copy of the result + ''' + try: + conn = self._tls.connection + except AttributeError: + util.debug('thread %r does not own a connection', + threading.current_thread().name) + self._connect() + conn = self._tls.connection + + conn.send((self._id, methodname, args, kwds)) + kind, result = conn.recv() + + if kind == '#RETURN': + return result + elif kind == '#PROXY': + exposed, token = result + proxytype = self._manager._registry[token.typeid][-1] + token.address = self._token.address + proxy = proxytype( + token, self._serializer, manager=self._manager, + authkey=self._authkey, exposed=exposed + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + try: + raise convert_to_error(kind, result) + finally: + del result # break reference cycle + + def _getvalue(self): + ''' + Get a copy of the value of the referent + ''' + return self._callmethod('#GETVALUE') + + def _incref(self): + if self._owned_by_manager: + util.debug('owned_by_manager skipped INCREF of %r', self._token.id) + return + + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'incref', (self._id,)) + util.debug('INCREF %r', self._token.id) + + self._all_serials.add(self._serial) + + state = self._manager and self._manager._state + + self._close = util.Finalize( + self, BaseProxy._decref, + args=(self._token, self._serial, self._authkey, state, + self._tls, self._all_serials, self._Client), + exitpriority=10 + ) + + @staticmethod + def _decref(token, serial, authkey, state, tls, idset, _Client): + idset.discard(serial) + + # check whether manager is still alive + if state is None or state.value == State.STARTED: + # tell manager this process no longer cares about referent + try: + util.debug('DECREF %r', token.id) + conn = _Client(token.address, authkey=authkey) + dispatch(conn, None, 'decref', (token.id,)) + except Exception as e: + util.debug('... 
decref failed %s', e) + + else: + util.debug('DECREF %r -- manager already shutdown', token.id) + + # check whether we can close this thread's connection because + # the process owns no more references to objects for this manager + if not idset and hasattr(tls, 'connection'): + util.debug('thread %r has no more proxies so closing conn', + threading.current_thread().name) + tls.connection.close() + del tls.connection + + def _after_fork(self): + self._manager = None + try: + self._incref() + except Exception as e: + # the proxy may just be for a manager which has shutdown + util.info('incref failed: %s' % e) + + def __reduce__(self): + kwds = {} + if get_spawning_popen() is not None: + kwds['authkey'] = self._authkey + + if getattr(self, '_isauto', False): + kwds['exposed'] = self._exposed_ + return (RebuildProxy, + (AutoProxy, self._token, self._serializer, kwds)) + else: + return (RebuildProxy, + (type(self), self._token, self._serializer, kwds)) + + def __deepcopy__(self, memo): + return self._getvalue() + + def __repr__(self): + return '<%s object, typeid %r at %#x>' % \ + (type(self).__name__, self._token.typeid, id(self)) + + def __str__(self): + ''' + Return representation of the referent (or a fall-back if that fails) + ''' + try: + return self._callmethod('__repr__') + except Exception: + return repr(self)[:-1] + "; '__str__()' failed>" + +# +# Function used for unpickling +# + +def RebuildProxy(func, token, serializer, kwds): + ''' + Function used for unpickling proxy objects. + ''' + server = getattr(process.current_process(), '_manager_server', None) + if server and server.address == token.address: + util.debug('Rebuild a proxy owned by manager, token=%r', token) + kwds['manager_owned'] = True + if token.id not in server.id_to_local_proxy_obj: + server.id_to_local_proxy_obj[token.id] = \ + server.id_to_obj[token.id] + incref = ( + kwds.pop('incref', True) and + not getattr(process.current_process(), '_inheriting', False) + ) + return func(token, serializer, incref=incref, **kwds) + +# +# Functions to create proxies and proxy types +# + +def MakeProxyType(name, exposed, _cache={}): + ''' + Return a proxy type whose methods are given by `exposed` + ''' + exposed = tuple(exposed) + try: + return _cache[(name, exposed)] + except KeyError: + pass + + dic = {} + + for meth in exposed: + exec('''def %s(self, /, *args, **kwds): + return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) + + ProxyType = type(name, (BaseProxy,), dic) + ProxyType._exposed_ = exposed + _cache[(name, exposed)] = ProxyType + return ProxyType + + +def AutoProxy(token, serializer, manager=None, authkey=None, + exposed=None, incref=True, manager_owned=False): + ''' + Return an auto-proxy for `token` + ''' + _Client = listener_client[serializer][1] + + if exposed is None: + conn = _Client(token.address, authkey=authkey) + try: + exposed = dispatch(conn, None, 'get_methods', (token,)) + finally: + conn.close() + + if authkey is None and manager is not None: + authkey = manager._authkey + if authkey is None: + authkey = process.current_process().authkey + + ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) + proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, + incref=incref, manager_owned=manager_owned) + proxy._isauto = True + return proxy + +# +# Types/callables which we will register with SyncManager +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for 
name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + def get(self): + return self._value + def set(self, value): + self._value = value + def __repr__(self): + return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) + value = property(get, set) + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +# +# Proxy types used by SyncManager +# + +class IteratorProxy(BaseProxy): + _exposed_ = ('__next__', 'send', 'throw', 'close') + def __iter__(self): + return self + def __next__(self, *args): + return self._callmethod('__next__', args) + def send(self, *args): + return self._callmethod('send', args) + def throw(self, *args): + return self._callmethod('throw', args) + def close(self, *args): + return self._callmethod('close', args) + + +class AcquirerProxy(BaseProxy): + _exposed_ = ('acquire', 'release', 'locked') + def acquire(self, blocking=True, timeout=None): + args = (blocking,) if timeout is None else (blocking, timeout) + return self._callmethod('acquire', args) + def release(self): + return self._callmethod('release') + def locked(self): + return self._callmethod('locked') + def __enter__(self): + return self._callmethod('acquire') + def __exit__(self, exc_type, exc_val, exc_tb): + return self._callmethod('release') + + +class ConditionProxy(AcquirerProxy): + _exposed_ = ('acquire', 'release', 'locked', 'wait', 'notify', 'notify_all') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def notify(self, n=1): + return self._callmethod('notify', (n,)) + def notify_all(self): + return self._callmethod('notify_all') + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = time.monotonic() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - time.monotonic() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + +class EventProxy(BaseProxy): + _exposed_ = ('is_set', 'set', 'clear', 'wait') + def is_set(self): + return self._callmethod('is_set') + def set(self): + return self._callmethod('set') + def clear(self): + return self._callmethod('clear') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + + +class BarrierProxy(BaseProxy): + _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def abort(self): + return self._callmethod('abort') + def reset(self): + return self._callmethod('reset') + @property + def parties(self): + return self._callmethod('__getattribute__', ('parties',)) + @property + def n_waiting(self): + return self._callmethod('__getattribute__', ('n_waiting',)) + @property + def broken(self): + return self._callmethod('__getattribute__', ('broken',)) + + +class NamespaceProxy(BaseProxy): + _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') + def __getattr__(self, key): + if key[0] == '_': + return object.__getattribute__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__getattribute__', (key,)) + def __setattr__(self, key, value): + if key[0] == '_': + return object.__setattr__(self, key, value) + callmethod = 
object.__getattribute__(self, '_callmethod')
+        return callmethod('__setattr__', (key, value))
+    def __delattr__(self, key):
+        if key[0] == '_':
+            return object.__delattr__(self, key)
+        callmethod = object.__getattribute__(self, '_callmethod')
+        return callmethod('__delattr__', (key,))
+
+
+class ValueProxy(BaseProxy):
+    _exposed_ = ('get', 'set')
+    def get(self):
+        return self._callmethod('get')
+    def set(self, value):
+        return self._callmethod('set', (value,))
+    value = property(get, set)
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+
+BaseListProxy = MakeProxyType('BaseListProxy', (
+    '__add__', '__contains__', '__delitem__', '__getitem__', '__imul__',
+    '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__',
+    'append', 'clear', 'copy', 'count', 'extend', 'index', 'insert', 'pop',
+    'remove', 'reverse', 'sort',
+    ))
+class ListProxy(BaseListProxy):
+    def __iadd__(self, value):
+        self._callmethod('extend', (value,))
+        return self
+    def __imul__(self, value):
+        self._callmethod('__imul__', (value,))
+        return self
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+collections.abc.MutableSequence.register(BaseListProxy)
+
+_BaseDictProxy = MakeProxyType('_BaseDictProxy', (
+    '__contains__', '__delitem__', '__getitem__', '__ior__', '__iter__',
+    '__len__', '__or__', '__reversed__', '__ror__',
+    '__setitem__', 'clear', 'copy', 'fromkeys', 'get', 'items',
+    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
+    ))
+_BaseDictProxy._method_to_typeid_ = {
+    '__iter__': 'Iterator',
+    }
+class DictProxy(_BaseDictProxy):
+    def __ior__(self, value):
+        self._callmethod('__ior__', (value,))
+        return self
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+collections.abc.MutableMapping.register(_BaseDictProxy)
+
+_BaseSetProxy = MakeProxyType("_BaseSetProxy", (
+    '__and__', '__class_getitem__', '__contains__', '__iand__', '__ior__',
+    '__isub__', '__iter__', '__ixor__', '__len__', '__or__', '__rand__',
+    '__ror__', '__rsub__', '__rxor__', '__sub__', '__xor__',
+    '__ge__', '__gt__', '__le__', '__lt__',
+    'add', 'clear', 'copy', 'difference', 'difference_update', 'discard',
+    'intersection', 'intersection_update', 'isdisjoint', 'issubset',
+    'issuperset', 'pop', 'remove', 'symmetric_difference',
+    'symmetric_difference_update', 'union', 'update',
+))
+
+class SetProxy(_BaseSetProxy):
+    def __ior__(self, value):
+        self._callmethod('__ior__', (value,))
+        return self
+    def __iand__(self, value):
+        self._callmethod('__iand__', (value,))
+        return self
+    def __ixor__(self, value):
+        self._callmethod('__ixor__', (value,))
+        return self
+    def __isub__(self, value):
+        self._callmethod('__isub__', (value,))
+        return self
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+collections.abc.MutableSet.register(_BaseSetProxy)
+
+
+ArrayProxy = MakeProxyType('ArrayProxy', (
+    '__len__', '__getitem__', '__setitem__'
+    ))
+
+
+BasePoolProxy = MakeProxyType('PoolProxy', (
+    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
+    'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
+    ))
+BasePoolProxy._method_to_typeid_ = {
+    'apply_async': 'AsyncResult',
+    'map_async': 'AsyncResult',
+    'starmap_async': 'AsyncResult',
+    'imap': 'Iterator',
+    'imap_unordered': 'Iterator'
+    }
+class PoolProxy(BasePoolProxy):
+    def __enter__(self):
+        return self
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.terminate()
+
+#
+# Definition of SyncManager
+#
+
+class SyncManager(BaseManager):
+    '''
+    Subclass of `BaseManager` which supports a number of shared
object types. + + The types registered are those intended for the synchronization + of threads, plus `dict`, `list` and `Namespace`. + + The `multiprocessing.Manager()` function creates started instances of + this class. + ''' + +SyncManager.register('Queue', queue.Queue) +SyncManager.register('JoinableQueue', queue.Queue) +SyncManager.register('Event', threading.Event, EventProxy) +SyncManager.register('Lock', threading.Lock, AcquirerProxy) +SyncManager.register('RLock', threading.RLock, AcquirerProxy) +SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) +SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, + AcquirerProxy) +SyncManager.register('Condition', threading.Condition, ConditionProxy) +SyncManager.register('Barrier', threading.Barrier, BarrierProxy) +SyncManager.register('Pool', pool.Pool, PoolProxy) +SyncManager.register('list', list, ListProxy) +SyncManager.register('dict', dict, DictProxy) +SyncManager.register('set', set, SetProxy) +SyncManager.register('Value', Value, ValueProxy) +SyncManager.register('Array', Array, ArrayProxy) +SyncManager.register('Namespace', Namespace, NamespaceProxy) + +# types returned by methods of PoolProxy +SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) +SyncManager.register('AsyncResult', create_method=False) + +# +# Definition of SharedMemoryManager and SharedMemoryServer +# + +if HAS_SHMEM: + class _SharedMemoryTracker: + "Manages one or more shared memory segments." + + def __init__(self, name, segment_names=[]): + self.shared_memory_context_name = name + self.segment_names = segment_names + + def register_segment(self, segment_name): + "Adds the supplied shared memory block name to tracker." + util.debug(f"Register segment {segment_name!r} in pid {getpid()}") + self.segment_names.append(segment_name) + + def destroy_segment(self, segment_name): + """Calls unlink() on the shared memory block with the supplied name + and removes it from the list of blocks being tracked.""" + util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") + self.segment_names.remove(segment_name) + segment = shared_memory.SharedMemory(segment_name) + segment.close() + segment.unlink() + + def unlink(self): + "Calls destroy_segment() on all tracked shared memory blocks." + for segment_name in self.segment_names[:]: + self.destroy_segment(segment_name) + + def __del__(self): + util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") + self.unlink() + + def __getstate__(self): + return (self.shared_memory_context_name, self.segment_names) + + def __setstate__(self, state): + self.__init__(*state) + + + class SharedMemoryServer(Server): + + public = Server.public + \ + ['track_segment', 'release_segment', 'list_segments'] + + def __init__(self, *args, **kwargs): + Server.__init__(self, *args, **kwargs) + address = self.address + # The address of Linux abstract namespaces can be bytes + if isinstance(address, bytes): + address = os.fsdecode(address) + self.shared_memory_context = \ + _SharedMemoryTracker(f"shm_{address}_{getpid()}") + util.debug(f"SharedMemoryServer started by pid {getpid()}") + + def create(self, c, typeid, /, *args, **kwargs): + """Create a new distributed-shared object (not backed by a shared + memory block) and return its id to be used in a Proxy Object.""" + # Unless set up as a shared proxy, don't make shared_memory_context + # a standard part of kwargs. This makes things easier for supplying + # simple functions. 
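+            # Registered proxy types opt in by defining a
+            # "_shared_memory_proxy" attribute; only then is the tracker
+            # passed along as a keyword argument.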
+ if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): + kwargs['shared_memory_context'] = self.shared_memory_context + return Server.create(self, c, typeid, *args, **kwargs) + + def shutdown(self, c): + "Call unlink() on all tracked shared memory, terminate the Server." + self.shared_memory_context.unlink() + return Server.shutdown(self, c) + + def track_segment(self, c, segment_name): + "Adds the supplied shared memory block name to Server's tracker." + self.shared_memory_context.register_segment(segment_name) + + def release_segment(self, c, segment_name): + """Calls unlink() on the shared memory block with the supplied name + and removes it from the tracker instance inside the Server.""" + self.shared_memory_context.destroy_segment(segment_name) + + def list_segments(self, c): + """Returns a list of names of shared memory blocks that the Server + is currently tracking.""" + return self.shared_memory_context.segment_names + + + class SharedMemoryManager(BaseManager): + """Like SyncManager but uses SharedMemoryServer instead of Server. + + It provides methods for creating and returning SharedMemory instances + and for creating a list-like object (ShareableList) backed by shared + memory. It also provides methods that create and return Proxy Objects + that support synchronization across processes (i.e. multi-process-safe + locks and semaphores). + """ + + _Server = SharedMemoryServer + + def __init__(self, *args, **kwargs): + if os.name == "posix": + # bpo-36867: Ensure the resource_tracker is running before + # launching the manager process, so that concurrent + # shared_memory manipulation both in the manager and in the + # current process does not create two resource_tracker + # processes. + from . import resource_tracker + resource_tracker.ensure_running() + BaseManager.__init__(self, *args, **kwargs) + util.debug(f"{self.__class__.__name__} created by pid {getpid()}") + + def __del__(self): + util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") + + def get_server(self): + 'Better than monkeypatching for now; merge into Server ultimately' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started SharedMemoryServer") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("SharedMemoryManager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self._Server(self._registry, self._address, + self._authkey, self._serializer) + + def SharedMemory(self, size): + """Returns a new SharedMemory instance with the specified size in + bytes, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sms = shared_memory.SharedMemory(None, create=True, size=size) + try: + dispatch(conn, None, 'track_segment', (sms.name,)) + except BaseException as e: + sms.unlink() + raise e + return sms + + def ShareableList(self, sequence): + """Returns a new ShareableList instance populated with the values + from the input sequence, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sl = shared_memory.ShareableList(sequence) + try: + dispatch(conn, None, 'track_segment', (sl.shm.name,)) + except BaseException as e: + sl.shm.unlink() + raise e + return sl diff --git a/Python313_13_x64_Template/Lib/multiprocessing/pool.py b/Python314_4_x64_Template/Lib/multiprocessing/pool.py similarity index 100% rename from Python313_13_x64_Template/Lib/multiprocessing/pool.py rename to 
Python314_4_x64_Template/Lib/multiprocessing/pool.py diff --git a/Python314_4_x64_Template/Lib/multiprocessing/popen_fork.py b/Python314_4_x64_Template/Lib/multiprocessing/popen_fork.py new file mode 100644 index 00000000..7affa1b9 --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/popen_fork.py @@ -0,0 +1,90 @@ +import atexit +import os +import signal + +from . import util + +__all__ = ['Popen'] + +# +# Start child process using fork +# + +class Popen(object): + method = 'fork' + + def __init__(self, process_obj): + util._flush_std_streams() + self.returncode = None + self.finalizer = None + self._launch(process_obj) + + def duplicate_for_child(self, fd): + return fd + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + try: + pid, sts = os.waitpid(self.pid, flag) + except OSError: + # Child process not yet created. See #1731717 + # e.errno == errno.ECHILD == 10 + return None + if pid == self.pid: + self.returncode = os.waitstatus_to_exitcode(sts) + return self.returncode + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is not None: + from multiprocessing.connection import wait + if not wait([self.sentinel], timeout): + return None + # This shouldn't block if wait() returned successfully. + return self.poll(os.WNOHANG if timeout == 0.0 else 0) + return self.returncode + + def _send_signal(self, sig): + if self.returncode is None: + try: + os.kill(self.pid, sig) + except ProcessLookupError: + pass + except OSError: + if self.wait(timeout=0.1) is None: + raise + + def interrupt(self): + self._send_signal(signal.SIGINT) + + def terminate(self): + self._send_signal(signal.SIGTERM) + + def kill(self): + self._send_signal(signal.SIGKILL) + + def _launch(self, process_obj): + code = 1 + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + self.pid = os.fork() + if self.pid == 0: + try: + atexit._clear() + atexit.register(util._exit_function) + os.close(parent_r) + os.close(parent_w) + code = process_obj._bootstrap(parent_sentinel=child_r) + finally: + atexit._run_exitfuncs() + os._exit(code) + else: + os.close(child_w) + os.close(child_r) + self.finalizer = util.Finalize(self, util.close_fds, + (parent_r, parent_w,)) + self.sentinel = parent_r + + def close(self): + if self.finalizer is not None: + self.finalizer() diff --git a/Python313_13_x64_Template/Lib/multiprocessing/popen_forkserver.py b/Python314_4_x64_Template/Lib/multiprocessing/popen_forkserver.py similarity index 100% rename from Python313_13_x64_Template/Lib/multiprocessing/popen_forkserver.py rename to Python314_4_x64_Template/Lib/multiprocessing/popen_forkserver.py diff --git a/Python313_13_x64_Template/Lib/multiprocessing/popen_spawn_posix.py b/Python314_4_x64_Template/Lib/multiprocessing/popen_spawn_posix.py similarity index 100% rename from Python313_13_x64_Template/Lib/multiprocessing/popen_spawn_posix.py rename to Python314_4_x64_Template/Lib/multiprocessing/popen_spawn_posix.py diff --git a/Python313_13_x64_Template/Lib/multiprocessing/popen_spawn_win32.py b/Python314_4_x64_Template/Lib/multiprocessing/popen_spawn_win32.py similarity index 100% rename from Python313_13_x64_Template/Lib/multiprocessing/popen_spawn_win32.py rename to Python314_4_x64_Template/Lib/multiprocessing/popen_spawn_win32.py diff --git a/Python314_4_x64_Template/Lib/multiprocessing/process.py b/Python314_4_x64_Template/Lib/multiprocessing/process.py new file mode 100644 index 00000000..262513f2 --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/process.py @@ -0,0 +1,443 @@ 
+# +# Module providing the `Process` class which emulates `threading.Thread` +# +# multiprocessing/process.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['BaseProcess', 'current_process', 'active_children', + 'parent_process'] + +# +# Imports +# + +import os +import sys +import signal +import itertools +import threading +from _weakrefset import WeakSet + +# +# +# + +try: + ORIGINAL_DIR = os.path.abspath(os.getcwd()) +except OSError: + ORIGINAL_DIR = None + +# +# Public functions +# + +def current_process(): + ''' + Return process object representing the current process + ''' + return _current_process + +def active_children(): + ''' + Return list of process objects corresponding to live child processes + ''' + _cleanup() + return list(_children) + + +def parent_process(): + ''' + Return process object representing the parent process + ''' + return _parent_process + +# +# +# + +def _cleanup(): + # check for processes which have finished + for p in list(_children): + if (child_popen := p._popen) and child_popen.poll() is not None: + _children.discard(p) + +# +# The `Process` class +# + +class BaseProcess(object): + ''' + Process objects represent activity that is run in a separate process + + The class is analogous to `threading.Thread` + ''' + def _Popen(self): + raise NotImplementedError + + def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, + *, daemon=None): + assert group is None, 'group argument must be None for now' + count = next(_process_counter) + self._identity = _current_process._identity + (count,) + self._config = _current_process._config.copy() + self._parent_pid = os.getpid() + self._parent_name = _current_process.name + self._popen = None + self._closed = False + self._target = target + self._args = tuple(args) + self._kwargs = dict(kwargs) if kwargs else {} + self._name = name or type(self).__name__ + '-' + \ + ':'.join(str(i) for i in self._identity) + if daemon is not None: + self.daemon = daemon + _dangling.add(self) + + def _check_closed(self): + if self._closed: + raise ValueError("process object is closed") + + def run(self): + ''' + Method to be run in sub-process; can be overridden in sub-class + ''' + if self._target: + self._target(*self._args, **self._kwargs) + + def start(self): + ''' + Start child process + ''' + self._check_closed() + assert self._popen is None, 'cannot start a process twice' + assert self._parent_pid == os.getpid(), \ + 'can only start a process object created by current process' + assert not _current_process._config.get('daemon'), \ + 'daemonic processes are not allowed to have children' + _cleanup() + self._popen = self._Popen(self) + self._sentinel = self._popen.sentinel + # Avoid a refcycle if the target function holds an indirect + # reference to the process object (see bpo-30775) + del self._target, self._args, self._kwargs + _children.add(self) + + def interrupt(self): + ''' + Terminate process; sends SIGINT signal + ''' + self._check_closed() + self._popen.interrupt() + + def terminate(self): + ''' + Terminate process; sends SIGTERM signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.terminate() + + def kill(self): + ''' + Terminate process; sends SIGKILL signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.kill() + + def join(self, timeout=None): + ''' + Wait until child process terminates + ''' + self._check_closed() + assert self._parent_pid == os.getpid(), 'can only join a child process' + 
assert self._popen is not None, 'can only join a started process' + res = self._popen.wait(timeout) + if res is not None: + _children.discard(self) + + def is_alive(self): + ''' + Return whether process is alive + ''' + self._check_closed() + if self is _current_process: + return True + assert self._parent_pid == os.getpid(), 'can only test a child process' + + if self._popen is None: + return False + + returncode = self._popen.poll() + if returncode is None: + return True + else: + _children.discard(self) + return False + + def close(self): + ''' + Close the Process object. + + This method releases resources held by the Process object. It is + an error to call this method if the child process is still running. + ''' + if self._popen is not None: + if self._popen.poll() is None: + raise ValueError("Cannot close a process while it is still running. " + "You should first call join() or terminate().") + self._popen.close() + self._popen = None + del self._sentinel + _children.discard(self) + self._closed = True + + @property + def name(self): + return self._name + + @name.setter + def name(self, name): + assert isinstance(name, str), 'name must be a string' + self._name = name + + @property + def daemon(self): + ''' + Return whether process is a daemon + ''' + return self._config.get('daemon', False) + + @daemon.setter + def daemon(self, daemonic): + ''' + Set whether process is a daemon + ''' + assert self._popen is None, 'process has already started' + self._config['daemon'] = daemonic + + @property + def authkey(self): + return self._config['authkey'] + + @authkey.setter + def authkey(self, authkey): + ''' + Set authorization key of process + ''' + self._config['authkey'] = AuthenticationString(authkey) + + @property + def exitcode(self): + ''' + Return exit code of process or `None` if it has yet to stop + ''' + self._check_closed() + if self._popen is None: + return self._popen + return self._popen.poll() + + @property + def ident(self): + ''' + Return identifier (PID) of process or `None` if it has yet to start + ''' + self._check_closed() + if self is _current_process: + return os.getpid() + else: + return self._popen and self._popen.pid + + pid = ident + + @property + def sentinel(self): + ''' + Return a file descriptor (Unix) or handle (Windows) suitable for + waiting for process termination. + ''' + self._check_closed() + try: + return self._sentinel + except AttributeError: + raise ValueError("process not started") from None + + def __repr__(self): + exitcode = None + if self is _current_process: + status = 'started' + elif self._closed: + status = 'closed' + elif self._parent_pid != os.getpid(): + status = 'unknown' + elif self._popen is None: + status = 'initial' + else: + exitcode = self._popen.poll() + if exitcode is not None: + status = 'stopped' + else: + status = 'started' + + info = [type(self).__name__, 'name=%r' % self._name] + if self._popen is not None: + info.append('pid=%s' % self._popen.pid) + info.append('parent=%s' % self._parent_pid) + info.append(status) + if exitcode is not None: + exitcode = _exitcode_to_name.get(exitcode, exitcode) + info.append('exitcode=%s' % exitcode) + if self.daemon: + info.append('daemon') + return '<%s>' % ' '.join(info) + + ## + + def _bootstrap(self, parent_sentinel=None): + from . 
import util, context + global _current_process, _parent_process, _process_counter, _children + + try: + if self._start_method is not None: + context._force_start_method(self._start_method) + _process_counter = itertools.count(1) + _children = set() + util._close_stdin() + old_process = _current_process + _current_process = self + _parent_process = _ParentProcess( + self._parent_name, self._parent_pid, parent_sentinel) + if threading._HAVE_THREAD_NATIVE_ID: + threading.main_thread()._set_native_id() + try: + self._after_fork() + finally: + # delay finalization of the old process object until after + # _run_after_forkers() is executed + del old_process + util.info('child process calling self.run()') + self.run() + exitcode = 0 + except SystemExit as e: + if e.code is None: + exitcode = 0 + elif isinstance(e.code, int): + exitcode = e.code + else: + sys.stderr.write(str(e.code) + '\n') + exitcode = 1 + except: + exitcode = 1 + import traceback + sys.stderr.write('Process %s:\n' % self.name) + traceback.print_exc() + finally: + threading._shutdown() + util.info('process exiting with exitcode %d' % exitcode) + util._flush_std_streams() + + return exitcode + + @staticmethod + def _after_fork(): + from . import util + util._finalizer_registry.clear() + util._run_after_forkers() + + +# +# We subclass bytes to avoid accidental transmission of auth keys over network +# + +class AuthenticationString(bytes): + def __reduce__(self): + from .context import get_spawning_popen + if get_spawning_popen() is None: + raise TypeError( + 'Pickling an AuthenticationString object is ' + 'disallowed for security reasons' + ) + return AuthenticationString, (bytes(self),) + + +# +# Create object representing the parent process +# + +class _ParentProcess(BaseProcess): + + def __init__(self, name, pid, sentinel): + self._identity = () + self._name = name + self._pid = pid + self._parent_pid = None + self._popen = None + self._closed = False + self._sentinel = sentinel + self._config = {} + + def is_alive(self): + from multiprocessing.connection import wait + return not wait([self._sentinel], timeout=0) + + @property + def ident(self): + return self._pid + + def join(self, timeout=None): + ''' + Wait until parent process terminates + ''' + from multiprocessing.connection import wait + wait([self._sentinel], timeout=timeout) + + pid = ident + +# +# Create object representing the main process +# + +class _MainProcess(BaseProcess): + + def __init__(self): + self._identity = () + self._name = 'MainProcess' + self._parent_pid = None + self._popen = None + self._closed = False + self._config = {'authkey': AuthenticationString(os.urandom(32)), + 'semprefix': '/mp'} + # Note that some versions of FreeBSD only allow named + # semaphores to have names of up to 14 characters. Therefore + # we choose a short prefix. + # + # On MacOSX in a sandbox it may be necessary to use a + # different prefix -- see #19478. + # + # Everything in self._config will be inherited by descendant + # processes. 
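+        #
+        # Illustrative sketch (not part of the module): because 'authkey'
+        # is part of self._config, a child process sees the same bytes as
+        # its parent without any extra plumbing:
+        #
+        #     import multiprocessing as mp
+        #
+        #     def show():
+        #         print(bytes(mp.current_process().authkey))
+        #
+        #     if __name__ == '__main__':
+        #         print(bytes(mp.current_process().authkey))
+        #         mp.Process(target=show).start()   # prints the same value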
+ + def close(self): + pass + + +_parent_process = None +_current_process = _MainProcess() +_process_counter = itertools.count(1) +_children = set() +del _MainProcess + +# +# Give names to some return codes +# + +_exitcode_to_name = {} + +for name, signum in list(signal.__dict__.items()): + if name[:3]=='SIG' and '_' not in name: + _exitcode_to_name[-signum] = f'-{name}' +del name, signum + +# For debug and leak testing +_dangling = WeakSet() diff --git a/Python314_4_x64_Template/Lib/multiprocessing/queues.py b/Python314_4_x64_Template/Lib/multiprocessing/queues.py new file mode 100644 index 00000000..981599ac --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/queues.py @@ -0,0 +1,399 @@ +# +# Module implementing queues +# +# multiprocessing/queues.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] + +import sys +import os +import threading +import collections +import time +import types +import weakref +import errno + +from queue import Empty, Full + +from . import connection +from . import context +_ForkingPickler = context.reduction.ForkingPickler + +from .util import debug, info, Finalize, register_after_fork, is_exiting + +# +# Queue type using a pipe, buffer and thread +# + +class Queue(object): + + def __init__(self, maxsize=0, *, ctx): + if maxsize <= 0: + # Can raise ImportError (see issues #3770 and #23400) + from .synchronize import SEM_VALUE_MAX as maxsize + self._maxsize = maxsize + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._opid = os.getpid() + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + self._sem = ctx.BoundedSemaphore(maxsize) + # For use by concurrent.futures + self._ignore_epipe = False + self._reset() + + if sys.platform != 'win32': + register_after_fork(self, Queue._after_fork) + + def __getstate__(self): + context.assert_spawning(self) + return (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) + + def __setstate__(self, state): + (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) = state + self._reset() + + def _after_fork(self): + debug('Queue._after_fork()') + self._reset(after_fork=True) + + def _reset(self, after_fork=False): + if after_fork: + self._notempty._at_fork_reinit() + else: + self._notempty = threading.Condition(threading.Lock()) + self._buffer = collections.deque() + self._thread = None + self._jointhread = None + self._joincancelled = False + self._closed = False + self._close = None + self._send_bytes = self._writer.send_bytes + self._recv_bytes = self._reader.recv_bytes + self._poll = self._reader.poll + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._notempty.notify() + + def get(self, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if block and timeout is None: + with self._rlock: + res = self._recv_bytes() + self._sem.release() + else: + if block: + deadline = time.monotonic() + timeout + if not self._rlock.acquire(block, timeout): + raise Empty + try: + if block: + timeout = deadline - time.monotonic() + if not self._poll(timeout): + raise 
Empty + elif not self._poll(): + raise Empty + res = self._recv_bytes() + self._sem.release() + finally: + self._rlock.release() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def qsize(self): + # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() + return self._maxsize - self._sem.get_value() + + def empty(self): + return not self._poll() + + def full(self): + return self._sem._semlock._is_zero() + + def get_nowait(self): + return self.get(False) + + def put_nowait(self, obj): + return self.put(obj, False) + + def close(self): + self._closed = True + close = self._close + if close: + self._close = None + close() + + def join_thread(self): + debug('Queue.join_thread()') + assert self._closed, "Queue {0!r} not closed".format(self) + if self._jointhread: + self._jointhread() + + def cancel_join_thread(self): + debug('Queue.cancel_join_thread()') + self._joincancelled = True + try: + self._jointhread.cancel() + except AttributeError: + pass + + def _terminate_broken(self): + # Close a Queue on error. + + # gh-94777: Prevent queue writing to a pipe which is no longer read. + self._reader.close() + + # gh-107219: Close the connection writer which can unblock + # Queue._feed() if it was stuck in send_bytes(). + if sys.platform == 'win32': + self._writer.close() + + self.close() + self.join_thread() + + def _start_thread(self): + debug('Queue._start_thread()') + + # Start thread which transfers data from buffer to pipe + self._buffer.clear() + self._thread = threading.Thread( + target=Queue._feed, + args=(self._buffer, self._notempty, self._send_bytes, + self._wlock, self._reader.close, self._writer.close, + self._ignore_epipe, self._on_queue_feeder_error, + self._sem), + name='QueueFeederThread', + daemon=True, + ) + + try: + debug('doing self._thread.start()') + self._thread.start() + debug('... done self._thread.start()') + except: + # gh-109047: During Python finalization, creating a thread + # can fail with RuntimeError. + self._thread = None + raise + + if not self._joincancelled: + self._jointhread = Finalize( + self._thread, Queue._finalize_join, + [weakref.ref(self._thread)], + exitpriority=-5 + ) + + # Send sentinel to the thread queue object when garbage collected + self._close = Finalize( + self, Queue._finalize_close, + [self._buffer, self._notempty], + exitpriority=10 + ) + + @staticmethod + def _finalize_join(twr): + debug('joining queue thread') + thread = twr() + if thread is not None: + thread.join() + debug('... queue thread joined') + else: + debug('... 
queue thread already dead') + + @staticmethod + def _finalize_close(buffer, notempty): + debug('telling queue thread to quit') + with notempty: + buffer.append(_sentinel) + notempty.notify() + + @staticmethod + def _feed(buffer, notempty, send_bytes, writelock, reader_close, + writer_close, ignore_epipe, onerror, queue_sem): + debug('starting thread to feed data to pipe') + nacquire = notempty.acquire + nrelease = notempty.release + nwait = notempty.wait + bpopleft = buffer.popleft + sentinel = _sentinel + if sys.platform != 'win32': + wacquire = writelock.acquire + wrelease = writelock.release + else: + wacquire = None + + while 1: + try: + nacquire() + try: + if not buffer: + nwait() + finally: + nrelease() + try: + while 1: + obj = bpopleft() + if obj is sentinel: + debug('feeder thread got sentinel -- exiting') + reader_close() + writer_close() + return + + # serialize the data before acquiring the lock + obj = _ForkingPickler.dumps(obj) + if wacquire is None: + send_bytes(obj) + else: + wacquire() + try: + send_bytes(obj) + finally: + wrelease() + except IndexError: + pass + except Exception as e: + if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: + return + # Since this runs in a daemon thread the resources it uses + # may be become unusable while the process is cleaning up. + # We ignore errors which happen after the process has + # started to cleanup. + if is_exiting(): + info('error in queue thread: %s', e) + return + else: + # Since the object has not been sent in the queue, we need + # to decrease the size of the queue. The error acts as + # if the object had been silently removed from the queue + # and this step is necessary to have a properly working + # queue. + queue_sem.release() + onerror(e, obj) + + @staticmethod + def _on_queue_feeder_error(e, obj): + """ + Private API hook called when feeding data in the background thread + raises an exception. For overriding by concurrent.futures. + """ + import traceback + traceback.print_exc() + + __class_getitem__ = classmethod(types.GenericAlias) + + +_sentinel = object() + +# +# A queue type which also supports join() and task_done() methods +# +# Note that if you do not call task_done() for each finished task then +# eventually the counter's semaphore may overflow causing Bad Things +# to happen. 
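+#
+# A minimal consumer/producer sketch (illustrative only, not part of this
+# module): each get() is paired with exactly one task_done() so that
+# join() can return:
+#
+#     import multiprocessing as mp
+#
+#     def worker(q):
+#         while True:
+#             item = q.get()
+#             try:
+#                 pass  # process item here
+#             finally:
+#                 q.task_done()
+#
+#     if __name__ == '__main__':
+#         q = mp.JoinableQueue()
+#         mp.Process(target=worker, args=(q,), daemon=True).start()
+#         for i in range(10):
+#             q.put(i)
+#         q.join()   # returns once task_done() has run 10 times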
+# + +class JoinableQueue(Queue): + + def __init__(self, maxsize=0, *, ctx): + Queue.__init__(self, maxsize, ctx=ctx) + self._unfinished_tasks = ctx.Semaphore(0) + self._cond = ctx.Condition() + + def __getstate__(self): + return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) + + def __setstate__(self, state): + Queue.__setstate__(self, state[:-2]) + self._cond, self._unfinished_tasks = state[-2:] + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty, self._cond: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._unfinished_tasks.release() + self._notempty.notify() + + def task_done(self): + with self._cond: + if not self._unfinished_tasks.acquire(False): + raise ValueError('task_done() called too many times') + if self._unfinished_tasks._semlock._is_zero(): + self._cond.notify_all() + + def join(self): + with self._cond: + if not self._unfinished_tasks._semlock._is_zero(): + self._cond.wait() + +# +# Simplified Queue type -- really just a locked pipe +# + +class SimpleQueue(object): + + def __init__(self, *, ctx): + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._poll = self._reader.poll + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + + def close(self): + self._reader.close() + self._writer.close() + + def empty(self): + return not self._poll() + + def __getstate__(self): + context.assert_spawning(self) + return (self._reader, self._writer, self._rlock, self._wlock) + + def __setstate__(self, state): + (self._reader, self._writer, self._rlock, self._wlock) = state + self._poll = self._reader.poll + + def get(self): + with self._rlock: + res = self._reader.recv_bytes() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def put(self, obj): + # serialize the data before acquiring the lock + obj = _ForkingPickler.dumps(obj) + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self._writer.send_bytes(obj) + else: + with self._wlock: + self._writer.send_bytes(obj) + + __class_getitem__ = classmethod(types.GenericAlias) diff --git a/Python314_4_x64_Template/Lib/multiprocessing/reduction.py b/Python314_4_x64_Template/Lib/multiprocessing/reduction.py new file mode 100644 index 00000000..fcccd3ee --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/reduction.py @@ -0,0 +1,281 @@ +# +# Module which deals with pickling of objects. +# +# multiprocessing/reduction.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +from abc import ABCMeta +import copyreg +import functools +import io +import os +import pickle +import socket +import sys + +from . 
import context + +__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] + + +HAVE_SEND_HANDLE = (sys.platform == 'win32' or + (hasattr(socket, 'CMSG_LEN') and + hasattr(socket, 'SCM_RIGHTS') and + hasattr(socket.socket, 'sendmsg'))) + +# +# Pickler subclass +# + +class ForkingPickler(pickle.Pickler): + '''Pickler subclass used by multiprocessing.''' + _extra_reducers = {} + _copyreg_dispatch_table = copyreg.dispatch_table + + def __init__(self, *args): + super().__init__(*args) + self.dispatch_table = self._copyreg_dispatch_table.copy() + self.dispatch_table.update(self._extra_reducers) + + @classmethod + def register(cls, type, reduce): + '''Register a reduce function for a type.''' + cls._extra_reducers[type] = reduce + + @classmethod + def dumps(cls, obj, protocol=None): + buf = io.BytesIO() + cls(buf, protocol).dump(obj) + return buf.getbuffer() + + loads = pickle.loads + +register = ForkingPickler.register + +def dump(obj, file, protocol=None): + '''Replacement for pickle.dump() using ForkingPickler.''' + ForkingPickler(file, protocol).dump(obj) + +# +# Platform specific definitions +# + +if sys.platform == 'win32': + # Windows + __all__ += ['DupHandle', 'duplicate', 'steal_handle'] + import _winapi + + def duplicate(handle, target_process=None, inheritable=False, + *, source_process=None): + '''Duplicate a handle. (target_process is a handle not a pid!)''' + current_process = _winapi.GetCurrentProcess() + if source_process is None: + source_process = current_process + if target_process is None: + target_process = current_process + return _winapi.DuplicateHandle( + source_process, handle, target_process, + 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) + + def steal_handle(source_pid, handle): + '''Steal a handle from process identified by source_pid.''' + source_process_handle = _winapi.OpenProcess( + _winapi.PROCESS_DUP_HANDLE, False, source_pid) + try: + return _winapi.DuplicateHandle( + source_process_handle, handle, + _winapi.GetCurrentProcess(), 0, False, + _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(source_process_handle) + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) + conn.send(dh) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + return conn.recv().detach() + + class DupHandle(object): + '''Picklable wrapper for a handle.''' + def __init__(self, handle, access, pid=None): + if pid is None: + # We just duplicate the handle in the current process and + # let the receiving process steal the handle. + pid = os.getpid() + proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) + try: + self._handle = _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), + handle, proc, access, False, 0) + finally: + _winapi.CloseHandle(proc) + self._access = access + self._pid = pid + + def detach(self): + '''Get the handle. This should only be called once.''' + # retrieve handle from process which currently owns it + if self._pid == os.getpid(): + # The handle has already been duplicated for this process. + return self._handle + # We must steal the handle from the process whose pid is self._pid. 
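+            # DUPLICATE_CLOSE_SOURCE (passed below) also closes the handle
+            # in the process that currently owns it, so detach() transfers
+            # ownership rather than copying it.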
+ proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, + self._pid) + try: + return _winapi.DuplicateHandle( + proc, self._handle, _winapi.GetCurrentProcess(), + self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(proc) + +else: + # Unix + __all__ += ['DupFd', 'sendfds', 'recvfds'] + import array + + def sendfds(sock, fds): + '''Send an array of fds over an AF_UNIX socket.''' + fds = array.array('i', fds) + msg = bytes([len(fds) % 256]) + sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + if sock.recv(1) != b'A': + raise RuntimeError('did not receive acknowledgement of fd') + + def recvfds(sock, size): + '''Receive an array of fds over an AF_UNIX socket.''' + a = array.array('i') + bytes_size = a.itemsize * size + msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) + if not msg and not ancdata: + raise EOFError + try: + # We send/recv an Ack byte after the fds to work around an old + # macOS bug; it isn't clear if this is still required but it + # makes unit testing fd sending easier. + # See: https://github.com/python/cpython/issues/58874 + sock.send(b'A') # Acknowledge + if len(ancdata) != 1: + raise RuntimeError('received %d items of ancdata' % + len(ancdata)) + cmsg_level, cmsg_type, cmsg_data = ancdata[0] + if (cmsg_level == socket.SOL_SOCKET and + cmsg_type == socket.SCM_RIGHTS): + if len(cmsg_data) % a.itemsize != 0: + raise ValueError + a.frombytes(cmsg_data) + if len(a) % 256 != msg[0]: + raise AssertionError( + "Len is {0:n} but msg[0] is {1!r}".format( + len(a), msg[0])) + return list(a) + except (ValueError, IndexError): + pass + raise RuntimeError('Invalid data received') + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + sendfds(s, [handle]) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + return recvfds(s, 1)[0] + + def DupFd(fd): + '''Return a wrapper for an fd.''' + popen_obj = context.get_spawning_popen() + if popen_obj is not None: + return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) + elif HAVE_SEND_HANDLE: + from . 
import resource_sharer + return resource_sharer.DupFd(fd) + else: + raise ValueError('SCM_RIGHTS appears not to be available') + +# +# Try making some callable types picklable +# + +def _reduce_method(m): + if m.__self__ is None: + return getattr, (m.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) +class _C: + def f(self): + pass +register(type(_C().f), _reduce_method) + + +def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) +register(type(list.append), _reduce_method_descriptor) +register(type(int.__add__), _reduce_method_descriptor) + + +def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) +def _rebuild_partial(func, args, keywords): + return functools.partial(func, *args, **keywords) +register(functools.partial, _reduce_partial) + +# +# Make sockets picklable +# + +if sys.platform == 'win32': + def _reduce_socket(s): + from .resource_sharer import DupSocket + return _rebuild_socket, (DupSocket(s),) + def _rebuild_socket(ds): + return ds.detach() + register(socket.socket, _reduce_socket) + +else: + def _reduce_socket(s): + df = DupFd(s.fileno()) + return _rebuild_socket, (df, s.family, s.type, s.proto) + def _rebuild_socket(df, family, type, proto): + fd = df.detach() + return socket.socket(family, type, proto, fileno=fd) + register(socket.socket, _reduce_socket) + + +class AbstractReducer(metaclass=ABCMeta): + '''Abstract base class for use in implementing a Reduction class + suitable for use in replacing the standard reduction mechanism + used in multiprocessing.''' + ForkingPickler = ForkingPickler + register = register + dump = dump + send_handle = send_handle + recv_handle = recv_handle + + if sys.platform == 'win32': + steal_handle = steal_handle + duplicate = duplicate + DupHandle = DupHandle + else: + sendfds = sendfds + recvfds = recvfds + DupFd = DupFd + + _reduce_method = _reduce_method + _reduce_method_descriptor = _reduce_method_descriptor + _rebuild_partial = _rebuild_partial + _reduce_socket = _reduce_socket + _rebuild_socket = _rebuild_socket + + def __init__(self, *args): + register(type(_C().f), _reduce_method) + register(type(list.append), _reduce_method_descriptor) + register(type(int.__add__), _reduce_method_descriptor) + register(functools.partial, _reduce_partial) + register(socket.socket, _reduce_socket) diff --git a/Python313_13_x64_Template/Lib/multiprocessing/resource_sharer.py b/Python314_4_x64_Template/Lib/multiprocessing/resource_sharer.py similarity index 100% rename from Python313_13_x64_Template/Lib/multiprocessing/resource_sharer.py rename to Python314_4_x64_Template/Lib/multiprocessing/resource_sharer.py diff --git a/Python314_4_x64_Template/Lib/multiprocessing/resource_tracker.py b/Python314_4_x64_Template/Lib/multiprocessing/resource_tracker.py new file mode 100644 index 00000000..2ef2d1ec --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/resource_tracker.py @@ -0,0 +1,416 @@ +############################################################################### +# Server process to keep track of unlinked resources (like shared memory +# segments, semaphores etc.) and clean them. +# +# On Unix we run a server process which keeps track of unlinked +# resources. The server ignores SIGINT and SIGTERM and reads from a +# pipe. Every other process of the program has a copy of the writable +# end of the pipe, so we get EOF when all other processes have exited. +# Then the server process unlinks any remaining resource names. 
+# +# This is important because there may be system limits for such resources: for +# instance, the system only supports a limited number of named semaphores, and +# shared-memory segments live in the RAM. If a python process leaks such a +# resource, this resource will not be removed till the next reboot. Without +# this resource tracker process, "killall python" would probably leave unlinked +# resources. + +import base64 +import os +import signal +import sys +import threading +import warnings +from collections import deque + +import json + +from . import spawn +from . import util + +__all__ = ['ensure_running', 'register', 'unregister'] + +_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') +_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) + +def cleanup_noop(name): + raise RuntimeError('noop should never be registered or cleaned up') + +_CLEANUP_FUNCS = { + 'noop': cleanup_noop, + 'dummy': lambda name: None, # Dummy resource used in tests +} + +if os.name == 'posix': + import _multiprocessing + import _posixshmem + + # Use sem_unlink() to clean up named semaphores. + # + # sem_unlink() may be missing if the Python build process detected the + # absence of POSIX named semaphores. In that case, no named semaphores were + # ever opened, so no cleanup would be necessary. + if hasattr(_multiprocessing, 'sem_unlink'): + _CLEANUP_FUNCS['semaphore'] = _multiprocessing.sem_unlink + _CLEANUP_FUNCS['shared_memory'] = _posixshmem.shm_unlink + + +class ReentrantCallError(RuntimeError): + pass + + +class ResourceTracker(object): + + def __init__(self): + self._lock = threading.RLock() + self._fd = None + self._pid = None + self._exitcode = None + self._reentrant_messages = deque() + + # True to use colon-separated lines, rather than JSON lines, + # for internal communication. (Mainly for testing). + # Filenames not supported by the simple format will always be sent + # using JSON. + # The reader should understand all formats. + self._use_simple_format = True + + def _reentrant_call_error(self): + # gh-109629: this happens if an explicit call to the ResourceTracker + # gets interrupted by a garbage collection, invoking a finalizer (*) + # that itself calls back into ResourceTracker. + # (*) for example the SemLock finalizer + raise ReentrantCallError( + "Reentrant call into the multiprocessing resource tracker") + + def __del__(self): + # making sure child processess are cleaned before ResourceTracker + # gets destructed. + # see https://github.com/python/cpython/issues/88887 + self._stop(use_blocking_lock=False) + + def _stop(self, use_blocking_lock=True): + if use_blocking_lock: + with self._lock: + self._stop_locked() + else: + acquired = self._lock.acquire(blocking=False) + try: + self._stop_locked() + finally: + if acquired: + self._lock.release() + + def _stop_locked( + self, + close=os.close, + waitpid=os.waitpid, + waitstatus_to_exitcode=os.waitstatus_to_exitcode, + ): + # This shouldn't happen (it might when called by a finalizer) + # so we check for it anyway. 
+ if self._lock._recursion_count() > 1: + raise self._reentrant_call_error() + if self._fd is None: + # not running + return + if self._pid is None: + return + + # closing the "alive" file descriptor stops main() + close(self._fd) + self._fd = None + + try: + _, status = waitpid(self._pid, 0) + except ChildProcessError: + self._pid = None + self._exitcode = None + return + + self._pid = None + + try: + self._exitcode = waitstatus_to_exitcode(status) + except ValueError: + # os.waitstatus_to_exitcode may raise an exception for invalid values + self._exitcode = None + + def getfd(self): + self.ensure_running() + return self._fd + + def ensure_running(self): + '''Make sure that resource tracker process is running. + + This can be run from any process. Usually a child process will use + the resource created by its parent.''' + return self._ensure_running_and_write() + + def _teardown_dead_process(self): + os.close(self._fd) + + # Clean-up to avoid dangling processes. + try: + # _pid can be None if this process is a child from another + # python process, which has started the resource_tracker. + if self._pid is not None: + os.waitpid(self._pid, 0) + except ChildProcessError: + # The resource_tracker has already been terminated. + pass + self._fd = None + self._pid = None + self._exitcode = None + + warnings.warn('resource_tracker: process died unexpectedly, ' + 'relaunching. Some resources might leak.') + + def _launch(self): + fds_to_pass = [] + try: + fds_to_pass.append(sys.stderr.fileno()) + except Exception: + pass + r, w = os.pipe() + try: + fds_to_pass.append(r) + # process will out live us, so no need to wait on pid + exe = spawn.get_executable() + args = [ + exe, + *util._args_from_interpreter_flags(), + '-c', + f'from multiprocessing.resource_tracker import main;main({r})', + ] + # bpo-33613: Register a signal mask that will block the signals. + # This signal mask will be inherited by the child that is going + # to be spawned and will protect the child from a race condition + # that can make the child die before it registers signal handlers + # for SIGINT and SIGTERM. The mask is unregistered after spawning + # the child. + prev_sigmask = None + try: + if _HAVE_SIGMASK: + prev_sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) + pid = util.spawnv_passfds(exe, args, fds_to_pass) + finally: + if prev_sigmask is not None: + signal.pthread_sigmask(signal.SIG_SETMASK, prev_sigmask) + except: + os.close(w) + raise + else: + self._fd = w + self._pid = pid + finally: + os.close(r) + + def _make_probe_message(self): + """Return a probe message.""" + if self._use_simple_format: + return b'PROBE:0:noop\n' + return ( + json.dumps( + {"cmd": "PROBE", "rtype": "noop"}, + ensure_ascii=True, + separators=(",", ":"), + ) + + "\n" + ).encode("ascii") + + def _ensure_running_and_write(self, msg=None): + with self._lock: + if self._lock._recursion_count() > 1: + # The code below is certainly not reentrant-safe, so bail out + if msg is None: + raise self._reentrant_call_error() + return self._reentrant_messages.append(msg) + + if self._fd is not None: + # resource tracker was launched before, is it still running? 
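+                # If the write below fails, the old tracker has died and a
+                # fresh one is launched; any pending message is then resent.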
+                if msg is None:
+                    to_send = self._make_probe_message()
+                else:
+                    to_send = msg
+                try:
+                    self._write(to_send)
+                except OSError:
+                    self._teardown_dead_process()
+                    self._launch()
+                else:
+                    msg = None  # message was sent in probe
+            else:
+                self._launch()
+
+            while True:
+                try:
+                    reentrant_msg = self._reentrant_messages.popleft()
+                except IndexError:
+                    break
+                self._write(reentrant_msg)
+            if msg is not None:
+                self._write(msg)
+
+    def _check_alive(self):
+        '''Check that the pipe has not been closed by sending a probe.'''
+        try:
+            # We cannot use send here as it calls ensure_running, creating
+            # a cycle.
+            os.write(self._fd, self._make_probe_message())
+        except OSError:
+            return False
+        else:
+            return True
+
+    def register(self, name, rtype):
+        '''Register name of resource with resource tracker.'''
+        self._send('REGISTER', name, rtype)
+
+    def unregister(self, name, rtype):
+        '''Unregister name of resource with resource tracker.'''
+        self._send('UNREGISTER', name, rtype)
+
+    def _write(self, msg):
+        nbytes = os.write(self._fd, msg)
+        assert nbytes == len(msg), f"{nbytes=} != {len(msg)=}"
+
+    def _send(self, cmd, name, rtype):
+        if self._use_simple_format and '\n' not in name:
+            msg = f"{cmd}:{name}:{rtype}\n".encode("ascii")
+            if len(msg) > 512:
+                # posix guarantees that writes to a pipe of less than PIPE_BUF
+                # bytes are atomic, and that PIPE_BUF >= 512
+                raise ValueError('msg too long')
+            self._ensure_running_and_write(msg)
+            return
+
+        # POSIX guarantees that writes to a pipe of less than PIPE_BUF (512 on
+        # Linux) bytes are atomic, so we want the message to stay shorter than
+        # 512 bytes. POSIX shm_open() and sem_open() require the name, including
+        # its leading slash, to be at most NAME_MAX bytes (255 on Linux).
+        # With json.dump(..., ensure_ascii=True) every non-ASCII byte becomes a
+        # 6-char escape like \uDC80. To keep the overall message atomic and
+        # therefore smaller than 512 bytes, we encode the raw name bytes with
+        # URL-safe Base64, so a 255-byte name will not exceed 340 bytes.
+        b = name.encode('utf-8', 'surrogateescape')
+        if len(b) > 255:
+            raise ValueError('shared memory name too long (max 255 bytes)')
+        b64 = base64.urlsafe_b64encode(b).decode('ascii')
+
+        payload = {"cmd": cmd, "rtype": rtype, "base64_name": b64}
+        msg = (json.dumps(payload, ensure_ascii=True, separators=(",", ":")) + "\n").encode("ascii")
+
+        # The entire JSON message is guaranteed < PIPE_BUF (512 bytes) by construction.
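+        # For instance, registering the POSIX segment "/psm_abc1234" (an
+        # illustrative name, not a real segment) would be sent as:
+        #   {"cmd":"REGISTER","rtype":"shared_memory","base64_name":"L3BzbV9hYmMxMjM0"}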
+ assert len(msg) <= 512, f"internal error: message too long ({len(msg)} bytes)" + assert msg.startswith(b'{') + + self._ensure_running_and_write(msg) + +_resource_tracker = ResourceTracker() +ensure_running = _resource_tracker.ensure_running +register = _resource_tracker.register +unregister = _resource_tracker.unregister +getfd = _resource_tracker.getfd + + +def _decode_message(line): + if line.startswith(b'{'): + try: + obj = json.loads(line.decode('ascii')) + except Exception as e: + raise ValueError("malformed resource_tracker message: %r" % (line,)) from e + + cmd = obj["cmd"] + rtype = obj["rtype"] + b64 = obj.get("base64_name", "") + + if not isinstance(cmd, str) or not isinstance(rtype, str) or not isinstance(b64, str): + raise ValueError("malformed resource_tracker fields: %r" % (obj,)) + + try: + name = base64.urlsafe_b64decode(b64).decode('utf-8', 'surrogateescape') + except ValueError as e: + raise ValueError("malformed resource_tracker base64_name: %r" % (b64,)) from e + else: + cmd, rest = line.strip().decode('ascii').split(':', maxsplit=1) + name, rtype = rest.rsplit(':', maxsplit=1) + return cmd, rtype, name + + +def main(fd): + '''Run resource tracker.''' + # protect the process from ^C and "killall python" etc + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + + for f in (sys.stdin, sys.stdout): + try: + f.close() + except Exception: + pass + + cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} + exit_code = 0 + + try: + # keep track of registered/unregistered resources + with open(fd, 'rb') as f: + for line in f: + try: + cmd, rtype, name = _decode_message(line) + cleanup_func = _CLEANUP_FUNCS.get(rtype, None) + if cleanup_func is None: + raise ValueError( + f'Cannot register {name} for automatic cleanup: ' + f'unknown resource type {rtype}') + + if cmd == 'REGISTER': + cache[rtype].add(name) + elif cmd == 'UNREGISTER': + cache[rtype].remove(name) + elif cmd == 'PROBE': + pass + else: + raise RuntimeError('unrecognized command %r' % cmd) + except Exception: + exit_code = 3 + try: + sys.excepthook(*sys.exc_info()) + except: + pass + finally: + # all processes have terminated; cleanup any remaining resources + for rtype, rtype_cache in cache.items(): + if rtype_cache: + try: + exit_code = 1 + if rtype == 'dummy': + # The test 'dummy' resource is expected to leak. + # We skip the warning (and *only* the warning) for it. + pass + else: + warnings.warn( + f'resource_tracker: There appear to be ' + f'{len(rtype_cache)} leaked {rtype} objects to ' + f'clean up at shutdown: {rtype_cache}' + ) + except Exception: + pass + for name in rtype_cache: + # For some reason the process which created and registered this + # resource has failed to unregister it. Presumably it has + # died. We therefore unlink it. + try: + try: + _CLEANUP_FUNCS[rtype](name) + except Exception as e: + exit_code = 2 + warnings.warn('resource_tracker: %r: %s' % (name, e)) + finally: + pass + + sys.exit(exit_code) diff --git a/Python314_4_x64_Template/Lib/multiprocessing/shared_memory.py b/Python314_4_x64_Template/Lib/multiprocessing/shared_memory.py new file mode 100644 index 00000000..99a8ce33 --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/shared_memory.py @@ -0,0 +1,544 @@ +"""Provides shared memory for direct access across processes. + +The API of this package is currently provisional. Refer to the +documentation for details. 
+""" + + +__all__ = [ 'SharedMemory', 'ShareableList' ] + + +from functools import partial +import mmap +import os +import errno +import struct +import secrets +import types + +if os.name == "nt": + import _winapi + _USE_POSIX = False +else: + import _posixshmem + _USE_POSIX = True + +from . import resource_tracker + +_O_CREX = os.O_CREAT | os.O_EXCL + +# FreeBSD (and perhaps other BSDs) limit names to 14 characters. +_SHM_SAFE_NAME_LENGTH = 14 + +# Shared memory block name prefix +if _USE_POSIX: + _SHM_NAME_PREFIX = '/psm_' +else: + _SHM_NAME_PREFIX = 'wnsm_' + + +def _make_filename(): + "Create a random filename for the shared memory object." + # number of random bytes to use for name + nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 + assert nbytes >= 2, '_SHM_NAME_PREFIX too long' + name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) + assert len(name) <= _SHM_SAFE_NAME_LENGTH + return name + + +class SharedMemory: + """Creates a new shared memory block or attaches to an existing + shared memory block. + + Every shared memory block is assigned a unique name. This enables + one process to create a shared memory block with a particular name + so that a different process can attach to that same shared memory + block using that same name. + + As a resource for sharing data across processes, shared memory blocks + may outlive the original process that created them. When one process + no longer needs access to a shared memory block that might still be + needed by other processes, the close() method should be called. + When a shared memory block is no longer needed by any process, the + unlink() method should be called to ensure proper cleanup.""" + + # Defaults; enables close() and unlink() to run without errors. + _name = None + _fd = -1 + _mmap = None + _buf = None + _flags = os.O_RDWR + _mode = 0o600 + _prepend_leading_slash = True if _USE_POSIX else False + _track = True + + def __init__(self, name=None, create=False, size=0, *, track=True): + if not size >= 0: + raise ValueError("'size' must be a positive integer") + if create: + self._flags = _O_CREX | os.O_RDWR + if size == 0: + raise ValueError("'size' must be a positive number different from zero") + if name is None and not self._flags & os.O_EXCL: + raise ValueError("'name' can only be None if create=True") + + self._track = track + if _USE_POSIX: + + # POSIX Shared Memory + + if name is None: + while True: + name = _make_filename() + try: + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + except FileExistsError: + continue + self._name = name + break + else: + name = "/" + name if self._prepend_leading_slash else name + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + self._name = name + try: + if create and size: + os.ftruncate(self._fd, size) + stats = os.fstat(self._fd) + size = stats.st_size + self._mmap = mmap.mmap(self._fd, size) + except OSError: + self.unlink() + raise + if self._track: + resource_tracker.register(self._name, "shared_memory") + + else: + + # Windows Named Shared Memory + + if create: + while True: + temp_name = _make_filename() if name is None else name + # Create and reserve shared memory block with this name + # until it can be attached to by mmap. 
+ h_map = _winapi.CreateFileMapping( + _winapi.INVALID_HANDLE_VALUE, + _winapi.NULL, + _winapi.PAGE_READWRITE, + (size >> 32) & 0xFFFFFFFF, + size & 0xFFFFFFFF, + temp_name + ) + try: + last_error_code = _winapi.GetLastError() + if last_error_code == _winapi.ERROR_ALREADY_EXISTS: + if name is not None: + raise FileExistsError( + errno.EEXIST, + os.strerror(errno.EEXIST), + name, + _winapi.ERROR_ALREADY_EXISTS + ) + else: + continue + self._mmap = mmap.mmap(-1, size, tagname=temp_name) + finally: + _winapi.CloseHandle(h_map) + self._name = temp_name + break + + else: + self._name = name + # Dynamically determine the existing named shared memory + # block's size which is likely a multiple of mmap.PAGESIZE. + h_map = _winapi.OpenFileMapping( + _winapi.FILE_MAP_READ, + False, + name + ) + try: + p_buf = _winapi.MapViewOfFile( + h_map, + _winapi.FILE_MAP_READ, + 0, + 0, + 0 + ) + finally: + _winapi.CloseHandle(h_map) + try: + size = _winapi.VirtualQuerySize(p_buf) + finally: + _winapi.UnmapViewOfFile(p_buf) + self._mmap = mmap.mmap(-1, size, tagname=name) + + self._size = size + self._buf = memoryview(self._mmap) + + def __del__(self): + try: + self.close() + except OSError: + pass + + def __reduce__(self): + return ( + self.__class__, + ( + self.name, + False, + self.size, + ), + ) + + def __repr__(self): + return f'{self.__class__.__name__}({self.name!r}, size={self.size})' + + @property + def buf(self): + "A memoryview of contents of the shared memory block." + return self._buf + + @property + def name(self): + "Unique name that identifies the shared memory block." + reported_name = self._name + if _USE_POSIX and self._prepend_leading_slash: + if self._name.startswith("/"): + reported_name = self._name[1:] + return reported_name + + @property + def size(self): + "Size in bytes." + return self._size + + def close(self): + """Closes access to the shared memory from this instance but does + not destroy the shared memory block.""" + if self._buf is not None: + self._buf.release() + self._buf = None + if self._mmap is not None: + self._mmap.close() + self._mmap = None + if _USE_POSIX and self._fd >= 0: + os.close(self._fd) + self._fd = -1 + + def unlink(self): + """Requests that the underlying shared memory block be destroyed. + + Unlink should be called once (and only once) across all handles + which have access to the shared memory block, even if these + handles belong to different processes. Closing and unlinking may + happen in any order, but trying to access data inside a shared + memory block after unlinking may result in memory errors, + depending on platform. + + This method has no effect on Windows, where the only way to + delete a shared memory block is to close all handles.""" + + if _USE_POSIX and self._name: + _posixshmem.shm_unlink(self._name) + if self._track: + resource_tracker.unregister(self._name, "shared_memory") + + +_encoding = "utf8" + +class ShareableList: + """Pattern for a mutable list-like object shareable via a shared + memory block. It differs from the built-in list type in that these + lists can not change their overall length (i.e. no append, insert, + etc.) 
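+
+    A short usage sketch (values are illustrative):
+
+        >>> sl = ShareableList([1, 'hello', 3.14])
+        >>> sl[1]
+        'hello'
+        >>> sl.shm.close(); sl.shm.unlink()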
+ + Because values are packed into a memoryview as bytes, the struct + packing format for any storable value must require no more than 8 + characters to describe its format.""" + + # The shared memory area is organized as follows: + # - 8 bytes: number of items (N) as a 64-bit integer + # - (N + 1) * 8 bytes: offsets of each element from the start of the + # data area + # - K bytes: the data area storing item values (with encoding and size + # depending on their respective types) + # - N * 8 bytes: `struct` format string for each element + # - N bytes: index into _back_transforms_mapping for each element + # (for reconstructing the corresponding Python value) + _types_mapping = { + int: "q", + float: "d", + bool: "xxxxxxx?", + str: "%ds", + bytes: "%ds", + None.__class__: "xxxxxx?x", + } + _alignment = 8 + _back_transforms_mapping = { + 0: lambda value: value, # int, float, bool + 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str + 2: lambda value: value.rstrip(b'\x00'), # bytes + 3: lambda _value: None, # None + } + + @staticmethod + def _extract_recreation_code(value): + """Used in concert with _back_transforms_mapping to convert values + into the appropriate Python objects when retrieving them from + the list as well as when storing them.""" + if not isinstance(value, (str, bytes, None.__class__)): + return 0 + elif isinstance(value, str): + return 1 + elif isinstance(value, bytes): + return 2 + else: + return 3 # NoneType + + def __init__(self, sequence=None, *, name=None): + if name is None or sequence is not None: + sequence = sequence or () + _formats = [ + self._types_mapping[type(item)] + if not isinstance(item, (str, bytes)) + else self._types_mapping[type(item)] % ( + self._alignment * (len(item) // self._alignment + 1), + ) + for item in sequence + ] + self._list_len = len(_formats) + assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len + offset = 0 + # The offsets of each list element into the shared memory's + # data area (0 meaning the start of the data area, not the start + # of the shared memory area). + self._allocated_offsets = [0] + for fmt in _formats: + offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) + self._allocated_offsets.append(offset) + _recreation_codes = [ + self._extract_recreation_code(item) for item in sequence + ] + requested_size = struct.calcsize( + "q" + self._format_size_metainfo + + "".join(_formats) + + self._format_packing_metainfo + + self._format_back_transform_codes + ) + + self.shm = SharedMemory(name, create=True, size=requested_size) + else: + self.shm = SharedMemory(name) + + if sequence is not None: + _enc = _encoding + struct.pack_into( + "q" + self._format_size_metainfo, + self.shm.buf, + 0, + self._list_len, + *(self._allocated_offsets) + ) + struct.pack_into( + "".join(_formats), + self.shm.buf, + self._offset_data_start, + *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) + ) + struct.pack_into( + self._format_packing_metainfo, + self.shm.buf, + self._offset_packing_formats, + *(v.encode(_enc) for v in _formats) + ) + struct.pack_into( + self._format_back_transform_codes, + self.shm.buf, + self._offset_back_transform_codes, + *(_recreation_codes) + ) + + else: + self._list_len = len(self) # Obtains size from offset 0 in buffer. + self._allocated_offsets = list( + struct.unpack_from( + self._format_size_metainfo, + self.shm.buf, + 1 * 8 + ) + ) + + def _get_packing_format(self, position): + "Gets the packing format for a single value stored in the list." 
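+        # Each format string lives in a fixed 8-byte, NUL-padded slot, so
+        # the slot for a given position starts at position * 8 within the
+        # packing-format region (see the "8s" unpack below).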
+ position = position if position >= 0 else position + self._list_len + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + v = struct.unpack_from( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8 + )[0] + fmt = v.rstrip(b'\x00') + fmt_as_str = fmt.decode(_encoding) + + return fmt_as_str + + def _get_back_transform(self, position): + "Gets the back transformation function for a single value." + + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + transform_code = struct.unpack_from( + "b", + self.shm.buf, + self._offset_back_transform_codes + position + )[0] + transform_function = self._back_transforms_mapping[transform_code] + + return transform_function + + def _set_packing_format_and_transform(self, position, fmt_as_str, value): + """Sets the packing format and back transformation code for a + single value in the list at the specified position.""" + + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + struct.pack_into( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8, + fmt_as_str.encode(_encoding) + ) + + transform_code = self._extract_recreation_code(value) + struct.pack_into( + "b", + self.shm.buf, + self._offset_back_transform_codes + position, + transform_code + ) + + def __getitem__(self, position): + position = position if position >= 0 else position + self._list_len + try: + offset = self._offset_data_start + self._allocated_offsets[position] + (v,) = struct.unpack_from( + self._get_packing_format(position), + self.shm.buf, + offset + ) + except IndexError: + raise IndexError("index out of range") + + back_transform = self._get_back_transform(position) + v = back_transform(v) + + return v + + def __setitem__(self, position, value): + position = position if position >= 0 else position + self._list_len + try: + item_offset = self._allocated_offsets[position] + offset = self._offset_data_start + item_offset + current_format = self._get_packing_format(position) + except IndexError: + raise IndexError("assignment index out of range") + + if not isinstance(value, (str, bytes)): + new_format = self._types_mapping[type(value)] + encoded_value = value + else: + allocated_length = self._allocated_offsets[position + 1] - item_offset + + encoded_value = (value.encode(_encoding) + if isinstance(value, str) else value) + if len(encoded_value) > allocated_length: + raise ValueError("bytes/str item exceeds available storage") + if current_format[-1] == "s": + new_format = current_format + else: + new_format = self._types_mapping[str] % ( + allocated_length, + ) + + self._set_packing_format_and_transform( + position, + new_format, + value + ) + struct.pack_into(new_format, self.shm.buf, offset, encoded_value) + + def __reduce__(self): + return partial(self.__class__, name=self.shm.name), () + + def __len__(self): + return struct.unpack_from("q", self.shm.buf, 0)[0] + + def __repr__(self): + return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' + + @property + def format(self): + "The struct packing format used by all currently stored items." + return "".join( + self._get_packing_format(i) for i in range(self._list_len) + ) + + @property + def _format_size_metainfo(self): + "The struct packing format used for the items' storage offsets." 
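+        # One signed 64-bit field per element plus the final end offset;
+        # e.g. a 3-item list uses "qqqq".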
+ return "q" * (self._list_len + 1) + + @property + def _format_packing_metainfo(self): + "The struct packing format used for the items' packing formats." + return "8s" * self._list_len + + @property + def _format_back_transform_codes(self): + "The struct packing format used for the items' back transforms." + return "b" * self._list_len + + @property + def _offset_data_start(self): + # - 8 bytes for the list length + # - (N + 1) * 8 bytes for the element offsets + return (self._list_len + 2) * 8 + + @property + def _offset_packing_formats(self): + return self._offset_data_start + self._allocated_offsets[-1] + + @property + def _offset_back_transform_codes(self): + return self._offset_packing_formats + self._list_len * 8 + + def count(self, value): + "L.count(value) -> integer -- return number of occurrences of value." + + return sum(value == entry for entry in self) + + def index(self, value): + """L.index(value) -> integer -- return first index of value. + Raises ValueError if the value is not present.""" + + for position, entry in enumerate(self): + if value == entry: + return position + else: + raise ValueError("ShareableList.index(x): x not in list") + + __class_getitem__ = classmethod(types.GenericAlias) diff --git a/Python313_13_x64_Template/Lib/multiprocessing/sharedctypes.py b/Python314_4_x64_Template/Lib/multiprocessing/sharedctypes.py similarity index 100% rename from Python313_13_x64_Template/Lib/multiprocessing/sharedctypes.py rename to Python314_4_x64_Template/Lib/multiprocessing/sharedctypes.py diff --git a/Python313_13_x64_Template/Lib/multiprocessing/spawn.py b/Python314_4_x64_Template/Lib/multiprocessing/spawn.py similarity index 100% rename from Python313_13_x64_Template/Lib/multiprocessing/spawn.py rename to Python314_4_x64_Template/Lib/multiprocessing/spawn.py diff --git a/Python314_4_x64_Template/Lib/multiprocessing/synchronize.py b/Python314_4_x64_Template/Lib/multiprocessing/synchronize.py new file mode 100644 index 00000000..9188114a --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/synchronize.py @@ -0,0 +1,411 @@ +# +# Module implementing synchronization primitives +# +# multiprocessing/synchronize.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' + ] + +import threading +import sys +import tempfile +import _multiprocessing +import time + +from . import context +from . import process +from . import util + +# TODO: Do any platforms still lack a functioning sem_open? +try: + from _multiprocessing import SemLock, sem_unlink +except ImportError: + raise ImportError("This platform lacks a functioning sem_open" + + " implementation. 
https://github.com/python/cpython/issues/48020.") + +# +# Constants +# + +# These match the enum in Modules/_multiprocessing/semaphore.c +RECURSIVE_MUTEX = 0 +SEMAPHORE = 1 + +SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX + +# +# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` +# + +class SemLock(object): + + _rand = tempfile._RandomNameSequence() + + def __init__(self, kind, value, maxvalue, *, ctx): + if ctx is None: + ctx = context._default_context.get_context() + self._is_fork_ctx = ctx.get_start_method() == 'fork' + unlink_now = sys.platform == 'win32' or self._is_fork_ctx + for i in range(100): + try: + sl = self._semlock = _multiprocessing.SemLock( + kind, value, maxvalue, self._make_name(), + unlink_now) + except FileExistsError: + pass + else: + break + else: + raise FileExistsError('cannot find name for semaphore') + + util.debug('created semlock with handle %s' % sl.handle) + self._make_methods() + + if sys.platform != 'win32': + def _after_fork(obj): + obj._semlock._after_fork() + util.register_after_fork(self, _after_fork) + + if self._semlock.name is not None: + # We only get here if we are on Unix with forking + # disabled. When the object is garbage collected or the + # process shuts down we unlink the semaphore name + from .resource_tracker import register + register(self._semlock.name, "semaphore") + util.Finalize(self, SemLock._cleanup, (self._semlock.name,), + exitpriority=0) + + @staticmethod + def _cleanup(name): + from .resource_tracker import unregister + sem_unlink(name) + unregister(name, "semaphore") + + def _make_methods(self): + self.acquire = self._semlock.acquire + self.release = self._semlock.release + + def locked(self): + return self._semlock._is_zero() + + def __enter__(self): + return self._semlock.__enter__() + + def __exit__(self, *args): + return self._semlock.__exit__(*args) + + def __getstate__(self): + context.assert_spawning(self) + sl = self._semlock + if sys.platform == 'win32': + h = context.get_spawning_popen().duplicate_for_child(sl.handle) + else: + if self._is_fork_ctx: + raise RuntimeError('A SemLock created in a fork context is being ' + 'shared with a process in a spawn context. This is ' + 'not supported. Please use the same context to create ' + 'multiprocessing objects and Process.') + h = sl.handle + return (h, sl.kind, sl.maxvalue, sl.name) + + def __setstate__(self, state): + self._semlock = _multiprocessing.SemLock._rebuild(*state) + util.debug('recreated blocker with handle %r' % state[0]) + self._make_methods() + # Ensure that deserialized SemLock can be serialized again (gh-108520). + self._is_fork_ctx = False + + @staticmethod + def _make_name(): + return '%s-%s' % (process.current_process()._config['semprefix'], + next(SemLock._rand)) + +# +# Semaphore +# + +class Semaphore(SemLock): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) + + def get_value(self): + '''Returns current value of Semaphore. + + Raises NotImplementedError on Mac OSX + because of broken sem_getvalue(). 
+ ''' + return self._semlock._get_value() + + def __repr__(self): + try: + value = self.get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s)>' % (self.__class__.__name__, value) + +# +# Bounded semaphore +# + +class BoundedSemaphore(Semaphore): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) + + def __repr__(self): + try: + value = self.get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s, maxvalue=%s)>' % \ + (self.__class__.__name__, value, self._semlock.maxvalue) + +# +# Non-recursive lock +# + +class Lock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + elif not self._semlock._is_zero(): + name = 'None' + elif self._semlock._count() > 0: + name = 'SomeOtherThread' + else: + name = 'SomeOtherProcess' + except Exception: + name = 'unknown' + return '<%s(owner=%s)>' % (self.__class__.__name__, name) + +# +# Recursive lock +# + +class RLock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + count = self._semlock._count() + elif not self._semlock._is_zero(): + name, count = 'None', 0 + elif self._semlock._count() > 0: + name, count = 'SomeOtherThread', 'nonzero' + else: + name, count = 'SomeOtherProcess', 'nonzero' + except Exception: + name, count = 'unknown', 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) + +# +# Condition variable +# + +class Condition(object): + + def __init__(self, lock=None, *, ctx): + self._lock = lock or ctx.RLock() + self._sleeping_count = ctx.Semaphore(0) + self._woken_count = ctx.Semaphore(0) + self._wait_semaphore = ctx.Semaphore(0) + self._make_methods() + + def __getstate__(self): + context.assert_spawning(self) + return (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) + + def __setstate__(self, state): + (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) = state + self._make_methods() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def _make_methods(self): + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __repr__(self): + try: + num_waiters = (self._sleeping_count.get_value() - + self._woken_count.get_value()) + except Exception: + num_waiters = 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) + + def wait(self, timeout=None): + assert self._lock._semlock._is_mine(), \ + 'must acquire() condition before using wait()' + + # indicate that this thread is going to sleep + self._sleeping_count.release() + + # release lock + count = self._lock._semlock._count() + for i in range(count): + self._lock.release() + + try: + # wait for notification or timeout + return self._wait_semaphore.acquire(True, timeout) + finally: + # indicate that this thread has woken + self._woken_count.release() + + # reacquire lock + for i in range(count): + self._lock.acquire() + + def notify(self, n=1): + assert self._lock._semlock._is_mine(), 'lock is not owned' + assert 
not self._wait_semaphore.acquire( + False), ('notify: Should not have been able to acquire ' + + '_wait_semaphore') + + # to take account of timeouts since last notify*() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res, ('notify: Bug in sleeping_count.acquire' + + '- res should not be False') + + sleepers = 0 + while sleepers < n and self._sleeping_count.acquire(False): + self._wait_semaphore.release() # wake up one sleeper + sleepers += 1 + + if sleepers: + for i in range(sleepers): + self._woken_count.acquire() # wait for a sleeper to wake + + # rezero wait_semaphore in case some timeouts just happened + while self._wait_semaphore.acquire(False): + pass + + def notify_all(self): + self.notify(n=sys.maxsize) + + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = time.monotonic() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - time.monotonic() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + +# +# Event +# + +class Event(object): + + def __init__(self, *, ctx): + self._cond = ctx.Condition(ctx.Lock()) + self._flag = ctx.Semaphore(0) + + def is_set(self): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + return True + return False + + def set(self): + with self._cond: + self._flag.acquire(False) + self._flag.release() + self._cond.notify_all() + + def clear(self): + with self._cond: + self._flag.acquire(False) + + def wait(self, timeout=None): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + else: + self._cond.wait(timeout) + + if self._flag.acquire(False): + self._flag.release() + return True + return False + + def __repr__(self): + set_status = 'set' if self.is_set() else 'unset' + return f"<{type(self).__qualname__} at {id(self):#x} {set_status}>" +# +# Barrier +# + +class Barrier(threading.Barrier): + + def __init__(self, parties, action=None, timeout=None, *, ctx): + import struct + from .heap import BufferWrapper + wrapper = BufferWrapper(struct.calcsize('i') * 2) + cond = ctx.Condition() + self.__setstate__((parties, action, timeout, cond, wrapper)) + self._state = 0 + self._count = 0 + + def __setstate__(self, state): + (self._parties, self._action, self._timeout, + self._cond, self._wrapper) = state + self._array = self._wrapper.create_memoryview().cast('i') + + def __getstate__(self): + return (self._parties, self._action, self._timeout, + self._cond, self._wrapper) + + @property + def _state(self): + return self._array[0] + + @_state.setter + def _state(self, value): + self._array[0] = value + + @property + def _count(self): + return self._array[1] + + @_count.setter + def _count(self, value): + self._array[1] = value diff --git a/Python314_4_x64_Template/Lib/multiprocessing/util.py b/Python314_4_x64_Template/Lib/multiprocessing/util.py new file mode 100644 index 00000000..549fb07c --- /dev/null +++ b/Python314_4_x64_Template/Lib/multiprocessing/util.py @@ -0,0 +1,560 @@ +# +# Module providing various facilities to other parts of the package +# +# multiprocessing/util.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
+# + +import os +import itertools +import sys +import weakref +import atexit +import threading # we want threading to install it's + # cleanup function before multiprocessing does +from subprocess import _args_from_interpreter_flags # noqa: F401 + +from . import process + +__all__ = [ + 'sub_debug', 'debug', 'info', 'sub_warning', 'warn', 'get_logger', + 'log_to_stderr', 'get_temp_dir', 'register_after_fork', + 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', + 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', + ] + +# +# Logging +# + +NOTSET = 0 +SUBDEBUG = 5 +DEBUG = 10 +INFO = 20 +SUBWARNING = 25 +WARNING = 30 + +LOGGER_NAME = 'multiprocessing' +DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' + +_logger = None +_log_to_stderr = False + +def sub_debug(msg, *args): + if _logger: + _logger.log(SUBDEBUG, msg, *args, stacklevel=2) + +def debug(msg, *args): + if _logger: + _logger.log(DEBUG, msg, *args, stacklevel=2) + +def info(msg, *args): + if _logger: + _logger.log(INFO, msg, *args, stacklevel=2) + +def warn(msg, *args): + if _logger: + _logger.log(WARNING, msg, *args, stacklevel=2) + +def sub_warning(msg, *args): + if _logger: + _logger.log(SUBWARNING, msg, *args, stacklevel=2) + +def get_logger(): + ''' + Returns logger used by multiprocessing + ''' + global _logger + import logging + + with logging._lock: + if not _logger: + + _logger = logging.getLogger(LOGGER_NAME) + _logger.propagate = 0 + + # XXX multiprocessing should cleanup before logging + if hasattr(atexit, 'unregister'): + atexit.unregister(_exit_function) + atexit.register(_exit_function) + else: + atexit._exithandlers.remove((_exit_function, (), {})) + atexit._exithandlers.append((_exit_function, (), {})) + + return _logger + +def log_to_stderr(level=None): + ''' + Turn on logging and add a handler which prints to stderr + ''' + global _log_to_stderr + import logging + + logger = get_logger() + formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + + if level: + logger.setLevel(level) + _log_to_stderr = True + return _logger + + +# Abstract socket support + +def _platform_supports_abstract_sockets(): + return sys.platform in ("linux", "android") + + +def is_abstract_socket_namespace(address): + if not address: + return False + if isinstance(address, bytes): + return address[0] == 0 + elif isinstance(address, str): + return address[0] == "\0" + raise TypeError(f'address type of {address!r} unrecognized') + + +abstract_sockets_supported = _platform_supports_abstract_sockets() + +# +# Function returning a temp directory which will be removed on exit +# + +# Maximum length of a NULL-terminated [1] socket file path is usually +# between 92 and 108 [2], but Linux is known to use a size of 108 [3]. +# BSD-based systems usually use a size of 104 or 108 and Windows does +# not create AF_UNIX sockets. +# +# [1]: https://github.com/python/cpython/issues/140734 +# [2]: https://pubs.opengroup.org/onlinepubs/9799919799/basedefs/sys_un.h.html +# [3]: https://man7.org/linux/man-pages/man7/unix.7.html + +if sys.platform == 'linux': + _SUN_PATH_MAX = 108 +elif sys.platform.startswith(('openbsd', 'freebsd')): + _SUN_PATH_MAX = 104 +else: + # On Windows platforms, we do not create AF_UNIX sockets. 
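+    # On other platforms, fall back to the conservative lower bound of
+    # 92 bytes noted above.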
+ _SUN_PATH_MAX = None if os.name == 'nt' else 92 + +def _remove_temp_dir(rmtree, tempdir): + rmtree(tempdir) + + current_process = process.current_process() + # current_process() can be None if the finalizer is called + # late during Python finalization + if current_process is not None: + current_process._config['tempdir'] = None + +def _get_base_temp_dir(tempfile): + """Get a temporary directory where socket files will be created. + + To prevent additional imports, pass a pre-imported 'tempfile' module. + """ + if os.name == 'nt': + return None + # Most of the time, the default temporary directory is /tmp. Thus, + # listener sockets files "$TMPDIR/pymp-XXXXXXXX/sock-XXXXXXXX" do + # not have a path length exceeding SUN_PATH_MAX. + # + # If users specify their own temporary directory, we may be unable + # to create those files. Therefore, we fall back to the system-wide + # temporary directory /tmp, assumed to exist on POSIX systems. + # + # See https://github.com/python/cpython/issues/132124. + base_tempdir = tempfile.gettempdir() + # Files created in a temporary directory are suffixed by a string + # generated by tempfile._RandomNameSequence, which, by design, + # is 8 characters long. + # + # Thus, the socket file path length (without NULL terminator) will be: + # + # len(base_tempdir + '/pymp-XXXXXXXX' + '/sock-XXXXXXXX') + sun_path_len = len(base_tempdir) + 14 + 14 + # Strict inequality to account for the NULL terminator. + # See https://github.com/python/cpython/issues/140734. + if sun_path_len < _SUN_PATH_MAX: + return base_tempdir + # Fallback to the default system-wide temporary directory. + # This ignores user-defined environment variables. + # + # On POSIX systems, /tmp MUST be writable by any application [1]. + # We however emit a warning if this is not the case to prevent + # obscure errors later in the execution. + # + # On some legacy systems, /var/tmp and /usr/tmp can be present + # and will be used instead. + # + # [1]: https://refspecs.linuxfoundation.org/FHS_3.0/fhs/ch03s18.html + dirlist = ['/tmp', '/var/tmp', '/usr/tmp'] + try: + base_system_tempdir = tempfile._get_default_tempdir(dirlist) + except FileNotFoundError: + warn("Process-wide temporary directory %s will not be usable for " + "creating socket files and no usable system-wide temporary " + "directory was found in %s", base_tempdir, dirlist) + # At this point, the system-wide temporary directory is not usable + # but we may assume that the user-defined one is, even if we will + # not be able to write socket files out there. 
+ return base_tempdir + warn("Ignoring user-defined temporary directory: %s", base_tempdir) + # at most max(map(len, dirlist)) + 14 + 14 = 36 characters + assert len(base_system_tempdir) + 14 + 14 < _SUN_PATH_MAX + return base_system_tempdir + +def get_temp_dir(): + # get name of a temp directory which will be automatically cleaned up + tempdir = process.current_process()._config.get('tempdir') + if tempdir is None: + import shutil, tempfile + base_tempdir = _get_base_temp_dir(tempfile) + tempdir = tempfile.mkdtemp(prefix='pymp-', dir=base_tempdir) + info('created temp directory %s', tempdir) + # keep a strong reference to shutil.rmtree(), since the finalizer + # can be called late during Python shutdown + Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), + exitpriority=-100) + process.current_process()._config['tempdir'] = tempdir + return tempdir + +# +# Support for reinitialization of objects when bootstrapping a child process +# + +_afterfork_registry = weakref.WeakValueDictionary() +_afterfork_counter = itertools.count() + +def _run_after_forkers(): + items = list(_afterfork_registry.items()) + items.sort() + for (index, ident, func), obj in items: + try: + func(obj) + except Exception as e: + info('after forker raised exception %s', e) + +def register_after_fork(obj, func): + _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj + +# +# Finalization using weakrefs +# + +_finalizer_registry = {} +_finalizer_counter = itertools.count() + + +class Finalize(object): + ''' + Class which supports object finalization using weakrefs + ''' + def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): + if (exitpriority is not None) and not isinstance(exitpriority,int): + raise TypeError( + "Exitpriority ({0!r}) must be None or int, not {1!s}".format( + exitpriority, type(exitpriority))) + + if obj is not None: + self._weakref = weakref.ref(obj, self) + elif exitpriority is None: + raise ValueError("Without object, exitpriority cannot be None") + + self._callback = callback + self._args = args + self._kwargs = kwargs or {} + self._key = (exitpriority, next(_finalizer_counter)) + self._pid = os.getpid() + + _finalizer_registry[self._key] = self + + def __call__(self, wr=None, + # Need to bind these locally because the globals can have + # been cleared at shutdown + _finalizer_registry=_finalizer_registry, + sub_debug=sub_debug, getpid=os.getpid): + ''' + Run the callback unless it has already been called or cancelled + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + sub_debug('finalizer no longer registered') + else: + if self._pid != getpid(): + sub_debug('finalizer ignored because different process') + res = None + else: + sub_debug('finalizer calling %s with args %s and kwargs %s', + self._callback, self._args, self._kwargs) + res = self._callback(*self._args, **self._kwargs) + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + return res + + def cancel(self): + ''' + Cancel finalization of the object + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + pass + else: + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + + def still_active(self): + ''' + Return whether this finalizer is still waiting to invoke callback + ''' + return self._key in _finalizer_registry + + def __repr__(self): + try: + obj = self._weakref() + except (AttributeError, TypeError): + obj = None + + if obj is None: + return '<%s object, dead>' % self.__class__.__name__ + + 
x = '<%s object, callback=%s' % ( + self.__class__.__name__, + getattr(self._callback, '__name__', self._callback)) + if self._args: + x += ', args=' + str(self._args) + if self._kwargs: + x += ', kwargs=' + str(self._kwargs) + if self._key[0] is not None: + x += ', exitpriority=' + str(self._key[0]) + return x + '>' + + +def _run_finalizers(minpriority=None): + ''' + Run all finalizers whose exit priority is not None and at least minpriority + + Finalizers with highest priority are called first; finalizers with + the same priority will be called in reverse order of creation. + ''' + if _finalizer_registry is None: + # This function may be called after this module's globals are + # destroyed. See the _exit_function function in this module for more + # notes. + return + + if minpriority is None: + f = lambda p : p[0] is not None + else: + f = lambda p : p[0] is not None and p[0] >= minpriority + + # Careful: _finalizer_registry may be mutated while this function + # is running (either by a GC run or by another thread). + + # list(_finalizer_registry) should be atomic, while + # list(_finalizer_registry.items()) is not. + keys = [key for key in list(_finalizer_registry) if f(key)] + keys.sort(reverse=True) + + for key in keys: + finalizer = _finalizer_registry.get(key) + # key may have been removed from the registry + if finalizer is not None: + sub_debug('calling %s', finalizer) + try: + finalizer() + except Exception: + import traceback + traceback.print_exc() + + if minpriority is None: + _finalizer_registry.clear() + +# +# Clean up on exit +# + +def is_exiting(): + ''' + Returns true if the process is shutting down + ''' + return _exiting or _exiting is None + +_exiting = False + +def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, + active_children=process.active_children, + current_process=process.current_process): + # We hold on to references to functions in the arglist due to the + # situation described below, where this function is called after this + # module's globals are destroyed. + + global _exiting + + if not _exiting: + _exiting = True + + info('process shutting down') + debug('running all "atexit" finalizers with priority >= 0') + _run_finalizers(0) + + if current_process() is not None: + # We check if the current process is None here because if + # it's None, any call to ``active_children()`` will raise + # an AttributeError (active_children winds up trying to + # get attributes from util._current_process). One + # situation where this can happen is if someone has + # manipulated sys.modules, causing this module to be + # garbage collected. The destructor for the module type + # then replaces all values in the module dict with None. + # For instance, after setuptools runs a test it replaces + # sys.modules with a copy created earlier. See issues + # #9775 and #15881. Also related: #4106, #9205, and + # #9207. 
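+            # Terminate daemonic children first, then join whatever is
+            # still registered so shutdown waits for it to exit.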
+ + for p in active_children(): + if p.daemon: + info('calling terminate() for daemon %s', p.name) + p._popen.terminate() + + for p in active_children(): + info('calling join() for process %s', p.name) + p.join() + + debug('running the remaining "atexit" finalizers') + _run_finalizers() + +atexit.register(_exit_function) + +# +# Some fork aware types +# + +class ForkAwareThreadLock(object): + def __init__(self): + self._lock = threading.Lock() + self.acquire = self._lock.acquire + self.release = self._lock.release + register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) + + def _at_fork_reinit(self): + self._lock._at_fork_reinit() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + +class ForkAwareLocal(threading.local): + def __init__(self): + register_after_fork(self, lambda obj : obj.__dict__.clear()) + def __reduce__(self): + return type(self), () + +# +# Close fds except those specified +# + +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except Exception: + MAXFD = 256 + +def close_all_fds_except(fds): + fds = list(fds) + [-1, MAXFD] + fds.sort() + assert fds[-1] == MAXFD, 'fd too large' + for i in range(len(fds) - 1): + os.closerange(fds[i]+1, fds[i+1]) +# +# Close sys.stdin and replace stdin with os.devnull +# + +def _close_stdin(): + if sys.stdin is None: + return + + try: + sys.stdin.close() + except (OSError, ValueError): + pass + + try: + fd = os.open(os.devnull, os.O_RDONLY) + try: + sys.stdin = open(fd, encoding="utf-8", closefd=False) + except: + os.close(fd) + raise + except (OSError, ValueError): + pass + +# +# Flush standard streams, if any +# + +def _flush_std_streams(): + try: + sys.stdout.flush() + except (AttributeError, ValueError): + pass + try: + sys.stderr.flush() + except (AttributeError, ValueError): + pass + +# +# Start a program with only specified fds kept open +# + +def spawnv_passfds(path, args, passfds): + import _posixsubprocess + passfds = tuple(sorted(map(int, passfds))) + errpipe_read, errpipe_write = os.pipe() + try: + return _posixsubprocess.fork_exec( + args, [path], True, passfds, None, None, + -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, + False, False, -1, None, None, None, -1, None) + finally: + os.close(errpipe_read) + os.close(errpipe_write) + + +def close_fds(*fds): + """Close each file descriptor given as an argument""" + for fd in fds: + os.close(fd) + + +def _cleanup_tests(): + """Cleanup multiprocessing resources when multiprocessing tests + completed.""" + + from test import support + + # cleanup multiprocessing + process._cleanup() + + # Stop the ForkServer process if it's running + from multiprocessing import forkserver + forkserver._forkserver._stop() + + # Stop the ResourceTracker process if it's running + from multiprocessing import resource_tracker + resource_tracker._resource_tracker._stop() + + # bpo-37421: Explicitly call _run_finalizers() to remove immediately + # temporary directories created by multiprocessing.util.get_temp_dir(). 
+ _run_finalizers() + support.gc_collect() + + support.reap_children() diff --git a/Python313_13_x64_Template/Lib/netrc.py b/Python314_4_x64_Template/Lib/netrc.py similarity index 100% rename from Python313_13_x64_Template/Lib/netrc.py rename to Python314_4_x64_Template/Lib/netrc.py diff --git a/Python313_13_x64_Template/Lib/ntpath.py b/Python314_4_x64_Template/Lib/ntpath.py similarity index 100% rename from Python313_13_x64_Template/Lib/ntpath.py rename to Python314_4_x64_Template/Lib/ntpath.py diff --git a/Python314_4_x64_Template/Lib/nturl2path.py b/Python314_4_x64_Template/Lib/nturl2path.py new file mode 100644 index 00000000..57c7858d --- /dev/null +++ b/Python314_4_x64_Template/Lib/nturl2path.py @@ -0,0 +1,74 @@ +"""Convert a NT pathname to a file URL and vice versa. + +This module only exists to provide OS-specific code +for urllib.requests, thus do not use directly. +""" +# Testing is done through test_nturl2path. + +import warnings + + +warnings._deprecated( + __name__, + message=f"{warnings._DEPRECATED_MSG}; use 'urllib.request' instead", + remove=(3, 19)) + +def url2pathname(url): + """OS-specific conversion from a relative URL of the 'file' scheme + to a file system path; not recommended for general use.""" + # e.g. + # ///C|/foo/bar/spam.foo + # and + # ///C:/foo/bar/spam.foo + # become + # C:\foo\bar\spam.foo + import urllib.parse + if url[:3] == '///': + # URL has an empty authority section, so the path begins on the third + # character. + url = url[2:] + elif url[:12] == '//localhost/': + # Skip past 'localhost' authority. + url = url[11:] + if url[:3] == '///': + # Skip past extra slash before UNC drive in URL path. + url = url[1:] + else: + if url[:1] == '/' and url[2:3] in (':', '|'): + # Skip past extra slash before DOS drive in URL path. + url = url[1:] + if url[1:2] == '|': + # Older URLs use a pipe after a drive letter + url = url[:1] + ':' + url[2:] + return urllib.parse.unquote(url.replace('/', '\\')) + +def pathname2url(p): + """OS-specific conversion from a file system path to a relative URL + of the 'file' scheme; not recommended for general use.""" + # e.g. + # C:\foo\bar\spam.foo + # becomes + # ///C:/foo/bar/spam.foo + import ntpath + import urllib.parse + # First, clean up some special forms. We are going to sacrifice + # the additional information anyway + p = p.replace('\\', '/') + if p[:4] == '//?/': + p = p[4:] + if p[:4].upper() == 'UNC/': + p = '//' + p[4:] + drive, root, tail = ntpath.splitroot(p) + if drive: + if drive[1:] == ':': + # DOS drive specified. Add three slashes to the start, producing + # an authority section with a zero-length authority, and a path + # section starting with a single slash. + drive = f'///{drive}' + drive = urllib.parse.quote(drive, safe='/:') + elif root: + # Add explicitly empty authority to path beginning with one slash. + root = f'//{root}' + + tail = urllib.parse.quote(tail) + return drive + root + tail diff --git a/Python313_13_x64_Template/Lib/numbers.py b/Python314_4_x64_Template/Lib/numbers.py similarity index 100% rename from Python313_13_x64_Template/Lib/numbers.py rename to Python314_4_x64_Template/Lib/numbers.py diff --git a/Python314_4_x64_Template/Lib/opcode.py b/Python314_4_x64_Template/Lib/opcode.py new file mode 100644 index 00000000..0e9520b6 --- /dev/null +++ b/Python314_4_x64_Template/Lib/opcode.py @@ -0,0 +1,122 @@ + +""" +opcode module - potentially shared between dis and other modules which +operate on bytecodes (e.g. peephole optimizers). 
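+
+A small illustrative lookup using names defined in this module:
+
+    >>> import opcode
+    >>> opcode.opname[opcode.opmap['RETURN_VALUE']]
+    'RETURN_VALUE'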
+""" + + +__all__ = ["cmp_op", "stack_effect", "hascompare", "opname", "opmap", + "HAVE_ARGUMENT", "EXTENDED_ARG", "hasarg", "hasconst", "hasname", + "hasjump", "hasjrel", "hasjabs", "hasfree", "haslocal", "hasexc"] + +import builtins +import _opcode +from _opcode import stack_effect + +from _opcode_metadata import (_specializations, _specialized_opmap, opmap, # noqa: F401 + HAVE_ARGUMENT, MIN_INSTRUMENTED_OPCODE) # noqa: F401 +EXTENDED_ARG = opmap['EXTENDED_ARG'] + +opname = ['<%r>' % (op,) for op in range(max(opmap.values()) + 1)] +for m in (opmap, _specialized_opmap): + for op, i in m.items(): + opname[i] = op + +cmp_op = ('<', '<=', '==', '!=', '>', '>=') + +# These lists are documented as part of the dis module's API +hasarg = [op for op in opmap.values() if _opcode.has_arg(op)] +hasconst = [op for op in opmap.values() if _opcode.has_const(op)] +hasname = [op for op in opmap.values() if _opcode.has_name(op)] +hasjump = [op for op in opmap.values() if _opcode.has_jump(op)] +hasjrel = hasjump # for backward compatibility +hasjabs = [] +hasfree = [op for op in opmap.values() if _opcode.has_free(op)] +haslocal = [op for op in opmap.values() if _opcode.has_local(op)] +hasexc = [op for op in opmap.values() if _opcode.has_exc(op)] + + +_intrinsic_1_descs = _opcode.get_intrinsic1_descs() +_intrinsic_2_descs = _opcode.get_intrinsic2_descs() +_special_method_names = _opcode.get_special_method_names() +_common_constants = [builtins.AssertionError, builtins.NotImplementedError, + builtins.tuple, builtins.all, builtins.any] +_nb_ops = _opcode.get_nb_ops() + +hascompare = [opmap["COMPARE_OP"]] + +_cache_format = { + "LOAD_GLOBAL": { + "counter": 1, + "index": 1, + "module_keys_version": 1, + "builtin_keys_version": 1, + }, + "BINARY_OP": { + "counter": 1, + "descr": 4, + }, + "UNPACK_SEQUENCE": { + "counter": 1, + }, + "COMPARE_OP": { + "counter": 1, + }, + "CONTAINS_OP": { + "counter": 1, + }, + "FOR_ITER": { + "counter": 1, + }, + "LOAD_SUPER_ATTR": { + "counter": 1, + }, + "LOAD_ATTR": { + "counter": 1, + "version": 2, + "keys_version": 2, + "descr": 4, + }, + "STORE_ATTR": { + "counter": 1, + "version": 2, + "index": 1, + }, + "CALL": { + "counter": 1, + "func_version": 2, + }, + "CALL_KW": { + "counter": 1, + "func_version": 2, + }, + "STORE_SUBSCR": { + "counter": 1, + }, + "SEND": { + "counter": 1, + }, + "JUMP_BACKWARD": { + "counter": 1, + }, + "TO_BOOL": { + "counter": 1, + "version": 2, + }, + "POP_JUMP_IF_TRUE": { + "counter": 1, + }, + "POP_JUMP_IF_FALSE": { + "counter": 1, + }, + "POP_JUMP_IF_NONE": { + "counter": 1, + }, + "POP_JUMP_IF_NOT_NONE": { + "counter": 1, + }, +} + +_inline_cache_entries = { + name : sum(value.values()) for (name, value) in _cache_format.items() +} diff --git a/Python314_4_x64_Template/Lib/operator.py b/Python314_4_x64_Template/Lib/operator.py new file mode 100644 index 00000000..1b765522 --- /dev/null +++ b/Python314_4_x64_Template/Lib/operator.py @@ -0,0 +1,475 @@ +""" +Operator Interface + +This module exports a set of functions corresponding to the intrinsic +operators of Python. For example, operator.add(x, y) is equivalent +to the expression x+y. The function names are those used for special +methods; variants without leading and trailing '__' are also provided +for convenience. + +This is the pure Python implementation of the module. 
+""" + +__all__ = ['abs', 'add', 'and_', 'attrgetter', 'call', 'concat', 'contains', 'countOf', + 'delitem', 'eq', 'floordiv', 'ge', 'getitem', 'gt', 'iadd', 'iand', + 'iconcat', 'ifloordiv', 'ilshift', 'imatmul', 'imod', 'imul', + 'index', 'indexOf', 'inv', 'invert', 'ior', 'ipow', 'irshift', + 'is_', 'is_none', 'is_not', 'is_not_none', 'isub', 'itemgetter', 'itruediv', + 'ixor', 'le', 'length_hint', 'lshift', 'lt', 'matmul', 'methodcaller', 'mod', + 'mul', 'ne', 'neg', 'not_', 'or_', 'pos', 'pow', 'rshift', + 'setitem', 'sub', 'truediv', 'truth', 'xor'] + +from builtins import abs as _abs + + +# Comparison Operations *******************************************************# + +def lt(a, b): + "Same as a < b." + return a < b + +def le(a, b): + "Same as a <= b." + return a <= b + +def eq(a, b): + "Same as a == b." + return a == b + +def ne(a, b): + "Same as a != b." + return a != b + +def ge(a, b): + "Same as a >= b." + return a >= b + +def gt(a, b): + "Same as a > b." + return a > b + +# Logical Operations **********************************************************# + +def not_(a): + "Same as not a." + return not a + +def truth(a): + "Return True if a is true, False otherwise." + return True if a else False + +def is_(a, b): + "Same as a is b." + return a is b + +def is_not(a, b): + "Same as a is not b." + return a is not b + +def is_none(a): + "Same as a is None." + return a is None + +def is_not_none(a): + "Same as a is not None." + return a is not None + +# Mathematical/Bitwise Operations *********************************************# + +def abs(a): + "Same as abs(a)." + return _abs(a) + +def add(a, b): + "Same as a + b." + return a + b + +def and_(a, b): + "Same as a & b." + return a & b + +def floordiv(a, b): + "Same as a // b." + return a // b + +def index(a): + "Same as a.__index__()." + return a.__index__() + +def inv(a): + "Same as ~a." + return ~a +invert = inv + +def lshift(a, b): + "Same as a << b." + return a << b + +def mod(a, b): + "Same as a % b." + return a % b + +def mul(a, b): + "Same as a * b." + return a * b + +def matmul(a, b): + "Same as a @ b." + return a @ b + +def neg(a): + "Same as -a." + return -a + +def or_(a, b): + "Same as a | b." + return a | b + +def pos(a): + "Same as +a." + return +a + +def pow(a, b): + "Same as a ** b." + return a ** b + +def rshift(a, b): + "Same as a >> b." + return a >> b + +def sub(a, b): + "Same as a - b." + return a - b + +def truediv(a, b): + "Same as a / b." + return a / b + +def xor(a, b): + "Same as a ^ b." + return a ^ b + +# Sequence Operations *********************************************************# + +def concat(a, b): + "Same as a + b, for a and b sequences." + if not hasattr(a, '__getitem__'): + msg = "'%s' object can't be concatenated" % type(a).__name__ + raise TypeError(msg) + return a + b + +def contains(a, b): + "Same as b in a (note reversed operands)." + return b in a + +def countOf(a, b): + "Return the number of items in a which are, or which equal, b." + count = 0 + for i in a: + if i is b or i == b: + count += 1 + return count + +def delitem(a, b): + "Same as del a[b]." + del a[b] + +def getitem(a, b): + "Same as a[b]." + return a[b] + +def indexOf(a, b): + "Return the first index of b in a." + for i, j in enumerate(a): + if j is b or j == b: + return i + else: + raise ValueError('sequence.index(x): x not in sequence') + +def setitem(a, b, c): + "Same as a[b] = c." + a[b] = c + +def length_hint(obj, default=0): + """ + Return an estimate of the number of items in obj. 
+ This is useful for presizing containers when building from an iterable. + + If the object supports len(), the result will be exact. Otherwise, it may + over- or under-estimate by an arbitrary amount. The result will be an + integer >= 0. + """ + if not isinstance(default, int): + msg = ("'%s' object cannot be interpreted as an integer" % + type(default).__name__) + raise TypeError(msg) + + try: + return len(obj) + except TypeError: + pass + + try: + hint = type(obj).__length_hint__ + except AttributeError: + return default + + try: + val = hint(obj) + except TypeError: + return default + if val is NotImplemented: + return default + if not isinstance(val, int): + msg = ('__length_hint__ must be integer, not %s' % + type(val).__name__) + raise TypeError(msg) + if val < 0: + msg = '__length_hint__() should return >= 0' + raise ValueError(msg) + return val + +# Other Operations ************************************************************# + +def call(obj, /, *args, **kwargs): + """Same as obj(*args, **kwargs).""" + return obj(*args, **kwargs) + +# Generalized Lookup Objects **************************************************# + +class attrgetter: + """ + Return a callable object that fetches the given attribute(s) from its operand. + After f = attrgetter('name'), the call f(r) returns r.name. + After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date). + After h = attrgetter('name.first', 'name.last'), the call h(r) returns + (r.name.first, r.name.last). + """ + __slots__ = ('_attrs', '_call') + + def __init__(self, attr, /, *attrs): + if not attrs: + if not isinstance(attr, str): + raise TypeError('attribute name must be a string') + self._attrs = (attr,) + names = attr.split('.') + def func(obj): + for name in names: + obj = getattr(obj, name) + return obj + self._call = func + else: + self._attrs = (attr,) + attrs + getters = tuple(map(attrgetter, self._attrs)) + def func(obj): + return tuple(getter(obj) for getter in getters) + self._call = func + + def __call__(self, obj, /): + return self._call(obj) + + def __repr__(self): + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__qualname__, + ', '.join(map(repr, self._attrs))) + + def __reduce__(self): + return self.__class__, self._attrs + +class itemgetter: + """ + Return a callable object that fetches the given item(s) from its operand. + After f = itemgetter(2), the call f(r) returns r[2]. + After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) + """ + __slots__ = ('_items', '_call') + + def __init__(self, item, /, *items): + if not items: + self._items = (item,) + def func(obj): + return obj[item] + self._call = func + else: + self._items = items = (item,) + items + def func(obj): + return tuple(obj[i] for i in items) + self._call = func + + def __call__(self, obj, /): + return self._call(obj) + + def __repr__(self): + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__name__, + ', '.join(map(repr, self._items))) + + def __reduce__(self): + return self.__class__, self._items + +class methodcaller: + """ + Return a callable object that calls the given method on its operand. + After f = methodcaller('name'), the call f(r) returns r.name(). + After g = methodcaller('name', 'date', foo=1), the call g(r) returns + r.name('date', foo=1). 
+ """ + __slots__ = ('_name', '_args', '_kwargs') + + def __init__(self, name, /, *args, **kwargs): + self._name = name + if not isinstance(self._name, str): + raise TypeError('method name must be a string') + self._args = args + self._kwargs = kwargs + + def __call__(self, obj, /): + return getattr(obj, self._name)(*self._args, **self._kwargs) + + def __repr__(self): + args = [repr(self._name)] + args.extend(map(repr, self._args)) + args.extend('%s=%r' % (k, v) for k, v in self._kwargs.items()) + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__name__, + ', '.join(args)) + + def __reduce__(self): + if not self._kwargs: + return self.__class__, (self._name,) + self._args + else: + from functools import partial + return partial(self.__class__, self._name, **self._kwargs), self._args + + +# In-place Operations *********************************************************# + +def iadd(a, b): + "Same as a += b." + a += b + return a + +def iand(a, b): + "Same as a &= b." + a &= b + return a + +def iconcat(a, b): + "Same as a += b, for a and b sequences." + if not hasattr(a, '__getitem__'): + msg = "'%s' object can't be concatenated" % type(a).__name__ + raise TypeError(msg) + a += b + return a + +def ifloordiv(a, b): + "Same as a //= b." + a //= b + return a + +def ilshift(a, b): + "Same as a <<= b." + a <<= b + return a + +def imod(a, b): + "Same as a %= b." + a %= b + return a + +def imul(a, b): + "Same as a *= b." + a *= b + return a + +def imatmul(a, b): + "Same as a @= b." + a @= b + return a + +def ior(a, b): + "Same as a |= b." + a |= b + return a + +def ipow(a, b): + "Same as a **= b." + a **=b + return a + +def irshift(a, b): + "Same as a >>= b." + a >>= b + return a + +def isub(a, b): + "Same as a -= b." + a -= b + return a + +def itruediv(a, b): + "Same as a /= b." + a /= b + return a + +def ixor(a, b): + "Same as a ^= b." + a ^= b + return a + + +try: + from _operator import * +except ImportError: + pass +else: + from _operator import __doc__ # noqa: F401 + +# All of these "__func__ = func" assignments have to happen after importing +# from _operator to make sure they're set to the right function +__lt__ = lt +__le__ = le +__eq__ = eq +__ne__ = ne +__ge__ = ge +__gt__ = gt +__not__ = not_ +__abs__ = abs +__add__ = add +__and__ = and_ +__call__ = call +__floordiv__ = floordiv +__index__ = index +__inv__ = inv +__invert__ = invert +__lshift__ = lshift +__mod__ = mod +__mul__ = mul +__matmul__ = matmul +__neg__ = neg +__or__ = or_ +__pos__ = pos +__pow__ = pow +__rshift__ = rshift +__sub__ = sub +__truediv__ = truediv +__xor__ = xor +__concat__ = concat +__contains__ = contains +__delitem__ = delitem +__getitem__ = getitem +__setitem__ = setitem +__iadd__ = iadd +__iand__ = iand +__iconcat__ = iconcat +__ifloordiv__ = ifloordiv +__ilshift__ = ilshift +__imod__ = imod +__imul__ = imul +__imatmul__ = imatmul +__ior__ = ior +__ipow__ = ipow +__irshift__ = irshift +__isub__ = isub +__itruediv__ = itruediv +__ixor__ = ixor diff --git a/Python314_4_x64_Template/Lib/optparse.py b/Python314_4_x64_Template/Lib/optparse.py new file mode 100644 index 00000000..38cf16d2 --- /dev/null +++ b/Python314_4_x64_Template/Lib/optparse.py @@ -0,0 +1,1671 @@ +"""A powerful, extensible, and easy-to-use option parser. + +By Greg Ward + +Originally distributed as Optik. + +For support, use the optik-users@lists.sourceforge.net mailing list +(http://lists.sourceforge.net/lists/listinfo/optik-users). 
+ +Simple usage example: + + from optparse import OptionParser + + parser = OptionParser() + parser.add_option("-f", "--file", dest="filename", + help="write report to FILE", metavar="FILE") + parser.add_option("-q", "--quiet", + action="store_false", dest="verbose", default=True, + help="don't print status messages to stdout") + + (options, args) = parser.parse_args() +""" + +__version__ = "1.5.3" + +__all__ = ['Option', + 'make_option', + 'SUPPRESS_HELP', + 'SUPPRESS_USAGE', + 'Values', + 'OptionContainer', + 'OptionGroup', + 'OptionParser', + 'HelpFormatter', + 'IndentedHelpFormatter', + 'TitledHelpFormatter', + 'OptParseError', + 'OptionError', + 'OptionConflictError', + 'OptionValueError', + 'BadOptionError', + 'check_choice'] + +__copyright__ = """ +Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved. +Copyright (c) 2002 Python Software Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import sys, os +from gettext import gettext as _, ngettext + + +def _repr(self): + return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self) + + +# This file was generated from: +# Id: option_parser.py 527 2006-07-23 15:21:30Z greg +# Id: option.py 522 2006-06-11 16:22:03Z gward +# Id: help.py 527 2006-07-23 15:21:30Z greg +# Id: errors.py 509 2006-04-20 00:58:24Z gward + + +class OptParseError (Exception): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + + +class OptionError (OptParseError): + """ + Raised if an Option instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + +class OptionConflictError (OptionError): + """ + Raised if conflicting options are added to an OptionParser. + """ + +class OptionValueError (OptParseError): + """ + Raised if an invalid option value is encountered on the command + line. + """ + +class BadOptionError (OptParseError): + """ + Raised if an invalid option is seen on the command line. 
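+ For example, an unrecognized string such as "--nosuch" on the command
+ line raises BadOptionError("--nosuch"), which parse_args() reports via
+ OptionParser.error() as "no such option: --nosuch".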
+ """ + def __init__(self, opt_str): + self.opt_str = opt_str + + def __str__(self): + return _("no such option: %s") % self.opt_str + +class AmbiguousOptionError (BadOptionError): + """ + Raised if an ambiguous option is seen on the command line. + """ + def __init__(self, opt_str, possibilities): + BadOptionError.__init__(self, opt_str) + self.possibilities = possibilities + + def __str__(self): + return (_("ambiguous option: %s (%s?)") + % (self.opt_str, ", ".join(self.possibilities))) + + +class HelpFormatter: + + """ + Abstract base class for formatting option help. OptionParser + instances should use one of the HelpFormatter subclasses for + formatting help; by default IndentedHelpFormatter is used. + + Instance attributes: + parser : OptionParser + the controlling OptionParser instance + indent_increment : int + the number of columns to indent per nesting level + max_help_position : int + the maximum starting column for option help text + help_position : int + the calculated starting column for option help text; + initially the same as the maximum + width : int + total number of columns for output (pass None to constructor for + this value to be taken from the $COLUMNS environment variable) + level : int + current indentation level + current_indent : int + current indentation level (in columns) + help_width : int + number of columns available for option help text (calculated) + default_tag : str + text to replace with each option's default value, "%default" + by default. Set to false value to disable default value expansion. + option_strings : { Option : str } + maps Option instances to the snippet of help text explaining + the syntax of that option, e.g. "-h, --help" or + "-fFILE, --file=FILE" + _short_opt_fmt : str + format string controlling how short options with values are + printed in help text. Must be either "%s%s" ("-fFILE") or + "%s %s" ("-f FILE"), because those are the two syntaxes that + Optik supports. + _long_opt_fmt : str + similar but for long options; must be either "%s %s" ("--file FILE") + or "%s=%s" ("--file=FILE"). + """ + + NO_DEFAULT_VALUE = "none" + + def __init__(self, + indent_increment, + max_help_position, + width, + short_first): + self.parser = None + self.indent_increment = indent_increment + if width is None: + try: + width = int(os.environ['COLUMNS']) + except (KeyError, ValueError): + width = 80 + width -= 2 + self.width = width + self.help_position = self.max_help_position = \ + min(max_help_position, max(width - 20, indent_increment * 2)) + self.current_indent = 0 + self.level = 0 + self.help_width = None # computed later + self.short_first = short_first + self.default_tag = "%default" + self.option_strings = {} + self._short_opt_fmt = "%s %s" + self._long_opt_fmt = "%s=%s" + + def set_parser(self, parser): + self.parser = parser + + def set_short_opt_delimiter(self, delim): + if delim not in ("", " "): + raise ValueError( + "invalid metavar delimiter for short options: %r" % delim) + self._short_opt_fmt = "%s" + delim + "%s" + + def set_long_opt_delimiter(self, delim): + if delim not in ("=", " "): + raise ValueError( + "invalid metavar delimiter for long options: %r" % delim) + self._long_opt_fmt = "%s" + delim + "%s" + + def indent(self): + self.current_indent += self.indent_increment + self.level += 1 + + def dedent(self): + self.current_indent -= self.indent_increment + assert self.current_indent >= 0, "Indent decreased below 0." 
+ self.level -= 1 + + def format_usage(self, usage): + raise NotImplementedError("subclasses must implement") + + def format_heading(self, heading): + raise NotImplementedError("subclasses must implement") + + def _format_text(self, text): + """ + Format a paragraph of free-form text for inclusion in the + help output at the current indentation level. + """ + import textwrap + text_width = max(self.width - self.current_indent, 11) + indent = " "*self.current_indent + return textwrap.fill(text, + text_width, + initial_indent=indent, + subsequent_indent=indent) + + def format_description(self, description): + if description: + return self._format_text(description) + "\n" + else: + return "" + + def format_epilog(self, epilog): + if epilog: + return "\n" + self._format_text(epilog) + "\n" + else: + return "" + + + def expand_default(self, option): + if self.parser is None or not self.default_tag: + return option.help + + default_value = self.parser.defaults.get(option.dest) + if default_value is NO_DEFAULT or default_value is None: + default_value = self.NO_DEFAULT_VALUE + + return option.help.replace(self.default_tag, str(default_value)) + + def format_option(self, option): + # The help for each option consists of two parts: + # * the opt strings and metavars + # eg. ("-x", or "-fFILENAME, --file=FILENAME") + # * the user-supplied help string + # eg. ("turn on expert mode", "read data from FILENAME") + # + # If possible, we write both of these on the same line: + # -x turn on expert mode + # + # But if the opt string list is too long, we put the help + # string on a second line, indented to the same column it would + # start in if it fit on the first line. + # -fFILENAME, --file=FILENAME + # read data from FILENAME + result = [] + opts = self.option_strings[option] + opt_width = self.help_position - self.current_indent - 2 + if len(opts) > opt_width: + opts = "%*s%s\n" % (self.current_indent, "", opts) + indent_first = self.help_position + else: # start help on same line as opts + opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) + indent_first = 0 + result.append(opts) + if option.help: + import textwrap + help_text = self.expand_default(option) + help_lines = textwrap.wrap(help_text, self.help_width) + result.append("%*s%s\n" % (indent_first, "", help_lines[0])) + result.extend(["%*s%s\n" % (self.help_position, "", line) + for line in help_lines[1:]]) + elif opts[-1] != "\n": + result.append("\n") + return "".join(result) + + def store_option_strings(self, parser): + self.indent() + max_len = 0 + for opt in parser.option_list: + strings = self.format_option_strings(opt) + self.option_strings[opt] = strings + max_len = max(max_len, len(strings) + self.current_indent) + self.indent() + for group in parser.option_groups: + for opt in group.option_list: + strings = self.format_option_strings(opt) + self.option_strings[opt] = strings + max_len = max(max_len, len(strings) + self.current_indent) + self.dedent() + self.dedent() + self.help_position = min(max_len + 2, self.max_help_position) + self.help_width = max(self.width - self.help_position, 11) + + def format_option_strings(self, option): + """Return a comma-separated list of option strings & metavariables.""" + if option.takes_value(): + metavar = option.metavar or option.dest.upper() + short_opts = [self._short_opt_fmt % (sopt, metavar) + for sopt in option._short_opts] + long_opts = [self._long_opt_fmt % (lopt, metavar) + for lopt in option._long_opts] + else: + short_opts = option._short_opts + long_opts = option._long_opts 
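+ # With the default delimiters this yields e.g. "-f FILE" for short
+ # options and "--file=FILE" for long ones.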
+ + if self.short_first: + opts = short_opts + long_opts + else: + opts = long_opts + short_opts + + return ", ".join(opts) + +class IndentedHelpFormatter (HelpFormatter): + """Format help with indented section bodies. + """ + + def __init__(self, + indent_increment=2, + max_help_position=24, + width=None, + short_first=1): + HelpFormatter.__init__( + self, indent_increment, max_help_position, width, short_first) + + def format_usage(self, usage): + return _("Usage: %s\n") % usage + + def format_heading(self, heading): + return "%*s%s:\n" % (self.current_indent, "", heading) + + +class TitledHelpFormatter (HelpFormatter): + """Format help with underlined section headers. + """ + + def __init__(self, + indent_increment=0, + max_help_position=24, + width=None, + short_first=0): + HelpFormatter.__init__ ( + self, indent_increment, max_help_position, width, short_first) + + def format_usage(self, usage): + return "%s %s\n" % (self.format_heading(_("Usage")), usage) + + def format_heading(self, heading): + return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading)) + + +def _parse_num(val, type): + if val[:2].lower() == "0x": # hexadecimal + radix = 16 + elif val[:2].lower() == "0b": # binary + radix = 2 + val = val[2:] or "0" # have to remove "0b" prefix + elif val[:1] == "0": # octal + radix = 8 + else: # decimal + radix = 10 + + return type(val, radix) + +def _parse_int(val): + return _parse_num(val, int) + +_builtin_cvt = { "int" : (_parse_int, _("integer")), + "long" : (_parse_int, _("integer")), + "float" : (float, _("floating-point")), + "complex" : (complex, _("complex")) } + +def check_builtin(option, opt, value): + (cvt, what) = _builtin_cvt[option.type] + try: + return cvt(value) + except ValueError: + raise OptionValueError( + _("option %s: invalid %s value: %r") % (opt, what, value)) + +def check_choice(option, opt, value): + if value in option.choices: + return value + else: + choices = ", ".join(map(repr, option.choices)) + raise OptionValueError( + _("option %s: invalid choice: %r (choose from %s)") + % (opt, value, choices)) + +# Not supplying a default is different from a default of None, +# so we need an explicit "not supplied" value. +NO_DEFAULT = ("NO", "DEFAULT") + + +class Option: + """ + Instance attributes: + _short_opts : [string] + _long_opts : [string] + + action : string + type : string + dest : string + default : any + nargs : int + const : any + choices : [string] + callback : function + callback_args : (any*) + callback_kwargs : { string : any } + help : string + metavar : string + """ + + # The list of instance attributes that may be set through + # keyword args to the constructor. + ATTRS = ['action', + 'type', + 'dest', + 'default', + 'nargs', + 'const', + 'choices', + 'callback', + 'callback_args', + 'callback_kwargs', + 'help', + 'metavar'] + + # The set of actions allowed by option parsers. Explicitly listed + # here so the constructor can validate its arguments. + ACTIONS = ("store", + "store_const", + "store_true", + "store_false", + "append", + "append_const", + "count", + "callback", + "help", + "version") + + # The set of actions that involve storing a value somewhere; + # also listed just for constructor argument validation. (If + # the action is one of these, there must be a destination.) + STORE_ACTIONS = ("store", + "store_const", + "store_true", + "store_false", + "append", + "append_const", + "count") + + # The set of actions for which it makes sense to supply a value + # type, ie. which may consume an argument from the command line. 
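+ # For instance, add_option("-n", type="int", action="store") consumes
+ # one argument ("-n 3"), whereas action="store_true" never does.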
+ TYPED_ACTIONS = ("store", + "append", + "callback") + + # The set of actions which *require* a value type, ie. that + # always consume an argument from the command line. + ALWAYS_TYPED_ACTIONS = ("store", + "append") + + # The set of actions which take a 'const' attribute. + CONST_ACTIONS = ("store_const", + "append_const") + + # The set of known types for option parsers. Again, listed here for + # constructor argument validation. + TYPES = ("string", "int", "long", "float", "complex", "choice") + + # Dictionary of argument checking functions, which convert and + # validate option arguments according to the option type. + # + # Signature of checking functions is: + # check(option : Option, opt : string, value : string) -> any + # where + # option is the Option instance calling the checker + # opt is the actual option seen on the command-line + # (eg. "-a", "--file") + # value is the option argument seen on the command-line + # + # The return value should be in the appropriate Python type + # for option.type -- eg. an integer if option.type == "int". + # + # If no checker is defined for a type, arguments will be + # unchecked and remain strings. + TYPE_CHECKER = { "int" : check_builtin, + "long" : check_builtin, + "float" : check_builtin, + "complex": check_builtin, + "choice" : check_choice, + } + + + # CHECK_METHODS is a list of unbound method objects; they are called + # by the constructor, in order, after all attributes are + # initialized. The list is created and filled in later, after all + # the methods are actually defined. (I just put it here because I + # like to define and document all class attributes in the same + # place.) Subclasses that add another _check_*() method should + # define their own CHECK_METHODS list that adds their check method + # to those from this class. + CHECK_METHODS = None + + + # -- Constructor/initialization methods ---------------------------- + + def __init__(self, *opts, **attrs): + # Set _short_opts, _long_opts attrs from 'opts' tuple. + # Have to be set now, in case no option strings are supplied. + self._short_opts = [] + self._long_opts = [] + opts = self._check_opt_strings(opts) + self._set_opt_strings(opts) + + # Set all other attrs (action, type, etc.) from 'attrs' dict + self._set_attrs(attrs) + + # Check all the attributes we just set. There are lots of + # complicated interdependencies, but luckily they can be farmed + # out to the _check_*() methods listed in CHECK_METHODS -- which + # could be handy for subclasses! The one thing these all share + # is that they raise OptionError if they discover a problem. + for checker in self.CHECK_METHODS: + checker(self) + + def _check_opt_strings(self, opts): + # Filter out None because early versions of Optik had exactly + # one short option and one long option, either of which + # could be None. 
+ opts = [opt for opt in opts if opt] + if not opts: + raise TypeError("at least one option string must be supplied") + return opts + + def _set_opt_strings(self, opts): + for opt in opts: + if len(opt) < 2: + raise OptionError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise OptionError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise OptionError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def _set_attrs(self, attrs): + for attr in self.ATTRS: + if attr in attrs: + setattr(self, attr, attrs[attr]) + del attrs[attr] + else: + if attr == 'default': + setattr(self, attr, NO_DEFAULT) + else: + setattr(self, attr, None) + if attrs: + attrs = sorted(attrs.keys()) + raise OptionError( + "invalid keyword arguments: %s" % ", ".join(attrs), + self) + + + # -- Constructor validation methods -------------------------------- + + def _check_action(self): + if self.action is None: + self.action = "store" + elif self.action not in self.ACTIONS: + raise OptionError("invalid action: %r" % self.action, self) + + def _check_type(self): + if self.type is None: + if self.action in self.ALWAYS_TYPED_ACTIONS: + if self.choices is not None: + # The "choices" attribute implies "choice" type. + self.type = "choice" + else: + # No type given? "string" is the most sensible default. + self.type = "string" + else: + # Allow type objects or builtin type conversion functions + # (int, str, etc.) as an alternative to their names. + if isinstance(self.type, type): + self.type = self.type.__name__ + + if self.type == "str": + self.type = "string" + + if self.type not in self.TYPES: + raise OptionError("invalid option type: %r" % self.type, self) + if self.action not in self.TYPED_ACTIONS: + raise OptionError( + "must not supply a type for action %r" % self.action, self) + + def _check_choice(self): + if self.type == "choice": + if self.choices is None: + raise OptionError( + "must supply a list of choices for type 'choice'", self) + elif not isinstance(self.choices, (tuple, list)): + raise OptionError( + "choices must be a list of strings ('%s' supplied)" + % str(type(self.choices)).split("'")[1], self) + elif self.choices is not None: + raise OptionError( + "must not supply choices for type %r" % self.type, self) + + def _check_dest(self): + # No destination given, and we need one for this action. The + # self.type check is for callbacks that take a value. + takes_value = (self.action in self.STORE_ACTIONS or + self.type is not None) + if self.dest is None and takes_value: + + # Glean a destination from the first long option string, + # or from the first short option string if no long options. + if self._long_opts: + # eg. 
"--foo-bar" -> "foo_bar" + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + self.dest = self._short_opts[0][1] + + def _check_const(self): + if self.action not in self.CONST_ACTIONS and self.const is not None: + raise OptionError( + "'const' must not be supplied for action %r" % self.action, + self) + + def _check_nargs(self): + if self.action in self.TYPED_ACTIONS: + if self.nargs is None: + self.nargs = 1 + elif self.nargs is not None: + raise OptionError( + "'nargs' must not be supplied for action %r" % self.action, + self) + + def _check_callback(self): + if self.action == "callback": + if not callable(self.callback): + raise OptionError( + "callback not callable: %r" % self.callback, self) + if (self.callback_args is not None and + not isinstance(self.callback_args, tuple)): + raise OptionError( + "callback_args, if supplied, must be a tuple: not %r" + % self.callback_args, self) + if (self.callback_kwargs is not None and + not isinstance(self.callback_kwargs, dict)): + raise OptionError( + "callback_kwargs, if supplied, must be a dict: not %r" + % self.callback_kwargs, self) + else: + if self.callback is not None: + raise OptionError( + "callback supplied (%r) for non-callback option" + % self.callback, self) + if self.callback_args is not None: + raise OptionError( + "callback_args supplied for non-callback option", self) + if self.callback_kwargs is not None: + raise OptionError( + "callback_kwargs supplied for non-callback option", self) + + + CHECK_METHODS = [_check_action, + _check_type, + _check_choice, + _check_dest, + _check_const, + _check_nargs, + _check_callback] + + + # -- Miscellaneous methods ----------------------------------------- + + def __str__(self): + return "/".join(self._short_opts + self._long_opts) + + __repr__ = _repr + + def takes_value(self): + return self.type is not None + + def get_opt_string(self): + if self._long_opts: + return self._long_opts[0] + else: + return self._short_opts[0] + + + # -- Processing methods -------------------------------------------- + + def check_value(self, opt, value): + checker = self.TYPE_CHECKER.get(self.type) + if checker is None: + return value + else: + return checker(self, opt, value) + + def convert_value(self, opt, value): + if value is not None: + if self.nargs == 1: + return self.check_value(opt, value) + else: + return tuple([self.check_value(opt, v) for v in value]) + + def process(self, opt, value, values, parser): + + # First, convert the value(s) to the right type. Howl if any + # value(s) are bogus. + value = self.convert_value(opt, value) + + # And then take whatever action is expected of us. + # This is a separate method to make life easier for + # subclasses to add new actions. 
+ return self.take_action( + self.action, self.dest, opt, value, values, parser) + + def take_action(self, action, dest, opt, value, values, parser): + if action == "store": + setattr(values, dest, value) + elif action == "store_const": + setattr(values, dest, self.const) + elif action == "store_true": + setattr(values, dest, True) + elif action == "store_false": + setattr(values, dest, False) + elif action == "append": + values.ensure_value(dest, []).append(value) + elif action == "append_const": + values.ensure_value(dest, []).append(self.const) + elif action == "count": + setattr(values, dest, values.ensure_value(dest, 0) + 1) + elif action == "callback": + args = self.callback_args or () + kwargs = self.callback_kwargs or {} + self.callback(self, opt, value, parser, *args, **kwargs) + elif action == "help": + parser.print_help() + parser.exit() + elif action == "version": + parser.print_version() + parser.exit() + else: + raise ValueError("unknown action %r" % self.action) + + return 1 + +# class Option + + +SUPPRESS_HELP = "SUPPRESS"+"HELP" +SUPPRESS_USAGE = "SUPPRESS"+"USAGE" + +class Values: + + def __init__(self, defaults=None): + if defaults: + for (attr, val) in defaults.items(): + setattr(self, attr, val) + + def __str__(self): + return str(self.__dict__) + + __repr__ = _repr + + def __eq__(self, other): + if isinstance(other, Values): + return self.__dict__ == other.__dict__ + elif isinstance(other, dict): + return self.__dict__ == other + else: + return NotImplemented + + def _update_careful(self, dict): + """ + Update the option values from an arbitrary dictionary, but only + use keys from dict that already have a corresponding attribute + in self. Any keys in dict without a corresponding attribute + are silently ignored. + """ + for attr in dir(self): + if attr in dict: + dval = dict[attr] + if dval is not None: + setattr(self, attr, dval) + + def _update_loose(self, dict): + """ + Update the option values from an arbitrary dictionary, + using all keys from the dictionary regardless of whether + they have a corresponding attribute in self or not. + """ + self.__dict__.update(dict) + + def _update(self, dict, mode): + if mode == "careful": + self._update_careful(dict) + elif mode == "loose": + self._update_loose(dict) + else: + raise ValueError("invalid update mode: %r" % mode) + + def read_module(self, modname, mode="careful"): + __import__(modname) + mod = sys.modules[modname] + self._update(vars(mod), mode) + + def read_file(self, filename, mode="careful"): + vars = {} + exec(open(filename).read(), vars) + self._update(vars, mode) + + def ensure_value(self, attr, value): + if not hasattr(self, attr) or getattr(self, attr) is None: + setattr(self, attr, value) + return getattr(self, attr) + + +class OptionContainer: + + """ + Abstract base class. + + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). + + Instance attributes: + option_list : [Option] + the list of Option objects contained by this OptionContainer + _short_opt : { string : Option } + dictionary mapping short option strings, eg. "-f" or "-X", + to the Option instances that implement them. If an Option + has multiple short option strings, it will appear in this + dictionary multiple times. [1] + _long_opt : { string : Option } + dictionary mapping long option strings, eg. "--file" or + "--exclude", to the Option instances that implement them. 
+ Again, a given Option can occur multiple times in this + dictionary. [1] + defaults : { string : any } + dictionary mapping option destination names to default + values for each destination [1] + + [1] These mappings are common to (shared by) all components of the + controlling OptionParser, where they are initially created. + + """ + + def __init__(self, option_class, conflict_handler, description): + # Initialize the option list and related data structures. + # This method must be provided by subclasses, and it must + # initialize at least the following instance attributes: + # option_list, _short_opt, _long_opt, defaults. + self._create_option_list() + + self.option_class = option_class + self.set_conflict_handler(conflict_handler) + self.set_description(description) + + def _create_option_mappings(self): + # For use by OptionParser constructor -- create the main + # option mappings used by this OptionParser and all + # OptionGroups that it owns. + self._short_opt = {} # single letter -> Option instance + self._long_opt = {} # long option -> Option instance + self.defaults = {} # maps option dest -> default value + + + def _share_option_mappings(self, parser): + # For use by OptionGroup constructor -- use shared option + # mappings from the OptionParser that owns this OptionGroup. + self._short_opt = parser._short_opt + self._long_opt = parser._long_opt + self.defaults = parser.defaults + + def set_conflict_handler(self, handler): + if handler not in ("error", "resolve"): + raise ValueError("invalid conflict_resolution value %r" % handler) + self.conflict_handler = handler + + def set_description(self, description): + self.description = description + + def get_description(self): + return self.description + + + def destroy(self): + """see OptionParser.destroy().""" + del self._short_opt + del self._long_opt + del self.defaults + + + # -- Option-adding methods ----------------------------------------- + + def _check_conflict(self, option): + conflict_opts = [] + for opt in option._short_opts: + if opt in self._short_opt: + conflict_opts.append((opt, self._short_opt[opt])) + for opt in option._long_opts: + if opt in self._long_opt: + conflict_opts.append((opt, self._long_opt[opt])) + + if conflict_opts: + handler = self.conflict_handler + if handler == "error": + raise OptionConflictError( + "conflicting option string(s): %s" + % ", ".join([co[0] for co in conflict_opts]), + option) + elif handler == "resolve": + for (opt, c_option) in conflict_opts: + if opt.startswith("--"): + c_option._long_opts.remove(opt) + del self._long_opt[opt] + else: + c_option._short_opts.remove(opt) + del self._short_opt[opt] + if not (c_option._short_opts or c_option._long_opts): + c_option.container.option_list.remove(c_option) + + def add_option(self, *args, **kwargs): + """add_option(Option) + add_option(opt_str, ..., kwarg=val, ...) 
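+
+ For example, an illustrative call of the second form:
+
+ parser.add_option("-n", "--num", type="int", dest="num",
+ default=0, help="a number (default: %default)")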
+ """ + if isinstance(args[0], str): + option = self.option_class(*args, **kwargs) + elif len(args) == 1 and not kwargs: + option = args[0] + if not isinstance(option, Option): + raise TypeError("not an Option instance: %r" % option) + else: + raise TypeError("invalid arguments") + + self._check_conflict(option) + + self.option_list.append(option) + option.container = self + for opt in option._short_opts: + self._short_opt[opt] = option + for opt in option._long_opts: + self._long_opt[opt] = option + + if option.dest is not None: # option has a dest, we need a default + if option.default is not NO_DEFAULT: + self.defaults[option.dest] = option.default + elif option.dest not in self.defaults: + self.defaults[option.dest] = None + + return option + + def add_options(self, option_list): + for option in option_list: + self.add_option(option) + + # -- Option query/removal methods ---------------------------------- + + def get_option(self, opt_str): + return (self._short_opt.get(opt_str) or + self._long_opt.get(opt_str)) + + def has_option(self, opt_str): + return (opt_str in self._short_opt or + opt_str in self._long_opt) + + def remove_option(self, opt_str): + option = self._short_opt.get(opt_str) + if option is None: + option = self._long_opt.get(opt_str) + if option is None: + raise ValueError("no such option %r" % opt_str) + + for opt in option._short_opts: + del self._short_opt[opt] + for opt in option._long_opts: + del self._long_opt[opt] + option.container.option_list.remove(option) + + + # -- Help-formatting methods --------------------------------------- + + def format_option_help(self, formatter): + if not self.option_list: + return "" + result = [] + for option in self.option_list: + if not option.help is SUPPRESS_HELP: + result.append(formatter.format_option(option)) + return "".join(result) + + def format_description(self, formatter): + return formatter.format_description(self.get_description()) + + def format_help(self, formatter): + result = [] + if self.description: + result.append(self.format_description(formatter)) + if self.option_list: + result.append(self.format_option_help(formatter)) + return "\n".join(result) + + +class OptionGroup (OptionContainer): + + def __init__(self, parser, title, description=None): + self.parser = parser + OptionContainer.__init__( + self, parser.option_class, parser.conflict_handler, description) + self.title = title + + def _create_option_list(self): + self.option_list = [] + self._share_option_mappings(self.parser) + + def set_title(self, title): + self.title = title + + def destroy(self): + """see OptionParser.destroy().""" + OptionContainer.destroy(self) + del self.option_list + + # -- Help-formatting methods --------------------------------------- + + def format_help(self, formatter): + result = formatter.format_heading(self.title) + formatter.indent() + result += OptionContainer.format_help(self, formatter) + formatter.dedent() + return result + + +class OptionParser (OptionContainer): + + """ + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). + + Instance attributes: + usage : string + a usage string for your program. Before it is displayed + to the user, "%prog" will be expanded to the name of + your program (self.prog or os.path.basename(sys.argv[0])). + prog : string + the name of the current program (to override + os.path.basename(sys.argv[0])). 
+ description : string + A paragraph of text giving a brief overview of your program. + optparse reformats this paragraph to fit the current terminal + width and prints it when the user requests help (after usage, + but before the list of options). + epilog : string + paragraph of help text to print after option help + + option_groups : [OptionGroup] + list of option groups in this parser (option groups are + irrelevant for parsing the command-line, but very useful + for generating help) + + allow_interspersed_args : bool = true + if true, positional arguments may be interspersed with options. + Assuming -a and -b each take a single argument, the command-line + -ablah foo bar -bboo baz + will be interpreted the same as + -ablah -bboo -- foo bar baz + If this flag were false, that command line would be interpreted as + -ablah -- foo bar -bboo baz + -- ie. we stop processing options as soon as we see the first + non-option argument. (This is the tradition followed by + Python's getopt module, Perl's Getopt::Std, and other argument- + parsing libraries, but it is generally annoying to users.) + + process_default_values : bool = true + if true, option default values are processed similarly to option + values from the command line: that is, they are passed to the + type-checking function for the option's type (as long as the + default value is a string). (This really only matters if you + have defined custom types; see SF bug #955889.) Set it to false + to restore the behaviour of Optik 1.4.1 and earlier. + + rargs : [string] + the argument list currently being parsed. Only set when + parse_args() is active, and continually trimmed down as + we consume arguments. Mainly there for the benefit of + callback options. + largs : [string] + the list of leftover arguments that we have skipped while + parsing options. If allow_interspersed_args is false, this + list is always empty. + values : Values + the set of option values currently being accumulated. Only + set when parse_args() is active. Also mainly for callbacks. + + Because of the 'rargs', 'largs', and 'values' attributes, + OptionParser is not thread-safe. If, for some perverse reason, you + need to parse command-line arguments simultaneously in different + threads, use different OptionParser instances. + + """ + + standard_option_list = [] + + def __init__(self, + usage=None, + option_list=None, + option_class=Option, + version=None, + conflict_handler="error", + description=None, + formatter=None, + add_help_option=True, + prog=None, + epilog=None): + OptionContainer.__init__( + self, option_class, conflict_handler, description) + self.set_usage(usage) + self.prog = prog + self.version = version + self.allow_interspersed_args = True + self.process_default_values = True + if formatter is None: + formatter = IndentedHelpFormatter() + self.formatter = formatter + self.formatter.set_parser(self) + self.epilog = epilog + + # Populate the option list; initial sources are the + # standard_option_list class attribute, the 'option_list' + # argument, and (if applicable) the _add_version_option() and + # _add_help_option() methods. + self._populate_option_list(option_list, + add_help=add_help_option) + + self._init_parsing_state() + + + def destroy(self): + """ + Declare that you are done with this OptionParser. This cleans up + reference cycles so the OptionParser (and all objects referenced by + it) can be garbage-collected promptly. After calling destroy(), the + OptionParser is unusable. 
+ """ + OptionContainer.destroy(self) + for group in self.option_groups: + group.destroy() + del self.option_list + del self.option_groups + del self.formatter + + + # -- Private methods ----------------------------------------------- + # (used by our or OptionContainer's constructor) + + def _create_option_list(self): + self.option_list = [] + self.option_groups = [] + self._create_option_mappings() + + def _add_help_option(self): + self.add_option("-h", "--help", + action="help", + help=_("show this help message and exit")) + + def _add_version_option(self): + self.add_option("--version", + action="version", + help=_("show program's version number and exit")) + + def _populate_option_list(self, option_list, add_help=True): + if self.standard_option_list: + self.add_options(self.standard_option_list) + if option_list: + self.add_options(option_list) + if self.version: + self._add_version_option() + if add_help: + self._add_help_option() + + def _init_parsing_state(self): + # These are set in parse_args() for the convenience of callbacks. + self.rargs = None + self.largs = None + self.values = None + + + # -- Simple modifier methods --------------------------------------- + + def set_usage(self, usage): + if usage is None: + self.usage = _("%prog [options]") + elif usage is SUPPRESS_USAGE: + self.usage = None + # For backwards compatibility with Optik 1.3 and earlier. + elif usage.lower().startswith("usage: "): + self.usage = usage[7:] + else: + self.usage = usage + + def enable_interspersed_args(self): + """Set parsing to not stop on the first non-option, allowing + interspersing switches with command arguments. This is the + default behavior. See also disable_interspersed_args() and the + class documentation description of the attribute + allow_interspersed_args.""" + self.allow_interspersed_args = True + + def disable_interspersed_args(self): + """Set parsing to stop on the first non-option. Use this if + you have a command processor which runs another command that + has options of its own and you want to make sure these options + don't get confused. + """ + self.allow_interspersed_args = False + + def set_process_default_values(self, process): + self.process_default_values = process + + def set_default(self, dest, value): + self.defaults[dest] = value + + def set_defaults(self, **kwargs): + self.defaults.update(kwargs) + + def _get_all_options(self): + options = self.option_list[:] + for group in self.option_groups: + options.extend(group.option_list) + return options + + def get_default_values(self): + if not self.process_default_values: + # Old, pre-Optik 1.5 behaviour. 
+ return Values(self.defaults) + + defaults = self.defaults.copy() + for option in self._get_all_options(): + default = defaults.get(option.dest) + if isinstance(default, str): + opt_str = option.get_opt_string() + defaults[option.dest] = option.check_value(opt_str, default) + + return Values(defaults) + + + # -- OptionGroup methods ------------------------------------------- + + def add_option_group(self, *args, **kwargs): + # XXX lots of overlap with OptionContainer.add_option() + if isinstance(args[0], str): + group = OptionGroup(self, *args, **kwargs) + elif len(args) == 1 and not kwargs: + group = args[0] + if not isinstance(group, OptionGroup): + raise TypeError("not an OptionGroup instance: %r" % group) + if group.parser is not self: + raise ValueError("invalid OptionGroup (wrong parser)") + else: + raise TypeError("invalid arguments") + + self.option_groups.append(group) + return group + + def get_option_group(self, opt_str): + option = (self._short_opt.get(opt_str) or + self._long_opt.get(opt_str)) + if option and option.container is not self: + return option.container + return None + + + # -- Option-parsing methods ---------------------------------------- + + def _get_args(self, args): + if args is None: + return sys.argv[1:] + else: + return args[:] # don't modify caller's list + + def parse_args(self, args=None, values=None): + """ + parse_args(args : [string] = sys.argv[1:], + values : Values = None) + -> (values : Values, args : [string]) + + Parse the command-line options found in 'args' (default: + sys.argv[1:]). Any errors result in a call to 'error()', which + by default prints the usage message to stderr and calls + sys.exit() with an error message. On success returns a pair + (values, args) where 'values' is a Values instance (with all + your option values) and 'args' is the list of arguments left + over after parsing options. + """ + rargs = self._get_args(args) + if values is None: + values = self.get_default_values() + + # Store the halves of the argument list as attributes for the + # convenience of callbacks: + # rargs + # the rest of the command-line (the "r" stands for + # "remaining" or "right-hand") + # largs + # the leftover arguments -- ie. what's left after removing + # options and their arguments (the "l" stands for "leftover" + # or "left-hand") + self.rargs = rargs + self.largs = largs = [] + self.values = values + + try: + stop = self._process_args(largs, rargs, values) + except (BadOptionError, OptionValueError) as err: + self.error(str(err)) + + args = largs + rargs + return self.check_values(values, args) + + def check_values(self, values, args): + """ + check_values(values : Values, args : [string]) + -> (values : Values, args : [string]) + + Check that the supplied option values and leftover arguments are + valid. Returns the option values and leftover arguments + (possibly adjusted, possibly completely new -- whatever you + like). Default implementation just returns the passed-in + values; subclasses may override as desired. + """ + return (values, args) + + def _process_args(self, largs, rargs, values): + """_process_args(largs : [string], + rargs : [string], + values : Values) + + Process command-line arguments and populate 'values', consuming + options and arguments from 'rargs'. If 'allow_interspersed_args' is + false, stop at the first non-option argument. If true, accumulate any + interspersed non-option arguments in 'largs'. 
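+
+ For instance, assuming -a takes a value and -b is a flag, the list
+ ["-a", "1", "x", "-b", "y"] leaves largs == ["x", "y"] once both
+ options have been consumed.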
+ """ + while rargs: + arg = rargs[0] + # We handle bare "--" explicitly, and bare "-" is handled by the + # standard arg handler since the short arg case ensures that the + # len of the opt string is greater than 1. + if arg == "--": + del rargs[0] + return + elif arg[0:2] == "--": + # process a single long option (possibly with value(s)) + self._process_long_opt(rargs, values) + elif arg[:1] == "-" and len(arg) > 1: + # process a cluster of short options (possibly with + # value(s) for the last one only) + self._process_short_opts(rargs, values) + elif self.allow_interspersed_args: + largs.append(arg) + del rargs[0] + else: + return # stop now, leave this arg in rargs + + # Say this is the original argument list: + # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] + # ^ + # (we are about to process arg(i)). + # + # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of + # [arg0, ..., arg(i-1)] (any options and their arguments will have + # been removed from largs). + # + # The while loop will usually consume 1 or more arguments per pass. + # If it consumes 1 (eg. arg is an option that takes no arguments), + # then after _process_arg() is done the situation is: + # + # largs = subset of [arg0, ..., arg(i)] + # rargs = [arg(i+1), ..., arg(N-1)] + # + # If allow_interspersed_args is false, largs will always be + # *empty* -- still a subset of [arg0, ..., arg(i-1)], but + # not a very interesting subset! + + def _match_long_opt(self, opt): + """_match_long_opt(opt : string) -> string + + Determine which long option string 'opt' matches, ie. which one + it is an unambiguous abbreviation for. Raises BadOptionError if + 'opt' doesn't unambiguously match any long option string. + """ + return _match_abbrev(opt, self._long_opt) + + def _process_long_opt(self, rargs, values): + arg = rargs.pop(0) + + # Value explicitly attached to arg? Pretend it's the next + # argument. + if "=" in arg: + (opt, next_arg) = arg.split("=", 1) + rargs.insert(0, next_arg) + had_explicit_value = True + else: + opt = arg + had_explicit_value = False + + opt = self._match_long_opt(opt) + option = self._long_opt[opt] + if option.takes_value(): + nargs = option.nargs + if len(rargs) < nargs: + self.error(ngettext( + "%(option)s option requires %(number)d argument", + "%(option)s option requires %(number)d arguments", + nargs) % {"option": opt, "number": nargs}) + elif nargs == 1: + value = rargs.pop(0) + else: + value = tuple(rargs[0:nargs]) + del rargs[0:nargs] + + elif had_explicit_value: + self.error(_("%s option does not take a value") % opt) + + else: + value = None + + option.process(opt, value, values, self) + + def _process_short_opts(self, rargs, values): + arg = rargs.pop(0) + stop = False + i = 1 + for ch in arg[1:]: + opt = "-" + ch + option = self._short_opt.get(opt) + i += 1 # we have consumed a character + + if not option: + raise BadOptionError(opt) + if option.takes_value(): + # Any characters left in arg? Pretend they're the + # next arg, and stop consuming characters of arg. 
+ if i < len(arg): + rargs.insert(0, arg[i:]) + stop = True + + nargs = option.nargs + if len(rargs) < nargs: + self.error(ngettext( + "%(option)s option requires %(number)d argument", + "%(option)s option requires %(number)d arguments", + nargs) % {"option": opt, "number": nargs}) + elif nargs == 1: + value = rargs.pop(0) + else: + value = tuple(rargs[0:nargs]) + del rargs[0:nargs] + + else: # option doesn't take a value + value = None + + option.process(opt, value, values, self) + + if stop: + break + + + # -- Feedback methods ---------------------------------------------- + + def get_prog_name(self): + if self.prog is None: + return os.path.basename(sys.argv[0]) + else: + return self.prog + + def expand_prog_name(self, s): + return s.replace("%prog", self.get_prog_name()) + + def get_description(self): + return self.expand_prog_name(self.description) + + def exit(self, status=0, msg=None): + if msg: + sys.stderr.write(msg) + sys.exit(status) + + def error(self, msg): + """error(msg : string) + + Print a usage message incorporating 'msg' to stderr and exit. + If you override this in a subclass, it should not return -- it + should either exit or raise an exception. + """ + self.print_usage(sys.stderr) + self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg)) + + def get_usage(self): + if self.usage: + return self.formatter.format_usage( + self.expand_prog_name(self.usage)) + else: + return "" + + def print_usage(self, file=None): + """print_usage(file : file = stdout) + + Print the usage message for the current program (self.usage) to + 'file' (default stdout). Any occurrence of the string "%prog" in + self.usage is replaced with the name of the current program + (basename of sys.argv[0]). Does nothing if self.usage is empty + or not defined. + """ + if self.usage: + print(self.get_usage(), file=file) + + def get_version(self): + if self.version: + return self.expand_prog_name(self.version) + else: + return "" + + def print_version(self, file=None): + """print_version(file : file = stdout) + + Print the version message for this program (self.version) to + 'file' (default stdout). As with print_usage(), any occurrence + of "%prog" in self.version is replaced by the current program's + name. Does nothing if self.version is empty or undefined. 
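+
+ For example, with version="%prog 1.0" and a program named "mytool",
+ this prints "mytool 1.0".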
+ """ + if self.version: + print(self.get_version(), file=file) + + def format_option_help(self, formatter=None): + if formatter is None: + formatter = self.formatter + formatter.store_option_strings(self) + result = [] + result.append(formatter.format_heading(_("Options"))) + formatter.indent() + if self.option_list: + result.append(OptionContainer.format_option_help(self, formatter)) + result.append("\n") + for group in self.option_groups: + result.append(group.format_help(formatter)) + result.append("\n") + formatter.dedent() + # Drop the last "\n", or the header if no options or option groups: + return "".join(result[:-1]) + + def format_epilog(self, formatter): + return formatter.format_epilog(self.epilog) + + def format_help(self, formatter=None): + if formatter is None: + formatter = self.formatter + result = [] + if self.usage: + result.append(self.get_usage() + "\n") + if self.description: + result.append(self.format_description(formatter) + "\n") + result.append(self.format_option_help(formatter)) + result.append(self.format_epilog(formatter)) + return "".join(result) + + def print_help(self, file=None): + """print_help(file : file = stdout) + + Print an extended help message, listing all options and any + help text provided with them, to 'file' (default stdout). + """ + if file is None: + file = sys.stdout + file.write(self.format_help()) + +# class OptionParser + + +def _match_abbrev(s, wordmap): + """_match_abbrev(s : string, wordmap : {string : Option}) -> string + + Return the string key in 'wordmap' for which 's' is an unambiguous + abbreviation. If 's' is found to be ambiguous or doesn't match any of + 'words', raise BadOptionError. + """ + # Is there an exact match? + if s in wordmap: + return s + else: + # Isolate all words with s as a prefix. + possibilities = [word for word in wordmap.keys() + if word.startswith(s)] + # No exact match, so there had better be just one possibility. + if len(possibilities) == 1: + return possibilities[0] + elif not possibilities: + raise BadOptionError(s) + else: + # More than one possible completion: ambiguous prefix. + possibilities.sort() + raise AmbiguousOptionError(s, possibilities) + + +# Some day, there might be many Option classes. As of Optik 1.3, the +# preferred way to instantiate Options is indirectly, via make_option(), +# which will become a factory function when there are many Option +# classes. +make_option = Option diff --git a/Python314_4_x64_Template/Lib/os.py b/Python314_4_x64_Template/Lib/os.py new file mode 100644 index 00000000..ac03b416 --- /dev/null +++ b/Python314_4_x64_Template/Lib/os.py @@ -0,0 +1,1191 @@ +r"""OS routines for NT or Posix depending on what system we're on. + +This exports: + - all functions from posix or nt, e.g. unlink, stat, etc. + - os.path is either posixpath or ntpath + - os.name is either 'posix' or 'nt' + - os.curdir is a string representing the current directory (always '.') + - os.pardir is a string representing the parent directory (always '..') + - os.sep is the (or a most common) pathname separator ('/' or '\\') + - os.extsep is the extension separator (always '.') + - os.altsep is the alternate pathname separator (None or '/') + - os.pathsep is the component separator used in $PATH etc + - os.linesep is the line separator in text files ('\n' or '\r\n') + - os.defpath is the default search path for executables + - os.devnull is the file path of the null device ('/dev/null', etc.) 
+ +Programs that import and use 'os' stand a better chance of being +portable between different platforms. Of course, they must then +only use functions that are defined by all platforms (e.g., unlink +and opendir), and leave all pathname manipulation to os.path +(e.g., split and join). +""" + +#' +import abc +import sys +import stat as st + +from _collections_abc import _check_methods + +GenericAlias = type(list[int]) + +_names = sys.builtin_module_names + +# Note: more names are added to __all__ later. +__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", + "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", + "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", + "extsep"] + +def _exists(name): + return name in globals() + +def _get_exports_list(module): + try: + return list(module.__all__) + except AttributeError: + return [n for n in dir(module) if n[0] != '_'] + +# Any new dependencies of the os module and/or changes in path separator +# requires updating importlib as well. +if 'posix' in _names: + name = 'posix' + linesep = '\n' + from posix import * + try: + from posix import _exit + __all__.append('_exit') + except ImportError: + pass + import posixpath as path + + try: + from posix import _have_functions + except ImportError: + pass + try: + from posix import _create_environ + except ImportError: + pass + + import posix + __all__.extend(_get_exports_list(posix)) + del posix + +elif 'nt' in _names: + name = 'nt' + linesep = '\r\n' + from nt import * + try: + from nt import _exit + __all__.append('_exit') + except ImportError: + pass + import ntpath as path + + import nt + __all__.extend(_get_exports_list(nt)) + del nt + + try: + from nt import _have_functions + except ImportError: + pass + try: + from nt import _create_environ + except ImportError: + pass + +else: + raise ImportError('no os specific module found') + +sys.modules['os.path'] = path +from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, + devnull) + +del _names + + +if _exists("_have_functions"): + _globals = globals() + def _add(str, fn): + if (fn in _globals) and (str in _have_functions): + _set.add(_globals[fn]) + + _set = set() + _add("HAVE_FACCESSAT", "access") + _add("HAVE_FCHMODAT", "chmod") + _add("HAVE_FCHOWNAT", "chown") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_LSTAT", "lstat") + _add("HAVE_FUTIMESAT", "utime") + _add("HAVE_LINKAT", "link") + _add("HAVE_MKDIRAT", "mkdir") + _add("HAVE_MKFIFOAT", "mkfifo") + _add("HAVE_MKNODAT", "mknod") + _add("HAVE_OPENAT", "open") + _add("HAVE_READLINKAT", "readlink") + _add("HAVE_RENAMEAT", "rename") + _add("HAVE_SYMLINKAT", "symlink") + _add("HAVE_UNLINKAT", "unlink") + _add("HAVE_UNLINKAT", "rmdir") + _add("HAVE_UTIMENSAT", "utime") + supports_dir_fd = _set + + _set = set() + _add("HAVE_FACCESSAT", "access") + supports_effective_ids = _set + + _set = set() + _add("HAVE_FCHDIR", "chdir") + _add("HAVE_FCHMOD", "chmod") + _add("MS_WINDOWS", "chmod") + _add("HAVE_FCHOWN", "chown") + _add("HAVE_FDOPENDIR", "listdir") + _add("HAVE_FDOPENDIR", "scandir") + _add("HAVE_FEXECVE", "execve") + _set.add(stat) # fstat always works + _add("HAVE_FTRUNCATE", "truncate") + _add("HAVE_FUTIMENS", "utime") + _add("HAVE_FUTIMES", "utime") + _add("HAVE_FPATHCONF", "pathconf") + if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3 + _add("HAVE_FSTATVFS", "statvfs") + supports_fd = _set + + _set = set() + _add("HAVE_FACCESSAT", "access") + # Some platforms don't support lchmod(). 
Often the function exists + # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP. + # (No, I don't know why that's a good design.) ./configure will detect + # this and reject it--so HAVE_LCHMOD still won't be defined on such + # platforms. This is Very Helpful. + # + # However, sometimes platforms without a working lchmod() *do* have + # fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15, + # OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes + # it behave like lchmod(). So in theory it would be a suitable + # replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s + # flag doesn't work *either*. Sadly ./configure isn't sophisticated + # enough to detect this condition--it only determines whether or not + # fchmodat() minimally works. + # + # Therefore we simply ignore fchmodat() when deciding whether or not + # os.chmod supports follow_symlinks. Just checking lchmod() is + # sufficient. After all--if you have a working fchmodat(), your + # lchmod() almost certainly works too. + # + # _add("HAVE_FCHMODAT", "chmod") + _add("HAVE_FCHOWNAT", "chown") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_LCHFLAGS", "chflags") + _add("HAVE_LCHMOD", "chmod") + _add("MS_WINDOWS", "chmod") + if _exists("lchown"): # mac os x10.3 + _add("HAVE_LCHOWN", "chown") + _add("HAVE_LINKAT", "link") + _add("HAVE_LUTIMES", "utime") + _add("HAVE_LSTAT", "stat") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_UTIMENSAT", "utime") + _add("MS_WINDOWS", "stat") + supports_follow_symlinks = _set + + del _set + del _have_functions + del _globals + del _add + + +# Python uses fixed values for the SEEK_ constants; they are mapped +# to native constants if necessary in posixmodule.c +# Other possible SEEK values are directly imported from posixmodule.c +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +# Super directory utilities. +# (Inspired by Eric Raymond; the doc strings are mostly his) + +def makedirs(name, mode=0o777, exist_ok=False): + """makedirs(name [, mode=0o777][, exist_ok=False]) + + Super-mkdir; create a leaf directory and all intermediate ones. Works like + mkdir, except that any intermediate path segment (not just the rightmost) + will be created if it does not exist. If the target directory already + exists, raise an OSError if exist_ok is False. Otherwise no exception is + raised. This is recursive. + + """ + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + if head and tail and not path.exists(head): + try: + makedirs(head, exist_ok=exist_ok) + except FileExistsError: + # Defeats race condition when another thread created the path + pass + cdir = curdir + if isinstance(tail, bytes): + cdir = bytes(curdir, 'ASCII') + if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists + return + try: + mkdir(name, mode) + except OSError: + # Cannot rely on checking for EEXIST, since the operating system + # could give priority to other errors like EACCES or EROFS + if not exist_ok or not path.isdir(name): + raise + +def removedirs(name): + """removedirs(name) + + Super-rmdir; remove a leaf directory and all empty intermediate + ones. Works like rmdir except that, if the leaf directory is + successfully removed, directories corresponding to rightmost path + segments will be pruned away until either the whole path is + consumed or an error occurs. Errors during this latter phase are + ignored -- they generally mean that a directory was not empty. 
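+
+ For example (illustrative): removedirs('foo/bar/baz') first removes
+ 'foo/bar/baz', then attempts 'foo/bar' and 'foo' in turn, stopping at
+ the first directory that is not empty.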
+ + """ + rmdir(name) + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + while head and tail: + try: + rmdir(head) + except OSError: + break + head, tail = path.split(head) + +def renames(old, new): + """renames(old, new) + + Super-rename; create directories as necessary and delete any left + empty. Works like rename, except creation of any intermediate + directories needed to make the new pathname good is attempted + first. After the rename, directories corresponding to rightmost + path segments of the old name will be pruned until either the + whole path is consumed or a nonempty directory is found. + + Note: this function can fail with the new directory structure made + if you lack permissions needed to unlink the leaf directory or + file. + + """ + head, tail = path.split(new) + if head and tail and not path.exists(head): + makedirs(head) + rename(old, new) + head, tail = path.split(old) + if head and tail: + try: + removedirs(head) + except OSError: + pass + +__all__.extend(["makedirs", "removedirs", "renames"]) + +# Private sentinel that makes walk() classify all symlinks and junctions as +# regular files. +_walk_symlinks_as_files = object() + +def walk(top, topdown=True, onerror=None, followlinks=False): + """Directory tree generator. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), yields a 3-tuple + + dirpath, dirnames, filenames + + dirpath is a string, the path to the directory. dirnames is a list of + the names of the subdirectories in dirpath (including symlinks to directories, + and excluding '.' and '..'). + filenames is a list of the names of the non-directory files in dirpath. + Note that the names in the lists are just names, with no path components. + To get a full path (which begins with top) to a file or directory in + dirpath, do os.path.join(dirpath, name). + + If optional arg 'topdown' is true or not specified, the triple for a + directory is generated before the triples for any of its subdirectories + (directories are generated top down). If topdown is false, the triple + for a directory is generated after the triples for all of its + subdirectories (directories are generated bottom up). + + When topdown is true, the caller can modify the dirnames list in-place + (e.g., via del or slice assignment), and walk will only recurse into the + subdirectories whose names remain in dirnames; this can be used to prune the + search, or to impose a specific order of visiting. Modifying dirnames when + topdown is false has no effect on the behavior of os.walk(), since the + directories in dirnames have already been generated by the time dirnames + itself is generated. No matter the value of topdown, the list of + subdirectories is retrieved before the tuples for the directory and its + subdirectories are generated. + + By default errors from the os.scandir() call are ignored. If + optional arg 'onerror' is specified, it should be a function; it + will be called with one argument, an OSError instance. It can + report the error to continue with the walk, or raise the exception + to abort the walk. Note that the filename is available as the + filename attribute of the exception object. + + By default, os.walk does not follow symbolic links to subdirectories on + systems that support them. In order to get this functionality, set the + optional argument 'followlinks' to true. 
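+
+    Note (mirroring the library documentation): setting followlinks to true
+    can lead to infinite recursion if a link points to a parent directory of
+    itself; walk() does not keep track of the directories it has already
+    visited. A defensive sketch (illustrative only, not upstream code):
+
+        seen = set()
+        for root, dirs, files in os.walk(top, followlinks=True):
+            st = os.stat(root)
+            if (st.st_dev, st.st_ino) in seen:
+                dirs[:] = []    # already visited through a link; prune
+            seen.add((st.st_dev, st.st_ino))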
+
+    Caution: if you pass a relative pathname for top, don't change the
+    current working directory between resumptions of walk. walk never
+    changes the current directory, and assumes that the client doesn't
+    either.
+
+    Example:
+
+    import os
+    from os.path import join, getsize
+    for root, dirs, files in os.walk('python/Lib/xml'):
+        print(root, "consumes", end=" ")
+        print(sum(getsize(join(root, name)) for name in files), end=" ")
+        print("bytes in", len(files), "non-directory files")
+        if '__pycache__' in dirs:
+            dirs.remove('__pycache__')  # don't visit __pycache__ directories
+
+    """
+    sys.audit("os.walk", top, topdown, onerror, followlinks)
+
+    stack = [fspath(top)]
+    islink, join = path.islink, path.join
+    while stack:
+        top = stack.pop()
+        if isinstance(top, tuple):
+            yield top
+            continue
+
+        dirs = []
+        nondirs = []
+        walk_dirs = []
+
+        # We may not have read permission for top, in which case we can't
+        # get a list of the files the directory contains.
+        # We suppress the exception here, rather than blow up for a
+        # minor reason when (say) a thousand readable directories are still
+        # left to visit.
+        try:
+            with scandir(top) as entries:
+                for entry in entries:
+                    try:
+                        if followlinks is _walk_symlinks_as_files:
+                            is_dir = entry.is_dir(follow_symlinks=False) and not entry.is_junction()
+                        else:
+                            is_dir = entry.is_dir()
+                    except OSError:
+                        # If is_dir() raises an OSError, consider the entry not to
+                        # be a directory, same behaviour as os.path.isdir().
+                        is_dir = False
+
+                    if is_dir:
+                        dirs.append(entry.name)
+                    else:
+                        nondirs.append(entry.name)
+
+                    if not topdown and is_dir:
+                        # Bottom-up: traverse into sub-directory, but exclude
+                        # symlinks to directories if followlinks is False
+                        if followlinks:
+                            walk_into = True
+                        else:
+                            try:
+                                is_symlink = entry.is_symlink()
+                            except OSError:
+                                # If is_symlink() raises an OSError, consider the
+                                # entry not to be a symbolic link, same behaviour
+                                # as os.path.islink().
+                                is_symlink = False
+                            walk_into = not is_symlink
+
+                        if walk_into:
+                            walk_dirs.append(entry.path)
+        except OSError as error:
+            if onerror is not None:
+                onerror(error)
+            continue
+
+        if topdown:
+            # Yield before sub-directory traversal if going top down
+            yield top, dirs, nondirs
+            # Traverse into sub-directories
+            for dirname in reversed(dirs):
+                new_path = join(top, dirname)
+                # bpo-23605: os.path.islink() is used instead of caching
+                # entry.is_symlink() result during the loop on os.scandir() because
+                # the caller can replace the directory entry during the "yield"
+                # above.
+                if followlinks or not islink(new_path):
+                    stack.append(new_path)
+        else:
+            # Yield after sub-directory traversal if going bottom up
+            stack.append((top, dirs, nondirs))
+            # Traverse into sub-directories
+            for new_path in reversed(walk_dirs):
+                stack.append(new_path)
+
+__all__.append("walk")
+
+if {open, stat} <= supports_dir_fd and {scandir, stat} <= supports_fd:
+
+    def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
+        """Directory tree generator.
+
+        This behaves exactly like walk(), except that it yields a 4-tuple
+
+            dirpath, dirnames, filenames, dirfd
+
+        `dirpath`, `dirnames` and `filenames` are identical to walk() output,
+        and `dirfd` is a file descriptor referring to the directory `dirpath`.
+
+        The advantage of fwalk() over walk() is that it's safe against symlink
+        races (when follow_symlinks is False).
+
+        If dir_fd is not None, it should be a file descriptor open to a directory,
+        and top should be relative; top will then be relative to that directory.
+ (dir_fd is always supported for fwalk.) + + Caution: + Since fwalk() yields file descriptors, those are only valid until the + next iteration step, so you should dup() them if you want to keep them + for a longer period. + + Example: + + import os + for root, dirs, files, rootfd in os.fwalk('python/Lib/xml'): + print(root, "consumes", end="") + print(sum(os.stat(name, dir_fd=rootfd).st_size for name in files), + end="") + print("bytes in", len(files), "non-directory files") + if '__pycache__' in dirs: + dirs.remove('__pycache__') # don't visit __pycache__ directories + """ + sys.audit("os.fwalk", top, topdown, onerror, follow_symlinks, dir_fd) + top = fspath(top) + stack = [(_fwalk_walk, (True, dir_fd, top, top, None))] + isbytes = isinstance(top, bytes) + try: + while stack: + yield from _fwalk(stack, isbytes, topdown, onerror, follow_symlinks) + finally: + # Close any file descriptors still on the stack. + while stack: + action, value = stack.pop() + if action == _fwalk_close: + close(value) + + # Each item in the _fwalk() stack is a pair (action, args). + _fwalk_walk = 0 # args: (isroot, dirfd, toppath, topname, entry) + _fwalk_yield = 1 # args: (toppath, dirnames, filenames, topfd) + _fwalk_close = 2 # args: dirfd + + def _fwalk(stack, isbytes, topdown, onerror, follow_symlinks): + # Note: This uses O(depth of the directory tree) file descriptors: if + # necessary, it can be adapted to only require O(1) FDs, see issue + # #13734. + + action, value = stack.pop() + if action == _fwalk_close: + close(value) + return + elif action == _fwalk_yield: + yield value + return + assert action == _fwalk_walk + isroot, dirfd, toppath, topname, entry = value + try: + if not follow_symlinks: + # Note: To guard against symlink races, we use the standard + # lstat()/open()/fstat() trick. + if entry is None: + orig_st = stat(topname, follow_symlinks=False, dir_fd=dirfd) + else: + orig_st = entry.stat(follow_symlinks=False) + topfd = open(topname, O_RDONLY | O_NONBLOCK, dir_fd=dirfd) + except OSError as err: + if isroot: + raise + if onerror is not None: + onerror(err) + return + stack.append((_fwalk_close, topfd)) + if not follow_symlinks: + if isroot and not st.S_ISDIR(orig_st.st_mode): + return + if not path.samestat(orig_st, stat(topfd)): + return + + scandir_it = scandir(topfd) + dirs = [] + nondirs = [] + entries = None if topdown or follow_symlinks else [] + for entry in scandir_it: + name = entry.name + if isbytes: + name = fsencode(name) + try: + if entry.is_dir(): + dirs.append(name) + if entries is not None: + entries.append(entry) + else: + nondirs.append(name) + except OSError: + try: + # Add dangling symlinks, ignore disappeared files + if entry.is_symlink(): + nondirs.append(name) + except OSError: + pass + + if topdown: + yield toppath, dirs, nondirs, topfd + else: + stack.append((_fwalk_yield, (toppath, dirs, nondirs, topfd))) + + toppath = path.join(toppath, toppath[:0]) # Add trailing slash. + if entries is None: + stack.extend( + (_fwalk_walk, (False, topfd, toppath + name, name, None)) + for name in dirs[::-1]) + else: + stack.extend( + (_fwalk_walk, (False, topfd, toppath + name, name, entry)) + for name, entry in zip(dirs[::-1], entries[::-1])) + + __all__.append("fwalk") + +def execl(file, *args): + """execl(file, *args) + + Execute the executable file with argument list args, replacing the + current process. 
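+
+    For example (illustrative, assuming a typical POSIX system):
+
+        os.execl('/bin/echo', 'echo', 'hello')
+
+    On success this call never returns; by convention the second argument
+    (argv[0] for the new program) repeats the program name.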
""" + execv(file, args) + +def execle(file, *args): + """execle(file, *args, env) + + Execute the executable file with argument list args and + environment env, replacing the current process. """ + env = args[-1] + execve(file, args[:-1], env) + +def execlp(file, *args): + """execlp(file, *args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. """ + execvp(file, args) + +def execlpe(file, *args): + """execlpe(file, *args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the current + process. """ + env = args[-1] + execvpe(file, args[:-1], env) + +def execvp(file, args): + """execvp(file, args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. + args may be a list or tuple of strings. """ + _execvpe(file, args) + +def execvpe(file, args, env): + """execvpe(file, args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the + current process. + args may be a list or tuple of strings. """ + _execvpe(file, args, env) + +__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) + +def _execvpe(file, args, env=None): + if env is not None: + exec_func = execve + argrest = (args, env) + else: + exec_func = execv + argrest = (args,) + env = environ + + if path.dirname(file): + exec_func(file, *argrest) + return + saved_exc = None + path_list = get_exec_path(env) + if name != 'nt': + file = fsencode(file) + path_list = map(fsencode, path_list) + for dir in path_list: + fullname = path.join(dir, file) + try: + exec_func(fullname, *argrest) + except (FileNotFoundError, NotADirectoryError) as e: + last_exc = e + except OSError as e: + last_exc = e + if saved_exc is None: + saved_exc = e + if saved_exc is not None: + raise saved_exc + raise last_exc + + +def get_exec_path(env=None): + """Returns the sequence of directories that will be searched for the + named executable (similar to a shell) when launching a process. + + *env* must be an environment variable dict or None. If *env* is None, + os.environ will be used. + """ + # Use a local import instead of a global import to limit the number of + # modules loaded at startup: the os module is always loaded at startup by + # Python. It may also avoid a bootstrap issue. 
+ import warnings + + if env is None: + env = environ + + # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a + # BytesWarning when using python -b or python -bb: ignore the warning + with warnings.catch_warnings(): + warnings.simplefilter("ignore", BytesWarning) + + try: + path_list = env.get('PATH') + except TypeError: + path_list = None + + if supports_bytes_environ: + try: + path_listb = env[b'PATH'] + except (KeyError, TypeError): + pass + else: + if path_list is not None: + raise ValueError( + "env cannot contain 'PATH' and b'PATH' keys") + path_list = path_listb + + if path_list is not None and isinstance(path_list, bytes): + path_list = fsdecode(path_list) + + if path_list is None: + path_list = defpath + return path_list.split(pathsep) + + +# Change environ to automatically call putenv() and unsetenv() +from _collections_abc import MutableMapping, Mapping + +class _Environ(MutableMapping): + def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue): + self.encodekey = encodekey + self.decodekey = decodekey + self.encodevalue = encodevalue + self.decodevalue = decodevalue + self._data = data + + def __getitem__(self, key): + try: + value = self._data[self.encodekey(key)] + except KeyError: + # raise KeyError with the original key value + raise KeyError(key) from None + return self.decodevalue(value) + + def __setitem__(self, key, value): + key = self.encodekey(key) + value = self.encodevalue(value) + putenv(key, value) + self._data[key] = value + + def __delitem__(self, key): + encodedkey = self.encodekey(key) + unsetenv(encodedkey) + try: + del self._data[encodedkey] + except KeyError: + # raise KeyError with the original key value + raise KeyError(key) from None + + def __iter__(self): + # list() from dict object is an atomic operation + keys = list(self._data) + for key in keys: + yield self.decodekey(key) + + def __len__(self): + return len(self._data) + + def __repr__(self): + formatted_items = ", ".join( + f"{self.decodekey(key)!r}: {self.decodevalue(value)!r}" + for key, value in self._data.items() + ) + return f"environ({{{formatted_items}}})" + + def copy(self): + return dict(self) + + def setdefault(self, key, value): + if key not in self: + self[key] = value + return self[key] + + def __ior__(self, other): + self.update(other) + return self + + def __or__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + new = dict(self) + new.update(other) + return new + + def __ror__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + new = dict(other) + new.update(self) + return new + +def _create_environ_mapping(): + if name == 'nt': + # Where Env Var Names Must Be UPPERCASE + def check_str(value): + if not isinstance(value, str): + raise TypeError("str expected, not %s" % type(value).__name__) + return value + encode = check_str + decode = str + def encodekey(key): + return encode(key).upper() + data = {} + for key, value in environ.items(): + data[encodekey(key)] = value + else: + # Where Env Var Names Can Be Mixed Case + encoding = sys.getfilesystemencoding() + def encode(value): + if not isinstance(value, str): + raise TypeError("str expected, not %s" % type(value).__name__) + return value.encode(encoding, 'surrogateescape') + def decode(value): + return value.decode(encoding, 'surrogateescape') + encodekey = encode + data = environ + return _Environ(data, + encodekey, decode, + encode, decode) + +# unicode environ +environ = _create_environ_mapping() +del _create_environ_mapping + + +if 
_exists("_create_environ"): + def reload_environ(): + data = _create_environ() + if name == 'nt': + encodekey = environ.encodekey + data = {encodekey(key): value + for key, value in data.items()} + + # modify in-place to keep os.environb in sync + env_data = environ._data + env_data.clear() + env_data.update(data) + + __all__.append("reload_environ") + +def getenv(key, default=None): + """Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are str.""" + return environ.get(key, default) + +supports_bytes_environ = (name != 'nt') +__all__.extend(("getenv", "supports_bytes_environ")) + +if supports_bytes_environ: + def _check_bytes(value): + if not isinstance(value, bytes): + raise TypeError("bytes expected, not %s" % type(value).__name__) + return value + + # bytes environ + environb = _Environ(environ._data, + _check_bytes, bytes, + _check_bytes, bytes) + del _check_bytes + + def getenvb(key, default=None): + """Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are bytes.""" + return environb.get(key, default) + + __all__.extend(("environb", "getenvb")) + +def _fscodec(): + encoding = sys.getfilesystemencoding() + errors = sys.getfilesystemencodeerrors() + + def fsencode(filename): + """Encode filename (an os.PathLike, bytes, or str) to the filesystem + encoding with 'surrogateescape' error handler, return bytes unchanged. + On Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + """ + filename = fspath(filename) # Does type-checking of `filename`. + if isinstance(filename, str): + return filename.encode(encoding, errors) + else: + return filename + + def fsdecode(filename): + """Decode filename (an os.PathLike, bytes, or str) from the filesystem + encoding with 'surrogateescape' error handler, return str unchanged. On + Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + """ + filename = fspath(filename) # Does type-checking of `filename`. + if isinstance(filename, bytes): + return filename.decode(encoding, errors) + else: + return filename + + return fsencode, fsdecode + +fsencode, fsdecode = _fscodec() +del _fscodec + +# Supply spawn*() (probably only for Unix) +if _exists("fork") and not _exists("spawnv") and _exists("execv"): + + P_WAIT = 0 + P_NOWAIT = P_NOWAITO = 1 + + __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"]) + + # XXX Should we support P_DETACH? I suppose it could fork()**2 + # and close the std I/O streams. Also, P_OVERLAY is the same + # as execv*()? + + def _spawnvef(mode, file, args, env, func): + # Internal helper; func is the exec*() function to use + if not isinstance(args, (tuple, list)): + raise TypeError('argv must be a tuple or a list') + if not args or not args[0]: + raise ValueError('argv first element cannot be empty') + pid = fork() + if not pid: + # Child + try: + if env is None: + func(file, args) + else: + func(file, args, env) + except: + _exit(127) + else: + # Parent + if mode == P_NOWAIT: + return pid # Caller is responsible for waiting! + while 1: + wpid, sts = waitpid(pid, 0) + if WIFSTOPPED(sts): + continue + + return waitstatus_to_exitcode(sts) + + def spawnv(mode, file, args): + """spawnv(mode, file, args) -> integer + +Execute file with arguments from args in a subprocess. 
+If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, None, execv) + + def spawnve(mode, file, args, env): + """spawnve(mode, file, args, env) -> integer + +Execute file with arguments from args in a subprocess with the +specified environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, env, execve) + + # Note: spawnvp[e] isn't currently supported on Windows + + def spawnvp(mode, file, args): + """spawnvp(mode, file, args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, None, execvp) + + def spawnvpe(mode, file, args, env): + """spawnvpe(mode, file, args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, env, execvpe) + + + __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"]) + + +if _exists("spawnv"): + # These aren't supplied by the basic Windows code + # but can be easily implemented in Python + + def spawnl(mode, file, *args): + """spawnl(mode, file, *args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnv(mode, file, args) + + def spawnle(mode, file, *args): + """spawnle(mode, file, *args, env) -> integer + +Execute file with arguments from args in a subprocess with the +supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + env = args[-1] + return spawnve(mode, file, args[:-1], env) + + + __all__.extend(["spawnl", "spawnle"]) + + +if _exists("spawnvp"): + # At the moment, Windows doesn't implement spawnvp[e], + # so it won't have spawnlp[e] either. + def spawnlp(mode, file, *args): + """spawnlp(mode, file, *args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnvp(mode, file, args) + + def spawnlpe(mode, file, *args): + """spawnlpe(mode, file, *args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. 
+If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + env = args[-1] + return spawnvpe(mode, file, args[:-1], env) + + + __all__.extend(["spawnlp", "spawnlpe"]) + +# VxWorks has no user space shell provided. As a result, running +# command in a shell can't be supported. +if sys.platform != 'vxworks': + # Supply os.popen() + def popen(cmd, mode="r", buffering=-1): + if not isinstance(cmd, str): + raise TypeError("invalid cmd type (%s, expected string)" % type(cmd)) + if mode not in ("r", "w"): + raise ValueError("invalid mode %r" % mode) + if buffering == 0 or buffering is None: + raise ValueError("popen() does not support unbuffered streams") + import subprocess + if mode == "r": + proc = subprocess.Popen(cmd, + shell=True, text=True, + stdout=subprocess.PIPE, + bufsize=buffering) + return _wrap_close(proc.stdout, proc) + else: + proc = subprocess.Popen(cmd, + shell=True, text=True, + stdin=subprocess.PIPE, + bufsize=buffering) + return _wrap_close(proc.stdin, proc) + + # Helper for popen() -- a proxy for a file whose close waits for the process + class _wrap_close: + def __init__(self, stream, proc): + self._stream = stream + self._proc = proc + def close(self): + self._stream.close() + returncode = self._proc.wait() + if returncode == 0: + return None + if name == 'nt': + return returncode + else: + return returncode << 8 # Shift left to match old behavior + def __enter__(self): + return self + def __exit__(self, *args): + self.close() + def __getattr__(self, name): + return getattr(self._stream, name) + def __iter__(self): + return iter(self._stream) + + __all__.append("popen") + +# Supply os.fdopen() +def fdopen(fd, mode="r", buffering=-1, encoding=None, *args, **kwargs): + if not isinstance(fd, int): + raise TypeError("invalid fd type (%s, expected integer)" % type(fd)) + import io + if "b" not in mode: + encoding = io.text_encoding(encoding) + return io.open(fd, mode, buffering, encoding, *args, **kwargs) + + +# For testing purposes, make sure the function is available when the C +# implementation exists. +def _fspath(path): + """Return the path representation of a path-like object. + + If str or bytes is passed in, it is returned unchanged. Otherwise the + os.PathLike interface is used to get the path representation. If the + path representation is not str or bytes, TypeError is raised. If the + provided path is not str, bytes, or os.PathLike, TypeError is raised. + """ + if isinstance(path, (str, bytes)): + return path + + # Work from the object's type to match method resolution of other magic + # methods. + path_type = type(path) + try: + path_repr = path_type.__fspath__(path) + except AttributeError: + if hasattr(path_type, '__fspath__'): + raise + else: + raise TypeError("expected str, bytes or os.PathLike object, " + "not " + path_type.__name__) + except TypeError: + if path_type.__fspath__ is None: + raise TypeError("expected str, bytes or os.PathLike object, " + "not " + path_type.__name__) from None + else: + raise + if isinstance(path_repr, (str, bytes)): + return path_repr + else: + raise TypeError("expected {}.__fspath__() to return str or bytes, " + "not {}".format(path_type.__name__, + type(path_repr).__name__)) + +# If there is no C implementation, make the pure Python version the +# implementation as transparently as possible. 
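+# Illustrative only (not upstream code): any object implementing the
+# __fspath__ protocol is accepted by fspath(), whichever implementation
+# ends up being used:
+#
+#     class _Demo:                       # hypothetical example class
+#         def __fspath__(self):
+#             return "/tmp/example"
+#
+#     fspath(_Demo())                    # -> "/tmp/example"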
+if not _exists('fspath'):
+    fspath = _fspath
+    fspath.__name__ = "fspath"
+
+
+class PathLike(abc.ABC):
+
+    """Abstract base class for implementing the file system path protocol."""
+
+    __slots__ = ()
+
+    @abc.abstractmethod
+    def __fspath__(self):
+        """Return the file system path representation of the object."""
+        raise NotImplementedError
+
+    @classmethod
+    def __subclasshook__(cls, subclass):
+        if cls is PathLike:
+            return _check_methods(subclass, '__fspath__')
+        return NotImplemented
+
+    __class_getitem__ = classmethod(GenericAlias)
+
+
+if name == 'nt':
+    class _AddedDllDirectory:
+        def __init__(self, path, cookie, remove_dll_directory):
+            self.path = path
+            self._cookie = cookie
+            self._remove_dll_directory = remove_dll_directory
+        def close(self):
+            self._remove_dll_directory(self._cookie)
+            self.path = None
+        def __enter__(self):
+            return self
+        def __exit__(self, *args):
+            self.close()
+        def __repr__(self):
+            if self.path:
+                return '<AddedDllDirectory("{}")>'.format(self.path)
+            return "<AddedDllDirectory()>"
+
+    def add_dll_directory(path):
+        """Add a path to the DLL search path.
+
+        This search path is used when resolving dependencies for imported
+        extension modules (the module itself is resolved through sys.path),
+        and also by ctypes.
+
+        Remove the directory by calling close() on the returned object or
+        using it in a with statement.
+        """
+        import nt
+        cookie = nt._add_dll_directory(path)
+        return _AddedDllDirectory(
+            path,
+            cookie,
+            nt._remove_dll_directory
+        )
+
+
+if _exists('sched_getaffinity') and sys._get_cpu_count_config() < 0:
+    def process_cpu_count():
+        """
+        Get the number of CPUs of the current process.
+
+        Return the number of logical CPUs usable by the calling thread of the
+        current process. Return None if indeterminable.
+        """
+        return len(sched_getaffinity(0))
+else:
+    # Just an alias to cpu_count() (same docstring)
+    process_cpu_count = cpu_count
diff --git a/Python314_4_x64_Template/Lib/pathlib/__init__.py b/Python314_4_x64_Template/Lib/pathlib/__init__.py
new file mode 100644
index 00000000..0d763d1f
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/pathlib/__init__.py
@@ -0,0 +1,1307 @@
+"""Object-oriented filesystem paths.
+
+This module provides classes to represent abstract paths and concrete
+paths with operations that have semantics appropriate for different
+operating systems.
+"""
+
+import io
+import ntpath
+import operator
+import os
+import posixpath
+import sys
+from errno import *
+from glob import _StringGlobber, _no_recurse_symlinks
+from itertools import chain
+from stat import S_ISDIR, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
+from _collections_abc import Sequence
+
+try:
+    import pwd
+except ImportError:
+    pwd = None
+try:
+    import grp
+except ImportError:
+    grp = None
+
+from pathlib._os import (
+    PathInfo, DirEntryInfo,
+    ensure_different_files, ensure_distinct_paths,
+    copyfile2, copyfileobj, magic_open, copy_info,
+)
+
+
+__all__ = [
+    "UnsupportedOperation",
+    "PurePath", "PurePosixPath", "PureWindowsPath",
+    "Path", "PosixPath", "WindowsPath",
+    ]
+
+
+class UnsupportedOperation(NotImplementedError):
+    """An exception that is raised when an unsupported operation is attempted.
+    """
+    pass
+
+
+class _PathParents(Sequence):
+    """This object provides sequence-like access to the logical ancestors
+    of a path.
Don't try to construct it yourself.""" + __slots__ = ('_path', '_drv', '_root', '_tail') + + def __init__(self, path): + self._path = path + self._drv = path.drive + self._root = path.root + self._tail = path._tail + + def __len__(self): + return len(self._tail) + + def __getitem__(self, idx): + if isinstance(idx, slice): + return tuple(self[i] for i in range(*idx.indices(len(self)))) + + if idx >= len(self) or idx < -len(self): + raise IndexError(idx) + if idx < 0: + idx += len(self) + return self._path._from_parsed_parts(self._drv, self._root, + self._tail[:-idx - 1]) + + def __repr__(self): + return "<{}.parents>".format(type(self._path).__name__) + + +class PurePath: + """Base class for manipulating paths without I/O. + + PurePath represents a filesystem path and offers operations which + don't imply any actual filesystem I/O. Depending on your system, + instantiating a PurePath will return either a PurePosixPath or a + PureWindowsPath object. You can also instantiate either of these classes + directly, regardless of your system. + """ + + __slots__ = ( + # The `_raw_paths` slot stores unjoined string paths. This is set in + # the `__init__()` method. + '_raw_paths', + + # The `_drv`, `_root` and `_tail_cached` slots store parsed and + # normalized parts of the path. They are set when any of the `drive`, + # `root` or `_tail` properties are accessed for the first time. The + # three-part division corresponds to the result of + # `os.path.splitroot()`, except that the tail is further split on path + # separators (i.e. it is a list of strings), and that the root and + # tail are normalized. + '_drv', '_root', '_tail_cached', + + # The `_str` slot stores the string representation of the path, + # computed from the drive, root and tail when `__str__()` is called + # for the first time. It's used to implement `_str_normcase` + '_str', + + # The `_str_normcase_cached` slot stores the string path with + # normalized case. It is set when the `_str_normcase` property is + # accessed for the first time. It's used to implement `__eq__()` + # `__hash__()`, and `_parts_normcase` + '_str_normcase_cached', + + # The `_parts_normcase_cached` slot stores the case-normalized + # string path after splitting on path separators. It's set when the + # `_parts_normcase` property is accessed for the first time. It's used + # to implement comparison methods like `__lt__()`. + '_parts_normcase_cached', + + # The `_hash` slot stores the hash of the case-normalized string + # path. It's set when `__hash__()` is called for the first time. + '_hash', + ) + parser = os.path + + def __new__(cls, *args, **kwargs): + """Construct a PurePath from one or several strings and or existing + PurePath objects. The strings and path objects are combined so as + to yield a canonicalized path, which is incorporated into the + new PurePath object. + """ + if cls is PurePath: + cls = PureWindowsPath if os.name == 'nt' else PurePosixPath + return object.__new__(cls) + + def __init__(self, *args): + paths = [] + for arg in args: + if isinstance(arg, PurePath): + if arg.parser is not self.parser: + # GH-103631: Convert separators for backwards compatibility. 
+ paths.append(arg.as_posix()) + else: + paths.extend(arg._raw_paths) + else: + try: + path = os.fspath(arg) + except TypeError: + path = arg + if not isinstance(path, str): + raise TypeError( + "argument should be a str or an os.PathLike " + "object where __fspath__ returns a str, " + f"not {type(path).__name__!r}") + paths.append(path) + self._raw_paths = paths + + def with_segments(self, *pathsegments): + """Construct a new path object from any number of path-like objects. + Subclasses may override this method to customize how new path objects + are created from methods like `iterdir()`. + """ + return type(self)(*pathsegments) + + def joinpath(self, *pathsegments): + """Combine this path with one or several arguments, and return a + new path representing either a subpath (if all arguments are relative + paths) or a totally different path (if one of the arguments is + anchored). + """ + return self.with_segments(self, *pathsegments) + + def __truediv__(self, key): + try: + return self.with_segments(self, key) + except TypeError: + return NotImplemented + + def __rtruediv__(self, key): + try: + return self.with_segments(key, self) + except TypeError: + return NotImplemented + + def __reduce__(self): + return self.__class__, tuple(self._raw_paths) + + def __repr__(self): + return "{}({!r})".format(self.__class__.__name__, self.as_posix()) + + def __fspath__(self): + return str(self) + + def __bytes__(self): + """Return the bytes representation of the path. This is only + recommended to use under Unix.""" + return os.fsencode(self) + + @property + def _str_normcase(self): + # String with normalized case, for hashing and equality checks + try: + return self._str_normcase_cached + except AttributeError: + if self.parser is posixpath: + self._str_normcase_cached = str(self) + else: + self._str_normcase_cached = str(self).lower() + return self._str_normcase_cached + + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self._str_normcase) + return self._hash + + def __eq__(self, other): + if not isinstance(other, PurePath): + return NotImplemented + return self._str_normcase == other._str_normcase and self.parser is other.parser + + @property + def _parts_normcase(self): + # Cached parts with normalized case, for comparisons. + try: + return self._parts_normcase_cached + except AttributeError: + self._parts_normcase_cached = self._str_normcase.split(self.parser.sep) + return self._parts_normcase_cached + + def __lt__(self, other): + if not isinstance(other, PurePath) or self.parser is not other.parser: + return NotImplemented + return self._parts_normcase < other._parts_normcase + + def __le__(self, other): + if not isinstance(other, PurePath) or self.parser is not other.parser: + return NotImplemented + return self._parts_normcase <= other._parts_normcase + + def __gt__(self, other): + if not isinstance(other, PurePath) or self.parser is not other.parser: + return NotImplemented + return self._parts_normcase > other._parts_normcase + + def __ge__(self, other): + if not isinstance(other, PurePath) or self.parser is not other.parser: + return NotImplemented + return self._parts_normcase >= other._parts_normcase + + def __str__(self): + """Return the string representation of the path, suitable for + passing to system calls.""" + try: + return self._str + except AttributeError: + self._str = self._format_parsed_parts(self.drive, self.root, + self._tail) or '.' 
+ return self._str + + @classmethod + def _format_parsed_parts(cls, drv, root, tail): + if drv or root: + return drv + root + cls.parser.sep.join(tail) + elif tail and cls.parser.splitdrive(tail[0])[0]: + tail = ['.'] + tail + return cls.parser.sep.join(tail) + + def _from_parsed_parts(self, drv, root, tail): + path = self._from_parsed_string(self._format_parsed_parts(drv, root, tail)) + path._drv = drv + path._root = root + path._tail_cached = tail + return path + + def _from_parsed_string(self, path_str): + path = self.with_segments(path_str) + path._str = path_str or '.' + return path + + @classmethod + def _parse_path(cls, path): + if not path: + return '', '', [] + sep = cls.parser.sep + altsep = cls.parser.altsep + if altsep: + path = path.replace(altsep, sep) + drv, root, rel = cls.parser.splitroot(path) + if not root and drv.startswith(sep) and not drv.endswith(sep): + drv_parts = drv.split(sep) + if len(drv_parts) == 4 and drv_parts[2] not in '?.': + # e.g. //server/share + root = sep + elif len(drv_parts) == 6: + # e.g. //?/unc/server/share + root = sep + return drv, root, [x for x in rel.split(sep) if x and x != '.'] + + @classmethod + def _parse_pattern(cls, pattern): + """Parse a glob pattern to a list of parts. This is much like + _parse_path, except: + + - Rather than normalizing and returning the drive and root, we raise + NotImplementedError if either are present. + - If the path has no real parts, we raise ValueError. + - If the path ends in a slash, then a final empty part is added. + """ + drv, root, rel = cls.parser.splitroot(pattern) + if root or drv: + raise NotImplementedError("Non-relative patterns are unsupported") + sep = cls.parser.sep + altsep = cls.parser.altsep + if altsep: + rel = rel.replace(altsep, sep) + parts = [x for x in rel.split(sep) if x and x != '.'] + if not parts: + raise ValueError(f"Unacceptable pattern: {str(pattern)!r}") + elif rel.endswith(sep): + # GH-65238: preserve trailing slash in glob patterns. + parts.append('') + return parts + + def as_posix(self): + """Return the string representation of the path with forward (/) + slashes.""" + return str(self).replace(self.parser.sep, '/') + + @property + def _raw_path(self): + paths = self._raw_paths + if len(paths) == 1: + return paths[0] + elif paths: + # Join path segments from the initializer. 
+ return self.parser.join(*paths) + else: + return '' + + @property + def drive(self): + """The drive prefix (letter or UNC path), if any.""" + try: + return self._drv + except AttributeError: + self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path) + return self._drv + + @property + def root(self): + """The root of the path, if any.""" + try: + return self._root + except AttributeError: + self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path) + return self._root + + @property + def _tail(self): + try: + return self._tail_cached + except AttributeError: + self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path) + return self._tail_cached + + @property + def anchor(self): + """The concatenation of the drive and root, or ''.""" + return self.drive + self.root + + @property + def parts(self): + """An object providing sequence-like access to the + components in the filesystem path.""" + if self.drive or self.root: + return (self.drive + self.root,) + tuple(self._tail) + else: + return tuple(self._tail) + + @property + def parent(self): + """The logical parent of the path.""" + drv = self.drive + root = self.root + tail = self._tail + if not tail: + return self + return self._from_parsed_parts(drv, root, tail[:-1]) + + @property + def parents(self): + """A sequence of this path's logical parents.""" + # The value of this property should not be cached on the path object, + # as doing so would introduce a reference cycle. + return _PathParents(self) + + @property + def name(self): + """The final path component, if any.""" + tail = self._tail + if not tail: + return '' + return tail[-1] + + def with_name(self, name): + """Return a new path with the file name changed.""" + p = self.parser + if not name or p.sep in name or (p.altsep and p.altsep in name) or name == '.': + raise ValueError(f"Invalid name {name!r}") + tail = self._tail.copy() + if not tail: + raise ValueError(f"{self!r} has an empty name") + tail[-1] = name + return self._from_parsed_parts(self.drive, self.root, tail) + + def with_stem(self, stem): + """Return a new path with the stem changed.""" + suffix = self.suffix + if not suffix: + return self.with_name(stem) + elif not stem: + # If the suffix is non-empty, we can't make the stem empty. + raise ValueError(f"{self!r} has a non-empty suffix") + else: + return self.with_name(stem + suffix) + + def with_suffix(self, suffix): + """Return a new path with the file suffix changed. If the path + has no suffix, add given suffix. If the given suffix is an empty + string, remove the suffix from the path. + """ + stem = self.stem + if not stem: + # If the stem is empty, we can't make the suffix non-empty. + raise ValueError(f"{self!r} has an empty name") + elif suffix and not suffix.startswith('.'): + raise ValueError(f"Invalid suffix {suffix!r}") + else: + return self.with_name(stem + suffix) + + @property + def stem(self): + """The final path component, minus its last suffix.""" + name = self.name + i = name.rfind('.') + if i != -1: + stem = name[:i] + # Stem must contain at least one non-dot character. + if stem.lstrip('.'): + return stem + return name + + @property + def suffix(self): + """ + The final component's last suffix, if any. + + This includes the leading period. For example: '.txt' + """ + name = self.name.lstrip('.') + i = name.rfind('.') + if i != -1: + return name[i:] + return '' + + @property + def suffixes(self): + """ + A list of the final component's suffixes, if any. + + These include the leading periods. 
For example: ['.tar', '.gz'] + """ + return ['.' + ext for ext in self.name.lstrip('.').split('.')[1:]] + + def relative_to(self, other, *, walk_up=False): + """Return the relative path to another path identified by the passed + arguments. If the operation is not possible (because this is not + related to the other path), raise ValueError. + + The *walk_up* parameter controls whether `..` may be used to resolve + the path. + """ + if not hasattr(other, 'with_segments'): + other = self.with_segments(other) + for step, path in enumerate(chain([other], other.parents)): + if path == self or path in self.parents: + break + elif not walk_up: + raise ValueError(f"{str(self)!r} is not in the subpath of {str(other)!r}") + elif path.name == '..': + raise ValueError(f"'..' segment in {str(other)!r} cannot be walked") + else: + raise ValueError(f"{str(self)!r} and {str(other)!r} have different anchors") + parts = ['..'] * step + self._tail[len(path._tail):] + return self._from_parsed_parts('', '', parts) + + def is_relative_to(self, other): + """Return True if the path is relative to another path or False. + """ + if not hasattr(other, 'with_segments'): + other = self.with_segments(other) + return other == self or other in self.parents + + def is_absolute(self): + """True if the path is absolute (has both a root and, if applicable, + a drive).""" + if self.parser is posixpath: + # Optimization: work with raw paths on POSIX. + for path in self._raw_paths: + if path.startswith('/'): + return True + return False + return self.parser.isabs(self) + + def is_reserved(self): + """Return True if the path contains one of the special names reserved + by the system, if any.""" + import warnings + msg = ("pathlib.PurePath.is_reserved() is deprecated and scheduled " + "for removal in Python 3.15. Use os.path.isreserved() to " + "detect reserved paths on Windows.") + warnings._deprecated("pathlib.PurePath.is_reserved", msg, remove=(3, 15)) + if self.parser is ntpath: + return self.parser.isreserved(self) + return False + + def as_uri(self): + """Return the path as a URI.""" + import warnings + msg = ("pathlib.PurePath.as_uri() is deprecated and scheduled " + "for removal in Python 3.19. Use pathlib.Path.as_uri().") + warnings._deprecated("pathlib.PurePath.as_uri", msg, remove=(3, 19)) + if not self.is_absolute(): + raise ValueError("relative path can't be expressed as a file URI") + + drive = self.drive + if len(drive) == 2 and drive[1] == ':': + # It's a path on a local drive => 'file:///c:/a/b' + prefix = 'file:///' + drive + path = self.as_posix()[2:] + elif drive: + # It's a path on a network drive => 'file://host/share/a/b' + prefix = 'file:' + path = self.as_posix() + else: + # It's a posix path => 'file:///etc/hosts' + prefix = 'file://' + path = str(self) + from urllib.parse import quote_from_bytes + return prefix + quote_from_bytes(os.fsencode(path)) + + def full_match(self, pattern, *, case_sensitive=None): + """ + Return True if this path matches the given glob-style pattern. The + pattern is matched against the entire path. + """ + if not hasattr(pattern, 'with_segments'): + pattern = self.with_segments(pattern) + if case_sensitive is None: + case_sensitive = self.parser is posixpath + + # The string representation of an empty path is a single dot ('.'). Empty + # paths shouldn't match wildcards, so we change it to the empty string. 
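+        # Illustrative only (from the documented semantics): the pattern must
+        # cover the whole path, e.g.
+        #   PurePath('a/b.py').full_match('**/*.py')  -> True
+        #   PurePath('a/b.py').full_match('*.py')     -> False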
+ path = str(self) if self.parts else '' + pattern = str(pattern) if pattern.parts else '' + globber = _StringGlobber(self.parser.sep, case_sensitive, recursive=True) + return globber.compile(pattern)(path) is not None + + def match(self, path_pattern, *, case_sensitive=None): + """ + Return True if this path matches the given pattern. If the pattern is + relative, matching is done from the right; otherwise, the entire path + is matched. The recursive wildcard '**' is *not* supported by this + method. + """ + if not hasattr(path_pattern, 'with_segments'): + path_pattern = self.with_segments(path_pattern) + if case_sensitive is None: + case_sensitive = self.parser is posixpath + path_parts = self.parts[::-1] + pattern_parts = path_pattern.parts[::-1] + if not pattern_parts: + raise ValueError("empty pattern") + if len(path_parts) < len(pattern_parts): + return False + if len(path_parts) > len(pattern_parts) and path_pattern.anchor: + return False + globber = _StringGlobber(self.parser.sep, case_sensitive) + for path_part, pattern_part in zip(path_parts, pattern_parts): + match = globber.compile(pattern_part) + if match(path_part) is None: + return False + return True + +# Subclassing os.PathLike makes isinstance() checks slower, +# which in turn makes Path construction slower. Register instead! +os.PathLike.register(PurePath) + + +class PurePosixPath(PurePath): + """PurePath subclass for non-Windows systems. + + On a POSIX system, instantiating a PurePath should return this object. + However, you can also instantiate it directly on any system. + """ + parser = posixpath + __slots__ = () + + +class PureWindowsPath(PurePath): + """PurePath subclass for Windows systems. + + On a Windows system, instantiating a PurePath should return this object. + However, you can also instantiate it directly on any system. + """ + parser = ntpath + __slots__ = () + + +class Path(PurePath): + """PurePath subclass that can make system calls. + + Path represents a filesystem path but unlike PurePath, also offers + methods to do system calls on path objects. Depending on your system, + instantiating a Path will return either a PosixPath or a WindowsPath + object. You can also instantiate a PosixPath or WindowsPath directly, + but cannot instantiate a WindowsPath on a POSIX system or vice versa. + """ + __slots__ = ('_info',) + + def __new__(cls, *args, **kwargs): + if cls is Path: + cls = WindowsPath if os.name == 'nt' else PosixPath + return object.__new__(cls) + + @property + def info(self): + """ + A PathInfo object that exposes the file type and other file attributes + of this path. + """ + try: + return self._info + except AttributeError: + self._info = PathInfo(self) + return self._info + + def stat(self, *, follow_symlinks=True): + """ + Return the result of the stat() system call on this path, like + os.stat() does. + """ + return os.stat(self, follow_symlinks=follow_symlinks) + + def lstat(self): + """ + Like stat(), except if the path points to a symlink, the symlink's + status information is returned, rather than its target's. + """ + return os.lstat(self) + + def exists(self, *, follow_symlinks=True): + """ + Whether this path exists. + + This method normally follows symlinks; to check whether a symlink exists, + add the argument follow_symlinks=False. + """ + if follow_symlinks: + return os.path.exists(self) + return os.path.lexists(self) + + def is_dir(self, *, follow_symlinks=True): + """ + Whether this path is a directory. 
+ """ + if follow_symlinks: + return os.path.isdir(self) + try: + return S_ISDIR(self.stat(follow_symlinks=follow_symlinks).st_mode) + except (OSError, ValueError): + return False + + def is_file(self, *, follow_symlinks=True): + """ + Whether this path is a regular file (also True for symlinks pointing + to regular files). + """ + if follow_symlinks: + return os.path.isfile(self) + try: + return S_ISREG(self.stat(follow_symlinks=follow_symlinks).st_mode) + except (OSError, ValueError): + return False + + def is_mount(self): + """ + Check if this path is a mount point + """ + return os.path.ismount(self) + + def is_symlink(self): + """ + Whether this path is a symbolic link. + """ + return os.path.islink(self) + + def is_junction(self): + """ + Whether this path is a junction. + """ + return os.path.isjunction(self) + + def is_block_device(self): + """ + Whether this path is a block device. + """ + try: + return S_ISBLK(self.stat().st_mode) + except (OSError, ValueError): + return False + + def is_char_device(self): + """ + Whether this path is a character device. + """ + try: + return S_ISCHR(self.stat().st_mode) + except (OSError, ValueError): + return False + + def is_fifo(self): + """ + Whether this path is a FIFO. + """ + try: + return S_ISFIFO(self.stat().st_mode) + except (OSError, ValueError): + return False + + def is_socket(self): + """ + Whether this path is a socket. + """ + try: + return S_ISSOCK(self.stat().st_mode) + except (OSError, ValueError): + return False + + def samefile(self, other_path): + """Return whether other_path is the same or not as this file + (as returned by os.path.samefile()). + """ + st = self.stat() + try: + other_st = other_path.stat() + except AttributeError: + other_st = self.with_segments(other_path).stat() + return (st.st_ino == other_st.st_ino and + st.st_dev == other_st.st_dev) + + def open(self, mode='r', buffering=-1, encoding=None, + errors=None, newline=None): + """ + Open the file pointed to by this path and return a file object, as + the built-in open() function does. + """ + if "b" not in mode: + encoding = io.text_encoding(encoding) + return io.open(self, mode, buffering, encoding, errors, newline) + + def read_bytes(self): + """ + Open the file in bytes mode, read it, and close the file. + """ + with self.open(mode='rb', buffering=0) as f: + return f.read() + + def read_text(self, encoding=None, errors=None, newline=None): + """ + Open the file in text mode, read it, and close the file. + """ + # Call io.text_encoding() here to ensure any warning is raised at an + # appropriate stack level. + encoding = io.text_encoding(encoding) + with self.open(mode='r', encoding=encoding, errors=errors, newline=newline) as f: + return f.read() + + def write_bytes(self, data): + """ + Open the file in bytes mode, write to it, and close the file. + """ + # type-check for the buffer interface before truncating the file + view = memoryview(data) + with self.open(mode='wb') as f: + return f.write(view) + + def write_text(self, data, encoding=None, errors=None, newline=None): + """ + Open the file in text mode, write to it, and close the file. + """ + # Call io.text_encoding() here to ensure any warning is raised at an + # appropriate stack level. 
+ encoding = io.text_encoding(encoding) + if not isinstance(data, str): + raise TypeError('data must be str, not %s' % + data.__class__.__name__) + with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f: + return f.write(data) + + _remove_leading_dot = operator.itemgetter(slice(2, None)) + _remove_trailing_slash = operator.itemgetter(slice(-1)) + + def _filter_trailing_slash(self, paths): + sep = self.parser.sep + anchor_len = len(self.anchor) + for path_str in paths: + if len(path_str) > anchor_len and path_str[-1] == sep: + path_str = path_str[:-1] + yield path_str + + def _from_dir_entry(self, dir_entry, path_str): + path = self.with_segments(path_str) + path._str = path_str + path._info = DirEntryInfo(dir_entry) + return path + + def iterdir(self): + """Yield path objects of the directory contents. + + The children are yielded in arbitrary order, and the + special entries '.' and '..' are not included. + """ + root_dir = str(self) + with os.scandir(root_dir) as scandir_it: + entries = list(scandir_it) + if root_dir == '.': + return (self._from_dir_entry(e, e.name) for e in entries) + else: + return (self._from_dir_entry(e, e.path) for e in entries) + + def glob(self, pattern, *, case_sensitive=None, recurse_symlinks=False): + """Iterate over this subtree and yield all existing files (of any + kind, including directories) matching the given relative pattern. + """ + sys.audit("pathlib.Path.glob", self, pattern) + if case_sensitive is None: + case_sensitive = self.parser is posixpath + case_pedantic = False + else: + # The user has expressed a case sensitivity choice, but we don't + # know the case sensitivity of the underlying filesystem, so we + # must use scandir() for everything, including non-wildcard parts. + case_pedantic = True + parts = self._parse_pattern(pattern) + recursive = True if recurse_symlinks else _no_recurse_symlinks + globber = _StringGlobber(self.parser.sep, case_sensitive, case_pedantic, recursive) + select = globber.selector(parts[::-1]) + root = str(self) + paths = select(self.parser.join(root, '')) + + # Normalize results + if root == '.': + paths = map(self._remove_leading_dot, paths) + if parts[-1] == '': + paths = map(self._remove_trailing_slash, paths) + elif parts[-1] == '**': + paths = self._filter_trailing_slash(paths) + paths = map(self._from_parsed_string, paths) + return paths + + def rglob(self, pattern, *, case_sensitive=None, recurse_symlinks=False): + """Recursively yield all existing files (of any kind, including + directories) matching the given relative pattern, anywhere in + this subtree. + """ + sys.audit("pathlib.Path.rglob", self, pattern) + pattern = self.parser.join('**', pattern) + return self.glob(pattern, case_sensitive=case_sensitive, recurse_symlinks=recurse_symlinks) + + def walk(self, top_down=True, on_error=None, follow_symlinks=False): + """Walk the directory tree from this directory, similar to os.walk().""" + sys.audit("pathlib.Path.walk", self, on_error, follow_symlinks) + root_dir = str(self) + if not follow_symlinks: + follow_symlinks = os._walk_symlinks_as_files + results = os.walk(root_dir, top_down, on_error, follow_symlinks) + for path_str, dirnames, filenames in results: + if root_dir == '.': + path_str = path_str[2:] + yield self._from_parsed_string(path_str), dirnames, filenames + + def absolute(self): + """Return an absolute version of this path + No normalization or symlink resolution is performed. + + Use resolve() to resolve symlinks and remove '..' segments. 
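+
+        For example (illustrative), with a current working directory of
+        /home/user, Path('spam').absolute() returns Path('/home/user/spam'),
+        and any '..' components are left in place rather than collapsed.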
+ """ + if self.is_absolute(): + return self + if self.root: + drive = os.path.splitroot(os.getcwd())[0] + return self._from_parsed_parts(drive, self.root, self._tail) + if self.drive: + # There is a CWD on each drive-letter drive. + cwd = os.path.abspath(self.drive) + else: + cwd = os.getcwd() + if not self._tail: + # Fast path for "empty" paths, e.g. Path("."), Path("") or Path(). + # We pass only one argument to with_segments() to avoid the cost + # of joining, and we exploit the fact that getcwd() returns a + # fully-normalized string by storing it in _str. This is used to + # implement Path.cwd(). + return self._from_parsed_string(cwd) + drive, root, rel = os.path.splitroot(cwd) + if not rel: + return self._from_parsed_parts(drive, root, self._tail) + tail = rel.split(self.parser.sep) + tail.extend(self._tail) + return self._from_parsed_parts(drive, root, tail) + + @classmethod + def cwd(cls): + """Return a new path pointing to the current working directory.""" + cwd = os.getcwd() + path = cls(cwd) + path._str = cwd # getcwd() returns a normalized path + return path + + def resolve(self, strict=False): + """ + Make the path absolute, resolving all symlinks on the way and also + normalizing it. + """ + + return self.with_segments(os.path.realpath(self, strict=strict)) + + if pwd: + def owner(self, *, follow_symlinks=True): + """ + Return the login name of the file owner. + """ + uid = self.stat(follow_symlinks=follow_symlinks).st_uid + return pwd.getpwuid(uid).pw_name + else: + def owner(self, *, follow_symlinks=True): + """ + Return the login name of the file owner. + """ + f = f"{type(self).__name__}.owner()" + raise UnsupportedOperation(f"{f} is unsupported on this system") + + if grp: + def group(self, *, follow_symlinks=True): + """ + Return the group name of the file gid. + """ + gid = self.stat(follow_symlinks=follow_symlinks).st_gid + return grp.getgrgid(gid).gr_name + else: + def group(self, *, follow_symlinks=True): + """ + Return the group name of the file gid. + """ + f = f"{type(self).__name__}.group()" + raise UnsupportedOperation(f"{f} is unsupported on this system") + + if hasattr(os, "readlink"): + def readlink(self): + """ + Return the path to which the symbolic link points. + """ + return self.with_segments(os.readlink(self)) + else: + def readlink(self): + """ + Return the path to which the symbolic link points. + """ + f = f"{type(self).__name__}.readlink()" + raise UnsupportedOperation(f"{f} is unsupported on this system") + + def touch(self, mode=0o666, exist_ok=True): + """ + Create this file with the given access mode, if it doesn't exist. + """ + + if exist_ok: + # First try to bump modification time + # Implementation note: GNU touch uses the UTIME_NOW option of + # the utimensat() / futimens() functions. + try: + os.utime(self, None) + except OSError: + # Avoid exception chaining + pass + else: + return + flags = os.O_CREAT | os.O_WRONLY + if not exist_ok: + flags |= os.O_EXCL + fd = os.open(self, flags, mode) + os.close(fd) + + def mkdir(self, mode=0o777, parents=False, exist_ok=False): + """ + Create a new directory at this given path. 
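+
+        For example (illustrative):
+
+            Path('a/b/c').mkdir(parents=True, exist_ok=True)
+
+        creates missing ancestors (like os.makedirs) and tolerates an
+        already-existing directory.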
+ """ + try: + os.mkdir(self, mode) + except FileNotFoundError: + if not parents or self.parent == self: + raise + self.parent.mkdir(parents=True, exist_ok=True) + self.mkdir(mode, parents=False, exist_ok=exist_ok) + except OSError: + # Cannot rely on checking for EEXIST, since the operating system + # could give priority to other errors like EACCES or EROFS + if not exist_ok or not self.is_dir(): + raise + + def chmod(self, mode, *, follow_symlinks=True): + """ + Change the permissions of the path, like os.chmod(). + """ + os.chmod(self, mode, follow_symlinks=follow_symlinks) + + def lchmod(self, mode): + """ + Like chmod(), except if the path points to a symlink, the symlink's + permissions are changed, rather than its target's. + """ + self.chmod(mode, follow_symlinks=False) + + def unlink(self, missing_ok=False): + """ + Remove this file or link. + If the path is a directory, use rmdir() instead. + """ + try: + os.unlink(self) + except FileNotFoundError: + if not missing_ok: + raise + + def rmdir(self): + """ + Remove this directory. The directory must be empty. + """ + os.rmdir(self) + + def _delete(self): + """ + Delete this file or directory (including all sub-directories). + """ + if self.is_symlink() or self.is_junction(): + self.unlink() + elif self.is_dir(): + # Lazy import to improve module import time + import shutil + shutil.rmtree(self) + else: + self.unlink() + + def rename(self, target): + """ + Rename this path to the target path. + + The target path may be absolute or relative. Relative paths are + interpreted relative to the current working directory, *not* the + directory of the Path object. + + Returns the new Path instance pointing to the target path. + """ + os.rename(self, target) + if not hasattr(target, 'with_segments'): + target = self.with_segments(target) + return target + + def replace(self, target): + """ + Rename this path to the target path, overwriting if that path exists. + + The target path may be absolute or relative. Relative paths are + interpreted relative to the current working directory, *not* the + directory of the Path object. + + Returns the new Path instance pointing to the target path. + """ + os.replace(self, target) + if not hasattr(target, 'with_segments'): + target = self.with_segments(target) + return target + + def copy(self, target, **kwargs): + """ + Recursively copy this file or directory tree to the given destination. + """ + if not hasattr(target, 'with_segments'): + target = self.with_segments(target) + ensure_distinct_paths(self, target) + target._copy_from(self, **kwargs) + return target.joinpath() # Empty join to ensure fresh metadata. + + def copy_into(self, target_dir, **kwargs): + """ + Copy this file or directory tree into the given existing directory. + """ + name = self.name + if not name: + raise ValueError(f"{self!r} has an empty name") + elif hasattr(target_dir, 'with_segments'): + target = target_dir / name + else: + target = self.with_segments(target_dir, name) + return self.copy(target, **kwargs) + + def _copy_from(self, source, follow_symlinks=True, preserve_metadata=False): + """ + Recursively copy the given path to this path. 
+ """ + if not follow_symlinks and source.info.is_symlink(): + self._copy_from_symlink(source, preserve_metadata) + elif source.info.is_dir(): + children = source.iterdir() + os.mkdir(self) + for child in children: + self.joinpath(child.name)._copy_from( + child, follow_symlinks, preserve_metadata) + if preserve_metadata: + copy_info(source.info, self) + else: + self._copy_from_file(source, preserve_metadata) + + def _copy_from_file(self, source, preserve_metadata=False): + ensure_different_files(source, self) + with magic_open(source, 'rb') as source_f: + with open(self, 'wb') as target_f: + copyfileobj(source_f, target_f) + if preserve_metadata: + copy_info(source.info, self) + + if copyfile2: + # Use fast OS routine for local file copying where available. + _copy_from_file_fallback = _copy_from_file + def _copy_from_file(self, source, preserve_metadata=False): + try: + source = os.fspath(source) + except TypeError: + pass + else: + copyfile2(source, str(self)) + return + self._copy_from_file_fallback(source, preserve_metadata) + + if os.name == 'nt': + # If a directory-symlink is copied *before* its target, then + # os.symlink() incorrectly creates a file-symlink on Windows. Avoid + # this by passing *target_is_dir* to os.symlink() on Windows. + def _copy_from_symlink(self, source, preserve_metadata=False): + os.symlink(str(source.readlink()), self, source.info.is_dir()) + if preserve_metadata: + copy_info(source.info, self, follow_symlinks=False) + else: + def _copy_from_symlink(self, source, preserve_metadata=False): + os.symlink(str(source.readlink()), self) + if preserve_metadata: + copy_info(source.info, self, follow_symlinks=False) + + def move(self, target): + """ + Recursively move this file or directory tree to the given destination. + """ + # Use os.replace() if the target is os.PathLike and on the same FS. + try: + target = self.with_segments(target) + except TypeError: + pass + else: + ensure_different_files(self, target) + try: + os.replace(self, target) + except OSError as err: + if err.errno != EXDEV: + raise + else: + return target.joinpath() # Empty join to ensure fresh metadata. + # Fall back to copy+delete. + target = self.copy(target, follow_symlinks=False, preserve_metadata=True) + self._delete() + return target + + def move_into(self, target_dir): + """ + Move this file or directory tree into the given existing directory. + """ + name = self.name + if not name: + raise ValueError(f"{self!r} has an empty name") + elif hasattr(target_dir, 'with_segments'): + target = target_dir / name + else: + target = self.with_segments(target_dir, name) + return self.move(target) + + if hasattr(os, "symlink"): + def symlink_to(self, target, target_is_directory=False): + """ + Make this path a symlink pointing to the target path. + Note the order of arguments (link, target) is the reverse of os.symlink. + """ + os.symlink(target, self, target_is_directory) + else: + def symlink_to(self, target, target_is_directory=False): + """ + Make this path a symlink pointing to the target path. + Note the order of arguments (link, target) is the reverse of os.symlink. + """ + f = f"{type(self).__name__}.symlink_to()" + raise UnsupportedOperation(f"{f} is unsupported on this system") + + if hasattr(os, "link"): + def hardlink_to(self, target): + """ + Make this path a hard link pointing to the same file as *target*. + + Note the order of arguments (self, target) is the reverse of os.link's. 
+ """ + os.link(target, self) + else: + def hardlink_to(self, target): + """ + Make this path a hard link pointing to the same file as *target*. + + Note the order of arguments (self, target) is the reverse of os.link's. + """ + f = f"{type(self).__name__}.hardlink_to()" + raise UnsupportedOperation(f"{f} is unsupported on this system") + + def expanduser(self): + """ Return a new path with expanded ~ and ~user constructs + (as returned by os.path.expanduser) + """ + if (not (self.drive or self.root) and + self._tail and self._tail[0][:1] == '~'): + homedir = os.path.expanduser(self._tail[0]) + if homedir[:1] == "~": + raise RuntimeError("Could not determine home directory.") + drv, root, tail = self._parse_path(homedir) + return self._from_parsed_parts(drv, root, tail + self._tail[1:]) + + return self + + @classmethod + def home(cls): + """Return a new path pointing to expanduser('~'). + """ + homedir = os.path.expanduser("~") + if homedir == "~": + raise RuntimeError("Could not determine home directory.") + return cls(homedir) + + def as_uri(self): + """Return the path as a URI.""" + if not self.is_absolute(): + raise ValueError("relative paths can't be expressed as file URIs") + from urllib.request import pathname2url + return pathname2url(str(self), add_scheme=True) + + @classmethod + def from_uri(cls, uri): + """Return a new path from the given 'file' URI.""" + from urllib.error import URLError + from urllib.request import url2pathname + try: + path = cls(url2pathname(uri, require_scheme=True)) + except URLError as exc: + raise ValueError(exc.reason) from None + if not path.is_absolute(): + raise ValueError(f"URI is not absolute: {uri!r}") + return path + + +class PosixPath(Path, PurePosixPath): + """Path subclass for non-Windows systems. + + On a POSIX system, instantiating a Path should return this object. + """ + __slots__ = () + + if os.name == 'nt': + def __new__(cls, *args, **kwargs): + raise UnsupportedOperation( + f"cannot instantiate {cls.__name__!r} on your system") + +class WindowsPath(Path, PureWindowsPath): + """Path subclass for Windows systems. + + On a Windows system, instantiating a Path should return this object. + """ + __slots__ = () + + if os.name != 'nt': + def __new__(cls, *args, **kwargs): + raise UnsupportedOperation( + f"cannot instantiate {cls.__name__!r} on your system") diff --git a/Python314_4_x64_Template/Lib/pathlib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/pathlib/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..62cfc0e3 Binary files /dev/null and b/Python314_4_x64_Template/Lib/pathlib/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/pathlib/__pycache__/_os.cpython-314.pyc b/Python314_4_x64_Template/Lib/pathlib/__pycache__/_os.cpython-314.pyc new file mode 100644 index 00000000..be04cf53 Binary files /dev/null and b/Python314_4_x64_Template/Lib/pathlib/__pycache__/_os.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/pathlib/_local.py b/Python314_4_x64_Template/Lib/pathlib/_local.py new file mode 100644 index 00000000..58e137f2 --- /dev/null +++ b/Python314_4_x64_Template/Lib/pathlib/_local.py @@ -0,0 +1,12 @@ +""" +This module exists so that pathlib objects pickled under Python 3.13 can be +unpickled in 3.14+. 
+""" + +from pathlib import * + +__all__ = [ + "UnsupportedOperation", + "PurePath", "PurePosixPath", "PureWindowsPath", + "Path", "PosixPath", "WindowsPath", +] diff --git a/Python314_4_x64_Template/Lib/pathlib/_os.py b/Python314_4_x64_Template/Lib/pathlib/_os.py new file mode 100644 index 00000000..03983694 --- /dev/null +++ b/Python314_4_x64_Template/Lib/pathlib/_os.py @@ -0,0 +1,530 @@ +""" +Low-level OS functionality wrappers used by pathlib. +""" + +from errno import * +from io import TextIOWrapper, text_encoding +from stat import S_ISDIR, S_ISREG, S_ISLNK, S_IMODE +import os +import sys +try: + import fcntl +except ImportError: + fcntl = None +try: + import posix +except ImportError: + posix = None +try: + import _winapi +except ImportError: + _winapi = None + + +def _get_copy_blocksize(infd): + """Determine blocksize for fastcopying on Linux. + Hopefully the whole file will be copied in a single call. + The copying itself should be performed in a loop 'till EOF is + reached (0 return) so a blocksize smaller or bigger than the actual + file size should not make any difference, also in case the file + content changes while being copied. + """ + try: + blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8 MiB + except OSError: + blocksize = 2 ** 27 # 128 MiB + # On 32-bit architectures truncate to 1 GiB to avoid OverflowError, + # see gh-82500. + if sys.maxsize < 2 ** 32: + blocksize = min(blocksize, 2 ** 30) + return blocksize + + +if fcntl and hasattr(fcntl, 'FICLONE'): + def _ficlone(source_fd, target_fd): + """ + Perform a lightweight copy of two files, where the data blocks are + copied only when modified. This is known as Copy on Write (CoW), + instantaneous copy or reflink. + """ + fcntl.ioctl(target_fd, fcntl.FICLONE, source_fd) +else: + _ficlone = None + + +if posix and hasattr(posix, '_fcopyfile'): + def _fcopyfile(source_fd, target_fd): + """ + Copy a regular file content using high-performance fcopyfile(3) + syscall (macOS). + """ + posix._fcopyfile(source_fd, target_fd, posix._COPYFILE_DATA) +else: + _fcopyfile = None + + +if hasattr(os, 'copy_file_range'): + def _copy_file_range(source_fd, target_fd): + """ + Copy data from one regular mmap-like fd to another by using a + high-performance copy_file_range(2) syscall that gives filesystems + an opportunity to implement the use of reflinks or server-side + copy. + This should work on Linux >= 4.5 only. + """ + blocksize = _get_copy_blocksize(source_fd) + offset = 0 + while True: + sent = os.copy_file_range(source_fd, target_fd, blocksize, + offset_dst=offset) + if sent == 0: + break # EOF + offset += sent +else: + _copy_file_range = None + + +if hasattr(os, 'sendfile'): + def _sendfile(source_fd, target_fd): + """Copy data from one regular mmap-like fd to another by using + high-performance sendfile(2) syscall. + This should work on Linux >= 2.6.33 only. + """ + blocksize = _get_copy_blocksize(source_fd) + offset = 0 + while True: + sent = os.sendfile(target_fd, source_fd, offset, blocksize) + if sent == 0: + break # EOF + offset += sent +else: + _sendfile = None + + +if _winapi and hasattr(_winapi, 'CopyFile2'): + def copyfile2(source, target): + """ + Copy from one file to another using CopyFile2 (Windows only). + """ + _winapi.CopyFile2(source, target, 0) +else: + copyfile2 = None + + +def copyfileobj(source_f, target_f): + """ + Copy data from file-like object source_f to file-like object target_f. 
+ """ + try: + source_fd = source_f.fileno() + target_fd = target_f.fileno() + except Exception: + pass # Fall through to generic code. + else: + try: + # Use OS copy-on-write where available. + if _ficlone: + try: + _ficlone(source_fd, target_fd) + return + except OSError as err: + if err.errno not in (EBADF, EOPNOTSUPP, ETXTBSY, EXDEV): + raise err + + # Use OS copy where available. + if _fcopyfile: + try: + _fcopyfile(source_fd, target_fd) + return + except OSError as err: + if err.errno not in (EINVAL, ENOTSUP): + raise err + if _copy_file_range: + try: + _copy_file_range(source_fd, target_fd) + return + except OSError as err: + if err.errno not in (ETXTBSY, EXDEV): + raise err + if _sendfile: + try: + _sendfile(source_fd, target_fd) + return + except OSError as err: + if err.errno != ENOTSOCK: + raise err + except OSError as err: + # Produce more useful error messages. + err.filename = source_f.name + err.filename2 = target_f.name + raise err + + # Last resort: copy with fileobj read() and write(). + read_source = source_f.read + write_target = target_f.write + while buf := read_source(1024 * 1024): + write_target(buf) + + +def magic_open(path, mode='r', buffering=-1, encoding=None, errors=None, + newline=None): + """ + Open the file pointed to by this path and return a file object, as + the built-in open() function does. + """ + text = 'b' not in mode + if text: + # Call io.text_encoding() here to ensure any warning is raised at an + # appropriate stack level. + encoding = text_encoding(encoding) + try: + return open(path, mode, buffering, encoding, errors, newline) + except TypeError: + pass + cls = type(path) + mode = ''.join(sorted(c for c in mode if c not in 'bt')) + if text: + try: + attr = getattr(cls, f'__open_{mode}__') + except AttributeError: + pass + else: + return attr(path, buffering, encoding, errors, newline) + elif encoding is not None: + raise ValueError("binary mode doesn't take an encoding argument") + elif errors is not None: + raise ValueError("binary mode doesn't take an errors argument") + elif newline is not None: + raise ValueError("binary mode doesn't take a newline argument") + + try: + attr = getattr(cls, f'__open_{mode}b__') + except AttributeError: + pass + else: + stream = attr(path, buffering) + if text: + stream = TextIOWrapper(stream, encoding, errors, newline) + return stream + + raise TypeError(f"{cls.__name__} can't be opened with mode {mode!r}") + + +def ensure_distinct_paths(source, target): + """ + Raise OSError(EINVAL) if the other path is within this path. + """ + # Note: there is no straightforward, foolproof algorithm to determine + # if one directory is within another (a particularly perverse example + # would be a single network share mounted in one location via NFS, and + # in another location via CIFS), so we simply checks whether the + # other path is lexically equal to, or within, this path. + if source == target: + err = OSError(EINVAL, "Source and target are the same path") + elif source in target.parents: + err = OSError(EINVAL, "Source path is a parent of target path") + else: + return + err.filename = str(source) + err.filename2 = str(target) + raise err + + +def ensure_different_files(source, target): + """ + Raise OSError(EINVAL) if both paths refer to the same file. 
+ """ + try: + source_file_id = source.info._file_id + target_file_id = target.info._file_id + except AttributeError: + if source != target: + return + else: + try: + if source_file_id() != target_file_id(): + return + except (OSError, ValueError): + return + err = OSError(EINVAL, "Source and target are the same file") + err.filename = str(source) + err.filename2 = str(target) + raise err + + +def copy_info(info, target, follow_symlinks=True): + """Copy metadata from the given PathInfo to the given local path.""" + copy_times_ns = ( + hasattr(info, '_access_time_ns') and + hasattr(info, '_mod_time_ns') and + (follow_symlinks or os.utime in os.supports_follow_symlinks)) + if copy_times_ns: + t0 = info._access_time_ns(follow_symlinks=follow_symlinks) + t1 = info._mod_time_ns(follow_symlinks=follow_symlinks) + os.utime(target, ns=(t0, t1), follow_symlinks=follow_symlinks) + + # We must copy extended attributes before the file is (potentially) + # chmod()'ed read-only, otherwise setxattr() will error with -EACCES. + copy_xattrs = ( + hasattr(info, '_xattrs') and + hasattr(os, 'setxattr') and + (follow_symlinks or os.setxattr in os.supports_follow_symlinks)) + if copy_xattrs: + xattrs = info._xattrs(follow_symlinks=follow_symlinks) + for attr, value in xattrs: + try: + os.setxattr(target, attr, value, follow_symlinks=follow_symlinks) + except OSError as e: + if e.errno not in (EPERM, ENOTSUP, ENODATA, EINVAL, EACCES): + raise + + copy_posix_permissions = ( + hasattr(info, '_posix_permissions') and + (follow_symlinks or os.chmod in os.supports_follow_symlinks)) + if copy_posix_permissions: + posix_permissions = info._posix_permissions(follow_symlinks=follow_symlinks) + try: + os.chmod(target, posix_permissions, follow_symlinks=follow_symlinks) + except NotImplementedError: + # if we got a NotImplementedError, it's because + # * follow_symlinks=False, + # * lchown() is unavailable, and + # * either + # * fchownat() is unavailable or + # * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW. + # (it returned ENOSUP.) + # therefore we're out of options--we simply cannot chown the + # symlink. give up, suppress the error. + # (which is what shutil always did in this circumstance.) 
+ pass + + copy_bsd_flags = ( + hasattr(info, '_bsd_flags') and + hasattr(os, 'chflags') and + (follow_symlinks or os.chflags in os.supports_follow_symlinks)) + if copy_bsd_flags: + bsd_flags = info._bsd_flags(follow_symlinks=follow_symlinks) + try: + os.chflags(target, bsd_flags, follow_symlinks=follow_symlinks) + except OSError as why: + if why.errno not in (EOPNOTSUPP, ENOTSUP): + raise + + +class _PathInfoBase: + __slots__ = ('_path', '_stat_result', '_lstat_result') + + def __init__(self, path): + self._path = str(path) + + def __repr__(self): + path_type = "WindowsPath" if os.name == "nt" else "PosixPath" + return f"<{path_type}.info>" + + def _stat(self, *, follow_symlinks=True, ignore_errors=False): + """Return the status as an os.stat_result, or None if stat() fails and + ignore_errors is true.""" + if follow_symlinks: + try: + result = self._stat_result + except AttributeError: + pass + else: + if ignore_errors or result is not None: + return result + try: + self._stat_result = os.stat(self._path) + except (OSError, ValueError): + self._stat_result = None + if not ignore_errors: + raise + return self._stat_result + else: + try: + result = self._lstat_result + except AttributeError: + pass + else: + if ignore_errors or result is not None: + return result + try: + self._lstat_result = os.lstat(self._path) + except (OSError, ValueError): + self._lstat_result = None + if not ignore_errors: + raise + return self._lstat_result + + def _posix_permissions(self, *, follow_symlinks=True): + """Return the POSIX file permissions.""" + return S_IMODE(self._stat(follow_symlinks=follow_symlinks).st_mode) + + def _file_id(self, *, follow_symlinks=True): + """Returns the identifier of the file.""" + st = self._stat(follow_symlinks=follow_symlinks) + return st.st_dev, st.st_ino + + def _access_time_ns(self, *, follow_symlinks=True): + """Return the access time in nanoseconds.""" + return self._stat(follow_symlinks=follow_symlinks).st_atime_ns + + def _mod_time_ns(self, *, follow_symlinks=True): + """Return the modify time in nanoseconds.""" + return self._stat(follow_symlinks=follow_symlinks).st_mtime_ns + + if hasattr(os.stat_result, 'st_flags'): + def _bsd_flags(self, *, follow_symlinks=True): + """Return the flags.""" + return self._stat(follow_symlinks=follow_symlinks).st_flags + + if hasattr(os, 'listxattr'): + def _xattrs(self, *, follow_symlinks=True): + """Return the xattrs as a list of (attr, value) pairs, or an empty + list if extended attributes aren't supported.""" + try: + return [ + (attr, os.getxattr(self._path, attr, follow_symlinks=follow_symlinks)) + for attr in os.listxattr(self._path, follow_symlinks=follow_symlinks)] + except OSError as err: + if err.errno not in (EPERM, ENOTSUP, ENODATA, EINVAL, EACCES): + raise + return [] + + +class _WindowsPathInfo(_PathInfoBase): + """Implementation of pathlib.types.PathInfo that provides status + information for Windows paths. 
Don't try to construct it yourself.""" + __slots__ = ('_exists', '_is_dir', '_is_file', '_is_symlink') + + def exists(self, *, follow_symlinks=True): + """Whether this path exists.""" + if not follow_symlinks and self.is_symlink(): + return True + try: + return self._exists + except AttributeError: + if os.path.exists(self._path): + self._exists = True + return True + else: + self._exists = self._is_dir = self._is_file = False + return False + + def is_dir(self, *, follow_symlinks=True): + """Whether this path is a directory.""" + if not follow_symlinks and self.is_symlink(): + return False + try: + return self._is_dir + except AttributeError: + if os.path.isdir(self._path): + self._is_dir = self._exists = True + return True + else: + self._is_dir = False + return False + + def is_file(self, *, follow_symlinks=True): + """Whether this path is a regular file.""" + if not follow_symlinks and self.is_symlink(): + return False + try: + return self._is_file + except AttributeError: + if os.path.isfile(self._path): + self._is_file = self._exists = True + return True + else: + self._is_file = False + return False + + def is_symlink(self): + """Whether this path is a symbolic link.""" + try: + return self._is_symlink + except AttributeError: + self._is_symlink = os.path.islink(self._path) + return self._is_symlink + + +class _PosixPathInfo(_PathInfoBase): + """Implementation of pathlib.types.PathInfo that provides status + information for POSIX paths. Don't try to construct it yourself.""" + __slots__ = () + + def exists(self, *, follow_symlinks=True): + """Whether this path exists.""" + st = self._stat(follow_symlinks=follow_symlinks, ignore_errors=True) + if st is None: + return False + return True + + def is_dir(self, *, follow_symlinks=True): + """Whether this path is a directory.""" + st = self._stat(follow_symlinks=follow_symlinks, ignore_errors=True) + if st is None: + return False + return S_ISDIR(st.st_mode) + + def is_file(self, *, follow_symlinks=True): + """Whether this path is a regular file.""" + st = self._stat(follow_symlinks=follow_symlinks, ignore_errors=True) + if st is None: + return False + return S_ISREG(st.st_mode) + + def is_symlink(self): + """Whether this path is a symbolic link.""" + st = self._stat(follow_symlinks=False, ignore_errors=True) + if st is None: + return False + return S_ISLNK(st.st_mode) + + +PathInfo = _WindowsPathInfo if os.name == 'nt' else _PosixPathInfo + + +class DirEntryInfo(_PathInfoBase): + """Implementation of pathlib.types.PathInfo that provides status + information by querying a wrapped os.DirEntry object. 
Don't try to + construct it yourself.""" + __slots__ = ('_entry',) + + def __init__(self, entry): + super().__init__(entry.path) + self._entry = entry + + def _stat(self, *, follow_symlinks=True, ignore_errors=False): + try: + return self._entry.stat(follow_symlinks=follow_symlinks) + except OSError: + if not ignore_errors: + raise + return None + + def exists(self, *, follow_symlinks=True): + """Whether this path exists.""" + if not follow_symlinks: + return True + return self._stat(ignore_errors=True) is not None + + def is_dir(self, *, follow_symlinks=True): + """Whether this path is a directory.""" + try: + return self._entry.is_dir(follow_symlinks=follow_symlinks) + except OSError: + return False + + def is_file(self, *, follow_symlinks=True): + """Whether this path is a regular file.""" + try: + return self._entry.is_file(follow_symlinks=follow_symlinks) + except OSError: + return False + + def is_symlink(self): + """Whether this path is a symbolic link.""" + try: + return self._entry.is_symlink() + except OSError: + return False diff --git a/Python314_4_x64_Template/Lib/pathlib/types.py b/Python314_4_x64_Template/Lib/pathlib/types.py new file mode 100644 index 00000000..d8f5c34a --- /dev/null +++ b/Python314_4_x64_Template/Lib/pathlib/types.py @@ -0,0 +1,430 @@ +""" +Protocols for supporting classes in pathlib. +""" + +# This module also provides abstract base classes for rich path objects. +# These ABCs are a *private* part of the Python standard library, but they're +# made available as a PyPI package called "pathlib-abc". It's possible they'll +# become an official part of the standard library in future. +# +# Three ABCs are provided -- _JoinablePath, _ReadablePath and _WritablePath + + +from abc import ABC, abstractmethod +from glob import _PathGlobber +from io import text_encoding +from pathlib._os import magic_open, ensure_distinct_paths, ensure_different_files, copyfileobj +from pathlib import PurePath, Path +from typing import Optional, Protocol, runtime_checkable + + +def _explode_path(path, split): + """ + Split the path into a 2-tuple (anchor, parts), where *anchor* is the + uppermost parent of the path (equivalent to path.parents[-1]), and + *parts* is a reversed list of parts following the anchor. + """ + parent, name = split(path) + names = [] + while path != parent: + names.append(name) + path = parent + parent, name = split(path) + return path, names + + +@runtime_checkable +class _PathParser(Protocol): + """Protocol for path parsers, which do low-level path manipulation. + + Path parsers provide a subset of the os.path API, specifically those + functions needed to provide JoinablePath functionality. Each JoinablePath + subclass references its path parser via a 'parser' class attribute. + """ + + sep: str + altsep: Optional[str] + def split(self, path: str) -> tuple[str, str]: ... + def splitext(self, path: str) -> tuple[str, str]: ... + def normcase(self, path: str) -> str: ... + + +@runtime_checkable +class PathInfo(Protocol): + """Protocol for path info objects, which support querying the file type. + Methods may return cached results. + """ + def exists(self, *, follow_symlinks: bool = True) -> bool: ... + def is_dir(self, *, follow_symlinks: bool = True) -> bool: ... + def is_file(self, *, follow_symlinks: bool = True) -> bool: ... + def is_symlink(self) -> bool: ... + + +class _JoinablePath(ABC): + """Abstract base class for pure path objects. + + This class *does not* provide several magic methods that are defined in + its implementation PurePath. 
They are: __init__, __fspath__, __bytes__, + __reduce__, __hash__, __eq__, __lt__, __le__, __gt__, __ge__. + """ + __slots__ = () + + @property + @abstractmethod + def parser(self): + """Implementation of pathlib._types.Parser used for low-level path + parsing and manipulation. + """ + raise NotImplementedError + + @abstractmethod + def with_segments(self, *pathsegments): + """Construct a new path object from any number of path-like objects. + Subclasses may override this method to customize how new path objects + are created from methods like `iterdir()`. + """ + raise NotImplementedError + + @abstractmethod + def __str__(self): + """Return the string representation of the path, suitable for + passing to system calls.""" + raise NotImplementedError + + @property + def anchor(self): + """The concatenation of the drive and root, or ''.""" + return _explode_path(str(self), self.parser.split)[0] + + @property + def name(self): + """The final path component, if any.""" + return self.parser.split(str(self))[1] + + @property + def suffix(self): + """ + The final component's last suffix, if any. + + This includes the leading period. For example: '.txt' + """ + return self.parser.splitext(self.name)[1] + + @property + def suffixes(self): + """ + A list of the final component's suffixes, if any. + + These include the leading periods. For example: ['.tar', '.gz'] + """ + split = self.parser.splitext + stem, suffix = split(self.name) + suffixes = [] + while suffix: + suffixes.append(suffix) + stem, suffix = split(stem) + return suffixes[::-1] + + @property + def stem(self): + """The final path component, minus its last suffix.""" + return self.parser.splitext(self.name)[0] + + def with_name(self, name): + """Return a new path with the file name changed.""" + split = self.parser.split + if split(name)[0]: + raise ValueError(f"Invalid name {name!r}") + path = str(self) + path = path.removesuffix(split(path)[1]) + name + return self.with_segments(path) + + def with_stem(self, stem): + """Return a new path with the stem changed.""" + suffix = self.suffix + if not suffix: + return self.with_name(stem) + elif not stem: + # If the suffix is non-empty, we can't make the stem empty. + raise ValueError(f"{self!r} has a non-empty suffix") + else: + return self.with_name(stem + suffix) + + def with_suffix(self, suffix): + """Return a new path with the file suffix changed. If the path + has no suffix, add given suffix. If the given suffix is an empty + string, remove the suffix from the path. + """ + stem = self.stem + if not stem: + # If the stem is empty, we can't make the suffix non-empty. + raise ValueError(f"{self!r} has an empty name") + elif suffix and not suffix.startswith('.'): + raise ValueError(f"Invalid suffix {suffix!r}") + else: + return self.with_name(stem + suffix) + + @property + def parts(self): + """An object providing sequence-like access to the + components in the filesystem path.""" + anchor, parts = _explode_path(str(self), self.parser.split) + if anchor: + parts.append(anchor) + return tuple(reversed(parts)) + + def joinpath(self, *pathsegments): + """Combine this path with one or several arguments, and return a + new path representing either a subpath (if all arguments are relative + paths) or a totally different path (if one of the arguments is + anchored). 
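+ + For example (sketch): + + >>> PurePosixPath('/etc').joinpath('passwd') # doctest: +SKIP + PurePosixPath('/etc/passwd') + >>> PurePosixPath('/etc').joinpath('/usr') # doctest: +SKIP + PurePosixPath('/usr')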
+ """ + return self.with_segments(str(self), *pathsegments) + + def __truediv__(self, key): + try: + return self.with_segments(str(self), key) + except TypeError: + return NotImplemented + + def __rtruediv__(self, key): + try: + return self.with_segments(key, str(self)) + except TypeError: + return NotImplemented + + @property + def parent(self): + """The logical parent of the path.""" + path = str(self) + parent = self.parser.split(path)[0] + if path != parent: + return self.with_segments(parent) + return self + + @property + def parents(self): + """A sequence of this path's logical parents.""" + split = self.parser.split + path = str(self) + parent = split(path)[0] + parents = [] + while path != parent: + parents.append(self.with_segments(parent)) + path = parent + parent = split(path)[0] + return tuple(parents) + + def full_match(self, pattern): + """ + Return True if this path matches the given glob-style pattern. The + pattern is matched against the entire path. + """ + case_sensitive = self.parser.normcase('Aa') == 'Aa' + globber = _PathGlobber(self.parser.sep, case_sensitive, recursive=True) + match = globber.compile(pattern, altsep=self.parser.altsep) + return match(str(self)) is not None + + +class _ReadablePath(_JoinablePath): + """Abstract base class for readable path objects. + + The Path class implements this ABC for local filesystem paths. Users may + create subclasses to implement readable virtual filesystem paths, such as + paths in archive files or on remote storage systems. + """ + __slots__ = () + + @property + @abstractmethod + def info(self): + """ + A PathInfo object that exposes the file type and other file attributes + of this path. + """ + raise NotImplementedError + + @abstractmethod + def __open_rb__(self, buffering=-1): + """ + Open the file pointed to by this path for reading in binary mode and + return a file object, like open(mode='rb'). + """ + raise NotImplementedError + + def read_bytes(self): + """ + Open the file in bytes mode, read it, and close the file. + """ + with magic_open(self, mode='rb', buffering=0) as f: + return f.read() + + def read_text(self, encoding=None, errors=None, newline=None): + """ + Open the file in text mode, read it, and close the file. + """ + # Call io.text_encoding() here to ensure any warning is raised at an + # appropriate stack level. + encoding = text_encoding(encoding) + with magic_open(self, mode='r', encoding=encoding, errors=errors, newline=newline) as f: + return f.read() + + @abstractmethod + def iterdir(self): + """Yield path objects of the directory contents. + + The children are yielded in arbitrary order, and the + special entries '.' and '..' are not included. + """ + raise NotImplementedError + + def glob(self, pattern, *, recurse_symlinks=True): + """Iterate over this subtree and yield all existing files (of any + kind, including directories) matching the given relative pattern. 
+ """ + anchor, parts = _explode_path(pattern, self.parser.split) + if anchor: + raise NotImplementedError("Non-relative patterns are unsupported") + elif not parts: + raise ValueError(f"Unacceptable pattern: {pattern!r}") + elif not recurse_symlinks: + raise NotImplementedError("recurse_symlinks=False is unsupported") + case_sensitive = self.parser.normcase('Aa') == 'Aa' + globber = _PathGlobber(self.parser.sep, case_sensitive, recursive=True) + select = globber.selector(parts) + return select(self.joinpath('')) + + def walk(self, top_down=True, on_error=None, follow_symlinks=False): + """Walk the directory tree from this directory, similar to os.walk().""" + paths = [self] + while paths: + path = paths.pop() + if isinstance(path, tuple): + yield path + continue + dirnames = [] + filenames = [] + if not top_down: + paths.append((path, dirnames, filenames)) + try: + for child in path.iterdir(): + if child.info.is_dir(follow_symlinks=follow_symlinks): + if not top_down: + paths.append(child) + dirnames.append(child.name) + else: + filenames.append(child.name) + except OSError as error: + if on_error is not None: + on_error(error) + if not top_down: + while not isinstance(paths.pop(), tuple): + pass + continue + if top_down: + yield path, dirnames, filenames + paths += [path.joinpath(d) for d in reversed(dirnames)] + + @abstractmethod + def readlink(self): + """ + Return the path to which the symbolic link points. + """ + raise NotImplementedError + + def copy(self, target, **kwargs): + """ + Recursively copy this file or directory tree to the given destination. + """ + ensure_distinct_paths(self, target) + target._copy_from(self, **kwargs) + return target.joinpath() # Empty join to ensure fresh metadata. + + def copy_into(self, target_dir, **kwargs): + """ + Copy this file or directory tree into the given existing directory. + """ + name = self.name + if not name: + raise ValueError(f"{self!r} has an empty name") + return self.copy(target_dir / name, **kwargs) + + +class _WritablePath(_JoinablePath): + """Abstract base class for writable path objects. + + The Path class implements this ABC for local filesystem paths. Users may + create subclasses to implement writable virtual filesystem paths, such as + paths in archive files or on remote storage systems. + """ + __slots__ = () + + @abstractmethod + def symlink_to(self, target, target_is_directory=False): + """ + Make this path a symlink pointing to the target path. + Note the order of arguments (link, target) is the reverse of os.symlink. + """ + raise NotImplementedError + + @abstractmethod + def mkdir(self): + """ + Create a new directory at this given path. + """ + raise NotImplementedError + + @abstractmethod + def __open_wb__(self, buffering=-1): + """ + Open the file pointed to by this path for writing in binary mode and + return a file object, like open(mode='wb'). + """ + raise NotImplementedError + + def write_bytes(self, data): + """ + Open the file in bytes mode, write to it, and close the file. + """ + # type-check for the buffer interface before truncating the file + view = memoryview(data) + with magic_open(self, mode='wb') as f: + return f.write(view) + + def write_text(self, data, encoding=None, errors=None, newline=None): + """ + Open the file in text mode, write to it, and close the file. + """ + # Call io.text_encoding() here to ensure any warning is raised at an + # appropriate stack level. 
+ encoding = text_encoding(encoding) + if not isinstance(data, str): + raise TypeError('data must be str, not %s' % + data.__class__.__name__) + with magic_open(self, mode='w', encoding=encoding, errors=errors, newline=newline) as f: + return f.write(data) + + def _copy_from(self, source, follow_symlinks=True): + """ + Recursively copy the given path to this path. + """ + stack = [(source, self)] + while stack: + src, dst = stack.pop() + if not follow_symlinks and src.info.is_symlink(): + dst.symlink_to(str(src.readlink()), src.info.is_dir()) + elif src.info.is_dir(): + children = src.iterdir() + dst.mkdir() + for child in children: + stack.append((child, dst.joinpath(child.name))) + else: + ensure_different_files(src, dst) + with magic_open(src, 'rb') as source_f: + with magic_open(dst, 'wb') as target_f: + copyfileobj(source_f, target_f) + + +_JoinablePath.register(PurePath) +_ReadablePath.register(Path) +_WritablePath.register(Path) diff --git a/Python314_4_x64_Template/Lib/pdb.py b/Python314_4_x64_Template/Lib/pdb.py new file mode 100644 index 00000000..903baeb8 --- /dev/null +++ b/Python314_4_x64_Template/Lib/pdb.py @@ -0,0 +1,3678 @@ +""" +The Python Debugger Pdb +======================= + +To use the debugger in its simplest form: + + >>> import pdb + >>> pdb.run('<a statement>') + +The debugger's prompt is '(Pdb) '. This will stop in the first +function call in <a statement>. + +Alternatively, if a statement terminated with an unhandled exception, +you can use pdb's post-mortem facility to inspect the contents of the +traceback: + + >>> <a statement> + <exception traceback> + >>> import pdb + >>> pdb.pm() + +The commands recognized by the debugger are listed in the next +section. Most can be abbreviated as indicated; e.g., h(elp) means +that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel', +nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in +square brackets. Alternatives in the command syntax are separated +by a vertical bar (|). + +A blank line repeats the previous command literally, except for +'list', where it lists the next 11 lines. + +Commands that the debugger doesn't recognize are assumed to be Python +statements and are executed in the context of the program being +debugged. Python statements can also be prefixed with an exclamation +point ('!'). This is a powerful way to inspect the program being +debugged; it is even possible to change variables or call functions. +When an exception occurs in such a statement, the exception name is +printed but the debugger's state is not changed. + +The debugger supports aliases, which can save typing. And aliases can +have parameters (see the alias help entry) which allows one a certain +level of adaptability to the context under examination. + +Multiple commands may be entered on a single line, separated by the +pair ';;'. No intelligence is applied to separating the commands; the +input is split at the first ';;', even if it is in the middle of a +quoted string. + +If a file ".pdbrc" exists in your home directory or in the current +directory, it is read in and executed as if it had been typed at the +debugger prompt. This is particularly useful for aliases. If both +files exist, the one in the home directory is read first and aliases +defined there can be overridden by the local file. This behavior can be +disabled by passing the "readrc=False" argument to the Pdb constructor.
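+ +A tiny illustrative ".pdbrc" (hypothetical aliases; "%1" is the alias +parameter placeholder described under the alias command): + + alias pl pp %1.__dict__ + alias loc pp locals()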
+ +Aside from aliases, the debugger is not directly programmable; but it +is implemented as a class from which you can derive your own debugger +class, which you can make as fancy as you like. + + +Debugger commands +================= + +""" +# NOTE: the actual command documentation is collected from docstrings of the +# commands and is appended to __doc__ after the class has been defined. + +import os +import io +import re +import sys +import cmd +import bdb +import dis +import code +import glob +import json +import stat +import token +import types +import atexit +import codeop +import pprint +import signal +import socket +import typing +import asyncio +import inspect +import weakref +import builtins +import tempfile +import textwrap +import tokenize +import itertools +import traceback +import linecache +import selectors +import threading +import _colorize +import _pyrepl.utils + +from contextlib import ExitStack, closing, contextmanager +from types import CodeType +from warnings import deprecated + + +class Restart(Exception): + """Causes a debugger to be restarted for the debugged python program.""" + pass + +__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace", + "post_mortem", "set_default_backend", "get_default_backend", "help"] + + +def find_first_executable_line(code): + """ Try to find the first executable line of the code object. + + Equivalently, find the line number of the instruction that's + after RESUME + + Return code.co_firstlineno if no executable line is found. + """ + prev = None + for instr in dis.get_instructions(code): + if prev is not None and prev.opname == 'RESUME': + if instr.positions.lineno is not None: + return instr.positions.lineno + return code.co_firstlineno + prev = instr + return code.co_firstlineno + +def find_function(funcname, filename): + cre = re.compile(r'def\s+%s(\s*\[.+\])?\s*[(]' % re.escape(funcname)) + try: + fp = tokenize.open(filename) + except OSError: + lines = linecache.getlines(filename) + if not lines: + return None + fp = io.StringIO(''.join(lines)) + funcdef = "" + funcstart = 0 + # consumer of this info expects the first line to be 1 + with fp: + for lineno, line in enumerate(fp, start=1): + if cre.match(line): + funcstart, funcdef = lineno, line + elif funcdef: + funcdef += line + + if funcdef: + try: + code = compile(funcdef, filename, 'exec') + except SyntaxError: + continue + # We should always be able to find the code object here + funccode = next(c for c in code.co_consts if + isinstance(c, CodeType) and c.co_name == funcname) + lineno_offset = find_first_executable_line(funccode) + return funcname, filename, funcstart + lineno_offset - 1 + return None + +def lasti2lineno(code, lasti): + linestarts = list(dis.findlinestarts(code)) + linestarts.reverse() + for i, lineno in linestarts: + if lasti >= i: + return lineno + return 0 + + +class _rstr(str): + """String that doesn't quote its repr.""" + def __repr__(self): + return self + + +class _ExecutableTarget: + filename: str + code: CodeType | str + namespace: dict + + +class _ScriptTarget(_ExecutableTarget): + def __init__(self, target): + self._check(target) + self._target = self._safe_realpath(target) + + # If PYTHONSAFEPATH (-P) is not set, sys.path[0] is the directory + # of pdb, and we should replace it with the directory of the script + if not sys.flags.safe_path: + sys.path[0] = os.path.dirname(self._target) + + @staticmethod + def _check(target): + """ + Check that target is plausibly a script. 
+ """ + if not os.path.exists(target): + print(f'Error: {target} does not exist') + sys.exit(1) + if os.path.isdir(target): + print(f'Error: {target} is a directory') + sys.exit(1) + + @staticmethod + def _safe_realpath(path): + """ + Return the canonical path (realpath) if it is accessible from the userspace. + Otherwise (for example, if the path is a symlink to an anonymous pipe), + return the original path. + + See GH-142315. + """ + realpath = os.path.realpath(path) + return realpath if os.path.exists(realpath) else path + + def __repr__(self): + return self._target + + @property + def filename(self): + return self._target + + @property + def code(self): + # Open the file each time because the file may be modified + with io.open_code(self._target) as fp: + return f"exec(compile({fp.read()!r}, {self._target!r}, 'exec'))" + + @property + def namespace(self): + return dict( + __name__='__main__', + __file__=self._target, + __builtins__=__builtins__, + __spec__=None, + ) + + +class _ModuleTarget(_ExecutableTarget): + def __init__(self, target): + self._target = target + + import runpy + try: + _, self._spec, self._code = runpy._get_module_details(self._target) + except ImportError as e: + print(f"ImportError: {e}") + sys.exit(1) + except Exception: + traceback.print_exc() + sys.exit(1) + + def __repr__(self): + return self._target + + @property + def filename(self): + return self._code.co_filename + + @property + def code(self): + return self._code + + @property + def namespace(self): + return dict( + __name__='__main__', + __file__=os.path.normcase(os.path.abspath(self.filename)), + __package__=self._spec.parent, + __loader__=self._spec.loader, + __spec__=self._spec, + __builtins__=__builtins__, + ) + + +class _ZipTarget(_ExecutableTarget): + def __init__(self, target): + import runpy + + self._target = os.path.realpath(target) + sys.path.insert(0, self._target) + try: + _, self._spec, self._code = runpy._get_main_module_details() + except ImportError as e: + print(f"ImportError: {e}") + sys.exit(1) + except Exception: + traceback.print_exc() + sys.exit(1) + + def __repr__(self): + return self._target + + @property + def filename(self): + return self._code.co_filename + + @property + def code(self): + return self._code + + @property + def namespace(self): + return dict( + __name__='__main__', + __file__=os.path.normcase(os.path.abspath(self.filename)), + __package__=self._spec.parent, + __loader__=self._spec.loader, + __spec__=self._spec, + __builtins__=__builtins__, + ) + + +class _PdbInteractiveConsole(code.InteractiveConsole): + def __init__(self, ns, message): + self._message = message + super().__init__(locals=ns, local_exit=True) + + def write(self, data): + self._message(data, end='') + + +# Interaction prompt line will separate file and call info from code +# text using value of line_prefix string. A newline and arrow may +# be to your liking. You can set it once pdb is imported using the +# command "pdb.line_prefix = '\n% '". 
+# line_prefix = ': ' # Use this to get the old situation back +line_prefix = '\n-> ' # Probably a better default + + +# The default backend to use for Pdb instances if not specified +# Should be either 'settrace' or 'monitoring' +_default_backend = 'settrace' + + +def set_default_backend(backend): + """Set the default backend to use for Pdb instances.""" + global _default_backend + if backend not in ('settrace', 'monitoring'): + raise ValueError("Invalid backend: %s" % backend) + _default_backend = backend + + +def get_default_backend(): + """Get the default backend to use for Pdb instances.""" + return _default_backend + + +class Pdb(bdb.Bdb, cmd.Cmd): + _previous_sigint_handler = None + + # Limit the maximum depth of chained exceptions, we should be handling cycles, + # but in case there are recursions, we stop at 999. + MAX_CHAINED_EXCEPTION_DEPTH = 999 + + _file_mtime_table = {} + + _last_pdb_instance = None + + def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None, + nosigint=False, readrc=True, mode=None, backend=None, colorize=False): + bdb.Bdb.__init__(self, skip=skip, backend=backend if backend else get_default_backend()) + cmd.Cmd.__init__(self, completekey, stdin, stdout) + sys.audit("pdb.Pdb") + if stdout: + self.use_rawinput = 0 + self.prompt = '(Pdb) ' + self.aliases = {} + self.displaying = {} + self.mainpyfile = '' + self._wait_for_mainpyfile = False + self.tb_lineno = {} + self.mode = mode + self.colorize = colorize and _colorize.can_colorize(file=stdout or sys.stdout) + # Try to load readline if it exists + try: + import readline + # remove some common file name delimiters + readline.set_completer_delims(' \t\n`@#%^&*()=+[{]}\\|;:\'",<>?') + except ImportError: + pass + + self.allow_kbdint = False + self.nosigint = nosigint + # Consider these characters as part of the command so when the users type + # c.a or c['a'], it won't be recognized as a c(ontinue) command + self.identchars = cmd.Cmd.identchars + '=.[](),"\'+-*/%@&|<>~^' + + # Read ~/.pdbrc and ./.pdbrc + self.rcLines = [] + if readrc: + try: + with open(os.path.expanduser('~/.pdbrc'), encoding='utf-8') as rcFile: + self.rcLines.extend(rcFile) + except OSError: + pass + try: + with open(".pdbrc", encoding='utf-8') as rcFile: + self.rcLines.extend(rcFile) + except OSError: + pass + + self.commands = {} # associates a command list to breakpoint numbers + self.commands_defining = False # True while in the process of defining + # a command list + self.commands_bnum = None # The breakpoint number for which we are + # defining a list + + self.async_shim_frame = None + self.async_awaitable = None + + self._chained_exceptions = tuple() + self._chained_exception_index = 0 + + self._current_task = None + + def set_trace(self, frame=None, *, commands=None): + Pdb._last_pdb_instance = self + if frame is None: + frame = sys._getframe().f_back + + if commands is not None: + self.rcLines.extend(commands) + + super().set_trace(frame) + + async def set_trace_async(self, frame=None, *, commands=None): + if self.async_awaitable is not None: + # We are already in a set_trace_async call, do not mess with it + return + + if frame is None: + frame = sys._getframe().f_back + + # We need set_trace to set up the basics, however, this will call + # set_stepinstr(), which we need to compensate for, because we don't + # want to trigger on calls + self.set_trace(frame, commands=commands) + # Changing the stopframe will disable trace dispatch on calls + self.stopframe = frame + # We need to stop tracing because we don't have
the privilege to avoid + # triggering tracing functions as normal, as we are not already in + # tracing functions + self.stop_trace() + + self.async_shim_frame = sys._getframe() + self.async_awaitable = None + + while True: + self.async_awaitable = None + # Simulate a trace event + # This should bring up pdb and make pdb believe it's debugging the + # caller frame + self.trace_dispatch(frame, "opcode", None) + if self.async_awaitable is not None: + try: + if self.breaks: + with self.set_enterframe(frame): + # set_continue requires enterframe to work + self.set_continue() + self.start_trace() + await self.async_awaitable + except Exception: + self._error_exc() + else: + break + + self.async_shim_frame = None + + # start the trace (the actual command is already set by set_* calls) + if self.returnframe is None and self.stoplineno == -1 and not self.breaks: + # This means we did a continue without any breakpoints, we should not + # start the trace + return + + self.start_trace() + + def sigint_handler(self, signum, frame): + if self.allow_kbdint: + raise KeyboardInterrupt + self.message("\nProgram interrupted. (Use 'cont' to resume).") + self.set_step() + self.set_trace(frame) + + def reset(self): + bdb.Bdb.reset(self) + self.forget() + + def forget(self): + self.lineno = None + self.stack = [] + self.curindex = 0 + if hasattr(self, 'curframe') and self.curframe: + self.curframe.f_globals.pop('__pdb_convenience_variables', None) + self.curframe = None + self.tb_lineno.clear() + + def setup(self, f, tb): + self.forget() + self.stack, self.curindex = self.get_stack(f, tb) + while tb: + # when setting up post-mortem debugging with a traceback, save all + # the original line numbers to be displayed along the current line + # numbers (which can be different, e.g. due to finally clauses) + lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti) + self.tb_lineno[tb.tb_frame] = lineno + tb = tb.tb_next + self.curframe = self.stack[self.curindex][0] + self.set_convenience_variable(self.curframe, '_frame', self.curframe) + if self._current_task: + self.set_convenience_variable(self.curframe, '_asynctask', self._current_task) + self._save_initial_file_mtime(self.curframe) + + if self._chained_exceptions: + self.set_convenience_variable( + self.curframe, + '_exception', + self._chained_exceptions[self._chained_exception_index], + ) + + if self.rcLines: + self.cmdqueue = [ + line for line in self.rcLines + if line.strip() and not line.strip().startswith("#") + ] + self.rcLines = [] + + @property + @deprecated("The frame locals reference is no longer cached. Use 'curframe.f_locals' instead.") + def curframe_locals(self): + return self.curframe.f_locals + + @curframe_locals.setter + @deprecated("Setting 'curframe_locals' no longer has any effect. 
Update the contents of 'curframe.f_locals' instead.") + def curframe_locals(self, value): + pass + + # Override Bdb methods + + def user_call(self, frame, argument_list): + """This method is called when there is the remote possibility + that we ever need to stop in this function.""" + if self._wait_for_mainpyfile: + return + if self.stop_here(frame): + self.message('--Call--') + self.interaction(frame, None) + + def user_line(self, frame): + """This function is called when we stop or break at this line.""" + if self._wait_for_mainpyfile: + if (self.mainpyfile != self.canonic(frame.f_code.co_filename)): + return + self._wait_for_mainpyfile = False + if self.trace_opcodes: + # GH-127321 + # We want to avoid stopping at an opcode that does not have + # an associated line number because pdb does not like it + if frame.f_lineno is None: + self.set_stepinstr() + return + self.bp_commands(frame) + self.interaction(frame, None) + + user_opcode = user_line + + def bp_commands(self, frame): + """Call every command that was set for the current active breakpoint + (if there is one). + + Returns True if the normal interaction function must be called, + False otherwise.""" + # self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit + if getattr(self, "currentbp", False) and \ + self.currentbp in self.commands: + currentbp = self.currentbp + self.currentbp = 0 + for line in self.commands[currentbp]: + self.cmdqueue.append(line) + self.cmdqueue.append(f'_pdbcmd_restore_lastcmd {self.lastcmd}') + + def user_return(self, frame, return_value): + """This function is called when a return trap is set here.""" + if self._wait_for_mainpyfile: + return + frame.f_locals['__return__'] = return_value + self.set_convenience_variable(frame, '_retval', return_value) + self.message('--Return--') + self.interaction(frame, None) + + def user_exception(self, frame, exc_info): + """This function is called if an exception occurs, + but only if we are to stop at or just below this level.""" + if self._wait_for_mainpyfile: + return + exc_type, exc_value, exc_traceback = exc_info + frame.f_locals['__exception__'] = exc_type, exc_value + self.set_convenience_variable(frame, '_exception', exc_value) + + # An 'Internal StopIteration' exception is an exception debug event + # issued by the interpreter when handling a subgenerator run with + # 'yield from' or a generator controlled by a for loop. No exception has + # actually occurred in this case. The debugger uses this debug event to + # stop when the debuggee is returning from such generators. 
+ prefix = 'Internal ' if (not exc_traceback + and exc_type is StopIteration) else '' + self.message('%s%s' % (prefix, self._format_exc(exc_value))) + self.interaction(frame, exc_traceback) + + # General interaction function + def _cmdloop(self): + while True: + try: + # keyboard interrupts allow for an easy way to cancel + # the current command, so allow them during interactive input + self.allow_kbdint = True + self.cmdloop() + self.allow_kbdint = False + break + except KeyboardInterrupt: + self.message('--KeyboardInterrupt--') + + def _save_initial_file_mtime(self, frame): + """save the mtime of all the files in the frame stack in the file mtime table + if they haven't been saved yet.""" + while frame: + filename = frame.f_code.co_filename + if filename not in self._file_mtime_table: + try: + self._file_mtime_table[filename] = os.path.getmtime(filename) + except Exception: + pass + frame = frame.f_back + + def _validate_file_mtime(self): + """Check if the source file of the current frame has been modified. + If so, give a warning and reset the modify time to current.""" + try: + filename = self.curframe.f_code.co_filename + mtime = os.path.getmtime(filename) + except Exception: + return + if (filename in self._file_mtime_table and + mtime != self._file_mtime_table[filename]): + self.message(f"*** WARNING: file '{filename}' was edited, " + "running stale code until the program is rerun") + self._file_mtime_table[filename] = mtime + + # Called before loop, handles display expressions + # Set up convenience variable containers + def _show_display(self): + displaying = self.displaying.get(self.curframe) + if displaying: + for expr, oldvalue in displaying.items(): + newvalue = self._getval_except(expr) + # check for identity first; this prevents custom __eq__ from + # being called at every loop, and also prevents instances whose + # fields are changed to be displayed + if newvalue is not oldvalue and newvalue != oldvalue: + displaying[expr] = newvalue + self.message('display %s: %s [old: %s]' % + (expr, self._safe_repr(newvalue, expr), + self._safe_repr(oldvalue, expr))) + + def _get_tb_and_exceptions(self, tb_or_exc): + """ + Given a traceback or an exception, return a tuple of chained exceptions + and current traceback to inspect. + + This will deal with selecting the right ``__cause__`` or ``__context__`` + as well as handling cycles, and return a flattened list of exceptions we + can jump to with do_exceptions. + + """ + _exceptions = [] + if isinstance(tb_or_exc, BaseException): + traceback, current = tb_or_exc.__traceback__, tb_or_exc + + while current is not None: + if current in _exceptions: + break + _exceptions.append(current) + if current.__cause__ is not None: + current = current.__cause__ + elif ( + current.__context__ is not None and not current.__suppress_context__ + ): + current = current.__context__ + + if len(_exceptions) >= self.MAX_CHAINED_EXCEPTION_DEPTH: + self.message( + f"More than {self.MAX_CHAINED_EXCEPTION_DEPTH}" + " chained exceptions found, not all exceptions" + " will be browsable with `exceptions`." + ) + break + else: + traceback = tb_or_exc + return tuple(reversed(_exceptions)), traceback + + @contextmanager + def _hold_exceptions(self, exceptions): + """ + Context manager to ensure proper cleaning of exception references + + When given a chained exception instead of a traceback, + pdb may hold references to many objects which may leak memory.
+ + We use this context manager to make sure everything is properly cleaned + + """ + try: + self._chained_exceptions = exceptions + self._chained_exception_index = len(exceptions) - 1 + yield + finally: + # we can't put those in forget as otherwise they would + # be cleared on exception change + self._chained_exceptions = tuple() + self._chained_exception_index = 0 + + def _get_asyncio_task(self): + try: + task = asyncio.current_task() + except RuntimeError: + task = None + return task + + def interaction(self, frame, tb_or_exc): + # Restore the previous signal handler at the Pdb prompt. + if Pdb._previous_sigint_handler: + try: + signal.signal(signal.SIGINT, Pdb._previous_sigint_handler) + except ValueError: # ValueError: signal only works in main thread + pass + else: + Pdb._previous_sigint_handler = None + + self._current_task = self._get_asyncio_task() + + _chained_exceptions, tb = self._get_tb_and_exceptions(tb_or_exc) + if isinstance(tb_or_exc, BaseException): + assert tb is not None, "main exception must have a traceback" + with self._hold_exceptions(_chained_exceptions): + self.setup(frame, tb) + # We should print the stack entry if and only if the user input + # is expected, and we should print it right before the user input. + # We achieve this by appending _pdbcmd_print_frame_status to the + # command queue. If cmdqueue is not exhausted, the user input is + # not expected and we will not print the stack entry. + self.cmdqueue.append('_pdbcmd_print_frame_status') + self._cmdloop() + # If _pdbcmd_print_frame_status is not used, pop it out + if self.cmdqueue and self.cmdqueue[-1] == '_pdbcmd_print_frame_status': + self.cmdqueue.pop() + self.forget() + + def displayhook(self, obj): + """Custom displayhook for the exec in default(), which prevents + assignment of the _ variable in the builtins. + """ + # reproduce the behavior of the standard displayhook, not printing None + if obj is not None: + self.message(repr(obj)) + + @contextmanager + def _enable_multiline_input(self): + try: + import readline + except ImportError: + yield + return + + def input_auto_indent(): + last_index = readline.get_current_history_length() + last_line = readline.get_history_item(last_index) + if last_line: + if last_line.isspace(): + # If the last line is empty, we don't need to indent + return + + last_line = last_line.rstrip('\r\n') + indent = len(last_line) - len(last_line.lstrip()) + if last_line.endswith(":"): + indent += 4 + readline.insert_text(' ' * indent) + + completenames = self.completenames + try: + self.completenames = self.complete_multiline_names + readline.set_startup_hook(input_auto_indent) + yield + finally: + readline.set_startup_hook() + self.completenames = completenames + return + + def _exec_in_closure(self, source, globals, locals): + """ Run source code in closure so code object created within source + can find variables in locals correctly + + returns True if the source is executed, False otherwise + """ + + # Determine if the source should be executed in closure. Only when the + # source compiled to multiple code objects, we should use this feature. + # Otherwise, we can just raise an exception and normal exec will be used. 
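+        # Illustrative note: a multi-line input such as
+        #     def f():
+        #         return x
+        # compiles to a module code object plus a nested code object for f,
+        # and without the closure trick below the body of f could not see
+        # the current frame's local 'x'.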
+
+        code = compile(source, "<string>", "exec")
+        if not any(isinstance(const, CodeType) for const in code.co_consts):
+            return False
+
+        # locals could be a proxy which does not support pop
+        # copy it first to avoid modifying the original locals
+        locals_copy = dict(locals)
+
+        locals_copy["__pdb_eval__"] = {
+            "result": None,
+            "write_back": {}
+        }
+
+        # If the source is an expression, we need to print its value
+        try:
+            compile(source, "<string>", "eval")
+        except SyntaxError:
+            pass
+        else:
+            source = "__pdb_eval__['result'] = " + source
+
+        # Add write-back to update the locals
+        source = ("try:\n" +
+                  textwrap.indent(source, "    ") + "\n" +
+                  "finally:\n" +
+                  "    __pdb_eval__['write_back'] = locals()")
+
+        # Build a closure source code with freevars from locals like:
+        # def __pdb_outer():
+        #     var = None
+        #     def __pdb_scope():  # This is the code object we want to execute
+        #         nonlocal var
+        #         <source>
+        #     return __pdb_scope.__code__
+        source_with_closure = ("def __pdb_outer():\n" +
+                               "\n".join(f"    {var} = None" for var in locals_copy) + "\n" +
+                               "    def __pdb_scope():\n" +
+                               "\n".join(f"        nonlocal {var}" for var in locals_copy) + "\n" +
+                               textwrap.indent(source, "        ") + "\n" +
+                               "        return __pdb_scope.__code__"
+                               )
+
+        # Get the code object of __pdb_scope()
+        # The exec fills locals_copy with the __pdb_outer() function and we can call
+        # that to get the code object of __pdb_scope()
+        ns = {}
+        try:
+            exec(source_with_closure, {}, ns)
+        except Exception:
+            return False
+        code = ns["__pdb_outer"]()
+
+        cells = tuple(types.CellType(locals_copy.get(var)) for var in code.co_freevars)
+
+        try:
+            exec(code, globals, locals_copy, closure=cells)
+        except Exception:
+            return False
+
+        # get the data we need from the statement
+        pdb_eval = locals_copy["__pdb_eval__"]
+
+        # __pdb_eval__ should not be updated back to locals
+        pdb_eval["write_back"].pop("__pdb_eval__")
+
+        # Write all local variables back to locals
+        locals.update(pdb_eval["write_back"])
+        eval_result = pdb_eval["result"]
+        if eval_result is not None:
+            self.message(repr(eval_result))
+
+        return True
+
+    def _exec_await(self, source, globals, locals):
+        """ Run source code that contains await by playing with async shim frame"""
+        # Put the source in an async function
+        source_async = (
+            "async def __pdb_await():\n" +
+            textwrap.indent(source, "    ") + '\n' +
+            "    __pdb_locals.update(locals())"
+        )
+        ns = globals | locals
+        # We use __pdb_locals to do write back
+        ns["__pdb_locals"] = locals
+        exec(source_async, ns)
+        self.async_awaitable = ns["__pdb_await"]()
+
+    def _read_code(self, line):
+        buffer = line
+        is_await_code = False
+        code = None
+        try:
+            if (code := codeop.compile_command(line + '\n', '<stdin>', 'single')) is None:
+                # Multi-line mode
+                with self._enable_multiline_input():
+                    buffer = line
+                    continue_prompt = "...   "
+                    while (code := codeop.compile_command(buffer, '<stdin>', 'single')) is None:
+                        if self.use_rawinput:
+                            try:
+                                line = input(continue_prompt)
+                            except (EOFError, KeyboardInterrupt):
+                                self.lastcmd = ""
+                                print('\n')
+                                return None, None, False
+                        else:
+                            self.stdout.write(continue_prompt)
+                            self.stdout.flush()
+                            line = self.stdin.readline()
+                            if not len(line):
+                                self.lastcmd = ""
+                                self.stdout.write('\n')
+                                self.stdout.flush()
+                                return None, None, False
+                            else:
+                                line = line.rstrip('\r\n')
+                        if line.isspace():
+                            # empty line, just continue
+                            buffer += '\n'
+                        else:
+                            buffer += '\n' + line
+                    self.lastcmd = buffer
+        except SyntaxError as e:
+            # Maybe it's an await expression/statement
+            if (
+                self.async_shim_frame is not None
+                and e.msg == "'await' outside function"
+            ):
+                is_await_code = True
+            else:
+                raise
+
+        return code, buffer, is_await_code
+
+    def default(self, line):
+        if line[:1] == '!': line = line[1:].strip()
+        locals = self.curframe.f_locals
+        globals = self.curframe.f_globals
+        try:
+            code, buffer, is_await_code = self._read_code(line)
+            if buffer is None:
+                return
+            save_stdout = sys.stdout
+            save_stdin = sys.stdin
+            save_displayhook = sys.displayhook
+            try:
+                sys.stdin = self.stdin
+                sys.stdout = self.stdout
+                sys.displayhook = self.displayhook
+                if is_await_code:
+                    self._exec_await(buffer, globals, locals)
+                    return True
+                else:
+                    if not self._exec_in_closure(buffer, globals, locals):
+                        exec(code, globals, locals)
+            finally:
+                sys.stdout = save_stdout
+                sys.stdin = save_stdin
+                sys.displayhook = save_displayhook
+        except:
+            self._error_exc()
+
+    def _replace_convenience_variables(self, line):
+        """Replace the convenience variables in 'line' with their values.
+        e.g. $foo is replaced by __pdb_convenience_variables["foo"].
+        Note: such pattern in string literals will be skipped"""
+
+        if "$" not in line:
+            return line
+
+        dollar_start = dollar_end = (-1, -1)
+        replace_variables = []
+        try:
+            for t in tokenize.generate_tokens(io.StringIO(line).readline):
+                token_type, token_string, start, end, _ = t
+                if token_type == token.OP and token_string == '$':
+                    dollar_start, dollar_end = start, end
+                elif start == dollar_end and token_type == token.NAME:
+                    # line is a one-line command so we only care about column
+                    replace_variables.append((dollar_start[1], end[1], token_string))
+        except tokenize.TokenError:
+            return line
+
+        if not replace_variables:
+            return line
+
+        last_end = 0
+        line_pieces = []
+        for start, end, name in replace_variables:
+            line_pieces.append(line[last_end:start] + f'__pdb_convenience_variables["{name}"]')
+            last_end = end
+        line_pieces.append(line[last_end:])
+
+        return ''.join(line_pieces)
+
+    def precmd(self, line):
+        """Handle alias expansion and ';;' separator."""
+        if not line.strip():
+            return line
+        args = line.split()
+        while args[0] in self.aliases:
+            line = self.aliases[args[0]]
+            for idx in range(1, 10):
+                if f'%{idx}' in line:
+                    if idx >= len(args):
+                        self.error(f"Not enough arguments for alias '{args[0]}'")
+                        # This is a no-op
+                        return "!"
+                    line = line.replace(f'%{idx}', args[idx])
+                elif '%*' not in line:
+                    if idx < len(args):
+                        self.error(f"Too many arguments for alias '{args[0]}'")
+                        # This is a no-op
+                        return "!"
+ break + + line = line.replace("%*", ' '.join(args[1:])) + args = line.split() + # split into ';;' separated commands + # unless it's an alias command + if args[0] != 'alias': + marker = line.find(';;') + if marker >= 0: + # queue up everything after marker + next = line[marker+2:].lstrip() + self.cmdqueue.insert(0, next) + line = line[:marker].rstrip() + + # Replace all the convenience variables + line = self._replace_convenience_variables(line) + + return line + + def onecmd(self, line): + """Interpret the argument as though it had been typed in response + to the prompt. + + Checks whether this line is typed at the normal prompt or in + a breakpoint command list definition. + """ + if not self.commands_defining: + if line.startswith('_pdbcmd'): + command, arg, line = self.parseline(line) + if hasattr(self, command): + return getattr(self, command)(arg) + return cmd.Cmd.onecmd(self, line) + else: + return self.handle_command_def(line) + + def handle_command_def(self, line): + """Handles one command line during command list definition.""" + cmd, arg, line = self.parseline(line) + if not cmd: + return False + if cmd == 'end': + return True # end of cmd list + elif cmd == 'EOF': + self.message('') + return True # end of cmd list + cmdlist = self.commands[self.commands_bnum] + if cmd == 'silent': + cmdlist.append('_pdbcmd_silence_frame_status') + return False # continue to handle other cmd def in the cmd list + if arg: + cmdlist.append(cmd+' '+arg) + else: + cmdlist.append(cmd) + # Determine if we must stop + try: + func = getattr(self, 'do_' + cmd) + except AttributeError: + func = self.default + # one of the resuming commands + if func.__name__ in self.commands_resuming: + return True + return False + + def _colorize_code(self, code): + if self.colorize: + colors = list(_pyrepl.utils.gen_colors(code)) + chars, _ = _pyrepl.utils.disp_str(code, colors=colors, force_color=True) + code = "".join(chars) + return code + + # interface abstraction functions + + def message(self, msg, end='\n'): + print(msg, end=end, file=self.stdout) + + def error(self, msg): + print('***', msg, file=self.stdout) + + # convenience variables + + def set_convenience_variable(self, frame, name, value): + if '__pdb_convenience_variables' not in frame.f_globals: + frame.f_globals['__pdb_convenience_variables'] = {} + frame.f_globals['__pdb_convenience_variables'][name] = value + + # Generic completion functions. Individual complete_foo methods can be + # assigned below to one of these functions. + + @property + def rlcompleter(self): + """Return the `Completer` class from `rlcompleter`, while avoiding the + side effects of changing the completer from `import rlcompleter`. + + This is a compromise between GH-138860 and GH-139289. If GH-139289 is + fixed, then we don't need this and we can just `import rlcompleter` in + `Pdb.__init__`. 
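+
+        (Importing rlcompleter installs a default readline completer as an
+        import side effect, which is why the import is wrapped in a
+        save/restore of the current completer below.)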
+ """ + if not hasattr(self, "_rlcompleter"): + try: + import readline + except ImportError: + # readline is not available, just get the Completer + from rlcompleter import Completer + self._rlcompleter = Completer + else: + # importing rlcompleter could have side effect of changing + # the current completer, we need to restore it + prev_completer = readline.get_completer() + from rlcompleter import Completer + self._rlcompleter = Completer + readline.set_completer(prev_completer) + return self._rlcompleter + + def completenames(self, text, line, begidx, endidx): + # Overwrite completenames() of cmd so for the command completion, + # if no current command matches, check for expressions as well + commands = super().completenames(text, line, begidx, endidx) + for alias in self.aliases: + if alias.startswith(text): + commands.append(alias) + if commands: + return commands + else: + expressions = self._complete_expression(text, line, begidx, endidx) + if expressions: + return expressions + return self.completedefault(text, line, begidx, endidx) + + def _complete_location(self, text, line, begidx, endidx): + # Complete a file/module/function location for break/tbreak/clear. + if line.strip().endswith((':', ',')): + # Here comes a line number or a condition which we can't complete. + return [] + # First, try to find matching functions (i.e. expressions). + try: + ret = self._complete_expression(text, line, begidx, endidx) + except Exception: + ret = [] + # Then, try to complete file names as well. + globs = glob.glob(glob.escape(text) + '*') + for fn in globs: + if os.path.isdir(fn): + ret.append(fn + '/') + elif os.path.isfile(fn) and fn.lower().endswith(('.py', '.pyw')): + ret.append(fn + ':') + return ret + + def _complete_bpnumber(self, text, line, begidx, endidx): + # Complete a breakpoint number. (This would be more helpful if we could + # display additional info along with the completions, such as file/line + # of the breakpoint.) + return [str(i) for i, bp in enumerate(bdb.Breakpoint.bpbynumber) + if bp is not None and str(i).startswith(text)] + + def _complete_expression(self, text, line, begidx, endidx): + # Complete an arbitrary expression. + if not self.curframe: + return [] + # Collect globals and locals. It is usually not really sensible to also + # complete builtins, and they clutter the namespace quite heavily, so we + # leave them out. + ns = {**self.curframe.f_globals, **self.curframe.f_locals} + if '.' in text: + # Walk an attribute chain up to the last part, similar to what + # rlcompleter does. This will bail if any of the parts are not + # simple attribute access, which is what we want. + dotted = text.split('.') + try: + if dotted[0].startswith('$'): + obj = self.curframe.f_globals['__pdb_convenience_variables'][dotted[0][1:]] + else: + obj = ns[dotted[0]] + for part in dotted[1:-1]: + obj = getattr(obj, part) + except (KeyError, AttributeError): + return [] + prefix = '.'.join(dotted[:-1]) + '.' + return [prefix + n for n in dir(obj) if n.startswith(dotted[-1])] + else: + if text.startswith("$"): + # Complete convenience variables + conv_vars = self.curframe.f_globals.get('__pdb_convenience_variables', {}) + return [f"${name}" for name in conv_vars if name.startswith(text[1:])] + # Complete a simple name. 
+            return [n for n in ns.keys() if n.startswith(text)]
+
+    def _complete_indentation(self, text, line, begidx, endidx):
+        try:
+            import readline
+        except ImportError:
+            return []
+        # Fill in spaces to form a 4-space indent
+        return [' ' * (4 - readline.get_begidx() % 4)]
+
+    def complete_multiline_names(self, text, line, begidx, endidx):
+        # If text is space-only, the user entered <tab> before any text.
+        # That normally means they want to indent the current line.
+        if not text.strip():
+            return self._complete_indentation(text, line, begidx, endidx)
+        return self.completedefault(text, line, begidx, endidx)
+
+    def completedefault(self, text, line, begidx, endidx):
+        if text.startswith("$"):
+            # Complete convenience variables
+            conv_vars = self.curframe.f_globals.get('__pdb_convenience_variables', {})
+            return [f"${name}" for name in conv_vars if name.startswith(text[1:])]
+
+        state = 0
+        matches = []
+        completer = self.rlcompleter(self.curframe.f_globals | self.curframe.f_locals)
+        while (match := completer.complete(text, state)) is not None:
+            matches.append(match)
+            state += 1
+        return matches
+
+    @contextmanager
+    def _enable_rlcompleter(self, ns):
+        try:
+            import readline
+        except ImportError:
+            yield
+            return
+
+        try:
+            completer = self.rlcompleter(ns)
+            old_completer = readline.get_completer()
+            readline.set_completer(completer.complete)
+            yield
+        finally:
+            readline.set_completer(old_completer)
+
+    # Pdb meta commands, only intended to be used internally by pdb
+
+    def _pdbcmd_print_frame_status(self, arg):
+        self.print_stack_trace(0)
+        self._validate_file_mtime()
+        self._show_display()
+
+    def _pdbcmd_silence_frame_status(self, arg):
+        if self.cmdqueue and self.cmdqueue[-1] == '_pdbcmd_print_frame_status':
+            self.cmdqueue.pop()
+
+    def _pdbcmd_restore_lastcmd(self, arg):
+        self.lastcmd = arg
+
+    # Command definitions, called by cmdloop()
+    # The argument is the remaining string on the command line
+    # Return true to exit from the command loop
+
+    def do_commands(self, arg):
+        """(Pdb) commands [bpnumber]
+        (com) ...
+        (com) end
+        (Pdb)
+
+        Specify a list of commands for breakpoint number bpnumber.
+        The commands themselves are entered on the following lines.
+        Type a line containing just 'end' to terminate the commands.
+        The commands are executed when the breakpoint is hit.
+
+        To remove all commands from a breakpoint, type commands and
+        follow it immediately with end; that is, give no commands.
+
+        With no bpnumber argument, commands refers to the last
+        breakpoint set.
+
+        You can use breakpoint commands to start your program up
+        again. Simply use the continue command, or step, or any other
+        command that resumes execution.
+
+        Specifying any command resuming execution (currently continue,
+        step, next, return, jump, quit and their abbreviations)
+        terminates the command list (as if that command was
+        immediately followed by end). This is because any time you
+        resume execution (even with a simple next or step), you may
+        encounter another breakpoint -- which could have its own
+        command list, leading to ambiguities about which list to
+        execute.
+
+        If you use the 'silent' command in the command list, the usual
+        message about stopping at a breakpoint is not printed. This
+        may be desirable for breakpoints that are to print a specific
+        message and then continue. If none of the other commands
+        print anything, you will see no sign that the breakpoint was
+        reached.
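+
+        For example, to print a variable silently each time breakpoint 1
+        is hit:
+
+            (Pdb) commands 1
+            (com) silent
+            (com) p some_variable
+            (com) end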
+ """ + if not arg: + bnum = len(bdb.Breakpoint.bpbynumber) - 1 + else: + try: + bnum = int(arg) + except: + self._print_invalid_arg(arg) + return + try: + self.get_bpbynumber(bnum) + except ValueError as err: + self.error('cannot set commands: %s' % err) + return + + self.commands_bnum = bnum + # Save old definitions for the case of a keyboard interrupt. + if bnum in self.commands: + old_commands = self.commands[bnum] + else: + old_commands = None + self.commands[bnum] = [] + + prompt_back = self.prompt + self.prompt = '(com) ' + self.commands_defining = True + try: + self.cmdloop() + except KeyboardInterrupt: + # Restore old definitions. + if old_commands: + self.commands[bnum] = old_commands + else: + del self.commands[bnum] + self.error('command definition aborted, old commands restored') + finally: + self.commands_defining = False + self.prompt = prompt_back + + complete_commands = _complete_bpnumber + + def do_break(self, arg, temporary=False): + """b(reak) [ ([filename:]lineno | function) [, condition] ] + + Without argument, list all breaks. + + With a line number argument, set a break at this line in the + current file. With a function name, set a break at the first + executable line of that function. If a second argument is + present, it is a string specifying an expression which must + evaluate to true before the breakpoint is honored. + + The line number may be prefixed with a filename and a colon, + to specify a breakpoint in another file (probably one that + hasn't been loaded yet). The file is searched for on + sys.path; the .py suffix may be omitted. + """ + if not arg: + if self.breaks: # There's at least one + self.message("Num Type Disp Enb Where") + for bp in bdb.Breakpoint.bpbynumber: + if bp: + self.message(bp.bpformat()) + return + # parse arguments; comma has lowest precedence + # and cannot occur in filename + filename = None + lineno = None + cond = None + module_globals = None + comma = arg.find(',') + if comma > 0: + # parse stuff after comma: "condition" + cond = arg[comma+1:].lstrip() + if err := self._compile_error_message(cond): + self.error('Invalid condition %s: %r' % (cond, err)) + return + arg = arg[:comma].rstrip() + # parse stuff before comma: [filename:]lineno | function + colon = arg.rfind(':') + funcname = None + if colon >= 0: + filename = arg[:colon].rstrip() + f = self.lookupmodule(filename) + if not f: + self.error('%r not found from sys.path' % filename) + return + else: + filename = f + arg = arg[colon+1:].lstrip() + try: + lineno = int(arg) + except ValueError: + self.error('Bad lineno: %s' % arg) + return + else: + # no colon; can be lineno or function + try: + lineno = int(arg) + except ValueError: + try: + func = eval(arg, + self.curframe.f_globals, + self.curframe.f_locals) + except: + func = arg + try: + if hasattr(func, '__func__'): + func = func.__func__ + code = func.__code__ + #use co_name to identify the bkpt (function names + #could be aliased, but co_name is invariant) + funcname = code.co_name + lineno = find_first_executable_line(code) + filename = code.co_filename + module_globals = func.__globals__ + except: + # last thing to try + (ok, filename, ln) = self.lineinfo(arg) + if not ok: + self.error('The specified object %r is not a function ' + 'or was not found along sys.path.' 
% arg)
+                        return
+                    funcname = ok # ok contains a function name
+                    lineno = int(ln)
+        if not filename:
+            filename = self.defaultFile()
+        filename = self.canonic(filename)
+        # Check for reasonable breakpoint
+        line = self.checkline(filename, lineno, module_globals)
+        if line:
+            # now set the break point
+            err = self.set_break(filename, line, temporary, cond, funcname)
+            if err:
+                self.error(err)
+            else:
+                bp = self.get_breaks(filename, line)[-1]
+                self.message("Breakpoint %d at %s:%d" %
+                             (bp.number, bp.file, bp.line))
+
+    # To be overridden in derived debuggers
+    def defaultFile(self):
+        """Produce a reasonable default."""
+        filename = self.curframe.f_code.co_filename
+        if filename == '<string>' and self.mainpyfile:
+            filename = self.mainpyfile
+        return filename
+
+    do_b = do_break
+
+    complete_break = _complete_location
+    complete_b = _complete_location
+
+    def do_tbreak(self, arg):
+        """tbreak [ ([filename:]lineno | function) [, condition] ]
+
+        Same arguments as break, but sets a temporary breakpoint: it
+        is automatically deleted when first hit.
+        """
+        self.do_break(arg, True)
+
+    complete_tbreak = _complete_location
+
+    def lineinfo(self, identifier):
+        failed = (None, None, None)
+        # Input is identifier, may be in single quotes
+        idstring = identifier.split("'")
+        if len(idstring) == 1:
+            # not in single quotes
+            id = idstring[0].strip()
+        elif len(idstring) == 3:
+            # quoted
+            id = idstring[1].strip()
+        else:
+            return failed
+        if id == '': return failed
+        parts = id.split('.')
+        # Protection for derived debuggers
+        if parts[0] == 'self':
+            del parts[0]
+            if len(parts) == 0:
+                return failed
+        # Best first guess at file to look at
+        fname = self.defaultFile()
+        if len(parts) == 1:
+            item = parts[0]
+        else:
+            # More than one part.
+            # First is module, second is method/class
+            f = self.lookupmodule(parts[0])
+            if f:
+                fname = f
+            item = parts[1]
+        answer = find_function(item, self.canonic(fname))
+        return answer or failed
+
+    def checkline(self, filename, lineno, module_globals=None):
+        """Check whether specified line seems to be executable.
+
+        Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
+        line or EOF). Warning: testing is not comprehensive.
+        """
+        # this method should be callable before starting debugging, so default
+        # to "no globals" if there is no current frame
+        frame = getattr(self, 'curframe', None)
+        if module_globals is None:
+            module_globals = frame.f_globals if frame else None
+        line = linecache.getline(filename, lineno, module_globals)
+        if not line:
+            self.message('End of file')
+            return 0
+        line = line.strip()
+        # Don't allow setting breakpoint at a blank line
+        if (not line or (line[0] == '#') or
+            (line[:3] == '"""') or line[:3] == "'''"):
+            self.error('Blank or comment')
+            return 0
+        return lineno
+
+    def do_enable(self, arg):
+        """enable bpnumber [bpnumber ...]
+
+        Enables the breakpoints given as a space separated list of
+        breakpoint numbers.
+        """
+        if not arg:
+            self._print_invalid_arg(arg)
+            return
+        args = arg.split()
+        for i in args:
+            try:
+                bp = self.get_bpbynumber(i)
+            except ValueError as err:
+                self.error(err)
+            else:
+                bp.enable()
+                self.message('Enabled %s' % bp)
+
+    complete_enable = _complete_bpnumber
+
+    def do_disable(self, arg):
+        """disable bpnumber [bpnumber ...]
+
+        Disables the breakpoints given as a space separated list of
+        breakpoint numbers.
Disabling a breakpoint means it cannot + cause the program to stop execution, but unlike clearing a + breakpoint, it remains in the list of breakpoints and can be + (re-)enabled. + """ + if not arg: + self._print_invalid_arg(arg) + return + args = arg.split() + for i in args: + try: + bp = self.get_bpbynumber(i) + except ValueError as err: + self.error(err) + else: + bp.disable() + self.message('Disabled %s' % bp) + + complete_disable = _complete_bpnumber + + def do_condition(self, arg): + """condition bpnumber [condition] + + Set a new condition for the breakpoint, an expression which + must evaluate to true before the breakpoint is honored. If + condition is absent, any existing condition is removed; i.e., + the breakpoint is made unconditional. + """ + if not arg: + self._print_invalid_arg(arg) + return + args = arg.split(' ', 1) + try: + cond = args[1] + if err := self._compile_error_message(cond): + self.error('Invalid condition %s: %r' % (cond, err)) + return + except IndexError: + cond = None + try: + bp = self.get_bpbynumber(args[0].strip()) + except IndexError: + self.error('Breakpoint number expected') + except ValueError as err: + self.error(err) + else: + bp.cond = cond + if not cond: + self.message('Breakpoint %d is now unconditional.' % bp.number) + else: + self.message('New condition set for breakpoint %d.' % bp.number) + + complete_condition = _complete_bpnumber + + def do_ignore(self, arg): + """ignore bpnumber [count] + + Set the ignore count for the given breakpoint number. If + count is omitted, the ignore count is set to 0. A breakpoint + becomes active when the ignore count is zero. When non-zero, + the count is decremented each time the breakpoint is reached + and the breakpoint is not disabled and any associated + condition evaluates to true. + """ + if not arg: + self._print_invalid_arg(arg) + return + args = arg.split() + if not args: + self.error('Breakpoint number expected') + return + if len(args) == 1: + count = 0 + elif len(args) == 2: + try: + count = int(args[1]) + except ValueError: + self._print_invalid_arg(arg) + return + else: + self._print_invalid_arg(arg) + return + try: + bp = self.get_bpbynumber(args[0].strip()) + except ValueError as err: + self.error(err) + else: + bp.ignore = count + if count > 0: + if count > 1: + countstr = '%d crossings' % count + else: + countstr = '1 crossing' + self.message('Will ignore next %s of breakpoint %d.' % + (countstr, bp.number)) + else: + self.message('Will stop next time breakpoint %d is reached.' + % bp.number) + + complete_ignore = _complete_bpnumber + + def _prompt_for_confirmation(self, prompt, default): + try: + reply = input(prompt) + except EOFError: + reply = default + return reply.strip().lower() + + def do_clear(self, arg): + """cl(ear) [filename:lineno | bpnumber ...] + + With a space separated list of breakpoint numbers, clear + those breakpoints. Without argument, clear all breaks (but + first ask confirmation). With a filename:lineno argument, + clear all breaks at that line in that file. + """ + if not arg: + reply = self._prompt_for_confirmation( + 'Clear all breaks? 
',
+                default='no',
+            )
+            if reply in ('y', 'yes'):
+                bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp]
+                self.clear_all_breaks()
+                for bp in bplist:
+                    self.message('Deleted %s' % bp)
+            return
+        if ':' in arg:
+            # Make sure it works for "clear C:\foo\bar.py:12"
+            i = arg.rfind(':')
+            filename = arg[:i]
+            arg = arg[i+1:]
+            try:
+                lineno = int(arg)
+            except ValueError:
+                err = "Invalid line number (%s)" % arg
+            else:
+                bplist = self.get_breaks(filename, lineno)[:]
+                err = self.clear_break(filename, lineno)
+            if err:
+                self.error(err)
+            else:
+                for bp in bplist:
+                    self.message('Deleted %s' % bp)
+            return
+        numberlist = arg.split()
+        for i in numberlist:
+            try:
+                bp = self.get_bpbynumber(i)
+            except ValueError as err:
+                self.error(err)
+            else:
+                self.clear_bpbynumber(i)
+                self.message('Deleted %s' % bp)
+    do_cl = do_clear # 'c' is already an abbreviation for 'continue'
+
+    complete_clear = _complete_location
+    complete_cl = _complete_location
+
+    def do_where(self, arg):
+        """w(here) [count]
+
+        Print a stack trace. If count is not specified, print the full stack.
+        If count is 0, print the current frame entry. If count is positive,
+        print count entries from the most recent frame. If count is negative,
+        print -count entries from the least recent frame.
+        An arrow indicates the "current frame", which determines the
+        context of most commands. 'bt' is an alias for this command.
+        """
+        if not arg:
+            count = None
+        else:
+            try:
+                count = int(arg)
+            except ValueError:
+                self.error('Invalid count (%s)' % arg)
+                return
+        self.print_stack_trace(count)
+    do_w = do_where
+    do_bt = do_where
+
+    def _select_frame(self, number):
+        assert 0 <= number < len(self.stack)
+        self.curindex = number
+        self.curframe = self.stack[self.curindex][0]
+        self.set_convenience_variable(self.curframe, '_frame', self.curframe)
+        self.print_stack_entry(self.stack[self.curindex])
+        self.lineno = None
+
+    def do_exceptions(self, arg):
+        """exceptions [number]
+
+        List or change current exception in an exception chain.
+
+        Without arguments, list all the exceptions in the exception
+        chain. Exceptions will be numbered, with the current exception indicated
+        with an arrow.
+
+        If given an integer as argument, switch to the exception at that index.
+        """
+        if not self._chained_exceptions:
+            self.message(
+                "Did not find chained exceptions. To move between"
+                " exceptions, pdb/post_mortem must be given an exception"
+                " object rather than a traceback."
+            )
+            return
+        if not arg:
+            for ix, exc in enumerate(self._chained_exceptions):
+                prompt = ">" if ix == self._chained_exception_index else " "
+                rep = repr(exc)
+                if len(rep) > 80:
+                    rep = rep[:77] + "..."
+                indicator = (
+                    " -"
+                    if self._chained_exceptions[ix].__traceback__ is None
+                    else f"{ix:>3}"
+                )
+                self.message(f"{prompt} {indicator} {rep}")
+        else:
+            try:
+                number = int(arg)
+            except ValueError:
+                self.error("Argument must be an integer")
+                return
+            if 0 <= number < len(self._chained_exceptions):
+                if self._chained_exceptions[number].__traceback__ is None:
+                    self.error("This exception does not have a traceback, cannot jump to it")
+                    return
+
+                self._chained_exception_index = number
+                self.setup(None, self._chained_exceptions[number].__traceback__)
+                self.print_stack_entry(self.stack[self.curindex])
+            else:
+                self.error("No exception with that number")
+
+    def do_up(self, arg):
+        """u(p) [count]
+
+        Move the current frame count (default one) levels up in the
+        stack trace (to an older frame).
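+
+        A count larger than the remaining depth stops at the oldest
+        frame, and a negative count jumps directly to it.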
+ """ + if self.curindex == 0: + self.error('Oldest frame') + return + try: + count = int(arg or 1) + except ValueError: + self.error('Invalid frame count (%s)' % arg) + return + if count < 0: + newframe = 0 + else: + newframe = max(0, self.curindex - count) + self._select_frame(newframe) + do_u = do_up + + def do_down(self, arg): + """d(own) [count] + + Move the current frame count (default one) levels down in the + stack trace (to a newer frame). + """ + if self.curindex + 1 == len(self.stack): + self.error('Newest frame') + return + try: + count = int(arg or 1) + except ValueError: + self.error('Invalid frame count (%s)' % arg) + return + if count < 0: + newframe = len(self.stack) - 1 + else: + newframe = min(len(self.stack) - 1, self.curindex + count) + self._select_frame(newframe) + do_d = do_down + + def do_until(self, arg): + """unt(il) [lineno] + + Without argument, continue execution until the line with a + number greater than the current one is reached. With a line + number, continue execution until a line with a number greater + or equal to that is reached. In both cases, also stop when + the current frame returns. + """ + if arg: + try: + lineno = int(arg) + except ValueError: + self.error('Error in argument: %r' % arg) + return + if lineno <= self.curframe.f_lineno: + self.error('"until" line number is smaller than current ' + 'line number') + return + else: + lineno = None + self.set_until(self.curframe, lineno) + return 1 + do_unt = do_until + + def do_step(self, arg): + """s(tep) + + Execute the current line, stop at the first possible occasion + (either in a function that is called or in the current + function). + """ + if arg: + self._print_invalid_arg(arg) + return + self.set_step() + return 1 + do_s = do_step + + def do_next(self, arg): + """n(ext) + + Continue execution until the next line in the current function + is reached or it returns. + """ + if arg: + self._print_invalid_arg(arg) + return + self.set_next(self.curframe) + return 1 + do_n = do_next + + def do_run(self, arg): + """run [args...] + + Restart the debugged python program. If a string is supplied + it is split with "shlex", and the result is used as the new + sys.argv. History, breakpoints, actions and debugger options + are preserved. "restart" is an alias for "run". + """ + if self.mode == 'inline': + self.error('run/restart command is disabled when pdb is running in inline mode.\n' + 'Use the command line interface to enable restarting your program\n' + 'e.g. "python -m pdb myscript.py"') + return + if arg: + import shlex + argv0 = sys.argv[0:1] + try: + sys.argv = shlex.split(arg) + except ValueError as e: + self.error('Cannot run %s: %s' % (arg, e)) + return + sys.argv[:0] = argv0 + # this is caught in the main debugger loop + raise Restart + + do_restart = do_run + + def do_return(self, arg): + """r(eturn) + + Continue execution until the current function returns. + """ + if arg: + self._print_invalid_arg(arg) + return + self.set_return(self.curframe) + return 1 + do_r = do_return + + def do_continue(self, arg): + """c(ont(inue)) + + Continue execution, only stop when a breakpoint is encountered. + """ + if arg: + self._print_invalid_arg(arg) + return + if not self.nosigint: + try: + Pdb._previous_sigint_handler = \ + signal.signal(signal.SIGINT, self.sigint_handler) + except ValueError: + # ValueError happens when do_continue() is invoked from + # a non-main thread in which case we just continue without + # SIGINT set. Would printing a message here (once) make + # sense? 
+ pass + self.set_continue() + return 1 + do_c = do_cont = do_continue + + def do_jump(self, arg): + """j(ump) lineno + + Set the next line that will be executed. Only available in + the bottom-most frame. This lets you jump back and execute + code again, or jump forward to skip code that you don't want + to run. + + It should be noted that not all jumps are allowed -- for + instance it is not possible to jump into the middle of a + for loop or out of a finally clause. + """ + if not arg: + self._print_invalid_arg(arg) + return + if self.curindex + 1 != len(self.stack): + self.error('You can only jump within the bottom frame') + return + try: + arg = int(arg) + except ValueError: + self.error("The 'jump' command requires a line number") + else: + try: + # Do the jump, fix up our copy of the stack, and display the + # new position + self.curframe.f_lineno = arg + self.stack[self.curindex] = self.stack[self.curindex][0], arg + self.print_stack_entry(self.stack[self.curindex]) + except ValueError as e: + self.error('Jump failed: %s' % e) + do_j = do_jump + + def _create_recursive_debugger(self): + return Pdb(self.completekey, self.stdin, self.stdout) + + def do_debug(self, arg): + """debug code + + Enter a recursive debugger that steps through the code + argument (which is an arbitrary expression or statement to be + executed in the current environment). + """ + if not arg: + self._print_invalid_arg(arg) + return + self.stop_trace() + globals = self.curframe.f_globals + locals = self.curframe.f_locals + p = self._create_recursive_debugger() + p.prompt = "(%s) " % self.prompt.strip() + self.message("ENTERING RECURSIVE DEBUGGER") + try: + sys.call_tracing(p.run, (arg, globals, locals)) + except Exception: + self._error_exc() + self.message("LEAVING RECURSIVE DEBUGGER") + self.start_trace() + self.lastcmd = p.lastcmd + + complete_debug = _complete_expression + + def do_quit(self, arg): + """q(uit) | exit + + Quit from the debugger. The program being executed is aborted. + """ + # Show prompt to kill process when in 'inline' mode and if pdb was not + # started from an interactive console. The attribute sys.ps1 is only + # defined if the interpreter is in interactive mode. + if self.mode == 'inline' and not hasattr(sys, 'ps1'): + while True: + try: + reply = input('Quitting pdb will kill the process. Quit anyway? [y/n] ') + reply = reply.lower().strip() + except EOFError: + reply = 'y' + self.message('') + if reply == 'y' or reply == '': + sys.exit(1) + elif reply.lower() == 'n': + return + + self._user_requested_quit = True + self.set_quit() + return 1 + + do_q = do_quit + do_exit = do_quit + + def do_EOF(self, arg): + """EOF + + Handles the receipt of EOF as a command. + """ + self.message('') + return self.do_quit(arg) + + def do_args(self, arg): + """a(rgs) + + Print the argument list of the current function. + """ + if arg: + self._print_invalid_arg(arg) + return + co = self.curframe.f_code + dict = self.curframe.f_locals + n = co.co_argcount + co.co_kwonlyargcount + if co.co_flags & inspect.CO_VARARGS: n = n+1 + if co.co_flags & inspect.CO_VARKEYWORDS: n = n+1 + for i in range(n): + name = co.co_varnames[i] + if name in dict: + self.message('%s = %s' % (name, self._safe_repr(dict[name], name))) + else: + self.message('%s = *** undefined ***' % (name,)) + do_a = do_args + + def do_retval(self, arg): + """retval + + Print the return value for the last return of a function. 
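+        The value is also available as the convenience variable $_retval.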
+ """ + if arg: + self._print_invalid_arg(arg) + return + if '__return__' in self.curframe.f_locals: + self.message(self._safe_repr(self.curframe.f_locals['__return__'], "retval")) + else: + self.error('Not yet returned!') + do_rv = do_retval + + def _getval(self, arg): + try: + return eval(arg, self.curframe.f_globals, self.curframe.f_locals) + except: + self._error_exc() + raise + + def _getval_except(self, arg, frame=None): + try: + if frame is None: + return eval(arg, self.curframe.f_globals, self.curframe.f_locals) + else: + return eval(arg, frame.f_globals, frame.f_locals) + except BaseException as exc: + return _rstr('** raised %s **' % self._format_exc(exc)) + + def _error_exc(self): + exc = sys.exception() + self.error(self._format_exc(exc)) + + def _msg_val_func(self, arg, func): + try: + val = self._getval(arg) + except: + return # _getval() has displayed the error + try: + self.message(func(val)) + except: + self._error_exc() + + def _safe_repr(self, obj, expr): + try: + return repr(obj) + except Exception as e: + return _rstr(f"*** repr({expr}) failed: {self._format_exc(e)} ***") + + def do_p(self, arg): + """p expression + + Print the value of the expression. + """ + if not arg: + self._print_invalid_arg(arg) + return + self._msg_val_func(arg, repr) + + def do_pp(self, arg): + """pp expression + + Pretty-print the value of the expression. + """ + if not arg: + self._print_invalid_arg(arg) + return + self._msg_val_func(arg, pprint.pformat) + + complete_print = _complete_expression + complete_p = _complete_expression + complete_pp = _complete_expression + + def do_list(self, arg): + """l(ist) [first[, last] | .] + + List source code for the current file. Without arguments, + list 11 lines around the current line or continue the previous + listing. With . as argument, list 11 lines around the current + line. With one argument, list 11 lines starting at that line. + With two arguments, list the given range; if the second + argument is less than the first, it is a count. + + The current line in the current frame is indicated by "->". + If an exception is being debugged, the line where the + exception was originally raised or propagated is indicated by + ">>", if it differs from the current line. + """ + self.lastcmd = 'list' + last = None + if arg and arg != '.': + try: + if ',' in arg: + first, last = arg.split(',') + first = int(first.strip()) + last = int(last.strip()) + if last < first: + # assume it's a count + last = first + last + else: + first = int(arg.strip()) + first = max(1, first - 5) + except ValueError: + self.error('Error in argument: %r' % arg) + return + elif self.lineno is None or arg == '.': + first = max(1, self.curframe.f_lineno - 5) + else: + first = self.lineno + 1 + if last is None: + last = first + 10 + filename = self.curframe.f_code.co_filename + breaklist = self.get_file_breaks(filename) + try: + lines = linecache.getlines(filename, self.curframe.f_globals) + self._print_lines(lines[first-1:last], first, breaklist, + self.curframe) + self.lineno = min(last, len(lines)) + if len(lines) < last: + self.message('[EOF]') + except KeyboardInterrupt: + pass + self._validate_file_mtime() + do_l = do_list + + def do_longlist(self, arg): + """ll | longlist + + List the whole source code for the current function or frame. 
+ """ + if arg: + self._print_invalid_arg(arg) + return + filename = self.curframe.f_code.co_filename + breaklist = self.get_file_breaks(filename) + try: + lines, lineno = self._getsourcelines(self.curframe) + except OSError as err: + self.error(err) + return + self._print_lines(lines, lineno, breaklist, self.curframe) + self._validate_file_mtime() + do_ll = do_longlist + + def do_source(self, arg): + """source expression + + Try to get source code for the given object and display it. + """ + if not arg: + self._print_invalid_arg(arg) + return + try: + obj = self._getval(arg) + except: + return + try: + lines, lineno = self._getsourcelines(obj) + except (OSError, TypeError) as err: + self.error(err) + return + self._print_lines(lines, lineno) + + complete_source = _complete_expression + + def _print_lines(self, lines, start, breaks=(), frame=None): + """Print a range of lines.""" + if frame: + current_lineno = frame.f_lineno + exc_lineno = self.tb_lineno.get(frame, -1) + else: + current_lineno = exc_lineno = -1 + for lineno, line in enumerate(lines, start): + s = str(lineno).rjust(3) + if len(s) < 4: + s += ' ' + if lineno in breaks: + s += 'B' + else: + s += ' ' + if lineno == current_lineno: + s += '->' + elif lineno == exc_lineno: + s += '>>' + if self.colorize: + line = self._colorize_code(line) + self.message(s + '\t' + line.rstrip()) + + def do_whatis(self, arg): + """whatis expression + + Print the type of the argument. + """ + if not arg: + self._print_invalid_arg(arg) + return + try: + value = self._getval(arg) + except: + # _getval() already printed the error + return + code = None + # Is it an instance method? + try: + code = value.__func__.__code__ + except Exception: + pass + if code: + self.message('Method %s' % code.co_name) + return + # Is it a function? + try: + code = value.__code__ + except Exception: + pass + if code: + self.message('Function %s' % code.co_name) + return + # Is it a class? + if value.__class__ is type: + self.message('Class %s.%s' % (value.__module__, value.__qualname__)) + return + # None of the above... + self.message(type(value)) + + complete_whatis = _complete_expression + + def do_display(self, arg): + """display [expression] + + Display the value of the expression if it changed, each time execution + stops in the current frame. + + Without expression, list all display expressions for the current frame. + """ + if not arg: + if self.displaying: + self.message('Currently displaying:') + for key, val in self.displaying.get(self.curframe, {}).items(): + self.message('%s: %s' % (key, self._safe_repr(val, key))) + else: + self.message('No expression is being displayed') + else: + if err := self._compile_error_message(arg): + self.error('Unable to display %s: %r' % (arg, err)) + else: + val = self._getval_except(arg) + self.displaying.setdefault(self.curframe, {})[arg] = val + self.message('display %s: %s' % (arg, self._safe_repr(val, arg))) + + complete_display = _complete_expression + + def do_undisplay(self, arg): + """undisplay [expression] + + Do not display the expression any more in the current frame. + + Without expression, clear all display expressions for the current frame. 
+ """ + if arg: + try: + del self.displaying.get(self.curframe, {})[arg] + except KeyError: + self.error('not displaying %s' % arg) + else: + self.displaying.pop(self.curframe, None) + + def complete_undisplay(self, text, line, begidx, endidx): + return [e for e in self.displaying.get(self.curframe, {}) + if e.startswith(text)] + + def do_interact(self, arg): + """interact + + Start an interactive interpreter whose global namespace + contains all the (global and local) names found in the current scope. + """ + ns = {**self.curframe.f_globals, **self.curframe.f_locals} + with self._enable_rlcompleter(ns): + console = _PdbInteractiveConsole(ns, message=self.message) + console.interact(banner="*pdb interact start*", + exitmsg="*exit from pdb interact command*") + + def do_alias(self, arg): + """alias [name [command]] + + Create an alias called 'name' that executes 'command'. The + command must *not* be enclosed in quotes. Replaceable + parameters can be indicated by %1, %2, and so on, while %* is + replaced by all the parameters. If no command is given, the + current alias for name is shown. If no name is given, all + aliases are listed. + + Aliases may be nested and can contain anything that can be + legally typed at the pdb prompt. Note! You *can* override + internal pdb commands with aliases! Those internal commands + are then hidden until the alias is removed. Aliasing is + recursively applied to the first word of the command line; all + other words in the line are left alone. + + As an example, here are two useful aliases (especially when + placed in the .pdbrc file): + + # Print instance variables (usage "pi classInst") + alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k]) + # Print instance variables in self + alias ps pi self + """ + args = arg.split() + if len(args) == 0: + keys = sorted(self.aliases.keys()) + for alias in keys: + self.message("%s = %s" % (alias, self.aliases[alias])) + return + if len(args) == 1: + if args[0] in self.aliases: + self.message("%s = %s" % (args[0], self.aliases[args[0]])) + else: + self.error(f"Unknown alias '{args[0]}'") + else: + # Do a validation check to make sure no replaceable parameters + # are skipped if %* is not used. + alias = ' '.join(args[1:]) + if '%*' not in alias: + consecutive = True + for idx in range(1, 10): + if f'%{idx}' not in alias: + consecutive = False + if f'%{idx}' in alias and not consecutive: + self.error("Replaceable parameters must be consecutive") + return + self.aliases[args[0]] = alias + + def do_unalias(self, arg): + """unalias name + + Delete the specified alias. + """ + args = arg.split() + if len(args) == 0: + self._print_invalid_arg(arg) + return + if args[0] in self.aliases: + del self.aliases[args[0]] + + def complete_unalias(self, text, line, begidx, endidx): + return [a for a in self.aliases if a.startswith(text)] + + # List of all the commands making the program resume execution. + commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return', + 'do_until', 'do_quit', 'do_jump'] + + # Print a traceback starting at the top stack frame. + # The most recently entered frame is printed last; + # this is different from dbx and gdb, but consistent with + # the Python interpreter's stack trace. + # It is also consistent with the up/down commands (which are + # compatible with dbx and gdb: up moves towards 'main()' + # and down moves towards the most recent stack frame). 
+ # * if count is None, prints the full stack + # * if count = 0, prints the current frame entry + # * if count < 0, prints -count least recent frame entries + # * if count > 0, prints count most recent frame entries + + def print_stack_trace(self, count=None): + if count is None: + stack_to_print = self.stack + elif count == 0: + stack_to_print = [self.stack[self.curindex]] + elif count < 0: + stack_to_print = self.stack[:-count] + else: + stack_to_print = self.stack[-count:] + try: + for frame_lineno in stack_to_print: + self.print_stack_entry(frame_lineno) + except KeyboardInterrupt: + pass + + def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix): + frame, lineno = frame_lineno + if frame is self.curframe: + prefix = '> ' + else: + prefix = ' ' + stack_entry = self.format_stack_entry(frame_lineno, prompt_prefix) + if self.colorize: + lines = stack_entry.split(prompt_prefix, 1) + if len(lines) > 1: + # We have some code to display + lines[1] = self._colorize_code(lines[1]) + stack_entry = prompt_prefix.join(lines) + self.message(prefix + stack_entry) + + # Provide help + + def do_help(self, arg): + """h(elp) + + Without argument, print the list of available commands. + With a command name as argument, print help about that command. + "help pdb" shows the full pdb documentation. + "help exec" gives help on the ! command. + """ + if not arg: + return cmd.Cmd.do_help(self, arg) + try: + try: + topic = getattr(self, 'help_' + arg) + return topic() + except AttributeError: + command = getattr(self, 'do_' + arg) + except AttributeError: + self.error('No help for %r' % arg) + else: + if sys.flags.optimize >= 2: + self.error('No help for %r; please do not run Python with -OO ' + 'if you need command help' % arg) + return + if command.__doc__ is None: + self.error('No help for %r; __doc__ string missing' % arg) + return + self.message(self._help_message_from_doc(command.__doc__)) + + do_h = do_help + + def help_exec(self): + """(!) statement + + Execute the (one-line) statement in the context of the current + stack frame. The exclamation point can be omitted unless the + first word of the statement resembles a debugger command, e.g.: + (Pdb) ! n=42 + (Pdb) + + To assign to a global variable you must always prefix the command with + a 'global' command, e.g.: + (Pdb) global list_options; list_options = ['-l'] + (Pdb) + """ + self.message((self.help_exec.__doc__ or '').strip()) + + def help_pdb(self): + help() + + # other helper functions + + def lookupmodule(self, filename): + """Helper function for break/clear parsing -- may be overridden. + + lookupmodule() translates (possibly incomplete) file or module name + into an absolute file name. + + filename could be in format of: + * an absolute path like '/path/to/file.py' + * a relative path like 'file.py' or 'dir/file.py' + * a module name like 'module' or 'package.module' + + files and modules will be searched in sys.path. 
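+
+        For example, 'package.module' is looked up as 'package/module.py'
+        relative to each entry on sys.path.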
+ """ + if not filename.endswith('.py'): + # A module is passed in so convert it to equivalent file + filename = filename.replace('.', os.sep) + '.py' + + if os.path.isabs(filename): + if os.path.exists(filename): + return filename + return None + + for dirname in sys.path: + while os.path.islink(dirname): + dirname = os.readlink(dirname) + fullname = os.path.join(dirname, filename) + if os.path.exists(fullname): + return fullname + return None + + def _run(self, target: _ExecutableTarget): + # When bdb sets tracing, a number of call and line events happen + # BEFORE debugger even reaches user's code (and the exact sequence of + # events depends on python version). Take special measures to + # avoid stopping before reaching the main script (see user_line and + # user_call for details). + self._wait_for_mainpyfile = True + self._user_requested_quit = False + + self.mainpyfile = self.canonic(target.filename) + + # The target has to run in __main__ namespace (or imports from + # __main__ will break). Clear __main__ and replace with + # the target namespace. + import __main__ + __main__.__dict__.clear() + __main__.__dict__.update(target.namespace) + + # Clear the mtime table for program reruns, assume all the files + # are up to date. + self._file_mtime_table.clear() + + self.run(target.code) + + def _format_exc(self, exc: BaseException): + return traceback.format_exception_only(exc)[-1].strip() + + def _compile_error_message(self, expr): + """Return the error message as string if compiling `expr` fails.""" + try: + compile(expr, "", "eval") + except SyntaxError as exc: + return _rstr(self._format_exc(exc)) + return "" + + def _getsourcelines(self, obj): + # GH-103319 + # inspect.getsourcelines() returns lineno = 0 for + # module-level frame which breaks our code print line number + # This method should be replaced by inspect.getsourcelines(obj) + # once this bug is fixed in inspect + lines, lineno = inspect.getsourcelines(obj) + lineno = max(1, lineno) + return lines, lineno + + def _help_message_from_doc(self, doc, usage_only=False): + lines = [line.strip() for line in doc.rstrip().splitlines()] + if not lines: + return "No help message found." + if "" in lines: + usage_end = lines.index("") + else: + usage_end = 1 + formatted = [] + indent = " " * len(self.prompt) + for i, line in enumerate(lines): + if i == 0: + prefix = "Usage: " + elif i < usage_end: + prefix = " " + else: + if usage_only: + break + prefix = "" + formatted.append(indent + prefix + line) + return "\n".join(formatted) + + def _print_invalid_arg(self, arg): + """Return the usage string for a function.""" + + if not arg: + self.error("Argument is required for this command") + else: + self.error(f"Invalid argument: {arg}") + + # Yes it's a bit hacky. Get the caller name, get the method based on + # that name, and get the docstring from that method. + # This should NOT fail if the caller is a method of this class. 
+ doc = inspect.getdoc(getattr(self, sys._getframe(1).f_code.co_name)) + if doc is not None: + self.message(self._help_message_from_doc(doc, usage_only=True)) + +# Collect all command help into docstring, if not run with -OO + +if __doc__ is not None: + # unfortunately we can't guess this order from the class definition + _help_order = [ + 'help', 'where', 'down', 'up', 'break', 'tbreak', 'clear', 'disable', + 'enable', 'ignore', 'condition', 'commands', 'step', 'next', 'until', + 'jump', 'return', 'retval', 'run', 'continue', 'list', 'longlist', + 'args', 'p', 'pp', 'whatis', 'source', 'display', 'undisplay', + 'interact', 'alias', 'unalias', 'debug', 'quit', + ] + + for _command in _help_order: + __doc__ += getattr(Pdb, 'do_' + _command).__doc__.strip() + '\n\n' + __doc__ += Pdb.help_exec.__doc__ + + del _help_order, _command + + +# Simplified interface + +def run(statement, globals=None, locals=None): + """Execute the *statement* (given as a string or a code object) + under debugger control. + + The debugger prompt appears before any code is executed; you can set + breakpoints and type continue, or you can step through the statement + using step or next. + + The optional *globals* and *locals* arguments specify the + environment in which the code is executed; by default the + dictionary of the module __main__ is used (see the explanation of + the built-in exec() or eval() functions.). + """ + Pdb().run(statement, globals, locals) + +def runeval(expression, globals=None, locals=None): + """Evaluate the *expression* (given as a string or a code object) + under debugger control. + + When runeval() returns, it returns the value of the expression. + Otherwise this function is similar to run(). + """ + return Pdb().runeval(expression, globals, locals) + +def runctx(statement, globals, locals): + # B/W compatibility + run(statement, globals, locals) + +def runcall(*args, **kwds): + """Call the function (a function or method object, not a string) + with the given arguments. + + When runcall() returns, it returns whatever the function call + returned. The debugger prompt appears as soon as the function is + entered. + """ + return Pdb().runcall(*args, **kwds) + +def set_trace(*, header=None, commands=None): + """Enter the debugger at the calling stack frame. + + This is useful to hard-code a breakpoint at a given point in a + program, even if the code is not otherwise being debugged (e.g. when + an assertion fails). If given, *header* is printed to the console + just before debugging begins. *commands* is an optional list of + pdb commands to run when the debugger starts. + """ + if Pdb._last_pdb_instance is not None: + pdb = Pdb._last_pdb_instance + else: + pdb = Pdb(mode='inline', backend='monitoring', colorize=True) + if header is not None: + pdb.message(header) + pdb.set_trace(sys._getframe().f_back, commands=commands) + +async def set_trace_async(*, header=None, commands=None): + """Enter the debugger at the calling stack frame, but in async mode. + + This should be used as await pdb.set_trace_async(). Users can do await + if they enter the debugger with this function. Otherwise it's the same + as set_trace(). 
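+
+    For example, inside a coroutine:
+
+        async def f():
+            await pdb.set_trace_async()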
+ """ + if Pdb._last_pdb_instance is not None: + pdb = Pdb._last_pdb_instance + else: + pdb = Pdb(mode='inline', backend='monitoring', colorize=True) + if header is not None: + pdb.message(header) + await pdb.set_trace_async(sys._getframe().f_back, commands=commands) + +# Remote PDB + +class _PdbServer(Pdb): + def __init__( + self, + sockfile, + signal_server=None, + owns_sockfile=True, + colorize=False, + **kwargs, + ): + self._owns_sockfile = owns_sockfile + self._interact_state = None + self._sockfile = sockfile + self._command_name_cache = [] + self._write_failed = False + if signal_server: + # Only started by the top level _PdbServer, not recursive ones. + self._start_signal_listener(signal_server) + # Override the `colorize` attribute set by the parent constructor, + # because it checks the server's stdout, rather than the client's. + super().__init__(colorize=False, **kwargs) + self.colorize = colorize + + @staticmethod + def protocol_version(): + # By default, assume a client and server are compatible if they run + # the same Python major.minor version. We'll try to keep backwards + # compatibility between patch versions of a minor version if possible. + # If we do need to change the protocol in a patch version, we'll change + # `revision` to the patch version where the protocol changed. + # We can ignore compatibility for pre-release versions; sys.remote_exec + # can't attach to a pre-release version except from that same version. + v = sys.version_info + revision = 0 + return int(f"{v.major:02X}{v.minor:02X}{revision:02X}F0", 16) + + def _ensure_valid_message(self, msg): + # Ensure the message conforms to our protocol. + # If anything needs to be changed here for a patch release of Python, + # the 'revision' in protocol_version() should be updated. + match msg: + case {"message": str(), "type": str()}: + # Have the client show a message. The client chooses how to + # format the message based on its type. The currently defined + # types are "info" and "error". If a message has a type the + # client doesn't recognize, it must be treated as "info". + pass + case {"help": str()}: + # Have the client show the help for a given argument. + pass + case {"prompt": str(), "state": str()}: + # Have the client display the given prompt and wait for a reply + # from the user. If the client recognizes the state it may + # enable mode-specific features like multi-line editing. + # If it doesn't recognize the state it must prompt for a single + # line only and send it directly to the server. A server won't + # progress until it gets a "reply" or "signal" message, but can + # process "complete" requests while waiting for the reply. + pass + case { + "completions": list(completions) + } if all(isinstance(c, str) for c in completions): + # Return valid completions for a client's "complete" request. + pass + case { + "command_list": list(command_list) + } if all(isinstance(c, str) for c in command_list): + # Report the list of legal PDB commands to the client. + # Due to aliases this list is not static, but the client + # needs to know it for multi-line editing. + pass + case _: + raise AssertionError( + f"PDB message doesn't follow the schema! {msg}" + ) + + @classmethod + def _start_signal_listener(cls, address): + def listener(sock): + with closing(sock): + # Check if the interpreter is finalizing every quarter of a second. + # Clean up and exit if so. 
+                sock.settimeout(0.25)
+                sock.shutdown(socket.SHUT_WR)
+                while not shut_down.is_set():
+                    try:
+                        data = sock.recv(1024)
+                    except socket.timeout:
+                        continue
+                    if data == b"":
+                        return  # EOF
+                    signal.raise_signal(signal.SIGINT)
+
+        def stop_thread():
+            shut_down.set()
+            thread.join()
+
+        # Use a daemon thread so that we don't detach until after all non-daemon
+        # threads are done. Use an atexit handler to stop gracefully at that point,
+        # so that our thread is stopped before the interpreter is torn down.
+        shut_down = threading.Event()
+        thread = threading.Thread(
+            target=listener,
+            args=[socket.create_connection(address, timeout=5)],
+            daemon=True,
+        )
+        atexit.register(stop_thread)
+        thread.start()
+
+    def _send(self, **kwargs):
+        self._ensure_valid_message(kwargs)
+        json_payload = json.dumps(kwargs)
+        try:
+            self._sockfile.write(json_payload.encode() + b"\n")
+            self._sockfile.flush()
+        except (OSError, ValueError):
+            # We get an OSError if the network connection has dropped, and a
+            # ValueError if detach() has been called or the sockfile has been
+            # closed. We'll handle this the next time we try to read from the
+            # client instead of trying to handle it from everywhere _send()
+            # may be called. Track this with a flag rather than assuming
+            # readline() will ever return an empty string because the socket
+            # may be half-closed.
+            self._write_failed = True
+
+    @typing.override
+    def message(self, msg, end="\n"):
+        self._send(message=str(msg) + end, type="info")
+
+    @typing.override
+    def error(self, msg):
+        self._send(message=str(msg), type="error")
+
+    def _get_input(self, prompt, state) -> str:
+        # Before displaying a (Pdb) prompt, send the list of PDB commands
+        # unless we've already sent an up-to-date list.
+        if state == "pdb" and not self._command_name_cache:
+            self._command_name_cache = self.completenames("", "", 0, 0)
+            self._send(command_list=self._command_name_cache)
+        self._send(prompt=prompt, state=state)
+        return self._read_reply()
+
+    def _read_reply(self):
+        # Loop until we get a 'reply' or 'signal' from the client,
+        # processing out-of-band 'complete' requests as they arrive.
+        while True:
+            if self._write_failed:
+                raise EOFError
+
+            msg = self._sockfile.readline()
+            if not msg:
+                raise EOFError
+
+            try:
+                payload = json.loads(msg)
+            except json.JSONDecodeError:
+                self.error(f"Disconnecting: client sent invalid JSON {msg!r}")
+                raise EOFError
+
+            match payload:
+                case {"reply": str(reply)}:
+                    return reply
+                case {"signal": str(signal)}:
+                    if signal == "INT":
+                        raise KeyboardInterrupt
+                    elif signal == "EOF":
+                        raise EOFError
+                    else:
+                        self.error(
+                            f"Received unrecognized signal: {signal}"
+                        )
+                        # Our best hope of recovering is to pretend we
+                        # got an EOF to exit whatever mode we're in.
+                        raise EOFError
+                case {
+                    "complete": {
+                        "text": str(text),
+                        "line": str(line),
+                        "begidx": int(begidx),
+                        "endidx": int(endidx),
+                    }
+                }:
+                    items = self._complete_any(text, line, begidx, endidx)
+                    self._send(completions=items)
+                    continue
+            # Valid JSON, but doesn't meet the schema.
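+            # A schema-valid client message is a single JSON object, e.g.
+            # (illustrative): {"reply": "next"} or
+            # {"complete": {"text": "he", "line": "he", "begidx": 0, "endidx": 2}}.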
+ self.error(f"Ignoring invalid message from client: {msg}") + + def _complete_any(self, text, line, begidx, endidx): + # If we're in 'interact' mode, we need to use the default completer + if self._interact_state: + compfunc = self.completedefault + else: + if begidx == 0: + return self.completenames(text, line, begidx, endidx) + + cmd = self.parseline(line)[0] + if cmd: + compfunc = getattr(self, "complete_" + cmd, self.completedefault) + else: + compfunc = self.completedefault + return compfunc(text, line, begidx, endidx) + + def cmdloop(self, intro=None): + self.preloop() + if intro is not None: + self.intro = intro + if self.intro: + self.message(str(self.intro)) + stop = None + while not stop: + if self._interact_state is not None: + try: + reply = self._get_input(prompt=">>> ", state="interact") + except KeyboardInterrupt: + # Match how KeyboardInterrupt is handled in a REPL + self.message("\nKeyboardInterrupt") + except EOFError: + self.message("\n*exit from pdb interact command*") + self._interact_state = None + else: + self._run_in_python_repl(reply) + continue + + if not self.cmdqueue: + try: + state = "commands" if self.commands_defining else "pdb" + reply = self._get_input(prompt=self.prompt, state=state) + except EOFError: + reply = "EOF" + + self.cmdqueue.append(reply) + + line = self.cmdqueue.pop(0) + line = self.precmd(line) + stop = self.onecmd(line) + stop = self.postcmd(stop, line) + self.postloop() + + def postloop(self): + super().postloop() + if self.quitting: + self.detach() + + def detach(self): + # Detach the debugger and close the socket without raising BdbQuit + self.quitting = False + if self._owns_sockfile: + # Don't try to reuse this instance, it's not valid anymore. + Pdb._last_pdb_instance = None + try: + self._sockfile.close() + except OSError: + # close() can fail if the connection was broken unexpectedly. + pass + + def do_debug(self, arg): + # Clear our cached list of valid commands; the recursive debugger might + # send its own differing list, and so ours needs to be re-sent. + self._command_name_cache = [] + return super().do_debug(arg) + + def do_alias(self, arg): + # Clear our cached list of valid commands; one might be added. + self._command_name_cache = [] + return super().do_alias(arg) + + def do_unalias(self, arg): + # Clear our cached list of valid commands; one might be removed. + self._command_name_cache = [] + return super().do_unalias(arg) + + def do_help(self, arg): + # Tell the client to render the help, since it might need a pager. + self._send(help=arg) + + do_h = do_help + + def _interact_displayhook(self, obj): + # Like the default `sys.displayhook` except sending a socket message. + if obj is not None: + self.message(repr(obj)) + builtins._ = obj + + def _run_in_python_repl(self, lines): + # Run one 'interact' mode code block against an existing namespace. + assert self._interact_state + save_displayhook = sys.displayhook + try: + sys.displayhook = self._interact_displayhook + code_obj = self._interact_state["compiler"](lines + "\n") + if code_obj is None: + raise SyntaxError("Incomplete command") + exec(code_obj, self._interact_state["ns"]) + except: + self._error_exc() + finally: + sys.displayhook = save_displayhook + + def do_interact(self, arg): + # Prepare to run 'interact' mode code blocks, and trigger the client + # to start treating all input as Python commands, not PDB ones. 
+        self.message("*pdb interact start*")
+        self._interact_state = dict(
+            compiler=codeop.CommandCompiler(),
+            ns={**self.curframe.f_globals, **self.curframe.f_locals},
+        )
+
+    @typing.override
+    def _create_recursive_debugger(self):
+        return _PdbServer(
+            self._sockfile,
+            owns_sockfile=False,
+            colorize=self.colorize,
+        )
+
+    @typing.override
+    def _prompt_for_confirmation(self, prompt, default):
+        try:
+            return self._get_input(prompt=prompt, state="confirm")
+        except (EOFError, KeyboardInterrupt):
+            return default
+
+    def do_run(self, arg):
+        self.error("remote PDB cannot restart the program")
+
+    do_restart = do_run
+
+    def _error_exc(self):
+        if self._interact_state and isinstance(sys.exception(), SystemExit):
+            # If we get a SystemExit in 'interact' mode, exit the REPL.
+            self._interact_state = None
+            ret = super()._error_exc()
+            self.message("*exit from pdb interact command*")
+            return ret
+        else:
+            return super()._error_exc()
+
+    def default(self, line):
+        # Unlike Pdb, don't prompt for more lines of a multi-line command.
+        # The remote needs to send us the whole block in one go.
+        try:
+            candidate = line.removeprefix("!") + "\n"
+            if codeop.compile_command(candidate, "<stdin>", "single") is None:
+                raise SyntaxError("Incomplete command")
+            return super().default(candidate)
+        except:
+            self._error_exc()
+
+
+class _PdbClient:
+    def __init__(self, pid, server_socket, interrupt_sock):
+        self.pid = pid
+        self.read_buf = b""
+        self.signal_read = None
+        self.signal_write = None
+        self.sigint_received = False
+        self.raise_on_sigint = False
+        self.server_socket = server_socket
+        self.interrupt_sock = interrupt_sock
+        self.pdb_instance = Pdb()
+        self.pdb_commands = set()
+        self.completion_matches = []
+        self.state = "dumb"
+        self.write_failed = False
+        self.multiline_block = False
+
+    def _ensure_valid_message(self, msg):
+        # Ensure the message conforms to our protocol.
+        # If anything needs to be changed here for a patch release of Python,
+        # the 'revision' in protocol_version() should be updated.
+        match msg:
+            case {"reply": str()}:
+                # Send input typed by a user at a prompt to the remote PDB.
+                pass
+            case {"signal": "EOF"}:
+                # Tell the remote PDB that the user pressed ^D at a prompt.
+                pass
+            case {"signal": "INT"}:
+                # Tell the remote PDB that the user pressed ^C at a prompt.
+                pass
+            case {
+                "complete": {
+                    "text": str(),
+                    "line": str(),
+                    "begidx": int(),
+                    "endidx": int(),
+                }
+            }:
+                # Ask the remote PDB what completions are valid for the given
+                # parameters, using readline's completion protocol.
+                pass
+            case _:
+                raise AssertionError(
+                    f"PDB message doesn't follow the schema! {msg}"
+                )
+
+    def _send(self, **kwargs):
+        self._ensure_valid_message(kwargs)
+        json_payload = json.dumps(kwargs)
+        try:
+            self.server_socket.sendall(json_payload.encode() + b"\n")
+        except OSError:
+            # This means that the connection has dropped, but we'll handle
+            # that the next time we try to read from the server instead of
+            # trying to handle it from everywhere _send() may be called.
+            # Track this with a flag rather than assuming readline() will
+            # ever return an empty string because the socket may be
+            # half-closed.
+            self.write_failed = True
+
+    def _readline(self):
+        if self.sigint_received:
+            # There's a pending unhandled SIGINT. Handle it now.
+            self.sigint_received = False
+            raise KeyboardInterrupt
+
+        # Wait for either a SIGINT or a line or EOF from the PDB server.
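+        # Each server message is one JSON object terminated by b"\n" (see
+        # _PdbServer._send), e.g. (illustrative):
+        #     b'{"prompt": "(Pdb) ", "state": "pdb"}\n'
+        # so we buffer until the first newline to get a complete message.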
+        selector = selectors.DefaultSelector()
+        selector.register(self.signal_read, selectors.EVENT_READ)
+        selector.register(self.server_socket, selectors.EVENT_READ)
+
+        while b"\n" not in self.read_buf:
+            for key, _ in selector.select():
+                if key.fileobj == self.signal_read:
+                    self.signal_read.recv(1024)
+                    if self.sigint_received:
+                        # If not, we're reading wakeup events for sigints that
+                        # we've previously handled, and can ignore them.
+                        self.sigint_received = False
+                        raise KeyboardInterrupt
+                elif key.fileobj == self.server_socket:
+                    data = self.server_socket.recv(16 * 1024)
+                    self.read_buf += data
+                    if not data and b"\n" not in self.read_buf:
+                        # EOF without a full final line. Drop the partial line.
+                        self.read_buf = b""
+                        return b""
+
+        ret, sep, self.read_buf = self.read_buf.partition(b"\n")
+        return ret + sep
+
+    def read_input(self, prompt, multiline_block):
+        self.multiline_block = multiline_block
+        with self._sigint_raises_keyboard_interrupt():
+            return input(prompt)
+
+    def read_command(self, prompt):
+        reply = self.read_input(prompt, multiline_block=False)
+        if self.state == "dumb":
+            # No logic applied whatsoever, just pass the raw reply back.
+            return reply
+
+        prefix = ""
+        if self.state == "pdb":
+            # PDB command entry mode
+            cmd = self.pdb_instance.parseline(reply)[0]
+            if cmd in self.pdb_commands or reply.strip() == "":
+                # Recognized PDB command, or blank line repeating last command
+                return reply
+
+            # Otherwise, explicit or implicit exec command
+            if reply.startswith("!"):
+                prefix = "!"
+                reply = reply.removeprefix(prefix).lstrip()
+
+        if codeop.compile_command(reply + "\n", "<stdin>", "single") is not None:
+            # Valid single-line statement
+            return prefix + reply
+
+        # Otherwise, valid first line of a multi-line statement
+        more_prompt = "...".ljust(len(prompt))
+        while codeop.compile_command(reply, "<stdin>", "single") is None:
+            reply += "\n" + self.read_input(more_prompt, multiline_block=True)
+
+        return prefix + reply
+
+    @contextmanager
+    def readline_completion(self, completer):
+        try:
+            import readline
+        except ImportError:
+            yield
+            return
+
+        old_completer = readline.get_completer()
+        try:
+            readline.set_completer(completer)
+            if readline.backend == "editline":
+                # libedit uses "^I" instead of "tab"
+                command_string = "bind ^I rl_complete"
+            else:
+                command_string = "tab: complete"
+            readline.parse_and_bind(command_string)
+            yield
+        finally:
+            readline.set_completer(old_completer)
+
+    @contextmanager
+    def _sigint_handler(self):
+        # Signal handling strategy:
+        # - When we call input() we want a SIGINT to raise KeyboardInterrupt
+        # - Otherwise we want to write to the wakeup FD and set a flag.
+        #   We'll break out of select() when the wakeup FD is written to,
+        #   and we'll check the flag whenever we're about to accept input.
+        def handler(signum, frame):
+            self.sigint_received = True
+            if self.raise_on_sigint:
+                # One-shot; don't raise again until the flag is set again.
+ self.raise_on_sigint = False + self.sigint_received = False + raise KeyboardInterrupt + + sentinel = object() + old_handler = sentinel + old_wakeup_fd = sentinel + + self.signal_read, self.signal_write = socket.socketpair() + with (closing(self.signal_read), closing(self.signal_write)): + self.signal_read.setblocking(False) + self.signal_write.setblocking(False) + + try: + old_handler = signal.signal(signal.SIGINT, handler) + + try: + old_wakeup_fd = signal.set_wakeup_fd( + self.signal_write.fileno(), + warn_on_full_buffer=False, + ) + yield + finally: + # Restore the old wakeup fd if we installed a new one + if old_wakeup_fd is not sentinel: + signal.set_wakeup_fd(old_wakeup_fd) + finally: + self.signal_read = self.signal_write = None + if old_handler is not sentinel: + # Restore the old handler if we installed a new one + signal.signal(signal.SIGINT, old_handler) + + @contextmanager + def _sigint_raises_keyboard_interrupt(self): + if self.sigint_received: + # There's a pending unhandled SIGINT. Handle it now. + self.sigint_received = False + raise KeyboardInterrupt + + try: + self.raise_on_sigint = True + yield + finally: + self.raise_on_sigint = False + + def cmdloop(self): + with ( + self._sigint_handler(), + self.readline_completion(self.complete), + ): + while not self.write_failed: + try: + if not (payload_bytes := self._readline()): + break + except KeyboardInterrupt: + self.send_interrupt() + continue + + try: + payload = json.loads(payload_bytes) + except json.JSONDecodeError: + print( + f"*** Invalid JSON from remote: {payload_bytes!r}", + flush=True, + ) + continue + + self.process_payload(payload) + + def send_interrupt(self): + if self.interrupt_sock is not None: + # Write to a socket that the PDB server listens on. This triggers + # the remote to raise a SIGINT for itself. We do this because + # Windows doesn't allow triggering SIGINT remotely. + # See https://stackoverflow.com/a/35792192 for many more details. + self.interrupt_sock.sendall(signal.SIGINT.to_bytes()) + else: + # On Unix we can just send a SIGINT to the remote process. + # This is preferable to using the signal thread approach that we + # use on Windows because it can interrupt IO in the main thread. 
+ os.kill(self.pid, signal.SIGINT) + + def process_payload(self, payload): + match payload: + case { + "command_list": command_list + } if all(isinstance(c, str) for c in command_list): + self.pdb_commands = set(command_list) + case {"message": str(msg), "type": str(msg_type)}: + if msg_type == "error": + print("***", msg, flush=True) + else: + print(msg, end="", flush=True) + case {"help": str(arg)}: + self.pdb_instance.do_help(arg) + case {"prompt": str(prompt), "state": str(state)}: + if state not in ("pdb", "interact"): + state = "dumb" + self.state = state + self.prompt_for_reply(prompt) + case _: + raise RuntimeError(f"Unrecognized payload {payload}") + + def prompt_for_reply(self, prompt): + while True: + try: + payload = {"reply": self.read_command(prompt)} + except EOFError: + payload = {"signal": "EOF"} + except KeyboardInterrupt: + payload = {"signal": "INT"} + except Exception as exc: + msg = traceback.format_exception_only(exc)[-1].strip() + print("***", msg, flush=True) + continue + + self._send(**payload) + return + + def complete(self, text, state): + import readline + + if state == 0: + self.completion_matches = [] + if self.state not in ("pdb", "interact"): + return None + + origline = readline.get_line_buffer() + line = origline.lstrip() + if self.multiline_block: + # We're completing a line contained in a multi-line block. + # Force the remote to treat it as a Python expression. + line = "! " + line + offset = len(origline) - len(line) + begidx = readline.get_begidx() - offset + endidx = readline.get_endidx() - offset + + msg = { + "complete": { + "text": text, + "line": line, + "begidx": begidx, + "endidx": endidx, + } + } + + self._send(**msg) + if self.write_failed: + return None + + payload = self._readline() + if not payload: + return None + + payload = json.loads(payload) + if "completions" not in payload: + raise RuntimeError( + f"Failed to get valid completions. Got: {payload}" + ) + + self.completion_matches = payload["completions"] + try: + return self.completion_matches[state] + except IndexError: + return None + + +def _connect( + *, + host, + port, + frame, + commands, + version, + signal_raising_thread, + colorize, +): + with closing(socket.create_connection((host, port))) as conn: + sockfile = conn.makefile("rwb") + + # The client requests this thread on Windows but not on Unix. + # Most tests don't request this thread, to keep them simpler. + if signal_raising_thread: + signal_server = (host, port) + else: + signal_server = None + + remote_pdb = _PdbServer( + sockfile, + signal_server=signal_server, + colorize=colorize, + ) + weakref.finalize(remote_pdb, sockfile.close) + + if Pdb._last_pdb_instance is not None: + remote_pdb.error("Another PDB instance is already attached.") + elif version != remote_pdb.protocol_version(): + target_ver = f"0x{remote_pdb.protocol_version():08X}" + attach_ver = f"0x{version:08X}" + remote_pdb.error( + f"The target process is running a Python version that is" + f" incompatible with this PDB module." 
+ f"\nTarget process pdb protocol version: {target_ver}" + f"\nLocal pdb module's protocol version: {attach_ver}" + ) + else: + remote_pdb.rcLines.extend(commands.splitlines()) + remote_pdb.set_trace(frame=frame) + + +def attach(pid, commands=()): + """Attach to a running process with the given PID.""" + with ExitStack() as stack: + server = stack.enter_context( + closing(socket.create_server(("localhost", 0))) + ) + port = server.getsockname()[1] + + connect_script = stack.enter_context( + tempfile.NamedTemporaryFile("w", delete_on_close=False) + ) + + use_signal_thread = sys.platform == "win32" + colorize = _colorize.can_colorize() + + connect_script.write( + textwrap.dedent( + f""" + import pdb, sys + pdb._connect( + host="localhost", + port={port}, + frame=sys._getframe(1), + commands={json.dumps("\n".join(commands))}, + version={_PdbServer.protocol_version()}, + signal_raising_thread={use_signal_thread!r}, + colorize={colorize!r}, + ) + """ + ) + ) + connect_script.close() + orig_mode = os.stat(connect_script.name).st_mode + os.chmod(connect_script.name, orig_mode | stat.S_IROTH | stat.S_IRGRP) + sys.remote_exec(pid, connect_script.name) + + # TODO Add a timeout? Or don't bother since the user can ^C? + client_sock, _ = server.accept() + stack.enter_context(closing(client_sock)) + + if use_signal_thread: + interrupt_sock, _ = server.accept() + stack.enter_context(closing(interrupt_sock)) + interrupt_sock.setblocking(False) + else: + interrupt_sock = None + + _PdbClient(pid, client_sock, interrupt_sock).cmdloop() + + +# Post-Mortem interface + +def post_mortem(t=None): + """Enter post-mortem debugging of the given *traceback*, or *exception* + object. + + If no traceback is given, it uses the one of the exception that is + currently being handled (an exception must be being handled if the + default is to be used). + + If `t` is an exception object, the `exceptions` command makes it possible to + list and inspect its chained exceptions (if any). + """ + return _post_mortem(t, Pdb()) + + +def _post_mortem(t, pdb_instance): + """ + Private version of post_mortem, which allow to pass a pdb instance + for testing purposes. + """ + # handling the default + if t is None: + exc = sys.exception() + if exc is not None: + t = exc.__traceback__ + + if t is None or (isinstance(t, BaseException) and t.__traceback__ is None): + raise ValueError("A valid traceback must be passed if no " + "exception is being handled") + + pdb_instance.reset() + pdb_instance.interaction(None, t) + + +def pm(): + """Enter post-mortem debugging of the traceback found in sys.last_exc.""" + post_mortem(sys.last_exc) + + +# Main program for testing + +TESTCMD = 'import x; x.main()' + +def test(): + run(TESTCMD) + +# print help +def help(): + import pydoc + pydoc.pager(__doc__) + +_usage = """\ +Debug the Python program given by pyfile. Alternatively, +an executable module or package to debug can be specified using +the -m switch. You can also attach to a running Python process +using the -p option with its PID. + +Initial commands are read from .pdbrc files in your home directory +and in the current directory, if they exist. Commands supplied with +-c are executed after commands from .pdbrc files. + +To let the script run until an exception occurs, use "-c continue". +To let the script run up to a given line X in the debugged file, use +"-c 'until X'".""" + + +def exit_with_permission_help_text(): + """ + Prints a message pointing to platform-specific permission help text and exits the program. 
+ This function is called when a PermissionError is encountered while trying + to attach to a process. + """ + print( + "Error: The specified process cannot be attached to due to insufficient permissions.\n" + "See the Python documentation for details on required privileges and troubleshooting:\n" + "https://docs.python.org/3.14/howto/remote_debugging.html#permission-requirements\n" + ) + sys.exit(1) + + +def parse_args(): + # We want pdb to be as intuitive as possible to users, so we need to do some + # heuristic parsing to deal with ambiguity. + # For example: + # "python -m pdb -m foo -p 1" should pass "-p 1" to "foo". + # "python -m pdb foo.py -m bar" should pass "-m bar" to "foo.py". + # "python -m pdb -m foo -m bar" should pass "-m bar" to "foo". + # This require some customized parsing logic to find the actual debug target. + + import argparse + + parser = argparse.ArgumentParser( + usage="%(prog)s [-h] [-c command] (-m module | -p pid | pyfile) [args ...]", + description=_usage, + formatter_class=argparse.RawDescriptionHelpFormatter, + allow_abbrev=False, + color=True, + ) + + # Get all the commands out first. For backwards compatibility, we allow + # -c commands to be after the target. + parser.add_argument('-c', '--command', action='append', default=[], metavar='command', dest='commands', + help='pdb commands to execute as if given in a .pdbrc file') + + opts, args = parser.parse_known_args() + + if not args: + # If no arguments were given (python -m pdb), print the whole help message. + # Without this check, argparse would only complain about missing required arguments. + # We need to add the arguments definitions here to get a proper help message. + parser.add_argument('-m', metavar='module', dest='module') + parser.add_argument('-p', '--pid', type=int, help="attach to the specified PID", default=None) + parser.print_help() + sys.exit(2) + elif args[0] == '-p' or args[0] == '--pid': + # Attach to a pid + parser.add_argument('-p', '--pid', type=int, help="attach to the specified PID", default=None) + opts, args = parser.parse_known_args() + if args: + # For --pid, any extra arguments are invalid. + parser.error(f"unrecognized arguments: {' '.join(args)}") + elif args[0] == '-m': + # Debug a module, we only need the first -m module argument. + # The rest is passed to the module itself. + parser.add_argument('-m', metavar='module', dest='module') + opt_module = parser.parse_args(args[:2]) + opts.module = opt_module.module + args = args[2:] + elif args[0].startswith('-'): + # Invalid argument before the script name. + invalid_args = list(itertools.takewhile(lambda a: a.startswith('-'), args)) + parser.error(f"unrecognized arguments: {' '.join(invalid_args)}") + + # Otherwise it's debugging a script and we already parsed all -c commands. + + return opts, args + +def main(): + opts, args = parse_args() + + if getattr(opts, 'pid', None) is not None: + try: + attach(opts.pid, opts.commands) + except PermissionError as e: + exit_with_permission_help_text() + return + elif getattr(opts, 'module', None) is not None: + file = opts.module + target = _ModuleTarget(file) + else: + file = args.pop(0) + if file.endswith('.pyz'): + target = _ZipTarget(file) + else: + target = _ScriptTarget(file) + + sys.argv[:] = [file] + args # Hide "pdb.py" and pdb options from argument list + + # Note on saving/restoring sys.argv: it's a good idea when sys.argv was + # modified by the script being debugged. It's a bad idea when it was + # changed by the user from the command line. 
There is a "restart" command + # which allows explicit specification of command line arguments. + pdb = Pdb(mode='cli', backend='monitoring', colorize=True) + pdb.rcLines.extend(opts.commands) + while True: + try: + pdb._run(target) + except Restart: + print("Restarting", target, "with arguments:") + print("\t" + " ".join(sys.argv[1:])) + except SystemExit as e: + # In most cases SystemExit does not warrant a post-mortem session. + print("The program exited via sys.exit(). Exit status:", end=' ') + print(e) + except BaseException as e: + traceback.print_exception(e, colorize=_colorize.can_colorize()) + print("Uncaught exception. Entering post mortem debugging") + print("Running 'cont' or 'step' will restart the program") + try: + pdb.interaction(None, e) + except Restart: + print("Restarting", target, "with arguments:") + print("\t" + " ".join(sys.argv[1:])) + continue + if pdb._user_requested_quit: + break + print("The program finished and will be restarted") + + +# When invoked as main program, invoke the debugger on a script +if __name__ == '__main__': + import pdb + pdb.main() diff --git a/Python314_4_x64_Template/Lib/pickle.py b/Python314_4_x64_Template/Lib/pickle.py new file mode 100644 index 00000000..beaefae0 --- /dev/null +++ b/Python314_4_x64_Template/Lib/pickle.py @@ -0,0 +1,1931 @@ +"""Create portable serialized representations of Python objects. + +See module copyreg for a mechanism for registering custom picklers. +See module pickletools source for extensive comments. + +Classes: + + Pickler + Unpickler + +Functions: + + dump(object, file) + dumps(object) -> string + load(file) -> object + loads(bytes) -> object + +Misc variables: + + __version__ + format_version + compatible_formats + +""" + +from types import FunctionType +from copyreg import dispatch_table +from copyreg import _extension_registry, _inverted_registry, _extension_cache +from itertools import batched +from functools import partial +import sys +from sys import maxsize +from struct import pack, unpack +import io +import codecs +import _compat_pickle + +__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler", + "Unpickler", "dump", "dumps", "load", "loads"] + +try: + from _pickle import PickleBuffer + __all__.append("PickleBuffer") + _HAVE_PICKLE_BUFFER = True +except ImportError: + _HAVE_PICKLE_BUFFER = False + + +# Shortcut for use in isinstance testing +bytes_types = (bytes, bytearray) + +# These are purely informational; no code uses these. +format_version = "5.0" # File format version we write +compatible_formats = ["1.0", # Original protocol 0 + "1.1", # Protocol 0 with INST added + "1.2", # Original protocol 1 + "1.3", # Protocol 1 with BINFLOAT added + "2.0", # Protocol 2 + "3.0", # Protocol 3 + "4.0", # Protocol 4 + "5.0", # Protocol 5 + ] # Old format versions we can read + +# This is the highest protocol number we know how to read. +HIGHEST_PROTOCOL = 5 + +# The protocol we write by default. May be less than HIGHEST_PROTOCOL. +# Only bump this if the oldest still supported version of Python already +# includes it. +DEFAULT_PROTOCOL = 5 + +class PickleError(Exception): + """A common base class for the other pickling exceptions.""" + pass + +class PicklingError(PickleError): + """This exception is raised when an unpicklable object is passed to the + dump() method. + + """ + pass + +class UnpicklingError(PickleError): + """This exception is raised when there is a problem unpickling an object, + such as a security violation. 
+ + Note that other exceptions may also be raised during unpickling, including + (but not necessarily limited to) AttributeError, EOFError, ImportError, + and IndexError. + + """ + pass + +# An instance of _Stop is raised by Unpickler.load_stop() in response to +# the STOP opcode, passing the object that is the result of unpickling. +class _Stop(Exception): + def __init__(self, value): + self.value = value + +# Pickle opcodes. See pickletools.py for extensive docs. The listing +# here is in kind-of alphabetical order of 1-character pickle code. +# pickletools groups them by purpose. + +MARK = b'(' # push special markobject on stack +STOP = b'.' # every pickle ends with STOP +POP = b'0' # discard topmost stack item +POP_MARK = b'1' # discard stack top through topmost markobject +DUP = b'2' # duplicate top stack item +FLOAT = b'F' # push float object; decimal string argument +INT = b'I' # push integer or bool; decimal string argument +BININT = b'J' # push four-byte signed int +BININT1 = b'K' # push 1-byte unsigned int +LONG = b'L' # push long; decimal string argument +BININT2 = b'M' # push 2-byte unsigned int +NONE = b'N' # push None +PERSID = b'P' # push persistent object; id is taken from string arg +BINPERSID = b'Q' # " " " ; " " " " stack +REDUCE = b'R' # apply callable to argtuple, both on stack +STRING = b'S' # push string; NL-terminated string argument +BINSTRING = b'T' # push string; counted binary string argument +SHORT_BINSTRING= b'U' # " " ; " " " " < 256 bytes +UNICODE = b'V' # push Unicode string; raw-unicode-escaped'd argument +BINUNICODE = b'X' # " " " ; counted UTF-8 string argument +APPEND = b'a' # append stack top to list below it +BUILD = b'b' # call __setstate__ or __dict__.update() +GLOBAL = b'c' # push self.find_class(modname, name); 2 string args +DICT = b'd' # build a dict from stack items +EMPTY_DICT = b'}' # push empty dict +APPENDS = b'e' # extend list on stack by topmost stack slice +GET = b'g' # push item from memo on stack; index is string arg +BINGET = b'h' # " " " " " " ; " " 1-byte arg +INST = b'i' # build & push class instance +LONG_BINGET = b'j' # push item from memo on stack; index is 4-byte arg +LIST = b'l' # build list from topmost stack items +EMPTY_LIST = b']' # push empty list +OBJ = b'o' # build & push class instance +PUT = b'p' # store stack top in memo; index is string arg +BINPUT = b'q' # " " " " " ; " " 1-byte arg +LONG_BINPUT = b'r' # " " " " " ; " " 4-byte arg +SETITEM = b's' # add key+value pair to dict +TUPLE = b't' # build tuple from topmost stack items +EMPTY_TUPLE = b')' # push empty tuple +SETITEMS = b'u' # modify dict by adding topmost key+value pairs +BINFLOAT = b'G' # push float; arg is 8-byte float encoding + +TRUE = b'I01\n' # not an opcode; see INT docs in pickletools.py +FALSE = b'I00\n' # not an opcode; see INT docs in pickletools.py + +# Protocol 2 + +PROTO = b'\x80' # identify pickle protocol +NEWOBJ = b'\x81' # build object by applying cls.__new__ to argtuple +EXT1 = b'\x82' # push object from extension registry; 1-byte index +EXT2 = b'\x83' # ditto, but 2-byte index +EXT4 = b'\x84' # ditto, but 4-byte index +TUPLE1 = b'\x85' # build 1-tuple from stack top +TUPLE2 = b'\x86' # build 2-tuple from two topmost stack items +TUPLE3 = b'\x87' # build 3-tuple from three topmost stack items +NEWTRUE = b'\x88' # push True +NEWFALSE = b'\x89' # push False +LONG1 = b'\x8a' # push long from < 256 bytes +LONG4 = b'\x8b' # push really big long + +_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3] + +# Protocol 3 (Python 3.x) + +BINBYTES 
= b'B'   # push bytes; counted binary string argument
+SHORT_BINBYTES = b'C'   # "     "   ;    "      "       "      " < 256 bytes
+
+# Protocol 4
+
+SHORT_BINUNICODE = b'\x8c'  # push short string; UTF-8 length < 256 bytes
+BINUNICODE8      = b'\x8d'  # push very long string
+BINBYTES8        = b'\x8e'  # push very long bytes string
+EMPTY_SET        = b'\x8f'  # push empty set on the stack
+ADDITEMS         = b'\x90'  # modify set by adding topmost stack items
+FROZENSET        = b'\x91'  # build frozenset from topmost stack items
+NEWOBJ_EX        = b'\x92'  # like NEWOBJ but work with keyword only arguments
+STACK_GLOBAL     = b'\x93'  # same as GLOBAL but using names on the stacks
+MEMOIZE          = b'\x94'  # store top of the stack in memo
+FRAME            = b'\x95'  # indicate the beginning of a new frame
+
+# Protocol 5
+
+BYTEARRAY8       = b'\x96'  # push bytearray
+NEXT_BUFFER      = b'\x97'  # push next out-of-band buffer
+READONLY_BUFFER  = b'\x98'  # make top of stack readonly
+
+__all__.extend(x for x in dir() if x.isupper() and not x.startswith('_'))
+
+
+class _Framer:
+
+    _FRAME_SIZE_MIN = 4
+    _FRAME_SIZE_TARGET = 64 * 1024
+
+    def __init__(self, file_write):
+        self.file_write = file_write
+        self.current_frame = None
+
+    def start_framing(self):
+        self.current_frame = io.BytesIO()
+
+    def end_framing(self):
+        if self.current_frame and self.current_frame.tell() > 0:
+            self.commit_frame(force=True)
+            self.current_frame = None
+
+    def commit_frame(self, force=False):
+        if self.current_frame:
+            f = self.current_frame
+            if f.tell() >= self._FRAME_SIZE_TARGET or force:
+                data = f.getbuffer()
+                write = self.file_write
+                if len(data) >= self._FRAME_SIZE_MIN:
+                    # Issue a single call to the write method of the underlying
+                    # file object for the frame opcode with the size of the
+                    # frame. The concatenation is expected to be less expensive
+                    # than issuing an additional call to write.
+                    write(FRAME + pack("<Q", len(data)))
+
+def whichmodule(obj, name):
+    """Find the module an object belongs to."""
+    dotted_path = name.split('.')
+    module_name = getattr(obj, '__module__', None)
+    if '<locals>' in dotted_path:
+        raise PicklingError(f"Can't pickle local object {obj!r}")
+    if module_name is None:
+        # Protect the iteration by using a list copy of sys.modules against dynamic
+        # modules that trigger imports of other modules upon calls to getattr.
+        for module_name, module in sys.modules.copy().items():
+            if (module_name == '__main__'
+                    or module_name == '__mp_main__'  # bpo-42406
+                    or module is None):
+                continue
+            try:
+                if _getattribute(module, dotted_path) is obj:
+                    return module_name
+            except AttributeError:
+                pass
+        module_name = '__main__'
+
+    try:
+        __import__(module_name, level=0)
+        module = sys.modules[module_name]
+    except (ImportError, ValueError, KeyError) as exc:
+        raise PicklingError(f"Can't pickle {obj!r}: {exc!s}")
+    try:
+        if _getattribute(module, dotted_path) is obj:
+            return module_name
+    except AttributeError:
+        raise PicklingError(f"Can't pickle {obj!r}: "
+                            f"it's not found as {module_name}.{name}")
+
+    raise PicklingError(
+        f"Can't pickle {obj!r}: it's not the same object as {module_name}.{name}")
+
+def encode_long(x):
+    r"""Encode a long to a two's complement little-endian binary string.
+    Note that 0 is a special case, returning an empty string, to save a
+    byte in the LONG1 pickling context.
+ + >>> encode_long(0) + b'' + >>> encode_long(255) + b'\xff\x00' + >>> encode_long(32767) + b'\xff\x7f' + >>> encode_long(-256) + b'\x00\xff' + >>> encode_long(-32768) + b'\x00\x80' + >>> encode_long(-128) + b'\x80' + >>> encode_long(127) + b'\x7f' + >>> + """ + if x == 0: + return b'' + nbytes = (x.bit_length() >> 3) + 1 + result = x.to_bytes(nbytes, byteorder='little', signed=True) + if x < 0 and nbytes > 1: + if result[-1] == 0xff and (result[-2] & 0x80) != 0: + result = result[:-1] + return result + +def decode_long(data): + r"""Decode a long from a two's complement little-endian binary string. + + >>> decode_long(b'') + 0 + >>> decode_long(b"\xff\x00") + 255 + >>> decode_long(b"\xff\x7f") + 32767 + >>> decode_long(b"\x00\xff") + -256 + >>> decode_long(b"\x00\x80") + -32768 + >>> decode_long(b"\x80") + -128 + >>> decode_long(b"\x7f") + 127 + """ + return int.from_bytes(data, byteorder='little', signed=True) + +def _T(obj): + cls = type(obj) + module = cls.__module__ + if module in (None, 'builtins', '__main__'): + return cls.__qualname__ + return f'{module}.{cls.__qualname__}' + + +_NoValue = object() + +# Pickling machinery + +class _Pickler: + + def __init__(self, file, protocol=None, *, fix_imports=True, + buffer_callback=None): + """This takes a binary file for writing a pickle data stream. + + The optional *protocol* argument tells the pickler to use the + given protocol; supported protocols are 0, 1, 2, 3, 4 and 5. + The default protocol is 5. It was introduced in Python 3.8, and + is incompatible with previous versions. + + Specifying a negative protocol version selects the highest + protocol version supported. The higher the protocol used, the + more recent the version of Python needed to read the pickle + produced. + + The *file* argument must have a write() method that accepts a + single bytes argument. It can thus be a file object opened for + binary writing, an io.BytesIO instance, or any other custom + object that meets this interface. + + If *fix_imports* is True and *protocol* is less than 3, pickle + will try to map the new Python 3 names to the old module names + used in Python 2, so that the pickle data stream is readable + with Python 2. + + If *buffer_callback* is None (the default), buffer views are + serialized into *file* as part of the pickle stream. + + If *buffer_callback* is not None, then it can be called any number + of times with a buffer view. If the callback returns a false value + (such as None), the given buffer is out-of-band; otherwise the + buffer is serialized in-band, i.e. inside the pickle stream. + + It is an error if *buffer_callback* is not None and *protocol* + is None or smaller than 5. + """ + if protocol is None: + protocol = DEFAULT_PROTOCOL + if protocol < 0: + protocol = HIGHEST_PROTOCOL + elif not 0 <= protocol <= HIGHEST_PROTOCOL: + raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL) + if buffer_callback is not None and protocol < 5: + raise ValueError("buffer_callback needs protocol >= 5") + self._buffer_callback = buffer_callback + try: + self._file_write = file.write + except AttributeError: + raise TypeError("file must have a 'write' attribute") + self.framer = _Framer(self._file_write) + self.write = self.framer.write + self._write_large_bytes = self.framer.write_large_bytes + self.memo = {} + self.proto = int(protocol) + self.bin = protocol >= 1 + self.fast = 0 + self.fix_imports = fix_imports and protocol < 3 + + def clear_memo(self): + """Clears the pickler's "memo". 
+
+        The memo is the data structure that remembers which objects the
+        pickler has already seen, so that shared or recursive objects
+        are pickled by reference and not by value.  This method is
+        useful when re-using picklers.
+        """
+        self.memo.clear()
+
+    def dump(self, obj):
+        """Write a pickled representation of obj to the open file."""
+        # Check whether Pickler was initialized correctly. This is
+        # only needed to mimic the behavior of _pickle.Pickler.dump().
+        if not hasattr(self, "_file_write"):
+            raise PicklingError("Pickler.__init__() was not called by "
+                                "%s.__init__()" % (self.__class__.__name__,))
+        if self.proto >= 2:
+            self.write(PROTO + pack("<B", self.proto))
+        if self.proto >= 4:
+            self.framer.start_framing()
+        self.save(obj)
+        self.write(STOP)
+        self.framer.end_framing()
+
+    def memoize(self, obj):
+        """Store an object in the memo."""
+
+        # The Pickler memo is a dictionary mapping object ids to 2-tuples
+        # that contain the Unpickler memo key and the object being memoized.
+        # The memo key is written to the pickle and will become
+        # the key in the Unpickler's memo.  The object is stored in the
+        # Pickler memo so that transient objects are kept alive during
+        # pickling.
+
+        # The use of the Unpickler memo length as the memo key is just a
+        # convention.  The only requirement is that the memo values be unique.
+        # But there appears no advantage to any other scheme, and this
+        # scheme allows the Unpickler memo to be implemented as a plain (but
+        # growable) array, indexed by memo key.
+        if self.fast:
+            return
+        assert id(obj) not in self.memo
+        idx = len(self.memo)
+        self.write(self.put(idx))
+        self.memo[id(obj)] = idx, obj
+
+    # Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
+    def put(self, idx):
+        if self.proto >= 4:
+            return MEMOIZE
+        elif self.bin:
+            if idx < 256:
+                return BINPUT + pack("<B", idx)
+            else:
+                return LONG_BINPUT + pack("<I", idx)
+        else:
+            return PUT + repr(idx).encode("ascii") + b'\n'
+
+        if self.proto >= 2 and func_name == "__newobj_ex__":
+            cls, args, kwargs = args
+            if not hasattr(cls, "__new__"):
+                raise PicklingError("first argument to __newobj_ex__() has no __new__")
+            if obj is not None and cls is not obj.__class__:
+                raise PicklingError(f"first argument to __newobj_ex__() "
+                                    f"must be {obj.__class__!r}, not {cls!r}")
+            if self.proto >= 4:
+                try:
+                    save(cls)
+                except BaseException as exc:
+                    exc.add_note(f'when serializing {_T(obj)} class')
+                    raise
+                try:
+                    save(args)
+                    save(kwargs)
+                except BaseException as exc:
+                    exc.add_note(f'when serializing {_T(obj)} __new__ arguments')
+                    raise
+                write(NEWOBJ_EX)
+            else:
+                func = partial(cls.__new__, cls, *args, **kwargs)
+                try:
+                    save(func)
+                except BaseException as exc:
+                    exc.add_note(f'when serializing {_T(obj)} reconstructor')
+                    raise
+                save(())
+                write(REDUCE)
+        elif self.proto >= 2 and func_name == "__newobj__":
+            # A __reduce__ implementation can direct protocol 2 or newer to
+            # use the more efficient NEWOBJ opcode, while still
+            # allowing protocol 0 and 1 to work normally.  For this to
+            # work, the function returned by __reduce__ should be
+            # called __newobj__, and its first argument should be a
+            # class.  The implementation for __newobj__
+            # should be as follows, although pickle has no way to
+            # verify this:
+            #
+            # def __newobj__(cls, *args):
+            #     return cls.__new__(cls, *args)
+            #
+            # Protocols 0 and 1 will pickle a reference to __newobj__,
+            # while protocol 2 (and above) will pickle a reference to
+            # cls, the remaining args tuple, and the NEWOBJ code,
+            # which calls cls.__new__(cls, *args) at unpickling time
+            # (see load_newobj below).
If __reduce__ returns a + # three-tuple, the state from the third tuple item will be + # pickled regardless of the protocol, calling __setstate__ + # at unpickling time (see load_build below). + # + # Note that no standard __newobj__ implementation exists; + # you have to provide your own. This is to enforce + # compatibility with Python 2.2 (pickles written using + # protocol 0 or 1 in Python 2.3 should be unpicklable by + # Python 2.2). + cls = args[0] + if not hasattr(cls, "__new__"): + raise PicklingError("first argument to __newobj__() has no __new__") + if obj is not None and cls is not obj.__class__: + raise PicklingError(f"first argument to __newobj__() " + f"must be {obj.__class__!r}, not {cls!r}") + args = args[1:] + try: + save(cls) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} class') + raise + try: + save(args) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} __new__ arguments') + raise + write(NEWOBJ) + else: + try: + save(func) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} reconstructor') + raise + try: + save(args) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} reconstructor arguments') + raise + write(REDUCE) + + if obj is not None: + # If the object is already in the memo, this means it is + # recursive. In this case, throw away everything we put on the + # stack, and fetch the object back from the memo. + if id(obj) in self.memo: + write(POP + self.get(self.memo[id(obj)][0])) + else: + self.memoize(obj) + + # More new special cases (that work with older protocols as + # well): when __reduce__ returns a tuple with 4 or 5 items, + # the 4th and 5th item should be iterators that provide list + # items and dict items (as (key, value) tuples), or None. + + if listitems is not None: + self._batch_appends(listitems, obj) + + if dictitems is not None: + self._batch_setitems(dictitems, obj) + + if state is not None: + if state_setter is None: + try: + save(state) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} state') + raise + write(BUILD) + else: + # If a state_setter is specified, call it instead of load_build + # to update obj's with its previous state. + # First, push state_setter and its tuple of expected arguments + # (obj, state) onto the stack. + try: + save(state_setter) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} state setter') + raise + save(obj) # simple BINGET opcode as obj is already memoized. + try: + save(state) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} state') + raise + write(TUPLE2) + # Trigger a state_setter(obj, state) function call. + write(REDUCE) + # The purpose of state_setter is to carry-out an + # inplace modification of obj. We do not care about what the + # method might return, so its output is eventually removed from + # the stack. + write(POP) + + # Methods below this point are dispatched through the dispatch table + + dispatch = {} + + def save_none(self, obj): + self.write(NONE) + dispatch[type(None)] = save_none + + def save_bool(self, obj): + if self.proto >= 2: + self.write(NEWTRUE if obj else NEWFALSE) + else: + self.write(TRUE if obj else FALSE) + dispatch[bool] = save_bool + + def save_long(self, obj): + if self.bin: + # If the int is small enough to fit in a signed 4-byte 2's-comp + # format, we can store it more efficiently than the general + # case. 
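+        # For instance (illustrative): 255 fits BININT1, 0xffff fits
+        # BININT2, -1 needs the signed 4-byte BININT, and 2**40 falls
+        # through to the LONG1/LONG4 encodings further down.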
+            # First one- and two-byte unsigned ints:
+            if obj >= 0:
+                if obj <= 0xff:
+                    self.write(BININT1 + pack("<B", obj))
+                    return
+                if obj <= 0xffff:
+                    self.write(BININT2 + pack("<H", obj))
+                    return
+            # Next check for 4-byte signed ints:
+            if -0x80000000 <= obj <= 0x7fffffff:
+                self.write(BININT + pack("<i", obj))
+                return
+        if self.proto >= 2:
+            encoded = encode_long(obj)
+            n = len(encoded)
+            if n < 256:
+                self.write(LONG1 + pack("<B", n) + encoded)
+            else:
+                self.write(LONG4 + pack("<i", n) + encoded)
+            return
+        if -0x80000000 <= obj <= 0x7fffffff:
+            self.write(INT + repr(obj).encode("ascii") + b'\n')
+        else:
+            self.write(LONG + repr(obj).encode("ascii") + b'L\n')
+    dispatch[int] = save_long
+
+    def save_float(self, obj):
+        if self.bin:
+            self.write(BINFLOAT + pack('>d', obj))
+        else:
+            self.write(FLOAT + repr(obj).encode("ascii") + b'\n')
+    dispatch[float] = save_float
+
+    def _save_bytes_no_memo(self, obj):
+        # helper for writing bytes objects for protocol >= 3
+        # without memoizing them
+        assert self.proto >= 3
+        n = len(obj)
+        if n <= 0xff:
+            self.write(SHORT_BINBYTES + pack("<B", n) + obj)
+        elif n > 0xffffffff and self.proto >= 4:
+            self._write_large_bytes(BINBYTES8 + pack("<Q", n), obj)
+        elif n >= self.framer._FRAME_SIZE_TARGET:
+            self._write_large_bytes(BINBYTES + pack("<I", n), obj)
+        else:
+            self.write(BINBYTES + pack("<I", n) + obj)
+
+    def save_bytes(self, obj):
+        if self.proto < 3:
+            if not obj:  # bytes object is empty
+                self.save_reduce(bytes, (), obj=obj)
+            else:
+                self.save_reduce(codecs.encode,
+                                 (str(obj, 'latin1'), 'latin1'), obj=obj)
+            return
+        self._save_bytes_no_memo(obj)
+        self.memoize(obj)
+    dispatch[bytes] = save_bytes
+
+    def _save_bytearray_no_memo(self, obj):
+        # helper for writing bytearray objects for protocol >= 5
+        # without memoizing them
+        assert self.proto >= 5
+        n = len(obj)
+        if n >= self.framer._FRAME_SIZE_TARGET:
+            self._write_large_bytes(BYTEARRAY8 + pack("<Q", n), obj)
+        else:
+            self.write(BYTEARRAY8 + pack("<Q", n) + obj)
+
+    def save_bytearray(self, obj):
+        if self.proto < 5:
+            if not obj:  # bytearray is empty
+                self.save_reduce(bytearray, (), obj=obj)
+            else:
+                self.save_reduce(bytearray, (bytes(obj),), obj=obj)
+            return
+        self._save_bytearray_no_memo(obj)
+        self.memoize(obj)
+    dispatch[bytearray] = save_bytearray
+
+    if _HAVE_PICKLE_BUFFER:
+        def save_picklebuffer(self, obj):
+            if self.proto < 5:
+                raise PicklingError("PickleBuffer can only be pickled with "
+                                    "protocol >= 5")
+            with obj.raw() as m:
+                if not m.contiguous:
+                    raise PicklingError("PickleBuffer can not be pickled when "
+                                        "pointing to a non-contiguous buffer")
+                in_band = True
+                if self._buffer_callback is not None:
+                    in_band = bool(self._buffer_callback(obj))
+                if in_band:
+                    # Write data in-band
+                    # XXX The C implementation avoids a copy here
+                    buf = m.tobytes()
+                    in_memo = id(buf) in self.memo
+                    if m.readonly:
+                        if in_memo:
+                            self._save_bytes_no_memo(buf)
+                        else:
+                            self.save_bytes(buf)
+                    else:
+                        if in_memo:
+                            self._save_bytearray_no_memo(buf)
+                        else:
+                            self.save_bytearray(buf)
+                else:
+                    # Write data out-of-band
+                    self.write(NEXT_BUFFER)
+                    if m.readonly:
+                        self.write(READONLY_BUFFER)
+
+        dispatch[PickleBuffer] = save_picklebuffer
+
+    def save_str(self, obj):
+        if self.bin:
+            encoded = obj.encode('utf-8', 'surrogatepass')
+            n = len(encoded)
+            if n <= 0xff and self.proto >= 4:
+                self.write(SHORT_BINUNICODE + pack("<B", n) + encoded)
+            elif n > 0xffffffff and self.proto >= 4:
+                self._write_large_bytes(BINUNICODE8 + pack("<Q", n), encoded)
+            elif n >= self.framer._FRAME_SIZE_TARGET:
+                self._write_large_bytes(BINUNICODE + pack("<I", n), encoded)
+            else:
+                self.write(BINUNICODE + pack("<I", n) + encoded)
+        else:
+            # Escape what raw-unicode-escape doesn't, but memoize the original.
+            tmp = obj.replace("\\", "\\u005c")
+            tmp = tmp.replace("\0", "\\u0000")
+            tmp = tmp.replace("\n", "\\u000a")
+            tmp = tmp.replace("\r", "\\u000d")
+            tmp = tmp.replace("\x1a", "\\u001a")  # EOF on DOS
+            self.write(UNICODE + tmp.encode('raw-unicode-escape') + b'\n')
+        self.memoize(obj)
+    dispatch[str] = save_str
+
+    def save_tuple(self, obj):
+        if not obj:  # tuple is empty
+            if self.bin:
+                self.write(EMPTY_TUPLE)
+            else:
+                self.write(MARK + TUPLE)
+            return
+
+        n = len(obj)
+        save = self.save
+        memo = self.memo
+        if n <= 3 and self.proto >= 2:
+            for i, element in enumerate(obj):
+                try:
+                    save(element)
+                except BaseException as exc:
+                    exc.add_note(f'when serializing {_T(obj)} item {i}')
+                    raise
+            # Subtle.  Same as in the big comment below.
+            if id(obj) in memo:
+                get = self.get(memo[id(obj)][0])
+                self.write(POP * n + get)
+            else:
+                self.write(_tuplesize2code[n])
+                self.memoize(obj)
+            return
+
+        # proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
+        # has more than 3 elements.
+        write = self.write
+        write(MARK)
+        for i, element in enumerate(obj):
+            try:
+                save(element)
+            except BaseException as exc:
+                exc.add_note(f'when serializing {_T(obj)} item {i}')
+                raise
+
+        if id(obj) in memo:
+            # Subtle.  d was not in memo when we entered save_tuple(), so
+            # the process of saving the tuple's elements must have saved
+            # the tuple itself:  the tuple is recursive.  The proper action
+            # now is to throw away everything we put on the stack, and
+            # simply GET the tuple (it's already constructed).  This check
+            # could have been done in the "for element" loop instead, but
+            # recursive tuples are a rare thing.
+            get = self.get(memo[id(obj)][0])
+            if self.bin:
+                write(POP_MARK + get)
+            else:   # proto 0 -- POP_MARK not available
+                write(POP * (n+1) + get)
+            return
+
+        # No recursion.
+ write(TUPLE) + self.memoize(obj) + + dispatch[tuple] = save_tuple + + def save_list(self, obj): + if self.bin: + self.write(EMPTY_LIST) + else: # proto 0 -- can't use EMPTY_LIST + self.write(MARK + LIST) + + self.memoize(obj) + self._batch_appends(obj, obj) + + dispatch[list] = save_list + + _BATCHSIZE = 1000 + + def _batch_appends(self, items, obj): + # Helper to batch up APPENDS sequences + save = self.save + write = self.write + + if not self.bin: + for i, x in enumerate(items): + try: + save(x) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {i}') + raise + write(APPEND) + return + + start = 0 + for batch in batched(items, self._BATCHSIZE): + batch_len = len(batch) + if batch_len != 1: + write(MARK) + for i, x in enumerate(batch, start): + try: + save(x) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {i}') + raise + write(APPENDS) + else: + try: + save(batch[0]) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {start}') + raise + write(APPEND) + start += batch_len + + def save_dict(self, obj): + if self.bin: + self.write(EMPTY_DICT) + else: # proto 0 -- can't use EMPTY_DICT + self.write(MARK + DICT) + + self.memoize(obj) + self._batch_setitems(obj.items(), obj) + + dispatch[dict] = save_dict + + def _batch_setitems(self, items, obj): + # Helper to batch up SETITEMS sequences; proto >= 1 only + save = self.save + write = self.write + + if not self.bin: + for k, v in items: + save(k) + try: + save(v) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {k!r}') + raise + write(SETITEM) + return + + for batch in batched(items, self._BATCHSIZE): + if len(batch) != 1: + write(MARK) + for k, v in batch: + save(k) + try: + save(v) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {k!r}') + raise + write(SETITEMS) + else: + k, v = batch[0] + save(k) + try: + save(v) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {k!r}') + raise + write(SETITEM) + + def save_set(self, obj): + save = self.save + write = self.write + + if self.proto < 4: + self.save_reduce(set, (list(obj),), obj=obj) + return + + write(EMPTY_SET) + self.memoize(obj) + + for batch in batched(obj, self._BATCHSIZE): + write(MARK) + try: + for item in batch: + save(item) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} element') + raise + write(ADDITEMS) + dispatch[set] = save_set + + def save_frozenset(self, obj): + save = self.save + write = self.write + + if self.proto < 4: + self.save_reduce(frozenset, (list(obj),), obj=obj) + return + + write(MARK) + try: + for item in obj: + save(item) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} element') + raise + + if id(obj) in self.memo: + # If the object is already in the memo, this means it is + # recursive. In this case, throw away everything we put on the + # stack, and fetch the object back from the memo. 
+            write(POP_MARK + self.get(self.memo[id(obj)][0]))
+            return
+
+        write(FROZENSET)
+        self.memoize(obj)
+    dispatch[frozenset] = save_frozenset
+
+    def save_global(self, obj, name=None):
+        write = self.write
+        memo = self.memo
+
+        if name is None:
+            name = getattr(obj, '__qualname__', None)
+        if name is None:
+            name = obj.__name__
+
+        module_name = whichmodule(obj, name)
+        if self.proto >= 2:
+            code = _extension_registry.get((module_name, name), _NoValue)
+            if code is not _NoValue:
+                if code <= 0xff:
+                    data = pack("<B", code)
+                    write(EXT1 + data)
+                elif code <= 0xffff:
+                    write(EXT2 + pack("<H", code))
+                else:
+                    write(EXT4 + pack("<i", code))
+                return
+        if self.proto >= 4:
+            self.save(module_name)
+            self.save(name)
+            write(STACK_GLOBAL)
+        elif '.' in name:
+            # In protocol < 4, objects with multi-part __qualname__
+            # are represented as
+            # getattr(getattr(..., attrname1), attrname2).
+            dotted_path = name.split('.')
+            name = dotted_path.pop(0)
+            save = self.save
+            for attrname in dotted_path:
+                save(getattr)
+                if self.proto < 2:
+                    write(MARK)
+            self._save_toplevel_by_name(module_name, name)
+            for attrname in dotted_path:
+                save(attrname)
+                if self.proto < 2:
+                    write(TUPLE)
+                else:
+                    write(TUPLE2)
+                write(REDUCE)
+        else:
+            self._save_toplevel_by_name(module_name, name)
+
+        self.memoize(obj)
+
+    def _save_toplevel_by_name(self, module_name, name):
+        if self.proto >= 3:
+            # Non-ASCII identifiers are supported only with protocols >= 3.
+            encoding = "utf-8"
+        else:
+            if self.fix_imports:
+                r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
+                r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
+                if (module_name, name) in r_name_mapping:
+                    module_name, name = r_name_mapping[(module_name, name)]
+                elif module_name in r_import_mapping:
+                    module_name = r_import_mapping[module_name]
+            encoding = "ascii"
+        try:
+            self.write(GLOBAL + bytes(module_name, encoding) + b'\n')
+        except UnicodeEncodeError:
+            raise PicklingError(
+                f"can't pickle module identifier {module_name!r} using "
+                f"pickle protocol {self.proto}")
+        try:
+            self.write(bytes(name, encoding) + b'\n')
+        except UnicodeEncodeError:
+            raise PicklingError(
+                f"can't pickle global identifier {name!r} using "
+                f"pickle protocol {self.proto}")
+
+    def save_type(self, obj):
+        if obj is type(None):
+            return self.save_reduce(type, (None,), obj=obj)
+        elif obj is type(NotImplemented):
+            return self.save_reduce(type, (NotImplemented,), obj=obj)
+        elif obj is type(...):
+            return self.save_reduce(type, (...,), obj=obj)
+        return self.save_global(obj)
+
+    dispatch[FunctionType] = save_global
+    dispatch[type] = save_type
+
+
+# Unpickling machinery
+
+class _Unpickler:
+
+    def __init__(self, file, *, fix_imports=True,
+                 encoding="ASCII", errors="strict", buffers=None):
+        """This takes a binary file for reading a pickle data stream.
+
+        The protocol version of the pickle is detected automatically, so
+        no proto argument is needed.
+
+        The argument *file* must have two methods, a read() method that
+        takes an integer argument, and a readline() method that requires
+        no arguments.  Both methods should return bytes.  Thus *file*
+        can be a binary file object opened for reading, an io.BytesIO
+        object, or any other custom object that meets this interface.
+ + If *buffers* is not None, it should be an iterable of buffer-enabled + objects that is consumed each time the pickle stream references + an out-of-band buffer view. Such buffers have been given in order + to the *buffer_callback* of a Pickler object. + + If *buffers* is None (the default), then the buffers are taken + from the pickle stream, assuming they are serialized there. + It is an error for *buffers* to be None if the pickle stream + was produced with a non-None *buffer_callback*. + + Other optional arguments are *fix_imports*, *encoding* and + *errors*, which are used to control compatibility support for + pickle stream generated by Python 2. If *fix_imports* is True, + pickle will try to map the old Python 2 names to the new names + used in Python 3. The *encoding* and *errors* tell pickle how + to decode 8-bit string instances pickled by Python 2; these + default to 'ASCII' and 'strict', respectively. *encoding* can be + 'bytes' to read these 8-bit string instances as bytes objects. + """ + self._buffers = iter(buffers) if buffers is not None else None + self._file_readline = file.readline + self._file_read = file.read + self.memo = {} + self.encoding = encoding + self.errors = errors + self.proto = 0 + self.fix_imports = fix_imports + + def load(self): + """Read a pickled object representation from the open file. + + Return the reconstituted object hierarchy specified in the file. + """ + # Check whether Unpickler was initialized correctly. This is + # only needed to mimic the behavior of _pickle.Unpickler.dump(). + if not hasattr(self, "_file_read"): + raise UnpicklingError("Unpickler.__init__() was not called by " + "%s.__init__()" % (self.__class__.__name__,)) + self._unframer = _Unframer(self._file_read, self._file_readline) + self.read = self._unframer.read + self.readinto = self._unframer.readinto + self.readline = self._unframer.readline + self.metastack = [] + self.stack = [] + self.append = self.stack.append + self.proto = 0 + read = self.read + dispatch = self.dispatch + try: + while True: + key = read(1) + if not key: + raise EOFError + assert isinstance(key, bytes_types) + dispatch[key[0]](self) + except _Stop as stopinst: + return stopinst.value + + # Return a list of items pushed in the stack after last MARK instruction. 
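+    # Illustrative example (hypothetical values): while a MARK is active,
+    # the suspended outer stack lives on metastack, so with
+    # metastack == [[X]] and stack == ['a', 'b'], pop_mark() returns
+    # ['a', 'b'] and restores stack == [X].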
+    def pop_mark(self):
+        items = self.stack
+        self.stack = self.metastack.pop()
+        self.append = self.stack.append
+        return items
+
+    def persistent_load(self, pid):
+        raise UnpicklingError("unsupported persistent id encountered")
+
+    dispatch = {}
+
+    def load_proto(self):
+        proto = self.read(1)[0]
+        if not 0 <= proto <= HIGHEST_PROTOCOL:
+            raise ValueError("unsupported pickle protocol: %d" % proto)
+        self.proto = proto
+    dispatch[PROTO[0]] = load_proto
+
+    def load_frame(self):
+        frame_size, = unpack('<Q', self.read(8))
+        if frame_size > sys.maxsize:
+            raise ValueError("frame size > sys.maxsize: %d" % frame_size)
+        self._unframer.load_frame(frame_size)
+    dispatch[FRAME[0]] = load_frame
+
+    def load_persid(self):
+        try:
+            pid = self.readline()[:-1].decode("ascii")
+        except UnicodeDecodeError:
+            raise UnpicklingError(
+                "persistent IDs in protocol 0 must be ASCII strings")
+        self.append(self.persistent_load(pid))
+    dispatch[PERSID[0]] = load_persid
+
+    def load_binpersid(self):
+        pid = self.stack.pop()
+        self.append(self.persistent_load(pid))
+    dispatch[BINPERSID[0]] = load_binpersid
+
+    def load_none(self):
+        self.append(None)
+    dispatch[NONE[0]] = load_none
+
+    def load_false(self):
+        self.append(False)
+    dispatch[NEWFALSE[0]] = load_false
+
+    def load_true(self):
+        self.append(True)
+    dispatch[NEWTRUE[0]] = load_true
+
+    def load_int(self):
+        data = self.readline()
+        if data == FALSE[1:]:
+            val = False
+        elif data == TRUE[1:]:
+            val = True
+        else:
+            val = int(data)
+        self.append(val)
+    dispatch[INT[0]] = load_int
+
+    def load_binint(self):
+        self.append(unpack('<i', self.read(4))[0])
+    dispatch[BININT[0]] = load_binint
+
+    def load_binint1(self):
+        self.append(self.read(1)[0])
+    dispatch[BININT1[0]] = load_binint1
+
+    def load_binint2(self):
+        self.append(unpack('<H', self.read(2))[0])
+    dispatch[BININT2[0]] = load_binint2
+
+    def load_long(self):
+        val = self.readline()[:-1]
+        if val and val[-1] == b'L'[0]:
+            val = val[:-1]
+        self.append(int(val, 0))
+    dispatch[LONG[0]] = load_long
+
+    def load_long1(self):
+        n = self.read(1)[0]
+        data = self.read(n)
+        self.append(decode_long(data))
+    dispatch[LONG1[0]] = load_long1
+
+    def load_long4(self):
+        n, = unpack('<i', self.read(4))
+        if n < 0:
+            # Corrupt or hostile pickle -- we never write one like this
+            raise UnpicklingError("LONG pickle has negative byte count")
+        data = self.read(n)
+        self.append(decode_long(data))
+    dispatch[LONG4[0]] = load_long4
+
+    def load_float(self):
+        self.append(float(self.readline()[:-1]))
+    dispatch[FLOAT[0]] = load_float
+
+    def load_binfloat(self):
+        self.append(unpack('>d', self.read(8))[0])
+    dispatch[BINFLOAT[0]] = load_binfloat
+
+    def _decode_string(self, value):
+        # Used to allow strings from Python 2 to be decoded either as
+        # bytes or Unicode strings.  This should be used only with the
+        # STRING, BINSTRING and SHORT_BINSTRING opcodes.
+        if self.encoding == "bytes":
+            return value
+        else:
+            return value.decode(self.encoding, self.errors)
+
+    def load_string(self):
+        data = self.readline()[:-1]
+        # Strip outermost quotes
+        if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'':
+            data = data[1:-1]
+        else:
+            raise UnpicklingError("the STRING opcode argument must be quoted")
+        self.append(self._decode_string(codecs.escape_decode(data)[0]))
+    dispatch[STRING[0]] = load_string
+
+    def load_binstring(self):
+        # Deprecated BINSTRING uses signed 32-bit length
+        len, = unpack('<i', self.read(4))
+        if len < 0:
+            raise UnpicklingError("BINSTRING pickle has negative byte count")
+        data = self.read(len)
+        self.append(self._decode_string(data))
+    dispatch[BINSTRING[0]] = load_binstring
+
+    def load_binbytes(self):
+        len, = unpack('<I', self.read(4))
+        if len > maxsize:
+            raise UnpicklingError("BINBYTES exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(self.read(len))
+    dispatch[BINBYTES[0]] = load_binbytes
+
+    def load_unicode(self):
+        self.append(str(self.readline()[:-1], 'raw-unicode-escape'))
+    dispatch[UNICODE[0]] = load_unicode
+
+    def load_binunicode(self):
+        len, = unpack('<I', self.read(4))
+        if len > maxsize:
+            raise UnpicklingError("BINUNICODE exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+    dispatch[BINUNICODE[0]] = load_binunicode
+
+    def load_binunicode8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+            raise UnpicklingError("BINUNICODE8 exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+    dispatch[BINUNICODE8[0]] = load_binunicode8
+
+    def load_binbytes8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+            raise UnpicklingError("BINBYTES8 exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(self.read(len))
+    dispatch[BINBYTES8[0]] = load_binbytes8
+
+    def load_bytearray8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+            raise UnpicklingError("BYTEARRAY8 exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        b = bytearray(len)
+        self.readinto(b)
+        self.append(b)
+    dispatch[BYTEARRAY8[0]] = load_bytearray8
+
+    def load_next_buffer(self):
+        if self._buffers is None:
+            raise UnpicklingError("pickle stream refers to out-of-band data "
+                                  "but no *buffers* argument was given")
+        try:
+            buf = next(self._buffers)
+        except StopIteration:
+            raise UnpicklingError("not enough out-of-band buffers")
+        self.append(buf)
+    dispatch[NEXT_BUFFER[0]] = load_next_buffer
+
+    def load_readonly_buffer(self):
+        buf = self.stack[-1]
+        with memoryview(buf) as m:
+            if not m.readonly:
+                self.stack[-1] = m.toreadonly()
+    dispatch[READONLY_BUFFER[0]] = load_readonly_buffer
+
+    def load_short_binstring(self):
+        len = self.read(1)[0]
+        data = self.read(len)
+        self.append(self._decode_string(data))
+    dispatch[SHORT_BINSTRING[0]] = load_short_binstring
+
+    def load_short_binbytes(self):
+        len = self.read(1)[0]
+        self.append(self.read(len))
+    dispatch[SHORT_BINBYTES[0]] = load_short_binbytes
+
+    def load_short_binunicode(self):
+        len = self.read(1)[0]
+        self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+    dispatch[SHORT_BINUNICODE[0]] = load_short_binunicode
+
+    def load_tuple(self):
+        items = self.pop_mark()
+        self.append(tuple(items))
+    dispatch[TUPLE[0]] = load_tuple
+
+    def load_empty_tuple(self):
+        self.append(())
+    dispatch[EMPTY_TUPLE[0]] = load_empty_tuple
+
+    def load_tuple1(self):
+        self.stack[-1] = (self.stack[-1],)
+    dispatch[TUPLE1[0]] = load_tuple1
+
+    def load_tuple2(self):
+        self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
+    dispatch[TUPLE2[0]] = load_tuple2
+
+    def load_tuple3(self):
+        self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
+    dispatch[TUPLE3[0]] = load_tuple3
+
+    def load_empty_list(self):
+        self.append([])
+    dispatch[EMPTY_LIST[0]] = load_empty_list
+
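+    # Illustrative example (hypothetical stack values): the fixed-size
+    # tuple loaders above rewrite the stack top in place, so with
+    # stack == [1, 2, 3], TUPLE2 leaves stack == [1, (2, 3)] via
+    # stack[-2:] = [(stack[-2], stack[-1])].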
+    def load_empty_dictionary(self):
+        self.append({})
+    dispatch[EMPTY_DICT[0]] = load_empty_dictionary
+
+    def load_empty_set(self):
+        self.append(set())
+    dispatch[EMPTY_SET[0]] = load_empty_set
+
+    def load_frozenset(self):
+        items = self.pop_mark()
+        self.append(frozenset(items))
+    dispatch[FROZENSET[0]] = load_frozenset
+
+    def load_list(self):
+        items = self.pop_mark()
+        self.append(items)
+    dispatch[LIST[0]] = load_list
+
+    def load_dict(self):
+        items = self.pop_mark()
+        d = {items[i]: items[i+1]
+             for i in range(0, len(items), 2)}
+        self.append(d)
+    dispatch[DICT[0]] = load_dict
+
+    # INST and OBJ differ only in how they get a class object.  It's not
+    # only sensible to do the rest in a common routine, the two routines
+    # previously diverged and grew different bugs.
+    # klass is the class to instantiate, and k points to the topmost mark
+    # object, following which are the arguments for klass.__init__.
+    def _instantiate(self, klass, args):
+        if (args or not isinstance(klass, type) or
+            hasattr(klass, "__getinitargs__")):
+            try:
+                value = klass(*args)
+            except TypeError as err:
+                raise TypeError("in constructor for %s: %s" %
+                                (klass.__name__, str(err)), err.__traceback__)
+        else:
+            value = klass.__new__(klass)
+        self.append(value)
+
+    def load_inst(self):
+        module = self.readline()[:-1].decode("ascii")
+        name = self.readline()[:-1].decode("ascii")
+        klass = self.find_class(module, name)
+        self._instantiate(klass, self.pop_mark())
+    dispatch[INST[0]] = load_inst
+
+    def load_obj(self):
+        # Stack is ... markobject classobject arg1 arg2 ...
+        args = self.pop_mark()
+        cls = args.pop(0)
+        self._instantiate(cls, args)
+    dispatch[OBJ[0]] = load_obj
+
+    def load_newobj(self):
+        args = self.stack.pop()
+        cls = self.stack.pop()
+        obj = cls.__new__(cls, *args)
+        self.append(obj)
+    dispatch[NEWOBJ[0]] = load_newobj
+
+    def load_newobj_ex(self):
+        kwargs = self.stack.pop()
+        args = self.stack.pop()
+        cls = self.stack.pop()
+        obj = cls.__new__(cls, *args, **kwargs)
+        self.append(obj)
+    dispatch[NEWOBJ_EX[0]] = load_newobj_ex
+
+    def load_global(self):
+        module = self.readline()[:-1].decode("utf-8")
+        name = self.readline()[:-1].decode("utf-8")
+        klass = self.find_class(module, name)
+        self.append(klass)
+    dispatch[GLOBAL[0]] = load_global
+
+    def load_stack_global(self):
+        name = self.stack.pop()
+        module = self.stack.pop()
+        if type(name) is not str or type(module) is not str:
+            raise UnpicklingError("STACK_GLOBAL requires str")
+        self.append(self.find_class(module, name))
+    dispatch[STACK_GLOBAL[0]] = load_stack_global
+
+    def load_ext1(self):
+        code = self.read(1)[0]
+        self.get_extension(code)
+    dispatch[EXT1[0]] = load_ext1
+
+    def load_ext2(self):
+        code, = unpack('<H', self.read(2))
+        self.get_extension(code)
+    dispatch[EXT2[0]] = load_ext2
+
+    def load_ext4(self):
+        code, = unpack('<i', self.read(4))
+        self.get_extension(code)
+    dispatch[EXT4[0]] = load_ext4
+
+    def get_extension(self, code):
+        nil = []
+        obj = _extension_cache.get(code, nil)
+        if obj is not nil:
+            self.append(obj)
+            return
+        key = _inverted_registry.get(code)
+        if not key:
+            if code <= 0: # note that 0 is forbidden
+                # Corrupt or hostile pickle.
+                raise UnpicklingError("EXT specifies code <= 0")
+            raise ValueError("unregistered extension code %d" % code)
+        obj = self.find_class(*key)
+        _extension_cache[code] = obj
+        self.append(obj)
+
+    def find_class(self, module, name):
+        # Subclasses may override this.
+        sys.audit('pickle.find_class', module, name)
+        if self.proto < 3 and self.fix_imports:
+            if (module, name) in _compat_pickle.NAME_MAPPING:
+                module, name = _compat_pickle.NAME_MAPPING[(module, name)]
+            elif module in _compat_pickle.IMPORT_MAPPING:
+                module = _compat_pickle.IMPORT_MAPPING[module]
+        __import__(module, level=0)
+        if self.proto >= 4 and '.' in name:
+            dotted_path = name.split('.')
+            try:
+                return _getattribute(sys.modules[module], dotted_path)
+            except AttributeError:
+                raise AttributeError(
+                    f"Can't resolve path {name!r} on module {module!r}")
+        else:
+            return getattr(sys.modules[module], name)
+
+    def load_reduce(self):
+        stack = self.stack
+        args = stack.pop()
+        func = stack[-1]
+        stack[-1] = func(*args)
+    dispatch[REDUCE[0]] = load_reduce
+
+    def load_pop(self):
+        if self.stack:
+            del self.stack[-1]
+        else:
+            self.pop_mark()
+    dispatch[POP[0]] = load_pop
+
+    def load_pop_mark(self):
+        self.pop_mark()
+    dispatch[POP_MARK[0]] = load_pop_mark
+
+    def load_dup(self):
+        self.append(self.stack[-1])
+    dispatch[DUP[0]] = load_dup
+
+    def load_get(self):
+        i = int(self.readline()[:-1])
+        try:
+            self.append(self.memo[i])
+        except KeyError:
+            msg = f'Memo value not found at index {i}'
+            raise UnpicklingError(msg) from None
+    dispatch[GET[0]] = load_get
+
+    def load_binget(self):
+        i = self.read(1)[0]
+        try:
+            self.append(self.memo[i])
+        except KeyError as exc:
+            msg = f'Memo value not found at index {i}'
+            raise UnpicklingError(msg) from None
+    dispatch[BINGET[0]] = load_binget
+
+    def load_long_binget(self):
+        i, = unpack('<I', self.read(4))
+        try:
+            self.append(self.memo[i])
+        except KeyError as exc:
+            msg = f'Memo value not found at index {i}'
+            raise UnpicklingError(msg) from None
+    dispatch[LONG_BINGET[0]] = load_long_binget
+
+    def load_put(self):
+        i = int(self.readline()[:-1])
+        if i < 0:
+            raise ValueError("negative PUT argument")
+        self.memo[i] = self.stack[-1]
+    dispatch[PUT[0]] = load_put
+
+    def load_binput(self):
+        i = self.read(1)[0]
+        if i < 0:
+            raise ValueError("negative BINPUT argument")
+        self.memo[i] = self.stack[-1]
+    dispatch[BINPUT[0]] = load_binput
+
+    def load_long_binput(self):
+        i, = unpack('<I', self.read(4))
+        if i > maxsize:
+            raise ValueError("negative LONG_BINPUT argument")
+        self.memo[i] = self.stack[-1]
+    dispatch[LONG_BINPUT[0]] = load_long_binput
+
+    def load_memoize(self):
+        memo = self.memo
+        memo[len(memo)] = self.stack[-1]
+    dispatch[MEMOIZE[0]] = load_memoize
+
+    def load_append(self):
+        stack = self.stack
+        value = stack.pop()
+        list = stack[-1]
+        list.append(value)
+    dispatch[APPEND[0]] = load_append
+
+    def load_appends(self):
+        items = self.pop_mark()
+        list_obj = self.stack[-1]
+        try:
+            extend = list_obj.extend
+        except AttributeError:
+            pass
+        else:
+            extend(items)
+            return
+        # Even if the PEP 307 requires extend() and append() methods,
+        # fall back on append() if the object has no extend() method
+        # for backward compatibility.
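+        # Illustrative case (hypothetical object, not upstream behaviour
+        # beyond this fallback): a wrapper reached through REDUCE that
+        # exposes append() but not extend() must still absorb a batched
+        # APPENDS the same way a run of single APPEND opcodes would.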
+ append = list_obj.append + for item in items: + append(item) + dispatch[APPENDS[0]] = load_appends + + def load_setitem(self): + stack = self.stack + value = stack.pop() + key = stack.pop() + dict = stack[-1] + dict[key] = value + dispatch[SETITEM[0]] = load_setitem + + def load_setitems(self): + items = self.pop_mark() + dict = self.stack[-1] + for i in range(0, len(items), 2): + dict[items[i]] = items[i + 1] + dispatch[SETITEMS[0]] = load_setitems + + def load_additems(self): + items = self.pop_mark() + set_obj = self.stack[-1] + if isinstance(set_obj, set): + set_obj.update(items) + else: + add = set_obj.add + for item in items: + add(item) + dispatch[ADDITEMS[0]] = load_additems + + def load_build(self): + stack = self.stack + state = stack.pop() + inst = stack[-1] + setstate = getattr(inst, "__setstate__", _NoValue) + if setstate is not _NoValue: + setstate(state) + return + slotstate = None + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if state: + inst_dict = inst.__dict__ + intern = sys.intern + for k, v in state.items(): + if type(k) is str: + inst_dict[intern(k)] = v + else: + inst_dict[k] = v + if slotstate: + for k, v in slotstate.items(): + setattr(inst, k, v) + dispatch[BUILD[0]] = load_build + + def load_mark(self): + self.metastack.append(self.stack) + self.stack = [] + self.append = self.stack.append + dispatch[MARK[0]] = load_mark + + def load_stop(self): + value = self.stack.pop() + raise _Stop(value) + dispatch[STOP[0]] = load_stop + + +# Shorthands + +def _dump(obj, file, protocol=None, *, fix_imports=True, buffer_callback=None): + _Pickler(file, protocol, fix_imports=fix_imports, + buffer_callback=buffer_callback).dump(obj) + +def _dumps(obj, protocol=None, *, fix_imports=True, buffer_callback=None): + f = io.BytesIO() + _Pickler(f, protocol, fix_imports=fix_imports, + buffer_callback=buffer_callback).dump(obj) + res = f.getvalue() + assert isinstance(res, bytes_types) + return res + +def _load(file, *, fix_imports=True, encoding="ASCII", errors="strict", + buffers=None): + return _Unpickler(file, fix_imports=fix_imports, buffers=buffers, + encoding=encoding, errors=errors).load() + +def _loads(s, /, *, fix_imports=True, encoding="ASCII", errors="strict", + buffers=None): + if isinstance(s, str): + raise TypeError("Can't load pickle from unicode string") + file = io.BytesIO(s) + return _Unpickler(file, fix_imports=fix_imports, buffers=buffers, + encoding=encoding, errors=errors).load() + +# Use the faster _pickle if possible +try: + from _pickle import ( + PickleError, + PicklingError, + UnpicklingError, + Pickler, + Unpickler, + dump, + dumps, + load, + loads + ) +except ImportError: + Pickler, Unpickler = _Pickler, _Unpickler + dump, dumps, load, loads = _dump, _dumps, _load, _loads + + +def _main(args=None): + import argparse + import pprint + parser = argparse.ArgumentParser( + description='display contents of the pickle files', + color=True, + ) + parser.add_argument( + 'pickle_file', + nargs='+', help='the pickle file') + args = parser.parse_args(args) + for fn in args.pickle_file: + if fn == '-': + obj = load(sys.stdin.buffer) + else: + with open(fn, 'rb') as f: + obj = load(f) + pprint.pprint(obj) + + +if __name__ == "__main__": + _main() diff --git a/Python314_4_x64_Template/Lib/pickletools.py b/Python314_4_x64_Template/Lib/pickletools.py new file mode 100644 index 00000000..254b6c7f --- /dev/null +++ b/Python314_4_x64_Template/Lib/pickletools.py @@ -0,0 +1,2887 @@ +'''"Executable documentation" for the pickle module. 
+ +Extensive comments about the pickle protocols and pickle-machine opcodes +can be found here. Some functions meant for external use: + +genops(pickle) + Generate all the opcodes in a pickle, as (opcode, arg, position) triples. + +dis(pickle, out=None, memo=None, indentlevel=4) + Print a symbolic disassembly of a pickle. +''' + +import codecs +import io +import pickle +import re +import sys + +__all__ = ['dis', 'genops', 'optimize'] + +bytes_types = pickle.bytes_types + +# Other ideas: +# +# - A pickle verifier: read a pickle and check it exhaustively for +# well-formedness. dis() does a lot of this already. +# +# - A protocol identifier: examine a pickle and return its protocol number +# (== the highest .proto attr value among all the opcodes in the pickle). +# dis() already prints this info at the end. +# +# - A pickle optimizer: for example, tuple-building code is sometimes more +# elaborate than necessary, catering for the possibility that the tuple +# is recursive. Or lots of times a PUT is generated that's never accessed +# by a later GET. + + +# "A pickle" is a program for a virtual pickle machine (PM, but more accurately +# called an unpickling machine). It's a sequence of opcodes, interpreted by the +# PM, building an arbitrarily complex Python object. +# +# For the most part, the PM is very simple: there are no looping, testing, or +# conditional instructions, no arithmetic and no function calls. Opcodes are +# executed once each, from first to last, until a STOP opcode is reached. +# +# The PM has two data areas, "the stack" and "the memo". +# +# Many opcodes push Python objects onto the stack; e.g., INT pushes a Python +# integer object on the stack, whose value is gotten from a decimal string +# literal immediately following the INT opcode in the pickle bytestream. Other +# opcodes take Python objects off the stack. The result of unpickling is +# whatever object is left on the stack when the final STOP opcode is executed. +# +# The memo is simply an array of objects, or it can be implemented as a dict +# mapping little integers to objects. The memo serves as the PM's "long term +# memory", and the little integers indexing the memo are akin to variable +# names. Some opcodes pop a stack object into the memo at a given index, +# and others push a memo object at a given index onto the stack again. +# +# At heart, that's all the PM has. Subtleties arise for these reasons: +# +# + Object identity. Objects can be arbitrarily complex, and subobjects +# may be shared (for example, the list [a, a] refers to the same object a +# twice). It can be vital that unpickling recreate an isomorphic object +# graph, faithfully reproducing sharing. +# +# + Recursive objects. For example, after "L = []; L.append(L)", L is a +# list, and L[0] is the same list. This is related to the object identity +# point, and some sequences of pickle opcodes are subtle in order to +# get the right result in all cases. +# +# + Things pickle doesn't know everything about. Examples of things pickle +# does know everything about are Python's builtin scalar and container +# types, like ints and tuples. They generally have opcodes dedicated to +# them. For things like module references and instances of user-defined +# classes, pickle's knowledge is limited. Historically, many enhancements +# have been made to the pickle protocol in order to do a better (faster, +# and/or more compact) job on those. +# +# + Backward compatibility and micro-optimization. 
As explained below, +# pickle opcodes never go away, not even when better ways to do a thing +# get invented. The repertoire of the PM just keeps growing over time. +# For example, protocol 0 had two opcodes for building Python integers (INT +# and LONG), protocol 1 added three more for more-efficient pickling of short +# integers, and protocol 2 added two more for more-efficient pickling of +# long integers (before protocol 2, the only ways to pickle a Python long +# took time quadratic in the number of digits, for both pickling and +# unpickling). "Opcode bloat" isn't so much a subtlety as a source of +# wearying complication. +# +# +# Pickle protocols: +# +# For compatibility, the meaning of a pickle opcode never changes. Instead new +# pickle opcodes get added, and each version's unpickler can handle all the +# pickle opcodes in all protocol versions to date. So old pickles continue to +# be readable forever. The pickler can generally be told to restrict itself to +# the subset of opcodes available under previous protocol versions too, so that +# users can create pickles under the current version readable by older +# versions. However, a pickle does not contain its version number embedded +# within it. If an older unpickler tries to read a pickle using a later +# protocol, the result is most likely an exception due to seeing an unknown (in +# the older unpickler) opcode. +# +# The original pickle used what's now called "protocol 0", and what was called +# "text mode" before Python 2.3. The entire pickle bytestream is made up of +# printable 7-bit ASCII characters, plus the newline character, in protocol 0. +# That's why it was called text mode. Protocol 0 is small and elegant, but +# sometimes painfully inefficient. +# +# The second major set of additions is now called "protocol 1", and was called +# "binary mode" before Python 2.3. This added many opcodes with arguments +# consisting of arbitrary bytes, including NUL bytes and unprintable "high bit" +# bytes. Binary mode pickles can be substantially smaller than equivalent +# text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte +# int as 4 bytes following the opcode, which is cheaper to unpickle than the +# (perhaps) 11-character decimal string attached to INT. Protocol 1 also added +# a number of opcodes that operate on many stack elements at once (like APPENDS +# and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE). +# +# The third major set of additions came in Python 2.3, and is called "protocol +# 2". This added: +# +# - A better way to pickle instances of new-style classes (NEWOBJ). +# +# - A way for a pickle to identify its protocol (PROTO). +# +# - Time- and space- efficient pickling of long ints (LONG{1,4}). +# +# - Shortcuts for small tuples (TUPLE{1,2,3}}. +# +# - Dedicated opcodes for bools (NEWTRUE, NEWFALSE). +# +# - The "extension registry", a vector of popular objects that can be pushed +# efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but +# the registry contents are predefined (there's nothing akin to the memo's +# PUT). +# +# Another independent change with Python 2.3 is the abandonment of any +# pretense that it might be safe to load pickles received from untrusted +# parties -- no sufficient security analysis has been done to guarantee +# this and there isn't a use case that warrants the expense of such an +# analysis. +# +# To this end, all tests for __safe_for_unpickling__ or for +# copyreg.safe_constructors are removed from the unpickling code. 
+# References to these variables in the descriptions below are to be seen
+# as describing unpickling in Python 2.2 and before.
+
+
+# Meta-rule:  Descriptions are stored in instances of descriptor objects,
+# with plain constructors.  No meta-language is defined from which
+# descriptors could be constructed.  If you want, e.g., XML, write a little
+# program to generate XML from the objects.
+
+##############################################################################
+# Some pickle opcodes have an argument, following the opcode in the
+# bytestream.  An argument is of a specific type, described by an instance
+# of ArgumentDescriptor.  These are not to be confused with arguments taken
+# off the stack -- ArgumentDescriptor applies only to arguments embedded in
+# the opcode stream, immediately following an opcode.
+
+# Represents the number of bytes consumed by an argument delimited by the
+# next newline character.
+UP_TO_NEWLINE = -1
+
+# Represents the number of bytes consumed by a two-argument opcode where
+# the first argument gives the number of bytes in the second argument.
+TAKEN_FROM_ARGUMENT1  = -2   # num bytes is 1-byte unsigned int
+TAKEN_FROM_ARGUMENT4  = -3   # num bytes is 4-byte signed little-endian int
+TAKEN_FROM_ARGUMENT4U = -4   # num bytes is 4-byte unsigned little-endian int
+TAKEN_FROM_ARGUMENT8U = -5   # num bytes is 8-byte unsigned little-endian int
+
+class ArgumentDescriptor(object):
+    __slots__ = (
+        # name of descriptor record, also a module global name; a string
+        'name',
+
+        # length of argument, in bytes; an int; UP_TO_NEWLINE and
+        # TAKEN_FROM_ARGUMENT{1,4,8} are negative values for variable-length
+        # cases
+        'n',
+
+        # a function taking a file-like object, reading this kind of argument
+        # from the object at the current position, advancing the current
+        # position by n bytes, and returning the value of the argument
+        'reader',
+
+        # human-readable docs for this arg descriptor; a string
+        'doc',
+    )
+
+    def __init__(self, name, n, reader, doc):
+        assert isinstance(name, str)
+        self.name = name
+
+        assert isinstance(n, int) and (n >= 0 or
+                                       n in (UP_TO_NEWLINE,
+                                             TAKEN_FROM_ARGUMENT1,
+                                             TAKEN_FROM_ARGUMENT4,
+                                             TAKEN_FROM_ARGUMENT4U,
+                                             TAKEN_FROM_ARGUMENT8U))
+        self.n = n
+
+        self.reader = reader
+
+        assert isinstance(doc, str)
+        self.doc = doc
+
+from struct import unpack as _unpack
+
+def read_uint1(f):
+    r"""
+    >>> import io
+    >>> read_uint1(io.BytesIO(b'\xff'))
+    255
+    """
+
+    data = f.read(1)
+    if data:
+        return data[0]
+    raise ValueError("not enough data in stream to read uint1")
+
+uint1 = ArgumentDescriptor(
+            name='uint1',
+            n=1,
+            reader=read_uint1,
+            doc="One-byte unsigned integer.")
+
+
+def read_uint2(f):
+    r"""
+    >>> import io
+    >>> read_uint2(io.BytesIO(b'\xff\x00'))
+    255
+    >>> read_uint2(io.BytesIO(b'\xff\xff'))
+    65535
+    """
+
+    data = f.read(2)
+    if len(data) == 2:
+        return _unpack("<H", data)[0]
+    raise ValueError("not enough data in stream to read uint2")
+
+uint2 = ArgumentDescriptor(
+            name='uint2',
+            n=2,
+            reader=read_uint2,
+            doc="Two-byte unsigned integer, little-endian.")
+
+
+def read_int4(f):
+    r"""
+    >>> import io
+    >>> read_int4(io.BytesIO(b'\xff\x00\x00\x00'))
+    255
+    >>> read_int4(io.BytesIO(b'\x00\x00\x00\x80')) == -(2**31)
+    True
+    """
+
+    data = f.read(4)
+    if len(data) == 4:
+        return _unpack("<i", data)[0]
+    raise ValueError("not enough data in stream to read int4")
+
+int4 = ArgumentDescriptor(
+           name='int4',
+           n=4,
+           reader=read_int4,
+           doc="Four-byte signed integer, little-endian.")
+
+
+def read_uint4(f):
+    r"""
+    >>> import io
+    >>> read_uint4(io.BytesIO(b'\xff\x00\x00\x00'))
+    255
+    >>> read_uint4(io.BytesIO(b'\x00\x00\x00\x80')) == 2**31
+    True
+    """
+
+    data = f.read(4)
+    if len(data) == 4:
+        return _unpack("<I", data)[0]
+    raise ValueError("not enough data in stream to read uint4")
+
+uint4 = ArgumentDescriptor(
+            name='uint4',
+            n=4,
+            reader=read_uint4,
+            doc="Four-byte unsigned integer, little-endian.")
+
+
+def read_uint8(f):
+    r"""
+    >>> import io
+    >>> read_uint8(io.BytesIO(b'\xff\x00\x00\x00\x00\x00\x00\x00'))
+    255
+    >>> read_uint8(io.BytesIO(b'\xff' * 8)) == 2**64-1
+    True
+    """
+
+    data = f.read(8)
+    if len(data) == 8:
+        return _unpack("<Q", data)[0]
+    raise ValueError("not enough data in stream to read uint8")
+
+uint8 = ArgumentDescriptor(
+            name='uint8',
+            n=8,
+            reader=read_uint8,
+            doc="Eight-byte unsigned integer, little-endian.")
+
+
+def read_stringnl(f, decode=True, stripquotes=True, *, encoding='latin-1'):
+    r"""
+    >>> import io
+    >>>
read_stringnl(io.BytesIO(b"'abcd'\nefg\n")) + 'abcd' + + >>> read_stringnl(io.BytesIO(b"\n")) + Traceback (most recent call last): + ... + ValueError: no string quotes around b'' + + >>> read_stringnl(io.BytesIO(b"\n"), stripquotes=False) + '' + + >>> read_stringnl(io.BytesIO(b"''\n")) + '' + + >>> read_stringnl(io.BytesIO(b'"abcd"')) + Traceback (most recent call last): + ... + ValueError: no newline found when trying to read stringnl + + Embedded escapes are undone in the result. + >>> read_stringnl(io.BytesIO(br"'a\n\\b\x00c\td'" + b"\n'e'")) + 'a\n\\b\x00c\td' + """ + + data = f.readline() + if not data.endswith(b'\n'): + raise ValueError("no newline found when trying to read stringnl") + data = data[:-1] # lose the newline + + if stripquotes: + for q in (b'"', b"'"): + if data.startswith(q): + if not data.endswith(q): + raise ValueError("string quote %r not found at both " + "ends of %r" % (q, data)) + data = data[1:-1] + break + else: + raise ValueError("no string quotes around %r" % data) + + if decode: + data = codecs.escape_decode(data)[0].decode(encoding) + return data + +stringnl = ArgumentDescriptor( + name='stringnl', + n=UP_TO_NEWLINE, + reader=read_stringnl, + doc="""A newline-terminated string. + + This is a repr-style string, with embedded escapes, and + bracketing quotes. + """) + +def read_stringnl_noescape(f): + return read_stringnl(f, stripquotes=False, encoding='utf-8') + +stringnl_noescape = ArgumentDescriptor( + name='stringnl_noescape', + n=UP_TO_NEWLINE, + reader=read_stringnl_noescape, + doc="""A newline-terminated string. + + This is a str-style string, without embedded escapes, + or bracketing quotes. It should consist solely of + printable ASCII characters. + """) + +def read_stringnl_noescape_pair(f): + r""" + >>> import io + >>> read_stringnl_noescape_pair(io.BytesIO(b"Queue\nEmpty\njunk")) + 'Queue Empty' + """ + + return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f)) + +stringnl_noescape_pair = ArgumentDescriptor( + name='stringnl_noescape_pair', + n=UP_TO_NEWLINE, + reader=read_stringnl_noescape_pair, + doc="""A pair of newline-terminated strings. + + These are str-style strings, without embedded + escapes, or bracketing quotes. They should + consist solely of printable ASCII characters. + The pair is returned as a single string, with + a single blank separating the two strings. + """) + + +def read_string1(f): + r""" + >>> import io + >>> read_string1(io.BytesIO(b"\x00")) + '' + >>> read_string1(io.BytesIO(b"\x03abcdef")) + 'abc' + """ + + n = read_uint1(f) + assert n >= 0 + data = f.read(n) + if len(data) == n: + return data.decode("latin-1") + raise ValueError("expected %d bytes in a string1, but only %d remain" % + (n, len(data))) + +string1 = ArgumentDescriptor( + name="string1", + n=TAKEN_FROM_ARGUMENT1, + reader=read_string1, + doc="""A counted string. + + The first argument is a 1-byte unsigned int giving the number + of bytes in the string, and the second argument is that many + bytes. + """) + + +def read_string4(f): + r""" + >>> import io + >>> read_string4(io.BytesIO(b"\x00\x00\x00\x00abc")) + '' + >>> read_string4(io.BytesIO(b"\x03\x00\x00\x00abcdef")) + 'abc' + >>> read_string4(io.BytesIO(b"\x00\x00\x00\x03abcdef")) + Traceback (most recent call last): + ... 
+    ValueError: expected 50331648 bytes in a string4, but only 6 remain
+    """
+
+    n = read_int4(f)
+    if n < 0:
+        raise ValueError("string4 byte count < 0: %d" % n)
+    data = f.read(n)
+    if len(data) == n:
+        return data.decode("latin-1")
+    raise ValueError("expected %d bytes in a string4, but only %d remain" %
+                     (n, len(data)))
+
+string4 = ArgumentDescriptor(
+              name="string4",
+              n=TAKEN_FROM_ARGUMENT4,
+              reader=read_string4,
+              doc="""A counted string.
+
+              The first argument is a 4-byte little-endian signed int giving
+              the number of bytes in the string, and the second argument is
+              that many bytes.
+              """)
+
+
+def read_bytes1(f):
+    r"""
+    >>> import io
+    >>> read_bytes1(io.BytesIO(b"\x00"))
+    b''
+    >>> read_bytes1(io.BytesIO(b"\x03abcdef"))
+    b'abc'
+    """
+
+    n = read_uint1(f)
+    assert n >= 0
+    data = f.read(n)
+    if len(data) == n:
+        return data
+    raise ValueError("expected %d bytes in a bytes1, but only %d remain" %
+                     (n, len(data)))
+
+bytes1 = ArgumentDescriptor(
+              name="bytes1",
+              n=TAKEN_FROM_ARGUMENT1,
+              reader=read_bytes1,
+              doc="""A counted bytes string.
+
+              The first argument is a 1-byte unsigned int giving the number
+              of bytes, and the second argument is that many bytes.
+              """)
+
+
+def read_bytes4(f):
+    r"""
+    >>> import io
+    >>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x00abc"))
+    b''
+    >>> read_bytes4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
+    b'abc'
+    >>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
+    Traceback (most recent call last):
+    ...
+    ValueError: expected 50331648 bytes in a bytes4, but only 6 remain
+    """
+
+    n = read_uint4(f)
+    assert n >= 0
+    if n > sys.maxsize:
+        raise ValueError("bytes4 byte count > sys.maxsize: %d" % n)
+    data = f.read(n)
+    if len(data) == n:
+        return data
+    raise ValueError("expected %d bytes in a bytes4, but only %d remain" %
+                     (n, len(data)))
+
+bytes4 = ArgumentDescriptor(
+              name="bytes4",
+              n=TAKEN_FROM_ARGUMENT4U,
+              reader=read_bytes4,
+              doc="""A counted bytes string.
+
+              The first argument is a 4-byte little-endian unsigned int giving
+              the number of bytes, and the second argument is that many bytes.
+              """)
+
+
+def read_bytes8(f):
+    r"""
+    >>> import io, struct, sys
+    >>> read_bytes8(io.BytesIO(b"\x00\x00\x00\x00\x00\x00\x00\x00abc"))
+    b''
+    >>> read_bytes8(io.BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00abcdef"))
+    b'abc'
+    >>> bigsize8 = struct.pack("<Q", sys.maxsize//3)
+    >>> read_bytes8(io.BytesIO(bigsize8 + b"abcdef"))  #doctest: +ELLIPSIS
+    Traceback (most recent call last):
+    ...
+    ValueError: expected ... bytes in a bytes8, but only 6 remain
+    """
+
+    n = read_uint8(f)
+    assert n >= 0
+    if n > sys.maxsize:
+        raise ValueError("bytes8 byte count > sys.maxsize: %d" % n)
+    data = f.read(n)
+    if len(data) == n:
+        return data
+    raise ValueError("expected %d bytes in a bytes8, but only %d remain" %
+                     (n, len(data)))
+
+bytes8 = ArgumentDescriptor(
+              name="bytes8",
+              n=TAKEN_FROM_ARGUMENT8U,
+              reader=read_bytes8,
+              doc="""A counted bytes string.
+
+              The first argument is an 8-byte little-endian unsigned int giving
+              the number of bytes, and the second argument is that many bytes.
+              """)
+
+
+def read_bytearray8(f):
+    r"""
+    >>> import io, struct, sys
+    >>> read_bytearray8(io.BytesIO(b"\x00\x00\x00\x00\x00\x00\x00\x00abc"))
+    bytearray(b'')
+    >>> read_bytearray8(io.BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00abcdef"))
+    bytearray(b'abc')
+    >>> bigsize8 = struct.pack("<Q", sys.maxsize//3)
+    >>> read_bytearray8(io.BytesIO(bigsize8 + b"abcdef"))  #doctest: +ELLIPSIS
+    Traceback (most recent call last):
+    ...
+    ValueError: expected ...
bytes in a bytearray8, but only 6 remain + """ + + n = read_uint8(f) + assert n >= 0 + if n > sys.maxsize: + raise ValueError("bytearray8 byte count > sys.maxsize: %d" % n) + data = f.read(n) + if len(data) == n: + return bytearray(data) + raise ValueError("expected %d bytes in a bytearray8, but only %d remain" % + (n, len(data))) + +bytearray8 = ArgumentDescriptor( + name="bytearray8", + n=TAKEN_FROM_ARGUMENT8U, + reader=read_bytearray8, + doc="""A counted bytearray. + + The first argument is an 8-byte little-endian unsigned int giving + the number of bytes, and the second argument is that many bytes. + """) + +def read_unicodestringnl(f): + r""" + >>> import io + >>> read_unicodestringnl(io.BytesIO(b"abc\\uabcd\njunk")) == 'abc\uabcd' + True + """ + + data = f.readline() + if not data.endswith(b'\n'): + raise ValueError("no newline found when trying to read " + "unicodestringnl") + data = data[:-1] # lose the newline + return str(data, 'raw-unicode-escape') + +unicodestringnl = ArgumentDescriptor( + name='unicodestringnl', + n=UP_TO_NEWLINE, + reader=read_unicodestringnl, + doc="""A newline-terminated Unicode string. + + This is raw-unicode-escape encoded, so consists of + printable ASCII characters, and may contain embedded + escape sequences. + """) + + +def read_unicodestring1(f): + r""" + >>> import io + >>> s = 'abcd\uabcd' + >>> enc = s.encode('utf-8') + >>> enc + b'abcd\xea\xaf\x8d' + >>> n = bytes([len(enc)]) # little-endian 1-byte length + >>> t = read_unicodestring1(io.BytesIO(n + enc + b'junk')) + >>> s == t + True + + >>> read_unicodestring1(io.BytesIO(n + enc[:-1])) + Traceback (most recent call last): + ... + ValueError: expected 7 bytes in a unicodestring1, but only 6 remain + """ + + n = read_uint1(f) + assert n >= 0 + data = f.read(n) + if len(data) == n: + return str(data, 'utf-8', 'surrogatepass') + raise ValueError("expected %d bytes in a unicodestring1, but only %d " + "remain" % (n, len(data))) + +unicodestring1 = ArgumentDescriptor( + name="unicodestring1", + n=TAKEN_FROM_ARGUMENT1, + reader=read_unicodestring1, + doc="""A counted Unicode string. + + The first argument is a 1-byte little-endian signed int + giving the number of bytes in the string, and the second + argument-- the UTF-8 encoding of the Unicode string -- + contains that many bytes. + """) + + +def read_unicodestring4(f): + r""" + >>> import io + >>> s = 'abcd\uabcd' + >>> enc = s.encode('utf-8') + >>> enc + b'abcd\xea\xaf\x8d' + >>> n = bytes([len(enc), 0, 0, 0]) # little-endian 4-byte length + >>> t = read_unicodestring4(io.BytesIO(n + enc + b'junk')) + >>> s == t + True + + >>> read_unicodestring4(io.BytesIO(n + enc[:-1])) + Traceback (most recent call last): + ... + ValueError: expected 7 bytes in a unicodestring4, but only 6 remain + """ + + n = read_uint4(f) + assert n >= 0 + if n > sys.maxsize: + raise ValueError("unicodestring4 byte count > sys.maxsize: %d" % n) + data = f.read(n) + if len(data) == n: + return str(data, 'utf-8', 'surrogatepass') + raise ValueError("expected %d bytes in a unicodestring4, but only %d " + "remain" % (n, len(data))) + +unicodestring4 = ArgumentDescriptor( + name="unicodestring4", + n=TAKEN_FROM_ARGUMENT4U, + reader=read_unicodestring4, + doc="""A counted Unicode string. + + The first argument is a 4-byte little-endian signed int + giving the number of bytes in the string, and the second + argument-- the UTF-8 encoding of the Unicode string -- + contains that many bytes. 
+ """) + + +def read_unicodestring8(f): + r""" + >>> import io + >>> s = 'abcd\uabcd' + >>> enc = s.encode('utf-8') + >>> enc + b'abcd\xea\xaf\x8d' + >>> n = bytes([len(enc)]) + b'\0' * 7 # little-endian 8-byte length + >>> t = read_unicodestring8(io.BytesIO(n + enc + b'junk')) + >>> s == t + True + + >>> read_unicodestring8(io.BytesIO(n + enc[:-1])) + Traceback (most recent call last): + ... + ValueError: expected 7 bytes in a unicodestring8, but only 6 remain + """ + + n = read_uint8(f) + assert n >= 0 + if n > sys.maxsize: + raise ValueError("unicodestring8 byte count > sys.maxsize: %d" % n) + data = f.read(n) + if len(data) == n: + return str(data, 'utf-8', 'surrogatepass') + raise ValueError("expected %d bytes in a unicodestring8, but only %d " + "remain" % (n, len(data))) + +unicodestring8 = ArgumentDescriptor( + name="unicodestring8", + n=TAKEN_FROM_ARGUMENT8U, + reader=read_unicodestring8, + doc="""A counted Unicode string. + + The first argument is an 8-byte little-endian signed int + giving the number of bytes in the string, and the second + argument-- the UTF-8 encoding of the Unicode string -- + contains that many bytes. + """) + + +def read_decimalnl_short(f): + r""" + >>> import io + >>> read_decimalnl_short(io.BytesIO(b"1234\n56")) + 1234 + + >>> read_decimalnl_short(io.BytesIO(b"1234L\n56")) + Traceback (most recent call last): + ... + ValueError: invalid literal for int() with base 10: b'1234L' + """ + + s = read_stringnl(f, decode=False, stripquotes=False) + + # There's a hack for True and False here. + if s == b"00": + return False + elif s == b"01": + return True + + return int(s) + +def read_decimalnl_long(f): + r""" + >>> import io + + >>> read_decimalnl_long(io.BytesIO(b"1234L\n56")) + 1234 + + >>> read_decimalnl_long(io.BytesIO(b"123456789012345678901234L\n6")) + 123456789012345678901234 + """ + + s = read_stringnl(f, decode=False, stripquotes=False) + if s[-1:] == b'L': + s = s[:-1] + return int(s) + + +decimalnl_short = ArgumentDescriptor( + name='decimalnl_short', + n=UP_TO_NEWLINE, + reader=read_decimalnl_short, + doc="""A newline-terminated decimal integer literal. + + This never has a trailing 'L', and the integer fit + in a short Python int on the box where the pickle + was written -- but there's no guarantee it will fit + in a short Python int on the box where the pickle + is read. + """) + +decimalnl_long = ArgumentDescriptor( + name='decimalnl_long', + n=UP_TO_NEWLINE, + reader=read_decimalnl_long, + doc="""A newline-terminated decimal integer literal. + + This has a trailing 'L', and can represent integers + of any size. + """) + + +def read_floatnl(f): + r""" + >>> import io + >>> read_floatnl(io.BytesIO(b"-1.25\n6")) + -1.25 + """ + s = read_stringnl(f, decode=False, stripquotes=False) + return float(s) + +floatnl = ArgumentDescriptor( + name='floatnl', + n=UP_TO_NEWLINE, + reader=read_floatnl, + doc="""A newline-terminated decimal floating literal. + + In general this requires 17 significant digits for roundtrip + identity, and pickling then unpickling infinities, NaNs, and + minus zero doesn't work across boxes, or on some boxes even + on itself (e.g., Windows can't read the strings it produces + for infinities or NaNs). 
+ """) + +def read_float8(f): + r""" + >>> import io, struct + >>> raw = struct.pack(">d", -1.25) + >>> raw + b'\xbf\xf4\x00\x00\x00\x00\x00\x00' + >>> read_float8(io.BytesIO(raw + b"\n")) + -1.25 + """ + + data = f.read(8) + if len(data) == 8: + return _unpack(">d", data)[0] + raise ValueError("not enough data in stream to read float8") + + +float8 = ArgumentDescriptor( + name='float8', + n=8, + reader=read_float8, + doc="""An 8-byte binary representation of a float, big-endian. + + The format is unique to Python, and shared with the struct + module (format string '>d') "in theory" (the struct and pickle + implementations don't share the code -- they should). It's + strongly related to the IEEE-754 double format, and, in normal + cases, is in fact identical to the big-endian 754 double format. + On other boxes the dynamic range is limited to that of a 754 + double, and "add a half and chop" rounding is used to reduce + the precision to 53 bits. However, even on a 754 box, + infinities, NaNs, and minus zero may not be handled correctly + (may not survive roundtrip pickling intact). + """) + +# Protocol 2 formats + +from pickle import decode_long + +def read_long1(f): + r""" + >>> import io + >>> read_long1(io.BytesIO(b"\x00")) + 0 + >>> read_long1(io.BytesIO(b"\x02\xff\x00")) + 255 + >>> read_long1(io.BytesIO(b"\x02\xff\x7f")) + 32767 + >>> read_long1(io.BytesIO(b"\x02\x00\xff")) + -256 + >>> read_long1(io.BytesIO(b"\x02\x00\x80")) + -32768 + """ + + n = read_uint1(f) + data = f.read(n) + if len(data) != n: + raise ValueError("not enough data in stream to read long1") + return decode_long(data) + +long1 = ArgumentDescriptor( + name="long1", + n=TAKEN_FROM_ARGUMENT1, + reader=read_long1, + doc="""A binary long, little-endian, using 1-byte size. + + This first reads one byte as an unsigned size, then reads that + many bytes and interprets them as a little-endian 2's-complement long. + If the size is 0, that's taken as a shortcut for the long 0L. + """) + +def read_long4(f): + r""" + >>> import io + >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x00")) + 255 + >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x7f")) + 32767 + >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\xff")) + -256 + >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\x80")) + -32768 + >>> read_long1(io.BytesIO(b"\x00\x00\x00\x00")) + 0 + """ + + n = read_int4(f) + if n < 0: + raise ValueError("long4 byte count < 0: %d" % n) + data = f.read(n) + if len(data) != n: + raise ValueError("not enough data in stream to read long4") + return decode_long(data) + +long4 = ArgumentDescriptor( + name="long4", + n=TAKEN_FROM_ARGUMENT4, + reader=read_long4, + doc="""A binary representation of a long, little-endian. + + This first reads four bytes as a signed size (but requires the + size to be >= 0), then reads that many bytes and interprets them + as a little-endian 2's-complement long. If the size is 0, that's taken + as a shortcut for the int 0, although LONG1 should really be used + then instead (and in any case where # of bytes < 256). + """) + + +############################################################################## +# Object descriptors. The stack used by the pickle machine holds objects, +# and in the stack_before and stack_after attributes of OpcodeInfo +# descriptors we need names to describe the various types of objects that can +# appear on the stack. 
+ +class StackObject(object): + __slots__ = ( + # name of descriptor record, for info only + 'name', + + # type of object, or tuple of type objects (meaning the object can + # be of any type in the tuple) + 'obtype', + + # human-readable docs for this kind of stack object; a string + 'doc', + ) + + def __init__(self, name, obtype, doc): + assert isinstance(name, str) + self.name = name + + assert isinstance(obtype, type) or isinstance(obtype, tuple) + if isinstance(obtype, tuple): + for contained in obtype: + assert isinstance(contained, type) + self.obtype = obtype + + assert isinstance(doc, str) + self.doc = doc + + def __repr__(self): + return self.name + + +pyint = pylong = StackObject( + name='int', + obtype=int, + doc="A Python integer object.") + +pyinteger_or_bool = StackObject( + name='int_or_bool', + obtype=(int, bool), + doc="A Python integer or boolean object.") + +pybool = StackObject( + name='bool', + obtype=bool, + doc="A Python boolean object.") + +pyfloat = StackObject( + name='float', + obtype=float, + doc="A Python float object.") + +pybytes_or_str = pystring = StackObject( + name='bytes_or_str', + obtype=(bytes, str), + doc="A Python bytes or (Unicode) string object.") + +pybytes = StackObject( + name='bytes', + obtype=bytes, + doc="A Python bytes object.") + +pybytearray = StackObject( + name='bytearray', + obtype=bytearray, + doc="A Python bytearray object.") + +pyunicode = StackObject( + name='str', + obtype=str, + doc="A Python (Unicode) string object.") + +pynone = StackObject( + name="None", + obtype=type(None), + doc="The Python None object.") + +pytuple = StackObject( + name="tuple", + obtype=tuple, + doc="A Python tuple object.") + +pylist = StackObject( + name="list", + obtype=list, + doc="A Python list object.") + +pydict = StackObject( + name="dict", + obtype=dict, + doc="A Python dict object.") + +pyset = StackObject( + name="set", + obtype=set, + doc="A Python set object.") + +pyfrozenset = StackObject( + name="frozenset", + obtype=set, + doc="A Python frozenset object.") + +pybuffer = StackObject( + name='buffer', + obtype=object, + doc="A Python buffer-like object.") + +anyobject = StackObject( + name='any', + obtype=object, + doc="Any kind of object whatsoever.") + +markobject = StackObject( + name="mark", + obtype=StackObject, + doc="""'The mark' is a unique object. + +Opcodes that operate on a variable number of objects +generally don't embed the count of objects in the opcode, +or pull it off the stack. Instead the MARK opcode is used +to push a special marker object on the stack, and then +some other opcodes grab all the objects from the top of +the stack down to (but not including) the topmost marker +object. +""") + +stackslice = StackObject( + name="stackslice", + obtype=StackObject, + doc="""An object representing a contiguous slice of the stack. + +This is used in conjunction with markobject, to represent all +of the stack following the topmost markobject. For example, +the POP_MARK opcode changes the stack from + + [..., markobject, stackslice] +to + [...] + +No matter how many object are on the stack after the topmost +markobject, POP_MARK gets rid of all of them (including the +topmost markobject too). +""") + +############################################################################## +# Descriptors for pickle opcodes. 
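+# Illustrative note (code2op is built further below in this module): every
+# OpcodeInfo pairs a one-byte code with an optional embedded-argument reader,
+# so a generic loop of the form
+#
+#     opcode = code2op[stream_byte]
+#     arg = None if opcode.arg is None else opcode.arg.reader(f)
+#
+# is enough to walk any pickle; genops() is essentially this loop.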
+ +class OpcodeInfo(object): + + __slots__ = ( + # symbolic name of opcode; a string + 'name', + + # the code used in a bytestream to represent the opcode; a + # one-character string + 'code', + + # If the opcode has an argument embedded in the byte string, an + # instance of ArgumentDescriptor specifying its type. Note that + # arg.reader(s) can be used to read and decode the argument from + # the bytestream s, and arg.doc documents the format of the raw + # argument bytes. If the opcode doesn't have an argument embedded + # in the bytestream, arg should be None. + 'arg', + + # what the stack looks like before this opcode runs; a list + 'stack_before', + + # what the stack looks like after this opcode runs; a list + 'stack_after', + + # the protocol number in which this opcode was introduced; an int + 'proto', + + # human-readable docs for this opcode; a string + 'doc', + ) + + def __init__(self, name, code, arg, + stack_before, stack_after, proto, doc): + assert isinstance(name, str) + self.name = name + + assert isinstance(code, str) + assert len(code) == 1 + self.code = code + + assert arg is None or isinstance(arg, ArgumentDescriptor) + self.arg = arg + + assert isinstance(stack_before, list) + for x in stack_before: + assert isinstance(x, StackObject) + self.stack_before = stack_before + + assert isinstance(stack_after, list) + for x in stack_after: + assert isinstance(x, StackObject) + self.stack_after = stack_after + + assert isinstance(proto, int) and 0 <= proto <= pickle.HIGHEST_PROTOCOL + self.proto = proto + + assert isinstance(doc, str) + self.doc = doc + +I = OpcodeInfo +opcodes = [ + + # Ways to spell integers. + + I(name='INT', + code='I', + arg=decimalnl_short, + stack_before=[], + stack_after=[pyinteger_or_bool], + proto=0, + doc="""Push an integer or bool. + + The argument is a newline-terminated decimal literal string. + + The intent may have been that this always fit in a short Python int, + but INT can be generated in pickles written on a 64-bit box that + require a Python long on a 32-bit box. The difference between this + and LONG then is that INT skips a trailing 'L', and produces a short + int whenever possible. + + Another difference is due to that, when bool was introduced as a + distinct type in 2.3, builtin names True and False were also added to + 2.2.2, mapping to ints 1 and 0. For compatibility in both directions, + True gets pickled as INT + "I01\\n", and False as INT + "I00\\n". + Leading zeroes are never produced for a genuine integer. The 2.3 + (and later) unpicklers special-case these and return bool instead; + earlier unpicklers ignore the leading "0" and return the int. + """), + + I(name='BININT', + code='J', + arg=int4, + stack_before=[], + stack_after=[pyint], + proto=1, + doc="""Push a four-byte signed integer. + + This handles the full range of Python (short) integers on a 32-bit + box, directly as binary bytes (1 for the opcode and 4 for the integer). + If the integer is non-negative and fits in 1 or 2 bytes, pickling via + BININT1 or BININT2 saves space. + """), + + I(name='BININT1', + code='K', + arg=uint1, + stack_before=[], + stack_after=[pyint], + proto=1, + doc="""Push a one-byte unsigned integer. + + This is a space optimization for pickling very small non-negative ints, + in range(256). + """), + + I(name='BININT2', + code='M', + arg=uint2, + stack_before=[], + stack_after=[pyint], + proto=1, + doc="""Push a two-byte unsigned integer. + + This is a space optimization for pickling small positive ints, in + range(256, 2**16). 
Integers in range(256) can also be pickled via + BININT2, but BININT1 instead saves a byte. + """), + + I(name='LONG', + code='L', + arg=decimalnl_long, + stack_before=[], + stack_after=[pyint], + proto=0, + doc="""Push a long integer. + + The same as INT, except that the literal ends with 'L', and always + unpickles to a Python long. There doesn't seem a real purpose to the + trailing 'L'. + + Note that LONG takes time quadratic in the number of digits when + unpickling (this is simply due to the nature of decimal->binary + conversion). Proto 2 added linear-time (in C; still quadratic-time + in Python) LONG1 and LONG4 opcodes. + """), + + I(name="LONG1", + code='\x8a', + arg=long1, + stack_before=[], + stack_after=[pyint], + proto=2, + doc="""Long integer using one-byte length. + + A more efficient encoding of a Python long; the long1 encoding + says it all."""), + + I(name="LONG4", + code='\x8b', + arg=long4, + stack_before=[], + stack_after=[pyint], + proto=2, + doc="""Long integer using four-byte length. + + A more efficient encoding of a Python long; the long4 encoding + says it all."""), + + # Ways to spell strings (8-bit, not Unicode). + + I(name='STRING', + code='S', + arg=stringnl, + stack_before=[], + stack_after=[pybytes_or_str], + proto=0, + doc="""Push a Python string object. + + The argument is a repr-style string, with bracketing quote characters, + and perhaps embedded escapes. The argument extends until the next + newline character. These are usually decoded into a str instance + using the encoding given to the Unpickler constructor. or the default, + 'ASCII'. If the encoding given was 'bytes' however, they will be + decoded as bytes object instead. + """), + + I(name='BINSTRING', + code='T', + arg=string4, + stack_before=[], + stack_after=[pybytes_or_str], + proto=1, + doc="""Push a Python string object. + + There are two arguments: the first is a 4-byte little-endian + signed int giving the number of bytes in the string, and the + second is that many bytes, which are taken literally as the string + content. These are usually decoded into a str instance using the + encoding given to the Unpickler constructor. or the default, + 'ASCII'. If the encoding given was 'bytes' however, they will be + decoded as bytes object instead. + """), + + I(name='SHORT_BINSTRING', + code='U', + arg=string1, + stack_before=[], + stack_after=[pybytes_or_str], + proto=1, + doc="""Push a Python string object. + + There are two arguments: the first is a 1-byte unsigned int giving + the number of bytes in the string, and the second is that many + bytes, which are taken literally as the string content. These are + usually decoded into a str instance using the encoding given to + the Unpickler constructor. or the default, 'ASCII'. If the + encoding given was 'bytes' however, they will be decoded as bytes + object instead. + """), + + # Bytes (protocol 3 and higher) + + I(name='BINBYTES', + code='B', + arg=bytes4, + stack_before=[], + stack_after=[pybytes], + proto=3, + doc="""Push a Python bytes object. + + There are two arguments: the first is a 4-byte little-endian unsigned int + giving the number of bytes, and the second is that many bytes, which are + taken literally as the bytes content. + """), + + I(name='SHORT_BINBYTES', + code='C', + arg=bytes1, + stack_before=[], + stack_after=[pybytes], + proto=3, + doc="""Push a Python bytes object. 
+ + There are two arguments: the first is a 1-byte unsigned int giving + the number of bytes, and the second is that many bytes, which are taken + literally as the string content. + """), + + I(name='BINBYTES8', + code='\x8e', + arg=bytes8, + stack_before=[], + stack_after=[pybytes], + proto=4, + doc="""Push a Python bytes object. + + There are two arguments: the first is an 8-byte unsigned int giving + the number of bytes in the string, and the second is that many bytes, + which are taken literally as the string content. + """), + + # Bytearray (protocol 5 and higher) + + I(name='BYTEARRAY8', + code='\x96', + arg=bytearray8, + stack_before=[], + stack_after=[pybytearray], + proto=5, + doc="""Push a Python bytearray object. + + There are two arguments: the first is an 8-byte unsigned int giving + the number of bytes in the bytearray, and the second is that many bytes, + which are taken literally as the bytearray content. + """), + + # Out-of-band buffer (protocol 5 and higher) + + I(name='NEXT_BUFFER', + code='\x97', + arg=None, + stack_before=[], + stack_after=[pybuffer], + proto=5, + doc="Push an out-of-band buffer object."), + + I(name='READONLY_BUFFER', + code='\x98', + arg=None, + stack_before=[pybuffer], + stack_after=[pybuffer], + proto=5, + doc="Make an out-of-band buffer object read-only."), + + # Ways to spell None. + + I(name='NONE', + code='N', + arg=None, + stack_before=[], + stack_after=[pynone], + proto=0, + doc="Push None on the stack."), + + # Ways to spell bools, starting with proto 2. See INT for how this was + # done before proto 2. + + I(name='NEWTRUE', + code='\x88', + arg=None, + stack_before=[], + stack_after=[pybool], + proto=2, + doc="Push True onto the stack."), + + I(name='NEWFALSE', + code='\x89', + arg=None, + stack_before=[], + stack_after=[pybool], + proto=2, + doc="Push False onto the stack."), + + # Ways to spell Unicode strings. + + I(name='UNICODE', + code='V', + arg=unicodestringnl, + stack_before=[], + stack_after=[pyunicode], + proto=0, # this may be pure-text, but it's a later addition + doc="""Push a Python Unicode string object. + + The argument is a raw-unicode-escape encoding of a Unicode string, + and so may contain embedded escape sequences. The argument extends + until the next newline character. + """), + + I(name='SHORT_BINUNICODE', + code='\x8c', + arg=unicodestring1, + stack_before=[], + stack_after=[pyunicode], + proto=4, + doc="""Push a Python Unicode string object. + + There are two arguments: the first is a 1-byte little-endian signed int + giving the number of bytes in the string. The second is that many + bytes, and is the UTF-8 encoding of the Unicode string. + """), + + I(name='BINUNICODE', + code='X', + arg=unicodestring4, + stack_before=[], + stack_after=[pyunicode], + proto=1, + doc="""Push a Python Unicode string object. + + There are two arguments: the first is a 4-byte little-endian unsigned int + giving the number of bytes in the string. The second is that many + bytes, and is the UTF-8 encoding of the Unicode string. + """), + + I(name='BINUNICODE8', + code='\x8d', + arg=unicodestring8, + stack_before=[], + stack_after=[pyunicode], + proto=4, + doc="""Push a Python Unicode string object. + + There are two arguments: the first is an 8-byte little-endian signed int + giving the number of bytes in the string. The second is that many + bytes, and is the UTF-8 encoding of the Unicode string. + """), + + # Ways to spell floats. 
+ + I(name='FLOAT', + code='F', + arg=floatnl, + stack_before=[], + stack_after=[pyfloat], + proto=0, + doc="""Newline-terminated decimal float literal. + + The argument is repr(a_float), and in general requires 17 significant + digits for roundtrip conversion to be an identity (this is so for + IEEE-754 double precision values, which is what Python float maps to + on most boxes). + + In general, FLOAT cannot be used to transport infinities, NaNs, or + minus zero across boxes (or even on a single box, if the platform C + library can't read the strings it produces for such things -- Windows + is like that), but may do less damage than BINFLOAT on boxes with + greater precision or dynamic range than IEEE-754 double. + """), + + I(name='BINFLOAT', + code='G', + arg=float8, + stack_before=[], + stack_after=[pyfloat], + proto=1, + doc="""Float stored in binary form, with 8 bytes of data. + + This generally requires less than half the space of FLOAT encoding. + In general, BINFLOAT cannot be used to transport infinities, NaNs, or + minus zero, raises an exception if the exponent exceeds the range of + an IEEE-754 double, and retains no more than 53 bits of precision (if + there are more than that, "add a half and chop" rounding is used to + cut it back to 53 significant bits). + """), + + # Ways to build lists. + + I(name='EMPTY_LIST', + code=']', + arg=None, + stack_before=[], + stack_after=[pylist], + proto=1, + doc="Push an empty list."), + + I(name='APPEND', + code='a', + arg=None, + stack_before=[pylist, anyobject], + stack_after=[pylist], + proto=0, + doc="""Append an object to a list. + + Stack before: ... pylist anyobject + Stack after: ... pylist+[anyobject] + + although pylist is really extended in-place. + """), + + I(name='APPENDS', + code='e', + arg=None, + stack_before=[pylist, markobject, stackslice], + stack_after=[pylist], + proto=1, + doc="""Extend a list by a slice of stack objects. + + Stack before: ... pylist markobject stackslice + Stack after: ... pylist+stackslice + + although pylist is really extended in-place. + """), + + I(name='LIST', + code='l', + arg=None, + stack_before=[markobject, stackslice], + stack_after=[pylist], + proto=0, + doc="""Build a list out of the topmost stack slice, after markobject. + + All the stack entries following the topmost markobject are placed into + a single Python list, which single list object replaces all of the + stack from the topmost markobject onward. For example, + + Stack before: ... markobject 1 2 3 'abc' + Stack after: ... [1, 2, 3, 'abc'] + """), + + # Ways to build tuples. + + I(name='EMPTY_TUPLE', + code=')', + arg=None, + stack_before=[], + stack_after=[pytuple], + proto=1, + doc="Push an empty tuple."), + + I(name='TUPLE', + code='t', + arg=None, + stack_before=[markobject, stackslice], + stack_after=[pytuple], + proto=0, + doc="""Build a tuple out of the topmost stack slice, after markobject. + + All the stack entries following the topmost markobject are placed into + a single Python tuple, which single tuple object replaces all of the + stack from the topmost markobject onward. For example, + + Stack before: ... markobject 1 2 3 'abc' + Stack after: ... (1, 2, 3, 'abc') + """), + + I(name='TUPLE1', + code='\x85', + arg=None, + stack_before=[anyobject], + stack_after=[pytuple], + proto=2, + doc="""Build a one-tuple out of the topmost item on the stack. + + This code pops one value off the stack and pushes a tuple of + length 1 whose one item is that value back onto it. 
In other
+      words:
+
+          stack[-1] = tuple(stack[-1:])
+      """),
+
+    I(name='TUPLE2',
+      code='\x86',
+      arg=None,
+      stack_before=[anyobject, anyobject],
+      stack_after=[pytuple],
+      proto=2,
+      doc="""Build a two-tuple out of the top two items on the stack.
+
+      This code pops two values off the stack and pushes a tuple of
+      length 2 whose items are those values back onto it. In other
+      words:
+
+          stack[-2:] = [tuple(stack[-2:])]
+      """),
+
+    I(name='TUPLE3',
+      code='\x87',
+      arg=None,
+      stack_before=[anyobject, anyobject, anyobject],
+      stack_after=[pytuple],
+      proto=2,
+      doc="""Build a three-tuple out of the top three items on the stack.
+
+      This code pops three values off the stack and pushes a tuple of
+      length 3 whose items are those values back onto it. In other
+      words:
+
+          stack[-3:] = [tuple(stack[-3:])]
+      """),
+
+    # Ways to build dicts.
+
+    I(name='EMPTY_DICT',
+      code='}',
+      arg=None,
+      stack_before=[],
+      stack_after=[pydict],
+      proto=1,
+      doc="Push an empty dict."),
+
+    I(name='DICT',
+      code='d',
+      arg=None,
+      stack_before=[markobject, stackslice],
+      stack_after=[pydict],
+      proto=0,
+      doc="""Build a dict out of the topmost stack slice, after markobject.
+
+      All the stack entries following the topmost markobject are placed into
+      a single Python dict, which single dict object replaces all of the
+      stack from the topmost markobject onward. The stack slice alternates
+      key, value, key, value, .... For example,
+
+      Stack before: ... markobject 1 2 3 'abc'
+      Stack after:  ... {1: 2, 3: 'abc'}
+      """),
+
+    I(name='SETITEM',
+      code='s',
+      arg=None,
+      stack_before=[pydict, anyobject, anyobject],
+      stack_after=[pydict],
+      proto=0,
+      doc="""Add a key+value pair to an existing dict.
+
+      Stack before: ... pydict key value
+      Stack after:  ... pydict
+
+      where pydict has been modified via pydict[key] = value.
+      """),
+
+    I(name='SETITEMS',
+      code='u',
+      arg=None,
+      stack_before=[pydict, markobject, stackslice],
+      stack_after=[pydict],
+      proto=1,
+      doc="""Add an arbitrary number of key+value pairs to an existing dict.
+
+      The slice of the stack following the topmost markobject is taken as
+      an alternating sequence of keys and values, added to the dict
+      immediately under the topmost markobject. Everything at and after the
+      topmost markobject is popped, leaving the mutated dict at the top
+      of the stack.
+
+      Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
+      Stack after:  ... pydict
+
+      where pydict has been modified via pydict[key_i] = value_i for i in
+      1, 2, ..., n, and in that order.
+      """),
+
+    # Ways to build sets
+
+    I(name='EMPTY_SET',
+      code='\x8f',
+      arg=None,
+      stack_before=[],
+      stack_after=[pyset],
+      proto=4,
+      doc="Push an empty set."),
+
+    I(name='ADDITEMS',
+      code='\x90',
+      arg=None,
+      stack_before=[pyset, markobject, stackslice],
+      stack_after=[pyset],
+      proto=4,
+      doc="""Add an arbitrary number of items to an existing set.
+
+      The slice of the stack following the topmost markobject is taken as
+      a sequence of items, added to the set immediately under the topmost
+      markobject. Everything at and after the topmost markobject is popped,
+      leaving the mutated set at the top of the stack.
+
+      Stack before: ... pyset markobject item_1 ... item_n
+      Stack after:  ... pyset
+
+      where pyset has been modified via pyset.add(item_i) for i in
+      1, 2, ..., n, and in that order.
+ """), + + # Way to build frozensets + + I(name='FROZENSET', + code='\x91', + arg=None, + stack_before=[markobject, stackslice], + stack_after=[pyfrozenset], + proto=4, + doc="""Build a frozenset out of the topmost slice, after markobject. + + All the stack entries following the topmost markobject are placed into + a single Python frozenset, which single frozenset object replaces all + of the stack from the topmost markobject onward. For example, + + Stack before: ... markobject 1 2 3 + Stack after: ... frozenset({1, 2, 3}) + """), + + # Stack manipulation. + + I(name='POP', + code='0', + arg=None, + stack_before=[anyobject], + stack_after=[], + proto=0, + doc="Discard the top stack item, shrinking the stack by one item."), + + I(name='DUP', + code='2', + arg=None, + stack_before=[anyobject], + stack_after=[anyobject, anyobject], + proto=0, + doc="Push the top stack item onto the stack again, duplicating it."), + + I(name='MARK', + code='(', + arg=None, + stack_before=[], + stack_after=[markobject], + proto=0, + doc="""Push markobject onto the stack. + + markobject is a unique object, used by other opcodes to identify a + region of the stack containing a variable number of objects for them + to work on. See markobject.doc for more detail. + """), + + I(name='POP_MARK', + code='1', + arg=None, + stack_before=[markobject, stackslice], + stack_after=[], + proto=1, + doc="""Pop all the stack objects at and above the topmost markobject. + + When an opcode using a variable number of stack objects is done, + POP_MARK is used to remove those objects, and to remove the markobject + that delimited their starting position on the stack. + """), + + # Memo manipulation. There are really only two operations (get and put), + # each in all-text, "short binary", and "long binary" flavors. + + I(name='GET', + code='g', + arg=decimalnl_short, + stack_before=[], + stack_after=[anyobject], + proto=0, + doc="""Read an object from the memo and push it on the stack. + + The index of the memo object to push is given by the newline-terminated + decimal string following. BINGET and LONG_BINGET are space-optimized + versions. + """), + + I(name='BINGET', + code='h', + arg=uint1, + stack_before=[], + stack_after=[anyobject], + proto=1, + doc="""Read an object from the memo and push it on the stack. + + The index of the memo object to push is given by the 1-byte unsigned + integer following. + """), + + I(name='LONG_BINGET', + code='j', + arg=uint4, + stack_before=[], + stack_after=[anyobject], + proto=1, + doc="""Read an object from the memo and push it on the stack. + + The index of the memo object to push is given by the 4-byte unsigned + little-endian integer following. + """), + + I(name='PUT', + code='p', + arg=decimalnl_short, + stack_before=[], + stack_after=[], + proto=0, + doc="""Store the stack top into the memo. The stack is not popped. + + The index of the memo location to write into is given by the newline- + terminated decimal string following. BINPUT and LONG_BINPUT are + space-optimized versions. + """), + + I(name='BINPUT', + code='q', + arg=uint1, + stack_before=[], + stack_after=[], + proto=1, + doc="""Store the stack top into the memo. The stack is not popped. + + The index of the memo location to write into is given by the 1-byte + unsigned integer following. + """), + + I(name='LONG_BINPUT', + code='r', + arg=uint4, + stack_before=[], + stack_after=[], + proto=1, + doc="""Store the stack top into the memo. The stack is not popped. 
+ + The index of the memo location to write into is given by the 4-byte + unsigned little-endian integer following. + """), + + I(name='MEMOIZE', + code='\x94', + arg=None, + stack_before=[anyobject], + stack_after=[anyobject], + proto=4, + doc="""Store the stack top into the memo. The stack is not popped. + + The index of the memo location to write is the number of + elements currently present in the memo. + """), + + # Access the extension registry (predefined objects). Akin to the GET + # family. + + I(name='EXT1', + code='\x82', + arg=uint1, + stack_before=[], + stack_after=[anyobject], + proto=2, + doc="""Extension code. + + This code and the similar EXT2 and EXT4 allow using a registry + of popular objects that are pickled by name, typically classes. + It is envisioned that through a global negotiation and + registration process, third parties can set up a mapping between + ints and object names. + + In order to guarantee pickle interchangeability, the extension + code registry ought to be global, although a range of codes may + be reserved for private use. + + EXT1 has a 1-byte integer argument. This is used to index into the + extension registry, and the object at that index is pushed on the stack. + """), + + I(name='EXT2', + code='\x83', + arg=uint2, + stack_before=[], + stack_after=[anyobject], + proto=2, + doc="""Extension code. + + See EXT1. EXT2 has a two-byte integer argument. + """), + + I(name='EXT4', + code='\x84', + arg=int4, + stack_before=[], + stack_after=[anyobject], + proto=2, + doc="""Extension code. + + See EXT1. EXT4 has a four-byte integer argument. + """), + + # Push a class object, or module function, on the stack, via its module + # and name. + + I(name='GLOBAL', + code='c', + arg=stringnl_noescape_pair, + stack_before=[], + stack_after=[anyobject], + proto=0, + doc="""Push a global object (module.attr) on the stack. + + Two newline-terminated strings follow the GLOBAL opcode. The first is + taken as a module name, and the second as a class name. The class + object module.class is pushed on the stack. More accurately, the + object returned by self.find_class(module, class) is pushed on the + stack, so unpickling subclasses can override this form of lookup. + """), + + I(name='STACK_GLOBAL', + code='\x93', + arg=None, + stack_before=[pyunicode, pyunicode], + stack_after=[anyobject], + proto=4, + doc="""Push a global object (module.attr) on the stack. + """), + + # Ways to build objects of classes pickle doesn't know about directly + # (user-defined classes). I despair of documenting this accurately + # and comprehensibly -- you really have to read the pickle code to + # find all the special cases. + + I(name='REDUCE', + code='R', + arg=None, + stack_before=[anyobject, anyobject], + stack_after=[anyobject], + proto=0, + doc="""Push an object built from a callable and an argument tuple. + + The opcode is named to remind of the __reduce__() method. + + Stack before: ... callable pytuple + Stack after: ... callable(*pytuple) + + The callable and the argument tuple are the first two items returned + by a __reduce__ method. Applying the callable to the argtuple is + supposed to reproduce the original object, or at least get it started. + If the __reduce__ method returns a 3-tuple, the last component is an + argument to be passed to the object's __setstate__, and then the REDUCE + opcode is followed by code to create setstate's argument, and then a + BUILD opcode to apply __setstate__ to that argument. 
+
+      If not isinstance(callable, type), REDUCE complains unless the
+      callable has been registered with the copyreg module's
+      safe_constructors dict, or the callable has a magic
+      '__safe_for_unpickling__' attribute with a true value. I'm not sure
+      why it does this, but I've sure seen this complaint often enough when
+      I didn't want to.
+      """),
+
+    I(name='BUILD',
+      code='b',
+      arg=None,
+      stack_before=[anyobject, anyobject],
+      stack_after=[anyobject],
+      proto=0,
+      doc="""Finish building an object, via __setstate__ or dict update.
+
+      Stack before: ... anyobject argument
+      Stack after:  ... anyobject
+
+      where anyobject may have been mutated, as follows:
+
+      If the object has a __setstate__ method,
+
+          anyobject.__setstate__(argument)
+
+      is called.
+
+      Else the argument must be a dict, the object must have a __dict__, and
+      the object is updated via
+
+          anyobject.__dict__.update(argument)
+      """),
+
+    I(name='INST',
+      code='i',
+      arg=stringnl_noescape_pair,
+      stack_before=[markobject, stackslice],
+      stack_after=[anyobject],
+      proto=0,
+      doc="""Build a class instance.
+
+      This is the protocol 0 version of protocol 1's OBJ opcode.
+      INST is followed by two newline-terminated strings, giving a
+      module and class name, just as for the GLOBAL opcode (and see
+      GLOBAL for more details about that). self.find_class(module, name)
+      is used to get a class object.
+
+      In addition, all the objects on the stack following the topmost
+      markobject are gathered into a tuple and popped (along with the
+      topmost markobject), just as for the TUPLE opcode.
+
+      Now it gets complicated. If all of these are true:
+
+       + The argtuple is empty (markobject was at the top of the stack
+         at the start).
+
+       + The class object does not have a __getinitargs__ attribute.
+
+      then we want to create an old-style class instance without invoking
+      its __init__() method (pickle has waffled on this over the years; not
+      calling __init__() is current wisdom). In this case, an instance of
+      an old-style dummy class is created, and then we try to rebind its
+      __class__ attribute to the desired class object. If this succeeds,
+      the new instance object is pushed on the stack, and we're done.
+
+      Else (the argtuple is not empty, it's not an old-style class object,
+      or the class object does have a __getinitargs__ attribute), the code
+      first insists that the class object have a __safe_for_unpickling__
+      attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
+      it doesn't matter whether this attribute has a true or false value, it
+      only matters whether it exists (XXX this is a bug). If
+      __safe_for_unpickling__ doesn't exist, UnpicklingError is raised.
+
+      Else (the class object does have a __safe_for_unpickling__ attr),
+      the class object obtained from INST's arguments is applied to the
+      argtuple obtained from the stack, and the resulting instance object
+      is pushed on the stack.
+
+      NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
+      NOTE: the distinction between old-style and new-style classes does
+          not make sense in Python 3.
+      """),
+
+    I(name='OBJ',
+      code='o',
+      arg=None,
+      stack_before=[markobject, anyobject, stackslice],
+      stack_after=[anyobject],
+      proto=1,
+      doc="""Build a class instance.
+
+      This is the protocol 1 version of protocol 0's INST opcode, and is
+      very much like it. The major difference is that the class object
+      is taken off the stack, allowing it to be retrieved from the memo
+      repeatedly if several instances of the same class are created. This
+      can be much more efficient (in both time and space) than repeatedly
+      embedding the module and class names in INST opcodes.
+
+      Unlike INST, OBJ takes no arguments from the opcode stream. Instead
+      the class object is taken off the stack, immediately above the
+      topmost markobject:
+
+      Stack before: ... markobject classobject stackslice
+      Stack after:  ... new_instance_object
+
+      As for INST, the remainder of the stack above the markobject is
+      gathered into an argument tuple, and then the logic seems identical,
+      except that no __safe_for_unpickling__ check is done (XXX this is
+      a bug). See INST for the gory details.
+
+      NOTE: In Python 2.3, INST and OBJ are identical except for how they
+      get the class object. That was always the intent; the implementations
+      had diverged for accidental reasons.
+      """),
+
+    I(name='NEWOBJ',
+      code='\x81',
+      arg=None,
+      stack_before=[anyobject, anyobject],
+      stack_after=[anyobject],
+      proto=2,
+      doc="""Build an object instance.
+
+      The stack before should be thought of as containing a class
+      object followed by an argument tuple (the tuple being the stack
+      top). Call these cls and args. They are popped off the stack,
+      and the value returned by cls.__new__(cls, *args) is pushed back
+      onto the stack.
+      """),
+
+    I(name='NEWOBJ_EX',
+      code='\x92',
+      arg=None,
+      stack_before=[anyobject, anyobject, anyobject],
+      stack_after=[anyobject],
+      proto=4,
+      doc="""Build an object instance.
+
+      The stack before should be thought of as containing a class
+      object followed by an argument tuple and by a keyword argument dict
+      (the dict being the stack top). Call these cls, args, and kwargs.
+      They are popped off the stack, and the value returned by
+      cls.__new__(cls, *args, **kwargs) is pushed back onto the stack.
+      """),
+
+    # Machine control.
+
+    I(name='PROTO',
+      code='\x80',
+      arg=uint1,
+      stack_before=[],
+      stack_after=[],
+      proto=2,
+      doc="""Protocol version indicator.
+
+      For protocol 2 and above, a pickle must start with this opcode.
+      The argument is the protocol version, an int in range(2, 256).
+      """),
+
+    I(name='STOP',
+      code='.',
+      arg=None,
+      stack_before=[anyobject],
+      stack_after=[],
+      proto=0,
+      doc="""Stop the unpickling machine.
+
+      Every pickle ends with this opcode. The object at the top of the stack
+      is popped, and that's the result of unpickling. The stack should be
+      empty then.
+      """),
+
+    # Framing support.
+
+    I(name='FRAME',
+      code='\x95',
+      arg=uint8,
+      stack_before=[],
+      stack_after=[],
+      proto=4,
+      doc="""Indicate the beginning of a new frame.
+
+      The unpickler may use this opcode to safely prefetch data from its
+      underlying stream.
+      """),
+
+    # Ways to deal with persistent IDs.
+
+    I(name='PERSID',
+      code='P',
+      arg=stringnl_noescape,
+      stack_before=[],
+      stack_after=[anyobject],
+      proto=0,
+      doc="""Push an object identified by a persistent ID.
+
+      The pickle module doesn't define what a persistent ID means. PERSID's
+      argument is a newline-terminated str-style (no embedded escapes, no
+      bracketing quote characters) string, which *is* "the persistent ID".
+      The unpickler passes this string to self.persistent_load(). Whatever
+      object that returns is pushed on the stack. There is no implementation
+      of persistent_load() in Python's unpickler: it must be supplied by an
+      unpickler subclass.
+      """),
+
+    I(name='BINPERSID',
+      code='Q',
+      arg=None,
+      stack_before=[anyobject],
+      stack_after=[anyobject],
+      proto=1,
+      doc="""Push an object identified by a persistent ID.
+
+      Like PERSID, except the persistent ID is popped off the stack (instead
+      of being a string embedded in the opcode bytestream). The persistent
+      ID is passed to self.persistent_load(), and whatever object that
+      returns is pushed on the stack. See PERSID for more detail.
+      """),
+]
+del I
+
+# Verify uniqueness of .name and .code members.
+name2i = {}
+code2i = {}
+
+for i, d in enumerate(opcodes):
+    if d.name in name2i:
+        raise ValueError("repeated name %r at indices %d and %d" %
+                         (d.name, name2i[d.name], i))
+    if d.code in code2i:
+        raise ValueError("repeated code %r at indices %d and %d" %
+                         (d.code, code2i[d.code], i))
+
+    name2i[d.name] = i
+    code2i[d.code] = i
+
+del name2i, code2i, i, d
+
+##############################################################################
+# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
+# Also ensure we've got the same stuff as pickle.py, although the
+# introspection here is dicey.
+
+code2op = {}
+for d in opcodes:
+    code2op[d.code] = d
+del d
+
+def assure_pickle_consistency(verbose=False):
+
+    copy = code2op.copy()
+    for name in pickle.__all__:
+        if not re.match("[A-Z][A-Z0-9_]+$", name):
+            if verbose:
+                print("skipping %r: it doesn't look like an opcode name" % name)
+            continue
+        picklecode = getattr(pickle, name)
+        if not isinstance(picklecode, bytes) or len(picklecode) != 1:
+            if verbose:
+                print(("skipping %r: value %r doesn't look like a pickle "
+                       "code" % (name, picklecode)))
+            continue
+        picklecode = picklecode.decode("latin-1")
+        if picklecode in copy:
+            if verbose:
+                print("checking name %r w/ code %r for consistency" % (
+                      name, picklecode))
+            d = copy[picklecode]
+            if d.name != name:
+                raise ValueError("for pickle code %r, pickle.py uses name %r "
+                                 "but we're using name %r" % (picklecode,
+                                                              name,
+                                                              d.name))
+            # Forget this one. Any left over in copy at the end are a problem
+            # of a different kind.
+            del copy[picklecode]
+        else:
+            raise ValueError("pickle.py appears to have a pickle opcode with "
+                             "name %r and code %r, but we don't" %
+                             (name, picklecode))
+    if copy:
+        msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
+        for code, d in copy.items():
+            msg.append("    name %r with code %r" % (d.name, code))
+        raise ValueError("\n".join(msg))
+
+assure_pickle_consistency()
+del assure_pickle_consistency
+
+##############################################################################
+# A pickle opcode generator.
+
+def _genops(data, yield_end_pos=False):
+    if isinstance(data, bytes_types):
+        data = io.BytesIO(data)
+
+    if hasattr(data, "tell"):
+        getpos = data.tell
+    else:
+        getpos = lambda: None
+
+    while True:
+        pos = getpos()
+        code = data.read(1)
+        opcode = code2op.get(code.decode("latin-1"))
+        if opcode is None:
+            if code == b"":
+                raise ValueError("pickle exhausted before seeing STOP")
+            else:
+                raise ValueError("at position %s, opcode %r unknown" % (
+                                 "<unknown>" if pos is None else pos,
+                                 code))
+        if opcode.arg is None:
+            arg = None
+        else:
+            arg = opcode.arg.reader(data)
+        if yield_end_pos:
+            yield opcode, arg, pos, getpos()
+        else:
+            yield opcode, arg, pos
+        if code == b'.':
+            assert opcode.name == 'STOP'
+            break
+
+def genops(pickle):
+    """Generate all the opcodes in a pickle.
+
+    'pickle' is a file-like object, or string, containing the pickle.
+
+    Each opcode in the pickle is generated, from the current pickle position,
+    stopping after a STOP opcode is delivered.
A triple is generated for
+    each opcode:
+
+        opcode, arg, pos
+
+    opcode is an OpcodeInfo record, describing the current opcode.
+
+    If the opcode has an argument embedded in the pickle, arg is its decoded
+    value, as a Python object. If the opcode doesn't have an argument, arg
+    is None.
+
+    If the pickle has a tell() method, pos was the value of pickle.tell()
+    before reading the current opcode. If the pickle is a bytes object,
+    it's wrapped in a BytesIO object, and the latter's tell() result is
+    used. Else (the pickle doesn't have a tell(), and it's not obvious how
+    to query its current position) pos is None.
+    """
+    return _genops(pickle)
+
+##############################################################################
+# A pickle optimizer.
+
+def optimize(p):
+    'Optimize a pickle string by removing unused PUT opcodes'
+    put = 'PUT'
+    get = 'GET'
+    oldids = set()          # set of all PUT ids
+    newids = {}             # set of ids used by a GET opcode
+    opcodes = []            # (op, idx) or (pos, end_pos)
+    proto = 0
+    protoheader = b''
+    for opcode, arg, pos, end_pos in _genops(p, yield_end_pos=True):
+        if 'PUT' in opcode.name:
+            oldids.add(arg)
+            opcodes.append((put, arg))
+        elif opcode.name == 'MEMOIZE':
+            idx = len(oldids)
+            oldids.add(idx)
+            opcodes.append((put, idx))
+        elif 'FRAME' in opcode.name:
+            pass
+        elif 'GET' in opcode.name:
+            if opcode.proto > proto:
+                proto = opcode.proto
+            newids[arg] = None
+            opcodes.append((get, arg))
+        elif opcode.name == 'PROTO':
+            if arg > proto:
+                proto = arg
+            if pos == 0:
+                protoheader = p[pos:end_pos]
+            else:
+                opcodes.append((pos, end_pos))
+        else:
+            opcodes.append((pos, end_pos))
+    del oldids
+
+    # Copy the opcodes except for PUTS without a corresponding GET
+    out = io.BytesIO()
+    # Write the PROTO header before any framing
+    out.write(protoheader)
+    pickler = pickle._Pickler(out, proto)
+    if proto >= 4:
+        pickler.framer.start_framing()
+    idx = 0
+    for op, arg in opcodes:
+        frameless = False
+        if op is put:
+            if arg not in newids:
+                continue
+            data = pickler.put(idx)
+            newids[arg] = idx
+            idx += 1
+        elif op is get:
+            data = pickler.get(newids[arg])
+        else:
+            data = p[op:arg]
+            frameless = len(data) > pickler.framer._FRAME_SIZE_TARGET
+        pickler.framer.commit_frame(force=frameless)
+        if frameless:
+            pickler.framer.file_write(data)
+        else:
+            pickler.write(data)
+    pickler.framer.end_framing()
+    return out.getvalue()
+
+##############################################################################
+# A symbolic pickle disassembler.
+
+def dis(pickle, out=None, memo=None, indentlevel=4, annotate=0):
+    """Produce a symbolic disassembly of a pickle.
+
+    'pickle' is a file-like object, or string, containing at least one
+    pickle. The pickle is disassembled from the current position, through
+    the first STOP opcode encountered.
+
+    Optional arg 'out' is a file-like object to which the disassembly is
+    printed. It defaults to sys.stdout.
+
+    Optional arg 'memo' is a Python dict, used as the pickle's memo. It
+    may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
+    Passing the same memo object to another dis() call then allows disassembly
+    to proceed across multiple pickles that were all created by the same
+    pickler with the same memo. Ordinarily you don't need to worry about this.
+
+    Optional arg 'indentlevel' is the number of blanks by which to indent
+    a new MARK level. It defaults to 4.
+
+    Optional arg 'annotate' if nonzero instructs dis() to add a short
+    description of the opcode on each line of disassembled output.
+ The value given to 'annotate' must be an integer and is used as a + hint for the column where annotation should start. The default + value is 0, meaning no annotations. + + In addition to printing the disassembly, some sanity checks are made: + + + All embedded opcode arguments "make sense". + + + Explicit and implicit pop operations have enough items on the stack. + + + When an opcode implicitly refers to a markobject, a markobject is + actually on the stack. + + + A memo entry isn't referenced before it's defined. + + + The markobject isn't stored in the memo. + """ + + # Most of the hair here is for sanity checks, but most of it is needed + # anyway to detect when a protocol 0 POP takes a MARK off the stack + # (which in turn is needed to indent MARK blocks correctly). + + stack = [] # crude emulation of unpickler stack + if memo is None: + memo = {} # crude emulation of unpickler memo + maxproto = -1 # max protocol number seen + markstack = [] # bytecode positions of MARK opcodes + indentchunk = ' ' * indentlevel + errormsg = None + annocol = annotate # column hint for annotations + for opcode, arg, pos in genops(pickle): + if pos is not None: + print("%5d:" % pos, end=' ', file=out) + + line = "%-4s %s%s" % (repr(opcode.code)[1:-1], + indentchunk * len(markstack), + opcode.name) + + maxproto = max(maxproto, opcode.proto) + before = opcode.stack_before # don't mutate + after = opcode.stack_after # don't mutate + numtopop = len(before) + + # See whether a MARK should be popped. + markmsg = None + if markobject in before or (opcode.name == "POP" and + stack and + stack[-1] is markobject): + assert markobject not in after + if __debug__: + if markobject in before: + assert before[-1] is stackslice + if markstack: + markpos = markstack.pop() + if markpos is None: + markmsg = "(MARK at unknown opcode offset)" + else: + markmsg = "(MARK at %d)" % markpos + # Pop everything at and after the topmost markobject. + while stack[-1] is not markobject: + stack.pop() + stack.pop() + # Stop later code from popping too much. + try: + numtopop = before.index(markobject) + except ValueError: + assert opcode.name == "POP" + numtopop = 0 + else: + errormsg = "no MARK exists on stack" + + # Check for correct memo usage. + if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT", "MEMOIZE"): + if opcode.name == "MEMOIZE": + memo_idx = len(memo) + markmsg = "(as %d)" % memo_idx + else: + assert arg is not None + memo_idx = arg + if not stack: + errormsg = "stack is empty -- can't store into memo" + elif stack[-1] is markobject: + errormsg = "can't store markobject in the memo" + else: + memo[memo_idx] = stack[-1] + elif opcode.name in ("GET", "BINGET", "LONG_BINGET"): + if arg in memo: + assert len(after) == 1 + after = [memo[arg]] # for better stack emulation + else: + errormsg = "memo key %r has never been stored into" % arg + + if arg is not None or markmsg: + # make a mild effort to align arguments + line += ' ' * (10 - len(opcode.name)) + if arg is not None: + if opcode.name in ("STRING", "BINSTRING", "SHORT_BINSTRING"): + line += ' ' + ascii(arg) + else: + line += ' ' + repr(arg) + if markmsg: + line += ' ' + markmsg + if annotate: + line += ' ' * (annocol - len(line)) + # make a mild effort to align annotations + annocol = len(line) + if annocol > 50: + annocol = annotate + line += ' ' + opcode.doc.split('\n', 1)[0] + print(line, file=out) + + if errormsg: + # Note that we delayed complaining until the offending opcode + # was printed. + raise ValueError(errormsg) + + # Emulate the stack effects. 
+ if len(stack) < numtopop: + raise ValueError("tries to pop %d items from stack with " + "only %d items" % (numtopop, len(stack))) + if numtopop: + del stack[-numtopop:] + if markobject in after: + assert markobject not in before + markstack.append(pos) + + stack.extend(after) + + print("highest protocol among opcodes =", maxproto, file=out) + if stack: + raise ValueError("stack not empty after STOP: %r" % stack) + +# For use in the doctest, simply as an example of a class to pickle. +class _Example: + def __init__(self, value): + self.value = value + +_dis_test = r""" +>>> import pickle +>>> x = [1, 2, (3, 4), {b'abc': "def"}] +>>> pkl0 = pickle.dumps(x, 0) +>>> dis(pkl0) + 0: ( MARK + 1: l LIST (MARK at 0) + 2: p PUT 0 + 5: I INT 1 + 8: a APPEND + 9: I INT 2 + 12: a APPEND + 13: ( MARK + 14: I INT 3 + 17: I INT 4 + 20: t TUPLE (MARK at 13) + 21: p PUT 1 + 24: a APPEND + 25: ( MARK + 26: d DICT (MARK at 25) + 27: p PUT 2 + 30: c GLOBAL '_codecs encode' + 46: p PUT 3 + 49: ( MARK + 50: V UNICODE 'abc' + 55: p PUT 4 + 58: V UNICODE 'latin1' + 66: p PUT 5 + 69: t TUPLE (MARK at 49) + 70: p PUT 6 + 73: R REDUCE + 74: p PUT 7 + 77: V UNICODE 'def' + 82: p PUT 8 + 85: s SETITEM + 86: a APPEND + 87: . STOP +highest protocol among opcodes = 0 + +Try again with a "binary" pickle. + +>>> pkl1 = pickle.dumps(x, 1) +>>> dis(pkl1) + 0: ] EMPTY_LIST + 1: q BINPUT 0 + 3: ( MARK + 4: K BININT1 1 + 6: K BININT1 2 + 8: ( MARK + 9: K BININT1 3 + 11: K BININT1 4 + 13: t TUPLE (MARK at 8) + 14: q BINPUT 1 + 16: } EMPTY_DICT + 17: q BINPUT 2 + 19: c GLOBAL '_codecs encode' + 35: q BINPUT 3 + 37: ( MARK + 38: X BINUNICODE 'abc' + 46: q BINPUT 4 + 48: X BINUNICODE 'latin1' + 59: q BINPUT 5 + 61: t TUPLE (MARK at 37) + 62: q BINPUT 6 + 64: R REDUCE + 65: q BINPUT 7 + 67: X BINUNICODE 'def' + 75: q BINPUT 8 + 77: s SETITEM + 78: e APPENDS (MARK at 3) + 79: . STOP +highest protocol among opcodes = 1 + +Exercise the INST/OBJ/BUILD family. + +>>> import pickletools +>>> dis(pickle.dumps(pickletools.dis, 0)) + 0: c GLOBAL 'pickletools dis' + 17: p PUT 0 + 20: . STOP +highest protocol among opcodes = 0 + +>>> from pickletools import _Example +>>> x = [_Example(42)] * 2 +>>> dis(pickle.dumps(x, 0)) + 0: ( MARK + 1: l LIST (MARK at 0) + 2: p PUT 0 + 5: c GLOBAL 'copy_reg _reconstructor' + 30: p PUT 1 + 33: ( MARK + 34: c GLOBAL 'pickletools _Example' + 56: p PUT 2 + 59: c GLOBAL '__builtin__ object' + 79: p PUT 3 + 82: N NONE + 83: t TUPLE (MARK at 33) + 84: p PUT 4 + 87: R REDUCE + 88: p PUT 5 + 91: ( MARK + 92: d DICT (MARK at 91) + 93: p PUT 6 + 96: V UNICODE 'value' + 103: p PUT 7 + 106: I INT 42 + 110: s SETITEM + 111: b BUILD + 112: a APPEND + 113: g GET 5 + 116: a APPEND + 117: . STOP +highest protocol among opcodes = 0 + +>>> dis(pickle.dumps(x, 1)) + 0: ] EMPTY_LIST + 1: q BINPUT 0 + 3: ( MARK + 4: c GLOBAL 'copy_reg _reconstructor' + 29: q BINPUT 1 + 31: ( MARK + 32: c GLOBAL 'pickletools _Example' + 54: q BINPUT 2 + 56: c GLOBAL '__builtin__ object' + 76: q BINPUT 3 + 78: N NONE + 79: t TUPLE (MARK at 31) + 80: q BINPUT 4 + 82: R REDUCE + 83: q BINPUT 5 + 85: } EMPTY_DICT + 86: q BINPUT 6 + 88: X BINUNICODE 'value' + 98: q BINPUT 7 + 100: K BININT1 42 + 102: s SETITEM + 103: b BUILD + 104: h BINGET 5 + 106: e APPENDS (MARK at 3) + 107: . STOP +highest protocol among opcodes = 1 + +Try "the canonical" recursive-object test. 
+ +>>> L = [] +>>> T = L, +>>> L.append(T) +>>> L[0] is T +True +>>> T[0] is L +True +>>> L[0][0] is L +True +>>> T[0][0] is T +True +>>> dis(pickle.dumps(L, 0)) + 0: ( MARK + 1: l LIST (MARK at 0) + 2: p PUT 0 + 5: ( MARK + 6: g GET 0 + 9: t TUPLE (MARK at 5) + 10: p PUT 1 + 13: a APPEND + 14: . STOP +highest protocol among opcodes = 0 + +>>> dis(pickle.dumps(L, 1)) + 0: ] EMPTY_LIST + 1: q BINPUT 0 + 3: ( MARK + 4: h BINGET 0 + 6: t TUPLE (MARK at 3) + 7: q BINPUT 1 + 9: a APPEND + 10: . STOP +highest protocol among opcodes = 1 + +Note that, in the protocol 0 pickle of the recursive tuple, the disassembler +has to emulate the stack in order to realize that the POP opcode at 16 gets +rid of the MARK at 0. + +>>> dis(pickle.dumps(T, 0)) + 0: ( MARK + 1: ( MARK + 2: l LIST (MARK at 1) + 3: p PUT 0 + 6: ( MARK + 7: g GET 0 + 10: t TUPLE (MARK at 6) + 11: p PUT 1 + 14: a APPEND + 15: 0 POP + 16: 0 POP (MARK at 0) + 17: g GET 1 + 20: . STOP +highest protocol among opcodes = 0 + +>>> dis(pickle.dumps(T, 1)) + 0: ( MARK + 1: ] EMPTY_LIST + 2: q BINPUT 0 + 4: ( MARK + 5: h BINGET 0 + 7: t TUPLE (MARK at 4) + 8: q BINPUT 1 + 10: a APPEND + 11: 1 POP_MARK (MARK at 0) + 12: h BINGET 1 + 14: . STOP +highest protocol among opcodes = 1 + +Try protocol 2. + +>>> dis(pickle.dumps(L, 2)) + 0: \x80 PROTO 2 + 2: ] EMPTY_LIST + 3: q BINPUT 0 + 5: h BINGET 0 + 7: \x85 TUPLE1 + 8: q BINPUT 1 + 10: a APPEND + 11: . STOP +highest protocol among opcodes = 2 + +>>> dis(pickle.dumps(T, 2)) + 0: \x80 PROTO 2 + 2: ] EMPTY_LIST + 3: q BINPUT 0 + 5: h BINGET 0 + 7: \x85 TUPLE1 + 8: q BINPUT 1 + 10: a APPEND + 11: 0 POP + 12: h BINGET 1 + 14: . STOP +highest protocol among opcodes = 2 + +Try protocol 3 with annotations: + +>>> dis(pickle.dumps(T, 3), annotate=1) + 0: \x80 PROTO 3 Protocol version indicator. + 2: ] EMPTY_LIST Push an empty list. + 3: q BINPUT 0 Store the stack top into the memo. The stack is not popped. + 5: h BINGET 0 Read an object from the memo and push it on the stack. + 7: \x85 TUPLE1 Build a one-tuple out of the topmost item on the stack. + 8: q BINPUT 1 Store the stack top into the memo. The stack is not popped. + 10: a APPEND Append an object to a list. + 11: 0 POP Discard the top stack item, shrinking the stack by one item. + 12: h BINGET 1 Read an object from the memo and push it on the stack. + 14: . STOP Stop the unpickling machine. +highest protocol among opcodes = 2 + +""" + +_memo_test = r""" +>>> import pickle +>>> import io +>>> f = io.BytesIO() +>>> p = pickle.Pickler(f, 2) +>>> x = [1, 2, 3] +>>> p.dump(x) +>>> p.dump(x) +>>> f.seek(0) +0 +>>> memo = {} +>>> dis(f, memo=memo) + 0: \x80 PROTO 2 + 2: ] EMPTY_LIST + 3: q BINPUT 0 + 5: ( MARK + 6: K BININT1 1 + 8: K BININT1 2 + 10: K BININT1 3 + 12: e APPENDS (MARK at 5) + 13: . STOP +highest protocol among opcodes = 2 +>>> dis(f, memo=memo) + 14: \x80 PROTO 2 + 16: h BINGET 0 + 18: . 
STOP +highest protocol among opcodes = 2 +""" + +__test__ = {'disassembler_test': _dis_test, + 'disassembler_memo_test': _memo_test, + } + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser( + description='disassemble one or more pickle files', + color=True, + ) + parser.add_argument( + 'pickle_file', + nargs='+', help='the pickle file') + parser.add_argument( + '-o', '--output', + help='the file where the output should be written') + parser.add_argument( + '-m', '--memo', action='store_true', + help='preserve memo between disassemblies') + parser.add_argument( + '-l', '--indentlevel', default=4, type=int, + help='the number of blanks by which to indent a new MARK level') + parser.add_argument( + '-a', '--annotate', action='store_true', + help='annotate each line with a short opcode description') + parser.add_argument( + '-p', '--preamble', default="==> {name} <==", + help='if more than one pickle file is specified, print this before' + ' each disassembly') + args = parser.parse_args() + annotate = 30 if args.annotate else 0 + memo = {} if args.memo else None + if args.output is None: + output = sys.stdout + else: + output = open(args.output, 'w') + try: + for arg in args.pickle_file: + if len(args.pickle_file) > 1: + name = '' if arg == '-' else arg + preamble = args.preamble.format(name=name) + output.write(preamble + '\n') + if arg == '-': + dis(sys.stdin.buffer, output, memo, args.indentlevel, annotate) + else: + with open(arg, 'rb') as f: + dis(f, output, memo, args.indentlevel, annotate) + finally: + if output is not sys.stdout: + output.close() diff --git a/Python314_4_x64_Template/Lib/pkgutil.py b/Python314_4_x64_Template/Lib/pkgutil.py new file mode 100644 index 00000000..8772a667 --- /dev/null +++ b/Python314_4_x64_Template/Lib/pkgutil.py @@ -0,0 +1,474 @@ +"""Utilities to support packages.""" + +from collections import namedtuple +from functools import singledispatch as simplegeneric +import importlib +import importlib.util +import importlib.machinery +import os +import os.path +import sys + +__all__ = [ + 'get_importer', 'iter_importers', + 'walk_packages', 'iter_modules', 'get_data', + 'read_code', 'extend_path', + 'ModuleInfo', +] + + +ModuleInfo = namedtuple('ModuleInfo', 'module_finder name ispkg') +ModuleInfo.__doc__ = 'A namedtuple with minimal info about a module.' + + +def read_code(stream): + # This helper is needed in order for the PEP 302 emulation to + # correctly handle compiled files + import marshal + + magic = stream.read(4) + if magic != importlib.util.MAGIC_NUMBER: + return None + + stream.read(12) # Skip rest of the header + return marshal.load(stream) + + +def walk_packages(path=None, prefix='', onerror=None): + """Yields ModuleInfo for all modules recursively + on path, or, if path is None, all accessible modules. + + 'path' should be either None or a list of paths to look for + modules in. + + 'prefix' is a string to output on the front of every module name + on output. + + Note that this function must import all *packages* (NOT all + modules!) on the given path, in order to access the __path__ + attribute to find submodules. + + 'onerror' is a function which gets called with one argument (the + name of the package which was being imported) if any exception + occurs while trying to import a package. If no onerror function is + supplied, ImportErrors are caught and ignored, while all other + exceptions are propagated, terminating the search. 
+ + Examples: + + # list all modules python can access + walk_packages() + + # list all submodules of ctypes + walk_packages(ctypes.__path__, ctypes.__name__+'.') + """ + + def seen(p, m={}): + if p in m: + return True + m[p] = True + + for info in iter_modules(path, prefix): + yield info + + if info.ispkg: + try: + __import__(info.name) + except ImportError: + if onerror is not None: + onerror(info.name) + except Exception: + if onerror is not None: + onerror(info.name) + else: + raise + else: + path = getattr(sys.modules[info.name], '__path__', None) or [] + + # don't traverse path items we've seen before + path = [p for p in path if not seen(p)] + + yield from walk_packages(path, info.name+'.', onerror) + + +def iter_modules(path=None, prefix=''): + """Yields ModuleInfo for all submodules on path, + or, if path is None, all top-level modules on sys.path. + + 'path' should be either None or a list of paths to look for + modules in. + + 'prefix' is a string to output on the front of every module name + on output. + """ + if path is None: + importers = iter_importers() + elif isinstance(path, str): + raise ValueError("path must be None or list of paths to look for " + "modules in") + else: + importers = map(get_importer, path) + + yielded = {} + for i in importers: + for name, ispkg in iter_importer_modules(i, prefix): + if name not in yielded: + yielded[name] = 1 + yield ModuleInfo(i, name, ispkg) + + +@simplegeneric +def iter_importer_modules(importer, prefix=''): + if not hasattr(importer, 'iter_modules'): + return [] + return importer.iter_modules(prefix) + + +# Implement a file walker for the normal importlib path hook +def _iter_file_finder_modules(importer, prefix=''): + if importer.path is None or not os.path.isdir(importer.path): + return + + yielded = {} + import inspect + try: + filenames = os.listdir(importer.path) + except OSError: + # ignore unreadable directories like import does + filenames = [] + filenames.sort() # handle packages before same-named modules + + for fn in filenames: + modname = inspect.getmodulename(fn) + if modname=='__init__' or modname in yielded: + continue + + path = os.path.join(importer.path, fn) + ispkg = False + + if not modname and os.path.isdir(path) and '.' not in fn: + modname = fn + try: + dircontents = os.listdir(path) + except OSError: + # ignore unreadable directories like import does + dircontents = [] + for fn in dircontents: + subname = inspect.getmodulename(fn) + if subname=='__init__': + ispkg = True + break + else: + continue # not a package + + if modname and '.' not in modname: + yielded[modname] = 1 + yield prefix + modname, ispkg + +iter_importer_modules.register( + importlib.machinery.FileFinder, _iter_file_finder_modules) + + +try: + import zipimport + from zipimport import zipimporter + + def iter_zipimport_modules(importer, prefix=''): + dirlist = sorted(zipimport._zip_directory_cache[importer.archive]) + _prefix = importer.prefix + plen = len(_prefix) + yielded = {} + import inspect + for fn in dirlist: + if not fn.startswith(_prefix): + continue + + fn = fn[plen:].split(os.sep) + + if len(fn)==2 and fn[1].startswith('__init__.py'): + if fn[0] not in yielded: + yielded[fn[0]] = 1 + yield prefix + fn[0], True + + if len(fn)!=1: + continue + + modname = inspect.getmodulename(fn[0]) + if modname=='__init__': + continue + + if modname and '.' 
not in modname and modname not in yielded:
+                yielded[modname] = 1
+                yield prefix + modname, False
+
+    iter_importer_modules.register(zipimporter, iter_zipimport_modules)
+
+except ImportError:
+    pass
+
+
+def get_importer(path_item):
+    """Retrieve a finder for the given path item
+
+    The returned finder is cached in sys.path_importer_cache
+    if it was newly created by a path hook.
+
+    The cache (or part of it) can be cleared manually if a
+    rescan of sys.path_hooks is necessary.
+    """
+    path_item = os.fsdecode(path_item)
+    try:
+        importer = sys.path_importer_cache[path_item]
+    except KeyError:
+        for path_hook in sys.path_hooks:
+            try:
+                importer = path_hook(path_item)
+                sys.path_importer_cache.setdefault(path_item, importer)
+                break
+            except ImportError:
+                pass
+        else:
+            importer = None
+    return importer
+
+
+def iter_importers(fullname=""):
+    """Yield finders for the given module name
+
+    If fullname contains a '.', the finders will be for the package
+    containing fullname, otherwise they will be all registered top level
+    finders (i.e. those on both sys.meta_path and sys.path_hooks).
+
+    If the named module is in a package, that package is imported as a side
+    effect of invoking this function.
+
+    If no module name is specified, all top level finders are produced.
+    """
+    if fullname.startswith('.'):
+        msg = "Relative module name {!r} not supported".format(fullname)
+        raise ImportError(msg)
+    if '.' in fullname:
+        # Get the containing package's __path__
+        pkg_name = fullname.rpartition(".")[0]
+        pkg = importlib.import_module(pkg_name)
+        path = getattr(pkg, '__path__', None)
+        if path is None:
+            return
+    else:
+        yield from sys.meta_path
+        path = sys.path
+    for item in path:
+        yield get_importer(item)
+
+
+def extend_path(path, name):
+    """Extend a package's path.
+
+    Intended use is to place the following code in a package's __init__.py:
+
+        from pkgutil import extend_path
+        __path__ = extend_path(__path__, __name__)
+
+    For each directory on sys.path that has a subdirectory that
+    matches the package name, add the subdirectory to the package's
+    __path__. This is useful if one wants to distribute different
+    parts of a single logical package as multiple directories.
+
+    It also looks for *.pkg files beginning where * matches the name
+    argument. This feature is similar to *.pth files (see site.py),
+    except that it doesn't special-case lines starting with 'import'.
+    A *.pkg file is trusted at face value: apart from checking for
+    duplicates, all entries found in a *.pkg file are added to the
+    path, regardless of whether they exist on the filesystem. (This
+    is a feature.)
+
+    If the input path is not a list (as is the case for frozen
+    packages) it is returned unchanged. The input path is not
+    modified; an extended copy is returned. Items are only appended
+    to the copy at the end.
+
+    It is assumed that sys.path is a sequence. Items of sys.path that
+    are not (unicode or 8-bit) strings referring to existing
+    directories are ignored. Unicode items of sys.path that cause
+    errors when used as filenames may cause this function to raise an
+    exception (in line with os.path.isdir() behavior).
+    """
+
+    if not isinstance(path, list):
+        # This could happen e.g. when this is called from inside a
+        # frozen package. Return the path unchanged in that case.
+ return path + + sname_pkg = name + ".pkg" + + path = path[:] # Start with a copy of the existing path + + parent_package, _, final_name = name.rpartition('.') + if parent_package: + try: + search_path = sys.modules[parent_package].__path__ + except (KeyError, AttributeError): + # We can't do anything: find_loader() returns None when + # passed a dotted name. + return path + else: + search_path = sys.path + + for dir in search_path: + if not isinstance(dir, str): + continue + + finder = get_importer(dir) + if finder is not None: + portions = [] + if hasattr(finder, 'find_spec'): + spec = finder.find_spec(final_name) + if spec is not None: + portions = spec.submodule_search_locations or [] + # Is this finder PEP 420 compliant? + elif hasattr(finder, 'find_loader'): + _, portions = finder.find_loader(final_name) + + for portion in portions: + # XXX This may still add duplicate entries to path on + # case-insensitive filesystems + if portion not in path: + path.append(portion) + + # XXX Is this the right thing for subpackages like zope.app? + # It looks for a file named "zope.app.pkg" + pkgfile = os.path.join(dir, sname_pkg) + if os.path.isfile(pkgfile): + try: + f = open(pkgfile) + except OSError as msg: + sys.stderr.write("Can't open %s: %s\n" % + (pkgfile, msg)) + else: + with f: + for line in f: + line = line.rstrip('\n') + if not line or line.startswith('#'): + continue + path.append(line) # Don't check for existence! + + return path + + +def get_data(package, resource): + """Get a resource from a package. + + This is a wrapper round the PEP 302 loader get_data API. The package + argument should be the name of a package, in standard module format + (foo.bar). The resource argument should be in the form of a relative + filename, using '/' as the path separator. The parent directory name '..' + is not allowed, and nor is a rooted name (starting with a '/'). + + The function returns a binary string, which is the contents of the + specified resource. + + For packages located in the filesystem, which have already been imported, + this is the rough equivalent of + + d = os.path.dirname(sys.modules[package].__file__) + data = open(os.path.join(d, resource), 'rb').read() + + If the package cannot be located or loaded, or it uses a PEP 302 loader + which does not support get_data(), then None is returned. + """ + + spec = importlib.util.find_spec(package) + if spec is None: + return None + loader = spec.loader + if loader is None or not hasattr(loader, 'get_data'): + return None + # XXX needs test + mod = (sys.modules.get(package) or + importlib._bootstrap._load(spec)) + if mod is None or not hasattr(mod, '__file__'): + return None + + # Modify the resource name to be compatible with the loader.get_data + # signature - an os.path format "filename" starting with the dirname of + # the package's __file__ + parts = resource.split('/') + parts.insert(0, os.path.dirname(mod.__file__)) + resource_name = os.path.join(*parts) + return loader.get_data(resource_name) + + +_NAME_PATTERN = None + +def resolve_name(name): + """ + Resolve a name to an object. + + It is expected that `name` will be a string in one of the following + formats, where W is shorthand for a valid Python identifier and dot stands + for a literal period in these pseudo-regexes: + + W(.W)* + W(.W)*:(W(.W)*)? + + The first form is intended for backward compatibility only. It assumes that + some part of the dotted name is a package, and the rest is an object + somewhere within that package, possibly nested inside other objects. 
Because the place where the package stops and the object hierarchy starts
+    can't be inferred by inspection, repeated attempts to import must be done
+    with this form.
+
+    In the second form, the caller makes the division point clear through the
+    provision of a single colon: the dotted name to the left of the colon is a
+    package to be imported, and the dotted name to the right is the object
+    hierarchy within that package. Only one import is needed in this form. If
+    it ends with the colon, then a module object is returned.
+
+    The function will return an object (which might be a module), or raise one
+    of the following exceptions:
+
+    ValueError - if `name` isn't in a recognised format
+    ImportError - if an import failed when it shouldn't have
+    AttributeError - if a failure occurred when traversing the object hierarchy
+    within the imported package to get to the desired object.
+    """
+    global _NAME_PATTERN
+    if _NAME_PATTERN is None:
+        # Lazy import to speedup Python startup time
+        import re
+        dotted_words = r'(?!\d)(\w+)(\.(?!\d)(\w+))*'
+        _NAME_PATTERN = re.compile(f'^(?P<pkg>{dotted_words})'
+                                   f'(?P<cln>:(?P<obj>{dotted_words})?)?$',
+                                   re.UNICODE)
+
+    m = _NAME_PATTERN.match(name)
+    if not m:
+        raise ValueError(f'invalid format: {name!r}')
+    gd = m.groupdict()
+    if gd.get('cln'):
+        # there is a colon - a one-step import is all that's needed
+        mod = importlib.import_module(gd['pkg'])
+        parts = gd.get('obj')
+        parts = parts.split('.') if parts else []
+    else:
+        # no colon - have to iterate to find the package boundary
+        parts = name.split('.')
+        modname = parts.pop(0)
+        # first part *must* be a module/package.
+        mod = importlib.import_module(modname)
+        while parts:
+            p = parts[0]
+            s = f'{modname}.{p}'
+            try:
+                mod = importlib.import_module(s)
+                parts.pop(0)
+                modname = s
+            except ImportError:
+                break
+    # if we reach this point, mod is the module, already imported, and
+    # parts is the list of parts in the object hierarchy to be traversed, or
+    # an empty list if just the module is wanted.
+    result = mod
+    for p in parts:
+        result = getattr(result, p)
+    return result
diff --git a/Python314_4_x64_Template/Lib/platform.py b/Python314_4_x64_Template/Lib/platform.py
new file mode 100644
index 00000000..b017b841
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/platform.py
@@ -0,0 +1,1515 @@
+""" This module tries to retrieve as much platform-identifying data as
+    possible. It makes this information available via function APIs.
+
+    If called from the command line, it prints the platform
+    information concatenated as single string to stdout. The output
+    format is usable as part of a filename.
+
+"""
+# This module is maintained by Marc-Andre Lemburg <mal@egenix.com>.
+# If you find problems, please submit bug reports/patches via the
+# Python issue tracker (https://github.com/python/cpython/issues) and
+# mention "@malemburg".
+#
+# Still needed:
+# * support for MS-DOS (PythonDX ?)
+# * support for Amiga and other still unsupported platforms running Python
+# * support for additional Linux distributions
+#
+# Many thanks to all those who helped adding platform-specific
+# checks (in no particular order):
+#
+#    Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
+#    Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
+#    Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
+#    Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
+#    Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
+#    Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter, Steve
+#    Dower
+#
+# History:
+#
+# <see CVS and SVN checkin messages for history>
+#
+# 1.0.9 - added invalidate_caches() function to invalidate cached values
+# 1.0.8 - changed Windows support to read version from kernel32.dll
+# 1.0.7 - added DEV_NULL
+# 1.0.6 - added linux_distribution()
+# 1.0.5 - fixed Java support to allow running the module on Jython
+# 1.0.4 - added IronPython support
+# 1.0.3 - added normalization of Windows system name
+# 1.0.2 - added more Windows support
+# 1.0.1 - reformatted to make doc.py happy
+# 1.0.0 - reformatted a bit and checked into Python CVS
+# 0.8.0 - added sys.version parser and various new access
+#         APIs (python_version(), python_compiler(), etc.)
+# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
+# 0.7.1 - added support for Caldera OpenLinux
+# 0.7.0 - some fixes for WinCE; untabified the source file
+# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
+#         vms_lib.getsyi() configured
+# 0.6.1 - added code to prevent 'uname -p' on platforms which are
+#         known not to support it
+# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
+#         did some cleanup of the interfaces - some APIs have changed
+# 0.5.5 - fixed another typo in the MacOS code... should have
+#         used more coffee today ;-)
+# 0.5.4 - fixed a few typos in the MacOS code
+# 0.5.3 - added experimental MacOS support; added better popen()
+#         workarounds in _syscmd_ver() -- still not 100% elegant
+#         though
+# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
+#         return values (the system uname command tends to return
+#         'unknown' instead of just leaving the field empty)
+# 0.5.1 - included code for slackware dist; added exception handlers
+#         to cover up situations where platforms don't have os.popen
+#         (e.g. Mac) or fail on socket.gethostname(); fixed libc
+#         detection RE
+# 0.5.0 - changed the API names referring to system commands to *syscmd*;
+#         added java_ver(); made syscmd_ver() a private
+#         API (was system_ver() in previous versions) -- use uname()
+#         instead; extended the win32_ver() to also return processor
+#         type information
+# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
+# 0.3.4 - fixed a bug in _follow_symlinks()
+# 0.3.3 - fixed popen() and "file" command invocation bugs
+# 0.3.2 - added architecture() API and support for it in platform()
+# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
+# 0.3.0 - added system alias support
+# 0.2.3 - removed 'wince' again... oh well.
+# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
+# 0.2.1 - added cache logic and changed the platform string format
+# 0.2.0 - changed the API to use functions instead of module globals
+#         since some actions take too long to be run on module import
+# 0.1.0 - first release
+#
+# You can always get the latest version of this module at:
+#
+#     http://www.egenix.com/files/python/platform.py
+#
+# If that URL should fail, try contacting the author.
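+#
+# Editor's note -- a brief, illustrative usage sketch (not part of the
+# upstream comments). All of the calls below are public APIs defined
+# later in this file; the sample return values are only examples and
+# vary by machine:
+#
+#     >>> import platform
+#     >>> platform.system(), platform.release()   # e.g. ('Windows', '11')
+#     >>> platform.python_version()               # e.g. '3.14.4'
+#     >>> platform.platform()                     # one-string summary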
+ +__copyright__ = """ + Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com + Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com + + Permission to use, copy, modify, and distribute this software and its + documentation for any purpose and without fee or royalty is hereby granted, + provided that the above copyright notice appear in all copies and that + both that copyright notice and this permission notice appear in + supporting documentation or portions thereof, including modifications, + that you make. + + EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO + THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND + FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, + INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING + FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + WITH THE USE OR PERFORMANCE OF THIS SOFTWARE ! + +""" + +__version__ = '1.0.9' + +import collections +import os +import re +import sys +import functools +import itertools +try: + import _wmi +except ImportError: + _wmi = None + +### Globals & Constants + +# Helper for comparing two version number strings. +# Based on the description of the PHP's version_compare(): +# http://php.net/manual/en/function.version-compare.php + +_ver_stages = { + # any string not found in this dict, will get 0 assigned + 'dev': 10, + 'alpha': 20, 'a': 20, + 'beta': 30, 'b': 30, + 'c': 40, + 'RC': 50, 'rc': 50, + # number, will get 100 assigned + 'pl': 200, 'p': 200, +} + + +def _comparable_version(version): + component_re = re.compile(r'([0-9]+|[._+-])') + result = [] + for v in component_re.split(version): + if v not in '._+-': + try: + v = int(v, 10) + t = 100 + except ValueError: + t = _ver_stages.get(v, 0) + result.extend((t, v)) + return result + +### Platform specific APIs + + +def libc_ver(executable=None, lib='', version='', chunksize=16384): + + """ Tries to determine the libc version that the file executable + (which defaults to the Python interpreter) is linked against. + + Returns a tuple of strings (lib,version) which default to the + given parameters in case the lookup fails. + + Note that the function has intimate knowledge of how different + libc versions add symbols to the executable and thus is probably + only usable for executables compiled using gcc. + + The file is read and scanned in chunks of chunksize bytes. + + """ + if not executable: + if sys.platform == "emscripten": + # Emscripten's os.confstr reports that it is glibc, so special case + # it. + ver = ".".join(str(x) for x in sys._emscripten_info.emscripten_version) + return ("emscripten", ver) + try: + ver = os.confstr('CS_GNU_LIBC_VERSION') + # parse 'glibc 2.28' as ('glibc', '2.28') + parts = ver.split(maxsplit=1) + if len(parts) == 2: + return tuple(parts) + except (AttributeError, ValueError, OSError): + # os.confstr() or CS_GNU_LIBC_VERSION value not available + pass + + executable = sys.executable + + if not executable: + # sys.executable is not set. + return lib, version + + libc_search = re.compile(br""" + (__libc_init) + | (GLIBC_([0-9.]+)) + | (libc(_\w+)?\.so(?:\.(\d[0-9.]*))?) + | (musl-([0-9.]+)) + | ((?:libc\.|ld-)musl(?:-\w+)?.so(?:\.(\d[0-9.]*))?) 
+ """, + re.ASCII | re.VERBOSE) + + V = _comparable_version + # We use os.path.realpath() + # here to work around problems with Cygwin not being + # able to open symlinks for reading + executable = os.path.realpath(executable) + ver = None + with open(executable, 'rb') as f: + binary = f.read(chunksize) + pos = 0 + while pos < len(binary): + if b'libc' in binary or b'GLIBC' in binary or b'musl' in binary: + m = libc_search.search(binary, pos) + else: + m = None + if not m or m.end() == len(binary): + chunk = f.read(chunksize) + if chunk: + binary = binary[max(pos, len(binary) - 1000):] + chunk + pos = 0 + continue + if not m: + break + decoded_groups = [s.decode('latin1') if s is not None else s + for s in m.groups()] + (libcinit, glibc, glibcversion, so, threads, soversion, + musl, muslversion, musl_so, musl_sover) = decoded_groups + if libcinit and not lib: + lib = 'libc' + elif glibc: + if lib != 'glibc': + lib = 'glibc' + ver = glibcversion + elif V(glibcversion) > V(ver): + ver = glibcversion + elif so: + if lib not in ('glibc', 'musl'): + lib = 'libc' + if soversion and (not ver or V(soversion) > V(ver)): + ver = soversion + if threads and ver[-len(threads):] != threads: + ver = ver + threads + elif musl: + lib = 'musl' + if not ver or V(muslversion) > V(ver): + ver = muslversion + elif musl_so: + lib = 'musl' + if musl_sover and (not ver or V(musl_sover) > V(ver)): + ver = musl_sover + pos = m.end() + return lib, version if ver is None else ver + +def _norm_version(version, build=''): + + """ Normalize the version and build strings and return a single + version string using the format major.minor.build (or patchlevel). + """ + l = version.split('.') + if build: + l.append(build) + try: + strings = list(map(str, map(int, l))) + except ValueError: + strings = l + version = '.'.join(strings[:3]) + return version + + +# Examples of VER command output: +# +# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195] +# Windows XP: Microsoft Windows XP [Version 5.1.2600] +# Windows Vista: Microsoft Windows [Version 6.0.6002] +# +# Note that the "Version" string gets localized on different +# Windows versions. + +def _syscmd_ver(system='', release='', version='', + + supported_platforms=('win32', 'win16', 'dos')): + + """ Tries to figure out the OS version used and returns + a tuple (system, release, version). + + It uses the "ver" shell command for this which is known + to exists on Windows, DOS. XXX Others too ? + + In case this fails, the given parameters are used as + defaults. 
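        Illustrative example (hypothetical output; the exact strings depend
        on the Windows release and the locale of the host running "ver"):

            >>> _syscmd_ver()
            ('Microsoft', 'Windows', '10.0.19041')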
+ + """ + if sys.platform not in supported_platforms: + return system, release, version + + # Try some common cmd strings + import subprocess + for cmd in ('ver', 'command /c ver', 'cmd /c ver'): + try: + info = subprocess.check_output(cmd, + stdin=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + text=True, + encoding="locale", + shell=True) + except (OSError, subprocess.CalledProcessError) as why: + #print('Command %s failed: %s' % (cmd, why)) + continue + else: + break + else: + return system, release, version + + ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) ' + r'.*' + r'\[.* ([\d.]+)\])') + + # Parse the output + info = info.strip() + m = ver_output.match(info) + if m is not None: + system, release, version = m.groups() + # Strip trailing dots from version and release + if release[-1] == '.': + release = release[:-1] + if version[-1] == '.': + version = version[:-1] + # Normalize the version and build strings (eliminating additional + # zeros) + version = _norm_version(version) + return system, release, version + + +def _wmi_query(table, *keys): + global _wmi + if not _wmi: + raise OSError("not supported") + table = { + "OS": "Win32_OperatingSystem", + "CPU": "Win32_Processor", + }[table] + try: + data = _wmi.exec_query("SELECT {} FROM {}".format( + ",".join(keys), + table, + )).split("\0") + except OSError: + _wmi = None + raise OSError("not supported") + split_data = (i.partition("=") for i in data) + dict_data = {i[0]: i[2] for i in split_data} + return (dict_data[k] for k in keys) + + +_WIN32_CLIENT_RELEASES = [ + ((10, 1, 0), "post11"), + ((10, 0, 22000), "11"), + ((6, 4, 0), "10"), + ((6, 3, 0), "8.1"), + ((6, 2, 0), "8"), + ((6, 1, 0), "7"), + ((6, 0, 0), "Vista"), + ((5, 2, 3790), "XP64"), + ((5, 2, 0), "XPMedia"), + ((5, 1, 0), "XP"), + ((5, 0, 0), "2000"), +] + +_WIN32_SERVER_RELEASES = [ + ((10, 1, 0), "post2025Server"), + ((10, 0, 26100), "2025Server"), + ((10, 0, 20348), "2022Server"), + ((10, 0, 17763), "2019Server"), + ((6, 4, 0), "2016Server"), + ((6, 3, 0), "2012ServerR2"), + ((6, 2, 0), "2012Server"), + ((6, 1, 0), "2008ServerR2"), + ((6, 0, 0), "2008Server"), + ((5, 2, 0), "2003Server"), + ((5, 0, 0), "2000Server"), +] + +def win32_is_iot(): + return win32_edition() in ('IoTUAP', 'NanoServer', 'WindowsCoreHeadless', 'IoTEdgeOS') + +def win32_edition(): + try: + import winreg + except ImportError: + pass + else: + try: + cvkey = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion' + with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, cvkey) as key: + return winreg.QueryValueEx(key, 'EditionId')[0] + except OSError: + pass + + return None + +def _win32_ver(version, csd, ptype): + # Try using WMI first, as this is the canonical source of data + try: + (version, product_type, ptype, spmajor, spminor) = _wmi_query( + 'OS', + 'Version', + 'ProductType', + 'BuildType', + 'ServicePackMajorVersion', + 'ServicePackMinorVersion', + ) + is_client = (int(product_type) == 1) + if spminor and spminor != '0': + csd = f'SP{spmajor}.{spminor}' + else: + csd = f'SP{spmajor}' + return version, csd, ptype, is_client + except OSError: + pass + + # Fall back to a combination of sys.getwindowsversion and "ver" + try: + from sys import getwindowsversion + except ImportError: + return version, csd, ptype, True + + winver = getwindowsversion() + is_client = (getattr(winver, 'product_type', 1) == 1) + try: + version = _syscmd_ver()[2] + major, minor, build = map(int, version.split('.')) + except ValueError: + major, minor, build = winver.platform_version or winver[:3] + version = 
'{0}.{1}.{2}'.format(major, minor, build) + + # getwindowsversion() reflect the compatibility mode Python is + # running under, and so the service pack value is only going to be + # valid if the versions match. + if winver[:2] == (major, minor): + try: + csd = 'SP{}'.format(winver.service_pack_major) + except AttributeError: + if csd[:13] == 'Service Pack ': + csd = 'SP' + csd[13:] + + try: + import winreg + except ImportError: + pass + else: + try: + cvkey = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion' + with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, cvkey) as key: + ptype = winreg.QueryValueEx(key, 'CurrentType')[0] + except OSError: + pass + + return version, csd, ptype, is_client + +def win32_ver(release='', version='', csd='', ptype=''): + is_client = False + + version, csd, ptype, is_client = _win32_ver(version, csd, ptype) + + if version: + intversion = tuple(map(int, version.split('.'))) + releases = _WIN32_CLIENT_RELEASES if is_client else _WIN32_SERVER_RELEASES + release = next((r for v, r in releases if v <= intversion), release) + + return release, version, csd, ptype + + +def _mac_ver_xml(): + fn = '/System/Library/CoreServices/SystemVersion.plist' + if not os.path.exists(fn): + return None + + try: + import plistlib + except ImportError: + return None + + with open(fn, 'rb') as f: + pl = plistlib.load(f) + release = pl['ProductVersion'] + versioninfo = ('', '', '') + machine = os.uname().machine + if machine in ('ppc', 'Power Macintosh'): + # Canonical name + machine = 'PowerPC' + + return release, versioninfo, machine + + +def mac_ver(release='', versioninfo=('', '', ''), machine=''): + + """ Get macOS version information and return it as tuple (release, + versioninfo, machine) with versioninfo being a tuple (version, + dev_stage, non_release_version). + + Entries which cannot be determined are set to the parameter values + which default to ''. All tuple entries are strings. + """ + + # First try reading the information from an XML file which should + # always be present + info = _mac_ver_xml() + if info is not None: + return info + + # If that also doesn't work return the default values + return release, versioninfo, machine + + +# A namedtuple for iOS version information. +IOSVersionInfo = collections.namedtuple( + "IOSVersionInfo", + ["system", "release", "model", "is_simulator"] +) + + +def ios_ver(system="", release="", model="", is_simulator=False): + """Get iOS version information, and return it as a namedtuple: + (system, release, model, is_simulator). + + If values can't be determined, they are set to values provided as + parameters. + """ + if sys.platform == "ios": + import _ios_support + result = _ios_support.get_platform_ios() + if result is not None: + return IOSVersionInfo(*result) + + return IOSVersionInfo(system, release, model, is_simulator) + + +def _java_getprop(name, default): + """This private helper is deprecated in 3.13 and will be removed in 3.15""" + from java.lang import System + try: + value = System.getProperty(name) + if value is None: + return default + return value + except AttributeError: + return default + +def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')): + + """ Version interface for Jython. + + Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being + a tuple (vm_name, vm_release, vm_vendor) and osinfo being a + tuple (os_name, os_version, os_arch). + + Values which cannot be determined are set to the defaults + given as parameters (which all default to ''). 
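        Illustrative example (on any non-Jython interpreter the java.lang
        import fails, so the given defaults come back unchanged):

            >>> java_ver()
            ('', '', ('', '', ''), ('', '', ''))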
+ + """ + import warnings + warnings._deprecated('java_ver', remove=(3, 15)) + # Import the needed APIs + try: + import java.lang # noqa: F401 + except ImportError: + return release, vendor, vminfo, osinfo + + vendor = _java_getprop('java.vendor', vendor) + release = _java_getprop('java.version', release) + vm_name, vm_release, vm_vendor = vminfo + vm_name = _java_getprop('java.vm.name', vm_name) + vm_vendor = _java_getprop('java.vm.vendor', vm_vendor) + vm_release = _java_getprop('java.vm.version', vm_release) + vminfo = vm_name, vm_release, vm_vendor + os_name, os_version, os_arch = osinfo + os_arch = _java_getprop('java.os.arch', os_arch) + os_name = _java_getprop('java.os.name', os_name) + os_version = _java_getprop('java.os.version', os_version) + osinfo = os_name, os_version, os_arch + + return release, vendor, vminfo, osinfo + + +AndroidVer = collections.namedtuple( + "AndroidVer", "release api_level manufacturer model device is_emulator") + +def android_ver(release="", api_level=0, manufacturer="", model="", device="", + is_emulator=False): + if sys.platform == "android": + try: + from ctypes import CDLL, c_char_p, create_string_buffer + except ImportError: + pass + else: + # An NDK developer confirmed that this is an officially-supported + # API (https://stackoverflow.com/a/28416743). Use `getattr` to avoid + # private name mangling. + system_property_get = getattr(CDLL("libc.so"), "__system_property_get") + system_property_get.argtypes = (c_char_p, c_char_p) + + def getprop(name, default): + # https://android.googlesource.com/platform/bionic/+/refs/tags/android-5.0.0_r1/libc/include/sys/system_properties.h#39 + PROP_VALUE_MAX = 92 + buffer = create_string_buffer(PROP_VALUE_MAX) + length = system_property_get(name.encode("UTF-8"), buffer) + if length == 0: + # This API doesn’t distinguish between an empty property and + # a missing one. + return default + else: + return buffer.value.decode("UTF-8", "backslashreplace") + + release = getprop("ro.build.version.release", release) + api_level = int(getprop("ro.build.version.sdk", api_level)) + manufacturer = getprop("ro.product.manufacturer", manufacturer) + model = getprop("ro.product.model", model) + device = getprop("ro.product.device", device) + is_emulator = getprop("ro.kernel.qemu", "0") == "1" + + return AndroidVer( + release, api_level, manufacturer, model, device, is_emulator) + + +### System name aliasing + +def system_alias(system, release, version): + + """ Returns (system, release, version) aliased to common + marketing names used for some systems. + + It also does some reordering of the information in some cases + where it would otherwise cause confusion. + + """ + if system == 'SunOS': + # Sun's OS + if release < '5': + # These releases use the old name SunOS + return system, release, version + # Modify release (marketing release = SunOS release - 3) + l = release.split('.') + if l: + try: + major = int(l[0]) + except ValueError: + pass + else: + major = major - 3 + l[0] = str(major) + release = '.'.join(l) + if release < '6': + system = 'Solaris' + else: + # XXX Whatever the new SunOS marketing name is... + system = 'Solaris' + + elif system in ('win32', 'win16'): + # In case one of the other tricks + system = 'Windows' + + # bpo-35516: Don't replace Darwin with macOS since input release and + # version arguments can be different than the currently running version. 
+ + return system, release, version + +### Various internal helpers + +def _platform(*args): + + """ Helper to format the platform string in a filename + compatible format e.g. "system-version-machine". + """ + # Format the platform string + platform = '-'.join(x.strip() for x in filter(len, args)) + + # Cleanup some possible filename obstacles... + platform = platform.replace(' ', '_') + platform = platform.replace('/', '-') + platform = platform.replace('\\', '-') + platform = platform.replace(':', '-') + platform = platform.replace(';', '-') + platform = platform.replace('"', '-') + platform = platform.replace('(', '-') + platform = platform.replace(')', '-') + + # No need to report 'unknown' information... + platform = platform.replace('unknown', '') + + # Fold '--'s and remove trailing '-' + while True: + cleaned = platform.replace('--', '-') + if cleaned == platform: + break + platform = cleaned + while platform and platform[-1] == '-': + platform = platform[:-1] + + return platform + +def _node(default=''): + + """ Helper to determine the node name of this machine. + """ + try: + import socket + except ImportError: + # No sockets... + return default + try: + return socket.gethostname() + except OSError: + # Still not working... + return default + +def _follow_symlinks(filepath): + + """ In case filepath is a symlink, follow it until a + real file is reached. + """ + filepath = os.path.abspath(filepath) + while os.path.islink(filepath): + filepath = os.path.normpath( + os.path.join(os.path.dirname(filepath), os.readlink(filepath))) + return filepath + + +def _syscmd_file(target, default=''): + + """ Interface to the system's file command. + + The function uses the -b option of the file command to have it + omit the filename in its output. Follow the symlinks. It returns + default in case the command should fail. + + """ + if sys.platform in {'dos', 'win32', 'win16', 'ios', 'tvos', 'watchos'}: + # XXX Others too ? + return default + + try: + import subprocess + except ImportError: + return default + target = _follow_symlinks(target) + # "file" output is locale dependent: force the usage of the C locale + # to get deterministic behavior. + env = dict(os.environ, LC_ALL='C') + try: + # -b: do not prepend filenames to output lines (brief mode) + output = subprocess.check_output(['file', '-b', target], + stderr=subprocess.DEVNULL, + env=env) + except (OSError, subprocess.CalledProcessError): + return default + if not output: + return default + # With the C locale, the output should be mostly ASCII-compatible. + # Decode from Latin-1 to prevent Unicode decode error. + return output.decode('latin-1') + +### Information about the used architecture + +# Default values for architecture; non-empty strings override the +# defaults given as parameters +_default_architecture = { + 'win32': ('', 'WindowsPE'), + 'win16': ('', 'Windows'), + 'dos': ('', 'MSDOS'), +} + +def architecture(executable=sys.executable, bits='', linkage=''): + + """ Queries the given executable (defaults to the Python interpreter + binary) for various architecture information. + + Returns a tuple (bits, linkage) which contains information about + the bit architecture and the linkage format used for the + executable. Both values are returned as strings. + + Values that cannot be determined are returned as given by the + parameter presets. If bits is given as '', the sizeof(pointer) + (or sizeof(long) on Python version < 1.5.2) is used as + indicator for the supported pointer size. 
+ + The function relies on the system's "file" command to do the + actual work. This is available on most if not all Unix + platforms. On some non-Unix platforms where the "file" command + does not exist and the executable is set to the Python interpreter + binary defaults from _default_architecture are used. + + """ + # Use the sizeof(pointer) as default number of bits if nothing + # else is given as default. + if not bits: + import struct + size = struct.calcsize('P') + bits = str(size * 8) + 'bit' + + # Get data from the 'file' system command + if executable: + fileout = _syscmd_file(executable, '') + else: + fileout = '' + + if not fileout and \ + executable == sys.executable: + # "file" command did not return anything; we'll try to provide + # some sensible defaults then... + if sys.platform in _default_architecture: + b, l = _default_architecture[sys.platform] + if b: + bits = b + if l: + linkage = l + return bits, linkage + + if 'executable' not in fileout and 'shared object' not in fileout: + # Format not supported + return bits, linkage + + # Bits + if '32-bit' in fileout: + bits = '32bit' + elif '64-bit' in fileout: + bits = '64bit' + + # Linkage + if 'ELF' in fileout: + linkage = 'ELF' + elif 'Mach-O' in fileout: + linkage = "Mach-O" + elif 'PE' in fileout: + # E.g. Windows uses this format + if 'Windows' in fileout: + linkage = 'WindowsPE' + else: + linkage = 'PE' + elif 'COFF' in fileout: + linkage = 'COFF' + elif 'MS-DOS' in fileout: + linkage = 'MSDOS' + else: + # XXX the A.OUT format also falls under this class... + pass + + return bits, linkage + + +def _get_machine_win32(): + # Try to use the PROCESSOR_* environment variables + # available on Win XP and later; see + # http://support.microsoft.com/kb/888731 and + # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM + + # WOW64 processes mask the native architecture + try: + [arch, *_] = _wmi_query('CPU', 'Architecture') + except OSError: + pass + else: + try: + arch = ['x86', 'MIPS', 'Alpha', 'PowerPC', None, + 'ARM', 'ia64', None, None, + 'AMD64', None, None, 'ARM64', + ][int(arch)] + except (ValueError, IndexError): + pass + else: + if arch: + return arch + return ( + os.environ.get('PROCESSOR_ARCHITEW6432', '') or + os.environ.get('PROCESSOR_ARCHITECTURE', '') + ) + + +class _Processor: + @classmethod + def get(cls): + func = getattr(cls, f'get_{sys.platform}', cls.from_subprocess) + return func() or '' + + def get_win32(): + try: + manufacturer, caption = _wmi_query('CPU', 'Manufacturer', 'Caption') + except OSError: + return os.environ.get('PROCESSOR_IDENTIFIER', _get_machine_win32()) + else: + return f'{caption}, {manufacturer}' + + def get_OpenVMS(): + try: + import vms_lib + except ImportError: + pass + else: + csid, cpu_number = vms_lib.getsyi('SYI$_CPU', 0) + return 'Alpha' if cpu_number >= 128 else 'VAX' + + # On the iOS simulator, os.uname returns the architecture as uname.machine. + # On device it returns the model name for some reason; but there's only one + # CPU architecture for iOS devices, so we know the right answer. 
+ def get_ios(): + if sys.implementation._multiarch.endswith("simulator"): + return os.uname().machine + return 'arm64' + + def from_subprocess(): + """ + Fall back to `uname -p` + """ + try: + import subprocess + except ImportError: + return None + try: + return subprocess.check_output( + ['uname', '-p'], + stderr=subprocess.DEVNULL, + text=True, + encoding="utf8", + ).strip() + except (OSError, subprocess.CalledProcessError): + pass + + +def _unknown_as_blank(val): + return '' if val == 'unknown' else val + + +### Portable uname() interface + +class uname_result( + collections.namedtuple( + "uname_result_base", + "system node release version machine") + ): + """ + A uname_result that's largely compatible with a + simple namedtuple except that 'processor' is + resolved late and cached to avoid calling "uname" + except when needed. + """ + + _fields = ('system', 'node', 'release', 'version', 'machine', 'processor') + + @functools.cached_property + def processor(self): + return _unknown_as_blank(_Processor.get()) + + def __iter__(self): + return itertools.chain( + super().__iter__(), + (self.processor,) + ) + + @classmethod + def _make(cls, iterable): + # override factory to affect length check + num_fields = len(cls._fields) - 1 + result = cls.__new__(cls, *iterable) + if len(result) != num_fields + 1: + msg = f'Expected {num_fields} arguments, got {len(result)}' + raise TypeError(msg) + return result + + def __getitem__(self, key): + return tuple(self)[key] + + def __len__(self): + return len(tuple(iter(self))) + + def __reduce__(self): + return uname_result, tuple(self)[:len(self._fields) - 1] + + +_uname_cache = None + + +def uname(): + + """ Fairly portable uname interface. Returns a tuple + of strings (system, node, release, version, machine, processor) + identifying the underlying platform. + + Note that unlike the os.uname function this also returns + possible processor information as an additional tuple entry. + + Entries which cannot be determined are set to ''. + + """ + global _uname_cache + + if _uname_cache is not None: + return _uname_cache + + # Get some infos from the builtin os.uname API... + try: + system, node, release, version, machine = infos = os.uname() + except AttributeError: + system = sys.platform + node = _node() + release = version = machine = '' + infos = () + + if not any(infos): + # uname is not available + + # Try win32_ver() on win32 platforms + if system == 'win32': + release, version, csd, ptype = win32_ver() + machine = machine or _get_machine_win32() + + # Try the 'ver' system command available on some + # platforms + if not (release and version): + system, release, version = _syscmd_ver(system) + # Normalize system to what win32_ver() normally returns + # (_syscmd_ver() tends to return the vendor name as well) + if system == 'Microsoft Windows': + system = 'Windows' + elif system == 'Microsoft' and release == 'Windows': + # Under Windows Vista and Windows Server 2008, + # Microsoft changed the output of the ver command. The + # release is no longer printed. This causes the + # system and release to be misidentified. 
+ system = 'Windows' + if '6.0' == version[:3]: + release = 'Vista' + else: + release = '' + + # In case we still don't know anything useful, we'll try to + # help ourselves + if system in ('win32', 'win16'): + if not version: + if system == 'win32': + version = '32bit' + else: + version = '16bit' + system = 'Windows' + + elif system[:4] == 'java': + release, vendor, vminfo, osinfo = java_ver() + system = 'Java' + version = ', '.join(vminfo) + if not version: + version = vendor + + # System specific extensions + if system == 'OpenVMS': + # OpenVMS seems to have release and version mixed up + if not release or release == '0': + release = version + version = '' + + # normalize name + if system == 'Microsoft' and release == 'Windows': + system = 'Windows' + release = 'Vista' + + # On Android, return the name and version of the OS rather than the kernel. + if sys.platform == 'android': + system = 'Android' + release = android_ver().release + + # Normalize responses on iOS + if sys.platform == 'ios': + system, release, _, _ = ios_ver() + + vals = system, node, release, version, machine + # Replace 'unknown' values with the more portable '' + _uname_cache = uname_result(*map(_unknown_as_blank, vals)) + return _uname_cache + +### Direct interfaces to some of the uname() return values + +def system(): + + """ Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'. + + An empty string is returned if the value cannot be determined. + + """ + return uname().system + +def node(): + + """ Returns the computer's network name (which may not be fully + qualified) + + An empty string is returned if the value cannot be determined. + + """ + return uname().node + +def release(): + + """ Returns the system's release, e.g. '2.2.0' or 'NT' + + An empty string is returned if the value cannot be determined. + + """ + return uname().release + +def version(): + + """ Returns the system's release version, e.g. '#3 on degas' + + An empty string is returned if the value cannot be determined. + + """ + return uname().version + +def machine(): + + """ Returns the machine type, e.g. 'i386' + + An empty string is returned if the value cannot be determined. + + """ + return uname().machine + +def processor(): + + """ Returns the (true) processor name, e.g. 'amdk6' + + An empty string is returned if the value cannot be + determined. Note that many platforms do not provide this + information or simply return the same value as for machine(), + e.g. NetBSD does this. + + """ + return uname().processor + +### Various APIs for extracting information from sys.version + +_sys_version_cache = {} + +def _sys_version(sys_version=None): + + """ Returns a parsed version of Python's sys.version as tuple + (name, version, branch, revision, buildno, builddate, compiler) + referring to the Python implementation name, version, branch, + revision, build number, build date/time as string and the compiler + identification string. + + Note that unlike the Python sys.version, the returned value + for the Python version will always include the patchlevel (it + defaults to '.0'). + + The function returns empty strings for tuple entries that + cannot be determined. + + sys_version may be given to parse an alternative version + string, e.g. if the version was read from a different Python + interpreter. 
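        Illustrative example (hypothetical version string; branch and
        revision come from the *running* interpreter's SCM metadata and are
        shown here as empty strings):

            >>> _sys_version('3.14.4 (main, Jan  1 2026, 00:00:00) [GCC 13.2.0]')
            ('CPython', '3.14.4', '', '', 'main', 'Jan  1 2026 00:00:00', 'GCC 13.2.0')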
+ + """ + # Get the Python version + if sys_version is None: + sys_version = sys.version + + # Try the cache first + result = _sys_version_cache.get(sys_version, None) + if result is not None: + return result + + if sys.platform.startswith('java'): + # Jython + jython_sys_version_parser = re.compile( + r'([\w.+]+)\s*' # "version" + r'\(#?([^,]+)' # "(#buildno" + r'(?:,\s*([\w ]*)' # ", builddate" + r'(?:,\s*([\w :]*))?)?\)\s*' # ", buildtime)" + r'\[([^\]]+)\]?', re.ASCII) # "[compiler]" + name = 'Jython' + match = jython_sys_version_parser.match(sys_version) + if match is None: + raise ValueError( + 'failed to parse Jython sys.version: %s' % + repr(sys_version)) + version, buildno, builddate, buildtime, _ = match.groups() + if builddate is None: + builddate = '' + compiler = sys.platform + + elif "PyPy" in sys_version: + # PyPy + pypy_sys_version_parser = re.compile( + r'([\w.+]+)\s*' + r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*' + r'\[PyPy [^\]]+\]?') + + name = "PyPy" + match = pypy_sys_version_parser.match(sys_version) + if match is None: + raise ValueError("failed to parse PyPy sys.version: %s" % + repr(sys_version)) + version, buildno, builddate, buildtime = match.groups() + compiler = "" + + else: + # CPython + cpython_sys_version_parser = re.compile( + r'([\w.+]+)\s*' # "version" + r'(?:free-threading build\s+)?' # "free-threading-build" + r'\(#?([^,]+)' # "(#buildno" + r'(?:,\s*([\w ]*)' # ", builddate" + r'(?:,\s*([\w :]*))?)?\)\s*' # ", buildtime)" + r'\[([^\]]+)\]?', re.ASCII) # "[compiler]" + match = cpython_sys_version_parser.match(sys_version) + if match is None: + raise ValueError( + 'failed to parse CPython sys.version: %s' % + repr(sys_version)) + version, buildno, builddate, buildtime, compiler = \ + match.groups() + name = 'CPython' + if builddate is None: + builddate = '' + elif buildtime: + builddate = builddate + ' ' + buildtime + + if hasattr(sys, '_git'): + _, branch, revision = sys._git + elif hasattr(sys, '_mercurial'): + _, branch, revision = sys._mercurial + else: + branch = '' + revision = '' + + # Add the patchlevel version if missing + l = version.split('.') + if len(l) == 2: + l.append('0') + version = '.'.join(l) + + # Build and cache the result + result = (name, version, branch, revision, buildno, builddate, compiler) + _sys_version_cache[sys_version] = result + return result + +def python_implementation(): + + """ Returns a string identifying the Python implementation. + + Currently, the following implementations are identified: + 'CPython' (C implementation of Python), + 'Jython' (Java implementation of Python), + 'PyPy' (Python implementation of Python). + + """ + return _sys_version()[0] + +def python_version(): + + """ Returns the Python version as string 'major.minor.patchlevel' + + Note that unlike the Python sys.version, the returned value + will always include the patchlevel (it defaults to 0). + + """ + return _sys_version()[1] + +def python_version_tuple(): + + """ Returns the Python version as tuple (major, minor, patchlevel) + of strings. + + Note that unlike the Python sys.version, the returned value + will always include the patchlevel (it defaults to 0). + + """ + return tuple(_sys_version()[1].split('.')) + +def python_branch(): + + """ Returns a string identifying the Python implementation + branch. + + For CPython this is the SCM branch from which the + Python binary was built. + + If not available, an empty string is returned. 
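        Illustrative example (hypothetical; a release build typically
        reports the release tag, and '' when no SCM metadata is available):

            >>> python_branch()
            'tags/v3.14.4'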
+ + """ + + return _sys_version()[2] + +def python_revision(): + + """ Returns a string identifying the Python implementation + revision. + + For CPython this is the SCM revision from which the + Python binary was built. + + If not available, an empty string is returned. + + """ + return _sys_version()[3] + +def python_build(): + + """ Returns a tuple (buildno, builddate) stating the Python + build number and date as strings. + + """ + return _sys_version()[4:6] + +def python_compiler(): + + """ Returns a string identifying the compiler used for compiling + Python. + + """ + return _sys_version()[6] + +### The Opus Magnum of platform strings :-) + +_platform_cache = {} + +def platform(aliased=False, terse=False): + + """ Returns a single string identifying the underlying platform + with as much useful information as possible (but no more :). + + The output is intended to be human readable rather than + machine parseable. It may look different on different + platforms and this is intended. + + If "aliased" is true, the function will use aliases for + various platforms that report system names which differ from + their common names, e.g. SunOS will be reported as + Solaris. The system_alias() function is used to implement + this. + + Setting terse to true causes the function to return only the + absolute minimum information needed to identify the platform. + + """ + result = _platform_cache.get((aliased, terse), None) + if result is not None: + return result + + # Get uname information and then apply platform specific cosmetics + # to it... + system, node, release, version, machine, processor = uname() + if machine == processor: + processor = '' + if aliased: + system, release, version = system_alias(system, release, version) + + if system == 'Darwin': + # macOS and iOS both report as a "Darwin" kernel + if sys.platform == "ios": + system, release, _, _ = ios_ver() + else: + macos_release = mac_ver()[0] + if macos_release: + system = 'macOS' + release = macos_release + + if system == 'Windows': + # MS platforms + rel, vers, csd, ptype = win32_ver(version) + if terse: + platform = _platform(system, release) + else: + platform = _platform(system, release, version, csd) + + elif system == 'Linux': + # check for libc vs. glibc + libcname, libcversion = libc_ver() + platform = _platform(system, release, machine, processor, + 'with', + libcname+libcversion) + elif system == 'Java': + # Java platforms + r, v, vminfo, (os_name, os_version, os_arch) = java_ver() + if terse or not os_name: + platform = _platform(system, release, version) + else: + platform = _platform(system, release, version, + 'on', + os_name, os_version, os_arch) + + else: + # Generic handler + if terse: + platform = _platform(system, release) + else: + bits, linkage = architecture(sys.executable) + platform = _platform(system, release, machine, + processor, bits, linkage) + + _platform_cache[(aliased, terse)] = platform + return platform + +### freedesktop.org os-release standard +# https://www.freedesktop.org/software/systemd/man/os-release.html + +# /etc takes precedence over /usr/lib +_os_release_candidates = ("/etc/os-release", "/usr/lib/os-release") +_os_release_cache = None + + +def _parse_os_release(lines): + # These fields are mandatory fields with well-known defaults + # in practice all Linux distributions override NAME, ID, and PRETTY_NAME. + info = { + "NAME": "Linux", + "ID": "linux", + "PRETTY_NAME": "Linux", + } + + # NAME=value with optional quotes (' or "). 
The regular expression is less + # strict than shell lexer, but that's ok. + os_release_line = re.compile( + "^(?P<name>[a-zA-Z0-9_]+)=(?P<quote>[\"\']?)(?P<value>.*)(?P=quote)$" + ) + # unescape five special characters mentioned in the standard + os_release_unescape = re.compile(r"\\([\\\$\"\'`])") + + for line in lines: + mo = os_release_line.match(line) + if mo is not None: + info[mo.group('name')] = os_release_unescape.sub( + r"\1", mo.group('value') + ) + + return info + + +def freedesktop_os_release(): + """Return operating system identification from freedesktop.org os-release + """ + global _os_release_cache + + if _os_release_cache is None: + errno = None + for candidate in _os_release_candidates: + try: + with open(candidate, encoding="utf-8") as f: + _os_release_cache = _parse_os_release(f) + break + except OSError as e: + errno = e.errno + else: + raise OSError( + errno, + f"Unable to read files {', '.join(_os_release_candidates)}" + ) + + return _os_release_cache.copy() + + +def invalidate_caches(): + """Invalidate the cached results.""" + global _uname_cache + _uname_cache = None + + global _os_release_cache + _os_release_cache = None + + _sys_version_cache.clear() + _platform_cache.clear() + + +### Command line interface + +def _parse_args(args: list[str] | None): + import argparse + + parser = argparse.ArgumentParser(color=True) + parser.add_argument("args", nargs="*", choices=["nonaliased", "terse"]) + parser.add_argument( + "--terse", + action="store_true", + help=( + "return only the absolute minimum information needed " + "to identify the platform" + ), + ) + parser.add_argument( + "--nonaliased", + dest="aliased", + action="store_false", + help=( + "disable system/OS name aliasing. If aliasing is enabled, " + "some platforms report system names different from " + "their common names, e.g. SunOS is reported as Solaris" + ), + ) + + return parser.parse_args(args) + + +def _main(args: list[str] | None = None): + args = _parse_args(args) + + terse = args.terse or ("terse" in args.args) + aliased = args.aliased and ('nonaliased' not in args.args) + + print(platform(aliased, terse)) + + +if __name__ == "__main__": + _main() diff --git a/Python313_13_x64_Template/Lib/plistlib.py b/Python314_4_x64_Template/Lib/plistlib.py similarity index 100% rename from Python313_13_x64_Template/Lib/plistlib.py rename to Python314_4_x64_Template/Lib/plistlib.py diff --git a/Python313_13_x64_Template/Lib/poplib.py b/Python314_4_x64_Template/Lib/poplib.py similarity index 100% rename from Python313_13_x64_Template/Lib/poplib.py rename to Python314_4_x64_Template/Lib/poplib.py diff --git a/Python314_4_x64_Template/Lib/posixpath.py b/Python314_4_x64_Template/Lib/posixpath.py new file mode 100644 index 00000000..ad86cc06 --- /dev/null +++ b/Python314_4_x64_Template/Lib/posixpath.py @@ -0,0 +1,592 @@ +"""Common operations on Posix pathnames. + +Instead of importing this module directly, import os and refer to +this module as os.path. The "os.path" name is an alias for this +module on Posix systems; on other systems (e.g. Windows), +os.path provides the same operations in a manner specific to that +platform, and is an alias to another module (e.g. ntpath). + +Some of this can actually be useful on non-Posix systems too, e.g. +for manipulation of the pathname component of URLs. +""" + +# Strings representing various path-related bits and pieces. +# These are primarily for export; internally, they are hardcoded. +# Should be set before imports for resolving cyclic dependency. +curdir = '.' +pardir = '..' +extsep = '.'
+sep = '/' +pathsep = ':' +defpath = '/bin:/usr/bin' +altsep = None +devnull = '/dev/null' + +import errno +import os +import sys +import stat +import genericpath +from genericpath import * + +__all__ = ["normcase","isabs","join","splitdrive","splitroot","split","splitext", + "basename","dirname","commonprefix","getsize","getmtime", + "getatime","getctime","islink","exists","lexists","isdir","isfile", + "ismount", "expanduser","expandvars","normpath","abspath", + "samefile","sameopenfile","samestat", + "curdir","pardir","sep","pathsep","defpath","altsep","extsep", + "devnull","realpath","supports_unicode_filenames","relpath", + "commonpath", "isjunction","isdevdrive","ALLOW_MISSING"] + + +def _get_sep(path): + if isinstance(path, bytes): + return b'/' + else: + return '/' + +# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac. +# On MS-DOS this may also turn slashes into backslashes; however, other +# normalizations (such as optimizing '../' away) are not allowed +# (another function should be defined to do that). + +def normcase(s): + """Normalize case of pathname. Has no effect under Posix""" + return os.fspath(s) + + +# Return whether a path is absolute. +# Trivial in Posix, harder on the Mac or MS-DOS. + +def isabs(s): + """Test whether a path is absolute""" + s = os.fspath(s) + sep = _get_sep(s) + return s.startswith(sep) + + +# Join pathnames. +# Ignore the previous parts if a part is absolute. +# Insert a '/' unless the first part is empty or already ends in '/'. + +def join(a, *p): + """Join two or more pathname components, inserting '/' as needed. + If any component is an absolute path, all previous path components + will be discarded. An empty last part will result in a path that + ends with a separator.""" + a = os.fspath(a) + sep = _get_sep(a) + path = a + try: + for b in p: + b = os.fspath(b) + if b.startswith(sep) or not path: + path = b + elif path.endswith(sep): + path += b + else: + path += sep + b + except (TypeError, AttributeError, BytesWarning): + genericpath._check_arg_types('join', a, *p) + raise + return path + + +# Split a path in head (everything up to the last '/') and tail (the +# rest). If the path ends in '/', tail will be empty. If there is no +# '/' in the path, head will be empty. +# Trailing '/'es are stripped from head unless it is the root. + +def split(p): + """Split a pathname. Returns tuple "(head, tail)" where "tail" is + everything after the final slash. Either part may be empty.""" + p = os.fspath(p) + sep = _get_sep(p) + i = p.rfind(sep) + 1 + head, tail = p[:i], p[i:] + if head and head != sep*len(head): + head = head.rstrip(sep) + return head, tail + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +def splitext(p): + p = os.fspath(p) + if isinstance(p, bytes): + sep = b'/' + extsep = b'.' + else: + sep = '/' + extsep = '.' + return genericpath._splitext(p, sep, None, extsep) +splitext.__doc__ = genericpath._splitext.__doc__ + +# Split a pathname into a drive specification and the rest of the +# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty. + +def splitdrive(p): + """Split a pathname into drive and path. On Posix, drive is always + empty.""" + p = os.fspath(p) + return p[:0], p + + +try: + from posix import _path_splitroot_ex as splitroot +except ImportError: + def splitroot(p): + """Split a pathname into drive, root and tail. 
+ + The tail contains anything after the root.""" + p = os.fspath(p) + if isinstance(p, bytes): + sep = b'/' + empty = b'' + else: + sep = '/' + empty = '' + if p[:1] != sep: + # Relative path, e.g.: 'foo' + return empty, empty, p + elif p[1:2] != sep or p[2:3] == sep: + # Absolute path, e.g.: '/foo', '///foo', '////foo', etc. + return empty, sep, p[1:] + else: + # Precisely two leading slashes, e.g.: '//foo'. Implementation defined per POSIX, see + # https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13 + return empty, p[:2], p[2:] + + +# Return the tail (basename) part of a path, same as split(path)[1]. + +def basename(p): + """Returns the final component of a pathname""" + p = os.fspath(p) + sep = _get_sep(p) + i = p.rfind(sep) + 1 + return p[i:] + + +# Return the head (dirname) part of a path, same as split(path)[0]. + +def dirname(p): + """Returns the directory component of a pathname""" + p = os.fspath(p) + sep = _get_sep(p) + i = p.rfind(sep) + 1 + head = p[:i] + if head and head != sep*len(head): + head = head.rstrip(sep) + return head + + +# Is a path a mount point? +# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?) + +def ismount(path): + """Test whether a path is a mount point""" + try: + s1 = os.lstat(path) + except (OSError, ValueError): + # It doesn't exist -- so not a mount point. :-) + return False + else: + # A symlink can never be a mount point + if stat.S_ISLNK(s1.st_mode): + return False + + path = os.fspath(path) + if isinstance(path, bytes): + parent = join(path, b'..') + else: + parent = join(path, '..') + try: + s2 = os.lstat(parent) + except OSError: + parent = realpath(parent) + try: + s2 = os.lstat(parent) + except OSError: + return False + + # path/.. on a different device as path or the same i-node as path + return s1.st_dev != s2.st_dev or s1.st_ino == s2.st_ino + + +# Expand paths beginning with '~' or '~user'. +# '~' means $HOME; '~user' means that user's home directory. +# If the path doesn't begin with '~', or if the user or $HOME is unknown, +# the path is returned unchanged (leaving error reporting to whatever +# function is called with the expanded path as argument). +# See also module 'glob' for expansion of *, ? and [...] in pathnames. +# (A function should also be defined to do full *sh-style environment +# variable expansion.) + +def expanduser(path): + """Expand ~ and ~user constructions. 
If user or $HOME is unknown, + do nothing.""" + path = os.fspath(path) + if isinstance(path, bytes): + tilde = b'~' + else: + tilde = '~' + if not path.startswith(tilde): + return path + sep = _get_sep(path) + i = path.find(sep, 1) + if i < 0: + i = len(path) + if i == 1: + if 'HOME' not in os.environ: + try: + import pwd + except ImportError: + # pwd module unavailable, return path unchanged + return path + try: + userhome = pwd.getpwuid(os.getuid()).pw_dir + except KeyError: + # bpo-10496: if the current user identifier doesn't exist in the + # password database, return the path unchanged + return path + else: + userhome = os.environ['HOME'] + else: + try: + import pwd + except ImportError: + # pwd module unavailable, return path unchanged + return path + name = path[1:i] + if isinstance(name, bytes): + name = os.fsdecode(name) + try: + pwent = pwd.getpwnam(name) + except KeyError: + # bpo-10496: if the user name from the path doesn't exist in the + # password database, return the path unchanged + return path + userhome = pwent.pw_dir + # if no user home, return the path unchanged on VxWorks + if userhome is None and sys.platform == "vxworks": + return path + if isinstance(path, bytes): + userhome = os.fsencode(userhome) + userhome = userhome.rstrip(sep) + return (userhome + path[i:]) or sep + + +# Expand paths containing shell variable substitutions. +# This expands the forms $variable and ${variable} only. +# Non-existent variables are left unchanged. + +_varpattern = r'\$(\w+|\{[^}]*\}?)' +_varsub = None +_varsubb = None + +def expandvars(path): + """Expand shell variables of form $var and ${var}. Unknown variables + are left unchanged.""" + path = os.fspath(path) + global _varsub, _varsubb + if isinstance(path, bytes): + if b'$' not in path: + return path + if not _varsubb: + import re + _varsubb = re.compile(_varpattern.encode(), re.ASCII).sub + sub = _varsubb + start = b'{' + end = b'}' + environ = getattr(os, 'environb', None) + else: + if '$' not in path: + return path + if not _varsub: + import re + _varsub = re.compile(_varpattern, re.ASCII).sub + sub = _varsub + start = '{' + end = '}' + environ = os.environ + + def repl(m): + name = m[1] + if name.startswith(start): + if not name.endswith(end): + return m[0] + name = name[1:-1] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(name)]) + else: + value = environ[name] + except KeyError: + return m[0] + else: + return value + + return sub(repl, path) + + +# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B. +# It should be understood that this may change the meaning of the path +# if it contains symbolic links! + +try: + from posix import _path_normpath as normpath + +except ImportError: + def normpath(path): + """Normalize path, eliminating double slashes, etc.""" + path = os.fspath(path) + if isinstance(path, bytes): + sep = b'/' + dot = b'.' + dotdot = b'..' + else: + sep = '/' + dot = '.' + dotdot = '..' 
+ if not path: + return dot + _, initial_slashes, path = splitroot(path) + comps = path.split(sep) + new_comps = [] + for comp in comps: + if not comp or comp == dot: + continue + if (comp != dotdot or (not initial_slashes and not new_comps) or + (new_comps and new_comps[-1] == dotdot)): + new_comps.append(comp) + elif new_comps: + new_comps.pop() + comps = new_comps + path = initial_slashes + sep.join(comps) + return path or dot + + +def abspath(path): + """Return an absolute path.""" + path = os.fspath(path) + if isinstance(path, bytes): + if not path.startswith(b'/'): + path = join(os.getcwdb(), path) + else: + if not path.startswith('/'): + path = join(os.getcwd(), path) + return normpath(path) + + +# Return a canonical path (i.e. the absolute location of a file on the +# filesystem). + +def realpath(filename, *, strict=False): + """Return the canonical path of the specified filename, eliminating any +symbolic links encountered in the path.""" + filename = os.fspath(filename) + if isinstance(filename, bytes): + sep = b'/' + curdir = b'.' + pardir = b'..' + getcwd = os.getcwdb + else: + sep = '/' + curdir = '.' + pardir = '..' + getcwd = os.getcwd + if strict is ALLOW_MISSING: + ignored_error = FileNotFoundError + strict = True + elif strict: + ignored_error = () + else: + ignored_error = OSError + + lstat = os.lstat + readlink = os.readlink + maxlinks = None + + # The stack of unresolved path parts. When popped, a special value of None + # indicates that a symlink target has been resolved, and that the original + # symlink path can be retrieved by popping again. The [::-1] slice is a + # very fast way of spelling list(reversed(...)). + rest = filename.split(sep)[::-1] + + # Number of unprocessed parts in 'rest'. This can differ from len(rest) + # later, because 'rest' might contain markers for unresolved symlinks. + part_count = len(rest) + + # The resolved path, which is absolute throughout this function. + # Note: getcwd() returns a normalized and symlink-free path. + path = sep if filename.startswith(sep) else getcwd() + + # Mapping from symlink paths to *fully resolved* symlink targets. If a + # symlink is encountered but not yet resolved, the value is None. This is + # used both to detect symlink loops and to speed up repeated traversals of + # the same links. + seen = {} + + # Number of symlinks traversed. When the number of traversals is limited + # by *maxlinks*, this is used instead of *seen* to detect symlink loops. + link_count = 0 + + while part_count: + name = rest.pop() + if name is None: + # resolved symlink target + seen[rest.pop()] = path + continue + part_count -= 1 + if not name or name == curdir: + # current dir + continue + if name == pardir: + # parent dir + path = path[:path.rindex(sep)] or sep + continue + if path == sep: + newpath = path + name + else: + newpath = path + sep + name + try: + st_mode = lstat(newpath).st_mode + if not stat.S_ISLNK(st_mode): + if strict and part_count and not stat.S_ISDIR(st_mode): + raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), + newpath) + path = newpath + continue + elif maxlinks is not None: + link_count += 1 + if link_count > maxlinks: + if strict: + raise OSError(errno.ELOOP, os.strerror(errno.ELOOP), + newpath) + path = newpath + continue + elif newpath in seen: + # Already seen this path + path = seen[newpath] + if path is not None: + # use cached value + continue + # The symlink is not resolved, so we must have a symlink loop. 
+ if strict: + raise OSError(errno.ELOOP, os.strerror(errno.ELOOP), + newpath) + path = newpath + continue + target = readlink(newpath) + except ignored_error: + pass + else: + # Resolve the symbolic link + if target.startswith(sep): + # Symlink target is absolute; reset resolved path. + path = sep + if maxlinks is None: + # Mark this symlink as seen but not fully resolved. + seen[newpath] = None + # Push the symlink path onto the stack, and signal its specialness + # by also pushing None. When these entries are popped, we'll + # record the fully-resolved symlink target in the 'seen' mapping. + rest.append(newpath) + rest.append(None) + # Push the unresolved symlink target parts onto the stack. + target_parts = target.split(sep)[::-1] + rest.extend(target_parts) + part_count += len(target_parts) + continue + # An error occurred and was ignored. + path = newpath + + return path + + +supports_unicode_filenames = (sys.platform == 'darwin') + +def relpath(path, start=None): + """Return a relative version of a path""" + + path = os.fspath(path) + if not path: + raise ValueError("no path specified") + + if isinstance(path, bytes): + curdir = b'.' + sep = b'/' + pardir = b'..' + else: + curdir = '.' + sep = '/' + pardir = '..' + + if start is None: + start = curdir + else: + start = os.fspath(start) + + try: + start_tail = abspath(start).lstrip(sep) + path_tail = abspath(path).lstrip(sep) + start_list = start_tail.split(sep) if start_tail else [] + path_list = path_tail.split(sep) if path_tail else [] + # Work out how much of the filepath is shared by start and path. + i = len(commonprefix([start_list, path_list])) + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return sep.join(rel_list) + except (TypeError, AttributeError, BytesWarning, DeprecationWarning): + genericpath._check_arg_types('relpath', path, start) + raise + + +# Return the longest common sub-path of the sequence of paths given as input. +# The paths are not normalized before comparing them (this is the +# responsibility of the caller). Any trailing separator is stripped from the +# returned path. + +def commonpath(paths): + """Given a sequence of path names, returns the longest common sub-path.""" + + paths = tuple(map(os.fspath, paths)) + + if not paths: + raise ValueError('commonpath() arg is an empty sequence') + + if isinstance(paths[0], bytes): + sep = b'/' + curdir = b'.' + else: + sep = '/' + curdir = '.' + + try: + split_paths = [path.split(sep) for path in paths] + + try: + isabs, = {p.startswith(sep) for p in paths} + except ValueError: + raise ValueError("Can't mix absolute and relative paths") from None + + split_paths = [[c for c in s if c and c != curdir] for s in split_paths] + s1 = min(split_paths) + s2 = max(split_paths) + common = s1 + for i, c in enumerate(s1): + if c != s2[i]: + common = s1[:i] + break + + prefix = sep if isabs else sep[:0] + return prefix + sep.join(common) + except (TypeError, AttributeError): + genericpath._check_arg_types('commonpath', *paths) + raise diff --git a/Python314_4_x64_Template/Lib/pprint.py b/Python314_4_x64_Template/Lib/pprint.py new file mode 100644 index 00000000..dc0953ce --- /dev/null +++ b/Python314_4_x64_Template/Lib/pprint.py @@ -0,0 +1,675 @@ +# Author: Fred L. Drake, Jr. +# fdrake@acm.org +# +# This is a simple little module I wrote to make life easier. I didn't +# see anything quite like it in the library, though I may have overlooked +# something. 
I wrote this when I was trying to read some heavily nested +# tuples with fairly non-descriptive content. This is modeled very much +# after Lisp/Scheme - style pretty-printing of lists. If you find it +# useful, thank small children who sleep at night. + +"""Support to pretty-print lists, tuples, & dictionaries recursively. + +Very simple, but useful, especially in debugging data structures. + +Classes +------- + +PrettyPrinter() + Handle pretty-printing operations onto a stream using a configured + set of formatting parameters. + +Functions +--------- + +pformat() + Format a Python object into a pretty-printed representation. + +pprint() + Pretty-print a Python object to a stream [default is sys.stdout]. + +saferepr() + Generate a 'standard' repr()-like value, but protect against recursive + data structures. + +""" + +import collections as _collections +import sys as _sys +import types as _types +from io import StringIO as _StringIO + +__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr", + "PrettyPrinter", "pp"] + + +def pprint(object, stream=None, indent=1, width=80, depth=None, *, + compact=False, sort_dicts=True, underscore_numbers=False): + """Pretty-print a Python object to a stream [default is sys.stdout].""" + printer = PrettyPrinter( + stream=stream, indent=indent, width=width, depth=depth, + compact=compact, sort_dicts=sort_dicts, + underscore_numbers=underscore_numbers) + printer.pprint(object) + + +def pformat(object, indent=1, width=80, depth=None, *, + compact=False, sort_dicts=True, underscore_numbers=False): + """Format a Python object into a pretty-printed representation.""" + return PrettyPrinter(indent=indent, width=width, depth=depth, + compact=compact, sort_dicts=sort_dicts, + underscore_numbers=underscore_numbers).pformat(object) + + +def pp(object, *args, sort_dicts=False, **kwargs): + """Pretty-print a Python object""" + pprint(object, *args, sort_dicts=sort_dicts, **kwargs) + + +def saferepr(object): + """Version of repr() which can handle recursive data structures.""" + return PrettyPrinter()._safe_repr(object, {}, None, 0)[0] + + +def isreadable(object): + """Determine if saferepr(object) is readable by eval().""" + return PrettyPrinter()._safe_repr(object, {}, None, 0)[1] + + +def isrecursive(object): + """Determine if object requires a recursive representation.""" + return PrettyPrinter()._safe_repr(object, {}, None, 0)[2] + + +class _safe_key: + """Helper function for key functions when sorting unorderable objects. + + The wrapped-object will fallback to a Py2.x style comparison for + unorderable types (sorting first comparing the type name and then by + the obj ids). Does not work recursively, so dict.items() must have + _safe_key applied to both the key and the value. + + """ + + __slots__ = ['obj'] + + def __init__(self, obj): + self.obj = obj + + def __lt__(self, other): + try: + return self.obj < other.obj + except TypeError: + return ((str(type(self.obj)), id(self.obj)) < \ + (str(type(other.obj)), id(other.obj))) + + +def _safe_tuple(t): + "Helper function for comparing 2-tuples" + return _safe_key(t[0]), _safe_key(t[1]) + + +class PrettyPrinter: + def __init__(self, indent=1, width=80, depth=None, stream=None, *, + compact=False, sort_dicts=True, underscore_numbers=False): + """Handle pretty printing operations onto a stream using a set of + configured parameters. + + indent + Number of spaces to indent for each level of nesting. + + width + Attempted maximum number of columns in the output. 
+ + depth + The maximum depth to print out nested structures. + + stream + The desired output stream. If omitted (or false), the standard + output stream available at construction will be used. + + compact + If true, several items will be combined in one line. + + sort_dicts + If true, dict keys are sorted. + + underscore_numbers + If true, digit groups are separated with underscores. + + """ + indent = int(indent) + width = int(width) + if indent < 0: + raise ValueError('indent must be >= 0') + if depth is not None and depth <= 0: + raise ValueError('depth must be > 0') + if not width: + raise ValueError('width must be != 0') + self._depth = depth + self._indent_per_level = indent + self._width = width + if stream is not None: + self._stream = stream + else: + self._stream = _sys.stdout + self._compact = bool(compact) + self._sort_dicts = sort_dicts + self._underscore_numbers = underscore_numbers + + def pprint(self, object): + if self._stream is not None: + self._format(object, self._stream, 0, 0, {}, 0) + self._stream.write("\n") + + def pformat(self, object): + sio = _StringIO() + self._format(object, sio, 0, 0, {}, 0) + return sio.getvalue() + + def isrecursive(self, object): + return self.format(object, {}, 0, 0)[2] + + def isreadable(self, object): + s, readable, recursive = self.format(object, {}, 0, 0) + return readable and not recursive + + def _format(self, object, stream, indent, allowance, context, level): + objid = id(object) + if objid in context: + stream.write(_recursion(object)) + self._recursive = True + self._readable = False + return + rep = self._repr(object, context, level) + max_width = self._width - indent - allowance + if len(rep) > max_width: + p = self._dispatch.get(type(object).__repr__, None) + # Lazy import to improve module import time + from dataclasses import is_dataclass + + if p is not None: + context[objid] = 1 + p(self, object, stream, indent, allowance, context, level + 1) + del context[objid] + return + elif (is_dataclass(object) and + not isinstance(object, type) and + object.__dataclass_params__.repr and + # Check dataclass has generated repr method. 
+ hasattr(object.__repr__, "__wrapped__") and + "__create_fn__" in object.__repr__.__wrapped__.__qualname__): + context[objid] = 1 + self._pprint_dataclass(object, stream, indent, allowance, context, level + 1) + del context[objid] + return + stream.write(rep) + + def _pprint_dataclass(self, object, stream, indent, allowance, context, level): + # Lazy import to improve module import time + from dataclasses import fields as dataclass_fields + + cls_name = object.__class__.__name__ + indent += len(cls_name) + 1 + items = [(f.name, getattr(object, f.name)) for f in dataclass_fields(object) if f.repr] + stream.write(cls_name + '(') + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(')') + + _dispatch = {} + + def _pprint_dict(self, object, stream, indent, allowance, context, level): + write = stream.write + write('{') + if self._indent_per_level > 1: + write((self._indent_per_level - 1) * ' ') + length = len(object) + if length: + if self._sort_dicts: + items = sorted(object.items(), key=_safe_tuple) + else: + items = object.items() + self._format_dict_items(items, stream, indent, allowance + 1, + context, level) + write('}') + + _dispatch[dict.__repr__] = _pprint_dict + + def _pprint_ordered_dict(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '(') + self._format(list(object.items()), stream, + indent + len(cls.__name__) + 1, allowance + 1, + context, level) + stream.write(')') + + _dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict + + def _pprint_list(self, object, stream, indent, allowance, context, level): + stream.write('[') + self._format_items(object, stream, indent, allowance + 1, + context, level) + stream.write(']') + + _dispatch[list.__repr__] = _pprint_list + + def _pprint_tuple(self, object, stream, indent, allowance, context, level): + stream.write('(') + endchar = ',)' if len(object) == 1 else ')' + self._format_items(object, stream, indent, allowance + len(endchar), + context, level) + stream.write(endchar) + + _dispatch[tuple.__repr__] = _pprint_tuple + + def _pprint_set(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + typ = object.__class__ + if typ is set: + stream.write('{') + endchar = '}' + else: + stream.write(typ.__name__ + '({') + endchar = '})' + indent += len(typ.__name__) + 1 + object = sorted(object, key=_safe_key) + self._format_items(object, stream, indent, allowance + len(endchar), + context, level) + stream.write(endchar) + + _dispatch[set.__repr__] = _pprint_set + _dispatch[frozenset.__repr__] = _pprint_set + + def _pprint_str(self, object, stream, indent, allowance, context, level): + write = stream.write + if not len(object): + write(repr(object)) + return + chunks = [] + lines = object.splitlines(True) + if level == 1: + indent += 1 + allowance += 1 + max_width1 = max_width = self._width - indent + for i, line in enumerate(lines): + rep = repr(line) + if i == len(lines) - 1: + max_width1 -= allowance + if len(rep) <= max_width1: + chunks.append(rep) + else: + # Lazy import to improve module import time + import re + + # A list of alternating (non-space, space) strings + parts = re.findall(r'\S*\s*', line) + assert parts + assert not parts[-1] + parts.pop() # drop empty last part + max_width2 = max_width + current = '' + for j, part in enumerate(parts): + candidate = current + part + if j == len(parts) - 
1 and i == len(lines) - 1: + max_width2 -= allowance + if len(repr(candidate)) > max_width2: + if current: + chunks.append(repr(current)) + current = part + else: + current = candidate + if current: + chunks.append(repr(current)) + if len(chunks) == 1: + write(rep) + return + if level == 1: + write('(') + for i, rep in enumerate(chunks): + if i > 0: + write('\n' + ' '*indent) + write(rep) + if level == 1: + write(')') + + _dispatch[str.__repr__] = _pprint_str + + def _pprint_bytes(self, object, stream, indent, allowance, context, level): + write = stream.write + if len(object) <= 4: + write(repr(object)) + return + parens = level == 1 + if parens: + indent += 1 + allowance += 1 + write('(') + delim = '' + for rep in _wrap_bytes_repr(object, self._width - indent, allowance): + write(delim) + write(rep) + if not delim: + delim = '\n' + ' '*indent + if parens: + write(')') + + _dispatch[bytes.__repr__] = _pprint_bytes + + def _pprint_bytearray(self, object, stream, indent, allowance, context, level): + write = stream.write + write('bytearray(') + self._pprint_bytes(bytes(object), stream, indent + 10, + allowance + 1, context, level + 1) + write(')') + + _dispatch[bytearray.__repr__] = _pprint_bytearray + + def _pprint_mappingproxy(self, object, stream, indent, allowance, context, level): + stream.write('mappingproxy(') + self._format(object.copy(), stream, indent + 13, allowance + 1, + context, level) + stream.write(')') + + _dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy + + def _pprint_simplenamespace(self, object, stream, indent, allowance, context, level): + if type(object) is _types.SimpleNamespace: + # The SimpleNamespace repr is "namespace" instead of the class + # name, so we do the same here. For subclasses; use the class name. + cls_name = 'namespace' + else: + cls_name = object.__class__.__name__ + indent += len(cls_name) + 1 + items = object.__dict__.items() + stream.write(cls_name + '(') + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(')') + + _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace + + def _format_dict_items(self, items, stream, indent, allowance, context, + level): + write = stream.write + indent += self._indent_per_level + delimnl = ',\n' + ' ' * indent + last_index = len(items) - 1 + for i, (key, ent) in enumerate(items): + last = i == last_index + rep = self._repr(key, context, level) + write(rep) + write(': ') + self._format(ent, stream, indent + len(rep) + 2, + allowance if last else 1, + context, level) + if not last: + write(delimnl) + + def _format_namespace_items(self, items, stream, indent, allowance, context, level): + write = stream.write + delimnl = ',\n' + ' ' * indent + last_index = len(items) - 1 + for i, (key, ent) in enumerate(items): + last = i == last_index + write(key) + write('=') + if id(ent) in context: + # Special-case representation of recursion to match standard + # recursive dataclass repr. 
+ write("...") + else: + self._format(ent, stream, indent + len(key) + 1, + allowance if last else 1, + context, level) + if not last: + write(delimnl) + + def _format_items(self, items, stream, indent, allowance, context, level): + write = stream.write + indent += self._indent_per_level + if self._indent_per_level > 1: + write((self._indent_per_level - 1) * ' ') + delimnl = ',\n' + ' ' * indent + delim = '' + width = max_width = self._width - indent + 1 + it = iter(items) + try: + next_ent = next(it) + except StopIteration: + return + last = False + while not last: + ent = next_ent + try: + next_ent = next(it) + except StopIteration: + last = True + max_width -= allowance + width -= allowance + if self._compact: + rep = self._repr(ent, context, level) + w = len(rep) + 2 + if width < w: + width = max_width + if delim: + delim = delimnl + if width >= w: + width -= w + write(delim) + delim = ', ' + write(rep) + continue + write(delim) + delim = delimnl + self._format(ent, stream, indent, + allowance if last else 1, + context, level) + + def _repr(self, object, context, level): + repr, readable, recursive = self.format(object, context.copy(), + self._depth, level) + if not readable: + self._readable = False + if recursive: + self._recursive = True + return repr + + def format(self, object, context, maxlevels, level): + """Format object for a specific context, returning a string + and flags indicating whether the representation is 'readable' + and whether the object represents a recursive construct. + """ + return self._safe_repr(object, context, maxlevels, level) + + def _pprint_default_dict(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + rdf = self._repr(object.default_factory, context, level) + cls = object.__class__ + indent += len(cls.__name__) + 1 + stream.write('%s(%s,\n%s' % (cls.__name__, rdf, ' ' * indent)) + self._pprint_dict(object, stream, indent, allowance + 1, context, level) + stream.write(')') + + _dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict + + def _pprint_counter(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '({') + if self._indent_per_level > 1: + stream.write((self._indent_per_level - 1) * ' ') + items = object.most_common() + self._format_dict_items(items, stream, + indent + len(cls.__name__) + 1, allowance + 2, + context, level) + stream.write('})') + + _dispatch[_collections.Counter.__repr__] = _pprint_counter + + def _pprint_chain_map(self, object, stream, indent, allowance, context, level): + if not len(object.maps): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '(') + indent += len(cls.__name__) + 1 + for i, m in enumerate(object.maps): + if i == len(object.maps) - 1: + self._format(m, stream, indent, allowance + 1, context, level) + stream.write(')') + else: + self._format(m, stream, indent, 1, context, level) + stream.write(',\n' + ' ' * indent) + + _dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map + + def _pprint_deque(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '(') + indent += len(cls.__name__) + 1 + stream.write('[') + if object.maxlen is None: + self._format_items(object, stream, indent, allowance + 2, + context, level) + stream.write('])') + else: + 
self._format_items(object, stream, indent, 2, + context, level) + rml = self._repr(object.maxlen, context, level) + stream.write('],\n%smaxlen=%s)' % (' ' * indent, rml)) + + _dispatch[_collections.deque.__repr__] = _pprint_deque + + def _pprint_user_dict(self, object, stream, indent, allowance, context, level): + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserDict.__repr__] = _pprint_user_dict + + def _pprint_user_list(self, object, stream, indent, allowance, context, level): + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserList.__repr__] = _pprint_user_list + + def _pprint_user_string(self, object, stream, indent, allowance, context, level): + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserString.__repr__] = _pprint_user_string + + def _safe_repr(self, object, context, maxlevels, level): + # Return triple (repr_string, isreadable, isrecursive). + typ = type(object) + if typ in _builtin_scalars: + return repr(object), True, False + + r = getattr(typ, "__repr__", None) + + if issubclass(typ, int) and r is int.__repr__: + if self._underscore_numbers: + return f"{object:_d}", True, False + else: + return repr(object), True, False + + if issubclass(typ, dict) and r is dict.__repr__: + if not object: + return "{}", True, False + objid = id(object) + if maxlevels and level >= maxlevels: + return "{...}", False, objid in context + if objid in context: + return _recursion(object), False, True + context[objid] = 1 + readable = True + recursive = False + components = [] + append = components.append + level += 1 + if self._sort_dicts: + items = sorted(object.items(), key=_safe_tuple) + else: + items = object.items() + for k, v in items: + krepr, kreadable, krecur = self.format( + k, context, maxlevels, level) + vrepr, vreadable, vrecur = self.format( + v, context, maxlevels, level) + append("%s: %s" % (krepr, vrepr)) + readable = readable and kreadable and vreadable + if krecur or vrecur: + recursive = True + del context[objid] + return "{%s}" % ", ".join(components), readable, recursive + + if (issubclass(typ, list) and r is list.__repr__) or \ + (issubclass(typ, tuple) and r is tuple.__repr__): + if issubclass(typ, list): + if not object: + return "[]", True, False + format = "[%s]" + elif len(object) == 1: + format = "(%s,)" + else: + if not object: + return "()", True, False + format = "(%s)" + objid = id(object) + if maxlevels and level >= maxlevels: + return format % "...", False, objid in context + if objid in context: + return _recursion(object), False, True + context[objid] = 1 + readable = True + recursive = False + components = [] + append = components.append + level += 1 + for o in object: + orepr, oreadable, orecur = self.format( + o, context, maxlevels, level) + append(orepr) + if not oreadable: + readable = False + if orecur: + recursive = True + del context[objid] + return format % ", ".join(components), readable, recursive + + rep = repr(object) + return rep, (rep and not rep.startswith('<')), False + + +_builtin_scalars = frozenset({str, bytes, bytearray, float, complex, + bool, type(None)}) + + +def _recursion(object): + return ("<Recursion on %s with id=%s>" + % (type(object).__name__, id(object))) + + +def _wrap_bytes_repr(object, width, allowance): + current = b'' + last = len(object) // 4 * 4 + for i in range(0, len(object), 4): + part = object[i: i+4] + candidate = current + part + if i == last: + width -= allowance + if
len(repr(candidate)) > width: + if current: + yield repr(current) + current = part + else: + current = candidate + if current: + yield repr(current) diff --git a/Python314_4_x64_Template/Lib/profile.py b/Python314_4_x64_Template/Lib/profile.py new file mode 100644 index 00000000..a5afb12c --- /dev/null +++ b/Python314_4_x64_Template/Lib/profile.py @@ -0,0 +1,615 @@ +# +# Class for profiling python code. rev 1.0 6/2/94 +# +# Written by James Roskind +# Based on prior profile module by Sjoerd Mullender... +# which was hacked somewhat by: Guido van Rossum + +"""Class for profiling Python code.""" + +# Copyright Disney Enterprises, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific language +# governing permissions and limitations under the License. + + +import importlib.machinery +import io +import sys +import time +import marshal + +__all__ = ["run", "runctx", "Profile"] + +# Sample timer for use with +#i_count = 0 +#def integer_timer(): +# global i_count +# i_count = i_count + 1 +# return i_count +#itimes = integer_timer # replace with C coded timer returning integers + +class _Utils: + """Support class for utility functions which are shared by + profile.py and cProfile.py modules. + Not supposed to be used directly. + """ + + def __init__(self, profiler): + self.profiler = profiler + + def run(self, statement, filename, sort): + prof = self.profiler() + try: + prof.run(statement) + except SystemExit: + pass + finally: + self._show(prof, filename, sort) + + def runctx(self, statement, globals, locals, filename, sort): + prof = self.profiler() + try: + prof.runctx(statement, globals, locals) + except SystemExit: + pass + finally: + self._show(prof, filename, sort) + + def _show(self, prof, filename, sort): + if filename is not None: + prof.dump_stats(filename) + else: + prof.print_stats(sort) + + +#************************************************************************** +# The following are the static member functions for the profiler class +# Note that an instance of Profile() is *not* needed to call them. +#************************************************************************** + +def run(statement, filename=None, sort=-1): + """Run statement under profiler optionally saving results in filename + + This function takes a single argument that can be passed to the + "exec" statement, and an optional file name. In all cases this + routine attempts to "exec" its first argument and gather profiling + statistics from the execution. If no file name is present, then this + function automatically prints a simple profiling report, sorted by the + standard name string (file/line/function-name) that is presented in + each line. + """ + return _Utils(Profile).run(statement, filename, sort) + +def runctx(statement, globals, locals, filename=None, sort=-1): + """Run statement under profiler, supplying your own globals and locals, + optionally saving results in filename. 
+ + statement and filename have the same semantics as profile.run + """ + return _Utils(Profile).runctx(statement, globals, locals, filename, sort) + + +class Profile: + """Profiler class. + + self.cur is always a tuple. Each such tuple corresponds to a stack + frame that is currently active (self.cur[-2]). The following are the + definitions of its members. We use this external "parallel stack" to + avoid contaminating the program that we are profiling. (old profiler + used to write into the frames local dictionary!!) Derived classes + can change the definition of some entries, as long as they leave + [-2:] intact (frame and previous tuple). In case an internal error is + detected, the -3 element is used as the function name. + + [ 0] = Time that needs to be charged to the parent frame's function. + It is used so that a function call will not have to access the + timing data for the parent frame. + [ 1] = Total time spent in this frame's function, excluding time in + subfunctions (this latter is tallied in cur[2]). + [ 2] = Total time spent in subfunctions, excluding time executing the + frame's function (this latter is tallied in cur[1]). + [-3] = Name of the function that corresponds to this frame. + [-2] = Actual frame that we correspond to (used to sync exception handling). + [-1] = Our parent 6-tuple (corresponds to frame.f_back). + + Timing data for each function is stored as a 5-tuple in the dictionary + self.timings[]. The index is always the name stored in self.cur[-3]. + The following are the definitions of the members: + + [0] = The number of times this function was called, not counting direct + or indirect recursion, + [1] = Number of times this function appears on the stack, minus one + [2] = Total time spent internal to this function + [3] = Cumulative time that this function was present on the stack. In + non-recursive functions, this is the total execution time from start + to finish of each invocation of a function, including time spent in + all subfunctions. + [4] = A dictionary indicating for each function name, the number of times + it was called by us. + """ + + bias = 0 # calibration constant + + def __init__(self, timer=None, bias=None): + self.timings = {} + self.cur = None + self.cmd = "" + self.c_func_name = "" + + if bias is None: + bias = self.bias + self.bias = bias # Materialize in local dict for lookup speed. + + if not timer: + self.timer = self.get_time = time.process_time + self.dispatcher = self.trace_dispatch_i + else: + self.timer = timer + t = self.timer() # test out timer function + try: + length = len(t) + except TypeError: + self.get_time = timer + self.dispatcher = self.trace_dispatch_i + else: + if length == 2: + self.dispatcher = self.trace_dispatch + else: + self.dispatcher = self.trace_dispatch_l + # This get_time() implementation needs to be defined + # here to capture the passed-in timer in the parameter + # list (for performance). Note that we can't assume + # the timer() result contains two values in all + # cases. 
+ def get_time_timer(timer=timer, sum=sum): + return sum(timer()) + self.get_time = get_time_timer + self.t = self.get_time() + self.simulate_call('profiler') + + # Heavily optimized dispatch routine for time.process_time() timer + + def trace_dispatch(self, frame, event, arg): + timer = self.timer + t = timer() + t = t[0] + t[1] - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame,t): + t = timer() + self.t = t[0] + t[1] + else: + r = timer() + self.t = r[0] + r[1] - t # put back unrecorded delta + + # Dispatch routine for best timer program (return = scalar, fastest if + # an integer but float works too -- and time.process_time() relies on that). + + def trace_dispatch_i(self, frame, event, arg): + timer = self.timer + t = timer() - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = timer() + else: + self.t = timer() - t # put back unrecorded delta + + # Dispatch routine for macintosh (timer returns time in ticks of + # 1/60th second) + + def trace_dispatch_mac(self, frame, event, arg): + timer = self.timer + t = timer()/60.0 - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = timer()/60.0 + else: + self.t = timer()/60.0 - t # put back unrecorded delta + + # SLOW generic dispatch routine for timer returning lists of numbers + + def trace_dispatch_l(self, frame, event, arg): + get_time = self.get_time + t = get_time() - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = get_time() + else: + self.t = get_time() - t # put back unrecorded delta + + # In the event handlers, the first 3 elements of self.cur are unpacked + # into vrbls w/ 3-letter names. 
The last two characters are meant to be + # mnemonic: + # _pt self.cur[0] "parent time" time to be charged to parent frame + # _it self.cur[1] "internal time" time spent directly in the function + # _et self.cur[2] "external time" time spent in subfunctions + + def trace_dispatch_exception(self, frame, t): + rpt, rit, ret, rfn, rframe, rcur = self.cur + if (rframe is not frame) and rcur: + return self.trace_dispatch_return(rframe, t) + self.cur = rpt, rit+t, ret, rfn, rframe, rcur + return 1 + + + def trace_dispatch_call(self, frame, t): + if self.cur and frame.f_back is not self.cur[-2]: + rpt, rit, ret, rfn, rframe, rcur = self.cur + if not isinstance(rframe, Profile.fake_frame): + assert rframe.f_back is frame.f_back, ("Bad call", rfn, + rframe, rframe.f_back, + frame, frame.f_back) + self.trace_dispatch_return(rframe, 0) + assert (self.cur is None or \ + frame.f_back is self.cur[-2]), ("Bad call", + self.cur[-3]) + fcode = frame.f_code + fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name) + self.cur = (t, 0, 0, fn, frame, self.cur) + timings = self.timings + if fn in timings: + cc, ns, tt, ct, callers = timings[fn] + timings[fn] = cc, ns + 1, tt, ct, callers + else: + timings[fn] = 0, 0, 0, 0, {} + return 1 + + def trace_dispatch_c_call (self, frame, t): + fn = ("", 0, self.c_func_name) + self.cur = (t, 0, 0, fn, frame, self.cur) + timings = self.timings + if fn in timings: + cc, ns, tt, ct, callers = timings[fn] + timings[fn] = cc, ns+1, tt, ct, callers + else: + timings[fn] = 0, 0, 0, 0, {} + return 1 + + def trace_dispatch_return(self, frame, t): + if frame is not self.cur[-2]: + assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3]) + self.trace_dispatch_return(self.cur[-2], 0) + + # Prefix "r" means part of the Returning or exiting frame. + # Prefix "p" means part of the Previous or Parent or older frame. + + rpt, rit, ret, rfn, frame, rcur = self.cur + rit = rit + t + frame_total = rit + ret + + ppt, pit, pet, pfn, pframe, pcur = rcur + self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur + + timings = self.timings + cc, ns, tt, ct, callers = timings[rfn] + if not ns: + # This is the only occurrence of the function on the stack. + # Else this is a (directly or indirectly) recursive call, and + # its cumulative time will get updated when the topmost call to + # it returns. + ct = ct + frame_total + cc = cc + 1 + + if pfn in callers: + callers[pfn] = callers[pfn] + 1 # hack: gather more + # stats such as the amount of time added to ct courtesy + # of this specific call, and the contribution to cc + # courtesy of this call. + else: + callers[pfn] = 1 + + timings[rfn] = cc, ns - 1, tt + rit, ct, callers + + return 1 + + + dispatch = { + "call": trace_dispatch_call, + "exception": trace_dispatch_exception, + "return": trace_dispatch_return, + "c_call": trace_dispatch_c_call, + "c_exception": trace_dispatch_return, # the C function returned + "c_return": trace_dispatch_return, + } + + + # The next few functions play with self.cmd. By carefully preloading + # our parallel stack, we can force the profiled result to include + # an arbitrary string as the name of the calling function. + # We use self.cmd as that string, and the resulting stats look + # very nice :-). 
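To make the dispatcher selection and the dispatch table above concrete: a zero-argument timer returning a single number routes events through trace_dispatch_i, while a two-value timer (old os.times()-style) selects trace_dispatch. A minimal sketch, with an illustrative timer choice:

```python
import time
import profile

# perf_counter() returns one float, so Profile picks trace_dispatch_i
# and uses the timer's value directly as get_time().
p = profile.Profile(timer=time.perf_counter)
p.runctx('sum(i * i for i in range(10000))', globals(), locals())
p.print_stats('cumulative')
```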
+ + def set_cmd(self, cmd): + if self.cur[-1]: return # already set + self.cmd = cmd + self.simulate_call(cmd) + + class fake_code: + def __init__(self, filename, line, name): + self.co_filename = filename + self.co_line = line + self.co_name = name + self.co_firstlineno = 0 + + def __repr__(self): + return repr((self.co_filename, self.co_line, self.co_name)) + + class fake_frame: + def __init__(self, code, prior): + self.f_code = code + self.f_back = prior + + def simulate_call(self, name): + code = self.fake_code('profile', 0, name) + if self.cur: + pframe = self.cur[-2] + else: + pframe = None + frame = self.fake_frame(code, pframe) + self.dispatch['call'](self, frame, 0) + + # collect stats from pending stack, including getting final + # timings for self.cmd frame. + + def simulate_cmd_complete(self): + get_time = self.get_time + t = get_time() - self.t + while self.cur[-1]: + # We *can* cause assertion errors here if + # dispatch_trace_return checks for a frame match! + self.dispatch['return'](self, self.cur[-2], t) + t = 0 + self.t = get_time() - t + + + def print_stats(self, sort=-1): + import pstats + if not isinstance(sort, tuple): + sort = (sort,) + pstats.Stats(self).strip_dirs().sort_stats(*sort).print_stats() + + def dump_stats(self, file): + with open(file, 'wb') as f: + self.create_stats() + marshal.dump(self.stats, f) + + def create_stats(self): + self.simulate_cmd_complete() + self.snapshot_stats() + + def snapshot_stats(self): + self.stats = {} + for func, (cc, ns, tt, ct, callers) in self.timings.items(): + callers = callers.copy() + nc = 0 + for callcnt in callers.values(): + nc += callcnt + self.stats[func] = cc, nc, tt, ct, callers + + + # The following two methods can be called by clients to use + # a profiler to profile a statement, given as a string. + + def run(self, cmd): + import __main__ + dict = __main__.__dict__ + return self.runctx(cmd, dict, dict) + + def runctx(self, cmd, globals, locals): + self.set_cmd(cmd) + sys.setprofile(self.dispatcher) + try: + exec(cmd, globals, locals) + finally: + sys.setprofile(None) + return self + + # This method is more useful to profile a single function call. + def runcall(self, func, /, *args, **kw): + self.set_cmd(repr(func)) + sys.setprofile(self.dispatcher) + try: + return func(*args, **kw) + finally: + sys.setprofile(None) + + + #****************************************************************** + # The following calculates the overhead for using a profiler. The + # problem is that it takes a fair amount of time for the profiler + # to stop the stopwatch (from the time it receives an event). + # Similarly, there is a delay from the time that the profiler + # re-starts the stopwatch before the user's code really gets to + # continue. The following code tries to measure the difference on + # a per-event basis. + # + # Note that this difference is only significant if there are a lot of + # events, and relatively little user code per event. For example, + # code with small functions will typically benefit from having the + # profiler calibrated for the current platform. This *could* be + # done on the fly during init() time, but it is not worth the + # effort. Also note that if too large a value specified, then + # execution time on some functions will actually appear as a + # negative number. It is *normal* for some functions (with very + # low call counts) to have such negative stats, even if the + # calibration figure is "correct." 
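A short usage sketch of the public entry points above (the function and file names are illustrative): runcall() profiles a single call and passes through its return value, while dump_stats() writes the marshalled timings for later inspection with pstats:

```python
import profile

def work(n):
    return sum(i * i for i in range(n))

pr = profile.Profile()
result = pr.runcall(work, 100_000)   # profiled; returns work's result
pr.dump_stats('work.prof')           # marshalled stats, readable by pstats.Stats

# Optionally, calibrate() (defined below) estimates the per-event stopwatch
# overhead discussed in the surrounding comments; it can then be installed
# as the class-wide bias:
#   profile.Profile.bias = profile.Profile().calibrate(10000)
```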
+ # + # One alternative to profile-time calibration adjustments (i.e., + # adding in the magic little delta during each event) is to track + # more carefully the number of events (and cumulatively, the number + # of events during sub functions) that are seen. If this were + # done, then the arithmetic could be done after the fact (i.e., at + # display time). Currently, we track only call/return events. + # These values can be deduced by examining the callees and callers + # vectors for each function. Hence we *can* almost correct the + # internal time figure at print time (note that we currently don't + # track exception event processing counts). Unfortunately, there + # is currently no similar information for cumulative sub-function + # time. It would not be hard to "get all this info" at profiler + # time. Specifically, we would have to extend the tuples to keep + # counts of this in each frame, and then extend the defs of timing + # tuples to include the significant two figures. I'm a bit fearful + # that this additional feature will slow the heavily optimized + # event/time ratio (i.e., the profiler would run slower, for a very + # low "value added" feature.) + #************************************************************** + + def calibrate(self, m, verbose=0): + if self.__class__ is not Profile: + raise TypeError("Subclasses must override .calibrate().") + + saved_bias = self.bias + self.bias = 0 + try: + return self._calibrate_inner(m, verbose) + finally: + self.bias = saved_bias + + def _calibrate_inner(self, m, verbose): + get_time = self.get_time + + # Set up a test case to be run with and without profiling. Include + # lots of calls, because we're trying to quantify stopwatch overhead. + # Do not raise any exceptions, though, because we want to know + # exactly how many profile events are generated (one call event, + # one return event, per Python-level call). + + def f1(n): + for i in range(n): + x = 1 + + def f(m, f1=f1): + for i in range(m): + f1(100) + + f(m) # warm up the cache + + # elapsed_noprofile <- time f(m) takes without profiling. + t0 = get_time() + f(m) + t1 = get_time() + elapsed_noprofile = t1 - t0 + if verbose: + print("elapsed time without profiling =", elapsed_noprofile) + + # elapsed_profile <- time f(m) takes with profiling. The difference + # is profiling overhead, only some of which the profiler subtracts + # out on its own. + p = Profile() + t0 = get_time() + p.runctx('f(m)', globals(), locals()) + t1 = get_time() + elapsed_profile = t1 - t0 + if verbose: + print("elapsed time with profiling =", elapsed_profile) + + # reported_time <- "CPU seconds" the profiler charged to f and f1. + total_calls = 0.0 + reported_time = 0.0 + for (filename, line, funcname), (cc, ns, tt, ct, callers) in \ + p.timings.items(): + if funcname in ("f", "f1"): + total_calls += cc + reported_time += tt + + if verbose: + print("'CPU seconds' profiler reported =", reported_time) + print("total # calls =", total_calls) + if total_calls != m + 1: + raise ValueError("internal error: total calls = %d" % total_calls) + + # reported_time - elapsed_noprofile = overhead the profiler wasn't + # able to measure. Divide by twice the number of calls (since there + # are two profiler events per call in this test) to get the hidden + # overhead per event.
mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls + if verbose: + print("mean stopwatch overhead per profile event =", mean) + return mean + +#**************************************************************************** + +def main(): + import os + from optparse import OptionParser + + usage = "profile.py [-o output_file_path] [-s sort] [-m module | scriptfile] [arg] ..." + parser = OptionParser(usage=usage) + parser.allow_interspersed_args = False + parser.add_option('-o', '--outfile', dest="outfile", + help="Save stats to <outfile>", default=None) + parser.add_option('-m', dest="module", action="store_true", + help="Profile a library module.", default=False) + parser.add_option('-s', '--sort', dest="sort", + help="Sort order when printing to stdout, based on pstats.Stats class", + default=-1) + + if not sys.argv[1:]: + parser.print_usage() + sys.exit(2) + + (options, args) = parser.parse_args() + sys.argv[:] = args + + # The script that we're profiling may chdir, so capture the absolute path + # to the output file at startup. + if options.outfile is not None: + options.outfile = os.path.abspath(options.outfile) + + if len(args) > 0: + if options.module: + import runpy + code = "run_module(modname, run_name='__main__')" + globs = { + 'run_module': runpy.run_module, + 'modname': args[0] + } + else: + progname = args[0] + sys.path.insert(0, os.path.dirname(progname)) + with io.open_code(progname) as fp: + code = compile(fp.read(), progname, 'exec') + spec = importlib.machinery.ModuleSpec(name='__main__', loader=None, + origin=progname) + globs = { + '__spec__': spec, + '__file__': spec.origin, + '__name__': spec.name, + '__package__': None, + '__cached__': None, + } + try: + runctx(code, globs, None, options.outfile, options.sort) + except BrokenPipeError as exc: + # Prevent "Exception ignored" during interpreter shutdown. + sys.stdout = None + sys.exit(exc.errno) + else: + parser.print_usage() + return parser + +# When invoked as main program, invoke the profiler on a script +if __name__ == '__main__': + main() diff --git a/Python314_4_x64_Template/Lib/pstats.py b/Python314_4_x64_Template/Lib/pstats.py new file mode 100644 index 00000000..becaf355 --- /dev/null +++ b/Python314_4_x64_Template/Lib/pstats.py @@ -0,0 +1,777 @@ +"""Class for printing reports on profiled python code.""" + +# Written by James Roskind +# Based on prior profile module by Sjoerd Mullender... +# which was hacked somewhat by: Guido van Rossum + +# Copyright Disney Enterprises, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific language +# governing permissions and limitations under the License.
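The main() function above also makes the profile module usable from the command line, and its dumps load straight into the pstats module that follows. A hypothetical session (the script and stats file names are placeholders):

```python
# Shell usage (illustrative):
#   python -m profile -o myscript.prof myscript.py
#   python -m profile -s cumulative myscript.py

import pstats

stats = pstats.Stats('myscript.prof')
stats.strip_dirs().sort_stats('cumulative').print_stats(10)
```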
+ + +import sys +import os +import time +import marshal +import re + +from enum import StrEnum, _simple_enum +from functools import cmp_to_key +from dataclasses import dataclass + +__all__ = ["Stats", "SortKey", "FunctionProfile", "StatsProfile"] + +@_simple_enum(StrEnum) +class SortKey: + CALLS = 'calls', 'ncalls' + CUMULATIVE = 'cumulative', 'cumtime' + FILENAME = 'filename', 'module' + LINE = 'line' + NAME = 'name' + NFL = 'nfl' + PCALLS = 'pcalls' + STDNAME = 'stdname' + TIME = 'time', 'tottime' + + def __new__(cls, *values): + value = values[0] + obj = str.__new__(cls, value) + obj._value_ = value + for other_value in values[1:]: + cls._value2member_map_[other_value] = obj + obj._all_values = values + return obj + + +@dataclass(unsafe_hash=True) +class FunctionProfile: + ncalls: str + tottime: float + percall_tottime: float + cumtime: float + percall_cumtime: float + file_name: str + line_number: int + +@dataclass(unsafe_hash=True) +class StatsProfile: + '''Snapshot of a whole profile: total time plus a mapping of function + names to FunctionProfile instances.''' + total_tt: float + func_profiles: dict[str, FunctionProfile] + +class Stats: + """This class is used for creating reports from data generated by the + Profile class. It is a "friend" of that class, and imports data either + by direct access to members of Profile class, or by reading in a dictionary + that was emitted (via marshal) from the Profile class. + + The big change from the previous Profiler (in terms of raw functionality) + is that an "add()" method has been provided to combine Stats from + several distinct profile runs. Both the constructor and the add() + method now take arbitrarily many file names as arguments. + + All the print methods now take an argument that indicates how many lines + to print. If the arg is a floating-point number between 0 and 1.0, then + it is taken as a decimal percentage of the available lines to be printed + (e.g., .1 means print 10% of all available lines). If it is an integer, + it is taken to mean the number of lines of data that you wish to have + printed. + + The sort_stats() method now processes some additional options (i.e., in + addition to the old -1, 0, 1, or 2 that are respectively interpreted as + 'stdname', 'calls', 'time', and 'cumulative'). It takes either an + arbitrary number of quoted strings or SortKey enum to select the sort + order. + + For example sort_stats('time', 'name') or sort_stats(SortKey.TIME, + SortKey.NAME) sorts on the major key of 'internal function time', and on + the minor key of 'the name of the function'. Look at the two tables in + sort_stats() and get_sort_arg_defs(self) for more examples.
+ + All methods return self, so you can string together commands like: + Stats('foo', 'goo').strip_dirs().sort_stats('calls').\ + print_stats(5).print_callers(5) + """ + + def __init__(self, *args, stream=None): + self.stream = stream or sys.stdout + if not len(args): + arg = None + else: + arg = args[0] + args = args[1:] + self.init(arg) + self.add(*args) + + def init(self, arg): + self.all_callees = None # calc only if needed + self.files = [] + self.fcn_list = None + self.total_tt = 0 + self.total_calls = 0 + self.prim_calls = 0 + self.max_name_len = 0 + self.top_level = set() + self.stats = {} + self.sort_arg_dict = {} + self.load_stats(arg) + try: + self.get_top_level_stats() + except Exception: + print("Invalid timing data %s" % + (self.files[-1] if self.files else ''), file=self.stream) + raise + + def load_stats(self, arg): + if arg is None: + self.stats = {} + return + elif isinstance(arg, str): + with open(arg, 'rb') as f: + self.stats = marshal.load(f) + try: + file_stats = os.stat(arg) + arg = time.ctime(file_stats.st_mtime) + " " + arg + except: # in case this is not unix + pass + self.files = [arg] + elif hasattr(arg, 'create_stats'): + arg.create_stats() + self.stats = arg.stats + arg.stats = {} + if not self.stats: + raise TypeError("Cannot create or construct a %r object from %r" + % (self.__class__, arg)) + return + + def get_top_level_stats(self): + for func, (cc, nc, tt, ct, callers) in self.stats.items(): + self.total_calls += nc + self.prim_calls += cc + self.total_tt += tt + if ("jprofile", 0, "profiler") in callers: + self.top_level.add(func) + if len(func_std_string(func)) > self.max_name_len: + self.max_name_len = len(func_std_string(func)) + + def add(self, *arg_list): + if not arg_list: + return self + for item in reversed(arg_list): + if type(self) != type(item): + item = Stats(item) + self.files += item.files + self.total_calls += item.total_calls + self.prim_calls += item.prim_calls + self.total_tt += item.total_tt + for func in item.top_level: + self.top_level.add(func) + + if self.max_name_len < item.max_name_len: + self.max_name_len = item.max_name_len + + self.fcn_list = None + + for func, stat in item.stats.items(): + if func in self.stats: + old_func_stat = self.stats[func] + else: + old_func_stat = (0, 0, 0, 0, {},) + self.stats[func] = add_func_stats(old_func_stat, stat) + return self + + def dump_stats(self, filename): + """Write the profile data to a file we know how to load back.""" + with open(filename, 'wb') as f: + marshal.dump(self.stats, f) + + # list the tuple indices and directions for sorting, + # along with some printable description + sort_arg_dict_default = { + "calls" : (((1,-1), ), "call count"), + "ncalls" : (((1,-1), ), "call count"), + "cumtime" : (((3,-1), ), "cumulative time"), + "cumulative": (((3,-1), ), "cumulative time"), + "filename" : (((4, 1), ), "file name"), + "line" : (((5, 1), ), "line number"), + "module" : (((4, 1), ), "file name"), + "name" : (((6, 1), ), "function name"), + "nfl" : (((6, 1),(4, 1),(5, 1),), "name/file/line"), + "pcalls" : (((0,-1), ), "primitive call count"), + "stdname" : (((7, 1), ), "standard name"), + "time" : (((2,-1), ), "internal time"), + "tottime" : (((2,-1), ), "internal time"), + } + + def get_sort_arg_defs(self): + """Expand all abbreviations that are unique.""" + if not self.sort_arg_dict: + self.sort_arg_dict = dict = {} + bad_list = {} + for word, tup in self.sort_arg_dict_default.items(): + fragment = word + while fragment: + if fragment in dict: + bad_list[fragment] = 0 + break + 
dict[fragment] = tup + fragment = fragment[:-1] + for word in bad_list: + del dict[word] + return self.sort_arg_dict + + def sort_stats(self, *field): + if not field: + self.fcn_list = 0 + return self + if len(field) == 1 and isinstance(field[0], int): + # Be compatible with old profiler + field = [ {-1: "stdname", + 0: "calls", + 1: "time", + 2: "cumulative"}[field[0]] ] + elif len(field) >= 2: + for arg in field[1:]: + if type(arg) != type(field[0]): + raise TypeError("Can't have mixed argument type") + + sort_arg_defs = self.get_sort_arg_defs() + + sort_tuple = () + self.sort_type = "" + connector = "" + for word in field: + if isinstance(word, SortKey): + word = word.value + sort_tuple = sort_tuple + sort_arg_defs[word][0] + self.sort_type += connector + sort_arg_defs[word][1] + connector = ", " + + stats_list = [] + for func, (cc, nc, tt, ct, callers) in self.stats.items(): + stats_list.append((cc, nc, tt, ct) + func + + (func_std_string(func), func)) + + stats_list.sort(key=cmp_to_key(TupleComp(sort_tuple).compare)) + + self.fcn_list = fcn_list = [] + for tuple in stats_list: + fcn_list.append(tuple[-1]) + return self + + def reverse_order(self): + if self.fcn_list: + self.fcn_list.reverse() + return self + + def strip_dirs(self): + oldstats = self.stats + self.stats = newstats = {} + max_name_len = 0 + for func, (cc, nc, tt, ct, callers) in oldstats.items(): + newfunc = func_strip_path(func) + if len(func_std_string(newfunc)) > max_name_len: + max_name_len = len(func_std_string(newfunc)) + newcallers = {} + for func2, caller in callers.items(): + newcallers[func_strip_path(func2)] = caller + + if newfunc in newstats: + newstats[newfunc] = add_func_stats( + newstats[newfunc], + (cc, nc, tt, ct, newcallers)) + else: + newstats[newfunc] = (cc, nc, tt, ct, newcallers) + old_top = self.top_level + self.top_level = new_top = set() + for func in old_top: + new_top.add(func_strip_path(func)) + + self.max_name_len = max_name_len + + self.fcn_list = None + self.all_callees = None + return self + + def calc_callees(self): + if self.all_callees: + return + self.all_callees = all_callees = {} + for func, (cc, nc, tt, ct, callers) in self.stats.items(): + if not func in all_callees: + all_callees[func] = {} + for func2, caller in callers.items(): + if not func2 in all_callees: + all_callees[func2] = {} + all_callees[func2][func] = caller + return + + #****************************************************************** + # The following functions support actual printing of reports + #****************************************************************** + + # Optional "amount" is either a line count, or a percentage of lines. + + def eval_print_amount(self, sel, list, msg): + new_list = list + if isinstance(sel, str): + try: + rex = re.compile(sel) + except re.PatternError: + msg += " <Invalid regular expression %r>\n" % sel + return new_list, msg + new_list = [] + for func in list: + if rex.search(func_std_string(func)): + new_list.append(func) + else: + count = len(list) + if isinstance(sel, float) and 0.0 <= sel < 1.0: + count = int(count * sel + .5) + new_list = list[:count] + elif isinstance(sel, int) and 0 <= sel < count: + count = sel + new_list = list[:count] + if len(list) != len(new_list): + msg += " List reduced from %r to %r due to restriction <%r>\n" % ( + len(list), len(new_list), sel) + + return new_list, msg + + def get_stats_profile(self): + """This method returns an instance of StatsProfile, which contains a mapping + of function names to instances of FunctionProfile.
Each FunctionProfile + instance holds information related to the function's profile such as how + long the function took to run, how many times it was called, etc... + """ + func_list = self.fcn_list[:] if self.fcn_list else list(self.stats.keys()) + if not func_list: + return StatsProfile(0, {}) + + total_tt = float(f8(self.total_tt)) + func_profiles = {} + stats_profile = StatsProfile(total_tt, func_profiles) + + for func in func_list: + cc, nc, tt, ct, callers = self.stats[func] + file_name, line_number, func_name = func + ncalls = str(nc) if nc == cc else (str(nc) + '/' + str(cc)) + tottime = float(f8(tt)) + percall_tottime = -1 if nc == 0 else float(f8(tt/nc)) + cumtime = float(f8(ct)) + percall_cumtime = -1 if cc == 0 else float(f8(ct/cc)) + func_profile = FunctionProfile( + ncalls, + tottime, # time spent in this function alone + percall_tottime, + cumtime, # time spent in the function plus all functions that this function called, + percall_cumtime, + file_name, + line_number + ) + func_profiles[func_name] = func_profile + + return stats_profile + + def get_print_list(self, sel_list): + width = self.max_name_len + if self.fcn_list: + stat_list = self.fcn_list[:] + msg = " Ordered by: " + self.sort_type + '\n' + else: + stat_list = list(self.stats.keys()) + msg = " Random listing order was used\n" + + for selection in sel_list: + stat_list, msg = self.eval_print_amount(selection, stat_list, msg) + + count = len(stat_list) + + if not stat_list: + return 0, stat_list + print(msg, file=self.stream) + if count < len(self.stats): + width = 0 + for func in stat_list: + if len(func_std_string(func)) > width: + width = len(func_std_string(func)) + return width+2, stat_list + + def print_stats(self, *amount): + for filename in self.files: + print(filename, file=self.stream) + if self.files: + print(file=self.stream) + indent = ' ' * 8 + for func in self.top_level: + print(indent, func_get_function_name(func), file=self.stream) + + print(indent, self.total_calls, "function calls", end=' ', file=self.stream) + if self.total_calls != self.prim_calls: + print("(%d primitive calls)" % self.prim_calls, end=' ', file=self.stream) + print("in %.3f seconds" % self.total_tt, file=self.stream) + print(file=self.stream) + width, list = self.get_print_list(amount) + if list: + self.print_title() + for func in list: + self.print_line(func) + print(file=self.stream) + print(file=self.stream) + return self + + def print_callees(self, *amount): + width, list = self.get_print_list(amount) + if list: + self.calc_callees() + + self.print_call_heading(width, "called...") + for func in list: + if func in self.all_callees: + self.print_call_line(width, func, self.all_callees[func]) + else: + self.print_call_line(width, func, {}) + print(file=self.stream) + print(file=self.stream) + return self + + def print_callers(self, *amount): + width, list = self.get_print_list(amount) + if list: + self.print_call_heading(width, "was called by...") + for func in list: + cc, nc, tt, ct, callers = self.stats[func] + self.print_call_line(width, func, callers, "<-") + print(file=self.stream) + print(file=self.stream) + return self + + def print_call_heading(self, name_size, column_title): + print("Function ".ljust(name_size) + column_title, file=self.stream) + # print sub-header only if we have new-style callers + subheader = False + for cc, nc, tt, ct, callers in self.stats.values(): + if callers: + value = next(iter(callers.values())) + subheader = isinstance(value, tuple) + break + if subheader: + print(" "*name_size + " ncalls 
tottime cumtime", file=self.stream) + + def print_call_line(self, name_size, source, call_dict, arrow="->"): + print(func_std_string(source).ljust(name_size) + arrow, end=' ', file=self.stream) + if not call_dict: + print(file=self.stream) + return + clist = sorted(call_dict.keys()) + indent = "" + for func in clist: + name = func_std_string(func) + value = call_dict[func] + if isinstance(value, tuple): + nc, cc, tt, ct = value + if nc != cc: + substats = '%d/%d' % (nc, cc) + else: + substats = '%d' % (nc,) + substats = '%s %s %s %s' % (substats.rjust(7+2*len(indent)), + f8(tt), f8(ct), name) + left_width = name_size + 1 + else: + substats = '%s(%r) %s' % (name, value, f8(self.stats[func][3])) + left_width = name_size + 3 + print(indent*left_width + substats, file=self.stream) + indent = " " + + def print_title(self): + print(' ncalls tottime percall cumtime percall', end=' ', file=self.stream) + print('filename:lineno(function)', file=self.stream) + + def print_line(self, func): # hack: should print percentages + cc, nc, tt, ct, callers = self.stats[func] + c = str(nc) + if nc != cc: + c = c + '/' + str(cc) + print(c.rjust(9), end=' ', file=self.stream) + print(f8(tt), end=' ', file=self.stream) + if nc == 0: + print(' '*8, end=' ', file=self.stream) + else: + print(f8(tt/nc), end=' ', file=self.stream) + print(f8(ct), end=' ', file=self.stream) + if cc == 0: + print(' '*8, end=' ', file=self.stream) + else: + print(f8(ct/cc), end=' ', file=self.stream) + print(func_std_string(func), file=self.stream) + +class TupleComp: + """This class provides a generic function for comparing any two tuples. + Each instance records a list of tuple-indices (from most significant + to least significant), and sort direction (ascending or descending) for + each tuple-index. The compare function can then be used as the function + argument to the system sort() function when a list of tuples needs to be + sorted in the instance's order.""" + + def __init__(self, comp_select_list): + self.comp_select_list = comp_select_list + + def compare (self, left, right): + for index, direction in self.comp_select_list: + l = left[index] + r = right[index] + if l < r: + return -direction + if l > r: + return direction + return 0 + + +#************************************************************************** +# func_name is a triple (file:string, line:int, name:string) + +def func_strip_path(func_name): + filename, line, name = func_name + return os.path.basename(filename), line, name + +def func_get_function_name(func): + return func[2] + +def func_std_string(func_name): # match what old profile produced + if func_name[:2] == ('~', 0): + # special case for built-in functions + name = func_name[2] + if name.startswith('<') and name.endswith('>'): + return '{%s}' % name[1:-1] + else: + return name + else: + return "%s:%d(%s)" % func_name + +#************************************************************************** +# The following functions combine statistics for pairs of functions. +# The bulk of the processing involves correctly handling "call" lists, +# such as callers and callees.
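The merging helpers that follow are what Stats.add() relies on when combining several runs; a sketch of that workflow (the dump file names are hypothetical):

```python
import pstats

combined = pstats.Stats('run1.prof')
combined.add('run2.prof', 'run3.prof')   # per-function tuples and caller
                                         # dicts are summed entry by entry
combined.sort_stats('cumulative').print_stats(5)
```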
+#************************************************************************** + +def add_func_stats(target, source): + """Add together all the stats for two profile entries.""" + cc, nc, tt, ct, callers = source + t_cc, t_nc, t_tt, t_ct, t_callers = target + return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct, + add_callers(t_callers, callers)) + +def add_callers(target, source): + """Combine two caller lists in a single list.""" + new_callers = {} + for func, caller in target.items(): + new_callers[func] = caller + for func, caller in source.items(): + if func in new_callers: + if isinstance(caller, tuple): + # format used by cProfile + new_callers[func] = tuple(i + j for i, j in zip(caller, new_callers[func])) + else: + # format used by profile + new_callers[func] += caller + else: + new_callers[func] = caller + return new_callers + +def count_calls(callers): + """Sum the caller statistics to get total number of calls received.""" + nc = 0 + for calls in callers.values(): + nc += calls + return nc + +#************************************************************************** +# The following functions support printing of reports +#************************************************************************** + +def f8(x): + return "%8.3f" % x + +#************************************************************************** +# Statistics browser added by ESR, April 2001 +#************************************************************************** + +if __name__ == '__main__': + import cmd + try: + import readline # noqa: F401 + except ImportError: + pass + + class ProfileBrowser(cmd.Cmd): + def __init__(self, profile=None): + cmd.Cmd.__init__(self) + self.prompt = "% " + self.stats = None + self.stream = sys.stdout + if profile is not None: + self.do_read(profile) + + def generic(self, fn, line): + args = line.split() + processed = [] + for term in args: + try: + processed.append(int(term)) + continue + except ValueError: + pass + try: + frac = float(term) + if frac > 1 or frac < 0: + print("Fraction argument must be in [0, 1]", file=self.stream) + continue + processed.append(frac) + continue + except ValueError: + pass + processed.append(term) + if self.stats: + getattr(self.stats, fn)(*processed) + else: + print("No statistics object is loaded.", file=self.stream) + return 0 + def generic_help(self): + print("Arguments may be:", file=self.stream) + print("* An integer maximum number of entries to print.", file=self.stream) + print("* A decimal fractional number between 0 and 1, controlling", file=self.stream) + print(" what fraction of selected entries to print.", file=self.stream) + print("* A regular expression; only entries with function names", file=self.stream) + print(" that match it are printed.", file=self.stream) + + def do_add(self, line): + if self.stats: + try: + self.stats.add(line) + except OSError as e: + print("Failed to load statistics for %s: %s" % (line, e), file=self.stream) + else: + print("No statistics object is loaded.", file=self.stream) + return 0 + def help_add(self): + print("Add profile info from given file to current statistics object.", file=self.stream) + + def do_callees(self, line): + return self.generic('print_callees', line) + def help_callees(self): + print("Print callees statistics from the current stat object.", file=self.stream) + self.generic_help() + + def do_callers(self, line): + return self.generic('print_callers', line) + def help_callers(self): + print("Print callers statistics from the current stat object.", file=self.stream) + self.generic_help() + + def 
do_EOF(self, line): + print("", file=self.stream) + return 1 + def help_EOF(self): + print("Leave the profile browser.", file=self.stream) + + def do_quit(self, line): + return 1 + def help_quit(self): + print("Leave the profile browser.", file=self.stream) + + def do_read(self, line): + if line: + try: + self.stats = Stats(line) + except OSError as err: + print(err.args[1], file=self.stream) + return + except Exception as err: + print(err.__class__.__name__ + ':', err, file=self.stream) + return + self.prompt = line + "% " + elif len(self.prompt) > 2: + line = self.prompt[:-2] + self.do_read(line) + else: + print("No statistics object is current -- cannot reload.", file=self.stream) + return 0 + def help_read(self): + print("Read in profile data from a specified file.", file=self.stream) + print("Without argument, reload the current file.", file=self.stream) + + def do_reverse(self, line): + if self.stats: + self.stats.reverse_order() + else: + print("No statistics object is loaded.", file=self.stream) + return 0 + def help_reverse(self): + print("Reverse the sort order of the profiling report.", file=self.stream) + + def do_sort(self, line): + if not self.stats: + print("No statistics object is loaded.", file=self.stream) + return + abbrevs = self.stats.get_sort_arg_defs() + if line and all((x in abbrevs) for x in line.split()): + self.stats.sort_stats(*line.split()) + else: + print("Valid sort keys (unique prefixes are accepted):", file=self.stream) + for (key, value) in Stats.sort_arg_dict_default.items(): + print("%s -- %s" % (key, value[1]), file=self.stream) + return 0 + def help_sort(self): + print("Sort profile data according to specified keys.", file=self.stream) + print("(Typing `sort' without arguments lists valid keys.)", file=self.stream) + def complete_sort(self, text, *args): + return [a for a in Stats.sort_arg_dict_default if a.startswith(text)] + + def do_stats(self, line): + return self.generic('print_stats', line) + def help_stats(self): + print("Print statistics from the current stat object.", file=self.stream) + self.generic_help() + + def do_strip(self, line): + if self.stats: + self.stats.strip_dirs() + else: + print("No statistics object is loaded.", file=self.stream) + def help_strip(self): + print("Strip leading path information from filenames in the report.", file=self.stream) + + def help_help(self): + print("Show help for a given command.", file=self.stream) + + def postcmd(self, stop, line): + if stop: + return stop + return None + + if len(sys.argv) > 1: + initprofile = sys.argv[1] + else: + initprofile = None + try: + browser = ProfileBrowser(initprofile) + for profile in sys.argv[2:]: + browser.do_add(profile) + print("Welcome to the profile statistics browser.", file=browser.stream) + browser.cmdloop() + print("Goodbye.", file=browser.stream) + except KeyboardInterrupt: + pass + +# That's all, folks. diff --git a/Python314_4_x64_Template/Lib/pty.py b/Python314_4_x64_Template/Lib/pty.py new file mode 100644 index 00000000..4b25ac32 --- /dev/null +++ b/Python314_4_x64_Template/Lib/pty.py @@ -0,0 +1,182 @@ +"""Pseudo terminal utilities.""" + +# Bugs: No signal handling. Doesn't set slave termios and window size. +# Only tested on Linux, FreeBSD, and macOS. +# See: W. Richard Stevens. 1992. Advanced Programming in the +# UNIX Environment. Chapter 19. +# Author: Steen Lumholt -- with additions by Guido. 
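+# Usage sketch (editor's addition, not part of the upstream module): the
+# high-level entry point is spawn(), which runs a command on a freshly
+# allocated pty and mirrors its I/O onto the caller's terminal, e.g.:
+#
+#     import pty
+#     status = pty.spawn(["ls", "-l"])   # raw waitpid() status of the child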
+ +from select import select +import os +import sys +import tty + +# names imported directly for test mocking purposes +from os import close, waitpid +from tty import setraw, tcgetattr, tcsetattr + +__all__ = ["openpty", "fork", "spawn"] + +STDIN_FILENO = 0 +STDOUT_FILENO = 1 +STDERR_FILENO = 2 + +CHILD = 0 + +def openpty(): + """openpty() -> (master_fd, slave_fd) + Open a pty master/slave pair, using os.openpty() if possible.""" + + try: + return os.openpty() + except (AttributeError, OSError): + pass + master_fd, slave_name = _open_terminal() + + slave_fd = os.open(slave_name, os.O_RDWR) + try: + from fcntl import ioctl, I_PUSH + except ImportError: + return master_fd, slave_fd + try: + ioctl(slave_fd, I_PUSH, "ptem") + ioctl(slave_fd, I_PUSH, "ldterm") + except OSError: + pass + return master_fd, slave_fd + +def _open_terminal(): + """Open pty master and return (master_fd, tty_name).""" + for x in 'pqrstuvwxyzPQRST': + for y in '0123456789abcdef': + pty_name = '/dev/pty' + x + y + try: + fd = os.open(pty_name, os.O_RDWR) + except OSError: + continue + return (fd, '/dev/tty' + x + y) + raise OSError('out of pty devices') + + +def fork(): + """fork() -> (pid, master_fd) + Fork and make the child a session leader with a controlling terminal.""" + + try: + pid, fd = os.forkpty() + except (AttributeError, OSError): + pass + else: + if pid == CHILD: + try: + os.setsid() + except OSError: + # os.forkpty() already set us session leader + pass + return pid, fd + + master_fd, slave_fd = openpty() + pid = os.fork() + if pid == CHILD: + os.close(master_fd) + os.login_tty(slave_fd) + else: + os.close(slave_fd) + + # Parent and child process. + return pid, master_fd + +def _read(fd): + """Default read function.""" + return os.read(fd, 1024) + +def _copy(master_fd, master_read=_read, stdin_read=_read): + """Parent copy loop. + Copies + pty master -> standard output (master_read) + standard input -> pty master (stdin_read)""" + if os.get_blocking(master_fd): + # If we write more than tty/ndisc is willing to buffer, we may block + # indefinitely. So we set master_fd to non-blocking temporarily during + # the copy operation. + os.set_blocking(master_fd, False) + try: + _copy(master_fd, master_read=master_read, stdin_read=stdin_read) + finally: + # restore blocking mode for backwards compatibility + os.set_blocking(master_fd, True) + return + high_waterlevel = 4096 + stdin_avail = master_fd != STDIN_FILENO + stdout_avail = master_fd != STDOUT_FILENO + i_buf = b'' + o_buf = b'' + while 1: + rfds = [] + wfds = [] + if stdin_avail and len(i_buf) < high_waterlevel: + rfds.append(STDIN_FILENO) + if stdout_avail and len(o_buf) < high_waterlevel: + rfds.append(master_fd) + if stdout_avail and len(o_buf) > 0: + wfds.append(STDOUT_FILENO) + if len(i_buf) > 0: + wfds.append(master_fd) + + rfds, wfds, _xfds = select(rfds, wfds, []) + + if STDOUT_FILENO in wfds: + try: + n = os.write(STDOUT_FILENO, o_buf) + o_buf = o_buf[n:] + except OSError: + stdout_avail = False + + if master_fd in rfds: + # Some OSes signal EOF by returning an empty byte string, + # some throw OSErrors. + try: + data = master_read(master_fd) + except OSError: + data = b"" + if not data: # Reached EOF. + return # Assume the child process has exited and is + # unreachable, so we clean up. 
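+            # Data read from the master is only buffered here; the select()
+            # loop drains o_buf to STDOUT once that fd reports writable.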
+            o_buf += data
+
+        if master_fd in wfds:
+            n = os.write(master_fd, i_buf)
+            i_buf = i_buf[n:]
+
+        if stdin_avail and STDIN_FILENO in rfds:
+            data = stdin_read(STDIN_FILENO)
+            if not data:
+                stdin_avail = False
+            else:
+                i_buf += data
+
+def spawn(argv, master_read=_read, stdin_read=_read):
+    """Create a spawned process."""
+    if isinstance(argv, str):
+        argv = (argv,)
+    sys.audit('pty.spawn', argv)
+
+    pid, master_fd = fork()
+    if pid == CHILD:
+        os.execlp(argv[0], *argv)
+
+    try:
+        mode = tcgetattr(STDIN_FILENO)
+        setraw(STDIN_FILENO)
+        restore = True
+    except tty.error:    # This is the same as termios.error
+        restore = False
+
+    try:
+        _copy(master_fd, master_read, stdin_read)
+    finally:
+        if restore:
+            tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
+
+    close(master_fd)
+    return waitpid(pid, 0)[1]
diff --git a/Python314_4_x64_Template/Lib/py_compile.py b/Python314_4_x64_Template/Lib/py_compile.py
new file mode 100644
index 00000000..43d8ec90
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/py_compile.py
@@ -0,0 +1,212 @@
+"""Routine to "compile" a .py file to a .pyc file.
+
+This module has intimate knowledge of the format of .pyc files.
+"""
+
+import enum
+import importlib._bootstrap_external
+import importlib.machinery
+import importlib.util
+import os
+import os.path
+import sys
+import traceback
+
+__all__ = ["compile", "main", "PyCompileError", "PycInvalidationMode"]
+
+
+class PyCompileError(Exception):
+    """Exception raised when an error occurs while attempting to
+    compile the file.
+
+    To raise this exception, use
+
+       raise PyCompileError(exc_type,exc_value,file[,msg])
+
+    where
+
+       exc_type:   exception type to be used in error message
+                   type name can be accessed as class variable
+                   'exc_type_name'
+
+       exc_value:  exception value to be used in error message
+                   can be accessed as class variable 'exc_value'
+
+       file:       name of file being compiled to be used in error message
+                   can be accessed as class variable 'file'
+
+       msg:        string message to be written as error message
+                   If no value is given, a default exception message will be
+                   given, consistent with 'standard' py_compile output.
+                   message (or default) can be accessed as class variable
+                   'msg'
+
+    """
+
+    def __init__(self, exc_type, exc_value, file, msg=''):
+        exc_type_name = exc_type.__name__
+        if exc_type is SyntaxError:
+            tbtext = ''.join(traceback.format_exception_only(
+                exc_type, exc_value))
+            errmsg = tbtext.replace('File "<string>"', 'File "%s"' % file)
+        else:
+            errmsg = "Sorry: %s: %s" % (exc_type_name,exc_value)
+
+        Exception.__init__(self,msg or errmsg,exc_type_name,exc_value,file)
+
+        self.exc_type_name = exc_type_name
+        self.exc_value = exc_value
+        self.file = file
+        self.msg = msg or errmsg
+
+    def __str__(self):
+        return self.msg
+
+
+class PycInvalidationMode(enum.Enum):
+    TIMESTAMP = 1
+    CHECKED_HASH = 2
+    UNCHECKED_HASH = 3
+
+
+def _get_default_invalidation_mode():
+    if os.environ.get('SOURCE_DATE_EPOCH'):
+        return PycInvalidationMode.CHECKED_HASH
+    else:
+        return PycInvalidationMode.TIMESTAMP
+
+
+def compile(file, cfile=None, dfile=None, doraise=False, optimize=-1,
+            invalidation_mode=None, quiet=0):
+    """Byte-compile one Python source file to Python bytecode.
+
+    :param file: The source file name.
+    :param cfile: The target byte compiled file name.  When not given, this
+        defaults to the PEP 3147/PEP 488 location.
+    :param dfile: Purported file name, i.e. the file name that shows up in
+        error messages.  Defaults to the source file name.
+    :param doraise: Flag indicating whether or not an exception should be
+        raised when a compile error is found.  If an exception occurs and this
+        flag is set to False, a string indicating the nature of the exception
+        will be printed, and the function will return to the caller.  If an
+        exception occurs and this flag is set to True, a PyCompileError
+        exception will be raised.
+    :param optimize: The optimization level for the compiler.  Valid values
+        are -1, 0, 1 and 2.  A value of -1 means to use the optimization
+        level of the current interpreter, as given by -O command line options.
+    :param invalidation_mode:
+    :param quiet: Return full output with False or 0, errors only with 1,
+        and no output with 2.
+
+    :return: Path to the resulting byte compiled file.
+
+    Note that it isn't necessary to byte-compile Python modules for
+    execution efficiency -- Python itself byte-compiles a module when
+    it is loaded, and if it can, writes out the bytecode to the
+    corresponding .pyc file.
+
+    However, if a Python installation is shared between users, it is a
+    good idea to byte-compile all modules upon installation, since
+    other users may not be able to write in the source directories,
+    and thus they won't be able to write the .pyc file, and then
+    they would be byte-compiling every module each time it is loaded.
+    This can slow down program start-up considerably.
+
+    See compileall.py for a script/module that uses this module to
+    byte-compile all installed files (or all files in selected
+    directories).
+
+    Do note that FileExistsError is raised if cfile ends up pointing at a
+    non-regular file or symlink. Because the compilation uses a file renaming,
+    the resulting file would be regular and thus not the same type of file as
+    it was previously.
+    """
+    if invalidation_mode is None:
+        invalidation_mode = _get_default_invalidation_mode()
+    if cfile is None:
+        if optimize >= 0:
+            optimization = optimize if optimize >= 1 else ''
+            cfile = importlib.util.cache_from_source(file,
+                                                     optimization=optimization)
+        else:
+            cfile = importlib.util.cache_from_source(file)
+    if os.path.islink(cfile):
+        msg = ('{} is a symlink and will be changed into a regular file if '
+               'import writes a byte-compiled file to it')
+        raise FileExistsError(msg.format(cfile))
+    elif os.path.exists(cfile) and not os.path.isfile(cfile):
+        msg = ('{} is a non-regular file and will be changed into a regular '
+               'one if import writes a byte-compiled file to it')
+        raise FileExistsError(msg.format(cfile))
+    loader = importlib.machinery.SourceFileLoader('<py_compile>', file)
+    source_bytes = loader.get_data(file)
+    try:
+        code = loader.source_to_code(source_bytes, dfile or file,
+                                     _optimize=optimize)
+    except Exception as err:
+        py_exc = PyCompileError(err.__class__, err, dfile or file)
+        if quiet < 2:
+            if doraise:
+                raise py_exc
+            else:
+                sys.stderr.write(py_exc.msg + '\n')
+        return
+    try:
+        dirname = os.path.dirname(cfile)
+        if dirname:
+            os.makedirs(dirname)
+    except FileExistsError:
+        pass
+    if invalidation_mode == PycInvalidationMode.TIMESTAMP:
+        source_stats = loader.path_stats(file)
+        bytecode = importlib._bootstrap_external._code_to_timestamp_pyc(
+            code, source_stats['mtime'], source_stats['size'])
+    else:
+        source_hash = importlib.util.source_hash(source_bytes)
+        bytecode = importlib._bootstrap_external._code_to_hash_pyc(
+            code,
+            source_hash,
+            (invalidation_mode == PycInvalidationMode.CHECKED_HASH),
+        )
+    mode = importlib._bootstrap_external._calc_mode(file)
+    importlib._bootstrap_external._write_atomic(cfile, bytecode, mode)
+    return cfile
+
+
+def main():
+    import argparse
+
+    description = 'A simple command-line interface for py_compile module.'
+    parser = argparse.ArgumentParser(description=description, color=True)
+    parser.add_argument(
+        '-q', '--quiet',
+        action='store_true',
+        help='Suppress error output',
+    )
+    parser.add_argument(
+        'filenames',
+        nargs='+',
+        help='Files to compile',
+    )
+    args = parser.parse_args()
+    if args.filenames == ['-']:
+        filenames = [filename.rstrip('\n') for filename in sys.stdin.readlines()]
+    else:
+        filenames = args.filenames
+    for filename in filenames:
+        try:
+            compile(filename, doraise=True)
+        except PyCompileError as error:
+            if args.quiet:
+                parser.exit(1)
+            else:
+                parser.exit(1, error.msg)
+        except OSError as error:
+            if args.quiet:
+                parser.exit(1)
+            else:
+                parser.exit(1, str(error))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/Python313_13_x64_Template/Lib/pyclbr.py b/Python314_4_x64_Template/Lib/pyclbr.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/pyclbr.py
rename to Python314_4_x64_Template/Lib/pyclbr.py
diff --git a/Python314_4_x64_Template/Lib/pydoc.py b/Python314_4_x64_Template/Lib/pydoc.py
new file mode 100644
index 00000000..1f8a6ef3
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/pydoc.py
@@ -0,0 +1,2887 @@
+"""Generate Python documentation in HTML or text for interactive use.
+
+At the Python interactive prompt, calling help(thing) on a Python object
+documents the object, and calling help() starts up an interactive
+help session.
+
+Or, at the shell command line outside of Python:
+
+Run "pydoc <name>" to show documentation on something.  <name> may be
+the name of a function, module, package, or a dotted reference to a
+class or function within a module or module in a package.  If the
+argument contains a path segment delimiter (e.g. slash on Unix,
+backslash on Windows) it is treated as the path to a Python source file.
+
+Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
+of all available modules.
+
+Run "pydoc -n <hostname>" to start an HTTP server with the given
+hostname (default: localhost) on the local machine.
+
+Run "pydoc -p <port>" to start an HTTP server on the given port on the
+local machine.  Port number 0 can be used to get an arbitrary unused port.
+
+Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
+open a web browser to interactively browse documentation.  Combine with
+the -n and -p options to control the hostname and port used.
+
+Run "pydoc -w <name>" to write out the HTML documentation for a module
+to a file named "<name>.html".
+
+Module docs for core modules are assumed to be in
+
+    https://docs.python.org/X.Y/library/
+
+This can be overridden by setting the PYTHONDOCS environment variable
+to a different URL or to a local directory containing the Library
+Reference Manual pages.
+"""
+__all__ = ['help']
+__author__ = "Ka-Ping Yee <ping@lfw.org>"
+__date__ = "26 February 2001"
+
+__credits__ = """Guido van Rossum, for an excellent programming language.
+Tommy Burnette, the original creator of manpy.
+Paul Prescod, for all his work on onlinehelp.
+Richard Chamberlain, for the first implementation of textdoc.
+"""
+
+# Known bugs that can't be fixed here:
+#   - synopsis() cannot be prevented from clobbering existing
+#     loaded modules.
+#   - If the __file__ attribute on a module is a relative path and
+#     the current directory is changed with os.chdir(), an incorrect
+#     path will be displayed.
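+# Programmatic usage sketch (editor's note, not upstream code):
+#
+#     import pydoc
+#     text = pydoc.render_doc(len)   # plain-text documentation for len()
+#     pydoc.writedoc('json')         # writes json.html to the current dir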
+ +import ast +import __future__ +import builtins +import importlib._bootstrap +import importlib._bootstrap_external +import importlib.machinery +import importlib.util +import inspect +import io +import os +import pkgutil +import platform +import re +import sys +import sysconfig +import textwrap +import time +import tokenize +import urllib.parse +import warnings +from annotationlib import Format +from collections import deque +from reprlib import Repr +from traceback import format_exception_only + +from _pyrepl.pager import (get_pager, pipe_pager, + plain_pager, tempfile_pager, tty_pager) + +# Expose plain() as pydoc.plain() +from _pyrepl.pager import plain # noqa: F401 + + +# --------------------------------------------------------- old names + +getpager = get_pager +pipepager = pipe_pager +plainpager = plain_pager +tempfilepager = tempfile_pager +ttypager = tty_pager + + +# --------------------------------------------------------- common routines + +def pathdirs(): + """Convert sys.path into a list of absolute, existing, unique paths.""" + dirs = [] + normdirs = [] + for dir in sys.path: + dir = os.path.abspath(dir or '.') + normdir = os.path.normcase(dir) + if normdir not in normdirs and os.path.isdir(dir): + dirs.append(dir) + normdirs.append(normdir) + return dirs + +def _findclass(func): + cls = sys.modules.get(func.__module__) + if cls is None: + return None + for name in func.__qualname__.split('.')[:-1]: + cls = getattr(cls, name) + if not inspect.isclass(cls): + return None + return cls + +def _finddoc(obj): + if inspect.ismethod(obj): + name = obj.__func__.__name__ + self = obj.__self__ + if (inspect.isclass(self) and + getattr(getattr(self, name, None), '__func__') is obj.__func__): + # classmethod + cls = self + else: + cls = self.__class__ + elif inspect.isfunction(obj): + name = obj.__name__ + cls = _findclass(obj) + if cls is None or getattr(cls, name) is not obj: + return None + elif inspect.isbuiltin(obj): + name = obj.__name__ + self = obj.__self__ + if (inspect.isclass(self) and + self.__qualname__ + '.' + name == obj.__qualname__): + # classmethod + cls = self + else: + cls = self.__class__ + # Should be tested before isdatadescriptor(). + elif isinstance(obj, property): + name = obj.__name__ + cls = _findclass(obj.fget) + if cls is None or getattr(cls, name) is not obj: + return None + elif inspect.ismethoddescriptor(obj) or inspect.isdatadescriptor(obj): + name = obj.__name__ + cls = obj.__objclass__ + if getattr(cls, name) is not obj: + return None + if inspect.ismemberdescriptor(obj): + slots = getattr(cls, '__slots__', None) + if isinstance(slots, dict) and name in slots: + return slots[name] + else: + return None + for base in cls.__mro__: + try: + doc = _getowndoc(getattr(base, name)) + except AttributeError: + continue + if doc is not None: + return doc + return None + +def _getowndoc(obj): + """Get the documentation string for an object if it is not + inherited from its class.""" + try: + doc = object.__getattribute__(obj, '__doc__') + if doc is None: + return None + if obj is not type: + typedoc = type(obj).__doc__ + if isinstance(typedoc, str) and typedoc == doc: + return None + return doc + except AttributeError: + return None + +def _getdoc(object): + """Get the documentation string for an object. + + All tabs are expanded to spaces. 
To clean up docstrings that are
+    indented to line up with blocks of code, any whitespace that can be
+    uniformly removed from the second line onwards is removed."""
+    doc = _getowndoc(object)
+    if doc is None:
+        try:
+            doc = _finddoc(object)
+        except (AttributeError, TypeError):
+            return None
+    if not isinstance(doc, str):
+        return None
+    return inspect.cleandoc(doc)
+
+def getdoc(object):
+    """Get the doc string or comments for an object."""
+    result = _getdoc(object) or inspect.getcomments(object)
+    return result and re.sub('^ *\n', '', result.rstrip()) or ''
+
+def splitdoc(doc):
+    """Split a doc string into a synopsis line (if any) and the rest."""
+    lines = doc.strip().split('\n')
+    if len(lines) == 1:
+        return lines[0], ''
+    elif len(lines) >= 2 and not lines[1].rstrip():
+        return lines[0], '\n'.join(lines[2:])
+    return '', '\n'.join(lines)
+
+def _getargspec(object):
+    try:
+        signature = inspect.signature(object, annotation_format=Format.STRING)
+        if signature:
+            name = getattr(object, '__name__', '')
+            # <lambda> functions are always single-line and should not be formatted
+            max_width = (80 - len(name)) if name != '<lambda>' else None
+            return signature.format(max_width=max_width, quote_annotation_strings=False)
+    except (ValueError, TypeError):
+        argspec = getattr(object, '__text_signature__', None)
+        if argspec:
+            if argspec[:2] == '($':
+                argspec = '(' + argspec[2:]
+            if getattr(object, '__self__', None) is not None:
+                # Strip the bound argument.
+                m = re.match(r'\(\w+(?:(?=\))|,\s*(?:/(?:(?=\))|,\s*))?)', argspec)
+                if m:
+                    argspec = '(' + argspec[m.end():]
+            return argspec
+    return None
+
+def classname(object, modname):
+    """Get a class name and qualify it with a module name if necessary."""
+    name = object.__name__
+    if object.__module__ != modname:
+        name = object.__module__ + '.' + name
+    return name
+
+def parentname(object, modname):
+    """Get a name of the enclosing class (qualified with a module name
+    if necessary) or module."""
+    if '.' in object.__qualname__:
+        name = object.__qualname__.rpartition('.')[0]
+        if object.__module__ != modname and object.__module__ is not None:
+            return object.__module__ + '.' + name
+        else:
+            return name
+    else:
+        if object.__module__ != modname:
+            return object.__module__
+
+def isdata(object):
+    """Check if an object is of a type that probably means it's data."""
+    return not (inspect.ismodule(object) or inspect.isclass(object) or
+                inspect.isroutine(object) or inspect.isframe(object) or
+                inspect.istraceback(object) or inspect.iscode(object))
+
+def replace(text, *pairs):
+    """Do a series of global replacements on a string."""
+    while pairs:
+        text = pairs[1].join(text.split(pairs[0]))
+        pairs = pairs[2:]
+    return text
+
+def cram(text, maxlen):
+    """Omit part of a string if needed to make it fit in a maximum length."""
+    if len(text) > maxlen:
+        pre = max(0, (maxlen-3)//2)
+        post = max(0, maxlen-3-pre)
+        return text[:pre] + '...' + text[len(text)-post:]
+    return text
+
+_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
+def stripid(text):
+    """Remove the hexadecimal id from a Python object representation."""
+    # The behaviour of %p is implementation-dependent in terms of case.
+    return _re_stripid.sub(r'\1', text)
+
+def _is_bound_method(fn):
+    """
+    Returns True if fn is a bound method, regardless of whether
+    fn was implemented in Python or in C.
+ """ + if inspect.ismethod(fn): + return True + if inspect.isbuiltin(fn): + self = getattr(fn, '__self__', None) + return not (inspect.ismodule(self) or (self is None)) + return False + + +def allmethods(cl): + methods = {} + for key, value in inspect.getmembers(cl, inspect.isroutine): + methods[key] = 1 + for base in cl.__bases__: + methods.update(allmethods(base)) # all your base are belong to us + for key in methods.keys(): + methods[key] = getattr(cl, key) + return methods + +def _split_list(s, predicate): + """Split sequence s via predicate, and return pair ([true], [false]). + + The return value is a 2-tuple of lists, + ([x for x in s if predicate(x)], + [x for x in s if not predicate(x)]) + """ + + yes = [] + no = [] + for x in s: + if predicate(x): + yes.append(x) + else: + no.append(x) + return yes, no + +_future_feature_names = set(__future__.all_feature_names) + +def visiblename(name, all=None, obj=None): + """Decide whether to show documentation on a variable.""" + # Certain special names are redundant or internal. + # XXX Remove __initializing__? + if name in {'__author__', '__builtins__', '__cached__', '__credits__', + '__date__', '__doc__', '__file__', '__spec__', + '__loader__', '__module__', '__name__', '__package__', + '__path__', '__qualname__', '__slots__', '__version__', + '__static_attributes__', '__firstlineno__', + '__annotate_func__', '__annotations_cache__'}: + return 0 + # Private names are hidden, but special names are displayed. + if name.startswith('__') and name.endswith('__'): return 1 + # Namedtuples have public fields and methods with a single leading underscore + if name.startswith('_') and hasattr(obj, '_fields'): + return True + # Ignore __future__ imports. + if obj is not __future__ and name in _future_feature_names: + if isinstance(getattr(obj, name, None), __future__._Feature): + return False + if all is not None: + # only document that which the programmer exported in __all__ + return name in all + else: + return not name.startswith('_') + +def classify_class_attrs(object): + """Wrap inspect.classify_class_attrs, with fixup for data descriptors and bound methods.""" + results = [] + for (name, kind, cls, value) in inspect.classify_class_attrs(object): + if inspect.isdatadescriptor(value): + kind = 'data descriptor' + if isinstance(value, property) and value.fset is None: + kind = 'readonly property' + elif kind == 'method' and _is_bound_method(value): + kind = 'static method' + results.append((name, kind, cls, value)) + return results + +def sort_attributes(attrs, object): + 'Sort the attrs list in-place by _fields and then alphabetically by name' + # This allows data descriptors to be ordered according + # to a _fields attribute if present. 
+ fields = getattr(object, '_fields', []) + try: + field_order = {name : i-len(fields) for (i, name) in enumerate(fields)} + except TypeError: + field_order = {} + keyfunc = lambda attr: (field_order.get(attr[0], 0), attr[0]) + attrs.sort(key=keyfunc) + +# ----------------------------------------------------- module manipulation + +def ispackage(path): + """Guess whether a path refers to a package directory.""" + warnings.warn('The pydoc.ispackage() function is deprecated', + DeprecationWarning, stacklevel=2) + if os.path.isdir(path): + for ext in ('.py', '.pyc'): + if os.path.isfile(os.path.join(path, '__init__' + ext)): + return True + return False + +def source_synopsis(file): + """Return the one-line summary of a file object, if present""" + + string = '' + try: + tokens = tokenize.generate_tokens(file.readline) + for tok_type, tok_string, _, _, _ in tokens: + if tok_type == tokenize.STRING: + string += tok_string + elif tok_type == tokenize.NEWLINE: + with warnings.catch_warnings(): + # Ignore the "invalid escape sequence" warning. + warnings.simplefilter("ignore", SyntaxWarning) + docstring = ast.literal_eval(string) + if not isinstance(docstring, str): + return None + return docstring.strip().split('\n')[0].strip() + elif tok_type == tokenize.OP and tok_string in ('(', ')'): + string += tok_string + elif tok_type not in (tokenize.COMMENT, tokenize.NL, tokenize.ENCODING): + return None + except (tokenize.TokenError, UnicodeDecodeError, SyntaxError): + return None + return None + +def synopsis(filename, cache={}): + """Get the one-line summary out of a module file.""" + mtime = os.stat(filename).st_mtime + lastupdate, result = cache.get(filename, (None, None)) + if lastupdate is None or lastupdate < mtime: + # Look for binary suffixes first, falling back to source. + if filename.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)): + loader_cls = importlib.machinery.SourcelessFileLoader + elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)): + loader_cls = importlib.machinery.ExtensionFileLoader + else: + loader_cls = None + # Now handle the choice. + if loader_cls is None: + # Must be a source file. + try: + file = tokenize.open(filename) + except OSError: + # module can't be opened, so skip it + return None + # text modules can be directly examined + with file: + result = source_synopsis(file) + else: + # Must be a binary module, which has to be imported. + loader = loader_cls('__temp__', filename) + # XXX We probably don't need to pass in the loader here. + spec = importlib.util.spec_from_file_location('__temp__', filename, + loader=loader) + try: + module = importlib._bootstrap._load(spec) + except: + return None + del sys.modules['__temp__'] + result = module.__doc__.splitlines()[0] if module.__doc__ else None + # Cache the result. 
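+        # (Editor's note) the cache maps filename -> (mtime, summary), so a
+        # later call for an unchanged file skips re-parsing it entirely.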
+ cache[filename] = (mtime, result) + return result + +class ErrorDuringImport(Exception): + """Errors that occurred while trying to import something to document it.""" + def __init__(self, filename, exc_info): + if not isinstance(exc_info, tuple): + assert isinstance(exc_info, BaseException) + self.exc = type(exc_info) + self.value = exc_info + self.tb = exc_info.__traceback__ + else: + warnings.warn("A tuple value for exc_info is deprecated, use an exception instance", + DeprecationWarning) + + self.exc, self.value, self.tb = exc_info + self.filename = filename + + def __str__(self): + exc = self.exc.__name__ + return 'problem in %s - %s: %s' % (self.filename, exc, self.value) + +def importfile(path): + """Import a Python source file or compiled file given its path.""" + magic = importlib.util.MAGIC_NUMBER + with open(path, 'rb') as file: + is_bytecode = magic == file.read(len(magic)) + filename = os.path.basename(path) + name, ext = os.path.splitext(filename) + if is_bytecode: + loader = importlib._bootstrap_external.SourcelessFileLoader(name, path) + else: + loader = importlib._bootstrap_external.SourceFileLoader(name, path) + # XXX We probably don't need to pass in the loader here. + spec = importlib.util.spec_from_file_location(name, path, loader=loader) + try: + return importlib._bootstrap._load(spec) + except BaseException as err: + raise ErrorDuringImport(path, err) + +def safeimport(path, forceload=0, cache={}): + """Import a module; handle errors; return None if the module isn't found. + + If the module *is* found but an exception occurs, it's wrapped in an + ErrorDuringImport exception and reraised. Unlike __import__, if a + package path is specified, the module at the end of the path is returned, + not the package at the beginning. If the optional 'forceload' argument + is 1, we reload the module from disk (unless it's a dynamic extension).""" + try: + # If forceload is 1 and the module has been previously loaded from + # disk, we always have to reload the module. Checking the file's + # mtime isn't good enough (e.g. the module could contain a class + # that inherits from another module that has changed). + if forceload and path in sys.modules: + if path not in sys.builtin_module_names: + # Remove the module from sys.modules and re-import to try + # and avoid problems with partially loaded modules. + # Also remove any submodules because they won't appear + # in the newly loaded module's namespace if they're already + # in sys.modules. + subs = [m for m in sys.modules if m.startswith(path + '.')] + for key in [path] + subs: + # Prevent garbage collection. + cache[key] = sys.modules[key] + del sys.modules[key] + module = importlib.import_module(path) + except BaseException as err: + # Did the error occur before or after the module was found? + if path in sys.modules: + # An error occurred while executing the imported module. + raise ErrorDuringImport(sys.modules[path].__file__, err) + elif type(err) is SyntaxError: + # A SyntaxError occurred before we could execute the module. + raise ErrorDuringImport(err.filename, err) + elif isinstance(err, ImportError) and err.name == path: + # No such module in the path. + return None + else: + # Some other error occurred during the importing process. 
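+            # (Editor's note) i.e. anything that is neither a SyntaxError, a
+            # missing module, nor a post-import failure is re-raised with the
+            # requested path as the location.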
+            raise ErrorDuringImport(path, err)
+    return module
+
+# ---------------------------------------------------- formatter base class
+
+class Doc:
+
+    PYTHONDOCS = os.environ.get("PYTHONDOCS",
+                                "https://docs.python.org/%d.%d/library"
+                                % sys.version_info[:2])
+
+    def document(self, object, name=None, *args):
+        """Generate documentation for an object."""
+        args = (object, name) + args
+        # 'try' clause is to attempt to handle the possibility that inspect
+        # identifies something in a way that pydoc itself has issues handling;
+        # think 'super' and how it is a descriptor (which raises the exception
+        # by lacking a __name__ attribute) and an instance.
+        try:
+            if inspect.ismodule(object): return self.docmodule(*args)
+            if inspect.isclass(object): return self.docclass(*args)
+            if inspect.isroutine(object): return self.docroutine(*args)
+        except AttributeError:
+            pass
+        if inspect.isdatadescriptor(object): return self.docdata(*args)
+        return self.docother(*args)
+
+    def fail(self, object, name=None, *args):
+        """Raise an exception for unimplemented types."""
+        message = "don't know how to document object%s of type %s" % (
+            name and ' ' + repr(name), type(object).__name__)
+        raise TypeError(message)
+
+    docmodule = docclass = docroutine = docother = docproperty = docdata = fail
+
+    def getdocloc(self, object, basedir=sysconfig.get_path('stdlib')):
+        """Return the location of module docs or None"""
+
+        try:
+            file = inspect.getabsfile(object)
+        except TypeError:
+            file = '(built-in)'
+
+        docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
+
+        basedir = os.path.normcase(basedir)
+        if (isinstance(object, type(os)) and
+            (object.__name__ in ('errno', 'exceptions', 'gc',
+                                 'marshal', 'posix', 'signal', 'sys',
+                                 '_thread', 'zipimport') or
+             (file.startswith(basedir) and
+              not file.startswith(os.path.join(basedir, 'site-packages')))) and
+            object.__name__ not in ('xml.etree', 'test.test_pydoc.pydoc_mod')):
+
+            try:
+                from pydoc_data import module_docs
+            except ImportError:
+                module_docs = None
+
+            if module_docs and object.__name__ in module_docs.module_docs:
+                doc_name = module_docs.module_docs[object.__name__]
+                if docloc.startswith(("http://", "https://")):
+                    docloc = "{}/{}".format(docloc.rstrip("/"), doc_name)
+                else:
+                    docloc = os.path.join(docloc, doc_name)
+            else:
+                docloc = None
+        else:
+            docloc = None
+        return docloc
+
+# -------------------------------------------- HTML documentation generator
+
+class HTMLRepr(Repr):
+    """Class for safely making an HTML representation of a Python object."""
+    def __init__(self):
+        Repr.__init__(self)
+        self.maxlist = self.maxtuple = 20
+        self.maxdict = 10
+        self.maxstring = self.maxother = 100
+
+    def escape(self, text):
+        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
+
+    def repr(self, object):
+        return Repr.repr(self, object)
+
+    def repr1(self, x, level):
+        if hasattr(type(x), '__name__'):
+            methodname = 'repr_' + '_'.join(type(x).__name__.split())
+            if hasattr(self, methodname):
+                return getattr(self, methodname)(x, level)
+        return self.escape(cram(stripid(repr(x)), self.maxother))
+
+    def repr_string(self, x, level):
+        test = cram(x, self.maxstring)
+        testrepr = repr(test)
+        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
+            # Backslashes are only literal in the string and are never
+            # needed to make any special characters, so show a raw string.
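+            # (Editor's example) for x = r'C:\temp' this branch renders
+            # r'C:\temp' rather than the doubled-backslash form 'C:\\temp'.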
+ return 'r' + testrepr[0] + self.escape(test) + testrepr[0] + return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)', + r'\1', + self.escape(testrepr)) + + repr_str = repr_string + + def repr_instance(self, x, level): + try: + return self.escape(cram(stripid(repr(x)), self.maxstring)) + except: + return self.escape('<%s instance>' % x.__class__.__name__) + + repr_unicode = repr_string + +class HTMLDoc(Doc): + """Formatter class for HTML documentation.""" + + # ------------------------------------------- HTML formatting utilities + + _repr_instance = HTMLRepr() + repr = _repr_instance.repr + escape = _repr_instance.escape + + def page(self, title, contents): + """Format an HTML page.""" + return '''\ + + + + +Python: %s + +%s +''' % (title, contents) + + def heading(self, title, extras=''): + """Format a page heading.""" + return ''' + + + +
 
%s
%s
+ ''' % (title, extras or ' ') + + def section(self, title, cls, contents, width=6, + prelude='', marginalia=None, gap=' '): + """Format a section with a heading.""" + if marginalia is None: + marginalia = '' + ' ' * width + '' + result = '''

+ + + + ''' % (cls, title) + if prelude: + result = result + ''' + + +''' % (cls, marginalia, cls, prelude, gap) + else: + result = result + ''' +''' % (cls, marginalia, gap) + + return result + '\n
 
%s
%s%s
%s
%s%s%s
' % contents + + def bigsection(self, title, *args): + """Format a section with a big heading.""" + title = '%s' % title + return self.section(title, *args) + + def preformat(self, text): + """Format literal preformatted text.""" + text = self.escape(text.expandtabs()) + return replace(text, '\n\n', '\n \n', '\n\n', '\n \n', + ' ', ' ', '\n', '
\n') + + def multicolumn(self, list, format): + """Format a list of items into a multi-column list.""" + result = '' + rows = (len(list) + 3) // 4 + for col in range(4): + result = result + '' + for i in range(rows*col, rows*col+rows): + if i < len(list): + result = result + format(list[i]) + '
\n' + result = result + '' + return '%s
' % result + + def grey(self, text): return '%s' % text + + def namelink(self, name, *dicts): + """Make a link for an identifier, given name-to-URL mappings.""" + for dict in dicts: + if name in dict: + return '
%s' % (dict[name], name) + return name + + def classlink(self, object, modname): + """Make a link for a class.""" + name, module = object.__name__, sys.modules.get(object.__module__) + if hasattr(module, name) and getattr(module, name) is object: + return '%s' % ( + module.__name__, name, classname(object, modname)) + return classname(object, modname) + + def parentlink(self, object, modname): + """Make a link for the enclosing class or module.""" + link = None + name, module = object.__name__, sys.modules.get(object.__module__) + if hasattr(module, name) and getattr(module, name) is object: + if '.' in object.__qualname__: + name = object.__qualname__.rpartition('.')[0] + if object.__module__ != modname: + link = '%s.html#%s' % (module.__name__, name) + else: + link = '#%s' % name + else: + if object.__module__ != modname: + link = '%s.html' % module.__name__ + if link: + return '%s' % (link, parentname(object, modname)) + else: + return parentname(object, modname) + + def modulelink(self, object): + """Make a link for a module.""" + return '%s' % (object.__name__, object.__name__) + + def modpkglink(self, modpkginfo): + """Make a link for a module or package to display in an index.""" + name, path, ispackage, shadowed = modpkginfo + if shadowed: + return self.grey(name) + if path: + url = '%s.%s.html' % (path, name) + else: + url = '%s.html' % name + if ispackage: + text = '%s (package)' % name + else: + text = name + return '%s' % (url, text) + + def filelink(self, url, path): + """Make a link to source file.""" + return '%s' % (url, path) + + def markup(self, text, escape=None, funcs={}, classes={}, methods={}): + """Mark up some plain text, given a context of symbols to look for. + Each context dictionary maps object names to anchor names.""" + escape = escape or self.escape + results = [] + here = 0 + pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|' + r'RFC[- ]?(\d+)|' + r'PEP[- ]?(\d+)|' + r'(self\.)?(\w+))') + while match := pattern.search(text, here): + start, end = match.span() + results.append(escape(text[here:start])) + + all, scheme, rfc, pep, selfdot, name = match.groups() + if scheme: + url = escape(all).replace('"', '"') + results.append('%s' % (url, url)) + elif rfc: + url = 'https://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc) + results.append('%s' % (url, escape(all))) + elif pep: + url = 'https://peps.python.org/pep-%04d/' % int(pep) + results.append('%s' % (url, escape(all))) + elif selfdot: + # Create a link for methods like 'self.method(...)' + # and use for attributes like 'self.attr' + if text[end:end+1] == '(': + results.append('self.' + self.namelink(name, methods)) + else: + results.append('self.%s' % name) + elif text[end:end+1] == '(': + results.append(self.namelink(name, methods, funcs, classes)) + else: + results.append(self.namelink(name, classes)) + here = end + results.append(escape(text[here:])) + return ''.join(results) + + # ---------------------------------------------- type-specific routines + + def formattree(self, tree, modname, parent=None): + """Produce HTML for a class tree as given by inspect.getclasstree().""" + result = '' + for entry in tree: + if isinstance(entry, tuple): + c, bases = entry + result = result + '

' + result = result + self.classlink(c, modname) + if bases and bases != (parent,): + parents = [] + for base in bases: + parents.append(self.classlink(base, modname)) + result = result + '(' + ', '.join(parents) + ')' + result = result + '\n
' + elif isinstance(entry, list): + result = result + '
\n%s
\n' % self.formattree( + entry, modname, c) + return '
\n%s
\n' % result + + def docmodule(self, object, name=None, mod=None, *ignored): + """Produce HTML documentation for a module object.""" + name = object.__name__ # ignore the passed-in name + try: + all = object.__all__ + except AttributeError: + all = None + parts = name.split('.') + links = [] + for i in range(len(parts)-1): + links.append( + '%s' % + ('.'.join(parts[:i+1]), parts[i])) + linkedname = '.'.join(links + parts[-1:]) + head = '%s' % linkedname + try: + path = inspect.getabsfile(object) + url = urllib.parse.quote(path) + filelink = self.filelink(url, path) + except TypeError: + filelink = '(built-in)' + info = [] + if hasattr(object, '__version__'): + version = str(object.__version__) + if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': + version = version[11:-1].strip() + info.append('version %s' % self.escape(version)) + if hasattr(object, '__date__'): + info.append(self.escape(str(object.__date__))) + if info: + head = head + ' (%s)' % ', '.join(info) + docloc = self.getdocloc(object) + if docloc is not None: + docloc = '
Module Reference' % locals() + else: + docloc = '' + result = self.heading(head, 'index
' + filelink + docloc) + + modules = inspect.getmembers(object, inspect.ismodule) + + classes, cdict = [], {} + for key, value in inspect.getmembers(object, inspect.isclass): + # if __all__ exists, believe it. Otherwise use old heuristic. + if (all is not None or + (inspect.getmodule(value) or object) is object): + if visiblename(key, all, object): + classes.append((key, value)) + cdict[key] = cdict[value] = '#' + key + for key, value in classes: + for base in value.__bases__: + key, modname = base.__name__, base.__module__ + module = sys.modules.get(modname) + if modname != name and module and hasattr(module, key): + if getattr(module, key) is base: + if not key in cdict: + cdict[key] = cdict[base] = modname + '.html#' + key + funcs, fdict = [], {} + for key, value in inspect.getmembers(object, inspect.isroutine): + # if __all__ exists, believe it. Otherwise use a heuristic. + if (all is not None + or inspect.isbuiltin(value) + or (inspect.getmodule(value) or object) is object): + if visiblename(key, all, object): + funcs.append((key, value)) + fdict[key] = '#-' + key + if inspect.isfunction(value): fdict[value] = fdict[key] + data = [] + for key, value in inspect.getmembers(object, isdata): + if visiblename(key, all, object): + data.append((key, value)) + + doc = self.markup(getdoc(object), self.preformat, fdict, cdict) + doc = doc and '%s' % doc + result = result + '

%s

\n' % doc + + if hasattr(object, '__path__'): + modpkgs = [] + for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): + modpkgs.append((modname, name, ispkg, 0)) + modpkgs.sort() + contents = self.multicolumn(modpkgs, self.modpkglink) + result = result + self.bigsection( + 'Package Contents', 'pkg-content', contents) + elif modules: + contents = self.multicolumn( + modules, lambda t: self.modulelink(t[1])) + result = result + self.bigsection( + 'Modules', 'pkg-content', contents) + + if classes: + classlist = [value for (key, value) in classes] + contents = [ + self.formattree(inspect.getclasstree(classlist, 1), name)] + for key, value in classes: + contents.append(self.document(value, key, name, fdict, cdict)) + result = result + self.bigsection( + 'Classes', 'index', ' '.join(contents)) + if funcs: + contents = [] + for key, value in funcs: + contents.append(self.document(value, key, name, fdict, cdict)) + result = result + self.bigsection( + 'Functions', 'functions', ' '.join(contents)) + if data: + contents = [] + for key, value in data: + contents.append(self.document(value, key)) + result = result + self.bigsection( + 'Data', 'data', '
\n'.join(contents)) + if hasattr(object, '__author__'): + contents = self.markup(str(object.__author__), self.preformat) + result = result + self.bigsection('Author', 'author', contents) + if hasattr(object, '__credits__'): + contents = self.markup(str(object.__credits__), self.preformat) + result = result + self.bigsection('Credits', 'credits', contents) + + return result + + def docclass(self, object, name=None, mod=None, funcs={}, classes={}, + *ignored): + """Produce HTML documentation for a class object.""" + realname = object.__name__ + name = name or realname + bases = object.__bases__ + + contents = [] + push = contents.append + + # Cute little class to pump out a horizontal rule between sections. + class HorizontalRule: + def __init__(self): + self.needone = 0 + def maybe(self): + if self.needone: + push('
\n') + self.needone = 1 + hr = HorizontalRule() + + # List the mro, if non-trivial. + mro = deque(inspect.getmro(object)) + if len(mro) > 2: + hr.maybe() + push('
Method resolution order:
\n') + for base in mro: + push('
%s
\n' % self.classlink(base, + object.__module__)) + push('
\n') + + def spill(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + try: + value = getattr(object, name) + except Exception: + # Some descriptors may meet a failure in their __get__. + # (bug #1785) + push(self.docdata(value, name, mod)) + else: + push(self.document(value, name, mod, + funcs, classes, mdict, object, homecls)) + push('\n') + return attrs + + def spilldescriptors(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + push(self.docdata(value, name, mod)) + return attrs + + def spilldata(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + base = self.docother(getattr(object, name), name, mod) + doc = getdoc(value) + if not doc: + push('
%s
\n' % base) + else: + doc = self.markup(getdoc(value), self.preformat, + funcs, classes, mdict) + doc = '
%s' % doc + push('
%s%s
\n' % (base, doc)) + push('\n') + return attrs + + attrs = [(name, kind, cls, value) + for name, kind, cls, value in classify_class_attrs(object) + if visiblename(name, obj=object)] + + mdict = {} + for key, kind, homecls, value in attrs: + mdict[key] = anchor = '#' + name + '-' + key + try: + value = getattr(object, name) + except Exception: + # Some descriptors may meet a failure in their __get__. + # (bug #1785) + pass + try: + # The value may not be hashable (e.g., a data attr with + # a dict or list value). + mdict[value] = anchor + except TypeError: + pass + + while attrs: + if mro: + thisclass = mro.popleft() + else: + thisclass = attrs[0][2] + attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) + + if object is not builtins.object and thisclass is builtins.object: + attrs = inherited + continue + elif thisclass is object: + tag = 'defined here' + else: + tag = 'inherited from %s' % self.classlink(thisclass, + object.__module__) + tag += ':
\n' + + sort_attributes(attrs, object) + + # Pump out the attrs, segregated by kind. + attrs = spill('Methods %s' % tag, attrs, + lambda t: t[1] == 'method') + attrs = spill('Class methods %s' % tag, attrs, + lambda t: t[1] == 'class method') + attrs = spill('Static methods %s' % tag, attrs, + lambda t: t[1] == 'static method') + attrs = spilldescriptors("Readonly properties %s" % tag, attrs, + lambda t: t[1] == 'readonly property') + attrs = spilldescriptors('Data descriptors %s' % tag, attrs, + lambda t: t[1] == 'data descriptor') + attrs = spilldata('Data and other attributes %s' % tag, attrs, + lambda t: t[1] == 'data') + assert attrs == [] + attrs = inherited + + contents = ''.join(contents) + + if name == realname: + title = 'class %s' % ( + name, realname) + else: + title = '%s = class %s' % ( + name, name, realname) + if bases: + parents = [] + for base in bases: + parents.append(self.classlink(base, object.__module__)) + title = title + '(%s)' % ', '.join(parents) + + decl = '' + argspec = _getargspec(object) + if argspec and argspec != '()': + decl = name + self.escape(argspec) + '\n\n' + + doc = getdoc(object) + if decl: + doc = decl + (doc or '') + doc = self.markup(doc, self.preformat, funcs, classes, mdict) + doc = doc and '%s
 
' % doc + + return self.section(title, 'title', contents, 3, doc) + + def formatvalue(self, object): + """Format an argument default value as text.""" + return self.grey('=' + self.repr(object)) + + def docroutine(self, object, name=None, mod=None, + funcs={}, classes={}, methods={}, cl=None, homecls=None): + """Produce HTML documentation for a function or method object.""" + realname = object.__name__ + name = name or realname + if homecls is None: + homecls = cl + anchor = ('' if cl is None else cl.__name__) + '-' + name + note = '' + skipdocs = False + imfunc = None + if _is_bound_method(object): + imself = object.__self__ + if imself is cl: + imfunc = getattr(object, '__func__', None) + elif inspect.isclass(imself): + note = ' class method of %s' % self.classlink(imself, mod) + else: + note = ' method of %s instance' % self.classlink( + imself.__class__, mod) + elif (inspect.ismethoddescriptor(object) or + inspect.ismethodwrapper(object)): + try: + objclass = object.__objclass__ + except AttributeError: + pass + else: + if cl is None: + note = ' unbound %s method' % self.classlink(objclass, mod) + elif objclass is not homecls: + note = ' from ' + self.classlink(objclass, mod) + else: + imfunc = object + if inspect.isfunction(imfunc) and homecls is not None and ( + imfunc.__module__ != homecls.__module__ or + imfunc.__qualname__ != homecls.__qualname__ + '.' + realname): + pname = self.parentlink(imfunc, mod) + if pname: + note = ' from %s' % pname + + if (inspect.iscoroutinefunction(object) or + inspect.isasyncgenfunction(object)): + asyncqualifier = 'async ' + else: + asyncqualifier = '' + + if name == realname: + title = '%s' % (anchor, realname) + else: + if (cl is not None and + inspect.getattr_static(cl, realname, []) is object): + reallink = '%s' % ( + cl.__name__ + '-' + realname, realname) + skipdocs = True + if note.startswith(' from '): + note = '' + else: + reallink = realname + title = '%s = %s' % ( + anchor, name, reallink) + argspec = None + if inspect.isroutine(object): + argspec = _getargspec(object) + if argspec and realname == '': + title = '%s lambda ' % name + # XXX lambda's won't usually have func_annotations['return'] + # since the syntax doesn't support but it is possible. + # So removing parentheses isn't truly safe. + if not object.__annotations__: + argspec = argspec[1:-1] # remove parentheses + if not argspec: + argspec = '(...)' + + decl = asyncqualifier + title + self.escape(argspec) + (note and + self.grey('%s' % note)) + + if skipdocs: + return '
%s
\n' % decl + else: + doc = self.markup( + getdoc(object), self.preformat, funcs, classes, methods) + doc = doc and '
%s
' % doc + return '
%s
%s
\n' % (decl, doc) + + def docdata(self, object, name=None, mod=None, cl=None, *ignored): + """Produce html documentation for a data descriptor.""" + results = [] + push = results.append + + if name: + push('
%s
\n' % name) + doc = self.markup(getdoc(object), self.preformat) + if doc: + push('
%s
\n' % doc) + push('
\n') + + return ''.join(results) + + docproperty = docdata + + def docother(self, object, name=None, mod=None, *ignored): + """Produce HTML documentation for a data object.""" + lhs = name and '%s = ' % name or '' + return lhs + self.repr(object) + + def index(self, dir, shadowed=None): + """Generate an HTML index for a directory of modules.""" + modpkgs = [] + if shadowed is None: shadowed = {} + for importer, name, ispkg in pkgutil.iter_modules([dir]): + if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name): + # ignore a module if its name contains a surrogate character + continue + modpkgs.append((name, '', ispkg, name in shadowed)) + shadowed[name] = 1 + + modpkgs.sort() + contents = self.multicolumn(modpkgs, self.modpkglink) + return self.bigsection(dir, 'index', contents) + +# -------------------------------------------- text documentation generator + +class TextRepr(Repr): + """Class for safely making a text representation of a Python object.""" + def __init__(self): + Repr.__init__(self) + self.maxlist = self.maxtuple = 20 + self.maxdict = 10 + self.maxstring = self.maxother = 100 + + def repr1(self, x, level): + if hasattr(type(x), '__name__'): + methodname = 'repr_' + '_'.join(type(x).__name__.split()) + if hasattr(self, methodname): + return getattr(self, methodname)(x, level) + return cram(stripid(repr(x)), self.maxother) + + def repr_string(self, x, level): + test = cram(x, self.maxstring) + testrepr = repr(test) + if '\\' in test and '\\' not in replace(testrepr, r'\\', ''): + # Backslashes are only literal in the string and are never + # needed to make any special characters, so show a raw string. + return 'r' + testrepr[0] + test + testrepr[0] + return testrepr + + repr_str = repr_string + + def repr_instance(self, x, level): + try: + return cram(stripid(repr(x)), self.maxstring) + except: + return '<%s instance>' % x.__class__.__name__ + +class TextDoc(Doc): + """Formatter class for text documentation.""" + + # ------------------------------------------- text formatting utilities + + _repr_instance = TextRepr() + repr = _repr_instance.repr + + def bold(self, text): + """Format a string in bold by overstriking.""" + return ''.join(ch + '\b' + ch for ch in text) + + def indent(self, text, prefix=' '): + """Indent text by prepending a given prefix to each line.""" + if not text: return '' + lines = [(prefix + line).rstrip() for line in text.split('\n')] + return '\n'.join(lines) + + def section(self, title, contents): + """Format a section with a given heading.""" + clean_contents = self.indent(contents).rstrip() + return self.bold(title) + '\n' + clean_contents + '\n\n' + + # ---------------------------------------------- type-specific routines + + def formattree(self, tree, modname, parent=None, prefix=''): + """Render in text a class tree as returned by inspect.getclasstree().""" + result = '' + for entry in tree: + if isinstance(entry, tuple): + c, bases = entry + result = result + prefix + classname(c, modname) + if bases and bases != (parent,): + parents = (classname(c, modname) for c in bases) + result = result + '(%s)' % ', '.join(parents) + result = result + '\n' + elif isinstance(entry, list): + result = result + self.formattree( + entry, modname, c, prefix + ' ') + return result + + def docmodule(self, object, name=None, mod=None, *ignored): + """Produce text documentation for a given module object.""" + name = object.__name__ # ignore the passed-in name + synop, desc = splitdoc(getdoc(object)) + result = self.section('NAME', name + (synop and ' - ' + synop)) + all 
= getattr(object, '__all__', None) + docloc = self.getdocloc(object) + if docloc is not None: + result = result + self.section('MODULE REFERENCE', docloc + """ + +The following documentation is automatically generated from the Python +source files. It may be incomplete, incorrect or include features that +are considered implementation detail and may vary between Python +implementations. When in doubt, consult the module reference at the +location listed above. +""") + + if desc: + result = result + self.section('DESCRIPTION', desc) + + classes = [] + for key, value in inspect.getmembers(object, inspect.isclass): + # if __all__ exists, believe it. Otherwise use old heuristic. + if (all is not None + or (inspect.getmodule(value) or object) is object): + if visiblename(key, all, object): + classes.append((key, value)) + funcs = [] + for key, value in inspect.getmembers(object, inspect.isroutine): + # if __all__ exists, believe it. Otherwise use a heuristic. + if (all is not None + or inspect.isbuiltin(value) + or (inspect.getmodule(value) or object) is object): + if visiblename(key, all, object): + funcs.append((key, value)) + data = [] + for key, value in inspect.getmembers(object, isdata): + if visiblename(key, all, object): + data.append((key, value)) + + modpkgs = [] + modpkgs_names = set() + if hasattr(object, '__path__'): + for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): + modpkgs_names.add(modname) + if ispkg: + modpkgs.append(modname + ' (package)') + else: + modpkgs.append(modname) + + modpkgs.sort() + result = result + self.section( + 'PACKAGE CONTENTS', '\n'.join(modpkgs)) + + # Detect submodules as sometimes created by C extensions + submodules = [] + for key, value in inspect.getmembers(object, inspect.ismodule): + if value.__name__.startswith(name + '.') and key not in modpkgs_names: + submodules.append(key) + if submodules: + submodules.sort() + result = result + self.section( + 'SUBMODULES', '\n'.join(submodules)) + + if classes: + classlist = [value for key, value in classes] + contents = [self.formattree( + inspect.getclasstree(classlist, 1), name)] + for key, value in classes: + contents.append(self.document(value, key, name)) + result = result + self.section('CLASSES', '\n'.join(contents)) + + if funcs: + contents = [] + for key, value in funcs: + contents.append(self.document(value, key, name)) + result = result + self.section('FUNCTIONS', '\n'.join(contents)) + + if data: + contents = [] + for key, value in data: + contents.append(self.docother(value, key, name, maxlen=70)) + result = result + self.section('DATA', '\n'.join(contents)) + + if hasattr(object, '__version__'): + version = str(object.__version__) + if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': + version = version[11:-1].strip() + result = result + self.section('VERSION', version) + if hasattr(object, '__date__'): + result = result + self.section('DATE', str(object.__date__)) + if hasattr(object, '__author__'): + result = result + self.section('AUTHOR', str(object.__author__)) + if hasattr(object, '__credits__'): + result = result + self.section('CREDITS', str(object.__credits__)) + try: + file = inspect.getabsfile(object) + except TypeError: + file = '(built-in)' + result = result + self.section('FILE', file) + return result + + def docclass(self, object, name=None, mod=None, *ignored): + """Produce text documentation for a given class object.""" + realname = object.__name__ + name = name or realname + bases = object.__bases__ + + def makename(c, m=object.__module__): + 
return classname(c, m) + + if name == realname: + title = 'class ' + self.bold(realname) + else: + title = self.bold(name) + ' = class ' + realname + if bases: + parents = map(makename, bases) + title = title + '(%s)' % ', '.join(parents) + + contents = [] + push = contents.append + + argspec = _getargspec(object) + if argspec and argspec != '()': + push(name + argspec + '\n') + + doc = getdoc(object) + if doc: + push(doc + '\n') + + # List the mro, if non-trivial. + mro = deque(inspect.getmro(object)) + if len(mro) > 2: + push("Method resolution order:") + for base in mro: + push(' ' + makename(base)) + push('') + + # List the built-in subclasses, if any: + subclasses = sorted( + (str(cls.__name__) for cls in type.__subclasses__(object) + if (not cls.__name__.startswith("_") and + getattr(cls, '__module__', '') == "builtins")), + key=str.lower + ) + no_of_subclasses = len(subclasses) + MAX_SUBCLASSES_TO_DISPLAY = 4 + if subclasses: + push("Built-in subclasses:") + for subclassname in subclasses[:MAX_SUBCLASSES_TO_DISPLAY]: + push(' ' + subclassname) + if no_of_subclasses > MAX_SUBCLASSES_TO_DISPLAY: + push(' ... and ' + + str(no_of_subclasses - MAX_SUBCLASSES_TO_DISPLAY) + + ' other subclasses') + push('') + + # Cute little class to pump out a horizontal rule between sections. + class HorizontalRule: + def __init__(self): + self.needone = 0 + def maybe(self): + if self.needone: + push('-' * 70) + self.needone = 1 + hr = HorizontalRule() + + def spill(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + try: + value = getattr(object, name) + except Exception: + # Some descriptors may meet a failure in their __get__. + # (bug #1785) + push(self.docdata(value, name, mod)) + else: + push(self.document(value, + name, mod, object, homecls)) + return attrs + + def spilldescriptors(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + push(self.docdata(value, name, mod)) + return attrs + + def spilldata(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + doc = getdoc(value) + try: + obj = getattr(object, name) + except AttributeError: + obj = homecls.__dict__[name] + push(self.docother(obj, name, mod, maxlen=70, doc=doc) + + '\n') + return attrs + + attrs = [(name, kind, cls, value) + for name, kind, cls, value in classify_class_attrs(object) + if visiblename(name, obj=object)] + + while attrs: + if mro: + thisclass = mro.popleft() + else: + thisclass = attrs[0][2] + attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) + + if object is not builtins.object and thisclass is builtins.object: + attrs = inherited + continue + elif thisclass is object: + tag = "defined here" + else: + tag = "inherited from %s" % classname(thisclass, + object.__module__) + + sort_attributes(attrs, object) + + # Pump out the attrs, segregated by kind. 
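+            # Each spill* helper prints the attributes matching its
+            # predicate and returns the remainder, so every pass below
+            # narrows "attrs" down to the kinds not yet documented.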
+            attrs = spill("Methods %s:\n" % tag, attrs,
+                          lambda t: t[1] == 'method')
+            attrs = spill("Class methods %s:\n" % tag, attrs,
+                          lambda t: t[1] == 'class method')
+            attrs = spill("Static methods %s:\n" % tag, attrs,
+                          lambda t: t[1] == 'static method')
+            attrs = spilldescriptors("Readonly properties %s:\n" % tag, attrs,
+                                     lambda t: t[1] == 'readonly property')
+            attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
+                                     lambda t: t[1] == 'data descriptor')
+            attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
+                              lambda t: t[1] == 'data')
+
+            assert attrs == []
+            attrs = inherited
+
+        contents = '\n'.join(contents)
+        if not contents:
+            return title + '\n'
+        return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n'
+
+    def formatvalue(self, object):
+        """Format an argument default value as text."""
+        return '=' + self.repr(object)
+
+    def docroutine(self, object, name=None, mod=None, cl=None, homecls=None):
+        """Produce text documentation for a function or method object."""
+        realname = object.__name__
+        name = name or realname
+        if homecls is None:
+            homecls = cl
+        note = ''
+        skipdocs = False
+        imfunc = None
+        if _is_bound_method(object):
+            imself = object.__self__
+            if imself is cl:
+                imfunc = getattr(object, '__func__', None)
+            elif inspect.isclass(imself):
+                note = ' class method of %s' % classname(imself, mod)
+            else:
+                note = ' method of %s instance' % classname(
+                    imself.__class__, mod)
+        elif (inspect.ismethoddescriptor(object) or
+              inspect.ismethodwrapper(object)):
+            try:
+                objclass = object.__objclass__
+            except AttributeError:
+                pass
+            else:
+                if cl is None:
+                    note = ' unbound %s method' % classname(objclass, mod)
+                elif objclass is not homecls:
+                    note = ' from ' + classname(objclass, mod)
+        else:
+            imfunc = object
+        if inspect.isfunction(imfunc) and homecls is not None and (
+            imfunc.__module__ != homecls.__module__ or
+            imfunc.__qualname__ != homecls.__qualname__ + '.' + realname):
+            pname = parentname(imfunc, mod)
+            if pname:
+                note = ' from %s' % pname
+
+        if (inspect.iscoroutinefunction(object) or
+                inspect.isasyncgenfunction(object)):
+            asyncqualifier = 'async '
+        else:
+            asyncqualifier = ''
+
+        if name == realname:
+            title = self.bold(realname)
+        else:
+            if (cl is not None and
+                inspect.getattr_static(cl, realname, []) is object):
+                skipdocs = True
+                if note.startswith(' from '):
+                    note = ''
+            title = self.bold(name) + ' = ' + realname
+        argspec = None
+
+        if inspect.isroutine(object):
+            argspec = _getargspec(object)
+            if argspec and realname == '<lambda>':
+                title = self.bold(name) + ' lambda '
+                # XXX lambda's won't usually have func_annotations['return']
+                # since the syntax doesn't support but it is possible.
+                # So removing parentheses isn't truly safe.
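+                # The surrounding parentheses of the signature are dropped
+                # below so a lambda renders as "name lambda args"; this is
+                # skipped when annotations are present (see XXX note above).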
+ if not object.__annotations__: + argspec = argspec[1:-1] + if not argspec: + argspec = '(...)' + decl = asyncqualifier + title + argspec + note + + if skipdocs: + return decl + '\n' + else: + doc = getdoc(object) or '' + return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n') + + def docdata(self, object, name=None, mod=None, cl=None, *ignored): + """Produce text documentation for a data descriptor.""" + results = [] + push = results.append + + if name: + push(self.bold(name)) + push('\n') + doc = getdoc(object) or '' + if doc: + push(self.indent(doc)) + push('\n') + return ''.join(results) + + docproperty = docdata + + def docother(self, object, name=None, mod=None, parent=None, *ignored, + maxlen=None, doc=None): + """Produce text documentation for a data object.""" + repr = self.repr(object) + if maxlen: + line = (name and name + ' = ' or '') + repr + chop = maxlen - len(line) + if chop < 0: repr = repr[:chop] + '...' + line = (name and self.bold(name) + ' = ' or '') + repr + if not doc: + doc = getdoc(object) + if doc: + line += '\n' + self.indent(str(doc)) + '\n' + return line + +class _PlainTextDoc(TextDoc): + """Subclass of TextDoc which overrides string styling""" + def bold(self, text): + return text + +# --------------------------------------------------------- user interfaces + +def pager(text, title=''): + """The first time this is called, determine what kind of pager to use.""" + global pager + pager = get_pager() + pager(text, title) + +def describe(thing): + """Produce a short description of the given thing.""" + if inspect.ismodule(thing): + if thing.__name__ in sys.builtin_module_names: + return 'built-in module ' + thing.__name__ + if hasattr(thing, '__path__'): + return 'package ' + thing.__name__ + else: + return 'module ' + thing.__name__ + if inspect.isbuiltin(thing): + return 'built-in function ' + thing.__name__ + if inspect.isgetsetdescriptor(thing): + return 'getset descriptor %s.%s.%s' % ( + thing.__objclass__.__module__, thing.__objclass__.__name__, + thing.__name__) + if inspect.ismemberdescriptor(thing): + return 'member descriptor %s.%s.%s' % ( + thing.__objclass__.__module__, thing.__objclass__.__name__, + thing.__name__) + if inspect.isclass(thing): + return 'class ' + thing.__name__ + if inspect.isfunction(thing): + return 'function ' + thing.__name__ + if inspect.ismethod(thing): + return 'method ' + thing.__name__ + if inspect.ismethodwrapper(thing): + return 'method wrapper ' + thing.__name__ + if inspect.ismethoddescriptor(thing): + try: + return 'method descriptor ' + thing.__name__ + except AttributeError: + pass + return type(thing).__name__ + +def locate(path, forceload=0): + """Locate an object by name or dotted path, importing as necessary.""" + parts = [part for part in path.split('.') if part] + module, n = None, 0 + while n < len(parts): + nextmodule = safeimport('.'.join(parts[:n+1]), forceload) + if nextmodule: module, n = nextmodule, n + 1 + else: break + if module: + object = module + else: + object = builtins + for part in parts[n:]: + try: + object = getattr(object, part) + except AttributeError: + return None + return object + +# --------------------------------------- interactive interpreter interface + +text = TextDoc() +plaintext = _PlainTextDoc() +html = HTMLDoc() + +def resolve(thing, forceload=0): + """Given an object or a path to an object, get the object and its name.""" + if isinstance(thing, str): + object = locate(thing, forceload) + if object is None: + raise ImportError('''\ +No Python documentation found for %r. 
+Use help() to get the interactive help utility. +Use help(str) for help on the str class.''' % thing) + return object, thing + else: + name = getattr(thing, '__name__', None) + return thing, name if isinstance(name, str) else None + +def render_doc(thing, title='Python Library Documentation: %s', forceload=0, + renderer=None): + """Render text documentation, given an object or a path to an object.""" + if renderer is None: + renderer = text + object, name = resolve(thing, forceload) + desc = describe(object) + module = inspect.getmodule(object) + if name and '.' in name: + desc += ' in ' + name[:name.rfind('.')] + elif module and module is not object: + desc += ' in module ' + module.__name__ + + if not (inspect.ismodule(object) or + inspect.isclass(object) or + inspect.isroutine(object) or + inspect.isdatadescriptor(object) or + _getdoc(object)): + # If the passed object is a piece of data or an instance, + # document its available methods instead of its value. + if hasattr(object, '__origin__'): + object = object.__origin__ + else: + object = type(object) + desc += ' object' + return title % desc + '\n\n' + renderer.document(object, name) + +def doc(thing, title='Python Library Documentation: %s', forceload=0, + output=None, is_cli=False): + """Display text documentation, given an object or a path to an object.""" + if output is None: + try: + if isinstance(thing, str): + what = thing + else: + what = getattr(thing, '__qualname__', None) + if not isinstance(what, str): + what = getattr(thing, '__name__', None) + if not isinstance(what, str): + what = type(thing).__name__ + ' object' + pager(render_doc(thing, title, forceload), f'Help on {what!s}') + except ImportError as exc: + if is_cli: + raise + print(exc) + else: + try: + s = render_doc(thing, title, forceload, plaintext) + except ImportError as exc: + s = str(exc) + output.write(s) + +def writedoc(thing, forceload=0): + """Write HTML documentation to a file in the current directory.""" + object, name = resolve(thing, forceload) + page = html.page(describe(object), html.document(object, name)) + with open(name + '.html', 'w', encoding='utf-8') as file: + file.write(page) + print('wrote', name + '.html') + +def writedocs(dir, pkgpath='', done=None): + """Write out HTML documentation for all modules in a directory tree.""" + if done is None: done = {} + for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath): + writedoc(modname) + return + + +def _introdoc(): + import textwrap + ver = '%d.%d' % sys.version_info[:2] + if os.environ.get('PYTHON_BASIC_REPL'): + pyrepl_keys = '' + else: + # Additional help for keyboard shortcuts if enhanced REPL is used. + pyrepl_keys = ''' + You can use the following keyboard shortcuts at the main interpreter prompt. + F1: enter interactive help, F2: enter history browsing mode, F3: enter paste + mode (press again to exit). + ''' + return textwrap.dedent(f'''\ + Welcome to Python {ver}'s help utility! If this is your first time using + Python, you should definitely check out the tutorial at + https://docs.python.org/{ver}/tutorial/. + + Enter the name of any module, keyword, or topic to get help on writing + Python programs and using Python modules. To get a list of available + modules, keywords, symbols, or topics, enter "modules", "keywords", + "symbols", or "topics". + {pyrepl_keys} + Each module also comes with a one-line summary of what it does; to list + the modules whose name or summary contain a given string such as "spam", + enter "modules spam". 
+ + To quit this help utility and return to the interpreter, + enter "q", "quit" or "exit". + ''') + +class Helper: + + # These dictionaries map a topic name to either an alias, or a tuple + # (label, seealso-items). The "label" is the label of the corresponding + # section in the .rst file under Doc/ and an index into the dictionary + # in pydoc_data/topics.py. + # + # CAUTION: if you change one of these dictionaries, be sure to adapt the + # list of needed labels in Doc/tools/extensions/pyspecific.py and + # regenerate the pydoc_data/topics.py file by running + # make pydoc-topics + # in Doc/ and copying the output file into the Lib/ directory. + + keywords = { + 'False': '', + 'None': '', + 'True': '', + 'and': 'BOOLEAN', + 'as': 'with', + 'assert': ('assert', ''), + 'async': ('async', ''), + 'await': ('await', ''), + 'break': ('break', 'while for'), + 'class': ('class', 'CLASSES SPECIALMETHODS'), + 'continue': ('continue', 'while for'), + 'def': ('function', ''), + 'del': ('del', 'BASICMETHODS'), + 'elif': 'if', + 'else': ('else', 'while for'), + 'except': 'try', + 'finally': 'try', + 'for': ('for', 'break continue while'), + 'from': 'import', + 'global': ('global', 'nonlocal NAMESPACES'), + 'if': ('if', 'TRUTHVALUE'), + 'import': ('import', 'MODULES'), + 'in': ('in', 'SEQUENCEMETHODS'), + 'is': 'COMPARISON', + 'lambda': ('lambda', 'FUNCTIONS'), + 'nonlocal': ('nonlocal', 'global NAMESPACES'), + 'not': 'BOOLEAN', + 'or': 'BOOLEAN', + 'pass': ('pass', ''), + 'raise': ('raise', 'EXCEPTIONS'), + 'return': ('return', 'FUNCTIONS'), + 'try': ('try', 'EXCEPTIONS'), + 'while': ('while', 'break continue if TRUTHVALUE'), + 'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'), + 'yield': ('yield', ''), + } + # Either add symbols to this dictionary or to the symbols dictionary + # directly: Whichever is easier. They are merged later. 
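+    # _strprefixes below expands to the eight quoted prefixes
+    # b' b" f' f" r' r" u' u", so typing e.g. f" at the help> prompt
+    # resolves to the STRINGS topic via _symbols_inverse.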
+ _strprefixes = [p + q for p in ('b', 'f', 'r', 'u') for q in ("'", '"')] + _symbols_inverse = { + 'STRINGS' : ("'", "'''", '"', '"""', *_strprefixes), + 'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&', + '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'), + 'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'), + 'UNARY' : ('-', '~'), + 'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=', + '^=', '<<=', '>>=', '**=', '//='), + 'BITWISE' : ('<<', '>>', '&', '|', '^', '~'), + 'COMPLEX' : ('j', 'J') + } + symbols = { + '%': 'OPERATORS FORMATTING', + '**': 'POWER', + ',': 'TUPLES LISTS FUNCTIONS', + '.': 'ATTRIBUTES FLOAT MODULES OBJECTS', + '...': 'ELLIPSIS', + ':': 'SLICINGS DICTIONARYLITERALS', + '@': 'def class', + '\\': 'STRINGS', + ':=': 'ASSIGNMENTEXPRESSIONS', + '_': 'PRIVATENAMES', + '__': 'PRIVATENAMES SPECIALMETHODS', + '`': 'BACKQUOTES', + '(': 'TUPLES FUNCTIONS CALLS', + ')': 'TUPLES FUNCTIONS CALLS', + '[': 'LISTS SUBSCRIPTS SLICINGS', + ']': 'LISTS SUBSCRIPTS SLICINGS' + } + for topic, symbols_ in _symbols_inverse.items(): + for symbol in symbols_: + topics = symbols.get(symbol, topic) + if topic not in topics: + topics = topics + ' ' + topic + symbols[symbol] = topics + del topic, symbols_, symbol, topics + + topics = { + 'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS ' + 'FUNCTIONS CLASSES MODULES FILES inspect'), + 'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS ' + 'FORMATTING TYPES'), + 'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'), + 'FORMATTING': ('formatstrings', 'OPERATORS'), + 'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS ' + 'FORMATTING TYPES'), + 'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'), + 'INTEGER': ('integers', 'int range'), + 'FLOAT': ('floating', 'float math'), + 'COMPLEX': ('imaginary', 'complex cmath'), + 'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'), + 'MAPPINGS': 'DICTIONARIES', + 'FUNCTIONS': ('typesfunctions', 'def TYPES'), + 'METHODS': ('typesmethods', 'class def CLASSES TYPES'), + 'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'), + 'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'), + 'FRAMEOBJECTS': 'TYPES', + 'TRACEBACKS': 'TYPES', + 'NONE': ('bltin-null-object', ''), + 'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'), + 'SPECIALATTRIBUTES': ('specialattrs', ''), + 'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'), + 'MODULES': ('typesmodules', 'import'), + 'PACKAGES': 'import', + 'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN ' + 'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER ' + 'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES ' + 'LISTS DICTIONARIES'), + 'OPERATORS': 'EXPRESSIONS', + 'PRECEDENCE': 'EXPRESSIONS', + 'OBJECTS': ('objects', 'TYPES'), + 'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS ' + 'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS ' + 'NUMBERMETHODS CLASSES'), + 'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'), + 'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'), + 'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'), + 'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS ' + 'SPECIALMETHODS'), + 'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'), + 'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT ' + 'SPECIALMETHODS'), + 'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'), + 'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT 
DELETION DYNAMICFEATURES'), + 'DYNAMICFEATURES': ('dynamic-features', ''), + 'SCOPING': 'NAMESPACES', + 'FRAMES': 'NAMESPACES', + 'EXCEPTIONS': ('exceptions', 'try except finally raise'), + 'CONVERSIONS': ('conversions', ''), + 'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'), + 'SPECIALIDENTIFIERS': ('id-classes', ''), + 'PRIVATENAMES': ('atom-identifiers', ''), + 'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS ' + 'LISTLITERALS DICTIONARYLITERALS'), + 'TUPLES': 'SEQUENCES', + 'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'), + 'LISTS': ('typesseq-mutable', 'LISTLITERALS'), + 'LISTLITERALS': ('lists', 'LISTS LITERALS'), + 'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'), + 'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'), + 'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'), + 'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'), + 'SLICINGS': ('slicings', 'SEQUENCEMETHODS'), + 'CALLS': ('calls', 'EXPRESSIONS'), + 'POWER': ('power', 'EXPRESSIONS'), + 'UNARY': ('unary', 'EXPRESSIONS'), + 'BINARY': ('binary', 'EXPRESSIONS'), + 'SHIFTING': ('shifting', 'EXPRESSIONS'), + 'BITWISE': ('bitwise', 'EXPRESSIONS'), + 'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'), + 'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'), + 'ASSERTION': 'assert', + 'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'), + 'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'), + 'ASSIGNMENTEXPRESSIONS': ('assignment-expressions', ''), + 'DELETION': 'del', + 'RETURNING': 'return', + 'IMPORTING': 'import', + 'CONDITIONAL': 'if', + 'LOOPING': ('compound', 'for while break continue'), + 'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'), + 'DEBUGGING': ('debugger', 'pdb'), + 'CONTEXTMANAGERS': ('context-managers', 'with'), + } + + def __init__(self, input=None, output=None): + self._input = input + self._output = output + + @property + def input(self): + return self._input or sys.stdin + + @property + def output(self): + return self._output or sys.stdout + + def __repr__(self): + if inspect.stack()[1][3] == '?': + self() + return '' + return '<%s.%s instance>' % (self.__class__.__module__, + self.__class__.__qualname__) + + _GoInteractive = object() + def __call__(self, request=_GoInteractive): + if request is not self._GoInteractive: + try: + self.help(request) + except ImportError as err: + self.output.write(f'{err}\n') + else: + self.intro() + self.interact() + self.output.write(''' +You are now leaving help and returning to the Python interpreter. +If you want to ask for help on a particular object directly from the +interpreter, you can type "help(object)". Executing "help('string')" +has the same effect as typing a particular string at the help> prompt. 
+''') + + def interact(self): + self.output.write('\n') + while True: + try: + request = self.getline('help> ') + except (KeyboardInterrupt, EOFError): + break + request = request.strip() + if not request: + continue # back to the prompt + + # Make sure significant trailing quoting marks of literals don't + # get deleted while cleaning input + if (len(request) > 2 and request[0] == request[-1] in ("'", '"') + and request[0] not in request[1:-1]): + request = request[1:-1] + if request.lower() in ('q', 'quit', 'exit'): break + if request == 'help': + self.intro() + else: + self.help(request) + + def getline(self, prompt): + """Read one line, using input() when appropriate.""" + if self.input is sys.stdin: + return input(prompt) + else: + self.output.write(prompt) + self.output.flush() + return self.input.readline() + + def help(self, request, is_cli=False): + if isinstance(request, str): + request = request.strip() + if request == 'keywords': self.listkeywords() + elif request == 'symbols': self.listsymbols() + elif request == 'topics': self.listtopics() + elif request == 'modules': self.listmodules() + elif request[:8] == 'modules ': + self.listmodules(request.split()[1]) + elif request in self.symbols: self.showsymbol(request) + elif request in ['True', 'False', 'None']: + # special case these keywords since they are objects too + doc(eval(request), 'Help on %s:', output=self._output, is_cli=is_cli) + elif request in self.keywords: self.showtopic(request) + elif request in self.topics: self.showtopic(request) + elif request: doc(request, 'Help on %s:', output=self._output, is_cli=is_cli) + else: doc(str, 'Help on %s:', output=self._output, is_cli=is_cli) + elif isinstance(request, Helper): self() + else: doc(request, 'Help on %s:', output=self._output, is_cli=is_cli) + self.output.write('\n') + + def intro(self): + self.output.write(_introdoc()) + + def list(self, items, columns=4, width=80): + items = sorted(items) + colw = width // columns + rows = (len(items) + columns - 1) // columns + for row in range(rows): + for col in range(columns): + i = col * rows + row + if i < len(items): + self.output.write(items[i]) + if col < columns - 1: + self.output.write(' ' + ' ' * (colw - 1 - len(items[i]))) + self.output.write('\n') + + def listkeywords(self): + self.output.write(''' +Here is a list of the Python keywords. Enter any keyword to get more help. + +''') + self.list(self.keywords.keys()) + + def listsymbols(self): + self.output.write(''' +Here is a list of the punctuation symbols which Python assigns special meaning +to. Enter any symbol to get more help. + +''') + self.list(self.symbols.keys()) + + def listtopics(self): + self.output.write(''' +Here is a list of available topics. Enter any topic name to get more help. + +''') + self.list(self.topics.keys(), columns=3) + + def showtopic(self, topic, more_xrefs=''): + try: + import pydoc_data.topics + except ImportError: + self.output.write(''' +Sorry, topic and keyword documentation is not available because the +module "pydoc_data.topics" could not be found. 
+''') + return + target = self.topics.get(topic, self.keywords.get(topic)) + if not target: + self.output.write('no documentation found for %s\n' % repr(topic)) + return + if isinstance(target, str): + return self.showtopic(target, more_xrefs) + + label, xrefs = target + try: + doc = pydoc_data.topics.topics[label] + except KeyError: + self.output.write('no documentation found for %s\n' % repr(topic)) + return + doc = doc.strip() + '\n' + if more_xrefs: + xrefs = (xrefs or '') + ' ' + more_xrefs + if xrefs: + import textwrap + text = 'Related help topics: ' + ', '.join(xrefs.split()) + '\n' + wrapped_text = textwrap.wrap(text, 72) + doc += '\n%s\n' % '\n'.join(wrapped_text) + + if self._output is None: + pager(doc, f'Help on {topic!s}') + else: + self.output.write(doc) + + def _gettopic(self, topic, more_xrefs=''): + """Return unbuffered tuple of (topic, xrefs). + + If an error occurs here, the exception is caught and displayed by + the url handler. + + This function duplicates the showtopic method but returns its + result directly so it can be formatted for display in an html page. + """ + try: + import pydoc_data.topics + except ImportError: + return(''' +Sorry, topic and keyword documentation is not available because the +module "pydoc_data.topics" could not be found. +''' , '') + target = self.topics.get(topic, self.keywords.get(topic)) + if not target: + raise ValueError('could not find topic') + if isinstance(target, str): + return self._gettopic(target, more_xrefs) + label, xrefs = target + doc = pydoc_data.topics.topics[label] + if more_xrefs: + xrefs = (xrefs or '') + ' ' + more_xrefs + return doc, xrefs + + def showsymbol(self, symbol): + target = self.symbols[symbol] + topic, _, xrefs = target.partition(' ') + self.showtopic(topic, xrefs) + + def listmodules(self, key=''): + if key: + self.output.write(''' +Here is a list of modules whose name or summary contains '{}'. +If there are any, enter a module name to get more help. + +'''.format(key)) + apropos(key) + else: + self.output.write(''' +Please wait a moment while I gather a list of all available modules... + +''') + modules = {} + def callback(path, modname, desc, modules=modules): + if modname and modname[-9:] == '.__init__': + modname = modname[:-9] + ' (package)' + if modname.find('.') < 0: + modules[modname] = 1 + def onerror(modname): + callback(None, modname, None) + ModuleScanner().run(callback, onerror=onerror) + self.list(modules.keys()) + self.output.write(''' +Enter any module name to get more help. Or, type "modules spam" to search +for modules whose name or summary contain the string "spam". 
+''') + +help = Helper() + +class ModuleScanner: + """An interruptible scanner that searches module synopses.""" + + def run(self, callback, key=None, completer=None, onerror=None): + if key: key = key.lower() + self.quit = False + seen = {} + + for modname in sys.builtin_module_names: + if modname != '__main__': + seen[modname] = 1 + if key is None: + callback(None, modname, '') + else: + name = __import__(modname).__doc__ or '' + desc = name.split('\n')[0] + name = modname + ' - ' + desc + if name.lower().find(key) >= 0: + callback(None, modname, desc) + + for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror): + if self.quit: + break + + if key is None: + callback(None, modname, '') + else: + try: + spec = importer.find_spec(modname) + except SyntaxError: + # raised by tests for bad coding cookies or BOM + continue + loader = spec.loader + if hasattr(loader, 'get_source'): + try: + source = loader.get_source(modname) + except Exception: + if onerror: + onerror(modname) + continue + desc = source_synopsis(io.StringIO(source)) or '' + if hasattr(loader, 'get_filename'): + path = loader.get_filename(modname) + else: + path = None + else: + try: + module = importlib._bootstrap._load(spec) + except ImportError: + if onerror: + onerror(modname) + continue + desc = module.__doc__.splitlines()[0] if module.__doc__ else '' + path = getattr(module,'__file__',None) + name = modname + ' - ' + desc + if name.lower().find(key) >= 0: + callback(path, modname, desc) + + if completer: + completer() + +def apropos(key): + """Print all the one-line module summaries that contain a substring.""" + def callback(path, modname, desc): + if modname[-9:] == '.__init__': + modname = modname[:-9] + ' (package)' + print(modname, desc and '- ' + desc) + def onerror(modname): + pass + with warnings.catch_warnings(): + warnings.filterwarnings('ignore') # ignore problems during import + ModuleScanner().run(callback, key, onerror=onerror) + +# --------------------------------------- enhanced web browser interface + +def _start_server(urlhandler, hostname, port): + """Start an HTTP server thread on a specific port. + + Start an HTML/text server thread, so HTML or text documents can be + browsed dynamically and interactively with a web browser. Example use: + + >>> import time + >>> import pydoc + + Define a URL handler. To determine what the client is asking + for, check the URL and content_type. + + Then get or generate some text or HTML code and return it. + + >>> def my_url_handler(url, content_type): + ... text = 'the URL sent was: (%s, %s)' % (url, content_type) + ... return text + + Start server thread on port 0. + If you use port 0, the server will pick a random port number. + You can then use serverthread.port to get the port number. + + >>> port = 0 + >>> serverthread = pydoc._start_server(my_url_handler, port) + + Check that the server is really started. If it is, open browser + and get first page. Use serverthread.url as the starting page. + + >>> if serverthread.serving: + ... import webbrowser + + The next two lines are commented out so a browser doesn't open if + doctest is run on this module. + + #... webbrowser.open(serverthread.url) + #True + + Let the server do its thing. We just need to monitor its status. + Use time.sleep so the loop doesn't hog the CPU. + + >>> starttime = time.monotonic() + >>> timeout = 1 #seconds + + This is a short timeout for testing purposes. + + >>> while serverthread.serving: + ... time.sleep(.01) + ... 
if serverthread.serving and time.monotonic() - starttime > timeout:
+    ...          serverthread.stop()
+    ...          break
+
+    Print any errors that may have occurred.
+
+    >>> print(serverthread.error)
+    None
+    """
+    import http.server
+    import email.message
+    import select
+    import threading
+
+    class DocHandler(http.server.BaseHTTPRequestHandler):
+
+        def do_GET(self):
+            """Process a request from an HTML browser.
+
+            The URL received is in self.path.
+            Get an HTML page from self.urlhandler and send it.
+            """
+            if self.path.endswith('.css'):
+                content_type = 'text/css'
+            else:
+                content_type = 'text/html'
+            self.send_response(200)
+            self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
+            self.end_headers()
+            self.wfile.write(self.urlhandler(
+                self.path, content_type).encode('utf-8'))
+
+        def log_message(self, *args):
+            # Don't log messages.
+            pass
+
+    class DocServer(http.server.HTTPServer):
+
+        def __init__(self, host, port, callback):
+            self.host = host
+            self.address = (self.host, port)
+            self.callback = callback
+            self.base.__init__(self, self.address, self.handler)
+            self.quit = False
+
+        def serve_until_quit(self):
+            while not self.quit:
+                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
+                if rd:
+                    self.handle_request()
+            self.server_close()
+
+        def server_activate(self):
+            self.base.server_activate(self)
+            if self.callback:
+                self.callback(self)
+
+    class ServerThread(threading.Thread):
+
+        def __init__(self, urlhandler, host, port):
+            self.urlhandler = urlhandler
+            self.host = host
+            self.port = int(port)
+            threading.Thread.__init__(self)
+            self.serving = False
+            self.error = None
+            self.docserver = None
+
+        def run(self):
+            """Start the server."""
+            try:
+                DocServer.base = http.server.HTTPServer
+                DocServer.handler = DocHandler
+                DocHandler.MessageClass = email.message.Message
+                DocHandler.urlhandler = staticmethod(self.urlhandler)
+                docsvr = DocServer(self.host, self.port, self.ready)
+                self.docserver = docsvr
+                docsvr.serve_until_quit()
+            except Exception as err:
+                self.error = err
+
+        def ready(self, server):
+            self.serving = True
+            self.host = server.host
+            self.port = server.server_port
+            self.url = 'http://%s:%d/' % (self.host, self.port)
+
+        def stop(self):
+            """Stop the server and this thread nicely"""
+            self.docserver.quit = True
+            self.join()
+            # explicitly break a reference cycle: DocServer.callback
+            # has indirectly a reference to ServerThread.
+            self.docserver = None
+            self.serving = False
+            self.url = None
+
+    thread = ServerThread(urlhandler, hostname, port)
+    thread.start()
+    # Wait until thread.serving is True and thread.docserver is set
+    # to make sure we are really up before returning.
+    while not thread.error and not (thread.serving and thread.docserver):
+        time.sleep(.01)
+    return thread
+
+
+def _url_handler(url, content_type="text/html"):
+    """The pydoc url handler for use with the pydoc server.
+
+    If the content_type is 'text/css', the _pydoc.css style
+    sheet is read and returned if it exists.
+
+    If the content_type is 'text/html', then the result of
+    get_html_page(url) is returned.
+    """
+    class _HTMLDoc(HTMLDoc):
+
+        def page(self, title, contents):
+            """Format an HTML page."""
+            css_path = "pydoc_data/_pydoc.css"
+            css_link = (
+                '<link rel="stylesheet" type="text/css" href="%s">' %
+                css_path)
+            return '''\
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Pydoc: %s</title>
+%s</head><body>%s<div style="clear:both;padding-top:.7em;">%s
+</div></body></html>''' % (title, css_link, html_navbar(), contents)
+
+
+    html = _HTMLDoc()
+
+    def html_navbar():
+        version = html.escape("%s [%s, %s]" % (platform.python_version(),
+                                               platform.python_build()[0],
+                                               platform.python_compiler()))
+        return """
+            <div style='float:left'>
+                Python %s<br>%s
+            </div>
+            <div style='float:right'>
+                <div style='text-align:right'>
+                    <a href="index.html">Module Index</a>
+                    : <a href="topics.html">Topics</a>
+                    : <a href="keywords.html">Keywords</a>
+                </div>
+                <div style='text-align:right'>
+                    <form action="get" style='display:inline;'>
+                        <input class="input-search" type=text name=key size="15">
+                        <input class="submit-search" type=submit value="Get">
+                    </form>&nbsp;
+                    <form action="search" style='display:inline;'>
+                        <input class="input-search" type=text name=key size="15">
+                        <input class="submit-search" type=submit value="Search">
+                    </form>
+                </div>
+            </div>
+            """ % (version, html.escape(platform.platform(terse=True)))
+
+    def html_index():
+        """Module Index page."""
+
+        def bltinlink(name):
+            return '<a href="%s.html">%s</a>' % (name, name)
+
+        heading = html.heading(
+            '<strong class="title">Index of Modules</strong>'
+        )
+        names = [name for name in sys.builtin_module_names
+                 if name != '__main__']
+        contents = html.multicolumn(names, bltinlink)
+        contents = [heading, '<p>' + html.bigsection(
+            'Built-in Modules', 'index', contents)]
+
+        seen = {}
+        for dir in sys.path:
+            contents.append(html.index(dir, seen))
+
+        contents.append(
+            '<p align=right class="heading-text grey"><strong>pydoc</strong> by Ka-Ping Yee'
+            '&lt;ping@lfw.org&gt;</p>')
+        return 'Index of Modules', ''.join(contents)
+
+    def html_search(key):
+        """Search results page."""
+        # scan for modules
+        search_result = []
+
+        def callback(path, modname, desc):
+            if modname[-9:] == '.__init__':
+                modname = modname[:-9] + ' (package)'
+            search_result.append((modname, desc and '- ' + desc))
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings('ignore')  # ignore problems during import
+            def onerror(modname):
+                pass
+            ModuleScanner().run(callback, key, onerror=onerror)
+
+        # format page
+        def bltinlink(name):
+            return '<a href="%s.html">%s</a>' % (name, name)
+
+        results = []
+        heading = html.heading(
+            '<strong class="title">Search Results</strong>',
+        )
+        for name, desc in search_result:
+            results.append(bltinlink(name) + desc)
+        contents = heading + html.bigsection(
+            'key = %s' % key, 'index', '<br>'.join(results))
+        return 'Search Results', contents
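+
+    # The handler serves a small, fixed URL space: "", "index", "topics" and
+    # "keywords", plus the query pages "search?key=...", "topic?key=..." and
+    # "get?key=..."; get_html_page() further below dispatches on these.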
+
+    def html_topics():
+        """Index of topic texts available."""
+
+        def bltinlink(name):
+            return '<a href="topic?key=%s">%s</a>' % (name, name)
+
+        heading = html.heading(
+            '<strong class="title">INDEX</strong>',
+        )
+        names = sorted(Helper.topics.keys())
+
+        contents = html.multicolumn(names, bltinlink)
+        contents = heading + html.bigsection(
+            'Topics', 'index', contents)
+        return 'Topics', contents
+
+    def html_keywords():
+        """Index of keywords."""
+        heading = html.heading(
+            '<strong class="title">INDEX</strong>',
+        )
+        names = sorted(Helper.keywords.keys())
+
+        def bltinlink(name):
+            return '<a href="topic?key=%s">%s</a>' % (name, name)
+
+        contents = html.multicolumn(names, bltinlink)
+        contents = heading + html.bigsection(
+            'Keywords', 'index', contents)
+        return 'Keywords', contents
+
+    def html_topicpage(topic):
+        """Topic or keyword help page."""
+        buf = io.StringIO()
+        htmlhelp = Helper(buf, buf)
+        contents, xrefs = htmlhelp._gettopic(topic)
+        if topic in htmlhelp.keywords:
+            title = 'KEYWORD'
+        else:
+            title = 'TOPIC'
+        heading = html.heading(
+            '<strong class="title">%s</strong>' % title,
+        )
+        contents = '<pre>%s</pre>' % html.markup(contents)
+        contents = html.bigsection(topic, 'index', contents)
+        if xrefs:
+            xrefs = sorted(xrefs.split())
+
+            def bltinlink(name):
+                return '<a href="topic?key=%s">%s</a>' % (name, name)
+
+            xrefs = html.multicolumn(xrefs, bltinlink)
+            xrefs = html.section('Related help topics: ', 'index', xrefs)
+        return ('%s %s' % (title, topic),
+                ''.join((heading, contents, xrefs)))
+
+    def html_getobj(url):
+        obj = locate(url, forceload=1)
+        if obj is None and url != 'None':
+            raise ValueError('could not find object')
+        title = describe(obj)
+        content = html.document(obj, url)
+        return title, content
+
+    def html_error(url, exc):
+        heading = html.heading(
+            '<strong class="title">Error</strong>',
+        )
+        contents = '<br>'.join(html.escape(line) for line in
+                               format_exception_only(type(exc), exc))
+        contents = heading + html.bigsection(url, 'error', contents)
+        return "Error - %s" % url, contents
+
+    def get_html_page(url):
+        """Generate an HTML page for url."""
+        complete_url = url
+        if url.endswith('.html'):
+            url = url[:-5]
+        try:
+            if url in ("", "index"):
+                title, content = html_index()
+            elif url == "topics":
+                title, content = html_topics()
+            elif url == "keywords":
+                title, content = html_keywords()
+            elif '=' in url:
+                op, _, url = url.partition('=')
+                if op == "search?key":
+                    title, content = html_search(url)
+                elif op == "topic?key":
+                    # try topics first, then objects.
+                    try:
+                        title, content = html_topicpage(url)
+                    except ValueError:
+                        title, content = html_getobj(url)
+                elif op == "get?key":
+                    # try objects first, then topics.
+                    if url in ("", "index"):
+                        title, content = html_index()
+                    else:
+                        try:
+                            title, content = html_getobj(url)
+                        except ValueError:
+                            title, content = html_topicpage(url)
+                else:
+                    raise ValueError('bad pydoc url')
+            else:
+                title, content = html_getobj(url)
+        except Exception as exc:
+            # Catch any errors and display them in an error page.
+            title, content = html_error(complete_url, exc)
+        return html.page(title, content)
+
+    if url.startswith('/'):
+        url = url[1:]
+    if content_type == 'text/css':
+        path_here = os.path.dirname(os.path.realpath(__file__))
+        css_path = os.path.join(path_here, url)
+        with open(css_path) as fp:
+            return ''.join(fp.readlines())
+    elif content_type == 'text/html':
+        return get_html_page(url)
+    # Errors outside the url handler are caught by the server.
+    raise TypeError('unknown content type %r for url %s' % (content_type, url))
+
+
+def browse(port=0, *, open_browser=True, hostname='localhost'):
+    """Start the enhanced pydoc web server and open a web browser.
+
+    Use port '0' to start the server on an arbitrary port.
+    Set open_browser to False to suppress opening a browser.
+    """
+    import webbrowser
+    serverthread = _start_server(_url_handler, hostname, port)
+    if serverthread.error:
+        print(serverthread.error)
+        return
+    if serverthread.serving:
+        server_help_msg = 'Server commands: [b]rowser, [q]uit'
+        if open_browser:
+            webbrowser.open(serverthread.url)
+        try:
+            print('Server ready at', serverthread.url)
+            print(server_help_msg)
+            while serverthread.serving:
+                cmd = input('server> ')
+                cmd = cmd.lower()
+                if cmd == 'q':
+                    break
+                elif cmd == 'b':
+                    webbrowser.open(serverthread.url)
+                else:
+                    print(server_help_msg)
+        except (KeyboardInterrupt, EOFError):
+            print()
+        finally:
+            if serverthread.serving:
+                serverthread.stop()
+                print('Server stopped')
+
+
+# -------------------------------------------------- command-line interface
+
+def ispath(x):
+    return isinstance(x, str) and x.find(os.sep) >= 0
+
+def _get_revised_path(given_path, argv0):
+    """Ensures current directory is on returned path, and argv0 directory is not
+
+    Exception: argv0 dir is left alone if it's also pydoc's directory.
+
+    Returns a new path entry list, or None if no adjustment is needed.
+    """
+    # Scripts may get the current directory in their path by default if they're
+    # run with the -m switch, or directly from the current directory.
+    # The interactive prompt also allows imports from the current directory.
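+    # (Illustration: for argv0 '/tmp/app/script.py' with given_path
+    # ['/tmp/app', '/usr/lib/python3/dist-packages'] and the current
+    # directory absent, the result is the same list with '/tmp/app'
+    # removed and os.getcwd() inserted at the front.)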
+
+    # Accordingly, if the current directory is already present, don't make
+    # any changes to the given_path
+    if '' in given_path or os.curdir in given_path or os.getcwd() in given_path:
+        return None
+
+    # Otherwise, add the current directory to the given path, and remove the
+    # script directory (as long as the latter isn't also pydoc's directory).
+    stdlib_dir = os.path.dirname(__file__)
+    script_dir = os.path.dirname(argv0)
+    revised_path = given_path.copy()
+    if script_dir in given_path and not os.path.samefile(script_dir, stdlib_dir):
+        revised_path.remove(script_dir)
+    revised_path.insert(0, os.getcwd())
+    return revised_path
+
+
+# Note: the tests only cover _get_revised_path, not _adjust_cli_sys_path itself
+def _adjust_cli_sys_path():
+    """Ensures current directory is on sys.path, and __main__ directory is not.
+
+    Exception: __main__ dir is left alone if it's also pydoc's directory.
+    """
+    revised_path = _get_revised_path(sys.path, sys.argv[0])
+    if revised_path is not None:
+        sys.path[:] = revised_path
+
+
+def cli():
+    """Command-line interface (looks at sys.argv to decide what to do)."""
+    import getopt
+    class BadUsage(Exception): pass
+
+    _adjust_cli_sys_path()
+
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'bk:n:p:w')
+        writing = False
+        start_server = False
+        open_browser = False
+        port = 0
+        hostname = 'localhost'
+        for opt, val in opts:
+            if opt == '-b':
+                start_server = True
+                open_browser = True
+            if opt == '-k':
+                apropos(val)
+                return
+            if opt == '-p':
+                start_server = True
+                port = val
+            if opt == '-w':
+                writing = True
+            if opt == '-n':
+                start_server = True
+                hostname = val
+
+        if start_server:
+            browse(port, hostname=hostname, open_browser=open_browser)
+            return
+
+        if not args: raise BadUsage
+        for arg in args:
+            if ispath(arg) and not os.path.exists(arg):
+                print('file %r does not exist' % arg)
+                sys.exit(1)
+            try:
+                if ispath(arg) and os.path.isfile(arg):
+                    arg = importfile(arg)
+                if writing:
+                    if ispath(arg) and os.path.isdir(arg):
+                        writedocs(arg)
+                    else:
+                        writedoc(arg)
+                else:
+                    help.help(arg, is_cli=True)
+            except (ImportError, ErrorDuringImport) as value:
+                print(value)
+                sys.exit(1)
+
+    except (getopt.error, BadUsage):
+        cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
+        print("""pydoc - the Python documentation tool
+
+{cmd} <name> ...
+    Show text documentation on something.  <name> may be the name of a
+    Python keyword, topic, function, module, or package, or a dotted
+    reference to a class or function within a module or module in a
+    package.  If <name> contains a '{sep}', it is used as the path to a
+    Python source file to document. If name is 'keywords', 'topics',
+    or 'modules', a listing of these things is displayed.
+
+{cmd} -k <keyword>
+    Search for a keyword in the synopsis lines of all available modules.
+
+{cmd} -n <hostname>
+    Start an HTTP server with the given hostname (default: localhost).
+
+{cmd} -p <port>
+    Start an HTTP server on the given port on the local machine.  Port
+    number 0 can be used to get an arbitrary unused port.
+
+{cmd} -b
+    Start an HTTP server on an arbitrary unused port and open a web browser
+    to interactively browse documentation.  This option can be used in
+    combination with -n and/or -p.
+
+{cmd} -w <name> ...
+    Write out the HTML documentation for a module to a file in the current
+    directory.  If <name> contains a '{sep}', it is treated as a filename; if
+    it names a directory, documentation is written for all the contents.
+""".format(cmd=cmd, sep=os.sep)) + +if __name__ == '__main__': + cli() diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__init__.py b/Python314_4_x64_Template/Lib/pydoc_data/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/__init__.py rename to Python314_4_x64_Template/Lib/pydoc_data/__init__.py diff --git a/Python313_13_x64_Template/Lib/pydoc_data/_pydoc.css b/Python314_4_x64_Template/Lib/pydoc_data/_pydoc.css similarity index 100% rename from Python313_13_x64_Template/Lib/pydoc_data/_pydoc.css rename to Python314_4_x64_Template/Lib/pydoc_data/_pydoc.css diff --git a/Python314_4_x64_Template/Lib/pydoc_data/module_docs.py b/Python314_4_x64_Template/Lib/pydoc_data/module_docs.py new file mode 100644 index 00000000..d6583783 --- /dev/null +++ b/Python314_4_x64_Template/Lib/pydoc_data/module_docs.py @@ -0,0 +1,320 @@ +# Autogenerated by Sphinx on Tue Apr 7 16:13:12 2026 +# as part of the release process. + +module_docs = { + '__future__': '__future__#module-__future__', + '__main__': '__main__#module-__main__', + '_thread': '_thread#module-_thread', + '_tkinter': 'tkinter#module-_tkinter', + 'abc': 'abc#module-abc', + 'aifc': 'aifc#module-aifc', + 'annotationlib': 'annotationlib#module-annotationlib', + 'argparse': 'argparse#module-argparse', + 'array': 'array#module-array', + 'ast': 'ast#module-ast', + 'asynchat': 'asynchat#module-asynchat', + 'asyncio': 'asyncio#module-asyncio', + 'asyncore': 'asyncore#module-asyncore', + 'atexit': 'atexit#module-atexit', + 'audioop': 'audioop#module-audioop', + 'base64': 'base64#module-base64', + 'bdb': 'bdb#module-bdb', + 'binascii': 'binascii#module-binascii', + 'bisect': 'bisect#module-bisect', + 'builtins': 'builtins#module-builtins', + 'bz2': 'bz2#module-bz2', + 'cProfile': 'profile#module-cProfile', + 'calendar': 'calendar#module-calendar', + 'cgi': 'cgi#module-cgi', + 'cgitb': 'cgitb#module-cgitb', + 'chunk': 'chunk#module-chunk', + 'cmath': 'cmath#module-cmath', + 'cmd': 'cmd#module-cmd', + 'code': 'code#module-code', + 'codecs': 'codecs#module-codecs', + 'codeop': 'codeop#module-codeop', + 'collections': 'collections#module-collections', + 'collections.abc': 'collections.abc#module-collections.abc', + 'colorsys': 'colorsys#module-colorsys', + 'compileall': 'compileall#module-compileall', + 'compression': 'compression#module-compression', + 'compression.zstd': 'compression.zstd#module-compression.zstd', + 'concurrent.futures': 'concurrent.futures#module-concurrent.futures', + 'concurrent.interpreters': 'concurrent.interpreters#module-concurrent.interpreters', + 'configparser': 'configparser#module-configparser', + 'contextlib': 'contextlib#module-contextlib', + 'contextvars': 'contextvars#module-contextvars', + 'copy': 'copy#module-copy', + 'copyreg': 'copyreg#module-copyreg', + 'crypt': 'crypt#module-crypt', + 'csv': 'csv#module-csv', + 'ctypes': 'ctypes#module-ctypes', + 'curses': 'curses#module-curses', + 'curses.ascii': 'curses.ascii#module-curses.ascii', + 'curses.panel': 'curses.panel#module-curses.panel', + 'curses.textpad': 'curses#module-curses.textpad', + 'dataclasses': 'dataclasses#module-dataclasses', + 'datetime': 'datetime#module-datetime', + 'dbm': 'dbm#module-dbm', + 'dbm.dumb': 'dbm#module-dbm.dumb', + 'dbm.gnu': 'dbm#module-dbm.gnu', + 'dbm.ndbm': 'dbm#module-dbm.ndbm', + 'dbm.sqlite3': 'dbm#module-dbm.sqlite3', + 'decimal': 'decimal#module-decimal', + 'difflib': 'difflib#module-difflib', + 'dis': 'dis#module-dis', + 
'distutils': 'distutils#module-distutils', + 'doctest': 'doctest#module-doctest', + 'email': 'email#module-email', + 'email.charset': 'email.charset#module-email.charset', + 'email.contentmanager': 'email.contentmanager#module-email.contentmanager', + 'email.encoders': 'email.encoders#module-email.encoders', + 'email.errors': 'email.errors#module-email.errors', + 'email.generator': 'email.generator#module-email.generator', + 'email.header': 'email.header#module-email.header', + 'email.headerregistry': 'email.headerregistry#module-email.headerregistry', + 'email.iterators': 'email.iterators#module-email.iterators', + 'email.message': 'email.message#module-email.message', + 'email.mime': 'email.mime#module-email.mime', + 'email.mime.application': 'email.mime#module-email.mime.application', + 'email.mime.audio': 'email.mime#module-email.mime.audio', + 'email.mime.base': 'email.mime#module-email.mime.base', + 'email.mime.image': 'email.mime#module-email.mime.image', + 'email.mime.message': 'email.mime#module-email.mime.message', + 'email.mime.multipart': 'email.mime#module-email.mime.multipart', + 'email.mime.nonmultipart': 'email.mime#module-email.mime.nonmultipart', + 'email.mime.text': 'email.mime#module-email.mime.text', + 'email.parser': 'email.parser#module-email.parser', + 'email.policy': 'email.policy#module-email.policy', + 'email.utils': 'email.utils#module-email.utils', + 'encodings': 'codecs#module-encodings', + 'encodings.idna': 'codecs#module-encodings.idna', + 'encodings.mbcs': 'codecs#module-encodings.mbcs', + 'encodings.utf_8_sig': 'codecs#module-encodings.utf_8_sig', + 'ensurepip': 'ensurepip#module-ensurepip', + 'enum': 'enum#module-enum', + 'errno': 'errno#module-errno', + 'faulthandler': 'faulthandler#module-faulthandler', + 'fcntl': 'fcntl#module-fcntl', + 'filecmp': 'filecmp#module-filecmp', + 'fileinput': 'fileinput#module-fileinput', + 'fnmatch': 'fnmatch#module-fnmatch', + 'fractions': 'fractions#module-fractions', + 'ftplib': 'ftplib#module-ftplib', + 'functools': 'functools#module-functools', + 'gc': 'gc#module-gc', + 'getopt': 'getopt#module-getopt', + 'getpass': 'getpass#module-getpass', + 'gettext': 'gettext#module-gettext', + 'glob': 'glob#module-glob', + 'graphlib': 'graphlib#module-graphlib', + 'grp': 'grp#module-grp', + 'gzip': 'gzip#module-gzip', + 'hashlib': 'hashlib#module-hashlib', + 'heapq': 'heapq#module-heapq', + 'hmac': 'hmac#module-hmac', + 'html': 'html#module-html', + 'html.entities': 'html.entities#module-html.entities', + 'html.parser': 'html.parser#module-html.parser', + 'http': 'http#module-http', + 'http.client': 'http.client#module-http.client', + 'http.cookiejar': 'http.cookiejar#module-http.cookiejar', + 'http.cookies': 'http.cookies#module-http.cookies', + 'http.server': 'http.server#module-http.server', + 'idlelib': 'idle#module-idlelib', + 'imaplib': 'imaplib#module-imaplib', + 'imghdr': 'imghdr#module-imghdr', + 'imp': 'imp#module-imp', + 'importlib': 'importlib#module-importlib', + 'importlib.abc': 'importlib#module-importlib.abc', + 'importlib.machinery': 'importlib#module-importlib.machinery', + 'importlib.metadata': 'importlib.metadata#module-importlib.metadata', + 'importlib.resources': 'importlib.resources#module-importlib.resources', + 'importlib.resources.abc': 'importlib.resources.abc#module-importlib.resources.abc', + 'importlib.util': 'importlib#module-importlib.util', + 'inspect': 'inspect#module-inspect', + 'io': 'io#module-io', + 'ipaddress': 'ipaddress#module-ipaddress', + 'itertools': 'itertools#module-itertools', + 
'json': 'json#module-json', + 'json.tool': 'json#module-json.tool', + 'keyword': 'keyword#module-keyword', + 'linecache': 'linecache#module-linecache', + 'locale': 'locale#module-locale', + 'logging': 'logging#module-logging', + 'logging.config': 'logging.config#module-logging.config', + 'logging.handlers': 'logging.handlers#module-logging.handlers', + 'lzma': 'lzma#module-lzma', + 'mailbox': 'mailbox#module-mailbox', + 'mailcap': 'mailcap#module-mailcap', + 'marshal': 'marshal#module-marshal', + 'math': 'math#module-math', + 'mimetypes': 'mimetypes#module-mimetypes', + 'mmap': 'mmap#module-mmap', + 'modulefinder': 'modulefinder#module-modulefinder', + 'msilib': 'msilib#module-msilib', + 'msvcrt': 'msvcrt#module-msvcrt', + 'multiprocessing': 'multiprocessing#module-multiprocessing', + 'multiprocessing.connection': 'multiprocessing#module-multiprocessing.connection', + 'multiprocessing.dummy': 'multiprocessing#module-multiprocessing.dummy', + 'multiprocessing.managers': 'multiprocessing#module-multiprocessing.managers', + 'multiprocessing.pool': 'multiprocessing#module-multiprocessing.pool', + 'multiprocessing.shared_memory': 'multiprocessing.shared_memory#module-multiprocessing.shared_memory', + 'multiprocessing.sharedctypes': 'multiprocessing#module-multiprocessing.sharedctypes', + 'netrc': 'netrc#module-netrc', + 'nis': 'nis#module-nis', + 'nntplib': 'nntplib#module-nntplib', + 'numbers': 'numbers#module-numbers', + 'operator': 'operator#module-operator', + 'optparse': 'optparse#module-optparse', + 'os': 'os#module-os', + 'os.path': 'os.path#module-os.path', + 'ossaudiodev': 'ossaudiodev#module-ossaudiodev', + 'pathlib': 'pathlib#module-pathlib', + 'pathlib.types': 'pathlib#module-pathlib.types', + 'pdb': 'pdb#module-pdb', + 'pickle': 'pickle#module-pickle', + 'pickletools': 'pickletools#module-pickletools', + 'pipes': 'pipes#module-pipes', + 'pkgutil': 'pkgutil#module-pkgutil', + 'platform': 'platform#module-platform', + 'plistlib': 'plistlib#module-plistlib', + 'poplib': 'poplib#module-poplib', + 'posix': 'posix#module-posix', + 'pprint': 'pprint#module-pprint', + 'profile': 'profile#module-profile', + 'pstats': 'profile#module-pstats', + 'pty': 'pty#module-pty', + 'pwd': 'pwd#module-pwd', + 'py_compile': 'py_compile#module-py_compile', + 'pyclbr': 'pyclbr#module-pyclbr', + 'pydoc': 'pydoc#module-pydoc', + 'queue': 'queue#module-queue', + 'quopri': 'quopri#module-quopri', + 'random': 'random#module-random', + 're': 're#module-re', + 'readline': 'readline#module-readline', + 'reprlib': 'reprlib#module-reprlib', + 'resource': 'resource#module-resource', + 'rlcompleter': 'rlcompleter#module-rlcompleter', + 'runpy': 'runpy#module-runpy', + 'sched': 'sched#module-sched', + 'secrets': 'secrets#module-secrets', + 'select': 'select#module-select', + 'selectors': 'selectors#module-selectors', + 'shelve': 'shelve#module-shelve', + 'shlex': 'shlex#module-shlex', + 'shutil': 'shutil#module-shutil', + 'signal': 'signal#module-signal', + 'site': 'site#module-site', + 'sitecustomize': 'site#module-sitecustomize', + 'smtpd': 'smtpd#module-smtpd', + 'smtplib': 'smtplib#module-smtplib', + 'sndhdr': 'sndhdr#module-sndhdr', + 'socket': 'socket#module-socket', + 'socketserver': 'socketserver#module-socketserver', + 'spwd': 'spwd#module-spwd', + 'sqlite3': 'sqlite3#module-sqlite3', + 'ssl': 'ssl#module-ssl', + 'stat': 'stat#module-stat', + 'statistics': 'statistics#module-statistics', + 'string': 'string#module-string', + 'string.templatelib': 'string.templatelib#module-string.templatelib', + 'stringprep': 
'stringprep#module-stringprep', + 'struct': 'struct#module-struct', + 'subprocess': 'subprocess#module-subprocess', + 'sunau': 'sunau#module-sunau', + 'symtable': 'symtable#module-symtable', + 'sys': 'sys#module-sys', + 'sys.monitoring': 'sys.monitoring#module-sys.monitoring', + 'sysconfig': 'sysconfig#module-sysconfig', + 'syslog': 'syslog#module-syslog', + 'tabnanny': 'tabnanny#module-tabnanny', + 'tarfile': 'tarfile#module-tarfile', + 'telnetlib': 'telnetlib#module-telnetlib', + 'tempfile': 'tempfile#module-tempfile', + 'termios': 'termios#module-termios', + 'test': 'test#module-test', + 'test.regrtest': 'test#module-test.regrtest', + 'test.support': 'test#module-test.support', + 'test.support.bytecode_helper': 'test#module-test.support.bytecode_helper', + 'test.support.import_helper': 'test#module-test.support.import_helper', + 'test.support.os_helper': 'test#module-test.support.os_helper', + 'test.support.script_helper': 'test#module-test.support.script_helper', + 'test.support.socket_helper': 'test#module-test.support.socket_helper', + 'test.support.threading_helper': 'test#module-test.support.threading_helper', + 'test.support.warnings_helper': 'test#module-test.support.warnings_helper', + 'textwrap': 'textwrap#module-textwrap', + 'threading': 'threading#module-threading', + 'time': 'time#module-time', + 'timeit': 'timeit#module-timeit', + 'tkinter': 'tkinter#module-tkinter', + 'tkinter.colorchooser': 'tkinter.colorchooser#module-tkinter.colorchooser', + 'tkinter.commondialog': 'dialog#module-tkinter.commondialog', + 'tkinter.dnd': 'tkinter.dnd#module-tkinter.dnd', + 'tkinter.filedialog': 'dialog#module-tkinter.filedialog', + 'tkinter.font': 'tkinter.font#module-tkinter.font', + 'tkinter.messagebox': 'tkinter.messagebox#module-tkinter.messagebox', + 'tkinter.scrolledtext': 'tkinter.scrolledtext#module-tkinter.scrolledtext', + 'tkinter.simpledialog': 'dialog#module-tkinter.simpledialog', + 'tkinter.ttk': 'tkinter.ttk#module-tkinter.ttk', + 'token': 'token#module-token', + 'tokenize': 'tokenize#module-tokenize', + 'tomllib': 'tomllib#module-tomllib', + 'trace': 'trace#module-trace', + 'traceback': 'traceback#module-traceback', + 'tracemalloc': 'tracemalloc#module-tracemalloc', + 'tty': 'tty#module-tty', + 'turtle': 'turtle#module-turtle', + 'turtledemo': 'turtle#module-turtledemo', + 'types': 'types#module-types', + 'typing': 'typing#module-typing', + 'unicodedata': 'unicodedata#module-unicodedata', + 'unittest': 'unittest#module-unittest', + 'unittest.mock': 'unittest.mock#module-unittest.mock', + 'urllib': 'urllib#module-urllib', + 'urllib.error': 'urllib.error#module-urllib.error', + 'urllib.parse': 'urllib.parse#module-urllib.parse', + 'urllib.request': 'urllib.request#module-urllib.request', + 'urllib.response': 'urllib.request#module-urllib.response', + 'urllib.robotparser': 'urllib.robotparser#module-urllib.robotparser', + 'usercustomize': 'site#module-usercustomize', + 'uu': 'uu#module-uu', + 'uuid': 'uuid#module-uuid', + 'venv': 'venv#module-venv', + 'warnings': 'warnings#module-warnings', + 'wave': 'wave#module-wave', + 'weakref': 'weakref#module-weakref', + 'webbrowser': 'webbrowser#module-webbrowser', + 'winreg': 'winreg#module-winreg', + 'winsound': 'winsound#module-winsound', + 'wsgiref': 'wsgiref#module-wsgiref', + 'wsgiref.handlers': 'wsgiref#module-wsgiref.handlers', + 'wsgiref.headers': 'wsgiref#module-wsgiref.headers', + 'wsgiref.simple_server': 'wsgiref#module-wsgiref.simple_server', + 'wsgiref.types': 'wsgiref#module-wsgiref.types', + 'wsgiref.util': 
'wsgiref#module-wsgiref.util', + 'wsgiref.validate': 'wsgiref#module-wsgiref.validate', + 'xdrlib': 'xdrlib#module-xdrlib', + 'xml': 'xml#module-xml', + 'xml.dom': 'xml.dom#module-xml.dom', + 'xml.dom.minidom': 'xml.dom.minidom#module-xml.dom.minidom', + 'xml.dom.pulldom': 'xml.dom.pulldom#module-xml.dom.pulldom', + 'xml.etree.ElementInclude': 'xml.etree.elementtree#module-xml.etree.ElementInclude', + 'xml.etree.ElementTree': 'xml.etree.elementtree#module-xml.etree.ElementTree', + 'xml.parsers.expat': 'pyexpat#module-xml.parsers.expat', + 'xml.parsers.expat.errors': 'pyexpat#module-xml.parsers.expat.errors', + 'xml.parsers.expat.model': 'pyexpat#module-xml.parsers.expat.model', + 'xml.sax': 'xml.sax#module-xml.sax', + 'xml.sax.handler': 'xml.sax.handler#module-xml.sax.handler', + 'xml.sax.saxutils': 'xml.sax.utils#module-xml.sax.saxutils', + 'xml.sax.xmlreader': 'xml.sax.reader#module-xml.sax.xmlreader', + 'xmlrpc': 'xmlrpc#module-xmlrpc', + 'xmlrpc.client': 'xmlrpc.client#module-xmlrpc.client', + 'xmlrpc.server': 'xmlrpc.server#module-xmlrpc.server', + 'zipapp': 'zipapp#module-zipapp', + 'zipfile': 'zipfile#module-zipfile', + 'zipimport': 'zipimport#module-zipimport', + 'zlib': 'zlib#module-zlib', + 'zoneinfo': 'zoneinfo#module-zoneinfo', +} diff --git a/Python314_4_x64_Template/Lib/pydoc_data/topics.py b/Python314_4_x64_Template/Lib/pydoc_data/topics.py new file mode 100644 index 00000000..6dca99ce --- /dev/null +++ b/Python314_4_x64_Template/Lib/pydoc_data/topics.py @@ -0,0 +1,14506 @@ +# Autogenerated by Sphinx on Tue Apr 7 16:13:12 2026 +# as part of the release process. + +topics = { + 'assert': r'''The "assert" statement +********************** + +Assert statements are a convenient way to insert debugging assertions +into a program: + + assert_stmt: "assert" expression ["," expression] + +The simple form, "assert expression", is equivalent to + + if __debug__: + if not expression: raise AssertionError + +The extended form, "assert expression1, expression2", is equivalent to + + if __debug__: + if not expression1: raise AssertionError(expression2) + +These equivalences assume that "__debug__" and "AssertionError" refer +to the built-in variables with those names. In the current +implementation, the built-in variable "__debug__" is "True" under +normal circumstances, "False" when optimization is requested (command +line option "-O"). The current code generator emits no code for an +"assert" statement when optimization is requested at compile time. +Note that it is unnecessary to include the source code for the +expression that failed in the error message; it will be displayed as +part of the stack trace. + +Assignments to "__debug__" are illegal. The value for the built-in +variable is determined when the interpreter starts. +''', + 'assignment': r'''Assignment statements +********************* + +Assignment statements are used to (re)bind names to values and to +modify attributes or items of mutable objects: + + assignment_stmt: (target_list "=")+ (starred_expression | yield_expression) + target_list: target ("," target)* [","] + target: identifier + | "(" [target_list] ")" + | "[" [target_list] "]" + | attributeref + | subscription + | "*" target + +(See section Primaries for the syntax definitions for *attributeref* +and *subscription*.) 
+ +An assignment statement evaluates the expression list (remember that +this can be a single expression or a comma-separated list, the latter +yielding a tuple) and assigns the single resulting object to each of +the target lists, from left to right. + +Assignment is defined recursively depending on the form of the target +(list). When a target is part of a mutable object (an attribute +reference or subscription), the mutable object must ultimately perform +the assignment and decide about its validity, and may raise an +exception if the assignment is unacceptable. The rules observed by +various types and the exceptions raised are given with the definition +of the object types (see section The standard type hierarchy). + +Assignment of an object to a target list, optionally enclosed in +parentheses or square brackets, is recursively defined as follows. + +* If the target list is a single target with no trailing comma, + optionally in parentheses, the object is assigned to that target. + +* Else: + + * If the target list contains one target prefixed with an asterisk, + called a “starred” target: The object must be an iterable with at + least as many items as there are targets in the target list, minus + one. The first items of the iterable are assigned, from left to + right, to the targets before the starred target. The final items + of the iterable are assigned to the targets after the starred + target. A list of the remaining items in the iterable is then + assigned to the starred target (the list can be empty). + + * Else: The object must be an iterable with the same number of items + as there are targets in the target list, and the items are + assigned, from left to right, to the corresponding targets. + +Assignment of an object to a single target is recursively defined as +follows. + +* If the target is an identifier (name): + + * If the name does not occur in a "global" or "nonlocal" statement + in the current code block: the name is bound to the object in the + current local namespace. + + * Otherwise: the name is bound to the object in the global namespace + or the outer namespace determined by "nonlocal", respectively. + + The name is rebound if it was already bound. This may cause the + reference count for the object previously bound to the name to reach + zero, causing the object to be deallocated and its destructor (if it + has one) to be called. + +* If the target is an attribute reference: The primary expression in + the reference is evaluated. It should yield an object with + assignable attributes; if this is not the case, "TypeError" is + raised. That object is then asked to assign the assigned object to + the given attribute; if it cannot perform the assignment, it raises + an exception (usually but not necessarily "AttributeError"). + + Note: If the object is a class instance and the attribute reference + occurs on both sides of the assignment operator, the right-hand side + expression, "a.x" can access either an instance attribute or (if no + instance attribute exists) a class attribute. The left-hand side + target "a.x" is always set as an instance attribute, creating it if + necessary. 
Thus, the two occurrences of "a.x" do not necessarily + refer to the same attribute: if the right-hand side expression + refers to a class attribute, the left-hand side creates a new + instance attribute as the target of the assignment: + + class Cls: + x = 3 # class variable + inst = Cls() + inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3 + + This description does not necessarily apply to descriptor + attributes, such as properties created with "property()". + +* If the target is a subscription: The primary expression in the + reference is evaluated. Next, the subscript expression is evaluated. + Then, the primary’s "__setitem__()" method is called with two + arguments: the subscript and the assigned object. + + Typically, "__setitem__()" is defined on mutable sequence objects + (such as lists) and mapping objects (such as dictionaries), and + behaves as follows. + + If the primary is a mutable sequence object (such as a list), the + subscript must yield an integer. If it is negative, the sequence’s + length is added to it. The resulting value must be a nonnegative + integer less than the sequence’s length, and the sequence is asked + to assign the assigned object to its item with that index. If the + index is out of range, "IndexError" is raised (assignment to a + subscripted sequence cannot add new items to a list). + + If the primary is a mapping object (such as a dictionary), the + subscript must have a type compatible with the mapping’s key type, + and the mapping is then asked to create a key/value pair which maps + the subscript to the assigned object. This can either replace an + existing key/value pair with the same key value, or insert a new + key/value pair (if no key with the same value existed). + + If the target is a slicing: The primary expression should evaluate + to a mutable sequence object (such as a list). The assigned object + should be *iterable*. The slicing’s lower and upper bounds should be + integers; if they are "None" (or not present), the defaults are zero + and the sequence’s length. If either bound is negative, the + sequence’s length is added to it. The resulting bounds are clipped + to lie between zero and the sequence’s length, inclusive. Finally, + the sequence object is asked to replace the slice with the items of + the assigned sequence. The length of the slice may be different + from the length of the assigned sequence, thus changing the length + of the target sequence, if the target sequence allows it. + +Although the definition of assignment implies that overlaps between +the left-hand side and the right-hand side are ‘simultaneous’ (for +example "a, b = b, a" swaps two variables), overlaps *within* the +collection of assigned-to variables occur left-to-right, sometimes +resulting in confusion. For instance, the following program prints +"[0, 2]": + + x = [0, 1] + i = 0 + i, x[i] = 1, 2 # i is updated, then x[i] is updated + print(x) + +See also: + + **PEP 3132** - Extended Iterable Unpacking + The specification for the "*target" feature. 
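A short doctest-style sketch of the target-list rules above (illustrative only, not part of the generated topic text; the variable names are invented for the example):

    >>> first, *middle, last = [1, 2, 3, 4, 5]   # starred target collects the rest
    >>> first, middle, last
    (1, [2, 3, 4], 5)
    >>> a, b = 1, 2
    >>> a, b = b, a        # right-hand side is evaluated first, so this swaps
    >>> a, b
    (2, 1)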
+ + +Augmented assignment statements +=============================== + +Augmented assignment is the combination, in a single statement, of a +binary operation and an assignment statement: + + augmented_assignment_stmt: augtarget augop (expression_list | yield_expression) + augtarget: identifier | attributeref | subscription + augop: "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**=" + | ">>=" | "<<=" | "&=" | "^=" | "|=" + +(See section Primaries for the syntax definitions of the last three +symbols.) + +An augmented assignment evaluates the target (which, unlike normal +assignment statements, cannot be an unpacking) and the expression +list, performs the binary operation specific to the type of assignment +on the two operands, and assigns the result to the original target. +The target is only evaluated once. + +An augmented assignment statement like "x += 1" can be rewritten as "x = x + 1" to achieve a similar, but not exactly equal effect. In the +augmented version, "x" is only evaluated once. Also, when possible, +the actual operation is performed *in-place*, meaning that rather than +creating a new object and assigning that to the target, the old object +is modified instead. + +Unlike normal assignments, augmented assignments evaluate the left- +hand side *before* evaluating the right-hand side. For example, "a[i] += f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs +the addition, and lastly, it writes the result back to "a[i]". + +With the exception of assigning to tuples and multiple targets in a +single statement, the assignment done by augmented assignment +statements is handled the same way as normal assignments. Similarly, +with the exception of the possible *in-place* behavior, the binary +operation performed by augmented assignment is the same as the normal +binary operations. + +For targets which are attribute references, the same caveat about +class and instance attributes applies as for regular assignments. + + +Annotated assignment statements +=============================== + +*Annotation* assignment is the combination, in a single statement, of +a variable or attribute annotation and an optional assignment +statement: + + annotated_assignment_stmt: augtarget ":" expression + ["=" (starred_expression | yield_expression)] + +The difference from normal Assignment statements is that only a single +target is allowed. + +The assignment target is considered “simple” if it consists of a +single name that is not enclosed in parentheses. For simple assignment +targets, if in class or module scope, the annotations are gathered in +a lazily evaluated annotation scope. The annotations can be evaluated +using the "__annotations__" attribute of a class or module, or using +the facilities in the "annotationlib" module. + +If the assignment target is not simple (an attribute, subscript node, +or parenthesized name), the annotation is never evaluated. + +If a name is annotated in a function scope, then this name is local +for that scope. Annotations are never evaluated and stored in function +scopes. + +If the right hand side is present, an annotated assignment performs +the actual assignment as if there was no annotation present. If the +right hand side is not present for an expression target, then the +interpreter evaluates the target except for the last "__setitem__()" +or "__setattr__()" call. 
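A minimal doctest-style sketch of the annotated-assignment semantics above (illustrative only; the class name "C" is invented, and the exact "__annotations__" rendering assumes CPython):

    >>> class C:
    ...     x: int          # simple target: annotation recorded, nothing assigned
    ...     y: int = 0      # annotation plus a real assignment
    ...
    >>> C.__annotations__
    {'x': <class 'int'>, 'y': <class 'int'>}
    >>> hasattr(C, 'x'), hasattr(C, 'y')
    (False, True)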
+ +See also: + + **PEP 526** - Syntax for Variable Annotations + The proposal that added syntax for annotating the types of + variables (including class variables and instance variables), + instead of expressing them through comments. + + **PEP 484** - Type hints + The proposal that added the "typing" module to provide a standard + syntax for type annotations that can be used in static analysis + tools and IDEs. + +Changed in version 3.8: Now annotated assignments allow the same +expressions in the right hand side as regular assignments. Previously, +some expressions (like un-parenthesized tuple expressions) caused a +syntax error. + +Changed in version 3.14: Annotations are now lazily evaluated in a +separate annotation scope. If the assignment target is not simple, +annotations are never evaluated. +''', + 'assignment-expressions': r'''Assignment expressions +********************** + + assignment_expression: [identifier ":="] expression + +An assignment expression (sometimes also called a “named expression” +or “walrus”) assigns an "expression" to an "identifier", while also +returning the value of the "expression". + +One common use case is when handling matched regular expressions: + + if matching := pattern.search(data): + do_something(matching) + +Or, when processing a file stream in chunks: + + while chunk := file.read(9000): + process(chunk) + +Assignment expressions must be surrounded by parentheses when used as +expression statements and when used as sub-expressions in slicing, +conditional, lambda, keyword-argument, and comprehension-if +expressions and in "assert", "with", and "assignment" statements. In +all other places where they can be used, parentheses are not required, +including in "if" and "while" statements. + +Added in version 3.8: See **PEP 572** for more details about +assignment expressions. +''', + 'async': r'''Coroutines +********** + +Added in version 3.5. + + +Coroutine function definition +============================= + + async_funcdef: [decorators] "async" "def" funcname "(" [parameter_list] ")" + ["->" expression] ":" suite + +Execution of Python coroutines can be suspended and resumed at many +points (see *coroutine*). "await" expressions, "async for" and "async +with" can only be used in the body of a coroutine function. + +Functions defined with "async def" syntax are always coroutine +functions, even if they do not contain "await" or "async" keywords. + +It is a "SyntaxError" to use a "yield from" expression inside the body +of a coroutine function. + +An example of a coroutine function: + + async def func(param1, param2): + do_stuff() + await some_coroutine() + +Changed in version 3.7: "await" and "async" are now keywords; +previously they were only treated as such inside the body of a +coroutine function. + + +The "async for" statement +========================= + + async_for_stmt: "async" for_stmt + +An *asynchronous iterable* provides an "__aiter__" method that +directly returns an *asynchronous iterator*, which can call +asynchronous code in its "__anext__" method. + +The "async for" statement allows convenient iteration over +asynchronous iterables. + +The following code: + + async for TARGET in ITER: + SUITE + else: + SUITE2 + +Is semantically equivalent to: + + iter = (ITER).__aiter__() + running = True + + while running: + try: + TARGET = await iter.__anext__() + except StopAsyncIteration: + running = False + else: + SUITE + else: + SUITE2 + +except that implicit special method lookup is used for "__aiter__()" +and "__anext__()". 
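A small, self-contained sketch of the protocol just described, with "__aiter__" returning an object whose "__anext__" is awaitable (the "Countdown" class is invented for illustration):

    import asyncio

    class Countdown:
        def __init__(self, n):
            self.n = n
        def __aiter__(self):
            return self                      # an asynchronous iterator
        async def __anext__(self):
            if self.n == 0:
                raise StopAsyncIteration     # ends the "async for" loop
            self.n -= 1
            return self.n + 1

    async def main():
        async for i in Countdown(3):
            print(i)                         # prints 3, 2, 1

    asyncio.run(main())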
+ +It is a "SyntaxError" to use an "async for" statement outside the body +of a coroutine function. + + +The "async with" statement +========================== + + async_with_stmt: "async" with_stmt + +An *asynchronous context manager* is a *context manager* that is able +to suspend execution in its *enter* and *exit* methods. + +The following code: + + async with EXPRESSION as TARGET: + SUITE + +is semantically equivalent to: + + manager = (EXPRESSION) + aenter = manager.__aenter__ + aexit = manager.__aexit__ + value = await aenter() + hit_except = False + + try: + TARGET = value + SUITE + except: + hit_except = True + if not await aexit(*sys.exc_info()): + raise + finally: + if not hit_except: + await aexit(None, None, None) + +except that implicit special method lookup is used for "__aenter__()" +and "__aexit__()". + +It is a "SyntaxError" to use an "async with" statement outside the +body of a coroutine function. + +See also: + + **PEP 492** - Coroutines with async and await syntax + The proposal that made coroutines a proper standalone concept in + Python, and added supporting syntax. +''', + 'atom-identifiers': r'''Identifiers (Names) +******************* + +An identifier occurring as an atom is a name. See section Names +(identifiers and keywords) for lexical definition and section Naming +and binding for documentation of naming and binding. + +When the name is bound to an object, evaluation of the atom yields +that object. When a name is not bound, an attempt to evaluate it +raises a "NameError" exception. + + +Private name mangling +===================== + +When an identifier that textually occurs in a class definition begins +with two or more underscore characters and does not end in two or more +underscores, it is considered a *private name* of that class. + +See also: The class specifications. + +More precisely, private names are transformed to a longer form before +code is generated for them. If the transformed name is longer than +255 characters, implementation-defined truncation may happen. + +The transformation is independent of the syntactical context in which +the identifier is used but only the following private identifiers are +mangled: + +* Any name used as the name of a variable that is assigned or read or + any name of an attribute being accessed. + + The "__name__" attribute of nested functions, classes, and type + aliases is however not mangled. + +* The name of imported modules, e.g., "__spam" in "import __spam". If + the module is part of a package (i.e., its name contains a dot), the + name is *not* mangled, e.g., the "__foo" in "import __foo.bar" is + not mangled. + +* The name of an imported member, e.g., "__f" in "from spam import + __f". + +The transformation rule is defined as follows: + +* The class name, with leading underscores removed and a single + leading underscore inserted, is inserted in front of the identifier, + e.g., the identifier "__spam" occurring in a class named "Foo", + "_Foo" or "__Foo" is transformed to "_Foo__spam". + +* If the class name consists only of underscores, the transformation + is the identity, e.g., the identifier "__spam" occurring in a class + named "_" or "__" is left as is. +''', + 'atom-literals': r'''Literals +******** + +A *literal* is a textual representation of a value. Python supports +numeric, string and bytes literals. Format strings and template +strings are treated as string literals. + +Numeric literals consist of a single "NUMBER" token, which names an +integer, floating-point number, or an imaginary number. 
See the +Numeric literals section in Lexical analysis documentation for +details. + +String and bytes literals may consist of several tokens. See section +String literal concatenation for details. + +Note that negative and complex numbers, like "-3" or "3+4.2j", are +syntactically not literals, but unary or binary arithmetic operations +involving the "-" or "+" operator. + +Evaluation of a literal yields an object of the given type ("int", +"float", "complex", "str", "bytes", or "Template") with the given +value. The value may be approximated in the case of floating-point and +imaginary literals. + +The formal grammar for literals is: + + literal: strings | NUMBER + + +Literals and object identity +============================ + +All literals correspond to immutable data types, and hence the +object’s identity is less important than its value. Multiple +evaluations of literals with the same value (either the same +occurrence in the program text or a different occurrence) may obtain +the same object or a different object with the same value. + +CPython implementation detail: For example, in CPython, *small* +integers with the same value evaluate to the same object: + + >>> x = 7 + >>> y = 7 + >>> x is y + True + +However, large integers evaluate to different objects: + + >>> x = 123456789 + >>> y = 123456789 + >>> x is y + False + +This behavior may change in future versions of CPython. In particular, +the boundary between “small” and “large” integers has already changed +in the past. + +CPython will emit a "SyntaxWarning" when you compare +literals using "is": + + >>> x = 7 + >>> x is 7 + :1: SyntaxWarning: "is" with 'int' literal. Did you mean "=="? + True + +See When can I rely on identity tests with the is operator? for more +information. + +Template strings are immutable but may reference mutable objects as +"Interpolation" values. For the purposes of this section, two +t-strings have the “same value” if both their structure and the +*identity* of the values match. + +**CPython implementation detail:** Currently, each evaluation of a +template string results in a different object. + + +String literal concatenation +============================ + +Multiple adjacent string or bytes literals, possibly using different +quoting conventions, are allowed, and their meaning is the same as +their concatenation: + + >>> "hello" 'world' + 'helloworld' + +This feature is defined at the syntactical level, so it only works +with literals. To concatenate string expressions at run time, the ‘+’ +operator may be used: + + >>> greeting = "Hello" + >>> space = " " + >>> name = "Blaise" + >>> print(greeting + space + name) # not: print(greeting space name) + Hello Blaise + +Literal concatenation can freely mix raw strings, triple-quoted +strings, and formatted string literals. For example: + + >>> "Hello" r', ' f"{name}!" + 'Hello, Blaise!' + +This feature can be used to reduce the number of backslashes needed, +to split long strings conveniently across long lines, or even to add +comments to parts of strings. For example: + + re.compile("[A-Za-z_]" # letter or underscore + "[A-Za-z0-9_]*" # letter, digit or underscore + ) + +However, bytes literals may only be combined with other byte literals; +not with string literals of any kind. Also, template string literals +may only be combined with other template string literals: + + >>> t"Hello" t"{name}!" 
+ Template(strings=('Hello', '!'), interpolations=(...)) + +Formally: + + strings: (STRING | fstring)+ | tstring+ +''', + 'attribute-access': r'''Customizing attribute access +**************************** + +The following methods can be defined to customize the meaning of +attribute access (use of, assignment to, or deletion of "x.name") for +class instances. + +object.__getattr__(self, name) + + Called when the default attribute access fails with an + "AttributeError" (either "__getattribute__()" raises an + "AttributeError" because *name* is not an instance attribute or an + attribute in the class tree for "self"; or "__get__()" of a *name* + property raises "AttributeError"). This method should either + return the (computed) attribute value or raise an "AttributeError" + exception. The "object" class itself does not provide this method. + + Note that if the attribute is found through the normal mechanism, + "__getattr__()" is not called. (This is an intentional asymmetry + between "__getattr__()" and "__setattr__()".) This is done both for + efficiency reasons and because otherwise "__getattr__()" would have + no way to access other attributes of the instance. Note that at + least for instance variables, you can take total control by not + inserting any values in the instance attribute dictionary (but + instead inserting them in another object). See the + "__getattribute__()" method below for a way to actually get total + control over attribute access. + +object.__getattribute__(self, name) + + Called unconditionally to implement attribute accesses for + instances of the class. If the class also defines "__getattr__()", + the latter will not be called unless "__getattribute__()" either + calls it explicitly or raises an "AttributeError". This method + should return the (computed) attribute value or raise an + "AttributeError" exception. In order to avoid infinite recursion in + this method, its implementation should always call the base class + method with the same name to access any attributes it needs, for + example, "object.__getattribute__(self, name)". + + Note: + + This method may still be bypassed when looking up special methods + as the result of implicit invocation via language syntax or + built-in functions. See Special method lookup. + + For certain sensitive attribute accesses, raises an auditing event + "object.__getattr__" with arguments "obj" and "name". + +object.__setattr__(self, name, value) + + Called when an attribute assignment is attempted. This is called + instead of the normal mechanism (i.e. store the value in the + instance dictionary). *name* is the attribute name, *value* is the + value to be assigned to it. + + If "__setattr__()" wants to assign to an instance attribute, it + should call the base class method with the same name, for example, + "object.__setattr__(self, name, value)". + + For certain sensitive attribute assignments, raises an auditing + event "object.__setattr__" with arguments "obj", "name", "value". + +object.__delattr__(self, name) + + Like "__setattr__()" but for attribute deletion instead of + assignment. This should only be implemented if "del obj.name" is + meaningful for the object. + + For certain sensitive attribute deletions, raises an auditing event + "object.__delattr__" with arguments "obj" and "name". + +object.__dir__(self) + + Called when "dir()" is called on the object. An iterable must be + returned. "dir()" converts the returned iterable to a list and + sorts it. 
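A small sketch of the fallback behavior described above; "Proxy" and "_wrapped" are invented names for the example:

    class Proxy:
        def __init__(self, wrapped):
            self._wrapped = wrapped
        def __getattr__(self, name):
            # Only reached when normal lookup raises AttributeError;
            # _wrapped itself is found normally, so there is no recursion.
            return getattr(self._wrapped, name)

    p = Proxy([1, 2, 3])
    p.append(4)            # forwarded to the wrapped list
    print(p._wrapped)      # [1, 2, 3, 4]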
+ + +Customizing module attribute access +=================================== + +module.__getattr__() +module.__dir__() + +Special names "__getattr__" and "__dir__" can be also used to +customize access to module attributes. The "__getattr__" function at +the module level should accept one argument which is the name of an +attribute and return the computed value or raise an "AttributeError". +If an attribute is not found on a module object through the normal +lookup, i.e. "object.__getattribute__()", then "__getattr__" is +searched in the module "__dict__" before raising an "AttributeError". +If found, it is called with the attribute name and the result is +returned. + +The "__dir__" function should accept no arguments, and return an +iterable of strings that represents the names accessible on module. If +present, this function overrides the standard "dir()" search on a +module. + +module.__class__ + +For a more fine grained customization of the module behavior (setting +attributes, properties, etc.), one can set the "__class__" attribute +of a module object to a subclass of "types.ModuleType". For example: + + import sys + from types import ModuleType + + class VerboseModule(ModuleType): + def __repr__(self): + return f'Verbose {self.__name__}' + + def __setattr__(self, attr, value): + print(f'Setting {attr}...') + super().__setattr__(attr, value) + + sys.modules[__name__].__class__ = VerboseModule + +Note: + + Defining module "__getattr__" and setting module "__class__" only + affect lookups made using the attribute access syntax – directly + accessing the module globals (whether by code within the module, or + via a reference to the module’s globals dictionary) is unaffected. + +Changed in version 3.5: "__class__" module attribute is now writable. + +Added in version 3.7: "__getattr__" and "__dir__" module attributes. + +See also: + + **PEP 562** - Module __getattr__ and __dir__ + Describes the "__getattr__" and "__dir__" functions on modules. + + +Implementing Descriptors +======================== + +The following methods only apply when an instance of the class +containing the method (a so-called *descriptor* class) appears in an +*owner* class (the descriptor must be in either the owner’s class +dictionary or in the class dictionary for one of its parents). In the +examples below, “the attribute” refers to the attribute whose name is +the key of the property in the owner class’ "__dict__". The "object" +class itself does not implement any of these protocols. + +object.__get__(self, instance, owner=None) + + Called to get the attribute of the owner class (class attribute + access) or of an instance of that class (instance attribute + access). The optional *owner* argument is the owner class, while + *instance* is the instance that the attribute was accessed through, + or "None" when the attribute is accessed through the *owner*. + + This method should return the computed attribute value or raise an + "AttributeError" exception. + + **PEP 252** specifies that "__get__()" is callable with one or two + arguments. Python’s own built-in descriptors support this + specification; however, it is likely that some third-party tools + have descriptors that require both arguments. Python’s own + "__getattribute__()" implementation always passes in both arguments + whether they are required or not. + +object.__set__(self, instance, value) + + Called to set the attribute on an instance *instance* of the owner + class to a new value, *value*. 
+ + Note, adding "__set__()" or "__delete__()" changes the kind of + descriptor to a “data descriptor”. See Invoking Descriptors for + more details. + +object.__delete__(self, instance) + + Called to delete the attribute on an instance *instance* of the + owner class. + +Instances of descriptors may also have the "__objclass__" attribute +present: + +object.__objclass__ + + The attribute "__objclass__" is interpreted by the "inspect" module + as specifying the class where this object was defined (setting this + appropriately can assist in runtime introspection of dynamic class + attributes). For callables, it may indicate that an instance of the + given type (or a subclass) is expected or required as the first + positional argument (for example, CPython sets this attribute for + unbound methods that are implemented in C). + + +Invoking Descriptors +==================== + +In general, a descriptor is an object attribute with “binding +behavior”, one whose attribute access has been overridden by methods +in the descriptor protocol: "__get__()", "__set__()", and +"__delete__()". If any of those methods are defined for an object, it +is said to be a descriptor. + +The default behavior for attribute access is to get, set, or delete +the attribute from an object’s dictionary. For instance, "a.x" has a +lookup chain starting with "a.__dict__['x']", then +"type(a).__dict__['x']", and continuing through the base classes of +"type(a)" excluding metaclasses. + +However, if the looked-up value is an object defining one of the +descriptor methods, then Python may override the default behavior and +invoke the descriptor method instead. Where this occurs in the +precedence chain depends on which descriptor methods were defined and +how they were called. + +The starting point for descriptor invocation is a binding, "a.x". How +the arguments are assembled depends on "a": + +Direct Call + The simplest and least common call is when user code directly + invokes a descriptor method: "x.__get__(a)". + +Instance Binding + If binding to an object instance, "a.x" is transformed into the + call: "type(a).__dict__['x'].__get__(a, type(a))". + +Class Binding + If binding to a class, "A.x" is transformed into the call: + "A.__dict__['x'].__get__(None, A)". + +Super Binding + A dotted lookup such as "super(A, a).x" searches + "a.__class__.__mro__" for a base class "B" following "A" and then + returns "B.__dict__['x'].__get__(a, A)". If not a descriptor, "x" + is returned unchanged. + +For instance bindings, the precedence of descriptor invocation depends +on which descriptor methods are defined. A descriptor can define any +combination of "__get__()", "__set__()" and "__delete__()". If it +does not define "__get__()", then accessing the attribute will return +the descriptor object itself unless there is a value in the object’s +instance dictionary. If the descriptor defines "__set__()" and/or +"__delete__()", it is a data descriptor; if it defines neither, it is +a non-data descriptor. Normally, data descriptors define both +"__get__()" and "__set__()", while non-data descriptors have just the +"__get__()" method. Data descriptors with "__get__()" and "__set__()" +(and/or "__delete__()") defined always override a redefinition in an +instance dictionary. In contrast, non-data descriptors can be +overridden by instances. + +Python methods (including those decorated with "@staticmethod" and +"@classmethod") are implemented as non-data descriptors. Accordingly, +instances can redefine and override methods. 
This allows individual +instances to acquire behaviors that differ from other instances of the +same class. + +The "property()" function is implemented as a data descriptor. +Accordingly, instances cannot override the behavior of a property. + + +__slots__ +========= + +*__slots__* allow us to explicitly declare data members (like +properties) and deny the creation of "__dict__" and *__weakref__* +(unless explicitly declared in *__slots__* or available in a parent.) + +The space saved over using "__dict__" can be significant. Attribute +lookup speed can be significantly improved as well. + +object.__slots__ + + This class variable can be assigned a string, iterable, or sequence + of strings with variable names used by instances. *__slots__* + reserves space for the declared variables and prevents the + automatic creation of "__dict__" and *__weakref__* for each + instance. + +Notes on using *__slots__*: + +* When inheriting from a class without *__slots__*, the "__dict__" and + *__weakref__* attribute of the instances will always be accessible. + +* Without a "__dict__" variable, instances cannot be assigned new + variables not listed in the *__slots__* definition. Attempts to + assign to an unlisted variable name raises "AttributeError". If + dynamic assignment of new variables is desired, then add + "'__dict__'" to the sequence of strings in the *__slots__* + declaration. + +* Without a *__weakref__* variable for each instance, classes defining + *__slots__* do not support "weak references" to its instances. If + weak reference support is needed, then add "'__weakref__'" to the + sequence of strings in the *__slots__* declaration. + +* *__slots__* are implemented at the class level by creating + descriptors for each variable name. As a result, class attributes + cannot be used to set default values for instance variables defined + by *__slots__*; otherwise, the class attribute would overwrite the + descriptor assignment. + +* The action of a *__slots__* declaration is not limited to the class + where it is defined. *__slots__* declared in parents are available + in child classes. However, instances of a child subclass will get a + "__dict__" and *__weakref__* unless the subclass also defines + *__slots__* (which should only contain names of any *additional* + slots). + +* If a class defines a slot also defined in a base class, the instance + variable defined by the base class slot is inaccessible (except by + retrieving its descriptor directly from the base class). This + renders the meaning of the program undefined. In the future, a + check may be added to prevent this. + +* "TypeError" will be raised if nonempty *__slots__* are defined for a + class derived from a ""variable-length" built-in type" such as + "int", "bytes", and "tuple". + +* Any non-string *iterable* may be assigned to *__slots__*. + +* If a "dictionary" is used to assign *__slots__*, the dictionary keys + will be used as the slot names. The values of the dictionary can be + used to provide per-attribute docstrings that will be recognised by + "inspect.getdoc()" and displayed in the output of "help()". + +* "__class__" assignment works only if both classes have the same + *__slots__*. + +* Multiple inheritance with multiple slotted parent classes can be + used, but only one parent is allowed to have attributes created by + slots (the other bases must have empty slot layouts) - violations + raise "TypeError". + +* If an *iterator* is used for *__slots__* then a *descriptor* is + created for each of the iterator’s values. 
However, the *__slots__* + attribute will be an empty iterator. +''', + 'attribute-references': r'''Attribute references +******************** + +An attribute reference is a primary followed by a period and a name: + + attributeref: primary "." identifier + +The primary must evaluate to an object of a type that supports +attribute references, which most objects do. This object is then +asked to produce the attribute whose name is the identifier. The type +and value produced is determined by the object. Multiple evaluations +of the same attribute reference may yield different objects. + +This production can be customized by overriding the +"__getattribute__()" method or the "__getattr__()" method. The +"__getattribute__()" method is called first and either returns a value +or raises "AttributeError" if the attribute is not available. + +If an "AttributeError" is raised and the object has a "__getattr__()" +method, that method is called as a fallback. +''', + 'augassign': r'''Augmented assignment statements +******************************* + +Augmented assignment is the combination, in a single statement, of a +binary operation and an assignment statement: + + augmented_assignment_stmt: augtarget augop (expression_list | yield_expression) + augtarget: identifier | attributeref | subscription + augop: "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**=" + | ">>=" | "<<=" | "&=" | "^=" | "|=" + +(See section Primaries for the syntax definitions of the last three +symbols.) + +An augmented assignment evaluates the target (which, unlike normal +assignment statements, cannot be an unpacking) and the expression +list, performs the binary operation specific to the type of assignment +on the two operands, and assigns the result to the original target. +The target is only evaluated once. + +An augmented assignment statement like "x += 1" can be rewritten as "x = x + 1" to achieve a similar, but not exactly equal effect. In the +augmented version, "x" is only evaluated once. Also, when possible, +the actual operation is performed *in-place*, meaning that rather than +creating a new object and assigning that to the target, the old object +is modified instead. + +Unlike normal assignments, augmented assignments evaluate the left- +hand side *before* evaluating the right-hand side. For example, "a[i] += f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs +the addition, and lastly, it writes the result back to "a[i]". + +With the exception of assigning to tuples and multiple targets in a +single statement, the assignment done by augmented assignment +statements is handled the same way as normal assignments. Similarly, +with the exception of the possible *in-place* behavior, the binary +operation performed by augmented assignment is the same as the normal +binary operations. + +For targets which are attribute references, the same caveat about +class and instance attributes applies as for regular assignments. +''', + 'await': r'''Await expression +**************** + +Suspend the execution of *coroutine* on an *awaitable* object. Can +only be used inside a *coroutine function*. + + await_expr: "await" primary + +Added in version 3.5. +''', + 'binary': r'''Binary arithmetic operations +**************************** + +The binary arithmetic operations have the conventional priority +levels. Note that some of these operations also apply to certain non- numeric types. 
Apart from the power operator, there are only two +levels, one for multiplicative operators and one for additive +operators: + + m_expr: u_expr | m_expr "*" u_expr | m_expr "@" m_expr | + m_expr "//" u_expr | m_expr "/" u_expr | + m_expr "%" u_expr + a_expr: m_expr | a_expr "+" m_expr | a_expr "-" m_expr + +The "*" (multiplication) operator yields the product of its arguments. +The arguments must either both be numbers, or one argument must be an +integer and the other must be a sequence. In the former case, the +numbers are converted to a common real type and then multiplied +together. In the latter case, sequence repetition is performed; a +negative repetition factor yields an empty sequence. + +This operation can be customized using the special "__mul__()" and +"__rmul__()" methods. + +Changed in version 3.14: If only one operand is a complex number, the +other operand is converted to a floating-point number. + +The "@" (at) operator is intended to be used for matrix +multiplication. No builtin Python types implement this operator. + +This operation can be customized using the special "__matmul__()" and +"__rmatmul__()" methods. + +Added in version 3.5. + +The "/" (division) and "//" (floor division) operators yield the +quotient of their arguments. The numeric arguments are first +converted to a common type. Division of integers yields a float, while +floor division of integers results in an integer; the result is that +of mathematical division with the ‘floor’ function applied to the +result. Division by zero raises the "ZeroDivisionError" exception. + +The division operation can be customized using the special +"__truediv__()" and "__rtruediv__()" methods. The floor division +operation can be customized using the special "__floordiv__()" and +"__rfloordiv__()" methods. + +The "%" (modulo) operator yields the remainder from the division of +the first argument by the second. The numeric arguments are first +converted to a common type. A zero right argument raises the +"ZeroDivisionError" exception. The arguments may be floating-point +numbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 + +0.34".) The modulo operator always yields a result with the same sign +as its second operand (or zero); the absolute value of the result is +strictly smaller than the absolute value of the second operand [1]. + +The floor division and modulo operators are connected by the following +identity: "x == (x//y)*y + (x%y)". Floor division and modulo are also +connected with the built-in function "divmod()": "divmod(x, y) == +(x//y, x%y)". [2]. + +In addition to performing the modulo operation on numbers, the "%" +operator is also overloaded by string objects to perform old-style +string formatting (also known as interpolation). The syntax for +string formatting is described in the Python Library Reference, +section printf-style String Formatting. + +The *modulo* operation can be customized using the special "__mod__()" +and "__rmod__()" methods. + +The floor division operator, the modulo operator, and the "divmod()" +function are not defined for complex numbers. Instead, convert to a +floating-point number using the "abs()" function if appropriate. + +The "+" (addition) operator yields the sum of its arguments. The +arguments must either both be numbers or both be sequences of the same +type. In the former case, the numbers are converted to a common real +type and then added together. In the latter case, the sequences are +concatenated. 
+ +This operation can be customized using the special "__add__()" and +"__radd__()" methods. + +Changed in version 3.14: If only one operand is a complex number, the +other operand is converted to a floating-point number. + +The "-" (subtraction) operator yields the difference of its arguments. +The numeric arguments are first converted to a common real type. + +This operation can be customized using the special "__sub__()" and +"__rsub__()" methods. + +Changed in version 3.14: If only one operand is a complex number, the +other operand is converted to a floating-point number. +''', + 'bitwise': r'''Binary bitwise operations +************************* + +Each of the three bitwise operations has a different priority level: + + and_expr: shift_expr | and_expr "&" shift_expr + xor_expr: and_expr | xor_expr "^" and_expr + or_expr: xor_expr | or_expr "|" xor_expr + +The "&" operator yields the bitwise AND of its arguments, which must +be integers or one of them must be a custom object overriding +"__and__()" or "__rand__()" special methods. + +The "^" operator yields the bitwise XOR (exclusive OR) of its +arguments, which must be integers or one of them must be a custom +object overriding "__xor__()" or "__rxor__()" special methods. + +The "|" operator yields the bitwise (inclusive) OR of its arguments, +which must be integers or one of them must be a custom object +overriding "__or__()" or "__ror__()" special methods. +''', + 'bltin-code-objects': r'''Code Objects +************ + +Code objects are used by the implementation to represent “pseudo- +compiled” executable Python code such as a function body. They differ +from function objects because they don’t contain a reference to their +global execution environment. Code objects are returned by the built- +in "compile()" function and can be extracted from function objects +through their "__code__" attribute. See also the "code" module. + +Accessing "__code__" raises an auditing event "object.__getattr__" +with arguments "obj" and ""__code__"". + +A code object can be executed or evaluated by passing it (instead of a +source string) to the "exec()" or "eval()" built-in functions. + +See The standard type hierarchy for more information. +''', + 'bltin-ellipsis-object': r'''The Ellipsis Object +******************* + +This object is commonly used to indicate that something is omitted. It +supports no special operations. There is exactly one ellipsis object, +named "Ellipsis" (a built-in name). "type(Ellipsis)()" produces the +"Ellipsis" singleton. + +It is written as "Ellipsis" or "...". + +In typical use, "..." as the "Ellipsis" object appears in a few +different places, for instance: + +* In type annotations, such as callable arguments or tuple elements. + +* As the body of a function instead of a pass statement. + +* In third-party libraries, such as Numpy’s slicing and striding. + +Python also uses three dots in ways that are not "Ellipsis" objects, +for instance: + +* Doctest’s "ELLIPSIS", as a pattern for missing content. + +* The default Python prompt of the *interactive* shell when partial + input is incomplete. + +Lastly, the Python documentation often uses three dots in conventional +English usage to mean omitted content, even in code examples that also +use them as the "Ellipsis". +''', + 'bltin-null-object': r'''The Null Object +*************** + +This object is returned by functions that don’t explicitly return a +value. It supports no special operations. There is exactly one null +object, named "None" (a built-in name). 
"type(None)()" produces the +same singleton. + +It is written as "None". +''', + 'bltin-type-objects': r'''Type Objects +************ + +Type objects represent the various object types. An object’s type is +accessed by the built-in function "type()". There are no special +operations on types. The standard module "types" defines names for +all standard built-in types. + +Types are written like this: "". +''', + 'booleans': r'''Boolean operations +****************** + + or_test: and_test | or_test "or" and_test + and_test: not_test | and_test "and" not_test + not_test: comparison | "not" not_test + +In the context of Boolean operations, and also when expressions are +used by control flow statements, the following values are interpreted +as false: "False", "None", numeric zero of all types, and empty +strings and containers (including strings, tuples, lists, +dictionaries, sets and frozensets). All other values are interpreted +as true. User-defined objects can customize their truth value by +providing a "__bool__()" method. + +The operator "not" yields "True" if its argument is false, "False" +otherwise. + +The expression "x and y" first evaluates *x*; if *x* is false, its +value is returned; otherwise, *y* is evaluated and the resulting value +is returned. + +The expression "x or y" first evaluates *x*; if *x* is true, its value +is returned; otherwise, *y* is evaluated and the resulting value is +returned. + +Note that neither "and" nor "or" restrict the value and type they +return to "False" and "True", but rather return the last evaluated +argument. This is sometimes useful, e.g., if "s" is a string that +should be replaced by a default value if it is empty, the expression +"s or 'foo'" yields the desired value. Because "not" has to create a +new value, it returns a boolean value regardless of the type of its +argument (for example, "not 'foo'" produces "False" rather than "''".) +''', + 'break': r'''The "break" statement +********************* + + break_stmt: "break" + +"break" may only occur syntactically nested in a "for" or "while" +loop, but not nested in a function or class definition within that +loop. + +It terminates the nearest enclosing loop, skipping the optional "else" +clause if the loop has one. + +If a "for" loop is terminated by "break", the loop control target +keeps its current value. + +When "break" passes control out of a "try" statement with a "finally" +clause, that "finally" clause is executed before really leaving the +loop. +''', + 'callable-types': r'''Emulating callable objects +************************** + +object.__call__(self[, args...]) + + Called when the instance is “called” as a function; if this method + is defined, "x(arg1, arg2, ...)" roughly translates to + "type(x).__call__(x, arg1, ...)". The "object" class itself does + not provide this method. 
+''', + 'calls': r'''Calls +***** + +A call calls a callable object (e.g., a *function*) with a possibly +empty series of *arguments*: + + call: primary "(" [argument_list [","] | comprehension] ")" + argument_list: positional_arguments ["," starred_and_keywords] + ["," keywords_arguments] + | starred_and_keywords ["," keywords_arguments] + | keywords_arguments + positional_arguments: positional_item ("," positional_item)* + positional_item: assignment_expression | "*" expression + starred_and_keywords: ("*" expression | keyword_item) + ("," "*" expression | "," keyword_item)* + keywords_arguments: (keyword_item | "**" expression) + ("," keyword_item | "," "**" expression)* + keyword_item: identifier "=" expression + +An optional trailing comma may be present after the positional and +keyword arguments but does not affect the semantics. + +The primary must evaluate to a callable object (user-defined +functions, built-in functions, methods of built-in objects, class +objects, methods of class instances, and all objects having a +"__call__()" method are callable). All argument expressions are +evaluated before the call is attempted. Please refer to section +Function definitions for the syntax of formal *parameter* lists. + +If keyword arguments are present, they are first converted to +positional arguments, as follows. First, a list of unfilled slots is +created for the formal parameters. If there are N positional +arguments, they are placed in the first N slots. Next, for each +keyword argument, the identifier is used to determine the +corresponding slot (if the identifier is the same as the first formal +parameter name, the first slot is used, and so on). If the slot is +already filled, a "TypeError" exception is raised. Otherwise, the +argument is placed in the slot, filling it (even if the expression is +"None", it fills the slot). When all arguments have been processed, +the slots that are still unfilled are filled with the corresponding +default value from the function definition. (Default values are +calculated, once, when the function is defined; thus, a mutable object +such as a list or dictionary used as default value will be shared by +all calls that don’t specify an argument value for the corresponding +slot; this should usually be avoided.) If there are any unfilled +slots for which no default value is specified, a "TypeError" exception +is raised. Otherwise, the list of filled slots is used as the +argument list for the call. + +**CPython implementation detail:** An implementation may provide +built-in functions whose positional parameters do not have names, even +if they are ‘named’ for the purpose of documentation, and which +therefore cannot be supplied by keyword. In CPython, this is the case +for functions implemented in C that use "PyArg_ParseTuple()" to parse +their arguments. + +If there are more positional arguments than there are formal parameter +slots, a "TypeError" exception is raised, unless a formal parameter +using the syntax "*identifier" is present; in this case, that formal +parameter receives a tuple containing the excess positional arguments +(or an empty tuple if there were no excess positional arguments). 
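A doctest-style sketch of the "*identifier" rule from the preceding paragraph (the function "f" is invented for illustration):

    >>> def f(a, b, *rest):
    ...     return a, b, rest
    ...
    >>> f(1, 2)
    (1, 2, ())
    >>> f(1, 2, 3, 4)      # excess positional arguments land in "rest"
    (1, 2, (3, 4))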
+ +If any keyword argument does not correspond to a formal parameter +name, a "TypeError" exception is raised, unless a formal parameter +using the syntax "**identifier" is present; in this case, that formal +parameter receives a dictionary containing the excess keyword +arguments (using the keywords as keys and the argument values as +corresponding values), or a (new) empty dictionary if there were no +excess keyword arguments. + +If the syntax "*expression" appears in the function call, "expression" +must evaluate to an *iterable*. Elements from these iterables are +treated as if they were additional positional arguments. For the call +"f(x1, x2, *y, x3, x4)", if *y* evaluates to a sequence *y1*, …, *yM*, +this is equivalent to a call with M+4 positional arguments *x1*, *x2*, +*y1*, …, *yM*, *x3*, *x4*. + +A consequence of this is that although the "*expression" syntax may +appear *after* explicit keyword arguments, it is processed *before* +the keyword arguments (and any "**expression" arguments – see below). +So: + + >>> def f(a, b): + ... print(a, b) + ... + >>> f(b=1, *(2,)) + 2 1 + >>> f(a=1, *(2,)) + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + TypeError: f() got multiple values for keyword argument 'a' + >>> f(1, *(2,)) + 1 2 + +It is unusual for both keyword arguments and the "*expression" syntax +to be used in the same call, so in practice this confusion does not +often arise. + +If the syntax "**expression" appears in the function call, +"expression" must evaluate to a *mapping*, the contents of which are +treated as additional keyword arguments. If a parameter matching a key +has already been given a value (by an explicit keyword argument, or +from another unpacking), a "TypeError" exception is raised. + +When "**expression" is used, each key in this mapping must be a +string. Each value from the mapping is assigned to the first formal +parameter eligible for keyword assignment whose name is equal to the +key. A key need not be a Python identifier (e.g. ""max-temp °F"" is +acceptable, although it will not match any formal parameter that could +be declared). If there is no match to a formal parameter the key-value +pair is collected by the "**" parameter, if there is one, or if there +is not, a "TypeError" exception is raised. + +Formal parameters using the syntax "*identifier" or "**identifier" +cannot be used as positional argument slots or as keyword argument +names. + +Changed in version 3.5: Function calls accept any number of "*" and +"**" unpackings, positional arguments may follow iterable unpackings +("*"), and keyword arguments may follow dictionary unpackings ("**"). +Originally proposed by **PEP 448**. + +A call always returns some value, possibly "None", unless it raises an +exception. How this value is computed depends on the type of the +callable object. + +If it is— + +a user-defined function: + The code block for the function is executed, passing it the + argument list. The first thing the code block will do is bind the + formal parameters to the arguments; this is described in section + Function definitions. When the code block executes a "return" + statement, this specifies the return value of the function call. + If execution reaches the end of the code block without executing a + "return" statement, the return value is "None". + +a built-in function or method: + The result is up to the interpreter; see Built-in Functions for the + descriptions of built-in functions and methods. + +a class object: + A new instance of that class is returned. 
+ +a class instance method: + The corresponding user-defined function is called, with an argument + list that is one longer than the argument list of the call: the + instance becomes the first argument. + +a class instance: + The class must define a "__call__()" method; the effect is then the + same as if that method was called. +''', + 'class': r'''Class definitions +***************** + +A class definition defines a class object (see section The standard +type hierarchy): + + classdef: [decorators] "class" classname [type_params] [inheritance] ":" suite + inheritance: "(" [argument_list] ")" + classname: identifier + +A class definition is an executable statement. The inheritance list +usually gives a list of base classes (see Metaclasses for more +advanced uses), so each item in the list should evaluate to a class +object which allows subclassing. Classes without an inheritance list +inherit, by default, from the base class "object"; hence, + + class Foo: + pass + +is equivalent to + + class Foo(object): + pass + +The class’s suite is then executed in a new execution frame (see +Naming and binding), using a newly created local namespace and the +original global namespace. (Usually, the suite contains mostly +function definitions.) When the class’s suite finishes execution, its +execution frame is discarded but its local namespace is saved. [5] A +class object is then created using the inheritance list for the base +classes and the saved local namespace for the attribute dictionary. +The class name is bound to this class object in the original local +namespace. + +The order in which attributes are defined in the class body is +preserved in the new class’s "__dict__". Note that this is reliable +only right after the class is created and only for classes that were +defined using the definition syntax. + +Class creation can be customized heavily using metaclasses. + +Classes can also be decorated: just like when decorating functions, + + @f1(arg) + @f2 + class Foo: pass + +is roughly equivalent to + + class Foo: pass + Foo = f1(arg)(f2(Foo)) + +The evaluation rules for the decorator expressions are the same as for +function decorators. The result is then bound to the class name. + +Changed in version 3.9: Classes may be decorated with any valid +"assignment_expression". Previously, the grammar was much more +restrictive; see **PEP 614** for details. + +A list of type parameters may be given in square brackets immediately +after the class’s name. This indicates to static type checkers that +the class is generic. At runtime, the type parameters can be retrieved +from the class’s "__type_params__" attribute. See Generic classes for +more. + +Changed in version 3.12: Type parameter lists are new in Python 3.12. + +**Programmer’s note:** Variables defined in the class definition are +class attributes; they are shared by instances. Instance attributes +can be set in a method with "self.name = value". Both class and +instance attributes are accessible through the notation “"self.name"”, +and an instance attribute hides a class attribute with the same name +when accessed in this way. Class attributes can be used as defaults +for instance attributes, but using mutable values there can lead to +unexpected results. Descriptors can be used to create instance +variables with different implementation details. 
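+
+For example, a minimal sketch of the class/instance attribute
+distinction described in the note above (the names are illustrative
+only):
+
+   >>> class Dog:
+   ...     tricks = []                    # class attribute, shared by instances
+   ...     def add_trick(self, trick):
+   ...         self.tricks.append(trick)  # mutates the shared list
+   ...
+   >>> a, b = Dog(), Dog()
+   >>> a.add_trick('roll over')
+   >>> b.tricks                           # b sees a's trick: shared mutable value
+   ['roll over']
+   >>> a.name = 'Fido'                    # instance attribute, set per instance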
+ +See also: + + **PEP 3115** - Metaclasses in Python 3000 + The proposal that changed the declaration of metaclasses to the + current syntax, and the semantics for how classes with + metaclasses are constructed. + + **PEP 3129** - Class Decorators + The proposal that added class decorators. Function and method + decorators were introduced in **PEP 318**. +''', + 'comparisons': r'''Comparisons +*********** + +Unlike C, all comparison operations in Python have the same priority, +which is lower than that of any arithmetic, shifting or bitwise +operation. Also unlike C, expressions like "a < b < c" have the +interpretation that is conventional in mathematics: + + comparison: or_expr (comp_operator or_expr)* + comp_operator: "<" | ">" | "==" | ">=" | "<=" | "!=" + | "is" ["not"] | ["not"] "in" + +Comparisons yield boolean values: "True" or "False". Custom *rich +comparison methods* may return non-boolean values. In this case Python +will call "bool()" on such value in boolean contexts. + +Comparisons can be chained arbitrarily, e.g., "x < y <= z" is +equivalent to "x < y and y <= z", except that "y" is evaluated only +once (but in both cases "z" is not evaluated at all when "x < y" is +found to be false). + +Formally, if *a*, *b*, *c*, …, *y*, *z* are expressions and *op1*, +*op2*, …, *opN* are comparison operators, then "a op1 b op2 c ... y +opN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except +that each expression is evaluated at most once. + +Note that "a op1 b op2 c" doesn’t imply any kind of comparison between +*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though +perhaps not pretty). + + +Value comparisons +================= + +The operators "<", ">", "==", ">=", "<=", and "!=" compare the values +of two objects. The objects do not need to have the same type. + +Chapter Objects, values and types states that objects have a value (in +addition to type and identity). The value of an object is a rather +abstract notion in Python: For example, there is no canonical access +method for an object’s value. Also, there is no requirement that the +value of an object should be constructed in a particular way, e.g. +comprised of all its data attributes. Comparison operators implement a +particular notion of what the value of an object is. One can think of +them as defining the value of an object indirectly, by means of their +comparison implementation. + +Because all types are (direct or indirect) subtypes of "object", they +inherit the default comparison behavior from "object". Types can +customize their comparison behavior by implementing *rich comparison +methods* like "__lt__()", described in Basic customization. + +The default behavior for equality comparison ("==" and "!=") is based +on the identity of the objects. Hence, equality comparison of +instances with the same identity results in equality, and equality +comparison of instances with different identities results in +inequality. A motivation for this default behavior is the desire that +all objects should be reflexive (i.e. "x is y" implies "x == y"). + +A default order comparison ("<", ">", "<=", and ">=") is not provided; +an attempt raises "TypeError". A motivation for this default behavior +is the lack of a similar invariant as for equality. + +The behavior of the default equality comparison, that instances with +different identities are always unequal, may be in contrast to what +types will need that have a sensible definition of object value and +value-based equality. 
Such types will need to customize their +comparison behavior, and in fact, a number of built-in types have done +that. + +The following list describes the comparison behavior of the most +important built-in types. + +* Numbers of built-in numeric types (Numeric Types — int, float, + complex) and of the standard library types "fractions.Fraction" and + "decimal.Decimal" can be compared within and across their types, + with the restriction that complex numbers do not support order + comparison. Within the limits of the types involved, they compare + mathematically (algorithmically) correct without loss of precision. + + The not-a-number values "float('NaN')" and "decimal.Decimal('NaN')" + are special. Any ordered comparison of a number to a not-a-number + value is false. A counter-intuitive implication is that not-a-number + values are not equal to themselves. For example, if "x = + float('NaN')", "3 < x", "x < 3" and "x == x" are all false, while "x + != x" is true. This behavior is compliant with IEEE 754. + +* "None" and "NotImplemented" are singletons. **PEP 8** advises that + comparisons for singletons should always be done with "is" or "is + not", never the equality operators. + +* Binary sequences (instances of "bytes" or "bytearray") can be + compared within and across their types. They compare + lexicographically using the numeric values of their elements. + +* Strings (instances of "str") compare lexicographically using the + numerical Unicode code points (the result of the built-in function + "ord()") of their characters. [3] + + Strings and binary sequences cannot be directly compared. + +* Sequences (instances of "tuple", "list", or "range") can be compared + only within each of their types, with the restriction that ranges do + not support order comparison. Equality comparison across these + types results in inequality, and ordering comparison across these + types raises "TypeError". + + Sequences compare lexicographically using comparison of + corresponding elements. The built-in containers typically assume + identical objects are equal to themselves. That lets them bypass + equality tests for identical objects to improve performance and to + maintain their internal invariants. + + Lexicographical comparison between built-in collections works as + follows: + + * For two collections to compare equal, they must be of the same + type, have the same length, and each pair of corresponding + elements must compare equal (for example, "[1,2] == (1,2)" is + false because the type is not the same). + + * Collections that support order comparison are ordered the same as + their first unequal elements (for example, "[1,2,x] <= [1,2,y]" + has the same value as "x <= y"). If a corresponding element does + not exist, the shorter collection is ordered first (for example, + "[1,2] < [1,2,3]" is true). + +* Mappings (instances of "dict") compare equal if and only if they + have equal "(key, value)" pairs. Equality comparison of the keys and + values enforces reflexivity. + + Order comparisons ("<", ">", "<=", and ">=") raise "TypeError". + +* Sets (instances of "set" or "frozenset") can be compared within and + across their types. + + They define order comparison operators to mean subset and superset + tests. Those relations do not define total orderings (for example, + the two sets "{1,2}" and "{2,3}" are not equal, nor subsets of one + another, nor supersets of one another). 
Accordingly, sets are not + appropriate arguments for functions which depend on total ordering + (for example, "min()", "max()", and "sorted()" produce undefined + results given a list of sets as inputs). + + Comparison of sets enforces reflexivity of its elements. + +* Most other built-in types have no comparison methods implemented, so + they inherit the default comparison behavior. + +User-defined classes that customize their comparison behavior should +follow some consistency rules, if possible: + +* Equality comparison should be reflexive. In other words, identical + objects should compare equal: + + "x is y" implies "x == y" + +* Comparison should be symmetric. In other words, the following + expressions should have the same result: + + "x == y" and "y == x" + + "x != y" and "y != x" + + "x < y" and "y > x" + + "x <= y" and "y >= x" + +* Comparison should be transitive. The following (non-exhaustive) + examples illustrate that: + + "x > y and y > z" implies "x > z" + + "x < y and y <= z" implies "x < z" + +* Inverse comparison should result in the boolean negation. In other + words, the following expressions should have the same result: + + "x == y" and "not x != y" + + "x < y" and "not x >= y" (for total ordering) + + "x > y" and "not x <= y" (for total ordering) + + The last two expressions apply to totally ordered collections (e.g. + to sequences, but not to sets or mappings). See also the + "total_ordering()" decorator. + +* The "hash()" result should be consistent with equality. Objects that + are equal should either have the same hash value, or be marked as + unhashable. + +Python does not enforce these consistency rules. In fact, the +not-a-number values are an example for not following these rules. + + +Membership test operations +========================== + +The operators "in" and "not in" test for membership. "x in s" +evaluates to "True" if *x* is a member of *s*, and "False" otherwise. +"x not in s" returns the negation of "x in s". All built-in sequences +and set types support this as well as dictionary, for which "in" tests +whether the dictionary has a given key. For container types such as +list, tuple, set, frozenset, dict, or collections.deque, the +expression "x in y" is equivalent to "any(x is e or x == e for e in +y)". + +For the string and bytes types, "x in y" is "True" if and only if *x* +is a substring of *y*. An equivalent test is "y.find(x) != -1". +Empty strings are always considered to be a substring of any other +string, so """ in "abc"" will return "True". + +For user-defined classes which define the "__contains__()" method, "x +in y" returns "True" if "y.__contains__(x)" returns a true value, and +"False" otherwise. + +For user-defined classes which do not define "__contains__()" but do +define "__iter__()", "x in y" is "True" if some value "z", for which +the expression "x is z or x == z" is true, is produced while iterating +over "y". If an exception is raised during the iteration, it is as if +"in" raised that exception. + +Lastly, the old-style iteration protocol is tried: if a class defines +"__getitem__()", "x in y" is "True" if and only if there is a non- +negative integer index *i* such that "x is y[i] or x == y[i]", and no +lower integer index raises the "IndexError" exception. (If any other +exception is raised, it is as if "in" raised that exception). + +The operator "not in" is defined to have the inverse truth value of +"in". 
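+
+As an illustration of the fallbacks described above, a hypothetical
+class that defines "__iter__()" but not "__contains__()" still
+supports membership tests:
+
+   >>> class Evens:
+   ...     def __iter__(self):
+   ...         return iter([0, 2, 4, 6, 8])
+   ...
+   >>> 4 in Evens()    # falls back to iteration over the object
+   True
+   >>> 5 in Evens()
+   False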
+
+
+Identity comparisons
+====================
+
+The operators "is" and "is not" test for an object’s identity: "x is
+y" is true if and only if *x* and *y* are the same object. An
+object’s identity is determined using the "id()" function. "x is not
+y" yields the inverse truth value. [4]
+''',
+ 'compound': r'''Compound statements
+*******************
+
+Compound statements contain (groups of) other statements; they affect
+or control the execution of those other statements in some way. In
+general, compound statements span multiple lines, although in simple
+incarnations a whole compound statement may be contained in one line.
+
+The "if", "while" and "for" statements implement traditional control
+flow constructs. "try" specifies exception handlers and/or cleanup
+code for a group of statements, while the "with" statement allows the
+execution of initialization and finalization code around a block of
+code. Function and class definitions are also syntactically compound
+statements.
+
+A compound statement consists of one or more ‘clauses.’ A clause
+consists of a header and a ‘suite.’ The clause headers of a
+particular compound statement are all at the same indentation level.
+Each clause header begins with a uniquely identifying keyword and ends
+with a colon. A suite is a group of statements controlled by a
+clause. A suite can be one or more semicolon-separated simple
+statements on the same line as the header, following the header’s
+colon, or it can be one or more indented statements on subsequent
+lines. Only the latter form of a suite can contain nested compound
+statements; the following is illegal, mostly because it wouldn’t be
+clear to which "if" clause a following "else" clause would belong:
+
+   if test1: if test2: print(x)
+
+Also note that the semicolon binds tighter than the colon in this
+context, so that in the following example, either all or none of the
+"print()" calls are executed:
+
+   if x < y < z: print(x); print(y); print(z)
+
+Summarizing:
+
+   compound_stmt: if_stmt
+                  | while_stmt
+                  | for_stmt
+                  | try_stmt
+                  | with_stmt
+                  | match_stmt
+                  | funcdef
+                  | classdef
+                  | async_with_stmt
+                  | async_for_stmt
+                  | async_funcdef
+   suite: stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT
+   statement: stmt_list NEWLINE | compound_stmt
+   stmt_list: simple_stmt (";" simple_stmt)* [";"]
+
+Note that statements always end in a "NEWLINE" possibly followed by a
+"DEDENT". Also note that optional continuation clauses always begin
+with a keyword that cannot start a statement, thus there are no
+ambiguities (the ‘dangling "else"’ problem is solved in Python by
+requiring nested "if" statements to be indented).
+
+The formatting of the grammar rules in the following sections places
+each clause on a separate line for clarity.
+
+
+The "if" statement
+==================
+
+The "if" statement is used for conditional execution:
+
+   if_stmt: "if" assignment_expression ":" suite
+            ("elif" assignment_expression ":" suite)*
+            ["else" ":" suite]
+
+It selects exactly one of the suites by evaluating the expressions one
+by one until one is found to be true (see section Boolean operations
+for the definition of true and false); then that suite is executed
+(and no other part of the "if" statement is executed or evaluated).
+If all expressions are false, the suite of the "else" clause, if
+present, is executed.
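+
+For example (an illustrative sketch; only the first true condition’s
+suite runs):
+
+   >>> x = 0
+   >>> if x < 0:
+   ...     sign = -1
+   ... elif x == 0:
+   ...     sign = 0
+   ... else:
+   ...     sign = 1
+   ...
+   >>> sign
+   0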
+ + +The "while" statement +===================== + +The "while" statement is used for repeated execution as long as an +expression is true: + + while_stmt: "while" assignment_expression ":" suite + ["else" ":" suite] + +This repeatedly tests the expression and, if it is true, executes the +first suite; if the expression is false (which may be the first time +it is tested) the suite of the "else" clause, if present, is executed +and the loop terminates. + +A "break" statement executed in the first suite terminates the loop +without executing the "else" clause’s suite. A "continue" statement +executed in the first suite skips the rest of the suite and goes back +to testing the expression. + + +The "for" statement +=================== + +The "for" statement is used to iterate over the elements of a sequence +(such as a string, tuple or list) or other iterable object: + + for_stmt: "for" target_list "in" starred_expression_list ":" suite + ["else" ":" suite] + +The "starred_expression_list" expression is evaluated once; it should +yield an *iterable* object. An *iterator* is created for that +iterable. The first item provided by the iterator is then assigned to +the target list using the standard rules for assignments (see +Assignment statements), and the suite is executed. This repeats for +each item provided by the iterator. When the iterator is exhausted, +the suite in the "else" clause, if present, is executed, and the loop +terminates. + +A "break" statement executed in the first suite terminates the loop +without executing the "else" clause’s suite. A "continue" statement +executed in the first suite skips the rest of the suite and continues +with the next item, or with the "else" clause if there is no next +item. + +The for-loop makes assignments to the variables in the target list. +This overwrites all previous assignments to those variables including +those made in the suite of the for-loop: + + for i in range(10): + print(i) + i = 5 # this will not affect the for-loop + # because i will be overwritten with the next + # index in the range + +Names in the target list are not deleted when the loop is finished, +but if the sequence is empty, they will not have been assigned to at +all by the loop. Hint: the built-in type "range()" represents +immutable arithmetic sequences of integers. For instance, iterating +"range(3)" successively yields 0, 1, and then 2. + +Changed in version 3.11: Starred elements are now allowed in the +expression list. + + +The "try" statement +=================== + +The "try" statement specifies exception handlers and/or cleanup code +for a group of statements: + + try_stmt: try1_stmt | try2_stmt | try3_stmt + try1_stmt: "try" ":" suite + ("except" [expression ["as" identifier]] ":" suite)+ + ["else" ":" suite] + ["finally" ":" suite] + try2_stmt: "try" ":" suite + ("except" "*" expression ["as" identifier] ":" suite)+ + ["else" ":" suite] + ["finally" ":" suite] + try3_stmt: "try" ":" suite + "finally" ":" suite + +Additional information on exceptions can be found in section +Exceptions, and information on using the "raise" statement to generate +exceptions may be found in section The raise statement. + +Changed in version 3.14: Support for optionally dropping grouping +parentheses when using multiple exception types. See **PEP 758**. + + +"except" clause +--------------- + +The "except" clause(s) specify one or more exception handlers. When no +exception occurs in the "try" clause, no exception handler is +executed. 
When an exception occurs in the "try" suite, a search for an
+exception handler is started. This search inspects the "except"
+clauses in turn until one is found that matches the exception. An
+expression-less "except" clause, if present, must be last; it matches
+any exception.
+
+For an "except" clause with an expression, the expression must
+evaluate to an exception type or a tuple of exception types.
+Parentheses can be dropped if multiple exception types are provided
+and the "as" clause is not used. The raised exception matches an
+"except" clause whose expression evaluates to the class or a *non-
+virtual base class* of the exception object, or to a tuple that
+contains such a class.
+
+If no "except" clause matches the exception, the search for an
+exception handler continues in the surrounding code and on the
+invocation stack. [1]
+
+If the evaluation of an expression in the header of an "except" clause
+raises an exception, the original search for a handler is canceled and
+a search starts for the new exception in the surrounding code and on
+the call stack (it is treated as if the entire "try" statement raised
+the exception).
+
+When a matching "except" clause is found, the exception is assigned to
+the target specified after the "as" keyword in that "except" clause,
+if present, and the "except" clause’s suite is executed. All "except"
+clauses must have an executable block. When the end of this block is
+reached, execution continues normally after the entire "try"
+statement. (This means that if two nested handlers exist for the same
+exception, and the exception occurs in the "try" clause of the inner
+handler, the outer handler will not handle the exception.)
+
+When an exception has been assigned using "as target", it is cleared
+at the end of the "except" clause. This is as if
+
+   except E as N:
+       foo
+
+was translated to
+
+   except E as N:
+       try:
+           foo
+       finally:
+           del N
+
+This means the exception must be assigned to a different name to be
+able to refer to it after the "except" clause. Exceptions are cleared
+because with the traceback attached to them, they form a reference
+cycle with the stack frame, keeping all locals in that frame alive
+until the next garbage collection occurs.
+
+Before an "except" clause’s suite is executed, the exception is stored
+in the "sys" module, where it can be accessed from within the body of
+the "except" clause by calling "sys.exception()". When leaving an
+exception handler, the exception stored in the "sys" module is reset
+to its previous value:
+
+   >>> print(sys.exception())
+   None
+   >>> try:
+   ...     raise TypeError
+   ... except:
+   ...     print(repr(sys.exception()))
+   ...     try:
+   ...         raise ValueError
+   ...     except:
+   ...         print(repr(sys.exception()))
+   ...     print(repr(sys.exception()))
+   ...
+   TypeError()
+   ValueError()
+   TypeError()
+   >>> print(sys.exception())
+   None
+
+
+"except*" clause
+----------------
+
+The "except*" clause(s) specify one or more handlers for groups of
+exceptions ("BaseExceptionGroup" instances). A "try" statement can
+have either "except" or "except*" clauses, but not both. The exception
+type for matching is mandatory in the case of "except*", so "except*:"
+is a syntax error. The type is interpreted as in the case of "except",
+but matching is performed on the exceptions contained in the group
+that is being handled. A "TypeError" is raised if a matching type is
+a subclass of "BaseExceptionGroup", because that would have ambiguous
+semantics.
+
+When an exception group is raised in the try block, each "except*"
+clause splits (see "split()") it into the subgroups of matching and
+non-matching exceptions. If the matching subgroup is not empty, it
+becomes the handled exception (the value returned from
+"sys.exception()") and is assigned to the target of the "except*"
+clause (if there is one). Then, the body of the "except*" clause
+executes. If the non-matching subgroup is not empty, it is processed
+by the next "except*" in the same manner. This continues until all
+exceptions in the group have been matched, or the last "except*"
+clause has run.
+
+After all "except*" clauses execute, the group of unhandled exceptions
+is merged with any exceptions that were raised or re-raised from
+within "except*" clauses. This merged exception group propagates on:
+
+   >>> try:
+   ...     raise ExceptionGroup("eg",
+   ...         [ValueError(1), TypeError(2), OSError(3), OSError(4)])
+   ... except* TypeError as e:
+   ...     print(f'caught {type(e)} with nested {e.exceptions}')
+   ... except* OSError as e:
+   ...     print(f'caught {type(e)} with nested {e.exceptions}')
+   ...
+   caught <class 'ExceptionGroup'> with nested (TypeError(2),)
+   caught <class 'ExceptionGroup'> with nested (OSError(3), OSError(4))
+     + Exception Group Traceback (most recent call last):
+     |   File "<stdin>", line 2, in <module>
+     |     raise ExceptionGroup("eg",
+     |         [ValueError(1), TypeError(2), OSError(3), OSError(4)])
+     | ExceptionGroup: eg (1 sub-exception)
+     +-+---------------- 1 ----------------
+       | ValueError: 1
+       +------------------------------------
+
+If the exception raised from the "try" block is not an exception group
+and its type matches one of the "except*" clauses, it is caught and
+wrapped by an exception group with an empty message string. This
+ensures that the type of the target "e" is consistently
+"BaseExceptionGroup":
+
+   >>> try:
+   ...     raise BlockingIOError
+   ... except* BlockingIOError as e:
+   ...     print(repr(e))
+   ...
+   ExceptionGroup('', (BlockingIOError(),))
+
+"break", "continue" and "return" cannot appear in an "except*" clause.
+
+
+"else" clause
+-------------
+
+The optional "else" clause is executed if the control flow leaves the
+"try" suite, no exception was raised, and no "return", "continue", or
+"break" statement was executed. Exceptions in the "else" clause are
+not handled by the preceding "except" clauses.
+
+
+"finally" clause
+----------------
+
+If "finally" is present, it specifies a ‘cleanup’ handler. The "try"
+clause is executed, including any "except" and "else" clauses. If an
+exception occurs in any of the clauses and is not handled, the
+exception is temporarily saved. The "finally" clause is executed. If
+there is a saved exception it is re-raised at the end of the "finally"
+clause. If the "finally" clause raises another exception, the saved
+exception is set as the context of the new exception. If the "finally"
+clause executes a "return", "break" or "continue" statement, the saved
+exception is discarded. For example, this function returns 42.
+
+   def f():
+       try:
+           1/0
+       finally:
+           return 42
+
+The exception information is not available to the program during
+execution of the "finally" clause.
+
+When a "return", "break" or "continue" statement is executed in the
+"try" suite of a "try"…"finally" statement, the "finally" clause is
+also executed ‘on the way out.’
+
+The return value of a function is determined by the last "return"
+statement executed. Since the "finally" clause always executes, a
+"return" statement executed in the "finally" clause will always be the
+last one executed.
The following function returns ‘finally’. + + def foo(): + try: + return 'try' + finally: + return 'finally' + +Changed in version 3.8: Prior to Python 3.8, a "continue" statement +was illegal in the "finally" clause due to a problem with the +implementation. + +Changed in version 3.14: The compiler emits a "SyntaxWarning" when a +"return", "break" or "continue" appears in a "finally" block (see +**PEP 765**). + + +The "with" statement +==================== + +The "with" statement is used to wrap the execution of a block with +methods defined by a context manager (see section With Statement +Context Managers). This allows common "try"…"except"…"finally" usage +patterns to be encapsulated for convenient reuse. + + with_stmt: "with" ( "(" with_stmt_contents ","? ")" | with_stmt_contents ) ":" suite + with_stmt_contents: with_item ("," with_item)* + with_item: expression ["as" target] + +The execution of the "with" statement with one “item” proceeds as +follows: + +1. The context expression (the expression given in the "with_item") is + evaluated to obtain a context manager. + +2. The context manager’s "__enter__()" is loaded for later use. + +3. The context manager’s "__exit__()" is loaded for later use. + +4. The context manager’s "__enter__()" method is invoked. + +5. If a target was included in the "with" statement, the return value + from "__enter__()" is assigned to it. + + Note: + + The "with" statement guarantees that if the "__enter__()" method + returns without an error, then "__exit__()" will always be + called. Thus, if an error occurs during the assignment to the + target list, it will be treated the same as an error occurring + within the suite would be. See step 7 below. + +6. The suite is executed. + +7. The context manager’s "__exit__()" method is invoked. If an + exception caused the suite to be exited, its type, value, and + traceback are passed as arguments to "__exit__()". Otherwise, three + "None" arguments are supplied. + + If the suite was exited due to an exception, and the return value + from the "__exit__()" method was false, the exception is reraised. + If the return value was true, the exception is suppressed, and + execution continues with the statement following the "with" + statement. + + If the suite was exited for any reason other than an exception, the + return value from "__exit__()" is ignored, and execution proceeds + at the normal location for the kind of exit that was taken. + +The following code: + + with EXPRESSION as TARGET: + SUITE + +is semantically equivalent to: + + manager = (EXPRESSION) + enter = manager.__enter__ + exit = manager.__exit__ + value = enter() + hit_except = False + + try: + TARGET = value + SUITE + except: + hit_except = True + if not exit(*sys.exc_info()): + raise + finally: + if not hit_except: + exit(None, None, None) + +except that implicit special method lookup is used for "__enter__()" +and "__exit__()". + +With more than one item, the context managers are processed as if +multiple "with" statements were nested: + + with A() as a, B() as b: + SUITE + +is semantically equivalent to: + + with A() as a: + with B() as b: + SUITE + +You can also write multi-item context managers in multiple lines if +the items are surrounded by parentheses. For example: + + with ( + A() as a, + B() as b, + ): + SUITE + +Changed in version 3.1: Support for multiple context expressions. + +Changed in version 3.10: Support for using grouping parentheses to +break the statement in multiple lines. 
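+
+As an illustration of the protocol above, a minimal hand-written
+context manager (hypothetical, for demonstration only):
+
+   >>> class Managed:
+   ...     def __enter__(self):
+   ...         print('enter')
+   ...         return 42              # bound to the "as" target
+   ...     def __exit__(self, exc_type, exc_value, traceback):
+   ...         print('exit')
+   ...         return False           # do not suppress exceptions
+   ...
+   >>> with Managed() as value:
+   ...     print(value)
+   ...
+   enter
+   42
+   exit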
+ +See also: + + **PEP 343** - The “with” statement + The specification, background, and examples for the Python "with" + statement. + + +The "match" statement +===================== + +Added in version 3.10. + +The match statement is used for pattern matching. Syntax: + + match_stmt: 'match' subject_expr ":" NEWLINE INDENT case_block+ DEDENT + subject_expr: `!star_named_expression` "," `!star_named_expressions`? + | `!named_expression` + case_block: 'case' patterns [guard] ":" `!block` + +Note: + + This section uses single quotes to denote soft keywords. + +Pattern matching takes a pattern as input (following "case") and a +subject value (following "match"). The pattern (which may contain +subpatterns) is matched against the subject value. The outcomes are: + +* A match success or failure (also termed a pattern success or + failure). + +* Possible binding of matched values to a name. The prerequisites for + this are further discussed below. + +The "match" and "case" keywords are soft keywords. + +See also: + + * **PEP 634** – Structural Pattern Matching: Specification + + * **PEP 636** – Structural Pattern Matching: Tutorial + + +Overview +-------- + +Here’s an overview of the logical flow of a match statement: + +1. The subject expression "subject_expr" is evaluated and a resulting + subject value obtained. If the subject expression contains a comma, + a tuple is constructed using the standard rules. + +2. Each pattern in a "case_block" is attempted to match with the + subject value. The specific rules for success or failure are + described below. The match attempt can also bind some or all of the + standalone names within the pattern. The precise pattern binding + rules vary per pattern type and are specified below. **Name + bindings made during a successful pattern match outlive the + executed block and can be used after the match statement**. + + Note: + + During failed pattern matches, some subpatterns may succeed. Do + not rely on bindings being made for a failed match. Conversely, + do not rely on variables remaining unchanged after a failed + match. The exact behavior is dependent on implementation and may + vary. This is an intentional decision made to allow different + implementations to add optimizations. + +3. If the pattern succeeds, the corresponding guard (if present) is + evaluated. In this case all name bindings are guaranteed to have + happened. + + * If the guard evaluates as true or is missing, the "block" inside + "case_block" is executed. + + * Otherwise, the next "case_block" is attempted as described above. + + * If there are no further case blocks, the match statement is + completed. + +Note: + + Users should generally never rely on a pattern being evaluated. + Depending on implementation, the interpreter may cache values or use + other optimizations which skip repeated evaluations. + +A sample match statement: + + >>> flag = False + >>> match (100, 200): + ... case (100, 300): # Mismatch: 200 != 300 + ... print('Case 1') + ... case (100, 200) if flag: # Successful match, but guard fails + ... print('Case 2') + ... case (100, y): # Matches and binds y to 200 + ... print(f'Case 3, y: {y}') + ... case _: # Pattern not attempted + ... print('Case 4, I match anything!') + ... + Case 3, y: 200 + +In this case, "if flag" is a guard. Read more about that in the next +section. + + +Guards +------ + + guard: "if" `!named_expression` + +A "guard" (which is part of the "case") must succeed for code inside +the "case" block to execute. 
It takes the form: "if" followed by an
+expression.
+
+The logical flow of a "case" block with a "guard" follows:
+
+1. Check that the pattern in the "case" block succeeded. If the
+   pattern failed, the "guard" is not evaluated and the next "case"
+   block is checked.
+
+2. If the pattern succeeded, evaluate the "guard".
+
+   * If the "guard" condition evaluates as true, the case block is
+     selected.
+
+   * If the "guard" condition evaluates as false, the case block is
+     not selected.
+
+   * If the "guard" raises an exception during evaluation, the
+     exception bubbles up.
+
+Guards are allowed to have side effects as they are expressions.
+Guard evaluation must proceed from the first to the last case block,
+one at a time, skipping case blocks whose pattern(s) don’t all
+succeed. (I.e., guard evaluation must happen in order.) Guard
+evaluation must stop once a case block is selected.
+
+
+Irrefutable Case Blocks
+-----------------------
+
+An irrefutable case block is a match-all case block. A match
+statement may have at most one irrefutable case block, and it must be
+last.
+
+A case block is considered irrefutable if it has no guard and its
+pattern is irrefutable. A pattern is considered irrefutable if we can
+prove from its syntax alone that it will always succeed. Only the
+following patterns are irrefutable:
+
+* AS Patterns whose left-hand side is irrefutable
+
+* OR Patterns containing at least one irrefutable pattern
+
+* Capture Patterns
+
+* Wildcard Patterns
+
+* parenthesized irrefutable patterns
+
+
+Patterns
+--------
+
+Note:
+
+  This section uses grammar notations beyond standard EBNF:
+
+  * the notation "SEP.RULE+" is shorthand for "RULE (SEP RULE)*"
+
+  * the notation "!RULE" is shorthand for a negative lookahead
+    assertion
+
+The top-level syntax for "patterns" is:
+
+   patterns: open_sequence_pattern | pattern
+   pattern: as_pattern | or_pattern
+   closed_pattern: | literal_pattern
+                   | capture_pattern
+                   | wildcard_pattern
+                   | value_pattern
+                   | group_pattern
+                   | sequence_pattern
+                   | mapping_pattern
+                   | class_pattern
+
+The descriptions below will include a description “in simple terms” of
+what a pattern does for illustration purposes (credits to Raymond
+Hettinger for a document that inspired most of the descriptions). Note
+that these descriptions are purely for illustration purposes and **may
+not** reflect the underlying implementation. Furthermore, they do not
+cover all valid forms.
+
+
+OR Patterns
+~~~~~~~~~~~
+
+An OR pattern is two or more patterns separated by vertical bars "|".
+Syntax:
+
+   or_pattern: "|".closed_pattern+
+
+Only the final subpattern may be irrefutable, and each subpattern must
+bind the same set of names to avoid ambiguity.
+
+An OR pattern matches each of its subpatterns in turn to the subject
+value, until one succeeds. The OR pattern is then considered
+successful. Otherwise, if none of the subpatterns succeed, the OR
+pattern fails.
+
+In simple terms, "P1 | P2 | ..." will try to match "P1", if it fails
+it will try to match "P2", succeeding immediately if any succeeds,
+failing otherwise.
+
+
+AS Patterns
+~~~~~~~~~~~
+
+An AS pattern matches an OR pattern on the left of the "as" keyword
+against a subject. Syntax:
+
+   as_pattern: or_pattern "as" capture_pattern
+
+If the OR pattern fails, the AS pattern fails. Otherwise, the AS
+pattern binds the subject to the name on the right of the as keyword
+and succeeds. "capture_pattern" cannot be a "_".
+
+In simple terms "P as NAME" will match with "P", and on success it
+will set "NAME = <subject>".
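+
+For example, a small sketch combining an AS pattern with a guard (the
+names here are illustrative only):
+
+   >>> point = (3, 4)
+   >>> match point:
+   ...     case (x, y) as p if x == y:
+   ...         print(f'diagonal {p}')
+   ...     case (x, y) as p:
+   ...         print(f'point {p}')
+   ...
+   point (3, 4)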
+
+
+Literal Patterns
+~~~~~~~~~~~~~~~~
+
+A literal pattern corresponds to most literals in Python. Syntax:
+
+   literal_pattern: signed_number
+                    | signed_number "+" NUMBER
+                    | signed_number "-" NUMBER
+                    | strings
+                    | "None"
+                    | "True"
+                    | "False"
+   signed_number: ["-"] NUMBER
+
+The rule "strings" and the token "NUMBER" are defined in the standard
+Python grammar. Triple-quoted strings are supported. Raw strings and
+byte strings are supported. f-strings and t-strings are not
+supported.
+
+The forms "signed_number '+' NUMBER" and "signed_number '-' NUMBER"
+are for expressing complex numbers; they require a real number on the
+left and an imaginary number on the right. E.g. "3 + 4j".
+
+In simple terms, "LITERAL" will succeed only if "<subject> ==
+LITERAL". For the singletons "None", "True" and "False", the "is"
+operator is used.
+
+
+Capture Patterns
+~~~~~~~~~~~~~~~~
+
+A capture pattern binds the subject value to a name. Syntax:
+
+   capture_pattern: !'_' NAME
+
+A single underscore "_" is not a capture pattern (this is what "!'_'"
+expresses). It is instead treated as a "wildcard_pattern".
+
+In a given pattern, a given name can only be bound once. E.g. "case
+x, x: ..." is invalid while "case [x] | x: ..." is allowed.
+
+Capture patterns always succeed. The binding follows scoping rules
+established by the assignment expression operator in **PEP 572**; the
+name becomes a local variable in the closest containing function scope
+unless there’s an applicable "global" or "nonlocal" statement.
+
+In simple terms "NAME" will always succeed and it will set "NAME =
+<subject>".
+
+
+Wildcard Patterns
+~~~~~~~~~~~~~~~~~
+
+A wildcard pattern always succeeds (matches anything) and binds no
+name. Syntax:
+
+   wildcard_pattern: '_'
+
+"_" is a soft keyword within any pattern, but only within patterns.
+It is an identifier, as usual, even within "match" subject
+expressions, "guard"s, and "case" blocks.
+
+In simple terms, "_" will always succeed.
+
+
+Value Patterns
+~~~~~~~~~~~~~~
+
+A value pattern represents a named value in Python. Syntax:
+
+   value_pattern: attr
+   attr: name_or_attr "." NAME
+   name_or_attr: attr | NAME
+
+The dotted name in the pattern is looked up using standard Python name
+resolution rules. The pattern succeeds if the value found compares
+equal to the subject value (using the "==" equality operator).
+
+In simple terms "NAME1.NAME2" will succeed only if "<subject> ==
+NAME1.NAME2"
+
+Note:
+
+  If the same value occurs multiple times in the same match statement,
+  the interpreter may cache the first value found and reuse it rather
+  than repeat the same lookup. This cache is strictly tied to a given
+  execution of a given match statement.
+
+
+Group Patterns
+~~~~~~~~~~~~~~
+
+A group pattern allows users to add parentheses around patterns to
+emphasize the intended grouping. Otherwise, it has no additional
+syntax. Syntax:
+
+   group_pattern: "(" pattern ")"
+
+In simple terms "(P)" has the same effect as "P".
+
+
+Sequence Patterns
+~~~~~~~~~~~~~~~~~
+
+A sequence pattern contains several subpatterns to be matched against
+sequence elements. The syntax is similar to the unpacking of a list or
+tuple.
+
+   sequence_pattern: "[" [maybe_sequence_pattern] "]"
+                     | "(" [open_sequence_pattern] ")"
+   open_sequence_pattern: maybe_star_pattern "," [maybe_sequence_pattern]
+   maybe_sequence_pattern: ",".maybe_star_pattern+ ","?
+   maybe_star_pattern: star_pattern | pattern
+   star_pattern: "*" (capture_pattern | wildcard_pattern)
+
+There is no difference if parentheses or square brackets are used for
+sequence patterns (i.e. "(...)" vs "[...]" ).
+
+Note:
+
+  A single pattern enclosed in parentheses without a trailing comma
+  (e.g. "(3 | 4)") is a group pattern. While a single pattern enclosed
+  in square brackets (e.g. "[3 | 4]") is still a sequence pattern.
+
+At most one star subpattern may be in a sequence pattern. The star
+subpattern may occur in any position. If no star subpattern is
+present, the sequence pattern is a fixed-length sequence pattern;
+otherwise it is a variable-length sequence pattern.
+
+The following is the logical flow for matching a sequence pattern
+against a subject value:
+
+1. If the subject value is not a sequence [2], the sequence pattern
+   fails.
+
+2. If the subject value is an instance of "str", "bytes" or
+   "bytearray" the sequence pattern fails.
+
+3. The subsequent steps depend on whether the sequence pattern is
+   fixed or variable-length.
+
+   If the sequence pattern is fixed-length:
+
+   1. If the length of the subject sequence is not equal to the number
+      of subpatterns, the sequence pattern fails
+
+   2. Subpatterns in the sequence pattern are matched to their
+      corresponding items in the subject sequence from left to right.
+      Matching stops as soon as a subpattern fails. If all
+      subpatterns succeed in matching their corresponding item, the
+      sequence pattern succeeds.
+
+   Otherwise, if the sequence pattern is variable-length:
+
+   1. If the length of the subject sequence is less than the number of
+      non-star subpatterns, the sequence pattern fails.
+
+   2. The leading non-star subpatterns are matched to their
+      corresponding items as for fixed-length sequences.
+
+   3. If the previous step succeeds, the star subpattern matches a
+      list formed of the remaining subject items, excluding the
+      remaining items corresponding to non-star subpatterns following
+      the star subpattern.
+
+   4. Remaining non-star subpatterns are matched to their
+      corresponding subject items, as for a fixed-length sequence.
+
+   Note:
+
+     The length of the subject sequence is obtained via "len()" (i.e.
+     via the "__len__()" protocol). This length may be cached by the
+     interpreter in a similar manner as value patterns.
+
+In simple terms "[P1, P2, P3," … ", P<N>]" matches only if all the
+following happens:
+
+* check "<subject>" is a sequence
+
+* "len(subject) == <N>"
+
+* "P1" matches "<subject>[0]" (note that this match can also bind
+  names)
+
+* "P2" matches "<subject>[1]" (note that this match can also bind
+  names)
+
+* … and so on for the corresponding pattern/element.
+
+
+Mapping Patterns
+~~~~~~~~~~~~~~~~
+
+A mapping pattern contains one or more key-value patterns. The syntax
+is similar to the construction of a dictionary. Syntax:
+
+   mapping_pattern: "{" [items_pattern] "}"
+   items_pattern: ",".key_value_pattern+ ","?
+   key_value_pattern: (literal_pattern | value_pattern) ":" pattern
+                      | double_star_pattern
+   double_star_pattern: "**" capture_pattern
+
+At most one double star pattern may be in a mapping pattern. The
+double star pattern must be the last subpattern in the mapping
+pattern.
+
+Duplicate keys in mapping patterns are disallowed. Duplicate literal
+keys will raise a "SyntaxError". Two keys that otherwise have the same
+value will raise a "ValueError" at runtime.
+
+The following is the logical flow for matching a mapping pattern
+against a subject value:
+
+1. 
If the subject value is not a mapping [3], the mapping pattern
+   fails.
+
+2. If every key given in the mapping pattern is present in the subject
+   mapping, and the pattern for each key matches the corresponding
+   item of the subject mapping, the mapping pattern succeeds.
+
+3. If duplicate keys are detected in the mapping pattern, the pattern
+   is considered invalid. A "SyntaxError" is raised for duplicate
+   literal values; or a "ValueError" for named keys of the same value.
+
+Note:
+
+  Key-value pairs are matched using the two-argument form of the
+  mapping subject’s "get()" method. Matched key-value pairs must
+  already be present in the mapping, and not created on-the-fly via
+  "__missing__()" or "__getitem__()".
+
+In simple terms "{KEY1: P1, KEY2: P2, ... }" matches only if all the
+following happens:
+
+* check "<subject>" is a mapping
+
+* "KEY1 in <subject>"
+
+* "P1" matches "<subject>[KEY1]"
+
+* … and so on for the corresponding KEY/pattern pair.
+
+
+Class Patterns
+~~~~~~~~~~~~~~
+
+A class pattern represents a class and its positional and keyword
+arguments (if any). Syntax:
+
+   class_pattern: name_or_attr "(" [pattern_arguments ","?] ")"
+   pattern_arguments: positional_patterns ["," keyword_patterns]
+                      | keyword_patterns
+   positional_patterns: ",".pattern+
+   keyword_patterns: ",".keyword_pattern+
+   keyword_pattern: NAME "=" pattern
+
+The same keyword should not be repeated in class patterns.
+
+The following is the logical flow for matching a class pattern against
+a subject value:
+
+1. If "name_or_attr" is not an instance of the builtin "type", raise
+   "TypeError".
+
+2. If the subject value is not an instance of "name_or_attr" (tested
+   via "isinstance()"), the class pattern fails.
+
+3. If no pattern arguments are present, the pattern succeeds.
+   Otherwise, the subsequent steps depend on whether keyword or
+   positional argument patterns are present.
+
+   For a number of built-in types (specified below), a single
+   positional subpattern is accepted which will match the entire
+   subject; for these types keyword patterns also work as for other
+   types.
+
+   If only keyword patterns are present, they are processed as
+   follows, one by one:
+
+   1. The keyword is looked up as an attribute on the subject.
+
+      * If this raises an exception other than "AttributeError", the
+        exception bubbles up.
+
+      * If this raises "AttributeError", the class pattern has failed.
+
+      * Else, the subpattern associated with the keyword pattern is
+        matched against the subject’s attribute value. If this fails,
+        the class pattern fails; if this succeeds, the match proceeds
+        to the next keyword.
+
+   2. If all keyword patterns succeed, the class pattern succeeds.
+
+   If any positional patterns are present, they are converted to
+   keyword patterns using the "__match_args__" attribute on the class
+   "name_or_attr" before matching:
+
+   1. The equivalent of "getattr(cls, "__match_args__", ())" is
+      called.
+
+      * If this raises an exception, the exception bubbles up.
+
+      * If the returned value is not a tuple, the conversion fails and
+        "TypeError" is raised.
+
+      * If there are more positional patterns than
+        "len(cls.__match_args__)", "TypeError" is raised.
+
+      * Otherwise, positional pattern "i" is converted to a keyword
+        pattern using "__match_args__[i]" as the keyword.
+        "__match_args__[i]" must be a string; if not "TypeError" is
+        raised.
+
+      * If there are duplicate keywords, "TypeError" is raised.
+
+   See also:
+
+     Customizing positional arguments in class pattern matching
+
+   2. 
Once all positional patterns have been converted to keyword
+      patterns, the match proceeds as if there were only keyword
+      patterns.
+
+   For the following built-in types the handling of positional
+   subpatterns is different:
+
+   * "bool"
+
+   * "bytearray"
+
+   * "bytes"
+
+   * "dict"
+
+   * "float"
+
+   * "frozenset"
+
+   * "int"
+
+   * "list"
+
+   * "set"
+
+   * "str"
+
+   * "tuple"
+
+   These classes accept a single positional argument, and the pattern
+   there is matched against the whole object rather than an attribute.
+   For example "int(0|1)" matches the value "0", but not the value
+   "0.0".
+
+In simple terms "CLS(P1, attr=P2)" matches only if the following
+happens:
+
+* "isinstance(<subject>, CLS)"
+
+* convert "P1" to a keyword pattern using "CLS.__match_args__"
+
+* For each keyword argument "attr=P2":
+
+  * "hasattr(<subject>, "attr")"
+
+  * "P2" matches "<subject>.attr"
+
+* … and so on for the corresponding keyword argument/pattern pair.
+
+See also:
+
+  * **PEP 634** – Structural Pattern Matching: Specification
+
+  * **PEP 636** – Structural Pattern Matching: Tutorial
+
+
+Function definitions
+====================
+
+A function definition defines a user-defined function object (see
+section The standard type hierarchy):
+
+   funcdef: [decorators] "def" funcname [type_params] "(" [parameter_list] ")"
+            ["->" expression] ":" suite
+   decorators: decorator+
+   decorator: "@" assignment_expression NEWLINE
+   parameter_list: defparameter ("," defparameter)* "," "/" ["," [parameter_list_no_posonly]]
+                   | parameter_list_no_posonly
+   parameter_list_no_posonly: defparameter ("," defparameter)* ["," [parameter_list_starargs]]
+                              | parameter_list_starargs
+   parameter_list_starargs: "*" [star_parameter] ("," defparameter)* ["," [parameter_star_kwargs]]
+                            | "*" ("," defparameter)+ ["," [parameter_star_kwargs]]
+                            | parameter_star_kwargs
+   parameter_star_kwargs: "**" parameter [","]
+   parameter: identifier [":" expression]
+   star_parameter: identifier [":" ["*"] expression]
+   defparameter: parameter ["=" expression]
+   funcname: identifier
+
+A function definition is an executable statement. Its execution binds
+the function name in the current local namespace to a function object
+(a wrapper around the executable code for the function). This
+function object contains a reference to the current global namespace
+as the global namespace to be used when the function is called.
+
+The function definition does not execute the function body; this gets
+executed only when the function is called. [4]
+
+A function definition may be wrapped by one or more *decorator*
+expressions. Decorator expressions are evaluated when the function is
+defined, in the scope that contains the function definition. The
+result must be a callable, which is invoked with the function object
+as the only argument. The returned value is bound to the function name
+instead of the function object. Multiple decorators are applied in
+nested fashion. For example, the following code
+
+   @f1(arg)
+   @f2
+   def func(): pass
+
+is roughly equivalent to
+
+   def func(): pass
+   func = f1(arg)(f2(func))
+
+except that the original function is not temporarily bound to the name
+"func".
+
+Changed in version 3.9: Functions may be decorated with any valid
+"assignment_expression". Previously, the grammar was much more
+restrictive; see **PEP 614** for details.
+
+A list of type parameters may be given in square brackets between the
+function’s name and the opening parenthesis for its parameter list.
+This indicates to static type checkers that the function is generic.
+At runtime, the type parameters can be retrieved from the function’s +"__type_params__" attribute. See Generic functions for more. + +Changed in version 3.12: Type parameter lists are new in Python 3.12. + +When one or more *parameters* have the form *parameter* "=" +*expression*, the function is said to have “default parameter values.” +For a parameter with a default value, the corresponding *argument* may +be omitted from a call, in which case the parameter’s default value is +substituted. If a parameter has a default value, all following +parameters up until the “"*"” must also have a default value — this is +a syntactic restriction that is not expressed by the grammar. + +**Default parameter values are evaluated from left to right when the +function definition is executed.** This means that the expression is +evaluated once, when the function is defined, and that the same “pre- +computed” value is used for each call. This is especially important +to understand when a default parameter value is a mutable object, such +as a list or a dictionary: if the function modifies the object (e.g. +by appending an item to a list), the default parameter value is in +effect modified. This is generally not what was intended. A way +around this is to use "None" as the default, and explicitly test for +it in the body of the function, e.g.: + + def whats_on_the_telly(penguin=None): + if penguin is None: + penguin = [] + penguin.append("property of the zoo") + return penguin + +Function call semantics are described in more detail in section Calls. +A function call always assigns values to all parameters mentioned in +the parameter list, either from positional arguments, from keyword +arguments, or from default values. If the form “"*identifier"” is +present, it is initialized to a tuple receiving any excess positional +parameters, defaulting to the empty tuple. If the form +“"**identifier"” is present, it is initialized to a new ordered +mapping receiving any excess keyword arguments, defaulting to a new +empty mapping of the same type. Parameters after “"*"” or +“"*identifier"” are keyword-only parameters and may only be passed by +keyword arguments. Parameters before “"/"” are positional-only +parameters and may only be passed by positional arguments. + +Changed in version 3.8: The "/" function parameter syntax may be used +to indicate positional-only parameters. See **PEP 570** for details. + +Parameters may have an *annotation* of the form “": expression"” +following the parameter name. Any parameter may have an annotation, +even those of the form "*identifier" or "**identifier". (As a special +case, parameters of the form "*identifier" may have an annotation “": +*expression"”.) Functions may have “return” annotation of the form +“"-> expression"” after the parameter list. These annotations can be +any valid Python expression. The presence of annotations does not +change the semantics of a function. See Annotations for more +information on annotations. + +Changed in version 3.11: Parameters of the form “"*identifier"” may +have an annotation “": *expression"”. See **PEP 646**. + +It is also possible to create anonymous functions (functions not bound +to a name), for immediate use in expressions. This uses lambda +expressions, described in section Lambdas. Note that the lambda +expression is merely a shorthand for a simplified function definition; +a function defined in a “"def"” statement can be passed around or +assigned to another name just like a function defined by a lambda +expression. 
The “"def"” form is actually more powerful since it +allows the execution of multiple statements and annotations. + +**Programmer’s note:** Functions are first-class objects. A “"def"” +statement executed inside a function definition defines a local +function that can be returned or passed around. Free variables used +in the nested function can access the local variables of the function +containing the def. See section Naming and binding for details. + +See also: + + **PEP 3107** - Function Annotations + The original specification for function annotations. + + **PEP 484** - Type Hints + Definition of a standard meaning for annotations: type hints. + + **PEP 526** - Syntax for Variable Annotations + Ability to type hint variable declarations, including class + variables and instance variables. + + **PEP 563** - Postponed Evaluation of Annotations + Support for forward references within annotations by preserving + annotations in a string form at runtime instead of eager + evaluation. + + **PEP 318** - Decorators for Functions and Methods + Function and method decorators were introduced. Class decorators + were introduced in **PEP 3129**. + + +Class definitions +================= + +A class definition defines a class object (see section The standard +type hierarchy): + + classdef: [decorators] "class" classname [type_params] [inheritance] ":" suite + inheritance: "(" [argument_list] ")" + classname: identifier + +A class definition is an executable statement. The inheritance list +usually gives a list of base classes (see Metaclasses for more +advanced uses), so each item in the list should evaluate to a class +object which allows subclassing. Classes without an inheritance list +inherit, by default, from the base class "object"; hence, + + class Foo: + pass + +is equivalent to + + class Foo(object): + pass + +The class’s suite is then executed in a new execution frame (see +Naming and binding), using a newly created local namespace and the +original global namespace. (Usually, the suite contains mostly +function definitions.) When the class’s suite finishes execution, its +execution frame is discarded but its local namespace is saved. [5] A +class object is then created using the inheritance list for the base +classes and the saved local namespace for the attribute dictionary. +The class name is bound to this class object in the original local +namespace. + +The order in which attributes are defined in the class body is +preserved in the new class’s "__dict__". Note that this is reliable +only right after the class is created and only for classes that were +defined using the definition syntax. + +Class creation can be customized heavily using metaclasses. + +Classes can also be decorated: just like when decorating functions, + + @f1(arg) + @f2 + class Foo: pass + +is roughly equivalent to + + class Foo: pass + Foo = f1(arg)(f2(Foo)) + +The evaluation rules for the decorator expressions are the same as for +function decorators. The result is then bound to the class name. + +Changed in version 3.9: Classes may be decorated with any valid +"assignment_expression". Previously, the grammar was much more +restrictive; see **PEP 614** for details. + +A list of type parameters may be given in square brackets immediately +after the class’s name. This indicates to static type checkers that +the class is generic. At runtime, the type parameters can be retrieved +from the class’s "__type_params__" attribute. See Generic classes for +more. 
+ +Changed in version 3.12: Type parameter lists are new in Python 3.12. + +**Programmer’s note:** Variables defined in the class definition are +class attributes; they are shared by instances. Instance attributes +can be set in a method with "self.name = value". Both class and +instance attributes are accessible through the notation “"self.name"”, +and an instance attribute hides a class attribute with the same name +when accessed in this way. Class attributes can be used as defaults +for instance attributes, but using mutable values there can lead to +unexpected results. Descriptors can be used to create instance +variables with different implementation details. + +See also: + + **PEP 3115** - Metaclasses in Python 3000 + The proposal that changed the declaration of metaclasses to the + current syntax, and the semantics for how classes with + metaclasses are constructed. + + **PEP 3129** - Class Decorators + The proposal that added class decorators. Function and method + decorators were introduced in **PEP 318**. + + +Coroutines +========== + +Added in version 3.5. + + +Coroutine function definition +----------------------------- + + async_funcdef: [decorators] "async" "def" funcname "(" [parameter_list] ")" + ["->" expression] ":" suite + +Execution of Python coroutines can be suspended and resumed at many +points (see *coroutine*). "await" expressions, "async for" and "async +with" can only be used in the body of a coroutine function. + +Functions defined with "async def" syntax are always coroutine +functions, even if they do not contain "await" or "async" keywords. + +It is a "SyntaxError" to use a "yield from" expression inside the body +of a coroutine function. + +An example of a coroutine function: + + async def func(param1, param2): + do_stuff() + await some_coroutine() + +Changed in version 3.7: "await" and "async" are now keywords; +previously they were only treated as such inside the body of a +coroutine function. + + +The "async for" statement +------------------------- + + async_for_stmt: "async" for_stmt + +An *asynchronous iterable* provides an "__aiter__" method that +directly returns an *asynchronous iterator*, which can call +asynchronous code in its "__anext__" method. + +The "async for" statement allows convenient iteration over +asynchronous iterables. + +The following code: + + async for TARGET in ITER: + SUITE + else: + SUITE2 + +Is semantically equivalent to: + + iter = (ITER).__aiter__() + running = True + + while running: + try: + TARGET = await iter.__anext__() + except StopAsyncIteration: + running = False + else: + SUITE + else: + SUITE2 + +except that implicit special method lookup is used for "__aiter__()" +and "__anext__()". + +It is a "SyntaxError" to use an "async for" statement outside the body +of a coroutine function. + + +The "async with" statement +-------------------------- + + async_with_stmt: "async" with_stmt + +An *asynchronous context manager* is a *context manager* that is able +to suspend execution in its *enter* and *exit* methods. + +The following code: + + async with EXPRESSION as TARGET: + SUITE + +is semantically equivalent to: + + manager = (EXPRESSION) + aenter = manager.__aenter__ + aexit = manager.__aexit__ + value = await aenter() + hit_except = False + + try: + TARGET = value + SUITE + except: + hit_except = True + if not await aexit(*sys.exc_info()): + raise + finally: + if not hit_except: + await aexit(None, None, None) + +except that implicit special method lookup is used for "__aenter__()" +and "__aexit__()". 
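+
+As an illustrative sketch (the class and function names here are
+arbitrary), an asynchronous context manager defines "__aenter__()" and
+"__aexit__()" as coroutine methods, either of which may suspend:
+
+   import asyncio
+
+   class AsyncResource:
+       async def __aenter__(self):
+           await asyncio.sleep(0)  # may suspend before entering
+           return self
+
+       async def __aexit__(self, exc_type, exc_value, traceback):
+           await asyncio.sleep(0)  # may suspend before exiting
+           return False  # a false value does not suppress exceptions
+
+   async def main():
+       async with AsyncResource() as res:
+           print(type(res).__name__)
+
+   asyncio.run(main())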
+ +It is a "SyntaxError" to use an "async with" statement outside the +body of a coroutine function. + +See also: + + **PEP 492** - Coroutines with async and await syntax + The proposal that made coroutines a proper standalone concept in + Python, and added supporting syntax. + + +Type parameter lists +==================== + +Added in version 3.12. + +Changed in version 3.13: Support for default values was added (see +**PEP 696**). + + type_params: "[" type_param ("," type_param)* "]" + type_param: typevar | typevartuple | paramspec + typevar: identifier (":" expression)? ("=" expression)? + typevartuple: "*" identifier ("=" expression)? + paramspec: "**" identifier ("=" expression)? + +Functions (including coroutines), classes and type aliases may contain +a type parameter list: + + def max[T](args: list[T]) -> T: + ... + + async def amax[T](args: list[T]) -> T: + ... + + class Bag[T]: + def __iter__(self) -> Iterator[T]: + ... + + def add(self, arg: T) -> None: + ... + + type ListOrSet[T] = list[T] | set[T] + +Semantically, this indicates that the function, class, or type alias +is generic over a type variable. This information is primarily used by +static type checkers, and at runtime, generic objects behave much like +their non-generic counterparts. + +Type parameters are declared in square brackets ("[]") immediately +after the name of the function, class, or type alias. The type +parameters are accessible within the scope of the generic object, but +not elsewhere. Thus, after a declaration "def func[T](): pass", the +name "T" is not available in the module scope. Below, the semantics of +generic objects are described with more precision. The scope of type +parameters is modeled with a special function (technically, an +annotation scope) that wraps the creation of the generic object. + +Generic functions, classes, and type aliases have a "__type_params__" +attribute listing their type parameters. + +Type parameters come in three kinds: + +* "typing.TypeVar", introduced by a plain name (e.g., "T"). + Semantically, this represents a single type to a type checker. + +* "typing.TypeVarTuple", introduced by a name prefixed with a single + asterisk (e.g., "*Ts"). Semantically, this stands for a tuple of any + number of types. + +* "typing.ParamSpec", introduced by a name prefixed with two asterisks + (e.g., "**P"). Semantically, this stands for the parameters of a + callable. + +"typing.TypeVar" declarations can define *bounds* and *constraints* +with a colon (":") followed by an expression. A single expression +after the colon indicates a bound (e.g. "T: int"). Semantically, this +means that the "typing.TypeVar" can only represent types that are a +subtype of this bound. A parenthesized tuple of expressions after the +colon indicates a set of constraints (e.g. "T: (str, bytes)"). Each +member of the tuple should be a type (again, this is not enforced at +runtime). Constrained type variables can only take on one of the types +in the list of constraints. + +For "typing.TypeVar"s declared using the type parameter list syntax, +the bound and constraints are not evaluated when the generic object is +created, but only when the value is explicitly accessed through the +attributes "__bound__" and "__constraints__". To accomplish this, the +bounds or constraints are evaluated in a separate annotation scope. + +"typing.TypeVarTuple"s and "typing.ParamSpec"s cannot have bounds or +constraints. 
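+
+For example (an illustrative sketch; the function name "clamp" is
+arbitrary), a constraint tuple declared in a type parameter list is
+evaluated lazily and can be retrieved through "__constraints__":
+
+   >>> def clamp[T: (int, float)](value: T) -> T:
+   ...     return value
+   ...
+   >>> T = clamp.__type_params__[0]
+   >>> T.__constraints__
+   (<class 'int'>, <class 'float'>)
+   >>> T.__bound__ is None
+   True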
+
+All three flavors of type parameters can also have a *default value*,
+which is used when the type parameter is not explicitly provided. This
+is added by appending a single equals sign ("=") followed by an
+expression. Like the bounds and constraints of type variables, the
+default value is not evaluated when the object is created, but only
+when the type parameter’s "__default__" attribute is accessed. To this
+end, the default value is evaluated in a separate annotation scope. If
+no default value is specified for a type parameter, the "__default__"
+attribute is set to the special sentinel object "typing.NoDefault".
+
+The following example indicates the full set of allowed type parameter
+declarations:
+
+   def overly_generic[
+      SimpleTypeVar,
+      TypeVarWithDefault = int,
+      TypeVarWithBound: int,
+      TypeVarWithConstraints: (str, bytes),
+      *SimpleTypeVarTuple = (int, float),
+      **SimpleParamSpec = (str, bytearray),
+   ](
+      a: SimpleTypeVar,
+      b: TypeVarWithDefault,
+      c: TypeVarWithBound,
+      d: Callable[SimpleParamSpec, TypeVarWithConstraints],
+      *e: SimpleTypeVarTuple,
+   ): ...
+
+
+Generic functions
+-----------------
+
+Generic functions are declared as follows:
+
+   def func[T](arg: T): ...
+
+This syntax is equivalent to:
+
+   annotation-def TYPE_PARAMS_OF_func():
+       T = typing.TypeVar("T")
+       def func(arg: T): ...
+       func.__type_params__ = (T,)
+       return func
+   func = TYPE_PARAMS_OF_func()
+
+Here "annotation-def" indicates an annotation scope, which is not
+actually bound to any name at runtime. (One other liberty is taken in
+the translation: the syntax does not go through attribute access on
+the "typing" module, but creates an instance of "typing.TypeVar"
+directly.)
+
+The annotations of generic functions are evaluated within the
+annotation scope used for declaring the type parameters, but the
+function’s defaults and decorators are not.
+
+The following example illustrates the scoping rules for these cases,
+as well as for additional flavors of type parameters:
+
+   @decorator
+   def func[T: int, *Ts, **P](*args: *Ts, arg: Callable[P, T] = some_default):
+       ...
+
+Except for the lazy evaluation of the "TypeVar" bound, this is
+equivalent to:
+
+   DEFAULT_OF_arg = some_default
+
+   annotation-def TYPE_PARAMS_OF_func():
+
+       annotation-def BOUND_OF_T():
+           return int
+       # In reality, BOUND_OF_T() is evaluated only on demand.
+       T = typing.TypeVar("T", bound=BOUND_OF_T())
+
+       Ts = typing.TypeVarTuple("Ts")
+       P = typing.ParamSpec("P")
+
+       def func(*args: *Ts, arg: Callable[P, T] = DEFAULT_OF_arg):
+           ...
+
+       func.__type_params__ = (T, Ts, P)
+       return func
+   func = decorator(TYPE_PARAMS_OF_func())
+
+The capitalized names like "DEFAULT_OF_arg" are not actually bound at
+runtime.
+
+
+Generic classes
+---------------
+
+Generic classes are declared as follows:
+
+   class Bag[T]: ...
+
+This syntax is equivalent to:
+
+   annotation-def TYPE_PARAMS_OF_Bag():
+       T = typing.TypeVar("T")
+       class Bag(typing.Generic[T]):
+           __type_params__ = (T,)
+           ...
+       return Bag
+   Bag = TYPE_PARAMS_OF_Bag()
+
+Here again "annotation-def" (not a real keyword) indicates an
+annotation scope, and the name "TYPE_PARAMS_OF_Bag" is not actually
+bound at runtime.
+
+Generic classes implicitly inherit from "typing.Generic". The base
+classes and keyword arguments of generic classes are evaluated within
+the type scope for the type parameters, and decorators are evaluated
+outside that scope. This is illustrated by this example:
+
+   @decorator
+   class Bag[T](Base[T], arg=T): ...
+ +This is equivalent to: + + annotation-def TYPE_PARAMS_OF_Bag(): + T = typing.TypeVar("T") + class Bag(Base[T], typing.Generic[T], arg=T): + __type_params__ = (T,) + ... + return Bag + Bag = decorator(TYPE_PARAMS_OF_Bag()) + + +Generic type aliases +-------------------- + +The "type" statement can also be used to create a generic type alias: + + type ListOrSet[T] = list[T] | set[T] + +Except for the lazy evaluation of the value, this is equivalent to: + + annotation-def TYPE_PARAMS_OF_ListOrSet(): + T = typing.TypeVar("T") + + annotation-def VALUE_OF_ListOrSet(): + return list[T] | set[T] + # In reality, the value is lazily evaluated + return typing.TypeAliasType("ListOrSet", VALUE_OF_ListOrSet(), type_params=(T,)) + ListOrSet = TYPE_PARAMS_OF_ListOrSet() + +Here, "annotation-def" (not a real keyword) indicates an annotation +scope. The capitalized names like "TYPE_PARAMS_OF_ListOrSet" are not +actually bound at runtime. + + +Annotations +=========== + +Changed in version 3.14: Annotations are now lazily evaluated by +default. + +Variables and function parameters may carry *annotations*, created by +adding a colon after the name, followed by an expression: + + x: annotation = 1 + def f(param: annotation): ... + +Functions may also carry a return annotation following an arrow: + + def f() -> annotation: ... + +Annotations are conventionally used for *type hints*, but this is not +enforced by the language, and in general annotations may contain +arbitrary expressions. The presence of annotations does not change the +runtime semantics of the code, except if some mechanism is used that +introspects and uses the annotations (such as "dataclasses" or +"functools.singledispatch()"). + +By default, annotations are lazily evaluated in an annotation scope. +This means that they are not evaluated when the code containing the +annotation is evaluated. Instead, the interpreter saves information +that can be used to evaluate the annotation later if requested. The +"annotationlib" module provides tools for evaluating annotations. + +If the future statement "from __future__ import annotations" is +present, all annotations are instead stored as strings: + + >>> from __future__ import annotations + >>> def f(param: annotation): ... + >>> f.__annotations__ + {'param': 'annotation'} + +This future statement will be deprecated and removed in a future +version of Python, but not before Python 3.13 reaches its end of life +(see **PEP 749**). When it is used, introspection tools like +"annotationlib.get_annotations()" and "typing.get_type_hints()" are +less likely to be able to resolve annotations at runtime. + +-[ Footnotes ]- + +[1] The exception is propagated to the invocation stack unless there + is a "finally" clause which happens to raise another exception. + That new exception causes the old one to be lost. + +[2] In pattern matching, a sequence is defined as one of the + following: + + * a class that inherits from "collections.abc.Sequence" + + * a Python class that has been registered as + "collections.abc.Sequence" + + * a builtin class that has its (CPython) "Py_TPFLAGS_SEQUENCE" bit + set + + * a class that inherits from any of the above + + The following standard library classes are sequences: + + * "array.array" + + * "collections.deque" + + * "list" + + * "memoryview" + + * "range" + + * "tuple" + + Note: + + Subject values of type "str", "bytes", and "bytearray" do not + match sequence patterns. 
+ +[3] In pattern matching, a mapping is defined as one of the following: + + * a class that inherits from "collections.abc.Mapping" + + * a Python class that has been registered as + "collections.abc.Mapping" + + * a builtin class that has its (CPython) "Py_TPFLAGS_MAPPING" bit + set + + * a class that inherits from any of the above + + The standard library classes "dict" and "types.MappingProxyType" + are mappings. + +[4] A string literal appearing as the first statement in the function + body is transformed into the function’s "__doc__" attribute and + therefore the function’s *docstring*. + +[5] A string literal appearing as the first statement in the class + body is transformed into the namespace’s "__doc__" item and + therefore the class’s *docstring*. +''', + 'context-managers': r'''With Statement Context Managers +******************************* + +A *context manager* is an object that defines the runtime context to +be established when executing a "with" statement. The context manager +handles the entry into, and the exit from, the desired runtime context +for the execution of the block of code. Context managers are normally +invoked using the "with" statement (described in section The with +statement), but can also be used by directly invoking their methods. + +Typical uses of context managers include saving and restoring various +kinds of global state, locking and unlocking resources, closing opened +files, etc. + +For more information on context managers, see Context Manager Types. +The "object" class itself does not provide the context manager +methods. + +object.__enter__(self) + + Enter the runtime context related to this object. The "with" + statement will bind this method’s return value to the target(s) + specified in the "as" clause of the statement, if any. + +object.__exit__(self, exc_type, exc_value, traceback) + + Exit the runtime context related to this object. The parameters + describe the exception that caused the context to be exited. If the + context was exited without an exception, all three arguments will + be "None". + + If an exception is supplied, and the method wishes to suppress the + exception (i.e., prevent it from being propagated), it should + return a true value. Otherwise, the exception will be processed + normally upon exit from this method. + + Note that "__exit__()" methods should not reraise the passed-in + exception; this is the caller’s responsibility. + +See also: + + **PEP 343** - The “with” statement + The specification, background, and examples for the Python "with" + statement. +''', + 'continue': r'''The "continue" statement +************************ + + continue_stmt: "continue" + +"continue" may only occur syntactically nested in a "for" or "while" +loop, but not nested in a function or class definition within that +loop. It continues with the next cycle of the nearest enclosing loop. + +When "continue" passes control out of a "try" statement with a +"finally" clause, that "finally" clause is executed before really +starting the next loop cycle. +''', + 'conversions': r'''Arithmetic conversions +********************** + +When a description of an arithmetic operator below uses the phrase +“the numeric arguments are converted to a common real type”, this +means that the operator implementation for built-in numeric types +works as described in the Numeric Types section of the standard +library documentation. 
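+
+For example, when an "int" and a "float" are mixed, the integer
+operand is converted and the result is a "float":
+
+   >>> 3 + 4.25
+   7.25
+   >>> type(3 + 4.25)
+   <class 'float'>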
+ +Some additional rules apply for certain operators and non-numeric +operands (for example, a string as a left argument to the "%" +operator). Extensions must define their own conversion behavior. +''', + 'customization': r'''Basic customization +******************* + +object.__new__(cls[, ...]) + + Called to create a new instance of class *cls*. "__new__()" is a + static method (special-cased so you need not declare it as such) + that takes the class of which an instance was requested as its + first argument. The remaining arguments are those passed to the + object constructor expression (the call to the class). The return + value of "__new__()" should be the new object instance (usually an + instance of *cls*). + + Typical implementations create a new instance of the class by + invoking the superclass’s "__new__()" method using + "super().__new__(cls[, ...])" with appropriate arguments and then + modifying the newly created instance as necessary before returning + it. + + If "__new__()" is invoked during object construction and it returns + an instance of *cls*, then the new instance’s "__init__()" method + will be invoked like "__init__(self[, ...])", where *self* is the + new instance and the remaining arguments are the same as were + passed to the object constructor. + + If "__new__()" does not return an instance of *cls*, then the new + instance’s "__init__()" method will not be invoked. + + "__new__()" is intended mainly to allow subclasses of immutable + types (like int, str, or tuple) to customize instance creation. It + is also commonly overridden in custom metaclasses in order to + customize class creation. + +object.__init__(self[, ...]) + + Called after the instance has been created (by "__new__()"), but + before it is returned to the caller. The arguments are those + passed to the class constructor expression. If a base class has an + "__init__()" method, the derived class’s "__init__()" method, if + any, must explicitly call it to ensure proper initialization of the + base class part of the instance; for example: + "super().__init__([args...])". + + Because "__new__()" and "__init__()" work together in constructing + objects ("__new__()" to create it, and "__init__()" to customize + it), no non-"None" value may be returned by "__init__()"; doing so + will cause a "TypeError" to be raised at runtime. + +object.__del__(self) + + Called when the instance is about to be destroyed. This is also + called a finalizer or (improperly) a destructor. If a base class + has a "__del__()" method, the derived class’s "__del__()" method, + if any, must explicitly call it to ensure proper deletion of the + base class part of the instance. + + It is possible (though not recommended!) for the "__del__()" method + to postpone destruction of the instance by creating a new reference + to it. This is called object *resurrection*. It is + implementation-dependent whether "__del__()" is called a second + time when a resurrected object is about to be destroyed; the + current *CPython* implementation only calls it once. + + It is not guaranteed that "__del__()" methods are called for + objects that still exist when the interpreter exits. + "weakref.finalize" provides a straightforward way to register a + cleanup function to be called when an object is garbage collected. + + Note: + + "del x" doesn’t directly call "x.__del__()" — the former + decrements the reference count for "x" by one, and the latter is + only called when "x"’s reference count reaches zero. 
+ + **CPython implementation detail:** It is possible for a reference + cycle to prevent the reference count of an object from going to + zero. In this case, the cycle will be later detected and deleted + by the *cyclic garbage collector*. A common cause of reference + cycles is when an exception has been caught in a local variable. + The frame’s locals then reference the exception, which references + its own traceback, which references the locals of all frames caught + in the traceback. + + See also: Documentation for the "gc" module. + + Warning: + + Due to the precarious circumstances under which "__del__()" + methods are invoked, exceptions that occur during their execution + are ignored, and a warning is printed to "sys.stderr" instead. + In particular: + + * "__del__()" can be invoked when arbitrary code is being + executed, including from any arbitrary thread. If "__del__()" + needs to take a lock or invoke any other blocking resource, it + may deadlock as the resource may already be taken by the code + that gets interrupted to execute "__del__()". + + * "__del__()" can be executed during interpreter shutdown. As a + consequence, the global variables it needs to access (including + other modules) may already have been deleted or set to "None". + Python guarantees that globals whose name begins with a single + underscore are deleted from their module before other globals + are deleted; if no other references to such globals exist, this + may help in assuring that imported modules are still available + at the time when the "__del__()" method is called. + +object.__repr__(self) + + Called by the "repr()" built-in function to compute the “official” + string representation of an object. If at all possible, this + should look like a valid Python expression that could be used to + recreate an object with the same value (given an appropriate + environment). If this is not possible, a string of the form + "<...some useful description...>" should be returned. The return + value must be a string object. If a class defines "__repr__()" but + not "__str__()", then "__repr__()" is also used when an “informal” + string representation of instances of that class is required. + + This is typically used for debugging, so it is important that the + representation is information-rich and unambiguous. A default + implementation is provided by the "object" class itself. + +object.__str__(self) + + Called by "str(object)", the default "__format__()" implementation, + and the built-in function "print()", to compute the “informal” or + nicely printable string representation of an object. The return + value must be a str object. + + This method differs from "object.__repr__()" in that there is no + expectation that "__str__()" return a valid Python expression: a + more convenient or concise representation can be used. + + The default implementation defined by the built-in type "object" + calls "object.__repr__()". + +object.__bytes__(self) + + Called by bytes to compute a byte-string representation of an + object. This should return a "bytes" object. The "object" class + itself does not provide this method. + +object.__format__(self, format_spec) + + Called by the "format()" built-in function, and by extension, + evaluation of formatted string literals and the "str.format()" + method, to produce a “formatted” string representation of an + object. The *format_spec* argument is a string that contains a + description of the formatting options desired. 
The interpretation
+   of the *format_spec* argument is up to the type implementing
+   "__format__()", however most classes will either delegate
+   formatting to one of the built-in types, or use a similar
+   formatting option syntax.
+
+   See Format specification mini-language for a description of the
+   standard formatting syntax.
+
+   The return value must be a string object.
+
+   The default implementation by the "object" class should be given an
+   empty *format_spec* string. It delegates to "__str__()".
+
+   Changed in version 3.4: The __format__ method of "object" itself
+   raises a "TypeError" if passed any non-empty string.
+
+   Changed in version 3.7: "object.__format__(x, '')" is now
+   equivalent to "str(x)" rather than "format(str(x), '')".
+
+object.__lt__(self, other)
+object.__le__(self, other)
+object.__eq__(self, other)
+object.__ne__(self, other)
+object.__gt__(self, other)
+object.__ge__(self, other)
+
+   These are the so-called “rich comparison” methods. The
+   correspondence between operator symbols and method names is as
+   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",
+   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls
+   "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".
+
+   A rich comparison method may return the singleton "NotImplemented"
+   if it does not implement the operation for a given pair of
+   arguments. By convention, "False" and "True" are returned for a
+   successful comparison. However, these methods can return any value,
+   so if the comparison operator is used in a Boolean context (e.g.,
+   in the condition of an "if" statement), Python will call "bool()"
+   on the value to determine if the result is true or false.
+
+   By default, "object" implements "__eq__()" by using "is", returning
+   "NotImplemented" in the case of a false comparison: "True if x is y
+   else NotImplemented". For "__ne__()", by default it delegates to
+   "__eq__()" and inverts the result unless it is "NotImplemented".
+   There are no other implied relationships among the comparison
+   operators or default implementations; for example, the truth of
+   "(x<y or x==y)" does not imply "x<=y". To automatically generate
+   ordering operations from a single root operation, see
+   "functools.total_ordering()".
+
+object.__hash__(self)
+
+   Called by built-in function "hash()" and for operations on members
+   of hashed collections including "set", "frozenset", and "dict".
+   The "__hash__()" method should return an integer. The only required
+   property is that objects which compare equal have the same hash
+   value; it is advised to mix together the hash values of the
+   components of the object that also play a part in comparison of
+   objects by packing them into a tuple and hashing the tuple.
+   Example:
+
+      def __hash__(self):
+          return hash((self.name, self.nick, self.color))
+
+   If a class does not define an "__eq__()" method it should not
+   define a "__hash__()" operation either; if it defines "__eq__()"
+   but not "__hash__()", its instances will not be usable as items in
+   hashable collections. If a class defines mutable objects and
+   implements an "__eq__()" method, it should not implement
+   "__hash__()", since the implementation of *hashable* collections
+   requires that a key’s hash value is immutable (if the object’s hash
+   value changes, it will be in the wrong hash bucket).
+
+   User-defined classes have "__eq__()" and "__hash__()" methods by
+   default (inherited from the "object" class); with them, all objects
+   compare unequal (except with themselves) and "x.__hash__()" returns
+   an appropriate value such that "x == y" implies both that "x is y"
+   and "hash(x) == hash(y)".
+
+   A class that overrides "__eq__()" and does not define "__hash__()"
+   will have its "__hash__()" implicitly set to "None". When the
+   "__hash__()" method of a class is "None", instances of the class
+   will raise an appropriate "TypeError" when a program attempts to
+   retrieve their hash value, and will also be correctly identified as
+   unhashable when checking "isinstance(obj,
+   collections.abc.Hashable)".
+
+   If a class that overrides "__eq__()" needs to retain the hash
+   implementation from a parent class, the interpreter must be told
+   this explicitly by setting "__hash__ = <ParentClass>.__hash__".
+
+   If a class that does not override "__eq__()" wishes to suppress
+   hash support, it should include "__hash__ = None" in the class
+   definition. A class which defines its own "__hash__()" that
+   explicitly raises a "TypeError" would be incorrectly identified as
+   hashable by an "isinstance(obj, collections.abc.Hashable)" call.
+
+   Note:
+
+     By default, the "__hash__()" values of str and bytes objects are
+     “salted” with an unpredictable random value. Although they
+     remain constant within an individual Python process, they are not
+     predictable between repeated invocations of Python. This is
+     intended to provide protection against a denial-of-service caused
+     by carefully chosen inputs that exploit the worst case
+     performance of a dict insertion, *O*(*n*^2) complexity. See
+     https://ocert.org/advisories/ocert-2011-003.html for details.
+     Changing hash values affects the iteration order of sets. Python
+     has never made guarantees about this ordering (and it typically
+     varies between 32-bit and 64-bit builds). See also
+     "PYTHONHASHSEED".
+
+   Changed in version 3.3: Hash randomization is enabled by default.
+
+object.__bool__(self)
+
+   Called to implement truth value testing and the built-in operation
+   "bool()"; should return "False" or "True". When this method is not
+   defined, "__len__()" is called, if it is defined, and the object is
+   considered true if its result is nonzero. If a class defines
+   neither "__len__()" nor "__bool__()" (which is true of the "object"
+   class itself), all its instances are considered true.
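+
+For example (an illustrative sketch; the class name "Box" is
+arbitrary), when "__bool__()" is not defined, truth testing falls back
+to "__len__()":
+
+   >>> class Box:
+   ...     def __init__(self, items):
+   ...         self.items = list(items)
+   ...     def __len__(self):
+   ...         return len(self.items)
+   ...
+   >>> bool(Box([]))
+   False
+   >>> bool(Box([1, 2]))
+   True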
+''', + 'debugger': r'''"pdb" — The Python Debugger +*************************** + +**Source code:** Lib/pdb.py + +====================================================================== + +The module "pdb" defines an interactive source code debugger for +Python programs. It supports setting (conditional) breakpoints and +single stepping at the source line level, inspection of stack frames, +source code listing, and evaluation of arbitrary Python code in the +context of any stack frame. It also supports post-mortem debugging +and can be called under program control. + +The debugger is extensible – it is actually defined as the class +"Pdb". This is currently undocumented but easily understood by reading +the source. The extension interface uses the modules "bdb" and "cmd". + +See also: + + Module "faulthandler" + Used to dump Python tracebacks explicitly, on a fault, after a + timeout, or on a user signal. + + Module "traceback" + Standard interface to extract, format and print stack traces of + Python programs. + +The typical usage to break into the debugger is to insert: + + import pdb; pdb.set_trace() + +Or: + + breakpoint() + +at the location you want to break into the debugger, and then run the +program. You can then step through the code following this statement, +and continue running without the debugger using the "continue" +command. + +Changed in version 3.7: The built-in "breakpoint()", when called with +defaults, can be used instead of "import pdb; pdb.set_trace()". + + def double(x): + breakpoint() + return x * 2 + val = 3 + print(f"{val} * 2 is {double(val)}") + +The debugger’s prompt is "(Pdb)", which is the indicator that you are +in debug mode: + + > ...(2)double() + -> breakpoint() + (Pdb) p x + 3 + (Pdb) continue + 3 * 2 is 6 + +Changed in version 3.3: Tab-completion via the "readline" module is +available for commands and command arguments, e.g. the current global +and local names are offered as arguments of the "p" command. + + +Command-line interface +====================== + +You can also invoke "pdb" from the command line to debug other +scripts. For example: + + python -m pdb [-c command] (-m module | -p pid | pyfile) [args ...] + +When invoked as a module, pdb will automatically enter post-mortem +debugging if the program being debugged exits abnormally. After post- +mortem debugging (or after normal exit of the program), pdb will +restart the program. Automatic restarting preserves pdb’s state (such +as breakpoints) and in most cases is more useful than quitting the +debugger upon program’s exit. + +-c, --command + + To execute commands as if given in a ".pdbrc" file; see Debugger + commands. + + Changed in version 3.2: Added the "-c" option. + +-m + + To execute modules similar to the way "python -m" does. As with a + script, the debugger will pause execution just before the first + line of the module. + + Changed in version 3.7: Added the "-m" option. + +-p, --pid + + Attach to the process with the specified PID. + + Added in version 3.14. + +To attach to a running Python process for remote debugging, use the +"-p" or "--pid" option with the target process’s PID: + + python -m pdb -p 1234 + +Note: + + Attaching to a process that is blocked in a system call or waiting + for I/O will only work once the next bytecode instruction is + executed or when the process receives a signal. + +Typical usage to execute a statement under control of the debugger is: + + >>> import pdb + >>> def f(x): + ... 
print(1 / x)
+   >>> pdb.run("f(2)")
+   > <string>(1)<module>()
+   (Pdb) continue
+   0.5
+   >>>
+
+The typical usage to inspect a crashed program is:
+
+   >>> import pdb
+   >>> def f(x):
+   ...     print(1 / x)
+   ...
+   >>> f(0)
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+     File "<stdin>", line 2, in f
+   ZeroDivisionError: division by zero
+   >>> pdb.pm()
+   > <stdin>(2)f()
+   (Pdb) p x
+   0
+   (Pdb)
+
+Changed in version 3.13: The implementation of **PEP 667** means that
+name assignments made via "pdb" will immediately affect the active
+scope, even when running inside an *optimized scope*.
+
+The module defines the following functions; each enters the debugger
+in a slightly different way:
+
+pdb.run(statement, globals=None, locals=None)
+
+   Execute the *statement* (given as a string or a code object) under
+   debugger control. The debugger prompt appears before any code is
+   executed; you can set breakpoints and type "continue", or you can
+   step through the statement using "step" or "next" (all these
+   commands are explained below). The optional *globals* and *locals*
+   arguments specify the environment in which the code is executed; by
+   default the dictionary of the module "__main__" is used. (See the
+   explanation of the built-in "exec()" or "eval()" functions.)
+
+pdb.runeval(expression, globals=None, locals=None)
+
+   Evaluate the *expression* (given as a string or a code object)
+   under debugger control. When "runeval()" returns, it returns the
+   value of the *expression*. Otherwise this function is similar to
+   "run()".
+
+pdb.runcall(function, *args, **kwds)
+
+   Call the *function* (a function or method object, not a string)
+   with the given arguments. When "runcall()" returns, it returns
+   whatever the function call returned. The debugger prompt appears
+   as soon as the function is entered.
+
+pdb.set_trace(*, header=None, commands=None)
+
+   Enter the debugger at the calling stack frame. This is useful to
+   hard-code a breakpoint at a given point in a program, even if the
+   code is not otherwise being debugged (e.g. when an assertion
+   fails). If given, *header* is printed to the console just before
+   debugging begins. The *commands* argument, if given, is a list of
+   commands to execute when the debugger starts.
+
+   Changed in version 3.7: The keyword-only argument *header*.
+
+   Changed in version 3.13: "set_trace()" will enter the debugger
+   immediately, rather than on the next line of code to be executed.
+
+   Added in version 3.14: The *commands* argument.
+
+awaitable pdb.set_trace_async(*, header=None, commands=None)
+
+   async version of "set_trace()". This function should be used inside
+   an async function with "await".
+
+      async def f():
+          await pdb.set_trace_async()
+
+   "await" statements are supported if the debugger is invoked by this
+   function.
+
+   Added in version 3.14.
+
+pdb.post_mortem(t=None)
+
+   Enter post-mortem debugging of the given exception or traceback
+   object. If no value is given, it uses the exception that is
+   currently being handled, or raises "ValueError" if there isn’t one.
+
+   Changed in version 3.13: Support for exception objects was added.
+
+pdb.pm()
+
+   Enter post-mortem debugging of the exception found in
+   "sys.last_exc".
+
+pdb.set_default_backend(backend)
+
+   There are two supported backends for pdb: "'settrace'" and
+   "'monitoring'". See "bdb.Bdb" for details. The user can set the
+   default backend to use if none is specified when instantiating
+   "Pdb". If no backend is specified, the default is "'settrace'".
+ + Note: + + "breakpoint()" and "set_trace()" will not be affected by this + function. They always use "'monitoring'" backend. + + Added in version 3.14. + +pdb.get_default_backend() + + Returns the default backend for pdb. + + Added in version 3.14. + +The "run*" functions and "set_trace()" are aliases for instantiating +the "Pdb" class and calling the method of the same name. If you want +to access further features, you have to do this yourself: + +class pdb.Pdb(completekey='tab', stdin=None, stdout=None, skip=None, nosigint=False, readrc=True, mode=None, backend=None, colorize=False) + + "Pdb" is the debugger class. + + The *completekey*, *stdin* and *stdout* arguments are passed to the + underlying "cmd.Cmd" class; see the description there. + + The *skip* argument, if given, must be an iterable of glob-style + module name patterns. The debugger will not step into frames that + originate in a module that matches one of these patterns. [1] + + By default, Pdb sets a handler for the SIGINT signal (which is sent + when the user presses "Ctrl"-"C" on the console) when you give a + "continue" command. This allows you to break into the debugger + again by pressing "Ctrl"-"C". If you want Pdb not to touch the + SIGINT handler, set *nosigint* to true. + + The *readrc* argument defaults to true and controls whether Pdb + will load .pdbrc files from the filesystem. + + The *mode* argument specifies how the debugger was invoked. It + impacts the workings of some debugger commands. Valid values are + "'inline'" (used by the breakpoint() builtin), "'cli'" (used by the + command line invocation) or "None" (for backwards compatible + behaviour, as before the *mode* argument was added). + + The *backend* argument specifies the backend to use for the + debugger. If "None" is passed, the default backend will be used. + See "set_default_backend()". Otherwise the supported backends are + "'settrace'" and "'monitoring'". + + The *colorize* argument, if set to "True", will enable colorized + output in the debugger, if color is supported. This will highlight + source code displayed in pdb. + + Example call to enable tracing with *skip*: + + import pdb; pdb.Pdb(skip=['django.*']).set_trace() + + Raises an auditing event "pdb.Pdb" with no arguments. + + Changed in version 3.1: Added the *skip* parameter. + + Changed in version 3.2: Added the *nosigint* parameter. Previously, + a SIGINT handler was never set by Pdb. + + Changed in version 3.6: The *readrc* argument. + + Added in version 3.14: Added the *mode* argument. + + Added in version 3.14: Added the *backend* argument. + + Added in version 3.14: Added the *colorize* argument. + + Changed in version 3.14: Inline breakpoints like "breakpoint()" or + "pdb.set_trace()" will always stop the program at calling frame, + ignoring the *skip* pattern (if any). + + run(statement, globals=None, locals=None) + runeval(expression, globals=None, locals=None) + runcall(function, *args, **kwds) + set_trace() + + See the documentation for the functions explained above. + + +Debugger commands +================= + +The commands recognized by the debugger are listed below. Most +commands can be abbreviated to one or two letters as indicated; e.g. +"h(elp)" means that either "h" or "help" can be used to enter the help +command (but not "he" or "hel", nor "H" or "Help" or "HELP"). +Arguments to commands must be separated by whitespace (spaces or +tabs). Optional arguments are enclosed in square brackets ("[]") in +the command syntax; the square brackets must not be typed. 
+Alternatives in the command syntax are separated by a vertical bar
+("|").
+
+Entering a blank line repeats the last command entered. Exception: if
+the last command was a "list" command, the next 11 lines are listed.
+
+Commands that the debugger doesn’t recognize are assumed to be Python
+statements and are executed in the context of the program being
+debugged. Python statements can also be prefixed with an exclamation
+point ("!"). This is a powerful way to inspect the program being
+debugged; it is even possible to change a variable or call a function.
+When an exception occurs in such a statement, the exception name is
+printed but the debugger’s state is not changed.
+
+Changed in version 3.13: Expressions/Statements whose prefix is a pdb
+command are now correctly identified and executed.
+
+The debugger supports aliases. Aliases can have parameters which
+allows one a certain level of adaptability to the context under
+examination.
+
+Multiple commands may be entered on a single line, separated by ";;".
+(A single ";" is not used as it is the separator for multiple commands
+in a line that is passed to the Python parser.) No intelligence is
+applied to separating the commands; the input is split at the first
+";;" pair, even if it is in the middle of a quoted string. A
+workaround for strings with double semicolons is to use implicit
+string concatenation "';'';'" or "";"";"".
+
+To set a temporary global variable, use a *convenience variable*. A
+*convenience variable* is a variable whose name starts with "$". For
+example, "$foo = 1" sets a global variable "$foo" which you can use in
+the debugger session. The *convenience variables* are cleared when
+the program resumes execution so it’s less likely to interfere with
+your program compared to using normal variables like "foo = 1".
+
+There are four preset *convenience variables*:
+
+* "$_frame": the current frame you are debugging
+
+* "$_retval": the return value if the frame is returning
+
+* "$_exception": the exception if the frame is raising an exception
+
+* "$_asynctask": the asyncio task if pdb stops in an async function
+
+Added in version 3.12: Added the *convenience variable* feature.
+
+Added in version 3.14: Added the "$_asynctask" convenience variable.
+
+If a file ".pdbrc" exists in the user’s home directory or in the
+current directory, it is read with "'utf-8'" encoding and executed as
+if it had been typed at the debugger prompt, with the exception that
+empty lines and lines starting with "#" are ignored. This is
+particularly useful for aliases. If both files exist, the one in the
+home directory is read first and aliases defined there can be
+overridden by the local file.
+
+Changed in version 3.2: ".pdbrc" can now contain commands that
+continue debugging, such as "continue" or "next". Previously, these
+commands had no effect.
+
+Changed in version 3.11: ".pdbrc" is now read with "'utf-8'" encoding.
+Previously, it was read with the system locale encoding.
+
+h(elp) [command]
+
+   Without argument, print the list of available commands. With a
+   *command* as argument, print help about that command. "help pdb"
+   displays the full documentation (the docstring of the "pdb"
+   module). Since the *command* argument must be an identifier, "help
+   exec" must be entered to get help on the "!" command.
+
+w(here) [count]
+
+   Print a stack trace, with the most recent frame at the bottom. If
+   *count* is 0, print the current frame entry. If *count* is
+   negative, print the least recent - *count* frames. 
If *count* is + positive, print the most recent *count* frames. An arrow (">") + indicates the current frame, which determines the context of most + commands. + + Changed in version 3.14: *count* argument is added. + +d(own) [count] + + Move the current frame *count* (default one) levels down in the + stack trace (to a newer frame). + +u(p) [count] + + Move the current frame *count* (default one) levels up in the stack + trace (to an older frame). + +b(reak) [([filename:]lineno | function) [, condition]] + + With a *lineno* argument, set a break at line *lineno* in the + current file. The line number may be prefixed with a *filename* and + a colon, to specify a breakpoint in another file (possibly one that + hasn’t been loaded yet). The file is searched on "sys.path". + Acceptable forms of *filename* are "/abspath/to/file.py", + "relpath/file.py", "module" and "package.module". + + With a *function* argument, set a break at the first executable + statement within that function. *function* can be any expression + that evaluates to a function in the current namespace. + + If a second argument is present, it is an expression which must + evaluate to true before the breakpoint is honored. + + Without argument, list all breaks, including for each breakpoint, + the number of times that breakpoint has been hit, the current + ignore count, and the associated condition if any. + + Each breakpoint is assigned a number to which all the other + breakpoint commands refer. + +tbreak [([filename:]lineno | function) [, condition]] + + Temporary breakpoint, which is removed automatically when it is + first hit. The arguments are the same as for "break". + +cl(ear) [filename:lineno | bpnumber ...] + + With a *filename:lineno* argument, clear all the breakpoints at + this line. With a space separated list of breakpoint numbers, clear + those breakpoints. Without argument, clear all breaks (but first + ask confirmation). + +disable bpnumber [bpnumber ...] + + Disable the breakpoints given as a space separated list of + breakpoint numbers. Disabling a breakpoint means it cannot cause + the program to stop execution, but unlike clearing a breakpoint, it + remains in the list of breakpoints and can be (re-)enabled. + +enable bpnumber [bpnumber ...] + + Enable the breakpoints specified. + +ignore bpnumber [count] + + Set the ignore count for the given breakpoint number. If *count* + is omitted, the ignore count is set to 0. A breakpoint becomes + active when the ignore count is zero. When non-zero, the *count* + is decremented each time the breakpoint is reached and the + breakpoint is not disabled and any associated condition evaluates + to true. + +condition bpnumber [condition] + + Set a new *condition* for the breakpoint, an expression which must + evaluate to true before the breakpoint is honored. If *condition* + is absent, any existing condition is removed; i.e., the breakpoint + is made unconditional. + +commands [bpnumber] + + Specify a list of commands for breakpoint number *bpnumber*. The + commands themselves appear on the following lines. Type a line + containing just "end" to terminate the commands. An example: + + (Pdb) commands 1 + (com) p some_variable + (com) end + (Pdb) + + To remove all commands from a breakpoint, type "commands" and + follow it immediately with "end"; that is, give no commands. + + With no *bpnumber* argument, "commands" refers to the last + breakpoint set. + + You can use breakpoint commands to start your program up again. 
+ Simply use the "continue" command, or "step", or any other command + that resumes execution. + + Specifying any command resuming execution (currently "continue", + "step", "next", "return", "until", "jump", "quit" and their + abbreviations) terminates the command list (as if that command was + immediately followed by end). This is because any time you resume + execution (even with a simple next or step), you may encounter + another breakpoint—which could have its own command list, leading + to ambiguities about which list to execute. + + If the list of commands contains the "silent" command, or a command + that resumes execution, then the breakpoint message containing + information about the frame is not displayed. + + Changed in version 3.14: Frame information will not be displayed if + a command that resumes execution is present in the command list. + +s(tep) + + Execute the current line, stop at the first possible occasion + (either in a function that is called or on the next line in the + current function). + +n(ext) + + Continue execution until the next line in the current function is + reached or it returns. (The difference between "next" and "step" + is that "step" stops inside a called function, while "next" + executes called functions at (nearly) full speed, only stopping at + the next line in the current function.) + +unt(il) [lineno] + + Without argument, continue execution until the line with a number + greater than the current one is reached. + + With *lineno*, continue execution until a line with a number + greater or equal to *lineno* is reached. In both cases, also stop + when the current frame returns. + + Changed in version 3.2: Allow giving an explicit line number. + +r(eturn) + + Continue execution until the current function returns. + +c(ont(inue)) + + Continue execution, only stop when a breakpoint is encountered. + +j(ump) lineno + + Set the next line that will be executed. Only available in the + bottom-most frame. This lets you jump back and execute code again, + or jump forward to skip code that you don’t want to run. + + It should be noted that not all jumps are allowed – for instance it + is not possible to jump into the middle of a "for" loop or out of a + "finally" clause. + +l(ist) [first[, last]] + + List source code for the current file. Without arguments, list 11 + lines around the current line or continue the previous listing. + With "." as argument, list 11 lines around the current line. With + one argument, list 11 lines around at that line. With two + arguments, list the given range; if the second argument is less + than the first, it is interpreted as a count. + + The current line in the current frame is indicated by "->". If an + exception is being debugged, the line where the exception was + originally raised or propagated is indicated by ">>", if it differs + from the current line. + + Changed in version 3.2: Added the ">>" marker. + +ll | longlist + + List all source code for the current function or frame. + Interesting lines are marked as for "list". + + Added in version 3.2. + +a(rgs) + + Print the arguments of the current function and their current + values. + +p expression + + Evaluate *expression* in the current context and print its value. + + Note: + + "print()" can also be used, but is not a debugger command — this + executes the Python "print()" function. + +pp expression + + Like the "p" command, except the value of *expression* is pretty- + printed using the "pprint" module. + +whatis expression + + Print the type of *expression*. 
+
+source expression
+
+   Try to get source code of *expression* and display it.
+
+   Added in version 3.2.
+
+display [expression]
+
+   Display the value of *expression* if it changed, each time
+   execution stops in the current frame.
+
+   Without *expression*, list all display expressions for the current
+   frame.
+
+   Note:
+
+     Display evaluates *expression* and compares to the result of the
+     previous evaluation of *expression*, so when the result is
+     mutable, display may not be able to pick up the changes.
+
+   Example:
+
+      lst = []
+      breakpoint()
+      pass
+      lst.append(1)
+      print(lst)
+
+   Display won’t realize "lst" has been changed because the result of
+   evaluation is modified in place by "lst.append(1)" before being
+   compared:
+
+      > example.py(3)<module>()
+      -> pass
+      (Pdb) display lst
+      display lst: []
+      (Pdb) n
+      > example.py(4)<module>()
+      -> lst.append(1)
+      (Pdb) n
+      > example.py(5)<module>()
+      -> print(lst)
+      (Pdb)
+
+   You can do some tricks with copy mechanism to make it work:
+
+      > example.py(3)<module>()
+      -> pass
+      (Pdb) display lst[:]
+      display lst[:]: []
+      (Pdb) n
+      > example.py(4)<module>()
+      -> lst.append(1)
+      (Pdb) n
+      > example.py(5)<module>()
+      -> print(lst)
+      display lst[:]: [1] [old: []]
+      (Pdb)
+
+   Added in version 3.2.
+
+undisplay [expression]
+
+   Do not display *expression* anymore in the current frame. Without
+   *expression*, clear all display expressions for the current frame.
+
+   Added in version 3.2.
+
+interact
+
+   Start an interactive interpreter (using the "code" module) in a new
+   global namespace initialised from the local and global namespaces
+   for the current scope. Use "exit()" or "quit()" to exit the
+   interpreter and return to the debugger.
+
+   Note:
+
+     As "interact" creates a new dedicated namespace for code
+     execution, assignments to variables will not affect the original
+     namespaces. However, modifications to any referenced mutable
+     objects will be reflected in the original namespaces as usual.
+
+   Added in version 3.2.
+
+   Changed in version 3.13: "exit()" and "quit()" can be used to exit
+   the "interact" command.
+
+   Changed in version 3.13: "interact" directs its output to the
+   debugger’s output channel rather than "sys.stderr".
+
+alias [name [command]]
+
+   Create an alias called *name* that executes *command*. The
+   *command* must *not* be enclosed in quotes. Replaceable parameters
+   can be indicated by "%1", "%2", … and "%9", while "%*" is replaced
+   by all the parameters. If *command* is omitted, the current alias
+   for *name* is shown. If no arguments are given, all aliases are
+   listed.
+
+   Aliases may be nested and can contain anything that can be legally
+   typed at the pdb prompt. Note that internal pdb commands *can* be
+   overridden by aliases. Such a command is then hidden until the
+   alias is removed. Aliasing is recursively applied to the first
+   word of the command line; all other words in the line are left
+   alone.
+
+   As an example, here are two useful aliases (especially when placed
+   in the ".pdbrc" file):
+
+      # Print instance variables (usage "pi classInst")
+      alias pi for k in %1.__dict__.keys(): print(f"%1.{k} = {%1.__dict__[k]}")
+      # Print instance variables in self
+      alias ps pi self
+
+unalias name
+
+   Delete the specified alias *name*.
+
+! statement
+
+   Execute the (one-line) *statement* in the context of the current
+   stack frame. The exclamation point can be omitted unless the first
+   word of the statement resembles a debugger command, e.g.:
+
+      (Pdb) ! n=42
+      (Pdb)
+
+   To set a global variable, you can prefix the assignment command
+   with a "global" statement on the same line, e.g.:
+
+      (Pdb) global list_options; list_options = ['-l']
+      (Pdb)
+
+run [args ...]
+restart [args ...]
+
+   Restart the debugged Python program. If *args* is supplied, it is
+   split with "shlex" and the result is used as the new "sys.argv".
+   History, breakpoints, actions and debugger options are preserved.
+   "restart" is an alias for "run".
+
+   Changed in version 3.14: "run" and "restart" commands are disabled
+   when the debugger is invoked in "'inline'" mode.
+
+q(uit)
+
+   Quit from the debugger. The program being executed is aborted. An
+   end-of-file input is equivalent to "quit".
+
+   A confirmation prompt will be shown if the debugger is invoked in
+   "'inline'" mode. Either "y", "Y", "<Enter>" or "EOF" will confirm
+   the quit.
+
+   Changed in version 3.14: A confirmation prompt will be shown if the
+   debugger is invoked in "'inline'" mode. After the confirmation, the
+   debugger will call "sys.exit()" immediately, instead of raising
+   "bdb.BdbQuit" in the next trace event.
+
+debug code
+
+   Enter a recursive debugger that steps through *code* (which is an
+   arbitrary expression or statement to be executed in the current
+   environment).
+
+retval
+
+   Print the return value for the last return of the current function.
+
+exceptions [excnumber]
+
+   List or jump between chained exceptions.
+
+   When using "pdb.pm()" or "Pdb.post_mortem(...)" with a chained
+   exception instead of a traceback, it allows the user to move
+   between the chained exceptions using "exceptions" command to list
+   exceptions, and "exceptions <number>" to switch to that exception.
+
+   Example:
+
+      def out():
+          try:
+              middle()
+          except Exception as e:
+              raise ValueError("reraise middle() error") from e
+
+      def middle():
+          try:
+              return inner(0)
+          except Exception as e:
+              raise ValueError("Middle fail")
+
+      def inner(x):
+          1 / x
+
+      out()
+
+   calling "pdb.pm()" will allow to move between exceptions:
+
+      > example.py(5)out()
+      -> raise ValueError("reraise middle() error") from e
+
+      (Pdb) exceptions
+        0 ZeroDivisionError('division by zero')
+        1 ValueError('Middle fail')
+      > 2 ValueError('reraise middle() error')
+
+      (Pdb) exceptions 0
+      > example.py(16)inner()
+      -> 1 / x
+
+      (Pdb) up
+      > example.py(10)middle()
+      -> return inner(0)
+
+   Added in version 3.13.
+
+-[ Footnotes ]-
+
+[1] Whether a frame is considered to originate in a certain module is
+    determined by the "__name__" in the frame globals.
+''',
+ 'del': r'''The "del" statement
+*******************
+
+   del_stmt: "del" target_list
+
+Deletion is recursively defined very similar to the way assignment is
+defined. Rather than spelling it out in full details, here are some
+hints.
+
+Deletion of a target list recursively deletes each target, from left
+to right.
+
+Deletion of a name removes the binding of that name from the local or
+global namespace, depending on whether the name occurs in a "global"
+statement in the same code block. Trying to delete an unbound name
+raises a "NameError" exception.
+
+Deletion of attribute references and subscriptions is passed to the
+primary object involved; deletion of a slicing is in general
+equivalent to assignment of an empty slice of the right type (but even
+this is determined by the sliced object).
+
+Changed in version 3.2: Previously it was illegal to delete a name
+from the local namespace if it occurs as a free variable in a nested
+block.
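+
+For example, deletion applies uniformly to slicings and names:
+
+   >>> x = [1, 2, 3, 4]
+   >>> del x[1:3]    # deletion of a slicing
+   >>> x
+   [1, 4]
+   >>> del x         # deletion of a name
+   >>> x
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   NameError: name 'x' is not defined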
+''', + 'dict': r'''Dictionary displays +******************* + +A dictionary display is a possibly empty series of dict items +(key/value pairs) enclosed in curly braces: + + dict_display: "{" [dict_item_list | dict_comprehension] "}" + dict_item_list: dict_item ("," dict_item)* [","] + dict_item: expression ":" expression | "**" or_expr + dict_comprehension: expression ":" expression comp_for + +A dictionary display yields a new dictionary object. + +If a comma-separated sequence of dict items is given, they are +evaluated from left to right to define the entries of the dictionary: +each key object is used as a key into the dictionary to store the +corresponding value. This means that you can specify the same key +multiple times in the dict item list, and the final dictionary’s value +for that key will be the last one given. + +A double asterisk "**" denotes *dictionary unpacking*. Its operand +must be a *mapping*. Each mapping item is added to the new +dictionary. Later values replace values already set by earlier dict +items and earlier dictionary unpackings. + +Added in version 3.5: Unpacking into dictionary displays, originally +proposed by **PEP 448**. + +A dict comprehension, in contrast to list and set comprehensions, +needs two expressions separated with a colon followed by the usual +“for” and “if” clauses. When the comprehension is run, the resulting +key and value elements are inserted in the new dictionary in the order +they are produced. + +Restrictions on the types of the key values are listed earlier in +section The standard type hierarchy. (To summarize, the key type +should be *hashable*, which excludes all mutable objects.) Clashes +between duplicate keys are not detected; the last value (textually +rightmost in the display) stored for a given key value prevails. + +Changed in version 3.8: Prior to Python 3.8, in dict comprehensions, +the evaluation order of key and value was not well-defined. In +CPython, the value was evaluated before the key. Starting with 3.8, +the key is evaluated before the value, as proposed by **PEP 572**. +''', + 'dynamic-features': r'''Interaction with dynamic features +********************************* + +Name resolution of free variables occurs at runtime, not at compile +time. This means that the following code will print 42: + + i = 10 + def f(): + print(i) + i = 42 + f() + +The "eval()" and "exec()" functions do not have access to the full +environment for resolving names. Names may be resolved in the local +and global namespaces of the caller. Free variables are not resolved +in the nearest enclosing namespace, but in the global namespace. [1] +The "exec()" and "eval()" functions have optional arguments to +override the global and local namespace. If only one namespace is +specified, it is used for both. +''', + 'else': r'''The "if" statement +****************** + +The "if" statement is used for conditional execution: + + if_stmt: "if" assignment_expression ":" suite + ("elif" assignment_expression ":" suite)* + ["else" ":" suite] + +It selects exactly one of the suites by evaluating the expressions one +by one until one is found to be true (see section Boolean operations +for the definition of true and false); then that suite is executed +(and no other part of the "if" statement is executed or evaluated). +If all expressions are false, the suite of the "else" clause, if +present, is executed. 
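+
+For example, exactly one suite is executed, the first whose condition
+is true:
+
+   x = 0
+   if x > 0:
+       sign = 1
+   elif x < 0:
+       sign = -1
+   else:
+       sign = 0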
+''', + 'exceptions': r'''Exceptions +********** + +Exceptions are a means of breaking out of the normal flow of control +of a code block in order to handle errors or other exceptional +conditions. An exception is *raised* at the point where the error is +detected; it may be *handled* by the surrounding code block or by any +code block that directly or indirectly invoked the code block where +the error occurred. + +The Python interpreter raises an exception when it detects a run-time +error (such as division by zero). A Python program can also +explicitly raise an exception with the "raise" statement. Exception +handlers are specified with the "try" … "except" statement. The +"finally" clause of such a statement can be used to specify cleanup +code which does not handle the exception, but is executed whether an +exception occurred or not in the preceding code. + +Python uses the “termination” model of error handling: an exception +handler can find out what happened and continue execution at an outer +level, but it cannot repair the cause of the error and retry the +failing operation (except by re-entering the offending piece of code +from the top). + +When an exception is not handled at all, the interpreter terminates +execution of the program, or returns to its interactive main loop. In +either case, it prints a stack traceback, except when the exception is +"SystemExit". + +Exceptions are identified by class instances. The "except" clause is +selected depending on the class of the instance: it must reference the +class of the instance or a *non-virtual base class* thereof. The +instance can be received by the handler and can carry additional +information about the exceptional condition. + +Note: + + Exception messages are not part of the Python API. Their contents + may change from one version of Python to the next without warning + and should not be relied on by code which will run under multiple + versions of the interpreter. + +See also the description of the "try" statement in section The try +statement and "raise" statement in section The raise statement. +''', + 'execmodel': r'''Execution model +*************** + + +Structure of a program +====================== + +A Python program is constructed from code blocks. A *block* is a piece +of Python program text that is executed as a unit. The following are +blocks: a module, a function body, and a class definition. Each +command typed interactively is a block. A script file (a file given +as standard input to the interpreter or specified as a command line +argument to the interpreter) is a code block. A script command (a +command specified on the interpreter command line with the "-c" +option) is a code block. A module run as a top level script (as module +"__main__") from the command line using a "-m" argument is also a code +block. The string argument passed to the built-in functions "eval()" +and "exec()" is a code block. + +A code block is executed in an *execution frame*. A frame contains +some administrative information (used for debugging) and determines +where and how execution continues after the code block’s execution has +completed. + + +Naming and binding +================== + + +Binding of names +---------------- + +*Names* refer to objects. Names are introduced by name binding +operations. 
+ +The following constructs bind names: + +* formal parameters to functions, + +* class definitions, + +* function definitions, + +* assignment expressions, + +* targets that are identifiers if occurring in an assignment: + + * "for" loop header, + + * after "as" in a "with" statement, "except" clause, "except*" + clause, or in the as-pattern in structural pattern matching, + + * in a capture pattern in structural pattern matching + +* "import" statements. + +* "type" statements. + +* type parameter lists. + +The "import" statement of the form "from ... import *" binds all names +defined in the imported module, except those beginning with an +underscore. This form may only be used at the module level. + +A target occurring in a "del" statement is also considered bound for +this purpose (though the actual semantics are to unbind the name). + +Each assignment or import statement occurs within a block defined by a +class or function definition or at the module level (the top-level +code block). + +If a name is bound in a block, it is a local variable of that block, +unless declared as "nonlocal" or "global". If a name is bound at the +module level, it is a global variable. (The variables of the module +code block are local and global.) If a variable is used in a code +block but not defined there, it is a *free variable*. + +Each occurrence of a name in the program text refers to the *binding* +of that name established by the following name resolution rules. + + +Resolution of names +------------------- + +A *scope* defines the visibility of a name within a block. If a local +variable is defined in a block, its scope includes that block. If the +definition occurs in a function block, the scope extends to any blocks +contained within the defining one, unless a contained block introduces +a different binding for the name. + +When a name is used in a code block, it is resolved using the nearest +enclosing scope. The set of all such scopes visible to a code block +is called the block’s *environment*. + +When a name is not found at all, a "NameError" exception is raised. If +the current scope is a function scope, and the name refers to a local +variable that has not yet been bound to a value at the point where the +name is used, an "UnboundLocalError" exception is raised. +"UnboundLocalError" is a subclass of "NameError". + +If a name binding operation occurs anywhere within a code block, all +uses of the name within the block are treated as references to the +current block. This can lead to errors when a name is used within a +block before it is bound. This rule is subtle. Python lacks +declarations and allows name binding operations to occur anywhere +within a code block. The local variables of a code block can be +determined by scanning the entire text of the block for name binding +operations. See the FAQ entry on UnboundLocalError for examples. + +If the "global" statement occurs within a block, all uses of the names +specified in the statement refer to the bindings of those names in the +top-level namespace. Names are resolved in the top-level namespace by +searching the global namespace, i.e. the namespace of the module +containing the code block, and the builtins namespace, the namespace +of the module "builtins". The global namespace is searched first. If +the names are not found there, the builtins namespace is searched +next. If the names are also not found in the builtins namespace, new +variables are created in the global namespace. 
The global statement +must precede all uses of the listed names. + +The "global" statement has the same scope as a name binding operation +in the same block. If the nearest enclosing scope for a free variable +contains a global statement, the free variable is treated as a global. + +The "nonlocal" statement causes corresponding names to refer to +previously bound variables in the nearest enclosing function scope. +"SyntaxError" is raised at compile time if the given name does not +exist in any enclosing function scope. Type parameters cannot be +rebound with the "nonlocal" statement. + +The namespace for a module is automatically created the first time a +module is imported. The main module for a script is always called +"__main__". + +Class definition blocks and arguments to "exec()" and "eval()" are +special in the context of name resolution. A class definition is an +executable statement that may use and define names. These references +follow the normal rules for name resolution with an exception that +unbound local variables are looked up in the global namespace. The +namespace of the class definition becomes the attribute dictionary of +the class. The scope of names defined in a class block is limited to +the class block; it does not extend to the code blocks of methods. +This includes comprehensions and generator expressions, but it does +not include annotation scopes, which have access to their enclosing +class scopes. This means that the following will fail: + + class A: + a = 42 + b = list(a + i for i in range(10)) + +However, the following will succeed: + + class A: + type Alias = Nested + class Nested: pass + + print(A.Alias.__value__) # + + +Annotation scopes +----------------- + +*Annotations*, type parameter lists and "type" statements introduce +*annotation scopes*, which behave mostly like function scopes, but +with some exceptions discussed below. + +Annotation scopes are used in the following contexts: + +* *Function annotations*. + +* *Variable annotations*. + +* Type parameter lists for generic type aliases. + +* Type parameter lists for generic functions. A generic function’s + annotations are executed within the annotation scope, but its + defaults and decorators are not. + +* Type parameter lists for generic classes. A generic class’s base + classes and keyword arguments are executed within the annotation + scope, but its decorators are not. + +* The bounds, constraints, and default values for type parameters + (lazily evaluated). + +* The value of type aliases (lazily evaluated). + +Annotation scopes differ from function scopes in the following ways: + +* Annotation scopes have access to their enclosing class namespace. If + an annotation scope is immediately within a class scope, or within + another annotation scope that is immediately within a class scope, + the code in the annotation scope can use names defined in the class + scope as if it were executed directly within the class body. This + contrasts with regular functions defined within classes, which + cannot access names defined in the class scope. + +* Expressions in annotation scopes cannot contain "yield", "yield + from", "await", or ":=" expressions. (These expressions are allowed + in other scopes contained within the annotation scope.) + +* Names defined in annotation scopes cannot be rebound with "nonlocal" + statements in inner scopes. This includes only type parameters, as + no other syntactic elements that can appear within annotation scopes + can introduce new names. 
+ +* While annotation scopes have an internal name, that name is not + reflected in the *qualified name* of objects defined within the + scope. Instead, the "__qualname__" of such objects is as if the + object were defined in the enclosing scope. + +Added in version 3.12: Annotation scopes were introduced in Python +3.12 as part of **PEP 695**. + +Changed in version 3.13: Annotation scopes are also used for type +parameter defaults, as introduced by **PEP 696**. + +Changed in version 3.14: Annotation scopes are now also used for +annotations, as specified in **PEP 649** and **PEP 749**. + + +Lazy evaluation +--------------- + +Most annotation scopes are *lazily evaluated*. This includes +annotations, the values of type aliases created through the "type" +statement, and the bounds, constraints, and default values of type +variables created through the type parameter syntax. This means that +they are not evaluated when the type alias or type variable is +created, or when the object carrying annotations is created. Instead, +they are only evaluated when necessary, for example when the +"__value__" attribute on a type alias is accessed. + +Example: + + >>> type Alias = 1/0 + >>> Alias.__value__ + Traceback (most recent call last): + ... + ZeroDivisionError: division by zero + >>> def func[T: 1/0](): pass + >>> T = func.__type_params__[0] + >>> T.__bound__ + Traceback (most recent call last): + ... + ZeroDivisionError: division by zero + +Here the exception is raised only when the "__value__" attribute of +the type alias or the "__bound__" attribute of the type variable is +accessed. + +This behavior is primarily useful for references to types that have +not yet been defined when the type alias or type variable is created. +For example, lazy evaluation enables creation of mutually recursive +type aliases: + + from typing import Literal + + type SimpleExpr = int | Parenthesized + type Parenthesized = tuple[Literal["("], Expr, Literal[")"]] + type Expr = SimpleExpr | tuple[SimpleExpr, Literal["+", "-"], Expr] + +Lazily evaluated values are evaluated in annotation scope, which means +that names that appear inside the lazily evaluated value are looked up +as if they were used in the immediately enclosing scope. + +Added in version 3.12. + + +Builtins and restricted execution +--------------------------------- + +**CPython implementation detail:** Users should not touch +"__builtins__"; it is strictly an implementation detail. Users +wanting to override values in the builtins namespace should "import" +the "builtins" module and modify its attributes appropriately. + +The builtins namespace associated with the execution of a code block +is actually found by looking up the name "__builtins__" in its global +namespace; this should be a dictionary or a module (in the latter case +the module’s dictionary is used). By default, when in the "__main__" +module, "__builtins__" is the built-in module "builtins"; when in any +other module, "__builtins__" is an alias for the dictionary of the +"builtins" module itself. + + +Interaction with dynamic features +--------------------------------- + +Name resolution of free variables occurs at runtime, not at compile +time. This means that the following code will print 42: + + i = 10 + def f(): + print(i) + i = 42 + f() + +The "eval()" and "exec()" functions do not have access to the full +environment for resolving names. Names may be resolved in the local +and global namespaces of the caller. 
Free variables are not resolved +in the nearest enclosing namespace, but in the global namespace. [1] +The "exec()" and "eval()" functions have optional arguments to +override the global and local namespace. If only one namespace is +specified, it is used for both. + + +Exceptions +========== + +Exceptions are a means of breaking out of the normal flow of control +of a code block in order to handle errors or other exceptional +conditions. An exception is *raised* at the point where the error is +detected; it may be *handled* by the surrounding code block or by any +code block that directly or indirectly invoked the code block where +the error occurred. + +The Python interpreter raises an exception when it detects a run-time +error (such as division by zero). A Python program can also +explicitly raise an exception with the "raise" statement. Exception +handlers are specified with the "try" … "except" statement. The +"finally" clause of such a statement can be used to specify cleanup +code which does not handle the exception, but is executed whether an +exception occurred or not in the preceding code. + +Python uses the “termination” model of error handling: an exception +handler can find out what happened and continue execution at an outer +level, but it cannot repair the cause of the error and retry the +failing operation (except by re-entering the offending piece of code +from the top). + +When an exception is not handled at all, the interpreter terminates +execution of the program, or returns to its interactive main loop. In +either case, it prints a stack traceback, except when the exception is +"SystemExit". + +Exceptions are identified by class instances. The "except" clause is +selected depending on the class of the instance: it must reference the +class of the instance or a *non-virtual base class* thereof. The +instance can be received by the handler and can carry additional +information about the exceptional condition. + +Note: + + Exception messages are not part of the Python API. Their contents + may change from one version of Python to the next without warning + and should not be relied on by code which will run under multiple + versions of the interpreter. + +See also the description of the "try" statement in section The try +statement and "raise" statement in section The raise statement. + + +Runtime Components +================== + + +General Computing Model +----------------------- + +Python’s execution model does not operate in a vacuum. It runs on a +host machine and through that host’s runtime environment, including +its operating system (OS), if there is one. When a program runs, the +conceptual layers of how it runs on the host look something like this: + + **host machine** + **process** (global resources) + **thread** (runs machine code) + +Each process represents a program running on the host. Think of each +process itself as the data part of its program. Think of the process’ +threads as the execution part of the program. This distinction will +be important to understand the conceptual Python runtime. + +The process, as the data part, is the execution context in which the +program runs. It mostly consists of the set of resources assigned to +the program by the host, including memory, signals, file handles, +sockets, and environment variables. + +Processes are isolated and independent from one another. (The same is +true for hosts.) The host manages the process’ access to its assigned +resources, in addition to coordinating between processes. 
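+
+For example, a module-level name rebound in one process is invisible
+to the process that started it (a minimal sketch using the
+"multiprocessing" module; the names are illustrative):
+
+   import multiprocessing
+
+   counter = 0
+
+   def bump():
+       global counter
+       counter += 1      # modifies only the child process's copy
+
+   if __name__ == "__main__":
+       p = multiprocessing.Process(target=bump)
+       p.start()
+       p.join()
+       print(counter)    # prints 0: the parent's memory is untouched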
+
+Each thread represents the actual execution of the program’s machine
+code, running relative to the resources assigned to the program’s
+process. It’s strictly up to the host how and when that execution
+takes place.
+
+From the point of view of Python, a program always starts with exactly
+one thread. However, the program may grow to run in multiple
+simultaneous threads. Not all hosts support multiple threads per
+process, but most do. Unlike processes, threads in a process are not
+isolated and independent from one another. Specifically, all threads
+in a process share all of the process’ resources.
+
+The fundamental point of threads is that each one does *run*
+independently, at the same time as the others. That may be only
+conceptually at the same time (“concurrently”) or physically (“in
+parallel”). Either way, the threads effectively run at a non-
+synchronized rate.
+
+Note:
+
+  That non-synchronized rate means none of the process’ memory is
+  guaranteed to stay consistent for the code running in any given
+  thread. Thus multi-threaded programs must take care to coordinate
+  access to intentionally shared resources. Likewise, they must take
+  care to be absolutely diligent about not accessing any *other*
+  resources in multiple threads; otherwise two threads running at the
+  same time might accidentally interfere with each other’s use of some
+  shared data. All this is true for both Python programs and the
+  Python runtime. The cost of this broad, unstructured requirement is
+  the tradeoff for the kind of raw concurrency that threads provide.
+  The alternative to the required discipline generally means dealing
+  with non-deterministic bugs and data corruption.
+
+
+Python Runtime Model
+--------------------
+
+The same conceptual layers apply to each Python program, with some
+extra data layers specific to Python:
+
+   **host machine**
+      **process** (global resources)
+         Python global runtime (*state*)
+            Python interpreter (*state*)
+               **thread** (runs Python bytecode and “C-API”)
+                  Python thread *state*
+
+At the conceptual level: when a Python program starts, it looks
+exactly like that diagram, with one of each. The runtime may grow to
+include multiple interpreters, and each interpreter may grow to
+include multiple thread states.
+
+Note:
+
+  A Python implementation won’t necessarily implement the runtime
+  layers distinctly or even concretely. The only exception is places
+  where distinct layers are directly specified or exposed to users,
+  like through the "threading" module.
+
+Note:
+
+  The initial interpreter is typically called the “main” interpreter.
+  Some Python implementations, like CPython, assign special roles to
+  the main interpreter. Likewise, the host thread where the runtime
+  was initialized is known as the “main” thread. It may be different
+  from the process’ initial thread, though they are often the same.
+  In some cases “main thread” may be even more specific and refer to
+  the initial thread state. A Python runtime might assign specific
+  responsibilities to the main thread, such as handling signals.
+
+As a whole, the Python runtime consists of the global runtime state,
+interpreters, and thread states. The runtime ensures all that state
+stays consistent over its lifetime, particularly when used with
+multiple host threads.
+
+The global runtime, at the conceptual level, is just a set of
+interpreters. While those interpreters are otherwise isolated and
+independent from one another, they may share some data or other
+resources. The runtime is responsible for managing these global
+resources safely. The actual nature and management of these resources
+is implementation-specific. Ultimately, the external utility of the
+global runtime is limited to managing interpreters.
+
+In contrast, an “interpreter” is conceptually what we would normally
+think of as the (full-featured) “Python runtime”. When machine code
+executing in a host thread interacts with the Python runtime, it calls
+into Python in the context of a specific interpreter.
+
+Note:
+
+  The term “interpreter” here is not the same as the “bytecode
+  interpreter”, which is what regularly runs in threads, executing
+  compiled Python code. In an ideal world, “Python runtime” would
+  refer to what we currently call “interpreter”. However, it’s been
+  called “interpreter” at least since introduced in 1997
+  (CPython:a027efa5b).
+
+Each interpreter completely encapsulates all of the non-process-
+global, non-thread-specific state needed for the Python runtime to
+work. Notably, the interpreter’s state persists between uses. It
+includes fundamental data like "sys.modules". The runtime ensures
+multiple threads using the same interpreter will safely share it
+between them.
+
+A Python implementation may support using multiple interpreters at the
+same time in the same process. They are independent and isolated from
+one another. For example, each interpreter has its own "sys.modules".
+
+For thread-specific runtime state, each interpreter has a set of
+thread states, which it manages, in the same way the global runtime
+contains a set of interpreters. It can have thread states for as many
+host threads as it needs. It may even have multiple thread states for
+the same host thread, though that isn’t as common.
+
+Each thread state, conceptually, has all the thread-specific runtime
+data an interpreter needs to operate in one host thread. The thread
+state includes the current raised exception and the thread’s Python
+call stack. It may include other thread-specific resources.
+
+Note:
+
+  The term “Python thread” can sometimes refer to a thread state, but
+  normally it means a thread created using the "threading" module.
+
+Each thread state, over its lifetime, is always tied to exactly one
+interpreter and exactly one host thread. It will only ever be used in
+that thread and with that interpreter.
+
+Multiple thread states may be tied to the same host thread, whether
+for different interpreters or even the same interpreter. However, for
+any given host thread, only one of the thread states tied to it can be
+used by the thread at a time.
+
+Thread states are isolated and independent from one another and don’t
+share any data, except for possibly sharing an interpreter and objects
+or other resources belonging to that interpreter.
+
+Once a program is running, new Python threads can be created using the
+"threading" module (on platforms and Python implementations that
+support threads). Additional processes can be created using the "os",
+"subprocess", and "multiprocessing" modules. Interpreters can be
+created and used with the "interpreters" module. Coroutines (async)
+can be run using "asyncio" in each interpreter, typically only in a
+single thread (often the main thread).
+
+-[ Footnotes ]-
+
+[1] This limitation occurs because the code that is executed by these
+    operations is not available at the time the module is compiled.
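+
+As an informal illustration of the namespace override arguments
+described in Interaction with dynamic features above (the names are
+illustrative):
+
+   i = 10
+   code = "print(i)"
+
+   exec(code)                      # 10: the caller's namespaces are used
+   exec(code, {"i": 42})           # 42: one mapping serves as both namespaces
+   exec(code, {"i": 1}, {"i": 2})  # 2: the local namespace takes precedence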
+''', + 'exprlists': r'''Expression lists +**************** + + starred_expression: "*" or_expr | expression + flexible_expression: assignment_expression | starred_expression + flexible_expression_list: flexible_expression ("," flexible_expression)* [","] + starred_expression_list: starred_expression ("," starred_expression)* [","] + expression_list: expression ("," expression)* [","] + yield_list: expression_list | starred_expression "," [starred_expression_list] + +Except when part of a list or set display, an expression list +containing at least one comma yields a tuple. The length of the tuple +is the number of expressions in the list. The expressions are +evaluated from left to right. + +An asterisk "*" denotes *iterable unpacking*. Its operand must be an +*iterable*. The iterable is expanded into a sequence of items, which +are included in the new tuple, list, or set, at the site of the +unpacking. + +Added in version 3.5: Iterable unpacking in expression lists, +originally proposed by **PEP 448**. + +Added in version 3.11: Any item in an expression list may be starred. +See **PEP 646**. + +A trailing comma is required only to create a one-item tuple, such as +"1,"; it is optional in all other cases. A single expression without a +trailing comma doesn’t create a tuple, but rather yields the value of +that expression. (To create an empty tuple, use an empty pair of +parentheses: "()".) +''', + 'floating': r'''Floating-point literals +*********************** + +Floating-point (float) literals, such as "3.14" or "1.5", denote +approximations of real numbers. + +They consist of *integer* and *fraction* parts, each composed of +decimal digits. The parts are separated by a decimal point, ".": + + 2.71828 + 4.0 + +Unlike in integer literals, leading zeros are allowed. For example, +"077.010" is legal, and denotes the same number as "77.01". + +As in integer literals, single underscores may occur between digits to +help readability: + + 96_485.332_123 + 3.14_15_93 + +Either of these parts, but not both, can be empty. For example: + + 10. # (equivalent to 10.0) + .001 # (equivalent to 0.001) + +Optionally, the integer and fraction may be followed by an *exponent*: +the letter "e" or "E", followed by an optional sign, "+" or "-", and a +number in the same format as the integer and fraction parts. The "e" +or "E" represents “times ten raised to the power of”: + + 1.0e3 # (represents 1.0×10³, or 1000.0) + 1.166e-5 # (represents 1.166×10⁻⁵, or 0.00001166) + 6.02214076e+23 # (represents 6.02214076×10²³, or 602214076000000000000000.) + +In floats with only integer and exponent parts, the decimal point may +be omitted: + + 1e3 # (equivalent to 1.e3 and 1.0e3) + 0e0 # (equivalent to 0.) + +Formally, floating-point literals are described by the following +lexical definitions: + + floatnumber: + | digitpart "." [digitpart] [exponent] + | "." digitpart [exponent] + | digitpart exponent + digitpart: digit (["_"] digit)* + exponent: ("e" | "E") ["+" | "-"] digitpart + +Changed in version 3.6: Underscores are now allowed for grouping +purposes in literals. +''', + 'for': r'''The "for" statement +******************* + +The "for" statement is used to iterate over the elements of a sequence +(such as a string, tuple or list) or other iterable object: + + for_stmt: "for" target_list "in" starred_expression_list ":" suite + ["else" ":" suite] + +The "starred_expression_list" expression is evaluated once; it should +yield an *iterable* object. An *iterator* is created for that +iterable. 
The first item provided by the iterator is then assigned to
+the target list using the standard rules for assignments (see
+Assignment statements), and the suite is executed. This repeats for
+each item provided by the iterator. When the iterator is exhausted,
+the suite in the "else" clause, if present, is executed, and the loop
+terminates.
+
+A "break" statement executed in the first suite terminates the loop
+without executing the "else" clause’s suite. A "continue" statement
+executed in the first suite skips the rest of the suite and continues
+with the next item, or with the "else" clause if there is no next
+item.
+
+The for-loop makes assignments to the variables in the target list.
+This overwrites all previous assignments to those variables including
+those made in the suite of the for-loop:
+
+   for i in range(10):
+       print(i)
+       i = 5              # this will not affect the for-loop
+                          # because i will be overwritten with the next
+                          # index in the range
+
+Names in the target list are not deleted when the loop is finished,
+but if the sequence is empty, they will not have been assigned to at
+all by the loop. Hint: the built-in type "range()" represents
+immutable arithmetic sequences of integers. For instance, iterating
+"range(3)" successively yields 0, 1, and then 2.
+
+Changed in version 3.11: Starred elements are now allowed in the
+expression list.
+''',
+ 'formatstrings': r'''Format string syntax
+********************
+
+The "str.format()" method and the "Formatter" class share the same
+syntax for format strings (although in the case of "Formatter",
+subclasses can define their own format string syntax). The syntax is
+related to that of formatted string literals and template string
+literals, but it is less sophisticated and, in particular, does not
+support arbitrary expressions in interpolations.
+
+Format strings contain “replacement fields” surrounded by curly braces
+"{}". Anything that is not contained in braces is considered literal
+text, which is copied unchanged to the output. If you need to include
+a brace character in the literal text, it can be escaped by doubling:
+"{{" and "}}".
+
+The grammar for a replacement field is as follows:
+
+   replacement_field: "{" [field_name] ["!" conversion] [":" format_spec] "}"
+   field_name:        arg_name ("." attribute_name | "[" element_index "]")*
+   arg_name:          [identifier | digit+]
+   attribute_name:    identifier
+   element_index:     digit+ | index_string
+   index_string:      <any source character except "]"> +
+   conversion:        "r" | "s" | "a"
+   format_spec:       format-spec:format_spec
+
+In less formal terms, the replacement field can start with a
+*field_name* that specifies the object whose value is to be formatted
+and inserted into the output instead of the replacement field. The
+*field_name* is optionally followed by a *conversion* field, which is
+preceded by an exclamation point "'!'", and a *format_spec*, which is
+preceded by a colon "':'". These specify a non-default format for the
+replacement value.
+
+See also the Format specification mini-language section.
+
+The *field_name* itself begins with an *arg_name* that is either a
+number or a keyword. If it’s a number, it refers to a positional
+argument, and if it’s a keyword, it refers to a named keyword
+argument. An *arg_name* is treated as a number if a call to
+"str.isdecimal()" on the string would return true. If the numerical
+arg_names in a format string are 0, 1, 2, … in sequence, they can all
+be omitted (not just some) and the numbers 0, 1, 2, … will be
+automatically inserted in that order.
Because *arg_name* is not quote- +delimited, it is not possible to specify arbitrary dictionary keys +(e.g., the strings "'10'" or "':-]'") within a format string. The +*arg_name* can be followed by any number of index or attribute +expressions. An expression of the form "'.name'" selects the named +attribute using "getattr()", while an expression of the form +"'[index]'" does an index lookup using "__getitem__()". + +Changed in version 3.1: The positional argument specifiers can be +omitted for "str.format()", so "'{} {}'.format(a, b)" is equivalent to +"'{0} {1}'.format(a, b)". + +Changed in version 3.4: The positional argument specifiers can be +omitted for "Formatter". + +Some simple format string examples: + + "First, thou shalt count to {0}" # References first positional argument + "Bring me a {}" # Implicitly references the first positional argument + "From {} to {}" # Same as "From {0} to {1}" + "My quest is {name}" # References keyword argument 'name' + "Weight in tons {0.weight}" # 'weight' attribute of first positional arg + "Units destroyed: {players[0]}" # First element of keyword argument 'players'. + +The *conversion* field causes a type coercion before formatting. +Normally, the job of formatting a value is done by the "__format__()" +method of the value itself. However, in some cases it is desirable to +force a type to be formatted as a string, overriding its own +definition of formatting. By converting the value to a string before +calling "__format__()", the normal formatting logic is bypassed. + +Three conversion flags are currently supported: "'!s'" which calls +"str()" on the value, "'!r'" which calls "repr()" and "'!a'" which +calls "ascii()". + +Some examples: + + "Harold's a clever {0!s}" # Calls str() on the argument first + "Bring out the holy {name!r}" # Calls repr() on the argument first + "More {!a}" # Calls ascii() on the argument first + +The *format_spec* field contains a specification of how the value +should be presented, including such details as field width, alignment, +padding, decimal precision and so on. Each value type can define its +own “formatting mini-language” or interpretation of the *format_spec*. + +Most built-in types support a common formatting mini-language, which +is described in the next section. + +A *format_spec* field can also include nested replacement fields +within it. These nested replacement fields may contain a field name, +conversion flag and format specification, but deeper nesting is not +allowed. The replacement fields within the format_spec are +substituted before the *format_spec* string is interpreted. This +allows the formatting of a value to be dynamically specified. + +See the Format examples section for some examples. + + +Format specification mini-language +================================== + +“Format specifications” are used within replacement fields contained +within a format string to define how individual values are presented +(see Format string syntax, f-strings, and t-strings). They can also be +passed directly to the built-in "format()" function. Each formattable +type may define how the format specification is to be interpreted. + +Most built-in types implement the following options for format +specifications, although some of the formatting options are only +supported by the numeric types. + +A general convention is that an empty format specification produces +the same result as if you had called "str()" on the value. A non-empty +format specification typically modifies the result. 
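+
+For instance (an informal illustration of this convention):
+
+   >>> format(42, '')     # empty format spec: same result as str(42)
+   '42'
+   >>> format(42, '>6')   # a non-empty spec modifies the presentation
+   '    42'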
+
+The general form of a *standard format specifier* is:
+
+   format_spec:              [options][width_and_precision][type]
+   options:                  [[fill]align][sign]["z"]["#"]["0"]
+   fill:                     <any character>
+   align:                    "<" | ">" | "=" | "^"
+   sign:                     "+" | "-" | " "
+   width_and_precision:      [width_with_grouping][precision_with_grouping]
+   width_with_grouping:      [width][grouping]
+   precision_with_grouping:  "." [precision][grouping] | "." grouping
+   width:                    digit+
+   precision:                digit+
+   grouping:                 "," | "_"
+   type:                     "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g"
+                             | "G" | "n" | "o" | "s" | "x" | "X" | "%"
+
+If a valid *align* value is specified, it can be preceded by a *fill*
+character that can be any character and defaults to a space if
+omitted. It is not possible to use a literal curly brace (”"{"” or
+“"}"”) as the *fill* character in a formatted string literal or when
+using the "str.format()" method. However, it is possible to insert a
+curly brace with a nested replacement field. This limitation doesn’t
+affect the "format()" function.
+
+The meaning of the various alignment options is as follows:
+
++-----------+------------------------------------------------------------+
+| Option    | Meaning                                                    |
+|===========|============================================================|
+| "'<'"     | Forces the field to be left-aligned within the available   |
+|           | space (this is the default for most objects).              |
++-----------+------------------------------------------------------------+
+| "'>'"     | Forces the field to be right-aligned within the available  |
+|           | space (this is the default for numbers).                   |
++-----------+------------------------------------------------------------+
+| "'='"     | Forces the padding to be placed after the sign (if any)    |
+|           | but before the digits. This is used for printing fields    |
+|           | in the form ‘+000000120’. This alignment option is only    |
+|           | valid for numeric types, excluding "complex". It becomes   |
+|           | the default for numbers when ‘0’ immediately precedes the  |
+|           | field width.                                               |
++-----------+------------------------------------------------------------+
+| "'^'"     | Forces the field to be centered within the available       |
+|           | space.                                                     |
++-----------+------------------------------------------------------------+
+
+Note that unless a minimum field width is defined, the field width
+will always be the same size as the data to fill it, so that the
+alignment option has no meaning in this case.
+
+The *sign* option is only valid for number types, and can be one of
+the following:
+
++-----------+------------------------------------------------------------+
+| Option    | Meaning                                                    |
+|===========|============================================================|
+| "'+'"     | Indicates that a sign should be used for both positive as  |
+|           | well as negative numbers.                                  |
++-----------+------------------------------------------------------------+
+| "'-'"     | Indicates that a sign should be used only for negative     |
+|           | numbers (this is the default behavior).                    |
++-----------+------------------------------------------------------------+
+| space     | Indicates that a leading space should be used on positive  |
+|           | numbers, and a minus sign on negative numbers.             |
++-----------+------------------------------------------------------------+
+
+The "'z'" option coerces negative zero floating-point values to
+positive zero after rounding to the format precision. This option is
+only valid for floating-point presentation types.
+
+Changed in version 3.11: Added the "'z'" option (see also **PEP
+682**).
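+
+For example, the "'z'" option suppresses a negative zero produced by
+rounding (an informal sketch; the option requires Python 3.11 or
+later):
+
+   >>> format(-0.001, '.2f')    # rounds to negative zero
+   '-0.00'
+   >>> format(-0.001, 'z.2f')   # "z" coerces it to positive zero
+   '0.00'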
+ +The "'#'" option causes the “alternate form” to be used for the +conversion. The alternate form is defined differently for different +types. This option is only valid for integer, float and complex +types. For integers, when binary, octal, or hexadecimal output is +used, this option adds the respective prefix "'0b'", "'0o'", "'0x'", +or "'0X'" to the output value. For float and complex the alternate +form causes the result of the conversion to always contain a decimal- +point character, even if no digits follow it. Normally, a decimal- +point character appears in the result of these conversions only if a +digit follows it. In addition, for "'g'" and "'G'" conversions, +trailing zeros are not removed from the result. + +The *width* is a decimal integer defining the minimum total field +width, including any prefixes, separators, and other formatting +characters. If not specified, then the field width will be determined +by the content. + +When no explicit alignment is given, preceding the *width* field by a +zero ("'0'") character enables sign-aware zero-padding for numeric +types, excluding "complex". This is equivalent to a *fill* character +of "'0'" with an *alignment* type of "'='". + +Changed in version 3.10: Preceding the *width* field by "'0'" no +longer affects the default alignment for strings. + +The *precision* is a decimal integer indicating how many digits should +be displayed after the decimal point for presentation types "'f'" and +"'F'", or before and after the decimal point for presentation types +"'g'" or "'G'". For string presentation types the field indicates the +maximum field size - in other words, how many characters will be used +from the field content. The *precision* is not allowed for integer +presentation types. + +The *grouping* option after *width* and *precision* fields specifies a +digit group separator for the integral and fractional parts of a +number respectively. It can be one of the following: + ++-----------+------------------------------------------------------------+ +| Option | Meaning | +|===========|============================================================| +| "','" | Inserts a comma every 3 digits for integer presentation | +| | type "'d'" and floating-point presentation types, | +| | excluding "'n'". For other presentation types, this option | +| | is not supported. | ++-----------+------------------------------------------------------------+ +| "'_'" | Inserts an underscore every 3 digits for integer | +| | presentation type "'d'" and floating-point presentation | +| | types, excluding "'n'". For integer presentation types | +| | "'b'", "'o'", "'x'", and "'X'", underscores are inserted | +| | every 4 digits. For other presentation types, this option | +| | is not supported. | ++-----------+------------------------------------------------------------+ + +For a locale aware separator, use the "'n'" presentation type instead. + +Changed in version 3.1: Added the "','" option (see also **PEP 378**). + +Changed in version 3.6: Added the "'_'" option (see also **PEP 515**). + +Changed in version 3.14: Support the *grouping* option for the +fractional part. + +Finally, the *type* determines how the data should be presented. + +The available string presentation types are: + + +-----------+------------------------------------------------------------+ + | Type | Meaning | + |===========|============================================================| + | "'s'" | String format. This is the default type for strings and | + | | may be omitted. 
| + +-----------+------------------------------------------------------------+ + | None | The same as "'s'". | + +-----------+------------------------------------------------------------+ + +The available integer presentation types are: + + +-----------+------------------------------------------------------------+ + | Type | Meaning | + |===========|============================================================| + | "'b'" | Binary format. Outputs the number in base 2. | + +-----------+------------------------------------------------------------+ + | "'c'" | Character. Converts the integer to the corresponding | + | | unicode character before printing. | + +-----------+------------------------------------------------------------+ + | "'d'" | Decimal Integer. Outputs the number in base 10. | + +-----------+------------------------------------------------------------+ + | "'o'" | Octal format. Outputs the number in base 8. | + +-----------+------------------------------------------------------------+ + | "'x'" | Hex format. Outputs the number in base 16, using lower- | + | | case letters for the digits above 9. | + +-----------+------------------------------------------------------------+ + | "'X'" | Hex format. Outputs the number in base 16, using upper- | + | | case letters for the digits above 9. In case "'#'" is | + | | specified, the prefix "'0x'" will be upper-cased to "'0X'" | + | | as well. | + +-----------+------------------------------------------------------------+ + | "'n'" | Number. This is the same as "'d'", except that it uses the | + | | current locale setting to insert the appropriate digit | + | | group separators. | + +-----------+------------------------------------------------------------+ + | None | The same as "'d'". | + +-----------+------------------------------------------------------------+ + +In addition to the above presentation types, integers can be formatted +with the floating-point presentation types listed below (except "'n'" +and "None"). When doing so, "float()" is used to convert the integer +to a floating-point number before formatting. + +The available presentation types for "float" and "Decimal" values are: + + +-----------+------------------------------------------------------------+ + | Type | Meaning | + |===========|============================================================| + | "'e'" | Scientific notation. For a given precision "p", formats | + | | the number in scientific notation with the letter ‘e’ | + | | separating the coefficient from the exponent. The | + | | coefficient has one digit before and "p" digits after the | + | | decimal point, for a total of "p + 1" significant digits. | + | | With no precision given, uses a precision of "6" digits | + | | after the decimal point for "float", and shows all | + | | coefficient digits for "Decimal". If "p=0", the decimal | + | | point is omitted unless the "#" option is used. For | + | | "float", the exponent always contains at least two digits, | + | | and is zero if the value is zero. | + +-----------+------------------------------------------------------------+ + | "'E'" | Scientific notation. Same as "'e'" except it uses an upper | + | | case ‘E’ as the separator character. | + +-----------+------------------------------------------------------------+ + | "'f'" | Fixed-point notation. For a given precision "p", formats | + | | the number as a decimal number with exactly "p" digits | + | | following the decimal point. 
With no precision given, uses | + | | a precision of "6" digits after the decimal point for | + | | "float", and uses a precision large enough to show all | + | | coefficient digits for "Decimal". If "p=0", the decimal | + | | point is omitted unless the "#" option is used. | + +-----------+------------------------------------------------------------+ + | "'F'" | Fixed-point notation. Same as "'f'", but converts "nan" to | + | | "NAN" and "inf" to "INF". | + +-----------+------------------------------------------------------------+ + | "'g'" | General format. For a given precision "p >= 1", this | + | | rounds the number to "p" significant digits and then | + | | formats the result in either fixed-point format or in | + | | scientific notation, depending on its magnitude. A | + | | precision of "0" is treated as equivalent to a precision | + | | of "1". The precise rules are as follows: suppose that | + | | the result formatted with presentation type "'e'" and | + | | precision "p-1" would have exponent "exp". Then, if "m <= | + | | exp < p", where "m" is -4 for floats and -6 for | + | | "Decimals", the number is formatted with presentation type | + | | "'f'" and precision "p-1-exp". Otherwise, the number is | + | | formatted with presentation type "'e'" and precision | + | | "p-1". In both cases insignificant trailing zeros are | + | | removed from the significand, and the decimal point is | + | | also removed if there are no remaining digits following | + | | it, unless the "'#'" option is used. With no precision | + | | given, uses a precision of "6" significant digits for | + | | "float". For "Decimal", the coefficient of the result is | + | | formed from the coefficient digits of the value; | + | | scientific notation is used for values smaller than "1e-6" | + | | in absolute value and values where the place value of the | + | | least significant digit is larger than 1, and fixed-point | + | | notation is used otherwise. Positive and negative | + | | infinity, positive and negative zero, and nans, are | + | | formatted as "inf", "-inf", "0", "-0" and "nan" | + | | respectively, regardless of the precision. | + +-----------+------------------------------------------------------------+ + | "'G'" | General format. Same as "'g'" except switches to "'E'" if | + | | the number gets too large. The representations of infinity | + | | and NaN are uppercased, too. | + +-----------+------------------------------------------------------------+ + | "'n'" | Number. This is the same as "'g'", except that it uses the | + | | current locale setting to insert the appropriate digit | + | | group separators for the integral part of a number. | + +-----------+------------------------------------------------------------+ + | "'%'" | Percentage. Multiplies the number by 100 and displays in | + | | fixed ("'f'") format, followed by a percent sign. | + +-----------+------------------------------------------------------------+ + | None | For "float" this is like the "'g'" type, except that when | + | | fixed- point notation is used to format the result, it | + | | always includes at least one digit past the decimal point, | + | | and switches to the scientific notation when "exp >= p - | + | | 1". When the precision is not specified, the latter will | + | | be as large as needed to represent the given value | + | | faithfully. For "Decimal", this is the same as either | + | | "'g'" or "'G'" depending on the value of | + | | "context.capitals" for the current decimal context. 
The | + | | overall effect is to match the output of "str()" as | + | | altered by the other format modifiers. | + +-----------+------------------------------------------------------------+ + +The result should be correctly rounded to a given precision "p" of +digits after the decimal point. The rounding mode for "float" matches +that of the "round()" builtin. For "Decimal", the rounding mode of +the current context will be used. + +The available presentation types for "complex" are the same as those +for "float" ("'%'" is not allowed). Both the real and imaginary +components of a complex number are formatted as floating-point +numbers, according to the specified presentation type. They are +separated by the mandatory sign of the imaginary part, the latter +being terminated by a "j" suffix. If the presentation type is +missing, the result will match the output of "str()" (complex numbers +with a non-zero real part are also surrounded by parentheses), +possibly altered by other format modifiers. + + +Format examples +=============== + +This section contains examples of the "str.format()" syntax and +comparison with the old "%"-formatting. + +In most of the cases the syntax is similar to the old "%"-formatting, +with the addition of the "{}" and with ":" used instead of "%". For +example, "'%03.2f'" can be translated to "'{:03.2f}'". + +The new format syntax also supports new and different options, shown +in the following examples. + +Accessing arguments by position: + + >>> '{0}, {1}, {2}'.format('a', 'b', 'c') + 'a, b, c' + >>> '{}, {}, {}'.format('a', 'b', 'c') # 3.1+ only + 'a, b, c' + >>> '{2}, {1}, {0}'.format('a', 'b', 'c') + 'c, b, a' + >>> '{2}, {1}, {0}'.format(*'abc') # unpacking argument sequence + 'c, b, a' + >>> '{0}{1}{0}'.format('abra', 'cad') # arguments' indices can be repeated + 'abracadabra' + +Accessing arguments by name: + + >>> 'Coordinates: {latitude}, {longitude}'.format(latitude='37.24N', longitude='-115.81W') + 'Coordinates: 37.24N, -115.81W' + >>> coord = {'latitude': '37.24N', 'longitude': '-115.81W'} + >>> 'Coordinates: {latitude}, {longitude}'.format(**coord) + 'Coordinates: 37.24N, -115.81W' + +Accessing arguments’ attributes: + + >>> c = 3-5j + >>> ('The complex number {0} is formed from the real part {0.real} ' + ... 'and the imaginary part {0.imag}.').format(c) + 'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.' + >>> class Point: + ... def __init__(self, x, y): + ... self.x, self.y = x, y + ... def __str__(self): + ... return 'Point({self.x}, {self.y})'.format(self=self) + ... 
+ >>> str(Point(4, 2)) + 'Point(4, 2)' + +Accessing arguments’ items: + + >>> coord = (3, 5) + >>> 'X: {0[0]}; Y: {0[1]}'.format(coord) + 'X: 3; Y: 5' + +Replacing "%s" and "%r": + + >>> "repr() shows quotes: {!r}; str() doesn't: {!s}".format('test1', 'test2') + "repr() shows quotes: 'test1'; str() doesn't: test2" + +Aligning the text and specifying a width: + + >>> '{:<30}'.format('left aligned') + 'left aligned ' + >>> '{:>30}'.format('right aligned') + ' right aligned' + >>> '{:^30}'.format('centered') + ' centered ' + >>> '{:*^30}'.format('centered') # use '*' as a fill char + '***********centered***********' + +Replacing "%+f", "%-f", and "% f" and specifying a sign: + + >>> '{:+f}; {:+f}'.format(3.14, -3.14) # show it always + '+3.140000; -3.140000' + >>> '{: f}; {: f}'.format(3.14, -3.14) # show a space for positive numbers + ' 3.140000; -3.140000' + >>> '{:-f}; {:-f}'.format(3.14, -3.14) # show only the minus -- same as '{:f}; {:f}' + '3.140000; -3.140000' + +Replacing "%x" and "%o" and converting the value to different bases: + + >>> # format also supports binary numbers + >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42) + 'int: 42; hex: 2a; oct: 52; bin: 101010' + >>> # with 0x, 0o, or 0b as prefix: + >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42) + 'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010' + +Using the comma or the underscore as a digit group separator: + + >>> '{:,}'.format(1234567890) + '1,234,567,890' + >>> '{:_}'.format(1234567890) + '1_234_567_890' + >>> '{:_b}'.format(1234567890) + '100_1001_1001_0110_0000_0010_1101_0010' + >>> '{:_x}'.format(1234567890) + '4996_02d2' + >>> '{:_}'.format(123456789.123456789) + '123_456_789.12345679' + >>> '{:.,}'.format(123456789.123456789) + '123456789.123,456,79' + >>> '{:,._}'.format(123456789.123456789) + '123,456,789.123_456_79' + +Expressing a percentage: + + >>> points = 19 + >>> total = 22 + >>> 'Correct answers: {:.2%}'.format(points/total) + 'Correct answers: 86.36%' + +Using type-specific formatting: + + >>> import datetime as dt + >>> d = dt.datetime(2010, 7, 4, 12, 15, 58) + >>> '{:%Y-%m-%d %H:%M:%S}'.format(d) + '2010-07-04 12:15:58' + +Nesting arguments and more complex examples: + + >>> for align, text in zip('<^>', ['left', 'center', 'right']): + ... '{0:{fill}{align}16}'.format(text, fill=align, align=align) + ... + 'left<<<<<<<<<<<<' + '^^^^^center^^^^^' + '>>>>>>>>>>>right' + >>> + >>> octets = [192, 168, 0, 1] + >>> '{:02X}{:02X}{:02X}{:02X}'.format(*octets) + 'C0A80001' + >>> int(_, 16) + 3232235521 + >>> + >>> width = 5 + >>> for num in range(5,12): + ... for base in 'dXob': + ... print('{0:{width}{base}}'.format(num, base=base, width=width), end=' ') + ... print() + ... 
+ 5 5 5 101 + 6 6 6 110 + 7 7 7 111 + 8 8 10 1000 + 9 9 11 1001 + 10 A 12 1010 + 11 B 13 1011 +''', + 'function': r'''Function definitions +******************** + +A function definition defines a user-defined function object (see +section The standard type hierarchy): + + funcdef: [decorators] "def" funcname [type_params] "(" [parameter_list] ")" + ["->" expression] ":" suite + decorators: decorator+ + decorator: "@" assignment_expression NEWLINE + parameter_list: defparameter ("," defparameter)* "," "/" ["," [parameter_list_no_posonly]] + | parameter_list_no_posonly + parameter_list_no_posonly: defparameter ("," defparameter)* ["," [parameter_list_starargs]] + | parameter_list_starargs + parameter_list_starargs: "*" [star_parameter] ("," defparameter)* ["," [parameter_star_kwargs]] + | "*" ("," defparameter)+ ["," [parameter_star_kwargs]] + | parameter_star_kwargs + parameter_star_kwargs: "**" parameter [","] + parameter: identifier [":" expression] + star_parameter: identifier [":" ["*"] expression] + defparameter: parameter ["=" expression] + funcname: identifier + +A function definition is an executable statement. Its execution binds +the function name in the current local namespace to a function object +(a wrapper around the executable code for the function). This +function object contains a reference to the current global namespace +as the global namespace to be used when the function is called. + +The function definition does not execute the function body; this gets +executed only when the function is called. [4] + +A function definition may be wrapped by one or more *decorator* +expressions. Decorator expressions are evaluated when the function is +defined, in the scope that contains the function definition. The +result must be a callable, which is invoked with the function object +as the only argument. The returned value is bound to the function name +instead of the function object. Multiple decorators are applied in +nested fashion. For example, the following code + + @f1(arg) + @f2 + def func(): pass + +is roughly equivalent to + + def func(): pass + func = f1(arg)(f2(func)) + +except that the original function is not temporarily bound to the name +"func". + +Changed in version 3.9: Functions may be decorated with any valid +"assignment_expression". Previously, the grammar was much more +restrictive; see **PEP 614** for details. + +A list of type parameters may be given in square brackets between the +function’s name and the opening parenthesis for its parameter list. +This indicates to static type checkers that the function is generic. +At runtime, the type parameters can be retrieved from the function’s +"__type_params__" attribute. See Generic functions for more. + +Changed in version 3.12: Type parameter lists are new in Python 3.12. + +When one or more *parameters* have the form *parameter* "=" +*expression*, the function is said to have “default parameter values.” +For a parameter with a default value, the corresponding *argument* may +be omitted from a call, in which case the parameter’s default value is +substituted. If a parameter has a default value, all following +parameters up until the “"*"” must also have a default value — this is +a syntactic restriction that is not expressed by the grammar. + +**Default parameter values are evaluated from left to right when the +function definition is executed.** This means that the expression is +evaluated once, when the function is defined, and that the same “pre- +computed” value is used for each call. 
+This is especially important
+to understand when a default parameter value is a mutable object, such
+as a list or a dictionary: if the function modifies the object (e.g.
+by appending an item to a list), the default parameter value is in
+effect modified. This is generally not what was intended. A way
+around this is to use "None" as the default, and explicitly test for
+it in the body of the function, e.g.:
+
+   def whats_on_the_telly(penguin=None):
+       if penguin is None:
+           penguin = []
+       penguin.append("property of the zoo")
+       return penguin
+
+Function call semantics are described in more detail in section Calls.
+A function call always assigns values to all parameters mentioned in
+the parameter list, either from positional arguments, from keyword
+arguments, or from default values. If the form “"*identifier"” is
+present, it is initialized to a tuple receiving any excess positional
+parameters, defaulting to the empty tuple. If the form
+“"**identifier"” is present, it is initialized to a new ordered
+mapping receiving any excess keyword arguments, defaulting to a new
+empty mapping of the same type. Parameters after “"*"” or
+“"*identifier"” are keyword-only parameters and may only be passed by
+keyword arguments. Parameters before “"/"” are positional-only
+parameters and may only be passed by positional arguments.
+
+Changed in version 3.8: The "/" function parameter syntax may be used
+to indicate positional-only parameters. See **PEP 570** for details.
+
+Parameters may have an *annotation* of the form “": expression"”
+following the parameter name. Any parameter may have an annotation,
+even those of the form "*identifier" or "**identifier". (As a special
+case, parameters of the form "*identifier" may have an annotation “":
+*expression"”.) Functions may have a “return” annotation of the form
+“"-> expression"” after the parameter list. These annotations can be
+any valid Python expression. The presence of annotations does not
+change the semantics of a function. See Annotations for more
+information on annotations.
+
+Changed in version 3.11: Parameters of the form “"*identifier"” may
+have an annotation “": *expression"”. See **PEP 646**.
+
+It is also possible to create anonymous functions (functions not bound
+to a name), for immediate use in expressions. This uses lambda
+expressions, described in section Lambdas. Note that the lambda
+expression is merely a shorthand for a simplified function definition;
+a function defined in a “"def"” statement can be passed around or
+assigned to another name just like a function defined by a lambda
+expression. The “"def"” form is actually more powerful since it
+allows the execution of multiple statements and annotations.
+
+**Programmer’s note:** Functions are first-class objects. A “"def"”
+statement executed inside a function definition defines a local
+function that can be returned or passed around. Free variables used
+in the nested function can access the local variables of the function
+containing the def. See section Naming and binding for details.
+
+See also:
+
+  **PEP 3107** - Function Annotations
+     The original specification for function annotations.
+
+  **PEP 484** - Type Hints
+     Definition of a standard meaning for annotations: type hints.
+
+  **PEP 526** - Syntax for Variable Annotations
+     Ability to type hint variable declarations, including class
+     variables and instance variables.
+
+  **PEP 563** - Postponed Evaluation of Annotations
+     Support for forward references within annotations by preserving
+     annotations in a string form at runtime instead of eager
+     evaluation.
+
+  **PEP 318** - Decorators for Functions and Methods
+     Function and method decorators were introduced. Class decorators
+     were introduced in **PEP 3129**.
+''',
+ 'global': r'''The "global" statement
+**********************
+
+   global_stmt: "global" identifier ("," identifier)*
+
+The "global" statement causes the listed identifiers to be interpreted
+as globals. It would be impossible to assign to a global variable
+without "global", although free variables may refer to globals without
+being declared global.
+
+The "global" statement applies to the entire current scope (module,
+function body or class definition). A "SyntaxError" is raised if a
+variable is used or assigned to prior to its global declaration in the
+scope.
+
+At the module level, all variables are global, so a "global" statement
+has no effect. However, variables must still not be used or assigned
+to prior to their "global" declaration. This requirement is relaxed in
+the interactive prompt (*REPL*).
+
+**Programmer’s note:** "global" is a directive to the parser. It
+applies only to code parsed at the same time as the "global"
+statement. In particular, a "global" statement contained in a string
+or code object supplied to the built-in "exec()" function does not
+affect the code block *containing* the function call, and code
+contained in such a string is unaffected by "global" statements in the
+code containing the function call. The same applies to the "eval()"
+and "compile()" functions.
+''',
+ 'id-classes': r'''Reserved classes of identifiers
+*******************************
+
+Certain classes of identifiers (besides keywords) have special
+meanings. These classes are identified by the patterns of leading and
+trailing underscore characters:
+
+"_*"
+   Not imported by "from module import *".
+
+"_"
+   In a "case" pattern within a "match" statement, "_" is a soft
+   keyword that denotes a wildcard.
+
+   Separately, the interactive interpreter makes the result of the
+   last evaluation available in the variable "_". (It is stored in the
+   "builtins" module, alongside built-in functions like "print".)
+
+   Elsewhere, "_" is a regular identifier. It is often used to name
+   “special” items, but it is not special to Python itself.
+
+   Note:
+
+     The name "_" is often used in conjunction with
+     internationalization; refer to the documentation for the
+     "gettext" module for more information on this convention. It is
+     also commonly used for unused variables.
+
+"__*__"
+   System-defined names, informally known as “dunder” names. These
+   names are defined by the interpreter and its implementation
+   (including the standard library). Current system names are
+   discussed in the Special method names section and elsewhere. More
+   will likely be defined in future versions of Python. *Any* use of
+   "__*__" names, in any context, that does not follow explicitly
+   documented use, is subject to breakage without warning.
+
+"__*"
+   Class-private names. Names in this category, when used within the
+   context of a class definition, are re-written to use a mangled form
+   to help avoid name clashes between “private” attributes of base and
+   derived classes. See section Identifiers (Names).
+''',
+ 'identifiers': r'''Names (identifiers and keywords)
+********************************
+
+"NAME" tokens represent *identifiers*, *keywords*, and *soft
+keywords*.
+
+Names are composed of the following characters:
+
+* uppercase and lowercase letters ("A-Z" and "a-z"),
+
+* the underscore ("_"),
+
+* digits ("0" through "9"), which cannot appear as the first
+  character, and
+
+* non-ASCII characters. Valid names may only contain “letter-like” and
+  “digit-like” characters; see Non-ASCII characters in names for
+  details.
+
+Names must contain at least one character, but have no upper length
+limit. Case is significant.
+
+Formally, names are described by the following lexical definitions:
+
+   NAME: name_start name_continue*
+   name_start: "a"..."z" | "A"..."Z" | "_" | <any non-ASCII character>
+   name_continue: name_start | "0"..."9"
+   identifier: <NAME, except keywords>
+
+Note that not all names matched by this grammar are valid; see Non-
+ASCII characters in names for details.
+
+
+Keywords
+========
+
+The following names are used as reserved words, or *keywords* of the
+language, and cannot be used as ordinary identifiers. They must be
+spelled exactly as written here:
+
+   False      await      else       import     pass
+   None       break      except     in         raise
+   True       class      finally    is         return
+   and        continue   for        lambda     try
+   as         def        from       nonlocal   while
+   assert     del        global     not        with
+   async      elif       if         or         yield
+
+
+Soft Keywords
+=============
+
+Added in version 3.10.
+
+Some names are only reserved under specific contexts. These are known
+as *soft keywords*:
+
+* "match", "case", and "_", when used in the "match" statement.
+
+* "type", when used in the "type" statement.
+
+These syntactically act as keywords in their specific contexts, but
+this distinction is done at the parser level, not when tokenizing.
+
+As soft keywords, their use in the grammar is possible while still
+preserving compatibility with existing code that uses these names as
+identifier names.
+
+Changed in version 3.12: "type" is now a soft keyword.
+
+
+Reserved classes of identifiers
+===============================
+
+Certain classes of identifiers (besides keywords) have special
+meanings. These classes are identified by the patterns of leading and
+trailing underscore characters:
+
+"_*"
+   Not imported by "from module import *".
+
+"_"
+   In a "case" pattern within a "match" statement, "_" is a soft
+   keyword that denotes a wildcard.
+
+   Separately, the interactive interpreter makes the result of the
+   last evaluation available in the variable "_". (It is stored in the
+   "builtins" module, alongside built-in functions like "print".)
+
+   Elsewhere, "_" is a regular identifier. It is often used to name
+   “special” items, but it is not special to Python itself.
+
+   Note:
+
+     The name "_" is often used in conjunction with
+     internationalization; refer to the documentation for the
+     "gettext" module for more information on this convention. It is
+     also commonly used for unused variables.
+
+"__*__"
+   System-defined names, informally known as “dunder” names. These
+   names are defined by the interpreter and its implementation
+   (including the standard library). Current system names are
+   discussed in the Special method names section and elsewhere. More
+   will likely be defined in future versions of Python. *Any* use of
+   "__*__" names, in any context, that does not follow explicitly
+   documented use, is subject to breakage without warning.
+
+"__*"
+   Class-private names. Names in this category, when used within the
+   context of a class definition, are re-written to use a mangled form
+   to help avoid name clashes between “private” attributes of base and
+   derived classes. See section Identifiers (Names).
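+
+   For example, a quick doctest sketch of that mangling (the class
+   name "Spam" and its attribute are purely illustrative):
+
+      >>> class Spam:
+      ...     __tally = 0
+      ...
+      >>> [name for name in vars(Spam) if 'tally' in name]
+      ['_Spam__tally']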
+
+
+Non-ASCII characters in names
+=============================
+
+Names that contain non-ASCII characters need additional normalization
+and validation beyond the rules and grammar explained above. For
+example, "ř_1", "蛇", or "साँप" are valid names, but "r〰2", "€", or
+"🐍" are not.
+
+This section explains the exact rules.
+
+All names are converted into the normalization form NFKC while
+parsing. This means that, for example, some typographic variants of
+characters are converted to their “basic” form. For example,
+"fiⁿₐˡᵢᶻₐᵗᵢᵒₙ" normalizes to "finalization", so Python treats them as
+the same name:
+
+   >>> fiⁿₐˡᵢᶻₐᵗᵢᵒₙ = 3
+   >>> finalization
+   3
+
+Note:
+
+  Normalization is done at the lexical level only. Run-time functions
+  that take names as *strings* generally do not normalize their
+  arguments. For example, the variable defined above is accessible at
+  run time in the "globals()" dictionary as
+  "globals()["finalization"]" but not "globals()["fiⁿₐˡᵢᶻₐᵗᵢᵒₙ"]".
+
+Similarly to how ASCII-only names must contain only letters, digits
+and the underscore, and cannot start with a digit, a valid name must
+start with a character in the “letter-like” set "xid_start", and the
+remaining characters must be in the “letter- and digit-like” set
+"xid_continue".
+
+These sets are based on the *XID_Start* and *XID_Continue* sets as
+defined by the Unicode standard annex UAX-31. Python’s "xid_start"
+additionally includes the underscore ("_"). Note that Python does not
+necessarily conform to UAX-31.
+
+A non-normative listing of characters in the *XID_Start* and
+*XID_Continue* sets as defined by Unicode is available in the
+DerivedCoreProperties.txt file in the Unicode Character Database. For
+reference, the construction rules for the "xid_*" sets are given
+below.
+
+The set "id_start" is defined as the union of:
+
+* Unicode category "<Lu>" - uppercase letters (includes "A" to "Z")
+
+* Unicode category "<Ll>" - lowercase letters (includes "a" to "z")
+
+* Unicode category "<Lt>" - titlecase letters
+
+* Unicode category "<Lm>" - modifier letters
+
+* Unicode category "<Lo>" - other letters
+
+* Unicode category "<Nl>" - letter numbers
+
+* {"_"} - the underscore
+
+* "<Other_ID_Start>" - an explicit set of characters in PropList.txt
+  to support backwards compatibility
+
+The set "xid_start" then closes this set under NFKC normalization, by
+removing all characters whose normalization is not of the form
+"id_start id_continue*".
+
+The set "id_continue" is defined as the union of:
+
+* "id_start" (see above)
+
+* Unicode category "<Nd>" - decimal numbers (includes "0" to "9")
+
+* Unicode category "<Pc>" - connector punctuations
+
+* Unicode category "<Mn>" - nonspacing marks
+
+* Unicode category "<Mc>" - spacing combining marks
+
+* "<Other_ID_Continue>" - another explicit set of characters in
+  PropList.txt to support backwards compatibility
+
+Again, "xid_continue" closes this set under NFKC normalization.
+
+Unicode categories use the version of the Unicode Character Database
+as included in the "unicodedata" module.
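+
+As an illustrative check (outside the parser itself), the
+"unicodedata" module can report the category of a character:
+
+   >>> import unicodedata
+   >>> unicodedata.category('A')   # Lu: uppercase letter
+   'Lu'
+   >>> unicodedata.category('_')   # Pc: connector punctuation
+   'Pc'
+   >>> unicodedata.category('蛇')  # Lo: other letter
+   'Lo'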
+
+See also:
+
+  * **PEP 3131** – Supporting Non-ASCII Identifiers
+
+  * **PEP 672** – Unicode-related Security Considerations for Python
+''',
+ 'if': r'''The "if" statement
+******************
+
+The "if" statement is used for conditional execution:
+
+   if_stmt: "if" assignment_expression ":" suite
+            ("elif" assignment_expression ":" suite)*
+            ["else" ":" suite]
+
+It selects exactly one of the suites by evaluating the expressions one
+by one until one is found to be true (see section Boolean operations
+for the definition of true and false); then that suite is executed
+(and no other part of the "if" statement is executed or evaluated).
+If all expressions are false, the suite of the "else" clause, if
+present, is executed.
+''',
+ 'imaginary': r'''Imaginary literals
+******************
+
+Python has complex number objects, but no complex literals. Instead,
+*imaginary literals* denote complex numbers with a zero real part.
+
+For example, in math, the complex number 3+4.2*i* is written as the
+real number 3 added to the imaginary number 4.2*i*. Python uses a
+similar syntax, except the imaginary unit is written as "j" rather
+than *i*:
+
+   3+4.2j
+
+This is an expression composed of the integer literal "3", the
+operator ‘"+"’, and the imaginary literal "4.2j". Since these are
+three separate tokens, whitespace is allowed between them:
+
+   3 + 4.2j
+
+No whitespace is allowed *within* each token. In particular, the "j"
+suffix may not be separated from the number before it.
+
+The number before the "j" has the same syntax as a floating-point
+literal. Thus, the following are valid imaginary literals:
+
+   4.2j
+   3.14j
+   10.j
+   .001j
+   1e100j
+   3.14e-10j
+   3.14_15_93j
+
+Unlike in a floating-point literal, the decimal point can be omitted
+if the imaginary number only has an integer part. The number is still
+evaluated as a floating-point number, not an integer:
+
+   10j
+   0j
+   1000000000000000000000000j   # equivalent to 1e+24j
+
+The "j" suffix is case-insensitive. That means you can use "J"
+instead:
+
+   3.14J   # equivalent to 3.14j
+
+Formally, imaginary literals are described by the following lexical
+definition:
+
+   imagnumber: (floatnumber | digitpart) ("j" | "J")
+''',
+ 'import': r'''The "import" statement
+**********************
+
+   import_stmt: "import" module ["as" identifier] ("," module ["as" identifier])*
+                | "from" relative_module "import" identifier ["as" identifier]
+                  ("," identifier ["as" identifier])*
+                | "from" relative_module "import" "(" identifier ["as" identifier]
+                  ("," identifier ["as" identifier])* [","] ")"
+                | "from" relative_module "import" "*"
+   module: (identifier ".")* identifier
+   relative_module: "."* module | "."+
+
+The basic import statement (no "from" clause) is executed in two
+steps:
+
+1. find a module, loading and initializing it if necessary
+
+2. define a name or names in the local namespace for the scope where
+   the "import" statement occurs.
+
+When the statement contains multiple clauses (separated by commas) the
+two steps are carried out separately for each clause, just as though
+the clauses had been separated out into individual import statements.
+
+The details of the first step, finding and loading modules, are
+described in greater detail in the section on the import system, which
+also describes the various types of packages and modules that can be
+imported, as well as all the hooks that can be used to customize the
+import system.
+Note that failures in this step may indicate either that the module
+could not be located, *or* that an error occurred while initializing
+the module, which includes execution of the module’s code.
+
+If the requested module is retrieved successfully, it will be made
+available in the local namespace in one of three ways:
+
+* If the module name is followed by "as", then the name following "as"
+  is bound directly to the imported module.
+
+* If no other name is specified, and the module being imported is a
+  top level module, the module’s name is bound in the local namespace
+  as a reference to the imported module.
+
+* If the module being imported is *not* a top level module, then the
+  name of the top level package that contains the module is bound in
+  the local namespace as a reference to the top level package. The
+  imported module must be accessed using its fully qualified name
+  rather than directly.
+
+The "from" form uses a slightly more complex process:
+
+1. find the module specified in the "from" clause, loading and
+   initializing it if necessary;
+
+2. for each of the identifiers specified in the "import" clauses:
+
+   1. check if the imported module has an attribute by that name
+
+   2. if not, attempt to import a submodule with that name and then
+      check the imported module again for that attribute
+
+   3. if the attribute is not found, "ImportError" is raised.
+
+   4. otherwise, a reference to that value is stored in the local
+      namespace, using the name in the "as" clause if it is present,
+      otherwise using the attribute name.
+
+Examples:
+
+   import foo                 # foo imported and bound locally
+   import foo.bar.baz         # foo, foo.bar, and foo.bar.baz imported, foo bound locally
+   import foo.bar.baz as fbb  # foo, foo.bar, and foo.bar.baz imported, foo.bar.baz bound as fbb
+   from foo.bar import baz    # foo, foo.bar, and foo.bar.baz imported, foo.bar.baz bound as baz
+   from foo import attr       # foo imported and foo.attr bound as attr
+
+If the list of identifiers is replaced by a star ("'*'"), all public
+names defined in the module are bound in the local namespace for the
+scope where the "import" statement occurs.
+
+The *public names* defined by a module are determined by checking the
+module’s namespace for a variable named "__all__"; if defined, it must
+be a sequence of strings which are names defined or imported by that
+module. Names containing non-ASCII characters must be in the
+normalization form NFKC; see Non-ASCII characters in names for
+details. The names given in "__all__" are all considered public and
+are required to exist. If "__all__" is not defined, the set of public
+names includes all names found in the module’s namespace which do not
+begin with an underscore character ("'_'"). "__all__" should contain
+the entire public API. It is intended to avoid accidentally exporting
+items that are not part of the API (such as library modules which were
+imported and used within the module).
+
+The wild card form of import — "from module import *" — is only
+allowed at the module level. Attempting to use it in class or
+function definitions will raise a "SyntaxError".
+
+When specifying what module to import you do not have to specify the
+absolute name of the module. When a module or package is contained
+within another package it is possible to make a relative import within
+the same top package without having to mention the package name.
By +using leading dots in the specified module or package after "from" you +can specify how high to traverse up the current package hierarchy +without specifying exact names. One leading dot means the current +package where the module making the import exists. Two dots means up +one package level. Three dots is up two levels, etc. So if you execute +"from . import mod" from a module in the "pkg" package then you will +end up importing "pkg.mod". If you execute "from ..subpkg2 import mod" +from within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The +specification for relative imports is contained in the Package +Relative Imports section. + +"importlib.import_module()" is provided to support applications that +determine dynamically the modules to be loaded. + +Raises an auditing event "import" with arguments "module", "filename", +"sys.path", "sys.meta_path", "sys.path_hooks". + + +Future statements +================= + +A *future statement* is a directive to the compiler that a particular +module should be compiled using syntax or semantics that will be +available in a specified future release of Python where the feature +becomes standard. + +The future statement is intended to ease migration to future versions +of Python that introduce incompatible changes to the language. It +allows use of the new features on a per-module basis before the +release in which the feature becomes standard. + + future_stmt: "from" "__future__" "import" feature ["as" identifier] + ("," feature ["as" identifier])* + | "from" "__future__" "import" "(" feature ["as" identifier] + ("," feature ["as" identifier])* [","] ")" + feature: identifier + +A future statement must appear near the top of the module. The only +lines that can appear before a future statement are: + +* the module docstring (if any), + +* comments, + +* blank lines, and + +* other future statements. + +The only feature that requires using the future statement is +"annotations" (see **PEP 563**). + +All historical features enabled by the future statement are still +recognized by Python 3. The list includes "absolute_import", +"division", "generators", "generator_stop", "unicode_literals", +"print_function", "nested_scopes" and "with_statement". They are all +redundant because they are always enabled, and only kept for backwards +compatibility. + +A future statement is recognized and treated specially at compile +time: Changes to the semantics of core constructs are often +implemented by generating different code. It may even be the case +that a new feature introduces new incompatible syntax (such as a new +reserved word), in which case the compiler may need to parse the +module differently. Such decisions cannot be pushed off until +runtime. + +For any given release, the compiler knows which feature names have +been defined, and raises a compile-time error if a future statement +contains a feature not known to it. + +The direct runtime semantics are the same as for any import statement: +there is a standard module "__future__", described later, and it will +be imported in the usual way at the time the future statement is +executed. + +The interesting runtime semantics depend on the specific feature +enabled by the future statement. + +Note that there is nothing special about the statement: + + import __future__ [as name] + +That is not a future statement; it’s an ordinary import statement with +no special semantics or syntax restrictions. 
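+
+By contrast, a minimal sketch of an actual future statement (the
+"annotations" feature is real; the function and the undefined name in
+its annotation are illustrative only):
+
+   from __future__ import annotations
+
+   # With the future statement in effect, the annotation below is
+   # kept unevaluated, so the not-yet-defined name raises no error.
+   def greet(who: SomeLaterDefinedType) -> None:
+       print("hello", who)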
+
+Code compiled by calls to the built-in functions "exec()" and
+"compile()" that occur in a module "M" containing a future statement
+will, by default, use the new syntax or semantics associated with the
+future statement. This can be controlled by optional arguments to
+"compile()" — see the documentation of that function for details.
+
+A future statement typed at an interactive interpreter prompt will
+take effect for the rest of the interpreter session. If an
+interpreter is started with the "-i" option, is passed a script name
+to execute, and the script includes a future statement, it will be in
+effect in the interactive session started after the script is
+executed.
+
+See also:
+
+  **PEP 236** - Back to the __future__
+     The original proposal for the __future__ mechanism.
+''',
+ 'in': r'''Membership test operations
+**************************
+
+The operators "in" and "not in" test for membership. "x in s"
+evaluates to "True" if *x* is a member of *s*, and "False" otherwise.
+"x not in s" returns the negation of "x in s". All built-in sequences
+and set types support this, as do dictionaries, for which "in" tests
+whether the dictionary has a given key. For container types such as
+list, tuple, set, frozenset, dict, or collections.deque, the
+expression "x in y" is equivalent to "any(x is e or x == e for e in
+y)".
+
+For the string and bytes types, "x in y" is "True" if and only if *x*
+is a substring of *y*. An equivalent test is "y.find(x) != -1".
+Empty strings are always considered to be a substring of any other
+string, so """ in "abc"" will return "True".
+
+For user-defined classes which define the "__contains__()" method, "x
+in y" returns "True" if "y.__contains__(x)" returns a true value, and
+"False" otherwise.
+
+For user-defined classes which do not define "__contains__()" but do
+define "__iter__()", "x in y" is "True" if some value "z", for which
+the expression "x is z or x == z" is true, is produced while iterating
+over "y". If an exception is raised during the iteration, it is as if
+"in" raised that exception.
+
+Lastly, the old-style iteration protocol is tried: if a class defines
+"__getitem__()", "x in y" is "True" if and only if there is a non-
+negative integer index *i* such that "x is y[i] or x == y[i]", and no
+lower integer index raises the "IndexError" exception. (If any other
+exception is raised, it is as if "in" raised that exception).
+
+The operator "not in" is defined to have the inverse truth value of
+"in".
+''',
+ 'integers': r'''Integer literals
+****************
+
+Integer literals denote whole numbers. For example:
+
+   7
+   3
+   2147483647
+
+There is no limit for the length of integer literals apart from what
+can be stored in available memory:
+
+   7922816251426433759354395033679228162514264337593543950336
+
+Underscores can be used to group digits for enhanced readability, and
+are ignored for determining the numeric value of the literal. For
+example, the following literals are equivalent:
+
+   100_000_000_000
+   100000000000
+   1_00_00_00_00_000
+
+Underscores can only occur between digits. For example, "_123",
+"321_", and "123__321" are *not* valid literals.
+
+Integers can be specified in binary (base 2), octal (base 8), or
+hexadecimal (base 16) using the prefixes "0b", "0o" and "0x",
+respectively. Hexadecimal digits 10 through 15 are represented by
+letters "A"-"F", case-insensitive. For example:
+
+   0b100110111
+   0b_1110_0101
+   0o177
+   0o377
+   0xdeadbeef
+   0xDead_Beef
+
+An underscore can follow the base specifier.
+For example, "0x_1f" is a valid literal, but "0_x1f" and "0x__1f" are
+not.
+
+Leading zeros in a non-zero decimal number are not allowed. For
+example, "0123" is not a valid literal. This is for disambiguation
+with C-style octal literals, which Python used before version 3.0.
+
+Formally, integer literals are described by the following lexical
+definitions:
+
+   integer: decinteger | bininteger | octinteger | hexinteger | zerointeger
+   decinteger: nonzerodigit (["_"] digit)*
+   bininteger: "0" ("b" | "B") (["_"] bindigit)+
+   octinteger: "0" ("o" | "O") (["_"] octdigit)+
+   hexinteger: "0" ("x" | "X") (["_"] hexdigit)+
+   zerointeger: "0"+ (["_"] "0")*
+   nonzerodigit: "1"..."9"
+   digit: "0"..."9"
+   bindigit: "0" | "1"
+   octdigit: "0"..."7"
+   hexdigit: digit | "a"..."f" | "A"..."F"
+
+Changed in version 3.6: Underscores are now allowed for grouping
+purposes in literals.
+''',
+ 'lambda': r'''Lambdas
+*******
+
+   lambda_expr: "lambda" [parameter_list] ":" expression
+
+Lambda expressions (sometimes called lambda forms) are used to create
+anonymous functions. The expression "lambda parameters: expression"
+yields a function object. The unnamed object behaves like a function
+object defined with:
+
+   def <lambda>(parameters):
+       return expression
+
+See section Function definitions for the syntax of parameter lists.
+Note that functions created with lambda expressions cannot contain
+statements or annotations.
+''',
+ 'lists': r'''List displays
+*************
+
+A list display is a possibly empty series of expressions enclosed in
+square brackets:
+
+   list_display: "[" [flexible_expression_list | comprehension] "]"
+
+A list display yields a new list object, the contents being specified
+by either a list of expressions or a comprehension. When a comma-
+separated list of expressions is supplied, its elements are evaluated
+from left to right and placed into the list object in that order.
+When a comprehension is supplied, the list is constructed from the
+elements resulting from the comprehension.
+''',
+ 'naming': r'''Naming and binding
+******************
+
+
+Binding of names
+================
+
+*Names* refer to objects. Names are introduced by name binding
+operations.
+
+The following constructs bind names:
+
+* formal parameters to functions,
+
+* class definitions,
+
+* function definitions,
+
+* assignment expressions,
+
+* targets that are identifiers if occurring in an assignment:
+
+  * "for" loop header,
+
+  * after "as" in a "with" statement, "except" clause, "except*"
+    clause, or in the as-pattern in structural pattern matching,
+
+  * in a capture pattern in structural pattern matching
+
+* "import" statements.
+
+* "type" statements.
+
+* type parameter lists.
+
+The "import" statement of the form "from ... import *" binds all names
+defined in the imported module, except those beginning with an
+underscore. This form may only be used at the module level.
+
+A target occurring in a "del" statement is also considered bound for
+this purpose (though the actual semantics are to unbind the name).
+
+Each assignment or import statement occurs within a block defined by a
+class or function definition or at the module level (the top-level
+code block).
+
+If a name is bound in a block, it is a local variable of that block,
+unless declared as "nonlocal" or "global". If a name is bound at the
+module level, it is a global variable. (The variables of the module
+code block are local and global.) If a variable is used in a code
+block but not defined there, it is a *free variable*.
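+
+A small sketch of these categories (all names here are illustrative):
+
+   x = "global"           # bound at module level: global variable
+
+   def outer():
+       y = "local"        # bound in outer: local variable of outer
+       def inner():
+           print(x, y)    # in inner, x is global and y is a free variable
+       inner()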
+ +Each occurrence of a name in the program text refers to the *binding* +of that name established by the following name resolution rules. + + +Resolution of names +=================== + +A *scope* defines the visibility of a name within a block. If a local +variable is defined in a block, its scope includes that block. If the +definition occurs in a function block, the scope extends to any blocks +contained within the defining one, unless a contained block introduces +a different binding for the name. + +When a name is used in a code block, it is resolved using the nearest +enclosing scope. The set of all such scopes visible to a code block +is called the block’s *environment*. + +When a name is not found at all, a "NameError" exception is raised. If +the current scope is a function scope, and the name refers to a local +variable that has not yet been bound to a value at the point where the +name is used, an "UnboundLocalError" exception is raised. +"UnboundLocalError" is a subclass of "NameError". + +If a name binding operation occurs anywhere within a code block, all +uses of the name within the block are treated as references to the +current block. This can lead to errors when a name is used within a +block before it is bound. This rule is subtle. Python lacks +declarations and allows name binding operations to occur anywhere +within a code block. The local variables of a code block can be +determined by scanning the entire text of the block for name binding +operations. See the FAQ entry on UnboundLocalError for examples. + +If the "global" statement occurs within a block, all uses of the names +specified in the statement refer to the bindings of those names in the +top-level namespace. Names are resolved in the top-level namespace by +searching the global namespace, i.e. the namespace of the module +containing the code block, and the builtins namespace, the namespace +of the module "builtins". The global namespace is searched first. If +the names are not found there, the builtins namespace is searched +next. If the names are also not found in the builtins namespace, new +variables are created in the global namespace. The global statement +must precede all uses of the listed names. + +The "global" statement has the same scope as a name binding operation +in the same block. If the nearest enclosing scope for a free variable +contains a global statement, the free variable is treated as a global. + +The "nonlocal" statement causes corresponding names to refer to +previously bound variables in the nearest enclosing function scope. +"SyntaxError" is raised at compile time if the given name does not +exist in any enclosing function scope. Type parameters cannot be +rebound with the "nonlocal" statement. + +The namespace for a module is automatically created the first time a +module is imported. The main module for a script is always called +"__main__". + +Class definition blocks and arguments to "exec()" and "eval()" are +special in the context of name resolution. A class definition is an +executable statement that may use and define names. These references +follow the normal rules for name resolution with an exception that +unbound local variables are looked up in the global namespace. The +namespace of the class definition becomes the attribute dictionary of +the class. The scope of names defined in a class block is limited to +the class block; it does not extend to the code blocks of methods. 
+This includes comprehensions and generator expressions, but it does
+not include annotation scopes, which have access to their enclosing
+class scopes. This means that the following will fail:
+
+   class A:
+       a = 42
+       b = list(a + i for i in range(10))
+
+However, the following will succeed:
+
+   class A:
+       type Alias = Nested
+       class Nested: pass
+
+   print(A.Alias.__value__)  # <class 'A.Nested'>
+
+
+Annotation scopes
+=================
+
+*Annotations*, type parameter lists and "type" statements introduce
+*annotation scopes*, which behave mostly like function scopes, but
+with some exceptions discussed below.
+
+Annotation scopes are used in the following contexts:
+
+* *Function annotations*.
+
+* *Variable annotations*.
+
+* Type parameter lists for generic type aliases.
+
+* Type parameter lists for generic functions. A generic function’s
+  annotations are executed within the annotation scope, but its
+  defaults and decorators are not.
+
+* Type parameter lists for generic classes. A generic class’s base
+  classes and keyword arguments are executed within the annotation
+  scope, but its decorators are not.
+
+* The bounds, constraints, and default values for type parameters
+  (lazily evaluated).
+
+* The value of type aliases (lazily evaluated).
+
+Annotation scopes differ from function scopes in the following ways:
+
+* Annotation scopes have access to their enclosing class namespace. If
+  an annotation scope is immediately within a class scope, or within
+  another annotation scope that is immediately within a class scope,
+  the code in the annotation scope can use names defined in the class
+  scope as if it were executed directly within the class body. This
+  contrasts with regular functions defined within classes, which
+  cannot access names defined in the class scope.
+
+* Expressions in annotation scopes cannot contain "yield", "yield
+  from", "await", or ":=" expressions. (These expressions are allowed
+  in other scopes contained within the annotation scope.)
+
+* Names defined in annotation scopes cannot be rebound with "nonlocal"
+  statements in inner scopes. This includes only type parameters, as
+  no other syntactic elements that can appear within annotation scopes
+  can introduce new names.
+
+* While annotation scopes have an internal name, that name is not
+  reflected in the *qualified name* of objects defined within the
+  scope. Instead, the "__qualname__" of such objects is as if the
+  object were defined in the enclosing scope.
+
+Added in version 3.12: Annotation scopes were introduced in Python
+3.12 as part of **PEP 695**.
+
+Changed in version 3.13: Annotation scopes are also used for type
+parameter defaults, as introduced by **PEP 696**.
+
+Changed in version 3.14: Annotation scopes are now also used for
+annotations, as specified in **PEP 649** and **PEP 749**.
+
+
+Lazy evaluation
+===============
+
+Most annotation scopes are *lazily evaluated*. This includes
+annotations, the values of type aliases created through the "type"
+statement, and the bounds, constraints, and default values of type
+variables created through the type parameter syntax. This means that
+they are not evaluated when the type alias or type variable is
+created, or when the object carrying annotations is created. Instead,
+they are only evaluated when necessary, for example when the
+"__value__" attribute on a type alias is accessed.
+
+Example:
+
+   >>> type Alias = 1/0
+   >>> Alias.__value__
+   Traceback (most recent call last):
+     ...
+   ZeroDivisionError: division by zero
+   >>> def func[T: 1/0](): pass
+   >>> T = func.__type_params__[0]
+   >>> T.__bound__
+   Traceback (most recent call last):
+     ...
+   ZeroDivisionError: division by zero
+
+Here the exception is raised only when the "__value__" attribute of
+the type alias or the "__bound__" attribute of the type variable is
+accessed.
+
+This behavior is primarily useful for references to types that have
+not yet been defined when the type alias or type variable is created.
+For example, lazy evaluation enables creation of mutually recursive
+type aliases:
+
+   from typing import Literal
+
+   type SimpleExpr = int | Parenthesized
+   type Parenthesized = tuple[Literal["("], Expr, Literal[")"]]
+   type Expr = SimpleExpr | tuple[SimpleExpr, Literal["+", "-"], Expr]
+
+Lazily evaluated values are evaluated in annotation scope, which means
+that names that appear inside the lazily evaluated value are looked up
+as if they were used in the immediately enclosing scope.
+
+Added in version 3.12.
+
+
+Builtins and restricted execution
+=================================
+
+**CPython implementation detail:** Users should not touch
+"__builtins__"; it is strictly an implementation detail. Users
+wanting to override values in the builtins namespace should "import"
+the "builtins" module and modify its attributes appropriately.
+
+The builtins namespace associated with the execution of a code block
+is actually found by looking up the name "__builtins__" in its global
+namespace; this should be a dictionary or a module (in the latter case
+the module’s dictionary is used). By default, when in the "__main__"
+module, "__builtins__" is the built-in module "builtins"; when in any
+other module, "__builtins__" is an alias for the dictionary of the
+"builtins" module itself.
+
+
+Interaction with dynamic features
+=================================
+
+Name resolution of free variables occurs at runtime, not at compile
+time. This means that the following code will print 42:
+
+   i = 10
+   def f():
+       print(i)
+   i = 42
+   f()
+
+The "eval()" and "exec()" functions do not have access to the full
+environment for resolving names. Names may be resolved in the local
+and global namespaces of the caller. Free variables are not resolved
+in the nearest enclosing namespace, but in the global namespace. [1]
+The "exec()" and "eval()" functions have optional arguments to
+override the global and local namespace. If only one namespace is
+specified, it is used for both.
+''',
+ 'nonlocal': r'''The "nonlocal" statement
+************************
+
+   nonlocal_stmt: "nonlocal" identifier ("," identifier)*
+
+When the definition of a function or class is nested (enclosed) within
+the definitions of other functions, its nonlocal scopes are the local
+scopes of the enclosing functions. The "nonlocal" statement causes the
+listed identifiers to refer to names previously bound in nonlocal
+scopes. It allows encapsulated code to rebind such nonlocal
+identifiers. If a name is bound in more than one nonlocal scope, the
+nearest binding is used. If a name is not bound in any nonlocal scope,
+or if there is no nonlocal scope, a "SyntaxError" is raised.
+
+The "nonlocal" statement applies to the entire scope of a function or
+class body. A "SyntaxError" is raised if a variable is used or
+assigned to prior to its nonlocal declaration in the scope.
+
+See also:
+
+  **PEP 3104** - Access to Names in Outer Scopes
+     The specification for the "nonlocal" statement.
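+
+For example, a minimal counter sketch (the names "make_counter",
+"bump", and "count" are illustrative only):
+
+   def make_counter():
+       count = 0
+       def bump():
+           nonlocal count   # rebind "count" in the enclosing scope
+           count += 1
+           return count
+       return bump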
+ +**Programmer’s note:** "nonlocal" is a directive to the parser and +applies only to code parsed along with it. See the note for the +"global" statement. +''', + 'numbers': r'''Numeric literals +**************** + +"NUMBER" tokens represent numeric literals, of which there are three +types: integers, floating-point numbers, and imaginary numbers. + + NUMBER: integer | floatnumber | imagnumber + +The numeric value of a numeric literal is the same as if it were +passed as a string to the "int", "float" or "complex" class +constructor, respectively. Note that not all valid inputs for those +constructors are also valid literals. + +Numeric literals do not include a sign; a phrase like "-1" is actually +an expression composed of the unary operator ‘"-"’ and the literal +"1". + + +Integer literals +================ + +Integer literals denote whole numbers. For example: + + 7 + 3 + 2147483647 + +There is no limit for the length of integer literals apart from what +can be stored in available memory: + + 7922816251426433759354395033679228162514264337593543950336 + +Underscores can be used to group digits for enhanced readability, and +are ignored for determining the numeric value of the literal. For +example, the following literals are equivalent: + + 100_000_000_000 + 100000000000 + 1_00_00_00_00_000 + +Underscores can only occur between digits. For example, "_123", +"321_", and "123__321" are *not* valid literals. + +Integers can be specified in binary (base 2), octal (base 8), or +hexadecimal (base 16) using the prefixes "0b", "0o" and "0x", +respectively. Hexadecimal digits 10 through 15 are represented by +letters "A"-"F", case-insensitive. For example: + + 0b100110111 + 0b_1110_0101 + 0o177 + 0o377 + 0xdeadbeef + 0xDead_Beef + +An underscore can follow the base specifier. For example, "0x_1f" is a +valid literal, but "0_x1f" and "0x__1f" are not. + +Leading zeros in a non-zero decimal number are not allowed. For +example, "0123" is not a valid literal. This is for disambiguation +with C-style octal literals, which Python used before version 3.0. + +Formally, integer literals are described by the following lexical +definitions: + + integer: decinteger | bininteger | octinteger | hexinteger | zerointeger + decinteger: nonzerodigit (["_"] digit)* + bininteger: "0" ("b" | "B") (["_"] bindigit)+ + octinteger: "0" ("o" | "O") (["_"] octdigit)+ + hexinteger: "0" ("x" | "X") (["_"] hexdigit)+ + zerointeger: "0"+ (["_"] "0")* + nonzerodigit: "1"..."9" + digit: "0"..."9" + bindigit: "0" | "1" + octdigit: "0"..."7" + hexdigit: digit | "a"..."f" | "A"..."F" + +Changed in version 3.6: Underscores are now allowed for grouping +purposes in literals. + + +Floating-point literals +======================= + +Floating-point (float) literals, such as "3.14" or "1.5", denote +approximations of real numbers. + +They consist of *integer* and *fraction* parts, each composed of +decimal digits. The parts are separated by a decimal point, ".": + + 2.71828 + 4.0 + +Unlike in integer literals, leading zeros are allowed. For example, +"077.010" is legal, and denotes the same number as "77.01". + +As in integer literals, single underscores may occur between digits to +help readability: + + 96_485.332_123 + 3.14_15_93 + +Either of these parts, but not both, can be empty. For example: + + 10. 
# (equivalent to 10.0)
+   .001     # (equivalent to 0.001)
+
+Optionally, the integer and fraction may be followed by an *exponent*:
+the letter "e" or "E", followed by an optional sign, "+" or "-", and a
+number in the same format as the integer and fraction parts. The "e"
+or "E" represents “times ten raised to the power of”:
+
+   1.0e3            # (represents 1.0×10³, or 1000.0)
+   1.166e-5         # (represents 1.166×10⁻⁵, or 0.00001166)
+   6.02214076e+23   # (represents 6.02214076×10²³, or 602214076000000000000000.)
+
+In floats with only integer and exponent parts, the decimal point may
+be omitted:
+
+   1e3   # (equivalent to 1.e3 and 1.0e3)
+   0e0   # (equivalent to 0.)
+
+Formally, floating-point literals are described by the following
+lexical definitions:
+
+   floatnumber:
+      | digitpart "." [digitpart] [exponent]
+      | "." digitpart [exponent]
+      | digitpart exponent
+   digitpart: digit (["_"] digit)*
+   exponent: ("e" | "E") ["+" | "-"] digitpart
+
+Changed in version 3.6: Underscores are now allowed for grouping
+purposes in literals.
+
+
+Imaginary literals
+==================
+
+Python has complex number objects, but no complex literals. Instead,
+*imaginary literals* denote complex numbers with a zero real part.
+
+For example, in math, the complex number 3+4.2*i* is written as the
+real number 3 added to the imaginary number 4.2*i*. Python uses a
+similar syntax, except the imaginary unit is written as "j" rather
+than *i*:
+
+   3+4.2j
+
+This is an expression composed of the integer literal "3", the
+operator ‘"+"’, and the imaginary literal "4.2j". Since these are
+three separate tokens, whitespace is allowed between them:
+
+   3 + 4.2j
+
+No whitespace is allowed *within* each token. In particular, the "j"
+suffix may not be separated from the number before it.
+
+The number before the "j" has the same syntax as a floating-point
+literal. Thus, the following are valid imaginary literals:
+
+   4.2j
+   3.14j
+   10.j
+   .001j
+   1e100j
+   3.14e-10j
+   3.14_15_93j
+
+Unlike in a floating-point literal, the decimal point can be omitted
+if the imaginary number only has an integer part. The number is still
+evaluated as a floating-point number, not an integer:
+
+   10j
+   0j
+   1000000000000000000000000j   # equivalent to 1e+24j
+
+The "j" suffix is case-insensitive. That means you can use "J"
+instead:
+
+   3.14J   # equivalent to 3.14j
+
+Formally, imaginary literals are described by the following lexical
+definition:
+
+   imagnumber: (floatnumber | digitpart) ("j" | "J")
+''',
+ 'numeric-types': r'''Emulating numeric types
+***********************
+
+The following methods can be defined to emulate numeric objects.
+Methods corresponding to operations that are not supported by the
+particular kind of number implemented (e.g., bitwise operations for
+non-integral numbers) should be left undefined.
+
+object.__add__(self, other)
+object.__sub__(self, other)
+object.__mul__(self, other)
+object.__matmul__(self, other)
+object.__truediv__(self, other)
+object.__floordiv__(self, other)
+object.__mod__(self, other)
+object.__divmod__(self, other)
+object.__pow__(self, other[, modulo])
+object.__lshift__(self, other)
+object.__rshift__(self, other)
+object.__and__(self, other)
+object.__xor__(self, other)
+object.__or__(self, other)
+
+   These methods are called to implement the binary arithmetic
+   operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",
+   "pow()", "**", "<<", ">>", "&", "^", "|").
+   For instance, to evaluate the expression "x + y", where *x* is an
+   instance of a class that has an "__add__()" method,
+   "type(x).__add__(x, y)" is called. The "__divmod__()" method should
+   be the equivalent to using "__floordiv__()" and "__mod__()"; it
+   should not be related to "__truediv__()". Note that "__pow__()"
+   should be defined to accept an optional third argument if the
+   three-argument version of the built-in "pow()" function is to be
+   supported.
+
+   If one of those methods does not support the operation with the
+   supplied arguments, it should return "NotImplemented".
+
+object.__radd__(self, other)
+object.__rsub__(self, other)
+object.__rmul__(self, other)
+object.__rmatmul__(self, other)
+object.__rtruediv__(self, other)
+object.__rfloordiv__(self, other)
+object.__rmod__(self, other)
+object.__rdivmod__(self, other)
+object.__rpow__(self, other[, modulo])
+object.__rlshift__(self, other)
+object.__rrshift__(self, other)
+object.__rand__(self, other)
+object.__rxor__(self, other)
+object.__ror__(self, other)
+
+   These methods are called to implement the binary arithmetic
+   operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",
+   "pow()", "**", "<<", ">>", "&", "^", "|") with reflected (swapped)
+   operands. These functions are only called if the operands are of
+   different types, when the left operand does not support the
+   corresponding operation [3], or the right operand’s class is
+   derived from the left operand’s class. [4] For instance, to
+   evaluate the expression "x - y", where *y* is an instance of a
+   class that has an "__rsub__()" method, "type(y).__rsub__(y, x)" is
+   called if "type(x).__sub__(x, y)" returns "NotImplemented" or
+   "type(y)" is a subclass of "type(x)". [5]
+
+   Note that "__rpow__()" should be defined to accept an optional
+   third argument if the three-argument version of the built-in
+   "pow()" function is to be supported.
+
+   Changed in version 3.14: Three-argument "pow()" now tries calling
+   "__rpow__()" if necessary. Previously it was only called in two-
+   argument "pow()" and the binary power operator.
+
+   Note:
+
+     If the right operand’s type is a subclass of the left operand’s
+     type and that subclass provides a different implementation of the
+     reflected method for the operation, this method will be called
+     before the left operand’s non-reflected method. This behavior
+     allows subclasses to override their ancestors’ operations.
+
+object.__iadd__(self, other)
+object.__isub__(self, other)
+object.__imul__(self, other)
+object.__imatmul__(self, other)
+object.__itruediv__(self, other)
+object.__ifloordiv__(self, other)
+object.__imod__(self, other)
+object.__ipow__(self, other[, modulo])
+object.__ilshift__(self, other)
+object.__irshift__(self, other)
+object.__iand__(self, other)
+object.__ixor__(self, other)
+object.__ior__(self, other)
+
+   These methods are called to implement the augmented arithmetic
+   assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", "**=",
+   "<<=", ">>=", "&=", "^=", "|="). These methods should attempt to
+   do the operation in-place (modifying *self*) and return the result
+   (which could be, but does not have to be, *self*). If a specific
+   method is not defined, or if that method returns "NotImplemented",
+   the augmented assignment falls back to the normal methods. For
+   instance, if *x* is an instance of a class with an "__iadd__()"
+   method, "x += y" is equivalent to "x = x.__iadd__(y)".
If + "__iadd__()" does not exist, or if "x.__iadd__(y)" returns + "NotImplemented", "x.__add__(y)" and "y.__radd__(x)" are + considered, as with the evaluation of "x + y". In certain + situations, augmented assignment can result in unexpected errors + (see Why does a_tuple[i] += [‘item’] raise an exception when the + addition works?), but this behavior is in fact part of the data + model. + +object.__neg__(self) +object.__pos__(self) +object.__abs__(self) +object.__invert__(self) + + Called to implement the unary arithmetic operations ("-", "+", + "abs()" and "~"). + +object.__complex__(self) +object.__int__(self) +object.__float__(self) + + Called to implement the built-in functions "complex()", "int()" and + "float()". Should return a value of the appropriate type. + +object.__index__(self) + + Called to implement "operator.index()", and whenever Python needs + to losslessly convert the numeric object to an integer object (such + as in slicing, or in the built-in "bin()", "hex()" and "oct()" + functions). Presence of this method indicates that the numeric + object is an integer type. Must return an integer. + + If "__int__()", "__float__()" and "__complex__()" are not defined + then corresponding built-in functions "int()", "float()" and + "complex()" fall back to "__index__()". + +object.__round__(self[, ndigits]) +object.__trunc__(self) +object.__floor__(self) +object.__ceil__(self) + + Called to implement the built-in function "round()" and "math" + functions "trunc()", "floor()" and "ceil()". Unless *ndigits* is + passed to "__round__()" all these methods should return the value + of the object truncated to an "Integral" (typically an "int"). + + Changed in version 3.14: "int()" no longer delegates to the + "__trunc__()" method. +''', + 'objects': r'''Objects, values and types +************************* + +*Objects* are Python’s abstraction for data. All data in a Python +program is represented by objects or by relations between objects. +Even code is represented by objects. + +Every object has an identity, a type and a value. An object’s +*identity* never changes once it has been created; you may think of it +as the object’s address in memory. The "is" operator compares the +identity of two objects; the "id()" function returns an integer +representing its identity. + +**CPython implementation detail:** For CPython, "id(x)" is the memory +address where "x" is stored. + +An object’s type determines the operations that the object supports +(e.g., “does it have a length?”) and also defines the possible values +for objects of that type. The "type()" function returns an object’s +type (which is an object itself). Like its identity, an object’s +*type* is also unchangeable. [1] + +The *value* of some objects can change. Objects whose value can +change are said to be *mutable*; objects whose value is unchangeable +once they are created are called *immutable*. (The value of an +immutable container object that contains a reference to a mutable +object can change when the latter’s value is changed; however the +container is still considered immutable, because the collection of +objects it contains cannot be changed. So, immutability is not +strictly the same as having an unchangeable value, it is more subtle.) +An object’s mutability is determined by its type; for instance, +numbers, strings and tuples are immutable, while dictionaries and +lists are mutable. + +Objects are never explicitly destroyed; however, when they become +unreachable they may be garbage-collected. 
+An implementation is allowed to postpone garbage collection or omit
+it altogether — it is a matter of implementation quality how garbage
+collection is implemented, as long as no objects are collected that
+are still reachable.
+
+**CPython implementation detail:** CPython currently uses a reference-
+counting scheme with (optional) delayed detection of cyclically linked
+garbage, which collects most objects as soon as they become
+unreachable, but is not guaranteed to collect garbage containing
+circular references. See the documentation of the "gc" module for
+information on controlling the collection of cyclic garbage. Other
+implementations act differently and CPython may change. Do not depend
+on immediate finalization of objects when they become unreachable (so
+you should always close files explicitly).
+
+Note that the use of the implementation’s tracing or debugging
+facilities may keep objects alive that would normally be collectable.
+Also note that catching an exception with a "try"…"except" statement
+may keep objects alive.
+
+Some objects contain references to “external” resources such as open
+files or windows. It is understood that these resources are freed
+when the object is garbage-collected, but since garbage collection is
+not guaranteed to happen, such objects also provide an explicit way to
+release the external resource, usually a "close()" method. Programs
+are strongly recommended to explicitly close such objects. The
+"try"…"finally" statement and the "with" statement provide convenient
+ways to do this.
+
+Some objects contain references to other objects; these are called
+*containers*. Examples of containers are tuples, lists and
+dictionaries. The references are part of a container’s value. In
+most cases, when we talk about the value of a container, we imply the
+values, not the identities of the contained objects; however, when we
+talk about the mutability of a container, only the identities of the
+immediately contained objects are implied. So, if an immutable
+container (like a tuple) contains a reference to a mutable object, its
+value changes if that mutable object is changed.
+
+Types affect almost all aspects of object behavior. Even the
+importance of object identity is affected in some sense: for immutable
+types, operations that compute new values may actually return a
+reference to any existing object with the same type and value, while
+for mutable objects this is not allowed. For example, after
+"a = 1; b = 1", *a* and *b* may or may not refer to the same object
+with the value one, depending on the implementation. This is because
+"int" is an immutable type, so the reference to "1" can be reused.
+This behaviour depends on the implementation used, so should not be
+relied upon, but is something to be aware of when making use of object
+identity tests. However, after "c = []; d = []", *c* and *d* are
+guaranteed to refer to two different, unique, newly created empty
+lists. (Note that "e = f = []" assigns the *same* object to both *e*
+and *f*.)
+''',
+ 'operator-summary': r'''Operator precedence
+*******************
+
+The following table summarizes the operator precedence in Python, from
+highest precedence (most binding) to lowest precedence (least
+binding). Operators in the same box have the same precedence. Unless
+the syntax is explicitly given, operators are binary. Operators in
+the same box group left to right (except for exponentiation and
+conditional expressions, which group from right to left).
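+
+For example, a quick doctest illustration of the grouping rules (not
+an exhaustive demonstration):
+
+   >>> 2 ** 3 ** 2    # exponentiation groups right to left: 2 ** (3 ** 2)
+   512
+   >>> 100 / 10 / 5   # operators in the same box group left to right
+   2.0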
+ +Note that comparisons, membership tests, and identity tests, all have +the same precedence and have a left-to-right chaining feature as +described in the Comparisons section. + ++-------------------------------------------------+---------------------------------------+ +| Operator | Description | +|=================================================|=======================================| +| "(expressions...)", "[expressions...]", "{key: | Binding or parenthesized expression, | +| value...}", "{expressions...}" | list display, dictionary display, set | +| | display | ++-------------------------------------------------+---------------------------------------+ +| "x[index]", "x[index:index]" "x(arguments...)", | Subscription (including slicing), | +| "x.attribute" | call, attribute reference | ++-------------------------------------------------+---------------------------------------+ +| "await x" | Await expression | ++-------------------------------------------------+---------------------------------------+ +| "**" | Exponentiation [5] | ++-------------------------------------------------+---------------------------------------+ +| "+x", "-x", "~x" | Positive, negative, bitwise NOT | ++-------------------------------------------------+---------------------------------------+ +| "*", "@", "/", "//", "%" | Multiplication, matrix | +| | multiplication, division, floor | +| | division, remainder [6] | ++-------------------------------------------------+---------------------------------------+ +| "+", "-" | Addition and subtraction | ++-------------------------------------------------+---------------------------------------+ +| "<<", ">>" | Shifts | ++-------------------------------------------------+---------------------------------------+ +| "&" | Bitwise AND | ++-------------------------------------------------+---------------------------------------+ +| "^" | Bitwise XOR | ++-------------------------------------------------+---------------------------------------+ +| "|" | Bitwise OR | ++-------------------------------------------------+---------------------------------------+ +| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership | +| ">=", "!=", "==" | tests and identity tests | ++-------------------------------------------------+---------------------------------------+ +| "not x" | Boolean NOT | ++-------------------------------------------------+---------------------------------------+ +| "and" | Boolean AND | ++-------------------------------------------------+---------------------------------------+ +| "or" | Boolean OR | ++-------------------------------------------------+---------------------------------------+ +| "if" – "else" | Conditional expression | ++-------------------------------------------------+---------------------------------------+ +| "lambda" | Lambda expression | ++-------------------------------------------------+---------------------------------------+ +| ":=" | Assignment expression | ++-------------------------------------------------+---------------------------------------+ + +-[ Footnotes ]- + +[1] While "abs(x%y) < abs(y)" is true mathematically, for floats it + may not be true numerically due to roundoff. For example, and + assuming a platform on which a Python float is an IEEE 754 double- + precision number, in order that "-1e-100 % 1e100" have the same + sign as "1e100", the computed result is "-1e-100 + 1e100", which + is numerically exactly equal to "1e100". 
The function + "math.fmod()" returns a result whose sign matches the sign of the + first argument instead, and so returns "-1e-100" in this case. + Which approach is more appropriate depends on the application. + +[2] If x is very close to an exact integer multiple of y, it’s + possible for "x//y" to be one larger than "(x-x%y)//y" due to + rounding. In such cases, Python returns the latter result, in + order to preserve that "divmod(x,y)[0] * y + x % y" be very close + to "x". + +[3] The Unicode standard distinguishes between *code points* (e.g. + U+0041) and *abstract characters* (e.g. “LATIN CAPITAL LETTER A”). + While most abstract characters in Unicode are only represented + using one code point, there is a number of abstract characters + that can in addition be represented using a sequence of more than + one code point. For example, the abstract character “LATIN + CAPITAL LETTER C WITH CEDILLA” can be represented as a single + *precomposed character* at code position U+00C7, or as a sequence + of a *base character* at code position U+0043 (LATIN CAPITAL + LETTER C), followed by a *combining character* at code position + U+0327 (COMBINING CEDILLA). + + The comparison operators on strings compare at the level of + Unicode code points. This may be counter-intuitive to humans. For + example, ""\u00C7" == "\u0043\u0327"" is "False", even though both + strings represent the same abstract character “LATIN CAPITAL + LETTER C WITH CEDILLA”. + + To compare strings at the level of abstract characters (that is, + in a way intuitive to humans), use "unicodedata.normalize()". + +[4] Due to automatic garbage-collection, free lists, and the dynamic + nature of descriptors, you may notice seemingly unusual behaviour + in certain uses of the "is" operator, like those involving + comparisons between instance methods, or constants. Check their + documentation for more info. + +[5] The power operator "**" binds less tightly than an arithmetic or + bitwise unary operator on its right, that is, "2**-1" is "0.5". + +[6] The "%" operator is also used for string formatting; the same + precedence applies. +''', + 'pass': r'''The "pass" statement +******************** + + pass_stmt: "pass" + +"pass" is a null operation — when it is executed, nothing happens. It +is useful as a placeholder when a statement is required syntactically, +but no code needs to be executed, for example: + + def f(arg): pass # a function that does nothing (yet) + + class C: pass # a class with no methods (yet) +''', + 'power': r'''The power operator +****************** + +The power operator binds more tightly than unary operators on its +left; it binds less tightly than unary operators on its right. The +syntax is: + + power: (await_expr | primary) ["**" u_expr] + +Thus, in an unparenthesized sequence of power and unary operators, the +operators are evaluated from right to left (this does not constrain +the evaluation order for the operands): "-1**2" results in "-1". + +The power operator has the same semantics as the built-in "pow()" +function, when called with two arguments: it yields its left argument +raised to the power of its right argument. Numeric arguments are first +converted to a common type, and the result is of that type. + +For int operands, the result has the same type as the operands unless +the second argument is negative; in that case, all arguments are +converted to float and a float result is delivered. For example, +"10**2" returns "100", but "10**-2" returns "0.01". 
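+
+For illustration (a non-normative interpreter sketch of the conversion
+rule just described):
+
+   >>> 10 ** 2            # int ** non-negative int stays an int
+   100
+   >>> 10 ** -2           # a negative int exponent yields a float
+   0.01
+   >>> 2 ** 0.5           # a float operand forces float conversion
+   1.4142135623730951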
+
+Raising "0.0" to a negative power results in a "ZeroDivisionError".
+Raising a negative number to a fractional power results in a "complex"
+number. (In earlier versions it raised a "ValueError".)
+
+This operation can be customized using the special "__pow__()" and
+"__rpow__()" methods.
+''',
+ 'raise': r'''The "raise" statement
+*********************
+
+   raise_stmt: "raise" [expression ["from" expression]]
+
+If no expressions are present, "raise" re-raises the exception that is
+currently being handled, which is also known as the *active
+exception*. If there isn’t currently an active exception, a
+"RuntimeError" exception is raised indicating that this is an error.
+
+Otherwise, "raise" evaluates the first expression as the exception
+object. It must be either a subclass or an instance of
+"BaseException". If it is a class, the exception instance will be
+obtained when needed by instantiating the class with no arguments.
+
+The *type* of the exception is the exception instance’s class, the
+*value* is the instance itself.
+
+A traceback object is normally created automatically when an exception
+is raised and attached to it as the "__traceback__" attribute. You can
+create an exception and set your own traceback in one step using the
+"with_traceback()" exception method (which returns the same exception
+instance, with its traceback set to its argument), like so:
+
+   raise Exception("foo occurred").with_traceback(tracebackobj)
+
+The "from" clause is used for exception chaining: if given, the second
+*expression* must be another exception class or instance. If the
+second expression is an exception instance, it will be attached to the
+raised exception as the "__cause__" attribute (which is writable). If
+the expression is an exception class, the class will be instantiated
+and the resulting exception instance will be attached to the raised
+exception as the "__cause__" attribute. If the raised exception is not
+handled, both exceptions will be printed:
+
+   >>> try:
+   ...     print(1 / 0)
+   ... except Exception as exc:
+   ...     raise RuntimeError("Something bad happened") from exc
+   ...
+   Traceback (most recent call last):
+     File "<stdin>", line 2, in <module>
+       print(1 / 0)
+       ~~^~~
+   ZeroDivisionError: division by zero
+
+   The above exception was the direct cause of the following exception:
+
+   Traceback (most recent call last):
+     File "<stdin>", line 4, in <module>
+       raise RuntimeError("Something bad happened") from exc
+   RuntimeError: Something bad happened
+
+A similar mechanism works implicitly if a new exception is raised when
+an exception is already being handled. An exception may be handled
+when an "except" or "finally" clause, or a "with" statement, is used.
+The previous exception is then attached as the new exception’s
+"__context__" attribute:
+
+   >>> try:
+   ...     print(1 / 0)
+   ... except:
+   ...     raise RuntimeError("Something bad happened")
+   ...
+   Traceback (most recent call last):
+     File "<stdin>", line 2, in <module>
+       print(1 / 0)
+       ~~^~~
+   ZeroDivisionError: division by zero
+
+   During handling of the above exception, another exception occurred:
+
+   Traceback (most recent call last):
+     File "<stdin>", line 4, in <module>
+       raise RuntimeError("Something bad happened")
+   RuntimeError: Something bad happened
+
+Exception chaining can be explicitly suppressed by specifying "None"
+in the "from" clause:
+
+   >>> try:
+   ...     print(1 / 0)
+   ... except:
+   ...     raise RuntimeError("Something bad happened") from None
+   ...
+
+   Traceback (most recent call last):
+     File "<stdin>", line 4, in <module>
+   RuntimeError: Something bad happened
+
+Additional information on exceptions can be found in section
+Exceptions, and information about handling exceptions is in section
+The try statement.
+
+Changed in version 3.3: "None" is now permitted as "Y" in "raise X
+from Y". Added the "__suppress_context__" attribute to suppress
+automatic display of the exception context.
+
+Changed in version 3.11: If the traceback of the active exception is
+modified in an "except" clause, a subsequent "raise" statement re-
+raises the exception with the modified traceback. Previously, the
+exception was re-raised with the traceback it had when it was caught.
+''',
+ 'return': r'''The "return" statement
+**********************
+
+   return_stmt: "return" [expression_list]
+
+"return" may only occur syntactically nested in a function definition,
+not within a nested class definition.
+
+If an expression list is present, it is evaluated, else "None" is
+substituted.
+
+"return" leaves the current function call with the expression list (or
+"None") as return value.
+
+When "return" passes control out of a "try" statement with a "finally"
+clause, that "finally" clause is executed before really leaving the
+function.
+
+In a generator function, the "return" statement indicates that the
+generator is done and will cause "StopIteration" to be raised. The
+returned value (if any) is used as an argument to construct
+"StopIteration" and becomes the "StopIteration.value" attribute.
+
+In an asynchronous generator function, an empty "return" statement
+indicates that the asynchronous generator is done and will cause
+"StopAsyncIteration" to be raised. A non-empty "return" statement is
+a syntax error in an asynchronous generator function.
+''',
+ 'sequence-types': r'''Emulating container types
+*************************
+
+The following methods can be defined to implement container objects.
+None of them are provided by the "object" class itself. Containers
+usually are *sequences* (such as "lists" or "tuples") or *mappings*
+(like *dictionaries*), but can represent other containers as well.
+The first set of methods is used either to emulate a sequence or to
+emulate a mapping; the difference is that for a sequence, the
+allowable keys should be the integers *k* for which "0 <= k < N" where
+*N* is the length of the sequence, or "slice" objects, which define a
+range of items. It is also recommended that mappings provide the
+methods "keys()", "values()", "items()", "get()", "clear()",
+"setdefault()", "pop()", "popitem()", "copy()", and "update()"
+behaving similar to those for Python’s standard "dictionary" objects.
+The "collections.abc" module provides a "MutableMapping" *abstract
+base class* to help create those methods from a base set of
+"__getitem__()", "__setitem__()", "__delitem__()", and "keys()".
+
+Mutable sequences should provide methods "append()", "clear()",
+"count()", "extend()", "index()", "insert()", "pop()", "remove()", and
+"reverse()", like Python standard "list" objects. Finally, sequence
+types should implement addition (meaning concatenation) and
+multiplication (meaning repetition) by defining the methods
+"__add__()", "__radd__()", "__iadd__()", "__mul__()", "__rmul__()" and
+"__imul__()" described below; they should not define other numerical
+operators.
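+
+As a non-normative sketch of this protocol, a minimal immutable
+sequence needs only "__len__()" and "__getitem__()"; the class name
+"Ring" here is hypothetical:
+
+   from collections.abc import Sequence
+
+   class Ring(Sequence):
+       """A fixed collection of items viewed as a sequence."""
+
+       def __init__(self, *items):
+           self._items = tuple(items)
+
+       def __len__(self):
+           return len(self._items)
+
+       def __getitem__(self, subscript):
+           # Integers and "slice" objects are both valid keys.
+           return self._items[subscript]
+
+Deriving from "collections.abc.Sequence" supplies "__contains__()",
+"__iter__()", "__reversed__()", "index()" and "count()" as mixin
+methods on top of these two.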
+ +It is recommended that both mappings and sequences implement the +"__contains__()" method to allow efficient use of the "in" operator; +for mappings, "in" should search the mapping’s keys; for sequences, it +should search through the values. It is further recommended that both +mappings and sequences implement the "__iter__()" method to allow +efficient iteration through the container; for mappings, "__iter__()" +should iterate through the object’s keys; for sequences, it should +iterate through the values. + +object.__len__(self) + + Called to implement the built-in function "len()". Should return + the length of the object, an integer ">=" 0. Also, an object that + doesn’t define a "__bool__()" method and whose "__len__()" method + returns zero is considered to be false in a Boolean context. + + **CPython implementation detail:** In CPython, the length is + required to be at most "sys.maxsize". If the length is larger than + "sys.maxsize" some features (such as "len()") may raise + "OverflowError". To prevent raising "OverflowError" by truth value + testing, an object must define a "__bool__()" method. + +object.__length_hint__(self) + + Called to implement "operator.length_hint()". Should return an + estimated length for the object (which may be greater or less than + the actual length). The length must be an integer ">=" 0. The + return value may also be "NotImplemented", which is treated the + same as if the "__length_hint__" method didn’t exist at all. This + method is purely an optimization and is never required for + correctness. + + Added in version 3.4. + +object.__getitem__(self, subscript) + + Called to implement *subscription*, that is, "self[subscript]". See + Subscriptions and slicings for details on the syntax. + + There are two types of built-in objects that support subscription + via "__getitem__()": + + * **sequences**, where *subscript* (also called *index*) should be + an integer or a "slice" object. See the sequence documentation + for the expected behavior, including handling "slice" objects and + negative indices. + + * **mappings**, where *subscript* is also called the *key*. See + mapping documentation for the expected behavior. + + If *subscript* is of an inappropriate type, "__getitem__()" should + raise "TypeError". If *subscript* has an inappropriate value, + "__getitem__()" should raise an "LookupError" or one of its + subclasses ("IndexError" for sequences; "KeyError" for mappings). + + Note: + + Slicing is handled by "__getitem__()", "__setitem__()", and + "__delitem__()". A call like + + a[1:2] = b + + is translated to + + a[slice(1, 2, None)] = b + + and so forth. Missing slice items are always filled in with + "None". + + Note: + + The sequence iteration protocol (used, for example, in "for" + loops), expects that an "IndexError" will be raised for illegal + indexes to allow proper detection of the end of a sequence. + + Note: + + When subscripting a *class*, the special class method + "__class_getitem__()" may be called instead of "__getitem__()". + See __class_getitem__ versus __getitem__ for more details. + +object.__setitem__(self, key, value) + + Called to implement assignment to "self[key]". Same note as for + "__getitem__()". This should only be implemented for mappings if + the objects support changes to the values for keys, or if new keys + can be added, or for sequences if elements can be replaced. The + same exceptions should be raised for improper *key* values as for + the "__getitem__()" method. 
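+
+For illustration only (the class name "Squares" is hypothetical), an
+implementation that follows the "__getitem__()" rules above might look
+like this:
+
+   class Squares:
+       """Read-only sequence of the first n square numbers."""
+
+       def __init__(self, n):
+           self._n = n
+
+       def __len__(self):
+           return self._n
+
+       def __getitem__(self, subscript):
+           if isinstance(subscript, slice):
+               # slice.indices() normalizes negative and missing values.
+               return [i * i for i in range(*subscript.indices(self._n))]
+           if not isinstance(subscript, int):
+               raise TypeError("indices must be integers or slices")
+           if subscript < 0:
+               subscript += self._n        # support negative indices
+           if not 0 <= subscript < self._n:
+               raise IndexError("index out of range")
+           return subscript * subscript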
+ +object.__delitem__(self, key) + + Called to implement deletion of "self[key]". Same note as for + "__getitem__()". This should only be implemented for mappings if + the objects support removal of keys, or for sequences if elements + can be removed from the sequence. The same exceptions should be + raised for improper *key* values as for the "__getitem__()" method. + +object.__missing__(self, key) + + Called by "dict"."__getitem__()" to implement "self[key]" for dict + subclasses when key is not in the dictionary. + +object.__iter__(self) + + This method is called when an *iterator* is required for a + container. This method should return a new iterator object that can + iterate over all the objects in the container. For mappings, it + should iterate over the keys of the container. + +object.__reversed__(self) + + Called (if present) by the "reversed()" built-in to implement + reverse iteration. It should return a new iterator object that + iterates over all the objects in the container in reverse order. + + If the "__reversed__()" method is not provided, the "reversed()" + built-in will fall back to using the sequence protocol ("__len__()" + and "__getitem__()"). Objects that support the sequence protocol + should only provide "__reversed__()" if they can provide an + implementation that is more efficient than the one provided by + "reversed()". + +The membership test operators ("in" and "not in") are normally +implemented as an iteration through a container. However, container +objects can supply the following special method with a more efficient +implementation, which also does not require the object be iterable. + +object.__contains__(self, item) + + Called to implement membership test operators. Should return true + if *item* is in *self*, false otherwise. For mapping objects, this + should consider the keys of the mapping rather than the values or + the key-item pairs. + + For objects that don’t define "__contains__()", the membership test + first tries iteration via "__iter__()", then the old sequence + iteration protocol via "__getitem__()", see this section in the + language reference. +''', + 'shifting': r'''Shifting operations +******************* + +The shifting operations have lower priority than the arithmetic +operations: + + shift_expr: a_expr | shift_expr ("<<" | ">>") a_expr + +These operators accept integers as arguments. They shift the first +argument to the left or right by the number of bits given by the +second argument. + +The left shift operation can be customized using the special +"__lshift__()" and "__rlshift__()" methods. The right shift operation +can be customized using the special "__rshift__()" and "__rrshift__()" +methods. + +A right shift by *n* bits is defined as floor division by "pow(2,n)". +A left shift by *n* bits is defined as multiplication with "pow(2,n)". +''', + 'slicings': r'''Slicings +******** + +A more advanced form of subscription, *slicing*, is commonly used to +extract a portion of a sequence. In this form, the subscript is a +*slice*: up to three expressions separated by colons. 
Any of the +expressions may be omitted, but a slice must contain at least one +colon: + + >>> number_names = ['zero', 'one', 'two', 'three', 'four', 'five'] + >>> number_names[1:3] + ['one', 'two'] + >>> number_names[1:] + ['one', 'two', 'three', 'four', 'five'] + >>> number_names[:3] + ['zero', 'one', 'two'] + >>> number_names[:] + ['zero', 'one', 'two', 'three', 'four', 'five'] + >>> number_names[::2] + ['zero', 'two', 'four'] + >>> number_names[:-3] + ['zero', 'one', 'two'] + >>> del number_names[4:] + >>> number_names + ['zero', 'one', 'two', 'three'] + +When a slice is evaluated, the interpreter constructs a "slice" object +whose "start", "stop" and "step" attributes, respectively, are the +results of the expressions between the colons. Any missing expression +evaluates to "None". This "slice" object is then passed to the +"__getitem__()" or "__class_getitem__()" *special method*, as above. + + # continuing with the SubscriptionDemo instance defined above: + >>> demo[2:3] + subscripted with: slice(2, 3, None) + >>> demo[::'spam'] + subscripted with: slice(None, None, 'spam') +''', + 'specialattrs': r'''Special Attributes +****************** + +The implementation adds a few special read-only attributes to several +object types, where they are relevant. Some of these are not reported +by the "dir()" built-in function. + +definition.__name__ + + The name of the class, function, method, descriptor, or generator + instance. + +definition.__qualname__ + + The *qualified name* of the class, function, method, descriptor, or + generator instance. + + Added in version 3.3. + +definition.__module__ + + The name of the module in which a class or function was defined. + +definition.__doc__ + + The documentation string of a class or function, or "None" if + undefined. + +definition.__type_params__ + + The type parameters of generic classes, functions, and type + aliases. For classes and functions that are not generic, this will + be an empty tuple. + + Added in version 3.12. +''', + 'specialnames': r'''Special method names +******************** + +A class can implement certain operations that are invoked by special +syntax (such as arithmetic operations or subscripting and slicing) by +defining methods with special names. This is Python’s approach to +*operator overloading*, allowing classes to define their own behavior +with respect to language operators. For instance, if a class defines +a method named "__getitem__()", and "x" is an instance of this class, +then "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)". +Except where mentioned, attempts to execute an operation raise an +exception when no appropriate method is defined (typically +"AttributeError" or "TypeError"). + +Setting a special method to "None" indicates that the corresponding +operation is not available. For example, if a class sets "__iter__()" +to "None", the class is not iterable, so calling "iter()" on its +instances will raise a "TypeError" (without falling back to +"__getitem__()"). [2] + +When implementing a class that emulates any built-in type, it is +important that the emulation only be implemented to the degree that it +makes sense for the object being modelled. For example, some +sequences may work well with retrieval of individual elements, but +extracting a slice may not make sense. (One example of this is the +NodeList interface in the W3C’s Document Object Model.) + + +Basic customization +=================== + +object.__new__(cls[, ...]) + + Called to create a new instance of class *cls*. 
"__new__()" is a + static method (special-cased so you need not declare it as such) + that takes the class of which an instance was requested as its + first argument. The remaining arguments are those passed to the + object constructor expression (the call to the class). The return + value of "__new__()" should be the new object instance (usually an + instance of *cls*). + + Typical implementations create a new instance of the class by + invoking the superclass’s "__new__()" method using + "super().__new__(cls[, ...])" with appropriate arguments and then + modifying the newly created instance as necessary before returning + it. + + If "__new__()" is invoked during object construction and it returns + an instance of *cls*, then the new instance’s "__init__()" method + will be invoked like "__init__(self[, ...])", where *self* is the + new instance and the remaining arguments are the same as were + passed to the object constructor. + + If "__new__()" does not return an instance of *cls*, then the new + instance’s "__init__()" method will not be invoked. + + "__new__()" is intended mainly to allow subclasses of immutable + types (like int, str, or tuple) to customize instance creation. It + is also commonly overridden in custom metaclasses in order to + customize class creation. + +object.__init__(self[, ...]) + + Called after the instance has been created (by "__new__()"), but + before it is returned to the caller. The arguments are those + passed to the class constructor expression. If a base class has an + "__init__()" method, the derived class’s "__init__()" method, if + any, must explicitly call it to ensure proper initialization of the + base class part of the instance; for example: + "super().__init__([args...])". + + Because "__new__()" and "__init__()" work together in constructing + objects ("__new__()" to create it, and "__init__()" to customize + it), no non-"None" value may be returned by "__init__()"; doing so + will cause a "TypeError" to be raised at runtime. + +object.__del__(self) + + Called when the instance is about to be destroyed. This is also + called a finalizer or (improperly) a destructor. If a base class + has a "__del__()" method, the derived class’s "__del__()" method, + if any, must explicitly call it to ensure proper deletion of the + base class part of the instance. + + It is possible (though not recommended!) for the "__del__()" method + to postpone destruction of the instance by creating a new reference + to it. This is called object *resurrection*. It is + implementation-dependent whether "__del__()" is called a second + time when a resurrected object is about to be destroyed; the + current *CPython* implementation only calls it once. + + It is not guaranteed that "__del__()" methods are called for + objects that still exist when the interpreter exits. + "weakref.finalize" provides a straightforward way to register a + cleanup function to be called when an object is garbage collected. + + Note: + + "del x" doesn’t directly call "x.__del__()" — the former + decrements the reference count for "x" by one, and the latter is + only called when "x"’s reference count reaches zero. + + **CPython implementation detail:** It is possible for a reference + cycle to prevent the reference count of an object from going to + zero. In this case, the cycle will be later detected and deleted + by the *cyclic garbage collector*. A common cause of reference + cycles is when an exception has been caught in a local variable. 
+ The frame’s locals then reference the exception, which references + its own traceback, which references the locals of all frames caught + in the traceback. + + See also: Documentation for the "gc" module. + + Warning: + + Due to the precarious circumstances under which "__del__()" + methods are invoked, exceptions that occur during their execution + are ignored, and a warning is printed to "sys.stderr" instead. + In particular: + + * "__del__()" can be invoked when arbitrary code is being + executed, including from any arbitrary thread. If "__del__()" + needs to take a lock or invoke any other blocking resource, it + may deadlock as the resource may already be taken by the code + that gets interrupted to execute "__del__()". + + * "__del__()" can be executed during interpreter shutdown. As a + consequence, the global variables it needs to access (including + other modules) may already have been deleted or set to "None". + Python guarantees that globals whose name begins with a single + underscore are deleted from their module before other globals + are deleted; if no other references to such globals exist, this + may help in assuring that imported modules are still available + at the time when the "__del__()" method is called. + +object.__repr__(self) + + Called by the "repr()" built-in function to compute the “official” + string representation of an object. If at all possible, this + should look like a valid Python expression that could be used to + recreate an object with the same value (given an appropriate + environment). If this is not possible, a string of the form + "<...some useful description...>" should be returned. The return + value must be a string object. If a class defines "__repr__()" but + not "__str__()", then "__repr__()" is also used when an “informal” + string representation of instances of that class is required. + + This is typically used for debugging, so it is important that the + representation is information-rich and unambiguous. A default + implementation is provided by the "object" class itself. + +object.__str__(self) + + Called by "str(object)", the default "__format__()" implementation, + and the built-in function "print()", to compute the “informal” or + nicely printable string representation of an object. The return + value must be a str object. + + This method differs from "object.__repr__()" in that there is no + expectation that "__str__()" return a valid Python expression: a + more convenient or concise representation can be used. + + The default implementation defined by the built-in type "object" + calls "object.__repr__()". + +object.__bytes__(self) + + Called by bytes to compute a byte-string representation of an + object. This should return a "bytes" object. The "object" class + itself does not provide this method. + +object.__format__(self, format_spec) + + Called by the "format()" built-in function, and by extension, + evaluation of formatted string literals and the "str.format()" + method, to produce a “formatted” string representation of an + object. The *format_spec* argument is a string that contains a + description of the formatting options desired. The interpretation + of the *format_spec* argument is up to the type implementing + "__format__()", however most classes will either delegate + formatting to one of the built-in types, or use a similar + formatting option syntax. + + See Format specification mini-language for a description of the + standard formatting syntax. + + The return value must be a string object. 
+
+   The default implementation by the "object" class should be given an
+   empty *format_spec* string. It delegates to "__str__()".
+
+   Changed in version 3.4: The __format__ method of "object" itself
+   raises a "TypeError" if passed any non-empty string.
+
+   Changed in version 3.7: "object.__format__(x, '')" is now
+   equivalent to "str(x)" rather than "format(str(x), '')".
+
+object.__lt__(self, other)
+object.__le__(self, other)
+object.__eq__(self, other)
+object.__ne__(self, other)
+object.__gt__(self, other)
+object.__ge__(self, other)
+
+   These are the so-called “rich comparison” methods. The
+   correspondence between operator symbols and method names is as
+   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",
+   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls
+   "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".
+
+   A rich comparison method may return the singleton "NotImplemented"
+   if it does not implement the operation for a given pair of
+   arguments. By convention, "False" and "True" are returned for a
+   successful comparison. However, these methods can return any value,
+   so if the comparison operator is used in a Boolean context (e.g.,
+   in the condition of an "if" statement), Python will call "bool()"
+   on the value to determine if the result is true or false.
+
+   By default, "object" implements "__eq__()" by using "is", returning
+   "NotImplemented" in the case of a false comparison: "True if x is y
+   else NotImplemented". For "__ne__()", by default it delegates to
+   "__eq__()" and inverts the result unless it is "NotImplemented".
+   There are no other implied relationships among the comparison
+   operators or default implementations; for example, the truth of
+   "(x<y or x==y)" does not imply "x<=y".
+
+object.__hash__(self)
+
+   Called by built-in function "hash()" and for operations on members
+   of hashed collections including "set", "frozenset", and "dict".
+   The "__hash__()" method should return an integer. The only required
+   property is that objects which compare equal have the same hash
+   value.
+
+   If a class that overrides "__eq__()" needs to retain the hash
+   implementation from a parent class, the interpreter must be told
+   this explicitly by setting "__hash__ = <ParentClass>.__hash__".
+
+   If a class that does not override "__eq__()" wishes to suppress
+   hash support, it should include "__hash__ = None" in the class
+   definition. A class which defines its own "__hash__()" that
+   explicitly raises a "TypeError" would be incorrectly identified as
+   hashable by an "isinstance(obj, collections.abc.Hashable)" call.
+
+   Note:
+
+     By default, the "__hash__()" values of str and bytes objects are
+     “salted” with an unpredictable random value. Although they
+     remain constant within an individual Python process, they are not
+     predictable between repeated invocations of Python.
+
+     This is intended to provide protection against a denial-of-
+     service caused by carefully chosen inputs that exploit the worst
+     case performance of a dict insertion, *O*(*n*^2) complexity. See
+     https://ocert.org/advisories/ocert-2011-003.html for details.
+
+     Changing hash values affects the iteration order of sets. Python
+     has never made guarantees about this ordering (and it typically
+     varies between 32-bit and 64-bit builds).
+
+     See also "PYTHONHASHSEED".
+
+   Changed in version 3.3: Hash randomization is enabled by default.
+
+object.__bool__(self)
+
+   Called to implement truth value testing and the built-in operation
+   "bool()"; should return "False" or "True". When this method is not
+   defined, "__len__()" is called, if it is defined, and the object is
+   considered true if its result is nonzero. If a class defines
+   neither "__len__()" nor "__bool__()" (which is true of the "object"
+   class itself), all its instances are considered true.
+
+
+Customizing attribute access
+============================
+
+The following methods can be defined to customize the meaning of
+attribute access (use of, assignment to, or deletion of "x.name") for
+class instances.
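+
+A brief, non-normative sketch of two of these hooks (the class name is
+hypothetical):
+
+   class Recorder:
+       """Log attribute writes; compute one missing attribute."""
+
+       def __setattr__(self, name, value):
+           print(f"setting {name!r}")
+           # Delegate to object.__setattr__ to actually store the value.
+           object.__setattr__(self, name, value)
+
+       def __getattr__(self, name):
+           # Called only after normal lookup has failed.
+           if name == "answer":
+               return 42
+           raise AttributeError(name)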
+ +object.__getattr__(self, name) + + Called when the default attribute access fails with an + "AttributeError" (either "__getattribute__()" raises an + "AttributeError" because *name* is not an instance attribute or an + attribute in the class tree for "self"; or "__get__()" of a *name* + property raises "AttributeError"). This method should either + return the (computed) attribute value or raise an "AttributeError" + exception. The "object" class itself does not provide this method. + + Note that if the attribute is found through the normal mechanism, + "__getattr__()" is not called. (This is an intentional asymmetry + between "__getattr__()" and "__setattr__()".) This is done both for + efficiency reasons and because otherwise "__getattr__()" would have + no way to access other attributes of the instance. Note that at + least for instance variables, you can take total control by not + inserting any values in the instance attribute dictionary (but + instead inserting them in another object). See the + "__getattribute__()" method below for a way to actually get total + control over attribute access. + +object.__getattribute__(self, name) + + Called unconditionally to implement attribute accesses for + instances of the class. If the class also defines "__getattr__()", + the latter will not be called unless "__getattribute__()" either + calls it explicitly or raises an "AttributeError". This method + should return the (computed) attribute value or raise an + "AttributeError" exception. In order to avoid infinite recursion in + this method, its implementation should always call the base class + method with the same name to access any attributes it needs, for + example, "object.__getattribute__(self, name)". + + Note: + + This method may still be bypassed when looking up special methods + as the result of implicit invocation via language syntax or + built-in functions. See Special method lookup. + + For certain sensitive attribute accesses, raises an auditing event + "object.__getattr__" with arguments "obj" and "name". + +object.__setattr__(self, name, value) + + Called when an attribute assignment is attempted. This is called + instead of the normal mechanism (i.e. store the value in the + instance dictionary). *name* is the attribute name, *value* is the + value to be assigned to it. + + If "__setattr__()" wants to assign to an instance attribute, it + should call the base class method with the same name, for example, + "object.__setattr__(self, name, value)". + + For certain sensitive attribute assignments, raises an auditing + event "object.__setattr__" with arguments "obj", "name", "value". + +object.__delattr__(self, name) + + Like "__setattr__()" but for attribute deletion instead of + assignment. This should only be implemented if "del obj.name" is + meaningful for the object. + + For certain sensitive attribute deletions, raises an auditing event + "object.__delattr__" with arguments "obj" and "name". + +object.__dir__(self) + + Called when "dir()" is called on the object. An iterable must be + returned. "dir()" converts the returned iterable to a list and + sorts it. + + +Customizing module attribute access +----------------------------------- + +module.__getattr__() +module.__dir__() + +Special names "__getattr__" and "__dir__" can be also used to +customize access to module attributes. The "__getattr__" function at +the module level should accept one argument which is the name of an +attribute and return the computed value or raise an "AttributeError". 
+If an attribute is not found on a module object through the normal +lookup, i.e. "object.__getattribute__()", then "__getattr__" is +searched in the module "__dict__" before raising an "AttributeError". +If found, it is called with the attribute name and the result is +returned. + +The "__dir__" function should accept no arguments, and return an +iterable of strings that represents the names accessible on module. If +present, this function overrides the standard "dir()" search on a +module. + +module.__class__ + +For a more fine grained customization of the module behavior (setting +attributes, properties, etc.), one can set the "__class__" attribute +of a module object to a subclass of "types.ModuleType". For example: + + import sys + from types import ModuleType + + class VerboseModule(ModuleType): + def __repr__(self): + return f'Verbose {self.__name__}' + + def __setattr__(self, attr, value): + print(f'Setting {attr}...') + super().__setattr__(attr, value) + + sys.modules[__name__].__class__ = VerboseModule + +Note: + + Defining module "__getattr__" and setting module "__class__" only + affect lookups made using the attribute access syntax – directly + accessing the module globals (whether by code within the module, or + via a reference to the module’s globals dictionary) is unaffected. + +Changed in version 3.5: "__class__" module attribute is now writable. + +Added in version 3.7: "__getattr__" and "__dir__" module attributes. + +See also: + + **PEP 562** - Module __getattr__ and __dir__ + Describes the "__getattr__" and "__dir__" functions on modules. + + +Implementing Descriptors +------------------------ + +The following methods only apply when an instance of the class +containing the method (a so-called *descriptor* class) appears in an +*owner* class (the descriptor must be in either the owner’s class +dictionary or in the class dictionary for one of its parents). In the +examples below, “the attribute” refers to the attribute whose name is +the key of the property in the owner class’ "__dict__". The "object" +class itself does not implement any of these protocols. + +object.__get__(self, instance, owner=None) + + Called to get the attribute of the owner class (class attribute + access) or of an instance of that class (instance attribute + access). The optional *owner* argument is the owner class, while + *instance* is the instance that the attribute was accessed through, + or "None" when the attribute is accessed through the *owner*. + + This method should return the computed attribute value or raise an + "AttributeError" exception. + + **PEP 252** specifies that "__get__()" is callable with one or two + arguments. Python’s own built-in descriptors support this + specification; however, it is likely that some third-party tools + have descriptors that require both arguments. Python’s own + "__getattribute__()" implementation always passes in both arguments + whether they are required or not. + +object.__set__(self, instance, value) + + Called to set the attribute on an instance *instance* of the owner + class to a new value, *value*. + + Note, adding "__set__()" or "__delete__()" changes the kind of + descriptor to a “data descriptor”. See Invoking Descriptors for + more details. + +object.__delete__(self, instance) + + Called to delete the attribute on an instance *instance* of the + owner class. 
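+
+For illustration (a non-normative sketch; the names are hypothetical),
+a small data descriptor combining "__set_name__()", "__get__()" and
+"__set__()":
+
+   class PositiveNumber:
+       """Data descriptor that rejects non-positive values."""
+
+       def __set_name__(self, owner, name):
+           self._name = "_" + name
+
+       def __get__(self, instance, owner=None):
+           if instance is None:
+               return self              # accessed on the owner class
+           return getattr(instance, self._name)
+
+       def __set__(self, instance, value):
+           if value <= 0:
+               raise ValueError("value must be positive")
+           setattr(instance, self._name, value)
+
+   class Account:
+       balance = PositiveNumber()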
+ +Instances of descriptors may also have the "__objclass__" attribute +present: + +object.__objclass__ + + The attribute "__objclass__" is interpreted by the "inspect" module + as specifying the class where this object was defined (setting this + appropriately can assist in runtime introspection of dynamic class + attributes). For callables, it may indicate that an instance of the + given type (or a subclass) is expected or required as the first + positional argument (for example, CPython sets this attribute for + unbound methods that are implemented in C). + + +Invoking Descriptors +-------------------- + +In general, a descriptor is an object attribute with “binding +behavior”, one whose attribute access has been overridden by methods +in the descriptor protocol: "__get__()", "__set__()", and +"__delete__()". If any of those methods are defined for an object, it +is said to be a descriptor. + +The default behavior for attribute access is to get, set, or delete +the attribute from an object’s dictionary. For instance, "a.x" has a +lookup chain starting with "a.__dict__['x']", then +"type(a).__dict__['x']", and continuing through the base classes of +"type(a)" excluding metaclasses. + +However, if the looked-up value is an object defining one of the +descriptor methods, then Python may override the default behavior and +invoke the descriptor method instead. Where this occurs in the +precedence chain depends on which descriptor methods were defined and +how they were called. + +The starting point for descriptor invocation is a binding, "a.x". How +the arguments are assembled depends on "a": + +Direct Call + The simplest and least common call is when user code directly + invokes a descriptor method: "x.__get__(a)". + +Instance Binding + If binding to an object instance, "a.x" is transformed into the + call: "type(a).__dict__['x'].__get__(a, type(a))". + +Class Binding + If binding to a class, "A.x" is transformed into the call: + "A.__dict__['x'].__get__(None, A)". + +Super Binding + A dotted lookup such as "super(A, a).x" searches + "a.__class__.__mro__" for a base class "B" following "A" and then + returns "B.__dict__['x'].__get__(a, A)". If not a descriptor, "x" + is returned unchanged. + +For instance bindings, the precedence of descriptor invocation depends +on which descriptor methods are defined. A descriptor can define any +combination of "__get__()", "__set__()" and "__delete__()". If it +does not define "__get__()", then accessing the attribute will return +the descriptor object itself unless there is a value in the object’s +instance dictionary. If the descriptor defines "__set__()" and/or +"__delete__()", it is a data descriptor; if it defines neither, it is +a non-data descriptor. Normally, data descriptors define both +"__get__()" and "__set__()", while non-data descriptors have just the +"__get__()" method. Data descriptors with "__get__()" and "__set__()" +(and/or "__delete__()") defined always override a redefinition in an +instance dictionary. In contrast, non-data descriptors can be +overridden by instances. + +Python methods (including those decorated with "@staticmethod" and +"@classmethod") are implemented as non-data descriptors. Accordingly, +instances can redefine and override methods. This allows individual +instances to acquire behaviors that differ from other instances of the +same class. + +The "property()" function is implemented as a data descriptor. +Accordingly, instances cannot override the behavior of a property. 
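+
+The practical difference is easy to observe (illustrative sketch; the
+names are hypothetical):
+
+   class NonData:
+       def __get__(self, instance, owner=None):
+           return "from descriptor"
+
+   class C:
+       attr = NonData()
+
+   c = C()
+   print(c.attr)                    # from descriptor
+   c.__dict__["attr"] = "from instance"
+   print(c.attr)                    # from instance: the instance
+                                    # dictionary overrides a non-data
+                                    # descriptor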
+ + +__slots__ +--------- + +*__slots__* allow us to explicitly declare data members (like +properties) and deny the creation of "__dict__" and *__weakref__* +(unless explicitly declared in *__slots__* or available in a parent.) + +The space saved over using "__dict__" can be significant. Attribute +lookup speed can be significantly improved as well. + +object.__slots__ + + This class variable can be assigned a string, iterable, or sequence + of strings with variable names used by instances. *__slots__* + reserves space for the declared variables and prevents the + automatic creation of "__dict__" and *__weakref__* for each + instance. + +Notes on using *__slots__*: + +* When inheriting from a class without *__slots__*, the "__dict__" and + *__weakref__* attribute of the instances will always be accessible. + +* Without a "__dict__" variable, instances cannot be assigned new + variables not listed in the *__slots__* definition. Attempts to + assign to an unlisted variable name raises "AttributeError". If + dynamic assignment of new variables is desired, then add + "'__dict__'" to the sequence of strings in the *__slots__* + declaration. + +* Without a *__weakref__* variable for each instance, classes defining + *__slots__* do not support "weak references" to its instances. If + weak reference support is needed, then add "'__weakref__'" to the + sequence of strings in the *__slots__* declaration. + +* *__slots__* are implemented at the class level by creating + descriptors for each variable name. As a result, class attributes + cannot be used to set default values for instance variables defined + by *__slots__*; otherwise, the class attribute would overwrite the + descriptor assignment. + +* The action of a *__slots__* declaration is not limited to the class + where it is defined. *__slots__* declared in parents are available + in child classes. However, instances of a child subclass will get a + "__dict__" and *__weakref__* unless the subclass also defines + *__slots__* (which should only contain names of any *additional* + slots). + +* If a class defines a slot also defined in a base class, the instance + variable defined by the base class slot is inaccessible (except by + retrieving its descriptor directly from the base class). This + renders the meaning of the program undefined. In the future, a + check may be added to prevent this. + +* "TypeError" will be raised if nonempty *__slots__* are defined for a + class derived from a ""variable-length" built-in type" such as + "int", "bytes", and "tuple". + +* Any non-string *iterable* may be assigned to *__slots__*. + +* If a "dictionary" is used to assign *__slots__*, the dictionary keys + will be used as the slot names. The values of the dictionary can be + used to provide per-attribute docstrings that will be recognised by + "inspect.getdoc()" and displayed in the output of "help()". + +* "__class__" assignment works only if both classes have the same + *__slots__*. + +* Multiple inheritance with multiple slotted parent classes can be + used, but only one parent is allowed to have attributes created by + slots (the other bases must have empty slot layouts) - violations + raise "TypeError". + +* If an *iterator* is used for *__slots__* then a *descriptor* is + created for each of the iterator’s values. However, the *__slots__* + attribute will be an empty iterator. + + +Customizing class creation +========================== + +Whenever a class inherits from another class, "__init_subclass__()" is +called on the parent class. 
This way, it is possible to write classes +which change the behavior of subclasses. This is closely related to +class decorators, but where class decorators only affect the specific +class they’re applied to, "__init_subclass__" solely applies to future +subclasses of the class defining the method. + +classmethod object.__init_subclass__(cls) + + This method is called whenever the containing class is subclassed. + *cls* is then the new subclass. If defined as a normal instance + method, this method is implicitly converted to a class method. + + Keyword arguments which are given to a new class are passed to the + parent class’s "__init_subclass__". For compatibility with other + classes using "__init_subclass__", one should take out the needed + keyword arguments and pass the others over to the base class, as + in: + + class Philosopher: + def __init_subclass__(cls, /, default_name, **kwargs): + super().__init_subclass__(**kwargs) + cls.default_name = default_name + + class AustralianPhilosopher(Philosopher, default_name="Bruce"): + pass + + The default implementation "object.__init_subclass__" does nothing, + but raises an error if it is called with any arguments. + + Note: + + The metaclass hint "metaclass" is consumed by the rest of the + type machinery, and is never passed to "__init_subclass__" + implementations. The actual metaclass (rather than the explicit + hint) can be accessed as "type(cls)". + + Added in version 3.6. + +When a class is created, "type.__new__()" scans the class variables +and makes callbacks to those with a "__set_name__()" hook. + +object.__set_name__(self, owner, name) + + Automatically called at the time the owning class *owner* is + created. The object has been assigned to *name* in that class: + + class A: + x = C() # Automatically calls: x.__set_name__(A, 'x') + + If the class variable is assigned after the class is created, + "__set_name__()" will not be called automatically. If needed, + "__set_name__()" can be called directly: + + class A: + pass + + c = C() + A.x = c # The hook is not called + c.__set_name__(A, 'x') # Manually invoke the hook + + See Creating the class object for more details. + + Added in version 3.6. + + +Metaclasses +----------- + +By default, classes are constructed using "type()". The class body is +executed in a new namespace and the class name is bound locally to the +result of "type(name, bases, namespace)". + +The class creation process can be customized by passing the +"metaclass" keyword argument in the class definition line, or by +inheriting from an existing class that included such an argument. In +the following example, both "MyClass" and "MySubclass" are instances +of "Meta": + + class Meta(type): + pass + + class MyClass(metaclass=Meta): + pass + + class MySubclass(MyClass): + pass + +Any other keyword arguments that are specified in the class definition +are passed through to all metaclass operations described below. + +When a class definition is executed, the following steps occur: + +* MRO entries are resolved; + +* the appropriate metaclass is determined; + +* the class namespace is prepared; + +* the class body is executed; + +* the class object is created. + + +Resolving MRO entries +--------------------- + +object.__mro_entries__(self, bases) + + If a base that appears in a class definition is not an instance of + "type", then an "__mro_entries__()" method is searched on the base. 
+ If an "__mro_entries__()" method is found, the base is substituted + with the result of a call to "__mro_entries__()" when creating the + class. The method is called with the original bases tuple passed to + the *bases* parameter, and must return a tuple of classes that will + be used instead of the base. The returned tuple may be empty: in + these cases, the original base is ignored. + +See also: + + "types.resolve_bases()" + Dynamically resolve bases that are not instances of "type". + + "types.get_original_bases()" + Retrieve a class’s “original bases” prior to modifications by + "__mro_entries__()". + + **PEP 560** + Core support for typing module and generic types. + + +Determining the appropriate metaclass +------------------------------------- + +The appropriate metaclass for a class definition is determined as +follows: + +* if no bases and no explicit metaclass are given, then "type()" is + used; + +* if an explicit metaclass is given and it is *not* an instance of + "type()", then it is used directly as the metaclass; + +* if an instance of "type()" is given as the explicit metaclass, or + bases are defined, then the most derived metaclass is used. + +The most derived metaclass is selected from the explicitly specified +metaclass (if any) and the metaclasses (i.e. "type(cls)") of all +specified base classes. The most derived metaclass is one which is a +subtype of *all* of these candidate metaclasses. If none of the +candidate metaclasses meets that criterion, then the class definition +will fail with "TypeError". + + +Preparing the class namespace +----------------------------- + +Once the appropriate metaclass has been identified, then the class +namespace is prepared. If the metaclass has a "__prepare__" attribute, +it is called as "namespace = metaclass.__prepare__(name, bases, +**kwds)" (where the additional keyword arguments, if any, come from +the class definition). The "__prepare__" method should be implemented +as a "classmethod". The namespace returned by "__prepare__" is passed +in to "__new__", but when the final class object is created the +namespace is copied into a new "dict". + +If the metaclass has no "__prepare__" attribute, then the class +namespace is initialised as an empty ordered mapping. + +See also: + + **PEP 3115** - Metaclasses in Python 3000 + Introduced the "__prepare__" namespace hook + + +Executing the class body +------------------------ + +The class body is executed (approximately) as "exec(body, globals(), +namespace)". The key difference from a normal call to "exec()" is that +lexical scoping allows the class body (including any methods) to +reference names from the current and outer scopes when the class +definition occurs inside a function. + +However, even when the class definition occurs inside the function, +methods defined inside the class still cannot see names defined at the +class scope. Class variables must be accessed through the first +parameter of instance or class methods, or through the implicit +lexically scoped "__class__" reference described in the next section. + + +Creating the class object +------------------------- + +Once the class namespace has been populated by executing the class +body, the class object is created by calling "metaclass(name, bases, +namespace, **kwds)" (the additional keywords passed here are the same +as those passed to "__prepare__"). + +This class object is the one that will be referenced by the zero- +argument form of "super()". 
"__class__" is an implicit closure +reference created by the compiler if any methods in a class body refer +to either "__class__" or "super". This allows the zero argument form +of "super()" to correctly identify the class being defined based on +lexical scoping, while the class or instance that was used to make the +current call is identified based on the first argument passed to the +method. + +**CPython implementation detail:** In CPython 3.6 and later, the +"__class__" cell is passed to the metaclass as a "__classcell__" entry +in the class namespace. If present, this must be propagated up to the +"type.__new__" call in order for the class to be initialised +correctly. Failing to do so will result in a "RuntimeError" in Python +3.8. + +When using the default metaclass "type", or any metaclass that +ultimately calls "type.__new__", the following additional +customization steps are invoked after creating the class object: + +1. The "type.__new__" method collects all of the attributes in the + class namespace that define a "__set_name__()" method; + +2. Those "__set_name__" methods are called with the class being + defined and the assigned name of that particular attribute; + +3. The "__init_subclass__()" hook is called on the immediate parent of + the new class in its method resolution order. + +After the class object is created, it is passed to the class +decorators included in the class definition (if any) and the resulting +object is bound in the local namespace as the defined class. + +When a new class is created by "type.__new__", the object provided as +the namespace parameter is copied to a new ordered mapping and the +original object is discarded. The new copy is wrapped in a read-only +proxy, which becomes the "__dict__" attribute of the class object. + +See also: + + **PEP 3135** - New super + Describes the implicit "__class__" closure reference + + +Uses for metaclasses +-------------------- + +The potential uses for metaclasses are boundless. Some ideas that have +been explored include enum, logging, interface checking, automatic +delegation, automatic property creation, proxies, frameworks, and +automatic resource locking/synchronization. + + +Customizing instance and subclass checks +======================================== + +The following methods are used to override the default behavior of the +"isinstance()" and "issubclass()" built-in functions. + +In particular, the metaclass "abc.ABCMeta" implements these methods in +order to allow the addition of Abstract Base Classes (ABCs) as +“virtual base classes” to any class or type (including built-in +types), including other ABCs. + +type.__instancecheck__(self, instance) + + Return true if *instance* should be considered a (direct or + indirect) instance of *class*. If defined, called to implement + "isinstance(instance, class)". + +type.__subclasscheck__(self, subclass) + + Return true if *subclass* should be considered a (direct or + indirect) subclass of *class*. If defined, called to implement + "issubclass(subclass, class)". + +Note that these methods are looked up on the type (metaclass) of a +class. They cannot be defined as class methods in the actual class. +This is consistent with the lookup of special methods that are called +on instances, only in this case the instance is itself a class. 
+ +See also: + + **PEP 3119** - Introducing Abstract Base Classes + Includes the specification for customizing "isinstance()" and + "issubclass()" behavior through "__instancecheck__()" and + "__subclasscheck__()", with motivation for this functionality in + the context of adding Abstract Base Classes (see the "abc" + module) to the language. + + +Emulating generic types +======================= + +When using *type annotations*, it is often useful to *parameterize* a +*generic type* using Python’s square-brackets notation. For example, +the annotation "list[int]" might be used to signify a "list" in which +all the elements are of type "int". + +See also: + + **PEP 484** - Type Hints + Introducing Python’s framework for type annotations + + Generic Alias Types + Documentation for objects representing parameterized generic + classes + + Generics, user-defined generics and "typing.Generic" + Documentation on how to implement generic classes that can be + parameterized at runtime and understood by static type-checkers. + +A class can *generally* only be parameterized if it defines the +special class method "__class_getitem__()". + +classmethod object.__class_getitem__(cls, key) + + Return an object representing the specialization of a generic class + by type arguments found in *key*. + + When defined on a class, "__class_getitem__()" is automatically a + class method. As such, there is no need for it to be decorated with + "@classmethod" when it is defined. + + +The purpose of *__class_getitem__* +---------------------------------- + +The purpose of "__class_getitem__()" is to allow runtime +parameterization of standard-library generic classes in order to more +easily apply *type hints* to these classes. + +To implement custom generic classes that can be parameterized at +runtime and understood by static type-checkers, users should either +inherit from a standard library class that already implements +"__class_getitem__()", or inherit from "typing.Generic", which has its +own implementation of "__class_getitem__()". + +Custom implementations of "__class_getitem__()" on classes defined +outside of the standard library may not be understood by third-party +type-checkers such as mypy. Using "__class_getitem__()" on any class +for purposes other than type hinting is discouraged. + + +*__class_getitem__* versus *__getitem__* +---------------------------------------- + +Usually, the subscription of an object using square brackets will call +the "__getitem__()" instance method defined on the object’s class. +However, if the object being subscribed is itself a class, the class +method "__class_getitem__()" may be called instead. +"__class_getitem__()" should return a GenericAlias object if it is +properly defined. 
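+
+As a minimal sketch (the class name "Bag" is illustrative only), a
+user-defined class can support subscription by returning a
+"types.GenericAlias":
+
+   >>> from types import GenericAlias
+   >>> class Bag:
+   ...     def __class_getitem__(cls, item):
+   ...         # Implicitly a classmethod; no decorator is needed.
+   ...         return GenericAlias(cls, item)
+   ...
+   >>> Bag[int]
+   __main__.Bag[int]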
+
+Presented with the *expression* "obj[x]", the Python interpreter
+follows something like the following process to decide whether
+"__getitem__()" or "__class_getitem__()" should be called:
+
+   from inspect import isclass
+
+   def subscribe(obj, x):
+       """Return the result of the expression 'obj[x]'"""
+
+       class_of_obj = type(obj)
+
+       # If the class of obj defines __getitem__,
+       # call class_of_obj.__getitem__(obj, x)
+       if hasattr(class_of_obj, '__getitem__'):
+           return class_of_obj.__getitem__(obj, x)
+
+       # Else, if obj is a class and defines __class_getitem__,
+       # call obj.__class_getitem__(x)
+       elif isclass(obj) and hasattr(obj, '__class_getitem__'):
+           return obj.__class_getitem__(x)
+
+       # Else, raise an exception
+       else:
+           raise TypeError(
+               f"'{class_of_obj.__name__}' object is not subscriptable"
+           )
+
+In Python, all classes are themselves instances of other classes. The
+class of a class is known as that class’s *metaclass*, and most
+classes have the "type" class as their metaclass. "type" does not
+define "__getitem__()", meaning that expressions such as "list[int]",
+"dict[str, float]" and "tuple[str, bytes]" all result in
+"__class_getitem__()" being called:
+
+   >>> # list has class "type" as its metaclass, like most classes:
+   >>> type(list)
+   <class 'type'>
+   >>> type(dict) == type(list) == type(tuple) == type(str) == type(bytes)
+   True
+   >>> # "list[int]" calls "list.__class_getitem__(int)"
+   >>> list[int]
+   list[int]
+   >>> # list.__class_getitem__ returns a GenericAlias object:
+   >>> type(list[int])
+   <class 'types.GenericAlias'>
+
+However, if a class has a custom metaclass that defines
+"__getitem__()", subscribing the class may result in different
+behaviour. An example of this can be found in the "enum" module:
+
+   >>> from enum import Enum
+   >>> class Menu(Enum):
+   ...     """A breakfast menu"""
+   ...     SPAM = 'spam'
+   ...     BACON = 'bacon'
+   ...
+   >>> # Enum classes have a custom metaclass:
+   >>> type(Menu)
+   <class 'enum.EnumMeta'>
+   >>> # EnumMeta defines __getitem__,
+   >>> # so __class_getitem__ is not called,
+   >>> # and the result is not a GenericAlias object:
+   >>> Menu['SPAM']
+   <Menu.SPAM: 'spam'>
+   >>> type(Menu['SPAM'])
+   <enum 'Menu'>
+
+See also:
+
+  **PEP 560** - Core Support for typing module and generic types
+     Introducing "__class_getitem__()", and outlining when a
+     subscription results in "__class_getitem__()" being called
+     instead of "__getitem__()"
+
+
+Emulating callable objects
+==========================
+
+object.__call__(self[, args...])
+
+   Called when the instance is “called” as a function; if this method
+   is defined, "x(arg1, arg2, ...)" roughly translates to
+   "type(x).__call__(x, arg1, ...)". The "object" class itself does
+   not provide this method.
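+
+   For example, instances of a class defining "__call__()" can be
+   used wherever a function is expected. A minimal sketch (the class
+   name "Adder" is illustrative only):
+
+      >>> class Adder:
+      ...     def __init__(self, n):
+      ...         self.n = n
+      ...     def __call__(self, x):
+      ...         # Invoked by the call expression add_five(x)
+      ...         return x + self.n
+      ...
+      >>> add_five = Adder(5)
+      >>> add_five(10)
+      15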
+The "collections.abc" module provides a "MutableMapping" *abstract +base class* to help create those methods from a base set of +"__getitem__()", "__setitem__()", "__delitem__()", and "keys()". + +Mutable sequences should provide methods "append()", "clear()", +"count()", "extend()", "index()", "insert()", "pop()", "remove()", and +"reverse()", like Python standard "list" objects. Finally, sequence +types should implement addition (meaning concatenation) and +multiplication (meaning repetition) by defining the methods +"__add__()", "__radd__()", "__iadd__()", "__mul__()", "__rmul__()" and +"__imul__()" described below; they should not define other numerical +operators. + +It is recommended that both mappings and sequences implement the +"__contains__()" method to allow efficient use of the "in" operator; +for mappings, "in" should search the mapping’s keys; for sequences, it +should search through the values. It is further recommended that both +mappings and sequences implement the "__iter__()" method to allow +efficient iteration through the container; for mappings, "__iter__()" +should iterate through the object’s keys; for sequences, it should +iterate through the values. + +object.__len__(self) + + Called to implement the built-in function "len()". Should return + the length of the object, an integer ">=" 0. Also, an object that + doesn’t define a "__bool__()" method and whose "__len__()" method + returns zero is considered to be false in a Boolean context. + + **CPython implementation detail:** In CPython, the length is + required to be at most "sys.maxsize". If the length is larger than + "sys.maxsize" some features (such as "len()") may raise + "OverflowError". To prevent raising "OverflowError" by truth value + testing, an object must define a "__bool__()" method. + +object.__length_hint__(self) + + Called to implement "operator.length_hint()". Should return an + estimated length for the object (which may be greater or less than + the actual length). The length must be an integer ">=" 0. The + return value may also be "NotImplemented", which is treated the + same as if the "__length_hint__" method didn’t exist at all. This + method is purely an optimization and is never required for + correctness. + + Added in version 3.4. + +object.__getitem__(self, subscript) + + Called to implement *subscription*, that is, "self[subscript]". See + Subscriptions and slicings for details on the syntax. + + There are two types of built-in objects that support subscription + via "__getitem__()": + + * **sequences**, where *subscript* (also called *index*) should be + an integer or a "slice" object. See the sequence documentation + for the expected behavior, including handling "slice" objects and + negative indices. + + * **mappings**, where *subscript* is also called the *key*. See + mapping documentation for the expected behavior. + + If *subscript* is of an inappropriate type, "__getitem__()" should + raise "TypeError". If *subscript* has an inappropriate value, + "__getitem__()" should raise an "LookupError" or one of its + subclasses ("IndexError" for sequences; "KeyError" for mappings). + + Note: + + Slicing is handled by "__getitem__()", "__setitem__()", and + "__delitem__()". A call like + + a[1:2] = b + + is translated to + + a[slice(1, 2, None)] = b + + and so forth. Missing slice items are always filled in with + "None". 
+
+   Note:
+
+     The sequence iteration protocol (used, for example, in "for"
+     loops) expects that an "IndexError" will be raised for illegal
+     indexes to allow proper detection of the end of a sequence.
+
+   Note:
+
+     When subscripting a *class*, the special class method
+     "__class_getitem__()" may be called instead of "__getitem__()".
+     See __class_getitem__ versus __getitem__ for more details.
+
+object.__setitem__(self, key, value)
+
+   Called to implement assignment to "self[key]". Same note as for
+   "__getitem__()". This should only be implemented for mappings if
+   the objects support changes to the values for keys, or if new keys
+   can be added, or for sequences if elements can be replaced. The
+   same exceptions should be raised for improper *key* values as for
+   the "__getitem__()" method.
+
+object.__delitem__(self, key)
+
+   Called to implement deletion of "self[key]". Same note as for
+   "__getitem__()". This should only be implemented for mappings if
+   the objects support removal of keys, or for sequences if elements
+   can be removed from the sequence. The same exceptions should be
+   raised for improper *key* values as for the "__getitem__()" method.
+
+object.__missing__(self, key)
+
+   Called by "dict"."__getitem__()" to implement "self[key]" for dict
+   subclasses when key is not in the dictionary.
+
+object.__iter__(self)
+
+   This method is called when an *iterator* is required for a
+   container. This method should return a new iterator object that can
+   iterate over all the objects in the container. For mappings, it
+   should iterate over the keys of the container.
+
+object.__reversed__(self)
+
+   Called (if present) by the "reversed()" built-in to implement
+   reverse iteration. It should return a new iterator object that
+   iterates over all the objects in the container in reverse order.
+
+   If the "__reversed__()" method is not provided, the "reversed()"
+   built-in will fall back to using the sequence protocol ("__len__()"
+   and "__getitem__()"). Objects that support the sequence protocol
+   should only provide "__reversed__()" if they can provide an
+   implementation that is more efficient than the one provided by
+   "reversed()".
+
+The membership test operators ("in" and "not in") are normally
+implemented as an iteration through a container. However, container
+objects can supply the following special method with a more efficient
+implementation, which also does not require the object be iterable.
+
+object.__contains__(self, item)
+
+   Called to implement membership test operators. Should return true
+   if *item* is in *self*, false otherwise. For mapping objects, this
+   should consider the keys of the mapping rather than the values or
+   the key-item pairs.
+
+   For objects that don’t define "__contains__()", the membership test
+   first tries iteration via "__iter__()", then the old sequence
+   iteration protocol via "__getitem__()", see this section in the
+   language reference.
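+
+For example, a minimal immutable sequence type needs only "__len__()"
+and "__getitem__()"; iteration, membership testing and "reversed()"
+then work through the fallback protocols described above (a sketch;
+the class name "Countdown" is illustrative only):
+
+   >>> class Countdown:
+   ...     def __init__(self, start):
+   ...         self.start = start
+   ...     def __len__(self):
+   ...         return self.start
+   ...     def __getitem__(self, index):
+   ...         # Raising IndexError signals the end of the sequence.
+   ...         if not 0 <= index < self.start:
+   ...             raise IndexError(index)
+   ...         return self.start - index
+   ...
+   >>> list(Countdown(3))
+   [3, 2, 1]
+   >>> 2 in Countdown(3)
+   True
+   >>> list(reversed(Countdown(3)))
+   [1, 2, 3]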
+
+
+Emulating numeric types
+=======================
+
+The following methods can be defined to emulate numeric objects.
+Methods corresponding to operations that are not supported by the
+particular kind of number implemented (e.g., bitwise operations for
+non-integral numbers) should be left undefined.
+
+object.__add__(self, other)
+object.__sub__(self, other)
+object.__mul__(self, other)
+object.__matmul__(self, other)
+object.__truediv__(self, other)
+object.__floordiv__(self, other)
+object.__mod__(self, other)
+object.__divmod__(self, other)
+object.__pow__(self, other[, modulo])
+object.__lshift__(self, other)
+object.__rshift__(self, other)
+object.__and__(self, other)
+object.__xor__(self, other)
+object.__or__(self, other)
+
+   These methods are called to implement the binary arithmetic
+   operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",
+   "pow()", "**", "<<", ">>", "&", "^", "|"). For instance, to
+   evaluate the expression "x + y", where *x* is an instance of a
+   class that has an "__add__()" method, "type(x).__add__(x, y)" is
+   called. The "__divmod__()" method should be the equivalent to
+   using "__floordiv__()" and "__mod__()"; it should not be related to
+   "__truediv__()". Note that "__pow__()" should be defined to accept
+   an optional third argument if the three-argument version of the
+   built-in "pow()" function is to be supported.
+
+   If one of those methods does not support the operation with the
+   supplied arguments, it should return "NotImplemented".
+
+object.__radd__(self, other)
+object.__rsub__(self, other)
+object.__rmul__(self, other)
+object.__rmatmul__(self, other)
+object.__rtruediv__(self, other)
+object.__rfloordiv__(self, other)
+object.__rmod__(self, other)
+object.__rdivmod__(self, other)
+object.__rpow__(self, other[, modulo])
+object.__rlshift__(self, other)
+object.__rrshift__(self, other)
+object.__rand__(self, other)
+object.__rxor__(self, other)
+object.__ror__(self, other)
+
+   These methods are called to implement the binary arithmetic
+   operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",
+   "pow()", "**", "<<", ">>", "&", "^", "|") with reflected (swapped)
+   operands. These functions are only called if the operands are of
+   different types, when the left operand does not support the
+   corresponding operation [3], or the right operand’s class is
+   derived from the left operand’s class. [4] For instance, to
+   evaluate the expression "x - y", where *y* is an instance of a
+   class that has an "__rsub__()" method, "type(y).__rsub__(y, x)" is
+   called if "type(x).__sub__(x, y)" returns "NotImplemented" or
+   "type(y)" is a subclass of "type(x)". [5]
+
+   Note that "__rpow__()" should be defined to accept an optional
+   third argument if the three-argument version of the built-in
+   "pow()" function is to be supported.
+
+   Changed in version 3.14: Three-argument "pow()" now tries calling
+   "__rpow__()" if necessary. Previously it was only called in two-
+   argument "pow()" and the binary power operator.
+
+   Note:
+
+     If the right operand’s type is a subclass of the left operand’s
+     type and that subclass provides a different implementation of the
+     reflected method for the operation, this method will be called
+     before the left operand’s non-reflected method. This behavior
+     allows subclasses to override their ancestors’ operations.
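+
+For example, a class can support "+" with both operand orders by
+implementing "__add__()" and "__radd__()" and returning
+"NotImplemented" for unsupported operands (a minimal sketch; the
+class name "Metres" is illustrative only):
+
+   >>> class Metres:
+   ...     def __init__(self, value):
+   ...         self.value = value
+   ...     def __add__(self, other):
+   ...         if isinstance(other, (int, float)):
+   ...             return Metres(self.value + other)
+   ...         return NotImplemented
+   ...     __radd__ = __add__  # addition is commutative here
+   ...
+   >>> (Metres(2) + 3).value
+   5
+   >>> # int.__add__ returns NotImplemented, so Metres.__radd__ is tried:
+   >>> (3 + Metres(2)).value
+   5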
+
+object.__iadd__(self, other)
+object.__isub__(self, other)
+object.__imul__(self, other)
+object.__imatmul__(self, other)
+object.__itruediv__(self, other)
+object.__ifloordiv__(self, other)
+object.__imod__(self, other)
+object.__ipow__(self, other[, modulo])
+object.__ilshift__(self, other)
+object.__irshift__(self, other)
+object.__iand__(self, other)
+object.__ixor__(self, other)
+object.__ior__(self, other)
+
+   These methods are called to implement the augmented arithmetic
+   assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", "**=",
+   "<<=", ">>=", "&=", "^=", "|="). These methods should attempt to
+   do the operation in-place (modifying *self*) and return the result
+   (which could be, but does not have to be, *self*). If a specific
+   method is not defined, or if that method returns "NotImplemented",
+   the augmented assignment falls back to the normal methods. For
+   instance, if *x* is an instance of a class with an "__iadd__()"
+   method, "x += y" is equivalent to "x = x.__iadd__(y)". If
+   "__iadd__()" does not exist, or if "x.__iadd__(y)" returns
+   "NotImplemented", "x.__add__(y)" and "y.__radd__(x)" are
+   considered, as with the evaluation of "x + y". In certain
+   situations, augmented assignment can result in unexpected errors
+   (see Why does a_tuple[i] += [‘item’] raise an exception when the
+   addition works?), but this behavior is in fact part of the data
+   model.
+
+object.__neg__(self)
+object.__pos__(self)
+object.__abs__(self)
+object.__invert__(self)
+
+   Called to implement the unary arithmetic operations ("-", "+",
+   "abs()" and "~").
+
+object.__complex__(self)
+object.__int__(self)
+object.__float__(self)
+
+   Called to implement the built-in functions "complex()", "int()" and
+   "float()". Should return a value of the appropriate type.
+
+object.__index__(self)
+
+   Called to implement "operator.index()", and whenever Python needs
+   to losslessly convert the numeric object to an integer object (such
+   as in slicing, or in the built-in "bin()", "hex()" and "oct()"
+   functions). Presence of this method indicates that the numeric
+   object is an integer type. Must return an integer.
+
+   If "__int__()", "__float__()" and "__complex__()" are not defined
+   then corresponding built-in functions "int()", "float()" and
+   "complex()" fall back to "__index__()".
+
+object.__round__(self[, ndigits])
+object.__trunc__(self)
+object.__floor__(self)
+object.__ceil__(self)
+
+   Called to implement the built-in function "round()" and "math"
+   functions "trunc()", "floor()" and "ceil()". Unless *ndigits* is
+   passed to "__round__()", all these methods should return the value
+   of the object truncated to an "Integral" (typically an "int").
+
+   Changed in version 3.14: "int()" no longer delegates to the
+   "__trunc__()" method.
+
+
+With Statement Context Managers
+===============================
+
+A *context manager* is an object that defines the runtime context to
+be established when executing a "with" statement. The context manager
+handles the entry into, and the exit from, the desired runtime context
+for the execution of the block of code. Context managers are normally
+invoked using the "with" statement (described in section The with
+statement), but can also be used by directly invoking their methods.
+
+Typical uses of context managers include saving and restoring various
+kinds of global state, locking and unlocking resources, closing opened
+files, etc.
+
+For more information on context managers, see Context Manager Types.
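+
+For example, a minimal context manager that temporarily changes the
+current working directory might look like the sketch below (the class
+name and path are illustrative only; "contextlib" provides ready-made
+helpers such as "contextlib.chdir()"):
+
+   import os
+
+   class ChangeDir:
+       def __init__(self, path):
+           self.path = path
+
+       def __enter__(self):
+           # Save state on entry so it can be restored on exit.
+           self.saved = os.getcwd()
+           os.chdir(self.path)
+           return self.path
+
+       def __exit__(self, exc_type, exc_value, traceback):
+           # Restore state; returning None lets exceptions propagate.
+           os.chdir(self.saved)
+
+   with ChangeDir('/tmp') as where:
+       print(where)
+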
+The "object" class itself does not provide the context manager +methods. + +object.__enter__(self) + + Enter the runtime context related to this object. The "with" + statement will bind this method’s return value to the target(s) + specified in the "as" clause of the statement, if any. + +object.__exit__(self, exc_type, exc_value, traceback) + + Exit the runtime context related to this object. The parameters + describe the exception that caused the context to be exited. If the + context was exited without an exception, all three arguments will + be "None". + + If an exception is supplied, and the method wishes to suppress the + exception (i.e., prevent it from being propagated), it should + return a true value. Otherwise, the exception will be processed + normally upon exit from this method. + + Note that "__exit__()" methods should not reraise the passed-in + exception; this is the caller’s responsibility. + +See also: + + **PEP 343** - The “with” statement + The specification, background, and examples for the Python "with" + statement. + + +Customizing positional arguments in class pattern matching +========================================================== + +When using a class name in a pattern, positional arguments in the +pattern are not allowed by default, i.e. "case MyClass(x, y)" is +typically invalid without special support in "MyClass". To be able to +use that kind of pattern, the class needs to define a *__match_args__* +attribute. + +object.__match_args__ + + This class variable can be assigned a tuple of strings. When this + class is used in a class pattern with positional arguments, each + positional argument will be converted into a keyword argument, + using the corresponding value in *__match_args__* as the keyword. + The absence of this attribute is equivalent to setting it to "()". + +For example, if "MyClass.__match_args__" is "("left", "center", +"right")" that means that "case MyClass(x, y)" is equivalent to "case +MyClass(left=x, center=y)". Note that the number of arguments in the +pattern must be smaller than or equal to the number of elements in +*__match_args__*; if it is larger, the pattern match attempt will +raise a "TypeError". + +Added in version 3.10. + +See also: + + **PEP 634** - Structural Pattern Matching + The specification for the Python "match" statement. + + +Emulating buffer types +====================== + +The buffer protocol provides a way for Python objects to expose +efficient access to a low-level memory array. This protocol is +implemented by builtin types such as "bytes" and "memoryview", and +third-party libraries may define additional buffer types. + +While buffer types are usually implemented in C, it is also possible +to implement the protocol in Python. + +object.__buffer__(self, flags) + + Called when a buffer is requested from *self* (for example, by the + "memoryview" constructor). The *flags* argument is an integer + representing the kind of buffer requested, affecting for example + whether the returned buffer is read-only or writable. + "inspect.BufferFlags" provides a convenient way to interpret the + flags. The method must return a "memoryview" object. + + **Thread safety:** In *free-threaded* Python, implementations must + manage any internal export counter using atomic operations. The + method must be safe to call concurrently from multiple threads, and + the returned buffer’s underlying data must remain valid until the + corresponding "__release_buffer__()" call completes. 
See Thread
+safety for memoryview objects for details.
+
+object.__release_buffer__(self, buffer)
+
+   Called when a buffer is no longer needed. The *buffer* argument is
+   a "memoryview" object that was previously returned by
+   "__buffer__()". The method must release any resources associated
+   with the buffer. This method should return "None".
+
+   **Thread safety:** In *free-threaded* Python, any export counter
+   decrement must use atomic operations. Resource cleanup must be
+   thread-safe, as the final release may race with concurrent releases
+   from other threads.
+
+   Buffer objects that do not need to perform any cleanup are not
+   required to implement this method.
+
+Added in version 3.12.
+
+See also:
+
+  **PEP 688** - Making the buffer protocol accessible in Python
+     Introduces the Python "__buffer__" and "__release_buffer__"
+     methods.
+
+  "collections.abc.Buffer"
+     ABC for buffer types.
+
+
+Annotations
+===========
+
+Functions, classes, and modules may contain *annotations*, which are a
+way to associate information (usually *type hints*) with a symbol.
+
+object.__annotations__
+
+   This attribute contains the annotations for an object. It is lazily
+   evaluated, so accessing the attribute may execute arbitrary code
+   and raise exceptions. If evaluation is successful, the attribute is
+   set to a dictionary mapping from variable names to annotations.
+
+   Changed in version 3.14: Annotations are now lazily evaluated.
+
+object.__annotate__(format)
+
+   An *annotate function*. Returns a new dictionary object mapping
+   attribute/parameter names to their annotation values.
+
+   Takes a format parameter specifying the format in which annotation
+   values should be provided. It must be a member of the
+   "annotationlib.Format" enum, or an integer with a value
+   corresponding to a member of the enum.
+
+   If an annotate function doesn’t support the requested format, it
+   must raise "NotImplementedError". Annotate functions must always
+   support "VALUE" format; they must not raise "NotImplementedError()"
+   when called with this format.
+
+   When called with "VALUE" format, an annotate function may raise
+   "NameError"; it must not raise "NameError" when called requesting
+   any other format.
+
+   If an object does not have any annotations, "__annotate__" should
+   preferably be set to "None" (it can’t be deleted), rather than set
+   to a function that returns an empty dict.
+
+   Added in version 3.14.
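+
+For example, annotations declared on a class can be retrieved either
+through "__annotations__" or by calling "__annotate__" with an
+explicit format (a small sketch, assuming Python 3.14's
+"annotationlib"; the class name "Point" is illustrative only):
+
+   from annotationlib import Format
+
+   class Point:
+       x: int
+       y: int
+
+   # Both lines print {'x': <class 'int'>, 'y': <class 'int'>}
+   print(Point.__annotations__)
+   print(Point.__annotate__(Format.VALUE))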
+
+See also:
+
+  **PEP 649** - Deferred evaluation of annotations using descriptors
+     Introduces lazy evaluation of annotations and the "__annotate__"
+     function.
+
+
+Special method lookup
+=====================
+
+For custom classes, implicit invocations of special methods are only
+guaranteed to work correctly if defined on an object’s type, not in
+the object’s instance dictionary. That behaviour is the reason why
+the following code raises an exception:
+
+   >>> class C:
+   ...     pass
+   ...
+   >>> c = C()
+   >>> c.__len__ = lambda: 5
+   >>> len(c)
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   TypeError: object of type 'C' has no len()
+
+The rationale behind this behaviour lies with a number of special
+methods such as "__hash__()" and "__repr__()" that are implemented by
+all objects, including type objects. If the implicit lookup of these
+methods used the conventional lookup process, they would fail when
+invoked on the type object itself:
+
+   >>> 1 .__hash__() == hash(1)
+   True
+   >>> int.__hash__() == hash(int)
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   TypeError: descriptor '__hash__' of 'int' object needs an argument
+
+Incorrectly attempting to invoke an unbound method of a class in this
+way is sometimes referred to as ‘metaclass confusion’, and is avoided
+by bypassing the instance when looking up special methods:
+
+   >>> type(1).__hash__(1) == hash(1)
+   True
+   >>> type(int).__hash__(int) == hash(int)
+   True
+
+In addition to bypassing any instance attributes in the interest of
+correctness, implicit special method lookup generally also bypasses
+the "__getattribute__()" method even of the object’s metaclass:
+
+   >>> class Meta(type):
+   ...     def __getattribute__(*args):
+   ...         print("Metaclass getattribute invoked")
+   ...         return type.__getattribute__(*args)
+   ...
+   >>> class C(object, metaclass=Meta):
+   ...     def __len__(self):
+   ...         return 10
+   ...     def __getattribute__(*args):
+   ...         print("Class getattribute invoked")
+   ...         return object.__getattribute__(*args)
+   ...
+   >>> c = C()
+   >>> c.__len__()                 # Explicit lookup via instance
+   Class getattribute invoked
+   10
+   >>> type(c).__len__(c)          # Explicit lookup via type
+   Metaclass getattribute invoked
+   10
+   >>> len(c)                      # Implicit lookup
+   10
+
+Bypassing the "__getattribute__()" machinery in this fashion provides
+significant scope for speed optimisations within the interpreter, at
+the cost of some flexibility in the handling of special methods (the
+special method *must* be set on the class object itself in order to be
+consistently invoked by the interpreter).
+''',
+ 'string-methods': r'''String Methods
+**************
+
+Strings implement all of the common sequence operations, along with
+the additional methods described below.
+
+Strings also support two styles of string formatting, one providing a
+large degree of flexibility and customization (see "str.format()",
+Format string syntax and Custom string formatting) and the other based
+on C "printf" style formatting that handles a narrower range of types
+and is slightly harder to use correctly, but is often faster for the
+cases it can handle (printf-style String Formatting).
+
+The Text Processing Services section of the standard library covers a
+number of other modules that provide various text related utilities
+(including regular expression support in the "re" module).
+
+str.capitalize()
+
+   Return a copy of the string with its first character capitalized
+   and the rest lowercased.
+
+   Changed in version 3.8: The first character is now put into
+   titlecase rather than uppercase. This means that characters like
+   digraphs will only have their first letter capitalized, instead of
+   the full character.
+
+str.casefold()
+
+   Return a casefolded copy of the string. Casefolded strings may be
+   used for caseless matching.
+
+   Casefolding is similar to lowercasing but more aggressive because
+   it is intended to remove all case distinctions in a string. For
+   example, the German lowercase letter "'ß'" is equivalent to ""ss"".
+   Since it is already lowercase, "lower()" would do nothing to "'ß'";
+   "casefold()" converts it to ""ss"". For example:
+
+      >>> 'straße'.lower()
+      'straße'
+      >>> 'straße'.casefold()
+      'strasse'
+
+   The casefolding algorithm is described in section 3.13 ‘Default
+   Case Folding’ of the Unicode Standard.
+
+   Added in version 3.3.
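+
+   For example, a case-insensitive membership test can normalize both
+   sides with "casefold()" (a small sketch; the helper name is
+   illustrative only):
+
+      >>> def caseless_in(needle, haystack):
+      ...     # casefold() handles case distinctions lower() misses.
+      ...     return needle.casefold() in haystack.casefold()
+      ...
+      >>> caseless_in('STRASSE', 'straße 7')
+      True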
+
+str.center(width, fillchar=' ', /)
+
+   Return the string centered in a string of length *width*. Padding
+   is done using the specified *fillchar* (default is an ASCII
+   space). The original string is returned if *width* is less than or
+   equal to "len(s)". For example:
+
+      >>> 'Python'.center(10)
+      '  Python  '
+      >>> 'Python'.center(10, '-')
+      '--Python--'
+      >>> 'Python'.center(4)
+      'Python'
+
+str.count(sub[, start[, end]])
+
+   Return the number of non-overlapping occurrences of substring *sub*
+   in the range [*start*, *end*]. Optional arguments *start* and
+   *end* are interpreted as in slice notation.
+
+   If *sub* is empty, returns the number of empty strings between
+   characters, which is the length of the string plus one. For
+   example:
+
+      >>> 'spam, spam, spam'.count('spam')
+      3
+      >>> 'spam, spam, spam'.count('spam', 5)
+      2
+      >>> 'spam, spam, spam'.count('spam', 5, 10)
+      1
+      >>> 'spam, spam, spam'.count('eggs')
+      0
+      >>> 'spam, spam, spam'.count('')
+      17
+
+str.encode(encoding='utf-8', errors='strict')
+
+   Return the string encoded to "bytes".
+
+   *encoding* defaults to "'utf-8'"; see Standard Encodings for
+   possible values.
+
+   *errors* controls how encoding errors are handled. If "'strict'"
+   (the default), a "UnicodeError" exception is raised. Other possible
+   values are "'ignore'", "'replace'", "'xmlcharrefreplace'",
+   "'backslashreplace'" and any other name registered via
+   "codecs.register_error()". See Error Handlers for details.
+
+   For performance reasons, the value of *errors* is not checked for
+   validity unless an encoding error actually occurs, Python
+   Development Mode is enabled or a debug build is used. For example:
+
+      >>> encoded_str_to_bytes = 'Python'.encode()
+      >>> type(encoded_str_to_bytes)
+      <class 'bytes'>
+      >>> encoded_str_to_bytes
+      b'Python'
+
+   Changed in version 3.1: Added support for keyword arguments.
+
+   Changed in version 3.9: The value of the *errors* argument is now
+   checked in Python Development Mode and in debug mode.
+
+str.endswith(suffix[, start[, end]])
+
+   Return "True" if the string ends with the specified *suffix*,
+   otherwise return "False". *suffix* can also be a tuple of suffixes
+   to look for. With optional *start*, test beginning at that
+   position. With optional *end*, stop comparing at that position.
+   Using *start* and *end* is equivalent to
+   "str[start:end].endswith(suffix)". For example:
+
+      >>> 'Python'.endswith('on')
+      True
+      >>> 'a tuple of suffixes'.endswith(('at', 'in'))
+      False
+      >>> 'a tuple of suffixes'.endswith(('at', 'es'))
+      True
+      >>> 'Python is amazing'.endswith('is', 0, 9)
+      True
+
+   See also "startswith()" and "removesuffix()".
+
+str.expandtabs(tabsize=8)
+
+   Return a copy of the string where all tab characters are replaced
+   by one or more spaces, depending on the current column and the
+   given tab size. Tab positions occur every *tabsize* characters
+   (default is 8, giving tab positions at columns 0, 8, 16 and so on).
+   To expand the string, the current column is set to zero and the
+   string is examined character by character. If the character is a
+   tab ("\t"), one or more space characters are inserted in the result
+   until the current column is equal to the next tab position. (The
+   tab character itself is not copied.) If the character is a newline
+   ("\n") or return ("\r"), it is copied and the current column is
+   reset to zero. Any other character is copied unchanged and the
+   current column is incremented by one regardless of how the
+   character is represented when printed. 
For example:
+
+      >>> '01\t012\t0123\t01234'.expandtabs()
+      '01      012     0123    01234'
+      >>> '01\t012\t0123\t01234'.expandtabs(4)
+      '01  012 0123    01234'
+      >>> print('01\t012\n0123\t01234'.expandtabs(4))
+      01  012
+      0123    01234
+
+str.find(sub[, start[, end]])
+
+   Return the lowest index in the string where substring *sub* is
+   found within the slice "s[start:end]". Optional arguments *start*
+   and *end* are interpreted as in slice notation. Return "-1" if
+   *sub* is not found. For example:
+
+      >>> 'spam, spam, spam'.find('sp')
+      0
+      >>> 'spam, spam, spam'.find('sp', 5)
+      6
+
+   See also "rfind()" and "index()".
+
+   Note:
+
+     The "find()" method should be used only if you need to know the
+     position of *sub*. To check if *sub* is a substring or not, use
+     the "in" operator:
+
+        >>> 'Py' in 'Python'
+        True
+
+str.format(*args, **kwargs)
+
+   Perform a string formatting operation. The string on which this
+   method is called can contain literal text or replacement fields
+   delimited by braces "{}". Each replacement field contains either
+   the numeric index of a positional argument, or the name of a
+   keyword argument. Returns a copy of the string where each
+   replacement field is replaced with the string value of the
+   corresponding argument. For example:
+
+      >>> "The sum of 1 + 2 is {0}".format(1+2)
+      'The sum of 1 + 2 is 3'
+      >>> "The sum of {a} + {b} is {answer}".format(answer=1+2, a=1, b=2)
+      'The sum of 1 + 2 is 3'
+      >>> "{1} expects the {0} Inquisition!".format("Spanish", "Nobody")
+      'Nobody expects the Spanish Inquisition!'
+
+   See Format string syntax for a description of the various
+   formatting options that can be specified in format strings.
+
+   Note:
+
+     When formatting a number ("int", "float", "complex",
+     "decimal.Decimal" and subclasses) with the "n" type (ex:
+     "'{:n}'.format(1234)"), the function temporarily sets the
+     "LC_CTYPE" locale to the "LC_NUMERIC" locale to decode
+     "decimal_point" and "thousands_sep" fields of "localeconv()" if
+     they are non-ASCII or longer than 1 byte, and the "LC_NUMERIC"
+     locale is different than the "LC_CTYPE" locale. This temporary
+     change affects other threads.
+
+   Changed in version 3.7: When formatting a number with the "n" type,
+   the function temporarily sets the "LC_CTYPE" locale to the
+   "LC_NUMERIC" locale in some cases.
+
+str.format_map(mapping, /)
+
+   Similar to "str.format(**mapping)", except that "mapping" is used
+   directly and not copied to a "dict". This is useful if, for
+   example, "mapping" is a dict subclass:
+
+      >>> class Default(dict):
+      ...     def __missing__(self, key):
+      ...         return key
+      ...
+      >>> '{name} was born in {country}'.format_map(Default(name='Guido'))
+      'Guido was born in country'
+
+   Added in version 3.2.
+
+str.index(sub[, start[, end]])
+
+   Like "find()", but raise "ValueError" when the substring is not
+   found. For example:
+
+      >>> 'spam, spam, spam'.index('spam')
+      0
+      >>> 'spam, spam, spam'.index('eggs')
+      Traceback (most recent call last):
+        File "<stdin>", line 1, in <module>
+          'spam, spam, spam'.index('eggs')
+          ~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^
+      ValueError: substring not found
+
+   See also "rindex()".
+
+str.isalnum()
+
+   Return "True" if all characters in the string are alphanumeric and
+   there is at least one character, "False" otherwise. A character
+   "c" is alphanumeric if one of the following returns "True":
+   "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".
+ For example: + + >>> 'abc123'.isalnum() + True + >>> 'abc123!@#'.isalnum() + False + >>> ''.isalnum() + False + >>> ' '.isalnum() + False + +str.isalpha() + + Return "True" if all characters in the string are alphabetic and + there is at least one character, "False" otherwise. Alphabetic + characters are those characters defined in the Unicode character + database as “Letter”, i.e., those with general category property + being one of “Lm”, “Lt”, “Lu”, “Ll”, or “Lo”. Note that this is + different from the Alphabetic property defined in the section 4.10 + ‘Letters, Alphabetic, and Ideographic’ of the Unicode Standard. For + example: + + >>> 'Letters and spaces'.isalpha() + False + >>> 'LettersOnly'.isalpha() + True + >>> 'µ'.isalpha() # non-ASCII characters can be considered alphabetical too + True + + See Unicode Properties. + +str.isascii() + + Return "True" if the string is empty or all characters in the + string are ASCII, "False" otherwise. ASCII characters have code + points in the range U+0000-U+007F. For example: + + >>> 'ASCII characters'.isascii() + True + >>> 'µ'.isascii() + False + + Added in version 3.7. + +str.isdecimal() + + Return "True" if all characters in the string are decimal + characters and there is at least one character, "False" otherwise. + Decimal characters are those that can be used to form numbers in + base 10, such as U+0660, ARABIC-INDIC DIGIT ZERO. Formally a + decimal character is a character in the Unicode General Category + “Nd”. For example: + + >>> '0123456789'.isdecimal() + True + >>> '٠١٢٣٤٥٦٧٨٩'.isdecimal() # Arabic-Indic digits zero to nine + True + >>> 'alphabetic'.isdecimal() + False + +str.isdigit() + + Return "True" if all characters in the string are digits and there + is at least one character, "False" otherwise. Digits include + decimal characters and digits that need special handling, such as + the compatibility superscript digits. This covers digits which + cannot be used to form numbers in base 10, like the Kharosthi + numbers. Formally, a digit is a character that has the property + value Numeric_Type=Digit or Numeric_Type=Decimal. + +str.isidentifier() + + Return "True" if the string is a valid identifier according to the + language definition, section Names (identifiers and keywords). + + "keyword.iskeyword()" can be used to test whether string "s" is a + reserved identifier, such as "def" and "class". + + Example: + + >>> from keyword import iskeyword + + >>> 'hello'.isidentifier(), iskeyword('hello') + (True, False) + >>> 'def'.isidentifier(), iskeyword('def') + (True, True) + +str.islower() + + Return "True" if all cased characters [4] in the string are + lowercase and there is at least one cased character, "False" + otherwise. + +str.isnumeric() + + Return "True" if all characters in the string are numeric + characters, and there is at least one character, "False" otherwise. + Numeric characters include digit characters, and all characters + that have the Unicode numeric value property, e.g. U+2155, VULGAR + FRACTION ONE FIFTH. Formally, numeric characters are those with + the property value Numeric_Type=Digit, Numeric_Type=Decimal or + Numeric_Type=Numeric. For example: + + >>> '0123456789'.isnumeric() + True + >>> '٠١٢٣٤٥٦٧٨٩'.isnumeric() # Arabic-indic digit zero to nine + True + >>> '⅕'.isnumeric() # Vulgar fraction one fifth + True + >>> '²'.isdecimal(), '²'.isdigit(), '²'.isnumeric() + (False, True, True) + + See also "isdecimal()" and "isdigit()". Numeric characters are a + superset of decimal numbers. 
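+
+   The subset relationship between the three checks can be seen by
+   classifying a few sample characters (a quick sketch):
+
+      >>> for ch in '5', '²', '⅕':
+      ...     print(ch, ch.isdecimal(), ch.isdigit(), ch.isnumeric())
+      ...
+      5 True True True
+      ² False True True
+      ⅕ False False True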
+ +str.isprintable() + + Return "True" if all characters in the string are printable, + "False" if it contains at least one non-printable character. + + Here “printable” means the character is suitable for "repr()" to + use in its output; “non-printable” means that "repr()" on built-in + types will hex-escape the character. It has no bearing on the + handling of strings written to "sys.stdout" or "sys.stderr". + + The printable characters are those which in the Unicode character + database (see "unicodedata") have a general category in group + Letter, Mark, Number, Punctuation, or Symbol (L, M, N, P, or S); + plus the ASCII space 0x20. Nonprintable characters are those in + group Separator or Other (Z or C), except the ASCII space. + + For example: + + >>> ''.isprintable(), ' '.isprintable() + (True, True) + >>> '\t'.isprintable(), '\n'.isprintable() + (False, False) + + See also "isspace()". + +str.isspace() + + Return "True" if there are only whitespace characters in the string + and there is at least one character, "False" otherwise. + + For example: + + >>> ''.isspace() + False + >>> ' '.isspace() + True + >>> '\t\n'.isspace() # TAB and BREAK LINE + True + >>> '\u3000'.isspace() # IDEOGRAPHIC SPACE + True + + A character is *whitespace* if in the Unicode character database + (see "unicodedata"), either its general category is "Zs" + (“Separator, space”), or its bidirectional class is one of "WS", + "B", or "S". + + See also "isprintable()". + +str.istitle() + + Return "True" if the string is a titlecased string and there is at + least one character, for example uppercase characters may only + follow uncased characters and lowercase characters only cased ones. + Return "False" otherwise. + + For example: + + >>> 'Spam, Spam, Spam'.istitle() + True + >>> 'spam, spam, spam'.istitle() + False + >>> 'SPAM, SPAM, SPAM'.istitle() + False + + See also "title()". + +str.isupper() + + Return "True" if all cased characters [4] in the string are + uppercase and there is at least one cased character, "False" + otherwise. + + >>> 'BANANA'.isupper() + True + >>> 'banana'.isupper() + False + >>> 'baNana'.isupper() + False + >>> ' '.isupper() + False + +str.join(iterable, /) + + Return a string which is the concatenation of the strings in + *iterable*. A "TypeError" will be raised if there are any non- + string values in *iterable*, including "bytes" objects. The + separator between elements is the string providing this method. For + example: + + >>> ', '.join(['spam', 'spam', 'spam']) + 'spam, spam, spam' + >>> '-'.join('Python') + 'P-y-t-h-o-n' + + See also "split()". + +str.ljust(width, fillchar=' ', /) + + Return the string left justified in a string of length *width*. + Padding is done using the specified *fillchar* (default is an ASCII + space). The original string is returned if *width* is less than or + equal to "len(s)". + + For example: + + >>> 'Python'.ljust(10) + 'Python ' + >>> 'Python'.ljust(10, '.') + 'Python....' + >>> 'Monty Python'.ljust(10, '.') + 'Monty Python' + + See also "rjust()". + +str.lower() + + Return a copy of the string with all the cased characters [4] + converted to lowercase. For example: + + >>> 'Lower Method Example'.lower() + 'lower method example' + + The lowercasing algorithm used is described in section 3.13 + ‘Default Case Folding’ of the Unicode Standard. + +str.lstrip(chars=None, /) + + Return a copy of the string with leading characters removed. The + *chars* argument is a string specifying the set of characters to be + removed. 
If omitted or "None", the *chars* argument defaults to + removing whitespace. The *chars* argument is not a prefix; rather, + all combinations of its values are stripped: + + >>> ' spacious '.lstrip() + 'spacious ' + >>> 'www.example.com'.lstrip('cmowz.') + 'example.com' + + See "str.removeprefix()" for a method that will remove a single + prefix string rather than all of a set of characters. For example: + + >>> 'Arthur: three!'.lstrip('Arthur: ') + 'ee!' + >>> 'Arthur: three!'.removeprefix('Arthur: ') + 'three!' + +static str.maketrans(dict, /) +static str.maketrans(from, to, remove='', /) + + This static method returns a translation table usable for + "str.translate()". + + If there is only one argument, it must be a dictionary mapping + Unicode ordinals (integers) or characters (strings of length 1) to + Unicode ordinals, strings (of arbitrary lengths) or "None". + Character keys will then be converted to ordinals. + + If there are two arguments, they must be strings of equal length, + and in the resulting dictionary, each character in *from* will be + mapped to the character at the same position in *to*. If there is + a third argument, it must be a string, whose characters will be + mapped to "None" in the result. + +str.partition(sep, /) + + Split the string at the first occurrence of *sep*, and return a + 3-tuple containing the part before the separator, the separator + itself, and the part after the separator. If the separator is not + found, return a 3-tuple containing the string itself, followed by + two empty strings. + + For example: + + >>> 'Monty Python'.partition(' ') + ('Monty', ' ', 'Python') + >>> "Monty Python's Flying Circus".partition(' ') + ('Monty', ' ', "Python's Flying Circus") + >>> 'Monty Python'.partition('-') + ('Monty Python', '', '') + + See also "rpartition()". + +str.removeprefix(prefix, /) + + If the string starts with the *prefix* string, return + "string[len(prefix):]". Otherwise, return a copy of the original + string: + + >>> 'TestHook'.removeprefix('Test') + 'Hook' + >>> 'BaseTestCase'.removeprefix('Test') + 'BaseTestCase' + + Added in version 3.9. + + See also "removesuffix()" and "startswith()". + +str.removesuffix(suffix, /) + + If the string ends with the *suffix* string and that *suffix* is + not empty, return "string[:-len(suffix)]". Otherwise, return a copy + of the original string: + + >>> 'MiscTests'.removesuffix('Tests') + 'Misc' + >>> 'TmpDirMixin'.removesuffix('Tests') + 'TmpDirMixin' + + Added in version 3.9. + + See also "removeprefix()" and "endswith()". + +str.replace(old, new, /, count=-1) + + Return a copy of the string with all occurrences of substring *old* + replaced by *new*. If *count* is given, only the first *count* + occurrences are replaced. If *count* is not specified or "-1", then + all occurrences are replaced. For example: + + >>> 'spam, spam, spam'.replace('spam', 'eggs') + 'eggs, eggs, eggs' + >>> 'spam, spam, spam'.replace('spam', 'eggs', 1) + 'eggs, spam, spam' + + Changed in version 3.13: *count* is now supported as a keyword + argument. + +str.rfind(sub[, start[, end]]) + + Return the highest index in the string where substring *sub* is + found, such that *sub* is contained within "s[start:end]". + Optional arguments *start* and *end* are interpreted as in slice + notation. Return "-1" on failure. For example: + + >>> 'spam, spam, spam'.rfind('sp') + 12 + >>> 'spam, spam, spam'.rfind('sp', 0, 10) + 6 + + See also "find()" and "rindex()". 
+
+str.rindex(sub[, start[, end]])
+
+   Like "rfind()" but raises "ValueError" when the substring *sub* is
+   not found. For example:
+
+      >>> 'spam, spam, spam'.rindex('spam')
+      12
+      >>> 'spam, spam, spam'.rindex('eggs')
+      Traceback (most recent call last):
+        File "<stdin>", line 1, in <module>
+          'spam, spam, spam'.rindex('eggs')
+          ~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^
+      ValueError: substring not found
+
+   See also "index()" and "find()".
+
+str.rjust(width, fillchar=' ', /)
+
+   Return the string right justified in a string of length *width*.
+   Padding is done using the specified *fillchar* (default is an ASCII
+   space). The original string is returned if *width* is less than or
+   equal to "len(s)".
+
+   For example:
+
+      >>> 'Python'.rjust(10)
+      '    Python'
+      >>> 'Python'.rjust(10, '.')
+      '....Python'
+      >>> 'Monty Python'.rjust(10, '.')
+      'Monty Python'
+
+   See also "ljust()" and "zfill()".
+
+str.rpartition(sep, /)
+
+   Split the string at the last occurrence of *sep*, and return a
+   3-tuple containing the part before the separator, the separator
+   itself, and the part after the separator. If the separator is not
+   found, return a 3-tuple containing two empty strings, followed by
+   the string itself.
+
+   For example:
+
+      >>> 'Monty Python'.rpartition(' ')
+      ('Monty', ' ', 'Python')
+      >>> "Monty Python's Flying Circus".rpartition(' ')
+      ("Monty Python's Flying", ' ', 'Circus')
+      >>> 'Monty Python'.rpartition('-')
+      ('', '', 'Monty Python')
+
+   See also "partition()".
+
+str.rsplit(sep=None, maxsplit=-1)
+
+   Return a list of the words in the string, using *sep* as the
+   delimiter string. If *maxsplit* is given, at most *maxsplit* splits
+   are done, the *rightmost* ones. If *sep* is not specified or
+   "None", any whitespace string is a separator. Except for splitting
+   from the right, "rsplit()" behaves like "split()" which is
+   described in detail below.
+
+str.rstrip(chars=None, /)
+
+   Return a copy of the string with trailing characters removed. The
+   *chars* argument is a string specifying the set of characters to be
+   removed. If omitted or "None", the *chars* argument defaults to
+   removing whitespace. The *chars* argument is not a suffix; rather,
+   all combinations of its values are stripped. For example:
+
+      >>> ' spacious '.rstrip()
+      ' spacious'
+      >>> 'mississippi'.rstrip('ipz')
+      'mississ'
+
+   See "removesuffix()" for a method that will remove a single suffix
+   string rather than all of a set of characters. For example:
+
+      >>> 'Monty Python'.rstrip(' Python')
+      'M'
+      >>> 'Monty Python'.removesuffix(' Python')
+      'Monty'
+
+   See also "strip()".
+
+str.split(sep=None, maxsplit=-1)
+
+   Return a list of the words in the string, using *sep* as the
+   delimiter string. If *maxsplit* is given, at most *maxsplit*
+   splits are done (thus, the list will have at most "maxsplit+1"
+   elements). If *maxsplit* is not specified or "-1", then there is
+   no limit on the number of splits (all possible splits are made).
+
+   If *sep* is given, consecutive delimiters are not grouped together
+   and are deemed to delimit empty strings (for example,
+   "'1,,2'.split(',')" returns "['1', '', '2']"). The *sep* argument
+   may consist of multiple characters as a single delimiter (to split
+   with multiple delimiters, use "re.split()"). Splitting an empty
+   string with a specified separator returns "['']". 
+ + For example: + + >>> '1,2,3'.split(',') + ['1', '2', '3'] + >>> '1,2,3'.split(',', maxsplit=1) + ['1', '2,3'] + >>> '1,2,,3,'.split(',') + ['1', '2', '', '3', ''] + >>> '1<>2<>3<4'.split('<>') + ['1', '2', '3<4'] + + If *sep* is not specified or is "None", a different splitting + algorithm is applied: runs of consecutive whitespace are regarded + as a single separator, and the result will contain no empty strings + at the start or end if the string has leading or trailing + whitespace. Consequently, splitting an empty string or a string + consisting of just whitespace with a "None" separator returns "[]". + + For example: + + >>> '1 2 3'.split() + ['1', '2', '3'] + >>> '1 2 3'.split(maxsplit=1) + ['1', '2 3'] + >>> ' 1 2 3 '.split() + ['1', '2', '3'] + + If *sep* is not specified or is "None" and *maxsplit* is "0", only + leading runs of consecutive whitespace are considered. + + For example: + + >>> "".split(None, 0) + [] + >>> " ".split(None, 0) + [] + >>> " foo ".split(maxsplit=0) + ['foo '] + + See also "join()". + +str.splitlines(keepends=False) + + Return a list of the lines in the string, breaking at line + boundaries. Line breaks are not included in the resulting list + unless *keepends* is given and true. + + This method splits on the following line boundaries. In + particular, the boundaries are a superset of *universal newlines*. + + +-------------------------+-------------------------------+ + | Representation | Description | + |=========================|===============================| + | "\n" | Line Feed | + +-------------------------+-------------------------------+ + | "\r" | Carriage Return | + +-------------------------+-------------------------------+ + | "\r\n" | Carriage Return + Line Feed | + +-------------------------+-------------------------------+ + | "\v" or "\x0b" | Line Tabulation | + +-------------------------+-------------------------------+ + | "\f" or "\x0c" | Form Feed | + +-------------------------+-------------------------------+ + | "\x1c" | File Separator | + +-------------------------+-------------------------------+ + | "\x1d" | Group Separator | + +-------------------------+-------------------------------+ + | "\x1e" | Record Separator | + +-------------------------+-------------------------------+ + | "\x85" | Next Line (C1 Control Code) | + +-------------------------+-------------------------------+ + | "\u2028" | Line Separator | + +-------------------------+-------------------------------+ + | "\u2029" | Paragraph Separator | + +-------------------------+-------------------------------+ + + Changed in version 3.2: "\v" and "\f" added to list of line + boundaries. + + For example: + + >>> 'ab c\n\nde fg\rkl\r\n'.splitlines() + ['ab c', '', 'de fg', 'kl'] + >>> 'ab c\n\nde fg\rkl\r\n'.splitlines(keepends=True) + ['ab c\n', '\n', 'de fg\r', 'kl\r\n'] + + Unlike "split()" when a delimiter string *sep* is given, this + method returns an empty list for the empty string, and a terminal + line break does not result in an extra line: + + >>> "".splitlines() + [] + >>> "One line\n".splitlines() + ['One line'] + + For comparison, "split('\n')" gives: + + >>> ''.split('\n') + [''] + >>> 'Two lines\n'.split('\n') + ['Two lines', ''] + +str.startswith(prefix[, start[, end]]) + + Return "True" if string starts with the *prefix*, otherwise return + "False". *prefix* can also be a tuple of prefixes to look for. + With optional *start*, test string beginning at that position. + With optional *end*, stop comparing string at that position. 
+ + For example: + + >>> 'Python'.startswith('Py') + True + >>> 'a tuple of prefixes'.startswith(('at', 'a')) + True + >>> 'Python is amazing'.startswith('is', 7) + True + + See also "endswith()" and "removeprefix()". + +str.strip(chars=None, /) + + Return a copy of the string with the leading and trailing + characters removed. The *chars* argument is a string specifying the + set of characters to be removed. If omitted or "None", the *chars* + argument defaults to removing whitespace. The *chars* argument is + not a prefix or suffix; rather, all combinations of its values are + stripped. + + For example: + + >>> ' spacious '.strip() + 'spacious' + >>> 'www.example.com'.strip('cmowz.') + 'example' + + The outermost leading and trailing *chars* argument values are + stripped from the string. Characters are removed from the leading + end until reaching a string character that is not contained in the + set of characters in *chars*. A similar action takes place on the + trailing end. + + For example: + + >>> comment_string = '#....... Section 3.2.1 Issue #32 .......' + >>> comment_string.strip('.#! ') + 'Section 3.2.1 Issue #32' + + See also "rstrip()". + +str.swapcase() + + Return a copy of the string with uppercase characters converted to + lowercase and vice versa. Note that it is not necessarily true that + "s.swapcase().swapcase() == s". + +str.title() + + Return a titlecased version of the string where words start with an + uppercase character and the remaining characters are lowercase. + + For example: + + >>> 'Hello world'.title() + 'Hello World' + + The algorithm uses a simple language-independent definition of a + word as groups of consecutive letters. The definition works in + many contexts but it means that apostrophes in contractions and + possessives form word boundaries, which may not be the desired + result: + + >>> "they're bill's friends from the UK".title() + "They'Re Bill'S Friends From The Uk" + + The "string.capwords()" function does not have this problem, as it + splits words on spaces only. + + Alternatively, a workaround for apostrophes can be constructed + using regular expressions: + + >>> import re + >>> def titlecase(s): + ... return re.sub(r"[A-Za-z]+('[A-Za-z]+)?", + ... lambda mo: mo.group(0).capitalize(), + ... s) + ... + >>> titlecase("they're bill's friends.") + "They're Bill's Friends." + + See also "istitle()". + +str.translate(table, /) + + Return a copy of the string in which each character has been mapped + through the given translation table. The table must be an object + that implements indexing via "__getitem__()", typically a *mapping* + or *sequence*. When indexed by a Unicode ordinal (an integer), the + table object can do any of the following: return a Unicode ordinal + or a string, to map the character to one or more other characters; + return "None", to delete the character from the return string; or + raise a "LookupError" exception, to map the character to itself. + + You can use "str.maketrans()" to create a translation map from + character-to-character mappings in different formats. + + See also the "codecs" module for a more flexible approach to custom + character mappings. + +str.upper() + + Return a copy of the string with all the cased characters [4] + converted to uppercase. Note that "s.upper().isupper()" might be + "False" if "s" contains uncased characters or if the Unicode + category of the resulting character(s) is not “Lu” (Letter, + uppercase), but e.g. “Lt” (Letter, titlecase). 
+
+   The uppercasing algorithm used is described in section 3.13
+   ‘Default Case Folding’ of the Unicode Standard.
+
+str.zfill(width, /)
+
+   Return a copy of the string left filled with ASCII "'0'" digits to
+   make a string of length *width*. A leading sign prefix
+   ("'+'"/"'-'") is handled by inserting the padding *after* the sign
+   character rather than before. The original string is returned if
+   *width* is less than or equal to "len(s)".
+
+   For example:
+
+      >>> "42".zfill(5)
+      '00042'
+      >>> "-42".zfill(5)
+      '-0042'
+
+   See also "rjust()".
+''',
+ 'strings': '''String and Bytes literals
+*************************
+
+String literals are text enclosed in single quotes ("'") or double
+quotes ("""). For example:
+
+   "spam"
+   'eggs'
+
+The quote used to start the literal also terminates it, so a string
+literal can only contain the other quote (except with escape
+sequences, see below). For example:
+
+   'Say "Hello", please.'
+   "Don't do that!"
+
+Except for this limitation, the choice of quote character ("'" or """)
+does not affect how the literal is parsed.
+
+Inside a string literal, the backslash ("\\") character introduces an
+*escape sequence*, which has special meaning depending on the
+character after the backslash. For example, "\\"" denotes the double
+quote character, and does *not* end the string:
+
+   >>> print("Say \\"Hello\\" to everyone!")
+   Say "Hello" to everyone!
+
+See escape sequences below for a full list of such sequences, and more
+details.
+
+
+Triple-quoted strings
+=====================
+
+Strings can also be enclosed in matching groups of three single or
+double quotes. These are generally referred to as *triple-quoted
+strings*:
+
+   """This is a triple-quoted string."""
+
+In triple-quoted literals, unescaped quotes are allowed (and are
+retained), except that three unescaped quotes in a row terminate the
+literal, if they are of the same kind ("'" or """) used at the start:
+
+   """This string has "quotes" inside."""
+
+Unescaped newlines are also allowed and retained:
+
+   \'\'\'This triple-quoted string
+   continues on the next line.\'\'\'
+
+
+String prefixes
+===============
+
+String literals can have an optional *prefix* that influences how the
+content of the literal is parsed, for example:
+
+   b"data"
+   f'{result=}'
+
+The allowed prefixes are:
+
+* "b": Bytes literal
+
+* "r": Raw string
+
+* "f": Formatted string literal (“f-string”)
+
+* "t": Template string literal (“t-string”)
+
+* "u": No effect (allowed for backwards compatibility)
+
+See the linked sections for details on each type.
+
+Prefixes are case-insensitive (for example, ‘"B"’ works the same as
+‘"b"’). The ‘"r"’ prefix can be combined with ‘"f"’, ‘"t"’ or ‘"b"’,
+so ‘"fr"’, ‘"rf"’, ‘"tr"’, ‘"rt"’, ‘"br"’, and ‘"rb"’ are also valid
+prefixes.
+
+Added in version 3.3: The "'rb'" prefix of raw bytes literals has been
+added as a synonym of "'br'".
+
+Added in version 3.3: Support for the unicode legacy literal
+("u'value'") was reintroduced to simplify the maintenance of dual
+Python 2.x and 3.x codebases. See **PEP 414** for more information.
+
+
+Formal grammar
+==============
+
+String literals, except “f-strings” and “t-strings”, are described by
+the following lexical definitions.
+
+These definitions use negative lookaheads ("!") to indicate that an
+ending quote ends the literal.
+
+   STRING: [stringprefix] (stringcontent)
+   stringprefix: <("r" | "u" | "b" | "br" | "rb"), case-insensitive>
+   stringcontent:
+      | "\'\'\'" ( !"\'\'\'" longstringitem)* "\'\'\'"
+      | '"""' ( !'"""' longstringitem)* '"""'
+      | "'" ( !"'" stringitem)* "'"
+      | '"' ( !'"' stringitem)* '"'
+   stringitem: stringchar | stringescapeseq
+   stringchar: <any source_character, except backslash and newline>
+   longstringitem: stringitem | newline
+   stringescapeseq: "\\" <any source_character>
+
+Note that as in all lexical definitions, whitespace is significant. In
+particular, the prefix (if any) must be immediately followed by the
+starting quote.
+
+
+Escape sequences
+================
+
+Unless an ‘"r"’ or ‘"R"’ prefix is present, escape sequences in string
+and bytes literals are interpreted according to rules similar to those
+used by Standard C. The recognized escape sequences are:
+
++----------------------------------------------------+----------------------------------------------------+
+| Escape Sequence                                    | Meaning                                            |
+|====================================================|====================================================|
+| "\\" <newline>                                      | Ignored end of line                                |
++----------------------------------------------------+----------------------------------------------------+
+| "\\\\"                                               | Backslash                                          |
++----------------------------------------------------+----------------------------------------------------+
+| "\\'"                                               | Single quote                                       |
++----------------------------------------------------+----------------------------------------------------+
+| "\\""                                               | Double quote                                       |
++----------------------------------------------------+----------------------------------------------------+
+| "\\a"                                               | ASCII Bell (BEL)                                   |
++----------------------------------------------------+----------------------------------------------------+
+| "\\b"                                               | ASCII Backspace (BS)                               |
++----------------------------------------------------+----------------------------------------------------+
+| "\\f"                                               | ASCII Formfeed (FF)                                |
++----------------------------------------------------+----------------------------------------------------+
+| "\\n"                                               | ASCII Linefeed (LF)                                |
++----------------------------------------------------+----------------------------------------------------+
+| "\\r"                                               | ASCII Carriage Return (CR)                         |
++----------------------------------------------------+----------------------------------------------------+
+| "\\t"                                               | ASCII Horizontal Tab (TAB)                         |
++----------------------------------------------------+----------------------------------------------------+
+| "\\v"                                               | ASCII Vertical Tab (VT)                            |
++----------------------------------------------------+----------------------------------------------------+
+| "\\*ooo*"                                           | Octal character                                    |
++----------------------------------------------------+----------------------------------------------------+
+| "\\x*hh*"                                           | Hexadecimal character                              |
++----------------------------------------------------+----------------------------------------------------+
+| "\\N{*name*}"                                       | Named Unicode character                            |
++----------------------------------------------------+----------------------------------------------------+
+| "\\u*xxxx*"                                         | Hexadecimal Unicode character                      |
++----------------------------------------------------+----------------------------------------------------+
+| "\\U*xxxxxxxx*"                                     | Hexadecimal Unicode character                      |
++----------------------------------------------------+----------------------------------------------------+
+
+
+Ignored end of line
+-------------------
+
+A backslash can be added at the end of a line to ignore the newline:
+
+   >>> 'This string will not include \\
+   ... 
backslashes or newline characters.' + 'This string will not include backslashes or newline characters.' + +The same result can be achieved using triple-quoted strings, or +parentheses and string literal concatenation. + + +Escaped characters +------------------ + +To include a backslash in a non-raw Python string literal, it must be +doubled. The "\\\\" escape sequence denotes a single backslash +character: + + >>> print('C:\\\\Program Files') + C:\\Program Files + +Similarly, the "\\'" and "\\"" sequences denote the single and double +quote character, respectively: + + >>> print('\\' and \\"') + ' and " + + +Octal character +--------------- + +The sequence "\\*ooo*" denotes a *character* with the octal (base 8) +value *ooo*: + + >>> '\\120' + 'P' + +Up to three octal digits (0 through 7) are accepted. + +In a bytes literal, *character* means a *byte* with the given value. +In a string literal, it means a Unicode character with the given +value. + +Changed in version 3.11: Octal escapes with value larger than "0o377" +(255) produce a "DeprecationWarning". + +Changed in version 3.12: Octal escapes with value larger than "0o377" +(255) produce a "SyntaxWarning". In a future Python version they will +raise a "SyntaxError". + + +Hexadecimal character +--------------------- + +The sequence "\\x*hh*" denotes a *character* with the hex (base 16) +value *hh*: + + >>> '\\x50' + 'P' + +Unlike in Standard C, exactly two hex digits are required. + +In a bytes literal, *character* means a *byte* with the given value. +In a string literal, it means a Unicode character with the given +value. + + +Named Unicode character +----------------------- + +The sequence "\\N{*name*}" denotes a Unicode character with the given +*name*: + + >>> '\\N{LATIN CAPITAL LETTER P}' + 'P' + >>> '\\N{SNAKE}' + '🐍' + +This sequence cannot appear in bytes literals. + +Changed in version 3.3: Support for name aliases has been added. + + +Hexadecimal Unicode characters +------------------------------ + +These sequences "\\u*xxxx*" and "\\U*xxxxxxxx*" denote the Unicode +character with the given hex (base 16) value. Exactly four digits are +required for "\\u"; exactly eight digits are required for "\\U". The +latter can encode any Unicode character. + + >>> '\\u1234' + 'ሴ' + >>> '\\U0001f40d' + '🐍' + +These sequences cannot appear in bytes literals. + + +Unrecognized escape sequences +----------------------------- + +Unlike in Standard C, all unrecognized escape sequences are left in +the string unchanged, that is, *the backslash is left in the result*: + + >>> print('\\q') + \\q + >>> list('\\q') + ['\\\\', 'q'] + +Note that for bytes literals, the escape sequences only recognized in +string literals ("\\N...", "\\u...", "\\U...") fall into the category of +unrecognized escapes. + +Changed in version 3.6: Unrecognized escape sequences produce a +"DeprecationWarning". + +Changed in version 3.12: Unrecognized escape sequences produce a +"SyntaxWarning". In a future Python version they will raise a +"SyntaxError". + + +Bytes literals +============== + +*Bytes literals* are always prefixed with ‘"b"’ or ‘"B"’; they produce +an instance of the "bytes" type instead of the "str" type. 
They may +only contain ASCII characters; bytes with a numeric value of 128 or +greater must be expressed with escape sequences (typically Hexadecimal +character or Octal character): + + >>> b'\\x89PNG\\r\\n\\x1a\\n' + b'\\x89PNG\\r\\n\\x1a\\n' + >>> list(b'\\x89PNG\\r\\n\\x1a\\n') + [137, 80, 78, 71, 13, 10, 26, 10] + +Similarly, a zero byte must be expressed using an escape sequence +(typically "\\0" or "\\x00"). + + +Raw string literals +=================== + +Both string and bytes literals may optionally be prefixed with a +letter ‘"r"’ or ‘"R"’; such constructs are called *raw string +literals* and *raw bytes literals* respectively and treat backslashes +as literal characters. As a result, in raw string literals, escape +sequences are not treated specially: + + >>> r'\\d{4}-\\d{2}-\\d{2}' + '\\\\d{4}-\\\\d{2}-\\\\d{2}' + +Even in a raw literal, quotes can be escaped with a backslash, but the +backslash remains in the result; for example, "r"\\""" is a valid +string literal consisting of two characters: a backslash and a double +quote; "r"\\"" is not a valid string literal (even a raw string cannot +end in an odd number of backslashes). Specifically, *a raw literal +cannot end in a single backslash* (since the backslash would escape +the following quote character). Note also that a single backslash +followed by a newline is interpreted as those two characters as part +of the literal, *not* as a line continuation. + + +f-strings +========= + +Added in version 3.6. + +Changed in version 3.7: The "await" and "async for" can be used in +expressions within f-strings. + +Changed in version 3.8: Added the debug specifier ("=") + +Changed in version 3.12: Many restrictions on expressions within +f-strings have been removed. Notably, nested strings, comments, and +backslashes are now permitted. + +A *formatted string literal* or *f-string* is a string literal that is +prefixed with ‘"f"’ or ‘"F"’. Unlike other string literals, f-strings +do not have a constant value. They may contain *replacement fields* +delimited by curly braces "{}". Replacement fields contain expressions +which are evaluated at run time. For example: + + >>> who = 'nobody' + >>> nationality = 'Spanish' + >>> f'{who.title()} expects the {nationality} Inquisition!' + 'Nobody expects the Spanish Inquisition!' + +Any doubled curly braces ("{{" or "}}") outside replacement fields are +replaced with the corresponding single curly brace: + + >>> print(f'{{...}}') + {...} + +Other characters outside replacement fields are treated like in +ordinary string literals. This means that escape sequences are decoded +(except when a literal is also marked as a raw string), and newlines +are possible in triple-quoted f-strings: + + >>> name = 'Galahad' + >>> favorite_color = 'blue' + >>> print(f'{name}:\\t{favorite_color}') + Galahad: blue + >>> print(rf"C:\\Users\\{name}") + C:\\Users\\Galahad + >>> print(f\'\'\'Three shall be the number of the counting + ... and the number of the counting shall be three.\'\'\') + Three shall be the number of the counting + and the number of the counting shall be three. + +Expressions in formatted string literals are treated like regular +Python expressions. Each expression is evaluated in the context where +the formatted string literal appears, in order from left to right. 
An +empty expression is not allowed, and both "lambda" and assignment +expressions ":=" must be surrounded by explicit parentheses: + + >>> f'{(half := 1/2)}, {half * 42}' + '0.5, 21.0' + +Reusing the outer f-string quoting type inside a replacement field is +permitted: + + >>> a = dict(x=2) + >>> f"abc {a["x"]} def" + 'abc 2 def' + +Backslashes are also allowed in replacement fields and are evaluated +the same way as in any other context: + + >>> a = ["a", "b", "c"] + >>> print(f"List a contains:\\n{"\\n".join(a)}") + List a contains: + a + b + c + +It is possible to nest f-strings: + + >>> name = 'world' + >>> f'Repeated:{f' hello {name}' * 3}' + 'Repeated: hello world hello world hello world' + +Portable Python programs should not use more than 5 levels of nesting. + +**CPython implementation detail:** CPython does not limit nesting of +f-strings. + +Replacement expressions can contain newlines in both single-quoted and +triple-quoted f-strings and they can contain comments. Everything that +comes after a "#" inside a replacement field is a comment (even +closing braces and quotes). This means that replacement fields with +comments must be closed in a different line: + + >>> a = 2 + >>> f"abc{a # This comment }" continues until the end of the line + ... + 3}" + 'abc5' + +After the expression, replacement fields may optionally contain: + +* a *debug specifier* – an equal sign ("="), optionally surrounded by + whitespace on one or both sides; + +* a *conversion specifier* – "!s", "!r" or "!a"; and/or + +* a *format specifier* prefixed with a colon (":"). + +See the Standard Library section on f-strings for details on how these +fields are evaluated. + +As that section explains, *format specifiers* are passed as the second +argument to the "format()" function to format a replacement field +value. For example, they can be used to specify a field width and +padding characters using the Format Specification Mini-Language: + + >>> number = 14.3 + >>> f'{number:20.7f}' + ' 14.3000000' + +Top-level format specifiers may include nested replacement fields: + + >>> field_size = 20 + >>> precision = 7 + >>> f'{number:{field_size}.{precision}f}' + ' 14.3000000' + +These nested fields may include their own conversion fields and format +specifiers: + + >>> number = 3 + >>> f'{number:{field_size}}' + ' 3' + >>> f'{number:{field_size:05}}' + '00000000000000000003' + +However, these nested fields may not include more deeply nested +replacement fields. + +Formatted string literals cannot be used as *docstrings*, even if they +do not include expressions: + + >>> def foo(): + ... f"Not a docstring" + ... + >>> print(foo.__doc__) + None + +See also: + + * **PEP 498** – Literal String Interpolation + + * **PEP 701** – Syntactic formalization of f-strings + + * "str.format()", which uses a related format string mechanism. + + +t-strings +========= + +Added in version 3.14. + +A *template string literal* or *t-string* is a string literal that is +prefixed with ‘"t"’ or ‘"T"’. These strings follow the same syntax +rules as formatted string literals. For differences in evaluation +rules, see the Standard Library section on t-strings + + +Formal grammar for f-strings +============================ + +F-strings are handled partly by the *lexical analyzer*, which produces +the tokens "FSTRING_START", "FSTRING_MIDDLE" and "FSTRING_END", and +partly by the parser, which handles expressions in the replacement +field. The exact way the work is split is a CPython implementation +detail. 
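+
+For example, the token stream for a small f-string can be inspected
+with the "tokenize" module (a brief sketch; since the split of work
+between lexer and parser is an implementation detail, the exact stream
+may differ between CPython versions):
+
+   >>> import io, tokenize
+   >>> code = "f'{x}!'"
+   >>> for t in tokenize.generate_tokens(io.StringIO(code).readline):
+   ...     print(tokenize.tok_name[t.type], repr(t.string))
+   ...
+   FSTRING_START "f'"
+   OP '{'
+   NAME 'x'
+   OP '}'
+   FSTRING_MIDDLE '!'
+   FSTRING_END "'"
+   NEWLINE ''
+   ENDMARKER ''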
+
+Correspondingly, the f-string grammar is a mix of lexical and
+syntactic definitions.
+
+Whitespace is significant in these situations:
+
+* There may be no whitespace in "FSTRING_START" (between the prefix
+  and quote).
+
+* Whitespace in "FSTRING_MIDDLE" is part of the literal string
+  contents.
+
+* In "fstring_replacement_field", if "f_debug_specifier" is present,
+  all whitespace after the opening brace until the
+  "f_debug_specifier", as well as whitespace immediately following
+  "f_debug_specifier", is retained as part of the expression.
+
+  **CPython implementation detail:** The expression is not handled in
+  the tokenization phase; it is retrieved from the source code using
+  locations of the "{" token and the token after "=".
+
+The "FSTRING_MIDDLE" definition uses negative lookaheads ("!") to
+indicate special characters (backslash, newline, "{", "}") and
+sequences ("f_quote").
+
+   fstring: FSTRING_START fstring_middle* FSTRING_END
+
+   FSTRING_START: fstringprefix ("'" | '"' | "\'\'\'" | '"""')
+   FSTRING_END: f_quote
+   fstringprefix: <("f" | "fr" | "rf"), case-insensitive>
+   f_debug_specifier: '='
+   f_quote: <the quote or triple quote used in FSTRING_START>
+
+   fstring_middle:
+      | fstring_replacement_field
+      | FSTRING_MIDDLE
+   FSTRING_MIDDLE:
+      | (!"\\" !newline !'{' !'}' !f_quote) source_character
+      | stringescapeseq
+      | "{{"
+      | "}}"
+      | <a newline, in triple-quoted f-strings>
+   fstring_replacement_field:
+      | '{' f_expression [f_debug_specifier] [fstring_conversion]
+        [fstring_full_format_spec] '}'
+   fstring_conversion:
+      | "!" ("s" | "r" | "a")
+   fstring_full_format_spec:
+      | ':' fstring_format_spec*
+   fstring_format_spec:
+      | FSTRING_MIDDLE
+      | fstring_replacement_field
+   f_expression:
+      | ','.(conditional_expression | "*" or_expr)+ [","]
+      | yield_expression
+
+Note:
+
+  In the above grammar snippet, the "f_quote" and "FSTRING_MIDDLE"
+  rules are context-sensitive – they depend on the contents of
+  "FSTRING_START" of the nearest enclosing "fstring". Constructing a
+  more traditional formal grammar from this template is left as an
+  exercise for the reader.
+
+The grammar for t-strings is identical to the one for f-strings, with
+*t* instead of *f* at the beginning of rule and token names and in the
+prefix.
+
+   tstring: TSTRING_START tstring_middle* TSTRING_END
+
+
+''',
+ 'subscriptions': r'''Subscriptions and slicings
+**************************
+
+The *subscription* syntax is usually used for selecting an element
+from a container – for example, to get a value from a "dict":
+
+   >>> digits_by_name = {'one': 1, 'two': 2}
+   >>> digits_by_name['two'] # Subscripting a dictionary using the key 'two'
+   2
+
+In the subscription syntax, the object being subscribed – a primary –
+is followed by a *subscript* in square brackets. In the simplest case,
+the subscript is a single expression.
+
+Depending on the type of the object being subscribed, the subscript is
+sometimes called a *key* (for mappings), *index* (for sequences), or
+*type argument* (for *generic types*). Syntactically, these are all
+equivalent:
+
+   >>> colors = ['red', 'blue', 'green', 'black']
+   >>> colors[3] # Subscripting a list using the index 3
+   'black'
+
+   >>> list[str] # Parameterizing the list type using the type argument str
+   list[str]
+
+At runtime, the interpreter will evaluate the primary and the
+subscript, and call the primary’s "__getitem__()" or
+"__class_getitem__()" *special method* with the subscript as argument.
+For more details on which of these methods is called, see
+__class_getitem__ versus __getitem__.
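+
+For example, a class can accept subscription directly by defining
+"__class_getitem__()" (a minimal sketch; real *generic types* such as
+"list" instead return a "types.GenericAlias" object):
+
+   >>> class ClassSubscriptionDemo:
+   ...     def __class_getitem__(cls, item):
+   ...         print(f'class subscripted with: {item!r}')
+   ...
+   >>> ClassSubscriptionDemo[int]
+   class subscripted with: <class 'int'>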
+ +To show how subscription works, we can define a custom object that +implements "__getitem__()" and prints out the value of the subscript: + + >>> class SubscriptionDemo: + ... def __getitem__(self, key): + ... print(f'subscripted with: {key!r}') + ... + >>> demo = SubscriptionDemo() + >>> demo[1] + subscripted with: 1 + >>> demo['a' * 3] + subscripted with: 'aaa' + +See "__getitem__()" documentation for how built-in types handle +subscription. + +Subscriptions may also be used as targets in assignment or deletion +statements. In these cases, the interpreter will call the subscripted +object’s "__setitem__()" or "__delitem__()" *special method*, +respectively, instead of "__getitem__()". + + >>> colors = ['red', 'blue', 'green', 'black'] + >>> colors[3] = 'white' # Setting item at index + >>> colors + ['red', 'blue', 'green', 'white'] + >>> del colors[3] # Deleting item at index 3 + >>> colors + ['red', 'blue', 'green'] + +All advanced forms of *subscript* documented in the following sections +are also usable for assignment and deletion. + + +Slicings +======== + +A more advanced form of subscription, *slicing*, is commonly used to +extract a portion of a sequence. In this form, the subscript is a +*slice*: up to three expressions separated by colons. Any of the +expressions may be omitted, but a slice must contain at least one +colon: + + >>> number_names = ['zero', 'one', 'two', 'three', 'four', 'five'] + >>> number_names[1:3] + ['one', 'two'] + >>> number_names[1:] + ['one', 'two', 'three', 'four', 'five'] + >>> number_names[:3] + ['zero', 'one', 'two'] + >>> number_names[:] + ['zero', 'one', 'two', 'three', 'four', 'five'] + >>> number_names[::2] + ['zero', 'two', 'four'] + >>> number_names[:-3] + ['zero', 'one', 'two'] + >>> del number_names[4:] + >>> number_names + ['zero', 'one', 'two', 'three'] + +When a slice is evaluated, the interpreter constructs a "slice" object +whose "start", "stop" and "step" attributes, respectively, are the +results of the expressions between the colons. Any missing expression +evaluates to "None". This "slice" object is then passed to the +"__getitem__()" or "__class_getitem__()" *special method*, as above. + + # continuing with the SubscriptionDemo instance defined above: + >>> demo[2:3] + subscripted with: slice(2, 3, None) + >>> demo[::'spam'] + subscripted with: slice(None, None, 'spam') + + +Comma-separated subscripts +========================== + +The subscript can also be given as two or more comma-separated +expressions or slices: + + # continuing with the SubscriptionDemo instance defined above: + >>> demo[1, 2, 3] + subscripted with: (1, 2, 3) + >>> demo[1:2, 3] + subscripted with: (slice(1, 2, None), 3) + +This form is commonly used with numerical libraries for slicing multi- +dimensional data. In this case, the interpreter constructs a "tuple" +of the results of the expressions or slices, and passes this tuple to +the "__getitem__()" or "__class_getitem__()" *special method*, as +above. + +The subscript may also be given as a single expression or slice +followed by a comma, to specify a one-element tuple: + + >>> demo['spam',] + subscripted with: ('spam',) + + +“Starred” subscriptions +======================= + +Added in version 3.11: Expressions in *tuple_slices* may be starred. +See **PEP 646**. + +The subscript can also contain a starred expression. 
In this case, the +interpreter unpacks the result into a tuple, and passes this tuple to +"__getitem__()" or "__class_getitem__()": + + # continuing with the SubscriptionDemo instance defined above: + >>> demo[*range(10)] + subscripted with: (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) + +Starred expressions may be combined with comma-separated expressions +and slices: + + >>> demo['a', 'b', *range(3), 'c'] + subscripted with: ('a', 'b', 0, 1, 2, 'c') + + +Formal subscription grammar +=========================== + + subscription: primary '[' subscript ']' + subscript: single_subscript | tuple_subscript + single_subscript: proper_slice | assignment_expression + proper_slice: [expression] ":" [expression] [ ":" [expression] ] + tuple_subscript: ','.(single_subscript | starred_expression)+ [','] + +Recall that the "|" operator denotes ordered choice. Specifically, in +"subscript", if both alternatives would match, the first +("single_subscript") has priority. +''', + 'truth': r'''Truth Value Testing +******************* + +Any object can be tested for truth value, for use in an "if" or +"while" condition or as operand of the Boolean operations below. + +By default, an object is considered true unless its class defines +either a "__bool__()" method that returns "False" or a "__len__()" +method that returns zero, when called with the object. [1] If one of +the methods raises an exception when called, the exception is +propagated and the object does not have a truth value (for example, +"NotImplemented"). Here are most of the built-in objects considered +false: + +* constants defined to be false: "None" and "False" + +* zero of any numeric type: "0", "0.0", "0j", "Decimal(0)", + "Fraction(0, 1)" + +* empty sequences and collections: "''", "()", "[]", "{}", "set()", + "range(0)" + +Operations and built-in functions that have a Boolean result always +return "0" or "False" for false and "1" or "True" for true, unless +otherwise stated. (Important exception: the Boolean operations "or" +and "and" always return one of their operands.) +''', + 'try': r'''The "try" statement +******************* + +The "try" statement specifies exception handlers and/or cleanup code +for a group of statements: + + try_stmt: try1_stmt | try2_stmt | try3_stmt + try1_stmt: "try" ":" suite + ("except" [expression ["as" identifier]] ":" suite)+ + ["else" ":" suite] + ["finally" ":" suite] + try2_stmt: "try" ":" suite + ("except" "*" expression ["as" identifier] ":" suite)+ + ["else" ":" suite] + ["finally" ":" suite] + try3_stmt: "try" ":" suite + "finally" ":" suite + +Additional information on exceptions can be found in section +Exceptions, and information on using the "raise" statement to generate +exceptions may be found in section The raise statement. + +Changed in version 3.14: Support for optionally dropping grouping +parentheses when using multiple exception types. See **PEP 758**. + + +"except" clause +=============== + +The "except" clause(s) specify one or more exception handlers. When no +exception occurs in the "try" clause, no exception handler is +executed. When an exception occurs in the "try" suite, a search for an +exception handler is started. This search inspects the "except" +clauses in turn until one is found that matches the exception. An +expression-less "except" clause, if present, must be last; it matches +any exception. + +For an "except" clause with an expression, the expression must +evaluate to an exception type or a tuple of exception types. 
+
+Parentheses can be dropped if multiple exception types are provided
+and the "as" clause is not used. The raised exception matches an
+"except" clause whose expression evaluates to the class or a *non-
+virtual base class* of the exception object, or to a tuple that
+contains such a class.
+
+If no "except" clause matches the exception, the search for an
+exception handler continues in the surrounding code and on the
+invocation stack. [1]
+
+If the evaluation of an expression in the header of an "except" clause
+raises an exception, the original search for a handler is canceled and
+a search starts for the new exception in the surrounding code and on
+the call stack (it is treated as if the entire "try" statement raised
+the exception).
+
+When a matching "except" clause is found, the exception is assigned to
+the target specified after the "as" keyword in that "except" clause,
+if present, and the "except" clause’s suite is executed. All "except"
+clauses must have an executable block. When the end of this block is
+reached, execution continues normally after the entire "try"
+statement. (This means that if two nested handlers exist for the same
+exception, and the exception occurs in the "try" clause of the inner
+handler, the outer handler will not handle the exception.)
+
+When an exception has been assigned using "as target", it is cleared
+at the end of the "except" clause. This is as if
+
+   except E as N:
+       foo
+
+was translated to
+
+   except E as N:
+       try:
+           foo
+       finally:
+           del N
+
+This means the exception must be assigned to a different name to be
+able to refer to it after the "except" clause. Exceptions are cleared
+because with the traceback attached to them, they form a reference
+cycle with the stack frame, keeping all locals in that frame alive
+until the next garbage collection occurs.
+
+Before an "except" clause’s suite is executed, the exception is stored
+in the "sys" module, where it can be accessed from within the body of
+the "except" clause by calling "sys.exception()". When leaving an
+exception handler, the exception stored in the "sys" module is reset
+to its previous value:
+
+   >>> print(sys.exception())
+   None
+   >>> try:
+   ...     raise TypeError
+   ... except:
+   ...     print(repr(sys.exception()))
+   ...     try:
+   ...         raise ValueError
+   ...     except:
+   ...         print(repr(sys.exception()))
+   ...     print(repr(sys.exception()))
+   ...
+   TypeError()
+   ValueError()
+   TypeError()
+   >>> print(sys.exception())
+   None
+
+
+"except*" clause
+================
+
+The "except*" clause(s) specify one or more handlers for groups of
+exceptions ("BaseExceptionGroup" instances). A "try" statement can
+have either "except" or "except*" clauses, but not both. The exception
+type for matching is mandatory in the case of "except*", so "except*:"
+is a syntax error. The type is interpreted as in the case of "except",
+but matching is performed on the exceptions contained in the group
+that is being handled. A "TypeError" is raised if a matching type is a
+subclass of "BaseExceptionGroup", because that would have ambiguous
+semantics.
+
+When an exception group is raised in the try block, each "except*"
+clause splits (see "split()") it into the subgroups of matching and
+non-matching exceptions. If the matching subgroup is not empty, it
+becomes the handled exception (the value returned from
+"sys.exception()") and is assigned to the target of the "except*"
+clause (if there is one). Then, the body of the "except*" clause
+executes. 
If
+the non-matching subgroup is not empty, it is processed by the next
+"except*" in the same manner. This continues until all exceptions in
+the group have been matched, or the last "except*" clause has run.
+
+After all "except*" clauses execute, the group of unhandled exceptions
+is merged with any exceptions that were raised or re-raised from
+within "except*" clauses. This merged exception group propagates on:
+
+   >>> try:
+   ...     raise ExceptionGroup("eg",
+   ...         [ValueError(1), TypeError(2), OSError(3), OSError(4)])
+   ... except* TypeError as e:
+   ...     print(f'caught {type(e)} with nested {e.exceptions}')
+   ... except* OSError as e:
+   ...     print(f'caught {type(e)} with nested {e.exceptions}')
+   ...
+   caught <class 'ExceptionGroup'> with nested (TypeError(2),)
+   caught <class 'ExceptionGroup'> with nested (OSError(3), OSError(4))
+     + Exception Group Traceback (most recent call last):
+     |   File "<stdin>", line 2, in <module>
+     |     raise ExceptionGroup("eg",
+     |         [ValueError(1), TypeError(2), OSError(3), OSError(4)])
+     | ExceptionGroup: eg (1 sub-exception)
+     +-+---------------- 1 ----------------
+       | ValueError: 1
+       +------------------------------------
+
+If the exception raised from the "try" block is not an exception group
+and its type matches one of the "except*" clauses, it is caught and
+wrapped by an exception group with an empty message string. This
+ensures that the type of the target "e" is consistently
+"BaseExceptionGroup":
+
+   >>> try:
+   ...     raise BlockingIOError
+   ... except* BlockingIOError as e:
+   ...     print(repr(e))
+   ...
+   ExceptionGroup('', (BlockingIOError(),))
+
+"break", "continue" and "return" cannot appear in an "except*" clause.
+
+
+"else" clause
+=============
+
+The optional "else" clause is executed if the control flow leaves the
+"try" suite, no exception was raised, and no "return", "continue", or
+"break" statement was executed. Exceptions in the "else" clause are
+not handled by the preceding "except" clauses.
+
+
+"finally" clause
+================
+
+If "finally" is present, it specifies a ‘cleanup’ handler. The "try"
+clause is executed, including any "except" and "else" clauses. If an
+exception occurs in any of the clauses and is not handled, the
+exception is temporarily saved. The "finally" clause is executed. If
+there is a saved exception it is re-raised at the end of the "finally"
+clause. If the "finally" clause raises another exception, the saved
+exception is set as the context of the new exception. If the "finally"
+clause executes a "return", "break" or "continue" statement, the saved
+exception is discarded. For example, this function returns 42.
+
+   def f():
+       try:
+           1/0
+       finally:
+           return 42
+
+The exception information is not available to the program during
+execution of the "finally" clause.
+
+When a "return", "break" or "continue" statement is executed in the
+"try" suite of a "try"…"finally" statement, the "finally" clause is
+also executed ‘on the way out.’
+
+The return value of a function is determined by the last "return"
+statement executed. Since the "finally" clause always executes, a
+"return" statement executed in the "finally" clause will always be the
+last one executed. The following function returns ‘finally’.
+
+   def foo():
+       try:
+           return 'try'
+       finally:
+           return 'finally'
+
+Changed in version 3.8: Prior to Python 3.8, a "continue" statement
+was illegal in the "finally" clause due to a problem with the
+implementation.
+
+Changed in version 3.14: The compiler emits a "SyntaxWarning" when a
+"return", "break" or "continue" appears in a "finally" block (see
+**PEP 765**).
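+
+As an illustrative sketch, the "except", "else" and "finally" clauses
+described above combine in one statement as follows (the function name
+"divide" is arbitrary):
+
+   >>> def divide(a, b):
+   ...     try:
+   ...         result = a / b
+   ...     except ZeroDivisionError:
+   ...         print('division by zero!')
+   ...     else:
+   ...         print('result is', result)
+   ...     finally:
+   ...         print('executing finally clause')
+   ...
+   >>> divide(2, 1)
+   result is 2.0
+   executing finally clause
+   >>> divide(2, 0)
+   division by zero!
+   executing finally clause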
+''', + 'types': r'''The standard type hierarchy +*************************** + +Below is a list of the types that are built into Python. Extension +modules (written in C, Java, or other languages, depending on the +implementation) can define additional types. Future versions of +Python may add types to the type hierarchy (e.g., rational numbers, +efficiently stored arrays of integers, etc.), although such additions +will often be provided via the standard library instead. + +Some of the type descriptions below contain a paragraph listing +‘special attributes.’ These are attributes that provide access to the +implementation and are not intended for general use. Their definition +may change in the future. + + +None +==== + +This type has a single value. There is a single object with this +value. This object is accessed through the built-in name "None". It is +used to signify the absence of a value in many situations, e.g., it is +returned from functions that don’t explicitly return anything. Its +truth value is false. + + +NotImplemented +============== + +This type has a single value. There is a single object with this +value. This object is accessed through the built-in name +"NotImplemented". Numeric methods and rich comparison methods should +return this value if they do not implement the operation for the +operands provided. (The interpreter will then try the reflected +operation, or some other fallback, depending on the operator.) It +should not be evaluated in a boolean context. + +See Implementing the arithmetic operations for more details. + +Changed in version 3.9: Evaluating "NotImplemented" in a boolean +context was deprecated. + +Changed in version 3.14: Evaluating "NotImplemented" in a boolean +context now raises a "TypeError". It previously evaluated to "True" +and emitted a "DeprecationWarning" since Python 3.9. + + +Ellipsis +======== + +This type has a single value. There is a single object with this +value. This object is accessed through the literal "..." or the built- +in name "Ellipsis". Its truth value is true. + + +"numbers.Number" +================ + +These are created by numeric literals and returned as results by +arithmetic operators and arithmetic built-in functions. Numeric +objects are immutable; once created their value never changes. Python +numbers are of course strongly related to mathematical numbers, but +subject to the limitations of numerical representation in computers. + +The string representations of the numeric classes, computed by +"__repr__()" and "__str__()", have the following properties: + +* They are valid numeric literals which, when passed to their class + constructor, produce an object having the value of the original + numeric. + +* The representation is in base 10, when possible. + +* Leading zeros, possibly excepting a single zero before a decimal + point, are not shown. + +* Trailing zeros, possibly excepting a single zero after a decimal + point, are not shown. + +* A sign is shown only when the number is negative. + +Python distinguishes between integers, floating-point numbers, and +complex numbers: + + +"numbers.Integral" +------------------ + +These represent elements from the mathematical set of integers +(positive and negative). + +Note: + + The rules for integer representation are intended to give the most + meaningful interpretation of shift and mask operations involving + negative integers. 
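+
+For example, masking behaves as if a negative integer had an unbounded
+2’s complement representation, and shifting to the right preserves the
+sign (an illustrative session):
+
+   >>> -1 & 0xFF    # the infinite string of sign bits is masked away
+   255
+   >>> -9 >> 1      # arithmetic shift; equivalent to floor division by 2
+   -5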
+ +There are two types of integers: + +Integers ("int") + These represent numbers in an unlimited range, subject to available + (virtual) memory only. For the purpose of shift and mask + operations, a binary representation is assumed, and negative + numbers are represented in a variant of 2’s complement which gives + the illusion of an infinite string of sign bits extending to the + left. + +Booleans ("bool") + These represent the truth values False and True. The two objects + representing the values "False" and "True" are the only Boolean + objects. The Boolean type is a subtype of the integer type, and + Boolean values behave like the values 0 and 1, respectively, in + almost all contexts, the exception being that when converted to a + string, the strings ""False"" or ""True"" are returned, + respectively. + + +"numbers.Real" ("float") +------------------------ + +These represent machine-level double precision floating-point numbers. +You are at the mercy of the underlying machine architecture (and C or +Java implementation) for the accepted range and handling of overflow. +Python does not support single-precision floating-point numbers; the +savings in processor and memory usage that are usually the reason for +using these are dwarfed by the overhead of using objects in Python, so +there is no reason to complicate the language with two kinds of +floating-point numbers. + + +"numbers.Complex" ("complex") +----------------------------- + +These represent complex numbers as a pair of machine-level double +precision floating-point numbers. The same caveats apply as for +floating-point numbers. The real and imaginary parts of a complex +number "z" can be retrieved through the read-only attributes "z.real" +and "z.imag". + + +Sequences +========= + +These represent finite ordered sets indexed by non-negative numbers. +The built-in function "len()" returns the number of items of a +sequence. When the length of a sequence is *n*, the index set contains +the numbers 0, 1, …, *n*-1. Item *i* of sequence *a* is selected by +"a[i]". Some sequences, including built-in sequences, interpret +negative subscripts by adding the sequence length. For example, +"a[-2]" equals "a[n-2]", the second to last item of sequence a with +length "n". + +The resulting value must be a nonnegative integer less than the number +of items in the sequence. If it is not, an "IndexError" is raised. + +Sequences also support slicing: "a[start:stop]" selects all items with +index *k* such that *start* "<=" *k* "<" *stop*. When used as an +expression, a slice is a sequence of the same type. The comment above +about negative subscripts also applies to negative slice positions. +Note that no error is raised if a slice position is less than zero or +larger than the length of the sequence. + +If *start* is missing or "None", slicing behaves as if *start* was +zero. If *stop* is missing or "None", slicing behaves as if *stop* was +equal to the length of the sequence. + +Some sequences also support “extended slicing” with a third “step” +parameter: "a[i:j:k]" selects all items of *a* with index *x* where "x += i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*. + +Sequences are distinguished according to their mutability: + + +Immutable sequences +------------------- + +An object of an immutable sequence type cannot change once it is +created. 
(If the object contains references to other objects, these +other objects may be mutable and may be changed; however, the +collection of objects directly referenced by an immutable object +cannot change.) + +The following types are immutable sequences: + +Strings + A string ("str") is a sequence of values that represent + *characters*, or more formally, *Unicode code points*. All the code + points in the range "0" to "0x10FFFF" can be represented in a + string. + + Python doesn’t have a dedicated *character* type. Instead, every + code point in the string is represented as a string object with + length "1". + + The built-in function "ord()" converts a code point from its string + form to an integer in the range "0" to "0x10FFFF"; "chr()" converts + an integer in the range "0" to "0x10FFFF" to the corresponding + length "1" string object. "str.encode()" can be used to convert a + "str" to "bytes" using the given text encoding, and + "bytes.decode()" can be used to achieve the opposite. + +Tuples + The items of a "tuple" are arbitrary Python objects. Tuples of two + or more items are formed by comma-separated lists of expressions. + A tuple of one item (a ‘singleton’) can be formed by affixing a + comma to an expression (an expression by itself does not create a + tuple, since parentheses must be usable for grouping of + expressions). An empty tuple can be formed by an empty pair of + parentheses. + +Bytes + A "bytes" object is an immutable array. The items are 8-bit bytes, + represented by integers in the range 0 <= x < 256. Bytes literals + (like "b'abc'") and the built-in "bytes()" constructor can be used + to create bytes objects. Also, bytes objects can be decoded to + strings via the "decode()" method. + + +Mutable sequences +----------------- + +Mutable sequences can be changed after they are created. The +subscription and slicing notations can be used as the target of +assignment and "del" (delete) statements. + +Note: + + The "collections" and "array" module provide additional examples of + mutable sequence types. + +There are currently two intrinsic mutable sequence types: + +Lists + The items of a list are arbitrary Python objects. Lists are formed + by placing a comma-separated list of expressions in square + brackets. (Note that there are no special cases needed to form + lists of length 0 or 1.) + +Byte Arrays + A bytearray object is a mutable array. They are created by the + built-in "bytearray()" constructor. Aside from being mutable (and + hence unhashable), byte arrays otherwise provide the same interface + and functionality as immutable "bytes" objects. + + +Set types +========= + +These represent unordered, finite sets of unique, immutable objects. +As such, they cannot be indexed by any subscript. However, they can be +iterated over, and the built-in function "len()" returns the number of +items in a set. Common uses for sets are fast membership testing, +removing duplicates from a sequence, and computing mathematical +operations such as intersection, union, difference, and symmetric +difference. + +For set elements, the same immutability rules apply as for dictionary +keys. Note that numeric types obey the normal rules for numeric +comparison: if two numbers compare equal (e.g., "1" and "1.0"), only +one of them can be contained in a set. + +There are currently two intrinsic set types: + +Sets + These represent a mutable set. They are created by the built-in + "set()" constructor and can be modified afterwards by several + methods, such as "add()". 
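+
+   For example:
+
+      >>> s = set('abracadabra')   # duplicate elements are removed
+      >>> sorted(s)
+      ['a', 'b', 'c', 'd', 'r']
+      >>> s.add('z')
+      >>> 'z' in s
+      True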
+ +Frozen sets + These represent an immutable set. They are created by the built-in + "frozenset()" constructor. As a frozenset is immutable and + *hashable*, it can be used again as an element of another set, or + as a dictionary key. + + +Mappings +======== + +These represent finite sets of objects indexed by arbitrary index +sets. The subscript notation "a[k]" selects the item indexed by "k" +from the mapping "a"; this can be used in expressions and as the +target of assignments or "del" statements. The built-in function +"len()" returns the number of items in a mapping. + +There is currently a single intrinsic mapping type: + + +Dictionaries +------------ + +These represent finite sets of objects indexed by nearly arbitrary +values. The only types of values not acceptable as keys are values +containing lists or dictionaries or other mutable types that are +compared by value rather than by object identity, the reason being +that the efficient implementation of dictionaries requires a key’s +hash value to remain constant. Numeric types used for keys obey the +normal rules for numeric comparison: if two numbers compare equal +(e.g., "1" and "1.0") then they can be used interchangeably to index +the same dictionary entry. + +Dictionaries preserve insertion order, meaning that keys will be +produced in the same order they were added sequentially over the +dictionary. Replacing an existing key does not change the order, +however removing a key and re-inserting it will add it to the end +instead of keeping its old place. + +Dictionaries are mutable; they can be created by the "{}" notation +(see section Dictionary displays). + +The extension modules "dbm.ndbm" and "dbm.gnu" provide additional +examples of mapping types, as does the "collections" module. + +Changed in version 3.7: Dictionaries did not preserve insertion order +in versions of Python before 3.6. In CPython 3.6, insertion order was +preserved, but it was considered an implementation detail at that time +rather than a language guarantee. + + +Callable types +============== + +These are the types to which the function call operation (see section +Calls) can be applied: + + +User-defined functions +---------------------- + +A user-defined function object is created by a function definition +(see section Function definitions). It should be called with an +argument list containing the same number of items as the function’s +formal parameter list. + + +Special read-only attributes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ++----------------------------------------------------+----------------------------------------------------+ +| Attribute | Meaning | +|====================================================|====================================================| +| function.__builtins__ | A reference to the "dictionary" that holds the | +| | function’s builtins namespace. Added in version | +| | 3.10. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__globals__ | A reference to the "dictionary" that holds the | +| | function’s global variables – the global namespace | +| | of the module in which the function was defined. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__closure__ | "None" or a "tuple" of cells that contain bindings | +| | for the names specified in the "co_freevars" | +| | attribute of the function’s "code object". A cell | +| | object has the attribute "cell_contents". 
This can | +| | be used to get the value of the cell, as well as | +| | set the value. | ++----------------------------------------------------+----------------------------------------------------+ + + +Special writable attributes +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Most of these attributes check the type of the assigned value: + ++----------------------------------------------------+----------------------------------------------------+ +| Attribute | Meaning | +|====================================================|====================================================| +| function.__doc__ | The function’s documentation string, or "None" if | +| | unavailable. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__name__ | The function’s name. See also: "__name__ | +| | attributes". | ++----------------------------------------------------+----------------------------------------------------+ +| function.__qualname__ | The function’s *qualified name*. See also: | +| | "__qualname__ attributes". Added in version 3.3. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__module__ | The name of the module the function was defined | +| | in, or "None" if unavailable. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__defaults__ | A "tuple" containing default *parameter* values | +| | for those parameters that have defaults, or "None" | +| | if no parameters have a default value. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__code__ | The code object representing the compiled function | +| | body. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__dict__ | The namespace supporting arbitrary function | +| | attributes. See also: "__dict__ attributes". | ++----------------------------------------------------+----------------------------------------------------+ +| function.__annotations__ | A "dictionary" containing annotations of | +| | *parameters*. The keys of the dictionary are the | +| | parameter names, and "'return'" for the return | +| | annotation, if provided. See also: | +| | "object.__annotations__". Changed in version | +| | 3.14: Annotations are now lazily evaluated. See | +| | **PEP 649**. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__annotate__ | The *annotate function* for this function, or | +| | "None" if the function has no annotations. See | +| | "object.__annotate__". Added in version 3.14. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__kwdefaults__ | A "dictionary" containing defaults for keyword- | +| | only *parameters*. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__type_params__ | A "tuple" containing the type parameters of a | +| | generic function. Added in version 3.12. | ++----------------------------------------------------+----------------------------------------------------+ + +Function objects also support getting and setting arbitrary +attributes, which can be used, for example, to attach metadata to +functions. 
Regular attribute dot-notation is used to get and set such +attributes. + +**CPython implementation detail:** CPython’s current implementation +only supports function attributes on user-defined functions. Function +attributes on built-in functions may be supported in the future. + +Additional information about a function’s definition can be retrieved +from its code object (accessible via the "__code__" attribute). + + +Instance methods +---------------- + +An instance method object combines a class, a class instance and any +callable object (normally a user-defined function). + +Special read-only attributes: + ++----------------------------------------------------+----------------------------------------------------+ +| method.__self__ | Refers to the class instance object to which the | +| | method is bound | ++----------------------------------------------------+----------------------------------------------------+ +| method.__func__ | Refers to the original function object | ++----------------------------------------------------+----------------------------------------------------+ +| method.__doc__ | The method’s documentation (same as | +| | "method.__func__.__doc__"). A "string" if the | +| | original function had a docstring, else "None". | ++----------------------------------------------------+----------------------------------------------------+ +| method.__name__ | The name of the method (same as | +| | "method.__func__.__name__") | ++----------------------------------------------------+----------------------------------------------------+ +| method.__module__ | The name of the module the method was defined in, | +| | or "None" if unavailable. | ++----------------------------------------------------+----------------------------------------------------+ + +Methods also support accessing (but not setting) the arbitrary +function attributes on the underlying function object. + +User-defined method objects may be created when getting an attribute +of a class (perhaps via an instance of that class), if that attribute +is a user-defined function object or a "classmethod" object. + +When an instance method object is created by retrieving a user-defined +function object from a class via one of its instances, its "__self__" +attribute is the instance, and the method object is said to be +*bound*. The new method’s "__func__" attribute is the original +function object. + +When an instance method object is created by retrieving a +"classmethod" object from a class or instance, its "__self__" +attribute is the class itself, and its "__func__" attribute is the +function object underlying the class method. + +When an instance method object is called, the underlying function +("__func__") is called, inserting the class instance ("__self__") in +front of the argument list. For instance, when "C" is a class which +contains a definition for a function "f()", and "x" is an instance of +"C", calling "x.f(1)" is equivalent to calling "C.f(x, 1)". + +When an instance method object is derived from a "classmethod" object, +the “class instance” stored in "__self__" will actually be the class +itself, so that calling either "x.f(1)" or "C.f(1)" is equivalent to +calling "f(C,1)" where "f" is the underlying function. + +It is important to note that user-defined functions which are +attributes of a class instance are not converted to bound methods; +this *only* happens when the function is an attribute of the class. 
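+
+For example (an illustrative sketch; the names "C", "x" and "g" are
+arbitrary):
+
+   >>> import types
+   >>> class C:
+   ...     def f(self):
+   ...         pass
+   ...
+   >>> x = C()
+   >>> type(x.f) is types.MethodType    # retrieved from the class: bound
+   True
+   >>> x.f.__self__ is x
+   True
+   >>> x.g = lambda: None               # attribute of the instance: not bound
+   >>> type(x.g) is types.FunctionType
+   True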
+ + +Generator functions +------------------- + +A function or method which uses the "yield" statement (see section The +yield statement) is called a *generator function*. Such a function, +when called, always returns an *iterator* object which can be used to +execute the body of the function: calling the iterator’s +"iterator.__next__()" method will cause the function to execute until +it provides a value using the "yield" statement. When the function +executes a "return" statement or falls off the end, a "StopIteration" +exception is raised and the iterator will have reached the end of the +set of values to be returned. + + +Coroutine functions +------------------- + +A function or method which is defined using "async def" is called a +*coroutine function*. Such a function, when called, returns a +*coroutine* object. It may contain "await" expressions, as well as +"async with" and "async for" statements. See also the Coroutine +Objects section. + + +Asynchronous generator functions +-------------------------------- + +A function or method which is defined using "async def" and which uses +the "yield" statement is called a *asynchronous generator function*. +Such a function, when called, returns an *asynchronous iterator* +object which can be used in an "async for" statement to execute the +body of the function. + +Calling the asynchronous iterator’s "aiterator.__anext__" method will +return an *awaitable* which when awaited will execute until it +provides a value using the "yield" expression. When the function +executes an empty "return" statement or falls off the end, a +"StopAsyncIteration" exception is raised and the asynchronous iterator +will have reached the end of the set of values to be yielded. + + +Built-in functions +------------------ + +A built-in function object is a wrapper around a C function. Examples +of built-in functions are "len()" and "math.sin()" ("math" is a +standard built-in module). The number and type of the arguments are +determined by the C function. Special read-only attributes: + +* "__doc__" is the function’s documentation string, or "None" if + unavailable. See "function.__doc__". + +* "__name__" is the function’s name. See "function.__name__". + +* "__self__" is set to "None" (but see the next item). + +* "__module__" is the name of the module the function was defined in + or "None" if unavailable. See "function.__module__". + + +Built-in methods +---------------- + +This is really a different disguise of a built-in function, this time +containing an object passed to the C function as an implicit extra +argument. An example of a built-in method is "alist.append()", +assuming *alist* is a list object. In this case, the special read-only +attribute "__self__" is set to the object denoted by *alist*. (The +attribute has the same semantics as it does with "other instance +methods".) + + +Classes +------- + +Classes are callable. These objects normally act as factories for new +instances of themselves, but variations are possible for class types +that override "__new__()". The arguments of the call are passed to +"__new__()" and, in the typical case, to "__init__()" to initialize +the new instance. + + +Class Instances +--------------- + +Instances of arbitrary classes can be made callable by defining a +"__call__()" method in their class. 
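+
+For example (a minimal sketch; the class name "Adder" is arbitrary):
+
+   >>> class Adder:
+   ...     def __init__(self, n):
+   ...         self.n = n
+   ...     def __call__(self, x):
+   ...         return self.n + x
+   ...
+   >>> add_two = Adder(2)
+   >>> add_two(40)    # invokes Adder.__call__(add_two, 40)
+   42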
+ + +Modules +======= + +Modules are a basic organizational unit of Python code, and are +created by the import system as invoked either by the "import" +statement, or by calling functions such as "importlib.import_module()" +and built-in "__import__()". A module object has a namespace +implemented by a "dictionary" object (this is the dictionary +referenced by the "__globals__" attribute of functions defined in the +module). Attribute references are translated to lookups in this +dictionary, e.g., "m.x" is equivalent to "m.__dict__["x"]". A module +object does not contain the code object used to initialize the module +(since it isn’t needed once the initialization is done). + +Attribute assignment updates the module’s namespace dictionary, e.g., +"m.x = 1" is equivalent to "m.__dict__["x"] = 1". + + +Import-related attributes on module objects +------------------------------------------- + +Module objects have the following attributes that relate to the import +system. When a module is created using the machinery associated with +the import system, these attributes are filled in based on the +module’s *spec*, before the *loader* executes and loads the module. + +To create a module dynamically rather than using the import system, +it’s recommended to use "importlib.util.module_from_spec()", which +will set the various import-controlled attributes to appropriate +values. It’s also possible to use the "types.ModuleType" constructor +to create modules directly, but this technique is more error-prone, as +most attributes must be manually set on the module object after it has +been created when using this approach. + +Caution: + + With the exception of "__name__", it is **strongly** recommended + that you rely on "__spec__" and its attributes instead of any of the + other individual attributes listed in this subsection. Note that + updating an attribute on "__spec__" will not update the + corresponding attribute on the module itself: + + >>> import typing + >>> typing.__name__, typing.__spec__.name + ('typing', 'typing') + >>> typing.__spec__.name = 'spelling' + >>> typing.__name__, typing.__spec__.name + ('typing', 'spelling') + >>> typing.__name__ = 'keyboard_smashing' + >>> typing.__name__, typing.__spec__.name + ('keyboard_smashing', 'spelling') + +module.__name__ + + The name used to uniquely identify the module in the import system. + For a directly executed module, this will be set to ""__main__"". + + This attribute must be set to the fully qualified name of the + module. It is expected to match the value of + "module.__spec__.name". + +module.__spec__ + + A record of the module’s import-system-related state. + + Set to the "module spec" that was used when importing the module. + See Module specs for more details. + + Added in version 3.4. + +module.__package__ + + The *package* a module belongs to. + + If the module is top-level (that is, not a part of any specific + package) then the attribute should be set to "''" (the empty + string). Otherwise, it should be set to the name of the module’s + package (which can be equal to "module.__name__" if the module + itself is a package). See **PEP 366** for further details. + + This attribute is used instead of "__name__" to calculate explicit + relative imports for main modules. It defaults to "None" for + modules created dynamically using the "types.ModuleType" + constructor; use "importlib.util.module_from_spec()" instead to + ensure the attribute is set to a "str". 
+ + It is **strongly** recommended that you use + "module.__spec__.parent" instead of "module.__package__". + "__package__" is now only used as a fallback if "__spec__.parent" + is not set, and this fallback path is deprecated. + + Changed in version 3.4: This attribute now defaults to "None" for + modules created dynamically using the "types.ModuleType" + constructor. Previously the attribute was optional. + + Changed in version 3.6: The value of "__package__" is expected to + be the same as "__spec__.parent". "__package__" is now only used as + a fallback during import resolution if "__spec__.parent" is not + defined. + + Changed in version 3.10: "ImportWarning" is raised if an import + resolution falls back to "__package__" instead of + "__spec__.parent". + + Changed in version 3.12: Raise "DeprecationWarning" instead of + "ImportWarning" when falling back to "__package__" during import + resolution. + + Deprecated since version 3.13, will be removed in version 3.15: + "__package__" will cease to be set or taken into consideration by + the import system or standard library. + +module.__loader__ + + The *loader* object that the import machinery used to load the + module. + + This attribute is mostly useful for introspection, but can be used + for additional loader-specific functionality, for example getting + data associated with a loader. + + "__loader__" defaults to "None" for modules created dynamically + using the "types.ModuleType" constructor; use + "importlib.util.module_from_spec()" instead to ensure the attribute + is set to a *loader* object. + + It is **strongly** recommended that you use + "module.__spec__.loader" instead of "module.__loader__". + + Changed in version 3.4: This attribute now defaults to "None" for + modules created dynamically using the "types.ModuleType" + constructor. Previously the attribute was optional. + + Deprecated since version 3.12, will be removed in version 3.16: + Setting "__loader__" on a module while failing to set + "__spec__.loader" is deprecated. In Python 3.16, "__loader__" will + cease to be set or taken into consideration by the import system or + the standard library. + +module.__path__ + + A (possibly empty) *sequence* of strings enumerating the locations + where the package’s submodules will be found. Non-package modules + should not have a "__path__" attribute. See __path__ attributes on + modules for more details. + + It is **strongly** recommended that you use + "module.__spec__.submodule_search_locations" instead of + "module.__path__". + +module.__file__ + +module.__cached__ + + "__file__" and "__cached__" are both optional attributes that may + or may not be set. Both attributes should be a "str" when they are + available. + + "__file__" indicates the pathname of the file from which the module + was loaded (if loaded from a file), or the pathname of the shared + library file for extension modules loaded dynamically from a shared + library. It might be missing for certain types of modules, such as + C modules that are statically linked into the interpreter, and the + import system may opt to leave it unset if it has no semantic + meaning (for example, a module loaded from a database). + + If "__file__" is set then the "__cached__" attribute might also be + set, which is the path to any compiled version of the code (for + example, a byte-compiled file). The file does not need to exist to + set this attribute; the path can simply point to where the compiled + file *would* exist (see **PEP 3147**). 
+ + Note that "__cached__" may be set even if "__file__" is not set. + However, that scenario is quite atypical. Ultimately, the *loader* + is what makes use of the module spec provided by the *finder* (from + which "__file__" and "__cached__" are derived). So if a loader can + load from a cached module but otherwise does not load from a file, + that atypical scenario may be appropriate. + + It is **strongly** recommended that you use + "module.__spec__.cached" instead of "module.__cached__". + + Deprecated since version 3.13, will be removed in version 3.15: + Setting "__cached__" on a module while failing to set + "__spec__.cached" is deprecated. In Python 3.15, "__cached__" will + cease to be set or taken into consideration by the import system or + standard library. + + +Other writable attributes on module objects +------------------------------------------- + +As well as the import-related attributes listed above, module objects +also have the following writable attributes: + +module.__doc__ + + The module’s documentation string, or "None" if unavailable. See + also: "__doc__ attributes". + +module.__annotations__ + + A dictionary containing *variable annotations* collected during + module body execution. For best practices on working with + "__annotations__", see "annotationlib". + + Changed in version 3.14: Annotations are now lazily evaluated. See + **PEP 649**. + +module.__annotate__ + + The *annotate function* for this module, or "None" if the module + has no annotations. See also: "__annotate__" attributes. + + Added in version 3.14. + + +Module dictionaries +------------------- + +Module objects also have the following special read-only attribute: + +module.__dict__ + + The module’s namespace as a dictionary object. Uniquely among the + attributes listed here, "__dict__" cannot be accessed as a global + variable from within a module; it can only be accessed as an + attribute on module objects. + + **CPython implementation detail:** Because of the way CPython + clears module dictionaries, the module dictionary will be cleared + when the module falls out of scope even if the dictionary still has + live references. To avoid this, copy the dictionary or keep the + module around while using its dictionary directly. + + +Custom classes +============== + +Custom class types are typically created by class definitions (see +section Class definitions). A class has a namespace implemented by a +dictionary object. Class attribute references are translated to +lookups in this dictionary, e.g., "C.x" is translated to +"C.__dict__["x"]" (although there are a number of hooks which allow +for other means of locating attributes). When the attribute name is +not found there, the attribute search continues in the base classes. +This search of the base classes uses the C3 method resolution order +which behaves correctly even in the presence of ‘diamond’ inheritance +structures where there are multiple inheritance paths leading back to +a common ancestor. Additional details on the C3 MRO used by Python can +be found at The Python 2.3 Method Resolution Order. + +When a class attribute reference (for class "C", say) would yield a +class method object, it is transformed into an instance method object +whose "__self__" attribute is "C". When it would yield a +"staticmethod" object, it is transformed into the object wrapped by +the static method object. 
See section Implementing Descriptors for
+another way in which attributes retrieved from a class may differ from
+those actually contained in its "__dict__".
+
+Class attribute assignments update the class’s dictionary, never the
+dictionary of a base class.
+
+A class object can be called (see above) to yield a class instance
+(see below).
+
+
+Special attributes
+------------------
+
++----------------------------------------------------+----------------------------------------------------+
+| Attribute                                          | Meaning                                            |
+|====================================================|====================================================|
+| type.__name__                                      | The class’s name. See also: "__name__ attributes". |
++----------------------------------------------------+----------------------------------------------------+
+| type.__qualname__                                  | The class’s *qualified name*. See also:            |
+|                                                    | "__qualname__ attributes".                         |
++----------------------------------------------------+----------------------------------------------------+
+| type.__module__                                    | The name of the module in which the class was      |
+|                                                    | defined.                                           |
++----------------------------------------------------+----------------------------------------------------+
+| type.__dict__                                      | A "mapping proxy" providing a read-only view of    |
+|                                                    | the class’s namespace. See also: "__dict__         |
+|                                                    | attributes".                                       |
++----------------------------------------------------+----------------------------------------------------+
+| type.__bases__                                     | A "tuple" containing the class’s bases. In most    |
+|                                                    | cases, for a class defined as "class X(A, B, C)",  |
+|                                                    | "X.__bases__" will be exactly equal to "(A, B,     |
+|                                                    | C)".                                               |
++----------------------------------------------------+----------------------------------------------------+
+| type.__base__                                      | **CPython implementation detail:** The single base |
+|                                                    | class in the inheritance chain that is responsible |
+|                                                    | for the memory layout of instances. This attribute |
+|                                                    | corresponds to "tp_base" at the C level.           |
++----------------------------------------------------+----------------------------------------------------+
+| type.__doc__                                       | The class’s documentation string, or "None" if     |
+|                                                    | undefined. Not inherited by subclasses.            |
++----------------------------------------------------+----------------------------------------------------+
+| type.__annotations__                               | A dictionary containing *variable annotations*     |
+|                                                    | collected during class body execution. See also:   |
+|                                                    | "__annotations__ attributes". For best practices   |
+|                                                    | on working with "__annotations__", please see      |
+|                                                    | "annotationlib". Use                               |
+|                                                    | "annotationlib.get_annotations()" instead of       |
+|                                                    | accessing this attribute directly. Warning:        |
+|                                                    | Accessing the "__annotations__" attribute directly |
+|                                                    | on a class object may return annotations for the   |
+|                                                    | wrong class, specifically in certain cases where   |
+|                                                    | the class, its base class, or a metaclass is       |
+|                                                    | defined under "from __future__ import              |
+|                                                    | annotations". See **PEP 749** for details. This    |
+|                                                    | attribute does not exist on certain builtin        |
+|                                                    | classes. On user-defined classes without           |
+|                                                    | "__annotations__", it is an empty dictionary.      |
+|                                                    | Changed in version 3.14: Annotations are now       |
+|                                                    | lazily evaluated. See **PEP 649**.                 |
++----------------------------------------------------+----------------------------------------------------+
+| type.__annotate__()                                | The *annotate function* for this class, or "None"  |
+|                                                    | if the class has no annotations. See also:         |
+|                                                    | "__annotate__ attributes". Added in version 3.14.  |
++----------------------------------------------------+----------------------------------------------------+
+| type.__type_params__                               | A "tuple" containing the type parameters of a      |
+|                                                    | generic class. Added in version 3.12.              |
++----------------------------------------------------+----------------------------------------------------+
+| type.__static_attributes__                         | A "tuple" containing names of attributes of this   |
+|                                                    | class which are assigned through "self.X" from any |
+|                                                    | function in its body. Added in version 3.13.       |
++----------------------------------------------------+----------------------------------------------------+
+| type.__firstlineno__                               | The line number of the first line of the class     |
+|                                                    | definition, including decorators. Setting the      |
+|                                                    | "__module__" attribute removes the                 |
+|                                                    | "__firstlineno__" item from the type’s dictionary. |
+|                                                    | Added in version 3.13.                             |
++----------------------------------------------------+----------------------------------------------------+
+| type.__mro__                                       | The "tuple" of classes that are considered when    |
+|                                                    | looking for base classes during method resolution. |
++----------------------------------------------------+----------------------------------------------------+
+
+
+Special methods
+---------------
+
+In addition to the special attributes described above, all Python
+classes also have the following two methods available:
+
+type.mro()
+
+   This method can be overridden by a metaclass to customize the
+   method resolution order for its instances. It is called at class
+   instantiation, and its result is stored in "__mro__".
+
+type.__subclasses__()
+
+   Each class keeps a list of weak references to its immediate
+   subclasses. This method returns a list of all those references
+   still alive. The list is in definition order. Example:
+
+      >>> class A: pass
+      >>> class B(A): pass
+      >>> A.__subclasses__()
+      [<class 'B'>]
+
+
+Class instances
+===============
+
+A class instance is created by calling a class object (see above). A
+class instance has a namespace implemented as a dictionary which is
+the first place in which attribute references are searched. When an
+attribute is not found there, and the instance’s class has an
+attribute by that name, the search continues with the class
+attributes. If a class attribute is found that is a user-defined
+function object, it is transformed into an instance method object
+whose "__self__" attribute is the instance. Static method and class
+method objects are also transformed; see above under “Classes”. See
+section Implementing Descriptors for another way in which attributes
+of a class retrieved via its instances may differ from the objects
+actually stored in the class’s "__dict__". If no class attribute is
+found, and the object’s class has a "__getattr__()" method, that is
+called to satisfy the lookup.
+
+Attribute assignments and deletions update the instance’s dictionary,
+never a class’s dictionary. If the class has a "__setattr__()" or
+"__delattr__()" method, this is called instead of updating the
+instance dictionary directly.
+
+Class instances can pretend to be numbers, sequences, or mappings if
+they have methods with certain special names. See section Special
+method names.
+
+
+Special attributes
+------------------
+
+object.__class__
+
+   The class to which a class instance belongs.
+
+object.__dict__
+
+   A dictionary or other mapping object used to store an object’s
+   (writable) attributes.
Not all instances have a "__dict__" + attribute; see the section on __slots__ for more details. + + +I/O objects (also known as file objects) +======================================== + +A *file object* represents an open file. Various shortcuts are +available to create file objects: the "open()" built-in function, and +also "os.popen()", "os.fdopen()", and the "makefile()" method of +socket objects (and perhaps by other functions or methods provided by +extension modules). + +File objects implement common methods, listed below, to simplify usage +in generic code. They are expected to be With Statement Context +Managers. + +The objects "sys.stdin", "sys.stdout" and "sys.stderr" are initialized +to file objects corresponding to the interpreter’s standard input, +output and error streams; they are all open in text mode and therefore +follow the interface defined by the "io.TextIOBase" abstract class. + +file.read(size=-1, /) + + Retrieve up to *size* data from the file. As a convenience if + *size* is unspecified or -1 retrieve all data available. + +file.write(data, /) + + Store *data* to the file. + +file.close() + + Flush any buffers and close the underlying file. + + +Internal types +============== + +A few types used internally by the interpreter are exposed to the +user. Their definitions may change with future versions of the +interpreter, but they are mentioned here for completeness. + + +Code objects +------------ + +Code objects represent *byte-compiled* executable Python code, or +*bytecode*. The difference between a code object and a function object +is that the function object contains an explicit reference to the +function’s globals (the module in which it was defined), while a code +object contains no context; also the default argument values are +stored in the function object, not in the code object (because they +represent values calculated at run-time). Unlike function objects, +code objects are immutable and contain no references (directly or +indirectly) to mutable objects. + + +Special read-only attributes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_name | The function name | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_qualname | The fully qualified function name Added in | +| | version 3.11. 
| ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_argcount | The total number of positional *parameters* | +| | (including positional-only parameters and | +| | parameters with default values) that the function | +| | has | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_posonlyargcount | The number of positional-only *parameters* | +| | (including arguments with default values) that the | +| | function has | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_kwonlyargcount | The number of keyword-only *parameters* (including | +| | arguments with default values) that the function | +| | has | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_nlocals | The number of local variables used by the function | +| | (including parameters) | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_varnames | A "tuple" containing the names of the local | +| | variables in the function (starting with the | +| | parameter names) | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_cellvars | A "tuple" containing the names of local variables | +| | that are referenced from at least one *nested | +| | scope* inside the function | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_freevars | A "tuple" containing the names of *free (closure) | +| | variables* that a *nested scope* references in an | +| | outer scope. See also "function.__closure__". | +| | Note: references to global and builtin names are | +| | *not* included. | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_code | A string representing the sequence of *bytecode* | +| | instructions in the function | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_consts | A "tuple" containing the literals used by the | +| | *bytecode* in the function | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_names | A "tuple" containing the names used by the | +| | *bytecode* in the function | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_filename | The name of the file from which the code was | +| | compiled | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_firstlineno | The line number of the first line of the function | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_lnotab | A string encoding the mapping from *bytecode* | +| | offsets to line numbers. For details, see the | +| | source code of the interpreter. Deprecated since | +| | version 3.12: This attribute of code objects is | +| | deprecated, and may be removed in Python 3.15. 
|
++----------------------------------------------------+----------------------------------------------------+
+| codeobject.co_stacksize                            | The required stack size of the code object         |
++----------------------------------------------------+----------------------------------------------------+
+| codeobject.co_flags                                | An "integer" encoding a number of flags for the    |
+|                                                    | interpreter.                                       |
++----------------------------------------------------+----------------------------------------------------+
+
+The following flag bits are defined for "co_flags": bit "0x04" is set
+if the function uses the "*arguments" syntax to accept an arbitrary
+number of positional arguments; bit "0x08" is set if the function uses
+the "**keywords" syntax to accept arbitrary keyword arguments; bit
+"0x20" is set if the function is a generator. See Code Objects Bit
+Flags for details on the semantics of each flag that might be
+present.
+
+Future feature declarations (for example, "from __future__ import
+division") also use bits in "co_flags" to indicate whether a code
+object was compiled with a particular feature enabled. See
+"compiler_flag".
+
+Other bits in "co_flags" are reserved for internal use.
+
+If a code object represents a function and has a docstring, the
+"CO_HAS_DOCSTRING" bit is set in "co_flags" and the first item in
+"co_consts" is the docstring of the function.
+
+
+Methods on code objects
+~~~~~~~~~~~~~~~~~~~~~~~
+
+codeobject.co_positions()
+
+   Returns an iterable over the source code positions of each
+   *bytecode* instruction in the code object.
+
+   The iterator returns "tuple"s containing the "(start_line,
+   end_line, start_column, end_column)". The *i-th* tuple corresponds
+   to the position of the source code that compiled to the *i-th* code
+   unit. Column information is 0-indexed UTF-8 byte offsets on the
+   given source line.
+
+   This positional information can be missing. A non-exhaustive list
+   of cases where this may happen:
+
+   * Running the interpreter with "-X" "no_debug_ranges".
+
+   * Loading a pyc file compiled while using "-X" "no_debug_ranges".
+
+   * Position tuples corresponding to artificial instructions.
+
+   * Line and column numbers that can’t be represented due to
+     implementation specific limitations.
+
+   When this occurs, some or all of the tuple elements can be "None".
+
+   Added in version 3.11.
+
+   Note:
+
+     This feature requires storing column positions in code objects
+     which may result in a small increase of disk usage of compiled
+     Python files or interpreter memory usage. To avoid storing the
+     extra information and/or deactivate printing the extra traceback
+     information, the "-X" "no_debug_ranges" command line flag or the
+     "PYTHONNODEBUGRANGES" environment variable can be used.
+
+codeobject.co_lines()
+
+   Returns an iterator that yields information about successive ranges
+   of *bytecode*s. Each item yielded is a "(start, end, lineno)"
+   "tuple":
+
+   * "start" (an "int") represents the offset (inclusive) of the start
+     of the *bytecode* range
+
+   * "end" (an "int") represents the offset (exclusive) of the end of
+     the *bytecode* range
+
+   * "lineno" is an "int" representing the line number of the
+     *bytecode* range, or "None" if the bytecodes in the given range
+     have no line number
+
+   The items yielded will have the following properties:
+
+   * The first range yielded will have a "start" of 0.
+
+   * The "(start, end)" ranges will be non-decreasing and consecutive.
+ That is, for any pair of "tuple"s, the "start" of the second will + be equal to the "end" of the first. + + * No range will be backwards: "end >= start" for all triples. + + * The last "tuple" yielded will have "end" equal to the size of the + *bytecode*. + + Zero-width ranges, where "start == end", are allowed. Zero-width + ranges are used for lines that are present in the source code, but + have been eliminated by the *bytecode* compiler. + + Added in version 3.10. + + See also: + + **PEP 626** - Precise line numbers for debugging and other tools. + The PEP that introduced the "co_lines()" method. + +codeobject.replace(**kwargs) + + Return a copy of the code object with new values for the specified + fields. + + Code objects are also supported by the generic function + "copy.replace()". + + Added in version 3.8. + + +Frame objects +------------- + +Frame objects represent execution frames. They may occur in traceback +objects, and are also passed to registered trace functions. + + +Special read-only attributes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_back | Points to the previous stack frame (towards the | +| | caller), or "None" if this is the bottom stack | +| | frame | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_code | The code object being executed in this frame. | +| | Accessing this attribute raises an auditing event | +| | "object.__getattr__" with arguments "obj" and | +| | ""f_code"". | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_locals | The mapping used by the frame to look up local | +| | variables. If the frame refers to an *optimized | +| | scope*, this may return a write-through proxy | +| | object. Changed in version 3.13: Return a proxy | +| | for optimized scopes. | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_globals | The dictionary used by the frame to look up global | +| | variables | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_builtins | The dictionary used by the frame to look up built- | +| | in (intrinsic) names | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_lasti | The “precise instruction” of the frame object | +| | (this is an index into the *bytecode* string of | +| | the code object) | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_generator | The *generator* or *coroutine* object that owns | +| | this frame, or "None" if the frame is a normal | +| | function. Added in version 3.14. | ++----------------------------------------------------+----------------------------------------------------+ + + +Special writable attributes +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_trace | If not "None", this is a function called for | +| | various events during code execution (this is used | +| | by debuggers). Normally an event is triggered for | +| | each new source line (see "f_trace_lines"). 
| ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_trace_lines | Set this attribute to "False" to disable | +| | triggering a tracing event for each source line. | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_trace_opcodes | Set this attribute to "True" to allow per-opcode | +| | events to be requested. Note that this may lead to | +| | undefined interpreter behaviour if exceptions | +| | raised by the trace function escape to the | +| | function being traced. | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_lineno | The current line number of the frame – writing to | +| | this from within a trace function jumps to the | +| | given line (only for the bottom-most frame). A | +| | debugger can implement a Jump command (aka Set | +| | Next Statement) by writing to this attribute. | ++----------------------------------------------------+----------------------------------------------------+ + + +Frame object methods +~~~~~~~~~~~~~~~~~~~~ + +Frame objects support one method: + +frame.clear() + + This method clears all references to local variables held by the + frame. Also, if the frame belonged to a *generator*, the generator + is finalized. This helps break reference cycles involving frame + objects (for example when catching an exception and storing its + traceback for later use). + + "RuntimeError" is raised if the frame is currently executing or + suspended. + + Added in version 3.4. + + Changed in version 3.13: Attempting to clear a suspended frame + raises "RuntimeError" (as has always been the case for executing + frames). + + +Traceback objects +----------------- + +Traceback objects represent the stack trace of an exception. A +traceback object is implicitly created when an exception occurs, and +may also be explicitly created by calling "types.TracebackType". + +Changed in version 3.7: Traceback objects can now be explicitly +instantiated from Python code. + +For implicitly created tracebacks, when the search for an exception +handler unwinds the execution stack, at each unwound level a traceback +object is inserted in front of the current traceback. When an +exception handler is entered, the stack trace is made available to the +program. (See section The try statement.) It is accessible as the +third item of the tuple returned by "sys.exc_info()", and as the +"__traceback__" attribute of the caught exception. + +When the program contains no suitable handler, the stack trace is +written (nicely formatted) to the standard error stream; if the +interpreter is interactive, it is also made available to the user as +"sys.last_traceback". + +For explicitly created tracebacks, it is up to the creator of the +traceback to determine how the "tb_next" attributes should be linked +to form a full stack trace. + +Special read-only attributes: + ++----------------------------------------------------+----------------------------------------------------+ +| traceback.tb_frame | Points to the execution frame of the current | +| | level. Accessing this attribute raises an | +| | auditing event "object.__getattr__" with arguments | +| | "obj" and ""tb_frame"". 
| ++----------------------------------------------------+----------------------------------------------------+ +| traceback.tb_lineno | Gives the line number where the exception occurred | ++----------------------------------------------------+----------------------------------------------------+ +| traceback.tb_lasti | Indicates the “precise instruction”. | ++----------------------------------------------------+----------------------------------------------------+ + +The line number and last instruction in the traceback may differ from +the line number of its frame object if the exception occurred in a +"try" statement with no matching except clause or with a "finally" +clause. + +traceback.tb_next + + The special writable attribute "tb_next" is the next level in the + stack trace (towards the frame where the exception occurred), or + "None" if there is no next level. + + Changed in version 3.7: This attribute is now writable + + +Slice objects +------------- + +Slice objects are used to represent slices for "__getitem__()" +methods. They are also created by the built-in "slice()" function. + +Special read-only attributes: "start" is the lower bound; "stop" is +the upper bound; "step" is the step value; each is "None" if omitted. +These attributes can have any type. + +Slice objects support one method: + +slice.indices(self, length) + + This method takes a single integer argument *length* and computes + information about the slice that the slice object would describe if + applied to a sequence of *length* items. It returns a tuple of + three integers; respectively these are the *start* and *stop* + indices and the *step* or stride length of the slice. Missing or + out-of-bounds indices are handled in a manner consistent with + regular slices. + + +Static method objects +--------------------- + +Static method objects provide a way of defeating the transformation of +function objects to method objects described above. A static method +object is a wrapper around any other object, usually a user-defined +method object. When a static method object is retrieved from a class +or a class instance, the object actually returned is the wrapped +object, which is not subject to any further transformation. Static +method objects are also callable. Static method objects are created by +the built-in "staticmethod()" constructor. + + +Class method objects +-------------------- + +A class method object, like a static method object, is a wrapper +around another object that alters the way in which that object is +retrieved from classes and class instances. The behaviour of class +method objects upon such retrieval is described above, under “instance +methods”. Class method objects are created by the built-in +"classmethod()" constructor. +''', + 'typesfunctions': r'''Functions +********* + +Function objects are created by function definitions. The only +operation on a function object is to call it: "func(argument-list)". + +There are really two flavors of function objects: built-in functions +and user-defined functions. Both support the same operation (to call +the function), but the implementation is different, hence the +different object types. + +See Function definitions for more information. +''', + 'typesmapping': r'''Mapping Types — "dict" +********************** + +A *mapping* object maps *hashable* values to arbitrary objects. +Mappings are mutable objects. There is currently only one standard +mapping type, the *dictionary*. 
(For other containers see the built- +in "list", "set", and "tuple" classes, and the "collections" module.) + +A dictionary’s keys are *almost* arbitrary values. Values that are +not *hashable*, that is, values containing lists, dictionaries or +other mutable types (that are compared by value rather than by object +identity) may not be used as keys. Values that compare equal (such as +"1", "1.0", and "True") can be used interchangeably to index the same +dictionary entry. + +class dict(**kwargs) +class dict(mapping, /, **kwargs) +class dict(iterable, /, **kwargs) + + Return a new dictionary initialized from an optional positional + argument and a possibly empty set of keyword arguments. + + Dictionaries can be created by several means: + + * Use a comma-separated list of "key: value" pairs within braces: + "{'jack': 4098, 'sjoerd': 4127}" or "{4098: 'jack', 4127: + 'sjoerd'}" + + * Use a dict comprehension: "{}", "{x: x ** 2 for x in range(10)}" + + * Use the type constructor: "dict()", "dict([('foo', 100), ('bar', + 200)])", "dict(foo=100, bar=200)" + + If no positional argument is given, an empty dictionary is created. + If a positional argument is given and it defines a "keys()" method, + a dictionary is created by calling "__getitem__()" on the argument + with each returned key from the method. Otherwise, the positional + argument must be an *iterable* object. Each item in the iterable + must itself be an iterable with exactly two elements. The first + element of each item becomes a key in the new dictionary, and the + second element the corresponding value. If a key occurs more than + once, the last value for that key becomes the corresponding value + in the new dictionary. + + If keyword arguments are given, the keyword arguments and their + values are added to the dictionary created from the positional + argument. If a key being added is already present, the value from + the keyword argument replaces the value from the positional + argument. + + Dictionaries compare equal if and only if they have the same "(key, + value)" pairs (regardless of ordering). Order comparisons (‘<’, + ‘<=’, ‘>=’, ‘>’) raise "TypeError". To illustrate dictionary + creation and equality, the following examples all return a + dictionary equal to "{"one": 1, "two": 2, "three": 3}": + + >>> a = dict(one=1, two=2, three=3) + >>> b = {'one': 1, 'two': 2, 'three': 3} + >>> c = dict(zip(['one', 'two', 'three'], [1, 2, 3])) + >>> d = dict([('two', 2), ('one', 1), ('three', 3)]) + >>> e = dict({'three': 3, 'one': 1, 'two': 2}) + >>> f = dict({'one': 1, 'three': 3}, two=2) + >>> a == b == c == d == e == f + True + + Providing keyword arguments as in the first example only works for + keys that are valid Python identifiers. Otherwise, any valid keys + can be used. + + Dictionaries preserve insertion order. Note that updating a key + does not affect the order. Keys added after deletion are inserted + at the end. + + >>> d = {"one": 1, "two": 2, "three": 3, "four": 4} + >>> d + {'one': 1, 'two': 2, 'three': 3, 'four': 4} + >>> list(d) + ['one', 'two', 'three', 'four'] + >>> list(d.values()) + [1, 2, 3, 4] + >>> d["one"] = 42 + >>> d + {'one': 42, 'two': 2, 'three': 3, 'four': 4} + >>> del d["two"] + >>> d["two"] = None + >>> d + {'one': 42, 'three': 3, 'four': 4, 'two': None} + + Changed in version 3.7: Dictionary order is guaranteed to be + insertion order. This behavior was an implementation detail of + CPython from 3.6. 
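+
+   As noted above, a positional argument that defines a "keys()"
+   method is consumed via "keys()" and "__getitem__()". A small
+   sketch of that protocol (the class name "Pairs" is illustrative
+   only):
+
+      >>> class Pairs:
+      ...     def keys(self):
+      ...         return ['a', 'b']
+      ...     def __getitem__(self, key):
+      ...         return ord(key)
+      ...
+      >>> dict(Pairs())
+      {'a': 97, 'b': 98}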
+ + These are the operations that dictionaries support (and therefore, + custom mapping types should support too): + + list(d) + + Return a list of all the keys used in the dictionary *d*. + + len(d) + + Return the number of items in the dictionary *d*. + + d[key] + + Return the item of *d* with key *key*. Raises a "KeyError" if + *key* is not in the map. + + If a subclass of dict defines a method "__missing__()" and *key* + is not present, the "d[key]" operation calls that method with + the key *key* as argument. The "d[key]" operation then returns + or raises whatever is returned or raised by the + "__missing__(key)" call. No other operations or methods invoke + "__missing__()". If "__missing__()" is not defined, "KeyError" + is raised. "__missing__()" must be a method; it cannot be an + instance variable: + + >>> class Counter(dict): + ... def __missing__(self, key): + ... return 0 + ... + >>> c = Counter() + >>> c['red'] + 0 + >>> c['red'] += 1 + >>> c['red'] + 1 + + The example above shows part of the implementation of + "collections.Counter". A different "__missing__()" method is + used by "collections.defaultdict". + + d[key] = value + + Set "d[key]" to *value*. + + del d[key] + + Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not + in the map. + + key in d + + Return "True" if *d* has a key *key*, else "False". + + key not in d + + Equivalent to "not key in d". + + iter(d) + + Return an iterator over the keys of the dictionary. This is a + shortcut for "iter(d.keys())". + + clear() + + Remove all items from the dictionary. + + copy() + + Return a shallow copy of the dictionary. + + classmethod fromkeys(iterable, value=None, /) + + Create a new dictionary with keys from *iterable* and values set + to *value*. + + "fromkeys()" is a class method that returns a new dictionary. + *value* defaults to "None". All of the values refer to just a + single instance, so it generally doesn’t make sense for *value* + to be a mutable object such as an empty list. To get distinct + values, use a dict comprehension instead. + + get(key, default=None, /) + + Return the value for *key* if *key* is in the dictionary, else + *default*. If *default* is not given, it defaults to "None", so + that this method never raises a "KeyError". + + items() + + Return a new view of the dictionary’s items ("(key, value)" + pairs). See the documentation of view objects. + + keys() + + Return a new view of the dictionary’s keys. See the + documentation of view objects. + + pop(key, /) + pop(key, default, /) + + If *key* is in the dictionary, remove it and return its value, + else return *default*. If *default* is not given and *key* is + not in the dictionary, a "KeyError" is raised. + + popitem() + + Remove and return a "(key, value)" pair from the dictionary. + Pairs are returned in LIFO (last-in, first-out) order. + + "popitem()" is useful to destructively iterate over a + dictionary, as often used in set algorithms. If the dictionary + is empty, calling "popitem()" raises a "KeyError". + + Changed in version 3.7: LIFO order is now guaranteed. In prior + versions, "popitem()" would return an arbitrary key/value pair. + + reversed(d) + + Return a reverse iterator over the keys of the dictionary. This + is a shortcut for "reversed(d.keys())". + + Added in version 3.8. + + setdefault(key, default=None, /) + + If *key* is in the dictionary, return its value. If not, insert + *key* with a value of *default* and return *default*. *default* + defaults to "None". 
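+
+      For example, "setdefault()" is convenient for grouping items (a
+      brief sketch; the sample data is illustrative only):
+
+         >>> groups = {}
+         >>> for word in ['ant', 'bee', 'cat', 'ape']:
+         ...     groups.setdefault(word[0], []).append(word)
+         ...
+         >>> groups
+         {'a': ['ant', 'ape'], 'b': ['bee'], 'c': ['cat']}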
+ + update(**kwargs) + update(mapping, /, **kwargs) + update(iterable, /, **kwargs) + + Update the dictionary with the key/value pairs from *mapping* or + *iterable* and *kwargs*, overwriting existing keys. Return + "None". + + "update()" accepts either another object with a "keys()" method + (in which case "__getitem__()" is called with every key returned + from the method) or an iterable of key/value pairs (as tuples or + other iterables of length two). If keyword arguments are + specified, the dictionary is then updated with those key/value + pairs: "d.update(red=1, blue=2)". + + values() + + Return a new view of the dictionary’s values. See the + documentation of view objects. + + An equality comparison between one "dict.values()" view and + another will always return "False". This also applies when + comparing "dict.values()" to itself: + + >>> d = {'a': 1} + >>> d.values() == d.values() + False + + d | other + + Create a new dictionary with the merged keys and values of *d* + and *other*, which must both be dictionaries. The values of + *other* take priority when *d* and *other* share keys. + + Added in version 3.9. + + d |= other + + Update the dictionary *d* with keys and values from *other*, + which may be either a *mapping* or an *iterable* of key/value + pairs. The values of *other* take priority when *d* and *other* + share keys. + + Added in version 3.9. + + Dictionaries and dictionary views are reversible. + + >>> d = {"one": 1, "two": 2, "three": 3, "four": 4} + >>> d + {'one': 1, 'two': 2, 'three': 3, 'four': 4} + >>> list(reversed(d)) + ['four', 'three', 'two', 'one'] + >>> list(reversed(d.values())) + [4, 3, 2, 1] + >>> list(reversed(d.items())) + [('four', 4), ('three', 3), ('two', 2), ('one', 1)] + + Changed in version 3.8: Dictionaries are now reversible. + +See also: + + "types.MappingProxyType" can be used to create a read-only view of a + "dict". + +See also: + + For detailed information on thread-safety guarantees for "dict" + objects, see Thread safety for dict objects. + + +Dictionary view objects +======================= + +The objects returned by "dict.keys()", "dict.values()" and +"dict.items()" are *view objects*. They provide a dynamic view on the +dictionary’s entries, which means that when the dictionary changes, +the view reflects these changes. + +Dictionary views can be iterated over to yield their respective data, +and support membership tests: + +len(dictview) + + Return the number of entries in the dictionary. + +iter(dictview) + + Return an iterator over the keys, values or items (represented as + tuples of "(key, value)") in the dictionary. + + Keys and values are iterated over in insertion order. This allows + the creation of "(value, key)" pairs using "zip()": "pairs = + zip(d.values(), d.keys())". Another way to create the same list is + "pairs = [(v, k) for (k, v) in d.items()]". + + Iterating views while adding or deleting entries in the dictionary + may raise a "RuntimeError" or fail to iterate over all entries. + + Changed in version 3.7: Dictionary order is guaranteed to be + insertion order. + +x in dictview + + Return "True" if *x* is in the underlying dictionary’s keys, values + or items (in the latter case, *x* should be a "(key, value)" + tuple). + +reversed(dictview) + + Return a reverse iterator over the keys, values or items of the + dictionary. The view will be iterated in reverse order of the + insertion. + + Changed in version 3.8: Dictionary views are now reversible. 
+
+dictview.mapping
+
+   Return a "types.MappingProxyType" that wraps the original
+   dictionary to which the view refers.
+
+   Added in version 3.10.
+
+Keys views are set-like since their entries are unique and *hashable*.
+Items views also have set-like operations since the (key, value) pairs
+are unique and the keys are hashable. If all values in an items view
+are hashable as well, then the items view can interoperate with other
+sets. (Values views are not treated as set-like since the entries are
+generally not unique.) For set-like views, all of the operations
+defined for the abstract base class "collections.abc.Set" are
+available (for example, "==", "<", or "^"). While using set
+operators, set-like views accept any iterable as the other operand,
+unlike sets which only accept sets as the input.
+
+An example of dictionary view usage:
+
+   >>> dishes = {'eggs': 2, 'sausage': 1, 'bacon': 1, 'spam': 500}
+   >>> keys = dishes.keys()
+   >>> values = dishes.values()
+
+   >>> # iteration
+   >>> n = 0
+   >>> for val in values:
+   ...     n += val
+   ...
+   >>> print(n)
+   504
+
+   >>> # keys and values are iterated over in the same order (insertion order)
+   >>> list(keys)
+   ['eggs', 'sausage', 'bacon', 'spam']
+   >>> list(values)
+   [2, 1, 1, 500]
+
+   >>> # view objects are dynamic and reflect dict changes
+   >>> del dishes['eggs']
+   >>> del dishes['sausage']
+   >>> list(keys)
+   ['bacon', 'spam']
+
+   >>> # set operations
+   >>> keys & {'eggs', 'bacon', 'salad'}
+   {'bacon'}
+   >>> keys ^ {'sausage', 'juice'} == {'juice', 'sausage', 'bacon', 'spam'}
+   True
+   >>> keys | ['juice', 'juice', 'juice'] == {'bacon', 'spam', 'juice'}
+   True
+
+   >>> # get back a read-only proxy for the original dictionary
+   >>> values.mapping
+   mappingproxy({'bacon': 1, 'spam': 500})
+   >>> values.mapping['spam']
+   500
+''',
+ 'typesmethods': r'''Methods
+*******
+
+Methods are functions that are called using the attribute notation.
+There are two flavors: built-in methods (such as "append()" on lists)
+and class instance methods. Built-in methods are described with the
+types that support them.
+
+If you access a method (a function defined in a class namespace)
+through an instance, you get a special object: a *bound method* (also
+called instance method) object. When called, it will add the "self"
+argument to the argument list. Bound methods have two special read-
+only attributes: "m.__self__" is the object on which the method
+operates, and "m.__func__" is the function implementing the method.
+Calling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to
+calling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".
+
+Like function objects, bound method objects support getting arbitrary
+attributes. However, since method attributes are actually stored on
+the underlying function object ("method.__func__"), setting method
+attributes on bound methods is disallowed. Attempting to set an
+attribute on a method results in an "AttributeError" being raised. In
+order to set a method attribute, you need to explicitly set it on the
+underlying function object:
+
+   >>> class C:
+   ...     def method(self):
+   ...         pass
+   ...
+   >>> c = C()
+   >>> c.method.whoami = 'my name is method'  # can't set on the method
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   AttributeError: 'method' object has no attribute 'whoami'
+   >>> c.method.__func__.whoami = 'my name is method'
+   >>> c.method.whoami
+   'my name is method'
+
+See Instance methods for more information.
+''',
+ 'typesmodules': r'''Modules
+*******
+
+The only special operation on a module is attribute access: "m.name",
+where *m* is a module and *name* accesses a name defined in *m*’s
+symbol table. Module attributes can be assigned to. (Note that the
+"import" statement is not, strictly speaking, an operation on a module
+object; "import foo" does not require a module object named *foo* to
+exist, rather it requires an (external) *definition* for a module
+named *foo* somewhere.)
+
+A special attribute of every module is "__dict__". This is the
+dictionary containing the module’s symbol table. Modifying this
+dictionary will actually change the module’s symbol table, but direct
+assignment to the "__dict__" attribute is not possible (you can write
+"m.__dict__['a'] = 1", which defines "m.a" to be "1", but you can’t
+write "m.__dict__ = {}"). Modifying "__dict__" directly is not
+recommended.
+
+Modules built into the interpreter are written like this: "<module
+'sys' (built-in)>". If loaded from a file, they are written as
+"<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>".
+''',
+ 'typesseq': r'''Sequence Types — "list", "tuple", "range"
+*****************************************
+
+There are three basic sequence types: lists, tuples, and range
+objects. Additional sequence types tailored for processing of binary
+data and text strings are described in dedicated sections.
+
+
+Common Sequence Operations
+==========================
+
+The operations in the following table are supported by most sequence
+types, both mutable and immutable. The "collections.abc.Sequence" ABC
+is provided to make it easier to correctly implement these operations
+on custom sequence types.
+
+This table lists the sequence operations sorted in ascending priority.
+In the table, *s* and *t* are sequences of the same type, *n*, *i*,
+*j* and *k* are integers and *x* is an arbitrary object that meets any
+type and value restrictions imposed by *s*.
+
+The "in" and "not in" operations have the same priorities as the
+comparison operations. The "+" (concatenation) and "*" (repetition)
+operations have the same priority as the corresponding numeric
+operations.
[3] + ++----------------------------+----------------------------------+------------+ +| Operation | Result | Notes | +|============================|==================================|============| +| "x in s" | "True" if an item of *s* is | (1) | +| | equal to *x*, else "False" | | ++----------------------------+----------------------------------+------------+ +| "x not in s" | "False" if an item of *s* is | (1) | +| | equal to *x*, else "True" | | ++----------------------------+----------------------------------+------------+ +| "s + t" | the concatenation of *s* and *t* | (6)(7) | ++----------------------------+----------------------------------+------------+ +| "s * n" or "n * s" | equivalent to adding *s* to | (2)(7) | +| | itself *n* times | | ++----------------------------+----------------------------------+------------+ +| "s[i]" | *i*th item of *s*, origin 0 | (3)(8) | ++----------------------------+----------------------------------+------------+ +| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) | ++----------------------------+----------------------------------+------------+ +| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) | +| | with step *k* | | ++----------------------------+----------------------------------+------------+ +| "len(s)" | length of *s* | | ++----------------------------+----------------------------------+------------+ +| "min(s)" | smallest item of *s* | | ++----------------------------+----------------------------------+------------+ +| "max(s)" | largest item of *s* | | ++----------------------------+----------------------------------+------------+ + +Sequences of the same type also support comparisons. In particular, +tuples and lists are compared lexicographically by comparing +corresponding elements. This means that to compare equal, every +element must compare equal and the two sequences must be of the same +type and have the same length. (For full details see Comparisons in +the language reference.) + +Forward and reversed iterators over mutable sequences access values +using an index. That index will continue to march forward (or +backward) even if the underlying sequence is mutated. The iterator +terminates only when an "IndexError" or a "StopIteration" is +encountered (or when the index drops below zero). + +Notes: + +1. While the "in" and "not in" operations are used only for simple + containment testing in the general case, some specialised sequences + (such as "str", "bytes" and "bytearray") also use them for + subsequence testing: + + >>> "gg" in "eggs" + True + +2. Values of *n* less than "0" are treated as "0" (which yields an + empty sequence of the same type as *s*). Note that items in the + sequence *s* are not copied; they are referenced multiple times. + This often haunts new Python programmers; consider: + + >>> lists = [[]] * 3 + >>> lists + [[], [], []] + >>> lists[0].append(3) + >>> lists + [[3], [3], [3]] + + What has happened is that "[[]]" is a one-element list containing + an empty list, so all three elements of "[[]] * 3" are references + to this single empty list. Modifying any of the elements of + "lists" modifies this single list. You can create a list of + different lists this way: + + >>> lists = [[] for i in range(3)] + >>> lists[0].append(3) + >>> lists[1].append(5) + >>> lists[2].append(7) + >>> lists + [[3], [5], [7]] + + Further explanation is available in the FAQ entry How do I create a + multidimensional list?. + +3. 
If *i* or *j* is negative, the index is relative to the end of + sequence *s*: "len(s) + i" or "len(s) + j" is substituted. But + note that "-0" is still "0". + +4. The slice of *s* from *i* to *j* is defined as the sequence of + items with index *k* such that "i <= k < j". + + * If *i* is omitted or "None", use "0". + + * If *j* is omitted or "None", use "len(s)". + + * If *i* or *j* is less than "-len(s)", use "0". + + * If *i* or *j* is greater than "len(s)", use "len(s)". + + * If *i* is greater than or equal to *j*, the slice is empty. + +5. The slice of *s* from *i* to *j* with step *k* is defined as the + sequence of items with index "x = i + n*k" such that "0 <= n < + (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k", + "i+3*k" and so on, stopping when *j* is reached (but never + including *j*). When *k* is positive, *i* and *j* are reduced to + "len(s)" if they are greater. When *k* is negative, *i* and *j* are + reduced to "len(s) - 1" if they are greater. If *i* or *j* are + omitted or "None", they become “end” values (which end depends on + the sign of *k*). Note, *k* cannot be zero. If *k* is "None", it + is treated like "1". + +6. Concatenating immutable sequences always results in a new object. + This means that building up a sequence by repeated concatenation + will have a quadratic runtime cost in the total sequence length. + To get a linear runtime cost, you must switch to one of the + alternatives below: + + * if concatenating "str" objects, you can build a list and use + "str.join()" at the end or else write to an "io.StringIO" + instance and retrieve its value when complete + + * if concatenating "bytes" objects, you can similarly use + "bytes.join()" or "io.BytesIO", or you can do in-place + concatenation with a "bytearray" object. "bytearray" objects are + mutable and have an efficient overallocation mechanism + + * if concatenating "tuple" objects, extend a "list" instead + + * for other types, investigate the relevant class documentation + +7. Some sequence types (such as "range") only support item sequences + that follow specific patterns, and hence don’t support sequence + concatenation or repetition. + +8. An "IndexError" is raised if *i* is outside the sequence range. + +-[ Sequence Methods ]- + +Sequence types also support the following methods: + +sequence.count(value, /) + + Return the total number of occurrences of *value* in *sequence*. + +sequence.index(value[, start[, stop]]) + + Return the index of the first occurrence of *value* in *sequence*. + + Raises "ValueError" if *value* is not found in *sequence*. + + The *start* or *stop* arguments allow for efficient searching of + subsections of the sequence, beginning at *start* and ending at + *stop*. This is roughly equivalent to "start + + sequence[start:stop].index(value)", only without copying any data. + + Caution: + + Not all sequence types support passing the *start* and *stop* + arguments. + + +Immutable Sequence Types +======================== + +The only operation that immutable sequence types generally implement +that is not also implemented by mutable sequence types is support for +the "hash()" built-in. + +This support allows immutable sequences, such as "tuple" instances, to +be used as "dict" keys and stored in "set" and "frozenset" instances. + +Attempting to hash an immutable sequence that contains unhashable +values will result in "TypeError". + + +Mutable Sequence Types +====================== + +The operations in the following table are defined on mutable sequence +types. 
The "collections.abc.MutableSequence" ABC is provided to make +it easier to correctly implement these operations on custom sequence +types. + +In the table *s* is an instance of a mutable sequence type, *t* is any +iterable object and *x* is an arbitrary object that meets any type and +value restrictions imposed by *s* (for example, "bytearray" only +accepts integers that meet the value restriction "0 <= x <= 255"). + ++--------------------------------+----------------------------------+-----------------------+ +| Operation | Result | Notes | +|================================|==================================|=======================| +| "s[i] = x" | item *i* of *s* is replaced by | | +| | *x* | | ++--------------------------------+----------------------------------+-----------------------+ +| "del s[i]" | removes item *i* of *s* | | ++--------------------------------+----------------------------------+-----------------------+ +| "s[i:j] = t" | slice of *s* from *i* to *j* is | | +| | replaced by the contents of the | | +| | iterable *t* | | ++--------------------------------+----------------------------------+-----------------------+ +| "del s[i:j]" | removes the elements of "s[i:j]" | | +| | from the list (same as "s[i:j] = | | +| | []") | | ++--------------------------------+----------------------------------+-----------------------+ +| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) | +| | replaced by those of *t* | | ++--------------------------------+----------------------------------+-----------------------+ +| "del s[i:j:k]" | removes the elements of | | +| | "s[i:j:k]" from the list | | ++--------------------------------+----------------------------------+-----------------------+ +| "s += t" | extends *s* with the contents of | | +| | *t* (for the most part the same | | +| | as "s[len(s):len(s)] = t") | | ++--------------------------------+----------------------------------+-----------------------+ +| "s *= n" | updates *s* with its contents | (2) | +| | repeated *n* times | | ++--------------------------------+----------------------------------+-----------------------+ + +Notes: + +1. If *k* is not equal to "1", *t* must have the same length as the + slice it is replacing. + +2. The value *n* is an integer, or an object implementing + "__index__()". Zero and negative values of *n* clear the sequence. + Items in the sequence are not copied; they are referenced multiple + times, as explained for "s * n" under Common Sequence Operations. + +-[ Mutable Sequence Methods ]- + +Mutable sequence types also support the following methods: + +sequence.append(value, /) + + Append *value* to the end of the sequence. This is equivalent to + writing "seq[len(seq):len(seq)] = [value]". + +sequence.clear() + + Added in version 3.3. + + Remove all items from *sequence*. This is equivalent to writing + "del sequence[:]". + +sequence.copy() + + Added in version 3.3. + + Create a shallow copy of *sequence*. This is equivalent to writing + "sequence[:]". + + Hint: + + The "copy()" method is not part of the "MutableSequence" "ABC", + but most concrete mutable sequence types provide it. + +sequence.extend(iterable, /) + + Extend *sequence* with the contents of *iterable*. For the most + part, this is the same as writing "seq[len(seq):len(seq)] = + iterable". + +sequence.insert(index, value, /) + + Insert *value* into *sequence* at the given *index*. This is + equivalent to writing "sequence[index:index] = [value]". 
+
+sequence.pop(index=-1, /)
+
+   Retrieve the item at *index* and also remove it from *sequence*.
+   By default, the last item in *sequence* is removed and returned.
+
+sequence.remove(value, /)
+
+   Remove the first item from *sequence* where "sequence[i] == value".
+
+   Raises "ValueError" if *value* is not found in *sequence*.
+
+sequence.reverse()
+
+   Reverse the items of *sequence* in place. This method maintains
+   economy of space when reversing a large sequence. To remind users
+   that it operates by side-effect, it returns "None".
+
+
+Lists
+=====
+
+Lists are mutable sequences, typically used to store collections of
+homogeneous items (where the precise degree of similarity will vary by
+application).
+
+class list(iterable=(), /)
+
+   Lists may be constructed in several ways:
+
+   * Using a pair of square brackets to denote the empty list: "[]"
+
+   * Using square brackets, separating items with commas: "[a]", "[a,
+     b, c]"
+
+   * Using a list comprehension: "[x for x in iterable]"
+
+   * Using the type constructor: "list()" or "list(iterable)"
+
+   The constructor builds a list whose items are the same and in the
+   same order as *iterable*’s items. *iterable* may be either a
+   sequence, a container that supports iteration, or an iterator
+   object. If *iterable* is already a list, a copy is made and
+   returned, similar to "iterable[:]". For example, "list('abc')"
+   returns "['a', 'b', 'c']" and "list( (1, 2, 3) )" returns "[1, 2,
+   3]". If no argument is given, the constructor creates a new empty
+   list, "[]".
+
+   Many other operations also produce lists, including the "sorted()"
+   built-in.
+
+   Lists implement all of the common and mutable sequence operations.
+   Lists also provide the following additional method:
+
+   sort(*, key=None, reverse=False)
+
+      This method sorts the list in place, using only "<" comparisons
+      between items. Exceptions are not suppressed - if any comparison
+      operations fail, the entire sort operation will fail (and the
+      list will likely be left in a partially modified state).
+
+      "sort()" accepts two arguments that can only be passed by
+      keyword (keyword-only arguments):
+
+      *key* specifies a function of one argument that is used to
+      extract a comparison key from each list element (for example,
+      "key=str.lower"). The key corresponding to each item in the list
+      is calculated once and then used for the entire sorting process.
+      The default value of "None" means that list items are sorted
+      directly without calculating a separate key value.
+
+      The "functools.cmp_to_key()" utility is available to convert a
+      2.x style *cmp* function to a *key* function.
+
+      *reverse* is a boolean value. If set to "True", then the list
+      elements are sorted as if each comparison were reversed.
+
+      This method modifies the sequence in place for economy of space
+      when sorting a large sequence. To remind users that it operates
+      by side effect, it does not return the sorted sequence (use
+      "sorted()" to explicitly request a new sorted list instance).
+
+      The "sort()" method is guaranteed to be stable. A sort is
+      stable if it guarantees not to change the relative order of
+      elements that compare equal — this is helpful for sorting in
+      multiple passes (for example, sort by department, then by salary
+      grade).
+
+      For sorting examples and a brief sorting tutorial, see Sorting
+      Techniques.
+
+      **CPython implementation detail:** While a list is being sorted,
+      the effect of attempting to mutate, or even inspect, the list is
+      undefined.
The C implementation of Python makes the list appear + empty for the duration, and raises "ValueError" if it can detect + that the list has been mutated during a sort. + +See also: + + For detailed information on thread-safety guarantees for "list" + objects, see Thread safety for list objects. + + +Tuples +====== + +Tuples are immutable sequences, typically used to store collections of +heterogeneous data (such as the 2-tuples produced by the "enumerate()" +built-in). Tuples are also used for cases where an immutable sequence +of homogeneous data is needed (such as allowing storage in a "set" or +"dict" instance). + +class tuple(iterable=(), /) + + Tuples may be constructed in a number of ways: + + * Using a pair of parentheses to denote the empty tuple: "()" + + * Using a trailing comma for a singleton tuple: "a," or "(a,)" + + * Separating items with commas: "a, b, c" or "(a, b, c)" + + * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)" + + The constructor builds a tuple whose items are the same and in the + same order as *iterable*’s items. *iterable* may be either a + sequence, a container that supports iteration, or an iterator + object. If *iterable* is already a tuple, it is returned + unchanged. For example, "tuple('abc')" returns "('a', 'b', 'c')" + and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is + given, the constructor creates a new empty tuple, "()". + + Note that it is actually the comma which makes a tuple, not the + parentheses. The parentheses are optional, except in the empty + tuple case, or when they are needed to avoid syntactic ambiguity. + For example, "f(a, b, c)" is a function call with three arguments, + while "f((a, b, c))" is a function call with a 3-tuple as the sole + argument. + + Tuples implement all of the common sequence operations. + +For heterogeneous collections of data where access by name is clearer +than access by index, "collections.namedtuple()" may be a more +appropriate choice than a simple tuple object. + + +Ranges +====== + +The "range" type represents an immutable sequence of numbers and is +commonly used for looping a specific number of times in "for" loops. + +class range(stop, /) +class range(start, stop, step=1, /) + + The arguments to the range constructor must be integers (either + built-in "int" or any object that implements the "__index__()" + special method). If the *step* argument is omitted, it defaults to + "1". If the *start* argument is omitted, it defaults to "0". If + *step* is zero, "ValueError" is raised. + + For a positive *step*, the contents of a range "r" are determined + by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] < + stop". + + For a negative *step*, the contents of the range are still + determined by the formula "r[i] = start + step*i", but the + constraints are "i >= 0" and "r[i] > stop". + + A range object will be empty if "r[0]" does not meet the value + constraint. Ranges do support negative indices, but these are + interpreted as indexing from the end of the sequence determined by + the positive indices. + + Ranges containing absolute values larger than "sys.maxsize" are + permitted but some features (such as "len()") may raise + "OverflowError". 
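+
+   For example (output from CPython; the exact message is an
+   implementation detail):
+
+      >>> r = range(2 ** 100)
+      >>> r[2 ** 99]
+      633825300114114700748351602688
+      >>> len(r)
+      Traceback (most recent call last):
+        ...
+      OverflowError: Python int too large to convert to C ssize_t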
+
+   Range examples:
+
+      >>> list(range(10))
+      [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+      >>> list(range(1, 11))
+      [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+      >>> list(range(0, 30, 5))
+      [0, 5, 10, 15, 20, 25]
+      >>> list(range(0, 10, 3))
+      [0, 3, 6, 9]
+      >>> list(range(0, -10, -1))
+      [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]
+      >>> list(range(0))
+      []
+      >>> list(range(1, 0))
+      []
+
+   Ranges implement all of the common sequence operations except
+   concatenation and repetition (due to the fact that range objects
+   can only represent sequences that follow a strict pattern and
+   repetition and concatenation will usually violate that pattern).
+
+   start
+
+      The value of the *start* parameter (or "0" if the parameter was
+      not supplied)
+
+   stop
+
+      The value of the *stop* parameter
+
+   step
+
+      The value of the *step* parameter (or "1" if the parameter was
+      not supplied)
+
+The advantage of the "range" type over a regular "list" or "tuple" is
+that a "range" object will always take the same (small) amount of
+memory, no matter the size of the range it represents (as it only
+stores the "start", "stop" and "step" values, calculating individual
+items and subranges as needed).
+
+Range objects implement the "collections.abc.Sequence" ABC, and
+provide features such as containment tests, element index lookup,
+slicing and support for negative indices (see Sequence Types — list,
+tuple, range):
+
+>>> r = range(0, 20, 2)
+>>> r
+range(0, 20, 2)
+>>> 11 in r
+False
+>>> 10 in r
+True
+>>> r.index(10)
+5
+>>> r[5]
+10
+>>> r[:5]
+range(0, 10, 2)
+>>> r[-1]
+18
+
+Testing range objects for equality with "==" and "!=" compares them as
+sequences. That is, two range objects are considered equal if they
+represent the same sequence of values. (Note that two range objects
+that compare equal might have different "start", "stop" and "step"
+attributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3,
+2) == range(0, 4, 2)".)
+
+Changed in version 3.2: Implement the Sequence ABC. Support slicing
+and negative indices. Test "int" objects for membership in constant
+time instead of iterating through all items.
+
+Changed in version 3.3: Define ‘==’ and ‘!=’ to compare range objects
+based on the sequence of values they define (instead of comparing
+based on object identity). Added the "start", "stop" and "step"
+attributes.
+
+See also:
+
+  * The linspace recipe shows how to implement a lazy version of range
+    suitable for floating-point applications.
+''',
+    'typesseq-mutable': r'''Mutable Sequence Types
+**********************
+
+The operations in the following table are defined on mutable sequence
+types. The "collections.abc.MutableSequence" ABC is provided to make
+it easier to correctly implement these operations on custom sequence
+types.
+
+In the table *s* is an instance of a mutable sequence type, *t* is any
+iterable object and *x* is an arbitrary object that meets any type and
+value restrictions imposed by *s* (for example, "bytearray" only
+accepts integers that meet the value restriction "0 <= x <= 255").
+
++--------------------------------+----------------------------------+-----------------------+
+| Operation                      | Result                           | Notes                 |
+|================================|==================================|=======================|
+| "s[i] = x"                     | item *i* of *s* is replaced by   |                       |
+|                                | *x*                              |                       |
++--------------------------------+----------------------------------+-----------------------+
+| "del s[i]"                     | removes item *i* of *s*          |                       |
++--------------------------------+----------------------------------+-----------------------+
+| "s[i:j] = t"                   | slice of *s* from *i* to *j* is  |                       |
+|                                | replaced by the contents of the  |                       |
+|                                | iterable *t*                     |                       |
++--------------------------------+----------------------------------+-----------------------+
+| "del s[i:j]"                   | removes the elements of "s[i:j]" |                       |
+|                                | from the list (same as "s[i:j] = |                       |
+|                                | []")                             |                       |
++--------------------------------+----------------------------------+-----------------------+
+| "s[i:j:k] = t"                 | the elements of "s[i:j:k]" are   | (1)                   |
+|                                | replaced by those of *t*         |                       |
++--------------------------------+----------------------------------+-----------------------+
+| "del s[i:j:k]"                 | removes the elements of          |                       |
+|                                | "s[i:j:k]" from the list         |                       |
++--------------------------------+----------------------------------+-----------------------+
+| "s += t"                       | extends *s* with the contents of |                       |
+|                                | *t* (for the most part the same  |                       |
+|                                | as "s[len(s):len(s)] = t")       |                       |
++--------------------------------+----------------------------------+-----------------------+
+| "s *= n"                       | updates *s* with its contents    | (2)                   |
+|                                | repeated *n* times               |                       |
++--------------------------------+----------------------------------+-----------------------+
+
+Notes:
+
+1. If *k* is not equal to "1", *t* must have the same length as the
+   slice it is replacing.
+
+2. The value *n* is an integer, or an object implementing
+   "__index__()". Zero and negative values of *n* clear the sequence.
+   Items in the sequence are not copied; they are referenced multiple
+   times, as explained for "s * n" under Common Sequence Operations.
+
+-[ Mutable Sequence Methods ]-
+
+Mutable sequence types also support the following methods:
+
+sequence.append(value, /)
+
+   Append *value* to the end of the sequence. This is equivalent to
+   writing "seq[len(seq):len(seq)] = [value]".
+
+sequence.clear()
+
+   Added in version 3.3.
+
+   Remove all items from *sequence*. This is equivalent to writing
+   "del sequence[:]".
+
+sequence.copy()
+
+   Added in version 3.3.
+
+   Create a shallow copy of *sequence*. This is equivalent to writing
+   "sequence[:]".
+
+   Hint:
+
+     The "copy()" method is not part of the "MutableSequence" "ABC",
+     but most concrete mutable sequence types provide it.
+
+sequence.extend(iterable, /)
+
+   Extend *sequence* with the contents of *iterable*. For the most
+   part, this is the same as writing "seq[len(seq):len(seq)] =
+   iterable".
+
+sequence.insert(index, value, /)
+
+   Insert *value* into *sequence* at the given *index*. This is
+   equivalent to writing "sequence[index:index] = [value]".
+
+sequence.pop(index=-1, /)
+
+   Retrieve the item at *index* and also remove it from *sequence*.
+   By default, the last item in *sequence* is removed and returned.
+
+sequence.remove(value, /)
+
+   Remove the first item from *sequence* where "sequence[i] == value".
+
+   Raises "ValueError" if *value* is not found in *sequence*.
+
+sequence.reverse()
+
+   Reverse the items of *sequence* in place. This method maintains
+   economy of space when reversing a large sequence.
To remind users + that it operates by side-effect, it returns "None". +''', + 'unary': r'''Unary arithmetic and bitwise operations +*************************************** + +All unary arithmetic and bitwise operations have the same priority: + + u_expr: power | "-" u_expr | "+" u_expr | "~" u_expr + +The unary "-" (minus) operator yields the negation of its numeric +argument; the operation can be overridden with the "__neg__()" special +method. + +The unary "+" (plus) operator yields its numeric argument unchanged; +the operation can be overridden with the "__pos__()" special method. + +The unary "~" (invert) operator yields the bitwise inversion of its +integer argument. The bitwise inversion of "x" is defined as +"-(x+1)". It only applies to integral numbers or to custom objects +that override the "__invert__()" special method. + +In all three cases, if the argument does not have the proper type, a +"TypeError" exception is raised. +''', + 'while': r'''The "while" statement +********************* + +The "while" statement is used for repeated execution as long as an +expression is true: + + while_stmt: "while" assignment_expression ":" suite + ["else" ":" suite] + +This repeatedly tests the expression and, if it is true, executes the +first suite; if the expression is false (which may be the first time +it is tested) the suite of the "else" clause, if present, is executed +and the loop terminates. + +A "break" statement executed in the first suite terminates the loop +without executing the "else" clause’s suite. A "continue" statement +executed in the first suite skips the rest of the suite and goes back +to testing the expression. +''', + 'with': r'''The "with" statement +******************** + +The "with" statement is used to wrap the execution of a block with +methods defined by a context manager (see section With Statement +Context Managers). This allows common "try"…"except"…"finally" usage +patterns to be encapsulated for convenient reuse. + + with_stmt: "with" ( "(" with_stmt_contents ","? ")" | with_stmt_contents ) ":" suite + with_stmt_contents: with_item ("," with_item)* + with_item: expression ["as" target] + +The execution of the "with" statement with one “item” proceeds as +follows: + +1. The context expression (the expression given in the "with_item") is + evaluated to obtain a context manager. + +2. The context manager’s "__enter__()" is loaded for later use. + +3. The context manager’s "__exit__()" is loaded for later use. + +4. The context manager’s "__enter__()" method is invoked. + +5. If a target was included in the "with" statement, the return value + from "__enter__()" is assigned to it. + + Note: + + The "with" statement guarantees that if the "__enter__()" method + returns without an error, then "__exit__()" will always be + called. Thus, if an error occurs during the assignment to the + target list, it will be treated the same as an error occurring + within the suite would be. See step 7 below. + +6. The suite is executed. + +7. The context manager’s "__exit__()" method is invoked. If an + exception caused the suite to be exited, its type, value, and + traceback are passed as arguments to "__exit__()". Otherwise, three + "None" arguments are supplied. + + If the suite was exited due to an exception, and the return value + from the "__exit__()" method was false, the exception is reraised. + If the return value was true, the exception is suppressed, and + execution continues with the statement following the "with" + statement. 
+
+   If the suite was exited for any reason other than an exception, the
+   return value from "__exit__()" is ignored, and execution proceeds
+   at the normal location for the kind of exit that was taken.
+
+The following code:
+
+   with EXPRESSION as TARGET:
+       SUITE
+
+is semantically equivalent to:
+
+   manager = (EXPRESSION)
+   enter = manager.__enter__
+   exit = manager.__exit__
+   value = enter()
+   hit_except = False
+
+   try:
+       TARGET = value
+       SUITE
+   except:
+       hit_except = True
+       if not exit(*sys.exc_info()):
+           raise
+   finally:
+       if not hit_except:
+           exit(None, None, None)
+
+except that implicit special method lookup is used for "__enter__()"
+and "__exit__()".
+
+With more than one item, the context managers are processed as if
+multiple "with" statements were nested:
+
+   with A() as a, B() as b:
+       SUITE
+
+is semantically equivalent to:
+
+   with A() as a:
+       with B() as b:
+           SUITE
+
+You can also write multi-item context managers in multiple lines if
+the items are surrounded by parentheses. For example:
+
+   with (
+       A() as a,
+       B() as b,
+   ):
+       SUITE
+
+Changed in version 3.1: Support for multiple context expressions.
+
+Changed in version 3.10: Support for using grouping parentheses to
+break the statement in multiple lines.
+
+See also:
+
+  **PEP 343** - The “with” statement
+     The specification, background, and examples for the Python "with"
+     statement.
+''',
+    'yield': r'''The "yield" statement
+*********************
+
+   yield_stmt: yield_expression
+
+A "yield" statement is semantically equivalent to a yield expression.
+The "yield" statement can be used to omit the parentheses that would
+otherwise be required in the equivalent yield expression statement.
+For example, the yield statements
+
+   yield <expr>
+   yield from <expr>
+
+are equivalent to the yield expression statements
+
+   (yield <expr>)
+   (yield from <expr>)
+
+Yield expressions and statements are only used when defining a
+*generator* function, and are only used in the body of the generator
+function. Using "yield" in a function definition is sufficient to
+cause that definition to create a generator function instead of a
+normal function.
+
+For full details of "yield" semantics, refer to the Yield expressions
+section.
+''',
+}
diff --git a/Python313_13_x64_Template/Lib/queue.py b/Python314_4_x64_Template/Lib/queue.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/queue.py
rename to Python314_4_x64_Template/Lib/queue.py
diff --git a/Python314_4_x64_Template/Lib/quopri.py b/Python314_4_x64_Template/Lib/quopri.py
new file mode 100644
index 00000000..129fd2f5
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/quopri.py
@@ -0,0 +1,235 @@
+"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
+
+# (Dec 1991 version).
+
+__all__ = ["encode", "decode", "encodestring", "decodestring"]
+
+ESCAPE = b'='
+MAXLINESIZE = 76
+HEX = b'0123456789ABCDEF'
+EMPTYSTRING = b''
+
+try:
+    from binascii import a2b_qp, b2a_qp
+except ImportError:
+    a2b_qp = None
+    b2a_qp = None
+
+
+def needsquoting(c, quotetabs, header):
+    """Decide whether a particular byte ordinal needs to be quoted.
+
+    The 'quotetabs' flag indicates whether embedded tabs and spaces should be
+    quoted.  Note that line-ending tabs and spaces are always encoded, as per
+    RFC 1521.
+ """ + assert isinstance(c, bytes) + if c in b' \t': + return quotetabs + # if header, we have to escape _ because _ is used to escape space + if c == b'_': + return header + return c == ESCAPE or not (b' ' <= c <= b'~') + +def quote(c): + """Quote a single character.""" + assert isinstance(c, bytes) and len(c)==1 + c = ord(c) + return ESCAPE + bytes((HEX[c//16], HEX[c%16])) + + + +def encode(input, output, quotetabs, header=False): + """Read 'input', apply quoted-printable encoding, and write to 'output'. + + 'input' and 'output' are binary file objects. The 'quotetabs' flag + indicates whether embedded tabs and spaces should be quoted. Note that + line-ending tabs and spaces are always encoded, as per RFC 1521. + The 'header' flag indicates whether we are encoding spaces as _ as per RFC + 1522.""" + + if b2a_qp is not None: + data = input.read() + odata = b2a_qp(data, quotetabs=quotetabs, header=header) + output.write(odata) + return + + def write(s, output=output, lineEnd=b'\n'): + # RFC 1521 requires that the line ending in a space or tab must have + # that trailing character encoded. + if s and s[-1:] in b' \t': + output.write(s[:-1] + quote(s[-1:]) + lineEnd) + elif s == b'.': + output.write(quote(s) + lineEnd) + else: + output.write(s + lineEnd) + + prevline = None + while line := input.readline(): + outline = [] + # Strip off any readline induced trailing newline + stripped = b'' + if line[-1:] == b'\n': + line = line[:-1] + stripped = b'\n' + # Calculate the un-length-limited encoded line + for c in line: + c = bytes((c,)) + if needsquoting(c, quotetabs, header): + c = quote(c) + if header and c == b' ': + outline.append(b'_') + else: + outline.append(c) + # First, write out the previous line + if prevline is not None: + write(prevline) + # Now see if we need any soft line breaks because of RFC-imposed + # length limitations. Then do the thisline->prevline dance. + thisline = EMPTYSTRING.join(outline) + while len(thisline) > MAXLINESIZE: + # Don't forget to include the soft line break `=' sign in the + # length calculation! + write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n') + thisline = thisline[MAXLINESIZE-1:] + # Write out the current line + prevline = thisline + # Write out the last line, without a trailing newline + if prevline is not None: + write(prevline, lineEnd=stripped) + +def encodestring(s, quotetabs=False, header=False): + if b2a_qp is not None: + return b2a_qp(s, quotetabs=quotetabs, header=header) + from io import BytesIO + infp = BytesIO(s) + outfp = BytesIO() + encode(infp, outfp, quotetabs, header) + return outfp.getvalue() + + + +def decode(input, output, header=False): + """Read 'input', apply quoted-printable decoding, and write to 'output'. + 'input' and 'output' are binary file objects. 
+ If 'header' is true, decode underscore as space (per RFC 1522).""" + + if a2b_qp is not None: + data = input.read() + odata = a2b_qp(data, header=header) + output.write(odata) + return + + new = b'' + while line := input.readline(): + i, n = 0, len(line) + if n > 0 and line[n-1:n] == b'\n': + partial = 0; n = n-1 + # Strip trailing whitespace + while n > 0 and line[n-1:n] in b" \t\r": + n = n-1 + else: + partial = 1 + while i < n: + c = line[i:i+1] + if c == b'_' and header: + new = new + b' '; i = i+1 + elif c != ESCAPE: + new = new + c; i = i+1 + elif i+1 == n and not partial: + partial = 1; break + elif i+1 < n and line[i+1:i+2] == ESCAPE: + new = new + ESCAPE; i = i+2 + elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]): + new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3 + else: # Bad escape sequence -- leave it in + new = new + c; i = i+1 + if not partial: + output.write(new + b'\n') + new = b'' + if new: + output.write(new) + +def decodestring(s, header=False): + if a2b_qp is not None: + return a2b_qp(s, header=header) + from io import BytesIO + infp = BytesIO(s) + outfp = BytesIO() + decode(infp, outfp, header=header) + return outfp.getvalue() + + + +# Other helper functions +def ishex(c): + """Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII.""" + assert isinstance(c, bytes) + return b'0' <= c <= b'9' or b'a' <= c <= b'f' or b'A' <= c <= b'F' + +def unhex(s): + """Get the integer value of a hexadecimal number.""" + bits = 0 + for c in s: + c = bytes((c,)) + if b'0' <= c <= b'9': + i = ord('0') + elif b'a' <= c <= b'f': + i = ord('a')-10 + elif b'A' <= c <= b'F': + i = ord(b'A')-10 + else: + assert False, "non-hex digit "+repr(c) + bits = bits*16 + (ord(c) - i) + return bits + + + +def main(): + import sys + import getopt + try: + opts, args = getopt.getopt(sys.argv[1:], 'td') + except getopt.error as msg: + sys.stdout = sys.stderr + print(msg) + print("usage: quopri [-t | -d] [file] ...") + print("-t: quote tabs") + print("-d: decode; default encode") + sys.exit(2) + deco = False + tabs = False + for o, a in opts: + if o == '-t': tabs = True + if o == '-d': deco = True + if tabs and deco: + sys.stdout = sys.stderr + print("-t and -d are mutually exclusive") + sys.exit(2) + if not args: args = ['-'] + sts = 0 + for file in args: + if file == '-': + fp = sys.stdin.buffer + else: + try: + fp = open(file, "rb") + except OSError as msg: + sys.stderr.write("%s: can't open (%s)\n" % (file, msg)) + sts = 1 + continue + try: + if deco: + decode(fp, sys.stdout.buffer) + else: + encode(fp, sys.stdout.buffer, tabs) + finally: + if file != '-': + fp.close() + if sts: + sys.exit(sts) + + + +if __name__ == '__main__': + main() diff --git a/Python314_4_x64_Template/Lib/random.py b/Python314_4_x64_Template/Lib/random.py new file mode 100644 index 00000000..86d562f0 --- /dev/null +++ b/Python314_4_x64_Template/Lib/random.py @@ -0,0 +1,1078 @@ +"""Random variable generators. 
+ + bytes + ----- + uniform bytes (values between 0 and 255) + + integers + -------- + uniform within range + + sequences + --------- + pick random element + pick random sample + pick weighted random sample + generate random permutation + + distributions on the real line: + ------------------------------ + uniform + triangular + normal (Gaussian) + lognormal + negative exponential + gamma + beta + pareto + Weibull + + distributions on the circle (angles 0 to 2pi) + --------------------------------------------- + circular uniform + von Mises + + discrete distributions + ---------------------- + binomial + + +General notes on the underlying Mersenne Twister core generator: + +* The period is 2**19937-1. +* It is one of the most extensively tested generators in existence. +* The random() method is implemented in C, executes in a single Python step, + and is, therefore, threadsafe. + +""" + +# Translated by Guido van Rossum from C source provided by +# Adrian Baddeley. Adapted by Raymond Hettinger for use with +# the Mersenne Twister and os.urandom() core generators. + +from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil +from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin +from math import tau as TWOPI, floor as _floor, isfinite as _isfinite +from math import lgamma as _lgamma, fabs as _fabs, log2 as _log2 +from os import urandom as _urandom +from _collections_abc import Sequence as _Sequence +from operator import index as _index +from itertools import accumulate as _accumulate, repeat as _repeat +from bisect import bisect as _bisect +import os as _os +import _random + +__all__ = [ + "Random", + "SystemRandom", + "betavariate", + "binomialvariate", + "choice", + "choices", + "expovariate", + "gammavariate", + "gauss", + "getrandbits", + "getstate", + "lognormvariate", + "normalvariate", + "paretovariate", + "randbytes", + "randint", + "random", + "randrange", + "sample", + "seed", + "setstate", + "shuffle", + "triangular", + "uniform", + "vonmisesvariate", + "weibullvariate", +] + +NV_MAGICCONST = 4 * _exp(-0.5) / _sqrt(2.0) +LOG4 = _log(4.0) +SG_MAGICCONST = 1.0 + _log(4.5) +BPF = 53 # Number of bits in a float +RECIP_BPF = 2 ** -BPF +_ONE = 1 +_sha512 = None + + +class Random(_random.Random): + """Random number generator base class used by bound module functions. + + Used to instantiate instances of Random to get generators that don't + share state. + + Class Random can also be subclassed if you want to use a different basic + generator of your own devising: in that case, override the following + methods: random(), seed(), getstate(), and setstate(). + Optionally, implement a getrandbits() method so that randrange() + can cover arbitrarily large ranges. + + """ + + VERSION = 3 # used by getstate/setstate + + def __init__(self, x=None): + """Initialize an instance. + + Optional argument x controls seeding, as for Random.seed(). + """ + + self.seed(x) + self.gauss_next = None + + def seed(self, a=None, version=2): + """Initialize internal state from a seed. + + The only supported seed types are None, int, float, + str, bytes, and bytearray. + + None or no argument seeds from current time or from an operating + system specific randomness source if available. + + If *a* is an int, all bits are used. + + For version 2 (the default), all of the bits are used if *a* is a str, + bytes, or bytearray. 
For version 1 (provided for reproducing random + sequences from older versions of Python), the algorithm for str and + bytes generates a narrower range of seeds. + + """ + + if version == 1 and isinstance(a, (str, bytes)): + a = a.decode('latin-1') if isinstance(a, bytes) else a + x = ord(a[0]) << 7 if a else 0 + for c in map(ord, a): + x = ((1000003 * x) ^ c) & 0xFFFFFFFFFFFFFFFF + x ^= len(a) + a = -2 if x == -1 else x + + elif version == 2 and isinstance(a, (str, bytes, bytearray)): + global _sha512 + if _sha512 is None: + try: + # hashlib is pretty heavy to load, try lean internal + # module first + from _sha2 import sha512 as _sha512 + except ImportError: + # fallback to official implementation + from hashlib import sha512 as _sha512 + + if isinstance(a, str): + a = a.encode() + a = int.from_bytes(a + _sha512(a).digest()) + + elif not isinstance(a, (type(None), int, float, str, bytes, bytearray)): + raise TypeError('The only supported seed types are:\n' + 'None, int, float, str, bytes, and bytearray.') + + super().seed(a) + self.gauss_next = None + + def getstate(self): + """Return internal state; can be passed to setstate() later.""" + return self.VERSION, super().getstate(), self.gauss_next + + def setstate(self, state): + """Restore internal state from object returned by getstate().""" + version = state[0] + if version == 3: + version, internalstate, self.gauss_next = state + super().setstate(internalstate) + elif version == 2: + version, internalstate, self.gauss_next = state + # In version 2, the state was saved as signed ints, which causes + # inconsistencies between 32/64-bit systems. The state is + # really unsigned 32-bit ints, so we convert negative ints from + # version 2 to positive longs for version 3. + try: + internalstate = tuple(x % (2 ** 32) for x in internalstate) + except ValueError as e: + raise TypeError from e + super().setstate(internalstate) + else: + raise ValueError("state with version %s passed to " + "Random.setstate() of version %s" % + (version, self.VERSION)) + + + ## ------------------------------------------------------- + ## ---- Methods below this point do not need to be overridden or extended + ## ---- when subclassing for the purpose of using a different core generator. + + + ## -------------------- pickle support ------------------- + + # Issue 17489: Since __reduce__ was defined to fix #759889 this is no + # longer called; we leave it here because it has been here since random was + # rewritten back in 2001 and why risk breaking something. + def __getstate__(self): # for pickle + return self.getstate() + + def __setstate__(self, state): # for pickle + self.setstate(state) + + def __reduce__(self): + return self.__class__, (), self.getstate() + + + ## ---- internal support method for evenly distributed integers ---- + + def __init_subclass__(cls, /, **kwargs): + """Control how subclasses generate random integers. + + The algorithm a subclass can use depends on the random() and/or + getrandbits() implementation available to it and determines + whether it can generate random integers from arbitrarily large + ranges. + """ + + for c in cls.__mro__: + if '_randbelow' in c.__dict__: + # just inherit it + break + if 'getrandbits' in c.__dict__: + cls._randbelow = cls._randbelow_with_getrandbits + break + if 'random' in c.__dict__: + cls._randbelow = cls._randbelow_without_getrandbits + break + + def _randbelow_with_getrandbits(self, n): + "Return a random int in the range [0,n). Defined for n > 0." 
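+        # Rejection sampling: with k = n.bit_length(), getrandbits(k)
+        # returns 0 <= r < 2**k where 2**k < 2*n, so each draw is kept
+        # with probability n / 2**k >= 0.5 and the loop is expected to
+        # finish in at most two iterations on average.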
+
+        k = n.bit_length()
+        r = self.getrandbits(k)  # 0 <= r < 2**k
+        while r >= n:
+            r = self.getrandbits(k)
+        return r
+
+    def _randbelow_without_getrandbits(self, n, maxsize=1<<BPF):
+        """Return a random int in the range [0,n).  Defined for n > 0.
+
+        The implementation does not use getrandbits, but only random.
+        """
+
+        random = self.random
+        if n >= maxsize:
+            from warnings import warn
+            warn("Underlying random() generator does not supply \n"
+                 "enough bits to choose from a population range this large.\n"
+                 "To remove the range limitation, add a getrandbits() method.")
+            return _floor(random() * n)
+        rem = maxsize % n
+        limit = (maxsize - rem) / maxsize  # int(limit * maxsize) % n == 0
+        r = random()
+        while r >= limit:
+            r = random()
+        return _floor(r * maxsize) % n
+
+    _randbelow = _randbelow_with_getrandbits
+
+
+    ## --------------------------------------------------------
+    ## ---- Methods below this point generate custom distributions
+    ## ---- based on the methods defined above.  They do not
+    ## ---- directly touch the underlying generator and only
+    ## ---- access randomness through the methods:  random(),
+    ## ---- getrandbits(), or _randbelow().
+
+
+    ## -------------------- bytes methods ---------------------
+
+    def randbytes(self, n):
+        """Generate n random bytes."""
+        return self.getrandbits(n * 8).to_bytes(n, 'little')
+
+
+    ## -------------------- integer methods  -------------------
+
+    def randrange(self, start, stop=None, step=_ONE):
+        """Choose a random item from range(stop) or range(start, stop[, step]).
+
+        Roughly equivalent to ``choice(range(start, stop, step))`` but
+        supports arbitrarily large ranges and is optimized for common cases.
+
+        """
+
+        # This code is a bit messy to make it fast for the
+        # common case while still doing adequate error checking.
+        istart = _index(start)
+        if stop is None:
+            # We don't check for "step != 1" because it hasn't been
+            # type checked and converted to an integer yet.
+            if step is not _ONE:
+                raise TypeError("Missing a non-None stop argument")
+            if istart > 0:
+                return self._randbelow(istart)
+            raise ValueError("empty range for randrange()")
+
+        # Stop argument supplied.
+        istop = _index(stop)
+        width = istop - istart
+        istep = _index(step)
+        # Fast path.
+        if istep == 1:
+            if width > 0:
+                return istart + self._randbelow(width)
+            raise ValueError(f"empty range in randrange({start}, {stop})")
+
+        # Non-unit step argument supplied.
+        if istep > 0:
+            n = (width + istep - 1) // istep
+        elif istep < 0:
+            n = (width + istep + 1) // istep
+        else:
+            raise ValueError("zero step for randrange()")
+        if n <= 0:
+            raise ValueError(f"empty range in randrange({start}, {stop}, {step})")
+        return istart + istep * self._randbelow(n)
+
+    def randint(self, a, b):
+        """Return random integer in range [a, b], including both end points.
+        """
+        a = _index(a)
+        b = _index(b)
+        if b < a:
+            raise ValueError(f"empty range in randint({a}, {b})")
+        return a + self._randbelow(b - a + 1)
+
+
+    ## -------------------- sequence methods  -------------------
+
+    def choice(self, seq):
+        """Choose a random element from a non-empty sequence."""
+
+        # As an accommodation for NumPy, we don't use "if not seq"
+        # because bool(numpy.array()) raises a ValueError.
+ if not len(seq): + raise IndexError('Cannot choose from an empty sequence') + return seq[self._randbelow(len(seq))] + + def shuffle(self, x): + """Shuffle list x in place, and return None.""" + + randbelow = self._randbelow + for i in reversed(range(1, len(x))): + # pick an element in x[:i+1] with which to exchange x[i] + j = randbelow(i + 1) + x[i], x[j] = x[j], x[i] + + def sample(self, population, k, *, counts=None): + """Chooses k unique random elements from a population sequence. + + Returns a new list containing elements from the population while + leaving the original population unchanged. The resulting list is + in selection order so that all sub-slices will also be valid random + samples. This allows raffle winners (the sample) to be partitioned + into grand prize and second place winners (the subslices). + + Members of the population need not be hashable or unique. If the + population contains repeats, then each occurrence is a possible + selection in the sample. + + Repeated elements can be specified one at a time or with the optional + counts parameter. For example: + + sample(['red', 'blue'], counts=[4, 2], k=5) + + is equivalent to: + + sample(['red', 'red', 'red', 'red', 'blue', 'blue'], k=5) + + To choose a sample from a range of integers, use range() for the + population argument. This is especially fast and space efficient + for sampling from a large population: + + sample(range(10000000), 60) + + """ + + # Sampling without replacement entails tracking either potential + # selections (the pool) in a list or previous selections in a set. + + # When the number of selections is small compared to the + # population, then tracking selections is efficient, requiring + # only a small set and an occasional reselection. For + # a larger number of selections, the pool tracking method is + # preferred since the list takes less space than the + # set and it doesn't suffer from frequent reselections. + + # The number of calls to _randbelow() is kept at or near k, the + # theoretical minimum. This is important because running time + # is dominated by _randbelow() and because it extracts the + # least entropy from the underlying random number generators. + + # Memory requirements are kept to the smaller of a k-length + # set or an n-length list. + + # There are other sampling algorithms that do not require + # auxiliary memory, but they were rejected because they made + # too many calls to _randbelow(), making them slower and + # causing them to eat more entropy than necessary. + + if not isinstance(population, _Sequence): + raise TypeError("Population must be a sequence. " + "For dicts or sets, use sorted(d).") + n = len(population) + if counts is not None: + cum_counts = list(_accumulate(counts)) + if len(cum_counts) != n: + raise ValueError('The number of counts does not match the population') + total = cum_counts.pop() if cum_counts else 0 + if not isinstance(total, int): + raise TypeError('Counts must be integers') + if total < 0: + raise ValueError('Counts must be non-negative') + selections = self.sample(range(total), k=k) + bisect = _bisect + return [population[bisect(cum_counts, s)] for s in selections] + randbelow = self._randbelow + if not 0 <= k <= n: + raise ValueError("Sample larger than population or is negative") + result = [None] * k + setsize = 21 # size of a small set minus size of an empty list + if k > 5: + setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets + if n <= setsize: + # An n-length list is smaller than a k-length set. 
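+            # (Pool tracking is a partial Fisher-Yates shuffle: each pick
+            # is overwritten by the last still-unselected item, keeping
+            # the remaining candidates packed in pool[0 : n-i].)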
+ # Invariant: non-selected at pool[0 : n-i] + pool = list(population) + for i in range(k): + j = randbelow(n - i) + result[i] = pool[j] + pool[j] = pool[n - i - 1] # move non-selected item into vacancy + else: + selected = set() + selected_add = selected.add + for i in range(k): + j = randbelow(n) + while j in selected: + j = randbelow(n) + selected_add(j) + result[i] = population[j] + return result + + def choices(self, population, weights=None, *, cum_weights=None, k=1): + """Return a k sized list of population elements chosen with replacement. + + If the relative weights or cumulative weights are not specified, + the selections are made with equal probability. + + """ + random = self.random + n = len(population) + if cum_weights is None: + if weights is None: + floor = _floor + n += 0.0 # convert to float for a small speed improvement + return [population[floor(random() * n)] for i in _repeat(None, k)] + try: + cum_weights = list(_accumulate(weights)) + except TypeError: + if not isinstance(weights, int): + raise + k = weights + raise TypeError( + f'The number of choices must be a keyword argument: {k=}' + ) from None + elif weights is not None: + raise TypeError('Cannot specify both weights and cumulative weights') + if len(cum_weights) != n: + raise ValueError('The number of weights does not match the population') + total = cum_weights[-1] + 0.0 # convert to float + if total <= 0.0: + raise ValueError('Total of weights must be greater than zero') + if not _isfinite(total): + raise ValueError('Total of weights must be finite') + bisect = _bisect + hi = n - 1 + return [population[bisect(cum_weights, random() * total, 0, hi)] + for i in _repeat(None, k)] + + + ## -------------------- real-valued distributions ------------------- + + def uniform(self, a, b): + """Get a random number in the range [a, b) or [a, b] depending on rounding. + + The mean (expected value) and variance of the random variable are: + + E[X] = (a + b) / 2 + Var[X] = (b - a) ** 2 / 12 + + """ + return a + (b - a) * self.random() + + def triangular(self, low=0.0, high=1.0, mode=None): + """Triangular distribution. + + Continuous distribution bounded by given lower and upper limits, + and having a given mode value in-between. + + http://en.wikipedia.org/wiki/Triangular_distribution + + The mean (expected value) and variance of the random variable are: + + E[X] = (low + high + mode) / 3 + Var[X] = (low**2 + high**2 + mode**2 - low*high - low*mode - high*mode) / 18 + + """ + u = self.random() + try: + c = 0.5 if mode is None else (mode - low) / (high - low) + except ZeroDivisionError: + return low + if u > c: + u = 1.0 - u + c = 1.0 - c + low, high = high, low + return low + (high - low) * _sqrt(u * c) + + def normalvariate(self, mu=0.0, sigma=1.0): + """Normal distribution. + + mu is the mean, and sigma is the standard deviation. + + """ + # Uses Kinderman and Monahan method. Reference: Kinderman, + # A.J. and Monahan, J.F., "Computer generation of random + # variables using the ratio of uniform deviates", ACM Trans + # Math Software, 3, (1977), pp257-260. + + random = self.random + while True: + u1 = random() + u2 = 1.0 - random() + z = NV_MAGICCONST * (u1 - 0.5) / u2 + zz = z * z / 4.0 + if zz <= -_log(u2): + break + return mu + z * sigma + + def gauss(self, mu=0.0, sigma=1.0): + """Gaussian distribution. + + mu is the mean, and sigma is the standard deviation. This is + slightly faster than the normalvariate() function. + + Not thread-safe without a lock around calls. 
+ + """ + # When x and y are two variables from [0, 1), uniformly + # distributed, then + # + # cos(2*pi*x)*sqrt(-2*log(1-y)) + # sin(2*pi*x)*sqrt(-2*log(1-y)) + # + # are two *independent* variables with normal distribution + # (mu = 0, sigma = 1). + # (Lambert Meertens) + # (corrected version; bug discovered by Mike Miller, fixed by LM) + + # Multithreading note: When two threads call this function + # simultaneously, it is possible that they will receive the + # same return value. The window is very small though. To + # avoid this, you have to use a lock around all calls. (I + # didn't want to slow this down in the serial case by using a + # lock here.) + + random = self.random + z = self.gauss_next + self.gauss_next = None + if z is None: + x2pi = random() * TWOPI + g2rad = _sqrt(-2.0 * _log(1.0 - random())) + z = _cos(x2pi) * g2rad + self.gauss_next = _sin(x2pi) * g2rad + + return mu + z * sigma + + def lognormvariate(self, mu, sigma): + """Log normal distribution. + + If you take the natural logarithm of this distribution, you'll get a + normal distribution with mean mu and standard deviation sigma. + mu can have any value, and sigma must be greater than zero. + + """ + return _exp(self.normalvariate(mu, sigma)) + + def expovariate(self, lambd=1.0): + """Exponential distribution. + + lambd is 1.0 divided by the desired mean. It should be + nonzero. (The parameter would be called "lambda", but that is + a reserved word in Python.) Returned values range from 0 to + positive infinity if lambd is positive, and from negative + infinity to 0 if lambd is negative. + + The mean (expected value) and variance of the random variable are: + + E[X] = 1 / lambd + Var[X] = 1 / lambd ** 2 + + """ + # we use 1-random() instead of random() to preclude the + # possibility of taking the log of zero. + + return -_log(1.0 - self.random()) / lambd + + def vonmisesvariate(self, mu, kappa): + """Circular data distribution. + + mu is the mean angle, expressed in radians between 0 and 2*pi, and + kappa is the concentration parameter, which must be greater than or + equal to zero. If kappa is equal to zero, this distribution reduces + to a uniform random angle over the range 0 to 2*pi. + + """ + # Based upon an algorithm published in: Fisher, N.I., + # "Statistical Analysis of Circular Data", Cambridge + # University Press, 1993. + + # Thanks to Magnus Kessler for a correction to the + # implementation of step 4. + + random = self.random + if kappa <= 1e-6: + return TWOPI * random() + + s = 0.5 / kappa + r = s + _sqrt(1.0 + s * s) + + while True: + u1 = random() + z = _cos(_pi * u1) + + d = z / (r + z) + u2 = random() + if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): + break + + q = 1.0 / r + f = (q + z) / (1.0 + q * z) + u3 = random() + if u3 > 0.5: + theta = (mu + _acos(f)) % TWOPI + else: + theta = (mu - _acos(f)) % TWOPI + + return theta + + def gammavariate(self, alpha, beta): + """Gamma distribution. Not the gamma function! + + Conditions on the parameters are alpha > 0 and beta > 0. 
+ + The probability distribution function is: + + x ** (alpha - 1) * math.exp(-x / beta) + pdf(x) = -------------------------------------- + math.gamma(alpha) * beta ** alpha + + The mean (expected value) and variance of the random variable are: + + E[X] = alpha * beta + Var[X] = alpha * beta ** 2 + + """ + + # Warning: a few older sources define the gamma distribution in terms + # of alpha > -1.0 + if alpha <= 0.0 or beta <= 0.0: + raise ValueError('gammavariate: alpha and beta must be > 0.0') + + random = self.random + if alpha > 1.0: + + # Uses R.C.H. Cheng, "The generation of Gamma + # variables with non-integral shape parameters", + # Applied Statistics, (1977), 26, No. 1, p71-74 + + ainv = _sqrt(2.0 * alpha - 1.0) + bbb = alpha - LOG4 + ccc = alpha + ainv + + while True: + u1 = random() + if not 1e-7 < u1 < 0.9999999: + continue + u2 = 1.0 - random() + v = _log(u1 / (1.0 - u1)) / ainv + x = alpha * _exp(v) + z = u1 * u1 * u2 + r = bbb + ccc * v - x + if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z): + return x * beta + + elif alpha == 1.0: + # expovariate(1/beta) + return -_log(1.0 - random()) * beta + + else: + # alpha is between 0 and 1 (exclusive) + # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle + while True: + u = random() + b = (_e + alpha) / _e + p = b * u + if p <= 1.0: + x = p ** (1.0 / alpha) + else: + x = -_log((b - p) / alpha) + u1 = random() + if p > 1.0: + if u1 <= x ** (alpha - 1.0): + break + elif u1 <= _exp(-x): + break + return x * beta + + def betavariate(self, alpha, beta): + """Beta distribution. + + Conditions on the parameters are alpha > 0 and beta > 0. + Returned values range between 0 and 1. + + The mean (expected value) and variance of the random variable are: + + E[X] = alpha / (alpha + beta) + Var[X] = alpha * beta / ((alpha + beta)**2 * (alpha + beta + 1)) + + """ + ## See + ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html + ## for Ivan Frohne's insightful analysis of why the original implementation: + ## + ## def betavariate(self, alpha, beta): + ## # Discrete Event Simulation in C, pp 87-88. + ## + ## y = self.expovariate(alpha) + ## z = self.expovariate(1.0/beta) + ## return z/(y+z) + ## + ## was dead wrong, and how it probably got that way. + + # This version due to Janne Sinkkonen, and matches all the std + # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). + y = self.gammavariate(alpha, 1.0) + if y: + return y / (y + self.gammavariate(beta, 1.0)) + return 0.0 + + def paretovariate(self, alpha): + """Pareto distribution. alpha is the shape parameter.""" + # Jain, pg. 495 + + u = 1.0 - self.random() + return u ** (-1.0 / alpha) + + def weibullvariate(self, alpha, beta): + """Weibull distribution. + + alpha is the scale parameter and beta is the shape parameter. + + """ + # Jain, pg. 499; bug fix courtesy Bill Arms + + u = 1.0 - self.random() + return alpha * (-_log(u)) ** (1.0 / beta) + + + ## -------------------- discrete distributions --------------------- + + def binomialvariate(self, n=1, p=0.5): + """Binomial random variable. 
+ + Gives the number of successes for *n* independent trials + with the probability of success in each trial being *p*: + + sum(random() < p for i in range(n)) + + Returns an integer in the range: + + 0 <= X <= n + + The integer is chosen with the probability: + + P(X == k) = math.comb(n, k) * p ** k * (1 - p) ** (n - k) + + The mean (expected value) and variance of the random variable are: + + E[X] = n * p + Var[X] = n * p * (1 - p) + + """ + # Error check inputs and handle edge cases + if n < 0: + raise ValueError("n must be non-negative") + if p <= 0.0 or p >= 1.0: + if p == 0.0: + return 0 + if p == 1.0: + return n + raise ValueError("p must be in the range 0.0 <= p <= 1.0") + + random = self.random + + # Fast path for a common case + if n == 1: + return _index(random() < p) + + # Exploit symmetry to establish: p <= 0.5 + if p > 0.5: + return n - self.binomialvariate(n, 1.0 - p) + + if n * p < 10.0: + # BG: Geometric method by Devroye with running time of O(np). + # https://dl.acm.org/doi/pdf/10.1145/42372.42381 + x = y = 0 + c = _log2(1.0 - p) + if not c: + return x + while True: + y += _floor(_log2(random()) / c) + 1 + if y > n: + return x + x += 1 + + # BTRS: Transformed rejection with squeeze method by Wolfgang Hörmann + # https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.47.8407&rep=rep1&type=pdf + assert n*p >= 10.0 and p <= 0.5 + setup_complete = False + + spq = _sqrt(n * p * (1.0 - p)) # Standard deviation of the distribution + b = 1.15 + 2.53 * spq + a = -0.0873 + 0.0248 * b + 0.01 * p + c = n * p + 0.5 + vr = 0.92 - 4.2 / b + + while True: + + u = random() + u -= 0.5 + us = 0.5 - _fabs(u) + k = _floor((2.0 * a / us + b) * u + c) + if k < 0 or k > n: + continue + + # The early-out "squeeze" test substantially reduces + # the number of acceptance condition evaluations. + v = random() + if us >= 0.07 and v <= vr: + return k + + # Acceptance-rejection test. + # Note, the original paper erroneously omits the call to log(v) + # when comparing to the log of the rescaled binomial distribution. + if not setup_complete: + alpha = (2.83 + 5.1 / b) * spq + lpq = _log(p / (1.0 - p)) + m = _floor((n + 1) * p) # Mode of the distribution + h = _lgamma(m + 1) + _lgamma(n - m + 1) + setup_complete = True # Only needs to be done once + v *= alpha / (a / (us * us) + b) + if _log(v) <= h - _lgamma(k + 1) - _lgamma(n - k + 1) + (k - m) * lpq: + return k + + +## ------------------------------------------------------------------ +## --------------- Operating System Random Source ------------------ + + +class SystemRandom(Random): + """Alternate random number generator using sources provided + by the operating system (such as /dev/urandom on Unix or + CryptGenRandom on Windows). + + Not available on all systems (see os.urandom() for details). + + """ + + def random(self): + """Get the next random number in the range 0.0 <= X < 1.0.""" + return (int.from_bytes(_urandom(7)) >> 3) * RECIP_BPF + + def getrandbits(self, k): + """getrandbits(k) -> x. Generates an int with k random bits.""" + if k < 0: + raise ValueError('number of bits must be non-negative') + numbytes = (k + 7) // 8 # bits / 8 and rounded up + x = int.from_bytes(_urandom(numbytes)) + return x >> (numbytes * 8 - k) # trim excess bits + + def randbytes(self, n): + """Generate n random bytes.""" + # os.urandom(n) fails with ValueError for n < 0 + # and returns an empty bytes string for n == 0. + return _urandom(n) + + def seed(self, *args, **kwds): + "Stub method. Not used for a system random number generator." 
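+        # Arguments are accepted and ignored so that SystemRandom can be
+        # used anywhere a Random instance is expected; OS entropy has no
+        # user-settable state to seed.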
+ return None + + def _notimplemented(self, *args, **kwds): + "Method should not be called for a system random number generator." + raise NotImplementedError('System entropy source does not have state.') + getstate = setstate = _notimplemented + + +# ---------------------------------------------------------------------- +# Create one instance, seeded from current time, and export its methods +# as module-level functions. The functions share state across all uses +# (both in the user's code and in the Python libraries), but that's fine +# for most programs and is easier for the casual user than making them +# instantiate their own Random() instance. + +_inst = Random() +seed = _inst.seed +random = _inst.random +uniform = _inst.uniform +triangular = _inst.triangular +randint = _inst.randint +choice = _inst.choice +randrange = _inst.randrange +sample = _inst.sample +shuffle = _inst.shuffle +choices = _inst.choices +normalvariate = _inst.normalvariate +lognormvariate = _inst.lognormvariate +expovariate = _inst.expovariate +vonmisesvariate = _inst.vonmisesvariate +gammavariate = _inst.gammavariate +gauss = _inst.gauss +betavariate = _inst.betavariate +binomialvariate = _inst.binomialvariate +paretovariate = _inst.paretovariate +weibullvariate = _inst.weibullvariate +getstate = _inst.getstate +setstate = _inst.setstate +getrandbits = _inst.getrandbits +randbytes = _inst.randbytes + + +## ------------------------------------------------------ +## ----------------- test program ----------------------- + +def _test_generator(n, func, args): + from statistics import stdev, fmean as mean + from time import perf_counter + + t0 = perf_counter() + data = [func(*args) for i in _repeat(None, n)] + t1 = perf_counter() + + xbar = mean(data) + sigma = stdev(data, xbar) + low = min(data) + high = max(data) + + print(f'{t1 - t0:.3f} sec, {n} times {func.__name__}{args!r}') + print('avg %g, stddev %g, min %g, max %g\n' % (xbar, sigma, low, high)) + + +def _test(N=10_000): + _test_generator(N, random, ()) + _test_generator(N, normalvariate, (0.0, 1.0)) + _test_generator(N, lognormvariate, (0.0, 1.0)) + _test_generator(N, vonmisesvariate, (0.0, 1.0)) + _test_generator(N, binomialvariate, (15, 0.60)) + _test_generator(N, binomialvariate, (100, 0.75)) + _test_generator(N, gammavariate, (0.01, 1.0)) + _test_generator(N, gammavariate, (0.1, 1.0)) + _test_generator(N, gammavariate, (0.1, 2.0)) + _test_generator(N, gammavariate, (0.5, 1.0)) + _test_generator(N, gammavariate, (0.9, 1.0)) + _test_generator(N, gammavariate, (1.0, 1.0)) + _test_generator(N, gammavariate, (2.0, 1.0)) + _test_generator(N, gammavariate, (20.0, 1.0)) + _test_generator(N, gammavariate, (200.0, 1.0)) + _test_generator(N, gauss, (0.0, 1.0)) + _test_generator(N, betavariate, (3.0, 3.0)) + _test_generator(N, triangular, (0.0, 1.0, 1.0 / 3.0)) + + +## ------------------------------------------------------ +## ------------------ fork support --------------------- + +if hasattr(_os, "fork"): + _os.register_at_fork(after_in_child=_inst.seed) + + +# ------------------------------------------------------ +# -------------- command-line interface ---------------- + + +def _parse_args(arg_list: list[str] | None): + import argparse + parser = argparse.ArgumentParser( + formatter_class=argparse.RawTextHelpFormatter, color=True) + group = parser.add_mutually_exclusive_group() + group.add_argument( + "-c", "--choice", nargs="+", + help="print a random choice") + group.add_argument( + "-i", "--integer", type=int, metavar="N", + help="print a random integer 
between 1 and N inclusive") + group.add_argument( + "-f", "--float", type=float, metavar="N", + help="print a random floating-point number between 0 and N inclusive") + group.add_argument( + "--test", type=int, const=10_000, nargs="?", + help=argparse.SUPPRESS) + parser.add_argument("input", nargs="*", + help="""\ +if no options given, output depends on the input + string or multiple: same as --choice + integer: same as --integer + float: same as --float""") + args = parser.parse_args(arg_list) + return args, parser.format_help() + + +def main(arg_list: list[str] | None = None) -> int | str: + args, help_text = _parse_args(arg_list) + + # Explicit arguments + if args.choice: + return choice(args.choice) + + if args.integer is not None: + return randint(1, args.integer) + + if args.float is not None: + return uniform(0, args.float) + + if args.test: + _test(args.test) + return "" + + # No explicit argument, select based on input + if len(args.input) == 1: + val = args.input[0] + try: + # Is it an integer? + val = int(val) + return randint(1, val) + except ValueError: + try: + # Is it a float? + val = float(val) + return uniform(0, val) + except ValueError: + # Split in case of space-separated string: "a b c" + return choice(val.split()) + + if len(args.input) >= 2: + return choice(args.input) + + return help_text + + +if __name__ == '__main__': + print(main()) diff --git a/Python314_4_x64_Template/Lib/re/__init__.py b/Python314_4_x64_Template/Lib/re/__init__.py new file mode 100644 index 00000000..af2808a7 --- /dev/null +++ b/Python314_4_x64_Template/Lib/re/__init__.py @@ -0,0 +1,428 @@ +# +# Secret Labs' Regular Expression Engine +# +# re-compatible interface for the sre matching engine +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# This version of the SRE library can be redistributed under CNRI's +# Python 1.6 license. For any other use, please contact Secret Labs +# AB (info@pythonware.com). +# +# Portions of this engine have been developed in cooperation with +# CNRI. Hewlett-Packard provided funding for 1.6 integration and +# other compatibility work. +# + +r"""Support for regular expressions (RE). + +This module provides regular expression matching operations similar to +those found in Perl. It supports both 8-bit and Unicode strings; both +the pattern and the strings being processed can contain null bytes and +characters outside the US ASCII range. + +Regular expressions can contain both special and ordinary characters. +Most ordinary characters, like "A", "a", or "0", are the simplest +regular expressions; they simply match themselves. You can +concatenate ordinary characters, so last matches the string 'last'. + +The special characters are: + "." Matches any character except a newline. + "^" Matches the start of the string. + "$" Matches the end of the string or just before the newline at + the end of the string. + "*" Matches 0 or more (greedy) repetitions of the preceding RE. + Greedy means that it will match as many repetitions as possible. + "+" Matches 1 or more (greedy) repetitions of the preceding RE. + "?" Matches 0 or 1 (greedy) of the preceding RE. + *?,+?,?? Non-greedy versions of the previous three special characters. + {m,n} Matches from m to n repetitions of the preceding RE. + {m,n}? Non-greedy version of the above. + "\\" Either escapes special characters or signals a special sequence. + [] Indicates a set of characters. + A "^" as the first character indicates a complementing set. + "|" A|B, creates an RE that will match either A or B. 
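Before the grouping constructs that the docstring lists next, a short illustration of the quantifier and set behaviour tabulated so far (standard re semantics, shown for orientation only):

```python
import re

# "*" is greedy, "*?" is the non-greedy variant:
assert re.match(r"<(.*)>", "<a><b>").group(1) == "a><b"
assert re.match(r"<(.*?)>", "<a><b>").group(1) == "a"

# "[...]" builds a character set; a leading "^" complements it:
assert re.findall(r"[^0-9]", "a1b2") == ["a", "b"]

# "|" is alternation:
assert re.fullmatch(r"cat|dog", "dog") is not None
```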
+ (...) Matches the RE inside the parentheses. + The contents can be retrieved or matched later in the string. + (?aiLmsux) The letters set the corresponding flags defined below. + (?:...) Non-grouping version of regular parentheses. + (?P<name>...) The substring matched by the group is accessible by name. + (?P=name) Matches the text matched earlier by the group named name. + (?#...) A comment; ignored. + (?=...) Matches if ... matches next, but doesn't consume the string. + (?!...) Matches if ... doesn't match next. + (?<=...) Matches if preceded by ... (must be fixed length). + (?<!...) Matches if not preceded by ... (must be fixed length). + ... + if len(_cache) >= _MAXCACHE: + # Drop the least recently used item. + # next(iter(_cache)) is known to have linear amortized time, + # but it is used here to avoid a dependency from using OrderedDict. + # For the small _MAXCACHE value it doesn't make much of a difference. + try: + del _cache[next(iter(_cache))] + except (StopIteration, RuntimeError, KeyError): + pass + # Append to the end. + _cache[key] = p + + if len(_cache2) >= _MAXCACHE2: + # Drop the oldest item. + try: + del _cache2[next(iter(_cache2))] + except (StopIteration, RuntimeError, KeyError): + pass + _cache2[key] = p + return p + +@functools.lru_cache(_MAXCACHE) +def _compile_template(pattern, repl): + # internal: compile replacement pattern + return _sre.template(pattern, _parser.parse_template(repl, pattern)) + +# register myself for pickling + +import copyreg + +def _pickle(p): + return _compile, (p.pattern, p.flags) + +copyreg.pickle(Pattern, _pickle, _compile) + +# -------------------------------------------------------------------- +# experimental stuff (see python-dev discussions for details) + +class Scanner: + def __init__(self, lexicon, flags=0): + from ._constants import BRANCH, SUBPATTERN + if isinstance(flags, RegexFlag): + flags = flags.value + self.lexicon = lexicon + # combine phrases into a compound pattern + p = [] + s = _parser.State() + s.flags = flags + for phrase, action in lexicon: + gid = s.opengroup() + p.append(_parser.SubPattern(s, [ + (SUBPATTERN, (gid, 0, 0, _parser.parse(phrase, flags))), + ])) + s.closegroup(gid, p[-1]) + p = _parser.SubPattern(s, [(BRANCH, (None, p))]) + self.scanner = _compiler.compile(p) + def scan(self, string): + result = [] + append = result.append + match = self.scanner.scanner(string).match + i = 0 + while True: + m = match() + if not m: + break + j = m.end() + if i == j: + break + action = self.lexicon[m.lastindex-1][1] + if callable(action): + self.match = m + action = action(self, m.group()) + if action is not None: + append(action) + i = j + return result, string[i:] diff --git a/Python314_4_x64_Template/Lib/re/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/re/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..ce0ccb50 Binary files /dev/null and b/Python314_4_x64_Template/Lib/re/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/re/__pycache__/_casefix.cpython-314.pyc b/Python314_4_x64_Template/Lib/re/__pycache__/_casefix.cpython-314.pyc new file mode 100644 index 00000000..87924a0b Binary files /dev/null and b/Python314_4_x64_Template/Lib/re/__pycache__/_casefix.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/re/__pycache__/_compiler.cpython-314.pyc b/Python314_4_x64_Template/Lib/re/__pycache__/_compiler.cpython-314.pyc new file mode 100644 index 00000000..e2e0ef23 Binary files /dev/null and b/Python314_4_x64_Template/Lib/re/__pycache__/_compiler.cpython-314.pyc differ diff --git
a/Python314_4_x64_Template/Lib/re/__pycache__/_constants.cpython-314.pyc b/Python314_4_x64_Template/Lib/re/__pycache__/_constants.cpython-314.pyc new file mode 100644 index 00000000..63aa3948 Binary files /dev/null and b/Python314_4_x64_Template/Lib/re/__pycache__/_constants.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/re/__pycache__/_parser.cpython-314.pyc b/Python314_4_x64_Template/Lib/re/__pycache__/_parser.cpython-314.pyc new file mode 100644 index 00000000..0565794a Binary files /dev/null and b/Python314_4_x64_Template/Lib/re/__pycache__/_parser.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/re/_casefix.py b/Python314_4_x64_Template/Lib/re/_casefix.py similarity index 100% rename from Python313_13_x64_Template/Lib/re/_casefix.py rename to Python314_4_x64_Template/Lib/re/_casefix.py diff --git a/Python314_4_x64_Template/Lib/re/_compiler.py b/Python314_4_x64_Template/Lib/re/_compiler.py new file mode 100644 index 00000000..20dd561d --- /dev/null +++ b/Python314_4_x64_Template/Lib/re/_compiler.py @@ -0,0 +1,782 @@ +# +# Secret Labs' Regular Expression Engine +# +# convert template to internal format +# +# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. +# +# See the __init__.py file for information on usage and redistribution. +# + +"""Internal support module for sre""" + +import _sre +from . import _parser +from ._constants import * +from ._casefix import _EXTRA_CASES + +assert _sre.MAGIC == MAGIC, "SRE module mismatch" + +_LITERAL_CODES = {LITERAL, NOT_LITERAL} +_SUCCESS_CODES = {SUCCESS, FAILURE} +_ASSERT_CODES = {ASSERT, ASSERT_NOT} +_UNIT_CODES = _LITERAL_CODES | {ANY, IN} + +_REPEATING_CODES = { + MIN_REPEAT: (REPEAT, MIN_UNTIL, MIN_REPEAT_ONE), + MAX_REPEAT: (REPEAT, MAX_UNTIL, REPEAT_ONE), + POSSESSIVE_REPEAT: (POSSESSIVE_REPEAT, SUCCESS, POSSESSIVE_REPEAT_ONE), +} + +_CHARSET_ALL = [(NEGATE, None)] + +def _combine_flags(flags, add_flags, del_flags, + TYPE_FLAGS=_parser.TYPE_FLAGS): + if add_flags & TYPE_FLAGS: + flags &= ~TYPE_FLAGS + return (flags | add_flags) & ~del_flags + +def _compile(code, pattern, flags): + # internal: compile a (sub)pattern + emit = code.append + _len = len + LITERAL_CODES = _LITERAL_CODES + REPEATING_CODES = _REPEATING_CODES + SUCCESS_CODES = _SUCCESS_CODES + ASSERT_CODES = _ASSERT_CODES + iscased = None + tolower = None + fixes = None + if flags & SRE_FLAG_IGNORECASE and not flags & SRE_FLAG_LOCALE: + if flags & SRE_FLAG_UNICODE: + iscased = _sre.unicode_iscased + tolower = _sre.unicode_tolower + fixes = _EXTRA_CASES + else: + iscased = _sre.ascii_iscased + tolower = _sre.ascii_tolower + for op, av in pattern: + if op in LITERAL_CODES: + if not flags & SRE_FLAG_IGNORECASE: + emit(op) + emit(av) + elif flags & SRE_FLAG_LOCALE: + emit(OP_LOCALE_IGNORE[op]) + emit(av) + elif not iscased(av): + emit(op) + emit(av) + else: + lo = tolower(av) + if not fixes: # ascii + emit(OP_IGNORE[op]) + emit(lo) + elif lo not in fixes: + emit(OP_UNICODE_IGNORE[op]) + emit(lo) + else: + emit(IN_UNI_IGNORE) + skip = _len(code); emit(0) + if op is NOT_LITERAL: + emit(NEGATE) + for k in (lo,) + fixes[lo]: + emit(LITERAL) + emit(k) + emit(FAILURE) + code[skip] = _len(code) - skip + elif op is IN: + charset, hascased = _optimize_charset(av, iscased, tolower, fixes) + if not charset: + emit(FAILURE) + elif charset == _CHARSET_ALL: + emit(ANY_ALL) + else: + if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE: + emit(IN_LOC_IGNORE) + elif not hascased: + emit(IN) + elif not fixes: # ascii + emit(IN_IGNORE) + else: + 
emit(IN_UNI_IGNORE) + skip = _len(code); emit(0) + _compile_charset(charset, flags, code) + code[skip] = _len(code) - skip + elif op is ANY: + if flags & SRE_FLAG_DOTALL: + emit(ANY_ALL) + else: + emit(ANY) + elif op in REPEATING_CODES: + if _simple(av[2]): + emit(REPEATING_CODES[op][2]) + skip = _len(code); emit(0) + emit(av[0]) + emit(av[1]) + _compile(code, av[2], flags) + emit(SUCCESS) + code[skip] = _len(code) - skip + else: + emit(REPEATING_CODES[op][0]) + skip = _len(code); emit(0) + emit(av[0]) + emit(av[1]) + _compile(code, av[2], flags) + code[skip] = _len(code) - skip + emit(REPEATING_CODES[op][1]) + elif op is SUBPATTERN: + group, add_flags, del_flags, p = av + if group: + emit(MARK) + emit((group-1)*2) + # _compile_info(code, p, _combine_flags(flags, add_flags, del_flags)) + _compile(code, p, _combine_flags(flags, add_flags, del_flags)) + if group: + emit(MARK) + emit((group-1)*2+1) + elif op is ATOMIC_GROUP: + # Atomic Groups are handled by starting with an Atomic + # Group op code, then putting in the atomic group pattern + # and finally a success op code to tell any repeat + # operations within the Atomic Group to stop eating and + # pop their stack if they reach it + emit(ATOMIC_GROUP) + skip = _len(code); emit(0) + _compile(code, av, flags) + emit(SUCCESS) + code[skip] = _len(code) - skip + elif op in SUCCESS_CODES: + emit(op) + elif op in ASSERT_CODES: + emit(op) + skip = _len(code); emit(0) + if av[0] >= 0: + emit(0) # look ahead + else: + lo, hi = av[1].getwidth() + if lo > MAXCODE: + raise error("looks too much behind") + if lo != hi: + raise PatternError("look-behind requires fixed-width pattern") + emit(lo) # look behind + _compile(code, av[1], flags) + emit(SUCCESS) + code[skip] = _len(code) - skip + elif op is AT: + emit(op) + if flags & SRE_FLAG_MULTILINE: + av = AT_MULTILINE.get(av, av) + if flags & SRE_FLAG_LOCALE: + av = AT_LOCALE.get(av, av) + elif flags & SRE_FLAG_UNICODE: + av = AT_UNICODE.get(av, av) + emit(av) + elif op is BRANCH: + emit(op) + tail = [] + tailappend = tail.append + for av in av[1]: + skip = _len(code); emit(0) + # _compile_info(code, av, flags) + _compile(code, av, flags) + emit(JUMP) + tailappend(_len(code)); emit(0) + code[skip] = _len(code) - skip + emit(FAILURE) # end of branch + for tail in tail: + code[tail] = _len(code) - tail + elif op is CATEGORY: + emit(op) + if flags & SRE_FLAG_LOCALE: + av = CH_LOCALE[av] + elif flags & SRE_FLAG_UNICODE: + av = CH_UNICODE[av] + emit(av) + elif op is GROUPREF: + if not flags & SRE_FLAG_IGNORECASE: + emit(op) + elif flags & SRE_FLAG_LOCALE: + emit(GROUPREF_LOC_IGNORE) + elif not fixes: # ascii + emit(GROUPREF_IGNORE) + else: + emit(GROUPREF_UNI_IGNORE) + emit(av-1) + elif op is GROUPREF_EXISTS: + emit(op) + emit(av[0]-1) + skipyes = _len(code); emit(0) + _compile(code, av[1], flags) + if av[2]: + emit(JUMP) + skipno = _len(code); emit(0) + code[skipyes] = _len(code) - skipyes + 1 + _compile(code, av[2], flags) + code[skipno] = _len(code) - skipno + else: + code[skipyes] = _len(code) - skipyes + 1 + else: + raise PatternError(f"internal: unsupported operand type {op!r}") + +def _compile_charset(charset, flags, code): + # compile charset subprogram + emit = code.append + for op, av in charset: + emit(op) + if op is NEGATE: + pass + elif op is LITERAL: + emit(av) + elif op is RANGE or op is RANGE_UNI_IGNORE: + emit(av[0]) + emit(av[1]) + elif op is CHARSET: + code.extend(av) + elif op is BIGCHARSET: + code.extend(av) + elif op is CATEGORY: + if flags & SRE_FLAG_LOCALE: + emit(CH_LOCALE[av]) + elif 
flags & SRE_FLAG_UNICODE: + emit(CH_UNICODE[av]) + else: + emit(av) + else: + raise PatternError(f"internal: unsupported set operator {op!r}") + emit(FAILURE) + +def _optimize_charset(charset, iscased=None, fixup=None, fixes=None): + # internal: optimize character set + out = [] + tail = [] + charmap = bytearray(256) + hascased = False + for op, av in charset: + while True: + try: + if op is LITERAL: + if fixup: # IGNORECASE and not LOCALE + av = fixup(av) + charmap[av] = 1 + if fixes and av in fixes: + for k in fixes[av]: + charmap[k] = 1 + if not hascased and iscased(av): + hascased = True + else: + charmap[av] = 1 + elif op is RANGE: + r = range(av[0], av[1]+1) + if fixup: # IGNORECASE and not LOCALE + if fixes: + for i in map(fixup, r): + charmap[i] = 1 + if i in fixes: + for k in fixes[i]: + charmap[k] = 1 + else: + for i in map(fixup, r): + charmap[i] = 1 + if not hascased: + hascased = any(map(iscased, r)) + else: + for i in r: + charmap[i] = 1 + elif op is NEGATE: + out.append((op, av)) + elif op is CATEGORY and tail and (CATEGORY, CH_NEGATE[av]) in tail: + # Optimize [\s\S] etc. + out = [] if out else _CHARSET_ALL + return out, False + else: + tail.append((op, av)) + except IndexError: + if len(charmap) == 256: + # character set contains non-UCS1 character codes + charmap += b'\0' * 0xff00 + continue + # Character set contains non-BMP character codes. + # For range, all BMP characters in the range are already + # proceeded. + if fixup: # IGNORECASE and not LOCALE + # For now, IN_UNI_IGNORE+LITERAL and + # IN_UNI_IGNORE+RANGE_UNI_IGNORE work for all non-BMP + # characters, because two characters (at least one of + # which is not in the BMP) match case-insensitively + # if and only if: + # 1) c1.lower() == c2.lower() + # 2) c1.lower() == c2 or c1.lower().upper() == c2 + # Also, both c.lower() and c.lower().upper() are single + # characters for every non-BMP character. + if op is RANGE: + if fixes: # not ASCII + op = RANGE_UNI_IGNORE + hascased = True + else: + assert op is LITERAL + if not hascased and iscased(av): + hascased = True + tail.append((op, av)) + break + + # compress character map + runs = [] + q = 0 + while True: + p = charmap.find(1, q) + if p < 0: + break + if len(runs) >= 2: + runs = None + break + q = charmap.find(0, p) + if q < 0: + runs.append((p, len(charmap))) + break + runs.append((p, q)) + if runs is not None: + # use literal/range + for p, q in runs: + if q - p == 1: + out.append((LITERAL, p)) + else: + out.append((RANGE, (p, q - 1))) + out += tail + # if the case was changed or new representation is more compact + if hascased or len(out) < len(charset): + return out, hascased + # else original character set is good enough + return charset, hascased + + # use bitmap + if len(charmap) == 256: + data = _mk_bitmap(charmap) + out.append((CHARSET, data)) + out += tail + return out, hascased + + # To represent a big charset, first a bitmap of all characters in the + # set is constructed. Then, this bitmap is sliced into chunks of 256 + # characters, duplicate chunks are eliminated, and each chunk is + # given a number. In the compiled expression, the charset is + # represented by a 32-bit word sequence, consisting of one word for + # the number of different chunks, a sequence of 256 bytes (64 words) + # of chunk numbers indexed by their original chunk position, and a + # sequence of 256-bit chunks (8 words each). + + # Compression is normally good: in a typical charset, large ranges of + # Unicode will be either completely excluded (e.g. 
if only cyrillic + # letters are to be matched), or completely included (e.g. if large + # subranges of Kanji match). These ranges will be represented by + # chunks of all one-bits or all zero-bits. + + # Matching can be also done efficiently: the more significant byte of + # the Unicode character is an index into the chunk number, and the + # less significant byte is a bit index in the chunk (just like the + # CHARSET matching). + + charmap = bytes(charmap) # should be hashable + comps = {} + mapping = bytearray(256) + block = 0 + data = bytearray() + for i in range(0, 65536, 256): + chunk = charmap[i: i + 256] + if chunk in comps: + mapping[i // 256] = comps[chunk] + else: + mapping[i // 256] = comps[chunk] = block + block += 1 + data += chunk + data = _mk_bitmap(data) + data[0:0] = [block] + _bytes_to_codes(mapping) + out.append((BIGCHARSET, data)) + out += tail + return out, hascased + +_CODEBITS = _sre.CODESIZE * 8 +MAXCODE = (1 << _CODEBITS) - 1 +_BITS_TRANS = b'0' + b'1' * 255 +def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int): + s = bits.translate(_BITS_TRANS)[::-1] + return [_int(s[i - _CODEBITS: i], 2) + for i in range(len(s), 0, -_CODEBITS)] + +def _bytes_to_codes(b): + # Convert block indices to word array + a = memoryview(b).cast('I') + assert a.itemsize == _sre.CODESIZE + assert len(a) * a.itemsize == len(b) + return a.tolist() + +def _simple(p): + # check if this subpattern is a "simple" operator + if len(p) != 1: + return False + op, av = p[0] + if op is SUBPATTERN: + return av[0] is None and _simple(av[-1]) + return op in _UNIT_CODES + +def _generate_overlap_table(prefix): + """ + Generate an overlap table for the following prefix. + An overlap table is a table of the same size as the prefix which + informs about the potential self-overlap for each index in the prefix: + - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...] 
+ - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with + prefix[0:k] + """ + table = [0] * len(prefix) + for i in range(1, len(prefix)): + idx = table[i - 1] + while prefix[i] != prefix[idx]: + if idx == 0: + table[i] = 0 + break + idx = table[idx - 1] + else: + table[i] = idx + 1 + return table + +def _get_iscased(flags): + if not flags & SRE_FLAG_IGNORECASE: + return None + elif flags & SRE_FLAG_UNICODE: + return _sre.unicode_iscased + else: + return _sre.ascii_iscased + +def _get_literal_prefix(pattern, flags): + # look for literal prefix + prefix = [] + prefixappend = prefix.append + prefix_skip = None + iscased = _get_iscased(flags) + for op, av in pattern.data: + if op is LITERAL: + if iscased and iscased(av): + break + prefixappend(av) + elif op is SUBPATTERN: + group, add_flags, del_flags, p = av + flags1 = _combine_flags(flags, add_flags, del_flags) + if flags1 & SRE_FLAG_IGNORECASE and flags1 & SRE_FLAG_LOCALE: + break + prefix1, prefix_skip1, got_all = _get_literal_prefix(p, flags1) + if prefix_skip is None: + if group is not None: + prefix_skip = len(prefix) + elif prefix_skip1 is not None: + prefix_skip = len(prefix) + prefix_skip1 + prefix.extend(prefix1) + if not got_all: + break + else: + break + else: + return prefix, prefix_skip, True + return prefix, prefix_skip, False + +def _get_charset_prefix(pattern, flags): + while True: + if not pattern.data: + return None + op, av = pattern.data[0] + if op is not SUBPATTERN: + break + group, add_flags, del_flags, pattern = av + flags = _combine_flags(flags, add_flags, del_flags) + if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE: + return None + + iscased = _get_iscased(flags) + if op is LITERAL: + if iscased and iscased(av): + return None + return [(op, av)] + elif op is BRANCH: + charset = [] + charsetappend = charset.append + for p in av[1]: + if not p: + return None + op, av = p[0] + if op is LITERAL and not (iscased and iscased(av)): + charsetappend((op, av)) + else: + return None + return charset + elif op is IN: + charset = av + if iscased: + for op, av in charset: + if op is LITERAL: + if iscased(av): + return None + elif op is RANGE: + if av[1] > 0xffff: + return None + if any(map(iscased, range(av[0], av[1]+1))): + return None + return charset + return None + +def _compile_info(code, pattern, flags): + # internal: compile an info block. 
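The _generate_overlap_table() helper above computes the classic Knuth-Morris-Pratt failure function over the literal prefix; the matcher uses it to avoid re-scanning input after a partial prefix match. A worked check (pure-Python replica of the same loop, illustrative only):

```python
# Replica of _generate_overlap_table's KMP failure function.
def overlap_table(prefix):
    table = [0] * len(prefix)
    for i in range(1, len(prefix)):
        idx = table[i - 1]
        while prefix[i] != prefix[idx]:
            if idx == 0:
                table[i] = 0
                break
            idx = table[idx - 1]
        else:
            table[i] = idx + 1
    return table

# For "ababd": after a mismatch at "d", the matcher can resume at
# offset table[3] = 2 instead of restarting from the beginning.
assert overlap_table("ababd") == [0, 0, 1, 2, 0]
```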
in the current version, + # this contains min/max pattern width, and an optional literal + # prefix or a character map + lo, hi = pattern.getwidth() + if hi > MAXCODE: + hi = MAXCODE + if lo == 0: + code.extend([INFO, 4, 0, lo, hi]) + return + # look for a literal prefix + prefix = [] + prefix_skip = 0 + charset = None # not used + if not (flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE): + # look for literal prefix + prefix, prefix_skip, got_all = _get_literal_prefix(pattern, flags) + # if no prefix, look for charset prefix + if not prefix: + charset = _get_charset_prefix(pattern, flags) + if charset: + charset, hascased = _optimize_charset(charset) + assert not hascased + if charset == _CHARSET_ALL: + charset = None +## if prefix: +## print("*** PREFIX", prefix, prefix_skip) +## if charset: +## print("*** CHARSET", charset) + # add an info block + emit = code.append + emit(INFO) + skip = len(code); emit(0) + # literal flag + mask = 0 + if prefix: + mask = SRE_INFO_PREFIX + if prefix_skip is None and got_all: + mask = mask | SRE_INFO_LITERAL + elif charset: + mask = mask | SRE_INFO_CHARSET + emit(mask) + # pattern length + if lo < MAXCODE: + emit(lo) + else: + emit(MAXCODE) + prefix = prefix[:MAXCODE] + emit(hi) + # add literal prefix + if prefix: + emit(len(prefix)) # length + if prefix_skip is None: + prefix_skip = len(prefix) + emit(prefix_skip) # skip + code.extend(prefix) + # generate overlap table + code.extend(_generate_overlap_table(prefix)) + elif charset: + _compile_charset(charset, flags, code) + code[skip] = len(code) - skip + +def isstring(obj): + return isinstance(obj, (str, bytes)) + +def _code(p, flags): + + flags = p.state.flags | flags + code = [] + + # compile info block + _compile_info(code, p, flags) + + # compile the pattern + _compile(code, p.data, flags) + + code.append(SUCCESS) + + return code + +def _hex_code(code): + return '[%s]' % ', '.join('%#0*x' % (_sre.CODESIZE*2+2, x) for x in code) + +def dis(code): + import sys + + labels = set() + level = 0 + offset_width = len(str(len(code) - 1)) + + def dis_(start, end): + def print_(*args, to=None): + if to is not None: + labels.add(to) + args += ('(to %d)' % (to,),) + print('%*d%s ' % (offset_width, start, ':' if start in labels else '.'), + end=' '*(level-1)) + print(*args) + + def print_2(*args): + print(end=' '*(offset_width + 2*level)) + print(*args) + + nonlocal level + level += 1 + i = start + while i < end: + start = i + op = code[i] + i += 1 + op = OPCODES[op] + if op in (SUCCESS, FAILURE, ANY, ANY_ALL, + MAX_UNTIL, MIN_UNTIL, NEGATE): + print_(op) + elif op in (LITERAL, NOT_LITERAL, + LITERAL_IGNORE, NOT_LITERAL_IGNORE, + LITERAL_UNI_IGNORE, NOT_LITERAL_UNI_IGNORE, + LITERAL_LOC_IGNORE, NOT_LITERAL_LOC_IGNORE): + arg = code[i] + i += 1 + print_(op, '%#02x (%r)' % (arg, chr(arg))) + elif op is AT: + arg = code[i] + i += 1 + arg = str(ATCODES[arg]) + assert arg[:3] == 'AT_' + print_(op, arg[3:]) + elif op is CATEGORY: + arg = code[i] + i += 1 + arg = str(CHCODES[arg]) + assert arg[:9] == 'CATEGORY_' + print_(op, arg[9:]) + elif op in (IN, IN_IGNORE, IN_UNI_IGNORE, IN_LOC_IGNORE): + skip = code[i] + print_(op, skip, to=i+skip) + dis_(i+1, i+skip) + i += skip + elif op in (RANGE, RANGE_UNI_IGNORE): + lo, hi = code[i: i+2] + i += 2 + print_(op, '%#02x %#02x (%r-%r)' % (lo, hi, chr(lo), chr(hi))) + elif op is CHARSET: + print_(op, _hex_code(code[i: i + 256//_CODEBITS])) + i += 256//_CODEBITS + elif op is BIGCHARSET: + arg = code[i] + i += 1 + mapping = list(b''.join(x.to_bytes(_sre.CODESIZE, 
sys.byteorder) + for x in code[i: i + 256//_sre.CODESIZE])) + print_(op, arg, mapping) + i += 256//_sre.CODESIZE + level += 1 + for j in range(arg): + print_2(_hex_code(code[i: i + 256//_CODEBITS])) + i += 256//_CODEBITS + level -= 1 + elif op in (MARK, GROUPREF, GROUPREF_IGNORE, GROUPREF_UNI_IGNORE, + GROUPREF_LOC_IGNORE): + arg = code[i] + i += 1 + print_(op, arg) + elif op is JUMP: + skip = code[i] + print_(op, skip, to=i+skip) + i += 1 + elif op is BRANCH: + skip = code[i] + print_(op, skip, to=i+skip) + while skip: + dis_(i+1, i+skip) + i += skip + start = i + skip = code[i] + if skip: + print_('branch', skip, to=i+skip) + else: + print_(FAILURE) + i += 1 + elif op in (REPEAT, REPEAT_ONE, MIN_REPEAT_ONE, + POSSESSIVE_REPEAT, POSSESSIVE_REPEAT_ONE): + skip, min, max = code[i: i+3] + if max == MAXREPEAT: + max = 'MAXREPEAT' + print_(op, skip, min, max, to=i+skip) + dis_(i+3, i+skip) + i += skip + elif op is GROUPREF_EXISTS: + arg, skip = code[i: i+2] + print_(op, arg, skip, to=i+skip) + i += 2 + elif op in (ASSERT, ASSERT_NOT): + skip, arg = code[i: i+2] + print_(op, skip, arg, to=i+skip) + dis_(i+2, i+skip) + i += skip + elif op is ATOMIC_GROUP: + skip = code[i] + print_(op, skip, to=i+skip) + dis_(i+1, i+skip) + i += skip + elif op is INFO: + skip, flags, min, max = code[i: i+4] + if max == MAXREPEAT: + max = 'MAXREPEAT' + print_(op, skip, bin(flags), min, max, to=i+skip) + start = i+4 + if flags & SRE_INFO_PREFIX: + prefix_len, prefix_skip = code[i+4: i+6] + print_2(' prefix_skip', prefix_skip) + start = i + 6 + prefix = code[start: start+prefix_len] + print_2(' prefix', + '[%s]' % ', '.join('%#02x' % x for x in prefix), + '(%r)' % ''.join(map(chr, prefix))) + start += prefix_len + print_2(' overlap', code[start: start+prefix_len]) + start += prefix_len + if flags & SRE_INFO_CHARSET: + level += 1 + print_2('in') + dis_(start, i+skip) + level -= 1 + i += skip + else: + raise ValueError(op) + + level -= 1 + + dis_(0, len(code)) + + +def compile(p, flags=0): + # internal: convert pattern list to internal format + + if isstring(p): + pattern = p + p = _parser.parse(p, flags) + else: + pattern = None + + code = _code(p, flags) + + if flags & SRE_FLAG_DEBUG: + print() + dis(code) + + # map in either direction + groupindex = p.state.groupdict + indexgroup = [None] * p.state.groups + for k, i in groupindex.items(): + indexgroup[i] = k + + return _sre.compile( + pattern, flags | p.state.flags, code, + p.state.groups-1, + groupindex, tuple(indexgroup) + ) diff --git a/Python314_4_x64_Template/Lib/re/_constants.py b/Python314_4_x64_Template/Lib/re/_constants.py new file mode 100644 index 00000000..d6f32302 --- /dev/null +++ b/Python314_4_x64_Template/Lib/re/_constants.py @@ -0,0 +1,224 @@ +# +# Secret Labs' Regular Expression Engine +# +# various symbols used by the regular expression engine. +# run this script to update the _sre include files! +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# See the __init__.py file for information on usage and redistribution. +# + +"""Internal support module for sre""" + +# update when constants are added or removed + +MAGIC = 20230612 + +from _sre import MAXREPEAT, MAXGROUPS # noqa: F401 + +# SRE standard exception (access as sre.error) +# should this really be here? + +class PatternError(Exception): + """Exception raised for invalid regular expressions. 
+ + Attributes: + + msg: The unformatted error message + pattern: The regular expression pattern + pos: The index in the pattern where compilation failed (may be None) + lineno: The line corresponding to pos (may be None) + colno: The column corresponding to pos (may be None) + """ + + __module__ = 're' + + def __init__(self, msg, pattern=None, pos=None): + self.msg = msg + self.pattern = pattern + self.pos = pos + if pattern is not None and pos is not None: + msg = '%s at position %d' % (msg, pos) + if isinstance(pattern, str): + newline = '\n' + else: + newline = b'\n' + self.lineno = pattern.count(newline, 0, pos) + 1 + self.colno = pos - pattern.rfind(newline, 0, pos) + if newline in pattern: + msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno) + else: + self.lineno = self.colno = None + super().__init__(msg) + + +# Backward compatibility after renaming in 3.13 +error = PatternError + +class _NamedIntConstant(int): + def __new__(cls, value, name): + self = super(_NamedIntConstant, cls).__new__(cls, value) + self.name = name + return self + + def __repr__(self): + return self.name + + __reduce__ = None + +MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT') + +def _makecodes(*names): + items = [_NamedIntConstant(i, name) for i, name in enumerate(names)] + globals().update({item.name: item for item in items}) + return items + +# operators +OPCODES = _makecodes( + # failure=0 success=1 (just because it looks better that way :-) + 'FAILURE', 'SUCCESS', + + 'ANY', 'ANY_ALL', + 'ASSERT', 'ASSERT_NOT', + 'AT', + 'BRANCH', + 'CATEGORY', + 'CHARSET', 'BIGCHARSET', + 'GROUPREF', 'GROUPREF_EXISTS', + 'IN', + 'INFO', + 'JUMP', + 'LITERAL', + 'MARK', + 'MAX_UNTIL', + 'MIN_UNTIL', + 'NOT_LITERAL', + 'NEGATE', + 'RANGE', + 'REPEAT', + 'REPEAT_ONE', + 'SUBPATTERN', + 'MIN_REPEAT_ONE', + 'ATOMIC_GROUP', + 'POSSESSIVE_REPEAT', + 'POSSESSIVE_REPEAT_ONE', + + 'GROUPREF_IGNORE', + 'IN_IGNORE', + 'LITERAL_IGNORE', + 'NOT_LITERAL_IGNORE', + + 'GROUPREF_LOC_IGNORE', + 'IN_LOC_IGNORE', + 'LITERAL_LOC_IGNORE', + 'NOT_LITERAL_LOC_IGNORE', + + 'GROUPREF_UNI_IGNORE', + 'IN_UNI_IGNORE', + 'LITERAL_UNI_IGNORE', + 'NOT_LITERAL_UNI_IGNORE', + 'RANGE_UNI_IGNORE', + + # The following opcodes are only occurred in the parser output, + # but not in the compiled code. 
+ 'MIN_REPEAT', 'MAX_REPEAT', +) +del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT + +# positions +ATCODES = _makecodes( + 'AT_BEGINNING', 'AT_BEGINNING_LINE', 'AT_BEGINNING_STRING', + 'AT_BOUNDARY', 'AT_NON_BOUNDARY', + 'AT_END', 'AT_END_LINE', 'AT_END_STRING', + + 'AT_LOC_BOUNDARY', 'AT_LOC_NON_BOUNDARY', + + 'AT_UNI_BOUNDARY', 'AT_UNI_NON_BOUNDARY', +) + +# categories +CHCODES = _makecodes( + 'CATEGORY_DIGIT', 'CATEGORY_NOT_DIGIT', + 'CATEGORY_SPACE', 'CATEGORY_NOT_SPACE', + 'CATEGORY_WORD', 'CATEGORY_NOT_WORD', + 'CATEGORY_LINEBREAK', 'CATEGORY_NOT_LINEBREAK', + + 'CATEGORY_LOC_WORD', 'CATEGORY_LOC_NOT_WORD', + + 'CATEGORY_UNI_DIGIT', 'CATEGORY_UNI_NOT_DIGIT', + 'CATEGORY_UNI_SPACE', 'CATEGORY_UNI_NOT_SPACE', + 'CATEGORY_UNI_WORD', 'CATEGORY_UNI_NOT_WORD', + 'CATEGORY_UNI_LINEBREAK', 'CATEGORY_UNI_NOT_LINEBREAK', +) + + +# replacement operations for "ignore case" mode +OP_IGNORE = { + LITERAL: LITERAL_IGNORE, + NOT_LITERAL: NOT_LITERAL_IGNORE, +} + +OP_LOCALE_IGNORE = { + LITERAL: LITERAL_LOC_IGNORE, + NOT_LITERAL: NOT_LITERAL_LOC_IGNORE, +} + +OP_UNICODE_IGNORE = { + LITERAL: LITERAL_UNI_IGNORE, + NOT_LITERAL: NOT_LITERAL_UNI_IGNORE, +} + +AT_MULTILINE = { + AT_BEGINNING: AT_BEGINNING_LINE, + AT_END: AT_END_LINE +} + +AT_LOCALE = { + AT_BOUNDARY: AT_LOC_BOUNDARY, + AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY +} + +AT_UNICODE = { + AT_BOUNDARY: AT_UNI_BOUNDARY, + AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY +} + +CH_LOCALE = { + CATEGORY_DIGIT: CATEGORY_DIGIT, + CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, + CATEGORY_SPACE: CATEGORY_SPACE, + CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, + CATEGORY_WORD: CATEGORY_LOC_WORD, + CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, + CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, + CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK +} + +CH_UNICODE = { + CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, + CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT, + CATEGORY_SPACE: CATEGORY_UNI_SPACE, + CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, + CATEGORY_WORD: CATEGORY_UNI_WORD, + CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, + CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, + CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK +} + +CH_NEGATE = dict(zip(CHCODES[::2] + CHCODES[1::2], CHCODES[1::2] + CHCODES[::2])) + +# flags +SRE_FLAG_IGNORECASE = 2 # case insensitive +SRE_FLAG_LOCALE = 4 # honour system locale +SRE_FLAG_MULTILINE = 8 # treat target as multiline string +SRE_FLAG_DOTALL = 16 # treat target as a single string +SRE_FLAG_UNICODE = 32 # use unicode "locale" +SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments +SRE_FLAG_DEBUG = 128 # debugging +SRE_FLAG_ASCII = 256 # use ascii "locale" + +# flags for INFO primitive +SRE_INFO_PREFIX = 1 # has prefix +SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) +SRE_INFO_CHARSET = 4 # pattern starts with character from given set diff --git a/Python314_4_x64_Template/Lib/re/_parser.py b/Python314_4_x64_Template/Lib/re/_parser.py new file mode 100644 index 00000000..35ab7ede --- /dev/null +++ b/Python314_4_x64_Template/Lib/re/_parser.py @@ -0,0 +1,1066 @@ +# +# Secret Labs' Regular Expression Engine +# +# convert re-style regular expression to sre pattern +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# See the __init__.py file for information on usage and redistribution. 
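The PatternError class above (with re.error kept as an alias after the 3.13 rename) carries the position metadata that re reports for malformed patterns. For example (observed stdlib behaviour, shown for illustration):

```python
import re

try:
    re.compile("(?P<name>a")        # unterminated group
except re.PatternError as exc:      # re.error is the same class
    print(exc.msg)                  # missing ), unterminated subpattern
    print(exc.pattern, exc.pos)     # offending pattern and 0-based index
    print(exc.lineno, exc.colno)    # 1-based line and column
```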
+# + +"""Internal support module for sre""" + +# XXX: show string offset and offending character for all errors + +from ._constants import * + +SPECIAL_CHARS = ".\\[{()*+?^$|" +REPEAT_CHARS = "*+?{" + +DIGITS = frozenset("0123456789") + +OCTDIGITS = frozenset("01234567") +HEXDIGITS = frozenset("0123456789abcdefABCDEF") +ASCIILETTERS = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +WHITESPACE = frozenset(" \t\n\r\v\f") + +_REPEATCODES = frozenset({MIN_REPEAT, MAX_REPEAT, POSSESSIVE_REPEAT}) +_UNITCODES = frozenset({ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY}) + +ESCAPES = { + r"\a": (LITERAL, ord("\a")), + r"\b": (LITERAL, ord("\b")), + r"\f": (LITERAL, ord("\f")), + r"\n": (LITERAL, ord("\n")), + r"\r": (LITERAL, ord("\r")), + r"\t": (LITERAL, ord("\t")), + r"\v": (LITERAL, ord("\v")), + r"\\": (LITERAL, ord("\\")) +} + +CATEGORIES = { + r"\A": (AT, AT_BEGINNING_STRING), # start of string + r"\b": (AT, AT_BOUNDARY), + r"\B": (AT, AT_NON_BOUNDARY), + r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]), + r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]), + r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]), + r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]), + r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]), + r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]), + r"\z": (AT, AT_END_STRING), # end of string + r"\Z": (AT, AT_END_STRING), # end of string (obsolete) +} + +FLAGS = { + # standard flags + "i": SRE_FLAG_IGNORECASE, + "L": SRE_FLAG_LOCALE, + "m": SRE_FLAG_MULTILINE, + "s": SRE_FLAG_DOTALL, + "x": SRE_FLAG_VERBOSE, + # extensions + "a": SRE_FLAG_ASCII, + "u": SRE_FLAG_UNICODE, +} + +TYPE_FLAGS = SRE_FLAG_ASCII | SRE_FLAG_LOCALE | SRE_FLAG_UNICODE +GLOBAL_FLAGS = SRE_FLAG_DEBUG + +# Maximal value returned by SubPattern.getwidth(). +# Must be larger than MAXREPEAT, MAXCODE and sys.maxsize. 
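The FLAGS table above drives inline-flag syntax such as (?i) and the scoped form (?i:...); TYPE_FLAGS marks 'a', 'u' and 'L' as mutually exclusive, which _parse_flags() later enforces. Illustrative stdlib behaviour:

```python
import re

# Scoped inline flags apply only inside their group:
assert re.fullmatch(r"(?i:abc)-abc", "ABC-abc")
assert re.fullmatch(r"(?i:abc)-abc", "ABC-ABC") is None

# The type flags 'a', 'u' and 'L' cannot be combined:
try:
    re.compile(r"(?au)abc")
except re.PatternError as exc:
    print(exc.msg)  # bad inline flags: flags 'a', 'u' and 'L' are incompatible
```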
+MAXWIDTH = 1 << 64 + +class State: + # keeps track of state for parsing + def __init__(self): + self.flags = 0 + self.groupdict = {} + self.groupwidths = [None] # group 0 + self.lookbehindgroups = None + self.grouprefpos = {} + @property + def groups(self): + return len(self.groupwidths) + def opengroup(self, name=None): + gid = self.groups + self.groupwidths.append(None) + if self.groups > MAXGROUPS: + raise error("too many groups") + if name is not None: + ogid = self.groupdict.get(name, None) + if ogid is not None: + raise error("redefinition of group name %r as group %d; " + "was group %d" % (name, gid, ogid)) + self.groupdict[name] = gid + return gid + def closegroup(self, gid, p): + self.groupwidths[gid] = p.getwidth() + def checkgroup(self, gid): + return gid < self.groups and self.groupwidths[gid] is not None + + def checklookbehindgroup(self, gid, source): + if self.lookbehindgroups is not None: + if not self.checkgroup(gid): + raise source.error('cannot refer to an open group') + if gid >= self.lookbehindgroups: + raise source.error('cannot refer to group defined in the same ' + 'lookbehind subpattern') + +class SubPattern: + # a subpattern, in intermediate form + def __init__(self, state, data=None): + self.state = state + if data is None: + data = [] + self.data = data + self.width = None + + def dump(self, level=0): + seqtypes = (tuple, list) + for op, av in self.data: + print(level*" " + str(op), end='') + if op is IN: + # member sublanguage + print() + for op, a in av: + print((level+1)*" " + str(op), a) + elif op is BRANCH: + print() + for i, a in enumerate(av[1]): + if i: + print(level*" " + "OR") + a.dump(level+1) + elif op is GROUPREF_EXISTS: + condgroup, item_yes, item_no = av + print('', condgroup) + item_yes.dump(level+1) + if item_no: + print(level*" " + "ELSE") + item_no.dump(level+1) + elif isinstance(av, SubPattern): + print() + av.dump(level+1) + elif isinstance(av, seqtypes): + nl = False + for a in av: + if isinstance(a, SubPattern): + if not nl: + print() + a.dump(level+1) + nl = True + else: + if not nl: + print(' ', end='') + print(a, end='') + nl = False + if not nl: + print() + else: + print('', av) + def __repr__(self): + return repr(self.data) + def __len__(self): + return len(self.data) + def __delitem__(self, index): + del self.data[index] + def __getitem__(self, index): + if isinstance(index, slice): + return SubPattern(self.state, self.data[index]) + return self.data[index] + def __setitem__(self, index, code): + self.data[index] = code + def insert(self, index, code): + self.data.insert(index, code) + def append(self, code): + self.data.append(code) + def getwidth(self): + # determine the width (min, max) for this subpattern + if self.width is not None: + return self.width + lo = hi = 0 + for op, av in self.data: + if op is BRANCH: + i = MAXWIDTH + j = 0 + for av in av[1]: + l, h = av.getwidth() + i = min(i, l) + j = max(j, h) + lo = lo + i + hi = hi + j + elif op is ATOMIC_GROUP: + i, j = av.getwidth() + lo = lo + i + hi = hi + j + elif op is SUBPATTERN: + i, j = av[-1].getwidth() + lo = lo + i + hi = hi + j + elif op in _REPEATCODES: + i, j = av[2].getwidth() + lo = lo + i * av[0] + if av[1] == MAXREPEAT and j: + hi = MAXWIDTH + else: + hi = hi + j * av[1] + elif op in _UNITCODES: + lo = lo + 1 + hi = hi + 1 + elif op is GROUPREF: + i, j = self.state.groupwidths[av] + lo = lo + i + hi = hi + j + elif op is GROUPREF_EXISTS: + i, j = av[1].getwidth() + if av[2] is not None: + l, h = av[2].getwidth() + i = min(i, l) + j = max(j, h) + else: + i = 0 + 
lo = lo + i + hi = hi + j + elif op is SUCCESS: + break + self.width = min(lo, MAXWIDTH), min(hi, MAXWIDTH) + return self.width + +class Tokenizer: + def __init__(self, string): + self.istext = isinstance(string, str) + self.string = string + if not self.istext: + string = str(string, 'latin1') + self.decoded_string = string + self.index = 0 + self.next = None + self.__next() + def __next(self): + index = self.index + try: + char = self.decoded_string[index] + except IndexError: + self.next = None + return + if char == "\\": + index += 1 + try: + char += self.decoded_string[index] + except IndexError: + raise error("bad escape (end of pattern)", + self.string, len(self.string) - 1) from None + self.index = index + 1 + self.next = char + def match(self, char): + if char == self.next: + self.__next() + return True + return False + def get(self): + this = self.next + self.__next() + return this + def getwhile(self, n, charset): + result = '' + for _ in range(n): + c = self.next + if c not in charset: + break + result += c + self.__next() + return result + def getuntil(self, terminator, name): + result = '' + while True: + c = self.next + self.__next() + if c is None: + if not result: + raise self.error("missing " + name) + raise self.error("missing %s, unterminated name" % terminator, + len(result)) + if c == terminator: + if not result: + raise self.error("missing " + name, 1) + break + result += c + return result + @property + def pos(self): + return self.index - len(self.next or '') + def tell(self): + return self.index - len(self.next or '') + def seek(self, index): + self.index = index + self.__next() + + def error(self, msg, offset=0): + if not self.istext: + msg = msg.encode('ascii', 'backslashreplace').decode('ascii') + return error(msg, self.string, self.tell() - offset) + + def checkgroupname(self, name, offset): + if not (self.istext or name.isascii()): + msg = "bad character in group name %a" % name + raise self.error(msg, len(name) + offset) + if not name.isidentifier(): + msg = "bad character in group name %r" % name + raise self.error(msg, len(name) + offset) + +def _class_escape(source, escape): + # handle escape code inside character class + code = ESCAPES.get(escape) + if code: + return code + code = CATEGORIES.get(escape) + if code and code[0] is IN: + return code + try: + c = escape[1:2] + if c == "x": + # hexadecimal escape (exactly two digits) + escape += source.getwhile(2, HEXDIGITS) + if len(escape) != 4: + raise source.error("incomplete escape %s" % escape, len(escape)) + return LITERAL, int(escape[2:], 16) + elif c == "u" and source.istext: + # unicode escape (exactly four digits) + escape += source.getwhile(4, HEXDIGITS) + if len(escape) != 6: + raise source.error("incomplete escape %s" % escape, len(escape)) + return LITERAL, int(escape[2:], 16) + elif c == "U" and source.istext: + # unicode escape (exactly eight digits) + escape += source.getwhile(8, HEXDIGITS) + if len(escape) != 10: + raise source.error("incomplete escape %s" % escape, len(escape)) + c = int(escape[2:], 16) + chr(c) # raise ValueError for invalid code + return LITERAL, c + elif c == "N" and source.istext: + import unicodedata + # named unicode escape e.g. 
\N{EM DASH} + if not source.match('{'): + raise source.error("missing {") + charname = source.getuntil('}', 'character name') + try: + c = ord(unicodedata.lookup(charname)) + except (KeyError, TypeError): + raise source.error("undefined character name %r" % charname, + len(charname) + len(r'\N{}')) from None + return LITERAL, c + elif c in OCTDIGITS: + # octal escape (up to three digits) + escape += source.getwhile(2, OCTDIGITS) + c = int(escape[1:], 8) + if c > 0o377: + raise source.error('octal escape value %s outside of ' + 'range 0-0o377' % escape, len(escape)) + return LITERAL, c + elif c in DIGITS: + raise ValueError + if len(escape) == 2: + if c in ASCIILETTERS: + raise source.error('bad escape %s' % escape, len(escape)) + return LITERAL, ord(escape[1]) + except ValueError: + pass + raise source.error("bad escape %s" % escape, len(escape)) + +def _escape(source, escape, state): + # handle escape code in expression + code = CATEGORIES.get(escape) + if code: + return code + code = ESCAPES.get(escape) + if code: + return code + try: + c = escape[1:2] + if c == "x": + # hexadecimal escape + escape += source.getwhile(2, HEXDIGITS) + if len(escape) != 4: + raise source.error("incomplete escape %s" % escape, len(escape)) + return LITERAL, int(escape[2:], 16) + elif c == "u" and source.istext: + # unicode escape (exactly four digits) + escape += source.getwhile(4, HEXDIGITS) + if len(escape) != 6: + raise source.error("incomplete escape %s" % escape, len(escape)) + return LITERAL, int(escape[2:], 16) + elif c == "U" and source.istext: + # unicode escape (exactly eight digits) + escape += source.getwhile(8, HEXDIGITS) + if len(escape) != 10: + raise source.error("incomplete escape %s" % escape, len(escape)) + c = int(escape[2:], 16) + chr(c) # raise ValueError for invalid code + return LITERAL, c + elif c == "N" and source.istext: + import unicodedata + # named unicode escape e.g. 
\N{EM DASH} + if not source.match('{'): + raise source.error("missing {") + charname = source.getuntil('}', 'character name') + try: + c = ord(unicodedata.lookup(charname)) + except (KeyError, TypeError): + raise source.error("undefined character name %r" % charname, + len(charname) + len(r'\N{}')) from None + return LITERAL, c + elif c == "0": + # octal escape + escape += source.getwhile(2, OCTDIGITS) + return LITERAL, int(escape[1:], 8) + elif c in DIGITS: + # octal escape *or* decimal group reference (sigh) + if source.next in DIGITS: + escape += source.get() + if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and + source.next in OCTDIGITS): + # got three octal digits; this is an octal escape + escape += source.get() + c = int(escape[1:], 8) + if c > 0o377: + raise source.error('octal escape value %s outside of ' + 'range 0-0o377' % escape, + len(escape)) + return LITERAL, c + # not an octal escape, so this is a group reference + group = int(escape[1:]) + if group < state.groups: + if not state.checkgroup(group): + raise source.error("cannot refer to an open group", + len(escape)) + state.checklookbehindgroup(group, source) + return GROUPREF, group + raise source.error("invalid group reference %d" % group, len(escape) - 1) + if len(escape) == 2: + if c in ASCIILETTERS: + raise source.error("bad escape %s" % escape, len(escape)) + return LITERAL, ord(escape[1]) + except ValueError: + pass + raise source.error("bad escape %s" % escape, len(escape)) + +def _uniq(items): + return list(dict.fromkeys(items)) + +def _parse_sub(source, state, verbose, nested): + # parse an alternation: a|b|c + + items = [] + itemsappend = items.append + sourcematch = source.match + start = source.tell() + while True: + itemsappend(_parse(source, state, verbose, nested + 1, + not nested and not items)) + if not sourcematch("|"): + break + if not nested: + verbose = state.flags & SRE_FLAG_VERBOSE + + if len(items) == 1: + return items[0] + + subpattern = SubPattern(state) + + # check if all items share a common prefix + while True: + prefix = None + for item in items: + if not item: + break + if prefix is None: + prefix = item[0] + elif item[0] != prefix: + break + else: + # all subitems start with a common "prefix". 
+ # move it out of the branch + for item in items: + del item[0] + subpattern.append(prefix) + continue # check next one + break + + # check if the branch can be replaced by a character set + set = [] + for item in items: + if len(item) != 1: + break + op, av = item[0] + if op is LITERAL: + set.append((op, av)) + elif op is IN and av[0][0] is not NEGATE: + set.extend(av) + else: + break + else: + # we can store this as a character set instead of a + # branch (the compiler may optimize this even more) + subpattern.append((IN, _uniq(set))) + return subpattern + + subpattern.append((BRANCH, (None, items))) + return subpattern + +def _parse(source, state, verbose, nested, first=False): + # parse a simple pattern + subpattern = SubPattern(state) + + # precompute constants into local variables + subpatternappend = subpattern.append + sourceget = source.get + sourcematch = source.match + _len = len + _ord = ord + + while True: + + this = source.next + if this is None: + break # end of pattern + if this in "|)": + break # end of subpattern + sourceget() + + if verbose: + # skip whitespace and comments + if this in WHITESPACE: + continue + if this == "#": + while True: + this = sourceget() + if this is None or this == "\n": + break + continue + + if this[0] == "\\": + code = _escape(source, this, state) + subpatternappend(code) + + elif this not in SPECIAL_CHARS: + subpatternappend((LITERAL, _ord(this))) + + elif this == "[": + here = source.tell() - 1 + # character set + set = [] + setappend = set.append +## if sourcematch(":"): +## pass # handle character classes + if source.next == '[': + import warnings + warnings.warn( + 'Possible nested set at position %d' % source.tell(), + FutureWarning, stacklevel=nested + 6 + ) + negate = sourcematch("^") + # check remaining characters + while True: + this = sourceget() + if this is None: + raise source.error("unterminated character set", + source.tell() - here) + if this == "]" and set: + break + elif this[0] == "\\": + code1 = _class_escape(source, this) + else: + if set and this in '-&~|' and source.next == this: + import warnings + warnings.warn( + 'Possible set %s at position %d' % ( + 'difference' if this == '-' else + 'intersection' if this == '&' else + 'symmetric difference' if this == '~' else + 'union', + source.tell() - 1), + FutureWarning, stacklevel=nested + 6 + ) + code1 = LITERAL, _ord(this) + if sourcematch("-"): + # potential range + that = sourceget() + if that is None: + raise source.error("unterminated character set", + source.tell() - here) + if that == "]": + if code1[0] is IN: + code1 = code1[1][0] + setappend(code1) + setappend((LITERAL, _ord("-"))) + break + if that[0] == "\\": + code2 = _class_escape(source, that) + else: + if that == '-': + import warnings + warnings.warn( + 'Possible set difference at position %d' % ( + source.tell() - 2), + FutureWarning, stacklevel=nested + 6 + ) + code2 = LITERAL, _ord(that) + if code1[0] != LITERAL or code2[0] != LITERAL: + msg = "bad character range %s-%s" % (this, that) + raise source.error(msg, len(this) + 1 + len(that)) + lo = code1[1] + hi = code2[1] + if hi < lo: + msg = "bad character range %s-%s" % (this, that) + raise source.error(msg, len(this) + 1 + len(that)) + setappend((RANGE, (lo, hi))) + else: + if code1[0] is IN: + code1 = code1[1][0] + setappend(code1) + + set = _uniq(set) + # XXX: should move set optimization to compiler! 
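_parse_sub() above first factors a common prefix out of the alternatives, then, when every alternative is a single literal, stores the branch as a character set so the compiler can emit IN instead of BRANCH. This is visible through the parser's debug dump (internal API, for illustration only):

```python
from re import _parser

# "a|b|c" collapses to one IN node (a character set), not a BRANCH:
_parser.parse("a|b|c").dump()
# IN
#   LITERAL 97
#   LITERAL 98
#   LITERAL 99

# Multi-character alternatives keep the BRANCH structure:
_parser.parse("ab|cd").dump()
```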
+ if _len(set) == 1 and set[0][0] is LITERAL: + # optimization + if negate: + subpatternappend((NOT_LITERAL, set[0][1])) + else: + subpatternappend(set[0]) + else: + if negate: + set.insert(0, (NEGATE, None)) + # charmap optimization can't be added here because + # global flags still are not known + subpatternappend((IN, set)) + + elif this in REPEAT_CHARS: + # repeat previous item + here = source.tell() + if this == "?": + min, max = 0, 1 + elif this == "*": + min, max = 0, MAXREPEAT + + elif this == "+": + min, max = 1, MAXREPEAT + elif this == "{": + if source.next == "}": + subpatternappend((LITERAL, _ord(this))) + continue + + min, max = 0, MAXREPEAT + lo = hi = "" + while source.next in DIGITS: + lo += sourceget() + if sourcematch(","): + while source.next in DIGITS: + hi += sourceget() + else: + hi = lo + if not sourcematch("}"): + subpatternappend((LITERAL, _ord(this))) + source.seek(here) + continue + + if lo: + min = int(lo) + if min >= MAXREPEAT: + raise OverflowError("the repetition number is too large") + if hi: + max = int(hi) + if max >= MAXREPEAT: + raise OverflowError("the repetition number is too large") + if max < min: + raise source.error("min repeat greater than max repeat", + source.tell() - here) + else: + raise AssertionError("unsupported quantifier %r" % (char,)) + # figure out which item to repeat + if subpattern: + item = subpattern[-1:] + else: + item = None + if not item or item[0][0] is AT: + raise source.error("nothing to repeat", + source.tell() - here + len(this)) + if item[0][0] in _REPEATCODES: + raise source.error("multiple repeat", + source.tell() - here + len(this)) + if item[0][0] is SUBPATTERN: + group, add_flags, del_flags, p = item[0][1] + if group is None and not add_flags and not del_flags: + item = p + if sourcematch("?"): + # Non-Greedy Match + subpattern[-1] = (MIN_REPEAT, (min, max, item)) + elif sourcematch("+"): + # Possessive Match (Always Greedy) + subpattern[-1] = (POSSESSIVE_REPEAT, (min, max, item)) + else: + # Greedy Match + subpattern[-1] = (MAX_REPEAT, (min, max, item)) + + elif this == ".": + subpatternappend((ANY, None)) + + elif this == "(": + start = source.tell() - 1 + capture = True + atomic = False + name = None + add_flags = 0 + del_flags = 0 + if sourcematch("?"): + # options + char = sourceget() + if char is None: + raise source.error("unexpected end of pattern") + if char == "P": + # python extensions + if sourcematch("<"): + # named group: skip forward to end of name + name = source.getuntil(">", "group name") + source.checkgroupname(name, 1) + elif sourcematch("="): + # named backreference + name = source.getuntil(")", "group name") + source.checkgroupname(name, 1) + gid = state.groupdict.get(name) + if gid is None: + msg = "unknown group name %r" % name + raise source.error(msg, len(name) + 1) + if not state.checkgroup(gid): + raise source.error("cannot refer to an open group", + len(name) + 1) + state.checklookbehindgroup(gid, source) + subpatternappend((GROUPREF, gid)) + continue + + else: + char = sourceget() + if char is None: + raise source.error("unexpected end of pattern") + raise source.error("unknown extension ?P" + char, + len(char) + 2) + elif char == ":": + # non-capturing group + capture = False + elif char == "#": + # comment + while True: + if source.next is None: + raise source.error("missing ), unterminated comment", + source.tell() - start) + if sourceget() == ")": + break + continue + + elif char in "=!<": + # lookahead assertions + dir = 1 + if char == "<": + char = sourceget() + if char is None: + 
raise source.error("unexpected end of pattern") + if char not in "=!": + raise source.error("unknown extension ?<" + char, + len(char) + 2) + dir = -1 # lookbehind + lookbehindgroups = state.lookbehindgroups + if lookbehindgroups is None: + state.lookbehindgroups = state.groups + p = _parse_sub(source, state, verbose, nested + 1) + if dir < 0: + if lookbehindgroups is None: + state.lookbehindgroups = None + if not sourcematch(")"): + raise source.error("missing ), unterminated subpattern", + source.tell() - start) + if char == "=": + subpatternappend((ASSERT, (dir, p))) + elif p: + subpatternappend((ASSERT_NOT, (dir, p))) + else: + subpatternappend((FAILURE, ())) + continue + + elif char == "(": + # conditional backreference group + condname = source.getuntil(")", "group name") + if not (condname.isdecimal() and condname.isascii()): + source.checkgroupname(condname, 1) + condgroup = state.groupdict.get(condname) + if condgroup is None: + msg = "unknown group name %r" % condname + raise source.error(msg, len(condname) + 1) + else: + condgroup = int(condname) + if not condgroup: + raise source.error("bad group number", + len(condname) + 1) + if condgroup >= MAXGROUPS: + msg = "invalid group reference %d" % condgroup + raise source.error(msg, len(condname) + 1) + if condgroup not in state.grouprefpos: + state.grouprefpos[condgroup] = ( + source.tell() - len(condname) - 1 + ) + state.checklookbehindgroup(condgroup, source) + item_yes = _parse(source, state, verbose, nested + 1) + if source.match("|"): + item_no = _parse(source, state, verbose, nested + 1) + if source.next == "|": + raise source.error("conditional backref with more than two branches") + else: + item_no = None + if not source.match(")"): + raise source.error("missing ), unterminated subpattern", + source.tell() - start) + subpatternappend((GROUPREF_EXISTS, (condgroup, item_yes, item_no))) + continue + + elif char == ">": + # non-capturing, atomic group + capture = False + atomic = True + elif char in FLAGS or char == "-": + # flags + flags = _parse_flags(source, state, char) + if flags is None: # global flags + if not first or subpattern: + raise source.error('global flags not at the start ' + 'of the expression', + source.tell() - start) + verbose = state.flags & SRE_FLAG_VERBOSE + continue + + add_flags, del_flags = flags + capture = False + else: + raise source.error("unknown extension ?" 
+ char, + len(char) + 1) + + # parse group contents + if capture: + try: + group = state.opengroup(name) + except error as err: + raise source.error(err.msg, len(name) + 1) from None + else: + group = None + sub_verbose = ((verbose or (add_flags & SRE_FLAG_VERBOSE)) and + not (del_flags & SRE_FLAG_VERBOSE)) + p = _parse_sub(source, state, sub_verbose, nested + 1) + if not source.match(")"): + raise source.error("missing ), unterminated subpattern", + source.tell() - start) + if group is not None: + state.closegroup(group, p) + if atomic: + assert group is None + subpatternappend((ATOMIC_GROUP, p)) + else: + subpatternappend((SUBPATTERN, (group, add_flags, del_flags, p))) + + elif this == "^": + subpatternappend((AT, AT_BEGINNING)) + + elif this == "$": + subpatternappend((AT, AT_END)) + + else: + raise AssertionError("unsupported special character %r" % (char,)) + + # unpack non-capturing groups + for i in range(len(subpattern))[::-1]: + op, av = subpattern[i] + if op is SUBPATTERN: + group, add_flags, del_flags, p = av + if group is None and not add_flags and not del_flags: + subpattern[i: i+1] = p + + return subpattern + +def _parse_flags(source, state, char): + sourceget = source.get + add_flags = 0 + del_flags = 0 + if char != "-": + while True: + flag = FLAGS[char] + if source.istext: + if char == 'L': + msg = "bad inline flags: cannot use 'L' flag with a str pattern" + raise source.error(msg) + else: + if char == 'u': + msg = "bad inline flags: cannot use 'u' flag with a bytes pattern" + raise source.error(msg) + add_flags |= flag + if (flag & TYPE_FLAGS) and (add_flags & TYPE_FLAGS) != flag: + msg = "bad inline flags: flags 'a', 'u' and 'L' are incompatible" + raise source.error(msg) + char = sourceget() + if char is None: + raise source.error("missing -, : or )") + if char in ")-:": + break + if char not in FLAGS: + msg = "unknown flag" if char.isalpha() else "missing -, : or )" + raise source.error(msg, len(char)) + if char == ")": + state.flags |= add_flags + return None + if add_flags & GLOBAL_FLAGS: + raise source.error("bad inline flags: cannot turn on global flag", 1) + if char == "-": + char = sourceget() + if char is None: + raise source.error("missing flag") + if char not in FLAGS: + msg = "unknown flag" if char.isalpha() else "missing flag" + raise source.error(msg, len(char)) + while True: + flag = FLAGS[char] + if flag & TYPE_FLAGS: + msg = "bad inline flags: cannot turn off flags 'a', 'u' and 'L'" + raise source.error(msg) + del_flags |= flag + char = sourceget() + if char is None: + raise source.error("missing :") + if char == ":": + break + if char not in FLAGS: + msg = "unknown flag" if char.isalpha() else "missing :" + raise source.error(msg, len(char)) + assert char == ":" + if del_flags & GLOBAL_FLAGS: + raise source.error("bad inline flags: cannot turn off global flag", 1) + if add_flags & del_flags: + raise source.error("bad inline flags: flag turned on and off", 1) + return add_flags, del_flags + +def fix_flags(src, flags): + # Check and fix flags according to the type of pattern (str or bytes) + if isinstance(src, str): + if flags & SRE_FLAG_LOCALE: + raise ValueError("cannot use LOCALE flag with a str pattern") + if not flags & SRE_FLAG_ASCII: + flags |= SRE_FLAG_UNICODE + elif flags & SRE_FLAG_UNICODE: + raise ValueError("ASCII and UNICODE flags are incompatible") + else: + if flags & SRE_FLAG_UNICODE: + raise ValueError("cannot use UNICODE flag with a bytes pattern") + if flags & SRE_FLAG_LOCALE and flags & SRE_FLAG_ASCII: + raise ValueError("ASCII and 
LOCALE flags are incompatible") + return flags + +def parse(str, flags=0, state=None): + # parse 're' pattern into list of (opcode, argument) tuples + + source = Tokenizer(str) + + if state is None: + state = State() + state.flags = flags + state.str = str + + p = _parse_sub(source, state, flags & SRE_FLAG_VERBOSE, 0) + p.state.flags = fix_flags(str, p.state.flags) + + if source.next is not None: + assert source.next == ")" + raise source.error("unbalanced parenthesis") + + for g in p.state.grouprefpos: + if g >= p.state.groups: + msg = "invalid group reference %d" % g + raise error(msg, str, p.state.grouprefpos[g]) + + if flags & SRE_FLAG_DEBUG: + p.dump() + + return p + +def parse_template(source, pattern): + # parse 're' replacement string into list of literals and + # group references + s = Tokenizer(source) + sget = s.get + result = [] + literal = [] + lappend = literal.append + def addliteral(): + if s.istext: + result.append(''.join(literal)) + else: + # The tokenizer implicitly decodes bytes objects as latin-1, we must + # therefore re-encode the final representation. + result.append(''.join(literal).encode('latin-1')) + del literal[:] + def addgroup(index, pos): + if index > pattern.groups: + raise s.error("invalid group reference %d" % index, pos) + addliteral() + result.append(index) + groupindex = pattern.groupindex + while True: + this = sget() + if this is None: + break # end of replacement string + if this[0] == "\\": + # group + c = this[1] + if c == "g": + if not s.match("<"): + raise s.error("missing <") + name = s.getuntil(">", "group name") + if not (name.isdecimal() and name.isascii()): + s.checkgroupname(name, 1) + try: + index = groupindex[name] + except KeyError: + raise IndexError("unknown group name %r" % name) from None + else: + index = int(name) + if index >= MAXGROUPS: + raise s.error("invalid group reference %d" % index, + len(name) + 1) + addgroup(index, len(name) + 1) + elif c == "0": + if s.next in OCTDIGITS: + this += sget() + if s.next in OCTDIGITS: + this += sget() + lappend(chr(int(this[1:], 8) & 0xff)) + elif c in DIGITS: + isoctal = False + if s.next in DIGITS: + this += sget() + if (c in OCTDIGITS and this[2] in OCTDIGITS and + s.next in OCTDIGITS): + this += sget() + isoctal = True + c = int(this[1:], 8) + if c > 0o377: + raise s.error('octal escape value %s outside of ' + 'range 0-0o377' % this, len(this)) + lappend(chr(c)) + if not isoctal: + addgroup(int(this[1:]), len(this) - 1) + else: + try: + this = chr(ESCAPES[this][1]) + except KeyError: + if c in ASCIILETTERS: + raise s.error('bad escape %s' % this, len(this)) from None + lappend(this) + else: + lappend(this) + addliteral() + return result diff --git a/Python314_4_x64_Template/Lib/reprlib.py b/Python314_4_x64_Template/Lib/reprlib.py new file mode 100644 index 00000000..ab182476 --- /dev/null +++ b/Python314_4_x64_Template/Lib/reprlib.py @@ -0,0 +1,230 @@ +"""Redo the builtin repr() (representation) but with limits on most sizes.""" + +__all__ = ["Repr", "repr", "recursive_repr"] + +import builtins +from itertools import islice +from _thread import get_ident + +def recursive_repr(fillvalue='...'): + 'Decorator to make a repr function return fillvalue for a recursive call' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here 
because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__qualname__ = getattr(user_function, '__qualname__') + wrapper.__annotate__ = getattr(user_function, '__annotate__', None) + wrapper.__type_params__ = getattr(user_function, '__type_params__', ()) + wrapper.__wrapped__ = user_function + return wrapper + + return decorating_function + +class Repr: + _lookup = { + 'tuple': 'builtins', + 'list': 'builtins', + 'array': 'array', + 'set': 'builtins', + 'frozenset': 'builtins', + 'deque': 'collections', + 'dict': 'builtins', + 'str': 'builtins', + 'int': 'builtins' + } + + def __init__( + self, *, maxlevel=6, maxtuple=6, maxlist=6, maxarray=5, maxdict=4, + maxset=6, maxfrozenset=6, maxdeque=6, maxstring=30, maxlong=40, + maxother=30, fillvalue='...', indent=None, + ): + self.maxlevel = maxlevel + self.maxtuple = maxtuple + self.maxlist = maxlist + self.maxarray = maxarray + self.maxdict = maxdict + self.maxset = maxset + self.maxfrozenset = maxfrozenset + self.maxdeque = maxdeque + self.maxstring = maxstring + self.maxlong = maxlong + self.maxother = maxother + self.fillvalue = fillvalue + self.indent = indent + + def repr(self, x): + return self.repr1(x, self.maxlevel) + + def repr1(self, x, level): + cls = type(x) + typename = cls.__name__ + + if ' ' in typename: + parts = typename.split() + typename = '_'.join(parts) + + method = getattr(self, 'repr_' + typename, None) + if method: + # not defined in this class + if typename not in self._lookup: + return method(x, level) + module = getattr(cls, '__module__', None) + # defined in this class and is the module intended + if module == self._lookup[typename]: + return method(x, level) + + return self.repr_instance(x, level) + + def _join(self, pieces, level): + if self.indent is None: + return ', '.join(pieces) + if not pieces: + return '' + indent = self.indent + if isinstance(indent, int): + if indent < 0: + raise ValueError( + f'Repr.indent cannot be negative int (was {indent!r})' + ) + indent *= ' ' + try: + sep = ',\n' + (self.maxlevel - level + 1) * indent + except TypeError as error: + raise TypeError( + f'Repr.indent must be a str, int or None, not {type(indent)}' + ) from error + return sep.join(('', *pieces, ''))[1:-len(indent) or None] + + def _repr_iterable(self, x, level, left, right, maxiter, trail=''): + n = len(x) + if level <= 0 and n: + s = self.fillvalue + else: + newlevel = level - 1 + repr1 = self.repr1 + pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)] + if n > maxiter: + pieces.append(self.fillvalue) + s = self._join(pieces, level) + if n == 1 and trail and self.indent is None: + right = trail + right + return '%s%s%s' % (left, s, right) + + def repr_tuple(self, x, level): + return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',') + + def repr_list(self, x, level): + return self._repr_iterable(x, level, '[', ']', self.maxlist) + + def repr_array(self, x, level): + if not x: + return "array('%s')" % x.typecode + header = "array('%s', [" % x.typecode + return self._repr_iterable(x, level, header, '])', self.maxarray) + + def repr_set(self, x, level): + if not x: + return 'set()' + x = _possibly_sorted(x) + return self._repr_iterable(x, level, '{', '}', self.maxset) + + def repr_frozenset(self, x, level): + if not x: + return 'frozenset()' + x = _possibly_sorted(x) + return self._repr_iterable(x, level, 'frozenset({', '})', + self.maxfrozenset) + + 
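    # A minimal usage sketch (an illustrative aside, not part of the upstream
    # module): containers longer than their per-type limit are truncated and
    # capped with fillvalue, e.g.
    #
    #     >>> Repr(maxlist=3).repr(list(range(100)))
    #     '[0, 1, 2, ...]'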
def repr_deque(self, x, level): + return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque) + + def repr_dict(self, x, level): + n = len(x) + if n == 0: + return '{}' + if level <= 0: + return '{' + self.fillvalue + '}' + newlevel = level - 1 + repr1 = self.repr1 + pieces = [] + for key in islice(_possibly_sorted(x), self.maxdict): + keyrepr = repr1(key, newlevel) + valrepr = repr1(x[key], newlevel) + pieces.append('%s: %s' % (keyrepr, valrepr)) + if n > self.maxdict: + pieces.append(self.fillvalue) + s = self._join(pieces, level) + return '{%s}' % (s,) + + def repr_str(self, x, level): + s = builtins.repr(x[:self.maxstring]) + if len(s) > self.maxstring: + i = max(0, (self.maxstring-3)//2) + j = max(0, self.maxstring-3-i) + s = builtins.repr(x[:i] + x[len(x)-j:]) + s = s[:i] + self.fillvalue + s[len(s)-j:] + return s + + def repr_int(self, x, level): + try: + s = builtins.repr(x) + except ValueError as exc: + assert 'sys.set_int_max_str_digits()' in str(exc) + # Those imports must be deferred due to Python's build system + # where the reprlib module is imported before the math module. + import math, sys + # Integers with more than sys.get_int_max_str_digits() digits + # are rendered differently as their repr() raises a ValueError. + # See https://github.com/python/cpython/issues/135487. + k = 1 + int(math.log10(abs(x))) + # Note: math.log10(abs(x)) may be overestimated or underestimated, + # but for simplicity, we do not compute the exact number of digits. + max_digits = sys.get_int_max_str_digits() + return (f'<{x.__class__.__name__} instance with roughly {k} ' + f'digits (limit at {max_digits}) at 0x{id(x):x}>') + if len(s) > self.maxlong: + i = max(0, (self.maxlong-3)//2) + j = max(0, self.maxlong-3-i) + s = s[:i] + self.fillvalue + s[len(s)-j:] + return s + + def repr_instance(self, x, level): + try: + s = builtins.repr(x) + # Bugs in x.__repr__() can cause arbitrary + # exceptions -- then make up something + except Exception: + return '<%s instance at %#x>' % (x.__class__.__name__, id(x)) + if len(s) > self.maxother: + i = max(0, (self.maxother-3)//2) + j = max(0, self.maxother-3-i) + s = s[:i] + self.fillvalue + s[len(s)-j:] + return s + + +def _possibly_sorted(x): + # Since not all sequences of items can be sorted and comparison + # functions may raise arbitrary exceptions, return an unsorted + # sequence in that case. 
+ try: + return sorted(x) + except Exception: + return list(x) + +aRepr = Repr() +repr = aRepr.repr diff --git a/Python313_13_x64_Template/Lib/rlcompleter.py b/Python314_4_x64_Template/Lib/rlcompleter.py similarity index 100% rename from Python313_13_x64_Template/Lib/rlcompleter.py rename to Python314_4_x64_Template/Lib/rlcompleter.py diff --git a/Python313_13_x64_Template/Lib/runpy.py b/Python314_4_x64_Template/Lib/runpy.py similarity index 100% rename from Python313_13_x64_Template/Lib/runpy.py rename to Python314_4_x64_Template/Lib/runpy.py diff --git a/Python313_13_x64_Template/Lib/sched.py b/Python314_4_x64_Template/Lib/sched.py similarity index 100% rename from Python313_13_x64_Template/Lib/sched.py rename to Python314_4_x64_Template/Lib/sched.py diff --git a/Python313_13_x64_Template/Lib/secrets.py b/Python314_4_x64_Template/Lib/secrets.py similarity index 100% rename from Python313_13_x64_Template/Lib/secrets.py rename to Python314_4_x64_Template/Lib/secrets.py diff --git a/Python313_13_x64_Template/Lib/selectors.py b/Python314_4_x64_Template/Lib/selectors.py similarity index 100% rename from Python313_13_x64_Template/Lib/selectors.py rename to Python314_4_x64_Template/Lib/selectors.py diff --git a/Python313_13_x64_Template/Lib/shelve.py b/Python314_4_x64_Template/Lib/shelve.py similarity index 100% rename from Python313_13_x64_Template/Lib/shelve.py rename to Python314_4_x64_Template/Lib/shelve.py diff --git a/Python314_4_x64_Template/Lib/shlex.py b/Python314_4_x64_Template/Lib/shlex.py new file mode 100644 index 00000000..5959f52d --- /dev/null +++ b/Python314_4_x64_Template/Lib/shlex.py @@ -0,0 +1,351 @@ +"""A lexical analyzer class for simple shell-like syntaxes.""" + +# Module and documentation by Eric S. Raymond, 21 Dec 1998 +# Input stacking and error message cleanup added by ESR, March 2000 +# push_source() and pop_source() made explicit by ESR, January 2001. +# Posix compliance, split(), string arguments, and +# iterator interface by Gustavo Niemeyer, April 2003. +# changes to tokenize more like Posix shells by Vinay Sajip, July 2016. + +import sys +from io import StringIO + +__all__ = ["shlex", "split", "quote", "join"] + +class shlex: + "A lexical analyzer class for simple shell-like syntaxes." 
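    # A minimal usage sketch (an illustrative aside, not part of the upstream
    # module): with punctuation_chars=True the shell operators are returned
    # as separate tokens, e.g.
    #
    #     >>> list(shlex('a && b; c', punctuation_chars=True))
    #     ['a', '&&', 'b', ';', 'c']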
+ def __init__(self, instream=None, infile=None, posix=False, + punctuation_chars=False): + from collections import deque # deferred import for performance + + if isinstance(instream, str): + instream = StringIO(instream) + if instream is not None: + self.instream = instream + self.infile = infile + else: + self.instream = sys.stdin + self.infile = None + self.posix = posix + if posix: + self.eof = None + else: + self.eof = '' + self.commenters = '#' + self.wordchars = ('abcdfeghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_') + if self.posix: + self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ' + 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ') + self.whitespace = ' \t\r\n' + self.whitespace_split = False + self.quotes = '\'"' + self.escape = '\\' + self.escapedquotes = '"' + self.state = ' ' + self.pushback = deque() + self.lineno = 1 + self.debug = 0 + self.token = '' + self.filestack = deque() + self.source = None + if not punctuation_chars: + punctuation_chars = '' + elif punctuation_chars is True: + punctuation_chars = '();<>|&' + self._punctuation_chars = punctuation_chars + if punctuation_chars: + # _pushback_chars is a push back queue used by lookahead logic + self._pushback_chars = deque() + # these chars added because allowed in file names, args, wildcards + self.wordchars += '~-./*?=' + #remove any punctuation chars from wordchars + t = self.wordchars.maketrans(dict.fromkeys(punctuation_chars)) + self.wordchars = self.wordchars.translate(t) + + @property + def punctuation_chars(self): + return self._punctuation_chars + + def push_token(self, tok): + "Push a token onto the stack popped by the get_token method" + if self.debug >= 1: + print("shlex: pushing token " + repr(tok)) + self.pushback.appendleft(tok) + + def push_source(self, newstream, newfile=None): + "Push an input source onto the lexer's input source stack." + if isinstance(newstream, str): + newstream = StringIO(newstream) + self.filestack.appendleft((self.infile, self.instream, self.lineno)) + self.infile = newfile + self.instream = newstream + self.lineno = 1 + if self.debug: + if newfile is not None: + print('shlex: pushing to file %s' % (self.infile,)) + else: + print('shlex: pushing to stream %s' % (self.instream,)) + + def pop_source(self): + "Pop the input source stack." + self.instream.close() + (self.infile, self.instream, self.lineno) = self.filestack.popleft() + if self.debug: + print('shlex: popping to %s, line %d' \ + % (self.instream, self.lineno)) + self.state = ' ' + + def get_token(self): + "Get a token from the input stream (or from stack if it's nonempty)" + if self.pushback: + tok = self.pushback.popleft() + if self.debug >= 1: + print("shlex: popping token " + repr(tok)) + return tok + # No pushback. Get a token. + raw = self.read_token() + # Handle inclusions + if self.source is not None: + while raw == self.source: + spec = self.sourcehook(self.read_token()) + if spec: + (newfile, newstream) = spec + self.push_source(newstream, newfile) + raw = self.get_token() + # Maybe we got EOF instead? 
+ while raw == self.eof: + if not self.filestack: + return self.eof + else: + self.pop_source() + raw = self.get_token() + # Neither inclusion nor EOF + if self.debug >= 1: + if raw != self.eof: + print("shlex: token=" + repr(raw)) + else: + print("shlex: token=EOF") + return raw + + def read_token(self): + quoted = False + escapedstate = ' ' + while True: + if self.punctuation_chars and self._pushback_chars: + nextchar = self._pushback_chars.pop() + else: + nextchar = self.instream.read(1) + if nextchar == '\n': + self.lineno += 1 + if self.debug >= 3: + print("shlex: in state %r I see character: %r" % (self.state, + nextchar)) + if self.state is None: + self.token = '' # past end of file + break + elif self.state == ' ': + if not nextchar: + self.state = None # end of file + break + elif nextchar in self.whitespace: + if self.debug >= 2: + print("shlex: I see whitespace in whitespace state") + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif nextchar in self.commenters: + self.instream.readline() + self.lineno += 1 + elif self.posix and nextchar in self.escape: + escapedstate = 'a' + self.state = nextchar + elif nextchar in self.wordchars: + self.token = nextchar + self.state = 'a' + elif nextchar in self.punctuation_chars: + self.token = nextchar + self.state = 'c' + elif nextchar in self.quotes: + if not self.posix: + self.token = nextchar + self.state = nextchar + elif self.whitespace_split: + self.token = nextchar + self.state = 'a' + else: + self.token = nextchar + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif self.state in self.quotes: + quoted = True + if not nextchar: # end of file + if self.debug >= 2: + print("shlex: I see EOF in quotes state") + # XXX what error should be raised here? + raise ValueError("No closing quotation") + if nextchar == self.state: + if not self.posix: + self.token += nextchar + self.state = ' ' + break + else: + self.state = 'a' + elif (self.posix and nextchar in self.escape and self.state + in self.escapedquotes): + escapedstate = self.state + self.state = nextchar + else: + self.token += nextchar + elif self.state in self.escape: + if not nextchar: # end of file + if self.debug >= 2: + print("shlex: I see EOF in escape state") + # XXX what error should be raised here? + raise ValueError("No escaped character") + # In posix shells, only the quote itself or the escape + # character may be escaped within quotes. 
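                # (So in posix mode "\$" keeps the backslash, while "\"" and
                # "\\" drop it: the branch below re-inserts the escape char
                # only when the escaped character is neither the quote nor
                # the escape char itself.)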
+ if (escapedstate in self.quotes and + nextchar != self.state and nextchar != escapedstate): + self.token += self.state + self.token += nextchar + self.state = escapedstate + elif self.state in ('a', 'c'): + if not nextchar: + self.state = None # end of file + break + elif nextchar in self.whitespace: + if self.debug >= 2: + print("shlex: I see whitespace in word state") + self.state = ' ' + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif nextchar in self.commenters: + self.instream.readline() + self.lineno += 1 + if self.posix: + self.state = ' ' + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif self.state == 'c': + if nextchar in self.punctuation_chars: + self.token += nextchar + else: + if nextchar not in self.whitespace: + self._pushback_chars.append(nextchar) + self.state = ' ' + break + elif self.posix and nextchar in self.quotes: + self.state = nextchar + elif self.posix and nextchar in self.escape: + escapedstate = 'a' + self.state = nextchar + elif (nextchar in self.wordchars or nextchar in self.quotes + or (self.whitespace_split and + nextchar not in self.punctuation_chars)): + self.token += nextchar + else: + if self.punctuation_chars: + self._pushback_chars.append(nextchar) + else: + self.pushback.appendleft(nextchar) + if self.debug >= 2: + print("shlex: I see punctuation in word state") + self.state = ' ' + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + result = self.token + self.token = '' + if self.posix and not quoted and result == '': + result = None + if self.debug > 1: + if result: + print("shlex: raw token=" + repr(result)) + else: + print("shlex: raw token=EOF") + return result + + def sourcehook(self, newfile): + "Hook called on a filename to be sourced." + import os.path + if newfile[0] == '"': + newfile = newfile[1:-1] + # This implements cpp-like semantics for relative-path inclusion. + if isinstance(self.infile, str) and not os.path.isabs(newfile): + newfile = os.path.join(os.path.dirname(self.infile), newfile) + return (newfile, open(newfile, "r")) + + def error_leader(self, infile=None, lineno=None): + "Emit a C-compiler-like, Emacs-friendly error-message leader." 
+ if infile is None: + infile = self.infile + if lineno is None: + lineno = self.lineno + return "\"%s\", line %d: " % (infile, lineno) + + def __iter__(self): + return self + + def __next__(self): + token = self.get_token() + if token == self.eof: + raise StopIteration + return token + +def split(s, comments=False, posix=True): + """Split the string *s* using shell-like syntax.""" + if s is None: + raise ValueError("s argument must not be None") + lex = shlex(s, posix=posix) + lex.whitespace_split = True + if not comments: + lex.commenters = '' + return list(lex) + + +def join(split_command): + """Return a shell-escaped string from *split_command*.""" + return ' '.join(quote(arg) for arg in split_command) + + +def quote(s): + """Return a shell-escaped version of the string *s*.""" + if not s: + return "''" + + if not isinstance(s, str): + raise TypeError(f"expected string object, got {type(s).__name__!r}") + + # Use bytes.translate() for performance + safe_chars = (b'%+,-./0123456789:=@' + b'ABCDEFGHIJKLMNOPQRSTUVWXYZ_' + b'abcdefghijklmnopqrstuvwxyz') + # No quoting is needed if `s` is an ASCII string consisting only of `safe_chars` + if s.isascii() and not s.encode().translate(None, delete=safe_chars): + return s + + # use single quotes, and put single quotes into double quotes + # the string $'b is then quoted as '$'"'"'b' + return "'" + s.replace("'", "'\"'\"'") + "'" + + +def _print_tokens(lexer): + while tt := lexer.get_token(): + print("Token: " + repr(tt)) + +if __name__ == '__main__': + if len(sys.argv) == 1: + _print_tokens(shlex()) + else: + fn = sys.argv[1] + with open(fn) as f: + _print_tokens(shlex(f, fn)) diff --git a/Python314_4_x64_Template/Lib/shutil.py b/Python314_4_x64_Template/Lib/shutil.py new file mode 100644 index 00000000..8d8fe145 --- /dev/null +++ b/Python314_4_x64_Template/Lib/shutil.py @@ -0,0 +1,1667 @@ +"""Utility functions for copying and archiving files and directory trees. + +XXX The functions here don't copy the resource fork or other metadata on Mac. 
+ +""" + +import os +import sys +import stat +import fnmatch +import collections +import errno + +try: + import zlib + del zlib + _ZLIB_SUPPORTED = True +except ImportError: + _ZLIB_SUPPORTED = False + +try: + import bz2 + del bz2 + _BZ2_SUPPORTED = True +except ImportError: + _BZ2_SUPPORTED = False + +try: + import lzma + del lzma + _LZMA_SUPPORTED = True +except ImportError: + _LZMA_SUPPORTED = False + +try: + from compression import zstd + del zstd + _ZSTD_SUPPORTED = True +except ImportError: + _ZSTD_SUPPORTED = False + +_WINDOWS = os.name == 'nt' +posix = nt = None +if os.name == 'posix': + import posix +elif _WINDOWS: + import nt + +if sys.platform == 'win32': + import _winapi +else: + _winapi = None + +COPY_BUFSIZE = 1024 * 1024 if _WINDOWS else 256 * 1024 +# This should never be removed, see rationale in: +# https://bugs.python.org/issue43743#msg393429 +_USE_CP_SENDFILE = (hasattr(os, "sendfile") + and sys.platform.startswith(("linux", "android", "sunos"))) +_USE_CP_COPY_FILE_RANGE = hasattr(os, "copy_file_range") +_HAS_FCOPYFILE = posix and hasattr(posix, "_fcopyfile") # macOS + +# CMD defaults in Windows 10 +_WIN_DEFAULT_PATHEXT = ".COM;.EXE;.BAT;.CMD;.VBS;.JS;.WS;.MSC" + +__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", + "copytree", "move", "rmtree", "Error", "SpecialFileError", + "make_archive", "get_archive_formats", + "register_archive_format", "unregister_archive_format", + "get_unpack_formats", "register_unpack_format", + "unregister_unpack_format", "unpack_archive", + "ignore_patterns", "chown", "which", "get_terminal_size", + "SameFileError"] + # disk_usage is added later, if available on the platform + +class Error(OSError): + pass + +class SameFileError(Error): + """Raised when source and destination are the same file.""" + +class SpecialFileError(OSError): + """Raised when trying to do a kind of operation (e.g. copying) which is + not supported on a special file (e.g. a named pipe)""" + + +class ReadError(OSError): + """Raised when an archive cannot be read""" + +class RegistryError(Exception): + """Raised when a registry operation with the archiving + and unpacking registries fails""" + +class _GiveupOnFastCopy(Exception): + """Raised as a signal to fallback on using raw read()/write() + file copy when fast-copy functions fail to do so. + """ + +def _fastcopy_fcopyfile(fsrc, fdst, flags): + """Copy a regular file content or metadata by using high-performance + fcopyfile(3) syscall (macOS). + """ + try: + infd = fsrc.fileno() + outfd = fdst.fileno() + except Exception as err: + raise _GiveupOnFastCopy(err) # not a regular file + + try: + posix._fcopyfile(infd, outfd, flags) + except OSError as err: + err.filename = fsrc.name + err.filename2 = fdst.name + if err.errno in {errno.EINVAL, errno.ENOTSUP}: + raise _GiveupOnFastCopy(err) + else: + raise err from None + +def _determine_linux_fastcopy_blocksize(infd): + """Determine blocksize for fastcopying on Linux. + + Hopefully the whole file will be copied in a single call. + The copying itself should be performed in a loop 'till EOF is + reached (0 return) so a blocksize smaller or bigger than the actual + file size should not make any difference, also in case the file + content changes while being copied. + """ + try: + blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8 MiB + except OSError: + blocksize = 2 ** 27 # 128 MiB + # On 32-bit architectures truncate to 1 GiB to avoid OverflowError, + # see gh-82500. 
+ if sys.maxsize < 2 ** 32: + blocksize = min(blocksize, 2 ** 30) + return blocksize + +def _fastcopy_copy_file_range(fsrc, fdst): + """Copy data from one regular mmap-like fd to another by using + a high-performance copy_file_range(2) syscall that gives filesystems + an opportunity to implement the use of reflinks or server-side copy. + + This should work on Linux >= 4.5 only. + """ + try: + infd = fsrc.fileno() + outfd = fdst.fileno() + except Exception as err: + raise _GiveupOnFastCopy(err) # not a regular file + + blocksize = _determine_linux_fastcopy_blocksize(infd) + offset = 0 + while True: + try: + n_copied = os.copy_file_range(infd, outfd, blocksize, offset_dst=offset) + except OSError as err: + # ...in order to have a more informative exception. + err.filename = fsrc.name + err.filename2 = fdst.name + + if err.errno == errno.ENOSPC: # filesystem is full + raise err from None + + # Give up on first call and if no data was copied. + if offset == 0 and os.lseek(outfd, 0, os.SEEK_CUR) == 0: + raise _GiveupOnFastCopy(err) + + raise err + else: + if n_copied == 0: + # If no bytes have been copied yet, copy_file_range + # might silently fail. + # https://lore.kernel.org/linux-fsdevel/20210126233840.GG4626@dread.disaster.area/T/#m05753578c7f7882f6e9ffe01f981bc223edef2b0 + if offset == 0: + raise _GiveupOnFastCopy() + break + offset += n_copied + +def _fastcopy_sendfile(fsrc, fdst): + """Copy data from one regular mmap-like fd to another by using + high-performance sendfile(2) syscall. + This should work on Linux >= 2.6.33, Android and Solaris. + """ + # Note: copyfileobj() is left alone in order to not introduce any + # unexpected breakage. Possible risks by using zero-copy calls + # in copyfileobj() are: + # - fdst cannot be open in "a"(ppend) mode + # - fsrc and fdst may be open in "t"(ext) mode + # - fsrc may be a BufferedReader (which hides unread data in a buffer), + # GzipFile (which decompresses data), HTTPResponse (which decodes + # chunks). + # - possibly others (e.g. encrypted fs/partition?) + global _USE_CP_SENDFILE + try: + infd = fsrc.fileno() + outfd = fdst.fileno() + except Exception as err: + raise _GiveupOnFastCopy(err) # not a regular file + + blocksize = _determine_linux_fastcopy_blocksize(infd) + offset = 0 + while True: + try: + sent = os.sendfile(outfd, infd, offset, blocksize) + except OSError as err: + # ...in order to have a more informative exception. + err.filename = fsrc.name + err.filename2 = fdst.name + + if err.errno == errno.ENOTSOCK: + # sendfile() on this platform (probably Linux < 2.6.33) + # does not support copies between regular files (only + # sockets). + _USE_CP_SENDFILE = False + raise _GiveupOnFastCopy(err) + + if err.errno == errno.ENOSPC: # filesystem is full + raise err from None + + # Give up on first call and if no data was copied. + if offset == 0 and os.lseek(outfd, 0, os.SEEK_CUR) == 0: + raise _GiveupOnFastCopy(err) + + raise err + else: + if sent == 0: + break # EOF + offset += sent + +def _copyfileobj_readinto(fsrc, fdst, length=COPY_BUFSIZE): + """readinto()/memoryview() based variant of copyfileobj(). + *fsrc* must support readinto() method and both files must be + open in binary mode. + """ + # Localize variable access to minimize overhead.
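    # (Note on the loop below: the final short read is written through a
    # sliced memoryview, mv[:n], so only the bytes actually read are
    # flushed, without copying the buffer.)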
+ fsrc_readinto = fsrc.readinto + fdst_write = fdst.write + with memoryview(bytearray(length)) as mv: + while True: + n = fsrc_readinto(mv) + if not n: + break + elif n < length: + with mv[:n] as smv: + fdst_write(smv) + break + else: + fdst_write(mv) + +def copyfileobj(fsrc, fdst, length=0): + """copy data from file-like object fsrc to file-like object fdst""" + if not length: + length = COPY_BUFSIZE + # Localize variable access to minimize overhead. + fsrc_read = fsrc.read + fdst_write = fdst.write + while buf := fsrc_read(length): + fdst_write(buf) + +def _samefile(src, dst): + # Macintosh, Unix. + if isinstance(src, os.DirEntry) and hasattr(os.path, 'samestat'): + try: + return os.path.samestat(src.stat(), os.stat(dst)) + except OSError: + return False + + if hasattr(os.path, 'samefile'): + try: + return os.path.samefile(src, dst) + except OSError: + return False + + # All other platforms: check for same pathname. + return (os.path.normcase(os.path.abspath(src)) == + os.path.normcase(os.path.abspath(dst))) + +def _stat(fn): + return fn.stat() if isinstance(fn, os.DirEntry) else os.stat(fn) + +def _islink(fn): + return fn.is_symlink() if isinstance(fn, os.DirEntry) else os.path.islink(fn) + +def copyfile(src, dst, *, follow_symlinks=True): + """Copy data from src to dst in the most efficient way possible. + + If follow_symlinks is not set and src is a symbolic link, a new + symlink will be created instead of copying the file it points to. + + """ + sys.audit("shutil.copyfile", src, dst) + + if _samefile(src, dst): + raise SameFileError("{!r} and {!r} are the same file".format(src, dst)) + + file_size = 0 + for i, fn in enumerate([src, dst]): + try: + st = _stat(fn) + except OSError: + # File most likely does not exist + pass + else: + # XXX What about other special files? (sockets, devices...) + if stat.S_ISFIFO(st.st_mode): + fn = fn.path if isinstance(fn, os.DirEntry) else fn + raise SpecialFileError("`%s` is a named pipe" % fn) + if _WINDOWS and i == 0: + file_size = st.st_size + + if not follow_symlinks and _islink(src): + os.symlink(os.readlink(src), dst) + else: + with open(src, 'rb') as fsrc: + try: + with open(dst, 'wb') as fdst: + # macOS + if _HAS_FCOPYFILE: + try: + _fastcopy_fcopyfile(fsrc, fdst, posix._COPYFILE_DATA) + return dst + except _GiveupOnFastCopy: + pass + # Linux / Android / Solaris + elif _USE_CP_SENDFILE or _USE_CP_COPY_FILE_RANGE: + # reflink may be implicit in copy_file_range. + if _USE_CP_COPY_FILE_RANGE: + try: + _fastcopy_copy_file_range(fsrc, fdst) + return dst + except _GiveupOnFastCopy: + pass + if _USE_CP_SENDFILE: + try: + _fastcopy_sendfile(fsrc, fdst) + return dst + except _GiveupOnFastCopy: + pass + # Windows, see: + # https://github.com/python/cpython/pull/7160#discussion_r195405230 + elif _WINDOWS and file_size > 0: + _copyfileobj_readinto(fsrc, fdst, min(file_size, COPY_BUFSIZE)) + return dst + + copyfileobj(fsrc, fdst) + + # Issue 43219, raise a less confusing exception + except IsADirectoryError as e: + if not os.path.exists(dst): + raise FileNotFoundError(f'Directory does not exist: {dst}') from e + else: + raise + + return dst + +def copymode(src, dst, *, follow_symlinks=True): + """Copy mode bits from src to dst. + + If follow_symlinks is not set, symlinks aren't followed if and only + if both `src` and `dst` are symlinks. If `lchmod` isn't available + (e.g. Linux) this method does nothing. 
+ + """ + sys.audit("shutil.copymode", src, dst) + + if not follow_symlinks and _islink(src) and os.path.islink(dst): + if hasattr(os, 'lchmod'): + stat_func, chmod_func = os.lstat, os.lchmod + else: + return + else: + stat_func = _stat + if os.name == 'nt' and os.path.islink(dst): + def chmod_func(*args): + os.chmod(*args, follow_symlinks=True) + else: + chmod_func = os.chmod + + st = stat_func(src) + chmod_func(dst, stat.S_IMODE(st.st_mode)) + +if hasattr(os, 'listxattr'): + def _copyxattr(src, dst, *, follow_symlinks=True): + """Copy extended filesystem attributes from `src` to `dst`. + + Overwrite existing attributes. + + If `follow_symlinks` is false, symlinks won't be followed. + + """ + + try: + names = os.listxattr(src, follow_symlinks=follow_symlinks) + except OSError as e: + if e.errno not in (errno.ENOTSUP, errno.ENODATA, errno.EINVAL): + raise + return + for name in names: + try: + value = os.getxattr(src, name, follow_symlinks=follow_symlinks) + os.setxattr(dst, name, value, follow_symlinks=follow_symlinks) + except OSError as e: + if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA, + errno.EINVAL, errno.EACCES): + raise +else: + def _copyxattr(*args, **kwargs): + pass + +def copystat(src, dst, *, follow_symlinks=True): + """Copy file metadata + + Copy the permission bits, last access time, last modification time, and + flags from `src` to `dst`. On Linux, copystat() also copies the "extended + attributes" where possible. The file contents, owner, and group are + unaffected. `src` and `dst` are path-like objects or path names given as + strings. + + If the optional flag `follow_symlinks` is not set, symlinks aren't + followed if and only if both `src` and `dst` are symlinks. + """ + sys.audit("shutil.copystat", src, dst) + + def _nop(*args, ns=None, follow_symlinks=None): + pass + + # follow symlinks (aka don't not follow symlinks) + follow = follow_symlinks or not (_islink(src) and os.path.islink(dst)) + if follow: + # use the real function if it exists + def lookup(name): + return getattr(os, name, _nop) + else: + # use the real function only if it exists + # *and* it supports follow_symlinks + def lookup(name): + fn = getattr(os, name, _nop) + if fn in os.supports_follow_symlinks: + return fn + return _nop + + if isinstance(src, os.DirEntry): + st = src.stat(follow_symlinks=follow) + else: + st = lookup("stat")(src, follow_symlinks=follow) + mode = stat.S_IMODE(st.st_mode) + lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns), + follow_symlinks=follow) + # We must copy extended attributes before the file is (potentially) + # chmod()'ed read-only, otherwise setxattr() will error with -EACCES. + _copyxattr(src, dst, follow_symlinks=follow) + try: + lookup("chmod")(dst, mode, follow_symlinks=follow) + except NotImplementedError: + # if we got a NotImplementedError, it's because + # * follow_symlinks=False, + # * lchown() is unavailable, and + # * either + # * fchownat() is unavailable or + # * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW. + # (it returned ENOSUP.) + # therefore we're out of options--we simply cannot chown the + # symlink. give up, suppress the error. + # (which is what shutil always did in this circumstance.) 
+ pass + if hasattr(st, 'st_flags'): + try: + lookup("chflags")(dst, st.st_flags, follow_symlinks=follow) + except OSError as why: + for err in 'EOPNOTSUPP', 'ENOTSUP': + if hasattr(errno, err) and why.errno == getattr(errno, err): + break + else: + raise + +def copy(src, dst, *, follow_symlinks=True): + """Copy data and mode bits ("cp src dst"). Return the file's destination. + + The destination may be a directory. + + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + + If source and destination are the same file, a SameFileError will be + raised. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst, follow_symlinks=follow_symlinks) + copymode(src, dst, follow_symlinks=follow_symlinks) + return dst + +def copy2(src, dst, *, follow_symlinks=True): + """Copy data and metadata. Return the file's destination. + + Metadata is copied with copystat(). Please see the copystat function + for more information. + + The destination may be a directory. + + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + + if hasattr(_winapi, "CopyFile2"): + src_ = os.fsdecode(src) + dst_ = os.fsdecode(dst) + flags = _winapi.COPY_FILE_ALLOW_DECRYPTED_DESTINATION # for compat + if not follow_symlinks: + flags |= _winapi.COPY_FILE_COPY_SYMLINK + try: + _winapi.CopyFile2(src_, dst_, flags) + return dst + except OSError as exc: + if (exc.winerror == _winapi.ERROR_PRIVILEGE_NOT_HELD + and not follow_symlinks): + # Likely encountered a symlink we aren't allowed to create. + # Fall back on the old code + pass + elif exc.winerror == _winapi.ERROR_ACCESS_DENIED: + # Possibly encountered a hidden or readonly file we can't + # overwrite. Fall back on old code + pass + else: + raise + + copyfile(src, dst, follow_symlinks=follow_symlinks) + copystat(src, dst, follow_symlinks=follow_symlinks) + return dst + +def ignore_patterns(*patterns): + """Function that can be used as copytree() ignore parameter. + + Patterns is a sequence of glob-style patterns + that are used to exclude files""" + def _ignore_patterns(path, names): + ignored_names = [] + for pattern in patterns: + ignored_names.extend(fnmatch.filter(names, pattern)) + return set(ignored_names) + return _ignore_patterns + +def _copytree(entries, src, dst, symlinks, ignore, copy_function, + ignore_dangling_symlinks, dirs_exist_ok=False): + if ignore is not None: + ignored_names = ignore(os.fspath(src), [x.name for x in entries]) + else: + ignored_names = () + + os.makedirs(dst, exist_ok=dirs_exist_ok) + errors = [] + use_srcentry = copy_function is copy2 or copy_function is copy + + for srcentry in entries: + if srcentry.name in ignored_names: + continue + srcname = os.path.join(src, srcentry.name) + dstname = os.path.join(dst, srcentry.name) + srcobj = srcentry if use_srcentry else srcname + try: + is_symlink = srcentry.is_symlink() + if is_symlink and os.name == 'nt': + # Special check for directory junctions, which appear as + # symlinks but we want to recurse. + lstat = srcentry.stat(follow_symlinks=False) + if lstat.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT: + is_symlink = False + if is_symlink: + linkto = os.readlink(srcname) + if symlinks: + # We can't just leave it to `copy_function` because legacy + # code with a custom `copy_function` may rely on copytree + # doing the right thing. 
+ os.symlink(linkto, dstname) + copystat(srcobj, dstname, follow_symlinks=not symlinks) + else: + # ignore dangling symlink if the flag is on + if not os.path.exists(linkto) and ignore_dangling_symlinks: + continue + # otherwise let the copy occur. copy2 will raise an error + if srcentry.is_dir(): + copytree(srcobj, dstname, symlinks, ignore, + copy_function, ignore_dangling_symlinks, + dirs_exist_ok) + else: + copy_function(srcobj, dstname) + elif srcentry.is_dir(): + copytree(srcobj, dstname, symlinks, ignore, copy_function, + ignore_dangling_symlinks, dirs_exist_ok) + else: + # Will raise a SpecialFileError for unsupported file types + copy_function(srcobj, dstname) + # catch the Error from the recursive copytree so that we can + # continue with other files + except Error as err: + errors.extend(err.args[0]) + except OSError as why: + errors.append((srcname, dstname, str(why))) + try: + copystat(src, dst) + except OSError as why: + # Copying file access times may fail on Windows + if getattr(why, 'winerror', None) is None: + errors.append((src, dst, str(why))) + if errors: + raise Error(errors) + return dst + +def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, + ignore_dangling_symlinks=False, dirs_exist_ok=False): + """Recursively copy a directory tree and return the destination directory. + + If exception(s) occur, an Error is raised with a list of reasons. + + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. If the file pointed to by the symlink doesn't + exist, an exception will be added in the list of errors raised in + an Error exception at the end of the copy process. + + You can set the optional ignore_dangling_symlinks flag to true if you + want to silence this exception. Notice that this has no effect on + platforms that don't support os.symlink. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + The optional copy_function argument is a callable that will be used + to copy each file. It will be called with the source path and the + destination path as arguments. By default, copy2() is used, but any + function that supports the same signature (like copy()) can be used. + + If dirs_exist_ok is false (the default) and `dst` already exists, a + `FileExistsError` is raised. If `dirs_exist_ok` is true, the copying + operation will continue if it encounters existing directories, and files + within the `dst` tree will be overwritten by corresponding files from the + `src` tree. 
+ """ + sys.audit("shutil.copytree", src, dst) + with os.scandir(src) as itr: + entries = list(itr) + return _copytree(entries=entries, src=src, dst=dst, symlinks=symlinks, + ignore=ignore, copy_function=copy_function, + ignore_dangling_symlinks=ignore_dangling_symlinks, + dirs_exist_ok=dirs_exist_ok) + +if hasattr(os.stat_result, 'st_file_attributes'): + def _rmtree_islink(st): + return (stat.S_ISLNK(st.st_mode) or + (st.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT + and st.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT)) +else: + def _rmtree_islink(st): + return stat.S_ISLNK(st.st_mode) + +# version vulnerable to race conditions +def _rmtree_unsafe(path, dir_fd, onexc): + if dir_fd is not None: + raise NotImplementedError("dir_fd unavailable on this platform") + try: + st = os.lstat(path) + except OSError as err: + onexc(os.lstat, path, err) + return + try: + if _rmtree_islink(st): + # symlinks to directories are forbidden, see bug #1669 + raise OSError("Cannot call rmtree on a symbolic link") + except OSError as err: + onexc(os.path.islink, path, err) + # can't continue even if onexc hook returns + return + def onerror(err): + if not isinstance(err, FileNotFoundError): + onexc(os.scandir, err.filename, err) + results = os.walk(path, topdown=False, onerror=onerror, followlinks=os._walk_symlinks_as_files) + for dirpath, dirnames, filenames in results: + for name in dirnames: + fullname = os.path.join(dirpath, name) + try: + os.rmdir(fullname) + except FileNotFoundError: + continue + except OSError as err: + onexc(os.rmdir, fullname, err) + for name in filenames: + fullname = os.path.join(dirpath, name) + try: + os.unlink(fullname) + except FileNotFoundError: + continue + except OSError as err: + onexc(os.unlink, fullname, err) + try: + os.rmdir(path) + except FileNotFoundError: + pass + except OSError as err: + onexc(os.rmdir, path, err) + +# Version using fd-based APIs to protect against races +def _rmtree_safe_fd(path, dir_fd, onexc): + # While the unsafe rmtree works fine on bytes, the fd based does not. + if isinstance(path, bytes): + path = os.fsdecode(path) + stack = [(os.lstat, dir_fd, path, None)] + try: + while stack: + _rmtree_safe_fd_step(stack, onexc) + finally: + # Close any file descriptors still on the stack. + while stack: + func, fd, path, entry = stack.pop() + if func is not os.close: + continue + try: + os.close(fd) + except OSError as err: + onexc(os.close, path, err) + +def _rmtree_safe_fd_step(stack, onexc): + # Each stack item has four elements: + # * func: The first operation to perform: os.lstat, os.close or os.rmdir. + # Walking a directory starts with an os.lstat() to detect symlinks; in + # this case, func is updated before subsequent operations and passed to + # onexc() if an error occurs. + # * dirfd: Open file descriptor, or None if we're processing the top-level + # directory given to rmtree() and the user didn't supply dir_fd. + # * path: Path of file to operate upon. This is passed to onexc() if an + # error occurs. + # * orig_entry: os.DirEntry, or None if we're processing the top-level + # directory given to rmtree(). We used the cached stat() of the entry to + # save a call to os.lstat() when walking subdirectories. + func, dirfd, path, orig_entry = stack.pop() + name = path if orig_entry is None else orig_entry.name + try: + if func is os.close: + os.close(dirfd) + return + if func is os.rmdir: + os.rmdir(name, dir_fd=dirfd) + return + + # Note: To guard against symlink races, we use the standard + # lstat()/open()/fstat() trick. 
+ assert func is os.lstat + if orig_entry is None: + orig_st = os.lstat(name, dir_fd=dirfd) + else: + orig_st = orig_entry.stat(follow_symlinks=False) + + func = os.open # For error reporting. + topfd = os.open(name, os.O_RDONLY | os.O_NONBLOCK, dir_fd=dirfd) + + func = os.path.islink # For error reporting. + try: + if not os.path.samestat(orig_st, os.fstat(topfd)): + # Symlinks to directories are forbidden, see GH-46010. + raise OSError("Cannot call rmtree on a symbolic link") + stack.append((os.rmdir, dirfd, path, orig_entry)) + finally: + stack.append((os.close, topfd, path, orig_entry)) + + func = os.scandir # For error reporting. + with os.scandir(topfd) as scandir_it: + entries = list(scandir_it) + for entry in entries: + fullname = os.path.join(path, entry.name) + try: + if entry.is_dir(follow_symlinks=False): + # Traverse into sub-directory. + stack.append((os.lstat, topfd, fullname, entry)) + continue + except FileNotFoundError: + continue + except OSError: + pass + try: + os.unlink(entry.name, dir_fd=topfd) + except FileNotFoundError: + continue + except OSError as err: + onexc(os.unlink, fullname, err) + except FileNotFoundError as err: + if orig_entry is None or func is os.close: + err.filename = path + onexc(func, path, err) + except OSError as err: + err.filename = path + onexc(func, path, err) + +_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <= + os.supports_dir_fd and + os.scandir in os.supports_fd and + os.stat in os.supports_follow_symlinks) +_rmtree_impl = _rmtree_safe_fd if _use_fd_functions else _rmtree_unsafe + +def rmtree(path, ignore_errors=False, onerror=None, *, onexc=None, dir_fd=None): + """Recursively delete a directory tree. + + If dir_fd is not None, it should be a file descriptor open to a directory; + path will then be relative to that directory. + dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError. + + If ignore_errors is set, errors are ignored; otherwise, if onexc or + onerror is set, it is called to handle the error with arguments (func, + path, exc_info) where func is platform and implementation dependent; + path is the argument to that function that caused it to fail; and + the value of exc_info describes the exception. For onexc it is the + exception instance, and for onerror it is a tuple as returned by + sys.exc_info(). If ignore_errors is false and both onexc and + onerror are None, the exception is reraised. + + onerror is deprecated and only remains for backwards compatibility. + If both onerror and onexc are set, onerror is ignored and onexc is used. + """ + + sys.audit("shutil.rmtree", path, dir_fd) + if ignore_errors: + def onexc(*args): + pass + elif onerror is None and onexc is None: + def onexc(*args): + raise + elif onexc is None: + if onerror is None: + def onexc(*args): + raise + else: + # delegate to onerror + def onexc(*args): + func, path, exc = args + if exc is None: + exc_info = None, None, None + else: + exc_info = type(exc), exc, exc.__traceback__ + return onerror(func, path, exc_info) + + _rmtree_impl(path, dir_fd, onexc) + +# Allow introspection of whether or not the hardening against symlink +# attacks is supported on the current platform +rmtree.avoids_symlink_attacks = _use_fd_functions + +def _basename(path): + """A basename() variant which first strips the trailing slash, if present. + Thus we always get the last component of the path, even for directories. + + path: Union[PathLike, str] + + e.g. 
+ >>> os.path.basename('/bar/foo') + 'foo' + >>> os.path.basename('/bar/foo/') + '' + >>> _basename('/bar/foo/') + 'foo' + """ + path = os.fspath(path) + sep = os.path.sep + (os.path.altsep or '') + return os.path.basename(path.rstrip(sep)) + +def move(src, dst, copy_function=copy2): + """Recursively move a file or directory to another location. This is + similar to the Unix "mv" command. Return the file or directory's + destination. + + If dst is an existing directory or a symlink to a directory, then src is + moved inside that directory. The destination path in that directory must + not already exist. + + If dst already exists but is not a directory, it may be overwritten + depending on os.rename() semantics. + + If the destination is on our current filesystem, then rename() is used. + Otherwise, src is copied to the destination and then removed. Symlinks are + recreated under the new name if os.rename() fails because of cross + filesystem renames. + + The optional `copy_function` argument is a callable that will be used + to copy the source or it will be delegated to `copytree`. + By default, copy2() is used, but any function that supports the same + signature (like copy()) can be used. + + A lot more could be done here... A look at a mv.c shows a lot of + the issues this implementation glosses over. + + """ + sys.audit("shutil.move", src, dst) + real_dst = dst + if os.path.isdir(dst): + if _samefile(src, dst) and not os.path.islink(src): + # We might be on a case insensitive filesystem, + # perform the rename anyway. + os.rename(src, dst) + return + + # Using _basename instead of os.path.basename is important, as we must + # ignore any trailing slash to avoid the basename returning '' + real_dst = os.path.join(dst, _basename(src)) + + if os.path.exists(real_dst): + raise Error("Destination path '%s' already exists" % real_dst) + try: + os.rename(src, real_dst) + except OSError: + if os.path.islink(src): + linkto = os.readlink(src) + os.symlink(linkto, real_dst) + os.unlink(src) + elif os.path.isdir(src): + if _destinsrc(src, dst): + raise Error("Cannot move a directory '%s' into itself" + " '%s'." % (src, dst)) + if (_is_immutable(src) + or (not os.access(src, os.W_OK) and os.listdir(src) + and sys.platform == 'darwin')): + raise PermissionError("Cannot move the non-empty directory " + "'%s': Lacking write permission to '%s'." 
+ % (src, src)) + copytree(src, real_dst, copy_function=copy_function, + symlinks=True) + rmtree(src) + else: + copy_function(src, real_dst) + os.unlink(src) + return real_dst + +def _destinsrc(src, dst): + src = os.path.abspath(src) + dst = os.path.abspath(dst) + if not src.endswith(os.path.sep): + src += os.path.sep + if not dst.endswith(os.path.sep): + dst += os.path.sep + return dst.startswith(src) + +def _is_immutable(src): + st = _stat(src) + immutable_states = [stat.UF_IMMUTABLE, stat.SF_IMMUTABLE] + return hasattr(st, 'st_flags') and st.st_flags in immutable_states + +def _get_gid(name): + """Returns a gid, given a group name.""" + if name is None: + return None + + try: + from grp import getgrnam + except ImportError: + return None + + try: + result = getgrnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _get_uid(name): + """Returns an uid, given a user name.""" + if name is None: + return None + + try: + from pwd import getpwnam + except ImportError: + return None + + try: + result = getpwnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, + owner=None, group=None, logger=None, root_dir=None): + """Create a (possibly compressed) tar file from all the files under + 'base_dir'. + + 'compress' must be "gzip" (the default), "bzip2", "xz", "zst", or None. + + 'owner' and 'group' can be used to define an owner and a group for the + archive that is being built. If not provided, the current owner and group + will be used. + + The output tar file will be named 'base_name' + ".tar", possibly plus + the appropriate compression extension (".gz", ".bz2", ".xz", or ".zst"). + + Returns the output filename. + """ + if compress is None: + tar_compression = '' + elif _ZLIB_SUPPORTED and compress == 'gzip': + tar_compression = 'gz' + elif _BZ2_SUPPORTED and compress == 'bzip2': + tar_compression = 'bz2' + elif _LZMA_SUPPORTED and compress == 'xz': + tar_compression = 'xz' + elif _ZSTD_SUPPORTED and compress == 'zst': + tar_compression = 'zst' + else: + raise ValueError("bad value for 'compress', or compression format not " + "supported : {0}".format(compress)) + + import tarfile # late import for breaking circular dependency + + compress_ext = '.' + tar_compression if compress else '' + archive_name = base_name + '.tar' + compress_ext + archive_dir = os.path.dirname(archive_name) + + if archive_dir and not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # creating the tarball + if logger is not None: + logger.info('Creating tar archive') + + uid = _get_uid(owner) + gid = _get_gid(group) + + def _set_uid_gid(tarinfo): + if gid is not None: + tarinfo.gid = gid + tarinfo.gname = group + if uid is not None: + tarinfo.uid = uid + tarinfo.uname = owner + return tarinfo + + if not dry_run: + tar = tarfile.open(archive_name, 'w|%s' % tar_compression) + arcname = base_dir + if root_dir is not None: + base_dir = os.path.join(root_dir, base_dir) + try: + tar.add(base_dir, arcname, filter=_set_uid_gid) + finally: + tar.close() + + if root_dir is not None: + archive_name = os.path.abspath(archive_name) + return archive_name + +def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, + logger=None, owner=None, group=None, root_dir=None): + """Create a zip file from all the files under 'base_dir'. 
+ + The output zip file will be named 'base_name' + ".zip". Returns the + name of the output zip file. + """ + import zipfile # late import for breaking circular dependency + + zip_filename = base_name + ".zip" + archive_dir = os.path.dirname(base_name) + + if archive_dir and not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + if logger is not None: + logger.info("creating '%s' and adding '%s' to it", + zip_filename, base_dir) + + if not dry_run: + with zipfile.ZipFile(zip_filename, "w", + compression=zipfile.ZIP_DEFLATED) as zf: + arcname = os.path.normpath(base_dir) + if root_dir is not None: + base_dir = os.path.join(root_dir, base_dir) + base_dir = os.path.normpath(base_dir) + if arcname != os.curdir: + zf.write(base_dir, arcname) + if logger is not None: + logger.info("adding '%s'", base_dir) + for dirpath, dirnames, filenames in os.walk(base_dir): + arcdirpath = dirpath + if root_dir is not None: + arcdirpath = os.path.relpath(arcdirpath, root_dir) + arcdirpath = os.path.normpath(arcdirpath) + for name in sorted(dirnames): + path = os.path.join(dirpath, name) + arcname = os.path.join(arcdirpath, name) + zf.write(path, arcname) + if logger is not None: + logger.info("adding '%s'", path) + for name in filenames: + path = os.path.join(dirpath, name) + path = os.path.normpath(path) + if os.path.isfile(path): + arcname = os.path.join(arcdirpath, name) + zf.write(path, arcname) + if logger is not None: + logger.info("adding '%s'", path) + + if root_dir is not None: + zip_filename = os.path.abspath(zip_filename) + return zip_filename + +_make_tarball.supports_root_dir = True +_make_zipfile.supports_root_dir = True + +# Maps the name of the archive format to a tuple containing: +# * the archiving function +# * extra keyword arguments +# * description +_ARCHIVE_FORMATS = { + 'tar': (_make_tarball, [('compress', None)], + "uncompressed tar file"), +} + +if _ZLIB_SUPPORTED: + _ARCHIVE_FORMATS['gztar'] = (_make_tarball, [('compress', 'gzip')], + "gzip'ed tar-file") + _ARCHIVE_FORMATS['zip'] = (_make_zipfile, [], "ZIP file") + +if _BZ2_SUPPORTED: + _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], + "bzip2'ed tar-file") + +if _LZMA_SUPPORTED: + _ARCHIVE_FORMATS['xztar'] = (_make_tarball, [('compress', 'xz')], + "xz'ed tar-file") + +if _ZSTD_SUPPORTED: + _ARCHIVE_FORMATS['zstdtar'] = (_make_tarball, [('compress', 'zst')], + "zstd'ed tar-file") + +def get_archive_formats(): + """Returns a list of supported formats for archiving and unarchiving. + + Each element of the returned sequence is a tuple (name, description) + """ + formats = [(name, registry[2]) for name, registry in + _ARCHIVE_FORMATS.items()] + formats.sort() + return formats + +def register_archive_format(name, function, extra_args=None, description=''): + """Registers an archive format. + + name is the name of the format. function is the callable that will be + used to create archives. If provided, extra_args is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_archive_formats() function. 
+ """ + if extra_args is None: + extra_args = [] + if not callable(function): + raise TypeError('The %s object is not callable' % function) + if not isinstance(extra_args, (tuple, list)): + raise TypeError('extra_args needs to be a sequence') + for element in extra_args: + if not isinstance(element, (tuple, list)) or len(element) !=2: + raise TypeError('extra_args elements are : (arg_name, value)') + + _ARCHIVE_FORMATS[name] = (function, extra_args, description) + +def unregister_archive_format(name): + del _ARCHIVE_FORMATS[name] + +def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, + dry_run=0, owner=None, group=None, logger=None): + """Create an archive file (eg. zip or tar). + + 'base_name' is the name of the file to create, minus any format-specific + extension; 'format' is the archive format: one of "zip", "tar", "gztar", + "bztar", "xztar", or "zstdtar". Or any other registered format. + + 'root_dir' is a directory that will be the root directory of the + archive; ie. we typically chdir into 'root_dir' before creating the + archive. 'base_dir' is the directory where we start archiving from; + ie. 'base_dir' will be the common prefix of all files and + directories in the archive. 'root_dir' and 'base_dir' both default + to the current directory. Returns the name of the archive file. + + 'owner' and 'group' are used when creating a tar archive. By default, + uses the current owner and group. + """ + sys.audit("shutil.make_archive", base_name, format, root_dir, base_dir) + try: + format_info = _ARCHIVE_FORMATS[format] + except KeyError: + raise ValueError("unknown archive format '%s'" % format) from None + + kwargs = {'dry_run': dry_run, 'logger': logger, + 'owner': owner, 'group': group} + + func = format_info[0] + for arg, val in format_info[1]: + kwargs[arg] = val + + if base_dir is None: + base_dir = os.curdir + + supports_root_dir = getattr(func, 'supports_root_dir', False) + save_cwd = None + if root_dir is not None: + stmd = os.stat(root_dir).st_mode + if not stat.S_ISDIR(stmd): + raise NotADirectoryError(errno.ENOTDIR, 'Not a directory', root_dir) + + if supports_root_dir: + # Support path-like base_name here for backwards-compatibility. + base_name = os.fspath(base_name) + kwargs['root_dir'] = root_dir + else: + save_cwd = os.getcwd() + if logger is not None: + logger.debug("changing into '%s'", root_dir) + base_name = os.path.abspath(base_name) + if not dry_run: + os.chdir(root_dir) + + try: + filename = func(base_name, base_dir, **kwargs) + finally: + if save_cwd is not None: + if logger is not None: + logger.debug("changing back to '%s'", save_cwd) + os.chdir(save_cwd) + + return filename + + +def get_unpack_formats(): + """Returns a list of supported formats for unpacking. 
+ + Each element of the returned sequence is a tuple + (name, extensions, description) + """ + formats = [(name, info[0], info[3]) for name, info in + _UNPACK_FORMATS.items()] + formats.sort() + return formats + +def _check_unpack_options(extensions, function, extra_args): + """Checks what gets registered as an unpacker.""" + # first make sure no other unpacker is registered for this extension + existing_extensions = {} + for name, info in _UNPACK_FORMATS.items(): + for ext in info[0]: + existing_extensions[ext] = name + + for extension in extensions: + if extension in existing_extensions: + msg = '%s is already registered for "%s"' + raise RegistryError(msg % (extension, + existing_extensions[extension])) + + if not callable(function): + raise TypeError('The registered function must be a callable') + + +def register_unpack_format(name, extensions, function, extra_args=None, + description=''): + """Registers an unpack format. + + `name` is the name of the format. `extensions` is a list of extensions + corresponding to the format. + + `function` is the callable that will be + used to unpack archives. The callable will receive archives to unpack. + If it's unable to handle an archive, it needs to raise a ReadError + exception. + + If provided, `extra_args` is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_unpack_formats() function. + """ + if extra_args is None: + extra_args = [] + _check_unpack_options(extensions, function, extra_args) + _UNPACK_FORMATS[name] = extensions, function, extra_args, description + +def unregister_unpack_format(name): + """Removes the pack format from the registry.""" + del _UNPACK_FORMATS[name] + +def _ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + +def _unpack_zipfile(filename, extract_dir): + """Unpack zip `filename` to `extract_dir` + """ + import zipfile # late import for breaking circular dependency + + if not zipfile.is_zipfile(filename): + raise ReadError("%s is not a zip file" % filename) + + zip = zipfile.ZipFile(filename) + try: + for info in zip.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' 
in name: + continue + + targetpath = os.path.join(extract_dir, *name.split('/')) + if not targetpath: + continue + + _ensure_directory(targetpath) + if not name.endswith('/'): + # file + with zip.open(name, 'r') as source, \ + open(targetpath, 'wb') as target: + copyfileobj(source, target) + finally: + zip.close() + +def _unpack_tarfile(filename, extract_dir, *, filter=None): + """Unpack tar/tar.gz/tar.bz2/tar.xz/tar.zst `filename` to `extract_dir` + """ + import tarfile # late import for breaking circular dependency + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise ReadError( + "%s is not a compressed or uncompressed tar file" % filename) + try: + tarobj.extractall(extract_dir, filter=filter) + finally: + tarobj.close() + +# Maps the name of the unpack format to a tuple containing: +# * extensions +# * the unpacking function +# * extra keyword arguments +# * description +_UNPACK_FORMATS = { + 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), + 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file"), +} + +if _ZLIB_SUPPORTED: + _UNPACK_FORMATS['gztar'] = (['.tar.gz', '.tgz'], _unpack_tarfile, [], + "gzip'ed tar-file") + +if _BZ2_SUPPORTED: + _UNPACK_FORMATS['bztar'] = (['.tar.bz2', '.tbz2'], _unpack_tarfile, [], + "bzip2'ed tar-file") + +if _LZMA_SUPPORTED: + _UNPACK_FORMATS['xztar'] = (['.tar.xz', '.txz'], _unpack_tarfile, [], + "xz'ed tar-file") + +if _ZSTD_SUPPORTED: + _UNPACK_FORMATS['zstdtar'] = (['.tar.zst', '.tzst'], _unpack_tarfile, [], + "zstd'ed tar-file") + +def _find_unpack_format(filename): + for name, info in _UNPACK_FORMATS.items(): + for extension in info[0]: + if filename.endswith(extension): + return name + return None + +def unpack_archive(filename, extract_dir=None, format=None, *, filter=None): + """Unpack an archive. + + `filename` is the name of the archive. + + `extract_dir` is the name of the target directory, where the archive + is unpacked. If not provided, the current working directory is used. + + `format` is the archive format: one of "zip", "tar", "gztar", "bztar", + "xztar", or "zstdtar". Or any other registered format. If not provided, + unpack_archive will use the filename extension and see if an unpacker + was registered for that extension. + + In case none is found, a ValueError is raised. + + If `filter` is given, it is passed to the underlying + extraction function. 
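+
+    For example (hypothetical paths; 'data' is the tarfile extraction
+    filter that rejects unsafe archive members):
+
+        unpack_archive('project.tar.gz', extract_dir='/tmp/project',
+                       filter='data')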
+ """ + sys.audit("shutil.unpack_archive", filename, extract_dir, format) + + if extract_dir is None: + extract_dir = os.getcwd() + + extract_dir = os.fspath(extract_dir) + filename = os.fspath(filename) + + if filter is None: + filter_kwargs = {} + else: + filter_kwargs = {'filter': filter} + if format is not None: + try: + format_info = _UNPACK_FORMATS[format] + except KeyError: + raise ValueError("Unknown unpack format '{0}'".format(format)) from None + + func = format_info[1] + func(filename, extract_dir, **dict(format_info[2]), **filter_kwargs) + else: + # we need to look at the registered unpackers supported extensions + format = _find_unpack_format(filename) + if format is None: + raise ReadError("Unknown archive format '{0}'".format(filename)) + + func = _UNPACK_FORMATS[format][1] + kwargs = dict(_UNPACK_FORMATS[format][2]) | filter_kwargs + func(filename, extract_dir, **kwargs) + + +if hasattr(os, 'statvfs'): + + __all__.append('disk_usage') + _ntuple_diskusage = collections.namedtuple('usage', 'total used free') + _ntuple_diskusage.total.__doc__ = 'Total space in bytes' + _ntuple_diskusage.used.__doc__ = 'Used space in bytes' + _ntuple_diskusage.free.__doc__ = 'Free space in bytes' + + def disk_usage(path): + """Return disk usage statistics about the given path. + + Returned value is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + """ + st = os.statvfs(path) + free = st.f_bavail * st.f_frsize + total = st.f_blocks * st.f_frsize + used = (st.f_blocks - st.f_bfree) * st.f_frsize + return _ntuple_diskusage(total, used, free) + +elif _WINDOWS: + + __all__.append('disk_usage') + _ntuple_diskusage = collections.namedtuple('usage', 'total used free') + + def disk_usage(path): + """Return disk usage statistics about the given path. + + Returned values is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + """ + total, free = nt._getdiskusage(path) + used = total - free + return _ntuple_diskusage(total, used, free) + + +def chown(path, user=None, group=None, *, dir_fd=None, follow_symlinks=True): + """Change owner user and group of the given path. + + user and group can be the uid/gid or the user/group names, and in that case, + they are converted to their respective uid/gid. + + If dir_fd is set, it should be an open file descriptor to the directory to + be used as the root of *path* if it is relative. + + If follow_symlinks is set to False and the last element of the path is a + symbolic link, chown will modify the link itself and not the file being + referenced by the link. + """ + sys.audit('shutil.chown', path, user, group) + + if user is None and group is None: + raise ValueError("user and/or group must be set") + + _user = user + _group = group + + # -1 means don't change it + if user is None: + _user = -1 + # user can either be an int (the uid) or a string (the system username) + elif isinstance(user, str): + _user = _get_uid(user) + if _user is None: + raise LookupError("no such user: {!r}".format(user)) + + if group is None: + _group = -1 + elif not isinstance(group, int): + _group = _get_gid(group) + if _group is None: + raise LookupError("no such group: {!r}".format(group)) + + os.chown(path, _user, _group, dir_fd=dir_fd, + follow_symlinks=follow_symlinks) + +def get_terminal_size(fallback=(80, 24)): + """Get the size of the terminal window. 
+ + For each of the two dimensions, the environment variable, COLUMNS + and LINES respectively, is checked. If the variable is defined and + the value is a positive integer, it is used. + + When COLUMNS or LINES is not defined, which is the common case, + the terminal connected to sys.__stdout__ is queried + by invoking os.get_terminal_size. + + If the terminal size cannot be successfully queried, either because + the system doesn't support querying, or because we are not + connected to a terminal, the value given in fallback parameter + is used. Fallback defaults to (80, 24) which is the default + size used by many terminal emulators. + + The value returned is a named tuple of type os.terminal_size. + """ + # columns, lines are the working values + try: + columns = int(os.environ['COLUMNS']) + except (KeyError, ValueError): + columns = 0 + + try: + lines = int(os.environ['LINES']) + except (KeyError, ValueError): + lines = 0 + + # only query if necessary + if columns <= 0 or lines <= 0: + try: + size = os.get_terminal_size(sys.__stdout__.fileno()) + except (AttributeError, ValueError, OSError): + # stdout is None, closed, detached, or not a terminal, or + # os.get_terminal_size() is unsupported + size = os.terminal_size(fallback) + if columns <= 0: + columns = size.columns or fallback[0] + if lines <= 0: + lines = size.lines or fallback[1] + + return os.terminal_size((columns, lines)) + + +# Check that a given file can be accessed with the correct mode. +# Additionally check that `file` is not a directory, as on Windows +# directories pass the os.access check. +def _access_check(fn, mode): + return (os.path.exists(fn) and os.access(fn, mode) + and not os.path.isdir(fn)) + + +def _win_path_needs_curdir(cmd, mode): + """ + On Windows, we can use NeedCurrentDirectoryForExePath to figure out + if we should add the cwd to PATH when searching for executables if + the mode is executable. + """ + return (not (mode & os.X_OK)) or _winapi.NeedCurrentDirectoryForExePath( + os.fsdecode(cmd)) + + +def which(cmd, mode=os.F_OK | os.X_OK, path=None): + """Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + + """ + use_bytes = isinstance(cmd, bytes) + + # If we're given a path with a directory part, look it up directly rather + # than referring to PATH directories. This includes checking relative to + # the current directory, e.g. ./script + dirname, cmd = os.path.split(cmd) + if dirname: + path = [dirname] + else: + if path is None: + path = os.environ.get("PATH", None) + if path is None: + try: + path = os.confstr("CS_PATH") + except (AttributeError, ValueError): + # os.confstr() or CS_PATH is not available + path = os.defpath + # bpo-35755: Don't use os.defpath if the PATH environment variable + # is set to an empty string + + # PATH='' doesn't match, whereas PATH=':' looks in the current + # directory + if not path: + return None + + if use_bytes: + path = os.fsencode(path) + path = path.split(os.fsencode(os.pathsep)) + else: + path = os.fsdecode(path) + path = path.split(os.pathsep) + + if sys.platform == "win32" and _win_path_needs_curdir(cmd, mode): + curdir = os.curdir + if use_bytes: + curdir = os.fsencode(curdir) + path.insert(0, curdir) + + if sys.platform == "win32": + # PATHEXT is necessary to check on Windows. 
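+        # PATHEXT typically looks like ".COM;.EXE;.BAT;.CMD", so a lookup
+        # for "python" will also try "python.exe", "python.bat", etc. in
+        # each directory on the search path.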
+ pathext_source = os.getenv("PATHEXT") or _WIN_DEFAULT_PATHEXT + pathext = pathext_source.split(os.pathsep) + pathext = [ext.rstrip('.') for ext in pathext if ext] + + if use_bytes: + pathext = [os.fsencode(ext) for ext in pathext] + + files = [cmd + ext for ext in pathext] + + # If X_OK in mode, simulate the cmd.exe behavior: look at direct + # match if and only if the extension is in PATHEXT. + # If X_OK not in mode, simulate the first result of where.exe: + # always look at direct match before a PATHEXT match. + normcmd = cmd.upper() + if not (mode & os.X_OK) or any(normcmd.endswith(ext.upper()) for ext in pathext): + files.insert(0, cmd) + else: + # On other platforms you don't have things like PATHEXT to tell you + # what file suffixes are executable, so just pass on cmd as-is. + files = [cmd] + + seen = set() + for dir in path: + normdir = os.path.normcase(dir) + if normdir not in seen: + seen.add(normdir) + for thefile in files: + name = os.path.join(dir, thefile) + if _access_check(name, mode): + return name + return None + +def __getattr__(name): + if name == "ExecError": + import warnings + warnings._deprecated( + "shutil.ExecError", + f"{warnings._DEPRECATED_MSG}; it " + "isn't raised by any shutil function.", + remove=(3, 16) + ) + return RuntimeError + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/Python313_13_x64_Template/Lib/signal.py b/Python314_4_x64_Template/Lib/signal.py similarity index 100% rename from Python313_13_x64_Template/Lib/signal.py rename to Python314_4_x64_Template/Lib/signal.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/INSTALLER b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/INSTALLER similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/INSTALLER rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/INSTALLER diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/METADATA b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/METADATA similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/METADATA rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/METADATA diff --git a/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD new file mode 100644 index 00000000..c1feab63 --- /dev/null +++ b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD @@ -0,0 +1,878 @@ +../../Scripts/pip.exe,sha256=sWTM2eaXGMS7HvW5PZJ-Dcop1cyz4cOhF52p-a-6Xv8,108403 +../../Scripts/pip3.14.exe,sha256=sWTM2eaXGMS7HvW5PZJ-Dcop1cyz4cOhF52p-a-6Xv8,108403 +../../Scripts/pip3.exe,sha256=sWTM2eaXGMS7HvW5PZJ-Dcop1cyz4cOhF52p-a-6Xv8,108403 +pip-26.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip-26.0.1.dist-info/METADATA,sha256=ZqIZuNGsG6l2gHiKlQjVQghFQhgSWfhEDHuCVPW3aN8,4675 +pip-26.0.1.dist-info/RECORD,, +pip-26.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip-26.0.1.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 +pip-26.0.1.dist-info/entry_points.txt,sha256=Vhf8s0IYgX37mtd4vGL73BPcxdKnqeCFPzB5-d30x8o,84 +pip-26.0.1.dist-info/licenses/AUTHORS.txt,sha256=grSl9YDNOpOFFJTX8ZYKSdgfouXi_DzlRyYGE2-u5aI,11731 +pip-26.0.1.dist-info/licenses/LICENSE.txt,sha256=Y0MApmnUmurmWxLGxIySTFGkzfPR_whtw0VtyLyqIQQ,1093 
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt,sha256=hu7uh74qQ_P_H1ZJb0UfaSQ5JvAl_tuwM2ZsMExMFhs,558 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt,sha256=GrNuPipLqGMWJThPh-ngkdsfrtA0xbIzJbMjmr8sxSU,1099 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt,sha256=gI4QyKarjesUn_mz-xn0R6gICUYG1xKpylf-rTVSWZ0,14531 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md,sha256=t6M2q_OwThgOwGXN0W5wXQeeHMehT5EKpukYfza5zYc,1541 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING,sha256=SS3tuoXaWHL3jmCRvNH-pHTWYNNay03ulkuKqz8AdCc,614 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE,sha256=qdZvHVJt8C4p3Oc0NtNOVuhjL0bCdbvf_HBWnogvnxc,1331 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE,sha256=GyKwSbUmfW38I6Z79KhNjsBLn9-xpR02DkK0NCyLQVQ,1081 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE,sha256=84j9OMrRMRLB3A9mm76A5_hFQe26-3LzAw0sp2QsPJ0,751 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE,sha256=3u18F6QxgVgZCj6iOcyHmlpQJxzruYrnAl9I--WNyhU,1056 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE,sha256=M757fo-k_Rmxdg4ajtimaL2rhSyRtpLdQUJLy3Jan8o,1086 +pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115 +pip/__init__.py,sha256=3EhKF2588Ab15tmBszgD3Bp0N26sJx7VhS2Akn_qY38,355 +pip/__main__.py,sha256=WzbhHXTbSE6gBY19mNN9m4s5o_365LOvTYSgqgbdBhE,854 +pip/__pip-runner__.py,sha256=JOoEZTwrtv7jRaXBkgSQKAE04yNyfFmGHxqpHiGHvL0,1450 +pip/__pycache__/__init__.cpython-314.pyc,, +pip/__pycache__/__main__.cpython-314.pyc,, +pip/__pycache__/__pip-runner__.cpython-314.pyc,, +pip/_internal/__init__.py,sha256=S7i9Dn9aSZS0MG-2Wrve3dV9TImPzvQn5jjhp9t_uf0,511 +pip/_internal/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/__pycache__/build_env.cpython-314.pyc,, +pip/_internal/__pycache__/cache.cpython-314.pyc,, +pip/_internal/__pycache__/configuration.cpython-314.pyc,, +pip/_internal/__pycache__/exceptions.cpython-314.pyc,, +pip/_internal/__pycache__/main.cpython-314.pyc,, +pip/_internal/__pycache__/pyproject.cpython-314.pyc,, +pip/_internal/__pycache__/self_outdated_check.cpython-314.pyc,, +pip/_internal/__pycache__/wheel_builder.cpython-314.pyc,, 
+pip/_internal/build_env.py,sha256=XpgOIlTQLgz3PvDT2n7j2NzX_rVFZLCIG7t7b2ddhcM,21911 +pip/_internal/cache.py,sha256=nMh48Yv3yu1HS1yCdscouu6B6B5zYBWdV6bhqs7gL-E,10345 +pip/_internal/cli/__init__.py,sha256=Iqg_tKA771XuMO1P4t_sDHnSKPzkUb9D0DqunAmw_ko,131 +pip/_internal/cli/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/cli/__pycache__/autocompletion.cpython-314.pyc,, +pip/_internal/cli/__pycache__/base_command.cpython-314.pyc,, +pip/_internal/cli/__pycache__/cmdoptions.cpython-314.pyc,, +pip/_internal/cli/__pycache__/command_context.cpython-314.pyc,, +pip/_internal/cli/__pycache__/index_command.cpython-314.pyc,, +pip/_internal/cli/__pycache__/main.cpython-314.pyc,, +pip/_internal/cli/__pycache__/main_parser.cpython-314.pyc,, +pip/_internal/cli/__pycache__/parser.cpython-314.pyc,, +pip/_internal/cli/__pycache__/progress_bars.cpython-314.pyc,, +pip/_internal/cli/__pycache__/req_command.cpython-314.pyc,, +pip/_internal/cli/__pycache__/spinners.cpython-314.pyc,, +pip/_internal/cli/__pycache__/status_codes.cpython-314.pyc,, +pip/_internal/cli/autocompletion.py,sha256=ZG2cM03nlcNrs-WG_SFTW46isx9s2Go5lUD_8-iv70o,7193 +pip/_internal/cli/base_command.py,sha256=6OW75PSGzkH8Fz761WZ3OSz1TsuO3-suc6iap-sQjTM,9168 +pip/_internal/cli/cmdoptions.py,sha256=hfA9B29Nnq2vYMWhFVg7EcWjdlfdPBPU4WwWT2Lkq4A,36164 +pip/_internal/cli/command_context.py,sha256=kmu3EWZbfBega1oDamnGJTA_UaejhIQNuMj2CVmMXu0,817 +pip/_internal/cli/index_command.py,sha256=s3x75lpDXWJtCkBacTQ3qAAprldHMJCniEQ5qkQ0FiI,6484 +pip/_internal/cli/main.py,sha256=ljDQBkvBtC8xTjOdb6rDJzJUNi1s-PnVR_W5C-Mq0Dk,3137 +pip/_internal/cli/main_parser.py,sha256=YjzJAjqf78ARNsLlnJT9l6fNbpyDPJA-arOIXYsK5Ik,4403 +pip/_internal/cli/parser.py,sha256=EIFExrWX_1nrl1Ib--GOor70WYqLtduHByenb1u9xH4,13827 +pip/_internal/cli/progress_bars.py,sha256=IW1PH5n2FPqUBTP7ULQ5Yu-wyNNO9XGY3g1PT4RMu44,4706 +pip/_internal/cli/req_command.py,sha256=QjDXId0hFdopwE8hNx2eustumxUNbnOCvG_ORmUC7vM,16482 +pip/_internal/cli/spinners.py,sha256=EJzZIZNyUtJljp3-WjcsyIrqxW-HUsfWzhuW84n_Tqw,7362 +pip/_internal/cli/status_codes.py,sha256=sEFHUaUJbqv8iArL3HAtcztWZmGOFX01hTesSytDEh0,116 +pip/_internal/commands/__init__.py,sha256=aNeCbQurGWihfhQq7BqaLXHqWDQ0i3I04OS7kxK6plQ,4026 +pip/_internal/commands/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/commands/__pycache__/cache.cpython-314.pyc,, +pip/_internal/commands/__pycache__/check.cpython-314.pyc,, +pip/_internal/commands/__pycache__/completion.cpython-314.pyc,, +pip/_internal/commands/__pycache__/configuration.cpython-314.pyc,, +pip/_internal/commands/__pycache__/debug.cpython-314.pyc,, +pip/_internal/commands/__pycache__/download.cpython-314.pyc,, +pip/_internal/commands/__pycache__/freeze.cpython-314.pyc,, +pip/_internal/commands/__pycache__/hash.cpython-314.pyc,, +pip/_internal/commands/__pycache__/help.cpython-314.pyc,, +pip/_internal/commands/__pycache__/index.cpython-314.pyc,, +pip/_internal/commands/__pycache__/inspect.cpython-314.pyc,, +pip/_internal/commands/__pycache__/install.cpython-314.pyc,, +pip/_internal/commands/__pycache__/list.cpython-314.pyc,, +pip/_internal/commands/__pycache__/lock.cpython-314.pyc,, +pip/_internal/commands/__pycache__/search.cpython-314.pyc,, +pip/_internal/commands/__pycache__/show.cpython-314.pyc,, +pip/_internal/commands/__pycache__/uninstall.cpython-314.pyc,, +pip/_internal/commands/__pycache__/wheel.cpython-314.pyc,, +pip/_internal/commands/cache.py,sha256=XjT7kjY8GSISMksFHsLvjS9Ogfi5extNlUUv-dUoWCM,9142 +pip/_internal/commands/check.py,sha256=hVFBQezQ3zj4EydoWbFQj_afPUppMt7r9JPAlY22U6Y,2244 
+pip/_internal/commands/completion.py,sha256=LjvRIZ6QUiDXJL3IOMFeD-_J97HfjMGgEk0j2tWGu1U,4565 +pip/_internal/commands/configuration.py,sha256=6gNOGrVWnOLU15zUnAiNuOMhf76RRIZvCdVD0degPRk,10105 +pip/_internal/commands/debug.py,sha256=_8IqM8Fx1_lY2STu_qspr63tufF7zyFJCyYAXtxz0N4,6805 +pip/_internal/commands/download.py,sha256=LUNVobuvCdagjLBuPBaxHeBiHEiIe03fTO2m6ahC8qw,5178 +pip/_internal/commands/freeze.py,sha256=fxoW8AAc-bAqB_fXdNq2VnZ3JfWkFMg-bR6LcdDVO7A,3099 +pip/_internal/commands/hash.py,sha256=GO9pRN3wXC2kQaovK57TaLYBMc3IltOH92O6QEw6YE0,1679 +pip/_internal/commands/help.py,sha256=Bz3LcjNQXkz4Cu__pL4CZ86o4-HNLZj1NZWdlJhjuu0,1108 +pip/_internal/commands/index.py,sha256=kDpx2MO6ZxTt5PpeY4jqcssVbYhzxpkpreDe_6PPhks,5520 +pip/_internal/commands/inspect.py,sha256=ogm4UT7LRo8bIQcWUS1IiA25QdD4VHLa7JaPAodDttM,3177 +pip/_internal/commands/install.py,sha256=L6X1qi49ROVTGABhwwxDgBBTijlOpVn6XSDVZ7QW1Kc,30588 +pip/_internal/commands/list.py,sha256=L5nWuwawqSrBNsuxfyHLAagfz7XJP86tC9nK3L9YiI8,13497 +pip/_internal/commands/lock.py,sha256=145ihjUK_-7gP8O65XPDi_xMhlh5hne1ptkHdfnbAnQ,6027 +pip/_internal/commands/search.py,sha256=zbMsX_YASj6kXA6XIBgTDv0bGK51xG-CV3IynZJcE-c,5782 +pip/_internal/commands/show.py,sha256=oLVJIfKWmDKm0SsQGEi3pozNiqrXjTras_fbBSYKpBA,8066 +pip/_internal/commands/uninstall.py,sha256=CsOihqvb6ZA6O67L70oXeoLHeOfNzMM88H9g-9aocgw,3868 +pip/_internal/commands/wheel.py,sha256=L9vEzJ_E42scF_Hgh5X4Hk39nqJDKxGg4u7glDYbNWc,5880 +pip/_internal/configuration.py,sha256=WxwwSwY_Bm6QzDgf32BsujEyO8dgRedegCpgbUfDvM8,14568 +pip/_internal/distributions/__init__.py,sha256=Hq6kt6gXBgjNit5hTTWLAzeCNOKoB-N0pGYSqehrli8,858 +pip/_internal/distributions/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/distributions/__pycache__/base.cpython-314.pyc,, +pip/_internal/distributions/__pycache__/installed.cpython-314.pyc,, +pip/_internal/distributions/__pycache__/sdist.cpython-314.pyc,, +pip/_internal/distributions/__pycache__/wheel.cpython-314.pyc,, +pip/_internal/distributions/base.py,sha256=l-OTCAIs25lsapejA6IYpPZxSM5-BET4sdZDkql8jiY,1830 +pip/_internal/distributions/installed.py,sha256=kgIEE_1NzjZxLBSC-v5s64uOFZlVEt3aPrjTtL6x2XY,929 +pip/_internal/distributions/sdist.py,sha256=RYwQIbuxpKy6OjlBZCAefxpMDaoocUQ4dFtheGsiTOQ,6627 +pip/_internal/distributions/wheel.py,sha256=_HbG0OehF8dwj4UX-xV__tXLwgPus9OjMEf2NTRqBbE,1364 +pip/_internal/exceptions.py,sha256=JdPCrQ9iTLvE-GBebzBEeGP3hoTffWEKqbYEsa6cEZc,32165 +pip/_internal/index/__init__.py,sha256=tzwMH_fhQeubwMqHdSivasg1cRgTSbNg2CiMVnzMmyU,29 +pip/_internal/index/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/index/__pycache__/collector.cpython-314.pyc,, +pip/_internal/index/__pycache__/package_finder.cpython-314.pyc,, +pip/_internal/index/__pycache__/sources.cpython-314.pyc,, +pip/_internal/index/collector.py,sha256=R7Gcx_4GEoSEI-iazfAZVEPG3Lp6mbZT4lbAD6NjAc0,16144 +pip/_internal/index/package_finder.py,sha256=a3_L4FDNsuDf3y8Af9J7sfsHR1ahs8o13Ths-WYwFh0,41776 +pip/_internal/index/sources.py,sha256=nXJkOjhLy-O2FsrKU9RIqCOqgY2PsoKWybtZjjRgqU0,8639 +pip/_internal/locations/__init__.py,sha256=Sd67ap1LIemvXArUDFqm8U-HuZvj9i3ApEuiIwUc9UE,14157 +pip/_internal/locations/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/locations/__pycache__/_distutils.cpython-314.pyc,, +pip/_internal/locations/__pycache__/_sysconfig.cpython-314.pyc,, +pip/_internal/locations/__pycache__/base.cpython-314.pyc,, +pip/_internal/locations/_distutils.py,sha256=jpFj4V00rD9IR3vA9TqrGkwcdNVFc58LsChZavge9JY,5975 
+pip/_internal/locations/_sysconfig.py,sha256=8CpTjtxaCzHSCrKpaxWnHE7aKcJrRJRmntR1ZLVysLk,7779 +pip/_internal/locations/base.py,sha256=AImjYJWxOtDkc0KKc6Y4Gz677cg91caMA4L94B9FZEg,2550 +pip/_internal/main.py,sha256=1cHqjsfFCrMFf3B5twzocxTJUdHMLoXUpy5lJoFqUi8,338 +pip/_internal/metadata/__init__.py,sha256=vp-JAxiWg_-l5F8AT0Jcey72uUnh8CDwwol9-KktHZ8,5824 +pip/_internal/metadata/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/metadata/__pycache__/_json.cpython-314.pyc,, +pip/_internal/metadata/__pycache__/base.cpython-314.pyc,, +pip/_internal/metadata/__pycache__/pkg_resources.cpython-314.pyc,, +pip/_internal/metadata/_json.py,sha256=hNvnMHOXLAyNlzirWhPL9Nx2CvCqa1iRma6Osq1YfV8,2711 +pip/_internal/metadata/base.py,sha256=BGuMenlcQT8i7j9iclrfdC3vSwgvhr8gjn955cCy16s,25420 +pip/_internal/metadata/importlib/__init__.py,sha256=jUUidoxnHcfITHHaAWG1G2i5fdBYklv_uJcjo2x7VYE,135 +pip/_internal/metadata/importlib/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/metadata/importlib/__pycache__/_compat.cpython-314.pyc,, +pip/_internal/metadata/importlib/__pycache__/_dists.cpython-314.pyc,, +pip/_internal/metadata/importlib/__pycache__/_envs.cpython-314.pyc,, +pip/_internal/metadata/importlib/_compat.py,sha256=sneVh4_6WxQZK4ljdl3ylVuP-q0ttSqbgl9mWt0HnOg,2804 +pip/_internal/metadata/importlib/_dists.py,sha256=znZD7MN4RC73-87KXAn6tKZv9lAQRI0AxxK2bubDvPw,8420 +pip/_internal/metadata/importlib/_envs.py,sha256=H3qVLXVh4LWvrPvu_ekXf3dfbtwnlhNJQP2pxXpccfU,5333 +pip/_internal/metadata/pkg_resources.py,sha256=NO76ZrfR2-LKJTyaXrmQoGhmJMArALvacrlZHViSDT8,10544 +pip/_internal/models/__init__.py,sha256=AjmCEBxX_MH9f_jVjIGNCFJKYCYeSEe18yyvNx4uRKQ,62 +pip/_internal/models/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/models/__pycache__/candidate.cpython-314.pyc,, +pip/_internal/models/__pycache__/direct_url.cpython-314.pyc,, +pip/_internal/models/__pycache__/format_control.cpython-314.pyc,, +pip/_internal/models/__pycache__/index.cpython-314.pyc,, +pip/_internal/models/__pycache__/installation_report.cpython-314.pyc,, +pip/_internal/models/__pycache__/link.cpython-314.pyc,, +pip/_internal/models/__pycache__/release_control.cpython-314.pyc,, +pip/_internal/models/__pycache__/scheme.cpython-314.pyc,, +pip/_internal/models/__pycache__/search_scope.cpython-314.pyc,, +pip/_internal/models/__pycache__/selection_prefs.cpython-314.pyc,, +pip/_internal/models/__pycache__/target_python.cpython-314.pyc,, +pip/_internal/models/__pycache__/wheel.cpython-314.pyc,, +pip/_internal/models/candidate.py,sha256=zzgFRuw_kWPjKpGw7LC0ZUMD2CQ2EberUIYs8izjdCA,753 +pip/_internal/models/direct_url.py,sha256=4NMWacu_QzPPWREC1te7v6Wfv-2HkI4tvSJF-CBgLh4,6555 +pip/_internal/models/format_control.py,sha256=PwemYG1L27BM0f1KP61rm24wShENFyxqlD1TWu34alc,2471 +pip/_internal/models/index.py,sha256=tYnL8oxGi4aSNWur0mG8DAP7rC6yuha_MwJO8xw0crI,1030 +pip/_internal/models/installation_report.py,sha256=cqfWJ93ThCxjcacqSWryOCD2XtIn1CZrgzZxAv5FQZ0,2839 +pip/_internal/models/link.py,sha256=zti5UCx1hT03etYqm6MCqFd714clmTgX8rTZT9CKZDQ,21992 +pip/_internal/models/release_control.py,sha256=XD14Hy_XLh9xWR1p7JHqPZPEv3Nnb1BZGMpClk76sLs,3403 +pip/_internal/models/scheme.py,sha256=PakmHJM3e8OOWSZFtfz1Az7f1meONJnkGuQxFlt3wBE,575 +pip/_internal/models/search_scope.py,sha256=1hxU2IVsAaLZVjp0CbzJbYaYzCxv72_Qbg3JL0qhXo0,4507 +pip/_internal/models/selection_prefs.py,sha256=IDOA3euRtyqWUyIK7lX2bzIZasYiEvunKA6H3Mngk-M,2221 +pip/_internal/models/target_python.py,sha256=I0eFS-eia3kwhrOvgsphFZtNAB2IwXZ9Sr9fp6IjBP4,4243 
+pip/_internal/models/wheel.py,sha256=1SdfDvN7ALTsbyZ9EOsNy1GPirP1n6EjHyzPrZyLSh8,2920 +pip/_internal/network/__init__.py,sha256=FMy06P__y6jMjUc8z3ZcQdKF-pmZ2zM14_vBeHPGhUI,49 +pip/_internal/network/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/network/__pycache__/auth.cpython-314.pyc,, +pip/_internal/network/__pycache__/cache.cpython-314.pyc,, +pip/_internal/network/__pycache__/download.cpython-314.pyc,, +pip/_internal/network/__pycache__/lazy_wheel.cpython-314.pyc,, +pip/_internal/network/__pycache__/session.cpython-314.pyc,, +pip/_internal/network/__pycache__/utils.cpython-314.pyc,, +pip/_internal/network/__pycache__/xmlrpc.cpython-314.pyc,, +pip/_internal/network/auth.py,sha256=azFp14I9cyWAAzkxF2VM0Q_xtHnbNz3_NQXszy87KQo,20806 +pip/_internal/network/cache.py,sha256=kmRXKQrG9E26xQRj211LHeEGpDg_SlYU9Dn1fJ-AMeI,4862 +pip/_internal/network/download.py,sha256=8sVwIc9MWwpGlMPYCkO1S9U-FD8TA2utw42tj00skjM,12667 +pip/_internal/network/lazy_wheel.py,sha256=y9gVksdJCSjnLfYzs_m3DYUAtl3hc_k-xFPDBd9DgOs,7646 +pip/_internal/network/session.py,sha256=7zK7EeQCSRFipu4ZzcWl1V3AMKkiXdtGqFr7GvU2LrY,19555 +pip/_internal/network/utils.py,sha256=ACsXd1msqNCidHVXsu7LHUSr8NgaypcOKQ4KG-Z_wJM,4091 +pip/_internal/network/xmlrpc.py,sha256=_-Rnk3vOff8uF9hAGmT6SLALflY1gMBcbGwS12fb_Y4,1830 +pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/operations/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/operations/__pycache__/check.cpython-314.pyc,, +pip/_internal/operations/__pycache__/freeze.cpython-314.pyc,, +pip/_internal/operations/__pycache__/prepare.cpython-314.pyc,, +pip/_internal/operations/build/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/operations/build/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/operations/build/__pycache__/build_tracker.cpython-314.pyc,, +pip/_internal/operations/build/__pycache__/metadata.cpython-314.pyc,, +pip/_internal/operations/build/__pycache__/metadata_editable.cpython-314.pyc,, +pip/_internal/operations/build/__pycache__/wheel.cpython-314.pyc,, +pip/_internal/operations/build/__pycache__/wheel_editable.cpython-314.pyc,, +pip/_internal/operations/build/build_tracker.py,sha256=W3b5cmkMWPaE6QIwfzsTayJo7-OlxFHWDxfPuax1KcE,4771 +pip/_internal/operations/build/metadata.py,sha256=INHaeiRfOiLYCXApfDNRo9Cw2xI4VwTc0KItvfdfOjk,1421 +pip/_internal/operations/build/metadata_editable.py,sha256=oWudMsnjy4loO_Jy7g4N9nxsnaEX_iDlVRgCy7pu1rs,1509 +pip/_internal/operations/build/wheel.py,sha256=3bP-nNiJ4S8JvMaBnyessXQUBhxTqt1GBx6DQ1iPJDY,1136 +pip/_internal/operations/build/wheel_editable.py,sha256=q3kfElclM6FutVbFwE87JOTpVWt5ixDf3_UkHAIVfz4,1478 +pip/_internal/operations/check.py,sha256=yC2XWth6iehGGE_fj7XRJLjVKBsTIG3ZoWRkFi3rOwc,5894 +pip/_internal/operations/freeze.py,sha256=PDdY-y_ZtZZJLAKcaWPIGRKAGW7DXR48f0aMRU0j7BA,9854 +pip/_internal/operations/install/__init__.py,sha256=ak-UETcQPKlFZaWoYKWu5QVXbpFBvg0sXc3i0O4vSYY,50 +pip/_internal/operations/install/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/operations/install/__pycache__/wheel.cpython-314.pyc,, +pip/_internal/operations/install/wheel.py,sha256=FQIl2AnNadHV5YGGOVEmOHtUUNO8lpzj3Icoo4S2xis,27923 +pip/_internal/operations/prepare.py,sha256=ptVsmQf0Mo6jirk1Q5Djdse_wJw5Zdh1Fla2iL9HAJM,28830 +pip/_internal/pyproject.py,sha256=J-sTWqC-XfsKQgz9m1bypMWZPHItsSHzIN_NWeIRmhM,4555 +pip/_internal/req/__init__.py,sha256=WcY9z7D3rlIKX1QY8_tRnAsS_poebiGGdtQ7EJ5JQQo,3041 +pip/_internal/req/__pycache__/__init__.cpython-314.pyc,, 
+pip/_internal/req/__pycache__/constructors.cpython-314.pyc,, +pip/_internal/req/__pycache__/pep723.cpython-314.pyc,, +pip/_internal/req/__pycache__/req_dependency_group.cpython-314.pyc,, +pip/_internal/req/__pycache__/req_file.cpython-314.pyc,, +pip/_internal/req/__pycache__/req_install.cpython-314.pyc,, +pip/_internal/req/__pycache__/req_set.cpython-314.pyc,, +pip/_internal/req/__pycache__/req_uninstall.cpython-314.pyc,, +pip/_internal/req/constructors.py,sha256=R-6n8irjnaa2DMMXlR4YMouXzykFBlzUFjhOZ1NcUUg,18688 +pip/_internal/req/pep723.py,sha256=olZL3tLmHWJhyLNfbD6U9UuikuzTcLDB06qd9WavTjs,1225 +pip/_internal/req/req_dependency_group.py,sha256=0yEQCUaO5Bza66Y3D5o9JRf0qII5QgCRugn1x5aRivA,2618 +pip/_internal/req/req_file.py,sha256=e32ZQ3kJaL_Sdtf32twGKqIau_AqR43MeSycl0iS2Mw,20685 +pip/_internal/req/req_install.py,sha256=vv5cbs3P5gf43e_1v72gwSQ2N_D_qpsfuXOyerMhDuI,31273 +pip/_internal/req/req_set.py,sha256=awkqIXnYA4Prmsj0Qb3zhqdbYUmXd-1o0P-KZ3mvRQs,2828 +pip/_internal/req/req_uninstall.py,sha256=dCmOHt-9RaJBq921L4tMH3PmIBDetGplnbjRKXmGt00,24099 +pip/_internal/resolution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/resolution/__pycache__/base.cpython-314.pyc,, +pip/_internal/resolution/base.py,sha256=RIsqSP79olPdOgtPKW-oOQ364ICVopehA6RfGkRfe2s,577 +pip/_internal/resolution/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/legacy/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/resolution/legacy/__pycache__/resolver.cpython-314.pyc,, +pip/_internal/resolution/legacy/resolver.py,sha256=bwUqE66etz2bcPabqxed18-iyqqb-kx3Er2aT6GeUJY,24060 +pip/_internal/resolution/resolvelib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/base.cpython-314.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-314.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-314.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-314.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-314.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-314.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-314.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-314.pyc,, +pip/_internal/resolution/resolvelib/base.py,sha256=_AoP0ZWlaSct8CRDn2ol3CbNn4zDtnh_0zQGjXASDKI,5047 +pip/_internal/resolution/resolvelib/candidates.py,sha256=50AN7BfB-pCfEmbKNlFZSXtdC0C8ms1waJrF2arknQE,20454 +pip/_internal/resolution/resolvelib/factory.py,sha256=82mLwnPlig37mMrDwcgKHJTE9mPczVuJIxeaUb7CQ0Y,34028 +pip/_internal/resolution/resolvelib/found_candidates.py,sha256=8bZYDCZLXSdLHy_s1o5f4r15HmKvqFUhzBUQOF21Lr4,6018 +pip/_internal/resolution/resolvelib/provider.py,sha256=tbVPfFv4Vg780yZ2_XGoGFP5LVo0U2bFnZov3jpSAIk,11441 +pip/_internal/resolution/resolvelib/reporter.py,sha256=faSgjqme0k_uzv1fvM5T0ZatPQ2eEktNvKBqfvXeGjc,3909 +pip/_internal/resolution/resolvelib/requirements.py,sha256=Izl9n8nc188lA1BSPS8QxfudfDQPHgngw-ij6hXt0nQ,8239 +pip/_internal/resolution/resolvelib/resolver.py,sha256=wQ94Hkep-7kWEHAc-NbMJhmzeEzgEAtxeBxyKVzZoeo,13437 +pip/_internal/self_outdated_check.py,sha256=zDKsyLMufFHuEZY16WRu129FBbBp-ADuxyWMIN4ihPE,8284 +pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pip/_internal/utils/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/utils/__pycache__/_jaraco_text.cpython-314.pyc,, +pip/_internal/utils/__pycache__/_log.cpython-314.pyc,, +pip/_internal/utils/__pycache__/appdirs.cpython-314.pyc,, +pip/_internal/utils/__pycache__/compat.cpython-314.pyc,, +pip/_internal/utils/__pycache__/compatibility_tags.cpython-314.pyc,, +pip/_internal/utils/__pycache__/datetime.cpython-314.pyc,, +pip/_internal/utils/__pycache__/deprecation.cpython-314.pyc,, +pip/_internal/utils/__pycache__/direct_url_helpers.cpython-314.pyc,, +pip/_internal/utils/__pycache__/egg_link.cpython-314.pyc,, +pip/_internal/utils/__pycache__/entrypoints.cpython-314.pyc,, +pip/_internal/utils/__pycache__/filesystem.cpython-314.pyc,, +pip/_internal/utils/__pycache__/filetypes.cpython-314.pyc,, +pip/_internal/utils/__pycache__/glibc.cpython-314.pyc,, +pip/_internal/utils/__pycache__/hashes.cpython-314.pyc,, +pip/_internal/utils/__pycache__/logging.cpython-314.pyc,, +pip/_internal/utils/__pycache__/misc.cpython-314.pyc,, +pip/_internal/utils/__pycache__/packaging.cpython-314.pyc,, +pip/_internal/utils/__pycache__/pylock.cpython-314.pyc,, +pip/_internal/utils/__pycache__/retry.cpython-314.pyc,, +pip/_internal/utils/__pycache__/subprocess.cpython-314.pyc,, +pip/_internal/utils/__pycache__/temp_dir.cpython-314.pyc,, +pip/_internal/utils/__pycache__/unpacking.cpython-314.pyc,, +pip/_internal/utils/__pycache__/urls.cpython-314.pyc,, +pip/_internal/utils/__pycache__/virtualenv.cpython-314.pyc,, +pip/_internal/utils/__pycache__/wheel.cpython-314.pyc,, +pip/_internal/utils/_jaraco_text.py,sha256=M15uUPIh5NpP1tdUGBxRau6q1ZAEtI8-XyLEETscFfE,3350 +pip/_internal/utils/_log.py,sha256=-jHLOE_THaZz5BFcCnoSL9EYAtJ0nXem49s9of4jvKw,1015 +pip/_internal/utils/appdirs.py,sha256=LrzDPZMKVh0rubtCx9vu3XlZbLCSug6VSj4Qsvt66BA,1681 +pip/_internal/utils/compat.py,sha256=C9LHXJAKkwAH8Hn3nPkz9EYK3rqPBeO_IXkOG2zzsdQ,2514 +pip/_internal/utils/compatibility_tags.py,sha256=DiNSLqpuruXUamGQwOJ2WZByDGLTGaXi9O-Xf8fOi34,6630 +pip/_internal/utils/datetime.py,sha256=kuJOf1mW8G5tRFN6jWardddS-9qSaR53lK1jmx3NTZY,868 +pip/_internal/utils/deprecation.py,sha256=HVhvyO5qiRFcG88PhZlp_87qdKQNwPTUIIHWtsTR2yI,3696 +pip/_internal/utils/direct_url_helpers.py,sha256=ttKv4GMUqlRwPPog9_CUopy6SDgoxVILzeBJzgfn2tg,3200 +pip/_internal/utils/egg_link.py,sha256=YWfsrbmfcrfWgqQYy6OuIjsyb9IfL1q_2v4zsms1WjI,2459 +pip/_internal/utils/entrypoints.py,sha256=uPjAyShKObdotjQjJUzprQ6r3xQvDIZwUYfHHqZ7Dok,3324 +pip/_internal/utils/filesystem.py,sha256=mJ_PP8z1V1x4HMhydWIWDyEmWikLX0f-NXPCXEcjiLo,6892 +pip/_internal/utils/filetypes.py,sha256=sEMa38qaqjvx1Zid3OCAUja31BOBU-USuSMPBvU3yjo,689 +pip/_internal/utils/glibc.py,sha256=sEh8RJJLYSdRvTqAO4THVPPA-YSDVLD4SI9So-bxX1U,3726 +pip/_internal/utils/hashes.py,sha256=d32UI1en8nyqZzdZQvxUVdfeBoe4ADWx7HtrIM4-XQ4,4998 +pip/_internal/utils/logging.py,sha256=6lJWMC6c7_aD_i4sdgaaeb-Tm3kWpYg0hba_V1-OLnE,13414 +pip/_internal/utils/misc.py,sha256=phFIbHm2kmliHDXJ0eNPxgGP423ZpvZoMKKtJ1_Zvjs,23722 +pip/_internal/utils/packaging.py,sha256=s5tpUmFumwV0H9JSTzryrIY4JwQM8paGt7Sm7eNwt2Y,1601 +pip/_internal/utils/pylock.py,sha256=nKQknZgyswWgzi--hRQX_DLUYQ3g5wGTCwVNQNdoJ54,3817 +pip/_internal/utils/retry.py,sha256=83wReEB2rcntMZ5VLd7ascaYSjn_kLdlQCqxILxWkPM,1461 +pip/_internal/utils/subprocess.py,sha256=r4-Ba_Yc3uZXQpi0K4pZFsCT_QqdSvtF3XJ-204QWaA,8983 +pip/_internal/utils/temp_dir.py,sha256=D9c8D7WOProOO8GGDqpBeVSj10NGFmunG0o2TodjjIU,9307 +pip/_internal/utils/unpacking.py,sha256=4hNg6dqHOn_KzGCzSC76nChG97d_UjtF9AnLSof672o,12972 
+pip/_internal/utils/urls.py,sha256=aF_eg9ul5d8bMCxfSSSxQcfs-OpJdbStYqZHoy2K1RE,1601 +pip/_internal/utils/virtualenv.py,sha256=mX-UPyw1MPxhwUxKhbqWWX70J6PHXAJjVVrRnG0h9mc,3455 +pip/_internal/utils/wheel.py,sha256=YdRuj6MicG-Q9Mg03FbUv1WTLam6Lc7AgijY4voVyis,4468 +pip/_internal/vcs/__init__.py,sha256=UAqvzpbi0VbZo3Ub6skEeZAw-ooIZR-zX_WpCbxyCoU,596 +pip/_internal/vcs/__pycache__/__init__.cpython-314.pyc,, +pip/_internal/vcs/__pycache__/bazaar.cpython-314.pyc,, +pip/_internal/vcs/__pycache__/git.cpython-314.pyc,, +pip/_internal/vcs/__pycache__/mercurial.cpython-314.pyc,, +pip/_internal/vcs/__pycache__/subversion.cpython-314.pyc,, +pip/_internal/vcs/__pycache__/versioncontrol.cpython-314.pyc,, +pip/_internal/vcs/bazaar.py,sha256=3W1eHjkYx2vc6boeb2NBh4I_rlGAXM-vrzfNhLm1Rxg,3734 +pip/_internal/vcs/git.py,sha256=TTeqDuzS-_BFSNuUStVWmE2nGDpKuvUhBBJk_CCQXV0,19144 +pip/_internal/vcs/mercurial.py,sha256=w1ZJWLKqNP1onEjkfjlwBVnMqPZNSIER8ayjQcnTq4w,5575 +pip/_internal/vcs/subversion.py,sha256=uUgdPvxmvEB8Qwtjr0Hc0XgFjbiNi5cbvI4vARLOJXo,11787 +pip/_internal/vcs/versioncontrol.py,sha256=Ma_HMZBVveSkeYvxacvqeujnkSIaF1XjxTsS3BwcJ8E,22599 +pip/_internal/wheel_builder.py,sha256=yvEULStZtty9Kplp89tDis3hGdyKQ-2BUbFLmJ_5ink,9010 +pip/_vendor/README.rst,sha256=pKKBwCWhu3M3qQ9dDnsmxb3KdsRr-nWmMq2srbH_Bi0,9394 +pip/_vendor/__init__.py,sha256=WzusPTGWIMeQQWSVJ0h2rafGkVTa9WKJ2HT-2-EoZrU,4907 +pip/_vendor/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/cachecontrol/LICENSE.txt,sha256=hu7uh74qQ_P_H1ZJb0UfaSQ5JvAl_tuwM2ZsMExMFhs,558 +pip/_vendor/cachecontrol/__init__.py,sha256=GxwRkm_TQBtPZpfpVK9r6S9dAy2DVnVgDVHJKTiPZ1k,820 +pip/_vendor/cachecontrol/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-314.pyc,, +pip/_vendor/cachecontrol/__pycache__/adapter.cpython-314.pyc,, +pip/_vendor/cachecontrol/__pycache__/cache.cpython-314.pyc,, +pip/_vendor/cachecontrol/__pycache__/controller.cpython-314.pyc,, +pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-314.pyc,, +pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-314.pyc,, +pip/_vendor/cachecontrol/__pycache__/serialize.cpython-314.pyc,, +pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-314.pyc,, +pip/_vendor/cachecontrol/_cmd.py,sha256=iist2EpzJvDVIhMAxXq8iFnTBsiZAd6iplxfmNboNyk,1737 +pip/_vendor/cachecontrol/adapter.py,sha256=W-HW-l01gyCsnxkOyCbqx7sxrWYoBbKrDsKkVVQN6NE,6586 +pip/_vendor/cachecontrol/cache.py,sha256=OXwv7Fn2AwnKNiahJHnjtvaKLndvVLv_-zO-ltlV9qI,1953 +pip/_vendor/cachecontrol/caches/__init__.py,sha256=dtrrroK5BnADR1GWjCZ19aZ0tFsMfvFBtLQQU1sp_ag,303 +pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-314.pyc,, +pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-314.pyc,, +pip/_vendor/cachecontrol/caches/file_cache.py,sha256=d8upFmy_zwaCmlbWEVBlLXFddt8Zw8c5SFpxeOZsdfw,4117 +pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=9rmqwtYu_ljVkW6_oLqbC7EaX_a8YT_yLuna-eS0dgo,1386 +pip/_vendor/cachecontrol/controller.py,sha256=xBauC-vUSu5GsJsxD4-W-JaKqqbBz0MN6Zv8PA2N8hI,19102 +pip/_vendor/cachecontrol/filewrapper.py,sha256=DhxC_rSk-beKdbsYhfvBUDovQHX9r3gHH_jP9-q_mKk,4354 +pip/_vendor/cachecontrol/heuristics.py,sha256=gqMXU8w0gQuEQiSdu3Yg-0vd9kW7nrWKbLca75rheGE,4881 +pip/_vendor/cachecontrol/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/cachecontrol/serialize.py,sha256=HQd2IllQ05HzPkVLMXTF2uX5mjEQjDBkxCqUJUODpZk,5163 
+pip/_vendor/cachecontrol/wrapper.py,sha256=hsGc7g8QGQTT-4f8tgz3AM5qwScg6FO0BSdLSRdEvpU,1417 +pip/_vendor/certifi/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989 +pip/_vendor/certifi/__init__.py,sha256=969deMMS7Uchipr0oO4dbRBUvRi0uNYCn07VmG1aTrg,94 +pip/_vendor/certifi/__main__.py,sha256=1k3Cr95vCxxGRGDljrW3wMdpZdL3Nhf0u1n-k2qdsCY,255 +pip/_vendor/certifi/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/certifi/__pycache__/__main__.cpython-314.pyc,, +pip/_vendor/certifi/__pycache__/core.cpython-314.pyc,, +pip/_vendor/certifi/cacert.pem,sha256=Tzl1_zCrvzVEO0hgZK6Ly0Hf9wf_31dsdtKS-0WKoKk,270954 +pip/_vendor/certifi/core.py,sha256=gu_ECVI1m3Rq0ytpsNE61hgQGcKaOAt9Rs9G8KsTCOI,3442 +pip/_vendor/certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/dependency_groups/LICENSE.txt,sha256=GrNuPipLqGMWJThPh-ngkdsfrtA0xbIzJbMjmr8sxSU,1099 +pip/_vendor/dependency_groups/__init__.py,sha256=C3OFu0NGwDzQ4LOmmSOFPsRSvkbBn-mdd4j_5YqJw-s,250 +pip/_vendor/dependency_groups/__main__.py,sha256=UNTM7P5mfVtT7wDi9kOTXWgV3fu3e8bTrt1Qp1jvjKo,1709 +pip/_vendor/dependency_groups/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/dependency_groups/__pycache__/__main__.cpython-314.pyc,, +pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-314.pyc,, +pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-314.pyc,, +pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-314.pyc,, +pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-314.pyc,, +pip/_vendor/dependency_groups/_implementation.py,sha256=Gqb2DlQELRakeHlKf6QtQSW0M-bcEomxHw4JsvID1ls,8041 +pip/_vendor/dependency_groups/_lint_dependency_groups.py,sha256=yp-DDqKXtbkDTNa0ifa-FmOA8ra24lPZEXftW-R5AuI,1710 +pip/_vendor/dependency_groups/_pip_wrapper.py,sha256=nuVW_w_ntVxpE26ELEvngMY0N04sFLsijXRyZZROFG8,1865 +pip/_vendor/dependency_groups/_toml_compat.py,sha256=BHnXnFacm3DeolsA35GjI6qkDApvua-1F20kv3BfZWE,285 +pip/_vendor/dependency_groups/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/distlib/LICENSE.txt,sha256=gI4QyKarjesUn_mz-xn0R6gICUYG1xKpylf-rTVSWZ0,14531 +pip/_vendor/distlib/__init__.py,sha256=Deo3uo98aUyIfdKJNqofeSEFWwDzrV2QeGLXLsgq0Ag,625 +pip/_vendor/distlib/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/distlib/__pycache__/compat.cpython-314.pyc,, +pip/_vendor/distlib/__pycache__/resources.cpython-314.pyc,, +pip/_vendor/distlib/__pycache__/scripts.cpython-314.pyc,, +pip/_vendor/distlib/__pycache__/util.cpython-314.pyc,, +pip/_vendor/distlib/compat.py,sha256=2jRSjRI4o-vlXeTK2BCGIUhkc6e9ZGhSsacRM5oseTw,41467 +pip/_vendor/distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820 +pip/_vendor/distlib/scripts.py,sha256=Qvp76E9Jc3IgyYubnpqI9fS7eseGOe4FjpeVKqKt9Iw,18612 +pip/_vendor/distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792 +pip/_vendor/distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784 +pip/_vendor/distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032 +pip/_vendor/distlib/util.py,sha256=vMPGvsS4j9hF6Y9k3Tyom1aaHLb0rFmZAEyzeAdel9w,66682 +pip/_vendor/distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648 +pip/_vendor/distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448 +pip/_vendor/distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888 +pip/_vendor/distro/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325 
+pip/_vendor/distro/__init__.py,sha256=2fHjF-SfgPvjyNZ1iHh_wjqWdR_Yo5ODHwZC0jLBPhc,981 +pip/_vendor/distro/__main__.py,sha256=bu9d3TifoKciZFcqRBuygV3GSuThnVD_m2IK4cz96Vs,64 +pip/_vendor/distro/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/distro/__pycache__/__main__.cpython-314.pyc,, +pip/_vendor/distro/__pycache__/distro.cpython-314.pyc,, +pip/_vendor/distro/distro.py,sha256=XqbefacAhDT4zr_trnbA15eY8vdK4GTghgmvUGrEM_4,49430 +pip/_vendor/distro/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/idna/LICENSE.md,sha256=t6M2q_OwThgOwGXN0W5wXQeeHMehT5EKpukYfza5zYc,1541 +pip/_vendor/idna/__init__.py,sha256=MPqNDLZbXqGaNdXxAFhiqFPKEQXju2jNQhCey6-5eJM,868 +pip/_vendor/idna/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/idna/__pycache__/codec.cpython-314.pyc,, +pip/_vendor/idna/__pycache__/compat.cpython-314.pyc,, +pip/_vendor/idna/__pycache__/core.cpython-314.pyc,, +pip/_vendor/idna/__pycache__/idnadata.cpython-314.pyc,, +pip/_vendor/idna/__pycache__/intranges.cpython-314.pyc,, +pip/_vendor/idna/__pycache__/package_data.cpython-314.pyc,, +pip/_vendor/idna/__pycache__/uts46data.cpython-314.pyc,, +pip/_vendor/idna/codec.py,sha256=M2SGWN7cs_6B32QmKTyTN6xQGZeYQgQ2wiX3_DR6loE,3438 +pip/_vendor/idna/compat.py,sha256=RzLy6QQCdl9784aFhb2EX9EKGCJjg0P3PilGdeXXcx8,316 +pip/_vendor/idna/core.py,sha256=P26_XVycuMTZ1R2mNK1ZREVzM5mvTzdabBXfyZVU1Lc,13246 +pip/_vendor/idna/idnadata.py,sha256=SG8jhaGE53iiD6B49pt2pwTv_UvClciWE-N54oR2p4U,79623 +pip/_vendor/idna/intranges.py,sha256=amUtkdhYcQG8Zr-CoMM_kVRacxkivC1WgxN1b63KKdU,1898 +pip/_vendor/idna/package_data.py,sha256=_CUavOxobnbyNG2FLyHoN8QHP3QM9W1tKuw7eq9QwBk,21 +pip/_vendor/idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/idna/uts46data.py,sha256=H9J35VkD0F9L9mKOqjeNGd2A-Va6FlPoz6Jz4K7h-ps,243725 +pip/_vendor/msgpack/COPYING,sha256=SS3tuoXaWHL3jmCRvNH-pHTWYNNay03ulkuKqz8AdCc,614 +pip/_vendor/msgpack/__init__.py,sha256=RA8gcqK17YpkxBnNwXJVa1oa2LygWDgfF1nA1NPw3mo,1109 +pip/_vendor/msgpack/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/msgpack/__pycache__/exceptions.cpython-314.pyc,, +pip/_vendor/msgpack/__pycache__/ext.cpython-314.pyc,, +pip/_vendor/msgpack/__pycache__/fallback.cpython-314.pyc,, +pip/_vendor/msgpack/exceptions.py,sha256=dCTWei8dpkrMsQDcjQk74ATl9HsIBH0ybt8zOPNqMYc,1081 +pip/_vendor/msgpack/ext.py,sha256=kteJv03n9tYzd5oo3xYopVTo4vRaAxonBQQJhXohZZo,5726 +pip/_vendor/msgpack/fallback.py,sha256=0g1Pzp0vtmBEmJ5w9F3s_-JMVURP8RS4G1cc5TRaAsI,32390 +pip/_vendor/packaging/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 +pip/_vendor/packaging/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +pip/_vendor/packaging/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 +pip/_vendor/packaging/__init__.py,sha256=y4lVbpeBzCGk-IPDw5BGBZ_b0P3ukEEJZAbGYc6Ey8c,494 +pip/_vendor/packaging/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/_elffile.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/_manylinux.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/_musllinux.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/_parser.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/_structures.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/_tokenizer.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/markers.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/metadata.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/pylock.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/requirements.cpython-314.pyc,, 
+pip/_vendor/packaging/__pycache__/specifiers.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/tags.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/utils.cpython-314.pyc,, +pip/_vendor/packaging/__pycache__/version.cpython-314.pyc,, +pip/_vendor/packaging/_elffile.py,sha256=-sKkptYqzYw2-x3QByJa5mB4rfPWu1pxkZHRx1WAFCY,3211 +pip/_vendor/packaging/_manylinux.py,sha256=Hf6nB0cOrayEs96-p3oIXAgGnFquv20DO5l-o2_Xnv0,9559 +pip/_vendor/packaging/_musllinux.py,sha256=Z6swjH3MA7XS3qXnmMN7QPhqP3fnoYI0eQ18e9-HgAE,2707 +pip/_vendor/packaging/_parser.py,sha256=U_DajsEx2VoC_F46fSVV3hDKNCWoQYkPkasO3dld0ig,10518 +pip/_vendor/packaging/_structures.py,sha256=Hn49Ta8zV9Wo8GiCL8Nl2ARZY983Un3pruZGVNldPwE,1514 +pip/_vendor/packaging/_tokenizer.py,sha256=M8EwNIdXeL9NMFuFrQtiOKwjka_xFx8KjRQnfE8O_z8,5421 +pip/_vendor/packaging/licenses/__init__.py,sha256=TwXLHZCXwSgdFwRLPxW602T6mSieunSFHM6fp8pgW78,5819 +pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-314.pyc,, +pip/_vendor/packaging/licenses/_spdx.py,sha256=WW7DXiyg68up_YND_wpRYlr1SHhiV4FfJLQffghhMxQ,51122 +pip/_vendor/packaging/markers.py,sha256=ZX-cLvW1S3cZcEc0fHI4z7zSx5U2T19yMpDP_mE-CYw,12771 +pip/_vendor/packaging/metadata.py,sha256=CWVZpN_HfoYMSSDuCP7igOvGgqA9AOmpW8f3qTisfnc,39360 +pip/_vendor/packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/packaging/pylock.py,sha256=-R1uNfJ4PaLto7Mg62YsGOHgvskuiIEqPwxOywl42Jk,22537 +pip/_vendor/packaging/requirements.py,sha256=PMCAWD8aNMnVD-6uZMedhBuAVX2573eZ4yPBLXmz04I,2870 +pip/_vendor/packaging/specifiers.py,sha256=tF2nC-jwW94FYe6So9dNGenQx1Hdif7ErmWlVp1QiXE,40821 +pip/_vendor/packaging/tags.py,sha256=cXLV1pJD3UtJlDg7Wz3zrfdQhRZqr8jumSAKKAAd2xE,22856 +pip/_vendor/packaging/utils.py,sha256=N4c6oZzFJy6klTZ3AnkNz7sSkJesuFWPp68LA3B5dAo,5040 +pip/_vendor/packaging/version.py,sha256=RVRKq8_GD5Bcak6E1kGG8K7siNZYW9n_XK8M2ZLl0H8,23284 +pip/_vendor/pkg_resources/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 +pip/_vendor/pkg_resources/__init__.py,sha256=vbTJ0_ruUgGxQjlEqsruFmiNPVyh2t9q-zyTDT053xI,124451 +pip/_vendor/pkg_resources/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/platformdirs/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089 +pip/_vendor/platformdirs/__init__.py,sha256=UfeSHWl8AeTtbOBOoHAxK4dODOWkZtfy-m_i7cWdJ8c,22344 +pip/_vendor/platformdirs/__main__.py,sha256=jBJ8zb7Mpx5ebcqF83xrpO94MaeCpNGHVf9cvDN2JLg,1505 +pip/_vendor/platformdirs/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/platformdirs/__pycache__/__main__.cpython-314.pyc,, +pip/_vendor/platformdirs/__pycache__/android.cpython-314.pyc,, +pip/_vendor/platformdirs/__pycache__/api.cpython-314.pyc,, +pip/_vendor/platformdirs/__pycache__/macos.cpython-314.pyc,, +pip/_vendor/platformdirs/__pycache__/unix.cpython-314.pyc,, +pip/_vendor/platformdirs/__pycache__/version.cpython-314.pyc,, +pip/_vendor/platformdirs/__pycache__/windows.cpython-314.pyc,, +pip/_vendor/platformdirs/android.py,sha256=r0DshVBf-RO1jXJGX8C4Til7F1XWt-bkdWMgmvEiaYg,9013 +pip/_vendor/platformdirs/api.py,sha256=wPHOlwOsfz2oqQZ6A2FcCu5kEAj-JondzoNOHYFQ0h8,9281 +pip/_vendor/platformdirs/macos.py,sha256=0XoOgin1NK7Qki7iskD-oS8xKxw6bXgoKEgdqpCRAFQ,6322 +pip/_vendor/platformdirs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/platformdirs/unix.py,sha256=WZmkUA--L3JNRGmz32s35YfoD3ica6xKIPdCV_HhLcs,10458 +pip/_vendor/platformdirs/version.py,sha256=BI_dKLSMwlkl57vlxZnT8oVjPiUC2W_sdx_8_h99HeQ,704 
+pip/_vendor/platformdirs/windows.py,sha256=XvCfklGUMVxJbXit51jpYMN-lNeScPB82qS1CAeplL0,10362 +pip/_vendor/pygments/LICENSE,sha256=qdZvHVJt8C4p3Oc0NtNOVuhjL0bCdbvf_HBWnogvnxc,1331 +pip/_vendor/pygments/__init__.py,sha256=8uNqJCCwXqbEx5aSsBr0FykUQOBDKBihO5mPqiw1aqo,2983 +pip/_vendor/pygments/__main__.py,sha256=WrndpSe6i1ckX_SQ1KaxD9CTKGzD0EuCOFxcbwFpoLU,353 +pip/_vendor/pygments/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/__main__.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/console.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/filter.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/formatter.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/lexer.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/modeline.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/plugin.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/regexopt.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/scanner.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/sphinxext.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/style.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/token.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/unistring.cpython-314.pyc,, +pip/_vendor/pygments/__pycache__/util.cpython-314.pyc,, +pip/_vendor/pygments/console.py,sha256=AagDWqwea2yBWf10KC9ptBgMpMjxKp8yABAmh-NQOVk,1718 +pip/_vendor/pygments/filter.py,sha256=YLtpTnZiu07nY3oK9nfR6E9Y1FBHhP5PX8gvkJWcfag,1910 +pip/_vendor/pygments/filters/__init__.py,sha256=4U4jtA0X3iP83uQnB9-TI-HDSw8E8y8zMYHa0UjbbaI,40392 +pip/_vendor/pygments/filters/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/pygments/formatter.py,sha256=KZQMmyo_xkOIkQG8g66LYEkBh1bx7a0HyGCBcvhI9Ew,4390 +pip/_vendor/pygments/formatters/__init__.py,sha256=KTwBmnXlaopJhQDOemVHYHskiDghuq-08YtP6xPNJPg,5385 +pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-314.pyc,, +pip/_vendor/pygments/formatters/_mapping.py,sha256=1Cw37FuQlNacnxRKmtlPX4nyLoX9_ttko5ZwscNUZZ4,4176 +pip/_vendor/pygments/lexer.py,sha256=_kBrOJ_NT5Tl0IVM0rA9c8eysP6_yrlGzEQI0eVYB-A,35349 +pip/_vendor/pygments/lexers/__init__.py,sha256=wbIME35GH7bI1B9rNPJFqWT-ij_RApZDYPUlZycaLzA,12115 +pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-314.pyc,, +pip/_vendor/pygments/lexers/__pycache__/python.cpython-314.pyc,, +pip/_vendor/pygments/lexers/_mapping.py,sha256=l4tCXM8e9aPC2BD6sjIr0deT-J-z5tHgCwL-p1fS0PE,77602 +pip/_vendor/pygments/lexers/python.py,sha256=vxjn1cOHclIKJKxoyiBsQTY65GHbkZtZRuKQ2AVCKaw,53853 +pip/_vendor/pygments/modeline.py,sha256=K5eSkR8GS1r5OkXXTHOcV0aM_6xpk9eWNEIAW-OOJ2g,1005 +pip/_vendor/pygments/plugin.py,sha256=tPx0rJCTIZ9ioRgLNYG4pifCbAwTRUZddvLw-NfAk2w,1891 +pip/_vendor/pygments/regexopt.py,sha256=wXaP9Gjp_hKAdnICqoDkRxAOQJSc4v3X6mcxx3z-TNs,3072 +pip/_vendor/pygments/scanner.py,sha256=nNcETRR1tRuiTaHmHSTTECVYFPcLf6mDZu1e4u91A9E,3092 +pip/_vendor/pygments/sphinxext.py,sha256=5x7Zh9YlU6ISJ31dMwduiaanb5dWZnKg3MyEQsseNnQ,7981 +pip/_vendor/pygments/style.py,sha256=PlOZqlsnTVd58RGy50vkA2cXQ_lP5bF5EGMEBTno6DA,6420 +pip/_vendor/pygments/styles/__init__.py,sha256=x9ebctfyvCAFpMTlMJ5YxwcNYBzjgq6zJaKkNm78r4M,2042 +pip/_vendor/pygments/styles/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-314.pyc,, +pip/_vendor/pygments/styles/_mapping.py,sha256=6lovFUE29tz6EsV3XYY4hgozJ7q1JL7cfO3UOlgnS8w,3312 
+pip/_vendor/pygments/token.py,sha256=WbdWGhYm_Vosb0DDxW9lHNPgITXfWTsQmHt6cy9RbcM,6226 +pip/_vendor/pygments/unistring.py,sha256=al-_rBemRuGvinsrM6atNsHTmJ6DUbw24q2O2Ru1cBc,63208 +pip/_vendor/pygments/util.py,sha256=oRtSpiAo5jM9ulntkvVbgXUdiAW57jnuYGB7t9fYuhc,10031 +pip/_vendor/pyproject_hooks/LICENSE,sha256=GyKwSbUmfW38I6Z79KhNjsBLn9-xpR02DkK0NCyLQVQ,1081 +pip/_vendor/pyproject_hooks/__init__.py,sha256=cPB_a9LXz5xvsRbX1o2qyAdjLatZJdQ_Lc5McNX-X7Y,691 +pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-314.pyc,, +pip/_vendor/pyproject_hooks/_impl.py,sha256=jY-raxnmyRyB57ruAitrJRUzEexuAhGTpgMygqx67Z4,14936 +pip/_vendor/pyproject_hooks/_in_process/__init__.py,sha256=MJNPpfIxcO-FghxpBbxkG1rFiQf6HOUbV4U5mq0HFns,557 +pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-314.pyc,, +pip/_vendor/pyproject_hooks/_in_process/_in_process.py,sha256=qcXMhmx__MIJq10gGHW3mA4Tl8dy8YzHMccwnNoKlw0,12216 +pip/_vendor/pyproject_hooks/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/requests/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 +pip/_vendor/requests/__init__.py,sha256=HlB_HzhrzGtfD_aaYUwUh1zWXLZ75_YCLyit75d0Vz8,5057 +pip/_vendor/requests/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/__version__.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/_internal_utils.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/adapters.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/api.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/auth.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/certs.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/compat.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/cookies.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/exceptions.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/help.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/hooks.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/models.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/packages.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/sessions.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/status_codes.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/structures.cpython-314.pyc,, +pip/_vendor/requests/__pycache__/utils.cpython-314.pyc,, +pip/_vendor/requests/__version__.py,sha256=QKDceK8K_ujqwDDc3oYrR0odOBYgKVOQQ5vFap_G_cg,435 +pip/_vendor/requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495 +pip/_vendor/requests/adapters.py,sha256=2MLFOK9GpYNhiTd6zLDUrAgSkIB-76i6pmSuUJjHC2w,26429 +pip/_vendor/requests/api.py,sha256=_Zb9Oa7tzVIizTKwFrPjDEY9ejtm_OnSRERnADxGsQs,6449 +pip/_vendor/requests/auth.py,sha256=kF75tqnLctZ9Mf_hm9TZIj4cQWnN5uxRz8oWsx5wmR0,10186 +pip/_vendor/requests/certs.py,sha256=kHDlkK_beuHXeMPc5jta2wgl8gdKeUWt5f2nTDVrvt8,441 +pip/_vendor/requests/compat.py,sha256=QfbmdTFiZzjSHMXiMrd4joCRU6RabtQ9zIcPoVaHIus,1822 +pip/_vendor/requests/cookies.py,sha256=bNi-iqEj4NPZ00-ob-rHvzkvObzN3lEpgw3g6paS3Xw,18590 +pip/_vendor/requests/exceptions.py,sha256=D1wqzYWne1mS2rU43tP9CeN1G7QAy7eqL9o1god6Ejw,4272 +pip/_vendor/requests/help.py,sha256=hRKaf9u0G7fdwrqMHtF3oG16RKktRf6KiwtSq2Fo1_0,3813 +pip/_vendor/requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733 +pip/_vendor/requests/models.py,sha256=taljlg6vJ4b-xMu2TaMNFFkaiwMex_VsEQ6qUTN3wzY,35575 
+pip/_vendor/requests/packages.py,sha256=_ZQDCJTJ8SP3kVWunSqBsRZNPzj2c1WFVqbdr08pz3U,1057 +pip/_vendor/requests/sessions.py,sha256=Cl1dpEnOfwrzzPbku-emepNeN4Rt_0_58Iy2x-JGTm8,30503 +pip/_vendor/requests/status_codes.py,sha256=iJUAeA25baTdw-6PfD0eF4qhpINDJRJI-yaMqxs4LEI,4322 +pip/_vendor/requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912 +pip/_vendor/requests/utils.py,sha256=WS3wHSQaaEfceu1syiFo5jf4e_CWKUTep_IabOVI_J0,33225 +pip/_vendor/resolvelib/LICENSE,sha256=84j9OMrRMRLB3A9mm76A5_hFQe26-3LzAw0sp2QsPJ0,751 +pip/_vendor/resolvelib/__init__.py,sha256=yoX-d4STvwGGCiQRE5cJC9Cter69SgVgqClxOCvSP7M,541 +pip/_vendor/resolvelib/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/resolvelib/__pycache__/providers.cpython-314.pyc,, +pip/_vendor/resolvelib/__pycache__/reporters.cpython-314.pyc,, +pip/_vendor/resolvelib/__pycache__/structs.cpython-314.pyc,, +pip/_vendor/resolvelib/providers.py,sha256=pIWJbIdJJ9GFtNbtwTH0Ia43Vj6hYCEJj2DOLue15FM,8914 +pip/_vendor/resolvelib/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/resolvelib/reporters.py,sha256=pNJf4nFxLpAeKxlBUi2GEj0a2Ij1nikY0UabTKXesT4,2037 +pip/_vendor/resolvelib/resolvers/__init__.py,sha256=728M3EvmnPbVXS7ExXlv2kMu6b7wEsoPutEfl-uVk_I,640 +pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-314.pyc,, +pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-314.pyc,, +pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-314.pyc,, +pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-314.pyc,, +pip/_vendor/resolvelib/resolvers/abstract.py,sha256=CNeQPnpAudY77nmzOkONSmAgRlzIf06X-X9mvRYODms,1543 +pip/_vendor/resolvelib/resolvers/criterion.py,sha256=lcmZGv5sKHOnFD_RzZwvlGSj19MeA-5rCMpdf2Sgw7Y,1768 +pip/_vendor/resolvelib/resolvers/exceptions.py,sha256=ln_jaQtgLlRUSFY627yiHG2gD7AgaXzRKaElFVh7fDQ,1768 +pip/_vendor/resolvelib/resolvers/resolution.py,sha256=3J_zkW-sD3EY-BlNXjyln__njpyH5n0UZJT6uV7CheA,24212 +pip/_vendor/resolvelib/structs.py,sha256=pu-EJiR2IBITr2SQeNPRa0rXhjlStfmO_GEgAhr3004,6420 +pip/_vendor/rich/LICENSE,sha256=3u18F6QxgVgZCj6iOcyHmlpQJxzruYrnAl9I--WNyhU,1056 +pip/_vendor/rich/__init__.py,sha256=dRxjIL-SbFVY0q3IjSMrfgBTHrm1LZDgLOygVBwiYZc,6090 +pip/_vendor/rich/__main__.py,sha256=e_aVC-tDzarWQW9SuZMuCgBr6ODV_iDNV2Wh2xkxOlw,7896 +pip/_vendor/rich/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/__main__.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_cell_widths.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_emoji_codes.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_emoji_replace.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_export_format.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_extension.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_fileno.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_inspect.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_log_render.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_loop.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_null_file.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_palettes.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_pick.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_ratio.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_spinners.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_stack.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_timer.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_win32_console.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_windows.cpython-314.pyc,, 
+pip/_vendor/rich/__pycache__/_windows_renderer.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/_wrap.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/abc.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/align.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/ansi.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/bar.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/box.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/cells.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/color.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/color_triplet.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/columns.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/console.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/constrain.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/containers.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/control.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/default_styles.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/diagnose.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/emoji.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/errors.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/file_proxy.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/filesize.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/highlighter.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/json.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/jupyter.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/layout.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/live.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/live_render.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/logging.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/markup.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/measure.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/padding.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/pager.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/palette.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/panel.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/pretty.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/progress.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/progress_bar.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/prompt.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/protocol.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/region.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/repr.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/rule.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/scope.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/screen.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/segment.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/spinner.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/status.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/style.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/styled.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/syntax.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/table.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/terminal_theme.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/text.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/theme.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/themes.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/traceback.cpython-314.pyc,, +pip/_vendor/rich/__pycache__/tree.cpython-314.pyc,, +pip/_vendor/rich/_cell_widths.py,sha256=fbmeyetEdHjzE_Vx2l1uK7tnPOhMs2X1lJfO3vsKDpA,10209 +pip/_vendor/rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235 +pip/_vendor/rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064 +pip/_vendor/rich/_export_format.py,sha256=RI08pSrm5tBSzPMvnbTqbD9WIalaOoN5d4M1RTmLq1Y,2128 
+pip/_vendor/rich/_extension.py,sha256=Xt47QacCKwYruzjDi-gOBq724JReDj9Cm9xUi5fr-34,265 +pip/_vendor/rich/_fileno.py,sha256=HWZxP5C2ajMbHryvAQZseflVfQoGzsKOHzKGsLD8ynQ,799 +pip/_vendor/rich/_inspect.py,sha256=ROT0PLC2GMWialWZkqJIjmYq7INRijQQkoSokWTaAiI,9656 +pip/_vendor/rich/_log_render.py,sha256=1ByI0PA1ZpxZY3CGJOK54hjlq4X-Bz_boIjIqCd8Kns,3225 +pip/_vendor/rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236 +pip/_vendor/rich/_null_file.py,sha256=ADGKp1yt-k70FMKV6tnqCqecB-rSJzp-WQsD7LPL-kg,1394 +pip/_vendor/rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063 +pip/_vendor/rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423 +pip/_vendor/rich/_ratio.py,sha256=IOtl78sQCYZsmHyxhe45krkb68u9xVz7zFsXVJD-b2Y,5325 +pip/_vendor/rich/_spinners.py,sha256=U2r1_g_1zSjsjiUdAESc2iAMc3i4ri_S8PYP6kQ5z1I,19919 +pip/_vendor/rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351 +pip/_vendor/rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417 +pip/_vendor/rich/_win32_console.py,sha256=BSaDRIMwBLITn_m0mTRLPqME5q-quGdSMuYMpYeYJwc,22755 +pip/_vendor/rich/_windows.py,sha256=aBwaD_S56SbgopIvayVmpk0Y28uwY2C5Bab1wl3Bp-I,1925 +pip/_vendor/rich/_windows_renderer.py,sha256=t74ZL3xuDCP3nmTp9pH1L5LiI2cakJuQRQleHCJerlk,2783 +pip/_vendor/rich/_wrap.py,sha256=FlSsom5EX0LVkA3KWy34yHnCfLtqX-ZIepXKh-70rpc,3404 +pip/_vendor/rich/abc.py,sha256=ON-E-ZqSSheZ88VrKX2M3PXpFbGEUUZPMa_Af0l-4f0,890 +pip/_vendor/rich/align.py,sha256=dg-7uY0ukMLLlUEsBDRLva22_sQgIJD4BK0dmZHFHug,10324 +pip/_vendor/rich/ansi.py,sha256=Avs1LHbSdcyOvDOdpELZUoULcBiYewY76eNBp6uFBhs,6921 +pip/_vendor/rich/bar.py,sha256=ldbVHOzKJOnflVNuv1xS7g6dLX2E3wMnXkdPbpzJTcs,3263 +pip/_vendor/rich/box.py,sha256=kmavBc_dn73L_g_8vxWSwYJD2uzBXOUFTtJOfpbczcM,10686 +pip/_vendor/rich/cells.py,sha256=KrQkj5-LghCCpJLSNQIyAZjndc4bnEqOEmi5YuZ9UCY,5130 +pip/_vendor/rich/color.py,sha256=3HSULVDj7qQkXUdFWv78JOiSZzfy5y1nkcYhna296V0,18211 +pip/_vendor/rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054 +pip/_vendor/rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131 +pip/_vendor/rich/console.py,sha256=t9azZpmRMVU5cphVBZSShNsmBxd2-IAWcTTlhor-E1s,100849 +pip/_vendor/rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288 +pip/_vendor/rich/containers.py,sha256=c_56TxcedGYqDepHBMTuZdUIijitAQgnox-Qde0Z1qo,5502 +pip/_vendor/rich/control.py,sha256=EUTSUFLQbxY6Zmo_sdM-5Ls323vIHTBfN8TPulqeHUY,6487 +pip/_vendor/rich/default_styles.py,sha256=khQFqqaoDs3bprMqWpHw8nO5UpG2DN6QtuTd6LzZwYc,8257 +pip/_vendor/rich/diagnose.py,sha256=fJl1TItRn19gGwouqTg-8zPUW3YqQBqGltrfPQs1H9w,1025 +pip/_vendor/rich/emoji.py,sha256=Wd4bQubZdSy6-PyrRQNuMHtn2VkljK9uPZPVlu2cmx0,2367 +pip/_vendor/rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642 +pip/_vendor/rich/file_proxy.py,sha256=Tl9THMDZ-Pk5Wm8sI1gGg_U5DhusmxD-FZ0fUbcU0W0,1683 +pip/_vendor/rich/filesize.py,sha256=_iz9lIpRgvW7MNSeCZnLg-HwzbP4GETg543WqD8SFs0,2484 +pip/_vendor/rich/highlighter.py,sha256=G_sn-8DKjM1sEjLG_oc4ovkWmiUpWvj8bXi0yed2LnY,9586 +pip/_vendor/rich/json.py,sha256=vVEoKdawoJRjAFayPwXkMBPLy7RSTs-f44wSQDR2nJ0,5031 +pip/_vendor/rich/jupyter.py,sha256=QyoKoE_8IdCbrtiSHp9TsTSNyTHY0FO5whE7jOTd9UE,3252 +pip/_vendor/rich/layout.py,sha256=ajkSFAtEVv9EFTcFs-w4uZfft7nEXhNzL7ZVdgrT5rI,14004 +pip/_vendor/rich/live.py,sha256=tF3ukAAJZ_N2ZbGclqZ-iwLoIoZ8f0HHUz79jAyJqj8,15180 +pip/_vendor/rich/live_render.py,sha256=It_39YdzrBm8o3LL0kaGorPFg-BfZWAcrBjLjFokbx4,3521 
+pip/_vendor/rich/logging.py,sha256=5KaPPSMP9FxcXPBcKM4cGd_zW78PMgf-YbMVnvfSw0o,12468 +pip/_vendor/rich/markup.py,sha256=3euGKP5s41NCQwaSjTnJxus5iZMHjxpIM0W6fCxra38,8451 +pip/_vendor/rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305 +pip/_vendor/rich/padding.py,sha256=KVEI3tOwo9sgK1YNSuH__M1_jUWmLZwRVV_KmOtVzyM,4908 +pip/_vendor/rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828 +pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396 +pip/_vendor/rich/panel.py,sha256=9sQl00hPIqH5G2gALQo4NepFwpP0k9wT-s_gOms5pIc,11157 +pip/_vendor/rich/pretty.py,sha256=gy3S72u4FRg2ytoo7N1ZDWDIvB4unbzd5iUGdgm-8fc,36391 +pip/_vendor/rich/progress.py,sha256=CUc2lkU-X59mVdGfjMCBkZeiGPL3uxdONjhNJF2T7wY,60408 +pip/_vendor/rich/progress_bar.py,sha256=mZTPpJUwcfcdgQCTTz3kyY-fc79ddLwtx6Ghhxfo064,8162 +pip/_vendor/rich/prompt.py,sha256=l0RhQU-0UVTV9e08xW1BbIj0Jq2IXyChX4lC0lFNzt4,12447 +pip/_vendor/rich/protocol.py,sha256=5hHHDDNHckdk8iWH5zEbi-zuIVSF5hbU2jIo47R7lTE,1391 +pip/_vendor/rich/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166 +pip/_vendor/rich/repr.py,sha256=5MZJZmONgC6kud-QW-_m1okXwL2aR6u6y-pUcUCJz28,4431 +pip/_vendor/rich/rule.py,sha256=0fNaS_aERa3UMRc3T5WMpN_sumtDxfaor2y3of1ftBk,4602 +pip/_vendor/rich/scope.py,sha256=TMUU8qo17thyqQCPqjDLYpg_UU1k5qVd-WwiJvnJVas,2843 +pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591 +pip/_vendor/rich/segment.py,sha256=otnKeKGEV-WRlQVosfJVeFDcDxAKHpvJ_hLzSu5lumM,24743 +pip/_vendor/rich/spinner.py,sha256=onIhpKlljRHppTZasxO8kXgtYyCHUkpSgKglRJ3o51g,4214 +pip/_vendor/rich/status.py,sha256=kkPph3YeAZBo-X-4wPp8gTqZyU466NLwZBA4PZTTewo,4424 +pip/_vendor/rich/style.py,sha256=W9Ccy8Py8lNICtlfcp-ryzMTuQaGxAU3av7-g5fHu0s,26990 +pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258 +pip/_vendor/rich/syntax.py,sha256=eDKIRwl--eZ0Lwo2da2RRtfutXGavrJO61Cl5OkS59U,36371 +pip/_vendor/rich/table.py,sha256=ZmT7V7MMCOYKw7TGY9SZLyYDf6JdM-WVf07FdVuVhTI,40049 +pip/_vendor/rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370 +pip/_vendor/rich/text.py,sha256=AO7JPCz6-gaN1thVLXMBntEmDPVYFgFNG1oM61_sanU,47552 +pip/_vendor/rich/theme.py,sha256=oNyhXhGagtDlbDye3tVu3esWOWk0vNkuxFw-_unlaK0,3771 +pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102 +pip/_vendor/rich/traceback.py,sha256=c0WmB_L04_UfZbLaoH982_U_s7eosxKMUiAVmDPdRYU,35861 +pip/_vendor/rich/tree.py,sha256=yWnQ6rAvRGJ3qZGqBrxS2SW2TKBTNrP0SdY8QxOFPuw,9451 +pip/_vendor/tomli/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 +pip/_vendor/tomli/__init__.py,sha256=qzEGl8QHhqgQPCuLzfKyPIuH3KKPspf-UVPbZ0ppBD4,314 +pip/_vendor/tomli/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/tomli/__pycache__/_parser.cpython-314.pyc,, +pip/_vendor/tomli/__pycache__/_re.cpython-314.pyc,, +pip/_vendor/tomli/__pycache__/_types.cpython-314.pyc,, +pip/_vendor/tomli/_parser.py,sha256=bO8tUYmnyA2K6m4TnbQbfUqmIFcDv7mG1KuC9gqRVmA,25778 +pip/_vendor/tomli/_re.py,sha256=n8-Io8ZK1U-F6jzlg7Pabc40hLFJsawE2uNLKH9w7iU,3235 +pip/_vendor/tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254 +pip/_vendor/tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +pip/_vendor/tomli_w/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 +pip/_vendor/tomli_w/__init__.py,sha256=0F8yDtXx3Uunhm874KrAcP76srsM98y7WyHQwCulZbo,169 
+pip/_vendor/tomli_w/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/tomli_w/__pycache__/_writer.cpython-314.pyc,, +pip/_vendor/tomli_w/_writer.py,sha256=dsifFS2xYf1i76mmRyfz9y125xC7Z_HQ845ZKhJsYXs,6961 +pip/_vendor/tomli_w/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +pip/_vendor/truststore/LICENSE,sha256=M757fo-k_Rmxdg4ajtimaL2rhSyRtpLdQUJLy3Jan8o,1086 +pip/_vendor/truststore/__init__.py,sha256=Bu7kqkmpunhLsj5xCu8gT_25ktoPXcSnwe8VHk1GmJo,1320 +pip/_vendor/truststore/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/truststore/__pycache__/_api.cpython-314.pyc,, +pip/_vendor/truststore/__pycache__/_macos.cpython-314.pyc,, +pip/_vendor/truststore/__pycache__/_openssl.cpython-314.pyc,, +pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-314.pyc,, +pip/_vendor/truststore/__pycache__/_windows.cpython-314.pyc,, +pip/_vendor/truststore/_api.py,sha256=CYJCV5BTfttZYfqY3movdMBE-8az7uhET_LYbKT2Nn4,11413 +pip/_vendor/truststore/_macos.py,sha256=nZlLkOmszUE0g6ryRwBVGY5COzPyudcsiJtDWarM5LQ,20503 +pip/_vendor/truststore/_openssl.py,sha256=zB-SQvJydks7tQ0yIwrP6GD3fQNSSaPiq7zw4yF5T40,2412 +pip/_vendor/truststore/_ssl_constants.py,sha256=NUD4fVKdSD02ri7-db0tnO0VqLP9aHuzmStcW7tAl08,1130 +pip/_vendor/truststore/_windows.py,sha256=rAHyKYD8M7t-bXfG8VgOVa3TpfhVhbt4rZQlO45YuP8,17993 +pip/_vendor/truststore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115 +pip/_vendor/urllib3/__init__.py,sha256=iXLcYiJySn0GNbWOOZDDApgBL1JgP44EZ8i1760S8Mc,3333 +pip/_vendor/urllib3/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/_collections.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/_version.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/connection.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/connectionpool.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/exceptions.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/fields.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/filepost.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/poolmanager.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/request.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/response.cpython-314.pyc,, +pip/_vendor/urllib3/_collections.py,sha256=pyASJJhW7wdOpqJj9QJA8FyGRfr8E8uUUhqUvhF0728,11372 +pip/_vendor/urllib3/_version.py,sha256=t9wGB6ooOTXXgiY66K1m6BZS1CJyXHAU8EoWDTe6Shk,64 +pip/_vendor/urllib3/connection.py,sha256=ttIA909BrbTUzwkqEe_TzZVh4JOOj7g61Ysei2mrwGg,20314 +pip/_vendor/urllib3/connectionpool.py,sha256=e2eiAwNbFNCKxj4bwDKNK-w7HIdSz3OmMxU_TIt-evQ,40408 +pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957 +pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-314.pyc,, 
+pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632 +pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922 +pip/_vendor/urllib3/contrib/appengine.py,sha256=VR68eAVE137lxTgjBDwCna5UiBZTOKa01Aj_-5BaCz4,11036 +pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=NlfkW7WMdW8ziqudopjHoW299og1BTWi0IeIibquFwk,4528 +pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=hDJh4MhyY_p-oKlFcYcQaVQRDv6GMmBGuW9yjxyeejM,17081 +pip/_vendor/urllib3/contrib/securetransport.py,sha256=Fef1IIUUFHqpevzXiDPbIGkDKchY2FVKeVeLGR1Qq3g,34446 +pip/_vendor/urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097 +pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217 +pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579 +pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440 +pip/_vendor/urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/urllib3/packages/__pycache__/six.cpython-314.pyc,, +pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-314.pyc,, +pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-314.pyc,, +pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417 +pip/_vendor/urllib3/packages/backports/weakref_finalize.py,sha256=tRCal5OAhNSRyb0DhHp-38AtIlCsRP8BxF3NX-6rqIA,5343 +pip/_vendor/urllib3/packages/six.py,sha256=b9LM0wBXv7E7SrbCjAm4wwN-hrH-iNxv18LgWNMMKPo,34665 +pip/_vendor/urllib3/poolmanager.py,sha256=aWyhXRtNO4JUnCSVVqKTKQd8EXTvUm1VN9pgs2bcONo,19990 +pip/_vendor/urllib3/request.py,sha256=YTWFNr7QIwh7E1W9dde9LM77v2VWTJ5V78XuTTw7D1A,6691 +pip/_vendor/urllib3/response.py,sha256=fmDJAFkG71uFTn-sVSTh2Iw0WmcXQYqkbRjihvwBjU8,30641 +pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155 +pip/_vendor/urllib3/util/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/connection.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/proxy.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/queue.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/request.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/response.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/retry.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/timeout.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/url.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/wait.cpython-314.pyc,, +pip/_vendor/urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901 +pip/_vendor/urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605 +pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498 
+pip/_vendor/urllib3/util/request.py,sha256=C0OUt2tcU6LRiQJ7YYNP9GvPrSvl7ziIBekQ-5nlBZk,3997 +pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510 +pip/_vendor/urllib3/util/retry.py,sha256=6ENvOZ8PBDzh8kgixpql9lIrb2dxH-k7ZmBanJF2Ng4,22050 +pip/_vendor/urllib3/util/ssl_.py,sha256=QDuuTxPSCj1rYtZ4xpD7Ux-r20TD50aHyqKyhQ7Bq4A,17460 +pip/_vendor/urllib3/util/ssl_match_hostname.py,sha256=Ir4cZVEjmAk8gUAIHWSi7wtOO83UCYABY2xFD1Ql_WA,5758 +pip/_vendor/urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895 +pip/_vendor/urllib3/util/timeout.py,sha256=cwq4dMk87mJHSBktK1miYJ-85G-3T3RmT20v7SFCpno,10168 +pip/_vendor/urllib3/util/url.py,sha256=lCAE7M5myA8EDdW0sJuyyZhVB9K_j38ljWhHAnFaWoE,14296 +pip/_vendor/urllib3/util/wait.py,sha256=fOX0_faozG2P7iVojQoE1mbydweNyTcm-hXEfFrTtLI,5403 +pip/_vendor/vendor.txt,sha256=f2msFLZ-chXWIZSKW31NLGyMWmt_-Vfy7sY5dHYgmnw,342 +pip/py.typed,sha256=EBVvvPRTn_eIpz5e5QztSCdrMX7Qwd7VP93RSoIlZ2I,286 diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/REQUESTED b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/REQUESTED similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/REQUESTED rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/REQUESTED diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/WHEEL b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/WHEEL similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/WHEEL rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/WHEEL diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/entry_points.txt b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/entry_points.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/entry_points.txt rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/entry_points.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/AUTHORS.txt b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/AUTHORS.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/AUTHORS.txt rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/AUTHORS.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/LICENSE.txt b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/LICENSE.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/LICENSE.txt rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/LICENSE.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE 
b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE 
b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE 
b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt b/Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt rename to Python314_4_x64_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/__init__.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/__main__.py b/Python314_4_x64_Template/Lib/site-packages/pip/__main__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/__main__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/__main__.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/__pip-runner__.py b/Python314_4_x64_Template/Lib/site-packages/pip/__pip-runner__.py similarity index 100% rename 
from Python313_13_x64_Template/Lib/site-packages/pip/__pip-runner__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/__pip-runner__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..d40b4983 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-314.pyc new file mode 100644 index 00000000..d9a1c52a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-314.pyc new file mode 100644 index 00000000..80b69bb2 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..df459bf2 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-314.pyc new file mode 100644 index 00000000..47964191 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-314.pyc new file mode 100644 index 00000000..e28b2fc2 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-314.pyc new file mode 100644 index 00000000..fe4a4edc Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-314.pyc new file mode 100644 index 00000000..951a25f2 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-314.pyc differ diff --git 
a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-314.pyc new file mode 100644 index 00000000..ec825c6a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-314.pyc new file mode 100644 index 00000000..fb131877 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-314.pyc new file mode 100644 index 00000000..156d6f2c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-314.pyc new file mode 100644 index 00000000..bde3418a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/build_env.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/build_env.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/build_env.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/build_env.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cache.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cache.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cache.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cache.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..42782d7d Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-314.pyc new file mode 100644 index 00000000..30e58745 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-314.pyc 
b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-314.pyc new file mode 100644 index 00000000..8057681b Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-314.pyc new file mode 100644 index 00000000..279dd42d Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-314.pyc new file mode 100644 index 00000000..08c8b907 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-314.pyc new file mode 100644 index 00000000..89891b0f Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-314.pyc new file mode 100644 index 00000000..44856bd4 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-314.pyc new file mode 100644 index 00000000..718cb144 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-314.pyc new file mode 100644 index 00000000..fcc5937e Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-314.pyc new file mode 100644 index 00000000..2fe819cd Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-314.pyc new file mode 100644 index 00000000..c443ec52 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-314.pyc differ diff --git 
a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-314.pyc new file mode 100644 index 00000000..d659009b Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-314.pyc new file mode 100644 index 00000000..26e43190 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/autocompletion.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/autocompletion.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/autocompletion.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/autocompletion.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/base_command.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/base_command.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/base_command.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/base_command.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/cmdoptions.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/cmdoptions.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/cmdoptions.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/cmdoptions.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/command_context.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/command_context.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/command_context.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/command_context.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/index_command.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/index_command.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/index_command.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/index_command.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/main.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/main.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/main.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/main.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/main_parser.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/main_parser.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/main_parser.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/main_parser.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/parser.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/parser.py similarity index 100% rename from 
Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/parser.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/parser.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/progress_bars.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/progress_bars.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/progress_bars.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/progress_bars.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/req_command.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/req_command.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/req_command.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/req_command.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/spinners.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/spinners.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/spinners.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/spinners.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/status_codes.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/status_codes.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/cli/status_codes.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/cli/status_codes.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..07ad6e7c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-314.pyc new file mode 100644 index 00000000..148d01ad Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-314.pyc new file mode 100644 index 00000000..dc514730 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-314.pyc new file mode 100644 index 00000000..725790e0 Binary files /dev/null and 
b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-314.pyc
new file mode 100644
index 00000000..6e88c786
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-314.pyc
new file mode 100644
index 00000000..6dded241
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-314.pyc
new file mode 100644
index 00000000..6c16da71
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-314.pyc
new file mode 100644
index 00000000..9af2305e
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-314.pyc
new file mode 100644
index 00000000..e8027004
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-314.pyc
new file mode 100644
index 00000000..ba0edef7
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-314.pyc
new file mode 100644
index 00000000..fcc1af19
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-314.pyc
new file mode 100644
index 00000000..13f3a0a1
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-314.pyc
new file mode 100644
index 00000000..91f8ffdf
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-314.pyc
new file mode 100644
index 00000000..c7527240
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-314.pyc
new file mode 100644
index 00000000..3d813f3f
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-314.pyc
new file mode 100644
index 00000000..281f4315
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-314.pyc
new file mode 100644
index 00000000..a62e00f4
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-314.pyc
new file mode 100644
index 00000000..91550791
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-314.pyc
new file mode 100644
index 00000000..8b2c7e7a
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/cache.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/cache.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/cache.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/cache.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/check.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/check.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/check.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/check.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/completion.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/completion.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/completion.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/completion.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/configuration.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/configuration.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/configuration.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/configuration.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/debug.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/debug.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/debug.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/debug.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/download.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/download.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/download.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/download.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/freeze.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/freeze.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/freeze.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/freeze.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/hash.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/hash.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/hash.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/hash.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/help.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/help.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/help.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/help.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/index.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/index.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/index.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/index.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/inspect.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/inspect.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/inspect.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/inspect.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/install.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/install.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/install.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/install.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/list.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/list.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/list.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/list.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/lock.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/lock.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/lock.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/lock.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/search.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/search.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/search.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/search.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/show.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/show.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/show.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/show.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/uninstall.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/uninstall.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/uninstall.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/uninstall.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/wheel.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/wheel.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/commands/wheel.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/commands/wheel.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/configuration.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/configuration.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/configuration.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/configuration.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..737c28e1
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-314.pyc
new file mode 100644
index 00000000..f98c7731
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-314.pyc
new file mode 100644
index 00000000..c2b0722a
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-314.pyc
new file mode 100644
index 00000000..76c7e1b6
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-314.pyc
new file mode 100644
index 00000000..d6d03389
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/base.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/base.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/base.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/base.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/installed.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/installed.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/installed.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/installed.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/sdist.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/sdist.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/sdist.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/sdist.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/wheel.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/wheel.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/distributions/wheel.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/distributions/wheel.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/exceptions.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/exceptions.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/exceptions.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/exceptions.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..bc2b82b5
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-314.pyc
new file mode 100644
index 00000000..04eadbe4
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-314.pyc
new file mode 100644
index 00000000..08ce81c5
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-314.pyc
new file mode 100644
index 00000000..7e8dc43a
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/collector.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/collector.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/collector.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/collector.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/package_finder.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/package_finder.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/package_finder.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/package_finder.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/sources.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/sources.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/index/sources.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/index/sources.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..71b48c99
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-314.pyc
new file mode 100644
index 00000000..c53d005c
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-314.pyc
new file mode 100644
index 00000000..812b01ef
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-314.pyc
new file mode 100644
index 00000000..2f104876
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/_distutils.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/_distutils.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/_distutils.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/_distutils.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/_sysconfig.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/_sysconfig.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/_sysconfig.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/_sysconfig.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/base.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/base.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/locations/base.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/locations/base.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/main.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/main.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/main.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/main.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..25b15303
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-314.pyc
new file mode 100644
index 00000000..bce7e003
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-314.pyc
new file mode 100644
index 00000000..8eda2b01
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-314.pyc
new file mode 100644
index 00000000..bf503ba0
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/_json.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/_json.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/_json.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/_json.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/base.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/base.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/base.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/base.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..51ddb5a7
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-314.pyc
new file mode 100644
index 00000000..cecd2ff8
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-314.pyc
new file mode 100644
index 00000000..7e0fe4e6
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-314.pyc
new file mode 100644
index 00000000..a62bcfe2
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/pkg_resources.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/pkg_resources.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/metadata/pkg_resources.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/metadata/pkg_resources.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..584ad3a2
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-314.pyc
new file mode 100644
index 00000000..bd80eac0
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-314.pyc
new file mode 100644
index 00000000..a66e2d79
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-314.pyc
new file mode 100644
index 00000000..f6afb0be
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-314.pyc
new file mode 100644
index 00000000..fd01604e
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-314.pyc
new file mode 100644
index 00000000..f093b177
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-314.pyc
new file mode 100644
index 00000000..8682fa1e
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-314.pyc
new file mode 100644
index 00000000..53e9b9a8
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-314.pyc
new file mode 100644
index 00000000..de993772
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-314.pyc
new file mode 100644
index 00000000..258ce430
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-314.pyc
new file mode 100644
index 00000000..4a46459d
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-314.pyc
new file mode 100644
index 00000000..8b31e18d
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-314.pyc
new file mode 100644
index 00000000..be550541
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/candidate.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/candidate.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/candidate.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/candidate.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/direct_url.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/direct_url.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/direct_url.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/direct_url.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/format_control.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/format_control.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/format_control.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/format_control.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/index.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/index.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/index.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/index.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/installation_report.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/installation_report.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/installation_report.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/installation_report.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/link.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/link.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/link.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/link.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/release_control.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/release_control.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/release_control.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/release_control.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/scheme.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/scheme.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/scheme.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/scheme.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/search_scope.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/search_scope.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/search_scope.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/search_scope.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/selection_prefs.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/selection_prefs.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/selection_prefs.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/selection_prefs.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/target_python.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/target_python.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/target_python.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/target_python.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/wheel.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/wheel.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/models/wheel.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/models/wheel.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..d4ca9492
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-314.pyc
new file mode 100644
index 00000000..18eb8005
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-314.pyc
new file mode 100644
index 00000000..c052abee
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-314.pyc
new file mode 100644
index 00000000..d5efe7d7
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-314.pyc
new file mode 100644
index 00000000..e9c05c9b
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-314.pyc
new file mode 100644
index 00000000..26ea07d3
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-314.pyc
new file mode 100644
index 00000000..f7fd64f4
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-314.pyc
new file mode 100644
index 00000000..f38185f4
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/auth.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/auth.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/auth.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/auth.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/cache.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/cache.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/cache.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/cache.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/download.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/download.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/download.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/download.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/lazy_wheel.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/lazy_wheel.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/lazy_wheel.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/lazy_wheel.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/session.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/session.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/session.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/session.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/utils.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/utils.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/utils.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/utils.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/xmlrpc.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/xmlrpc.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/network/xmlrpc.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/network/xmlrpc.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..f96e1781
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-314.pyc
new file mode 100644
index 00000000..5e6e5c11
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-314.pyc
new file mode 100644
index 00000000..07c09692
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-314.pyc
new file mode 100644
index 00000000..395a440c
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..731c7f96
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-314.pyc
new file mode 100644
index 00000000..1f8ec657
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-314.pyc
new file mode 100644
index 00000000..8b4b4f82
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-314.pyc
new file mode 100644
index 00000000..4b084f55
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-314.pyc
new file mode 100644
index 00000000..75f1722a
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-314.pyc
new file mode 100644
index 00000000..0c30c534
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/build_tracker.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/build_tracker.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/build_tracker.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/build_tracker.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/metadata.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/metadata.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/metadata.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/metadata.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/metadata_editable.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/metadata_editable.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/metadata_editable.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/metadata_editable.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/wheel.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/wheel.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/wheel.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/wheel.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/wheel_editable.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/wheel_editable.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/build/wheel_editable.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/build/wheel_editable.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/check.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/check.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/check.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/check.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/freeze.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/freeze.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/freeze.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/freeze.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/install/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/install/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/install/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/install/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..81b4b8e0
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-314.pyc
new file mode 100644
index 00000000..b8d25e27
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/install/wheel.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/install/wheel.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/install/wheel.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/install/wheel.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/prepare.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/prepare.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/operations/prepare.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/operations/prepare.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/pyproject.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/pyproject.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/pyproject.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/pyproject.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..a1354e90
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-314.pyc
new file mode 100644
index 00000000..4a204d4e
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-314.pyc
new file mode 100644
index 00000000..717e55e8
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-314.pyc
new file mode 100644
index 00000000..c21ec7b6
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-314.pyc
new file mode 100644
index 00000000..43daf2ed
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-314.pyc
new file mode 100644
index 00000000..ef7cb146
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-314.pyc
new file mode 100644
index 00000000..a8fe2991
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-314.pyc
new file mode 100644
index 00000000..ac7c0ecc
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/constructors.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/constructors.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/constructors.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/constructors.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/pep723.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/pep723.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/pep723.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/pep723.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/req_dependency_group.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/req_dependency_group.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/req_dependency_group.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/req_dependency_group.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/req_file.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/req_file.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/req_file.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/req_file.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/req_install.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/req_install.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/req_install.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/req_install.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/req_set.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/req_set.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/req_set.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/req_set.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/req_uninstall.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/req_uninstall.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/req/req_uninstall.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/req/req_uninstall.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..397e1c9f
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-314.pyc
new file mode 100644
index 00000000..6c22bdf4
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/base.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/base.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/base.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/base.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..4341627c
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-314.pyc
new file mode 100644
index 00000000..b128d583
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..617bfb49
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-314.pyc
new file mode 100644
index 00000000..43d1a7ff
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-314.pyc
new file mode 100644
index 00000000..81306fd7
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-314.pyc
new file mode 100644
index 00000000..86031448
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-314.pyc
new file mode 100644
index 00000000..f955b38b
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-314.pyc
new file mode 100644
index 00000000..e5fff52f
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-314.pyc
new file mode 100644
index 00000000..7c886c0f
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-314.pyc
new file mode 100644
index 00000000..0712d2a9
Binary files /dev/null and
b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-314.pyc new file mode 100644 index 00000000..862783c8 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py rename to 
Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/self_outdated_check.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/self_outdated_check.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/self_outdated_check.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/self_outdated_check.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..07af7291 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-314.pyc new file mode 100644 index 00000000..c020b94d Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_log.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_log.cpython-314.pyc new file mode 100644 index 00000000..8e0d2b9a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_log.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-314.pyc new file mode 100644 index 00000000..c9c9c2ba Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-314.pyc new file mode 100644 index 00000000..9331018f Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-314.pyc new file mode 100644 index 
00000000..acb9edba Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-314.pyc new file mode 100644 index 00000000..f1d66be6 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-314.pyc new file mode 100644 index 00000000..0a0c6680 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-314.pyc new file mode 100644 index 00000000..c0d72bf2 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-314.pyc new file mode 100644 index 00000000..00e6fa56 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-314.pyc new file mode 100644 index 00000000..5646ae41 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-314.pyc new file mode 100644 index 00000000..7746f72e Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-314.pyc new file mode 100644 index 00000000..9a3d3993 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-314.pyc new file mode 100644 index 00000000..4a554151 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-314.pyc 
b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-314.pyc new file mode 100644 index 00000000..f43f6ad2 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-314.pyc new file mode 100644 index 00000000..39eff00f Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-314.pyc new file mode 100644 index 00000000..d13edc0a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-314.pyc new file mode 100644 index 00000000..65cc08b8 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/pylock.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/pylock.cpython-314.pyc new file mode 100644 index 00000000..d71c2e26 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/pylock.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/retry.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/retry.cpython-314.pyc new file mode 100644 index 00000000..a2928848 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/retry.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-314.pyc new file mode 100644 index 00000000..87e5bd12 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-314.pyc new file mode 100644 index 00000000..f015730b Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-314.pyc new file mode 100644 index 00000000..ebb2932f Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-314.pyc 
b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-314.pyc new file mode 100644 index 00000000..35e937e3 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-314.pyc new file mode 100644 index 00000000..b192bac6 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-314.pyc new file mode 100644 index 00000000..907278cf Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/_jaraco_text.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/_jaraco_text.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/_jaraco_text.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/_jaraco_text.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/_log.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/_log.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/_log.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/_log.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/appdirs.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/appdirs.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/appdirs.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/appdirs.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/compat.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/compat.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/compat.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/compat.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/compatibility_tags.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/compatibility_tags.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/compatibility_tags.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/compatibility_tags.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/datetime.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/datetime.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/datetime.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/datetime.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/deprecation.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/deprecation.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/deprecation.py rename 
to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/deprecation.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/egg_link.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/egg_link.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/egg_link.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/egg_link.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/entrypoints.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/entrypoints.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/entrypoints.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/entrypoints.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/filesystem.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/filesystem.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/filesystem.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/filesystem.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/filetypes.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/filetypes.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/filetypes.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/filetypes.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/glibc.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/glibc.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/glibc.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/glibc.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/hashes.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/hashes.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/hashes.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/hashes.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/logging.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/logging.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/logging.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/logging.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/misc.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/misc.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/misc.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/misc.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/packaging.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/packaging.py similarity index 100% rename 
from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/packaging.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/packaging.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/pylock.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/pylock.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/pylock.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/pylock.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/retry.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/retry.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/retry.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/retry.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/subprocess.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/subprocess.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/subprocess.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/subprocess.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/temp_dir.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/temp_dir.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/temp_dir.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/temp_dir.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/unpacking.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/unpacking.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/unpacking.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/unpacking.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/urls.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/urls.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/urls.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/urls.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/virtualenv.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/virtualenv.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/virtualenv.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/virtualenv.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/wheel.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/wheel.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/utils/wheel.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/utils/wheel.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-314.pyc 
b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..5310d554 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-314.pyc new file mode 100644 index 00000000..45648a68 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-314.pyc new file mode 100644 index 00000000..13abcf03 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-314.pyc new file mode 100644 index 00000000..1a486f12 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-314.pyc new file mode 100644 index 00000000..3194c042 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-314.pyc new file mode 100644 index 00000000..2da4899b Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/bazaar.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/bazaar.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/bazaar.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/bazaar.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/git.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/git.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/git.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/git.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/mercurial.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/mercurial.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/mercurial.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/mercurial.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/subversion.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/subversion.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/subversion.py 
rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/subversion.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/versioncontrol.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/versioncontrol.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/vcs/versioncontrol.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/vcs/versioncontrol.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_internal/wheel_builder.py b/Python314_4_x64_Template/Lib/site-packages/pip/_internal/wheel_builder.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_internal/wheel_builder.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_internal/wheel_builder.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/README.rst b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/README.rst similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/README.rst rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/README.rst diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..d7632088 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/LICENSE.txt b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/LICENSE.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/LICENSE.txt rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/LICENSE.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..8794c93a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-314.pyc new file mode 100644 index 00000000..5c2d439b Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-314.pyc 
b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-314.pyc new file mode 100644 index 00000000..d6d45885 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-314.pyc new file mode 100644 index 00000000..cb311b82 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-314.pyc new file mode 100644 index 00000000..8998d34f Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-314.pyc new file mode 100644 index 00000000..e9b6774c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-314.pyc new file mode 100644 index 00000000..b8a065a4 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-314.pyc new file mode 100644 index 00000000..16c58f6c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-314.pyc new file mode 100644 index 00000000..8ac1ab32 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/cache.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/cache.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/cache.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/cache.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..af7e5bd8 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-314.pyc new file mode 100644 index 00000000..1814d94e Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-314.pyc new file mode 100644 index 00000000..b1c6ff0d Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/controller.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/controller.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/controller.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/controller.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py similarity index 100% rename from 
Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/py.typed similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/py.typed diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__init__.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/__main__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__main__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/__main__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__main__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..bc1e7eeb Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-314.pyc new file mode 100644 index 00000000..b4eb6454 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-314.pyc 
differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-314.pyc new file mode 100644 index 00000000..24df3547 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/cacert.pem b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/cacert.pem similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/cacert.pem rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/cacert.pem diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/core.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/core.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/core.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/core.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/py.typed similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/certifi/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/certifi/py.typed diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/LICENSE.txt b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/LICENSE.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/LICENSE.txt rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/LICENSE.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__init__.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__main__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__main__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__main__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__main__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..101f93c6 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-314.pyc new file mode 100644 index 00000000..0d895686 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-314.pyc differ diff --git 
a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-314.pyc new file mode 100644 index 00000000..c08bb01e Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-314.pyc new file mode 100644 index 00000000..7672920d Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-314.pyc new file mode 100644 index 00000000..b3731428 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-314.pyc new file mode 100644 index 00000000..97589415 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_implementation.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_implementation.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_implementation.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_implementation.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_toml_compat.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_toml_compat.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_toml_compat.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/_toml_compat.py diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/py.typed similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/dependency_groups/py.typed diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/LICENSE.txt b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/LICENSE.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/LICENSE.txt rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/LICENSE.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..f2f67af9 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-314.pyc new file mode 100644 index 00000000..797eb9b6 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-314.pyc new file mode 100644 index 00000000..2ca88c2c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-314.pyc new file mode 100644 index 00000000..a028d851 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-314.pyc new file mode 100644 index 00000000..41fe0d9f Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/compat.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/compat.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/compat.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/compat.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/resources.py 
b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/resources.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/resources.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/resources.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/scripts.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/scripts.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/scripts.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/scripts.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/t32.exe b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/t32.exe similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/t32.exe rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/t32.exe diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/t64-arm.exe b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/t64-arm.exe similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/t64-arm.exe rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/t64-arm.exe diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/t64.exe b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/t64.exe similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/t64.exe rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/t64.exe diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/util.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/util.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/util.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/util.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/w32.exe b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/w32.exe similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/w32.exe rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/w32.exe diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/w64-arm.exe b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/w64-arm.exe similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/w64-arm.exe rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/w64-arm.exe diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/w64.exe b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/w64.exe similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distlib/w64.exe rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distlib/w64.exe diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distro/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distro/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distro/__init__.py 
b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distro/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__init__.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distro/__main__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__main__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distro/__main__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__main__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..ba445b92 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-314.pyc new file mode 100644 index 00000000..b1e5ee89 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-314.pyc new file mode 100644 index 00000000..4683bd2a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distro/distro.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/distro.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distro/distro.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/distro.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distro/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/py.typed similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/distro/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/distro/py.typed diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/LICENSE.md b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/LICENSE.md similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/LICENSE.md rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/LICENSE.md diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..a6f02651 Binary files /dev/null and 
b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-314.pyc new file mode 100644 index 00000000..9d81c8fd Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-314.pyc new file mode 100644 index 00000000..44043015 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-314.pyc new file mode 100644 index 00000000..e3a363ff Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-314.pyc new file mode 100644 index 00000000..0a6b37db Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-314.pyc new file mode 100644 index 00000000..7a087529 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-314.pyc new file mode 100644 index 00000000..c03b76b5 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-314.pyc new file mode 100644 index 00000000..ba3cc370 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/codec.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/codec.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/codec.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/codec.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/compat.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/compat.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/compat.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/compat.py diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/core.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/core.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/core.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/core.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/idnadata.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/idnadata.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/idnadata.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/idnadata.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/intranges.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/intranges.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/intranges.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/intranges.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/package_data.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/package_data.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/package_data.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/package_data.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/py.typed similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/py.typed diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/uts46data.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/uts46data.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/idna/uts46data.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/idna/uts46data.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/msgpack/COPYING b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/COPYING similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/msgpack/COPYING rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/COPYING diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..e5d230f4 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-314.pyc new file mode 100644 index 00000000..72667e1b Binary files /dev/null and 
b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-314.pyc new file mode 100644 index 00000000..1f1661c5 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-314.pyc new file mode 100644 index 00000000..80242bf8 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/msgpack/exceptions.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/exceptions.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/msgpack/exceptions.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/exceptions.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/msgpack/ext.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/ext.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/msgpack/ext.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/ext.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/msgpack/fallback.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/fallback.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/msgpack/fallback.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/msgpack/fallback.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.APACHE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.APACHE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.APACHE rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.APACHE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.BSD b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.BSD similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.BSD rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.BSD diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-314.pyc 
b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..fc40f12f Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-314.pyc new file mode 100644 index 00000000..b2bbe774 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-314.pyc new file mode 100644 index 00000000..e911644e Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-314.pyc new file mode 100644 index 00000000..94c25a7f Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-314.pyc new file mode 100644 index 00000000..d1c9a829 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-314.pyc new file mode 100644 index 00000000..a39c25e5 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-314.pyc new file mode 100644 index 00000000..ccc3641f Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-314.pyc new file mode 100644 index 00000000..9fb30007 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-314.pyc new file mode 100644 index 00000000..0a6161de Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-314.pyc differ diff --git 
a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/pylock.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/pylock.cpython-314.pyc new file mode 100644 index 00000000..dd85eeac Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/pylock.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-314.pyc new file mode 100644 index 00000000..e6616eff Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-314.pyc new file mode 100644 index 00000000..cdfcacf9 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-314.pyc new file mode 100644 index 00000000..e30e9efb Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-314.pyc new file mode 100644 index 00000000..61c7aa04 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-314.pyc new file mode 100644 index 00000000..9b83ef80 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_elffile.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_elffile.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_elffile.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_elffile.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_manylinux.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_manylinux.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_manylinux.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_manylinux.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_musllinux.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_musllinux.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_musllinux.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_musllinux.py diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_parser.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_parser.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_parser.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_parser.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_structures.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_structures.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_structures.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_structures.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_tokenizer.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_tokenizer.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/_tokenizer.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/_tokenizer.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..41487464 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-314.pyc new file mode 100644 index 00000000..d1a9759c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/_spdx.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/_spdx.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/_spdx.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/licenses/_spdx.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/markers.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/markers.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/markers.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/markers.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/metadata.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/metadata.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/metadata.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/metadata.py diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/py.typed similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/py.typed diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/pylock.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/pylock.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/pylock.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/pylock.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/requirements.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/requirements.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/requirements.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/requirements.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/specifiers.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/specifiers.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/specifiers.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/specifiers.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/tags.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/tags.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/tags.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/tags.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/utils.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/utils.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/utils.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/utils.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/version.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/version.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/packaging/version.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/packaging/version.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pkg_resources/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pkg_resources/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pkg_resources/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pkg_resources/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 
00000000..d06c353b Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__init__.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__main__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__main__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__main__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__main__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..eced46c1 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-314.pyc new file mode 100644 index 00000000..f6c8900a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-314.pyc new file mode 100644 index 00000000..f54f9b78 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-314.pyc new file mode 100644 index 00000000..7f0780e6 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-314.pyc new file mode 100644 index 00000000..610ffaec Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-314.pyc new file mode 
100644 index 00000000..4ed789e4 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-314.pyc new file mode 100644 index 00000000..98fdcde6 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-314.pyc new file mode 100644 index 00000000..ce24ec6f Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/android.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/android.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/android.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/android.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/api.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/api.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/api.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/api.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/macos.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/macos.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/macos.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/macos.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/py.typed similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/py.typed diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/unix.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/unix.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/unix.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/unix.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/version.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/version.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/version.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/version.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/windows.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/windows.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/windows.py rename to 
Python314_4_x64_Template/Lib/site-packages/pip/_vendor/platformdirs/windows.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__init__.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/__main__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__main__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/__main__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__main__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..da622252 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-314.pyc new file mode 100644 index 00000000..27500d32 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-314.pyc new file mode 100644 index 00000000..e1a698a2 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-314.pyc new file mode 100644 index 00000000..b291a327 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-314.pyc new file mode 100644 index 00000000..c6f602f9 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-314.pyc new file mode 100644 index 00000000..8f90802e Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-314.pyc differ diff --git 
a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-314.pyc new file mode 100644 index 00000000..9357db3a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-314.pyc new file mode 100644 index 00000000..20217084 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-314.pyc new file mode 100644 index 00000000..73a94826 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-314.pyc new file mode 100644 index 00000000..a1489f01 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-314.pyc new file mode 100644 index 00000000..e4d60601 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-314.pyc new file mode 100644 index 00000000..7647b7de Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-314.pyc new file mode 100644 index 00000000..bd1ee485 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-314.pyc new file mode 100644 index 00000000..ca10257e Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-314.pyc new file mode 100644 index 00000000..623bb146 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-314.pyc 
differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/console.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/console.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/console.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/console.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/filter.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/filter.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/filter.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/filter.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..e723cf81 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatter.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatter.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatter.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatter.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..af7f6948 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-314.pyc new file mode 100644 index 00000000..e3ed2c01 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py rename to 
Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexer.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexer.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexer.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexer.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..946017c6 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-314.pyc new file mode 100644 index 00000000..8daacc3c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-314.pyc new file mode 100644 index 00000000..646d6c62 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/_mapping.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/_mapping.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/_mapping.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/_mapping.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/python.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/python.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/python.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/lexers/python.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/modeline.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/modeline.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/modeline.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/modeline.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/plugin.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/plugin.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/plugin.py rename to 
Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/plugin.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/regexopt.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/regexopt.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/regexopt.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/regexopt.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/scanner.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/scanner.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/scanner.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/scanner.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/sphinxext.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/sphinxext.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/sphinxext.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/sphinxext.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/style.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/style.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/style.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/style.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..576b703c
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-314.pyc
new file mode 100644
index 00000000..a80274bf
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/_mapping.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/_mapping.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/_mapping.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/styles/_mapping.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/token.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/token.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/token.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/token.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/unistring.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/unistring.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/unistring.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/unistring.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/util.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/util.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pygments/util.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pygments/util.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/LICENSE
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/LICENSE
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/LICENSE
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..8a0e2da8
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-314.pyc
new file mode 100644
index 00000000..0f6a8e5a
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_impl.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_impl.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_impl.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_impl.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..3a0a2f8c
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-314.pyc
new file mode 100644
index 00000000..c553d0da
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/py.typed
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/py.typed
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/py.typed
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/LICENSE
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/LICENSE
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/LICENSE
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..b8cc7c70
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-314.pyc
new file mode 100644
index 00000000..ca7a4e93
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-314.pyc
new file mode 100644
index 00000000..3a9f7737
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-314.pyc
new file mode 100644
index 00000000..2aadbfb7
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/api.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/api.cpython-314.pyc
new file mode 100644
index 00000000..6db05f1f
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/api.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-314.pyc
new file mode 100644
index 00000000..0a04e336
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-314.pyc
new file mode 100644
index 00000000..a10ca2d7
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-314.pyc
new file mode 100644
index 00000000..104c94f6
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-314.pyc
new file mode 100644
index 00000000..44716dfa
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-314.pyc
new file mode 100644
index 00000000..3aa95d4d
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/help.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/help.cpython-314.pyc
new file mode 100644
index 00000000..fe8272a9
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/help.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-314.pyc
new file mode 100644
index 00000000..10e7cfce
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/models.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/models.cpython-314.pyc
new file mode 100644
index 00000000..07632c74
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/models.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-314.pyc
new file mode 100644
index 00000000..aed684b8
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-314.pyc
new file mode 100644
index 00000000..f3132f7f
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-314.pyc
new file mode 100644
index 00000000..40bf209e
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-314.pyc
new file mode 100644
index 00000000..8d8e9924
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-314.pyc
new file mode 100644
index 00000000..c1078da0
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/__version__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__version__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/__version__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/__version__.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/_internal_utils.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/_internal_utils.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/_internal_utils.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/_internal_utils.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/adapters.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/adapters.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/adapters.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/adapters.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/api.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/api.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/api.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/api.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/auth.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/auth.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/auth.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/auth.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/certs.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/certs.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/certs.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/certs.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/compat.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/compat.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/compat.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/compat.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/cookies.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/cookies.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/cookies.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/cookies.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/exceptions.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/exceptions.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/exceptions.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/exceptions.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/help.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/help.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/help.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/help.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/hooks.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/hooks.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/hooks.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/hooks.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/models.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/models.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/models.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/models.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/packages.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/packages.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/packages.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/packages.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/sessions.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/sessions.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/sessions.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/sessions.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/status_codes.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/status_codes.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/status_codes.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/status_codes.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/structures.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/structures.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/structures.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/structures.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/utils.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/utils.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/requests/utils.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/requests/utils.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/LICENSE
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/LICENSE
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/LICENSE
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..566a69a6
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-314.pyc
new file mode 100644
index 00000000..347ceeec
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-314.pyc
new file mode 100644
index 00000000..7cabf96f
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-314.pyc
new file mode 100644
index 00000000..abb8251d
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/providers.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/providers.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/providers.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/providers.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/py.typed
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/py.typed
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/py.typed
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/reporters.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/reporters.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/reporters.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/reporters.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..27f5e14f
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-314.pyc
new file mode 100644
index 00000000..2c03199a
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-314.pyc
new file mode 100644
index 00000000..0af06519
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-314.pyc
new file mode 100644
index 00000000..0047e1e7
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-314.pyc
new file mode 100644
index 00000000..baed71a8
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/structs.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/structs.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/structs.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/resolvelib/structs.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/LICENSE
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/LICENSE
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/LICENSE
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__init__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__init__.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__main__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__main__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/__main__.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__main__.py
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..1fddbe24
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__main__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__main__.cpython-314.pyc
new file mode 100644
index 00000000..ce189a98
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__main__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_cell_widths.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_cell_widths.cpython-314.pyc
new file mode 100644
index 00000000..a93e7c1e
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_cell_widths.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-314.pyc
new file mode 100644
index 00000000..62235a5f
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_replace.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_replace.cpython-314.pyc
new file mode 100644
index 00000000..e6924211
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_replace.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_export_format.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_export_format.cpython-314.pyc
new file mode 100644
index 00000000..bc242014
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_export_format.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_extension.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_extension.cpython-314.pyc
new file mode 100644
index 00000000..0e0cc4e8
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_extension.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_fileno.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_fileno.cpython-314.pyc
new file mode 100644
index 00000000..6dfd9bb9
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_fileno.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_inspect.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_inspect.cpython-314.pyc
new file mode 100644
index 00000000..74a192ea
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_inspect.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_log_render.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_log_render.cpython-314.pyc
new file mode 100644
index 00000000..96282483
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_log_render.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_loop.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_loop.cpython-314.pyc
new file mode 100644
index 00000000..469883db
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_loop.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_null_file.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_null_file.cpython-314.pyc
new file mode 100644
index 00000000..3a7917f1
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_null_file.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_palettes.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_palettes.cpython-314.pyc
new file mode 100644
index 00000000..15b63d54
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_palettes.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_pick.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_pick.cpython-314.pyc
new file mode 100644
index 00000000..927dda6b
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_pick.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_ratio.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_ratio.cpython-314.pyc
new file mode 100644
index 00000000..5dc602ca
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_ratio.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_spinners.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_spinners.cpython-314.pyc
new file mode 100644
index 00000000..94f9853d
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_spinners.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_stack.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_stack.cpython-314.pyc
new file mode 100644
index 00000000..905e4a1f
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_stack.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_timer.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_timer.cpython-314.pyc
new file mode 100644
index 00000000..e5458ae0
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_timer.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_win32_console.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_win32_console.cpython-314.pyc
new file mode 100644
index 00000000..26b406a3
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_win32_console.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows.cpython-314.pyc
new file mode 100644
index 00000000..f8e9c182
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows_renderer.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows_renderer.cpython-314.pyc
new file mode 100644
index 00000000..ee638151
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows_renderer.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_wrap.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_wrap.cpython-314.pyc
new file mode 100644
index 00000000..da8e7a09
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_wrap.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/abc.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/abc.cpython-314.pyc
new file mode 100644
index 00000000..aff763c3
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/abc.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/align.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/align.cpython-314.pyc
new file mode 100644
index 00000000..348a6788
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/align.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/ansi.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/ansi.cpython-314.pyc
new file mode 100644
index 00000000..305171c1
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/ansi.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/bar.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/bar.cpython-314.pyc
new file mode 100644
index 00000000..5f1ba3aa
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/bar.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/box.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/box.cpython-314.pyc
new file mode 100644
index 00000000..9baa4c27
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/box.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-314.pyc
new file mode 100644
index 00000000..6ed88242
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color.cpython-314.pyc
new file mode 100644
index 00000000..0504e688
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color_triplet.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color_triplet.cpython-314.pyc
new file mode 100644
index 00000000..586a4e14
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color_triplet.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/columns.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/columns.cpython-314.pyc
new file mode 100644
index 00000000..87ee2b86
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/columns.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/console.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/console.cpython-314.pyc
new file mode 100644
index 00000000..bcaac19e
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/console.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/constrain.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/constrain.cpython-314.pyc
new file mode 100644
index 00000000..7eb00de6
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/constrain.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/containers.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/containers.cpython-314.pyc
new file mode 100644
index 00000000..648cf326
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/containers.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/control.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/control.cpython-314.pyc
new file mode 100644
index 00000000..bcd3b057
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/control.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/default_styles.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/default_styles.cpython-314.pyc
new file mode 100644
index 00000000..7cab9a5e
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/default_styles.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/diagnose.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/diagnose.cpython-314.pyc
new file mode 100644
index 00000000..abaebe15
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/diagnose.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/emoji.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/emoji.cpython-314.pyc
new file mode 100644
index 00000000..032a6f51
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/emoji.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/errors.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/errors.cpython-314.pyc
new file mode 100644
index 00000000..836e5ffa
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/errors.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/file_proxy.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/file_proxy.cpython-314.pyc
new file mode 100644
index 00000000..3c8d17bc
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/file_proxy.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/filesize.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/filesize.cpython-314.pyc
new file mode 100644
index 00000000..76bb6d1a
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/filesize.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/highlighter.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/highlighter.cpython-314.pyc
new file mode 100644
index 00000000..7f5deb5c
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/highlighter.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/json.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/json.cpython-314.pyc
new file mode 100644
index 00000000..6e2dfaff
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/json.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/jupyter.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/jupyter.cpython-314.pyc
new file mode 100644
index 00000000..360d61d5
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/jupyter.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/layout.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/layout.cpython-314.pyc
new file mode 100644
index 00000000..ef8a9d8f
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/layout.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live.cpython-314.pyc
new file mode 100644
index 00000000..2c5b6396
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live_render.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live_render.cpython-314.pyc
new file mode 100644
index 00000000..4a42bda4
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live_render.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/logging.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/logging.cpython-314.pyc
new file mode 100644
index 00000000..d4f25f03
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/logging.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/markup.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/markup.cpython-314.pyc
new file mode 100644
index 00000000..2cb03dda
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/markup.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/measure.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/measure.cpython-314.pyc
new file mode 100644
index 00000000..aa059d14
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/measure.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/padding.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/padding.cpython-314.pyc
new file mode 100644
index 00000000..ea811b2b
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/padding.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pager.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pager.cpython-314.pyc
new file mode 100644
index 00000000..d56f6d59
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pager.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/palette.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/palette.cpython-314.pyc
new file mode 100644
index 00000000..c594407b
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/palette.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/panel.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/panel.cpython-314.pyc
new file mode 100644
index 00000000..6f525297
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/panel.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pretty.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pretty.cpython-314.pyc
new file mode 100644
index 00000000..5a80cef2
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pretty.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress.cpython-314.pyc
new file mode 100644
index 00000000..57c7270a
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress_bar.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress_bar.cpython-314.pyc
new file mode 100644
index 00000000..30fc1b45
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress_bar.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/prompt.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/prompt.cpython-314.pyc
new file mode 100644
index 00000000..f3f7f705
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/prompt.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/protocol.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/protocol.cpython-314.pyc
new file mode 100644
index 00000000..21163f11
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/protocol.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/region.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/region.cpython-314.pyc
new file mode 100644
index 00000000..9a2337b2
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/region.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/repr.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/repr.cpython-314.pyc
new file mode 100644
index 00000000..26e6fa9e
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/repr.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/rule.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/rule.cpython-314.pyc
new file mode 100644
index 00000000..2dcc4ade
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/rule.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/scope.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/scope.cpython-314.pyc
new file mode 100644
index 00000000..089136fe
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/scope.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/screen.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/screen.cpython-314.pyc
new file mode 100644
index 00000000..16ab03db
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/screen.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/segment.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/segment.cpython-314.pyc
new file mode 100644
index 00000000..47c0994a
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/segment.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/spinner.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/spinner.cpython-314.pyc
new file mode 100644
index 00000000..d3389a47
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/spinner.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/status.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/status.cpython-314.pyc
new file mode 100644
index 00000000..4c12cf63
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/status.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/style.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/style.cpython-314.pyc
new file mode 100644
index 00000000..139fcf80
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/style.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/styled.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/styled.cpython-314.pyc
new file mode 100644
index 00000000..8c053b8d
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/styled.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/syntax.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/syntax.cpython-314.pyc
new file mode 100644
index 00000000..084e3eb8
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/syntax.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/table.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/table.cpython-314.pyc
new file mode 100644
index 00000000..5c512884
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/table.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/terminal_theme.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/terminal_theme.cpython-314.pyc
new file mode 100644
index 00000000..91421578
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/terminal_theme.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/text.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/text.cpython-314.pyc
new file mode 100644
index 00000000..e3d7c85e
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/text.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/theme.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/theme.cpython-314.pyc
new file mode 100644
index 00000000..60e4b36e
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/theme.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/themes.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/themes.cpython-314.pyc
new file mode 100644
index 00000000..d212a7ba
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/themes.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/traceback.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/traceback.cpython-314.pyc
new file mode 100644
index 00000000..94143816
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/traceback.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/tree.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/tree.cpython-314.pyc
new file mode 100644
index 00000000..83fd9c52
Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/tree.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_cell_widths.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_cell_widths.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_cell_widths.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_cell_widths.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_emoji_codes.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_emoji_codes.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_emoji_codes.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_emoji_codes.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_emoji_replace.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_emoji_replace.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_emoji_replace.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_emoji_replace.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_export_format.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_export_format.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_export_format.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_export_format.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_extension.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_extension.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_extension.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_extension.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_fileno.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_fileno.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_fileno.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_fileno.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_inspect.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_inspect.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_inspect.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_inspect.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_log_render.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_log_render.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_log_render.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_log_render.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_loop.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_loop.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_loop.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_loop.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_null_file.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_null_file.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_null_file.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_null_file.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_palettes.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_palettes.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_palettes.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_palettes.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_pick.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_pick.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_pick.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_pick.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_ratio.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_ratio.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_ratio.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_ratio.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_spinners.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_spinners.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_spinners.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_spinners.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_stack.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_stack.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_stack.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_stack.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_timer.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_timer.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_timer.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_timer.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_win32_console.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_win32_console.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_win32_console.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_win32_console.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_windows.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_windows.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_windows.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_windows.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_windows_renderer.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_windows_renderer.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_windows_renderer.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_windows_renderer.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_wrap.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_wrap.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/_wrap.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/_wrap.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/abc.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/abc.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/abc.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/abc.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/align.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/align.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/align.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/align.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/ansi.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/ansi.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/ansi.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/ansi.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/bar.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/bar.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/bar.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/bar.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/box.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/box.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/box.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/box.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/cells.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/cells.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/cells.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/cells.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/color.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/color.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/color.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/color.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/color_triplet.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/color_triplet.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/color_triplet.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/color_triplet.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/columns.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/columns.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/columns.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/columns.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/console.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/console.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/console.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/console.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/constrain.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/constrain.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/constrain.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/constrain.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/containers.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/containers.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/containers.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/containers.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/control.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/control.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/control.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/control.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/default_styles.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/default_styles.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/default_styles.py
rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/default_styles.py
diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/diagnose.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/diagnose.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/diagnose.py
rename to
Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/diagnose.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/emoji.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/emoji.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/emoji.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/emoji.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/errors.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/errors.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/errors.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/errors.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/file_proxy.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/file_proxy.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/file_proxy.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/file_proxy.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/filesize.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/filesize.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/filesize.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/filesize.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/highlighter.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/highlighter.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/highlighter.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/highlighter.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/json.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/json.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/json.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/json.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/jupyter.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/jupyter.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/jupyter.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/jupyter.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/layout.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/layout.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/layout.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/layout.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/live.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/live.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/live.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/live.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/live_render.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/live_render.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/live_render.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/live_render.py diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/logging.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/logging.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/logging.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/logging.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/markup.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/markup.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/markup.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/markup.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/measure.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/measure.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/measure.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/measure.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/padding.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/padding.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/padding.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/padding.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/pager.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/pager.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/pager.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/pager.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/palette.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/palette.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/palette.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/palette.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/panel.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/panel.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/panel.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/panel.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/pretty.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/pretty.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/pretty.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/pretty.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/progress.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/progress.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/progress.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/progress.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/progress_bar.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/progress_bar.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/progress_bar.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/progress_bar.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/prompt.py 
b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/prompt.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/prompt.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/prompt.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/protocol.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/protocol.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/protocol.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/protocol.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/py.typed similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/py.typed diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/region.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/region.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/region.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/region.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/repr.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/repr.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/repr.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/repr.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/rule.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/rule.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/rule.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/rule.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/scope.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/scope.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/scope.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/scope.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/screen.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/screen.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/screen.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/screen.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/segment.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/segment.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/segment.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/segment.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/spinner.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/spinner.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/spinner.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/spinner.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/status.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/status.py similarity index 100% rename from 
Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/status.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/status.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/style.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/style.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/style.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/style.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/styled.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/styled.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/styled.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/styled.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/syntax.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/syntax.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/syntax.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/syntax.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/table.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/table.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/table.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/table.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/terminal_theme.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/terminal_theme.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/terminal_theme.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/terminal_theme.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/text.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/text.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/text.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/text.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/theme.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/theme.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/theme.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/theme.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/themes.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/themes.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/themes.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/themes.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/traceback.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/traceback.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/traceback.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/traceback.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/tree.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/tree.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/rich/tree.py rename to 
Python314_4_x64_Template/Lib/site-packages/pip/_vendor/rich/tree.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..da10c780 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_parser.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_parser.cpython-314.pyc new file mode 100644 index 00000000..121b3a16 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_parser.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_re.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_re.cpython-314.pyc new file mode 100644 index 00000000..9bf2e7ea Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_re.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_types.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_types.cpython-314.pyc new file mode 100644 index 00000000..a349639d Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_types.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/_parser.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/_parser.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/_parser.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/_parser.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/_re.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/_re.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/_re.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/_re.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/_types.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/_types.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/_types.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/_types.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/py.typed similarity index 100% rename from 
Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli/py.typed diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..4f6c345c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/_writer.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/_writer.cpython-314.pyc new file mode 100644 index 00000000..2467069b Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/_writer.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/_writer.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/_writer.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/_writer.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/_writer.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/py.typed similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/tomli_w/py.typed diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/LICENSE b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/LICENSE similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/LICENSE rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/LICENSE diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..d936d7bb Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/__init__.cpython-314.pyc differ diff --git 
a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_api.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_api.cpython-314.pyc new file mode 100644 index 00000000..d72f29a7 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_api.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_macos.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_macos.cpython-314.pyc new file mode 100644 index 00000000..b579543a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_macos.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_openssl.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_openssl.cpython-314.pyc new file mode 100644 index 00000000..90a16823 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_openssl.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-314.pyc new file mode 100644 index 00000000..196ff820 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_windows.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_windows.cpython-314.pyc new file mode 100644 index 00000000..b74fbc2d Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_windows.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/_api.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/_api.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/_api.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/_api.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/_macos.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/_macos.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/_macos.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/_macos.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/_openssl.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/_openssl.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/_openssl.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/_openssl.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/_ssl_constants.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/_ssl_constants.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/_ssl_constants.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/_ssl_constants.py diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/_windows.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/_windows.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/_windows.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/_windows.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/py.typed similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/truststore/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/truststore/py.typed diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/LICENSE.txt b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/LICENSE.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/LICENSE.txt rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/LICENSE.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..de8f24ac Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-314.pyc new file mode 100644 index 00000000..87cba416 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-314.pyc new file mode 100644 index 00000000..fe44f5a4 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-314.pyc new file mode 100644 index 00000000..2029805c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-314.pyc new file mode 100644 index 00000000..1f652067 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-314.pyc differ diff --git 
a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-314.pyc new file mode 100644 index 00000000..7349389c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-314.pyc new file mode 100644 index 00000000..f942b547 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-314.pyc new file mode 100644 index 00000000..6aca024a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-314.pyc new file mode 100644 index 00000000..a95fcc67 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-314.pyc new file mode 100644 index 00000000..a18347c1 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-314.pyc new file mode 100644 index 00000000..db3b649a Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/_collections.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/_collections.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/_collections.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/_collections.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/_version.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/_version.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/_version.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/_version.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/connection.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/connection.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/connection.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/connection.py diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..94795d03 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-314.pyc new file mode 100644 index 00000000..4e84b1d1 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-314.pyc new file mode 100644 index 00000000..ce929629 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-314.pyc new file mode 100644 index 00000000..42bd9899 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-314.pyc new file mode 100644 index 00000000..ae32b02c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-314.pyc new file mode 100644 index 00000000..4980af62 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-314.pyc 
b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-314.pyc new file mode 100644 index 00000000..2f99b6d2 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..4b22d80c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-314.pyc new file mode 100644 index 00000000..105c7491 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-314.pyc new file mode 100644 index 00000000..62abfc04 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py diff --git 
a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/exceptions.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/exceptions.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/exceptions.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/exceptions.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/fields.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/fields.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/fields.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/fields.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/filepost.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/filepost.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/filepost.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/filepost.py diff --git a/Python313_13_x64_Template/Lib/urllib/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/urllib/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-314.pyc 
b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..b1e8a1c5 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-314.pyc new file mode 100644 index 00000000..fcb3918c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/__phello__/ham/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/__phello__/ham/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..692204c6 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-314.pyc new file mode 100644 index 00000000..1968e2d6 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-314.pyc new file mode 100644 index 00000000..d5abbb94 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/weakref_finalize.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/weakref_finalize.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/weakref_finalize.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/weakref_finalize.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/six.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/six.py similarity index 
100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/six.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/packages/six.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/request.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/request.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/request.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/request.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/response.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/response.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/response.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/response.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__init__.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__init__.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__init__.py diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..b21278f4 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-314.pyc new file mode 100644 index 00000000..5a37a3b9 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-314.pyc new file mode 100644 index 00000000..983681f0 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-314.pyc new file mode 100644 index 00000000..5366a7a4 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-314.pyc new file mode 100644 index 
00000000..6a294bc4 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-314.pyc new file mode 100644 index 00000000..71f9c5c2 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-314.pyc new file mode 100644 index 00000000..780e6c2c Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-314.pyc new file mode 100644 index 00000000..c45b0412 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-314.pyc new file mode 100644 index 00000000..996c9fbc Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-314.pyc new file mode 100644 index 00000000..e38f6515 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-314.pyc new file mode 100644 index 00000000..c5c32555 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-314.pyc new file mode 100644 index 00000000..7b3b05dc Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-314.pyc b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-314.pyc new file mode 100644 index 00000000..20163787 Binary files /dev/null and b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/connection.py 
b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/connection.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/connection.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/connection.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/queue.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/queue.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/queue.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/queue.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/request.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/request.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/request.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/request.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/response.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/response.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/response.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/response.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/retry.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/retry.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/retry.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/retry.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py similarity index 100% rename from 
Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/url.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/url.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/url.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/url.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/wait.py b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/wait.py similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/wait.py rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/urllib3/util/wait.py diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/_vendor/vendor.txt b/Python314_4_x64_Template/Lib/site-packages/pip/_vendor/vendor.txt similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/_vendor/vendor.txt rename to Python314_4_x64_Template/Lib/site-packages/pip/_vendor/vendor.txt diff --git a/Python313_13_x64_Template/Lib/site-packages/pip/py.typed b/Python314_4_x64_Template/Lib/site-packages/pip/py.typed similarity index 100% rename from Python313_13_x64_Template/Lib/site-packages/pip/py.typed rename to Python314_4_x64_Template/Lib/site-packages/pip/py.typed diff --git a/Python314_4_x64_Template/Lib/site.py b/Python314_4_x64_Template/Lib/site.py new file mode 100644 index 00000000..aeb7c6cf --- /dev/null +++ b/Python314_4_x64_Template/Lib/site.py @@ -0,0 +1,779 @@ +"""Append module search paths for third-party packages to sys.path. + +**************************************************************** +* This module is automatically imported during initialization. * +**************************************************************** + +This will append site-specific paths to the module search path. On +Unix (including Mac OSX), it starts with sys.prefix and +sys.exec_prefix (if different) and appends +lib/python<version>/site-packages. +On other platforms (such as Windows), it tries each of the +prefixes directly, as well as with lib/site-packages appended. The +resulting directories, if they exist, are appended to sys.path, and +also inspected for path configuration files. + +If a file named "pyvenv.cfg" exists one directory above sys.executable, +sys.prefix and sys.exec_prefix are set to that directory and +it is also checked for site-packages (sys.base_prefix and +sys.base_exec_prefix will always be the "real" prefixes of the Python +installation). If "pyvenv.cfg" (a bootstrap configuration file) contains +the key "include-system-site-packages" set to anything other than "false" +(case-insensitive), the system-level prefixes will still also be +searched for site-packages; otherwise they won't. + +All of the resulting site-specific directories, if they exist, are +appended to sys.path, and also inspected for path configuration +files. + +A path configuration file is a file whose name has the form +<package>.pth; its contents are additional directories (one per line) +to be added to sys.path. Non-existing directories (or +non-directories) are never added to sys.path; no directory is added to +sys.path more than once. Blank lines and lines beginning with +'#' are skipped. Lines starting with 'import' are executed.
+ +For example, suppose sys.prefix and sys.exec_prefix are set to +/usr/local and there is a directory /usr/local/lib/python2.5/site-packages +with three subdirectories, foo, bar and spam, and two path +configuration files, foo.pth and bar.pth. Assume foo.pth contains the +following: + + # foo package configuration + foo + bar + bletch + +and bar.pth contains: + + # bar package configuration + bar + +Then the following directories are added to sys.path, in this order: + + /usr/local/lib/python2.5/site-packages/bar + /usr/local/lib/python2.5/site-packages/foo + +Note that bletch is omitted because it doesn't exist; bar precedes foo +because bar.pth comes alphabetically before foo.pth; and spam is +omitted because it is not mentioned in either path configuration file. + +The readline module is also automatically configured to enable +completion for systems that support it. This can be overridden in +sitecustomize, usercustomize or PYTHONSTARTUP. Starting Python in +isolated mode (-I) disables automatic readline configuration. + +After these operations, an attempt is made to import a module +named sitecustomize, which can perform arbitrary additional +site-specific customizations. If this import fails with an +ImportError exception, it is silently ignored. +""" + +import sys +import os +import builtins +import _sitebuiltins +import _io as io +import stat +import errno + +# Prefixes for site-packages; add additional prefixes like /usr/local here +PREFIXES = [sys.prefix, sys.exec_prefix] +# Enable per user site-packages directory +# set it to False to disable the feature or True to force the feature +ENABLE_USER_SITE = None + +# for distutils.commands.install +# These values are initialized by the getuserbase() and getusersitepackages() +# functions, through the main() function when Python starts. +USER_SITE = None +USER_BASE = None + + +def _trace(message): + if sys.flags.verbose: + print(message, file=sys.stderr) + + +def _warn(*args, **kwargs): + import warnings + + warnings.warn(*args, **kwargs) + + +def makepath(*paths): + dir = os.path.join(*paths) + try: + dir = os.path.abspath(dir) + except OSError: + pass + return dir, os.path.normcase(dir) + + +def abs_paths(): + """Set all module __file__ and __cached__ attributes to an absolute path""" + for m in set(sys.modules.values()): + loader_module = None + try: + loader_module = m.__loader__.__module__ + except AttributeError: + try: + loader_module = m.__spec__.loader.__module__ + except AttributeError: + pass + if loader_module not in {'_frozen_importlib', '_frozen_importlib_external'}: + continue # don't mess with a PEP 302-supplied __file__ + try: + m.__file__ = os.path.abspath(m.__file__) + except (AttributeError, OSError, TypeError): + pass + try: + m.__cached__ = os.path.abspath(m.__cached__) + except (AttributeError, OSError, TypeError): + pass + + +def removeduppaths(): + """ Remove duplicate entries from sys.path along with making them + absolute""" + # This ensures that the initial path provided by the interpreter contains + # only absolute pathnames, even if we're running from the build directory. + L = [] + known_paths = set() + for dir in sys.path: + # Filter out duplicate paths (on case-insensitive file systems also + # if they only differ in case); turn relative paths into absolute + # paths. 
+ dir, dircase = makepath(dir) + if dircase not in known_paths: + L.append(dir) + known_paths.add(dircase) + sys.path[:] = L + return known_paths + + +def _init_pathinfo(): + """Return a set containing all existing file system items from sys.path.""" + d = set() + for item in sys.path: + try: + if os.path.exists(item): + _, itemcase = makepath(item) + d.add(itemcase) + except TypeError: + continue + return d + + +def addpackage(sitedir, name, known_paths): + """Process a .pth file within the site-packages directory: + For each line in the file, either combine it with sitedir to a path + and add that to known_paths, or execute it if it starts with 'import '. + """ + if known_paths is None: + known_paths = _init_pathinfo() + reset = True + else: + reset = False + fullname = os.path.join(sitedir, name) + try: + st = os.lstat(fullname) + except OSError: + return + if ((getattr(st, 'st_flags', 0) & stat.UF_HIDDEN) or + (getattr(st, 'st_file_attributes', 0) & stat.FILE_ATTRIBUTE_HIDDEN)): + _trace(f"Skipping hidden .pth file: {fullname!r}") + return + _trace(f"Processing .pth file: {fullname!r}") + try: + with io.open_code(fullname) as f: + pth_content = f.read() + except OSError: + return + + try: + # Accept BOM markers in .pth files as we do in source files + # (Windows PowerShell 5.1 makes it hard to emit UTF-8 files without a BOM) + pth_content = pth_content.decode("utf-8-sig") + except UnicodeDecodeError: + # Fallback to locale encoding for backward compatibility. + # We will deprecate this fallback in the future. + import locale + pth_content = pth_content.decode(locale.getencoding()) + _trace(f"Cannot read {fullname!r} as UTF-8. " + f"Using fallback encoding {locale.getencoding()!r}") + + for n, line in enumerate(pth_content.splitlines(), 1): + if line.startswith("#"): + continue + if line.strip() == "": + continue + try: + if line.startswith(("import ", "import\t")): + exec(line) + continue + line = line.rstrip() + dir, dircase = makepath(sitedir, line) + if dircase not in known_paths and os.path.exists(dir): + sys.path.append(dir) + known_paths.add(dircase) + except Exception as exc: + print(f"Error processing line {n:d} of {fullname}:\n", + file=sys.stderr) + import traceback + for record in traceback.format_exception(exc): + for line in record.splitlines(): + print(' '+line, file=sys.stderr) + print("\nRemainder of file ignored", file=sys.stderr) + break + if reset: + known_paths = None + return known_paths + + +def addsitedir(sitedir, known_paths=None): + """Add 'sitedir' argument to sys.path if missing and handle .pth files in + 'sitedir'""" + _trace(f"Adding directory: {sitedir!r}") + if known_paths is None: + known_paths = _init_pathinfo() + reset = True + else: + reset = False + sitedir, sitedircase = makepath(sitedir) + if not sitedircase in known_paths: + sys.path.append(sitedir) # Add path component + known_paths.add(sitedircase) + try: + names = os.listdir(sitedir) + except OSError: + return + names = [name for name in names + if name.endswith(".pth") and not name.startswith(".")] + for name in sorted(names): + addpackage(sitedir, name, known_paths) + if reset: + known_paths = None + return known_paths + + +def check_enableusersite(): + """Check if user site directory is safe for inclusion + + The function tests for the command line flag (including environment var), + process uid/gid equal to effective uid/gid. 
+ + None: Disabled for security reasons + False: Disabled by user (command line option) + True: Safe and enabled + """ + if sys.flags.no_user_site: + return False + + if hasattr(os, "getuid") and hasattr(os, "geteuid"): + # check process uid == effective uid + if os.geteuid() != os.getuid(): + return None + if hasattr(os, "getgid") and hasattr(os, "getegid"): + # check process gid == effective gid + if os.getegid() != os.getgid(): + return None + + return True + + +# NOTE: sysconfig and it's dependencies are relatively large but site module +# needs very limited part of them. +# To speedup startup time, we have copy of them. +# +# See https://bugs.python.org/issue29585 + +# Copy of sysconfig._get_implementation() +def _get_implementation(): + return 'Python' + +# Copy of sysconfig._getuserbase() +def _getuserbase(): + env_base = os.environ.get("PYTHONUSERBASE", None) + if env_base: + return env_base + + # Emscripten, iOS, tvOS, VxWorks, WASI, and watchOS have no home directories + if sys.platform in {"emscripten", "ios", "tvos", "vxworks", "wasi", "watchos"}: + return None + + def joinuser(*args): + return os.path.expanduser(os.path.join(*args)) + + if os.name == "nt": + base = os.environ.get("APPDATA") or "~" + return joinuser(base, _get_implementation()) + + if sys.platform == "darwin" and sys._framework: + return joinuser("~", "Library", sys._framework, + "%d.%d" % sys.version_info[:2]) + + return joinuser("~", ".local") + + +# Same to sysconfig.get_path('purelib', os.name+'_user') +def _get_path(userbase): + version = sys.version_info + if hasattr(sys, 'abiflags') and 't' in sys.abiflags: + abi_thread = 't' + else: + abi_thread = '' + + implementation = _get_implementation() + implementation_lower = implementation.lower() + if os.name == 'nt': + ver_nodot = sys.winver.replace('.', '') + return f'{userbase}\\{implementation}{ver_nodot}\\site-packages' + + if sys.platform == 'darwin' and sys._framework: + return f'{userbase}/lib/{implementation_lower}/site-packages' + + return f'{userbase}/lib/python{version[0]}.{version[1]}{abi_thread}/site-packages' + + +def getuserbase(): + """Returns the `user base` directory path. + + The `user base` directory can be used to store data. If the global + variable ``USER_BASE`` is not initialized yet, this function will also set + it. + """ + global USER_BASE + if USER_BASE is None: + USER_BASE = _getuserbase() + return USER_BASE + + +def getusersitepackages(): + """Returns the user-specific site-packages directory path. + + If the global variable ``USER_SITE`` is not initialized yet, this + function will also set it. + """ + global USER_SITE, ENABLE_USER_SITE + userbase = getuserbase() # this will also set USER_BASE + + if USER_SITE is None: + if userbase is None: + ENABLE_USER_SITE = False # disable user site and return None + else: + USER_SITE = _get_path(userbase) + + return USER_SITE + +def addusersitepackages(known_paths): + """Add a per user site-package to sys.path + + Each user has its own python directory with site-packages in the + home directory. + """ + # get the per user site-package path + # this call will also make sure USER_BASE and USER_SITE are set + _trace("Processing user site-packages") + user_site = getusersitepackages() + + if ENABLE_USER_SITE and os.path.isdir(user_site): + addsitedir(user_site, known_paths) + return known_paths + +def getsitepackages(prefixes=None): + """Returns a list containing all global site-packages directories. 
+ + For each directory present in ``prefixes`` (or the global ``PREFIXES``), + this function will find its `site-packages` subdirectory depending on the + system environment, and will return a list of full paths. + """ + sitepackages = [] + seen = set() + + if prefixes is None: + prefixes = PREFIXES + + for prefix in prefixes: + if not prefix or prefix in seen: + continue + seen.add(prefix) + + implementation = _get_implementation().lower() + ver = sys.version_info + if hasattr(sys, 'abiflags') and 't' in sys.abiflags: + abi_thread = 't' + else: + abi_thread = '' + if os.sep == '/': + libdirs = [sys.platlibdir] + if sys.platlibdir != "lib": + libdirs.append("lib") + + for libdir in libdirs: + path = os.path.join(prefix, libdir, + f"{implementation}{ver[0]}.{ver[1]}{abi_thread}", + "site-packages") + sitepackages.append(path) + else: + sitepackages.append(prefix) + sitepackages.append(os.path.join(prefix, "Lib", "site-packages")) + return sitepackages + +def addsitepackages(known_paths, prefixes=None): + """Add site-packages to sys.path""" + _trace("Processing global site-packages") + for sitedir in getsitepackages(prefixes): + if os.path.isdir(sitedir): + addsitedir(sitedir, known_paths) + + return known_paths + +def setquit(): + """Define new builtins 'quit' and 'exit'. + + These are objects which make the interpreter exit when called. + The repr of each object contains a hint at how it works. + + """ + if os.sep == '\\': + eof = 'Ctrl-Z plus Return' + else: + eof = 'Ctrl-D (i.e. EOF)' + + builtins.quit = _sitebuiltins.Quitter('quit', eof) + builtins.exit = _sitebuiltins.Quitter('exit', eof) + + +def setcopyright(): + """Set 'copyright' and 'credits' in builtins""" + builtins.copyright = _sitebuiltins._Printer("copyright", sys.copyright) + builtins.credits = _sitebuiltins._Printer("credits", """\ +Thanks to CWI, CNRI, BeOpen, Zope Corporation, the Python Software +Foundation, and a cast of thousands for supporting Python +development. See www.python.org for more information.""") + files, dirs = [], [] + # Not all modules are required to have a __file__ attribute. See + # PEP 420 for more details. + here = getattr(sys, '_stdlib_dir', None) + if not here and hasattr(os, '__file__'): + here = os.path.dirname(os.__file__) + if here: + files.extend(["LICENSE.txt", "LICENSE"]) + dirs.extend([os.path.join(here, os.pardir), here, os.curdir]) + builtins.license = _sitebuiltins._Printer( + "license", + "See https://www.python.org/psf/license/", + files, dirs) + + +def sethelper(): + builtins.help = _sitebuiltins._Helper() + + +def gethistoryfile(): + """Check if the PYTHON_HISTORY environment variable is set and define + it as the .python_history file. If PYTHON_HISTORY is not set, use the + default .python_history file. + """ + if not sys.flags.ignore_environment: + history = os.environ.get("PYTHON_HISTORY") + if history: + return history + return os.path.join(os.path.expanduser('~'), + '.python_history') + + +def enablerlcompleter(): + """Enable default readline configuration on interactive prompts, by + registering a sys.__interactivehook__. + """ + sys.__interactivehook__ = register_readline + + +def register_readline(): + """Configure readline completion on interactive prompts. + + If the readline module can be imported, the hook will set the Tab key + as completion key and register ~/.python_history as history file. + This can be overridden in the sitecustomize or usercustomize module, + or in a PYTHONSTARTUP file. 
+ """ + if not sys.flags.ignore_environment: + PYTHON_BASIC_REPL = os.getenv("PYTHON_BASIC_REPL") + else: + PYTHON_BASIC_REPL = False + + import atexit + + try: + try: + import readline + except ImportError: + readline = None + else: + import rlcompleter # noqa: F401 + except ImportError: + return + + try: + if PYTHON_BASIC_REPL: + CAN_USE_PYREPL = False + else: + original_path = sys.path + sys.path = [p for p in original_path if p != ''] + try: + import _pyrepl.readline + if os.name == "nt": + import _pyrepl.windows_console + console_errors = (_pyrepl.windows_console._error,) + else: + import _pyrepl.unix_console + console_errors = _pyrepl.unix_console._error + from _pyrepl.main import CAN_USE_PYREPL + finally: + sys.path = original_path + except ImportError: + return + + if readline is not None: + # Reading the initialization (config) file may not be enough to set a + # completion key, so we set one first and then read the file. + if readline.backend == 'editline': + readline.parse_and_bind('bind ^I rl_complete') + else: + readline.parse_and_bind('tab: complete') + + try: + readline.read_init_file() + except OSError: + # An OSError here could have many causes, but the most likely one + # is that there's no .inputrc file (or .editrc file in the case of + # Mac OS X + libedit) in the expected location. In that case, we + # want to ignore the exception. + pass + + if readline is None or readline.get_current_history_length() == 0: + # If no history was loaded, default to .python_history, + # or PYTHON_HISTORY. + # The guard is necessary to avoid doubling history size at + # each interpreter exit when readline was already configured + # through a PYTHONSTARTUP hook, see: + # http://bugs.python.org/issue5845#msg198636 + history = gethistoryfile() + + if CAN_USE_PYREPL: + readline_module = _pyrepl.readline + exceptions = (OSError, *console_errors) + else: + if readline is None: + return + readline_module = readline + exceptions = OSError + + try: + readline_module.read_history_file(history) + except exceptions: + pass + + def write_history(): + try: + readline_module.write_history_file(history) + except FileNotFoundError, PermissionError: + # home directory does not exist or is not writable + # https://bugs.python.org/issue19891 + pass + except OSError: + if errno.EROFS: + pass # gh-128066: read-only file system + else: + raise + + atexit.register(write_history) + + +def venv(known_paths): + global PREFIXES, ENABLE_USER_SITE + + env = os.environ + if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env: + executable = sys._base_executable = os.environ['__PYVENV_LAUNCHER__'] + else: + executable = sys.executable + exe_dir = os.path.dirname(os.path.abspath(executable)) + site_prefix = os.path.dirname(exe_dir) + sys._home = None + conf_basename = 'pyvenv.cfg' + candidate_conf = next( + ( + conffile for conffile in ( + os.path.join(exe_dir, conf_basename), + os.path.join(site_prefix, conf_basename) + ) + if os.path.isfile(conffile) + ), + None + ) + + if candidate_conf: + virtual_conf = candidate_conf + system_site = "true" + # Issue 25185: Use UTF-8, as that's what the venv module uses when + # writing the file. 
+ with open(virtual_conf, encoding='utf-8') as f: + for line in f: + if '=' in line: + key, _, value = line.partition('=') + key = key.strip().lower() + value = value.strip() + if key == 'include-system-site-packages': + system_site = value.lower() + elif key == 'home': + sys._home = value + + if sys.prefix != site_prefix: + _warn(f'Unexpected value in sys.prefix, expected {site_prefix}, got {sys.prefix}', RuntimeWarning) + if sys.exec_prefix != site_prefix: + _warn(f'Unexpected value in sys.exec_prefix, expected {site_prefix}, got {sys.exec_prefix}', RuntimeWarning) + + # Doing this here ensures venv takes precedence over user-site + addsitepackages(known_paths, [sys.prefix]) + + if system_site == "true": + PREFIXES += [sys.base_prefix, sys.base_exec_prefix] + else: + ENABLE_USER_SITE = False + + return known_paths + + +def execsitecustomize(): + """Run custom site specific code, if available.""" + try: + try: + import sitecustomize # noqa: F401 + except ImportError as exc: + if exc.name == 'sitecustomize': + pass + else: + raise + except Exception as err: + if sys.flags.verbose: + sys.excepthook(*sys.exc_info()) + else: + sys.stderr.write( + "Error in sitecustomize; set PYTHONVERBOSE for traceback:\n" + "%s: %s\n" % + (err.__class__.__name__, err)) + + +def execusercustomize(): + """Run custom user specific code, if available.""" + try: + try: + import usercustomize # noqa: F401 + except ImportError as exc: + if exc.name == 'usercustomize': + pass + else: + raise + except Exception as err: + if sys.flags.verbose: + sys.excepthook(*sys.exc_info()) + else: + sys.stderr.write( + "Error in usercustomize; set PYTHONVERBOSE for traceback:\n" + "%s: %s\n" % + (err.__class__.__name__, err)) + + +def main(): + """Add standard site-specific directories to the module search path. + + This function is called automatically when this module is imported, + unless the python interpreter was started with the -S flag. + """ + global ENABLE_USER_SITE + + orig_path = sys.path[:] + known_paths = removeduppaths() + if orig_path != sys.path: + # removeduppaths() might make sys.path absolute. + # fix __file__ and __cached__ of already imported modules too. + abs_paths() + + known_paths = venv(known_paths) + if ENABLE_USER_SITE is None: + ENABLE_USER_SITE = check_enableusersite() + known_paths = addusersitepackages(known_paths) + known_paths = addsitepackages(known_paths) + setquit() + setcopyright() + sethelper() + if not sys.flags.isolated: + enablerlcompleter() + execsitecustomize() + if ENABLE_USER_SITE: + execusercustomize() + +# Prevent extending of sys.path when python was started with -S and +# site is imported later. +if not sys.flags.no_site: + main() + +def _script(): + help = """\ + %s [--user-base] [--user-site] + + Without arguments print some useful information + With arguments print the value of USER_BASE and/or USER_SITE separated + by '%s'. 
+ + Exit codes with --user-base or --user-site: + 0 - user site directory is enabled + 1 - user site directory is disabled by user + 2 - user site directory is disabled by super user + or for security reasons + >2 - unknown error + """ + args = sys.argv[1:] + if not args: + user_base = getuserbase() + user_site = getusersitepackages() + print("sys.path = [") + for dir in sys.path: + print(" %r," % (dir,)) + print("]") + def exists(path): + if path is not None and os.path.isdir(path): + return "exists" + else: + return "doesn't exist" + print(f"USER_BASE: {user_base!r} ({exists(user_base)})") + print(f"USER_SITE: {user_site!r} ({exists(user_site)})") + print(f"ENABLE_USER_SITE: {ENABLE_USER_SITE!r}") + sys.exit(0) + + buffer = [] + if '--user-base' in args: + buffer.append(USER_BASE) + if '--user-site' in args: + buffer.append(USER_SITE) + + if buffer: + print(os.pathsep.join(buffer)) + if ENABLE_USER_SITE: + sys.exit(0) + elif ENABLE_USER_SITE is False: + sys.exit(1) + elif ENABLE_USER_SITE is None: + sys.exit(2) + else: + sys.exit(3) + else: + import textwrap + print(textwrap.dedent(help % (sys.argv[0], os.pathsep))) + sys.exit(10) + +if __name__ == '__main__': + _script() diff --git a/Python314_4_x64_Template/Lib/smtplib.py b/Python314_4_x64_Template/Lib/smtplib.py new file mode 100644 index 00000000..72093f7f --- /dev/null +++ b/Python314_4_x64_Template/Lib/smtplib.py @@ -0,0 +1,1121 @@ +'''SMTP/ESMTP client class. + +This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP +Authentication) and RFC 2487 (Secure SMTP over TLS). + +Notes: + +Please remember, when doing ESMTP, that the names of the SMTP service +extensions are NOT the same thing as the option keywords for the RCPT +and MAIL commands! + +Example: + + >>> import smtplib + >>> s=smtplib.SMTP("localhost") + >>> print(s.help()) + This is Sendmail version 8.8.4 + Topics: + HELO EHLO MAIL RCPT DATA + RSET NOOP QUIT HELP VRFY + EXPN VERB ETRN DSN + For more info use "HELP <topic>". + To report bugs in the implementation send email to + sendmail-bugs@sendmail.org. + For local information send email to Postmaster at your site. + End of HELP info + >>> s.putcmd("vrfy","someone@here") + >>> s.getreply() + (250, "Somebody OverHere <somebody@here.my.org>") + >>> s.quit() +''' + +# Author: The Dragon De Monsyne +# ESMTP support, test code and doc fixes added by +# Eric S. Raymond +# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data) +# by Carey Evans, for picky mail servers. +# RFC 2554 (authentication) support by Gerhard Haering. +# +# This was modified from the Python 1.5 library HTTP lib. + +import socket +import io +import re +import email.utils +import email.message +import email.generator +import base64 +import hmac +import copy +import datetime +import sys +from email.base64mime import body_encode as encode_base64 + +__all__ = ["SMTPException", "SMTPNotSupportedError", "SMTPServerDisconnected", "SMTPResponseException", + "SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError", + "SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError", + "quoteaddr", "quotedata", "SMTP"] + +SMTP_PORT = 25 +SMTP_SSL_PORT = 465 +CRLF = "\r\n" +bCRLF = b"\r\n" +_MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3 +_MAXCHALLENGE = 5 # Maximum number of AUTH challenges sent + +OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I) + +# Exception classes used by this module.
+class SMTPException(OSError): + """Base class for all exceptions raised by this module.""" + +class SMTPNotSupportedError(SMTPException): + """The command or option is not supported by the SMTP server. + + This exception is raised when an attempt is made to run a command or a + command with an option which is not supported by the server. + """ + +class SMTPServerDisconnected(SMTPException): + """Not connected to any SMTP server. + + This exception is raised when the server unexpectedly disconnects, + or when an attempt is made to use the SMTP instance before + connecting it to a server. + """ + +class SMTPResponseException(SMTPException): + """Base class for all exceptions that include an SMTP error code. + + These exceptions are generated in some instances when the SMTP + server returns an error code. The error code is stored in the + `smtp_code' attribute of the error, and the `smtp_error' attribute + is set to the error message. + """ + + def __init__(self, code, msg): + self.smtp_code = code + self.smtp_error = msg + self.args = (code, msg) + +class SMTPSenderRefused(SMTPResponseException): + """Sender address refused. + + In addition to the attributes set by on all SMTPResponseException + exceptions, this sets 'sender' to the string that the SMTP refused. + """ + + def __init__(self, code, msg, sender): + self.smtp_code = code + self.smtp_error = msg + self.sender = sender + self.args = (code, msg, sender) + +class SMTPRecipientsRefused(SMTPException): + """All recipient addresses refused. + + The errors for each recipient are accessible through the attribute + 'recipients', which is a dictionary of exactly the same sort as + SMTP.sendmail() returns. + """ + + def __init__(self, recipients): + self.recipients = recipients + self.args = (recipients,) + + +class SMTPDataError(SMTPResponseException): + """The SMTP server didn't accept the data.""" + +class SMTPConnectError(SMTPResponseException): + """Error during connection establishment.""" + +class SMTPHeloError(SMTPResponseException): + """The server refused our HELO reply.""" + +class SMTPAuthenticationError(SMTPResponseException): + """Authentication error. + + Most probably the server didn't accept the username/password + combination provided. + """ + +def quoteaddr(addrstring): + """Quote a subset of the email addresses defined by RFC 821. + + Should be able to handle anything email.utils.parseaddr can handle. + """ + displayname, addr = email.utils.parseaddr(addrstring) + if (displayname, addr) == ('', ''): + # parseaddr couldn't parse it, use it as is and hope for the best. + if addrstring.strip().startswith('<'): + return addrstring + return "<%s>" % addrstring + return "<%s>" % addr + +def _addr_only(addrstring): + displayname, addr = email.utils.parseaddr(addrstring) + if (displayname, addr) == ('', ''): + # parseaddr couldn't parse it, so use it as is. + return addrstring + return addr + +# Legacy method kept for backward compatibility. +def quotedata(data): + """Quote data for email. + + Double leading '.', and change Unix newline '\\n', or Mac '\\r' into + internet CRLF end-of-line. 
+ """ + return re.sub(r'(?m)^\.', '..', + re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data)) + +def _quote_periods(bindata): + return re.sub(br'(?m)^\.', b'..', bindata) + +def _fix_eols(data): + return re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data) + + +try: + hmac.digest(b'', b'', 'md5') +except ValueError: + _have_cram_md5_support = False +else: + _have_cram_md5_support = True + + +try: + import ssl +except ImportError: + _have_ssl = False +else: + _have_ssl = True + + +class SMTP: + """This class manages a connection to an SMTP or ESMTP server. + SMTP Objects: + SMTP objects have the following attributes: + helo_resp + This is the message given by the server in response to the + most recent HELO command. + + ehlo_resp + This is the message given by the server in response to the + most recent EHLO command. This is usually multiline. + + does_esmtp + This is a True value _after you do an EHLO command_, if the + server supports ESMTP. + + esmtp_features + This is a dictionary, which, if the server supports ESMTP, + will _after you do an EHLO command_, contain the names of the + SMTP service extensions this server supports, and their + parameters (if any). + + Note, all extension names are mapped to lower case in the + dictionary. + + See each method's docstrings for details. In general, there is a + method of the same name to perform each SMTP command. There is also a + method called 'sendmail' that will do an entire mail transaction. + """ + debuglevel = 0 + + sock = None + file = None + helo_resp = None + ehlo_msg = "ehlo" + ehlo_resp = None + does_esmtp = False + default_port = SMTP_PORT + + def __init__(self, host='', port=0, local_hostname=None, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + """Initialize a new instance. + + If specified, `host` is the name of the remote host to which to + connect. If specified, `port` specifies the port to which to connect. + By default, smtplib.SMTP_PORT is used. If a host is specified the + connect method is called, and if it returns anything other than a + success code an SMTPConnectError is raised. If specified, + `local_hostname` is used as the FQDN of the local host in the HELO/EHLO + command. Otherwise, the local hostname is found using + socket.getfqdn(). The `source_address` parameter takes a 2-tuple (host, + port) for the socket to bind to as its source address before + connecting. If the host is '' and port is 0, the OS default behavior + will be used. + + """ + self._host = host + self.timeout = timeout + self.esmtp_features = {} + self.command_encoding = 'ascii' + self.source_address = source_address + self._auth_challenge_count = 0 + + if host: + (code, msg) = self.connect(host, port) + if code != 220: + self.close() + raise SMTPConnectError(code, msg) + if local_hostname is not None: + self.local_hostname = local_hostname + else: + # RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and + # if that can't be calculated, that we should use a domain literal + # instead (essentially an encoded IP address like [A.B.C.D]). + fqdn = socket.getfqdn() + if '.' 
in fqdn: + self.local_hostname = fqdn + else: + # We can't find an fqdn hostname, so use a domain literal + addr = '127.0.0.1' + try: + addr = socket.gethostbyname(socket.gethostname()) + except socket.gaierror: + pass + self.local_hostname = '[%s]' % addr + + def __enter__(self): + return self + + def __exit__(self, *args): + try: + code, message = self.docmd("QUIT") + if code != 221: + raise SMTPResponseException(code, message) + except SMTPServerDisconnected: + pass + finally: + self.close() + + def set_debuglevel(self, debuglevel): + """Set the debug output level. + + A non-false value results in debug messages for connection and for all + messages sent to and received from the server. + + """ + self.debuglevel = debuglevel + + def _print_debug(self, *args): + if self.debuglevel > 1: + print(datetime.datetime.now().time(), *args, file=sys.stderr) + else: + print(*args, file=sys.stderr) + + def _get_socket(self, host, port, timeout): + # This makes it simpler for SMTP_SSL to use the SMTP connect code + # and just alter the socket connection bit. + if timeout is not None and not timeout: + raise ValueError('Non-blocking socket (timeout=0) is not supported') + if self.debuglevel > 0: + self._print_debug('connect: to', (host, port), self.source_address) + return socket.create_connection((host, port), timeout, + self.source_address) + + def connect(self, host='localhost', port=0, source_address=None): + """Connect to a host on a given port. + + If the hostname ends with a colon (':') followed by a number, and + there is no port specified, that suffix will be stripped off and the + number interpreted as the port number to use. + + Note: This method is automatically invoked by __init__, if a host is + specified during instantiation. + + """ + + if source_address: + self.source_address = source_address + + if not port and (host.find(':') == host.rfind(':')): + i = host.rfind(':') + if i >= 0: + host, port = host[:i], host[i + 1:] + try: + port = int(port) + except ValueError: + raise OSError("nonnumeric port") + if not port: + port = self.default_port + sys.audit("smtplib.connect", self, host, port) + self.sock = self._get_socket(host, port, self.timeout) + self.file = None + (code, msg) = self.getreply() + if self.debuglevel > 0: + self._print_debug('connect:', repr(msg)) + return (code, msg) + + def send(self, s): + """Send 's' to the server.""" + if self.debuglevel > 0: + self._print_debug('send:', repr(s)) + if self.sock: + if isinstance(s, str): + # send is used by the 'data' command, where command_encoding + # should not be used, but 'data' needs to convert the string to + # binary itself anyway, so that's not a problem. + s = s.encode(self.command_encoding) + sys.audit("smtplib.send", self, s) + try: + self.sock.sendall(s) + except OSError: + self.close() + raise SMTPServerDisconnected('Server not connected') + else: + raise SMTPServerDisconnected('please run connect() first') + + def putcmd(self, cmd, args=""): + """Send a command to the server.""" + if args == "": + s = cmd + else: + s = f'{cmd} {args}' + if '\r' in s or '\n' in s: + s = s.replace('\n', '\\n').replace('\r', '\\r') + raise ValueError( + f'command and arguments contain prohibited newline characters: {s}' + ) + self.send(f'{s}{CRLF}') + + def getreply(self): + """Get a reply from the server. + + Returns a tuple consisting of: + + - server response code (e.g. '250', or such, if all goes well) + Note: returns -1 if it can't read response code. 
+ + - server response string corresponding to response code (multiline + responses are converted to a single, multiline string). + + Raises SMTPServerDisconnected if end-of-file is reached. + """ + resp = [] + if self.file is None: + self.file = self.sock.makefile('rb') + while 1: + try: + line = self.file.readline(_MAXLINE + 1) + except OSError as e: + self.close() + raise SMTPServerDisconnected("Connection unexpectedly closed: " + + str(e)) + if not line: + self.close() + raise SMTPServerDisconnected("Connection unexpectedly closed") + if self.debuglevel > 0: + self._print_debug('reply:', repr(line)) + if len(line) > _MAXLINE: + self.close() + raise SMTPResponseException(500, "Line too long.") + resp.append(line[4:].strip(b' \t\r\n')) + code = line[:3] + # Check that the error code is syntactically correct. + # Don't attempt to read a continuation line if it is broken. + try: + errcode = int(code) + except ValueError: + errcode = -1 + break + # Check if multiline response. + if line[3:4] != b"-": + break + + errmsg = b"\n".join(resp) + if self.debuglevel > 0: + self._print_debug('reply: retcode (%s); Msg: %a' % (errcode, errmsg)) + return errcode, errmsg + + def docmd(self, cmd, args=""): + """Send a command, and return its response code.""" + self.putcmd(cmd, args) + return self.getreply() + + # std smtp commands + def helo(self, name=''): + """SMTP 'helo' command. + Hostname to send for this command defaults to the FQDN of the local + host. + """ + self.putcmd("helo", name or self.local_hostname) + (code, msg) = self.getreply() + self.helo_resp = msg + return (code, msg) + + def ehlo(self, name=''): + """ SMTP 'ehlo' command. + Hostname to send for this command defaults to the FQDN of the local + host. + """ + self.esmtp_features = {} + self.putcmd(self.ehlo_msg, name or self.local_hostname) + (code, msg) = self.getreply() + # According to RFC1869 some (badly written) + # MTA's will disconnect on an ehlo. Toss an exception if + # that happens -ddm + if code == -1 and len(msg) == 0: + self.close() + raise SMTPServerDisconnected("Server not connected") + self.ehlo_resp = msg + if code != 250: + return (code, msg) + self.does_esmtp = True + #parse the ehlo response -ddm + assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp) + resp = self.ehlo_resp.decode("latin-1").split('\n') + del resp[0] + for each in resp: + # To be able to communicate with as many SMTP servers as possible, + # we have to take the old-style auth advertisement into account, + # because: + # 1) Else our SMTP feature parser gets confused. + # 2) There are some servers that only advertise the auth methods we + # support using the old style. + auth_match = OLDSTYLE_AUTH.match(each) + if auth_match: + # This doesn't remove duplicates, but that's no problem + self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \ + + " " + auth_match.groups(0)[0] + continue + + # RFC 1869 requires a space between ehlo keyword and parameters. + # It's actually stricter, in that only spaces are allowed between + # parameters, but were not going to check for that here. Note + # that the space isn't present if there are no parameters. 
+ m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each) + if m: + feature = m.group("feature").lower() + params = m.string[m.end("feature"):].strip() + if feature == "auth": + self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \ + + " " + params + else: + self.esmtp_features[feature] = params + return (code, msg) + + def has_extn(self, opt): + """Does the server support a given SMTP service extension?""" + return opt.lower() in self.esmtp_features + + def help(self, args=''): + """SMTP 'help' command. + Returns help text from server.""" + self.putcmd("help", args) + return self.getreply()[1] + + def rset(self): + """SMTP 'rset' command -- resets session.""" + self.command_encoding = 'ascii' + return self.docmd("rset") + + def _rset(self): + """Internal 'rset' command which ignores any SMTPServerDisconnected error. + + Used internally in the library, since the server disconnected error + should appear to the application when the *next* command is issued, if + we are doing an internal "safety" reset. + """ + try: + self.rset() + except SMTPServerDisconnected: + pass + + def noop(self): + """SMTP 'noop' command -- doesn't do anything :>""" + return self.docmd("noop") + + def mail(self, sender, options=()): + """SMTP 'mail' command -- begins mail xfer session. + + This method may raise the following exceptions: + + SMTPNotSupportedError The options parameter includes 'SMTPUTF8' + but the SMTPUTF8 extension is not supported by + the server. + """ + optionlist = '' + if options and self.does_esmtp: + if any(x.lower()=='smtputf8' for x in options): + if self.has_extn('smtputf8'): + self.command_encoding = 'utf-8' + else: + raise SMTPNotSupportedError( + 'SMTPUTF8 not supported by server') + optionlist = ' ' + ' '.join(options) + self.putcmd("mail", "from:%s%s" % (quoteaddr(sender), optionlist)) + return self.getreply() + + def rcpt(self, recip, options=()): + """SMTP 'rcpt' command -- indicates 1 recipient for this mail.""" + optionlist = '' + if options and self.does_esmtp: + optionlist = ' ' + ' '.join(options) + self.putcmd("rcpt", "to:%s%s" % (quoteaddr(recip), optionlist)) + return self.getreply() + + def data(self, msg): + """SMTP 'DATA' command -- sends message data to server. + + Automatically quotes lines beginning with a period per rfc821. + Raises SMTPDataError if there is an unexpected reply to the + DATA command; the return value from this method is the final + response code received when the all data is sent. If msg + is a string, lone '\\r' and '\\n' characters are converted to + '\\r\\n' characters. If msg is bytes, it is transmitted as is. + """ + self.putcmd("data") + (code, repl) = self.getreply() + if self.debuglevel > 0: + self._print_debug('data:', (code, repl)) + if code != 354: + raise SMTPDataError(code, repl) + else: + if isinstance(msg, str): + msg = _fix_eols(msg).encode('ascii') + q = _quote_periods(msg) + if q[-2:] != bCRLF: + q = q + bCRLF + q = q + b"." + bCRLF + self.send(q) + (code, msg) = self.getreply() + if self.debuglevel > 0: + self._print_debug('data:', (code, msg)) + return (code, msg) + + def verify(self, address): + """SMTP 'verify' command -- checks for address validity.""" + self.putcmd("vrfy", _addr_only(address)) + return self.getreply() + # a.k.a. + vrfy = verify + + def expn(self, address): + """SMTP 'expn' command -- expands a mailing list.""" + self.putcmd("expn", _addr_only(address)) + return self.getreply() + + # some useful methods + + def ehlo_or_helo_if_needed(self): + """Call self.ehlo() and/or self.helo() if needed.
+ + If there has been no previous EHLO or HELO command this session, this + method tries ESMTP EHLO first. + + This method may raise the following exceptions: + + SMTPHeloError The server didn't reply properly to + the helo greeting. + """ + if self.helo_resp is None and self.ehlo_resp is None: + if not (200 <= self.ehlo()[0] <= 299): + (code, resp) = self.helo() + if not (200 <= code <= 299): + raise SMTPHeloError(code, resp) + + def auth(self, mechanism, authobject, *, initial_response_ok=True): + """Authentication command - requires response processing. + + 'mechanism' specifies which authentication mechanism is to + be used - the valid values are those listed in the 'auth' + element of 'esmtp_features'. + + 'authobject' must be a callable object taking a single argument: + + data = authobject(challenge) + + It will be called to process the server's challenge response; the + challenge argument it is passed will be a bytes. It should return + an ASCII string that will be base64 encoded and sent to the server. + + Keyword arguments: + - initial_response_ok: Allow sending the RFC 4954 initial-response + to the AUTH command, if the authentication methods supports it. + """ + # RFC 4954 allows auth methods to provide an initial response. Not all + # methods support it. By definition, if they return something other + # than None when challenge is None, then they do. See issue #15014. + mechanism = mechanism.upper() + initial_response = (authobject() if initial_response_ok else None) + if initial_response is not None: + response = encode_base64(initial_response.encode('ascii'), eol='') + (code, resp) = self.docmd("AUTH", mechanism + " " + response) + self._auth_challenge_count = 1 + else: + (code, resp) = self.docmd("AUTH", mechanism) + self._auth_challenge_count = 0 + # If server responds with a challenge, send the response. + while code == 334: + self._auth_challenge_count += 1 + challenge = base64.decodebytes(resp) + response = encode_base64( + authobject(challenge).encode('ascii'), eol='') + (code, resp) = self.docmd(response) + # If server keeps sending challenges, something is wrong. + if self._auth_challenge_count > _MAXCHALLENGE: + raise SMTPException( + "Server AUTH mechanism infinite loop. Last response: " + + repr((code, resp)) + ) + if code in (235, 503): + return (code, resp) + raise SMTPAuthenticationError(code, resp) + + def auth_cram_md5(self, challenge=None): + """ Authobject to use with CRAM-MD5 authentication. Requires self.user + and self.password to be set.""" + # CRAM-MD5 does not support initial-response. + if challenge is None: + return None + if not _have_cram_md5_support: + raise SMTPException("CRAM-MD5 is not supported") + password = self.password.encode('ascii') + authcode = hmac.HMAC(password, challenge, 'md5') + return f"{self.user} {authcode.hexdigest()}" + + def auth_plain(self, challenge=None): + """ Authobject to use with PLAIN authentication. Requires self.user and + self.password to be set.""" + return "\0%s\0%s" % (self.user, self.password) + + def auth_login(self, challenge=None): + """ Authobject to use with LOGIN authentication. Requires self.user and + self.password to be set.""" + if challenge is None or self._auth_challenge_count < 2: + return self.user + else: + return self.password + + def login(self, user, password, *, initial_response_ok=True): + """Log in on an SMTP server that requires authentication. + + The arguments are: + - user: The user name to authenticate with. + - password: The password for the authentication. 
+ + Keyword arguments: + - initial_response_ok: Allow sending the RFC 4954 initial-response + to the AUTH command, if the authentication methods supports it. + + If there has been no previous EHLO or HELO command this session, this + method tries ESMTP EHLO first. + + This method will return normally if the authentication was successful. + + This method may raise the following exceptions: + + SMTPHeloError The server didn't reply properly to + the helo greeting. + SMTPAuthenticationError The server didn't accept the username/ + password combination. + SMTPNotSupportedError The AUTH command is not supported by the + server. + SMTPException No suitable authentication method was + found. + """ + + self.ehlo_or_helo_if_needed() + if not self.has_extn("auth"): + raise SMTPNotSupportedError( + "SMTP AUTH extension not supported by server.") + + # Authentication methods the server claims to support + advertised_authlist = self.esmtp_features["auth"].split() + + # Authentication methods we can handle in our preferred order: + if _have_cram_md5_support: + preferred_auths = ['CRAM-MD5', 'PLAIN', 'LOGIN'] + else: + preferred_auths = ['PLAIN', 'LOGIN'] + # We try the supported authentications in our preferred order, if + # the server supports them. + authlist = [auth for auth in preferred_auths + if auth in advertised_authlist] + if not authlist: + raise SMTPException("No suitable authentication method found.") + + # Some servers advertise authentication methods they don't really + # support, so if authentication fails, we continue until we've tried + # all methods. + self.user, self.password = user, password + for authmethod in authlist: + method_name = 'auth_' + authmethod.lower().replace('-', '_') + try: + (code, resp) = self.auth( + authmethod, getattr(self, method_name), + initial_response_ok=initial_response_ok) + # 235 == 'Authentication successful' + # 503 == 'Error: already authenticated' + if code in (235, 503): + return (code, resp) + except SMTPAuthenticationError as e: + last_exception = e + + # We could not login successfully. Return result of last attempt. + raise last_exception + + def starttls(self, *, context=None): + """Puts the connection to the SMTP server into TLS mode. + + If there has been no previous EHLO or HELO command this session, this + method tries ESMTP EHLO first. + + If the server supports TLS, this will encrypt the rest of the SMTP + session. If you provide the context parameter, + the identity of the SMTP server and client can be checked. This, + however, depends on whether the socket module really checks the + certificates. + + This method may raise the following exceptions: + + SMTPHeloError The server didn't reply properly to + the helo greeting. + """ + self.ehlo_or_helo_if_needed() + if not self.has_extn("starttls"): + raise SMTPNotSupportedError( + "STARTTLS extension not supported by server.") + (resp, reply) = self.docmd("STARTTLS") + if resp == 220: + if not _have_ssl: + raise RuntimeError("No SSL support included in this Python") + if context is None: + context = ssl._create_stdlib_context() + self.sock = context.wrap_socket(self.sock, + server_hostname=self._host) + self.file = None + # RFC 3207: + # The client MUST discard any knowledge obtained from + # the server, such as the list of SMTP service extensions, + # which was not obtained from the TLS negotiation itself. 
+ self.helo_resp = None + self.ehlo_resp = None + self.esmtp_features = {} + self.does_esmtp = False + else: + # RFC 3207: + # 501 Syntax error (no parameters allowed) + # 454 TLS not available due to temporary reason + raise SMTPResponseException(resp, reply) + return (resp, reply) + + def sendmail(self, from_addr, to_addrs, msg, mail_options=(), + rcpt_options=()): + """This command performs an entire mail transaction. + + The arguments are: + - from_addr : The address sending this mail. + - to_addrs : A list of addresses to send this mail to. A bare + string will be treated as a list with 1 address. + - msg : The message to send. + - mail_options : List of ESMTP options (such as 8bitmime) for the + mail command. + - rcpt_options : List of ESMTP options (such as DSN commands) for + all the rcpt commands. + + msg may be a string containing characters in the ASCII range, or a byte + string. A string is encoded to bytes using the ascii codec, and lone + \\r and \\n characters are converted to \\r\\n characters. + + If there has been no previous EHLO or HELO command this session, this + method tries ESMTP EHLO first. If the server does ESMTP, message size + and each of the specified options will be passed to it. If EHLO + fails, HELO will be tried and ESMTP options suppressed. + + This method will return normally if the mail is accepted for at least + one recipient. It returns a dictionary, with one entry for each + recipient that was refused. Each entry contains a tuple of the SMTP + error code and the accompanying error message sent by the server. + + This method may raise the following exceptions: + + SMTPHeloError The server didn't reply properly to + the helo greeting. + SMTPRecipientsRefused The server rejected ALL recipients + (no mail was sent). + SMTPSenderRefused The server didn't accept the from_addr. + SMTPDataError The server replied with an unexpected + error code (other than a refusal of + a recipient). + SMTPNotSupportedError The mail_options parameter includes 'SMTPUTF8' + but the SMTPUTF8 extension is not supported by + the server. + + Note: the connection will be open even after an exception is raised. + + Example: + + >>> import smtplib + >>> s=smtplib.SMTP("localhost") + >>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"] + >>> msg = '''\\ + ... From: Me@my.org + ... Subject: testin'... + ... + ... This is a test ''' + >>> s.sendmail("me@my.org",tolist,msg) + { "three@three.org" : ( 550 ,"User unknown" ) } + >>> s.quit() + + In the above example, the message was accepted for delivery to three + of the four addresses, and one was rejected, with the error code + 550. If all addresses are accepted, then the method will return an + empty dictionary. 
+ + """ + self.ehlo_or_helo_if_needed() + esmtp_opts = [] + if isinstance(msg, str): + msg = _fix_eols(msg).encode('ascii') + if self.does_esmtp: + if self.has_extn('size'): + esmtp_opts.append("size=%d" % len(msg)) + for option in mail_options: + esmtp_opts.append(option) + (code, resp) = self.mail(from_addr, esmtp_opts) + if code != 250: + if code == 421: + self.close() + else: + self._rset() + raise SMTPSenderRefused(code, resp, from_addr) + senderrs = {} + if isinstance(to_addrs, str): + to_addrs = [to_addrs] + for each in to_addrs: + (code, resp) = self.rcpt(each, rcpt_options) + if (code != 250) and (code != 251): + senderrs[each] = (code, resp) + if code == 421: + self.close() + raise SMTPRecipientsRefused(senderrs) + if len(senderrs) == len(to_addrs): + # the server refused all our recipients + self._rset() + raise SMTPRecipientsRefused(senderrs) + (code, resp) = self.data(msg) + if code != 250: + if code == 421: + self.close() + else: + self._rset() + raise SMTPDataError(code, resp) + #if we got here then somebody got our mail + return senderrs + + def send_message(self, msg, from_addr=None, to_addrs=None, + mail_options=(), rcpt_options=()): + """Converts message to a bytestring and passes it to sendmail. + + The arguments are as for sendmail, except that msg is an + email.message.Message object. If from_addr is None or to_addrs is + None, these arguments are taken from the headers of the Message as + described in RFC 5322 (a ValueError is raised if there is more than + one set of 'Resent-' headers). Regardless of the values of from_addr and + to_addr, any Bcc field (or Resent-Bcc field, when the Message is a + resent) of the Message object won't be transmitted. The Message + object is then serialized using email.generator.BytesGenerator and + sendmail is called to transmit the message. If the sender or any of + the recipient addresses contain non-ASCII and the server advertises the + SMTPUTF8 capability, the policy is cloned with utf8 set to True for the + serialization, and SMTPUTF8 and BODY=8BITMIME are asserted on the send. + If the server does not support SMTPUTF8, an SMTPNotSupported error is + raised. Otherwise the generator is called without modifying the + policy. + + """ + # 'Resent-Date' is a mandatory field if the Message is resent (RFC 5322 + # Section 3.6.6). In such a case, we use the 'Resent-*' fields. However, + # if there is more than one 'Resent-' block there's no way to + # unambiguously determine which one is the most recent in all cases, + # so rather than guess we raise a ValueError in that case. + # + # TODO implement heuristics to guess the correct Resent-* block with an + # option allowing the user to enable the heuristics. (It should be + # possible to guess correctly almost all of the time.) + + self.ehlo_or_helo_if_needed() + resent = msg.get_all('Resent-Date') + if resent is None: + header_prefix = '' + elif len(resent) == 1: + header_prefix = 'Resent-' + else: + raise ValueError("message has more than one 'Resent-' header block") + if from_addr is None: + # Prefer the sender field per RFC 5322 section 3.6.2. 
+ from_addr = (msg[header_prefix + 'Sender'] + if (header_prefix + 'Sender') in msg + else msg[header_prefix + 'From']) + from_addr = email.utils.getaddresses([from_addr])[0][1] + if to_addrs is None: + addr_fields = [f for f in (msg[header_prefix + 'To'], + msg[header_prefix + 'Bcc'], + msg[header_prefix + 'Cc']) + if f is not None] + to_addrs = [a[1] for a in email.utils.getaddresses(addr_fields)] + # Make a local copy so we can delete the bcc headers. + msg_copy = copy.copy(msg) + del msg_copy['Bcc'] + del msg_copy['Resent-Bcc'] + international = False + try: + ''.join([from_addr, *to_addrs]).encode('ascii') + except UnicodeEncodeError: + if not self.has_extn('smtputf8'): + raise SMTPNotSupportedError( + "One or more source or delivery addresses require" + " internationalized email support, but the server" + " does not advertise the required SMTPUTF8 capability") + international = True + with io.BytesIO() as bytesmsg: + if international: + g = email.generator.BytesGenerator( + bytesmsg, policy=msg.policy.clone(utf8=True)) + mail_options = (*mail_options, 'SMTPUTF8', 'BODY=8BITMIME') + else: + g = email.generator.BytesGenerator(bytesmsg) + g.flatten(msg_copy, linesep='\r\n') + flatmsg = bytesmsg.getvalue() + return self.sendmail(from_addr, to_addrs, flatmsg, mail_options, + rcpt_options) + + def close(self): + """Close the connection to the SMTP server.""" + try: + file = self.file + self.file = None + if file: + file.close() + finally: + sock = self.sock + self.sock = None + if sock: + sock.close() + + def quit(self): + """Terminate the SMTP session.""" + res = self.docmd("quit") + # A new EHLO is required after reconnecting with connect() + self.ehlo_resp = self.helo_resp = None + self.esmtp_features = {} + self.does_esmtp = False + self.close() + return res + +if _have_ssl: + + class SMTP_SSL(SMTP): + """ This is a subclass derived from SMTP that connects over an SSL + encrypted socket (to use this class you need a socket module that was + compiled with SSL support). If host is not specified, '' (the local + host) is used. If port is omitted, the standard SMTP-over-SSL port + (465) is used. local_hostname and source_address have the same meaning + as they do in the SMTP class. context also optional, can contain a + SSLContext. + + """ + + default_port = SMTP_SSL_PORT + + def __init__(self, host='', port=0, local_hostname=None, + *, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None, context=None): + if context is None: + context = ssl._create_stdlib_context() + self.context = context + SMTP.__init__(self, host, port, local_hostname, timeout, + source_address) + + def _get_socket(self, host, port, timeout): + if self.debuglevel > 0: + self._print_debug('connect:', (host, port)) + new_socket = super()._get_socket(host, port, timeout) + new_socket = self.context.wrap_socket(new_socket, + server_hostname=self._host) + return new_socket + + __all__.append("SMTP_SSL") + +# +# LMTP extension +# +LMTP_PORT = 2003 + +class LMTP(SMTP): + """LMTP - Local Mail Transfer Protocol + + The LMTP protocol, which is very similar to ESMTP, is heavily based + on the standard SMTP client. It's common to use Unix sockets for + LMTP, so our connect() method must support that as well as a regular + host:port server. local_hostname and source_address have the same + meaning as they do in the SMTP class. To specify a Unix socket, + you must use an absolute path as the host, starting with a '/'. + + Authentication is supported, using the regular SMTP mechanism. 
When + using a Unix socket, LMTP generally don't support or require any + authentication, but your mileage might vary.""" + + ehlo_msg = "lhlo" + + def __init__(self, host='', port=LMTP_PORT, local_hostname=None, + source_address=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): + """Initialize a new instance.""" + super().__init__(host, port, local_hostname=local_hostname, + source_address=source_address, timeout=timeout) + + def connect(self, host='localhost', port=0, source_address=None): + """Connect to the LMTP daemon, on either a Unix or a TCP socket.""" + if host[0] != '/': + return super().connect(host, port, source_address=source_address) + + if self.timeout is not None and not self.timeout: + raise ValueError('Non-blocking socket (timeout=0) is not supported') + + # Handle Unix-domain sockets. + try: + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: + self.sock.settimeout(self.timeout) + self.file = None + self.sock.connect(host) + except OSError: + if self.debuglevel > 0: + self._print_debug('connect fail:', host) + if self.sock: + self.sock.close() + self.sock = None + raise + (code, msg) = self.getreply() + if self.debuglevel > 0: + self._print_debug('connect:', msg) + return (code, msg) + + +# Test the sendmail method, which tests most of the others. +# Note: This always sends to localhost. +if __name__ == '__main__': + def prompt(prompt): + sys.stdout.write(prompt + ": ") + sys.stdout.flush() + return sys.stdin.readline().strip() + + fromaddr = prompt("From") + toaddrs = prompt("To").split(',') + print("Enter message, end with ^D:") + msg = '' + while line := sys.stdin.readline(): + msg = msg + line + print("Message length is %d" % len(msg)) + + server = SMTP('localhost') + server.set_debuglevel(1) + server.sendmail(fromaddr, toaddrs, msg) + server.quit() diff --git a/Python314_4_x64_Template/Lib/socket.py b/Python314_4_x64_Template/Lib/socket.py new file mode 100644 index 00000000..727b0e75 --- /dev/null +++ b/Python314_4_x64_Template/Lib/socket.py @@ -0,0 +1,988 @@ +# Wrapper module for _socket, providing some additional facilities +# implemented in Python. + +"""\ +This module provides socket operations and some related functions. +On Unix, it supports IP (Internet Protocol) and Unix domain sockets. +On other systems, it only supports IP. Functions specific for a +socket are available as methods of the socket object. + +Functions: + +socket() -- create a new socket object +socketpair() -- create a pair of new socket objects [*] +fromfd() -- create a socket object from an open file descriptor [*] +send_fds() -- Send file descriptor to the socket. +recv_fds() -- Receive file descriptors from the socket. +fromshare() -- create a socket object from data received from socket.share() [*] +gethostname() -- return the current hostname +gethostbyname() -- map a hostname to its IP number +gethostbyaddr() -- map an IP number or hostname to DNS info +getservbyname() -- map a service name and a protocol name to a port number +getprotobyname() -- map a protocol name (e.g. 
'tcp') to a number +ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order +htons(), htonl() -- convert 16, 32 bit int from host to network byte order +inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format +inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) +socket.getdefaulttimeout() -- get the default timeout value +socket.setdefaulttimeout() -- set the default timeout value +create_connection() -- connects to an address, with an optional timeout and + optional source address. +create_server() -- create a TCP socket and bind it to a specified address. + + [*] not available on all platforms! + +Special objects: + +SocketType -- type object for socket objects +error -- exception raised for I/O errors +has_ipv6 -- boolean value indicating if IPv6 is supported + +IntEnum constants: + +AF_INET, AF_UNIX -- socket domains (first argument to socket() call) +SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) + +Integer constants: + +Many other constants may be defined; these may be used in calls to +the setsockopt() and getsockopt() methods. +""" + +import _socket +from _socket import * + +import io +import os +import sys +from enum import IntEnum, IntFlag + +try: + import errno +except ImportError: + errno = None +EBADF = getattr(errno, 'EBADF', 9) +EAGAIN = getattr(errno, 'EAGAIN', 11) +EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11) + +__all__ = ["fromfd", "getfqdn", "create_connection", "create_server", + "has_dualstack_ipv6", "AddressFamily", "SocketKind"] +__all__.extend(os._get_exports_list(_socket)) + +# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for +# nicer string representations. +# Note that _socket only knows about the integer values. The public interface +# in this module understands the enums and translates them back from integers +# where needed (e.g. .family property of a socket object). + +IntEnum._convert_( + 'AddressFamily', + __name__, + lambda C: C.isupper() and C.startswith('AF_')) + +IntEnum._convert_( + 'SocketKind', + __name__, + lambda C: C.isupper() and C.startswith('SOCK_')) + +IntFlag._convert_( + 'MsgFlag', + __name__, + lambda C: C.isupper() and C.startswith('MSG_')) + +IntFlag._convert_( + 'AddressInfo', + __name__, + lambda C: C.isupper() and C.startswith('AI_')) + +_LOCALHOST = '127.0.0.1' +_LOCALHOST_V6 = '::1' + + +def _intenum_converter(value, enum_klass): + """Convert a numeric family value to an IntEnum member. + + If it's not a known member, return the numeric value itself. 
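+
+    Editor's sketch (AF_INET is 2 on common platforms):
+
+    >>> _intenum_converter(2, AddressFamily)
+    <AddressFamily.AF_INET: 2>
+    >>> _intenum_converter(12345, AddressFamily)
+    12345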
+ """ + try: + return enum_klass(value) + except ValueError: + return value + + +# WSA error codes +if sys.platform.lower().startswith("win"): + errorTab = { + 6: "Specified event object handle is invalid.", + 8: "Insufficient memory available.", + 87: "One or more parameters are invalid.", + 995: "Overlapped operation aborted.", + 996: "Overlapped I/O event object not in signaled state.", + 997: "Overlapped operation will complete later.", + 10004: "The operation was interrupted.", + 10009: "A bad file handle was passed.", + 10013: "Permission denied.", + 10014: "A fault occurred on the network??", + 10022: "An invalid operation was attempted.", + 10024: "Too many open files.", + 10035: "The socket operation would block.", + 10036: "A blocking operation is already in progress.", + 10037: "Operation already in progress.", + 10038: "Socket operation on nonsocket.", + 10039: "Destination address required.", + 10040: "Message too long.", + 10041: "Protocol wrong type for socket.", + 10042: "Bad protocol option.", + 10043: "Protocol not supported.", + 10044: "Socket type not supported.", + 10045: "Operation not supported.", + 10046: "Protocol family not supported.", + 10047: "Address family not supported by protocol family.", + 10048: "The network address is in use.", + 10049: "Cannot assign requested address.", + 10050: "Network is down.", + 10051: "Network is unreachable.", + 10052: "Network dropped connection on reset.", + 10053: "Software caused connection abort.", + 10054: "The connection has been reset.", + 10055: "No buffer space available.", + 10056: "Socket is already connected.", + 10057: "Socket is not connected.", + 10058: "The network has been shut down.", + 10059: "Too many references.", + 10060: "The operation timed out.", + 10061: "Connection refused.", + 10062: "Cannot translate name.", + 10063: "The name is too long.", + 10064: "The host is down.", + 10065: "The host is unreachable.", + 10066: "Directory not empty.", + 10067: "Too many processes.", + 10068: "User quota exceeded.", + 10069: "Disk quota exceeded.", + 10070: "Stale file handle reference.", + 10071: "Item is remote.", + 10091: "Network subsystem is unavailable.", + 10092: "Winsock.dll version out of range.", + 10093: "Successful WSAStartup not yet performed.", + 10101: "Graceful shutdown in progress.", + 10102: "No more results from WSALookupServiceNext.", + 10103: "Call has been canceled.", + 10104: "Procedure call table is invalid.", + 10105: "Service provider is invalid.", + 10106: "Service provider failed to initialize.", + 10107: "System call failure.", + 10108: "Service not found.", + 10109: "Class type not found.", + 10110: "No more results from WSALookupServiceNext.", + 10111: "Call was canceled.", + 10112: "Database query was refused.", + 11001: "Host not found.", + 11002: "Nonauthoritative host not found.", + 11003: "This is a nonrecoverable error.", + 11004: "Valid name, no data record requested type.", + 11005: "QoS receivers.", + 11006: "QoS senders.", + 11007: "No QoS senders.", + 11008: "QoS no receivers.", + 11009: "QoS request confirmed.", + 11010: "QoS admission error.", + 11011: "QoS policy failure.", + 11012: "QoS bad style.", + 11013: "QoS bad object.", + 11014: "QoS traffic control error.", + 11015: "QoS generic error.", + 11016: "QoS service type error.", + 11017: "QoS flowspec error.", + 11018: "Invalid QoS provider buffer.", + 11019: "Invalid QoS filter style.", + 11020: "Invalid QoS filter style.", + 11021: "Incorrect QoS filter count.", + 11022: "Invalid QoS object length.", + 11023: 
"Incorrect QoS flow count.", + 11024: "Unrecognized QoS object.", + 11025: "Invalid QoS policy object.", + 11026: "Invalid QoS flow descriptor.", + 11027: "Invalid QoS provider-specific flowspec.", + 11028: "Invalid QoS provider-specific filterspec.", + 11029: "Invalid QoS shape discard mode object.", + 11030: "Invalid QoS shaping rate object.", + 11031: "Reserved policy QoS element type." + } + __all__.append("errorTab") + + +class _GiveupOnSendfile(Exception): pass + + +class socket(_socket.socket): + + """A subclass of _socket.socket adding the makefile() method.""" + + __slots__ = ["__weakref__", "_io_refs", "_closed"] + + def __init__(self, family=-1, type=-1, proto=-1, fileno=None): + # For user code address family and type values are IntEnum members, but + # for the underlying _socket.socket they're just integers. The + # constructor of _socket.socket converts the given argument to an + # integer automatically. + if fileno is None: + if family == -1: + family = AF_INET + if type == -1: + type = SOCK_STREAM + if proto == -1: + proto = 0 + _socket.socket.__init__(self, family, type, proto, fileno) + self._io_refs = 0 + self._closed = False + + def __enter__(self): + return self + + def __exit__(self, *args): + if not self._closed: + self.close() + + def __repr__(self): + """Wrap __repr__() to reveal the real class name and socket + address(es). + """ + closed = getattr(self, '_closed', False) + s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \ + % (self.__class__.__module__, + self.__class__.__qualname__, + " [closed]" if closed else "", + self.fileno(), + self.family, + self.type, + self.proto) + if not closed: + # getsockname and getpeername may not be available on WASI. + try: + laddr = self.getsockname() + if laddr: + s += ", laddr=%s" % str(laddr) + except (error, AttributeError): + pass + try: + raddr = self.getpeername() + if raddr: + s += ", raddr=%s" % str(raddr) + except (error, AttributeError): + pass + s += '>' + return s + + def __getstate__(self): + raise TypeError(f"cannot pickle {self.__class__.__name__!r} object") + + def dup(self): + """dup() -> socket object + + Duplicate the socket. Return a new socket object connected to the same + system resource. The new socket is non-inheritable. + """ + fd = dup(self.fileno()) + sock = self.__class__(self.family, self.type, self.proto, fileno=fd) + sock.settimeout(self.gettimeout()) + return sock + + def accept(self): + """accept() -> (socket object, address info) + + Wait for an incoming connection. Return a new socket + representing the connection, and the address of the client. + For IP sockets, the address info is a pair (hostaddr, port). + """ + fd, addr = self._accept() + sock = socket(self.family, self.type, self.proto, fileno=fd) + # Issue #7995: if no default timeout is set and the listening + # socket had a (non-zero) timeout, force the new socket in blocking + # mode to override platform-specific socket flags inheritance. + if getdefaulttimeout() is None and self.gettimeout(): + sock.setblocking(True) + return sock, addr + + def makefile(self, mode="r", buffering=None, *, + encoding=None, errors=None, newline=None): + """makefile(...) -> an I/O stream connected to the socket + + The arguments are as for io.open() after the filename, except the only + supported mode values are 'r' (default), 'w', 'b', or a combination of + those. + """ + # XXX refactor to share code? 
+ if not set(mode) <= {"r", "w", "b"}: + raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,)) + writing = "w" in mode + reading = "r" in mode or not writing + assert reading or writing + binary = "b" in mode + rawmode = "" + if reading: + rawmode += "r" + if writing: + rawmode += "w" + raw = SocketIO(self, rawmode) + self._io_refs += 1 + if buffering is None: + buffering = -1 + if buffering < 0: + buffering = io.DEFAULT_BUFFER_SIZE + if buffering == 0: + if not binary: + raise ValueError("unbuffered streams must be binary") + return raw + if reading and writing: + buffer = io.BufferedRWPair(raw, raw, buffering) + elif reading: + buffer = io.BufferedReader(raw, buffering) + else: + assert writing + buffer = io.BufferedWriter(raw, buffering) + if binary: + return buffer + encoding = io.text_encoding(encoding) + text = io.TextIOWrapper(buffer, encoding, errors, newline) + text.mode = mode + return text + + if hasattr(os, 'sendfile'): + + def _sendfile_use_sendfile(self, file, offset=0, count=None): + # Lazy import to improve module import time + import selectors + + self._check_sendfile_params(file, offset, count) + sockno = self.fileno() + try: + fileno = file.fileno() + except (AttributeError, io.UnsupportedOperation) as err: + raise _GiveupOnSendfile(err) # not a regular file + try: + fsize = os.fstat(fileno).st_size + except OSError as err: + raise _GiveupOnSendfile(err) # not a regular file + if not fsize: + return 0 # empty file + # Truncate to 1GiB to avoid OverflowError, see bpo-38319. + blocksize = min(count or fsize, 2 ** 30) + timeout = self.gettimeout() + if timeout == 0: + raise ValueError("non-blocking sockets are not supported") + # poll/select have the advantage of not requiring any + # extra file descriptor, contrarily to epoll/kqueue + # (also, they require a single syscall). + if hasattr(selectors, 'PollSelector'): + selector = selectors.PollSelector() + else: + selector = selectors.SelectSelector() + selector.register(sockno, selectors.EVENT_WRITE) + + total_sent = 0 + # localize variable access to minimize overhead + selector_select = selector.select + os_sendfile = os.sendfile + try: + while True: + if timeout and not selector_select(timeout): + raise TimeoutError('timed out') + if count: + blocksize = min(count - total_sent, blocksize) + if blocksize <= 0: + break + try: + sent = os_sendfile(sockno, fileno, offset, blocksize) + except BlockingIOError: + if not timeout: + # Block until the socket is ready to send some + # data; avoids hogging CPU resources. + selector_select() + continue + except OSError as err: + if total_sent == 0: + # We can get here for different reasons, the main + # one being 'file' is not a regular mmap(2)-like + # file, in which case we'll fall back on using + # plain send(). 
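+                        # (Editor's note: objects without a usable fileno(),
+                        # such as BytesIO, already gave up above; special
+                        # files that os.sendfile() cannot handle fail here.)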
+ raise _GiveupOnSendfile(err) + raise err from None + else: + if sent == 0: + break # EOF + offset += sent + total_sent += sent + return total_sent + finally: + if total_sent > 0 and hasattr(file, 'seek'): + file.seek(offset) + else: + def _sendfile_use_sendfile(self, file, offset=0, count=None): + raise _GiveupOnSendfile( + "os.sendfile() not available on this platform") + + def _sendfile_use_send(self, file, offset=0, count=None): + self._check_sendfile_params(file, offset, count) + if self.gettimeout() == 0: + raise ValueError("non-blocking sockets are not supported") + if offset: + file.seek(offset) + blocksize = min(count, 8192) if count else 8192 + total_sent = 0 + # localize variable access to minimize overhead + file_read = file.read + sock_send = self.send + try: + while True: + if count: + blocksize = min(count - total_sent, blocksize) + if blocksize <= 0: + break + data = memoryview(file_read(blocksize)) + if not data: + break # EOF + while True: + try: + sent = sock_send(data) + except BlockingIOError: + continue + else: + total_sent += sent + if sent < len(data): + data = data[sent:] + else: + break + return total_sent + finally: + if total_sent > 0 and hasattr(file, 'seek'): + file.seek(offset + total_sent) + + def _check_sendfile_params(self, file, offset, count): + if 'b' not in getattr(file, 'mode', 'b'): + raise ValueError("file should be opened in binary mode") + if not self.type & SOCK_STREAM: + raise ValueError("only SOCK_STREAM type sockets are supported") + if count is not None: + if not isinstance(count, int): + raise TypeError( + "count must be a positive integer (got {!r})".format(count)) + if count <= 0: + raise ValueError( + "count must be a positive integer (got {!r})".format(count)) + + def sendfile(self, file, offset=0, count=None): + """sendfile(file[, offset[, count]]) -> sent + + Send a file until EOF is reached by using high-performance + os.sendfile() and return the total number of bytes which + were sent. + *file* must be a regular file object opened in binary mode. + If os.sendfile() is not available (e.g. Windows) or file is + not a regular file socket.send() will be used instead. + *offset* tells from where to start reading the file. + If specified, *count* is the total number of bytes to transmit + as opposed to sending the file until EOF is reached. + File position is updated on return or also in case of error in + which case file.tell() can be used to figure out the number of + bytes which were sent. + The socket must be of SOCK_STREAM type. + Non-blocking sockets are not supported. + """ + try: + return self._sendfile_use_sendfile(file, offset, count) + except _GiveupOnSendfile: + return self._sendfile_use_send(file, offset, count) + + def _decref_socketios(self): + if self._io_refs > 0: + self._io_refs -= 1 + if self._closed: + self.close() + + def _real_close(self, _ss=_socket.socket): + # This function should not reference any globals. See issue #808164. + _ss.close(self) + + def close(self): + # This function should not reference any globals. See issue #808164. + self._closed = True + if self._io_refs <= 0: + self._real_close() + + def detach(self): + """detach() -> file descriptor + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. The file descriptor is returned. + """ + self._closed = True + return super().detach() + + @property + def family(self): + """Read-only access to the address family for this socket. 
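+
+        Editor's sketch (the default family is AF_INET on most platforms):
+
+        >>> socket().family
+        <AddressFamily.AF_INET: 2>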
+ """ + return _intenum_converter(super().family, AddressFamily) + + @property + def type(self): + """Read-only access to the socket type. + """ + return _intenum_converter(super().type, SocketKind) + + if os.name == 'nt': + def get_inheritable(self): + return os.get_handle_inheritable(self.fileno()) + def set_inheritable(self, inheritable): + os.set_handle_inheritable(self.fileno(), inheritable) + else: + def get_inheritable(self): + return os.get_inheritable(self.fileno()) + def set_inheritable(self, inheritable): + os.set_inheritable(self.fileno(), inheritable) + get_inheritable.__doc__ = "Get the inheritable flag of the socket" + set_inheritable.__doc__ = "Set the inheritable flag of the socket" + +def fromfd(fd, family, type, proto=0): + """ fromfd(fd, family, type[, proto]) -> socket object + + Create a socket object from a duplicate of the given file + descriptor. The remaining arguments are the same as for socket(). + """ + nfd = dup(fd) + return socket(family, type, proto, nfd) + +if hasattr(_socket.socket, "sendmsg"): + def send_fds(sock, buffers, fds, flags=0, address=None): + """ send_fds(sock, buffers, fds[, flags[, address]]) -> integer + + Send the list of file descriptors fds over an AF_UNIX socket. + """ + import array + + return sock.sendmsg(buffers, [(_socket.SOL_SOCKET, + _socket.SCM_RIGHTS, array.array("i", fds))]) + __all__.append("send_fds") + +if hasattr(_socket.socket, "recvmsg"): + def recv_fds(sock, bufsize, maxfds, flags=0): + """ recv_fds(sock, bufsize, maxfds[, flags]) -> (data, list of file + descriptors, msg_flags, address) + + Receive up to maxfds file descriptors returning the message + data and a list containing the descriptors. + """ + import array + + # Array of ints + fds = array.array("i") + msg, ancdata, flags, addr = sock.recvmsg(bufsize, + _socket.CMSG_LEN(maxfds * fds.itemsize)) + for cmsg_level, cmsg_type, cmsg_data in ancdata: + if (cmsg_level == _socket.SOL_SOCKET and cmsg_type == _socket.SCM_RIGHTS): + fds.frombytes(cmsg_data[: + len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) + + return msg, list(fds), flags, addr + __all__.append("recv_fds") + +if hasattr(_socket.socket, "share"): + def fromshare(info): + """ fromshare(info) -> socket object + + Create a socket object from the bytes object returned by + socket.share(pid). + """ + return socket(0, 0, 0, info) + __all__.append("fromshare") + +# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. +# This is used if _socket doesn't natively provide socketpair. It's +# always defined so that it can be patched in for testing purposes. +def _fallback_socketpair(family=AF_INET, type=SOCK_STREAM, proto=0): + if family == AF_INET: + host = _LOCALHOST + elif family == AF_INET6: + host = _LOCALHOST_V6 + else: + raise ValueError("Only AF_INET and AF_INET6 socket address families " + "are supported") + if type != SOCK_STREAM: + raise ValueError("Only SOCK_STREAM socket type is supported") + if proto != 0: + raise ValueError("Only protocol zero is supported") + + # We create a connected TCP socket. Note the trick with + # setblocking(False) that prevents us from having to create a thread. 
+ lsock = socket(family, type, proto) + try: + lsock.bind((host, 0)) + lsock.listen() + # On IPv6, ignore flow_info and scope_id + addr, port = lsock.getsockname()[:2] + csock = socket(family, type, proto) + try: + csock.setblocking(False) + try: + csock.connect((addr, port)) + except (BlockingIOError, InterruptedError): + pass + csock.setblocking(True) + ssock, _ = lsock.accept() + except: + csock.close() + raise + finally: + lsock.close() + + # Authenticating avoids using a connection from something else + # able to connect to {host}:{port} instead of us. + # We expect only AF_INET and AF_INET6 families. + try: + if ( + ssock.getsockname() != csock.getpeername() + or csock.getsockname() != ssock.getpeername() + ): + raise ConnectionError("Unexpected peer connection") + except: + # getsockname() and getpeername() can fail + # if either socket isn't connected. + ssock.close() + csock.close() + raise + + return (ssock, csock) + +if hasattr(_socket, "socketpair"): + def socketpair(family=None, type=SOCK_STREAM, proto=0): + if family is None: + try: + family = AF_UNIX + except NameError: + family = AF_INET + a, b = _socket.socketpair(family, type, proto) + a = socket(family, type, proto, a.detach()) + b = socket(family, type, proto, b.detach()) + return a, b + +else: + socketpair = _fallback_socketpair + __all__.append("socketpair") + +socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object) +Create a pair of socket objects from the sockets returned by the platform +socketpair() function. +The arguments are the same as for socket() except the default family is AF_UNIX +if defined on the platform; otherwise, the default is AF_INET. +""" + +_blocking_errnos = { EAGAIN, EWOULDBLOCK } + +class SocketIO(io.RawIOBase): + + """Raw I/O implementation for stream sockets. + + This class supports the makefile() method on sockets. It provides + the raw I/O interface on top of a socket object. + """ + + # One might wonder why not let FileIO do the job instead. There are two + # main reasons why FileIO is not adapted: + # - it wouldn't work under Windows (where you can't used read() and + # write() on a socket handle) + # - it wouldn't work with socket timeouts (FileIO would ignore the + # timeout and consider the socket non-blocking) + + # XXX More docs + + def __init__(self, sock, mode): + if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): + raise ValueError("invalid mode: %r" % mode) + io.RawIOBase.__init__(self) + self._sock = sock + if "b" not in mode: + mode += "b" + self._mode = mode + self._reading = "r" in mode + self._writing = "w" in mode + self._timeout_occurred = False + + def readinto(self, b): + """Read up to len(b) bytes into the writable buffer *b* and return + the number of bytes read. If the socket is non-blocking and no bytes + are available, None is returned. + + If *b* is non-empty, a 0 return value indicates that the connection + was shutdown at the other end. + """ + self._checkClosed() + self._checkReadable() + if self._timeout_occurred: + raise OSError("cannot read from timed out object") + try: + return self._sock.recv_into(b) + except timeout: + self._timeout_occurred = True + raise + except error as e: + if e.errno in _blocking_errnos: + return None + raise + + def write(self, b): + """Write the given bytes or bytearray object *b* to the socket + and return the number of bytes written. This can be less than + len(b) if not all data could be written. If the socket is + non-blocking and no bytes could be written None is returned. 
+ """ + self._checkClosed() + self._checkWritable() + try: + return self._sock.send(b) + except error as e: + # XXX what about EINTR? + if e.errno in _blocking_errnos: + return None + raise + + def readable(self): + """True if the SocketIO is open for reading. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._reading + + def writable(self): + """True if the SocketIO is open for writing. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._writing + + def seekable(self): + """True if the SocketIO is open for seeking. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return super().seekable() + + def fileno(self): + """Return the file descriptor of the underlying socket. + """ + self._checkClosed() + return self._sock.fileno() + + @property + def name(self): + if not self.closed: + return self.fileno() + else: + return -1 + + @property + def mode(self): + return self._mode + + def close(self): + """Close the SocketIO object. This doesn't close the underlying + socket, except if all references to it have disappeared. + """ + if self.closed: + return + io.RawIOBase.close(self) + self._sock._decref_socketios() + self._sock = None + + +def getfqdn(name=''): + """Get fully qualified domain name from name. + + An empty argument is interpreted as meaning the local host. + + First the hostname returned by gethostbyaddr() is checked, then + possibly existing aliases. In case no FQDN is available and `name` + was given, it is returned unchanged. If `name` was empty, '0.0.0.0' or '::', + hostname from gethostname() is returned. + """ + name = name.strip() + if not name or name in ('0.0.0.0', '::'): + name = gethostname() + try: + hostname, aliases, ipaddrs = gethostbyaddr(name) + except error: + pass + else: + aliases.insert(0, hostname) + for name in aliases: + if '.' in name: + break + else: + name = hostname + return name + + +_GLOBAL_DEFAULT_TIMEOUT = object() + +def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, + source_address=None, *, all_errors=False): + """Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + A host of '' or port 0 tells the OS to use the default. When a connection + cannot be created, raises the last error if *all_errors* is False, + and an ExceptionGroup of all errors if *all_errors* is True. 
+ """ + + host, port = address + exceptions = [] + for res in getaddrinfo(host, port, 0, SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket(af, socktype, proto) + if timeout is not _GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + # Break explicitly a reference cycle + exceptions.clear() + return sock + + except error as exc: + if not all_errors: + exceptions.clear() # raise only the last error + exceptions.append(exc) + if sock is not None: + sock.close() + + if len(exceptions): + try: + if not all_errors: + raise exceptions[0] + raise ExceptionGroup("create_connection failed", exceptions) + finally: + # Break explicitly a reference cycle + exceptions.clear() + else: + raise error("getaddrinfo returns an empty list") + + +def has_dualstack_ipv6(): + """Return True if the platform supports creating a SOCK_STREAM socket + which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections. + """ + if not has_ipv6 \ + or not hasattr(_socket, 'IPPROTO_IPV6') \ + or not hasattr(_socket, 'IPV6_V6ONLY'): + return False + try: + with socket(AF_INET6, SOCK_STREAM) as sock: + sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) + return True + except error: + return False + + +def create_server(address, *, family=AF_INET, backlog=None, reuse_port=False, + dualstack_ipv6=False): + """Convenience function which creates a SOCK_STREAM type socket + bound to *address* (a 2-tuple (host, port)) and return the socket + object. + + *family* should be either AF_INET or AF_INET6. + *backlog* is the queue size passed to socket.listen(). + *reuse_port* dictates whether to use the SO_REUSEPORT socket option. + *dualstack_ipv6*: if true and the platform supports it, it will + create an AF_INET6 socket able to accept both IPv4 or IPv6 + connections. When false it will explicitly disable this option on + platforms that enable it by default (e.g. Linux). + + >>> with create_server(('', 8000)) as server: + ... while True: + ... conn, addr = server.accept() + ... # handle new connection + """ + if reuse_port and not hasattr(_socket, "SO_REUSEPORT"): + raise ValueError("SO_REUSEPORT not supported on this platform") + if dualstack_ipv6: + if not has_dualstack_ipv6(): + raise ValueError("dualstack_ipv6 not supported on this platform") + if family != AF_INET6: + raise ValueError("dualstack_ipv6 requires AF_INET6 family") + sock = socket(family, SOCK_STREAM) + try: + # Note about Windows. We don't set SO_REUSEADDR because: + # 1) It's unnecessary: bind() will succeed even in case of a + # previous closed socket on the same address and still in + # TIME_WAIT state. + # 2) If set, another socket is free to bind() on the same + # address, effectively preventing this one from accepting + # connections. Also, it may set the process in a state where + # it'll no longer respond to any signals or graceful kills. + # See: https://learn.microsoft.com/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse + if os.name not in ('nt', 'cygwin') and \ + hasattr(_socket, 'SO_REUSEADDR'): + try: + sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) + except error: + # Fail later on bind(), for platforms which may not + # support this option. + pass + # Since Linux 6.12.9, SO_REUSEPORT is not allowed + # on other address families than AF_INET/AF_INET6. 
+ if reuse_port and family in (AF_INET, AF_INET6): + sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1) + if has_ipv6 and family == AF_INET6: + if dualstack_ipv6: + sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) + elif hasattr(_socket, "IPV6_V6ONLY") and \ + hasattr(_socket, "IPPROTO_IPV6"): + sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1) + try: + sock.bind(address) + except error as err: + msg = '%s (while attempting to bind on address %r)' % \ + (err.strerror, address) + raise error(err.errno, msg) from None + if backlog is None: + sock.listen() + else: + sock.listen(backlog) + return sock + except error: + sock.close() + raise + + +def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): + """Resolve host and port into list of address info entries. + + Translate the host/port argument into a sequence of 5-tuples that contain + all the necessary arguments for creating a socket connected to that service. + host is a domain name, a string representation of an IPv4/v6 address or + None. port is a string service name such as 'http', a numeric port number or + None. By passing None as the value of host and port, you can pass NULL to + the underlying C API. + + The family, type and proto arguments can be optionally specified in order to + narrow the list of addresses returned. Passing zero as a value for each of + these arguments selects the full range of results. + """ + # We override this function since we want to translate the numeric family + # and socket type values to enum constants. + addrlist = [] + for res in _socket.getaddrinfo(host, port, family, type, proto, flags): + af, socktype, proto, canonname, sa = res + addrlist.append((_intenum_converter(af, AddressFamily), + _intenum_converter(socktype, SocketKind), + proto, canonname, sa)) + return addrlist diff --git a/Python313_13_x64_Template/Lib/socketserver.py b/Python314_4_x64_Template/Lib/socketserver.py similarity index 100% rename from Python313_13_x64_Template/Lib/socketserver.py rename to Python314_4_x64_Template/Lib/socketserver.py diff --git a/Python314_4_x64_Template/Lib/sqlite3/__init__.py b/Python314_4_x64_Template/Lib/sqlite3/__init__.py new file mode 100644 index 00000000..ed727fae --- /dev/null +++ b/Python314_4_x64_Template/Lib/sqlite3/__init__.py @@ -0,0 +1,57 @@ +# pysqlite2/__init__.py: the pysqlite2 package. +# +# Copyright (C) 2005 Gerhard Häring +# +# This file is part of pysqlite. +# +# This software is provided 'as-is', without any express or implied +# warranty. In no event will the authors be held liable for any damages +# arising from the use of this software. +# +# Permission is granted to anyone to use this software for any purpose, +# including commercial applications, and to alter it and redistribute it +# freely, subject to the following restrictions: +# +# 1. The origin of this software must not be misrepresented; you must not +# claim that you wrote the original software. If you use this software +# in a product, an acknowledgment in the product documentation would be +# appreciated but is not required. +# 2. Altered source versions must be plainly marked as such, and must not be +# misrepresented as being the original software. +# 3. This notice may not be removed or altered from any source distribution. + +""" +The sqlite3 extension module provides a DB-API 2.0 (PEP 249) compliant +interface to the SQLite library, and requires SQLite 3.15.2 or newer. 
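+
+The version of the bundled SQLite library can be inspected at runtime
+(editor's note; the printed value depends on the build):
+
+    import sqlite3
+    print(sqlite3.sqlite_version)   # e.g. '3.45.1'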
+
+To use the module, start by creating a database Connection object:
+
+    import sqlite3
+    cx = sqlite3.connect("test.db")  # test.db will be created or opened
+
+The special path name ":memory:" can be provided to connect to a transient
+in-memory database:
+
+    cx = sqlite3.connect(":memory:")  # connect to a database in RAM
+
+Once a connection has been established, create a Cursor object and call
+its execute() method to perform SQL queries:
+
+    cu = cx.cursor()
+
+    # create a table
+    cu.execute("create table lang(name, first_appeared)")
+
+    # insert values into a table
+    cu.execute("insert into lang values (?, ?)", ("C", 1972))
+
+    # execute a query and iterate over the result
+    for row in cu.execute("select * from lang"):
+        print(row)
+
+    cx.close()
+
+The sqlite3 module is written by Gerhard Häring <gh@ghaering.de>.
+"""
+
+from sqlite3.dbapi2 import *
diff --git a/Python314_4_x64_Template/Lib/sqlite3/__main__.py b/Python314_4_x64_Template/Lib/sqlite3/__main__.py
new file mode 100644
index 00000000..4ccf292d
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/sqlite3/__main__.py
@@ -0,0 +1,139 @@
+"""A simple SQLite CLI for the sqlite3 module.
+
+Apart from using 'argparse' for the command-line interface,
+this module implements the REPL as a thin wrapper around
+the InteractiveConsole class from the 'code' stdlib module.
+"""
+import sqlite3
+import sys
+
+from argparse import ArgumentParser
+from code import InteractiveConsole
+from textwrap import dedent
+
+
+def execute(c, sql, suppress_errors=True):
+    """Helper that wraps execution of SQL code.
+
+    This is used both by the REPL and by direct execution from the CLI.
+
+    'c' may be a cursor or a connection.
+    'sql' is the SQL string to execute.
+    """
+
+    try:
+        for row in c.execute(sql):
+            print(row)
+    except sqlite3.Error as e:
+        tp = type(e).__name__
+        try:
+            print(f"{tp} ({e.sqlite_errorname}): {e}", file=sys.stderr)
+        except AttributeError:
+            print(f"{tp}: {e}", file=sys.stderr)
+        if not suppress_errors:
+            sys.exit(1)
+
+
+class SqliteInteractiveConsole(InteractiveConsole):
+    """A simple SQLite REPL."""
+
+    def __init__(self, connection):
+        super().__init__()
+        self._con = connection
+        self._cur = connection.cursor()
+
+    def runsource(self, source, filename="<input>", symbol="single"):
+        """Override runsource, the core of the InteractiveConsole REPL.
+
+        Return True if more input is needed; buffering is done automatically.
+        Return False if input is a complete statement ready for execution.
+        """
+        if not source or source.isspace():
+            return False
+        if source[0] == ".":
+            match source[1:].strip():
+                case "version":
+                    print(f"{sqlite3.sqlite_version}")
+                case "help":
+                    print("Enter SQL code and press enter.")
+                case "quit":
+                    sys.exit(0)
+                case "":
+                    pass
+                case _ as unknown:
+                    self.write("Error: unknown command or invalid arguments:"
+                               f' "{unknown}".\n')
+        else:
+            if not sqlite3.complete_statement(source):
+                return True
+            execute(self._cur, source)
+        return False
+
+
+def main(*args):
+    parser = ArgumentParser(
+        description="Python sqlite3 CLI",
+        color=True,
+    )
+    parser.add_argument(
+        "filename", type=str, default=":memory:", nargs="?",
+        help=(
+            "SQLite database to open (defaults to ':memory:'). "
+            "A new database is created if the file does not previously exist."
+        ),
+    )
+    parser.add_argument(
+        "sql", type=str, nargs="?",
+        help=(
+            "An SQL query to execute. "
+            "Any returned rows are printed to stdout."
+ ), + ) + parser.add_argument( + "-v", "--version", action="version", + version=f"SQLite version {sqlite3.sqlite_version}", + help="Print underlying SQLite library version", + ) + args = parser.parse_args(*args) + + if args.filename == ":memory:": + db_name = "a transient in-memory database" + else: + db_name = repr(args.filename) + + # Prepare REPL banner and prompts. + if sys.platform == "win32" and "idlelib.run" not in sys.modules: + eofkey = "CTRL-Z" + else: + eofkey = "CTRL-D" + banner = dedent(f""" + sqlite3 shell, running on SQLite version {sqlite3.sqlite_version} + Connected to {db_name} + + Each command will be run using execute() on the cursor. + Type ".help" for more information; type ".quit" or {eofkey} to quit. + """).strip() + sys.ps1 = "sqlite> " + sys.ps2 = " ... " + + con = sqlite3.connect(args.filename, isolation_level=None) + try: + if args.sql: + # SQL statement provided on the command-line; execute it directly. + execute(con, args.sql, suppress_errors=False) + else: + # No SQL provided; start the REPL. + console = SqliteInteractiveConsole(con) + try: + import readline # noqa: F401 + except ImportError: + pass + console.interact(banner, exitmsg="") + finally: + con.close() + + sys.exit(0) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/Python314_4_x64_Template/Lib/sqlite3/dbapi2.py b/Python314_4_x64_Template/Lib/sqlite3/dbapi2.py new file mode 100644 index 00000000..03157605 --- /dev/null +++ b/Python314_4_x64_Template/Lib/sqlite3/dbapi2.py @@ -0,0 +1,96 @@ +# pysqlite2/dbapi2.py: the DB-API 2.0 interface +# +# Copyright (C) 2004-2005 Gerhard Häring +# +# This file is part of pysqlite. +# +# This software is provided 'as-is', without any express or implied +# warranty. In no event will the authors be held liable for any damages +# arising from the use of this software. +# +# Permission is granted to anyone to use this software for any purpose, +# including commercial applications, and to alter it and redistribute it +# freely, subject to the following restrictions: +# +# 1. The origin of this software must not be misrepresented; you must not +# claim that you wrote the original software. If you use this software +# in a product, an acknowledgment in the product documentation would be +# appreciated but is not required. +# 2. Altered source versions must be plainly marked as such, and must not be +# misrepresented as being the original software. +# 3. This notice may not be removed or altered from any source distribution. 
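+
+# Editor's note: the DB-API 2.0 names defined below allow callers to write,
+# for example, sqlite3.Date(2024, 1, 1) or
+# sqlite3.TimestampFromTicks(time.time()).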
+ +import datetime +import time +import collections.abc + +from _sqlite3 import * + +paramstyle = "qmark" + +apilevel = "2.0" + +Date = datetime.date + +Time = datetime.time + +Timestamp = datetime.datetime + +def DateFromTicks(ticks): + return Date(*time.localtime(ticks)[:3]) + +def TimeFromTicks(ticks): + return Time(*time.localtime(ticks)[3:6]) + +def TimestampFromTicks(ticks): + return Timestamp(*time.localtime(ticks)[:6]) + + +sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")]) + +Binary = memoryview +collections.abc.Sequence.register(Row) + +def register_adapters_and_converters(): + from warnings import warn + + msg = ("The default {what} is deprecated as of Python 3.12; " + "see the sqlite3 documentation for suggested replacement recipes") + + def adapt_date(val): + warn(msg.format(what="date adapter"), DeprecationWarning, stacklevel=2) + return val.isoformat() + + def adapt_datetime(val): + warn(msg.format(what="datetime adapter"), DeprecationWarning, stacklevel=2) + return val.isoformat(" ") + + def convert_date(val): + warn(msg.format(what="date converter"), DeprecationWarning, stacklevel=2) + return datetime.date(*map(int, val.split(b"-"))) + + def convert_timestamp(val): + warn(msg.format(what="timestamp converter"), DeprecationWarning, stacklevel=2) + datepart, timepart = val.split(b" ") + year, month, day = map(int, datepart.split(b"-")) + timepart_full = timepart.split(b".") + hours, minutes, seconds = map(int, timepart_full[0].split(b":")) + if len(timepart_full) == 2: + microseconds = int('{:0<6.6}'.format(timepart_full[1].decode())) + else: + microseconds = 0 + + val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds) + return val + + + register_adapter(datetime.date, adapt_date) + register_adapter(datetime.datetime, adapt_datetime) + register_converter("date", convert_date) + register_converter("timestamp", convert_timestamp) + +register_adapters_and_converters() + +# Clean up namespace + +del(register_adapters_and_converters) diff --git a/Python313_13_x64_Template/Lib/sqlite3/dump.py b/Python314_4_x64_Template/Lib/sqlite3/dump.py similarity index 100% rename from Python313_13_x64_Template/Lib/sqlite3/dump.py rename to Python314_4_x64_Template/Lib/sqlite3/dump.py diff --git a/Python313_13_x64_Template/Lib/sre_compile.py b/Python314_4_x64_Template/Lib/sre_compile.py similarity index 100% rename from Python313_13_x64_Template/Lib/sre_compile.py rename to Python314_4_x64_Template/Lib/sre_compile.py diff --git a/Python313_13_x64_Template/Lib/sre_constants.py b/Python314_4_x64_Template/Lib/sre_constants.py similarity index 100% rename from Python313_13_x64_Template/Lib/sre_constants.py rename to Python314_4_x64_Template/Lib/sre_constants.py diff --git a/Python313_13_x64_Template/Lib/sre_parse.py b/Python314_4_x64_Template/Lib/sre_parse.py similarity index 100% rename from Python313_13_x64_Template/Lib/sre_parse.py rename to Python314_4_x64_Template/Lib/sre_parse.py diff --git a/Python314_4_x64_Template/Lib/ssl.py b/Python314_4_x64_Template/Lib/ssl.py new file mode 100644 index 00000000..8889aff9 --- /dev/null +++ b/Python314_4_x64_Template/Lib/ssl.py @@ -0,0 +1,1529 @@ +# Wrapper module for _ssl, providing some additional facilities +# implemented in Python. Written by Bill Janssen. + +"""This module provides some more Pythonic support for SSL. 
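+
+For example (editor's sketch; host name and port are illustrative):
+
+    import socket
+    import ssl
+
+    ctx = ssl.create_default_context()
+    with socket.create_connection(("example.org", 443)) as raw:
+        with ctx.wrap_socket(raw, server_hostname="example.org") as tls:
+            print(tls.version())  # e.g. 'TLSv1.3'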
+ +Object types: + + SSLSocket -- subtype of socket.socket which does SSL over the socket + +Exceptions: + + SSLError -- exception raised for I/O errors + +Functions: + + cert_time_to_seconds -- convert time string used for certificate + notBefore and notAfter functions to integer + seconds past the Epoch (the time values + returned from time.time()) + + get_server_certificate (addr, ssl_version, ca_certs, timeout) -- Retrieve the + certificate from the server at the specified + address and return it as a PEM-encoded string + + +Integer constants: + +SSL_ERROR_ZERO_RETURN +SSL_ERROR_WANT_READ +SSL_ERROR_WANT_WRITE +SSL_ERROR_WANT_X509_LOOKUP +SSL_ERROR_SYSCALL +SSL_ERROR_SSL +SSL_ERROR_WANT_CONNECT + +SSL_ERROR_EOF +SSL_ERROR_INVALID_ERROR_CODE + +The following group define certificate requirements that one side is +allowing/requiring from the other side: + +CERT_NONE - no certificates from the other side are required (or will + be looked at if provided) +CERT_OPTIONAL - certificates are not required, but if provided will be + validated, and if validation fails, the connection will + also fail +CERT_REQUIRED - certificates are required, and will be validated, and + if validation fails, the connection will also fail + +The following constants identify various SSL protocol variants: + +PROTOCOL_SSLv2 +PROTOCOL_SSLv3 +PROTOCOL_SSLv23 +PROTOCOL_TLS +PROTOCOL_TLS_CLIENT +PROTOCOL_TLS_SERVER +PROTOCOL_TLSv1 +PROTOCOL_TLSv1_1 +PROTOCOL_TLSv1_2 + +The following constants identify various SSL alert message descriptions as per +http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-6 + +ALERT_DESCRIPTION_CLOSE_NOTIFY +ALERT_DESCRIPTION_UNEXPECTED_MESSAGE +ALERT_DESCRIPTION_BAD_RECORD_MAC +ALERT_DESCRIPTION_RECORD_OVERFLOW +ALERT_DESCRIPTION_DECOMPRESSION_FAILURE +ALERT_DESCRIPTION_HANDSHAKE_FAILURE +ALERT_DESCRIPTION_BAD_CERTIFICATE +ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE +ALERT_DESCRIPTION_CERTIFICATE_REVOKED +ALERT_DESCRIPTION_CERTIFICATE_EXPIRED +ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN +ALERT_DESCRIPTION_ILLEGAL_PARAMETER +ALERT_DESCRIPTION_UNKNOWN_CA +ALERT_DESCRIPTION_ACCESS_DENIED +ALERT_DESCRIPTION_DECODE_ERROR +ALERT_DESCRIPTION_DECRYPT_ERROR +ALERT_DESCRIPTION_PROTOCOL_VERSION +ALERT_DESCRIPTION_INSUFFICIENT_SECURITY +ALERT_DESCRIPTION_INTERNAL_ERROR +ALERT_DESCRIPTION_USER_CANCELLED +ALERT_DESCRIPTION_NO_RENEGOTIATION +ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION +ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +ALERT_DESCRIPTION_UNRECOGNIZED_NAME +ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE +ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE +ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY +""" + +import sys +import os +from collections import namedtuple +from enum import Enum as _Enum, IntEnum as _IntEnum, IntFlag as _IntFlag +from enum import _simple_enum + +import _ssl # if we can't import it, let the error propagate + +from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION +from _ssl import _SSLContext, MemoryBIO, SSLSession +from _ssl import ( + SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError, + SSLSyscallError, SSLEOFError, SSLCertVerificationError + ) +from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj +from _ssl import RAND_status, RAND_add, RAND_bytes +try: + from _ssl import RAND_egd +except ImportError: + # RAND_egd is not supported on some platforms + pass + + +from _ssl import ( + HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN, HAS_SSLv2, HAS_SSLv3, HAS_TLSv1, + HAS_TLSv1_1, HAS_TLSv1_2, HAS_TLSv1_3, HAS_PSK, HAS_PHA +) +from 
_ssl import _DEFAULT_CIPHERS, _OPENSSL_API_VERSION + +_IntEnum._convert_( + '_SSLMethod', __name__, + lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23', + source=_ssl) + +_IntFlag._convert_( + 'Options', __name__, + lambda name: name.startswith('OP_'), + source=_ssl) + +_IntEnum._convert_( + 'AlertDescription', __name__, + lambda name: name.startswith('ALERT_DESCRIPTION_'), + source=_ssl) + +_IntEnum._convert_( + 'SSLErrorNumber', __name__, + lambda name: name.startswith('SSL_ERROR_'), + source=_ssl) + +_IntFlag._convert_( + 'VerifyFlags', __name__, + lambda name: name.startswith('VERIFY_'), + source=_ssl) + +_IntEnum._convert_( + 'VerifyMode', __name__, + lambda name: name.startswith('CERT_'), + source=_ssl) + +PROTOCOL_SSLv23 = _SSLMethod.PROTOCOL_SSLv23 = _SSLMethod.PROTOCOL_TLS +_PROTOCOL_NAMES = {value: name for name, value in _SSLMethod.__members__.items()} + +_SSLv2_IF_EXISTS = getattr(_SSLMethod, 'PROTOCOL_SSLv2', None) + + +@_simple_enum(_IntEnum) +class TLSVersion: + MINIMUM_SUPPORTED = _ssl.PROTO_MINIMUM_SUPPORTED + SSLv3 = _ssl.PROTO_SSLv3 + TLSv1 = _ssl.PROTO_TLSv1 + TLSv1_1 = _ssl.PROTO_TLSv1_1 + TLSv1_2 = _ssl.PROTO_TLSv1_2 + TLSv1_3 = _ssl.PROTO_TLSv1_3 + MAXIMUM_SUPPORTED = _ssl.PROTO_MAXIMUM_SUPPORTED + + +@_simple_enum(_IntEnum) +class _TLSContentType: + """Content types (record layer) + + See RFC 8446, section B.1 + """ + CHANGE_CIPHER_SPEC = 20 + ALERT = 21 + HANDSHAKE = 22 + APPLICATION_DATA = 23 + # pseudo content types + HEADER = 0x100 + INNER_CONTENT_TYPE = 0x101 + + +@_simple_enum(_IntEnum) +class _TLSAlertType: + """Alert types for TLSContentType.ALERT messages + + See RFC 8446, section B.2 + """ + CLOSE_NOTIFY = 0 + UNEXPECTED_MESSAGE = 10 + BAD_RECORD_MAC = 20 + DECRYPTION_FAILED = 21 + RECORD_OVERFLOW = 22 + DECOMPRESSION_FAILURE = 30 + HANDSHAKE_FAILURE = 40 + NO_CERTIFICATE = 41 + BAD_CERTIFICATE = 42 + UNSUPPORTED_CERTIFICATE = 43 + CERTIFICATE_REVOKED = 44 + CERTIFICATE_EXPIRED = 45 + CERTIFICATE_UNKNOWN = 46 + ILLEGAL_PARAMETER = 47 + UNKNOWN_CA = 48 + ACCESS_DENIED = 49 + DECODE_ERROR = 50 + DECRYPT_ERROR = 51 + EXPORT_RESTRICTION = 60 + PROTOCOL_VERSION = 70 + INSUFFICIENT_SECURITY = 71 + INTERNAL_ERROR = 80 + INAPPROPRIATE_FALLBACK = 86 + USER_CANCELED = 90 + NO_RENEGOTIATION = 100 + MISSING_EXTENSION = 109 + UNSUPPORTED_EXTENSION = 110 + CERTIFICATE_UNOBTAINABLE = 111 + UNRECOGNIZED_NAME = 112 + BAD_CERTIFICATE_STATUS_RESPONSE = 113 + BAD_CERTIFICATE_HASH_VALUE = 114 + UNKNOWN_PSK_IDENTITY = 115 + CERTIFICATE_REQUIRED = 116 + NO_APPLICATION_PROTOCOL = 120 + + +@_simple_enum(_IntEnum) +class _TLSMessageType: + """Message types (handshake protocol) + + See RFC 8446, section B.3 + """ + HELLO_REQUEST = 0 + CLIENT_HELLO = 1 + SERVER_HELLO = 2 + HELLO_VERIFY_REQUEST = 3 + NEWSESSION_TICKET = 4 + END_OF_EARLY_DATA = 5 + HELLO_RETRY_REQUEST = 6 + ENCRYPTED_EXTENSIONS = 8 + CERTIFICATE = 11 + SERVER_KEY_EXCHANGE = 12 + CERTIFICATE_REQUEST = 13 + SERVER_DONE = 14 + CERTIFICATE_VERIFY = 15 + CLIENT_KEY_EXCHANGE = 16 + FINISHED = 20 + CERTIFICATE_URL = 21 + CERTIFICATE_STATUS = 22 + SUPPLEMENTAL_DATA = 23 + KEY_UPDATE = 24 + NEXT_PROTO = 67 + MESSAGE_HASH = 254 + CHANGE_CIPHER_SPEC = 0x0101 + + +if sys.platform == "win32": + from _ssl import enum_certificates, enum_crls + +from socket import socket, SOCK_STREAM, create_connection +from socket import SOL_SOCKET, SO_TYPE, _GLOBAL_DEFAULT_TIMEOUT +import socket as _socket +import base64 # for DER-to-PEM translation +import errno +import warnings + + +socket_error = OSError # keep that public 
name in module namespace + +CHANNEL_BINDING_TYPES = ['tls-unique'] + +HAS_NEVER_CHECK_COMMON_NAME = hasattr(_ssl, 'HOSTFLAG_NEVER_CHECK_SUBJECT') + + +_RESTRICTED_SERVER_CIPHERS = _DEFAULT_CIPHERS + +CertificateError = SSLCertVerificationError + + +def _dnsname_match(dn, hostname): + """Matching according to RFC 6125, section 6.4.3 + + - Hostnames are compared lower-case. + - For IDNA, both dn and hostname must be encoded as IDN A-label (ACE). + - Partial wildcards like 'www*.example.org', multiple wildcards, sole + wildcard or wildcards in labels other then the left-most label are not + supported and a CertificateError is raised. + - A wildcard must match at least one character. + """ + if not dn: + return False + + wildcards = dn.count('*') + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + if wildcards > 1: + raise CertificateError( + "too many wildcards in certificate DNS name: {!r}.".format(dn)) + + dn_leftmost, sep, dn_remainder = dn.partition('.') + + if '*' in dn_remainder: + # Only match wildcard in leftmost segment. + raise CertificateError( + "wildcard can only be present in the leftmost label: " + "{!r}.".format(dn)) + + if not sep: + # no right side + raise CertificateError( + "sole wildcard without additional labels are not support: " + "{!r}.".format(dn)) + + if dn_leftmost != '*': + # no partial wildcard matching + raise CertificateError( + "partial wildcards in leftmost label are not supported: " + "{!r}.".format(dn)) + + hostname_leftmost, sep, hostname_remainder = hostname.partition('.') + if not hostname_leftmost or not sep: + # wildcard must match at least one char + return False + return dn_remainder.lower() == hostname_remainder.lower() + + +def _inet_paton(ipname): + """Try to convert an IP address to packed binary form + + Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6 + support. + """ + # inet_aton() also accepts strings like '1', '127.1', some also trailing + # data like '127.0.0.1 whatever'. + try: + addr = _socket.inet_aton(ipname) + except OSError: + # not an IPv4 address + pass + else: + if _socket.inet_ntoa(addr) == ipname: + # only accept injective ipnames + return addr + else: + # refuse for short IPv4 notation and additional trailing data + raise ValueError( + "{!r} is not a quad-dotted IPv4 address.".format(ipname) + ) + + try: + return _socket.inet_pton(_socket.AF_INET6, ipname) + except OSError: + raise ValueError("{!r} is neither an IPv4 nor an IP6 " + "address.".format(ipname)) + except AttributeError: + # AF_INET6 not available + pass + + raise ValueError("{!r} is not an IPv4 address.".format(ipname)) + + +def _ipaddress_match(cert_ipaddress, host_ip): + """Exact matching of IP addresses. + + RFC 6125 explicitly doesn't define an algorithm for this + (section 1.7.2 - "Out of Scope"). + """ + # OpenSSL may add a trailing newline to a subjectAltName's IP address, + # commonly with IPv6 addresses. Strip off trailing \n. + ip = _inet_paton(cert_ipaddress.rstrip()) + return ip == host_ip + + +DefaultVerifyPaths = namedtuple("DefaultVerifyPaths", + "cafile capath openssl_cafile_env openssl_cafile openssl_capath_env " + "openssl_capath") + +def get_default_verify_paths(): + """Return paths to default cafile and capath. 
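+
+    Editor's sketch (values vary by platform and OpenSSL build):
+
+    >>> get_default_verify_paths().openssl_cafile_env
+    'SSL_CERT_FILE'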
+ """ + parts = _ssl.get_default_verify_paths() + + # environment vars shadow paths + cafile = os.environ.get(parts[0], parts[1]) + capath = os.environ.get(parts[2], parts[3]) + + return DefaultVerifyPaths(cafile if os.path.isfile(cafile) else None, + capath if os.path.isdir(capath) else None, + *parts) + + +class _ASN1Object(namedtuple("_ASN1Object", "nid shortname longname oid")): + """ASN.1 object identifier lookup + """ + __slots__ = () + + def __new__(cls, oid): + return super().__new__(cls, *_txt2obj(oid, name=False)) + + @classmethod + def fromnid(cls, nid): + """Create _ASN1Object from OpenSSL numeric ID + """ + return super().__new__(cls, *_nid2obj(nid)) + + @classmethod + def fromname(cls, name): + """Create _ASN1Object from short name, long name or OID + """ + return super().__new__(cls, *_txt2obj(name, name=True)) + + +class Purpose(_ASN1Object, _Enum): + """SSLContext purpose flags with X509v3 Extended Key Usage objects + """ + SERVER_AUTH = '1.3.6.1.5.5.7.3.1' + CLIENT_AUTH = '1.3.6.1.5.5.7.3.2' + + +class SSLContext(_SSLContext): + """An SSLContext holds various SSL-related configuration options and + data, such as certificates and possibly a private key.""" + _windows_cert_stores = ("CA", "ROOT") + + sslsocket_class = None # SSLSocket is assigned later. + sslobject_class = None # SSLObject is assigned later. + + def __new__(cls, protocol=None, *args, **kwargs): + if protocol is None: + warnings.warn( + "ssl.SSLContext() without protocol argument is deprecated.", + category=DeprecationWarning, + stacklevel=2 + ) + protocol = PROTOCOL_TLS + self = _SSLContext.__new__(cls, protocol) + return self + + def _encode_hostname(self, hostname): + if hostname is None: + return None + elif isinstance(hostname, str): + return hostname.encode('idna').decode('ascii') + else: + return hostname.decode('ascii') + + def wrap_socket(self, sock, server_side=False, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, + server_hostname=None, session=None): + # SSLSocket class handles server_hostname encoding before it calls + # ctx._wrap_socket() + return self.sslsocket_class._create( + sock=sock, + server_side=server_side, + do_handshake_on_connect=do_handshake_on_connect, + suppress_ragged_eofs=suppress_ragged_eofs, + server_hostname=server_hostname, + context=self, + session=session + ) + + def wrap_bio(self, incoming, outgoing, server_side=False, + server_hostname=None, session=None): + # Need to encode server_hostname here because _wrap_bio() can only + # handle ASCII str. 
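+        # Illustrative sketch (names below are placeholders, not part of
+        # this module's code): a caller drives the returned SSLObject with a
+        # pair of memory BIOs, e.g.
+        #
+        #     incoming, outgoing = MemoryBIO(), MemoryBIO()
+        #     sslobj = ctx.wrap_bio(incoming, outgoing,
+        #                           server_hostname="example.org")
+        #     # feed received bytes in with incoming.write(...) and transmit
+        #     # whatever outgoing.read() returns after each handshake step.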
+ return self.sslobject_class._create( + incoming, outgoing, server_side=server_side, + server_hostname=self._encode_hostname(server_hostname), + session=session, context=self, + ) + + def set_npn_protocols(self, npn_protocols): + warnings.warn( + "ssl NPN is deprecated, use ALPN instead", + DeprecationWarning, + stacklevel=2 + ) + protos = bytearray() + for protocol in npn_protocols: + b = bytes(protocol, 'ascii') + if len(b) == 0 or len(b) > 255: + raise SSLError('NPN protocols must be 1 to 255 in length') + protos.append(len(b)) + protos.extend(b) + + self._set_npn_protocols(protos) + + def set_servername_callback(self, server_name_callback): + if server_name_callback is None: + self.sni_callback = None + else: + if not callable(server_name_callback): + raise TypeError("not a callable object") + + def shim_cb(sslobj, servername, sslctx): + servername = self._encode_hostname(servername) + return server_name_callback(sslobj, servername, sslctx) + + self.sni_callback = shim_cb + + def set_alpn_protocols(self, alpn_protocols): + protos = bytearray() + for protocol in alpn_protocols: + b = bytes(protocol, 'ascii') + if len(b) == 0 or len(b) > 255: + raise SSLError('ALPN protocols must be 1 to 255 in length') + protos.append(len(b)) + protos.extend(b) + + self._set_alpn_protocols(protos) + + def _load_windows_store_certs(self, storename, purpose): + try: + for cert, encoding, trust in enum_certificates(storename): + # CA certs are never PKCS#7 encoded + if encoding == "x509_asn": + if trust is True or purpose.oid in trust: + try: + self.load_verify_locations(cadata=cert) + except SSLError as exc: + warnings.warn(f"Bad certificate in Windows certificate store: {exc!s}") + except PermissionError: + warnings.warn("unable to enumerate Windows certificate store") + + def load_default_certs(self, purpose=Purpose.SERVER_AUTH): + if not isinstance(purpose, _ASN1Object): + raise TypeError(purpose) + if sys.platform == "win32": + for storename in self._windows_cert_stores: + self._load_windows_store_certs(storename, purpose) + self.set_default_verify_paths() + + if hasattr(_SSLContext, 'minimum_version'): + @property + def minimum_version(self): + return TLSVersion(super().minimum_version) + + @minimum_version.setter + def minimum_version(self, value): + if value == TLSVersion.SSLv3: + self.options &= ~Options.OP_NO_SSLv3 + super(SSLContext, SSLContext).minimum_version.__set__(self, value) + + @property + def maximum_version(self): + return TLSVersion(super().maximum_version) + + @maximum_version.setter + def maximum_version(self, value): + super(SSLContext, SSLContext).maximum_version.__set__(self, value) + + @property + def options(self): + return Options(super().options) + + @options.setter + def options(self, value): + super(SSLContext, SSLContext).options.__set__(self, value) + + if hasattr(_ssl, 'HOSTFLAG_NEVER_CHECK_SUBJECT'): + @property + def hostname_checks_common_name(self): + ncs = self._host_flags & _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT + return ncs != _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT + + @hostname_checks_common_name.setter + def hostname_checks_common_name(self, value): + if value: + self._host_flags &= ~_ssl.HOSTFLAG_NEVER_CHECK_SUBJECT + else: + self._host_flags |= _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT + else: + @property + def hostname_checks_common_name(self): + return True + + @property + def _msg_callback(self): + """TLS message callback + + The message callback provides a debugging hook to analyze TLS + connections. 
The callback is called for any TLS protocol message + (header, handshake, alert, and more), but not for application data. + Due to technical limitations, the callback can't be used to filter + traffic or to abort a connection. Any exception raised in the + callback is delayed until the handshake, read, or write operation + has been performed. + + def msg_cb(conn, direction, version, content_type, msg_type, data): + pass + + conn + :class:`SSLSocket` or :class:`SSLObject` instance + direction + ``read`` or ``write`` + version + :class:`TLSVersion` enum member or int for unknown version. For a + frame header, it's the header version. + content_type + :class:`_TLSContentType` enum member or int for unsupported + content type. + msg_type + Either a :class:`_TLSContentType` enum number for a header + message, a :class:`_TLSAlertType` enum member for an alert + message, a :class:`_TLSMessageType` enum member for other + messages, or int for unsupported message types. + data + Raw, decrypted message content as bytes + """ + inner = super()._msg_callback + if inner is not None: + return inner.user_function + else: + return None + + @_msg_callback.setter + def _msg_callback(self, callback): + if callback is None: + super(SSLContext, SSLContext)._msg_callback.__set__(self, None) + return + + if not hasattr(callback, '__call__'): + raise TypeError(f"{callback} is not callable.") + + def inner(conn, direction, version, content_type, msg_type, data): + try: + version = TLSVersion(version) + except ValueError: + pass + + try: + content_type = _TLSContentType(content_type) + except ValueError: + pass + + if content_type == _TLSContentType.HEADER: + msg_enum = _TLSContentType + elif content_type == _TLSContentType.ALERT: + msg_enum = _TLSAlertType + else: + msg_enum = _TLSMessageType + try: + msg_type = msg_enum(msg_type) + except ValueError: + pass + + return callback(conn, direction, version, + content_type, msg_type, data) + + inner.user_function = callback + + super(SSLContext, SSLContext)._msg_callback.__set__(self, inner) + + @property + def protocol(self): + return _SSLMethod(super().protocol) + + @property + def verify_flags(self): + return VerifyFlags(super().verify_flags) + + @verify_flags.setter + def verify_flags(self, value): + super(SSLContext, SSLContext).verify_flags.__set__(self, value) + + @property + def verify_mode(self): + value = super().verify_mode + try: + return VerifyMode(value) + except ValueError: + return value + + @verify_mode.setter + def verify_mode(self, value): + super(SSLContext, SSLContext).verify_mode.__set__(self, value) + + +def create_default_context(purpose=Purpose.SERVER_AUTH, *, cafile=None, + capath=None, cadata=None): + """Create a SSLContext object with default settings. + + NOTE: The protocol and settings may change anytime without prior + deprecation. The values represent a fair balance between maximum + compatibility and security. + """ + if not isinstance(purpose, _ASN1Object): + raise TypeError(purpose) + + # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION, + # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE + # by default. 
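+    # Illustrative usage sketch (host and port are placeholders): the common
+    # client-side pattern built on top of this helper is roughly
+    #
+    #     ctx = create_default_context()
+    #     with create_connection(("example.org", 443)) as sock:
+    #         with ctx.wrap_socket(sock, server_hostname="example.org") as tls:
+    #             tls.version()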
+    if purpose == Purpose.SERVER_AUTH:
+        # verify certs and host name in client mode
+        context = SSLContext(PROTOCOL_TLS_CLIENT)
+        context.verify_mode = CERT_REQUIRED
+        context.check_hostname = True
+    elif purpose == Purpose.CLIENT_AUTH:
+        context = SSLContext(PROTOCOL_TLS_SERVER)
+    else:
+        raise ValueError(purpose)
+
+    # `VERIFY_X509_PARTIAL_CHAIN` makes OpenSSL's chain building behave more
+    # like RFC 3280 and 5280, which specify that chain building stops with the
+    # first trust anchor, even if that anchor is not self-signed.
+    #
+    # `VERIFY_X509_STRICT` makes OpenSSL more conservative about the
+    # certificates it accepts, including "disabling workarounds for
+    # some broken certificates."
+    context.verify_flags |= (_ssl.VERIFY_X509_PARTIAL_CHAIN |
+                             _ssl.VERIFY_X509_STRICT)
+
+    if cafile or capath or cadata:
+        context.load_verify_locations(cafile, capath, cadata)
+    elif context.verify_mode != CERT_NONE:
+        # no explicit cafile, capath or cadata but the verify mode is
+        # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
+        # root CA certificates for the given purpose. This may fail silently.
+        context.load_default_certs(purpose)
+    # OpenSSL 1.1.1 keylog file
+    if hasattr(context, 'keylog_filename'):
+        keylogfile = os.environ.get('SSLKEYLOGFILE')
+        if keylogfile and not sys.flags.ignore_environment:
+            context.keylog_filename = keylogfile
+    return context
+
+def _create_unverified_context(protocol=None, *, cert_reqs=CERT_NONE,
+                               check_hostname=False, purpose=Purpose.SERVER_AUTH,
+                               certfile=None, keyfile=None,
+                               cafile=None, capath=None, cadata=None):
+    """Create a SSLContext object for Python stdlib modules
+
+    All Python stdlib modules shall use this function to create SSLContext
+    objects in order to keep common settings in one place. The configuration
+    is less restrictive than create_default_context()'s to increase backward
+    compatibility.
+    """
+    if not isinstance(purpose, _ASN1Object):
+        raise TypeError(purpose)
+
+    # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION,
+    # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE
+    # by default.
+    if purpose == Purpose.SERVER_AUTH:
+        # verify certs and host name in client mode
+        if protocol is None:
+            protocol = PROTOCOL_TLS_CLIENT
+    elif purpose == Purpose.CLIENT_AUTH:
+        if protocol is None:
+            protocol = PROTOCOL_TLS_SERVER
+    else:
+        raise ValueError(purpose)
+
+    context = SSLContext(protocol)
+    context.check_hostname = check_hostname
+    if cert_reqs is not None:
+        context.verify_mode = cert_reqs
+    if check_hostname:
+        context.check_hostname = True
+
+    if keyfile and not certfile:
+        raise ValueError("certfile must be specified")
+    if certfile or keyfile:
+        context.load_cert_chain(certfile, keyfile)
+
+    # load CA root certs
+    if cafile or capath or cadata:
+        context.load_verify_locations(cafile, capath, cadata)
+    elif context.verify_mode != CERT_NONE:
+        # no explicit cafile, capath or cadata but the verify mode is
+        # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
+        # root CA certificates for the given purpose. This may fail silently.
+        context.load_default_certs(purpose)
+    # OpenSSL 1.1.1 keylog file
+    if hasattr(context, 'keylog_filename'):
+        keylogfile = os.environ.get('SSLKEYLOGFILE')
+        if keylogfile and not sys.flags.ignore_environment:
+            context.keylog_filename = keylogfile
+    return context
+
+# Used by http.client if no context is explicitly passed.
+_create_default_https_context = create_default_context
+
+
+# Backwards compatibility alias, even though it's not a public name.
+_create_stdlib_context = _create_unverified_context
+
+
+class SSLObject:
+    """This class implements an interface on top of a low-level SSL object as
+    implemented by OpenSSL. This object captures the state of an SSL connection
+    but does not provide any network IO itself. IO needs to be performed
+    through separate "BIO" objects which are OpenSSL's IO abstraction layer.
+
+    This class does not have a public constructor. Instances are returned by
+    ``SSLContext.wrap_bio``. This class is typically used by framework authors
+    who want to implement asynchronous IO for SSL through memory buffers.
+
+    When compared to ``SSLSocket``, this object lacks the following features:
+
+    * Any form of network IO, including methods such as ``recv`` and ``send``.
+    * The ``do_handshake_on_connect`` and ``suppress_ragged_eofs`` machinery.
+    """
+    def __init__(self, *args, **kwargs):
+        raise TypeError(
+            f"{self.__class__.__name__} does not have a public "
+            f"constructor. Instances are returned by SSLContext.wrap_bio()."
+        )
+
+    @classmethod
+    def _create(cls, incoming, outgoing, server_side=False,
+                server_hostname=None, session=None, context=None):
+        self = cls.__new__(cls)
+        sslobj = context._wrap_bio(
+            incoming, outgoing, server_side=server_side,
+            server_hostname=server_hostname,
+            owner=self, session=session
+        )
+        self._sslobj = sslobj
+        return self
+
+    @property
+    def context(self):
+        """The SSLContext that is currently in use."""
+        return self._sslobj.context
+
+    @context.setter
+    def context(self, ctx):
+        self._sslobj.context = ctx
+
+    @property
+    def session(self):
+        """The SSLSession for client socket."""
+        return self._sslobj.session
+
+    @session.setter
+    def session(self, session):
+        self._sslobj.session = session
+
+    @property
+    def session_reused(self):
+        """Was the client session reused during handshake"""
+        return self._sslobj.session_reused
+
+    @property
+    def server_side(self):
+        """Whether this is a server-side socket."""
+        return self._sslobj.server_side
+
+    @property
+    def server_hostname(self):
+        """The currently set server hostname (for SNI), or ``None`` if no
+        server hostname is set."""
+        return self._sslobj.server_hostname
+
+    def read(self, len=1024, buffer=None):
+        """Read up to 'len' bytes from the SSL object and return them.
+
+        If 'buffer' is provided, read into this buffer and return the number of
+        bytes read.
+        """
+        if buffer is not None:
+            v = self._sslobj.read(len, buffer)
+        else:
+            v = self._sslobj.read(len)
+        return v
+
+    def write(self, data):
+        """Write 'data' to the SSL object and return the number of bytes
+        written.
+
+        The 'data' argument must support the buffer interface.
+        """
+        return self._sslobj.write(data)
+
+    def getpeercert(self, binary_form=False):
+        """Returns a formatted version of the data in the certificate provided
+        by the other end of the SSL channel.
+
+        Return None if no certificate was provided, {} if a certificate was
+        provided, but not validated.
+        """
+        return self._sslobj.getpeercert(binary_form)
+
+    def get_verified_chain(self):
+        """Returns verified certificate chain provided by the other
+        end of the SSL channel as a list of DER-encoded bytes.
+
+        If certificate verification was disabled, this method acts the same as
+        ``SSLSocket.get_unverified_chain``.
+ """ + chain = self._sslobj.get_verified_chain() + + if chain is None: + return [] + + return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] + + def get_unverified_chain(self): + """Returns raw certificate chain provided by the other + end of the SSL channel as a list of DER-encoded bytes. + """ + chain = self._sslobj.get_unverified_chain() + + if chain is None: + return [] + + return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] + + def selected_npn_protocol(self): + """Return the currently selected NPN protocol as a string, or ``None`` + if a next protocol was not negotiated or if NPN is not supported by one + of the peers.""" + warnings.warn( + "ssl NPN is deprecated, use ALPN instead", + DeprecationWarning, + stacklevel=2 + ) + + def selected_alpn_protocol(self): + """Return the currently selected ALPN protocol as a string, or ``None`` + if a next protocol was not negotiated or if ALPN is not supported by one + of the peers.""" + return self._sslobj.selected_alpn_protocol() + + def cipher(self): + """Return the currently selected cipher as a 3-tuple ``(name, + ssl_version, secret_bits)``.""" + return self._sslobj.cipher() + + def shared_ciphers(self): + """Return a list of ciphers shared by the client during the handshake or + None if this is not a valid server connection. + """ + return self._sslobj.shared_ciphers() + + def compression(self): + """Return the current compression algorithm in use, or ``None`` if + compression was not negotiated or not supported by one of the peers.""" + return self._sslobj.compression() + + def pending(self): + """Return the number of bytes that can be read immediately.""" + return self._sslobj.pending() + + def do_handshake(self): + """Start the SSL/TLS handshake.""" + self._sslobj.do_handshake() + + def unwrap(self): + """Start the SSL shutdown handshake.""" + return self._sslobj.shutdown() + + def get_channel_binding(self, cb_type="tls-unique"): + """Get channel binding data for current connection. Raise ValueError + if the requested `cb_type` is not supported. Return bytes of the data + or None if the data is not available (e.g. before the handshake).""" + return self._sslobj.get_channel_binding(cb_type) + + def version(self): + """Return a string identifying the protocol version used by the + current SSL channel. """ + return self._sslobj.version() + + def verify_client_post_handshake(self): + return self._sslobj.verify_client_post_handshake() + + +def _sslcopydoc(func): + """Copy docstring from SSLObject to SSLSocket""" + func.__doc__ = getattr(SSLObject, func.__name__).__doc__ + return func + + +class SSLSocket(socket): + """This class implements a subtype of socket.socket that wraps + the underlying OS socket in an SSL context when necessary, and + provides read and write methods over that channel. """ + + def __init__(self, *args, **kwargs): + raise TypeError( + f"{self.__class__.__name__} does not have a public " + f"constructor. Instances are returned by " + f"SSLContext.wrap_socket()." 
+ ) + + @classmethod + def _create(cls, sock, server_side=False, do_handshake_on_connect=True, + suppress_ragged_eofs=True, server_hostname=None, + context=None, session=None): + if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM: + raise NotImplementedError("only stream sockets are supported") + if server_side: + if server_hostname: + raise ValueError("server_hostname can only be specified " + "in client mode") + if session is not None: + raise ValueError("session can only be specified in " + "client mode") + if context.check_hostname and not server_hostname: + raise ValueError("check_hostname requires server_hostname") + + sock_timeout = sock.gettimeout() + kwargs = dict( + family=sock.family, type=sock.type, proto=sock.proto, + fileno=sock.fileno() + ) + self = cls.__new__(cls, **kwargs) + super(SSLSocket, self).__init__(**kwargs) + sock.detach() + # Now SSLSocket is responsible for closing the file descriptor. + try: + self._context = context + self._session = session + self._closed = False + self._sslobj = None + self.server_side = server_side + self.server_hostname = context._encode_hostname(server_hostname) + self.do_handshake_on_connect = do_handshake_on_connect + self.suppress_ragged_eofs = suppress_ragged_eofs + + # See if we are connected + try: + self.getpeername() + except OSError as e: + if e.errno != errno.ENOTCONN: + raise + connected = False + blocking = self.getblocking() + self.setblocking(False) + try: + # We are not connected so this is not supposed to block, but + # testing revealed otherwise on macOS and Windows so we do + # the non-blocking dance regardless. Our raise when any data + # is found means consuming the data is harmless. + notconn_pre_handshake_data = self.recv(1) + except OSError as e: + # EINVAL occurs for recv(1) on non-connected on unix sockets. + if e.errno not in (errno.ENOTCONN, errno.EINVAL): + raise + notconn_pre_handshake_data = b'' + self.setblocking(blocking) + if notconn_pre_handshake_data: + # This prevents pending data sent to the socket before it was + # closed from escaping to the caller who could otherwise + # presume it came through a successful TLS connection. + reason = "Closed before TLS handshake with data in recv buffer." + notconn_pre_handshake_data_error = SSLError(e.errno, reason) + # Add the SSLError attributes that _ssl.c always adds. + notconn_pre_handshake_data_error.reason = reason + notconn_pre_handshake_data_error.library = None + try: + raise notconn_pre_handshake_data_error + finally: + # Explicitly break the reference cycle. + notconn_pre_handshake_data_error = None + else: + connected = True + + self.settimeout(sock_timeout) # Must come after setblocking() calls. 
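+            # The file descriptor and timeout state now belong to this
+            # SSLSocket; if the underlying socket was already connected, the
+            # SSL object is created immediately below and the handshake is
+            # (optionally) run before returning to the caller.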
+ self._connected = connected + if connected: + # create the SSL object + self._sslobj = self._context._wrap_socket( + self, server_side, self.server_hostname, + owner=self, session=self._session, + ) + if do_handshake_on_connect: + timeout = self.gettimeout() + if timeout == 0.0: + # non-blocking + raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets") + self.do_handshake() + except: + try: + self.close() + except OSError: + pass + raise + return self + + @property + @_sslcopydoc + def context(self): + return self._context + + @context.setter + def context(self, ctx): + self._context = ctx + self._sslobj.context = ctx + + @property + @_sslcopydoc + def session(self): + if self._sslobj is not None: + return self._sslobj.session + + @session.setter + def session(self, session): + self._session = session + if self._sslobj is not None: + self._sslobj.session = session + + @property + @_sslcopydoc + def session_reused(self): + if self._sslobj is not None: + return self._sslobj.session_reused + + def dup(self): + raise NotImplementedError("Can't dup() %s instances" % + self.__class__.__name__) + + def _checkClosed(self, msg=None): + # raise an exception here if you wish to check for spurious closes + pass + + def _check_connected(self): + if not self._connected: + # getpeername() will raise ENOTCONN if the socket is really + # not connected; note that we can be connected even without + # _connected being set, e.g. if connect() first returned + # EAGAIN. + self.getpeername() + + def read(self, len=1024, buffer=None): + """Read up to LEN bytes and return them. + Return zero-length string on EOF.""" + + self._checkClosed() + if self._sslobj is None: + raise ValueError("Read on closed or unwrapped SSL socket.") + try: + if buffer is not None: + return self._sslobj.read(len, buffer) + else: + return self._sslobj.read(len) + except SSLError as x: + if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs: + if buffer is not None: + return 0 + else: + return b'' + else: + raise + + def write(self, data): + """Write DATA to the underlying SSL channel. 
Returns + number of bytes of DATA actually transmitted.""" + + self._checkClosed() + if self._sslobj is None: + raise ValueError("Write on closed or unwrapped SSL socket.") + return self._sslobj.write(data) + + @_sslcopydoc + def getpeercert(self, binary_form=False): + self._checkClosed() + self._check_connected() + return self._sslobj.getpeercert(binary_form) + + @_sslcopydoc + def get_verified_chain(self): + chain = self._sslobj.get_verified_chain() + + if chain is None: + return [] + + return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] + + @_sslcopydoc + def get_unverified_chain(self): + chain = self._sslobj.get_unverified_chain() + + if chain is None: + return [] + + return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] + + @_sslcopydoc + def selected_npn_protocol(self): + self._checkClosed() + warnings.warn( + "ssl NPN is deprecated, use ALPN instead", + DeprecationWarning, + stacklevel=2 + ) + return None + + @_sslcopydoc + def selected_alpn_protocol(self): + self._checkClosed() + if self._sslobj is None or not _ssl.HAS_ALPN: + return None + else: + return self._sslobj.selected_alpn_protocol() + + @_sslcopydoc + def cipher(self): + self._checkClosed() + if self._sslobj is None: + return None + else: + return self._sslobj.cipher() + + @_sslcopydoc + def shared_ciphers(self): + self._checkClosed() + if self._sslobj is None: + return None + else: + return self._sslobj.shared_ciphers() + + @_sslcopydoc + def compression(self): + self._checkClosed() + if self._sslobj is None: + return None + else: + return self._sslobj.compression() + + def send(self, data, flags=0): + self._checkClosed() + if self._sslobj is not None: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to send() on %s" % + self.__class__) + return self._sslobj.write(data) + else: + return super().send(data, flags) + + def sendto(self, data, flags_or_addr, addr=None): + self._checkClosed() + if self._sslobj is not None: + raise ValueError("sendto not allowed on instances of %s" % + self.__class__) + elif addr is None: + return super().sendto(data, flags_or_addr) + else: + return super().sendto(data, flags_or_addr, addr) + + def sendmsg(self, *args, **kwargs): + # Ensure programs don't send data unencrypted if they try to + # use this method. + raise NotImplementedError("sendmsg not allowed on instances of %s" % + self.__class__) + + def sendall(self, data, flags=0): + self._checkClosed() + if self._sslobj is not None: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to sendall() on %s" % + self.__class__) + count = 0 + with memoryview(data) as view, view.cast("B") as byte_view: + amount = len(byte_view) + while count < amount: + v = self.send(byte_view[count:]) + count += v + else: + return super().sendall(data, flags) + + def sendfile(self, file, offset=0, count=None): + """Send a file, possibly by using os.sendfile() if this is a + clear-text socket. Return the total number of bytes sent. 
+ """ + if self._sslobj is not None: + return self._sendfile_use_send(file, offset, count) + else: + # os.sendfile() works with plain sockets only + return super().sendfile(file, offset, count) + + def recv(self, buflen=1024, flags=0): + self._checkClosed() + if self._sslobj is not None: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to recv() on %s" % + self.__class__) + return self.read(buflen) + else: + return super().recv(buflen, flags) + + def recv_into(self, buffer, nbytes=None, flags=0): + self._checkClosed() + if nbytes is None: + if buffer is not None: + with memoryview(buffer) as view: + nbytes = view.nbytes + if not nbytes: + nbytes = 1024 + else: + nbytes = 1024 + if self._sslobj is not None: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to recv_into() on %s" % + self.__class__) + return self.read(nbytes, buffer) + else: + return super().recv_into(buffer, nbytes, flags) + + def recvfrom(self, buflen=1024, flags=0): + self._checkClosed() + if self._sslobj is not None: + raise ValueError("recvfrom not allowed on instances of %s" % + self.__class__) + else: + return super().recvfrom(buflen, flags) + + def recvfrom_into(self, buffer, nbytes=None, flags=0): + self._checkClosed() + if self._sslobj is not None: + raise ValueError("recvfrom_into not allowed on instances of %s" % + self.__class__) + else: + return super().recvfrom_into(buffer, nbytes, flags) + + def recvmsg(self, *args, **kwargs): + raise NotImplementedError("recvmsg not allowed on instances of %s" % + self.__class__) + + def recvmsg_into(self, *args, **kwargs): + raise NotImplementedError("recvmsg_into not allowed on instances of " + "%s" % self.__class__) + + @_sslcopydoc + def pending(self): + self._checkClosed() + if self._sslobj is not None: + return self._sslobj.pending() + else: + return 0 + + def shutdown(self, how): + self._checkClosed() + self._sslobj = None + super().shutdown(how) + + @_sslcopydoc + def unwrap(self): + if self._sslobj: + s = self._sslobj.shutdown() + self._sslobj = None + return s + else: + raise ValueError("No SSL wrapper around " + str(self)) + + @_sslcopydoc + def verify_client_post_handshake(self): + if self._sslobj: + return self._sslobj.verify_client_post_handshake() + else: + raise ValueError("No SSL wrapper around " + str(self)) + + def _real_close(self): + self._sslobj = None + super()._real_close() + + @_sslcopydoc + def do_handshake(self, block=False): + self._check_connected() + timeout = self.gettimeout() + try: + if timeout == 0.0 and block: + self.settimeout(None) + self._sslobj.do_handshake() + finally: + self.settimeout(timeout) + + def _real_connect(self, addr, connect_ex): + if self.server_side: + raise ValueError("can't connect in server-side mode") + # Here we assume that the socket is client-side, and not + # connected at the time of the call. We connect it, then wrap it. 
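+        # Note: the SSL object is created before the TCP connection is made;
+        # the TLS handshake itself only runs after connect() succeeds, and
+        # only automatically when do_handshake_on_connect is set.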
+ if self._connected or self._sslobj is not None: + raise ValueError("attempt to connect already-connected SSLSocket!") + self._sslobj = self.context._wrap_socket( + self, False, self.server_hostname, + owner=self, session=self._session + ) + try: + if connect_ex: + rc = super().connect_ex(addr) + else: + rc = None + super().connect(addr) + if not rc: + self._connected = True + if self.do_handshake_on_connect: + self.do_handshake() + return rc + except (OSError, ValueError): + self._sslobj = None + raise + + def connect(self, addr): + """Connects to remote ADDR, and then wraps the connection in + an SSL channel.""" + self._real_connect(addr, False) + + def connect_ex(self, addr): + """Connects to remote ADDR, and then wraps the connection in + an SSL channel.""" + return self._real_connect(addr, True) + + def accept(self): + """Accepts a new connection from a remote client, and returns + a tuple containing that new connection wrapped with a server-side + SSL channel, and the address of the remote client.""" + + newsock, addr = super().accept() + newsock = self.context.wrap_socket(newsock, + do_handshake_on_connect=self.do_handshake_on_connect, + suppress_ragged_eofs=self.suppress_ragged_eofs, + server_side=True) + return newsock, addr + + @_sslcopydoc + def get_channel_binding(self, cb_type="tls-unique"): + if self._sslobj is not None: + return self._sslobj.get_channel_binding(cb_type) + else: + if cb_type not in CHANNEL_BINDING_TYPES: + raise ValueError( + "{0} channel binding type not implemented".format(cb_type) + ) + return None + + @_sslcopydoc + def version(self): + if self._sslobj is not None: + return self._sslobj.version() + else: + return None + + +# Python does not support forward declaration of types. +SSLContext.sslsocket_class = SSLSocket +SSLContext.sslobject_class = SSLObject + + +# some utility functions + +def cert_time_to_seconds(cert_time): + """Return the time in seconds since the Epoch, given the timestring + representing the "notBefore" or "notAfter" date from a certificate + in ``"%b %d %H:%M:%S %Y %Z"`` strptime format (C locale). + + "notBefore" or "notAfter" dates must use UTC (RFC 5280). + + Month is one of: Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec + UTC should be specified as GMT (see ASN1_TIME_print()) + """ + from time import strptime + from calendar import timegm + + months = ( + "Jan","Feb","Mar","Apr","May","Jun", + "Jul","Aug","Sep","Oct","Nov","Dec" + ) + time_format = ' %d %H:%M:%S %Y GMT' # NOTE: no month, fixed GMT + try: + month_number = months.index(cert_time[:3].title()) + 1 + except ValueError: + raise ValueError('time data %r does not match ' + 'format "%%b%s"' % (cert_time, time_format)) + else: + # found valid month + tt = strptime(cert_time[3:], time_format) + # return an integer, the previous mktime()-based implementation + # returned a float (fractional seconds are always zero here). 
+ return timegm((tt[0], month_number) + tt[2:6]) + +PEM_HEADER = "-----BEGIN CERTIFICATE-----" +PEM_FOOTER = "-----END CERTIFICATE-----" + +def DER_cert_to_PEM_cert(der_cert_bytes): + """Takes a certificate in binary DER format and returns the + PEM version of it as a string.""" + + f = str(base64.standard_b64encode(der_cert_bytes), 'ASCII', 'strict') + ss = [PEM_HEADER] + ss += [f[i:i+64] for i in range(0, len(f), 64)] + ss.append(PEM_FOOTER + '\n') + return '\n'.join(ss) + +def PEM_cert_to_DER_cert(pem_cert_string): + """Takes a certificate in ASCII PEM format and returns the + DER-encoded version of it as a byte sequence""" + + if not pem_cert_string.startswith(PEM_HEADER): + raise ValueError("Invalid PEM encoding; must start with %s" + % PEM_HEADER) + if not pem_cert_string.strip().endswith(PEM_FOOTER): + raise ValueError("Invalid PEM encoding; must end with %s" + % PEM_FOOTER) + d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)] + return base64.decodebytes(d.encode('ASCII', 'strict')) + +def get_server_certificate(addr, ssl_version=PROTOCOL_TLS_CLIENT, + ca_certs=None, timeout=_GLOBAL_DEFAULT_TIMEOUT): + """Retrieve the certificate from the server at the specified address, + and return it as a PEM-encoded string. + If 'ca_certs' is specified, validate the server cert against it. + If 'ssl_version' is specified, use it in the connection attempt. + If 'timeout' is specified, use it in the connection attempt. + """ + + host, port = addr + if ca_certs is not None: + cert_reqs = CERT_REQUIRED + else: + cert_reqs = CERT_NONE + context = _create_stdlib_context(ssl_version, + cert_reqs=cert_reqs, + cafile=ca_certs) + with create_connection(addr, timeout=timeout) as sock: + with context.wrap_socket(sock, server_hostname=host) as sslsock: + dercert = sslsock.getpeercert(True) + return DER_cert_to_PEM_cert(dercert) + +def get_protocol_name(protocol_code): + return _PROTOCOL_NAMES.get(protocol_code, '') diff --git a/Python313_13_x64_Template/Lib/stat.py b/Python314_4_x64_Template/Lib/stat.py similarity index 100% rename from Python313_13_x64_Template/Lib/stat.py rename to Python314_4_x64_Template/Lib/stat.py diff --git a/Python314_4_x64_Template/Lib/statistics.py b/Python314_4_x64_Template/Lib/statistics.py new file mode 100644 index 00000000..26cf9255 --- /dev/null +++ b/Python314_4_x64_Template/Lib/statistics.py @@ -0,0 +1,1879 @@ +""" +Basic statistics module. + +This module provides functions for calculating statistics of data, including +averages, variance, and standard deviation. + +Calculating averages +-------------------- + +================== ================================================== +Function Description +================== ================================================== +mean Arithmetic mean (average) of data. +fmean Fast, floating-point arithmetic mean. +geometric_mean Geometric mean of data. +harmonic_mean Harmonic mean of data. +median Median (middle value) of data. +median_low Low median of data. +median_high High median of data. +median_grouped Median, or 50th percentile, of grouped data. +mode Mode (most common value) of data. +multimode List of modes (most common values of data). +quantiles Divide data into intervals with equal probability. 
+================== ==================================================
+
+Calculate the arithmetic mean ("the average") of data:
+
+>>> mean([-1.0, 2.5, 3.25, 5.75])
+2.625
+
+
+Calculate the standard median of discrete data:
+
+>>> median([2, 3, 4, 5])
+3.5
+
+
+Calculate the median, or 50th percentile, of data grouped into class intervals
+centred on the data values provided. E.g. if your data points are rounded to
+the nearest whole number:
+
+>>> median_grouped([2, 2, 3, 3, 3, 4]) #doctest: +ELLIPSIS
+2.8333333333...
+
+This should be interpreted in this way: you have two data points in the class
+interval 1.5-2.5, three data points in the class interval 2.5-3.5, and one in
+the class interval 3.5-4.5. The median of these data points is 2.8333...
+
+
+Calculating variability or spread
+---------------------------------
+
+================== =============================================
+Function           Description
+================== =============================================
+pvariance          Population variance of data.
+variance           Sample variance of data.
+pstdev             Population standard deviation of data.
+stdev              Sample standard deviation of data.
+================== =============================================
+
+Calculate the standard deviation of sample data:
+
+>>> stdev([2.5, 3.25, 5.5, 11.25, 11.75]) #doctest: +ELLIPSIS
+4.38961843444...
+
+If you have previously calculated the mean, you can pass it as the optional
+second argument to the four "spread" functions to avoid recalculating it:
+
+>>> data = [1, 2, 2, 4, 4, 4, 5, 6]
+>>> mu = mean(data)
+>>> pvariance(data, mu)
+2.5
+
+
+Statistics for relations between two inputs
+-------------------------------------------
+
+================== ====================================================
+Function           Description
+================== ====================================================
+covariance         Sample covariance for two variables.
+correlation        Pearson's correlation coefficient for two variables.
+linear_regression  Intercept and slope for simple linear regression.
+================== ====================================================
+
+Calculate covariance, Pearson's correlation, and simple linear regression
+for two inputs:
+
+>>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+>>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3]
+>>> covariance(x, y)
+0.75
+>>> correlation(x, y) #doctest: +ELLIPSIS
+0.31622776601...
+>>> linear_regression(x, y)
+LinearRegression(slope=0.1, intercept=1.5)
+
+
+Exceptions
+----------
+
+A single exception is defined: StatisticsError is a subclass of ValueError.
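+
+For example, requesting the mean of an empty dataset raises it:
+
+>>> mean([])
+Traceback (most recent call last):
+    ...
+statistics.StatisticsError: mean requires at least one data point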
+ +""" + +__all__ = [ + 'NormalDist', + 'StatisticsError', + 'correlation', + 'covariance', + 'fmean', + 'geometric_mean', + 'harmonic_mean', + 'kde', + 'kde_random', + 'linear_regression', + 'mean', + 'median', + 'median_grouped', + 'median_high', + 'median_low', + 'mode', + 'multimode', + 'pstdev', + 'pvariance', + 'quantiles', + 'stdev', + 'variance', +] + +import math +import numbers +import random +import sys + +from fractions import Fraction +from decimal import Decimal +from itertools import count, groupby, repeat +from bisect import bisect_left, bisect_right +from math import hypot, sqrt, fabs, exp, erfc, tau, log, fsum, sumprod +from math import isfinite, isinf, pi, cos, sin, tan, cosh, asin, atan, acos +from functools import reduce +from operator import itemgetter +from collections import Counter, namedtuple, defaultdict + +_SQRT2 = sqrt(2.0) +_random = random + +## Exceptions ############################################################## + +class StatisticsError(ValueError): + pass + + +## Measures of central tendency (averages) ################################# + +def mean(data): + """Return the sample arithmetic mean of data. + + >>> mean([1, 2, 3, 4, 4]) + 2.8 + + >>> from fractions import Fraction as F + >>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)]) + Fraction(13, 21) + + >>> from decimal import Decimal as D + >>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")]) + Decimal('0.5625') + + If ``data`` is empty, StatisticsError will be raised. + + """ + T, total, n = _sum(data) + if n < 1: + raise StatisticsError('mean requires at least one data point') + return _convert(total / n, T) + + +def fmean(data, weights=None): + """Convert data to floats and compute the arithmetic mean. + + This runs faster than the mean() function and it always returns a float. + If the input dataset is empty, it raises a StatisticsError. + + >>> fmean([3.5, 4.0, 5.25]) + 4.25 + + """ + if weights is None: + + try: + n = len(data) + except TypeError: + # Handle iterators that do not define __len__(). + counter = count() + total = fsum(map(itemgetter(0), zip(data, counter))) + n = next(counter) + else: + total = fsum(data) + + if not n: + raise StatisticsError('fmean requires at least one data point') + + return total / n + + if not isinstance(weights, (list, tuple)): + weights = list(weights) + + try: + num = sumprod(data, weights) + except ValueError: + raise StatisticsError('data and weights must be the same length') + + den = fsum(weights) + + if not den: + raise StatisticsError('sum of weights must be non-zero') + + return num / den + + +def geometric_mean(data): + """Convert data to floats and compute the geometric mean. + + Raises a StatisticsError if the input dataset is empty + or if it contains a negative value. + + Returns zero if the product of inputs is zero. + + No special efforts are made to achieve exact results. + (However, this may change in the future.) 
+
+    >>> round(geometric_mean([54, 24, 36]), 9)
+    36.0
+
+    """
+    n = 0
+    found_zero = False
+
+    def count_positive(iterable):
+        nonlocal n, found_zero
+        for n, x in enumerate(iterable, start=1):
+            if x > 0.0 or math.isnan(x):
+                yield x
+            elif x == 0.0:
+                found_zero = True
+            else:
+                raise StatisticsError('No negative inputs allowed', x)
+
+    total = fsum(map(log, count_positive(data)))
+
+    if not n:
+        raise StatisticsError('Must have a non-empty dataset')
+    if math.isnan(total):
+        return math.nan
+    if found_zero:
+        return math.nan if total == math.inf else 0.0
+
+    return exp(total / n)
+
+
+def harmonic_mean(data, weights=None):
+    """Return the harmonic mean of data.
+
+    The harmonic mean is the reciprocal of the arithmetic mean of the
+    reciprocals of the data. It can be used for averaging ratios or
+    rates, for example speeds.
+
+    Suppose a car travels 40 km/hr for 5 km and then speeds up to
+    60 km/hr for another 5 km. What is the average speed?
+
+    >>> harmonic_mean([40, 60])
+    48.0
+
+    Suppose a car travels 40 km/hr for 5 km, and when traffic clears,
+    speeds up to 60 km/hr for the remaining 30 km of the journey. What
+    is the average speed?
+
+    >>> harmonic_mean([40, 60], weights=[5, 30])
+    56.0
+
+    If ``data`` is empty, or any element is less than zero,
+    ``harmonic_mean`` will raise ``StatisticsError``.
+
+    """
+    if iter(data) is data:
+        data = list(data)
+
+    errmsg = 'harmonic mean does not support negative values'
+
+    n = len(data)
+    if n < 1:
+        raise StatisticsError('harmonic_mean requires at least one data point')
+    elif n == 1 and weights is None:
+        x = data[0]
+        if isinstance(x, (numbers.Real, Decimal)):
+            if x < 0:
+                raise StatisticsError(errmsg)
+            return x
+        else:
+            raise TypeError('unsupported type')
+
+    if weights is None:
+        weights = repeat(1, n)
+        sum_weights = n
+    else:
+        if iter(weights) is weights:
+            weights = list(weights)
+        if len(weights) != n:
+            raise StatisticsError('Number of weights does not match data size')
+        _, sum_weights, _ = _sum(w for w in _fail_neg(weights, errmsg))
+
+    try:
+        data = _fail_neg(data, errmsg)
+        T, total, count = _sum(w / x if w else 0 for w, x in zip(weights, data))
+    except ZeroDivisionError:
+        return 0
+
+    if total <= 0:
+        raise StatisticsError('Weighted sum must be positive')
+
+    return _convert(sum_weights / total, T)
+
+
+def median(data):
+    """Return the median (middle value) of numeric data.
+
+    When the number of data points is odd, return the middle data point.
+    When the number of data points is even, the median is interpolated by
+    taking the average of the two middle values:
+
+    >>> median([1, 3, 5])
+    3
+    >>> median([1, 3, 5, 7])
+    4.0
+
+    """
+    data = sorted(data)
+    n = len(data)
+    if n == 0:
+        raise StatisticsError("no median for empty data")
+    if n % 2 == 1:
+        return data[n // 2]
+    else:
+        i = n // 2
+        return (data[i - 1] + data[i]) / 2
+
+
+def median_low(data):
+    """Return the low median of numeric data.
+
+    When the number of data points is odd, the middle value is returned.
+    When it is even, the smaller of the two middle values is returned.
+
+    >>> median_low([1, 3, 5])
+    3
+    >>> median_low([1, 3, 5, 7])
+    3
+
+    """
+    # Potentially the sorting step could be replaced with a quickselect.
+    # However, it would require an excellent implementation to beat our
+    # highly optimized builtin sort.
+    data = sorted(data)
+    n = len(data)
+    if n == 0:
+        raise StatisticsError("no median for empty data")
+    if n % 2 == 1:
+        return data[n // 2]
+    else:
+        return data[n // 2 - 1]
+
+
+def median_high(data):
+    """Return the high median of data.
+
+    When the number of data points is odd, the middle value is returned.
+    When it is even, the larger of the two middle values is returned.
+
+    >>> median_high([1, 3, 5])
+    3
+    >>> median_high([1, 3, 5, 7])
+    5
+
+    """
+    data = sorted(data)
+    n = len(data)
+    if n == 0:
+        raise StatisticsError("no median for empty data")
+    return data[n // 2]
+
+
+def median_grouped(data, interval=1.0):
+    """Estimates the median for numeric data binned around the midpoints
+    of consecutive, fixed-width intervals.
+
+    The *data* can be any iterable of numeric data with each value being
+    exactly the midpoint of a bin. At least one value must be present.
+
+    The *interval* is the width of each bin.
+
+    For example, demographic information may have been summarized into
+    consecutive ten-year age groups with each group being represented
+    by the 5-year midpoints of the intervals:
+
+    >>> demographics = Counter({
+    ...     25: 172,   # 20 to 30 years old
+    ...     35: 484,   # 30 to 40 years old
+    ...     45: 387,   # 40 to 50 years old
+    ...     55:  22,   # 50 to 60 years old
+    ...     65:   6,   # 60 to 70 years old
+    ... })
+
+    The 50th percentile (median) is the 536th person out of the 1071
+    member cohort. That person is in the 30 to 40 year old age group.
+
+    The regular median() function would assume that everyone in the
+    tricenarian age group was exactly 35 years old. A more tenable
+    assumption is that the 484 members of that age group are evenly
+    distributed between 30 and 40. For that, we use median_grouped().
+
+    >>> data = list(demographics.elements())
+    >>> median(data)
+    35
+    >>> round(median_grouped(data, interval=10), 1)
+    37.5
+
+    The caller is responsible for making sure the data points are separated
+    by exact multiples of *interval*. This is essential for getting a
+    correct result. The function does not check this precondition.
+
+    Inputs may be any numeric type that can be coerced to a float during
+    the interpolation step.
+
+    """
+    data = sorted(data)
+    n = len(data)
+    if not n:
+        raise StatisticsError("no median for empty data")
+
+    # Find the value at the midpoint. Remember this corresponds to the
+    # midpoint of the class interval.
+    x = data[n // 2]
+
+    # Using O(log n) bisection, find where all the x values occur in the data.
+    # All x will lie within data[i:j].
+    i = bisect_left(data, x)
+    j = bisect_right(data, x, lo=i)
+
+    # Coerce to floats, raising a TypeError if not possible
+    try:
+        interval = float(interval)
+        x = float(x)
+    except ValueError:
+        raise TypeError('Value cannot be converted to a float')
+
+    # Interpolate the median using the formula found at:
+    # https://www.cuemath.com/data/median-of-grouped-data/
+    L = x - interval / 2.0    # Lower limit of the median interval
+    cf = i                    # Cumulative frequency of the preceding interval
+    f = j - i                 # Number of elements in the median interval
+    return L + interval * (n / 2 - cf) / f
+
+
+def mode(data):
+    """Return the most common data point from discrete or nominal data.
+
+    ``mode`` assumes discrete data, and returns a single value.  This is the
+    standard treatment of the mode as commonly taught in schools:
+
+    >>> mode([1, 1, 2, 3, 3, 3, 3, 4])
+    3
+
+    This also works with nominal (non-numeric) data:
+
+    >>> mode(["red", "blue", "blue", "red", "green", "red", "red"])
+    'red'
+
+    If there are multiple modes with the same frequency, return the first one
+    encountered:
+
+    >>> mode(['red', 'red', 'green', 'blue', 'blue'])
+    'red'
+
+    If *data* is empty, ``mode`` raises StatisticsError.
+
+    """
+    pairs = Counter(iter(data)).most_common(1)
+    try:
+        return pairs[0][0]
+    except IndexError:
+        raise StatisticsError('no mode for empty data') from None
+
+
+def multimode(data):
+    """Return a list of the most frequently occurring values.
+
+    Will return more than one result if there are multiple modes
+    or an empty list if *data* is empty.
+
+    >>> multimode('aabbbbbbbbcc')
+    ['b']
+    >>> multimode('aabbbbccddddeeffffgg')
+    ['b', 'd', 'f']
+    >>> multimode('')
+    []
+
+    """
+    counts = Counter(iter(data))
+    if not counts:
+        return []
+    maxcount = max(counts.values())
+    return [value for value, count in counts.items() if count == maxcount]
+
+
+## Measures of spread ######################################################
+
+def variance(data, xbar=None):
+    """Return the sample variance of data.
+
+    data should be an iterable of Real-valued numbers, with at least two
+    values. The optional argument xbar, if given, should be the mean of
+    the data. If it is missing or None, the mean is automatically calculated.
+
+    Use this function when your data is a sample from a population. To
+    calculate the variance from the entire population, see ``pvariance``.
+
+    Examples:
+
+    >>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
+    >>> variance(data)
+    1.3720238095238095
+
+    If you have already calculated the mean of your data, you can pass it as
+    the optional second argument ``xbar`` to avoid recalculating it:
+
+    >>> m = mean(data)
+    >>> variance(data, m)
+    1.3720238095238095
+
+    This function does not check that ``xbar`` is actually the mean of
+    ``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
+    impossible results.
+
+    Decimals and Fractions are supported:
+
+    >>> from decimal import Decimal as D
+    >>> variance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
+    Decimal('31.01875')
+
+    >>> from fractions import Fraction as F
+    >>> variance([F(1, 6), F(1, 2), F(5, 3)])
+    Fraction(67, 108)
+
+    """
+    # http://mathworld.wolfram.com/SampleVariance.html
+
+    T, ss, c, n = _ss(data, xbar)
+    if n < 2:
+        raise StatisticsError('variance requires at least two data points')
+    return _convert(ss / (n - 1), T)
+
+
+def pvariance(data, mu=None):
+    """Return the population variance of ``data``.
+
+    data should be a sequence or iterable of Real-valued numbers, with at least one
+    value. The optional argument mu, if given, should be the mean of
+    the data. If it is missing or None, the mean is automatically calculated.
+
+    Use this function to calculate the variance from the entire population.
+    To estimate the variance from a sample, the ``variance`` function is
+    usually a better choice.
+ + Examples: + + >>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25] + >>> pvariance(data) + 1.25 + + If you have already calculated the mean of the data, you can pass it as + the optional second argument to avoid recalculating it: + + >>> mu = mean(data) + >>> pvariance(data, mu) + 1.25 + + Decimals and Fractions are supported: + + >>> from decimal import Decimal as D + >>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")]) + Decimal('24.815') + + >>> from fractions import Fraction as F + >>> pvariance([F(1, 4), F(5, 4), F(1, 2)]) + Fraction(13, 72) + + """ + # http://mathworld.wolfram.com/Variance.html + + T, ss, c, n = _ss(data, mu) + if n < 1: + raise StatisticsError('pvariance requires at least one data point') + return _convert(ss / n, T) + + +def stdev(data, xbar=None): + """Return the square root of the sample variance. + + See ``variance`` for arguments and other details. + + >>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75]) + 1.0810874155219827 + + """ + T, ss, c, n = _ss(data, xbar) + if n < 2: + raise StatisticsError('stdev requires at least two data points') + mss = ss / (n - 1) + try: + mss_numerator = mss.numerator + mss_denominator = mss.denominator + except AttributeError: + raise ValueError('inf or nan encountered in data') + if issubclass(T, Decimal): + return _decimal_sqrt_of_frac(mss_numerator, mss_denominator) + return _float_sqrt_of_frac(mss_numerator, mss_denominator) + + +def pstdev(data, mu=None): + """Return the square root of the population variance. + + See ``pvariance`` for arguments and other details. + + >>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75]) + 0.986893273527251 + + """ + T, ss, c, n = _ss(data, mu) + if n < 1: + raise StatisticsError('pstdev requires at least one data point') + mss = ss / n + try: + mss_numerator = mss.numerator + mss_denominator = mss.denominator + except AttributeError: + raise ValueError('inf or nan encountered in data') + if issubclass(T, Decimal): + return _decimal_sqrt_of_frac(mss_numerator, mss_denominator) + return _float_sqrt_of_frac(mss_numerator, mss_denominator) + + +## Statistics for relations between two inputs ############################# + +def covariance(x, y, /): + """Covariance + + Return the sample covariance of two inputs *x* and *y*. Covariance + is a measure of the joint variability of two inputs. + + >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3] + >>> covariance(x, y) + 0.75 + >>> z = [9, 8, 7, 6, 5, 4, 3, 2, 1] + >>> covariance(x, z) + -7.5 + >>> covariance(z, x) + -7.5 + + """ + # https://en.wikipedia.org/wiki/Covariance + n = len(x) + if len(y) != n: + raise StatisticsError('covariance requires that both inputs have same number of data points') + if n < 2: + raise StatisticsError('covariance requires at least two data points') + xbar = fsum(x) / n + ybar = fsum(y) / n + sxy = sumprod((xi - xbar for xi in x), (yi - ybar for yi in y)) + return sxy / (n - 1) + + +def correlation(x, y, /, *, method='linear'): + """Pearson's correlation coefficient + + Return the Pearson's correlation coefficient for two inputs. Pearson's + correlation coefficient *r* takes values between -1 and +1. It measures + the strength and direction of a linear relationship. + + >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> y = [9, 8, 7, 6, 5, 4, 3, 2, 1] + >>> correlation(x, x) + 1.0 + >>> correlation(x, y) + -1.0 + + If *method* is "ranked", computes Spearman's rank correlation coefficient + for two inputs. The data is replaced by ranks. 
Ties are averaged + so that equal values receive the same rank. The resulting coefficient + measures the strength of a monotonic relationship. + + Spearman's rank correlation coefficient is appropriate for ordinal + data or for continuous data that doesn't meet the linear proportion + requirement for Pearson's correlation coefficient. + + """ + # https://en.wikipedia.org/wiki/Pearson_correlation_coefficient + # https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient + n = len(x) + if len(y) != n: + raise StatisticsError('correlation requires that both inputs have same number of data points') + if n < 2: + raise StatisticsError('correlation requires at least two data points') + if method not in {'linear', 'ranked'}: + raise ValueError(f'Unknown method: {method!r}') + + if method == 'ranked': + start = (n - 1) / -2 # Center rankings around zero + x = _rank(x, start=start) + y = _rank(y, start=start) + + else: + xbar = fsum(x) / n + ybar = fsum(y) / n + x = [xi - xbar for xi in x] + y = [yi - ybar for yi in y] + + sxy = sumprod(x, y) + sxx = sumprod(x, x) + syy = sumprod(y, y) + + try: + return sxy / _sqrtprod(sxx, syy) + except ZeroDivisionError: + raise StatisticsError('at least one of the inputs is constant') + + +LinearRegression = namedtuple('LinearRegression', ('slope', 'intercept')) + + +def linear_regression(x, y, /, *, proportional=False): + """Slope and intercept for simple linear regression. + + Return the slope and intercept of simple linear regression + parameters estimated using ordinary least squares. Simple linear + regression describes relationship between an independent variable + *x* and a dependent variable *y* in terms of a linear function: + + y = slope * x + intercept + noise + + where *slope* and *intercept* are the regression parameters that are + estimated, and noise represents the variability of the data that was + not explained by the linear regression (it is equal to the + difference between predicted and actual values of the dependent + variable). + + The parameters are returned as a named tuple. + + >>> x = [1, 2, 3, 4, 5] + >>> noise = NormalDist().samples(5, seed=42) + >>> y = [3 * x[i] + 2 + noise[i] for i in range(5)] + >>> linear_regression(x, y) #doctest: +ELLIPSIS + LinearRegression(slope=3.17495..., intercept=1.00925...) + + If *proportional* is true, the independent variable *x* and the + dependent variable *y* are assumed to be directly proportional. + The data is fit to a line passing through the origin. 
+ + Since the *intercept* will always be 0.0, the underlying linear + function simplifies to: + + y = slope * x + noise + + >>> y = [3 * x[i] + noise[i] for i in range(5)] + >>> linear_regression(x, y, proportional=True) #doctest: +ELLIPSIS + LinearRegression(slope=2.90475..., intercept=0.0) + + """ + # https://en.wikipedia.org/wiki/Simple_linear_regression + n = len(x) + if len(y) != n: + raise StatisticsError('linear regression requires that both inputs have same number of data points') + if n < 2: + raise StatisticsError('linear regression requires at least two data points') + + if not proportional: + xbar = fsum(x) / n + ybar = fsum(y) / n + x = [xi - xbar for xi in x] # List because used three times below + y = (yi - ybar for yi in y) # Generator because only used once below + + sxy = sumprod(x, y) + 0.0 # Add zero to coerce result to a float + sxx = sumprod(x, x) + + try: + slope = sxy / sxx # equivalent to: covariance(x, y) / variance(x) + except ZeroDivisionError: + raise StatisticsError('x is constant') + + intercept = 0.0 if proportional else ybar - slope * xbar + return LinearRegression(slope=slope, intercept=intercept) + + +## Kernel Density Estimation ############################################### + +_kernel_specs = {} + +def register(*kernels): + "Load the kernel's pdf, cdf, invcdf, and support into _kernel_specs." + def deco(builder): + spec = dict(zip(('pdf', 'cdf', 'invcdf', 'support'), builder())) + for kernel in kernels: + _kernel_specs[kernel] = spec + return builder + return deco + +@register('normal', 'gauss') +def normal_kernel(): + sqrt2pi = sqrt(2 * pi) + neg_sqrt2 = -sqrt(2) + pdf = lambda t: exp(-1/2 * t * t) / sqrt2pi + cdf = lambda t: 1/2 * erfc(t / neg_sqrt2) + invcdf = lambda t: _normal_dist_inv_cdf(t, 0.0, 1.0) + support = None + return pdf, cdf, invcdf, support + +@register('logistic') +def logistic_kernel(): + # 1.0 / (exp(t) + 2.0 + exp(-t)) + pdf = lambda t: 1/2 / (1.0 + cosh(t)) + cdf = lambda t: 1.0 - 1.0 / (exp(t) + 1.0) + invcdf = lambda p: log(p / (1.0 - p)) + support = None + return pdf, cdf, invcdf, support + +@register('sigmoid') +def sigmoid_kernel(): + # (2/pi) / (exp(t) + exp(-t)) + c1 = 1 / pi + c2 = 2 / pi + c3 = pi / 2 + pdf = lambda t: c1 / cosh(t) + cdf = lambda t: c2 * atan(exp(t)) + invcdf = lambda p: log(tan(p * c3)) + support = None + return pdf, cdf, invcdf, support + +@register('rectangular', 'uniform') +def rectangular_kernel(): + pdf = lambda t: 1/2 + cdf = lambda t: 1/2 * t + 1/2 + invcdf = lambda p: 2.0 * p - 1.0 + support = 1.0 + return pdf, cdf, invcdf, support + +@register('triangular') +def triangular_kernel(): + pdf = lambda t: 1.0 - abs(t) + cdf = lambda t: t*t * (1/2 if t < 0.0 else -1/2) + t + 1/2 + invcdf = lambda p: sqrt(2.0*p) - 1.0 if p < 1/2 else 1.0 - sqrt(2.0 - 2.0*p) + support = 1.0 + return pdf, cdf, invcdf, support + +@register('parabolic', 'epanechnikov') +def parabolic_kernel(): + pdf = lambda t: 3/4 * (1.0 - t * t) + cdf = lambda t: sumprod((-1/4, 3/4, 1/2), (t**3, t, 1.0)) + invcdf = lambda p: 2.0 * cos((acos(2.0*p - 1.0) + pi) / 3.0) + support = 1.0 + return pdf, cdf, invcdf, support + +def _newton_raphson(f_inv_estimate, f, f_prime, tolerance=1e-12): + def f_inv(y): + "Return x such that f(x) ≈ y within the specified tolerance." + x = f_inv_estimate(y) + while abs(diff := f(x) - y) > tolerance: + x -= diff / f_prime(x) + return x + return f_inv + +def _quartic_invcdf_estimate(p): + # A handrolled piecewise approximation. There is no magic here. 
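+    # This estimate only needs to be close: quartic_kernel() below wraps
+    # it in _newton_raphson() above, which polishes the result to within
+    # the solver's 1e-12 tolerance.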
+ sign, p = (1.0, p) if p <= 1/2 else (-1.0, 1.0 - p) + if p < 0.0106: + return ((2.0 * p) ** 0.3838 - 1.0) * sign + x = (2.0 * p) ** 0.4258865685331 - 1.0 + if p < 0.499: + x += 0.026818732 * sin(7.101753784 * p + 2.73230839482953) + return x * sign + +@register('quartic', 'biweight') +def quartic_kernel(): + pdf = lambda t: 15/16 * (1.0 - t * t) ** 2 + cdf = lambda t: sumprod((3/16, -5/8, 15/16, 1/2), + (t**5, t**3, t, 1.0)) + invcdf = _newton_raphson(_quartic_invcdf_estimate, f=cdf, f_prime=pdf) + support = 1.0 + return pdf, cdf, invcdf, support + +def _triweight_invcdf_estimate(p): + # A handrolled piecewise approximation. There is no magic here. + sign, p = (1.0, p) if p <= 1/2 else (-1.0, 1.0 - p) + x = (2.0 * p) ** 0.3400218741872791 - 1.0 + if 0.00001 < p < 0.499: + x -= 0.033 * sin(1.07 * tau * (p - 0.035)) + return x * sign + +@register('triweight') +def triweight_kernel(): + pdf = lambda t: 35/32 * (1.0 - t * t) ** 3 + cdf = lambda t: sumprod((-5/32, 21/32, -35/32, 35/32, 1/2), + (t**7, t**5, t**3, t, 1.0)) + invcdf = _newton_raphson(_triweight_invcdf_estimate, f=cdf, f_prime=pdf) + support = 1.0 + return pdf, cdf, invcdf, support + +@register('cosine') +def cosine_kernel(): + c1 = pi / 4 + c2 = pi / 2 + pdf = lambda t: c1 * cos(c2 * t) + cdf = lambda t: 1/2 * sin(c2 * t) + 1/2 + invcdf = lambda p: 2.0 * asin(2.0 * p - 1.0) / pi + support = 1.0 + return pdf, cdf, invcdf, support + +del register, normal_kernel, logistic_kernel, sigmoid_kernel +del rectangular_kernel, triangular_kernel, parabolic_kernel +del quartic_kernel, triweight_kernel, cosine_kernel + + +def kde(data, h, kernel='normal', *, cumulative=False): + """Kernel Density Estimation: Create a continuous probability density + function or cumulative distribution function from discrete samples. + + The basic idea is to smooth the data using a kernel function + to help draw inferences about a population from a sample. + + The degree of smoothing is controlled by the scaling parameter h + which is called the bandwidth. Smaller values emphasize local + features while larger values give smoother results. + + The kernel determines the relative weights of the sample data + points. Generally, the choice of kernel shape does not matter + as much as the more influential bandwidth smoothing parameter. + + Kernels that give some weight to every sample point: + + normal (gauss) + logistic + sigmoid + + Kernels that only give weight to sample points within + the bandwidth: + + rectangular (uniform) + triangular + parabolic (epanechnikov) + quartic (biweight) + triweight + cosine + + If *cumulative* is true, will return a cumulative distribution function. + + A StatisticsError will be raised if the data sequence is empty. + + Example + ------- + + Given a sample of six data points, construct a continuous + function that estimates the underlying probability density: + + >>> sample = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2] + >>> f_hat = kde(sample, h=1.5) + + Compute the area under the curve: + + >>> area = sum(f_hat(x) for x in range(-20, 20)) + >>> round(area, 4) + 1.0 + + Plot the estimated probability density function at + evenly spaced points from -6 to 10: + + >>> for x in range(-6, 11): + ... density = f_hat(x) + ... plot = ' ' * int(density * 400) + 'x' + ... print(f'{x:2}: {density:.3f} {plot}') + ... 
+ -6: 0.002 x + -5: 0.009 x + -4: 0.031 x + -3: 0.070 x + -2: 0.111 x + -1: 0.125 x + 0: 0.110 x + 1: 0.086 x + 2: 0.068 x + 3: 0.059 x + 4: 0.066 x + 5: 0.082 x + 6: 0.082 x + 7: 0.058 x + 8: 0.028 x + 9: 0.009 x + 10: 0.002 x + + Estimate P(4.5 < X <= 7.5), the probability that a new sample value + will be between 4.5 and 7.5: + + >>> cdf = kde(sample, h=1.5, cumulative=True) + >>> round(cdf(7.5) - cdf(4.5), 2) + 0.22 + + References + ---------- + + Kernel density estimation and its application: + https://www.itm-conferences.org/articles/itmconf/pdf/2018/08/itmconf_sam2018_00037.pdf + + Kernel functions in common use: + https://en.wikipedia.org/wiki/Kernel_(statistics)#kernel_functions_in_common_use + + Interactive graphical demonstration and exploration: + https://demonstrations.wolfram.com/KernelDensityEstimation/ + + Kernel estimation of cumulative distribution function of a random variable with bounded support + https://www.econstor.eu/bitstream/10419/207829/1/10.21307_stattrans-2016-037.pdf + + """ + + n = len(data) + if not n: + raise StatisticsError('Empty data sequence') + + if not isinstance(data[0], (int, float)): + raise TypeError('Data sequence must contain ints or floats') + + if h <= 0.0: + raise StatisticsError(f'Bandwidth h must be positive, not {h=!r}') + + kernel_spec = _kernel_specs.get(kernel) + if kernel_spec is None: + raise StatisticsError(f'Unknown kernel name: {kernel!r}') + K = kernel_spec['pdf'] + W = kernel_spec['cdf'] + support = kernel_spec['support'] + + if support is None: + + def pdf(x): + return sum(K((x - x_i) / h) for x_i in data) / (len(data) * h) + + def cdf(x): + return sum(W((x - x_i) / h) for x_i in data) / len(data) + + else: + + sample = sorted(data) + bandwidth = h * support + + def pdf(x): + nonlocal n, sample + if len(data) != n: + sample = sorted(data) + n = len(data) + i = bisect_left(sample, x - bandwidth) + j = bisect_right(sample, x + bandwidth) + supported = sample[i : j] + return sum(K((x - x_i) / h) for x_i in supported) / (n * h) + + def cdf(x): + nonlocal n, sample + if len(data) != n: + sample = sorted(data) + n = len(data) + i = bisect_left(sample, x - bandwidth) + j = bisect_right(sample, x + bandwidth) + supported = sample[i : j] + return sum((W((x - x_i) / h) for x_i in supported), i) / n + + if cumulative: + cdf.__doc__ = f'CDF estimate with {h=!r} and {kernel=!r}' + return cdf + + else: + pdf.__doc__ = f'PDF estimate with {h=!r} and {kernel=!r}' + return pdf + + +def kde_random(data, h, kernel='normal', *, seed=None): + """Return a function that makes a random selection from the estimated + probability density function created by kde(data, h, kernel). + + Providing a *seed* allows reproducible selections within a single + thread. The seed may be an integer, float, str, or bytes. + + A StatisticsError will be raised if the *data* sequence is empty. 
+
+    Example:
+
+    >>> data = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2]
+    >>> rand = kde_random(data, h=1.5, seed=8675309)
+    >>> new_selections = [rand() for i in range(10)]
+    >>> [round(x, 1) for x in new_selections]
+    [0.7, 6.2, 1.2, 6.9, 7.0, 1.8, 2.5, -0.5, -1.8, 5.6]
+
+    """
+    n = len(data)
+    if not n:
+        raise StatisticsError('Empty data sequence')
+
+    if not isinstance(data[0], (int, float)):
+        raise TypeError('Data sequence must contain ints or floats')
+
+    if h <= 0.0:
+        raise StatisticsError(f'Bandwidth h must be positive, not {h=!r}')
+
+    kernel_spec = _kernel_specs.get(kernel)
+    if kernel_spec is None:
+        raise StatisticsError(f'Unknown kernel name: {kernel!r}')
+    invcdf = kernel_spec['invcdf']
+
+    prng = _random.Random(seed)
+    random = prng.random
+    choice = prng.choice
+
+    def rand():
+        return choice(data) + h * invcdf(random())
+
+    rand.__doc__ = f'Random KDE selection with {h=!r} and {kernel=!r}'
+
+    return rand
+
+
+## Quantiles ###############################################################
+
+# There is no one perfect way to compute quantiles.  Here we offer
+# two methods that serve common needs.  Most other packages
+# surveyed offered at least one or both of these two, making them
+# "standard" in the sense of "widely-adopted and reproducible".
+# They are also easy to explain, easy to compute manually, and have
+# straight-forward interpretations that aren't surprising.
+
+# The default method is known as "R6", "PERCENTILE.EXC", or "expected
+# value of rank order statistics". The alternative method is known as
+# "R7", "PERCENTILE.INC", or "mode of rank order statistics".
+
+# For sample data where there is a positive probability for values
+# beyond the range of the data, the R6 exclusive method is a
+# reasonable choice.  Consider a random sample of nine values from a
+# population with a uniform distribution from 0.0 to 1.0.  The
+# distribution of the third ranked sample point is described by
+# betavariate(alpha=3, beta=7) which has mode=0.250, median=0.286, and
+# mean=0.300.  Only the latter (which corresponds with R6) gives the
+# desired cut point with 30% of the population falling below that
+# value, making it comparable to a result from an inv_cdf() function.
+# The R6 exclusive method is also idempotent.
+
+# For describing population data where the end points are known to
+# be included in the data, the R7 inclusive method is a reasonable
+# choice.  Instead of the mean, it uses the mode of the beta
+# distribution for the interior points.  Per Hyndman & Fan, "One nice
+# property is that the vertices of Q7(p) divide the range into n - 1
+# intervals, and exactly 100p% of the intervals lie to the left of
+# Q7(p) and 100(1 - p)% of the intervals lie to the right of Q7(p)."
+
+# If needed, other methods could be added.  However, for now, the
+# position is that fewer options make for easier choices and that
+# external packages can be used for anything more advanced.
+
+def quantiles(data, *, n=4, method='exclusive'):
+    """Divide *data* into *n* continuous intervals with equal probability.
+
+    Returns a list of (n - 1) cut points separating the intervals.
+
+    Set *n* to 4 for quartiles (the default).  Set *n* to 10 for deciles.
+    Set *n* to 100 for percentiles which gives the 99 cut points that
+    separate *data* into 100 equal sized groups.
+
+    The *data* can be any iterable containing sample data.
+    The cut points are linearly interpolated between data points.
+
+    If *method* is set to *inclusive*, *data* is treated as population
+    data.  The minimum value is treated as the 0th percentile and the
+    maximum value is treated as the 100th percentile.
+
+    """
+    if n < 1:
+        raise StatisticsError('n must be at least 1')
+
+    data = sorted(data)
+
+    ld = len(data)
+    if ld < 2:
+        if ld == 1:
+            return data * (n - 1)
+        raise StatisticsError('must have at least one data point')
+
+    if method == 'inclusive':
+        m = ld - 1
+        result = []
+        for i in range(1, n):
+            j, delta = divmod(i * m, n)
+            interpolated = (data[j] * (n - delta) + data[j + 1] * delta) / n
+            result.append(interpolated)
+        return result
+
+    if method == 'exclusive':
+        m = ld + 1
+        result = []
+        for i in range(1, n):
+            j = i * m // n                               # rescale i to m/n
+            j = 1 if j < 1 else ld-1 if j > ld-1 else j  # clamp to 1 .. ld-1
+            delta = i*m - j*n                            # exact integer math
+            interpolated = (data[j - 1] * (n - delta) + data[j] * delta) / n
+            result.append(interpolated)
+        return result
+
+    raise ValueError(f'Unknown method: {method!r}')
+
+
+## Normal Distribution #####################################################
+
+class NormalDist:
+    "Normal distribution of a random variable"
+    # https://en.wikipedia.org/wiki/Normal_distribution
+    # https://en.wikipedia.org/wiki/Variance#Properties
+
+    __slots__ = {
+        '_mu': 'Arithmetic mean of a normal distribution',
+        '_sigma': 'Standard deviation of a normal distribution',
+    }
+
+    def __init__(self, mu=0.0, sigma=1.0):
+        "NormalDist where mu is the mean and sigma is the standard deviation."
+        if sigma < 0.0:
+            raise StatisticsError('sigma must be non-negative')
+        self._mu = float(mu)
+        self._sigma = float(sigma)
+
+    @classmethod
+    def from_samples(cls, data):
+        "Make a normal distribution instance from sample data."
+        return cls(*_mean_stdev(data))
+
+    def samples(self, n, *, seed=None):
+        "Generate *n* samples for a given mean and standard deviation."
+        rnd = random.random if seed is None else random.Random(seed).random
+        inv_cdf = _normal_dist_inv_cdf
+        mu = self._mu
+        sigma = self._sigma
+        return [inv_cdf(rnd(), mu, sigma) for _ in repeat(None, n)]
+
+    def pdf(self, x):
+        "Probability density function.  P(x <= X < x+dx) / dx"
+        variance = self._sigma * self._sigma
+        if not variance:
+            raise StatisticsError('pdf() not defined when sigma is zero')
+        diff = x - self._mu
+        return exp(diff * diff / (-2.0 * variance)) / sqrt(tau * variance)
+
+    def cdf(self, x):
+        "Cumulative distribution function.  P(X <= x)"
+        if not self._sigma:
+            raise StatisticsError('cdf() not defined when sigma is zero')
+        return 0.5 * erfc((self._mu - x) / (self._sigma * _SQRT2))
+
+    def inv_cdf(self, p):
+        """Inverse cumulative distribution function.  x : P(X <= x) = p
+
+        Finds the value of the random variable such that the probability of
+        the variable being less than or equal to that value equals the given
+        probability.
+
+        This function is also called the percent point function or quantile
+        function.
+        """
+        if p <= 0.0 or p >= 1.0:
+            raise StatisticsError('p must be in the range 0.0 < p < 1.0')
+        return _normal_dist_inv_cdf(p, self._mu, self._sigma)
+
+    def quantiles(self, n=4):
+        """Divide into *n* continuous intervals with equal probability.
+
+        Returns a list of (n - 1) cut points separating the intervals.
+
+        Set *n* to 4 for quartiles (the default).  Set *n* to 10 for deciles.
+        Set *n* to 100 for percentiles which gives the 99 cut points that
+        separate the normal distribution into 100 equal sized groups.
+ """ + return [self.inv_cdf(i / n) for i in range(1, n)] + + def overlap(self, other): + """Compute the overlapping coefficient (OVL) between two normal distributions. + + Measures the agreement between two normal probability distributions. + Returns a value between 0.0 and 1.0 giving the overlapping area in + the two underlying probability density functions. + + >>> N1 = NormalDist(2.4, 1.6) + >>> N2 = NormalDist(3.2, 2.0) + >>> N1.overlap(N2) + 0.8035050657330205 + """ + # See: "The overlapping coefficient as a measure of agreement between + # probability distributions and point estimation of the overlap of two + # normal densities" -- Henry F. Inman and Edwin L. Bradley Jr + # http://dx.doi.org/10.1080/03610928908830127 + if not isinstance(other, NormalDist): + raise TypeError('Expected another NormalDist instance') + X, Y = self, other + if (Y._sigma, Y._mu) < (X._sigma, X._mu): # sort to assure commutativity + X, Y = Y, X + X_var, Y_var = X.variance, Y.variance + if not X_var or not Y_var: + raise StatisticsError('overlap() not defined when sigma is zero') + dv = Y_var - X_var + dm = fabs(Y._mu - X._mu) + if not dv: + return erfc(dm / (2.0 * X._sigma * _SQRT2)) + a = X._mu * Y_var - Y._mu * X_var + b = X._sigma * Y._sigma * sqrt(dm * dm + dv * log(Y_var / X_var)) + x1 = (a + b) / dv + x2 = (a - b) / dv + return 1.0 - (fabs(Y.cdf(x1) - X.cdf(x1)) + fabs(Y.cdf(x2) - X.cdf(x2))) + + def zscore(self, x): + """Compute the Standard Score. (x - mean) / stdev + + Describes *x* in terms of the number of standard deviations + above or below the mean of the normal distribution. + """ + # https://www.statisticshowto.com/probability-and-statistics/z-score/ + if not self._sigma: + raise StatisticsError('zscore() not defined when sigma is zero') + return (x - self._mu) / self._sigma + + @property + def mean(self): + "Arithmetic mean of the normal distribution." + return self._mu + + @property + def median(self): + "Return the median of the normal distribution" + return self._mu + + @property + def mode(self): + """Return the mode of the normal distribution + + The mode is the value x where which the probability density + function (pdf) takes its maximum value. + """ + return self._mu + + @property + def stdev(self): + "Standard deviation of the normal distribution." + return self._sigma + + @property + def variance(self): + "Square of the standard deviation." + return self._sigma * self._sigma + + def __add__(x1, x2): + """Add a constant or another NormalDist instance. + + If *other* is a constant, translate mu by the constant, + leaving sigma unchanged. + + If *other* is a NormalDist, add both the means and the variances. + Mathematically, this works only if the two distributions are + independent or if they are jointly normally distributed. + """ + if isinstance(x2, NormalDist): + return NormalDist(x1._mu + x2._mu, hypot(x1._sigma, x2._sigma)) + return NormalDist(x1._mu + x2, x1._sigma) + + def __sub__(x1, x2): + """Subtract a constant or another NormalDist instance. + + If *other* is a constant, translate by the constant mu, + leaving sigma unchanged. + + If *other* is a NormalDist, subtract the means and add the variances. + Mathematically, this works only if the two distributions are + independent or if they are jointly normally distributed. + """ + if isinstance(x2, NormalDist): + return NormalDist(x1._mu - x2._mu, hypot(x1._sigma, x2._sigma)) + return NormalDist(x1._mu - x2, x1._sigma) + + def __mul__(x1, x2): + """Multiply both mu and sigma by a constant. 
+
+        Used for rescaling, perhaps to change measurement units.
+        Sigma is scaled with the absolute value of the constant.
+        """
+        return NormalDist(x1._mu * x2, x1._sigma * fabs(x2))
+
+    def __truediv__(x1, x2):
+        """Divide both mu and sigma by a constant.
+
+        Used for rescaling, perhaps to change measurement units.
+        Sigma is scaled with the absolute value of the constant.
+        """
+        return NormalDist(x1._mu / x2, x1._sigma / fabs(x2))
+
+    def __pos__(x1):
+        "Return a copy of the instance."
+        return NormalDist(x1._mu, x1._sigma)
+
+    def __neg__(x1):
+        "Negates mu while keeping sigma the same."
+        return NormalDist(-x1._mu, x1._sigma)
+
+    __radd__ = __add__
+
+    def __rsub__(x1, x2):
+        "Subtract a NormalDist from a constant or another NormalDist."
+        return -(x1 - x2)
+
+    __rmul__ = __mul__
+
+    def __eq__(x1, x2):
+        "Two NormalDist objects are equal if their mu and sigma are both equal."
+        if not isinstance(x2, NormalDist):
+            return NotImplemented
+        return x1._mu == x2._mu and x1._sigma == x2._sigma
+
+    def __hash__(self):
+        "NormalDist objects hash equal if their mu and sigma are both equal."
+        return hash((self._mu, self._sigma))
+
+    def __repr__(self):
+        return f'{type(self).__name__}(mu={self._mu!r}, sigma={self._sigma!r})'
+
+    def __getstate__(self):
+        return self._mu, self._sigma
+
+    def __setstate__(self, state):
+        self._mu, self._sigma = state
+
+
+## Private utilities #######################################################
+
+def _sum(data):
+    """_sum(data) -> (type, sum, count)
+
+    Return a high-precision sum of the given numeric data as a fraction,
+    together with the type to be converted to and the count of items.
+
+    Examples
+    --------
+
+    >>> _sum([3, 2.25, 4.5, -0.5, 0.25])
+    (<class 'float'>, Fraction(19, 2), 5)
+
+    Some sources of round-off error will be avoided:
+
+    # Built-in sum returns zero.
+    >>> _sum([1e50, 1, -1e50] * 1000)
+    (<class 'float'>, Fraction(1000, 1), 3000)
+
+    Fractions and Decimals are also supported:
+
+    >>> from fractions import Fraction as F
+    >>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)])
+    (<class 'fractions.Fraction'>, Fraction(63, 20), 4)
+
+    >>> from decimal import Decimal as D
+    >>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")]
+    >>> _sum(data)
+    (<class 'decimal.Decimal'>, Fraction(6963, 10000), 4)
+
+    Mixed types are currently treated as an error, except that int is
+    allowed.
+
+    """
+    count = 0
+    types = set()
+    types_add = types.add
+    partials = {}
+    partials_get = partials.get
+
+    for typ, values in groupby(data, type):
+        types_add(typ)
+        for n, d in map(_exact_ratio, values):
+            count += 1
+            partials[d] = partials_get(d, 0) + n
+
+    if None in partials:
+        # The sum will be a NAN or INF. We can ignore all the finite
+        # partials, and just look at this special one.
+        total = partials[None]
+        assert not _isfinite(total)
+    else:
+        # Sum all the partial sums using builtin sum.
+        total = sum(Fraction(n, d) for d, n in partials.items())
+
+    T = reduce(_coerce, types, int)  # or raise TypeError
+    return (T, total, count)
+
+
+def _ss(data, c=None):
+    """Return the exact mean and sum of square deviations of sequence data.
+
+    Calculations are done in a single pass, allowing the input to be an iterator.
+
+    If given, *c* is used as the mean; otherwise, it is calculated from the data.
+    Use the *c* argument with care, as it can lead to garbage results.
+ + """ + if c is not None: + T, ssd, count = _sum((d := x - c) * d for x in data) + return (T, ssd, c, count) + + count = 0 + types = set() + types_add = types.add + sx_partials = defaultdict(int) + sxx_partials = defaultdict(int) + + for typ, values in groupby(data, type): + types_add(typ) + for n, d in map(_exact_ratio, values): + count += 1 + sx_partials[d] += n + sxx_partials[d] += n * n + + if not count: + ssd = c = Fraction(0) + + elif None in sx_partials: + # The sum will be a NAN or INF. We can ignore all the finite + # partials, and just look at this special one. + ssd = c = sx_partials[None] + assert not _isfinite(ssd) + + else: + sx = sum(Fraction(n, d) for d, n in sx_partials.items()) + sxx = sum(Fraction(n, d*d) for d, n in sxx_partials.items()) + # This formula has poor numeric properties for floats, + # but with fractions it is exact. + ssd = (count * sxx - sx * sx) / count + c = sx / count + + T = reduce(_coerce, types, int) # or raise TypeError + return (T, ssd, c, count) + + +def _isfinite(x): + try: + return x.is_finite() # Likely a Decimal. + except AttributeError: + return math.isfinite(x) # Coerces to float first. + + +def _coerce(T, S): + """Coerce types T and S to a common type, or raise TypeError. + + Coercion rules are currently an implementation detail. See the CoerceTest + test class in test_statistics for details. + + """ + # See http://bugs.python.org/issue24068. + assert T is not bool, "initial type T is bool" + # If the types are the same, no need to coerce anything. Put this + # first, so that the usual case (no coercion needed) happens as soon + # as possible. + if T is S: return T + # Mixed int & other coerce to the other type. + if S is int or S is bool: return T + if T is int: return S + # If one is a (strict) subclass of the other, coerce to the subclass. + if issubclass(S, T): return S + if issubclass(T, S): return T + # Ints coerce to the other type. + if issubclass(T, int): return S + if issubclass(S, int): return T + # Mixed fraction & float coerces to float (or float subclass). + if issubclass(T, Fraction) and issubclass(S, float): + return S + if issubclass(T, float) and issubclass(S, Fraction): + return T + # Any other combination is disallowed. + msg = "don't know how to coerce %s and %s" + raise TypeError(msg % (T.__name__, S.__name__)) + + +def _exact_ratio(x): + """Return Real number x to exact (numerator, denominator) pair. + + >>> _exact_ratio(0.25) + (1, 4) + + x is expected to be an int, Fraction, Decimal or float. + + """ + try: + return x.as_integer_ratio() + except AttributeError: + pass + except (OverflowError, ValueError): + # float NAN or INF. + assert not _isfinite(x) + return (x, None) + + try: + # x may be an Integral ABC. + return (x.numerator, x.denominator) + except AttributeError: + msg = f"can't convert type '{type(x).__name__}' to numerator/denominator" + raise TypeError(msg) + + +def _convert(value, T): + """Convert value to given numeric type T.""" + if type(value) is T: + # This covers the cases where T is Fraction, or where value is + # a NAN or INF (Decimal or float). + return value + + if issubclass(T, int) and value.denominator != 1: + T = float + + try: + # FIXME: what do we do if this overflows? 
+ return T(value) + except TypeError: + if issubclass(T, Decimal): + return T(value.numerator) / T(value.denominator) + else: + raise + + +def _fail_neg(values, errmsg='negative value'): + """Iterate over values, failing if any are less than zero.""" + for x in values: + if x < 0: + raise StatisticsError(errmsg) + yield x + + +def _rank(data, /, *, key=None, reverse=False, ties='average', start=1) -> list[float]: + """Rank order a dataset. The lowest value has rank 1. + + Ties are averaged so that equal values receive the same rank: + + >>> data = [31, 56, 31, 25, 75, 18] + >>> _rank(data) + [3.5, 5.0, 3.5, 2.0, 6.0, 1.0] + + The operation is idempotent: + + >>> _rank([3.5, 5.0, 3.5, 2.0, 6.0, 1.0]) + [3.5, 5.0, 3.5, 2.0, 6.0, 1.0] + + It is possible to rank the data in reverse order so that the + highest value has rank 1. Also, a key-function can extract + the field to be ranked: + + >>> goals = [('eagles', 45), ('bears', 48), ('lions', 44)] + >>> _rank(goals, key=itemgetter(1), reverse=True) + [2.0, 1.0, 3.0] + + Ranks are conventionally numbered starting from one; however, + setting *start* to zero allows the ranks to be used as array indices: + + >>> prize = ['Gold', 'Silver', 'Bronze', 'Certificate'] + >>> scores = [8.1, 7.3, 9.4, 8.3] + >>> [prize[int(i)] for i in _rank(scores, start=0, reverse=True)] + ['Bronze', 'Certificate', 'Gold', 'Silver'] + + """ + # If this function becomes public at some point, more thought + # needs to be given to the signature. A list of ints is + # plausible when ties is "min" or "max". When ties is "average", + # either list[float] or list[Fraction] is plausible. + + # Default handling of ties matches scipy.stats.mstats.spearmanr. + if ties != 'average': + raise ValueError(f'Unknown tie resolution method: {ties!r}') + if key is not None: + data = map(key, data) + val_pos = sorted(zip(data, count()), reverse=reverse) + i = start - 1 + result = [0] * len(val_pos) + for _, g in groupby(val_pos, key=itemgetter(0)): + group = list(g) + size = len(group) + rank = i + (size + 1) / 2 + for value, orig_pos in group: + result[orig_pos] = rank + i += size + return result + + +def _integer_sqrt_of_frac_rto(n: int, m: int) -> int: + """Square root of n/m, rounded to the nearest integer using round-to-odd.""" + # Reference: https://www.lri.fr/~melquion/doc/05-imacs17_1-expose.pdf + a = math.isqrt(n // m) + return a | (a*a*m != n) + + +# For 53 bit precision floats, the bit width used in +# _float_sqrt_of_frac() is 109. +_sqrt_bit_width: int = 2 * sys.float_info.mant_dig + 3 + + +def _float_sqrt_of_frac(n: int, m: int) -> float: + """Square root of n/m as a float, correctly rounded.""" + # See principle and proof sketch at: https://bugs.python.org/msg407078 + q = (n.bit_length() - m.bit_length() - _sqrt_bit_width) // 2 + if q >= 0: + numerator = _integer_sqrt_of_frac_rto(n, m << 2 * q) << q + denominator = 1 + else: + numerator = _integer_sqrt_of_frac_rto(n << -2 * q, m) + denominator = 1 << -q + return numerator / denominator # Convert to float + + +def _decimal_sqrt_of_frac(n: int, m: int) -> Decimal: + """Square root of n/m as a Decimal, correctly rounded.""" + # Premise: For decimal, computing (n/m).sqrt() can be off + # by 1 ulp from the correctly rounded result. + # Method: Check the result, moving up or down a step if needed. 
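+    # The correction step is exact: n/m is compared against the squared
+    # midpoint between the candidate root and its neighboring value
+    # using cross-multiplied integer arithmetic, so no rounding can
+    # creep in.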
+ if n <= 0: + if not n: + return Decimal('0.0') + n, m = -n, -m + + root = (Decimal(n) / Decimal(m)).sqrt() + nr, dr = root.as_integer_ratio() + + plus = root.next_plus() + np, dp = plus.as_integer_ratio() + # test: n / m > ((root + plus) / 2) ** 2 + if 4 * n * (dr*dp)**2 > m * (dr*np + dp*nr)**2: + return plus + + minus = root.next_minus() + nm, dm = minus.as_integer_ratio() + # test: n / m < ((root + minus) / 2) ** 2 + if 4 * n * (dr*dm)**2 < m * (dr*nm + dm*nr)**2: + return minus + + return root + + +def _mean_stdev(data): + """In one pass, compute the mean and sample standard deviation as floats.""" + T, ss, xbar, n = _ss(data) + if n < 2: + raise StatisticsError('stdev requires at least two data points') + mss = ss / (n - 1) + try: + return float(xbar), _float_sqrt_of_frac(mss.numerator, mss.denominator) + except AttributeError: + # Handle Nans and Infs gracefully + return float(xbar), float(xbar) / float(ss) + + +def _sqrtprod(x: float, y: float) -> float: + "Return sqrt(x * y) computed with improved accuracy and without overflow/underflow." + + h = sqrt(x * y) + + if not isfinite(h): + if isinf(h) and not isinf(x) and not isinf(y): + # Finite inputs overflowed, so scale down, and recompute. + scale = 2.0 ** -512 # sqrt(1 / sys.float_info.max) + return _sqrtprod(scale * x, scale * y) / scale + return h + + if not h: + if x and y: + # Non-zero inputs underflowed, so scale up, and recompute. + # Scale: 1 / sqrt(sys.float_info.min * sys.float_info.epsilon) + scale = 2.0 ** 537 + return _sqrtprod(scale * x, scale * y) / scale + return h + + # Improve accuracy with a differential correction. + # https://www.wolframalpha.com/input/?i=Maclaurin+series+sqrt%28h**2+%2B+x%29+at+x%3D0 + d = sumprod((x, h), (y, -h)) + return h + d / (2.0 * h) + + +def _normal_dist_inv_cdf(p, mu, sigma): + # There is no closed-form solution to the inverse CDF for the normal + # distribution, so we use a rational approximation instead: + # Wichura, M.J. (1988). "Algorithm AS241: The Percentage Points of the + # Normal Distribution". Applied Statistics. Blackwell Publishing. 37 + # (3): 477–484. doi:10.2307/2347330. JSTOR 2347330. 
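+    # Three rational approximations are used: one for the central
+    # region |q| <= 0.425, and two for the tails in terms of
+    # r = sqrt(-log(min(p, 1 - p))).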
+ q = p - 0.5 + + if fabs(q) <= 0.425: + r = 0.180625 - q * q + # Hash sum: 55.88319_28806_14901_4439 + num = (((((((2.50908_09287_30122_6727e+3 * r + + 3.34305_75583_58812_8105e+4) * r + + 6.72657_70927_00870_0853e+4) * r + + 4.59219_53931_54987_1457e+4) * r + + 1.37316_93765_50946_1125e+4) * r + + 1.97159_09503_06551_4427e+3) * r + + 1.33141_66789_17843_7745e+2) * r + + 3.38713_28727_96366_6080e+0) * q + den = (((((((5.22649_52788_52854_5610e+3 * r + + 2.87290_85735_72194_2674e+4) * r + + 3.93078_95800_09271_0610e+4) * r + + 2.12137_94301_58659_5867e+4) * r + + 5.39419_60214_24751_1077e+3) * r + + 6.87187_00749_20579_0830e+2) * r + + 4.23133_30701_60091_1252e+1) * r + + 1.0) + x = num / den + return mu + (x * sigma) + + r = p if q <= 0.0 else 1.0 - p + r = sqrt(-log(r)) + if r <= 5.0: + r = r - 1.6 + # Hash sum: 49.33206_50330_16102_89036 + num = (((((((7.74545_01427_83414_07640e-4 * r + + 2.27238_44989_26918_45833e-2) * r + + 2.41780_72517_74506_11770e-1) * r + + 1.27045_82524_52368_38258e+0) * r + + 3.64784_83247_63204_60504e+0) * r + + 5.76949_72214_60691_40550e+0) * r + + 4.63033_78461_56545_29590e+0) * r + + 1.42343_71107_49683_57734e+0) + den = (((((((1.05075_00716_44416_84324e-9 * r + + 5.47593_80849_95344_94600e-4) * r + + 1.51986_66563_61645_71966e-2) * r + + 1.48103_97642_74800_74590e-1) * r + + 6.89767_33498_51000_04550e-1) * r + + 1.67638_48301_83803_84940e+0) * r + + 2.05319_16266_37758_82187e+0) * r + + 1.0) + else: + r = r - 5.0 + # Hash sum: 47.52583_31754_92896_71629 + num = (((((((2.01033_43992_92288_13265e-7 * r + + 2.71155_55687_43487_57815e-5) * r + + 1.24266_09473_88078_43860e-3) * r + + 2.65321_89526_57612_30930e-2) * r + + 2.96560_57182_85048_91230e-1) * r + + 1.78482_65399_17291_33580e+0) * r + + 5.46378_49111_64114_36990e+0) * r + + 6.65790_46435_01103_77720e+0) + den = (((((((2.04426_31033_89939_78564e-15 * r + + 1.42151_17583_16445_88870e-7) * r + + 1.84631_83175_10054_68180e-5) * r + + 7.86869_13114_56132_59100e-4) * r + + 1.48753_61290_85061_48525e-2) * r + + 1.36929_88092_27358_05310e-1) * r + + 5.99832_20655_58879_37690e-1) * r + + 1.0) + + x = num / den + if q < 0.0: + x = -x + + return mu + (x * sigma) + + +# If available, use C implementation +try: + from _statistics import _normal_dist_inv_cdf +except ImportError: + pass diff --git a/Python314_4_x64_Template/Lib/string/__init__.py b/Python314_4_x64_Template/Lib/string/__init__.py new file mode 100644 index 00000000..eab5067c --- /dev/null +++ b/Python314_4_x64_Template/Lib/string/__init__.py @@ -0,0 +1,325 @@ +"""A collection of string constants. 
+ +Public module variables: + +whitespace -- a string containing all ASCII whitespace +ascii_lowercase -- a string containing all ASCII lowercase letters +ascii_uppercase -- a string containing all ASCII uppercase letters +ascii_letters -- a string containing all ASCII letters +digits -- a string containing all ASCII decimal digits +hexdigits -- a string containing all ASCII hexadecimal digits +octdigits -- a string containing all ASCII octal digits +punctuation -- a string containing all ASCII punctuation characters +printable -- a string containing all ASCII characters considered printable + +""" + +__all__ = ["ascii_letters", "ascii_lowercase", "ascii_uppercase", "capwords", + "digits", "hexdigits", "octdigits", "printable", "punctuation", + "whitespace", "Formatter", "Template"] + +import _string + +# Some strings for ctype-style character classification +whitespace = ' \t\n\r\v\f' +ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz' +ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' +ascii_letters = ascii_lowercase + ascii_uppercase +digits = '0123456789' +hexdigits = digits + 'abcdef' + 'ABCDEF' +octdigits = '01234567' +punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" +printable = digits + ascii_letters + punctuation + whitespace + +# Functions which aren't available as string methods. + +# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def". +def capwords(s, sep=None): + """capwords(s [,sep]) -> string + + Split the argument into words using split, capitalize each + word using capitalize, and join the capitalized words using + join. If the optional second argument sep is absent or None, + runs of whitespace characters are replaced by a single space + and leading and trailing whitespace are removed, otherwise + sep is used to split and join the words. + + """ + return (sep or ' ').join(map(str.capitalize, s.split(sep))) + + +#################################################################### +_sentinel_dict = {} + + +class _TemplatePattern: + # This descriptor is overwritten in ``Template._compile_pattern()``. + def __get__(self, instance, cls=None): + if cls is None: + return self + return cls._compile_pattern() +_TemplatePattern = _TemplatePattern() + + +class Template: + """A string class for supporting $-substitutions.""" + + delimiter = '$' + # r'[a-z]' matches to non-ASCII letters when used with IGNORECASE, but + # without the ASCII flag. We can't add re.ASCII to flags because of + # backward compatibility. So we use the ?a local flag and [a-z] pattern. 
+    # See https://bugs.python.org/issue31672
+    idpattern = r'(?a:[_a-z][_a-z0-9]*)'
+    braceidpattern = None
+    flags = None  # default: re.IGNORECASE
+
+    pattern = _TemplatePattern  # use a descriptor to compile the pattern
+
+    def __init_subclass__(cls):
+        super().__init_subclass__()
+        cls._compile_pattern()
+
+    @classmethod
+    def _compile_pattern(cls):
+        import re  # deferred import, for performance
+
+        pattern = cls.__dict__.get('pattern', _TemplatePattern)
+        if pattern is _TemplatePattern:
+            delim = re.escape(cls.delimiter)
+            id = cls.idpattern
+            bid = cls.braceidpattern or cls.idpattern
+            pattern = fr"""
+            {delim}(?:
+              (?P<escaped>{delim})  |   # Escape sequence of two delimiters
+              (?P<named>{id})       |   # delimiter and a Python identifier
+              {{(?P<braced>{bid})}} |   # delimiter and a braced identifier
+              (?P<invalid>)             # Other ill-formed delimiter exprs
+            )
+            """
+        if cls.flags is None:
+            cls.flags = re.IGNORECASE
+        pat = cls.pattern = re.compile(pattern, cls.flags | re.VERBOSE)
+        return pat
+
+    def __init__(self, template):
+        self.template = template
+
+    # Search for $$, $identifier, ${identifier}, and any bare $'s
+
+    def _invalid(self, mo):
+        i = mo.start('invalid')
+        lines = self.template[:i].splitlines(keepends=True)
+        if not lines:
+            colno = 1
+            lineno = 1
+        else:
+            colno = i - len(''.join(lines[:-1]))
+            lineno = len(lines)
+        raise ValueError('Invalid placeholder in string: line %d, col %d' %
+                         (lineno, colno))
+
+    def substitute(self, mapping=_sentinel_dict, /, **kws):
+        if mapping is _sentinel_dict:
+            mapping = kws
+        elif kws:
+            from collections import ChainMap
+            mapping = ChainMap(kws, mapping)
+        # Helper function for .sub()
+        def convert(mo):
+            # Check the most common path first.
+            named = mo.group('named') or mo.group('braced')
+            if named is not None:
+                return str(mapping[named])
+            if mo.group('escaped') is not None:
+                return self.delimiter
+            if mo.group('invalid') is not None:
+                self._invalid(mo)
+            raise ValueError('Unrecognized named group in pattern',
+                             self.pattern)
+        return self.pattern.sub(convert, self.template)
+
+    def safe_substitute(self, mapping=_sentinel_dict, /, **kws):
+        if mapping is _sentinel_dict:
+            mapping = kws
+        elif kws:
+            from collections import ChainMap
+            mapping = ChainMap(kws, mapping)
+        # Helper function for .sub()
+        def convert(mo):
+            named = mo.group('named') or mo.group('braced')
+            if named is not None:
+                try:
+                    return str(mapping[named])
+                except KeyError:
+                    return mo.group()
+            if mo.group('escaped') is not None:
+                return self.delimiter
+            if mo.group('invalid') is not None:
+                return mo.group()
+            raise ValueError('Unrecognized named group in pattern',
+                             self.pattern)
+        return self.pattern.sub(convert, self.template)
+
+    def is_valid(self):
+        for mo in self.pattern.finditer(self.template):
+            if mo.group('invalid') is not None:
+                return False
+            if (mo.group('named') is None
+                and mo.group('braced') is None
+                and mo.group('escaped') is None):
+                # If all the groups are None, there must be
+                # another group we're not expecting
+                raise ValueError('Unrecognized named group in pattern',
+                                 self.pattern)
+        return True
+
+    def get_identifiers(self):
+        ids = []
+        for mo in self.pattern.finditer(self.template):
+            named = mo.group('named') or mo.group('braced')
+            if named is not None and named not in ids:
+                # add a named group only the first time it appears
+                ids.append(named)
+            elif (named is None
+                  and mo.group('invalid') is None
+                  and mo.group('escaped') is None):
+                # If all the groups are None, there must be
+                # another group we're not expecting
+                raise ValueError('Unrecognized named group in pattern',
self.pattern) + return ids + + +######################################################################## +# the Formatter class +# see PEP 3101 for details and purpose of this class + +# The hard parts are reused from the C implementation. They're exposed as "_" +# prefixed methods of str. + +# The overall parser is implemented in _string.formatter_parser. +# The field name parser is implemented in _string.formatter_field_name_split + +class Formatter: + def format(self, format_string, /, *args, **kwargs): + return self.vformat(format_string, args, kwargs) + + def vformat(self, format_string, args, kwargs): + used_args = set() + result, _ = self._vformat(format_string, args, kwargs, used_args, 2) + self.check_unused_args(used_args, args, kwargs) + return result + + def _vformat(self, format_string, args, kwargs, used_args, recursion_depth, + auto_arg_index=0): + if recursion_depth < 0: + raise ValueError('Max string recursion exceeded') + result = [] + for literal_text, field_name, format_spec, conversion in \ + self.parse(format_string): + + # output the literal text + if literal_text: + result.append(literal_text) + + # if there's a field, output it + if field_name is not None: + # this is some markup, find the object and do + # the formatting + + # handle arg indexing when empty field first parts are given. + field_first, _ = _string.formatter_field_name_split(field_name) + if field_first == '': + if auto_arg_index is False: + raise ValueError('cannot switch from manual field ' + 'specification to automatic field ' + 'numbering') + field_name = str(auto_arg_index) + field_name + auto_arg_index += 1 + elif isinstance(field_first, int): + if auto_arg_index: + raise ValueError('cannot switch from automatic field ' + 'numbering to manual field ' + 'specification') + # disable auto arg incrementing, if it gets + # used later on, then an exception will be raised + auto_arg_index = False + + # given the field_name, find the object it references + # and the argument it came from + obj, arg_used = self.get_field(field_name, args, kwargs) + used_args.add(arg_used) + + # do any conversion on the resulting object + obj = self.convert_field(obj, conversion) + + # expand the format spec, if needed + format_spec, auto_arg_index = self._vformat( + format_spec, args, kwargs, + used_args, recursion_depth-1, + auto_arg_index=auto_arg_index) + + # format the object and append to the result + result.append(self.format_field(obj, format_spec)) + + return ''.join(result), auto_arg_index + + + def get_value(self, key, args, kwargs): + if isinstance(key, int): + return args[key] + else: + return kwargs[key] + + + def check_unused_args(self, used_args, args, kwargs): + pass + + + def format_field(self, value, format_spec): + return format(value, format_spec) + + + def convert_field(self, value, conversion): + # do any conversion on the resulting object + if conversion is None: + return value + elif conversion == 's': + return str(value) + elif conversion == 'r': + return repr(value) + elif conversion == 'a': + return ascii(value) + raise ValueError("Unknown conversion specifier {0!s}".format(conversion)) + + + # returns an iterable that contains tuples of the form: + # (literal_text, field_name, format_spec, conversion) + # literal_text can be zero length + # field_name can be None, in which case there's no + # object to format and output + # if field_name is not None, it is looked up, formatted + # with format_spec and conversion and then used + def parse(self, format_string): + return 
_string.formatter_parser(format_string) + + + # given a field_name, find the object it references. + # field_name: the field being looked up, e.g. "0.name" + # or "lookup[3]" + # used_args: a set of which args have been used + # args, kwargs: as passed in to vformat + def get_field(self, field_name, args, kwargs): + first, rest = _string.formatter_field_name_split(field_name) + + obj = self.get_value(first, args, kwargs) + + # loop through the rest of the field_name, doing + # getattr or getitem as needed + for is_attr, i in rest: + if is_attr: + obj = getattr(obj, i) + else: + obj = obj[i] + + return obj, first diff --git a/Python314_4_x64_Template/Lib/string/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/string/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..5281c918 Binary files /dev/null and b/Python314_4_x64_Template/Lib/string/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/string/templatelib.py b/Python314_4_x64_Template/Lib/string/templatelib.py new file mode 100644 index 00000000..81648724 --- /dev/null +++ b/Python314_4_x64_Template/Lib/string/templatelib.py @@ -0,0 +1,33 @@ +"""Support for template string literals (t-strings).""" + +t = t"{0}" +Template = type(t) +Interpolation = type(t.interpolations[0]) +del t + +def convert(obj, /, conversion): + """Convert *obj* using formatted string literal semantics.""" + if conversion is None: + return obj + if conversion == 'r': + return repr(obj) + if conversion == 's': + return str(obj) + if conversion == 'a': + return ascii(obj) + raise ValueError(f'invalid conversion specifier: {conversion}') + +def _template_unpickle(*args): + import itertools + + if len(args) != 2: + raise ValueError('Template expects tuple of length 2 to unpickle') + + strings, interpolations = args + parts = [] + for string, interpolation in itertools.zip_longest(strings, interpolations): + if string is not None: + parts.append(string) + if interpolation is not None: + parts.append(interpolation) + return Template(*parts) diff --git a/Python313_13_x64_Template/Lib/stringprep.py b/Python314_4_x64_Template/Lib/stringprep.py similarity index 100% rename from Python313_13_x64_Template/Lib/stringprep.py rename to Python314_4_x64_Template/Lib/stringprep.py diff --git a/Python314_4_x64_Template/Lib/struct.py b/Python314_4_x64_Template/Lib/struct.py new file mode 100644 index 00000000..ff98e8c4 --- /dev/null +++ b/Python314_4_x64_Template/Lib/struct.py @@ -0,0 +1,15 @@ +__all__ = [ + # Functions + 'calcsize', 'pack', 'pack_into', 'unpack', 'unpack_from', + 'iter_unpack', + + # Classes + 'Struct', + + # Exceptions + 'error' + ] + +from _struct import * +from _struct import _clearcache # noqa: F401 +from _struct import __doc__ # noqa: F401 diff --git a/Python314_4_x64_Template/Lib/subprocess.py b/Python314_4_x64_Template/Lib/subprocess.py new file mode 100644 index 00000000..52b7b711 --- /dev/null +++ b/Python314_4_x64_Template/Lib/subprocess.py @@ -0,0 +1,2257 @@ +# subprocess - Subprocesses with accessible I/O streams +# +# For more information about this module, see PEP 324. +# +# Copyright (c) 2003-2005 by Peter Astrand +# +# Licensed to PSF under a Contributor Agreement. + +r"""Subprocesses with accessible I/O streams + +This module allows you to spawn processes, connect to their +input/output/error pipes, and obtain their return codes. + +For a complete description of this module see the Python documentation. 
+ +Main API +======== +run(...): Runs a command, waits for it to complete, then returns a + CompletedProcess instance. +Popen(...): A class for flexibly executing a command in a new process + +Constants +--------- +DEVNULL: Special value that indicates that os.devnull should be used +PIPE: Special value that indicates a pipe should be created +STDOUT: Special value that indicates that stderr should go to stdout + + +Older API +========= +call(...): Runs a command, waits for it to complete, then returns + the return code. +check_call(...): Same as call() but raises CalledProcessError() + if return code is not 0 +check_output(...): Same as check_call() but returns the contents of + stdout instead of a return code +getoutput(...): Runs a command in the shell, waits for it to complete, + then returns the output +getstatusoutput(...): Runs a command in the shell, waits for it to complete, + then returns a (exitcode, output) tuple +""" + +import builtins +import errno +import io +import locale +import os +import time +import signal +import sys +import threading +import warnings +import contextlib +from time import monotonic as _time +import types + +try: + import fcntl +except ImportError: + fcntl = None + + +__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput", + "getoutput", "check_output", "run", "CalledProcessError", "DEVNULL", + "SubprocessError", "TimeoutExpired", "CompletedProcess"] + # NOTE: We intentionally exclude list2cmdline as it is + # considered an internal implementation detail. issue10838. + +# use presence of msvcrt to detect Windows-like platforms (see bpo-8110) +try: + import msvcrt +except ModuleNotFoundError: + _mswindows = False +else: + _mswindows = True + +# some platforms do not support subprocesses +_can_fork_exec = sys.platform not in {"emscripten", "wasi", "ios", "tvos", "watchos"} + +if _mswindows: + import _winapi + from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP, # noqa: F401 + STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, + STD_ERROR_HANDLE, SW_HIDE, + STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW, + STARTF_FORCEONFEEDBACK, STARTF_FORCEOFFFEEDBACK, + ABOVE_NORMAL_PRIORITY_CLASS, BELOW_NORMAL_PRIORITY_CLASS, + HIGH_PRIORITY_CLASS, IDLE_PRIORITY_CLASS, + NORMAL_PRIORITY_CLASS, REALTIME_PRIORITY_CLASS, + CREATE_NO_WINDOW, DETACHED_PROCESS, + CREATE_DEFAULT_ERROR_MODE, CREATE_BREAKAWAY_FROM_JOB) + + __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP", + "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE", + "STD_ERROR_HANDLE", "SW_HIDE", + "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW", + "STARTF_FORCEONFEEDBACK", "STARTF_FORCEOFFFEEDBACK", + "STARTUPINFO", + "ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS", + "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS", + "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS", + "CREATE_NO_WINDOW", "DETACHED_PROCESS", + "CREATE_DEFAULT_ERROR_MODE", "CREATE_BREAKAWAY_FROM_JOB"]) +else: + if _can_fork_exec: + from _posixsubprocess import fork_exec as _fork_exec + # used in methods that are called by __del__ + class _del_safe: + waitpid = os.waitpid + waitstatus_to_exitcode = os.waitstatus_to_exitcode + WIFSTOPPED = os.WIFSTOPPED + WSTOPSIG = os.WSTOPSIG + WNOHANG = os.WNOHANG + ECHILD = errno.ECHILD + else: + class _del_safe: + waitpid = None + waitstatus_to_exitcode = None + WIFSTOPPED = None + WSTOPSIG = None + WNOHANG = None + ECHILD = errno.ECHILD + + import select + import selectors + + +# Exception classes used by this module. 
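+# SubprocessError is the common base class; CalledProcessError and
+# TimeoutExpired below both derive from it, so callers can catch either
+# specific failure or the whole family with a single except clause.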
+class SubprocessError(Exception): pass + + +class CalledProcessError(SubprocessError): + """Raised when run() is called with check=True and the process + returns a non-zero exit status. + + Attributes: + cmd, returncode, stdout, stderr, output + """ + def __init__(self, returncode, cmd, output=None, stderr=None): + self.returncode = returncode + self.cmd = cmd + self.output = output + self.stderr = stderr + + def __str__(self): + if self.returncode and self.returncode < 0: + try: + return "Command '%s' died with %r." % ( + self.cmd, signal.Signals(-self.returncode)) + except ValueError: + return "Command '%s' died with unknown signal %d." % ( + self.cmd, -self.returncode) + else: + return "Command '%s' returned non-zero exit status %d." % ( + self.cmd, self.returncode) + + @property + def stdout(self): + """Alias for output attribute, to match stderr""" + return self.output + + @stdout.setter + def stdout(self, value): + # There's no obvious reason to set this, but allow it anyway so + # .stdout is a transparent alias for .output + self.output = value + + +class TimeoutExpired(SubprocessError): + """This exception is raised when the timeout expires while waiting for a + child process. + + Attributes: + cmd, output, stdout, stderr, timeout + """ + def __init__(self, cmd, timeout, output=None, stderr=None): + self.cmd = cmd + self.timeout = timeout + self.output = output + self.stderr = stderr + + def __str__(self): + return ("Command '%s' timed out after %s seconds" % + (self.cmd, self.timeout)) + + @property + def stdout(self): + return self.output + + @stdout.setter + def stdout(self, value): + # There's no obvious reason to set this, but allow it anyway so + # .stdout is a transparent alias for .output + self.output = value + + +if _mswindows: + class STARTUPINFO: + def __init__(self, *, dwFlags=0, hStdInput=None, hStdOutput=None, + hStdError=None, wShowWindow=0, lpAttributeList=None): + self.dwFlags = dwFlags + self.hStdInput = hStdInput + self.hStdOutput = hStdOutput + self.hStdError = hStdError + self.wShowWindow = wShowWindow + self.lpAttributeList = lpAttributeList or {"handle_list": []} + + def copy(self): + attr_list = self.lpAttributeList.copy() + if 'handle_list' in attr_list: + attr_list['handle_list'] = list(attr_list['handle_list']) + + return STARTUPINFO(dwFlags=self.dwFlags, + hStdInput=self.hStdInput, + hStdOutput=self.hStdOutput, + hStdError=self.hStdError, + wShowWindow=self.wShowWindow, + lpAttributeList=attr_list) + + + class Handle(int): + closed = False + + def Close(self, CloseHandle=_winapi.CloseHandle): + if not self.closed: + self.closed = True + CloseHandle(self) + + def Detach(self): + if not self.closed: + self.closed = True + return int(self) + raise ValueError("already closed") + + def __repr__(self): + return "%s(%d)" % (self.__class__.__name__, int(self)) + + __del__ = Close +else: + # When select or poll has indicated that the file is writable, + # we can write up to _PIPE_BUF bytes without risk of blocking. + # POSIX defines PIPE_BUF as >= 512. + _PIPE_BUF = getattr(select, 'PIPE_BUF', 512) + + # poll/select have the advantage of not requiring any extra file + # descriptor, contrarily to epoll/kqueue (also, they require a single + # syscall). + if hasattr(selectors, 'PollSelector'): + _PopenSelector = selectors.PollSelector + else: + _PopenSelector = selectors.SelectSelector + + +if _mswindows: + # On Windows we just need to close `Popen._handle` when we no longer need + # it, so that the kernel can free it. 
`Popen._handle` gets closed + # implicitly when the `Popen` instance is finalized (see `Handle.__del__`, + # which is calling `CloseHandle` as requested in [1]), so there is nothing + # for `_cleanup` to do. + # + # [1] https://docs.microsoft.com/en-us/windows/desktop/ProcThread/ + # creating-processes + _active = None + + def _cleanup(): + pass +else: + # This lists holds Popen instances for which the underlying process had not + # exited at the time its __del__ method got called: those processes are + # wait()ed for synchronously from _cleanup() when a new Popen object is + # created, to avoid zombie processes. + _active = [] + + def _cleanup(): + if _active is None: + return + for inst in _active[:]: + res = inst._internal_poll(_deadstate=sys.maxsize) + if res is not None: + try: + _active.remove(inst) + except ValueError: + # This can happen if two threads create a new Popen instance. + # It's harmless that it was already removed, so ignore. + pass + +PIPE = -1 +STDOUT = -2 +DEVNULL = -3 + + +# XXX This function is only used by multiprocessing and the test suite, +# but it's here so that it can be imported when Python is compiled without +# threads. + +def _optim_args_from_interpreter_flags(): + """Return a list of command-line arguments reproducing the current + optimization settings in sys.flags.""" + args = [] + value = sys.flags.optimize + if value > 0: + args.append('-' + 'O' * value) + return args + + +def _args_from_interpreter_flags(): + """Return a list of command-line arguments reproducing the current + settings in sys.flags, sys.warnoptions and sys._xoptions.""" + flag_opt_map = { + 'debug': 'd', + # 'inspect': 'i', + # 'interactive': 'i', + 'dont_write_bytecode': 'B', + 'no_site': 'S', + 'verbose': 'v', + 'bytes_warning': 'b', + 'quiet': 'q', + # -O is handled in _optim_args_from_interpreter_flags() + } + args = _optim_args_from_interpreter_flags() + for flag, opt in flag_opt_map.items(): + v = getattr(sys.flags, flag) + if v > 0: + args.append('-' + opt * v) + + if sys.flags.isolated: + args.append('-I') + else: + if sys.flags.ignore_environment: + args.append('-E') + if sys.flags.no_user_site: + args.append('-s') + if sys.flags.safe_path: + args.append('-P') + + # -W options + warnopts = sys.warnoptions[:] + xoptions = getattr(sys, '_xoptions', {}) + bytes_warning = sys.flags.bytes_warning + dev_mode = sys.flags.dev_mode + + if bytes_warning > 1: + warnopts.remove("error::BytesWarning") + elif bytes_warning: + warnopts.remove("default::BytesWarning") + if dev_mode: + warnopts.remove('default') + for opt in warnopts: + args.append('-W' + opt) + + # -X options + if dev_mode: + args.extend(('-X', 'dev')) + for opt in sorted(xoptions): + if opt == 'dev': + # handled above via sys.flags.dev_mode + continue + value = xoptions[opt] + if value is True: + arg = opt + else: + arg = '%s=%s' % (opt, value) + args.extend(('-X', arg)) + + return args + + +def _text_encoding(): + # Return default text encoding and emit EncodingWarning if + # sys.flags.warn_default_encoding is true. + if sys.flags.warn_default_encoding: + f = sys._getframe() + filename = f.f_code.co_filename + stacklevel = 2 + while f := f.f_back: + if f.f_code.co_filename != filename: + break + stacklevel += 1 + warnings.warn("'encoding' argument not specified.", + EncodingWarning, stacklevel) + + if sys.flags.utf8_mode: + return "utf-8" + else: + return locale.getencoding() + + +def call(*popenargs, timeout=None, **kwargs): + """Run command with arguments. 
Wait for command to complete or + for timeout seconds, then return the returncode attribute. + + The arguments are the same as for the Popen constructor. Example: + + retcode = call(["ls", "-l"]) + """ + with Popen(*popenargs, **kwargs) as p: + try: + return p.wait(timeout=timeout) + except: # Including KeyboardInterrupt, wait handled that. + p.kill() + # We don't call p.wait() again as p.__exit__ does that for us. + raise + + +def check_call(*popenargs, **kwargs): + """Run command with arguments. Wait for command to complete. If + the exit code was zero then return, otherwise raise + CalledProcessError. The CalledProcessError object will have the + return code in the returncode attribute. + + The arguments are the same as for the call function. Example: + + check_call(["ls", "-l"]) + """ + retcode = call(*popenargs, **kwargs) + if retcode: + cmd = kwargs.get("args") + if cmd is None: + cmd = popenargs[0] + raise CalledProcessError(retcode, cmd) + return 0 + + +def check_output(*popenargs, timeout=None, **kwargs): + r"""Run command with arguments and return its output. + + If the exit code was non-zero it raises a CalledProcessError. The + CalledProcessError object will have the return code in the returncode + attribute and output in the output attribute. + + The arguments are the same as for the Popen constructor. Example: + + >>> check_output(["ls", "-l", "/dev/null"]) + b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' + + The stdout argument is not allowed as it is used internally. + To capture standard error in the result, use stderr=STDOUT. + + >>> check_output(["/bin/sh", "-c", + ... "ls -l non_existent_file ; exit 0"], + ... stderr=STDOUT) + b'ls: non_existent_file: No such file or directory\n' + + There is an additional optional argument, "input", allowing you to + pass a string to the subprocess's stdin. If you use this argument + you may not also use the Popen constructor's "stdin" argument, as + it too will be used internally. Example: + + >>> check_output(["sed", "-e", "s/foo/bar/"], + ... input=b"when in the course of fooman events\n") + b'when in the course of barman events\n' + + By default, all communication is in bytes, and therefore any "input" + should be bytes, and the return value will be bytes. If in text mode, + any "input" should be a string, and the return value will be a string + decoded according to locale encoding, or by "encoding" if set. Text mode + is triggered by setting any of text, encoding, errors or universal_newlines. + """ + for kw in ('stdout', 'check'): + if kw in kwargs: + raise ValueError(f'{kw} argument not allowed, it will be overridden.') + + if 'input' in kwargs and kwargs['input'] is None: + # Explicitly passing input=None was previously equivalent to passing an + # empty string. That is maintained here for backwards compatibility. + if kwargs.get('universal_newlines') or kwargs.get('text') or kwargs.get('encoding') \ + or kwargs.get('errors'): + empty = '' + else: + empty = b'' + kwargs['input'] = empty + + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + **kwargs).stdout + + +class CompletedProcess(object): + """A process that has finished running. + + This is returned by run(). + + Attributes: + args: The list or str args passed to run(). + returncode: The exit code of the process, negative for signals. + stdout: The standard output (None if not captured). + stderr: The standard error (None if not captured). 
+ """ + def __init__(self, args, returncode, stdout=None, stderr=None): + self.args = args + self.returncode = returncode + self.stdout = stdout + self.stderr = stderr + + def __repr__(self): + args = ['args={!r}'.format(self.args), + 'returncode={!r}'.format(self.returncode)] + if self.stdout is not None: + args.append('stdout={!r}'.format(self.stdout)) + if self.stderr is not None: + args.append('stderr={!r}'.format(self.stderr)) + return "{}({})".format(type(self).__name__, ', '.join(args)) + + __class_getitem__ = classmethod(types.GenericAlias) + + + def check_returncode(self): + """Raise CalledProcessError if the exit code is non-zero.""" + if self.returncode: + raise CalledProcessError(self.returncode, self.args, self.stdout, + self.stderr) + + +def run(*popenargs, + input=None, capture_output=False, timeout=None, check=False, **kwargs): + """Run command with arguments and return a CompletedProcess instance. + + The returned instance will have attributes args, returncode, stdout and + stderr. By default, stdout and stderr are not captured, and those attributes + will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them, + or pass capture_output=True to capture both. + + If check is True and the exit code was non-zero, it raises a + CalledProcessError. The CalledProcessError object will have the return code + in the returncode attribute, and output & stderr attributes if those streams + were captured. + + If timeout (seconds) is given and the process takes too long, + a TimeoutExpired exception will be raised. + + There is an optional argument "input", allowing you to + pass bytes or a string to the subprocess's stdin. If you use this argument + you may not also use the Popen constructor's "stdin" argument, as + it will be used internally. + + By default, all communication is in bytes, and therefore any "input" should + be bytes, and the stdout and stderr will be bytes. If in text mode, any + "input" should be a string, and stdout and stderr will be strings decoded + according to locale encoding, or by "encoding" if set. Text mode is + triggered by setting any of text, encoding, errors or universal_newlines. + + The other arguments are the same as for the Popen constructor. + """ + if input is not None: + if kwargs.get('stdin') is not None: + raise ValueError('stdin and input arguments may not both be used.') + kwargs['stdin'] = PIPE + + if capture_output: + if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None: + raise ValueError('stdout and stderr arguments may not be used ' + 'with capture_output.') + kwargs['stdout'] = PIPE + kwargs['stderr'] = PIPE + + with Popen(*popenargs, **kwargs) as process: + try: + stdout, stderr = process.communicate(input, timeout=timeout) + except TimeoutExpired as exc: + process.kill() + if _mswindows: + # Windows accumulates the output in a single blocking + # read() call run on child threads, with the timeout + # being done in a join() on those threads. communicate() + # _after_ kill() is required to collect that and add it + # to the exception. + exc.stdout, exc.stderr = process.communicate() + else: + # POSIX _communicate already populated the output so + # far into the TimeoutExpired exception. + process.wait() + raise + except: # Including KeyboardInterrupt, communicate handled that. + process.kill() + # We don't call process.wait() as .__exit__ does that for us. 
+ raise + retcode = process.poll() + if check and retcode: + raise CalledProcessError(retcode, process.args, + output=stdout, stderr=stderr) + return CompletedProcess(process.args, retcode, stdout, stderr) + + +def list2cmdline(seq): + """ + Translate a sequence of arguments into a command line + string, using the same rules as the MS C runtime: + + 1) Arguments are delimited by white space, which is either a + space or a tab. + + 2) A string surrounded by double quotation marks is + interpreted as a single argument, regardless of white space + contained within. A quoted string can be embedded in an + argument. + + 3) A double quotation mark preceded by a backslash is + interpreted as a literal double quotation mark. + + 4) Backslashes are interpreted literally, unless they + immediately precede a double quotation mark. + + 5) If backslashes immediately precede a double quotation mark, + every pair of backslashes is interpreted as a literal + backslash. If the number of backslashes is odd, the last + backslash escapes the next double quotation mark as + described in rule 3. + """ + + # See + # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx + # or search http://msdn.microsoft.com for + # "Parsing C++ Command-Line Arguments" + result = [] + needquote = False + for arg in map(os.fsdecode, seq): + bs_buf = [] + + # Add a space to separate this argument from the others + if result: + result.append(' ') + + needquote = (" " in arg) or ("\t" in arg) or not arg + if needquote: + result.append('"') + + for c in arg: + if c == '\\': + # Don't know if we need to double yet. + bs_buf.append(c) + elif c == '"': + # Double backslashes. + result.append('\\' * len(bs_buf)*2) + bs_buf = [] + result.append('\\"') + else: + # Normal char + if bs_buf: + result.extend(bs_buf) + bs_buf = [] + result.append(c) + + # Add remaining backslashes, if any. + if bs_buf: + result.extend(bs_buf) + + if needquote: + result.extend(bs_buf) + result.append('"') + + return ''.join(result) + + +# Various tools for executing commands and looking at their output and status. +# + +def getstatusoutput(cmd, *, encoding=None, errors=None): + """Return (exitcode, output) of executing cmd in a shell. + + Execute the string 'cmd' in a shell with 'check_output' and + return a 2-tuple (status, output). The locale encoding is used + to decode the output and process newlines. + + A trailing newline is stripped from the output. + The exit status for the command can be interpreted + according to the rules for the function 'wait'. Example: + + >>> import subprocess + >>> subprocess.getstatusoutput('ls /bin/ls') + (0, '/bin/ls') + >>> subprocess.getstatusoutput('cat /bin/junk') + (1, 'cat: /bin/junk: No such file or directory') + >>> subprocess.getstatusoutput('/bin/junk') + (127, 'sh: /bin/junk: not found') + >>> subprocess.getstatusoutput('/bin/kill $$') + (-15, '') + """ + try: + data = check_output(cmd, shell=True, text=True, stderr=STDOUT, + encoding=encoding, errors=errors) + exitcode = 0 + except CalledProcessError as ex: + data = ex.output + exitcode = ex.returncode + if data[-1:] == '\n': + data = data[:-1] + return exitcode, data + +def getoutput(cmd, *, encoding=None, errors=None): + """Return output (stdout or stderr) of executing cmd in a shell. + + Like getstatusoutput(), except the exit status is ignored and the return + value is a string containing the command's output. 
Example: + + >>> import subprocess + >>> subprocess.getoutput('ls /bin/ls') + '/bin/ls' + """ + return getstatusoutput(cmd, encoding=encoding, errors=errors)[1] + + + +def _use_posix_spawn(): + """Check if posix_spawn() can be used for subprocess. + + subprocess requires a posix_spawn() implementation that properly reports + errors to the parent process, & sets errno on the following failures: + + * Process attribute actions failed. + * File actions failed. + * exec() failed. + + Prefer an implementation which can use vfork() in some cases for best + performance. + """ + if _mswindows or not hasattr(os, 'posix_spawn'): + # os.posix_spawn() is not available + return False + + if ((_env := os.environ.get('_PYTHON_SUBPROCESS_USE_POSIX_SPAWN')) in ('0', '1')): + return bool(int(_env)) + + if sys.platform in ('darwin', 'sunos5'): + # posix_spawn() is a syscall on both macOS and Solaris, + # and properly reports errors + return True + + # Check libc name and runtime libc version + try: + ver = os.confstr('CS_GNU_LIBC_VERSION') + # parse 'glibc 2.28' as ('glibc', (2, 28)) + parts = ver.split(maxsplit=1) + if len(parts) != 2: + # reject unknown format + raise ValueError + libc = parts[0] + version = tuple(map(int, parts[1].split('.'))) + + if sys.platform == 'linux' and libc == 'glibc' and version >= (2, 24): + # glibc 2.24 has a new Linux posix_spawn implementation using vfork + # which properly reports errors to the parent process. + return True + # Note: Don't use the implementation in earlier glibc because it doesn't + # use vfork (even if glibc 2.26 added a pipe to properly report errors + # to the parent process). + except (AttributeError, ValueError, OSError): + # os.confstr() or CS_GNU_LIBC_VERSION value not available + pass + + # By default, assume that posix_spawn() does not properly report errors. + return False + + +# These are primarily fail-safe knobs for negatives. A True value does not +# guarantee the given libc/syscall API will be used. +_USE_POSIX_SPAWN = _use_posix_spawn() +_HAVE_POSIX_SPAWN_CLOSEFROM = hasattr(os, 'POSIX_SPAWN_CLOSEFROM') + + +class Popen: + """ Execute a child program in a new process. + + For a complete description of the arguments see the Python documentation. + + Arguments: + args: A string, or a sequence of program arguments. + + bufsize: supplied as the buffering argument to the open() function when + creating the stdin/stdout/stderr pipe file objects + + executable: A replacement program to execute. + + stdin, stdout and stderr: These specify the executed programs' standard + input, standard output and standard error file handles, respectively. + + preexec_fn: (POSIX only) An object to be called in the child process + just before the child is executed. + + close_fds: Controls closing or inheriting of file descriptors. + + shell: If true, the command will be executed through the shell. + + cwd: Sets the current directory before the child is executed. + + env: Defines the environment variables for the new process. + + text: If true, decode stdin, stdout and stderr using the given encoding + (if set) or the system default otherwise. + + universal_newlines: Alias of text, provided for backwards compatibility. 
+ + startupinfo and creationflags (Windows only) + + restore_signals (POSIX only) + + start_new_session (POSIX only) + + process_group (POSIX only) + + group (POSIX only) + + extra_groups (POSIX only) + + user (POSIX only) + + umask (POSIX only) + + pass_fds (POSIX only) + + encoding and errors: Text mode encoding and error handling to use for + file objects stdin, stdout and stderr. + + Attributes: + stdin, stdout, stderr, pid, returncode + """ + _child_created = False # Set here since __del__ checks it + + def __init__(self, args, bufsize=-1, executable=None, + stdin=None, stdout=None, stderr=None, + preexec_fn=None, close_fds=True, + shell=False, cwd=None, env=None, universal_newlines=None, + startupinfo=None, creationflags=0, + restore_signals=True, start_new_session=False, + pass_fds=(), *, user=None, group=None, extra_groups=None, + encoding=None, errors=None, text=None, umask=-1, pipesize=-1, + process_group=None): + """Create new Popen instance.""" + if not _can_fork_exec: + raise OSError( + errno.ENOTSUP, f"{sys.platform} does not support processes." + ) + + _cleanup() + # Held while anything is calling waitpid before returncode has been + # updated to prevent clobbering returncode if wait() or poll() are + # called from multiple threads at once. After acquiring the lock, + # code must re-check self.returncode to see if another thread just + # finished a waitpid() call. + self._waitpid_lock = threading.Lock() + + self._input = None + self._communication_started = False + if bufsize is None: + bufsize = -1 # Restore default + if not isinstance(bufsize, int): + raise TypeError("bufsize must be an integer") + + if stdout is STDOUT: + raise ValueError("STDOUT can only be used for stderr") + + if pipesize is None: + pipesize = -1 # Restore default + if not isinstance(pipesize, int): + raise TypeError("pipesize must be an integer") + + if _mswindows: + if preexec_fn is not None: + raise ValueError("preexec_fn is not supported on Windows " + "platforms") + else: + # POSIX + if pass_fds and not close_fds: + warnings.warn("pass_fds overriding close_fds.", RuntimeWarning) + close_fds = True + if startupinfo is not None: + raise ValueError("startupinfo is only supported on Windows " + "platforms") + if creationflags != 0: + raise ValueError("creationflags is only supported on Windows " + "platforms") + + self.args = args + self.stdin = None + self.stdout = None + self.stderr = None + self.pid = None + self.returncode = None + self.encoding = encoding + self.errors = errors + self.pipesize = pipesize + + # Validate the combinations of text and universal_newlines + if (text is not None and universal_newlines is not None + and bool(universal_newlines) != bool(text)): + raise SubprocessError('Cannot disambiguate when both text ' + 'and universal_newlines are supplied but ' + 'different. Pass one or the other.') + + self.text_mode = encoding or errors or text or universal_newlines + if self.text_mode and encoding is None: + self.encoding = encoding = _text_encoding() + + # How long to resume waiting on a child after the first ^C. + # There is no right value for this. The purpose is to be polite + # yet remain good for interactive users trying to exit a tool. + self._sigint_wait_secs = 0.25 # 1/xkcd221.getRandomNumber() + + self._closed_child_pipe_fds = False + + if self.text_mode: + if bufsize == 1: + line_buffering = True + # Use the default buffer size for the underlying binary streams + # since they don't support line buffering. 
+ bufsize = -1 + else: + line_buffering = False + + if process_group is None: + process_group = -1 # The internal APIs are int-only + + gid = None + if group is not None: + if not hasattr(os, 'setregid'): + raise ValueError("The 'group' parameter is not supported on the " + "current platform") + + elif isinstance(group, str): + try: + import grp + except ImportError: + raise ValueError("The group parameter cannot be a string " + "on systems without the grp module") + + gid = grp.getgrnam(group).gr_gid + elif isinstance(group, int): + gid = group + else: + raise TypeError("Group must be a string or an integer, not {}" + .format(type(group))) + + if gid < 0: + raise ValueError(f"Group ID cannot be negative, got {gid}") + + gids = None + if extra_groups is not None: + if not hasattr(os, 'setgroups'): + raise ValueError("The 'extra_groups' parameter is not " + "supported on the current platform") + + elif isinstance(extra_groups, str): + raise ValueError("Groups must be a list, not a string") + + gids = [] + for extra_group in extra_groups: + if isinstance(extra_group, str): + try: + import grp + except ImportError: + raise ValueError("Items in extra_groups cannot be " + "strings on systems without the " + "grp module") + + gids.append(grp.getgrnam(extra_group).gr_gid) + elif isinstance(extra_group, int): + gids.append(extra_group) + else: + raise TypeError("Items in extra_groups must be a string " + "or integer, not {}" + .format(type(extra_group))) + + # make sure that the gids are all positive here so we can do less + # checking in the C code + for gid_check in gids: + if gid_check < 0: + raise ValueError(f"Group ID cannot be negative, got {gid_check}") + + uid = None + if user is not None: + if not hasattr(os, 'setreuid'): + raise ValueError("The 'user' parameter is not supported on " + "the current platform") + + elif isinstance(user, str): + try: + import pwd + except ImportError: + raise ValueError("The user parameter cannot be a string " + "on systems without the pwd module") + uid = pwd.getpwnam(user).pw_uid + elif isinstance(user, int): + uid = user + else: + raise TypeError("User must be a string or an integer") + + if uid < 0: + raise ValueError(f"User ID cannot be negative, got {uid}") + + # Input and output objects. The general principle is like + # this: + # + # Parent Child + # ------ ----- + # p2cwrite ---stdin---> p2cread + # c2pread <--stdout--- c2pwrite + # errread <--stderr--- errwrite + # + # On POSIX, the child objects are file descriptors. On + # Windows, these are Windows file handles. The parent objects + # are file descriptors on both platforms. The parent objects + # are -1 when not using PIPEs. The child objects are -1 + # when not redirecting. + + (p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) = self._get_handles(stdin, stdout, stderr) + + # From here on, raising exceptions may cause file descriptor leakage + + # We wrap OS handles *before* launching the child, otherwise a + # quickly terminating child could make our fds unwrappable + # (see #8458). 
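+        # On Windows the parent-side pipe ends are _winapi handles at this
+        # point; msvcrt.open_osfhandle() converts them to C-runtime file
+        # descriptors so the io.open() calls below can wrap both platforms'
+        # ends uniformly.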
+ + if _mswindows: + if p2cwrite != -1: + p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0) + if c2pread != -1: + c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0) + if errread != -1: + errread = msvcrt.open_osfhandle(errread.Detach(), 0) + + try: + if p2cwrite != -1: + self.stdin = io.open(p2cwrite, 'wb', bufsize) + if self.text_mode: + self.stdin = io.TextIOWrapper(self.stdin, write_through=True, + line_buffering=line_buffering, + encoding=encoding, errors=errors) + if c2pread != -1: + self.stdout = io.open(c2pread, 'rb', bufsize) + if self.text_mode: + self.stdout = io.TextIOWrapper(self.stdout, + encoding=encoding, errors=errors) + if errread != -1: + self.stderr = io.open(errread, 'rb', bufsize) + if self.text_mode: + self.stderr = io.TextIOWrapper(self.stderr, + encoding=encoding, errors=errors) + + self._execute_child(args, executable, preexec_fn, close_fds, + pass_fds, cwd, env, + startupinfo, creationflags, shell, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite, + restore_signals, + gid, gids, uid, umask, + start_new_session, process_group) + except: + # Cleanup if the child failed starting. + for f in filter(None, (self.stdin, self.stdout, self.stderr)): + try: + f.close() + except OSError: + pass # Ignore EBADF or other errors. + + if not self._closed_child_pipe_fds: + to_close = [] + if stdin == PIPE: + to_close.append(p2cread) + if stdout == PIPE: + to_close.append(c2pwrite) + if stderr == PIPE: + to_close.append(errwrite) + if hasattr(self, '_devnull'): + to_close.append(self._devnull) + for fd in to_close: + try: + if _mswindows and isinstance(fd, Handle): + fd.Close() + else: + os.close(fd) + except OSError: + pass + + raise + + def __repr__(self): + obj_repr = ( + f"<{self.__class__.__name__}: " + f"returncode: {self.returncode} args: {self.args!r}>" + ) + if len(obj_repr) > 80: + obj_repr = obj_repr[:76] + "...>" + return obj_repr + + __class_getitem__ = classmethod(types.GenericAlias) + + @property + def universal_newlines(self): + # universal_newlines as retained as an alias of text_mode for API + # compatibility. bpo-31756 + return self.text_mode + + @universal_newlines.setter + def universal_newlines(self, universal_newlines): + self.text_mode = bool(universal_newlines) + + def _translate_newlines(self, data, encoding, errors): + data = data.decode(encoding, errors) + return data.replace("\r\n", "\n").replace("\r", "\n") + + def __enter__(self): + return self + + def __exit__(self, exc_type, value, traceback): + if self.stdout: + self.stdout.close() + if self.stderr: + self.stderr.close() + try: # Flushing a BufferedWriter may raise an error + if self.stdin: + self.stdin.close() + finally: + if exc_type == KeyboardInterrupt: + # https://bugs.python.org/issue25942 + # In the case of a KeyboardInterrupt we assume the SIGINT + # was also already sent to our child processes. We can't + # block indefinitely as that is not user friendly. + # If we have not already waited a brief amount of time in + # an interrupted .wait() or .communicate() call, do so here + # for consistency. + if self._sigint_wait_secs > 0: + try: + self._wait(timeout=self._sigint_wait_secs) + except TimeoutExpired: + pass + self._sigint_wait_secs = 0 # Note that this has been done. + else: + # Wait for the process to terminate, to avoid zombies. + self.wait() + + def __del__(self, _maxsize=sys.maxsize, _warn=warnings.warn): + if not self._child_created: + # We didn't get to successfully create a child process. 
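+            # (Popen.__init__ raised before _execute_child could mark the
+            # child as created, so there is nothing to poll or warn about.)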
+ return + if self.returncode is None: + # Not reading subprocess exit status creates a zombie process which + # is only destroyed at the parent python process exit + _warn("subprocess %s is still running" % self.pid, + ResourceWarning, source=self) + # In case the child hasn't been waited on, check if it's done. + self._internal_poll(_deadstate=_maxsize) + if self.returncode is None and _active is not None: + # Child is still running, keep us alive until we can wait on it. + _active.append(self) + + def _get_devnull(self): + if not hasattr(self, '_devnull'): + self._devnull = os.open(os.devnull, os.O_RDWR) + return self._devnull + + def _stdin_write(self, input): + if input: + try: + self.stdin.write(input) + except BrokenPipeError: + pass # communicate() must ignore broken pipe errors. + except OSError as exc: + if exc.errno == errno.EINVAL: + # bpo-19612, bpo-30418: On Windows, stdin.write() fails + # with EINVAL if the child process exited or if the child + # process is still running but closed the pipe. + pass + else: + raise + + try: + self.stdin.close() + except BrokenPipeError: + pass # communicate() must ignore broken pipe errors. + except OSError as exc: + if exc.errno == errno.EINVAL: + pass + else: + raise + + def communicate(self, input=None, timeout=None): + """Interact with process: Send data to stdin and close it. + Read data from stdout and stderr, until end-of-file is + reached. Wait for process to terminate. + + The optional "input" argument should be data to be sent to the + child process, or None, if no data should be sent to the child. + communicate() returns a tuple (stdout, stderr). + + By default, all communication is in bytes, and therefore any + "input" should be bytes, and the (stdout, stderr) will be bytes. + If in text mode (indicated by self.text_mode), any "input" should + be a string, and (stdout, stderr) will be strings decoded + according to locale encoding, or by "encoding" if set. Text mode + is triggered by setting any of text, encoding, errors or + universal_newlines. + """ + + if self._communication_started and input: + raise ValueError("Cannot send input after starting communication") + + # Optimization: If we are not worried about timeouts, we haven't + # started communicating, and we have one or zero pipes, using select() + # or threads is unnecessary. + if (timeout is None and not self._communication_started and + [self.stdin, self.stdout, self.stderr].count(None) >= 2): + stdout = None + stderr = None + if self.stdin: + self._stdin_write(input) + elif self.stdout: + stdout = self.stdout.read() + self.stdout.close() + elif self.stderr: + stderr = self.stderr.read() + self.stderr.close() + self.wait() + else: + if timeout is not None: + endtime = _time() + timeout + else: + endtime = None + + try: + stdout, stderr = self._communicate(input, endtime, timeout) + except KeyboardInterrupt: + # https://bugs.python.org/issue25942 + # See the detailed comment in .wait(). + if timeout is not None: + sigint_timeout = min(self._sigint_wait_secs, + self._remaining_time(endtime)) + else: + sigint_timeout = self._sigint_wait_secs + self._sigint_wait_secs = 0 # nothing else should wait. 
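+                # Give the child a brief window to exit on the SIGINT it
+                # presumably also received, then re-raise whether or not it
+                # actually terminated in time.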
+ try: + self._wait(timeout=sigint_timeout) + except TimeoutExpired: + pass + raise # resume the KeyboardInterrupt + + finally: + self._communication_started = True + try: + sts = self.wait(timeout=self._remaining_time(endtime)) + except TimeoutExpired as exc: + exc.timeout = timeout + raise + + return (stdout, stderr) + + + def poll(self): + """Check if child process has terminated. Set and return returncode + attribute.""" + return self._internal_poll() + + + def _remaining_time(self, endtime): + """Convenience for _communicate when computing timeouts.""" + if endtime is None: + return None + else: + return endtime - _time() + + + def _check_timeout(self, endtime, orig_timeout, stdout_seq, stderr_seq, + skip_check_and_raise=False): + """Convenience for checking if a timeout has expired.""" + if endtime is None: + return + if skip_check_and_raise or _time() > endtime: + raise TimeoutExpired( + self.args, orig_timeout, + output=b''.join(stdout_seq) if stdout_seq else None, + stderr=b''.join(stderr_seq) if stderr_seq else None) + + + def wait(self, timeout=None): + """Wait for child process to terminate; returns self.returncode.""" + if timeout is not None: + endtime = _time() + timeout + try: + return self._wait(timeout=timeout) + except KeyboardInterrupt: + # https://bugs.python.org/issue25942 + # The first keyboard interrupt waits briefly for the child to + # exit under the common assumption that it also received the ^C + # generated SIGINT and will exit rapidly. + if timeout is not None: + sigint_timeout = min(self._sigint_wait_secs, + self._remaining_time(endtime)) + else: + sigint_timeout = self._sigint_wait_secs + self._sigint_wait_secs = 0 # nothing else should wait. + try: + self._wait(timeout=sigint_timeout) + except TimeoutExpired: + pass + raise # resume the KeyboardInterrupt + + def _close_pipe_fds(self, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite): + # self._devnull is not always defined. + devnull_fd = getattr(self, '_devnull', None) + + with contextlib.ExitStack() as stack: + if _mswindows: + if p2cread != -1: + stack.callback(p2cread.Close) + if c2pwrite != -1: + stack.callback(c2pwrite.Close) + if errwrite != -1: + stack.callback(errwrite.Close) + else: + if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd: + stack.callback(os.close, p2cread) + if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd: + stack.callback(os.close, c2pwrite) + if errwrite != -1 and errread != -1 and errwrite != devnull_fd: + stack.callback(os.close, errwrite) + + if devnull_fd is not None: + stack.callback(os.close, devnull_fd) + + # Prevent a double close of these handles/fds from __init__ on error. 
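+        # (__init__'s error-cleanup path consults this flag so the same
+        # handles/fds are never closed twice.)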
+ self._closed_child_pipe_fds = True + + @contextlib.contextmanager + def _on_error_fd_closer(self): + """Helper to ensure file descriptors opened in _get_handles are closed""" + to_close = [] + try: + yield to_close + except: + if hasattr(self, '_devnull'): + to_close.append(self._devnull) + del self._devnull + for fd in to_close: + try: + if _mswindows and isinstance(fd, Handle): + fd.Close() + else: + os.close(fd) + except OSError: + pass + raise + + if _mswindows: + # + # Windows methods + # + def _get_handles(self, stdin, stdout, stderr): + """Construct and return tuple with IO objects: + p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite + """ + if stdin is None and stdout is None and stderr is None: + return (-1, -1, -1, -1, -1, -1) + + p2cread, p2cwrite = -1, -1 + c2pread, c2pwrite = -1, -1 + errread, errwrite = -1, -1 + + with self._on_error_fd_closer() as err_close_fds: + if stdin is None: + p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE) + if p2cread is None: + p2cread, _ = _winapi.CreatePipe(None, 0) + p2cread = Handle(p2cread) + err_close_fds.append(p2cread) + _winapi.CloseHandle(_) + elif stdin == PIPE: + p2cread, p2cwrite = _winapi.CreatePipe(None, 0) + p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite) + err_close_fds.extend((p2cread, p2cwrite)) + elif stdin == DEVNULL: + p2cread = msvcrt.get_osfhandle(self._get_devnull()) + elif isinstance(stdin, int): + p2cread = msvcrt.get_osfhandle(stdin) + else: + # Assuming file-like object + p2cread = msvcrt.get_osfhandle(stdin.fileno()) + p2cread = self._make_inheritable(p2cread) + + if stdout is None: + c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE) + if c2pwrite is None: + _, c2pwrite = _winapi.CreatePipe(None, 0) + c2pwrite = Handle(c2pwrite) + err_close_fds.append(c2pwrite) + _winapi.CloseHandle(_) + elif stdout == PIPE: + c2pread, c2pwrite = _winapi.CreatePipe(None, 0) + c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite) + err_close_fds.extend((c2pread, c2pwrite)) + elif stdout == DEVNULL: + c2pwrite = msvcrt.get_osfhandle(self._get_devnull()) + elif isinstance(stdout, int): + c2pwrite = msvcrt.get_osfhandle(stdout) + else: + # Assuming file-like object + c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) + c2pwrite = self._make_inheritable(c2pwrite) + + if stderr is None: + errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE) + if errwrite is None: + _, errwrite = _winapi.CreatePipe(None, 0) + errwrite = Handle(errwrite) + err_close_fds.append(errwrite) + _winapi.CloseHandle(_) + elif stderr == PIPE: + errread, errwrite = _winapi.CreatePipe(None, 0) + errread, errwrite = Handle(errread), Handle(errwrite) + err_close_fds.extend((errread, errwrite)) + elif stderr == STDOUT: + errwrite = c2pwrite + elif stderr == DEVNULL: + errwrite = msvcrt.get_osfhandle(self._get_devnull()) + elif isinstance(stderr, int): + errwrite = msvcrt.get_osfhandle(stderr) + else: + # Assuming file-like object + errwrite = msvcrt.get_osfhandle(stderr.fileno()) + errwrite = self._make_inheritable(errwrite) + + return (p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + + + def _make_inheritable(self, handle): + """Return a duplicate of handle, which is inheritable""" + h = _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), handle, + _winapi.GetCurrentProcess(), 0, 1, + _winapi.DUPLICATE_SAME_ACCESS) + return Handle(h) + + + def _filter_handle_list(self, handle_list): + """Filter out console handles that can't be used + in lpAttributeList["handle_list"] and make sure the list + isn't empty. 
This also removes duplicate handles.""" + # An handle with it's lowest two bits set might be a special console + # handle that if passed in lpAttributeList["handle_list"], will + # cause it to fail. + return list({handle for handle in handle_list + if handle & 0x3 != 0x3 + or _winapi.GetFileType(handle) != + _winapi.FILE_TYPE_CHAR}) + + + def _execute_child(self, args, executable, preexec_fn, close_fds, + pass_fds, cwd, env, + startupinfo, creationflags, shell, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite, + unused_restore_signals, + unused_gid, unused_gids, unused_uid, + unused_umask, + unused_start_new_session, unused_process_group): + """Execute program (MS Windows version)""" + + assert not pass_fds, "pass_fds not supported on Windows." + + if isinstance(args, str): + pass + elif isinstance(args, bytes): + if shell: + raise TypeError('bytes args is not allowed on Windows') + args = list2cmdline([args]) + elif isinstance(args, os.PathLike): + if shell: + raise TypeError('path-like args is not allowed when ' + 'shell is true') + args = list2cmdline([args]) + else: + args = list2cmdline(args) + + if executable is not None: + executable = os.fsdecode(executable) + + # Process startup details + if startupinfo is None: + startupinfo = STARTUPINFO() + else: + # bpo-34044: Copy STARTUPINFO since it is modified above, + # so the caller can reuse it multiple times. + startupinfo = startupinfo.copy() + + use_std_handles = -1 not in (p2cread, c2pwrite, errwrite) + if use_std_handles: + startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES + startupinfo.hStdInput = p2cread + startupinfo.hStdOutput = c2pwrite + startupinfo.hStdError = errwrite + + attribute_list = startupinfo.lpAttributeList + have_handle_list = bool(attribute_list and + "handle_list" in attribute_list and + attribute_list["handle_list"]) + + # If we were given an handle_list or need to create one + if have_handle_list or (use_std_handles and close_fds): + if attribute_list is None: + attribute_list = startupinfo.lpAttributeList = {} + handle_list = attribute_list["handle_list"] = \ + list(attribute_list.get("handle_list", [])) + + if use_std_handles: + handle_list += [int(p2cread), int(c2pwrite), int(errwrite)] + + handle_list[:] = self._filter_handle_list(handle_list) + + if handle_list: + if not close_fds: + warnings.warn("startupinfo.lpAttributeList['handle_list'] " + "overriding close_fds", RuntimeWarning) + + # When using the handle_list we always request to inherit + # handles but the only handles that will be inherited are + # the ones in the handle_list + close_fds = False + + if shell: + startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW + startupinfo.wShowWindow = _winapi.SW_HIDE + if not executable: + # gh-101283: without a fully-qualified path, before Windows + # checks the system directories, it first looks in the + # application directory, and also the current directory if + # NeedCurrentDirectoryForExePathW(ExeName) is true, so try + # to avoid executing unqualified "cmd.exe". 
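+                    # %ComSpec% normally holds an absolute path to cmd.exe;
+                    # otherwise fall back to %SystemRoot%\System32\cmd.exe and
+                    # refuse any shell path that cannot be fully qualified.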
+ comspec = os.environ.get('ComSpec') + if not comspec: + system_root = os.environ.get('SystemRoot', '') + comspec = os.path.join(system_root, 'System32', 'cmd.exe') + if not os.path.isabs(comspec): + raise FileNotFoundError('shell not found: neither %ComSpec% nor %SystemRoot% is set') + if os.path.isabs(comspec): + executable = comspec + else: + comspec = executable + + args = '{} /c "{}"'.format (comspec, args) + + if cwd is not None: + cwd = os.fsdecode(cwd) + + sys.audit("subprocess.Popen", executable, args, cwd, env) + + # Start the process + try: + hp, ht, pid, tid = _winapi.CreateProcess(executable, args, + # no special security + None, None, + int(not close_fds), + creationflags, + env, + cwd, + startupinfo) + finally: + # Child is launched. Close the parent's copy of those pipe + # handles that only the child should have open. You need + # to make sure that no handles to the write end of the + # output pipe are maintained in this process or else the + # pipe will not close when the child process exits and the + # ReadFile will hang. + self._close_pipe_fds(p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + + # Retain the process handle, but close the thread handle + self._child_created = True + self._handle = Handle(hp) + self.pid = pid + _winapi.CloseHandle(ht) + + def _internal_poll(self, _deadstate=None, + _WaitForSingleObject=_winapi.WaitForSingleObject, + _WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0, + _GetExitCodeProcess=_winapi.GetExitCodeProcess): + """Check if child process has terminated. Returns returncode + attribute. + + This method is called by __del__, so it can only refer to objects + in its local scope. + + """ + if self.returncode is None: + if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0: + self.returncode = _GetExitCodeProcess(self._handle) + return self.returncode + + + def _wait(self, timeout): + """Internal implementation of wait() on Windows.""" + if timeout is None: + timeout_millis = _winapi.INFINITE + elif timeout <= 0: + timeout_millis = 0 + else: + timeout_millis = int(timeout * 1000) + if self.returncode is None: + # API note: Returns immediately if timeout_millis == 0. + result = _winapi.WaitForSingleObject(self._handle, + timeout_millis) + if result == _winapi.WAIT_TIMEOUT: + raise TimeoutExpired(self.args, timeout) + self.returncode = _winapi.GetExitCodeProcess(self._handle) + return self.returncode + + + def _readerthread(self, fh, buffer): + buffer.append(fh.read()) + fh.close() + + + def _writerthread(self, input): + self._stdin_write(input) + + + def _communicate(self, input, endtime, orig_timeout): + # Start reader threads feeding into a list hanging off of this + # object, unless they've already been started. + if self.stdout and not hasattr(self, "_stdout_buff"): + self._stdout_buff = [] + self.stdout_thread = \ + threading.Thread(target=self._readerthread, + args=(self.stdout, self._stdout_buff)) + self.stdout_thread.daemon = True + self.stdout_thread.start() + if self.stderr and not hasattr(self, "_stderr_buff"): + self._stderr_buff = [] + self.stderr_thread = \ + threading.Thread(target=self._readerthread, + args=(self.stderr, self._stderr_buff)) + self.stderr_thread.daemon = True + self.stderr_thread.start() + + # Start writer thread to send input to stdin, unless already + # started. The thread writes input and closes stdin when done, + # or continues in the background on timeout. 
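+            # (The hasattr() checks below let a communicate() call retried
+            # after a TimeoutExpired re-join the threads that the first
+            # attempt already started.)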
+ if self.stdin and not hasattr(self, "_stdin_thread"): + self._stdin_thread = \ + threading.Thread(target=self._writerthread, + args=(input,)) + self._stdin_thread.daemon = True + self._stdin_thread.start() + + # Wait for the writer thread, or time out. If we time out, the + # thread remains writing and the fd left open in case the user + # calls communicate again. + if hasattr(self, "_stdin_thread"): + self._stdin_thread.join(self._remaining_time(endtime)) + if self._stdin_thread.is_alive(): + raise TimeoutExpired(self.args, orig_timeout) + + # Wait for the reader threads, or time out. If we time out, the + # threads remain reading and the fds left open in case the user + # calls communicate again. + if self.stdout is not None: + self.stdout_thread.join(self._remaining_time(endtime)) + if self.stdout_thread.is_alive(): + raise TimeoutExpired(self.args, orig_timeout) + if self.stderr is not None: + self.stderr_thread.join(self._remaining_time(endtime)) + if self.stderr_thread.is_alive(): + raise TimeoutExpired(self.args, orig_timeout) + + # Collect the output from and close both pipes, now that we know + # both have been read successfully. + stdout = None + stderr = None + if self.stdout: + stdout = self._stdout_buff + self.stdout.close() + if self.stderr: + stderr = self._stderr_buff + self.stderr.close() + + # All data exchanged. Translate lists into strings. + stdout = stdout[0] if stdout else None + stderr = stderr[0] if stderr else None + + return (stdout, stderr) + + def send_signal(self, sig): + """Send a signal to the process.""" + # Don't signal a process that we know has already died. + if self.returncode is not None: + return + if sig == signal.SIGTERM: + self.terminate() + elif sig == signal.CTRL_C_EVENT: + os.kill(self.pid, signal.CTRL_C_EVENT) + elif sig == signal.CTRL_BREAK_EVENT: + os.kill(self.pid, signal.CTRL_BREAK_EVENT) + else: + raise ValueError("Unsupported signal: {}".format(sig)) + + def terminate(self): + """Terminates the process.""" + # Don't terminate a process that we know has already died. + if self.returncode is not None: + return + try: + _winapi.TerminateProcess(self._handle, 1) + except PermissionError: + # ERROR_ACCESS_DENIED (winerror 5) is received when the + # process already died. 
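+                # If the exit code still reads STILL_ACTIVE the access error
+                # was genuine, so re-raise; otherwise record the exit status
+                # the process already died with.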
+ rc = _winapi.GetExitCodeProcess(self._handle) + if rc == _winapi.STILL_ACTIVE: + raise + self.returncode = rc + + kill = terminate + + else: + # + # POSIX methods + # + def _get_handles(self, stdin, stdout, stderr): + """Construct and return tuple with IO objects: + p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite + """ + p2cread, p2cwrite = -1, -1 + c2pread, c2pwrite = -1, -1 + errread, errwrite = -1, -1 + + with self._on_error_fd_closer() as err_close_fds: + if stdin is None: + pass + elif stdin == PIPE: + p2cread, p2cwrite = os.pipe() + err_close_fds.extend((p2cread, p2cwrite)) + if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): + fcntl.fcntl(p2cwrite, fcntl.F_SETPIPE_SZ, self.pipesize) + elif stdin == DEVNULL: + p2cread = self._get_devnull() + elif isinstance(stdin, int): + p2cread = stdin + else: + # Assuming file-like object + p2cread = stdin.fileno() + + if stdout is None: + pass + elif stdout == PIPE: + c2pread, c2pwrite = os.pipe() + err_close_fds.extend((c2pread, c2pwrite)) + if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): + fcntl.fcntl(c2pwrite, fcntl.F_SETPIPE_SZ, self.pipesize) + elif stdout == DEVNULL: + c2pwrite = self._get_devnull() + elif isinstance(stdout, int): + c2pwrite = stdout + else: + # Assuming file-like object + c2pwrite = stdout.fileno() + + if stderr is None: + pass + elif stderr == PIPE: + errread, errwrite = os.pipe() + err_close_fds.extend((errread, errwrite)) + if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): + fcntl.fcntl(errwrite, fcntl.F_SETPIPE_SZ, self.pipesize) + elif stderr == STDOUT: + if c2pwrite != -1: + errwrite = c2pwrite + else: # child's stdout is not set, use parent's stdout + errwrite = sys.__stdout__.fileno() + elif stderr == DEVNULL: + errwrite = self._get_devnull() + elif isinstance(stderr, int): + errwrite = stderr + else: + # Assuming file-like object + errwrite = stderr.fileno() + + return (p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + + + def _posix_spawn(self, args, executable, env, restore_signals, close_fds, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite): + """Execute program using os.posix_spawn().""" + kwargs = {} + if restore_signals: + # See _Py_RestoreSignals() in Python/pylifecycle.c + sigset = [] + for signame in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'): + signum = getattr(signal, signame, None) + if signum is not None: + sigset.append(signum) + kwargs['setsigdef'] = sigset + + file_actions = [] + for fd in (p2cwrite, c2pread, errread): + if fd != -1: + file_actions.append((os.POSIX_SPAWN_CLOSE, fd)) + for fd, fd2 in ( + (p2cread, 0), + (c2pwrite, 1), + (errwrite, 2), + ): + if fd != -1: + file_actions.append((os.POSIX_SPAWN_DUP2, fd, fd2)) + + if close_fds: + file_actions.append((os.POSIX_SPAWN_CLOSEFROM, 3)) + + if file_actions: + kwargs['file_actions'] = file_actions + + self.pid = os.posix_spawn(executable, args, env, **kwargs) + self._child_created = True + + self._close_pipe_fds(p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + + def _execute_child(self, args, executable, preexec_fn, close_fds, + pass_fds, cwd, env, + startupinfo, creationflags, shell, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite, + restore_signals, + gid, gids, uid, umask, + start_new_session, process_group): + """Execute program (POSIX version)""" + + if isinstance(args, (str, bytes)): + args = [args] + elif isinstance(args, os.PathLike): + if shell: + raise TypeError('path-like args is not allowed when ' + 'shell is true') + args = [args] + else: + args = 
list(args) + + if shell: + # On Android the default shell is at '/system/bin/sh'. + unix_shell = ('/system/bin/sh' if + hasattr(sys, 'getandroidapilevel') else '/bin/sh') + args = [unix_shell, "-c"] + args + if executable: + args[0] = executable + + if executable is None: + executable = args[0] + + sys.audit("subprocess.Popen", executable, args, cwd, env) + + if (_USE_POSIX_SPAWN + and os.path.dirname(executable) + and preexec_fn is None + and (not close_fds or _HAVE_POSIX_SPAWN_CLOSEFROM) + and not pass_fds + and cwd is None + and (p2cread == -1 or p2cread > 2) + and (c2pwrite == -1 or c2pwrite > 2) + and (errwrite == -1 or errwrite > 2) + and not start_new_session + and process_group == -1 + and gid is None + and gids is None + and uid is None + and umask < 0): + self._posix_spawn(args, executable, env, restore_signals, close_fds, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + return + + orig_executable = executable + + # For transferring possible exec failure from child to parent. + # Data format: "exception name:hex errno:description" + # Pickle is not used; it is complex and involves memory allocation. + errpipe_read, errpipe_write = os.pipe() + # errpipe_write must not be in the standard io 0, 1, or 2 fd range. + low_fds_to_close = [] + while errpipe_write < 3: + low_fds_to_close.append(errpipe_write) + errpipe_write = os.dup(errpipe_write) + for low_fd in low_fds_to_close: + os.close(low_fd) + try: + try: + # We must avoid complex work that could involve + # malloc or free in the child process to avoid + # potential deadlocks, thus we do all this here. + # and pass it to fork_exec() + + if env is not None: + env_list = [] + for k, v in env.items(): + k = os.fsencode(k) + if b'=' in k: + raise ValueError("illegal environment variable name") + env_list.append(k + b'=' + os.fsencode(v)) + else: + env_list = None # Use execv instead of execve. + executable = os.fsencode(executable) + if os.path.dirname(executable): + executable_list = (executable,) + else: + # This matches the behavior of os._execvpe(). 
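+                        # (a bare program name is tried against each directory
+                        # on the search path, in order, when the child execs)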
+ executable_list = tuple( + os.path.join(os.fsencode(dir), executable) + for dir in os.get_exec_path(env)) + fds_to_keep = set(pass_fds) + fds_to_keep.add(errpipe_write) + self.pid = _fork_exec( + args, executable_list, + close_fds, tuple(sorted(map(int, fds_to_keep))), + cwd, env_list, + p2cread, p2cwrite, c2pread, c2pwrite, + errread, errwrite, + errpipe_read, errpipe_write, + restore_signals, start_new_session, + process_group, gid, gids, uid, umask, + preexec_fn) + self._child_created = True + finally: + # be sure the FD is closed no matter what + os.close(errpipe_write) + + self._close_pipe_fds(p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + + # Wait for exec to fail or succeed; possibly raising an + # exception (limited in size) + errpipe_data = bytearray() + while True: + part = os.read(errpipe_read, 50000) + errpipe_data += part + if not part or len(errpipe_data) > 50000: + break + finally: + # be sure the FD is closed no matter what + os.close(errpipe_read) + + if errpipe_data: + try: + pid, sts = os.waitpid(self.pid, 0) + if pid == self.pid: + self._handle_exitstatus(sts) + else: + self.returncode = sys.maxsize + except ChildProcessError: + pass + + try: + exception_name, hex_errno, err_msg = ( + errpipe_data.split(b':', 2)) + # The encoding here should match the encoding + # written in by the subprocess implementations + # like _posixsubprocess + err_msg = err_msg.decode() + except ValueError: + exception_name = b'SubprocessError' + hex_errno = b'0' + err_msg = 'Bad exception data from child: {!r}'.format( + bytes(errpipe_data)) + child_exception_type = getattr( + builtins, exception_name.decode('ascii'), + SubprocessError) + if issubclass(child_exception_type, OSError) and hex_errno: + errno_num = int(hex_errno, 16) + if err_msg == "noexec:chdir": + err_msg = "" + # The error must be from chdir(cwd). + err_filename = cwd + elif err_msg == "noexec": + err_msg = "" + err_filename = None + else: + err_filename = orig_executable + if errno_num != 0: + err_msg = os.strerror(errno_num) + if err_filename is not None: + raise child_exception_type(errno_num, err_msg, err_filename) + else: + raise child_exception_type(errno_num, err_msg) + raise child_exception_type(err_msg) + + + def _handle_exitstatus(self, sts, _del_safe=_del_safe): + """All callers to this function MUST hold self._waitpid_lock.""" + # This method is called (indirectly) by __del__, so it cannot + # refer to anything outside of its local scope. + if _del_safe.WIFSTOPPED(sts): + self.returncode = -_del_safe.WSTOPSIG(sts) + else: + self.returncode = _del_safe.waitstatus_to_exitcode(sts) + + def _internal_poll(self, _deadstate=None, _del_safe=_del_safe): + """Check if child process has terminated. Returns returncode + attribute. + + This method is called by __del__, so it cannot reference anything + outside of the local scope (nor can any methods it calls). + + """ + if self.returncode is None: + if not self._waitpid_lock.acquire(False): + # Something else is busy calling waitpid. Don't allow two + # at once. We know nothing yet. + return None + try: + if self.returncode is not None: + return self.returncode # Another thread waited. + pid, sts = _del_safe.waitpid(self.pid, _del_safe.WNOHANG) + if pid == self.pid: + self._handle_exitstatus(sts) + except OSError as e: + if _deadstate is not None: + self.returncode = _deadstate + elif e.errno == _del_safe.ECHILD: + # This happens if SIGCLD is set to be ignored or + # waiting for child processes has otherwise been + # disabled for our process. 
This child is dead, we + # can't get the status. + # http://bugs.python.org/issue15756 + self.returncode = 0 + finally: + self._waitpid_lock.release() + return self.returncode + + + def _try_wait(self, wait_flags): + """All callers to this function MUST hold self._waitpid_lock.""" + try: + (pid, sts) = os.waitpid(self.pid, wait_flags) + except ChildProcessError: + # This happens if SIGCLD is set to be ignored or waiting + # for child processes has otherwise been disabled for our + # process. This child is dead, we can't get the status. + pid = self.pid + sts = 0 + return (pid, sts) + + + def _wait(self, timeout): + """Internal implementation of wait() on POSIX.""" + if self.returncode is not None: + return self.returncode + + if timeout is not None: + endtime = _time() + timeout + # Enter a busy loop if we have a timeout. This busy loop was + # cribbed from Lib/threading.py in Thread.wait() at r71065. + delay = 0.0005 # 500 us -> initial delay of 1 ms + while True: + if self._waitpid_lock.acquire(False): + try: + if self.returncode is not None: + break # Another thread waited. + (pid, sts) = self._try_wait(os.WNOHANG) + assert pid == self.pid or pid == 0 + if pid == self.pid: + self._handle_exitstatus(sts) + break + finally: + self._waitpid_lock.release() + remaining = self._remaining_time(endtime) + if remaining <= 0: + raise TimeoutExpired(self.args, timeout) + delay = min(delay * 2, remaining, .05) + time.sleep(delay) + else: + while self.returncode is None: + with self._waitpid_lock: + if self.returncode is not None: + break # Another thread waited. + (pid, sts) = self._try_wait(0) + # Check the pid and loop as waitpid has been known to + # return 0 even without WNOHANG in odd situations. + # http://bugs.python.org/issue14396. + if pid == self.pid: + self._handle_exitstatus(sts) + return self.returncode + + + def _communicate(self, input, endtime, orig_timeout): + if self.stdin and not self._communication_started: + # Flush stdio buffer. This might block, if the user has + # been writing to .stdin in an uncontrolled fashion. + try: + self.stdin.flush() + except BrokenPipeError: + pass # communicate() must ignore BrokenPipeError. + except ValueError: + # ignore ValueError: I/O operation on closed file. + if not self.stdin.closed: + raise + if not input: + try: + self.stdin.close() + except BrokenPipeError: + pass # communicate() must ignore BrokenPipeError. + + stdout = None + stderr = None + + # Only create this mapping if we haven't already. 
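+            # A communicate() call retried after a TimeoutExpired must keep
+            # appending to the same buffers, so the mapping survives across
+            # calls.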
+ if not self._communication_started: + self._fileobj2output = {} + if self.stdout: + self._fileobj2output[self.stdout] = [] + if self.stderr: + self._fileobj2output[self.stderr] = [] + + if self.stdout: + stdout = self._fileobj2output[self.stdout] + if self.stderr: + stderr = self._fileobj2output[self.stderr] + + self._save_input(input) + + if self._input: + if not isinstance(self._input, memoryview): + input_view = memoryview(self._input) + else: + input_view = self._input.cast("b") # byte input required + + with _PopenSelector() as selector: + if self.stdin and not self.stdin.closed and self._input: + selector.register(self.stdin, selectors.EVENT_WRITE) + if self.stdout and not self.stdout.closed: + selector.register(self.stdout, selectors.EVENT_READ) + if self.stderr and not self.stderr.closed: + selector.register(self.stderr, selectors.EVENT_READ) + + while selector.get_map(): + timeout = self._remaining_time(endtime) + if timeout is not None and timeout <= 0: + self._check_timeout(endtime, orig_timeout, + stdout, stderr, + skip_check_and_raise=True) + raise RuntimeError( # Impossible :) + '_check_timeout(..., skip_check_and_raise=True) ' + 'failed to raise TimeoutExpired.') + + ready = selector.select(timeout) + self._check_timeout(endtime, orig_timeout, stdout, stderr) + + # XXX Rewrite these to use non-blocking I/O on the file + # objects; they are no longer using C stdio! + + for key, events in ready: + if key.fileobj is self.stdin: + chunk = input_view[self._input_offset : + self._input_offset + _PIPE_BUF] + try: + self._input_offset += os.write(key.fd, chunk) + except BrokenPipeError: + selector.unregister(key.fileobj) + key.fileobj.close() + else: + if self._input_offset >= len(input_view): + selector.unregister(key.fileobj) + key.fileobj.close() + elif key.fileobj in (self.stdout, self.stderr): + data = os.read(key.fd, 32768) + if not data: + selector.unregister(key.fileobj) + key.fileobj.close() + self._fileobj2output[key.fileobj].append(data) + try: + self.wait(timeout=self._remaining_time(endtime)) + except TimeoutExpired as exc: + exc.timeout = orig_timeout + raise + + # All data exchanged. Translate lists into strings. + if stdout is not None: + stdout = b''.join(stdout) + if stderr is not None: + stderr = b''.join(stderr) + + # Translate newlines, if requested. + # This also turns bytes into strings. + if self.text_mode: + if stdout is not None: + stdout = self._translate_newlines(stdout, + self.stdout.encoding, + self.stdout.errors) + if stderr is not None: + stderr = self._translate_newlines(stderr, + self.stderr.encoding, + self.stderr.errors) + + return (stdout, stderr) + + + def _save_input(self, input): + # This method is called from the _communicate_with_*() methods + # so that if we time out while communicating, we can continue + # sending input if we retry. + if self.stdin and self._input is None: + self._input_offset = 0 + self._input = input + if input is not None and self.text_mode: + self._input = self._input.encode(self.stdin.encoding, + self.stdin.errors) + + + def send_signal(self, sig): + """Send a signal to the process.""" + # bpo-38630: Polling reduces the risk of sending a signal to the + # wrong process if the process completed, the Popen.returncode + # attribute is still None, and the pid has been reassigned + # (recycled) to a new different process. This race condition can + # happens in two cases. + # + # Case 1. Thread A calls Popen.poll(), thread B calls + # Popen.send_signal(). In thread A, waitpid() succeed and returns + # the exit status. 
Thread B calls kill() because poll() in thread A + # did not set returncode yet. Calling poll() in thread B prevents + # the race condition thanks to Popen._waitpid_lock. + # + # Case 2. waitpid(pid, 0) has been called directly, without + # using Popen methods: returncode is still None is this case. + # Calling Popen.poll() will set returncode to a default value, + # since waitpid() fails with ProcessLookupError. + self.poll() + if self.returncode is not None: + # Skip signalling a process that we know has already died. + return + + # The race condition can still happen if the race condition + # described above happens between the returncode test + # and the kill() call. + try: + os.kill(self.pid, sig) + except ProcessLookupError: + # Suppress the race condition error; bpo-40550. + pass + + def terminate(self): + """Terminate the process with SIGTERM + """ + self.send_signal(signal.SIGTERM) + + def kill(self): + """Kill the process with SIGKILL + """ + self.send_signal(signal.SIGKILL) diff --git a/Python314_4_x64_Template/Lib/symtable.py b/Python314_4_x64_Template/Lib/symtable.py new file mode 100644 index 00000000..7a30e1ac --- /dev/null +++ b/Python314_4_x64_Template/Lib/symtable.py @@ -0,0 +1,451 @@ +"""Interface to the compiler's internal symbol tables""" + +import _symtable +from _symtable import ( + USE, + DEF_GLOBAL, # noqa: F401 + DEF_NONLOCAL, DEF_LOCAL, + DEF_PARAM, DEF_TYPE_PARAM, DEF_FREE_CLASS, + DEF_IMPORT, DEF_BOUND, DEF_ANNOT, + DEF_COMP_ITER, DEF_COMP_CELL, + SCOPE_OFF, SCOPE_MASK, + FREE, LOCAL, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL +) + +import weakref +from enum import StrEnum + +__all__ = ["symtable", "SymbolTableType", "SymbolTable", "Class", "Function", "Symbol"] + +def symtable(code, filename, compile_type): + """ Return the toplevel *SymbolTable* for the source code. + + *filename* is the name of the file with the code + and *compile_type* is the *compile()* mode argument. + """ + top = _symtable.symtable(code, filename, compile_type) + return _newSymbolTable(top, filename) + +class SymbolTableFactory: + def __init__(self): + self.__memo = weakref.WeakValueDictionary() + + def new(self, table, filename): + if table.type == _symtable.TYPE_FUNCTION: + return Function(table, filename) + if table.type == _symtable.TYPE_CLASS: + return Class(table, filename) + return SymbolTable(table, filename) + + def __call__(self, table, filename): + key = table, filename + obj = self.__memo.get(key, None) + if obj is None: + obj = self.__memo[key] = self.new(table, filename) + return obj + +_newSymbolTable = SymbolTableFactory() + + +class SymbolTableType(StrEnum): + MODULE = "module" + FUNCTION = "function" + CLASS = "class" + ANNOTATION = "annotation" + TYPE_ALIAS = "type alias" + TYPE_PARAMETERS = "type parameters" + TYPE_VARIABLE = "type variable" + + +class SymbolTable: + + def __init__(self, raw_table, filename): + self._table = raw_table + self._filename = filename + self._symbols = {} + + def __repr__(self): + if self.__class__ == SymbolTable: + kind = "" + else: + kind = "%s " % self.__class__.__name__ + + if self._table.name == "top": + return "<{0}SymbolTable for module {1}>".format(kind, self._filename) + else: + return "<{0}SymbolTable for {1} in {2}>".format(kind, + self._table.name, + self._filename) + + def get_type(self): + """Return the type of the symbol table. + + The value returned is one of the values in + the ``SymbolTableType`` enumeration. 
+ """ + if self._table.type == _symtable.TYPE_MODULE: + return SymbolTableType.MODULE + if self._table.type == _symtable.TYPE_FUNCTION: + return SymbolTableType.FUNCTION + if self._table.type == _symtable.TYPE_CLASS: + return SymbolTableType.CLASS + if self._table.type == _symtable.TYPE_ANNOTATION: + return SymbolTableType.ANNOTATION + if self._table.type == _symtable.TYPE_TYPE_ALIAS: + return SymbolTableType.TYPE_ALIAS + if self._table.type == _symtable.TYPE_TYPE_PARAMETERS: + return SymbolTableType.TYPE_PARAMETERS + if self._table.type == _symtable.TYPE_TYPE_VARIABLE: + return SymbolTableType.TYPE_VARIABLE + assert False, f"unexpected type: {self._table.type}" + + def get_id(self): + """Return an identifier for the table. + """ + return self._table.id + + def get_name(self): + """Return the table's name. + + This corresponds to the name of the class, function + or 'top' if the table is for a class, function or + global respectively. + """ + return self._table.name + + def get_lineno(self): + """Return the number of the first line in the + block for the table. + """ + return self._table.lineno + + def is_optimized(self): + """Return *True* if the locals in the table + are optimizable. + """ + return bool(self._table.type == _symtable.TYPE_FUNCTION) + + def is_nested(self): + """Return *True* if the block is a nested class + or function.""" + return bool(self._table.nested) + + def has_children(self): + """Return *True* if the block has nested namespaces. + """ + return bool(self._table.children) + + def get_identifiers(self): + """Return a view object containing the names of symbols in the table. + """ + return self._table.symbols.keys() + + def lookup(self, name): + """Lookup a *name* in the table. + + Returns a *Symbol* instance. + """ + sym = self._symbols.get(name) + if sym is None: + flags = self._table.symbols[name] + namespaces = self.__check_children(name) + module_scope = (self._table.name == "top") + sym = self._symbols[name] = Symbol(name, flags, namespaces, + module_scope=module_scope) + return sym + + def get_symbols(self): + """Return a list of *Symbol* instances for + names in the table. + """ + return [self.lookup(ident) for ident in self.get_identifiers()] + + def __check_children(self, name): + return [_newSymbolTable(st, self._filename) + for st in self._table.children + if st.name == name] + + def get_children(self): + """Return a list of the nested symbol tables. + """ + return [_newSymbolTable(st, self._filename) + for st in self._table.children] + + +def _get_scope(flags): # like _PyST_GetScope() + return (flags >> SCOPE_OFF) & SCOPE_MASK + + +class Function(SymbolTable): + + # Default values for instance variables + __params = None + __locals = None + __frees = None + __globals = None + __nonlocals = None + + def __idents_matching(self, test_func): + return tuple(ident for ident in self.get_identifiers() + if test_func(self._table.symbols[ident])) + + def get_parameters(self): + """Return a tuple of parameters to the function. + """ + if self.__params is None: + self.__params = self.__idents_matching(lambda x:x & DEF_PARAM) + return self.__params + + def get_locals(self): + """Return a tuple of locals in the function. + """ + if self.__locals is None: + locs = (LOCAL, CELL) + test = lambda x: _get_scope(x) in locs + self.__locals = self.__idents_matching(test) + return self.__locals + + def get_globals(self): + """Return a tuple of globals in the function. 
+        """
+        if self.__globals is None:
+            glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
+            test = lambda x: _get_scope(x) in glob
+            self.__globals = self.__idents_matching(test)
+        return self.__globals
+
+    def get_nonlocals(self):
+        """Return a tuple of nonlocals in the function.
+        """
+        if self.__nonlocals is None:
+            self.__nonlocals = self.__idents_matching(lambda x:x & DEF_NONLOCAL)
+        return self.__nonlocals
+
+    def get_frees(self):
+        """Return a tuple of free variables in the function.
+        """
+        if self.__frees is None:
+            is_free = lambda x: _get_scope(x) == FREE
+            self.__frees = self.__idents_matching(is_free)
+        return self.__frees
+
+
+class Class(SymbolTable):
+
+    __methods = None
+
+    def get_methods(self):
+        """Return a tuple of methods declared in the class.
+        """
+        import warnings
+        typename = f'{self.__class__.__module__}.{self.__class__.__name__}'
+        warnings.warn(f'{typename}.get_methods() is deprecated '
+                      f'and will be removed in Python 3.16.',
+                      DeprecationWarning, stacklevel=2)
+
+        if self.__methods is None:
+            d = {}
+
+            def is_local_symbol(ident):
+                flags = self._table.symbols.get(ident, 0)
+                return ((flags >> SCOPE_OFF) & SCOPE_MASK) == LOCAL
+
+            for st in self._table.children:
+                # pick the function-like symbols that are local identifiers
+                if is_local_symbol(st.name):
+                    match st.type:
+                        case _symtable.TYPE_FUNCTION:
+                            # generators are of type TYPE_FUNCTION with a ".0"
+                            # parameter as a first parameter (which makes them
+                            # distinguishable from a function named 'genexpr')
+                            if st.name == 'genexpr' and '.0' in st.varnames:
+                                continue
+                            d[st.name] = 1
+                        case _symtable.TYPE_TYPE_PARAMETERS:
+                            # Get the function-def block in the annotation
+                            # scope 'st' with the same identifier, if any.
+                            scope_name = st.name
+                            for c in st.children:
+                                if c.name == scope_name and c.type == _symtable.TYPE_FUNCTION:
+                                    # A generic generator of type TYPE_FUNCTION
+                                    # cannot be a direct child of 'st' (but it
+                                    # can be a descendant), e.g.:
+                                    #
+                                    # class A:
+                                    #     type genexpr[genexpr] = (x for x in [])
+                                    assert scope_name != 'genexpr' or '.0' not in c.varnames
+                                    d[scope_name] = 1
+                                    break
+            self.__methods = tuple(d)
+        return self.__methods
+
+
+class Symbol:
+
+    def __init__(self, name, flags, namespaces=None, *, module_scope=False):
+        self.__name = name
+        self.__flags = flags
+        self.__scope = _get_scope(flags)
+        self.__namespaces = namespaces or ()
+        self.__module_scope = module_scope
+
+    def __repr__(self):
+        flags_str = '|'.join(self._flags_str())
+        return f'<symbol {self.__name!r}: {self._scope_str()}, {flags_str}>'
+
+    def _scope_str(self):
+        return _scopes_value_to_name.get(self.__scope) or str(self.__scope)
+
+    def _flags_str(self):
+        for flagname, flagvalue in _flags:
+            if self.__flags & flagvalue == flagvalue:
+                yield flagname
+
+    def get_name(self):
+        """Return a name of a symbol.
+        """
+        return self.__name
+
+    def is_referenced(self):
+        """Return *True* if the symbol is used in
+        its block.
+        """
+        return bool(self.__flags & USE)
+
+    def is_parameter(self):
+        """Return *True* if the symbol is a parameter.
+        """
+        return bool(self.__flags & DEF_PARAM)
+
+    def is_type_parameter(self):
+        """Return *True* if the symbol is a type parameter.
+        """
+        return bool(self.__flags & DEF_TYPE_PARAM)
+
+    def is_global(self):
+        """Return *True* if the symbol is global.
+ """ + return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT) + or (self.__module_scope and self.__flags & DEF_BOUND)) + + def is_nonlocal(self): + """Return *True* if the symbol is nonlocal.""" + return bool(self.__flags & DEF_NONLOCAL) + + def is_declared_global(self): + """Return *True* if the symbol is declared global + with a global statement.""" + return bool(self.__scope == GLOBAL_EXPLICIT) + + def is_local(self): + """Return *True* if the symbol is local. + """ + return bool(self.__scope in (LOCAL, CELL) + or (self.__module_scope and self.__flags & DEF_BOUND)) + + def is_annotated(self): + """Return *True* if the symbol is annotated. + """ + return bool(self.__flags & DEF_ANNOT) + + def is_free(self): + """Return *True* if a referenced symbol is + not assigned to. + """ + return bool(self.__scope == FREE) + + def is_free_class(self): + """Return *True* if a class-scoped symbol is free from + the perspective of a method.""" + return bool(self.__flags & DEF_FREE_CLASS) + + def is_imported(self): + """Return *True* if the symbol is created from + an import statement. + """ + return bool(self.__flags & DEF_IMPORT) + + def is_assigned(self): + """Return *True* if a symbol is assigned to.""" + return bool(self.__flags & DEF_LOCAL) + + def is_comp_iter(self): + """Return *True* if the symbol is a comprehension iteration variable. + """ + return bool(self.__flags & DEF_COMP_ITER) + + def is_comp_cell(self): + """Return *True* if the symbol is a cell in an inlined comprehension. + """ + return bool(self.__flags & DEF_COMP_CELL) + + def is_namespace(self): + """Returns *True* if name binding introduces new namespace. + + If the name is used as the target of a function or class + statement, this will be true. + + Note that a single name can be bound to multiple objects. If + is_namespace() is true, the name may also be bound to other + objects, like an int or list, that does not introduce a new + namespace. + """ + return bool(self.__namespaces) + + def get_namespaces(self): + """Return a list of namespaces bound to this name""" + return self.__namespaces + + def get_namespace(self): + """Return the single namespace bound to this name. + + Raises ValueError if the name is bound to multiple namespaces + or no namespace. 
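+
+        For illustration: given st = symtable("def f(): pass", "?", "exec"),
+        st.lookup("f").get_namespace() returns the Function table for "f".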
+        """
+        if len(self.__namespaces) == 0:
+            raise ValueError("name is not bound to any namespaces")
+        elif len(self.__namespaces) > 1:
+            raise ValueError("name is bound to multiple namespaces")
+        else:
+            return self.__namespaces[0]
+
+
+_flags = [('USE', USE)]
+_flags.extend(kv for kv in globals().items() if kv[0].startswith('DEF_'))
+_scopes_names = ('FREE', 'LOCAL', 'GLOBAL_IMPLICIT', 'GLOBAL_EXPLICIT', 'CELL')
+_scopes_value_to_name = {globals()[n]: n for n in _scopes_names}
+
+
+def main(args):
+    import sys
+    def print_symbols(table, level=0):
+        indent = '    ' * level
+        nested = "nested " if table.is_nested() else ""
+        if table.get_type() == 'module':
+            what = f'from file {table._filename!r}'
+        else:
+            what = f'{table.get_name()!r}'
+        print(f'{indent}symbol table for {nested}{table.get_type()} {what}:')
+        for ident in table.get_identifiers():
+            symbol = table.lookup(ident)
+            flags = ', '.join(symbol._flags_str()).lower()
+            print(f'    {indent}{symbol._scope_str().lower()} symbol {symbol.get_name()!r}: {flags}')
+        print()
+
+        for table2 in table.get_children():
+            print_symbols(table2, level + 1)
+
+    for filename in args or ['-']:
+        if filename == '-':
+            src = sys.stdin.read()
+            filename = '<stdin>'
+        else:
+            with open(filename, 'rb') as f:
+                src = f.read()
+        mod = symtable(src, filename, 'exec')
+        print_symbols(mod)
+
+
+if __name__ == "__main__":
+    import sys
+    main(sys.argv[1:])
diff --git a/Python314_4_x64_Template/Lib/sysconfig/__init__.py b/Python314_4_x64_Template/Lib/sysconfig/__init__.py
new file mode 100644
index 00000000..faf8273b
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/sysconfig/__init__.py
@@ -0,0 +1,797 @@
+"""Access to Python's configuration information."""
+
+import os
+import sys
+import threading
+from os.path import realpath
+
+__all__ = [
+    'get_config_h_filename',
+    'get_config_var',
+    'get_config_vars',
+    'get_makefile_filename',
+    'get_path',
+    'get_path_names',
+    'get_paths',
+    'get_platform',
+    'get_python_version',
+    'get_scheme_names',
+    'parse_config_h',
+]
+
+# Keys for get_config_var() that are never converted to Python integers.
+_ALWAYS_STR = { + 'IPHONEOS_DEPLOYMENT_TARGET', + 'MACOSX_DEPLOYMENT_TARGET', +} + +_INSTALL_SCHEMES = { + 'posix_prefix': { + 'stdlib': '{installed_base}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'platstdlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'purelib': '{base}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'include': + '{installed_base}/include/{implementation_lower}{py_version_short}{abiflags}', + 'platinclude': + '{installed_platbase}/include/{implementation_lower}{py_version_short}{abiflags}', + 'scripts': '{base}/bin', + 'data': '{base}', + }, + 'posix_home': { + 'stdlib': '{installed_base}/lib/{implementation_lower}', + 'platstdlib': '{base}/lib/{implementation_lower}', + 'purelib': '{base}/lib/{implementation_lower}', + 'platlib': '{base}/lib/{implementation_lower}', + 'include': '{installed_base}/include/{implementation_lower}', + 'platinclude': '{installed_base}/include/{implementation_lower}', + 'scripts': '{base}/bin', + 'data': '{base}', + }, + 'nt': { + 'stdlib': '{installed_base}/Lib', + 'platstdlib': '{base}/Lib', + 'purelib': '{base}/Lib/site-packages', + 'platlib': '{base}/Lib/site-packages', + 'include': '{installed_base}/Include', + 'platinclude': '{installed_base}/Include', + 'scripts': '{base}/Scripts', + 'data': '{base}', + }, + + # Downstream distributors can overwrite the default install scheme. + # This is done to support downstream modifications where distributors change + # the installation layout (eg. different site-packages directory). + # So, distributors will change the default scheme to one that correctly + # represents their layout. + # This presents an issue for projects/people that need to bootstrap virtual + # environments, like virtualenv. As distributors might now be customizing + # the default install scheme, there is no guarantee that the information + # returned by sysconfig.get_default_scheme/get_paths is correct for + # a virtual environment, the only guarantee we have is that it is correct + # for the *current* environment. When bootstrapping a virtual environment, + # we need to know its layout, so that we can place the files in the + # correct locations. + # The "*_venv" install scheme is a scheme to bootstrap virtual environments, + # essentially identical to the default posix_prefix/nt schemes. 
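+    # For illustration only (example values, not normative): under
+    # 'posix_venv' with base=/opt/venv and py_version_short=3.14,
+    # 'purelib' expands to /opt/venv/lib/python3.14/site-packages.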
+ # Downstream distributors who patch posix_prefix/nt scheme are encouraged to + # leave the following schemes unchanged + 'posix_venv': { + 'stdlib': '{installed_base}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'platstdlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'purelib': '{base}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'include': + '{installed_base}/include/{implementation_lower}{py_version_short}{abiflags}', + 'platinclude': + '{installed_platbase}/include/{implementation_lower}{py_version_short}{abiflags}', + 'scripts': '{base}/bin', + 'data': '{base}', + }, + 'nt_venv': { + 'stdlib': '{installed_base}/Lib', + 'platstdlib': '{base}/Lib', + 'purelib': '{base}/Lib/site-packages', + 'platlib': '{base}/Lib/site-packages', + 'include': '{installed_base}/Include', + 'platinclude': '{installed_base}/Include', + 'scripts': '{base}/Scripts', + 'data': '{base}', + }, + } + +# For the OS-native venv scheme, we essentially provide an alias: +if os.name == 'nt': + _INSTALL_SCHEMES['venv'] = _INSTALL_SCHEMES['nt_venv'] +else: + _INSTALL_SCHEMES['venv'] = _INSTALL_SCHEMES['posix_venv'] + +def _get_implementation(): + return 'Python' + +# NOTE: site.py has copy of this function. +# Sync it when modify this function. +def _getuserbase(): + env_base = os.environ.get("PYTHONUSERBASE", None) + if env_base: + return env_base + + # Emscripten, iOS, tvOS, VxWorks, WASI, and watchOS have no home directories. + # Use _PYTHON_HOST_PLATFORM to get the correct platform when cross-compiling. + system_name = os.environ.get('_PYTHON_HOST_PLATFORM', sys.platform).split('-')[0] + if system_name in {"emscripten", "ios", "tvos", "vxworks", "wasi", "watchos"}: + return None + + def joinuser(*args): + return os.path.expanduser(os.path.join(*args)) + + if os.name == "nt": + base = os.environ.get("APPDATA") or "~" + return joinuser(base, _get_implementation()) + + if sys.platform == "darwin" and sys._framework: + return joinuser("~", "Library", sys._framework, + f"{sys.version_info[0]}.{sys.version_info[1]}") + + return joinuser("~", ".local") + +_HAS_USER_BASE = (_getuserbase() is not None) + +if _HAS_USER_BASE: + _INSTALL_SCHEMES |= { + # NOTE: When modifying "purelib" scheme, update site._get_path() too. 
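+        # For illustration (hypothetical values): 'purelib' in 'nt_user'
+        # below expands to e.g. %APPDATA%\Python\Python314\site-packages.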
+ 'nt_user': { + 'stdlib': '{userbase}/{implementation}{py_version_nodot_plat}', + 'platstdlib': '{userbase}/{implementation}{py_version_nodot_plat}', + 'purelib': '{userbase}/{implementation}{py_version_nodot_plat}/site-packages', + 'platlib': '{userbase}/{implementation}{py_version_nodot_plat}/site-packages', + 'include': '{userbase}/{implementation}{py_version_nodot_plat}/Include', + 'scripts': '{userbase}/{implementation}{py_version_nodot_plat}/Scripts', + 'data': '{userbase}', + }, + 'posix_user': { + 'stdlib': '{userbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'platstdlib': '{userbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'purelib': '{userbase}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'platlib': '{userbase}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'include': '{userbase}/include/{implementation_lower}{py_version_short}{abi_thread}', + 'scripts': '{userbase}/bin', + 'data': '{userbase}', + }, + 'osx_framework_user': { + 'stdlib': '{userbase}/lib/{implementation_lower}', + 'platstdlib': '{userbase}/lib/{implementation_lower}', + 'purelib': '{userbase}/lib/{implementation_lower}/site-packages', + 'platlib': '{userbase}/lib/{implementation_lower}/site-packages', + 'include': '{userbase}/include/{implementation_lower}{py_version_short}', + 'scripts': '{userbase}/bin', + 'data': '{userbase}', + }, + } + +_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include', + 'scripts', 'data') + +_PY_VERSION = sys.version.split()[0] +_PY_VERSION_SHORT = f'{sys.version_info[0]}.{sys.version_info[1]}' +_PY_VERSION_SHORT_NO_DOT = f'{sys.version_info[0]}{sys.version_info[1]}' +_BASE_PREFIX = os.path.normpath(sys.base_prefix) +_BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) +# Mutex guarding initialization of _CONFIG_VARS. +_CONFIG_VARS_LOCK = threading.RLock() +_CONFIG_VARS = None +# True iff _CONFIG_VARS has been fully initialized. +_CONFIG_VARS_INITIALIZED = False +_USER_BASE = None + + +def _safe_realpath(path): + try: + return realpath(path) + except OSError: + return path + +if sys.executable: + _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) +else: + # sys.executable can be empty if argv[0] has been changed and Python is + # unable to retrieve the real program name + _PROJECT_BASE = _safe_realpath(os.getcwd()) + +# In a virtual environment, `sys._home` gives us the target directory +# `_PROJECT_BASE` for the executable that created it when the virtual +# python is an actual executable ('venv --copies' or Windows). +_sys_home = getattr(sys, '_home', None) +if _sys_home: + _PROJECT_BASE = _sys_home + +if os.name == 'nt': + # In a source build, the executable is in a subdirectory of the root + # that we want (\PCbuild\). + # `_BASE_PREFIX` is used as the base installation is where the source + # will be. The realpath is needed to prevent mount point confusion + # that can occur with just string comparisons. + if _safe_realpath(_PROJECT_BASE).startswith( + _safe_realpath(f'{_BASE_PREFIX}\\PCbuild')): + _PROJECT_BASE = _BASE_PREFIX + +# set for cross builds +if "_PYTHON_PROJECT_BASE" in os.environ: + _PROJECT_BASE = _safe_realpath(os.environ["_PYTHON_PROJECT_BASE"]) + +def is_python_build(check_home=None): + if check_home is not None: + import warnings + warnings.warn( + ( + 'The check_home argument of sysconfig.is_python_build is ' + 'deprecated and its value is ignored. ' + 'It will be removed in Python 3.15.' 
+ ), + DeprecationWarning, + stacklevel=2, + ) + for fn in ("Setup", "Setup.local"): + if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): + return True + return False + +_PYTHON_BUILD = is_python_build() + +if _PYTHON_BUILD: + for scheme in ('posix_prefix', 'posix_home'): + # On POSIX-y platforms, Python will: + # - Build from .h files in 'headers' (which is only added to the + # scheme when building CPython) + # - Install .h files to 'include' + scheme = _INSTALL_SCHEMES[scheme] + scheme['headers'] = scheme['include'] + scheme['include'] = '{srcdir}/Include' + scheme['platinclude'] = '{projectbase}/.' + del scheme + + +def _subst_vars(s, local_vars): + try: + return s.format(**local_vars) + except KeyError as var: + try: + return s.format(**os.environ) + except KeyError: + raise AttributeError(f'{var}') from None + +def _extend_dict(target_dict, other_dict): + target_keys = target_dict.keys() + for key, value in other_dict.items(): + if key in target_keys: + continue + target_dict[key] = value + + +def _expand_vars(scheme, vars): + res = {} + if vars is None: + vars = {} + _extend_dict(vars, get_config_vars()) + if os.name == 'nt': + # On Windows we want to substitute 'lib' for schemes rather + # than the native value (without modifying vars, in case it + # was passed in) + vars = vars | {'platlibdir': 'lib'} + + for key, value in _INSTALL_SCHEMES[scheme].items(): + if os.name in ('posix', 'nt'): + value = os.path.expanduser(value) + res[key] = os.path.normpath(_subst_vars(value, vars)) + return res + + +def _get_preferred_schemes(): + if os.name == 'nt': + return { + 'prefix': 'nt', + 'home': 'posix_home', + 'user': 'nt_user', + } + if sys.platform == 'darwin' and sys._framework: + return { + 'prefix': 'posix_prefix', + 'home': 'posix_home', + 'user': 'osx_framework_user', + } + + return { + 'prefix': 'posix_prefix', + 'home': 'posix_home', + 'user': 'posix_user', + } + + +def get_preferred_scheme(key): + if key == 'prefix' and sys.prefix != sys.base_prefix: + return 'venv' + scheme = _get_preferred_schemes()[key] + if scheme not in _INSTALL_SCHEMES: + raise ValueError( + f"{key!r} returned {scheme!r}, which is not a valid scheme " + f"on this platform" + ) + return scheme + + +def get_default_scheme(): + return get_preferred_scheme('prefix') + + +def get_makefile_filename(): + """Return the path of the Makefile.""" + + # GH-127429: When cross-compiling, use the Makefile from the target, instead of the host Python. 
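+    # For illustration (hypothetical path): with
+    # _PYTHON_PROJECT_BASE=/build/target this returns /build/target/Makefile.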
+ if cross_base := os.environ.get('_PYTHON_PROJECT_BASE'): + return os.path.join(cross_base, 'Makefile') + + if _PYTHON_BUILD: + return os.path.join(_PROJECT_BASE, "Makefile") + + if hasattr(sys, 'abiflags'): + config_dir_name = f'config-{_PY_VERSION_SHORT}{sys.abiflags}' + else: + config_dir_name = 'config' + + if hasattr(sys.implementation, '_multiarch'): + config_dir_name += f'-{sys.implementation._multiarch}' + + return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') + + +def _import_from_directory(path, name): + if name not in sys.modules: + import importlib.machinery + import importlib.util + + spec = importlib.machinery.PathFinder.find_spec(name, [path]) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + sys.modules[name] = module + return sys.modules[name] + + +def _get_sysconfigdata_name(): + multiarch = getattr(sys.implementation, '_multiarch', '') + return os.environ.get( + '_PYTHON_SYSCONFIGDATA_NAME', + f'_sysconfigdata_{sys.abiflags}_{sys.platform}_{multiarch}', + ) + + +def _get_sysconfigdata(): + import importlib + + name = _get_sysconfigdata_name() + path = os.environ.get('_PYTHON_SYSCONFIGDATA_PATH') + module = _import_from_directory(path, name) if path else importlib.import_module(name) + + return module.build_time_vars + + +def _installation_is_relocated(): + """Is the Python installation running from a different prefix than what was targetted when building?""" + if os.name != 'posix': + raise NotImplementedError('sysconfig._installation_is_relocated() is currently only supported on POSIX') + + data = _get_sysconfigdata() + return ( + data['prefix'] != getattr(sys, 'base_prefix', '') + or data['exec_prefix'] != getattr(sys, 'base_exec_prefix', '') + ) + + +def _init_posix(vars): + """Initialize the module as appropriate for POSIX systems.""" + # GH-126920: Make sure we don't overwrite any of the keys already set + vars.update(_get_sysconfigdata() | vars) + + +def _init_non_posix(vars): + """Initialize the module as appropriate for NT""" + # set basic install directories + import _winapi + import _sysconfig + vars['LIBDEST'] = get_path('stdlib') + vars['BINLIBDEST'] = get_path('platstdlib') + vars['INCLUDEPY'] = get_path('include') + + # Add EXT_SUFFIX, SOABI, Py_DEBUG, and Py_GIL_DISABLED + vars.update(_sysconfig.config_vars()) + + # NOTE: ABIFLAGS is only an emulated value. It is not present during build + # on Windows. sys.abiflags is absent on Windows and vars['abiflags'] + # is already widely used to calculate paths, so it should remain an + # empty string. + vars['ABIFLAGS'] = ''.join( + ( + 't' if vars['Py_GIL_DISABLED'] else '', + '_d' if vars['Py_DEBUG'] else '', + ), + ) + + vars['LIBDIR'] = _safe_realpath(os.path.join(get_config_var('installed_base'), 'libs')) + if hasattr(sys, 'dllhandle'): + dllhandle = _winapi.GetModuleFileName(sys.dllhandle) + vars['LIBRARY'] = os.path.basename(_safe_realpath(dllhandle)) + vars['LDLIBRARY'] = vars['LIBRARY'] + vars['EXE'] = '.exe' + vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT + vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) + vars['TZPATH'] = '' + +# +# public APIs +# + + +def parse_config_h(fp, vars=None): + """Parse a config.h-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. 
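+
+    For illustration: a line such as "#define HAVE_FORK 1" yields
+    vars['HAVE_FORK'] == 1, while "/* #undef HAVE_FOO */" yields
+    vars['HAVE_FOO'] == 0.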
+ """ + if vars is None: + vars = {} + import re + define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") + undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") + + while True: + line = fp.readline() + if not line: + break + m = define_rx.match(line) + if m: + n, v = m.group(1, 2) + try: + if n in _ALWAYS_STR: + raise ValueError + v = int(v) + except ValueError: + pass + vars[n] = v + else: + m = undef_rx.match(line) + if m: + vars[m.group(1)] = 0 + return vars + + +def get_config_h_filename(): + """Return the path of pyconfig.h.""" + if _PYTHON_BUILD: + if os.name == "nt": + inc_dir = os.path.join(_PROJECT_BASE, 'PC') + else: + inc_dir = _PROJECT_BASE + else: + inc_dir = get_path('platinclude') + return os.path.join(inc_dir, 'pyconfig.h') + + +def get_scheme_names(): + """Return a tuple containing the schemes names.""" + return tuple(sorted(_INSTALL_SCHEMES)) + + +def get_path_names(): + """Return a tuple containing the paths names.""" + return _SCHEME_KEYS + + +def get_paths(scheme=get_default_scheme(), vars=None, expand=True): + """Return a mapping containing an install scheme. + + ``scheme`` is the install scheme name. If not provided, it will + return the default scheme for the current platform. + """ + if expand: + return _expand_vars(scheme, vars) + else: + return _INSTALL_SCHEMES[scheme] + + +def get_path(name, scheme=get_default_scheme(), vars=None, expand=True): + """Return a path corresponding to the scheme. + + ``scheme`` is the install scheme name. + """ + return get_paths(scheme, vars, expand)[name] + + +def _init_config_vars(): + global _CONFIG_VARS + _CONFIG_VARS = {} + + prefix = os.path.normpath(sys.prefix) + exec_prefix = os.path.normpath(sys.exec_prefix) + base_prefix = _BASE_PREFIX + base_exec_prefix = _BASE_EXEC_PREFIX + + try: + abiflags = sys.abiflags + except AttributeError: + abiflags = '' + + if os.name == 'posix': + _init_posix(_CONFIG_VARS) + # If we are cross-compiling, load the prefixes from the Makefile instead. + if '_PYTHON_PROJECT_BASE' in os.environ: + prefix = _CONFIG_VARS['host_prefix'] + exec_prefix = _CONFIG_VARS['host_exec_prefix'] + base_prefix = _CONFIG_VARS['host_prefix'] + base_exec_prefix = _CONFIG_VARS['host_exec_prefix'] + abiflags = _CONFIG_VARS['ABIFLAGS'] + + # Normalized versions of prefix and exec_prefix are handy to have; + # in fact, these are the standard versions used most places in the + # Distutils. + _CONFIG_VARS['prefix'] = prefix + _CONFIG_VARS['exec_prefix'] = exec_prefix + _CONFIG_VARS['py_version'] = _PY_VERSION + _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT + _CONFIG_VARS['py_version_nodot'] = _PY_VERSION_SHORT_NO_DOT + _CONFIG_VARS['installed_base'] = base_prefix + _CONFIG_VARS['base'] = prefix + _CONFIG_VARS['installed_platbase'] = base_exec_prefix + _CONFIG_VARS['platbase'] = exec_prefix + _CONFIG_VARS['projectbase'] = _PROJECT_BASE + _CONFIG_VARS['platlibdir'] = sys.platlibdir + _CONFIG_VARS['implementation'] = _get_implementation() + _CONFIG_VARS['implementation_lower'] = _get_implementation().lower() + _CONFIG_VARS['abiflags'] = abiflags + try: + _CONFIG_VARS['py_version_nodot_plat'] = sys.winver.replace('.', '') + except AttributeError: + _CONFIG_VARS['py_version_nodot_plat'] = '' + + if os.name == 'nt': + _init_non_posix(_CONFIG_VARS) + _CONFIG_VARS['VPATH'] = sys._vpath + if _HAS_USER_BASE: + # Setting 'userbase' is done below the call to the + # init function to enable using 'get_config_var' in + # the init-function. 
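+        # For illustration: _getuserbase() above typically yields ~/.local
+        # on Linux and %APPDATA%\Python on Windows.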
+ _CONFIG_VARS['userbase'] = _getuserbase() + + # e.g., 't' for free-threaded or '' for default build + _CONFIG_VARS['abi_thread'] = 't' if _CONFIG_VARS.get('Py_GIL_DISABLED') else '' + + # Always convert srcdir to an absolute path + srcdir = _CONFIG_VARS.get('srcdir', _PROJECT_BASE) + if os.name == 'posix': + if _PYTHON_BUILD: + # If srcdir is a relative path (typically '.' or '..') + # then it should be interpreted relative to the directory + # containing Makefile. + base = os.path.dirname(get_makefile_filename()) + srcdir = os.path.join(base, srcdir) + else: + # srcdir is not meaningful since the installation is + # spread about the filesystem. We choose the + # directory containing the Makefile since we know it + # exists. + srcdir = os.path.dirname(get_makefile_filename()) + _CONFIG_VARS['srcdir'] = _safe_realpath(srcdir) + + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers + if sys.platform == 'darwin': + import _osx_support + _osx_support.customize_config_vars(_CONFIG_VARS) + + global _CONFIG_VARS_INITIALIZED + _CONFIG_VARS_INITIALIZED = True + + +def get_config_vars(*args): + """With no arguments, return a dictionary of all configuration + variables relevant for the current platform. + + On Unix, this means every variable defined in Python's installed Makefile; + On Windows it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + """ + global _CONFIG_VARS_INITIALIZED + + # Avoid claiming the lock once initialization is complete. + if _CONFIG_VARS_INITIALIZED: + # GH-126789: If sys.prefix or sys.exec_prefix were updated, invalidate the cache. + prefix = os.path.normpath(sys.prefix) + exec_prefix = os.path.normpath(sys.exec_prefix) + if _CONFIG_VARS['prefix'] != prefix or _CONFIG_VARS['exec_prefix'] != exec_prefix: + with _CONFIG_VARS_LOCK: + _CONFIG_VARS_INITIALIZED = False + _init_config_vars() + else: + # Initialize the config_vars cache. + with _CONFIG_VARS_LOCK: + # Test again with the lock held to avoid races. Note that + # we test _CONFIG_VARS here, not _CONFIG_VARS_INITIALIZED, + # to ensure that recursive calls to get_config_vars() + # don't re-enter init_config_vars(). + if _CONFIG_VARS is None: + _init_config_vars() + + if args: + vals = [] + for name in args: + vals.append(_CONFIG_VARS.get(name)) + return vals + else: + return _CONFIG_VARS + + +def get_config_var(name): + """Return the value of a single variable using the dictionary returned by + 'get_config_vars()'. + + Equivalent to get_config_vars().get(name) + """ + return get_config_vars().get(name) + + +def get_platform(): + """Return a string that identifies the current platform. + + This is used mainly to distinguish platform-specific build directories and + platform-specific built distributions. Typically includes the OS name and + version and the architecture (as supplied by 'os.uname()'), although the + exact information included depends on the OS; on Linux, the kernel version + isn't particularly important. 
+ + Examples of returned values: + + + Windows: + + - win-amd64 (64-bit Windows on AMD64, aka x86_64, Intel64, and EM64T) + - win-arm64 (64-bit Windows on ARM64, aka AArch64) + - win32 (all others - specifically, sys.platform is returned) + + POSIX based OS: + + - linux-x86_64 + - macosx-15.5-arm64 + - macosx-26.0-universal2 (macOS on Apple Silicon or Intel) + - android-24-arm64_v8a + + For other non-POSIX platforms, currently just returns :data:`sys.platform`.""" + if os.name == 'nt': + if 'amd64' in sys.version.lower(): + return 'win-amd64' + if '(arm)' in sys.version.lower(): + return 'win-arm32' + if '(arm64)' in sys.version.lower(): + return 'win-arm64' + return sys.platform + + if os.name != "posix" or not hasattr(os, 'uname'): + # XXX what about the architecture? NT is Intel or Alpha + return sys.platform + + # Set for cross builds explicitly + if "_PYTHON_HOST_PLATFORM" in os.environ: + osname, _, machine = os.environ["_PYTHON_HOST_PLATFORM"].partition('-') + release = None + else: + # Try to distinguish various flavours of Unix + osname, host, release, version, machine = os.uname() + + # Convert the OS name to lowercase, remove '/' characters, and translate + # spaces (for "Power Macintosh") + osname = osname.lower().replace('/', '') + machine = machine.replace(' ', '_') + machine = machine.replace('/', '-') + + if osname == "android" or sys.platform == "android": + osname = "android" + release = get_config_var("ANDROID_API_LEVEL") + + # Wheel tags use the ABI names from Android's own tools. + # When Python is running on 32-bit ARM Android on a 64-bit ARM kernel, + # 'os.uname().machine' is 'armv8l'. Such devices run the same userspace + # code as 'armv7l' devices. + # During the build process of the Android testbed when targeting 32-bit ARM, + # '_PYTHON_HOST_PLATFORM' is 'arm-linux-androideabi', so 'machine' becomes + # 'arm'. + machine = { + "aarch64": "arm64_v8a", + "arm": "armeabi_v7a", + "armv7l": "armeabi_v7a", + "armv8l": "armeabi_v7a", + "i686": "x86", + "x86_64": "x86_64", + }[machine] + elif osname == "linux": + # At least on Linux/Intel, 'machine' is the processor -- + # i386, etc. + # XXX what about Alpha, SPARC, etc? + return f"{osname}-{machine}" + elif osname[:5] == "sunos": + if release[0] >= "5": # SunOS 5 == Solaris 2 + osname = "solaris" + release = f"{int(release[0]) - 3}.{release[2:]}" + # We can't use "platform.architecture()[0]" because a + # bootstrap problem. We use a dict to get an error + # if some suspicious happens. 
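+            # For illustration: on a 64-bit build sys.maxsize is 2**63 - 1,
+            # so machine becomes e.g. "sun4u.64bit".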
+ bitness = {2147483647:"32bit", 9223372036854775807:"64bit"} + machine += f".{bitness[sys.maxsize]}" + # fall through to standard osname-release-machine representation + elif osname[:3] == "aix": + from _aix_support import aix_platform + return aix_platform() + elif osname[:6] == "cygwin": + osname = "cygwin" + import re + rel_re = re.compile(r'[\d.]+') + m = rel_re.match(release) + if m: + release = m.group() + elif osname[:6] == "darwin": + if sys.platform == "ios": + release = get_config_vars().get("IPHONEOS_DEPLOYMENT_TARGET", "13.0") + osname = sys.platform + machine = sys.implementation._multiarch + else: + import _osx_support + osname, release, machine = _osx_support.get_platform_osx( + get_config_vars(), + osname, release, machine) + + return '-'.join(map(str, filter(None, (osname, release, machine)))) + + +def get_python_version(): + return _PY_VERSION_SHORT + + +def _get_python_version_abi(): + return _PY_VERSION_SHORT + get_config_var("abi_thread") + + +def expand_makefile_vars(s, vars): + """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in + 'string' according to 'vars' (a dictionary mapping variable names to + values). Variables not present in 'vars' are silently expanded to the + empty string. The variable values in 'vars' should not contain further + variable expansions; if 'vars' is the output of 'parse_makefile()', + you're fine. Returns a variable-expanded version of 's'. + """ + + import warnings + warnings.warn( + 'sysconfig.expand_makefile_vars is deprecated and will be removed in ' + 'Python 3.16. Use sysconfig.get_paths(vars=...) instead.', + DeprecationWarning, + stacklevel=2, + ) + + import re + + _findvar1_rx = r"\$\(([A-Za-z][A-Za-z0-9_]*)\)" + _findvar2_rx = r"\${([A-Za-z][A-Za-z0-9_]*)}" + + # This algorithm does multiple expansion, so if vars['foo'] contains + # "${bar}", it will expand ${foo} to ${bar}, and then expand + # ${bar}... and so forth. This is fine as long as 'vars' comes from + # 'parse_makefile()', which takes care of such expansions eagerly, + # according to make's variable expansion semantics. + + while True: + m = re.search(_findvar1_rx, s) or re.search(_findvar2_rx, s) + if m: + (beg, end) = m.span() + s = s[0:beg] + vars.get(m.group(1)) + s[end:] + else: + break + return s diff --git a/Python314_4_x64_Template/Lib/sysconfig/__main__.py b/Python314_4_x64_Template/Lib/sysconfig/__main__.py new file mode 100644 index 00000000..bc2197cf --- /dev/null +++ b/Python314_4_x64_Template/Lib/sysconfig/__main__.py @@ -0,0 +1,276 @@ +import json +import os +import sys +import types +from sysconfig import ( + _ALWAYS_STR, + _PYTHON_BUILD, + _get_sysconfigdata_name, + get_config_h_filename, + get_config_var, + get_config_vars, + get_default_scheme, + get_makefile_filename, + get_paths, + get_platform, + get_python_version, + parse_config_h, +) + + +# Regexes needed for parsing Makefile (and similar syntaxes, +# like old-style Setup files). +_variable_rx = r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)" +_findvar1_rx = r"\$\(([A-Za-z][A-Za-z0-9_]*)\)" +_findvar2_rx = r"\${([A-Za-z][A-Za-z0-9_]*)}" + + +def _parse_makefile(filename, vars=None, keep_unresolved=True): + """Parse a Makefile-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. 
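+
+    For illustration: a line "CC = gcc" produces vars['CC'] == 'gcc', while
+    values containing "$(VAR)" or "${VAR}" references are resolved by the
+    interpolation pass below.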
+ """ + import re + + if vars is None: + vars = {} + done = {} + notdone = {} + + with open(filename, encoding=sys.getfilesystemencoding(), + errors="surrogateescape") as f: + lines = f.readlines() + + for line in lines: + if line.startswith('#') or line.strip() == '': + continue + m = re.match(_variable_rx, line) + if m: + n, v = m.group(1, 2) + v = v.strip() + # `$$' is a literal `$' in make + tmpv = v.replace('$$', '') + + if "$" in tmpv: + notdone[n] = v + else: + try: + if n in _ALWAYS_STR: + raise ValueError + + v = int(v) + except ValueError: + # insert literal `$' + done[n] = v.replace('$$', '$') + else: + done[n] = v + + # do variable interpolation here + variables = list(notdone.keys()) + + # Variables with a 'PY_' prefix in the makefile. These need to + # be made available without that prefix through sysconfig. + # Special care is needed to ensure that variable expansion works, even + # if the expansion uses the name without a prefix. + renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') + + while len(variables) > 0: + for name in tuple(variables): + value = notdone[name] + m1 = re.search(_findvar1_rx, value) + m2 = re.search(_findvar2_rx, value) + if m1 and m2: + m = m1 if m1.start() < m2.start() else m2 + else: + m = m1 if m1 else m2 + if m is not None: + n = m.group(1) + found = True + if n in done: + item = str(done[n]) + elif n in notdone: + # get it on a subsequent round + found = False + elif n in os.environ: + # do it like make: fall back to environment + item = os.environ[n] + + elif n in renamed_variables: + if (name.startswith('PY_') and + name[3:] in renamed_variables): + item = "" + + elif 'PY_' + n in notdone: + found = False + + else: + item = str(done['PY_' + n]) + + else: + done[n] = item = "" + + if found: + after = value[m.end():] + value = value[:m.start()] + item + after + if "$" in after: + notdone[name] = value + else: + try: + if name in _ALWAYS_STR: + raise ValueError + value = int(value) + except ValueError: + done[name] = value.strip() + else: + done[name] = value + variables.remove(name) + + if name.startswith('PY_') \ + and name[3:] in renamed_variables: + + name = name[3:] + if name not in done: + done[name] = value + + else: + # Adds unresolved variables to the done dict. + # This is disabled when called from distutils.sysconfig + if keep_unresolved: + done[name] = value + # bogus variable reference (e.g. 
"prefix=$/opt/python"); + # just drop it since we can't deal + variables.remove(name) + + # strip spurious spaces + for k, v in done.items(): + if isinstance(v, str): + done[k] = v.strip() + + # save the results in the global dictionary + vars.update(done) + return vars + + +def _print_config_dict(d, stream): + print ("{", file=stream) + for k, v in sorted(d.items()): + print(f" {k!r}: {v!r},", file=stream) + print ("}", file=stream) + + +def _get_pybuilddir(): + pybuilddir = f'build/lib.{get_platform()}-{get_python_version()}' + if get_config_var('Py_DEBUG') == '1': + pybuilddir += '-pydebug' + return pybuilddir + + +def _get_json_data_name(): + name = _get_sysconfigdata_name() + assert name.startswith('_sysconfigdata') + return name.replace('_sysconfigdata', '_sysconfig_vars') + '.json' + + +def _generate_posix_vars(): + """Generate the Python module containing build-time variables.""" + vars = {} + # load the installed Makefile: + makefile = get_makefile_filename() + try: + _parse_makefile(makefile, vars) + except OSError as e: + msg = f"invalid Python installation: unable to open {makefile}" + if hasattr(e, "strerror"): + msg = f"{msg} ({e.strerror})" + raise OSError(msg) + # load the installed pyconfig.h: + config_h = get_config_h_filename() + try: + with open(config_h, encoding="utf-8") as f: + parse_config_h(f, vars) + except OSError as e: + msg = f"invalid Python installation: unable to open {config_h}" + if hasattr(e, "strerror"): + msg = f"{msg} ({e.strerror})" + raise OSError(msg) + # On AIX, there are wrong paths to the linker scripts in the Makefile + # -- these paths are relative to the Python source, but when installed + # the scripts are in another directory. + if _PYTHON_BUILD: + vars['BLDSHARED'] = vars['LDSHARED'] + + name = _get_sysconfigdata_name() + + # There's a chicken-and-egg situation on OS X with regards to the + # _sysconfigdata module after the changes introduced by #15298: + # get_config_vars() is called by get_platform() as part of the + # `make pybuilddir.txt` target -- which is a precursor to the + # _sysconfigdata.py module being constructed. Unfortunately, + # get_config_vars() eventually calls _init_posix(), which attempts + # to import _sysconfigdata, which we won't have built yet. In order + # for _init_posix() to work, if we're on Darwin, just mock up the + # _sysconfigdata module manually and populate it with the build vars. + # This is more than sufficient for ensuring the subsequent call to + # get_platform() succeeds. + # GH-127178: Since we started generating a .json file, we also need this to + # be able to run sysconfig.get_config_vars(). 
+ module = types.ModuleType(name) + module.build_time_vars = vars + sys.modules[name] = module + + pybuilddir = _get_pybuilddir() + os.makedirs(pybuilddir, exist_ok=True) + destfile = os.path.join(pybuilddir, name + '.py') + + with open(destfile, 'w', encoding='utf8') as f: + f.write('# system configuration generated and used by' + ' the sysconfig module\n') + f.write('build_time_vars = ') + _print_config_dict(vars, stream=f) + + print(f'Written {destfile}') + + install_vars = get_config_vars() + # Fix config vars to match the values after install (of the default environment) + install_vars['projectbase'] = install_vars['BINDIR'] + install_vars['srcdir'] = install_vars['LIBPL'] + # Write a JSON file with the output of sysconfig.get_config_vars + jsonfile = os.path.join(pybuilddir, _get_json_data_name()) + with open(jsonfile, 'w') as f: + json.dump(install_vars, f, indent=2) + + print(f'Written {jsonfile}') + + # Create file used for sys.path fixup -- see Modules/getpath.c + with open('pybuilddir.txt', 'w', encoding='utf8') as f: + f.write(pybuilddir) + + +def _print_dict(title, data): + for index, (key, value) in enumerate(sorted(data.items())): + if index == 0: + print(f'{title}: ') + print(f'\t{key} = "{value}"') + + +def _main(): + """Display all information sysconfig detains.""" + if '--generate-posix-vars' in sys.argv: + _generate_posix_vars() + return + print(f'Platform: "{get_platform()}"') + print(f'Python version: "{get_python_version()}"') + print(f'Current installation scheme: "{get_default_scheme()}"') + print() + _print_dict('Paths', get_paths()) + print() + _print_dict('Variables', get_config_vars()) + + +if __name__ == '__main__': + try: + _main() + except BrokenPipeError: + pass diff --git a/Python314_4_x64_Template/Lib/sysconfig/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/sysconfig/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..57d4879c Binary files /dev/null and b/Python314_4_x64_Template/Lib/sysconfig/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/tabnanny.py b/Python314_4_x64_Template/Lib/tabnanny.py new file mode 100644 index 00000000..c0097351 --- /dev/null +++ b/Python314_4_x64_Template/Lib/tabnanny.py @@ -0,0 +1,338 @@ +"""The Tab Nanny despises ambiguous indentation. She knows no mercy. + +tabnanny -- Detection of ambiguous indentation + +For the time being this module is intended to be called as a script. +However it is possible to import it into an IDE and use the function +check() described below. + +Warning: The API provided by this module is likely to change in future +releases; such changes may not be backward compatible. +""" + +# Released to the public domain, by Tim Peters, 15 April 1998. + +# XXX Note: this is now a standard library module. +# XXX The API needs to undergo changes however; the current code is too +# XXX script-like. This will be addressed later. 
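+#
+# A minimal usage sketch (hypothetical file name; check() is defined below):
+#
+#     import tabnanny
+#     tabnanny.verbose = 1
+#     tabnanny.check("suspect_module.py")   # diagnostics are printed to stdout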
+ +__version__ = "6" + +import os +import sys +import tokenize + +__all__ = ["check", "NannyNag", "process_tokens"] + +verbose = 0 +filename_only = 0 + +def errprint(*args): + sep = "" + for arg in args: + sys.stderr.write(sep + str(arg)) + sep = " " + sys.stderr.write("\n") + sys.exit(1) + +def main(): + import getopt + + global verbose, filename_only + try: + opts, args = getopt.getopt(sys.argv[1:], "qv") + except getopt.error as msg: + errprint(msg) + for o, a in opts: + if o == '-q': + filename_only = filename_only + 1 + if o == '-v': + verbose = verbose + 1 + if not args: + errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...") + for arg in args: + check(arg) + +class NannyNag(Exception): + """ + Raised by process_tokens() if detecting an ambiguous indent. + Captured and handled in check(). + """ + def __init__(self, lineno, msg, line): + self.lineno, self.msg, self.line = lineno, msg, line + def get_lineno(self): + return self.lineno + def get_msg(self): + return self.msg + def get_line(self): + return self.line + +def check(file): + """check(file_or_dir) + + If file_or_dir is a directory and not a symbolic link, then recursively + descend the directory tree named by file_or_dir, checking all .py files + along the way. If file_or_dir is an ordinary Python source file, it is + checked for whitespace related problems. The diagnostic messages are + written to standard output using the print statement. + """ + + if os.path.isdir(file) and not os.path.islink(file): + if verbose: + print("%r: listing directory" % (file,)) + names = os.listdir(file) + for name in names: + fullname = os.path.join(file, name) + if (os.path.isdir(fullname) and + not os.path.islink(fullname) or + os.path.normcase(name[-3:]) == ".py"): + check(fullname) + return + + try: + f = tokenize.open(file) + except OSError as msg: + errprint("%r: I/O Error: %s" % (file, msg)) + return + + if verbose > 1: + print("checking %r ..." % file) + + try: + process_tokens(tokenize.generate_tokens(f.readline)) + + except tokenize.TokenError as msg: + errprint("%r: Token Error: %s" % (file, msg)) + return + + except IndentationError as msg: + errprint("%r: Indentation Error: %s" % (file, msg)) + return + + except SyntaxError as msg: + errprint("%r: Syntax Error: %s" % (file, msg)) + return + + except NannyNag as nag: + badline = nag.get_lineno() + line = nag.get_line() + if verbose: + print("%r: *** Line %d: trouble in tab city! ***" % (file, badline)) + print("offending line: %r" % (line,)) + print(nag.get_msg()) + else: + if ' ' in file: file = '"' + file + '"' + if filename_only: print(file) + else: print(file, badline, repr(line)) + return + + finally: + f.close() + + if verbose: + print("%r: Clean bill of health." % (file,)) + +class Whitespace: + # the characters used for space and tab + S, T = ' \t' + + # members: + # raw + # the original string + # n + # the number of leading whitespace characters in raw + # nt + # the number of tabs in raw[:n] + # norm + # the normal form as a pair (count, trailing), where: + # count + # a tuple such that raw[:n] contains count[i] + # instances of S * i + T + # trailing + # the number of trailing spaces in raw[:n] + # It's A Theorem that m.indent_level(t) == + # n.indent_level(t) for all t >= 1 iff m.norm == n.norm. 
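+    # For illustration: Whitespace("  \t \t").norm == ((0, 1, 1), 0) --
+    # one run of two spaces before a tab, one run of a single space
+    # before a tab, and no trailing spaces.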
+ # is_simple + # true iff raw[:n] is of the form (T*)(S*) + + def __init__(self, ws): + self.raw = ws + S, T = Whitespace.S, Whitespace.T + count = [] + b = n = nt = 0 + for ch in self.raw: + if ch == S: + n = n + 1 + b = b + 1 + elif ch == T: + n = n + 1 + nt = nt + 1 + if b >= len(count): + count = count + [0] * (b - len(count) + 1) + count[b] = count[b] + 1 + b = 0 + else: + break + self.n = n + self.nt = nt + self.norm = tuple(count), b + self.is_simple = len(count) <= 1 + + # return length of longest contiguous run of spaces (whether or not + # preceding a tab) + def longest_run_of_spaces(self): + count, trailing = self.norm + return max(len(count)-1, trailing) + + def indent_level(self, tabsize): + # count, il = self.norm + # for i in range(len(count)): + # if count[i]: + # il = il + (i//tabsize + 1)*tabsize * count[i] + # return il + + # quicker: + # il = trailing + sum (i//ts + 1)*ts*count[i] = + # trailing + ts * sum (i//ts + 1)*count[i] = + # trailing + ts * sum i//ts*count[i] + count[i] = + # trailing + ts * [(sum i//ts*count[i]) + (sum count[i])] = + # trailing + ts * [(sum i//ts*count[i]) + num_tabs] + # and note that i//ts*count[i] is 0 when i < ts + + count, trailing = self.norm + il = 0 + for i in range(tabsize, len(count)): + il = il + i//tabsize * count[i] + return trailing + tabsize * (il + self.nt) + + # return true iff self.indent_level(t) == other.indent_level(t) + # for all t >= 1 + def equal(self, other): + return self.norm == other.norm + + # return a list of tuples (ts, i1, i2) such that + # i1 == self.indent_level(ts) != other.indent_level(ts) == i2. + # Intended to be used after not self.equal(other) is known, in which + # case it will return at least one witnessing tab size. + def not_equal_witness(self, other): + n = max(self.longest_run_of_spaces(), + other.longest_run_of_spaces()) + 1 + a = [] + for ts in range(1, n+1): + if self.indent_level(ts) != other.indent_level(ts): + a.append( (ts, + self.indent_level(ts), + other.indent_level(ts)) ) + return a + + # Return True iff self.indent_level(t) < other.indent_level(t) + # for all t >= 1. + # The algorithm is due to Vincent Broman. + # Easy to prove it's correct. + # XXXpost that. + # Trivial to prove n is sharp (consider T vs ST). + # Unknown whether there's a faster general way. I suspected so at + # first, but no longer. + # For the special (but common!) case where M and N are both of the + # form (T*)(S*), M.less(N) iff M.len() < N.len() and + # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded. + # XXXwrite that up. + # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1. + def less(self, other): + if self.n >= other.n: + return False + if self.is_simple and other.is_simple: + return self.nt <= other.nt + n = max(self.longest_run_of_spaces(), + other.longest_run_of_spaces()) + 1 + # the self.n >= other.n test already did it for ts=1 + for ts in range(2, n+1): + if self.indent_level(ts) >= other.indent_level(ts): + return False + return True + + # return a list of tuples (ts, i1, i2) such that + # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2. + # Intended to be used after not self.less(other) is known, in which + # case it will return at least one witnessing tab size. 
+ def not_less_witness(self, other): + n = max(self.longest_run_of_spaces(), + other.longest_run_of_spaces()) + 1 + a = [] + for ts in range(1, n+1): + if self.indent_level(ts) >= other.indent_level(ts): + a.append( (ts, + self.indent_level(ts), + other.indent_level(ts)) ) + return a + +def format_witnesses(w): + firsts = (str(tup[0]) for tup in w) + prefix = "at tab size" + if len(w) > 1: + prefix = prefix + "s" + return prefix + " " + ', '.join(firsts) + +def process_tokens(tokens): + try: + _process_tokens(tokens) + except TabError as e: + raise NannyNag(e.lineno, e.msg, e.text) + +def _process_tokens(tokens): + INDENT = tokenize.INDENT + DEDENT = tokenize.DEDENT + NEWLINE = tokenize.NEWLINE + JUNK = tokenize.COMMENT, tokenize.NL + indents = [Whitespace("")] + check_equal = 0 + + for (type, token, start, end, line) in tokens: + if type == NEWLINE: + # a program statement, or ENDMARKER, will eventually follow, + # after some (possibly empty) run of tokens of the form + # (NL | COMMENT)* (INDENT | DEDENT+)? + # If an INDENT appears, setting check_equal is wrong, and will + # be undone when we see the INDENT. + check_equal = 1 + + elif type == INDENT: + check_equal = 0 + thisguy = Whitespace(token) + if not indents[-1].less(thisguy): + witness = indents[-1].not_less_witness(thisguy) + msg = "indent not greater e.g. " + format_witnesses(witness) + raise NannyNag(start[0], msg, line) + indents.append(thisguy) + + elif type == DEDENT: + # there's nothing we need to check here! what's important is + # that when the run of DEDENTs ends, the indentation of the + # program statement (or ENDMARKER) that triggered the run is + # equal to what's left at the top of the indents stack + + # Ouch! This assert triggers if the last line of the source + # is indented *and* lacks a newline -- then DEDENTs pop out + # of thin air. + # assert check_equal # else no earlier NEWLINE, or an earlier INDENT + check_equal = 1 + + del indents[-1] + + elif check_equal and type not in JUNK: + # this is the first "real token" following a NEWLINE, so it + # must be the first token of the next program statement, or an + # ENDMARKER; the "line" argument exposes the leading whitespace + # for this statement; in the case of ENDMARKER, line is an empty + # string, so will properly match the empty string with which the + # "indents" stack was seeded + check_equal = 0 + thisguy = Whitespace(line) + if not indents[-1].equal(thisguy): + witness = indents[-1].not_equal_witness(thisguy) + msg = "indent not equal e.g. " + format_witnesses(witness) + raise NannyNag(start[0], msg, line) + + +if __name__ == '__main__': + main() diff --git a/Python314_4_x64_Template/Lib/tarfile.py b/Python314_4_x64_Template/Lib/tarfile.py new file mode 100644 index 00000000..414aefe9 --- /dev/null +++ b/Python314_4_x64_Template/Lib/tarfile.py @@ -0,0 +1,3157 @@ +#------------------------------------------------------------------- +# tarfile.py +#------------------------------------------------------------------- +# Copyright (C) 2002 Lars Gustaebel +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +"""Read from and write to tar format archives. +""" + +version = "0.9.0" +__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" +__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." + +#--------- +# Imports +#--------- +from builtins import open as bltn_open +import sys +import os +import io +import shutil +import stat +import time +import struct +import copy +import re + +try: + import pwd +except ImportError: + pwd = None +try: + import grp +except ImportError: + grp = None + +# os.symlink on Windows prior to 6.0 raises NotImplementedError +# OSError (winerror=1314) will be raised if the caller does not hold the +# SeCreateSymbolicLinkPrivilege privilege +symlink_exception = (AttributeError, NotImplementedError, OSError) + +# from tarfile import * +__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError", "ReadError", + "CompressionError", "StreamError", "ExtractError", "HeaderError", + "ENCODING", "USTAR_FORMAT", "GNU_FORMAT", "PAX_FORMAT", + "DEFAULT_FORMAT", "open","fully_trusted_filter", "data_filter", + "tar_filter", "FilterError", "AbsoluteLinkError", + "OutsideDestinationError", "SpecialFileError", "AbsolutePathError", + "LinkOutsideDestinationError", "LinkFallbackError"] + + +#--------------------------------------------------------- +# tar constants +#--------------------------------------------------------- +NUL = b"\0" # the null character +BLOCKSIZE = 512 # length of processing blocks +RECORDSIZE = BLOCKSIZE * 20 # length of records +GNU_MAGIC = b"ustar \0" # magic gnu tar string +POSIX_MAGIC = b"ustar\x0000" # magic posix tar string + +LENGTH_NAME = 100 # maximum length of a filename +LENGTH_LINK = 100 # maximum length of a linkname +LENGTH_PREFIX = 155 # maximum length of the prefix field + +REGTYPE = b"0" # regular file +AREGTYPE = b"\0" # regular file +LNKTYPE = b"1" # link (inside tarfile) +SYMTYPE = b"2" # symbolic link +CHRTYPE = b"3" # character special device +BLKTYPE = b"4" # block special device +DIRTYPE = b"5" # directory +FIFOTYPE = b"6" # fifo special device +CONTTYPE = b"7" # contiguous file + +GNUTYPE_LONGNAME = b"L" # GNU tar longname +GNUTYPE_LONGLINK = b"K" # GNU tar longlink +GNUTYPE_SPARSE = b"S" # GNU tar sparse file + +XHDTYPE = b"x" # POSIX.1-2001 extended header +XGLTYPE = b"g" # POSIX.1-2001 global header +SOLARIS_XHDTYPE = b"X" # Solaris extended header + +USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format +GNU_FORMAT = 1 # GNU tar format +PAX_FORMAT = 2 # 
POSIX.1-2001 (pax) format +DEFAULT_FORMAT = PAX_FORMAT + +#--------------------------------------------------------- +# tarfile constants +#--------------------------------------------------------- +# File types that tarfile supports: +SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, + SYMTYPE, DIRTYPE, FIFOTYPE, + CONTTYPE, CHRTYPE, BLKTYPE, + GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# File types that will be treated as a regular file. +REGULAR_TYPES = (REGTYPE, AREGTYPE, + CONTTYPE, GNUTYPE_SPARSE) + +# File types that are part of the GNU tar format. +GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# Fields from a pax header that override a TarInfo attribute. +PAX_FIELDS = ("path", "linkpath", "size", "mtime", + "uid", "gid", "uname", "gname") + +# Fields from a pax header that are affected by hdrcharset. +PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"} + +# Fields in a pax header that are numbers, all other fields +# are treated as strings. +PAX_NUMBER_FIELDS = { + "atime": float, + "ctime": float, + "mtime": float, + "uid": int, + "gid": int, + "size": int +} + +#--------------------------------------------------------- +# initialization +#--------------------------------------------------------- +if os.name == "nt": + ENCODING = "utf-8" +else: + ENCODING = sys.getfilesystemencoding() + +#--------------------------------------------------------- +# Some useful functions +#--------------------------------------------------------- + +def stn(s, length, encoding, errors): + """Convert a string to a null-terminated bytes object. + """ + if s is None: + raise ValueError("metadata cannot contain None") + s = s.encode(encoding, errors) + return s[:length] + (length - len(s)) * NUL + +def nts(s, encoding, errors): + """Convert a null-terminated bytes object to a string. + """ + p = s.find(b"\0") + if p != -1: + s = s[:p] + return s.decode(encoding, errors) + +def nti(s): + """Convert a number field to a python number. + """ + # There are two possible encodings for a number field, see + # itn() below. + if s[0] in (0o200, 0o377): + n = 0 + for i in range(len(s) - 1): + n <<= 8 + n += s[i + 1] + if s[0] == 0o377: + n = -(256 ** (len(s) - 1) - n) + else: + try: + s = nts(s, "ascii", "strict") + n = int(s.strip() or "0", 8) + except ValueError: + raise InvalidHeaderError("invalid header") + return n + +def itn(n, digits=8, format=DEFAULT_FORMAT): + """Convert a python number to a number field. + """ + # POSIX 1003.1-1988 requires numbers to be encoded as a string of + # octal digits followed by a null-byte, this allows values up to + # (8**(digits-1))-1. GNU tar allows storing numbers greater than + # that if necessary. A leading 0o200 or 0o377 byte indicate this + # particular encoding, the following digits-1 bytes are a big-endian + # base-256 representation. This allows values up to (256**(digits-1))-1. + # A 0o200 byte indicates a positive number, a 0o377 byte a negative + # number. 
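+ # (Editor's illustration, not part of the upstream file: with digits=8, + # itn(0o777) yields b"0000777\x00", i.e. seven octal digits plus NUL, + # while itn(8**7, 8, GNU_FORMAT) yields b"\x80\x00\x00\x00\x00\x20\x00\x00", + # a 0o200 marker followed by 2097152 as a 7-byte big-endian base-256 value.)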
+ original_n = n + n = int(n) + if 0 <= n < 8 ** (digits - 1): + s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL + elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1): + if n >= 0: + s = bytearray([0o200]) + else: + s = bytearray([0o377]) + n = 256 ** digits + n + + for i in range(digits - 1): + s.insert(1, n & 0o377) + n >>= 8 + else: + raise ValueError("overflow in number field") + + return s + +def calc_chksums(buf): + """Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. + """ + unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf)) + signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf)) + return unsigned_chksum, signed_chksum + +def copyfileobj(src, dst, length=None, exception=OSError, bufsize=None): + """Copy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. + """ + bufsize = bufsize or 16 * 1024 + if length == 0: + return + if length is None: + shutil.copyfileobj(src, dst, bufsize) + return + + blocks, remainder = divmod(length, bufsize) + for b in range(blocks): + buf = src.read(bufsize) + if len(buf) < bufsize: + raise exception("unexpected end of data") + dst.write(buf) + + if remainder != 0: + buf = src.read(remainder) + if len(buf) < remainder: + raise exception("unexpected end of data") + dst.write(buf) + return + +def _safe_print(s): + encoding = getattr(sys.stdout, 'encoding', None) + if encoding is not None: + s = s.encode(encoding, 'backslashreplace').decode(encoding) + print(s, end=' ') + + +class TarError(Exception): + """Base exception.""" + pass +class ExtractError(TarError): + """General exception for extract errors.""" + pass +class ReadError(TarError): + """Exception for unreadable tar archives.""" + pass +class CompressionError(TarError): + """Exception for unavailable compression methods.""" + pass +class StreamError(TarError): + """Exception for unsupported operations on stream-like TarFiles.""" + pass +class HeaderError(TarError): + """Base exception for header errors.""" + pass +class EmptyHeaderError(HeaderError): + """Exception for empty headers.""" + pass +class TruncatedHeaderError(HeaderError): + """Exception for truncated headers.""" + pass +class EOFHeaderError(HeaderError): + """Exception for end of file headers.""" + pass +class InvalidHeaderError(HeaderError): + """Exception for invalid headers.""" + pass +class SubsequentHeaderError(HeaderError): + """Exception for missing and invalid extended headers.""" + pass + +#--------------------------- +# internal stream interface +#--------------------------- +class _LowLevelFile: + """Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + """ + + def __init__(self, name, mode): + mode = { + "r": os.O_RDONLY, + "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, + }[mode] + if hasattr(os, "O_BINARY"): + mode |= os.O_BINARY + self.fd = os.open(name, mode, 0o666) + + def close(self): + os.close(self.fd) + + def read(self, size): + return os.read(self.fd, size) + + def write(self, s): + os.write(self.fd, s) + +class _Stream: + """Class that serves as an adapter between TarFile and + a stream-like object. 
The stream-like object only + needs to have a read() or write() method that works with bytes, + and the method is accessed blockwise. + Use of gzip or bzip2 compression is possible. + A stream-like object could be for example: sys.stdin.buffer, + sys.stdout.buffer, a socket, a tape device etc. + + _Stream is intended to be used only internally. + """ + + def __init__(self, name, mode, comptype, fileobj, bufsize, + compresslevel, preset): + """Construct a _Stream object. + """ + self._extfileobj = True + if fileobj is None: + fileobj = _LowLevelFile(name, mode) + self._extfileobj = False + + if comptype == '*': + # Enable transparent compression detection for the + # stream interface + fileobj = _StreamProxy(fileobj) + comptype = fileobj.getcomptype() + + self.name = os.fspath(name) if name is not None else "" + self.mode = mode + self.comptype = comptype + self.fileobj = fileobj + self.bufsize = bufsize + self.buf = b"" + self.pos = 0 + self.closed = False + + try: + if comptype == "gz": + try: + import zlib + except ImportError: + raise CompressionError("zlib module is not available") from None + self.zlib = zlib + self.crc = zlib.crc32(b"") + if mode == "r": + self.exception = zlib.error + self._init_read_gz() + else: + self._init_write_gz(compresslevel) + + elif comptype == "bz2": + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") from None + if mode == "r": + self.dbuf = b"" + self.cmp = bz2.BZ2Decompressor() + self.exception = OSError + else: + self.cmp = bz2.BZ2Compressor(compresslevel) + + elif comptype == "xz": + try: + import lzma + except ImportError: + raise CompressionError("lzma module is not available") from None + if mode == "r": + self.dbuf = b"" + self.cmp = lzma.LZMADecompressor() + self.exception = lzma.LZMAError + else: + self.cmp = lzma.LZMACompressor(preset=preset) + elif comptype == "zst": + try: + from compression import zstd + except ImportError: + raise CompressionError("compression.zstd module is not available") from None + if mode == "r": + self.dbuf = b"" + self.cmp = zstd.ZstdDecompressor() + self.exception = zstd.ZstdError + else: + self.cmp = zstd.ZstdCompressor() + elif comptype != "tar": + raise CompressionError("unknown compression type %r" % comptype) + + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + def __del__(self): + if hasattr(self, "closed") and not self.closed: + self.close() + + def _init_write_gz(self, compresslevel): + """Initialize for writing with gzip compression. + """ + self.cmp = self.zlib.compressobj(compresslevel, + self.zlib.DEFLATED, + -self.zlib.MAX_WBITS, + self.zlib.DEF_MEM_LEVEL, + 0) + timestamp = struct.pack("<L", int(time.time())) + self.__write(b"\037\213\010\010" + timestamp + b"\002\377") + if self.name.endswith(".gz"): + self.name = self.name[:-3] + # Honor "directory components removed" from RFC1952 + self.name = os.path.basename(self.name) + # RFC1952 says we must use ISO-8859-1 for the FNAME field. + self.__write(self.name.encode("iso-8859-1", "replace") + NUL) + + def write(self, s): + """Write string s to the stream. + """ + if self.comptype == "gz": + self.crc = self.zlib.crc32(s, self.crc) + self.pos += len(s) + if self.comptype != "tar": + s = self.cmp.compress(s) + self.__write(s) + + def __write(self, s): + """Write string s to the stream if a whole new block + is ready to be written. + """ + self.buf += s + while len(self.buf) > self.bufsize: + self.fileobj.write(self.buf[:self.bufsize]) + self.buf = self.buf[self.bufsize:] + + def close(self): + """Close the _Stream object. No operation should be + done on it afterwards.
+ """ + if self.closed: + return + + self.closed = True + try: + if self.mode == "w" and self.comptype != "tar": + self.buf += self.cmp.flush() + + if self.mode == "w" and self.buf: + self.fileobj.write(self.buf) + self.buf = b"" + if self.comptype == "gz": + self.fileobj.write(struct.pack("= 0: + blocks, remainder = divmod(pos - self.pos, self.bufsize) + for i in range(blocks): + self.read(self.bufsize) + self.read(remainder) + else: + raise StreamError("seeking backwards is not allowed") + return self.pos + + def read(self, size): + """Return the next size number of bytes from the stream.""" + assert size is not None + buf = self._read(size) + self.pos += len(buf) + return buf + + def _read(self, size): + """Return size bytes from the stream. + """ + if self.comptype == "tar": + return self.__read(size) + + c = len(self.dbuf) + t = [self.dbuf] + while c < size: + # Skip underlying buffer to avoid unaligned double buffering. + if self.buf: + buf = self.buf + self.buf = b"" + else: + buf = self.fileobj.read(self.bufsize) + if not buf: + break + try: + buf = self.cmp.decompress(buf) + except self.exception as e: + raise ReadError("invalid compressed data") from e + t.append(buf) + c += len(buf) + t = b"".join(t) + self.dbuf = t[size:] + return t[:size] + + def __read(self, size): + """Return size bytes from stream. If internal buffer is empty, + read another block from the stream. + """ + c = len(self.buf) + t = [self.buf] + while c < size: + buf = self.fileobj.read(self.bufsize) + if not buf: + break + t.append(buf) + c += len(buf) + t = b"".join(t) + self.buf = t[size:] + return t[:size] +# class _Stream + +class _StreamProxy(object): + """Small proxy class that enables transparent compression + detection for the Stream interface (mode 'r|*'). + """ + + def __init__(self, fileobj): + self.fileobj = fileobj + self.buf = self.fileobj.read(BLOCKSIZE) + + def read(self, size): + self.read = self.fileobj.read + return self.buf + + def getcomptype(self): + if self.buf.startswith(b"\x1f\x8b\x08"): + return "gz" + elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY": + return "bz2" + elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")): + return "xz" + elif self.buf.startswith(b"\x28\xb5\x2f\xfd"): + return "zst" + else: + return "tar" + + def close(self): + self.fileobj.close() +# class StreamProxy + +#------------------------ +# Extraction file object +#------------------------ +class _FileInFile(object): + """A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + """ + + def __init__(self, fileobj, offset, size, name, blockinfo=None): + self.fileobj = fileobj + self.offset = offset + self.size = size + self.position = 0 + self.name = name + self.closed = False + + if blockinfo is None: + blockinfo = [(0, size)] + + # Construct a map with data and zero blocks. + self.map_index = 0 + self.map = [] + lastpos = 0 + realpos = self.offset + for offset, size in blockinfo: + if offset > lastpos: + self.map.append((False, lastpos, offset, None)) + self.map.append((True, offset, offset + size, realpos)) + realpos += size + lastpos = offset + size + if lastpos < self.size: + self.map.append((False, lastpos, self.size, None)) + + def flush(self): + pass + + @property + def mode(self): + return 'rb' + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return self.fileobj.seekable() + + def tell(self): + """Return the current file position. 
+ """ + return self.position + + def seek(self, position, whence=io.SEEK_SET): + """Seek to a position in the file. + """ + if whence == io.SEEK_SET: + self.position = min(max(position, 0), self.size) + elif whence == io.SEEK_CUR: + if position < 0: + self.position = max(self.position + position, 0) + else: + self.position = min(self.position + position, self.size) + elif whence == io.SEEK_END: + self.position = max(min(self.size + position, self.size), 0) + else: + raise ValueError("Invalid argument") + return self.position + + def read(self, size=None): + """Read data from the file. + """ + if size is None: + size = self.size - self.position + else: + size = min(size, self.size - self.position) + + buf = b"" + while size > 0: + while True: + data, start, stop, offset = self.map[self.map_index] + if start <= self.position < stop: + break + else: + self.map_index += 1 + if self.map_index == len(self.map): + self.map_index = 0 + length = min(size, stop - self.position) + if data: + self.fileobj.seek(offset + (self.position - start)) + b = self.fileobj.read(length) + if len(b) != length: + raise ReadError("unexpected end of data") + buf += b + else: + buf += NUL * length + size -= length + self.position += length + return buf + + def readinto(self, b): + buf = self.read(len(b)) + b[:len(buf)] = buf + return len(buf) + + def close(self): + self.closed = True +#class _FileInFile + +class ExFileObject(io.BufferedReader): + + def __init__(self, tarfile, tarinfo): + fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, + tarinfo.size, tarinfo.name, tarinfo.sparse) + super().__init__(fileobj) +#class ExFileObject + + +#----------------------------- +# extraction filters (PEP 706) +#----------------------------- + +class FilterError(TarError): + pass + +class AbsolutePathError(FilterError): + def __init__(self, tarinfo): + self.tarinfo = tarinfo + super().__init__(f'member {tarinfo.name!r} has an absolute path') + +class OutsideDestinationError(FilterError): + def __init__(self, tarinfo, path): + self.tarinfo = tarinfo + self._path = path + super().__init__(f'{tarinfo.name!r} would be extracted to {path!r}, ' + + 'which is outside the destination') + +class SpecialFileError(FilterError): + def __init__(self, tarinfo): + self.tarinfo = tarinfo + super().__init__(f'{tarinfo.name!r} is a special file') + +class AbsoluteLinkError(FilterError): + def __init__(self, tarinfo): + self.tarinfo = tarinfo + super().__init__(f'{tarinfo.name!r} is a link to an absolute path') + +class LinkOutsideDestinationError(FilterError): + def __init__(self, tarinfo, path): + self.tarinfo = tarinfo + self._path = path + super().__init__(f'{tarinfo.name!r} would link to {path!r}, ' + + 'which is outside the destination') + +class LinkFallbackError(FilterError): + def __init__(self, tarinfo, path): + self.tarinfo = tarinfo + self._path = path + super().__init__(f'link {tarinfo.name!r} would be extracted as a ' + + f'copy of {path!r}, which was rejected') + +# Errors caused by filters -- both "fatal" and "non-fatal" -- that +# we consider to be issues with the argument, rather than a bug in the +# filter function +_FILTER_ERRORS = (FilterError, OSError, ExtractError) + +def _get_filtered_attrs(member, dest_path, for_data=True): + new_attrs = {} + name = member.name + dest_path = os.path.realpath(dest_path, strict=os.path.ALLOW_MISSING) + # Strip leading / (tar's directory separator) from filenames. + # Include os.sep (target OS directory separator) as well. 
+ if name.startswith(('/', os.sep)): + name = new_attrs['name'] = member.path.lstrip('/' + os.sep) + if os.path.isabs(name): + # Path is absolute even after stripping. + # For example, 'C:/foo' on Windows. + raise AbsolutePathError(member) + # Ensure we stay in the destination + target_path = os.path.realpath(os.path.join(dest_path, name), + strict=os.path.ALLOW_MISSING) + if os.path.commonpath([target_path, dest_path]) != dest_path: + raise OutsideDestinationError(member, target_path) + # Limit permissions (no high bits, and go-w) + mode = member.mode + if mode is not None: + # Strip high bits & group/other write bits + mode = mode & 0o755 + if for_data: + # For data, handle permissions & file types + if member.isreg() or member.islnk(): + if not mode & 0o100: + # Clear executable bits if not executable by user + mode &= ~0o111 + # Ensure owner can read & write + mode |= 0o600 + elif member.isdir() or member.issym(): + # Ignore mode for directories & symlinks + mode = None + else: + # Reject special files + raise SpecialFileError(member) + if mode != member.mode: + new_attrs['mode'] = mode + if for_data: + # Ignore ownership for 'data' + if member.uid is not None: + new_attrs['uid'] = None + if member.gid is not None: + new_attrs['gid'] = None + if member.uname is not None: + new_attrs['uname'] = None + if member.gname is not None: + new_attrs['gname'] = None + # Check link destination for 'data' + if member.islnk() or member.issym(): + if os.path.isabs(member.linkname): + raise AbsoluteLinkError(member) + normalized = os.path.normpath(member.linkname) + if normalized != member.linkname: + new_attrs['linkname'] = normalized + if member.issym(): + target_path = os.path.join(dest_path, + os.path.dirname(name), + member.linkname) + else: + target_path = os.path.join(dest_path, + member.linkname) + target_path = os.path.realpath(target_path, + strict=os.path.ALLOW_MISSING) + if os.path.commonpath([target_path, dest_path]) != dest_path: + raise LinkOutsideDestinationError(member, target_path) + return new_attrs + +def fully_trusted_filter(member, dest_path): + return member + +def tar_filter(member, dest_path): + new_attrs = _get_filtered_attrs(member, dest_path, False) + if new_attrs: + return member.replace(**new_attrs, deep=False) + return member + +def data_filter(member, dest_path): + new_attrs = _get_filtered_attrs(member, dest_path, True) + if new_attrs: + return member.replace(**new_attrs, deep=False) + return member + +_NAMED_FILTERS = { + "fully_trusted": fully_trusted_filter, + "tar": tar_filter, + "data": data_filter, +} + +#------------------ +# Exported Classes +#------------------ + +# Sentinel for replace() defaults, meaning "don't change the attribute" +_KEEP = object() + +# Header length is digits followed by a space. +_header_length_prefix_re = re.compile(br"([0-9]{1,20}) ") + +class TarInfo(object): + """Informational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + """ + + __slots__ = dict( + name = 'Name of the archive member.', + mode = 'Permission bits.', + uid = 'User ID of the user who originally stored this member.', + gid = 'Group ID of the user who originally stored this member.', + size = 'Size in bytes.', + mtime = 'Time of last modification.', + chksum = 'Header checksum.', + type = ('File type. 
type is usually one of these constants: ' + 'REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, ' + 'CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_SPARSE.'), + linkname = ('Name of the target file name, which is only present ' + 'in TarInfo objects of type LNKTYPE and SYMTYPE.'), + uname = 'User name.', + gname = 'Group name.', + devmajor = 'Device major number.', + devminor = 'Device minor number.', + offset = 'The tar header starts here.', + offset_data = "The file's data starts here.", + pax_headers = ('A dictionary containing key-value pairs of an ' + 'associated pax extended header.'), + sparse = 'Sparse member information.', + _tarfile = None, + _sparse_structs = None, + _link_target = None, + ) + + def __init__(self, name=""): + """Construct a TarInfo object. name is the optional name + of the member. + """ + self.name = name # member name + self.mode = 0o644 # file permissions + self.uid = 0 # user id + self.gid = 0 # group id + self.size = 0 # file size + self.mtime = 0 # modification time + self.chksum = 0 # header checksum + self.type = REGTYPE # member type + self.linkname = "" # link name + self.uname = "" # user name + self.gname = "" # group name + self.devmajor = 0 # device major number + self.devminor = 0 # device minor number + + self.offset = 0 # the tar header starts here + self.offset_data = 0 # the file's data starts here + + self.sparse = None # sparse member information + self.pax_headers = {} # pax header information + + @property + def tarfile(self): + import warnings + warnings.warn( + 'The undocumented "tarfile" attribute of TarInfo objects ' + + 'is deprecated and will be removed in Python 3.16', + DeprecationWarning, stacklevel=2) + return self._tarfile + + @tarfile.setter + def tarfile(self, tarfile): + import warnings + warnings.warn( + 'The undocumented "tarfile" attribute of TarInfo objects ' + + 'is deprecated and will be removed in Python 3.16', + DeprecationWarning, stacklevel=2) + self._tarfile = tarfile + + @property + def path(self): + 'In pax headers, "name" is called "path".' + return self.name + + @path.setter + def path(self, name): + self.name = name + + @property + def linkpath(self): + 'In pax headers, "linkname" is called "linkpath".' + return self.linkname + + @linkpath.setter + def linkpath(self, linkname): + self.linkname = linkname + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) + + def replace(self, *, + name=_KEEP, mtime=_KEEP, mode=_KEEP, linkname=_KEEP, + uid=_KEEP, gid=_KEEP, uname=_KEEP, gname=_KEEP, + deep=True, _KEEP=_KEEP): + """Return a deep copy of self with the given attributes replaced. + """ + if deep: + result = copy.deepcopy(self) + else: + result = copy.copy(self) + if name is not _KEEP: + result.name = name + if mtime is not _KEEP: + result.mtime = mtime + if mode is not _KEEP: + result.mode = mode + if linkname is not _KEEP: + result.linkname = linkname + if uid is not _KEEP: + result.uid = uid + if gid is not _KEEP: + result.gid = gid + if uname is not _KEEP: + result.uname = uname + if gname is not _KEEP: + result.gname = gname + return result + + def get_info(self): + """Return the TarInfo's attributes as a dictionary. 
+ """ + if self.mode is None: + mode = None + else: + mode = self.mode & 0o7777 + info = { + "name": self.name, + "mode": mode, + "uid": self.uid, + "gid": self.gid, + "size": self.size, + "mtime": self.mtime, + "chksum": self.chksum, + "type": self.type, + "linkname": self.linkname, + "uname": self.uname, + "gname": self.gname, + "devmajor": self.devmajor, + "devminor": self.devminor + } + + if info["type"] == DIRTYPE and not info["name"].endswith("/"): + info["name"] += "/" + + return info + + def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): + """Return a tar header as a string of 512 byte blocks. + """ + info = self.get_info() + for name, value in info.items(): + if value is None: + raise ValueError("%s may not be None" % name) + + if format == USTAR_FORMAT: + return self.create_ustar_header(info, encoding, errors) + elif format == GNU_FORMAT: + return self.create_gnu_header(info, encoding, errors) + elif format == PAX_FORMAT: + return self.create_pax_header(info, encoding) + else: + raise ValueError("invalid format") + + def create_ustar_header(self, info, encoding, errors): + """Return the object as a ustar header block. + """ + info["magic"] = POSIX_MAGIC + + if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK: + raise ValueError("linkname is too long") + + if len(info["name"].encode(encoding, errors)) > LENGTH_NAME: + info["prefix"], info["name"] = self._posix_split_name(info["name"], encoding, errors) + + return self._create_header(info, USTAR_FORMAT, encoding, errors) + + def create_gnu_header(self, info, encoding, errors): + """Return the object as a GNU header block sequence. + """ + info["magic"] = GNU_MAGIC + + buf = b"" + if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK: + buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) + + if len(info["name"].encode(encoding, errors)) > LENGTH_NAME: + buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) + + return buf + self._create_header(info, GNU_FORMAT, encoding, errors) + + def create_pax_header(self, info, encoding): + """Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + """ + info["magic"] = POSIX_MAGIC + pax_headers = self.pax_headers.copy() + + # Test string fields for values that exceed the field length or cannot + # be represented in ASCII encoding. + for name, hname, length in ( + ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), + ("uname", "uname", 32), ("gname", "gname", 32)): + + if hname in pax_headers: + # The pax header has priority. + continue + + # Try to encode the string as ASCII. + try: + info[name].encode("ascii", "strict") + except UnicodeEncodeError: + pax_headers[hname] = info[name] + continue + + if len(info[name]) > length: + pax_headers[hname] = info[name] + + # Test number fields for values that exceed the field limit or values + # that like to be stored as float. + for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): + needs_pax = False + + val = info[name] + val_is_float = isinstance(val, float) + val_int = round(val) if val_is_float else val + if not 0 <= val_int < 8 ** (digits - 1): + # Avoid overflow. + info[name] = 0 + needs_pax = True + elif val_is_float: + # Put rounded value in ustar header, and full + # precision value in pax header. + info[name] = val_int + needs_pax = True + + # The existing pax header has priority. 
+ if needs_pax and name not in pax_headers: + pax_headers[name] = str(val) + + # Create a pax extended header if necessary. + if pax_headers: + buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) + else: + buf = b"" + + return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") + + @classmethod + def create_pax_global_header(cls, pax_headers): + """Return the object as a pax global header block sequence. + """ + return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8") + + def _posix_split_name(self, name, encoding, errors): + """Split a name longer than 100 chars into a prefix + and a name part. + """ + components = name.split("/") + for i in range(1, len(components)): + prefix = "/".join(components[:i]) + name = "/".join(components[i:]) + if len(prefix.encode(encoding, errors)) <= LENGTH_PREFIX and \ + len(name.encode(encoding, errors)) <= LENGTH_NAME: + break + else: + raise ValueError("name is too long") + + return prefix, name + + @staticmethod + def _create_header(info, format, encoding, errors): + """Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. + """ + has_device_fields = info.get("type") in (CHRTYPE, BLKTYPE) + if has_device_fields: + devmajor = itn(info.get("devmajor", 0), 8, format) + devminor = itn(info.get("devminor", 0), 8, format) + else: + devmajor = stn("", 8, encoding, errors) + devminor = stn("", 8, encoding, errors) + + # None values in metadata should cause ValueError. + # itn()/stn() do this for all fields except type. + filetype = info.get("type", REGTYPE) + if filetype is None: + raise ValueError("TarInfo.type must not be None") + + parts = [ + stn(info.get("name", ""), 100, encoding, errors), + itn(info.get("mode", 0) & 0o7777, 8, format), + itn(info.get("uid", 0), 8, format), + itn(info.get("gid", 0), 8, format), + itn(info.get("size", 0), 12, format), + itn(info.get("mtime", 0), 12, format), + b" ", # checksum field + filetype, + stn(info.get("linkname", ""), 100, encoding, errors), + info.get("magic", POSIX_MAGIC), + stn(info.get("uname", ""), 32, encoding, errors), + stn(info.get("gname", ""), 32, encoding, errors), + devmajor, + devminor, + stn(info.get("prefix", ""), 155, encoding, errors) + ] + + buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) + chksum = calc_chksums(buf[-BLOCKSIZE:])[0] + buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:] + return buf + + @staticmethod + def _create_payload(payload): + """Return the string payload filled with zero bytes + up to the next 512 byte border. + """ + blocks, remainder = divmod(len(payload), BLOCKSIZE) + if remainder > 0: + payload += (BLOCKSIZE - remainder) * NUL + return payload + + @classmethod + def _create_gnu_long_header(cls, name, type, encoding, errors): + """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + """ + name = name.encode(encoding, errors) + NUL + + info = {} + info["name"] = "././@LongLink" + info["type"] = type + info["size"] = len(name) + info["magic"] = GNU_MAGIC + + # create extended header + name blocks. + return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ + cls._create_payload(name) + + @classmethod + def _create_pax_generic_header(cls, pax_headers, type, encoding): + """Return a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. 
+ """ + # Check if one of the fields contains surrogate characters and thereby + # forces hdrcharset=BINARY, see _proc_pax() for more information. + binary = False + for keyword, value in pax_headers.items(): + try: + value.encode("utf-8", "strict") + except UnicodeEncodeError: + binary = True + break + + records = b"" + if binary: + # Put the hdrcharset field at the beginning of the header. + records += b"21 hdrcharset=BINARY\n" + + for keyword, value in pax_headers.items(): + keyword = keyword.encode("utf-8") + if binary: + # Try to restore the original byte representation of 'value'. + # Needless to say, that the encoding must match the string. + value = value.encode(encoding, "surrogateescape") + else: + value = value.encode("utf-8") + + l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' + n = p = 0 + while True: + n = l + len(str(p)) + if n == p: + break + p = n + records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" + + # We use a hardcoded "././@PaxHeader" name like star does + # instead of the one that POSIX recommends. + info = {} + info["name"] = "././@PaxHeader" + info["type"] = type + info["size"] = len(records) + info["magic"] = POSIX_MAGIC + + # Create pax header + record blocks. + return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ + cls._create_payload(records) + + @classmethod + def frombuf(cls, buf, encoding, errors): + """Construct a TarInfo object from a 512 byte bytes object. + + To support the old v7 tar format AREGTYPE headers are + transformed to DIRTYPE headers if their name ends in '/'. + """ + return cls._frombuf(buf, encoding, errors) + + @classmethod + def _frombuf(cls, buf, encoding, errors, *, dircheck=True): + """Construct a TarInfo object from a 512 byte bytes object. + + If ``dircheck`` is set to ``True`` then ``AREGTYPE`` headers will + be normalized to ``DIRTYPE`` if the name ends in a trailing slash. + ``dircheck`` must be set to ``False`` if this function is called + on a follow-up header such as ``GNUTYPE_LONGNAME``. + """ + if len(buf) == 0: + raise EmptyHeaderError("empty header") + if len(buf) != BLOCKSIZE: + raise TruncatedHeaderError("truncated header") + if buf.count(NUL) == BLOCKSIZE: + raise EOFHeaderError("end of file header") + + chksum = nti(buf[148:156]) + if chksum not in calc_chksums(buf): + raise InvalidHeaderError("bad checksum") + + obj = cls() + obj.name = nts(buf[0:100], encoding, errors) + obj.mode = nti(buf[100:108]) + obj.uid = nti(buf[108:116]) + obj.gid = nti(buf[116:124]) + obj.size = nti(buf[124:136]) + obj.mtime = nti(buf[136:148]) + obj.chksum = chksum + obj.type = buf[156:157] + obj.linkname = nts(buf[157:257], encoding, errors) + obj.uname = nts(buf[265:297], encoding, errors) + obj.gname = nts(buf[297:329], encoding, errors) + obj.devmajor = nti(buf[329:337]) + obj.devminor = nti(buf[337:345]) + prefix = nts(buf[345:500], encoding, errors) + + # Old V7 tar format represents a directory as a regular + # file with a trailing slash. + if dircheck and obj.type == AREGTYPE and obj.name.endswith("/"): + obj.type = DIRTYPE + + # The old GNU sparse format occupies some of the unused + # space in the buffer for up to 4 sparse structures. + # Save them for later processing in _proc_sparse(). 
+ if obj.type == GNUTYPE_SPARSE: + pos = 386 + structs = [] + for i in range(4): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[482]) + origsize = nti(buf[483:495]) + obj._sparse_structs = (structs, isextended, origsize) + + # Remove redundant slashes from directories. + if obj.isdir(): + obj.name = obj.name.rstrip("/") + + # Reconstruct a ustar longname. + if prefix and obj.type not in GNU_TYPES: + obj.name = prefix + "/" + obj.name + return obj + + @classmethod + def fromtarfile(cls, tarfile): + """Return the next TarInfo object from TarFile object + tarfile. + """ + return cls._fromtarfile(tarfile) + + @classmethod + def _fromtarfile(cls, tarfile, *, dircheck=True): + """ + See dircheck documentation in _frombuf(). + """ + buf = tarfile.fileobj.read(BLOCKSIZE) + obj = cls._frombuf(buf, tarfile.encoding, tarfile.errors, dircheck=dircheck) + obj.offset = tarfile.fileobj.tell() - BLOCKSIZE + return obj._proc_member(tarfile) + + #-------------------------------------------------------------------------- + # The following are methods that are called depending on the type of a + # member. The entry point is _proc_member() which can be overridden in a + # subclass to add custom _proc_*() methods. A _proc_*() method MUST + # implement the following + # operations: + # 1. Set self.offset_data to the position where the data blocks begin, + # if there is data that follows. + # 2. Set tarfile.offset to the position where the next member's header will + # begin. + # 3. Return self or another valid TarInfo object. + def _proc_member(self, tarfile): + """Choose the right processing method depending on + the type and call it. + """ + if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): + return self._proc_gnulong(tarfile) + elif self.type == GNUTYPE_SPARSE: + return self._proc_sparse(tarfile) + elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): + return self._proc_pax(tarfile) + else: + return self._proc_builtin(tarfile) + + def _proc_builtin(self, tarfile): + """Process a builtin type or an unknown type which + will be treated as a regular file. + """ + self.offset_data = tarfile.fileobj.tell() + offset = self.offset_data + if self.isreg() or self.type not in SUPPORTED_TYPES: + # Skip the following data blocks. + offset += self._block(self.size) + tarfile.offset = offset + + # Patch the TarInfo object with saved global + # header information. + self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) + + # Remove redundant slashes from directories. This is to be consistent + # with frombuf(). + if self.isdir(): + self.name = self.name.rstrip("/") + + return self + + def _proc_gnulong(self, tarfile): + """Process the blocks that hold a GNU longname + or longlink member. + """ + buf = tarfile.fileobj.read(self._block(self.size)) + + # Fetch the next header and process it. + try: + next = self._fromtarfile(tarfile, dircheck=False) + except HeaderError as e: + raise SubsequentHeaderError(str(e)) from None + + # Patch the TarInfo object from the next header with + # the longname information. + next.offset = self.offset + if self.type == GNUTYPE_LONGNAME: + next.name = nts(buf, tarfile.encoding, tarfile.errors) + elif self.type == GNUTYPE_LONGLINK: + next.linkname = nts(buf, tarfile.encoding, tarfile.errors) + + # Remove redundant slashes from directories. This is to be consistent + # with frombuf(). 
+ if next.isdir(): + next.name = next.name.removesuffix("/") + + return next + + def _proc_sparse(self, tarfile): + """Process a GNU sparse header plus extra headers. + """ + # We already collected some sparse structures in frombuf(). + structs, isextended, origsize = self._sparse_structs + del self._sparse_structs + + # Collect sparse structures from extended header blocks. + while isextended: + buf = tarfile.fileobj.read(BLOCKSIZE) + pos = 0 + for i in range(21): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + if offset and numbytes: + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[504]) + self.sparse = structs + + self.offset_data = tarfile.fileobj.tell() + tarfile.offset = self.offset_data + self._block(self.size) + self.size = origsize + return self + + def _proc_pax(self, tarfile): + """Process an extended or global header as described in + POSIX.1-2008. + """ + # Read the header information. + buf = tarfile.fileobj.read(self._block(self.size)) + + # A pax header stores supplemental information for either + # the following file (extended) or all following files + # (global). + if self.type == XGLTYPE: + pax_headers = tarfile.pax_headers + else: + pax_headers = tarfile.pax_headers.copy() + + # Parse pax header information. A record looks like that: + # "%d %s=%s\n" % (length, keyword, value). length is the size + # of the complete record including the length field itself and + # the newline. + pos = 0 + encoding = None + raw_headers = [] + while len(buf) > pos and buf[pos] != 0x00: + if not (match := _header_length_prefix_re.match(buf, pos)): + raise InvalidHeaderError("invalid header") + try: + length = int(match.group(1)) + except ValueError: + raise InvalidHeaderError("invalid header") + # Headers must be at least 5 bytes, shortest being '5 x=\n'. + # Value is allowed to be empty. + if length < 5: + raise InvalidHeaderError("invalid header") + if pos + length > len(buf): + raise InvalidHeaderError("invalid header") + + header_value_end_offset = match.start(1) + length - 1 # Last byte of the header + keyword_and_value = buf[match.end(1) + 1:header_value_end_offset] + raw_keyword, equals, raw_value = keyword_and_value.partition(b"=") + + # Check the framing of the header. The last character must be '\n' (0x0A) + if not raw_keyword or equals != b"=" or buf[header_value_end_offset] != 0x0A: + raise InvalidHeaderError("invalid header") + raw_headers.append((length, raw_keyword, raw_value)) + + # Check if the pax header contains a hdrcharset field. This tells us + # the encoding of the path, linkpath, uname and gname fields. Normally, + # these fields are UTF-8 encoded but since POSIX.1-2008 tar + # implementations are allowed to store them as raw binary strings if + # the translation to UTF-8 fails. For the time being, we don't care about + # anything other than "BINARY". The only other value that is currently + # allowed by the standard is "ISO-IR 10646 2000 UTF-8" in other words UTF-8. + # Note that we only follow the initial 'hdrcharset' setting to preserve + # the initial behavior of the 'tarfile' module. + if raw_keyword == b"hdrcharset" and encoding is None: + if raw_value == b"BINARY": + encoding = tarfile.encoding + else: # This branch ensures only the first 'hdrcharset' header is used. + encoding = "utf-8" + + pos += length + + # If no explicit hdrcharset is set, we use UTF-8 as a default. 
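+ # (Editor's illustration, not part of the upstream file: a raw record as + # parsed above looks like b"20 mtime=1735689600\n", where the leading "20" + # counts every byte of the record, including the length digits themselves, + # the separating space, the "=" and the trailing newline.)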
+ if encoding is None: + encoding = "utf-8" + + # After parsing the raw headers we can decode them to text. + for length, raw_keyword, raw_value in raw_headers: + # Normally, we could just use "utf-8" as the encoding and "strict" + # as the error handler, but we better not take the risk. For + # example, GNU tar <= 1.23 is known to store filenames it cannot + # translate to UTF-8 as raw strings (unfortunately without a + # hdrcharset=BINARY header). + # We first try the strict standard encoding, and if that fails we + # fall back on the user's encoding and error handler. + keyword = self._decode_pax_field(raw_keyword, "utf-8", "utf-8", + tarfile.errors) + if keyword in PAX_NAME_FIELDS: + value = self._decode_pax_field(raw_value, encoding, tarfile.encoding, + tarfile.errors) + else: + value = self._decode_pax_field(raw_value, "utf-8", "utf-8", + tarfile.errors) + + pax_headers[keyword] = value + + # Fetch the next header. + try: + next = self._fromtarfile(tarfile, dircheck=False) + except HeaderError as e: + raise SubsequentHeaderError(str(e)) from None + + # Process GNU sparse information. + if "GNU.sparse.map" in pax_headers: + # GNU extended sparse format version 0.1. + self._proc_gnusparse_01(next, pax_headers) + + elif "GNU.sparse.size" in pax_headers: + # GNU extended sparse format version 0.0. + self._proc_gnusparse_00(next, raw_headers) + + elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": + # GNU extended sparse format version 1.0. + self._proc_gnusparse_10(next, pax_headers, tarfile) + + if self.type in (XHDTYPE, SOLARIS_XHDTYPE): + # Patch the TarInfo object with the extended header info. + next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) + next.offset = self.offset + + if "size" in pax_headers: + # If the extended header replaces the size field, + # we need to recalculate the offset where the next + # header starts. + offset = next.offset_data + if next.isreg() or next.type not in SUPPORTED_TYPES: + offset += next._block(next.size) + tarfile.offset = offset + + return next + + def _proc_gnusparse_00(self, next, raw_headers): + """Process a GNU tar extended sparse header, version 0.0. + """ + offsets = [] + numbytes = [] + for _, keyword, value in raw_headers: + if keyword == b"GNU.sparse.offset": + try: + offsets.append(int(value.decode())) + except ValueError: + raise InvalidHeaderError("invalid header") + + elif keyword == b"GNU.sparse.numbytes": + try: + numbytes.append(int(value.decode())) + except ValueError: + raise InvalidHeaderError("invalid header") + + next.sparse = list(zip(offsets, numbytes)) + + def _proc_gnusparse_01(self, next, pax_headers): + """Process a GNU tar extended sparse header, version 0.1. + """ + sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _proc_gnusparse_10(self, next, pax_headers, tarfile): + """Process a GNU tar extended sparse header, version 1.0. + """ + fields = None + sparse = [] + buf = tarfile.fileobj.read(BLOCKSIZE) + fields, buf = buf.split(b"\n", 1) + fields = int(fields) + while len(sparse) < fields * 2: + if b"\n" not in buf: + buf += tarfile.fileobj.read(BLOCKSIZE) + number, buf = buf.split(b"\n", 1) + sparse.append(int(number)) + next.offset_data = tarfile.fileobj.tell() + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _apply_pax_info(self, pax_headers, encoding, errors): + """Replace fields with supplemental information from a previous + pax extended or global header. 
+ """ + for keyword, value in pax_headers.items(): + if keyword == "GNU.sparse.name": + setattr(self, "path", value) + elif keyword == "GNU.sparse.size": + setattr(self, "size", int(value)) + elif keyword == "GNU.sparse.realsize": + setattr(self, "size", int(value)) + elif keyword in PAX_FIELDS: + if keyword in PAX_NUMBER_FIELDS: + try: + value = PAX_NUMBER_FIELDS[keyword](value) + except ValueError: + value = 0 + if keyword == "path": + value = value.rstrip("/") + setattr(self, keyword, value) + + self.pax_headers = pax_headers.copy() + + def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): + """Decode a single field from a pax record. + """ + try: + return value.decode(encoding, "strict") + except UnicodeDecodeError: + return value.decode(fallback_encoding, fallback_errors) + + def _block(self, count): + """Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + """ + # Only non-negative offsets are allowed + if count < 0: + raise InvalidHeaderError("invalid offset") + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 + return blocks * BLOCKSIZE + + def isreg(self): + 'Return True if the Tarinfo object is a regular file.' + return self.type in REGULAR_TYPES + + def isfile(self): + 'Return True if the Tarinfo object is a regular file.' + return self.isreg() + + def isdir(self): + 'Return True if it is a directory.' + return self.type == DIRTYPE + + def issym(self): + 'Return True if it is a symbolic link.' + return self.type == SYMTYPE + + def islnk(self): + 'Return True if it is a hard link.' + return self.type == LNKTYPE + + def ischr(self): + 'Return True if it is a character device.' + return self.type == CHRTYPE + + def isblk(self): + 'Return True if it is a block device.' + return self.type == BLKTYPE + + def isfifo(self): + 'Return True if it is a FIFO.' + return self.type == FIFOTYPE + + def issparse(self): + return self.sparse is not None + + def isdev(self): + 'Return True if it is one of character device, block device or FIFO.' + return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) +# class TarInfo + +class TarFile(object): + """The TarFile Class provides an interface to tar archives. + """ + + debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) + + dereference = False # If true, add content of linked file to the + # tar file, else the link. + + ignore_zeros = False # If true, skips empty or invalid blocks and + # continues processing. + + errorlevel = 1 # If 0, fatal errors only appear in debug + # messages (if debug >= 0). If > 0, errors + # are passed to the caller as exceptions. + + format = DEFAULT_FORMAT # The format to use when creating an archive. + + encoding = ENCODING # Encoding for 8-bit character strings. + + errors = None # Error handler for unicode conversion. + + tarinfo = TarInfo # The default TarInfo class to use. + + fileobject = ExFileObject # The file-object for extractfile(). + + extraction_filter = None # The default filter for extraction. + + def __init__(self, name=None, mode="r", fileobj=None, format=None, + tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, + errors="surrogateescape", pax_headers=None, debug=None, + errorlevel=None, copybufsize=None, stream=False): + """Open an (uncompressed) tar archive 'name'. 'mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. 'mode' + defaults to 'r'. + If 'fileobj' is given, it is used for reading or writing data. 
If it + can be determined, 'mode' is overridden by 'fileobj's mode. + 'fileobj' is not closed, when TarFile is closed. + """ + modes = {"r": "rb", "a": "r+b", "w": "wb", "x": "xb"} + if mode not in modes: + raise ValueError("mode must be 'r', 'a', 'w' or 'x'") + self.mode = mode + self._mode = modes[mode] + + if not fileobj: + if self.mode == "a" and not os.path.exists(name): + # Create nonexistent files in append mode. + self.mode = "w" + self._mode = "wb" + fileobj = bltn_open(name, self._mode) + self._extfileobj = False + else: + if (name is None and hasattr(fileobj, "name") and + isinstance(fileobj.name, (str, bytes))): + name = fileobj.name + if hasattr(fileobj, "mode"): + self._mode = fileobj.mode + self._extfileobj = True + self.name = os.path.abspath(name) if name else None + self.fileobj = fileobj + + self.stream = stream + + # Init attributes. + if format is not None: + self.format = format + if tarinfo is not None: + self.tarinfo = tarinfo + if dereference is not None: + self.dereference = dereference + if ignore_zeros is not None: + self.ignore_zeros = ignore_zeros + if encoding is not None: + self.encoding = encoding + self.errors = errors + + if pax_headers is not None and self.format == PAX_FORMAT: + self.pax_headers = pax_headers + else: + self.pax_headers = {} + + if debug is not None: + self.debug = debug + if errorlevel is not None: + self.errorlevel = errorlevel + + # Init datastructures. + self.copybufsize = copybufsize + self.closed = False + self.members = [] # list of members as TarInfo objects + self._loaded = False # flag if all members have been read + self.offset = self.fileobj.tell() + # current position in the archive file + self.inodes = {} # dictionary caching the inodes of + # archive members already added + self._unames = {} # Cached mappings of uid -> uname + self._gnames = {} # Cached mappings of gid -> gname + + try: + if self.mode == "r": + self.firstmember = None + self.firstmember = self.next() + + if self.mode == "a": + # Move to the end of the archive, + # before the first empty block. + while True: + self.fileobj.seek(self.offset) + try: + tarinfo = self.tarinfo.fromtarfile(self) + self.members.append(tarinfo) + except EOFHeaderError: + self.fileobj.seek(self.offset) + break + except HeaderError as e: + raise ReadError(str(e)) from None + + if self.mode in ("a", "w", "x"): + self._loaded = True + + if self.pax_headers: + buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) + self.fileobj.write(buf) + self.offset += len(buf) + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + #-------------------------------------------------------------------------- + # Below are the classmethods which act as alternate constructors to the + # TarFile class. The open() method is the only one that is needed for + # public use; it is the "super"-constructor and is able to select an + # adequate "sub"-constructor for a particular compression using the mapping + # from OPEN_METH. + # + # This concept allows one to subclass TarFile without losing the comfort of + # the super-constructor. A sub-constructor is registered and made available + # by adding it to the mapping in OPEN_METH. + + @classmethod + def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): + """Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. 
+ + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'r:xz' open for reading with lzma compression + 'r:zst' open for reading with zstd compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + 'w:xz' open for writing with lzma compression + 'w:zst' open for writing with zstd compression + + 'x' or 'x:' create a tarfile exclusively without compression, raise + an exception if the file is already created + 'x:gz' create a gzip compressed tarfile, raise an exception + if the file is already created + 'x:bz2' create a bzip2 compressed tarfile, raise an exception + if the file is already created + 'x:xz' create an lzma compressed tarfile, raise an exception + if the file is already created + 'x:zst' create a zstd compressed tarfile, raise an exception + if the file is already created + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'r|xz' open an lzma compressed stream of tar blocks + 'r|zst' open a zstd compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + 'w|xz' open an lzma compressed stream for writing + 'w|zst' open a zstd compressed stream for writing + """ + + if not name and not fileobj: + raise ValueError("nothing to open") + + if mode in ("r", "r:*"): + # Find out which *open() is appropriate for opening the file. + def not_compressed(comptype): + return cls.OPEN_METH[comptype] == 'taropen' + error_msgs = [] + for comptype in sorted(cls.OPEN_METH, key=not_compressed): + func = getattr(cls, cls.OPEN_METH[comptype]) + if fileobj is not None: + saved_pos = fileobj.tell() + try: + return func(name, "r", fileobj, **kwargs) + except (ReadError, CompressionError) as e: + error_msgs.append(f'- method {comptype}: {e!r}') + if fileobj is not None: + fileobj.seek(saved_pos) + continue + error_msgs_summary = '\n'.join(error_msgs) + raise ReadError(f"file could not be opened successfully:\n{error_msgs_summary}") + + elif ":" in mode: + filemode, comptype = mode.split(":", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + # Select the *open() function according to + # given compression. 
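+ # (Editor's illustration, not part of the upstream file: mode "r:gz" splits + # into filemode "r" and comptype "gz" and dispatches to cls.gzopen through + # OPEN_METH; mode "w:" falls back to comptype "tar" and dispatches to + # cls.taropen.)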
+ if comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + else: + raise CompressionError("unknown compression type %r" % comptype) + return func(name, filemode, fileobj, **kwargs) + + elif "|" in mode: + filemode, comptype = mode.split("|", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + if filemode not in ("r", "w"): + raise ValueError("mode must be 'r' or 'w'") + if "compresslevel" in kwargs and comptype not in ("gz", "bz2"): + raise ValueError( + "compresslevel is only valid for w|gz and w|bz2 modes" + ) + if "preset" in kwargs and comptype not in ("xz",): + raise ValueError("preset is only valid for w|xz mode") + + compresslevel = kwargs.pop("compresslevel", 9) + preset = kwargs.pop("preset", None) + stream = _Stream(name, filemode, comptype, fileobj, bufsize, + compresslevel, preset) + try: + t = cls(name, filemode, stream, **kwargs) + except: + stream.close() + raise + t._extfileobj = False + return t + + elif mode in ("a", "w", "x"): + return cls.taropen(name, mode, fileobj, **kwargs) + + raise ValueError("undiscernible mode") + + @classmethod + def taropen(cls, name, mode="r", fileobj=None, **kwargs): + """Open uncompressed tar archive name for reading or writing. + """ + if mode not in ("r", "a", "w", "x"): + raise ValueError("mode must be 'r', 'a', 'w' or 'x'") + return cls(name, mode, fileobj, **kwargs) + + @classmethod + def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if mode not in ("r", "w", "x"): + raise ValueError("mode must be 'r', 'w' or 'x'") + + try: + from gzip import GzipFile + except ImportError: + raise CompressionError("gzip module is not available") from None + + try: + fileobj = GzipFile(name, mode + "b", compresslevel, fileobj) + except OSError as e: + if fileobj is not None and mode == 'r': + raise ReadError("not a gzip file") from e + raise + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except OSError as e: + fileobj.close() + if mode == 'r': + raise ReadError("not a gzip file") from e + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + @classmethod + def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open bzip2 compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if mode not in ("r", "w", "x"): + raise ValueError("mode must be 'r', 'w' or 'x'") + + try: + from bz2 import BZ2File + except ImportError: + raise CompressionError("bz2 module is not available") from None + + fileobj = BZ2File(fileobj or name, mode, compresslevel=compresslevel) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (OSError, EOFError) as e: + fileobj.close() + if mode == 'r': + raise ReadError("not a bzip2 file") from e + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + @classmethod + def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs): + """Open lzma compressed tar archive name for reading or writing. + Appending is not allowed. 
+ """ + if mode not in ("r", "w", "x"): + raise ValueError("mode must be 'r', 'w' or 'x'") + + try: + from lzma import LZMAFile, LZMAError + except ImportError: + raise CompressionError("lzma module is not available") from None + + fileobj = LZMAFile(fileobj or name, mode, preset=preset) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (LZMAError, EOFError) as e: + fileobj.close() + if mode == 'r': + raise ReadError("not an lzma file") from e + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + @classmethod + def zstopen(cls, name, mode="r", fileobj=None, level=None, options=None, + zstd_dict=None, **kwargs): + """Open zstd compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if mode not in ("r", "w", "x"): + raise ValueError("mode must be 'r', 'w' or 'x'") + + try: + from compression.zstd import ZstdFile, ZstdError + except ImportError: + raise CompressionError("compression.zstd module is not available") from None + + fileobj = ZstdFile( + fileobj or name, + mode, + level=level, + options=options, + zstd_dict=zstd_dict + ) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (ZstdError, EOFError) as e: + fileobj.close() + if mode == 'r': + raise ReadError("not a zstd file") from e + raise + except Exception: + fileobj.close() + raise + t._extfileobj = False + return t + + # All *open() methods are registered here. + OPEN_METH = { + "tar": "taropen", # uncompressed tar + "gz": "gzopen", # gzip compressed tar + "bz2": "bz2open", # bzip2 compressed tar + "xz": "xzopen", # lzma compressed tar + "zst": "zstopen", # zstd compressed tar + } + + #-------------------------------------------------------------------------- + # The public methods which TarFile provides: + + def close(self): + """Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + """ + if self.closed: + return + + self.closed = True + try: + if self.mode in ("a", "w", "x"): + self.fileobj.write(NUL * (BLOCKSIZE * 2)) + self.offset += (BLOCKSIZE * 2) + # fill up the end with zero-blocks + # (like option -b20 for tar does) + blocks, remainder = divmod(self.offset, RECORDSIZE) + if remainder > 0: + self.fileobj.write(NUL * (RECORDSIZE - remainder)) + finally: + if not self._extfileobj: + self.fileobj.close() + + def getmember(self, name): + """Return a TarInfo object for member 'name'. If 'name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + """ + tarinfo = self._getmember(name.rstrip('/')) + if tarinfo is None: + raise KeyError("filename %r not found" % name) + return tarinfo + + def getmembers(self): + """Return the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. + """ + self._check() + if not self._loaded: # if we want to obtain a list of + self._load() # all members, we first have to + # scan the whole archive. + return self.members + + def getnames(self): + """Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + """ + return [tarinfo.name for tarinfo in self.getmembers()] + + def gettarinfo(self, name=None, arcname=None, fileobj=None): + """Create a TarInfo object from the result of os.stat or equivalent + on an existing file. The file is either named by 'name', or + specified as a file object 'fileobj' with a file descriptor. 
If + given, 'arcname' specifies an alternative name for the file in the + archive, otherwise, the name is taken from the 'name' attribute of + 'fileobj', or the 'name' argument. The name should be a text + string. + """ + self._check("awx") + + # When fileobj is given, replace name by + # fileobj's real name. + if fileobj is not None: + name = fileobj.name + + # Building the name of the member in the archive. + # Backward slashes are converted to forward slashes, + # Absolute paths are turned to relative paths. + if arcname is None: + arcname = name + drv, arcname = os.path.splitdrive(arcname) + arcname = arcname.replace(os.sep, "/") + arcname = arcname.lstrip("/") + + # Now, fill the TarInfo object with + # information specific for the file. + tarinfo = self.tarinfo() + tarinfo._tarfile = self # To be removed in 3.16. + + # Use os.stat or os.lstat, depending on if symlinks shall be resolved. + if fileobj is None: + if not self.dereference: + statres = os.lstat(name) + else: + statres = os.stat(name) + else: + statres = os.fstat(fileobj.fileno()) + linkname = "" + + stmd = statres.st_mode + if stat.S_ISREG(stmd): + inode = (statres.st_ino, statres.st_dev) + if not self.dereference and statres.st_nlink > 1 and \ + inode in self.inodes and arcname != self.inodes[inode]: + # Is it a hardlink to an already + # archived file? + type = LNKTYPE + linkname = self.inodes[inode] + else: + # The inode is added only if its valid. + # For win32 it is always 0. + type = REGTYPE + if inode[0]: + self.inodes[inode] = arcname + elif stat.S_ISDIR(stmd): + type = DIRTYPE + elif stat.S_ISFIFO(stmd): + type = FIFOTYPE + elif stat.S_ISLNK(stmd): + type = SYMTYPE + linkname = os.readlink(name) + elif stat.S_ISCHR(stmd): + type = CHRTYPE + elif stat.S_ISBLK(stmd): + type = BLKTYPE + else: + return None + + # Fill the TarInfo object with all + # information we can get. + tarinfo.name = arcname + tarinfo.mode = stmd + tarinfo.uid = statres.st_uid + tarinfo.gid = statres.st_gid + if type == REGTYPE: + tarinfo.size = statres.st_size + else: + tarinfo.size = 0 + tarinfo.mtime = statres.st_mtime + tarinfo.type = type + tarinfo.linkname = linkname + + # Calls to pwd.getpwuid() and grp.getgrgid() tend to be expensive. To + # speed things up, cache the resolved usernames and group names. + if pwd: + if tarinfo.uid not in self._unames: + try: + self._unames[tarinfo.uid] = pwd.getpwuid(tarinfo.uid)[0] + except KeyError: + self._unames[tarinfo.uid] = '' + tarinfo.uname = self._unames[tarinfo.uid] + if grp: + if tarinfo.gid not in self._gnames: + try: + self._gnames[tarinfo.gid] = grp.getgrgid(tarinfo.gid)[0] + except KeyError: + self._gnames[tarinfo.gid] = '' + tarinfo.gname = self._gnames[tarinfo.gid] + + if type in (CHRTYPE, BLKTYPE): + if hasattr(os, "major") and hasattr(os, "minor"): + tarinfo.devmajor = os.major(statres.st_rdev) + tarinfo.devminor = os.minor(statres.st_rdev) + return tarinfo + + def list(self, verbose=True, *, members=None): + """Print a table of contents to sys.stdout. If 'verbose' is False, only + the names of the members are printed. If it is True, an 'ls -l'-like + output is produced. 'members' is optional and must be a subset of the + list returned by getmembers(). + """ + # Convert tarinfo type to stat type. 
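+        # Illustrative note (editorial, not upstream logic): stat.filemode()
+        # renders a mode integer as an 'ls -l'-style string, e.g.
+        #     stat.filemode(stat.S_IFDIR | 0o755)  ->  'drwxr-xr-x'
+        # Mapping the tar type byte to the matching S_IF* bit lets list()
+        # reuse that formatting for archive members below.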
+ type2mode = {REGTYPE: stat.S_IFREG, SYMTYPE: stat.S_IFLNK, + FIFOTYPE: stat.S_IFIFO, CHRTYPE: stat.S_IFCHR, + DIRTYPE: stat.S_IFDIR, BLKTYPE: stat.S_IFBLK} + self._check() + + if members is None: + members = self + for tarinfo in members: + if verbose: + if tarinfo.mode is None: + _safe_print("??????????") + else: + modetype = type2mode.get(tarinfo.type, 0) + _safe_print(stat.filemode(modetype | tarinfo.mode)) + _safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid, + tarinfo.gname or tarinfo.gid)) + if tarinfo.ischr() or tarinfo.isblk(): + _safe_print("%10s" % + ("%d,%d" % (tarinfo.devmajor, tarinfo.devminor))) + else: + _safe_print("%10d" % tarinfo.size) + if tarinfo.mtime is None: + _safe_print("????-??-?? ??:??:??") + else: + _safe_print("%d-%02d-%02d %02d:%02d:%02d" \ + % time.localtime(tarinfo.mtime)[:6]) + + _safe_print(tarinfo.name + ("/" if tarinfo.isdir() else "")) + + if verbose: + if tarinfo.issym(): + _safe_print("-> " + tarinfo.linkname) + if tarinfo.islnk(): + _safe_print("link to " + tarinfo.linkname) + print() + + def add(self, name, arcname=None, recursive=True, *, filter=None): + """Add the file 'name' to the archive. 'name' may be any type of file + (directory, fifo, symbolic link, etc.). If given, 'arcname' + specifies an alternative name for the file in the archive. + Directories are added recursively by default. This can be avoided by + setting 'recursive' to False. 'filter' is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + """ + self._check("awx") + + if arcname is None: + arcname = name + + # Skip if somebody tries to archive the archive... + if self.name is not None and os.path.abspath(name) == self.name: + self._dbg(2, "tarfile: Skipped %r" % name) + return + + self._dbg(1, name) + + # Create a TarInfo object from the file. + tarinfo = self.gettarinfo(name, arcname) + + if tarinfo is None: + self._dbg(1, "tarfile: Unsupported type %r" % name) + return + + # Change or exclude the TarInfo object. + if filter is not None: + tarinfo = filter(tarinfo) + if tarinfo is None: + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Append the tar header and data to the archive. + if tarinfo.isreg(): + with bltn_open(name, "rb") as f: + self.addfile(tarinfo, f) + + elif tarinfo.isdir(): + self.addfile(tarinfo) + if recursive: + for f in sorted(os.listdir(name)): + self.add(os.path.join(name, f), os.path.join(arcname, f), + recursive, filter=filter) + + else: + self.addfile(tarinfo) + + def addfile(self, tarinfo, fileobj=None): + """Add the TarInfo object 'tarinfo' to the archive. If 'tarinfo' represents + a non zero-size regular file, the 'fileobj' argument should be a binary file, + and tarinfo.size bytes are read from it and added to the archive. + You can create TarInfo objects directly, or by using gettarinfo(). + """ + self._check("awx") + + if fileobj is None and tarinfo.isreg() and tarinfo.size != 0: + raise ValueError("fileobj not provided for non zero-size regular file") + + tarinfo = copy.copy(tarinfo) + + buf = tarinfo.tobuf(self.format, self.encoding, self.errors) + self.fileobj.write(buf) + self.offset += len(buf) + bufsize=self.copybufsize + # If there's data to follow, append it. 
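+        # Padding sketch (illustrative): data is stored in 512-byte blocks,
+        # so a 1300-byte member spans divmod(1300, 512) == (2, 276) -> three
+        # blocks, and the final block is padded with 512 - 276 == 236 NULs,
+        # which is exactly what the arithmetic below computes.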
+ if fileobj is not None: + copyfileobj(fileobj, self.fileobj, tarinfo.size, bufsize=bufsize) + blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) + if remainder > 0: + self.fileobj.write(NUL * (BLOCKSIZE - remainder)) + blocks += 1 + self.offset += blocks * BLOCKSIZE + + self.members.append(tarinfo) + + def _get_filter_function(self, filter): + if filter is None: + filter = self.extraction_filter + if filter is None: + return data_filter + if isinstance(filter, str): + raise TypeError( + 'String names are not supported for ' + + 'TarFile.extraction_filter. Use a function such as ' + + 'tarfile.data_filter directly.') + return filter + if callable(filter): + return filter + try: + return _NAMED_FILTERS[filter] + except KeyError: + raise ValueError(f"filter {filter!r} not found") from None + + def extractall(self, path=".", members=None, *, numeric_owner=False, + filter=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. 'path' specifies a different directory + to extract to. 'members' is optional and must be a subset of the + list returned by getmembers(). If 'numeric_owner' is True, only + the numbers for user/group names are used and not the names. + + The 'filter' function will be called on each member just + before extraction. + It can return a changed TarInfo or None to skip the member. + String names of common filters are accepted. + """ + directories = [] + + filter_function = self._get_filter_function(filter) + if members is None: + members = self + + for member in members: + tarinfo, unfiltered = self._get_extract_tarinfo( + member, filter_function, path) + if tarinfo is None: + continue + if tarinfo.isdir(): + # For directories, delay setting attributes until later, + # since permissions can interfere with extraction and + # extracting contents can reset mtime. + directories.append(unfiltered) + self._extract_one(tarinfo, path, set_attrs=not tarinfo.isdir(), + numeric_owner=numeric_owner, + filter_function=filter_function) + + # Reverse sort directories. + directories.sort(key=lambda a: a.name, reverse=True) + + + # Set correct owner, mtime and filemode on directories. + for unfiltered in directories: + try: + # Need to re-apply any filter, to take the *current* filesystem + # state into account. + try: + tarinfo = filter_function(unfiltered, path) + except _FILTER_ERRORS as exc: + self._log_no_directory_fixup(unfiltered, repr(exc)) + continue + if tarinfo is None: + self._log_no_directory_fixup(unfiltered, + 'excluded by filter') + continue + dirpath = os.path.join(path, tarinfo.name) + try: + lstat = os.lstat(dirpath) + except FileNotFoundError: + self._log_no_directory_fixup(tarinfo, 'missing') + continue + if not stat.S_ISDIR(lstat.st_mode): + # This is no longer a directory; presumably a later + # member overwrote the entry. + self._log_no_directory_fixup(tarinfo, 'not a directory') + continue + self.chown(tarinfo, dirpath, numeric_owner=numeric_owner) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError as e: + self._handle_nonfatal_error(e) + + def _log_no_directory_fixup(self, member, reason): + self._dbg(2, "tarfile: Not fixing up directory %r (%s)" % + (member.name, reason)) + + def extract(self, member, path="", set_attrs=True, *, numeric_owner=False, + filter=None): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. 
'member' may be a filename or a TarInfo object. You can + specify a different directory using 'path'. File attributes (owner, + mtime, mode) are set unless 'set_attrs' is False. If 'numeric_owner' + is True, only the numbers for user/group names are used and not + the names. + + The 'filter' function will be called before extraction. + It can return a changed TarInfo or None to skip the member. + String names of common filters are accepted. + """ + filter_function = self._get_filter_function(filter) + tarinfo, unfiltered = self._get_extract_tarinfo( + member, filter_function, path) + if tarinfo is not None: + self._extract_one(tarinfo, path, set_attrs, numeric_owner) + + def _get_extract_tarinfo(self, member, filter_function, path): + """Get (filtered, unfiltered) TarInfos from *member* + + *member* might be a string. + + Return (None, None) if not found. + """ + + if isinstance(member, str): + unfiltered = self.getmember(member) + else: + unfiltered = member + + filtered = None + try: + filtered = filter_function(unfiltered, path) + except (OSError, UnicodeEncodeError, FilterError) as e: + self._handle_fatal_error(e) + except ExtractError as e: + self._handle_nonfatal_error(e) + if filtered is None: + self._dbg(2, "tarfile: Excluded %r" % unfiltered.name) + return None, None + + # Prepare the link target for makelink(). + if filtered.islnk(): + filtered = copy.copy(filtered) + filtered._link_target = os.path.join(path, filtered.linkname) + return filtered, unfiltered + + def _extract_one(self, tarinfo, path, set_attrs, numeric_owner, + filter_function=None): + """Extract from filtered tarinfo to disk. + + filter_function is only used when extracting a *different* + member (e.g. as fallback to creating a symlink) + """ + self._check("r") + + try: + self._extract_member(tarinfo, os.path.join(path, tarinfo.name), + set_attrs=set_attrs, + numeric_owner=numeric_owner, + filter_function=filter_function, + extraction_root=path) + except (OSError, UnicodeEncodeError) as e: + self._handle_fatal_error(e) + except ExtractError as e: + self._handle_nonfatal_error(e) + + def _handle_nonfatal_error(self, e): + """Handle non-fatal error (ExtractError) according to errorlevel""" + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def _handle_fatal_error(self, e): + """Handle "fatal" error according to self.errorlevel""" + if self.errorlevel > 0: + raise + elif isinstance(e, OSError): + if e.filename is None: + self._dbg(1, "tarfile: %s" % e.strerror) + else: + self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) + else: + self._dbg(1, "tarfile: %s %s" % (type(e).__name__, e)) + + def extractfile(self, member): + """Extract a member from the archive as a file object. 'member' may be + a filename or a TarInfo object. If 'member' is a regular file or + a link, an io.BufferedReader object is returned. For all other + existing members, None is returned. If 'member' does not appear + in the archive, KeyError is raised. + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES: + # Members with unknown types are treated as regular files. + return self.fileobject(self, tarinfo) + + elif tarinfo.islnk() or tarinfo.issym(): + if isinstance(self.fileobj, _Stream): + # A small but ugly workaround for the case that someone tries + # to extract a (sym)link as a file-object from a non-seekable + # stream of tar blocks. 
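+                # (Illustrative: returning the target's file object would
+                # require seeking back to earlier archive data, which a
+                # pipe-style "r|*" stream cannot do, hence the error below.)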
+ raise StreamError("cannot extract (sym)link as file object") + else: + # A (sym)link's file object is its target's file object. + return self.extractfile(self._find_link_target(tarinfo)) + else: + # If there's no data associated with the member (directory, chrdev, + # blkdev, etc.), return None instead of a file object. + return None + + def _extract_member(self, tarinfo, targetpath, set_attrs=True, + numeric_owner=False, *, filter_function=None, + extraction_root=None): + """Extract the filtered TarInfo object tarinfo to a physical + file called targetpath. + + filter_function is only used when extracting a *different* + member (e.g. as fallback to creating a symlink) + """ + # Fetch the TarInfo object for the given name + # and build the destination pathname, replacing + # forward slashes to platform specific separators. + targetpath = targetpath.rstrip("/") + targetpath = targetpath.replace("/", os.sep) + + # Create all upper directories. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + # Create directories that are not part of the archive with + # default permissions. + os.makedirs(upperdirs, exist_ok=True) + + if tarinfo.islnk() or tarinfo.issym(): + self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) + else: + self._dbg(1, tarinfo.name) + + if tarinfo.isreg(): + self.makefile(tarinfo, targetpath) + elif tarinfo.isdir(): + self.makedir(tarinfo, targetpath) + elif tarinfo.isfifo(): + self.makefifo(tarinfo, targetpath) + elif tarinfo.ischr() or tarinfo.isblk(): + self.makedev(tarinfo, targetpath) + elif tarinfo.islnk() or tarinfo.issym(): + self.makelink_with_filter( + tarinfo, targetpath, + filter_function=filter_function, + extraction_root=extraction_root) + elif tarinfo.type not in SUPPORTED_TYPES: + self.makeunknown(tarinfo, targetpath) + else: + self.makefile(tarinfo, targetpath) + + if set_attrs: + self.chown(tarinfo, targetpath, numeric_owner) + if not tarinfo.issym(): + self.chmod(tarinfo, targetpath) + self.utime(tarinfo, targetpath) + + #-------------------------------------------------------------------------- + # Below are the different file methods. They are called via + # _extract_member() when extract() is called. They can be replaced in a + # subclass to implement other functionality. + + def makedir(self, tarinfo, targetpath): + """Make a directory called targetpath. + """ + try: + if tarinfo.mode is None: + # Use the system's default mode + os.mkdir(targetpath) + else: + # Use a safe mode for the directory, the real mode is set + # later in _extract_member(). + os.mkdir(targetpath, 0o700) + except FileExistsError: + if not os.path.isdir(targetpath): + raise + + def makefile(self, tarinfo, targetpath): + """Make a file called targetpath. + """ + source = self.fileobj + source.seek(tarinfo.offset_data) + bufsize = self.copybufsize + with bltn_open(targetpath, "wb") as target: + if tarinfo.sparse is not None: + for offset, size in tarinfo.sparse: + target.seek(offset) + copyfileobj(source, target, size, ReadError, bufsize) + target.seek(tarinfo.size) + target.truncate() + else: + copyfileobj(source, target, tarinfo.size, ReadError, bufsize) + + def makeunknown(self, tarinfo, targetpath): + """Make a file from a TarInfo object with an unknown type + at targetpath. + """ + self.makefile(tarinfo, targetpath) + self._dbg(1, "tarfile: Unknown file type %r, " \ + "extracted as regular file." % tarinfo.type) + + def makefifo(self, tarinfo, targetpath): + """Make a fifo called targetpath. 
+ """ + if hasattr(os, "mkfifo"): + os.mkfifo(targetpath) + else: + raise ExtractError("fifo not supported by system") + + def makedev(self, tarinfo, targetpath): + """Make a character or block device called targetpath. + """ + if not hasattr(os, "mknod") or not hasattr(os, "makedev"): + raise ExtractError("special devices not supported by system") + + mode = tarinfo.mode + if mode is None: + # Use mknod's default + mode = 0o600 + if tarinfo.isblk(): + mode |= stat.S_IFBLK + else: + mode |= stat.S_IFCHR + + os.mknod(targetpath, mode, + os.makedev(tarinfo.devmajor, tarinfo.devminor)) + + def makelink(self, tarinfo, targetpath): + return self.makelink_with_filter(tarinfo, targetpath, None, None) + + def makelink_with_filter(self, tarinfo, targetpath, + filter_function, extraction_root): + """Make a (symbolic) link called targetpath. If it cannot be created + (platform limitation), we try to make a copy of the referenced file + instead of a link. + + filter_function is only used when extracting a *different* + member (e.g. as fallback to creating a link). + """ + keyerror_to_extracterror = False + try: + # For systems that support symbolic and hard links. + if tarinfo.issym(): + if os.path.lexists(targetpath): + # Avoid FileExistsError on following os.symlink. + os.unlink(targetpath) + os.symlink(tarinfo.linkname, targetpath) + return + else: + if os.path.exists(tarinfo._link_target): + if os.path.lexists(targetpath): + # Avoid FileExistsError on following os.link. + os.unlink(targetpath) + os.link(tarinfo._link_target, targetpath) + return + except symlink_exception: + keyerror_to_extracterror = True + + try: + unfiltered = self._find_link_target(tarinfo) + except KeyError: + if keyerror_to_extracterror: + raise ExtractError( + "unable to resolve link inside archive") from None + else: + raise + + if filter_function is None: + filtered = unfiltered + else: + if extraction_root is None: + raise ExtractError( + "makelink_with_filter: if filter_function is not None, " + + "extraction_root must also not be None") + try: + filtered = filter_function(unfiltered, extraction_root) + except _FILTER_ERRORS as cause: + raise LinkFallbackError(tarinfo, unfiltered.name) from cause + if filtered is not None: + self._extract_member(filtered, targetpath, + filter_function=filter_function, + extraction_root=extraction_root) + + def chown(self, tarinfo, targetpath, numeric_owner): + """Set owner of targetpath according to tarinfo. If numeric_owner + is True, use .gid/.uid instead of .gname/.uname. If numeric_owner + is False, fall back to .gid/.uid when the search based on name + fails. + """ + if hasattr(os, "geteuid") and os.geteuid() == 0: + # We have to be root to do so. + g = tarinfo.gid + u = tarinfo.uid + if not numeric_owner: + try: + if grp and tarinfo.gname: + g = grp.getgrnam(tarinfo.gname)[2] + except KeyError: + pass + try: + if pwd and tarinfo.uname: + u = pwd.getpwnam(tarinfo.uname)[2] + except KeyError: + pass + if g is None: + g = -1 + if u is None: + u = -1 + try: + if tarinfo.issym() and hasattr(os, "lchown"): + os.lchown(targetpath, u, g) + else: + os.chown(targetpath, u, g) + except (OSError, OverflowError) as e: + # OverflowError can be raised if an ID doesn't fit in 'id_t' + raise ExtractError("could not change owner") from e + + def chmod(self, tarinfo, targetpath): + """Set file permissions of targetpath according to tarinfo. 
+ """ + if tarinfo.mode is None: + return + try: + os.chmod(targetpath, tarinfo.mode) + except OSError as e: + raise ExtractError("could not change mode") from e + + def utime(self, tarinfo, targetpath): + """Set modification time of targetpath according to tarinfo. + """ + mtime = tarinfo.mtime + if mtime is None: + return + if not hasattr(os, 'utime'): + return + try: + os.utime(targetpath, (mtime, mtime)) + except OSError as e: + raise ExtractError("could not change modification time") from e + + #-------------------------------------------------------------------------- + def next(self): + """Return the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + """ + self._check("ra") + if self.firstmember is not None: + m = self.firstmember + self.firstmember = None + return m + + # Advance the file pointer. + if self.offset != self.fileobj.tell(): + if self.offset == 0: + return None + self.fileobj.seek(self.offset - 1) + if not self.fileobj.read(1): + raise ReadError("unexpected end of data") + + # Read the next block. + tarinfo = None + while True: + try: + tarinfo = self.tarinfo.fromtarfile(self) + except EOFHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + except InvalidHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + elif self.offset == 0: + raise ReadError(str(e)) from None + except EmptyHeaderError: + if self.offset == 0: + raise ReadError("empty file") from None + except TruncatedHeaderError as e: + if self.offset == 0: + raise ReadError(str(e)) from None + except SubsequentHeaderError as e: + raise ReadError(str(e)) from None + except Exception as e: + try: + import zlib + if isinstance(e, zlib.error): + raise ReadError(f'zlib error: {e}') from None + else: + raise e + except ImportError: + raise e + break + + if tarinfo is not None: + # if streaming the file we do not want to cache the tarinfo + if not self.stream: + self.members.append(tarinfo) + else: + self._loaded = True + + return tarinfo + + #-------------------------------------------------------------------------- + # Little helper methods: + + def _getmember(self, name, tarinfo=None, normalize=False): + """Find an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + """ + # Ensure that all members have been loaded. + members = self.getmembers() + + # Limit the member search list up to tarinfo. + skipping = False + if tarinfo is not None: + try: + index = members.index(tarinfo) + except ValueError: + # The given starting point might be a (modified) copy. + # We'll later skip members until we find an equivalent. + skipping = True + else: + # Happy fast path + members = members[:index] + + if normalize: + name = os.path.normpath(name) + + for member in reversed(members): + if skipping: + if tarinfo.offset == member.offset: + skipping = False + continue + if normalize: + member_name = os.path.normpath(member.name) + else: + member_name = member.name + + if name == member_name: + return member + + if skipping: + # Starting point was not found + raise ValueError(tarinfo) + + def _load(self): + """Read through the entire archive file and look for readable + members. This should not run if the file is set to stream. 
+ """ + if not self.stream: + while self.next() is not None: + pass + self._loaded = True + + def _check(self, mode=None): + """Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. + """ + if self.closed: + raise OSError("%s is closed" % self.__class__.__name__) + if mode is not None and self.mode not in mode: + raise OSError("bad operation for mode %r" % self.mode) + + def _find_link_target(self, tarinfo): + """Find the target member of a symlink or hardlink member in the + archive. + """ + if tarinfo.issym(): + # Always search the entire archive. + linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname))) + limit = None + else: + # Search the archive before the link, because a hard link is + # just a reference to an already archived file. + linkname = tarinfo.linkname + limit = tarinfo + + member = self._getmember(linkname, tarinfo=limit, normalize=True) + if member is None: + raise KeyError("linkname %r not found" % linkname) + return member + + def __iter__(self): + """Provide an iterator object. + """ + if self._loaded: + yield from self.members + return + + # Yield items using TarFile's next() method. + # When all members have been read, set TarFile as _loaded. + index = 0 + # Fix for SF #1100429: Under rare circumstances it can + # happen that getmembers() is called during iteration, + # which will have already exhausted the next() method. + if self.firstmember is not None: + tarinfo = self.next() + index += 1 + yield tarinfo + + while True: + if index < len(self.members): + tarinfo = self.members[index] + elif not self._loaded: + tarinfo = self.next() + if not tarinfo: + self._loaded = True + return + else: + return + index += 1 + yield tarinfo + + def _dbg(self, level, msg): + """Write debugging output to sys.stderr. + """ + if level <= self.debug: + print(msg, file=sys.stderr) + + def __enter__(self): + self._check() + return self + + def __exit__(self, type, value, traceback): + if type is None: + self.close() + else: + # An exception occurred. We must not call close() because + # it would try to write end-of-archive blocks and padding. + if not self._extfileobj: + self.fileobj.close() + self.closed = True + +#-------------------- +# exported functions +#-------------------- + +def is_tarfile(name): + """Return True if name points to a tar archive that we + are able to handle, else return False. + + 'name' should be a string, file, or file-like object. + """ + try: + if hasattr(name, "read"): + pos = name.tell() + t = open(fileobj=name) + name.seek(pos) + else: + t = open(name) + t.close() + return True + except TarError: + return False + +open = TarFile.open + + +def main(): + import argparse + + description = 'A simple command-line interface for tarfile module.' 
+ parser = argparse.ArgumentParser(description=description, color=True) + parser.add_argument('-v', '--verbose', action='store_true', default=False, + help='Verbose output') + parser.add_argument('--filter', metavar='', + choices=_NAMED_FILTERS, + help='Filter for extraction') + + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('-l', '--list', metavar='', + help='Show listing of a tarfile') + group.add_argument('-e', '--extract', nargs='+', + metavar=('', ''), + help='Extract tarfile into target dir') + group.add_argument('-c', '--create', nargs='+', + metavar=('', ''), + help='Create tarfile from sources') + group.add_argument('-t', '--test', metavar='', + help='Test if a tarfile is valid') + + args = parser.parse_args() + + if args.filter and args.extract is None: + parser.exit(1, '--filter is only valid for extraction\n') + + if args.test is not None: + src = args.test + if is_tarfile(src): + with open(src, 'r') as tar: + tar.getmembers() + print(tar.getmembers(), file=sys.stderr) + if args.verbose: + print('{!r} is a tar archive.'.format(src)) + else: + parser.exit(1, '{!r} is not a tar archive.\n'.format(src)) + + elif args.list is not None: + src = args.list + if is_tarfile(src): + with TarFile.open(src, 'r:*') as tf: + tf.list(verbose=args.verbose) + else: + parser.exit(1, '{!r} is not a tar archive.\n'.format(src)) + + elif args.extract is not None: + if len(args.extract) == 1: + src = args.extract[0] + curdir = os.curdir + elif len(args.extract) == 2: + src, curdir = args.extract + else: + parser.exit(1, parser.format_help()) + + if is_tarfile(src): + with TarFile.open(src, 'r:*') as tf: + tf.extractall(path=curdir, filter=args.filter) + if args.verbose: + if curdir == '.': + msg = '{!r} file is extracted.'.format(src) + else: + msg = ('{!r} file is extracted ' + 'into {!r} directory.').format(src, curdir) + print(msg) + else: + parser.exit(1, '{!r} is not a tar archive.\n'.format(src)) + + elif args.create is not None: + tar_name = args.create.pop(0) + _, ext = os.path.splitext(tar_name) + compressions = { + # gz + '.gz': 'gz', + '.tgz': 'gz', + # xz + '.xz': 'xz', + '.txz': 'xz', + # bz2 + '.bz2': 'bz2', + '.tbz': 'bz2', + '.tbz2': 'bz2', + '.tb2': 'bz2', + # zstd + '.zst': 'zst', + '.tzst': 'zst', + } + tar_mode = 'w:' + compressions[ext] if ext in compressions else 'w' + tar_files = args.create + + with TarFile.open(tar_name, tar_mode) as tf: + for file_name in tar_files: + tf.add(file_name) + + if args.verbose: + print('{!r} file created.'.format(tar_name)) + +if __name__ == '__main__': + main() diff --git a/Python314_4_x64_Template/Lib/tempfile.py b/Python314_4_x64_Template/Lib/tempfile.py new file mode 100644 index 00000000..a34e062f --- /dev/null +++ b/Python314_4_x64_Template/Lib/tempfile.py @@ -0,0 +1,977 @@ +"""Temporary files. + +This module provides generic, low- and high-level interfaces for +creating temporary files and directories. All of the interfaces +provided by this module can be used without fear of race conditions +except for 'mktemp'. 'mktemp' is subject to race conditions and +should not be used; it is provided for backward compatibility only. + +The default path names are returned as str. If you supply bytes as +input, all return values will be in bytes. Ex: + + >>> tempfile.mkstemp() + (4, '/tmp/tmptpu9nin8') + >>> tempfile.mkdtemp(suffix=b'') + b'/tmp/tmppbi8f0hy' + +This module also provides some data items to the user: + + TMP_MAX - maximum number of names that will be tried before + giving up. 
+ tempdir - If this is set to a string before the first use of + any routine from this module, it will be considered as + another candidate location to store temporary files. +""" + +__all__ = [ + "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces + "SpooledTemporaryFile", "TemporaryDirectory", + "mkstemp", "mkdtemp", # low level safe interfaces + "mktemp", # deprecated unsafe interface + "TMP_MAX", "gettempprefix", # constants + "tempdir", "gettempdir", + "gettempprefixb", "gettempdirb", + ] + + +# Imports. + +import functools as _functools +import warnings as _warnings +import io as _io +import os as _os +import shutil as _shutil +import errno as _errno +from random import Random as _Random +import sys as _sys +import types as _types +import weakref as _weakref +import _thread +_allocate_lock = _thread.allocate_lock + +_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL +if hasattr(_os, 'O_NOFOLLOW'): + _text_openflags |= _os.O_NOFOLLOW + +_bin_openflags = _text_openflags +if hasattr(_os, 'O_BINARY'): + _bin_openflags |= _os.O_BINARY + +# This is more than enough. +# Each name contains over 40 random bits. Even with a million temporary +# files, the chance of a conflict is less than 1 in a million, and with +# 20 attempts, it is less than 1e-120. +TMP_MAX = 20 + +# This variable _was_ unused for legacy reasons, see issue 10354. +# But as of 3.5 we actually use it at runtime so changing it would +# have a possibly desirable side effect... But we do not want to support +# that as an API. It is undocumented on purpose. Do not depend on this. +template = "tmp" + +# Internal routines. + +_once_lock = _allocate_lock() + + +def _exists(fn): + try: + _os.lstat(fn) + except OSError: + return False + else: + return True + + +def _infer_return_type(*args): + """Look at the type of all args and divine their implied return type.""" + return_type = None + for arg in args: + if arg is None: + continue + + if isinstance(arg, _os.PathLike): + arg = _os.fspath(arg) + + if isinstance(arg, bytes): + if return_type is str: + raise TypeError("Can't mix bytes and non-bytes in " + "path components.") + return_type = bytes + else: + if return_type is bytes: + raise TypeError("Can't mix bytes and non-bytes in " + "path components.") + return_type = str + if return_type is None: + if tempdir is None or isinstance(tempdir, str): + return str # tempfile APIs return a str by default. + else: + # we could check for bytes but it'll fail later on anyway + return bytes + return return_type + + +def _sanitize_params(prefix, suffix, dir): + """Common parameter processing for most APIs in this module.""" + output_type = _infer_return_type(prefix, suffix, dir) + if suffix is None: + suffix = output_type() + if prefix is None: + if output_type is str: + prefix = template + else: + prefix = _os.fsencode(template) + if dir is None: + if output_type is str: + dir = gettempdir() + else: + dir = gettempdirb() + return prefix, suffix, dir, output_type + + +class _RandomNameSequence: + """An instance of _RandomNameSequence generates an endless + sequence of unpredictable strings which can safely be incorporated + into file names. Each string is eight characters long. Multiple + threads can safely use the same instance at the same time. 
+ + _RandomNameSequence is an iterator.""" + + characters = "abcdefghijklmnopqrstuvwxyz0123456789_" + + @property + def rng(self): + cur_pid = _os.getpid() + if cur_pid != getattr(self, '_rng_pid', None): + self._rng = _Random() + self._rng_pid = cur_pid + return self._rng + + def __iter__(self): + return self + + def __next__(self): + return ''.join(self.rng.choices(self.characters, k=8)) + +def _candidate_tempdir_list(): + """Generate a list of candidate temporary directories which + _get_default_tempdir will try.""" + + dirlist = [] + + # First, try the environment. + for envname in 'TMPDIR', 'TEMP', 'TMP': + dirname = _os.getenv(envname) + if dirname: dirlist.append(dirname) + + # Failing that, try OS-specific locations. + if _os.name == 'nt': + dirlist.extend([ _os.path.expanduser(r'~\AppData\Local\Temp'), + _os.path.expandvars(r'%SYSTEMROOT%\Temp'), + r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ]) + else: + dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ]) + + # As a last resort, the current directory. + try: + dirlist.append(_os.getcwd()) + except (AttributeError, OSError): + dirlist.append(_os.curdir) + + return dirlist + +def _get_default_tempdir(dirlist=None): + """Calculate the default directory to use for temporary files. + This routine should be called exactly once. + + We determine whether or not a candidate temp dir is usable by + trying to create and write to a file in that directory. If this + is successful, the test file is deleted. To prevent denial of + service, the name of the test file must be randomized.""" + + namer = _RandomNameSequence() + if dirlist is None: + dirlist = _candidate_tempdir_list() + + for dir in dirlist: + if dir != _os.curdir: + dir = _os.path.abspath(dir) + for seq in range(TMP_MAX): + name = next(namer) + filename = _os.path.join(dir, name) + try: + fd = _os.open(filename, _bin_openflags, 0o600) + try: + try: + _os.write(fd, b'blat') + finally: + _os.close(fd) + finally: + _os.unlink(filename) + return dir + except FileExistsError: + pass + except PermissionError: + # See the comment in mkdtemp(). + if _os.name == 'nt' and _os.path.isdir(dir): + continue + break # no point trying more names in this directory + except OSError: + break # no point trying more names in this directory + raise FileNotFoundError(_errno.ENOENT, + "No usable temporary directory found in %s" % + dirlist) + +_name_sequence = None + +def _get_candidate_names(): + """Common setup sequence for all user-callable interfaces.""" + + global _name_sequence + if _name_sequence is None: + _once_lock.acquire() + try: + if _name_sequence is None: + _name_sequence = _RandomNameSequence() + finally: + _once_lock.release() + return _name_sequence + + +def _mkstemp_inner(dir, pre, suf, flags, output_type): + """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.""" + + dir = _os.path.abspath(dir) + names = _get_candidate_names() + if output_type is bytes: + names = map(_os.fsencode, names) + + for seq in range(TMP_MAX): + name = next(names) + file = _os.path.join(dir, pre + name + suf) + _sys.audit("tempfile.mkstemp", file) + try: + fd = _os.open(file, flags, 0o600) + except FileExistsError: + continue # try again + except PermissionError: + # See the comment in mkdtemp(). 
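+            # (Illustrative: on Windows, colliding with an existing
+            # *directory* surfaces as PermissionError rather than
+            # FileExistsError, so retrying is only reasonable while the
+            # parent is still a directory and attempts remain.)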
+ if _os.name == 'nt' and _os.path.isdir(dir) and seq < TMP_MAX - 1: + continue + else: + raise + return fd, file + + raise FileExistsError(_errno.EEXIST, + "No usable temporary file name found") + +def _dont_follow_symlinks(func, path, *args): + # Pass follow_symlinks=False, unless not supported on this platform. + if func in _os.supports_follow_symlinks: + func(path, *args, follow_symlinks=False) + elif not _os.path.islink(path): + func(path, *args) + +def _resetperms(path): + try: + chflags = _os.chflags + except AttributeError: + pass + else: + _dont_follow_symlinks(chflags, path, 0) + _dont_follow_symlinks(_os.chmod, path, 0o700) + + +# User visible interfaces. + +def gettempprefix(): + """The default prefix for temporary directories as string.""" + return _os.fsdecode(template) + +def gettempprefixb(): + """The default prefix for temporary directories as bytes.""" + return _os.fsencode(template) + +tempdir = None + +def _gettempdir(): + """Private accessor for tempfile.tempdir.""" + global tempdir + if tempdir is None: + _once_lock.acquire() + try: + if tempdir is None: + tempdir = _get_default_tempdir() + finally: + _once_lock.release() + return tempdir + +def gettempdir(): + """Returns tempfile.tempdir as str.""" + return _os.fsdecode(_gettempdir()) + +def gettempdirb(): + """Returns tempfile.tempdir as bytes.""" + return _os.fsencode(_gettempdir()) + +def mkstemp(suffix=None, prefix=None, dir=None, text=False): + """User-callable function to create and return a unique temporary + file. The return value is a pair (fd, name) where fd is the + file descriptor returned by os.open, and name is the filename. + + If 'suffix' is not None, the file name will end with that suffix, + otherwise there will be no suffix. + + If 'prefix' is not None, the file name will begin with that prefix, + otherwise a default prefix is used. + + If 'dir' is not None, the file will be created in that directory, + otherwise a default directory is used. + + If 'text' is specified and true, the file is opened in text + mode. Else (the default) the file is opened in binary mode. + + If any of 'suffix', 'prefix' and 'dir' are not None, they must be the + same type. If they are bytes, the returned name will be bytes; str + otherwise. + + The file is readable and writable only by the creating user ID. + If the operating system uses permission bits to indicate whether a + file is executable, the file is executable by no one. The file + descriptor is not inherited by children of this process. + + Caller is responsible for deleting the file when done with it. + """ + + prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) + + if text: + flags = _text_openflags + else: + flags = _bin_openflags + + return _mkstemp_inner(dir, prefix, suffix, flags, output_type) + + +def mkdtemp(suffix=None, prefix=None, dir=None): + """User-callable function to create and return a unique temporary + directory. The return value is the pathname of the directory. + + Arguments are as for mkstemp, except that the 'text' argument is + not accepted. + + The directory is readable, writable, and searchable only by the + creating user. + + Caller is responsible for deleting the directory when done with it. 
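+
+    For example (illustrative only):
+
+        path = mkdtemp(prefix='myapp-')   # e.g. '/tmp/myapp-k3j2a9x1'
+        ...
+        os.rmdir(path)                    # caller cleans up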
+ """ + + prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) + + names = _get_candidate_names() + if output_type is bytes: + names = map(_os.fsencode, names) + + for seq in range(TMP_MAX): + name = next(names) + file = _os.path.join(dir, prefix + name + suffix) + _sys.audit("tempfile.mkdtemp", file) + try: + _os.mkdir(file, 0o700) + except FileExistsError: + continue # try again + except PermissionError: + # On Posix, this exception is raised when the user has no + # write access to the parent directory. + # On Windows, it is also raised when a directory with + # the chosen name already exists, or if the parent directory + # is not a directory. + # We cannot distinguish between "directory-exists-error" and + # "access-denied-error". + if _os.name == 'nt' and _os.path.isdir(dir) and seq < TMP_MAX - 1: + continue + else: + raise + return _os.path.abspath(file) + + raise FileExistsError(_errno.EEXIST, + "No usable temporary directory name found") + +def mktemp(suffix="", prefix=template, dir=None): + """User-callable function to return a unique temporary file name. The + file is not created. + + Arguments are similar to mkstemp, except that the 'text' argument is + not accepted, and suffix=None, prefix=None and bytes file names are not + supported. + + THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may + refer to a file that did not exist at some point, but by the time + you get around to creating it, someone else may have beaten you to + the punch. + """ + +## from warnings import warn as _warn +## _warn("mktemp is a potential security risk to your program", +## RuntimeWarning, stacklevel=2) + + if dir is None: + dir = gettempdir() + + names = _get_candidate_names() + for seq in range(TMP_MAX): + name = next(names) + file = _os.path.join(dir, prefix + name + suffix) + if not _exists(file): + return file + + raise FileExistsError(_errno.EEXIST, + "No usable temporary filename found") + + +class _TemporaryFileCloser: + """A separate object allowing proper closing of a temporary file's + underlying file object, without adding a __del__ method to the + temporary file.""" + + cleanup_called = False + close_called = False + + def __init__( + self, + file, + name, + delete=True, + delete_on_close=True, + warn_message="Implicitly cleaning up unknown file", + ): + self.file = file + self.name = name + self.delete = delete + self.delete_on_close = delete_on_close + self.warn_message = warn_message + + def cleanup(self, windows=(_os.name == 'nt'), unlink=_os.unlink): + if not self.cleanup_called: + self.cleanup_called = True + try: + if not self.close_called: + self.close_called = True + self.file.close() + finally: + # Windows provides delete-on-close as a primitive, in which + # case the file was deleted by self.file.close(). + if self.delete and not (windows and self.delete_on_close): + try: + unlink(self.name) + except FileNotFoundError: + pass + + def close(self): + if not self.close_called: + self.close_called = True + try: + self.file.close() + finally: + if self.delete and self.delete_on_close: + self.cleanup() + + def __del__(self): + close_called = self.close_called + self.cleanup() + if not close_called: + _warnings.warn(self.warn_message, ResourceWarning) + + +class _TemporaryFileWrapper: + """Temporary file wrapper + + This class provides a wrapper around files opened for + temporary use. In particular, it seeks to automatically + remove the file when it is no longer needed. 
+ """ + + def __init__(self, file, name, delete=True, delete_on_close=True): + self.file = file + self.name = name + self._closer = _TemporaryFileCloser( + file, + name, + delete, + delete_on_close, + warn_message=f"Implicitly cleaning up {self!r}", + ) + + def __repr__(self): + file = self.__dict__['file'] + return f"<{type(self).__name__} {file=}>" + + def __getattr__(self, name): + # Attribute lookups are delegated to the underlying file + # and cached for non-numeric results + # (i.e. methods are cached, closed and friends are not) + file = self.__dict__['file'] + a = getattr(file, name) + if hasattr(a, '__call__'): + func = a + @_functools.wraps(func) + def func_wrapper(*args, **kwargs): + return func(*args, **kwargs) + # Avoid closing the file as long as the wrapper is alive, + # see issue #18879. + func_wrapper._closer = self._closer + a = func_wrapper + if not isinstance(a, int): + setattr(self, name, a) + return a + + # The underlying __enter__ method returns the wrong object + # (self.file) so override it to return the wrapper + def __enter__(self): + self.file.__enter__() + return self + + # Need to trap __exit__ as well to ensure the file gets + # deleted when used in a with statement + def __exit__(self, exc, value, tb): + result = self.file.__exit__(exc, value, tb) + self._closer.cleanup() + return result + + def close(self): + """ + Close the temporary file, possibly deleting it. + """ + self._closer.close() + + # iter() doesn't use __getattr__ to find the __iter__ method + def __iter__(self): + # Don't return iter(self.file), but yield from it to avoid closing + # file as long as it's being used as iterator (see issue #23700). We + # can't use 'yield from' here because iter(file) returns the file + # object itself, which has a close method, and thus the file would get + # closed when the generator is finalized, due to PEP380 semantics. + for line in self.file: + yield line + +def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None, + newline=None, suffix=None, prefix=None, + dir=None, delete=True, *, errors=None, + delete_on_close=True): + """Create and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + 'delete' -- whether the file is automatically deleted (default True). + 'delete_on_close' -- if 'delete', whether the file is deleted on close + (default True) or otherwise either on context manager exit + (if context manager was used) or on object finalization. . + 'errors' -- the errors argument to io.open (default None) + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface; the name of the file + is accessible as its 'name' attribute. The file will be automatically + deleted when it is closed unless the 'delete' argument is set to False. + + On POSIX, NamedTemporaryFiles cannot be automatically deleted if + the creating process is terminated abruptly with a SIGKILL signal. + Windows can delete the file even in this case. + """ + + prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) + + flags = _bin_openflags + + # Setting O_TEMPORARY in the flags causes the OS to delete + # the file when it is closed. This is only supported by Windows. 
+ if _os.name == 'nt' and delete and delete_on_close: + flags |= _os.O_TEMPORARY + + if "b" not in mode: + encoding = _io.text_encoding(encoding) + + name = None + def opener(*args): + nonlocal name + fd, name = _mkstemp_inner(dir, prefix, suffix, flags, output_type) + return fd + try: + file = _io.open(dir, mode, buffering=buffering, + newline=newline, encoding=encoding, errors=errors, + opener=opener) + try: + raw = getattr(file, 'buffer', file) + raw = getattr(raw, 'raw', raw) + raw.name = name + return _TemporaryFileWrapper(file, name, delete, delete_on_close) + except: + file.close() + raise + except: + if name is not None and not ( + _os.name == 'nt' and delete and delete_on_close): + _os.unlink(name) + raise + +if _os.name != 'posix' or _sys.platform == 'cygwin': + # On non-POSIX and Cygwin systems, assume that we cannot unlink a file + # while it is open. + TemporaryFile = NamedTemporaryFile + +else: + # Is the O_TMPFILE flag available and does it work? + # The flag is set to False if os.open(dir, os.O_TMPFILE) raises an + # IsADirectoryError exception + _O_TMPFILE_WORKS = hasattr(_os, 'O_TMPFILE') + + def TemporaryFile(mode='w+b', buffering=-1, encoding=None, + newline=None, suffix=None, prefix=None, + dir=None, *, errors=None): + """Create and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + 'errors' -- the errors argument to io.open (default None) + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface. The file has no + name, and will cease to exist when it is closed. + """ + global _O_TMPFILE_WORKS + + if "b" not in mode: + encoding = _io.text_encoding(encoding) + + prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) + + flags = _bin_openflags + if _O_TMPFILE_WORKS: + fd = None + def opener(*args): + nonlocal fd + flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT + fd = _os.open(dir, flags2, 0o600) + return fd + try: + file = _io.open(dir, mode, buffering=buffering, + newline=newline, encoding=encoding, + errors=errors, opener=opener) + raw = getattr(file, 'buffer', file) + raw = getattr(raw, 'raw', raw) + raw.name = fd + return file + except IsADirectoryError: + # Linux kernel older than 3.11 ignores the O_TMPFILE flag: + # O_TMPFILE is read as O_DIRECTORY. Trying to open a directory + # with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a + # directory cannot be open to write. Set flag to False to not + # try again. + _O_TMPFILE_WORKS = False + except OSError: + # The filesystem of the directory does not support O_TMPFILE. + # For example, OSError(95, 'Operation not supported'). + # + # On Linux kernel older than 3.11, trying to open a regular + # file (or a symbolic link to a regular file) with O_TMPFILE + # fails with NotADirectoryError, because O_TMPFILE is read as + # O_DIRECTORY. + pass + # Fallback to _mkstemp_inner(). 
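+        # (Illustrative: the fallback creates a named file and immediately
+        # unlink()s it, leaving an anonymous inode that lives only as long
+        # as the open descriptor -- the classic POSIX stand-in for
+        # O_TMPFILE.)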
+ + fd = None + def opener(*args): + nonlocal fd + fd, name = _mkstemp_inner(dir, prefix, suffix, flags, output_type) + try: + _os.unlink(name) + except BaseException as e: + _os.close(fd) + raise + return fd + file = _io.open(dir, mode, buffering=buffering, + newline=newline, encoding=encoding, errors=errors, + opener=opener) + raw = getattr(file, 'buffer', file) + raw = getattr(raw, 'raw', raw) + raw.name = fd + return file + +class SpooledTemporaryFile(_io.IOBase): + """Temporary file wrapper, specialized to switch from BytesIO + or StringIO to a real file when it exceeds a certain size or + when a fileno is needed. + """ + _rolled = False + + def __init__(self, max_size=0, mode='w+b', buffering=-1, + encoding=None, newline=None, + suffix=None, prefix=None, dir=None, *, errors=None): + if 'b' in mode: + self._file = _io.BytesIO() + else: + encoding = _io.text_encoding(encoding) + self._file = _io.TextIOWrapper(_io.BytesIO(), + encoding=encoding, errors=errors, + newline=newline) + self._max_size = max_size + self._rolled = False + self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering, + 'suffix': suffix, 'prefix': prefix, + 'encoding': encoding, 'newline': newline, + 'dir': dir, 'errors': errors} + + __class_getitem__ = classmethod(_types.GenericAlias) + + def _check(self, file): + if self._rolled: return + max_size = self._max_size + if max_size and file.tell() > max_size: + self.rollover() + + def rollover(self): + if self._rolled: return + file = self._file + newfile = self._file = TemporaryFile(**self._TemporaryFileArgs) + del self._TemporaryFileArgs + + pos = file.tell() + if hasattr(newfile, 'buffer'): + newfile.buffer.write(file.detach().getvalue()) + else: + newfile.write(file.getvalue()) + newfile.seek(pos, 0) + + self._rolled = True + + # The method caching trick from NamedTemporaryFile + # won't work here, because _file may change from a + # BytesIO/StringIO instance to a real file. So we list + # all the methods directly. 
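+
+    # Usage sketch (illustrative): data stays in memory until max_size is
+    # exceeded, then rollover() transparently moves it to a real file:
+    #     spool = SpooledTemporaryFile(max_size=1024)
+    #     spool.write(b'x' * 2048)   # exceeds max_size -> rolls over
+    #     spool.seek(0)              # keeps working against the real file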
+ + # Context management protocol + def __enter__(self): + if self._file.closed: + raise ValueError("Cannot enter context with closed file") + return self + + def __exit__(self, exc, value, tb): + self._file.close() + + # file protocol + def __iter__(self): + return self._file.__iter__() + + def __del__(self): + if not self.closed: + _warnings.warn( + "Unclosed file {!r}".format(self), + ResourceWarning, + stacklevel=2, + source=self + ) + self.close() + + def close(self): + self._file.close() + + @property + def closed(self): + return self._file.closed + + @property + def encoding(self): + return self._file.encoding + + @property + def errors(self): + return self._file.errors + + def fileno(self): + self.rollover() + return self._file.fileno() + + def flush(self): + self._file.flush() + + def isatty(self): + return self._file.isatty() + + @property + def mode(self): + try: + return self._file.mode + except AttributeError: + return self._TemporaryFileArgs['mode'] + + @property + def name(self): + try: + return self._file.name + except AttributeError: + return None + + @property + def newlines(self): + return self._file.newlines + + def readable(self): + return self._file.readable() + + def read(self, *args): + return self._file.read(*args) + + def read1(self, *args): + return self._file.read1(*args) + + def readinto(self, b): + return self._file.readinto(b) + + def readinto1(self, b): + return self._file.readinto1(b) + + def readline(self, *args): + return self._file.readline(*args) + + def readlines(self, *args): + return self._file.readlines(*args) + + def seekable(self): + return self._file.seekable() + + def seek(self, *args): + return self._file.seek(*args) + + def tell(self): + return self._file.tell() + + def truncate(self, size=None): + if size is None: + return self._file.truncate() + else: + if size > self._max_size: + self.rollover() + return self._file.truncate(size) + + def writable(self): + return self._file.writable() + + def write(self, s): + file = self._file + rv = file.write(s) + self._check(file) + return rv + + def writelines(self, iterable): + if self._max_size == 0 or self._rolled: + return self._file.writelines(iterable) + + it = iter(iterable) + for line in it: + self.write(line) + if self._rolled: + return self._file.writelines(it) + + def detach(self): + return self._file.detach() + + +class TemporaryDirectory: + """Create and return a temporary directory. This has the same + behavior as mkdtemp but can be used as a context manager. For + example: + + with TemporaryDirectory() as tmpdir: + ... + + Upon exiting the context, the directory and everything contained + in it are removed (unless delete=False is passed or an exception + is raised during cleanup and ignore_cleanup_errors is not True). + + Optional Arguments: + suffix - A str suffix for the directory name. (see mkdtemp) + prefix - A str prefix for the directory name. (see mkdtemp) + dir - A directory to create this temp dir in. (see mkdtemp) + ignore_cleanup_errors - False; ignore exceptions during cleanup? + delete - True; whether the directory is automatically deleted. 
+ """ + + def __init__(self, suffix=None, prefix=None, dir=None, + ignore_cleanup_errors=False, *, delete=True): + self.name = mkdtemp(suffix, prefix, dir) + self._ignore_cleanup_errors = ignore_cleanup_errors + self._delete = delete + self._finalizer = _weakref.finalize( + self, self._cleanup, self.name, + warn_message="Implicitly cleaning up {!r}".format(self), + ignore_errors=self._ignore_cleanup_errors, delete=self._delete) + + @classmethod + def _rmtree(cls, name, ignore_errors=False, repeated=False): + def onexc(func, path, exc): + if isinstance(exc, PermissionError): + if repeated and path == name: + if ignore_errors: + return + raise + + try: + if path != name: + _resetperms(_os.path.dirname(path)) + _resetperms(path) + + try: + _os.unlink(path) + except IsADirectoryError: + cls._rmtree(path, ignore_errors=ignore_errors) + except PermissionError: + # The PermissionError handler was originally added for + # FreeBSD in directories, but it seems that it is raised + # on Windows too. + # bpo-43153: Calling _rmtree again may + # raise NotADirectoryError and mask the PermissionError. + # So we must re-raise the current PermissionError if + # path is not a directory. + if not _os.path.isdir(path) or _os.path.isjunction(path): + if ignore_errors: + return + raise + cls._rmtree(path, ignore_errors=ignore_errors, + repeated=(path == name)) + except FileNotFoundError: + pass + elif isinstance(exc, FileNotFoundError): + pass + else: + if not ignore_errors: + raise + + _shutil.rmtree(name, onexc=onexc) + + @classmethod + def _cleanup(cls, name, warn_message, ignore_errors=False, delete=True): + if delete: + cls._rmtree(name, ignore_errors=ignore_errors) + _warnings.warn(warn_message, ResourceWarning) + + def __repr__(self): + return "<{} {!r}>".format(self.__class__.__name__, self.name) + + def __enter__(self): + return self.name + + def __exit__(self, exc, value, tb): + if self._delete: + self.cleanup() + + def cleanup(self): + if self._finalizer.detach() or _os.path.exists(self.name): + self._rmtree(self.name, ignore_errors=self._ignore_cleanup_errors) + + __class_getitem__ = classmethod(_types.GenericAlias) diff --git a/Python314_4_x64_Template/Lib/textwrap.py b/Python314_4_x64_Template/Lib/textwrap.py new file mode 100644 index 00000000..41366fbf --- /dev/null +++ b/Python314_4_x64_Template/Lib/textwrap.py @@ -0,0 +1,475 @@ +"""Text wrapping and filling. +""" + +# Copyright (C) 1999-2001 Gregory P. Ward. +# Copyright (C) 2002 Python Software Foundation. +# Written by Greg Ward + +import re + +__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent', 'shorten'] + +# Hardcode the recognized whitespace characters to the US-ASCII +# whitespace characters. The main reason for doing this is that +# some Unicode spaces (like \u00a0) are non-breaking whitespaces. +_whitespace = '\t\n\x0b\x0c\r ' + +class TextWrapper: + """ + Object for wrapping/filling text. The public interface consists of + the wrap() and fill() methods; the other methods are just there for + subclasses to override in order to tweak the default behaviour. + If you want to completely replace the main wrapping algorithm, + you'll probably have to override _wrap_chunks(). + + Several instance attributes control various aspects of wrapping: + width (default: 70) + the maximum width of wrapped lines (unless break_long_words + is false) + initial_indent (default: "") + string that will be prepended to the first line of wrapped + output. Counts towards the line's width. 
+ subsequent_indent (default: "") + string that will be prepended to all lines save the first + of wrapped output; also counts towards each line's width. + expand_tabs (default: true) + Expand tabs in input text to spaces before further processing. + Each tab will become 0 .. 'tabsize' spaces, depending on its position + in its line. If false, each tab is treated as a single character. + tabsize (default: 8) + Expand tabs in input text to 0 .. 'tabsize' spaces, unless + 'expand_tabs' is false. + replace_whitespace (default: true) + Replace all whitespace characters in the input text by spaces + after tab expansion. Note that if expand_tabs is false and + replace_whitespace is true, every tab will be converted to a + single space! + fix_sentence_endings (default: false) + Ensure that sentence-ending punctuation is always followed + by two spaces. Off by default because the algorithm is + (unavoidably) imperfect. + break_long_words (default: true) + Break words longer than 'width'. If false, those words will not + be broken, and some lines might be longer than 'width'. + break_on_hyphens (default: true) + Allow breaking hyphenated words. If true, wrapping will occur + preferably on whitespaces and right after hyphens part of + compound words. + drop_whitespace (default: true) + Drop leading and trailing whitespace from lines. + max_lines (default: None) + Truncate wrapped lines. + placeholder (default: ' [...]') + Append to the last line of truncated text. + """ + + unicode_whitespace_trans = dict.fromkeys(map(ord, _whitespace), ord(' ')) + + # This funky little regex is just the trick for splitting + # text up into word-wrappable chunks. E.g. + # "Hello there -- you goof-ball, use the -b option!" + # splits into + # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! + # (after stripping out empty strings). + word_punct = r'[\w!"\'&.,?]' + letter = r'[^\d\W]' + whitespace = r'[%s]' % re.escape(_whitespace) + nowhitespace = '[^' + whitespace[1:] + wordsep_re = re.compile(r''' + ( # any whitespace + %(ws)s+ + | # em-dash between words + (?<=%(wp)s) -{2,} (?=\w) + | # word, possibly hyphenated + %(nws)s+? (?: + # hyphenated word + -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-)) + (?= %(lt)s -? %(lt)s) + | # end of word + (?=%(ws)s|\z) + | # em-dash + (?<=%(wp)s) (?=-{2,}\w) + ) + )''' % {'wp': word_punct, 'lt': letter, + 'ws': whitespace, 'nws': nowhitespace}, + re.VERBOSE) + del word_punct, letter, nowhitespace + + # This less funky little regex just split on recognized spaces. E.g. + # "Hello there -- you goof-ball, use the -b option!" + # splits into + # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/ + wordsep_simple_re = re.compile(r'(%s+)' % whitespace) + del whitespace + + # XXX this is not locale- or charset-aware -- string.lowercase + # is US-ASCII only (and therefore English-only) + sentence_end_re = re.compile(r'[a-z]' # lowercase letter + r'[\.\!\?]' # sentence-ending punct. + r'[\"\']?' 
# optional end-of-quote + r'\z') # end of chunk + + def __init__(self, + width=70, + initial_indent="", + subsequent_indent="", + expand_tabs=True, + replace_whitespace=True, + fix_sentence_endings=False, + break_long_words=True, + drop_whitespace=True, + break_on_hyphens=True, + tabsize=8, + *, + max_lines=None, + placeholder=' [...]'): + self.width = width + self.initial_indent = initial_indent + self.subsequent_indent = subsequent_indent + self.expand_tabs = expand_tabs + self.replace_whitespace = replace_whitespace + self.fix_sentence_endings = fix_sentence_endings + self.break_long_words = break_long_words + self.drop_whitespace = drop_whitespace + self.break_on_hyphens = break_on_hyphens + self.tabsize = tabsize + self.max_lines = max_lines + self.placeholder = placeholder + + + # -- Private methods ----------------------------------------------- + # (possibly useful for subclasses to override) + + def _munge_whitespace(self, text): + """_munge_whitespace(text : string) -> string + + Munge whitespace in text: expand tabs and convert all other + whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz" + becomes " foo bar baz". + """ + if self.expand_tabs: + text = text.expandtabs(self.tabsize) + if self.replace_whitespace: + text = text.translate(self.unicode_whitespace_trans) + return text + + + def _split(self, text): + """_split(text : string) -> [string] + + Split the text to wrap into indivisible chunks. Chunks are + not quite the same as words; see _wrap_chunks() for full + details. As an example, the text + Look, goof-ball -- use the -b option! + breaks into the following chunks: + 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', 'option!' + if break_on_hyphens is True, or in: + 'Look,', ' ', 'goof-ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', option!' + otherwise. + """ + if self.break_on_hyphens is True: + chunks = self.wordsep_re.split(text) + else: + chunks = self.wordsep_simple_re.split(text) + chunks = [c for c in chunks if c] + return chunks + + def _fix_sentence_endings(self, chunks): + """_fix_sentence_endings(chunks : [string]) + + Correct for sentence endings buried in 'chunks'. Eg. when the + original text contains "... foo.\\nBar ...", munge_whitespace() + and split() will convert that to [..., "foo.", " ", "Bar", ...] + which has one too few spaces; this method simply changes the one + space to two. + """ + i = 0 + patsearch = self.sentence_end_re.search + while i < len(chunks)-1: + if chunks[i+1] == " " and patsearch(chunks[i]): + chunks[i+1] = " " + i += 2 + else: + i += 1 + + def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + """_handle_long_word(chunks : [string], + cur_line : [string], + cur_len : int, width : int) + + Handle a chunk of text (most likely a word, not whitespace) that + is too long to fit in any line. + """ + # Figure out when indent is larger than the specified width, and make + # sure at least one character is stripped off on every pass + if width < 1: + space_left = 1 + else: + space_left = width - cur_len + + # If we're allowed to break long words, then do so: put as much + # of the next chunk onto the current line as will fit. 
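The effect of break_long_words, implemented by _handle_long_word(), can be seen with a word longer than the requested width (the word here is arbitrary):

    import textwrap

    print(textwrap.wrap("antidisestablishmentarianism", width=10))
    # ['antidisest', 'ablishment', 'arianism']
    print(textwrap.wrap("antidisestablishmentarianism", width=10,
                        break_long_words=False))
    # ['antidisestablishmentarianism']  (one over-long line)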
+ if self.break_long_words and space_left > 0: + end = space_left + chunk = reversed_chunks[-1] + if self.break_on_hyphens and len(chunk) > space_left: + # break after last hyphen, but only if there are + # non-hyphens before it + hyphen = chunk.rfind('-', 0, space_left) + if hyphen > 0 and any(c != '-' for c in chunk[:hyphen]): + end = hyphen + 1 + cur_line.append(chunk[:end]) + reversed_chunks[-1] = chunk[end:] + + # Otherwise, we have to preserve the long word intact. Only add + # it to the current line if there's nothing already there -- + # that minimizes how much we violate the width constraint. + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + # If we're not allowed to break long words, and there's already + # text on the current line, do nothing. Next time through the + # main loop of _wrap_chunks(), we'll wind up here again, but + # cur_len will be zero, so the next line will be entirely + # devoted to the long word that we can't handle right now. + + def _wrap_chunks(self, chunks): + """_wrap_chunks(chunks : [string]) -> [string] + + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; ie. a chunk is either all whitespace or a "word". + Whitespace chunks will be removed from the beginning and end of + lines, but apart from that whitespace is preserved. + """ + lines = [] + if self.width <= 0: + raise ValueError("invalid width %r (must be > 0)" % self.width) + if self.max_lines is not None: + if self.max_lines > 1: + indent = self.subsequent_indent + else: + indent = self.initial_indent + if len(indent) + len(self.placeholder.lstrip()) > self.width: + raise ValueError("placeholder too large for max width") + + # Arrange in reverse order so items can be efficiently popped + # from a stack of chucks. + chunks.reverse() + + while chunks: + + # Start the list of chunks that will make up the current line. + # cur_len is just the length of all the chunks in cur_line. + cur_line = [] + cur_len = 0 + + # Figure out which static string will prefix this line. + if lines: + indent = self.subsequent_indent + else: + indent = self.initial_indent + + # Maximum width for this line. + width = self.width - len(indent) + + # First chunk on line is whitespace -- drop it, unless this + # is the very beginning of the text (ie. no lines started yet). + if self.drop_whitespace and chunks[-1].strip() == '' and lines: + del chunks[-1] + + while chunks: + l = len(chunks[-1]) + + # Can at least squeeze this chunk onto the current line. + if cur_len + l <= width: + cur_line.append(chunks.pop()) + cur_len += l + + # Nope, this line is full. + else: + break + + # The current line is full, and the next chunk is too big to + # fit on *any* line (not just this one). + if chunks and len(chunks[-1]) > width: + self._handle_long_word(chunks, cur_line, cur_len, width) + cur_len = sum(map(len, cur_line)) + + # If the last chunk on this line is all whitespace, drop it. 
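Putting the pieces together, a small TextWrapper configured with the indent options described in the class docstring (the width and indent strings are chosen arbitrarily):

    import textwrap

    wrapper = textwrap.TextWrapper(width=20, initial_indent="* ",
                                   subsequent_indent="  ")
    print(wrapper.fill("The quick brown fox jumps over the lazy dog"))
    # * The quick brown
    #   fox jumps over the
    #   lazy dog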
+ if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': + cur_len -= len(cur_line[-1]) + del cur_line[-1] + + if cur_line: + if (self.max_lines is None or + len(lines) + 1 < self.max_lines or + (not chunks or + self.drop_whitespace and + len(chunks) == 1 and + not chunks[0].strip()) and cur_len <= width): + # Convert current line back to a string and store it in + # list of all lines (return value). + lines.append(indent + ''.join(cur_line)) + else: + while cur_line: + if (cur_line[-1].strip() and + cur_len + len(self.placeholder) <= width): + cur_line.append(self.placeholder) + lines.append(indent + ''.join(cur_line)) + break + cur_len -= len(cur_line[-1]) + del cur_line[-1] + else: + if lines: + prev_line = lines[-1].rstrip() + if (len(prev_line) + len(self.placeholder) <= + self.width): + lines[-1] = prev_line + self.placeholder + break + lines.append(indent + self.placeholder.lstrip()) + break + + return lines + + def _split_chunks(self, text): + text = self._munge_whitespace(text) + return self._split(text) + + # -- Public interface ---------------------------------------------- + + def wrap(self, text): + """wrap(text : string) -> [string] + + Reformat the single paragraph in 'text' so it fits in lines of + no more than 'self.width' columns, and return a list of wrapped + lines. Tabs in 'text' are expanded with string.expandtabs(), + and all other whitespace characters (including newline) are + converted to space. + """ + chunks = self._split_chunks(text) + if self.fix_sentence_endings: + self._fix_sentence_endings(chunks) + return self._wrap_chunks(chunks) + + def fill(self, text): + """fill(text : string) -> string + + Reformat the single paragraph in 'text' to fit in lines of no + more than 'self.width' columns, and return a new string + containing the entire wrapped paragraph. + """ + return "\n".join(self.wrap(text)) + + +# -- Convenience interface --------------------------------------------- + +def wrap(text, width=70, **kwargs): + """Wrap a single paragraph of text, returning a list of wrapped lines. + + Reformat the single paragraph in 'text' so it fits in lines of no + more than 'width' columns, and return a list of wrapped lines. By + default, tabs in 'text' are expanded with string.expandtabs(), and + all other whitespace characters (including newline) are converted to + space. See TextWrapper class for available keyword args to customize + wrapping behaviour. + """ + w = TextWrapper(width=width, **kwargs) + return w.wrap(text) + +def fill(text, width=70, **kwargs): + """Fill a single paragraph of text, returning a new string. + + Reformat the single paragraph in 'text' to fit in lines of no more + than 'width' columns, and return a new string containing the entire + wrapped paragraph. As with wrap(), tabs are expanded and other + whitespace characters converted to space. See TextWrapper class for + available keyword args to customize wrapping behaviour. + """ + w = TextWrapper(width=width, **kwargs) + return w.fill(text) + +def shorten(text, width, **kwargs): + """Collapse and truncate the given text to fit in the given width. + + The text first has its whitespace collapsed. If it then fits in + the *width*, it is returned as is. Otherwise, as many words + as possible are joined and then the placeholder is appended:: + + >>> textwrap.shorten("Hello world!", width=12) + 'Hello world!' 
+ >>> textwrap.shorten("Hello world!", width=11) + 'Hello [...]' + """ + w = TextWrapper(width=width, max_lines=1, **kwargs) + return w.fill(' '.join(text.strip().split())) + + +# -- Loosely related functionality ------------------------------------- + +def dedent(text): + """Remove any common leading whitespace from every line in `text`. + + This can be used to make triple-quoted strings line up with the left + edge of the display, while still presenting them in the source code + in indented form. + + Note that tabs and spaces are both treated as whitespace, but they + are not equal: the lines " hello" and "\\thello" are + considered to have no common leading whitespace. + + Entirely blank lines are normalized to a newline character. + """ + try: + lines = text.split('\n') + except (AttributeError, TypeError): + msg = f'expected str object, not {type(text).__qualname__!r}' + raise TypeError(msg) from None + + # Get length of leading whitespace, inspired by ``os.path.commonprefix()``. + non_blank_lines = [l for l in lines if l and not l.isspace()] + l1 = min(non_blank_lines, default='') + l2 = max(non_blank_lines, default='') + margin = 0 + for margin, c in enumerate(l1): + if c != l2[margin] or c not in ' \t': + break + + return '\n'.join([l[margin:] if not l.isspace() else '' for l in lines]) + + +def indent(text, prefix, predicate=None): + """Adds 'prefix' to the beginning of selected lines in 'text'. + + If 'predicate' is provided, 'prefix' will only be added to the lines + where 'predicate(line)' is True. If 'predicate' is not provided, + it will default to adding 'prefix' to all non-empty lines that do not + consist solely of whitespace characters. + """ + prefixed_lines = [] + if predicate is None: + # str.splitlines(keepends=True) doesn't produce the empty string, + # so we need to use `str.isspace()` rather than a truth test. + # Inlining the predicate leads to a ~30% performance improvement. + for line in text.splitlines(True): + if not line.isspace(): + prefixed_lines.append(prefix) + prefixed_lines.append(line) + else: + for line in text.splitlines(True): + if predicate(line): + prefixed_lines.append(prefix) + prefixed_lines.append(line) + return ''.join(prefixed_lines) + + +if __name__ == "__main__": + #print dedent("\tfoo\n\tbar") + #print dedent(" \thello there\n \t how are you?") + print(dedent("Hello there.\n This is indented.")) diff --git a/Python313_13_x64_Template/Lib/this.py b/Python314_4_x64_Template/Lib/this.py similarity index 100% rename from Python313_13_x64_Template/Lib/this.py rename to Python314_4_x64_Template/Lib/this.py diff --git a/Python314_4_x64_Template/Lib/threading.py b/Python314_4_x64_Template/Lib/threading.py new file mode 100644 index 00000000..c03b0b53 --- /dev/null +++ b/Python314_4_x64_Template/Lib/threading.py @@ -0,0 +1,1642 @@ +"""Thread module emulating a subset of Java's threading model.""" + +import os as _os +import sys as _sys +import _thread +import _contextvars + +from time import monotonic as _time +from _weakrefset import WeakSet +from itertools import count as _count +try: + from _collections import deque as _deque +except ImportError: + from collections import deque as _deque + +# Note regarding PEP 8 compliant names +# This threading model was originally inspired by Java, and inherited +# the convention of camelCase function and method names from that +# language. 
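The dedent() and indent() helpers above are near-inverses; a short sketch of both (the input strings are arbitrary):

    import textwrap

    print(textwrap.dedent("    def f():\n        return 42\n"))
    # def f():
    #     return 42

    print(textwrap.indent("one\ntwo\n\nthree\n", "> "))
    # > one
    # > two
    # (blank line: skipped by the default predicate)
    # > three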
Those original names are not in any imminent danger of +# being deprecated (even for Py3k),so this module provides them as an +# alias for the PEP 8 compliant names +# Note that using the new PEP 8 compliant names facilitates substitution +# with the multiprocessing module, which doesn't provide the old +# Java inspired names. + +__all__ = ['get_ident', 'active_count', 'Condition', 'current_thread', + 'enumerate', 'main_thread', 'TIMEOUT_MAX', + 'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', + 'Barrier', 'BrokenBarrierError', 'Timer', 'ThreadError', + 'setprofile', 'settrace', 'local', 'stack_size', + 'excepthook', 'ExceptHookArgs', 'gettrace', 'getprofile', + 'setprofile_all_threads','settrace_all_threads'] + +# Rename some stuff so "from threading import *" is safe +_start_joinable_thread = _thread.start_joinable_thread +_daemon_threads_allowed = _thread.daemon_threads_allowed +_allocate_lock = _thread.allocate_lock +_LockType = _thread.LockType +_thread_shutdown = _thread._shutdown +_make_thread_handle = _thread._make_thread_handle +_ThreadHandle = _thread._ThreadHandle +get_ident = _thread.get_ident +_get_main_thread_ident = _thread._get_main_thread_ident +_is_main_interpreter = _thread._is_main_interpreter +try: + get_native_id = _thread.get_native_id + _HAVE_THREAD_NATIVE_ID = True + __all__.append('get_native_id') +except AttributeError: + _HAVE_THREAD_NATIVE_ID = False +try: + _set_name = _thread.set_name +except AttributeError: + _set_name = None +ThreadError = _thread.error +try: + _CRLock = _thread.RLock +except AttributeError: + _CRLock = None +TIMEOUT_MAX = _thread.TIMEOUT_MAX +del _thread + +# get thread-local implementation, either from the thread +# module, or from the python fallback + +try: + from _thread import _local as local +except ImportError: + from _threading_local import local + +# Support for profile and trace hooks + +_profile_hook = None +_trace_hook = None + +def setprofile(func): + """Set a profile function for all threads started from the threading module. + + The func will be passed to sys.setprofile() for each thread, before its + run() method is called. + """ + global _profile_hook + _profile_hook = func + +def setprofile_all_threads(func): + """Set a profile function for all threads started from the threading module + and all Python threads that are currently executing. + + The func will be passed to sys.setprofile() for each thread, before its + run() method is called. + """ + setprofile(func) + _sys._setprofileallthreads(func) + +def getprofile(): + """Get the profiler function as set by threading.setprofile().""" + return _profile_hook + +def settrace(func): + """Set a trace function for all threads started from the threading module. + + The func will be passed to sys.settrace() for each thread, before its run() + method is called. + """ + global _trace_hook + _trace_hook = func + +def settrace_all_threads(func): + """Set a trace function for all threads started from the threading module + and all Python threads that are currently executing. + + The func will be passed to sys.settrace() for each thread, before its run() + method is called. + """ + settrace(func) + _sys._settraceallthreads(func) + +def gettrace(): + """Get the trace function as set by threading.settrace().""" + return _trace_hook + +# Synchronization classes + +Lock = _LockType + +def RLock(*args, **kwargs): + """Factory function that returns a new reentrant lock. + + A reentrant lock must be released by the thread that acquired it. 
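Reentrancy means the owning thread can nest acquisitions freely; a minimal sketch of the RLock behaviour this docstring describes:

    import threading

    rlock = threading.RLock()

    with rlock:            # recursion level 1
        with rlock:        # same owner: level 2, no deadlock
            print(rlock)   # repr shows the owner and count=2
    # fully released here: each acquire was matched by a release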
Once a + thread has acquired a reentrant lock, the same thread may acquire it again + without blocking; the thread must release it once for each time it has + acquired it. + + """ + if args or kwargs: + import warnings + warnings.warn( + 'Passing arguments to RLock is deprecated and will be removed in 3.15', + DeprecationWarning, + stacklevel=2, + ) + if _CRLock is None: + return _PyRLock(*args, **kwargs) + return _CRLock(*args, **kwargs) + +class _RLock: + """This class implements reentrant lock objects. + + A reentrant lock must be released by the thread that acquired it. Once a + thread has acquired a reentrant lock, the same thread may acquire it + again without blocking; the thread must release it once for each time it + has acquired it. + + """ + + def __init__(self): + self._block = _allocate_lock() + self._owner = None + self._count = 0 + + def __repr__(self): + owner = self._owner + try: + owner = _active[owner].name + except KeyError: + pass + return "<%s %s.%s object owner=%r count=%d at %s>" % ( + "locked" if self.locked() else "unlocked", + self.__class__.__module__, + self.__class__.__qualname__, + owner, + self._count, + hex(id(self)) + ) + + def _at_fork_reinit(self): + self._block._at_fork_reinit() + self._owner = None + self._count = 0 + + def acquire(self, blocking=True, timeout=-1): + """Acquire a lock, blocking or non-blocking. + + When invoked without arguments: if this thread already owns the lock, + increment the recursion level by one, and return immediately. Otherwise, + if another thread owns the lock, block until the lock is unlocked. Once + the lock is unlocked (not owned by any thread), then grab ownership, set + the recursion level to one, and return. If more than one thread is + blocked waiting until the lock is unlocked, only one at a time will be + able to grab ownership of the lock. There is no return value in this + case. + + When invoked with the blocking argument set to true, do the same thing + as when called without arguments, and return true. + + When invoked with the blocking argument set to false, do not block. If a + call without an argument would block, return false immediately; + otherwise, do the same thing as when called without arguments, and + return true. + + When invoked with the floating-point timeout argument set to a positive + value, block for at most the number of seconds specified by timeout + and as long as the lock cannot be acquired. Return true if the lock has + been acquired, false if the timeout has elapsed. + + """ + me = get_ident() + if self._owner == me: + self._count += 1 + return 1 + rc = self._block.acquire(blocking, timeout) + if rc: + self._owner = me + self._count = 1 + return rc + + __enter__ = acquire + + def release(self): + """Release a lock, decrementing the recursion level. + + If after the decrement it is zero, reset the lock to unlocked (not owned + by any thread), and if any other threads are blocked waiting for the + lock to become unlocked, allow exactly one of them to proceed. If after + the decrement the recursion level is still nonzero, the lock remains + locked and owned by the calling thread. + + Only call this method when the calling thread owns the lock. A + RuntimeError is raised if this method is called when the lock is + unlocked. + + There is no return value. 
+
+        """
+        if self._owner != get_ident():
+            raise RuntimeError("cannot release un-acquired lock")
+        self._count = count = self._count - 1
+        if not count:
+            self._owner = None
+            self._block.release()
+
+    def __exit__(self, t, v, tb):
+        self.release()
+
+    def locked(self):
+        """Return whether this object is locked."""
+        return self._block.locked()
+
+    # Internal methods used by condition variables
+
+    def _acquire_restore(self, state):
+        self._block.acquire()
+        self._count, self._owner = state
+
+    def _release_save(self):
+        if self._count == 0:
+            raise RuntimeError("cannot release un-acquired lock")
+        count = self._count
+        self._count = 0
+        owner = self._owner
+        self._owner = None
+        self._block.release()
+        return (count, owner)
+
+    def _is_owned(self):
+        return self._owner == get_ident()
+
+    # Internal method used for reentrancy checks
+
+    def _recursion_count(self):
+        if self._owner != get_ident():
+            return 0
+        return self._count
+
+_PyRLock = _RLock
+
+
+class Condition:
+    """Class that implements a condition variable.
+
+    A condition variable allows one or more threads to wait until they are
+    notified by another thread.
+
+    If the lock argument is given and not None, it must be a Lock or RLock
+    object, and it is used as the underlying lock. Otherwise, a new RLock object
+    is created and used as the underlying lock.
+
+    """
+
+    def __init__(self, lock=None):
+        if lock is None:
+            lock = RLock()
+        self._lock = lock
+        # Export the lock's acquire(), release(), and locked() methods
+        self.acquire = lock.acquire
+        self.release = lock.release
+        self.locked = lock.locked
+        # If the lock defines _release_save() and/or _acquire_restore(),
+        # these override the default implementations (which just call
+        # release() and acquire() on the lock). Ditto for _is_owned().
+        if hasattr(lock, '_release_save'):
+            self._release_save = lock._release_save
+        if hasattr(lock, '_acquire_restore'):
+            self._acquire_restore = lock._acquire_restore
+        if hasattr(lock, '_is_owned'):
+            self._is_owned = lock._is_owned
+        self._waiters = _deque()
+
+    def _at_fork_reinit(self):
+        self._lock._at_fork_reinit()
+        self._waiters.clear()
+
+    def __enter__(self):
+        return self._lock.__enter__()
+
+    def __exit__(self, *args):
+        return self._lock.__exit__(*args)
+
+    def __repr__(self):
+        return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
+
+    def _release_save(self):
+        self._lock.release()   # No state to save
+
+    def _acquire_restore(self, x):
+        self._lock.acquire()   # Ignore saved state
+
+    def _is_owned(self):
+        # Return True if lock is owned by current_thread.
+        # This method is called only if _lock doesn't have _is_owned().
+        if self._lock.acquire(False):
+            self._lock.release()
+            return False
+        else:
+            return True
+
+    def wait(self, timeout=None):
+        """Wait until notified or until a timeout occurs.
+
+        If the calling thread has not acquired the lock when this method is
+        called, a RuntimeError is raised.
+
+        This method releases the underlying lock, and then blocks until it is
+        awakened by a notify() or notify_all() call for the same condition
+        variable in another thread, or until the optional timeout occurs. Once
+        awakened or timed out, it re-acquires the lock and returns.
+
+        When the timeout argument is present and not None, it should be a
+        floating-point number specifying a timeout for the operation in seconds
+        (or fractions thereof).
+
+        When the underlying lock is an RLock, it is not released using its
+        release() method, since this may not actually unlock the lock when it
+        was acquired multiple times recursively.
Instead, an internal interface + of the RLock class is used, which really unlocks it even when it has + been recursively acquired several times. Another internal interface is + then used to restore the recursion level when the lock is reacquired. + + """ + if not self._is_owned(): + raise RuntimeError("cannot wait on un-acquired lock") + waiter = _allocate_lock() + waiter.acquire() + self._waiters.append(waiter) + saved_state = self._release_save() + gotit = False + try: # restore state no matter what (e.g., KeyboardInterrupt) + if timeout is None: + waiter.acquire() + gotit = True + else: + if timeout > 0: + gotit = waiter.acquire(True, timeout) + else: + gotit = waiter.acquire(False) + return gotit + finally: + self._acquire_restore(saved_state) + if not gotit: + try: + self._waiters.remove(waiter) + except ValueError: + pass + + def wait_for(self, predicate, timeout=None): + """Wait until a condition evaluates to True. + + predicate should be a callable which result will be interpreted as a + boolean value. A timeout may be provided giving the maximum time to + wait. + + """ + endtime = None + waittime = timeout + result = predicate() + while not result: + if waittime is not None: + if endtime is None: + endtime = _time() + waittime + else: + waittime = endtime - _time() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + def notify(self, n=1): + """Wake up one or more threads waiting on this condition, if any. + + If the calling thread has not acquired the lock when this method is + called, a RuntimeError is raised. + + This method wakes up at most n of the threads waiting for the condition + variable; it is a no-op if no threads are waiting. + + """ + if not self._is_owned(): + raise RuntimeError("cannot notify on un-acquired lock") + waiters = self._waiters + while waiters and n > 0: + waiter = waiters[0] + try: + waiter.release() + except RuntimeError: + # gh-92530: The previous call of notify() released the lock, + # but was interrupted before removing it from the queue. + # It can happen if a signal handler raises an exception, + # like CTRL+C which raises KeyboardInterrupt. + pass + else: + n -= 1 + try: + waiters.remove(waiter) + except ValueError: + pass + + def notify_all(self): + """Wake up all threads waiting on this condition. + + If the calling thread has not acquired the lock when this method + is called, a RuntimeError is raised. + + """ + self.notify(len(self._waiters)) + + def notifyAll(self): + """Wake up all threads waiting on this condition. + + This method is deprecated, use notify_all() instead. + + """ + import warnings + warnings.warn('notifyAll() is deprecated, use notify_all() instead', + DeprecationWarning, stacklevel=2) + self.notify_all() + + +class Semaphore: + """This class implements semaphore objects. + + Semaphores manage a counter representing the number of release() calls minus + the number of acquire() calls, plus an initial value. The acquire() method + blocks if necessary until it can return without making the counter + negative. If not given, value defaults to 1. 
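A classic use of the Condition class completed above: one thread blocks on a predicate with wait_for() while another appends work and calls notify(). The names and the single-item workload are purely illustrative:

    import threading

    items = []
    cond = threading.Condition()

    def consumer():
        with cond:
            cond.wait_for(lambda: items)   # re-checks the predicate on each wakeup
            print("got", items.pop())

    t = threading.Thread(target=consumer)
    t.start()
    with cond:
        items.append("job")
        cond.notify()
    t.join()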
+ + """ + + # After Tim Peters' semaphore class, but not quite the same (no maximum) + + def __init__(self, value=1): + if value < 0: + raise ValueError("semaphore initial value must be >= 0") + self._cond = Condition(Lock()) + self._value = value + + def __repr__(self): + cls = self.__class__ + return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" + f" value={self._value}>") + + def acquire(self, blocking=True, timeout=None): + """Acquire a semaphore, decrementing the internal counter by one. + + When invoked without arguments: if the internal counter is larger than + zero on entry, decrement it by one and return immediately. If it is zero + on entry, block, waiting until some other thread has called release() to + make it larger than zero. This is done with proper interlocking so that + if multiple acquire() calls are blocked, release() will wake exactly one + of them up. The implementation may pick one at random, so the order in + which blocked threads are awakened should not be relied on. There is no + return value in this case. + + When invoked with blocking set to true, do the same thing as when called + without arguments, and return true. + + When invoked with blocking set to false, do not block. If a call without + an argument would block, return false immediately; otherwise, do the + same thing as when called without arguments, and return true. + + When invoked with a timeout other than None, it will block for at + most timeout seconds. If acquire does not complete successfully in + that interval, return false. Return true otherwise. + + """ + if not blocking and timeout is not None: + raise ValueError("can't specify timeout for non-blocking acquire") + rc = False + endtime = None + with self._cond: + while self._value == 0: + if not blocking: + break + if timeout is not None: + if endtime is None: + endtime = _time() + timeout + else: + timeout = endtime - _time() + if timeout <= 0: + break + self._cond.wait(timeout) + else: + self._value -= 1 + rc = True + return rc + + __enter__ = acquire + + def release(self, n=1): + """Release a semaphore, incrementing the internal counter by one or more. + + When the counter is zero on entry and another thread is waiting for it + to become larger than zero again, wake up that thread. + + """ + if n < 1: + raise ValueError('n must be one or more') + with self._cond: + self._value += n + self._cond.notify(n) + + def __exit__(self, t, v, tb): + self.release() + + +class BoundedSemaphore(Semaphore): + """Implements a bounded semaphore. + + A bounded semaphore checks to make sure its current value doesn't exceed its + initial value. If it does, ValueError is raised. In most situations + semaphores are used to guard resources with limited capacity. + + If the semaphore is released too many times it's a sign of a bug. If not + given, value defaults to 1. + + Like regular semaphores, bounded semaphores manage a counter representing + the number of release() calls minus the number of acquire() calls, plus an + initial value. The acquire() method blocks if necessary until it can return + without making the counter negative. If not given, value defaults to 1. + + """ + + def __init__(self, value=1): + super().__init__(value) + self._initial_value = value + + def __repr__(self): + cls = self.__class__ + return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" + f" value={self._value}/{self._initial_value}>") + + def release(self, n=1): + """Release a semaphore, incrementing the internal counter by one or more. 
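BoundedSemaphore adds the over-release check its docstring describes; a sketch with an arbitrary pool size of 2:

    import threading

    pool = threading.BoundedSemaphore(2)
    assert pool.acquire(blocking=False)        # 1 slot left
    assert pool.acquire(blocking=False)        # 0 slots left
    assert not pool.acquire(blocking=False)    # would block, so it fails fast
    pool.release()
    pool.release()
    try:
        pool.release()                         # exceeds the initial value
    except ValueError as exc:
        print(exc)                             # Semaphore released too many times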
+ + When the counter is zero on entry and another thread is waiting for it + to become larger than zero again, wake up that thread. + + If the number of releases exceeds the number of acquires, + raise a ValueError. + + """ + if n < 1: + raise ValueError('n must be one or more') + with self._cond: + if self._value + n > self._initial_value: + raise ValueError("Semaphore released too many times") + self._value += n + self._cond.notify(n) + + +class Event: + """Class implementing event objects. + + Events manage a flag that can be set to true with the set() method and reset + to false with the clear() method. The wait() method blocks until the flag is + true. The flag is initially false. + + """ + + # After Tim Peters' event class (without is_posted()) + + def __init__(self): + self._cond = Condition(Lock()) + self._flag = False + + def __repr__(self): + cls = self.__class__ + status = 'set' if self._flag else 'unset' + return f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}: {status}>" + + def _at_fork_reinit(self): + # Private method called by Thread._after_fork() + self._cond._at_fork_reinit() + + def is_set(self): + """Return true if and only if the internal flag is true.""" + return self._flag + + def isSet(self): + """Return true if and only if the internal flag is true. + + This method is deprecated, use is_set() instead. + + """ + import warnings + warnings.warn('isSet() is deprecated, use is_set() instead', + DeprecationWarning, stacklevel=2) + return self.is_set() + + def set(self): + """Set the internal flag to true. + + All threads waiting for it to become true are awakened. Threads + that call wait() once the flag is true will not block at all. + + """ + with self._cond: + self._flag = True + self._cond.notify_all() + + def clear(self): + """Reset the internal flag to false. + + Subsequently, threads calling wait() will block until set() is called to + set the internal flag to true again. + + """ + with self._cond: + self._flag = False + + def wait(self, timeout=None): + """Block until the internal flag is true. + + If the internal flag is true on entry, return immediately. Otherwise, + block until another thread calls set() to set the flag to true, or until + the optional timeout occurs. + + When the timeout argument is present and not None, it should be a + floating-point number specifying a timeout for the operation in seconds + (or fractions thereof). + + This method returns the internal flag on exit, so it will always return + ``True`` except if a timeout is given and the operation times out, when + it will return ``False``. + + """ + with self._cond: + signaled = self._flag + if not signaled: + signaled = self._cond.wait(timeout) + return signaled + + +# A barrier class. Inspired in part by the pthread_barrier_* api and +# the CyclicBarrier class from Java. See +# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and +# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/ +# CyclicBarrier.html +# for information. +# We maintain two main states, 'filling' and 'draining' enabling the barrier +# to be cyclic. Threads are not allowed into it until it has fully drained +# since the previous cycle. In addition, a 'resetting' state exists which is +# similar to 'draining' except that threads leave with a BrokenBarrierError, +# and a 'broken' state in which all threads get the exception. +class Barrier: + """Implements a Barrier. + + Useful for synchronizing a fixed number of threads at known synchronization + points. 
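Event, defined above, is the simplest of these primitives: set() wakes every waiter, and wait() returns the flag, so it is False only on timeout. A minimal sketch:

    import threading

    ready = threading.Event()

    def worker():
        print("flag:", ready.wait(timeout=5))   # True once set() is called

    t = threading.Thread(target=worker)
    t.start()
    ready.set()
    t.join()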
Threads block on 'wait()' and are simultaneously awoken once they + have all made that call. + + """ + + def __init__(self, parties, action=None, timeout=None): + """Create a barrier, initialised to 'parties' threads. + + 'action' is a callable which, when supplied, will be called by one of + the threads after they have all entered the barrier and just prior to + releasing them all. If a 'timeout' is provided, it is used as the + default for all subsequent 'wait()' calls. + + """ + if parties < 1: + raise ValueError("parties must be >= 1") + self._cond = Condition(Lock()) + self._action = action + self._timeout = timeout + self._parties = parties + self._state = 0 # 0 filling, 1 draining, -1 resetting, -2 broken + self._count = 0 + + def __repr__(self): + cls = self.__class__ + if self.broken: + return f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}: broken>" + return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" + f" waiters={self.n_waiting}/{self.parties}>") + + def wait(self, timeout=None): + """Wait for the barrier. + + When the specified number of threads have started waiting, they are all + simultaneously awoken. If an 'action' was provided for the barrier, one + of the threads will have executed that callback prior to returning. + Returns an individual index number from 0 to 'parties-1'. + + """ + if timeout is None: + timeout = self._timeout + with self._cond: + self._enter() # Block while the barrier drains. + index = self._count + self._count += 1 + try: + if index + 1 == self._parties: + # We release the barrier + self._release() + else: + # We wait until someone releases us + self._wait(timeout) + return index + finally: + self._count -= 1 + # Wake up any threads waiting for barrier to drain. + self._exit() + + # Block until the barrier is ready for us, or raise an exception + # if it is broken. + def _enter(self): + while self._state in (-1, 1): + # It is draining or resetting, wait until done + self._cond.wait() + #see if the barrier is in a broken state + if self._state < 0: + raise BrokenBarrierError + assert self._state == 0 + + # Optionally run the 'action' and release the threads waiting + # in the barrier. + def _release(self): + try: + if self._action: + self._action() + # enter draining state + self._state = 1 + self._cond.notify_all() + except: + #an exception during the _action handler. Break and reraise + self._break() + raise + + # Wait in the barrier until we are released. Raise an exception + # if the barrier is reset or broken. + def _wait(self, timeout): + if not self._cond.wait_for(lambda : self._state != 0, timeout): + #timed out. Break the barrier + self._break() + raise BrokenBarrierError + if self._state < 0: + raise BrokenBarrierError + assert self._state == 1 + + # If we are the last thread to exit the barrier, signal any threads + # waiting for the barrier to drain. + def _exit(self): + if self._count == 0: + if self._state in (-1, 1): + #resetting or draining + self._state = 0 + self._cond.notify_all() + + def reset(self): + """Reset the barrier to the initial state. + + Any threads currently waiting will get the BrokenBarrier exception + raised. + + """ + with self._cond: + if self._count > 0: + if self._state == 0: + #reset the barrier, waking up threads + self._state = -1 + elif self._state == -2: + #was broken, set it to reset state + #which clears when the last thread exits + self._state = -1 + else: + self._state = 0 + self._cond.notify_all() + + def abort(self): + """Place the barrier into a 'broken' state. 
+ + Useful in case of error. Any currently waiting threads and threads + attempting to 'wait()' will have BrokenBarrierError raised. + + """ + with self._cond: + self._break() + + def _break(self): + # An internal error was detected. The barrier is set to + # a broken state all parties awakened. + self._state = -2 + self._cond.notify_all() + + @property + def parties(self): + """Return the number of threads required to trip the barrier.""" + return self._parties + + @property + def n_waiting(self): + """Return the number of threads currently waiting at the barrier.""" + # We don't need synchronization here since this is an ephemeral result + # anyway. It returns the correct value in the steady state. + if self._state == 0: + return self._count + return 0 + + @property + def broken(self): + """Return True if the barrier is in a broken state.""" + return self._state == -2 + +# exception raised by the Barrier class +class BrokenBarrierError(RuntimeError): + pass + + +# Helper to generate new thread names +_counter = _count(1).__next__ +def _newname(name_template): + return name_template % _counter() + +# Active thread administration. +# +# bpo-44422: Use a reentrant lock to allow reentrant calls to functions like +# threading.enumerate(). +_active_limbo_lock = RLock() +_active = {} # maps thread id to Thread object +_limbo = {} +_dangling = WeakSet() + + +# Main class for threads + +class Thread: + """A class that represents a thread of control. + + This class can be safely subclassed in a limited fashion. There are two ways + to specify the activity: by passing a callable object to the constructor, or + by overriding the run() method in a subclass. + + """ + + _initialized = False + + def __init__(self, group=None, target=None, name=None, + args=(), kwargs=None, *, daemon=None, context=None): + """This constructor should always be called with keyword arguments. Arguments are: + + *group* should be None; reserved for future extension when a ThreadGroup + class is implemented. + + *target* is the callable object to be invoked by the run() + method. Defaults to None, meaning nothing is called. + + *name* is the thread name. By default, a unique name is constructed of + the form "Thread-N" where N is a small decimal number. + + *args* is a list or tuple of arguments for the target invocation. Defaults to (). + + *kwargs* is a dictionary of keyword arguments for the target + invocation. Defaults to {}. + + *context* is the contextvars.Context value to use for the thread. + The default value is None, which means to check + sys.flags.thread_inherit_context. If that flag is true, use a copy + of the context of the caller. If false, use an empty context. To + explicitly start with an empty context, pass a new instance of + contextvars.Context(). To explicitly start with a copy of the current + context, pass the value from contextvars.copy_context(). + + If a subclass overrides the constructor, it must make sure to invoke + the base class constructor (Thread.__init__()) before doing anything + else to the thread. 
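The Barrier above in action: three parties (an arbitrary count) block in wait() until the last one arrives, the optional action runs once, and each caller gets a distinct index:

    import threading

    barrier = threading.Barrier(3, action=lambda: print("all arrived"))

    def phase():
        index = barrier.wait()    # blocks until all 3 parties have called wait()
        print("released with index", index)

    threads = [threading.Thread(target=phase) for _ in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()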
+ + """ + assert group is None, "group argument must be None for now" + if kwargs is None: + kwargs = {} + if name: + name = str(name) + else: + name = _newname("Thread-%d") + if target is not None: + try: + target_name = target.__name__ + name += f" ({target_name})" + except AttributeError: + pass + + self._target = target + self._name = name + self._args = args + self._kwargs = kwargs + if daemon is not None: + if daemon and not _daemon_threads_allowed(): + raise RuntimeError('daemon threads are disabled in this (sub)interpreter') + self._daemonic = daemon + else: + self._daemonic = current_thread().daemon + self._context = context + self._ident = None + if _HAVE_THREAD_NATIVE_ID: + self._native_id = None + self._os_thread_handle = _ThreadHandle() + self._started = Event() + self._initialized = True + # Copy of sys.stderr used by self._invoke_excepthook() + self._stderr = _sys.stderr + self._invoke_excepthook = _make_invoke_excepthook() + # For debugging and _after_fork() + _dangling.add(self) + + def _after_fork(self, new_ident=None): + # Private! Called by threading._after_fork(). + self._started._at_fork_reinit() + if new_ident is not None: + # This thread is alive. + self._ident = new_ident + assert self._os_thread_handle.ident == new_ident + if _HAVE_THREAD_NATIVE_ID: + self._set_native_id() + else: + # Otherwise, the thread is dead, Jim. _PyThread_AfterFork() + # already marked our handle done. + pass + + def __repr__(self): + assert self._initialized, "Thread.__init__() was not called" + status = "initial" + if self._started.is_set(): + status = "started" + if self._os_thread_handle.is_done(): + status = "stopped" + if self._daemonic: + status += " daemon" + if self._ident is not None: + status += " %s" % self._ident + return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status) + + def start(self): + """Start the thread's activity. + + It must be called at most once per thread object. It arranges for the + object's run() method to be invoked in a separate thread of control. + + This method will raise a RuntimeError if called more than once on the + same thread object. + + """ + if not self._initialized: + raise RuntimeError("thread.__init__() not called") + + if self._started.is_set(): + raise RuntimeError("threads can only be started once") + + with _active_limbo_lock: + _limbo[self] = self + + if self._context is None: + # No context provided + if _sys.flags.thread_inherit_context: + # start with a copy of the context of the caller + self._context = _contextvars.copy_context() + else: + # start with an empty context + self._context = _contextvars.Context() + + try: + # Start joinable thread + _start_joinable_thread(self._bootstrap, handle=self._os_thread_handle, + daemon=self.daemon) + except Exception: + with _active_limbo_lock: + del _limbo[self] + raise + self._started.wait() # Will set ident and native_id + + def run(self): + """Method representing the thread's activity. + + You may override this method in a subclass. The standard run() method + invokes the callable object passed to the object's constructor as the + target argument, if any, with sequential and keyword arguments taken + from the args and kwargs arguments, respectively. + + """ + try: + if self._target is not None: + self._target(*self._args, **self._kwargs) + finally: + # Avoid a refcycle if the thread is running a function with + # an argument that has a member that points to the thread. 
+ del self._target, self._args, self._kwargs + + def _bootstrap(self): + # Wrapper around the real bootstrap code that ignores + # exceptions during interpreter cleanup. Those typically + # happen when a daemon thread wakes up at an unfortunate + # moment, finds the world around it destroyed, and raises some + # random exception *** while trying to report the exception in + # _bootstrap_inner() below ***. Those random exceptions + # don't help anybody, and they confuse users, so we suppress + # them. We suppress them only when it appears that the world + # indeed has already been destroyed, so that exceptions in + # _bootstrap_inner() during normal business hours are properly + # reported. Also, we only suppress them for daemonic threads; + # if a non-daemonic encounters this, something else is wrong. + try: + self._bootstrap_inner() + except: + if self._daemonic and _sys is None: + return + raise + + def _set_ident(self): + self._ident = get_ident() + + if _HAVE_THREAD_NATIVE_ID: + def _set_native_id(self): + self._native_id = get_native_id() + + def _set_os_name(self): + if _set_name is None or not self._name: + return + try: + _set_name(self._name) + except OSError: + pass + + def _bootstrap_inner(self): + try: + self._set_ident() + if _HAVE_THREAD_NATIVE_ID: + self._set_native_id() + self._set_os_name() + self._started.set() + with _active_limbo_lock: + _active[self._ident] = self + del _limbo[self] + + if _trace_hook: + _sys.settrace(_trace_hook) + if _profile_hook: + _sys.setprofile(_profile_hook) + + try: + self._context.run(self.run) + except: + self._invoke_excepthook(self) + finally: + self._delete() + + def _delete(self): + "Remove current thread from the dict of currently running threads." + with _active_limbo_lock: + del _active[get_ident()] + # There must not be any python code between the previous line + # and after the lock is released. Otherwise a tracing function + # could try to acquire the lock again in the same thread, (in + # current_thread()), and would block. + + def join(self, timeout=None): + """Wait until the thread terminates. + + This blocks the calling thread until the thread whose join() method is + called terminates -- either normally or through an unhandled exception + or until the optional timeout occurs. + + When the timeout argument is present and not None, it should be a + floating-point number specifying a timeout for the operation in seconds + (or fractions thereof). As join() always returns None, you must call + is_alive() after join() to decide whether a timeout happened -- if the + thread is still alive, the join() call timed out. + + When the timeout argument is not present or None, the operation will + block until the thread terminates. + + A thread can be join()ed many times. + + join() raises a RuntimeError if an attempt is made to join the current + thread as that would cause a deadlock. It is also an error to join() a + thread before it has been started and attempts to do so raises the same + exception. 
+ + """ + if not self._initialized: + raise RuntimeError("Thread.__init__() not called") + if not self._started.is_set(): + raise RuntimeError("cannot join thread before it is started") + if self is current_thread(): + raise RuntimeError("cannot join current thread") + + # the behavior of a negative timeout isn't documented, but + # historically .join(timeout=x) for x<0 has acted as if timeout=0 + if timeout is not None: + timeout = max(timeout, 0) + + self._os_thread_handle.join(timeout) + + @property + def name(self): + """A string used for identification purposes only. + + It has no semantics. Multiple threads may be given the same name. The + initial name is set by the constructor. + + """ + assert self._initialized, "Thread.__init__() not called" + return self._name + + @name.setter + def name(self, name): + assert self._initialized, "Thread.__init__() not called" + self._name = str(name) + if get_ident() == self._ident: + self._set_os_name() + + @property + def ident(self): + """Thread identifier of this thread or None if it has not been started. + + This is a nonzero integer. See the get_ident() function. Thread + identifiers may be recycled when a thread exits and another thread is + created. The identifier is available even after the thread has exited. + + """ + assert self._initialized, "Thread.__init__() not called" + return self._ident + + if _HAVE_THREAD_NATIVE_ID: + @property + def native_id(self): + """Native integral thread ID of this thread, or None if it has not been started. + + This is a non-negative integer. See the get_native_id() function. + This represents the Thread ID as reported by the kernel. + + """ + assert self._initialized, "Thread.__init__() not called" + return self._native_id + + def is_alive(self): + """Return whether the thread is alive. + + This method returns True just before the run() method starts until just + after the run() method terminates. See also the module function + enumerate(). + + """ + assert self._initialized, "Thread.__init__() not called" + return self._started.is_set() and not self._os_thread_handle.is_done() + + @property + def daemon(self): + """A boolean value indicating whether this thread is a daemon thread. + + This must be set before start() is called, otherwise RuntimeError is + raised. Its initial value is inherited from the creating thread; the + main thread is not a daemon thread and therefore all threads created in + the main thread default to daemon = False. + + The entire Python program exits when only daemon threads are left. + + """ + assert self._initialized, "Thread.__init__() not called" + return self._daemonic + + @daemon.setter + def daemon(self, daemonic): + if not self._initialized: + raise RuntimeError("Thread.__init__() not called") + if daemonic and not _daemon_threads_allowed(): + raise RuntimeError('daemon threads are disabled in this interpreter') + if self._started.is_set(): + raise RuntimeError("cannot set daemon status of active thread") + self._daemonic = daemonic + + def isDaemon(self): + """Return whether this thread is a daemon. + + This method is deprecated, use the daemon attribute instead. + + """ + import warnings + warnings.warn('isDaemon() is deprecated, get the daemon attribute instead', + DeprecationWarning, stacklevel=2) + return self.daemon + + def setDaemon(self, daemonic): + """Set whether this thread is a daemon. + + This method is deprecated, use the .daemon property instead. 
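Basic Thread lifecycle using the constructor arguments and methods defined above (the target, args and name are arbitrary):

    import threading

    def work(n, *, repeat=1):
        for _ in range(repeat):
            print(f"{threading.current_thread().name}: {n}")

    t = threading.Thread(target=work, args=(42,), kwargs={"repeat": 2},
                         name="worker-1")
    t.start()
    t.join()                # block until run() returns
    print(t.is_alive())     # False after termination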
+ + """ + import warnings + warnings.warn('setDaemon() is deprecated, set the daemon attribute instead', + DeprecationWarning, stacklevel=2) + self.daemon = daemonic + + def getName(self): + """Return a string used for identification purposes only. + + This method is deprecated, use the name attribute instead. + + """ + import warnings + warnings.warn('getName() is deprecated, get the name attribute instead', + DeprecationWarning, stacklevel=2) + return self.name + + def setName(self, name): + """Set the name string for this thread. + + This method is deprecated, use the name attribute instead. + + """ + import warnings + warnings.warn('setName() is deprecated, set the name attribute instead', + DeprecationWarning, stacklevel=2) + self.name = name + + +try: + from _thread import (_excepthook as excepthook, + _ExceptHookArgs as ExceptHookArgs) +except ImportError: + # Simple Python implementation if _thread._excepthook() is not available + from traceback import print_exception as _print_exception + from collections import namedtuple + + _ExceptHookArgs = namedtuple( + 'ExceptHookArgs', + 'exc_type exc_value exc_traceback thread') + + def ExceptHookArgs(args): + return _ExceptHookArgs(*args) + + def excepthook(args, /): + """ + Handle uncaught Thread.run() exception. + """ + if args.exc_type == SystemExit: + # silently ignore SystemExit + return + + if _sys is not None and _sys.stderr is not None: + stderr = _sys.stderr + elif args.thread is not None: + stderr = args.thread._stderr + if stderr is None: + # do nothing if sys.stderr is None and sys.stderr was None + # when the thread was created + return + else: + # do nothing if sys.stderr is None and args.thread is None + return + + if args.thread is not None: + name = args.thread.name + else: + name = get_ident() + print(f"Exception in thread {name}:", + file=stderr, flush=True) + _print_exception(args.exc_type, args.exc_value, args.exc_traceback, + file=stderr) + stderr.flush() + + +# Original value of threading.excepthook +__excepthook__ = excepthook + + +def _make_invoke_excepthook(): + # Create a local namespace to ensure that variables remain alive + # when _invoke_excepthook() is called, even if it is called late during + # Python shutdown. It is mostly needed for daemon threads. 
+ + old_excepthook = excepthook + old_sys_excepthook = _sys.excepthook + if old_excepthook is None: + raise RuntimeError("threading.excepthook is None") + if old_sys_excepthook is None: + raise RuntimeError("sys.excepthook is None") + + sys_exc_info = _sys.exc_info + local_print = print + local_sys = _sys + + def invoke_excepthook(thread): + global excepthook + try: + hook = excepthook + if hook is None: + hook = old_excepthook + + args = ExceptHookArgs([*sys_exc_info(), thread]) + + hook(args) + except Exception as exc: + exc.__suppress_context__ = True + del exc + + if local_sys is not None and local_sys.stderr is not None: + stderr = local_sys.stderr + else: + stderr = thread._stderr + + local_print("Exception in threading.excepthook:", + file=stderr, flush=True) + + if local_sys is not None and local_sys.excepthook is not None: + sys_excepthook = local_sys.excepthook + else: + sys_excepthook = old_sys_excepthook + + sys_excepthook(*sys_exc_info()) + finally: + # Break reference cycle (exception stored in a variable) + args = None + + return invoke_excepthook + + +# The timer class was contributed by Itamar Shtull-Trauring + +class Timer(Thread): + """Call a function after a specified number of seconds: + + t = Timer(30.0, f, args=None, kwargs=None) + t.start() + t.cancel() # stop the timer's action if it's still waiting + + """ + + def __init__(self, interval, function, args=None, kwargs=None): + Thread.__init__(self) + self.interval = interval + self.function = function + self.args = args if args is not None else [] + self.kwargs = kwargs if kwargs is not None else {} + self.finished = Event() + + def cancel(self): + """Stop the timer if it hasn't finished yet.""" + self.finished.set() + + def run(self): + self.finished.wait(self.interval) + if not self.finished.is_set(): + self.function(*self.args, **self.kwargs) + self.finished.set() + + +# Special thread class to represent the main thread + +class _MainThread(Thread): + + def __init__(self): + Thread.__init__(self, name="MainThread", daemon=False) + self._started.set() + self._ident = _get_main_thread_ident() + self._os_thread_handle = _make_thread_handle(self._ident) + if _HAVE_THREAD_NATIVE_ID: + self._set_native_id() + with _active_limbo_lock: + _active[self._ident] = self + + +# Helper thread-local instance to detect when a _DummyThread +# is collected. Not a part of the public API. +_thread_local_info = local() + + +class _DeleteDummyThreadOnDel: + ''' + Helper class to remove a dummy thread from threading._active on __del__. + ''' + + def __init__(self, dummy_thread): + self._dummy_thread = dummy_thread + self._tident = dummy_thread.ident + # Put the thread on a thread local variable so that when + # the related thread finishes this instance is collected. + # + # Note: no other references to this instance may be created. + # If any client code creates a reference to this instance, + # the related _DummyThread will be kept forever! + _thread_local_info._track_dummy_thread_ref = self + + def __del__(self, _active_limbo_lock=_active_limbo_lock, _active=_active): + with _active_limbo_lock: + if _active.get(self._tident) is self._dummy_thread: + _active.pop(self._tident, None) + + +# Dummy thread class to represent threads not started here. +# These should be added to `_active` and removed automatically +# when they die, although they can't be waited for. +# Their purpose is to return *something* from current_thread(). +# They are marked as daemon threads so we won't wait for them +# when we exit (conform previous semantics). 
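The Timer subclass above simply waits on its internal Event before calling the function, which is why cancel() works right up until the interval elapses; a sketch with an arbitrary 0.1 s delay:

    import threading

    t = threading.Timer(0.1, print, args=("fired",))
    t.start()
    # t.cancel() at this point would suppress the call
    t.join()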
+ +class _DummyThread(Thread): + + def __init__(self): + Thread.__init__(self, name=_newname("Dummy-%d"), + daemon=_daemon_threads_allowed()) + self._started.set() + self._set_ident() + self._os_thread_handle = _make_thread_handle(self._ident) + if _HAVE_THREAD_NATIVE_ID: + self._set_native_id() + with _active_limbo_lock: + _active[self._ident] = self + _DeleteDummyThreadOnDel(self) + + def is_alive(self): + if not self._os_thread_handle.is_done() and self._started.is_set(): + return True + raise RuntimeError("thread is not alive") + + def join(self, timeout=None): + raise RuntimeError("cannot join a dummy thread") + + def _after_fork(self, new_ident=None): + if new_ident is not None: + self.__class__ = _MainThread + self._name = 'MainThread' + self._daemonic = False + Thread._after_fork(self, new_ident=new_ident) + + +# Global API functions + +def current_thread(): + """Return the current Thread object, corresponding to the caller's thread of control. + + If the caller's thread of control was not created through the threading + module, a dummy thread object with limited functionality is returned. + + """ + try: + return _active[get_ident()] + except KeyError: + return _DummyThread() + +def currentThread(): + """Return the current Thread object, corresponding to the caller's thread of control. + + This function is deprecated, use current_thread() instead. + + """ + import warnings + warnings.warn('currentThread() is deprecated, use current_thread() instead', + DeprecationWarning, stacklevel=2) + return current_thread() + +def active_count(): + """Return the number of Thread objects currently alive. + + The returned count is equal to the length of the list returned by + enumerate(). + + """ + # NOTE: if the logic in here ever changes, update Modules/posixmodule.c + # warn_about_fork_with_threads() to match. + with _active_limbo_lock: + return len(_active) + len(_limbo) + +def activeCount(): + """Return the number of Thread objects currently alive. + + This function is deprecated, use active_count() instead. + + """ + import warnings + warnings.warn('activeCount() is deprecated, use active_count() instead', + DeprecationWarning, stacklevel=2) + return active_count() + +def _enumerate(): + # Same as enumerate(), but without the lock. Internal use only. + return list(_active.values()) + list(_limbo.values()) + +def enumerate(): + """Return a list of all Thread objects currently alive. + + The list includes daemonic threads, dummy thread objects created by + current_thread(), and the main thread. It excludes terminated threads and + threads that have not yet been started. + + """ + with _active_limbo_lock: + return list(_active.values()) + list(_limbo.values()) + + +_threading_atexits = [] +_SHUTTING_DOWN = False + +def _register_atexit(func, *arg, **kwargs): + """CPython internal: register *func* to be called before joining threads. + + The registered *func* is called with its arguments just before all + non-daemon threads are joined in `_shutdown()`. It provides a similar + purpose to `atexit.register()`, but its functions are called prior to + threading shutdown instead of interpreter shutdown. + + For similarity to atexit, the registered functions are called in reverse. + """ + if _SHUTTING_DOWN: + raise RuntimeError("can't register atexit after shutdown") + + _threading_atexits.append(lambda: func(*arg, **kwargs)) + + +from _thread import stack_size + +# Create the main thread object, +# and make it available for the interpreter +# (Py_Main) as threading._shutdown. 
+ +_main_thread = _MainThread() + +def _shutdown(): + """ + Wait until the Python thread state of all non-daemon threads get deleted. + """ + # Obscure: other threads may be waiting to join _main_thread. That's + # dubious, but some code does it. We can't wait for it to be marked as done + # normally - that won't happen until the interpreter is nearly dead. So + # mark it done here. + if _main_thread._os_thread_handle.is_done() and _is_main_interpreter(): + # _shutdown() was already called + return + + global _SHUTTING_DOWN + _SHUTTING_DOWN = True + + # Call registered threading atexit functions before threads are joined. + # Order is reversed, similar to atexit. + for atexit_call in reversed(_threading_atexits): + atexit_call() + + if _is_main_interpreter(): + _main_thread._os_thread_handle._set_done() + + # Wait for all non-daemon threads to exit. + _thread_shutdown() + + +def main_thread(): + """Return the main thread object. + + In normal conditions, the main thread is the thread from which the + Python interpreter was started. + """ + # XXX Figure this out for subinterpreters. (See gh-75698.) + return _main_thread + + +def _after_fork(): + """ + Cleanup threading module state that should not exist after a fork. + """ + # Reset _active_limbo_lock, in case we forked while the lock was held + # by another (non-forked) thread. http://bugs.python.org/issue874900 + global _active_limbo_lock, _main_thread + _active_limbo_lock = RLock() + + # fork() only copied the current thread; clear references to others. + new_active = {} + + try: + current = _active[get_ident()] + except KeyError: + # fork() was called in a thread which was not spawned + # by threading.Thread. For example, a thread spawned + # by thread.start_new_thread(). + current = _MainThread() + + _main_thread = current + + with _active_limbo_lock: + # Dangling thread instances must still have their locks reset, + # because someone may join() them. + threads = set(_enumerate()) + threads.update(_dangling) + for thread in threads: + # Any lock/condition variable may be currently locked or in an + # invalid state, so we reinitialize them. + if thread is current: + # This is the one and only active thread. + ident = get_ident() + thread._after_fork(new_ident=ident) + new_active[ident] = thread + else: + # All the others are already stopped. + thread._after_fork() + + _limbo.clear() + _active.clear() + _active.update(new_active) + assert len(_active) == 1 + + +if hasattr(_os, "register_at_fork"): + _os.register_at_fork(after_in_child=_after_fork) diff --git a/Python314_4_x64_Template/Lib/timeit.py b/Python314_4_x64_Template/Lib/timeit.py new file mode 100644 index 00000000..e767f018 --- /dev/null +++ b/Python314_4_x64_Template/Lib/timeit.py @@ -0,0 +1,378 @@ +"""Tool for measuring execution time of small code snippets. + +This module avoids a number of common traps for measuring execution +times. See also Tim Peters' introduction to the Algorithms chapter in +the Python Cookbook, published by O'Reilly. + +Library usage: see the Timer class. + +Command line usage: + python timeit.py [-n N] [-r N] [-s S] [-p] [-h] [--] [statement] + +Options: + -n/--number N: how many times to execute 'statement' (default: see below) + -r/--repeat N: how many times to repeat the timer (default 5) + -s/--setup S: statement to be executed once initially (default 'pass'). + Execution time of this setup statement is NOT timed. 
+    -p/--process: use time.process_time() (default is time.perf_counter())
+    -v/--verbose: print raw timing results; repeat for more digits precision
+    -u/--unit: set the output time unit (nsec, usec, msec, or sec)
+    -h/--help: print this usage message and exit
+    --: separate options from statement, use when statement starts with -
+    statement: statement to be timed (default 'pass')
+
+A multi-line statement may be given by specifying each line as a
+separate argument; indented lines are possible by enclosing an
+argument in quotes and using leading spaces.  Multiple -s options are
+treated similarly.
+
+If -n is not given, a suitable number of loops is calculated by trying
+increasing numbers from the sequence 1, 2, 5, 10, 20, 50, ... until the
+total time is at least 0.2 seconds.
+
+Note: there is a certain baseline overhead associated with executing a
+pass statement.  It differs between versions.  The code here doesn't try
+to hide it, but you should be aware of it.  The baseline overhead can be
+measured by invoking the program without arguments.
+
+Classes:
+
+    Timer
+
+Functions:
+
+    timeit(string, string) -> float
+    repeat(string, string) -> list
+    default_timer() -> float
+"""
+
+import gc
+import itertools
+import sys
+import time
+
+__all__ = ["Timer", "timeit", "repeat", "default_timer"]
+
+dummy_src_name = "<timeit-src>"
+default_number = 1000000
+default_repeat = 5
+default_timer = time.perf_counter
+
+_globals = globals
+
+# Don't change the indentation of the template; the reindent() calls
+# in Timer.__init__() depend on setup being indented 4 spaces and stmt
+# being indented 8 spaces.
+template = """
+def inner(_it, _timer{init}):
+    {setup}
+    _t0 = _timer()
+    for _i in _it:
+        {stmt}
+        pass
+    _t1 = _timer()
+    return _t1 - _t0
+"""
+
+
+def reindent(src, indent):
+    """Helper to reindent a multi-line statement."""
+    return src.replace("\n", "\n" + " " * indent)
+
+
+class Timer:
+    """Class for timing execution speed of small code snippets.
+
+    The constructor takes a statement to be timed, an additional
+    statement used for setup, and a timer function.  Both statements
+    default to 'pass'; the timer function is platform-dependent (see
+    module doc string).  If 'globals' is specified, the code will be
+    executed within that namespace (as opposed to inside timeit's
+    namespace).
+
+    To measure the execution time of the first statement, use the
+    timeit() method.  The repeat() method is a convenience to call
+    timeit() multiple times and return a list of results.
+
+    The statements may contain newlines, as long as they don't contain
+    multi-line string literals.
+    """
+
+    def __init__(self, stmt="pass", setup="pass", timer=default_timer,
+                 globals=None):
+        """Constructor.  See class doc string."""
+        self.timer = timer
+        local_ns = {}
+        global_ns = _globals() if globals is None else globals
+        init = ''
+        if isinstance(setup, str):
+            # Check that the code can be compiled outside a function
+            compile(setup, dummy_src_name, "exec")
+            stmtprefix = setup + '\n'
+            setup = reindent(setup, 4)
+        elif callable(setup):
+            local_ns['_setup'] = setup
+            init += ', _setup=_setup'
+            stmtprefix = ''
+            setup = '_setup()'
+        else:
+            raise ValueError("setup is neither a string nor callable")
+        if isinstance(stmt, str):
+            # Check that the code can be compiled outside a function
+            compile(stmtprefix + stmt, dummy_src_name, "exec")
+            stmt = reindent(stmt, 8)
+        elif callable(stmt):
+            local_ns['_stmt'] = stmt
+            init += ', _stmt=_stmt'
+            stmt = '_stmt()'
+        else:
+            raise ValueError("stmt is neither a string nor callable")
+        src = template.format(stmt=stmt, setup=setup, init=init)
+        self.src = src  # Save for traceback display
+        code = compile(src, dummy_src_name, "exec")
+        exec(code, global_ns, local_ns)
+        self.inner = local_ns["inner"]
+
+    def print_exc(self, file=None):
+        """Helper to print a traceback from the timed code.
+
+        Typical use:
+
+            t = Timer(...)       # outside the try/except
+            try:
+                t.timeit(...)    # or t.repeat(...)
+            except:
+                t.print_exc()
+
+        The advantage over the standard traceback is that source lines
+        in the compiled template will be displayed.
+
+        The optional file argument directs where the traceback is
+        sent; it defaults to sys.stderr.
+        """
+        import linecache, traceback
+        if self.src is not None:
+            linecache.cache[dummy_src_name] = (len(self.src),
+                                               None,
+                                               self.src.split("\n"),
+                                               dummy_src_name)
+        # else the source is already stored somewhere else
+
+        traceback.print_exc(file=file)
+
+    def timeit(self, number=default_number):
+        """Time 'number' executions of the main statement.
+
+        To be precise, this executes the setup statement once, and
+        then returns the time it takes to execute the main statement
+        a number of times, as float seconds if using the default timer. The
+        argument is the number of times through the loop, defaulting
+        to one million.  The main statement, the setup statement and
+        the timer function to be used are passed to the constructor.
+        """
+        it = itertools.repeat(None, number)
+        gcold = gc.isenabled()
+        gc.disable()
+        try:
+            timing = self.inner(it, self.timer)
+        finally:
+            if gcold:
+                gc.enable()
+        return timing
+
+    def repeat(self, repeat=default_repeat, number=default_number):
+        """Call timeit() a few times.
+
+        This is a convenience function that calls the timeit()
+        repeatedly, returning a list of results.  The first argument
+        specifies how many times to call timeit(), defaulting to 5;
+        the second argument specifies the 'number' argument of
+        timeit(), defaulting to one million.
+
+        Note: it's tempting to calculate mean and standard deviation
+        from the result vector and report these.  However, this is not
+        very useful.  In a typical case, the lowest value gives a
+        lower bound for how fast your machine can run the given code
+        snippet; higher values in the result vector are typically not
+        caused by variability in Python's speed, but by other
+        processes interfering with your timing accuracy.  So the min()
+        of the result is probably the only number you should be
+        interested in.  After that, you should look at the entire
+        vector and apply common sense rather than statistics.
+ """ + r = [] + for i in range(repeat): + t = self.timeit(number) + r.append(t) + return r + + def autorange(self, callback=None): + """Return the number of loops and time taken so that total time >= 0.2. + + Calls the timeit method with increasing numbers from the sequence + 1, 2, 5, 10, 20, 50, ... until the time taken is at least 0.2 + second. Returns (number, time_taken). + + If *callback* is given and is not None, it will be called after + each trial with two arguments: ``callback(number, time_taken)``. + """ + i = 1 + while True: + for j in 1, 2, 5: + number = i * j + time_taken = self.timeit(number) + if callback: + callback(number, time_taken) + if time_taken >= 0.2: + return (number, time_taken) + i *= 10 + + +def timeit(stmt="pass", setup="pass", timer=default_timer, + number=default_number, globals=None): + """Convenience function to create Timer object and call timeit method.""" + return Timer(stmt, setup, timer, globals).timeit(number) + + +def repeat(stmt="pass", setup="pass", timer=default_timer, + repeat=default_repeat, number=default_number, globals=None): + """Convenience function to create Timer object and call repeat method.""" + return Timer(stmt, setup, timer, globals).repeat(repeat, number) + + +def main(args=None, *, _wrap_timer=None): + """Main program, used when run as a script. + + The optional 'args' argument specifies the command line to be parsed, + defaulting to sys.argv[1:]. + + The return value is an exit code to be passed to sys.exit(); it + may be None to indicate success. + + When an exception happens during timing, a traceback is printed to + stderr and the return value is 1. Exceptions at other times + (including the template compilation) are not caught. + + '_wrap_timer' is an internal interface used for unit testing. If it + is not None, it must be a callable that accepts a timer function + and returns another timer function (used for unit testing). + """ + if args is None: + args = sys.argv[1:] + import getopt + try: + opts, args = getopt.getopt(args, "n:u:s:r:pvh", + ["number=", "setup=", "repeat=", + "process", "verbose", "unit=", "help"]) + except getopt.error as err: + print(err) + print("use -h/--help for command line help") + return 2 + + timer = default_timer + stmt = "\n".join(args) or "pass" + number = 0 # auto-determine + setup = [] + repeat = default_repeat + verbose = 0 + time_unit = None + units = {"nsec": 1e-9, "usec": 1e-6, "msec": 1e-3, "sec": 1.0} + precision = 3 + for o, a in opts: + if o in ("-n", "--number"): + number = int(a) + if o in ("-s", "--setup"): + setup.append(a) + if o in ("-u", "--unit"): + if a in units: + time_unit = a + else: + print("Unrecognized unit. 
Please select nsec, usec, msec, or sec.", + file=sys.stderr) + return 2 + if o in ("-r", "--repeat"): + repeat = int(a) + if repeat <= 0: + repeat = 1 + if o in ("-p", "--process"): + timer = time.process_time + if o in ("-v", "--verbose"): + if verbose: + precision += 1 + verbose += 1 + if o in ("-h", "--help"): + print(__doc__, end="") + return 0 + setup = "\n".join(setup) or "pass" + + # Include the current directory, so that local imports work (sys.path + # contains the directory of this script, rather than the current + # directory) + import os + sys.path.insert(0, os.curdir) + if _wrap_timer is not None: + timer = _wrap_timer(timer) + + t = Timer(stmt, setup, timer) + if number == 0: + # determine number so that 0.2 <= total time < 2.0 + callback = None + if verbose: + def callback(number, time_taken): + msg = "{num} loop{s} -> {secs:.{prec}g} secs" + plural = (number != 1) + print(msg.format(num=number, s='s' if plural else '', + secs=time_taken, prec=precision)) + try: + number, _ = t.autorange(callback) + except: + t.print_exc() + return 1 + + if verbose: + print() + + try: + raw_timings = t.repeat(repeat, number) + except: + t.print_exc() + return 1 + + def format_time(dt): + unit = time_unit + + if unit is not None: + scale = units[unit] + else: + scales = [(scale, unit) for unit, scale in units.items()] + scales.sort(reverse=True) + for scale, unit in scales: + if dt >= scale: + break + + return "%.*g %s" % (precision, dt / scale, unit) + + if verbose: + print("raw times: %s" % ", ".join(map(format_time, raw_timings))) + print() + timings = [dt / number for dt in raw_timings] + + best = min(timings) + print("%d loop%s, best of %d: %s per loop" + % (number, 's' if number != 1 else '', + repeat, format_time(best))) + + best = min(timings) + worst = max(timings) + if worst >= best * 4: + import warnings + warnings.warn_explicit("The test results are likely unreliable. " + "The worst time (%s) was more than four times " + "slower than the best time (%s)." 
+ % (format_time(worst), format_time(best)), + UserWarning, '', 0) + return None + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/Python314_4_x64_Template/Lib/token.py b/Python314_4_x64_Template/Lib/token.py new file mode 100644 index 00000000..f61723cc --- /dev/null +++ b/Python314_4_x64_Template/Lib/token.py @@ -0,0 +1,144 @@ +"""Token constants.""" +# Auto-generated by Tools/build/generate_token.py + +__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF', + 'EXACT_TOKEN_TYPES'] + +ENDMARKER = 0 +NAME = 1 +NUMBER = 2 +STRING = 3 +NEWLINE = 4 +INDENT = 5 +DEDENT = 6 +LPAR = 7 +RPAR = 8 +LSQB = 9 +RSQB = 10 +COLON = 11 +COMMA = 12 +SEMI = 13 +PLUS = 14 +MINUS = 15 +STAR = 16 +SLASH = 17 +VBAR = 18 +AMPER = 19 +LESS = 20 +GREATER = 21 +EQUAL = 22 +DOT = 23 +PERCENT = 24 +LBRACE = 25 +RBRACE = 26 +EQEQUAL = 27 +NOTEQUAL = 28 +LESSEQUAL = 29 +GREATEREQUAL = 30 +TILDE = 31 +CIRCUMFLEX = 32 +LEFTSHIFT = 33 +RIGHTSHIFT = 34 +DOUBLESTAR = 35 +PLUSEQUAL = 36 +MINEQUAL = 37 +STAREQUAL = 38 +SLASHEQUAL = 39 +PERCENTEQUAL = 40 +AMPEREQUAL = 41 +VBAREQUAL = 42 +CIRCUMFLEXEQUAL = 43 +LEFTSHIFTEQUAL = 44 +RIGHTSHIFTEQUAL = 45 +DOUBLESTAREQUAL = 46 +DOUBLESLASH = 47 +DOUBLESLASHEQUAL = 48 +AT = 49 +ATEQUAL = 50 +RARROW = 51 +ELLIPSIS = 52 +COLONEQUAL = 53 +EXCLAMATION = 54 +OP = 55 +TYPE_IGNORE = 56 +TYPE_COMMENT = 57 +SOFT_KEYWORD = 58 +FSTRING_START = 59 +FSTRING_MIDDLE = 60 +FSTRING_END = 61 +TSTRING_START = 62 +TSTRING_MIDDLE = 63 +TSTRING_END = 64 +COMMENT = 65 +NL = 66 +# These aren't used by the C tokenizer but are needed for tokenize.py +ERRORTOKEN = 67 +ENCODING = 68 +N_TOKENS = 69 +# Special definitions for cooperation with parser +NT_OFFSET = 256 + +tok_name = {value: name + for name, value in globals().items() + if isinstance(value, int) and not name.startswith('_')} +__all__.extend(tok_name.values()) + +EXACT_TOKEN_TYPES = { + '!': EXCLAMATION, + '!=': NOTEQUAL, + '%': PERCENT, + '%=': PERCENTEQUAL, + '&': AMPER, + '&=': AMPEREQUAL, + '(': LPAR, + ')': RPAR, + '*': STAR, + '**': DOUBLESTAR, + '**=': DOUBLESTAREQUAL, + '*=': STAREQUAL, + '+': PLUS, + '+=': PLUSEQUAL, + ',': COMMA, + '-': MINUS, + '-=': MINEQUAL, + '->': RARROW, + '.': DOT, + '...': ELLIPSIS, + '/': SLASH, + '//': DOUBLESLASH, + '//=': DOUBLESLASHEQUAL, + '/=': SLASHEQUAL, + ':': COLON, + ':=': COLONEQUAL, + ';': SEMI, + '<': LESS, + '<<': LEFTSHIFT, + '<<=': LEFTSHIFTEQUAL, + '<=': LESSEQUAL, + '=': EQUAL, + '==': EQEQUAL, + '>': GREATER, + '>=': GREATEREQUAL, + '>>': RIGHTSHIFT, + '>>=': RIGHTSHIFTEQUAL, + '@': AT, + '@=': ATEQUAL, + '[': LSQB, + ']': RSQB, + '^': CIRCUMFLEX, + '^=': CIRCUMFLEXEQUAL, + '{': LBRACE, + '|': VBAR, + '|=': VBAREQUAL, + '}': RBRACE, + '~': TILDE, +} + +def ISTERMINAL(x: int) -> bool: + return x < NT_OFFSET + +def ISNONTERMINAL(x: int) -> bool: + return x >= NT_OFFSET + +def ISEOF(x: int) -> bool: + return x == ENDMARKER diff --git a/Python314_4_x64_Template/Lib/tokenize.py b/Python314_4_x64_Template/Lib/tokenize.py new file mode 100644 index 00000000..1f31258c --- /dev/null +++ b/Python314_4_x64_Template/Lib/tokenize.py @@ -0,0 +1,598 @@ +"""Tokenization help for Python programs. + +tokenize(readline) is a generator that breaks a stream of bytes into +Python tokens. It decodes the bytes according to PEP-0263 for +determining source file encoding. + +It accepts a readline-like method which is called repeatedly to get the +next line of input (or b"" for EOF). 
It generates 5-tuples with these
+members:
+
+    the token type (see token.py)
+    the token (a string)
+    the starting (row, column) indices of the token (a 2-tuple of ints)
+    the ending (row, column) indices of the token (a 2-tuple of ints)
+    the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators.  Additionally, all token lists start with an ENCODING token
+which tells you which encoding was used to decode the bytes stream.
+"""
+
+__author__ = 'Ka-Ping Yee <ping@lfw.org>'
+__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
+               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
+               'Michael Foord')
+from builtins import open as _builtin_open
+from codecs import lookup, BOM_UTF8
+import collections
+import functools
+from io import TextIOWrapper
+import itertools as _itertools
+import re
+import sys
+from token import *
+from token import EXACT_TOKEN_TYPES
+import _tokenize
+
+cookie_re = re.compile(br'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
+blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
+
+import token
+__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
+                           "untokenize", "TokenInfo", "open", "TokenError"]
+del token
+
+class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
+    def __repr__(self):
+        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
+        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
+                self._replace(type=annotated_type))
+
+    @property
+    def exact_type(self):
+        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
+            return EXACT_TOKEN_TYPES[self.string]
+        else:
+            return self.type
+
+def group(*choices): return '(' + '|'.join(choices) + ')'
+def any(*choices): return group(*choices) + '*'
+def maybe(*choices): return group(*choices) + '?'
+
+# Note: we use unicode matching for names ("\w") but ascii matching for
+# number literals.
+Whitespace = r'[ \f\t]*'
+Comment = r'#[^\r\n]*'
+Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+Name = r'\w+'
+
+Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
+Binnumber = r'0[bB](?:_?[01])+'
+Octnumber = r'0[oO](?:_?[0-7])+'
+Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
+Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
+Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
+Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
+                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
+Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
+Floatnumber = group(Pointfloat, Expfloat)
+Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
+Number = group(Imagnumber, Floatnumber, Intnumber)
+
+# Return the empty string, plus all of the valid string prefixes.
+def _all_string_prefixes():
+    # The valid string prefixes. Only contain the lower case versions,
+    #  and don't contain any permutations (include 'fr', but not
+    # 'rf'). The various permutations will be generated.
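+    # For example, given the expansion below, 'fr' yields fr, fR, Fr, FR,
+    # rf, rF, Rf and RF (permutations times the upper/lower case product).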
+ _valid_string_prefixes = ['b', 'r', 'u', 'f', 't', 'br', 'fr', 'tr'] + # if we add binary f-strings, add: ['fb', 'fbr'] + result = {''} + for prefix in _valid_string_prefixes: + for t in _itertools.permutations(prefix): + # create a list with upper and lower versions of each + # character + for u in _itertools.product(*[(c, c.upper()) for c in t]): + result.add(''.join(u)) + return result + +@functools.lru_cache +def _compile(expr): + return re.compile(expr, re.UNICODE) + +# Note that since _all_string_prefixes includes the empty string, +# StringPrefix can be the empty string (making it optional). +StringPrefix = group(*_all_string_prefixes()) + +# Tail end of ' string. +Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. +Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +Triple = group(StringPrefix + "'''", StringPrefix + '"""') +# Single-line ' or " string. +String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", + StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') + +# Sorting in reverse order puts the long operators before their prefixes. +# Otherwise if = came before ==, == would get recognized as two instances +# of =. +Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True))) +Funny = group(r'\r?\n', Special) + +PlainToken = group(Number, Funny, String, Name) +Token = Ignore + PlainToken + +# First (or only) line of ' or " string. +ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + + group("'", r'\\\r?\n'), + StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + + group('"', r'\\\r?\n')) +PseudoExtras = group(r'\\\r?\n|\z', Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +# For a given string prefix plus quotes, endpats maps it to a regex +# to match the remainder of that string. _prefix can be empty, for +# a normal single or triple quoted string (with no prefix). +endpats = {} +for _prefix in _all_string_prefixes(): + endpats[_prefix + "'"] = Single + endpats[_prefix + '"'] = Double + endpats[_prefix + "'''"] = Single3 + endpats[_prefix + '"""'] = Double3 +del _prefix + +# A set of all of the single and triple quoted string prefixes, +# including the opening quotes. +single_quoted = set() +triple_quoted = set() +for t in _all_string_prefixes(): + for u in (t + '"', t + "'"): + single_quoted.add(u) + for u in (t + '"""', t + "'''"): + triple_quoted.add(u) +del t, u + +tabsize = 8 + +class TokenError(Exception): pass + + +class Untokenizer: + + def __init__(self): + self.tokens = [] + self.prev_row = 1 + self.prev_col = 0 + self.prev_type = None + self.prev_line = "" + self.encoding = None + + def add_whitespace(self, start): + row, col = start + if row < self.prev_row or row == self.prev_row and col < self.prev_col: + raise ValueError("start ({},{}) precedes previous end ({},{})" + .format(row, col, self.prev_row, self.prev_col)) + self.add_backslash_continuation(start) + col_offset = col - self.prev_col + if col_offset: + self.tokens.append(" " * col_offset) + + def add_backslash_continuation(self, start): + """Add backslash continuation characters if the row has increased + without encountering a newline token. + + This also inserts the correct amount of whitespace before the backslash. 
+ """ + row = start[0] + row_offset = row - self.prev_row + if row_offset == 0: + return + + newline = '\r\n' if self.prev_line.endswith('\r\n') else '\n' + line = self.prev_line.rstrip('\\\r\n') + ws = ''.join(_itertools.takewhile(str.isspace, reversed(line))) + self.tokens.append(ws + f"\\{newline}" * row_offset) + self.prev_col = 0 + + def escape_brackets(self, token): + characters = [] + consume_until_next_bracket = False + for character in token: + if character == "}": + if consume_until_next_bracket: + consume_until_next_bracket = False + else: + characters.append(character) + if character == "{": + n_backslashes = sum( + 1 for char in _itertools.takewhile( + "\\".__eq__, + characters[-2::-1] + ) + ) + if n_backslashes % 2 == 0 or characters[-1] != "N": + characters.append(character) + else: + consume_until_next_bracket = True + characters.append(character) + return "".join(characters) + + def untokenize(self, iterable): + it = iter(iterable) + indents = [] + startline = False + for t in it: + if len(t) == 2: + self.compat(t, it) + break + tok_type, token, start, end, line = t + if tok_type == ENCODING: + self.encoding = token + continue + if tok_type == ENDMARKER: + break + if tok_type == INDENT: + indents.append(token) + continue + elif tok_type == DEDENT: + indents.pop() + self.prev_row, self.prev_col = end + continue + elif tok_type in (NEWLINE, NL): + startline = True + elif startline and indents: + indent = indents[-1] + if start[1] >= len(indent): + self.tokens.append(indent) + self.prev_col = len(indent) + startline = False + elif tok_type in {FSTRING_MIDDLE, TSTRING_MIDDLE}: + if '{' in token or '}' in token: + token = self.escape_brackets(token) + last_line = token.splitlines()[-1] + end_line, end_col = end + extra_chars = last_line.count("{{") + last_line.count("}}") + end = (end_line, end_col + extra_chars) + + self.add_whitespace(start) + self.tokens.append(token) + self.prev_row, self.prev_col = end + if tok_type in (NEWLINE, NL): + self.prev_row += 1 + self.prev_col = 0 + self.prev_type = tok_type + self.prev_line = line + return "".join(self.tokens) + + def compat(self, token, iterable): + indents = [] + toks_append = self.tokens.append + startline = token[0] in (NEWLINE, NL) + prevstring = False + in_fstring_or_tstring = 0 + + for tok in _itertools.chain([token], iterable): + toknum, tokval = tok[:2] + if toknum == ENCODING: + self.encoding = tokval + continue + + if toknum in (NAME, NUMBER): + tokval += ' ' + + # Insert a space between two consecutive strings + if toknum == STRING: + if prevstring: + tokval = ' ' + tokval + prevstring = True + else: + prevstring = False + + if toknum in {FSTRING_START, TSTRING_START}: + in_fstring_or_tstring += 1 + elif toknum in {FSTRING_END, TSTRING_END}: + in_fstring_or_tstring -= 1 + if toknum == INDENT: + indents.append(tokval) + continue + elif toknum == DEDENT: + indents.pop() + continue + elif toknum in (NEWLINE, NL): + startline = True + elif startline and indents: + toks_append(indents[-1]) + startline = False + elif toknum in {FSTRING_MIDDLE, TSTRING_MIDDLE}: + tokval = self.escape_brackets(tokval) + + # Insert a space between two consecutive brackets if we are in an f-string or t-string + if tokval in {"{", "}"} and self.tokens and self.tokens[-1] == tokval and in_fstring_or_tstring: + tokval = ' ' + tokval + + # Insert a space between two consecutive f-strings + if toknum in (STRING, FSTRING_START) and self.prev_type in (STRING, FSTRING_END): + self.tokens.append(" ") + + toks_append(tokval) + self.prev_type = toknum + 
+ +def untokenize(iterable): + """Transform tokens back into Python source code. + It returns a bytes object, encoded using the ENCODING + token, which is the first token sequence output by tokenize. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + The result is guaranteed to tokenize back to match the input so + that the conversion is lossless and round-trips are assured. + The guarantee applies only to the token type and token string as + the spacing between tokens (column positions) may change. + """ + ut = Untokenizer() + out = ut.untokenize(iterable) + if ut.encoding is not None: + out = out.encode(ut.encoding) + return out + + +def _get_normal_name(orig_enc): + """Imitates get_normal_name in Parser/tokenizer/helpers.c.""" + # Only care about the first 12 characters. + enc = orig_enc[:12].lower().replace("_", "-") + if enc == "utf-8" or enc.startswith("utf-8-"): + return "utf-8" + if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ + enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): + return "iso-8859-1" + return orig_enc + +def detect_encoding(readline): + """ + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + """ + try: + filename = readline.__self__.name + except AttributeError: + filename = None + bom_found = False + encoding = None + default = 'utf-8' + def read_or_stop(): + try: + return readline() + except StopIteration: + return b'' + + def check(line, encoding): + # Check if the line matches the encoding. 
+ if 0 in line: + raise SyntaxError("source code cannot contain null bytes") + try: + line.decode(encoding) + except UnicodeDecodeError: + msg = "invalid or missing encoding declaration" + if filename is not None: + msg = '{} for {!r}'.format(msg, filename) + raise SyntaxError(msg) + + def find_cookie(line): + match = cookie_re.match(line) + if not match: + return None + encoding = _get_normal_name(match.group(1).decode()) + try: + codec = lookup(encoding) + except LookupError: + # This behaviour mimics the Python interpreter + if filename is None: + msg = "unknown encoding: " + encoding + else: + msg = "unknown encoding for {!r}: {}".format(filename, + encoding) + raise SyntaxError(msg) + + if bom_found: + if encoding != 'utf-8': + # This behaviour mimics the Python interpreter + if filename is None: + msg = 'encoding problem: utf-8' + else: + msg = 'encoding problem for {!r}: utf-8'.format(filename) + raise SyntaxError(msg) + encoding += '-sig' + return encoding + + first = read_or_stop() + if first.startswith(BOM_UTF8): + bom_found = True + first = first[3:] + default = 'utf-8-sig' + if not first: + return default, [] + + encoding = find_cookie(first) + if encoding: + check(first, encoding) + return encoding, [first] + if not blank_re.match(first): + check(first, default) + return default, [first] + + second = read_or_stop() + if not second: + check(first, default) + return default, [first] + + encoding = find_cookie(second) + if encoding: + check(first + second, encoding) + return encoding, [first, second] + + check(first + second, default) + return default, [first, second] + + +def open(filename): + """Open a file in read only mode using the encoding detected by + detect_encoding(). + """ + buffer = _builtin_open(filename, 'rb') + try: + encoding, lines = detect_encoding(buffer.readline) + buffer.seek(0) + text = TextIOWrapper(buffer, encoding, line_buffering=True) + text.mode = 'r' + return text + except: + buffer.close() + raise + +def tokenize(readline): + """ + The tokenize() generator requires one argument, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as bytes. Alternatively, readline + can be a callable function terminating with StopIteration: + readline = open(myfile, 'rb').__next__ # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + physical line. + + The first token sequence will always be an ENCODING token + which tells you which encoding was used to decode the bytes stream. + """ + encoding, consumed = detect_encoding(readline) + rl_gen = _itertools.chain(consumed, iter(readline, b"")) + if encoding is not None: + if encoding == "utf-8-sig": + # BOM will already have been stripped. + encoding = "utf-8" + yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') + yield from _generate_tokens_from_c_tokenizer(rl_gen.__next__, encoding, extra_tokens=True) + +def generate_tokens(readline): + """Tokenize a source reading Python code as unicode strings. + + This has the same API as tokenize(), except that it expects the *readline* + callable to return str objects instead of bytes. 
+ """ + return _generate_tokens_from_c_tokenizer(readline, extra_tokens=True) + +def _main(args=None): + import argparse + + # Helper error handling routines + def perror(message): + sys.stderr.write(message) + sys.stderr.write('\n') + + def error(message, filename=None, location=None): + if location: + args = (filename,) + location + (message,) + perror("%s:%d:%d: error: %s" % args) + elif filename: + perror("%s: error: %s" % (filename, message)) + else: + perror("error: %s" % message) + sys.exit(1) + + # Parse the arguments and options + parser = argparse.ArgumentParser(color=True) + parser.add_argument(dest='filename', nargs='?', + metavar='filename.py', + help='the file to tokenize; defaults to stdin') + parser.add_argument('-e', '--exact', dest='exact', action='store_true', + help='display token names using the exact type') + args = parser.parse_args(args) + + try: + # Tokenize the input + if args.filename: + filename = args.filename + with _builtin_open(filename, 'rb') as f: + tokens = list(tokenize(f.readline)) + else: + filename = "" + tokens = _generate_tokens_from_c_tokenizer( + sys.stdin.readline, extra_tokens=True) + + + # Output the tokenization + for token in tokens: + token_type = token.type + if args.exact: + token_type = token.exact_type + token_range = "%d,%d-%d,%d:" % (token.start + token.end) + print("%-20s%-15s%-15r" % + (token_range, tok_name[token_type], token.string)) + except IndentationError as err: + line, column = err.args[1][1:3] + error(err.args[0], filename, (line, column)) + except TokenError as err: + line, column = err.args[1] + error(err.args[0], filename, (line, column)) + except SyntaxError as err: + error(err, filename) + except OSError as err: + error(err) + except KeyboardInterrupt: + print("interrupted\n") + except Exception as err: + perror("unexpected error: %s" % err) + raise + +def _transform_msg(msg): + """Transform error messages from the C tokenizer into the Python tokenize + + The C tokenizer is more picky than the Python one, so we need to massage + the error messages a bit for backwards compatibility. 
+ """ + if "unterminated triple-quoted string literal" in msg: + return "EOF in multi-line string" + return msg + +def _generate_tokens_from_c_tokenizer(source, encoding=None, extra_tokens=False): + """Tokenize a source reading Python code as unicode strings using the internal C tokenizer""" + if encoding is None: + it = _tokenize.TokenizerIter(source, extra_tokens=extra_tokens) + else: + it = _tokenize.TokenizerIter(source, encoding=encoding, extra_tokens=extra_tokens) + try: + for info in it: + yield TokenInfo._make(info) + except SyntaxError as e: + if type(e) != SyntaxError: + raise e from None + msg = _transform_msg(e.msg) + raise TokenError(msg, (e.lineno, e.offset)) from None + + +if __name__ == "__main__": + _main() diff --git a/Python313_13_x64_Template/Lib/tomllib/__init__.py b/Python314_4_x64_Template/Lib/tomllib/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/tomllib/__init__.py rename to Python314_4_x64_Template/Lib/tomllib/__init__.py diff --git a/Python314_4_x64_Template/Lib/tomllib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/tomllib/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..dd8c34b2 Binary files /dev/null and b/Python314_4_x64_Template/Lib/tomllib/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/tomllib/__pycache__/_parser.cpython-314.pyc b/Python314_4_x64_Template/Lib/tomllib/__pycache__/_parser.cpython-314.pyc new file mode 100644 index 00000000..21d39efb Binary files /dev/null and b/Python314_4_x64_Template/Lib/tomllib/__pycache__/_parser.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/tomllib/__pycache__/_re.cpython-314.pyc b/Python314_4_x64_Template/Lib/tomllib/__pycache__/_re.cpython-314.pyc new file mode 100644 index 00000000..56063066 Binary files /dev/null and b/Python314_4_x64_Template/Lib/tomllib/__pycache__/_re.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/tomllib/_parser.py b/Python314_4_x64_Template/Lib/tomllib/_parser.py new file mode 100644 index 00000000..3ee47aa9 --- /dev/null +++ b/Python314_4_x64_Template/Lib/tomllib/_parser.py @@ -0,0 +1,753 @@ +# SPDX-License-Identifier: MIT +# SPDX-FileCopyrightText: 2021 Taneli Hukkinen +# Licensed to PSF under a Contributor Agreement. + +from __future__ import annotations + +from types import MappingProxyType + +from ._re import ( + RE_DATETIME, + RE_LOCALTIME, + RE_NUMBER, + match_to_datetime, + match_to_localtime, + match_to_number, +) + +TYPE_CHECKING = False +if TYPE_CHECKING: + from collections.abc import Iterable + from typing import IO, Any + + from ._types import Key, ParseFloat, Pos + +ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) + +# Neither of these sets include quotation mark or backslash. They are +# currently handled as separate cases in the parser functions. 
+ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t") +ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n") + +ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS +ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS + +ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS + +TOML_WS = frozenset(" \t") +TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n") +BARE_KEY_CHARS = frozenset( + "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789" "-_" +) +KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'") +HEXDIGIT_CHARS = frozenset("abcdef" "ABCDEF" "0123456789") + +BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( + { + "\\b": "\u0008", # backspace + "\\t": "\u0009", # tab + "\\n": "\u000A", # linefeed + "\\f": "\u000C", # form feed + "\\r": "\u000D", # carriage return + '\\"': "\u0022", # quote + "\\\\": "\u005C", # backslash + } +) + + +class DEPRECATED_DEFAULT: + """Sentinel to be used as default arg during deprecation + period of TOMLDecodeError's free-form arguments.""" + + +class TOMLDecodeError(ValueError): + """An error raised if a document is not valid TOML. + + Adds the following attributes to ValueError: + msg: The unformatted error message + doc: The TOML document being parsed + pos: The index of doc where parsing failed + lineno: The line corresponding to pos + colno: The column corresponding to pos + """ + + def __init__( + self, + msg: str = DEPRECATED_DEFAULT, # type: ignore[assignment] + doc: str = DEPRECATED_DEFAULT, # type: ignore[assignment] + pos: Pos = DEPRECATED_DEFAULT, # type: ignore[assignment] + *args: Any, + ): + if ( + args + or not isinstance(msg, str) + or not isinstance(doc, str) + or not isinstance(pos, int) + ): + import warnings + + warnings.warn( + "Free-form arguments for TOMLDecodeError are deprecated. " + "Please set 'msg' (str), 'doc' (str) and 'pos' (int) arguments only.", + DeprecationWarning, + stacklevel=2, + ) + if pos is not DEPRECATED_DEFAULT: # type: ignore[comparison-overlap] + args = pos, *args + if doc is not DEPRECATED_DEFAULT: # type: ignore[comparison-overlap] + args = doc, *args + if msg is not DEPRECATED_DEFAULT: # type: ignore[comparison-overlap] + args = msg, *args + ValueError.__init__(self, *args) + return + + lineno = doc.count("\n", 0, pos) + 1 + if lineno == 1: + colno = pos + 1 + else: + colno = pos - doc.rindex("\n", 0, pos) + + if pos >= len(doc): + coord_repr = "end of document" + else: + coord_repr = f"line {lineno}, column {colno}" + errmsg = f"{msg} (at {coord_repr})" + ValueError.__init__(self, errmsg) + + self.msg = msg + self.doc = doc + self.pos = pos + self.lineno = lineno + self.colno = colno + + +def load(fp: IO[bytes], /, *, parse_float: ParseFloat = float) -> dict[str, Any]: + """Parse TOML from a binary file object.""" + b = fp.read() + try: + s = b.decode() + except AttributeError: + raise TypeError( + "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`" + ) from None + return loads(s, parse_float=parse_float) + + +def loads(s: str, /, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901 + """Parse TOML from a string.""" + + # The spec allows converting "\r\n" to "\n", even in string + # literals. Let's do so to simplify parsing. 
+ try: + src = s.replace("\r\n", "\n") + except (AttributeError, TypeError): + raise TypeError( + f"Expected str object, not '{type(s).__qualname__}'" + ) from None + pos = 0 + out = Output() + header: Key = () + parse_float = make_safe_parse_float(parse_float) + + # Parse one statement at a time + # (typically means one line in TOML source) + while True: + # 1. Skip line leading whitespace + pos = skip_chars(src, pos, TOML_WS) + + # 2. Parse rules. Expect one of the following: + # - end of file + # - end of line + # - comment + # - key/value pair + # - append dict to list (and move to its namespace) + # - create dict (and move to its namespace) + # Skip trailing whitespace when applicable. + try: + char = src[pos] + except IndexError: + break + if char == "\n": + pos += 1 + continue + if char in KEY_INITIAL_CHARS: + pos = key_value_rule(src, pos, out, header, parse_float) + pos = skip_chars(src, pos, TOML_WS) + elif char == "[": + try: + second_char: str | None = src[pos + 1] + except IndexError: + second_char = None + out.flags.finalize_pending() + if second_char == "[": + pos, header = create_list_rule(src, pos, out) + else: + pos, header = create_dict_rule(src, pos, out) + pos = skip_chars(src, pos, TOML_WS) + elif char != "#": + raise TOMLDecodeError("Invalid statement", src, pos) + + # 3. Skip comment + pos = skip_comment(src, pos) + + # 4. Expect end of line or end of file + try: + char = src[pos] + except IndexError: + break + if char != "\n": + raise TOMLDecodeError( + "Expected newline or end of document after a statement", src, pos + ) + pos += 1 + + return out.data.dict + + +class Flags: + """Flags that map to parsed keys/namespaces.""" + + # Marks an immutable namespace (inline array or inline table). + FROZEN = 0 + # Marks a nest that has been explicitly created and can no longer + # be opened using the "[table]" syntax. 
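+    # (Illustration: a second "[a.b]" header, or "[a.b]" appearing after a
+    # dotted "a.b.c = 1" at the same level, trips this flag and raises
+    # TOMLDecodeError.)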
+ EXPLICIT_NEST = 1 + + def __init__(self) -> None: + self._flags: dict[str, dict[Any, Any]] = {} + self._pending_flags: set[tuple[Key, int]] = set() + + def add_pending(self, key: Key, flag: int) -> None: + self._pending_flags.add((key, flag)) + + def finalize_pending(self) -> None: + for key, flag in self._pending_flags: + self.set(key, flag, recursive=False) + self._pending_flags.clear() + + def unset_all(self, key: Key) -> None: + cont = self._flags + for k in key[:-1]: + if k not in cont: + return + cont = cont[k]["nested"] + cont.pop(key[-1], None) + + def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 + cont = self._flags + key_parent, key_stem = key[:-1], key[-1] + for k in key_parent: + if k not in cont: + cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + if key_stem not in cont: + cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag) + + def is_(self, key: Key, flag: int) -> bool: + if not key: + return False # document root has no flags + cont = self._flags + for k in key[:-1]: + if k not in cont: + return False + inner_cont = cont[k] + if flag in inner_cont["recursive_flags"]: + return True + cont = inner_cont["nested"] + key_stem = key[-1] + if key_stem in cont: + cont = cont[key_stem] + return flag in cont["flags"] or flag in cont["recursive_flags"] + return False + + +class NestedDict: + def __init__(self) -> None: + # The parsed content of the TOML document + self.dict: dict[str, Any] = {} + + def get_or_create_nest( + self, + key: Key, + *, + access_lists: bool = True, + ) -> dict[str, Any]: + cont: Any = self.dict + for k in key: + if k not in cont: + cont[k] = {} + cont = cont[k] + if access_lists and isinstance(cont, list): + cont = cont[-1] + if not isinstance(cont, dict): + raise KeyError("There is no nest behind this key") + return cont # type: ignore[no-any-return] + + def append_nest_to_list(self, key: Key) -> None: + cont = self.get_or_create_nest(key[:-1]) + last_key = key[-1] + if last_key in cont: + list_ = cont[last_key] + if not isinstance(list_, list): + raise KeyError("An object other than list found behind this key") + list_.append({}) + else: + cont[last_key] = [{}] + + +class Output: + def __init__(self) -> None: + self.data = NestedDict() + self.flags = Flags() + + +def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: + try: + while src[pos] in chars: + pos += 1 + except IndexError: + pass + return pos + + +def skip_until( + src: str, + pos: Pos, + expect: str, + *, + error_on: frozenset[str], + error_on_eof: bool, +) -> Pos: + try: + new_pos = src.index(expect, pos) + except ValueError: + new_pos = len(src) + if error_on_eof: + raise TOMLDecodeError(f"Expected {expect!r}", src, new_pos) from None + + if not error_on.isdisjoint(src[pos:new_pos]): + while src[pos] not in error_on: + pos += 1 + raise TOMLDecodeError(f"Found invalid character {src[pos]!r}", src, pos) + return new_pos + + +def skip_comment(src: str, pos: Pos) -> Pos: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char == "#": + return skip_until( + src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False + ) + return pos + + +def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: + while True: + pos_before_skip = pos + pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) + pos = skip_comment(src, pos) + if pos == pos_before_skip: + return pos + + +def create_dict_rule(src: 
str, pos: Pos, out: Output) -> tuple[Pos, Key]: + pos += 1 # Skip "[" + pos = skip_chars(src, pos, TOML_WS) + pos, key = parse_key(src, pos) + + if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): + raise TOMLDecodeError(f"Cannot declare {key} twice", src, pos) + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.get_or_create_nest(key) + except KeyError: + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None + + if not src.startswith("]", pos): + raise TOMLDecodeError( + "Expected ']' at the end of a table declaration", src, pos + ) + return pos + 1, key + + +def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: + pos += 2 # Skip "[[" + pos = skip_chars(src, pos, TOML_WS) + pos, key = parse_key(src, pos) + + if out.flags.is_(key, Flags.FROZEN): + raise TOMLDecodeError(f"Cannot mutate immutable namespace {key}", src, pos) + # Free the namespace now that it points to another empty list item... + out.flags.unset_all(key) + # ...but this key precisely is still prohibited from table declaration + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.append_nest_to_list(key) + except KeyError: + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None + + if not src.startswith("]]", pos): + raise TOMLDecodeError( + "Expected ']]' at the end of an array declaration", src, pos + ) + return pos + 2, key + + +def key_value_rule( + src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat +) -> Pos: + pos, key, value = parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + abs_key_parent = header + key_parent + + relative_path_cont_keys = (header + key[:i] for i in range(1, len(key))) + for cont_key in relative_path_cont_keys: + # Check that dotted key syntax does not redefine an existing table + if out.flags.is_(cont_key, Flags.EXPLICIT_NEST): + raise TOMLDecodeError(f"Cannot redefine namespace {cont_key}", src, pos) + # Containers in the relative path can't be opened with the table syntax or + # dotted key/value syntax in following table sections. 
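+        # For instance: "apple.color = 'red'" under [fruit] pends the flag
+        # on ("fruit", "apple"), so a later [fruit.apple] header is rejected.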
+ out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST) + + if out.flags.is_(abs_key_parent, Flags.FROZEN): + raise TOMLDecodeError( + f"Cannot mutate immutable namespace {abs_key_parent}", src, pos + ) + + try: + nest = out.data.get_or_create_nest(abs_key_parent) + except KeyError: + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None + if key_stem in nest: + raise TOMLDecodeError("Cannot overwrite a value", src, pos) + # Mark inline table and array namespaces recursively immutable + if isinstance(value, (dict, list)): + out.flags.set(header + key, Flags.FROZEN, recursive=True) + nest[key_stem] = value + return pos + + +def parse_key_value_pair( + src: str, pos: Pos, parse_float: ParseFloat +) -> tuple[Pos, Key, Any]: + pos, key = parse_key(src, pos) + try: + char: str | None = src[pos] + except IndexError: + char = None + if char != "=": + raise TOMLDecodeError("Expected '=' after a key in a key/value pair", src, pos) + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + pos, value = parse_value(src, pos, parse_float) + return pos, key, value + + +def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]: + pos, key_part = parse_key_part(src, pos) + key: Key = (key_part,) + pos = skip_chars(src, pos, TOML_WS) + while True: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char != ".": + return pos, key + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + pos, key_part = parse_key_part(src, pos) + key += (key_part,) + pos = skip_chars(src, pos, TOML_WS) + + +def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char in BARE_KEY_CHARS: + start_pos = pos + pos = skip_chars(src, pos, BARE_KEY_CHARS) + return pos, src[start_pos:pos] + if char == "'": + return parse_literal_str(src, pos) + if char == '"': + return parse_one_line_basic_str(src, pos) + raise TOMLDecodeError("Invalid initial character for a key part", src, pos) + + +def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]: + pos += 1 + return parse_basic_str(src, pos, multiline=False) + + +def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list[Any]]: + pos += 1 + array: list[Any] = [] + + pos = skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + while True: + pos, val = parse_value(src, pos, parse_float) + array.append(val) + pos = skip_comments_and_array_ws(src, pos) + + c = src[pos : pos + 1] + if c == "]": + return pos + 1, array + if c != ",": + raise TOMLDecodeError("Unclosed array", src, pos) + pos += 1 + + pos = skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + + +def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict[str, Any]]: + pos += 1 + nested_dict = NestedDict() + flags = Flags() + + pos = skip_chars(src, pos, TOML_WS) + if src.startswith("}", pos): + return pos + 1, nested_dict.dict + while True: + pos, key, value = parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + if flags.is_(key, Flags.FROZEN): + raise TOMLDecodeError(f"Cannot mutate immutable namespace {key}", src, pos) + try: + nest = nested_dict.get_or_create_nest(key_parent, access_lists=False) + except KeyError: + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None + if key_stem in nest: + raise TOMLDecodeError(f"Duplicate inline table key {key_stem!r}", src, pos) + nest[key_stem] = value + pos = skip_chars(src, pos, TOML_WS) + c = 
src[pos : pos + 1] + if c == "}": + return pos + 1, nested_dict.dict + if c != ",": + raise TOMLDecodeError("Unclosed inline table", src, pos) + if isinstance(value, (dict, list)): + flags.set(key, Flags.FROZEN, recursive=True) + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + + +def parse_basic_str_escape( + src: str, pos: Pos, *, multiline: bool = False +) -> tuple[Pos, str]: + escape_id = src[pos : pos + 2] + pos += 2 + if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: + # Skip whitespace until next non-whitespace character or end of + # the doc. Error if non-whitespace is found before newline. + if escape_id != "\\\n": + pos = skip_chars(src, pos, TOML_WS) + try: + char = src[pos] + except IndexError: + return pos, "" + if char != "\n": + raise TOMLDecodeError("Unescaped '\\' in a string", src, pos) + pos += 1 + pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) + return pos, "" + if escape_id == "\\u": + return parse_hex_char(src, pos, 4) + if escape_id == "\\U": + return parse_hex_char(src, pos, 8) + try: + return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] + except KeyError: + raise TOMLDecodeError("Unescaped '\\' in a string", src, pos) from None + + +def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]: + return parse_basic_str_escape(src, pos, multiline=True) + + +def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: + hex_str = src[pos : pos + hex_len] + if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): + raise TOMLDecodeError("Invalid hex value", src, pos) + pos += hex_len + hex_int = int(hex_str, 16) + if not is_unicode_scalar_value(hex_int): + raise TOMLDecodeError( + "Escaped character is not a Unicode scalar value", src, pos + ) + return pos, chr(hex_int) + + +def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]: + pos += 1 # Skip starting apostrophe + start_pos = pos + pos = skip_until( + src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True + ) + return pos + 1, src[start_pos:pos] # Skip ending apostrophe + + +def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]: + pos += 3 + if src.startswith("\n", pos): + pos += 1 + + if literal: + delim = "'" + end_pos = skip_until( + src, + pos, + "'''", + error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS, + error_on_eof=True, + ) + result = src[pos:end_pos] + pos = end_pos + 3 + else: + delim = '"' + pos, result = parse_basic_str(src, pos, multiline=True) + + # Add at maximum two extra apostrophes/quotes if the end sequence + # is 4 or 5 chars long instead of just 3. 
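+    # (For example: '''ab''''' parses as the literal string "ab''".)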
+ if not src.startswith(delim, pos): + return pos, result + pos += 1 + if not src.startswith(delim, pos): + return pos, result + delim + pos += 1 + return pos, result + (delim * 2) + + +def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: + if multiline: + error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS + parse_escapes = parse_basic_str_escape_multiline + else: + error_on = ILLEGAL_BASIC_STR_CHARS + parse_escapes = parse_basic_str_escape + result = "" + start_pos = pos + while True: + try: + char = src[pos] + except IndexError: + raise TOMLDecodeError("Unterminated string", src, pos) from None + if char == '"': + if not multiline: + return pos + 1, result + src[start_pos:pos] + if src.startswith('"""', pos): + return pos + 3, result + src[start_pos:pos] + pos += 1 + continue + if char == "\\": + result += src[start_pos:pos] + pos, parsed_escape = parse_escapes(src, pos) + result += parsed_escape + start_pos = pos + continue + if char in error_on: + raise TOMLDecodeError(f"Illegal character {char!r}", src, pos) + pos += 1 + + +def parse_value( # noqa: C901 + src: str, pos: Pos, parse_float: ParseFloat +) -> tuple[Pos, Any]: + try: + char: str | None = src[pos] + except IndexError: + char = None + + # IMPORTANT: order conditions based on speed of checking and likelihood + + # Basic strings + if char == '"': + if src.startswith('"""', pos): + return parse_multiline_str(src, pos, literal=False) + return parse_one_line_basic_str(src, pos) + + # Literal strings + if char == "'": + if src.startswith("'''", pos): + return parse_multiline_str(src, pos, literal=True) + return parse_literal_str(src, pos) + + # Booleans + if char == "t": + if src.startswith("true", pos): + return pos + 4, True + if char == "f": + if src.startswith("false", pos): + return pos + 5, False + + # Arrays + if char == "[": + return parse_array(src, pos, parse_float) + + # Inline tables + if char == "{": + return parse_inline_table(src, pos, parse_float) + + # Dates and times + datetime_match = RE_DATETIME.match(src, pos) + if datetime_match: + try: + datetime_obj = match_to_datetime(datetime_match) + except ValueError as e: + raise TOMLDecodeError("Invalid date or datetime", src, pos) from e + return datetime_match.end(), datetime_obj + localtime_match = RE_LOCALTIME.match(src, pos) + if localtime_match: + return localtime_match.end(), match_to_localtime(localtime_match) + + # Integers and "normal" floats. + # The regex will greedily match any type starting with a decimal + # char, so needs to be located after handling of dates and times. + number_match = RE_NUMBER.match(src, pos) + if number_match: + return number_match.end(), match_to_number(number_match, parse_float) + + # Special floats + first_three = src[pos : pos + 3] + if first_three in {"inf", "nan"}: + return pos + 3, parse_float(first_three) + first_four = src[pos : pos + 4] + if first_four in {"-inf", "+inf", "-nan", "+nan"}: + return pos + 4, parse_float(first_four) + + raise TOMLDecodeError("Invalid value", src, pos) + + +def is_unicode_scalar_value(codepoint: int) -> bool: + return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111) + + +def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat: + """A decorator to make `parse_float` safe. + + `parse_float` must not return dicts or lists, because these types + would be mixed with parsed TOML tables and arrays, thus confusing + the parser. The returned decorated callable raises `ValueError` + instead of returning illegal types. 
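+
+    For example (illustrative), passing parse_float=lambda s: [float(s)]
+    to tomllib.loads() raises ValueError as soon as a float value is parsed.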
+    """
+    # The default `float` callable never returns illegal types. Optimize it.
+    if parse_float is float:
+        return float
+
+    def safe_parse_float(float_str: str) -> Any:
+        float_value = parse_float(float_str)
+        if isinstance(float_value, (dict, list)):
+            raise ValueError("parse_float must not return dicts or lists")
+        return float_value
+
+    return safe_parse_float
diff --git a/Python314_4_x64_Template/Lib/tomllib/_re.py b/Python314_4_x64_Template/Lib/tomllib/_re.py
new file mode 100644
index 00000000..eb8beb19
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/tomllib/_re.py
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: MIT
+# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
+# Licensed to PSF under a Contributor Agreement.
+
+from __future__ import annotations
+
+from datetime import date, datetime, time, timedelta, timezone, tzinfo
+from functools import lru_cache
+import re
+
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Any
+
+    from ._types import ParseFloat
+
+# E.g.
+# - 00:32:00.999999
+# - 00:32:00
+_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
+
+RE_NUMBER = re.compile(
+    r"""
+0
+(?:
+    x[0-9A-Fa-f](?:_?[0-9A-Fa-f])*   # hex
+    |
+    b[01](?:_?[01])*                 # bin
+    |
+    o[0-7](?:_?[0-7])*               # oct
+)
+|
+[+-]?(?:0|[1-9](?:_?[0-9])*)         # dec, integer part
+(?P<floatpart>
+    (?:\.[0-9](?:_?[0-9])*)?         # optional fractional part
+    (?:[eE][+-]?[0-9](?:_?[0-9])*)?  # optional exponent part
+)
+""",
+    flags=re.VERBOSE,
+)
+RE_LOCALTIME = re.compile(_TIME_RE_STR)
+RE_DATETIME = re.compile(
+    rf"""
+([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])  # date, e.g. 1988-10-27
+(?:
+    [Tt ]
+    {_TIME_RE_STR}
+    (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))?  # optional time offset
+)?
+""",
+    flags=re.VERBOSE,
+)
+
+
+def match_to_datetime(match: re.Match[str]) -> datetime | date:
+    """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
+
+    Raises ValueError if the match does not correspond to a valid date
+    or datetime.
+    """
+    (
+        year_str,
+        month_str,
+        day_str,
+        hour_str,
+        minute_str,
+        sec_str,
+        micros_str,
+        zulu_time,
+        offset_sign_str,
+        offset_hour_str,
+        offset_minute_str,
+    ) = match.groups()
+    year, month, day = int(year_str), int(month_str), int(day_str)
+    if hour_str is None:
+        return date(year, month, day)
+    hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
+    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
+    if offset_sign_str:
+        tz: tzinfo | None = cached_tz(
+            offset_hour_str, offset_minute_str, offset_sign_str
+        )
+    elif zulu_time:
+        tz = timezone.utc
+    else:  # local date-time
+        tz = None
+    return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
+
+
+# No need to limit cache size. This is only ever called on input
+# that matched RE_DATETIME, so there is an implicit bound of
+# 24 (hours) * 60 (minutes) * 2 (offset direction) = 2880.
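+# For example, "+05:30" and "-05:30" are distinct cache entries, while
+# repeated "+00:00" offsets all reuse a single cached timezone object.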
+@lru_cache(maxsize=None) +def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: + sign = 1 if sign_str == "+" else -1 + return timezone( + timedelta( + hours=sign * int(hour_str), + minutes=sign * int(minute_str), + ) + ) + + +def match_to_localtime(match: re.Match[str]) -> time: + hour_str, minute_str, sec_str, micros_str = match.groups() + micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + return time(int(hour_str), int(minute_str), int(sec_str), micros) + + +def match_to_number(match: re.Match[str], parse_float: ParseFloat) -> Any: + if match.group("floatpart"): + return parse_float(match.group()) + return int(match.group(), 0) diff --git a/Python313_13_x64_Template/Lib/tomllib/_types.py b/Python314_4_x64_Template/Lib/tomllib/_types.py similarity index 100% rename from Python313_13_x64_Template/Lib/tomllib/_types.py rename to Python314_4_x64_Template/Lib/tomllib/_types.py diff --git a/Python313_13_x64_Template/Lib/tomllib/mypy.ini b/Python314_4_x64_Template/Lib/tomllib/mypy.ini similarity index 100% rename from Python313_13_x64_Template/Lib/tomllib/mypy.ini rename to Python314_4_x64_Template/Lib/tomllib/mypy.ini diff --git a/Python314_4_x64_Template/Lib/trace.py b/Python314_4_x64_Template/Lib/trace.py new file mode 100644 index 00000000..cf8817f4 --- /dev/null +++ b/Python314_4_x64_Template/Lib/trace.py @@ -0,0 +1,751 @@ +# portions copyright 2001, Autonomous Zones Industries, Inc., all rights... +# err... reserved and offered to the public under the terms of the +# Python 2.2 license. +# Author: Zooko O'Whielacronx +# http://zooko.com/ +# mailto:zooko@zooko.com +# +# Copyright 2000, Mojam Media, Inc., all rights reserved. +# Author: Skip Montanaro +# +# Copyright 1999, Bioreason, Inc., all rights reserved. +# Author: Andrew Dalke +# +# Copyright 1995-1997, Automatrix, Inc., all rights reserved. +# Author: Skip Montanaro +# +# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved. +# +# +# Permission to use, copy, modify, and distribute this Python software and +# its associated documentation for any purpose without fee is hereby +# granted, provided that the above copyright notice appears in all copies, +# and that both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of neither Automatrix, +# Bioreason or Mojam Media be used in advertising or publicity pertaining to +# distribution of the software without specific, written prior permission. +# +"""program/module to trace Python program or function execution + +Sample use, command line: + trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs + trace.py -t --ignore-dir '$prefix' spam.py eggs + trace.py --trackcalls spam.py eggs + +Sample use, programmatically + import sys + + # create a Trace object, telling it what to ignore, and whether to + # do tracing or line-counting or both. 
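+    # (the sample assumes the trace module itself has been imported,
+    # e.g. via "import trace")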
+ tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,], + trace=0, count=1) + # run the new command using the given tracer + tracer.run('main()') + # make a report, placing output in /tmp + r = tracer.results() + r.write_results(show_missing=True, coverdir="/tmp") +""" +__all__ = ['Trace', 'CoverageResults'] + +import io +import linecache +import os +import sys +import sysconfig +import token +import tokenize +import inspect +import gc +import dis +import pickle +from time import monotonic as _time + +import threading + +PRAGMA_NOCOVER = "#pragma NO COVER" + +class _Ignore: + def __init__(self, modules=None, dirs=None): + self._mods = set() if not modules else set(modules) + self._dirs = [] if not dirs else [os.path.normpath(d) + for d in dirs] + self._ignore = { '': 1 } + + def names(self, filename, modulename): + if modulename in self._ignore: + return self._ignore[modulename] + + # haven't seen this one before, so see if the module name is + # on the ignore list. + if modulename in self._mods: # Identical names, so ignore + self._ignore[modulename] = 1 + return 1 + + # check if the module is a proper submodule of something on + # the ignore list + for mod in self._mods: + # Need to take some care since ignoring + # "cmp" mustn't mean ignoring "cmpcache" but ignoring + # "Spam" must also mean ignoring "Spam.Eggs". + if modulename.startswith(mod + '.'): + self._ignore[modulename] = 1 + return 1 + + # Now check that filename isn't in one of the directories + if filename is None: + # must be a built-in, so we must ignore + self._ignore[modulename] = 1 + return 1 + + # Ignore a file when it contains one of the ignorable paths + for d in self._dirs: + # The '+ os.sep' is to ensure that d is a parent directory, + # as compared to cases like: + # d = "/usr/local" + # filename = "/usr/local.py" + # or + # d = "/usr/local.py" + # filename = "/usr/local.py" + if filename.startswith(d + os.sep): + self._ignore[modulename] = 1 + return 1 + + # Tried the different ways, so we don't ignore this module + self._ignore[modulename] = 0 + return 0 + +def _modname(path): + """Return a plausible module name for the path.""" + + base = os.path.basename(path) + filename, ext = os.path.splitext(base) + return filename + +def _fullmodname(path): + """Return a plausible module name for the path.""" + + # If the file 'path' is part of a package, then the filename isn't + # enough to uniquely identify it. Try to do the right thing by + # looking in sys.path for the longest matching prefix. We'll + # assume that the rest is the package name. 
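+    # For example (illustrative): /usr/lib/python3/foo/bar.py with
+    # /usr/lib/python3 on sys.path maps to "foo.bar".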
+ + comparepath = os.path.normcase(path) + longest = "" + for dir in sys.path: + dir = os.path.normcase(dir) + if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep: + if len(dir) > len(longest): + longest = dir + + if longest: + base = path[len(longest) + 1:] + else: + base = path + # the drive letter is never part of the module name + drive, base = os.path.splitdrive(base) + base = base.replace(os.sep, ".") + if os.altsep: + base = base.replace(os.altsep, ".") + filename, ext = os.path.splitext(base) + return filename.lstrip(".") + +class CoverageResults: + def __init__(self, counts=None, calledfuncs=None, infile=None, + callers=None, outfile=None): + self.counts = counts + if self.counts is None: + self.counts = {} + self.counter = self.counts.copy() # map (filename, lineno) to count + self.calledfuncs = calledfuncs + if self.calledfuncs is None: + self.calledfuncs = {} + self.calledfuncs = self.calledfuncs.copy() + self.callers = callers + if self.callers is None: + self.callers = {} + self.callers = self.callers.copy() + self.infile = infile + self.outfile = outfile + if self.infile: + # Try to merge existing counts file. + try: + with open(self.infile, 'rb') as f: + counts, calledfuncs, callers = pickle.load(f) + self.update(self.__class__(counts, calledfuncs, callers=callers)) + except (OSError, EOFError, ValueError) as err: + print(("Skipping counts file %r: %s" + % (self.infile, err)), file=sys.stderr) + + def is_ignored_filename(self, filename): + """Return True if the filename does not refer to a file + we want to have reported. + """ + return filename.startswith('<') and filename.endswith('>') + + def update(self, other): + """Merge in the data from another CoverageResults""" + counts = self.counts + calledfuncs = self.calledfuncs + callers = self.callers + other_counts = other.counts + other_calledfuncs = other.calledfuncs + other_callers = other.callers + + for key in other_counts: + counts[key] = counts.get(key, 0) + other_counts[key] + + for key in other_calledfuncs: + calledfuncs[key] = 1 + + for key in other_callers: + callers[key] = 1 + + def write_results(self, show_missing=True, summary=False, coverdir=None, *, + ignore_missing_files=False): + """ + Write the coverage results. + + :param show_missing: Show lines that had no hits. + :param summary: Include coverage summary per module. + :param coverdir: If None, the results of each module are placed in its + directory, otherwise it is included in the directory + specified. + :param ignore_missing_files: If True, counts for files that no longer + exist are silently ignored. Otherwise, a missing file + will raise a FileNotFoundError. 
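+
+        For example (illustrative), coverdir="/tmp/cov" writes the report
+        for package.module to /tmp/cov/package.module.cover.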
+ """ + if self.calledfuncs: + print() + print("functions called:") + calls = self.calledfuncs + for filename, modulename, funcname in sorted(calls): + print(("filename: %s, modulename: %s, funcname: %s" + % (filename, modulename, funcname))) + + if self.callers: + print() + print("calling relationships:") + lastfile = lastcfile = "" + for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) \ + in sorted(self.callers): + if pfile != lastfile: + print() + print("***", pfile, "***") + lastfile = pfile + lastcfile = "" + if cfile != pfile and lastcfile != cfile: + print(" -->", cfile) + lastcfile = cfile + print(" %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)) + + # turn the counts data ("(filename, lineno) = count") into something + # accessible on a per-file basis + per_file = {} + for filename, lineno in self.counts: + lines_hit = per_file[filename] = per_file.get(filename, {}) + lines_hit[lineno] = self.counts[(filename, lineno)] + + # accumulate summary info, if needed + sums = {} + + for filename, count in per_file.items(): + if self.is_ignored_filename(filename): + continue + + if filename.endswith(".pyc"): + filename = filename[:-1] + + if ignore_missing_files and not os.path.isfile(filename): + continue + + if coverdir is None: + dir = os.path.dirname(os.path.abspath(filename)) + modulename = _modname(filename) + else: + dir = coverdir + os.makedirs(dir, exist_ok=True) + modulename = _fullmodname(filename) + + # If desired, get a list of the line numbers which represent + # executable content (returned as a dict for better lookup speed) + if show_missing: + lnotab = _find_executable_linenos(filename) + else: + lnotab = {} + source = linecache.getlines(filename) + coverpath = os.path.join(dir, modulename + ".cover") + with open(filename, 'rb') as fp: + encoding, _ = tokenize.detect_encoding(fp.readline) + n_hits, n_lines = self.write_results_file(coverpath, source, + lnotab, count, encoding) + if summary and n_lines: + sums[modulename] = n_lines, n_hits, modulename, filename + + if summary and sums: + print("lines cov% module (path)") + for m in sorted(sums): + n_lines, n_hits, modulename, filename = sums[m] + print(f"{n_lines:5d} {n_hits/n_lines:.1%} {modulename} ({filename})") + + if self.outfile: + # try and store counts and module info into self.outfile + try: + with open(self.outfile, 'wb') as f: + pickle.dump((self.counts, self.calledfuncs, self.callers), + f, 1) + except OSError as err: + print("Can't save counts files because %s" % err, file=sys.stderr) + + def write_results_file(self, path, lines, lnotab, lines_hit, encoding=None): + """Return a coverage results file in path.""" + # ``lnotab`` is a dict of executable lines, or a line number "table" + + try: + outfile = open(path, "w", encoding=encoding) + except OSError as err: + print(("trace: Could not open %r for writing: %s " + "- skipping" % (path, err)), file=sys.stderr) + return 0, 0 + + n_lines = 0 + n_hits = 0 + with outfile: + for lineno, line in enumerate(lines, 1): + # do the blank/comment match to try to mark more lines + # (help the reader find stuff that hasn't been covered) + if lineno in lines_hit: + outfile.write("%5d: " % lines_hit[lineno]) + n_hits += 1 + n_lines += 1 + elif lineno in lnotab and not PRAGMA_NOCOVER in line: + # Highlight never-executed lines, unless the line contains + # #pragma: NO COVER + outfile.write(">>>>>> ") + n_lines += 1 + else: + outfile.write(" ") + outfile.write(line.expandtabs(8)) + + return n_hits, n_lines + +def _find_lines_from_code(code, strs): + """Return dict where keys are lines 
in the line number table."""
+    linenos = {}
+
+    for _, lineno in dis.findlinestarts(code):
+        if lineno not in strs:
+            linenos[lineno] = 1
+
+    return linenos
+
+def _find_lines(code, strs):
+    """Return lineno dict for all code objects reachable from code."""
+    # get all of the lineno information from the code of this scope level
+    linenos = _find_lines_from_code(code, strs)
+
+    # and check the constants for references to other code objects
+    for c in code.co_consts:
+        if inspect.iscode(c):
+            # find another code object, so recurse into it
+            linenos.update(_find_lines(c, strs))
+    return linenos
+
+def _find_strings(filename, encoding=None):
+    """Return a dict of possible docstring positions.
+
+    The dict maps line numbers to strings. There is an entry for
+    each line that contains only a string or a part of a triple-quoted
+    string.
+    """
+    d = {}
+    # If the first token is a string, then it's the module docstring.
+    # Add this special case so that the test in the loop passes.
+    prev_ttype = token.INDENT
+    with open(filename, encoding=encoding) as f:
+        tok = tokenize.generate_tokens(f.readline)
+        for ttype, tstr, start, end, line in tok:
+            if ttype == token.STRING:
+                if prev_ttype == token.INDENT:
+                    sline, scol = start
+                    eline, ecol = end
+                    for i in range(sline, eline + 1):
+                        d[i] = 1
+            prev_ttype = ttype
+    return d
+
+def _find_executable_linenos(filename):
+    """Return dict where keys are line numbers in the line number table."""
+    try:
+        with tokenize.open(filename) as f:
+            prog = f.read()
+            encoding = f.encoding
+    except OSError as err:
+        print(("Not printing coverage data for %r: %s"
+               % (filename, err)), file=sys.stderr)
+        return {}
+    code = compile(prog, filename, "exec")
+    strs = _find_strings(filename, encoding)
+    return _find_lines(code, strs)
+
+class Trace:
+    def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
+                 ignoremods=(), ignoredirs=(), infile=None, outfile=None,
+                 timing=False):
+        """
+        @param count true iff it should count number of times each
+                     line is executed
+        @param trace true iff it should print out each line that is
+                     being counted
+        @param countfuncs true iff it should just output a list of
+                     (filename, modulename, funcname,) for functions
+                     that were called at least once; This overrides
+                     'count' and 'trace'
+        @param ignoremods a list of the names of modules to ignore
+        @param ignoredirs a list of the names of directories to ignore
+                     all of the (recursive) contents of
+        @param infile file from which to read stored counts to be
+                      added into the results
+        @param outfile file in which to write the results
+        @param timing true iff timing information should be displayed
+        """
+        self.infile = infile
+        self.outfile = outfile
+        self.ignore = _Ignore(ignoremods, ignoredirs)
+        self.counts = {}   # keys are (filename, linenumber)
+        self.pathtobasename = {} # for memoizing os.path.basename
+        self.donothing = 0
+        self.trace = trace
+        self._calledfuncs = {}
+        self._callers = {}
+        self._caller_cache = {}
+        self.start_time = None
+        if timing:
+            self.start_time = _time()
+        if countcallers:
+            self.globaltrace = self.globaltrace_trackcallers
+        elif countfuncs:
+            self.globaltrace = self.globaltrace_countfuncs
+        elif trace and count:
+            self.globaltrace = self.globaltrace_lt
+            self.localtrace = self.localtrace_trace_and_count
+        elif trace:
+            self.globaltrace = self.globaltrace_lt
+            self.localtrace = self.localtrace_trace
+        elif count:
+            self.globaltrace = self.globaltrace_lt
+            self.localtrace = self.localtrace_count
+        else:
+            # Ahem -- do nothing? Okay.
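+            # Neither tracing nor counting was requested, so run()/runctx()
+            # will not install any trace hooks.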
+ self.donothing = 1 + + def run(self, cmd): + import __main__ + dict = __main__.__dict__ + self.runctx(cmd, dict, dict) + + def runctx(self, cmd, globals=None, locals=None): + if globals is None: globals = {} + if locals is None: locals = {} + if not self.donothing: + threading.settrace(self.globaltrace) + sys.settrace(self.globaltrace) + try: + exec(cmd, globals, locals) + finally: + if not self.donothing: + sys.settrace(None) + threading.settrace(None) + + def runfunc(self, func, /, *args, **kw): + result = None + if not self.donothing: + sys.settrace(self.globaltrace) + try: + result = func(*args, **kw) + finally: + if not self.donothing: + sys.settrace(None) + return result + + def file_module_function_of(self, frame): + code = frame.f_code + filename = code.co_filename + if filename: + modulename = _modname(filename) + else: + modulename = None + + funcname = code.co_name + clsname = None + if code in self._caller_cache: + if self._caller_cache[code] is not None: + clsname = self._caller_cache[code] + else: + self._caller_cache[code] = None + ## use of gc.get_referrers() was suggested by Michael Hudson + # all functions which refer to this code object + funcs = [f for f in gc.get_referrers(code) + if inspect.isfunction(f)] + # require len(func) == 1 to avoid ambiguity caused by calls to + # new.function(): "In the face of ambiguity, refuse the + # temptation to guess." + if len(funcs) == 1: + dicts = [d for d in gc.get_referrers(funcs[0]) + if isinstance(d, dict)] + if len(dicts) == 1: + classes = [c for c in gc.get_referrers(dicts[0]) + if hasattr(c, "__bases__")] + if len(classes) == 1: + # ditto for new.classobj() + clsname = classes[0].__name__ + # cache the result - assumption is that new.* is + # not called later to disturb this relationship + # _caller_cache could be flushed if functions in + # the new module get called. + self._caller_cache[code] = clsname + if clsname is not None: + funcname = "%s.%s" % (clsname, funcname) + + return filename, modulename, funcname + + def globaltrace_trackcallers(self, frame, why, arg): + """Handler for call events. + + Adds information about who called who to the self._callers dict. + """ + if why == 'call': + # XXX Should do a better job of identifying methods + this_func = self.file_module_function_of(frame) + parent_func = self.file_module_function_of(frame.f_back) + self._callers[(parent_func, this_func)] = 1 + + def globaltrace_countfuncs(self, frame, why, arg): + """Handler for call events. + + Adds (filename, modulename, funcname) to the self._calledfuncs dict. + """ + if why == 'call': + this_func = self.file_module_function_of(frame) + self._calledfuncs[this_func] = 1 + + def globaltrace_lt(self, frame, why, arg): + """Handler for call events. + + If the code block being entered is to be ignored, returns 'None', + else returns self.localtrace. 
+ """ + if why == 'call': + code = frame.f_code + filename = frame.f_globals.get('__file__', None) + if filename: + # XXX _modname() doesn't work right for packages, so + # the ignore support won't work right for packages + modulename = _modname(filename) + if modulename is not None: + ignore_it = self.ignore.names(filename, modulename) + if not ignore_it: + if self.trace: + print((" --- modulename: %s, funcname: %s" + % (modulename, code.co_name))) + return self.localtrace + else: + return None + + def localtrace_trace_and_count(self, frame, why, arg): + if why == "line": + # record the file name and line number of every trace + filename = frame.f_code.co_filename + lineno = frame.f_lineno + key = filename, lineno + self.counts[key] = self.counts.get(key, 0) + 1 + + if self.start_time: + print('%.2f' % (_time() - self.start_time), end=' ') + bname = os.path.basename(filename) + line = linecache.getline(filename, lineno) + print("%s(%d)" % (bname, lineno), end='') + if line: + print(": ", line, end='') + else: + print() + return self.localtrace + + def localtrace_trace(self, frame, why, arg): + if why == "line": + # record the file name and line number of every trace + filename = frame.f_code.co_filename + lineno = frame.f_lineno + + if self.start_time: + print('%.2f' % (_time() - self.start_time), end=' ') + bname = os.path.basename(filename) + line = linecache.getline(filename, lineno) + print("%s(%d)" % (bname, lineno), end='') + if line: + print(": ", line, end='') + else: + print() + return self.localtrace + + def localtrace_count(self, frame, why, arg): + if why == "line": + filename = frame.f_code.co_filename + lineno = frame.f_lineno + key = filename, lineno + self.counts[key] = self.counts.get(key, 0) + 1 + return self.localtrace + + def results(self): + return CoverageResults(self.counts, infile=self.infile, + outfile=self.outfile, + calledfuncs=self._calledfuncs, + callers=self._callers) + +def main(): + import argparse + + parser = argparse.ArgumentParser(color=True) + parser.add_argument('--version', action='version', version='trace 2.0') + + grp = parser.add_argument_group('Main options', + 'One of these (or --report) must be given') + + grp.add_argument('-c', '--count', action='store_true', + help='Count the number of times each line is executed and write ' + 'the counts to .cover for each module executed, in ' + 'the module\'s directory. See also --coverdir, --file, ' + '--no-report below.') + grp.add_argument('-t', '--trace', action='store_true', + help='Print each line to sys.stdout before it is executed') + grp.add_argument('-l', '--listfuncs', action='store_true', + help='Keep track of which functions are executed at least once ' + 'and write the results to sys.stdout after the program exits. ' + 'Cannot be specified alongside --trace or --count.') + grp.add_argument('-T', '--trackcalls', action='store_true', + help='Keep track of caller/called pairs and write the results to ' + 'sys.stdout after the program exits.') + + grp = parser.add_argument_group('Modifiers') + + _grp = grp.add_mutually_exclusive_group() + _grp.add_argument('-r', '--report', action='store_true', + help='Generate a report from a counts file; does not execute any ' + 'code. --file must specify the results file to read, which ' + 'must have been created in a previous run with --count ' + '--file=FILE') + _grp.add_argument('-R', '--no-report', action='store_true', + help='Do not generate the coverage report files. 
'
+                 'Useful if you want to accumulate over several runs.')
+
+    grp.add_argument('-f', '--file',
+            help='File to accumulate counts over several runs')
+    grp.add_argument('-C', '--coverdir',
+            help='Directory where the report files go. The coverage report '
+                 'for <package>.<module> will be written to file '
+                 '<dir>/<package>/<module>.cover')
+    grp.add_argument('-m', '--missing', action='store_true',
+            help='Annotate executable lines that were not executed with '
+                 '">>>>>> "')
+    grp.add_argument('-s', '--summary', action='store_true',
+            help='Write a brief summary for each file to sys.stdout. '
+                 'Can only be used with --count or --report')
+    grp.add_argument('-g', '--timing', action='store_true',
+            help='Prefix each line with the time since the program started. '
+                 'Only used while tracing')
+
+    grp = parser.add_argument_group('Filters',
+            'Can be specified multiple times')
+    grp.add_argument('--ignore-module', action='append', default=[],
+            help='Ignore the given module(s) and its submodules '
+                 '(if it is a package). Accepts comma separated list of '
+                 'module names.')
+    grp.add_argument('--ignore-dir', action='append', default=[],
+            help='Ignore files in the given directory '
+                 '(multiple directories can be joined by os.pathsep).')
+
+    parser.add_argument('--module', action='store_true', default=False,
+                        help='Trace a module. ')
+    parser.add_argument('progname', nargs='?',
+                        help='file to run as main program')
+    parser.add_argument('arguments', nargs=argparse.REMAINDER,
+                        help='arguments to the program')
+
+    opts = parser.parse_args()
+
+    if opts.ignore_dir:
+        _prefix = sysconfig.get_path("stdlib")
+        _exec_prefix = sysconfig.get_path("platstdlib")
+
+    def parse_ignore_dir(s):
+        s = os.path.expanduser(os.path.expandvars(s))
+        s = s.replace('$prefix', _prefix).replace('$exec_prefix', _exec_prefix)
+        return os.path.normpath(s)
+
+    opts.ignore_module = [mod.strip()
+                          for i in opts.ignore_module for mod in i.split(',')]
+    opts.ignore_dir = [parse_ignore_dir(s)
+                       for i in opts.ignore_dir for s in i.split(os.pathsep)]
+
+    if opts.report:
+        if not opts.file:
+            parser.error('-r/--report requires -f/--file')
+        results = CoverageResults(infile=opts.file, outfile=opts.file)
+        return results.write_results(opts.missing, opts.summary, opts.coverdir)
+
+    if not any([opts.trace, opts.count, opts.listfuncs, opts.trackcalls]):
+        parser.error('must specify one of --trace, --count, --report, '
+                     '--listfuncs, or --trackcalls')
+
+    if opts.listfuncs and (opts.count or opts.trace):
+        parser.error('cannot specify both --listfuncs and (--trace or --count)')
+
+    if opts.summary and not opts.count:
+        parser.error('--summary can only be used with --count or --report')
+
+    if opts.progname is None:
+        parser.error('progname is missing: required with the main options')
+
+    t = Trace(opts.count, opts.trace, countfuncs=opts.listfuncs,
+              countcallers=opts.trackcalls, ignoremods=opts.ignore_module,
+              ignoredirs=opts.ignore_dir, infile=opts.file,
+              outfile=opts.file, timing=opts.timing)
+    try:
+        if opts.module:
+            import runpy
+            module_name = opts.progname
+            mod_name, mod_spec, code = runpy._get_module_details(module_name)
+            sys.argv = [code.co_filename, *opts.arguments]
+            globs = {
+                '__name__': '__main__',
+                '__file__': code.co_filename,
+                '__package__': mod_spec.parent,
+                '__loader__': mod_spec.loader,
+                '__spec__': mod_spec,
+                '__cached__': None,
+            }
+        else:
+            sys.argv = [opts.progname, *opts.arguments]
+            sys.path[0] = os.path.dirname(opts.progname)
+
+            with io.open_code(opts.progname) as fp:
+                code = compile(fp.read(), opts.progname, 'exec')
+            # try to
emulate __main__ namespace as much as possible + globs = { + '__file__': opts.progname, + '__name__': '__main__', + '__package__': None, + '__cached__': None, + } + t.runctx(code, globs, globs) + except OSError as err: + sys.exit("Cannot run file %r because: %s" % (sys.argv[0], err)) + except SystemExit: + pass + + results = t.results() + + if not opts.no_report: + results.write_results(opts.missing, opts.summary, opts.coverdir) + +if __name__=='__main__': + main() diff --git a/Python314_4_x64_Template/Lib/traceback.py b/Python314_4_x64_Template/Lib/traceback.py new file mode 100644 index 00000000..79f67b98 --- /dev/null +++ b/Python314_4_x64_Template/Lib/traceback.py @@ -0,0 +1,1745 @@ +"""Extract, format and print information about Python stack traces.""" + +import collections.abc +import itertools +import linecache +import sys +import textwrap +import warnings +import codeop +import keyword +import tokenize +import io +import _colorize + +from contextlib import suppress + +__all__ = ['extract_stack', 'extract_tb', 'format_exception', + 'format_exception_only', 'format_list', 'format_stack', + 'format_tb', 'print_exc', 'format_exc', 'print_exception', + 'print_last', 'print_stack', 'print_tb', 'clear_frames', + 'FrameSummary', 'StackSummary', 'TracebackException', + 'walk_stack', 'walk_tb', 'print_list'] + +# +# Formatting and printing lists of traceback lines. +# + + +def print_list(extracted_list, file=None): + """Print the list of tuples as returned by extract_tb() or + extract_stack() as a formatted stack trace to the given file.""" + if file is None: + file = sys.stderr + for item in StackSummary.from_list(extracted_list).format(): + print(item, file=file, end="") + +def format_list(extracted_list): + """Format a list of tuples or FrameSummary objects for printing. + + Given a list of tuples or FrameSummary objects as returned by + extract_tb() or extract_stack(), return a list of strings ready + for printing. + + Each string in the resulting list corresponds to the item with the + same index in the argument list. Each string ends in a newline; + the strings may contain internal newlines as well, for those items + whose source text line is not None. + """ + return StackSummary.from_list(extracted_list).format() + +# +# Printing and Extracting Tracebacks. +# + +def print_tb(tb, limit=None, file=None): + """Print up to 'limit' stack trace entries from the traceback 'tb'. + + If 'limit' is omitted or None, all entries are printed. If 'file' + is omitted or None, the output goes to sys.stderr; otherwise + 'file' should be an open file or file-like object with a write() + method. + """ + print_list(extract_tb(tb, limit=limit), file=file) + +def format_tb(tb, limit=None): + """A shorthand for 'format_list(extract_tb(tb, limit))'.""" + return extract_tb(tb, limit=limit).format() + +def extract_tb(tb, limit=None): + """ + Return a StackSummary object representing a list of + pre-processed entries from traceback. + + This is useful for alternate formatting of stack traces. If + 'limit' is omitted or None, all entries are extracted. A + pre-processed stack trace entry is a FrameSummary object + containing attributes filename, lineno, name, and line + representing the information that is usually printed for a stack + trace. The line is a string with leading and trailing + whitespace stripped; if the source is not available it is None. + """ + return StackSummary._extract_from_extended_frame_gen( + _walk_tb_with_full_positions(tb), limit=limit) + +# +# Exception formatting and output. 
+#
+
+_cause_message = (
+    "\nThe above exception was the direct cause "
+    "of the following exception:\n\n")
+
+_context_message = (
+    "\nDuring handling of the above exception, "
+    "another exception occurred:\n\n")
+
+
+class _Sentinel:
+    def __repr__(self):
+        return "<implicit>"
+
+_sentinel = _Sentinel()
+
+def _parse_value_tb(exc, value, tb):
+    if (value is _sentinel) != (tb is _sentinel):
+        raise ValueError("Both or neither of value and tb must be given")
+    if value is tb is _sentinel:
+        if exc is not None:
+            if isinstance(exc, BaseException):
+                return exc, exc.__traceback__
+
+            raise TypeError(f'Exception expected for value, '
+                            f'{type(exc).__name__} found')
+        else:
+            return None, None
+    return value, tb
+
+
+def print_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \
+                    file=None, chain=True, **kwargs):
+    """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
+
+    This differs from print_tb() in the following ways: (1) if
+    traceback is not None, it prints a header "Traceback (most recent
+    call last):"; (2) it prints the exception type and value after the
+    stack trace; (3) if type is SyntaxError and value has the
+    appropriate format, it prints the line where the syntax error
+    occurred with a caret on the next line indicating the approximate
+    position of the error.
+    """
+    colorize = kwargs.get("colorize", False)
+    value, tb = _parse_value_tb(exc, value, tb)
+    te = TracebackException(type(value), value, tb, limit=limit, compact=True)
+    te.print(file=file, chain=chain, colorize=colorize)
+
+
+BUILTIN_EXCEPTION_LIMIT = object()
+
+
+def _print_exception_bltin(exc, /):
+    file = sys.stderr if sys.stderr is not None else sys.__stderr__
+    colorize = _colorize.can_colorize(file=file)
+    return print_exception(exc, limit=BUILTIN_EXCEPTION_LIMIT, file=file, colorize=colorize)
+
+
+def format_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \
+                     chain=True, **kwargs):
+    """Format a stack trace and the exception information.
+
+    The arguments have the same meaning as the corresponding arguments
+    to print_exception(). The return value is a list of strings, each
+    ending in a newline and some containing internal newlines. When
+    these lines are concatenated and printed, exactly the same text is
+    printed as does print_exception().
+    """
+    colorize = kwargs.get("colorize", False)
+    value, tb = _parse_value_tb(exc, value, tb)
+    te = TracebackException(type(value), value, tb, limit=limit, compact=True)
+    return list(te.format(chain=chain, colorize=colorize))
+
+
+def format_exception_only(exc, /, value=_sentinel, *, show_group=False, **kwargs):
+    """Format the exception part of a traceback.
+
+    The return value is a list of strings, each ending in a newline.
+
+    The list contains the exception's message, which is
+    normally a single string; however, for :exc:`SyntaxError` exceptions, it
+    contains several lines that (when printed) display detailed information
+    about where the syntax error occurred. Following the message, the list
+    contains the exception's ``__notes__``.
+
+    When *show_group* is ``True``, and the exception is an instance of
+    :exc:`BaseExceptionGroup`, the nested exceptions are included as
+    well, recursively, with indentation relative to their nesting depth.
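+
+    For example (illustrative),
+    format_exception_only(ValueError("bad")) returns ['ValueError: bad\n'].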
+ """ + colorize = kwargs.get("colorize", False) + if value is _sentinel: + value = exc + te = TracebackException(type(value), value, None, compact=True) + return list(te.format_exception_only(show_group=show_group, colorize=colorize)) + + +# -- not official API but folk probably use these two functions. + +def _format_final_exc_line(etype, value, *, insert_final_newline=True, colorize=False): + valuestr = _safe_string(value, 'exception') + end_char = "\n" if insert_final_newline else "" + if colorize: + theme = _colorize.get_theme(force_color=True).traceback + else: + theme = _colorize.get_theme(force_no_color=True).traceback + if value is None or not valuestr: + line = f"{theme.type}{etype}{theme.reset}{end_char}" + else: + line = f"{theme.type}{etype}{theme.reset}: {theme.message}{valuestr}{theme.reset}{end_char}" + return line + + +def _safe_string(value, what, func=str): + try: + return func(value) + except: + return f'<{what} {func.__name__}() failed>' + +# -- + +def print_exc(limit=None, file=None, chain=True): + """Shorthand for 'print_exception(sys.exception(), limit=limit, file=file, chain=chain)'.""" + print_exception(sys.exception(), limit=limit, file=file, chain=chain) + +def format_exc(limit=None, chain=True): + """Like print_exc() but return a string.""" + return "".join(format_exception(sys.exception(), limit=limit, chain=chain)) + +def print_last(limit=None, file=None, chain=True): + """This is a shorthand for 'print_exception(sys.last_exc, limit=limit, file=file, chain=chain)'.""" + if not hasattr(sys, "last_exc") and not hasattr(sys, "last_type"): + raise ValueError("no last exception") + + if hasattr(sys, "last_exc"): + print_exception(sys.last_exc, limit=limit, file=file, chain=chain) + else: + print_exception(sys.last_type, sys.last_value, sys.last_traceback, + limit=limit, file=file, chain=chain) + + +# +# Printing and Extracting Stacks. +# + +def print_stack(f=None, limit=None, file=None): + """Print a stack trace from its invocation point. + + The optional 'f' argument can be used to specify an alternate + stack frame at which to start. The optional 'limit' and 'file' + arguments have the same meaning as for print_exception(). + """ + if f is None: + f = sys._getframe().f_back + print_list(extract_stack(f, limit=limit), file=file) + + +def format_stack(f=None, limit=None): + """Shorthand for 'format_list(extract_stack(f, limit))'.""" + if f is None: + f = sys._getframe().f_back + return format_list(extract_stack(f, limit=limit)) + + +def extract_stack(f=None, limit=None): + """Extract the raw traceback from the current stack frame. + + The return value has the same format as for extract_tb(). The + optional 'f' and 'limit' arguments have the same meaning as for + print_stack(). Each item in the list is a quadruple (filename, + line number, function name, text), and the entries are in order + from oldest to newest stack frame. + """ + if f is None: + f = sys._getframe().f_back + stack = StackSummary.extract(walk_stack(f), limit=limit) + stack.reverse() + return stack + + +def clear_frames(tb): + "Clear all references to local variables in the frames of a traceback." + while tb is not None: + try: + tb.tb_frame.clear() + except RuntimeError: + # Ignore the exception raised if the frame is still executing. + pass + tb = tb.tb_next + + +class FrameSummary: + """Information about a single frame from a traceback. + + - :attr:`filename` The filename for the frame. + - :attr:`lineno` The line within filename for the frame that was + active when the frame was captured. 
+    - :attr:`name` The name of the function or method that was executing
+      when the frame was captured.
+    - :attr:`line` The text from the linecache module for the line
+      of code that was running when the frame was captured.
+    - :attr:`locals` Either None if locals were not supplied, or a dict
+      mapping the name to the repr() of the variable.
+    """
+
+    __slots__ = ('filename', 'lineno', 'end_lineno', 'colno', 'end_colno',
+                 'name', '_lines', '_lines_dedented', 'locals', '_code')
+
+    def __init__(self, filename, lineno, name, *, lookup_line=True,
+                 locals=None, line=None,
+                 end_lineno=None, colno=None, end_colno=None, **kwargs):
+        """Construct a FrameSummary.
+
+        :param lookup_line: If True, `linecache` is consulted for the source
+            code line. Otherwise, the line will be looked up when first needed.
+        :param locals: If supplied the frame locals, which will be captured as
+            object representations.
+        :param line: If provided, use this instead of looking up the line in
+            the linecache.
+        """
+        self.filename = filename
+        self.lineno = lineno
+        self.end_lineno = lineno if end_lineno is None else end_lineno
+        self.colno = colno
+        self.end_colno = end_colno
+        self.name = name
+        self._code = kwargs.get("_code")
+        self._lines = line
+        self._lines_dedented = None
+        if lookup_line:
+            self.line
+        self.locals = {k: _safe_string(v, 'local', func=repr)
+                       for k, v in locals.items()} if locals else None
+
+    def __eq__(self, other):
+        if isinstance(other, FrameSummary):
+            return (self.filename == other.filename and
+                    self.lineno == other.lineno and
+                    self.name == other.name and
+                    self.locals == other.locals)
+        if isinstance(other, tuple):
+            return (self.filename, self.lineno, self.name, self.line) == other
+        return NotImplemented
+
+    def __getitem__(self, pos):
+        return (self.filename, self.lineno, self.name, self.line)[pos]
+
+    def __iter__(self):
+        return iter([self.filename, self.lineno, self.name, self.line])
+
+    def __repr__(self):
+        return "<FrameSummary file {filename}, line {lineno} in {name}>".format(
+            filename=self.filename, lineno=self.lineno, name=self.name)
+
+    def __len__(self):
+        return 4
+
+    def _set_lines(self):
+        if (
+            self._lines is None
+            and self.lineno is not None
+            and self.end_lineno is not None
+        ):
+            lines = []
+            for lineno in range(self.lineno, self.end_lineno + 1):
+                # treat errors (empty string) and empty lines (newline) as the same
+                line = linecache.getline(self.filename, lineno).rstrip()
+                if not line and self._code is not None and self.filename.startswith("<"):
+                    line = linecache._getline_from_code(self._code, lineno).rstrip()
+                lines.append(line)
+            self._lines = "\n".join(lines) + "\n"
+
+    @property
+    def _original_lines(self):
+        # Returns the line as-is from the source, without modifying whitespace.
+        self._set_lines()
+        return self._lines
+
+    @property
+    def _dedented_lines(self):
+        # Returns _original_lines, but dedented
+        self._set_lines()
+        if self._lines_dedented is None and self._lines is not None:
+            self._lines_dedented = textwrap.dedent(self._lines)
+        return self._lines_dedented
+
+    @property
+    def line(self):
+        self._set_lines()
+        if self._lines is None:
+            return None
+        # return only the first line, stripped
+        return self._lines.partition("\n")[0].strip()
+
+
+def walk_stack(f):
+    """Walk a stack yielding the frame and line number for each frame.
+
+    This will follow f.f_back from the given frame. If no frame is given, the
+    current stack is used. Usually used with StackSummary.extract.
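+
+    For example (illustrative), walk_stack(None) starts at the caller's
+    frame and follows f_back towards the oldest frame on the stack.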
+ """ + if f is None: + f = sys._getframe().f_back + + def walk_stack_generator(frame): + while frame is not None: + yield frame, frame.f_lineno + frame = frame.f_back + + return walk_stack_generator(f) + + +def walk_tb(tb): + """Walk a traceback yielding the frame and line number for each frame. + + This will follow tb.tb_next (and thus is in the opposite order to + walk_stack). Usually used with StackSummary.extract. + """ + while tb is not None: + yield tb.tb_frame, tb.tb_lineno + tb = tb.tb_next + + +def _walk_tb_with_full_positions(tb): + # Internal version of walk_tb that yields full code positions including + # end line and column information. + while tb is not None: + positions = _get_code_position(tb.tb_frame.f_code, tb.tb_lasti) + # Yield tb_lineno when co_positions does not have a line number to + # maintain behavior with walk_tb. + if positions[0] is None: + yield tb.tb_frame, (tb.tb_lineno, ) + positions[1:] + else: + yield tb.tb_frame, positions + tb = tb.tb_next + + +def _get_code_position(code, instruction_index): + if instruction_index < 0: + return (None, None, None, None) + positions_gen = code.co_positions() + return next(itertools.islice(positions_gen, instruction_index // 2, None)) + + +_RECURSIVE_CUTOFF = 3 # Also hardcoded in traceback.c. + + +class StackSummary(list): + """A list of FrameSummary objects, representing a stack of frames.""" + + @classmethod + def extract(klass, frame_gen, *, limit=None, lookup_lines=True, + capture_locals=False): + """Create a StackSummary from a traceback or stack object. + + :param frame_gen: A generator that yields (frame, lineno) tuples + whose summaries are to be included in the stack. + :param limit: None to include all frames or the number of frames to + include. + :param lookup_lines: If True, lookup lines for each frame immediately, + otherwise lookup is deferred until the frame is rendered. + :param capture_locals: If True, the local variables from each frame will + be captured as object representations into the FrameSummary. + """ + def extended_frame_gen(): + for f, lineno in frame_gen: + yield f, (lineno, None, None, None) + + return klass._extract_from_extended_frame_gen( + extended_frame_gen(), limit=limit, lookup_lines=lookup_lines, + capture_locals=capture_locals) + + @classmethod + def _extract_from_extended_frame_gen(klass, frame_gen, *, limit=None, + lookup_lines=True, capture_locals=False): + # Same as extract but operates on a frame generator that yields + # (frame, (lineno, end_lineno, colno, end_colno)) in the stack. + # Only lineno is required, the remaining fields can be None if the + # information is not available. + builtin_limit = limit is BUILTIN_EXCEPTION_LIMIT + if limit is None or builtin_limit: + limit = getattr(sys, 'tracebacklimit', None) + if limit is not None and limit < 0: + limit = 0 + if limit is not None: + if builtin_limit: + frame_gen = tuple(frame_gen) + frame_gen = frame_gen[len(frame_gen) - limit:] + elif limit >= 0: + frame_gen = itertools.islice(frame_gen, limit) + else: + frame_gen = collections.deque(frame_gen, maxlen=-limit) + + result = klass() + fnames = set() + for f, (lineno, end_lineno, colno, end_colno) in frame_gen: + co = f.f_code + filename = co.co_filename + name = co.co_name + fnames.add(filename) + linecache.lazycache(filename, f.f_globals) + # Must defer line lookups until we have called checkcache. 
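+            # (linecache.checkcache runs below, once per unique filename,
+            # before any deferred lookups happen.)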
+            if capture_locals:
+                f_locals = f.f_locals
+            else:
+                f_locals = None
+            result.append(
+                FrameSummary(filename, lineno, name,
+                             lookup_line=False, locals=f_locals,
+                             end_lineno=end_lineno, colno=colno, end_colno=end_colno,
+                             _code=f.f_code,
+                             )
+            )
+        for filename in fnames:
+            linecache.checkcache(filename)
+
+        # If immediate lookup was desired, trigger lookups now.
+        if lookup_lines:
+            for f in result:
+                f.line
+        return result
+
+    @classmethod
+    def from_list(klass, a_list):
+        """
+        Create a StackSummary object from a supplied list of
+        FrameSummary objects or old-style list of tuples.
+        """
+        # While doing a fast-path check for isinstance(a_list, StackSummary) is
+        # appealing, idlelib.run.cleanup_traceback and other similar code may
+        # break this by making arbitrary frames plain tuples, so we need to
+        # check on a frame by frame basis.
+        result = StackSummary()
+        for frame in a_list:
+            if isinstance(frame, FrameSummary):
+                result.append(frame)
+            else:
+                filename, lineno, name, line = frame
+                result.append(FrameSummary(filename, lineno, name, line=line))
+        return result
+
+    def format_frame_summary(self, frame_summary, **kwargs):
+        """Format the lines for a single FrameSummary.
+
+        Returns a string representing one frame involved in the stack. This
+        gets called for every frame to be printed in the stack summary.
+        """
+        colorize = kwargs.get("colorize", False)
+        row = []
+        filename = frame_summary.filename
+        if frame_summary.filename.startswith("<stdin>-"):
+            filename = "<stdin>"
+        if colorize:
+            theme = _colorize.get_theme(force_color=True).traceback
+        else:
+            theme = _colorize.get_theme(force_no_color=True).traceback
+        row.append(
+            '  File {}"{}"{}, line {}{}{}, in {}{}{}\n'.format(
+                theme.filename,
+                filename,
+                theme.reset,
+                theme.line_no,
+                frame_summary.lineno,
+                theme.reset,
+                theme.frame,
+                frame_summary.name,
+                theme.reset,
+            )
+        )
+        if frame_summary._dedented_lines and frame_summary._dedented_lines.strip():
+            if (
+                frame_summary.colno is None or
+                frame_summary.end_colno is None
+            ):
+                # only output first line if column information is missing
+                row.append(textwrap.indent(frame_summary.line, '    ') + "\n")
+            else:
+                # get first and last line
+                all_lines_original = frame_summary._original_lines.splitlines()
+                first_line = all_lines_original[0]
+                # assume all_lines_original has enough lines (since we constructed it)
+                last_line = all_lines_original[frame_summary.end_lineno - frame_summary.lineno]
+
+                # character index of the start/end of the instruction
+                start_offset = _byte_offset_to_character_offset(first_line, frame_summary.colno)
+                end_offset = _byte_offset_to_character_offset(last_line, frame_summary.end_colno)
+
+                all_lines = frame_summary._dedented_lines.splitlines()[
+                    :frame_summary.end_lineno - frame_summary.lineno + 1
+                ]
+
+                # adjust start/end offset based on dedent
+                dedent_characters = len(first_line) - len(all_lines[0])
+                start_offset = max(0, start_offset - dedent_characters)
+                end_offset = max(0, end_offset - dedent_characters)
+
+                # When showing this on a terminal, some of the non-ASCII characters
+                # might be rendered as double-width characters, so we need to take
+                # that into account when calculating the length of the line.
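+                # (For example, CJK fullwidth characters occupy two columns;
+                # see _display_width below.)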
+ dp_start_offset = _display_width(all_lines[0], offset=start_offset) + dp_end_offset = _display_width(all_lines[-1], offset=end_offset) + + # get exact code segment corresponding to the instruction + segment = "\n".join(all_lines) + segment = segment[start_offset:len(segment) - (len(all_lines[-1]) - end_offset)] + + # attempt to parse for anchors + anchors = None + show_carets = False + with suppress(Exception): + anchors = _extract_caret_anchors_from_line_segment(segment) + show_carets = self._should_show_carets(start_offset, end_offset, all_lines, anchors) + + result = [] + + # only display first line, last line, and lines around anchor start/end + significant_lines = {0, len(all_lines) - 1} + + anchors_left_end_offset = 0 + anchors_right_start_offset = 0 + primary_char = "^" + secondary_char = "^" + if anchors: + anchors_left_end_offset = anchors.left_end_offset + anchors_right_start_offset = anchors.right_start_offset + # computed anchor positions do not take start_offset into account, + # so account for it here + if anchors.left_end_lineno == 0: + anchors_left_end_offset += start_offset + if anchors.right_start_lineno == 0: + anchors_right_start_offset += start_offset + + # account for display width + anchors_left_end_offset = _display_width( + all_lines[anchors.left_end_lineno], offset=anchors_left_end_offset + ) + anchors_right_start_offset = _display_width( + all_lines[anchors.right_start_lineno], offset=anchors_right_start_offset + ) + + primary_char = anchors.primary_char + secondary_char = anchors.secondary_char + significant_lines.update( + range(anchors.left_end_lineno - 1, anchors.left_end_lineno + 2) + ) + significant_lines.update( + range(anchors.right_start_lineno - 1, anchors.right_start_lineno + 2) + ) + + # remove bad line numbers + significant_lines.discard(-1) + significant_lines.discard(len(all_lines)) + + def output_line(lineno): + """output all_lines[lineno] along with carets""" + result.append(all_lines[lineno] + "\n") + if not show_carets: + return + num_spaces = len(all_lines[lineno]) - len(all_lines[lineno].lstrip()) + carets = [] + num_carets = dp_end_offset if lineno == len(all_lines) - 1 else _display_width(all_lines[lineno]) + # compute caret character for each position + for col in range(num_carets): + if col < num_spaces or (lineno == 0 and col < dp_start_offset): + # before first non-ws char of the line, or before start of instruction + carets.append(' ') + elif anchors and ( + lineno > anchors.left_end_lineno or + (lineno == anchors.left_end_lineno and col >= anchors_left_end_offset) + ) and ( + lineno < anchors.right_start_lineno or + (lineno == anchors.right_start_lineno and col < anchors_right_start_offset) + ): + # within anchors + carets.append(secondary_char) + else: + carets.append(primary_char) + if colorize: + # Replace the previous line with a red version of it only in the parts covered + # by the carets. 
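+                        # ("^" spans use the error_highlight color, "~" spans
+                        # the error_range color; other text is left as-is.)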
+ line = result[-1] + colorized_line_parts = [] + colorized_carets_parts = [] + + for color, group in itertools.groupby(itertools.zip_longest(line, carets, fillvalue=""), key=lambda x: x[1]): + caret_group = list(group) + if color == "^": + colorized_line_parts.append(theme.error_highlight + "".join(char for char, _ in caret_group) + theme.reset) + colorized_carets_parts.append(theme.error_highlight + "".join(caret for _, caret in caret_group) + theme.reset) + elif color == "~": + colorized_line_parts.append(theme.error_range + "".join(char for char, _ in caret_group) + theme.reset) + colorized_carets_parts.append(theme.error_range + "".join(caret for _, caret in caret_group) + theme.reset) + else: + colorized_line_parts.append("".join(char for char, _ in caret_group)) + colorized_carets_parts.append("".join(caret for _, caret in caret_group)) + + colorized_line = "".join(colorized_line_parts) + colorized_carets = "".join(colorized_carets_parts) + result[-1] = colorized_line + result.append(colorized_carets + "\n") + else: + result.append("".join(carets) + "\n") + + # display significant lines + sig_lines_list = sorted(significant_lines) + for i, lineno in enumerate(sig_lines_list): + if i: + linediff = lineno - sig_lines_list[i - 1] + if linediff == 2: + # 1 line in between - just output it + output_line(lineno - 1) + elif linediff > 2: + # > 1 line in between - abbreviate + result.append(f"...<{linediff - 1} lines>...\n") + output_line(lineno) + + row.append( + textwrap.indent(textwrap.dedent("".join(result)), ' ', lambda line: True) + ) + if frame_summary.locals: + for name, value in sorted(frame_summary.locals.items()): + row.append(' {name} = {value}\n'.format(name=name, value=value)) + + return ''.join(row) + + def _should_show_carets(self, start_offset, end_offset, all_lines, anchors): + with suppress(SyntaxError, ImportError): + import ast + tree = ast.parse('\n'.join(all_lines)) + if not tree.body: + return False + statement = tree.body[0] + value = None + def _spawns_full_line(value): + return ( + value.lineno == 1 + and value.end_lineno == len(all_lines) + and value.col_offset == start_offset + and value.end_col_offset == end_offset + ) + match statement: + case ast.Return(value=ast.Call()): + if isinstance(statement.value.func, ast.Name): + value = statement.value + case ast.Assign(value=ast.Call()): + if ( + len(statement.targets) == 1 and + isinstance(statement.targets[0], ast.Name) + ): + value = statement.value + if value is not None and _spawns_full_line(value): + return False + if anchors: + return True + if all_lines[0][:start_offset].lstrip() or all_lines[-1][end_offset:].rstrip(): + return True + return False + + def format(self, **kwargs): + """Format the stack ready for printing. + + Returns a list of strings ready for printing. Each string in the + resulting list corresponds to a single frame from the stack. + Each string ends in a newline; the strings may contain internal + newlines as well, for those items with source text lines. + + For long sequences of the same frame and line, the first few + repetitions are shown, followed by a summary line stating the exact + number of further repetitions. 
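+        (The cutoff is _RECURSIVE_CUTOFF: three identical frames are shown
+        in full before the summary line.)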
+ """ + colorize = kwargs.get("colorize", False) + result = [] + last_file = None + last_line = None + last_name = None + count = 0 + for frame_summary in self: + formatted_frame = self.format_frame_summary(frame_summary, colorize=colorize) + if formatted_frame is None: + continue + if (last_file is None or last_file != frame_summary.filename or + last_line is None or last_line != frame_summary.lineno or + last_name is None or last_name != frame_summary.name): + if count > _RECURSIVE_CUTOFF: + count -= _RECURSIVE_CUTOFF + result.append( + f' [Previous line repeated {count} more ' + f'time{"s" if count > 1 else ""}]\n' + ) + last_file = frame_summary.filename + last_line = frame_summary.lineno + last_name = frame_summary.name + count = 0 + count += 1 + if count > _RECURSIVE_CUTOFF: + continue + result.append(formatted_frame) + + if count > _RECURSIVE_CUTOFF: + count -= _RECURSIVE_CUTOFF + result.append( + f' [Previous line repeated {count} more ' + f'time{"s" if count > 1 else ""}]\n' + ) + return result + + +def _byte_offset_to_character_offset(str, offset): + as_utf8 = str.encode('utf-8') + return len(as_utf8[:offset].decode("utf-8", errors="replace")) + + +_Anchors = collections.namedtuple( + "_Anchors", + [ + "left_end_lineno", + "left_end_offset", + "right_start_lineno", + "right_start_offset", + "primary_char", + "secondary_char", + ], + defaults=["~", "^"] +) + +def _extract_caret_anchors_from_line_segment(segment): + """ + Given source code `segment` corresponding to a FrameSummary, determine: + - for binary ops, the location of the binary op + - for indexing and function calls, the location of the brackets. + `segment` is expected to be a valid Python expression. + """ + import ast + + try: + # Without parentheses, `segment` is parsed as a statement. + # Binary ops, subscripts, and calls are expressions, so + # we can wrap them with parentheses to parse them as + # (possibly multi-line) expressions. + # e.g. if we try to highlight the addition in + # x = ( + # a + + # b + # ) + # then we would ast.parse + # a + + # b + # which is not a valid statement because of the newline. + # Adding brackets makes it a valid expression. + # ( + # a + + # b + # ) + # Line locations will be different than the original, + # which is taken into account later on. + tree = ast.parse(f"(\n{segment}\n)") + except SyntaxError: + return None + + if len(tree.body) != 1: + return None + + lines = segment.splitlines() + + def normalize(lineno, offset): + """Get character index given byte offset""" + return _byte_offset_to_character_offset(lines[lineno], offset) + + def next_valid_char(lineno, col): + """Gets the next valid character index in `lines`, if + the current location is not valid. Handles empty lines. 
+ """ + while lineno < len(lines) and col >= len(lines[lineno]): + col = 0 + lineno += 1 + assert lineno < len(lines) and col < len(lines[lineno]) + return lineno, col + + def increment(lineno, col): + """Get the next valid character index in `lines`.""" + col += 1 + lineno, col = next_valid_char(lineno, col) + return lineno, col + + def nextline(lineno, col): + """Get the next valid character at least on the next line""" + col = 0 + lineno += 1 + lineno, col = next_valid_char(lineno, col) + return lineno, col + + def increment_until(lineno, col, stop): + """Get the next valid non-"\\#" character that satisfies the `stop` predicate""" + while True: + ch = lines[lineno][col] + if ch in "\\#": + lineno, col = nextline(lineno, col) + elif not stop(ch): + lineno, col = increment(lineno, col) + else: + break + return lineno, col + + def setup_positions(expr, force_valid=True): + """Get the lineno/col position of the end of `expr`. If `force_valid` is True, + forces the position to be a valid character (e.g. if the position is beyond the + end of the line, move to the next line) + """ + # -2 since end_lineno is 1-indexed and because we added an extra + # bracket + newline to `segment` when calling ast.parse + lineno = expr.end_lineno - 2 + col = normalize(lineno, expr.end_col_offset) + return next_valid_char(lineno, col) if force_valid else (lineno, col) + + statement = tree.body[0] + match statement: + case ast.Expr(expr): + match expr: + case ast.BinOp(): + # ast gives these locations for BinOp subexpressions + # ( left_expr ) + ( right_expr ) + # left^^^^^ right^^^^^ + lineno, col = setup_positions(expr.left) + + # First operator character is the first non-space/')' character + lineno, col = increment_until(lineno, col, lambda x: not x.isspace() and x != ')') + + # binary op is 1 or 2 characters long, on the same line, + # before the right subexpression + right_col = col + 1 + if ( + right_col < len(lines[lineno]) + and ( + # operator char should not be in the right subexpression + expr.right.lineno - 2 > lineno or + right_col < normalize(expr.right.lineno - 2, expr.right.col_offset) + ) + and not (ch := lines[lineno][right_col]).isspace() + and ch not in "\\#" + ): + right_col += 1 + + # right_col can be invalid since it is exclusive + return _Anchors(lineno, col, lineno, right_col) + case ast.Subscript(): + # ast gives these locations for value and slice subexpressions + # ( value_expr ) [ slice_expr ] + # value^^^^^ slice^^^^^ + # subscript^^^^^^^^^^^^^^^^^^^^ + + # find left bracket + left_lineno, left_col = setup_positions(expr.value) + left_lineno, left_col = increment_until(left_lineno, left_col, lambda x: x == '[') + # find right bracket (final character of expression) + right_lineno, right_col = setup_positions(expr, force_valid=False) + return _Anchors(left_lineno, left_col, right_lineno, right_col) + case ast.Call(): + # ast gives these locations for function call expressions + # ( func_expr ) (args, kwargs) + # func^^^^^ + # call^^^^^^^^^^^^^^^^^^^^^^^^ + + # find left bracket + left_lineno, left_col = setup_positions(expr.func) + left_lineno, left_col = increment_until(left_lineno, left_col, lambda x: x == '(') + # find right bracket (final character of expression) + right_lineno, right_col = setup_positions(expr, force_valid=False) + return _Anchors(left_lineno, left_col, right_lineno, right_col) + + return None + +_WIDE_CHAR_SPECIFIERS = "WF" + +def _display_width(line, offset=None): + """Calculate the extra amount of width space the given source + code segment might take if it 
were to be displayed on a fixed
+    width output device. Supports wide unicode characters and emojis."""
+
+    if offset is None:
+        offset = len(line)
+
+    # Fast track for ASCII-only strings
+    if line.isascii():
+        return offset
+
+    import unicodedata
+
+    return sum(
+        2 if unicodedata.east_asian_width(char) in _WIDE_CHAR_SPECIFIERS else 1
+        for char in line[:offset]
+    )
+
+
+class _ExceptionPrintContext:
+    def __init__(self):
+        self.seen = set()
+        self.exception_group_depth = 0
+        self.need_close = False
+
+    def indent(self):
+        return ' ' * (2 * self.exception_group_depth)
+
+    def emit(self, text_gen, margin_char=None):
+        if margin_char is None:
+            margin_char = '|'
+        indent_str = self.indent()
+        if self.exception_group_depth:
+            indent_str += margin_char + ' '
+
+        if isinstance(text_gen, str):
+            yield textwrap.indent(text_gen, indent_str, lambda line: True)
+        else:
+            for text in text_gen:
+                yield textwrap.indent(text, indent_str, lambda line: True)
+
+
+class TracebackException:
+    """An exception ready for rendering.
+
+    The traceback module captures enough attributes from the original exception
+    to this intermediary form to ensure that no references are held, while
+    still being able to fully print or format it.
+
+    max_group_width and max_group_depth control the formatting of exception
+    groups. The depth refers to the nesting level of the group, and the width
+    refers to the size of a single exception group's exceptions array. The
+    formatted output is truncated when either limit is exceeded.
+
+    Use `from_exception` to create TracebackException instances from exception
+    objects, or the constructor to create TracebackException instances from
+    individual components.
+
+    - :attr:`__cause__` A TracebackException of the original *__cause__*.
+    - :attr:`__context__` A TracebackException of the original *__context__*.
+    - :attr:`exceptions` For exception groups - a list of TracebackException
+      instances for the nested *exceptions*. ``None`` for other exceptions.
+    - :attr:`__suppress_context__` The *__suppress_context__* value from the
+      original exception.
+    - :attr:`stack` A `StackSummary` representing the traceback.
+    - :attr:`exc_type` (deprecated) The class of the original traceback.
+    - :attr:`exc_type_str` String display of exc_type
+    - :attr:`filename` For syntax errors - the filename where the error
+      occurred.
+    - :attr:`lineno` For syntax errors - the linenumber where the error
+      occurred.
+    - :attr:`end_lineno` For syntax errors - the end linenumber where the error
+      occurred. Can be `None` if not present.
+    - :attr:`text` For syntax errors - the text where the error
+      occurred.
+    - :attr:`offset` For syntax errors - the offset into the text where the
+      error occurred.
+    - :attr:`end_offset` For syntax errors - the end offset into the text where
+      the error occurred. Can be `None` if not present.
+    - :attr:`msg` For syntax errors - the compiler error message.
+    """
+
+    def __init__(self, exc_type, exc_value, exc_traceback, *, limit=None,
+                 lookup_lines=True, capture_locals=False, compact=False,
+                 max_group_width=15, max_group_depth=10, save_exc_type=True, _seen=None):
+        # NB: we need to accept exc_type, exc_value, exc_traceback to
+        # permit backwards compat with the existing API, otherwise we
+        # need stub thunk objects just to glue it together.
+        # Handle loops in __cause__ or __context__.
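+        # (_seen collects id()s of exceptions already visited, so cycles in
+        # the __cause__/__context__ chain terminate instead of recursing
+        # forever; only the top-level call starts with _seen=None.)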
+ is_recursive_call = _seen is not None + if _seen is None: + _seen = set() + _seen.add(id(exc_value)) + + self.max_group_width = max_group_width + self.max_group_depth = max_group_depth + + self.stack = StackSummary._extract_from_extended_frame_gen( + _walk_tb_with_full_positions(exc_traceback), + limit=limit, lookup_lines=lookup_lines, + capture_locals=capture_locals) + + self._exc_type = exc_type if save_exc_type else None + + # Capture now to permit freeing resources: only complication is in the + # unofficial API _format_final_exc_line + self._str = _safe_string(exc_value, 'exception') + try: + self.__notes__ = getattr(exc_value, '__notes__', None) + except Exception as e: + self.__notes__ = [ + f'Ignored error getting __notes__: {_safe_string(e, '__notes__', repr)}'] + + self._is_syntax_error = False + self._have_exc_type = exc_type is not None + if exc_type is not None: + self.exc_type_qualname = exc_type.__qualname__ + self.exc_type_module = exc_type.__module__ + else: + self.exc_type_qualname = None + self.exc_type_module = None + + if exc_type and issubclass(exc_type, SyntaxError): + # Handle SyntaxError's specially + self.filename = exc_value.filename + lno = exc_value.lineno + self.lineno = str(lno) if lno is not None else None + end_lno = exc_value.end_lineno + self.end_lineno = str(end_lno) if end_lno is not None else None + self.text = exc_value.text + self.offset = exc_value.offset + self.end_offset = exc_value.end_offset + self.msg = exc_value.msg + self._is_syntax_error = True + self._exc_metadata = getattr(exc_value, "_metadata", None) + elif exc_type and issubclass(exc_type, ImportError) and \ + getattr(exc_value, "name_from", None) is not None: + wrong_name = getattr(exc_value, "name_from", None) + suggestion = _compute_suggestion_error(exc_value, exc_traceback, wrong_name) + if suggestion: + self._str += f". Did you mean: '{suggestion}'?" + elif exc_type and issubclass(exc_type, (NameError, AttributeError)) and \ + getattr(exc_value, "name", None) is not None: + wrong_name = getattr(exc_value, "name", None) + suggestion = _compute_suggestion_error(exc_value, exc_traceback, wrong_name) + if suggestion: + self._str += f". Did you mean: '{suggestion}'?" + if issubclass(exc_type, NameError): + wrong_name = getattr(exc_value, "name", None) + if wrong_name is not None and wrong_name in sys.stdlib_module_names: + if suggestion: + self._str += f" Or did you forget to import '{wrong_name}'?" + else: + self._str += f". Did you forget to import '{wrong_name}'?" 
+        if lookup_lines:
+            self._load_lines()
+        self.__suppress_context__ = \
+            exc_value.__suppress_context__ if exc_value is not None else False
+
+        # Convert __cause__ and __context__ to `TracebackExceptions`s, use a
+        # queue to avoid recursion (only the top-level call gets _seen == None)
+        if not is_recursive_call:
+            queue = [(self, exc_value)]
+            while queue:
+                te, e = queue.pop()
+                if (e is not None and e.__cause__ is not None
+                    and id(e.__cause__) not in _seen):
+                    cause = TracebackException(
+                        type(e.__cause__),
+                        e.__cause__,
+                        e.__cause__.__traceback__,
+                        limit=limit,
+                        lookup_lines=lookup_lines,
+                        capture_locals=capture_locals,
+                        max_group_width=max_group_width,
+                        max_group_depth=max_group_depth,
+                        _seen=_seen)
+                else:
+                    cause = None
+
+                if compact:
+                    need_context = (cause is None and
+                                    e is not None and
+                                    not e.__suppress_context__)
+                else:
+                    need_context = True
+                if (e is not None and e.__context__ is not None
+                    and need_context and id(e.__context__) not in _seen):
+                    context = TracebackException(
+                        type(e.__context__),
+                        e.__context__,
+                        e.__context__.__traceback__,
+                        limit=limit,
+                        lookup_lines=lookup_lines,
+                        capture_locals=capture_locals,
+                        max_group_width=max_group_width,
+                        max_group_depth=max_group_depth,
+                        _seen=_seen)
+                else:
+                    context = None
+
+                if e is not None and isinstance(e, BaseExceptionGroup):
+                    exceptions = []
+                    for exc in e.exceptions:
+                        texc = TracebackException(
+                            type(exc),
+                            exc,
+                            exc.__traceback__,
+                            limit=limit,
+                            lookup_lines=lookup_lines,
+                            capture_locals=capture_locals,
+                            max_group_width=max_group_width,
+                            max_group_depth=max_group_depth,
+                            _seen=_seen)
+                        exceptions.append(texc)
+                else:
+                    exceptions = None
+
+                te.__cause__ = cause
+                te.__context__ = context
+                te.exceptions = exceptions
+                if cause:
+                    queue.append((te.__cause__, e.__cause__))
+                if context:
+                    queue.append((te.__context__, e.__context__))
+                if exceptions:
+                    queue.extend(zip(te.exceptions, e.exceptions))
+
+    @classmethod
+    def from_exception(cls, exc, *args, **kwargs):
+        """Create a TracebackException from an exception."""
+        return cls(type(exc), exc, exc.__traceback__, *args, **kwargs)
+
+    @property
+    def exc_type(self):
+        warnings.warn('Deprecated in 3.13. Use exc_type_str instead.',
+                      DeprecationWarning, stacklevel=2)
+        return self._exc_type
+
+    @property
+    def exc_type_str(self):
+        if not self._have_exc_type:
+            return None
+        stype = self.exc_type_qualname
+        smod = self.exc_type_module
+        if smod not in ("__main__", "builtins"):
+            if not isinstance(smod, str):
+                smod = "<unknown>"
+            stype = smod + '.' + stype
+        return stype
+
+    def _load_lines(self):
+        """Private API. force all lines in the stack to be loaded."""
+        for frame in self.stack:
+            frame.line
+
+    def __eq__(self, other):
+        if isinstance(other, TracebackException):
+            return self.__dict__ == other.__dict__
+        return NotImplemented
+
+    def __str__(self):
+        return self._str
+
+    def format_exception_only(self, *, show_group=False, _depth=0, **kwargs):
+        """Format the exception part of the traceback.
+
+        The return value is a generator of strings, each ending in a newline.
+
+        Generator yields the exception message.
+        For :exc:`SyntaxError` exceptions, it
+        also yields (before the exception message)
+        several lines that (when printed)
+        display detailed information about where the syntax error occurred.
+        Following the message, generator also yields
+        all the exception's ``__notes__``.
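+
+        For example (an illustrative sketch)::
+
+            >>> te = TracebackException.from_exception(ValueError('bad'))
+            >>> print(''.join(te.format_exception_only()), end='')
+            ValueError: bad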
+
+        When *show_group* is ``True``, and the exception is an instance of
+        :exc:`BaseExceptionGroup`, the nested exceptions are included as
+        well, recursively, with indentation relative to their nesting depth.
+        """
+        colorize = kwargs.get("colorize", False)
+
+        indent = 3 * _depth * ' '
+        if not self._have_exc_type:
+            yield indent + _format_final_exc_line(None, self._str, colorize=colorize)
+            return
+
+        stype = self.exc_type_str
+        if not self._is_syntax_error:
+            if _depth > 0:
+                # Nested exceptions need correct handling of multiline messages.
+                formatted = _format_final_exc_line(
+                    stype, self._str, insert_final_newline=False, colorize=colorize
+                ).split('\n')
+                yield from [
+                    indent + l + '\n'
+                    for l in formatted
+                ]
+            else:
+                yield _format_final_exc_line(stype, self._str, colorize=colorize)
+        else:
+            yield from [indent + l for l in self._format_syntax_error(stype, colorize=colorize)]
+
+        if (
+            isinstance(self.__notes__, collections.abc.Sequence)
+            and not isinstance(self.__notes__, (str, bytes))
+        ):
+            for note in self.__notes__:
+                note = _safe_string(note, 'note')
+                yield from [indent + l + '\n' for l in note.split('\n')]
+        elif self.__notes__ is not None:
+            yield indent + "{}\n".format(_safe_string(self.__notes__, '__notes__', func=repr))
+
+        if self.exceptions and show_group:
+            for ex in self.exceptions:
+                yield from ex.format_exception_only(show_group=show_group, _depth=_depth+1, colorize=colorize)
+
+    def _find_keyword_typos(self):
+        assert self._is_syntax_error
+        try:
+            import _suggestions
+        except ImportError:
+            _suggestions = None
+
+        # Only try to find keyword typos if there is no custom message
+        if self.msg != "invalid syntax" and "Perhaps you forgot a comma" not in self.msg:
+            return
+
+        if not self._exc_metadata:
+            return
+
+        line, offset, source = self._exc_metadata
+        end_line = int(self.lineno) if self.lineno is not None else 0
+        lines = None
+        from_filename = False
+
+        if source is None:
+            if self.filename:
+                try:
+                    with open(self.filename) as f:
+                        lines = f.read().splitlines()
+                except Exception:
+                    line, end_line, offset = 0, 1, 0
+                else:
+                    from_filename = True
+            lines = lines if lines is not None else self.text.splitlines()
+        else:
+            lines = source.splitlines()
+
+        error_code = lines[line - 1 if line > 0 else 0:end_line]
+        error_code = textwrap.dedent('\n'.join(error_code))
+
+        # Do not continue if the source is too large
+        if len(error_code) > 1024:
+            return
+
+        error_lines = error_code.splitlines()
+        tokens = tokenize.generate_tokens(io.StringIO(error_code).readline)
+        tokens_left_to_process = 10
+        import difflib
+        for token in tokens:
+            start, end = token.start, token.end
+            if token.type != tokenize.NAME:
+                continue
+            # Only consider NAME tokens on the same line as the error
+            the_end = end_line if line == 0 else end_line + 1
+            if from_filename and token.start[0]+line != the_end:
+                continue
+            wrong_name = token.string
+            if wrong_name in keyword.kwlist:
+                continue
+
+            # Limit the number of valid tokens to consider to not spend
+            # too much time in this function
+            tokens_left_to_process -= 1
+            if tokens_left_to_process < 0:
+                break
+            # Limit the number of possible matches to try
+            max_matches = 3
+            matches = []
+            if _suggestions is not None:
+                suggestion = _suggestions._generate_suggestions(keyword.kwlist, wrong_name)
+                if suggestion:
+                    matches.append(suggestion)
+            matches.extend(difflib.get_close_matches(wrong_name, keyword.kwlist, n=max_matches, cutoff=0.5))
+            matches = matches[:max_matches]
+            for suggestion in matches:
+                if not suggestion or suggestion == wrong_name:
+                    continue
+                # Try to replace the token with the keyword
+                the_lines = error_lines.copy()
+                the_line = the_lines[start[0] - 1][:]
+                chars = list(the_line)
+                chars[token.start[1]:token.end[1]] = suggestion
+                the_lines[start[0] - 1] = ''.join(chars)
+                code = '\n'.join(the_lines)
+
+                # Check if it works
+                try:
+                    codeop.compile_command(code, symbol="exec", flags=codeop.PyCF_ONLY_AST)
+                except SyntaxError:
+                    continue
+
+                # Keep token.line but handle offsets correctly
+                self.text = token.line
+                self.offset = token.start[1] + 1
+                self.end_offset = token.end[1] + 1
+                self.lineno = start[0]
+                self.end_lineno = end[0]
+                self.msg = f"invalid syntax. Did you mean '{suggestion}'?"
+                return
+
+
+    def _format_syntax_error(self, stype, **kwargs):
+        """Format SyntaxError exceptions (internal helper)."""
+        # Show exactly where the problem was found.
+        colorize = kwargs.get("colorize", False)
+        if colorize:
+            theme = _colorize.get_theme(force_color=True).traceback
+        else:
+            theme = _colorize.get_theme(force_no_color=True).traceback
+        filename_suffix = ''
+        if self.lineno is not None:
+            yield '  File {}"{}"{}, line {}{}{}\n'.format(
+                theme.filename,
+                self.filename or "<string>",
+                theme.reset,
+                theme.line_no,
+                self.lineno,
+                theme.reset,
+            )
+        elif self.filename is not None:
+            filename_suffix = ' ({})'.format(self.filename)
+
+        text = self.text
+        if isinstance(text, str):
+            # text  = "   foo\n"
+            # rtext = "   foo"
+            # ltext = "foo"
+            with suppress(Exception):
+                self._find_keyword_typos()
+            text = self.text
+            rtext = text.rstrip('\n')
+            ltext = rtext.lstrip(' \n\f')
+            spaces = len(rtext) - len(ltext)
+            if self.offset is None:
+                yield '    {}\n'.format(ltext)
+            elif isinstance(self.offset, int):
+                offset = self.offset
+                if self.lineno == self.end_lineno:
+                    end_offset = (
+                        self.end_offset
+                        if (
+                            isinstance(self.end_offset, int)
+                            and self.end_offset != 0
+                        )
+                        else offset
+                    )
+                else:
+                    end_offset = len(rtext) + 1
+
+                if self.text and offset > len(self.text):
+                    offset = len(rtext) + 1
+                if self.text and end_offset > len(self.text):
+                    end_offset = len(rtext) + 1
+                if offset >= end_offset or end_offset < 0:
+                    end_offset = offset + 1
+
+                # Convert 1-based column offset to 0-based index into stripped text
+                colno = offset - 1 - spaces
+                end_colno = end_offset - 1 - spaces
+                caretspace = ' '
+                if colno >= 0:
+                    # non-space whitespace (like tabs) must be kept for alignment
+                    caretspace = ((c if c.isspace() else ' ') for c in ltext[:colno])
+                    start_color = end_color = ""
+                    if colorize:
+                        # colorize from colno to end_colno
+                        ltext = (
+                            ltext[:colno] +
+                            theme.error_highlight + ltext[colno:end_colno] + theme.reset +
+                            ltext[end_colno:]
+                        )
+                        start_color = theme.error_highlight
+                        end_color = theme.reset
+                    yield '    {}\n'.format(ltext)
+                    yield '    {}{}{}{}\n'.format(
+                        "".join(caretspace),
+                        start_color,
+                        ('^' * (end_colno - colno)),
+                        end_color,
+                    )
+                else:
+                    yield '    {}\n'.format(ltext)
+        msg = self.msg or "<no detail available>"
+        yield "{}{}{}: {}{}{}{}\n".format(
+            theme.type,
+            stype,
+            theme.reset,
+            theme.message,
+            msg,
+            theme.reset,
+            filename_suffix,
+        )
+
+    def format(self, *, chain=True, _ctx=None, **kwargs):
+        """Format the exception.
+
+        If chain is not *True*, *__cause__* and *__context__* will not be formatted.
+
+        The return value is a generator of strings, each ending in a newline and
+        some containing internal newlines. `print_exception` is a wrapper around
+        this method which just prints the lines to a file.
+
+        The message indicating which exception occurred is always the last
+        string in the output.
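+
+        For example (an illustrative sketch)::
+
+            >>> try:
+            ...     1 / 0
+            ... except ZeroDivisionError as e:
+            ...     te = TracebackException.from_exception(e)
+            >>> print(''.join(te.format()), end='')  # doctest: +ELLIPSIS
+            Traceback (most recent call last):
+              ...
+            ZeroDivisionError: division by zero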
+ """ + colorize = kwargs.get("colorize", False) + if _ctx is None: + _ctx = _ExceptionPrintContext() + + output = [] + exc = self + if chain: + while exc: + if exc.__cause__ is not None: + chained_msg = _cause_message + chained_exc = exc.__cause__ + elif (exc.__context__ is not None and + not exc.__suppress_context__): + chained_msg = _context_message + chained_exc = exc.__context__ + else: + chained_msg = None + chained_exc = None + + output.append((chained_msg, exc)) + exc = chained_exc + else: + output.append((None, exc)) + + for msg, exc in reversed(output): + if msg is not None: + yield from _ctx.emit(msg) + if exc.exceptions is None: + if exc.stack: + yield from _ctx.emit('Traceback (most recent call last):\n') + yield from _ctx.emit(exc.stack.format(colorize=colorize)) + yield from _ctx.emit(exc.format_exception_only(colorize=colorize)) + elif _ctx.exception_group_depth > self.max_group_depth: + # exception group, but depth exceeds limit + yield from _ctx.emit( + f"... (max_group_depth is {self.max_group_depth})\n") + else: + # format exception group + is_toplevel = (_ctx.exception_group_depth == 0) + if is_toplevel: + _ctx.exception_group_depth += 1 + + if exc.stack: + yield from _ctx.emit( + 'Exception Group Traceback (most recent call last):\n', + margin_char = '+' if is_toplevel else None) + yield from _ctx.emit(exc.stack.format(colorize=colorize)) + + yield from _ctx.emit(exc.format_exception_only(colorize=colorize)) + num_excs = len(exc.exceptions) + if num_excs <= self.max_group_width: + n = num_excs + else: + n = self.max_group_width + 1 + _ctx.need_close = False + for i in range(n): + last_exc = (i == n-1) + if last_exc: + # The closing frame may be added by a recursive call + _ctx.need_close = True + + if self.max_group_width is not None: + truncated = (i >= self.max_group_width) + else: + truncated = False + title = f'{i+1}' if not truncated else '...' + yield (_ctx.indent() + + ('+-' if i==0 else ' ') + + f'+---------------- {title} ----------------\n') + _ctx.exception_group_depth += 1 + if not truncated: + yield from exc.exceptions[i].format(chain=chain, _ctx=_ctx, colorize=colorize) + else: + remaining = num_excs - self.max_group_width + plural = 's' if remaining > 1 else '' + yield from _ctx.emit( + f"and {remaining} more exception{plural}\n") + + if last_exc and _ctx.need_close: + yield (_ctx.indent() + + "+------------------------------------\n") + _ctx.need_close = False + _ctx.exception_group_depth -= 1 + + if is_toplevel: + assert _ctx.exception_group_depth == 1 + _ctx.exception_group_depth = 0 + + + def print(self, *, file=None, chain=True, **kwargs): + """Print the result of self.format(chain=chain) to 'file'.""" + colorize = kwargs.get("colorize", False) + if file is None: + file = sys.stderr + for line in self.format(chain=chain, colorize=colorize): + print(line, file=file, end="") + + +_MAX_CANDIDATE_ITEMS = 750 +_MAX_STRING_SIZE = 40 +_MOVE_COST = 2 +_CASE_COST = 1 + + +def _substitution_cost(ch_a, ch_b): + if ch_a == ch_b: + return 0 + if ch_a.lower() == ch_b.lower(): + return _CASE_COST + return _MOVE_COST + + +def _get_safe___dir__(obj): + # Use obj.__dir__() to avoid a TypeError when calling dir(obj). + # See gh-131001 and gh-139933. 
+ try: + d = obj.__dir__() + except TypeError: # when obj is a class + d = type(obj).__dir__(obj) + return sorted(x for x in d if isinstance(x, str)) + + +def _compute_suggestion_error(exc_value, tb, wrong_name): + if wrong_name is None or not isinstance(wrong_name, str): + return None + if isinstance(exc_value, AttributeError): + obj = exc_value.obj + try: + d = _get_safe___dir__(obj) + hide_underscored = (wrong_name[:1] != '_') + if hide_underscored and tb is not None: + while tb.tb_next is not None: + tb = tb.tb_next + frame = tb.tb_frame + if 'self' in frame.f_locals and frame.f_locals['self'] is obj: + hide_underscored = False + if hide_underscored: + d = [x for x in d if x[:1] != '_'] + except Exception: + return None + elif isinstance(exc_value, ImportError): + try: + mod = __import__(exc_value.name) + d = _get_safe___dir__(mod) + if wrong_name[:1] != '_': + d = [x for x in d if x[:1] != '_'] + except Exception: + return None + else: + assert isinstance(exc_value, NameError) + # find most recent frame + if tb is None: + return None + while tb.tb_next is not None: + tb = tb.tb_next + frame = tb.tb_frame + d = ( + list(frame.f_locals) + + list(frame.f_globals) + + list(frame.f_builtins) + ) + d = [x for x in d if isinstance(x, str)] + + # Check first if we are in a method and the instance + # has the wrong name as attribute + if 'self' in frame.f_locals: + self = frame.f_locals['self'] + try: + has_wrong_name = hasattr(self, wrong_name) + except Exception: + has_wrong_name = False + if has_wrong_name: + return f"self.{wrong_name}" + + try: + import _suggestions + except ImportError: + pass + else: + return _suggestions._generate_suggestions(d, wrong_name) + + # Compute closest match + + if len(d) > _MAX_CANDIDATE_ITEMS: + return None + wrong_name_len = len(wrong_name) + if wrong_name_len > _MAX_STRING_SIZE: + return None + best_distance = wrong_name_len + suggestion = None + for possible_name in d: + if possible_name == wrong_name: + # A missing attribute is "found". Don't suggest it (see GH-88821). + continue + # No more than 1/3 of the involved characters should need changed. + max_distance = (len(possible_name) + wrong_name_len + 3) * _MOVE_COST // 6 + # Don't take matches we've already beaten. + max_distance = min(max_distance, best_distance - 1) + current_distance = _levenshtein_distance(wrong_name, possible_name, max_distance) + if current_distance > max_distance: + continue + if not suggestion or current_distance < best_distance: + suggestion = possible_name + best_distance = current_distance + return suggestion + + +def _levenshtein_distance(a, b, max_cost): + # A Python implementation of Python/suggestions.c:levenshtein_distance. + + # Both strings are the same + if a == b: + return 0 + + # Trim away common affixes + pre = 0 + while a[pre:] and b[pre:] and a[pre] == b[pre]: + pre += 1 + a = a[pre:] + b = b[pre:] + post = 0 + while a[:post or None] and b[:post or None] and a[post-1] == b[post-1]: + post -= 1 + a = a[:post or None] + b = b[:post or None] + if not a or not b: + return _MOVE_COST * (len(a) + len(b)) + if len(a) > _MAX_STRING_SIZE or len(b) > _MAX_STRING_SIZE: + return max_cost + 1 + + # Prefer shorter buffer + if len(b) < len(a): + a, b = b, a + + # Quick fail when a match is impossible + if (len(b) - len(a)) * _MOVE_COST > max_cost: + return max_cost + 1 + + # Instead of producing the whole traditional len(a)-by-len(b) + # matrix, we can update just one row in place. 
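+    # Costs are scaled by _MOVE_COST, with case-only substitutions charged
+    # the cheaper _CASE_COST; tracking the row minimum lets the loop below
+    # bail out as soon as no cell can come in under the caller's max_cost.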
+ # Initialize the buffer row + row = list(range(_MOVE_COST, _MOVE_COST * (len(a) + 1), _MOVE_COST)) + + result = 0 + for bindex in range(len(b)): + bchar = b[bindex] + distance = result = bindex * _MOVE_COST + minimum = sys.maxsize + for index in range(len(a)): + # 1) Previous distance in this row is cost(b[:b_index], a[:index]) + substitute = distance + _substitution_cost(bchar, a[index]) + # 2) cost(b[:b_index], a[:index+1]) from previous row + distance = row[index] + # 3) existing result is cost(b[:b_index+1], a[index]) + + insert_delete = min(result, distance) + _MOVE_COST + result = min(insert_delete, substitute) + + # cost(b[:b_index+1], a[:index+1]) + row[index] = result + if result < minimum: + minimum = result + if minimum > max_cost: + # Everything in this row is too big, so bail early. + return max_cost + 1 + return result diff --git a/Python313_13_x64_Template/Lib/tracemalloc.py b/Python314_4_x64_Template/Lib/tracemalloc.py similarity index 100% rename from Python313_13_x64_Template/Lib/tracemalloc.py rename to Python314_4_x64_Template/Lib/tracemalloc.py diff --git a/Python313_13_x64_Template/Lib/tty.py b/Python314_4_x64_Template/Lib/tty.py similarity index 100% rename from Python313_13_x64_Template/Lib/tty.py rename to Python314_4_x64_Template/Lib/tty.py diff --git a/Python314_4_x64_Template/Lib/types.py b/Python314_4_x64_Template/Lib/types.py new file mode 100644 index 00000000..fa6324fb --- /dev/null +++ b/Python314_4_x64_Template/Lib/types.py @@ -0,0 +1,344 @@ +""" +Define names for built-in types that aren't directly accessible as a builtin. +""" + +# Iterators in Python aren't a matter of type but of protocol. A large +# and changing number of builtin types implement *some* flavor of +# iterator. Don't check the type! Use hasattr to check for both +# "__iter__" and "__next__" attributes instead. + +try: + from _types import * +except ImportError: + import sys + + def _f(): pass + FunctionType = type(_f) + LambdaType = type(lambda: None) # Same as FunctionType + CodeType = type(_f.__code__) + MappingProxyType = type(type.__dict__) + SimpleNamespace = type(sys.implementation) + + def _cell_factory(): + a = 1 + def f(): + nonlocal a + return f.__closure__[0] + CellType = type(_cell_factory()) + + def _g(): + yield 1 + GeneratorType = type(_g()) + + async def _c(): pass + _c = _c() + CoroutineType = type(_c) + _c.close() # Prevent ResourceWarning + + async def _ag(): + yield + _ag = _ag() + AsyncGeneratorType = type(_ag) + + class _C: + def _m(self): pass + MethodType = type(_C()._m) + + BuiltinFunctionType = type(len) + BuiltinMethodType = type([].append) # Same as BuiltinFunctionType + + WrapperDescriptorType = type(object.__init__) + MethodWrapperType = type(object().__str__) + MethodDescriptorType = type(str.join) + ClassMethodDescriptorType = type(dict.__dict__['fromkeys']) + + ModuleType = type(sys) + + try: + raise TypeError + except TypeError as exc: + TracebackType = type(exc.__traceback__) + FrameType = type(exc.__traceback__.tb_frame) + + GetSetDescriptorType = type(FunctionType.__code__) + MemberDescriptorType = type(FunctionType.__globals__) + + GenericAlias = type(list[int]) + UnionType = type(int | str) + + EllipsisType = type(Ellipsis) + NoneType = type(None) + NotImplementedType = type(NotImplemented) + + # CapsuleType cannot be accessed from pure Python, + # so there is no fallback definition. 
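+    # (Each fallback above obtains a type object indirectly: create a value
+    # of the desired kind and call type() on it, since these types are not
+    # reachable as builtins.)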
+ + del sys, _f, _g, _C, _c, _ag, _cell_factory # Not for export + + +# Provide a PEP 3115 compliant mechanism for class creation +def new_class(name, bases=(), kwds=None, exec_body=None): + """Create a class object dynamically using the appropriate metaclass.""" + resolved_bases = resolve_bases(bases) + meta, ns, kwds = prepare_class(name, resolved_bases, kwds) + if exec_body is not None: + exec_body(ns) + if resolved_bases is not bases: + ns['__orig_bases__'] = bases + return meta(name, resolved_bases, ns, **kwds) + +def resolve_bases(bases): + """Resolve MRO entries dynamically as specified by PEP 560.""" + new_bases = list(bases) + updated = False + shift = 0 + for i, base in enumerate(bases): + if isinstance(base, type): + continue + if not hasattr(base, "__mro_entries__"): + continue + new_base = base.__mro_entries__(bases) + updated = True + if not isinstance(new_base, tuple): + raise TypeError("__mro_entries__ must return a tuple") + else: + new_bases[i+shift:i+shift+1] = new_base + shift += len(new_base) - 1 + if not updated: + return bases + return tuple(new_bases) + +def prepare_class(name, bases=(), kwds=None): + """Call the __prepare__ method of the appropriate metaclass. + + Returns (metaclass, namespace, kwds) as a 3-tuple + + *metaclass* is the appropriate metaclass + *namespace* is the prepared class namespace + *kwds* is an updated copy of the passed in kwds argument with any + 'metaclass' entry removed. If no kwds argument is passed in, this will + be an empty dict. + """ + if kwds is None: + kwds = {} + else: + kwds = dict(kwds) # Don't alter the provided mapping + if 'metaclass' in kwds: + meta = kwds.pop('metaclass') + else: + if bases: + meta = type(bases[0]) + else: + meta = type + if isinstance(meta, type): + # when meta is a type, we first determine the most-derived metaclass + # instead of invoking the initial candidate directly + meta = _calculate_meta(meta, bases) + if hasattr(meta, '__prepare__'): + ns = meta.__prepare__(name, bases, **kwds) + else: + ns = {} + return meta, ns, kwds + +def _calculate_meta(meta, bases): + """Calculate the most derived metaclass.""" + winner = meta + for base in bases: + base_meta = type(base) + if issubclass(winner, base_meta): + continue + if issubclass(base_meta, winner): + winner = base_meta + continue + # else: + raise TypeError("metaclass conflict: " + "the metaclass of a derived class " + "must be a (non-strict) subclass " + "of the metaclasses of all its bases") + return winner + + +def get_original_bases(cls, /): + """Return the class's "original" bases prior to modification by `__mro_entries__`. + + Examples:: + + from typing import TypeVar, Generic, NamedTuple, TypedDict + + T = TypeVar("T") + class Foo(Generic[T]): ... + class Bar(Foo[int], float): ... + class Baz(list[str]): ... + Eggs = NamedTuple("Eggs", [("a", int), ("b", str)]) + Spam = TypedDict("Spam", {"a": int, "b": str}) + + assert get_original_bases(Bar) == (Foo[int], float) + assert get_original_bases(Baz) == (list[str],) + assert get_original_bases(Eggs) == (NamedTuple,) + assert get_original_bases(Spam) == (TypedDict,) + assert get_original_bases(int) == (object,) + """ + try: + return cls.__dict__.get("__orig_bases__", cls.__bases__) + except AttributeError: + raise TypeError( + f"Expected an instance of type, not {type(cls).__name__!r}" + ) from None + + +class DynamicClassAttribute: + """Route attribute access on a class to __getattr__. 
+ + This is a descriptor, used to define attributes that act differently when + accessed through an instance and through a class. Instance access remains + normal, but access to an attribute through a class will be routed to the + class's __getattr__ method; this is done by raising AttributeError. + + This allows one to have properties active on an instance, and have virtual + attributes on the class with the same name. (Enum used this between Python + versions 3.4 - 3.9 .) + + Subclass from this to use a different method of accessing virtual attributes + and still be treated properly by the inspect module. (Enum uses this since + Python 3.10 .) + + """ + def __init__(self, fget=None, fset=None, fdel=None, doc=None): + self.fget = fget + self.fset = fset + self.fdel = fdel + # next two lines make DynamicClassAttribute act the same as property + self.__doc__ = doc or fget.__doc__ + self.overwrite_doc = doc is None + # support for abstract methods + self.__isabstractmethod__ = bool(getattr(fget, '__isabstractmethod__', False)) + + def __get__(self, instance, ownerclass=None): + if instance is None: + if self.__isabstractmethod__: + return self + raise AttributeError() + elif self.fget is None: + raise AttributeError("unreadable attribute") + return self.fget(instance) + + def __set__(self, instance, value): + if self.fset is None: + raise AttributeError("can't set attribute") + self.fset(instance, value) + + def __delete__(self, instance): + if self.fdel is None: + raise AttributeError("can't delete attribute") + self.fdel(instance) + + def getter(self, fget): + fdoc = fget.__doc__ if self.overwrite_doc else None + result = type(self)(fget, self.fset, self.fdel, fdoc or self.__doc__) + result.overwrite_doc = self.overwrite_doc + return result + + def setter(self, fset): + result = type(self)(self.fget, fset, self.fdel, self.__doc__) + result.overwrite_doc = self.overwrite_doc + return result + + def deleter(self, fdel): + result = type(self)(self.fget, self.fset, fdel, self.__doc__) + result.overwrite_doc = self.overwrite_doc + return result + + +class _GeneratorWrapper: + # TODO: Implement this in C. + def __init__(self, gen): + self.__wrapped = gen + self.__isgen = gen.__class__ is GeneratorType + self.__name__ = getattr(gen, '__name__', None) + self.__qualname__ = getattr(gen, '__qualname__', None) + def send(self, val): + return self.__wrapped.send(val) + def throw(self, tp, *rest): + return self.__wrapped.throw(tp, *rest) + def close(self): + return self.__wrapped.close() + @property + def gi_code(self): + return self.__wrapped.gi_code + @property + def gi_frame(self): + return self.__wrapped.gi_frame + @property + def gi_running(self): + return self.__wrapped.gi_running + @property + def gi_yieldfrom(self): + return self.__wrapped.gi_yieldfrom + @property + def gi_suspended(self): + return self.__wrapped.gi_suspended + cr_code = gi_code + cr_frame = gi_frame + cr_running = gi_running + cr_await = gi_yieldfrom + cr_suspended = gi_suspended + def __next__(self): + return next(self.__wrapped) + def __iter__(self): + if self.__isgen: + return self.__wrapped + return self + __await__ = __iter__ + +def coroutine(func): + """Convert regular generator function to a coroutine.""" + + if not callable(func): + raise TypeError('types.coroutine() expects a callable') + + if (func.__class__ is FunctionType and + getattr(func, '__code__', None).__class__ is CodeType): + + co_flags = func.__code__.co_flags + + # Check if 'func' is a coroutine function. 
+ # (0x180 == CO_COROUTINE | CO_ITERABLE_COROUTINE) + if co_flags & 0x180: + return func + + # Check if 'func' is a generator function. + # (0x20 == CO_GENERATOR) + if co_flags & 0x20: + # TODO: Implement this in C. + co = func.__code__ + # 0x100 == CO_ITERABLE_COROUTINE + func.__code__ = co.replace(co_flags=co.co_flags | 0x100) + return func + + # The following code is primarily to support functions that + # return generator-like objects (for instance generators + # compiled with Cython). + + # Delay functools and _collections_abc import for speeding up types import. + import functools + import _collections_abc + @functools.wraps(func) + def wrapped(*args, **kwargs): + coro = func(*args, **kwargs) + if (coro.__class__ is CoroutineType or + coro.__class__ is GeneratorType and coro.gi_code.co_flags & 0x100): + # 'coro' is a native coroutine object or an iterable coroutine + return coro + if (isinstance(coro, _collections_abc.Generator) and + not isinstance(coro, _collections_abc.Coroutine)): + # 'coro' is either a pure Python generator iterator, or it + # implements collections.abc.Generator (and does not implement + # collections.abc.Coroutine). + return _GeneratorWrapper(coro) + # 'coro' is either an instance of collections.abc.Coroutine or + # some other object -- pass it through. + return coro + + return wrapped + +__all__ = [n for n in globals() if not n.startswith('_')] # for pydoc diff --git a/Python314_4_x64_Template/Lib/typing.py b/Python314_4_x64_Template/Lib/typing.py new file mode 100644 index 00000000..38021118 --- /dev/null +++ b/Python314_4_x64_Template/Lib/typing.py @@ -0,0 +1,3854 @@ +""" +The typing module: Support for gradual typing as defined by PEP 484 and subsequent PEPs. + +Among other things, the module includes the following: +* Generic, Protocol, and internal machinery to support generic aliases. + All subscripted types like X[int], Union[int, str] are generic aliases. +* Various "special forms" that have unique meanings in type annotations: + NoReturn, Never, ClassVar, Self, Concatenate, Unpack, and others. +* Classes whose instances can be type arguments to generic classes and functions: + TypeVar, ParamSpec, TypeVarTuple. +* Public helper functions: get_type_hints, overload, cast, final, and others. +* Several protocols to support duck-typing: + SupportsFloat, SupportsIndex, SupportsAbs, and others. +* Special types: NewType, NamedTuple, TypedDict. +* Deprecated aliases for builtin types and collections.abc ABCs. + +Any name not present in __all__ is an implementation detail +that may be changed without notice. Use at your own risk! +""" + +from abc import abstractmethod, ABCMeta +import collections +from collections import defaultdict +import collections.abc +import copyreg +import functools +import operator +import sys +import types +from types import GenericAlias + +from _typing import ( + _idfunc, + TypeVar, + ParamSpec, + TypeVarTuple, + ParamSpecArgs, + ParamSpecKwargs, + TypeAliasType, + Generic, + Union, + NoDefault, +) + +# Please keep __all__ alphabetized within each category. +__all__ = [ + # Super-special typing primitives. + 'Annotated', + 'Any', + 'Callable', + 'ClassVar', + 'Concatenate', + 'Final', + 'ForwardRef', + 'Generic', + 'Literal', + 'Optional', + 'ParamSpec', + 'Protocol', + 'Tuple', + 'Type', + 'TypeVar', + 'TypeVarTuple', + 'Union', + + # ABCs (from collections.abc). + 'AbstractSet', # collections.abc.Set. 
+ 'ByteString', + 'Container', + 'ContextManager', + 'Hashable', + 'ItemsView', + 'Iterable', + 'Iterator', + 'KeysView', + 'Mapping', + 'MappingView', + 'MutableMapping', + 'MutableSequence', + 'MutableSet', + 'Sequence', + 'Sized', + 'ValuesView', + 'Awaitable', + 'AsyncIterator', + 'AsyncIterable', + 'Coroutine', + 'Collection', + 'AsyncGenerator', + 'AsyncContextManager', + + # Structural checks, a.k.a. protocols. + 'Reversible', + 'SupportsAbs', + 'SupportsBytes', + 'SupportsComplex', + 'SupportsFloat', + 'SupportsIndex', + 'SupportsInt', + 'SupportsRound', + + # Concrete collection types. + 'ChainMap', + 'Counter', + 'Deque', + 'Dict', + 'DefaultDict', + 'List', + 'OrderedDict', + 'Set', + 'FrozenSet', + 'NamedTuple', # Not really a type. + 'TypedDict', # Not really a type. + 'Generator', + + # Other concrete types. + 'BinaryIO', + 'IO', + 'Match', + 'Pattern', + 'TextIO', + + # One-off things. + 'AnyStr', + 'assert_type', + 'assert_never', + 'cast', + 'clear_overloads', + 'dataclass_transform', + 'evaluate_forward_ref', + 'final', + 'get_args', + 'get_origin', + 'get_overloads', + 'get_protocol_members', + 'get_type_hints', + 'is_protocol', + 'is_typeddict', + 'LiteralString', + 'Never', + 'NewType', + 'no_type_check', + 'no_type_check_decorator', + 'NoDefault', + 'NoReturn', + 'NotRequired', + 'overload', + 'override', + 'ParamSpecArgs', + 'ParamSpecKwargs', + 'ReadOnly', + 'Required', + 'reveal_type', + 'runtime_checkable', + 'Self', + 'Text', + 'TYPE_CHECKING', + 'TypeAlias', + 'TypeGuard', + 'TypeIs', + 'TypeAliasType', + 'Unpack', +] + +class _LazyAnnotationLib: + def __getattr__(self, attr): + global _lazy_annotationlib + import annotationlib + _lazy_annotationlib = annotationlib + return getattr(annotationlib, attr) + +_lazy_annotationlib = _LazyAnnotationLib() + + +def _type_convert(arg, module=None, *, allow_special_forms=False, owner=None): + """For converting None to type(None), and strings to ForwardRef.""" + if arg is None: + return type(None) + if isinstance(arg, str): + return _make_forward_ref(arg, module=module, is_class=allow_special_forms, owner=owner) + return arg + + +def _type_check(arg, msg, is_argument=True, module=None, *, allow_special_forms=False, owner=None): + """Check that the argument is a type, and return it (internal helper). + + As a special case, accept None and return type(None) instead. Also wrap strings + into ForwardRef instances. Consider several corner cases, for example plain + special forms like Union are not valid, while Union[int, str] is OK, etc. + The msg argument is a human-readable error message, e.g.:: + + "Union[arg, ...]: arg should be a type." + + We append the repr() of the actual value (truncated to 100 chars). + """ + invalid_generic_forms = (Generic, Protocol) + if not allow_special_forms: + invalid_generic_forms += (ClassVar,) + if is_argument: + invalid_generic_forms += (Final,) + + arg = _type_convert(arg, module=module, allow_special_forms=allow_special_forms, owner=owner) + if (isinstance(arg, _GenericAlias) and + arg.__origin__ in invalid_generic_forms): + raise TypeError(f"{arg} is not valid as type argument") + if arg in (Any, LiteralString, NoReturn, Never, Self, TypeAlias): + return arg + if allow_special_forms and arg in (ClassVar, Final): + return arg + if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol): + raise TypeError(f"Plain {arg} is not valid as type argument") + if type(arg) is tuple: + raise TypeError(f"{msg} Got {arg!r:.100}.") + return arg + + +def _is_param_expr(arg): + return arg is ... 
or isinstance(arg, + (tuple, list, ParamSpec, _ConcatenateGenericAlias)) + + +def _should_unflatten_callable_args(typ, args): + """Internal helper for munging collections.abc.Callable's __args__. + + The canonical representation for a Callable's __args__ flattens the + argument types, see https://github.com/python/cpython/issues/86361. + + For example:: + + >>> import collections.abc + >>> P = ParamSpec('P') + >>> collections.abc.Callable[[int, int], str].__args__ == (int, int, str) + True + >>> collections.abc.Callable[P, str].__args__ == (P, str) + True + + As a result, if we need to reconstruct the Callable from its __args__, + we need to unflatten it. + """ + return ( + typ.__origin__ is collections.abc.Callable + and not (len(args) == 2 and _is_param_expr(args[0])) + ) + + +def _type_repr(obj): + """Return the repr() of an object, special-casing types (internal helper). + + If obj is a type, we return a shorter version than the default + type.__repr__, based on the module and qualified name, which is + typically enough to uniquely identify a type. For everything + else, we fall back on repr(obj). + """ + if isinstance(obj, tuple): + # Special case for `repr` of types with `ParamSpec`: + return '[' + ', '.join(_type_repr(t) for t in obj) + ']' + return _lazy_annotationlib.type_repr(obj) + + +def _collect_type_parameters(args, *, enforce_default_ordering: bool = True): + """Collect all type parameters in args + in order of first appearance (lexicographic order). + + For example:: + + >>> P = ParamSpec('P') + >>> T = TypeVar('T') + >>> _collect_type_parameters((T, Callable[P, T])) + (~T, ~P) + """ + # required type parameter cannot appear after parameter with default + default_encountered = False + # or after TypeVarTuple + type_var_tuple_encountered = False + parameters = [] + for t in args: + if isinstance(t, type): + # We don't want __parameters__ descriptor of a bare Python class. + pass + elif isinstance(t, tuple): + # `t` might be a tuple, when `ParamSpec` is substituted with + # `[T, int]`, or `[int, *Ts]`, etc. + for x in t: + for collected in _collect_type_parameters([x]): + if collected not in parameters: + parameters.append(collected) + elif hasattr(t, '__typing_subst__'): + if t not in parameters: + if enforce_default_ordering: + if type_var_tuple_encountered and t.has_default(): + raise TypeError('Type parameter with a default' + ' follows TypeVarTuple') + + if t.has_default(): + default_encountered = True + elif default_encountered: + raise TypeError(f'Type parameter {t!r} without a default' + ' follows type parameter with a default') + + parameters.append(t) + else: + if _is_unpacked_typevartuple(t): + type_var_tuple_encountered = True + for x in getattr(t, '__parameters__', ()): + if x not in parameters: + parameters.append(x) + return tuple(parameters) + + +def _check_generic_specialization(cls, arguments): + """Check correct count for parameters of a generic cls (internal helper). + + This gives a nice error message in case of count mismatch. + """ + expected_len = len(cls.__parameters__) + if not expected_len: + raise TypeError(f"{cls} is not a generic class") + actual_len = len(arguments) + if actual_len != expected_len: + # deal with defaults + if actual_len < expected_len: + # If the parameter at index `actual_len` in the parameters list + # has a default, then all parameters after it must also have + # one, because we validated as much in _collect_type_parameters(). 
+            # That means that no error needs to be raised here, despite
+            # the number of arguments being passed not matching the number
+            # of parameters: all parameters that aren't explicitly
+            # specialized in this call are parameters with default values.
+            if cls.__parameters__[actual_len].has_default():
+                return
+
+            expected_len -= sum(p.has_default() for p in cls.__parameters__)
+            expect_val = f"at least {expected_len}"
+        else:
+            expect_val = expected_len
+
+        raise TypeError(f"Too {'many' if actual_len > expected_len else 'few'} arguments"
+                        f" for {cls}; actual {actual_len}, expected {expect_val}")
+
+
+def _unpack_args(*args):
+    newargs = []
+    for arg in args:
+        subargs = getattr(arg, '__typing_unpacked_tuple_args__', None)
+        if subargs is not None and not (subargs and subargs[-1] is ...):
+            newargs.extend(subargs)
+        else:
+            newargs.append(arg)
+    return newargs
+
+def _deduplicate(params, *, unhashable_fallback=False):
+    # Weed out strict duplicates, preserving the first of each occurrence.
+    try:
+        return dict.fromkeys(params)
+    except TypeError:
+        if not unhashable_fallback:
+            raise
+        # Happens for cases like `Annotated[dict, {'x': IntValidator()}]`
+        new_unhashable = []
+        for t in params:
+            if t not in new_unhashable:
+                new_unhashable.append(t)
+        return new_unhashable
+
+def _flatten_literal_params(parameters):
+    """Internal helper for Literal creation: flatten Literals among parameters."""
+    params = []
+    for p in parameters:
+        if isinstance(p, _LiteralGenericAlias):
+            params.extend(p.__args__)
+        else:
+            params.append(p)
+    return tuple(params)
+
+
+_cleanups = []
+_caches = {}
+
+
+def _tp_cache(func=None, /, *, typed=False):
+    """Internal wrapper caching __getitem__ of generic types.
+
+    For non-hashable arguments, the original function is used as a fallback.
+    """
+    def decorator(func):
+        # The callback 'inner' references the newly created lru_cache
+        # indirectly by performing a lookup in the global '_caches' dictionary.
+        # This breaks a reference that can be problematic when combined with
+        # C API extensions that leak references to types. See GH-98253.
+
+        cache = functools.lru_cache(typed=typed)(func)
+        _caches[func] = cache
+        _cleanups.append(cache.cache_clear)
+        del cache
+
+        @functools.wraps(func)
+        def inner(*args, **kwds):
+            try:
+                return _caches[func](*args, **kwds)
+            except TypeError:
+                pass  # All real errors (not unhashable args) are raised below.
+            return func(*args, **kwds)
+        return inner
+
+    if func is not None:
+        return decorator(func)
+
+    return decorator
+
+
+def _deprecation_warning_for_no_type_params_passed(funcname: str) -> None:
+    import warnings
+
+    depr_message = (
+        f"Failing to pass a value to the 'type_params' parameter "
+        f"of {funcname!r} is deprecated, as it leads to incorrect behaviour "
+        f"when calling {funcname} on a stringified annotation "
+        f"that references a PEP 695 type parameter. "
+        f"It will be disallowed in Python 3.15."
+    )
+    warnings.warn(depr_message, category=DeprecationWarning, stacklevel=3)
+
+
+class _Sentinel:
+    __slots__ = ()
+    def __repr__(self):
+        return '<sentinel>'
+
+
+_sentinel = _Sentinel()
+
+
+def _eval_type(t, globalns, localns, type_params=_sentinel, *, recursive_guard=frozenset(),
+               format=None, owner=None, parent_fwdref=None, prefer_fwd_module=False):
+    """Evaluate all forward references in the given type t.
+
+    For use of globalns and localns see the docstring for get_type_hints().
+    recursive_guard is used to prevent infinite recursion with a recursive
+    ForwardRef.
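+
+    For example (an illustrative note): evaluating ``List["int"]`` against
+    the current globals resolves the stringified annotation and returns
+    ``List[int]``.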
+ """ + if type_params is _sentinel: + _deprecation_warning_for_no_type_params_passed("typing._eval_type") + type_params = () + if isinstance(t, _lazy_annotationlib.ForwardRef): + # If the forward_ref has __forward_module__ set, evaluate() infers the globals + # from the module, and it will probably pick better than the globals we have here. + # We do this only for calls from get_type_hints() (which opts in through the + # prefer_fwd_module flag), so that the default behavior remains more straightforward. + if prefer_fwd_module and t.__forward_module__ is not None: + globalns = None + # If there are type params on the owner, we need to add them back, because + # annotationlib won't. + if owner_type_params := getattr(owner, "__type_params__", None): + globalns = getattr( + sys.modules.get(t.__forward_module__, None), "__dict__", None + ) + if globalns is not None: + globalns = dict(globalns) + for type_param in owner_type_params: + globalns[type_param.__name__] = type_param + return evaluate_forward_ref(t, globals=globalns, locals=localns, + type_params=type_params, owner=owner, + _recursive_guard=recursive_guard, format=format) + if isinstance(t, (_GenericAlias, GenericAlias, Union)): + if isinstance(t, GenericAlias): + args = tuple( + _make_forward_ref(arg, parent_fwdref=parent_fwdref) if isinstance(arg, str) else arg + for arg in t.__args__ + ) + is_unpacked = t.__unpacked__ + if _should_unflatten_callable_args(t, args): + t = t.__origin__[(args[:-1], args[-1])] + else: + t = t.__origin__[args] + if is_unpacked: + t = Unpack[t] + + ev_args = tuple( + _eval_type( + a, globalns, localns, type_params, recursive_guard=recursive_guard, + format=format, owner=owner, prefer_fwd_module=prefer_fwd_module, + ) + for a in t.__args__ + ) + if ev_args == t.__args__: + return t + if isinstance(t, GenericAlias): + return GenericAlias(t.__origin__, ev_args) + if isinstance(t, Union): + return functools.reduce(operator.or_, ev_args) + else: + return t.copy_with(ev_args) + return t + + +class _Final: + """Mixin to prohibit subclassing.""" + + __slots__ = ('__weakref__',) + + def __init_subclass__(cls, /, *args, **kwds): + if '_root' not in kwds: + raise TypeError("Cannot subclass special typing classes") + + +class _NotIterable: + """Mixin to prevent iteration, without being compatible with Iterable. + + That is, we could do:: + + def __iter__(self): raise TypeError() + + But this would make users of this mixin duck type-compatible with + collections.abc.Iterable - isinstance(foo, Iterable) would be True. + + Luckily, we can instead prevent iteration by setting __iter__ to None, which + is treated specially. + """ + + __slots__ = () + __iter__ = None + + +# Internal indicator of special typing constructs. +# See __doc__ instance attribute for specific docs. +class _SpecialForm(_Final, _NotIterable, _root=True): + __slots__ = ('_name', '__doc__', '_getitem') + + def __init__(self, getitem): + self._getitem = getitem + self._name = getitem.__name__ + self.__doc__ = getitem.__doc__ + + def __getattr__(self, item): + if item in {'__name__', '__qualname__'}: + return self._name + + raise AttributeError(item) + + def __mro_entries__(self, bases): + raise TypeError(f"Cannot subclass {self!r}") + + def __repr__(self): + return 'typing.' 
+ self._name
+
+    def __reduce__(self):
+        return self._name
+
+    def __call__(self, *args, **kwds):
+        raise TypeError(f"Cannot instantiate {self!r}")
+
+    def __or__(self, other):
+        return Union[self, other]
+
+    def __ror__(self, other):
+        return Union[other, self]
+
+    def __instancecheck__(self, obj):
+        raise TypeError(f"{self} cannot be used with isinstance()")
+
+    def __subclasscheck__(self, cls):
+        raise TypeError(f"{self} cannot be used with issubclass()")
+
+    @_tp_cache
+    def __getitem__(self, parameters):
+        return self._getitem(self, parameters)
+
+
+class _TypedCacheSpecialForm(_SpecialForm, _root=True):
+    def __getitem__(self, parameters):
+        if not isinstance(parameters, tuple):
+            parameters = (parameters,)
+        return self._getitem(self, *parameters)
+
+
+class _AnyMeta(type):
+    def __instancecheck__(self, obj):
+        if self is Any:
+            raise TypeError("typing.Any cannot be used with isinstance()")
+        return super().__instancecheck__(obj)
+
+    def __repr__(self):
+        if self is Any:
+            return "typing.Any"
+        return super().__repr__()  # respect to subclasses
+
+
+class Any(metaclass=_AnyMeta):
+    """Special type indicating an unconstrained type.
+
+    - Any is compatible with every type.
+    - Any is assumed to have all methods.
+    - All values are assumed to be instances of Any.
+
+    Note that all the above statements are true from the point of view of
+    static type checkers. At runtime, Any should not be used with instance
+    checks.
+    """
+
+    def __new__(cls, *args, **kwargs):
+        if cls is Any:
+            raise TypeError("Any cannot be instantiated")
+        return super().__new__(cls)
+
+
+@_SpecialForm
+def NoReturn(self, parameters):
+    """Special type indicating functions that never return.
+
+    Example::
+
+        from typing import NoReturn
+
+        def stop() -> NoReturn:
+            raise Exception('no way')
+
+    NoReturn can also be used as a bottom type, a type that
+    has no values. Starting in Python 3.11, the Never type should
+    be used for this concept instead. Type checkers should treat the two
+    equivalently.
+    """
+    raise TypeError(f"{self} is not subscriptable")
+
+# This is semantically identical to NoReturn, but it is implemented
+# separately so that type checkers can distinguish between the two
+# if they want.
+@_SpecialForm
+def Never(self, parameters):
+    """The bottom type, a type that has no members.
+
+    This can be used to define a function that should never be
+    called, or a function that never returns::
+
+        from typing import Never
+
+        def never_call_me(arg: Never) -> None:
+            pass
+
+        def int_or_str(arg: int | str) -> None:
+            never_call_me(arg)  # type checker error
+            match arg:
+                case int():
+                    print("It's an int")
+                case str():
+                    print("It's a str")
+                case _:
+                    never_call_me(arg)  # OK, arg is of type Never
+    """
+    raise TypeError(f"{self} is not subscriptable")
+
+
+@_SpecialForm
+def Self(self, parameters):
+    """Used to spell the type of "self" in classes.
+
+    Example::
+
+        from typing import Self
+
+        class Foo:
+            def return_self(self) -> Self:
+                ...
+                return self
+
+    This is especially useful for:
+    - classmethods that are used as alternative constructors
+    - annotating an `__enter__` method which returns self
+    """
+    raise TypeError(f"{self} is not subscriptable")
+
+
+@_SpecialForm
+def LiteralString(self, parameters):
+    """Represents an arbitrary literal string.
+
+    Example::
+
+        from typing import LiteralString
+
+        def run_query(sql: LiteralString) -> None:
+            ...
+ + def caller(arbitrary_string: str, literal_string: LiteralString) -> None: + run_query("SELECT * FROM students") # OK + run_query(literal_string) # OK + run_query("SELECT * FROM " + literal_string) # OK + run_query(arbitrary_string) # type checker error + run_query( # type checker error + f"SELECT * FROM students WHERE name = {arbitrary_string}" + ) + + Only string literals and other LiteralStrings are compatible + with LiteralString. This provides a tool to help prevent + security issues such as SQL injection. + """ + raise TypeError(f"{self} is not subscriptable") + + +@_SpecialForm +def ClassVar(self, parameters): + """Special type construct to mark class variables. + + An annotation wrapped in ClassVar indicates that a given + attribute is intended to be used as a class variable and + should not be set on instances of that class. + + Usage:: + + class Starship: + stats: ClassVar[dict[str, int]] = {} # class variable + damage: int = 10 # instance variable + + ClassVar accepts only types and cannot be further subscribed. + + Note that ClassVar is not a class itself, and should not + be used with isinstance() or issubclass(). + """ + item = _type_check(parameters, f'{self} accepts only single type.', allow_special_forms=True) + return _GenericAlias(self, (item,)) + +@_SpecialForm +def Final(self, parameters): + """Special typing construct to indicate final names to type checkers. + + A final name cannot be re-assigned or overridden in a subclass. + + For example:: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties. + """ + item = _type_check(parameters, f'{self} accepts only single type.', allow_special_forms=True) + return _GenericAlias(self, (item,)) + +@_SpecialForm +def Optional(self, parameters): + """Optional[X] is equivalent to Union[X, None].""" + arg = _type_check(parameters, f"{self} requires a single type.") + return Union[arg, type(None)] + +@_TypedCacheSpecialForm +@_tp_cache(typed=True) +def Literal(self, *parameters): + """Special typing form to define literal types (a.k.a. value types). + + This form can be used to indicate to type checkers that the corresponding + variable or function parameter has a value equivalent to the provided + literal (or one of several literals):: + + def validate_simple(data: Any) -> Literal[True]: # always returns True + ... + + MODE = Literal['r', 'rb', 'w', 'wb'] + def open_helper(file: str, mode: MODE) -> str: + ... + + open_helper('/some/path', 'r') # Passes type check + open_helper('/other/path', 'typo') # Error in type checker + + Literal[...] cannot be subclassed. At runtime, an arbitrary value + is allowed as type argument to Literal[...], but type checkers may + impose restrictions. + """ + # There is no '_type_check' call because arguments to Literal[...] are + # values, not types. + parameters = _flatten_literal_params(parameters) + + try: + parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters)))) + except TypeError: # unhashable parameters + pass + + return _LiteralGenericAlias(self, parameters) + + +@_SpecialForm +def TypeAlias(self, parameters): + """Special form for marking type aliases. + + Use TypeAlias to indicate that an assignment should + be recognized as a proper type alias definition by type + checkers. 
+ + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. + """ + raise TypeError(f"{self} is not subscriptable") + + +@_SpecialForm +def Concatenate(self, parameters): + """Special form for annotating higher-order functions. + + ``Concatenate`` can be used in conjunction with ``ParamSpec`` and + ``Callable`` to represent a higher-order function which adds, removes or + transforms the parameters of a callable. + + For example:: + + Callable[Concatenate[int, P], int] + + See PEP 612 for detailed information. + """ + if parameters == (): + raise TypeError("Cannot take a Concatenate of no types.") + if not isinstance(parameters, tuple): + parameters = (parameters,) + if not (parameters[-1] is ... or isinstance(parameters[-1], ParamSpec)): + raise TypeError("The last parameter to Concatenate should be a " + "ParamSpec variable or ellipsis.") + msg = "Concatenate[arg, ...]: each arg must be a type." + parameters = (*(_type_check(p, msg) for p in parameters[:-1]), parameters[-1]) + return _ConcatenateGenericAlias(self, parameters) + + +@_SpecialForm +def TypeGuard(self, parameters): + """Special typing construct for marking user-defined type predicate functions. + + ``TypeGuard`` can be used to annotate the return type of a user-defined + type predicate function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type predicate". + + Sometimes it would be convenient to use a user-defined boolean function + as a type predicate. Such a function should use ``TypeGuard[...]`` or + ``TypeIs[...]`` as its return type to alert static type checkers to + this intention. ``TypeGuard`` should be used over ``TypeIs`` when narrowing + from an incompatible type (e.g., ``list[object]`` to ``list[int]``) or when + the function does not return ``True`` for all instances of the narrowed type. + + Using ``-> TypeGuard[NarrowedType]`` tells the static type checker that + for a given function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is ``NarrowedType``. + + For example:: + + def is_str_list(val: list[object]) -> TypeGuard[list[str]]: + '''Determines whether all objects in the list are strings''' + return all(isinstance(x, str) for x in val) + + def func1(val: list[object]): + if is_str_list(val): + # Type of ``val`` is narrowed to ``list[str]``. + print(" ".join(val)) + else: + # Type of ``val`` remains as ``list[object]``. + print("Not a list of strings!") + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``list[object]`` to ``list[str]`` even though the latter is not + a subtype of the former, since ``list`` is invariant. The responsibility of + writing type-safe type predicates is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). 
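+
+    At runtime, subscripting simply produces a generic alias that can be
+    introspected (a small illustration using get_origin() and get_args())::
+
+        assert get_origin(TypeGuard[list[str]]) is TypeGuard
+        assert get_args(TypeGuard[list[str]]) == (list[str],)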
+ """ + item = _type_check(parameters, f'{self} accepts only single type.') + return _GenericAlias(self, (item,)) + + +@_SpecialForm +def TypeIs(self, parameters): + """Special typing construct for marking user-defined type predicate functions. + + ``TypeIs`` can be used to annotate the return type of a user-defined + type predicate function. ``TypeIs`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean and accept + at least one argument. + + ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type predicate". + + Sometimes it would be convenient to use a user-defined boolean function + as a type predicate. Such a function should use ``TypeIs[...]`` or + ``TypeGuard[...]`` as its return type to alert static type checkers to + this intention. ``TypeIs`` usually has more intuitive behavior than + ``TypeGuard``, but it cannot be used when the input and output types + are incompatible (e.g., ``list[object]`` to ``list[int]``) or when the + function does not return ``True`` for all instances of the narrowed type. + + Using ``-> TypeIs[NarrowedType]`` tells the static type checker that for + a given function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the intersection of the argument's original type and + ``NarrowedType``. + 3. If the return value is ``False``, the type of its argument + is narrowed to exclude ``NarrowedType``. + + For example:: + + from typing import assert_type, final, TypeIs + + class Parent: pass + class Child(Parent): pass + @final + class Unrelated: pass + + def is_parent(val: object) -> TypeIs[Parent]: + return isinstance(val, Parent) + + def run(arg: Child | Unrelated): + if is_parent(arg): + # Type of ``arg`` is narrowed to the intersection + # of ``Parent`` and ``Child``, which is equivalent to + # ``Child``. + assert_type(arg, Child) + else: + # Type of ``arg`` is narrowed to exclude ``Parent``, + # so only ``Unrelated`` is left. + assert_type(arg, Unrelated) + + The type inside ``TypeIs`` must be consistent with the type of the + function's argument; if it is not, static type checkers will raise + an error. An incorrectly written ``TypeIs`` function can lead to + unsound behavior in the type system; it is the user's responsibility + to write such functions in a type-safe manner. + + ``TypeIs`` also works with type variables. For more information, see + PEP 742 (Narrowing types with ``TypeIs``). + """ + item = _type_check(parameters, f'{self} accepts only single type.') + return _GenericAlias(self, (item,)) + + +def _make_forward_ref(code, *, parent_fwdref=None, **kwargs): + if parent_fwdref is not None: + if parent_fwdref.__forward_module__ is not None: + kwargs['module'] = parent_fwdref.__forward_module__ + if parent_fwdref.__owner__ is not None: + kwargs['owner'] = parent_fwdref.__owner__ + forward_ref = _lazy_annotationlib.ForwardRef(code, **kwargs) + # For compatibility, eagerly compile the forwardref's code. 
+ forward_ref.__forward_code__ + return forward_ref + + +def evaluate_forward_ref( + forward_ref, + *, + owner=None, + globals=None, + locals=None, + type_params=None, + format=None, + _recursive_guard=frozenset(), +): + """Evaluate a forward reference as a type hint. + + This is similar to calling the ForwardRef.evaluate() method, + but unlike that method, evaluate_forward_ref() also + recursively evaluates forward references nested within the type hint. + + *forward_ref* must be an instance of ForwardRef. *owner*, if given, + should be the object that holds the annotations that the forward reference + derived from, such as a module, class object, or function. It is used to + infer the namespaces to use for looking up names. *globals* and *locals* + can also be explicitly given to provide the global and local namespaces. + *type_params* is a tuple of type parameters that are in scope when + evaluating the forward reference. This parameter should be provided (though + it may be an empty tuple) if *owner* is not given and the forward reference + does not already have an owner set. *format* specifies the format of the + annotation and is a member of the annotationlib.Format enum, defaulting to + VALUE. + + """ + if format == _lazy_annotationlib.Format.STRING: + return forward_ref.__forward_arg__ + if forward_ref.__forward_arg__ in _recursive_guard: + return forward_ref + + if format is None: + format = _lazy_annotationlib.Format.VALUE + value = forward_ref.evaluate(globals=globals, locals=locals, + type_params=type_params, owner=owner, format=format) + + if (isinstance(value, _lazy_annotationlib.ForwardRef) + and format == _lazy_annotationlib.Format.FORWARDREF): + return value + + if isinstance(value, str): + value = _make_forward_ref(value, module=forward_ref.__forward_module__, + owner=owner or forward_ref.__owner__, + is_argument=forward_ref.__forward_is_argument__, + is_class=forward_ref.__forward_is_class__) + if owner is None: + owner = forward_ref.__owner__ + return _eval_type( + value, + globals, + locals, + type_params, + recursive_guard=_recursive_guard | {forward_ref.__forward_arg__}, + format=format, + owner=owner, + parent_fwdref=forward_ref, + ) + + +def _is_unpacked_typevartuple(x: Any) -> bool: + # Need to check 'is True' here + # See: https://github.com/python/cpython/issues/137706 + return ((not isinstance(x, type)) and + getattr(x, '__typing_is_unpacked_typevartuple__', False) is True) + + +def _is_typevar_like(x: Any) -> bool: + return isinstance(x, (TypeVar, ParamSpec)) or _is_unpacked_typevartuple(x) + + +def _typevar_subst(self, arg): + msg = "Parameters to generic types must be types." 
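+    # Validate the argument, then reject unpacked forms such as Unpack[Ts]
+    # or *tuple[int, ...]; a single TypeVar cannot absorb multiple types.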
+ arg = _type_check(arg, msg, is_argument=True) + if ((isinstance(arg, _GenericAlias) and arg.__origin__ is Unpack) or + (isinstance(arg, GenericAlias) and getattr(arg, '__unpacked__', False))): + raise TypeError(f"{arg} is not valid as type argument") + return arg + + +def _typevartuple_prepare_subst(self, alias, args): + params = alias.__parameters__ + typevartuple_index = params.index(self) + for param in params[typevartuple_index + 1:]: + if isinstance(param, TypeVarTuple): + raise TypeError(f"More than one TypeVarTuple parameter in {alias}") + + alen = len(args) + plen = len(params) + left = typevartuple_index + right = plen - typevartuple_index - 1 + var_tuple_index = None + fillarg = None + for k, arg in enumerate(args): + if not isinstance(arg, type): + subargs = getattr(arg, '__typing_unpacked_tuple_args__', None) + if subargs and len(subargs) == 2 and subargs[-1] is ...: + if var_tuple_index is not None: + raise TypeError("More than one unpacked arbitrary-length tuple argument") + var_tuple_index = k + fillarg = subargs[0] + if var_tuple_index is not None: + left = min(left, var_tuple_index) + right = min(right, alen - var_tuple_index - 1) + elif left + right > alen: + raise TypeError(f"Too few arguments for {alias};" + f" actual {alen}, expected at least {plen-1}") + if left == alen - right and self.has_default(): + replacement = _unpack_args(self.__default__) + else: + replacement = args[left: alen - right] + + return ( + *args[:left], + *([fillarg]*(typevartuple_index - left)), + replacement, + *([fillarg]*(plen - right - left - typevartuple_index - 1)), + *args[alen - right:], + ) + + +def _paramspec_subst(self, arg): + if isinstance(arg, (list, tuple)): + arg = tuple(_type_check(a, "Expected a type.") for a in arg) + elif not _is_param_expr(arg): + raise TypeError(f"Expected a list of types, an ellipsis, " + f"ParamSpec, or Concatenate. Got {arg}") + return arg + + +def _paramspec_prepare_subst(self, alias, args): + params = alias.__parameters__ + i = params.index(self) + if i == len(args) and self.has_default(): + args = (*args, self.__default__) + if i >= len(args): + raise TypeError(f"Too few arguments for {alias}") + # Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612. + if len(params) == 1 and not _is_param_expr(args[0]): + assert i == 0 + args = (args,) + # Convert lists to tuples to help other libraries cache the results. + elif isinstance(args[i], list): + args = (*args[:i], tuple(args[i]), *args[i+1:]) + return args + + +@_tp_cache +def _generic_class_getitem(cls, args): + """Parameterizes a generic class. + + At least, parameterizing a generic class is the *main* thing this method + does. For example, for some generic class `Foo`, this is called when we + do `Foo[int]` - there, with `cls=Foo` and `args=int`. + + However, note that this method is also called when defining generic + classes in the first place with `class Foo(Generic[T]): ...`. + """ + if not isinstance(args, tuple): + args = (args,) + + args = tuple(_type_convert(p) for p in args) + is_generic_or_protocol = cls in (Generic, Protocol) + + if is_generic_or_protocol: + # Generic and Protocol can only be subscripted with unique type variables. + if not args: + raise TypeError( + f"Parameter list to {cls.__qualname__}[...] cannot be empty" + ) + if not all(_is_typevar_like(p) for p in args): + raise TypeError( + f"Parameters to {cls.__name__}[...] 
must all be type variables " + f"or parameter specification variables.") + if len(set(args)) != len(args): + raise TypeError( + f"Parameters to {cls.__name__}[...] must all be unique") + else: + # Subscripting a regular Generic subclass. + try: + parameters = cls.__parameters__ + except AttributeError as e: + init_subclass = getattr(cls, '__init_subclass__', None) + if init_subclass not in {None, Generic.__init_subclass__}: + e.add_note( + f"Note: this exception may have been caused by " + f"{init_subclass.__qualname__!r} (or the " + f"'__init_subclass__' method on a superclass) not " + f"calling 'super().__init_subclass__()'" + ) + raise + for param in parameters: + prepare = getattr(param, '__typing_prepare_subst__', None) + if prepare is not None: + args = prepare(cls, args) + _check_generic_specialization(cls, args) + + new_args = [] + for param, new_arg in zip(parameters, args): + if isinstance(param, TypeVarTuple): + new_args.extend(new_arg) + else: + new_args.append(new_arg) + args = tuple(new_args) + + return _GenericAlias(cls, args) + + +def _generic_init_subclass(cls, *args, **kwargs): + super(Generic, cls).__init_subclass__(*args, **kwargs) + tvars = [] + if '__orig_bases__' in cls.__dict__: + error = Generic in cls.__orig_bases__ + else: + error = (Generic in cls.__bases__ and + cls.__name__ != 'Protocol' and + type(cls) != _TypedDictMeta) + if error: + raise TypeError("Cannot inherit from plain Generic") + if '__orig_bases__' in cls.__dict__: + tvars = _collect_type_parameters(cls.__orig_bases__) + # Look for Generic[T1, ..., Tn]. + # If found, tvars must be a subset of it. + # If not found, tvars is it. + # Also check for and reject plain Generic, + # and reject multiple Generic[...]. + gvars = None + for base in cls.__orig_bases__: + if (isinstance(base, _GenericAlias) and + base.__origin__ is Generic): + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...] multiple times.") + gvars = base.__parameters__ + if gvars is not None: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) + s_args = ', '.join(str(g) for g in gvars) + raise TypeError(f"Some type variables ({s_vars}) are" + f" not listed in Generic[{s_args}]") + tvars = gvars + cls.__parameters__ = tuple(tvars) + + +def _is_dunder(attr): + return attr.startswith('__') and attr.endswith('__') + +class _BaseGenericAlias(_Final, _root=True): + """The central part of the internal API. + + This represents a generic version of type 'origin' with type arguments 'params'. + There are two kind of these aliases: user defined and special. The special ones + are wrappers around builtin collections and ABCs in collections.abc. These must + have 'name' always set. If 'inst' is False, then the alias can't be instantiated; + this is used by e.g. typing.List and typing.Dict. + """ + + def __init__(self, origin, *, inst=True, name=None): + self._inst = inst + self._name = name + self.__origin__ = origin + self.__slots__ = None # This is not documented. 
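+
+    # __call__ below makes a subscripted alias usable as a constructor for
+    # its origin type; aliases created with inst=False refuse instantiation.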
+ + def __call__(self, *args, **kwargs): + if not self._inst: + raise TypeError(f"Type {self._name} cannot be instantiated; " + f"use {self.__origin__.__name__}() instead") + result = self.__origin__(*args, **kwargs) + try: + result.__orig_class__ = self + # Some objects raise TypeError (or something even more exotic) + # if you try to set attributes on them; we guard against that here + except Exception: + pass + return result + + def __mro_entries__(self, bases): + res = [] + if self.__origin__ not in bases: + res.append(self.__origin__) + + # Check if any base that occurs after us in `bases` is either itself a + # subclass of Generic, or something which will add a subclass of Generic + # to `__bases__` via its `__mro_entries__`. If not, add Generic + # ourselves. The goal is to ensure that Generic (or a subclass) will + # appear exactly once in the final bases tuple. If we let it appear + # multiple times, we risk "can't form a consistent MRO" errors. + i = bases.index(self) + for b in bases[i+1:]: + if isinstance(b, _BaseGenericAlias): + break + if not isinstance(b, type): + meth = getattr(b, "__mro_entries__", None) + new_bases = meth(bases) if meth else None + if ( + isinstance(new_bases, tuple) and + any( + isinstance(b2, type) and issubclass(b2, Generic) + for b2 in new_bases + ) + ): + break + elif issubclass(b, Generic): + break + else: + res.append(Generic) + return tuple(res) + + def __getattr__(self, attr): + if attr in {'__name__', '__qualname__'}: + return self._name or self.__origin__.__name__ + + # We are careful for copy and pickle. + # Also for simplicity we don't relay any dunder names + if '__origin__' in self.__dict__ and not _is_dunder(attr): + return getattr(self.__origin__, attr) + raise AttributeError(attr) + + def __setattr__(self, attr, val): + if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams', '_defaults'}: + super().__setattr__(attr, val) + else: + setattr(self.__origin__, attr, val) + + def __instancecheck__(self, obj): + return self.__subclasscheck__(type(obj)) + + def __subclasscheck__(self, cls): + raise TypeError("Subscripted generics cannot be used with" + " class and instance checks") + + def __dir__(self): + return list(set(super().__dir__() + + [attr for attr in dir(self.__origin__) if not _is_dunder(attr)])) + + +# Special typing constructs Union, Optional, Generic, Callable and Tuple +# use three special attributes for internal bookkeeping of generic types: +# * __parameters__ is a tuple of unique free type parameters of a generic +# type, for example, Dict[T, T].__parameters__ == (T,); +# * __origin__ keeps a reference to a type that was subscripted, +# e.g., Union[T, int].__origin__ == Union, or the non-generic version of +# the type. +# * __args__ is a tuple of all arguments used in subscripting, +# e.g., Dict[T, int].__args__ == (T, int). + + +class _GenericAlias(_BaseGenericAlias, _root=True): + # The type of parameterized generics. + # + # That is, for example, `type(List[int])` is `_GenericAlias`. + # + # Objects which are instances of this class include: + # * Parameterized container types, e.g. `Tuple[int]`, `List[int]`. + # * Note that native container types, e.g. `tuple`, `list`, use + # `types.GenericAlias` instead. + # * Parameterized classes: + # class C[T]: pass + # # C[int] is a _GenericAlias + # * `Callable` aliases, generic `Callable` aliases, and + # parameterized `Callable` aliases: + # T = TypeVar('T') + # # _CallableGenericAlias inherits from _GenericAlias. 
+ # A = Callable[[], None] # _CallableGenericAlias + # B = Callable[[T], None] # _CallableGenericAlias + # C = B[int] # _CallableGenericAlias + # * Parameterized `Final`, `ClassVar`, `TypeGuard`, and `TypeIs`: + # # All _GenericAlias + # Final[int] + # ClassVar[float] + # TypeGuard[bool] + # TypeIs[range] + + def __init__(self, origin, args, *, inst=True, name=None): + super().__init__(origin, inst=inst, name=name) + if not isinstance(args, tuple): + args = (args,) + self.__args__ = tuple(... if a is _TypingEllipsis else + a for a in args) + enforce_default_ordering = origin in (Generic, Protocol) + self.__parameters__ = _collect_type_parameters( + args, + enforce_default_ordering=enforce_default_ordering, + ) + if not name: + self.__module__ = origin.__module__ + + def __eq__(self, other): + if not isinstance(other, _GenericAlias): + return NotImplemented + return (self.__origin__ == other.__origin__ + and self.__args__ == other.__args__) + + def __hash__(self): + return hash((self.__origin__, self.__args__)) + + def __or__(self, right): + return Union[self, right] + + def __ror__(self, left): + return Union[left, self] + + @_tp_cache + def __getitem__(self, args): + # Parameterizes an already-parameterized object. + # + # For example, we arrive here doing something like: + # T1 = TypeVar('T1') + # T2 = TypeVar('T2') + # T3 = TypeVar('T3') + # class A(Generic[T1]): pass + # B = A[T2] # B is a _GenericAlias + # C = B[T3] # Invokes _GenericAlias.__getitem__ + # + # We also arrive here when parameterizing a generic `Callable` alias: + # T = TypeVar('T') + # C = Callable[[T], None] + # C[int] # Invokes _GenericAlias.__getitem__ + + if self.__origin__ in (Generic, Protocol): + # Can't subscript Generic[...] or Protocol[...]. + raise TypeError(f"Cannot subscript already-subscripted {self}") + if not self.__parameters__: + raise TypeError(f"{self} is not a generic class") + + # Preprocess `args`. + if not isinstance(args, tuple): + args = (args,) + args = _unpack_args(*(_type_convert(p) for p in args)) + new_args = self._determine_new_args(args) + r = self.copy_with(new_args) + return r + + def _determine_new_args(self, args): + # Determines new __args__ for __getitem__. + # + # For example, suppose we had: + # T1 = TypeVar('T1') + # T2 = TypeVar('T2') + # class A(Generic[T1, T2]): pass + # T3 = TypeVar('T3') + # B = A[int, T3] + # C = B[str] + # `B.__args__` is `(int, T3)`, so `C.__args__` should be `(int, str)`. + # Unfortunately, this is harder than it looks, because if `T3` is + # anything more exotic than a plain `TypeVar`, we need to consider + # edge cases. 
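+        # Parameters such as ParamSpec and TypeVarTuple register a
+        # __typing_prepare_subst__ hook, which the loop below invokes.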
+ + params = self.__parameters__ + # In the example above, this would be {T3: str} + for param in params: + prepare = getattr(param, '__typing_prepare_subst__', None) + if prepare is not None: + args = prepare(self, args) + alen = len(args) + plen = len(params) + if alen != plen: + raise TypeError(f"Too {'many' if alen > plen else 'few'} arguments for {self};" + f" actual {alen}, expected {plen}") + new_arg_by_param = dict(zip(params, args)) + return tuple(self._make_substitution(self.__args__, new_arg_by_param)) + + def _make_substitution(self, args, new_arg_by_param): + """Create a list of new type arguments.""" + new_args = [] + for old_arg in args: + if isinstance(old_arg, type): + new_args.append(old_arg) + continue + + substfunc = getattr(old_arg, '__typing_subst__', None) + if substfunc: + new_arg = substfunc(new_arg_by_param[old_arg]) + else: + subparams = getattr(old_arg, '__parameters__', ()) + if not subparams: + new_arg = old_arg + else: + subargs = [] + for x in subparams: + if isinstance(x, TypeVarTuple): + subargs.extend(new_arg_by_param[x]) + else: + subargs.append(new_arg_by_param[x]) + new_arg = old_arg[tuple(subargs)] + + if self.__origin__ == collections.abc.Callable and isinstance(new_arg, tuple): + # Consider the following `Callable`. + # C = Callable[[int], str] + # Here, `C.__args__` should be (int, str) - NOT ([int], str). + # That means that if we had something like... + # P = ParamSpec('P') + # T = TypeVar('T') + # C = Callable[P, T] + # D = C[[int, str], float] + # ...we need to be careful; `new_args` should end up as + # `(int, str, float)` rather than `([int, str], float)`. + new_args.extend(new_arg) + elif _is_unpacked_typevartuple(old_arg): + # Consider the following `_GenericAlias`, `B`: + # class A(Generic[*Ts]): ... + # B = A[T, *Ts] + # If we then do: + # B[float, int, str] + # The `new_arg` corresponding to `T` will be `float`, and the + # `new_arg` corresponding to `*Ts` will be `(int, str)`. We + # should join all these types together in a flat list + # `(float, int, str)` - so again, we should `extend`. + new_args.extend(new_arg) + elif isinstance(old_arg, tuple): + # Corner case: + # P = ParamSpec('P') + # T = TypeVar('T') + # class Base(Generic[P]): ... + # Can be substituted like this: + # X = Base[[int, T]] + # In this case, `old_arg` will be a tuple: + new_args.append( + tuple(self._make_substitution(old_arg, new_arg_by_param)), + ) + else: + new_args.append(new_arg) + return new_args + + def copy_with(self, args): + return self.__class__(self.__origin__, args, name=self._name, inst=self._inst) + + def __repr__(self): + if self._name: + name = 'typing.' + self._name + else: + name = _type_repr(self.__origin__) + if self.__args__: + args = ", ".join([_type_repr(a) for a in self.__args__]) + else: + # To ensure the repr is eval-able. 
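+            # ("C[]" would be a SyntaxError, so show an empty argument
+            # list as "()".)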
+ args = "()" + return f'{name}[{args}]' + + def __reduce__(self): + if self._name: + origin = globals()[self._name] + else: + origin = self.__origin__ + args = tuple(self.__args__) + if len(args) == 1 and not isinstance(args[0], tuple): + args, = args + return operator.getitem, (origin, args) + + def __mro_entries__(self, bases): + if isinstance(self.__origin__, _SpecialForm): + raise TypeError(f"Cannot subclass {self!r}") + + if self._name: # generic version of an ABC or built-in class + return super().__mro_entries__(bases) + if self.__origin__ is Generic: + if Protocol in bases: + return () + i = bases.index(self) + for b in bases[i+1:]: + if isinstance(b, _BaseGenericAlias) and b is not self: + return () + return (self.__origin__,) + + def __iter__(self): + yield Unpack[self] + + +# _nparams is the number of accepted parameters, e.g. 0 for Hashable, +# 1 for List and 2 for Dict. It may be -1 if variable number of +# parameters are accepted (needs custom __getitem__). + +class _SpecialGenericAlias(_NotIterable, _BaseGenericAlias, _root=True): + def __init__(self, origin, nparams, *, inst=True, name=None, defaults=()): + if name is None: + name = origin.__name__ + super().__init__(origin, inst=inst, name=name) + self._nparams = nparams + self._defaults = defaults + if origin.__module__ == 'builtins': + self.__doc__ = f'Deprecated alias to {origin.__qualname__}.' + else: + self.__doc__ = f'Deprecated alias to {origin.__module__}.{origin.__qualname__}.' + + @_tp_cache + def __getitem__(self, params): + if not isinstance(params, tuple): + params = (params,) + msg = "Parameters to generic types must be types." + params = tuple(_type_check(p, msg) for p in params) + if (self._defaults + and len(params) < self._nparams + and len(params) + len(self._defaults) >= self._nparams + ): + params = (*params, *self._defaults[len(params) - self._nparams:]) + actual_len = len(params) + + if actual_len != self._nparams: + if self._defaults: + expected = f"at least {self._nparams - len(self._defaults)}" + else: + expected = str(self._nparams) + if not self._nparams: + raise TypeError(f"{self} is not a generic class") + raise TypeError(f"Too {'many' if actual_len > self._nparams else 'few'} arguments for {self};" + f" actual {actual_len}, expected {expected}") + return self.copy_with(params) + + def copy_with(self, params): + return _GenericAlias(self.__origin__, params, + name=self._name, inst=self._inst) + + def __repr__(self): + return 'typing.' 
+ self._name + + def __subclasscheck__(self, cls): + if isinstance(cls, _SpecialGenericAlias): + return issubclass(cls.__origin__, self.__origin__) + if not isinstance(cls, _GenericAlias): + return issubclass(cls, self.__origin__) + return super().__subclasscheck__(cls) + + def __reduce__(self): + return self._name + + def __or__(self, right): + return Union[self, right] + + def __ror__(self, left): + return Union[left, self] + + +class _DeprecatedGenericAlias(_SpecialGenericAlias, _root=True): + def __init__( + self, origin, nparams, *, removal_version, inst=True, name=None + ): + super().__init__(origin, nparams, inst=inst, name=name) + self._removal_version = removal_version + + def __instancecheck__(self, inst): + import warnings + warnings._deprecated( + f"{self.__module__}.{self._name}", remove=self._removal_version + ) + return super().__instancecheck__(inst) + + +class _CallableGenericAlias(_NotIterable, _GenericAlias, _root=True): + def __repr__(self): + assert self._name == 'Callable' + args = self.__args__ + if len(args) == 2 and _is_param_expr(args[0]): + return super().__repr__() + return (f'typing.Callable' + f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], ' + f'{_type_repr(args[-1])}]') + + def __reduce__(self): + args = self.__args__ + if not (len(args) == 2 and _is_param_expr(args[0])): + args = list(args[:-1]), args[-1] + return operator.getitem, (Callable, args) + + +class _CallableType(_SpecialGenericAlias, _root=True): + def copy_with(self, params): + return _CallableGenericAlias(self.__origin__, params, + name=self._name, inst=self._inst) + + def __getitem__(self, params): + if not isinstance(params, tuple) or len(params) != 2: + raise TypeError("Callable must be used as " + "Callable[[arg, ...], result].") + args, result = params + # This relaxes what args can be on purpose to allow things like + # PEP 612 ParamSpec. Responsibility for whether a user is using + # Callable[...] properly is deferred to static type checkers. + if isinstance(args, list): + params = (tuple(args), result) + else: + params = (args, result) + return self.__getitem_inner__(params) + + @_tp_cache + def __getitem_inner__(self, params): + args, result = params + msg = "Callable[args, result]: result must be a type." + result = _type_check(result, msg) + if args is Ellipsis: + return self.copy_with((_TypingEllipsis, result)) + if not isinstance(args, tuple): + args = (args,) + args = tuple(_type_convert(arg) for arg in args) + params = args + (result,) + return self.copy_with(params) + + +class _TupleType(_SpecialGenericAlias, _root=True): + @_tp_cache + def __getitem__(self, params): + if not isinstance(params, tuple): + params = (params,) + if len(params) >= 2 and params[-1] is ...: + msg = "Tuple[t, ...]: t must be a type." + params = tuple(_type_check(p, msg) for p in params[:-1]) + return self.copy_with((*params, _TypingEllipsis)) + msg = "Tuple[t0, t1, ...]: each t must be a type." 
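+        # Fixed-length form, e.g. Tuple[int, str]: each element must be a type.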
+ params = tuple(_type_check(p, msg) for p in params) + return self.copy_with(params) + + +class _UnionGenericAliasMeta(type): + def __instancecheck__(self, inst: object) -> bool: + import warnings + warnings._deprecated("_UnionGenericAlias", remove=(3, 17)) + return isinstance(inst, Union) + + def __subclasscheck__(self, inst: type) -> bool: + import warnings + warnings._deprecated("_UnionGenericAlias", remove=(3, 17)) + return issubclass(inst, Union) + + def __eq__(self, other): + import warnings + warnings._deprecated("_UnionGenericAlias", remove=(3, 17)) + if other is _UnionGenericAlias or other is Union: + return True + return NotImplemented + + def __hash__(self): + return hash(Union) + + +class _UnionGenericAlias(metaclass=_UnionGenericAliasMeta): + """Compatibility hack. + + A class named _UnionGenericAlias used to be used to implement + typing.Union. This class exists to serve as a shim to preserve + the meaning of some code that used to use _UnionGenericAlias + directly. + + """ + def __new__(cls, self_cls, parameters, /, *, name=None): + import warnings + warnings._deprecated("_UnionGenericAlias", remove=(3, 17)) + return Union[parameters] + + +def _value_and_type_iter(parameters): + return ((p, type(p)) for p in parameters) + + +class _LiteralGenericAlias(_GenericAlias, _root=True): + def __eq__(self, other): + if not isinstance(other, _LiteralGenericAlias): + return NotImplemented + + return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__)) + + def __hash__(self): + return hash(frozenset(_value_and_type_iter(self.__args__))) + + +class _ConcatenateGenericAlias(_GenericAlias, _root=True): + def copy_with(self, params): + if isinstance(params[-1], (list, tuple)): + return (*params[:-1], *params[-1]) + if isinstance(params[-1], _ConcatenateGenericAlias): + params = (*params[:-1], *params[-1].__args__) + return super().copy_with(params) + + +@_SpecialForm +def Unpack(self, parameters): + """Type unpack operator. + + The type unpack operator takes the child types from some container type, + such as `tuple[int, str]` or a `TypeVarTuple`, and 'pulls them out'. + + For example:: + + # For some generic class `Foo`: + Foo[Unpack[tuple[int, str]]] # Equivalent to Foo[int, str] + + Ts = TypeVarTuple('Ts') + # Specifies that `Bar` is generic in an arbitrary number of types. + # (Think of `Ts` as a tuple of an arbitrary number of individual + # `TypeVar`s, which the `Unpack` is 'pulling out' directly into the + # `Generic[]`.) + class Bar(Generic[Unpack[Ts]]): ... + Bar[int] # Valid + Bar[int, str] # Also valid + + From Python 3.11, this can also be done using the `*` operator:: + + Foo[*tuple[int, str]] + class Bar(Generic[*Ts]): ... + + And from Python 3.12, it can be done using built-in syntax for generics:: + + Foo[*tuple[int, str]] + class Bar[*Ts]: ... + + The operator can also be used along with a `TypedDict` to annotate + `**kwargs` in a function signature:: + + class Movie(TypedDict): + name: str + year: int + + # This function expects two keyword arguments - *name* of type `str` and + # *year* of type `int`. + def foo(**kwargs: Unpack[Movie]): ... + + Note that there is only some runtime checking of this operator. Not + everything the runtime allows may be accepted by static type checkers. + + For more information, see PEPs 646 and 692. 
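+
+    At runtime, the unpacked form can be inspected with get_args() (a brief
+    illustration)::
+
+        assert get_args(Unpack[tuple[int, str]]) == (tuple[int, str],)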
+ """ + item = _type_check(parameters, f'{self} accepts only single type.') + return _UnpackGenericAlias(origin=self, args=(item,)) + + +class _UnpackGenericAlias(_GenericAlias, _root=True): + def __repr__(self): + # `Unpack` only takes one argument, so __args__ should contain only + # a single item. + return f'typing.Unpack[{_type_repr(self.__args__[0])}]' + + def __getitem__(self, args): + if self.__typing_is_unpacked_typevartuple__: + return args + return super().__getitem__(args) + + @property + def __typing_unpacked_tuple_args__(self): + assert self.__origin__ is Unpack + assert len(self.__args__) == 1 + arg, = self.__args__ + if isinstance(arg, (_GenericAlias, types.GenericAlias)): + if arg.__origin__ is not tuple: + raise TypeError("Unpack[...] must be used with a tuple type") + return arg.__args__ + return None + + @property + def __typing_is_unpacked_typevartuple__(self): + assert self.__origin__ is Unpack + assert len(self.__args__) == 1 + return isinstance(self.__args__[0], TypeVarTuple) + + +class _TypingEllipsis: + """Internal placeholder for ... (ellipsis).""" + + +_TYPING_INTERNALS = frozenset({ + '__parameters__', '__orig_bases__', '__orig_class__', + '_is_protocol', '_is_runtime_protocol', '__protocol_attrs__', + '__non_callable_proto_members__', '__type_params__', +}) + +_SPECIAL_NAMES = frozenset({ + '__abstractmethods__', '__annotations__', '__dict__', '__doc__', + '__init__', '__module__', '__new__', '__slots__', + '__subclasshook__', '__weakref__', '__class_getitem__', + '__match_args__', '__static_attributes__', '__firstlineno__', + '__annotate__', '__annotate_func__', '__annotations_cache__', +}) + +# These special attributes will be not collected as protocol members. +EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS | _SPECIAL_NAMES | {'_MutableMapping__marker'} + + +def _get_protocol_attrs(cls): + """Collect protocol members from a protocol class objects. + + This includes names actually defined in the class dictionary, as well + as names that appear in annotations. Special names (above) are skipped. + """ + attrs = set() + for base in cls.__mro__[:-1]: # without object + if base.__name__ in {'Protocol', 'Generic'}: + continue + try: + annotations = base.__annotations__ + except Exception: + # Only go through annotationlib to handle deferred annotations if we need to + annotations = _lazy_annotationlib.get_annotations( + base, format=_lazy_annotationlib.Format.FORWARDREF + ) + for attr in (*base.__dict__, *annotations): + if not attr.startswith('_abc_') and attr not in EXCLUDED_ATTRIBUTES: + attrs.add(attr) + return attrs + + +def _no_init_or_replace_init(self, *args, **kwargs): + cls = type(self) + + if cls._is_protocol: + raise TypeError('Protocols cannot be instantiated') + + # Already using a custom `__init__`. No need to calculate correct + # `__init__` to call. This can lead to RecursionError. See bpo-45121. + if cls.__init__ is not _no_init_or_replace_init: + return + + # Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`. + # The first instantiation of the subclass will call `_no_init_or_replace_init` which + # searches for a proper new `__init__` in the MRO. The new `__init__` + # replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent + # instantiation of the protocol subclass will thus use the new + # `__init__` and no longer call `_no_init_or_replace_init`. 
+ for base in cls.__mro__: + init = base.__dict__.get('__init__', _no_init_or_replace_init) + if init is not _no_init_or_replace_init: + cls.__init__ = init + break + else: + # should not happen + cls.__init__ = object.__init__ + + cls.__init__(self, *args, **kwargs) + + +def _caller(depth=1, default='__main__'): + try: + return sys._getframemodulename(depth + 1) or default + except AttributeError: # For platforms without _getframemodulename() + pass + try: + return sys._getframe(depth + 1).f_globals.get('__name__', default) + except (AttributeError, ValueError): # For platforms without _getframe() + pass + return None + +def _allow_reckless_class_checks(depth=2): + """Allow instance and class checks for special stdlib modules. + + The abc and functools modules indiscriminately call isinstance() and + issubclass() on the whole MRO of a user class, which may contain protocols. + """ + return _caller(depth) in {'abc', 'functools', None} + + +_PROTO_ALLOWLIST = { + 'collections.abc': [ + 'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable', + 'AsyncIterator', 'Hashable', 'Sized', 'Container', 'Collection', + 'Reversible', 'Buffer', + ], + 'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'], + 'io': ['Reader', 'Writer'], + 'os': ['PathLike'], +} + + +@functools.cache +def _lazy_load_getattr_static(): + # Import getattr_static lazily so as not to slow down the import of typing.py + # Cache the result so we don't slow down _ProtocolMeta.__instancecheck__ unnecessarily + from inspect import getattr_static + return getattr_static + + +_cleanups.append(_lazy_load_getattr_static.cache_clear) + +def _pickle_psargs(psargs): + return ParamSpecArgs, (psargs.__origin__,) + +copyreg.pickle(ParamSpecArgs, _pickle_psargs) + +def _pickle_pskwargs(pskwargs): + return ParamSpecKwargs, (pskwargs.__origin__,) + +copyreg.pickle(ParamSpecKwargs, _pickle_pskwargs) + +del _pickle_psargs, _pickle_pskwargs + + +# Preload these once, as globals, as a micro-optimisation. +# This makes a significant difference to the time it takes +# to do `isinstance()`/`issubclass()` checks +# against runtime-checkable protocols with only one callable member. +_abc_instancecheck = ABCMeta.__instancecheck__ +_abc_subclasscheck = ABCMeta.__subclasscheck__ + + +def _type_check_issubclass_arg_1(arg): + """Raise TypeError if `arg` is not an instance of `type` + in `issubclass(arg, )`. + + In most cases, this is verified by type.__subclasscheck__. + Checking it again unnecessarily would slow down issubclass() checks, + so, we don't perform this check unless we absolutely have to. + + For various error paths, however, + we want to ensure that *this* error message is shown to the user + where relevant, rather than a typing.py-specific error message. + """ + if not isinstance(arg, type): + # Same error message as for issubclass(1, int). + raise TypeError('issubclass() arg 1 must be a class') + + +class _ProtocolMeta(ABCMeta): + # This metaclass is somewhat unfortunate, + # but is necessary for several reasons... 
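+    # (it validates protocol bases in __new__ and implements the runtime
+    # semantics of isinstance()/issubclass() for protocols below).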
+ def __new__(mcls, name, bases, namespace, /, **kwargs): + if name == "Protocol" and bases == (Generic,): + pass + elif Protocol in bases: + for base in bases: + if not ( + base in {object, Generic} + or base.__name__ in _PROTO_ALLOWLIST.get(base.__module__, []) + or ( + issubclass(base, Generic) + and getattr(base, "_is_protocol", False) + ) + ): + raise TypeError( + f"Protocols can only inherit from other protocols, " + f"got {base!r}" + ) + return super().__new__(mcls, name, bases, namespace, **kwargs) + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + if getattr(cls, "_is_protocol", False): + cls.__protocol_attrs__ = _get_protocol_attrs(cls) + + def __subclasscheck__(cls, other): + if cls is Protocol: + return type.__subclasscheck__(cls, other) + if ( + getattr(cls, '_is_protocol', False) + and not _allow_reckless_class_checks() + ): + if not getattr(cls, '_is_runtime_protocol', False): + _type_check_issubclass_arg_1(other) + raise TypeError( + "Instance and class checks can only be used with " + "@runtime_checkable protocols" + ) + if ( + # this attribute is set by @runtime_checkable: + cls.__non_callable_proto_members__ + and cls.__dict__.get("__subclasshook__") is _proto_hook + ): + _type_check_issubclass_arg_1(other) + non_method_attrs = sorted(cls.__non_callable_proto_members__) + raise TypeError( + "Protocols with non-method members don't support issubclass()." + f" Non-method members: {str(non_method_attrs)[1:-1]}." + ) + return _abc_subclasscheck(cls, other) + + def __instancecheck__(cls, instance): + # We need this method for situations where attributes are + # assigned in __init__. + if cls is Protocol: + return type.__instancecheck__(cls, instance) + if not getattr(cls, "_is_protocol", False): + # i.e., it's a concrete subclass of a protocol + return _abc_instancecheck(cls, instance) + + if ( + not getattr(cls, '_is_runtime_protocol', False) and + not _allow_reckless_class_checks() + ): + raise TypeError("Instance and class checks can only be used with" + " @runtime_checkable protocols") + + if _abc_instancecheck(cls, instance): + return True + + getattr_static = _lazy_load_getattr_static() + for attr in cls.__protocol_attrs__: + try: + val = getattr_static(instance, attr) + except AttributeError: + break + # this attribute is set by @runtime_checkable: + if val is None and attr not in cls.__non_callable_proto_members__: + break + else: + return True + + return False + + +@classmethod +def _proto_hook(cls, other): + if not cls.__dict__.get('_is_protocol', False): + return NotImplemented + + for attr in cls.__protocol_attrs__: + for base in other.__mro__: + # Check if the members appears in the class dictionary... + if attr in base.__dict__: + if base.__dict__[attr] is None: + return NotImplemented + break + + # ...or in annotations, if it is a sub-protocol. + if issubclass(other, Generic) and getattr(other, "_is_protocol", False): + # We avoid the slower path through annotationlib here because in most + # cases it should be unnecessary. + try: + annos = base.__annotations__ + except Exception: + annos = _lazy_annotationlib.get_annotations( + base, format=_lazy_annotationlib.Format.FORWARDREF + ) + if attr in annos: + break + else: + return NotImplemented + return True + + +class Protocol(Generic, metaclass=_ProtocolMeta): + """Base class for protocol classes. + + Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... 
+ + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing). + + For example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing.runtime_checkable act as simple-minded runtime protocols that check + only the presence of given attributes, ignoring their type signatures. + Protocol classes can be generic, they are defined as:: + + class GenProto[T](Protocol): + def meth(self) -> T: + ... + """ + + __slots__ = () + _is_protocol = True + _is_runtime_protocol = False + + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + + # Determine if this is a protocol or a concrete subclass. + if not cls.__dict__.get('_is_protocol', False): + cls._is_protocol = any(b is Protocol for b in cls.__bases__) + + # Set (or override) the protocol subclass hook. + if '__subclasshook__' not in cls.__dict__: + cls.__subclasshook__ = _proto_hook + + # Prohibit instantiation for protocol classes + if cls._is_protocol and cls.__init__ is Protocol.__init__: + cls.__init__ = _no_init_or_replace_init + + +class _AnnotatedAlias(_NotIterable, _GenericAlias, _root=True): + """Runtime representation of an annotated type. + + At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' + with extra metadata. The alias behaves like a normal typing alias. + Instantiating is the same as instantiating the underlying type; binding + it to types is also the same. + + The metadata itself is stored in a '__metadata__' attribute as a tuple. + """ + + def __init__(self, origin, metadata): + if isinstance(origin, _AnnotatedAlias): + metadata = origin.__metadata__ + metadata + origin = origin.__origin__ + super().__init__(origin, origin, name='Annotated') + self.__metadata__ = metadata + + def copy_with(self, params): + assert len(params) == 1 + new_type = params[0] + return _AnnotatedAlias(new_type, self.__metadata__) + + def __repr__(self): + return "typing.Annotated[{}, {}]".format( + _type_repr(self.__origin__), + ", ".join(repr(a) for a in self.__metadata__) + ) + + def __reduce__(self): + return operator.getitem, ( + Annotated, (self.__origin__,) + self.__metadata__ + ) + + def __eq__(self, other): + if not isinstance(other, _AnnotatedAlias): + return NotImplemented + return (self.__origin__ == other.__origin__ + and self.__metadata__ == other.__metadata__) + + def __hash__(self): + return hash((self.__origin__, self.__metadata__)) + + def __getattr__(self, attr): + if attr in {'__name__', '__qualname__'}: + return 'Annotated' + return super().__getattr__(attr) + + def __mro_entries__(self, bases): + return (self.__origin__,) + + +@_TypedCacheSpecialForm +@_tp_cache(typed=True) +def Annotated(self, *params): + """Add context-specific metadata to a type. + + Example: Annotated[int, runtime_check.Unsigned] indicates to the + hypothetical runtime_check module that this type is an unsigned int. + Every other consumer of this type can ignore this metadata and treat + this type as int. + + The first argument to Annotated must be a valid type. + + Details: + + - It's an error to call `Annotated` with less than two arguments. 
+ - Access the metadata via the ``__metadata__`` attribute:: + + assert Annotated[int, '$'].__metadata__ == ('$',) + + - Nested Annotated types are flattened:: + + assert Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] + + - Instantiating an annotated type is equivalent to instantiating the + underlying type:: + + assert Annotated[C, Ann1](5) == C(5) + + - Annotated can be used as a generic type alias:: + + type Optimized[T] = Annotated[T, runtime.Optimize()] + # type checker will treat Optimized[int] + # as equivalent to Annotated[int, runtime.Optimize()] + + type OptimizedList[T] = Annotated[list[T], runtime.Optimize()] + # type checker will treat OptimizedList[int] + # as equivalent to Annotated[list[int], runtime.Optimize()] + + - Annotated cannot be used with an unpacked TypeVarTuple:: + + type Variadic[*Ts] = Annotated[*Ts, Ann1] # NOT valid + + This would be equivalent to:: + + Annotated[T1, T2, T3, ..., Ann1] + + where T1, T2 etc. are TypeVars, which would be invalid, because + only one type should be passed to Annotated. + """ + if len(params) < 2: + raise TypeError("Annotated[...] should be used " + "with at least two arguments (a type and an " + "annotation).") + if _is_unpacked_typevartuple(params[0]): + raise TypeError("Annotated[...] should not be used with an " + "unpacked TypeVarTuple") + msg = "Annotated[t, ...]: t must be a type." + origin = _type_check(params[0], msg, allow_special_forms=True) + metadata = tuple(params[1:]) + return _AnnotatedAlias(origin, metadata) + + +def runtime_checkable(cls): + """Mark a protocol class as a runtime protocol. + + Such protocol can be used with isinstance() and issubclass(). + Raise TypeError if applied to a non-protocol class. + This allows a simple-minded structural check very similar to + one trick ponies in collections.abc such as Iterable. + + For example:: + + @runtime_checkable + class Closable(Protocol): + def close(self): ... + + assert isinstance(open('/some/file'), Closable) + + Warning: this will check only the presence of the required methods, + not their type signatures! + """ + if not issubclass(cls, Generic) or not getattr(cls, '_is_protocol', False): + raise TypeError('@runtime_checkable can be only applied to protocol classes,' + ' got %r' % cls) + cls._is_runtime_protocol = True + # PEP 544 prohibits using issubclass() + # with protocols that have non-method members. + # See gh-113320 for why we compute this attribute here, + # rather than in `_ProtocolMeta.__init__` + cls.__non_callable_proto_members__ = set() + for attr in cls.__protocol_attrs__: + try: + is_callable = callable(getattr(cls, attr, None)) + except Exception as e: + raise TypeError( + f"Failed to determine whether protocol member {attr!r} " + "is a method member" + ) from e + else: + if not is_callable: + cls.__non_callable_proto_members__.add(attr) + return cls + + +def cast(typ, val): + """Cast a value to a type. + + This returns the value unchanged. To the type checker this + signals that the return value has the designated type, but at + runtime we intentionally don't check anything (we want this + to be as fast as possible). + """ + return val + + +def assert_type(val, typ, /): + """Ask a static type checker to confirm that the value is of the given type. + + At runtime this does nothing: it returns the first argument unchanged with no + checks or side effects, no matter the actual type of the argument. 
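+
+    A quick runtime illustration (the value simply passes through)::
+
+        x = assert_type("hello", str)
+        assert x == "hello"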
+ + When a static type checker encounters a call to assert_type(), it + emits an error if the value is not of the specified type:: + + def greet(name: str) -> None: + assert_type(name, str) # OK + assert_type(name, int) # type checker error + """ + return val + + +def get_type_hints(obj, globalns=None, localns=None, include_extras=False, + *, format=None): + """Return type hints for an object. + + This is often the same as obj.__annotations__, but it handles + forward references encoded as string literals and recursively replaces all + 'Annotated[T, ...]' with 'T' (unless 'include_extras=True'). + + The argument may be a module, class, method, or function. The annotations + are returned as a dictionary. For classes, annotations include also + inherited members. + + TypeError is raised if the argument is not of a type that can contain + annotations, and an empty dictionary is returned if no annotations are + present. + + BEWARE -- the behavior of globalns and localns is counterintuitive + (unless you are familiar with how eval() and exec() work). The + search order is locals first, then globals. + + - If no dict arguments are passed, an attempt is made to use the + globals from obj (or the respective module's globals for classes), + and these are also used as the locals. If the object does not appear + to have globals, an empty dictionary is used. For classes, the search + order is globals first then locals. + + - If one dict argument is passed, it is used for both globals and + locals. + + - If two dict arguments are passed, they specify globals and + locals, respectively. + """ + if getattr(obj, '__no_type_check__', None): + return {} + Format = _lazy_annotationlib.Format + if format is None: + format = Format.VALUE + # Classes require a special treatment. + if isinstance(obj, type): + hints = {} + for base in reversed(obj.__mro__): + ann = _lazy_annotationlib.get_annotations(base, format=format) + if format == Format.STRING: + hints.update(ann) + continue + if globalns is None: + base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {}) + else: + base_globals = globalns + base_locals = dict(vars(base)) if localns is None else localns + if localns is None and globalns is None: + # This is surprising, but required. Before Python 3.10, + # get_type_hints only evaluated the globalns of + # a class. To maintain backwards compatibility, we reverse + # the globalns and localns order so that eval() looks into + # *base_globals* first rather than *base_locals*. + # This only affects ForwardRefs. 
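+                # The swap on the next line restores that pre-3.10 lookup order.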
+ base_globals, base_locals = base_locals, base_globals + type_params = base.__type_params__ + base_globals, base_locals = _add_type_params_to_scope( + type_params, base_globals, base_locals, True) + for name, value in ann.items(): + if isinstance(value, str): + value = _make_forward_ref(value, is_argument=False, is_class=True) + value = _eval_type(value, base_globals, base_locals, (), + format=format, owner=obj, prefer_fwd_module=True) + if value is None: + value = type(None) + hints[name] = value + if include_extras or format == Format.STRING: + return hints + else: + return {k: _strip_annotations(t) for k, t in hints.items()} + + hints = _lazy_annotationlib.get_annotations(obj, format=format) + if ( + not hints + and not isinstance(obj, types.ModuleType) + and not callable(obj) + and not hasattr(obj, '__annotations__') + and not hasattr(obj, '__annotate__') + ): + raise TypeError(f"{obj!r} is not a module, class, or callable.") + if format == Format.STRING: + return hints + + if globalns is None: + if isinstance(obj, types.ModuleType): + globalns = obj.__dict__ + else: + nsobj = obj + # Find globalns for the unwrapped object. + while hasattr(nsobj, '__wrapped__'): + nsobj = nsobj.__wrapped__ + globalns = getattr(nsobj, '__globals__', {}) + if localns is None: + localns = globalns + elif localns is None: + localns = globalns + type_params = getattr(obj, "__type_params__", ()) + globalns, localns = _add_type_params_to_scope(type_params, globalns, localns, False) + for name, value in hints.items(): + if isinstance(value, str): + # class-level forward refs were handled above, this must be either + # a module-level annotation or a function argument annotation + value = _make_forward_ref( + value, + is_argument=not isinstance(obj, types.ModuleType), + is_class=False, + ) + value = _eval_type(value, globalns, localns, (), format=format, owner=obj, prefer_fwd_module=True) + if value is None: + value = type(None) + hints[name] = value + return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()} + + +# Add type parameters to the globals and locals scope. This is needed for +# compatibility. +def _add_type_params_to_scope(type_params, globalns, localns, is_class): + if not type_params: + return globalns, localns + globalns = dict(globalns) + localns = dict(localns) + for param in type_params: + if not is_class or param.__name__ not in globalns: + globalns[param.__name__] = param + localns.pop(param.__name__, None) + return globalns, localns + + +def _strip_annotations(t): + """Strip the annotations from a given type.""" + if isinstance(t, _AnnotatedAlias): + return _strip_annotations(t.__origin__) + if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired, ReadOnly): + return _strip_annotations(t.__args__[0]) + if isinstance(t, _GenericAlias): + stripped_args = tuple(_strip_annotations(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return t.copy_with(stripped_args) + if isinstance(t, GenericAlias): + stripped_args = tuple(_strip_annotations(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return GenericAlias(t.__origin__, stripped_args) + if isinstance(t, Union): + stripped_args = tuple(_strip_annotations(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return functools.reduce(operator.or_, stripped_args) + + return t + + +def get_origin(tp): + """Get the unsubscripted version of a type. 
+ + This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar, + Annotated, and others. Return None for unsupported types. + + Examples:: + + >>> P = ParamSpec('P') + >>> assert get_origin(Literal[42]) is Literal + >>> assert get_origin(int) is None + >>> assert get_origin(ClassVar[int]) is ClassVar + >>> assert get_origin(Generic) is Generic + >>> assert get_origin(Generic[T]) is Generic + >>> assert get_origin(Union[T, int]) is Union + >>> assert get_origin(List[Tuple[T, T]][int]) is list + >>> assert get_origin(P.args) is P + """ + if isinstance(tp, _AnnotatedAlias): + return Annotated + if isinstance(tp, (_BaseGenericAlias, GenericAlias, + ParamSpecArgs, ParamSpecKwargs)): + return tp.__origin__ + if tp is Generic: + return Generic + if isinstance(tp, Union): + return Union + return None + + +def get_args(tp): + """Get type arguments with all substitutions performed. + + For unions, basic simplifications used by Union constructor are performed. + + Examples:: + + >>> T = TypeVar('T') + >>> assert get_args(Dict[str, int]) == (str, int) + >>> assert get_args(int) == () + >>> assert get_args(Union[int, Union[T, int], str][int]) == (int, str) + >>> assert get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + >>> assert get_args(Callable[[], T][int]) == ([], int) + """ + if isinstance(tp, _AnnotatedAlias): + return (tp.__origin__,) + tp.__metadata__ + if isinstance(tp, (_GenericAlias, GenericAlias)): + res = tp.__args__ + if _should_unflatten_callable_args(tp, res): + res = (list(res[:-1]), res[-1]) + return res + if isinstance(tp, Union): + return tp.__args__ + return () + + +def is_typeddict(tp): + """Check if an annotation is a TypedDict class. + + For example:: + + >>> from typing import TypedDict + >>> class Film(TypedDict): + ... title: str + ... year: int + ... + >>> is_typeddict(Film) + True + >>> is_typeddict(dict) + False + """ + return isinstance(tp, _TypedDictMeta) + + +_ASSERT_NEVER_REPR_MAX_LENGTH = 100 + + +def assert_never(arg: Never, /) -> Never: + """Statically assert that a line of code is unreachable. + + Example:: + + def int_or_str(arg: int | str) -> None: + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + assert_never(arg) + + If a type checker finds that a call to assert_never() is + reachable, it will emit an error. + + At runtime, this throws an exception when called. + """ + value = repr(arg) + if len(value) > _ASSERT_NEVER_REPR_MAX_LENGTH: + value = value[:_ASSERT_NEVER_REPR_MAX_LENGTH] + '...' + raise AssertionError(f"Expected code to be unreachable, but got: {value}") + + +def no_type_check(arg): + """Decorator to indicate that annotations are not type hints. + + The argument must be a class or function; if it is a class, it + applies recursively to all methods and classes defined in that class + (but not to methods defined in its superclasses or subclasses). + + This mutates the function(s) or class(es) in place. + """ + if isinstance(arg, type): + for key in dir(arg): + obj = getattr(arg, key) + if ( + not hasattr(obj, '__qualname__') + or obj.__qualname__ != f'{arg.__qualname__}.{obj.__name__}' + or getattr(obj, '__module__', None) != arg.__module__ + ): + # We only modify objects that are defined in this type directly. + # If classes / methods are nested in multiple layers, + # we will modify them when processing their direct holders. 
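+                # For instance, a method inherited from a superclass, or a
+                # function assigned into the class from another module, is
+                # deliberately left untouched here.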
+ continue + # Instance, class, and static methods: + if isinstance(obj, types.FunctionType): + obj.__no_type_check__ = True + if isinstance(obj, types.MethodType): + obj.__func__.__no_type_check__ = True + # Nested types: + if isinstance(obj, type): + no_type_check(obj) + try: + arg.__no_type_check__ = True + except TypeError: # built-in classes + pass + return arg + + +def no_type_check_decorator(decorator): + """Decorator to give another decorator the @no_type_check effect. + + This wraps the decorator with something that wraps the decorated + function in @no_type_check. + """ + import warnings + warnings._deprecated("typing.no_type_check_decorator", remove=(3, 15)) + @functools.wraps(decorator) + def wrapped_decorator(*args, **kwds): + func = decorator(*args, **kwds) + func = no_type_check(func) + return func + + return wrapped_decorator + + +def _overload_dummy(*args, **kwds): + """Helper for @overload to raise when called.""" + raise NotImplementedError( + "You should not call an overloaded function. " + "A series of @overload-decorated functions " + "outside a stub module should always be followed " + "by an implementation that is not @overload-ed.") + + +# {module: {qualname: {firstlineno: func}}} +_overload_registry = defaultdict(functools.partial(defaultdict, dict)) + + +def overload(func): + """Decorator for overloaded functions/methods. + + In a stub file, place two or more stub definitions for the same + function in a row, each decorated with @overload. + + For example:: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + + In a non-stub file (i.e. a regular .py file), do the same but + follow it with an implementation. The implementation should *not* + be decorated with @overload:: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + def utf8(value): + ... # implementation goes here + + The overloads for a function can be retrieved at runtime using the + get_overloads() function. + """ + # classmethod and staticmethod + f = getattr(func, "__func__", func) + try: + _overload_registry[f.__module__][f.__qualname__][f.__code__.co_firstlineno] = func + except AttributeError: + # Not a normal function; ignore. + pass + return _overload_dummy + + +def get_overloads(func): + """Return all defined overloads for *func* as a sequence.""" + # classmethod and staticmethod + f = getattr(func, "__func__", func) + if f.__module__ not in _overload_registry: + return [] + mod_dict = _overload_registry[f.__module__] + if f.__qualname__ not in mod_dict: + return [] + return list(mod_dict[f.__qualname__].values()) + + +def clear_overloads(): + """Clear all overloads in the registry.""" + _overload_registry.clear() + + +def final(f): + """Decorator to indicate final methods and final classes. + + Use this decorator to indicate to type checkers that the decorated + method cannot be overridden, and decorated class cannot be subclassed. + + For example:: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... + + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties. The decorator + attempts to set the ``__final__`` attribute to ``True`` on the decorated + object to allow runtime introspection. 
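+
+    For example, ``getattr(Leaf, '__final__', False)`` would then return
+    ``True`` for the ``Leaf`` class above.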
+ """ + try: + f.__final__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. + pass + return f + + +# Some unconstrained type variables. These were initially used by the container types. +# They were never meant for export and are now unused, but we keep them around to +# avoid breaking compatibility with users who import them. +T = TypeVar('T') # Any type. +KT = TypeVar('KT') # Key type. +VT = TypeVar('VT') # Value type. +T_co = TypeVar('T_co', covariant=True) # Any type covariant containers. +V_co = TypeVar('V_co', covariant=True) # Any type covariant containers. +VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers. +T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant. +# Internal type variable used for Type[]. +CT_co = TypeVar('CT_co', covariant=True, bound=type) + + +# A useful type variable with constraints. This represents string types. +# (This one *is* for export!) +AnyStr = TypeVar('AnyStr', bytes, str) + + +# Various ABCs mimicking those in collections.abc. +_alias = _SpecialGenericAlias + +Hashable = _alias(collections.abc.Hashable, 0) # Not generic. +Awaitable = _alias(collections.abc.Awaitable, 1) +Coroutine = _alias(collections.abc.Coroutine, 3) +AsyncIterable = _alias(collections.abc.AsyncIterable, 1) +AsyncIterator = _alias(collections.abc.AsyncIterator, 1) +Iterable = _alias(collections.abc.Iterable, 1) +Iterator = _alias(collections.abc.Iterator, 1) +Reversible = _alias(collections.abc.Reversible, 1) +Sized = _alias(collections.abc.Sized, 0) # Not generic. +Container = _alias(collections.abc.Container, 1) +Collection = _alias(collections.abc.Collection, 1) +Callable = _CallableType(collections.abc.Callable, 2) +Callable.__doc__ = \ + """Deprecated alias to collections.abc.Callable. + + Callable[[int], str] signifies a function that takes a single + parameter of type int and returns a str. + + The subscription syntax must always be used with exactly two + values: the argument list and the return type. + The argument list must be a list of types, a ParamSpec, + Concatenate or ellipsis. The return type must be a single type. + + There is no syntax to indicate optional or keyword arguments; + such function types are rarely used as callback types. + """ +AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet') +MutableSet = _alias(collections.abc.MutableSet, 1) +# NOTE: Mapping is only covariant in the value type. +Mapping = _alias(collections.abc.Mapping, 2) +MutableMapping = _alias(collections.abc.MutableMapping, 2) +Sequence = _alias(collections.abc.Sequence, 1) +MutableSequence = _alias(collections.abc.MutableSequence, 1) +ByteString = _DeprecatedGenericAlias( + collections.abc.ByteString, 0, removal_version=(3, 17) # Not generic. +) +# Tuple accepts variable number of parameters. +Tuple = _TupleType(tuple, -1, inst=False, name='Tuple') +Tuple.__doc__ = \ + """Deprecated alias to builtins.tuple. + + Tuple[X, Y] is the cross-product type of X and Y. + + Example: Tuple[T1, T2] is a tuple of two elements corresponding + to type variables T1 and T2. Tuple[int, float, str] is a tuple + of an int, a float and a string. + + To specify a variable-length tuple of homogeneous type, use Tuple[T, ...]. 
+ """ +List = _alias(list, 1, inst=False, name='List') +Deque = _alias(collections.deque, 1, name='Deque') +Set = _alias(set, 1, inst=False, name='Set') +FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet') +MappingView = _alias(collections.abc.MappingView, 1) +KeysView = _alias(collections.abc.KeysView, 1) +ItemsView = _alias(collections.abc.ItemsView, 2) +ValuesView = _alias(collections.abc.ValuesView, 1) +Dict = _alias(dict, 2, inst=False, name='Dict') +DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict') +OrderedDict = _alias(collections.OrderedDict, 2) +Counter = _alias(collections.Counter, 1) +ChainMap = _alias(collections.ChainMap, 2) +Generator = _alias(collections.abc.Generator, 3, defaults=(types.NoneType, types.NoneType)) +AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2, defaults=(types.NoneType,)) +Type = _alias(type, 1, inst=False, name='Type') +Type.__doc__ = \ + """Deprecated alias to builtins.type. + + builtins.type or typing.Type can be used to annotate class objects. + For example, suppose we have the following classes:: + + class User: ... # Abstract base for User classes + class BasicUser(User): ... + class ProUser(User): ... + class TeamUser(User): ... + + And a function that takes a class argument that's a subclass of + User and returns an instance of the corresponding class:: + + def new_user[U](user_class: Type[U]) -> U: + user = user_class() + # (Here we could write the user object to a database) + return user + + joe = new_user(BasicUser) + + At this point the type checker knows that joe has type BasicUser. + """ + + +@runtime_checkable +class SupportsInt(Protocol): + """An ABC with one abstract method __int__.""" + + __slots__ = () + + @abstractmethod + def __int__(self) -> int: + pass + + +@runtime_checkable +class SupportsFloat(Protocol): + """An ABC with one abstract method __float__.""" + + __slots__ = () + + @abstractmethod + def __float__(self) -> float: + pass + + +@runtime_checkable +class SupportsComplex(Protocol): + """An ABC with one abstract method __complex__.""" + + __slots__ = () + + @abstractmethod + def __complex__(self) -> complex: + pass + + +@runtime_checkable +class SupportsBytes(Protocol): + """An ABC with one abstract method __bytes__.""" + + __slots__ = () + + @abstractmethod + def __bytes__(self) -> bytes: + pass + + +@runtime_checkable +class SupportsIndex(Protocol): + """An ABC with one abstract method __index__.""" + + __slots__ = () + + @abstractmethod + def __index__(self) -> int: + pass + + +@runtime_checkable +class SupportsAbs[T](Protocol): + """An ABC with one abstract method __abs__ that is covariant in its return type.""" + + __slots__ = () + + @abstractmethod + def __abs__(self) -> T: + pass + + +@runtime_checkable +class SupportsRound[T](Protocol): + """An ABC with one abstract method __round__ that is covariant in its return type.""" + + __slots__ = () + + @abstractmethod + def __round__(self, ndigits: int = 0) -> T: + pass + + +def _make_nmtuple(name, fields, annotate_func, module, defaults = ()): + nm_tpl = collections.namedtuple(name, fields, + defaults=defaults, module=module) + nm_tpl.__annotate__ = nm_tpl.__new__.__annotate__ = annotate_func + return nm_tpl + + +def _make_eager_annotate(types): + checked_types = {key: _type_check(val, f"field {key} annotation must be a type") + for key, val in types.items()} + def annotate(format): + match format: + case _lazy_annotationlib.Format.VALUE | _lazy_annotationlib.Format.FORWARDREF: + return checked_types + case 
_lazy_annotationlib.Format.STRING: + return _lazy_annotationlib.annotations_to_string(types) + case _: + raise NotImplementedError(format) + return annotate + + +# attributes prohibited to set in NamedTuple class syntax +_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__', + '_fields', '_field_defaults', + '_make', '_replace', '_asdict', '_source'}) + +_special = frozenset({'__module__', '__name__', '__annotations__', '__annotate__', + '__annotate_func__', '__annotations_cache__'}) + + +class NamedTupleMeta(type): + def __new__(cls, typename, bases, ns): + assert _NamedTuple in bases + if "__classcell__" in ns: + raise TypeError( + "uses of super() and __class__ are unsupported in methods of NamedTuple subclasses") + for base in bases: + if base is not _NamedTuple and base is not Generic: + raise TypeError( + 'can only inherit from a NamedTuple type and Generic') + bases = tuple(tuple if base is _NamedTuple else base for base in bases) + if "__annotations__" in ns: + types = ns["__annotations__"] + field_names = list(types) + annotate = _make_eager_annotate(types) + elif (original_annotate := _lazy_annotationlib.get_annotate_from_class_namespace(ns)) is not None: + types = _lazy_annotationlib.call_annotate_function( + original_annotate, _lazy_annotationlib.Format.FORWARDREF) + field_names = list(types) + + # For backward compatibility, type-check all the types at creation time + for typ in types.values(): + _type_check(typ, "field annotation must be a type") + + def annotate(format): + annos = _lazy_annotationlib.call_annotate_function( + original_annotate, format) + if format != _lazy_annotationlib.Format.STRING: + return {key: _type_check(val, f"field {key} annotation must be a type") + for key, val in annos.items()} + return annos + else: + # Empty NamedTuple + field_names = [] + annotate = lambda format: {} + default_names = [] + for field_name in field_names: + if field_name in ns: + default_names.append(field_name) + elif default_names: + raise TypeError(f"Non-default namedtuple field {field_name} " + f"cannot follow default field" + f"{'s' if len(default_names) > 1 else ''} " + f"{', '.join(default_names)}") + nm_tpl = _make_nmtuple(typename, field_names, annotate, + defaults=[ns[n] for n in default_names], + module=ns['__module__']) + nm_tpl.__bases__ = bases + if Generic in bases: + class_getitem = _generic_class_getitem + nm_tpl.__class_getitem__ = classmethod(class_getitem) + # update from user namespace without overriding special namedtuple attributes + for key, val in ns.items(): + if key in _prohibited: + raise AttributeError("Cannot overwrite NamedTuple attribute " + key) + elif key not in _special: + if key not in nm_tpl._fields: + setattr(nm_tpl, key, val) + try: + set_name = type(val).__set_name__ + except AttributeError: + pass + else: + try: + set_name(val, nm_tpl, key) + except BaseException as e: + e.add_note( + f"Error calling __set_name__ on {type(val).__name__!r} " + f"instance {key!r} in {typename!r}" + ) + raise + + if Generic in bases: + nm_tpl.__init_subclass__() + return nm_tpl + + +def NamedTuple(typename, fields=_sentinel, /, **kwargs): + """Typed version of namedtuple. + + Usage:: + + class Employee(NamedTuple): + name: str + id: int + + This is equivalent to:: + + Employee = collections.namedtuple('Employee', ['name', 'id']) + + The resulting class has an extra __annotations__ attribute, giving a + dict that maps field names to types. (The field names are also in + the _fields attribute, which is part of the namedtuple API.) 
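+
+    For example::
+
+        assert Employee._fields == ('name', 'id')
+        assert Employee.__annotations__ == {'name': str, 'id': int}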
+ An alternative equivalent functional syntax is also accepted:: + + Employee = NamedTuple('Employee', [('name', str), ('id', int)]) + """ + if fields is _sentinel: + if kwargs: + deprecated_thing = "Creating NamedTuple classes using keyword arguments" + deprecation_msg = ( + "{name} is deprecated and will be disallowed in Python {remove}. " + "Use the class-based or functional syntax instead." + ) + else: + deprecated_thing = "Failing to pass a value for the 'fields' parameter" + example = f"`{typename} = NamedTuple({typename!r}, [])`" + deprecation_msg = ( + "{name} is deprecated and will be disallowed in Python {remove}. " + "To create a NamedTuple class with 0 fields " + "using the functional syntax, " + "pass an empty list, e.g. " + ) + example + "." + elif fields is None: + if kwargs: + raise TypeError( + "Cannot pass `None` as the 'fields' parameter " + "and also specify fields using keyword arguments" + ) + else: + deprecated_thing = "Passing `None` as the 'fields' parameter" + example = f"`{typename} = NamedTuple({typename!r}, [])`" + deprecation_msg = ( + "{name} is deprecated and will be disallowed in Python {remove}. " + "To create a NamedTuple class with 0 fields " + "using the functional syntax, " + "pass an empty list, e.g. " + ) + example + "." + elif kwargs: + raise TypeError("Either list of fields or keywords" + " can be provided to NamedTuple, not both") + if fields is _sentinel or fields is None: + import warnings + warnings._deprecated(deprecated_thing, message=deprecation_msg, remove=(3, 15)) + fields = kwargs.items() + types = {n: _type_check(t, f"field {n} annotation must be a type") + for n, t in fields} + field_names = [n for n, _ in fields] + + nt = _make_nmtuple(typename, field_names, _make_eager_annotate(types), module=_caller()) + nt.__orig_bases__ = (NamedTuple,) + return nt + +_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {}) + +def _namedtuple_mro_entries(bases): + assert NamedTuple in bases + return (_NamedTuple,) + +NamedTuple.__mro_entries__ = _namedtuple_mro_entries + + +def _get_typeddict_qualifiers(annotation_type): + while True: + annotation_origin = get_origin(annotation_type) + if annotation_origin is Annotated: + annotation_args = get_args(annotation_type) + if annotation_args: + annotation_type = annotation_args[0] + else: + break + elif annotation_origin is Required: + yield Required + (annotation_type,) = get_args(annotation_type) + elif annotation_origin is NotRequired: + yield NotRequired + (annotation_type,) = get_args(annotation_type) + elif annotation_origin is ReadOnly: + yield ReadOnly + (annotation_type,) = get_args(annotation_type) + else: + break + + +class _TypedDictMeta(type): + def __new__(cls, name, bases, ns, total=True): + """Create a new typed dict class object. + + This method is called when TypedDict is subclassed, + or when TypedDict is instantiated. This way + TypedDict supports all three syntax forms described in its docstring. + Subclasses and instances of TypedDict return actual dictionaries. 
+ """ + for base in bases: + if type(base) is not _TypedDictMeta and base is not Generic: + raise TypeError('cannot inherit from both a TypedDict type ' + 'and a non-TypedDict base class') + + if any(issubclass(b, Generic) for b in bases): + generic_base = (Generic,) + else: + generic_base = () + + ns_annotations = ns.pop('__annotations__', None) + + tp_dict = type.__new__(_TypedDictMeta, name, (*generic_base, dict), ns) + + if not hasattr(tp_dict, '__orig_bases__'): + tp_dict.__orig_bases__ = bases + + if ns_annotations is not None: + own_annotate = None + own_annotations = ns_annotations + elif (own_annotate := _lazy_annotationlib.get_annotate_from_class_namespace(ns)) is not None: + own_annotations = _lazy_annotationlib.call_annotate_function( + own_annotate, _lazy_annotationlib.Format.FORWARDREF, owner=tp_dict + ) + else: + own_annotate = None + own_annotations = {} + msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" + own_checked_annotations = { + n: _type_check(tp, msg, owner=tp_dict, module=tp_dict.__module__) + for n, tp in own_annotations.items() + } + required_keys = set() + optional_keys = set() + readonly_keys = set() + mutable_keys = set() + + for base in bases: + base_required = base.__dict__.get('__required_keys__', set()) + required_keys |= base_required + optional_keys -= base_required + + base_optional = base.__dict__.get('__optional_keys__', set()) + required_keys -= base_optional + optional_keys |= base_optional + + readonly_keys.update(base.__dict__.get('__readonly_keys__', ())) + mutable_keys.update(base.__dict__.get('__mutable_keys__', ())) + + for annotation_key, annotation_type in own_checked_annotations.items(): + qualifiers = set(_get_typeddict_qualifiers(annotation_type)) + if Required in qualifiers: + is_required = True + elif NotRequired in qualifiers: + is_required = False + else: + is_required = total + + if is_required: + required_keys.add(annotation_key) + optional_keys.discard(annotation_key) + else: + optional_keys.add(annotation_key) + required_keys.discard(annotation_key) + + if ReadOnly in qualifiers: + if annotation_key in mutable_keys: + raise TypeError( + f"Cannot override mutable key {annotation_key!r}" + " with read-only key" + ) + readonly_keys.add(annotation_key) + else: + mutable_keys.add(annotation_key) + readonly_keys.discard(annotation_key) + + assert required_keys.isdisjoint(optional_keys), ( + f"Required keys overlap with optional keys in {name}:" + f" {required_keys=}, {optional_keys=}" + ) + + def __annotate__(format): + annos = {} + for base in bases: + if base is Generic: + continue + base_annotate = base.__annotate__ + if base_annotate is None: + continue + base_annos = _lazy_annotationlib.call_annotate_function( + base_annotate, format, owner=base) + annos.update(base_annos) + if own_annotate is not None: + own = _lazy_annotationlib.call_annotate_function( + own_annotate, format, owner=tp_dict) + if format != _lazy_annotationlib.Format.STRING: + own = { + n: _type_check(tp, msg, module=tp_dict.__module__) + for n, tp in own.items() + } + elif format == _lazy_annotationlib.Format.STRING: + own = _lazy_annotationlib.annotations_to_string(own_annotations) + elif format in (_lazy_annotationlib.Format.FORWARDREF, _lazy_annotationlib.Format.VALUE): + own = own_checked_annotations + else: + raise NotImplementedError(format) + annos.update(own) + return annos + + tp_dict.__annotate__ = __annotate__ + tp_dict.__required_keys__ = frozenset(required_keys) + tp_dict.__optional_keys__ = frozenset(optional_keys) + 
tp_dict.__readonly_keys__ = frozenset(readonly_keys) + tp_dict.__mutable_keys__ = frozenset(mutable_keys) + tp_dict.__total__ = total + return tp_dict + + __call__ = dict # static method + + def __subclasscheck__(cls, other): + # Typed dicts are only for static structural subtyping. + raise TypeError('TypedDict does not support instance and class checks') + + __instancecheck__ = __subclasscheck__ + + +def TypedDict(typename, fields=_sentinel, /, *, total=True): + """A simple typed namespace. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type such that a type checker will expect all + instances to have a certain set of keys, where each key is + associated with a value of a consistent type. This expectation + is not checked at runtime. + + Usage:: + + >>> class Point2D(TypedDict): + ... x: int + ... y: int + ... label: str + ... + >>> a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + >>> b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + >>> Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + True + + The type info can be accessed via the Point2D.__annotations__ dict, and + the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. + TypedDict supports an additional equivalent form:: + + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + + By default, all keys must be present in a TypedDict. It is possible + to override this by specifying totality:: + + class Point2D(TypedDict, total=False): + x: int + y: int + + This means that a Point2D TypedDict can have any of the keys omitted. A type + checker is only expected to support a literal False or True as the value of + the total argument. True is the default, and makes all items defined in the + class body be required. + + The Required and NotRequired special forms can also be used to mark + individual keys as being required or not required:: + + class Point2D(TypedDict): + x: int # the "x" key must always be present (Required is the default) + y: NotRequired[int] # the "y" key can be omitted + + See PEP 655 for more details on Required and NotRequired. + + The ReadOnly special form can be used + to mark individual keys as immutable for type checkers:: + + class DatabaseUser(TypedDict): + id: ReadOnly[int] # the "id" key must not be modified + username: str # the "username" key can be changed + + """ + if fields is _sentinel or fields is None: + import warnings + + if fields is _sentinel: + deprecated_thing = "Failing to pass a value for the 'fields' parameter" + else: + deprecated_thing = "Passing `None` as the 'fields' parameter" + + example = f"`{typename} = TypedDict({typename!r}, {{{{}}}})`" + deprecation_msg = ( + "{name} is deprecated and will be disallowed in Python {remove}. " + "To create a TypedDict class with 0 fields " + "using the functional syntax, " + "pass an empty dictionary, e.g. " + ) + example + "." + warnings._deprecated(deprecated_thing, message=deprecation_msg, remove=(3, 15)) + fields = {} + + ns = {'__annotations__': dict(fields)} + module = _caller() + if module is not None: + # Setting correct module is necessary to make typed dict classes pickleable. + ns['__module__'] = module + + td = _TypedDictMeta(typename, (), ns, total=total) + td.__orig_bases__ = (TypedDict,) + return td + +_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {}) +TypedDict.__mro_entries__ = lambda bases: (_TypedDict,) + + +@_SpecialForm +def Required(self, parameters): + """Special typing construct to mark a TypedDict key as required. 
+ + This is mainly useful for total=False TypedDicts. + + For example:: + + class Movie(TypedDict, total=False): + title: Required[str] + year: int + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. + """ + item = _type_check(parameters, f'{self._name} accepts only a single type.') + return _GenericAlias(self, (item,)) + + +@_SpecialForm +def NotRequired(self, parameters): + """Special typing construct to mark a TypedDict key as potentially missing. + + For example:: + + class Movie(TypedDict): + title: str + year: NotRequired[int] + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + """ + item = _type_check(parameters, f'{self._name} accepts only a single type.') + return _GenericAlias(self, (item,)) + + +@_SpecialForm +def ReadOnly(self, parameters): + """A special typing construct to mark an item of a TypedDict as read-only. + + For example:: + + class Movie(TypedDict): + title: ReadOnly[str] + year: int + + def mutate_movie(m: Movie) -> None: + m["year"] = 1992 # allowed + m["title"] = "The Matrix" # typechecker error + + There is no runtime checking for this property. + """ + item = _type_check(parameters, f'{self._name} accepts only a single type.') + return _GenericAlias(self, (item,)) + + +class NewType: + """NewType creates simple unique types with almost zero runtime overhead. + + NewType(name, tp) is considered a subtype of tp + by static type checkers. At runtime, NewType(name, tp) returns + a dummy callable that simply returns its argument. + + Usage:: + + UserId = NewType('UserId', int) + + def name_by_id(user_id: UserId) -> str: + ... + + UserId('user') # Fails type check + + name_by_id(42) # Fails type check + name_by_id(UserId(42)) # OK + + num = UserId(5) + 1 # type: int + """ + + __call__ = _idfunc + + def __init__(self, name, tp): + self.__qualname__ = name + if '.' in name: + name = name.rpartition('.')[-1] + self.__name__ = name + self.__supertype__ = tp + def_mod = _caller() + if def_mod != 'typing': + self.__module__ = def_mod + + def __mro_entries__(self, bases): + # We defined __mro_entries__ to get a better error message + # if a user attempts to subclass a NewType instance. bpo-46170 + superclass_name = self.__name__ + + class Dummy: + def __init_subclass__(cls): + subclass_name = cls.__name__ + raise TypeError( + f"Cannot subclass an instance of NewType. Perhaps you were looking for: " + f"`{subclass_name} = NewType({subclass_name!r}, {superclass_name})`" + ) + + return (Dummy,) + + def __repr__(self): + return f'{self.__module__}.{self.__qualname__}' + + def __reduce__(self): + return self.__qualname__ + + def __or__(self, other): + return Union[self, other] + + def __ror__(self, other): + return Union[other, self] + + +# Python-version-specific alias (Python 2: unicode; Python 3: str) +Text = str + + +# Constant that's True when type checking, but False here. +TYPE_CHECKING = False + + +class IO(Generic[AnyStr]): + """Generic base class for TextIO and BinaryIO. + + This is an abstract, generic version of the return of open(). + + NOTE: This does not distinguish between the different possible + classes (text vs. binary, read vs. write vs. read/write, + append-only, unbuffered). The TextIO and BinaryIO subclasses + below capture the distinctions between text vs. 
binary, which is + pervasive in the interface; however we currently do not offer a + way to track the other distinctions in the type system. + """ + + __slots__ = () + + @property + @abstractmethod + def mode(self) -> str: + pass + + @property + @abstractmethod + def name(self) -> str: + pass + + @abstractmethod + def close(self) -> None: + pass + + @property + @abstractmethod + def closed(self) -> bool: + pass + + @abstractmethod + def fileno(self) -> int: + pass + + @abstractmethod + def flush(self) -> None: + pass + + @abstractmethod + def isatty(self) -> bool: + pass + + @abstractmethod + def read(self, n: int = -1) -> AnyStr: + pass + + @abstractmethod + def readable(self) -> bool: + pass + + @abstractmethod + def readline(self, limit: int = -1) -> AnyStr: + pass + + @abstractmethod + def readlines(self, hint: int = -1) -> list[AnyStr]: + pass + + @abstractmethod + def seek(self, offset: int, whence: int = 0) -> int: + pass + + @abstractmethod + def seekable(self) -> bool: + pass + + @abstractmethod + def tell(self) -> int: + pass + + @abstractmethod + def truncate(self, size: int | None = None) -> int: + pass + + @abstractmethod + def writable(self) -> bool: + pass + + @abstractmethod + def write(self, s: AnyStr) -> int: + pass + + @abstractmethod + def writelines(self, lines: list[AnyStr]) -> None: + pass + + @abstractmethod + def __enter__(self) -> IO[AnyStr]: + pass + + @abstractmethod + def __exit__(self, type, value, traceback) -> None: + pass + + +class BinaryIO(IO[bytes]): + """Typed version of the return of open() in binary mode.""" + + __slots__ = () + + @abstractmethod + def write(self, s: bytes | bytearray) -> int: + pass + + @abstractmethod + def __enter__(self) -> BinaryIO: + pass + + +class TextIO(IO[str]): + """Typed version of the return of open() in text mode.""" + + __slots__ = () + + @property + @abstractmethod + def buffer(self) -> BinaryIO: + pass + + @property + @abstractmethod + def encoding(self) -> str: + pass + + @property + @abstractmethod + def errors(self) -> str | None: + pass + + @property + @abstractmethod + def line_buffering(self) -> bool: + pass + + @property + @abstractmethod + def newlines(self) -> Any: + pass + + @abstractmethod + def __enter__(self) -> TextIO: + pass + + +def reveal_type[T](obj: T, /) -> T: + """Ask a static type checker to reveal the inferred type of an expression. + + When a static type checker encounters a call to ``reveal_type()``, + it will emit the inferred type of the argument:: + + x: int = 1 + reveal_type(x) + + Running a static type checker (e.g., mypy) on this example + will produce output similar to 'Revealed type is "builtins.int"'. + + At runtime, the function prints the runtime type of the + argument and returns the argument unchanged. + """ + print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr) + return obj + + +class _IdentityCallable(Protocol): + def __call__[T](self, arg: T, /) -> T: + ... + + +def dataclass_transform( + *, + eq_default: bool = True, + order_default: bool = False, + kw_only_default: bool = False, + frozen_default: bool = False, + field_specifiers: tuple[type[Any] | Callable[..., Any], ...] = (), + **kwargs: Any, +) -> _IdentityCallable: + """Decorator to mark an object as providing dataclass-like behaviour. + + The decorator can be applied to a function, class, or metaclass. + + Example usage with a decorator function:: + + @dataclass_transform() + def create_model[T](cls: type[T]) -> type[T]: + ... 
+ return cls + + @create_model + class CustomerModel: + id: int + name: str + + On a base class:: + + @dataclass_transform() + class ModelBase: ... + + class CustomerModel(ModelBase): + id: int + name: str + + On a metaclass:: + + @dataclass_transform() + class ModelMeta(type): ... + + class ModelBase(metaclass=ModelMeta): ... + + class CustomerModel(ModelBase): + id: int + name: str + + The ``CustomerModel`` classes defined above will + be treated by type checkers similarly to classes created with + ``@dataclasses.dataclass``. + For example, type checkers will assume these classes have + ``__init__`` methods that accept ``id`` and ``name``. + + The arguments to this decorator can be used to customize this behavior: + - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be + ``True`` or ``False`` if it is omitted by the caller. + - ``order_default`` indicates whether the ``order`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``kw_only_default`` indicates whether the ``kw_only`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``frozen_default`` indicates whether the ``frozen`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``field_specifiers`` specifies a static list of supported classes + or functions that describe fields, similar to ``dataclasses.field()``. + - Arbitrary other keyword arguments are accepted in order to allow for + possible future extensions. + + At runtime, this decorator records its arguments in the + ``__dataclass_transform__`` attribute on the decorated object. + It has no other runtime effect. + + See PEP 681 for more details. + """ + def decorator(cls_or_fn): + cls_or_fn.__dataclass_transform__ = { + "eq_default": eq_default, + "order_default": order_default, + "kw_only_default": kw_only_default, + "frozen_default": frozen_default, + "field_specifiers": field_specifiers, + "kwargs": kwargs, + } + return cls_or_fn + return decorator + + +type _Func = Callable[..., Any] + + +def override[F: _Func](method: F, /) -> F: + """Indicate that a method is intended to override a method in a base class. + + Usage:: + + class Base: + def method(self) -> None: + pass + + class Child(Base): + @override + def method(self) -> None: + super().method() + + When this decorator is applied to a method, the type checker will + validate that it overrides a method or attribute with the same name on a + base class. This helps prevent bugs that may occur when a base class is + changed without an equivalent change to a child class. + + There is no runtime checking of this property. The decorator attempts to + set the ``__override__`` attribute to ``True`` on the decorated object to + allow runtime introspection. + + See PEP 698 for details. + """ + try: + method.__override__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. + pass + return method + + +def is_protocol(tp: type, /) -> bool: + """Return True if the given type is a Protocol. + + Example:: + + >>> from typing import Protocol, is_protocol + >>> class P(Protocol): + ... def a(self) -> str: ... + ... 
b: int + >>> is_protocol(P) + True + >>> is_protocol(int) + False + """ + return ( + isinstance(tp, type) + and getattr(tp, '_is_protocol', False) + and tp != Protocol + ) + + +def get_protocol_members(tp: type, /) -> frozenset[str]: + """Return the set of members defined in a Protocol. + + Example:: + + >>> from typing import Protocol, get_protocol_members + >>> class P(Protocol): + ... def a(self) -> str: ... + ... b: int + >>> get_protocol_members(P) == frozenset({'a', 'b'}) + True + + Raise a TypeError for arguments that are not Protocols. + """ + if not is_protocol(tp): + raise TypeError(f'{tp!r} is not a Protocol') + return frozenset(tp.__protocol_attrs__) + + +def __getattr__(attr): + """Improve the import time of the typing module. + + Soft-deprecated objects which are costly to create + are only created on-demand here. + """ + if attr == "ForwardRef": + obj = _lazy_annotationlib.ForwardRef + elif attr in {"Pattern", "Match"}: + import re + obj = _alias(getattr(re, attr), 1) + elif attr in {"ContextManager", "AsyncContextManager"}: + import contextlib + obj = _alias(getattr(contextlib, f"Abstract{attr}"), 2, name=attr, defaults=(bool | None,)) + elif attr == "_collect_parameters": + import warnings + + depr_message = ( + "The private _collect_parameters function is deprecated and will be" + " removed in a future version of Python. Any use of private functions" + " is discouraged and may break in the future." + ) + warnings.warn(depr_message, category=DeprecationWarning, stacklevel=2) + obj = _collect_type_parameters + else: + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") + globals()[attr] = obj + return obj diff --git a/Python314_4_x64_Template/Lib/unittest/__init__.py b/Python314_4_x64_Template/Lib/unittest/__init__.py new file mode 100644 index 00000000..78ff6bb4 --- /dev/null +++ b/Python314_4_x64_Template/Lib/unittest/__init__.py @@ -0,0 +1,80 @@ +""" +Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's +Smalltalk testing framework (used with permission). + +This module contains the core framework classes that form the basis of +specific test cases and suites (TestCase, TestSuite etc.), and also a +text-based utility class for running the tests and reporting the results + (TextTestRunner). + +Simple usage: + + import unittest + + class IntegerArithmeticTestCase(unittest.TestCase): + def testAdd(self): # test method names begin with 'test' + self.assertEqual((1 + 2), 3) + self.assertEqual(0 + 1, 1) + def testMultiply(self): + self.assertEqual((0 * 10), 0) + self.assertEqual((5 * 8), 40) + + if __name__ == '__main__': + unittest.main() + +Further information is available in the bundled documentation, and from + + http://docs.python.org/library/unittest.html + +Copyright (c) 1999-2003 Steve Purcell +Copyright (c) 2003 Python Software Foundation +This module is free software, and you may redistribute it and/or modify +it under the same terms as Python itself, so long as this copyright message +and disclaimer are retained in their original form. + +IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, +SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF +THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE. 
THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, +AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, +SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. +""" + +__all__ = ['TestResult', 'TestCase', 'IsolatedAsyncioTestCase', 'TestSuite', + 'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main', + 'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless', + 'expectedFailure', 'TextTestResult', 'installHandler', + 'registerResult', 'removeResult', 'removeHandler', + 'addModuleCleanup', 'doModuleCleanups', 'enterModuleContext'] + +__unittest = True + +from .result import TestResult +from .case import (addModuleCleanup, TestCase, FunctionTestCase, SkipTest, skip, + skipIf, skipUnless, expectedFailure, doModuleCleanups, + enterModuleContext) +from .suite import BaseTestSuite, TestSuite # noqa: F401 +from .loader import TestLoader, defaultTestLoader +from .main import TestProgram, main # noqa: F401 +from .runner import TextTestRunner, TextTestResult +from .signals import installHandler, registerResult, removeResult, removeHandler +# IsolatedAsyncioTestCase will be imported lazily. + + +# Lazy import of IsolatedAsyncioTestCase from .async_case +# It imports asyncio, which is relatively heavy, but most tests +# do not need it. + +def __dir__(): + return globals().keys() | {'IsolatedAsyncioTestCase'} + +def __getattr__(name): + if name == 'IsolatedAsyncioTestCase': + global IsolatedAsyncioTestCase + from .async_case import IsolatedAsyncioTestCase + return IsolatedAsyncioTestCase + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/Python313_13_x64_Template/Lib/unittest/__main__.py b/Python314_4_x64_Template/Lib/unittest/__main__.py similarity index 100% rename from Python313_13_x64_Template/Lib/unittest/__main__.py rename to Python314_4_x64_Template/Lib/unittest/__main__.py diff --git a/Python313_13_x64_Template/Lib/unittest/_log.py b/Python314_4_x64_Template/Lib/unittest/_log.py similarity index 100% rename from Python313_13_x64_Template/Lib/unittest/_log.py rename to Python314_4_x64_Template/Lib/unittest/_log.py diff --git a/Python314_4_x64_Template/Lib/unittest/async_case.py b/Python314_4_x64_Template/Lib/unittest/async_case.py new file mode 100644 index 00000000..a1c0d6c3 --- /dev/null +++ b/Python314_4_x64_Template/Lib/unittest/async_case.py @@ -0,0 +1,158 @@ +import asyncio +import contextvars +import inspect +import warnings + +from .case import TestCase + +__unittest = True + +class IsolatedAsyncioTestCase(TestCase): + # Names intentionally have a long prefix + # to reduce a chance of clashing with user-defined attributes + # from inherited test case + # + # The class doesn't call loop.run_until_complete(self.setUp()) and family + # but uses a different approach: + # 1. create a long-running task that reads self.setUp() + # awaitable from queue along with a future + # 2. await the awaitable object passing in and set the result + # into the future object + # 3. Outer code puts the awaitable and the future object into a queue + # with waiting for the future + # The trick is necessary because every run_until_complete() call + # creates a new task with embedded ContextVar context. + # To share contextvars between setUp(), test and tearDown() we need to execute + # them inside the same task. + + # Note: the test case modifies event loop policy if the policy was not instantiated + # yet, unless loop_factory=asyncio.EventLoop is set. 
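+    # (When loop_factory is set, asyncio.Runner builds the loop directly
+    # from the factory and the policy machinery is never consulted.)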
+ # asyncio.get_event_loop_policy() creates a default policy on demand but never + # returns None + # I believe this is not an issue in user level tests but python itself for testing + # should reset a policy in every test module + # by calling asyncio.set_event_loop_policy(None) in tearDownModule() + # or set loop_factory=asyncio.EventLoop + + loop_factory = None + + def __init__(self, methodName='runTest'): + super().__init__(methodName) + self._asyncioRunner = None + self._asyncioTestContext = contextvars.copy_context() + + async def asyncSetUp(self): + pass + + async def asyncTearDown(self): + pass + + def addAsyncCleanup(self, func, /, *args, **kwargs): + # A trivial trampoline to addCleanup() + # the function exists because it has a different semantics + # and signature: + # addCleanup() accepts regular functions + # but addAsyncCleanup() accepts coroutines + # + # We intentionally don't add inspect.iscoroutinefunction() check + # for func argument because there is no way + # to check for async function reliably: + # 1. It can be "async def func()" itself + # 2. Class can implement "async def __call__()" method + # 3. Regular "def func()" that returns awaitable object + self.addCleanup(*(func, *args), **kwargs) + + async def enterAsyncContext(self, cm): + """Enters the supplied asynchronous context manager. + + If successful, also adds its __aexit__ method as a cleanup + function and returns the result of the __aenter__ method. + """ + # We look up the special methods on the type to match the with + # statement. + cls = type(cm) + try: + enter = cls.__aenter__ + exit = cls.__aexit__ + except AttributeError: + msg = (f"'{cls.__module__}.{cls.__qualname__}' object does " + "not support the asynchronous context manager protocol") + try: + cls.__enter__ + cls.__exit__ + except AttributeError: + pass + else: + msg += (" but it supports the context manager protocol. " + "Did you mean to use enterContext()?") + raise TypeError(msg) from None + result = await enter(cm) + self.addAsyncCleanup(exit, cm, None, None, None) + return result + + def _callSetUp(self): + # Force loop to be initialized and set as the current loop + # so that setUp functions can use get_event_loop() and get the + # correct loop instance. 
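+        # The runner creates its event loop lazily; calling get_loop() here
+        # forces that creation before setUp() runs.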
+ self._asyncioRunner.get_loop() + self._asyncioTestContext.run(self.setUp) + self._callAsync(self.asyncSetUp) + + def _callTestMethod(self, method): + result = self._callMaybeAsync(method) + if result is not None: + msg = ( + f'It is deprecated to return a value that is not None ' + f'from a test case ({method} returned {type(result).__name__!r})', + ) + warnings.warn(msg, DeprecationWarning, stacklevel=4) + + def _callTearDown(self): + self._callAsync(self.asyncTearDown) + self._asyncioTestContext.run(self.tearDown) + + def _callCleanup(self, function, *args, **kwargs): + self._callMaybeAsync(function, *args, **kwargs) + + def _callAsync(self, func, /, *args, **kwargs): + assert self._asyncioRunner is not None, 'asyncio runner is not initialized' + assert inspect.iscoroutinefunction(func), f'{func!r} is not an async function' + return self._asyncioRunner.run( + func(*args, **kwargs), + context=self._asyncioTestContext + ) + + def _callMaybeAsync(self, func, /, *args, **kwargs): + assert self._asyncioRunner is not None, 'asyncio runner is not initialized' + if inspect.iscoroutinefunction(func): + return self._asyncioRunner.run( + func(*args, **kwargs), + context=self._asyncioTestContext, + ) + else: + return self._asyncioTestContext.run(func, *args, **kwargs) + + def _setupAsyncioRunner(self): + assert self._asyncioRunner is None, 'asyncio runner is already initialized' + runner = asyncio.Runner(debug=True, loop_factory=self.loop_factory) + self._asyncioRunner = runner + + def _tearDownAsyncioRunner(self): + runner = self._asyncioRunner + runner.close() + + def run(self, result=None): + self._setupAsyncioRunner() + try: + return super().run(result) + finally: + self._tearDownAsyncioRunner() + + def debug(self): + self._setupAsyncioRunner() + super().debug() + self._tearDownAsyncioRunner() + + def __del__(self): + if self._asyncioRunner is not None: + self._tearDownAsyncioRunner() diff --git a/Python314_4_x64_Template/Lib/unittest/case.py b/Python314_4_x64_Template/Lib/unittest/case.py new file mode 100644 index 00000000..884fc1b2 --- /dev/null +++ b/Python314_4_x64_Template/Lib/unittest/case.py @@ -0,0 +1,1628 @@ +"""Test case implementation""" + +import sys +import functools +import difflib +import pprint +import re +import warnings +import collections +import contextlib +import traceback +import time +import types + +from . import result +from .util import (strclass, safe_repr, _count_diff_all_purpose, + _count_diff_hashable, _common_shorten_repr) + +__unittest = True + +_subtest_msg_sentinel = object() + +DIFF_OMITTED = ('\nDiff is %s characters long. ' + 'Set self.maxDiff to None to see it.') + +class SkipTest(Exception): + """ + Raise this exception in a test to skip it. + + Usually you can use TestCase.skipTest() or one of the skipping decorators + instead of raising this directly. + """ + +class _ShouldStop(Exception): + """ + The test should stop. + """ + +class _UnexpectedSuccess(Exception): + """ + The test was supposed to fail, but it didn't! 
+ """ + + +class _Outcome(object): + def __init__(self, result=None): + self.expecting_failure = False + self.result = result + self.result_supports_subtests = hasattr(result, "addSubTest") + self.success = True + self.expectedFailure = None + + @contextlib.contextmanager + def testPartExecutor(self, test_case, subTest=False): + old_success = self.success + self.success = True + try: + yield + except KeyboardInterrupt: + raise + except SkipTest as e: + self.success = False + _addSkip(self.result, test_case, str(e)) + except _ShouldStop: + pass + except: + exc_info = sys.exc_info() + if self.expecting_failure: + self.expectedFailure = exc_info + else: + self.success = False + if subTest: + self.result.addSubTest(test_case.test_case, test_case, exc_info) + else: + _addError(self.result, test_case, exc_info) + # explicitly break a reference cycle: + # exc_info -> frame -> exc_info + exc_info = None + else: + if subTest and self.success: + self.result.addSubTest(test_case.test_case, test_case, None) + finally: + self.success = self.success and old_success + + +def _addSkip(result, test_case, reason): + addSkip = getattr(result, 'addSkip', None) + if addSkip is not None: + addSkip(test_case, reason) + else: + warnings.warn("TestResult has no addSkip method, skips not reported", + RuntimeWarning, 2) + result.addSuccess(test_case) + +def _addError(result, test, exc_info): + if result is not None and exc_info is not None: + if issubclass(exc_info[0], test.failureException): + result.addFailure(test, exc_info) + else: + result.addError(test, exc_info) + +def _id(obj): + return obj + + +def _enter_context(cm, addcleanup): + # We look up the special methods on the type to match the with + # statement. + cls = type(cm) + try: + enter = cls.__enter__ + exit = cls.__exit__ + except AttributeError: + msg = (f"'{cls.__module__}.{cls.__qualname__}' object does " + "not support the context manager protocol") + try: + cls.__aenter__ + cls.__aexit__ + except AttributeError: + pass + else: + msg += (" but it supports the asynchronous context manager " + "protocol. Did you mean to use enterAsyncContext()?") + raise TypeError(msg) from None + result = enter(cm) + addcleanup(exit, cm, None, None, None) + return result + + +_module_cleanups = [] +def addModuleCleanup(function, /, *args, **kwargs): + """Same as addCleanup, except the cleanup items are called even if + setUpModule fails (unlike tearDownModule).""" + _module_cleanups.append((function, args, kwargs)) + +def enterModuleContext(cm): + """Same as enterContext, but module-wide.""" + return _enter_context(cm, addModuleCleanup) + + +def doModuleCleanups(): + """Execute all module cleanup functions. Normally called for you after + tearDownModule.""" + exceptions = [] + while _module_cleanups: + function, args, kwargs = _module_cleanups.pop() + try: + function(*args, **kwargs) + except Exception as exc: + exceptions.append(exc) + if exceptions: + # Swallows all but first exception. If a multi-exception handler + # gets written we should use that here instead. + raise exceptions[0] + + +def skip(reason): + """ + Unconditionally skip a test. 
+ """ + def decorator(test_item): + if not isinstance(test_item, type): + @functools.wraps(test_item) + def skip_wrapper(*args, **kwargs): + raise SkipTest(reason) + test_item = skip_wrapper + + test_item.__unittest_skip__ = True + test_item.__unittest_skip_why__ = reason + return test_item + if isinstance(reason, types.FunctionType): + test_item = reason + reason = '' + return decorator(test_item) + return decorator + +def skipIf(condition, reason): + """ + Skip a test if the condition is true. + """ + if condition: + return skip(reason) + return _id + +def skipUnless(condition, reason): + """ + Skip a test unless the condition is true. + """ + if not condition: + return skip(reason) + return _id + +def expectedFailure(test_item): + test_item.__unittest_expecting_failure__ = True + return test_item + +def _is_subtype(expected, basetype): + if isinstance(expected, tuple): + return all(_is_subtype(e, basetype) for e in expected) + return isinstance(expected, type) and issubclass(expected, basetype) + +class _BaseTestCaseContext: + + def __init__(self, test_case): + self.test_case = test_case + + def _raiseFailure(self, standardMsg): + msg = self.test_case._formatMessage(self.msg, standardMsg) + raise self.test_case.failureException(msg) + +class _AssertRaisesBaseContext(_BaseTestCaseContext): + + def __init__(self, expected, test_case, expected_regex=None): + _BaseTestCaseContext.__init__(self, test_case) + self.expected = expected + self.test_case = test_case + if expected_regex is not None: + expected_regex = re.compile(expected_regex) + self.expected_regex = expected_regex + self.obj_name = None + self.msg = None + + def handle(self, name, args, kwargs): + """ + If args is empty, assertRaises/Warns is being used as a + context manager, so check for a 'msg' kwarg and return self. + If args is not empty, call a callable passing positional and keyword + arguments. 
+ """ + try: + if not _is_subtype(self.expected, self._base_type): + raise TypeError('%s() arg 1 must be %s' % + (name, self._base_type_str)) + if not args: + self.msg = kwargs.pop('msg', None) + if kwargs: + raise TypeError('%r is an invalid keyword argument for ' + 'this function' % (next(iter(kwargs)),)) + return self + + callable_obj, *args = args + try: + self.obj_name = callable_obj.__name__ + except AttributeError: + self.obj_name = str(callable_obj) + with self: + callable_obj(*args, **kwargs) + finally: + # bpo-23890: manually break a reference cycle + self = None + + +class _AssertRaisesContext(_AssertRaisesBaseContext): + """A context manager used to implement TestCase.assertRaises* methods.""" + + _base_type = BaseException + _base_type_str = 'an exception type or tuple of exception types' + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + if exc_type is None: + try: + exc_name = self.expected.__name__ + except AttributeError: + exc_name = str(self.expected) + if self.obj_name: + self._raiseFailure("{} not raised by {}".format(exc_name, + self.obj_name)) + else: + self._raiseFailure("{} not raised".format(exc_name)) + else: + traceback.clear_frames(tb) + if not issubclass(exc_type, self.expected): + # let unexpected exceptions pass through + return False + # store exception, without traceback, for later retrieval + self.exception = exc_value.with_traceback(None) + if self.expected_regex is None: + return True + + expected_regex = self.expected_regex + if not expected_regex.search(str(exc_value)): + self._raiseFailure('"{}" does not match "{}"'.format( + expected_regex.pattern, str(exc_value))) + return True + + __class_getitem__ = classmethod(types.GenericAlias) + + +class _AssertWarnsContext(_AssertRaisesBaseContext): + """A context manager used to implement TestCase.assertWarns* methods.""" + + _base_type = Warning + _base_type_str = 'a warning type or tuple of warning types' + + def __enter__(self): + # The __warningregistry__'s need to be in a pristine state for tests + # to work properly. 
+ for v in list(sys.modules.values()): + if getattr(v, '__warningregistry__', None): + v.__warningregistry__ = {} + self.warnings_manager = warnings.catch_warnings(record=True) + self.warnings = self.warnings_manager.__enter__() + warnings.simplefilter("always", self.expected) + return self + + def __exit__(self, exc_type, exc_value, tb): + self.warnings_manager.__exit__(exc_type, exc_value, tb) + if exc_type is not None: + # let unexpected exceptions pass through + return + try: + exc_name = self.expected.__name__ + except AttributeError: + exc_name = str(self.expected) + first_matching = None + for m in self.warnings: + w = m.message + if not isinstance(w, self.expected): + continue + if first_matching is None: + first_matching = w + if (self.expected_regex is not None and + not self.expected_regex.search(str(w))): + continue + # store warning for later retrieval + self.warning = w + self.filename = m.filename + self.lineno = m.lineno + return + # Now we simply try to choose a helpful failure message + if first_matching is not None: + self._raiseFailure('"{}" does not match "{}"'.format( + self.expected_regex.pattern, str(first_matching))) + if self.obj_name: + self._raiseFailure("{} not triggered by {}".format(exc_name, + self.obj_name)) + else: + self._raiseFailure("{} not triggered".format(exc_name)) + + +class _AssertNotWarnsContext(_AssertWarnsContext): + + def __exit__(self, exc_type, exc_value, tb): + self.warnings_manager.__exit__(exc_type, exc_value, tb) + if exc_type is not None: + # let unexpected exceptions pass through + return + try: + exc_name = self.expected.__name__ + except AttributeError: + exc_name = str(self.expected) + for m in self.warnings: + w = m.message + if isinstance(w, self.expected): + self._raiseFailure(f"{exc_name} triggered") + + +class _OrderedChainMap(collections.ChainMap): + def __iter__(self): + seen = set() + for mapping in self.maps: + for k in mapping: + if k not in seen: + seen.add(k) + yield k + + +class TestCase(object): + """A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. 
It is looked up as an instance + attribute so can be configured by individual tests if required. + """ + + failureException = AssertionError + + longMessage = True + + maxDiff = 80*8 + + # If a string is longer than _diffThreshold, use normal comparison instead + # of difflib. See #11763. + _diffThreshold = 2**16 + + def __init_subclass__(cls, *args, **kwargs): + # Attribute used by TestSuite for classSetUp + cls._classSetupFailed = False + cls._class_cleanups = [] + super().__init_subclass__(*args, **kwargs) + + def __init__(self, methodName='runTest'): + """Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + """ + self._testMethodName = methodName + self._outcome = None + self._testMethodDoc = 'No test' + try: + testMethod = getattr(self, methodName) + except AttributeError: + if methodName != 'runTest': + # we allow instantiation with no explicit method name + # but not an *incorrect* or missing method name + raise ValueError("no such test method in %s: %s" % + (self.__class__, methodName)) + else: + self._testMethodDoc = testMethod.__doc__ + self._cleanups = [] + self._subtest = None + + # Map types to custom assertEqual functions that will compare + # instances of said type in more detail to generate a more useful + # error message. + self._type_equality_funcs = {} + self.addTypeEqualityFunc(dict, 'assertDictEqual') + self.addTypeEqualityFunc(list, 'assertListEqual') + self.addTypeEqualityFunc(tuple, 'assertTupleEqual') + self.addTypeEqualityFunc(set, 'assertSetEqual') + self.addTypeEqualityFunc(frozenset, 'assertSetEqual') + self.addTypeEqualityFunc(str, 'assertMultiLineEqual') + + def addTypeEqualityFunc(self, typeobj, function): + """Add a type specific assertEqual style function to compare a type. + + This method is for use by TestCase subclasses that need to register + their own type equality functions to provide nicer error messages. + + Args: + typeobj: The data type to call this function on when both values + are of the same type in assertEqual(). + function: The callable taking two arguments and an optional + msg= argument that raises self.failureException with a + useful error message when the two arguments are not equal. + """ + self._type_equality_funcs[typeobj] = function + + def addCleanup(self, function, /, *args, **kwargs): + """Add a function, with arguments, to be called when the test is + completed. Functions added are called on a LIFO basis and are + called after tearDown on test failure or success. + + Cleanup items are called even if setUp fails (unlike tearDown).""" + self._cleanups.append((function, args, kwargs)) + + def enterContext(self, cm): + """Enters the supplied context manager. + + If successful, also adds its __exit__ method as a cleanup + function and returns the result of the __enter__ method. + """ + return _enter_context(cm, self.addCleanup) + + @classmethod + def addClassCleanup(cls, function, /, *args, **kwargs): + """Same as addCleanup, except the cleanup items are called even if + setUpClass fails (unlike tearDownClass).""" + cls._class_cleanups.append((function, args, kwargs)) + + @classmethod + def enterClassContext(cls, cm): + """Same as enterContext, but class-wide.""" + return _enter_context(cm, cls.addClassCleanup) + + def setUp(self): + "Hook method for setting up the test fixture before exercising it." + pass + + def tearDown(self): + "Hook method for deconstructing the test fixture after testing it." 
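A hedged sketch of how the fixture hooks and the cleanup machinery above combine; the temporary-directory fixture is an assumed example, not taken from this diff:

import os
import tempfile
import unittest

class FixtureDemo(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Class cleanups run even if setUpClass fails (unlike tearDownClass).
        cls.addClassCleanup(print, "class cleanup ran")

    def setUp(self):
        # enterContext() calls __enter__ now and registers __exit__ via
        # addCleanup(), so the directory is removed even if setUp fails later.
        self.workdir = self.enterContext(tempfile.TemporaryDirectory())
        self.addCleanup(print, "instance cleanups run LIFO, after tearDown")

    def test_workdir_exists(self):
        self.assertTrue(os.path.isdir(self.workdir))

if __name__ == "__main__":
    unittest.main()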
+ pass + + @classmethod + def setUpClass(cls): + "Hook method for setting up class fixture before running tests in the class." + + @classmethod + def tearDownClass(cls): + "Hook method for deconstructing the class fixture after running all tests in the class." + + def countTestCases(self): + return 1 + + def defaultTestResult(self): + return result.TestResult() + + def shortDescription(self): + """Returns a one-line description of the test, or None if no + description has been provided. + + The default implementation of this method returns the first line of + the specified test method's docstring. + """ + doc = self._testMethodDoc + return doc.strip().split("\n")[0].strip() if doc else None + + + def id(self): + return "%s.%s" % (strclass(self.__class__), self._testMethodName) + + def __eq__(self, other): + if type(self) is not type(other): + return NotImplemented + + return self._testMethodName == other._testMethodName + + def __hash__(self): + return hash((type(self), self._testMethodName)) + + def __str__(self): + return "%s (%s.%s)" % (self._testMethodName, strclass(self.__class__), self._testMethodName) + + def __repr__(self): + return "<%s testMethod=%s>" % \ + (strclass(self.__class__), self._testMethodName) + + @contextlib.contextmanager + def subTest(self, msg=_subtest_msg_sentinel, **params): + """Return a context manager that will return the enclosed block + of code in a subtest identified by the optional message and + keyword parameters. A failure in the subtest marks the test + case as failed but resumes execution at the end of the enclosed + block, allowing further test code to be executed. + """ + if self._outcome is None or not self._outcome.result_supports_subtests: + yield + return + parent = self._subtest + if parent is None: + params_map = _OrderedChainMap(params) + else: + params_map = parent.params.new_child(params) + self._subtest = _SubTest(self, msg, params_map) + try: + with self._outcome.testPartExecutor(self._subtest, subTest=True): + yield + if not self._outcome.success: + result = self._outcome.result + if result is not None and result.failfast: + raise _ShouldStop + elif self._outcome.expectedFailure: + # If the test is expecting a failure, we really want to + # stop now and register the expected failure. + raise _ShouldStop + finally: + self._subtest = parent + + def _addExpectedFailure(self, result, exc_info): + try: + addExpectedFailure = result.addExpectedFailure + except AttributeError: + warnings.warn("TestResult has no addExpectedFailure method, reporting as passes", + RuntimeWarning) + result.addSuccess(self) + else: + addExpectedFailure(self, exc_info) + + def _addUnexpectedSuccess(self, result): + try: + addUnexpectedSuccess = result.addUnexpectedSuccess + except AttributeError: + warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failure", + RuntimeWarning) + # We need to pass an actual exception and traceback to addFailure, + # otherwise the legacy result can choke. 
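To make the subTest() semantics above concrete — each failing parameter set is reported separately while the loop keeps running — a small sketch with invented values:

import unittest

class SubTestDemo(unittest.TestCase):
    def test_even(self):
        for i in (0, 2, 4):
            # Each parameter set becomes a _SubTest with its own params map;
            # a failure marks the test as failed, but execution resumes at
            # the end of the 'with' block instead of aborting the loop.
            with self.subTest(i=i):
                self.assertEqual(i % 2, 0)

if __name__ == "__main__":
    unittest.main()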
+ try: + raise _UnexpectedSuccess from None + except _UnexpectedSuccess: + result.addFailure(self, sys.exc_info()) + else: + addUnexpectedSuccess(self) + + def _addDuration(self, result, elapsed): + try: + addDuration = result.addDuration + except AttributeError: + warnings.warn("TestResult has no addDuration method", + RuntimeWarning) + else: + addDuration(self, elapsed) + + def _callSetUp(self): + self.setUp() + + def _callTestMethod(self, method): + result = method() + if result is not None: + import inspect + msg = ( + f'It is deprecated to return a value that is not None ' + f'from a test case ({method} returned {type(result).__name__!r})' + ) + if inspect.iscoroutine(result): + msg += ( + '. Maybe you forgot to use IsolatedAsyncioTestCase as the base class?' + ) + warnings.warn(msg, DeprecationWarning, stacklevel=3) + + def _callTearDown(self): + self.tearDown() + + def _callCleanup(self, function, /, *args, **kwargs): + function(*args, **kwargs) + + def run(self, result=None): + if result is None: + result = self.defaultTestResult() + startTestRun = getattr(result, 'startTestRun', None) + stopTestRun = getattr(result, 'stopTestRun', None) + if startTestRun is not None: + startTestRun() + else: + stopTestRun = None + + result.startTest(self) + try: + testMethod = getattr(self, self._testMethodName) + if (getattr(self.__class__, "__unittest_skip__", False) or + getattr(testMethod, "__unittest_skip__", False)): + # If the class or method was skipped. + skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') + or getattr(testMethod, '__unittest_skip_why__', '')) + _addSkip(result, self, skip_why) + return result + + expecting_failure = ( + getattr(self, "__unittest_expecting_failure__", False) or + getattr(testMethod, "__unittest_expecting_failure__", False) + ) + outcome = _Outcome(result) + start_time = time.perf_counter() + try: + self._outcome = outcome + + with outcome.testPartExecutor(self): + self._callSetUp() + if outcome.success: + outcome.expecting_failure = expecting_failure + with outcome.testPartExecutor(self): + self._callTestMethod(testMethod) + outcome.expecting_failure = False + with outcome.testPartExecutor(self): + self._callTearDown() + self.doCleanups() + self._addDuration(result, (time.perf_counter() - start_time)) + + if outcome.success: + if expecting_failure: + if outcome.expectedFailure: + self._addExpectedFailure(result, outcome.expectedFailure) + else: + self._addUnexpectedSuccess(result) + else: + result.addSuccess(self) + return result + finally: + # explicitly break reference cycle: + # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure + outcome.expectedFailure = None + outcome = None + + # clear the outcome, no more needed + self._outcome = None + + finally: + result.stopTest(self) + if stopTestRun is not None: + stopTestRun() + + def doCleanups(self): + """Execute all cleanup functions. Normally called for you after + tearDown.""" + outcome = self._outcome or _Outcome() + while self._cleanups: + function, args, kwargs = self._cleanups.pop() + with outcome.testPartExecutor(self): + self._callCleanup(function, *args, **kwargs) + + # return this for backwards compatibility + # even though we no longer use it internally + return outcome.success + + @classmethod + def doClassCleanups(cls): + """Execute all class cleanup functions. 
Normally called for you after + tearDownClass.""" + cls.tearDown_exceptions = [] + while cls._class_cleanups: + function, args, kwargs = cls._class_cleanups.pop() + try: + function(*args, **kwargs) + except Exception: + cls.tearDown_exceptions.append(sys.exc_info()) + + def __call__(self, *args, **kwds): + return self.run(*args, **kwds) + + def debug(self): + """Run the test without collecting errors in a TestResult""" + testMethod = getattr(self, self._testMethodName) + if (getattr(self.__class__, "__unittest_skip__", False) or + getattr(testMethod, "__unittest_skip__", False)): + # If the class or method was skipped. + skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') + or getattr(testMethod, '__unittest_skip_why__', '')) + raise SkipTest(skip_why) + + self._callSetUp() + self._callTestMethod(testMethod) + self._callTearDown() + while self._cleanups: + function, args, kwargs = self._cleanups.pop() + self._callCleanup(function, *args, **kwargs) + + def skipTest(self, reason): + """Skip this test.""" + raise SkipTest(reason) + + def fail(self, msg=None): + """Fail immediately, with the given message.""" + raise self.failureException(msg) + + def assertFalse(self, expr, msg=None): + """Check that the expression is false.""" + if expr: + msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr)) + raise self.failureException(msg) + + def assertTrue(self, expr, msg=None): + """Check that the expression is true.""" + if not expr: + msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr)) + raise self.failureException(msg) + + def _formatMessage(self, msg, standardMsg): + """Honour the longMessage attribute when generating failure messages. + If longMessage is False this means: + * Use only an explicit message if it is provided + * Otherwise use the standard message for the assert + + If longMessage is True: + * Use the standard message + * If an explicit message is provided, plus ' : ' and the explicit message + """ + if not self.longMessage: + return msg or standardMsg + if msg is None: + return standardMsg + try: + # don't switch to '{}' formatting in Python 2.X + # it changes the way unicode input is handled + return '%s : %s' % (standardMsg, msg) + except UnicodeDecodeError: + return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg)) + + def assertRaises(self, expected_exception, *args, **kwargs): + """Fail unless an exception of class expected_exception is raised + by the callable when invoked with specified positional and + keyword arguments. If a different type of exception is + raised, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + + If called with the callable and arguments omitted, will return a + context object used like this:: + + with self.assertRaises(SomeException): + do_something() + + An optional keyword argument 'msg' can be provided when assertRaises + is used as a context object. + + The context manager keeps a reference to the exception as + the 'exception' attribute. 
This allows you to inspect the + exception after the assertion:: + + with self.assertRaises(SomeException) as cm: + do_something() + the_exception = cm.exception + self.assertEqual(the_exception.error_code, 3) + """ + context = _AssertRaisesContext(expected_exception, self) + try: + return context.handle('assertRaises', args, kwargs) + finally: + # bpo-23890: manually break a reference cycle + context = None + + def assertWarns(self, expected_warning, *args, **kwargs): + """Fail unless a warning of class warnClass is triggered + by the callable when invoked with specified positional and + keyword arguments. If a different type of warning is + triggered, it will not be handled: depending on the other + warning filtering rules in effect, it might be silenced, printed + out, or raised as an exception. + + If called with the callable and arguments omitted, will return a + context object used like this:: + + with self.assertWarns(SomeWarning): + do_something() + + An optional keyword argument 'msg' can be provided when assertWarns + is used as a context object. + + The context manager keeps a reference to the first matching + warning as the 'warning' attribute; similarly, the 'filename' + and 'lineno' attributes give you information about the line + of Python code from which the warning was triggered. + This allows you to inspect the warning after the assertion:: + + with self.assertWarns(SomeWarning) as cm: + do_something() + the_warning = cm.warning + self.assertEqual(the_warning.some_attribute, 147) + """ + context = _AssertWarnsContext(expected_warning, self) + return context.handle('assertWarns', args, kwargs) + + def _assertNotWarns(self, expected_warning, *args, **kwargs): + """The opposite of assertWarns. Private due to low demand.""" + context = _AssertNotWarnsContext(expected_warning, self) + return context.handle('_assertNotWarns', args, kwargs) + + def assertLogs(self, logger=None, level=None): + """Fail unless a log message of level *level* or higher is emitted + on *logger_name* or its children. If omitted, *level* defaults to + INFO and *logger* defaults to the root logger. + + This method must be used as a context manager, and will yield + a recording object with two attributes: `output` and `records`. + At the end of the context manager, the `output` attribute will + be a list of the matching formatted log messages and the + `records` attribute will be a list of the corresponding LogRecord + objects. + + Example:: + + with self.assertLogs('foo', level='INFO') as cm: + logging.getLogger('foo').info('first message') + logging.getLogger('foo.bar').error('second message') + self.assertEqual(cm.output, ['INFO:foo:first message', + 'ERROR:foo.bar:second message']) + """ + # Lazy import to avoid importing logging if it is not needed. + from ._log import _AssertLogsContext + return _AssertLogsContext(self, logger, level, no_logs=False) + + def assertNoLogs(self, logger=None, level=None): + """ Fail unless no log messages of level *level* or higher are emitted + on *logger_name* or its children. + + This method must be used as a context manager. + """ + from ._log import _AssertLogsContext + return _AssertLogsContext(self, logger, level, no_logs=True) + + def _getAssertEqualityFunc(self, first, second): + """Get a detailed comparison function for the types of the two args. + + Returns: A callable accepting (first, second, msg=None) that will + raise a failure exception if first != second with a useful human + readable error message for those types. 
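As a sketch of the dispatch that _getAssertEqualityFunc() performs, using addTypeEqualityFunc() from earlier in this file; the Point class and its asserter are assumptions made for the illustration:

import unittest

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

class PointTest(unittest.TestCase):
    def setUp(self):
        # Consulted only when both operands share exactly the same type;
        # subclasses deliberately fall back to the default comparison.
        self.addTypeEqualityFunc(Point, self.assertPointEqual)

    def assertPointEqual(self, first, second, msg=None):
        if (first.x, first.y) != (second.x, second.y):
            self.fail(msg or 'Points differ: (%r, %r) != (%r, %r)'
                      % (first.x, first.y, second.x, second.y))

    def test_points(self):
        self.assertEqual(Point(1, 2), Point(1, 2))

if __name__ == "__main__":
    unittest.main()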
+ """ + # + # NOTE(gregory.p.smith): I considered isinstance(first, type(second)) + # and vice versa. I opted for the conservative approach in case + # subclasses are not intended to be compared in detail to their super + # class instances using a type equality func. This means testing + # subtypes won't automagically use the detailed comparison. Callers + # should use their type specific assertSpamEqual method to compare + # subclasses if the detailed comparison is desired and appropriate. + # See the discussion in http://bugs.python.org/issue2578. + # + if type(first) is type(second): + asserter = self._type_equality_funcs.get(type(first)) + if asserter is not None: + if isinstance(asserter, str): + asserter = getattr(self, asserter) + return asserter + + return self._baseAssertEqual + + def _baseAssertEqual(self, first, second, msg=None): + """The default assertEqual implementation, not type specific.""" + if not first == second: + standardMsg = '%s != %s' % _common_shorten_repr(first, second) + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def assertEqual(self, first, second, msg=None): + """Fail if the two objects are unequal as determined by the '==' + operator. + """ + assertion_func = self._getAssertEqualityFunc(first, second) + assertion_func(first, second, msg=msg) + + def assertNotEqual(self, first, second, msg=None): + """Fail if the two objects are equal as determined by the '!=' + operator. + """ + if not first != second: + msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first), + safe_repr(second))) + raise self.failureException(msg) + + def assertAlmostEqual(self, first, second, places=None, msg=None, + delta=None): + """Fail if the two objects are unequal as determined by their + difference rounded to the given number of decimal places + (default 7) and comparing to zero, or by comparing that the + difference between the two objects is more than the given + delta. + + Note that decimal places (from zero) are usually not the same + as significant digits (measured from the most significant digit). + + If the two objects compare equal then they will automatically + compare almost equal. + """ + if first == second: + # shortcut + return + if delta is not None and places is not None: + raise TypeError("specify delta or places not both") + + diff = abs(first - second) + if delta is not None: + if diff <= delta: + return + + standardMsg = '%s != %s within %s delta (%s difference)' % ( + safe_repr(first), + safe_repr(second), + safe_repr(delta), + safe_repr(diff)) + else: + if places is None: + places = 7 + + if round(diff, places) == 0: + return + + standardMsg = '%s != %s within %r places (%s difference)' % ( + safe_repr(first), + safe_repr(second), + places, + safe_repr(diff)) + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def assertNotAlmostEqual(self, first, second, places=None, msg=None, + delta=None): + """Fail if the two objects are equal as determined by their + difference rounded to the given number of decimal places + (default 7) and comparing to zero, or by comparing that the + difference between the two objects is less than the given delta. + + Note that decimal places (from zero) are usually not the same + as significant digits (measured from the most significant digit). + + Objects that are equal automatically fail. 
+ """ + if delta is not None and places is not None: + raise TypeError("specify delta or places not both") + diff = abs(first - second) + if delta is not None: + if not (first == second) and diff > delta: + return + standardMsg = '%s == %s within %s delta (%s difference)' % ( + safe_repr(first), + safe_repr(second), + safe_repr(delta), + safe_repr(diff)) + else: + if places is None: + places = 7 + if not (first == second) and round(diff, places) != 0: + return + standardMsg = '%s == %s within %r places' % (safe_repr(first), + safe_repr(second), + places) + + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None): + """An equality assertion for ordered sequences (like lists and tuples). + + For the purposes of this function, a valid ordered sequence type is one + which can be indexed, has a length, and has an equality operator. + + Args: + seq1: The first sequence to compare. + seq2: The second sequence to compare. + seq_type: The expected datatype of the sequences, or None if no + datatype should be enforced. + msg: Optional message to use on failure instead of a list of + differences. + """ + if seq_type is not None: + seq_type_name = seq_type.__name__ + if not isinstance(seq1, seq_type): + raise self.failureException('First sequence is not a %s: %s' + % (seq_type_name, safe_repr(seq1))) + if not isinstance(seq2, seq_type): + raise self.failureException('Second sequence is not a %s: %s' + % (seq_type_name, safe_repr(seq2))) + else: + seq_type_name = "sequence" + + differing = None + try: + len1 = len(seq1) + except (TypeError, NotImplementedError): + differing = 'First %s has no length. Non-sequence?' % ( + seq_type_name) + + if differing is None: + try: + len2 = len(seq2) + except (TypeError, NotImplementedError): + differing = 'Second %s has no length. Non-sequence?' % ( + seq_type_name) + + if differing is None: + if seq1 == seq2: + return + + differing = '%ss differ: %s != %s\n' % ( + (seq_type_name.capitalize(),) + + _common_shorten_repr(seq1, seq2)) + + for i in range(min(len1, len2)): + try: + item1 = seq1[i] + except (TypeError, IndexError, NotImplementedError): + differing += ('\nUnable to index element %d of first %s\n' % + (i, seq_type_name)) + break + + try: + item2 = seq2[i] + except (TypeError, IndexError, NotImplementedError): + differing += ('\nUnable to index element %d of second %s\n' % + (i, seq_type_name)) + break + + if item1 != item2: + differing += ('\nFirst differing element %d:\n%s\n%s\n' % + ((i,) + _common_shorten_repr(item1, item2))) + break + else: + if (len1 == len2 and seq_type is None and + type(seq1) != type(seq2)): + # The sequences are the same, but have differing types. 
+ return + + if len1 > len2: + differing += ('\nFirst %s contains %d additional ' + 'elements.\n' % (seq_type_name, len1 - len2)) + try: + differing += ('First extra element %d:\n%s\n' % + (len2, safe_repr(seq1[len2]))) + except (TypeError, IndexError, NotImplementedError): + differing += ('Unable to index element %d ' + 'of first %s\n' % (len2, seq_type_name)) + elif len1 < len2: + differing += ('\nSecond %s contains %d additional ' + 'elements.\n' % (seq_type_name, len2 - len1)) + try: + differing += ('First extra element %d:\n%s\n' % + (len1, safe_repr(seq2[len1]))) + except (TypeError, IndexError, NotImplementedError): + differing += ('Unable to index element %d ' + 'of second %s\n' % (len1, seq_type_name)) + standardMsg = differing + diffMsg = '\n' + '\n'.join( + difflib.ndiff(pprint.pformat(seq1).splitlines(), + pprint.pformat(seq2).splitlines())) + + standardMsg = self._truncateMessage(standardMsg, diffMsg) + msg = self._formatMessage(msg, standardMsg) + self.fail(msg) + + def _truncateMessage(self, message, diff): + max_diff = self.maxDiff + if max_diff is None or len(diff) <= max_diff: + return message + diff + return message + (DIFF_OMITTED % len(diff)) + + def assertListEqual(self, list1, list2, msg=None): + """A list-specific equality assertion. + + Args: + list1: The first list to compare. + list2: The second list to compare. + msg: Optional message to use on failure instead of a list of + differences. + + """ + self.assertSequenceEqual(list1, list2, msg, seq_type=list) + + def assertTupleEqual(self, tuple1, tuple2, msg=None): + """A tuple-specific equality assertion. + + Args: + tuple1: The first tuple to compare. + tuple2: The second tuple to compare. + msg: Optional message to use on failure instead of a list of + differences. + """ + self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple) + + def assertSetEqual(self, set1, set2, msg=None): + """A set-specific equality assertion. + + Args: + set1: The first set to compare. + set2: The second set to compare. + msg: Optional message to use on failure instead of a list of + differences. + + assertSetEqual uses ducktyping to support different types of sets, and + is optimized for sets specifically (parameters must support a + difference method). 
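A small sketch of the duck-typed set comparison described above — any operands with a difference() method qualify — together with the membership asserts defined next; values are illustrative:

import unittest

class ContainerDemo(unittest.TestCase):
    def test_sets(self):
        # Only difference() is required, so mixing set and frozenset works.
        self.assertSetEqual({1, 2, 3}, frozenset({1, 2, 3}))

    def test_membership(self):
        self.assertIn("x64", ["x64", "x86"])
        self.assertNotIn("arm64", ["x64", "x86"])

if __name__ == "__main__":
    unittest.main()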
+ """ + try: + difference1 = set1.difference(set2) + except TypeError as e: + self.fail('invalid type when attempting set difference: %s' % e) + except AttributeError as e: + self.fail('first argument does not support set difference: %s' % e) + + try: + difference2 = set2.difference(set1) + except TypeError as e: + self.fail('invalid type when attempting set difference: %s' % e) + except AttributeError as e: + self.fail('second argument does not support set difference: %s' % e) + + if not (difference1 or difference2): + return + + lines = [] + if difference1: + lines.append('Items in the first set but not the second:') + for item in difference1: + lines.append(repr(item)) + if difference2: + lines.append('Items in the second set but not the first:') + for item in difference2: + lines.append(repr(item)) + + standardMsg = '\n'.join(lines) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIn(self, member, container, msg=None): + """Just like self.assertTrue(a in b), but with a nicer default message.""" + if member not in container: + standardMsg = '%s not found in %s' % (safe_repr(member), + safe_repr(container)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotIn(self, member, container, msg=None): + """Just like self.assertTrue(a not in b), but with a nicer default message.""" + if member in container: + standardMsg = '%s unexpectedly found in %s' % (safe_repr(member), + safe_repr(container)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIs(self, expr1, expr2, msg=None): + """Just like self.assertTrue(a is b), but with a nicer default message.""" + if expr1 is not expr2: + standardMsg = '%s is not %s' % (safe_repr(expr1), + safe_repr(expr2)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIsNot(self, expr1, expr2, msg=None): + """Just like self.assertTrue(a is not b), but with a nicer default message.""" + if expr1 is expr2: + standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertDictEqual(self, d1, d2, msg=None): + self.assertIsInstance(d1, dict, 'First argument is not a dictionary') + self.assertIsInstance(d2, dict, 'Second argument is not a dictionary') + + if d1 != d2: + standardMsg = '%s != %s' % _common_shorten_repr(d1, d2) + diff = ('\n' + '\n'.join(difflib.ndiff( + pprint.pformat(d1).splitlines(), + pprint.pformat(d2).splitlines()))) + standardMsg = self._truncateMessage(standardMsg, diff) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertCountEqual(self, first, second, msg=None): + """Asserts that two iterables have the same elements, the same number of + times, without regard to order. + + self.assertEqual(Counter(list(first)), + Counter(list(second))) + + Example: + - [0, 1, 1] and [1, 0, 1] compare equal. + - [0, 0, 1] and [0, 1] compare unequal. 
+ + """ + first_seq, second_seq = list(first), list(second) + try: + first = collections.Counter(first_seq) + second = collections.Counter(second_seq) + except TypeError: + # Handle case with unhashable elements + differences = _count_diff_all_purpose(first_seq, second_seq) + else: + if first == second: + return + differences = _count_diff_hashable(first_seq, second_seq) + + if differences: + standardMsg = 'Element counts were not equal:\n' + lines = ['First has %d, Second has %d: %r' % diff for diff in differences] + diffMsg = '\n'.join(lines) + standardMsg = self._truncateMessage(standardMsg, diffMsg) + msg = self._formatMessage(msg, standardMsg) + self.fail(msg) + + def assertMultiLineEqual(self, first, second, msg=None): + """Assert that two multi-line strings are equal.""" + self.assertIsInstance(first, str, "First argument is not a string") + self.assertIsInstance(second, str, "Second argument is not a string") + + if first != second: + # Don't use difflib if the strings are too long + if (len(first) > self._diffThreshold or + len(second) > self._diffThreshold): + self._baseAssertEqual(first, second, msg) + + # Append \n to both strings if either is missing the \n. + # This allows the final ndiff to show the \n difference. The + # exception here is if the string is empty, in which case no + # \n should be added + first_presplit = first + second_presplit = second + if first and second: + if first[-1] != '\n' or second[-1] != '\n': + first_presplit += '\n' + second_presplit += '\n' + elif second and second[-1] != '\n': + second_presplit += '\n' + elif first and first[-1] != '\n': + first_presplit += '\n' + + firstlines = first_presplit.splitlines(keepends=True) + secondlines = second_presplit.splitlines(keepends=True) + + # Generate the message and diff, then raise the exception + standardMsg = '%s != %s' % _common_shorten_repr(first, second) + diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines)) + standardMsg = self._truncateMessage(standardMsg, diff) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertLess(self, a, b, msg=None): + """Just like self.assertTrue(a < b), but with a nicer default message.""" + if not a < b: + standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertLessEqual(self, a, b, msg=None): + """Just like self.assertTrue(a <= b), but with a nicer default message.""" + if not a <= b: + standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertGreater(self, a, b, msg=None): + """Just like self.assertTrue(a > b), but with a nicer default message.""" + if not a > b: + standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertGreaterEqual(self, a, b, msg=None): + """Just like self.assertTrue(a >= b), but with a nicer default message.""" + if not a >= b: + standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIsNone(self, obj, msg=None): + """Same as self.assertTrue(obj is None), with a nicer default message.""" + if obj is not None: + standardMsg = '%s is not None' % (safe_repr(obj),) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIsNotNone(self, obj, msg=None): + """Included for symmetry with assertIsNone.""" + if obj is None: + standardMsg = 'unexpectedly None' + self.fail(self._formatMessage(msg, 
standardMsg)) + + def assertIsInstance(self, obj, cls, msg=None): + """Same as self.assertTrue(isinstance(obj, cls)), with a nicer + default message.""" + if not isinstance(obj, cls): + if isinstance(cls, tuple): + standardMsg = f'{safe_repr(obj)} is not an instance of any of {cls!r}' + else: + standardMsg = f'{safe_repr(obj)} is not an instance of {cls!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotIsInstance(self, obj, cls, msg=None): + """Included for symmetry with assertIsInstance.""" + if isinstance(obj, cls): + if isinstance(cls, tuple): + for x in cls: + if isinstance(obj, x): + cls = x + break + standardMsg = f'{safe_repr(obj)} is an instance of {cls!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIsSubclass(self, cls, superclass, msg=None): + try: + if issubclass(cls, superclass): + return + except TypeError: + if not isinstance(cls, type): + self.fail(self._formatMessage(msg, f'{cls!r} is not a class')) + raise + if isinstance(superclass, tuple): + standardMsg = f'{cls!r} is not a subclass of any of {superclass!r}' + else: + standardMsg = f'{cls!r} is not a subclass of {superclass!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotIsSubclass(self, cls, superclass, msg=None): + try: + if not issubclass(cls, superclass): + return + except TypeError: + if not isinstance(cls, type): + self.fail(self._formatMessage(msg, f'{cls!r} is not a class')) + raise + if isinstance(superclass, tuple): + for x in superclass: + if issubclass(cls, x): + superclass = x + break + standardMsg = f'{cls!r} is a subclass of {superclass!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertHasAttr(self, obj, name, msg=None): + if not hasattr(obj, name): + if isinstance(obj, types.ModuleType): + standardMsg = f'module {obj.__name__!r} has no attribute {name!r}' + elif isinstance(obj, type): + standardMsg = f'type object {obj.__name__!r} has no attribute {name!r}' + else: + standardMsg = f'{type(obj).__name__!r} object has no attribute {name!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotHasAttr(self, obj, name, msg=None): + if hasattr(obj, name): + if isinstance(obj, types.ModuleType): + standardMsg = f'module {obj.__name__!r} has unexpected attribute {name!r}' + elif isinstance(obj, type): + standardMsg = f'type object {obj.__name__!r} has unexpected attribute {name!r}' + else: + standardMsg = f'{type(obj).__name__!r} object has unexpected attribute {name!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertRaisesRegex(self, expected_exception, expected_regex, + *args, **kwargs): + """Asserts that the message in a raised exception matches a regex. + + Args: + expected_exception: Exception class expected to be raised. + expected_regex: Regex (re.Pattern object or string) expected + to be found in error message. + args: Function to be called and extra positional args. + kwargs: Extra kwargs. + msg: Optional message used in case of failure. Can only be used + when assertRaisesRegex is used as a context manager. + """ + context = _AssertRaisesContext(expected_exception, self, expected_regex) + return context.handle('assertRaisesRegex', args, kwargs) + + def assertWarnsRegex(self, expected_warning, expected_regex, + *args, **kwargs): + """Asserts that the message in a triggered warning matches a regexp. + Basic functioning is similar to assertWarns() with the addition + that only warnings whose messages also match the regular expression + are considered successful matches. 
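A hedged sketch of the regex-based variants defined here; the warning message and the file name being matched are invented for the example:

import unittest
import warnings

class RegexDemo(unittest.TestCase):
    def test_raises_regex(self):
        with self.assertRaisesRegex(ValueError, r"invalid literal"):
            int("not a number")

    def test_warns_regex(self):
        # Only warnings whose message also matches the pattern count.
        with self.assertWarnsRegex(UserWarning, r"legacy"):
            warnings.warn("legacy code path", UserWarning)

    def test_plain_regex(self):
        self.assertRegex("python-3.14.4-amd64.exe", r"3\.14\.\d+")

if __name__ == "__main__":
    unittest.main()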
+ + Args: + expected_warning: Warning class expected to be triggered. + expected_regex: Regex (re.Pattern object or string) expected + to be found in error message. + args: Function to be called and extra positional args. + kwargs: Extra kwargs. + msg: Optional message used in case of failure. Can only be used + when assertWarnsRegex is used as a context manager. + """ + context = _AssertWarnsContext(expected_warning, self, expected_regex) + return context.handle('assertWarnsRegex', args, kwargs) + + def assertRegex(self, text, expected_regex, msg=None): + """Fail the test unless the text matches the regular expression.""" + if isinstance(expected_regex, (str, bytes)): + assert expected_regex, "expected_regex must not be empty." + expected_regex = re.compile(expected_regex) + if not expected_regex.search(text): + standardMsg = "Regex didn't match: %r not found in %r" % ( + expected_regex.pattern, text) + # _formatMessage ensures the longMessage option is respected + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def assertNotRegex(self, text, unexpected_regex, msg=None): + """Fail the test if the text matches the regular expression.""" + if isinstance(unexpected_regex, (str, bytes)): + unexpected_regex = re.compile(unexpected_regex) + match = unexpected_regex.search(text) + if match: + standardMsg = 'Regex matched: %r matches %r in %r' % ( + text[match.start() : match.end()], + unexpected_regex.pattern, + text) + # _formatMessage ensures the longMessage option is respected + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def _tail_type_check(self, s, tails, msg): + if not isinstance(tails, tuple): + tails = (tails,) + for tail in tails: + if isinstance(tail, str): + if not isinstance(s, str): + self.fail(self._formatMessage(msg, + f'Expected str, not {type(s).__name__}')) + elif isinstance(tail, (bytes, bytearray)): + if not isinstance(s, (bytes, bytearray)): + self.fail(self._formatMessage(msg, + f'Expected bytes, not {type(s).__name__}')) + + def assertStartsWith(self, s, prefix, msg=None): + try: + if s.startswith(prefix): + return + except (AttributeError, TypeError): + self._tail_type_check(s, prefix, msg) + raise + a = safe_repr(s, short=True) + b = safe_repr(prefix) + if isinstance(prefix, tuple): + standardMsg = f"{a} doesn't start with any of {b}" + else: + standardMsg = f"{a} doesn't start with {b}" + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotStartsWith(self, s, prefix, msg=None): + try: + if not s.startswith(prefix): + return + except (AttributeError, TypeError): + self._tail_type_check(s, prefix, msg) + raise + if isinstance(prefix, tuple): + for x in prefix: + if s.startswith(x): + prefix = x + break + a = safe_repr(s, short=True) + b = safe_repr(prefix) + self.fail(self._formatMessage(msg, f"{a} starts with {b}")) + + def assertEndsWith(self, s, suffix, msg=None): + try: + if s.endswith(suffix): + return + except (AttributeError, TypeError): + self._tail_type_check(s, suffix, msg) + raise + a = safe_repr(s, short=True) + b = safe_repr(suffix) + if isinstance(suffix, tuple): + standardMsg = f"{a} doesn't end with any of {b}" + else: + standardMsg = f"{a} doesn't end with {b}" + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotEndsWith(self, s, suffix, msg=None): + try: + if not s.endswith(suffix): + return + except (AttributeError, TypeError): + self._tail_type_check(s, suffix, msg) + raise + if isinstance(suffix, tuple): + for x in suffix: + if s.endswith(x): + 
suffix = x + break + a = safe_repr(s, short=True) + b = safe_repr(suffix) + self.fail(self._formatMessage(msg, f"{a} ends with {b}")) + + +class FunctionTestCase(TestCase): + """A test case that wraps a test function. + + This is useful for slipping pre-existing test functions into the + unittest framework. Optionally, set-up and tidy-up functions can be + supplied. As with TestCase, the tidy-up ('tearDown') function will + always be called if the set-up ('setUp') function ran successfully. + """ + + def __init__(self, testFunc, setUp=None, tearDown=None, description=None): + super(FunctionTestCase, self).__init__() + self._setUpFunc = setUp + self._tearDownFunc = tearDown + self._testFunc = testFunc + self._description = description + + def setUp(self): + if self._setUpFunc is not None: + self._setUpFunc() + + def tearDown(self): + if self._tearDownFunc is not None: + self._tearDownFunc() + + def runTest(self): + self._testFunc() + + def id(self): + return self._testFunc.__name__ + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + + return self._setUpFunc == other._setUpFunc and \ + self._tearDownFunc == other._tearDownFunc and \ + self._testFunc == other._testFunc and \ + self._description == other._description + + def __hash__(self): + return hash((type(self), self._setUpFunc, self._tearDownFunc, + self._testFunc, self._description)) + + def __str__(self): + return "%s (%s)" % (strclass(self.__class__), + self._testFunc.__name__) + + def __repr__(self): + return "<%s tec=%s>" % (strclass(self.__class__), + self._testFunc) + + def shortDescription(self): + if self._description is not None: + return self._description + doc = self._testFunc.__doc__ + return doc and doc.split("\n")[0].strip() or None + + +class _SubTest(TestCase): + + def __init__(self, test_case, message, params): + super().__init__() + self._message = message + self.test_case = test_case + self.params = params + self.failureException = test_case.failureException + + def runTest(self): + raise NotImplementedError("subtests cannot be run directly") + + def _subDescription(self): + parts = [] + if self._message is not _subtest_msg_sentinel: + parts.append("[{}]".format(self._message)) + if self.params: + params_desc = ', '.join( + "{}={!r}".format(k, v) + for (k, v) in self.params.items()) + parts.append("({})".format(params_desc)) + return " ".join(parts) or '()' + + def id(self): + return "{} {}".format(self.test_case.id(), self._subDescription()) + + def shortDescription(self): + """Returns a one-line description of the subtest, or None if no + description has been provided. + """ + return self.test_case.shortDescription() + + def __str__(self): + return "{} {}".format(self.test_case, self._subDescription()) diff --git a/Python314_4_x64_Template/Lib/unittest/loader.py b/Python314_4_x64_Template/Lib/unittest/loader.py new file mode 100644 index 00000000..a52950da --- /dev/null +++ b/Python314_4_x64_Template/Lib/unittest/loader.py @@ -0,0 +1,484 @@ +"""Loading unittests.""" + +import os +import re +import sys +import traceback +import types +import functools + +from fnmatch import fnmatch, fnmatchcase + +from . 
import case, suite, util + +__unittest = True + +# what about .pyc (etc) +# we would need to avoid loading the same tests multiple times +# from '.py', *and* '.pyc' +VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE) + + +class _FailedTest(case.TestCase): + _testMethodName = None + + def __init__(self, method_name, exception): + self._exception = exception + super(_FailedTest, self).__init__(method_name) + + def __getattr__(self, name): + if name != self._testMethodName: + return super(_FailedTest, self).__getattr__(name) + def testFailure(): + raise self._exception + return testFailure + + +def _make_failed_import_test(name, suiteClass): + message = 'Failed to import test module: %s\n%s' % ( + name, traceback.format_exc()) + return _make_failed_test(name, ImportError(message), suiteClass, message) + +def _make_failed_load_tests(name, exception, suiteClass): + message = 'Failed to call load_tests:\n%s' % (traceback.format_exc(),) + return _make_failed_test( + name, exception, suiteClass, message) + +def _make_failed_test(methodname, exception, suiteClass, message): + test = _FailedTest(methodname, exception) + return suiteClass((test,)), message + +def _make_skipped_test(methodname, exception, suiteClass): + @case.skip(str(exception)) + def testSkipped(self): + pass + attrs = {methodname: testSkipped} + TestClass = type("ModuleSkipped", (case.TestCase,), attrs) + return suiteClass((TestClass(methodname),)) + +def _splitext(path): + return os.path.splitext(path)[0] + + +class TestLoader(object): + """ + This class is responsible for loading tests according to various criteria + and returning them wrapped in a TestSuite + """ + testMethodPrefix = 'test' + sortTestMethodsUsing = staticmethod(util.three_way_cmp) + testNamePatterns = None + suiteClass = suite.TestSuite + _top_level_dir = None + + def __init__(self): + super(TestLoader, self).__init__() + self.errors = [] + # Tracks packages which we have called into via load_tests, to + # avoid infinite re-entrancy. + self._loading_packages = set() + + def loadTestsFromTestCase(self, testCaseClass): + """Return a suite of all test cases contained in testCaseClass""" + if issubclass(testCaseClass, suite.TestSuite): + raise TypeError("Test cases should not be derived from " + "TestSuite. Maybe you meant to derive from " + "TestCase?") + if testCaseClass in (case.TestCase, case.FunctionTestCase): + # We don't load any tests from base types that should not be loaded. 
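For orientation, a minimal sketch of driving the loader directly through its public entry points; the test class and names are illustrative assumptions, not part of this diff:

import unittest

class SampleTest(unittest.TestCase):
    def test_one(self):
        self.assertTrue(True)

loader = unittest.TestLoader()
# getTestCaseNames() keeps methods matching testMethodPrefix ('test' by
# default) and sorts them with sortTestMethodsUsing.
print(loader.getTestCaseNames(SampleTest))       # ['test_one']
suite = loader.loadTestsFromTestCase(SampleTest)
unittest.TextTestRunner().run(suite)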
+                testCaseNames = []
+        else:
+            testCaseNames = self.getTestCaseNames(testCaseClass)
+            if not testCaseNames and hasattr(testCaseClass, 'runTest'):
+                testCaseNames = ['runTest']
+        loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
+        return loaded_suite
+
+    def loadTestsFromModule(self, module, *, pattern=None):
+        """Return a suite of all test cases contained in the given module"""
+        tests = []
+        for name in dir(module):
+            obj = getattr(module, name)
+            if (
+                isinstance(obj, type)
+                and issubclass(obj, case.TestCase)
+                and obj not in (case.TestCase, case.FunctionTestCase)
+            ):
+                tests.append(self.loadTestsFromTestCase(obj))
+
+        load_tests = getattr(module, 'load_tests', None)
+        tests = self.suiteClass(tests)
+        if load_tests is not None:
+            try:
+                return load_tests(self, tests, pattern)
+            except Exception as e:
+                error_case, error_message = _make_failed_load_tests(
+                    module.__name__, e, self.suiteClass)
+                self.errors.append(error_message)
+                return error_case
+        return tests
+
+    def loadTestsFromName(self, name, module=None):
+        """Return a suite of all test cases given a string specifier.
+
+        The name may resolve either to a module, a test case class, a
+        test method within a test case class, or a callable object which
+        returns a TestCase or TestSuite instance.
+
+        The method optionally resolves the names relative to a given module.
+        """
+        parts = name.split('.')
+        error_case, error_message = None, None
+        if module is None:
+            parts_copy = parts[:]
+            while parts_copy:
+                try:
+                    module_name = '.'.join(parts_copy)
+                    module = __import__(module_name)
+                    break
+                except ImportError:
+                    next_attribute = parts_copy.pop()
+                    # Last error so we can give it to the user if needed.
+                    error_case, error_message = _make_failed_import_test(
+                        next_attribute, self.suiteClass)
+                    if not parts_copy:
+                        # Even the top level import failed: report that error.
+                        self.errors.append(error_message)
+                        return error_case
+            parts = parts[1:]
+        obj = module
+        for part in parts:
+            try:
+                parent, obj = obj, getattr(obj, part)
+            except AttributeError as e:
+                # We can't traverse some part of the name.
+                if (getattr(obj, '__path__', None) is not None
+                        and error_case is not None):
+                    # This is a package (no __file__ per importlib docs), and we
+                    # encountered an error importing something. We cannot tell
+                    # the difference between package.WrongNameTestClass and
+                    # package.wrong_module_name - so we just report the
+                    # ImportError - it is more informative.
+                    self.errors.append(error_message)
+                    return error_case
+                else:
+                    # Otherwise, we signal that an AttributeError has occurred.
+ error_case, error_message = _make_failed_test( + part, e, self.suiteClass, + 'Failed to access attribute:\n%s' % ( + traceback.format_exc(),)) + self.errors.append(error_message) + return error_case + + if isinstance(obj, types.ModuleType): + return self.loadTestsFromModule(obj) + elif ( + isinstance(obj, type) + and issubclass(obj, case.TestCase) + and obj not in (case.TestCase, case.FunctionTestCase) + ): + return self.loadTestsFromTestCase(obj) + elif (isinstance(obj, types.FunctionType) and + isinstance(parent, type) and + issubclass(parent, case.TestCase)): + name = parts[-1] + inst = parent(name) + # static methods follow a different path + if not isinstance(getattr(inst, name), types.FunctionType): + return self.suiteClass([inst]) + elif isinstance(obj, suite.TestSuite): + return obj + if callable(obj): + test = obj() + if isinstance(test, suite.TestSuite): + return test + elif isinstance(test, case.TestCase): + return self.suiteClass([test]) + else: + raise TypeError("calling %s returned %s, not a test" % + (obj, test)) + else: + raise TypeError("don't know how to make test from: %s" % obj) + + def loadTestsFromNames(self, names, module=None): + """Return a suite of all test cases found using the given sequence + of string specifiers. See 'loadTestsFromName()'. + """ + suites = [self.loadTestsFromName(name, module) for name in names] + return self.suiteClass(suites) + + def getTestCaseNames(self, testCaseClass): + """Return a sorted sequence of method names found within testCaseClass + """ + def shouldIncludeMethod(attrname): + if not attrname.startswith(self.testMethodPrefix): + return False + testFunc = getattr(testCaseClass, attrname) + if not callable(testFunc): + return False + fullName = f'%s.%s.%s' % ( + testCaseClass.__module__, testCaseClass.__qualname__, attrname + ) + return self.testNamePatterns is None or \ + any(fnmatchcase(fullName, pattern) for pattern in self.testNamePatterns) + testFnNames = list(filter(shouldIncludeMethod, dir(testCaseClass))) + if self.sortTestMethodsUsing: + testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing)) + return testFnNames + + def discover(self, start_dir, pattern='test*.py', top_level_dir=None): + """Find and return all test modules from the specified start + directory, recursing into subdirectories to find them and return all + tests found within them. Only test files that match the pattern will + be loaded. (Using shell style pattern matching.) + + All test modules must be importable from the top level of the project. + If the start directory is not the top level directory then the top + level directory must be specified separately. + + If a test package name (directory with '__init__.py') matches the + pattern then the package will be checked for a 'load_tests' function. If + this exists then it will be called with (loader, tests, pattern) unless + the package has already had load_tests called from the same discovery + invocation, in which case the package module object is not scanned for + tests - this ensures that when a package uses discover to further + discover child tests that infinite recursion does not happen. + + If load_tests exists then discovery does *not* recurse into the package, + load_tests is responsible for loading all tests in the package. + + The pattern is deliberately not stored as a loader attribute so that + packages can continue discovery themselves. top_level_dir is stored so + load_tests does not need to pass this argument in to loader.discover(). 
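As a sketch of the discovery entry point this docstring describes, plus the load_tests protocol it mentions; the tests/ directory layout is assumed, not part of this diff:

import unittest

# Discover test*.py files under ./tests (assumed layout), importable from
# the project root; top_level_dir is remembered for nested load_tests calls.
loader = unittest.TestLoader()
suite = loader.discover(start_dir="tests", pattern="test*.py", top_level_dir=".")
unittest.TextTestRunner(verbosity=2).run(suite)

A package opts in to the protocol by defining load_tests in its __init__.py, after which discovery stops recursing into it and leaves the package responsible for its own children:

# tests/__init__.py (assumed)
def load_tests(loader, standard_tests, pattern):
    standard_tests.addTests(loader.discover(start_dir="tests/extra", pattern=pattern))
    return standard_tests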
+ + Paths are sorted before being imported to ensure reproducible execution + order even on filesystems with non-alphabetical ordering like ext3/4. + """ + original_top_level_dir = self._top_level_dir + set_implicit_top = False + if top_level_dir is None and self._top_level_dir is not None: + # make top_level_dir optional if called from load_tests in a package + top_level_dir = self._top_level_dir + elif top_level_dir is None: + set_implicit_top = True + top_level_dir = start_dir + + top_level_dir = os.path.abspath(top_level_dir) + + if not top_level_dir in sys.path: + # all test modules must be importable from the top level directory + # should we *unconditionally* put the start directory in first + # in sys.path to minimise likelihood of conflicts between installed + # modules and development versions? + sys.path.insert(0, top_level_dir) + self._top_level_dir = top_level_dir + + is_not_importable = False + is_namespace = False + tests = [] + if os.path.isdir(os.path.abspath(start_dir)): + start_dir = os.path.abspath(start_dir) + if start_dir != top_level_dir: + is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py')) + else: + # support for discovery from dotted module names + try: + __import__(start_dir) + except ImportError: + is_not_importable = True + else: + the_module = sys.modules[start_dir] + if not hasattr(the_module, "__file__") or the_module.__file__ is None: + # look for namespace packages + try: + spec = the_module.__spec__ + except AttributeError: + spec = None + + if spec and spec.submodule_search_locations is not None: + is_namespace = True + + for path in the_module.__path__: + if (not set_implicit_top and + not path.startswith(top_level_dir)): + continue + self._top_level_dir = \ + (path.split(the_module.__name__ + .replace(".", os.path.sep))[0]) + tests.extend(self._find_tests(path, pattern, namespace=True)) + elif the_module.__name__ in sys.builtin_module_names: + # builtin module + raise TypeError('Can not use builtin modules ' + 'as dotted module names') from None + else: + raise TypeError( + f"don't know how to discover from {the_module!r}" + ) from None + + else: + top_part = start_dir.split('.')[0] + start_dir = os.path.abspath(os.path.dirname((the_module.__file__))) + + if set_implicit_top: + if not is_namespace: + if sys.modules[top_part].__file__ is None: + self._top_level_dir = os.path.dirname(the_module.__file__) + if self._top_level_dir not in sys.path: + sys.path.insert(0, self._top_level_dir) + else: + self._top_level_dir = \ + self._get_directory_containing_module(top_part) + sys.path.remove(top_level_dir) + + if is_not_importable: + raise ImportError('Start directory is not importable: %r' % start_dir) + + if not is_namespace: + tests = list(self._find_tests(start_dir, pattern)) + + self._top_level_dir = original_top_level_dir + return self.suiteClass(tests) + + def _get_directory_containing_module(self, module_name): + module = sys.modules[module_name] + full_path = os.path.abspath(module.__file__) + + if os.path.basename(full_path).lower().startswith('__init__.py'): + return os.path.dirname(os.path.dirname(full_path)) + else: + # here we have been given a module rather than a package - so + # all we can do is search the *same* directory the module is in + # should an exception be raised instead + return os.path.dirname(full_path) + + def _get_name_from_path(self, path): + if path == self._top_level_dir: + return '.' 
+        path = _splitext(os.path.normpath(path))
+
+        _relpath = os.path.relpath(path, self._top_level_dir)
+        assert not os.path.isabs(_relpath), "Path must be within the project"
+        assert not _relpath.startswith('..'), "Path must be within the project"
+
+        name = _relpath.replace(os.path.sep, '.')
+        return name
+
+    def _get_module_from_name(self, name):
+        __import__(name)
+        return sys.modules[name]
+
+    def _match_path(self, path, full_path, pattern):
+        # override this method to use alternative matching strategy
+        return fnmatch(path, pattern)
+
+    def _find_tests(self, start_dir, pattern, namespace=False):
+        """Used by discovery. Yields test suites it loads."""
+        # Handle the __init__ in this package
+        name = self._get_name_from_path(start_dir)
+        # name is '.' when start_dir == top_level_dir (and top_level_dir is by
+        # definition not a package).
+        if name != '.' and name not in self._loading_packages:
+            # name is in self._loading_packages while we have called into
+            # loadTestsFromModule with name.
+            tests, should_recurse = self._find_test_path(
+                start_dir, pattern, namespace)
+            if tests is not None:
+                yield tests
+            if not should_recurse:
+                # Either an error occurred, or load_tests was used by the
+                # package.
+                return
+        # Handle the contents.
+        paths = sorted(os.listdir(start_dir))
+        for path in paths:
+            full_path = os.path.join(start_dir, path)
+            tests, should_recurse = self._find_test_path(
+                full_path, pattern, False)
+            if tests is not None:
+                yield tests
+            if should_recurse:
+                # we found a package that didn't use load_tests.
+                name = self._get_name_from_path(full_path)
+                self._loading_packages.add(name)
+                try:
+                    yield from self._find_tests(full_path, pattern, False)
+                finally:
+                    self._loading_packages.discard(name)
+
+    def _find_test_path(self, full_path, pattern, namespace=False):
+        """Used by discovery.
+
+        Loads tests from a single file, or a directory's __init__.py when
+        passed the directory.
+
+        Returns a tuple (None_or_tests_from_file, should_recurse).
+        """
+        basename = os.path.basename(full_path)
+        if os.path.isfile(full_path):
+            if not VALID_MODULE_NAME.match(basename):
+                # valid Python identifiers only
+                return None, False
+            if not self._match_path(basename, full_path, pattern):
+                return None, False
+            # if the test file matches, load it
+            name = self._get_name_from_path(full_path)
+            try:
+                module = self._get_module_from_name(name)
+            except case.SkipTest as e:
+                return _make_skipped_test(name, e, self.suiteClass), False
+            except:
+                error_case, error_message = \
+                    _make_failed_import_test(name, self.suiteClass)
+                self.errors.append(error_message)
+                return error_case, False
+            else:
+                mod_file = os.path.abspath(
+                    getattr(module, '__file__', full_path))
+                realpath = _splitext(
+                    os.path.realpath(mod_file))
+                fullpath_noext = _splitext(
+                    os.path.realpath(full_path))
+                if realpath.lower() != fullpath_noext.lower():
+                    module_dir = os.path.dirname(realpath)
+                    mod_name = _splitext(
+                        os.path.basename(full_path))
+                    expected_dir = os.path.dirname(full_path)
+                    msg = ("%r module incorrectly imported from %r. Expected "
+                           "%r. 
Is this module globally installed?") + raise ImportError( + msg % (mod_name, module_dir, expected_dir)) + return self.loadTestsFromModule(module, pattern=pattern), False + elif os.path.isdir(full_path): + if (not namespace and + not os.path.isfile(os.path.join(full_path, '__init__.py'))): + return None, False + + load_tests = None + tests = None + name = self._get_name_from_path(full_path) + try: + package = self._get_module_from_name(name) + except case.SkipTest as e: + return _make_skipped_test(name, e, self.suiteClass), False + except: + error_case, error_message = \ + _make_failed_import_test(name, self.suiteClass) + self.errors.append(error_message) + return error_case, False + else: + load_tests = getattr(package, 'load_tests', None) + # Mark this package as being in load_tests (possibly ;)) + self._loading_packages.add(name) + try: + tests = self.loadTestsFromModule(package, pattern=pattern) + if load_tests is not None: + # loadTestsFromModule(package) has loaded tests for us. + return tests, False + return tests, True + finally: + self._loading_packages.discard(name) + else: + return None, False + + +defaultTestLoader = TestLoader() diff --git a/Python314_4_x64_Template/Lib/unittest/main.py b/Python314_4_x64_Template/Lib/unittest/main.py new file mode 100644 index 00000000..be99d93c --- /dev/null +++ b/Python314_4_x64_Template/Lib/unittest/main.py @@ -0,0 +1,280 @@ +"""Unittest main program""" + +import sys +import argparse +import os + +from . import loader, runner +from .signals import installHandler + +__unittest = True +_NO_TESTS_EXITCODE = 5 + +MAIN_EXAMPLES = """\ +Examples: + %(prog)s test_module - run tests from test_module + %(prog)s module.TestClass - run tests from module.TestClass + %(prog)s module.Class.test_method - run specified test method + %(prog)s path/to/test_file.py - run tests from test_file.py +""" + +MODULE_EXAMPLES = """\ +Examples: + %(prog)s - run default set of tests + %(prog)s MyTestSuite - run suite 'MyTestSuite' + %(prog)s MyTestCase.testSomething - run MyTestCase.testSomething + %(prog)s MyTestCase - run all 'test*' test methods + in MyTestCase +""" + +def _convert_name(name): + # on Linux / Mac OS X 'foo.PY' is not importable, but on + # Windows it is. Simpler to do a case insensitive match + # a better check would be to check that the name is a + # valid Python module name. + if os.path.isfile(name) and name.lower().endswith('.py'): + if os.path.isabs(name): + rel_path = os.path.relpath(name, os.getcwd()) + if os.path.isabs(rel_path) or rel_path.startswith(os.pardir): + return name + name = rel_path + # on Windows both '\' and '/' are used as path + # separators. Better to replace both than rely on os.path.sep + return os.path.normpath(name)[:-3].replace('\\', '.').replace('/', '.') + return name + +def _convert_names(names): + return [_convert_name(name) for name in names] + + +def _convert_select_pattern(pattern): + if not '*' in pattern: + pattern = '*%s*' % pattern + return pattern + + +class TestProgram(object): + """A command-line program that runs a set of tests; this is primarily + for making test modules conveniently executable. 
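+
+    For example, putting the following at the bottom of a test module makes
+    the module directly runnable with 'python test_module.py':
+
+        if __name__ == '__main__':
+            unittest.main()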
+ """ + # defaults for testing + module=None + verbosity = 1 + failfast = catchbreak = buffer = progName = warnings = testNamePatterns = None + _discovery_parser = None + + def __init__(self, module='__main__', defaultTest=None, argv=None, + testRunner=None, testLoader=loader.defaultTestLoader, + exit=True, verbosity=1, failfast=None, catchbreak=None, + buffer=None, warnings=None, *, tb_locals=False, + durations=None): + if isinstance(module, str): + self.module = __import__(module) + for part in module.split('.')[1:]: + self.module = getattr(self.module, part) + else: + self.module = module + if argv is None: + argv = sys.argv + + self.exit = exit + self.failfast = failfast + self.catchbreak = catchbreak + self.verbosity = verbosity + self.buffer = buffer + self.tb_locals = tb_locals + self.durations = durations + if warnings is None and not sys.warnoptions: + # even if DeprecationWarnings are ignored by default + # print them anyway unless other warnings settings are + # specified by the warnings arg or the -W python flag + self.warnings = 'default' + else: + # here self.warnings is set either to the value passed + # to the warnings args or to None. + # If the user didn't pass a value self.warnings will + # be None. This means that the behavior is unchanged + # and depends on the values passed to -W. + self.warnings = warnings + self.defaultTest = defaultTest + self.testRunner = testRunner + self.testLoader = testLoader + self.progName = os.path.basename(argv[0]) + self.parseArgs(argv) + self.runTests() + + def _print_help(self, *args, **kwargs): + if self.module is None: + print(self._main_parser.format_help()) + print(MAIN_EXAMPLES % {'prog': self.progName}) + self._discovery_parser.print_help() + else: + print(self._main_parser.format_help()) + print(MODULE_EXAMPLES % {'prog': self.progName}) + + def parseArgs(self, argv): + self._initArgParsers() + if self.module is None: + if len(argv) > 1 and argv[1].lower() == 'discover': + self._do_discovery(argv[2:]) + return + self._main_parser.parse_args(argv[1:], self) + if not self.tests: + # this allows "python -m unittest -v" to still work for + # test discovery. + self._do_discovery([]) + return + else: + self._main_parser.parse_args(argv[1:], self) + + if self.tests: + self.testNames = _convert_names(self.tests) + if __name__ == '__main__': + # to support python -m unittest ... 
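+                # (with self.module cleared, the listed names are resolved
+                # by import rather than as attributes of the __main__ module)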
+ self.module = None + elif self.defaultTest is None: + # createTests will load tests from self.module + self.testNames = None + elif isinstance(self.defaultTest, str): + self.testNames = (self.defaultTest,) + else: + self.testNames = list(self.defaultTest) + self.createTests() + + def createTests(self, from_discovery=False, Loader=None): + if self.testNamePatterns: + self.testLoader.testNamePatterns = self.testNamePatterns + if from_discovery: + loader = self.testLoader if Loader is None else Loader() + self.test = loader.discover(self.start, self.pattern, self.top) + elif self.testNames is None: + self.test = self.testLoader.loadTestsFromModule(self.module) + else: + self.test = self.testLoader.loadTestsFromNames(self.testNames, + self.module) + + def _initArgParsers(self): + parent_parser = self._getParentArgParser() + self._main_parser = self._getMainArgParser(parent_parser) + self._discovery_parser = self._getDiscoveryArgParser(parent_parser) + + def _getParentArgParser(self): + parser = argparse.ArgumentParser(add_help=False) + + parser.add_argument('-v', '--verbose', dest='verbosity', + action='store_const', const=2, + help='Verbose output') + parser.add_argument('-q', '--quiet', dest='verbosity', + action='store_const', const=0, + help='Quiet output') + parser.add_argument('--locals', dest='tb_locals', + action='store_true', + help='Show local variables in tracebacks') + parser.add_argument('--durations', dest='durations', type=int, + default=None, metavar="N", + help='Show the N slowest test cases (N=0 for all)') + if self.failfast is None: + parser.add_argument('-f', '--failfast', dest='failfast', + action='store_true', + help='Stop on first fail or error') + self.failfast = False + if self.catchbreak is None: + parser.add_argument('-c', '--catch', dest='catchbreak', + action='store_true', + help='Catch Ctrl-C and display results so far') + self.catchbreak = False + if self.buffer is None: + parser.add_argument('-b', '--buffer', dest='buffer', + action='store_true', + help='Buffer stdout and stderr during tests') + self.buffer = False + if self.testNamePatterns is None: + parser.add_argument('-k', dest='testNamePatterns', + action='append', type=_convert_select_pattern, + help='Only run tests which match the given substring') + self.testNamePatterns = [] + + return parser + + def _getMainArgParser(self, parent): + parser = argparse.ArgumentParser(parents=[parent], color=True) + parser.prog = self.progName + parser.print_help = self._print_help + + parser.add_argument('tests', nargs='*', + help='a list of any number of test modules, ' + 'classes and test methods.') + + return parser + + def _getDiscoveryArgParser(self, parent): + parser = argparse.ArgumentParser(parents=[parent], color=True) + parser.prog = '%s discover' % self.progName + parser.epilog = ('For test discovery all test modules must be ' + 'importable from the top level directory of the ' + 'project.') + + parser.add_argument('-s', '--start-directory', dest='start', + help="Directory to start discovery ('.' default)") + parser.add_argument('-p', '--pattern', dest='pattern', + help="Pattern to match tests ('test*.py' default)") + parser.add_argument('-t', '--top-level-directory', dest='top', + help='Top level directory of project (defaults to ' + 'start directory)') + for arg in ('start', 'pattern', 'top'): + parser.add_argument(arg, nargs='?', + default=argparse.SUPPRESS, + help=argparse.SUPPRESS) + + return parser + + def _do_discovery(self, argv, Loader=None): + self.start = '.' 
+ self.pattern = 'test*.py' + self.top = None + if argv is not None: + # handle command line args for test discovery + if self._discovery_parser is None: + # for testing + self._initArgParsers() + self._discovery_parser.parse_args(argv, self) + + self.createTests(from_discovery=True, Loader=Loader) + + def runTests(self): + if self.catchbreak: + installHandler() + if self.testRunner is None: + self.testRunner = runner.TextTestRunner + if isinstance(self.testRunner, type): + try: + try: + testRunner = self.testRunner(verbosity=self.verbosity, + failfast=self.failfast, + buffer=self.buffer, + warnings=self.warnings, + tb_locals=self.tb_locals, + durations=self.durations) + except TypeError: + # didn't accept the tb_locals or durations argument + testRunner = self.testRunner(verbosity=self.verbosity, + failfast=self.failfast, + buffer=self.buffer, + warnings=self.warnings) + except TypeError: + # didn't accept the verbosity, buffer or failfast arguments + testRunner = self.testRunner() + else: + # it is assumed to be a TestRunner instance + testRunner = self.testRunner + self.result = testRunner.run(self.test) + if self.exit: + if not self.result.wasSuccessful(): + sys.exit(1) + elif self.result.testsRun == 0 and len(self.result.skipped) == 0: + sys.exit(_NO_TESTS_EXITCODE) + else: + sys.exit(0) + + +main = TestProgram diff --git a/Python314_4_x64_Template/Lib/unittest/mock.py b/Python314_4_x64_Template/Lib/unittest/mock.py new file mode 100644 index 00000000..92b81d15 --- /dev/null +++ b/Python314_4_x64_Template/Lib/unittest/mock.py @@ -0,0 +1,3204 @@ +# mock.py +# Test tools for mocking and patching. +# Maintained by Michael Foord +# Backport for other versions of Python available from +# https://pypi.org/project/mock + +__all__ = ( + 'Mock', + 'MagicMock', + 'patch', + 'sentinel', + 'DEFAULT', + 'ANY', + 'call', + 'create_autospec', + 'AsyncMock', + 'ThreadingMock', + 'FILTER_DIR', + 'NonCallableMock', + 'NonCallableMagicMock', + 'mock_open', + 'PropertyMock', + 'seal', +) + + +import asyncio +import contextlib +import io +import inspect +import pprint +import sys +import builtins +import pkgutil +from inspect import iscoroutinefunction +import threading +from annotationlib import Format +from dataclasses import fields, is_dataclass +from types import CodeType, ModuleType, MethodType +from unittest.util import safe_repr +from functools import wraps, partial +from threading import RLock + + +class InvalidSpecError(Exception): + """Indicates that an invalid value was used as a mock spec.""" + + +_builtins = {name for name in dir(builtins) if not name.startswith('_')} + +FILTER_DIR = True + +# Workaround for issue #12370 +# Without this, the __class__ properties wouldn't be set correctly +_safe_super = super + +def _is_async_obj(obj): + if _is_instance_mock(obj) and not isinstance(obj, AsyncMock): + return False + if hasattr(obj, '__func__'): + obj = getattr(obj, '__func__') + return iscoroutinefunction(obj) or inspect.isawaitable(obj) + + +def _is_async_func(func): + if getattr(func, '__code__', None): + return iscoroutinefunction(func) + else: + return False + + +def _is_instance_mock(obj): + # can't use isinstance on Mock objects because they override __class__ + # The base class for all mocks is NonCallableMock + return issubclass(type(obj), NonCallableMock) + + +def _is_exception(obj): + return ( + isinstance(obj, BaseException) or + isinstance(obj, type) and issubclass(obj, BaseException) + ) + + +def _extract_mock(obj): + # Autospecced functions will return a FunctionType with "mock" 
attribute + # which is the actual mock object that needs to be used. + if isinstance(obj, FunctionTypes) and hasattr(obj, 'mock'): + return obj.mock + else: + return obj + + +def _get_signature_object(func, as_instance, eat_self): + """ + Given an arbitrary, possibly callable object, try to create a suitable + signature object. + Return a (reduced func, signature) tuple, or None. + """ + if isinstance(func, type) and not as_instance: + # If it's a type and should be modelled as a type, use __init__. + func = func.__init__ + # Skip the `self` argument in __init__ + eat_self = True + elif isinstance(func, (classmethod, staticmethod)): + if isinstance(func, classmethod): + # Skip the `cls` argument of a class method + eat_self = True + # Use the original decorated method to extract the correct function signature + func = func.__func__ + elif not isinstance(func, FunctionTypes): + # If we really want to model an instance of the passed type, + # __call__ should be looked up, not __init__. + try: + func = func.__call__ + except AttributeError: + return None + if eat_self: + sig_func = partial(func, None) + else: + sig_func = func + try: + return func, inspect.signature(sig_func, annotation_format=Format.FORWARDREF) + except ValueError: + # Certain callable types are not supported by inspect.signature() + return None + + +def _check_signature(func, mock, skipfirst, instance=False): + sig = _get_signature_object(func, instance, skipfirst) + if sig is None: + return + func, sig = sig + def checksig(self, /, *args, **kwargs): + sig.bind(*args, **kwargs) + _copy_func_details(func, checksig) + type(mock)._mock_check_sig = checksig + type(mock).__signature__ = sig + + +def _copy_func_details(func, funcopy): + # we explicitly don't copy func.__dict__ into this copy as it would + # expose original attributes that should be mocked + for attribute in ( + '__name__', '__doc__', '__text_signature__', + '__module__', '__defaults__', '__kwdefaults__', + ): + try: + setattr(funcopy, attribute, getattr(func, attribute)) + except AttributeError: + pass + + +def _callable(obj): + if isinstance(obj, type): + return True + if isinstance(obj, (staticmethod, classmethod, MethodType)): + return _callable(obj.__func__) + if getattr(obj, '__call__', None) is not None: + return True + return False + + +def _is_list(obj): + # checks for list or tuples + # XXXX badly named! + return type(obj) in (list, tuple) + + +def _instance_callable(obj): + """Given an object, return True if the object is callable. + For classes, return True if instances would be callable.""" + if not isinstance(obj, type): + # already an instance + return getattr(obj, '__call__', None) is not None + + # *could* be broken by a class overriding __mro__ or __dict__ via + # a metaclass + for base in (obj,) + obj.__mro__: + if base.__dict__.get('__call__') is not None: + return True + return False + + +def _set_signature(mock, original, instance=False): + # creates a function with signature (*args, **kwargs) that delegates to a + # mock. It still does signature checking by calling a lambda with the same + # signature as the original. 
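+    # The delegate is generated with exec() so that it carries the original
+    # callable's name and can be introspected like the callable it replaces.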
+ + skipfirst = isinstance(original, type) + result = _get_signature_object(original, instance, skipfirst) + if result is None: + return mock + func, sig = result + def checksig(*args, **kwargs): + sig.bind(*args, **kwargs) + _copy_func_details(func, checksig) + + name = original.__name__ + if not name.isidentifier(): + name = 'funcopy' + context = {'_checksig_': checksig, 'mock': mock} + src = """def %s(*args, **kwargs): + _checksig_(*args, **kwargs) + return mock(*args, **kwargs)""" % name + exec (src, context) + funcopy = context[name] + _setup_func(funcopy, mock, sig) + return funcopy + +def _set_async_signature(mock, original, instance=False, is_async_mock=False): + # creates an async function with signature (*args, **kwargs) that delegates to a + # mock. It still does signature checking by calling a lambda with the same + # signature as the original. + + skipfirst = isinstance(original, type) + func, sig = _get_signature_object(original, instance, skipfirst) + def checksig(*args, **kwargs): + sig.bind(*args, **kwargs) + _copy_func_details(func, checksig) + + name = original.__name__ + context = {'_checksig_': checksig, 'mock': mock} + src = """async def %s(*args, **kwargs): + _checksig_(*args, **kwargs) + return await mock(*args, **kwargs)""" % name + exec (src, context) + funcopy = context[name] + _setup_func(funcopy, mock, sig) + _setup_async_mock(funcopy) + return funcopy + + +def _setup_func(funcopy, mock, sig): + funcopy.mock = mock + + def assert_called_with(*args, **kwargs): + return mock.assert_called_with(*args, **kwargs) + def assert_called(*args, **kwargs): + return mock.assert_called(*args, **kwargs) + def assert_not_called(*args, **kwargs): + return mock.assert_not_called(*args, **kwargs) + def assert_called_once(*args, **kwargs): + return mock.assert_called_once(*args, **kwargs) + def assert_called_once_with(*args, **kwargs): + return mock.assert_called_once_with(*args, **kwargs) + def assert_has_calls(*args, **kwargs): + return mock.assert_has_calls(*args, **kwargs) + def assert_any_call(*args, **kwargs): + return mock.assert_any_call(*args, **kwargs) + def reset_mock(): + funcopy.method_calls = _CallList() + funcopy.mock_calls = _CallList() + mock.reset_mock() + ret = funcopy.return_value + if _is_instance_mock(ret) and not ret is mock: + ret.reset_mock() + + funcopy.called = False + funcopy.call_count = 0 + funcopy.call_args = None + funcopy.call_args_list = _CallList() + funcopy.method_calls = _CallList() + funcopy.mock_calls = _CallList() + + funcopy.return_value = mock.return_value + funcopy.side_effect = mock.side_effect + funcopy._mock_children = mock._mock_children + + funcopy.assert_called_with = assert_called_with + funcopy.assert_called_once_with = assert_called_once_with + funcopy.assert_has_calls = assert_has_calls + funcopy.assert_any_call = assert_any_call + funcopy.reset_mock = reset_mock + funcopy.assert_called = assert_called + funcopy.assert_not_called = assert_not_called + funcopy.assert_called_once = assert_called_once + funcopy.__signature__ = sig + + mock._mock_delegate = funcopy + + +def _setup_async_mock(mock): + mock._is_coroutine = asyncio.coroutines._is_coroutine + mock.await_count = 0 + mock.await_args = None + mock.await_args_list = _CallList() + + # Mock is not configured yet so the attributes are set + # to a function and then the corresponding mock helper function + # is called when the helper is accessed similar to _setup_func. 
+ def wrapper(attr, /, *args, **kwargs): + return getattr(mock.mock, attr)(*args, **kwargs) + + for attribute in ('assert_awaited', + 'assert_awaited_once', + 'assert_awaited_with', + 'assert_awaited_once_with', + 'assert_any_await', + 'assert_has_awaits', + 'assert_not_awaited'): + + # setattr(mock, attribute, wrapper) causes late binding + # hence attribute will always be the last value in the loop + # Use partial(wrapper, attribute) to ensure the attribute is bound + # correctly. + setattr(mock, attribute, partial(wrapper, attribute)) + + +def _is_magic(name): + return '__%s__' % name[2:-2] == name + + +class _SentinelObject(object): + "A unique, named, sentinel object." + def __init__(self, name): + self.name = name + + def __repr__(self): + return 'sentinel.%s' % self.name + + def __reduce__(self): + return 'sentinel.%s' % self.name + + +class _Sentinel(object): + """Access attributes to return a named object, usable as a sentinel.""" + def __init__(self): + self._sentinels = {} + + def __getattr__(self, name): + if name == '__bases__': + # Without this help(unittest.mock) raises an exception + raise AttributeError + return self._sentinels.setdefault(name, _SentinelObject(name)) + + def __reduce__(self): + return 'sentinel' + + +sentinel = _Sentinel() + +DEFAULT = sentinel.DEFAULT +_missing = sentinel.MISSING +_deleted = sentinel.DELETED + + +_allowed_names = { + 'return_value', '_mock_return_value', 'side_effect', + '_mock_side_effect', '_mock_parent', '_mock_new_parent', + '_mock_name', '_mock_new_name' +} + + +def _delegating_property(name): + _allowed_names.add(name) + _the_name = '_mock_' + name + def _get(self, name=name, _the_name=_the_name): + sig = self._mock_delegate + if sig is None: + return getattr(self, _the_name) + return getattr(sig, name) + def _set(self, value, name=name, _the_name=_the_name): + sig = self._mock_delegate + if sig is None: + self.__dict__[_the_name] = value + else: + setattr(sig, name, value) + + return property(_get, _set) + + + +class _CallList(list): + + def __contains__(self, value): + if not isinstance(value, list): + return list.__contains__(self, value) + len_value = len(value) + len_self = len(self) + if len_value > len_self: + return False + + for i in range(0, len_self - len_value + 1): + sub_list = self[i:i+len_value] + if sub_list == value: + return True + return False + + def __repr__(self): + return pprint.pformat(list(self)) + + +def _check_and_set_parent(parent, value, name, new_name): + value = _extract_mock(value) + + if not _is_instance_mock(value): + return False + if ((value._mock_name or value._mock_new_name) or + (value._mock_parent is not None) or + (value._mock_new_parent is not None)): + return False + + _parent = parent + while _parent is not None: + # setting a mock (value) as a child or return value of itself + # should not modify the mock + if _parent is value: + return False + _parent = _parent._mock_new_parent + + if new_name: + value._mock_new_parent = parent + value._mock_new_name = new_name + if name: + value._mock_parent = parent + value._mock_name = name + return True + +# Internal class to identify if we wrapped an iterator object or not. 
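+# Non-callable, non-exception side_effect iterables are wrapped in it so that
+# iteration state is kept across repeated reads of the side_effect property.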
+class _MockIter(object): + def __init__(self, obj): + self.obj = iter(obj) + def __next__(self): + return next(self.obj) + +class Base(object): + _mock_return_value = DEFAULT + _mock_side_effect = None + def __init__(self, /, *args, **kwargs): + pass + + + +class NonCallableMock(Base): + """A non-callable version of `Mock`""" + + # Store a mutex as a class attribute in order to protect concurrent access + # to mock attributes. Using a class attribute allows all NonCallableMock + # instances to share the mutex for simplicity. + # + # See https://github.com/python/cpython/issues/98624 for why this is + # necessary. + _lock = RLock() + + def __new__( + cls, spec=None, wraps=None, name=None, spec_set=None, + parent=None, _spec_state=None, _new_name='', _new_parent=None, + _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs + ): + # every instance has its own class + # so we can create magic methods on the + # class without stomping on other mocks + bases = (cls,) + if not issubclass(cls, AsyncMockMixin): + # Check if spec is an async object or function + spec_arg = spec_set or spec + if spec_arg is not None and _is_async_obj(spec_arg): + bases = (AsyncMockMixin, cls) + new = type(cls.__name__, bases, {'__doc__': cls.__doc__}) + instance = _safe_super(NonCallableMock, cls).__new__(new) + return instance + + + def __init__( + self, spec=None, wraps=None, name=None, spec_set=None, + parent=None, _spec_state=None, _new_name='', _new_parent=None, + _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs + ): + if _new_parent is None: + _new_parent = parent + + __dict__ = self.__dict__ + __dict__['_mock_parent'] = parent + __dict__['_mock_name'] = name + __dict__['_mock_new_name'] = _new_name + __dict__['_mock_new_parent'] = _new_parent + __dict__['_mock_sealed'] = False + + if spec_set is not None: + spec = spec_set + spec_set = True + if _eat_self is None: + _eat_self = parent is not None + + self._mock_add_spec(spec, spec_set, _spec_as_instance, _eat_self) + + __dict__['_mock_children'] = {} + __dict__['_mock_wraps'] = wraps + __dict__['_mock_delegate'] = None + + __dict__['_mock_called'] = False + __dict__['_mock_call_args'] = None + __dict__['_mock_call_count'] = 0 + __dict__['_mock_call_args_list'] = _CallList() + __dict__['_mock_mock_calls'] = _CallList() + + __dict__['method_calls'] = _CallList() + __dict__['_mock_unsafe'] = unsafe + + if kwargs: + self.configure_mock(**kwargs) + + _safe_super(NonCallableMock, self).__init__( + spec, wraps, name, spec_set, parent, + _spec_state + ) + + + def attach_mock(self, mock, attribute): + """ + Attach a mock as an attribute of this one, replacing its name and + parent. Calls to the attached mock will be recorded in the + `method_calls` and `mock_calls` attributes of this one.""" + inner_mock = _extract_mock(mock) + + inner_mock._mock_parent = None + inner_mock._mock_new_parent = None + inner_mock._mock_name = '' + inner_mock._mock_new_name = None + + setattr(self, attribute, mock) + + + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. + + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + + + def _mock_add_spec(self, spec, spec_set, _spec_as_instance=False, + _eat_self=False): + if _is_instance_mock(spec): + raise InvalidSpecError(f'Cannot spec a Mock object. 
[object={spec!r}]') + + _spec_class = None + _spec_signature = None + _spec_asyncs = [] + + if spec is not None and not _is_list(spec): + if isinstance(spec, type): + _spec_class = spec + else: + _spec_class = type(spec) + res = _get_signature_object(spec, + _spec_as_instance, _eat_self) + _spec_signature = res and res[1] + + spec_list = dir(spec) + + for attr in spec_list: + static_attr = inspect.getattr_static(spec, attr, None) + unwrapped_attr = static_attr + try: + unwrapped_attr = inspect.unwrap(unwrapped_attr) + except ValueError: + pass + if iscoroutinefunction(unwrapped_attr): + _spec_asyncs.append(attr) + + spec = spec_list + + __dict__ = self.__dict__ + __dict__['_spec_class'] = _spec_class + __dict__['_spec_set'] = spec_set + __dict__['_spec_signature'] = _spec_signature + __dict__['_mock_methods'] = spec + __dict__['_spec_asyncs'] = _spec_asyncs + + def _mock_extend_spec_methods(self, spec_methods): + methods = self.__dict__.get('_mock_methods') or [] + methods.extend(spec_methods) + self.__dict__['_mock_methods'] = methods + + def __get_return_value(self): + ret = self._mock_return_value + if self._mock_delegate is not None: + ret = self._mock_delegate.return_value + + if ret is DEFAULT and self._mock_wraps is None: + ret = self._get_child_mock( + _new_parent=self, _new_name='()' + ) + self.return_value = ret + return ret + + + def __set_return_value(self, value): + if self._mock_delegate is not None: + self._mock_delegate.return_value = value + else: + self._mock_return_value = value + _check_and_set_parent(self, value, None, '()') + + __return_value_doc = "The value to be returned when the mock is called." + return_value = property(__get_return_value, __set_return_value, + __return_value_doc) + + + @property + def __class__(self): + if self._spec_class is None: + return type(self) + return self._spec_class + + called = _delegating_property('called') + call_count = _delegating_property('call_count') + call_args = _delegating_property('call_args') + call_args_list = _delegating_property('call_args_list') + mock_calls = _delegating_property('mock_calls') + + + def __get_side_effect(self): + delegated = self._mock_delegate + if delegated is None: + return self._mock_side_effect + sf = delegated.side_effect + if (sf is not None and not callable(sf) + and not isinstance(sf, _MockIter) and not _is_exception(sf)): + sf = _MockIter(sf) + delegated.side_effect = sf + return sf + + def __set_side_effect(self, value): + value = _try_iter(value) + delegated = self._mock_delegate + if delegated is None: + self._mock_side_effect = value + else: + delegated.side_effect = value + + side_effect = property(__get_side_effect, __set_side_effect) + + + def reset_mock(self, visited=None, *, + return_value: bool = False, + side_effect: bool = False): + "Restore the mock object to its initial state." 
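+        # 'visited' records the ids of mocks that have already been reset so
+        # that cycles through children and return values cannot recurse forever.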
+ if visited is None: + visited = [] + if id(self) in visited: + return + visited.append(id(self)) + + self.called = False + self.call_args = None + self.call_count = 0 + self.mock_calls = _CallList() + self.call_args_list = _CallList() + self.method_calls = _CallList() + + if return_value: + self._mock_return_value = DEFAULT + if side_effect: + self._mock_side_effect = None + + for child in self._mock_children.values(): + if isinstance(child, _SpecState) or child is _deleted: + continue + child.reset_mock(visited, return_value=return_value, side_effect=side_effect) + + ret = self._mock_return_value + if _is_instance_mock(ret) and ret is not self: + ret.reset_mock(visited) + + + def configure_mock(self, /, **kwargs): + """Set attributes on the mock through keyword arguments. + + Attributes plus return values and side effects can be set on child + mocks using standard dot notation and unpacking a dictionary in the + method call: + + >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError} + >>> mock.configure_mock(**attrs)""" + for arg, val in sorted(kwargs.items(), + # we sort on the number of dots so that + # attributes are set before we set attributes on + # attributes + key=lambda entry: entry[0].count('.')): + args = arg.split('.') + final = args.pop() + obj = self + for entry in args: + obj = getattr(obj, entry) + setattr(obj, final, val) + + + def __getattr__(self, name): + if name in {'_mock_methods', '_mock_unsafe'}: + raise AttributeError(name) + elif self._mock_methods is not None: + if name not in self._mock_methods or name in _all_magics: + raise AttributeError("Mock object has no attribute %r" % name) + elif _is_magic(name): + raise AttributeError(name) + if not self._mock_unsafe and (not self._mock_methods or name not in self._mock_methods): + if name.startswith(('assert', 'assret', 'asert', 'aseert', 'assrt')) or name in _ATTRIB_DENY_LIST: + raise AttributeError( + f"{name!r} is not a valid assertion. Use a spec " + f"for the mock if {name!r} is meant to be an attribute.") + + with NonCallableMock._lock: + result = self._mock_children.get(name) + if result is _deleted: + raise AttributeError(name) + elif result is None: + wraps = None + if self._mock_wraps is not None: + # XXXX should we get the attribute without triggering code + # execution? + wraps = getattr(self._mock_wraps, name) + + result = self._get_child_mock( + parent=self, name=name, wraps=wraps, _new_name=name, + _new_parent=self + ) + self._mock_children[name] = result + + elif isinstance(result, _SpecState): + try: + result = create_autospec( + result.spec, result.spec_set, result.instance, + result.parent, result.name + ) + except InvalidSpecError: + target_name = self.__dict__['_mock_name'] or self + raise InvalidSpecError( + f'Cannot autospec attr {name!r} from target ' + f'{target_name!r} as it has already been mocked out. ' + f'[target={self!r}, attr={result.spec!r}]') + self._mock_children[name] = result + + return result + + + def _extract_mock_name(self): + _name_list = [self._mock_new_name] + _parent = self._mock_new_parent + last = self + + dot = '.' + if _name_list == ['()']: + dot = '' + + while _parent is not None: + last = _parent + + _name_list.append(_parent._mock_new_name + dot) + dot = '.' + if _parent._mock_new_name == '()': + dot = '' + + _parent = _parent._mock_new_parent + + _name_list = list(reversed(_name_list)) + _first = last._mock_name or 'mock' + if len(_name_list) > 1: + if _name_list[1] not in ('()', '().'): + _first += '.' 
+ _name_list[0] = _first + return ''.join(_name_list) + + def __repr__(self): + name = self._extract_mock_name() + + name_string = '' + if name not in ('mock', 'mock.'): + name_string = ' name=%r' % name + + spec_string = '' + if self._spec_class is not None: + spec_string = ' spec=%r' + if self._spec_set: + spec_string = ' spec_set=%r' + spec_string = spec_string % self._spec_class.__name__ + return "<%s%s%s id='%s'>" % ( + type(self).__name__, + name_string, + spec_string, + id(self) + ) + + + def __dir__(self): + """Filter the output of `dir(mock)` to only useful members.""" + if not FILTER_DIR: + return object.__dir__(self) + + extras = self._mock_methods or [] + from_type = dir(type(self)) + from_dict = list(self.__dict__) + from_child_mocks = [ + m_name for m_name, m_value in self._mock_children.items() + if m_value is not _deleted] + + from_type = [e for e in from_type if not e.startswith('_')] + from_dict = [e for e in from_dict if not e.startswith('_') or + _is_magic(e)] + return sorted(set(extras + from_type + from_dict + from_child_mocks)) + + + def __setattr__(self, name, value): + if name in _allowed_names: + # property setters go through here + return object.__setattr__(self, name, value) + elif (self._spec_set and self._mock_methods is not None and + name not in self._mock_methods and + name not in self.__dict__): + raise AttributeError("Mock object has no attribute '%s'" % name) + elif name in _unsupported_magics: + msg = 'Attempting to set unsupported magic method %r.' % name + raise AttributeError(msg) + elif name in _all_magics: + if self._mock_methods is not None and name not in self._mock_methods: + raise AttributeError("Mock object has no attribute '%s'" % name) + + if not _is_instance_mock(value): + setattr(type(self), name, _get_method(name, value)) + original = value + value = lambda *args, **kw: original(self, *args, **kw) + else: + # only set _new_name and not name so that mock_calls is tracked + # but not method calls + _check_and_set_parent(self, value, None, name) + setattr(type(self), name, value) + self._mock_children[name] = value + elif name == '__class__': + self._spec_class = value + return + else: + if _check_and_set_parent(self, value, name, name): + self._mock_children[name] = value + + if self._mock_sealed and not hasattr(self, name): + mock_name = f'{self._extract_mock_name()}.{name}' + raise AttributeError(f'Cannot set {mock_name}') + + if isinstance(value, PropertyMock): + self.__dict__[name] = value + return + return object.__setattr__(self, name, value) + + + def __delattr__(self, name): + if name in _all_magics and name in type(self).__dict__: + delattr(type(self), name) + if name not in self.__dict__: + # for magic methods that are still MagicProxy objects and + # not set on the instance itself + return + + obj = self._mock_children.get(name, _missing) + if name in self.__dict__: + _safe_super(NonCallableMock, self).__delattr__(name) + elif obj is _deleted: + raise AttributeError(name) + if obj is not _missing: + del self._mock_children[name] + self._mock_children[name] = _deleted + + + def _format_mock_call_signature(self, args, kwargs): + name = self._mock_name or 'mock' + return _format_call_signature(name, args, kwargs) + + + def _format_mock_failure_message(self, args, kwargs, action='call'): + message = 'expected %s not found.\nExpected: %s\n Actual: %s' + expected_string = self._format_mock_call_signature(args, kwargs) + call_args = self.call_args + actual_string = self._format_mock_call_signature(*call_args) + return message % (action, 
expected_string, actual_string) + + + def _get_call_signature_from_name(self, name): + """ + * If call objects are asserted against a method/function like obj.meth1 + then there could be no name for the call object to lookup. Hence just + return the spec_signature of the method/function being asserted against. + * If the name is not empty then remove () and split by '.' to get + list of names to iterate through the children until a potential + match is found. A child mock is created only during attribute access + so if we get a _SpecState then no attributes of the spec were accessed + and can be safely exited. + """ + if not name: + return self._spec_signature + + sig = None + names = name.replace('()', '').split('.') + children = self._mock_children + + for name in names: + child = children.get(name) + if child is None or isinstance(child, _SpecState): + break + else: + # If an autospecced object is attached using attach_mock the + # child would be a function with mock object as attribute from + # which signature has to be derived. + child = _extract_mock(child) + children = child._mock_children + sig = child._spec_signature + + return sig + + + def _call_matcher(self, _call): + """ + Given a call (or simply an (args, kwargs) tuple), return a + comparison key suitable for matching with other calls. + This is a best effort method which relies on the spec's signature, + if available, or falls back on the arguments themselves. + """ + + if isinstance(_call, tuple) and len(_call) > 2: + sig = self._get_call_signature_from_name(_call[0]) + else: + sig = self._spec_signature + + if sig is not None: + if len(_call) == 2: + name = '' + args, kwargs = _call + else: + name, args, kwargs = _call + try: + bound_call = sig.bind(*args, **kwargs) + return call(name, bound_call.args, bound_call.kwargs) + except TypeError as e: + return e.with_traceback(None) + else: + return _call + + def assert_not_called(self): + """assert that the mock was never called. + """ + if self.call_count != 0: + msg = ("Expected '%s' to not have been called. Called %s times.%s" + % (self._mock_name or 'mock', + self.call_count, + self._calls_repr())) + raise AssertionError(msg) + + def assert_called(self): + """assert that the mock was called at least once + """ + if self.call_count == 0: + msg = ("Expected '%s' to have been called." % + (self._mock_name or 'mock')) + raise AssertionError(msg) + + def assert_called_once(self): + """assert that the mock was called only once. + """ + if not self.call_count == 1: + msg = ("Expected '%s' to have been called once. Called %s times.%s" + % (self._mock_name or 'mock', + self.call_count, + self._calls_repr())) + raise AssertionError(msg) + + def assert_called_with(self, /, *args, **kwargs): + """assert that the last call was made with the specified arguments. + + Raises an AssertionError if the args and keyword args passed in are + different to the last call to the mock.""" + if self.call_args is None: + expected = self._format_mock_call_signature(args, kwargs) + actual = 'not called.' 
+ error_message = ('expected call not found.\nExpected: %s\n Actual: %s' + % (expected, actual)) + raise AssertionError(error_message) + + def _error_message(): + msg = self._format_mock_failure_message(args, kwargs) + return msg + expected = self._call_matcher(_Call((args, kwargs), two=True)) + actual = self._call_matcher(self.call_args) + if actual != expected: + cause = expected if isinstance(expected, Exception) else None + raise AssertionError(_error_message()) from cause + + + def assert_called_once_with(self, /, *args, **kwargs): + """assert that the mock was called exactly once and that that call was + with the specified arguments.""" + if not self.call_count == 1: + msg = ("Expected '%s' to be called once. Called %s times.%s" + % (self._mock_name or 'mock', + self.call_count, + self._calls_repr())) + raise AssertionError(msg) + return self.assert_called_with(*args, **kwargs) + + + def assert_has_calls(self, calls, any_order=False): + """assert the mock has been called with the specified calls. + The `mock_calls` list is checked for the calls. + + If `any_order` is False (the default) then the calls must be + sequential. There can be extra calls before or after the + specified calls. + + If `any_order` is True then the calls can be in any order, but + they must all appear in `mock_calls`.""" + expected = [self._call_matcher(c) for c in calls] + cause = next((e for e in expected if isinstance(e, Exception)), None) + all_calls = _CallList(self._call_matcher(c) for c in self.mock_calls) + if not any_order: + if expected not in all_calls: + if cause is None: + problem = 'Calls not found.' + else: + problem = ('Error processing expected calls.\n' + 'Errors: {}').format( + [e if isinstance(e, Exception) else None + for e in expected]) + raise AssertionError( + f'{problem}\n' + f'Expected: {_CallList(calls)}\n' + f' Actual: {safe_repr(self.mock_calls)}' + ) from cause + return + + all_calls = list(all_calls) + + not_found = [] + for kall in expected: + try: + all_calls.remove(kall) + except ValueError: + not_found.append(kall) + if not_found: + raise AssertionError( + '%r does not contain all of %r in its call list, ' + 'found %r instead' % (self._mock_name or 'mock', + tuple(not_found), all_calls) + ) from cause + + + def assert_any_call(self, /, *args, **kwargs): + """assert the mock has been called with the specified arguments. + + The assert passes if the mock has *ever* been called, unlike + `assert_called_with` and `assert_called_once_with` that only pass if + the call is the most recent one.""" + expected = self._call_matcher(_Call((args, kwargs), two=True)) + cause = expected if isinstance(expected, Exception) else None + actual = [self._call_matcher(c) for c in self.call_args_list] + if cause or expected not in _AnyComparer(actual): + expected_string = self._format_mock_call_signature(args, kwargs) + raise AssertionError( + '%s call not found' % expected_string + ) from cause + + + def _get_child_mock(self, /, **kw): + """Create the child mocks for attributes and return value. + By default child mocks will be the same type as the parent. + Subclasses of Mock may want to override this to customize the way + child mocks are made. 
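+
+        A sketch of such an override (the subclass name is illustrative):
+
+            class PlainChildrenMock(MagicMock):
+                def _get_child_mock(self, /, **kw):
+                    return MagicMock(**kw)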
+ + For non-callable mocks the callable variant will be used (rather than + any custom subclass).""" + if self._mock_sealed: + attribute = f".{kw['name']}" if "name" in kw else "()" + mock_name = self._extract_mock_name() + attribute + raise AttributeError(mock_name) + + _new_name = kw.get("_new_name") + if _new_name in self.__dict__['_spec_asyncs']: + return AsyncMock(**kw) + + _type = type(self) + if issubclass(_type, MagicMock) and _new_name in _async_method_magics: + # Any asynchronous magic becomes an AsyncMock + klass = AsyncMock + elif issubclass(_type, AsyncMockMixin): + if (_new_name in _all_sync_magics or + self._mock_methods and _new_name in self._mock_methods): + # Any synchronous method on AsyncMock becomes a MagicMock + klass = MagicMock + else: + klass = AsyncMock + elif not issubclass(_type, CallableMixin): + if issubclass(_type, NonCallableMagicMock): + klass = MagicMock + elif issubclass(_type, NonCallableMock): + klass = Mock + else: + klass = _type.__mro__[1] + return klass(**kw) + + + def _calls_repr(self): + """Renders self.mock_calls as a string. + + Example: "\nCalls: [call(1), call(2)]." + + If self.mock_calls is empty, an empty string is returned. The + output will be truncated if very long. + """ + if not self.mock_calls: + return "" + return f"\nCalls: {safe_repr(self.mock_calls)}." + + +# Denylist for forbidden attribute names in safe mode +_ATTRIB_DENY_LIST = frozenset({ + name.removeprefix("assert_") + for name in dir(NonCallableMock) + if name.startswith("assert_") +}) + + +class _AnyComparer(list): + """A list which checks if it contains a call which may have an + argument of ANY, flipping the components of item and self from + their traditional locations so that ANY is guaranteed to be on + the left.""" + def __contains__(self, item): + for _call in self: + assert len(item) == len(_call) + if all([ + expected == actual + for expected, actual in zip(item, _call) + ]): + return True + return False + + +def _try_iter(obj): + if obj is None: + return obj + if _is_exception(obj): + return obj + if _callable(obj): + return obj + try: + return iter(obj) + except TypeError: + # XXXX backwards compatibility + # but this will blow up on first call - so maybe we should fail early? 
+ return obj + + +class CallableMixin(Base): + + def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, + wraps=None, name=None, spec_set=None, parent=None, + _spec_state=None, _new_name='', _new_parent=None, **kwargs): + self.__dict__['_mock_return_value'] = return_value + _safe_super(CallableMixin, self).__init__( + spec, wraps, name, spec_set, parent, + _spec_state, _new_name, _new_parent, **kwargs + ) + + self.side_effect = side_effect + + + def _mock_check_sig(self, /, *args, **kwargs): + # stub method that can be replaced with one with a specific signature + pass + + + def __call__(self, /, *args, **kwargs): + # can't use self in-case a function / method we are mocking uses self + # in the signature + self._mock_check_sig(*args, **kwargs) + self._increment_mock_call(*args, **kwargs) + return self._mock_call(*args, **kwargs) + + + def _mock_call(self, /, *args, **kwargs): + return self._execute_mock_call(*args, **kwargs) + + def _increment_mock_call(self, /, *args, **kwargs): + self.called = True + + # handle call_args + # needs to be set here so assertions on call arguments pass before + # execution in the case of awaited calls + with NonCallableMock._lock: + # Lock is used here so that call_args_list and call_count are + # set atomically otherwise it is possible that by the time call_count + # is set another thread may have appended to call_args_list. + # The rest of this function relies on list.append being atomic and + # skips locking. + _call = _Call((args, kwargs), two=True) + self.call_args = _call + self.call_args_list.append(_call) + self.call_count = len(self.call_args_list) + + # initial stuff for method_calls: + do_method_calls = self._mock_parent is not None + method_call_name = self._mock_name + + # initial stuff for mock_calls: + mock_call_name = self._mock_new_name + is_a_call = mock_call_name == '()' + self.mock_calls.append(_Call(('', args, kwargs))) + + # follow up the chain of mocks: + _new_parent = self._mock_new_parent + while _new_parent is not None: + + # handle method_calls: + if do_method_calls: + _new_parent.method_calls.append(_Call((method_call_name, args, kwargs))) + do_method_calls = _new_parent._mock_parent is not None + if do_method_calls: + method_call_name = _new_parent._mock_name + '.' + method_call_name + + # handle mock_calls: + this_mock_call = _Call((mock_call_name, args, kwargs)) + _new_parent.mock_calls.append(this_mock_call) + + if _new_parent._mock_new_name: + if is_a_call: + dot = '' + else: + dot = '.' 
+ is_a_call = _new_parent._mock_new_name == '()' + mock_call_name = _new_parent._mock_new_name + dot + mock_call_name + + # follow the parental chain: + _new_parent = _new_parent._mock_new_parent + + def _execute_mock_call(self, /, *args, **kwargs): + # separate from _increment_mock_call so that awaited functions are + # executed separately from their call, also AsyncMock overrides this method + + effect = self.side_effect + if effect is not None: + if _is_exception(effect): + raise effect + elif not _callable(effect): + result = next(effect) + if _is_exception(result): + raise result + else: + result = effect(*args, **kwargs) + + if result is not DEFAULT: + return result + + if self._mock_return_value is not DEFAULT: + return self.return_value + + if self._mock_delegate and self._mock_delegate.return_value is not DEFAULT: + return self.return_value + + if self._mock_wraps is not None: + return self._mock_wraps(*args, **kwargs) + + return self.return_value + + + +class Mock(CallableMixin, NonCallableMock): + """ + Create a new `Mock` object. `Mock` takes several optional arguments + that specify the behaviour of the Mock object: + + * `spec`: This can be either a list of strings or an existing object (a + class or instance) that acts as the specification for the mock object. If + you pass in an object then a list of strings is formed by calling dir on + the object (excluding unsupported magic attributes and methods). Accessing + any attribute not in this list will raise an `AttributeError`. + + If `spec` is an object (rather than a list of strings) then + `mock.__class__` returns the class of the spec object. This allows mocks + to pass `isinstance` tests. + + * `spec_set`: A stricter variant of `spec`. If used, attempting to *set* + or get an attribute on the mock that isn't on the object passed as + `spec_set` will raise an `AttributeError`. + + * `side_effect`: A function to be called whenever the Mock is called. See + the `side_effect` attribute. Useful for raising exceptions or + dynamically changing return values. The function is called with the same + arguments as the mock, and unless it returns `DEFAULT`, the return + value of this function is used as the return value. + + If `side_effect` is an iterable then each call to the mock will return + the next value from the iterable. If any of the members of the iterable + are exceptions they will be raised instead of returned. + + * `return_value`: The value returned when the mock is called. By default + this is a new Mock (created on first access). See the + `return_value` attribute. + + * `unsafe`: By default, accessing any attribute whose name starts with + *assert*, *assret*, *asert*, *aseert*, or *assrt* raises an AttributeError. + Additionally, an AttributeError is raised when accessing + attributes that match the name of an assertion method without the prefix + `assert_`, e.g. accessing `called_once` instead of `assert_called_once`. + Passing `unsafe=True` will allow access to these attributes. + + * `wraps`: Item for the mock object to wrap. If `wraps` is not None then + calling the Mock will pass the call through to the wrapped object + (returning the real result). Attribute access on the mock will return a + Mock object that wraps the corresponding attribute of the wrapped object + (so attempting to access an attribute that doesn't exist will raise an + `AttributeError`). + + If the mock has an explicit `return_value` set then calls are not passed + to the wrapped object and the `return_value` is returned instead. 
+ + * `name`: If the mock has a name then it will be used in the repr of the + mock. This can be useful for debugging. The name is propagated to child + mocks. + + Mocks can also be called with arbitrary keyword arguments. These will be + used to set attributes on the mock after it is created. + """ + + +# _check_spec_arg_typos takes kwargs from commands like patch and checks that +# they don't contain common misspellings of arguments related to autospeccing. +def _check_spec_arg_typos(kwargs_to_check): + typos = ("autospect", "auto_spec", "set_spec") + for typo in typos: + if typo in kwargs_to_check: + raise RuntimeError( + f"{typo!r} might be a typo; use unsafe=True if this is intended" + ) + + +class _patch(object): + + attribute_name = None + _active_patches = [] + + def __init__( + self, getter, attribute, new, spec, create, + spec_set, autospec, new_callable, kwargs, *, unsafe=False + ): + if new_callable is not None: + if new is not DEFAULT: + raise ValueError( + "Cannot use 'new' and 'new_callable' together" + ) + if autospec is not None: + raise ValueError( + "Cannot use 'autospec' and 'new_callable' together" + ) + if not unsafe: + _check_spec_arg_typos(kwargs) + if _is_instance_mock(spec): + raise InvalidSpecError( + f'Cannot spec attr {attribute!r} as the spec ' + f'has already been mocked out. [spec={spec!r}]') + if _is_instance_mock(spec_set): + raise InvalidSpecError( + f'Cannot spec attr {attribute!r} as the spec_set ' + f'target has already been mocked out. [spec_set={spec_set!r}]') + + self.getter = getter + self.attribute = attribute + self.new = new + self.new_callable = new_callable + self.spec = spec + self.create = create + self.has_local = False + self.spec_set = spec_set + self.autospec = autospec + self.kwargs = kwargs + self.additional_patchers = [] + self.is_started = False + + + def copy(self): + patcher = _patch( + self.getter, self.attribute, self.new, self.spec, + self.create, self.spec_set, + self.autospec, self.new_callable, self.kwargs + ) + patcher.attribute_name = self.attribute_name + patcher.additional_patchers = [ + p.copy() for p in self.additional_patchers + ] + return patcher + + + def __call__(self, func): + if isinstance(func, type): + return self.decorate_class(func) + if inspect.iscoroutinefunction(func): + return self.decorate_async_callable(func) + return self.decorate_callable(func) + + + def decorate_class(self, klass): + for attr in dir(klass): + if not attr.startswith(patch.TEST_PREFIX): + continue + + attr_value = getattr(klass, attr) + if not hasattr(attr_value, "__call__"): + continue + + patcher = self.copy() + setattr(klass, attr, patcher(attr_value)) + return klass + + + @contextlib.contextmanager + def decoration_helper(self, patched, args, keywargs): + extra_args = [] + with contextlib.ExitStack() as exit_stack: + for patching in patched.patchings: + arg = exit_stack.enter_context(patching) + if patching.attribute_name is not None: + keywargs.update(arg) + elif patching.new is DEFAULT: + extra_args.append(arg) + + args += tuple(extra_args) + yield (args, keywargs) + + + def decorate_callable(self, func): + # NB. Keep the method in sync with decorate_async_callable() + if hasattr(func, 'patchings'): + func.patchings.append(self) + return func + + @wraps(func) + def patched(*args, **keywargs): + with self.decoration_helper(patched, + args, + keywargs) as (newargs, newkeywargs): + return func(*newargs, **newkeywargs) + + patched.patchings = [self] + return patched + + + def decorate_async_callable(self, func): + # NB. 
Keep the method in sync with decorate_callable() + if hasattr(func, 'patchings'): + func.patchings.append(self) + return func + + @wraps(func) + async def patched(*args, **keywargs): + with self.decoration_helper(patched, + args, + keywargs) as (newargs, newkeywargs): + return await func(*newargs, **newkeywargs) + + patched.patchings = [self] + return patched + + + def get_original(self): + target = self.getter() + name = self.attribute + + original = DEFAULT + local = False + + try: + original = target.__dict__[name] + except (AttributeError, KeyError): + original = getattr(target, name, DEFAULT) + else: + local = True + + if name in _builtins and isinstance(target, ModuleType): + self.create = True + + if not self.create and original is DEFAULT: + raise AttributeError( + "%s does not have the attribute %r" % (target, name) + ) + return original, local + + + def __enter__(self): + """Perform the patch.""" + if self.is_started: + raise RuntimeError("Patch is already started") + + new, spec, spec_set = self.new, self.spec, self.spec_set + autospec, kwargs = self.autospec, self.kwargs + new_callable = self.new_callable + self.target = self.getter() + + # normalise False to None + if spec is False: + spec = None + if spec_set is False: + spec_set = None + if autospec is False: + autospec = None + + if spec is not None and autospec is not None: + raise TypeError("Can't specify spec and autospec") + if ((spec is not None or autospec is not None) and + spec_set not in (True, None)): + raise TypeError("Can't provide explicit spec_set *and* spec or autospec") + + original, local = self.get_original() + + if new is DEFAULT and autospec is None: + inherit = False + if spec is True: + # set spec to the object we are replacing + spec = original + if spec_set is True: + spec_set = original + spec = None + elif spec is not None: + if spec_set is True: + spec_set = spec + spec = None + elif spec_set is True: + spec_set = original + + if spec is not None or spec_set is not None: + if original is DEFAULT: + raise TypeError("Can't use 'spec' with create=True") + if isinstance(original, type): + # If we're patching out a class and there is a spec + inherit = True + + # Determine the Klass to use + if new_callable is not None: + Klass = new_callable + elif spec is None and _is_async_obj(original): + Klass = AsyncMock + elif spec is not None or spec_set is not None: + this_spec = spec + if spec_set is not None: + this_spec = spec_set + if _is_list(this_spec): + not_callable = '__call__' not in this_spec + else: + not_callable = not callable(this_spec) + if _is_async_obj(this_spec): + Klass = AsyncMock + elif not_callable: + Klass = NonCallableMagicMock + else: + Klass = MagicMock + else: + Klass = MagicMock + + _kwargs = {} + if spec is not None: + _kwargs['spec'] = spec + if spec_set is not None: + _kwargs['spec_set'] = spec_set + + # add a name to mocks + if (isinstance(Klass, type) and + issubclass(Klass, NonCallableMock) and self.attribute): + _kwargs['name'] = self.attribute + + _kwargs.update(kwargs) + new = Klass(**_kwargs) + + if inherit and _is_instance_mock(new): + # we can only tell if the instance should be callable if the + # spec is not a list + this_spec = spec + if spec_set is not None: + this_spec = spec_set + if (not _is_list(this_spec) and not + _instance_callable(this_spec)): + Klass = NonCallableMagicMock + + _kwargs.pop('name') + new.return_value = Klass(_new_parent=new, _new_name='()', + **_kwargs) + elif autospec is not None: + # spec is ignored, new *must* be default, spec_set is 
treated + # as a boolean. Should we check spec is not None and that spec_set + # is a bool? + if new is not DEFAULT: + raise TypeError( + "autospec creates the mock for you. Can't specify " + "autospec and new." + ) + if original is DEFAULT: + raise TypeError("Can't use 'autospec' with create=True") + spec_set = bool(spec_set) + if autospec is True: + autospec = original + + if _is_instance_mock(self.target): + raise InvalidSpecError( + f'Cannot autospec attr {self.attribute!r} as the patch ' + f'target has already been mocked out. ' + f'[target={self.target!r}, attr={autospec!r}]') + if _is_instance_mock(autospec): + target_name = getattr(self.target, '__name__', self.target) + raise InvalidSpecError( + f'Cannot autospec attr {self.attribute!r} from target ' + f'{target_name!r} as it has already been mocked out. ' + f'[target={self.target!r}, attr={autospec!r}]') + + new = create_autospec(autospec, spec_set=spec_set, + _name=self.attribute, **kwargs) + elif kwargs: + # can't set keyword args when we aren't creating the mock + # XXXX If new is a Mock we could call new.configure_mock(**kwargs) + raise TypeError("Can't pass kwargs to a mock we aren't creating") + + new_attr = new + + self.temp_original = original + self.is_local = local + self._exit_stack = contextlib.ExitStack() + self.is_started = True + try: + setattr(self.target, self.attribute, new_attr) + if self.attribute_name is not None: + extra_args = {} + if self.new is DEFAULT: + extra_args[self.attribute_name] = new + for patching in self.additional_patchers: + arg = self._exit_stack.enter_context(patching) + if patching.new is DEFAULT: + extra_args.update(arg) + return extra_args + + return new + except: + if not self.__exit__(*sys.exc_info()): + raise + + def __exit__(self, *exc_info): + """Undo the patch.""" + if not self.is_started: + return + + if self.is_local and self.temp_original is not DEFAULT: + setattr(self.target, self.attribute, self.temp_original) + else: + delattr(self.target, self.attribute) + if not self.create and (not hasattr(self.target, self.attribute) or + self.attribute in ('__doc__', '__module__', + '__defaults__', '__annotations__', + '__kwdefaults__')): + # needed for proxy objects like django settings + setattr(self.target, self.attribute, self.temp_original) + + del self.temp_original + del self.is_local + del self.target + exit_stack = self._exit_stack + del self._exit_stack + self.is_started = False + return exit_stack.__exit__(*exc_info) + + + def start(self): + """Activate a patch, returning any created mock.""" + result = self.__enter__() + self._active_patches.append(self) + return result + + + def stop(self): + """Stop an active patch.""" + try: + self._active_patches.remove(self) + except ValueError: + # If the patch hasn't been started this will fail + return None + + return self.__exit__(None, None, None) + + + +def _get_target(target): + try: + target, attribute = target.rsplit('.', 1) + except (TypeError, ValueError, AttributeError): + raise TypeError( + f"Need a valid target to patch. You supplied: {target!r}") + return partial(pkgutil.resolve_name, target), attribute + + +def _patch_object( + target, attribute, new=DEFAULT, spec=None, + create=False, spec_set=None, autospec=None, + new_callable=None, *, unsafe=False, **kwargs + ): + """ + patch the named member (`attribute`) on an object (`target`) with a mock + object. + + `patch.object` can be used as a decorator, class decorator or a context + manager. 
Arguments `new`, `spec`, `create`, `spec_set`, + `autospec` and `new_callable` have the same meaning as for `patch`. Like + `patch`, `patch.object` takes arbitrary keyword arguments for configuring + the mock object it creates. + + When used as a class decorator `patch.object` honours `patch.TEST_PREFIX` + for choosing which methods to wrap. + """ + if type(target) is str: + raise TypeError( + f"{target!r} must be the actual object to be patched, not a str" + ) + getter = lambda: target + return _patch( + getter, attribute, new, spec, create, + spec_set, autospec, new_callable, kwargs, unsafe=unsafe + ) + + +def _patch_multiple(target, spec=None, create=False, spec_set=None, + autospec=None, new_callable=None, **kwargs): + """Perform multiple patches in a single call. It takes the object to be + patched (either as an object or a string to fetch the object by importing) + and keyword arguments for the patches:: + + with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'): + ... + + Use `DEFAULT` as the value if you want `patch.multiple` to create + mocks for you. In this case the created mocks are passed into a decorated + function by keyword, and a dictionary is returned when `patch.multiple` is + used as a context manager. + + `patch.multiple` can be used as a decorator, class decorator or a context + manager. The arguments `spec`, `spec_set`, `create`, + `autospec` and `new_callable` have the same meaning as for `patch`. These + arguments will be applied to *all* patches done by `patch.multiple`. + + When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX` + for choosing which methods to wrap. + """ + if type(target) is str: + getter = partial(pkgutil.resolve_name, target) + else: + getter = lambda: target + + if not kwargs: + raise ValueError( + 'Must supply at least one keyword argument with patch.multiple' + ) + # need to wrap in a list for python 3, where items is a view + items = list(kwargs.items()) + attribute, new = items[0] + patcher = _patch( + getter, attribute, new, spec, create, spec_set, + autospec, new_callable, {} + ) + patcher.attribute_name = attribute + for attribute, new in items[1:]: + this_patcher = _patch( + getter, attribute, new, spec, create, spec_set, + autospec, new_callable, {} + ) + this_patcher.attribute_name = attribute + patcher.additional_patchers.append(this_patcher) + return patcher + + +def patch( + target, new=DEFAULT, spec=None, create=False, + spec_set=None, autospec=None, new_callable=None, *, unsafe=False, **kwargs + ): + """ + `patch` acts as a function decorator, class decorator or a context + manager. Inside the body of the function or with statement, the `target` + is patched with a `new` object. When the function/with statement exits + the patch is undone. + + If `new` is omitted, then the target is replaced with an + `AsyncMock` if the patched object is an async function or a + `MagicMock` otherwise. If `patch` is used as a decorator and `new` is + omitted, the created mock is passed in as an extra argument to the + decorated function. If `patch` is used as a context manager the created + mock is returned by the context manager. + + `target` should be a string in the form `'package.module.ClassName'`. The + `target` is imported and the specified object replaced with the `new` + object, so the `target` must be importable from the environment you are + calling `patch` from. The target is imported when the decorated function + is executed, not at decoration time. 
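A minimal sketch of the context-manager form, using `os.getcwd` as the target::

    import os
    from unittest import mock

    with mock.patch('os.getcwd', return_value='/tmp') as fake_cwd:
        assert os.getcwd() == '/tmp'    # calls are routed to the MagicMock
    fake_cwd.assert_called_once_with()  # the real os.getcwd is restored here

The same target string works in the decorator form, where the created mock is
passed to the decorated function as an extra argument.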
+ + The `spec` and `spec_set` keyword arguments are passed to the `MagicMock` + if patch is creating one for you. + + In addition you can pass `spec=True` or `spec_set=True`, which causes + patch to pass in the object being mocked as the spec/spec_set object. + + `new_callable` allows you to specify a different class, or callable object, + that will be called to create the `new` object. By default `AsyncMock` is + used for async functions and `MagicMock` for the rest. + + A more powerful form of `spec` is `autospec`. If you set `autospec=True` + then the mock will be created with a spec from the object being replaced. + All attributes of the mock will also have the spec of the corresponding + attribute of the object being replaced. Methods and functions being + mocked will have their arguments checked and will raise a `TypeError` if + they are called with the wrong signature. For mocks replacing a class, + their return value (the 'instance') will have the same spec as the class. + + Instead of `autospec=True` you can pass `autospec=some_object` to use an + arbitrary object as the spec instead of the one being replaced. + + By default `patch` will fail to replace attributes that don't exist. If + you pass in `create=True`, and the attribute doesn't exist, patch will + create the attribute for you when the patched function is called, and + delete it again afterwards. This is useful for writing tests against + attributes that your production code creates at runtime. It is off by + default because it can be dangerous. With it switched on you can write + passing tests against APIs that don't actually exist! + + Patch can be used as a `TestCase` class decorator. It works by + decorating each test method in the class. This reduces the boilerplate + code when your test methods share a common patchings set. `patch` finds + tests by looking for method names that start with `patch.TEST_PREFIX`. + By default this is `test`, which matches the way `unittest` finds tests. + You can specify an alternative prefix by setting `patch.TEST_PREFIX`. + + Patch can be used as a context manager, with the with statement. Here the + patching applies to the indented block after the with statement. If you + use "as" then the patched object will be bound to the name after the + "as"; very useful if `patch` is creating a mock object for you. + + Patch will raise a `RuntimeError` if passed some common misspellings of + the arguments autospec and spec_set. Pass the argument `unsafe` with the + value True to disable that check. + + `patch` takes arbitrary keyword arguments. These will be passed to + `AsyncMock` if the patched object is asynchronous, to `MagicMock` + otherwise or to `new_callable` if specified. + + `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are + available for alternate use-cases. + """ + getter, attribute = _get_target(target) + return _patch( + getter, attribute, new, spec, create, + spec_set, autospec, new_callable, kwargs, unsafe=unsafe + ) + + +class _patch_dict(object): + """ + Patch a dictionary, or dictionary like object, and restore the dictionary + to its original state after the test, where the restored dictionary is + a copy of the dictionary as it was before the test. + + `in_dict` can be a dictionary or a mapping like container. If it is a + mapping then it must at least support getting, setting and deleting items + plus iterating over keys. + + `in_dict` can also be a string specifying the name of the dictionary, which + will then be fetched by importing it. 
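A minimal sketch of the context-manager form, assuming `API_KEY` is not
already present in the environment::

    import os
    from unittest import mock

    with mock.patch.dict(os.environ, {'API_KEY': 'dummy'}):
        assert os.environ['API_KEY'] == 'dummy'
    assert 'API_KEY' not in os.environ  # original mapping restored on exit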
+ + `values` can be a dictionary of values to set in the dictionary. `values` + can also be an iterable of `(key, value)` pairs. + + If `clear` is True then the dictionary will be cleared before the new + values are set. + + `patch.dict` can also be called with arbitrary keyword arguments to set + values in the dictionary:: + + with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()): + ... + + `patch.dict` can be used as a context manager, decorator or class + decorator. When used as a class decorator `patch.dict` honours + `patch.TEST_PREFIX` for choosing which methods to wrap. + """ + + def __init__(self, in_dict, values=(), clear=False, **kwargs): + self.in_dict = in_dict + # support any argument supported by dict(...) constructor + self.values = dict(values) + self.values.update(kwargs) + self.clear = clear + self._original = None + + + def __call__(self, f): + if isinstance(f, type): + return self.decorate_class(f) + if inspect.iscoroutinefunction(f): + return self.decorate_async_callable(f) + return self.decorate_callable(f) + + + def decorate_callable(self, f): + @wraps(f) + def _inner(*args, **kw): + self._patch_dict() + try: + return f(*args, **kw) + finally: + self._unpatch_dict() + + return _inner + + + def decorate_async_callable(self, f): + @wraps(f) + async def _inner(*args, **kw): + self._patch_dict() + try: + return await f(*args, **kw) + finally: + self._unpatch_dict() + + return _inner + + + def decorate_class(self, klass): + for attr in dir(klass): + attr_value = getattr(klass, attr) + if (attr.startswith(patch.TEST_PREFIX) and + hasattr(attr_value, "__call__")): + decorator = _patch_dict(self.in_dict, self.values, self.clear) + decorated = decorator(attr_value) + setattr(klass, attr, decorated) + return klass + + + def __enter__(self): + """Patch the dict.""" + self._patch_dict() + return self.in_dict + + + def _patch_dict(self): + values = self.values + if isinstance(self.in_dict, str): + self.in_dict = pkgutil.resolve_name(self.in_dict) + in_dict = self.in_dict + clear = self.clear + + try: + original = in_dict.copy() + except AttributeError: + # dict like object with no copy method + # must support iteration over keys + original = {} + for key in in_dict: + original[key] = in_dict[key] + self._original = original + + if clear: + _clear_dict(in_dict) + + try: + in_dict.update(values) + except AttributeError: + # dict like object with no update method + for key in values: + in_dict[key] = values[key] + + + def _unpatch_dict(self): + in_dict = self.in_dict + original = self._original + + _clear_dict(in_dict) + + try: + in_dict.update(original) + except AttributeError: + for key in original: + in_dict[key] = original[key] + + + def __exit__(self, *args): + """Unpatch the dict.""" + if self._original is not None: + self._unpatch_dict() + return False + + + def start(self): + """Activate a patch, returning any created mock.""" + result = self.__enter__() + _patch._active_patches.append(self) + return result + + + def stop(self): + """Stop an active patch.""" + try: + _patch._active_patches.remove(self) + except ValueError: + # If the patch hasn't been started this will fail + return None + + return self.__exit__(None, None, None) + + +def _clear_dict(in_dict): + try: + in_dict.clear() + except AttributeError: + keys = list(in_dict) + for key in keys: + del in_dict[key] + + +def _patch_stopall(): + """Stop all active patches. 
LIFO to unroll nested patches.""" + for patch in reversed(_patch._active_patches): + patch.stop() + + +patch.object = _patch_object +patch.dict = _patch_dict +patch.multiple = _patch_multiple +patch.stopall = _patch_stopall +patch.TEST_PREFIX = 'test' + +magic_methods = ( + "lt le gt ge eq ne " + "getitem setitem delitem " + "len contains iter " + "hash str sizeof " + "enter exit " + # we added divmod and rdivmod here instead of numerics + # because there is no idivmod + "divmod rdivmod neg pos abs invert " + "complex int float index " + "round trunc floor ceil " + "bool next " + "fspath " + "aiter " +) + +numerics = ( + "add sub mul matmul truediv floordiv mod lshift rshift and xor or pow" +) +inplace = ' '.join('i%s' % n for n in numerics.split()) +right = ' '.join('r%s' % n for n in numerics.split()) + +# not including __prepare__, __instancecheck__, __subclasscheck__ +# (as they are metaclass methods) +# __del__ is not supported at all as it causes problems if it exists + +_non_defaults = { + '__get__', '__set__', '__delete__', '__reversed__', '__missing__', + '__reduce__', '__reduce_ex__', '__getinitargs__', '__getnewargs__', + '__getstate__', '__setstate__', '__getformat__', + '__repr__', '__dir__', '__subclasses__', '__format__', + '__getnewargs_ex__', +} + + +def _get_method(name, func): + "Turns a callable object (like a mock) into a real function" + def method(self, /, *args, **kw): + return func(self, *args, **kw) + method.__name__ = name + return method + + +_magics = { + '__%s__' % method for method in + ' '.join([magic_methods, numerics, inplace, right]).split() +} + +# Magic methods used for async `with` statements +_async_method_magics = {"__aenter__", "__aexit__", "__anext__"} +# Magic methods that are only used with async calls but are synchronous functions themselves +_sync_async_magics = {"__aiter__"} +_async_magics = _async_method_magics | _sync_async_magics + +_all_sync_magics = _magics | _non_defaults +_all_magics = _all_sync_magics | _async_magics + +_unsupported_magics = { + '__getattr__', '__setattr__', + '__init__', '__new__', '__prepare__', + '__instancecheck__', '__subclasscheck__', + '__del__' +} + +_calculate_return_value = { + '__hash__': lambda self: object.__hash__(self), + '__str__': lambda self: object.__str__(self), + '__sizeof__': lambda self: object.__sizeof__(self), + '__fspath__': lambda self: f"{type(self).__name__}/{self._extract_mock_name()}/{id(self)}", +} + +_return_values = { + '__lt__': NotImplemented, + '__gt__': NotImplemented, + '__le__': NotImplemented, + '__ge__': NotImplemented, + '__int__': 1, + '__contains__': False, + '__len__': 0, + '__exit__': False, + '__complex__': 1j, + '__float__': 1.0, + '__bool__': True, + '__index__': 1, + '__aexit__': False, +} + + +def _get_eq(self): + def __eq__(other): + ret_val = self.__eq__._mock_return_value + if ret_val is not DEFAULT: + return ret_val + if self is other: + return True + return NotImplemented + return __eq__ + +def _get_ne(self): + def __ne__(other): + if self.__ne__._mock_return_value is not DEFAULT: + return DEFAULT + if self is other: + return False + return NotImplemented + return __ne__ + +def _get_iter(self): + def __iter__(): + ret_val = self.__iter__._mock_return_value + if ret_val is DEFAULT: + return iter([]) + # if ret_val was already an iterator, then calling iter on it should + # return the iterator unchanged + return iter(ret_val) + return __iter__ + +def _get_async_iter(self): + def __aiter__(): + ret_val = self.__aiter__._mock_return_value + if ret_val is DEFAULT: + 
return _AsyncIterator(iter([])) + return _AsyncIterator(iter(ret_val)) + return __aiter__ + +_side_effect_methods = { + '__eq__': _get_eq, + '__ne__': _get_ne, + '__iter__': _get_iter, + '__aiter__': _get_async_iter +} + + + +def _set_return_value(mock, method, name): + fixed = _return_values.get(name, DEFAULT) + if fixed is not DEFAULT: + method.return_value = fixed + return + + return_calculator = _calculate_return_value.get(name) + if return_calculator is not None: + return_value = return_calculator(mock) + method.return_value = return_value + return + + side_effector = _side_effect_methods.get(name) + if side_effector is not None: + method.side_effect = side_effector(mock) + + + +class MagicMixin(Base): + def __init__(self, /, *args, **kw): + self._mock_set_magics() # make magic work for kwargs in init + _safe_super(MagicMixin, self).__init__(*args, **kw) + self._mock_set_magics() # fix magic broken by upper level init + + + def _mock_set_magics(self): + orig_magics = _magics | _async_method_magics + these_magics = orig_magics + + if getattr(self, "_mock_methods", None) is not None: + these_magics = orig_magics.intersection(self._mock_methods) + remove_magics = orig_magics - these_magics + + for entry in remove_magics: + if entry in type(self).__dict__: + # remove unneeded magic methods + delattr(self, entry) + + # don't overwrite existing attributes if called a second time + these_magics = these_magics - set(type(self).__dict__) + + _type = type(self) + for entry in these_magics: + setattr(_type, entry, MagicProxy(entry, self)) + + + +class NonCallableMagicMock(MagicMixin, NonCallableMock): + """A version of `MagicMock` that isn't callable.""" + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. + + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + self._mock_set_magics() + + +class AsyncMagicMixin(MagicMixin): + pass + + +class MagicMock(MagicMixin, Mock): + """ + MagicMock is a subclass of Mock with default implementations + of most of the magic methods. You can use MagicMock without having to + configure the magic methods yourself. + + If you use the `spec` or `spec_set` arguments then *only* magic + methods that exist in the spec will be created. + + Attributes and the return value of a `MagicMock` will also be `MagicMocks`. + """ + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. + + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + self._mock_set_magics() + + def reset_mock(self, /, *args, return_value: bool = False, **kwargs): + if ( + return_value + and self._mock_name + and _is_magic(self._mock_name) + ): + # Don't reset return values for magic methods, + # otherwise `m.__str__` will start + # to return `MagicMock` instances, instead of `str` instances. 
+ return_value = False + super().reset_mock(*args, return_value=return_value, **kwargs) + + +class MagicProxy(Base): + def __init__(self, name, parent): + self.name = name + self.parent = parent + + def create_mock(self): + entry = self.name + parent = self.parent + m = parent._get_child_mock(name=entry, _new_name=entry, + _new_parent=parent) + setattr(parent, entry, m) + _set_return_value(parent, m, entry) + return m + + def __get__(self, obj, _type=None): + return self.create_mock() + + +try: + _CODE_SIG = inspect.signature(partial(CodeType.__init__, None)) + _CODE_ATTRS = dir(CodeType) +except ValueError: + _CODE_SIG = None + + +class AsyncMockMixin(Base): + await_count = _delegating_property('await_count') + await_args = _delegating_property('await_args') + await_args_list = _delegating_property('await_args_list') + + def __init__(self, /, *args, **kwargs): + super().__init__(*args, **kwargs) + # iscoroutinefunction() checks _is_coroutine property to say if an + # object is a coroutine. Without this check it looks to see if it is a + # function/method, which in this case it is not (since it is an + # AsyncMock). + # It is set through __dict__ because when spec_set is True, this + # attribute is likely undefined. + self.__dict__['_is_coroutine'] = asyncio.coroutines._is_coroutine + self.__dict__['_mock_await_count'] = 0 + self.__dict__['_mock_await_args'] = None + self.__dict__['_mock_await_args_list'] = _CallList() + if _CODE_SIG: + code_mock = NonCallableMock(spec_set=_CODE_ATTRS) + code_mock.__dict__["_spec_class"] = CodeType + code_mock.__dict__["_spec_signature"] = _CODE_SIG + else: + code_mock = NonCallableMock(spec_set=CodeType) + code_mock.co_flags = ( + inspect.CO_COROUTINE + + inspect.CO_VARARGS + + inspect.CO_VARKEYWORDS + ) + code_mock.co_argcount = 0 + code_mock.co_varnames = ('args', 'kwargs') + code_mock.co_posonlyargcount = 0 + code_mock.co_kwonlyargcount = 0 + self.__dict__['__code__'] = code_mock + self.__dict__['__name__'] = 'AsyncMock' + self.__dict__['__defaults__'] = tuple() + self.__dict__['__kwdefaults__'] = {} + self.__dict__['__annotations__'] = None + + async def _execute_mock_call(self, /, *args, **kwargs): + # This is nearly just like super(), except for special handling + # of coroutines + + _call = _Call((args, kwargs), two=True) + self.await_count += 1 + self.await_args = _call + self.await_args_list.append(_call) + + effect = self.side_effect + if effect is not None: + if _is_exception(effect): + raise effect + elif not _callable(effect): + try: + result = next(effect) + except StopIteration: + # It is impossible to propagate a StopIteration + # through coroutines because of PEP 479 + raise StopAsyncIteration + if _is_exception(result): + raise result + elif iscoroutinefunction(effect): + result = await effect(*args, **kwargs) + else: + result = effect(*args, **kwargs) + + if result is not DEFAULT: + return result + + if self._mock_return_value is not DEFAULT: + return self.return_value + + if self._mock_wraps is not None: + if iscoroutinefunction(self._mock_wraps): + return await self._mock_wraps(*args, **kwargs) + return self._mock_wraps(*args, **kwargs) + + return self.return_value + + def assert_awaited(self): + """ + Assert that the mock was awaited at least once. + """ + if self.await_count == 0: + msg = f"Expected {self._mock_name or 'mock'} to have been awaited." + raise AssertionError(msg) + + def assert_awaited_once(self): + """ + Assert that the mock was awaited exactly once. 
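A minimal sketch of the intended use, driving the mock with `asyncio.run`::

    import asyncio
    from unittest import mock

    m = mock.AsyncMock(return_value=10)
    assert asyncio.run(m(1)) == 10  # awaiting the mock records the await
    m.assert_awaited_once()
    m.assert_awaited_once_with(1)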
+ """ + if not self.await_count == 1: + msg = (f"Expected {self._mock_name or 'mock'} to have been awaited once." + f" Awaited {self.await_count} times.") + raise AssertionError(msg) + + def assert_awaited_with(self, /, *args, **kwargs): + """ + Assert that the last await was with the specified arguments. + """ + if self.await_args is None: + expected = self._format_mock_call_signature(args, kwargs) + raise AssertionError(f'Expected await: {expected}\nNot awaited') + + def _error_message(): + msg = self._format_mock_failure_message(args, kwargs, action='await') + return msg + + expected = self._call_matcher(_Call((args, kwargs), two=True)) + actual = self._call_matcher(self.await_args) + if actual != expected: + cause = expected if isinstance(expected, Exception) else None + raise AssertionError(_error_message()) from cause + + def assert_awaited_once_with(self, /, *args, **kwargs): + """ + Assert that the mock was awaited exactly once and with the specified + arguments. + """ + if not self.await_count == 1: + msg = (f"Expected {self._mock_name or 'mock'} to have been awaited once." + f" Awaited {self.await_count} times.") + raise AssertionError(msg) + return self.assert_awaited_with(*args, **kwargs) + + def assert_any_await(self, /, *args, **kwargs): + """ + Assert the mock has ever been awaited with the specified arguments. + """ + expected = self._call_matcher(_Call((args, kwargs), two=True)) + cause = expected if isinstance(expected, Exception) else None + actual = [self._call_matcher(c) for c in self.await_args_list] + if cause or expected not in _AnyComparer(actual): + expected_string = self._format_mock_call_signature(args, kwargs) + raise AssertionError( + '%s await not found' % expected_string + ) from cause + + def assert_has_awaits(self, calls, any_order=False): + """ + Assert the mock has been awaited with the specified calls. + The :attr:`await_args_list` list is checked for the awaits. + + If `any_order` is False (the default) then the awaits must be + sequential. There can be extra calls before or after the + specified awaits. + + If `any_order` is True then the awaits can be in any order, but + they must all appear in :attr:`await_args_list`. + """ + expected = [self._call_matcher(c) for c in calls] + cause = next((e for e in expected if isinstance(e, Exception)), None) + all_awaits = _CallList(self._call_matcher(c) for c in self.await_args_list) + if not any_order: + if expected not in all_awaits: + if cause is None: + problem = 'Awaits not found.' + else: + problem = ('Error processing expected awaits.\n' + 'Errors: {}').format( + [e if isinstance(e, Exception) else None + for e in expected]) + raise AssertionError( + f'{problem}\n' + f'Expected: {_CallList(calls)}\n' + f'Actual: {self.await_args_list}' + ) from cause + return + + all_awaits = list(all_awaits) + + not_found = [] + for kall in expected: + try: + all_awaits.remove(kall) + except ValueError: + not_found.append(kall) + if not_found: + raise AssertionError( + '%r not all found in await list' % (tuple(not_found),) + ) from cause + + def assert_not_awaited(self): + """ + Assert that the mock was never awaited. + """ + if self.await_count != 0: + msg = (f"Expected {self._mock_name or 'mock'} to not have been awaited." 
+ f" Awaited {self.await_count} times.") + raise AssertionError(msg) + + def reset_mock(self, /, *args, **kwargs): + """ + See :func:`.Mock.reset_mock()` + """ + super().reset_mock(*args, **kwargs) + self.await_count = 0 + self.await_args = None + self.await_args_list = _CallList() + + +class AsyncMock(AsyncMockMixin, AsyncMagicMixin, Mock): + """ + Enhance :class:`Mock` with features allowing to mock + an async function. + + The :class:`AsyncMock` object will behave so the object is + recognized as an async function, and the result of a call is an awaitable: + + >>> mock = AsyncMock() + >>> inspect.iscoroutinefunction(mock) + True + >>> inspect.isawaitable(mock()) + True + + + The result of ``mock()`` is an async function which will have the outcome + of ``side_effect`` or ``return_value``: + + - if ``side_effect`` is a function, the async function will return the + result of that function, + - if ``side_effect`` is an exception, the async function will raise the + exception, + - if ``side_effect`` is an iterable, the async function will return the + next value of the iterable, however, if the sequence of result is + exhausted, ``StopIteration`` is raised immediately, + - if ``side_effect`` is not defined, the async function will return the + value defined by ``return_value``, hence, by default, the async function + returns a new :class:`AsyncMock` object. + + If the outcome of ``side_effect`` or ``return_value`` is an async function, + the mock async function obtained when the mock object is called will be this + async function itself (and not an async function returning an async + function). + + The test author can also specify a wrapped object with ``wraps``. In this + case, the :class:`Mock` object behavior is the same as with an + :class:`.Mock` object: the wrapped object may have methods + defined as async function functions. + + Based on Martin Richard's asynctest project. + """ + + +class _ANY(object): + "A helper object that compares equal to everything." + + def __eq__(self, other): + return True + + def __ne__(self, other): + return False + + def __repr__(self): + return '' + +ANY = _ANY() + + + +def _format_call_signature(name, args, kwargs): + message = '%s(%%s)' % name + formatted_args = '' + args_string = ', '.join([repr(arg) for arg in args]) + kwargs_string = ', '.join([ + '%s=%r' % (key, value) for key, value in kwargs.items() + ]) + if args_string: + formatted_args = args_string + if kwargs_string: + if formatted_args: + formatted_args += ', ' + formatted_args += kwargs_string + + return message % formatted_args + + + +class _Call(tuple): + """ + A tuple for holding the results of a call to a mock, either in the form + `(args, kwargs)` or `(name, args, kwargs)`. + + If args or kwargs are empty then a call tuple will compare equal to + a tuple without those values. This makes comparisons less verbose:: + + _Call(('name', (), {})) == ('name',) + _Call(('name', (1,), {})) == ('name', (1,)) + _Call(((), {'a': 'b'})) == ({'a': 'b'},) + + The `_Call` object provides a useful shortcut for comparing with call:: + + _Call(((1, 2), {'a': 3})) == call(1, 2, a=3) + _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3) + + If the _Call has no name then it will match any name. 
+ """ + def __new__(cls, value=(), name='', parent=None, two=False, + from_kall=True): + args = () + kwargs = {} + _len = len(value) + if _len == 3: + name, args, kwargs = value + elif _len == 2: + first, second = value + if isinstance(first, str): + name = first + if isinstance(second, tuple): + args = second + else: + kwargs = second + else: + args, kwargs = first, second + elif _len == 1: + value, = value + if isinstance(value, str): + name = value + elif isinstance(value, tuple): + args = value + else: + kwargs = value + + if two: + return tuple.__new__(cls, (args, kwargs)) + + return tuple.__new__(cls, (name, args, kwargs)) + + + def __init__(self, value=(), name=None, parent=None, two=False, + from_kall=True): + self._mock_name = name + self._mock_parent = parent + self._mock_from_kall = from_kall + + + def __eq__(self, other): + try: + len_other = len(other) + except TypeError: + return NotImplemented + + self_name = '' + if len(self) == 2: + self_args, self_kwargs = self + else: + self_name, self_args, self_kwargs = self + + if (getattr(self, '_mock_parent', None) and getattr(other, '_mock_parent', None) + and self._mock_parent != other._mock_parent): + return False + + other_name = '' + if len_other == 0: + other_args, other_kwargs = (), {} + elif len_other == 3: + other_name, other_args, other_kwargs = other + elif len_other == 1: + value, = other + if isinstance(value, tuple): + other_args = value + other_kwargs = {} + elif isinstance(value, str): + other_name = value + other_args, other_kwargs = (), {} + else: + other_args = () + other_kwargs = value + elif len_other == 2: + # could be (name, args) or (name, kwargs) or (args, kwargs) + first, second = other + if isinstance(first, str): + other_name = first + if isinstance(second, tuple): + other_args, other_kwargs = second, {} + else: + other_args, other_kwargs = (), second + else: + other_args, other_kwargs = first, second + else: + return False + + if self_name and other_name != self_name: + return False + + # this order is important for ANY to work! 
+ return (other_args, other_kwargs) == (self_args, self_kwargs) + + + __ne__ = object.__ne__ + + + def __call__(self, /, *args, **kwargs): + if self._mock_name is None: + return _Call(('', args, kwargs), name='()') + + name = self._mock_name + '()' + return _Call((self._mock_name, args, kwargs), name=name, parent=self) + + + def __getattr__(self, attr): + if self._mock_name is None: + return _Call(name=attr, from_kall=False) + name = '%s.%s' % (self._mock_name, attr) + return _Call(name=name, parent=self, from_kall=False) + + + def __getattribute__(self, attr): + if attr in tuple.__dict__: + raise AttributeError + return tuple.__getattribute__(self, attr) + + + def _get_call_arguments(self): + if len(self) == 2: + args, kwargs = self + else: + name, args, kwargs = self + + return args, kwargs + + @property + def args(self): + return self._get_call_arguments()[0] + + @property + def kwargs(self): + return self._get_call_arguments()[1] + + def __repr__(self): + if not self._mock_from_kall: + name = self._mock_name or 'call' + if name.startswith('()'): + name = 'call%s' % name + return name + + if len(self) == 2: + name = 'call' + args, kwargs = self + else: + name, args, kwargs = self + if not name: + name = 'call' + elif not name.startswith('()'): + name = 'call.%s' % name + else: + name = 'call%s' % name + return _format_call_signature(name, args, kwargs) + + + def call_list(self): + """For a call object that represents multiple calls, `call_list` + returns a list of all the intermediate calls as well as the + final call.""" + vals = [] + thing = self + while thing is not None: + if thing._mock_from_kall: + vals.append(thing) + thing = thing._mock_parent + return _CallList(reversed(vals)) + + +call = _Call(from_kall=False) + + +def create_autospec(spec, spec_set=False, instance=False, _parent=None, + _name=None, *, unsafe=False, **kwargs): + """Create a mock object using another object as a spec. Attributes on the + mock will use the corresponding attribute on the `spec` object as their + spec. + + Functions or methods being mocked will have their arguments checked + to check that they are called with the correct signature. + + If `spec_set` is True then attempting to set attributes that don't exist + on the spec object will raise an `AttributeError`. + + If a class is used as a spec then the return value of the mock (the + instance of the class) will have the same spec. You can use a class as the + spec for an instance object by passing `instance=True`. The returned mock + will only be callable if instances of the mock are callable. + + `create_autospec` will raise a `RuntimeError` if passed some common + misspellings of the arguments autospec and spec_set. Pass the argument + `unsafe` with the value True to disable that check. + + `create_autospec` also takes arbitrary keyword arguments that are passed to + the constructor of the created mock.""" + if _is_list(spec): + # can't pass a list instance to the mock constructor as it will be + # interpreted as a list of strings + spec = type(spec) + + is_type = isinstance(spec, type) + if _is_instance_mock(spec): + raise InvalidSpecError(f'Cannot autospec a Mock object. 
' + f'[object={spec!r}]') + is_async_func = _is_async_func(spec) + _kwargs = {'spec': spec} + + entries = [(entry, _missing) for entry in dir(spec)] + if is_type and instance and is_dataclass(spec): + is_dataclass_spec = True + dataclass_fields = fields(spec) + entries.extend((f.name, f.type) for f in dataclass_fields) + dataclass_spec_list = [f.name for f in dataclass_fields] + else: + is_dataclass_spec = False + + if spec_set: + _kwargs = {'spec_set': spec} + elif spec is None: + # None we mock with a normal mock without a spec + _kwargs = {} + if _kwargs and instance: + _kwargs['_spec_as_instance'] = True + if not unsafe: + _check_spec_arg_typos(kwargs) + + _name = kwargs.pop('name', _name) + _new_name = _name + if _parent is None: + # for a top level object no _new_name should be set + _new_name = '' + + _kwargs.update(kwargs) + + Klass = MagicMock + if inspect.isdatadescriptor(spec): + # descriptors don't have a spec + # because we don't know what type they return + _kwargs = {} + elif is_async_func: + if instance: + raise RuntimeError("Instance can not be True when create_autospec " + "is mocking an async function") + Klass = AsyncMock + elif not _callable(spec): + Klass = NonCallableMagicMock + elif is_type and instance and not _instance_callable(spec): + Klass = NonCallableMagicMock + + mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name, + name=_name, **_kwargs) + if is_dataclass_spec: + mock._mock_extend_spec_methods(dataclass_spec_list) + + if isinstance(spec, FunctionTypes): + # should only happen at the top level because we don't + # recurse for functions + if is_async_func: + mock = _set_async_signature(mock, spec) + else: + mock = _set_signature(mock, spec) + else: + _check_signature(spec, mock, is_type, instance) + + if _parent is not None and not instance: + _parent._mock_children[_name] = mock + + # Pop wraps from kwargs because it must not be passed to configure_mock. + wrapped = kwargs.pop('wraps', None) + if is_type and not instance and 'return_value' not in kwargs: + mock.return_value = create_autospec(spec, spec_set, instance=True, + _name='()', _parent=mock, + wraps=wrapped) + + for entry, original in entries: + if _is_magic(entry): + # MagicMock already does the useful magic methods for us + continue + + # XXXX do we need a better way of getting attributes without + # triggering code execution (?) Probably not - we need the actual + # object to mock it so we would rather trigger a property than mock + # the property descriptor. Likewise we want to mock out dynamically + # provided attributes. + # XXXX what about attributes that raise exceptions other than + # AttributeError on being fetched? + # we could be resilient against it, or catch and propagate the + # exception when the attribute is fetched from the mock + if original is _missing: + try: + original = getattr(spec, entry) + except AttributeError: + continue + + child_kwargs = {'spec': original} + # Wrap child attributes also. 
+ if wrapped and hasattr(wrapped, entry): + child_kwargs.update(wraps=original) + if spec_set: + child_kwargs = {'spec_set': original} + + if not isinstance(original, FunctionTypes): + new = _SpecState(original, spec_set, mock, entry, instance) + mock._mock_children[entry] = new + else: + parent = mock + if isinstance(spec, FunctionTypes): + parent = mock.mock + + skipfirst = _must_skip(spec, entry, is_type) + child_kwargs['_eat_self'] = skipfirst + if iscoroutinefunction(original): + child_klass = AsyncMock + else: + child_klass = MagicMock + new = child_klass(parent=parent, name=entry, _new_name=entry, + _new_parent=parent, **child_kwargs) + mock._mock_children[entry] = new + new.return_value = child_klass() + _check_signature(original, new, skipfirst=skipfirst) + + # so functions created with _set_signature become instance attributes, + # *plus* their underlying mock exists in _mock_children of the parent + # mock. Adding to _mock_children may be unnecessary where we are also + # setting as an instance attribute? + if isinstance(new, FunctionTypes): + setattr(mock, entry, new) + # kwargs are passed with respect to the parent mock so, they are not used + # for creating return_value of the parent mock. So, this condition + # should be true only for the parent mock if kwargs are given. + if _is_instance_mock(mock) and kwargs: + mock.configure_mock(**kwargs) + + return mock + + +def _must_skip(spec, entry, is_type): + """ + Return whether we should skip the first argument on spec's `entry` + attribute. + """ + if not isinstance(spec, type): + if entry in getattr(spec, '__dict__', {}): + # instance attribute - shouldn't skip + return False + spec = spec.__class__ + + for klass in spec.__mro__: + result = klass.__dict__.get(entry, DEFAULT) + if result is DEFAULT: + continue + if isinstance(result, (staticmethod, classmethod)): + return False + elif isinstance(result, FunctionTypes): + # Normal method => skip if looked up on type + # (if looked up on instance, self is already skipped) + return is_type + else: + return False + + # function is a dynamically provided attribute + return is_type + + +class _SpecState(object): + + def __init__(self, spec, spec_set=False, parent=None, + name=None, ids=None, instance=False): + self.spec = spec + self.ids = ids + self.spec_set = spec_set + self.parent = parent + self.instance = instance + self.name = name + + +FunctionTypes = ( + # python function + type(create_autospec), + # instance method + type(ANY.__eq__), +) + + +file_spec = None +open_spec = None + + +def _to_stream(read_data): + if isinstance(read_data, bytes): + return io.BytesIO(read_data) + else: + return io.StringIO(read_data) + + +def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + + `read_data` is a string for the `read`, `readline` and `readlines` of the + file handle to return. This is an empty string by default. 
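A minimal sketch of the usual pattern, patching the builtin `open`::

    from unittest import mock

    m = mock.mock_open(read_data='line1\nline2\n')
    with mock.patch('builtins.open', m):
        with open('/fake/path') as f:
            assert f.readline() == 'line1\n'
    m.assert_called_once_with('/fake/path')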
+ """ + _read_data = _to_stream(read_data) + _state = [_read_data, None] + + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return _state[0].readlines(*args, **kwargs) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return _state[0].read(*args, **kwargs) + + def _readline_side_effect(*args, **kwargs): + yield from _iter_side_effect() + while True: + yield _state[0].readline(*args, **kwargs) + + def _iter_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _state[0]: + yield line + + def _next_side_effect(): + if handle.readline.return_value is not None: + return handle.readline.return_value + return next(_state[0]) + + def _exit_side_effect(exctype, excinst, exctb): + handle.close() + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + global open_spec + if open_spec is None: + import _io + open_spec = list(set(dir(_io.open))) + if mock is None: + mock = MagicMock(name='open', spec=open_spec) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + _state[1] = _readline_side_effect() + handle.readline.side_effect = _state[1] + handle.readlines.side_effect = _readlines_side_effect + handle.__iter__.side_effect = _iter_side_effect + handle.__next__.side_effect = _next_side_effect + handle.__exit__.side_effect = _exit_side_effect + + def reset_data(*args, **kwargs): + _state[0] = _to_stream(read_data) + if handle.readline.side_effect == _state[1]: + # Only reset the side effect if the user hasn't overridden it. + _state[1] = _readline_side_effect() + handle.readline.side_effect = _state[1] + return DEFAULT + + mock.side_effect = reset_data + mock.return_value = handle + return mock + + +class PropertyMock(Mock): + """ + A mock intended to be used as a property, or other descriptor, on a class. + `PropertyMock` provides `__get__` and `__set__` methods so you can specify + a return value when it is fetched. + + Fetching a `PropertyMock` instance from an object calls the mock, with + no args. Setting it calls the mock with the value being set. 
+ """ + def _get_child_mock(self, /, **kwargs): + return MagicMock(**kwargs) + + def __get__(self, obj, obj_type=None): + return self() + def __set__(self, obj, val): + self(val) + + +_timeout_unset = sentinel.TIMEOUT_UNSET + +class ThreadingMixin(Base): + + DEFAULT_TIMEOUT = None + + def _get_child_mock(self, /, **kw): + if isinstance(kw.get("parent"), ThreadingMixin): + kw["timeout"] = kw["parent"]._mock_wait_timeout + elif isinstance(kw.get("_new_parent"), ThreadingMixin): + kw["timeout"] = kw["_new_parent"]._mock_wait_timeout + return super()._get_child_mock(**kw) + + def __init__(self, *args, timeout=_timeout_unset, **kwargs): + super().__init__(*args, **kwargs) + if timeout is _timeout_unset: + timeout = self.DEFAULT_TIMEOUT + self.__dict__["_mock_event"] = threading.Event() # Event for any call + self.__dict__["_mock_calls_events"] = [] # Events for each of the calls + self.__dict__["_mock_calls_events_lock"] = threading.Lock() + self.__dict__["_mock_wait_timeout"] = timeout + + def reset_mock(self, /, *args, **kwargs): + """ + See :func:`.Mock.reset_mock()` + """ + super().reset_mock(*args, **kwargs) + self.__dict__["_mock_event"] = threading.Event() + self.__dict__["_mock_calls_events"] = [] + + def __get_event(self, expected_args, expected_kwargs): + with self._mock_calls_events_lock: + for args, kwargs, event in self._mock_calls_events: + if (args, kwargs) == (expected_args, expected_kwargs): + return event + new_event = threading.Event() + self._mock_calls_events.append((expected_args, expected_kwargs, new_event)) + return new_event + + def _mock_call(self, *args, **kwargs): + ret_value = super()._mock_call(*args, **kwargs) + + call_event = self.__get_event(args, kwargs) + call_event.set() + + self._mock_event.set() + + return ret_value + + def wait_until_called(self, *, timeout=_timeout_unset): + """Wait until the mock object is called. + + `timeout` - time to wait for in seconds, waits forever otherwise. + Defaults to the constructor provided timeout. + Use None to block undefinetively. + """ + if timeout is _timeout_unset: + timeout = self._mock_wait_timeout + if not self._mock_event.wait(timeout=timeout): + msg = (f"{self._mock_name or 'mock'} was not called before" + f" timeout({timeout}).") + raise AssertionError(msg) + + def wait_until_any_call_with(self, *args, **kwargs): + """Wait until the mock object is called with given args. + + Waits for the timeout in seconds provided in the constructor. + """ + event = self.__get_event(args, kwargs) + if not event.wait(timeout=self._mock_wait_timeout): + expected_string = self._format_mock_call_signature(args, kwargs) + raise AssertionError(f'{expected_string} call not found') + + +class ThreadingMock(ThreadingMixin, MagicMixin, Mock): + """ + A mock that can be used to wait until on calls happening + in a different thread. + + The constructor can take a `timeout` argument which + controls the timeout in seconds for all `wait` calls of the mock. + + You can change the default timeout of all instances via the + `ThreadingMock.DEFAULT_TIMEOUT` attribute. + + If no timeout is set, it will block undefinetively. + """ + pass + + +def seal(mock): + """Disable the automatic generation of child mocks. + + Given an input Mock, seals it to ensure no further mocks will be generated + when accessing an attribute that was not already defined. 
+ + The operation recursively seals the mock passed in, meaning that + the mock itself, any mocks generated by accessing one of its attributes, + and all assigned mocks without a name or spec will be sealed. + """ + mock._mock_sealed = True + for attr in dir(mock): + try: + m = getattr(mock, attr) + except AttributeError: + continue + if not isinstance(m, NonCallableMock): + continue + if isinstance(m._mock_children.get(attr), _SpecState): + continue + if m._mock_new_parent is mock: + seal(m) + + +class _AsyncIterator: + """ + Wraps an iterator in an asynchronous iterator. + """ + def __init__(self, iterator): + self.iterator = iterator + code_mock = NonCallableMock(spec_set=CodeType) + code_mock.co_flags = inspect.CO_ITERABLE_COROUTINE + self.__dict__['__code__'] = code_mock + + async def __anext__(self): + try: + return next(self.iterator) + except StopIteration: + pass + raise StopAsyncIteration diff --git a/Python314_4_x64_Template/Lib/unittest/result.py b/Python314_4_x64_Template/Lib/unittest/result.py new file mode 100644 index 00000000..b8ea396d --- /dev/null +++ b/Python314_4_x64_Template/Lib/unittest/result.py @@ -0,0 +1,259 @@ +"""Test result object""" + +import io +import sys +import traceback + +from . import util +from functools import wraps + +__unittest = True + +def failfast(method): + @wraps(method) + def inner(self, *args, **kw): + if getattr(self, 'failfast', False): + self.stop() + return method(self, *args, **kw) + return inner + +STDOUT_LINE = '\nStdout:\n%s' +STDERR_LINE = '\nStderr:\n%s' + + +class TestResult(object): + """Holder for test result information. + + Test results are automatically managed by the TestCase and TestSuite + classes, and do not need to be explicitly manipulated by writers of tests. + + Each instance holds the total number of tests run, and collections of + failures and errors that occurred among those test runs. The collections + contain tuples of (testcase, exceptioninfo), where exceptioninfo is the + formatted traceback of the error that occurred. + """ + _previousTestClass = None + _testRunEntered = False + _moduleSetUpFailed = False + def __init__(self, stream=None, descriptions=None, verbosity=None): + self.failfast = False + self.failures = [] + self.errors = [] + self.testsRun = 0 + self.skipped = [] + self.expectedFailures = [] + self.unexpectedSuccesses = [] + self.collectedDurations = [] + self.shouldStop = False + self.buffer = False + self.tb_locals = False + self._stdout_buffer = None + self._stderr_buffer = None + self._original_stdout = sys.stdout + self._original_stderr = sys.stderr + self._mirrorOutput = False + + def printErrors(self): + "Called by TestRunner after test run" + + def startTest(self, test): + "Called when the given test is about to be run" + self.testsRun += 1 + self._mirrorOutput = False + self._setupStdout() + + def _setupStdout(self): + if self.buffer: + if self._stderr_buffer is None: + self._stderr_buffer = io.StringIO() + self._stdout_buffer = io.StringIO() + sys.stdout = self._stdout_buffer + sys.stderr = self._stderr_buffer + + def startTestRun(self): + """Called once before any tests are executed. + + See startTest for a method called before each test. 
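A minimal sketch of how a single test case drives these hooks::

    import unittest

    class Demo(unittest.TestCase):
        def test_fail(self):
            self.assertEqual(1, 2)

    result = unittest.TestResult()
    Demo('test_fail').run(result)  # calls startTest/addFailure/stopTest
    assert result.testsRun == 1
    assert len(result.failures) == 1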
+ """ + + def stopTest(self, test): + """Called when the given test has been run""" + self._restoreStdout() + self._mirrorOutput = False + + def _restoreStdout(self): + if self.buffer: + if self._mirrorOutput: + output = sys.stdout.getvalue() + error = sys.stderr.getvalue() + if output: + if not output.endswith('\n'): + output += '\n' + self._original_stdout.write(STDOUT_LINE % output) + if error: + if not error.endswith('\n'): + error += '\n' + self._original_stderr.write(STDERR_LINE % error) + + sys.stdout = self._original_stdout + sys.stderr = self._original_stderr + self._stdout_buffer.seek(0) + self._stdout_buffer.truncate() + self._stderr_buffer.seek(0) + self._stderr_buffer.truncate() + + def stopTestRun(self): + """Called once after all tests are executed. + + See stopTest for a method called after each test. + """ + + @failfast + def addError(self, test, err): + """Called when an error has occurred. 'err' is a tuple of values as + returned by sys.exc_info(). + """ + self.errors.append((test, self._exc_info_to_string(err, test))) + self._mirrorOutput = True + + @failfast + def addFailure(self, test, err): + """Called when an error has occurred. 'err' is a tuple of values as + returned by sys.exc_info().""" + self.failures.append((test, self._exc_info_to_string(err, test))) + self._mirrorOutput = True + + def addSubTest(self, test, subtest, err): + """Called at the end of a subtest. + 'err' is None if the subtest ended successfully, otherwise it's a + tuple of values as returned by sys.exc_info(). + """ + # By default, we don't do anything with successful subtests, but + # more sophisticated test results might want to record them. + if err is not None: + if getattr(self, 'failfast', False): + self.stop() + if issubclass(err[0], test.failureException): + errors = self.failures + else: + errors = self.errors + errors.append((subtest, self._exc_info_to_string(err, test))) + self._mirrorOutput = True + + def addSuccess(self, test): + "Called when a test has completed successfully" + pass + + def addSkip(self, test, reason): + """Called when a test is skipped.""" + self.skipped.append((test, reason)) + + def addExpectedFailure(self, test, err): + """Called when an expected failure/error occurred.""" + self.expectedFailures.append( + (test, self._exc_info_to_string(err, test))) + + @failfast + def addUnexpectedSuccess(self, test): + """Called when a test was expected to fail, but succeed.""" + self.unexpectedSuccesses.append(test) + + def addDuration(self, test, elapsed): + """Called when a test finished to run, regardless of its outcome. + *test* is the test case corresponding to the test method. + *elapsed* is the time represented in seconds, and it includes the + execution of cleanup functions. + """ + # support for a TextTestRunner using an old TestResult class + if hasattr(self, "collectedDurations"): + # Pass test repr and not the test object itself to avoid resources leak + self.collectedDurations.append((str(test), elapsed)) + + def wasSuccessful(self): + """Tells whether or not this result was a success.""" + # The hasattr check is for test_result's OldResult test. That + # way this method works on objects that lack the attribute. + # (where would such result instances come from? old stored pickles?) 
+ return ((len(self.failures) == len(self.errors) == 0) and + (not hasattr(self, 'unexpectedSuccesses') or + len(self.unexpectedSuccesses) == 0)) + + def stop(self): + """Indicates that the tests should be aborted.""" + self.shouldStop = True + + def _exc_info_to_string(self, err, test): + """Converts a sys.exc_info()-style tuple of values into a string.""" + exctype, value, tb = err + tb = self._clean_tracebacks(exctype, value, tb, test) + tb_e = traceback.TracebackException( + exctype, value, tb, + capture_locals=self.tb_locals, compact=True) + from _colorize import can_colorize + + colorize = hasattr(self, "stream") and can_colorize(file=self.stream) + msgLines = list(tb_e.format(colorize=colorize)) + + if self.buffer: + output = sys.stdout.getvalue() + error = sys.stderr.getvalue() + if output: + if not output.endswith('\n'): + output += '\n' + msgLines.append(STDOUT_LINE % output) + if error: + if not error.endswith('\n'): + error += '\n' + msgLines.append(STDERR_LINE % error) + return ''.join(msgLines) + + def _clean_tracebacks(self, exctype, value, tb, test): + ret = None + first = True + excs = [(exctype, value, tb)] + seen = {id(value)} # Detect loops in chained exceptions. + while excs: + (exctype, value, tb) = excs.pop() + # Skip test runner traceback levels + while tb and self._is_relevant_tb_level(tb): + tb = tb.tb_next + + # Skip assert*() traceback levels + if exctype is test.failureException: + self._remove_unittest_tb_frames(tb) + + if first: + ret = tb + first = False + else: + value.__traceback__ = tb + + if value is not None: + for c in (value.__cause__, value.__context__): + if c is not None and id(c) not in seen: + excs.append((type(c), c, c.__traceback__)) + seen.add(id(c)) + return ret + + def _is_relevant_tb_level(self, tb): + return '__unittest' in tb.tb_frame.f_globals + + def _remove_unittest_tb_frames(self, tb): + '''Truncates usercode tb at the first unittest frame. + + If the first frame of the traceback is in user code, + the prefix up to the first unittest frame is returned. + If the first frame is already in the unittest module, + the traceback is not modified. + ''' + prev = None + while tb and not self._is_relevant_tb_level(tb): + prev = tb + tb = tb.tb_next + if prev is not None: + prev.tb_next = None + + def __repr__(self): + return ("<%s run=%i errors=%i failures=%i>" % + (util.strclass(self.__class__), self.testsRun, len(self.errors), + len(self.failures))) diff --git a/Python314_4_x64_Template/Lib/unittest/runner.py b/Python314_4_x64_Template/Lib/unittest/runner.py new file mode 100644 index 00000000..5f22d91a --- /dev/null +++ b/Python314_4_x64_Template/Lib/unittest/runner.py @@ -0,0 +1,313 @@ +"""Running tests""" + +import sys +import time +import warnings + +from _colorize import get_theme + +from . import result +from .case import _SubTest +from .signals import registerResult + +__unittest = True + + +class _WritelnDecorator(object): + """Used to decorate file-like objects with a handy 'writeln' method""" + def __init__(self, stream): + self.stream = stream + + def __getattr__(self, attr): + if attr in ('stream', '__getstate__'): + raise AttributeError(attr) + return getattr(self.stream, attr) + + def writeln(self, arg=None): + if arg: + self.write(arg) + self.write('\n') # text-mode streams translate to \r\n if needed + + +class TextTestResult(result.TestResult): + """A test result class that can print formatted text results to a stream. + + Used by TextTestRunner. 
+ """ + separator1 = '=' * 70 + separator2 = '-' * 70 + + def __init__(self, stream, descriptions, verbosity, *, durations=None): + """Construct a TextTestResult. Subclasses should accept **kwargs + to ensure compatibility as the interface changes.""" + super(TextTestResult, self).__init__(stream, descriptions, verbosity) + self.stream = stream + self.showAll = verbosity > 1 + self.dots = verbosity == 1 + self.descriptions = descriptions + self._theme = get_theme(tty_file=stream).unittest + self._newline = True + self.durations = durations + + def getDescription(self, test): + doc_first_line = test.shortDescription() + if self.descriptions and doc_first_line: + return '\n'.join((str(test), doc_first_line)) + else: + return str(test) + + def startTest(self, test): + super(TextTestResult, self).startTest(test) + if self.showAll: + self.stream.write(self.getDescription(test)) + self.stream.write(" ... ") + self.stream.flush() + self._newline = False + + def _write_status(self, test, status): + is_subtest = isinstance(test, _SubTest) + if is_subtest or self._newline: + if not self._newline: + self.stream.writeln() + if is_subtest: + self.stream.write(" ") + self.stream.write(self.getDescription(test)) + self.stream.write(" ... ") + self.stream.writeln(status) + self.stream.flush() + self._newline = True + + def addSubTest(self, test, subtest, err): + if err is not None: + t = self._theme + if self.showAll: + if issubclass(err[0], subtest.failureException): + self._write_status(subtest, f"{t.fail}FAIL{t.reset}") + else: + self._write_status(subtest, f"{t.fail}ERROR{t.reset}") + elif self.dots: + if issubclass(err[0], subtest.failureException): + self.stream.write(f"{t.fail}F{t.reset}") + else: + self.stream.write(f"{t.fail}E{t.reset}") + self.stream.flush() + super(TextTestResult, self).addSubTest(test, subtest, err) + + def addSuccess(self, test): + super(TextTestResult, self).addSuccess(test) + t = self._theme + if self.showAll: + self._write_status(test, f"{t.passed}ok{t.reset}") + elif self.dots: + self.stream.write(f"{t.passed}.{t.reset}") + self.stream.flush() + + def addError(self, test, err): + super(TextTestResult, self).addError(test, err) + t = self._theme + if self.showAll: + self._write_status(test, f"{t.fail}ERROR{t.reset}") + elif self.dots: + self.stream.write(f"{t.fail}E{t.reset}") + self.stream.flush() + + def addFailure(self, test, err): + super(TextTestResult, self).addFailure(test, err) + t = self._theme + if self.showAll: + self._write_status(test, f"{t.fail}FAIL{t.reset}") + elif self.dots: + self.stream.write(f"{t.fail}F{t.reset}") + self.stream.flush() + + def addSkip(self, test, reason): + super(TextTestResult, self).addSkip(test, reason) + t = self._theme + if self.showAll: + self._write_status(test, f"{t.warn}skipped{t.reset} {reason!r}") + elif self.dots: + self.stream.write(f"{t.warn}s{t.reset}") + self.stream.flush() + + def addExpectedFailure(self, test, err): + super(TextTestResult, self).addExpectedFailure(test, err) + t = self._theme + if self.showAll: + self.stream.writeln(f"{t.warn}expected failure{t.reset}") + self.stream.flush() + elif self.dots: + self.stream.write(f"{t.warn}x{t.reset}") + self.stream.flush() + + def addUnexpectedSuccess(self, test): + super(TextTestResult, self).addUnexpectedSuccess(test) + t = self._theme + if self.showAll: + self.stream.writeln(f"{t.fail}unexpected success{t.reset}") + self.stream.flush() + elif self.dots: + self.stream.write(f"{t.fail}u{t.reset}") + self.stream.flush() + + def printErrors(self): + t = self._theme + if 
self.dots or self.showAll: + self.stream.writeln() + self.stream.flush() + self.printErrorList(f"{t.fail}ERROR{t.reset}", self.errors) + self.printErrorList(f"{t.fail}FAIL{t.reset}", self.failures) + unexpectedSuccesses = getattr(self, "unexpectedSuccesses", ()) + if unexpectedSuccesses: + self.stream.writeln(self.separator1) + for test in unexpectedSuccesses: + self.stream.writeln( + f"{t.fail}UNEXPECTED SUCCESS{t.fail_info}: " + f"{self.getDescription(test)}{t.reset}" + ) + self.stream.flush() + + def printErrorList(self, flavour, errors): + t = self._theme + for test, err in errors: + self.stream.writeln(self.separator1) + self.stream.writeln( + f"{flavour}{t.fail_info}: {self.getDescription(test)}{t.reset}" + ) + self.stream.writeln(self.separator2) + self.stream.writeln("%s" % err) + self.stream.flush() + + +class TextTestRunner(object): + """A test runner class that displays results in textual form. + + It prints out the names of tests as they are run, errors as they + occur, and a summary of the results at the end of the test run. + """ + resultclass = TextTestResult + + def __init__(self, stream=None, descriptions=True, verbosity=1, + failfast=False, buffer=False, resultclass=None, warnings=None, + *, tb_locals=False, durations=None): + """Construct a TextTestRunner. + + Subclasses should accept **kwargs to ensure compatibility as the + interface changes. + """ + if stream is None: + stream = sys.stderr + self.stream = _WritelnDecorator(stream) + self.descriptions = descriptions + self.verbosity = verbosity + self.failfast = failfast + self.buffer = buffer + self.tb_locals = tb_locals + self.durations = durations + self.warnings = warnings + if resultclass is not None: + self.resultclass = resultclass + + def _makeResult(self): + try: + return self.resultclass(self.stream, self.descriptions, + self.verbosity, durations=self.durations) + except TypeError: + # didn't accept the durations argument + return self.resultclass(self.stream, self.descriptions, + self.verbosity) + + def _printDurations(self, result): + if not result.collectedDurations: + return + ls = sorted(result.collectedDurations, key=lambda x: x[1], + reverse=True) + if self.durations > 0: + ls = ls[:self.durations] + self.stream.writeln("Slowest test durations") + if hasattr(result, 'separator2'): + self.stream.writeln(result.separator2) + hidden = False + for test, elapsed in ls: + if self.verbosity < 2 and elapsed < 0.001: + hidden = True + continue + self.stream.writeln("%-10s %s" % ("%.3fs" % elapsed, test)) + if hidden: + self.stream.writeln("\n(durations < 0.001s were hidden; " + "use -v to show these durations)") + else: + self.stream.writeln("") + + def run(self, test): + "Run the given test case or test suite." 
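+        # Overall flow (descriptive comment, not upstream CPython): create
+        # the result object, register it so interrupt handling can stop it,
+        # time the run, then print errors, optional slowest durations, and
+        # the final OK / FAILED / NO TESTS RAN summary line.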
+ result = self._makeResult() + registerResult(result) + result.failfast = self.failfast + result.buffer = self.buffer + result.tb_locals = self.tb_locals + with warnings.catch_warnings(): + if self.warnings: + # if self.warnings is set, use it to filter all the warnings + warnings.simplefilter(self.warnings) + start_time = time.perf_counter() + startTestRun = getattr(result, 'startTestRun', None) + if startTestRun is not None: + startTestRun() + try: + test(result) + finally: + stopTestRun = getattr(result, 'stopTestRun', None) + if stopTestRun is not None: + stopTestRun() + stop_time = time.perf_counter() + time_taken = stop_time - start_time + result.printErrors() + if self.durations is not None: + self._printDurations(result) + + if hasattr(result, 'separator2'): + self.stream.writeln(result.separator2) + + run = result.testsRun + self.stream.writeln("Ran %d test%s in %.3fs" % + (run, run != 1 and "s" or "", time_taken)) + self.stream.writeln() + + expected_fails = unexpected_successes = skipped = 0 + try: + results = map(len, (result.expectedFailures, + result.unexpectedSuccesses, + result.skipped)) + except AttributeError: + pass + else: + expected_fails, unexpected_successes, skipped = results + + infos = [] + t = get_theme(tty_file=self.stream).unittest + + if not result.wasSuccessful(): + self.stream.write(f"{t.fail_info}FAILED{t.reset}") + failed, errored = len(result.failures), len(result.errors) + if failed: + infos.append(f"{t.fail_info}failures={failed}{t.reset}") + if errored: + infos.append(f"{t.fail_info}errors={errored}{t.reset}") + elif run == 0 and not skipped: + self.stream.write(f"{t.warn}NO TESTS RAN{t.reset}") + else: + self.stream.write(f"{t.passed}OK{t.reset}") + if skipped: + infos.append(f"{t.warn}skipped={skipped}{t.reset}") + if expected_fails: + infos.append(f"{t.warn}expected failures={expected_fails}{t.reset}") + if unexpected_successes: + infos.append( + f"{t.fail}unexpected successes={unexpected_successes}{t.reset}" + ) + if infos: + self.stream.writeln(" (%s)" % (", ".join(infos),)) + else: + self.stream.write("\n") + self.stream.flush() + return result diff --git a/Python313_13_x64_Template/Lib/unittest/signals.py b/Python314_4_x64_Template/Lib/unittest/signals.py similarity index 100% rename from Python313_13_x64_Template/Lib/unittest/signals.py rename to Python314_4_x64_Template/Lib/unittest/signals.py diff --git a/Python313_13_x64_Template/Lib/unittest/suite.py b/Python314_4_x64_Template/Lib/unittest/suite.py similarity index 100% rename from Python313_13_x64_Template/Lib/unittest/suite.py rename to Python314_4_x64_Template/Lib/unittest/suite.py diff --git a/Python313_13_x64_Template/Lib/unittest/util.py b/Python314_4_x64_Template/Lib/unittest/util.py similarity index 100% rename from Python313_13_x64_Template/Lib/unittest/util.py rename to Python314_4_x64_Template/Lib/unittest/util.py diff --git a/Python313_13_x86_Template/Lib/email/mime/__init__.py b/Python314_4_x64_Template/Lib/urllib/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/email/mime/__init__.py rename to Python314_4_x64_Template/Lib/urllib/__init__.py diff --git a/Python314_4_x64_Template/Lib/urllib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/urllib/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..ebae42b4 Binary files /dev/null and b/Python314_4_x64_Template/Lib/urllib/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/urllib/__pycache__/error.cpython-314.pyc 
b/Python314_4_x64_Template/Lib/urllib/__pycache__/error.cpython-314.pyc new file mode 100644 index 00000000..3ec82d2c Binary files /dev/null and b/Python314_4_x64_Template/Lib/urllib/__pycache__/error.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/urllib/__pycache__/parse.cpython-314.pyc b/Python314_4_x64_Template/Lib/urllib/__pycache__/parse.cpython-314.pyc new file mode 100644 index 00000000..fc4be03e Binary files /dev/null and b/Python314_4_x64_Template/Lib/urllib/__pycache__/parse.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/urllib/__pycache__/request.cpython-314.pyc b/Python314_4_x64_Template/Lib/urllib/__pycache__/request.cpython-314.pyc new file mode 100644 index 00000000..9784a07a Binary files /dev/null and b/Python314_4_x64_Template/Lib/urllib/__pycache__/request.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/urllib/__pycache__/response.cpython-314.pyc b/Python314_4_x64_Template/Lib/urllib/__pycache__/response.cpython-314.pyc new file mode 100644 index 00000000..0b39fc6c Binary files /dev/null and b/Python314_4_x64_Template/Lib/urllib/__pycache__/response.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/urllib/error.py b/Python314_4_x64_Template/Lib/urllib/error.py similarity index 100% rename from Python313_13_x64_Template/Lib/urllib/error.py rename to Python314_4_x64_Template/Lib/urllib/error.py diff --git a/Python314_4_x64_Template/Lib/urllib/parse.py b/Python314_4_x64_Template/Lib/urllib/parse.py new file mode 100644 index 00000000..a651e815 --- /dev/null +++ b/Python314_4_x64_Template/Lib/urllib/parse.py @@ -0,0 +1,1289 @@ +"""Parse (absolute and relative) URLs. + +urllib.parse module is based upon the following RFC specifications. + +RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding +and L. Masinter, January 2005. + +RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter +and L.Masinter, December 1999. + +RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. +Berners-Lee, R. Fielding, and L. Masinter, August 1998. + +RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. + +RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June +1995. + +RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. +McCahill, December 1994 + +RFC 3986 is considered the current standard and any future changes to +urllib.parse module should conform with it. The urllib.parse module is +currently not entirely compliant with this RFC due to defacto +scenarios for parsing, and for backward compatibility purposes, some +parsing quirks from older RFCs are retained. The testcases in +test_urlparse.py provides a good indicator of parsing behavior. + +The WHATWG URL Parser spec should also be considered. We are not compliant with +it either due to existing user code API behavior expectations (Hyrum's Law). +It serves as a useful guide when making changes. +""" + +from collections import namedtuple +import functools +import math +import re +import types +import warnings +import ipaddress + +__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", + "urlsplit", "urlunsplit", "urlencode", "parse_qs", + "parse_qsl", "quote", "quote_plus", "quote_from_bytes", + "unquote", "unquote_plus", "unquote_to_bytes", + "DefragResult", "ParseResult", "SplitResult", + "DefragResultBytes", "ParseResultBytes", "SplitResultBytes"] + +# A classification of schemes. 
+# The empty string classifies URLs with no scheme specified, +# being the default value returned by “urlsplit” and “urlparse”. + +uses_relative = ['', 'ftp', 'http', 'gopher', 'nntp', 'imap', + 'wais', 'file', 'https', 'shttp', 'mms', + 'prospero', 'rtsp', 'rtsps', 'rtspu', 'sftp', + 'svn', 'svn+ssh', 'ws', 'wss'] + +uses_netloc = ['', 'ftp', 'http', 'gopher', 'nntp', 'telnet', + 'imap', 'wais', 'file', 'mms', 'https', 'shttp', + 'snews', 'prospero', 'rtsp', 'rtsps', 'rtspu', 'rsync', + 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh', + 'ws', 'wss', 'itms-services'] + +uses_params = ['', 'ftp', 'hdl', 'prospero', 'http', 'imap', + 'https', 'shttp', 'rtsp', 'rtsps', 'rtspu', 'sip', + 'sips', 'mms', 'sftp', 'tel'] + +# These are not actually used anymore, but should stay for backwards +# compatibility. (They are undocumented, but have a public-looking name.) + +non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', + 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] + +uses_query = ['', 'http', 'wais', 'imap', 'https', 'shttp', 'mms', + 'gopher', 'rtsp', 'rtsps', 'rtspu', 'sip', 'sips'] + +uses_fragment = ['', 'ftp', 'hdl', 'http', 'gopher', 'news', + 'nntp', 'wais', 'https', 'shttp', 'snews', + 'file', 'prospero'] + +# Characters valid in scheme names +scheme_chars = ('abcdefghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + '0123456789' + '+-.') + +# Leading and trailing C0 control and space to be stripped per WHATWG spec. +# == "".join([chr(i) for i in range(0, 0x20 + 1)]) +_WHATWG_C0_CONTROL_OR_SPACE = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f ' + +# Unsafe bytes to be removed per WHATWG spec +_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n'] + +def clear_cache(): + """Clear internal performance caches. Undocumented; some tests want it.""" + urlsplit.cache_clear() + _byte_quoter_factory.cache_clear() + +# Helpers for bytes handling +# For 3.2, we deliberately require applications that +# handle improperly quoted URLs to do their own +# decoding and encoding. 
If valid use cases are +# presented, we may relax this by using latin-1 +# decoding internally for 3.3 +_implicit_encoding = 'ascii' +_implicit_errors = 'strict' + +def _noop(obj): + return obj + +def _encode_result(obj, encoding=_implicit_encoding, + errors=_implicit_errors): + return obj.encode(encoding, errors) + +def _decode_args(args, encoding=_implicit_encoding, + errors=_implicit_errors): + return tuple(x.decode(encoding, errors) if x else '' for x in args) + +def _coerce_args(*args): + # Invokes decode if necessary to create str args + # and returns the coerced inputs along with + # an appropriate result coercion function + # - noop for str inputs + # - encoding function otherwise + str_input = isinstance(args[0], str) + for arg in args[1:]: + # We special-case the empty string to support the + # "scheme=''" default argument to some functions + if arg and isinstance(arg, str) != str_input: + raise TypeError("Cannot mix str and non-str arguments") + if str_input: + return args + (_noop,) + return _decode_args(args) + (_encode_result,) + +# Result objects are more helpful than simple tuples +class _ResultMixinStr(object): + """Standard approach to encoding parsed results from str to bytes""" + __slots__ = () + + def encode(self, encoding='ascii', errors='strict'): + return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self)) + + +class _ResultMixinBytes(object): + """Standard approach to decoding parsed results from bytes to str""" + __slots__ = () + + def decode(self, encoding='ascii', errors='strict'): + return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self)) + + +class _NetlocResultMixinBase(object): + """Shared methods for the parsed result objects containing a netloc element""" + __slots__ = () + + @property + def username(self): + return self._userinfo[0] + + @property + def password(self): + return self._userinfo[1] + + @property + def hostname(self): + hostname = self._hostinfo[0] + if not hostname: + return None + # Scoped IPv6 address may have zone info, which must not be lowercased + # like http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys + separator = '%' if isinstance(hostname, str) else b'%' + hostname, percent, zone = hostname.partition(separator) + return hostname.lower() + percent + zone + + @property + def port(self): + port = self._hostinfo[1] + if port is not None: + if port.isdigit() and port.isascii(): + port = int(port) + else: + raise ValueError(f"Port could not be cast to integer value as {port!r}") + if not (0 <= port <= 65535): + raise ValueError("Port out of range 0-65535") + return port + + __class_getitem__ = classmethod(types.GenericAlias) + + +class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr): + __slots__ = () + + @property + def _userinfo(self): + netloc = self.netloc + userinfo, have_info, hostinfo = netloc.rpartition('@') + if have_info: + username, have_password, password = userinfo.partition(':') + if not have_password: + password = None + else: + username = password = None + return username, password + + @property + def _hostinfo(self): + netloc = self.netloc + _, _, hostinfo = netloc.rpartition('@') + _, have_open_br, bracketed = hostinfo.partition('[') + if have_open_br: + hostname, _, port = bracketed.partition(']') + _, _, port = port.partition(':') + else: + hostname, _, port = hostinfo.partition(':') + if not port: + port = None + return hostname, port + + +class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes): + __slots__ = () + + @property + def 
_userinfo(self): + netloc = self.netloc + userinfo, have_info, hostinfo = netloc.rpartition(b'@') + if have_info: + username, have_password, password = userinfo.partition(b':') + if not have_password: + password = None + else: + username = password = None + return username, password + + @property + def _hostinfo(self): + netloc = self.netloc + _, _, hostinfo = netloc.rpartition(b'@') + _, have_open_br, bracketed = hostinfo.partition(b'[') + if have_open_br: + hostname, _, port = bracketed.partition(b']') + _, _, port = port.partition(b':') + else: + hostname, _, port = hostinfo.partition(b':') + if not port: + port = None + return hostname, port + + +_DefragResultBase = namedtuple('_DefragResultBase', 'url fragment') +_SplitResultBase = namedtuple( + '_SplitResultBase', 'scheme netloc path query fragment') +_ParseResultBase = namedtuple( + '_ParseResultBase', 'scheme netloc path params query fragment') + +_DefragResultBase.__doc__ = """ +DefragResult(url, fragment) + +A 2-tuple that contains the url without fragment identifier and the fragment +identifier as a separate argument. +""" + +_DefragResultBase.url.__doc__ = """The URL with no fragment identifier.""" + +_DefragResultBase.fragment.__doc__ = """ +Fragment identifier separated from URL, that allows indirect identification of a +secondary resource by reference to a primary resource and additional identifying +information. +""" + +_SplitResultBase.__doc__ = """ +SplitResult(scheme, netloc, path, query, fragment) + +A 5-tuple that contains the different components of a URL. Similar to +ParseResult, but does not split params. +""" + +_SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request.""" + +_SplitResultBase.netloc.__doc__ = """ +Network location where the request is made to. +""" + +_SplitResultBase.path.__doc__ = """ +The hierarchical path, such as the path to a file to download. +""" + +_SplitResultBase.query.__doc__ = """ +The query component, that contains non-hierarchical data, that along with data +in path component, identifies a resource in the scope of URI's scheme and +network location. +""" + +_SplitResultBase.fragment.__doc__ = """ +Fragment identifier, that allows indirect identification of a secondary resource +by reference to a primary resource and additional identifying information. +""" + +_ParseResultBase.__doc__ = """ +ParseResult(scheme, netloc, path, params, query, fragment) + +A 6-tuple that contains components of a parsed URL. +""" + +_ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__ +_ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__ +_ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__ +_ParseResultBase.params.__doc__ = """ +Parameters for last path element used to dereference the URI in order to provide +access to perform some operation on the resource. 
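+For example (illustrative, not upstream wording), in '/path;type=a' the
+params component is 'type=a'.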
+""" + +_ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__ +_ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__ + + +# For backwards compatibility, alias _NetlocResultMixinStr +# ResultBase is no longer part of the documented API, but it is +# retained since deprecating it isn't worth the hassle +ResultBase = _NetlocResultMixinStr + +# Structured result objects for string data +class DefragResult(_DefragResultBase, _ResultMixinStr): + __slots__ = () + def geturl(self): + if self.fragment: + return self.url + '#' + self.fragment + else: + return self.url + +class SplitResult(_SplitResultBase, _NetlocResultMixinStr): + __slots__ = () + def geturl(self): + return urlunsplit(self) + +class ParseResult(_ParseResultBase, _NetlocResultMixinStr): + __slots__ = () + def geturl(self): + return urlunparse(self) + +# Structured result objects for bytes data +class DefragResultBytes(_DefragResultBase, _ResultMixinBytes): + __slots__ = () + def geturl(self): + if self.fragment: + return self.url + b'#' + self.fragment + else: + return self.url + +class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes): + __slots__ = () + def geturl(self): + return urlunsplit(self) + +class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes): + __slots__ = () + def geturl(self): + return urlunparse(self) + +# Set up the encode/decode result pairs +def _fix_result_transcoding(): + _result_pairs = ( + (DefragResult, DefragResultBytes), + (SplitResult, SplitResultBytes), + (ParseResult, ParseResultBytes), + ) + for _decoded, _encoded in _result_pairs: + _decoded._encoded_counterpart = _encoded + _encoded._decoded_counterpart = _decoded + +_fix_result_transcoding() +del _fix_result_transcoding + +def urlparse(url, scheme='', allow_fragments=True): + """Parse a URL into 6 components: + :///;?# + + The result is a named 6-tuple with fields corresponding to the + above. It is either a ParseResult or ParseResultBytes object, + depending on the type of the url parameter. + + The username, password, hostname, and port sub-components of netloc + can also be accessed as attributes of the returned object. + + The scheme argument provides the default value of the scheme + component when no scheme is found in url. + + If allow_fragments is False, no attempt is made to separate the + fragment component from the previous component, which can be either + path or query. + + Note that % escapes are not expanded. + + urlsplit() should generally be used instead of urlparse(). 
+ """ + url, scheme, _coerce_result = _coerce_args(url, scheme) + scheme, netloc, url, params, query, fragment = _urlparse(url, scheme, allow_fragments) + result = ParseResult(scheme or '', netloc or '', url, params or '', query or '', fragment or '') + return _coerce_result(result) + +def _urlparse(url, scheme=None, allow_fragments=True): + scheme, netloc, url, query, fragment = _urlsplit(url, scheme, allow_fragments) + if (scheme or '') in uses_params and ';' in url: + url, params = _splitparams(url, allow_none=True) + else: + params = None + return (scheme, netloc, url, params, query, fragment) + +def _splitparams(url, allow_none=False): + if '/' in url: + i = url.find(';', url.rfind('/')) + if i < 0: + return url, None if allow_none else '' + else: + i = url.find(';') + return url[:i], url[i+1:] + +def _splitnetloc(url, start=0): + delim = len(url) # position of end of domain part of url, default is end + for c in '/?#': # look for delimiters; the order is NOT important + wdelim = url.find(c, start) # find first of this delim + if wdelim >= 0: # if found + delim = min(delim, wdelim) # use earliest delim position + return url[start:delim], url[delim:] # return (domain, rest) + +def _checknetloc(netloc): + if not netloc or netloc.isascii(): + return + # looking for characters like \u2100 that expand to 'a/c' + # IDNA uses NFKC equivalence, so normalize for this check + import unicodedata + n = netloc.replace('@', '') # ignore characters already included + n = n.replace(':', '') # but not the surrounding text + n = n.replace('#', '') + n = n.replace('?', '') + netloc2 = unicodedata.normalize('NFKC', n) + if n == netloc2: + return + for c in '/?#@:': + if c in netloc2: + raise ValueError("netloc '" + netloc + "' contains invalid " + + "characters under NFKC normalization") + +def _check_bracketed_netloc(netloc): + # Note that this function must mirror the splitting + # done in NetlocResultMixins._hostinfo(). + hostname_and_port = netloc.rpartition('@')[2] + before_bracket, have_open_br, bracketed = hostname_and_port.partition('[') + if have_open_br: + # No data is allowed before a bracket. + if before_bracket: + raise ValueError("Invalid IPv6 URL") + hostname, _, port = bracketed.partition(']') + # No data is allowed after the bracket but before the port delimiter. + if port and not port.startswith(":"): + raise ValueError("Invalid IPv6 URL") + else: + hostname, _, port = hostname_and_port.partition(':') + _check_bracketed_host(hostname) + +# Valid bracketed hosts are defined in +# https://www.rfc-editor.org/rfc/rfc3986#page-49 and https://url.spec.whatwg.org/ +def _check_bracketed_host(hostname): + if hostname.startswith('v'): + if not re.match(r"\Av[a-fA-F0-9]+\..+\z", hostname): + raise ValueError(f"IPvFuture address is invalid") + else: + ip = ipaddress.ip_address(hostname) # Throws Value Error if not IPv6 or IPv4 + if isinstance(ip, ipaddress.IPv4Address): + raise ValueError(f"An IPv4 address cannot be in brackets") + +# typed=True avoids BytesWarnings being emitted during cache key +# comparison since this API supports both bytes and str input. +@functools.lru_cache(typed=True) +def urlsplit(url, scheme='', allow_fragments=True): + """Parse a URL into 5 components: + :///?# + + The result is a named 5-tuple with fields corresponding to the + above. It is either a SplitResult or SplitResultBytes object, + depending on the type of the url parameter. + + The username, password, hostname, and port sub-components of netloc + can also be accessed as attributes of the returned object. 
+ + The scheme argument provides the default value of the scheme + component when no scheme is found in url. + + If allow_fragments is False, no attempt is made to separate the + fragment component from the previous component, which can be either + path or query. + + Note that % escapes are not expanded. + """ + + url, scheme, _coerce_result = _coerce_args(url, scheme) + scheme, netloc, url, query, fragment = _urlsplit(url, scheme, allow_fragments) + v = SplitResult(scheme or '', netloc or '', url, query or '', fragment or '') + return _coerce_result(v) + +def _urlsplit(url, scheme=None, allow_fragments=True): + # Only lstrip url as some applications rely on preserving trailing space. + # (https://url.spec.whatwg.org/#concept-basic-url-parser would strip both) + url = url.lstrip(_WHATWG_C0_CONTROL_OR_SPACE) + for b in _UNSAFE_URL_BYTES_TO_REMOVE: + url = url.replace(b, "") + if scheme is not None: + scheme = scheme.strip(_WHATWG_C0_CONTROL_OR_SPACE) + for b in _UNSAFE_URL_BYTES_TO_REMOVE: + scheme = scheme.replace(b, "") + + allow_fragments = bool(allow_fragments) + netloc = query = fragment = None + i = url.find(':') + if i > 0 and url[0].isascii() and url[0].isalpha(): + for c in url[:i]: + if c not in scheme_chars: + break + else: + scheme, url = url[:i].lower(), url[i+1:] + if url[:2] == '//': + netloc, url = _splitnetloc(url, 2) + if (('[' in netloc and ']' not in netloc) or + (']' in netloc and '[' not in netloc)): + raise ValueError("Invalid IPv6 URL") + if '[' in netloc and ']' in netloc: + _check_bracketed_netloc(netloc) + if allow_fragments and '#' in url: + url, fragment = url.split('#', 1) + if '?' in url: + url, query = url.split('?', 1) + _checknetloc(netloc) + return (scheme, netloc, url, query, fragment) + +def urlunparse(components): + """Put a parsed URL back together again. This may result in a + slightly different, but equivalent URL, if the URL that was parsed + originally had redundant delimiters, e.g. a ? with an empty query + (the draft states that these are equivalent).""" + scheme, netloc, url, params, query, fragment, _coerce_result = ( + _coerce_args(*components)) + if not netloc: + if scheme and scheme in uses_netloc and (not url or url[:1] == '/'): + netloc = '' + else: + netloc = None + if params: + url = "%s;%s" % (url, params) + return _coerce_result(_urlunsplit(scheme or None, netloc, url, + query or None, fragment or None)) + +def urlunsplit(components): + """Combine the elements of a tuple as returned by urlsplit() into a + complete URL as a string. The data argument can be any five-item iterable. + This may result in a slightly different, but equivalent URL, if the URL that + was parsed originally had unnecessary delimiters (for example, a ? with an + empty query; the RFC states that these are equivalent).""" + scheme, netloc, url, query, fragment, _coerce_result = ( + _coerce_args(*components)) + if not netloc: + if scheme and scheme in uses_netloc and (not url or url[:1] == '/'): + netloc = '' + else: + netloc = None + return _coerce_result(_urlunsplit(scheme or None, netloc, url, + query or None, fragment or None)) + +def _urlunsplit(scheme, netloc, url, query, fragment): + if netloc is not None: + if url and url[:1] != '/': url = '/' + url + url = '//' + netloc + url + elif url[:2] == '//': + url = '//' + url + if scheme: + url = scheme + ':' + url + if query is not None: + url = url + '?' 
+ query + if fragment is not None: + url = url + '#' + fragment + return url + +def urljoin(base, url, allow_fragments=True): + """Join a base URL and a possibly relative URL to form an absolute + interpretation of the latter.""" + if not base: + return url + if not url: + return base + + base, url, _coerce_result = _coerce_args(base, url) + bscheme, bnetloc, bpath, bquery, bfragment = \ + _urlsplit(base, None, allow_fragments) + scheme, netloc, path, query, fragment = \ + _urlsplit(url, None, allow_fragments) + + if scheme is None: + scheme = bscheme + if scheme != bscheme or (scheme and scheme not in uses_relative): + return _coerce_result(url) + if not scheme or scheme in uses_netloc: + if netloc: + return _coerce_result(_urlunsplit(scheme, netloc, path, + query, fragment)) + netloc = bnetloc + + if not path: + path = bpath + if query is None: + query = bquery + if fragment is None: + fragment = bfragment + return _coerce_result(_urlunsplit(scheme, netloc, path, + query, fragment)) + + base_parts = bpath.split('/') + if base_parts[-1] != '': + # the last item is not a directory, so will not be taken into account + # in resolving the relative path + del base_parts[-1] + + # for rfc3986, ignore all base path should the first character be root. + if path[:1] == '/': + segments = path.split('/') + else: + segments = base_parts + path.split('/') + # filter out elements that would cause redundant slashes on re-joining + # the resolved_path + segments[1:-1] = filter(None, segments[1:-1]) + + resolved_path = [] + + for seg in segments: + if seg == '..': + try: + resolved_path.pop() + except IndexError: + # ignore any .. segments that would otherwise cause an IndexError + # when popped from resolved_path if resolving for rfc3986 + pass + elif seg == '.': + continue + else: + resolved_path.append(seg) + + if segments[-1] in ('.', '..'): + # do some post-processing here. if the last segment was a relative dir, + # then we need to append the trailing '/' + resolved_path.append('') + + return _coerce_result(_urlunsplit(scheme, netloc, '/'.join( + resolved_path) or '/', query, fragment)) + + +def urldefrag(url): + """Removes any existing fragment from URL. + + Returns a tuple of the defragmented URL and the fragment. If + the URL contained no fragments, the second element is the + empty string. + """ + url, _coerce_result = _coerce_args(url) + if '#' in url: + s, n, p, q, frag = _urlsplit(url) + defrag = _urlunsplit(s, n, p, q, None) + else: + frag = '' + defrag = url + return _coerce_result(DefragResult(defrag, frag or '')) + +_hexdig = '0123456789ABCDEFabcdef' +_hextobyte = None + +def unquote_to_bytes(string): + """unquote_to_bytes('abc%20def') -> b'abc def'.""" + return bytes(_unquote_impl(string)) + +def _unquote_impl(string: bytes | bytearray | str) -> bytes | bytearray: + # Note: strings are encoded as UTF-8. This is only an issue if it contains + # unescaped non-ASCII characters, which URIs should not. + if not string: + # Is it a string-like object? 
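+        # (editorial note: the bare attribute access below is a probe; it
+        # raises AttributeError for inputs that are not string-like)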
+ string.split + return b'' + if isinstance(string, str): + string = string.encode('utf-8') + bits = string.split(b'%') + if len(bits) == 1: + return string + res = bytearray(bits[0]) + append = res.extend + # Delay the initialization of the table to not waste memory + # if the function is never called + global _hextobyte + if _hextobyte is None: + _hextobyte = {(a + b).encode(): bytes.fromhex(a + b) + for a in _hexdig for b in _hexdig} + for item in bits[1:]: + try: + append(_hextobyte[item[:2]]) + append(item[2:]) + except KeyError: + append(b'%') + append(item) + return res + +_asciire = re.compile('([\x00-\x7f]+)') + +def _generate_unquoted_parts(string, encoding, errors): + previous_match_end = 0 + for ascii_match in _asciire.finditer(string): + start, end = ascii_match.span() + yield string[previous_match_end:start] # Non-ASCII + # The ascii_match[1] group == string[start:end]. + yield _unquote_impl(ascii_match[1]).decode(encoding, errors) + previous_match_end = end + yield string[previous_match_end:] # Non-ASCII tail + +def unquote(string, encoding='utf-8', errors='replace'): + """Replace %xx escapes by their single-character equivalent. The optional + encoding and errors parameters specify how to decode percent-encoded + sequences into Unicode characters, as accepted by the bytes.decode() + method. + By default, percent-encoded sequences are decoded with UTF-8, and invalid + sequences are replaced by a placeholder character. + + unquote('abc%20def') -> 'abc def'. + """ + if isinstance(string, bytes): + return _unquote_impl(string).decode(encoding, errors) + if '%' not in string: + # Is it a string-like object? + string.split + return string + if encoding is None: + encoding = 'utf-8' + if errors is None: + errors = 'replace' + return ''.join(_generate_unquoted_parts(string, encoding, errors)) + + +def parse_qs(qs, keep_blank_values=False, strict_parsing=False, + encoding='utf-8', errors='replace', max_num_fields=None, separator='&'): + """Parse a query given as a string argument. + + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. + A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + encoding and errors: specify how to decode percent-encoded sequences + into Unicode characters, as accepted by the bytes.decode() method. + + max_num_fields: int. If set, then throws a ValueError if there + are more than n fields read by parse_qsl(). + + separator: str. The symbol to use for separating the query arguments. + Defaults to &. + + Returns a dictionary. + """ + parsed_result = {} + pairs = parse_qsl(qs, keep_blank_values, strict_parsing, + encoding=encoding, errors=errors, + max_num_fields=max_num_fields, separator=separator, + _stacklevel=2) + for name, value in pairs: + if name in parsed_result: + parsed_result[name].append(value) + else: + parsed_result[name] = [value] + return parsed_result + + +def parse_qsl(qs, keep_blank_values=False, strict_parsing=False, + encoding='utf-8', errors='replace', max_num_fields=None, separator='&', *, _stacklevel=1): + """Parse a query given as a string argument. 
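+
+    For example (illustrative, not upstream wording), parse_qsl('a=1&a=2&b=')
+    returns [('a', '1'), ('a', '2')]; pass keep_blank_values=True to also
+    get ('b', '').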
+
+    Arguments:
+
+    qs: percent-encoded query string to be parsed
+
+    keep_blank_values: flag indicating whether blank values in
+        percent-encoded queries should be treated as blank strings.
+        A true value indicates that blanks should be retained as blank
+        strings. The default false value indicates that blank values
+        are to be ignored and treated as if they were not included.
+
+    strict_parsing: flag indicating what to do with parsing errors. If
+        false (the default), errors are silently ignored. If true,
+        errors raise a ValueError exception.
+
+    encoding and errors: specify how to decode percent-encoded sequences
+        into Unicode characters, as accepted by the bytes.decode() method.
+
+    max_num_fields: int. If set, then throws a ValueError
+        if there are more than n fields read by parse_qsl().
+
+    separator: str. The symbol to use for separating the query arguments.
+        Defaults to &.
+
+    Returns a list, as G-d intended.
+    """
+    if not separator or not isinstance(separator, (str, bytes)):
+        raise ValueError("Separator must be of type string or bytes.")
+    if isinstance(qs, str):
+        if not isinstance(separator, str):
+            separator = str(separator, 'ascii')
+        eq = '='
+        def _unquote(s):
+            return unquote_plus(s, encoding=encoding, errors=errors)
+    elif qs is None:
+        return []
+    else:
+        try:
+            # Use memoryview() to reject integers and iterables,
+            # acceptable by the bytes constructor.
+            qs = bytes(memoryview(qs))
+        except TypeError:
+            if not qs:
+                warnings.warn(f"Accepting {type(qs).__name__} objects with "
+                              f"false value in urllib.parse.parse_qsl() is "
+                              f"deprecated as of 3.14",
+                              DeprecationWarning, stacklevel=_stacklevel + 1)
+                return []
+            raise
+        if isinstance(separator, str):
+            separator = bytes(separator, 'ascii')
+        eq = b'='
+        def _unquote(s):
+            return unquote_to_bytes(s.replace(b'+', b' '))
+
+    if not qs:
+        return []
+
+    # If max_num_fields is defined then check that the number of fields
+    # is less than max_num_fields. This prevents a memory exhaustion DOS
+    # attack via post bodies with many fields.
+    if max_num_fields is not None:
+        num_fields = 1 + qs.count(separator)
+        if max_num_fields < num_fields:
+            raise ValueError('Max number of fields exceeded')
+
+    r = []
+    for name_value in qs.split(separator):
+        if name_value or strict_parsing:
+            name, has_eq, value = name_value.partition(eq)
+            if not has_eq and strict_parsing:
+                raise ValueError("bad query field: %r" % (name_value,))
+            if value or keep_blank_values:
+                name = _unquote(name)
+                value = _unquote(value)
+                r.append((name, value))
+    return r
+
+def unquote_plus(string, encoding='utf-8', errors='replace'):
+    """Like unquote(), but also replace plus signs by spaces, as required for
+    unquoting HTML form values.
+
+    unquote_plus('%7e/abc+def') -> '~/abc def'
+    """
+    string = string.replace('+', ' ')
+    return unquote(string, encoding, errors)
+
+_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+                         b'abcdefghijklmnopqrstuvwxyz'
+                         b'0123456789'
+                         b'_.-~')
+_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
+
+
+class _Quoter(dict):
+    """A mapping from bytes numbers (in range(0,256)) to strings.
+
+    String values are percent-encoded byte values, unless the key < 128, and
+    in either of the specified safe set, or the always safe set.
+    """
+    # Keeps a cache internally, via __missing__, for efficiency (lookups
+    # of cached keys don't call Python code at all).
+    def __init__(self, safe):
+        """safe: bytes object."""
+        self.safe = _ALWAYS_SAFE.union(safe)
+
+    def __repr__(self):
+        return f"<Quoter {dict(self)!r}>"
+
+    def __missing__(self, b):
+        # Handle a cache miss.
Store quoted string in cache and return. + res = chr(b) if b in self.safe else '%{:02X}'.format(b) + self[b] = res + return res + +def quote(string, safe='/', encoding=None, errors=None): + """quote('abc def') -> 'abc%20def' + + Each part of a URL, e.g. the path info, the query, etc., has a + different set of reserved characters that must be quoted. The + quote function offers a cautious (not minimal) way to quote a + string for most of these parts. + + RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists + the following (un)reserved characters. + + unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" + reserved = gen-delims / sub-delims + gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" + sub-delims = "!" / "$" / "&" / "'" / "(" / ")" + / "*" / "+" / "," / ";" / "=" + + Each of the reserved characters is reserved in some component of a URL, + but not necessarily in all of them. + + The quote function %-escapes all characters that are neither in the + unreserved chars ("always safe") nor the additional chars set via the + safe arg. + + The default for the safe arg is '/'. The character is reserved, but in + typical usage the quote function is being called on a path where the + existing slash characters are to be preserved. + + Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings. + Now, "~" is included in the set of unreserved characters. + + string and safe may be either str or bytes objects. encoding and errors + must not be specified if string is a bytes object. + + The optional encoding and errors parameters specify how to deal with + non-ASCII characters, as accepted by the str.encode method. + By default, encoding='utf-8' (characters are encoded with UTF-8), and + errors='strict' (unsupported characters raise a UnicodeEncodeError). + """ + if isinstance(string, str): + if not string: + return string + if encoding is None: + encoding = 'utf-8' + if errors is None: + errors = 'strict' + string = string.encode(encoding, errors) + else: + if encoding is not None: + raise TypeError("quote() doesn't support 'encoding' for bytes") + if errors is not None: + raise TypeError("quote() doesn't support 'errors' for bytes") + return quote_from_bytes(string, safe) + +def quote_plus(string, safe='', encoding=None, errors=None): + """Like quote(), but also replace ' ' with '+', as required for quoting + HTML form values. Plus signs in the original string are escaped unless + they are included in safe. It also does not have safe default to '/'. + """ + # Check if ' ' in string, where string may either be a str or bytes. If + # there are no spaces, the regular quote will produce the right answer. + if ((isinstance(string, str) and ' ' not in string) or + (isinstance(string, bytes) and b' ' not in string)): + return quote(string, safe, encoding, errors) + if isinstance(safe, str): + space = ' ' + else: + space = b' ' + string = quote(string, safe + space, encoding, errors) + return string.replace(' ', '+') + +# Expectation: A typical program is unlikely to create more than 5 of these. +@functools.lru_cache +def _byte_quoter_factory(safe): + return _Quoter(safe).__getitem__ + +def quote_from_bytes(bs, safe='/'): + """Like quote(), but accepts a bytes object rather than a str, and does + not perform string-to-bytes encoding. It always returns an ASCII string. 
+ quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f' + """ + if not isinstance(bs, (bytes, bytearray)): + raise TypeError("quote_from_bytes() expected bytes") + if not bs: + return '' + if isinstance(safe, str): + # Normalize 'safe' by converting to bytes and removing non-ASCII chars + safe = safe.encode('ascii', 'ignore') + else: + # List comprehensions are faster than generator expressions. + safe = bytes([c for c in safe if c < 128]) + if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe): + return bs.decode() + quoter = _byte_quoter_factory(safe) + if (bs_len := len(bs)) < 200_000: + return ''.join(map(quoter, bs)) + else: + # This saves memory - https://github.com/python/cpython/issues/95865 + chunk_size = math.isqrt(bs_len) + chunks = [''.join(map(quoter, bs[i:i+chunk_size])) + for i in range(0, bs_len, chunk_size)] + return ''.join(chunks) + +def urlencode(query, doseq=False, safe='', encoding=None, errors=None, + quote_via=quote_plus): + """Encode a dict or sequence of two-element tuples into a URL query string. + + If any values in the query arg are sequences and doseq is true, each + sequence element is converted to a separate parameter. + + If the query arg is a sequence of two-element tuples, the order of the + parameters in the output will match the order of parameters in the + input. + + The components of a query arg may each be either a string or a bytes type. + + The safe, encoding, and errors parameters are passed down to the function + specified by quote_via (encoding and errors only if a component is a str). + """ + + if hasattr(query, "items"): + query = query.items() + else: + # It's a bother at times that strings and string-like objects are + # sequences. + try: + # non-sequence items should not work with len() + # non-empty strings will fail this + if len(query) and not isinstance(query[0], tuple): + raise TypeError + # Zero-length sequences of all types will get here and succeed, + # but that's a minor nit. Since the original implementation + # allowed empty dicts that type of behavior probably should be + # preserved for consistency + except TypeError as err: + raise TypeError("not a valid non-string sequence " + "or mapping object") from err + + l = [] + if not doseq: + for k, v in query: + if isinstance(k, bytes): + k = quote_via(k, safe) + else: + k = quote_via(str(k), safe, encoding, errors) + + if isinstance(v, bytes): + v = quote_via(v, safe) + else: + v = quote_via(str(v), safe, encoding, errors) + l.append(k + '=' + v) + else: + for k, v in query: + if isinstance(k, bytes): + k = quote_via(k, safe) + else: + k = quote_via(str(k), safe, encoding, errors) + + if isinstance(v, bytes): + v = quote_via(v, safe) + l.append(k + '=' + v) + elif isinstance(v, str): + v = quote_via(v, safe, encoding, errors) + l.append(k + '=' + v) + else: + try: + # Is this a sufficient test for sequence-ness? + x = len(v) + except TypeError: + # not a sequence + v = quote_via(str(v), safe, encoding, errors) + l.append(k + '=' + v) + else: + # loop over the sequence + for elt in v: + if isinstance(elt, bytes): + elt = quote_via(elt, safe) + else: + elt = quote_via(str(elt), safe, encoding, errors) + l.append(k + '=' + elt) + return '&'.join(l) + + +def to_bytes(url): + warnings.warn("urllib.parse.to_bytes() is deprecated as of 3.8", + DeprecationWarning, stacklevel=2) + return _to_bytes(url) + + +def _to_bytes(url): + """to_bytes(u"URL") --> 'URL'.""" + # Most URL schemes require ASCII. If that changes, the conversion + # can be relaxed. 
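+    # (editorial note: ASCII-only input round-trips unchanged, e.g.
+    # _to_bytes('http://example.com/') == 'http://example.com/')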
+    # XXX get rid of to_bytes()
+    if isinstance(url, str):
+        try:
+            url = url.encode("ASCII").decode()
+        except UnicodeError:
+            raise UnicodeError("URL " + repr(url) +
+                               " contains non-ASCII characters")
+    return url
+
+
+def unwrap(url):
+    """Transform a string like '<URL:scheme://host/path>' into 'scheme://host/path'.
+
+    The string is returned unchanged if it's not a wrapped URL.
+    """
+    url = str(url).strip()
+    if url[:1] == '<' and url[-1:] == '>':
+        url = url[1:-1].strip()
+    if url[:4] == 'URL:':
+        url = url[4:].strip()
+    return url
+
+
+def splittype(url):
+    warnings.warn("urllib.parse.splittype() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splittype(url)
+
+
+_typeprog = None
+def _splittype(url):
+    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
+    global _typeprog
+    if _typeprog is None:
+        _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL)
+
+    match = _typeprog.match(url)
+    if match:
+        scheme, data = match.groups()
+        return scheme.lower(), data
+    return None, url
+
+
+def splithost(url):
+    warnings.warn("urllib.parse.splithost() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splithost(url)
+
+
+_hostprog = None
+def _splithost(url):
+    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
+    global _hostprog
+    if _hostprog is None:
+        _hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL)
+
+    match = _hostprog.match(url)
+    if match:
+        host_port, path = match.groups()
+        if path and path[0] != '/':
+            path = '/' + path
+        return host_port, path
+    return None, url
+
+
+def splituser(host):
+    warnings.warn("urllib.parse.splituser() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splituser(host)
+
+
+def _splituser(host):
+    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
+    user, delim, host = host.rpartition('@')
+    return (user if delim else None), host
+
+
+def splitpasswd(user):
+    warnings.warn("urllib.parse.splitpasswd() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splitpasswd(user)
+
+
+def _splitpasswd(user):
+    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
+    user, delim, passwd = user.partition(':')
+    return user, (passwd if delim else None)
+
+
+def splitport(host):
+    warnings.warn("urllib.parse.splitport() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splitport(host)
+
+
+# splittag('/path#tag') --> '/path', 'tag'
+_portprog = None
+def _splitport(host):
+    """splitport('host:port') --> 'host', 'port'."""
+    global _portprog
+    if _portprog is None:
+        _portprog = re.compile('(.*):([0-9]*)', re.DOTALL)
+
+    match = _portprog.fullmatch(host)
+    if match:
+        host, port = match.groups()
+        if port:
+            return host, port
+    return host, None
+
+
+def splitnport(host, defport=-1):
+    warnings.warn("urllib.parse.splitnport() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splitnport(host, defport)
+
+
+def _splitnport(host, defport=-1):
+    """Split host and port, returning numeric port.
+    Return given default port if no ':' found; defaults to -1.
+    Return numerical port if a valid number is found after ':'.
+ Return None if ':' but not a valid number.""" + host, delim, port = host.rpartition(':') + if not delim: + host = port + elif port: + if port.isdigit() and port.isascii(): + nport = int(port) + else: + nport = None + return host, nport + return host, defport + + +def splitquery(url): + warnings.warn("urllib.parse.splitquery() is deprecated as of 3.8, " + "use urllib.parse.urlparse() instead", + DeprecationWarning, stacklevel=2) + return _splitquery(url) + + +def _splitquery(url): + """splitquery('/path?query') --> '/path', 'query'.""" + path, delim, query = url.rpartition('?') + if delim: + return path, query + return url, None + + +def splittag(url): + warnings.warn("urllib.parse.splittag() is deprecated as of 3.8, " + "use urllib.parse.urlparse() instead", + DeprecationWarning, stacklevel=2) + return _splittag(url) + + +def _splittag(url): + """splittag('/path#tag') --> '/path', 'tag'.""" + path, delim, tag = url.rpartition('#') + if delim: + return path, tag + return url, None + + +def splitattr(url): + warnings.warn("urllib.parse.splitattr() is deprecated as of 3.8, " + "use urllib.parse.urlparse() instead", + DeprecationWarning, stacklevel=2) + return _splitattr(url) + + +def _splitattr(url): + """splitattr('/path;attr1=value1;attr2=value2;...') -> + '/path', ['attr1=value1', 'attr2=value2', ...].""" + words = url.split(';') + return words[0], words[1:] + + +def splitvalue(attr): + warnings.warn("urllib.parse.splitvalue() is deprecated as of 3.8, " + "use urllib.parse.parse_qsl() instead", + DeprecationWarning, stacklevel=2) + return _splitvalue(attr) + + +def _splitvalue(attr): + """splitvalue('attr=value') --> 'attr', 'value'.""" + attr, delim, value = attr.partition('=') + return attr, (value if delim else None) diff --git a/Python314_4_x64_Template/Lib/urllib/request.py b/Python314_4_x64_Template/Lib/urllib/request.py new file mode 100644 index 00000000..8d7470a2 --- /dev/null +++ b/Python314_4_x64_Template/Lib/urllib/request.py @@ -0,0 +1,2163 @@ +"""An extensible library for opening URLs using a variety of protocols + +The simplest way to use this module is to call the urlopen function, +which accepts a string containing a URL or a Request object (described +below). It opens the URL and returns the results as file-like +object; the returned object has some extra methods described below. + +The OpenerDirector manages a collection of Handler objects that do +all the actual work. Each Handler implements a particular protocol or +option. The OpenerDirector is a composite object that invokes the +Handlers needed to open the requested URL. For example, the +HTTPHandler performs HTTP GET and POST requests and deals with +non-error returns. The HTTPRedirectHandler automatically deals with +HTTP 301, 302, 303, 307, and 308 redirect errors, and the +HTTPDigestAuthHandler deals with digest authentication. + +urlopen(url, data=None) -- Basic usage is the same as original +urllib. pass the url and optionally data to post to an HTTP URL, and +get a file-like object back. One difference is that you can also pass +a Request instance instead of URL. Raises a URLError (subclass of +OSError); for HTTP errors, raises an HTTPError, which can also be +treated as a valid response. + +build_opener -- Function that creates a new OpenerDirector instance. +Will install the default handlers. Accepts one or more Handlers as +arguments, either instances or Handler classes that it will +instantiate. 
If one of the argument is a subclass of the default +handler, the argument will be installed instead of the default. + +install_opener -- Installs a new opener as the default opener. + +objects of interest: + +OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages +the Handler classes, while dealing with requests and responses. + +Request -- An object that encapsulates the state of a request. The +state can be as simple as the URL. It can also include extra HTTP +headers, e.g. a User-Agent. + +BaseHandler -- + +internals: +BaseHandler and parent +_call_chain conventions + +Example usage: + +import urllib.request + +# set up authentication info +authinfo = urllib.request.HTTPBasicAuthHandler() +authinfo.add_password(realm='PDQ Application', + uri='https://mahler:8092/site-updates.py', + user='klem', + passwd='geheim$parole') + +proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"}) + +# build a new opener that adds authentication and caching FTP handlers +opener = urllib.request.build_opener(proxy_support, authinfo, + urllib.request.CacheFTPHandler) + +# install it +urllib.request.install_opener(opener) + +f = urllib.request.urlopen('https://www.python.org/') +""" + +# XXX issues: +# If an authentication error handler that tries to perform +# authentication for some reason but fails, how should the error be +# signalled? The client needs to know the HTTP error code. But if +# the handler knows that the problem was, e.g., that it didn't know +# that hash algo that requested in the challenge, it would be good to +# pass that information along to the client, too. +# ftp errors aren't handled cleanly +# check digest against correct (i.e. non-apache) implementation + +# Possible extensions: +# complex proxies XXX not sure what exactly was meant by this +# abstract factory for opener + +import base64 +import bisect +import contextlib +import email +import hashlib +import http.client +import io +import os +import re +import socket +import string +import sys +import time +import tempfile + + +from urllib.error import URLError, HTTPError, ContentTooShortError +from urllib.parse import ( + urlparse, urlsplit, urljoin, unwrap, quote, unquote, + _splittype, _splithost, _splitport, _splituser, _splitpasswd, + _splitattr, _splitvalue, _splittag, + unquote_to_bytes, urlunparse) +from urllib.response import addinfourl, addclosehook + +# check for SSL +try: + import ssl # noqa: F401 +except ImportError: + _have_ssl = False +else: + _have_ssl = True + +__all__ = [ + # Classes + 'Request', 'OpenerDirector', 'BaseHandler', 'HTTPDefaultErrorHandler', + 'HTTPRedirectHandler', 'HTTPCookieProcessor', 'ProxyHandler', + 'HTTPPasswordMgr', 'HTTPPasswordMgrWithDefaultRealm', + 'HTTPPasswordMgrWithPriorAuth', 'AbstractBasicAuthHandler', + 'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler', 'AbstractDigestAuthHandler', + 'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler', 'HTTPHandler', + 'FileHandler', 'FTPHandler', 'CacheFTPHandler', 'DataHandler', + 'UnknownHandler', 'HTTPErrorProcessor', + # Functions + 'urlopen', 'install_opener', 'build_opener', + 'pathname2url', 'url2pathname', 'getproxies', + # Legacy interface + 'urlretrieve', 'urlcleanup', +] + +# used in User-Agent header sent +__version__ = '%d.%d' % sys.version_info[:2] + +_opener = None +def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + *, context=None): + '''Open the URL url, which can be either a string or a Request object. 
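+
+    A minimal usage sketch (example.com is a placeholder host)::
+
+        import urllib.request
+        with urllib.request.urlopen('http://example.com/') as f:
+            body = f.read()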
+ + *data* must be an object specifying additional data to be sent to + the server, or None if no such data is needed. See Request for + details. + + urllib.request module uses HTTP/1.1 and includes a "Connection:close" + header in its HTTP requests. + + The optional *timeout* parameter specifies a timeout in seconds for + blocking operations like the connection attempt (if not specified, the + global default timeout setting will be used). This only works for HTTP, + HTTPS and FTP connections. + + If *context* is specified, it must be a ssl.SSLContext instance describing + the various SSL options. See HTTPSConnection for more details. + + + This function always returns an object which can work as a + context manager and has the properties url, headers, and status. + See urllib.response.addinfourl for more detail on these properties. + + For HTTP and HTTPS URLs, this function returns a http.client.HTTPResponse + object slightly modified. In addition to the three new methods above, the + msg attribute contains the same information as the reason attribute --- + the reason phrase returned by the server --- instead of the response + headers as it is specified in the documentation for HTTPResponse. + + For FTP, file, and data URLs, this function returns a + urllib.response.addinfourl object. + + Note that None may be returned if no handler handles the request (though + the default installed global OpenerDirector uses UnknownHandler to ensure + this never happens). + + In addition, if proxy settings are detected (for example, when a *_proxy + environment variable like http_proxy is set), ProxyHandler is default + installed and makes sure the requests are handled through the proxy. + + ''' + global _opener + if context: + https_handler = HTTPSHandler(context=context) + opener = build_opener(https_handler) + elif _opener is None: + _opener = opener = build_opener() + else: + opener = _opener + return opener.open(url, data, timeout) + +def install_opener(opener): + global _opener + _opener = opener + +_url_tempfiles = [] +def urlretrieve(url, filename=None, reporthook=None, data=None): + """ + Retrieve a URL into a temporary location on disk. + + Requires a URL argument. If a filename is passed, it is used as + the temporary file location. The reporthook argument should be + a callable that accepts a block number, a read size, and the + total file size of the URL target. The data argument should be + valid URL encoded data. + + If a filename is passed and the URL points to a local resource, + the result is a copy from local file to new file. + + Returns a tuple containing the path to the newly created + data file as well as the resulting HTTPMessage object. + """ + url_type, path = _splittype(url) + + with contextlib.closing(urlopen(url, data)) as fp: + headers = fp.info() + + # Just return the local path and the "headers" for file:// + # URLs. No sense in performing a copy unless requested. + if url_type == "file" and not filename: + return os.path.normpath(path), headers + + # Handle temporary file setup. 
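+        # Note: the NamedTemporaryFile branch below uses delete=False, so
+        # the file outlives this call; its path is recorded in
+        # _url_tempfiles so that urlcleanup() can remove it later.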
+        if filename:
+            tfp = open(filename, 'wb')
+        else:
+            tfp = tempfile.NamedTemporaryFile(delete=False)
+            filename = tfp.name
+            _url_tempfiles.append(filename)
+
+        with tfp:
+            result = filename, headers
+            bs = 1024*8
+            size = -1
+            read = 0
+            blocknum = 0
+            if "content-length" in headers:
+                size = int(headers["Content-Length"])
+
+            if reporthook:
+                reporthook(blocknum, bs, size)
+
+            while block := fp.read(bs):
+                read += len(block)
+                tfp.write(block)
+                blocknum += 1
+                if reporthook:
+                    reporthook(blocknum, bs, size)
+
+    if size >= 0 and read < size:
+        raise ContentTooShortError(
+            "retrieval incomplete: got only %i out of %i bytes"
+            % (read, size), result)
+
+    return result
+
+def urlcleanup():
+    """Clean up temporary files from urlretrieve calls."""
+    for temp_file in _url_tempfiles:
+        try:
+            os.unlink(temp_file)
+        except OSError:
+            pass
+
+    del _url_tempfiles[:]
+    global _opener
+    if _opener:
+        _opener = None
+
+# copied from cookielib.py
+_cut_port_re = re.compile(r":\d+$", re.ASCII)
+def request_host(request):
+    """Return request-host, as defined by RFC 2965.
+
+    Variation from RFC: returned value is lowercased, for convenient
+    comparison.
+
+    """
+    url = request.full_url
+    host = urlparse(url)[1]
+    if host == "":
+        host = request.get_header("Host", "")
+
+    # remove port, if present
+    host = _cut_port_re.sub("", host, 1)
+    return host.lower()
+
+class Request:
+
+    def __init__(self, url, data=None, headers={},
+                 origin_req_host=None, unverifiable=False,
+                 method=None):
+        self.full_url = url
+        self.headers = {}
+        self.unredirected_hdrs = {}
+        self._data = None
+        self.data = data
+        self._tunnel_host = None
+        for key, value in headers.items():
+            self.add_header(key, value)
+        if origin_req_host is None:
+            origin_req_host = request_host(self)
+        self.origin_req_host = origin_req_host
+        self.unverifiable = unverifiable
+        if method:
+            self.method = method
+
+    @property
+    def full_url(self):
+        if self.fragment:
+            return '{}#{}'.format(self._full_url, self.fragment)
+        return self._full_url
+
+    @full_url.setter
+    def full_url(self, url):
+        # unwrap('<URL:type://host/path>') --> 'type://host/path'
+        self._full_url = unwrap(url)
+        self._full_url, self.fragment = _splittag(self._full_url)
+        self._parse()
+
+    @full_url.deleter
+    def full_url(self):
+        self._full_url = None
+        self.fragment = None
+        self.selector = ''
+
+    @property
+    def data(self):
+        return self._data
+
+    @data.setter
+    def data(self, data):
+        if data != self._data:
+            self._data = data
+            # issue 16464
+            # if we change data we need to remove content-length header
+            # (cause it's most probably calculated for previous value)
+            if self.has_header("Content-length"):
+                self.remove_header("Content-length")
+
+    @data.deleter
+    def data(self):
+        self.data = None
+
+    def _parse(self):
+        self.type, rest = _splittype(self._full_url)
+        if self.type is None:
+            raise ValueError("unknown url type: %r" % self.full_url)
+        self.host, self.selector = _splithost(rest)
+        if self.host:
+            self.host = unquote(self.host)
+
+    def get_method(self):
+        """Return a string indicating the HTTP request method."""
+        default_method = "POST" if self.data is not None else "GET"
+        return getattr(self, 'method', default_method)
+
+    def get_full_url(self):
+        return self.full_url
+
+    def set_proxy(self, host, type):
+        if self.type == 'https' and not self._tunnel_host:
+            self._tunnel_host = self.host
+        else:
+            self.type = type
+            self.selector = self.full_url
+        self.host = host
+
+    def has_proxy(self):
+        return self.selector == self.full_url
+
+    def add_header(self, key, val):
+        # useful for something
like authentication + self.headers[key.capitalize()] = val + + def add_unredirected_header(self, key, val): + # will not be added to a redirected request + self.unredirected_hdrs[key.capitalize()] = val + + def has_header(self, header_name): + return (header_name in self.headers or + header_name in self.unredirected_hdrs) + + def get_header(self, header_name, default=None): + return self.headers.get( + header_name, + self.unredirected_hdrs.get(header_name, default)) + + def remove_header(self, header_name): + self.headers.pop(header_name, None) + self.unredirected_hdrs.pop(header_name, None) + + def header_items(self): + hdrs = {**self.unredirected_hdrs, **self.headers} + return list(hdrs.items()) + +class OpenerDirector: + def __init__(self): + client_version = "Python-urllib/%s" % __version__ + self.addheaders = [('User-agent', client_version)] + # self.handlers is retained only for backward compatibility + self.handlers = [] + # manage the individual handlers + self.handle_open = {} + self.handle_error = {} + self.process_response = {} + self.process_request = {} + + def add_handler(self, handler): + if not hasattr(handler, "add_parent"): + raise TypeError("expected BaseHandler instance, got %r" % + type(handler)) + + added = False + for meth in dir(handler): + if meth in ["redirect_request", "do_open", "proxy_open"]: + # oops, coincidental match + continue + + i = meth.find("_") + protocol = meth[:i] + condition = meth[i+1:] + + if condition.startswith("error"): + j = condition.find("_") + i + 1 + kind = meth[j+1:] + try: + kind = int(kind) + except ValueError: + pass + lookup = self.handle_error.get(protocol, {}) + self.handle_error[protocol] = lookup + elif condition == "open": + kind = protocol + lookup = self.handle_open + elif condition == "response": + kind = protocol + lookup = self.process_response + elif condition == "request": + kind = protocol + lookup = self.process_request + else: + continue + + handlers = lookup.setdefault(kind, []) + if handlers: + bisect.insort(handlers, handler) + else: + handlers.append(handler) + added = True + + if added: + bisect.insort(self.handlers, handler) + handler.add_parent(self) + + def close(self): + # Only exists for backwards compatibility. + pass + + def _call_chain(self, chain, kind, meth_name, *args): + # Handlers raise an exception if no one else should try to handle + # the request, or return None if they can't but another handler + # could. Otherwise, they return the response. 
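+        # For example, with chain=self.handle_open and kind='http',
+        # meth_name is 'http_open' and each registered handler's
+        # http_open(req) is tried in order until one returns non-None.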
+ handlers = chain.get(kind, ()) + for handler in handlers: + func = getattr(handler, meth_name) + result = func(*args) + if result is not None: + return result + + def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): + # accept a URL or a Request object + if isinstance(fullurl, str): + req = Request(fullurl, data) + else: + req = fullurl + if data is not None: + req.data = data + + req.timeout = timeout + protocol = req.type + + # pre-process request + meth_name = protocol+"_request" + for processor in self.process_request.get(protocol, []): + meth = getattr(processor, meth_name) + req = meth(req) + + sys.audit('urllib.Request', req.full_url, req.data, req.headers, req.get_method()) + response = self._open(req, data) + + # post-process response + meth_name = protocol+"_response" + for processor in self.process_response.get(protocol, []): + meth = getattr(processor, meth_name) + response = meth(req, response) + + return response + + def _open(self, req, data=None): + result = self._call_chain(self.handle_open, 'default', + 'default_open', req) + if result: + return result + + protocol = req.type + result = self._call_chain(self.handle_open, protocol, protocol + + '_open', req) + if result: + return result + + return self._call_chain(self.handle_open, 'unknown', + 'unknown_open', req) + + def error(self, proto, *args): + if proto in ('http', 'https'): + # XXX http[s] protocols are special-cased + dict = self.handle_error['http'] # https is not different than http + proto = args[2] # YUCK! + meth_name = 'http_error_%s' % proto + http_err = 1 + orig_args = args + else: + dict = self.handle_error + meth_name = proto + '_error' + http_err = 0 + args = (dict, proto, meth_name) + args + result = self._call_chain(*args) + if result: + return result + + if http_err: + args = (dict, 'default', 'http_error_default') + orig_args + return self._call_chain(*args) + +# XXX probably also want an abstract factory that knows when it makes +# sense to skip a superclass in favor of a subclass and when it might +# make sense to include both + +def build_opener(*handlers): + """Create an opener object from a list of handlers. + + The opener will use several default handlers, including support + for HTTP, FTP and when applicable HTTPS. + + If any of the handlers passed as arguments are subclasses of the + default handlers, the default handlers will not be used. + """ + opener = OpenerDirector() + default_classes = [ProxyHandler, UnknownHandler, HTTPHandler, + HTTPDefaultErrorHandler, HTTPRedirectHandler, + FTPHandler, FileHandler, HTTPErrorProcessor, + DataHandler] + if hasattr(http.client, "HTTPSConnection"): + default_classes.append(HTTPSHandler) + skip = set() + for klass in default_classes: + for check in handlers: + if isinstance(check, type): + if issubclass(check, klass): + skip.add(klass) + elif isinstance(check, klass): + skip.add(klass) + for klass in skip: + default_classes.remove(klass) + + for klass in default_classes: + opener.add_handler(klass()) + + for h in handlers: + if isinstance(h, type): + h = h() + opener.add_handler(h) + return opener + +class BaseHandler: + handler_order = 500 + + def add_parent(self, parent): + self.parent = parent + + def close(self): + # Only exists for backwards compatibility + pass + + def __lt__(self, other): + if not hasattr(other, "handler_order"): + # Try to preserve the old behavior of having custom classes + # inserted after default ones (works only for custom user + # classes which are not aware of handler_order). 
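+            # (add_handler's bisect.insort calls rely on this comparison
+            # to keep handler lists sorted by handler_order.)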
+ return True + return self.handler_order < other.handler_order + + +class HTTPErrorProcessor(BaseHandler): + """Process HTTP error responses.""" + handler_order = 1000 # after all other processing + + def http_response(self, request, response): + code, msg, hdrs = response.code, response.msg, response.info() + + # According to RFC 2616, "2xx" code indicates that the client's + # request was successfully received, understood, and accepted. + if not (200 <= code < 300): + response = self.parent.error( + 'http', request, response, code, msg, hdrs) + + return response + + https_response = http_response + +class HTTPDefaultErrorHandler(BaseHandler): + def http_error_default(self, req, fp, code, msg, hdrs): + raise HTTPError(req.full_url, code, msg, hdrs, fp) + +class HTTPRedirectHandler(BaseHandler): + # maximum number of redirections to any single URL + # this is needed because of the state that cookies introduce + max_repeats = 4 + # maximum total number of redirections (regardless of URL) before + # assuming we're in a loop + max_redirections = 10 + + def redirect_request(self, req, fp, code, msg, headers, newurl): + """Return a Request or None in response to a redirect. + + This is called by the http_error_30x methods when a + redirection response is received. If a redirection should + take place, return a new Request to allow http_error_30x to + perform the redirect. Otherwise, raise HTTPError if no-one + else should try to handle this url. Return None if you can't + but another Handler might. + """ + m = req.get_method() + if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD") + or code in (301, 302, 303) and m == "POST")): + raise HTTPError(req.full_url, code, msg, headers, fp) + + # Strictly (according to RFC 2616), 301 or 302 in response to + # a POST MUST NOT cause a redirection without confirmation + # from the user (of urllib.request, in this case). In practice, + # essentially all clients do redirect in this case, so we do + # the same. + + # Be conciliant with URIs containing a space. This is mainly + # redundant with the more complete encoding done in http_error_302(), + # but it is kept for compatibility with other callers. + newurl = newurl.replace(' ', '%20') + + CONTENT_HEADERS = ("content-length", "content-type") + newheaders = {k: v for k, v in req.headers.items() + if k.lower() not in CONTENT_HEADERS} + return Request(newurl, + method="HEAD" if m == "HEAD" else "GET", + headers=newheaders, + origin_req_host=req.origin_req_host, + unverifiable=True) + + # Implementation note: To avoid the server sending us into an + # infinite loop, the request object needs to track what URLs we + # have already seen. Do this by adding a handler-specific + # attribute to the Request object. + def http_error_302(self, req, fp, code, msg, headers): + # Some servers (incorrectly) return multiple Location headers + # (so probably same goes for URI). Use first header. + if "location" in headers: + newurl = headers["location"] + elif "uri" in headers: + newurl = headers["uri"] + else: + return + + # fix a possible malformed URL + urlparts = urlparse(newurl) + + # For security reasons we don't allow redirection to anything other + # than http, https or ftp. 
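+        # (An empty '' scheme is tolerated so that relative redirect
+        # targets, resolved against req.full_url via urljoin below,
+        # still work.)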
+
+        if urlparts.scheme not in ('http', 'https', 'ftp', ''):
+            raise HTTPError(
+                newurl, code,
+                "%s - Redirection to url '%s' is not allowed" % (msg, newurl),
+                headers, fp)
+
+        if not urlparts.path and urlparts.netloc:
+            urlparts = list(urlparts)
+            urlparts[2] = "/"
+        newurl = urlunparse(urlparts)
+
+        # http.client.parse_headers() decodes as ISO-8859-1.  Recover the
+        # original bytes and percent-encode non-ASCII bytes, and any special
+        # characters such as the space.
+        newurl = quote(
+            newurl, encoding="iso-8859-1", safe=string.punctuation)
+        newurl = urljoin(req.full_url, newurl)
+
+        # XXX Probably want to forget about the state of the current
+        # request, although that might interact poorly with other
+        # handlers that also use handler-specific request attributes
+        new = self.redirect_request(req, fp, code, msg, headers, newurl)
+        if new is None:
+            return
+
+        # loop detection
+        # .redirect_dict has a key url if url was previously visited.
+        if hasattr(req, 'redirect_dict'):
+            visited = new.redirect_dict = req.redirect_dict
+            if (visited.get(newurl, 0) >= self.max_repeats or
+                    len(visited) >= self.max_redirections):
+                raise HTTPError(req.full_url, code,
+                                self.inf_msg + msg, headers, fp)
+        else:
+            visited = new.redirect_dict = req.redirect_dict = {}
+        visited[newurl] = visited.get(newurl, 0) + 1
+
+        # Don't close the fp until we are sure that we won't use it
+        # with HTTPError.
+        fp.read()
+        fp.close()
+
+        return self.parent.open(new, timeout=req.timeout)
+
+    http_error_301 = http_error_303 = http_error_307 = http_error_308 = http_error_302
+
+    inf_msg = "The HTTP server returned a redirect error that would " \
+              "lead to an infinite loop.\n" \
+              "The last 30x error message was:\n"
+
+
+def _parse_proxy(proxy):
+    """Return (scheme, user, password, host/port) given a URL or an authority.
+
+    If a URL is supplied, it must have an authority (host:port) component.
+    According to RFC 3986, having an authority component means the URL must
+    have two slashes after the scheme.
+    """
+    scheme, r_scheme = _splittype(proxy)
+    if not r_scheme.startswith("/"):
+        # authority
+        scheme = None
+        authority = proxy
+    else:
+        # URL
+        if not r_scheme.startswith("//"):
+            raise ValueError("proxy URL with no authority: %r" % proxy)
+        # We have an authority, so for RFC 3986-compliant URLs (by ss 3.2.
+ # and 3.3.), path is empty or starts with '/' + if '@' in r_scheme: + host_separator = r_scheme.find('@') + end = r_scheme.find("/", host_separator) + else: + end = r_scheme.find("/", 2) + if end == -1: + end = None + authority = r_scheme[2:end] + userinfo, hostport = _splituser(authority) + if userinfo is not None: + user, password = _splitpasswd(userinfo) + else: + user = password = None + return scheme, user, password, hostport + +class ProxyHandler(BaseHandler): + # Proxies must be in front + handler_order = 100 + + def __init__(self, proxies=None): + if proxies is None: + proxies = getproxies() + assert hasattr(proxies, 'keys'), "proxies must be a mapping" + self.proxies = proxies + for type, url in proxies.items(): + type = type.lower() + setattr(self, '%s_open' % type, + lambda r, proxy=url, type=type, meth=self.proxy_open: + meth(r, proxy, type)) + + def proxy_open(self, req, proxy, type): + orig_type = req.type + proxy_type, user, password, hostport = _parse_proxy(proxy) + if proxy_type is None: + proxy_type = orig_type + + if req.host and proxy_bypass(req.host): + return None + + if user and password: + user_pass = '%s:%s' % (unquote(user), + unquote(password)) + creds = base64.b64encode(user_pass.encode()).decode("ascii") + req.add_header('Proxy-authorization', 'Basic ' + creds) + hostport = unquote(hostport) + req.set_proxy(hostport, proxy_type) + if orig_type == proxy_type or orig_type == 'https': + # let other handlers take care of it + return None + else: + # need to start over, because the other handlers don't + # grok the proxy's URL type + # e.g. if we have a constructor arg proxies like so: + # {'http': 'ftp://proxy.example.com'}, we may end up turning + # a request for http://acme.example.com/a into one for + # ftp://proxy.example.com/a + return self.parent.open(req, timeout=req.timeout) + +class HTTPPasswordMgr: + + def __init__(self): + self.passwd = {} + + def add_password(self, realm, uri, user, passwd): + # uri could be a single URI or a sequence + if isinstance(uri, str): + uri = [uri] + if realm not in self.passwd: + self.passwd[realm] = {} + for default_port in True, False: + reduced_uri = tuple( + self.reduce_uri(u, default_port) for u in uri) + self.passwd[realm][reduced_uri] = (user, passwd) + + def find_user_password(self, realm, authuri): + domains = self.passwd.get(realm, {}) + for default_port in True, False: + reduced_authuri = self.reduce_uri(authuri, default_port) + for uris, authinfo in domains.items(): + for uri in uris: + if self.is_suburi(uri, reduced_authuri): + return authinfo + return None, None + + def reduce_uri(self, uri, default_port=True): + """Accept authority or URI and extract only the authority and path.""" + # note HTTP URLs do not have a userinfo component + parts = urlsplit(uri) + if parts[1]: + # URI + scheme = parts[0] + authority = parts[1] + path = parts[2] or '/' + else: + # host or host:port + scheme = None + authority = uri + path = '/' + host, port = _splitport(authority) + if default_port and port is None and scheme is not None: + dport = {"http": 80, + "https": 443, + }.get(scheme) + if dport is not None: + authority = "%s:%d" % (host, dport) + return authority, path + + def is_suburi(self, base, test): + """Check if test is below base in a URI tree + + Both args must be URIs in reduced form. 
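+
+        Illustrative example: base ('example.com', '/a/') covers
+        test ('example.com', '/a/b.html') but not ('example.com', '/c').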
+ """ + if base == test: + return True + if base[0] != test[0]: + return False + prefix = base[1] + if prefix[-1:] != '/': + prefix += '/' + return test[1].startswith(prefix) + + +class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr): + + def find_user_password(self, realm, authuri): + user, password = HTTPPasswordMgr.find_user_password(self, realm, + authuri) + if user is not None: + return user, password + return HTTPPasswordMgr.find_user_password(self, None, authuri) + + +class HTTPPasswordMgrWithPriorAuth(HTTPPasswordMgrWithDefaultRealm): + + def __init__(self): + self.authenticated = {} + super().__init__() + + def add_password(self, realm, uri, user, passwd, is_authenticated=False): + self.update_authenticated(uri, is_authenticated) + # Add a default for prior auth requests + if realm is not None: + super().add_password(None, uri, user, passwd) + super().add_password(realm, uri, user, passwd) + + def update_authenticated(self, uri, is_authenticated=False): + # uri could be a single URI or a sequence + if isinstance(uri, str): + uri = [uri] + + for default_port in True, False: + for u in uri: + reduced_uri = self.reduce_uri(u, default_port) + self.authenticated[reduced_uri] = is_authenticated + + def is_authenticated(self, authuri): + for default_port in True, False: + reduced_authuri = self.reduce_uri(authuri, default_port) + for uri in self.authenticated: + if self.is_suburi(uri, reduced_authuri): + return self.authenticated[uri] + + +class AbstractBasicAuthHandler: + + # XXX this allows for multiple auth-schemes, but will stupidly pick + # the last one with a realm specified. + + # allow for double- and single-quoted realm values + # (single quotes are a violation of the RFC, but appear in the wild) + rx = re.compile('(?:^|,)' # start of the string or ',' + '[ \t]*' # optional whitespaces + '([^ \t,]+)' # scheme like "Basic" + '[ \t]+' # mandatory whitespaces + # realm=xxx + # realm='xxx' + # realm="xxx" + 'realm=(["\']?)([^"\']*)\\2', + re.I) + + # XXX could pre-emptively send auth info already accepted (RFC 2617, + # end of section 2, and section 1.2 immediately after "credentials" + # production). + + def __init__(self, password_mgr=None): + if password_mgr is None: + password_mgr = HTTPPasswordMgr() + self.passwd = password_mgr + self.add_password = self.passwd.add_password + + def _parse_realm(self, header): + # parse WWW-Authenticate header: accept multiple challenges per header + found_challenge = False + for mo in AbstractBasicAuthHandler.rx.finditer(header): + scheme, quote, realm = mo.groups() + if quote not in ['"', "'"]: + import warnings + warnings.warn("Basic Auth Realm was unquoted", + UserWarning, 3) + + yield (scheme, realm) + + found_challenge = True + + if not found_challenge: + if header: + scheme = header.split()[0] + else: + scheme = '' + yield (scheme, None) + + def http_error_auth_reqed(self, authreq, host, req, headers): + # host may be an authority (without userinfo) or a URL with an + # authority + headers = headers.get_all(authreq) + if not headers: + # no header found + return + + unsupported = None + for header in headers: + for scheme, realm in self._parse_realm(header): + if scheme.lower() != 'basic': + unsupported = scheme + continue + + if realm is not None: + # Use the first matching Basic challenge. + # Ignore following challenges even if they use the Basic + # scheme. 
+ return self.retry_http_basic_auth(host, req, realm) + + if unsupported is not None: + raise ValueError("AbstractBasicAuthHandler does not " + "support the following scheme: %r" + % (scheme,)) + + def retry_http_basic_auth(self, host, req, realm): + user, pw = self.passwd.find_user_password(realm, host) + if pw is not None: + raw = "%s:%s" % (user, pw) + auth = "Basic " + base64.b64encode(raw.encode()).decode("ascii") + if req.get_header(self.auth_header, None) == auth: + return None + req.add_unredirected_header(self.auth_header, auth) + return self.parent.open(req, timeout=req.timeout) + else: + return None + + def http_request(self, req): + if (not hasattr(self.passwd, 'is_authenticated') or + not self.passwd.is_authenticated(req.full_url)): + return req + + if not req.has_header('Authorization'): + user, passwd = self.passwd.find_user_password(None, req.full_url) + credentials = '{0}:{1}'.format(user, passwd).encode() + auth_str = base64.standard_b64encode(credentials).decode() + req.add_unredirected_header('Authorization', + 'Basic {}'.format(auth_str.strip())) + return req + + def http_response(self, req, response): + if hasattr(self.passwd, 'is_authenticated'): + if 200 <= response.code < 300: + self.passwd.update_authenticated(req.full_url, True) + else: + self.passwd.update_authenticated(req.full_url, False) + return response + + https_request = http_request + https_response = http_response + + + +class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): + + auth_header = 'Authorization' + + def http_error_401(self, req, fp, code, msg, headers): + url = req.full_url + response = self.http_error_auth_reqed('www-authenticate', + url, req, headers) + return response + + +class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): + + auth_header = 'Proxy-authorization' + + def http_error_407(self, req, fp, code, msg, headers): + # http_error_auth_reqed requires that there is no userinfo component in + # authority. Assume there isn't one, since urllib.request does not (and + # should not, RFC 3986 s. 3.2.1) support requests for URLs containing + # userinfo. + authority = req.host + response = self.http_error_auth_reqed('proxy-authenticate', + authority, req, headers) + return response + + +# Return n random bytes. +_randombytes = os.urandom + + +class AbstractDigestAuthHandler: + # Digest authentication is specified in RFC 2617/7616. + + # XXX The client does not inspect the Authentication-Info header + # in a successful response. + + # XXX It should be possible to test this implementation against + # a mock server that just generates a static set of challenges. + + # XXX qop="auth-int" supports is shaky + + def __init__(self, passwd=None): + if passwd is None: + passwd = HTTPPasswordMgr() + self.passwd = passwd + self.add_password = self.passwd.add_password + self.retried = 0 + self.nonce_count = 0 + self.last_nonce = None + + def reset_retry_count(self): + self.retried = 0 + + def http_error_auth_reqed(self, auth_header, host, req, headers): + authreq = headers.get(auth_header, None) + if self.retried > 5: + # Don't fail endlessly - if we failed once, we'll probably + # fail a second time. Hm. Unless the Password Manager is + # prompting for the information. Crap. 
This isn't great + # but it's better than the current 'repeat until recursion + # depth exceeded' approach + raise HTTPError(req.full_url, 401, "digest auth failed", + headers, None) + else: + self.retried += 1 + if authreq: + scheme = authreq.split()[0] + if scheme.lower() == 'digest': + return self.retry_http_digest_auth(req, authreq) + elif scheme.lower() != 'basic': + raise ValueError("AbstractDigestAuthHandler does not support" + " the following scheme: '%s'" % scheme) + + def retry_http_digest_auth(self, req, auth): + token, challenge = auth.split(' ', 1) + chal = parse_keqv_list(filter(None, parse_http_list(challenge))) + auth = self.get_authorization(req, chal) + if auth: + auth_val = 'Digest %s' % auth + if req.headers.get(self.auth_header, None) == auth_val: + return None + req.add_unredirected_header(self.auth_header, auth_val) + resp = self.parent.open(req, timeout=req.timeout) + return resp + + def get_cnonce(self, nonce): + # The cnonce-value is an opaque + # quoted string value provided by the client and used by both client + # and server to avoid chosen plaintext attacks, to provide mutual + # authentication, and to provide some message integrity protection. + # This isn't a fabulous effort, but it's probably Good Enough. + s = "%s:%s:%s:" % (self.nonce_count, nonce, time.ctime()) + b = s.encode("ascii") + _randombytes(8) + dig = hashlib.sha1(b).hexdigest() + return dig[:16] + + def get_authorization(self, req, chal): + try: + realm = chal['realm'] + nonce = chal['nonce'] + qop = chal.get('qop') + algorithm = chal.get('algorithm', 'MD5') + # mod_digest doesn't send an opaque, even though it isn't + # supposed to be optional + opaque = chal.get('opaque', None) + except KeyError: + return None + + H, KD = self.get_algorithm_impls(algorithm) + if H is None: + return None + + user, pw = self.passwd.find_user_password(realm, req.full_url) + if user is None: + return None + + # XXX not implemented yet + if req.data is not None: + entdig = self.get_entity_digest(req.data, chal) + else: + entdig = None + + A1 = "%s:%s:%s" % (user, realm, pw) + A2 = "%s:%s" % (req.get_method(), + # XXX selector: what about proxies and full urls + req.selector) + # NOTE: As per RFC 2617, when server sends "auth,auth-int", the client could use either `auth` + # or `auth-int` to the response back. we use `auth` to send the response back. + if qop is None: + respdig = KD(H(A1), "%s:%s" % (nonce, H(A2))) + elif 'auth' in qop.split(','): + if nonce == self.last_nonce: + self.nonce_count += 1 + else: + self.nonce_count = 1 + self.last_nonce = nonce + ncvalue = '%08x' % self.nonce_count + cnonce = self.get_cnonce(nonce) + noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, 'auth', H(A2)) + respdig = KD(H(A1), noncebit) + else: + # XXX handle auth-int. + raise URLError("qop '%s' is not supported." % qop) + + # XXX should the partial digests be encoded too? 
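+        # For reference (RFC 7616 s. 3.4.1), the qop='auth' branch above
+        # computes response = KD(H(A1), nonce ':' nc ':' cnonce ':' 'auth'
+        # ':' H(A2)), where KD(s, d) = H(s ':' d).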
+ + base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ + 'response="%s"' % (user, realm, nonce, req.selector, + respdig) + if opaque: + base += ', opaque="%s"' % opaque + if entdig: + base += ', digest="%s"' % entdig + base += ', algorithm="%s"' % algorithm + if qop: + base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce) + return base + + def get_algorithm_impls(self, algorithm): + # algorithm names taken from RFC 7616 Section 6.1 + # lambdas assume digest modules are imported at the top level + if algorithm == 'MD5': + H = lambda x: hashlib.md5(x.encode("ascii")).hexdigest() + elif algorithm == 'SHA': # non-standard, retained for compatibility. + H = lambda x: hashlib.sha1(x.encode("ascii")).hexdigest() + elif algorithm == 'SHA-256': + H = lambda x: hashlib.sha256(x.encode("ascii")).hexdigest() + # XXX MD5-sess + else: + raise ValueError("Unsupported digest authentication " + "algorithm %r" % algorithm) + KD = lambda s, d: H("%s:%s" % (s, d)) + return H, KD + + def get_entity_digest(self, data, chal): + # XXX not implemented yet + return None + + +class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): + """An authentication protocol defined by RFC 2069 + + Digest authentication improves on basic authentication because it + does not transmit passwords in the clear. + """ + + auth_header = 'Authorization' + handler_order = 490 # before Basic auth + + def http_error_401(self, req, fp, code, msg, headers): + host = urlparse(req.full_url)[1] + retry = self.http_error_auth_reqed('www-authenticate', + host, req, headers) + self.reset_retry_count() + return retry + + +class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): + + auth_header = 'Proxy-Authorization' + handler_order = 490 # before Basic auth + + def http_error_407(self, req, fp, code, msg, headers): + host = req.host + retry = self.http_error_auth_reqed('proxy-authenticate', + host, req, headers) + self.reset_retry_count() + return retry + +class AbstractHTTPHandler(BaseHandler): + + def __init__(self, debuglevel=None): + self._debuglevel = debuglevel if debuglevel is not None else http.client.HTTPConnection.debuglevel + + def set_http_debuglevel(self, level): + self._debuglevel = level + + def _get_content_length(self, request): + return http.client.HTTPConnection._get_content_length( + request.data, + request.get_method()) + + def do_request_(self, request): + host = request.host + if not host: + raise URLError('no host given') + + if request.data is not None: # POST + data = request.data + if isinstance(data, str): + msg = "POST data should be bytes, an iterable of bytes, " \ + "or a file object. It cannot be of type str." 
+ raise TypeError(msg) + if not request.has_header('Content-type'): + request.add_unredirected_header( + 'Content-type', + 'application/x-www-form-urlencoded') + if (not request.has_header('Content-length') + and not request.has_header('Transfer-encoding')): + content_length = self._get_content_length(request) + if content_length is not None: + request.add_unredirected_header( + 'Content-length', str(content_length)) + else: + request.add_unredirected_header( + 'Transfer-encoding', 'chunked') + + sel_host = host + if request.has_proxy(): + scheme, sel = _splittype(request.selector) + sel_host, sel_path = _splithost(sel) + if not request.has_header('Host'): + request.add_unredirected_header('Host', sel_host) + for name, value in self.parent.addheaders: + name = name.capitalize() + if not request.has_header(name): + request.add_unredirected_header(name, value) + + return request + + def do_open(self, http_class, req, **http_conn_args): + """Return an HTTPResponse object for the request, using http_class. + + http_class must implement the HTTPConnection API from http.client. + """ + host = req.host + if not host: + raise URLError('no host given') + + # will parse host:port + h = http_class(host, timeout=req.timeout, **http_conn_args) + h.set_debuglevel(self._debuglevel) + + headers = dict(req.unredirected_hdrs) + headers.update({k: v for k, v in req.headers.items() + if k not in headers}) + + # TODO(jhylton): Should this be redesigned to handle + # persistent connections? + + # We want to make an HTTP/1.1 request, but the addinfourl + # class isn't prepared to deal with a persistent connection. + # It will try to read all remaining data from the socket, + # which will block while the server waits for the next request. + # So make sure the connection gets closed after the (only) + # request. + headers["Connection"] = "close" + headers = {name.title(): val for name, val in headers.items()} + + if req._tunnel_host: + tunnel_headers = {} + proxy_auth_hdr = "Proxy-Authorization" + if proxy_auth_hdr in headers: + tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr] + # Proxy-Authorization should not be sent to origin + # server. + del headers[proxy_auth_hdr] + h.set_tunnel(req._tunnel_host, headers=tunnel_headers) + + try: + try: + h.request(req.get_method(), req.selector, req.data, headers, + encode_chunked=req.has_header('Transfer-encoding')) + except OSError as err: # timeout error + raise URLError(err) + r = h.getresponse() + except: + h.close() + raise + + # If the server does not send us a 'Connection: close' header, + # HTTPConnection assumes the socket should be left open. Manually + # mark the socket to be closed when this response object goes away. + if h.sock: + h.sock.close() + h.sock = None + + r.url = req.get_full_url() + # This line replaces the .msg attribute of the HTTPResponse + # with .headers, because urllib clients expect the response to + # have the reason in .msg. It would be good to mark this + # attribute is deprecated and get then to use info() or + # .headers. 
+ r.msg = r.reason + return r + + +class HTTPHandler(AbstractHTTPHandler): + + def http_open(self, req): + return self.do_open(http.client.HTTPConnection, req) + + http_request = AbstractHTTPHandler.do_request_ + +if hasattr(http.client, 'HTTPSConnection'): + + class HTTPSHandler(AbstractHTTPHandler): + + def __init__(self, debuglevel=None, context=None, check_hostname=None): + debuglevel = debuglevel if debuglevel is not None else http.client.HTTPSConnection.debuglevel + AbstractHTTPHandler.__init__(self, debuglevel) + if context is None: + http_version = http.client.HTTPSConnection._http_vsn + context = http.client._create_https_context(http_version) + if check_hostname is not None: + context.check_hostname = check_hostname + self._context = context + + def https_open(self, req): + return self.do_open(http.client.HTTPSConnection, req, + context=self._context) + + https_request = AbstractHTTPHandler.do_request_ + + __all__.append('HTTPSHandler') + +class HTTPCookieProcessor(BaseHandler): + def __init__(self, cookiejar=None): + import http.cookiejar + if cookiejar is None: + cookiejar = http.cookiejar.CookieJar() + self.cookiejar = cookiejar + + def http_request(self, request): + self.cookiejar.add_cookie_header(request) + return request + + def http_response(self, request, response): + self.cookiejar.extract_cookies(response, request) + return response + + https_request = http_request + https_response = http_response + +class UnknownHandler(BaseHandler): + def unknown_open(self, req): + type = req.type + raise URLError('unknown url type: %s' % type) + +def parse_keqv_list(l): + """Parse list of key=value strings where keys are not duplicated.""" + parsed = {} + for elt in l: + k, v = elt.split('=', 1) + if v[0] == '"' and v[-1] == '"': + v = v[1:-1] + parsed[k] = v + return parsed + +def parse_http_list(s): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Neither commas nor quotes count if they are escaped. + Only double-quotes count, not single-quotes. 
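+
+    Illustrative example: parse_http_list('a, "b, c", d') returns
+    ['a', '"b, c"', 'd'].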
+ """ + res = [] + part = '' + + escape = quote = False + for cur in s: + if escape: + part += cur + escape = False + continue + if quote: + if cur == '\\': + escape = True + continue + elif cur == '"': + quote = False + part += cur + continue + + if cur == ',': + res.append(part) + part = '' + continue + + if cur == '"': + quote = True + + part += cur + + # append last part + if part: + res.append(part) + + return [part.strip() for part in res] + +class FileHandler(BaseHandler): + # names for the localhost + names = None + def get_names(self): + if FileHandler.names is None: + try: + FileHandler.names = tuple( + socket.gethostbyname_ex('localhost')[2] + + socket.gethostbyname_ex(socket.gethostname())[2]) + except socket.gaierror: + FileHandler.names = (socket.gethostbyname('localhost'),) + return FileHandler.names + + # not entirely sure what the rules are here + def open_local_file(self, req): + import email.utils + import mimetypes + localfile = url2pathname(req.full_url, require_scheme=True, resolve_host=True) + try: + stats = os.stat(localfile) + size = stats.st_size + modified = email.utils.formatdate(stats.st_mtime, usegmt=True) + mtype = mimetypes.guess_file_type(localfile)[0] + headers = email.message_from_string( + 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' % + (mtype or 'text/plain', size, modified)) + origurl = pathname2url(localfile, add_scheme=True) + return addinfourl(open(localfile, 'rb'), headers, origurl) + except OSError as exp: + raise URLError(exp, exp.filename) + + file_open = open_local_file + +def _is_local_authority(authority, resolve): + # Compare hostnames + if not authority or authority == 'localhost': + return True + try: + hostname = socket.gethostname() + except (socket.gaierror, AttributeError): + pass + else: + if authority == hostname: + return True + # Compare IP addresses + if not resolve: + return False + try: + address = socket.gethostbyname(authority) + except (socket.gaierror, AttributeError, UnicodeEncodeError): + return False + return address in FileHandler().get_names() + +class FTPHandler(BaseHandler): + def ftp_open(self, req): + import ftplib + import mimetypes + host = req.host + if not host: + raise URLError('ftp error: no host given') + host, port = _splitport(host) + if port is None: + port = ftplib.FTP_PORT + else: + port = int(port) + + # username/password handling + user, host = _splituser(host) + if user: + user, passwd = _splitpasswd(user) + else: + passwd = None + host = unquote(host) + user = user or '' + passwd = passwd or '' + + try: + host = socket.gethostbyname(host) + except OSError as msg: + raise URLError(msg) + path, attrs = _splitattr(req.selector) + dirs = path.split('/') + dirs = list(map(unquote, dirs)) + dirs, file = dirs[:-1], dirs[-1] + if dirs and not dirs[0]: + dirs = dirs[1:] + fw = None + try: + fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout) + type = file and 'I' or 'D' + for attr in attrs: + attr, value = _splitvalue(attr) + if attr.lower() == 'type' and \ + value in ('a', 'A', 'i', 'I', 'd', 'D'): + type = value.upper() + fp, retrlen = fw.retrfile(file, type) + headers = "" + mtype = mimetypes.guess_type(req.full_url)[0] + if mtype: + headers += "Content-type: %s\n" % mtype + if retrlen is not None and retrlen >= 0: + headers += "Content-length: %d\n" % retrlen + headers = email.message_from_string(headers) + return addinfourl(fp, headers, req.full_url) + except Exception as exp: + if fw is not None and not fw.keepalive: + fw.close() + if isinstance(exp, ftplib.all_errors): + 
raise URLError(f"ftp error: {exp}") from exp + raise + + def connect_ftp(self, user, passwd, host, port, dirs, timeout): + return ftpwrapper(user, passwd, host, port, dirs, timeout, + persistent=False) + +class CacheFTPHandler(FTPHandler): + # XXX would be nice to have pluggable cache strategies + # XXX this stuff is definitely not thread safe + def __init__(self): + self.cache = {} + self.timeout = {} + self.soonest = 0 + self.delay = 60 + self.max_conns = 16 + + def setTimeout(self, t): + self.delay = t + + def setMaxConns(self, m): + self.max_conns = m + + def connect_ftp(self, user, passwd, host, port, dirs, timeout): + key = user, host, port, '/'.join(dirs), timeout + conn = self.cache.get(key) + if conn is None or not conn.keepalive: + if conn is not None: + conn.close() + conn = self.cache[key] = ftpwrapper(user, passwd, host, port, + dirs, timeout) + self.timeout[key] = time.time() + self.delay + self.check_cache() + return conn + + def check_cache(self): + # first check for old ones + t = time.time() + if self.soonest <= t: + for k, v in list(self.timeout.items()): + if v < t: + self.cache[k].close() + del self.cache[k] + del self.timeout[k] + self.soonest = min(list(self.timeout.values())) + + # then check the size + if len(self.cache) == self.max_conns: + for k, v in list(self.timeout.items()): + if v == self.soonest: + del self.cache[k] + del self.timeout[k] + break + self.soonest = min(list(self.timeout.values())) + + def clear_cache(self): + for conn in self.cache.values(): + conn.close() + self.cache.clear() + self.timeout.clear() + +class DataHandler(BaseHandler): + def data_open(self, req): + # data URLs as specified in RFC 2397. + # + # ignores POSTed data + # + # syntax: + # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data + # mediatype := [ type "/" subtype ] *( ";" parameter ) + # data := *urlchar + # parameter := attribute "=" value + url = req.full_url + + scheme, data = url.split(":",1) + mediatype, data = data.split(",",1) + + # Disallow control characters within mediatype. + if re.search(r"[\x00-\x1F\x7F]", mediatype): + raise ValueError( + "Control characters not allowed in data: mediatype") + + # even base64 encoded data URLs might be quoted so unquote in any case: + data = unquote_to_bytes(data) + if mediatype.endswith(";base64"): + data = base64.decodebytes(data) + mediatype = mediatype[:-7] + + if not mediatype: + mediatype = "text/plain;charset=US-ASCII" + + headers = email.message_from_string("Content-type: %s\nContent-length: %d\n" % + (mediatype, len(data))) + + return addinfourl(io.BytesIO(data), headers, url) + + +# Code moved from the old urllib module + +def url2pathname(url, *, require_scheme=False, resolve_host=False): + """Convert the given file URL to a local file system path. + + The 'file:' scheme prefix must be omitted unless *require_scheme* + is set to true. + + The URL authority may be resolved with gethostbyname() if + *resolve_host* is set to true. + """ + if not require_scheme: + url = 'file:' + url + scheme, authority, url = urlsplit(url)[:3] # Discard query and fragment. + if scheme != 'file': + raise URLError("URL is missing a 'file:' scheme") + if os.name == 'nt': + if authority[1:2] == ':': + # e.g. file://c:/file.txt + url = authority + url + elif not _is_local_authority(authority, resolve_host): + # e.g. file://server/share/file.txt + url = '//' + authority + url + elif url[:3] == '///': + # e.g. 
file://///server/share/file.txt + url = url[1:] + else: + if url[:1] == '/' and url[2:3] in (':', '|'): + # Skip past extra slash before DOS drive in URL path. + url = url[1:] + if url[1:2] == '|': + # Older URLs use a pipe after a drive letter + url = url[:1] + ':' + url[2:] + url = url.replace('/', '\\') + elif not _is_local_authority(authority, resolve_host): + raise URLError("file:// scheme is supported only on localhost") + encoding = sys.getfilesystemencoding() + errors = sys.getfilesystemencodeerrors() + return unquote(url, encoding=encoding, errors=errors) + + +def pathname2url(pathname, *, add_scheme=False): + """Convert the given local file system path to a file URL. + + The 'file:' scheme prefix is omitted unless *add_scheme* + is set to true. + """ + if os.name == 'nt': + pathname = pathname.replace('\\', '/') + encoding = sys.getfilesystemencoding() + errors = sys.getfilesystemencodeerrors() + scheme = 'file:' if add_scheme else '' + drive, root, tail = os.path.splitroot(pathname) + if drive: + # First, clean up some special forms. We are going to sacrifice the + # additional information anyway + if drive[:4] == '//?/': + drive = drive[4:] + if drive[:4].upper() == 'UNC/': + drive = '//' + drive[4:] + if drive[1:] == ':': + # DOS drive specified. Add three slashes to the start, producing + # an authority section with a zero-length authority, and a path + # section starting with a single slash. + drive = '///' + drive + drive = quote(drive, encoding=encoding, errors=errors, safe='/:') + elif root: + # Add explicitly empty authority to absolute path. If the path + # starts with exactly one slash then this change is mostly + # cosmetic, but if it begins with two or more slashes then this + # avoids interpreting the path as a URL authority. 
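+        # (Illustrative: on POSIX, pathname2url('//share/f', add_scheme=True)
+        # yields 'file:////share/f', keeping the authority section empty.)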
+ root = '//' + root + tail = quote(tail, encoding=encoding, errors=errors) + return scheme + drive + root + tail + + +# Utility functions + +_localhost = None +def localhost(): + """Return the IP address of the magic hostname 'localhost'.""" + global _localhost + if _localhost is None: + _localhost = socket.gethostbyname('localhost') + return _localhost + +_thishost = None +def thishost(): + """Return the IP addresses of the current host.""" + global _thishost + if _thishost is None: + try: + _thishost = tuple(socket.gethostbyname_ex(socket.gethostname())[2]) + except socket.gaierror: + _thishost = tuple(socket.gethostbyname_ex('localhost')[2]) + return _thishost + +_ftperrors = None +def ftperrors(): + """Return the set of errors raised by the FTP class.""" + global _ftperrors + if _ftperrors is None: + import ftplib + _ftperrors = ftplib.all_errors + return _ftperrors + +_noheaders = None +def noheaders(): + """Return an empty email Message object.""" + global _noheaders + if _noheaders is None: + _noheaders = email.message_from_string("") + return _noheaders + + +# Utility classes + +class ftpwrapper: + """Class used by open_ftp() for cache of open FTP connections.""" + + def __init__(self, user, passwd, host, port, dirs, timeout=None, + persistent=True): + self.user = user + self.passwd = passwd + self.host = host + self.port = port + self.dirs = dirs + self.timeout = timeout + self.refcount = 0 + self.keepalive = persistent + try: + self.init() + except: + self.close() + raise + + def init(self): + import ftplib + self.busy = 0 + self.ftp = ftplib.FTP() + self.ftp.connect(self.host, self.port, self.timeout) + self.ftp.login(self.user, self.passwd) + _target = '/'.join(self.dirs) + self.ftp.cwd(_target) + + def retrfile(self, file, type): + import ftplib + self.endtransfer() + if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1 + else: cmd = 'TYPE ' + type; isdir = 0 + try: + self.ftp.voidcmd(cmd) + except ftplib.all_errors: + self.init() + self.ftp.voidcmd(cmd) + conn = None + if file and not isdir: + # Try to retrieve as a file + try: + cmd = 'RETR ' + file + conn, retrlen = self.ftp.ntransfercmd(cmd) + except ftplib.error_perm as reason: + if str(reason)[:3] != '550': + raise URLError(f'ftp error: {reason}') from reason + if not conn: + # Set transfer mode to ASCII! + self.ftp.voidcmd('TYPE A') + # Try a directory listing. Verify that directory exists. + if file: + pwd = self.ftp.pwd() + try: + try: + self.ftp.cwd(file) + except ftplib.error_perm as reason: + raise URLError('ftp error: %r' % reason) from reason + finally: + self.ftp.cwd(pwd) + cmd = 'LIST ' + file + else: + cmd = 'LIST' + conn, retrlen = self.ftp.ntransfercmd(cmd) + self.busy = 1 + + ftpobj = addclosehook(conn.makefile('rb'), self.file_close) + self.refcount += 1 + conn.close() + # Pass back both a suitably decorated object and a retrieval length + return (ftpobj, retrlen) + + def endtransfer(self): + if not self.busy: + return + self.busy = 0 + try: + self.ftp.voidresp() + except ftperrors(): + pass + + def close(self): + self.keepalive = False + if self.refcount <= 0: + self.real_close() + + def file_close(self): + self.endtransfer() + self.refcount -= 1 + if self.refcount <= 0 and not self.keepalive: + self.real_close() + + def real_close(self): + self.endtransfer() + try: + self.ftp.close() + except ftperrors(): + pass + +# Proxy handling +def getproxies_environment(): + """Return a dictionary of scheme -> proxy server URL mappings. 
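+
+    Illustrative example: with http_proxy set to
+    'http://proxy.example.com:3128' in the environment, the result
+    includes {'http': 'http://proxy.example.com:3128'}.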
+
+    Scan the environment for variables named <scheme>_proxy;
+    this seems to be the standard convention.
+    """
+    # in order to prefer lowercase variables, process environment in
+    # two passes: first matches any, second pass matches lowercase only
+
+    # select only environment variables which end in (after making lowercase) _proxy
+    proxies = {}
+    environment = []
+    for name in os.environ:
+        # fast screen underscore position before more expensive case-folding
+        if len(name) > 5 and name[-6] == "_" and name[-5:].lower() == "proxy":
+            value = os.environ[name]
+            proxy_name = name[:-6].lower()
+            environment.append((name, value, proxy_name))
+            if value:
+                proxies[proxy_name] = value
+    # CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY
+    # (non-all-lowercase) as it may be set from the web server by a "Proxy:"
+    # header from the client
+    # If "proxy" is lowercase, it will still be used thanks to the next block
+    if 'REQUEST_METHOD' in os.environ:
+        proxies.pop('http', None)
+    for name, value, proxy_name in environment:
+        # not case-folded, checking here for lower-case env vars only
+        if name[-6:] == '_proxy':
+            if value:
+                proxies[proxy_name] = value
+            else:
+                proxies.pop(proxy_name, None)
+    return proxies
+
+def proxy_bypass_environment(host, proxies=None):
+    """Test if proxies should not be used for a particular host.
+
+    Checks the proxy dict for the value of no_proxy, which should
+    be a list of comma separated DNS suffixes, or '*' for all hosts.
+
+    """
+    if proxies is None:
+        proxies = getproxies_environment()
+    # don't bypass, if no_proxy isn't specified
+    try:
+        no_proxy = proxies['no']
+    except KeyError:
+        return False
+    # '*' is special case for always bypass
+    if no_proxy == '*':
+        return True
+    host = host.lower()
+    # strip port off host
+    hostonly, port = _splitport(host)
+    # check if the host ends with any of the DNS suffixes
+    for name in no_proxy.split(','):
+        name = name.strip()
+        if name:
+            name = name.lstrip('.')  # ignore leading dots
+            name = name.lower()
+            if hostonly == name or host == name:
+                return True
+            name = '.' + name
+            if hostonly.endswith(name) or host.endswith(name):
+                return True
+    # otherwise, don't bypass
+    return False
+
+
+# This code tests an OSX specific data structure but is testable on all
+# platforms
+def _proxy_bypass_macosx_sysconf(host, proxy_settings):
+    """
+    Return True iff this host shouldn't be accessed using a proxy
+
+    This function uses the MacOSX framework SystemConfiguration
+    to fetch the proxy information.
+
+    proxy_settings come from _scproxy._get_proxy_settings or get mocked ie:
+    { 'exclude_simple': bool,
+      'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.1', '10.0/16']
+    }
+    """
+    from fnmatch import fnmatch
+    from ipaddress import AddressValueError, IPv4Address
+
+    hostonly, port = _splitport(host)
+
+    def ip2num(ipAddr):
+        parts = ipAddr.split('.')
+        parts = list(map(int, parts))
+        if len(parts) != 4:
+            parts = (parts + [0, 0, 0, 0])[:4]
+        return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3]
+
+    # Check for simple host names:
+    if '.'
not in host: + if proxy_settings['exclude_simple']: + return True + + hostIP = None + try: + hostIP = int(IPv4Address(hostonly)) + except AddressValueError: + pass + + for value in proxy_settings.get('exceptions', ()): + # Items in the list are strings like these: *.local, 169.254/16 + if not value: continue + + m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value) + if m is not None and hostIP is not None: + base = ip2num(m.group(1)) + mask = m.group(2) + if mask is None: + mask = 8 * (m.group(1).count('.') + 1) + else: + mask = int(mask[1:]) + + if mask < 0 or mask > 32: + # System libraries ignore invalid prefix lengths + continue + + mask = 32 - mask + + if (hostIP >> mask) == (base >> mask): + return True + + elif fnmatch(host, value): + return True + + return False + + +# Same as _proxy_bypass_macosx_sysconf, testable on all platforms +def _proxy_bypass_winreg_override(host, override): + """Return True if the host should bypass the proxy server. + + The proxy override list is obtained from the Windows + Internet settings proxy override registry value. + + An example of a proxy override value is: + "www.example.com;*.example.net; 192.168.0.1" + """ + from fnmatch import fnmatch + + host, _ = _splitport(host) + proxy_override = override.split(';') + for test in proxy_override: + test = test.strip() + # "" should bypass the proxy server for all intranet addresses + if test == '': + if '.' not in host: + return True + elif fnmatch(host, test): + return True + return False + + +if sys.platform == 'darwin': + from _scproxy import _get_proxy_settings, _get_proxies + + def proxy_bypass_macosx_sysconf(host): + proxy_settings = _get_proxy_settings() + return _proxy_bypass_macosx_sysconf(host, proxy_settings) + + def getproxies_macosx_sysconf(): + """Return a dictionary of scheme -> proxy server URL mappings. + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + """ + return _get_proxies() + + + + def proxy_bypass(host): + """Return True, if host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or from the MacOSX framework SystemConfiguration. + + """ + proxies = getproxies_environment() + if proxies: + return proxy_bypass_environment(host, proxies) + else: + return proxy_bypass_macosx_sysconf(host) + + def getproxies(): + return getproxies_environment() or getproxies_macosx_sysconf() + + +elif os.name == 'nt': + def getproxies_registry(): + """Return a dictionary of scheme -> proxy server URL mappings. + + Win32 uses the registry to store proxies. + + """ + proxies = {} + try: + import winreg + except ImportError: + # Std module, so should be around - but you never know! + return proxies + try: + internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, + r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') + proxyEnable = winreg.QueryValueEx(internetSettings, + 'ProxyEnable')[0] + if proxyEnable: + # Returned as Unicode but problems if not converted to ASCII + proxyServer = str(winreg.QueryValueEx(internetSettings, + 'ProxyServer')[0]) + if '=' not in proxyServer and ';' not in proxyServer: + # Use one setting for all protocols. 
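+                    # Illustrative example (proxy.example.com is a
+                    # placeholder): a bare value such as
+                    # 'proxy.example.com:8080' is expanded to
+                    # 'http=proxy.example.com:8080;https=proxy.example.com:8080;ftp=proxy.example.com:8080'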
+ proxyServer = 'http={0};https={0};ftp={0}'.format(proxyServer) + for p in proxyServer.split(';'): + protocol, address = p.split('=', 1) + # See if address has a type:// prefix + if not re.match('(?:[^/:]+)://', address): + # Add type:// prefix to address without specifying type + if protocol in ('http', 'https', 'ftp'): + # The default proxy type of Windows is HTTP + address = 'http://' + address + elif protocol == 'socks': + address = 'socks://' + address + proxies[protocol] = address + # Use SOCKS proxy for HTTP(S) protocols + if proxies.get('socks'): + # The default SOCKS proxy type of Windows is SOCKS4 + address = re.sub(r'^socks://', 'socks4://', proxies['socks']) + proxies['http'] = proxies.get('http') or address + proxies['https'] = proxies.get('https') or address + internetSettings.Close() + except (OSError, ValueError, TypeError): + # Either registry key not found etc, or the value in an + # unexpected format. + # proxies already set up to be empty so nothing to do + pass + return proxies + + def getproxies(): + """Return a dictionary of scheme -> proxy server URL mappings. + + Returns settings gathered from the environment, if specified, + or the registry. + + """ + return getproxies_environment() or getproxies_registry() + + def proxy_bypass_registry(host): + try: + import winreg + except ImportError: + # Std modules, so should be around - but you never know! + return False + try: + internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, + r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') + proxyEnable = winreg.QueryValueEx(internetSettings, + 'ProxyEnable')[0] + proxyOverride = str(winreg.QueryValueEx(internetSettings, + 'ProxyOverride')[0]) + # ^^^^ Returned as Unicode but problems if not converted to ASCII + except OSError: + return False + if not proxyEnable or not proxyOverride: + return False + return _proxy_bypass_winreg_override(host, proxyOverride) + + def proxy_bypass(host): + """Return True, if host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or the registry. 
+ + """ + proxies = getproxies_environment() + if proxies: + return proxy_bypass_environment(host, proxies) + else: + return proxy_bypass_registry(host) + +else: + # By default use environment variables + getproxies = getproxies_environment + proxy_bypass = proxy_bypass_environment diff --git a/Python313_13_x64_Template/Lib/urllib/response.py b/Python314_4_x64_Template/Lib/urllib/response.py similarity index 100% rename from Python313_13_x64_Template/Lib/urllib/response.py rename to Python314_4_x64_Template/Lib/urllib/response.py diff --git a/Python314_4_x64_Template/Lib/urllib/robotparser.py b/Python314_4_x64_Template/Lib/urllib/robotparser.py new file mode 100644 index 00000000..4009fd6b --- /dev/null +++ b/Python314_4_x64_Template/Lib/urllib/robotparser.py @@ -0,0 +1,288 @@ +""" robotparser.py + + Copyright (C) 2000 Bastian Kleineidam + + You can choose between two licenses when using this package: + 1) GNU GPLv2 + 2) PSF license for Python 2.2 + + The robots.txt Exclusion Protocol is implemented as specified in + http://www.robotstxt.org/norobots-rfc.txt +""" + +import collections +import re +import urllib.error +import urllib.parse +import urllib.request + +__all__ = ["RobotFileParser"] + +RequestRate = collections.namedtuple("RequestRate", "requests seconds") + + +def normalize(path): + unquoted = urllib.parse.unquote(path, errors='surrogateescape') + return urllib.parse.quote(unquoted, errors='surrogateescape') + +def normalize_path(path): + path, sep, query = path.partition('?') + path = normalize(path) + if sep: + query = re.sub(r'[^=&]+', lambda m: normalize(m[0]), query) + path += '?' + query + return path + + +class RobotFileParser: + """ This class provides a set of methods to read, parse and answer + questions about a single robots.txt file. + + """ + + def __init__(self, url=''): + self.entries = [] + self.sitemaps = [] + self.default_entry = None + self.disallow_all = False + self.allow_all = False + self.set_url(url) + self.last_checked = 0 + + def mtime(self): + """Returns the time the robots.txt file was last fetched. + + This is useful for long-running web spiders that need to + check for new robots.txt files periodically. + + """ + return self.last_checked + + def modified(self): + """Sets the time the robots.txt file was last fetched to the + current time. + + """ + import time + self.last_checked = time.time() + + def set_url(self, url): + """Sets the URL referring to a robots.txt file.""" + self.url = url + self.host, self.path = urllib.parse.urlsplit(url)[1:3] + + def read(self): + """Reads the robots.txt URL and feeds it to the parser.""" + try: + f = urllib.request.urlopen(self.url) + except urllib.error.HTTPError as err: + if err.code in (401, 403): + self.disallow_all = True + elif err.code >= 400 and err.code < 500: + self.allow_all = True + err.close() + else: + raw = f.read() + self.parse(raw.decode("utf-8", "surrogateescape").splitlines()) + + def _add_entry(self, entry): + if "*" in entry.useragents: + # the default entry is considered last + if self.default_entry is None: + # the first default entry wins + self.default_entry = entry + else: + self.entries.append(entry) + + def parse(self, lines): + """Parse the input lines from a robots.txt file. + + We allow that a user-agent: line is not preceded by + one or more blank lines. 
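+
+        An illustrative input (paths are placeholders) is grouped into
+        one entry per user-agent block, for example:
+
+            User-agent: *
+            Disallow: /private/
+            Allow: /private/public.html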
+ """ + # states: + # 0: start state + # 1: saw user-agent line + # 2: saw an allow or disallow line + state = 0 + entry = Entry() + + self.modified() + for line in lines: + if not line: + if state == 1: + entry = Entry() + state = 0 + elif state == 2: + self._add_entry(entry) + entry = Entry() + state = 0 + # remove optional comment and strip line + i = line.find('#') + if i >= 0: + line = line[:i] + line = line.strip() + if not line: + continue + line = line.split(':', 1) + if len(line) == 2: + line[0] = line[0].strip().lower() + line[1] = line[1].strip() + if line[0] == "user-agent": + if state == 2: + self._add_entry(entry) + entry = Entry() + entry.useragents.append(line[1]) + state = 1 + elif line[0] == "disallow": + if state != 0: + entry.rulelines.append(RuleLine(line[1], False)) + state = 2 + elif line[0] == "allow": + if state != 0: + entry.rulelines.append(RuleLine(line[1], True)) + state = 2 + elif line[0] == "crawl-delay": + if state != 0: + # before trying to convert to int we need to make + # sure that robots.txt has valid syntax otherwise + # it will crash + if line[1].strip().isdigit(): + entry.delay = int(line[1]) + state = 2 + elif line[0] == "request-rate": + if state != 0: + numbers = line[1].split('/') + # check if all values are sane + if (len(numbers) == 2 and numbers[0].strip().isdigit() + and numbers[1].strip().isdigit()): + entry.req_rate = RequestRate(int(numbers[0]), int(numbers[1])) + state = 2 + elif line[0] == "sitemap": + # According to http://www.sitemaps.org/protocol.html + # "This directive is independent of the user-agent line, + # so it doesn't matter where you place it in your file." + # Therefore we do not change the state of the parser. + self.sitemaps.append(line[1]) + if state == 2: + self._add_entry(entry) + + def can_fetch(self, useragent, url): + """using the parsed robots.txt decide if useragent can fetch url""" + if self.disallow_all: + return False + if self.allow_all: + return True + # Until the robots.txt file has been read or found not + # to exist, we must assume that no url is allowable. + # This prevents false positives when a user erroneously + # calls can_fetch() before calling read(). + if not self.last_checked: + return False + # search for given user agent matches + # the first match counts + # TODO: The private API is used in order to preserve an empty query. + # This is temporary until the public API starts supporting this feature. 
+ parsed_url = urllib.parse._urlsplit(url, '') + url = urllib.parse._urlunsplit(None, None, *parsed_url[2:]) + url = normalize_path(url) + if not url: + url = "/" + for entry in self.entries: + if entry.applies_to(useragent): + return entry.allowance(url) + # try the default entry last + if self.default_entry: + return self.default_entry.allowance(url) + # agent not found ==> access granted + return True + + def crawl_delay(self, useragent): + if not self.mtime(): + return None + for entry in self.entries: + if entry.applies_to(useragent): + return entry.delay + if self.default_entry: + return self.default_entry.delay + return None + + def request_rate(self, useragent): + if not self.mtime(): + return None + for entry in self.entries: + if entry.applies_to(useragent): + return entry.req_rate + if self.default_entry: + return self.default_entry.req_rate + return None + + def site_maps(self): + if not self.sitemaps: + return None + return self.sitemaps + + def __str__(self): + entries = self.entries + if self.default_entry is not None: + entries = entries + [self.default_entry] + return '\n\n'.join(map(str, entries)) + +class RuleLine: + """A rule line is a single "Allow:" (allowance==True) or "Disallow:" + (allowance==False) followed by a path.""" + def __init__(self, path, allowance): + if path == '' and not allowance: + # an empty value means allow all + allowance = True + self.path = normalize_path(path) + self.allowance = allowance + + def applies_to(self, filename): + return self.path == "*" or filename.startswith(self.path) + + def __str__(self): + return ("Allow" if self.allowance else "Disallow") + ": " + self.path + + +class Entry: + """An entry has one or more user-agents and zero or more rulelines""" + def __init__(self): + self.useragents = [] + self.rulelines = [] + self.delay = None + self.req_rate = None + + def __str__(self): + ret = [] + for agent in self.useragents: + ret.append(f"User-agent: {agent}") + if self.delay is not None: + ret.append(f"Crawl-delay: {self.delay}") + if self.req_rate is not None: + rate = self.req_rate + ret.append(f"Request-rate: {rate.requests}/{rate.seconds}") + ret.extend(map(str, self.rulelines)) + return '\n'.join(ret) + + def applies_to(self, useragent): + """check if this entry applies to the specified agent""" + # split the name token and make it lower case + useragent = useragent.split("/")[0].lower() + for agent in self.useragents: + if agent == '*': + # we have the catch-all agent + return True + agent = agent.lower() + if agent in useragent: + return True + return False + + def allowance(self, filename): + """Preconditions: + - our agent applies to this entry + - filename is URL encoded""" + for line in self.rulelines: + if line.applies_to(filename): + return line.allowance + return True diff --git a/Python314_4_x64_Template/Lib/uuid.py b/Python314_4_x64_Template/Lib/uuid.py new file mode 100644 index 00000000..313f2fc4 --- /dev/null +++ b/Python314_4_x64_Template/Lib/uuid.py @@ -0,0 +1,1007 @@ +r"""UUID objects (universally unique identifiers) according to RFC 4122/9562. + +This module provides immutable UUID objects (class UUID) and functions for +generating UUIDs corresponding to a specific UUID version as specified in +RFC 4122/9562, e.g., uuid1() for UUID version 1, uuid3() for UUID version 3, +and so on. + +Note that UUID version 2 is deliberately omitted as it is outside the scope +of the RFC. + +If all you want is a unique ID, you should probably call uuid1() or uuid4(). 
+Note that uuid1() may compromise privacy since it creates a UUID containing
+the computer's network address. uuid4() creates a random UUID.
+
+Typical usage:
+
+    >>> import uuid
+
+    # make a UUID based on the host ID and current time
+    >>> uuid.uuid1()  # doctest: +SKIP
+    UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
+
+    # make a UUID using an MD5 hash of a namespace UUID and a name
+    >>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
+    UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
+
+    # make a random UUID
+    >>> uuid.uuid4()  # doctest: +SKIP
+    UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
+
+    # make a UUID using a SHA-1 hash of a namespace UUID and a name
+    >>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
+    UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
+
+    # make a UUID from a string of hex digits (braces and hyphens ignored)
+    >>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
+
+    # convert a UUID to a string of hex digits in standard form
+    >>> str(x)
+    '00010203-0405-0607-0809-0a0b0c0d0e0f'
+
+    # get the raw 16 bytes of the UUID
+    >>> x.bytes
+    b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
+
+    # make a UUID from a 16-byte string
+    >>> uuid.UUID(bytes=x.bytes)
+    UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
+
+    # get the Nil UUID
+    >>> uuid.NIL
+    UUID('00000000-0000-0000-0000-000000000000')
+
+    # get the Max UUID
+    >>> uuid.MAX
+    UUID('ffffffff-ffff-ffff-ffff-ffffffffffff')
+"""
+
+import os
+import sys
+import time
+
+from enum import Enum, _simple_enum
+
+
+__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
+
+# The recognized platforms - known behaviors
+if sys.platform in {'win32', 'darwin', 'emscripten', 'wasi'}:
+    _AIX = _LINUX = False
+elif sys.platform == 'linux':
+    _LINUX = True
+    _AIX = False
+else:
+    import platform
+    _platform_system = platform.system()
+    _AIX = _platform_system == 'AIX'
+    _LINUX = _platform_system in ('Linux', 'Android')
+
+_MAC_DELIM = b':'
+_MAC_OMITS_LEADING_ZEROES = False
+if _AIX:
+    _MAC_DELIM = b'.'
+    _MAC_OMITS_LEADING_ZEROES = True
+
+RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
+    'reserved for NCS compatibility', 'specified in RFC 4122',
+    'reserved for Microsoft compatibility', 'reserved for future definition']
+
+int_ = int  # The built-in int type
+bytes_ = bytes  # The built-in bytes type
+
+
+@_simple_enum(Enum)
+class SafeUUID:
+    safe = 0
+    unsafe = -1
+    unknown = None
+
+
+_UINT_128_MAX = (1 << 128) - 1
+# 128-bit mask to clear the variant and version bits of a UUID integral value
+_RFC_4122_CLEARFLAGS_MASK = ~((0xf000 << 64) | (0xc000 << 48))
+# RFC 4122 variant bits and version bits to activate on a UUID integral value.
+_RFC_4122_VERSION_1_FLAGS = ((1 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_3_FLAGS = ((3 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_4_FLAGS = ((4 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_5_FLAGS = ((5 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_6_FLAGS = ((6 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_7_FLAGS = ((7 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_8_FLAGS = ((8 << 76) | (0x8000 << 48))
+
+
+class UUID:
+    """Instances of the UUID class represent UUIDs as specified in RFC 4122.
+    UUID objects are immutable, hashable, and usable as dictionary keys.
+    Converting a UUID to a string with str() yields something in the form
+    '12345678-1234-1234-1234-123456789abc'. 
The UUID constructor accepts + five possible forms: a similar string of hexadecimal digits, or a tuple + of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and + 48-bit values respectively) as an argument named 'fields', or a string + of 16 bytes (with all the integer fields in big-endian order) as an + argument named 'bytes', or a string of 16 bytes (with the first three + fields in little-endian order) as an argument named 'bytes_le', or a + single 128-bit integer as an argument named 'int'. + + UUIDs have these read-only attributes: + + bytes the UUID as a 16-byte string (containing the six + integer fields in big-endian byte order) + + bytes_le the UUID as a 16-byte string (with time_low, time_mid, + and time_hi_version in little-endian byte order) + + fields a tuple of the six integer fields of the UUID, + which are also available as six individual attributes + and two derived attributes. Those attributes are not + always relevant to all UUID versions: + + The 'time_*' attributes are only relevant to version 1. + + The 'clock_seq*' and 'node' attributes are only relevant + to versions 1 and 6. + + The 'time' attribute is only relevant to versions 1, 6 + and 7. + + time_low the first 32 bits of the UUID + time_mid the next 16 bits of the UUID + time_hi_version the next 16 bits of the UUID + clock_seq_hi_variant the next 8 bits of the UUID + clock_seq_low the next 8 bits of the UUID + node the last 48 bits of the UUID + + time the 60-bit timestamp for UUIDv1/v6, + or the 48-bit timestamp for UUIDv7 + clock_seq the 14-bit sequence number + + hex the UUID as a 32-character hexadecimal string + + int the UUID as a 128-bit integer + + urn the UUID as a URN as specified in RFC 4122/9562 + + variant the UUID variant (one of the constants RESERVED_NCS, + RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE) + + version the UUID version number (1 through 8, meaningful only + when the variant is RFC_4122) + + is_safe An enum indicating whether the UUID has been generated in + a way that is safe for multiprocessing applications, via + uuid_generate_time_safe(3). + """ + + __slots__ = ('int', 'is_safe', '__weakref__') + + def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, + int=None, version=None, + *, is_safe=SafeUUID.unknown): + r"""Create a UUID from either a string of 32 hexadecimal digits, + a string of 16 bytes as the 'bytes' argument, a string of 16 bytes + in little-endian order as the 'bytes_le' argument, a tuple of six + integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version, + 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as + the 'fields' argument, or a single 128-bit integer as the 'int' + argument. When a string of hex digits is given, curly braces, + hyphens, and a URN prefix are all optional. For example, these + expressions all yield the same UUID: + + UUID('{12345678-1234-5678-1234-567812345678}') + UUID('12345678123456781234567812345678') + UUID('urn:uuid:12345678-1234-5678-1234-567812345678') + UUID(bytes='\x12\x34\x56\x78'*4) + UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' + + '\x12\x34\x56\x78\x12\x34\x56\x78') + UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678)) + UUID(int=0x12345678123456781234567812345678) + + Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must + be given. The 'version' argument is optional; if given, the resulting + UUID will have its variant and version set according to RFC 4122, + overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. 
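+
+        As an illustrative consequence of the 'version' override,
+        UUID(int=0, version=4) yields
+        UUID('00000000-0000-4000-8000-000000000000'): only the variant
+        and version bits end up set.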
+ + is_safe is an enum exposed as an attribute on the instance. It + indicates whether the UUID has been generated in a way that is safe + for multiprocessing applications, via uuid_generate_time_safe(3). + """ + + if [hex, bytes, bytes_le, fields, int].count(None) != 4: + raise TypeError('one of the hex, bytes, bytes_le, fields, ' + 'or int arguments must be given') + if int is not None: + pass + elif hex is not None: + hex = hex.replace('urn:', '').replace('uuid:', '') + hex = hex.strip('{}').replace('-', '') + if len(hex) != 32: + raise ValueError('badly formed hexadecimal UUID string') + int = int_(hex, 16) + elif bytes_le is not None: + if len(bytes_le) != 16: + raise ValueError('bytes_le is not a 16-char string') + assert isinstance(bytes_le, bytes_), repr(bytes_le) + bytes = (bytes_le[4-1::-1] + bytes_le[6-1:4-1:-1] + + bytes_le[8-1:6-1:-1] + bytes_le[8:]) + int = int_.from_bytes(bytes) # big endian + elif bytes is not None: + if len(bytes) != 16: + raise ValueError('bytes is not a 16-char string') + assert isinstance(bytes, bytes_), repr(bytes) + int = int_.from_bytes(bytes) # big endian + elif fields is not None: + if len(fields) != 6: + raise ValueError('fields is not a 6-tuple') + (time_low, time_mid, time_hi_version, + clock_seq_hi_variant, clock_seq_low, node) = fields + if not 0 <= time_low < (1 << 32): + raise ValueError('field 1 out of range (need a 32-bit value)') + if not 0 <= time_mid < (1 << 16): + raise ValueError('field 2 out of range (need a 16-bit value)') + if not 0 <= time_hi_version < (1 << 16): + raise ValueError('field 3 out of range (need a 16-bit value)') + if not 0 <= clock_seq_hi_variant < (1 << 8): + raise ValueError('field 4 out of range (need an 8-bit value)') + if not 0 <= clock_seq_low < (1 << 8): + raise ValueError('field 5 out of range (need an 8-bit value)') + if not 0 <= node < (1 << 48): + raise ValueError('field 6 out of range (need a 48-bit value)') + clock_seq = (clock_seq_hi_variant << 8) | clock_seq_low + int = ((time_low << 96) | (time_mid << 80) | + (time_hi_version << 64) | (clock_seq << 48) | node) + if not 0 <= int <= _UINT_128_MAX: + raise ValueError('int is out of range (need a 128-bit value)') + if version is not None: + if not 1 <= version <= 8: + raise ValueError('illegal version number') + # clear the variant and the version number bits + int &= _RFC_4122_CLEARFLAGS_MASK + # Set the variant to RFC 4122/9562. + int |= 0x8000_0000_0000_0000 # (0x8000 << 48) + # Set the version number. + int |= version << 76 + object.__setattr__(self, 'int', int) + object.__setattr__(self, 'is_safe', is_safe) + + @classmethod + def _from_int(cls, value): + """Create a UUID from an integer *value*. Internal use only.""" + assert 0 <= value <= _UINT_128_MAX, repr(value) + self = object.__new__(cls) + object.__setattr__(self, 'int', value) + object.__setattr__(self, 'is_safe', SafeUUID.unknown) + return self + + def __getstate__(self): + d = {'int': self.int} + if self.is_safe != SafeUUID.unknown: + # is_safe is a SafeUUID instance. Return just its value, so that + # it can be un-pickled in older Python versions without SafeUUID. 
+ d['is_safe'] = self.is_safe.value + return d + + def __setstate__(self, state): + object.__setattr__(self, 'int', state['int']) + # is_safe was added in 3.7; it is also omitted when it is "unknown" + object.__setattr__(self, 'is_safe', + SafeUUID(state['is_safe']) + if 'is_safe' in state else SafeUUID.unknown) + + def __eq__(self, other): + if isinstance(other, UUID): + return self.int == other.int + return NotImplemented + + # Q. What's the value of being able to sort UUIDs? + # A. Use them as keys in a B-Tree or similar mapping. + + def __lt__(self, other): + if isinstance(other, UUID): + return self.int < other.int + return NotImplemented + + def __gt__(self, other): + if isinstance(other, UUID): + return self.int > other.int + return NotImplemented + + def __le__(self, other): + if isinstance(other, UUID): + return self.int <= other.int + return NotImplemented + + def __ge__(self, other): + if isinstance(other, UUID): + return self.int >= other.int + return NotImplemented + + def __hash__(self): + return hash(self.int) + + def __int__(self): + return self.int + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, str(self)) + + def __setattr__(self, name, value): + raise TypeError('UUID objects are immutable') + + def __str__(self): + x = self.hex + return f'{x[:8]}-{x[8:12]}-{x[12:16]}-{x[16:20]}-{x[20:]}' + + @property + def bytes(self): + return self.int.to_bytes(16) # big endian + + @property + def bytes_le(self): + bytes = self.bytes + return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] + + bytes[8:]) + + @property + def fields(self): + return (self.time_low, self.time_mid, self.time_hi_version, + self.clock_seq_hi_variant, self.clock_seq_low, self.node) + + @property + def time_low(self): + return self.int >> 96 + + @property + def time_mid(self): + return (self.int >> 80) & 0xffff + + @property + def time_hi_version(self): + return (self.int >> 64) & 0xffff + + @property + def clock_seq_hi_variant(self): + return (self.int >> 56) & 0xff + + @property + def clock_seq_low(self): + return (self.int >> 48) & 0xff + + @property + def time(self): + if self.version == 6: + # time_hi (32) | time_mid (16) | ver (4) | time_lo (12) | ... (64) + time_hi = self.int >> 96 + time_lo = (self.int >> 64) & 0x0fff + return time_hi << 28 | (self.time_mid << 12) | time_lo + elif self.version == 7: + # unix_ts_ms (48) | ... (80) + return self.int >> 80 + else: + # time_lo (32) | time_mid (16) | ver (4) | time_hi (12) | ... (64) + # + # For compatibility purposes, we do not warn or raise when the + # version is not 1 (timestamp is irrelevant to other versions). + time_hi = (self.int >> 64) & 0x0fff + time_lo = self.int >> 96 + return time_hi << 48 | (self.time_mid << 32) | time_lo + + @property + def clock_seq(self): + return (((self.clock_seq_hi_variant & 0x3f) << 8) | + self.clock_seq_low) + + @property + def node(self): + return self.int & 0xffffffffffff + + @property + def hex(self): + return self.bytes.hex() + + @property + def urn(self): + return 'urn:uuid:' + str(self) + + @property + def variant(self): + if not self.int & (0x8000 << 48): + return RESERVED_NCS + elif not self.int & (0x4000 << 48): + return RFC_4122 + elif not self.int & (0x2000 << 48): + return RESERVED_MICROSOFT + else: + return RESERVED_FUTURE + + @property + def version(self): + # The version bits are only meaningful for RFC 4122/9562 UUIDs. 
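+        # (Illustratively, the version is the high nibble of the
+        # time_hi_version field, i.e. (self.int >> 76) & 0xf, which is
+        # the 13th hex digit of the canonical string form.)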
+ if self.variant == RFC_4122: + return int((self.int >> 76) & 0xf) + + +def _get_command_stdout(command, *args): + import io, os, shutil, subprocess + + try: + path_dirs = os.environ.get('PATH', os.defpath).split(os.pathsep) + path_dirs.extend(['/sbin', '/usr/sbin']) + executable = shutil.which(command, path=os.pathsep.join(path_dirs)) + if executable is None: + return None + # LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output + # on stderr (Note: we don't have an example where the words we search + # for are actually localized, but in theory some system could do so.) + env = dict(os.environ) + env['LC_ALL'] = 'C' + # Empty strings will be quoted by popen so we should just omit it + if args != ('',): + command = (executable, *args) + else: + command = (executable,) + proc = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + env=env) + if not proc: + return None + stdout, stderr = proc.communicate() + return io.BytesIO(stdout) + except (OSError, subprocess.SubprocessError): + return None + + +# For MAC (a.k.a. IEEE 802, or EUI-48) addresses, the second least significant +# bit of the first octet signifies whether the MAC address is universally (0) +# or locally (1) administered. Network cards from hardware manufacturers will +# always be universally administered to guarantee global uniqueness of the MAC +# address, but any particular machine may have other interfaces which are +# locally administered. An example of the latter is the bridge interface to +# the Touch Bar on MacBook Pros. +# +# This bit works out to be the 42nd bit counting from 1 being the least +# significant, or 1<<41. We'll prefer universally administered MAC addresses +# over locally administered ones since the former are globally unique, but +# we'll return the first of the latter found if that's all the machine has. +# +# See https://en.wikipedia.org/wiki/MAC_address#Universal_vs._local_(U/L_bit) + +def _is_universal(mac): + return not (mac & (1 << 41)) + + +def _find_mac_near_keyword(command, args, keywords, get_word_index): + """Searches a command's output for a MAC address near a keyword. + + Each line of words in the output is case-insensitively searched for + any of the given keywords. Upon a match, get_word_index is invoked + to pick a word from the line, given the index of the match. For + example, lambda i: 0 would get the first word on the line, while + lambda i: i - 1 would get the word preceding the keyword. + """ + stdout = _get_command_stdout(command, args) + if stdout is None: + return None + + first_local_mac = None + for line in stdout: + words = line.lower().rstrip().split() + for i in range(len(words)): + if words[i] in keywords: + try: + word = words[get_word_index(i)] + mac = int(word.replace(_MAC_DELIM, b''), 16) + except (ValueError, IndexError): + # Virtual interfaces, such as those provided by + # VPNs, do not have a colon-delimited MAC address + # as expected, but a 16-byte HWAddr separated by + # dashes. These should be ignored in favor of a + # real MAC address + pass + else: + if _is_universal(mac): + return mac + first_local_mac = first_local_mac or mac + return first_local_mac or None + + +def _parse_mac(word): + # Accept 'HH:HH:HH:HH:HH:HH' MAC address (ex: '52:54:00:9d:0e:67'), + # but reject IPv6 address (ex: 'fe80::5054:ff:fe9' or '123:2:3:4:5:6:7:8'). + # + # Virtual interfaces, such as those provided by VPNs, do not have a + # colon-delimited MAC address as expected, but a 16-byte HWAddr separated + # by dashes. 
These should be ignored in favor of a real MAC address + parts = word.split(_MAC_DELIM) + if len(parts) != 6: + return + if _MAC_OMITS_LEADING_ZEROES: + # (Only) on AIX the macaddr value given is not prefixed by 0, e.g. + # en0 1500 link#2 fa.bc.de.f7.62.4 110854824 0 160133733 0 0 + # not + # en0 1500 link#2 fa.bc.de.f7.62.04 110854824 0 160133733 0 0 + if not all(1 <= len(part) <= 2 for part in parts): + return + hexstr = b''.join(part.rjust(2, b'0') for part in parts) + else: + if not all(len(part) == 2 for part in parts): + return + hexstr = b''.join(parts) + try: + return int(hexstr, 16) + except ValueError: + return + + +def _find_mac_under_heading(command, args, heading): + """Looks for a MAC address under a heading in a command's output. + + The first line of words in the output is searched for the given + heading. Words at the same word index as the heading in subsequent + lines are then examined to see if they look like MAC addresses. + """ + stdout = _get_command_stdout(command, args) + if stdout is None: + return None + + keywords = stdout.readline().rstrip().split() + try: + column_index = keywords.index(heading) + except ValueError: + return None + + first_local_mac = None + for line in stdout: + words = line.rstrip().split() + try: + word = words[column_index] + except IndexError: + continue + + mac = _parse_mac(word) + if mac is None: + continue + if _is_universal(mac): + return mac + if first_local_mac is None: + first_local_mac = mac + + return first_local_mac + + +# The following functions call external programs to 'get' a macaddr value to +# be used as basis for an uuid +def _ifconfig_getnode(): + """Get the hardware address on Unix by running ifconfig.""" + # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes. + keywords = (b'hwaddr', b'ether', b'address:', b'lladdr') + for args in ('', '-a', '-av'): + mac = _find_mac_near_keyword('ifconfig', args, keywords, lambda i: i+1) + if mac: + return mac + return None + +def _ip_getnode(): + """Get the hardware address on Unix by running ip.""" + # This works on Linux with iproute2. + mac = _find_mac_near_keyword('ip', 'link', [b'link/ether'], lambda i: i+1) + if mac: + return mac + return None + +def _arp_getnode(): + """Get the hardware address on Unix by running arp.""" + import os, socket + if not hasattr(socket, "gethostbyname"): + return None + try: + ip_addr = socket.gethostbyname(socket.gethostname()) + except OSError: + return None + + # Try getting the MAC addr from arp based on our IP address (Solaris). + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1) + if mac: + return mac + + # This works on OpenBSD + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: i+1) + if mac: + return mac + + # This works on Linux, FreeBSD and NetBSD + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode('(%s)' % ip_addr)], + lambda i: i+2) + # Return None instead of 0. + if mac: + return mac + return None + +def _lanscan_getnode(): + """Get the hardware address on Unix by running lanscan.""" + # This might work on HP-UX. + return _find_mac_near_keyword('lanscan', '-ai', [b'lan0'], lambda i: 0) + +def _netstat_getnode(): + """Get the hardware address on Unix by running netstat.""" + # This works on AIX and might work on Tru64 UNIX. 
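+    # Illustrative: the first output line is a header such as
+    # 'Name Mtu Network Address Ipkts Ierrs', and the MAC is read from
+    # the 'Address' column of the following lines.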
+ return _find_mac_under_heading('netstat', '-ian', b'Address') + + +# Import optional C extension at toplevel, to help disabling it when testing +try: + import _uuid + _generate_time_safe = getattr(_uuid, "generate_time_safe", None) + _has_stable_extractable_node = _uuid.has_stable_extractable_node + _UuidCreate = getattr(_uuid, "UuidCreate", None) +except ImportError: + _uuid = None + _generate_time_safe = None + _has_stable_extractable_node = False + _UuidCreate = None + + +def _unix_getnode(): + """Get the hardware address on Unix using the _uuid extension module.""" + if _generate_time_safe and _has_stable_extractable_node: + uuid_time, _ = _generate_time_safe() + return UUID(bytes=uuid_time).node + +def _windll_getnode(): + """Get the hardware address on Windows using the _uuid extension module.""" + if _UuidCreate and _has_stable_extractable_node: + uuid_bytes = _UuidCreate() + return UUID(bytes_le=uuid_bytes).node + +def _random_getnode(): + """Get a random node ID.""" + # RFC 9562, §6.10-3 says that + # + # Implementations MAY elect to obtain a 48-bit cryptographic-quality + # random number as per Section 6.9 to use as the Node ID. [...] [and] + # implementations MUST set the least significant bit of the first octet + # of the Node ID to 1. This bit is the unicast or multicast bit, which + # will never be set in IEEE 802 addresses obtained from network cards. + # + # The "multicast bit" of a MAC address is defined to be "the least + # significant bit of the first octet". This works out to be the 41st bit + # counting from 1 being the least significant bit, or 1<<40. + # + # See https://en.wikipedia.org/w/index.php?title=MAC_address&oldid=1128764812#Universal_vs._local_(U/L_bit) + return int.from_bytes(os.urandom(6)) | (1 << 40) + + +# _OS_GETTERS, when known, are targeted for a specific OS or platform. +# The order is by 'common practice' on the specified platform. +# Note: 'posix' and 'windows' _OS_GETTERS are prefixed by a dll/dlload() method +# which, when successful, means none of these "external" methods are called. +# _GETTERS is (also) used by test_uuid.py to SkipUnless(), e.g., +# @unittest.skipUnless(_uuid._ifconfig_getnode in _uuid._GETTERS, ...) +if _LINUX: + _OS_GETTERS = [_ip_getnode, _ifconfig_getnode] +elif sys.platform == 'darwin': + _OS_GETTERS = [_ifconfig_getnode, _arp_getnode, _netstat_getnode] +elif sys.platform == 'win32': + # bpo-40201: _windll_getnode will always succeed, so these are not needed + _OS_GETTERS = [] +elif _AIX: + _OS_GETTERS = [_netstat_getnode] +else: + _OS_GETTERS = [_ifconfig_getnode, _ip_getnode, _arp_getnode, + _netstat_getnode, _lanscan_getnode] +if os.name == 'posix': + _GETTERS = [_unix_getnode] + _OS_GETTERS +elif os.name == 'nt': + _GETTERS = [_windll_getnode] + _OS_GETTERS +else: + _GETTERS = _OS_GETTERS + +_node = None + +def getnode(): + """Get the hardware address as a 48-bit positive integer. + + The first time this runs, it may launch a separate program, which could + be quite slow. If all attempts to obtain the hardware address fail, we + choose a random 48-bit number with its eighth bit set to 1 as recommended + in RFC 4122. 
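+
+    Illustrative usage (the value is machine-specific):
+
+    >>> uuid.getnode()  # doctest: +SKIP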
+ """ + global _node + if _node is not None: + return _node + + for getter in _GETTERS + [_random_getnode]: + try: + _node = getter() + except: + continue + if (_node is not None) and (0 <= _node < (1 << 48)): + return _node + assert False, '_random_getnode() returned invalid value: {}'.format(_node) + + +_last_timestamp = None + +def uuid1(node=None, clock_seq=None): + """Generate a UUID from a host ID, sequence number, and the current time. + If 'node' is not given, getnode() is used to obtain the hardware + address. If 'clock_seq' is given, it is used as the sequence number; + otherwise a random 14-bit sequence number is chosen.""" + + # When the system provides a version-1 UUID generator, use it (but don't + # use UuidCreate here because its UUIDs don't conform to RFC 4122). + if _generate_time_safe is not None and node is clock_seq is None: + uuid_time, safely_generated = _generate_time_safe() + try: + is_safe = SafeUUID(safely_generated) + except ValueError: + is_safe = SafeUUID.unknown + return UUID(bytes=uuid_time, is_safe=is_safe) + + global _last_timestamp + nanoseconds = time.time_ns() + # 0x01b21dd213814000 is the number of 100-ns intervals between the + # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. + timestamp = nanoseconds // 100 + 0x01b21dd213814000 + if _last_timestamp is not None and timestamp <= _last_timestamp: + timestamp = _last_timestamp + 1 + _last_timestamp = timestamp + if clock_seq is None: + import random + clock_seq = random.getrandbits(14) # instead of stable storage + time_low = timestamp & 0xffffffff + time_mid = (timestamp >> 32) & 0xffff + time_hi_version = (timestamp >> 48) & 0x0fff + clock_seq_low = clock_seq & 0xff + clock_seq_hi_variant = (clock_seq >> 8) & 0x3f + if node is None: + node = getnode() + return UUID(fields=(time_low, time_mid, time_hi_version, + clock_seq_hi_variant, clock_seq_low, node), version=1) + +def uuid3(namespace, name): + """Generate a UUID from the MD5 hash of a namespace UUID and a name.""" + if isinstance(name, str): + name = bytes(name, "utf-8") + import hashlib + h = hashlib.md5(namespace.bytes + name, usedforsecurity=False) + int_uuid_3 = int.from_bytes(h.digest()) + int_uuid_3 &= _RFC_4122_CLEARFLAGS_MASK + int_uuid_3 |= _RFC_4122_VERSION_3_FLAGS + return UUID._from_int(int_uuid_3) + +def uuid4(): + """Generate a random UUID.""" + int_uuid_4 = int.from_bytes(os.urandom(16)) + int_uuid_4 &= _RFC_4122_CLEARFLAGS_MASK + int_uuid_4 |= _RFC_4122_VERSION_4_FLAGS + return UUID._from_int(int_uuid_4) + +def uuid5(namespace, name): + """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" + if isinstance(name, str): + name = bytes(name, "utf-8") + import hashlib + h = hashlib.sha1(namespace.bytes + name, usedforsecurity=False) + int_uuid_5 = int.from_bytes(h.digest()[:16]) + int_uuid_5 &= _RFC_4122_CLEARFLAGS_MASK + int_uuid_5 |= _RFC_4122_VERSION_5_FLAGS + return UUID._from_int(int_uuid_5) + + +_last_timestamp_v6 = None + +def uuid6(node=None, clock_seq=None): + """Similar to :func:`uuid1` but where fields are ordered differently + for improved DB locality. + + More precisely, given a 60-bit timestamp value as specified for UUIDv1, + for UUIDv6 the first 48 most significant bits are stored first, followed + by the 4-bit version (same position), followed by the remaining 12 bits + of the original 60-bit timestamp. 
+ """ + global _last_timestamp_v6 + import time + nanoseconds = time.time_ns() + # 0x01b21dd213814000 is the number of 100-ns intervals between the + # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. + timestamp = nanoseconds // 100 + 0x01b21dd213814000 + if _last_timestamp_v6 is not None and timestamp <= _last_timestamp_v6: + timestamp = _last_timestamp_v6 + 1 + _last_timestamp_v6 = timestamp + if clock_seq is None: + import random + clock_seq = random.getrandbits(14) # instead of stable storage + time_hi_and_mid = (timestamp >> 12) & 0xffff_ffff_ffff + time_lo = timestamp & 0x0fff # keep 12 bits and clear version bits + clock_s = clock_seq & 0x3fff # keep 14 bits and clear variant bits + if node is None: + node = getnode() + # --- 32 + 16 --- -- 4 -- -- 12 -- -- 2 -- -- 14 --- 48 + # time_hi_and_mid | version | time_lo | variant | clock_seq | node + int_uuid_6 = time_hi_and_mid << 80 + int_uuid_6 |= time_lo << 64 + int_uuid_6 |= clock_s << 48 + int_uuid_6 |= node & 0xffff_ffff_ffff + # by construction, the variant and version bits are already cleared + int_uuid_6 |= _RFC_4122_VERSION_6_FLAGS + return UUID._from_int(int_uuid_6) + + +_last_timestamp_v7 = None +_last_counter_v7 = 0 # 42-bit counter + +def _uuid7_get_counter_and_tail(): + rand = int.from_bytes(os.urandom(10)) + # 42-bit counter with MSB set to 0 + counter = (rand >> 32) & 0x1ff_ffff_ffff + # 32-bit random data + tail = rand & 0xffff_ffff + return counter, tail + + +def uuid7(): + """Generate a UUID from a Unix timestamp in milliseconds and random bits. + + UUIDv7 objects feature monotonicity within a millisecond. + """ + # --- 48 --- -- 4 -- --- 12 --- -- 2 -- --- 30 --- - 32 - + # unix_ts_ms | version | counter_hi | variant | counter_lo | random + # + # 'counter = counter_hi | counter_lo' is a 42-bit counter constructed + # with Method 1 of RFC 9562, §6.2, and its MSB is set to 0. + # + # 'random' is a 32-bit random value regenerated for every new UUID. + # + # If multiple UUIDs are generated within the same millisecond, the LSB + # of 'counter' is incremented by 1. When overflowing, the timestamp is + # advanced and the counter is reset to a random 42-bit integer with MSB + # set to 0. 
+ + global _last_timestamp_v7 + global _last_counter_v7 + + nanoseconds = time.time_ns() + timestamp_ms = nanoseconds // 1_000_000 + + if _last_timestamp_v7 is None or timestamp_ms > _last_timestamp_v7: + counter, tail = _uuid7_get_counter_and_tail() + else: + if timestamp_ms < _last_timestamp_v7: + timestamp_ms = _last_timestamp_v7 + 1 + # advance the 42-bit counter + counter = _last_counter_v7 + 1 + if counter > 0x3ff_ffff_ffff: + # advance the 48-bit timestamp + timestamp_ms += 1 + counter, tail = _uuid7_get_counter_and_tail() + else: + # 32-bit random data + tail = int.from_bytes(os.urandom(4)) + + unix_ts_ms = timestamp_ms & 0xffff_ffff_ffff + counter_msbs = counter >> 30 + # keep 12 counter's MSBs and clear variant bits + counter_hi = counter_msbs & 0x0fff + # keep 30 counter's LSBs and clear version bits + counter_lo = counter & 0x3fff_ffff + # ensure that the tail is always a 32-bit integer (by construction, + # it is already the case, but future interfaces may allow the user + # to specify the random tail) + tail &= 0xffff_ffff + + int_uuid_7 = unix_ts_ms << 80 + int_uuid_7 |= counter_hi << 64 + int_uuid_7 |= counter_lo << 32 + int_uuid_7 |= tail + # by construction, the variant and version bits are already cleared + int_uuid_7 |= _RFC_4122_VERSION_7_FLAGS + res = UUID._from_int(int_uuid_7) + + # defer global update until all computations are done + _last_timestamp_v7 = timestamp_ms + _last_counter_v7 = counter + return res + + +def uuid8(a=None, b=None, c=None): + """Generate a UUID from three custom blocks. + + * 'a' is the first 48-bit chunk of the UUID (octets 0-5); + * 'b' is the mid 12-bit chunk (octets 6-7); + * 'c' is the last 62-bit chunk (octets 8-15). + + When a value is not specified, a pseudo-random value is generated. + """ + if a is None: + import random + a = random.getrandbits(48) + if b is None: + import random + b = random.getrandbits(12) + if c is None: + import random + c = random.getrandbits(62) + int_uuid_8 = (a & 0xffff_ffff_ffff) << 80 + int_uuid_8 |= (b & 0xfff) << 64 + int_uuid_8 |= c & 0x3fff_ffff_ffff_ffff + # by construction, the variant and version bits are already cleared + int_uuid_8 |= _RFC_4122_VERSION_8_FLAGS + return UUID._from_int(int_uuid_8) + + +def main(): + """Run the uuid command line interface.""" + uuid_funcs = { + "uuid1": uuid1, + "uuid3": uuid3, + "uuid4": uuid4, + "uuid5": uuid5, + "uuid6": uuid6, + "uuid7": uuid7, + "uuid8": uuid8, + } + uuid_namespace_funcs = ("uuid3", "uuid5") + namespaces = { + "@dns": NAMESPACE_DNS, + "@url": NAMESPACE_URL, + "@oid": NAMESPACE_OID, + "@x500": NAMESPACE_X500 + } + + import argparse + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Generate a UUID using the selected UUID function.", + color=True, + ) + parser.add_argument("-u", "--uuid", + choices=uuid_funcs.keys(), + default="uuid4", + help="function to generate the UUID") + parser.add_argument("-n", "--namespace", + choices=["any UUID", *namespaces.keys()], + help="uuid3/uuid5 only: " + "a UUID, or a well-known predefined UUID addressed " + "by namespace name") + parser.add_argument("-N", "--name", + help="uuid3/uuid5 only: " + "name used as part of generating the UUID") + parser.add_argument("-C", "--count", metavar="NUM", type=int, default=1, + help="generate NUM fresh UUIDs") + + args = parser.parse_args() + uuid_func = uuid_funcs[args.uuid] + namespace = args.namespace + name = args.name + + if args.uuid in uuid_namespace_funcs: + if not namespace or not name: + parser.error( + 
"Incorrect number of arguments. " + f"{args.uuid} requires a namespace and a name. " + "Run 'python -m uuid -h' for more information." + ) + namespace = namespaces[namespace] if namespace in namespaces else UUID(namespace) + for _ in range(args.count): + print(uuid_func(namespace, name)) + else: + for _ in range(args.count): + print(uuid_func()) + + +# The following standard UUIDs are for use with uuid3() or uuid5(). + +NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8') + +# RFC 9562 Sections 5.9 and 5.10 define the special Nil and Max UUID formats. + +NIL = UUID('00000000-0000-0000-0000-000000000000') +MAX = UUID('ffffffff-ffff-ffff-ffff-ffffffffffff') + +if __name__ == "__main__": + main() diff --git a/Python314_4_x64_Template/Lib/venv/__init__.py b/Python314_4_x64_Template/Lib/venv/__init__.py new file mode 100644 index 00000000..88f3340a --- /dev/null +++ b/Python314_4_x64_Template/Lib/venv/__init__.py @@ -0,0 +1,700 @@ +""" +Virtual environment (venv) package for Python. Based on PEP 405. + +Copyright (C) 2011-2014 Vinay Sajip. +Licensed to the PSF under a contributor agreement. +""" +import logging +import os +import shutil +import subprocess +import sys +import sysconfig +import types +import shlex + + +CORE_VENV_DEPS = ('pip',) +logger = logging.getLogger(__name__) + + +class EnvBuilder: + """ + This class exists to allow virtual environment creation to be + customized. The constructor parameters determine the builder's + behaviour when called upon to create a virtual environment. + + By default, the builder makes the system (global) site-packages dir + *un*available to the created environment. + + If invoked using the Python -m option, the default is to use copying + on Windows platforms but symlinks elsewhere. If instantiated some + other way, the default is to *not* use symlinks. + + :param system_site_packages: If True, the system (global) site-packages + dir is available to created environments. + :param clear: If True, delete the contents of the environment directory if + it already exists, before environment creation. + :param symlinks: If True, attempt to symlink rather than copy files into + virtual environment. + :param upgrade: If True, upgrade an existing virtual environment. + :param with_pip: If True, ensure pip is installed in the virtual + environment + :param prompt: Alternative terminal prefix for the environment. + :param upgrade_deps: Update the base venv modules to the latest on PyPI + :param scm_ignore_files: Create ignore files for the SCMs specified by the + iterable. + """ + + def __init__(self, system_site_packages=False, clear=False, + symlinks=False, upgrade=False, with_pip=False, prompt=None, + upgrade_deps=False, *, scm_ignore_files=frozenset()): + self.system_site_packages = system_site_packages + self.clear = clear + self.symlinks = symlinks + self.upgrade = upgrade + self.with_pip = with_pip + self.orig_prompt = prompt + if prompt == '.': # see bpo-38901 + prompt = os.path.basename(os.getcwd()) + self.prompt = prompt + self.upgrade_deps = upgrade_deps + self.scm_ignore_files = frozenset(map(str.lower, scm_ignore_files)) + + def create(self, env_dir): + """ + Create a virtual environment in a directory. + + :param env_dir: The target directory to create an environment in. 
+ + """ + env_dir = os.path.abspath(env_dir) + context = self.ensure_directories(env_dir) + for scm in self.scm_ignore_files: + getattr(self, f"create_{scm}_ignore_file")(context) + # See issue 24875. We need system_site_packages to be False + # until after pip is installed. + true_system_site_packages = self.system_site_packages + self.system_site_packages = False + self.create_configuration(context) + self.setup_python(context) + if self.with_pip: + self._setup_pip(context) + if not self.upgrade: + self.setup_scripts(context) + self.post_setup(context) + if true_system_site_packages: + # We had set it to False before, now + # restore it and rewrite the configuration + self.system_site_packages = True + self.create_configuration(context) + if self.upgrade_deps: + self.upgrade_dependencies(context) + + def clear_directory(self, path): + for fn in os.listdir(path): + fn = os.path.join(path, fn) + if os.path.islink(fn) or os.path.isfile(fn): + os.remove(fn) + elif os.path.isdir(fn): + shutil.rmtree(fn) + + def _venv_path(self, env_dir, name): + vars = { + 'base': env_dir, + 'platbase': env_dir, + } + return sysconfig.get_path(name, scheme='venv', vars=vars) + + @classmethod + def _same_path(cls, path1, path2): + """Check whether two paths appear the same. + + Whether they refer to the same file is irrelevant; we're testing for + whether a human reader would look at the path string and easily tell + that they're the same file. + """ + if sys.platform == 'win32': + if os.path.normcase(path1) == os.path.normcase(path2): + return True + # gh-90329: Don't display a warning for short/long names + import _winapi + try: + path1 = _winapi.GetLongPathName(os.fsdecode(path1)) + except OSError: + pass + try: + path2 = _winapi.GetLongPathName(os.fsdecode(path2)) + except OSError: + pass + if os.path.normcase(path1) == os.path.normcase(path2): + return True + return False + else: + return path1 == path2 + + def ensure_directories(self, env_dir): + """ + Create the directories for the environment. + + Returns a context object which holds paths in the environment, + for use by subsequent logic. + """ + + def create_if_needed(d): + if not os.path.exists(d): + os.makedirs(d) + elif os.path.islink(d) or os.path.isfile(d): + raise ValueError('Unable to create directory %r' % d) + + if os.pathsep in os.fspath(env_dir): + raise ValueError(f'Refusing to create a venv in {env_dir} because ' + f'it contains the PATH separator {os.pathsep}.') + if os.path.exists(env_dir) and self.clear: + self.clear_directory(env_dir) + context = types.SimpleNamespace() + context.env_dir = env_dir + context.env_name = os.path.split(env_dir)[1] + context.prompt = self.prompt if self.prompt is not None else context.env_name + create_if_needed(env_dir) + executable = sys._base_executable + if not executable: # see gh-96861 + raise ValueError('Unable to determine path to the running ' + 'Python interpreter. Provide an explicit path or ' + 'check that your PATH environment variable is ' + 'correctly set.') + dirname, exename = os.path.split(os.path.abspath(executable)) + if sys.platform == 'win32': + # Always create the simplest name in the venv. 
It will either be a + # link back to executable, or a copy of the appropriate launcher + _d = '_d' if os.path.splitext(exename)[0].endswith('_d') else '' + exename = f'python{_d}.exe' + context.executable = executable + context.python_dir = dirname + context.python_exe = exename + binpath = self._venv_path(env_dir, 'scripts') + libpath = self._venv_path(env_dir, 'purelib') + + # PEP 405 says venvs should create a local include directory. + # See https://peps.python.org/pep-0405/#include-files + # XXX: This directory is not exposed in sysconfig or anywhere else, and + # doesn't seem to be utilized by modern packaging tools. We keep it + # for backwards-compatibility, and to follow the PEP, but I would + # recommend against using it, as most tooling does not pass it to + # compilers. Instead, until we standardize a site-specific include + # directory, I would recommend installing headers as package data, + # and providing some sort of API to get the include directories. + # Example: https://numpy.org/doc/2.1/reference/generated/numpy.get_include.html + incpath = os.path.join(env_dir, 'Include' if os.name == 'nt' else 'include') + + context.inc_path = incpath + create_if_needed(incpath) + context.lib_path = libpath + create_if_needed(libpath) + # Issue 21197: create lib64 as a symlink to lib on 64-bit non-OS X POSIX + if ((sys.maxsize > 2**32) and (os.name == 'posix') and + (sys.platform != 'darwin')): + link_path = os.path.join(env_dir, 'lib64') + if not os.path.exists(link_path): # Issue #21643 + os.symlink('lib', link_path) + context.bin_path = binpath + context.bin_name = os.path.relpath(binpath, env_dir) + context.env_exe = os.path.join(binpath, exename) + create_if_needed(binpath) + # Assign and update the command to use when launching the newly created + # environment, in case it isn't simply the executable script (e.g. bpo-45337) + context.env_exec_cmd = context.env_exe + if sys.platform == 'win32': + # bpo-45337: Fix up env_exec_cmd to account for file system redirections. + # Some redirects only apply to CreateFile and not CreateProcess + real_env_exe = os.path.realpath(context.env_exe) + if not self._same_path(real_env_exe, context.env_exe): + logger.warning('Actual environment location may have moved due to ' + 'redirects, links or junctions.\n' + ' Requested location: "%s"\n' + ' Actual location: "%s"', + context.env_exe, real_env_exe) + context.env_exec_cmd = real_env_exe + return context + + def create_configuration(self, context): + """ + Create a configuration file indicating where the environment's Python + was copied from, and whether the system site-packages should be made + available in the environment. + + :param context: The information for the environment creation request + being processed. 
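+
+        Illustratively, the resulting pyvenv.cfg contains lines such as
+        (values are placeholders):
+
+            home = /usr/local/bin
+            include-system-site-packages = false
+            version = 3.14.0
+            executable = /usr/local/bin/python3.14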
+ """ + context.cfg_path = path = os.path.join(context.env_dir, 'pyvenv.cfg') + with open(path, 'w', encoding='utf-8') as f: + f.write('home = %s\n' % context.python_dir) + if self.system_site_packages: + incl = 'true' + else: + incl = 'false' + f.write('include-system-site-packages = %s\n' % incl) + f.write('version = %d.%d.%d\n' % sys.version_info[:3]) + if self.prompt is not None: + f.write(f'prompt = {self.prompt!r}\n') + f.write('executable = %s\n' % os.path.realpath(sys.executable)) + args = [] + nt = os.name == 'nt' + if nt and self.symlinks: + args.append('--symlinks') + if not nt and not self.symlinks: + args.append('--copies') + if not self.with_pip: + args.append('--without-pip') + if self.system_site_packages: + args.append('--system-site-packages') + if self.clear: + args.append('--clear') + if self.upgrade: + args.append('--upgrade') + if self.upgrade_deps: + args.append('--upgrade-deps') + if self.orig_prompt is not None: + args.append(f'--prompt="{self.orig_prompt}"') + if not self.scm_ignore_files: + args.append('--without-scm-ignore-files') + + args.append(context.env_dir) + args = ' '.join(args) + f.write(f'command = {sys.executable} -m venv {args}\n') + + def symlink_or_copy(self, src, dst, relative_symlinks_ok=False): + """ + Try symlinking a file, and if that fails, fall back to copying. + (Unused on Windows, because we can't just copy a failed symlink file: we + switch to a different set of files instead.) + """ + assert os.name != 'nt' + force_copy = not self.symlinks + if not force_copy: + try: + if not os.path.islink(dst): # can't link to itself! + if relative_symlinks_ok: + assert os.path.dirname(src) == os.path.dirname(dst) + os.symlink(os.path.basename(src), dst) + else: + os.symlink(src, dst) + except Exception: # may need to use a more specific exception + logger.warning('Unable to symlink %r to %r', src, dst) + force_copy = True + if force_copy: + shutil.copyfile(src, dst) + + def create_git_ignore_file(self, context): + """ + Create a .gitignore file in the environment directory. + + The contents of the file cause the entire environment directory to be + ignored by git. + """ + gitignore_path = os.path.join(context.env_dir, '.gitignore') + with open(gitignore_path, 'w', encoding='utf-8') as file: + file.write('# Created by venv; ' + 'see https://docs.python.org/3/library/venv.html\n') + file.write('*\n') + + if os.name != 'nt': + def setup_python(self, context): + """ + Set up a Python executable in the environment. + + :param context: The information for the environment creation request + being processed. + """ + binpath = context.bin_path + path = context.env_exe + copier = self.symlink_or_copy + dirname = context.python_dir + copier(context.executable, path) + if not os.path.islink(path): + os.chmod(path, 0o755) + + suffixes = ['python', 'python3', f'python3.{sys.version_info[1]}'] + if sys.version_info[:2] == (3, 14) and sys.getfilesystemencoding() == 'utf-8': + suffixes.append('𝜋thon') + for suffix in suffixes: + path = os.path.join(binpath, suffix) + if not os.path.exists(path): + # Issue 18807: make copies if + # symlinks are not wanted + copier(context.env_exe, path, relative_symlinks_ok=True) + if not os.path.islink(path): + os.chmod(path, 0o755) + + else: + def setup_python(self, context): + """ + Set up a Python executable in the environment. + + :param context: The information for the environment creation request + being processed. 
+ """ + binpath = context.bin_path + dirname = context.python_dir + exename = os.path.basename(context.env_exe) + exe_stem = os.path.splitext(exename)[0] + exe_d = '_d' if os.path.normcase(exe_stem).endswith('_d') else '' + if sysconfig.is_python_build(): + scripts = dirname + else: + scripts = os.path.join(os.path.dirname(__file__), + 'scripts', 'nt') + if not sysconfig.get_config_var("Py_GIL_DISABLED"): + python_exe = os.path.join(dirname, f'python{exe_d}.exe') + pythonw_exe = os.path.join(dirname, f'pythonw{exe_d}.exe') + link_sources = { + 'python.exe': python_exe, + f'python{exe_d}.exe': python_exe, + 'pythonw.exe': pythonw_exe, + f'pythonw{exe_d}.exe': pythonw_exe, + } + python_exe = os.path.join(scripts, f'venvlauncher{exe_d}.exe') + pythonw_exe = os.path.join(scripts, f'venvwlauncher{exe_d}.exe') + copy_sources = { + 'python.exe': python_exe, + f'python{exe_d}.exe': python_exe, + 'pythonw.exe': pythonw_exe, + f'pythonw{exe_d}.exe': pythonw_exe, + } + else: + exe_t = f'3.{sys.version_info[1]}t' + python_exe = os.path.join(dirname, f'python{exe_t}{exe_d}.exe') + pythonw_exe = os.path.join(dirname, f'pythonw{exe_t}{exe_d}.exe') + link_sources = { + 'python.exe': python_exe, + f'python{exe_d}.exe': python_exe, + f'python{exe_t}.exe': python_exe, + f'python{exe_t}{exe_d}.exe': python_exe, + 'pythonw.exe': pythonw_exe, + f'pythonw{exe_d}.exe': pythonw_exe, + f'pythonw{exe_t}.exe': pythonw_exe, + f'pythonw{exe_t}{exe_d}.exe': pythonw_exe, + } + python_exe = os.path.join(scripts, f'venvlaunchert{exe_d}.exe') + pythonw_exe = os.path.join(scripts, f'venvwlaunchert{exe_d}.exe') + copy_sources = { + 'python.exe': python_exe, + f'python{exe_d}.exe': python_exe, + f'python{exe_t}.exe': python_exe, + f'python{exe_t}{exe_d}.exe': python_exe, + 'pythonw.exe': pythonw_exe, + f'pythonw{exe_d}.exe': pythonw_exe, + f'pythonw{exe_t}.exe': pythonw_exe, + f'pythonw{exe_t}{exe_d}.exe': pythonw_exe, + } + + do_copies = True + if self.symlinks: + do_copies = False + # For symlinking, we need all the DLLs to be available alongside + # the executables. + link_sources.update({ + f: os.path.join(dirname, f) for f in os.listdir(dirname) + if os.path.normcase(f).startswith(('python', 'vcruntime')) + and os.path.normcase(os.path.splitext(f)[1]) == '.dll' + }) + + to_unlink = [] + for dest, src in link_sources.items(): + dest = os.path.join(binpath, dest) + try: + os.symlink(src, dest) + to_unlink.append(dest) + except OSError: + logger.warning('Unable to symlink %r to %r', src, dest) + do_copies = True + for f in to_unlink: + try: + os.unlink(f) + except OSError: + logger.warning('Failed to clean up symlink %r', + f) + logger.warning('Retrying with copies') + break + + if do_copies: + for dest, src in copy_sources.items(): + dest = os.path.join(binpath, dest) + try: + shutil.copy2(src, dest) + except OSError: + logger.warning('Unable to copy %r to %r', src, dest) + + if sysconfig.is_python_build(): + # copy init.tcl + for root, dirs, files in os.walk(context.python_dir): + if 'init.tcl' in files: + tcldir = os.path.basename(root) + tcldir = os.path.join(context.env_dir, 'Lib', tcldir) + if not os.path.exists(tcldir): + os.makedirs(tcldir) + src = os.path.join(root, 'init.tcl') + dst = os.path.join(tcldir, 'init.tcl') + shutil.copyfile(src, dst) + break + + def _call_new_python(self, context, *py_args, **kwargs): + """Executes the newly created Python using safe-ish options""" + # gh-98251: We do not want to just use '-I' because that masks + # legitimate user preferences (such as not writing bytecode). 
All we
+        # really need is to ensure that the path variables do not overrule
+        # normal venv handling.
+        args = [context.env_exec_cmd, *py_args]
+        kwargs['env'] = env = os.environ.copy()
+        env['VIRTUAL_ENV'] = context.env_dir
+        env.pop('PYTHONHOME', None)
+        env.pop('PYTHONPATH', None)
+        kwargs['cwd'] = context.env_dir
+        kwargs['executable'] = context.env_exec_cmd
+        subprocess.check_output(args, **kwargs)
+
+    def _setup_pip(self, context):
+        """Installs or upgrades pip in a virtual environment"""
+        self._call_new_python(context, '-m', 'ensurepip', '--upgrade',
+                              '--default-pip', stderr=subprocess.STDOUT)
+
+    def setup_scripts(self, context):
+        """
+        Set up scripts into the created environment from a directory.
+
+        This method installs the default scripts into the environment
+        being created. You can prevent the default installation by overriding
+        this method if you really need to, or if you need to specify
+        a different location for the scripts to install. By default, the
+        'scripts' directory in the venv package is used as the source of
+        scripts to install.
+        """
+        path = os.path.abspath(os.path.dirname(__file__))
+        path = os.path.join(path, 'scripts')
+        self.install_scripts(context, path)
+
+    def post_setup(self, context):
+        """
+        Hook for post-setup modification of the venv. Subclasses may install
+        additional packages or scripts here, add activation shell scripts, etc.
+
+        :param context: The information for the environment creation request
+                        being processed.
+        """
+        pass
+
+    def replace_variables(self, text, context):
+        """
+        Replace variable placeholders in script text with context-specific
+        variables.
+
+        Return the text passed in, but with variables replaced.
+
+        :param text: The text in which to replace placeholder variables.
+        :param context: The information for the environment creation request
+                        being processed.
+        """
+        replacements = {
+            '__VENV_DIR__': context.env_dir,
+            '__VENV_NAME__': context.env_name,
+            '__VENV_PROMPT__': context.prompt,
+            '__VENV_BIN_NAME__': context.bin_name,
+            '__VENV_PYTHON__': context.env_exe,
+        }
+
+        def quote_ps1(s):
+            """
+            This should satisfy PowerShell quoting rules [1], unless the quoted
+            string is passed directly to Windows native commands [2].
+            [1]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_quoting_rules
+            [2]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_parsing#passing-arguments-that-contain-quote-characters
+            """
+            s = s.replace("'", "''")
+            return f"'{s}'"
+
+        def quote_bat(s):
+            return s
+
+        # gh-124651: need to quote the template strings properly
+        quote = shlex.quote
+        script_path = context.script_path
+        if script_path.endswith('.ps1'):
+            quote = quote_ps1
+        elif script_path.endswith('.bat'):
+            quote = quote_bat
+        else:
+            # fall back to POSIX shell compliant quoting
+            quote = shlex.quote
+
+        replacements = {key: quote(s) for key, s in replacements.items()}
+        for key, quoted in replacements.items():
+            text = text.replace(key, quoted)
+        return text
+
+    def install_scripts(self, context, path):
+        """
+        Install scripts into the created environment from a directory.
+
+        :param context: The information for the environment creation request
+                        being processed.
+        :param path: Absolute pathname of a directory containing scripts.
+                     Scripts in the 'common' subdirectory of this directory,
+                     and those in the directory named for the platform
+                     being run on, are installed in the created environment.
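+                     (For illustration, given a layout such as
+                     'common/activate', 'nt/activate.bat' and
+                     'posix/activate.csh', the 'common' scripts are always
+                     installed, while 'nt' or 'posix' scripts apply only on
+                     the matching platform.)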
+ Placeholder variables are replaced with environment- + specific values. + """ + binpath = context.bin_path + plen = len(path) + if os.name == 'nt': + def skip_file(f): + f = os.path.normcase(f) + return (f.startswith(('python', 'venv')) + and f.endswith(('.exe', '.pdb'))) + else: + def skip_file(f): + return False + for root, dirs, files in os.walk(path): + if root == path: # at top-level, remove irrelevant dirs + for d in dirs[:]: + if d not in ('common', os.name): + dirs.remove(d) + continue # ignore files in top level + for f in files: + if skip_file(f): + continue + srcfile = os.path.join(root, f) + suffix = root[plen:].split(os.sep)[2:] + if not suffix: + dstdir = binpath + else: + dstdir = os.path.join(binpath, *suffix) + if not os.path.exists(dstdir): + os.makedirs(dstdir) + dstfile = os.path.join(dstdir, f) + if os.name == 'nt' and srcfile.endswith(('.exe', '.pdb')): + shutil.copy2(srcfile, dstfile) + continue + with open(srcfile, 'rb') as f: + data = f.read() + try: + context.script_path = srcfile + new_data = ( + self.replace_variables(data.decode('utf-8'), context) + .encode('utf-8') + ) + except UnicodeError as e: + logger.warning('unable to copy script %r, ' + 'may be binary: %s', srcfile, e) + continue + if new_data == data: + shutil.copy(srcfile, dstfile) + else: + with open(dstfile, 'wb') as f: + f.write(new_data) + shutil.copymode(srcfile, dstfile) + + def upgrade_dependencies(self, context): + logger.debug( + f'Upgrading {CORE_VENV_DEPS} packages in {context.bin_path}' + ) + self._call_new_python(context, '-m', 'pip', 'install', '--upgrade', + *CORE_VENV_DEPS) + + +def create(env_dir, system_site_packages=False, clear=False, + symlinks=False, with_pip=False, prompt=None, upgrade_deps=False, + *, scm_ignore_files=frozenset()): + """Create a virtual environment in a directory.""" + builder = EnvBuilder(system_site_packages=system_site_packages, + clear=clear, symlinks=symlinks, with_pip=with_pip, + prompt=prompt, upgrade_deps=upgrade_deps, + scm_ignore_files=scm_ignore_files) + builder.create(env_dir) + + +def main(args=None): + import argparse + + parser = argparse.ArgumentParser(description='Creates virtual Python ' + 'environments in one or ' + 'more target ' + 'directories.', + epilog='Once an environment has been ' + 'created, you may wish to ' + 'activate it, e.g. 
by ' + 'sourcing an activate script ' + 'in its bin directory.', + color=True, + ) + parser.add_argument('dirs', metavar='ENV_DIR', nargs='+', + help='A directory to create the environment in.') + parser.add_argument('--system-site-packages', default=False, + action='store_true', dest='system_site', + help='Give the virtual environment access to the ' + 'system site-packages dir.') + if os.name == 'nt': + use_symlinks = False + else: + use_symlinks = True + group = parser.add_mutually_exclusive_group() + group.add_argument('--symlinks', default=use_symlinks, + action='store_true', dest='symlinks', + help='Try to use symlinks rather than copies, ' + 'when symlinks are not the default for ' + 'the platform.') + group.add_argument('--copies', default=not use_symlinks, + action='store_false', dest='symlinks', + help='Try to use copies rather than symlinks, ' + 'even when symlinks are the default for ' + 'the platform.') + parser.add_argument('--clear', default=False, action='store_true', + dest='clear', help='Delete the contents of the ' + 'environment directory if it ' + 'already exists, before ' + 'environment creation.') + parser.add_argument('--upgrade', default=False, action='store_true', + dest='upgrade', help='Upgrade the environment ' + 'directory to use this version ' + 'of Python, assuming Python ' + 'has been upgraded in-place.') + parser.add_argument('--without-pip', dest='with_pip', + default=True, action='store_false', + help='Skips installing or upgrading pip in the ' + 'virtual environment (pip is bootstrapped ' + 'by default)') + parser.add_argument('--prompt', + help='Provides an alternative prompt prefix for ' + 'this environment.') + parser.add_argument('--upgrade-deps', default=False, action='store_true', + dest='upgrade_deps', + help=f'Upgrade core dependencies ({", ".join(CORE_VENV_DEPS)}) ' + 'to the latest version in PyPI') + parser.add_argument('--without-scm-ignore-files', dest='scm_ignore_files', + action='store_const', const=frozenset(), + default=frozenset(['git']), + help='Skips adding SCM ignore files to the environment ' + 'directory (Git is supported by default).') + options = parser.parse_args(args) + if options.upgrade and options.clear: + raise ValueError('you cannot supply --upgrade and --clear together.') + builder = EnvBuilder(system_site_packages=options.system_site, + clear=options.clear, + symlinks=options.symlinks, + upgrade=options.upgrade, + with_pip=options.with_pip, + prompt=options.prompt, + upgrade_deps=options.upgrade_deps, + scm_ignore_files=options.scm_ignore_files) + for d in options.dirs: + builder.create(d) + + +if __name__ == '__main__': + rc = 1 + try: + main() + rc = 0 + except Exception as e: + print('Error: %s' % e, file=sys.stderr) + sys.exit(rc) diff --git a/Python313_13_x64_Template/Lib/venv/__main__.py b/Python314_4_x64_Template/Lib/venv/__main__.py similarity index 100% rename from Python313_13_x64_Template/Lib/venv/__main__.py rename to Python314_4_x64_Template/Lib/venv/__main__.py diff --git a/Python314_4_x64_Template/Lib/venv/scripts/common/Activate.ps1 b/Python314_4_x64_Template/Lib/venv/scripts/common/Activate.ps1 new file mode 100644 index 00000000..2cc90919 --- /dev/null +++ b/Python314_4_x64_Template/Lib/venv/scripts/common/Activate.ps1 @@ -0,0 +1,547 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. 
+ +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. + +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. + +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. + +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. + +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove VIRTUAL_ENV_PROMPT altogether. 
+ if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { + Remove-Item -Path env:VIRTUAL_ENV_PROMPT + } + + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: + if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { + Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force + } + + # Leave deactivate function in the global namespace if requested: + if (-not $NonDestructive) { + Remove-Item -Path function:deactivate + } +} + +<# +.Description +Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the +given folder, and returns them in a map. + +For each line in the pyvenv.cfg file, if that line can be parsed into exactly +two strings separated by `=` (with any amount of whitespace surrounding the =) +then it is considered a `key = value` line. The left hand string is the key, +the right hand is the value. + +If the value starts with a `'` or a `"` then the first and last character is +stripped from the value before being captured. + +.Parameter ConfigDir +Path to the directory that contains the `pyvenv.cfg` file. +#> +function Get-PyVenvConfig( + [String] + $ConfigDir +) { + Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" + + # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). + $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue + + # An empty map will be returned if no config file is found. + $pyvenvConfig = @{ } + + if ($pyvenvConfigPath) { + + Write-Verbose "File exists, parse `key = value` lines" + $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath + + $pyvenvConfigContent | ForEach-Object { + $keyval = $PSItem -split "\s*=\s*", 2 + if ($keyval[0] -and $keyval[1]) { + $val = $keyval[1] + + # Remove extraneous quotations around a string value. + if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. +if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." + $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. 
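+# Resolution order: an explicit -Prompt argument wins, then the 'prompt'
+# key from pyvenv.cfg, and finally the leaf name of $VenvDir.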
+if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. +$env:VIRTUAL_ENV = $VenvDir + +$env:VIRTUAL_ENV_PROMPT = $Prompt + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" + +# SIG # Begin signature block +# MII3YgYJKoZIhvcNAQcCoII3UzCCN08CAQExDzANBglghkgBZQMEAgEFADB5Bgor +# BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG +# KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCBALKwKRFIhr2RY +# IW/WJLd9pc8a9sj/IoThKU92fTfKsKCCG9IwggXMMIIDtKADAgECAhBUmNLR1FsZ +# lUgTecgRwIeZMA0GCSqGSIb3DQEBDAUAMHcxCzAJBgNVBAYTAlVTMR4wHAYDVQQK +# ExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xSDBGBgNVBAMTP01pY3Jvc29mdCBJZGVu +# dGl0eSBWZXJpZmljYXRpb24gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgMjAy +# MDAeFw0yMDA0MTYxODM2MTZaFw00NTA0MTYxODQ0NDBaMHcxCzAJBgNVBAYTAlVT +# MR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xSDBGBgNVBAMTP01pY3Jv +# c29mdCBJZGVudGl0eSBWZXJpZmljYXRpb24gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRo +# b3JpdHkgMjAyMDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALORKgeD +# Bmf9np3gx8C3pOZCBH8Ppttf+9Va10Wg+3cL8IDzpm1aTXlT2KCGhFdFIMeiVPvH +# or+Kx24186IVxC9O40qFlkkN/76Z2BT2vCcH7kKbK/ULkgbk/WkTZaiRcvKYhOuD +# PQ7k13ESSCHLDe32R0m3m/nJxxe2hE//uKya13NnSYXjhr03QNAlhtTetcJtYmrV +# qXi8LW9J+eVsFBT9FMfTZRY33stuvF4pjf1imxUs1gXmuYkyM6Nix9fWUmcIxC70 +# ViueC4fM7Ke0pqrrBc0ZV6U6CwQnHJFnni1iLS8evtrAIMsEGcoz+4m+mOJyoHI1 +# vnnhnINv5G0Xb5DzPQCGdTiO0OBJmrvb0/gwytVXiGhNctO/bX9x2P29Da6SZEi3 +# W295JrXNm5UhhNHvDzI9e1eM80UHTHzgXhgONXaLbZ7LNnSrBfjgc10yVpRnlyUK +# xjU9lJfnwUSLgP3B+PR0GeUw9gb7IVc+BhyLaxWGJ0l7gpPKWeh1R+g/OPTHU3mg +# trTiXFHvvV84wRPmeAyVWi7FQFkozA8kwOy6CXcjmTimthzax7ogttc32H83rwjj +# O3HbbnMbfZlysOSGM1l0tRYAe1BtxoYT2v3EOYI9JACaYNq6lMAFUSw0rFCZE4e7 +# swWAsk0wAly4JoNdtGNz764jlU9gKL431VulAgMBAAGjVDBSMA4GA1UdDwEB/wQE +# 
AwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTIftJqhSobyhmYBAcnz1AQ +# T2ioojAQBgkrBgEEAYI3FQEEAwIBADANBgkqhkiG9w0BAQwFAAOCAgEAr2rd5hnn +# LZRDGU7L6VCVZKUDkQKL4jaAOxWiUsIWGbZqWl10QzD0m/9gdAmxIR6QFm3FJI9c +# Zohj9E/MffISTEAQiwGf2qnIrvKVG8+dBetJPnSgaFvlVixlHIJ+U9pW2UYXeZJF +# xBA2CFIpF8svpvJ+1Gkkih6PsHMNzBxKq7Kq7aeRYwFkIqgyuH4yKLNncy2RtNwx +# AQv3Rwqm8ddK7VZgxCwIo3tAsLx0J1KH1r6I3TeKiW5niB31yV2g/rarOoDXGpc8 +# FzYiQR6sTdWD5jw4vU8w6VSp07YEwzJ2YbuwGMUrGLPAgNW3lbBeUU0i/OxYqujY +# lLSlLu2S3ucYfCFX3VVj979tzR/SpncocMfiWzpbCNJbTsgAlrPhgzavhgplXHT2 +# 6ux6anSg8Evu75SjrFDyh+3XOjCDyft9V77l4/hByuVkrrOj7FjshZrM77nq81YY +# uVxzmq/FdxeDWds3GhhyVKVB0rYjdaNDmuV3fJZ5t0GNv+zcgKCf0Xd1WF81E+Al +# GmcLfc4l+gcK5GEh2NQc5QfGNpn0ltDGFf5Ozdeui53bFv0ExpK91IjmqaOqu/dk +# ODtfzAzQNb50GQOmxapMomE2gj4d8yu8l13bS3g7LfU772Aj6PXsCyM2la+YZr9T +# 03u4aUoqlmZpxJTG9F9urJh4iIAGXKKy7aIwggb+MIIE5qADAgECAhMzAAfqVHr/ +# 4Q/aDzAcAAAAB+pUMA0GCSqGSIb3DQEBDAUAMFoxCzAJBgNVBAYTAlVTMR4wHAYD +# VQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKzApBgNVBAMTIk1pY3Jvc29mdCBJ +# RCBWZXJpZmllZCBDUyBFT0MgQ0EgMDIwHhcNMjYwNDA3MDcyODM1WhcNMjYwNDEw +# MDcyODM1WjB8MQswCQYDVQQGEwJVUzEPMA0GA1UECBMGT3JlZ29uMRIwEAYDVQQH +# EwlCZWF2ZXJ0b24xIzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9u +# MSMwIQYDVQQDExpQeXRob24gU29mdHdhcmUgRm91bmRhdGlvbjCCAaIwDQYJKoZI +# hvcNAQEBBQADggGPADCCAYoCggGBAND/lHfn3OCIvUzMUIL6OdsKJrpnvuRtahV1 +# 6NCf0YSqOQemwQw2bTIyTkgSFwY4WaCvfHzcliURiPidXiqy56OmeC19A95BarKA +# UmKRv3bVpM0XEK7OLvMyRFNg9aPUi1nmdF3Vx02RI9p88wBHQR5nNIpOTXlwfONQ +# klggyEZSxkBf+dCL6jtz4jiqoreiEmRwesOrtQxKNsRuezbumpmVMZGxrMQVLBIX +# OWG9a3GS6Sqfi+cJgxQhSKa9JENPRojyxOyVG8vdwJQiMqSjm2ZMFAkIkSWBQSfx +# WjrRmw8/20WaBENattpqb7/cjX7zwimJ86uV48D8AQIGzAxfYAySG6NG9iMfU5S5 +# wzDFpiCuXyfrlgAbZu4fnBIyOmGcq01XxruzJ3FcdLMif5YXZU+n30XOaJfgY9/x +# Gq2HiEIQF5MeuxknfD+vYi/GXGtC/nlKS0Tx91+YXt6RctxgJEwpZCGzFZmmaiUa +# Y0GBp4jzXXwLqX8T15lgxAGoqoPvvwIDAQABo4ICGTCCAhUwDAYDVR0TAQH/BAIw +# ADAOBgNVHQ8BAf8EBAMCB4AwPAYDVR0lBDUwMwYKKwYBBAGCN2EBAAYIKwYBBQUH +# AwMGGysGAQQBgjdhgqKNuwqmkohkgZH0oEWCk/3hbzAdBgNVHQ4EFgQUy3N6DzeS +# y91jju8Ihmm3r+5AO58wHwYDVR0jBBgwFoAUZZ9RzoVofy+KRYiq3acxux4NAF4w +# ZwYDVR0fBGAwXjBcoFqgWIZWaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9w +# cy9jcmwvTWljcm9zb2Z0JTIwSUQlMjBWZXJpZmllZCUyMENTJTIwRU9DJTIwQ0El +# MjAwMi5jcmwwgaUGCCsGAQUFBwEBBIGYMIGVMGQGCCsGAQUFBzAChlhodHRwOi8v +# d3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NlcnRzL01pY3Jvc29mdCUyMElEJTIw +# VmVyaWZpZWQlMjBDUyUyMEVPQyUyMENBJTIwMDIuY3J0MC0GCCsGAQUFBzABhiFo +# dHRwOi8vb25lb2NzcC5taWNyb3NvZnQuY29tL29jc3AwZgYDVR0gBF8wXTBRBgwr +# BgEEAYI3TIN9AQEwQTA/BggrBgEFBQcCARYzaHR0cDovL3d3dy5taWNyb3NvZnQu +# Y29tL3BraW9wcy9Eb2NzL1JlcG9zaXRvcnkuaHRtMAgGBmeBDAEEATANBgkqhkiG +# 9w0BAQwFAAOCAgEAPPwJPfkrkQMH39/iTBbir6tGnQpLCpOuP1A6mmKp22GxCG0/ +# 1IPx4QK1qXpy8hYd/G9ySDSYu3DSg22/icSmGSxdcI3zoRsj9vdJeesQrxtK8v9y +# 4zMxN5TaLV5CmatSUZPyX1t7Tee9wiLBUeZIj+3Lg2gNUsdvavywRYxSYkWGuGaM +# jGtJrs4PoJW3f4KkOc5mShCpUgl4Mo9ZO+ChcQpKEP99UJ9CXB9wrNzXnEOTyGnR +# f1sYklPqBifC7hrnKIPZiJte1efmGeExmspWewmUSNXCIGenDAN8XDut2yi1iSSQ +# n1VtL6deCRhS1cTn+FAzy2q7a/8Jhhq+HUlcJwRGtrxgKZHrwEvGRvIWNK5l1rKl +# Q+WQ7RqRrH6PpSfR/xoptfpJX9LNUoHS0m114HcE2xk2hbv+U/5ZgxUtSd4MbF7/ +# C8eShz4Os8CznYXJ/d+kfvoyEqKE9VCbc4BUC+w1iufQOPo4tRvK4TFJu1N4IqJk +# NsChWXUef7lIT5CoaJw4np0dVS2NosmRCxi1dMyADzqFNDXGKQxq5k6MpnXbevL5 +# JdcznhhxgwRUcwNK/3f9WSaU2mnI+6tHrnATteL7Ct6FzZWjqWDbURkU66bRqrBh +# +u5KyLZAAQXTfdsaDUfxtElQJf5wROgYvwnW1dGvujgc+XKVvf1VT3GSFRIwggda +# MIIFQqADAgECAhMzAAAABft6XDITYd9dAAAAAAAFMA0GCSqGSIb3DQEBDAUAMGMx +# CzAJBgNVBAYTAlVTMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xNDAy +# 
BgNVBAMTK01pY3Jvc29mdCBJRCBWZXJpZmllZCBDb2RlIFNpZ25pbmcgUENBIDIw +# MjEwHhcNMjEwNDEzMTczMTUzWhcNMjYwNDEzMTczMTUzWjBaMQswCQYDVQQGEwJV +# UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSswKQYDVQQDEyJNaWNy +# b3NvZnQgSUQgVmVyaWZpZWQgQ1MgRU9DIENBIDAyMIICIjANBgkqhkiG9w0BAQEF +# AAOCAg8AMIICCgKCAgEA0hqZfD8ykKTA6CDbWvshmBpDoBf7Lv132RVuSqVwQO3a +# ALLkuRnnTIoRmMGo0fIMQrtwR6UHB06xdqOkAfqB6exubXTHu44+duHUCdE4ngjE +# LBQyluMuSOnHaEdveIbt31OhMEX/4nQkph4+Ah0eR4H2sTRrVKmKrlOoQlhia73Q +# g2dHoitcX1uT1vW3Knpt9Mt76H7ZHbLNspMZLkWBabKMl6BdaWZXYpPGdS+qY80g +# DaNCvFq0d10UMu7xHesIqXpTDT3Q3AeOxSylSTc/74P3og9j3OuemEFauFzL55t1 +# MvpadEhQmD8uFMxFv/iZOjwvcdY1zhanVLLyplz13/NzSoU3QjhPdqAGhRIwh/YD +# zo3jCdVJgWQRrW83P3qWFFkxNiME2iO4IuYgj7RwseGwv7I9cxOyaHihKMdT9Neo +# SjpSNzVnKKGcYMtOdMtKFqoV7Cim2m84GmIYZTBorR/Po9iwlasTYKFpGZqdWKyY +# nJO2FV8oMmWkIK1iagLLgEt6ZaR0rk/1jUYssyTiRqWr84Qs3XL/V5KUBEtUEQfQ +# /4RtnI09uFFUIGJZV9mD/xOUksWodGrCQSem6Hy261xMJAHqTqMuDKgwi8xk/mfl +# r7yhXPL73SOULmu1Aqu4I7Gpe6QwNW2TtQBxM3vtSTmdPW6rK5y0gED51RjsyK0C +# AwEAAaOCAg4wggIKMA4GA1UdDwEB/wQEAwIBhjAQBgkrBgEEAYI3FQEEAwIBADAd +# BgNVHQ4EFgQUZZ9RzoVofy+KRYiq3acxux4NAF4wVAYDVR0gBE0wSzBJBgRVHSAA +# MEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9wa2lvcHMv +# RG9jcy9SZXBvc2l0b3J5Lmh0bTAZBgkrBgEEAYI3FAIEDB4KAFMAdQBiAEMAQTAS +# BgNVHRMBAf8ECDAGAQH/AgEAMB8GA1UdIwQYMBaAFNlBKbAPD2Ns72nX9c0pnqRI +# ajDmMHAGA1UdHwRpMGcwZaBjoGGGX2h0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9w +# a2lvcHMvY3JsL01pY3Jvc29mdCUyMElEJTIwVmVyaWZpZWQlMjBDb2RlJTIwU2ln +# bmluZyUyMFBDQSUyMDIwMjEuY3JsMIGuBggrBgEFBQcBAQSBoTCBnjBtBggrBgEF +# BQcwAoZhaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9jZXJ0cy9NaWNy +# b3NvZnQlMjBJRCUyMFZlcmlmaWVkJTIwQ29kZSUyMFNpZ25pbmclMjBQQ0ElMjAy +# MDIxLmNydDAtBggrBgEFBQcwAYYhaHR0cDovL29uZW9jc3AubWljcm9zb2Z0LmNv +# bS9vY3NwMA0GCSqGSIb3DQEBDAUAA4ICAQBFSWDUd08X4g5HzvVfrB1SiV8pk6XP +# HT9jPkCmvU/uvBzmZRAjYk2gKYR3pXoStRJaJ/lhjC5Dq/2R7P1YRZHCDYyK0zvS +# RMdE6YQtgGjmsdhzD0nCS6hVVcgfmNQscPJ1WHxbvG5EQgYQ0ZED1FN0MOPQzWe1 +# zbH5Va0dSxtnodBVRjnyDYEm7sNEcvJHTG3eXzAyd00E5KDCsEl4z5O0mvXqwaH2 +# PS0200E6P4WqLwgs/NmUu5+Aa8Lw/2En2VkIW7Pkir4Un1jG6+tj/ehuqgFyUPPC +# h6kbnvk48bisi/zPjAVkj7qErr7fSYICCzJ4s4YUNVVHgdoFn2xbW7ZfBT3QA9zf +# hq9u4ExXbrVD5rxXSTFEUg2gzQq9JHxsdHyMfcCKLFQOXODSzcYeLpCd+r6GcoDB +# ToyPdKccjC6mAq6+/hiMDnpvKUIHpyYEzWUeattyKXtMf+QrJeQ+ny5jBL+xqdOO +# PEz3dg7qn8/oprUrUbGLBv9fWm18fWXdAv1PCtLL/acMLtHoyeSVMKQYqDHb3Qm0 +# uQ+NQ0YE4kUxSQa+W/cCzYAI32uN0nb9M4Mr1pj4bJZidNkM4JyYqezohILxYkgH +# bboJQISrQWrm5RYdyhKBpptJ9JJn0Z63LjdnzlOUxjlsAbQir2Wmz/OJE703BbHm +# QZRwzPx1vu7S5zCCB54wggWGoAMCAQICEzMAAAAHh6M0o3uljhwAAAAAAAcwDQYJ +# KoZIhvcNAQEMBQAwdzELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBD +# b3Jwb3JhdGlvbjFIMEYGA1UEAxM/TWljcm9zb2Z0IElkZW50aXR5IFZlcmlmaWNh +# dGlvbiBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAyMDIwMB4XDTIxMDQwMTIw +# MDUyMFoXDTM2MDQwMTIwMTUyMFowYzELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1p +# Y3Jvc29mdCBDb3Jwb3JhdGlvbjE0MDIGA1UEAxMrTWljcm9zb2Z0IElEIFZlcmlm +# aWVkIENvZGUgU2lnbmluZyBQQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +# ADCCAgoCggIBALLwwK8ZiCji3VR6TElsaQhVCbRS/3pK+MHrJSj3Zxd3KU3rlfL3 +# qrZilYKJNqztA9OQacr1AwoNcHbKBLbsQAhBnIB34zxf52bDpIO3NJlfIaTE/xrw +# eLoQ71lzCHkD7A4As1Bs076Iu+mA6cQzsYYH/Cbl1icwQ6C65rU4V9NQhNUwgrx9 +# rGQ//h890Q8JdjLLw0nV+ayQ2Fbkd242o9kH82RZsH3HEyqjAB5a8+Ae2nPIPc8s +# ZU6ZE7iRrRZywRmrKDp5+TcmJX9MRff241UaOBs4NmHOyke8oU1TYrkxh+YeHgfW +# o5tTgkoSMoayqoDpHOLJs+qG8Tvh8SnifW2Jj3+ii11TS8/FGngEaNAWrbyfNrC6 +# 9oKpRQXY9bGH6jn9NEJv9weFxhTwyvx9OJLXmRGbAUXN1U9nf4lXezky6Uh/cgjk +# Vd6CGUAf0K+Jw+GE/5VpIVbcNr9rNE50Sbmy/4RTCEGvOq3GhjITbCa4crCzTTHg +# 
YYjHs1NbOc6brH+eKpWLtr+bGecy9CrwQyx7S/BfYJ+ozst7+yZtG2wR461uckFu +# 0t+gCwLdN0A6cFtSRtR8bvxVFyWwTtgMMFRuBa3vmUOTnfKLsLefRaQcVTgRnzeL +# zdpt32cdYKp+dhr2ogc+qM6K4CBI5/j4VFyC4QFeUP2YAidLtvpXRRo3AgMBAAGj +# ggI1MIICMTAOBgNVHQ8BAf8EBAMCAYYwEAYJKwYBBAGCNxUBBAMCAQAwHQYDVR0O +# BBYEFNlBKbAPD2Ns72nX9c0pnqRIajDmMFQGA1UdIARNMEswSQYEVR0gADBBMD8G +# CCsGAQUFBwIBFjNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL0RvY3Mv +# UmVwb3NpdG9yeS5odG0wGQYJKwYBBAGCNxQCBAweCgBTAHUAYgBDAEEwDwYDVR0T +# AQH/BAUwAwEB/zAfBgNVHSMEGDAWgBTIftJqhSobyhmYBAcnz1AQT2ioojCBhAYD +# VR0fBH0wezB5oHegdYZzaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9j +# cmwvTWljcm9zb2Z0JTIwSWRlbnRpdHklMjBWZXJpZmljYXRpb24lMjBSb290JTIw +# Q2VydGlmaWNhdGUlMjBBdXRob3JpdHklMjAyMDIwLmNybDCBwwYIKwYBBQUHAQEE +# gbYwgbMwgYEGCCsGAQUFBzAChnVodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtp +# b3BzL2NlcnRzL01pY3Jvc29mdCUyMElkZW50aXR5JTIwVmVyaWZpY2F0aW9uJTIw +# Um9vdCUyMENlcnRpZmljYXRlJTIwQXV0aG9yaXR5JTIwMjAyMC5jcnQwLQYIKwYB +# BQUHMAGGIWh0dHA6Ly9vbmVvY3NwLm1pY3Jvc29mdC5jb20vb2NzcDANBgkqhkiG +# 9w0BAQwFAAOCAgEAfyUqnv7Uq+rdZgrbVyNMul5skONbhls5fccPlmIbzi+OwVdP +# Q4H55v7VOInnmezQEeW4LqK0wja+fBznANbXLB0KrdMCbHQpbLvG6UA/Xv2pfpVI +# E1CRFfNF4XKO8XYEa3oW8oVH+KZHgIQRIwAbyFKQ9iyj4aOWeAzwk+f9E5StNp5T +# 8FG7/VEURIVWArbAzPt9ThVN3w1fAZkF7+YU9kbq1bCR2YD+MtunSQ1Rft6XG7b4 +# e0ejRA7mB2IoX5hNh3UEauY0byxNRG+fT2MCEhQl9g2i2fs6VOG19CNep7SquKaB +# jhWmirYyANb0RJSLWjinMLXNOAga10n8i9jqeprzSMU5ODmrMCJE12xS/NWShg/t +# uLjAsKP6SzYZ+1Ry358ZTFcx0FS/mx2vSoU8s8HRvy+rnXqyUJ9HBqS0DErVLjQw +# K8VtsBdekBmdTbQVoCgPCqr+PDPB3xajYnzevs7eidBsM71PINK2BoE2UfMwxCCX +# 3mccFgx6UsQeRSdVVVNSyALQe6PT12418xon2iDGE81OGCreLzDcMAZnrUAx4XQL +# Uz6ZTl65yPUiOh3k7Yww94lDf+8oG2oZmDh5O1Qe38E+M3vhKwmzIeoB1dVLlz4i +# 3IpaDcR+iuGjH2TdaC1ZOmBXiCRKJLj4DT2uhJ04ji+tHD6n58vhavFIrmcxghrm +# MIIa4gIBATBxMFoxCzAJBgNVBAYTAlVTMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29y +# cG9yYXRpb24xKzApBgNVBAMTIk1pY3Jvc29mdCBJRCBWZXJpZmllZCBDUyBFT0Mg +# Q0EgMDICEzMAB+pUev/hD9oPMBwAAAAH6lQwDQYJYIZIAWUDBAIBBQCggbIwGQYJ +# KoZIhvcNAQkDMQwGCisGAQQBgjcCAQQwHAYKKwYBBAGCNwIBCzEOMAwGCisGAQQB +# gjcCARUwLwYJKoZIhvcNAQkEMSIEICpXe3RS3b2coD0CJveEHlglqtPUYZ2FqSrO +# UfP6C6Y4MEYGCisGAQQBgjcCAQwxODA2oDCALgBQAHkAdABoAG8AbgAgADMALgAx +# ADQALgA0ACAAKAAyADMAMQAxADYAZgA5ACmhAoAAMA0GCSqGSIb3DQEBAQUABIIB +# gHaAK9dRSQxQvAiBXu8BOjm/3WL7Hdh4vVPDdI7TVKrNk9GE/8isBY5v3SDISaGV +# VzilkWjgUJX1tp5Wq3Ix9zJVToVG3kaHlNrEjb+cK8oqkMJqIS0GTrS70Xs1UPKk +# PNSzUfi1ddmCW9Up3bmvR7e5SotcgAKysucyRPHmhDZKdC0tM3FdzqbFMs0QV1QL +# gUzdgWEMqRQp7PN9Y3uHeO7/FUUGkEGBHuKq9kGXbnYGwZEazzy6Uxx2Nd47iCsu +# cEiOdpecA13fGE6lnM+uTZGOvshUVQeTIkr5pb2NS+lXF+CEq6uqMntdAfblYzCs +# gTCp8OflGXHVnbo4p8SsrGaBV7KnbzCk7uuEqW3SJiQfMrkQjvjc1cRwQvOVVaPj +# F1Qknn7KmxukU7FEIwyGXPKQ5OG8oUugOTS/5aqSbpmY6HuTbbWH/7MULJlfikOq +# zuteCVjvd18Y6LMk+mK1x9PAtoHmZuMIedP6rHNE8admogur39k1IhRJIcG9S8Kk +# 3aGCGBEwghgNBgorBgEEAYI3AwMBMYIX/TCCF/kGCSqGSIb3DQEHAqCCF+owghfm +# AgEDMQ8wDQYJYIZIAWUDBAIBBQAwggFiBgsqhkiG9w0BCRABBKCCAVEEggFNMIIB +# SQIBAQYKKwYBBAGEWQoDATAxMA0GCWCGSAFlAwQCAQUABCASSpbbgjgopvaqW5Og +# e0ZpH9C7TBmrsY+qwoqXJMywiwIGacJyyM2cGBMyMDI2MDQwNzE2MTkwNy44NjJa +# MASAAgH0oIHhpIHeMIHbMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3Rv +# bjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0 +# aW9uMSUwIwYDVQQLExxNaWNyb3NvZnQgQW1lcmljYSBPcGVyYXRpb25zMScwJQYD +# VQQLEx5uU2hpZWxkIFRTUyBFU046N0QwMC0wNUUwLUQ5NDcxNTAzBgNVBAMTLE1p +# Y3Jvc29mdCBQdWJsaWMgUlNBIFRpbWUgU3RhbXBpbmcgQXV0aG9yaXR5oIIPITCC +# B4IwggVqoAMCAQICEzMAAAAF5c8P/2YuyYcAAAAAAAUwDQYJKoZIhvcNAQEMBQAw +# dzELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjFI +# 
MEYGA1UEAxM/TWljcm9zb2Z0IElkZW50aXR5IFZlcmlmaWNhdGlvbiBSb290IENl +# cnRpZmljYXRlIEF1dGhvcml0eSAyMDIwMB4XDTIwMTExOTIwMzIzMVoXDTM1MTEx +# OTIwNDIzMVowYTELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jw +# b3JhdGlvbjEyMDAGA1UEAxMpTWljcm9zb2Z0IFB1YmxpYyBSU0EgVGltZXN0YW1w +# aW5nIENBIDIwMjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCefOdS +# Y/3gxZ8FfWO1BiKjHB7X55cz0RMFvWVGR3eRwV1wb3+yq0OXDEqhUhxqoNv6iYWK +# jkMcLhEFxvJAeNcLAyT+XdM5i2CgGPGcb95WJLiw7HzLiBKrxmDj1EQB/mG5eEiR +# BEp7dDGzxKCnTYocDOcRr9KxqHydajmEkzXHOeRGwU+7qt8Md5l4bVZrXAhK+WSk +# 5CihNQsWbzT1nRliVDwunuLkX1hyIWXIArCfrKM3+RHh+Sq5RZ8aYyik2r8HxT+l +# 2hmRllBvE2Wok6IEaAJanHr24qoqFM9WLeBUSudz+qL51HwDYyIDPSQ3SeHtKog0 +# ZubDk4hELQSxnfVYXdTGncaBnB60QrEuazvcob9n4yR65pUNBCF5qeA4QwYnilBk +# fnmeAjRN3LVuLr0g0FXkqfYdUmj1fFFhH8k8YBozrEaXnsSL3kdTD01X+4LfIWOu +# FzTzuoslBrBILfHNj8RfOxPgjuwNvE6YzauXi4orp4Sm6tF245DaFOSYbWFK5ZgG +# 6cUY2/bUq3g3bQAqZt65KcaewEJ3ZyNEobv35Nf6xN6FrA6jF9447+NHvCjeWLCQ +# Z3M8lgeCcnnhTFtyQX3XgCoc6IRXvFOcPVrr3D9RPHCMS6Ckg8wggTrtIVnY8yjb +# vGOUsAdZbeXUIQAWMs0d3cRDv09SvwVRd61evQIDAQABo4ICGzCCAhcwDgYDVR0P +# AQH/BAQDAgGGMBAGCSsGAQQBgjcVAQQDAgEAMB0GA1UdDgQWBBRraSg6NS9IY0DP +# e9ivSek+2T3bITBUBgNVHSAETTBLMEkGBFUdIAAwQTA/BggrBgEFBQcCARYzaHR0 +# cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9Eb2NzL1JlcG9zaXRvcnkuaHRt +# MBMGA1UdJQQMMAoGCCsGAQUFBwMIMBkGCSsGAQQBgjcUAgQMHgoAUwB1AGIAQwBB +# MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUyH7SaoUqG8oZmAQHJ89QEE9o +# qKIwgYQGA1UdHwR9MHsweaB3oHWGc2h0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9w +# a2lvcHMvY3JsL01pY3Jvc29mdCUyMElkZW50aXR5JTIwVmVyaWZpY2F0aW9uJTIw +# Um9vdCUyMENlcnRpZmljYXRlJTIwQXV0aG9yaXR5JTIwMjAyMC5jcmwwgZQGCCsG +# AQUFBwEBBIGHMIGEMIGBBggrBgEFBQcwAoZ1aHR0cDovL3d3dy5taWNyb3NvZnQu +# Y29tL3BraW9wcy9jZXJ0cy9NaWNyb3NvZnQlMjBJZGVudGl0eSUyMFZlcmlmaWNh +# dGlvbiUyMFJvb3QlMjBDZXJ0aWZpY2F0ZSUyMEF1dGhvcml0eSUyMDIwMjAuY3J0 +# MA0GCSqGSIb3DQEBDAUAA4ICAQBfiHbHfm21WhV150x4aPpO4dhEmSUVpbixNDmv +# 6TvuIHv1xIs174bNGO/ilWMm+Jx5boAXrJxagRhHQtiFprSjMktTliL4sKZyt2i+ +# SXncM23gRezzsoOiBhv14YSd1Klnlkzvgs29XNjT+c8hIfPRe9rvVCMPiH7zPZcw +# 5nNjthDQ+zD563I1nUJ6y59TbXWsuyUsqw7wXZoGzZwijWT5oc6GvD3HDokJY401 +# uhnj3ubBhbkR83RbfMvmzdp3he2bvIUztSOuFzRqrLfEvsPkVHYnvH1wtYyrt5vS +# hiKheGpXa2AWpsod4OJyT4/y0dggWi8g/tgbhmQlZqDUf3UqUQsZaLdIu/XSjgoZ +# qDjamzCPJtOLi2hBwL+KsCh0Nbwc21f5xvPSwym0Ukr4o5sCcMUcSy6TEP7uMV8R +# X0eH/4JLEpGyae6Ki8JYg5v4fsNGif1OXHJ2IWG+7zyjTDfkmQ1snFOTgyEX8qBp +# efQbF0fx6URrYiarjmBprwP6ZObwtZXJ23jK3Fg/9uqM3j0P01nzVygTppBabzxP +# Ah/hHhhls6kwo3QLJ6No803jUsZcd4JQxiYHHc+Q/wAMcPUnYKv/q2O444LO1+n6 +# j01z5mggCSlRwD9faBIySAcA9S8h22hIAcRQqIGEjolCK9F6nK9ZyX4lhthsGHum +# aABdWzCCB5cwggV/oAMCAQICEzMAAABV2d1pJij5+OIAAAAAAFUwDQYJKoZIhvcN +# AQEMBQAwYTELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3Jh +# dGlvbjEyMDAGA1UEAxMpTWljcm9zb2Z0IFB1YmxpYyBSU0EgVGltZXN0YW1waW5n +# IENBIDIwMjAwHhcNMjUxMDIzMjA0NjQ5WhcNMjYxMDIyMjA0NjQ5WjCB2zELMAkG +# A1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQx +# HjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjElMCMGA1UECxMcTWljcm9z +# b2Z0IEFtZXJpY2EgT3BlcmF0aW9uczEnMCUGA1UECxMeblNoaWVsZCBUU1MgRVNO +# OjdEMDAtMDVFMC1EOTQ3MTUwMwYDVQQDEyxNaWNyb3NvZnQgUHVibGljIFJTQSBU +# aW1lIFN0YW1waW5nIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC +# AgoCggIBAL25H5IeWUiz9DAlFmn2sPymaFWbvYkMfK+ScIWb3a1IvOlIwghUDjY0 +# Gp6yMRhfYURiGS0GedIB6ywvuH6VBCX3+bdOFcAclgtv21jrpOjZmk4fSaT2Q3Bs +# zUfeUJa8o3xI7ZfoMY9dszTxHQAz6ZVX87fHGEVhQcfxW33IdPJOj/ae419qtYxT +# 21MVmCfsTshgtWioQxmOW/vMC9/b+qgtBxSMf798vm3qfmhF6KCvFaHlivrM32hY +# 16PGE3L0PFC+LM7vRxU7mTb+r76CeybvqOWk4+dbKYftPhV1t/E5S/6wwXeYmu/Y +# 
7JC7Tnh2w45G5Y4pcM3oHMb/YuPRdOWa0v+RC2QgmNVWqjuxDiylWscXQDuaMtb2 +# 9AcdGUVV9ZsRY2M2sthAtOdZOshiR5ufMtaHtiCkWv0jNfgUxrHurxzYuUNneWZ6 +# EfQDgFAw8CSCKkSOK2c9jEop4ddVq10xvbqxdrqMneVXvvIcXrPQAXj9j2ECpV2E +# wMb3Wnmpw00P78JpzPsk3Fs61ZvOGd/F1RcOBu6f2TWdp7HL7+rq7tgHr13Mldbf +# IWu4lpoYYE1gTQa1Yrg5XN4j7zs9klT2z3qocmPzV8DWQgIHNh+aTs7bujMEMQyI +# 7Xt1zPxZCgcR6H0tmmzU/9BxvsWbRalCQ2sYGyWupTdc4e7KY7kPAgMBAAGjggHL +# MIIBxzAdBgNVHQ4EFgQUVgRfEG3cCAPwyL+pyRbKwdesZbYwHwYDVR0jBBgwFoAU +# a2koOjUvSGNAz3vYr0npPtk92yEwbAYDVR0fBGUwYzBhoF+gXYZbaHR0cDovL3d3 +# dy5taWNyb3NvZnQuY29tL3BraW9wcy9jcmwvTWljcm9zb2Z0JTIwUHVibGljJTIw +# UlNBJTIwVGltZXN0YW1waW5nJTIwQ0ElMjAyMDIwLmNybDB5BggrBgEFBQcBAQRt +# MGswaQYIKwYBBQUHMAKGXWh0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9wa2lvcHMv +# Y2VydHMvTWljcm9zb2Z0JTIwUHVibGljJTIwUlNBJTIwVGltZXN0YW1waW5nJTIw +# Q0ElMjAyMDIwLmNydDAMBgNVHRMBAf8EAjAAMBYGA1UdJQEB/wQMMAoGCCsGAQUF +# BwMIMA4GA1UdDwEB/wQEAwIHgDBmBgNVHSAEXzBdMFEGDCsGAQQBgjdMg30BATBB +# MD8GCCsGAQUFBwIBFjNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL0Rv +# Y3MvUmVwb3NpdG9yeS5odG0wCAYGZ4EMAQQCMA0GCSqGSIb3DQEBDAUAA4ICAQBS +# HuGSVHvalCnFnlsqXIQefH1xP2SFr9g+Vz+f5P7QeywjfQb5jUlSmd1XnJUDPe/M +# HxL7r3TEElL+mNtG6CDPAytStSFPXD9tTBtBMYh8Wqo64pH9qm361yIqeBH979mz +# WCkMQsTd0nM6dUl9B+7qiti+ToXwxIl39eYqLuYYfhD2mqqePXMzUKSQzkf73yYI +# VHP6nLJQz4aAmaWcfG9jg78sBkDV8KpW7JgktuLhphJEN1B+SVHjenPdcmrFXIUu +# /K4jK5ukfWaQIjuaXzSjBlNjC5tQN6adPfA3GxUwHPeR4ekL5If/9vBf13tmzBW+ +# gy+0sNGTveb9IL9GU8iX8UvywsX62nhCCPRUhTigDBKdczRUrNrntBhowbfchBDF +# ML8avRMRc9Gmc2JvIryX336SFQ51//q1UU2HMSJEMhWLJSIWJVhfUowsOa+PampI +# zETYfFvTu2mqKJUlWZXkGYxrdCvCczJcqeoadpW1ul6kcdnDh228SQ8ZhDc6IRlM +# 4iNd5SNoNgX+aom3wuGyjUaSaPZWxPB1G2NKiYhPLt0lPHg0Gskj1zhISY8UQkMM +# Dr3o2JgRuT+wnJEDQUp55ddvhSkSoD6I9DL/s+TjIY/c9jLaW5xywJHqdKHUApRM +# sghv7kebSua1upmR+TquelFktDSOjVdSRkuya4uoxTGCB0Mwggc/AgEBMHgwYTEL +# MAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEyMDAG +# A1UEAxMpTWljcm9zb2Z0IFB1YmxpYyBSU0EgVGltZXN0YW1waW5nIENBIDIwMjAC +# EzMAAABV2d1pJij5+OIAAAAAAFUwDQYJYIZIAWUDBAIBBQCgggScMBEGCyqGSIb3 +# DQEJEAIPMQIFADAaBgkqhkiG9w0BCQMxDQYLKoZIhvcNAQkQAQQwHAYJKoZIhvcN +# AQkFMQ8XDTI2MDQwNzE2MTkwN1owLwYJKoZIhvcNAQkEMSIEIO1tbnR5rJvq+GXf +# 4bgeV3HDcP+8Ud48R8sLBQBLI+rBMIG5BgsqhkiG9w0BCRACLzGBqTCBpjCBozCB +# oAQg2Lk8l2SGYru/ff7+D2qrJnkswcYdK6pGKu7GGGr4/s0wfDBlpGMwYTELMAkG +# A1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEyMDAGA1UE +# AxMpTWljcm9zb2Z0IFB1YmxpYyBSU0EgVGltZXN0YW1waW5nIENBIDIwMjACEzMA +# AABV2d1pJij5+OIAAAAAAFUwggNeBgsqhkiG9w0BCRACEjGCA00wggNJoYIDRTCC +# A0EwggIpAgEBMIIBCaGB4aSB3jCB2zELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldh +# c2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBD +# b3Jwb3JhdGlvbjElMCMGA1UECxMcTWljcm9zb2Z0IEFtZXJpY2EgT3BlcmF0aW9u +# czEnMCUGA1UECxMeblNoaWVsZCBUU1MgRVNOOjdEMDAtMDVFMC1EOTQ3MTUwMwYD +# VQQDEyxNaWNyb3NvZnQgUHVibGljIFJTQSBUaW1lIFN0YW1waW5nIEF1dGhvcml0 +# eaIjCgEBMAcGBSsOAwIaAxUAHTtUAYJlv7bgWVeRBo4X7FeHDeqgZzBlpGMwYTEL +# MAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEyMDAG +# A1UEAxMpTWljcm9zb2Z0IFB1YmxpYyBSU0EgVGltZXN0YW1waW5nIENBIDIwMjAw +# DQYJKoZIhvcNAQELBQACBQDtf2YdMCIYDzIwMjYwNDA3MTExNjQ1WhgPMjAyNjA0 +# MDgxMTE2NDVaMHQwOgYKKwYBBAGEWQoEATEsMCowCgIFAO1/Zh0CAQAwBwIBAAIC +# Ew4wBwIBAAICHEcwCgIFAO2At50CAQAwNgYKKwYBBAGEWQoEAjEoMCYwDAYKKwYB +# BAGEWQoDAqAKMAgCAQACAwehIKEKMAgCAQACAwGGoDANBgkqhkiG9w0BAQsFAAOC +# AQEAj/DYTzrEaV8hMcUsbVdSBihFoM0FgJoy99LL0e/ztow4UwgL27kPtwlbyVfW +# Onw+tLBT2sbsS4yIj+QnTUoHeqNA/Ue9FPkPH801qthzRDZ4UTwz09KgVfKchw0Y +# jGTSpL8GQL6cAYgFuf/Zh0qEVwYW4OxiyKEMKpzYdqoNwRx9LrCgE0km4WZ3bDBw +# 
VFCKcd176gCpV5Yle8MbDWFizSUrylp7v7q/Knx0BrWs8Q17RUHnYAyehZCD0d0H +# zXEClh+sKxScyk8TjI2o7MKau8YRHzeLDN9wj24hJ9URkNThjDhY3ruNMvves5xS +# fKmhpIqWsb5ZvZW4hmbqdxwWbDANBgkqhkiG9w0BAQEFAASCAgA6C6+LOeV/wer0 +# SB0XI0QO89oeTrCbxFNxmKegd8rz7ERBKLcxLh9WFOfDf4t5oi0RlPP8cFwByVIb +# TIgtggkIq5uN6/qJ9+iF+w7a4123dTuQSAi35d7VMC/q8rtHL6ev5zeKeYVnov5C +# JqmlUyVL5NPRxrUaWeXNXOmOMZwmNTqZspf/eadKTiBwDVJZ4DLXkrZNoR3Gd7Wq +# w6N/gt5MDhL26NU2xnysk2pXRdTna4Fh545XD2YGqZAJdJ6nzv/uSQBDnAL7uOV1 +# U5I+hEBgbCMLAkNzTJUprQFaz1X9taVTbYQOE8IGkseOg1nw8kDc5nYga3kNZQ/k +# u4gbx3opZuxUL43teYm17AJwHphWgQojwdvi+OozR0bS27xVK5O5Y+5yReg4U6DG +# XjuaFzxxA8efdy7DDETfFWhO6fSfREWcf+Nb9uA7i+qPOJvK4ls5p3yTwJgG1KUr +# UNFhIw5LZZxn+yAArVY712VcCDsdBQiJ3ZbS90/AXjtsgFP/Cr99YhoKi7WNOtJ9 +# CHXI8Y5RMy3AAGBXmSJ2f1bEVX7Ya1Wdi7UlwgYbchhfL25XRKonczGNxAzymO6t +# MCAweDvuELomErT+70YfQXd1kye4j5KiCQXYtpedFgb9sDqGv5j7c5ACHVyAsvPA +# Wx3T3b6yi2pK9C6xC8g5cmjf8c7Rnw== +# SIG # End signature block diff --git a/Python313_13_x64_Template/Lib/venv/scripts/common/activate b/Python314_4_x64_Template/Lib/venv/scripts/common/activate similarity index 100% rename from Python313_13_x64_Template/Lib/venv/scripts/common/activate rename to Python314_4_x64_Template/Lib/venv/scripts/common/activate diff --git a/Python313_13_x64_Template/Lib/venv/scripts/common/activate.fish b/Python314_4_x64_Template/Lib/venv/scripts/common/activate.fish similarity index 100% rename from Python313_13_x64_Template/Lib/venv/scripts/common/activate.fish rename to Python314_4_x64_Template/Lib/venv/scripts/common/activate.fish diff --git a/Python313_13_x64_Template/Lib/venv/scripts/nt/activate.bat b/Python314_4_x64_Template/Lib/venv/scripts/nt/activate.bat similarity index 100% rename from Python313_13_x64_Template/Lib/venv/scripts/nt/activate.bat rename to Python314_4_x64_Template/Lib/venv/scripts/nt/activate.bat diff --git a/Python313_13_x64_Template/Lib/venv/scripts/nt/deactivate.bat b/Python314_4_x64_Template/Lib/venv/scripts/nt/deactivate.bat similarity index 100% rename from Python313_13_x64_Template/Lib/venv/scripts/nt/deactivate.bat rename to Python314_4_x64_Template/Lib/venv/scripts/nt/deactivate.bat diff --git a/Python314_4_x64_Template/Lib/venv/scripts/nt/venvlauncher.exe b/Python314_4_x64_Template/Lib/venv/scripts/nt/venvlauncher.exe new file mode 100644 index 00000000..cb8c9dc7 Binary files /dev/null and b/Python314_4_x64_Template/Lib/venv/scripts/nt/venvlauncher.exe differ diff --git a/Python314_4_x64_Template/Lib/venv/scripts/nt/venvwlauncher.exe b/Python314_4_x64_Template/Lib/venv/scripts/nt/venvwlauncher.exe new file mode 100644 index 00000000..02cd7f73 Binary files /dev/null and b/Python314_4_x64_Template/Lib/venv/scripts/nt/venvwlauncher.exe differ diff --git a/Python313_13_x64_Template/Lib/venv/scripts/posix/activate.csh b/Python314_4_x64_Template/Lib/venv/scripts/posix/activate.csh similarity index 100% rename from Python313_13_x64_Template/Lib/venv/scripts/posix/activate.csh rename to Python314_4_x64_Template/Lib/venv/scripts/posix/activate.csh diff --git a/Python314_4_x64_Template/Lib/warnings.py b/Python314_4_x64_Template/Lib/warnings.py new file mode 100644 index 00000000..6759857d --- /dev/null +++ b/Python314_4_x64_Template/Lib/warnings.py @@ -0,0 +1,99 @@ +import sys + +__all__ = [ + "warn", + "warn_explicit", + "showwarning", + "formatwarning", + "filterwarnings", + "simplefilter", + "resetwarnings", + "catch_warnings", + "deprecated", +] + +from _py_warnings import ( + WarningMessage, + _DEPRECATED_MSG, + _OptionError, + _add_filter, + _deprecated, + 
_filters_mutated, + _filters_mutated_lock_held, + _filters_version, + _formatwarning_orig, + _formatwarnmsg, + _formatwarnmsg_impl, + _get_context, + _get_filters, + _getaction, + _getcategory, + _is_filename_to_skip, + _is_internal_filename, + _is_internal_frame, + _lock, + _new_context, + _next_external_frame, + _processoptions, + _set_context, + _set_module, + _setoption, + _setup_defaults, + _showwarning_orig, + _showwarnmsg, + _showwarnmsg_impl, + _use_context, + _warn_unawaited_coroutine, + _warnings_context, + catch_warnings, + defaultaction, + deprecated, + filters, + filterwarnings, + formatwarning, + onceregistry, + resetwarnings, + showwarning, + simplefilter, + warn, + warn_explicit, +) + +try: + # Try to use the C extension, this will replace some parts of the + # _py_warnings implementation imported above. + from _warnings import ( + _acquire_lock, + _defaultaction as defaultaction, + _filters_mutated_lock_held, + _onceregistry as onceregistry, + _release_lock, + _warnings_context, + filters, + warn, + warn_explicit, + ) + + _warnings_defaults = True + + class _Lock: + def __enter__(self): + _acquire_lock() + return self + + def __exit__(self, *args): + _release_lock() + + _lock = _Lock() +except ImportError: + _warnings_defaults = False + + +# Module initialization +_set_module(sys.modules[__name__]) +_processoptions(sys.warnoptions) +if not _warnings_defaults: + _setup_defaults() + +del _warnings_defaults +del _setup_defaults diff --git a/Python313_13_x64_Template/Lib/wave.py b/Python314_4_x64_Template/Lib/wave.py similarity index 100% rename from Python313_13_x64_Template/Lib/wave.py rename to Python314_4_x64_Template/Lib/wave.py diff --git a/Python314_4_x64_Template/Lib/weakref.py b/Python314_4_x64_Template/Lib/weakref.py new file mode 100644 index 00000000..94e42781 --- /dev/null +++ b/Python314_4_x64_Template/Lib/weakref.py @@ -0,0 +1,574 @@ +"""Weak reference support for Python. + +This module is an implementation of PEP 205: + +https://peps.python.org/pep-0205/ +""" + +# Naming convention: Variables named "wr" are weak reference objects; +# they are called this instead of "ref" to avoid name collisions with +# the module-global ref() function imported from _weakref. + +from _weakref import ( + getweakrefcount, + getweakrefs, + ref, + proxy, + CallableProxyType, + ProxyType, + ReferenceType, + _remove_dead_weakref) + +from _weakrefset import WeakSet + +import _collections_abc # Import after _weakref to avoid circular import. +import sys +import itertools + +ProxyTypes = (ProxyType, CallableProxyType) + +__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs", + "WeakKeyDictionary", "ReferenceType", "ProxyType", + "CallableProxyType", "ProxyTypes", "WeakValueDictionary", + "WeakSet", "WeakMethod", "finalize"] + + +_collections_abc.MutableSet.register(WeakSet) + +class WeakMethod(ref): + """ + A custom `weakref.ref` subclass which simulates a weak reference to + a bound method, working around the lifetime problem of bound methods. + """ + + __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__" + + def __new__(cls, meth, callback=None): + try: + obj = meth.__self__ + func = meth.__func__ + except AttributeError: + raise TypeError("argument should be a bound method, not {}" + .format(type(meth))) from None + def _cb(arg): + # The self-weakref trick is needed to avoid creating a reference + # cycle. 
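+            # (_cb closes over self_wr, a weak reference to this WeakMethod;
+            # capturing self directly would make the callback keep the
+            # WeakMethod alive, recreating the cycle being avoided here.)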
+ self = self_wr() + if self._alive: + self._alive = False + if callback is not None: + callback(self) + self = ref.__new__(cls, obj, _cb) + self._func_ref = ref(func, _cb) + self._meth_type = type(meth) + self._alive = True + self_wr = ref(self) + return self + + def __call__(self): + obj = super().__call__() + func = self._func_ref() + if obj is None or func is None: + return None + return self._meth_type(func, obj) + + def __eq__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is other + return ref.__eq__(self, other) and self._func_ref == other._func_ref + return NotImplemented + + def __ne__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is not other + return ref.__ne__(self, other) or self._func_ref != other._func_ref + return NotImplemented + + __hash__ = ref.__hash__ + + +class WeakValueDictionary(_collections_abc.MutableMapping): + """Mapping class that references values weakly. + + Entries in the dictionary will be discarded when no strong + reference to the value exists anymore + """ + # We inherit the constructor without worrying about the input + # dictionary; since it uses our .update() method, we get the right + # checks (if the other dictionary is a WeakValueDictionary, + # objects are unwrapped on the way out, and we always wrap on the + # way in). + + def __init__(self, other=(), /, **kw): + def remove(wr, selfref=ref(self), _atomic_removal=_remove_dead_weakref): + self = selfref() + if self is not None: + # Atomic removal is necessary since this function + # can be called asynchronously by the GC + _atomic_removal(self.data, wr.key) + self._remove = remove + self.data = {} + self.update(other, **kw) + + def __getitem__(self, key): + o = self.data[key]() + if o is None: + raise KeyError(key) + else: + return o + + def __delitem__(self, key): + del self.data[key] + + def __len__(self): + return len(self.data) + + def __contains__(self, key): + try: + o = self.data[key]() + except KeyError: + return False + return o is not None + + def __repr__(self): + return "<%s at %#x>" % (self.__class__.__name__, id(self)) + + def __setitem__(self, key, value): + self.data[key] = KeyedRef(value, self._remove, key) + + def copy(self): + new = WeakValueDictionary() + for key, wr in self.data.copy().items(): + o = wr() + if o is not None: + new[key] = o + return new + + __copy__ = copy + + def __deepcopy__(self, memo): + from copy import deepcopy + new = self.__class__() + for key, wr in self.data.copy().items(): + o = wr() + if o is not None: + new[deepcopy(key, memo)] = o + return new + + def get(self, key, default=None): + try: + wr = self.data[key] + except KeyError: + return default + else: + o = wr() + if o is None: + # This should only happen + return default + else: + return o + + def items(self): + for k, wr in self.data.copy().items(): + v = wr() + if v is not None: + yield k, v + + def keys(self): + for k, wr in self.data.copy().items(): + if wr() is not None: + yield k + + __iter__ = keys + + def itervaluerefs(self): + """Return an iterator that yields the weak references to the values. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the values around longer than needed. 
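+
+        A minimal sketch of the usual pattern (the name 'd' stands in for
+        a WeakValueDictionary instance):
+
+            for wr in d.itervaluerefs():
+                value = wr()
+                if value is not None:
+                    ...  # 'value' is known to be live at this point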
+ + """ + yield from self.data.copy().values() + + def values(self): + for wr in self.data.copy().values(): + obj = wr() + if obj is not None: + yield obj + + def popitem(self): + while True: + key, wr = self.data.popitem() + o = wr() + if o is not None: + return key, o + + def pop(self, key, *args): + try: + o = self.data.pop(key)() + except KeyError: + o = None + if o is None: + if args: + return args[0] + else: + raise KeyError(key) + else: + return o + + def setdefault(self, key, default=None): + try: + o = self.data[key]() + except KeyError: + o = None + if o is None: + self.data[key] = KeyedRef(default, self._remove, key) + return default + else: + return o + + def update(self, other=None, /, **kwargs): + d = self.data + if other is not None: + if not hasattr(other, "items"): + other = dict(other) + for key, o in other.items(): + d[key] = KeyedRef(o, self._remove, key) + for key, o in kwargs.items(): + d[key] = KeyedRef(o, self._remove, key) + + def valuerefs(self): + """Return a list of weak references to the values. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the values around longer than needed. + + """ + return list(self.data.copy().values()) + + def __ior__(self, other): + self.update(other) + return self + + def __or__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.copy() + c.update(other) + return c + return NotImplemented + + def __ror__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.__class__() + c.update(other) + c.update(self) + return c + return NotImplemented + + +class KeyedRef(ref): + """Specialized reference that includes a key corresponding to the value. + + This is used in the WeakValueDictionary to avoid having to create + a function object for each key stored in the mapping. A shared + callback object can use the 'key' attribute of a KeyedRef instead + of getting a reference to the key from an enclosing scope. + + """ + + __slots__ = "key", + + def __new__(type, ob, callback, key): + self = ref.__new__(type, ob, callback) + self.key = key + return self + + def __init__(self, ob, callback, key): + super().__init__(ob, callback) + + +class WeakKeyDictionary(_collections_abc.MutableMapping): + """ Mapping class that references keys weakly. + + Entries in the dictionary will be discarded when there is no + longer a strong reference to the key. This can be used to + associate additional data with an object owned by other parts of + an application without adding attributes to those objects. This + can be especially useful with objects that override attribute + accesses. 
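+
+    An illustrative sketch (the names are made up):
+
+        extra_state = WeakKeyDictionary()
+        extra_state[widget] = {"clicks": 0}
+        # the entry disappears once 'widget' is garbage collected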
+ """ + + def __init__(self, dict=None): + self.data = {} + def remove(k, selfref=ref(self)): + self = selfref() + if self is not None: + try: + del self.data[k] + except KeyError: + pass + self._remove = remove + if dict is not None: + self.update(dict) + + def __delitem__(self, key): + del self.data[ref(key)] + + def __getitem__(self, key): + return self.data[ref(key)] + + def __len__(self): + return len(self.data) + + def __repr__(self): + return "<%s at %#x>" % (self.__class__.__name__, id(self)) + + def __setitem__(self, key, value): + self.data[ref(key, self._remove)] = value + + def copy(self): + new = WeakKeyDictionary() + for key, value in self.data.copy().items(): + o = key() + if o is not None: + new[o] = value + return new + + __copy__ = copy + + def __deepcopy__(self, memo): + from copy import deepcopy + new = self.__class__() + for key, value in self.data.copy().items(): + o = key() + if o is not None: + new[o] = deepcopy(value, memo) + return new + + def get(self, key, default=None): + return self.data.get(ref(key),default) + + def __contains__(self, key): + try: + wr = ref(key) + except TypeError: + return False + return wr in self.data + + def items(self): + for wr, value in self.data.copy().items(): + key = wr() + if key is not None: + yield key, value + + def keys(self): + for wr in self.data.copy(): + obj = wr() + if obj is not None: + yield obj + + __iter__ = keys + + def values(self): + for wr, value in self.data.copy().items(): + if wr() is not None: + yield value + + def keyrefs(self): + """Return a list of weak references to the keys. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the keys around longer than needed. + + """ + return list(self.data) + + def popitem(self): + while True: + key, value = self.data.popitem() + o = key() + if o is not None: + return o, value + + def pop(self, key, *args): + return self.data.pop(ref(key), *args) + + def setdefault(self, key, default=None): + return self.data.setdefault(ref(key, self._remove),default) + + def update(self, dict=None, /, **kwargs): + d = self.data + if dict is not None: + if not hasattr(dict, "items"): + dict = type({})(dict) + for key, value in dict.items(): + d[ref(key, self._remove)] = value + if len(kwargs): + self.update(kwargs) + + def __ior__(self, other): + self.update(other) + return self + + def __or__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.copy() + c.update(other) + return c + return NotImplemented + + def __ror__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.__class__() + c.update(other) + c.update(self) + return c + return NotImplemented + + +class finalize: + """Class for finalization of weakrefable objects + + finalize(obj, func, *args, **kwargs) returns a callable finalizer + object which will be called when obj is garbage collected. The + first time the finalizer is called it evaluates func(*arg, **kwargs) + and returns the result. After this the finalizer is dead, and + calling it just returns None. + + When the program exits any remaining finalizers for which the + atexit attribute is true will be run in reverse order of creation. + By default atexit is true. + """ + + # Finalizer objects don't have any state of their own. They are + # just used as keys to lookup _Info objects in the registry. 
This + # ensures that they cannot be part of a ref-cycle. + + __slots__ = () + _registry = {} + _shutdown = False + _index_iter = itertools.count() + _dirty = False + _registered_with_atexit = False + + class _Info: + __slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index") + + def __init__(self, obj, func, /, *args, **kwargs): + if not self._registered_with_atexit: + # We may register the exit function more than once because + # of a thread race, but that is harmless + import atexit + atexit.register(self._exitfunc) + finalize._registered_with_atexit = True + info = self._Info() + info.weakref = ref(obj, self) + info.func = func + info.args = args + info.kwargs = kwargs or None + info.atexit = True + info.index = next(self._index_iter) + self._registry[self] = info + finalize._dirty = True + + def __call__(self, _=None): + """If alive then mark as dead and return func(*args, **kwargs); + otherwise return None""" + info = self._registry.pop(self, None) + if info and not self._shutdown: + return info.func(*info.args, **(info.kwargs or {})) + + def detach(self): + """If alive then mark as dead and return (obj, func, args, kwargs); + otherwise return None""" + info = self._registry.get(self) + obj = info and info.weakref() + if obj is not None and self._registry.pop(self, None): + return (obj, info.func, info.args, info.kwargs or {}) + + def peek(self): + """If alive then return (obj, func, args, kwargs); + otherwise return None""" + info = self._registry.get(self) + obj = info and info.weakref() + if obj is not None: + return (obj, info.func, info.args, info.kwargs or {}) + + @property + def alive(self): + """Whether finalizer is alive""" + return self in self._registry + + @property + def atexit(self): + """Whether finalizer should be called at exit""" + info = self._registry.get(self) + return bool(info) and info.atexit + + @atexit.setter + def atexit(self, value): + info = self._registry.get(self) + if info: + info.atexit = bool(value) + + def __repr__(self): + info = self._registry.get(self) + obj = info and info.weakref() + if obj is None: + return '<%s object at %#x; dead>' % (type(self).__name__, id(self)) + else: + return '<%s object at %#x; for %r at %#x>' % \ + (type(self).__name__, id(self), type(obj).__name__, id(obj)) + + @classmethod + def _select_for_exit(cls): + # Return live finalizers marked for exit, oldest first + L = [(f,i) for (f,i) in cls._registry.items() if i.atexit] + L.sort(key=lambda item:item[1].index) + return [f for (f,i) in L] + + @classmethod + def _exitfunc(cls): + # At shutdown invoke finalizers for which atexit is true. + # This is called once all other non-daemonic threads have been + # joined. 
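+        #
+        # A hypothetical sketch of the ordering preserved here (obj_a and
+        # obj_b are illustrative names):
+        #
+        #     f1 = weakref.finalize(obj_a, print, "registered first")
+        #     f2 = weakref.finalize(obj_b, print, "registered second")
+        #     # at interpreter exit, f2 runs before f1 (reverse order of
+        #     # creation)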
+ reenable_gc = False + try: + if cls._registry: + import gc + if gc.isenabled(): + reenable_gc = True + gc.disable() + pending = None + while True: + if pending is None or finalize._dirty: + pending = cls._select_for_exit() + finalize._dirty = False + if not pending: + break + f = pending.pop() + try: + # gc is disabled, so (assuming no daemonic + # threads) the following is the only line in + # this function which might trigger creation + # of a new finalizer + f() + except Exception: + sys.excepthook(*sys.exc_info()) + assert f not in cls._registry + finally: + # prevent any more finalizers from executing during shutdown + finalize._shutdown = True + if reenable_gc: + gc.enable() diff --git a/Python314_4_x64_Template/Lib/webbrowser.py b/Python314_4_x64_Template/Lib/webbrowser.py new file mode 100644 index 00000000..0e0b5034 --- /dev/null +++ b/Python314_4_x64_Template/Lib/webbrowser.py @@ -0,0 +1,762 @@ +"""Interfaces for launching and remotely controlling web browsers.""" +# Maintained by Georg Brandl. + +import os +import shlex +import shutil +import sys +import subprocess +import threading + +__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"] + + +class Error(Exception): + pass + + +_lock = threading.RLock() +_browsers = {} # Dictionary of available browser controllers +_tryorder = None # Preference order of available browsers +_os_preferred_browser = None # The preferred browser + + +def register(name, klass, instance=None, *, preferred=False): + """Register a browser connector.""" + with _lock: + if _tryorder is None: + register_standard_browsers() + _browsers[name.lower()] = [klass, instance] + + # Preferred browsers go to the front of the list. + # Need to match to the default browser returned by xdg-settings, which + # may be of the form e.g. "firefox.desktop". + if preferred or (_os_preferred_browser and f'{name}.desktop' == _os_preferred_browser): + _tryorder.insert(0, name) + else: + _tryorder.append(name) + + +def get(using=None): + """Return a browser launcher instance appropriate for the environment.""" + if _tryorder is None: + with _lock: + if _tryorder is None: + register_standard_browsers() + if using is not None: + alternatives = [using] + else: + alternatives = _tryorder + for browser in alternatives: + if '%s' in browser: + # User gave us a command line, split it into name and args + browser = shlex.split(browser) + if browser[-1] == '&': + return BackgroundBrowser(browser[:-1]) + else: + return GenericBrowser(browser) + else: + # User gave us a browser name or path. + try: + command = _browsers[browser.lower()] + except KeyError: + command = _synthesize(browser) + if command[1] is not None: + return command[1] + elif command[0] is not None: + return command[0]() + raise Error("could not locate runnable browser") + + +# Please note: the following definition hides a builtin function. +# It is recommended one does "import webbrowser" and uses webbrowser.open(url) +# instead of "from webbrowser import *". + +def open(url, new=0, autoraise=True): + """Display url using the default browser. + + If possible, open url in a location determined by new. + - 0: the same browser window (the default). + - 1: a new browser window. + - 2: a new browser page ("tab"). + If possible, autoraise raises the window (the default) or not. + + If opening the browser succeeds, return True. + If there is a problem, return False. 
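+
+    A minimal usage sketch (the URL is illustrative):
+
+        import webbrowser
+        webbrowser.open("https://example.com")           # reuse a window
+        webbrowser.open("https://example.com", new=2)    # prefer a new tab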
+ """ + if _tryorder is None: + with _lock: + if _tryorder is None: + register_standard_browsers() + for name in _tryorder: + browser = get(name) + if browser.open(url, new, autoraise): + return True + return False + + +def open_new(url): + """Open url in a new window of the default browser. + + If not possible, then open url in the only browser window. + """ + return open(url, 1) + + +def open_new_tab(url): + """Open url in a new page ("tab") of the default browser. + + If not possible, then the behavior becomes equivalent to open_new(). + """ + return open(url, 2) + + +def _synthesize(browser, *, preferred=False): + """Attempt to synthesize a controller based on existing controllers. + + This is useful to create a controller when a user specifies a path to + an entry in the BROWSER environment variable -- we can copy a general + controller to operate using a specific installation of the desired + browser in this way. + + If we can't create a controller in this way, or if there is no + executable for the requested browser, return [None, None]. + + """ + cmd = browser.split()[0] + if not shutil.which(cmd): + return [None, None] + name = os.path.basename(cmd) + try: + command = _browsers[name.lower()] + except KeyError: + return [None, None] + # now attempt to clone to fit the new name: + controller = command[1] + if controller and name.lower() == controller.basename: + import copy + controller = copy.copy(controller) + controller.name = browser + controller.basename = os.path.basename(browser) + register(browser, None, instance=controller, preferred=preferred) + return [None, controller] + return [None, None] + + +# General parent classes + +class BaseBrowser: + """Parent class for all browsers. Do not use directly.""" + + args = ['%s'] + + def __init__(self, name=""): + self.name = name + self.basename = name + + def open(self, url, new=0, autoraise=True): + raise NotImplementedError + + def open_new(self, url): + return self.open(url, 1) + + def open_new_tab(self, url): + return self.open(url, 2) + + @staticmethod + def _check_url(url): + """Ensures that the URL is safe to pass to subprocesses as a parameter""" + if url and url.lstrip().startswith("-"): + raise ValueError(f"Invalid URL (leading dash disallowed): {url!r}") + + +class GenericBrowser(BaseBrowser): + """Class for all browsers started with a command + and without remote functionality.""" + + def __init__(self, name): + if isinstance(name, str): + self.name = name + self.args = ["%s"] + else: + # name should be a list with arguments + self.name = name[0] + self.args = name[1:] + self.basename = os.path.basename(self.name) + + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + cmdline = [self.name] + [arg.replace("%s", url) + for arg in self.args] + try: + if sys.platform[:3] == 'win': + p = subprocess.Popen(cmdline) + else: + p = subprocess.Popen(cmdline, close_fds=True) + return not p.wait() + except OSError: + return False + + +class BackgroundBrowser(GenericBrowser): + """Class for all browsers which are to be started in the + background.""" + + def open(self, url, new=0, autoraise=True): + cmdline = [self.name] + [arg.replace("%s", url) + for arg in self.args] + sys.audit("webbrowser.open", url) + self._check_url(url) + try: + if sys.platform[:3] == 'win': + p = subprocess.Popen(cmdline) + else: + p = subprocess.Popen(cmdline, close_fds=True, + start_new_session=True) + return p.poll() is None + except OSError: + return False + + +class UnixBrowser(BaseBrowser): + 
"""Parent class for all Unix browsers with remote functionality.""" + + raise_opts = None + background = False + redirect_stdout = True + # In remote_args, %s will be replaced with the requested URL. %action will + # be replaced depending on the value of 'new' passed to open. + # remote_action is used for new=0 (open). If newwin is not None, it is + # used for new=1 (open_new). If newtab is not None, it is used for + # new=3 (open_new_tab). After both substitutions are made, any empty + # strings in the transformed remote_args list will be removed. + remote_args = ['%action', '%s'] + remote_action = None + remote_action_newwin = None + remote_action_newtab = None + + def _invoke(self, args, remote, autoraise, url=None): + raise_opt = [] + if remote and self.raise_opts: + # use autoraise argument only for remote invocation + autoraise = int(autoraise) + opt = self.raise_opts[autoraise] + if opt: + raise_opt = [opt] + + cmdline = [self.name] + raise_opt + args + + if remote or self.background: + inout = subprocess.DEVNULL + else: + # for TTY browsers, we need stdin/out + inout = None + p = subprocess.Popen(cmdline, close_fds=True, stdin=inout, + stdout=(self.redirect_stdout and inout or None), + stderr=inout, start_new_session=True) + if remote: + # wait at most five seconds. If the subprocess is not finished, the + # remote invocation has (hopefully) started a new instance. + try: + rc = p.wait(5) + # if remote call failed, open() will try direct invocation + return not rc + except subprocess.TimeoutExpired: + return True + elif self.background: + if p.poll() is None: + return True + else: + return False + else: + return not p.wait() + + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + if new == 0: + action = self.remote_action + elif new == 1: + action = self.remote_action_newwin + elif new == 2: + if self.remote_action_newtab is None: + action = self.remote_action_newwin + else: + action = self.remote_action_newtab + else: + raise Error("Bad 'new' parameter to open(); " + f"expected 0, 1, or 2, got {new}") + + args = [arg.replace("%s", url).replace("%action", action) + for arg in self.remote_args] + args = [arg for arg in args if arg] + success = self._invoke(args, True, autoraise, url) + if not success: + # remote invocation failed, try straight way + args = [arg.replace("%s", url) for arg in self.args] + return self._invoke(args, False, False) + else: + return True + + +class Mozilla(UnixBrowser): + """Launcher class for Mozilla browsers.""" + + remote_args = ['%action', '%s'] + remote_action = "" + remote_action_newwin = "-new-window" + remote_action_newtab = "-new-tab" + background = True + + +class Epiphany(UnixBrowser): + """Launcher class for Epiphany browser.""" + + raise_opts = ["-noraise", ""] + remote_args = ['%action', '%s'] + remote_action = "-n" + remote_action_newwin = "-w" + background = True + + +class Chrome(UnixBrowser): + """Launcher class for Google Chrome browser.""" + + remote_args = ['%action', '%s'] + remote_action = "" + remote_action_newwin = "--new-window" + remote_action_newtab = "" + background = True + + +Chromium = Chrome + + +class Opera(UnixBrowser): + """Launcher class for Opera browser.""" + + remote_args = ['%action', '%s'] + remote_action = "" + remote_action_newwin = "--new-window" + remote_action_newtab = "" + background = True + + +class Elinks(UnixBrowser): + """Launcher class for Elinks browsers.""" + + remote_args = ['-remote', 'openURL(%s%action)'] + remote_action = "" + 
remote_action_newwin = ",new-window" + remote_action_newtab = ",new-tab" + background = False + + # elinks doesn't like its stdout to be redirected - + # it uses redirected stdout as a signal to do -dump + redirect_stdout = False + + +class Konqueror(BaseBrowser): + """Controller for the KDE File Manager (kfm, or Konqueror). + + See the output of ``kfmclient --commands`` + for more information on the Konqueror remote-control interface. + """ + + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + # XXX Currently I know no way to prevent KFM from opening a new win. + if new == 2: + action = "newTab" + else: + action = "openURL" + + devnull = subprocess.DEVNULL + + try: + p = subprocess.Popen(["kfmclient", action, url], + close_fds=True, stdin=devnull, + stdout=devnull, stderr=devnull) + except OSError: + # fall through to next variant + pass + else: + p.wait() + # kfmclient's return code unfortunately has no meaning as it seems + return True + + try: + p = subprocess.Popen(["konqueror", "--silent", url], + close_fds=True, stdin=devnull, + stdout=devnull, stderr=devnull, + start_new_session=True) + except OSError: + # fall through to next variant + pass + else: + if p.poll() is None: + # Should be running now. + return True + + try: + p = subprocess.Popen(["kfm", "-d", url], + close_fds=True, stdin=devnull, + stdout=devnull, stderr=devnull, + start_new_session=True) + except OSError: + return False + else: + return p.poll() is None + + +class Edge(UnixBrowser): + """Launcher class for Microsoft Edge browser.""" + + remote_args = ['%action', '%s'] + remote_action = "" + remote_action_newwin = "--new-window" + remote_action_newtab = "" + background = True + + +# +# Platform support for Unix +# + +# These are the right tests because all these Unix browsers require either +# a console terminal or an X display to run. + +def register_X_browsers(): + + # use xdg-open if around + if shutil.which("xdg-open"): + register("xdg-open", None, BackgroundBrowser("xdg-open")) + + # Opens an appropriate browser for the URL scheme according to + # freedesktop.org settings (GNOME, KDE, XFCE, etc.) + if shutil.which("gio"): + register("gio", None, BackgroundBrowser(["gio", "open", "--", "%s"])) + + xdg_desktop = os.getenv("XDG_CURRENT_DESKTOP", "").split(":") + + # The default GNOME3 browser + if (("GNOME" in xdg_desktop or + "GNOME_DESKTOP_SESSION_ID" in os.environ) and + shutil.which("gvfs-open")): + register("gvfs-open", None, BackgroundBrowser("gvfs-open")) + + # The default KDE browser + if (("KDE" in xdg_desktop or + "KDE_FULL_SESSION" in os.environ) and + shutil.which("kfmclient")): + register("kfmclient", Konqueror, Konqueror("kfmclient")) + + # Common symbolic link for the default X11 browser + if shutil.which("x-www-browser"): + register("x-www-browser", None, BackgroundBrowser("x-www-browser")) + + # The Mozilla browsers + for browser in ("firefox", "iceweasel", "seamonkey", "mozilla-firefox", + "mozilla"): + if shutil.which(browser): + register(browser, None, Mozilla(browser)) + + # Konqueror/kfm, the KDE browser. 
+ if shutil.which("kfm"): + register("kfm", Konqueror, Konqueror("kfm")) + elif shutil.which("konqueror"): + register("konqueror", Konqueror, Konqueror("konqueror")) + + # Gnome's Epiphany + if shutil.which("epiphany"): + register("epiphany", None, Epiphany("epiphany")) + + # Google Chrome/Chromium browsers + for browser in ("google-chrome", "chrome", "chromium", "chromium-browser"): + if shutil.which(browser): + register(browser, None, Chrome(browser)) + + # Opera, quite popular + if shutil.which("opera"): + register("opera", None, Opera("opera")) + + if shutil.which("microsoft-edge"): + register("microsoft-edge", None, Edge("microsoft-edge")) + + +def register_standard_browsers(): + global _tryorder + _tryorder = [] + + if sys.platform == 'darwin': + register("MacOSX", None, MacOSXOSAScript('default')) + register("chrome", None, MacOSXOSAScript('google chrome')) + register("firefox", None, MacOSXOSAScript('firefox')) + register("safari", None, MacOSXOSAScript('safari')) + # macOS can use below Unix support (but we prefer using the macOS + # specific stuff) + + if sys.platform == "ios": + register("iosbrowser", None, IOSBrowser(), preferred=True) + + if sys.platform == "serenityos": + # SerenityOS webbrowser, simply called "Browser". + register("Browser", None, BackgroundBrowser("Browser")) + + if sys.platform[:3] == "win": + # First try to use the default Windows browser + register("windows-default", WindowsDefault) + + # Detect some common Windows browsers, fallback to Microsoft Edge + # location in 64-bit Windows + edge64 = os.path.join(os.environ.get("PROGRAMFILES(x86)", "C:\\Program Files (x86)"), + "Microsoft\\Edge\\Application\\msedge.exe") + # location in 32-bit Windows + edge32 = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"), + "Microsoft\\Edge\\Application\\msedge.exe") + for browser in ("firefox", "seamonkey", "mozilla", "chrome", + "opera", edge64, edge32): + if shutil.which(browser): + register(browser, None, BackgroundBrowser(browser)) + if shutil.which("MicrosoftEdge.exe"): + register("microsoft-edge", None, Edge("MicrosoftEdge.exe")) + else: + # Prefer X browsers if present + # + # NOTE: Do not check for X11 browser on macOS, + # XQuartz installation sets a DISPLAY environment variable and will + # autostart when someone tries to access the display. Mac users in + # general don't need an X11 browser. 
+        if sys.platform != "darwin" and (os.environ.get("DISPLAY") or os.environ.get("WAYLAND_DISPLAY")):
+            try:
+                cmd = "xdg-settings get default-web-browser".split()
+                raw_result = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
+                result = raw_result.decode().strip()
+            except (FileNotFoundError, subprocess.CalledProcessError,
+                    PermissionError, NotADirectoryError):
+                pass
+            else:
+                global _os_preferred_browser
+                _os_preferred_browser = result
+
+        register_X_browsers()
+
+        # Also try console browsers
+        if os.environ.get("TERM"):
+            # Common symbolic link for the default text-based browser
+            if shutil.which("www-browser"):
+                register("www-browser", None, GenericBrowser("www-browser"))
+            # The Links/elinks browsers
+            if shutil.which("links"):
+                register("links", None, GenericBrowser("links"))
+            if shutil.which("elinks"):
+                register("elinks", None, Elinks("elinks"))
+            # The Lynx browser
+            if shutil.which("lynx"):
+                register("lynx", None, GenericBrowser("lynx"))
+            # The w3m browser
+            if shutil.which("w3m"):
+                register("w3m", None, GenericBrowser("w3m"))
+
+    # OK, now that we know what the default preference orders for each
+    # platform are, allow user to override them with the BROWSER variable.
+    if "BROWSER" in os.environ:
+        userchoices = os.environ["BROWSER"].split(os.pathsep)
+        userchoices.reverse()
+
+        # Treat choices in same way as if passed into get() but do register
+        # and prepend to _tryorder
+        for cmdline in userchoices:
+            if all(x not in cmdline for x in " \t"):
+                # Assume this is the name of a registered command, use
+                # that unless it is a GenericBrowser.
+                try:
+                    command = _browsers[cmdline.lower()]
+                except KeyError:
+                    pass
+                else:
+                    if not isinstance(command[1], GenericBrowser):
+                        _tryorder.insert(0, cmdline.lower())
+                        continue
+
+            if cmdline != '':
+                cmd = _synthesize(cmdline, preferred=True)
+                if cmd[1] is None:
+                    register(cmdline, None, GenericBrowser(cmdline), preferred=True)
+
+    # what to do if _tryorder is now empty?
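+
+# A hypothetical illustration of the BROWSER override handled above; entries
+# are os.pathsep-separated and may embed '%s' where the URL belongs:
+#
+#     BROWSER='firefox -new-tab %s:lynx' python -m webbrowser https://example.com
+#
+# Here 'firefox -new-tab %s' is tried first as a full command line, with
+# lynx as the fallback.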
+ + +# +# Platform support for Windows +# + +if sys.platform[:3] == "win": + class WindowsDefault(BaseBrowser): + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + try: + os.startfile(url) + except OSError: + # [Error 22] No application is associated with the specified + # file for this operation: '' + return False + else: + return True + +# +# Platform support for macOS +# + +if sys.platform == 'darwin': + class MacOSXOSAScript(BaseBrowser): + def __init__(self, name='default'): + super().__init__(name) + + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + url = url.replace('"', '%22') + if self.name == 'default': + proto, _sep, _rest = url.partition(":") + if _sep and proto.lower() in {"http", "https"}: + # default web URL, don't need to lookup browser + script = f'open location "{url}"' + else: + # if not a web URL, need to lookup default browser to ensure a browser is launched + # this should always work, but is overkill to lookup http handler + # before launching http + script = f""" + use framework "AppKit" + use AppleScript version "2.4" + use scripting additions + + property NSWorkspace : a reference to current application's NSWorkspace + property NSURL : a reference to current application's NSURL + + set http_url to NSURL's URLWithString:"https://python.org" + set browser_url to (NSWorkspace's sharedWorkspace)'s ¬ + URLForApplicationToOpenURL:http_url + set app_path to browser_url's relativePath as text -- NSURL to absolute path '/Applications/Safari.app' + + tell application app_path + activate + open location "{url}" + end tell + """ + else: + script = f''' + tell application "{self.name}" + activate + open location "{url}" + end + ''' + + osapipe = os.popen("/usr/bin/osascript", "w") + if osapipe is None: + return False + + osapipe.write(script) + rc = osapipe.close() + return not rc + +# +# Platform support for iOS +# +if sys.platform == "ios": + from _ios_support import objc + if objc: + # If objc exists, we know ctypes is also importable. + from ctypes import c_void_p, c_char_p, c_ulong + + class IOSBrowser(BaseBrowser): + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + # If ctypes isn't available, we can't open a browser + if objc is None: + return False + + # All the messages in this call return object references. 
+ objc.objc_msgSend.restype = c_void_p + + # This is the equivalent of: + # NSString url_string = + # [NSString stringWithCString:url.encode("utf-8") + # encoding:NSUTF8StringEncoding]; + NSString = objc.objc_getClass(b"NSString") + constructor = objc.sel_registerName(b"stringWithCString:encoding:") + objc.objc_msgSend.argtypes = [c_void_p, c_void_p, c_char_p, c_ulong] + url_string = objc.objc_msgSend( + NSString, + constructor, + url.encode("utf-8"), + 4, # NSUTF8StringEncoding = 4 + ) + + # Create an NSURL object representing the URL + # This is the equivalent of: + # NSURL *nsurl = [NSURL URLWithString:url]; + NSURL = objc.objc_getClass(b"NSURL") + urlWithString_ = objc.sel_registerName(b"URLWithString:") + objc.objc_msgSend.argtypes = [c_void_p, c_void_p, c_void_p] + ns_url = objc.objc_msgSend(NSURL, urlWithString_, url_string) + + # Get the shared UIApplication instance + # This code is the equivalent of: + # UIApplication shared_app = [UIApplication sharedApplication] + UIApplication = objc.objc_getClass(b"UIApplication") + sharedApplication = objc.sel_registerName(b"sharedApplication") + objc.objc_msgSend.argtypes = [c_void_p, c_void_p] + shared_app = objc.objc_msgSend(UIApplication, sharedApplication) + + # Open the URL on the shared application + # This code is the equivalent of: + # [shared_app openURL:ns_url + # options:NIL + # completionHandler:NIL]; + openURL_ = objc.sel_registerName(b"openURL:options:completionHandler:") + objc.objc_msgSend.argtypes = [ + c_void_p, c_void_p, c_void_p, c_void_p, c_void_p + ] + # Method returns void + objc.objc_msgSend.restype = None + objc.objc_msgSend(shared_app, openURL_, ns_url, None, None) + + return True + + +def parse_args(arg_list: list[str] | None): + import argparse + parser = argparse.ArgumentParser( + description="Open URL in a web browser.", color=True, + ) + parser.add_argument("url", help="URL to open") + + group = parser.add_mutually_exclusive_group() + group.add_argument("-n", "--new-window", action="store_const", + const=1, default=0, dest="new_win", + help="open new window") + group.add_argument("-t", "--new-tab", action="store_const", + const=2, default=0, dest="new_win", + help="open new tab") + + args = parser.parse_args(arg_list) + + return args + + +def main(arg_list: list[str] | None = None): + args = parse_args(arg_list) + + open(args.url, args.new_win) + + print("\a") + + +if __name__ == "__main__": + main() diff --git a/Python313_13_x64_Template/Lib/wsgiref/__init__.py b/Python314_4_x64_Template/Lib/wsgiref/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/wsgiref/__init__.py rename to Python314_4_x64_Template/Lib/wsgiref/__init__.py diff --git a/Python313_13_x64_Template/Lib/wsgiref/handlers.py b/Python314_4_x64_Template/Lib/wsgiref/handlers.py similarity index 100% rename from Python313_13_x64_Template/Lib/wsgiref/handlers.py rename to Python314_4_x64_Template/Lib/wsgiref/handlers.py diff --git a/Python314_4_x64_Template/Lib/wsgiref/headers.py b/Python314_4_x64_Template/Lib/wsgiref/headers.py new file mode 100644 index 00000000..eb6ea6a4 --- /dev/null +++ b/Python314_4_x64_Template/Lib/wsgiref/headers.py @@ -0,0 +1,192 @@ +"""Manage HTTP Response Headers + +Much of this module is red-handedly pilfered from email.message in the stdlib, +so portions are Copyright (C) 2001 Python Software Foundation, and were +written by Barry Warsaw. +""" + +# Regular expression that matches 'special' characters in parameters, the +# existence of which force quoting of the parameter value. 
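+# For instance (an illustrative value): a filename parameter value such as
+#     bud gif.png
+# contains a space, one of the characters matched below, so _formatparam()
+# quotes it (filename="bud gif.png") even when quoting was not requested.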
+import re
+tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
+# Disallowed characters for headers and values.
+# HTAB (\x09) is allowed in header values, but
+# not in header names. (RFC 9110 Section 5.5)
+_name_disallowed_re = re.compile(r'[\x00-\x1F\x7F]')
+_value_disallowed_re = re.compile(r'[\x00-\x08\x0A-\x1F\x7F]')
+
+def _formatparam(param, value=None, quote=1):
+    """Convenience function to format and return a key=value pair.
+
+    This will quote the value if needed or if quote is true.
+    """
+    if value is not None and len(value) > 0:
+        if quote or tspecials.search(value):
+            value = value.replace('\\', '\\\\').replace('"', r'\"')
+            return '%s="%s"' % (param, value)
+        else:
+            return '%s=%s' % (param, value)
+    else:
+        return param
+
+
+class Headers:
+    """Manage a collection of HTTP response headers"""
+
+    def __init__(self, headers=None):
+        headers = headers if headers is not None else []
+        if type(headers) is not list:
+            raise TypeError("Headers must be a list of name/value tuples")
+        self._headers = headers
+        if __debug__:
+            for k, v in headers:
+                self._convert_string_type(k, name=True)
+                self._convert_string_type(v, name=False)
+
+    def _convert_string_type(self, value, *, name):
+        """Convert/check value type."""
+        if type(value) is str:
+            regex = (_name_disallowed_re if name else _value_disallowed_re)
+            if regex.search(value):
+                raise ValueError("Control characters not allowed in headers")
+            return value
+        raise AssertionError("Header names/values must be"
+            " of type str (got {0})".format(repr(value)))
+
+    def __len__(self):
+        """Return the total number of headers, including duplicates."""
+        return len(self._headers)
+
+    def __setitem__(self, name, val):
+        """Set the value of a header."""
+        del self[name]
+        self._headers.append(
+            (self._convert_string_type(name, name=True), self._convert_string_type(val, name=False)))
+
+    def __delitem__(self,name):
+        """Delete all occurrences of a header, if present.
+
+        Does *not* raise an exception if the header is missing.
+        """
+        name = self._convert_string_type(name.lower(), name=True)
+        self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
+
+    def __getitem__(self,name):
+        """Get the first header value for 'name'
+
+        Return None if the header is missing instead of raising an exception.
+
+        Note that if the header appeared multiple times, the first
+        occurrence gets returned. Use get_all() to get all
+        the values matching a header field name.
+        """
+        return self.get(name)
+
+    def __contains__(self, name):
+        """Return true if the message contains the header."""
+        return self.get(name) is not None
+
+
+    def get_all(self, name):
+        """Return a list of all the values for the named field.
+
+        These will be sorted in the order they appeared in the original header
+        list or were added to this instance, and may contain duplicates. Any
+        fields deleted and re-inserted are always appended to the header list.
+        If no fields exist with the given name, returns an empty list.
+        """
+        name = self._convert_string_type(name.lower(), name=True)
+        return [kv[1] for kv in self._headers if kv[0].lower()==name]
+
+
+    def get(self,name,default=None):
+        """Get the first header value for 'name', or return 'default'"""
+        name = self._convert_string_type(name.lower(), name=True)
+        for k,v in self._headers:
+            if k.lower()==name:
+                return v
+        return default
+
+
+    def keys(self):
+        """Return a list of all the header field names.
+ + These will be sorted in the order they appeared in the original header + list, or were added to this instance, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + """ + return [k for k, v in self._headers] + + def values(self): + """Return a list of all header values. + + These will be sorted in the order they appeared in the original header + list, or were added to this instance, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + """ + return [v for k, v in self._headers] + + def items(self): + """Get all the header fields and values. + + These will be sorted in the order they were in the original header + list, or were added to this instance, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + """ + return self._headers[:] + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self._headers) + + def __str__(self): + """str() returns the formatted headers, complete with end line, + suitable for direct HTTP transmission.""" + return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['','']) + + def __bytes__(self): + return str(self).encode('iso-8859-1') + + def setdefault(self,name,value): + """Return first matching header value for 'name', or 'value' + + If there is no header named 'name', add a new header with name 'name' + and value 'value'.""" + result = self.get(name) + if result is None: + self._headers.append((self._convert_string_type(name, name=True), + self._convert_string_type(value, name=False))) + return value + else: + return result + + def add_header(self, _name, _value, **_params): + """Extended header setting. + + _name is the header field to add. keyword arguments can be used to set + additional parameters for the header field, with underscores converted + to dashes. Normally the parameter will be added as key="value" unless + value is None, in which case only the key will be added. + + Example: + + h.add_header('content-disposition', 'attachment', filename='bud.gif') + + Note that unlike the corresponding 'email.message' method, this does + *not* handle '(charset, language, value)' tuples: all values must be + strings or None. 
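+
+        With the example above, str(h) would then include a line roughly
+        like (a sketch, not normative output):
+
+            content-disposition: attachment; filename="bud.gif"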
+ """ + parts = [] + if _value is not None: + _value = self._convert_string_type(_value, name=False) + parts.append(_value) + for k, v in _params.items(): + k = self._convert_string_type(k, name=True) + if v is None: + parts.append(k.replace('_', '-')) + else: + v = self._convert_string_type(v, name=False) + parts.append(_formatparam(k.replace('_', '-'), v)) + self._headers.append((self._convert_string_type(_name, name=True), "; ".join(parts))) diff --git a/Python313_13_x64_Template/Lib/wsgiref/simple_server.py b/Python314_4_x64_Template/Lib/wsgiref/simple_server.py similarity index 100% rename from Python313_13_x64_Template/Lib/wsgiref/simple_server.py rename to Python314_4_x64_Template/Lib/wsgiref/simple_server.py diff --git a/Python313_13_x64_Template/Lib/wsgiref/types.py b/Python314_4_x64_Template/Lib/wsgiref/types.py similarity index 100% rename from Python313_13_x64_Template/Lib/wsgiref/types.py rename to Python314_4_x64_Template/Lib/wsgiref/types.py diff --git a/Python313_13_x64_Template/Lib/wsgiref/util.py b/Python314_4_x64_Template/Lib/wsgiref/util.py similarity index 100% rename from Python313_13_x64_Template/Lib/wsgiref/util.py rename to Python314_4_x64_Template/Lib/wsgiref/util.py diff --git a/Python313_13_x64_Template/Lib/wsgiref/validate.py b/Python314_4_x64_Template/Lib/wsgiref/validate.py similarity index 100% rename from Python313_13_x64_Template/Lib/wsgiref/validate.py rename to Python314_4_x64_Template/Lib/wsgiref/validate.py diff --git a/Python313_13_x64_Template/Lib/xml/__init__.py b/Python314_4_x64_Template/Lib/xml/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/__init__.py rename to Python314_4_x64_Template/Lib/xml/__init__.py diff --git a/Python314_4_x64_Template/Lib/xml/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/xml/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..a8feea58 Binary files /dev/null and b/Python314_4_x64_Template/Lib/xml/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/xml/dom/NodeFilter.py b/Python314_4_x64_Template/Lib/xml/dom/NodeFilter.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/dom/NodeFilter.py rename to Python314_4_x64_Template/Lib/xml/dom/NodeFilter.py diff --git a/Python314_4_x64_Template/Lib/xml/dom/__init__.py b/Python314_4_x64_Template/Lib/xml/dom/__init__.py new file mode 100644 index 00000000..dd7fb996 --- /dev/null +++ b/Python314_4_x64_Template/Lib/xml/dom/__init__.py @@ -0,0 +1,140 @@ +"""W3C Document Object Model implementation for Python. + +The Python mapping of the Document Object Model is documented in the +Python Library Reference in the section on the xml.dom package. + +This package contains the following modules: + +minidom -- A simple implementation of the Level 1 DOM with namespace + support added (based on the Level 2 specification) and other + minor Level 2 functionality. + +pulldom -- DOM builder supporting on-demand tree-building for selected + subtrees of the document. + +""" + + +class Node: + """Class giving the NodeType constants.""" + __slots__ = () + + # DOM implementations may use this as a base class for their own + # Node implementations. If they don't, the constants defined here + # should still be used as the canonical definitions as they match + # the values given in the W3C recommendation. Client code can + # safely refer to these values in all tests of Node.nodeType + # values. 
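+    #
+    # A brief illustrative check:
+    #
+    #     from xml.dom import Node, minidom
+    #     doc = minidom.parseString("<root><child/></root>")
+    #     assert doc.documentElement.nodeType == Node.ELEMENT_NODE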
+ + ELEMENT_NODE = 1 + ATTRIBUTE_NODE = 2 + TEXT_NODE = 3 + CDATA_SECTION_NODE = 4 + ENTITY_REFERENCE_NODE = 5 + ENTITY_NODE = 6 + PROCESSING_INSTRUCTION_NODE = 7 + COMMENT_NODE = 8 + DOCUMENT_NODE = 9 + DOCUMENT_TYPE_NODE = 10 + DOCUMENT_FRAGMENT_NODE = 11 + NOTATION_NODE = 12 + + +#ExceptionCode +INDEX_SIZE_ERR = 1 +DOMSTRING_SIZE_ERR = 2 +HIERARCHY_REQUEST_ERR = 3 +WRONG_DOCUMENT_ERR = 4 +INVALID_CHARACTER_ERR = 5 +NO_DATA_ALLOWED_ERR = 6 +NO_MODIFICATION_ALLOWED_ERR = 7 +NOT_FOUND_ERR = 8 +NOT_SUPPORTED_ERR = 9 +INUSE_ATTRIBUTE_ERR = 10 +INVALID_STATE_ERR = 11 +SYNTAX_ERR = 12 +INVALID_MODIFICATION_ERR = 13 +NAMESPACE_ERR = 14 +INVALID_ACCESS_ERR = 15 +VALIDATION_ERR = 16 + + +class DOMException(Exception): + """Abstract base class for DOM exceptions. + Exceptions with specific codes are specializations of this class.""" + + def __init__(self, *args, **kw): + if self.__class__ is DOMException: + raise RuntimeError( + "DOMException should not be instantiated directly") + Exception.__init__(self, *args, **kw) + + def _get_code(self): + return self.code + + +class IndexSizeErr(DOMException): + code = INDEX_SIZE_ERR + +class DomstringSizeErr(DOMException): + code = DOMSTRING_SIZE_ERR + +class HierarchyRequestErr(DOMException): + code = HIERARCHY_REQUEST_ERR + +class WrongDocumentErr(DOMException): + code = WRONG_DOCUMENT_ERR + +class InvalidCharacterErr(DOMException): + code = INVALID_CHARACTER_ERR + +class NoDataAllowedErr(DOMException): + code = NO_DATA_ALLOWED_ERR + +class NoModificationAllowedErr(DOMException): + code = NO_MODIFICATION_ALLOWED_ERR + +class NotFoundErr(DOMException): + code = NOT_FOUND_ERR + +class NotSupportedErr(DOMException): + code = NOT_SUPPORTED_ERR + +class InuseAttributeErr(DOMException): + code = INUSE_ATTRIBUTE_ERR + +class InvalidStateErr(DOMException): + code = INVALID_STATE_ERR + +class SyntaxErr(DOMException): + code = SYNTAX_ERR + +class InvalidModificationErr(DOMException): + code = INVALID_MODIFICATION_ERR + +class NamespaceErr(DOMException): + code = NAMESPACE_ERR + +class InvalidAccessErr(DOMException): + code = INVALID_ACCESS_ERR + +class ValidationErr(DOMException): + code = VALIDATION_ERR + +class UserDataHandler: + """Class giving the operation constants for UserDataHandler.handle().""" + + # Based on DOM Level 3 (WD 9 April 2002) + + NODE_CLONED = 1 + NODE_IMPORTED = 2 + NODE_DELETED = 3 + NODE_RENAMED = 4 + +XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace" +XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/" +XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml" +EMPTY_NAMESPACE = None +EMPTY_PREFIX = None + +from .domreg import getDOMImplementation, registerDOMImplementation # noqa: F401 diff --git a/Python313_13_x64_Template/Lib/xml/dom/domreg.py b/Python314_4_x64_Template/Lib/xml/dom/domreg.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/dom/domreg.py rename to Python314_4_x64_Template/Lib/xml/dom/domreg.py diff --git a/Python313_13_x64_Template/Lib/xml/dom/expatbuilder.py b/Python314_4_x64_Template/Lib/xml/dom/expatbuilder.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/dom/expatbuilder.py rename to Python314_4_x64_Template/Lib/xml/dom/expatbuilder.py diff --git a/Python313_13_x64_Template/Lib/xml/dom/minicompat.py b/Python314_4_x64_Template/Lib/xml/dom/minicompat.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/dom/minicompat.py rename to Python314_4_x64_Template/Lib/xml/dom/minicompat.py diff --git a/Python313_13_x64_Template/Lib/xml/dom/minidom.py 
b/Python314_4_x64_Template/Lib/xml/dom/minidom.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/dom/minidom.py rename to Python314_4_x64_Template/Lib/xml/dom/minidom.py diff --git a/Python313_13_x64_Template/Lib/xml/dom/pulldom.py b/Python314_4_x64_Template/Lib/xml/dom/pulldom.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/dom/pulldom.py rename to Python314_4_x64_Template/Lib/xml/dom/pulldom.py diff --git a/Python313_13_x64_Template/Lib/xml/dom/xmlbuilder.py b/Python314_4_x64_Template/Lib/xml/dom/xmlbuilder.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/dom/xmlbuilder.py rename to Python314_4_x64_Template/Lib/xml/dom/xmlbuilder.py diff --git a/Python313_13_x64_Template/Lib/xml/etree/ElementInclude.py b/Python314_4_x64_Template/Lib/xml/etree/ElementInclude.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/etree/ElementInclude.py rename to Python314_4_x64_Template/Lib/xml/etree/ElementInclude.py diff --git a/Python313_13_x64_Template/Lib/xml/etree/ElementPath.py b/Python314_4_x64_Template/Lib/xml/etree/ElementPath.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/etree/ElementPath.py rename to Python314_4_x64_Template/Lib/xml/etree/ElementPath.py diff --git a/Python314_4_x64_Template/Lib/xml/etree/ElementTree.py b/Python314_4_x64_Template/Lib/xml/etree/ElementTree.py new file mode 100644 index 00000000..dafe5b1b --- /dev/null +++ b/Python314_4_x64_Template/Lib/xml/etree/ElementTree.py @@ -0,0 +1,2102 @@ +"""Lightweight XML support for Python. + + XML is an inherently hierarchical data format, and the most natural way to + represent it is with a tree. This module has two classes for this purpose: + + 1. ElementTree represents the whole XML document as a tree and + + 2. Element represents a single node in this tree. + + Interactions with the whole document (reading and writing to/from files) are + usually done on the ElementTree level. Interactions with a single XML element + and its sub-elements are done on the Element level. + + Element is a flexible container object designed to store hierarchical data + structures in memory. It can be described as a cross between a list and a + dictionary. Each Element has a number of properties associated with it: + + 'tag' - a string containing the element's name. + + 'attributes' - a Python dictionary storing the element's attributes. + + 'text' - a string containing the element's text content. + + 'tail' - an optional string containing text after the element's end tag. + + And a number of child elements stored in a Python sequence. + + To create an element instance, use the Element constructor, + or the SubElement factory function. + + You can also use the ElementTree class to wrap an element structure + and convert it to and from XML. + +""" + +#--------------------------------------------------------------------- +# Licensed to PSF under a Contributor Agreement. +# See https://www.python.org/psf/license for licensing details. +# +# ElementTree +# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved. 
+#
+# fredrik@pythonware.com
+# http://www.pythonware.com
+# --------------------------------------------------------------------
+# The ElementTree toolkit is
+#
+# Copyright (c) 1999-2008 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+__all__ = [
+    # public symbols
+    "Comment",
+    "dump",
+    "Element", "ElementTree",
+    "fromstring", "fromstringlist",
+    "indent", "iselement", "iterparse",
+    "parse", "ParseError",
+    "PI", "ProcessingInstruction",
+    "QName",
+    "SubElement",
+    "tostring", "tostringlist",
+    "TreeBuilder",
+    "VERSION",
+    "XML", "XMLID",
+    "XMLParser", "XMLPullParser",
+    "register_namespace",
+    "canonicalize", "C14NWriterTarget",
+    ]
+
+VERSION = "1.3.0"
+
+import sys
+import re
+import warnings
+import io
+import collections
+import collections.abc
+import contextlib
+import weakref
+
+from . import ElementPath
+
+
+class ParseError(SyntaxError):
+    """An error when parsing an XML document.
+
+    In addition to its exception value, a ParseError contains
+    two extra attributes:
+    'code'     - the specific exception code
+    'position' - the line and column of the error
+
+    """
+    pass
+
+# --------------------------------------------------------------------
+
+
+def iselement(element):
+    """Return True if *element* appears to be an Element."""
+    return hasattr(element, 'tag')
+
+
+class Element:
+    """An XML element.
+
+    This class is the reference implementation of the Element interface.
+
+    An element's length is its number of subelements. That means if you
+    want to check if an element is truly empty, you should check BOTH
+    its length AND its text attribute.
+
+    The element tag, attribute names, and attribute values can be either
+    bytes or strings.
+
+    *tag* is the element name. *attrib* is an optional dictionary containing
+    element attributes. *extra* are additional element attributes given as
+    keyword arguments.
+
+    Example form:
+        <tag attrib>text<child/>...</tag>tail
+
+    """
+
+    tag = None
+    """The element's name."""
+
+    attrib = None
+    """Dictionary of the element's attributes."""
+
+    text = None
+    """
+    Text before first subelement. This is either a string or the value None.
+    Note that if there is no text, this attribute may be either
+    None or the empty string, depending on the parser.
+ + """ + + tail = None + """ + Text after this element's end tag, but before the next sibling element's + start tag. This is either a string or the value None. Note that if there + was no text, this attribute may be either None or an empty string, + depending on the parser. + + """ + + def __init__(self, tag, attrib={}, **extra): + if not isinstance(attrib, dict): + raise TypeError("attrib must be dict, not %s" % ( + attrib.__class__.__name__,)) + self.tag = tag + self.attrib = {**attrib, **extra} + self._children = [] + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__, self.tag, id(self)) + + def makeelement(self, tag, attrib): + """Create a new element with the same type. + + *tag* is a string containing the element name. + *attrib* is a dictionary containing the element attributes. + + Do not call this method, use the SubElement factory function instead. + + """ + return self.__class__(tag, attrib) + + def __copy__(self): + elem = self.makeelement(self.tag, self.attrib) + elem.text = self.text + elem.tail = self.tail + elem[:] = self + return elem + + def __len__(self): + return len(self._children) + + def __bool__(self): + warnings.warn( + "Testing an element's truth value will always return True in " + "future versions. " + "Use specific 'len(elem)' or 'elem is not None' test instead.", + DeprecationWarning, stacklevel=2 + ) + return len(self._children) != 0 # emulate old behaviour, for now + + def __getitem__(self, index): + return self._children[index] + + def __setitem__(self, index, element): + if isinstance(index, slice): + for elt in element: + self._assert_is_element(elt) + else: + self._assert_is_element(element) + self._children[index] = element + + def __delitem__(self, index): + del self._children[index] + + def append(self, subelement): + """Add *subelement* to the end of this element. + + The new element will appear in document order after the last existing + subelement (or directly after the text, if it's the first subelement), + but before the end tag for this element. + + """ + self._assert_is_element(subelement) + self._children.append(subelement) + + def extend(self, elements): + """Append subelements from a sequence. + + *elements* is a sequence with zero or more elements. + + """ + for element in elements: + self._assert_is_element(element) + self._children.append(element) + + def insert(self, index, subelement): + """Insert *subelement* at position *index*.""" + self._assert_is_element(subelement) + self._children.insert(index, subelement) + + def _assert_is_element(self, e): + # Need to refer to the actual Python implementation, not the + # shadowing C implementation. + if not isinstance(e, _Element_Py): + raise TypeError('expected an Element, not %s' % type(e).__name__) + + def remove(self, subelement): + """Remove matching subelement. + + Unlike the find methods, this method compares elements based on + identity, NOT ON tag value or contents. To remove subelements by + other means, the easiest way is to use a list comprehension to + select what elements to keep, and then use slice assignment to update + the parent element. + + ValueError is raised if a matching element could not be found. + + """ + # assert iselement(element) + try: + self._children.remove(subelement) + except ValueError: + # to align the error message with the C implementation + raise ValueError("Element.remove(x): element not found") from None + + def find(self, path, namespaces=None): + """Find first matching element by tag name or path. 
+ + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + return ElementPath.find(self, path, namespaces) + + def findtext(self, path, default=None, namespaces=None): + """Find text for first matching element by tag name or path. + + *path* is a string having either an element tag or an XPath, + *default* is the value to return if the element was not found, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return text content of first matching element, or default value if + none was found. Note that if an element is found having no text + content, the empty string is returned. + + """ + return ElementPath.findtext(self, path, default, namespaces) + + def findall(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Returns list containing all matching elements in document order. + + """ + return ElementPath.findall(self, path, namespaces) + + def iterfind(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return an iterable yielding all matching elements in document order. + + """ + return ElementPath.iterfind(self, path, namespaces) + + def clear(self): + """Reset element. + + This function removes all subelements, clears all attributes, and sets + the text and tail attributes to None. + + """ + self.attrib.clear() + self._children = [] + self.text = self.tail = None + + def get(self, key, default=None): + """Get element attribute. + + Equivalent to attrib.get, but some implementations may handle this a + bit more efficiently. *key* is what attribute to look for, and + *default* is what to return if the attribute was not found. + + Returns a string containing the attribute value, or the default if + attribute was not found. + + """ + return self.attrib.get(key, default) + + def set(self, key, value): + """Set element attribute. + + Equivalent to attrib[key] = value, but some implementations may handle + this a bit more efficiently. *key* is what attribute to set, and + *value* is the attribute value to set it to. + + """ + self.attrib[key] = value + + def keys(self): + """Get list of attribute names. + + Names are returned in an arbitrary order, just like an ordinary + Python dict. Equivalent to attrib.keys() + + """ + return self.attrib.keys() + + def items(self): + """Get element attributes as a sequence. + + The attributes are returned in arbitrary order. Equivalent to + attrib.items(). + + Return a list of (name, value) tuples. + + """ + return self.attrib.items() + + def iter(self, tag=None): + """Create tree iterator. + + The iterator loops over the element and all subelements in document + order, returning all elements with a matching tag. + + If the tree structure is modified during iteration, new or removed + elements may or may not be included. To get a stable set, use the + list() function on the iterator, and loop over the resulting list. + + *tag* is what tags to look for (default is to return all elements) + + Return an iterator containing all the matching elements. 
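+
+        A small usage sketch (names are illustrative):
+
+            for elem in root.iter('item'):
+                print(elem.tag, elem.attrib)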
+
+        """
+        if tag == "*":
+            tag = None
+        if tag is None or self.tag == tag:
+            yield self
+        for e in self._children:
+            yield from e.iter(tag)
+
+    def itertext(self):
+        """Create text iterator.
+
+        The iterator loops over the element and all subelements in document
+        order, returning all inner text.
+
+        """
+        tag = self.tag
+        if not isinstance(tag, str) and tag is not None:
+            return
+        t = self.text
+        if t:
+            yield t
+        for e in self:
+            yield from e.itertext()
+            t = e.tail
+            if t:
+                yield t
+
+
+def SubElement(parent, tag, attrib={}, **extra):
+    """Subelement factory which creates an element instance, and appends it
+    to an existing parent.
+
+    The element tag, attribute names, and attribute values can be either
+    bytes or Unicode strings.
+
+    *parent* is the parent element, *tag* is the subelement's name, *attrib*
+    is an optional dictionary containing element attributes, *extra* are
+    additional attributes given as keyword arguments.
+
+    """
+    attrib = {**attrib, **extra}
+    element = parent.makeelement(tag, attrib)
+    parent.append(element)
+    return element
+
+
+def Comment(text=None):
+    """Comment element factory.
+
+    This function creates a special element which the standard serializer
+    serializes as an XML comment.
+
+    *text* is a string containing the comment string.
+
+    """
+    element = Element(Comment)
+    element.text = text
+    return element
+
+
+def ProcessingInstruction(target, text=None):
+    """Processing Instruction element factory.
+
+    This function creates a special element which the standard serializer
+    serializes as an XML processing instruction.
+
+    *target* is a string containing the processing instruction, *text* is a
+    string containing the processing instruction contents, if any.
+
+    """
+    element = Element(ProcessingInstruction)
+    element.text = target
+    if text:
+        element.text = element.text + " " + text
+    return element
+
+PI = ProcessingInstruction
+
+
+class QName:
+    """Qualified name wrapper.
+
+    This class can be used to wrap a QName attribute value in order to get
+    proper namespace handling on output.
+
+    *text_or_uri* is a string containing the QName value either in the form
+    {uri}local, or if the tag argument is given, the URI part of a QName.
+
+    *tag* is an optional argument which if given, will make the first
+    argument (text_or_uri) be interpreted as a URI, and this argument (tag)
+    be interpreted as a local name.
+
+    """
+    def __init__(self, text_or_uri, tag=None):
+        if tag:
+            text_or_uri = "{%s}%s" % (text_or_uri, tag)
+        self.text = text_or_uri
+    def __str__(self):
+        return self.text
+    def __repr__(self):
+        return '<%s %r>' % (self.__class__.__name__, self.text)
+    def __hash__(self):
+        return hash(self.text)
+    def __le__(self, other):
+        if isinstance(other, QName):
+            return self.text <= other.text
+        return self.text <= other
+    def __lt__(self, other):
+        if isinstance(other, QName):
+            return self.text < other.text
+        return self.text < other
+    def __ge__(self, other):
+        if isinstance(other, QName):
+            return self.text >= other.text
+        return self.text >= other
+    def __gt__(self, other):
+        if isinstance(other, QName):
+            return self.text > other.text
+        return self.text > other
+    def __eq__(self, other):
+        if isinstance(other, QName):
+            return self.text == other.text
+        return self.text == other
+
+# --------------------------------------------------------------------
+
+
+class ElementTree:
+    """An XML element hierarchy.
+
+    This class also provides support for serialization to and from
+    standard XML.
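+
+    A short usage sketch (the file name is illustrative):
+
+        tree = ElementTree(file="data.xml")
+        root = tree.getroot()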
+ + *element* is an optional root element node, + *file* is an optional file handle or file name of an XML file whose + contents will be used to initialize the tree with. + + """ + def __init__(self, element=None, file=None): + if element is not None and not iselement(element): + raise TypeError('expected an Element, not %s' % + type(element).__name__) + self._root = element # first node + if file: + self.parse(file) + + def getroot(self): + """Return root element of this tree.""" + return self._root + + def _setroot(self, element): + """Replace root element of this tree. + + This will discard the current contents of the tree and replace it + with the given element. Use with care! + + """ + if not iselement(element): + raise TypeError('expected an Element, not %s' + % type(element).__name__) + self._root = element + + def parse(self, source, parser=None): + """Load external XML document into element tree. + + *source* is a file name or file object, *parser* is an optional parser + instance that defaults to XMLParser. + + ParseError is raised if the parser fails to parse the document. + + Returns the root element of the given source document. + + """ + close_source = False + if not hasattr(source, "read"): + source = open(source, "rb") + close_source = True + try: + if parser is None: + # If no parser was specified, create a default XMLParser + parser = XMLParser() + if hasattr(parser, '_parse_whole'): + # The default XMLParser, when it comes from an accelerator, + # can define an internal _parse_whole API for efficiency. + # It can be used to parse the whole source without feeding + # it with chunks. + self._root = parser._parse_whole(source) + return self._root + while data := source.read(65536): + parser.feed(data) + self._root = parser.close() + return self._root + finally: + if close_source: + source.close() + + def iter(self, tag=None): + """Create and return tree iterator for the root element. + + The iterator loops over all elements in this tree, in document order. + + *tag* is a string with the tag name to iterate over + (default is to return all elements). + + """ + # assert self._root is not None + return self._root.iter(tag) + + def find(self, path, namespaces=None): + """Find first matching element by tag name or path. + + Same as getroot().find(path), which is Element.find() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. If you rely on the current " + "behaviour, change it to %r" % path, + FutureWarning, stacklevel=2 + ) + return self._root.find(path, namespaces) + + def findtext(self, path, default=None, namespaces=None): + """Find first matching element by tag name or path. + + Same as getroot().findtext(path), which is Element.findtext() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. 
+                "behaviour, change it to %r" % path,
+                FutureWarning, stacklevel=2
+                )
+        return self._root.findtext(path, default, namespaces)
+
+    def findall(self, path, namespaces=None):
+        """Find all matching subelements by tag name or path.
+
+        Same as getroot().findall(path), which is Element.findall().
+
+        *path* is a string having either an element tag or an XPath,
+        *namespaces* is an optional mapping from namespace prefix to full name.
+
+        Return list containing all matching elements in document order.
+
+        """
+        # assert self._root is not None
+        if path[:1] == "/":
+            path = "." + path
+            warnings.warn(
+                "This search is broken in 1.3 and earlier, and will be "
+                "fixed in a future version.  If you rely on the current "
+                "behaviour, change it to %r" % path,
+                FutureWarning, stacklevel=2
+                )
+        return self._root.findall(path, namespaces)
+
+    def iterfind(self, path, namespaces=None):
+        """Find all matching subelements by tag name or path.
+
+        Same as getroot().iterfind(path), which is element.iterfind()
+
+        *path* is a string having either an element tag or an XPath,
+        *namespaces* is an optional mapping from namespace prefix to full name.
+
+        Return an iterable yielding all matching elements in document order.
+
+        """
+        # assert self._root is not None
+        if path[:1] == "/":
+            path = "." + path
+            warnings.warn(
+                "This search is broken in 1.3 and earlier, and will be "
+                "fixed in a future version.  If you rely on the current "
+                "behaviour, change it to %r" % path,
+                FutureWarning, stacklevel=2
+                )
+        return self._root.iterfind(path, namespaces)
+
+    def write(self, file_or_filename,
+              encoding=None,
+              xml_declaration=None,
+              default_namespace=None,
+              method=None, *,
+              short_empty_elements=True):
+        """Write element tree to a file as XML.
+
+        Arguments:
+          *file_or_filename* -- file name or a file object opened for writing
+
+          *encoding* -- the output encoding (default: US-ASCII)
+
+          *xml_declaration* -- bool indicating if an XML declaration should be
+                               added to the output. If None, an XML declaration
+                               is added if encoding IS NOT either of:
+                               US-ASCII, UTF-8, or Unicode
+
+          *default_namespace* -- sets the default XML namespace (for "xmlns")
+
+          *method* -- either "xml" (default), "html", "text", or "c14n"
+
+          *short_empty_elements* -- controls the formatting of elements
+                                    that contain no content. If True (default)
+                                    they are emitted as a single self-closed
+                                    tag, otherwise they are emitted as a pair
+                                    of start/end tags
+
+        """
+        if self._root is None:
+            raise TypeError('ElementTree not initialized')
+        if not method:
+            method = "xml"
+        elif method not in _serialize:
+            raise ValueError("unknown method %r" % method)
+        if not encoding:
+            if method == "c14n":
+                encoding = "utf-8"
+            else:
+                encoding = "us-ascii"
+        with _get_writer(file_or_filename, encoding) as (write, declared_encoding):
+            if method == "xml" and (xml_declaration or
+                    (xml_declaration is None and
+                     encoding.lower() != "unicode" and
+                     declared_encoding.lower() not in ("utf-8", "us-ascii"))):
+                write("<?xml version='1.0' encoding='%s'?>\n" % (
+                    declared_encoding,))
+            if method == "text":
+                _serialize_text(write, self._root)
+            else:
+                qnames, namespaces = _namespaces(self._root, default_namespace)
+                serialize = _serialize[method]
+                serialize(write, self._root, qnames, namespaces,
+                          short_empty_elements=short_empty_elements)
+
+    def write_c14n(self, file):
+        # lxml.etree compatibility.  use output method instead
+        return self.write(file, method="c14n")
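+
+
+# A minimal usage sketch for ElementTree (illustrative only; the file name
+# "example.xml" is hypothetical):
+#
+#     from xml.etree.ElementTree import Element, SubElement, ElementTree
+#
+#     root = Element("root")
+#     SubElement(root, "child").text = "data"
+#     tree = ElementTree(root)
+#     tree.write("example.xml", encoding="utf-8", xml_declaration=True)
+#     tree = ElementTree(file="example.xml")  # round-trip back into a tree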
+
+# --------------------------------------------------------------------
+# serialization support
+
+@contextlib.contextmanager
+def _get_writer(file_or_filename, encoding):
+    # Returns a text write method and releases all resources after use.
+    try:
+        write = file_or_filename.write
+    except AttributeError:
+        # file_or_filename is a file name
+        if encoding.lower() == "unicode":
+            encoding = "utf-8"
+        with open(file_or_filename, "w", encoding=encoding,
+                  errors="xmlcharrefreplace") as file:
+            yield file.write, encoding
+    else:
+        # file_or_filename is a file-like object
+        # encoding determines if it is a text or binary writer
+        if encoding.lower() == "unicode":
+            # use a text writer as is
+            yield write, getattr(file_or_filename, "encoding", None) or "utf-8"
+        else:
+            # wrap a binary writer with TextIOWrapper
+            with contextlib.ExitStack() as stack:
+                if isinstance(file_or_filename, io.BufferedIOBase):
+                    file = file_or_filename
+                elif isinstance(file_or_filename, io.RawIOBase):
+                    file = io.BufferedWriter(file_or_filename)
+                    # Keep the original file open when the BufferedWriter is
+                    # destroyed
+                    stack.callback(file.detach)
+                else:
+                    # This is to handle passed objects that aren't in the
+                    # IOBase hierarchy, but just have a write method
+                    file = io.BufferedIOBase()
+                    file.writable = lambda: True
+                    file.write = write
+                    try:
+                        # TextIOWrapper uses these methods to determine
+                        # if BOM (for UTF-16, etc) should be added
+                        file.seekable = file_or_filename.seekable
+                        file.tell = file_or_filename.tell
+                    except AttributeError:
+                        pass
+                file = io.TextIOWrapper(file,
+                                        encoding=encoding,
+                                        errors="xmlcharrefreplace",
+                                        newline="\n")
+                # Keep the original file open when the TextIOWrapper is
+                # destroyed
+                stack.callback(file.detach)
+                yield file.write, encoding
+
+def _namespaces(elem, default_namespace=None):
+    # identify namespaces used in this tree
+
+    # maps qnames to *encoded* prefix:local names
+    qnames = {None: None}
+
+    # maps uri:s to prefixes
+    namespaces = {}
+    if default_namespace:
+        namespaces[default_namespace] = ""
+
+    def add_qname(qname):
+        # calculate serialized qname representation
+        try:
+            if qname[:1] == "{":
+                uri, tag = qname[1:].rsplit("}", 1)
+                prefix = namespaces.get(uri)
+                if prefix is None:
+                    prefix = _namespace_map.get(uri)
+                    if prefix is None:
+                        prefix = "ns%d" % len(namespaces)
+                    if prefix != "xml":
+                        namespaces[uri] = prefix
+                if prefix:
+                    qnames[qname] = "%s:%s" % (prefix, tag)
+                else:
+                    qnames[qname] = tag # default element
+            else:
+                if default_namespace:
+                    # FIXME: can this be handled in XML 1.0?
+                    raise ValueError(
+                        "cannot use non-qualified names with "
+                        "default_namespace option"
+                        )
+                qnames[qname] = qname
+        except TypeError:
+            _raise_serialization_error(qname)
+
+    # populate qname and namespaces table
+    for elem in elem.iter():
+        tag = elem.tag
+        if isinstance(tag, QName):
+            if tag.text not in qnames:
+                add_qname(tag.text)
+        elif isinstance(tag, str):
+            if tag not in qnames:
+                add_qname(tag)
+        elif tag is not None and tag is not Comment and tag is not PI:
+            _raise_serialization_error(tag)
+        for key, value in elem.items():
+            if isinstance(key, QName):
+                key = key.text
+            if key not in qnames:
+                add_qname(key)
+            if isinstance(value, QName) and value.text not in qnames:
+                add_qname(value.text)
+        text = elem.text
+        if isinstance(text, QName) and text.text not in qnames:
+            add_qname(text.text)
+    return qnames, namespaces
+
+def _serialize_xml(write, elem, qnames, namespaces,
+                   short_empty_elements, **kwargs):
+    tag = elem.tag
+    text = elem.text
+    if tag is Comment:
+        write("<!--%s-->" % text)
+    elif tag is ProcessingInstruction:
+        write("<?%s?>" % text)
+    else:
+        tag = qnames[tag]
+        if tag is None:
+            if text:
+                write(_escape_cdata(text))
+            for e in elem:
+                _serialize_xml(write, e, qnames, None,
+                               short_empty_elements=short_empty_elements)
+        else:
+            write("<" + tag)
+            items = list(elem.items())
+            if items or namespaces:
+                if namespaces:
+                    for v, k in sorted(namespaces.items(),
+                                       key=lambda x: x[1]):  # sort on prefix
+                        if k:
+                            k = ":" + k
+                        write(" xmlns%s=\"%s\"" % (
+                            k,
+                            _escape_attrib(v)
+                            ))
+                for k, v in items:
+                    if isinstance(k, QName):
+                        k = k.text
+                    if isinstance(v, QName):
+                        v = qnames[v.text]
+                    else:
+                        v = _escape_attrib(v)
+                    write(" %s=\"%s\"" % (qnames[k], v))
+            if text or len(elem) or not short_empty_elements:
+                write(">")
+                if text:
+                    write(_escape_cdata(text))
+                for e in elem:
+                    _serialize_xml(write, e, qnames, None,
+                                   short_empty_elements=short_empty_elements)
+                write("</" + tag + ">")
+            else:
+                write(" />")
+    if elem.tail:
+        write(_escape_cdata(elem.tail))
+
+HTML_EMPTY = {"area", "base", "basefont", "br", "col", "embed", "frame", "hr",
+              "img", "input", "isindex", "link", "meta", "param", "source",
+              "track", "wbr"}
+
+def _serialize_html(write, elem, qnames, namespaces, **kwargs):
+    tag = elem.tag
+    text = elem.text
+    if tag is Comment:
+        write("<!--%s-->" % _escape_cdata(text))
+    elif tag is ProcessingInstruction:
+        write("<?%s?>" % _escape_cdata(text))
+    else:
+        tag = qnames[tag]
+        if tag is None:
+            if text:
+                write(_escape_cdata(text))
+            for e in elem:
+                _serialize_html(write, e, qnames, None)
+        else:
+            write("<" + tag)
+            items = list(elem.items())
+            if items or namespaces:
+                if namespaces:
+                    for v, k in sorted(namespaces.items(),
+                                       key=lambda x: x[1]):  # sort on prefix
+                        if k:
+                            k = ":" + k
+                        write(" xmlns%s=\"%s\"" % (
+                            k,
+                            _escape_attrib(v)
+                            ))
+                for k, v in items:
+                    if isinstance(k, QName):
+                        k = k.text
+                    if isinstance(v, QName):
+                        v = qnames[v.text]
+                    else:
+                        v = _escape_attrib_html(v)
+                    # FIXME: handle boolean attributes
+                    write(" %s=\"%s\"" % (qnames[k], v))
+            write(">")
+            ltag = tag.lower()
+            if text:
+                if ltag == "script" or ltag == "style":
+                    write(text)
+                else:
+                    write(_escape_cdata(text))
+            for e in elem:
+                _serialize_html(write, e, qnames, None)
+            if ltag not in HTML_EMPTY:
+                write("</" + tag + ">")
+    if elem.tail:
+        write(_escape_cdata(elem.tail))
+
+def _serialize_text(write, elem):
+    for part in elem.itertext():
+        write(part)
+    if elem.tail:
+        write(elem.tail)
+
+_serialize = {
+    "xml": _serialize_xml,
+    "html": _serialize_html,
+    "text": _serialize_text,
+# this optional method is imported at the end of the module
+#   "c14n": _serialize_c14n,
+}
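+
+# A minimal sketch of how the serializers registered above behave, through
+# tostring() and XML() (both defined later in this module); the outputs shown
+# in the comments are indicative:
+#
+#     elem = XML("<p>text<br/></p>")
+#     tostring(elem, encoding="unicode")                 # '<p>text<br /></p>'
+#     tostring(elem, encoding="unicode", method="html")  # '<p>text<br></p>'
+#     tostring(elem, encoding="unicode", method="text")  # 'text'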
+
+
+def register_namespace(prefix, uri):
+    """Register a namespace prefix.
+
+    The registry is global, and any existing mapping for either the
+    given prefix or the namespace URI will be removed.
+
+    *prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
+    attributes in this namespace will be serialized with prefix if possible.
+
+    ValueError is raised if prefix is reserved or is invalid.
+
+    """
+    if re.match(r"ns\d+$", prefix):
+        raise ValueError("Prefix format reserved for internal use")
+    for k, v in list(_namespace_map.items()):
+        if k == uri or v == prefix:
+            del _namespace_map[k]
+    _namespace_map[uri] = prefix
+
+_namespace_map = {
+    # "well-known" namespace prefixes
+    "http://www.w3.org/XML/1998/namespace": "xml",
+    "http://www.w3.org/1999/xhtml": "html",
+    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
+    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
+    # xml schema
+    "http://www.w3.org/2001/XMLSchema": "xs",
+    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
+    # dublin core
+    "http://purl.org/dc/elements/1.1/": "dc",
+}
+# For tests and troubleshooting
+register_namespace._namespace_map = _namespace_map
+
+def _raise_serialization_error(text):
+    raise TypeError(
+        "cannot serialize %r (type %s)" % (text, type(text).__name__)
+        )
+
+def _escape_cdata(text):
+    # escape character data
+    try:
+        # it's worth avoiding do-nothing calls for strings that are
+        # shorter than 500 characters, or so.  assume that's, by far,
+        # the most common case in most applications.
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if "<" in text:
+            text = text.replace("<", "&lt;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+def _escape_attrib(text):
+    # escape attribute value
+    try:
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if "<" in text:
+            text = text.replace("<", "&lt;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        if "\"" in text:
+            text = text.replace("\"", "&quot;")
+        # Although section 2.11 of the XML specification states that CR or
+        # CR LF should be replaced with just LF, it applies only to EOLs
+        # which take part in organizing the file into lines. Within attributes,
+        # we are replacing these with entity numbers, so they do not count.
+        # http://www.w3.org/TR/REC-xml/#sec-line-ends
+        # The current solution, contained in the following six lines, was
+        # discussed in issue 17582 and 39011.
+        if "\r" in text:
+            text = text.replace("\r", "&#13;")
+        if "\n" in text:
+            text = text.replace("\n", "&#10;")
+        if "\t" in text:
+            text = text.replace("\t", "&#09;")
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+def _escape_attrib_html(text):
+    # escape attribute value
+    try:
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        if "\"" in text:
+            text = text.replace("\"", "&quot;")
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+# --------------------------------------------------------------------
+
+def tostring(element, encoding=None, method=None, *,
+             xml_declaration=None, default_namespace=None,
+             short_empty_elements=True):
+    """Generate string representation of XML element.
+
+    All subelements are included.  If encoding is "unicode", a string
+    is returned.  Otherwise a bytestring is returned.
+ + *element* is an Element instance, *encoding* is an optional output + encoding defaulting to US-ASCII, *method* is an optional output which can + be one of "xml" (default), "html", "text" or "c14n", *default_namespace* + sets the default XML namespace (for "xmlns"). + + Returns an (optionally) encoded string containing the XML data. + + """ + stream = io.StringIO() if encoding == 'unicode' else io.BytesIO() + ElementTree(element).write(stream, encoding, + xml_declaration=xml_declaration, + default_namespace=default_namespace, + method=method, + short_empty_elements=short_empty_elements) + return stream.getvalue() + +class _ListDataStream(io.BufferedIOBase): + """An auxiliary stream accumulating into a list reference.""" + def __init__(self, lst): + self.lst = lst + + def writable(self): + return True + + def seekable(self): + return True + + def write(self, b): + self.lst.append(b) + + def tell(self): + return len(self.lst) + +def tostringlist(element, encoding=None, method=None, *, + xml_declaration=None, default_namespace=None, + short_empty_elements=True): + lst = [] + stream = _ListDataStream(lst) + ElementTree(element).write(stream, encoding, + xml_declaration=xml_declaration, + default_namespace=default_namespace, + method=method, + short_empty_elements=short_empty_elements) + return lst + + +def dump(elem): + """Write element tree or element structure to sys.stdout. + + This function should be used for debugging only. + + *elem* is either an ElementTree, or a single Element. The exact output + format is implementation dependent. In this version, it's written as an + ordinary XML file. + + """ + # debugging + if not isinstance(elem, ElementTree): + elem = ElementTree(elem) + elem.write(sys.stdout, encoding="unicode") + tail = elem.getroot().tail + if not tail or tail[-1] != "\n": + sys.stdout.write("\n") + + +def indent(tree, space=" ", level=0): + """Indent an XML document by inserting newlines and indentation space + after elements. + + *tree* is the ElementTree or Element to modify. The (root) element + itself will not be changed, but the tail text of all elements in its + subtree will be adapted. + + *space* is the whitespace to insert for each indentation level, two + space characters by default. + + *level* is the initial indentation level. Setting this to a higher + value than 0 can be used for indenting subtrees that are more deeply + nested inside of a document. + """ + if isinstance(tree, ElementTree): + tree = tree.getroot() + if level < 0: + raise ValueError(f"Initial indentation level must be >= 0, got {level}") + if not len(tree): + return + + # Reduce the memory consumption by reusing indentation strings. + indentations = ["\n" + level * space] + + def _indent_children(elem, level): + # Start a new indentation level for the first child. + child_level = level + 1 + try: + child_indentation = indentations[child_level] + except IndexError: + child_indentation = indentations[level] + space + indentations.append(child_indentation) + + if not elem.text or not elem.text.strip(): + elem.text = child_indentation + + for child in elem: + if len(child): + _indent_children(child, child_level) + if not child.tail or not child.tail.strip(): + child.tail = child_indentation + + # Dedent after the last child by overwriting the previous indentation. 
+        if not child.tail.strip():
+            child.tail = indentations[level]
+
+    _indent_children(tree, 0)
+
+
+# --------------------------------------------------------------------
+# parsing
+
+
+def parse(source, parser=None):
+    """Parse XML document into element tree.
+
+    *source* is a filename or file object containing XML data,
+    *parser* is an optional parser instance defaulting to XMLParser.
+
+    Return an ElementTree instance.
+
+    """
+    tree = ElementTree()
+    tree.parse(source, parser)
+    return tree
+
+
+def iterparse(source, events=None, parser=None):
+    """Incrementally parse XML document into ElementTree.
+
+    This function also reports what's going on to the user based on the
+    *events* it is initialized with. The supported events are the strings
+    "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get
+    detailed namespace information). If *events* is omitted, only
+    "end" events are reported.
+
+    *source* is a filename or file object containing XML data, *events* is
+    a list of events to report back, *parser* is an optional parser instance.
+
+    Returns an iterator providing (event, elem) pairs.
+
+    """
+    # Use the internal, undocumented _parser argument for now; When the
+    # parser argument of iterparse is removed, this can be killed.
+    pullparser = XMLPullParser(events=events, _parser=parser)
+
+    if not hasattr(source, "read"):
+        source = open(source, "rb")
+        close_source = True
+    else:
+        close_source = False
+
+    def iterator(source):
+        try:
+            while True:
+                yield from pullparser.read_events()
+                # load event buffer
+                data = source.read(16 * 1024)
+                if not data:
+                    break
+                pullparser.feed(data)
+            root = pullparser._close_and_return_root()
+            yield from pullparser.read_events()
+            it = wr()
+            if it is not None:
+                it.root = root
+        finally:
+            if close_source:
+                source.close()
+
+    gen = iterator(source)
+    class IterParseIterator(collections.abc.Iterator):
+        __next__ = gen.__next__
+        def close(self):
+            if close_source:
+                source.close()
+            gen.close()
+
+        def __del__(self):
+            # TODO: Emit a ResourceWarning if it was not explicitly closed.
+            # (When the close() method will be supported in all maintained
+            # Python versions.)
+            if close_source:
+                source.close()
+
+    it = IterParseIterator()
+    it.root = None
+    wr = weakref.ref(it)
+    return it
+
+
+class XMLPullParser:
+
+    def __init__(self, events=None, *, _parser=None):
+        # The _parser argument is for internal use only and must not be relied
+        # upon in user code. It will be removed in a future release.
+        # See https://bugs.python.org/issue17741 for more details.
+
+        self._events_queue = collections.deque()
+        self._parser = _parser or XMLParser(target=TreeBuilder())
+        # wire up the parser for event reporting
+        if events is None:
+            events = ("end",)
+        self._parser._setevents(self._events_queue, events)
+
+    def feed(self, data):
+        """Feed encoded data to parser."""
+        if self._parser is None:
+            raise ValueError("feed() called after end of stream")
+        if data:
+            try:
+                self._parser.feed(data)
+            except SyntaxError as exc:
+                self._events_queue.append(exc)
+
+    def _close_and_return_root(self):
+        # iterparse needs this to set its root attribute properly :(
+        root = self._parser.close()
+        self._parser = None
+        return root
+
+    def close(self):
+        """Finish feeding data to parser.
+
+        Unlike XMLParser, does not return the root element. Use
+        read_events() to consume elements from XMLPullParser.
+        """
+        self._close_and_return_root()
+
+    def read_events(self):
+        """Return an iterator over currently available (event, elem) pairs.
+
+        Events are consumed from the internal event queue as they are
+        retrieved from the iterator.
+        """
+        events = self._events_queue
+        while events:
+            event = events.popleft()
+            if isinstance(event, Exception):
+                raise event
+            else:
+                yield event
+
+    def flush(self):
+        if self._parser is None:
+            raise ValueError("flush() called after end of stream")
+        self._parser.flush()
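+
+
+# A minimal sketch of pull parsing with XMLPullParser (the chunk boundaries
+# below are illustrative):
+#
+#     parser = XMLPullParser(events=("start", "end"))
+#     for chunk in ("<root><chi", "ld/></root>"):
+#         parser.feed(chunk)
+#         for event, elem in parser.read_events():
+#             print(event, elem.tag)
+#     parser.close()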
+
+
+def XML(text, parser=None):
+    """Parse XML document from string constant.
+
+    This function can be used to embed "XML Literals" in Python code.
+
+    *text* is a string containing XML data, *parser* is an
+    optional parser instance, defaulting to the standard XMLParser.
+
+    Returns an Element instance.
+
+    """
+    if not parser:
+        parser = XMLParser(target=TreeBuilder())
+    parser.feed(text)
+    return parser.close()
+
+
+def XMLID(text, parser=None):
+    """Parse XML document from string constant for its IDs.
+
+    *text* is a string containing XML data, *parser* is an
+    optional parser instance, defaulting to the standard XMLParser.
+
+    Returns an (Element, dict) tuple, in which the
+    dict maps element ids to elements.
+
+    """
+    if not parser:
+        parser = XMLParser(target=TreeBuilder())
+    parser.feed(text)
+    tree = parser.close()
+    ids = {}
+    for elem in tree.iter():
+        id = elem.get("id")
+        if id:
+            ids[id] = elem
+    return tree, ids
+
+# Parse XML document from string constant.  Alias for XML().
+fromstring = XML
+
+def fromstringlist(sequence, parser=None):
+    """Parse XML document from sequence of string fragments.
+
+    *sequence* is a list or other sequence of strings, *parser* is an
+    optional parser instance, defaulting to the standard XMLParser.
+
+    Returns an Element instance.
+
+    """
+    if not parser:
+        parser = XMLParser(target=TreeBuilder())
+    for text in sequence:
+        parser.feed(text)
+    return parser.close()
+
+# --------------------------------------------------------------------
+
+
+class TreeBuilder:
+    """Generic element structure builder.
+
+    This builder converts a sequence of start, data, and end method
+    calls to a well-formed element structure.
+
+    You can use this class to build an element structure using a custom XML
+    parser, or a parser for some other XML-like format.
+
+    *element_factory* is an optional element factory which is called
+    to create new Element instances, as necessary.
+
+    *comment_factory* is a factory to create comments to be used instead of
+    the standard factory.  If *insert_comments* is false (the default),
+    comments will not be inserted into the tree.
+
+    *pi_factory* is a factory to create processing instructions to be used
+    instead of the standard factory.  If *insert_pis* is false (the default),
+    processing instructions will not be inserted into the tree.
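+
+    A minimal usage sketch (the tag and text values below are illustrative):
+
+        builder = TreeBuilder()
+        builder.start("root", {})
+        builder.data("hello")
+        builder.end("root")
+        elem = builder.close()  # an Element with tag "root" and text "hello"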
+ """ + def __init__(self, element_factory=None, *, + comment_factory=None, pi_factory=None, + insert_comments=False, insert_pis=False): + self._data = [] # data collector + self._elem = [] # element stack + self._last = None # last element + self._root = None # root element + self._tail = None # true if we're after an end tag + if comment_factory is None: + comment_factory = Comment + self._comment_factory = comment_factory + self.insert_comments = insert_comments + if pi_factory is None: + pi_factory = ProcessingInstruction + self._pi_factory = pi_factory + self.insert_pis = insert_pis + if element_factory is None: + element_factory = Element + self._factory = element_factory + + def close(self): + """Flush builder buffers and return toplevel document Element.""" + assert len(self._elem) == 0, "missing end tags" + assert self._root is not None, "missing toplevel element" + return self._root + + def _flush(self): + if self._data: + if self._last is not None: + text = "".join(self._data) + if self._tail: + assert self._last.tail is None, "internal error (tail)" + self._last.tail = text + else: + assert self._last.text is None, "internal error (text)" + self._last.text = text + self._data = [] + + def data(self, data): + """Add text to current element.""" + self._data.append(data) + + def start(self, tag, attrs): + """Open new element and return it. + + *tag* is the element name, *attrs* is a dict containing element + attributes. + + """ + self._flush() + self._last = elem = self._factory(tag, attrs) + if self._elem: + self._elem[-1].append(elem) + elif self._root is None: + self._root = elem + self._elem.append(elem) + self._tail = 0 + return elem + + def end(self, tag): + """Close and return current Element. + + *tag* is the element name. + + """ + self._flush() + self._last = self._elem.pop() + assert self._last.tag == tag,\ + "end tag mismatch (expected %s, got %s)" % ( + self._last.tag, tag) + self._tail = 1 + return self._last + + def comment(self, text): + """Create a comment using the comment_factory. + + *text* is the text of the comment. + """ + return self._handle_single( + self._comment_factory, self.insert_comments, text) + + def pi(self, target, text=None): + """Create a processing instruction using the pi_factory. + + *target* is the target name of the processing instruction. + *text* is the data of the processing instruction, or ''. + """ + return self._handle_single( + self._pi_factory, self.insert_pis, target, text) + + def _handle_single(self, factory, insert, *args): + elem = factory(*args) + if insert: + self._flush() + self._last = elem + if self._elem: + self._elem[-1].append(elem) + self._tail = 1 + return elem + + +# also see ElementTree and TreeBuilder +class XMLParser: + """Element structure builder for XML source data based on the expat parser. 
+ + *target* is an optional target object which defaults to an instance of the + standard TreeBuilder class, *encoding* is an optional encoding string + which if given, overrides the encoding specified in the XML file: + http://www.iana.org/assignments/character-sets + + """ + + def __init__(self, *, target=None, encoding=None): + try: + from xml.parsers import expat + except ImportError: + try: + import pyexpat as expat + except ImportError: + raise ImportError( + "No module named expat; use SimpleXMLTreeBuilder instead" + ) + parser = expat.ParserCreate(encoding, "}") + if target is None: + target = TreeBuilder() + # underscored names are provided for compatibility only + self.parser = self._parser = parser + self.target = self._target = target + self._error = expat.error + self._names = {} # name memo cache + # main callbacks + parser.DefaultHandlerExpand = self._default + if hasattr(target, 'start'): + parser.StartElementHandler = self._start + if hasattr(target, 'end'): + parser.EndElementHandler = self._end + if hasattr(target, 'start_ns'): + parser.StartNamespaceDeclHandler = self._start_ns + if hasattr(target, 'end_ns'): + parser.EndNamespaceDeclHandler = self._end_ns + if hasattr(target, 'data'): + parser.CharacterDataHandler = target.data + # miscellaneous callbacks + if hasattr(target, 'comment'): + parser.CommentHandler = target.comment + if hasattr(target, 'pi'): + parser.ProcessingInstructionHandler = target.pi + # Configure pyexpat: buffering, new-style attribute handling. + parser.buffer_text = 1 + parser.ordered_attributes = 1 + self._doctype = None + self.entity = {} + try: + self.version = "Expat %d.%d.%d" % expat.version_info + except AttributeError: + pass # unknown + + def _setevents(self, events_queue, events_to_report): + # Internal API for XMLPullParser + # events_to_report: a list of events to report during parsing (same as + # the *events* of XMLPullParser's constructor. + # events_queue: a list of actual parsing events that will be populated + # by the underlying parser. 
+ # + parser = self._parser + append = events_queue.append + for event_name in events_to_report: + if event_name == "start": + parser.ordered_attributes = 1 + def handler(tag, attrib_in, event=event_name, append=append, + start=self._start): + append((event, start(tag, attrib_in))) + parser.StartElementHandler = handler + elif event_name == "end": + def handler(tag, event=event_name, append=append, + end=self._end): + append((event, end(tag))) + parser.EndElementHandler = handler + elif event_name == "start-ns": + # TreeBuilder does not implement .start_ns() + if hasattr(self.target, "start_ns"): + def handler(prefix, uri, event=event_name, append=append, + start_ns=self._start_ns): + append((event, start_ns(prefix, uri))) + else: + def handler(prefix, uri, event=event_name, append=append): + append((event, (prefix or '', uri or ''))) + parser.StartNamespaceDeclHandler = handler + elif event_name == "end-ns": + # TreeBuilder does not implement .end_ns() + if hasattr(self.target, "end_ns"): + def handler(prefix, event=event_name, append=append, + end_ns=self._end_ns): + append((event, end_ns(prefix))) + else: + def handler(prefix, event=event_name, append=append): + append((event, None)) + parser.EndNamespaceDeclHandler = handler + elif event_name == 'comment': + def handler(text, event=event_name, append=append, self=self): + append((event, self.target.comment(text))) + parser.CommentHandler = handler + elif event_name == 'pi': + def handler(pi_target, data, event=event_name, append=append, + self=self): + append((event, self.target.pi(pi_target, data))) + parser.ProcessingInstructionHandler = handler + else: + raise ValueError("unknown event %r" % event_name) + + def _raiseerror(self, value): + err = ParseError(value) + err.code = value.code + err.position = value.lineno, value.offset + raise err + + def _fixname(self, key): + # expand qname, and convert name string to ascii, if possible + try: + name = self._names[key] + except KeyError: + name = key + if "}" in name: + name = "{" + name + self._names[key] = name + return name + + def _start_ns(self, prefix, uri): + return self.target.start_ns(prefix or '', uri or '') + + def _end_ns(self, prefix): + return self.target.end_ns(prefix or '') + + def _start(self, tag, attr_list): + # Handler for expat's StartElementHandler. Since ordered_attributes + # is set, the attributes are reported as a list of alternating + # attribute name,value. 
+        fixname = self._fixname
+        tag = fixname(tag)
+        attrib = {}
+        if attr_list:
+            for i in range(0, len(attr_list), 2):
+                attrib[fixname(attr_list[i])] = attr_list[i+1]
+        return self.target.start(tag, attrib)
+
+    def _end(self, tag):
+        return self.target.end(self._fixname(tag))
+
+    def _default(self, text):
+        prefix = text[:1]
+        if prefix == "&":
+            # deal with undefined entities
+            try:
+                data_handler = self.target.data
+            except AttributeError:
+                return
+            try:
+                data_handler(self.entity[text[1:-1]])
+            except KeyError:
+                from xml.parsers import expat
+                err = expat.error(
+                    "undefined entity %s: line %d, column %d" %
+                    (text, self.parser.ErrorLineNumber,
+                     self.parser.ErrorColumnNumber)
+                    )
+                err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
+                err.lineno = self.parser.ErrorLineNumber
+                err.offset = self.parser.ErrorColumnNumber
+                raise err
+        elif prefix == "<" and text[:9] == "<!DOCTYPE":
+            self._doctype = [] # inside a doctype declaration
+        elif self._doctype is not None:
+            # parse doctype contents
+            if prefix == ">":
+                self._doctype = None
+                return
+            text = text.strip()
+            if not text:
+                return
+            self._doctype.append(text)
+            n = len(self._doctype)
+            if n > 2:
+                type = self._doctype[1]
+                if type == "PUBLIC" and n == 4:
+                    name, type, pubid, system = self._doctype
+                    if pubid:
+                        pubid = pubid[1:-1]
+                elif type == "SYSTEM" and n == 3:
+                    name, type, system = self._doctype
+                    pubid = None
+                else:
+                    return
+                if hasattr(self.target, "doctype"):
+                    self.target.doctype(name, pubid, system[1:-1])
+                elif hasattr(self, "doctype"):
+                    warnings.warn(
+                        "The doctype() method of XMLParser is ignored.  "
+                        "Define doctype() method on the TreeBuilder target.",
+                        RuntimeWarning)
+
+                self._doctype = None
+
+    def feed(self, data):
+        """Feed encoded data to parser."""
+        try:
+            self.parser.Parse(data, False)
+        except self._error as v:
+            self._raiseerror(v)
+
+    def close(self):
+        """Finish feeding data to parser and return element structure."""
+        try:
+            self.parser.Parse(b"", True) # end of data
+        except self._error as v:
+            self._raiseerror(v)
+        try:
+            close_handler = self.target.close
+        except AttributeError:
+            pass
+        else:
+            return close_handler()
+        finally:
+            # get rid of circular references
+            del self.parser, self._parser
+            del self.target, self._target
+
+    def flush(self):
+        was_enabled = self.parser.GetReparseDeferralEnabled()
+        try:
+            self.parser.SetReparseDeferralEnabled(False)
+            self.parser.Parse(b"", False)
+        except self._error as v:
+            self._raiseerror(v)
+        finally:
+            self.parser.SetReparseDeferralEnabled(was_enabled)
+
+# --------------------------------------------------------------------
+# C14N 2.0
+
+def canonicalize(xml_data=None, *, out=None, from_file=None, **options):
+    """Convert XML to its C14N 2.0 serialised form.
+
+    If *out* is provided, it must be a file or file-like object that receives
+    the serialised canonical XML output (text, not bytes) through its ``.write()``
+    method.  To write to a file, open it in text mode with encoding "utf-8".
+    If *out* is not provided, this function returns the output as a text string.
+
+    Either *xml_data* (an XML string) or *from_file* (a file path or
+    file-like object) must be provided as input.
+
+    The configuration options are the same as for the ``C14NWriterTarget``.
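+
+    A minimal usage sketch (the input document is illustrative):
+
+        canonicalize("<root z='2' a='1'/>")
+        # -> '<root a="1" z="2"></root>'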
+ """ + if xml_data is None and from_file is None: + raise ValueError("Either 'xml_data' or 'from_file' must be provided as input") + sio = None + if out is None: + sio = out = io.StringIO() + + parser = XMLParser(target=C14NWriterTarget(out.write, **options)) + + if xml_data is not None: + parser.feed(xml_data) + parser.close() + elif from_file is not None: + parse(from_file, parser=parser) + + return sio.getvalue() if sio is not None else None + + +_looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match + + +class C14NWriterTarget: + """ + Canonicalization writer target for the XMLParser. + + Serialises parse events to XML C14N 2.0. + + The *write* function is used for writing out the resulting data stream + as text (not bytes). To write to a file, open it in text mode with encoding + "utf-8" and pass its ``.write`` method. + + Configuration options: + + - *with_comments*: set to true to include comments + - *strip_text*: set to true to strip whitespace before and after text content + - *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}" + - *qname_aware_tags*: a set of qname aware tag names in which prefixes + should be replaced in text content + - *qname_aware_attrs*: a set of qname aware attribute names in which prefixes + should be replaced in text content + - *exclude_attrs*: a set of attribute names that should not be serialised + - *exclude_tags*: a set of tag names that should not be serialised + """ + def __init__(self, write, *, + with_comments=False, strip_text=False, rewrite_prefixes=False, + qname_aware_tags=None, qname_aware_attrs=None, + exclude_attrs=None, exclude_tags=None): + self._write = write + self._data = [] + self._with_comments = with_comments + self._strip_text = strip_text + self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None + self._exclude_tags = set(exclude_tags) if exclude_tags else None + + self._rewrite_prefixes = rewrite_prefixes + if qname_aware_tags: + self._qname_aware_tags = set(qname_aware_tags) + else: + self._qname_aware_tags = None + if qname_aware_attrs: + self._find_qname_aware_attrs = set(qname_aware_attrs).intersection + else: + self._find_qname_aware_attrs = None + + # Stack with globally and newly declared namespaces as (uri, prefix) pairs. + self._declared_ns_stack = [[ + ("http://www.w3.org/XML/1998/namespace", "xml"), + ]] + # Stack with user declared namespace prefixes as (uri, prefix) pairs. 
+ self._ns_stack = [] + if not rewrite_prefixes: + self._ns_stack.append(list(_namespace_map.items())) + self._ns_stack.append([]) + self._prefix_map = {} + self._preserve_space = [False] + self._pending_start = None + self._root_seen = False + self._root_done = False + self._ignored_depth = 0 + + def _iter_namespaces(self, ns_stack, _reversed=reversed): + for namespaces in _reversed(ns_stack): + if namespaces: # almost no element declares new namespaces + yield from namespaces + + def _resolve_prefix_name(self, prefixed_name): + prefix, name = prefixed_name.split(':', 1) + for uri, p in self._iter_namespaces(self._ns_stack): + if p == prefix: + return f'{{{uri}}}{name}' + raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope') + + def _qname(self, qname, uri=None): + if uri is None: + uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname) + else: + tag = qname + + prefixes_seen = set() + for u, prefix in self._iter_namespaces(self._declared_ns_stack): + if u == uri and prefix not in prefixes_seen: + return f'{prefix}:{tag}' if prefix else tag, tag, uri + prefixes_seen.add(prefix) + + # Not declared yet => add new declaration. + if self._rewrite_prefixes: + if uri in self._prefix_map: + prefix = self._prefix_map[uri] + else: + prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}' + self._declared_ns_stack[-1].append((uri, prefix)) + return f'{prefix}:{tag}', tag, uri + + if not uri and '' not in prefixes_seen: + # No default namespace declared => no prefix needed. + return tag, tag, uri + + for u, prefix in self._iter_namespaces(self._ns_stack): + if u == uri: + self._declared_ns_stack[-1].append((uri, prefix)) + return f'{prefix}:{tag}' if prefix else tag, tag, uri + + if not uri: + # As soon as a default namespace is defined, + # anything that has no namespace (and thus, no prefix) goes there. + return tag, tag, uri + + raise ValueError(f'Namespace "{uri}" is not declared in scope') + + def data(self, data): + if not self._ignored_depth: + self._data.append(data) + + def _flush(self, _join_text=''.join): + data = _join_text(self._data) + del self._data[:] + if self._strip_text and not self._preserve_space[-1]: + data = data.strip() + if self._pending_start is not None: + args, self._pending_start = self._pending_start, None + qname_text = data if data and _looks_like_prefix_name(data) else None + self._start(*args, qname_text) + if qname_text is not None: + return + if data and self._root_seen: + self._write(_escape_cdata_c14n(data)) + + def start_ns(self, prefix, uri): + if self._ignored_depth: + return + # we may have to resolve qnames in text content + if self._data: + self._flush() + self._ns_stack[-1].append((uri, prefix)) + + def start(self, tag, attrs): + if self._exclude_tags is not None and ( + self._ignored_depth or tag in self._exclude_tags): + self._ignored_depth += 1 + return + if self._data: + self._flush() + + new_namespaces = [] + self._declared_ns_stack.append(new_namespaces) + + if self._qname_aware_tags is not None and tag in self._qname_aware_tags: + # Need to parse text first to see if it requires a prefix declaration. + self._pending_start = (tag, attrs, new_namespaces) + return + self._start(tag, attrs, new_namespaces) + + def _start(self, tag, attrs, new_namespaces, qname_text=None): + if self._exclude_attrs is not None and attrs: + attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs} + + qnames = {tag, *attrs} + resolved_names = {} + + # Resolve prefixes in attribute and tag text. 
+        if qname_text is not None:
+            qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text)
+            qnames.add(qname)
+        if self._find_qname_aware_attrs is not None and attrs:
+            qattrs = self._find_qname_aware_attrs(attrs)
+            if qattrs:
+                for attr_name in qattrs:
+                    value = attrs[attr_name]
+                    if _looks_like_prefix_name(value):
+                        qname = resolved_names[value] = self._resolve_prefix_name(value)
+                        qnames.add(qname)
+            else:
+                qattrs = None
+        else:
+            qattrs = None
+
+        # Assign prefixes in lexicographical order of used URIs.
+        parse_qname = self._qname
+        parsed_qnames = {n: parse_qname(n) for n in sorted(
+            qnames, key=lambda n: n.split('}', 1))}
+
+        # Write namespace declarations in prefix order ...
+        if new_namespaces:
+            attr_list = [
+                ('xmlns:' + prefix if prefix else 'xmlns', uri)
+                for uri, prefix in new_namespaces
+            ]
+            attr_list.sort()
+        else:
+            # almost always empty
+            attr_list = []
+
+        # ... followed by attributes in URI+name order
+        if attrs:
+            for k, v in sorted(attrs.items()):
+                if qattrs is not None and k in qattrs and v in resolved_names:
+                    v = parsed_qnames[resolved_names[v]][0]
+                attr_qname, attr_name, uri = parsed_qnames[k]
+                # No prefix for attributes in default ('') namespace.
+                attr_list.append((attr_qname if uri else attr_name, v))
+
+        # Honour xml:space attributes.
+        space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space')
+        self._preserve_space.append(
+            space_behaviour == 'preserve' if space_behaviour
+            else self._preserve_space[-1])
+
+        # Write the tag.
+        write = self._write
+        write('<' + parsed_qnames[tag][0])
+        if attr_list:
+            write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list]))
+        write('>')
+
+        # Write the resolved qname text content.
+        if qname_text is not None:
+            write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0]))
+
+        self._root_seen = True
+        self._ns_stack.append([])
+
+    def end(self, tag):
+        if self._ignored_depth:
+            self._ignored_depth -= 1
+            return
+        if self._data:
+            self._flush()
+        self._write(f'</{self._qname(tag)[0]}>')
+        self._preserve_space.pop()
+        self._root_done = len(self._preserve_space) == 1
+        self._declared_ns_stack.pop()
+        self._ns_stack.pop()
+
+    def comment(self, text):
+        if not self._with_comments:
+            return
+        if self._ignored_depth:
+            return
+        if self._root_done:
+            self._write('\n')
+        elif self._root_seen and self._data:
+            self._flush()
+        self._write(f'<!--{_escape_cdata_c14n(text)}-->')
+        if not self._root_seen:
+            self._write('\n')
+
+    def pi(self, target, data):
+        if self._ignored_depth:
+            return
+        if self._root_done:
+            self._write('\n')
+        elif self._root_seen and self._data:
+            self._flush()
+        self._write(
+            f'<?{target} {_escape_cdata_c14n(data)}?>' if data else f'<?{target}?>')
+        if not self._root_seen:
+            self._write('\n')
+
+
+def _escape_cdata_c14n(text):
+    # escape character data
+    try:
+        # it's worth avoiding do-nothing calls for strings that are
+        # shorter than 500 characters, or so.  assume that's, by far,
+        # the most common case in most applications.
+        if '&' in text:
+            text = text.replace('&', '&amp;')
+        if '<' in text:
+            text = text.replace('<', '&lt;')
+        if '>' in text:
+            text = text.replace('>', '&gt;')
+        if '\r' in text:
+            text = text.replace('\r', '&#xD;')
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+
+def _escape_attrib_c14n(text):
+    # escape attribute value
+    try:
+        if '&' in text:
+            text = text.replace('&', '&amp;')
+        if '<' in text:
+            text = text.replace('<', '&lt;')
+        if '"' in text:
+            text = text.replace('"', '&quot;')
+        if '\t' in text:
+            text = text.replace('\t', '&#x9;')
+        if '\n' in text:
+            text = text.replace('\n', '&#xA;')
+        if '\r' in text:
+            text = text.replace('\r', '&#xD;')
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+
+# --------------------------------------------------------------------
+
+# Import the C accelerators
+try:
+    # Element is going to be shadowed by the C implementation. We need to keep
+    # the Python version of it accessible for some "creative" uses by external
+    # code (see tests)
+    _Element_Py = Element
+
+    # Element, SubElement, ParseError, TreeBuilder, XMLParser, _set_factories
+    from _elementtree import *
+    from _elementtree import _set_factories
+except ImportError:
+    pass
+else:
+    _set_factories(Comment, ProcessingInstruction)
diff --git a/Python313_13_x64_Template/Lib/xml/etree/__init__.py b/Python314_4_x64_Template/Lib/xml/etree/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/xml/etree/__init__.py
rename to Python314_4_x64_Template/Lib/xml/etree/__init__.py
diff --git a/Python313_13_x64_Template/Lib/xml/etree/cElementTree.py b/Python314_4_x64_Template/Lib/xml/etree/cElementTree.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/xml/etree/cElementTree.py
rename to Python314_4_x64_Template/Lib/xml/etree/cElementTree.py
diff --git a/Python313_13_x64_Template/Lib/xml/parsers/__init__.py b/Python314_4_x64_Template/Lib/xml/parsers/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/xml/parsers/__init__.py
rename to Python314_4_x64_Template/Lib/xml/parsers/__init__.py
diff --git a/Python314_4_x64_Template/Lib/xml/parsers/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/xml/parsers/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..2764401d
Binary files /dev/null and b/Python314_4_x64_Template/Lib/xml/parsers/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/xml/parsers/__pycache__/expat.cpython-314.pyc b/Python314_4_x64_Template/Lib/xml/parsers/__pycache__/expat.cpython-314.pyc
new file mode 100644
index 00000000..63429780
Binary files /dev/null and b/Python314_4_x64_Template/Lib/xml/parsers/__pycache__/expat.cpython-314.pyc differ
diff --git a/Python313_13_x64_Template/Lib/xml/parsers/expat.py b/Python314_4_x64_Template/Lib/xml/parsers/expat.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/xml/parsers/expat.py
rename to Python314_4_x64_Template/Lib/xml/parsers/expat.py
diff --git a/Python314_4_x64_Template/Lib/xml/sax/__init__.py b/Python314_4_x64_Template/Lib/xml/sax/__init__.py
new file mode 100644
index 00000000..fe4582c6
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/xml/sax/__init__.py
@@ -0,0 +1,100 @@
+"""Simple API for XML (SAX) implementation for Python.
+
+This module provides an implementation of the SAX 2 interface;
+information about the Java version of the interface can be found at
+http://www.megginson.com/SAX/.  The Python version of the interface is
+documented at <...>.
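+
+A minimal usage sketch (the handler class below is illustrative):
+
+    import xml.sax
+
+    class TitleHandler(xml.sax.ContentHandler):
+        def startElement(self, name, attrs):
+            if name == "title":
+                print("found a title element")
+
+    xml.sax.parseString(b"<doc><title/></doc>", TitleHandler())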
+ +This package contains the following modules: + +handler -- Base classes and constants which define the SAX 2 API for + the 'client-side' of SAX for Python. + +saxutils -- Implementation of the convenience classes commonly used to + work with SAX. + +xmlreader -- Base classes and constants which define the SAX 2 API for + the parsers used with SAX for Python. + +expatreader -- Driver that allows use of the Expat parser with SAX. +""" + +from .xmlreader import InputSource +from .handler import ContentHandler, ErrorHandler +from ._exceptions import (SAXException, SAXNotRecognizedException, + SAXParseException, SAXNotSupportedException, + SAXReaderNotAvailable) + + +def parse(source, handler, errorHandler=ErrorHandler()): + parser = make_parser() + parser.setContentHandler(handler) + parser.setErrorHandler(errorHandler) + parser.parse(source) + +def parseString(string, handler, errorHandler=ErrorHandler()): + import io + if errorHandler is None: + errorHandler = ErrorHandler() + parser = make_parser() + parser.setContentHandler(handler) + parser.setErrorHandler(errorHandler) + + inpsrc = InputSource() + if isinstance(string, str): + inpsrc.setCharacterStream(io.StringIO(string)) + else: + inpsrc.setByteStream(io.BytesIO(string)) + parser.parse(inpsrc) + +# this is the parser list used by the make_parser function if no +# alternatives are given as parameters to the function + +default_parser_list = ["xml.sax.expatreader"] + +# tell modulefinder that importing sax potentially imports expatreader +_false = 0 +if _false: + import xml.sax.expatreader # noqa: F401 + +import os, sys +if not sys.flags.ignore_environment and "PY_SAX_PARSER" in os.environ: + default_parser_list = os.environ["PY_SAX_PARSER"].split(",") +del os, sys + + +def make_parser(parser_list=()): + """Creates and returns a SAX parser. + + Creates the first parser it is able to instantiate of the ones + given in the iterable created by chaining parser_list and + default_parser_list. 
The iterables must contain the names of Python + modules containing both a SAX parser and a create_parser function.""" + + for parser_name in list(parser_list) + default_parser_list: + try: + return _create_parser(parser_name) + except ImportError: + import sys + if parser_name in sys.modules: + # The parser module was found, but importing it + # failed unexpectedly, pass this exception through + raise + except SAXReaderNotAvailable: + # The parser module detected that it won't work properly, + # so try the next one + pass + + raise SAXReaderNotAvailable("No parsers found", None) + +# --- Internal utility methods used by make_parser + +def _create_parser(parser_name): + drv_module = __import__(parser_name,{},{},['create_parser']) + return drv_module.create_parser() + + +__all__ = ['ContentHandler', 'ErrorHandler', 'InputSource', 'SAXException', + 'SAXNotRecognizedException', 'SAXNotSupportedException', + 'SAXParseException', 'SAXReaderNotAvailable', + 'default_parser_list', 'make_parser', 'parse', 'parseString'] diff --git a/Python313_13_x64_Template/Lib/xml/sax/_exceptions.py b/Python314_4_x64_Template/Lib/xml/sax/_exceptions.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/sax/_exceptions.py rename to Python314_4_x64_Template/Lib/xml/sax/_exceptions.py diff --git a/Python313_13_x64_Template/Lib/xml/sax/expatreader.py b/Python314_4_x64_Template/Lib/xml/sax/expatreader.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/sax/expatreader.py rename to Python314_4_x64_Template/Lib/xml/sax/expatreader.py diff --git a/Python314_4_x64_Template/Lib/xml/sax/handler.py b/Python314_4_x64_Template/Lib/xml/sax/handler.py new file mode 100644 index 00000000..3183c3fe --- /dev/null +++ b/Python314_4_x64_Template/Lib/xml/sax/handler.py @@ -0,0 +1,387 @@ +""" +This module contains the core classes of version 2.0 of SAX for Python. +This file provides only default classes with absolutely minimum +functionality, from which drivers and applications can be subclassed. + +Many of these classes are empty and are included only as documentation +of the interfaces. + +$Id$ +""" + +version = '2.0beta' + +#============================================================================ +# +# HANDLER INTERFACES +# +#============================================================================ + +# ===== ERRORHANDLER ===== + +class ErrorHandler: + """Basic interface for SAX error handlers. + + If you create an object that implements this interface, then + register the object with your XMLReader, the parser will call the + methods in your object to report all warnings and errors. There + are three levels of errors available: warnings, (possibly) + recoverable errors, and unrecoverable errors. All methods take a + SAXParseException as the only parameter.""" + + def error(self, exception): + "Handle a recoverable error." + raise exception + + def fatalError(self, exception): + "Handle a non-recoverable error." + raise exception + + def warning(self, exception): + "Handle a warning." + print(exception) + + +# ===== CONTENTHANDLER ===== + +class ContentHandler: + """Interface for receiving logical document content events. + + This is the main callback interface in SAX, and the one most + important to applications. 
The order of events in this interface + mirrors the order of the information in the document.""" + + def __init__(self): + self._locator = None + + def setDocumentLocator(self, locator): + """Called by the parser to give the application a locator for + locating the origin of document events. + + SAX parsers are strongly encouraged (though not absolutely + required) to supply a locator: if it does so, it must supply + the locator to the application by invoking this method before + invoking any of the other methods in the DocumentHandler + interface. + + The locator allows the application to determine the end + position of any document-related event, even if the parser is + not reporting an error. Typically, the application will use + this information for reporting its own errors (such as + character content that does not match an application's + business rules). The information returned by the locator is + probably not sufficient for use with a search engine. + + Note that the locator will return correct information only + during the invocation of the events in this interface. The + application should not attempt to use it at any other time.""" + self._locator = locator + + def startDocument(self): + """Receive notification of the beginning of a document. + + The SAX parser will invoke this method only once, before any + other methods in this interface or in DTDHandler (except for + setDocumentLocator).""" + + def endDocument(self): + """Receive notification of the end of a document. + + The SAX parser will invoke this method only once, and it will + be the last method invoked during the parse. The parser shall + not invoke this method until it has either abandoned parsing + (because of an unrecoverable error) or reached the end of + input.""" + + def startPrefixMapping(self, prefix, uri): + """Begin the scope of a prefix-URI Namespace mapping. + + The information from this event is not necessary for normal + Namespace processing: the SAX XML reader will automatically + replace prefixes for element and attribute names when the + http://xml.org/sax/features/namespaces feature is true (the + default). + + There are cases, however, when applications need to use + prefixes in character data or in attribute values, where they + cannot safely be expanded automatically; the + start/endPrefixMapping event supplies the information to the + application to expand prefixes in those contexts itself, if + necessary. + + Note that start/endPrefixMapping events are not guaranteed to + be properly nested relative to each-other: all + startPrefixMapping events will occur before the corresponding + startElement event, and all endPrefixMapping events will occur + after the corresponding endElement event, but their order is + not guaranteed.""" + + def endPrefixMapping(self, prefix): + """End the scope of a prefix-URI mapping. + + See startPrefixMapping for details. This event will always + occur after the corresponding endElement event, but the order + of endPrefixMapping events is not otherwise guaranteed.""" + + def startElement(self, name, attrs): + """Signals the start of an element in non-namespace mode. + + The name parameter contains the raw XML 1.0 name of the + element type as a string and the attrs parameter holds an + instance of the Attributes class containing the attributes of + the element.""" + + def endElement(self, name): + """Signals the end of an element in non-namespace mode. 
+ + The name parameter contains the name of the element type, just + as with the startElement event.""" + + def startElementNS(self, name, qname, attrs): + """Signals the start of an element in namespace mode. + + The name parameter contains the name of the element type as a + (uri, localname) tuple, the qname parameter the raw XML 1.0 + name used in the source document, and the attrs parameter + holds an instance of the Attributes class containing the + attributes of the element. + + The uri part of the name tuple is None for elements which have + no namespace.""" + + def endElementNS(self, name, qname): + """Signals the end of an element in namespace mode. + + The name parameter contains the name of the element type, just + as with the startElementNS event.""" + + def characters(self, content): + """Receive notification of character data. + + The Parser will call this method to report each chunk of + character data. SAX parsers may return all contiguous + character data in a single chunk, or they may split it into + several chunks; however, all of the characters in any single + event must come from the same external entity so that the + Locator provides useful information.""" + + def ignorableWhitespace(self, whitespace): + """Receive notification of ignorable whitespace in element content. + + Validating Parsers must use this method to report each chunk + of ignorable whitespace (see the W3C XML 1.0 recommendation, + section 2.10): non-validating parsers may also use this method + if they are capable of parsing and using content models. + + SAX parsers may return all contiguous whitespace in a single + chunk, or they may split it into several chunks; however, all + of the characters in any single event must come from the same + external entity, so that the Locator provides useful + information.""" + + def processingInstruction(self, target, data): + """Receive notification of a processing instruction. + + The Parser will invoke this method once for each processing + instruction found: note that processing instructions may occur + before or after the main document element. + + A SAX parser should never report an XML declaration (XML 1.0, + section 2.8) or a text declaration (XML 1.0, section 4.3.1) + using this method.""" + + def skippedEntity(self, name): + """Receive notification of a skipped entity. + + The Parser will invoke this method once for each entity + skipped. Non-validating processors may skip entities if they + have not seen the declarations (because, for example, the + entity was declared in an external DTD subset). All processors + may skip external entities, depending on the values of the + http://xml.org/sax/features/external-general-entities and the + http://xml.org/sax/features/external-parameter-entities + properties.""" + + +# ===== DTDHandler ===== + +class DTDHandler: + """Handle DTD events. + + This interface specifies only those DTD events required for basic + parsing (unparsed entities and attributes).""" + + def notationDecl(self, name, publicId, systemId): + "Handle a notation declaration event." + + def unparsedEntityDecl(self, name, publicId, systemId, ndata): + "Handle an unparsed entity declaration event." + + +# ===== ENTITYRESOLVER ===== + +class EntityResolver: + """Basic interface for resolving entities. If you create an object + implementing this interface, then register the object with your + Parser, the parser will call the method in your object to + resolve all external entities. 
Note that DefaultHandler implements + this interface with the default behaviour.""" + + def resolveEntity(self, publicId, systemId): + """Resolve the system identifier of an entity and return either + the system identifier to read from as a string, or an InputSource + to read from.""" + return systemId + + +#============================================================================ +# +# CORE FEATURES +# +#============================================================================ + +feature_namespaces = "http://xml.org/sax/features/namespaces" +# true: Perform Namespace processing (default). +# false: Optionally do not perform Namespace processing +# (implies namespace-prefixes). +# access: (parsing) read-only; (not parsing) read/write + +feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes" +# true: Report the original prefixed names and attributes used for Namespace +# declarations. +# false: Do not report attributes used for Namespace declarations, and +# optionally do not report original prefixed names (default). +# access: (parsing) read-only; (not parsing) read/write + +feature_string_interning = "http://xml.org/sax/features/string-interning" +# true: All element names, prefixes, attribute names, Namespace URIs, and +# local names are interned using the built-in intern function. +# false: Names are not necessarily interned, although they may be (default). +# access: (parsing) read-only; (not parsing) read/write + +feature_validation = "http://xml.org/sax/features/validation" +# true: Report all validation errors (implies external-general-entities and +# external-parameter-entities). +# false: Do not report validation errors. +# access: (parsing) read-only; (not parsing) read/write + +feature_external_ges = "http://xml.org/sax/features/external-general-entities" +# true: Include all external general (text) entities. +# false: Do not include external general entities. +# access: (parsing) read-only; (not parsing) read/write + +feature_external_pes = "http://xml.org/sax/features/external-parameter-entities" +# true: Include all external parameter entities, including the external +# DTD subset. +# false: Do not include any external parameter entities, even the external +# DTD subset. +# access: (parsing) read-only; (not parsing) read/write + +all_features = [feature_namespaces, + feature_namespace_prefixes, + feature_string_interning, + feature_validation, + feature_external_ges, + feature_external_pes] + + +#============================================================================ +# +# CORE PROPERTIES +# +#============================================================================ + +property_lexical_handler = "http://xml.org/sax/properties/lexical-handler" +# data type: xml.sax.sax2lib.LexicalHandler +# description: An optional extension handler for lexical events like comments. +# access: read/write + +property_declaration_handler = "http://xml.org/sax/properties/declaration-handler" +# data type: xml.sax.sax2lib.DeclHandler +# description: An optional extension handler for DTD-related events other +# than notations and unparsed entities. +# access: read/write + +property_dom_node = "http://xml.org/sax/properties/dom-node" +# data type: org.w3c.dom.Node +# description: When parsing, the current DOM node being visited if this is +# a DOM iterator; when not parsing, the root DOM node for +# iteration. 
+# access: (parsing) read-only; (not parsing) read/write + +property_xml_string = "http://xml.org/sax/properties/xml-string" +# data type: String +# description: The literal string of characters that was the source for +# the current event. +# access: read-only + +property_encoding = "http://www.python.org/sax/properties/encoding" +# data type: String +# description: The name of the encoding to assume for input data. +# access: write: set the encoding, e.g. established by a higher-level +# protocol. May change during parsing (e.g. after +# processing a META tag) +# read: return the current encoding (possibly established through +# auto-detection. +# initial value: UTF-8 +# + +property_interning_dict = "http://www.python.org/sax/properties/interning-dict" +# data type: Dictionary +# description: The dictionary used to intern common strings in the document +# access: write: Request that the parser uses a specific dictionary, to +# allow interning across different documents +# read: return the current interning dictionary, or None +# + +all_properties = [property_lexical_handler, + property_dom_node, + property_declaration_handler, + property_xml_string, + property_encoding, + property_interning_dict] + + +class LexicalHandler: + """Optional SAX2 handler for lexical events. + + This handler is used to obtain lexical information about an XML + document, that is, information about how the document was encoded + (as opposed to what it contains, which is reported to the + ContentHandler), such as comments and CDATA marked section + boundaries. + + To set the LexicalHandler of an XMLReader, use the setProperty + method with the property identifier + 'http://xml.org/sax/properties/lexical-handler'.""" + + def comment(self, content): + """Reports a comment anywhere in the document (including the + DTD and outside the document element). + + content is a string that holds the contents of the comment.""" + + def startDTD(self, name, public_id, system_id): + """Report the start of the DTD declarations, if the document + has an associated DTD. + + A startEntity event will be reported before declaration events + from the external DTD subset are reported, and this can be + used to infer from which subset DTD declarations derive. + + name is the name of the document element type, public_id the + public identifier of the DTD (or None if none were supplied) + and system_id the system identifier of the external subset (or + None if none were supplied).""" + + def endDTD(self): + """Signals the end of DTD declarations.""" + + def startCDATA(self): + """Reports the beginning of a CDATA marked section. 
+ + The contents of the CDATA marked section will be reported + through the characters event.""" + + def endCDATA(self): + """Reports the end of a CDATA marked section.""" diff --git a/Python313_13_x64_Template/Lib/xml/sax/saxutils.py b/Python314_4_x64_Template/Lib/xml/sax/saxutils.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/sax/saxutils.py rename to Python314_4_x64_Template/Lib/xml/sax/saxutils.py diff --git a/Python313_13_x64_Template/Lib/xml/sax/xmlreader.py b/Python314_4_x64_Template/Lib/xml/sax/xmlreader.py similarity index 100% rename from Python313_13_x64_Template/Lib/xml/sax/xmlreader.py rename to Python314_4_x64_Template/Lib/xml/sax/xmlreader.py diff --git a/Python313_13_x64_Template/Lib/xmlrpc/__init__.py b/Python314_4_x64_Template/Lib/xmlrpc/__init__.py similarity index 100% rename from Python313_13_x64_Template/Lib/xmlrpc/__init__.py rename to Python314_4_x64_Template/Lib/xmlrpc/__init__.py diff --git a/Python314_4_x64_Template/Lib/xmlrpc/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/xmlrpc/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..caa05065 Binary files /dev/null and b/Python314_4_x64_Template/Lib/xmlrpc/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/xmlrpc/__pycache__/client.cpython-314.pyc b/Python314_4_x64_Template/Lib/xmlrpc/__pycache__/client.cpython-314.pyc new file mode 100644 index 00000000..463b40b6 Binary files /dev/null and b/Python314_4_x64_Template/Lib/xmlrpc/__pycache__/client.cpython-314.pyc differ diff --git a/Python313_13_x64_Template/Lib/xmlrpc/client.py b/Python314_4_x64_Template/Lib/xmlrpc/client.py similarity index 100% rename from Python313_13_x64_Template/Lib/xmlrpc/client.py rename to Python314_4_x64_Template/Lib/xmlrpc/client.py diff --git a/Python314_4_x64_Template/Lib/xmlrpc/server.py b/Python314_4_x64_Template/Lib/xmlrpc/server.py new file mode 100644 index 00000000..3e687115 --- /dev/null +++ b/Python314_4_x64_Template/Lib/xmlrpc/server.py @@ -0,0 +1,1003 @@ +r"""XML-RPC Servers. + +This module can be used to create simple XML-RPC servers +by creating a server and either installing functions, a +class instance, or by extending the SimpleXMLRPCServer +class. + +It can also be used to handle XML-RPC requests in a CGI +environment using CGIXMLRPCRequestHandler. + +The Doc* classes can be used to create XML-RPC servers that +serve pydoc-style documentation in response to HTTP +GET requests. This documentation is dynamically generated +based on the functions and methods registered with the +server. + +A list of possible usage patterns follows: + +1. Install functions: + +server = SimpleXMLRPCServer(("localhost", 8000)) +server.register_function(pow) +server.register_function(lambda x,y: x+y, 'add') +server.serve_forever() + +2. Install an instance: + +class MyFuncs: + def __init__(self): + # make all of the sys functions available through sys.func_name + import sys + self.sys = sys + def _listMethods(self): + # implement this method so that system.listMethods + # knows to advertise the sys methods + return list_public_methods(self) + \ + ['sys.' + method for method in list_public_methods(self.sys)] + def pow(self, x, y): return pow(x, y) + def add(self, x, y) : return x + y + +server = SimpleXMLRPCServer(("localhost", 8000)) +server.register_introspection_functions() +server.register_instance(MyFuncs()) +server.serve_forever() + +3. 
Install an instance with custom dispatch method: + +class Math: + def _listMethods(self): + # this method must be present for system.listMethods + # to work + return ['add', 'pow'] + def _methodHelp(self, method): + # this method must be present for system.methodHelp + # to work + if method == 'add': + return "add(2,3) => 5" + elif method == 'pow': + return "pow(x, y[, z]) => number" + else: + # By convention, return empty + # string if no help is available + return "" + def _dispatch(self, method, params): + if method == 'pow': + return pow(*params) + elif method == 'add': + return params[0] + params[1] + else: + raise ValueError('bad method') + +server = SimpleXMLRPCServer(("localhost", 8000)) +server.register_introspection_functions() +server.register_instance(Math()) +server.serve_forever() + +4. Subclass SimpleXMLRPCServer: + +class MathServer(SimpleXMLRPCServer): + def _dispatch(self, method, params): + try: + # We are forcing the 'export_' prefix on methods that are + # callable through XML-RPC to prevent potential security + # problems + func = getattr(self, 'export_' + method) + except AttributeError: + raise Exception('method "%s" is not supported' % method) + else: + return func(*params) + + def export_add(self, x, y): + return x + y + +server = MathServer(("localhost", 8000)) +server.serve_forever() + +5. CGI script: + +server = CGIXMLRPCRequestHandler() +server.register_function(pow) +server.handle_request() +""" + +# Written by Brian Quinlan (brian@sweetapp.com). +# Based on code written by Fredrik Lundh. + +from xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode +from http.server import BaseHTTPRequestHandler +from functools import partial +from inspect import signature +import html +import http.server +import socketserver +import sys +import os +import re +import pydoc +import traceback +try: + import fcntl +except ImportError: + fcntl = None + +def resolve_dotted_attribute(obj, attr, allow_dotted_names=True): + """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d + + Resolves a dotted attribute name to an object. Raises + an AttributeError if any attribute in the chain starts with a '_'. + + If the optional allow_dotted_names argument is false, dots are not + supported and this function operates similar to getattr(obj, attr). + """ + + if allow_dotted_names: + attrs = attr.split('.') + else: + attrs = [attr] + + for i in attrs: + if i.startswith('_'): + raise AttributeError( + 'attempt to access private attribute "%s"' % i + ) + else: + obj = getattr(obj,i) + return obj + +def list_public_methods(obj): + """Returns a list of attribute strings, found in the specified + object, which represent callable attributes""" + + return [member for member in dir(obj) + if not member.startswith('_') and + callable(getattr(obj, member))] + +class SimpleXMLRPCDispatcher: + """Mix-in class that dispatches XML-RPC requests. + + This class is used to register XML-RPC method handlers + and then to dispatch them. This class doesn't need to be + instanced directly when used by SimpleXMLRPCServer but it + can be instanced when used by the MultiPathXMLRPCServer + """ + + def __init__(self, allow_none=False, encoding=None, + use_builtin_types=False): + self.funcs = {} + self.instance = None + self.allow_none = allow_none + self.encoding = encoding or 'utf-8' + self.use_builtin_types = use_builtin_types + + def register_instance(self, instance, allow_dotted_names=False): + """Registers an instance to respond to XML-RPC requests. + + Only one instance can be installed at a time. 
+ + If the registered instance has a _dispatch method then that + method will be called with the name of the XML-RPC method and + its parameters as a tuple + e.g. instance._dispatch('add',(2,3)) + + If the registered instance does not have a _dispatch method + then the instance will be searched to find a matching method + and, if found, will be called. Methods beginning with an '_' + are considered private and will not be called by + SimpleXMLRPCServer. + + If a registered function matches an XML-RPC request, then it + will be called instead of the registered instance. + + If the optional allow_dotted_names argument is true and the + instance does not have a _dispatch method, method names + containing dots are supported and resolved, as long as none of + the name segments start with an '_'. + + *** SECURITY WARNING: *** + + Enabling the allow_dotted_names options allows intruders + to access your module's global variables and may allow + intruders to execute arbitrary code on your machine. Only + use this option on a secure, closed network. + + """ + + self.instance = instance + self.allow_dotted_names = allow_dotted_names + + def register_function(self, function=None, name=None): + """Registers a function to respond to XML-RPC requests. + + The optional name argument can be used to set a Unicode name + for the function. + """ + # decorator factory + if function is None: + return partial(self.register_function, name=name) + + if name is None: + name = function.__name__ + self.funcs[name] = function + + return function + + def register_introspection_functions(self): + """Registers the XML-RPC introspection methods in the system + namespace. + + see http://xmlrpc.usefulinc.com/doc/reserved.html + """ + + self.funcs.update({'system.listMethods' : self.system_listMethods, + 'system.methodSignature' : self.system_methodSignature, + 'system.methodHelp' : self.system_methodHelp}) + + def register_multicall_functions(self): + """Registers the XML-RPC multicall method in the system + namespace. + + see http://www.xmlrpc.com/discuss/msgReader$1208""" + + self.funcs['system.multicall'] = self.system_multicall + + def _marshaled_dispatch(self, data, dispatch_method = None, path = None): + """Dispatches an XML-RPC method from marshalled (XML) data. + + XML-RPC methods are dispatched from the marshalled (XML) data + using the _dispatch method and the result is returned as + marshalled data. For backwards compatibility, a dispatch + function can be provided as an argument (see comment in + SimpleXMLRPCRequestHandler.do_POST) but overriding the + existing method through subclassing is the preferred means + of changing method dispatch behavior. 
+ """ + + try: + params, method = loads(data, use_builtin_types=self.use_builtin_types) + + # generate response + if dispatch_method is not None: + response = dispatch_method(method, params) + else: + response = self._dispatch(method, params) + # wrap response in a singleton tuple + response = (response,) + response = dumps(response, methodresponse=1, + allow_none=self.allow_none, encoding=self.encoding) + except Fault as fault: + response = dumps(fault, allow_none=self.allow_none, + encoding=self.encoding) + except BaseException as exc: + response = dumps( + Fault(1, "%s:%s" % (type(exc), exc)), + encoding=self.encoding, allow_none=self.allow_none, + ) + + return response.encode(self.encoding, 'xmlcharrefreplace') + + def system_listMethods(self): + """system.listMethods() => ['add', 'subtract', 'multiple'] + + Returns a list of the methods supported by the server.""" + + methods = set(self.funcs.keys()) + if self.instance is not None: + # Instance can implement _listMethod to return a list of + # methods + if hasattr(self.instance, '_listMethods'): + methods |= set(self.instance._listMethods()) + # if the instance has a _dispatch method then we + # don't have enough information to provide a list + # of methods + elif not hasattr(self.instance, '_dispatch'): + methods |= set(list_public_methods(self.instance)) + return sorted(methods) + + def system_methodSignature(self, method_name): + """system.methodSignature('add') => [double, int, int] + + Returns a list describing the signature of the method. In the + above example, the add method takes two integers as arguments + and returns a double result. + + This server does NOT support system.methodSignature.""" + + # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html + + return 'signatures not supported' + + def system_methodHelp(self, method_name): + """system.methodHelp('add') => "Adds two integers together" + + Returns a string containing documentation for the specified method.""" + + method = None + if method_name in self.funcs: + method = self.funcs[method_name] + elif self.instance is not None: + # Instance can implement _methodHelp to return help for a method + if hasattr(self.instance, '_methodHelp'): + return self.instance._methodHelp(method_name) + # if the instance has a _dispatch method then we + # don't have enough information to provide help + elif not hasattr(self.instance, '_dispatch'): + try: + method = resolve_dotted_attribute( + self.instance, + method_name, + self.allow_dotted_names + ) + except AttributeError: + pass + + # Note that we aren't checking that the method actually + # be a callable object of some kind + if method is None: + return "" + else: + return pydoc.getdoc(method) + + def system_multicall(self, call_list): + """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \ +[[4], ...] + + Allows the caller to package multiple XML-RPC calls into a single + request. + + See http://www.xmlrpc.com/discuss/msgReader$1208 + """ + + results = [] + for call in call_list: + method_name = call['methodName'] + params = call['params'] + + try: + # XXX A marshalling error in any response will fail the entire + # multicall. If someone cares they should fix this. 
+ results.append([self._dispatch(method_name, params)]) + except Fault as fault: + results.append( + {'faultCode' : fault.faultCode, + 'faultString' : fault.faultString} + ) + except BaseException as exc: + results.append( + {'faultCode' : 1, + 'faultString' : "%s:%s" % (type(exc), exc)} + ) + return results + + def _dispatch(self, method, params): + """Dispatches the XML-RPC method. + + XML-RPC calls are forwarded to a registered function that + matches the called XML-RPC method name. If no such function + exists then the call is forwarded to the registered instance, + if available. + + If the registered instance has a _dispatch method then that + method will be called with the name of the XML-RPC method and + its parameters as a tuple + e.g. instance._dispatch('add',(2,3)) + + If the registered instance does not have a _dispatch method + then the instance will be searched to find a matching method + and, if found, will be called. + + Methods beginning with an '_' are considered private and will + not be called. + """ + + try: + # call the matching registered function + func = self.funcs[method] + except KeyError: + pass + else: + if func is not None: + return func(*params) + raise Exception('method "%s" is not supported' % method) + + if self.instance is not None: + if hasattr(self.instance, '_dispatch'): + # call the `_dispatch` method on the instance + return self.instance._dispatch(method, params) + + # call the instance's method directly + try: + func = resolve_dotted_attribute( + self.instance, + method, + self.allow_dotted_names + ) + except AttributeError: + pass + else: + if func is not None: + return func(*params) + + raise Exception('method "%s" is not supported' % method) + +class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler): + """Simple XML-RPC request handler class. + + Handles all HTTP POST requests and attempts to decode them as + XML-RPC requests. + """ + + # Class attribute listing the accessible path components; + # paths not on this list will result in a 404 error. + rpc_paths = ('/', '/RPC2', '/pydoc.css') + + #if not None, encode responses larger than this, if possible + encode_threshold = 1400 #a common MTU + + #Override form StreamRequestHandler: full buffering of output + #and no Nagle. + wbufsize = -1 + disable_nagle_algorithm = True + + # a re to match a gzip Accept-Encoding + aepattern = re.compile(r""" + \s* ([^\s;]+) \s* #content-coding + (;\s* q \s*=\s* ([0-9\.]+))? #q + """, re.VERBOSE | re.IGNORECASE) + + def accept_encodings(self): + r = {} + ae = self.headers.get("Accept-Encoding", "") + for e in ae.split(","): + match = self.aepattern.match(e) + if match: + v = match.group(3) + v = float(v) if v else 1.0 + r[match.group(1)] = v + return r + + def is_rpc_path_valid(self): + if self.rpc_paths: + return self.path in self.rpc_paths + else: + # If .rpc_paths is empty, just assume all paths are legal + return True + + def do_POST(self): + """Handles the HTTP POST request. + + Attempts to interpret all HTTP POST requests as XML-RPC calls, + which are forwarded to the server's _dispatch method for handling. + """ + + # Check that the path is legal + if not self.is_rpc_path_valid(): + self.report_404() + return + + try: + # Get arguments by reading body of request. + # We read this in chunks to avoid straining + # socket.read(); around the 10 or 15Mb mark, some platforms + # begin to have problems (bug #792570). 
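+            # Editor's note (not upstream code): with the 10 MiB cap below, a
+            # 25 MiB request body is read as 10 + 10 + 5 MiB chunks, so no
+            # single rfile.read() call exceeds max_chunk_size.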
+ max_chunk_size = 10*1024*1024 + size_remaining = int(self.headers["content-length"]) + L = [] + while size_remaining: + chunk_size = min(size_remaining, max_chunk_size) + chunk = self.rfile.read(chunk_size) + if not chunk: + break + L.append(chunk) + size_remaining -= len(L[-1]) + data = b''.join(L) + + data = self.decode_request_content(data) + if data is None: + return #response has been sent + + # In previous versions of SimpleXMLRPCServer, _dispatch + # could be overridden in this class, instead of in + # SimpleXMLRPCDispatcher. To maintain backwards compatibility, + # check to see if a subclass implements _dispatch and dispatch + # using that method if present. + response = self.server._marshaled_dispatch( + data, getattr(self, '_dispatch', None), self.path + ) + except Exception as e: # This should only happen if the module is buggy + # internal error, report as HTTP server error + self.send_response(500) + + # Send information about the exception if requested + if hasattr(self.server, '_send_traceback_header') and \ + self.server._send_traceback_header: + self.send_header("X-exception", str(e)) + trace = traceback.format_exc() + trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII') + self.send_header("X-traceback", trace) + + self.send_header("Content-length", "0") + self.end_headers() + else: + self.send_response(200) + self.send_header("Content-type", "text/xml") + if self.encode_threshold is not None: + if len(response) > self.encode_threshold: + q = self.accept_encodings().get("gzip", 0) + if q: + try: + response = gzip_encode(response) + self.send_header("Content-Encoding", "gzip") + except NotImplementedError: + pass + self.send_header("Content-length", str(len(response))) + self.end_headers() + self.wfile.write(response) + + def decode_request_content(self, data): + #support gzip encoding of request + encoding = self.headers.get("content-encoding", "identity").lower() + if encoding == "identity": + return data + if encoding == "gzip": + try: + return gzip_decode(data) + except NotImplementedError: + self.send_response(501, "encoding %r not supported" % encoding) + except ValueError: + self.send_response(400, "error decoding gzip content") + else: + self.send_response(501, "encoding %r not supported" % encoding) + self.send_header("Content-length", "0") + self.end_headers() + + def report_404 (self): + # Report a 404 error + self.send_response(404) + response = b'No such page' + self.send_header("Content-type", "text/plain") + self.send_header("Content-length", str(len(response))) + self.end_headers() + self.wfile.write(response) + + def log_request(self, code='-', size='-'): + """Selectively log an accepted request.""" + + if self.server.logRequests: + BaseHTTPRequestHandler.log_request(self, code, size) + +class SimpleXMLRPCServer(socketserver.TCPServer, + SimpleXMLRPCDispatcher): + """Simple XML-RPC server. + + Simple XML-RPC server that allows functions and a single instance + to be installed to handle requests. The default implementation + attempts to dispatch XML-RPC calls to the functions or instance + installed in the server. Override the _dispatch method inherited + from SimpleXMLRPCDispatcher to change this behavior. + """ + + allow_reuse_address = True + allow_reuse_port = False + + # Warning: this is for debugging purposes only! 
Never set this to True in + # production code, as will be sending out sensitive information (exception + # and stack trace details) when exceptions are raised inside + # SimpleXMLRPCRequestHandler.do_POST + _send_traceback_header = False + + def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, + logRequests=True, allow_none=False, encoding=None, + bind_and_activate=True, use_builtin_types=False): + self.logRequests = logRequests + + SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types) + socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate) + + +class MultiPathXMLRPCServer(SimpleXMLRPCServer): + """Multipath XML-RPC Server + This specialization of SimpleXMLRPCServer allows the user to create + multiple Dispatcher instances and assign them to different + HTTP request paths. This makes it possible to run two or more + 'virtual XML-RPC servers' at the same port. + Make sure that the requestHandler accepts the paths in question. + """ + def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, + logRequests=True, allow_none=False, encoding=None, + bind_and_activate=True, use_builtin_types=False): + + SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none, + encoding, bind_and_activate, use_builtin_types) + self.dispatchers = {} + self.allow_none = allow_none + self.encoding = encoding or 'utf-8' + + def add_dispatcher(self, path, dispatcher): + self.dispatchers[path] = dispatcher + return dispatcher + + def get_dispatcher(self, path): + return self.dispatchers[path] + + def _marshaled_dispatch(self, data, dispatch_method = None, path = None): + try: + response = self.dispatchers[path]._marshaled_dispatch( + data, dispatch_method, path) + except BaseException as exc: + # report low level exception back to server + # (each dispatcher should have handled their own + # exceptions) + response = dumps( + Fault(1, "%s:%s" % (type(exc), exc)), + encoding=self.encoding, allow_none=self.allow_none) + response = response.encode(self.encoding, 'xmlcharrefreplace') + return response + +class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher): + """Simple handler for XML-RPC data passed through CGI.""" + + def __init__(self, allow_none=False, encoding=None, use_builtin_types=False): + SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types) + + def handle_xmlrpc(self, request_text): + """Handle a single XML-RPC request""" + + response = self._marshaled_dispatch(request_text) + + print('Content-Type: text/xml') + print('Content-Length: %d' % len(response)) + print() + sys.stdout.flush() + sys.stdout.buffer.write(response) + sys.stdout.buffer.flush() + + def handle_get(self): + """Handle a single HTTP GET request. + + Default implementation indicates an error because + XML-RPC uses the POST method. + """ + + code = 400 + message, explain = BaseHTTPRequestHandler.responses[code] + + response = http.server.DEFAULT_ERROR_MESSAGE % \ + { + 'code' : code, + 'message' : message, + 'explain' : explain + } + response = response.encode('utf-8') + print('Status: %d %s' % (code, message)) + print('Content-Type: %s' % http.server.DEFAULT_ERROR_CONTENT_TYPE) + print('Content-Length: %d' % len(response)) + print() + sys.stdout.flush() + sys.stdout.buffer.write(response) + sys.stdout.buffer.flush() + + def handle_request(self, request_text=None): + """Handle a single XML-RPC request passed through a CGI post method. + + If no XML data is given then it is read from stdin. 
The resulting
+        XML-RPC response is printed to stdout along with the correct HTTP
+        headers.
+        """
+
+        if request_text is None and \
+            os.environ.get('REQUEST_METHOD', None) == 'GET':
+            self.handle_get()
+        else:
+            # POST data is normally available through stdin
+            try:
+                length = int(os.environ.get('CONTENT_LENGTH', None))
+            except (ValueError, TypeError):
+                length = -1
+            if request_text is None:
+                request_text = sys.stdin.read(length)
+
+            self.handle_xmlrpc(request_text)
+
+
+# -----------------------------------------------------------------------------
+# Self documenting XML-RPC Server.
+
+class ServerHTMLDoc(pydoc.HTMLDoc):
+    """Class used to generate pydoc HTML document for a server"""
+
+    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
+        """Mark up some plain text, given a context of symbols to look for.
+        Each context dictionary maps object names to anchor names."""
+        escape = escape or self.escape
+        results = []
+        here = 0
+
+        # XXX Note that this regular expression does not allow for the
+        # hyperlinking of arbitrary strings being used as method
+        # names. Only methods with names consisting of word characters
+        # and '.'s are hyperlinked.
+        pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|'
+                                r'RFC[- ]?(\d+)|'
+                                r'PEP[- ]?(\d+)|'
+                                r'(self\.)?((?:\w|\.)+))\b')
+        while match := pattern.search(text, here):
+            start, end = match.span()
+            results.append(escape(text[here:start]))
+
+            all, scheme, rfc, pep, selfdot, name = match.groups()
+            if scheme:
+                url = escape(all).replace('"', '&quot;')
+                results.append('<a href="%s">%s</a>' % (url, url))
+            elif rfc:
+                url = 'https://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
+                results.append('<a href="%s">%s</a>' % (url, escape(all)))
+            elif pep:
+                url = 'https://peps.python.org/pep-%04d/' % int(pep)
+                results.append('<a href="%s">%s</a>' % (url, escape(all)))
+            elif text[end:end+1] == '(':
+                results.append(self.namelink(name, methods, funcs, classes))
+            elif selfdot:
+                results.append('self.<strong>%s</strong>' % name)
+            else:
+                results.append(self.namelink(name, classes))
+            here = end
+        results.append(escape(text[here:]))
+        return ''.join(results)
+
+    def docroutine(self, object, name, mod=None,
+                   funcs={}, classes={}, methods={}, cl=None):
+        """Produce HTML documentation for a function or method object."""
+
+        anchor = (cl and cl.__name__ or '') + '-' + name
+        note = ''
+
+        title = '<a name="%s"><strong>%s</strong></a>' % (
+            self.escape(anchor), self.escape(name))
+
+        if callable(object):
+            argspec = str(signature(object))
+        else:
+            argspec = '(...)'
+
+        if isinstance(object, tuple):
+            argspec = object[0] or argspec
+            docstring = object[1] or ""
+        else:
+            docstring = pydoc.getdoc(object)
+
+        decl = title + argspec + (note and self.grey(
+               '<font face="helvetica, arial">%s</font>' % note))
+
+        doc = self.markup(
+            docstring, self.preformat, funcs, classes, methods)
+        doc = doc and '<dd><tt>%s</tt></dd>' % doc
+        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
+
+    def docserver(self, server_name, package_documentation, methods):
+        """Produce HTML documentation for an XML-RPC server."""
+
+        fdict = {}
+        for key, value in methods.items():
+            fdict[key] = '#-' + key
+            fdict[value] = fdict[key]
+
+        server_name = self.escape(server_name)
+        head = '<big><big><strong>%s</strong></big></big>' % server_name
+        result = self.heading(head)
+
+        doc = self.markup(package_documentation, self.preformat, fdict)
+        doc = doc and '<tt>%s</tt>' % doc
+        result = result + '<p>%s</p>\n' % doc
+
+        contents = []
+        method_items = sorted(methods.items())
+        for key, value in method_items:
+            contents.append(self.docroutine(value, key, funcs=fdict))
+        result = result + self.bigsection(
+            'Methods', 'functions', ''.join(contents))
+
+        return result
+
+
+    def page(self, title, contents):
+        """Format an HTML page."""
+        css_path = "/pydoc.css"
+        css_link = (
+            '<link rel="stylesheet" type="text/css" href="%s">' %
+            css_path)
+        return '''\
+<!DOCTYPE>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Python: %s</title>
+%s</head><body>%s</body></html>''' % (title, css_link, contents)
+
+class XMLRPCDocGenerator:
+    """Generates documentation for an XML-RPC server.
+
+    This class is designed as mix-in and should not
+    be constructed directly.
+    """
+
+    def __init__(self):
+        # setup variables used for HTML documentation
+        self.server_name = 'XML-RPC Server Documentation'
+        self.server_documentation = \
+            "This server exports the following methods through the XML-RPC "\
+            "protocol."
+        self.server_title = 'XML-RPC Server Documentation'
+
+    def set_server_title(self, server_title):
+        """Set the HTML title of the generated server documentation"""
+
+        self.server_title = server_title
+
+    def set_server_name(self, server_name):
+        """Set the name of the generated HTML server documentation"""
+
+        self.server_name = server_name
+
+    def set_server_documentation(self, server_documentation):
+        """Set the documentation string for the entire server."""
+
+        self.server_documentation = server_documentation
+
+    def generate_html_documentation(self):
+        """generate_html_documentation() => html documentation for the server
+
+        Generates HTML documentation for the server using introspection for
+        installed functions and instances that do not implement the
+        _dispatch method. Alternatively, instances can choose to implement
+        the _get_method_argstring(method_name) method to provide the
+        argument string used in the documentation and the
+        _methodHelp(method_name) method to provide the help text used
+        in the documentation."""
+
+        methods = {}
+
+        for method_name in self.system_listMethods():
+            if method_name in self.funcs:
+                method = self.funcs[method_name]
+            elif self.instance is not None:
+                method_info = [None, None] # argspec, documentation
+                if hasattr(self.instance, '_get_method_argstring'):
+                    method_info[0] = self.instance._get_method_argstring(method_name)
+                if hasattr(self.instance, '_methodHelp'):
+                    method_info[1] = self.instance._methodHelp(method_name)
+
+                method_info = tuple(method_info)
+                if method_info != (None, None):
+                    method = method_info
+                elif not hasattr(self.instance, '_dispatch'):
+                    try:
+                        method = resolve_dotted_attribute(
+                                    self.instance,
+                                    method_name
+                                    )
+                    except AttributeError:
+                        method = method_info
+                else:
+                    method = method_info
+            else:
+                assert 0, "Could not find method in self.functions and no "\
+                          "instance installed"
+
+            methods[method_name] = method
+
+        documenter = ServerHTMLDoc()
+        documentation = documenter.docserver(
+                                self.server_name,
+                                self.server_documentation,
+                                methods
+                            )
+
+        return documenter.page(html.escape(self.server_title), documentation)
+
+class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
+    """XML-RPC and documentation request handler class.
+
+    Handles all HTTP POST requests and attempts to decode them as
+    XML-RPC requests.
+
+    Handles all HTTP GET requests and interprets them as requests
+    for documentation.
+ """ + + def _get_css(self, url): + path_here = os.path.dirname(os.path.realpath(__file__)) + css_path = os.path.join(path_here, "..", "pydoc_data", "_pydoc.css") + with open(css_path, mode="rb") as fp: + return fp.read() + + def do_GET(self): + """Handles the HTTP GET request. + + Interpret all HTTP GET requests as requests for server + documentation. + """ + # Check that the path is legal + if not self.is_rpc_path_valid(): + self.report_404() + return + + if self.path.endswith('.css'): + content_type = 'text/css' + response = self._get_css(self.path) + else: + content_type = 'text/html' + response = self.server.generate_html_documentation().encode('utf-8') + + self.send_response(200) + self.send_header('Content-Type', '%s; charset=UTF-8' % content_type) + self.send_header("Content-length", str(len(response))) + self.end_headers() + self.wfile.write(response) + +class DocXMLRPCServer( SimpleXMLRPCServer, + XMLRPCDocGenerator): + """XML-RPC and HTML documentation server. + + Adds the ability to serve server documentation to the capabilities + of SimpleXMLRPCServer. + """ + + def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler, + logRequests=True, allow_none=False, encoding=None, + bind_and_activate=True, use_builtin_types=False): + SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, + allow_none, encoding, bind_and_activate, + use_builtin_types) + XMLRPCDocGenerator.__init__(self) + +class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler, + XMLRPCDocGenerator): + """Handler for XML-RPC data and documentation requests passed through + CGI""" + + def handle_get(self): + """Handles the HTTP GET request. + + Interpret all HTTP GET requests as requests for server + documentation. + """ + + response = self.generate_html_documentation().encode('utf-8') + + print('Content-Type: text/html') + print('Content-Length: %d' % len(response)) + print() + sys.stdout.flush() + sys.stdout.buffer.write(response) + sys.stdout.buffer.flush() + + def __init__(self): + CGIXMLRPCRequestHandler.__init__(self) + XMLRPCDocGenerator.__init__(self) + + +if __name__ == '__main__': + import datetime + + class ExampleService: + def getData(self): + return '42' + + class currentTime: + @staticmethod + def getCurrentTime(): + return datetime.datetime.now() + + with SimpleXMLRPCServer(("localhost", 8000)) as server: + server.register_function(pow) + server.register_function(lambda x,y: x+y, 'add') + server.register_instance(ExampleService(), allow_dotted_names=True) + server.register_multicall_functions() + print('Serving XML-RPC on localhost port 8000') + print('It is advisable to run this example server within a secure, closed network.') + try: + server.serve_forever() + except KeyboardInterrupt: + print("\nKeyboard interrupt received, exiting.") + sys.exit(0) diff --git a/Python314_4_x64_Template/Lib/zipapp.py b/Python314_4_x64_Template/Lib/zipapp.py new file mode 100644 index 00000000..7a4ef96e --- /dev/null +++ b/Python314_4_x64_Template/Lib/zipapp.py @@ -0,0 +1,231 @@ +import contextlib +import os +import pathlib +import shutil +import stat +import sys +import zipfile + +__all__ = ['ZipAppError', 'create_archive', 'get_interpreter'] + + +# The __main__.py used if the users specifies "-m module:fn". +# Note that this will always be written as UTF-8 (module and +# function names can be non-ASCII in Python 3). +# We add a coding cookie even though UTF-8 is the default in Python 3 +# because the resulting archive may be intended to be run under Python 2. 
+MAIN_TEMPLATE = """\ +# -*- coding: utf-8 -*- +import {module} +{module}.{fn}() +""" + + +# The Windows launcher defaults to UTF-8 when parsing shebang lines if the +# file has no BOM. So use UTF-8 on Windows. +# On Unix, use the filesystem encoding. +if sys.platform.startswith('win'): + shebang_encoding = 'utf-8' +else: + shebang_encoding = sys.getfilesystemencoding() + + +class ZipAppError(ValueError): + pass + + +@contextlib.contextmanager +def _maybe_open(archive, mode): + if isinstance(archive, (str, os.PathLike)): + with open(archive, mode) as f: + yield f + else: + yield archive + + +def _write_file_prefix(f, interpreter): + """Write a shebang line.""" + if interpreter: + shebang = b'#!' + interpreter.encode(shebang_encoding) + b'\n' + f.write(shebang) + + +def _copy_archive(archive, new_archive, interpreter=None): + """Copy an application archive, modifying the shebang line.""" + with _maybe_open(archive, 'rb') as src: + # Skip the shebang line from the source. + # Read 2 bytes of the source and check if they are #!. + first_2 = src.read(2) + if first_2 == b'#!': + # Discard the initial 2 bytes and the rest of the shebang line. + first_2 = b'' + src.readline() + + with _maybe_open(new_archive, 'wb') as dst: + _write_file_prefix(dst, interpreter) + # If there was no shebang, "first_2" contains the first 2 bytes + # of the source file, so write them before copying the rest + # of the file. + dst.write(first_2) + shutil.copyfileobj(src, dst) + + if interpreter and isinstance(new_archive, str): + os.chmod(new_archive, os.stat(new_archive).st_mode | stat.S_IEXEC) + + +def create_archive(source, target=None, interpreter=None, main=None, + filter=None, compressed=False): + """Create an application archive from SOURCE. + + The SOURCE can be the name of a directory, or a filename or a file-like + object referring to an existing archive. + + The content of SOURCE is packed into an application archive in TARGET, + which can be a filename or a file-like object. If SOURCE is a directory, + TARGET can be omitted and will default to the name of SOURCE with .pyz + appended. + + The created application archive will have a shebang line specifying + that it should run with INTERPRETER (there will be no shebang line if + INTERPRETER is None), and a __main__.py which runs MAIN (if MAIN is + not specified, an existing __main__.py will be used). It is an error + to specify MAIN for anything other than a directory source with no + __main__.py, and it is an error to omit MAIN if the directory has no + __main__.py. + """ + # Are we copying an existing archive? + source_is_file = False + if hasattr(source, 'read') and hasattr(source, 'readline'): + source_is_file = True + else: + source = pathlib.Path(source) + if source.is_file(): + source_is_file = True + + if source_is_file: + _copy_archive(source, target, interpreter) + return + + # We are creating a new archive from a directory. + if not source.exists(): + raise ZipAppError("Source does not exist") + has_main = (source / '__main__.py').is_file() + if main and has_main: + raise ZipAppError( + "Cannot specify entry point if the source has __main__.py") + if not (main or has_main): + raise ZipAppError("Archive has no entry point") + + main_py = None + if main: + # Check that main has the right format. 
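+        # Editor's note (not upstream code): a hypothetical "pkg.mod:func"
+        # splits into mod="pkg.mod", sep=":", fn="func"; every dotted part
+        # must be an identifier, so "pkg.mod:run" passes the check below
+        # while "pkg/mod:run" or "pkg.mod:" raises ZipAppError.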
+ mod, sep, fn = main.partition(':') + mod_ok = all(part.isidentifier() for part in mod.split('.')) + fn_ok = all(part.isidentifier() for part in fn.split('.')) + if not (sep == ':' and mod_ok and fn_ok): + raise ZipAppError("Invalid entry point: " + main) + main_py = MAIN_TEMPLATE.format(module=mod, fn=fn) + + if target is None: + target = source.with_suffix('.pyz') + elif not hasattr(target, 'write'): + target = pathlib.Path(target) + + # Create the list of files to add to the archive now, in case + # the target is being created in the source directory - we + # don't want the target being added to itself + files_to_add = {} + for path in sorted(source.rglob('*')): + relative_path = path.relative_to(source) + if filter is None or filter(relative_path): + files_to_add[path] = relative_path + + # The target cannot be in the list of files to add. If it were, we'd + # end up overwriting the source file and writing the archive into + # itself, which is an error. We therefore check for that case and + # provide a helpful message for the user. + + # Note that we only do a simple path equality check. This won't + # catch every case, but it will catch the common case where the + # source is the CWD and the target is a file in the CWD. More + # thorough checks don't provide enough value to justify the extra + # cost. + + # If target is a file-like object, it will simply fail to compare + # equal to any of the entries in files_to_add, so there's no need + # to add a special check for that. + if target in files_to_add: + raise ZipAppError( + f"The target archive {target} overwrites one of the source files.") + + with _maybe_open(target, 'wb') as fd: + _write_file_prefix(fd, interpreter) + compression = (zipfile.ZIP_DEFLATED if compressed else + zipfile.ZIP_STORED) + with zipfile.ZipFile(fd, 'w', compression=compression) as z: + for path, relative_path in files_to_add.items(): + z.write(path, relative_path.as_posix()) + if main_py: + z.writestr('__main__.py', main_py.encode('utf-8')) + + if interpreter and not hasattr(target, 'write'): + target.chmod(target.stat().st_mode | stat.S_IEXEC) + + +def get_interpreter(archive): + with _maybe_open(archive, 'rb') as f: + if f.read(2) == b'#!': + return f.readline().strip().decode(shebang_encoding) + + +def main(args=None): + """Run the zipapp command line interface. + + The ARGS parameter lets you specify the argument list directly. + Omitting ARGS (or setting it to None) works as for argparse, using + sys.argv[1:] as the argument list. + """ + import argparse + + parser = argparse.ArgumentParser(color=True) + parser.add_argument('--output', '-o', default=None, + help="The name of the output archive. " + "Required if SOURCE is an archive.") + parser.add_argument('--python', '-p', default=None, + help="The name of the Python interpreter to use " + "(default: no shebang line).") + parser.add_argument('--main', '-m', default=None, + help="The main function of the application " + "(default: use an existing __main__.py).") + parser.add_argument('--compress', '-c', action='store_true', + help="Compress files with the deflate method. " + "Files are stored uncompressed by default.") + parser.add_argument('--info', default=False, action='store_true', + help="Display the interpreter from the archive.") + parser.add_argument('source', + help="Source directory (or existing archive).") + + args = parser.parse_args(args) + + # Handle `python -m zipapp archive.pyz --info`. 
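+    # Editor's note (not upstream code): for an archive built with
+    # --python "/usr/bin/env python3" this prints
+    # "Interpreter: /usr/bin/env python3"; without a shebang it prints
+    # "Interpreter: " and exits with status 0 either way.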
+ if args.info: + if not os.path.isfile(args.source): + raise SystemExit("Can only get info for an archive file") + interpreter = get_interpreter(args.source) + print("Interpreter: {}".format(interpreter or "")) + sys.exit(0) + + if os.path.isfile(args.source): + if args.output is None or (os.path.exists(args.output) and + os.path.samefile(args.source, args.output)): + raise SystemExit("In-place editing of archives is not supported") + if args.main: + raise SystemExit("Cannot change the main function when copying") + + create_archive(args.source, args.output, + interpreter=args.python, main=args.main, + compressed=args.compress) + + +if __name__ == '__main__': + main() diff --git a/Python314_4_x64_Template/Lib/zipfile/__init__.py b/Python314_4_x64_Template/Lib/zipfile/__init__.py new file mode 100644 index 00000000..19aea290 --- /dev/null +++ b/Python314_4_x64_Template/Lib/zipfile/__init__.py @@ -0,0 +1,2435 @@ +""" +Read and write ZIP files. + +XXX references to utf-8 need further investigation. +""" +import binascii +import importlib.util +import io +import os +import shutil +import stat +import struct +import sys +import threading +import time + +try: + import zlib # We may need its compression method + crc32 = zlib.crc32 +except ImportError: + zlib = None + crc32 = binascii.crc32 + +try: + import bz2 # We may need its compression method +except ImportError: + bz2 = None + +try: + import lzma # We may need its compression method +except ImportError: + lzma = None + +try: + from compression import zstd # We may need its compression method +except ImportError: + zstd = None + +__all__ = ["BadZipFile", "BadZipfile", "error", + "ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA", + "ZIP_ZSTANDARD", "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", + "LargeZipFile", "Path"] + +class BadZipFile(Exception): + pass + + +class LargeZipFile(Exception): + """ + Raised when writing a zipfile, the zipfile requires ZIP64 extensions + and those extensions are disabled. + """ + +error = BadZipfile = BadZipFile # Pre-3.2 compatibility names + + +ZIP64_LIMIT = (1 << 31) - 1 +ZIP_FILECOUNT_LIMIT = (1 << 16) - 1 +ZIP_MAX_COMMENT = (1 << 16) - 1 + +# constants for Zip file compression methods +ZIP_STORED = 0 +ZIP_DEFLATED = 8 +ZIP_BZIP2 = 12 +ZIP_LZMA = 14 +ZIP_ZSTANDARD = 93 +# Other ZIP compression methods not supported + +DEFAULT_VERSION = 20 +ZIP64_VERSION = 45 +BZIP2_VERSION = 46 +LZMA_VERSION = 63 +ZSTANDARD_VERSION = 63 +# we recognize (but not necessarily support) all features up to that version +MAX_EXTRACT_VERSION = 63 + +# Below are some formats and associated data for reading/writing headers using +# the struct module. 
The names and structures of headers/records are those used +# in the PKWARE description of the ZIP file format: +# http://www.pkware.com/documents/casestudies/APPNOTE.TXT +# (URL valid as of January 2008) + +# The "end of central directory" structure, magic number, size, and indices +# (section V.I in the format document) +structEndArchive = b"<4s4H2LH" +stringEndArchive = b"PK\005\006" +sizeEndCentDir = struct.calcsize(structEndArchive) + +_ECD_SIGNATURE = 0 +_ECD_DISK_NUMBER = 1 +_ECD_DISK_START = 2 +_ECD_ENTRIES_THIS_DISK = 3 +_ECD_ENTRIES_TOTAL = 4 +_ECD_SIZE = 5 +_ECD_OFFSET = 6 +_ECD_COMMENT_SIZE = 7 +# These last two indices are not part of the structure as defined in the +# spec, but they are used internally by this module as a convenience +_ECD_COMMENT = 8 +_ECD_LOCATION = 9 + +# The "central directory" structure, magic number, size, and indices +# of entries in the structure (section V.F in the format document) +structCentralDir = "<4s4B4HL2L5H2L" +stringCentralDir = b"PK\001\002" +sizeCentralDir = struct.calcsize(structCentralDir) + +# indexes of entries in the central directory structure +_CD_SIGNATURE = 0 +_CD_CREATE_VERSION = 1 +_CD_CREATE_SYSTEM = 2 +_CD_EXTRACT_VERSION = 3 +_CD_EXTRACT_SYSTEM = 4 +_CD_FLAG_BITS = 5 +_CD_COMPRESS_TYPE = 6 +_CD_TIME = 7 +_CD_DATE = 8 +_CD_CRC = 9 +_CD_COMPRESSED_SIZE = 10 +_CD_UNCOMPRESSED_SIZE = 11 +_CD_FILENAME_LENGTH = 12 +_CD_EXTRA_FIELD_LENGTH = 13 +_CD_COMMENT_LENGTH = 14 +_CD_DISK_NUMBER_START = 15 +_CD_INTERNAL_FILE_ATTRIBUTES = 16 +_CD_EXTERNAL_FILE_ATTRIBUTES = 17 +_CD_LOCAL_HEADER_OFFSET = 18 + +# General purpose bit flags +# Zip Appnote: 4.4.4 general purpose bit flag: (2 bytes) +_MASK_ENCRYPTED = 1 << 0 +# Bits 1 and 2 have different meanings depending on the compression used. +_MASK_COMPRESS_OPTION_1 = 1 << 1 +# _MASK_COMPRESS_OPTION_2 = 1 << 2 +# _MASK_USE_DATA_DESCRIPTOR: If set, crc-32, compressed size and uncompressed +# size are zero in the local header and the real values are written in the data +# descriptor immediately following the compressed data. +_MASK_USE_DATA_DESCRIPTOR = 1 << 3 +# Bit 4: Reserved for use with compression method 8, for enhanced deflating. +# _MASK_RESERVED_BIT_4 = 1 << 4 +_MASK_COMPRESSED_PATCH = 1 << 5 +_MASK_STRONG_ENCRYPTION = 1 << 6 +# _MASK_UNUSED_BIT_7 = 1 << 7 +# _MASK_UNUSED_BIT_8 = 1 << 8 +# _MASK_UNUSED_BIT_9 = 1 << 9 +# _MASK_UNUSED_BIT_10 = 1 << 10 +_MASK_UTF_FILENAME = 1 << 11 +# Bit 12: Reserved by PKWARE for enhanced compression. 
+# _MASK_RESERVED_BIT_12 = 1 << 12
+# _MASK_ENCRYPTED_CENTRAL_DIR = 1 << 13
+# Bit 14, 15: Reserved by PKWARE
+# _MASK_RESERVED_BIT_14 = 1 << 14
+# _MASK_RESERVED_BIT_15 = 1 << 15
+
+# The "local file header" structure, magic number, size, and indices
+# (section V.A in the format document)
+structFileHeader = "<4s2B4HL2L2H"
+stringFileHeader = b"PK\003\004"
+sizeFileHeader = struct.calcsize(structFileHeader)
+
+_FH_SIGNATURE = 0
+_FH_EXTRACT_VERSION = 1
+_FH_EXTRACT_SYSTEM = 2
+_FH_GENERAL_PURPOSE_FLAG_BITS = 3
+_FH_COMPRESSION_METHOD = 4
+_FH_LAST_MOD_TIME = 5
+_FH_LAST_MOD_DATE = 6
+_FH_CRC = 7
+_FH_COMPRESSED_SIZE = 8
+_FH_UNCOMPRESSED_SIZE = 9
+_FH_FILENAME_LENGTH = 10
+_FH_EXTRA_FIELD_LENGTH = 11
+
+# The "Zip64 end of central directory locator" structure, magic number, and size
+structEndArchive64Locator = "<4sLQL"
+stringEndArchive64Locator = b"PK\x06\x07"
+sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
+
+# The "Zip64 end of central directory" record, magic number, size, and indices
+# (section V.G in the format document)
+structEndArchive64 = "<4sQ2H2L4Q"
+stringEndArchive64 = b"PK\x06\x06"
+sizeEndCentDir64 = struct.calcsize(structEndArchive64)
+
+_CD64_SIGNATURE = 0
+_CD64_DIRECTORY_RECSIZE = 1
+_CD64_CREATE_VERSION = 2
+_CD64_EXTRACT_VERSION = 3
+_CD64_DISK_NUMBER = 4
+_CD64_DISK_NUMBER_START = 5
+_CD64_NUMBER_ENTRIES_THIS_DISK = 6
+_CD64_NUMBER_ENTRIES_TOTAL = 7
+_CD64_DIRECTORY_SIZE = 8
+_CD64_OFFSET_START_CENTDIR = 9
+
+_DD_SIGNATURE = 0x08074b50
+
+
+class _Extra(bytes):
+    FIELD_STRUCT = struct.Struct('<HH')
+
+    def __new__(cls, val, id=None):
+        self = super().__new__(cls, val)
+        self.id = id
+        return self
+
+    @classmethod
+    def read_one(cls, raw):
+        try:
+            xid, xlen = cls.FIELD_STRUCT.unpack(raw[:4])
+        except struct.error:
+            xid = None
+            xlen = 0
+        return cls(raw[:4+xlen], xid), raw[4+xlen:]
+
+    @classmethod
+    def split(cls, data):
+        # use memoryview for zero-copy slices
+        rest = memoryview(data)
+        while rest:
+            extra, rest = _Extra.read_one(rest)
+            yield extra
+
+    @classmethod
+    def strip(cls, data, xids):
+        """Remove Extra fields with specified IDs."""
+        return b''.join(
+            bytes(field)
+            for field in cls.split(data)
+            if field.id not in xids
+        )
+
+
+def _check_zipfile(fp):
+    try:
+        endrec = _EndRecData(fp)
+        if endrec:
+            if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and \
+               endrec[_ECD_OFFSET] == 0:
+                return True     # Empty zipfiles are still zipfiles
+            elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
+                # Central directory is on the same disk
+                fp.seek(sum(_handle_prepended_data(endrec)))
+                if endrec[_ECD_SIZE] >= sizeCentralDir:
+                    data = fp.read(sizeCentralDir)   # CD is where we expect it to be
+                    if len(data) == sizeCentralDir:
+                        centdir = struct.unpack(structCentralDir, data) # CD is the right size
+                        if centdir[_CD_SIGNATURE] == stringCentralDir:
+                            return True # First central directory entry has correct magic number
+    except OSError:
+        pass
+    return False
+
+def is_zipfile(filename):
+    """Quickly see if a file is a ZIP file by checking the magic number.
+
+    The filename argument may be a file or file-like object too.
+    """
+    result = False
+    try:
+        if hasattr(filename, "read"):
+            pos = filename.tell()
+            result = _check_zipfile(fp=filename)
+            filename.seek(pos)
+        else:
+            with open(filename, "rb") as fp:
+                result = _check_zipfile(fp)
+    except (OSError, BadZipFile):
+        pass
+    return result
+
+def _handle_prepended_data(endrec, debug=0):
+    size_cd = endrec[_ECD_SIZE]             # bytes in central directory
+    offset_cd = endrec[_ECD_OFFSET]         # offset of central directory
+
+    # "concat" is zero, unless zip was concatenated to another file
+    concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
+
+    if debug > 2:
+        inferred = concat + offset_cd
+        print("given, inferred, offset", offset_cd, inferred, concat)
+
+    return offset_cd, concat
+
+def _EndRecData64(fpin, offset, endrec):
+    """
+    Read the ZIP64 end-of-archive records and use that to update endrec
+    """
+    offset -= sizeEndCentDir64Locator
+    if offset < 0:
+        # The file is not large enough to contain a ZIP64
+        # end-of-archive record, so just return the end record we were given.
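+        # Editor's note (not upstream code): sizeEndCentDir64Locator is 20
+        # bytes ('<4sLQL' = 4 + 4 + 8 + 4), so a file shorter than the EOCD
+        # plus 20 bytes cannot hold a ZIP64 locator at all.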
+ return endrec + fpin.seek(offset) + data = fpin.read(sizeEndCentDir64Locator) + if len(data) != sizeEndCentDir64Locator: + raise OSError("Unknown I/O error") + sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data) + if sig != stringEndArchive64Locator: + return endrec + + if diskno != 0 or disks > 1: + raise BadZipFile("zipfiles that span multiple disks are not supported") + + offset -= sizeEndCentDir64 + if reloff > offset: + raise BadZipFile("Corrupt zip64 end of central directory locator") + # First, check the assumption that there is no prepended data. + fpin.seek(reloff) + extrasz = offset - reloff + data = fpin.read(sizeEndCentDir64) + if len(data) != sizeEndCentDir64: + raise OSError("Unknown I/O error") + if not data.startswith(stringEndArchive64) and reloff != offset: + # Since we already have seen the Zip64 EOCD Locator, it's + # possible we got here because there is prepended data. + # Assume no 'zip64 extensible data' + fpin.seek(offset) + extrasz = 0 + data = fpin.read(sizeEndCentDir64) + if len(data) != sizeEndCentDir64: + raise OSError("Unknown I/O error") + if not data.startswith(stringEndArchive64): + raise BadZipFile("Zip64 end of central directory record not found") + + sig, sz, create_version, read_version, disk_num, disk_dir, \ + dircount, dircount2, dirsize, diroffset = \ + struct.unpack(structEndArchive64, data) + if (diroffset + dirsize != reloff or + sz + 12 != sizeEndCentDir64 + extrasz): + raise BadZipFile("Corrupt zip64 end of central directory record") + + # Update the original endrec using data from the ZIP64 record + endrec[_ECD_SIGNATURE] = sig + endrec[_ECD_DISK_NUMBER] = disk_num + endrec[_ECD_DISK_START] = disk_dir + endrec[_ECD_ENTRIES_THIS_DISK] = dircount + endrec[_ECD_ENTRIES_TOTAL] = dircount2 + endrec[_ECD_SIZE] = dirsize + endrec[_ECD_OFFSET] = diroffset + endrec[_ECD_LOCATION] = offset - extrasz + return endrec + + +def _EndRecData(fpin): + """Return data from the "End of Central Directory" record, or None. + + The data is a list of the nine items in the ZIP "End of central dir" + record followed by a tenth item, the file seek offset of this record.""" + + # Determine file size + fpin.seek(0, 2) + filesize = fpin.tell() + + # Check to see if this is ZIP file with no archive comment (the + # "end of central directory" structure should be the last item in the + # file if this is the case). + try: + fpin.seek(-sizeEndCentDir, 2) + except OSError: + return None + data = fpin.read(sizeEndCentDir) + if (len(data) == sizeEndCentDir and + data[0:4] == stringEndArchive and + data[-2:] == b"\000\000"): + # the signature is correct and there's no comment, unpack structure + endrec = struct.unpack(structEndArchive, data) + endrec=list(endrec) + + # Append a blank comment and record start offset + endrec.append(b"") + endrec.append(filesize - sizeEndCentDir) + + # Try to read the "Zip64 end of central directory" structure + return _EndRecData64(fpin, filesize - sizeEndCentDir, endrec) + + # Either this is not a ZIP file, or it is a ZIP file with an archive + # comment. Search the end of the file for the "end of central directory" + # record signature. The comment is the last item in the ZIP file and may be + # up to 64K long. It is assumed that the "end of central directory" magic + # number does not appear in the comment. 
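+    # Editor's note (not upstream code): ZIP_MAX_COMMENT is 65535 and
+    # sizeEndCentDir is 22, so the backward search below scans at most the
+    # final 65557 bytes of the file (clamped to offset 0 for small files).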
+ maxCommentStart = max(filesize - ZIP_MAX_COMMENT - sizeEndCentDir, 0) + fpin.seek(maxCommentStart, 0) + data = fpin.read(ZIP_MAX_COMMENT + sizeEndCentDir) + start = data.rfind(stringEndArchive) + if start >= 0: + # found the magic number; attempt to unpack and interpret + recData = data[start:start+sizeEndCentDir] + if len(recData) != sizeEndCentDir: + # Zip file is corrupted. + return None + endrec = list(struct.unpack(structEndArchive, recData)) + commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file + comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize] + endrec.append(comment) + endrec.append(maxCommentStart + start) + + # Try to read the "Zip64 end of central directory" structure + return _EndRecData64(fpin, maxCommentStart + start, endrec) + + # Unable to find a valid end of central directory structure + return None + +def _sanitize_filename(filename): + """Terminate the file name at the first null byte and + ensure paths always use forward slashes as the directory separator.""" + + # Terminate the file name at the first null byte. Null bytes in file + # names are used as tricks by viruses in archives. + null_byte = filename.find(chr(0)) + if null_byte >= 0: + filename = filename[0:null_byte] + # This is used to ensure paths in generated ZIP files always use + # forward slashes as the directory separator, as required by the + # ZIP format specification. + if os.sep != "/" and os.sep in filename: + filename = filename.replace(os.sep, "/") + if os.altsep and os.altsep != "/" and os.altsep in filename: + filename = filename.replace(os.altsep, "/") + return filename + + +class ZipInfo: + """Class with attributes describing each file in the ZIP archive.""" + + __slots__ = ( + 'orig_filename', + 'filename', + 'date_time', + 'compress_type', + 'compress_level', + 'comment', + 'extra', + 'create_system', + 'create_version', + 'extract_version', + 'reserved', + 'flag_bits', + 'volume', + 'internal_attr', + 'external_attr', + 'header_offset', + 'CRC', + 'compress_size', + 'file_size', + '_raw_time', + '_end_offset', + ) + + def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)): + self.orig_filename = filename # Original file name in archive + + # Terminate the file name at the first null byte and + # ensure paths always use forward slashes as the directory separator. 
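+        # For example, a Windows-style r"dir\sub\name.txt" is stored as
+        # "dir/sub/name.txt", and "evil.txt\x00.exe" is truncated at the
+        # null byte to "evil.txt".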
+ filename = _sanitize_filename(filename) + + self.filename = filename # Normalized file name + self.date_time = date_time # year, month, day, hour, min, sec + + if date_time[0] < 1980: + raise ValueError('ZIP does not support timestamps before 1980') + + # Standard values: + self.compress_type = ZIP_STORED # Type of compression for the file + self.compress_level = None # Level for the compressor + self.comment = b"" # Comment for each file + self.extra = b"" # ZIP extra data + if sys.platform == 'win32': + self.create_system = 0 # System which created ZIP archive + else: + # Assume everything else is unix-y + self.create_system = 3 # System which created ZIP archive + self.create_version = DEFAULT_VERSION # Version which created ZIP archive + self.extract_version = DEFAULT_VERSION # Version needed to extract archive + self.reserved = 0 # Must be zero + self.flag_bits = 0 # ZIP flag bits + self.volume = 0 # Volume number of file header + self.internal_attr = 0 # Internal attributes + self.external_attr = 0 # External file attributes + self.compress_size = 0 # Size of the compressed file + self.file_size = 0 # Size of the uncompressed file + self._end_offset = None # Start of the next local header or central directory + # Other attributes are set by class ZipFile: + # header_offset Byte offset to the file header + # CRC CRC-32 of the uncompressed file + + # Maintain backward compatibility with the old protected attribute name. + @property + def _compresslevel(self): + return self.compress_level + + @_compresslevel.setter + def _compresslevel(self, value): + self.compress_level = value + + def __repr__(self): + result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)] + if self.compress_type != ZIP_STORED: + result.append(' compress_type=%s' % + compressor_names.get(self.compress_type, + self.compress_type)) + hi = self.external_attr >> 16 + lo = self.external_attr & 0xFFFF + if hi: + result.append(' filemode=%r' % stat.filemode(hi)) + if lo: + result.append(' external_attr=%#x' % lo) + isdir = self.is_dir() + if not isdir or self.file_size: + result.append(' file_size=%r' % self.file_size) + if ((not isdir or self.compress_size) and + (self.compress_type != ZIP_STORED or + self.file_size != self.compress_size)): + result.append(' compress_size=%r' % self.compress_size) + result.append('>') + return ''.join(result) + + def FileHeader(self, zip64=None): + """Return the per-file header as a bytes object. + + When the optional zip64 arg is None rather than a bool, we will + decide based upon the file_size and compress_size, if known, + False otherwise. + """ + dt = self.date_time + dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] + dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) + if self.flag_bits & _MASK_USE_DATA_DESCRIPTOR: + # Set these to zero because we write them after the file data + CRC = compress_size = file_size = 0 + else: + CRC = self.CRC + compress_size = self.compress_size + file_size = self.file_size + + extra = self.extra + + min_version = 0 + if zip64 is None: + # We always explicitly pass zip64 within this module.... This + # remains for anyone using ZipInfo.FileHeader as a public API. 
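+            # For example, a ZipInfo whose sizes are still the default 0
+            # gets a plain local header here, while one already known to
+            # exceed ZIP64_LIMIT gets the ZIP64 form.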
+            zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
+        if zip64:
+            fmt = '<HHQQ'
+            extra = extra + struct.pack(fmt,
+                                        1, struct.calcsize(fmt)-4, file_size, compress_size)
+        if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
+            if not zip64:
+                raise LargeZipFile("Filesize would require ZIP64 extensions")
+            # File is larger than what fits into a 4 byte integer,
+            # fall back to the ZIP64 extension
+            file_size = 0xffffffff
+            compress_size = 0xffffffff
+            min_version = ZIP64_VERSION
+
+        if self.compress_type == ZIP_BZIP2:
+            min_version = max(BZIP2_VERSION, min_version)
+        elif self.compress_type == ZIP_LZMA:
+            min_version = max(LZMA_VERSION, min_version)
+
+        self.extract_version = max(min_version, self.extract_version)
+        self.create_version = max(min_version, self.create_version)
+        filename, flag_bits = self._encodeFilenameFlags()
+        header = struct.pack(structFileHeader, stringFileHeader,
+                             self.extract_version, self.reserved, flag_bits,
+                             self.compress_type, dostime, dosdate, CRC,
+                             compress_size, file_size,
+                             len(filename), len(extra))
+        return header + filename + extra
+
+    def _encodeFilenameFlags(self):
+        try:
+            return self.filename.encode('ascii'), self.flag_bits
+        except UnicodeEncodeError:
+            return self.filename.encode('utf-8'), self.flag_bits | _MASK_UTF_FILENAME
+
+    def _decodeExtra(self, filename_crc):
+        # Try to decode the extra field.
+        extra = self.extra
+        unpack = struct.unpack
+        while len(extra) >= 4:
+            tp, ln = unpack('<HH', extra[:4])
+            if ln+4 > len(extra):
+                raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
+            if tp == 0x0001:
+                data = extra[4:ln+4]
+                # ZIP64 extension (large files and/or large archives)
+                try:
+                    if self.file_size in (0xFFFF_FFFF_FFFF_FFFF, 0xFFFF_FFFF):
+                        field = "File size"
+                        self.file_size, = unpack('<Q', data[:8])
+                        data = data[8:]
+                    if self.compress_size == 0xFFFF_FFFF:
+                        field = "Compress size"
+                        self.compress_size, = unpack('<Q', data[:8])
+                        data = data[8:]
+                    if self.header_offset == 0xFFFF_FFFF:
+                        field = "Header offset"
+                        self.header_offset, = unpack('<Q', data[:8])
+                        data = data[8:]
+                except struct.error:
+                    raise BadZipFile(f"Corrupt zip64 extra field. "
+                                     f"{field} not found.") from None
+            elif tp == 0x7075:
+                data = extra[4:ln+4]
+                # Unicode Path Extra Field
+                try:
+                    up_version, up_name_crc = unpack('<BL', data[:5])
+                    if up_version == 1 and up_name_crc == filename_crc:
+                        up_unicode_name = data[5:].decode('utf-8')
+                        if up_unicode_name:
+                            self.filename = _sanitize_filename(up_unicode_name)
+                        else:
+                            import warnings
+                            warnings.warn("Empty unicode path extra field (0x7075)", stacklevel=2)
+                except struct.error:
+                    raise BadZipFile("Corrupt unicode path extra field (0x7075)") from None
+                except UnicodeDecodeError:
+                    raise BadZipFile('Corrupt unicode path extra field (0x7075). Path not decodable using UTF-8.') from None
+
+            extra = extra[ln+4:]
+
+    @classmethod
+    def from_file(cls, filename, arcname=None, *, strict_timestamps=True):
+        """Construct an appropriate ZipInfo for a file on the filesystem.
+
+        filename should be the path to a file or directory on the filesystem.
+
+        arcname is the name which it will have within the archive (by default,
+        this will be the same as filename, but without a drive letter and with
+        leading path separators removed).
+        """
+        if isinstance(filename, os.PathLike):
+            filename = os.fspath(filename)
+        st = os.stat(filename)
+        isdir = stat.S_ISDIR(st.st_mode)
+        mtime = time.localtime(st.st_mtime)
+        date_time = mtime[0:6]
+        if not strict_timestamps and date_time[0] < 1980:
+            date_time = (1980, 1, 1, 0, 0, 0)
+        elif not strict_timestamps and date_time[0] > 2107:
+            date_time = (2107, 12, 31, 23, 59, 59)
+        # Create ZipInfo instance to store file information
+        if arcname is None:
+            arcname = filename
+        arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
+        while arcname[0] in (os.sep, os.altsep):
+            arcname = arcname[1:]
+        if isdir:
+            arcname += '/'
+        zinfo = cls(arcname, date_time)
+        zinfo.external_attr = (st.st_mode & 0xFFFF) << 16  # Unix attributes
+        if isdir:
+            zinfo.file_size = 0
+            zinfo.external_attr |= 0x10  # MS-DOS directory flag
+        else:
+            zinfo.file_size = st.st_size
+
+        return zinfo
+
+    def _for_archive(self, archive):
+        """Resolve suitable defaults from the archive.
+
+        Resolve the date_time, compression attributes, and external attributes
+        to suitable defaults as used by :method:`ZipFile.writestr`.
+
+        Return self.
+        """
+        # gh-91279: Set the SOURCE_DATE_EPOCH to a specific timestamp
+        epoch = os.environ.get('SOURCE_DATE_EPOCH')
+        get_time = int(epoch) if epoch else time.time()
+        self.date_time = time.localtime(get_time)[:6]
+
+        self.compress_type = archive.compression
+        self.compress_level = archive.compresslevel
+        if self.filename.endswith('/'):  # pragma: no cover
+            self.external_attr = 0o40775 << 16  # drwxrwxr-x
+            self.external_attr |= 0x10  # MS-DOS directory flag
+        else:
+            self.external_attr = 0o600 << 16  # ?rw-------
+        return self
+
+    def is_dir(self):
+        """Return True if this archive member is a directory."""
+        if self.filename.endswith('/'):
+            return True
+        # The ZIP format specification requires to use forward slashes
+        # as the directory separator, but in practice some ZIP files
+        # created on Windows can use backward slashes.  For compatibility
+        # with the extraction code which already handles this:
+        if os.path.altsep:
+            return self.filename.endswith((os.path.sep, os.path.altsep))
+        return False
+
+
+# ZIP encryption uses the CRC32 one-byte primitive for scrambling some
+# internal keys. We noticed that a direct implementation is faster than
+# relying on binascii.crc32().
+
+_crctable = None
+def _gen_crc(crc):
+    for j in range(8):
+        if crc & 1:
+            crc = (crc >> 1) ^ 0xEDB88320
+        else:
+            crc >>= 1
+    return crc
+
+# ZIP supports a password-based form of encryption. Even though known
+# plaintext attacks have been found against it, it is still useful
+# to be able to get data out of such a file.
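+#
+# The cipher keeps three 32-bit keys, updated after every byte via the
+# CRC32 primitive above; a byte is decrypted as, for example:
+#     k = key2 | 2
+#     plain = cipher ^ (((k * (k ^ 1)) >> 8) & 0xFF)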
+#
+# Usage:
+#     zd = _ZipDecrypter(mypwd)
+#     plain_bytes = zd(cypher_bytes)
+
+def _ZipDecrypter(pwd):
+    key0 = 305419896
+    key1 = 591751049
+    key2 = 878082192
+
+    global _crctable
+    if _crctable is None:
+        _crctable = list(map(_gen_crc, range(256)))
+    crctable = _crctable
+
+    def crc32(ch, crc):
+        """Compute the CRC32 primitive on one byte."""
+        return (crc >> 8) ^ crctable[(crc ^ ch) & 0xFF]
+
+    def update_keys(c):
+        nonlocal key0, key1, key2
+        key0 = crc32(c, key0)
+        key1 = (key1 + (key0 & 0xFF)) & 0xFFFFFFFF
+        key1 = (key1 * 134775813 + 1) & 0xFFFFFFFF
+        key2 = crc32(key1 >> 24, key2)
+
+    for p in pwd:
+        update_keys(p)
+
+    def decrypter(data):
+        """Decrypt a bytes object."""
+        result = bytearray()
+        append = result.append
+        for c in data:
+            k = key2 | 2
+            c ^= ((k * (k^1)) >> 8) & 0xFF
+            update_keys(c)
+            append(c)
+        return bytes(result)
+
+    return decrypter
+
+
+class LZMACompressor:
+
+    def __init__(self):
+        self._comp = None
+
+    def _init(self):
+        props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
+        self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
+            lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
+        ])
+        return struct.pack('<BBH', 9, 4, len(props)) + props
+
+    def compress(self, data):
+        if self._comp is None:
+            return self._init() + self._comp.compress(data)
+        return self._comp.compress(data)
+
+    def flush(self):
+        if self._comp is None:
+            return self._init() + self._comp.flush()
+        return self._comp.flush()
+
+
+class LZMADecompressor:
+
+    def __init__(self):
+        self._decomp = None
+        self._unconsumed = b''
+        self.eof = False
+
+    def decompress(self, data):
+        if self._decomp is None:
+            self._unconsumed += data
+            if len(self._unconsumed) <= 4:
+                return b''
+            psize, = struct.unpack('<H', self._unconsumed[2:4])
+            if len(self._unconsumed) <= 4 + psize:
+                return b''
+
+            self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
+                lzma._decode_filter_properties(lzma.FILTER_LZMA1,
+                                               self._unconsumed[4:4 + psize])
+            ])
+            data = self._unconsumed[4 + psize:]
+            del self._unconsumed
+
+        result = self._decomp.decompress(data)
+        self.eof = self._decomp.eof
+        return result
+
+
+compressor_names = {
+    0: 'store',
+    1: 'shrink',
+    2: 'reduce',
+    3: 'reduce',
+    4: 'reduce',
+    5: 'reduce',
+    6: 'implode',
+    7: 'tokenize',
+    8: 'deflate',
+    9: 'deflate64',
+    10: 'implode',
+    12: 'bzip2',
+    14: 'lzma',
+    18: 'terse',
+    19: 'lz77',
+    93: 'zstd',
+    97: 'wavpack',
+    98: 'ppmd',
+}
+
+def _check_compression(compression):
+    if compression == ZIP_STORED:
+        pass
+    elif compression == ZIP_DEFLATED:
+        if not zlib:
+            raise RuntimeError(
+                "Compression requires the (missing) zlib module")
+    elif compression == ZIP_BZIP2:
+        if not bz2:
+            raise RuntimeError(
+                "Compression requires the (missing) bz2 module")
+    elif compression == ZIP_LZMA:
+        if not lzma:
+            raise RuntimeError(
+                "Compression requires the (missing) lzma module")
+    elif compression == ZIP_ZSTANDARD:
+        if not zstd:
+            raise RuntimeError(
+                "Compression requires the (missing) compression.zstd module")
+    else:
+        raise NotImplementedError("That compression method is not supported")
+
+def _get_compressor(compress_type, compresslevel=None):
+    if compress_type == ZIP_DEFLATED:
+        if compresslevel is not None:
+            return zlib.compressobj(compresslevel, zlib.DEFLATED, -15)
+        return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
+    elif compress_type == ZIP_BZIP2:
+        if compresslevel is not None:
+            return bz2.BZ2Compressor(compresslevel)
+        return bz2.BZ2Compressor()
+    # compresslevel is ignored for ZIP_LZMA
+    elif compress_type == ZIP_LZMA:
+        return LZMACompressor()
+    elif compress_type == ZIP_ZSTANDARD:
+        return zstd.ZstdCompressor(level=compresslevel)
+    else:
+        return None
+
+def _get_decompressor(compress_type):
+    _check_compression(compress_type)
+    if compress_type == ZIP_STORED:
+        return None
+    elif compress_type == ZIP_DEFLATED:
+        return zlib.decompressobj(-15)
+    elif compress_type == ZIP_BZIP2:
+        return bz2.BZ2Decompressor()
+    elif compress_type == ZIP_LZMA:
+        return LZMADecompressor()
+    elif compress_type == ZIP_ZSTANDARD:
+        return zstd.ZstdDecompressor()
+    else:
+        descr = compressor_names.get(compress_type)
+        if descr:
+            raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
+        else:
+            raise NotImplementedError("compression type %d" % (compress_type,))
+
+
+class _SharedFile:
+    def __init__(self, file, pos, close, lock, writing):
+        self._file = file
+        self._pos = pos
+        self._close = close
+        self._lock = lock
+        self._writing = writing
+        self.seekable = file.seekable
+
+    def tell(self):
+        return self._pos
+
+    def seek(self, offset, whence=0):
+        with self._lock:
+            if self._writing():
+                raise ValueError("Can't reposition in the ZIP file while "
+                        "there is an open writing handle on it. "
+                        "Close the writing handle before trying to read.")
+            self._file.seek(offset, whence)
+            self._pos = self._file.tell()
+            return self._pos
+
+    def read(self, n=-1):
+        with self._lock:
+            if self._writing():
+                raise ValueError("Can't read from the ZIP file while there "
+                        "is an open writing handle on it. "
+                        "Close the writing handle before trying to read.")
+            self._file.seek(self._pos)
+            data = self._file.read(n)
+            self._pos = self._file.tell()
+            return data
+
+    def close(self):
+        if self._file is not None:
+            fileobj = self._file
+            self._file = None
+            self._close(fileobj)
+
+# Provide the tell method for unseekable stream
+class _Tellable:
+    def __init__(self, fp):
+        self.fp = fp
+        self.offset = 0
+
+    def write(self, data):
+        n = self.fp.write(data)
+        self.offset += n
+        return n
+
+    def tell(self):
+        return self.offset
+
+    def flush(self):
+        self.fp.flush()
+
+    def close(self):
+        self.fp.close()
+
+
+class ZipExtFile(io.BufferedIOBase):
+    """File-like object for reading an archive member.
+       Is returned by ZipFile.open().
+    """
+
+    # Max size supported by decompressor.
+    MAX_N = 1 << 31 - 1
+
+    # Read from compressed files in 4k blocks.
+    MIN_READ_SIZE = 4096
+
+    # Chunk size to read during seek
+    MAX_SEEK_READ = 1 << 24
+
+    def __init__(self, fileobj, mode, zipinfo, pwd=None,
+                 close_fileobj=False):
+        self._fileobj = fileobj
+        self._pwd = pwd
+        self._close_fileobj = close_fileobj
+
+        self._compress_type = zipinfo.compress_type
+        self._compress_left = zipinfo.compress_size
+        self._left = zipinfo.file_size
+
+        self._decompressor = _get_decompressor(self._compress_type)
+
+        self._eof = False
+        self._readbuffer = b''
+        self._offset = 0
+
+        self.newlines = None
+
+        self.mode = mode
+        self.name = zipinfo.filename
+
+        if hasattr(zipinfo, 'CRC'):
+            self._expected_crc = zipinfo.CRC
+            self._running_crc = crc32(b'')
+        else:
+            self._expected_crc = None
+
+        self._seekable = False
+        try:
+            if fileobj.seekable():
+                self._orig_compress_start = fileobj.tell()
+                self._orig_compress_size = zipinfo.compress_size
+                self._orig_file_size = zipinfo.file_size
+                self._orig_start_crc = self._running_crc
+                self._orig_crc = self._expected_crc
+                self._seekable = True
+        except AttributeError:
+            pass
+
+        self._decrypter = None
+        if pwd:
+            if zipinfo.flag_bits & _MASK_USE_DATA_DESCRIPTOR:
+                # compare against the file type from extended local headers
+                check_byte = (zipinfo._raw_time >> 8) & 0xff
+            else:
+                # compare against the CRC otherwise
+                check_byte = (zipinfo.CRC >> 24) & 0xff
+            h = self._init_decrypter()
+            if h != check_byte:
+                raise RuntimeError("Bad password for file %r" % zipinfo.orig_filename)
+
+
+    def _init_decrypter(self):
+        self._decrypter = _ZipDecrypter(self._pwd)
+        # The first 12 bytes in the cypher stream is an encryption header
+        #  used to strengthen the algorithm. The first 11 bytes are
+        #  completely random, while the 12th contains the MSB of the CRC,
+        #  or the MSB of the file time depending on the header type
+        #  and is used to check the correctness of the password.
+        header = self._fileobj.read(12)
+        self._compress_left -= 12
+        return self._decrypter(header)[11]
+
+    def __repr__(self):
+        result = ['<%s.%s' % (self.__class__.__module__,
+                              self.__class__.__qualname__)]
+        if not self.closed:
+            result.append(' name=%r' % (self.name,))
+            if self._compress_type != ZIP_STORED:
+                result.append(' compress_type=%s' %
+                              compressor_names.get(self._compress_type,
+                                                   self._compress_type))
+        else:
+            result.append(' [closed]')
+        result.append('>')
+        return ''.join(result)
+
+    def readline(self, limit=-1):
+        """Read and return a line from the stream.
+
+        If limit is specified, at most limit bytes will be read.
+        """
+
+        if limit < 0:
+            # Shortcut common case - newline found in buffer.
+            i = self._readbuffer.find(b'\n', self._offset) + 1
+            if i > 0:
+                line = self._readbuffer[self._offset: i]
+                self._offset = i
+                return line
+
+        return io.BufferedIOBase.readline(self, limit)
+
+    def peek(self, n=1):
+        """Returns buffered bytes without advancing the position."""
+        if n > len(self._readbuffer) - self._offset:
+            chunk = self.read(n)
+            if len(chunk) > self._offset:
+                self._readbuffer = chunk + self._readbuffer[self._offset:]
+                self._offset = 0
+            else:
+                self._offset -= len(chunk)
+
+        # Return up to 512 bytes to reduce allocation overhead for tight loops.
+        return self._readbuffer[self._offset: self._offset + 512]
+
+    def readable(self):
+        if self.closed:
+            raise ValueError("I/O operation on closed file.")
+        return True
+
+    def read(self, n=-1):
+        """Read and return up to n bytes.
+        If the argument is omitted, None, or negative, data is read and
+        returned until EOF is reached.
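+
+        For large members, reading in bounded chunks, e.g.
+        ``while chunk := f.read(65536): ...``, keeps memory use flat.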
+ """ + if self.closed: + raise ValueError("read from closed file.") + if n is None or n < 0: + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while not self._eof: + buf += self._read1(self.MAX_N) + return buf + + end = n + self._offset + if end < len(self._readbuffer): + buf = self._readbuffer[self._offset:end] + self._offset = end + return buf + + n = end - len(self._readbuffer) + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while n > 0 and not self._eof: + data = self._read1(n) + if n < len(data): + self._readbuffer = data + self._offset = n + buf += data[:n] + break + buf += data + n -= len(data) + return buf + + def _update_crc(self, newdata): + # Update the CRC using the given data. + if self._expected_crc is None: + # No need to compute the CRC if we don't have a reference value + return + self._running_crc = crc32(newdata, self._running_crc) + # Check the CRC if we're at the end of the file + if self._eof and self._running_crc != self._expected_crc: + raise BadZipFile("Bad CRC-32 for file %r" % self.name) + + def read1(self, n): + """Read up to n bytes with at most one read() system call.""" + + if n is None or n < 0: + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while not self._eof: + data = self._read1(self.MAX_N) + if data: + buf += data + break + return buf + + end = n + self._offset + if end < len(self._readbuffer): + buf = self._readbuffer[self._offset:end] + self._offset = end + return buf + + n = end - len(self._readbuffer) + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + if n > 0: + while not self._eof: + data = self._read1(n) + if n < len(data): + self._readbuffer = data + self._offset = n + buf += data[:n] + break + if data: + buf += data + break + return buf + + def _read1(self, n): + # Read up to n compressed bytes with at most one read() system call, + # decrypt and decompress them. + if self._eof or n <= 0: + return b'' + + # Read from file. + if self._compress_type == ZIP_DEFLATED: + ## Handle unconsumed data. 
+ data = self._decompressor.unconsumed_tail + if n > len(data): + data += self._read2(n - len(data)) + else: + data = self._read2(n) + + if self._compress_type == ZIP_STORED: + self._eof = self._compress_left <= 0 + elif self._compress_type == ZIP_DEFLATED: + n = max(n, self.MIN_READ_SIZE) + data = self._decompressor.decompress(data, n) + self._eof = (self._decompressor.eof or + self._compress_left <= 0 and + not self._decompressor.unconsumed_tail) + if self._eof: + data += self._decompressor.flush() + else: + data = self._decompressor.decompress(data) + self._eof = self._decompressor.eof or self._compress_left <= 0 + + data = data[:self._left] + self._left -= len(data) + if self._left <= 0: + self._eof = True + self._update_crc(data) + return data + + def _read2(self, n): + if self._compress_left <= 0: + return b'' + + n = max(n, self.MIN_READ_SIZE) + n = min(n, self._compress_left) + + data = self._fileobj.read(n) + self._compress_left -= len(data) + if not data: + raise EOFError + + if self._decrypter is not None: + data = self._decrypter(data) + return data + + def close(self): + try: + if self._close_fileobj: + self._fileobj.close() + finally: + super().close() + + def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") + return self._seekable + + def seek(self, offset, whence=os.SEEK_SET): + if self.closed: + raise ValueError("seek on closed file.") + if not self._seekable: + raise io.UnsupportedOperation("underlying stream is not seekable") + curr_pos = self.tell() + if whence == os.SEEK_SET: + new_pos = offset + elif whence == os.SEEK_CUR: + new_pos = curr_pos + offset + elif whence == os.SEEK_END: + new_pos = self._orig_file_size + offset + else: + raise ValueError("whence must be os.SEEK_SET (0), " + "os.SEEK_CUR (1), or os.SEEK_END (2)") + + if new_pos > self._orig_file_size: + new_pos = self._orig_file_size + + if new_pos < 0: + new_pos = 0 + + read_offset = new_pos - curr_pos + buff_offset = read_offset + self._offset + + if buff_offset >= 0 and buff_offset < len(self._readbuffer): + # Just move the _offset index if the new position is in the _readbuffer + self._offset = buff_offset + read_offset = 0 + # Fast seek uncompressed unencrypted file + elif self._compress_type == ZIP_STORED and self._decrypter is None and read_offset != 0: + # disable CRC checking after first seeking - it would be invalid + self._expected_crc = None + # seek actual file taking already buffered data into account + read_offset -= len(self._readbuffer) - self._offset + self._fileobj.seek(read_offset, os.SEEK_CUR) + self._left -= read_offset + self._compress_left -= read_offset + self._eof = self._left <= 0 + read_offset = 0 + # flush read buffer + self._readbuffer = b'' + self._offset = 0 + elif read_offset < 0: + # Position is before the current position. 
Reset the ZipExtFile
+            self._fileobj.seek(self._orig_compress_start)
+            self._running_crc = self._orig_start_crc
+            self._expected_crc = self._orig_crc
+            self._compress_left = self._orig_compress_size
+            self._left = self._orig_file_size
+            self._readbuffer = b''
+            self._offset = 0
+            self._decompressor = _get_decompressor(self._compress_type)
+            self._eof = False
+            read_offset = new_pos
+            if self._decrypter is not None:
+                self._init_decrypter()
+
+        while read_offset > 0:
+            read_len = min(self.MAX_SEEK_READ, read_offset)
+            self.read(read_len)
+            read_offset -= read_len
+
+        return self.tell()
+
+    def tell(self):
+        if self.closed:
+            raise ValueError("tell on closed file.")
+        if not self._seekable:
+            raise io.UnsupportedOperation("underlying stream is not seekable")
+        filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset
+        return filepos
+
+
+class _ZipWriteFile(io.BufferedIOBase):
+    def __init__(self, zf, zinfo, zip64):
+        self._zinfo = zinfo
+        self._zip64 = zip64
+        self._zipfile = zf
+        self._compressor = _get_compressor(zinfo.compress_type,
+                                           zinfo.compress_level)
+        self._file_size = 0
+        self._compress_size = 0
+        self._crc = 0
+
+    @property
+    def _fileobj(self):
+        return self._zipfile.fp
+
+    @property
+    def name(self):
+        return self._zinfo.filename
+
+    @property
+    def mode(self):
+        return 'wb'
+
+    def writable(self):
+        return True
+
+    def write(self, data):
+        if self.closed:
+            raise ValueError('I/O operation on closed file.')
+
+        # Accept any data that supports the buffer protocol
+        if isinstance(data, (bytes, bytearray)):
+            nbytes = len(data)
+        else:
+            data = memoryview(data)
+            nbytes = data.nbytes
+        self._file_size += nbytes
+
+        self._crc = crc32(data, self._crc)
+        if self._compressor:
+            data = self._compressor.compress(data)
+            self._compress_size += len(data)
+        self._fileobj.write(data)
+        return nbytes
+
+    def close(self):
+        if self.closed:
+            return
+        try:
+            super().close()
+            # Flush any data from the compressor, and update header info
+            if self._compressor:
+                buf = self._compressor.flush()
+                self._compress_size += len(buf)
+                self._fileobj.write(buf)
+                self._zinfo.compress_size = self._compress_size
+            else:
+                self._zinfo.compress_size = self._file_size
+            self._zinfo.CRC = self._crc
+            self._zinfo.file_size = self._file_size
+
+            if not self._zip64:
+                if self._file_size > ZIP64_LIMIT:
+                    raise RuntimeError("File size too large, try using force_zip64")
+                if self._compress_size > ZIP64_LIMIT:
+                    raise RuntimeError("Compressed size too large, try using force_zip64")
+
+            # Write updated header info
+            if self._zinfo.flag_bits & _MASK_USE_DATA_DESCRIPTOR:
+                # Write CRC and file sizes after the file data
+                fmt = '<LLQQ' if self._zip64 else '<LLLL'
+                self._fileobj.write(struct.pack(fmt, _DD_SIGNATURE, self._zinfo.CRC,
+                                                self._zinfo.compress_size,
+                                                self._zinfo.file_size))
+                self._zipfile.start_dir = self._fileobj.tell()
+            else:
+                if not self._zipfile._seekable:
+                    raise RuntimeError('File was already written to')
+                # Seek backwards and write file header (which will now include
+                # correct CRC and file sizes)
+
+                # Preserve current position in file
+                self._zipfile.start_dir = self._fileobj.tell()
+                self._fileobj.seek(self._zinfo.header_offset)
+                self._fileobj.write(self._zinfo.FileHeader(self._zip64))
+                self._fileobj.seek(self._zipfile.start_dir)
+
+            # Successfully written: Add file to our caches
+            self._zipfile.filelist.append(self._zinfo)
+            self._zipfile.NameToInfo[self._zinfo.filename] = self._zinfo
+        finally:
+            self._zipfile._writing = False
+
+
+class ZipFile:
+    """Class with methods to open, read, write, close, list zip files.
+
+    z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True,
+                compresslevel=None)
+
+    file: Either the path to the file, or a file-like object.
+          If it is a path, the file will be opened and closed by ZipFile.
+    mode: The mode can be either read 'r', write 'w', exclusive create 'x',
+          or append 'a'.
+    compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
+                 ZIP_BZIP2 (requires bz2), ZIP_LZMA (requires lzma), or
+                 ZIP_ZSTANDARD (requires compression.zstd).
+    allowZip64: if True ZipFile will create files with ZIP64 extensions when
+                needed, otherwise it will raise an exception when this would
+                be necessary.
+    compresslevel: None (default for the given compression type) or an integer
+                   specifying the level to pass to the compressor.
+                   When using ZIP_STORED or ZIP_LZMA this keyword has no effect.
+                   When using ZIP_DEFLATED integers 0 through 9 are accepted.
+                   When using ZIP_BZIP2 integers 1 through 9 are accepted.
+
+    """
+
+    fp = None                   # Set here since __del__ checks it
+    _windows_illegal_name_trans_table = None
+
+    def __init__(self, file, mode="r", compression=ZIP_STORED,
+                 allowZip64=True, compresslevel=None, *,
+                 strict_timestamps=True, metadata_encoding=None):
+        """Open the ZIP file with mode read 'r', write 'w', exclusive create
+        'x', or append 'a'."""
+        if mode not in ('r', 'w', 'x', 'a'):
+            raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
+
+        _check_compression(compression)
+
+        self._allowZip64 = allowZip64
+        self._didModify = False
+        self.debug = 0  # Level of printing: 0 through 3
+        self.NameToInfo = {}    # Find file info given name
+        self.filelist = []      # List of ZipInfo instances for archive
+        self.compression = compression  # Method of compression
+        self.compresslevel = compresslevel
+        self.mode = mode
+        self.pwd = None
+        self._comment = b''
+        self._strict_timestamps = strict_timestamps
+        self.metadata_encoding = metadata_encoding
+
+        # Check that we don't try to write with nonconforming codecs
+        if self.metadata_encoding and mode != 'r':
+            raise ValueError(
+                "metadata_encoding is only supported for reading files")
+
+        # Check if we were passed a file-like object
+        if isinstance(file, os.PathLike):
+            file = os.fspath(file)
+        if isinstance(file, str):
+            # No, it's a filename
+            self._filePassed = 0
+            self.filename = file
+            modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b',
+                        'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
+            filemode = modeDict[mode]
+            while True:
+                try:
+                    self.fp = io.open(file, filemode)
+                except OSError:
+                    if filemode in modeDict:
+                        filemode = modeDict[filemode]
+                        continue
+                    raise
+                break
+        else:
+            self._filePassed = 1
+            self.fp = file
+            self.filename = getattr(file, 'name', None)
+        self._fileRefCnt = 1
+        self._lock = threading.RLock()
+        self._seekable = True
+        self._writing = False
+
+        try:
+            if mode == 'r':
+                self._RealGetContents()
+            elif mode in ('w', 'x'):
+                # set the modified flag so central directory gets written
+                # even if no files are added to the archive
+                self._didModify = True
+                try:
+                    self.start_dir = self.fp.tell()
+                except (AttributeError, OSError):
+                    self.fp = _Tellable(self.fp)
+                    self.start_dir = 0
+                    self._seekable = False
+            elif mode == 'a':
+                try:
+                    # See if file is a zip file
+                    self._RealGetContents()
+                    # seek to start of directory and overwrite
+                    self.fp.seek(self.start_dir)
+                except BadZipFile:
+                    # file is not a zip file, just append
+                    self.fp.seek(0, 2)
+
+                    # set the modified flag so central directory gets written
+                    # even if no files are added to the archive
+                    self._didModify = True
+                    self.start_dir = self.fp.tell()
+            else:
+                raise ValueError("Mode must be 'r', 'w', 'x', or 'a'")
+        except:
+            fp = self.fp
+            self.fp = None
+            self._fpclose(fp)
+            raise
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def __repr__(self):
+        result = ['<%s.%s' % (self.__class__.__module__,
+                              self.__class__.__qualname__)]
+        if self.fp is not None:
+            if self._filePassed:
+                result.append(' file=%r' % self.fp)
+            elif self.filename is not None:
+                result.append(' filename=%r' % self.filename)
+            result.append(' mode=%r' % self.mode)
+        else:
+            result.append(' [closed]')
+        result.append('>')
+        return ''.join(result)
+
+    def _RealGetContents(self):
+        """Read in the table of contents for the ZIP file."""
+        fp = self.fp
+        try:
+            endrec = _EndRecData(fp)
+        except OSError:
+            raise BadZipFile("File is not a zip file")
+        if not endrec:
+            raise BadZipFile("File is not a zip file")
+        if self.debug > 1:
+            print(endrec)
+        self._comment = endrec[_ECD_COMMENT]    # archive comment
+
+        offset_cd, concat = _handle_prepended_data(endrec, self.debug)
+
+        # self.start_dir:  Position of start of central directory
+        self.start_dir = offset_cd + concat
+
+        if self.start_dir < 0:
+            raise BadZipFile("Bad offset for central directory")
+        fp.seek(self.start_dir, 0)
+        size_cd = endrec[_ECD_SIZE]
+        data = fp.read(size_cd)
+        fp = io.BytesIO(data)
+        total = 0
+        while total < size_cd:
+            centdir = fp.read(sizeCentralDir)
+            if len(centdir) != sizeCentralDir:
+                raise BadZipFile("Truncated central directory")
+            centdir = struct.unpack(structCentralDir, centdir)
+            if centdir[_CD_SIGNATURE] != stringCentralDir:
+                raise BadZipFile("Bad magic number for central directory")
+            if self.debug > 2:
+                print(centdir)
+            filename = fp.read(centdir[_CD_FILENAME_LENGTH])
+            orig_filename_crc = crc32(filename)
+            flags = centdir[_CD_FLAG_BITS]
+            if flags & _MASK_UTF_FILENAME:
+                # UTF-8 file names extension
+                filename = filename.decode('utf-8')
+            else:
+                # Historical ZIP filename encoding
+                filename = filename.decode(self.metadata_encoding or 'cp437')
+            # Create ZipInfo instance to store file information
+            x = ZipInfo(filename)
+            x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
+            x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
+            x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
+            (x.create_version, x.create_system, x.extract_version, x.reserved,
+             x.flag_bits, x.compress_type, t, d,
+             x.CRC, x.compress_size, x.file_size) = centdir[1:12]
+            if x.extract_version > MAX_EXTRACT_VERSION:
+                raise NotImplementedError("zip file version %.1f" %
+                                          (x.extract_version / 10))
+            x.volume, x.internal_attr, x.external_attr = centdir[15:18]
+            # Convert date/time code to (year, month, day, hour, min, sec)
+            x._raw_time = t
+            x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
+                            t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
+            x._decodeExtra(orig_filename_crc)
+            x.header_offset = x.header_offset + concat
+            self.filelist.append(x)
+            self.NameToInfo[x.filename] = x
+
+            # update total bytes read from central directory
+            total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+                     + centdir[_CD_EXTRA_FIELD_LENGTH]
+                     + centdir[_CD_COMMENT_LENGTH])
+
+        if self.debug > 2:
+            print("total", total)
+
+        end_offset = self.start_dir
+        for zinfo in reversed(sorted(self.filelist,
+                                     key=lambda zinfo: zinfo.header_offset)):
+            zinfo._end_offset = end_offset
+            end_offset = zinfo.header_offset
+
+    def namelist(self):
+        """Return a list of file names in the archive."""
+        return [data.filename for data in self.filelist]
+
+    def infolist(self):
+        """Return a list of class ZipInfo instances for files in the
+        archive."""
+        return self.filelist
+
+    def printdir(self, file=None):
+        """Print a table of contents for the zip file."""
+        print("%-46s %19s %12s" % ("File Name", "Modified    ", "Size"),
+              file=file)
+        for zinfo in self.filelist:
+            date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
+            print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
+                  file=file)
+
+    def testzip(self):
+        """Read all the files and check the CRC.
+
+        Return None if all files could be read successfully, or the name
+        of the offending file otherwise."""
+        chunk_size = 2 ** 20
+        for zinfo in self.filelist:
+            try:
+                # Read by chunks, to avoid an OverflowError or a
+                # MemoryError with very large embedded files.
+ with self.open(zinfo.filename, "r") as f: + while f.read(chunk_size): # Check CRC-32 + pass + except BadZipFile: + return zinfo.filename + + def getinfo(self, name): + """Return the instance of ZipInfo given 'name'.""" + info = self.NameToInfo.get(name) + if info is None: + raise KeyError( + 'There is no item named %r in the archive' % name) + + return info + + def setpassword(self, pwd): + """Set default password for encrypted files.""" + if pwd and not isinstance(pwd, bytes): + raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) + if pwd: + self.pwd = pwd + else: + self.pwd = None + + @property + def comment(self): + """The comment text associated with the ZIP file.""" + return self._comment + + @comment.setter + def comment(self, comment): + if not isinstance(comment, bytes): + raise TypeError("comment: expected bytes, got %s" % type(comment).__name__) + # check for valid comment length + if len(comment) > ZIP_MAX_COMMENT: + import warnings + warnings.warn('Archive comment is too long; truncating to %d bytes' + % ZIP_MAX_COMMENT, stacklevel=2) + comment = comment[:ZIP_MAX_COMMENT] + self._comment = comment + self._didModify = True + + def read(self, name, pwd=None): + """Return file bytes for name. 'pwd' is the password to decrypt + encrypted files.""" + with self.open(name, "r", pwd) as fp: + return fp.read() + + def open(self, name, mode="r", pwd=None, *, force_zip64=False): + """Return file-like object for 'name'. + + name is a string for the file name within the ZIP file, or a ZipInfo + object. + + mode should be 'r' to read a file already in the ZIP file, or 'w' to + write to a file newly added to the archive. + + pwd is the password to decrypt files (only used for reading). + + When writing, if the file size is not known in advance but may exceed + 2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large + files. If the size is known in advance, it is best to pass a ZipInfo + instance for name, with zinfo.file_size set. + """ + if mode not in {"r", "w"}: + raise ValueError('open() requires mode "r" or "w"') + if pwd and (mode == "w"): + raise ValueError("pwd is only supported for reading files") + if not self.fp: + raise ValueError( + "Attempt to use ZIP archive that was already closed") + + # Make sure we have an info object + if isinstance(name, ZipInfo): + # 'name' is already an info object + zinfo = name + elif mode == 'w': + zinfo = ZipInfo(name) + zinfo.compress_type = self.compression + zinfo.compress_level = self.compresslevel + else: + # Get info object for name + zinfo = self.getinfo(name) + + if mode == 'w': + return self._open_to_write(zinfo, force_zip64=force_zip64) + + if self._writing: + raise ValueError("Can't read from the ZIP file while there " + "is an open writing handle on it. 
" + "Close the writing handle before trying to read.") + + # Open for reading: + self._fileRefCnt += 1 + zef_file = _SharedFile(self.fp, zinfo.header_offset, + self._fpclose, self._lock, lambda: self._writing) + try: + # Skip the file header: + fheader = zef_file.read(sizeFileHeader) + if len(fheader) != sizeFileHeader: + raise BadZipFile("Truncated file header") + fheader = struct.unpack(structFileHeader, fheader) + if fheader[_FH_SIGNATURE] != stringFileHeader: + raise BadZipFile("Bad magic number for file header") + + fname = zef_file.read(fheader[_FH_FILENAME_LENGTH]) + if fheader[_FH_EXTRA_FIELD_LENGTH]: + zef_file.seek(fheader[_FH_EXTRA_FIELD_LENGTH], whence=1) + + if zinfo.flag_bits & _MASK_COMPRESSED_PATCH: + # Zip 2.7: compressed patched data + raise NotImplementedError("compressed patched data (flag bit 5)") + + if zinfo.flag_bits & _MASK_STRONG_ENCRYPTION: + # strong encryption + raise NotImplementedError("strong encryption (flag bit 6)") + + if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & _MASK_UTF_FILENAME: + # UTF-8 filename + fname_str = fname.decode("utf-8") + else: + fname_str = fname.decode(self.metadata_encoding or "cp437") + + if fname_str != zinfo.orig_filename: + raise BadZipFile( + 'File name in directory %r and header %r differ.' + % (zinfo.orig_filename, fname)) + + if (zinfo._end_offset is not None and + zef_file.tell() + zinfo.compress_size > zinfo._end_offset): + if zinfo._end_offset == zinfo.header_offset: + import warnings + warnings.warn( + f"Overlapped entries: {zinfo.orig_filename!r} " + f"(possible zip bomb)", + skip_file_prefixes=(os.path.dirname(__file__),)) + else: + raise BadZipFile( + f"Overlapped entries: {zinfo.orig_filename!r} " + f"(possible zip bomb)") + + # check for encrypted flag & handle password + is_encrypted = zinfo.flag_bits & _MASK_ENCRYPTED + if is_encrypted: + if not pwd: + pwd = self.pwd + if pwd and not isinstance(pwd, bytes): + raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) + if not pwd: + raise RuntimeError("File %r is encrypted, password " + "required for extraction" % name) + else: + pwd = None + + return ZipExtFile(zef_file, mode + 'b', zinfo, pwd, True) + except: + zef_file.close() + raise + + def _open_to_write(self, zinfo, force_zip64=False): + if force_zip64 and not self._allowZip64: + raise ValueError( + "force_zip64 is True, but allowZip64 was False when opening " + "the ZIP file." + ) + if self._writing: + raise ValueError("Can't write to the ZIP file while there is " + "another write handle open on it. 
" + "Close the first handle before opening another.") + + # Size and CRC are overwritten with correct data after processing the file + zinfo.compress_size = 0 + zinfo.CRC = 0 + + zinfo.flag_bits = 0x00 + if zinfo.compress_type == ZIP_LZMA: + # Compressed data includes an end-of-stream (EOS) marker + zinfo.flag_bits |= _MASK_COMPRESS_OPTION_1 + if not self._seekable: + zinfo.flag_bits |= _MASK_USE_DATA_DESCRIPTOR + + if not zinfo.external_attr: + zinfo.external_attr = 0o600 << 16 # permissions: ?rw------- + + # Compressed size can be larger than uncompressed size + zip64 = force_zip64 or (zinfo.file_size * 1.05 > ZIP64_LIMIT) + if not self._allowZip64 and zip64: + raise LargeZipFile("Filesize would require ZIP64 extensions") + + if self._seekable: + self.fp.seek(self.start_dir) + zinfo.header_offset = self.fp.tell() + + self._writecheck(zinfo) + self._didModify = True + + self.fp.write(zinfo.FileHeader(zip64)) + + self._writing = True + return _ZipWriteFile(self, zinfo, zip64) + + def extract(self, member, path=None, pwd=None): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. 'member' may be a filename or a ZipInfo object. You can + specify a different directory using 'path'. You can specify the + password to decrypt the file using 'pwd'. + """ + if path is None: + path = os.getcwd() + else: + path = os.fspath(path) + + return self._extract_member(member, path, pwd) + + def extractall(self, path=None, members=None, pwd=None): + """Extract all members from the archive to the current working + directory. 'path' specifies a different directory to extract to. + 'members' is optional and must be a subset of the list returned + by namelist(). You can specify the password to decrypt all files + using 'pwd'. + """ + if members is None: + members = self.namelist() + + if path is None: + path = os.getcwd() + else: + path = os.fspath(path) + + for zipinfo in members: + self._extract_member(zipinfo, path, pwd) + + @classmethod + def _sanitize_windows_name(cls, arcname, pathsep): + """Replace bad characters and remove trailing dots from parts.""" + table = cls._windows_illegal_name_trans_table + if not table: + illegal = ':<>|"?*' + table = str.maketrans(illegal, '_' * len(illegal)) + cls._windows_illegal_name_trans_table = table + arcname = arcname.translate(table) + # remove trailing dots and spaces + arcname = (x.rstrip(' .') for x in arcname.split(pathsep)) + # rejoin, removing empty parts. + arcname = pathsep.join(x for x in arcname if x) + return arcname + + def _extract_member(self, member, targetpath, pwd): + """Extract the ZipInfo object 'member' to a physical + file on the path targetpath. + """ + if not isinstance(member, ZipInfo): + member = self.getinfo(member) + + # build the destination pathname, replacing + # forward slashes to platform specific separators. + arcname = member.filename.replace('/', os.path.sep) + + if os.path.altsep: + arcname = arcname.replace(os.path.altsep, os.path.sep) + # interpret absolute pathname as relative, remove drive letter or + # UNC path, redundant separators, "." and ".." components. 
+ arcname = os.path.splitdrive(arcname)[1] + invalid_path_parts = ('', os.path.curdir, os.path.pardir) + arcname = os.path.sep.join(x for x in arcname.split(os.path.sep) + if x not in invalid_path_parts) + if os.path.sep == '\\': + # filter illegal characters on Windows + arcname = self._sanitize_windows_name(arcname, os.path.sep) + + if not arcname and not member.is_dir(): + raise ValueError("Empty filename.") + + targetpath = os.path.join(targetpath, arcname) + targetpath = os.path.normpath(targetpath) + + # Create all upper directories if necessary. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + os.makedirs(upperdirs, exist_ok=True) + + if member.is_dir(): + if not os.path.isdir(targetpath): + try: + os.mkdir(targetpath) + except FileExistsError: + if not os.path.isdir(targetpath): + raise + return targetpath + + with self.open(member, pwd=pwd) as source, \ + open(targetpath, "wb") as target: + shutil.copyfileobj(source, target) + + return targetpath + + def _writecheck(self, zinfo): + """Check for errors before writing a file to the archive.""" + if zinfo.filename in self.NameToInfo: + import warnings + warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3) + if self.mode not in ('w', 'x', 'a'): + raise ValueError("write() requires mode 'w', 'x', or 'a'") + if not self.fp: + raise ValueError( + "Attempt to write ZIP archive that was already closed") + _check_compression(zinfo.compress_type) + if not self._allowZip64: + requires_zip64 = None + if len(self.filelist) >= ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif zinfo.file_size > ZIP64_LIMIT: + requires_zip64 = "Filesize" + elif zinfo.header_offset > ZIP64_LIMIT: + requires_zip64 = "Zipfile size" + if requires_zip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") + + def write(self, filename, arcname=None, + compress_type=None, compresslevel=None): + """Put the bytes from filename into the archive under the name + arcname.""" + if not self.fp: + raise ValueError( + "Attempt to write to ZIP archive that was already closed") + if self._writing: + raise ValueError( + "Can't write to ZIP archive while an open writing handle exists" + ) + + zinfo = ZipInfo.from_file(filename, arcname, + strict_timestamps=self._strict_timestamps) + + if zinfo.is_dir(): + zinfo.compress_size = 0 + zinfo.CRC = 0 + self.mkdir(zinfo) + else: + if compress_type is not None: + zinfo.compress_type = compress_type + else: + zinfo.compress_type = self.compression + + if compresslevel is not None: + zinfo.compress_level = compresslevel + else: + zinfo.compress_level = self.compresslevel + + with open(filename, "rb") as src, self.open(zinfo, 'w') as dest: + shutil.copyfileobj(src, dest, 1024*8) + + def writestr(self, zinfo_or_arcname, data, + compress_type=None, compresslevel=None): + """Write a file into the archive. The contents is 'data', which + may be either a 'str' or a 'bytes' instance; if it is a 'str', + it is encoded as UTF-8 first. + 'zinfo_or_arcname' is either a ZipInfo instance or + the name of the file in the archive.""" + if isinstance(data, str): + data = data.encode("utf-8") + if isinstance(zinfo_or_arcname, ZipInfo): + zinfo = zinfo_or_arcname + else: + zinfo = ZipInfo(zinfo_or_arcname)._for_archive(self) + + if not self.fp: + raise ValueError( + "Attempt to write to ZIP archive that was already closed") + if self._writing: + raise ValueError( + "Can't write to ZIP archive while an open writing handle exists." 
+ ) + + if compress_type is not None: + zinfo.compress_type = compress_type + + if compresslevel is not None: + zinfo.compress_level = compresslevel + + zinfo.file_size = len(data) # Uncompressed size + with self._lock: + with self.open(zinfo, mode='w') as dest: + dest.write(data) + + def mkdir(self, zinfo_or_directory_name, mode=511): + """Creates a directory inside the zip archive.""" + if isinstance(zinfo_or_directory_name, ZipInfo): + zinfo = zinfo_or_directory_name + if not zinfo.is_dir(): + raise ValueError("The given ZipInfo does not describe a directory") + elif isinstance(zinfo_or_directory_name, str): + directory_name = zinfo_or_directory_name + if not directory_name.endswith("/"): + directory_name += "/" + zinfo = ZipInfo(directory_name) + zinfo.compress_size = 0 + zinfo.CRC = 0 + zinfo.external_attr = ((0o40000 | mode) & 0xFFFF) << 16 + zinfo.file_size = 0 + zinfo.external_attr |= 0x10 + else: + raise TypeError("Expected type str or ZipInfo") + + with self._lock: + if self._seekable: + self.fp.seek(self.start_dir) + zinfo.header_offset = self.fp.tell() # Start of header bytes + if zinfo.compress_type == ZIP_LZMA: + # Compressed data includes an end-of-stream (EOS) marker + zinfo.flag_bits |= _MASK_COMPRESS_OPTION_1 + + self._writecheck(zinfo) + self._didModify = True + + self.filelist.append(zinfo) + self.NameToInfo[zinfo.filename] = zinfo + self.fp.write(zinfo.FileHeader(False)) + self.start_dir = self.fp.tell() + + def __del__(self): + """Call the "close()" method in case the user forgot.""" + self.close() + + def close(self): + """Close the file, and for mode 'w', 'x' and 'a' write the ending + records.""" + if self.fp is None: + return + + if self._writing: + raise ValueError("Can't close the ZIP file while there is " + "an open writing handle on it. 
" + "Close the writing handle before closing the zip.") + + try: + if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records + with self._lock: + if self._seekable: + self.fp.seek(self.start_dir) + self._write_end_record() + finally: + fp = self.fp + self.fp = None + self._fpclose(fp) + + def _write_end_record(self): + for zinfo in self.filelist: # write central directory + dt = zinfo.date_time + dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] + dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) + extra = [] + if zinfo.file_size > ZIP64_LIMIT \ + or zinfo.compress_size > ZIP64_LIMIT: + extra.append(zinfo.file_size) + extra.append(zinfo.compress_size) + file_size = 0xffffffff + compress_size = 0xffffffff + else: + file_size = zinfo.file_size + compress_size = zinfo.compress_size + + if zinfo.header_offset > ZIP64_LIMIT: + extra.append(zinfo.header_offset) + header_offset = 0xffffffff + else: + header_offset = zinfo.header_offset + + extra_data = zinfo.extra + min_version = 0 + if extra: + # Append a ZIP64 field to the extra's + extra_data = _Extra.strip(extra_data, (1,)) + extra_data = struct.pack( + ' ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif centDirOffset > ZIP64_LIMIT: + requires_zip64 = "Central directory offset" + elif centDirSize > ZIP64_LIMIT: + requires_zip64 = "Central directory size" + if requires_zip64: + # Need to write the ZIP64 end-of-archive records + if not self._allowZip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") + zip64endrec = struct.pack( + structEndArchive64, stringEndArchive64, + sizeEndCentDir64 - 12, 45, 45, 0, 0, centDirCount, centDirCount, + centDirSize, centDirOffset) + self.fp.write(zip64endrec) + + zip64locrec = struct.pack( + structEndArchive64Locator, + stringEndArchive64Locator, 0, pos2, 1) + self.fp.write(zip64locrec) + centDirCount = min(centDirCount, 0xFFFF) + centDirSize = min(centDirSize, 0xFFFFFFFF) + centDirOffset = min(centDirOffset, 0xFFFFFFFF) + + endrec = struct.pack(structEndArchive, stringEndArchive, + 0, 0, centDirCount, centDirCount, + centDirSize, centDirOffset, len(self._comment)) + self.fp.write(endrec) + self.fp.write(self._comment) + if self.mode == "a": + self.fp.truncate() + self.fp.flush() + + def _fpclose(self, fp): + assert self._fileRefCnt > 0 + self._fileRefCnt -= 1 + if not self._fileRefCnt and not self._filePassed: + fp.close() + + +class PyZipFile(ZipFile): + """Class to create ZIP archives with Python library files and packages.""" + + def __init__(self, file, mode="r", compression=ZIP_STORED, + allowZip64=True, optimize=-1): + ZipFile.__init__(self, file, mode=mode, compression=compression, + allowZip64=allowZip64) + self._optimize = optimize + + def writepy(self, pathname, basename="", filterfunc=None): + """Add all files from "pathname" to the ZIP archive. + + If pathname is a package directory, search the directory and + all package subdirectories recursively for all *.py and enter + the modules into the archive. If pathname is a plain + directory, listdir *.py and enter all modules. Else, pathname + must be a Python *.py file and the module will be put into the + archive. Added modules are always module.pyc. + This method will compile the module.py into module.pyc if + necessary. + If filterfunc(pathname) is given, it is called with every argument. + When it is False, the file or directory is skipped. 
+ """ + pathname = os.fspath(pathname) + if filterfunc and not filterfunc(pathname): + if self.debug: + label = 'path' if os.path.isdir(pathname) else 'file' + print('%s %r skipped by filterfunc' % (label, pathname)) + return + dir, name = os.path.split(pathname) + if os.path.isdir(pathname): + initname = os.path.join(pathname, "__init__.py") + if os.path.isfile(initname): + # This is a package directory, add it + if basename: + basename = "%s/%s" % (basename, name) + else: + basename = name + if self.debug: + print("Adding package in", pathname, "as", basename) + fname, arcname = self._get_codename(initname[0:-3], basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + dirlist = sorted(os.listdir(pathname)) + dirlist.remove("__init__.py") + # Add all *.py files and package subdirectories + for filename in dirlist: + path = os.path.join(pathname, filename) + root, ext = os.path.splitext(filename) + if os.path.isdir(path): + if os.path.isfile(os.path.join(path, "__init__.py")): + # This is a package directory, add it + self.writepy(path, basename, + filterfunc=filterfunc) # Recursive call + elif ext == ".py": + if filterfunc and not filterfunc(path): + if self.debug: + print('file %r skipped by filterfunc' % path) + continue + fname, arcname = self._get_codename(path[0:-3], + basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + else: + # This is NOT a package directory, add its files at top level + if self.debug: + print("Adding files from directory", pathname) + for filename in sorted(os.listdir(pathname)): + path = os.path.join(pathname, filename) + root, ext = os.path.splitext(filename) + if ext == ".py": + if filterfunc and not filterfunc(path): + if self.debug: + print('file %r skipped by filterfunc' % path) + continue + fname, arcname = self._get_codename(path[0:-3], + basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + else: + if pathname[-3:] != ".py": + raise RuntimeError( + 'Files added with writepy() must end with ".py"') + fname, arcname = self._get_codename(pathname[0:-3], basename) + if self.debug: + print("Adding file", arcname) + self.write(fname, arcname) + + def _get_codename(self, pathname, basename): + """Return (filename, archivename) for the path. + + Given a module name path, return the correct file path and + archive name, compiling if necessary. For example, given + /python/lib/string, return (/python/lib/string.pyc, string). + """ + def _compile(file, optimize=-1): + import py_compile + if self.debug: + print("Compiling", file) + try: + py_compile.compile(file, doraise=True, optimize=optimize) + except py_compile.PyCompileError as err: + print(err.msg) + return False + return True + + file_py = pathname + ".py" + file_pyc = pathname + ".pyc" + pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='') + pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1) + pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2) + if self._optimize == -1: + # legacy mode: use whatever file is present + if (os.path.isfile(file_pyc) and + os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime): + # Use .pyc file. + arcname = fname = file_pyc + elif (os.path.isfile(pycache_opt0) and + os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime): + # Use the __pycache__/*.pyc file, but write it to the legacy pyc + # file name in the archive. 
+                fname = pycache_opt0
+                arcname = file_pyc
+            elif (os.path.isfile(pycache_opt1) and
+                  os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
+                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
+                # file name in the archive.
+                fname = pycache_opt1
+                arcname = file_pyc
+            elif (os.path.isfile(pycache_opt2) and
+                  os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
+                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
+                # file name in the archive.
+                fname = pycache_opt2
+                arcname = file_pyc
+            else:
+                # Compile py into PEP 3147 pyc file.
+                if _compile(file_py):
+                    if sys.flags.optimize == 0:
+                        fname = pycache_opt0
+                    elif sys.flags.optimize == 1:
+                        fname = pycache_opt1
+                    else:
+                        fname = pycache_opt2
+                    arcname = file_pyc
+                else:
+                    fname = arcname = file_py
+        else:
+            # new mode: use given optimization level
+            if self._optimize == 0:
+                fname = pycache_opt0
+                arcname = file_pyc
+            else:
+                arcname = file_pyc
+                if self._optimize == 1:
+                    fname = pycache_opt1
+                elif self._optimize == 2:
+                    fname = pycache_opt2
+                else:
+                    msg = "invalid value for 'optimize': {!r}".format(self._optimize)
+                    raise ValueError(msg)
+            if not (os.path.isfile(fname) and
+                    os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
+                if not _compile(file_py, optimize=self._optimize):
+                    fname = arcname = file_py
+        archivename = os.path.split(arcname)[1]
+        if basename:
+            archivename = "%s/%s" % (basename, archivename)
+        return (fname, archivename)
+
+
+def main(args=None):
+    import argparse
+
+    description = 'A simple command-line interface for zipfile module.'
+    parser = argparse.ArgumentParser(description=description, color=True)
+    group = parser.add_mutually_exclusive_group(required=True)
+    group.add_argument('-l', '--list', metavar='<zipfile>',
+                       help='Show listing of a zipfile')
+    group.add_argument('-e', '--extract', nargs=2,
+                       metavar=('<zipfile>', '<output_dir>'),
+                       help='Extract zipfile into target dir')
+    group.add_argument('-c', '--create', nargs='+',
+                       metavar=('<name>', '<file>'),
+                       help='Create zipfile from sources')
+    group.add_argument('-t', '--test', metavar='<zipfile>',
+                       help='Test if a zipfile is valid')
+    parser.add_argument('--metadata-encoding', metavar='<encoding>',
+                        help='Specify encoding of member names for -l, -e and -t')
+    args = parser.parse_args(args)
+
+    encoding = args.metadata_encoding
+
+    if args.test is not None:
+        src = args.test
+        with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
+            badfile = zf.testzip()
+        if badfile:
+            print("The following enclosed file is corrupted: {!r}".format(badfile))
+        print("Done testing")
+
+    elif args.list is not None:
+        src = args.list
+        with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
+            zf.printdir()
+
+    elif args.extract is not None:
+        src, curdir = args.extract
+        with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
+            zf.extractall(curdir)
+
+    elif args.create is not None:
+        if encoding:
+            print("Non-conforming encodings not supported with -c.",
+                  file=sys.stderr)
+            sys.exit(1)
+
+        zip_name = args.create.pop(0)
+        files = args.create
+
+        def addToZip(zf, path, zippath):
+            if os.path.isfile(path):
+                zf.write(path, zippath, ZIP_DEFLATED)
+            elif os.path.isdir(path):
+                if zippath:
+                    zf.write(path, zippath)
+                for nm in sorted(os.listdir(path)):
+                    addToZip(zf,
+                             os.path.join(path, nm), os.path.join(zippath, nm))
+            # else: ignore
+
+        with ZipFile(zip_name, 'w') as zf:
+            for path in files:
+                zippath = os.path.basename(path)
+                if not zippath:
+                    zippath = os.path.basename(os.path.dirname(path))
+                if zippath in ('', os.curdir, os.pardir):
+                    zippath = ''
+                addToZip(zf, path, zippath)
+
+
+from ._path import (  # noqa: E402
+    Path,
+
+    # used privately for tests
+    CompleteDirs,  # noqa: F401
+)
diff --git a/Python313_13_x64_Template/Lib/zipfile/__main__.py b/Python314_4_x64_Template/Lib/zipfile/__main__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/zipfile/__main__.py
rename to Python314_4_x64_Template/Lib/zipfile/__main__.py
diff --git a/Python314_4_x64_Template/Lib/zipfile/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/zipfile/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..8f705cd7
Binary files /dev/null and b/Python314_4_x64_Template/Lib/zipfile/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x64_Template/Lib/zipfile/_path/__init__.py b/Python314_4_x64_Template/Lib/zipfile/_path/__init__.py
new file mode 100644
index 00000000..80f5d607
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/zipfile/_path/__init__.py
@@ -0,0 +1,452 @@
+"""
+A Path-like interface for zipfiles.
+
+This codebase is shared between zipfile.Path in the stdlib
+and zipp in PyPI. See
+https://github.com/python/importlib_metadata/wiki/Development-Methodology
+for more detail.
+"""
+
+import contextlib
+import io
+import itertools
+import pathlib
+import posixpath
+import re
+import stat
+import sys
+import zipfile
+
+from .glob import Translator
+
+__all__ = ['Path']
+
+
+def _parents(path):
+    """
+    Given a path with elements separated by
+    posixpath.sep, generate all parents of that path.
+
+    >>> list(_parents('b/d'))
+    ['b']
+    >>> list(_parents('/b/d/'))
+    ['/b']
+    >>> list(_parents('b/d/f/'))
+    ['b/d', 'b']
+    >>> list(_parents('b'))
+    []
+    >>> list(_parents(''))
+    []
+    """
+    return itertools.islice(_ancestry(path), 1, None)
+
+
+def _ancestry(path):
+    """
+    Given a path with elements separated by
+    posixpath.sep, generate all elements of that path.
+
+    >>> list(_ancestry('b/d'))
+    ['b/d', 'b']
+    >>> list(_ancestry('/b/d/'))
+    ['/b/d', '/b']
+    >>> list(_ancestry('b/d/f/'))
+    ['b/d/f', 'b/d', 'b']
+    >>> list(_ancestry('b'))
+    ['b']
+    >>> list(_ancestry(''))
+    []
+
+    Multiple separators are treated like a single.
+
+    >>> list(_ancestry('//b//d///f//'))
+    ['//b//d///f', '//b//d', '//b']
+    """
+    path = path.rstrip(posixpath.sep)
+    while path.rstrip(posixpath.sep):
+        yield path
+        path, tail = posixpath.split(path)
+
+
+_dedupe = dict.fromkeys
+"""Deduplicate an iterable in original order"""
+
+
+def _difference(minuend, subtrahend):
+    """
+    Return items in minuend not in subtrahend, retaining order
+    with O(1) lookup.
+    """
+    return itertools.filterfalse(set(subtrahend).__contains__, minuend)
+
+
+class InitializedState:
+    """
+    Mix-in to save the initialization state for pickling.
+    """
+
+    def __init__(self, *args, **kwargs):
+        self.__args = args
+        self.__kwargs = kwargs
+        super().__init__(*args, **kwargs)
+
+    def __getstate__(self):
+        return self.__args, self.__kwargs
+
+    def __setstate__(self, state):
+        args, kwargs = state
+        super().__init__(*args, **kwargs)
+
+
+class CompleteDirs(InitializedState, zipfile.ZipFile):
+    """
+    A ZipFile subclass that ensures that implied directories
+    are always included in the namelist.
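+    (Many tools omit directory entries: an archive holding only
+    'foo/bar.txt' has no 'foo/' member, so one is synthesized here.)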
+ + >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt'])) + ['foo/', 'foo/bar/'] + >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt', 'foo/bar/'])) + ['foo/'] + """ + + @staticmethod + def _implied_dirs(names): + parents = itertools.chain.from_iterable(map(_parents, names)) + as_dirs = (p + posixpath.sep for p in parents) + return _dedupe(_difference(as_dirs, names)) + + def namelist(self): + names = super().namelist() + return names + list(self._implied_dirs(names)) + + def _name_set(self): + return set(self.namelist()) + + def resolve_dir(self, name): + """ + If the name represents a directory, return that name + as a directory (with the trailing slash). + """ + names = self._name_set() + dirname = name + '/' + dir_match = name not in names and dirname in names + return dirname if dir_match else name + + def getinfo(self, name): + """ + Supplement getinfo for implied dirs. + """ + try: + return super().getinfo(name) + except KeyError: + if not name.endswith('/') or name not in self._name_set(): + raise + return zipfile.ZipInfo(filename=name) + + @classmethod + def make(cls, source): + """ + Given a source (filename or zipfile), return an + appropriate CompleteDirs subclass. + """ + if isinstance(source, CompleteDirs): + return source + + if not isinstance(source, zipfile.ZipFile): + return cls(source) + + # Only allow for FastLookup when supplied zipfile is read-only + if 'r' not in source.mode: + cls = CompleteDirs + + source.__class__ = cls + return source + + @classmethod + def inject(cls, zf: zipfile.ZipFile) -> zipfile.ZipFile: + """ + Given a writable zip file zf, inject directory entries for + any directories implied by the presence of children. + """ + for name in cls._implied_dirs(zf.namelist()): + zf.writestr(name, b"") + return zf + + +class FastLookup(CompleteDirs): + """ + ZipFile subclass to ensure implicit + dirs exist and are resolved rapidly. + """ + + def namelist(self): + with contextlib.suppress(AttributeError): + return self.__names + self.__names = super().namelist() + return self.__names + + def _name_set(self): + with contextlib.suppress(AttributeError): + return self.__lookup + self.__lookup = super()._name_set() + return self.__lookup + +def _extract_text_encoding(encoding=None, *args, **kwargs): + # compute stack level so that the caller of the caller sees any warning. + is_pypy = sys.implementation.name == 'pypy' + # PyPy no longer special cased after 7.3.19 (or maybe 7.3.18) + # See jaraco/zipp#143 + is_old_pypi = is_pypy and sys.pypy_version_info < (7, 3, 19) + stack_level = 3 + is_old_pypi + return io.text_encoding(encoding, stack_level), args, kwargs + + +class Path: + """ + A :class:`importlib.resources.abc.Traversable` interface for zip files. + + Implements many of the features users enjoy from + :class:`pathlib.Path`. + + Consider a zip file with this structure:: + + . + ├── a.txt + └── b + ├── c.txt + └── d + └── e.txt + + >>> data = io.BytesIO() + >>> zf = ZipFile(data, 'w') + >>> zf.writestr('a.txt', 'content of a') + >>> zf.writestr('b/c.txt', 'content of c') + >>> zf.writestr('b/d/e.txt', 'content of e') + >>> zf.filename = 'mem/abcde.zip' + + Path accepts the zipfile object itself or a filename + + >>> path = Path(zf) + + From there, several path operations are available. 
+ + Directory iteration (including the zip file itself): + + >>> a, b = path.iterdir() + >>> a + Path('mem/abcde.zip', 'a.txt') + >>> b + Path('mem/abcde.zip', 'b/') + + name property: + + >>> b.name + 'b' + + join with divide operator: + + >>> c = b / 'c.txt' + >>> c + Path('mem/abcde.zip', 'b/c.txt') + >>> c.name + 'c.txt' + + Read text: + + >>> c.read_text(encoding='utf-8') + 'content of c' + + existence: + + >>> c.exists() + True + >>> (b / 'missing.txt').exists() + False + + Coercion to string: + + >>> import os + >>> str(c).replace(os.sep, posixpath.sep) + 'mem/abcde.zip/b/c.txt' + + At the root, ``name``, ``filename``, and ``parent`` + resolve to the zipfile. + + >>> str(path) + 'mem/abcde.zip/' + >>> path.name + 'abcde.zip' + >>> path.filename == pathlib.Path('mem/abcde.zip') + True + >>> str(path.parent) + 'mem' + + If the zipfile has no filename, such attributes are not + valid and accessing them will raise an Exception. + + >>> zf.filename = None + >>> path.name + Traceback (most recent call last): + ... + TypeError: ... + + >>> path.filename + Traceback (most recent call last): + ... + TypeError: ... + + >>> path.parent + Traceback (most recent call last): + ... + TypeError: ... + + # workaround python/cpython#106763 + >>> pass + """ + + __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" + + def __init__(self, root, at=""): + """ + Construct a Path from a ZipFile or filename. + + Note: When the source is an existing ZipFile object, + its type (__class__) will be mutated to a + specialized type. If the caller wishes to retain the + original type, the caller should either create a + separate ZipFile object or pass a filename. + """ + self.root = FastLookup.make(root) + self.at = at + + def __eq__(self, other): + """ + >>> Path(zipfile.ZipFile(io.BytesIO(), 'w')) == 'foo' + False + """ + if self.__class__ is not other.__class__: + return NotImplemented + return (self.root, self.at) == (other.root, other.at) + + def __hash__(self): + return hash((self.root, self.at)) + + def open(self, mode='r', *args, pwd=None, **kwargs): + """ + Open this entry as text or binary following the semantics + of ``pathlib.Path.open()`` by passing arguments through + to io.TextIOWrapper(). 
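+
+        For example, ``(path / 'b' / 'c.txt').open(encoding='utf-8')``
+        returns a text stream, while ``open('rb')`` returns the raw
+        binary stream from ``ZipFile.open``.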
+ """ + if self.is_dir(): + raise IsADirectoryError(self) + zip_mode = mode[0] + if zip_mode == 'r' and not self.exists(): + raise FileNotFoundError(self) + stream = self.root.open(self.at, zip_mode, pwd=pwd) + if 'b' in mode: + if args or kwargs: + raise ValueError("encoding args invalid for binary operation") + return stream + # Text mode: + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + return io.TextIOWrapper(stream, encoding, *args, **kwargs) + + def _base(self): + return pathlib.PurePosixPath(self.at) if self.at else self.filename + + @property + def name(self): + return self._base().name + + @property + def suffix(self): + return self._base().suffix + + @property + def suffixes(self): + return self._base().suffixes + + @property + def stem(self): + return self._base().stem + + @property + def filename(self): + return pathlib.Path(self.root.filename).joinpath(self.at) + + def read_text(self, *args, **kwargs): + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + with self.open('r', encoding, *args, **kwargs) as strm: + return strm.read() + + def read_bytes(self): + with self.open('rb') as strm: + return strm.read() + + def _is_child(self, path): + return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") + + def _next(self, at): + return self.__class__(self.root, at) + + def is_dir(self): + return not self.at or self.at.endswith("/") + + def is_file(self): + return self.exists() and not self.is_dir() + + def exists(self): + return self.at in self.root._name_set() + + def iterdir(self): + if not self.is_dir(): + raise ValueError("Can't listdir a file") + subs = map(self._next, self.root.namelist()) + return filter(self._is_child, subs) + + def match(self, path_pattern): + return pathlib.PurePosixPath(self.at).match(path_pattern) + + def is_symlink(self): + """ + Return whether this path is a symlink. 
+ """ + info = self.root.getinfo(self.at) + mode = info.external_attr >> 16 + return stat.S_ISLNK(mode) + + def glob(self, pattern): + if not pattern: + raise ValueError(f"Unacceptable pattern: {pattern!r}") + + prefix = re.escape(self.at) + tr = Translator(seps='/') + matches = re.compile(prefix + tr.translate(pattern)).fullmatch + return map(self._next, filter(matches, self.root.namelist())) + + def rglob(self, pattern): + return self.glob(f'**/{pattern}') + + def relative_to(self, other, *extra): + return posixpath.relpath(str(self), str(other.joinpath(*extra))) + + def __str__(self): + return posixpath.join(self.root.filename, self.at) + + def __repr__(self): + return self.__repr.format(self=self) + + def joinpath(self, *other): + next = posixpath.join(self.at, *other) + return self._next(self.root.resolve_dir(next)) + + __truediv__ = joinpath + + @property + def parent(self): + if not self.at: + return self.filename.parent + parent_at = posixpath.dirname(self.at.rstrip('/')) + if parent_at: + parent_at += '/' + return self._next(parent_at) diff --git a/Python314_4_x64_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-314.pyc b/Python314_4_x64_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..de7f5ea5 Binary files /dev/null and b/Python314_4_x64_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/zipfile/_path/__pycache__/glob.cpython-314.pyc b/Python314_4_x64_Template/Lib/zipfile/_path/__pycache__/glob.cpython-314.pyc new file mode 100644 index 00000000..3b522478 Binary files /dev/null and b/Python314_4_x64_Template/Lib/zipfile/_path/__pycache__/glob.cpython-314.pyc differ diff --git a/Python314_4_x64_Template/Lib/zipfile/_path/glob.py b/Python314_4_x64_Template/Lib/zipfile/_path/glob.py new file mode 100644 index 00000000..bd283930 --- /dev/null +++ b/Python314_4_x64_Template/Lib/zipfile/_path/glob.py @@ -0,0 +1,113 @@ +import os +import re + +_default_seps = os.sep + str(os.altsep) * bool(os.altsep) + + +class Translator: + """ + >>> Translator('xyz') + Traceback (most recent call last): + ... + AssertionError: Invalid separators + + >>> Translator('') + Traceback (most recent call last): + ... + AssertionError: Invalid separators + """ + + seps: str + + def __init__(self, seps: str = _default_seps): + assert seps and set(seps) <= set(_default_seps), "Invalid separators" + self.seps = seps + + def translate(self, pattern): + """ + Given a glob pattern, produce a regex that matches it. + """ + return self.extend(self.match_dirs(self.translate_core(pattern))) + + def extend(self, pattern): + r""" + Extend regex for pattern-wide concerns. + + Apply '(?s:)' to create a non-matching group that + matches newlines (valid on Unix). + + Append '\z' to imply fullmatch even when match is used. + """ + return rf'(?s:{pattern})\z' + + def match_dirs(self, pattern): + """ + Ensure that zipfile.Path directory names are matched. + + zipfile.Path directory names always end in a slash. + """ + return rf'{pattern}[/]?' + + def translate_core(self, pattern): + r""" + Given a glob pattern, produce a regex that matches it. 
+
+        >>> t = Translator()
+        >>> t.translate_core('*.txt').replace('\\\\', '')
+        '[^/]*\\.txt'
+        >>> t.translate_core('a?txt')
+        'a[^/]txt'
+        >>> t.translate_core('**/*').replace('\\\\', '')
+        '.*/[^/][^/]*'
+        """
+        self.restrict_rglob(pattern)
+        return ''.join(map(self.replace, separate(self.star_not_empty(pattern))))
+
+    def replace(self, match):
+        """
+        Perform the replacements for a match from :func:`separate`.
+        """
+        return match.group('set') or (
+            re.escape(match.group(0))
+            .replace('\\*\\*', r'.*')
+            .replace('\\*', rf'[^{re.escape(self.seps)}]*')
+            .replace('\\?', r'[^/]')
+        )
+
+    def restrict_rglob(self, pattern):
+        """
+        Raise ValueError if ** appears in anything but a full path segment.
+
+        >>> Translator().translate('**foo')
+        Traceback (most recent call last):
+        ...
+        ValueError: ** must appear alone in a path segment
+        """
+        seps_pattern = rf'[{re.escape(self.seps)}]+'
+        segments = re.split(seps_pattern, pattern)
+        if any('**' in segment and segment != '**' for segment in segments):
+            raise ValueError("** must appear alone in a path segment")
+
+    def star_not_empty(self, pattern):
+        """
+        Ensure that * will not match an empty segment.
+        """
+
+        def handle_segment(match):
+            segment = match.group(0)
+            return '?*' if segment == '*' else segment
+
+        not_seps_pattern = rf'[^{re.escape(self.seps)}]+'
+        return re.sub(not_seps_pattern, handle_segment, pattern)
+
+
+def separate(pattern):
+    """
+    Separate out character sets to avoid translating their contents.
+
+    >>> [m.group(0) for m in separate('*.txt')]
+    ['*.txt']
+    >>> [m.group(0) for m in separate('a[?]txt')]
+    ['a', '[?]', 'txt']
+    """
+    return re.finditer(r'([^\[]+)|(?P<set>[\[].*?[\]])|([\[][^\]]*$)', pattern)
diff --git a/Python314_4_x64_Template/Lib/zipimport.py b/Python314_4_x64_Template/Lib/zipimport.py
new file mode 100644
index 00000000..444c9dd1
--- /dev/null
+++ b/Python314_4_x64_Template/Lib/zipimport.py
@@ -0,0 +1,822 @@
+"""zipimport provides support for importing Python modules from Zip archives.
+
+This module exports two objects:
+- zipimporter: a class; its constructor takes a path to a Zip archive.
+- ZipImportError: exception raised by zipimporter objects. It's a
+  subclass of ImportError, so it can be caught as ImportError, too.
+
+It is usually not needed to use the zipimport module explicitly; it is
+used by the builtin import mechanism for sys.path items that are paths
+to Zip archives.
+""" + +#from importlib import _bootstrap_external +#from importlib import _bootstrap # for _verbose_message +import _frozen_importlib_external as _bootstrap_external +from _frozen_importlib_external import _unpack_uint16, _unpack_uint32, _unpack_uint64 +import _frozen_importlib as _bootstrap # for _verbose_message +import _imp # for check_hash_based_pycs +import _io # for open +import marshal # for loads +import sys # for modules +import time # for mktime + +__all__ = ['ZipImportError', 'zipimporter'] + + +path_sep = _bootstrap_external.path_sep +alt_path_sep = _bootstrap_external.path_separators[1:] + + +class ZipImportError(ImportError): + pass + +# _read_directory() cache +_zip_directory_cache = {} + +_module_type = type(sys) + +END_CENTRAL_DIR_SIZE = 22 +END_CENTRAL_DIR_SIZE_64 = 56 +END_CENTRAL_DIR_LOCATOR_SIZE_64 = 20 +STRING_END_ARCHIVE = b'PK\x05\x06' # standard EOCD signature +STRING_END_LOCATOR_64 = b'PK\x06\x07' # Zip64 EOCD Locator signature +STRING_END_ZIP_64 = b'PK\x06\x06' # Zip64 EOCD signature +MAX_COMMENT_LEN = (1 << 16) - 1 +MAX_UINT32 = 0xffffffff +ZIP64_EXTRA_TAG = 0x1 + +class zipimporter(_bootstrap_external._LoaderBasics): + """zipimporter(archivepath) -> zipimporter object + + Create a new zipimporter instance. 'archivepath' must be a path to + a zipfile, or to a specific path inside a zipfile. For example, it can be + '/tmp/myimport.zip', or '/tmp/myimport.zip/mydirectory', if mydirectory is a + valid directory inside the archive. + + 'ZipImportError is raised if 'archivepath' doesn't point to a valid Zip + archive. + + The 'archive' attribute of zipimporter objects contains the name of the + zipfile targeted. + """ + + # Split the "subdirectory" from the Zip archive path, lookup a matching + # entry in sys.path_importer_cache, fetch the file directory from there + # if found, or else read it from the archive. + def __init__(self, path): + if not isinstance(path, str): + raise TypeError(f"expected str, not {type(path)!r}") + if not path: + raise ZipImportError('archive path is empty', path=path) + if alt_path_sep: + path = path.replace(alt_path_sep, path_sep) + + prefix = [] + while True: + try: + st = _bootstrap_external._path_stat(path) + except (OSError, ValueError): + # On Windows a ValueError is raised for too long paths. + # Back up one path element. + dirname, basename = _bootstrap_external._path_split(path) + if dirname == path: + raise ZipImportError('not a Zip file', path=path) + path = dirname + prefix.append(basename) + else: + # it exists + if (st.st_mode & 0o170000) != 0o100000: # stat.S_ISREG + # it's a not file + raise ZipImportError('not a Zip file', path=path) + break + + if path not in _zip_directory_cache: + _zip_directory_cache[path] = _read_directory(path) + self.archive = path + # a prefix directory following the ZIP file path. + self.prefix = _bootstrap_external._path_join(*prefix[::-1]) + if self.prefix: + self.prefix += path_sep + + + def find_spec(self, fullname, target=None): + """Create a ModuleSpec for the specified module. + + Returns None if the module cannot be found. + """ + module_info = _get_module_info(self, fullname) + if module_info is not None: + return _bootstrap.spec_from_loader(fullname, self, is_package=module_info) + else: + # Not a module or regular package. See if this is a directory, and + # therefore possibly a portion of a namespace package. + + # We're only interested in the last path component of fullname + # earlier components are recorded in self.prefix. 
+ modpath = _get_module_path(self, fullname) + if _is_dir(self, modpath): + # This is possibly a portion of a namespace + # package. Return the string representing its path, + # without a trailing separator. + path = f'{self.archive}{path_sep}{modpath}' + spec = _bootstrap.ModuleSpec(name=fullname, loader=None, + is_package=True) + spec.submodule_search_locations.append(path) + return spec + else: + return None + + def get_code(self, fullname): + """get_code(fullname) -> code object. + + Return the code object for the specified module. Raise ZipImportError + if the module couldn't be imported. + """ + code, ispackage, modpath = _get_module_code(self, fullname) + return code + + + def get_data(self, pathname): + """get_data(pathname) -> string with file data. + + Return the data associated with 'pathname'. Raise OSError if + the file wasn't found. + """ + if alt_path_sep: + pathname = pathname.replace(alt_path_sep, path_sep) + + key = pathname + if pathname.startswith(self.archive + path_sep): + key = pathname[len(self.archive + path_sep):] + + try: + toc_entry = self._get_files()[key] + except KeyError: + raise OSError(0, '', key) + if toc_entry is None: + return b'' + return _get_data(self.archive, toc_entry) + + + # Return a string matching __file__ for the named module + def get_filename(self, fullname): + """get_filename(fullname) -> filename string. + + Return the filename for the specified module or raise ZipImportError + if it couldn't be imported. + """ + # Deciding the filename requires working out where the code + # would come from if the module was actually loaded + code, ispackage, modpath = _get_module_code(self, fullname) + return modpath + + + def get_source(self, fullname): + """get_source(fullname) -> source string. + + Return the source code for the specified module. Raise ZipImportError + if the module couldn't be found, return None if the archive does + contain the module, but has no source for it. + """ + mi = _get_module_info(self, fullname) + if mi is None: + raise ZipImportError(f"can't find module {fullname!r}", name=fullname) + + path = _get_module_path(self, fullname) + if mi: + fullpath = _bootstrap_external._path_join(path, '__init__.py') + else: + fullpath = f'{path}.py' + + try: + toc_entry = self._get_files()[fullpath] + except KeyError: + # we have the module, but no source + return None + return _get_data(self.archive, toc_entry).decode() + + + # Return a bool signifying whether the module is a package or not. + def is_package(self, fullname): + """is_package(fullname) -> bool. + + Return True if the module specified by fullname is a package. + Raise ZipImportError if the module couldn't be found. + """ + mi = _get_module_info(self, fullname) + if mi is None: + raise ZipImportError(f"can't find module {fullname!r}", name=fullname) + return mi + + + # Load and return the module named by 'fullname'. + def load_module(self, fullname): + """load_module(fullname) -> module. + + Load the module specified by 'fullname'. 'fullname' must be the + fully qualified (dotted) module name. It returns the imported + module, or raises ZipImportError if it could not be imported. + + Deprecated since Python 3.10. Use exec_module() instead. 
+        """
+        import warnings
+        warnings._deprecated("zipimport.zipimporter.load_module",
+                             f"{warnings._DEPRECATED_MSG}; "
+                             "use zipimport.zipimporter.exec_module() instead",
+                             remove=(3, 15))
+        code, ispackage, modpath = _get_module_code(self, fullname)
+        mod = sys.modules.get(fullname)
+        if mod is None or not isinstance(mod, _module_type):
+            mod = _module_type(fullname)
+            sys.modules[fullname] = mod
+        mod.__loader__ = self
+
+        try:
+            if ispackage:
+                # add __path__ to the module *before* the code gets
+                # executed
+                path = _get_module_path(self, fullname)
+                fullpath = _bootstrap_external._path_join(self.archive, path)
+                mod.__path__ = [fullpath]
+
+            if not hasattr(mod, '__builtins__'):
+                mod.__builtins__ = __builtins__
+            _bootstrap_external._fix_up_module(mod.__dict__, fullname, modpath)
+            exec(code, mod.__dict__)
+        except:
+            del sys.modules[fullname]
+            raise
+
+        try:
+            mod = sys.modules[fullname]
+        except KeyError:
+            raise ImportError(f'Loaded module {fullname!r} not found in sys.modules')
+        _bootstrap._verbose_message('import {} # loaded from Zip {}', fullname, modpath)
+        return mod
+
+
+    def get_resource_reader(self, fullname):
+        """Return the ResourceReader for a module in a zip file."""
+        from importlib.readers import ZipReader
+
+        return ZipReader(self, fullname)
+
+
+    def _get_files(self):
+        """Return the files within the archive path."""
+        try:
+            files = _zip_directory_cache[self.archive]
+        except KeyError:
+            try:
+                files = _zip_directory_cache[self.archive] = _read_directory(self.archive)
+            except ZipImportError:
+                files = {}
+
+        return files
+
+
+    def invalidate_caches(self):
+        """Invalidates the cache of file data of the archive path."""
+        _zip_directory_cache.pop(self.archive, None)
+
+
+    def __repr__(self):
+        return f'<zipimporter object "{self.archive}{path_sep}{self.prefix}">'
+
+
+# _zip_searchorder defines how we search for a module in the Zip
+# archive: we first search for a package __init__, then for
+# non-package .pyc, and .py entries. The .pyc entries
+# are swapped by initzipimport() if we run in optimized mode. Also,
+# '/' is replaced by path_sep there.
+_zip_searchorder = (
+    (path_sep + '__init__.pyc', True, True),
+    (path_sep + '__init__.py', False, True),
+    ('.pyc', True, False),
+    ('.py', False, False),
+)
+
+# Given a module name, return the potential file path in the
+# archive (without extension).
+def _get_module_path(self, fullname):
+    return self.prefix + fullname.rpartition('.')[2]
+
+# Does this path represent a directory?
+def _is_dir(self, path):
+    # See if this is a "directory". If so, it's eligible to be part
+    # of a namespace package. We test by seeing if the name, with an
+    # appended path separator, exists.
+    dirpath = path + path_sep
+    # If dirpath is present in self._get_files(), we have a directory.
+    return dirpath in self._get_files()
+
+# Return some information about a module.
+def _get_module_info(self, fullname):
+    path = _get_module_path(self, fullname)
+    for suffix, isbytecode, ispackage in _zip_searchorder:
+        fullpath = path + suffix
+        if fullpath in self._get_files():
+            return ispackage
+    return None
+
+
+# implementation
+
+# _read_directory(archive) -> files dict (new reference)
+#
+# Given a path to a Zip archive, build a dict, mapping file names
+# (local to the archive, using SEP as a separator) to toc entries.
+#
+# A toc_entry is a tuple:
+#
+# (__file__,      # value to use for __file__, available for all files,
+#                 # encoded to the filesystem encoding
+#  compress,      # compression kind; 0 for uncompressed
+#  data_size,     # size of compressed data on disk
+#  file_size,     # size of decompressed data
+#  file_offset,   # offset of file header from start of archive
+#  time,          # mod time of file (in dos format)
+#  date,          # mod date of file (in dos format)
+#  crc,           # crc checksum of the data
+# )
+#
+# Directories can be recognized by the trailing path_sep in the name,
+# data_size and file_offset are 0.
+def _read_directory(archive):
+    try:
+        fp = _io.open_code(archive)
+    except OSError:
+        raise ZipImportError(f"can't open Zip file: {archive!r}", path=archive)
+
+    with fp:
+        # GH-87235: On macOS all file descriptors for /dev/fd/N share the same
+        # file offset, reset the file offset after scanning the zipfile directory
+        # to not cause problems when someone runs 'python3 /dev/fd/9 9<SOME.zip'
+        start_offset = fp.tell()
+        try:
+            # Check if there's a comment.
+            try:
+                fp.seek(0, 2)
+                file_size = fp.tell()
+            except OSError:
+                raise ZipImportError(f"can't read Zip file: {archive!r}",
+                                     path=archive)
+            max_comment_plus_dirs_size = (
+                MAX_COMMENT_LEN + END_CENTRAL_DIR_SIZE +
+                END_CENTRAL_DIR_SIZE_64 + END_CENTRAL_DIR_LOCATOR_SIZE_64)
+            max_comment_start = max(file_size - max_comment_plus_dirs_size, 0)
+            try:
+                fp.seek(max_comment_start)
+                data = fp.read(max_comment_plus_dirs_size)
+            except OSError:
+                raise ZipImportError(f"can't read Zip file: {archive!r}",
+                                     path=archive)
+            pos = data.rfind(STRING_END_ARCHIVE)
+            pos64 = data.rfind(STRING_END_ZIP_64)
+
+            if (pos64 >= 0 and pos64+END_CENTRAL_DIR_SIZE_64+END_CENTRAL_DIR_LOCATOR_SIZE_64==pos):
+                # Zip64 at "correct" offset from standard EOCD
+                buffer = data[pos64:pos64 + END_CENTRAL_DIR_SIZE_64]
+                if len(buffer) != END_CENTRAL_DIR_SIZE_64:
+                    raise ZipImportError(
+                        f"corrupt Zip64 file: Expected {END_CENTRAL_DIR_SIZE_64} byte "
+                        f"zip64 central directory, but read {len(buffer)} bytes.",
+                        path=archive)
+                header_position = file_size - len(data) + pos64
+
+                central_directory_size = _unpack_uint64(buffer[40:48])
+                central_directory_position = _unpack_uint64(buffer[48:56])
+                num_entries = _unpack_uint64(buffer[24:32])
+            elif pos >= 0:
+                buffer = data[pos:pos+END_CENTRAL_DIR_SIZE]
+                if len(buffer) != END_CENTRAL_DIR_SIZE:
+                    raise ZipImportError(f"corrupt Zip file: {archive!r}",
+                                         path=archive)
+
+                header_position = file_size - len(data) + pos
+
+                # Buffer now contains a valid EOCD, and header_position gives the
+                # starting position of it.
+                central_directory_size = _unpack_uint32(buffer[12:16])
+                central_directory_position = _unpack_uint32(buffer[16:20])
+                num_entries = _unpack_uint16(buffer[8:10])
+
+                # N.b. if someday you want to prefer the standard (non-zip64) EOCD,
+                # you need to adjust position by 76 for arc to be 0.
+            else:
+                raise ZipImportError(f'not a Zip file: {archive!r}',
+                                     path=archive)
+
+            # Buffer now contains a valid EOCD, and header_position gives the
+            # starting position of it.
+            # XXX: These are cursory checks but are not as exact or strict as they
+            # could be. Checking the arc-adjusted value is probably good too.
+            if header_position < central_directory_size:
+                raise ZipImportError(f'bad central directory size: {archive!r}', path=archive)
+            if header_position < central_directory_position:
+                raise ZipImportError(f'bad central directory offset: {archive!r}', path=archive)
+            header_position -= central_directory_size
+            # On just-a-zipfile these values are the same and arc_offset is zero; if
+            # the file has some bytes prepended, `arc_offset` is the number of such
+            # bytes. This is used for pex as well as self-extracting .exe.
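+            # A worked example with illustrative numbers: if a 1000-byte
+            # self-extracting stub precedes the archive, the EOCD is found
+            # 1000 bytes later than the offsets recorded inside the zip, so
+            # header_position exceeds central_directory_position by 1000,
+            # arc_offset becomes 1000, and each file_offset read from the
+            # central directory is shifted by that amount below.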
+ arc_offset = header_position - central_directory_position + if arc_offset < 0: + raise ZipImportError(f'bad central directory size or offset: {archive!r}', path=archive) + + files = {} + # Start of Central Directory + count = 0 + try: + fp.seek(header_position) + except OSError: + raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) + while True: + buffer = fp.read(46) + if len(buffer) < 4: + raise EOFError('EOF read where not expected') + # Start of file header + if buffer[:4] != b'PK\x01\x02': + if count != num_entries: + raise ZipImportError( + f"mismatched num_entries: {count} should be {num_entries} in {archive!r}", + path=archive, + ) + break # Bad: Central Dir File Header + if len(buffer) != 46: + raise EOFError('EOF read where not expected') + flags = _unpack_uint16(buffer[8:10]) + compress = _unpack_uint16(buffer[10:12]) + time = _unpack_uint16(buffer[12:14]) + date = _unpack_uint16(buffer[14:16]) + crc = _unpack_uint32(buffer[16:20]) + data_size = _unpack_uint32(buffer[20:24]) + file_size = _unpack_uint32(buffer[24:28]) + name_size = _unpack_uint16(buffer[28:30]) + extra_size = _unpack_uint16(buffer[30:32]) + comment_size = _unpack_uint16(buffer[32:34]) + file_offset = _unpack_uint32(buffer[42:46]) + header_size = name_size + extra_size + comment_size + + try: + name = fp.read(name_size) + except OSError: + raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) + if len(name) != name_size: + raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) + # On Windows, calling fseek to skip over the fields we don't use is + # slower than reading the data because fseek flushes stdio's + # internal buffers. See issue #8745. + try: + extra_data_len = header_size - name_size + extra_data = memoryview(fp.read(extra_data_len)) + + if len(extra_data) != extra_data_len: + raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) + except OSError: + raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) + + if flags & 0x800: + # UTF-8 file names extension + name = name.decode() + else: + # Historical ZIP filename encoding + try: + name = name.decode('ascii') + except UnicodeDecodeError: + name = name.decode('latin1').translate(cp437_table) + + name = name.replace('/', path_sep) + path = _bootstrap_external._path_join(archive, name) + + # Ordering matches unpacking below. + if ( + file_size == MAX_UINT32 or + data_size == MAX_UINT32 or + file_offset == MAX_UINT32 + ): + # need to decode extra_data looking for a zip64 extra (which might not + # be present) + while extra_data: + if len(extra_data) < 4: + raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) + tag = _unpack_uint16(extra_data[:2]) + size = _unpack_uint16(extra_data[2:4]) + if len(extra_data) < 4 + size: + raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) + if tag == ZIP64_EXTRA_TAG: + if (len(extra_data) - 4) % 8 != 0: + raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) + num_extra_values = (len(extra_data) - 4) // 8 + if num_extra_values > 3: + raise ZipImportError(f"can't read header extra: {archive!r}", path=archive) + import struct + values = list(struct.unpack_from(f"<{min(num_extra_values, 3)}Q", + extra_data, offset=4)) + + # N.b. 
Here be dragons: the ordering of these is different than + # the header fields, and it's really easy to get it wrong since + # naturally-occurring zips that use all 3 are >4GB + if file_size == MAX_UINT32: + file_size = values.pop(0) + if data_size == MAX_UINT32: + data_size = values.pop(0) + if file_offset == MAX_UINT32: + file_offset = values.pop(0) + + break + + # For a typical zip, this bytes-slicing only happens 2-3 times, on + # small data like timestamps and filesizes. + extra_data = extra_data[4+size:] + else: + _bootstrap._verbose_message( + "zipimport: suspected zip64 but no zip64 extra for {!r}", + path, + ) + # XXX These two statements seem swapped because `central_directory_position` + # is a position within the actual file, but `file_offset` (when compared) is + # as encoded in the entry, not adjusted for this file. + # N.b. this must be after we've potentially read the zip64 extra which can + # change `file_offset`. + if file_offset > central_directory_position: + raise ZipImportError(f'bad local header offset: {archive!r}', path=archive) + file_offset += arc_offset + + t = (path, compress, data_size, file_size, file_offset, time, date, crc) + files[name] = t + count += 1 + finally: + fp.seek(start_offset) + _bootstrap._verbose_message('zipimport: found {} names in {!r}', count, archive) + + # Add implicit directories. + count = 0 + for name in list(files): + while True: + i = name.rstrip(path_sep).rfind(path_sep) + if i < 0: + break + name = name[:i + 1] + if name in files: + break + files[name] = None + count += 1 + if count: + _bootstrap._verbose_message('zipimport: added {} implicit directories in {!r}', + count, archive) + return files + +# During bootstrap, we may need to load the encodings +# package from a ZIP file. But the cp437 encoding is implemented +# in Python in the encodings package. +# +# Break out of this dependency by using the translation table for +# the cp437 encoding. +cp437_table = ( + # ASCII part, 8 rows x 16 chars + '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f' + '\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f' + ' !"#$%&\'()*+,-./' + '0123456789:;<=>?' + '@ABCDEFGHIJKLMNO' + 'PQRSTUVWXYZ[\\]^_' + '`abcdefghijklmno' + 'pqrstuvwxyz{|}~\x7f' + # non-ASCII part, 16 rows x 8 chars + '\xc7\xfc\xe9\xe2\xe4\xe0\xe5\xe7' + '\xea\xeb\xe8\xef\xee\xec\xc4\xc5' + '\xc9\xe6\xc6\xf4\xf6\xf2\xfb\xf9' + '\xff\xd6\xdc\xa2\xa3\xa5\u20a7\u0192' + '\xe1\xed\xf3\xfa\xf1\xd1\xaa\xba' + '\xbf\u2310\xac\xbd\xbc\xa1\xab\xbb' + '\u2591\u2592\u2593\u2502\u2524\u2561\u2562\u2556' + '\u2555\u2563\u2551\u2557\u255d\u255c\u255b\u2510' + '\u2514\u2534\u252c\u251c\u2500\u253c\u255e\u255f' + '\u255a\u2554\u2569\u2566\u2560\u2550\u256c\u2567' + '\u2568\u2564\u2565\u2559\u2558\u2552\u2553\u256b' + '\u256a\u2518\u250c\u2588\u2584\u258c\u2590\u2580' + '\u03b1\xdf\u0393\u03c0\u03a3\u03c3\xb5\u03c4' + '\u03a6\u0398\u03a9\u03b4\u221e\u03c6\u03b5\u2229' + '\u2261\xb1\u2265\u2264\u2320\u2321\xf7\u2248' + '\xb0\u2219\xb7\u221a\u207f\xb2\u25a0\xa0' +) + +_importing_zlib = False + +# Return the zlib.decompress function object, or NULL if zlib couldn't +# be imported. The function is cached when found, so subsequent calls +# don't import zlib again. +def _get_decompress_func(): + global _importing_zlib + if _importing_zlib: + # Someone has a zlib.py[co] in their Zip file + # let's avoid a stack overflow. 
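+        # (_importing_zlib is the re-entrancy guard: importing zlib from
+        # the archive would itself go through zipimport and recurse back
+        # into this function.)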
+        _bootstrap._verbose_message('zipimport: zlib UNAVAILABLE')
+        raise ZipImportError("can't decompress data; zlib not available")
+
+    _importing_zlib = True
+    try:
+        from zlib import decompress
+    except Exception:
+        _bootstrap._verbose_message('zipimport: zlib UNAVAILABLE')
+        raise ZipImportError("can't decompress data; zlib not available")
+    finally:
+        _importing_zlib = False
+
+    _bootstrap._verbose_message('zipimport: zlib available')
+    return decompress
+
+# Given a path to a Zip file and a toc_entry, return the (uncompressed) data.
+def _get_data(archive, toc_entry):
+    datapath, compress, data_size, file_size, file_offset, time, date, crc = toc_entry
+    if data_size < 0:
+        raise ZipImportError('negative data size')
+
+    with _io.open_code(archive) as fp:
+        # Check to make sure the local file header is correct
+        try:
+            fp.seek(file_offset)
+        except OSError:
+            raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
+        buffer = fp.read(30)
+        if len(buffer) != 30:
+            raise EOFError('EOF read where not expected')
+
+        if buffer[:4] != b'PK\x03\x04':
+            # Bad: Local File Header
+            raise ZipImportError(f'bad local file header: {archive!r}', path=archive)
+
+        name_size = _unpack_uint16(buffer[26:28])
+        extra_size = _unpack_uint16(buffer[28:30])
+        header_size = 30 + name_size + extra_size
+        file_offset += header_size  # Start of file data
+        try:
+            fp.seek(file_offset)
+        except OSError:
+            raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
+        raw_data = fp.read(data_size)
+        if len(raw_data) != data_size:
+            raise OSError("zipimport: can't read data")
+
+    if compress == 0:
+        # data is not compressed
+        return raw_data
+
+    # Decompress with zlib
+    try:
+        decompress = _get_decompress_func()
+    except Exception:
+        raise ZipImportError("can't decompress data; zlib not available")
+    return decompress(raw_data, -15)
+
+
+# Lenient date/time comparison function. The precision of the mtime
+# in the archive is lower than the mtime stored in a .pyc: we
+# must allow a difference of at most one second.
+def _eq_mtime(t1, t2):
+    # dostime only stores even seconds, so be lenient
+    return abs(t1 - t2) <= 1
+
+
+# Given the contents of a .py[co] file, unmarshal the data
+# and return the code object. Raises ImportError if the magic word doesn't
+# match, or if the recorded .py[co] metadata does not match the source.
+def _unmarshal_code(self, pathname, fullpath, fullname, data):
+    exc_details = {
+        'name': fullname,
+        'path': fullpath,
+    }
+
+    flags = _bootstrap_external._classify_pyc(data, fullname, exc_details)
+
+    hash_based = flags & 0b1 != 0
+    if hash_based:
+        check_source = flags & 0b10 != 0
+        if (_imp.check_hash_based_pycs != 'never' and
+                (check_source or _imp.check_hash_based_pycs == 'always')):
+            source_bytes = _get_pyc_source(self, fullpath)
+            if source_bytes is not None:
+                source_hash = _imp.source_hash(
+                    _imp.pyc_magic_number_token,
+                    source_bytes,
+                )
+
+                _bootstrap_external._validate_hash_pyc(
+                    data, source_hash, fullname, exc_details)
+    else:
+        source_mtime, source_size = \
+            _get_mtime_and_size_of_source(self, fullpath)
+
+        if source_mtime:
+            # We don't use _bootstrap_external._validate_timestamp_pyc
+            # to allow for a more lenient timestamp check.
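+            # Layout of a timestamp-based .pyc header, 16 bytes in all:
+            # magic (4 bytes), flags (4), source mtime (4), source size (4).
+            # Hence data[8:12] below is the recorded mtime and data[12:16]
+            # the recorded source size.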
+            if (not _eq_mtime(_unpack_uint32(data[8:12]), source_mtime) or
+                    _unpack_uint32(data[12:16]) != source_size):
+                _bootstrap._verbose_message(
+                    f'bytecode is stale for {fullname!r}')
+                return None
+
+    code = marshal.loads(data[16:])
+    if not isinstance(code, _code_type):
+        raise TypeError(f'compiled module {pathname!r} is not a code object')
+    return code
+
+_code_type = type(_unmarshal_code.__code__)
+
+
+# Replace any occurrences of '\r\n?' in the input string with '\n'.
+# This converts DOS and Mac line endings to Unix line endings.
+def _normalize_line_endings(source):
+    source = source.replace(b'\r\n', b'\n')
+    source = source.replace(b'\r', b'\n')
+    return source
+
+# Given a string buffer containing Python source code, compile it
+# and return a code object.
+def _compile_source(pathname, source):
+    source = _normalize_line_endings(source)
+    return compile(source, pathname, 'exec', dont_inherit=True)
+
+# Convert the date/time values found in the Zip archive to a value
+# that's compatible with the time stamp stored in .pyc files.
+def _parse_dostime(d, t):
+    return time.mktime((
+        (d >> 9) + 1980,    # bits 9..15: year
+        (d >> 5) & 0xF,     # bits 5..8: month
+        d & 0x1F,           # bits 0..4: day
+        t >> 11,            # bits 11..15: hours
+        (t >> 5) & 0x3F,    # bits 5..10: minutes
+        (t & 0x1F) * 2,     # bits 0..4: seconds / 2
+        -1, -1, -1))
+
+# Given a path to a .pyc file in the archive, return the
+# modification time of the matching .py file and its size,
+# or (0, 0) if no source is available.
+def _get_mtime_and_size_of_source(self, path):
+    try:
+        # strip 'c' or 'o' from *.py[co]
+        assert path[-1:] in ('c', 'o')
+        path = path[:-1]
+        toc_entry = self._get_files()[path]
+        # fetch the time stamp of the .py file for comparison
+        # with an embedded pyc time stamp
+        time = toc_entry[5]
+        date = toc_entry[6]
+        uncompressed_size = toc_entry[3]
+        return _parse_dostime(date, time), uncompressed_size
+    except (KeyError, IndexError, TypeError):
+        return 0, 0
+
+
+# Given a path to a .pyc file in the archive, return the
+# contents of the matching .py file, or None if no source
+# is available.
+def _get_pyc_source(self, path):
+    # strip 'c' or 'o' from *.py[co]
+    assert path[-1:] in ('c', 'o')
+    path = path[:-1]
+
+    try:
+        toc_entry = self._get_files()[path]
+    except KeyError:
+        return None
+    else:
+        return _get_data(self.archive, toc_entry)
+
+
+# Get the code object associated with the module specified by
+# 'fullname'.
+def _get_module_code(self, fullname):
+    path = _get_module_path(self, fullname)
+    import_error = None
+    for suffix, isbytecode, ispackage in _zip_searchorder:
+        fullpath = path + suffix
+        _bootstrap._verbose_message('trying {}{}{}', self.archive, path_sep, fullpath, verbosity=2)
+        try:
+            toc_entry = self._get_files()[fullpath]
+        except KeyError:
+            pass
+        else:
+            modpath = toc_entry[0]
+            data = _get_data(self.archive, toc_entry)
+            code = None
+            if isbytecode:
+                try:
+                    code = _unmarshal_code(self, modpath, fullpath, fullname, data)
+                except ImportError as exc:
+                    import_error = exc
+            else:
+                code = _compile_source(modpath, data)
+            if code is None:
+                # bad magic number or non-matching mtime
+                # in byte code, try next
+                continue
+            modpath = toc_entry[0]
+            return code, ispackage, modpath
+    else:
+        if import_error:
+            msg = f"module load failed: {import_error}"
+            raise ZipImportError(msg, name=fullname) from import_error
+        else:
+            raise ZipImportError(f"can't find module {fullname!r}", name=fullname)
diff --git a/Python313_13_x64_Template/Lib/zoneinfo/__init__.py b/Python314_4_x64_Template/Lib/zoneinfo/__init__.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/zoneinfo/__init__.py
rename to Python314_4_x64_Template/Lib/zoneinfo/__init__.py
diff --git a/Python313_13_x64_Template/Lib/zoneinfo/_common.py b/Python314_4_x64_Template/Lib/zoneinfo/_common.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/zoneinfo/_common.py
rename to Python314_4_x64_Template/Lib/zoneinfo/_common.py
diff --git a/Python313_13_x64_Template/Lib/zoneinfo/_tzpath.py b/Python314_4_x64_Template/Lib/zoneinfo/_tzpath.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/zoneinfo/_tzpath.py
rename to Python314_4_x64_Template/Lib/zoneinfo/_tzpath.py
diff --git a/Python313_13_x64_Template/Lib/zoneinfo/_zoneinfo.py b/Python314_4_x64_Template/Lib/zoneinfo/_zoneinfo.py
similarity index 100%
rename from Python313_13_x64_Template/Lib/zoneinfo/_zoneinfo.py
rename to Python314_4_x64_Template/Lib/zoneinfo/_zoneinfo.py
diff --git a/Python314_4_x64_Template/NEWS.txt b/Python314_4_x64_Template/NEWS.txt
new file mode 100644
index 00000000..6b05d9f3
--- /dev/null
+++ b/Python314_4_x64_Template/NEWS.txt
@@ -0,0 +1,55267 @@
++++++++++++
+Python News
++++++++++++
+
+What's New in Python 3.14.4 final?
+==================================
+
+*Release date: 2026-04-07*
+
+Security
+--------
+
+- gh-145986: :mod:`xml.parsers.expat`: Fixed a crash caused by unbounded C
+  recursion when converting deeply nested XML content models with
+  :meth:`~xml.parsers.expat.xmlparser.ElementDeclHandler`. This addresses
+  :cve:`2026-4224`.
+
+- gh-145599: Reject control characters in :class:`http.cookies.Morsel`
+  :meth:`~http.cookies.Morsel.update` and
+  :meth:`~http.cookies.BaseCookie.js_output`. This addresses
+  :cve:`2026-3644`.
+
+- gh-145506: Fixes :cve:`2026-2297` by ensuring that
+  ``SourcelessFileLoader`` uses :func:`io.open_code` when opening ``.pyc``
+  files.
+
+- gh-144370: Disallow usage of control characters in status in
+  :mod:`wsgiref.handlers` to prevent HTTP header injections. Patch by
+  Benedikt Johannes.
+
+- gh-143930: Reject leading dashes in URLs passed to
+  :func:`webbrowser.open`.
+
+Core and Builtins
+-----------------
+
+- gh-148157: Fix an unlikely crash when parsing an invalid type comment for
+  function parameters. Found by OSS Fuzz in :oss-fuzz:`492782951`.
+ +- gh-148144: Initialize ``_PyInterpreterFrame.visited`` when copying + interpreter frames so incremental GC does not read an uninitialized byte + from generator and frame-object copies. + +- gh-146615: Fix a crash in :meth:`~object.__get__` for + :c:expr:`METH_METHOD` descriptors when an invalid (non-type) object is + passed as the second argument. Patch by Steven Sun. + +- gh-146308: Fixed several error handling issues in the + :mod:`!_remote_debugging` module, including safer validation of remote + ``int`` objects, clearer asyncio task chain failures, and cache cleanup + fixes that avoid leaking or double-freeing metadata on allocation failure. + Patch by Pablo Galindo. + +- gh-146128: Fix a bug which could cause constant values to be partially + corrupted in AArch64 JIT code. This issue is theoretical, and hasn't + actually been observed in unmodified Python interpreters. + +- gh-146250: Fixed a memory leak in :exc:`SyntaxError` when re-initializing + it. + +- gh-146245: Fixed reference leaks in :mod:`socket` when audit hooks raise + exceptions in :func:`socket.getaddrinfo` and :meth:`!socket.sendto`. + +- gh-146196: Fix potential Undefined Behavior in + :c:func:`PyUnicodeWriter_WriteASCII` by adding a zero-length check. Patch + by Shamil Abdulaev. + +- gh-146227: Fix wrong type in ``_Py_atomic_load_uint16`` in the C11 atomics + backend (``pyatomic_std.h``), which used a 32-bit atomic load instead of + 16-bit. Found by Mohammed Zuhaib. + +- gh-146056: Fix :func:`repr` for lists and tuples containing ``NULL``\ s. + +- gh-146092: Handle properly memory allocation failures on str and float + opcodes. Patch by Victor Stinner. + +- gh-146041: Fix free-threading scaling bottleneck in :func:`sys.intern` and + :c:func:`PyObject_SetAttr` by avoiding the interpreter-wide lock when the + string is already interned and immortalized. + +- gh-145990: ``python --help-env`` sections are now sorted by environment + variable name. + +- gh-145990: ``python --help-xoptions`` is now sorted by ``-X`` option name. + +- gh-145376: Fix GC tracking in ``structseq.__replace__()``. + +- gh-145792: Fix out-of-bounds access when invoking faulthandler on a + CPython build compiled without support for VLAs. + +- gh-142183: Avoid a pathological case where repeated calls at a specific + stack depth could be significantly slower. + +- gh-145779: Improve scaling of :func:`classmethod` and :func:`staticmethod` + calls in the free-threaded build by avoiding the descriptor ``__get__`` + call. + +- gh-145783: Fix an unlikely crash in the parser when certain errors were + erroneously not propagated. Found by OSS Fuzz in :oss-fuzz:`491369109`. + +- gh-145685: Improve scaling of type attribute lookups in the + :term:`free-threaded build` by avoiding contention on the internal type + lock. + +- gh-145701: Fix :exc:`SystemError` when ``__classdict__`` or + ``__conditional_annotations__`` is in a class-scope inlined comprehension. + Found by OSS Fuzz in :oss-fuzz:`491105000`. + +- gh-145713: Make :meth:`bytearray.resize` thread-safe in the free-threaded + build by using a critical section and calling the lock-held variant of the + resize function. + +- gh-145615: Fixed a memory leak in the :term:`free-threaded build` where + mimalloc pages could become permanently unreclaimable until the owning + thread exited. + +- gh-145566: In the free threading build, skip the stop-the-world pause when + reassigning ``__class__`` on a newly created object. 
+
+- gh-145335: Fix a crash in :func:`os.pathconf` when called with ``-1`` as
+  the path argument.
+
+- gh-145036: In the free-threaded build, fix a race condition when calling
+  :meth:`!__sizeof__` on a :class:`list`.
+
+- gh-145376: Fix reference leaks in various unusual error scenarios.
+
+- gh-145234: Fixed a ``SystemError`` in the parser when an encoding cookie
+  (for example, UTF-7) decodes to carriage returns (``\r``). Newlines are
+  now normalized after decoding in the string tokenizer.
+
+  Patch by Pablo Galindo.
+
+- gh-130555: Fix use-after-free in :meth:`dict.clear` when the dictionary
+  values are embedded in an object and a destructor causes re-entrant
+  mutation of the dictionary.
+
+- gh-145187: Fix a compiler assertion failure when a type parameter bound
+  contains an invalid expression in a conditional block.
+
+- gh-145142: Fix a crash in the free-threaded build when the dictionary
+  argument to :meth:`str.maketrans` is concurrently modified.
+
+- gh-144872: Fix heap buffer overflow in the parser found by OSS-Fuzz.
+
+- gh-144766: Fix a crash in the fork child process when perf support is
+  enabled.
+
+- gh-144759: Fix undefined behavior in the lexer when ``start`` and
+  ``multi_line_start`` pointers are ``NULL`` in
+  ``_PyLexer_remember_fstring_buffers()`` and
+  ``_PyLexer_restore_fstring_buffers()``. The ``NULL`` pointer arithmetic
+  (``NULL - valid_pointer``) is now guarded with explicit ``NULL`` checks.
+
+- gh-144563: Fix interaction of the Tachyon profiler with :mod:`ctypes` and
+  other modules that load the Python shared library (if present) in an
+  independent map, as this was confusing the mechanism that loads the
+  binary information. Patch by Pablo Galindo.
+
+- gh-144601: Fix crash when importing a module whose ``PyInit`` function
+  raises an exception from a subinterpreter.
+
+- gh-144438: Align the QSBR thread state array to a 64-byte cache line
+  boundary to avoid false sharing in the :term:`free-threaded build`.
+
+- gh-144513: Fix potential deadlock when using critical sections during
+  stop-the-world pauses in the free-threaded build.
+
+- gh-144446: Fix data races in the free-threaded build when reading frame
+  object attributes while another thread is executing the frame.
+
+- gh-143636: Fix a crash when calling :class:`SimpleNamespace.__replace__()
+  <types.SimpleNamespace>` on non-namespace instances. Patch by Bénédikt
+  Tran.
+
+- gh-143650: Fix race condition in :mod:`importlib` where a thread could
+  receive a stale module reference when another thread's import fails.
+
+- gh-141732: Ensure the :meth:`~object.__repr__` for :exc:`ExceptionGroup`
+  and :exc:`BaseExceptionGroup` does not change when the exception sequence
+  that was originally passed in to its constructor is subsequently mutated.
+
+- gh-140594: Fix an out-of-bounds read when a single NUL character is read
+  from the standard input. Patch by Shamil Abdulaev.
+
+- gh-91636: While performing garbage collection, clear weakrefs to
+  unreachable objects that are created during the running of finalizers. If
+  those weakrefs are not cleared, they could reveal unreachable objects.
+
+- gh-130327: Fix erroneous clearing of an object's :attr:`~object.__dict__`
+  if overwritten at runtime.
+
+- gh-80667: Literals using the ``\N{name}`` escape syntax can now construct
+  CJK ideographs and Hangul syllables using case-insensitive names.
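+
+  A hedged illustration of the change (doctest-style; the lowercase
+  spelling is assumed to resolve only on interpreters with this fix)::
+
+      >>> "\N{hangul syllable ga}" == "\N{HANGUL SYLLABLE GA}"
+      True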
+
+Library
+-------
+
+- gh-144503: Fix a regression introduced in 3.14.3 and 3.13.12 where the
+  :mod:`multiprocessing` ``forkserver`` start method would fail with
+  :exc:`BrokenPipeError` when the parent process had a very large
+  :data:`sys.argv`. The argv is now passed to the forkserver as separate
+  command-line arguments rather than being embedded in the ``-c`` command
+  string, avoiding the operating system's per-argument length limit.
+
+- gh-146613: :mod:`itertools`: Fix a crash in :func:`itertools.groupby` when
+  the grouper iterator is concurrently mutated.
+
+- gh-146080: :mod:`ssl`: fix a crash when an SNI callback tries to use an
+  SSL object that has already been garbage-collected. Patch by Bénédikt
+  Tran.
+
+- gh-146556: Fix :func:`annotationlib.get_annotations` hanging indefinitely
+  when called with ``eval_str=True`` on a callable that has a circular
+  ``__wrapped__`` chain (e.g. ``f.__wrapped__ = f``). Cycle detection using
+  an id-based visited set now stops the traversal and falls back to the
+  globals found so far, mirroring the approach of :func:`inspect.unwrap`.
+
+- gh-146090: :mod:`sqlite3`: fix a crash when
+  :meth:`sqlite3.Connection.create_collation` fails with `SQLITE_BUSY
+  <https://www.sqlite.org/rescode.html#busy>`__. Patch by Bénédikt Tran.
+
+- gh-146090: :mod:`sqlite3`: properly raise :exc:`MemoryError` instead of
+  :exc:`SystemError` when a context callback fails to be allocated. Patch by
+  Bénédikt Tran.
+
+- gh-145633: Fix ``struct.pack('f', float)``: use :c:func:`PyFloat_Pack4` to
+  raise :exc:`OverflowError`. Patch by Sergey B Kirpichev and Victor
+  Stinner.
+
+- gh-146310: The :mod:`ensurepip` module no longer looks for ``pip-*.whl``
+  wheel packages in the current directory.
+
+- gh-146083: Update the bundled `libexpat
+  <https://github.com/libexpat/libexpat>`_ to version 2.7.5.
+
+- gh-146076: :mod:`zoneinfo`: fix crashes when deleting ``_weak_cache`` from
+  a :class:`zoneinfo.ZoneInfo` subclass.
+
+- gh-146054: Limit the size of the :func:`encodings.search_function` cache.
+  Found by OSS Fuzz in :oss-fuzz:`493449985`.
+
+- gh-146004: All :option:`-X` options from the Python command line are now
+  propagated to child processes spawned by :mod:`multiprocessing`, not just
+  a hard-coded subset. This makes the behavior consistent between the
+  default "spawn" and "forkserver" start methods and the old "fork" start
+  method. The options that were previously not propagated are:
+  ``context_aware_warnings``, ``cpu_count``, ``disable-remote-debug``,
+  ``int_max_str_digits``, ``lazy_imports``, ``no_debug_ranges``,
+  ``pathconfig_warnings``, ``perf``, ``perf_jit``, ``presite``,
+  ``pycache_prefix``, ``thread_inherit_context``, and
+  ``warn_default_encoding``.
+
+- gh-145883: :mod:`zoneinfo`: Fix heap buffer overflow reads from malformed
+  TZif data. Found by OSS Fuzz, issues :oss-fuzz:`492245058` and
+  :oss-fuzz:`492230068`.
+
+- gh-145754: Request the signature during mock autospec with the
+  ``FORWARDREF`` annotation format. This prevents runtime errors when an
+  annotation uses a name that is not defined at runtime.
+
+- gh-145750: Avoid undefined behaviour from signed integer overflow when
+  parsing format strings in the :mod:`struct` module. Found by OSS Fuzz in
+  :oss-fuzz:`488466741`.
+
+- gh-145492: Fix infinite recursion in :class:`collections.defaultdict`
+  ``__repr__`` when a ``defaultdict`` contains itself. Based on analysis by
+  KowalskiThomas in :gh:`145492`.
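+
+  A minimal reproducer sketch for the case described above::
+
+      >>> from collections import defaultdict
+      >>> d = defaultdict(list)
+      >>> d['self'] = d
+      >>> _ = repr(d)  # must terminate instead of recursing without bound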
+
+- gh-145623: Fix a crash in :mod:`struct` when calling :func:`repr` or
+  ``__sizeof__()`` on an uninitialized :class:`struct.Struct` object created
+  via ``Struct.__new__()`` without calling ``__init__()``.
+
+- gh-145616: Detect the Android sysconfig ABI correctly on 32-bit ARM
+  Android on a 64-bit ARM kernel.
+
+- gh-145551: Fix InvalidStateError when cancelling a process created by
+  :func:`asyncio.create_subprocess_exec` or
+  :func:`asyncio.create_subprocess_shell`. Patch by Daan De Meyer.
+
+- gh-145446: :mod:`functools` is now safer in the free-threaded build when
+  using keyword arguments in :func:`functools.partial`.
+
+- gh-145417: :mod:`venv`: Prevent incorrect preservation of the SELinux
+  context when copying the ``Activate.ps1`` script. The script inherited the
+  SELinux security context of the system template directory, rather than the
+  destination project directory.
+
+- gh-145376: Fix a double free and null pointer dereference in unusual error
+  scenarios in the :mod:`hashlib` and :mod:`hmac` modules.
+
+- gh-145301: :mod:`hmac`: fix a crash when the initialization of the
+  underlying C extension module fails.
+
+- gh-145301: :mod:`hashlib`: fix a crash when the initialization of the
+  underlying C extension module fails.
+
+- gh-145264: The Base64 decoder (see :func:`binascii.a2b_base64`,
+  :func:`base64.b64decode`, etc.) no longer ignores excess data after the
+  first padded quad in non-strict (default) mode. Instead, in conformance
+  with :rfc:`4648`, section 3.3, it now ignores the pad character, "=", if
+  it is present before the end of the encoded data.
+
+- gh-145158: Avoid undefined behaviour from signed integer overflow when
+  parsing format strings in the :mod:`struct` module.
+
+- gh-144984: Fix a crash in
+  :meth:`xml.parsers.expat.xmlparser.ExternalEntityParserCreate` when an
+  allocation fails. The error paths could dereference NULL ``handlers`` and
+  double-decrement the parent parser's reference count.
+
+- gh-88091: Fix :func:`unicodedata.decomposition` for Hangul characters.
+
+- gh-144986: Fix a memory leak in :func:`atexit.register`. Patch by Shamil
+  Abdulaev.
+
+- gh-144777: Fix data races in :class:`io.IncrementalNewlineDecoder` in the
+  :term:`free-threaded build`.
+
+- gh-144809: Make :class:`collections.deque` copy atomic in the
+  :term:`free-threaded build`.
+
+- gh-144835: Added missing explanations for some parameters in
+  :func:`glob.glob` and :func:`glob.iglob`.
+
+- gh-144833: Fixed a use-after-free in :mod:`ssl` when ``SSL_new()`` returns
+  NULL in ``newPySSLSocket()``. The error was reported via a dangling
+  pointer after the object had already been freed.
+
+- gh-144782: Fix :class:`argparse.ArgumentParser` to be :mod:`pickleable
+  <pickle>`.
+
+- gh-144259: Fix inconsistent display of long multiline pasted content in
+  the REPL.
+
+- gh-144156: Fix the folding of headers by the :mod:`email` library when
+  :rfc:`2047` encoded words are used. Now whitespace is correctly preserved
+  and also correctly added between adjacent encoded words. The latter
+  property was broken by the fix for gh-92081, which mostly fixed previous
+  failures to preserve whitespace.
+
+- gh-66305: Fixed a hang on Windows in the :mod:`tempfile` module when
+  trying to create a temporary file or subdirectory in a non-writable
+  directory.
+
+- gh-140814: :func:`multiprocessing.freeze_support` no longer sets the
+  default start method as a side effect, which previously caused a
+  subsequent :func:`multiprocessing.set_start_method` call to raise
+  :exc:`RuntimeError`.
+
+- gh-144475: Calling :func:`repr` on :func:`functools.partial` is now safer
+  when the partial object's internal attributes are replaced while the
+  string representation is being generated.
+
+- gh-144538: Bump the version of pip bundled in ensurepip to version 26.0.1.
+
+- gh-144494: Fix performance regression in :func:`asyncio.all_tasks` on
+  :term:`free-threaded builds <free threading>`. Patch by Kumar Aditya.
+
+- gh-144316: Fix crash in ``_remote_debugging`` that caused
+  ``test_external_inspection`` to intermittently fail. Patch by Taegyun Kim.
+
+- gh-144363: Update the bundled `libexpat
+  <https://github.com/libexpat/libexpat>`_ to 2.7.4.
+
+- gh-143637: Fixed a crash in socket.sendmsg() that could occur if ancillary
+  data is mutated re-entrantly during argument parsing.
+
+- gh-143543: Fix a crash in itertools.groupby that could occur when a
+  user-defined :meth:`~object.__eq__` method re-enters the iterator during
+  key comparison.
+
+- gh-140652: Fix a crash in :func:`!_interpchannels.list_all` after closing
+  a channel.
+
+- gh-143698: Allow the *scheduler* and *setpgroup* arguments to be
+  explicitly :const:`None` when calling :func:`os.posix_spawn` or
+  :func:`os.posix_spawnp`. Patch by Bénédikt Tran.
+
+- gh-143698: Raise :exc:`TypeError` instead of :exc:`SystemError` when the
+  *scheduler* in :func:`os.posix_spawn` or :func:`os.posix_spawnp` is not a
+  tuple. Patch by Bénédikt Tran.
+
+- gh-142516: :mod:`ssl`: fix reference leaks in :class:`ssl.SSLContext`
+  objects. Patch by Bénédikt Tran.
+
+- gh-143304: Fix :class:`ctypes.CDLL` to honor the ``handle`` parameter on
+  POSIX systems.
+
+- gh-142781: :mod:`zoneinfo`: fix a crash when instantiating
+  :class:`~zoneinfo.ZoneInfo` objects for which the internal class-level
+  cache is inconsistent.
+
+- gh-142763: Fix a race condition between :class:`zoneinfo.ZoneInfo`
+  creation and :func:`zoneinfo.ZoneInfo.clear_cache` that could raise
+  :exc:`KeyError`.
+
+- gh-142787: Fix an assertion failure in :mod:`sqlite3` blob subscripting
+  when slicing with indices that result in an empty slice.
+
+- gh-142352: Fix :meth:`asyncio.StreamWriter.start_tls` to transfer buffered
+  data from :class:`~asyncio.StreamReader` to the SSL layer, preventing data
+  loss when upgrading a connection to TLS mid-stream (e.g., when
+  implementing PROXY protocol support).
+
+- gh-141707: Don't change the :class:`tarfile.TarInfo` type from
+  ``AREGTYPE`` to ``DIRTYPE`` when parsing GNU long name or link headers.
+
+- gh-139933: Improve :exc:`AttributeError` suggestions for classes with a
+  custom :meth:`~object.__dir__` method returning a list of unsortable
+  values. Patch by Bénédikt Tran.
+
+- gh-137335: Get rid of any possibility of a name conflict for named pipes
+  in :mod:`multiprocessing` and :mod:`asyncio` on Windows, no matter how
+  small.
+
+- gh-80667: Support lookup of Tangut Ideographs in :mod:`unicodedata`.
+
+- bpo-40243: Fix :meth:`!unicodedata.ucd_3_2_0.numeric` for non-decimal
+  values.
+
+Documentation
+-------------
+
+- gh-126676: Expand the :mod:`argparse` documentation for ``type=bool`` with
+  a demonstration of the surprising behavior and pointers to common
+  alternatives. (A short sketch follows at the end of this section.)
+
+- gh-145649: Fix text wrapping and formatting of ``-X`` option descriptions
+  in the :manpage:`python(1)` man page by using proper roff markup.
+
+- gh-145450: Document missing public :class:`wave.Wave_write` getter
+  methods.
+
+- gh-136246: A new "Improve this page" link is available in the left-hand
+  sidebar of the docs, offering links to create GitHub issues, discussion
+  forum posts, or pull requests.
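+
+The ``type=bool`` surprise referenced in gh-126676 above, in miniature (a
+sketch: ``bool`` applied to any non-empty string, including ``'False'``,
+returns ``True``)::
+
+    >>> import argparse
+    >>> parser = argparse.ArgumentParser()
+    >>> _ = parser.add_argument('--flag', type=bool)
+    >>> parser.parse_args(['--flag', 'False']).flag
+    True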
+
+Tests
+-----
+
+- gh-144418: The Android testbed's emulator RAM has been increased from 2 GB
+  to 4 GB.
+
+- gh-146202: Fix a race condition in regrtest: make sure that the temporary
+  directory is created in the worker process. Previously, temp_cwd() could
+  fail on Windows if the "build" directory was not created. Patch by Victor
+  Stinner.
+
+- gh-144739: When Python was compiled with a system expat older than 2.7.2
+  but tests run with a newer expat, still skip
+  :class:`!test.test_pyexpat.MemoryProtectionTest`.
+
+Build
+-----
+
+- gh-146541: The Android testbed can now be built for 32-bit ARM and x86
+  targets.
+
+- gh-146498: The iOS XCframework build script now ensures libpython isn't
+  included in installed app content, and is more robust in identifying
+  standard library binary content that requires processing.
+
+- gh-146450: The Android build script was modified to improve parity with
+  other platform build scripts.
+
+- gh-146446: The clean target for the Apple/iOS XCframework build script is
+  now more selective when targeting a single architecture.
+
+- gh-145801: When the Python build is optimized with GCC using PGO, use the
+  ``-fprofile-update=atomic`` option to use atomic operations when updating
+  profile information. This option reduces the risk of gcov data file
+  (.gcda) corruption, which can cause random GCC crashes. Patch by Victor
+  Stinner.
+
+Windows
+-------
+
+- gh-145307: Defers loading of the ``psapi.dll`` module until it is used by
+  :func:`ctypes.util.dllist`.
+
+- gh-144551: Updated the bundled version of OpenSSL to 3.0.19.
+
+- gh-140131: Fix the REPL cursor position on Windows when the module
+  completion suggestion line hits the console width.
+
+macOS
+-----
+
+- gh-144551: Update the macOS installer to use OpenSSL 3.0.19.
+
+- gh-137586: Invoke :program:`osascript` with an absolute path in
+  :mod:`webbrowser` and :mod:`!turtledemo`.
+
+C API
+-----
+
+- gh-146056: :c:func:`PyUnicodeWriter_WriteRepr` now supports a ``NULL``
+  argument.
+
+- gh-145010: Use GCC dialect alternatives for inline assembly in
+  ``object.h`` so that the Python headers compile correctly with
+  ``-masm=intel``.
+
+- gh-144981: Made :c:func:`PyUnstable_Code_SetExtra`,
+  :c:func:`PyUnstable_Code_GetExtra`, and
+  :c:func:`PyUnstable_Eval_RequestCodeExtraIndex` thread-safe on the
+  :term:`free threaded <free threading>` build.
+
+
+What's New in Python 3.14.3 final?
+==================================
+
+*Release date: 2026-02-03*
+
+Windows
+-------
+
+- gh-128067: Fix a bug in PyREPL on Windows where output without a trailing
+  newline was overwritten by the next prompt.
+
+Tools/Demos
+-----------
+
+- gh-142095: Make the gdb 'py-bt' command use the frame from thread-local
+  state when available. Patch by Sam Gross and Victor Stinner.
+
+Tests
+-----
+
+- gh-144415: The Android testbed now distinguishes between stdout/stderr
+  messages which were triggered by a newline, and those triggered by a
+  manual call to ``flush``. This fixes logging of progress indicators and
+  similar content.
+
+- gh-143460: Skip tests relying on infinite recursion if the stack size is
+  unlimited.
+
+- gh-65784: Add support for the parametrized resource ``wantobjects`` in
+  regrtest, which allows running Tkinter tests with the specified value of
+  :data:`!tkinter.wantobjects`, for example ``-u wantobjects=0``.
+
+- gh-143553: Add support for parametrized resources, such as ``-u
+  xpickle=2.7``.
+
+- gh-142836: Accommodated Solaris in
+  ``test_pdb.test_script_target_anonymous_pipe``.
+
+- bpo-31391: Forward-port test_xpickle from Python 2 to Python 3 and add the
+  resource back to the test's command line.
+
+Security
+--------
+
+- gh-144125: :class:`~email.generator.BytesGenerator` will now refuse to
+  serialize (write) headers that are unsafely folded or delimited; see
+  :attr:`~email.policy.Policy.verify_generated_headers`. (Contributed by Bas
+  Bloemsaat and Petr Viktorin in :gh:`121650`.)
+
+- gh-143935: Fixed a bug in the folding of comments when flattening an email
+  message using a modern email policy. Comments consisting of a very long
+  sequence of non-foldable characters could trigger a forced line wrap that
+  omitted the required leading space on the continuation line, causing the
+  remainder of the comment to be interpreted as a new header field. This
+  enabled header injection with carefully crafted inputs.
+
+- gh-143925: Reject control characters in ``data:`` URL media types.
+
+- gh-143919: Reject control characters in :class:`http.cookies.Morsel`
+  fields and values.
+
+- gh-143916: Reject C0 control characters within wsgiref.headers.Headers
+  fields, values, and parameters.
+
+Library
+-------
+
+- gh-144380: Improve performance of :class:`io.BufferedReader` line
+  iteration by ~49%.
+
+- gh-144169: Fix three crashes when non-string keyword arguments are
+  supplied to objects in the :mod:`ast` module.
+
+- gh-144100: Fixed a crash in ctypes when using a deprecated
+  ``POINTER(str)`` type in ``argtypes``. Instead of aborting, ctypes now
+  raises a proper Python exception when the pointer target type is
+  unresolved.
+
+- gh-144050: Fix :func:`stat.filemode` in the pure-Python implementation to
+  avoid misclassifying invalid mode values as block devices.
+
+- gh-144023: Fixed validation of file descriptor 0 in posix functions when
+  used with the follow_symlinks parameter.
+
+- gh-143999: Fix an issue where :func:`inspect.getgeneratorstate` and
+  :func:`inspect.getcoroutinestate` could fail for generators wrapped by
+  :func:`types.coroutine` in the suspended state.
+
+- gh-143831: :class:`annotationlib.ForwardRef` objects are now hashable when
+  created from annotation scopes with closures. Previously, hashing such
+  objects would throw an exception. Patch by Bartosz Sławecki.
+
+- gh-143874: Fixed a bug in :mod:`pdb` where expression results were not
+  sent back to the remote client.
+
+- gh-143880: Fix a data race in :func:`functools.partial` in the :term:`free
+  threading` build.
+
+- gh-143706: Fix :mod:`multiprocessing` forkserver so that :data:`sys.argv`
+  is correctly set before ``__main__`` is preloaded. Previously,
+  :data:`sys.argv` was empty during main module import in forkserver child
+  processes. This fixes a regression introduced in 3.13.8 and 3.14.1. Root
+  caused by Aaron Wieczorek, test provided by Thomas Watson, thanks!
+
+- gh-143638: Forbid reentrant calls of the :class:`pickle.Pickler` and
+  :class:`pickle.Unpickler` methods for the C implementation. Previously,
+  this could cause a crash or data corruption; now concurrent calls of
+  methods of the same object raise :exc:`RuntimeError`.
+
+- gh-78724: Raise :exc:`RuntimeError` when a user attempts to call methods
+  on half-initialized :class:`~struct.Struct` objects, for example, those
+  created by ``Struct.__new__(Struct)``. Patch by Sergey B Kirpichev. (A
+  short sketch follows below.)
+
+- gh-143196: Fix a crash when the internal encoder object returned by the
+  undocumented function :func:`!json.encoder.c_make_encoder` was called with
+  a non-zero second (*_current_indent_level*) argument.
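+
+A sketch of the half-initialized case from gh-78724 above (bypassing
+``__init__`` via ``__new__`` is unusual but possible; per the entry, method
+calls on such objects now raise :exc:`RuntimeError`)::
+
+    >>> from struct import Struct
+    >>> s = Struct.__new__(Struct)  # no format string; never initialized
+    >>> s.pack(1)
+    Traceback (most recent call last):
+      ...
+    RuntimeError: ...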
+
+- gh-143191: :func:`_thread.stack_size` now raises :exc:`ValueError` if the
+  stack size is too small. Patch by Victor Stinner.
+
+- gh-143602: Fix an inconsistency in :meth:`~io.RawIOBase.write` that leads
+  to an unexpected buffer overwrite, by deduplicating the buffer exports.
+
+- gh-143547: Fix :func:`sys.unraisablehook` when the hook raises an
+  exception and changes :func:`sys.unraisablehook`: hold a strong reference
+  to the old hook. Patch by Victor Stinner.
+
+- gh-143517: :func:`annotationlib.get_annotations` no longer raises a
+  :exc:`SyntaxError` when evaluating a stringified starred annotation that
+  starts with one or more whitespace characters followed by a ``*``. Patch
+  by Bartosz Sławecki.
+
+- gh-143378: Fix use-after-free crashes when a :class:`~io.BytesIO` object
+  is concurrently mutated during :meth:`~io.RawIOBase.write` or
+  :meth:`~io.IOBase.writelines`.
+
+- gh-143346: Fix incorrect wrapping of the Base64 data in
+  :class:`!plistlib._PlistWriter` when the indent contains a mix of tabs and
+  spaces.
+
+- gh-143310: :mod:`tkinter`: fix a crash when a Python :class:`list` is
+  mutated during the conversion to a Tcl object (e.g., when setting a Tcl
+  variable). Patch by Bénédikt Tran.
+
+- gh-143309: Fix a crash in :func:`os.execve` on non-Windows platforms when
+  given a custom environment mapping which is then mutated during parsing.
+  Patch by Bénédikt Tran.
+
+- gh-143308: :mod:`pickle`: fix use-after-free crashes when a
+  :class:`~pickle.PickleBuffer` is concurrently mutated by a custom buffer
+  callback during pickling. Patch by Bénédikt Tran and Aaron Wieczorek.
+
+- gh-143237: Fix support of named pipes in the rotating :mod:`logging`
+  handlers.
+
+- gh-143249: Fix possible buffer leaks in Windows overlapped I/O during
+  error handling.
+
+- gh-143241: :mod:`zoneinfo`: fix infinite loop in :meth:`ZoneInfo.from_file
+  <zoneinfo.ZoneInfo.from_file>` when parsing a malformed TZif file (see the
+  example below). Patch by Fatih Celik.
+
+- gh-142830: :mod:`sqlite3`: fix use-after-free crashes when the
+  connection's callbacks are mutated during a callback execution. Patch by
+  Bénédikt Tran.
+
+- gh-143200: :mod:`xml.etree.ElementTree`: fix use-after-free crashes in
+  :meth:`~object.__getitem__` and :meth:`~object.__setitem__` methods of
+  :class:`~xml.etree.ElementTree.Element` when the element is concurrently
+  mutated. Patch by Bénédikt Tran.
+
+- gh-142195: Updated timeout evaluation logic in :mod:`subprocess` to be
+  compatible with deterministic environments like Shadow where time moves
+  exactly as requested.
+
+- gh-142164: Fix the ctypes bitfield overflow error message to report the
+  correct offset and size calculation.
+
+- gh-143145: Fixed a possible reference leak in ctypes when constructing
+  results with multiple output parameters on error.
+
+- gh-122431: Corrected the error message in
+  :func:`readline.append_history_file` to state that ``nelements`` must be
+  non-negative instead of positive.
+
+- gh-143004: Fix a potential use-after-free in
+  :meth:`collections.Counter.update` when user code mutates the Counter
+  during an update.
+
+- gh-143046: The :mod:`asyncio` REPL no longer prints copyright and version
+  messages in the quiet mode (:option:`-q`). Patch by Bartosz Sławecki.
+
+- gh-140648: The :mod:`asyncio` REPL now respects the :option:`-I` flag
+  (isolated mode). Previously, it would load and execute
+  :envvar:`PYTHONSTARTUP` even if the flag was set. Contributed by Bartosz
+  Sławecki.
+
+- gh-142991: Fixed socket operations such as recvfrom() and sendto() for
+  FreeBSD divert(4) sockets.
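+
+For reference, a hedged usage sketch of the :meth:`!ZoneInfo.from_file` API
+from gh-143241 above (the TZif path is an assumption; any well-formed TZif
+file works)::
+
+   from zoneinfo import ZoneInfo
+
+   with open("/usr/share/zoneinfo/UTC", "rb") as f:  # hypothetical path
+       tz = ZoneInfo.from_file(f, key="UTC")
+   print(tz)  # UTC; a malformed file no longer loops forever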
+
+- gh-143010: Fixed a bug in :mod:`mailbox` where the precise timing of an
+  external event could result in the library opening an existing file
+  instead of a file it expected to create.
+
+- gh-142881: Fix concurrent and reentrant call of :func:`atexit.unregister`.
+
+- gh-112127: Fix possible use-after-free in :func:`atexit.unregister` when
+  the callback is unregistered during comparison.
+
+- gh-142783: Fix a use-after-free in :mod:`zoneinfo` when ``_weak_cache`` is
+  a descriptor: such a descriptor could cause crashes during object
+  creation. The fix ensures proper reference counting for
+  descriptor-provided objects.
+
+- gh-142754: Add the *ownerDocument* attribute to :mod:`xml.dom.minidom`
+  elements and attributes created by directly instantiating the ``Element``
+  or ``Attr`` class. Note that this way of creating nodes is not supported;
+  creator functions like :py:meth:`xml.dom.Document.createElement` should
+  be used instead.
+
+- gh-142784: The :mod:`asyncio` REPL now properly closes the loop at the
+  end of an interactive session. Previously, it could cause surprising
+  warnings. Contributed by Bartosz Sławecki.
+
+- gh-142555: :mod:`array`: fix a crash in ``a[i] = v`` when converting *i*
+  to an index via :meth:`i.__index__ <object.__index__>` or
+  :meth:`i.__float__ <object.__float__>` mutates the array.
+
+- gh-142594: Fix crash in ``TextIOWrapper.close()`` when the underlying
+  buffer's ``closed`` property calls :meth:`~io.TextIOBase.detach`.
+
+- gh-142451: :mod:`hmac`: Ensure that the :attr:`HMAC.block_size
+  <hmac.HMAC.block_size>` attribute is correctly copied by :meth:`HMAC.copy
+  <hmac.HMAC.copy>`. Patch by Bénédikt Tran.
+
+- gh-142495: :class:`collections.defaultdict` now prioritizes
+  :meth:`~object.__setitem__` when inserting default values from
+  ``default_factory``. This prevents race conditions where a default value
+  would overwrite a value set before ``default_factory`` returns.
+
+- gh-142651: :mod:`unittest.mock`: fix a thread safety issue where
+  :attr:`Mock.call_count <unittest.mock.Mock.call_count>` may return
+  inaccurate values when the mock is called concurrently from multiple
+  threads.
+
+- gh-142595: Added type check during initialization of the :mod:`decimal`
+  module to prevent a crash in case of a broken stdlib. Patch by Sergey B
+  Kirpichev.
+
+- gh-142556: Fix crash when a task gets re-registered during finalization in
+  :mod:`asyncio`. Patch by Kumar Aditya.
+
+- gh-123241: Avoid reference count operations in garbage collection of
+  :mod:`ctypes` objects.
+
+- gh-142517: The non-``compat32`` :mod:`email` policies now correctly handle
+  refolding encoded words that contain bytes that cannot be decoded in
+  their specified character set. Previously this resulted in an encoding
+  exception during folding.
+
+- gh-112527: The help text for required options in :mod:`argparse` is no
+  longer extended with " (default: None)".
+
+- gh-142346: Fix usage formatting for mutually exclusive groups in
+  :mod:`argparse` when they are preceded by positional arguments or followed
+  or intermixed with other optional arguments.
+
+- gh-142315: Pdb can now run scripts from anonymous pipes used in process
+  substitution. Patch by Bartosz Sławecki.
+
+- gh-142332: Fix usage formatting for positional arguments in mutually
+  exclusive groups in :mod:`argparse`.
+
+- gh-142282: Fix :func:`winreg.QueryValueEx` so that it does not
+  accidentally read a garbage buffer under a race condition.
+
+- gh-75949: Fix :mod:`argparse` to preserve ``|`` separators in mutually
+  exclusive groups when the usage line wraps due to length.
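+
+A small sketch of the mutually exclusive group usage formatting covered by
+the :mod:`argparse` entries above (gh-142332, gh-75949); the program name
+and options are made up for illustration::
+
+   import argparse
+
+   parser = argparse.ArgumentParser(prog="tool")
+   group = parser.add_mutually_exclusive_group()
+   group.add_argument("--json", action="store_true")
+   group.add_argument("--csv", action="store_true")
+   parser.print_usage()  # usage: tool [-h] [--json | --csv]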
+
+- gh-142267: Improve :mod:`argparse` performance by caching the formatter
+  used for argument validation.
+
+- gh-68552: ``MisplacedEnvelopeHeaderDefect`` and ``Missing header name``
+  defects are now correctly passed to the ``handle_defect`` method of
+  ``policy`` in :class:`~email.parser.FeedParser`.
+
+- gh-142006: Fix a bug in the :mod:`email.policy.default` folding algorithm
+  which incorrectly resulted in a doubled newline when a line ending at
+  exactly *max_line_length* was followed by an unfoldable token.
+
+- gh-105836: Fix :func:`asyncio.run_coroutine_threadsafe` leaving the
+  underlying cancelled asyncio task running.
+
+- gh-139971: :mod:`pydoc`: Ensure that the link to the online documentation
+  of a :term:`stdlib` module is correct.
+
+- gh-139262: Some keystrokes could be swallowed in the new ``PyREPL`` on
+  Windows, especially when used together with the ALT key. Fix by Chris
+  Eibl.
+
+- gh-138897: Improved :data:`license`/:data:`copyright`/:data:`credits`
+  display in the :term:`REPL`: now uses a pager.
+
+- gh-79986: Add parsing for ``References`` and ``In-Reply-To`` headers to
+  the :mod:`email` library that parses the header content as lists of
+  message id tokens. This prevents them from being folded incorrectly.
+
+- gh-136282: Add support for :const:`~configparser.UNNAMED_SECTION` when
+  creating a section via mapping protocol access.
+
+- gh-109263: Starting a process from a spawn context in
+  :mod:`multiprocessing` no longer sets the start method globally.
+
+- gh-133253: Fix thread-safety issues in :mod:`linecache`.
+
+- gh-132715: Skip writing objects during marshalling once a failure has
+  occurred.
+
+IDLE
+----
+
+- gh-143774: Better explain the operation of Format / Format Paragraph.
+
+Documentation
+-------------
+
+- gh-140806: Add documentation for :func:`enum.bin`.
+
+Core and Builtins
+-----------------
+
+- gh-144307: Prevent a reference leak in module teardown at interpreter
+  finalization.
+
+- gh-144194: Fix error handling in perf jitdump initialization on memory
+  allocation failure.
+
+- gh-144012: Check if the result is ``NULL`` in the ``BINARY_OP_EXTEND``
+  opcode.
+
+- gh-141805: Fix crash in :class:`set` when objects with the same hash are
+  concurrently added to the set after removing an element with that hash
+  while the set still contains other elements with that hash.
+
+- gh-143670: Fix a crash in the ``ga_repr_items_list`` function.
+
+- gh-143377: Fix a crash in :func:`!_interpreters.capture_exception` when
+  the exception is incorrectly formatted. Patch by Bénédikt Tran.
+
+- gh-136924: The interactive help mode in the :term:`REPL` no longer
+  incorrectly syntax highlights text input as Python code. Contributed by
+  Olga Matoula.
+
+- gh-143189: Fix crash when inserting a non-:class:`str` key into a split
+  table dictionary when the key matches an existing key in the split table
+  but has no corresponding value in the dict.
+
+- gh-143228: Fix use-after-free in the perf trampoline when toggling
+  profiling while threads are running or during interpreter finalization
+  with daemon threads active. The fix uses reference counting to ensure
+  trampolines are not freed while any code object could still reference
+  them. Patch by Pablo Galindo.
+
+- gh-142664: Fix a use-after-free crash in :meth:`memoryview.__hash__
+  <object.__hash__>` when the ``__hash__`` method of the referenced object
+  mutates that object or the view. Patch by Bénédikt Tran.
+
+- gh-142557: Fix a use-after-free crash in :meth:`!bytearray.__mod__` when
+  the :class:`!bytearray` is mutated while formatting the ``%``-style
+  arguments. Patch by Bénédikt Tran.
+
+- gh-143195: Fix use-after-free crashes in :meth:`bytearray.hex` and
+  :meth:`memoryview.hex` when the separator's :meth:`~object.__len__`
+  mutates the original object. Patch by Bénédikt Tran.
+
+- gh-142975: Fix crash after unfreezing all objects tracked by the garbage
+  collector on the :term:`free threaded <free threading>` build.
+
+- gh-143135: Set :data:`sys.flags.inspect` to ``1`` when
+  :envvar:`PYTHONINSPECT` is ``0``. Previously, it was set to ``0`` in this
+  case.
+
+- gh-143003: Fix an overflow of the shared empty buffer in
+  :meth:`bytearray.extend` when ``__length_hint__()`` returns 0 for a
+  non-empty iterator.
+
+- gh-143006: Fix a possible assertion error when comparing negative
+  non-integer ``float`` and ``int`` with the same number of bits in the
+  integer part.
+
+- gh-143057: Avoid locking in :c:func:`PyTraceMalloc_Track` and
+  :c:func:`PyTraceMalloc_Untrack` when :mod:`tracemalloc` is not enabled.
+
+- gh-142776: Fix a file descriptor leak in ``import.c``.
+
+- gh-142829: Fix a use-after-free crash in :class:`contextvars.Context`
+  comparison when a custom ``__eq__`` method modifies the context via
+  :meth:`~contextvars.ContextVar.set`.
+
+- gh-142766: Clear the frame of a generator when :meth:`generator.close` is
+  called.
+
+- gh-142737: Tracebacks will be displayed in fallback mode even if
+  :func:`io.open` is lost. Previously, this would crash the interpreter.
+  Patch by Bartosz Sławecki.
+
+- gh-142554: Fix a crash in :func:`divmod` when :func:`!_pylong.int_divmod`
+  does not return a tuple of exactly two elements. Patch by Bénédikt Tran.
+
+- gh-142560: Fix use-after-free in :class:`bytearray` search-like methods
+  (:meth:`~bytearray.find`, :meth:`~bytearray.count`,
+  :meth:`~bytearray.index`, :meth:`~bytearray.rindex`, and
+  :meth:`~bytearray.rfind`) by marking the storage as exported, which causes
+  reallocation attempts to raise :exc:`BufferError`. For
+  :func:`~operator.contains`, :meth:`~bytearray.split`, and
+  :meth:`~bytearray.rsplit` the :ref:`buffer protocol <bufferobjects>` is
+  used for this.
+
+- gh-142531: Fix a free-threaded GC performance regression. If there are
+  many untracked tuples, the GC would run too often, resulting in poor
+  performance. The fix is to include untracked tuples in the "long lived"
+  object count. The number of frozen objects is also now included since the
+  free-threaded GC must scan those too.
+
+- gh-142402: Fix reference counting when adjacent literal parts are merged
+  while constructing :class:`string.templatelib.Template`, preventing the
+  displaced string object from leaking.
+
+- gh-133932: Fix crash in the free threading build when clearing frames that
+  hold tagged integers.
+
+- gh-142343: Fix SIGILL crash on m68k due to an incorrect assembly
+  constraint.
+
+- gh-100964: Fix reference cycle in exhausted generator frames. Patch by
+  Savannah Ostrowski.
+
+- gh-69605: Fix edge cases around already imported modules in the
+  :term:`REPL` auto-completion of imports.
+
+- gh-138568: Adjusted the built-in :func:`help` function so that empty
+  inputs are ignored in interactive mode.
+
+- gh-137007: Fix a bug during JIT compilation failure which caused garbage
+  collection debug assertions to fail.
+
+C API
+-----
+
+- gh-142589: Fix :c:func:`PyUnstable_Object_IsUniqueReferencedTemporary()`
+  handling of tagged ints on the interpreter stack.
+
+- gh-142571: :c:func:`!PyUnstable_CopyPerfMapFile` now checks that opening
+  the file succeeded before flushing.
+
+Build
+-----
+
+- gh-142454: When calculating the digest of the JIT stencils input, sort the
+  hashed files by filename before adding their content to the hasher. This
+  ensures deterministic hash input and hence a deterministic hash,
+  independent of filesystem order.
+
+- gh-141808: When running ``make clean-retain-profile``, keep the generated
+  JIT stencils. That way, the stencils are not generated twice when
+  profile-guided optimization (PGO) is used. It also allows distributors to
+  supply their own pre-built JIT stencils.
+
+- gh-138061: Ensure reproducible builds by making JIT stencil header
+  generation deterministic.
+
+
+What's New in Python 3.14.2 final?
+==================================
+
+*Release date: 2025-12-05*
+
+Security
+--------
+
+- gh-142145: Remove quadratic behavior in ``xml.dom.minidom`` node ID cache
+  clearing.
+
+- gh-119452: Fix a potential memory denial of service in the
+  :mod:`http.server` module. When a malicious user is connected to the CGI
+  server on Windows, it could cause an arbitrary amount of memory to be
+  allocated. This could have led to symptoms including a :exc:`MemoryError`,
+  swapping, out of memory (OOM) killed processes or containers, or even
+  system crashes.
+
+Library
+-------
+
+- gh-140797: Revert changes to the undocumented :class:`!re.Scanner` class.
+  Capturing groups are still allowed for backward compatibility, although
+  using them can lead to incorrect results. They will be forbidden in future
+  Python versions.
+
+- gh-142206: The resource tracker in the :mod:`multiprocessing` module now
+  uses the original communication protocol, as in Python 3.14.0 and below,
+  by default. This avoids issues with upgrading Python while it is running.
+  (Note that such 'in-place' upgrades are not tested.) The tracker remains
+  compatible with subprocesses that use the new protocol (that is,
+  subprocesses using Python 3.13.10, 3.14.1 and 3.15).
+
+- gh-142214: Fix two regressions in :mod:`dataclasses` in Python 3.14.1
+  related to annotations.
+
+  * An exception is no longer raised if ``slots=True`` is used and the
+    ``__init__`` method does not have an ``__annotate__`` attribute
+    (likely because ``init=False`` was used).
+
+  * An exception is no longer raised if annotations are requested on the
+    ``__init__`` method and one of the fields is not present in the class
+    annotations. This can occur in certain dynamic scenarios.
+
+  Patch by Jelle Zijlstra.
+
+Core and Builtins
+-----------------
+
+- gh-142218: Fix crash when inserting into a split table dictionary with a
+  non-:class:`str` key that matches an existing key.
+
+Library
+-------
+
+- gh-116738: Fix :mod:`cmath` data race when initializing trigonometric
+  tables with subinterpreters.
+
+
+What's New in Python 3.14.1 final?
+==================================
+
+*Release date: 2025-12-02*
+
+Windows
+-------
+
+- gh-139810: Installing with ``py install 3[.x]-dev`` will now select final
+  versions as well as prereleases.
+
+Tools/Demos
+-----------
+
+- gh-141692: Each slice of an iOS XCframework now contains a ``lib`` folder
+  that contains a symlink to the libpython dylib. This allows binary modules
+  to be compiled for iOS using dynamic library linking, rather than
+  Framework linking.
+
+- gh-141442: The iOS testbed now correctly handles test arguments that
+  contain spaces.
+
+- gh-140702: The iOS testbed app will now expose the ``GITHUB_ACTIONS``
+  environment variable to iOS apps being tested.
+
+- gh-137484: Have ``Tools/wasm/wasi`` put the build Python into a directory
+  named after the build triple instead of "build".
+
+- gh-137248: Add a ``--logdir`` option to ``Tools/wasm/wasi`` for specifying
+  where to write log files.
+
+- gh-137243: Have ``Tools/wasm/wasi`` detect a WASI SDK install in ``/opt``
+  when it was directly extracted from a release tarball.
+
+Tests
+-----
+
+- gh-140482: Preserve and restore the state of ``stty echo`` as part of the
+  test environment.
+
+- gh-140082: Update ``python -m test`` to set ``FORCE_COLOR=1`` when being
+  run with color enabled so that :mod:`unittest`, which is run by it with
+  redirected output, will output in color.
+
+- gh-139208: Fix regrtest ``--fast-ci --verbose``: don't ignore the
+  ``--verbose`` option anymore. Patch by Victor Stinner.
+
+- gh-136442: Use exit code ``1`` instead of ``5`` if
+  :meth:`unittest.TestCase.setUpClass` raises an exception.
+
+Security
+--------
+
+- gh-139700: Check consistency of the zip64 end of central directory record.
+  Support records with "zip64 extensible data" if there are no bytes
+  prepended to the ZIP file.
+
+- gh-139283: :mod:`sqlite3`: correctly handle the maximum number of rows to
+  fetch in :meth:`Cursor.fetchmany <sqlite3.Cursor.fetchmany>` and reject
+  negative values for :attr:`Cursor.arraysize <sqlite3.Cursor.arraysize>`.
+  Patch by Bénédikt Tran.
+
+- gh-137836: Add support for the "plaintext" element, the RAWTEXT elements
+  "xmp", "iframe", "noembed" and "noframes", and optionally the RAWTEXT
+  element "noscript" in :class:`html.parser.HTMLParser`.
+
+- gh-136063: :mod:`email.message`: ensure linear complexity for legacy HTTP
+  parameter parsing. Patch by Bénédikt Tran.
+
+- gh-136065: Fix quadratic complexity in :func:`os.path.expandvars`.
+
+- gh-119451: Fix a potential memory denial of service in the
+  :mod:`http.client` module. When connecting to a malicious server, it could
+  cause an arbitrary amount of memory to be allocated. This could have led
+  to symptoms including a :exc:`MemoryError`, swapping, out of memory (OOM)
+  killed processes or containers, or even system crashes.
+
+- gh-119342: Fix a potential memory denial of service in the :mod:`plistlib`
+  module. When reading a Plist file received from an untrusted source, it
+  could cause an arbitrary amount of memory to be allocated. This could have
+  led to symptoms including a :exc:`MemoryError`, swapping, out of memory
+  (OOM) killed processes or containers, or even system crashes.
+
+Library
+-------
+
+- gh-74389: When the stdin being used by a :class:`subprocess.Popen`
+  instance is closed, this is now ignored in
+  :meth:`subprocess.Popen.communicate` instead of leaving the class in an
+  inconsistent state.
+
+- gh-87512: Fix :meth:`subprocess.Popen.communicate` timeout handling on
+  Windows when writing large input. Previously, the timeout was ignored
+  during stdin writing, causing the method to block indefinitely if the
+  child process did not consume input quickly. The stdin write is now
+  performed in a background thread, allowing the timeout to be properly
+  enforced.
+
+- gh-141473: When :meth:`subprocess.Popen.communicate` is called with
+  *input* and a *timeout*, and then called a second time after a
+  :exc:`~subprocess.TimeoutExpired` exception before the process has died,
+  it no longer hangs (see the sketch below).
+
+- gh-59000: Fix :mod:`pdb` breakpoint resolution for class methods when the
+  module defining the class is not imported.
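+
+A hedged sketch of the :meth:`subprocess.Popen.communicate` timeout
+behavior from gh-87512 and gh-141473 above (the child command and sizes are
+illustrative)::
+
+   import subprocess
+   import sys
+
+   p = subprocess.Popen(
+       [sys.executable, "-c", "import time; time.sleep(30)"],
+       stdin=subprocess.PIPE,
+   )
+   try:
+       # The timeout now also covers the stdin write on Windows, and a
+       # repeated call after TimeoutExpired no longer hangs.
+       p.communicate(input=b"x" * (1 << 20), timeout=2)
+   except subprocess.TimeoutExpired:
+       p.kill()
+       p.communicate()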
+
+- gh-141570: Support a :term:`file-like object` raising :exc:`OSError` from
+  :meth:`~io.IOBase.fileno` in color detection
+  (``_colorize.can_colorize()``). This can occur when ``sys.stdout`` is
+  redirected.
+
+- gh-141659: Fix bad file descriptor errors from ``_posixsubprocess`` on
+  AIX.
+
+- gh-141600: Fix musl version detection on Void Linux.
+
+- gh-141497: :mod:`ipaddress`: ensure that the methods
+  :meth:`IPv4Network.hosts() <ipaddress.IPv4Network.hosts>` and
+  :meth:`IPv6Network.hosts() <ipaddress.IPv6Network.hosts>` always return an
+  iterator.
+
+- gh-140938: The :func:`statistics.stdev` and :func:`statistics.pstdev`
+  functions now raise a :exc:`ValueError` when the input contains an
+  infinity or a NaN.
+
+- gh-124111: Updated the Tcl threading configuration in :mod:`_tkinter` to
+  assume that threads are always available in Tcl 9 and later.
+
+- gh-137109: The :func:`os.fork` and related forking APIs will no longer
+  warn in the common case where Linux or macOS platform APIs return the
+  number of threads in a process and find the answer to be 1 even when a
+  :func:`os.register_at_fork` ``after_in_parent=`` callback (re)starts a
+  thread.
+
+- gh-141314: Fix assertion failure in :meth:`io.TextIOWrapper.tell` when
+  reading files with standalone carriage return (``\r``) line endings.
+
+- gh-141311: Fix assertion failure in :func:`!io.BytesIO.readinto` and
+  undefined behavior arising when the read position is above capacity in
+  :class:`io.BytesIO`.
+
+- gh-141141: Fix a thread safety issue with :func:`base64.b85decode`.
+  Contributed by Benel Tayar.
+
+- gh-137969: Fix :meth:`annotationlib.ForwardRef.evaluate` returning
+  :class:`~annotationlib.ForwardRef` objects which don't update with new
+  globals.
+
+- gh-140911: :mod:`collections`: Ensure that the methods
+  ``UserString.rindex()`` and ``UserString.index()`` accept
+  :class:`collections.UserString` instances as the *sub* argument.
+
+- gh-140797: The undocumented :class:`!re.Scanner` class now forbids regular
+  expressions containing capturing groups in its lexicon patterns. Patterns
+  using capturing groups could previously lead to crashes with a
+  segmentation fault. Use non-capturing groups ``(?:...)`` instead.
+
+- gh-125115: Refactor :mod:`pdb` argument parsing so that positional
+  arguments pass through intuitively.
+
+- gh-140815: :mod:`faulthandler` now detects if a frame or a code object is
+  invalid or freed. Patch by Victor Stinner.
+
+- gh-100218: Correctly set :attr:`~OSError.errno` when
+  :func:`socket.if_nametoindex` or :func:`socket.if_indextoname` raise an
+  :exc:`OSError`. Patch by Bénédikt Tran.
+
+- gh-140875: Fix handling of unclosed character references (named and
+  numerical) followed by the end of file in :class:`html.parser.HTMLParser`
+  with ``convert_charrefs=False``.
+
+- gh-140734: :mod:`multiprocessing`: fix an off-by-one error when checking
+  the length of a temporary socket file path. Patch by Bénédikt Tran.
+
+- gh-140874: Bump the version of pip bundled in ensurepip to version 25.3.
+
+- gh-140691: In :mod:`urllib.request`, when opening an FTP URL fails because
+  a data connection cannot be made, the control connection's socket is now
+  closed to avoid a :exc:`ResourceWarning`.
+
+- gh-103847: Fix hang when cancelling a process created by
+  :func:`asyncio.create_subprocess_exec` or
+  :func:`asyncio.create_subprocess_shell`. Patch by Kumar Aditya.
+
+- gh-120057: Add :func:`os.reload_environ` to ``os.__all__`` (see the
+  example below).
+
+- gh-140228: Avoid making unnecessary filesystem calls for frozen modules in
+  :mod:`linecache` when the global module cache is not present.
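+
+For the gh-120057 entry above, a minimal sketch (``os.reload_environ()``
+itself was added earlier in the 3.14 series; this change only makes it
+importable via ``from os import *``)::
+
+   import os
+
+   os.reload_environ()  # re-read the current process environment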
+
+- gh-140590: Fix argument checking for
+  :meth:`!functools.partial.__setstate__`, which could lead to internal
+  state corruption and a crash. Patch by Sergey Miryanov.
+
+- gh-125434: Display the thread name in :mod:`faulthandler` on Windows.
+  Patch by Victor Stinner.
+
+- gh-140634: Fix a reference counting bug in
+  :meth:`!os.sched_param.__reduce__`.
+
+- gh-140633: Ignore :exc:`AttributeError` when setting a module's
+  ``__file__`` attribute when loading an extension module packaged as an
+  Apple Framework.
+
+- gh-140593: :mod:`xml.parsers.expat`: Fix a memory leak that could affect
+  users with :meth:`~xml.parsers.expat.xmlparser.ElementDeclHandler` set to
+  a custom element declaration handler. Patch by Sebastian Pipping.
+
+- gh-140607: Inside :meth:`io.RawIOBase.read`, validate that the count of
+  bytes returned by :meth:`io.RawIOBase.readinto` is valid (inside the
+  provided buffer).
+
+- gh-138162: Fix :class:`logging.LoggerAdapter` with ``merge_extra=True``
+  and without the *extra* argument.
+
+- gh-138774: :func:`ast.unparse` now generates full source code when
+  handling :class:`ast.Interpolation` nodes that do not have a specified
+  source.
+
+- gh-140474: Fix memory leak in :class:`array.array` when creating arrays
+  from an empty :class:`str` and the ``u`` type code.
+
+- gh-137530: :mod:`dataclasses`: Fix annotations for generated ``__init__``
+  methods by replacing the annotations that were in-line in the generated
+  source code with ``__annotate__`` functions attached to the methods.
+
+- gh-140348: Fix regression in Python 3.14.0 where using the ``|`` operator
+  on a :class:`typing.Union` object combined with an object that is not a
+  type would raise an error.
+
+- gh-140272: Fix memory leak in the :meth:`!clear` method of the
+  :mod:`dbm.gnu` database.
+
+- gh-140041: Fix import of :mod:`ctypes` on Android and Cygwin when ABI
+  flags are present.
+
+- gh-140120: Fixed a memory leak in :mod:`hmac` when it was using the
+  hacl-star backend. Discovered by ``@ashm-dev`` using AddressSanitizer.
+
+- gh-139905: Add a suggestion to the error message for
+  :class:`typing.Generic` subclasses when ``cls.__parameters__`` is missing
+  due to a parent class failing to call :meth:`super().__init_subclass__()
+  <object.__init_subclass__>` in its ``__init_subclass__``.
+
+- gh-139894: Fix incorrect sharing of the current task with the child
+  process while forking in :mod:`asyncio`. Patch by Kumar Aditya.
+
+- gh-139845: Fix the default asyncio REPL so that :exc:`KeyboardInterrupt`
+  is not printed twice.
+
+- gh-139783: Fix :func:`inspect.getsourcelines` for the case when a
+  decorator is followed by a comment or an empty line.
+
+- gh-139809: Prevent premature colorization of subparser ``prog`` in
+  :meth:`argparse.ArgumentParser.add_subparsers` to respect color
+  environment variable changes after parser creation.
+
+- gh-139736: Fix excessive indentation in the default :mod:`argparse`
+  :class:`!HelpFormatter`. Patch by Alexander Edland.
+
+- gh-70765: :mod:`http.server`: fix default handling of HTTP/0.9 requests in
+  :class:`~http.server.BaseHTTPRequestHandler`. Previously,
+  :meth:`!BaseHTTPRequestHandler.parse_request` incorrectly waited for
+  headers in the request although those are not supported in HTTP/0.9. Patch
+  by Bénédikt Tran.
+
+- gh-63161: Fix :func:`tokenize.detect_encoding`. Support a non-UTF-8
+  shebang and comments if a non-UTF-8 encoding is specified. Detect decoding
+  errors for non-UTF-8 encodings. Detect null bytes in source code.
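+
+A short sketch of the gh-63161 fix above, using an in-memory source with a
+non-UTF-8 coding cookie (the sample bytes are illustrative)::
+
+   import io
+   import tokenize
+
+   src = b"# -*- coding: latin-1 -*-\nname = 'caf\xe9'\n"
+   encoding, first_lines = tokenize.detect_encoding(io.BytesIO(src).readline)
+   print(encoding)  # 'iso-8859-1'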
+
+- gh-139391: Fix an issue where, on non-Windows platforms, it was not
+  possible to gracefully exit a ``python -m asyncio`` process suspended by
+  Ctrl+Z and later resumed by :manpage:`fg` other than with :manpage:`kill`.
+
+- gh-101828: Fix ``'shift_jisx0213'``, ``'shift_jis_2004'``,
+  ``'euc_jisx0213'`` and ``'euc_jis_2004'`` codecs truncating null chars as
+  they were treated as part of multi-character sequences.
+
+- gh-139289: Do a real lazy import of :mod:`rlcompleter` in :mod:`pdb` and
+  restore the existing completer after importing :mod:`rlcompleter`.
+
+- gh-139246: Fix incorrect width handling of zero-width characters when
+  pasting in the default REPL.
+
+- gh-90949: Add
+  :meth:`~xml.parsers.expat.xmlparser.SetAllocTrackerActivationThreshold`
+  and
+  :meth:`~xml.parsers.expat.xmlparser.SetAllocTrackerMaximumAmplification`
+  to :ref:`xmlparser <xmlparser-objects>` objects to prevent use of
+  disproportionate amounts of dynamic memory from within an Expat parser.
+  Patch by Bénédikt Tran.
+
+- gh-139210: Fix use-after-free when reporting an unknown event in
+  :func:`xml.etree.ElementTree.iterparse`. Patch by Ken Jin.
+
+- gh-138860: Lazily import :mod:`rlcompleter` in :mod:`pdb` to avoid a
+  deadlock in subprocesses.
+
+- gh-112729: Fix crash when calling :func:`concurrent.interpreters.create`
+  when the process is out of memory.
+
+- gh-135729: Fix unraisable exception during finalization when using
+  :mod:`concurrent.interpreters` in the REPL.
+
+- gh-139076: Fix a bug in the :mod:`pydoc` module that was hiding functions
+  in a Python module if they were implemented in an extension module and the
+  module did not have ``__all__``.
+
+- gh-139065: Fix trailing space before a wrapped long word if the line
+  length is exactly *width* in :mod:`textwrap`.
+
+- gh-139001: Fix race condition in :class:`pathlib.Path` on the internal
+  ``_raw_paths`` field.
+
+- gh-138813: :class:`!multiprocessing.BaseProcess` defaults ``kwargs`` to
+  ``None`` instead of a shared dictionary.
+
+- gh-138993: Dedent :data:`credits` text.
+
+- gh-138891: Fix ``SyntaxError`` when ``inspect.get_annotations(f,
+  eval_str=True)`` is called on a function annotated with a :pep:`646`
+  ``star_expression``.
+
+- gh-130567: Fix possible crash in :func:`locale.strxfrm` due to a platform
+  bug on macOS.
+
+- gh-138859: Fix generic type parameterization raising a :exc:`TypeError`
+  when omitting a :class:`ParamSpec` that has a default which is not a list
+  of types.
+
+- gh-138764: Prevent :func:`annotationlib.call_annotate_function` from
+  calling ``__annotate__`` functions that don't support
+  ``VALUE_WITH_FAKE_GLOBALS`` in a fake globals namespace with empty
+  globals.
+
+  Make ``FORWARDREF`` and ``STRING`` annotations fall back to using
+  ``VALUE`` annotations in the case that neither their own format, nor
+  ``VALUE_WITH_FAKE_GLOBALS``, is supported.
+
+- gh-138775: Use of ``python -m`` with :mod:`base64` has been fixed to
+  detect input from a terminal so that it properly notices EOF.
+
+- gh-138779: Support device numbers larger than ``2**63-1`` for the
+  :attr:`~os.stat_result.st_rdev` field of the :class:`os.stat_result`
+  structure.
+
+- gh-137706: Fix the partial evaluation of annotations that use
+  ``typing.Annotated[T, x]`` where ``T`` is a forward reference.
+
+- gh-88375: Fix normalization of the ``robots.txt`` rules and URLs in the
+  :mod:`urllib.robotparser` module. No longer ignore trailing ``?``.
+  Distinguish raw special characters ``?``, ``=`` and ``&`` from the
+  percent-encoded ones.
+
+- gh-111788: Fix parsing errors in the :mod:`urllib.robotparser` module.
+  Don't fail trying to parse weird paths. Don't fail trying to decode
+  non-UTF-8 ``robots.txt`` files.
+
+- gh-98896: Fix a failure in the multiprocessing resource_tracker when
+  SharedMemory names contain colons. Patch by Rani Pinchuk.
+
+- gh-138425: Fix partial evaluation of :class:`annotationlib.ForwardRef`
+  objects which rely on names defined as globals.
+
+- gh-138432: :func:`zoneinfo.reset_tzpath` will now convert any
+  :class:`os.PathLike` objects it receives into strings before adding them
+  to ``TZPATH``. It will raise :exc:`TypeError` if anything other than a
+  string is found after this conversion. If given an :class:`os.PathLike`
+  object that represents a relative path, it will now raise
+  :exc:`ValueError` instead of :exc:`TypeError`, and present a more
+  informative error message.
+
+- gh-138008: Fix segmentation faults in the :mod:`ctypes` module due to
+  invalid :attr:`~ctypes._CFuncPtr.argtypes`. Patch by Dung Nguyen.
+
+- gh-60462: Fix :func:`locale.strxfrm` on Solaris (and possibly other
+  platforms).
+
+- gh-138239: The REPL now highlights :keyword:`type` as a soft keyword in
+  :ref:`type statements <type>`.
+
+- gh-138204: Forbid expansion of shared anonymous :mod:`memory maps <mmap>`
+  on Linux, which caused a bus error.
+
+- gh-138010: Fix an issue where defining a class with an
+  :func:`@warnings.deprecated <warnings.deprecated>`-decorated base class
+  may not invoke the correct :meth:`~object.__init_subclass__` method in
+  cases involving multiple inheritance. Patch by Brian Schubert.
+
+- gh-138151: In :mod:`annotationlib`, improve evaluation of forward
+  references to nonlocal variables that are not yet defined when the
+  annotations are initially evaluated.
+
+- gh-137317: :func:`inspect.signature` now correctly handles classes that
+  use a descriptor on a wrapped :meth:`!__init__` or :meth:`!__new__`
+  method. Contributed by Yongyu Yan.
+
+- gh-137754: Fix import of the :mod:`zoneinfo` module if the C
+  implementation of the :mod:`datetime` module is not available.
+
+- gh-137490: Handle :data:`~errno.ECANCELED` in the same way as
+  :data:`~errno.EINTR` in :func:`signal.sigwaitinfo` on NetBSD.
+
+- gh-137477: Fix :func:`!inspect.getblock`, :func:`inspect.getsourcelines`
+  and :func:`inspect.getsource` for generator expressions.
+
+- gh-137044: Return large limit values as positive integers instead of
+  negative integers in :func:`resource.getrlimit`. Accept large values and
+  reject negative values (except :data:`~resource.RLIM_INFINITY`) for limits
+  in :func:`resource.setrlimit`.
+
+- gh-75989: :meth:`tarfile.TarFile.extractall` and
+  :meth:`tarfile.TarFile.extract` now overwrite symlinks when extracting
+  hardlinks. (Contributed by Alexander Enrique Urieles Nieto in
+  :gh:`75989`.)
+
+- gh-137017: Fix :meth:`threading.Thread.is_alive` to remain ``True`` until
+  the underlying OS thread is fully cleaned up. This avoids false negatives
+  in edge cases involving thread monitoring or premature
+  :meth:`threading.Thread.is_alive` calls.
+
+- gh-137273: Fix debug assertion failure in :func:`locale.setlocale` on
+  Windows.
+
+- gh-137239: :mod:`heapq`: Update :data:`!heapq.__all__` with the ``*_max``
+  functions (see the example below).
+
+- gh-81325: :class:`tarfile.TarFile` now accepts a :term:`path-like object`
+  when working on a tar archive. (Contributed by Alexander Enrique Urieles
+  Nieto in :gh:`81325`.)
+
+- gh-137185: Fix a potential async-signal-safety issue in
+  :mod:`faulthandler` when printing C stack traces.
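+
+The ``*_max`` functions referenced by gh-137239 above are the public
+max-heap variants added in 3.14; a minimal usage sketch::
+
+   import heapq
+
+   heap = [5, 1, 9]
+   heapq.heapify_max(heap)
+   print(heapq.heappop_max(heap))  # 9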
+
+- gh-136914: Fix retrieval of :attr:`doctest.DocTest.lineno` for objects
+  decorated with :func:`functools.cache` or
+  :class:`functools.cached_property`.
+
+- gh-136912: :func:`hmac.digest` now properly handles large keys and
+  messages by falling back to the pure Python implementation when necessary.
+  Patch by Bénédikt Tran.
+
+- gh-83424: Allow creating a :class:`ctypes.CDLL` without a name when
+  passing a handle as an argument.
+
+- gh-136234: Fix :meth:`asyncio.WriteTransport.writelines` to be robust to
+  connection failure, by using the same behavior as
+  :meth:`~asyncio.WriteTransport.write`.
+
+- gh-136507: Fix the mimetypes CLI to handle multiple file parameters.
+
+- gh-136057: Fixed the bug in :mod:`pdb` and :mod:`bdb` where ``next`` and
+  ``step`` could not step over a line containing a loop.
+
+- gh-135386: Fix opening a :mod:`dbm.sqlite3` database for reading from a
+  read-only file or directory.
+
+- gh-135444: Fix :meth:`asyncio.DatagramTransport.sendto` to account for the
+  datagram header size when data cannot be sent.
+
+- gh-126631: Fix a :mod:`multiprocessing` ``forkserver`` bug which prevented
+  ``__main__`` from being preloaded.
+
+- gh-135307: :mod:`email`: Fix exception in ``set_content()`` when encoding
+  text and *max_line_length* is set to ``0`` or ``None`` (unlimited).
+
+- gh-134453: Fixed :meth:`subprocess.Popen.communicate` ``input=`` handling
+  of :class:`memoryview` instances that were non-byte shaped on POSIX
+  platforms. Those are now properly cast to a byte-shaped view instead of
+  truncating the input. Windows platforms did not have this bug.
+
+- gh-134698: Fix a crash when calling methods of :class:`ssl.SSLContext` or
+  :class:`ssl.SSLSocket` across multiple threads.
+
+- gh-125996: Fix thread safety of :class:`collections.OrderedDict`. Patch by
+  Kumar Aditya.
+
+- gh-133789: Fix unpickling of :mod:`pathlib` objects that were pickled in
+  Python 3.13.
+
+- gh-127081: Fix libc thread safety issues with :mod:`dbm` by performing
+  stateful operations in critical sections.
+
+- gh-132551: Make :class:`io.BytesIO` safe in the :term:`free-threaded
+  <free threading>` build.
+
+- gh-131788: Make ``ResourceTracker.send`` from :mod:`multiprocessing`
+  re-entrant safe.
+
+- gh-118981: Fix potential hang in ``multiprocessing.popen_spawn_posix``
+  that can happen when the child process dies early, by closing the child
+  file descriptors right away.
+
+- gh-102431: Clarify constraints for "logical" arguments in methods of
+  :class:`decimal.Context` (see the example below).
+
+- gh-78319: UTF-8 support for the IMAP APPEND command has been made RFC
+  compliant.
+
+- bpo-38735: Fix failure when importing a module from the root directory on
+  Unix-like platforms with :data:`sys.pycache_prefix` set.
+
+- bpo-41839: Allow negative priority values from the
+  :func:`os.sched_get_priority_min` and :func:`os.sched_get_priority_max`
+  functions.
+
+IDLE
+----
+
+- gh-96491: Deduplicate the version number in the IDLE shell title bar after
+  saving to a file.
+
+- gh-139742: Colorize t-string prefixes for template strings in IDLE, as
+  done for f-string prefixes.
+
+Documentation
+-------------
+
+- gh-141994: :mod:`xml.sax.handler`: Make the documentation of
+  :data:`xml.sax.handler.feature_external_ges` warn about opening up to
+  external entity attacks. Patch by Sebastian Pipping.
+
+- gh-140578: Remove an outdated sentence in the documentation for
+  :mod:`multiprocessing` which implied that
+  :class:`concurrent.futures.ThreadPoolExecutor` did not exist.
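+
+As a worked note on the gh-102431 entry in the Library section above: the
+"logical" operands are Decimals whose digits are all 0 or 1 (a sketch, not
+from the original entry)::
+
+   from decimal import Context, Decimal
+
+   ctx = Context()
+   print(ctx.logical_and(Decimal("1010"), Decimal("110")))  # Decimal('10')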
+
+Core and Builtins
+-----------------
+
+- gh-142048: Fix quadratically increasing garbage collection delays in the
+  free-threaded build.
+
+Library
+-------
+
+- gh-116738: Fix thread safety issue with :mod:`re` scanner objects in
+  free-threaded builds.
+
+Core and Builtins
+-----------------
+
+- gh-141930: When importing a module, use Python's regular file object to
+  ensure that writes to ``.pyc`` files are complete or an appropriate error
+  is raised.
+
+- gh-120158: Fix inconsistent state when enabling or disabling monitoring
+  events too many times.
+
+- gh-139653: Only raise a ``RecursionError`` or trigger a fatal error if the
+  stack pointer is both below the limit pointer *and* above the stack base.
+  If the pointer is outside these bounds, assume that it is OK. This
+  prevents false positives when user-space threads swap stacks.
+
+- gh-139103: Improve multithreaded scaling of dataclasses on the
+  free-threaded build.
+
+- gh-141579: Fix :func:`sys.activate_stack_trampoline` to properly support
+  the ``perf_jit`` backend. Patch by Pablo Galindo.
+
+- gh-114203: Skip locking if the object is already locked by a two-mutex
+  critical section.
+
+- gh-141528: Suggest using :meth:`concurrent.interpreters.Interpreter.close`
+  instead of the private ``_interpreters.destroy`` function when warning
+  about remaining subinterpreters. Patch by Sergey Miryanov.
+
+- gh-141312: Fix the assertion failure in the ``__setstate__`` method of the
+  range iterator when a non-integer argument is passed. Patch by Sergey
+  Miryanov.
+
+Library
+-------
+
+- gh-116738: Make the csv module thread-safe on the :term:`free threaded
+  <free threading>` build.
+
+Core and Builtins
+-----------------
+
+- gh-140939: Fix memory leak when :class:`bytearray` or :class:`bytes` is
+  formatted with the ``%*b`` format with a large width that results in a
+  :exc:`MemoryError`.
+
+Library
+-------
+
+- gh-140260: Fix :mod:`struct` data race in endian table initialization with
+  subinterpreters. Patch by Shamil Abdulaev.
+
+Core and Builtins
+-----------------
+
+- gh-140530: Fix a reference leak when ``raise exc from cause`` fails. Patch
+  by Bénédikt Tran.
+
+- gh-140373: Correctly emit the ``PY_UNWIND`` event when a generator object
+  is closed. Patch by Mikhail Efimov.
+
+- gh-140576: Fixed crash in :func:`tokenize.generate_tokens` in case of
+  specific incorrect input. Patch by Mikhail Efimov.
+
+- gh-140551: Fixed crash in :class:`dict` if :meth:`dict.clear` is called at
+  the lookup stage. Patch by Mikhail Efimov and Inada Naoki.
+
+- gh-140517: Fixed a reference leak when iterating over the result of
+  :func:`map` with ``strict=True`` when the input iterables have different
+  lengths (see the example below). Patch by Mikhail Efimov.
+
+- gh-140471: Fix potential buffer overflow in :class:`ast.AST` node
+  initialization when encountering malformed :attr:`~ast.AST._fields`
+  containing a non-:class:`str`.
+
+- gh-140431: Fix a crash in Python's :term:`garbage collector <garbage
+  collection>` due to partially initialized :term:`coroutine` objects when
+  coroutine origin tracking depth is enabled
+  (:func:`sys.set_coroutine_origin_tracking_depth`).
+
+Library
+-------
+
+- gh-140398: Fix memory leaks in the :mod:`readline` functions
+  :func:`~readline.read_init_file`, :func:`~readline.read_history_file`,
+  :func:`~readline.write_history_file`, and
+  :func:`~readline.append_history_file` when :c:func:`PySys_Audit` fails.
+
+Core and Builtins
+-----------------
+
+- gh-140406: Fix memory leak when an object's :meth:`~object.__hash__`
+  method returns an object that isn't an :class:`int`.
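+
+For the gh-140517 entry above: ``map()`` accepts a ``strict=True`` flag in
+3.14 and raises when the iterables have different lengths; a minimal
+sketch::
+
+   try:
+       list(map(min, [1, 2, 3], [4, 5], strict=True))
+   except ValueError:
+       pass  # lengths differ; the previous reference leak is fixed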
+
+- gh-140358: Restore elapsed time and unreachable object count in GC debug
+  output. These were inadvertently removed during a refactor of ``gc.c``.
+  The debug log now again reports elapsed collection time and the number of
+  unreachable objects. Contributed by Pål Grønås Drange.
+
+- gh-140306: Fix memory leaks in cross-interpreter channel operations and
+  shared namespace handling.
+
+- gh-140301: Fix memory leak of ``PyConfig`` in subinterpreters.
+
+- gh-140257: Fix data race between ``interpreter_clear()`` and
+  ``take_gil()`` on ``eval_breaker`` during finalization with daemon
+  threads.
+
+- gh-139951: Fixes a regression in GC performance for a growing heap
+  composed mostly of small tuples.
+
+  * Counts the number of actually tracked objects, instead of trackable
+    objects. This ensures that untracking tuples has the desired effect of
+    reducing GC overhead.
+
+  * Does not track most untrackable tuples during creation. This prevents
+    large numbers of small tuples causing excessive GCs.
+
+- gh-140104: Fix a bug with exception handling in the JIT. Patch by Ken Jin.
+  Bug reported by Daniel Diniz.
+
+- gh-140061: Fix the check of whether an object is uniquely referenced to
+  ensure free-threaded compatibility. Patch by Sergey Miryanov.
+
+- gh-140067: Fix memory leak in sub-interpreter creation.
+
+- gh-140000: Fix potential memory leak when a reference cycle exists between
+  an instance of :class:`typing.TypeAliasType`, :class:`typing.TypeVar`,
+  :class:`typing.ParamSpec`, or :class:`typing.TypeVarTuple` and its
+  ``__name__`` attribute. Patch by Mikhail Efimov.
+
+- gh-139914: Restore support for HP PA-RISC, which has an upwards-growing
+  stack.
+
+- gh-139988: Fix a memory leak when failing to create a
+  :class:`~typing.Union` type. Patch by Bénédikt Tran.
+
+- gh-139748: Fix reference leaks in error branches of functions accepting
+  path strings or bytes such as :func:`compile` and :func:`os.system`. Patch
+  by Bénédikt Tran.
+
+- gh-139516: Fix the tokenizer so that a colon in a lambda no longer
+  erroneously starts a format spec in an f-string.
+
+- gh-139640: :func:`ast.parse` no longer emits syntax warnings for
+  ``return``/``break``/``continue`` in ``finally`` (see :pep:`765`) -- they
+  are only emitted during compilation.
+
+- gh-139640: Fix swallowing some syntax warnings in different modules if
+  they accidentally have the same message and are emitted from the same
+  line. Fix duplicated warnings in the ``finally`` block.
+
+- gh-63161: Support non-UTF-8 shebangs and comments in Python source files
+  if a non-UTF-8 encoding is specified. Detect decoding errors in comments
+  for the default (UTF-8) encoding. Show the line and position of a decoding
+  error for the default encoding in a traceback. Show the line containing
+  the coding cookie when it conflicts with the BOM in a traceback.
+
+Library
+-------
+
+- gh-116738: Make :mod:`mmap` thread-safe on the :term:`free threaded <free
+  threading>` build.
+
+Core and Builtins
+-----------------
+
+- gh-138558: Fix handling of unusual t-string annotations in
+  :mod:`annotationlib`. Patch by Dave Peck.
+
+- gh-134466: Don't run PyREPL in a degraded environment where setting
+  termios attributes is not allowed.
+
+- gh-138944: Fix :exc:`SyntaxError` message when invalid syntax appears on
+  the same line as a valid ``import ... as ...`` or ``from ... import ... as
+  ...`` statement. Patch by Brian Schubert.
+
+- gh-105487: Remove non-existent :meth:`~object.__copy__`,
+  :meth:`~object.__deepcopy__`, and :attr:`~type.__bases__` from the
+  :meth:`~object.__dir__` entries of :class:`types.GenericAlias`.
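+
+A one-line check of the gh-105487 change above (illustrative)::
+
+   ga = list[int]                # a types.GenericAlias instance
+   print("__copy__" in dir(ga))  # False: no longer advertised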
+
+- gh-69605: Fix some standard library submodules missing from the
+  :term:`REPL` auto-completion of imports.
+
+Library
+-------
+
+- gh-116738: Make :mod:`cProfile` thread-safe on the :term:`free threaded
+  <free threading>` build.
+
+- gh-138004: On Solaris/Illumos platforms, thread names are now encoded as
+  ASCII to avoid errors on systems (e.g. OpenIndiana) that don't support
+  non-ASCII names.
+
+Core and Builtins
+-----------------
+
+- gh-137433: Fix a potential deadlock in the :term:`free threading` build
+  when daemon threads enable or disable profiling or tracing while the main
+  thread is shutting down the interpreter.
+
+- gh-137400: Fix a crash in the :term:`free threading` build when disabling
+  profiling or tracing across all threads with
+  :c:func:`PyEval_SetProfileAllThreads` or
+  :c:func:`PyEval_SetTraceAllThreads` or their Python equivalents
+  :func:`threading.settrace_all_threads` and
+  :func:`threading.setprofile_all_threads`.
+
+- gh-58124: Fix the name of the Python encoding in Unicode errors of the
+  code page codec: use "cp65000" and "cp65001" instead of "CP_UTF7" and
+  "CP_UTF8", which are not valid Python codec names. Patch by Victor
+  Stinner.
+
+- gh-132657: Improve performance of :class:`frozenset` by removing locks in
+  the free-threading build.
+
+- gh-133400: Fixed Ctrl+D (^D) behavior in the ``_pyrepl`` module to match
+  the old pre-3.13 REPL behavior.
+
+- gh-128640: Fix a crash when using threads inside a subinterpreter.
+
+C API
+-----
+
+- gh-137422: Fix a :term:`free threading` race condition in
+  :c:func:`PyImport_AddModuleRef`. It was previously possible for two calls
+  to the function to return two different objects, only one of which was
+  stored in :data:`sys.modules`.
+
+- gh-140042: Removed the ``sqlite3_shutdown()`` call that could cause
+  connections to be closed when :mod:`sqlite3` is used with multiple
+  subinterpreters.
+
+- gh-141042: Make :c:func:`PyFloat_Pack2` and :c:func:`PyFloat_Pack4`
+  produce a qNaN if, during conversion to a narrower precision
+  floating-point format, the NaN payload remaining after truncation would be
+  zero. Patch by Sergey B Kirpichev.
+
+- gh-140487: Fix :c:macro:`Py_RETURN_NOTIMPLEMENTED` in limited C API 3.11
+  and older: don't treat ``Py_NotImplemented`` as immortal. Patch by Victor
+  Stinner.
+
+- gh-140153: Fix the :c:func:`Py_REFCNT` definition on limited C API
+  3.11-3.13. Patch by Victor Stinner.
+
+- gh-139653: Add :c:func:`PyUnstable_ThreadState_SetStackProtection` and
+  :c:func:`PyUnstable_ThreadState_ResetStackProtection` functions to set the
+  stack protection base address and stack protection size of a Python thread
+  state. Patch by Victor Stinner.
+
+Build
+-----
+
+- gh-141808: Do not generate the JIT stencils twice in case of PGO builds on
+  Windows.
+
+- gh-141784: Fix ``_remote_debugging_module.c`` compilation on 32-bit Linux.
+  Include Python.h before system headers to make sure that
+  ``_remote_debugging_module.c`` uses the same types (ABI) as Python.
+  Patch by Victor Stinner.
+
+- gh-140768: Warn when the WASI SDK version doesn't match what's supported.
+
+- gh-140513: Generate a clear compilation error when
+  ``_Py_TAIL_CALL_INTERP`` is enabled but either ``preserve_none`` or
+  ``musttail`` is not supported.
+
+- gh-140189: iOS builds were added to CI.
+
+- gh-138489: When cross-compiling for WASI with ``build_wasm`` or
+  ``build_emscripten``, the ``build-details.json`` step is now included in
+  the build process, just like with native builds.
+
+  This fixes the ``libinstall`` task, which requires the
+  ``build-details.json`` file during the process.
+
+- gh-137618: ``PYTHON_FOR_REGEN`` now requires Python 3.10 to Python 3.15.
+  Patch by Adam Turner.
+
+- gh-123681: Check the ``strftime()`` behavior at runtime instead of at
+  compile time to support cross-compiling. Remove the internal macro
+  ``_Py_NORMALIZE_CENTURY``.
+
+
+What's New in Python 3.14.0 final?
+==================================
+
+*Release date: 2025-10-07*
+
+macOS
+-----
+
+- gh-124111: Update macOS installer to use Tcl/Tk 8.6.17.
+
+- gh-139573: Updated bundled version of OpenSSL to 3.0.18.
+
+Windows
+-------
+
+- gh-139573: Updated bundled version of OpenSSL to 3.0.18.
+
+Tools/Demos
+-----------
+
+- gh-139330: The SBOM generation tool didn't cross-check the version and
+  checksum values against the ``Modules/expat/refresh.sh`` script, leading
+  to the values becoming out-of-date during routine updates.
+
+- gh-132006: XCframeworks now include privacy manifests to satisfy Apple App
+  Store submission requirements.
+
+- gh-138171: A script for building an iOS XCframework was added. As part of
+  this change, the top level ``iOS`` folder has been moved to be a
+  subdirectory of the ``Apple`` folder.
+
+Security
+--------
+
+- gh-139400: :mod:`xml.parsers.expat`: Make sure that parent Expat parsers
+  are only garbage-collected once they are no longer referenced by
+  subparsers created by
+  :meth:`~xml.parsers.expat.xmlparser.ExternalEntityParserCreate`. Patch by
+  Sebastian Pipping.
+
+Library
+-------
+
+- gh-139312: Upgrade bundled libexpat to 2.7.3.
+
+
+What's New in Python 3.14.0 release candidate 3?
+================================================
+
+*Release date: 2025-09-18*
+
+Windows
+-------
+
+- gh-138896: Fix an error installing the C runtime on non-updated Windows
+  machines.
+
+Tools/Demos
+-----------
+
+- gh-137873: The iOS test runner has been simplified, resolving some issues
+  that have been observed using the runner in GitHub Actions and Azure
+  Pipelines test environments.
+
+Security
+--------
+
+- gh-135661: Fix CDATA section parsing in :class:`html.parser.HTMLParser`
+  according to the HTML5 standard: ``] ]>`` and ``]] >`` no longer end the
+  CDATA section. Add a private method ``_set_support_cdata()`` which can be
+  used to specify how to parse ``<![CDATA[`` --- as a CDATA section in
+  foreign content (SVG or MathML) or as a bogus comment in the HTML
+  namespace.
+
+Library
+-------
+
+- gh-138998: Update bundled libexpat to 2.7.2.
+
+- gh-118803: Add back :class:`collections.abc.ByteString` and
+  :class:`typing.ByteString`. Both had been removed in prior alpha, beta and
+  release candidates for Python 3.14, but their removal has now been
+  postponed to Python 3.17.
+
+- gh-137226: Fix :func:`typing.get_type_hints` calls on generic
+  :class:`typing.TypedDict` classes defined with string annotations.
+
+- gh-138804: Raise :exc:`TypeError` instead of :exc:`AttributeError` when an
+  argument of incorrect type is passed to :func:`shlex.quote`. This restores
+  the behavior of the function prior to 3.14.
+
+- gh-128636: Fix a crash in PyREPL on macOS when ``os.environ`` is
+  overwritten with an invalid value.
+
+- gh-138514: Raise :exc:`ValueError` when a multi-character string is passed
+  to the *echo_char* parameter of :func:`getpass.getpass`. Patch by Benjamin
+  Johnson.
+
+- gh-138515: :mod:`email` is added to the Emscripten build.
+
+- gh-99948: :func:`ctypes.util.find_library` now works in the Emscripten
+  build.
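+
+A hedged usage sketch for the :func:`ctypes.util.find_library` entry
+(gh-99948) above; the result shown is platform-dependent::
+
+   from ctypes.util import find_library
+
+   print(find_library("m"))  # e.g. 'libm.so.6' on glibc-based Linux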
+
+- gh-138253: Add the *block* parameter in the :meth:`!put` and :meth:`!get`
+  methods of the :mod:`concurrent.interpreters` queues for compatibility
+  with the :class:`queue.Queue` interface.
+
+- gh-138133: Prevent an infinite traceback loop when sending Ctrl+C to
+  Python through ``strace``.
+
+- gh-134869: Fix an issue where pressing Ctrl+C during tab completion in the
+  REPL would leave the autocompletion menu in a corrupted state.
+
+- gh-90548: Fix ``musl`` detection for :func:`platform.libc_ver` on Alpine
+  Linux if compiled with ``--strip-all``.
+
+- gh-136134: :meth:`!SMTP.auth_cram_md5` now raises an
+  :exc:`~smtplib.SMTPException` instead of a :exc:`ValueError` if Python has
+  been built without MD5 support. In particular, :class:`~smtplib.SMTP`
+  clients will not attempt to use this method even if the remote server is
+  assumed to support it. Patch by Bénédikt Tran.
+
+- gh-136134: :meth:`IMAP4.login_cram_md5 <imaplib.IMAP4.login_cram_md5>` now
+  raises an :exc:`IMAP4.error <imaplib.IMAP4.error>` if CRAM-MD5
+  authentication is not supported. Patch by Bénédikt Tran.
+
+- gh-134953: Expand the ``_colorize`` theme with ``keyword_constant`` and
+  use it in the :term:`REPL`.
+
+Core and Builtins
+-----------------
+
+- gh-71810: Raise :exc:`OverflowError` for ``(-1).to_bytes()`` for signed
+  conversions when the byte count is zero. Patch by Sergey B Kirpichev.
+
+- gh-138192: Fix :mod:`contextvars` initialization so that all
+  subinterpreters are assigned the :attr:`~contextvars.Token.MISSING` value.
+
+- gh-138479: Fix a crash when a generic object's ``__typing_subst__``
+  returns an object that isn't a :class:`tuple`.
+
+- gh-138372: Fix :exc:`SyntaxWarning` emitted for erroneous subscript
+  expressions involving :ref:`template string literals <t-strings>`. Patch
+  by Brian Schubert.
+
+- gh-138318: The default REPL now avoids highlighting built-in names (for
+  instance :class:`set` or :func:`format`) when they are used as attribute
+  names (for instance in ``value.set`` or ``text.format``).
+
+- gh-138349: Fix crash in certain cases where a module contains both a
+  module-level annotation and a comprehension.
+
+- gh-137384: Fix a crash when using the :mod:`warnings` module in a
+  finalizer at shutdown. Patch by Kumar Aditya.
+
+- gh-137883: Fix runaway recursion when calling a function with keyword
+  arguments.
+
+- gh-137079: Fix keyword typo recognition when parsing files. Patch by Pablo
+  Galindo.
+
+- gh-137728: Fix the JIT's handling of many local variables. This previously
+  caused a segfault.
+
+- gh-137576: Fix for incorrect source code being shown in tracebacks from
+  the Basic REPL when :envvar:`PYTHONSTARTUP` is given. Patch by Adam Hartz.
+
+
+What's New in Python 3.14.0 release candidate 2?
+================================================
+
+*Release date: 2025-08-14*
+
+macOS
+-----
+
+- gh-137450: macOS installer shell path management improvements: separate
+  the installer ``Shell profile updater`` postinstall script from
+  ``Update Shell Profile.command`` to enable more robust error handling.
+
+- gh-137134: Update macOS installer to ship with SQLite version 3.50.4.
+
+Windows
+-------
+
+- gh-137134: Update Windows installer to ship with SQLite 3.50.4.
+
+Library
+-------
+
+- gh-137426: Remove the code deprecation of
+  ``importlib.abc.ResourceLoader``. It is documented as deprecated, but left
+  for backwards compatibility with other classes in ``importlib.abc``.
+
+- gh-137282: Fix tab completion and :func:`dir` on
+  :mod:`concurrent.futures`.
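+
+A quick check of the gh-137282 fix above (assuming the executor classes are
+lazily imported, as in 3.14)::
+
+   import concurrent.futures
+
+   print("ThreadPoolExecutor" in dir(concurrent.futures))  # True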
+
+- gh-137257: Bump the version of pip bundled in ensurepip to version 25.2.
+
+- gh-137226: Fix behavior of :meth:`annotationlib.ForwardRef.evaluate` when
+  the *type_params* parameter is passed and the name of a type param is also
+  present in an enclosing scope.
+
+- gh-130522: Fix unraisable :exc:`TypeError` raised during
+  :term:`interpreter shutdown` in the :mod:`threading` module.
+
+- gh-137059: Fix handling of file URLs with a Windows drive letter in the
+  URL authority by :func:`urllib.request.url2pathname`. This fixes a
+  regression in earlier pre-releases of Python 3.14.
+
+- gh-130577: :mod:`tarfile` now validates archives to ensure member offsets
+  are non-negative. (Contributed by Alexander Enrique Urieles Nieto in
+  :gh:`130577`.)
+
+- gh-135228: When :mod:`dataclasses` replaces a class with a slotted
+  dataclass, the original class can now be garbage collected again. Earlier
+  changes in Python 3.14 caused this class to always remain in existence
+  together with the replacement class synthesized by :mod:`dataclasses`.
+
+Documentation
+-------------
+
+- gh-136155: We are now checking for fatal errors in EPUB builds in CI.
+
+Core and Builtins
+-----------------
+
+- gh-137400: Fix a crash in the :term:`free threading` build when disabling
+  profiling or tracing across all threads with
+  :c:func:`PyEval_SetProfileAllThreads` or
+  :c:func:`PyEval_SetTraceAllThreads` or their Python equivalents
+  :func:`threading.settrace_all_threads` and
+  :func:`threading.setprofile_all_threads`.
+
+- gh-137314: Fixed a regression where raw f-strings incorrectly interpreted
+  escape sequences in format specifications. Raw f-strings now properly
+  preserve literal backslashes in format specs, matching the behavior from
+  Python 3.11. For example, ``rf"{obj:\xFF}"`` now correctly produces
+  ``'\\xFF'`` instead of ``'ÿ'``. Patch by Pablo Galindo.
+
+- gh-137308: A standalone docstring in a node body is optimized as a
+  :keyword:`pass` statement to ensure that the node's body is never empty.
+  There was a :exc:`ValueError` in :func:`compile` otherwise.
+
+- gh-137288: Fix bug where some bytecode instructions of a boolean
+  expression were not associated with the correct exception handler.
+
+- gh-134291: Remove some newer macOS API usage from the JIT compiler in
+  order to restore compatibility with older macOS 10.15 deployment targets.
+
+- gh-131338: Disable computed stack limit checks on non-glibc Linux
+  platforms to fix crashes on deep recursion.
+
+- gh-136870: Fix data races while de-instrumenting bytecode of code objects
+  running concurrently in threads.
+
+C API
+-----
+
+- gh-137573: Mark ``_PyOptimizer_Optimize`` as :c:macro:`Py_NO_INLINE` to
+  prevent stack overflow crashes on macOS.
+
+Build
+-----
+
+- gh-132339: Add support for OpenSSL 3.5.
+
+
+What's New in Python 3.14.0 release candidate 1?
+================================================
+
+*Release date: 2025-07-22*
+
+Tools/Demos
+-----------
+
+- gh-136251: Fixes and usability improvements for
+  ``Tools/wasm/emscripten/web_example``.
+
+Security
+--------
+
+- gh-135661: Fix parsing attributes with whitespace around the ``=``
+  separator in :class:`html.parser.HTMLParser` according to the HTML5
+  standard.
+
+- gh-118350: Fix support of escapable raw text mode (elements "textarea" and
+  "title") in :class:`html.parser.HTMLParser`.
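+
+A minimal sketch of the escapable raw text handling from gh-118350 above
+(the markup sample is made up)::
+
+   from html.parser import HTMLParser
+
+   class ShowData(HTMLParser):
+       def handle_data(self, data):
+           print(repr(data))
+
+   ShowData().feed("<textarea><b>raw</b></textarea>")
+   # '<b>raw</b>' -- the content is data, not parsed as markup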
+
+Library
+-------
+
+- gh-136170: Removed the unreleased ``zipfile.ZipFile.data_offset`` property
+  added in 3.14.0a7, as it wasn't fully clear which behavior it should have
+  in some situations, so the result was not always what a user might expect.
+
+- gh-124621: pyrepl now works in Emscripten.
+
+- gh-136874: Discard the URL query and fragment in
+  :func:`urllib.request.url2pathname`.
+
+- gh-130645: Enable color help by default in :mod:`argparse`.
+
+- gh-136549: Fix the signature of :func:`threading.excepthook`.
+
+- gh-136523: Fix :class:`wave.Wave_write` emitting an unraisable exception
+  when open raises.
+
+- gh-52876: Add the missing ``keepends`` (default ``True``) parameter to
+  :meth:`!codecs.StreamReaderWriter.readline` and
+  :meth:`!codecs.StreamReaderWriter.readlines`.
+
+- gh-136470: Correct :class:`concurrent.futures.InterpreterPoolExecutor`'s
+  default thread name.
+
+- gh-136476: Fix a bug that was causing the ``get_async_stack_trace``
+  function to miss some frames in the stack trace.
+
+- gh-136434: Fix docs generation of ``UnboundItem`` in
+  :mod:`concurrent.interpreters` when running with :option:`-OO`.
+
+- gh-136380: Raise :exc:`AttributeError` when accessing
+  :class:`concurrent.futures.InterpreterPoolExecutor` and subinterpreters
+  are not available.
+
+- gh-134759: Fix :exc:`UnboundLocalError` in
+  :func:`email.message.Message.get_payload` when the payload to decode is a
+  :class:`bytes` object. Patch by Kliment Lamonov.
+
+- gh-134657: :mod:`asyncio`: Remove some private names from
+  ``asyncio.__all__``.
+
+Core and Builtins
+-----------------
+
+- gh-136801: Fix PyREPL syntax highlighting of match cases after a
+  multi-line case. Contributed by Olga Matoula.
+
+Library
+-------
+
+- gh-136421: Fix a crash when initializing :mod:`datetime` concurrently.
+
+Core and Builtins
+-----------------
+
+- gh-136541: Fix some issues with the perf trampolines on x86-64 and
+  aarch64. The trampolines were not being generated correctly for some
+  cases, which could lead to the perf integration not working correctly.
+  Patch by Pablo Galindo.
+
+- gh-136517: Fixed a typo that prevented printing of uncollectable objects
+  when the :const:`gc.DEBUG_UNCOLLECTABLE` mode was set.
+
+- gh-136525: Fix an issue where per-thread bytecode was not instrumented for
+  newly created threads.
+
+- gh-132661: ``Interpolation.expression`` now has a default, the empty
+  string.
+
+- gh-132661: Reflect a recent :pep:`750` change (a short sketch follows
+  below).
+
+  Disallow concatenation of ``string.templatelib.Template`` and
+  :class:`str`. Also, disallow implicit concatenation of t-string literals
+  with string or f-string literals.
+
+Library
+-------
+
+- gh-116738: Make functions in :mod:`grp` thread-safe on the :term:`free
+  threaded <free threading>` build.
+
+Core and Builtins
+-----------------
+
+- gh-135148: Fixed a bug where f-string debug expressions (using ``=``)
+  would incorrectly strip out parts of strings containing escaped quotes
+  and ``#`` characters. Patch by Pablo Galindo.
+
+- gh-133136: Limit excess memory usage in the :term:`free threading` build
+  when a large dictionary or list is resized and accessed by multiple
+  threads.
+
+- gh-91153: Fix a crash when a :class:`bytearray` is concurrently mutated
+  during item assignment.
+
+- gh-127971: Fix an off-by-one read beyond the end of a string in string
+  search.
+
+C API
+-----
+
+- gh-112068: Revert support of nullable arguments in :c:func:`PyArg_Parse`.
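+
+A short sketch of the gh-132661 :pep:`750` concatenation rules above,
+assuming a 3.14 interpreter (t-string literals are new in 3.14)::
+
+    name = "world"
+    template = t"Hello {name}"    # a string.templatelib.Template
+
+    try:
+        template + "!"            # Template + str now raises
+    except TypeError as exc:
+        print(exc)
+
+    try:
+        # implicit concatenation with a plain literal is now a SyntaxError
+        compile('t"a" "b"', "<demo>", "eval")
+    except SyntaxError as exc:
+        print(exc)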
+
+- gh-133296: New variants for the critical section API that accept one or
+  two :c:type:`PyMutex` pointers rather than :c:type:`PyObject` instances
+  are now public in the non-limited C API.
+
+- gh-134009: Expose :c:func:`PyMutex_IsLocked` as part of the public C API.
+
+Build
+-----
+
+- gh-135621: PyREPL no longer depends on the :mod:`curses` standard library.
+  Contributed by Łukasz Langa.
+
+
+What's New in Python 3.14.0 beta 4?
+===================================
+
+*Release date: 2025-07-08*
+
+Tools/Demos
+-----------
+
+- gh-135968: Stubs for ``strip`` are now provided as part of an iOS install.
+
+- gh-133600: Backport the file reorganization for Tools/wasm/wasi.
+
+  This should make backporting future code changes easier. It also
+  simplifies instructions around how to do WASI builds in the devguide.
+
+Tests
+-----
+
+- gh-135966: The iOS testbed now handles the ``app_packages`` folder as a
+  site directory.
+
+- gh-135494: Fix regrtest to support excluding tests from the ``--pgo``
+  test run. Patch by Victor Stinner.
+
+Security
+--------
+
+- gh-136053: :mod:`marshal`: fix a possible crash when deserializing
+  :class:`slice` objects.
+
+- gh-135661: Fix parsing of start and end tags in
+  :class:`html.parser.HTMLParser` according to the HTML5 standard (a
+  runnable sketch follows a few entries below).
+
+  * Whitespace is no longer accepted between ``</`` and the tag name.
+    E.g. ``</ script>`` does not end the script section.
+
+  * Vertical tabulation (``\v``) and non-ASCII whitespace are no longer
+    recognized as whitespace. The only whitespace characters are
+    ``\t\n\r\f`` and space.
+
+  * The null character (U+0000) no longer ends the tag name.
+
+  * Attributes and slashes after the tag name in end tags are now ignored,
+    instead of terminating after the first ``>`` in a quoted attribute
+    value. E.g. ``</script/foo=">"/>``.
+
+  * Multiple slashes and whitespace between the last attribute and the
+    closing ``>`` are now ignored in both start and end tags. E.g.
+    ``<a foo=bar/ //>``.
+
+  * Multiple ``=`` between an attribute name and value are no longer
+    collapsed. E.g. ``<a foo==bar>`` produces attribute "foo" with value
+    "=bar".
+
+  * [Reverted in :gh:`136927`] Whitespace between the ``=`` separator and
+    the attribute name or value is no longer ignored. E.g. ``<a foo =bar>``
+    produces two attributes "foo" and "=bar", both with value None;
+    ``<a foo= bar>`` produces two attributes: "foo" with value "" and "bar"
+    with value None.
+
+- gh-102555: Fix comment parsing in :class:`html.parser.HTMLParser`
+  according to the HTML5 standard. ``--!>`` now ends the comment. ``-- >``
+  no longer ends the comment. Support abnormally ended empty comments
+  ``<-->`` and ``<--->``.
+
+Library
+-------
+
+- gh-136286: Fix pickling failures for protocols 0 and 1 for many objects
+  related to subinterpreters.
+
+- gh-136316: Improve support for evaluating nested forward references in
+  :func:`typing.evaluate_forward_ref`.
+
+- gh-85702: If ``zoneinfo._common.load_tzdata`` is given a package without a
+  resource, a :exc:`zoneinfo.ZoneInfoNotFoundError` is raised rather than a
+  :exc:`PermissionError`. Patch by Victor Stinner.
+
+- gh-136028: Fix parsing of month names containing "İ" (U+0130, LATIN
+  CAPITAL LETTER I WITH DOT ABOVE) in :func:`time.strptime`. This affects
+  the locales az_AZ, ber_DZ, ber_MA and crh_UA.
+
+- gh-135995: In the palmos encoding, make byte ``0x9b`` decode to ``›``
+  (U+203A, SINGLE RIGHT-POINTING ANGLE QUOTATION MARK).
+
+- gh-53203: Fix :func:`time.strptime` for the ``%c`` and ``%x`` formats on
+  the locales byn_ER, wal_ET and lzh_TW, and for the ``%X`` format on the
+  locales ar_SA, bg_BG and lzh_TW.
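+
+To make the gh-135661 attribute-parsing rules above concrete, a small
+runnable sketch (the printed attribute value reflects a 3.14 build that
+contains the fix)::
+
+    from html.parser import HTMLParser
+
+    class Collector(HTMLParser):
+        def handle_starttag(self, tag, attrs):
+            print("start", tag, attrs)
+
+    Collector().feed('<a foo==bar>')
+    # start a [('foo', '=bar')]   -- the extra '=' is kept in the value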
+
+- gh-91555: An earlier change, which was introduced in 3.14.0b2, has been
+  reverted. It disabled logging for a logger during handling of log messages
+  for that logger. Since the reversion, the behaviour should be as it was
+  before 3.14.0b2.
+
+- gh-135878: Fix a crash of :class:`types.SimpleNamespace` on :term:`free
+  threading` builds that occurred when several threads were calling its
+  :meth:`~object.__repr__` method at the same time.
+
+- gh-135836: Fix :exc:`IndexError` in :meth:`asyncio.loop.create_connection`
+  that could occur when a non-\ :exc:`OSError` exception is raised during
+  connection and the socket's ``close()`` raises :exc:`!OSError`.
+
+- gh-135836: Fix :exc:`IndexError` in :meth:`asyncio.loop.create_connection`
+  that could occur when the Happy Eyeballs algorithm resulted in an empty
+  exceptions list during connection attempts.
+
+- gh-135855: Raise :exc:`TypeError` instead of :exc:`SystemError` when
+  :func:`!_interpreters.set___main___attrs` is passed a non-dict object.
+  Patch by Brian Schubert.
+
+- gh-135815: :mod:`netrc`: skip security checks if :func:`os.getuid` is
+  missing. Patch by Bénédikt Tran.
+
+- gh-135640: Address a bug where it was possible to call
+  :func:`xml.etree.ElementTree.ElementTree.write` on an ElementTree object
+  with an invalid root element. This behavior blanked the file passed to
+  ``write`` if it already existed.
+
+- gh-135645: Added the ``supports_isolated_interpreters`` field to
+  :data:`sys.implementation`.
+
+- gh-135646: Raise consistent :exc:`NameError` exceptions in
+  :func:`annotationlib.ForwardRef.evaluate`.
+
+- gh-135557: Fix races on :mod:`heapq` updates and :class:`list` reads on
+  the :term:`free threaded <free threading>` build.
+
+- gh-119180: Only fetch globals and locals if necessary in
+  :func:`annotationlib.get_annotations`.
+
+- gh-135561: Fix a crash on DEBUG builds when an HACL* HMAC routine fails.
+  Patch by Bénédikt Tran.
+
+- gh-135487: Fix :meth:`!reprlib.Repr.repr_int` when given integers with
+  more than :func:`sys.get_int_max_str_digits` digits. Patch by Bénédikt
+  Tran.
+
+- gh-135335: :mod:`multiprocessing`: Flush ``stdout`` and ``stderr`` after
+  preloading modules in the ``forkserver``.
+
+- gh-135069: Fix the "Invalid error handling" exception in
+  :class:`!encodings.idna.IncrementalDecoder` to correctly replace the
+  'errors' parameter.
+
+- gh-130662: Accept leading zeros in precision and width fields for
+  :class:`~decimal.Decimal` formatting, for example ``format(Decimal(1.25),
+  '.016f')``.
+
+- gh-130662: Accept leading zeros in precision and width fields for
+  :class:`~fractions.Fraction` formatting, for example ``format(Fraction(1,
+  3), '.016f')``.
+
+- gh-87790: Support underscore and comma as thousands separators in the
+  fractional part for :class:`~fractions.Fraction`'s formatting. Patch by
+  Sergey B Kirpichev.
+
+- gh-87790: Support underscore and comma as thousands separators in the
+  fractional part for :class:`~decimal.Decimal`'s formatting. Patch by
+  Sergey B Kirpichev.
+
+- gh-130664: Handle a corner case in :class:`~fractions.Fraction`
+  formatting: treat zero-padding (preceding the width field with a zero
+  (``'0'``) character) as equivalent to a fill character of ``'0'`` with an
+  alignment type of ``'='``, just as for :class:`float` (a short sketch
+  follows below).
+
+Documentation
+-------------
+
+- gh-136155: EPUB builds are fixed by excluding non-XHTML-compatible tags.
+
+Core and Builtins
+-----------------
+
+- gh-109700: Fix memory error handling in :c:func:`PyDict_SetDefault`.
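+
+Two of the formatting entries above (gh-130662 and gh-130664) can be
+checked directly; this sketch assumes a 3.14 interpreter::
+
+    from decimal import Decimal
+    from fractions import Fraction
+
+    # gh-130662: leading zeros in the precision field are now accepted
+    print(format(Decimal("1.25"), ".016f"))    # 1.2500000000000000
+
+    # gh-130664: zero-padding a Fraction now behaves like float's
+    # '0'-fill with '=' alignment
+    print(format(Fraction(22, 7), "010.4f"))   # 00003.1429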
+
+- gh-78465: Fix the error message for ``cls.__new__(cls, ...)`` where
+  ``cls`` is not an instantiable builtin or extension type (with ``tp_new``
+  set to ``NULL``).
+
+- gh-129958: Differentiate between t-strings and f-strings in the syntax
+  error for newlines in format specifiers of single-quoted interpolated
+  strings.
+
+- gh-135871: Non-blocking mutex lock attempts now return immediately when
+  the lock is busy instead of briefly spinning in the :term:`free threading`
+  build.
+
+- gh-135106: Restrict the trashcan mechanism to GC'ed objects and untrack
+  them while in the trashcan to prevent the GC and trashcan mechanisms
+  conflicting.
+
+- gh-135607: Fix potential :mod:`weakref` races in an object's destructor on
+  the :term:`free threaded <free threading>` build.
+
+- gh-135608: Fix a crash in the JIT involving attributes of modules.
+
+- gh-135543: Emit a ``sys.remote_exec`` audit event when
+  :func:`sys.remote_exec` is called and migrate ``remote_debugger_script``
+  to ``cpython.remote_debugger_script``.
+
+- gh-134280: Disable constant folding for ``~`` with a boolean argument.
+  This moves the deprecation warning from compile time to runtime.
+
+C API
+-----
+
+- gh-135906: Fix compilation errors when compiling the internal headers with
+  a C++ compiler.
+
+Build
+-----
+
+- gh-134273: Add support for configuring compiler flags for the JIT with
+  ``CFLAGS_JIT``.
+
+
+What's New in Python 3.14.0 beta 3?
+===================================
+
+*Release date: 2025-06-17*
+
+Windows
+-------
+
+- gh-135099: Fix a crash that could occur on Windows when a background
+  thread waits on a :c:type:`PyMutex` while the main thread is shutting down
+  the interpreter.
+
+Tests
+-----
+
+- gh-132815: Fix test__opcode: add ``JUMP_BACKWARD`` to specialization
+  stats.
+
+- gh-135489: Show verbose output for failing tests during the PGO profiling
+  step with ``--enable-optimizations``.
+
+- gh-135120: Add :func:`!test.support.subTests`.
+
+Security
+--------
+
+- gh-135462: Fix quadratic complexity in processing specially crafted input
+  in :class:`html.parser.HTMLParser`. End-of-file errors are now handled
+  according to the HTML5 specs -- comments and declarations are
+  automatically closed, tags are ignored.
+
+- gh-135034: Fixes multiple issues that allowed ``tarfile`` extraction
+  filters (``filter="data"`` and ``filter="tar"``) to be bypassed using
+  crafted symlinks and hard links.
+
+  Addresses :cve:`2024-12718`, :cve:`2025-4138`, :cve:`2025-4330`, and
+  :cve:`2025-4517`.
+
+Library
+-------
+
+- gh-65697: :mod:`configparser`'s error message when attempting to write an
+  invalid key is now more helpful.
+
+- gh-135497: Fix :func:`os.getlogin` failing for longer usernames on
+  BSD-based platforms.
+
+- gh-135429: Fix the argument mismatch in ``_lsprof`` for the ``PY_THROW``
+  event.
+
+- gh-135368: Fix :class:`unittest.mock.Mock` generation on
+  :func:`dataclasses.dataclass` objects. Now all special attributes are set
+  as they were before :gh:`124429`.
+
+- gh-133967: Do not normalize the :mod:`locale` name 'C.UTF-8' to
+  'en_US.UTF-8' (a one-line sketch follows below).
+
+- gh-135321: Raise a correct exception for values greater than 0x7fffffff
+  for the ``BINSTRING`` opcode in the C implementation of :mod:`pickle`.
+
+- gh-135276: Backported bugfixes in zipfile.Path from zipp 3.23. Fixed
+  ``.name``, ``.stem`` and other basename-based properties on Windows when
+  working with a zipfile on disk.
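+
+The gh-133967 locale change above in one line (sketch; the exact returned
+spelling may vary, but it is no longer 'en_US.UTF-8')::
+
+    import locale
+    print(locale.normalize("C.UTF-8"))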
+
+- gh-135244: :mod:`uuid`: when the MAC address cannot be determined, the
+  48-bit node ID is now generated with a cryptographically-secure
+  pseudo-random number generator (CSPRNG) as per :rfc:`RFC 9562, §6.10.3
+  <9562#section-6.10-3>`. This affects :func:`~uuid.uuid1` and
+  :func:`~uuid.uuid6`.
+
+- gh-134970: Fix the "unknown action" exception in
+  :meth:`argparse.ArgumentParser.add_argument_group` to correctly replace
+  the action class.
+
+- gh-134718: :func:`ast.dump` now only omits ``None`` and ``[]`` values if
+  they are default values.
+
+- gh-134939: Add the :mod:`concurrent.interpreters` module. See :pep:`734`.
+
+- gh-134885: Fix a possible crash in the :mod:`compression.zstd` module
+  related to setting parameter types. Patch by Jelle Zijlstra.
+
+- gh-134857: Improve the error report for :mod:`doctest`\ s run with
+  :mod:`unittest`. Remove :mod:`!doctest` module frames from tracebacks and
+  a redundant newline character from a failure message.
+
+- gh-128840: Fix parsing of long IPv6 addresses with an embedded IPv4
+  address.
+
+- gh-134637: Fix a performance regression in calling a :mod:`ctypes`
+  function pointer in :term:`free threading`.
+
+- gh-134696: Built-in HACL* and OpenSSL implementations of hash function
+  constructors now correctly accept the same *documented* named arguments.
+  For instance, :func:`~hashlib.md5` could previously be invoked as
+  ``md5(data=data)`` or ``md5(string=string)`` depending on the underlying
+  implementation, but these calls were not compatible. Patch by Bénédikt
+  Tran.
+
+- gh-134151: :mod:`email`: Fix :exc:`TypeError` in
+  :func:`email.utils.decode_params` when sorting :rfc:`2231` continuations
+  that contain an unnumbered section.
+
+- gh-134210: :func:`curses.window.getch` now correctly handles signals.
+  Patch by Bénédikt Tran.
+
+- gh-134152: :mod:`email`: Fix parsing of email message IDs with an invalid
+  domain.
+
+- gh-133489: :func:`random.getrandbits` can now generate more than 2\
+  :sup:`31` bits. :func:`random.randbytes` can now generate more than 256
+  MiB.
+
+- gh-132813: Improve error messages for incorrect types and values of
+  :class:`csv.Dialect` attributes (a short sketch follows below).
+
+- gh-132969: Prevent the :class:`~concurrent.futures.ProcessPoolExecutor`
+  executor thread, which remains running when :meth:`shutdown(wait=False)
+  <concurrent.futures.Executor.shutdown>` is called, from attempting to
+  adjust the pool's worker processes after the object state has already
+  been reset during shutdown. A combination of conditions, including a
+  worker process having terminated abnormally, resulted in an exception and
+  a potential hang when the still-running executor thread attempted to
+  replace dead workers within the pool.
+
+- gh-127081: Fix libc thread safety issues with :mod:`os` by replacing
+  ``getlogin`` with the re-entrant ``getlogin_r`` version.
+
+- gh-131884: Fix formatting issues in :func:`json.dump` when both *indent*
+  and *skipkeys* are used.
+
+- gh-130999: Avoid exiting the new REPL and offer suggestions even if there
+  are non-string candidates when errors occur.
+
+Documentation
+-------------
+
+- gh-135171: Document that the :term:`iterator` for the leftmost
+  :keyword:`!for` clause in a generator expression is created immediately.
+
+- bpo-45210: Document that the error indicator may be set in
+  ``tp_dealloc``, and how to avoid clobbering it.
+
+Core and Builtins
+-----------------
+
+- gh-135496: Fix a typo in the f-string conversion type error
+  ("exclamanation" -> "exclamation").
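+
+The gh-132813 improvement above can be seen with any invalid dialect
+attribute; a sketch (the exact wording of the message is an implementation
+detail)::
+
+    import csv
+
+    try:
+        csv.reader([], delimiter=", ")   # must be a 1-character string
+    except TypeError as exc:
+        print(exc)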
+
+- gh-135371: Fixed :mod:`asyncio` debugging tools to properly display
+  internal coroutine call stacks alongside external task dependencies. The
+  ``python -m asyncio ps`` and ``python -m asyncio pstree`` commands now
+  show the complete execution context. Patch by Pablo Galindo.
+
+Library
+-------
+
+- gh-127319: Set the ``allow_reuse_port`` class variable to ``False`` on the
+  XMLRPC, logging, and HTTP servers. This matches the behavior in prior
+  Python releases, which is to not allow port reuse.
+
+Core and Builtins
+-----------------
+
+- gh-135171: Reverts the behavior of async generator expressions when
+  created with an object without an ``__aiter__`` method to the pre-3.13
+  behavior of raising a TypeError.
+
+- gh-130077: Properly raise custom syntax errors when incorrect syntax
+  containing names that are prefixes of soft keywords is encountered. Patch
+  by Pablo Galindo.
+
+- gh-135171: Reverts the behavior of generator expressions when created with
+  a non-iterable to the pre-3.13 behavior of raising a TypeError. It is no
+  longer possible to cause a crash in the debugger by altering the generator
+  expression's local variables. This is achieved by moving the ``GET_ITER``
+  instruction back to the creation of the generator expression and adding an
+  additional check to ``FOR_ITER``.
+
+Library
+-------
+
+- gh-116738: Make methods in :mod:`heapq` thread-safe on the :term:`free
+  threaded <free threading>` build.
+
+Core and Builtins
+-----------------
+
+- gh-134876: Add support to :pep:`768` remote debugging for Linux kernels
+  which don't have CONFIG_CROSS_MEMORY_ATTACH configured.
+
+- gh-134889: Fix handling of a few opcodes that leave operands on the stack
+  when optimizing ``LOAD_FAST``.
+
+Library
+-------
+
+- gh-134908: Fix a crash when iterating over lines in a text file on the
+  :term:`free threaded <free threading>` build.
+
+Core and Builtins
+-----------------
+
+- gh-132617: Fix the :meth:`dict.update` modification check that could
+  incorrectly raise a "dict mutated during update" error when a different
+  dictionary was modified that happens to share the same underlying keys
+  object.
+
+- gh-134679: Fix a crash in the :term:`free threading` build's QSBR code
+  that could occur when changing an object's ``__dict__`` attribute.
+
+- gh-127682: No longer call ``__iter__`` twice in list comprehensions. This
+  brings the behavior of list comprehensions in line with other forms of
+  iteration (a short sketch follows below).
+
+- gh-133912: Fix the C API function ``PyObject_GenericSetDict`` to handle
+  extension classes with inline values.
+
+C API
+-----
+
+- gh-134989: Fix the ``Py_RETURN_NONE``, ``Py_RETURN_TRUE`` and
+  ``Py_RETURN_FALSE`` macros in the limited C API 3.11 and older: don't
+  treat ``Py_None``, ``Py_True`` and ``Py_False`` as immortal. Patch by
+  Victor Stinner.
+
+- gh-134989: Implement :c:func:`PyObject_DelAttr` and
+  :c:func:`PyObject_DelAttrString` as macros in the limited C API 3.12 and
+  older. Patch by Victor Stinner.
+
+- gh-133968: Add the :c:func:`PyUnicodeWriter_WriteASCII` function to write
+  an ASCII string into a :c:type:`PyUnicodeWriter`. The function is faster
+  than :c:func:`PyUnicodeWriter_WriteUTF8`, but has undefined behavior if
+  the input string contains non-ASCII characters. Patch by Victor Stinner.
+
+Build
+-----
+
+- gh-119132: Remove the "experimental" tag from the CPython free-threading
+  build.
+
+- gh-135497: Fix the detection of ``MAXLOGNAME`` in the ``configure.ac``
+  script.
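+
+The gh-127682 comprehension change above can be verified with a tiny
+instrumented iterable (editor's sketch)::
+
+    class CountingIter:
+        def __init__(self, items):
+            self.items = items
+            self.iter_calls = 0
+        def __iter__(self):
+            self.iter_calls += 1
+            return iter(self.items)
+
+    src = CountingIter([1, 2, 3])
+    [x for x in src]
+    print(src.iter_calls)   # exactly 1; it used to be called twice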
+ +- gh-134923: Windows builds with profile-guided optimization enabled now use + ``/GENPROFILE`` and ``/USEPROFILE`` instead of deprecated ``/LTCG:`` + options. + +- gh-134774: Fix :c:macro:`Py_DEBUG` macro redefinition warnings on Windows + debug builds. Patch by Chris Eibl. + +- gh-134632: Fixed ``build-details.json`` generation to use ``INCLUDEPY``, + in order to reference the ``pythonX.Y`` subdirectory of the include + directory, as required in :pep:`739`, instead of the top-level include + directory. + + +What's New in Python 3.14.0 beta 2? +=================================== + +*Release date: 2025-05-26* + +Windows +------- + +- gh-130727: Fix a race in internal calls into WMI that can result in an + "invalid handle" exception under high load. Patch by Chris Eibl. + +- gh-76023: Make :func:`os.path.realpath` ignore Windows error 1005 when in + non-strict mode. + +- gh-133779: Reverts the change to generate different :file:`pyconfig.h` + files based on compiler settings, as it was frequently causing extension + builds to break. In particular, the ``Py_GIL_DISABLED`` preprocessor + variable must now always be defined explicitly when compiling for the + experimental free-threaded runtime. The :func:`sysconfig.get_config_var` + function can be used to determine whether the current runtime was compiled + with that flag or not. + +- gh-133626: Ensures packages are not accidentally bundled into the + traditional installer. + +Tools/Demos +----------- + +- gh-134215: :term:`REPL` import autocomplete only suggests private modules + when explicitly specified. + +Tests +----- + +- gh-133744: Fix multiprocessing interrupt test. Add an event to synchronize + the parent process with the child process: wait until the child process + starts sleeping. Patch by Victor Stinner. + +- gh-133682: Fixed test case + ``test.test_annotationlib.TestStringFormat.test_displays`` which ensures + proper handling of complex data structures (lists, sets, dictionaries, and + tuples) in string annotations. + +- gh-133639: Fix ``TestPyReplAutoindent.test_auto_indent_default()`` doesn't + run ``input_code``. + +Security +-------- + +- gh-133767: Fix use-after-free in the "unicode-escape" decoder with a + non-"strict" error handler. + +- gh-128840: Short-circuit the processing of long IPv6 addresses early in + :mod:`ipaddress` to prevent excessive memory consumption and a minor + denial-of-service. + +Library +------- + +- gh-132710: If possible, ensure that :func:`uuid.getnode` returns the same + result even across different processes. Previously, the result was + constant only within the same process. Patch by Bénédikt Tran. + +- gh-80334: :func:`multiprocessing.freeze_support` now checks for work on + any "spawn" start method platform rather than only on Windows. + +- gh-134582: Fix tokenize.untokenize() round-trip errors related to + t-strings braces escaping + +- gh-134546: Ensure :mod:`pdb` remote debugging script is readable by remote + Python process. + +- gh-134451: Converted ``asyncio.tools.CycleFoundException`` from dataclass + to a regular exception type. + +- gh-114177: Fix :mod:`asyncio` to not close subprocess pipes which would + otherwise error out when the event loop is already closed. + +- gh-90871: Fixed an off by one error concerning the backlog parameter in + :meth:`~asyncio.loop.create_unix_server`. Contributed by Christian + Harries. + +- gh-134323: Fix the :meth:`threading.RLock.locked` method. + +- gh-86802: Fixed asyncio memory leak in cancelled shield tasks. 
For
+  shielded tasks where the shield was cancelled, log potential exceptions
+  through the exception handler. Contributed by Christian Harries.
+
+- gh-134209: :mod:`curses`: The :meth:`curses.window.instr` and
+  :meth:`curses.window.getstr` methods now allocate their internal buffer on
+  the heap instead of the stack; in addition, the maximum buffer size is
+  increased from 1023 to 2047.
+
+- gh-134235: Updated tab completion in the REPL to include builtin modules.
+  Contributed by Tom Wang and Hunter Young.
+
+- gh-134152: Fixed an :exc:`UnboundLocalError` that could occur during
+  :mod:`email` header parsing if an expected trailing delimiter is missing
+  in some contexts.
+
+- gh-134168: :mod:`http.server`: Fix IPv6 address binding and
+  :option:`--directory <http.server --directory>` handling when using
+  HTTPS.
+
+- gh-62184: Remove the import of the C implementation of :class:`io.FileIO`
+  from the Python implementation, which has its own implementation.
+
+- gh-133982: Emit :exc:`RuntimeWarning` in the Python implementation of
+  :mod:`io` when the :term:`file-like object` is not closed explicitly in
+  the presence of multiple I/O layers.
+
+- gh-133890: The :mod:`tarfile` module now handles :exc:`UnicodeEncodeError`
+  in the same way as :exc:`OSError` when it cannot extract a member.
+
+- gh-134097: Fix the interaction of the new :term:`REPL` and the :option:`-X
+  showrefcount <-X>` command line option.
+
+- gh-133889: The generated directory listing page in
+  :class:`http.server.SimpleHTTPRequestHandler` now only shows the decoded
+  path component of the requested URL, and not the query and fragment.
+
+- gh-134098: Fix handling of paths that end with a percent-encoded slash
+  (``%2f`` or ``%2F``) in :class:`http.server.SimpleHTTPRequestHandler`.
+
+- gh-132124: On POSIX-compliant systems,
+  :func:`!multiprocessing.util.get_temp_dir` now ignores :envvar:`TMPDIR`
+  (and similar environment variables) if the path length of ``AF_UNIX``
+  socket files exceeds the platform-specific maximum length when using the
+  :ref:`forkserver <multiprocessing-start-methods>` start method.
+  Patch by Bénédikt Tran.
+
+- gh-134062: :mod:`ipaddress`: fix collisions in :meth:`~object.__hash__`
+  for :class:`~ipaddress.IPv4Network` and :class:`~ipaddress.IPv6Network`
+  objects.
+
+- gh-133970: Make :class:`!string.templatelib.Template` and
+  :class:`!string.templatelib.Interpolation` generic.
+
+- gh-71253: Raise :exc:`ValueError` in :func:`open` if *opener* returns a
+  negative file descriptor in the Python implementation of :mod:`io`, to
+  match the C implementation.
+
+- gh-133960: Simplify and improve :func:`typing.evaluate_forward_ref`. It
+  now no longer raises errors on certain invalid types. In several
+  situations, it is now able to evaluate forward references that were
+  previously unsupported.
+
+- gh-133925: Make the private class ``typing._UnionGenericAlias`` hashable.
+
+- gh-133653: Fix :class:`argparse.ArgumentParser` with the *formatter_class*
+  argument. Fix a TypeError when *formatter_class* is a custom subclass of
+  :class:`!HelpFormatter`. Fix a TypeError when *formatter_class* is not a
+  subclass of :class:`!HelpFormatter` and a non-standard *prefix_char* is
+  used. Fix support for colorizing when *formatter_class* is not a subclass
+  of :class:`!HelpFormatter`.
+
+- gh-132641: Fixed a race in :func:`functools.lru_cache` under
+  free-threading.
+
+- gh-133783: Fix a bug when applying :func:`copy.replace` to :mod:`ast`
+  objects. Attributes that default to ``None`` were incorrectly treated as
+  required for manually created AST nodes (a short sketch follows below).
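+
+A minimal sketch of the gh-133783 fix above, assuming a 3.14 interpreter::
+
+    import ast
+    import copy
+
+    node = ast.Constant(42)               # lineno/col_offset left unset
+    clone = copy.replace(node, value=99)  # no longer requires the unset
+                                          # optional attributes
+    print(ast.dump(clone))                # Constant(value=99)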
+
+- gh-133684: Fix a bug where :func:`annotationlib.get_annotations` would
+  return the wrong result for certain classes that are part of a class
+  hierarchy where ``from __future__ import annotations`` is used.
+
+- gh-77057: Fix handling of invalid markup declarations in
+  :class:`html.parser.HTMLParser`.
+
+- gh-130328: Speed up pasting in ``PyREPL`` on Windows in a legacy console.
+  Patch by Chris Eibl.
+
+- gh-133701: Fix a bug where :class:`typing.TypedDict` classes defined under
+  ``from __future__ import annotations`` and inheriting from another
+  ``TypedDict`` had an incorrect ``__annotations__`` attribute.
+
+- gh-133581: Improve unparsing of t-strings in :func:`ast.unparse` and
+  ``from __future__ import annotations``. Empty t-strings now round-trip
+  correctly and formatting in interpolations is preserved. Patch by Jelle
+  Zijlstra.
+
+- gh-133551: Support t-strings (:pep:`750`) in :mod:`annotationlib`. Patch
+  by Jelle Zijlstra.
+
+- gh-133439: Fix dot commands with trailing spaces being mistaken for
+  multi-line SQL statements in the sqlite3 command-line interface.
+
+- gh-132493: Avoid accessing ``__annotations__`` unnecessarily in
+  :func:`inspect.signature`.
+
+- gh-132876: ``ldexp()`` on Windows doesn't round subnormal results before
+  Windows 11, but should. Python's :func:`math.ldexp` wrapper now does
+  round them, so results may change slightly, in rare cases of very small
+  results, on Windows versions before 11.
+
+- gh-133009: :mod:`xml.etree.ElementTree`: Fix a crash in
+  :meth:`!Element.__deepcopy__` when the element is concurrently mutated.
+  Patch by Bénédikt Tran.
+
+- gh-91555: Ignore log messages generated during handling of log messages,
+  to avoid deadlock or infinite recursion. [NOTE: This change has since been
+  reverted.]
+
+- gh-125028: :data:`functools.Placeholder` cannot be passed to
+  :func:`functools.partial` as a keyword argument.
+
+- gh-62824: Fix aliases for the ``iso8859_8`` encoding. Patch by Dave
+  Goncalves.
+
+- gh-86155: :meth:`html.parser.HTMLParser.close` no longer loses data when
+  the ``<script>`` element is not closed.
+
+    def js_output(self, attrs=None):
+        """Return a string suitable for JavaScript."""
+        output_string = self.OutputString(attrs)
+        if _has_control_character(output_string):
+            raise CookieError(
+                "Control characters are not allowed in cookies")
+        return """
+        <script type="text/javascript">
+        <!-- begin hiding
+        document.cookie = \"%s\";
+        // end hiding -->
+        </script>
+        """ % (output_string.replace('"', r'\"'))
+
+    def OutputString(self, attrs=None):
+        # Build up our result
+        #
+        result = []
+        append = result.append
+
+        # First, the key=value pair
+        append("%s=%s" % (self.key, self.coded_value))
+
+        # Now add any defined attributes
+        if attrs is None:
+            attrs = self._reserved
+        items = sorted(self.items())
+        for key, value in items:
+            if value == "":
+                continue
+            if key not in attrs:
+                continue
+            if key == "expires" and isinstance(value, int):
+                append("%s=%s" % (self._reserved[key], _getdate(value)))
+            elif key == "max-age" and isinstance(value, int):
+                append("%s=%d" % (self._reserved[key], value))
+            elif key == "comment" and isinstance(value, str):
+                append("%s=%s" % (self._reserved[key], _quote(value)))
+            elif key in self._flags:
+                if value:
+                    append(str(self._reserved[key]))
+            else:
+                append("%s=%s" % (self._reserved[key], value))
+
+        # Return the result
+        return _semispacejoin(result)
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+
+#
+# Pattern for finding cookie
+#
+# This used to be strict parsing based on the RFC2109 and RFC2068
+# specifications. I have since discovered that MSIE 3.0x doesn't
+# follow the character rules outlined in those specs. As a
+# result, the parsing rules here are less strict.
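+#
+# Illustrative sketch (an editor's addition, not part of the module): how
+# the relaxed pattern below behaves through SimpleCookie, assuming stock
+# Python 3.14:
+#
+#     >>> from http.cookies import SimpleCookie
+#     >>> c = SimpleCookie()
+#     >>> c.load('session=abc123; Path=/; HttpOnly')
+#     >>> c['session'].value
+#     'abc123'
+#     >>> c['session']['path'], c['session']['httponly']
+#     ('/', True)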
+#
+
+_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
+_LegalValueChars = _LegalKeyChars + r'\[\]'
+_CookiePattern = re.compile(r"""
+    \s*                            # Optional whitespace at start of cookie
+    (?P<key>                       # Start of group 'key'
+    [""" + _LegalKeyChars + r"""]+?   # Any word of at least one letter
+    )                              # End of group 'key'
+    (                              # Optional group: there may not be a value.
+    \s*=\s*                          # Equal Sign
+    (?P<val>                         # Start of group 'val'
+    "(?:[^\\"]|\\.)*"                  # Any double-quoted string
+    |                                  # or
+    # Special case for "expires" attr
+    (\w{3,6}day|\w{3}),\s              # Day of the week or abbreviated day
+    [\w\d\s-]{9,11}\s[\d:]{8}\sGMT     # Date and time in specific format
+    |                                  # or
+    [""" + _LegalValueChars + r"""]*     # Any word or empty string
+    )                                  # End of group 'val'
+    )?                             # End of optional value group
+    \s*                            # Any number of spaces.
+    (\s+|;|$)                      # Ending either at space, semicolon, or EOS.
+    """, re.ASCII | re.VERBOSE)    # re.ASCII may be removed if safe.
+
+
+# At long last, here is the cookie class. Using this class is almost just like
+# using a dictionary. See this module's docstring for example usage.
+#
+class BaseCookie(dict):
+    """A container class for a set of Morsels."""
+
+    def value_decode(self, val):
+        """real_value, coded_value = value_decode(STRING)
+        Called prior to setting a cookie's value from the network
+        representation. The VALUE is the value read from HTTP
+        header.
+        Override this function to modify the behavior of cookies.
+        """
+        return val, val
+
+    def value_encode(self, val):
+        """real_value, coded_value = value_encode(VALUE)
+        Called prior to setting a cookie's value from the dictionary
+        representation. The VALUE is the value being assigned.
+        Override this function to modify the behavior of cookies.
+        """
+        strval = str(val)
+        return strval, strval
+
+    def __init__(self, input=None):
+        if input:
+            self.load(input)
+
+    def __set(self, key, real_value, coded_value):
+        """Private method for setting a cookie's value"""
+        M = self.get(key, Morsel())
+        M.set(key, real_value, coded_value)
+        dict.__setitem__(self, key, M)
+
+    def __setitem__(self, key, value):
+        """Dictionary style assignment."""
+        if isinstance(value, Morsel):
+            # allow assignment of constructed Morsels (e.g. for pickling)
+            dict.__setitem__(self, key, value)
+        else:
+            rval, cval = self.value_encode(value)
+            self.__set(key, rval, cval)
+
+    def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
+        """Return a string suitable for HTTP."""
+        result = []
+        items = sorted(self.items())
+        for key, value in items:
+            value_output = value.output(attrs, header)
+            if _has_control_character(value_output):
+                raise CookieError(
+                    "Control characters are not allowed in cookies")
+            result.append(value_output)
+        return sep.join(result)
+
+    __str__ = output
+
+    def __repr__(self):
+        l = []
+        items = sorted(self.items())
+        for key, value in items:
+            l.append('%s=%s' % (key, repr(value.value)))
+        return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
+
+    def js_output(self, attrs=None):
+        """Return a string suitable for JavaScript."""
+        result = []
+        items = sorted(self.items())
+        for key, value in items:
+            result.append(value.js_output(attrs))
+        return _nulljoin(result)
+
+    def load(self, rawdata):
+        """Load cookies from a string (presumably HTTP_COOKIE) or
+        from a dictionary.
Loading cookies from a dictionary 'd' + is equivalent to calling: + map(Cookie.__setitem__, d.keys(), d.values()) + """ + if isinstance(rawdata, str): + self.__parse_string(rawdata) + else: + # self.update() wouldn't call our custom __setitem__ + for key, value in rawdata.items(): + self[key] = value + return + + def __parse_string(self, str, patt=_CookiePattern): + i = 0 # Our starting point + n = len(str) # Length of string + parsed_items = [] # Parsed (type, key, value) triples + morsel_seen = False # A key=value pair was previously encountered + + TYPE_ATTRIBUTE = 1 + TYPE_KEYVALUE = 2 + + # We first parse the whole cookie string and reject it if it's + # syntactically invalid (this helps avoid some classes of injection + # attacks). + while 0 <= i < n: + # Start looking for a cookie + match = patt.match(str, i) + if not match: + # No more cookies + break + + key, value = match.group("key"), match.group("val") + i = match.end(0) + + if key[0] == "$": + if not morsel_seen: + # We ignore attributes which pertain to the cookie + # mechanism as a whole, such as "$Version". + # See RFC 2965. (Does anyone care?) + continue + parsed_items.append((TYPE_ATTRIBUTE, key[1:], value)) + elif key.lower() in Morsel._reserved: + if not morsel_seen: + # Invalid cookie string + return + if value is None: + if key.lower() in Morsel._flags: + parsed_items.append((TYPE_ATTRIBUTE, key, True)) + else: + # Invalid cookie string + return + else: + parsed_items.append((TYPE_ATTRIBUTE, key, _unquote(value))) + elif value is not None: + parsed_items.append((TYPE_KEYVALUE, key, self.value_decode(value))) + morsel_seen = True + else: + # Invalid cookie string + return + + # The cookie string is valid, apply it. + M = None # current morsel + for tp, key, value in parsed_items: + if tp == TYPE_ATTRIBUTE: + assert M is not None + M[key] = value + else: + assert tp == TYPE_KEYVALUE + rval, cval = value + self.__set(key, rval, cval) + M = self[key] + + +class SimpleCookie(BaseCookie): + """ + SimpleCookie supports strings as cookie values. When setting + the value using the dictionary assignment notation, SimpleCookie + calls the builtin str() to convert the value to a string. Values + received from HTTP are kept as strings. + """ + def value_decode(self, val): + return _unquote(val), val + + def value_encode(self, val): + strval = str(val) + return strval, _quote(strval) diff --git a/Python314_4_x86_Template/Lib/http/server.py b/Python314_4_x86_Template/Lib/http/server.py new file mode 100644 index 00000000..ac1f57c2 --- /dev/null +++ b/Python314_4_x86_Template/Lib/http/server.py @@ -0,0 +1,1441 @@ +"""HTTP server classes. + +Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see +SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST, +and (deprecated) CGIHTTPRequestHandler for CGI scripts. + +It does, however, optionally implement HTTP/1.1 persistent connections. + +Notes on CGIHTTPRequestHandler +------------------------------ + +This class is deprecated. It implements GET and POST requests to cgi-bin scripts. + +If the os.fork() function is not present (Windows), subprocess.Popen() is used, +with slightly altered but never documented semantics. Use from a threaded +process is likely to trigger a warning at os.fork() time. + +In all cases, the implementation is intentionally naive -- all +requests are executed synchronously. + +SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL +-- it may execute arbitrary Python code or external programs. 
+ +Note that status code 200 is sent prior to execution of a CGI script, so +scripts cannot send other status codes such as 302 (redirect). + +XXX To do: + +- log requests even later (to capture byte count) +- log user-agent header and other interesting goodies +- send error log to separate file +""" + + +# See also: +# +# HTTP Working Group T. Berners-Lee +# INTERNET-DRAFT R. T. Fielding +# H. Frystyk Nielsen +# Expires September 8, 1995 March 8, 1995 +# +# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt +# +# and +# +# Network Working Group R. Fielding +# Request for Comments: 2616 et al +# Obsoletes: 2068 June 1999 +# Category: Standards Track +# +# URL: http://www.faqs.org/rfcs/rfc2616.html + +# Log files +# --------- +# +# Here's a quote from the NCSA httpd docs about log file format. +# +# | The logfile format is as follows. Each line consists of: +# | +# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb +# | +# | host: Either the DNS name or the IP number of the remote client +# | rfc931: Any information returned by identd for this person, +# | - otherwise. +# | authuser: If user sent a userid for authentication, the user name, +# | - otherwise. +# | DD: Day +# | Mon: Month (calendar name) +# | YYYY: Year +# | hh: hour (24-hour format, the machine's timezone) +# | mm: minutes +# | ss: seconds +# | request: The first line of the HTTP request as sent by the client. +# | ddd: the status code returned by the server, - if not available. +# | bbbb: the total number of bytes sent, +# | *not including the HTTP/1.0 header*, - if not available +# | +# | You can determine the name of the file accessed through request. +# +# (Actually, the latter is only true if you know the server configuration +# at the time the request was made!) + +__version__ = "0.6" + +__all__ = [ + "HTTPServer", "ThreadingHTTPServer", + "HTTPSServer", "ThreadingHTTPSServer", + "BaseHTTPRequestHandler", "SimpleHTTPRequestHandler", + "CGIHTTPRequestHandler", +] + +import copy +import datetime +import email.utils +import html +import http.client +import io +import itertools +import mimetypes +import os +import posixpath +import select +import shutil +import socket +import socketserver +import sys +import time +import urllib.parse + +from http import HTTPStatus + + +# Default error message template +DEFAULT_ERROR_MESSAGE = """\ + + + + + + Error response + + +

+        <h1>Error response</h1>
+        <p>Error code: %(code)d</p>
+        <p>Message: %(message)s.</p>
+        <p>Error code explanation: %(code)s - %(explain)s.</p>
+    </body>
+ + +""" + +DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8" + +# Data larger than this will be read in chunks, to prevent extreme +# overallocation. +_MIN_READ_BUF_SIZE = 1 << 20 + +class HTTPServer(socketserver.TCPServer): + + allow_reuse_address = True # Seems to make sense in testing environment + allow_reuse_port = False + + def server_bind(self): + """Override server_bind to store the server name.""" + socketserver.TCPServer.server_bind(self) + host, port = self.server_address[:2] + self.server_name = socket.getfqdn(host) + self.server_port = port + + +class ThreadingHTTPServer(socketserver.ThreadingMixIn, HTTPServer): + daemon_threads = True + + +class HTTPSServer(HTTPServer): + def __init__(self, server_address, RequestHandlerClass, + bind_and_activate=True, *, certfile, keyfile=None, + password=None, alpn_protocols=None): + try: + import ssl + except ImportError: + raise RuntimeError("SSL module is missing; " + "HTTPS support is unavailable") + + self.ssl = ssl + self.certfile = certfile + self.keyfile = keyfile + self.password = password + # Support by default HTTP/1.1 + self.alpn_protocols = ( + ["http/1.1"] if alpn_protocols is None else alpn_protocols + ) + + super().__init__(server_address, + RequestHandlerClass, + bind_and_activate) + + def server_activate(self): + """Wrap the socket in SSLSocket.""" + super().server_activate() + context = self._create_context() + self.socket = context.wrap_socket(self.socket, server_side=True) + + def _create_context(self): + """Create a secure SSL context.""" + context = self.ssl.create_default_context(self.ssl.Purpose.CLIENT_AUTH) + context.load_cert_chain(self.certfile, self.keyfile, self.password) + context.set_alpn_protocols(self.alpn_protocols) + return context + + +class ThreadingHTTPSServer(socketserver.ThreadingMixIn, HTTPSServer): + daemon_threads = True + + +class BaseHTTPRequestHandler(socketserver.StreamRequestHandler): + + """HTTP request handler base class. + + The following explanation of HTTP serves to guide you through the + code as well as to expose any misunderstandings I may have about + HTTP (so you don't need to read the code to figure out I'm wrong + :-). + + HTTP (HyperText Transfer Protocol) is an extensible protocol on + top of a reliable stream transport (e.g. TCP/IP). The protocol + recognizes three parts to a request: + + 1. One line identifying the request type and path + 2. An optional set of RFC-822-style headers + 3. An optional data part + + The headers and data are separated by a blank line. + + The first line of the request has the form + + + + where is a (case-sensitive) keyword such as GET or POST, + is a string containing path information for the request, + and should be the string "HTTP/1.0" or "HTTP/1.1". + is encoded using the URL encoding scheme (using %xx to signify + the ASCII character with hex code xx). + + The specification specifies that lines are separated by CRLF but + for compatibility with the widest range of clients recommends + servers also handle LF. Similarly, whitespace in the request line + is treated sensibly (allowing multiple spaces between components + and allowing trailing whitespace). + + Similarly, for output, lines ought to be separated by CRLF pairs + but most clients grok LF characters just fine. + + If the first line of the request has the form + + + + (i.e. is left out) then this is assumed to be an HTTP + 0.9 request; this form has no optional headers and data part and + the reply consists of just the data. 
+
+    The reply form of the HTTP 1.x protocol again has three parts:
+
+    1. One line giving the response code
+    2. An optional set of RFC-822-style headers
+    3. The data
+
+    Again, the headers and data are separated by a blank line.
+
+    The response code line has the form
+
+    <version> <responsecode> <responsestring>
+
+    where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
+    <responsecode> is a 3-digit response code indicating success or
+    failure of the request, and <responsestring> is an optional
+    human-readable string explaining what the response code means.
+
+    This server parses the request and the headers, and then calls a
+    function specific to the request type (<command>). Specifically,
+    a request SPAM will be handled by a method do_SPAM(). If no
+    such method exists the server sends an error response to the
+    client. If it exists, it is called with no arguments:
+
+    do_SPAM()
+
+    Note that the request name is case sensitive (i.e. SPAM and spam
+    are different requests).
+
+    The various request details are stored in instance variables:
+
+    - client_address is the client IP address in the form (host,
+      port);
+
+    - command, path and version are the broken-down request line;
+
+    - headers is an instance of email.message.Message (or a derived
+      class) containing the header information;
+
+    - rfile is a file object open for reading positioned at the
+      start of the optional input data part;
+
+    - wfile is a file object open for writing.
+
+    IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
+
+    The first thing to be written must be the response line. Then
+    follow 0 or more header lines, then a blank line, and then the
+    actual data (if any). The meaning of the header lines depends on
+    the command executed by the server; in most cases, when data is
+    returned, there should be at least one header line of the form
+
+    Content-type: <type>/<subtype>
+
+    where <type> and <subtype> should be registered MIME types,
+    e.g. "text/html" or "text/plain".
+
+    """
+
+    # The Python system version, truncated to its first component.
+    sys_version = "Python/" + sys.version.split()[0]
+
+    # The server software version. You may want to override this.
+    # The format is multiple whitespace-separated strings,
+    # where each string is of the form name[/version].
+    server_version = "BaseHTTP/" + __version__
+
+    error_message_format = DEFAULT_ERROR_MESSAGE
+    error_content_type = DEFAULT_ERROR_CONTENT_TYPE
+
+    # The default request version. This only affects responses up until
+    # the point where the request line is parsed, so it mainly decides what
+    # the client gets back when sending a malformed request line.
+    # Most web servers default to HTTP 0.9, i.e. don't send a status line.
+    default_request_version = "HTTP/0.9"
+
+    def parse_request(self):
+        """Parse a request (internal).
+
+        The request should be stored in self.raw_requestline; the results
+        are in self.command, self.path, self.request_version and
+        self.headers.
+
+        Return True for success, False for failure; on failure, any relevant
+        error response has already been sent back.
+ + """ + is_http_0_9 = False + self.command = None # set in case of error on the first line + self.request_version = version = self.default_request_version + self.close_connection = True + requestline = str(self.raw_requestline, 'iso-8859-1') + requestline = requestline.rstrip('\r\n') + self.requestline = requestline + words = requestline.split() + if len(words) == 0: + return False + + if len(words) >= 3: # Enough to determine protocol version + version = words[-1] + try: + if not version.startswith('HTTP/'): + raise ValueError + base_version_number = version.split('/', 1)[1] + version_number = base_version_number.split(".") + # RFC 2145 section 3.1 says there can be only one "." and + # - major and minor numbers MUST be treated as + # separate integers; + # - HTTP/2.4 is a lower version than HTTP/2.13, which in + # turn is lower than HTTP/12.3; + # - Leading zeros MUST be ignored by recipients. + if len(version_number) != 2: + raise ValueError + if any(not component.isdigit() for component in version_number): + raise ValueError("non digit in http version") + if any(len(component) > 10 for component in version_number): + raise ValueError("unreasonable length http version") + version_number = int(version_number[0]), int(version_number[1]) + except (ValueError, IndexError): + self.send_error( + HTTPStatus.BAD_REQUEST, + "Bad request version (%r)" % version) + return False + if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": + self.close_connection = False + if version_number >= (2, 0): + self.send_error( + HTTPStatus.HTTP_VERSION_NOT_SUPPORTED, + "Invalid HTTP version (%s)" % base_version_number) + return False + self.request_version = version + + if not 2 <= len(words) <= 3: + self.send_error( + HTTPStatus.BAD_REQUEST, + "Bad request syntax (%r)" % requestline) + return False + command, path = words[:2] + if len(words) == 2: + self.close_connection = True + if command != 'GET': + self.send_error( + HTTPStatus.BAD_REQUEST, + "Bad HTTP/0.9 request type (%r)" % command) + return False + is_http_0_9 = True + self.command, self.path = command, path + + # gh-87389: The purpose of replacing '//' with '/' is to protect + # against open redirect attacks possibly triggered if the path starts + # with '//' because http clients treat //path as an absolute URI + # without scheme (similar to http://path) rather than a path. + if self.path.startswith('//'): + self.path = '/' + self.path.lstrip('/') # Reduce to a single / + + # For HTTP/0.9, headers are not expected at all. + if is_http_0_9: + self.headers = {} + return True + + # Examine the headers and look for a Connection directive. 
+ try: + self.headers = http.client.parse_headers(self.rfile, + _class=self.MessageClass) + except http.client.LineTooLong as err: + self.send_error( + HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE, + "Line too long", + str(err)) + return False + except http.client.HTTPException as err: + self.send_error( + HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE, + "Too many headers", + str(err) + ) + return False + + conntype = self.headers.get('Connection', "") + if conntype.lower() == 'close': + self.close_connection = True + elif (conntype.lower() == 'keep-alive' and + self.protocol_version >= "HTTP/1.1"): + self.close_connection = False + # Examine the headers and look for an Expect directive + expect = self.headers.get('Expect', "") + if (expect.lower() == "100-continue" and + self.protocol_version >= "HTTP/1.1" and + self.request_version >= "HTTP/1.1"): + if not self.handle_expect_100(): + return False + return True + + def handle_expect_100(self): + """Decide what to do with an "Expect: 100-continue" header. + + If the client is expecting a 100 Continue response, we must + respond with either a 100 Continue or a final response before + waiting for the request body. The default is to always respond + with a 100 Continue. You can behave differently (for example, + reject unauthorized requests) by overriding this method. + + This method should either return True (possibly after sending + a 100 Continue response) or send an error response and return + False. + + """ + self.send_response_only(HTTPStatus.CONTINUE) + self.end_headers() + return True + + def handle_one_request(self): + """Handle a single HTTP request. + + You normally don't need to override this method; see the class + __doc__ string for information on how to handle specific HTTP + commands such as GET and POST. + + """ + try: + self.raw_requestline = self.rfile.readline(65537) + if len(self.raw_requestline) > 65536: + self.requestline = '' + self.request_version = '' + self.command = '' + self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG) + return + if not self.raw_requestline: + self.close_connection = True + return + if not self.parse_request(): + # An error code has been sent, just exit + return + mname = 'do_' + self.command + if not hasattr(self, mname): + self.send_error( + HTTPStatus.NOT_IMPLEMENTED, + "Unsupported method (%r)" % self.command) + return + method = getattr(self, mname) + method() + self.wfile.flush() #actually send the response if not already done. + except TimeoutError as e: + #a read or a write timed out. Discard this connection + self.log_error("Request timed out: %r", e) + self.close_connection = True + return + + def handle(self): + """Handle multiple requests if necessary.""" + self.close_connection = True + + self.handle_one_request() + while not self.close_connection: + self.handle_one_request() + + def send_error(self, code, message=None, explain=None): + """Send and log an error reply. + + Arguments are + * code: an HTTP error code + 3 digits + * message: a simple optional 1 line reason phrase. + *( HTAB / SP / VCHAR / %x80-FF ) + defaults to short entry matching the response code + * explain: a detailed message defaults to the long entry + matching the response code. + + This sends an error response (so it must be called before any + output has been generated), logs the error, and finally sends + a piece of HTML explaining the error to the user. + + """ + + try: + shortmsg, longmsg = self.responses[code] + except KeyError: + shortmsg, longmsg = '???', '???' 
+ if message is None: + message = shortmsg + if explain is None: + explain = longmsg + self.log_error("code %d, message %s", code, message) + self.send_response(code, message) + self.send_header('Connection', 'close') + + # Message body is omitted for cases described in: + # - RFC7230: 3.3. 1xx, 204(No Content), 304(Not Modified) + # - RFC7231: 6.3.6. 205(Reset Content) + body = None + if (code >= 200 and + code not in (HTTPStatus.NO_CONTENT, + HTTPStatus.RESET_CONTENT, + HTTPStatus.NOT_MODIFIED)): + # HTML encode to prevent Cross Site Scripting attacks + # (see bug #1100201) + content = (self.error_message_format % { + 'code': code, + 'message': html.escape(message, quote=False), + 'explain': html.escape(explain, quote=False) + }) + body = content.encode('UTF-8', 'replace') + self.send_header("Content-Type", self.error_content_type) + self.send_header('Content-Length', str(len(body))) + self.end_headers() + + if self.command != 'HEAD' and body: + self.wfile.write(body) + + def send_response(self, code, message=None): + """Add the response header to the headers buffer and log the + response code. + + Also send two standard headers with the server software + version and the current date. + + """ + self.log_request(code) + self.send_response_only(code, message) + self.send_header('Server', self.version_string()) + self.send_header('Date', self.date_time_string()) + + def send_response_only(self, code, message=None): + """Send the response header only.""" + if self.request_version != 'HTTP/0.9': + if message is None: + if code in self.responses: + message = self.responses[code][0] + else: + message = '' + if not hasattr(self, '_headers_buffer'): + self._headers_buffer = [] + self._headers_buffer.append(("%s %d %s\r\n" % + (self.protocol_version, code, message)).encode( + 'latin-1', 'strict')) + + def send_header(self, keyword, value): + """Send a MIME header to the headers buffer.""" + if self.request_version != 'HTTP/0.9': + if not hasattr(self, '_headers_buffer'): + self._headers_buffer = [] + self._headers_buffer.append( + ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict')) + + if keyword.lower() == 'connection': + if value.lower() == 'close': + self.close_connection = True + elif value.lower() == 'keep-alive': + self.close_connection = False + + def end_headers(self): + """Send the blank line ending the MIME headers.""" + if self.request_version != 'HTTP/0.9': + self._headers_buffer.append(b"\r\n") + self.flush_headers() + + def flush_headers(self): + if hasattr(self, '_headers_buffer'): + self.wfile.write(b"".join(self._headers_buffer)) + self._headers_buffer = [] + + def log_request(self, code='-', size='-'): + """Log an accepted request. + + This is called by send_response(). + + """ + if isinstance(code, HTTPStatus): + code = code.value + self.log_message('"%s" %s %s', + self.requestline, str(code), str(size)) + + def log_error(self, format, *args): + """Log an error. + + This is called when a request cannot be fulfilled. By + default it passes the message on to log_message(). + + Arguments are the same as for log_message(). + + XXX This should go to the separate error log. + + """ + + self.log_message(format, *args) + + # https://en.wikipedia.org/wiki/List_of_Unicode_characters#Control_codes + _control_char_table = str.maketrans( + {c: fr'\x{c:02x}' for c in itertools.chain(range(0x20), range(0x7f,0xa0))}) + _control_char_table[ord('\\')] = r'\\' + + def log_message(self, format, *args): + """Log an arbitrary message. + + This is used by all other logging functions. 
Override + it if you have specific logging wishes. + + The first argument, FORMAT, is a format string for the + message to be logged. If the format string contains + any % escapes requiring parameters, they should be + specified as subsequent arguments (it's just like + printf!). + + The client ip and current date/time are prefixed to + every message. + + Unicode control characters are replaced with escaped hex + before writing the output to stderr. + + """ + + message = format % args + sys.stderr.write("%s - - [%s] %s\n" % + (self.address_string(), + self.log_date_time_string(), + message.translate(self._control_char_table))) + + def version_string(self): + """Return the server software version string.""" + return self.server_version + ' ' + self.sys_version + + def date_time_string(self, timestamp=None): + """Return the current date and time formatted for a message header.""" + if timestamp is None: + timestamp = time.time() + return email.utils.formatdate(timestamp, usegmt=True) + + def log_date_time_string(self): + """Return the current time formatted for logging.""" + now = time.time() + year, month, day, hh, mm, ss, x, y, z = time.localtime(now) + s = "%02d/%3s/%04d %02d:%02d:%02d" % ( + day, self.monthname[month], year, hh, mm, ss) + return s + + weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + + monthname = [None, + 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', + 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + + def address_string(self): + """Return the client address.""" + + return self.client_address[0] + + # Essentially static class variables + + # The version of the HTTP protocol we support. + # Set this to HTTP/1.1 to enable automatic keepalive + protocol_version = "HTTP/1.0" + + # MessageClass used to parse headers + MessageClass = http.client.HTTPMessage + + # hack to maintain backwards compatibility + responses = { + v: (v.phrase, v.description) + for v in HTTPStatus.__members__.values() + } + + +class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): + + """Simple HTTP request handler with GET and HEAD commands. + + This serves files from the current directory and any of its + subdirectories. The MIME type for files is determined by + calling the .guess_type() method. + + The GET and HEAD requests are identical except that the HEAD + request omits the actual contents of the file. + + """ + + server_version = "SimpleHTTP/" + __version__ + index_pages = ("index.html", "index.htm") + extensions_map = _encodings_map_default = { + '.gz': 'application/gzip', + '.Z': 'application/octet-stream', + '.bz2': 'application/x-bzip2', + '.xz': 'application/x-xz', + } + + def __init__(self, *args, directory=None, **kwargs): + if directory is None: + directory = os.getcwd() + self.directory = os.fspath(directory) + super().__init__(*args, **kwargs) + + def do_GET(self): + """Serve a GET request.""" + f = self.send_head() + if f: + try: + self.copyfile(f, self.wfile) + finally: + f.close() + + def do_HEAD(self): + """Serve a HEAD request.""" + f = self.send_head() + if f: + f.close() + + def send_head(self): + """Common code for GET and HEAD commands. + + This sends the response code and MIME headers. + + Return value is either a file object (which has to be copied + to the outputfile by the caller unless the command was HEAD, + and must be closed by the caller under all circumstances), or + None, in which case the caller has nothing further to do. 
+
+        """
+        path = self.translate_path(self.path)
+        f = None
+        if os.path.isdir(path):
+            parts = urllib.parse.urlsplit(self.path)
+            if not parts.path.endswith(('/', '%2f', '%2F')):
+                # redirect browser - doing basically what apache does
+                self.send_response(HTTPStatus.MOVED_PERMANENTLY)
+                new_parts = (parts[0], parts[1], parts[2] + '/',
+                             parts[3], parts[4])
+                new_url = urllib.parse.urlunsplit(new_parts)
+                self.send_header("Location", new_url)
+                self.send_header("Content-Length", "0")
+                self.end_headers()
+                return None
+            for index in self.index_pages:
+                index = os.path.join(path, index)
+                if os.path.isfile(index):
+                    path = index
+                    break
+            else:
+                return self.list_directory(path)
+        ctype = self.guess_type(path)
+        # check for trailing "/" which should return 404. See Issue17324
+        # The test for this was added in test_httpserver.py
+        # However, some OS platforms accept a trailing slash as a filename
+        # See discussion on python-dev and Issue34711 regarding
+        # parsing and rejection of filenames with a trailing slash
+        if path.endswith("/"):
+            self.send_error(HTTPStatus.NOT_FOUND, "File not found")
+            return None
+        try:
+            f = open(path, 'rb')
+        except OSError:
+            self.send_error(HTTPStatus.NOT_FOUND, "File not found")
+            return None
+
+        try:
+            fs = os.fstat(f.fileno())
+            # Use browser cache if possible
+            if ("If-Modified-Since" in self.headers
+                    and "If-None-Match" not in self.headers):
+                # compare If-Modified-Since and time of last file modification
+                try:
+                    ims = email.utils.parsedate_to_datetime(
+                        self.headers["If-Modified-Since"])
+                except (TypeError, IndexError, OverflowError, ValueError):
+                    # ignore ill-formed values
+                    pass
+                else:
+                    if ims.tzinfo is None:
+                        # obsolete format with no timezone, cf.
+                        # https://tools.ietf.org/html/rfc7231#section-7.1.1.1
+                        ims = ims.replace(tzinfo=datetime.timezone.utc)
+                    if ims.tzinfo is datetime.timezone.utc:
+                        # compare to UTC datetime of last modification
+                        last_modif = datetime.datetime.fromtimestamp(
+                            fs.st_mtime, datetime.timezone.utc)
+                        # remove microseconds, like in If-Modified-Since
+                        last_modif = last_modif.replace(microsecond=0)
+
+                        if last_modif <= ims:
+                            self.send_response(HTTPStatus.NOT_MODIFIED)
+                            self.end_headers()
+                            f.close()
+                            return None
+
+            self.send_response(HTTPStatus.OK)
+            self.send_header("Content-type", ctype)
+            self.send_header("Content-Length", str(fs[6]))
+            self.send_header("Last-Modified",
+                self.date_time_string(fs.st_mtime))
+            self.end_headers()
+            return f
+        except:
+            f.close()
+            raise
+
+    def list_directory(self, path):
+        """Helper to produce a directory listing (absent index.html).
+
+        Return value is either a file object, or None (indicating an
+        error).  In either case, the headers are sent, making the
+        interface the same as for send_head().
+
+        """
+        try:
+            list = os.listdir(path)
+        except OSError:
+            self.send_error(
+                HTTPStatus.NOT_FOUND,
+                "No permission to list directory")
+            return None
+        list.sort(key=lambda a: a.lower())
+        r = []
+        displaypath = self.path
+        displaypath = displaypath.split('#', 1)[0]
+        displaypath = displaypath.split('?', 1)[0]
+        try:
+            displaypath = urllib.parse.unquote(displaypath,
+                                               errors='surrogatepass')
+        except UnicodeDecodeError:
+            displaypath = urllib.parse.unquote(displaypath)
+        displaypath = html.escape(displaypath, quote=False)
+        enc = sys.getfilesystemencoding()
+        title = f'Directory listing for {displaypath}'
+        r.append('<!DOCTYPE HTML>')
+        r.append('<html lang="en">')
+        r.append('<head>')
+        r.append(f'<meta charset="{enc}">')
+        r.append(f'<title>{title}</title>')
+        r.append('</head>')
+        r.append(f'<body>\n<h1>{title}</h1>')
+        r.append('<hr>\n<ul>')
+        for name in list:
+            fullname = os.path.join(path, name)
+            displayname = linkname = name
+            # Append / for directories or @ for symbolic links
+            if os.path.isdir(fullname):
+                displayname = name + "/"
+                linkname = name + "/"
+            if os.path.islink(fullname):
+                displayname = name + "@"
+                # Note: a link to a directory displays with @ and links with /
+            r.append('<li><a href="%s">%s</a></li>'
+                    % (urllib.parse.quote(linkname,
+                                          errors='surrogatepass'),
+                       html.escape(displayname, quote=False)))
+        r.append('</ul>\n<hr>\n</body>\n</html>\n')
+        encoded = '\n'.join(r).encode(enc, 'surrogateescape')
+        f = io.BytesIO()
+        f.write(encoded)
+        f.seek(0)
+        self.send_response(HTTPStatus.OK)
+        self.send_header("Content-type", "text/html; charset=%s" % enc)
+        self.send_header("Content-Length", str(len(encoded)))
+        self.end_headers()
+        return f
+
+    def translate_path(self, path):
+        """Translate a /-separated PATH to the local filename syntax.
+
+        Components that mean special things to the local file system
+        (e.g. drive or directory names) are ignored.  (XXX They should
+        probably be diagnosed.)
+
+        """
+        # abandon query parameters
+        path = path.split('#', 1)[0]
+        path = path.split('?', 1)[0]
+        # Don't forget explicit trailing slash when normalizing. Issue17324
+        try:
+            path = urllib.parse.unquote(path, errors='surrogatepass')
+        except UnicodeDecodeError:
+            path = urllib.parse.unquote(path)
+        trailing_slash = path.endswith('/')
+        path = posixpath.normpath(path)
+        words = path.split('/')
+        words = filter(None, words)
+        path = self.directory
+        for word in words:
+            if os.path.dirname(word) or word in (os.curdir, os.pardir):
+                # Ignore components that are not a simple file/directory name
+                continue
+            path = os.path.join(path, word)
+        if trailing_slash:
+            path += '/'
+        return path
+
+    def copyfile(self, source, outputfile):
+        """Copy all data between two file objects.
+
+        The SOURCE argument is a file object open for reading
+        (or anything with a read() method) and the DESTINATION
+        argument is a file object open for writing (or
+        anything with a write() method).
+
+        The only reason for overriding this would be to change
+        the block size or perhaps to replace newlines by CRLF
+        -- note however that the default server uses this
+        to copy binary data as well.
+
+        """
+        shutil.copyfileobj(source, outputfile)
+
+    def guess_type(self, path):
+        """Guess the type of a file.
+
+        Argument is a PATH (a filename).
+
+        Return value is a string of the form type/subtype,
+        usable for a MIME Content-type header.
+
+        The default implementation looks the file's extension
+        up in the table self.extensions_map, using application/octet-stream
+        as a default; however it would be permissible (if
+        slow) to look inside the data to make a better guess.
+
+        """
+        base, ext = posixpath.splitext(path)
+        if ext in self.extensions_map:
+            return self.extensions_map[ext]
+        ext = ext.lower()
+        if ext in self.extensions_map:
+            return self.extensions_map[ext]
+        guess, _ = mimetypes.guess_file_type(path)
+        if guess:
+            return guess
+        return 'application/octet-stream'
+
+
+# Utilities for CGIHTTPRequestHandler
+
+def _url_collapse_path(path):
+    """
+    Given a URL path, remove extra '/'s and '.' path elements, collapse
+    any '..' references, and return the collapsed path.
+
+    Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
+    The utility of this function is limited to the is_cgi method and helps
+    prevent some security attacks.
+
+    Returns: The reconstituted URL, which will always start with a '/'.
+
+    Raises: IndexError if too many '..' occur within the path.
+
+    """
+    # Query component should not be involved.
+    path, _, query = path.partition('?')
+    path = urllib.parse.unquote(path)
+
+    # Similar to os.path.split(os.path.normpath(path)) but specific to URL
+    # path semantics rather than local operating system semantics.
+    path_parts = path.split('/')
+    head_parts = []
+    for part in path_parts[:-1]:
+        if part == '..':
+            head_parts.pop() # IndexError if more '..'
than prior parts + elif part and part != '.': + head_parts.append( part ) + if path_parts: + tail_part = path_parts.pop() + if tail_part: + if tail_part == '..': + head_parts.pop() + tail_part = '' + elif tail_part == '.': + tail_part = '' + else: + tail_part = '' + + if query: + tail_part = '?'.join((tail_part, query)) + + splitpath = ('/' + '/'.join(head_parts), tail_part) + collapsed_path = "/".join(splitpath) + + return collapsed_path + + + +nobody = None + +def nobody_uid(): + """Internal routine to get nobody's uid""" + global nobody + if nobody: + return nobody + try: + import pwd + except ImportError: + return -1 + try: + nobody = pwd.getpwnam('nobody')[2] + except KeyError: + nobody = 1 + max(x[2] for x in pwd.getpwall()) + return nobody + + +def executable(path): + """Test for executable file.""" + return os.access(path, os.X_OK) + + +class CGIHTTPRequestHandler(SimpleHTTPRequestHandler): + + """Complete HTTP server with GET, HEAD and POST commands. + + GET and HEAD also support running CGI scripts. + + The POST command is *only* implemented for CGI scripts. + + """ + + def __init__(self, *args, **kwargs): + import warnings + warnings._deprecated("http.server.CGIHTTPRequestHandler", + remove=(3, 15)) + super().__init__(*args, **kwargs) + + # Determine platform specifics + have_fork = hasattr(os, 'fork') + + # Make rfile unbuffered -- we need to read one line and then pass + # the rest to a subprocess, so we can't use buffered input. + rbufsize = 0 + + def do_POST(self): + """Serve a POST request. + + This is only implemented for CGI scripts. + + """ + + if self.is_cgi(): + self.run_cgi() + else: + self.send_error( + HTTPStatus.NOT_IMPLEMENTED, + "Can only POST to CGI scripts") + + def send_head(self): + """Version of send_head that support CGI scripts""" + if self.is_cgi(): + return self.run_cgi() + else: + return SimpleHTTPRequestHandler.send_head(self) + + def is_cgi(self): + """Test whether self.path corresponds to a CGI script. + + Returns True and updates the cgi_info attribute to the tuple + (dir, rest) if self.path requires running a CGI script. + Returns False otherwise. + + If any exception is raised, the caller should assume that + self.path was rejected as invalid and act accordingly. + + The default implementation tests whether the normalized url + path begins with one of the strings in self.cgi_directories + (and the next character is a '/' or the end of the string). + + """ + collapsed_path = _url_collapse_path(self.path) + dir_sep = collapsed_path.find('/', 1) + while dir_sep > 0 and not collapsed_path[:dir_sep] in self.cgi_directories: + dir_sep = collapsed_path.find('/', dir_sep+1) + if dir_sep > 0: + head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] + self.cgi_info = head, tail + return True + return False + + + cgi_directories = ['/cgi-bin', '/htbin'] + + def is_executable(self, path): + """Test whether argument path is an executable file.""" + return executable(path) + + def is_python(self, path): + """Test whether argument path is a Python script.""" + head, tail = os.path.splitext(path) + return tail.lower() in (".py", ".pyw") + + def run_cgi(self): + """Execute a CGI script.""" + dir, rest = self.cgi_info + path = dir + '/' + rest + i = path.find('/', len(dir)+1) + while i >= 0: + nextdir = path[:i] + nextrest = path[i+1:] + + scriptdir = self.translate_path(nextdir) + if os.path.isdir(scriptdir): + dir, rest = nextdir, nextrest + i = path.find('/', len(dir)+1) + else: + break + + # find an explicit query string, if present. 
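+        # As an illustration (hypothetical request): for
+        # "/cgi-bin/test.py/extra?a=1", is_cgi() leaves dir='/cgi-bin' and
+        # rest='test.py/extra?a=1'; the partition below then yields
+        # rest='test.py/extra' and query='a=1'.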
+ rest, _, query = rest.partition('?') + + # dissect the part after the directory name into a script name & + # a possible additional path, to be stored in PATH_INFO. + i = rest.find('/') + if i >= 0: + script, rest = rest[:i], rest[i:] + else: + script, rest = rest, '' + + scriptname = dir + '/' + script + scriptfile = self.translate_path(scriptname) + if not os.path.exists(scriptfile): + self.send_error( + HTTPStatus.NOT_FOUND, + "No such CGI script (%r)" % scriptname) + return + if not os.path.isfile(scriptfile): + self.send_error( + HTTPStatus.FORBIDDEN, + "CGI script is not a plain file (%r)" % scriptname) + return + ispy = self.is_python(scriptname) + if self.have_fork or not ispy: + if not self.is_executable(scriptfile): + self.send_error( + HTTPStatus.FORBIDDEN, + "CGI script is not executable (%r)" % scriptname) + return + + # Reference: https://www6.uniovi.es/~antonio/ncsa_httpd/cgi/env.html + # XXX Much of the following could be prepared ahead of time! + env = copy.deepcopy(os.environ) + env['SERVER_SOFTWARE'] = self.version_string() + env['SERVER_NAME'] = self.server.server_name + env['GATEWAY_INTERFACE'] = 'CGI/1.1' + env['SERVER_PROTOCOL'] = self.protocol_version + env['SERVER_PORT'] = str(self.server.server_port) + env['REQUEST_METHOD'] = self.command + uqrest = urllib.parse.unquote(rest) + env['PATH_INFO'] = uqrest + env['PATH_TRANSLATED'] = self.translate_path(uqrest) + env['SCRIPT_NAME'] = scriptname + env['QUERY_STRING'] = query + env['REMOTE_ADDR'] = self.client_address[0] + authorization = self.headers.get("authorization") + if authorization: + authorization = authorization.split() + if len(authorization) == 2: + import base64, binascii + env['AUTH_TYPE'] = authorization[0] + if authorization[0].lower() == "basic": + try: + authorization = authorization[1].encode('ascii') + authorization = base64.decodebytes(authorization).\ + decode('ascii') + except (binascii.Error, UnicodeError): + pass + else: + authorization = authorization.split(':') + if len(authorization) == 2: + env['REMOTE_USER'] = authorization[0] + # XXX REMOTE_IDENT + if self.headers.get('content-type') is None: + env['CONTENT_TYPE'] = self.headers.get_content_type() + else: + env['CONTENT_TYPE'] = self.headers['content-type'] + length = self.headers.get('content-length') + if length: + env['CONTENT_LENGTH'] = length + referer = self.headers.get('referer') + if referer: + env['HTTP_REFERER'] = referer + accept = self.headers.get_all('accept', ()) + env['HTTP_ACCEPT'] = ','.join(accept) + ua = self.headers.get('user-agent') + if ua: + env['HTTP_USER_AGENT'] = ua + co = filter(None, self.headers.get_all('cookie', [])) + cookie_str = ', '.join(co) + if cookie_str: + env['HTTP_COOKIE'] = cookie_str + # XXX Other HTTP_* headers + # Since we're setting the env in the parent, provide empty + # values to override previously set values + for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH', + 'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'): + env.setdefault(k, "") + + self.send_response(HTTPStatus.OK, "Script output follows") + self.flush_headers() + + decoded_query = query.replace('+', ' ') + + if self.have_fork: + # Unix -- fork as we should + args = [script] + if '=' not in decoded_query: + args.append(decoded_query) + nobody = nobody_uid() + self.wfile.flush() # Always flush before forking + pid = os.fork() + if pid != 0: + # Parent + pid, sts = os.waitpid(pid, 0) + # throw away additional data [see bug #427345] + while select.select([self.rfile], [], [], 0)[0]: + if not self.rfile.read(1): + break + 
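+            # Map the raw waitpid() status to a conventional exit code;
+            # a nonzero value means the CGI script failed.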
exitcode = os.waitstatus_to_exitcode(sts) + if exitcode: + self.log_error(f"CGI script exit code {exitcode}") + return + # Child + try: + try: + os.setuid(nobody) + except OSError: + pass + os.dup2(self.rfile.fileno(), 0) + os.dup2(self.wfile.fileno(), 1) + os.execve(scriptfile, args, env) + except: + self.server.handle_error(self.request, self.client_address) + os._exit(127) + + else: + # Non-Unix -- use subprocess + import subprocess + cmdline = [scriptfile] + if self.is_python(scriptfile): + interp = sys.executable + if interp.lower().endswith("w.exe"): + # On Windows, use python.exe, not pythonw.exe + interp = interp[:-5] + interp[-4:] + cmdline = [interp, '-u'] + cmdline + if '=' not in query: + cmdline.append(query) + self.log_message("command: %s", subprocess.list2cmdline(cmdline)) + try: + nbytes = int(length) + except (TypeError, ValueError): + nbytes = 0 + p = subprocess.Popen(cmdline, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env = env + ) + if self.command.lower() == "post" and nbytes > 0: + cursize = 0 + data = self.rfile.read(min(nbytes, _MIN_READ_BUF_SIZE)) + while len(data) < nbytes and len(data) != cursize: + cursize = len(data) + # This is a geometric increase in read size (never more + # than doubling out the current length of data per loop + # iteration). + delta = min(cursize, nbytes - cursize) + try: + data += self.rfile.read(delta) + except TimeoutError: + break + else: + data = None + # throw away additional data [see bug #427345] + while select.select([self.rfile._sock], [], [], 0)[0]: + if not self.rfile._sock.recv(1): + break + stdout, stderr = p.communicate(data) + self.wfile.write(stdout) + if stderr: + self.log_error('%s', stderr) + p.stderr.close() + p.stdout.close() + status = p.returncode + if status: + self.log_error("CGI script exit status %#x", status) + else: + self.log_message("CGI script exited OK") + + +def _get_best_family(*address): + infos = socket.getaddrinfo( + *address, + type=socket.SOCK_STREAM, + flags=socket.AI_PASSIVE, + ) + family, type, proto, canonname, sockaddr = next(iter(infos)) + return family, sockaddr + + +def test(HandlerClass=BaseHTTPRequestHandler, + ServerClass=ThreadingHTTPServer, + protocol="HTTP/1.0", port=8000, bind=None, + tls_cert=None, tls_key=None, tls_password=None): + """Test the HTTP request handler class. + + This runs an HTTP server on port 8000 (or the port argument). + + """ + ServerClass.address_family, addr = _get_best_family(bind, port) + HandlerClass.protocol_version = protocol + + if tls_cert: + server = ServerClass(addr, HandlerClass, certfile=tls_cert, + keyfile=tls_key, password=tls_password) + else: + server = ServerClass(addr, HandlerClass) + + with server as httpd: + host, port = httpd.socket.getsockname()[:2] + url_host = f'[{host}]' if ':' in host else host + protocol = 'HTTPS' if tls_cert else 'HTTP' + print( + f"Serving {protocol} on {host} port {port} " + f"({protocol.lower()}://{url_host}:{port}/) ..." 
+ ) + try: + httpd.serve_forever() + except KeyboardInterrupt: + print("\nKeyboard interrupt received, exiting.") + sys.exit(0) + +if __name__ == '__main__': + import argparse + import contextlib + + parser = argparse.ArgumentParser(color=True) + parser.add_argument('--cgi', action='store_true', + help='run as CGI server') + parser.add_argument('-b', '--bind', metavar='ADDRESS', + help='bind to this address ' + '(default: all interfaces)') + parser.add_argument('-d', '--directory', default=os.getcwd(), + help='serve this directory ' + '(default: current directory)') + parser.add_argument('-p', '--protocol', metavar='VERSION', + default='HTTP/1.0', + help='conform to this HTTP version ' + '(default: %(default)s)') + parser.add_argument('--tls-cert', metavar='PATH', + help='path to the TLS certificate chain file') + parser.add_argument('--tls-key', metavar='PATH', + help='path to the TLS key file') + parser.add_argument('--tls-password-file', metavar='PATH', + help='path to the password file for the TLS key') + parser.add_argument('port', default=8000, type=int, nargs='?', + help='bind to this port ' + '(default: %(default)s)') + args = parser.parse_args() + + if not args.tls_cert and args.tls_key: + parser.error("--tls-key requires --tls-cert to be set") + + tls_key_password = None + if args.tls_password_file: + if not args.tls_cert: + parser.error("--tls-password-file requires --tls-cert to be set") + + try: + with open(args.tls_password_file, "r", encoding="utf-8") as f: + tls_key_password = f.read().strip() + except OSError as e: + parser.error(f"Failed to read TLS password file: {e}") + + if args.cgi: + handler_class = CGIHTTPRequestHandler + else: + handler_class = SimpleHTTPRequestHandler + + # ensure dual-stack is not disabled; ref #38907 + class DualStackServerMixin: + + def server_bind(self): + # suppress exception when protocol is IPv4 + with contextlib.suppress(Exception): + self.socket.setsockopt( + socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) + return super().server_bind() + + def finish_request(self, request, client_address): + self.RequestHandlerClass(request, client_address, self, + directory=args.directory) + + class HTTPDualStackServer(DualStackServerMixin, ThreadingHTTPServer): + pass + class HTTPSDualStackServer(DualStackServerMixin, ThreadingHTTPSServer): + pass + + ServerClass = HTTPSDualStackServer if args.tls_cert else HTTPDualStackServer + + test( + HandlerClass=handler_class, + ServerClass=ServerClass, + port=args.port, + bind=args.bind, + protocol=args.protocol, + tls_cert=args.tls_cert, + tls_key=args.tls_key, + tls_password=tls_key_password, + ) diff --git a/Python314_4_x86_Template/Lib/imaplib.py b/Python314_4_x86_Template/Lib/imaplib.py new file mode 100644 index 00000000..cbe129b3 --- /dev/null +++ b/Python314_4_x86_Template/Lib/imaplib.py @@ -0,0 +1,1967 @@ +"""IMAP4 client. + +Based on RFC 2060. + +Public class: IMAP4 +Public variable: Debug +Public functions: Internaldate2tuple + Int2AP + ParseFlags + Time2Internaldate +""" + +# Author: Piers Lauder December 1997. +# +# Authentication code contributed by Donn Cave June 1998. +# String method conversion by ESR, February 2001. +# GET/SETACL contributed by Anthony Baxter April 2001. +# IMAP4_SSL contributed by Tino Lange March 2002. +# GET/SETQUOTA contributed by Andreas Zeidler June 2002. +# PROXYAUTH contributed by Rick Holbert November 2002. +# GET/SETANNOTATION contributed by Tomas Lindroos June 2005. +# IDLE contributed by Forest August 2024. 
+ +__version__ = "2.60" + +import binascii, errno, random, re, socket, subprocess, sys, time, calendar +from datetime import datetime, timezone, timedelta +from io import DEFAULT_BUFFER_SIZE + +try: + import ssl + HAVE_SSL = True +except ImportError: + HAVE_SSL = False + +__all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple", + "Int2AP", "ParseFlags", "Time2Internaldate"] + +# Globals + +CRLF = b'\r\n' +Debug = 0 +IMAP4_PORT = 143 +IMAP4_SSL_PORT = 993 +AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first + +# Maximal line length when calling readline(). This is to prevent +# reading arbitrary length lines. RFC 3501 and 2060 (IMAP 4rev1) +# don't specify a line length. RFC 2683 suggests limiting client +# command lines to 1000 octets and that servers should be prepared +# to accept command lines up to 8000 octets, so we used to use 10K here. +# In the modern world (eg: gmail) the response to, for example, a +# search command can be quite large, so we now use 1M. +_MAXLINE = 1000000 + + +# Commands + +Commands = { + # name valid states + 'APPEND': ('AUTH', 'SELECTED'), + 'AUTHENTICATE': ('NONAUTH',), + 'CAPABILITY': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'), + 'CHECK': ('SELECTED',), + 'CLOSE': ('SELECTED',), + 'COPY': ('SELECTED',), + 'CREATE': ('AUTH', 'SELECTED'), + 'DELETE': ('AUTH', 'SELECTED'), + 'DELETEACL': ('AUTH', 'SELECTED'), + 'ENABLE': ('AUTH', ), + 'EXAMINE': ('AUTH', 'SELECTED'), + 'EXPUNGE': ('SELECTED',), + 'FETCH': ('SELECTED',), + 'GETACL': ('AUTH', 'SELECTED'), + 'GETANNOTATION':('AUTH', 'SELECTED'), + 'GETQUOTA': ('AUTH', 'SELECTED'), + 'GETQUOTAROOT': ('AUTH', 'SELECTED'), + 'IDLE': ('AUTH', 'SELECTED'), + 'MYRIGHTS': ('AUTH', 'SELECTED'), + 'LIST': ('AUTH', 'SELECTED'), + 'LOGIN': ('NONAUTH',), + 'LOGOUT': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'), + 'LSUB': ('AUTH', 'SELECTED'), + 'MOVE': ('SELECTED',), + 'NAMESPACE': ('AUTH', 'SELECTED'), + 'NOOP': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'), + 'PARTIAL': ('SELECTED',), # NB: obsolete + 'PROXYAUTH': ('AUTH',), + 'RENAME': ('AUTH', 'SELECTED'), + 'SEARCH': ('SELECTED',), + 'SELECT': ('AUTH', 'SELECTED'), + 'SETACL': ('AUTH', 'SELECTED'), + 'SETANNOTATION':('AUTH', 'SELECTED'), + 'SETQUOTA': ('AUTH', 'SELECTED'), + 'SORT': ('SELECTED',), + 'STARTTLS': ('NONAUTH',), + 'STATUS': ('AUTH', 'SELECTED'), + 'STORE': ('SELECTED',), + 'SUBSCRIBE': ('AUTH', 'SELECTED'), + 'THREAD': ('SELECTED',), + 'UID': ('SELECTED',), + 'UNSUBSCRIBE': ('AUTH', 'SELECTED'), + 'UNSELECT': ('SELECTED',), + } + +# Patterns to match server responses + +Continuation = re.compile(br'\+( (?P.*))?') +Flags = re.compile(br'.*FLAGS \((?P[^\)]*)\)') +InternalDate = re.compile(br'.*INTERNALDATE "' + br'(?P[ 0123][0-9])-(?P[A-Z][a-z][a-z])-(?P[0-9][0-9][0-9][0-9])' + br' (?P[0-9][0-9]):(?P[0-9][0-9]):(?P[0-9][0-9])' + br' (?P[-+])(?P[0-9][0-9])(?P[0-9][0-9])' + br'"') +# Literal is no longer used; kept for backward compatibility. +Literal = re.compile(br'.*{(?P\d+)}$', re.ASCII) +MapCRLF = re.compile(br'\r\n|\r|\n') +# We no longer exclude the ']' character from the data portion of the response +# code, even though it violates the RFC. Popular IMAP servers such as Gmail +# allow flags with ']', and there are programs (including imaplib!) that can +# produce them. The problem with this is if the 'text' portion of the response +# includes a ']' we'll parse the response wrong (which is the point of the RFC +# restriction). 
However, that seems less likely to be a problem in practice +# than being unable to correctly parse flags that include ']' chars, which +# was reported as a real-world problem in issue #21815. +Response_code = re.compile(br'\[(?P[A-Z-]+)( (?P.*))?\]') +Untagged_response = re.compile(br'\* (?P[A-Z-]+)( (?P.*))?') +# Untagged_status is no longer used; kept for backward compatibility +Untagged_status = re.compile( + br'\* (?P\d+) (?P[A-Z-]+)( (?P.*))?', re.ASCII) +# We compile these in _mode_xxx. +_Literal = br'.*{(?P\d+)}$' +_Untagged_status = br'\* (?P\d+) (?P[A-Z-]+)( (?P.*))?' + + + +class IMAP4: + + r"""IMAP4 client class. + + Instantiate with: IMAP4([host[, port[, timeout=None]]]) + + host - host's name (default: localhost); + port - port number (default: standard IMAP4 port). + timeout - socket timeout (default: None) + If timeout is not given or is None, + the global default socket timeout is used + + All IMAP4rev1 commands are supported by methods of the same + name (in lowercase). + + All arguments to commands are converted to strings, except for + AUTHENTICATE, and the last argument to APPEND which is passed as + an IMAP4 literal. If necessary (the string contains any + non-printing characters or white-space and isn't enclosed with + either parentheses or double quotes) each string is quoted. + However, the 'password' argument to the LOGIN command is always + quoted. If you want to avoid having an argument string quoted + (eg: the 'flags' argument to STORE) then enclose the string in + parentheses (eg: "(\Deleted)"). + + Each command returns a tuple: (type, [data, ...]) where 'type' + is usually 'OK' or 'NO', and 'data' is either the text from the + tagged response, or untagged results from command. Each 'data' + is either a string, or a tuple. If a tuple, then the first part + is the header of the response, and the second part contains + the data (ie: 'literal' value). + + Errors raise the exception class .error(""). + IMAP4 server errors raise .abort(""), + which is a sub-class of 'error'. Mailbox status changes + from READ-WRITE to READ-ONLY raise the exception class + .readonly(""), which is a sub-class of 'abort'. + + "error" exceptions imply a program error. + "abort" exceptions imply the connection should be reset, and + the command re-tried. + "readonly" exceptions imply the command should be re-tried. + + Note: to use this module, you must read the RFCs pertaining to the + IMAP4 protocol, as the semantics of the arguments to each IMAP4 + command are left to the invoker, not to mention the results. Also, + most IMAP servers implement a sub-set of the commands available here. 
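+
+    For example, a short session might look like this (the credentials
+    are placeholders):
+
+        M = IMAP4('imap.example.com')
+        M.login('user', 'password')
+        typ, data = M.select()
+        typ, data = M.search(None, 'UNSEEN')
+        M.logout()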
+ """ + + class error(Exception): pass # Logical errors - debug required + class abort(error): pass # Service errors - close and retry + class readonly(abort): pass # Mailbox status changed to READ-ONLY + class _responsetimeout(TimeoutError): pass # No response during IDLE + + def __init__(self, host='', port=IMAP4_PORT, timeout=None): + self.debug = Debug + self.state = 'LOGOUT' + self.literal = None # A literal argument to a command + self.tagged_commands = {} # Tagged commands awaiting response + self.untagged_responses = {} # {typ: [data, ...], ...} + self.continuation_response = '' # Last continuation response + self._idle_responses = [] # Response queue for idle iteration + self._idle_capture = False # Whether to queue responses for idle + self.is_readonly = False # READ-ONLY desired state + self.tagnum = 0 + self._tls_established = False + self._mode_ascii() + self._readbuf = [] + + # Open socket to server. + + self.open(host, port, timeout) + + try: + self._connect() + except Exception: + try: + self.shutdown() + except OSError: + pass + raise + + def _mode_ascii(self): + self.utf8_enabled = False + self._encoding = 'ascii' + self.Literal = re.compile(_Literal, re.ASCII) + self.Untagged_status = re.compile(_Untagged_status, re.ASCII) + + + def _mode_utf8(self): + self.utf8_enabled = True + self._encoding = 'utf-8' + self.Literal = re.compile(_Literal) + self.Untagged_status = re.compile(_Untagged_status) + + + def _connect(self): + # Create unique tag for this session, + # and compile tagged response matcher. + + self.tagpre = Int2AP(random.randint(4096, 65535)) + self.tagre = re.compile(br'(?P' + + self.tagpre + + br'\d+) (?P[A-Z]+) (?P.*)', re.ASCII) + + # Get server welcome message, + # request and store CAPABILITY response. + + if __debug__: + self._cmd_log_len = 10 + self._cmd_log_idx = 0 + self._cmd_log = {} # Last '_cmd_log_len' interactions + if self.debug >= 1: + self._mesg('imaplib version %s' % __version__) + self._mesg('new IMAP4 connection, tag=%s' % self.tagpre) + + self.welcome = self._get_response() + if 'PREAUTH' in self.untagged_responses: + self.state = 'AUTH' + elif 'OK' in self.untagged_responses: + self.state = 'NONAUTH' + else: + raise self.error(self.welcome) + + self._get_capabilities() + if __debug__: + if self.debug >= 3: + self._mesg('CAPABILITIES: %r' % (self.capabilities,)) + + for version in AllowedVersions: + if not version in self.capabilities: + continue + self.PROTOCOL_VERSION = version + return + + raise self.error('server not IMAP4 compliant') + + + def __getattr__(self, attr): + # Allow UPPERCASE variants of IMAP4 command methods. + if attr in Commands: + return getattr(self, attr.lower()) + raise AttributeError("Unknown IMAP4 command: '%s'" % attr) + + def __enter__(self): + return self + + def __exit__(self, *args): + if self.state == "LOGOUT": + return + + try: + self.logout() + except OSError: + pass + + + # Overridable methods + + + def _create_socket(self, timeout): + # Default value of IMAP4.host is '', but socket.getaddrinfo() + # (which is used by socket.create_connection()) expects None + # as a default value for host. 
+ if timeout is not None and not timeout: + raise ValueError('Non-blocking socket (timeout=0) is not supported') + host = None if not self.host else self.host + sys.audit("imaplib.open", self, self.host, self.port) + address = (host, self.port) + if timeout is not None: + return socket.create_connection(address, timeout) + return socket.create_connection(address) + + def open(self, host='', port=IMAP4_PORT, timeout=None): + """Setup connection to remote server on "host:port" + (default: localhost:standard IMAP4 port). + This connection will be used by the routines: + read, readline, send, shutdown. + """ + self.host = host + self.port = port + self.sock = self._create_socket(timeout) + self._file = self.sock.makefile('rb') + + + @property + def file(self): + # The old 'file' attribute is no longer used now that we do our own + # read() and readline() buffering, with which it conflicts. + # As an undocumented interface, it should never have been accessed by + # external code, and therefore does not warrant deprecation. + # Nevertheless, we provide this property for now, to avoid suddenly + # breaking any code in the wild that might have been using it in a + # harmless way. + import warnings + warnings.warn( + 'IMAP4.file is unsupported, can cause errors, and may be removed.', + RuntimeWarning, + stacklevel=2) + return self._file + + + def read(self, size): + """Read 'size' bytes from remote.""" + # We need buffered read() to continue working after socket timeouts, + # since we use them during IDLE. Unfortunately, the standard library's + # SocketIO implementation makes this impossible, by setting a permanent + # error condition instead of letting the caller decide how to handle a + # timeout. We therefore implement our own buffered read(). + # https://github.com/python/cpython/issues/51571 + # + # Reading in chunks instead of delegating to a single + # BufferedReader.read() call also means we avoid its preallocation + # of an unreasonably large memory block if a malicious server claims + # it will send a huge literal without actually sending one. + # https://github.com/python/cpython/issues/119511 + + parts = [] + + while size > 0: + + if len(parts) < len(self._readbuf): + buf = self._readbuf[len(parts)] + else: + try: + buf = self.sock.recv(DEFAULT_BUFFER_SIZE) + except ConnectionError: + break + if not buf: + break + self._readbuf.append(buf) + + if len(buf) >= size: + parts.append(buf[:size]) + self._readbuf = [buf[size:]] + self._readbuf[len(parts):] + break + parts.append(buf) + size -= len(buf) + + return b''.join(parts) + + + def readline(self): + """Read line from remote.""" + # The comment in read() explains why we implement our own readline(). 
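+        # Buffered chunks accumulate in self._readbuf; each new chunk is
+        # scanned for LF, so a timeout mid-line leaves the data buffered
+        # for the next call instead of losing it.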
+ + LF = b'\n' + parts = [] + length = 0 + + while length < _MAXLINE: + + if len(parts) < len(self._readbuf): + buf = self._readbuf[len(parts)] + else: + try: + buf = self.sock.recv(DEFAULT_BUFFER_SIZE) + except ConnectionError: + break + if not buf: + break + self._readbuf.append(buf) + + pos = buf.find(LF) + if pos != -1: + pos += 1 + parts.append(buf[:pos]) + self._readbuf = [buf[pos:]] + self._readbuf[len(parts):] + break + parts.append(buf) + length += len(buf) + + line = b''.join(parts) + if len(line) > _MAXLINE: + raise self.error("got more than %d bytes" % _MAXLINE) + return line + + + def send(self, data): + """Send data to remote.""" + sys.audit("imaplib.send", self, data) + self.sock.sendall(data) + + + def shutdown(self): + """Close I/O established in "open".""" + self._file.close() + try: + self.sock.shutdown(socket.SHUT_RDWR) + except OSError as exc: + # The server might already have closed the connection. + # On Windows, this may result in WSAEINVAL (error 10022): + # An invalid operation was attempted. + if (exc.errno != errno.ENOTCONN + and getattr(exc, 'winerror', 0) != 10022): + raise + finally: + self.sock.close() + + + def socket(self): + """Return socket instance used to connect to IMAP4 server. + + socket = .socket() + """ + return self.sock + + + + # Utility methods + + + def recent(self): + """Return most recent 'RECENT' responses if any exist, + else prompt server for an update using the 'NOOP' command. + + (typ, [data]) = .recent() + + 'data' is None if no new messages, + else list of RECENT responses, most recent last. + """ + name = 'RECENT' + typ, dat = self._untagged_response('OK', [None], name) + if dat[-1]: + return typ, dat + typ, dat = self.noop() # Prod server for response + return self._untagged_response(typ, dat, name) + + + def response(self, code): + """Return data for response 'code' if received, or None. + + Old value for response 'code' is cleared. + + (code, [data]) = .response(code) + """ + return self._untagged_response(code, [None], code.upper()) + + + + # IMAP4 commands + + + def append(self, mailbox, flags, date_time, message): + """Append message to named mailbox. + + (typ, [data]) = .append(mailbox, flags, date_time, message) + + All args except 'message' can be None. + """ + name = 'APPEND' + if not mailbox: + mailbox = 'INBOX' + if flags: + if (flags[0],flags[-1]) != ('(',')'): + flags = '(%s)' % flags + else: + flags = None + if date_time: + date_time = Time2Internaldate(date_time) + else: + date_time = None + literal = MapCRLF.sub(CRLF, message) + self.literal = literal + return self._simple_command(name, mailbox, flags, date_time) + + + def authenticate(self, mechanism, authobject): + """Authenticate command - requires response processing. + + 'mechanism' specifies which authentication mechanism is to + be used - it must appear in .capabilities in the + form AUTH=. + + 'authobject' must be a callable object: + + data = authobject(response) + + It will be called to process server continuation responses; the + response argument it is passed will be a bytes. It should return bytes + data that will be base64 encoded and sent to the server. It should + return None if the client abort response '*' should be sent instead. + """ + mech = mechanism.upper() + # XXX: shouldn't this code be removed, not commented out? + #cap = 'AUTH=%s' % mech + #if not cap in self.capabilities: # Let the server decide! + # raise self.error("Server doesn't allow %s authentication." 
% mech) + self.literal = _Authenticator(authobject).process + typ, dat = self._simple_command('AUTHENTICATE', mech) + if typ != 'OK': + raise self.error(dat[-1].decode('utf-8', 'replace')) + self.state = 'AUTH' + return typ, dat + + + def capability(self): + """(typ, [data]) = .capability() + Fetch capabilities list from server.""" + + name = 'CAPABILITY' + typ, dat = self._simple_command(name) + return self._untagged_response(typ, dat, name) + + + def check(self): + """Checkpoint mailbox on server. + + (typ, [data]) = .check() + """ + return self._simple_command('CHECK') + + + def close(self): + """Close currently selected mailbox. + + Deleted messages are removed from writable mailbox. + This is the recommended command before 'LOGOUT'. + + (typ, [data]) = .close() + """ + try: + typ, dat = self._simple_command('CLOSE') + finally: + self.state = 'AUTH' + return typ, dat + + + def copy(self, message_set, new_mailbox): + """Copy 'message_set' messages onto end of 'new_mailbox'. + + (typ, [data]) = .copy(message_set, new_mailbox) + """ + return self._simple_command('COPY', message_set, new_mailbox) + + + def create(self, mailbox): + """Create new mailbox. + + (typ, [data]) = .create(mailbox) + """ + return self._simple_command('CREATE', mailbox) + + + def delete(self, mailbox): + """Delete old mailbox. + + (typ, [data]) = .delete(mailbox) + """ + return self._simple_command('DELETE', mailbox) + + def deleteacl(self, mailbox, who): + """Delete the ACLs (remove any rights) set for who on mailbox. + + (typ, [data]) = .deleteacl(mailbox, who) + """ + return self._simple_command('DELETEACL', mailbox, who) + + def enable(self, capability): + """Send an RFC5161 enable string to the server. + + (typ, [data]) = .enable(capability) + """ + if 'ENABLE' not in self.capabilities: + raise IMAP4.error("Server does not support ENABLE") + typ, data = self._simple_command('ENABLE', capability) + if typ == 'OK' and 'UTF8=ACCEPT' in capability.upper(): + self._mode_utf8() + return typ, data + + def expunge(self): + """Permanently remove deleted items from selected mailbox. + + Generates 'EXPUNGE' response for each deleted message. + + (typ, [data]) = .expunge() + + 'data' is list of 'EXPUNGE'd message numbers in order received. + """ + name = 'EXPUNGE' + typ, dat = self._simple_command(name) + return self._untagged_response(typ, dat, name) + + + def fetch(self, message_set, message_parts): + """Fetch (parts of) messages. + + (typ, [data, ...]) = .fetch(message_set, message_parts) + + 'message_parts' should be a string of selected parts + enclosed in parentheses, eg: "(UID BODY[TEXT])". + + 'data' are tuples of message part envelope and data. + """ + name = 'FETCH' + typ, dat = self._simple_command(name, message_set, message_parts) + return self._untagged_response(typ, dat, name) + + + def getacl(self, mailbox): + """Get the ACLs for a mailbox. + + (typ, [data]) = .getacl(mailbox) + """ + typ, dat = self._simple_command('GETACL', mailbox) + return self._untagged_response(typ, dat, 'ACL') + + + def getannotation(self, mailbox, entry, attribute): + """(typ, [data]) = .getannotation(mailbox, entry, attribute) + Retrieve ANNOTATIONs.""" + + typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute) + return self._untagged_response(typ, dat, 'ANNOTATION') + + + def getquota(self, root): + """Get the quota root's resource usage and limits. + + Part of the IMAP4 QUOTA extension defined in rfc2087. 
+ + (typ, [data]) = .getquota(root) + """ + typ, dat = self._simple_command('GETQUOTA', root) + return self._untagged_response(typ, dat, 'QUOTA') + + + def getquotaroot(self, mailbox): + """Get the list of quota roots for the named mailbox. + + (typ, [[QUOTAROOT responses...], [QUOTA responses]]) = .getquotaroot(mailbox) + """ + typ, dat = self._simple_command('GETQUOTAROOT', mailbox) + typ, quota = self._untagged_response(typ, dat, 'QUOTA') + typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT') + return typ, [quotaroot, quota] + + + def idle(self, duration=None): + """Return an iterable IDLE context manager producing untagged responses. + If the argument is not None, limit iteration to 'duration' seconds. + + with M.idle(duration=29 * 60) as idler: + for typ, data in idler: + print(typ, data) + + Note: 'duration' requires a socket connection (not IMAP4_stream). + """ + return Idler(self, duration) + + + def list(self, directory='""', pattern='*'): + """List mailbox names in directory matching pattern. + + (typ, [data]) = .list(directory='""', pattern='*') + + 'data' is list of LIST responses. + """ + name = 'LIST' + typ, dat = self._simple_command(name, directory, pattern) + return self._untagged_response(typ, dat, name) + + + def login(self, user, password): + """Identify client using plaintext password. + + (typ, [data]) = .login(user, password) + + NB: 'password' will be quoted. + """ + typ, dat = self._simple_command('LOGIN', user, self._quote(password)) + if typ != 'OK': + raise self.error(dat[-1]) + self.state = 'AUTH' + return typ, dat + + + def login_cram_md5(self, user, password): + """ Force use of CRAM-MD5 authentication. + + (typ, [data]) = .login_cram_md5(user, password) + """ + self.user, self.password = user, password + return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH) + + + def _CRAM_MD5_AUTH(self, challenge): + """ Authobject to use with CRAM-MD5 authentication. """ + import hmac + + if isinstance(self.password, str): + password = self.password.encode('utf-8') + else: + password = self.password + + try: + authcode = hmac.HMAC(password, challenge, 'md5') + except ValueError: # HMAC-MD5 is not available + raise self.error("CRAM-MD5 authentication is not supported") + return f"{self.user} {authcode.hexdigest()}" + + + def logout(self): + """Shutdown connection to server. + + (typ, [data]) = .logout() + + Returns server 'BYE' response. + """ + self.state = 'LOGOUT' + typ, dat = self._simple_command('LOGOUT') + self.shutdown() + return typ, dat + + + def lsub(self, directory='""', pattern='*'): + """List 'subscribed' mailbox names in directory matching pattern. + + (typ, [data, ...]) = .lsub(directory='""', pattern='*') + + 'data' are tuples of message part envelope and data. + """ + name = 'LSUB' + typ, dat = self._simple_command(name, directory, pattern) + return self._untagged_response(typ, dat, name) + + def myrights(self, mailbox): + """Show my ACLs for a mailbox (i.e. the rights that I have on mailbox). + + (typ, [data]) = .myrights(mailbox) + """ + typ,dat = self._simple_command('MYRIGHTS', mailbox) + return self._untagged_response(typ, dat, 'MYRIGHTS') + + def namespace(self): + """ Returns IMAP namespaces ala rfc2342 + + (typ, [data, ...]) = .namespace() + """ + name = 'NAMESPACE' + typ, dat = self._simple_command(name) + return self._untagged_response(typ, dat, name) + + + def noop(self): + """Send NOOP command. 
+ + (typ, [data]) = .noop() + """ + if __debug__: + if self.debug >= 3: + self._dump_ur(self.untagged_responses) + return self._simple_command('NOOP') + + + def partial(self, message_num, message_part, start, length): + """Fetch truncated part of a message. + + (typ, [data, ...]) = .partial(message_num, message_part, start, length) + + 'data' is tuple of message part envelope and data. + """ + name = 'PARTIAL' + typ, dat = self._simple_command(name, message_num, message_part, start, length) + return self._untagged_response(typ, dat, 'FETCH') + + + def proxyauth(self, user): + """Assume authentication as "user". + + Allows an authorised administrator to proxy into any user's + mailbox. + + (typ, [data]) = .proxyauth(user) + """ + + name = 'PROXYAUTH' + return self._simple_command('PROXYAUTH', user) + + + def rename(self, oldmailbox, newmailbox): + """Rename old mailbox name to new. + + (typ, [data]) = .rename(oldmailbox, newmailbox) + """ + return self._simple_command('RENAME', oldmailbox, newmailbox) + + + def search(self, charset, *criteria): + """Search mailbox for matching messages. + + (typ, [data]) = .search(charset, criterion, ...) + + 'data' is space separated list of matching message numbers. + If UTF8 is enabled, charset MUST be None. + """ + name = 'SEARCH' + if charset: + if self.utf8_enabled: + raise IMAP4.error("Non-None charset not valid in UTF8 mode") + typ, dat = self._simple_command(name, 'CHARSET', charset, *criteria) + else: + typ, dat = self._simple_command(name, *criteria) + return self._untagged_response(typ, dat, name) + + + def select(self, mailbox='INBOX', readonly=False): + """Select a mailbox. + + Flush all untagged responses. + + (typ, [data]) = .select(mailbox='INBOX', readonly=False) + + 'data' is count of messages in mailbox ('EXISTS' response). + + Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so + other responses should be obtained via .response('FLAGS') etc. + """ + self.untagged_responses = {} # Flush old responses. + self.is_readonly = readonly + if readonly: + name = 'EXAMINE' + else: + name = 'SELECT' + typ, dat = self._simple_command(name, mailbox) + if typ != 'OK': + self.state = 'AUTH' # Might have been 'SELECTED' + return typ, dat + self.state = 'SELECTED' + if 'READ-ONLY' in self.untagged_responses \ + and not readonly: + if __debug__: + if self.debug >= 1: + self._dump_ur(self.untagged_responses) + raise self.readonly('%s is not writable' % mailbox) + return typ, self.untagged_responses.get('EXISTS', [None]) + + + def setacl(self, mailbox, who, what): + """Set a mailbox acl. + + (typ, [data]) = .setacl(mailbox, who, what) + """ + return self._simple_command('SETACL', mailbox, who, what) + + + def setannotation(self, *args): + """(typ, [data]) = .setannotation(mailbox[, entry, attribute]+) + Set ANNOTATIONs.""" + + typ, dat = self._simple_command('SETANNOTATION', *args) + return self._untagged_response(typ, dat, 'ANNOTATION') + + + def setquota(self, root, limits): + """Set the quota root's resource limits. + + (typ, [data]) = .setquota(root, limits) + """ + typ, dat = self._simple_command('SETQUOTA', root, limits) + return self._untagged_response(typ, dat, 'QUOTA') + + + def sort(self, sort_criteria, charset, *search_criteria): + """IMAP4rev1 extension SORT command. + + (typ, [data]) = .sort(sort_criteria, charset, search_criteria, ...) + """ + name = 'SORT' + #if not name in self.capabilities: # Let the server decide! 
+ # raise self.error('unimplemented extension command: %s' % name) + if (sort_criteria[0],sort_criteria[-1]) != ('(',')'): + sort_criteria = '(%s)' % sort_criteria + typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria) + return self._untagged_response(typ, dat, name) + + + def starttls(self, ssl_context=None): + name = 'STARTTLS' + if not HAVE_SSL: + raise self.error('SSL support missing') + if self._tls_established: + raise self.abort('TLS session already established') + if name not in self.capabilities: + raise self.abort('TLS not supported by server') + # Generate a default SSL context if none was passed. + if ssl_context is None: + ssl_context = ssl._create_stdlib_context() + typ, dat = self._simple_command(name) + if typ == 'OK': + self.sock = ssl_context.wrap_socket(self.sock, + server_hostname=self.host) + self._file = self.sock.makefile('rb') + self._tls_established = True + self._get_capabilities() + else: + raise self.error("Couldn't establish TLS session") + return self._untagged_response(typ, dat, name) + + + def status(self, mailbox, names): + """Request named status conditions for mailbox. + + (typ, [data]) = .status(mailbox, names) + """ + name = 'STATUS' + #if self.PROTOCOL_VERSION == 'IMAP4': # Let the server decide! + # raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name) + typ, dat = self._simple_command(name, mailbox, names) + return self._untagged_response(typ, dat, name) + + + def store(self, message_set, command, flags): + """Alters flag dispositions for messages in mailbox. + + (typ, [data]) = .store(message_set, command, flags) + """ + if (flags[0],flags[-1]) != ('(',')'): + flags = '(%s)' % flags # Avoid quoting the flags + typ, dat = self._simple_command('STORE', message_set, command, flags) + return self._untagged_response(typ, dat, 'FETCH') + + + def subscribe(self, mailbox): + """Subscribe to new mailbox. + + (typ, [data]) = .subscribe(mailbox) + """ + return self._simple_command('SUBSCRIBE', mailbox) + + + def thread(self, threading_algorithm, charset, *search_criteria): + """IMAPrev1 extension THREAD command. + + (type, [data]) = .thread(threading_algorithm, charset, search_criteria, ...) + """ + name = 'THREAD' + typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria) + return self._untagged_response(typ, dat, name) + + + def uid(self, command, *args): + """Execute "command arg ..." with messages identified by UID, + rather than message number. + + (typ, [data]) = .uid(command, arg1, arg2, ...) + + Returns response appropriate to 'command'. + """ + command = command.upper() + if not command in Commands: + raise self.error("Unknown IMAP4 UID command: %s" % command) + if self.state not in Commands[command]: + raise self.error("command %s illegal in state %s, " + "only allowed in states %s" % + (command, self.state, + ', '.join(Commands[command]))) + name = 'UID' + typ, dat = self._simple_command(name, command, *args) + if command in ('SEARCH', 'SORT', 'THREAD'): + name = command + else: + name = 'FETCH' + return self._untagged_response(typ, dat, name) + + + def unsubscribe(self, mailbox): + """Unsubscribe from old mailbox. + + (typ, [data]) = .unsubscribe(mailbox) + """ + return self._simple_command('UNSUBSCRIBE', mailbox) + + + def unselect(self): + """Free server's resources associated with the selected mailbox + and returns the server to the authenticated state. 
+ This command performs the same actions as CLOSE, except + that no messages are permanently removed from the currently + selected mailbox. + + (typ, [data]) = .unselect() + """ + try: + typ, data = self._simple_command('UNSELECT') + finally: + self.state = 'AUTH' + return typ, data + + + def xatom(self, name, *args): + """Allow simple extension commands + notified by server in CAPABILITY response. + + Assumes command is legal in current state. + + (typ, [data]) = .xatom(name, arg, ...) + + Returns response appropriate to extension command 'name'. + """ + name = name.upper() + #if not name in self.capabilities: # Let the server decide! + # raise self.error('unknown extension command: %s' % name) + if not name in Commands: + Commands[name] = (self.state,) + return self._simple_command(name, *args) + + + + # Private methods + + + def _append_untagged(self, typ, dat): + if dat is None: + dat = b'' + + # During idle, queue untagged responses for delivery via iteration + if self._idle_capture: + # Responses containing literal strings are passed to us one data + # fragment at a time, while others arrive in a single call. + if (not self._idle_responses or + isinstance(self._idle_responses[-1][1][-1], bytes)): + # We are not continuing a fragmented response; start a new one + self._idle_responses.append((typ, [dat])) + else: + # We are continuing a fragmented response; append the fragment + response = self._idle_responses[-1] + assert response[0] == typ + response[1].append(dat) + if __debug__ and self.debug >= 5: + self._mesg(f'idle: queue untagged {typ} {dat!r}') + return + + ur = self.untagged_responses + if __debug__: + if self.debug >= 5: + self._mesg('untagged_responses[%s] %s += ["%r"]' % + (typ, len(ur.get(typ,'')), dat)) + if typ in ur: + ur[typ].append(dat) + else: + ur[typ] = [dat] + + + def _check_bye(self): + bye = self.untagged_responses.get('BYE') + if bye: + raise self.abort(bye[-1].decode(self._encoding, 'replace')) + + + def _command(self, name, *args): + + if self.state not in Commands[name]: + self.literal = None + raise self.error("command %s illegal in state %s, " + "only allowed in states %s" % + (name, self.state, + ', '.join(Commands[name]))) + + for typ in ('OK', 'NO', 'BAD'): + if typ in self.untagged_responses: + del self.untagged_responses[typ] + + if 'READ-ONLY' in self.untagged_responses \ + and not self.is_readonly: + raise self.readonly('mailbox status changed to READ-ONLY') + + tag = self._new_tag() + name = bytes(name, self._encoding) + data = tag + b' ' + name + for arg in args: + if arg is None: continue + if isinstance(arg, str): + arg = bytes(arg, self._encoding) + data = data + b' ' + arg + + literal = self.literal + if literal is not None: + self.literal = None + if type(literal) is type(self._command): + literator = literal + else: + literator = None + if self.utf8_enabled: + data = data + bytes(' UTF8 (~{%s}' % len(literal), self._encoding) + literal = literal + b')' + else: + data = data + bytes(' {%s}' % len(literal), self._encoding) + + if __debug__: + if self.debug >= 4: + self._mesg('> %r' % data) + else: + self._log('> %r' % data) + + try: + self.send(data + CRLF) + except OSError as val: + raise self.abort('socket error: %s' % val) + + if literal is None: + return tag + + while 1: + # Wait for continuation response + + while self._get_response(): + if self.tagged_commands[tag]: # BAD/NO? 
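+                    # The server completed the command (e.g. NO or BAD)
+                    # instead of sending the '+' continuation request for
+                    # the literal; return the tag so the caller sees it.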
+ return tag + + # Send literal + + if literator: + literal = literator(self.continuation_response) + + if __debug__: + if self.debug >= 4: + self._mesg('write literal size %s' % len(literal)) + + try: + self.send(literal) + self.send(CRLF) + except OSError as val: + raise self.abort('socket error: %s' % val) + + if not literator: + break + + return tag + + + def _command_complete(self, name, tag): + logout = (name == 'LOGOUT') + # BYE is expected after LOGOUT + if not logout: + self._check_bye() + try: + typ, data = self._get_tagged_response(tag, expect_bye=logout) + except self.abort as val: + raise self.abort('command: %s => %s' % (name, val)) + except self.error as val: + raise self.error('command: %s => %s' % (name, val)) + if not logout: + self._check_bye() + if typ == 'BAD': + raise self.error('%s command error: %s %s' % (name, typ, data)) + return typ, data + + + def _get_capabilities(self): + typ, dat = self.capability() + if dat == [None]: + raise self.error('no CAPABILITY response from server') + dat = str(dat[-1], self._encoding) + dat = dat.upper() + self.capabilities = tuple(dat.split()) + + + def _get_response(self, start_timeout=False): + + # Read response and store. + # + # Returns None for continuation responses, + # otherwise first response line received. + # + # If start_timeout is given, temporarily uses it as a socket + # timeout while waiting for the start of a response, raising + # _responsetimeout if one doesn't arrive. (Used by Idler.) + + if start_timeout is not False and self.sock: + assert start_timeout is None or start_timeout > 0 + saved_timeout = self.sock.gettimeout() + self.sock.settimeout(start_timeout) + try: + resp = self._get_line() + except TimeoutError as err: + raise self._responsetimeout from err + finally: + self.sock.settimeout(saved_timeout) + else: + resp = self._get_line() + + # Command completion response? + + if self._match(self.tagre, resp): + tag = self.mo.group('tag') + if not tag in self.tagged_commands: + raise self.abort('unexpected tagged response: %r' % resp) + + typ = self.mo.group('type') + typ = str(typ, self._encoding) + dat = self.mo.group('data') + self.tagged_commands[tag] = (typ, [dat]) + else: + dat2 = None + + # '*' (untagged) responses? + + if not self._match(Untagged_response, resp): + if self._match(self.Untagged_status, resp): + dat2 = self.mo.group('data2') + + if self.mo is None: + # Only other possibility is '+' (continuation) response... + + if self._match(Continuation, resp): + self.continuation_response = self.mo.group('data') + return None # NB: indicates continuation + + raise self.abort("unexpected response: %r" % resp) + + typ = self.mo.group('type') + typ = str(typ, self._encoding) + dat = self.mo.group('data') + if dat is None: dat = b'' # Null untagged response + if dat2: dat = dat + b' ' + dat2 + + # Is there a literal to come? + + while self._match(self.Literal, dat): + + # Read literal direct from connection. + + size = int(self.mo.group('size')) + if __debug__: + if self.debug >= 4: + self._mesg('read literal size %s' % size) + data = self.read(size) + + # Store response with literal as tuple + + self._append_untagged(typ, (dat, data)) + + # Read trailer - possibly containing another literal + + dat = self._get_line() + + self._append_untagged(typ, dat) + + # Bracketed response information? 
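+        # e.g. the line "* OK [UNSEEN 12] Message 12 is first unseen"
+        # carries an UNSEEN response code inside an untagged OK response.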
+ + if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat): + typ = self.mo.group('type') + typ = str(typ, self._encoding) + self._append_untagged(typ, self.mo.group('data')) + + if __debug__: + if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'): + self._mesg('%s response: %r' % (typ, dat)) + + return resp + + + def _get_tagged_response(self, tag, expect_bye=False): + + while 1: + result = self.tagged_commands[tag] + if result is not None: + del self.tagged_commands[tag] + return result + + if expect_bye: + typ = 'BYE' + bye = self.untagged_responses.pop(typ, None) + if bye is not None: + # Server replies to the "LOGOUT" command with "BYE" + return (typ, bye) + + # If we've seen a BYE at this point, the socket will be + # closed, so report the BYE now. + self._check_bye() + + # Some have reported "unexpected response" exceptions. + # Note that ignoring them here causes loops. + # Instead, send me details of the unexpected response and + # I'll update the code in '_get_response()'. + + try: + self._get_response() + except self.abort as val: + if __debug__: + if self.debug >= 1: + self.print_log() + raise + + + def _get_line(self): + + line = self.readline() + if not line: + raise self.abort('socket error: EOF') + + # Protocol mandates all lines terminated by CRLF + if not line.endswith(b'\r\n'): + raise self.abort('socket error: unterminated line: %r' % line) + + line = line[:-2] + if __debug__: + if self.debug >= 4: + self._mesg('< %r' % line) + else: + self._log('< %r' % line) + return line + + + def _match(self, cre, s): + + # Run compiled regular expression match method on 's'. + # Save result, return success. + + self.mo = cre.match(s) + if __debug__: + if self.mo is not None and self.debug >= 5: + self._mesg("\tmatched %r => %r" % (cre.pattern, self.mo.groups())) + return self.mo is not None + + + def _new_tag(self): + + tag = self.tagpre + bytes(str(self.tagnum), self._encoding) + self.tagnum = self.tagnum + 1 + self.tagged_commands[tag] = None + return tag + + + def _quote(self, arg): + + arg = arg.replace('\\', '\\\\') + arg = arg.replace('"', '\\"') + + return '"' + arg + '"' + + + def _simple_command(self, name, *args): + + return self._command_complete(name, self._command(name, *args)) + + + def _untagged_response(self, typ, dat, name): + if typ == 'NO': + return typ, dat + if not name in self.untagged_responses: + return typ, [None] + data = self.untagged_responses.pop(name) + if __debug__: + if self.debug >= 5: + self._mesg('untagged_responses[%s] => %s' % (name, data)) + return typ, data + + + if __debug__: + + def _mesg(self, s, secs=None): + if secs is None: + secs = time.time() + tm = time.strftime('%M:%S', time.localtime(secs)) + sys.stderr.write(' %s.%02d %s\n' % (tm, (secs*100)%100, s)) + sys.stderr.flush() + + def _dump_ur(self, untagged_resp_dict): + if not untagged_resp_dict: + return + items = (f'{key}: {value!r}' + for key, value in untagged_resp_dict.items()) + self._mesg('untagged responses dump:' + '\n\t\t'.join(items)) + + def _log(self, line): + # Keep log of last '_cmd_log_len' interactions for debugging. 
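+        # The log is a fixed-size ring buffer: _cmd_log_idx wraps to 0 once
+        # _cmd_log_len entries have been written, overwriting the oldest.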
+ self._cmd_log[self._cmd_log_idx] = (line, time.time()) + self._cmd_log_idx += 1 + if self._cmd_log_idx >= self._cmd_log_len: + self._cmd_log_idx = 0 + + def print_log(self): + self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log)) + i, n = self._cmd_log_idx, self._cmd_log_len + while n: + try: + self._mesg(*self._cmd_log[i]) + except: + pass + i += 1 + if i >= self._cmd_log_len: + i = 0 + n -= 1 + + +class Idler: + """Iterable IDLE context manager: start IDLE & produce untagged responses. + + An object of this type is returned by the IMAP4.idle() method. + + Note: The name and structure of this class are subject to change. + """ + + def __init__(self, imap, duration=None): + if 'IDLE' not in imap.capabilities: + raise imap.error("Server does not support IMAP4 IDLE") + if duration is not None and not imap.sock: + # IMAP4_stream pipes don't support timeouts + raise imap.error('duration requires a socket connection') + self._duration = duration + self._deadline = None + self._imap = imap + self._tag = None + self._saved_state = None + + def __enter__(self): + imap = self._imap + assert not imap._idle_responses + assert not imap._idle_capture + + if __debug__ and imap.debug >= 4: + imap._mesg(f'idle start duration={self._duration}') + + # Start capturing untagged responses before sending IDLE, + # so we can deliver via iteration any that arrive while + # the IDLE command continuation request is still pending. + imap._idle_capture = True + + try: + self._tag = imap._command('IDLE') + # As with any command, the server is allowed to send us unrelated, + # untagged responses before acting on IDLE. These lines will be + # returned by _get_response(). When the server is ready, it will + # send an IDLE continuation request, indicated by _get_response() + # returning None. We therefore process responses in a loop until + # this occurs. + while resp := imap._get_response(): + if imap.tagged_commands[self._tag]: + typ, data = imap.tagged_commands.pop(self._tag) + if typ == 'NO': + raise imap.error(f'idle denied: {data}') + raise imap.abort(f'unexpected status response: {resp}') + + if __debug__ and imap.debug >= 4: + prompt = imap.continuation_response + imap._mesg(f'idle continuation prompt: {prompt}') + except BaseException: + imap._idle_capture = False + raise + + if self._duration is not None: + self._deadline = time.monotonic() + self._duration + + self._saved_state = imap.state + imap.state = 'IDLING' + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + imap = self._imap + + if __debug__ and imap.debug >= 4: + imap._mesg('idle done') + imap.state = self._saved_state + + # Stop intercepting untagged responses before sending DONE, + # since we can no longer deliver them via iteration. + imap._idle_capture = False + + # If we captured untagged responses while the IDLE command + # continuation request was still pending, but the user did not + # iterate over them before exiting IDLE, we must put them + # someplace where the user can retrieve them. The only + # sensible place for this is the untagged_responses dict, + # despite its unfortunate inability to preserve the relative + # order of different response types. 
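
Illustrative sketch (not part of the patched file): the Idler class being implemented here is what the new IMAP4.idle() method returns; the __exit__ cleanup described in the comment above continues right after this note. A minimal usage sketch, assuming a reachable server that advertises IDLE; the host and credentials are hypothetical:

    import imaplib

    with imaplib.IMAP4_SSL('imap.example.org') as M:   # hypothetical host
        M.login('user', 'password')                    # hypothetical credentials
        M.select('INBOX')
        # Iterate untagged responses for up to five minutes, then leave IDLE.
        with M.idle(duration=5 * 60) as idler:
            for typ, data in idler:
                print(typ, data)   # e.g. ('EXISTS', [b'42'])
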
+        if leftovers := len(imap._idle_responses):
+            if __debug__ and imap.debug >= 4:
+                imap._mesg(f'idle quit with {leftovers} leftover responses')
+            while imap._idle_responses:
+                typ, data = imap._idle_responses.pop(0)
+                # Append one fragment at a time, just as _get_response() does
+                for datum in data:
+                    imap._append_untagged(typ, datum)
+
+        try:
+            imap.send(b'DONE' + CRLF)
+            status, [msg] = imap._command_complete('IDLE', self._tag)
+            if __debug__ and imap.debug >= 4:
+                imap._mesg(f'idle status: {status} {msg!r}')
+        except OSError:
+            if not exc_type:
+                raise
+
+        return False # Do not suppress context body exceptions
+
+    def __iter__(self):
+        return self
+
+    def _pop(self, timeout, default=('', None)):
+        # Get the next response, or a default value on timeout.
+        # The timeout arg can be an int or float, or None for no timeout.
+        # Timeouts require a socket connection (not IMAP4_stream).
+        # This method ignores self._duration.
+
+        # Historical Note:
+        # The timeout was originally implemented using select() after
+        # checking for the presence of already-buffered data.
+        # That allowed timeouts on pipe connections like IMAP4_stream.
+        # However, it seemed possible that SSL data arriving without any
+        # IMAP data afterward could cause select() to indicate available
+        # application data when there was none, leading to a read() call
+        # that would block with no timeout. It was unclear under what
+        # conditions this would happen in practice. Our implementation was
+        # changed to use socket timeouts instead of select(), just to be
+        # safe.
+
+        imap = self._imap
+        if imap.state != 'IDLING':
+            raise imap.error('_pop() only works during IDLE')
+
+        if imap._idle_responses:
+            # Response is ready to return to the user
+            resp = imap._idle_responses.pop(0)
+            if __debug__ and imap.debug >= 4:
+                imap._mesg(f'idle _pop({timeout}) de-queued {resp[0]}')
+            return resp
+
+        if __debug__ and imap.debug >= 4:
+            imap._mesg(f'idle _pop({timeout}) reading')
+
+        if timeout is not None:
+            if timeout <= 0:
+                return default
+            timeout = float(timeout) # Required by socket.settimeout()
+
+        try:
+            imap._get_response(timeout) # Reads line, calls _append_untagged()
+        except IMAP4._responsetimeout:
+            if __debug__ and imap.debug >= 4:
+                imap._mesg(f'idle _pop({timeout}) done')
+            return default
+
+        resp = imap._idle_responses.pop(0)
+
+        if __debug__ and imap.debug >= 4:
+            imap._mesg(f'idle _pop({timeout}) read {resp[0]}')
+        return resp
+
+    def __next__(self):
+        imap = self._imap
+
+        if self._duration is None:
+            timeout = None
+        else:
+            timeout = self._deadline - time.monotonic()
+        typ, data = self._pop(timeout)
+
+        if not typ:
+            if __debug__ and imap.debug >= 4:
+                imap._mesg('idle iterator exhausted')
+            raise StopIteration
+
+        return typ, data
+
+    def burst(self, interval=0.1):
+        """Yield a burst of responses no more than 'interval' seconds apart.
+
+        with M.idle() as idler:
+            # get a response and any others following by < 0.1 seconds
+            batch = list(idler.burst())
+            print(f'processing {len(batch)} responses...')
+            print(batch)
+
+        Note: This generator requires a socket connection (not IMAP4_stream).
+ """ + if not self._imap.sock: + raise self._imap.error('burst() requires a socket connection') + + try: + yield next(self) + except StopIteration: + return + + while response := self._pop(interval, None): + yield response + + +if HAVE_SSL: + + class IMAP4_SSL(IMAP4): + + """IMAP4 client class over SSL connection + + Instantiate with: IMAP4_SSL([host[, port[, ssl_context[, timeout=None]]]]) + + host - host's name (default: localhost); + port - port number (default: standard IMAP4 SSL port); + ssl_context - a SSLContext object that contains your certificate chain + and private key (default: None) + timeout - socket timeout (default: None) If timeout is not given or is None, + the global default socket timeout is used + + for more documentation see the docstring of the parent class IMAP4. + """ + + + def __init__(self, host='', port=IMAP4_SSL_PORT, + *, ssl_context=None, timeout=None): + if ssl_context is None: + ssl_context = ssl._create_stdlib_context() + self.ssl_context = ssl_context + IMAP4.__init__(self, host, port, timeout) + + def _create_socket(self, timeout): + sock = IMAP4._create_socket(self, timeout) + return self.ssl_context.wrap_socket(sock, + server_hostname=self.host) + + def open(self, host='', port=IMAP4_SSL_PORT, timeout=None): + """Setup connection to remote server on "host:port". + (default: localhost:standard IMAP4 SSL port). + This connection will be used by the routines: + read, readline, send, shutdown. + """ + IMAP4.open(self, host, port, timeout) + + __all__.append("IMAP4_SSL") + + +class IMAP4_stream(IMAP4): + + """IMAP4 client class over a stream + + Instantiate with: IMAP4_stream(command) + + "command" - a string that can be passed to subprocess.Popen() + + for more documentation see the docstring of the parent class IMAP4. + """ + + + def __init__(self, command): + self.command = command + IMAP4.__init__(self) + + + def open(self, host=None, port=None, timeout=None): + """Setup a stream connection. + This connection will be used by the routines: + read, readline, send, shutdown. + """ + self.host = None # For compatibility with parent class + self.port = None + self.sock = None + self._file = None + self.process = subprocess.Popen(self.command, + bufsize=DEFAULT_BUFFER_SIZE, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + shell=True, close_fds=True) + self.writefile = self.process.stdin + self.readfile = self.process.stdout + + def read(self, size): + """Read 'size' bytes from remote.""" + return self.readfile.read(size) + + + def readline(self): + """Read line from remote.""" + return self.readfile.readline() + + + def send(self, data): + """Send data to remote.""" + self.writefile.write(data) + self.writefile.flush() + + + def shutdown(self): + """Close I/O established in "open".""" + self.readfile.close() + self.writefile.close() + self.process.wait() + + + +class _Authenticator: + + """Private class to provide en/decoding + for base64-based authentication conversation. + """ + + def __init__(self, mechinst): + self.mech = mechinst # Callable object to provide/process data + + def process(self, data): + ret = self.mech(self.decode(data)) + if ret is None: + return b'*' # Abort conversation + return self.encode(ret) + + def encode(self, inp): + # + # Invoke binascii.b2a_base64 iteratively with + # short even length buffers, strip the trailing + # line feed from the result and append. "Even" + # means a number that factors to both 6 and 8, + # so when it gets to the end of the 8-bit input + # there's no partial 6-bit output. 
+ # + oup = b'' + if isinstance(inp, str): + inp = inp.encode('utf-8') + while inp: + if len(inp) > 48: + t = inp[:48] + inp = inp[48:] + else: + t = inp + inp = b'' + e = binascii.b2a_base64(t) + if e: + oup = oup + e[:-1] + return oup + + def decode(self, inp): + if not inp: + return b'' + return binascii.a2b_base64(inp) + +Months = ' Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split(' ') +Mon2num = {s.encode():n+1 for n, s in enumerate(Months[1:])} + +def Internaldate2tuple(resp): + """Parse an IMAP4 INTERNALDATE string. + + Return corresponding local time. The return value is a + time.struct_time tuple or None if the string has wrong format. + """ + + mo = InternalDate.match(resp) + if not mo: + return None + + mon = Mon2num[mo.group('mon')] + zonen = mo.group('zonen') + + day = int(mo.group('day')) + year = int(mo.group('year')) + hour = int(mo.group('hour')) + min = int(mo.group('min')) + sec = int(mo.group('sec')) + zoneh = int(mo.group('zoneh')) + zonem = int(mo.group('zonem')) + + # INTERNALDATE timezone must be subtracted to get UT + + zone = (zoneh*60 + zonem)*60 + if zonen == b'-': + zone = -zone + + tt = (year, mon, day, hour, min, sec, -1, -1, -1) + utc = calendar.timegm(tt) - zone + + return time.localtime(utc) + + + +def Int2AP(num): + + """Convert integer to A-P string representation.""" + + val = b''; AP = b'ABCDEFGHIJKLMNOP' + num = int(abs(num)) + while num: + num, mod = divmod(num, 16) + val = AP[mod:mod+1] + val + return val + + + +def ParseFlags(resp): + + """Convert IMAP4 flags response to python tuple.""" + + mo = Flags.match(resp) + if not mo: + return () + + return tuple(mo.group('flags').split()) + + +def Time2Internaldate(date_time): + + """Convert date_time to IMAP4 INTERNALDATE representation. + + Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The + date_time argument can be a number (int or float) representing + seconds since epoch (as returned by time.time()), a 9-tuple + representing local time, an instance of time.struct_time (as + returned by time.localtime()), an aware datetime instance or a + double-quoted string. In the last case, it is assumed to already + be in the correct format. 
+ """ + if isinstance(date_time, (int, float)): + dt = datetime.fromtimestamp(date_time, + timezone.utc).astimezone() + elif isinstance(date_time, tuple): + try: + gmtoff = date_time.tm_gmtoff + except AttributeError: + if time.daylight: + dst = date_time[8] + if dst == -1: + dst = time.localtime(time.mktime(date_time))[8] + gmtoff = -(time.timezone, time.altzone)[dst] + else: + gmtoff = -time.timezone + delta = timedelta(seconds=gmtoff) + dt = datetime(*date_time[:6], tzinfo=timezone(delta)) + elif isinstance(date_time, datetime): + if date_time.tzinfo is None: + raise ValueError("date_time must be aware") + dt = date_time + elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'): + return date_time # Assume in correct format + else: + raise ValueError("date_time not of a known type") + fmt = '"%d-{}-%Y %H:%M:%S %z"'.format(Months[dt.month]) + return dt.strftime(fmt) + + + +if __name__ == '__main__': + + # To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]' + # or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"' + # to test the IMAP4_stream class + + import getopt, getpass + + try: + optlist, args = getopt.getopt(sys.argv[1:], 'd:s:') + except getopt.error as val: + optlist, args = (), () + + stream_command = None + for opt,val in optlist: + if opt == '-d': + Debug = int(val) + elif opt == '-s': + stream_command = val + if not args: args = (stream_command,) + + if not args: args = ('',) + + host = args[0] + + USER = getpass.getuser() + PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost")) + + test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'} + test_seq1 = ( + ('login', (USER, PASSWD)), + ('create', ('/tmp/xxx 1',)), + ('rename', ('/tmp/xxx 1', '/tmp/yyy')), + ('CREATE', ('/tmp/yyz 2',)), + ('append', ('/tmp/yyz 2', None, None, test_mesg)), + ('list', ('/tmp', 'yy*')), + ('select', ('/tmp/yyz 2',)), + ('search', (None, 'SUBJECT', 'test')), + ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')), + ('store', ('1', 'FLAGS', r'(\Deleted)')), + ('namespace', ()), + ('expunge', ()), + ('recent', ()), + ('close', ()), + ) + + test_seq2 = ( + ('select', ()), + ('response',('UIDVALIDITY',)), + ('uid', ('SEARCH', 'ALL')), + ('response', ('EXISTS',)), + ('append', (None, None, None, test_mesg)), + ('recent', ()), + ('logout', ()), + ) + + def run(cmd, args): + M._mesg('%s %s' % (cmd, args)) + typ, dat = getattr(M, cmd)(*args) + M._mesg('%s => %s %s' % (cmd, typ, dat)) + if typ == 'NO': raise dat[0] + return dat + + try: + if stream_command: + M = IMAP4_stream(stream_command) + else: + M = IMAP4(host) + if M.state == 'AUTH': + test_seq1 = test_seq1[1:] # Login not needed + M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION) + M._mesg('CAPABILITIES = %r' % (M.capabilities,)) + + for cmd,args in test_seq1: + run(cmd, args) + + for ml in run('list', ('/tmp/', 'yy%')): + mo = re.match(r'.*"([^"]+)"$', ml) + if mo: path = mo.group(1) + else: path = ml.split()[-1] + run('delete', (path,)) + + for cmd,args in test_seq2: + dat = run(cmd, args) + + if (cmd,args) != ('uid', ('SEARCH', 'ALL')): + continue + + uid = dat[-1].split() + if not uid: continue + run('uid', ('FETCH', '%s' % uid[-1], + '(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)')) + + print('\nAll tests OK.') + + except: + print('\nTests failed.') + + if not Debug: + print(''' +If you would like to see debugging output, +try: %s -d5 +''' % sys.argv[0]) + + raise diff --git 
a/Python313_13_x86_Template/Lib/importlib/__init__.py b/Python314_4_x86_Template/Lib/importlib/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/__init__.py rename to Python314_4_x86_Template/Lib/importlib/__init__.py diff --git a/Python314_4_x86_Template/Lib/importlib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..b967b9af Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/__pycache__/_abc.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/__pycache__/_abc.cpython-314.pyc new file mode 100644 index 00000000..ac8e2b02 Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/__pycache__/_abc.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/__pycache__/abc.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/__pycache__/abc.cpython-314.pyc new file mode 100644 index 00000000..d0a3b4a3 Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/__pycache__/abc.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/__pycache__/readers.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/__pycache__/readers.cpython-314.pyc new file mode 100644 index 00000000..0d280350 Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/__pycache__/readers.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/importlib/_abc.py b/Python314_4_x86_Template/Lib/importlib/_abc.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/_abc.py rename to Python314_4_x86_Template/Lib/importlib/_abc.py diff --git a/Python314_4_x86_Template/Lib/importlib/_bootstrap.py b/Python314_4_x86_Template/Lib/importlib/_bootstrap.py new file mode 100644 index 00000000..9d911e1d --- /dev/null +++ b/Python314_4_x86_Template/Lib/importlib/_bootstrap.py @@ -0,0 +1,1570 @@
+"""Core implementation of import.
+
+This module is NOT meant to be directly imported! It has been designed such
+that it can be bootstrapped into Python as the implementation of import. As
+such it requires the injection of specific modules and attributes in order to
+work. One should use importlib as the public-facing version of this module.
+
+"""
+#
+# IMPORTANT: Whenever making changes to this module, be sure to run a top-level
+# `make regen-importlib` followed by `make` in order to get the frozen version
+# of the module updated. Not doing so will cause the Makefile to fail for
+# all others who don't have a ./python around to freeze the module
+# in the early stages of compilation.
+#
+
+# See importlib._setup() for what is injected into the global namespace.
+
+# When editing this code be aware that code executed at import time CANNOT
+# reference any injected objects! This includes not only global code but also
+# anything specified at the class level.
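
Illustrative sketch (not part of the patched file): the injection scheme described above can be mimicked with a toy module. Names such as _thread start out as None placeholders and are assigned from outside after the module body has executed, which is why only code that runs later (function bodies) may reference them.

    import types

    boot = types.ModuleType('toy_bootstrap')
    exec(
        "_thread = None\n"                # placeholder, as in this module
        "def new_lock():\n"
        "    return _thread.allocate_lock()\n",
        boot.__dict__,
    )

    import _thread
    boot._thread = _thread                # the injection step (_setup() analog)
    print(boot.new_lock())                # usable only after injection
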
+
+def _object_name(obj):
+    try:
+        return obj.__qualname__
+    except AttributeError:
+        return type(obj).__qualname__
+
+# Bootstrap-related code ######################################################
+
+# Modules injected manually by _setup()
+_thread = None
+_warnings = None
+_weakref = None
+
+# Import done by _install_external_importers()
+_bootstrap_external = None
+
+
+def _wrap(new, old):
+    """Simple substitute for functools.update_wrapper."""
+    for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
+        if hasattr(old, replace):
+            setattr(new, replace, getattr(old, replace))
+    new.__dict__.update(old.__dict__)
+
+
+def _new_module(name):
+    return type(sys)(name)
+
+
+# Module-level locking ########################################################
+
+# For a list that can have a weakref to it.
+class _List(list):
+    __slots__ = ("__weakref__",)
+
+
+# Copied from weakref.py with some simplifications and modifications unique to
+# bootstrapping importlib. Many methods were simply deleted for simplicity, so if they
+# are needed in the future they may work if simply copied back in.
+class _WeakValueDictionary:
+
+    def __init__(self):
+        self_weakref = _weakref.ref(self)
+
+        # Inlined to avoid issues with inheriting from _weakref.ref before _weakref is
+        # set by _setup(). Since there's only one instance of this class, this is
+        # not expensive.
+        class KeyedRef(_weakref.ref):
+
+            __slots__ = "key",
+
+            def __new__(type, ob, key):
+                self = super().__new__(type, ob, type.remove)
+                self.key = key
+                return self
+
+            def __init__(self, ob, key):
+                super().__init__(ob, self.remove)
+
+            @staticmethod
+            def remove(wr):
+                nonlocal self_weakref
+
+                self = self_weakref()
+                if self is not None:
+                    if self._iterating:
+                        self._pending_removals.append(wr.key)
+                    else:
+                        _weakref._remove_dead_weakref(self.data, wr.key)
+
+        self._KeyedRef = KeyedRef
+        self.clear()
+
+    def clear(self):
+        self._pending_removals = []
+        self._iterating = set()
+        self.data = {}
+
+    def _commit_removals(self):
+        pop = self._pending_removals.pop
+        d = self.data
+        while True:
+            try:
+                key = pop()
+            except IndexError:
+                return
+            _weakref._remove_dead_weakref(d, key)
+
+    def get(self, key, default=None):
+        if self._pending_removals:
+            self._commit_removals()
+        try:
+            wr = self.data[key]
+        except KeyError:
+            return default
+        else:
+            if (o := wr()) is None:
+                return default
+            else:
+                return o
+
+    def setdefault(self, key, default=None):
+        try:
+            o = self.data[key]()
+        except KeyError:
+            o = None
+        if o is None:
+            if self._pending_removals:
+                self._commit_removals()
+            self.data[key] = self._KeyedRef(default, key)
+            return default
+        else:
+            return o
+
+
+# A dict mapping module names to weakrefs of _ModuleLock instances.
+# Dictionary protected by the global import lock.
+_module_locks = {}
+
+# A dict mapping thread IDs to weakref'ed lists of _ModuleLock instances.
+# This maps a thread to the module locks it is blocking on acquiring. The
+# values are lists because a single thread could perform a re-entrant import
+# and be "in the process" of blocking on locks for more than one module. A
+# thread can be "in the process" because a thread cannot actually block on
+# acquiring more than one lock but it can have set up bookkeeping that reflects
+# that it intends to block on acquiring more than one lock.
+#
+# The dictionary uses a WeakValueDictionary to avoid keeping unnecessary
+# lists around, regardless of GC runs. This way there's no memory leak if
+# the list is no longer needed (GH-106176).
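
Illustrative sketch (not part of the patched file): _blocking_on, defined on the next line, holds the thread-id to waited-on-locks edges consumed by _has_deadlocked() further below. A simplified standalone version of that reachability search, omitting the early-return the real code applies to already-seen threads:

    class _FakeLock:
        """Stand-in exposing only the attribute the search follows."""
        def __init__(self, owner):
            self.owner = owner

    def reaches(target_id, candidate_ids, blocking_on, seen=None):
        # Can target_id be reached by repeatedly following lock owners?
        seen = set() if seen is None else seen
        if target_id in candidate_ids:
            return True
        for tid in candidate_ids:
            if tid in seen or not blocking_on.get(tid):
                continue
            seen.add(tid)
            owners = [lock.owner for lock in blocking_on[tid]]
            if reaches(target_id, owners, blocking_on, seen):
                return True
        return False

    # Thread 1 waits on a lock owned by thread 2 and vice versa: deadlock.
    graph = {1: [_FakeLock(2)], 2: [_FakeLock(1)]}
    assert reaches(1, [lock.owner for lock in graph[1]], graph)
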
+_blocking_on = None
+
+
+class _BlockingOnManager:
+    """A context manager responsible for updating ``_blocking_on``."""
+    def __init__(self, thread_id, lock):
+        self.thread_id = thread_id
+        self.lock = lock
+
+    def __enter__(self):
+        """Mark the running thread as waiting for self.lock via _blocking_on."""
+        # Interactions with _blocking_on are *not* protected by the global
+        # import lock here because each thread only touches the state that it
+        # owns (state keyed on its thread id). The global import lock is
+        # re-entrant (i.e., a single thread may take it more than once) so it
+        # wouldn't help us be correct in the face of re-entrancy either.
+
+        self.blocked_on = _blocking_on.setdefault(self.thread_id, _List())
+        self.blocked_on.append(self.lock)
+
+    def __exit__(self, *args, **kwargs):
+        """Remove self.lock from this thread's _blocking_on list."""
+        self.blocked_on.remove(self.lock)
+
+
+class _DeadlockError(RuntimeError):
+    pass
+
+
+
+def _has_deadlocked(target_id, *, seen_ids, candidate_ids, blocking_on):
+    """Check if 'target_id' is holding the same lock as another thread(s).
+
+    The search within 'blocking_on' starts with the threads listed in
+    'candidate_ids'. 'seen_ids' contains any threads that are considered
+    already traversed in the search.
+
+    Keyword arguments:
+    target_id -- The thread id to try to reach.
+    seen_ids -- A set of threads that have already been visited.
+    candidate_ids -- The thread ids from which to begin.
+    blocking_on -- A dict representing the thread/blocking-on graph. This may
+                   be the same object as the global '_blocking_on' but it is
+                   a parameter to reduce the impact that global mutable
+                   state has on the result of this function.
+    """
+    if target_id in candidate_ids:
+        # If we have already reached the target_id, we're done - signal that it
+        # is reachable.
+        return True
+
+    # Otherwise, try to reach the target_id from each of the given candidate_ids.
+    for tid in candidate_ids:
+        if not (candidate_blocking_on := blocking_on.get(tid)):
+            # There are no edges out from this node, skip it.
+            continue
+        elif tid in seen_ids:
+            # bpo 38091: the chain of tid's we encounter here eventually leads
+            # to a fixed point or a cycle, but does not reach target_id.
+            # This means we would not actually deadlock. This can happen if
+            # other threads are at the beginning of acquire() below.
+            return False
+        seen_ids.add(tid)
+
+        # Follow the edges out from this thread.
+        edges = [lock.owner for lock in candidate_blocking_on]
+        if _has_deadlocked(target_id, seen_ids=seen_ids, candidate_ids=edges,
+                           blocking_on=blocking_on):
+            return True
+
+    return False
+
+
+class _ModuleLock:
+    """A recursive lock implementation which is able to detect deadlocks
+    (e.g. thread 1 trying to take locks A then B, and thread 2 trying to
+    take locks B then A).
+    """
+
+    def __init__(self, name):
+        # Create an RLock for protecting the import process for the
+        # corresponding module. Since it is an RLock, a single thread will be
+        # able to take it more than once. This is necessary to support
+        # re-entrancy in the import system that arises from (at least) signal
+        # handlers and the garbage collector. Consider the case of:
+        #
+        #   import foo
+        #   -> ...
+        #   -> importlib._bootstrap._ModuleLock.acquire
+        #   -> ...
+        #   -> <garbage collector>
+        #   -> __del__
+        #   -> import foo
+        #   -> ...
+ # -> importlib._bootstrap._ModuleLock.acquire + # -> _BlockingOnManager.__enter__ + # + # If a different thread than the running one holds the lock then the + # thread will have to block on taking the lock, which is what we want + # for thread safety. + self.lock = _thread.RLock() + self.wakeup = _thread.allocate_lock() + + # The name of the module for which this is a lock. + self.name = name + + # Can end up being set to None if this lock is not owned by any thread + # or the thread identifier for the owning thread. + self.owner = None + + # Represent the number of times the owning thread has acquired this lock + # via a list of True. This supports RLock-like ("re-entrant lock") + # behavior, necessary in case a single thread is following a circular + # import dependency and needs to take the lock for a single module + # more than once. + # + # Counts are represented as a list of True because list.append(True) + # and list.pop() are both atomic and thread-safe in CPython and it's hard + # to find another primitive with the same properties. + self.count = [] + + # This is a count of the number of threads that are blocking on + # self.wakeup.acquire() awaiting to get their turn holding this module + # lock. When the module lock is released, if this is greater than + # zero, it is decremented and `self.wakeup` is released one time. The + # intent is that this will let one other thread make more progress on + # acquiring this module lock. This repeats until all the threads have + # gotten a turn. + # + # This is incremented in self.acquire() when a thread notices it is + # going to have to wait for another thread to finish. + # + # See the comment above count for explanation of the representation. + self.waiters = [] + + def has_deadlock(self): + # To avoid deadlocks for concurrent or re-entrant circular imports, + # look at _blocking_on to see if any threads are blocking + # on getting the import lock for any module for which the import lock + # is held by this thread. + return _has_deadlocked( + # Try to find this thread. + target_id=_thread.get_ident(), + seen_ids=set(), + # Start from the thread that holds the import lock for this + # module. + candidate_ids=[self.owner], + # Use the global "blocking on" state. + blocking_on=_blocking_on, + ) + + def acquire(self): + """ + Acquire the module lock. If a potential deadlock is detected, + a _DeadlockError is raised. + Otherwise, the lock is always acquired and True is returned. + """ + tid = _thread.get_ident() + with _BlockingOnManager(tid, self): + while True: + # Protect interaction with state on self with a per-module + # lock. This makes it safe for more than one thread to try to + # acquire the lock for a single module at the same time. + with self.lock: + if self.count == [] or self.owner == tid: + # If the lock for this module is unowned then we can + # take the lock immediately and succeed. If the lock + # for this module is owned by the running thread then + # we can also allow the acquire to succeed. This + # supports circular imports (thread T imports module A + # which imports module B which imports module A). + self.owner = tid + self.count.append(True) + return True + + # At this point we know the lock is held (because count != + # 0) by another thread (because owner != tid). We'll have + # to get in line to take the module lock. + + # But first, check to see if this thread would create a + # deadlock by acquiring this module lock. If it would + # then just stop with an error. 
+ # + # It's not clear who is expected to handle this error. + # There is one handler in _lock_unlock_module but many + # times this method is called when entering the context + # manager _ModuleLockManager instead - so _DeadlockError + # will just propagate up to application code. + # + # This seems to be more than just a hypothetical - + # https://stackoverflow.com/questions/59509154 + # https://github.com/encode/django-rest-framework/issues/7078 + if self.has_deadlock(): + raise _DeadlockError(f'deadlock detected by {self!r}') + + # Check to see if we're going to be able to acquire the + # lock. If we are going to have to wait then increment + # the waiters so `self.release` will know to unblock us + # later on. We do this part non-blockingly so we don't + # get stuck here before we increment waiters. We have + # this extra acquire call (in addition to the one below, + # outside the self.lock context manager) to make sure + # self.wakeup is held when the next acquire is called (so + # we block). This is probably needlessly complex and we + # should just take self.wakeup in the return codepath + # above. + if self.wakeup.acquire(False): + self.waiters.append(None) + + # Now take the lock in a blocking fashion. This won't + # complete until the thread holding this lock + # (self.owner) calls self.release. + self.wakeup.acquire() + + # Taking the lock has served its purpose (making us wait), so we can + # give it up now. We'll take it w/o blocking again on the + # next iteration around this 'while' loop. + self.wakeup.release() + + def release(self): + tid = _thread.get_ident() + with self.lock: + if self.owner != tid: + raise RuntimeError('cannot release un-acquired lock') + assert len(self.count) > 0 + self.count.pop() + if not len(self.count): + self.owner = None + if len(self.waiters) > 0: + self.waiters.pop() + self.wakeup.release() + + def locked(self): + return bool(self.count) + + def __repr__(self): + return f'_ModuleLock({self.name!r}) at {id(self)}' + + +class _DummyModuleLock: + """A simple _ModuleLock equivalent for Python builds without + multi-threading support.""" + + def __init__(self, name): + self.name = name + self.count = 0 + + def acquire(self): + self.count += 1 + return True + + def release(self): + if self.count == 0: + raise RuntimeError('cannot release un-acquired lock') + self.count -= 1 + + def __repr__(self): + return f'_DummyModuleLock({self.name!r}) at {id(self)}' + + +class _ModuleLockManager: + + def __init__(self, name): + self._name = name + self._lock = None + + def __enter__(self): + self._lock = _get_module_lock(self._name) + self._lock.acquire() + + def __exit__(self, *args, **kwargs): + self._lock.release() + + +# The following two functions are for consumption by Python/import.c. + +def _get_module_lock(name): + """Get or create the module lock for a given module name. + + Acquire/release internally the global import lock to protect + _module_locks.""" + + _imp.acquire_lock() + try: + try: + lock = _module_locks[name]() + except KeyError: + lock = None + + if lock is None: + if _thread is None: + lock = _DummyModuleLock(name) + else: + lock = _ModuleLock(name) + + def cb(ref, name=name): + _imp.acquire_lock() + try: + # bpo-31070: Check if another thread created a new lock + # after the previous lock was destroyed + # but before the weakref callback was called. 
+            if _module_locks.get(name) is ref:
+                del _module_locks[name]
+        finally:
+            _imp.release_lock()
+
+        _module_locks[name] = _weakref.ref(lock, cb)
+    finally:
+        _imp.release_lock()
+
+    return lock
+
+
+def _lock_unlock_module(name):
+    """Acquires then releases the module lock for a given module name.
+
+    This is used to ensure a module is completely initialized, in the
+    event it is being imported by another thread.
+    """
+    lock = _get_module_lock(name)
+    try:
+        lock.acquire()
+    except _DeadlockError:
+        # Concurrent circular import, we'll accept a partially initialized
+        # module object.
+        pass
+    else:
+        lock.release()
+
+# Frame stripping magic ###############################################
+def _call_with_frames_removed(f, *args, **kwds):
+    """remove_importlib_frames in import.c will always remove sequences
+    of importlib frames that end with a call to this function
+
+    Use it instead of a normal call in places where including the importlib
+    frames introduces unwanted noise into the traceback (e.g. when executing
+    module code)
+    """
+    return f(*args, **kwds)
+
+
+def _verbose_message(message, *args, verbosity=1):
+    """Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
+    if sys.flags.verbose >= verbosity:
+        if not message.startswith(('#', 'import ')):
+            message = '# ' + message
+        print(message.format(*args), file=sys.stderr)
+
+
+def _requires_builtin(fxn):
+    """Decorator to verify the named module is built-in."""
+    def _requires_builtin_wrapper(self, fullname):
+        if fullname not in sys.builtin_module_names:
+            raise ImportError(f'{fullname!r} is not a built-in module',
+                              name=fullname)
+        return fxn(self, fullname)
+    _wrap(_requires_builtin_wrapper, fxn)
+    return _requires_builtin_wrapper
+
+
+def _requires_frozen(fxn):
+    """Decorator to verify the named module is frozen."""
+    def _requires_frozen_wrapper(self, fullname):
+        if not _imp.is_frozen(fullname):
+            raise ImportError(f'{fullname!r} is not a frozen module',
+                              name=fullname)
+        return fxn(self, fullname)
+    _wrap(_requires_frozen_wrapper, fxn)
+    return _requires_frozen_wrapper
+
+
+# Typically used by loader classes as a method replacement.
+def _load_module_shim(self, fullname):
+    """Load the specified module into sys.modules and return it.
+
+    This method is deprecated. Use loader.exec_module() instead.
+
+    """
+    msg = ("the load_module() method is deprecated and slated for removal in "
+           "Python 3.15; use exec_module() instead")
+    _warnings.warn(msg, DeprecationWarning)
+    spec = spec_from_loader(fullname, self)
+    if fullname in sys.modules:
+        module = sys.modules[fullname]
+        _exec(spec, module)
+        return sys.modules[fullname]
+    else:
+        return _load(spec)
+
+# Module specifications #######################################################
+
+def _module_repr(module):
+    """The implementation of ModuleType.__repr__()."""
+    loader = getattr(module, '__loader__', None)
+    if spec := getattr(module, "__spec__", None):
+        return _module_repr_from_spec(spec)
+    # Fall through to a catch-all which always succeeds.
+    try:
+        name = module.__name__
+    except AttributeError:
+        name = '?'
+    try:
+        filename = module.__file__
+    except AttributeError:
+        if loader is None:
+            return f'<module {name!r}>'
+        else:
+            return f'<module {name!r} ({loader!r})>'
+    else:
+        return f'<module {name!r} from {filename!r}>'
+
+
+class ModuleSpec:
+    """The specification for a module, used for loading.
+
+    A module's spec is the source for information about the module. For
+    data associated with the module, including source, use the spec's
+    loader.
+
+    `name` is the absolute name of the module.
`loader` is the loader + to use when loading the module. `parent` is the name of the + package the module is in. The parent is derived from the name. + + `is_package` determines if the module is considered a package or + not. On modules this is reflected by the `__path__` attribute. + + `origin` is the specific location used by the loader from which to + load the module, if that information is available. When filename is + set, origin will match. + + `has_location` indicates that a spec's "origin" reflects a location. + When this is True, `__file__` attribute of the module is set. + + `cached` is the location of the cached bytecode file, if any. It + corresponds to the `__cached__` attribute. + + `submodule_search_locations` is the sequence of path entries to + search when importing submodules. If set, is_package should be + True--and False otherwise. + + Packages are simply modules that (may) have submodules. If a spec + has a non-None value in `submodule_search_locations`, the import + system will consider modules loaded from the spec as packages. + + Only finders (see importlib.abc.MetaPathFinder and + importlib.abc.PathEntryFinder) should modify ModuleSpec instances. + + """ + + def __init__(self, name, loader, *, origin=None, loader_state=None, + is_package=None): + self.name = name + self.loader = loader + self.origin = origin + self.loader_state = loader_state + self.submodule_search_locations = [] if is_package else None + self._uninitialized_submodules = [] + + # file-location attributes + self._set_fileattr = False + self._cached = None + + def __repr__(self): + args = [f'name={self.name!r}', f'loader={self.loader!r}'] + if self.origin is not None: + args.append(f'origin={self.origin!r}') + if self.submodule_search_locations is not None: + args.append(f'submodule_search_locations={self.submodule_search_locations}') + return f'{self.__class__.__name__}({", ".join(args)})' + + def __eq__(self, other): + smsl = self.submodule_search_locations + try: + return (self.name == other.name and + self.loader == other.loader and + self.origin == other.origin and + smsl == other.submodule_search_locations and + self.cached == other.cached and + self.has_location == other.has_location) + except AttributeError: + return NotImplemented + + @property + def cached(self): + if self._cached is None: + if self.origin is not None and self._set_fileattr: + if _bootstrap_external is None: + raise NotImplementedError + self._cached = _bootstrap_external._get_cached(self.origin) + return self._cached + + @cached.setter + def cached(self, cached): + self._cached = cached + + @property + def parent(self): + """The name of the module's parent.""" + if self.submodule_search_locations is None: + return self.name.rpartition('.')[0] + else: + return self.name + + @property + def has_location(self): + return self._set_fileattr + + @has_location.setter + def has_location(self, value): + self._set_fileattr = bool(value) + + +def spec_from_loader(name, loader, *, origin=None, is_package=None): + """Return a module spec based on various loader methods.""" + if origin is None: + origin = getattr(loader, '_ORIGIN', None) + + if not origin and hasattr(loader, 'get_filename'): + if _bootstrap_external is None: + raise NotImplementedError + spec_from_file_location = _bootstrap_external.spec_from_file_location + + if is_package is None: + return spec_from_file_location(name, loader=loader) + search = [] if is_package else None + return spec_from_file_location(name, loader=loader, + submodule_search_locations=search) + + if 
is_package is None:
+        if hasattr(loader, 'is_package'):
+            try:
+                is_package = loader.is_package(name)
+            except ImportError:
+                is_package = None # aka, undefined
+        else:
+            # the default
+            is_package = False
+
+    return ModuleSpec(name, loader, origin=origin, is_package=is_package)
+
+
+def _spec_from_module(module, loader=None, origin=None):
+    # This function is meant for use in _setup().
+    try:
+        spec = module.__spec__
+    except AttributeError:
+        pass
+    else:
+        if spec is not None:
+            return spec
+
+    name = module.__name__
+    if loader is None:
+        try:
+            loader = module.__loader__
+        except AttributeError:
+            # loader will stay None.
+            pass
+    try:
+        location = module.__file__
+    except AttributeError:
+        location = None
+    if origin is None:
+        if loader is not None:
+            origin = getattr(loader, '_ORIGIN', None)
+        if not origin and location is not None:
+            origin = location
+    try:
+        cached = module.__cached__
+    except AttributeError:
+        cached = None
+    try:
+        submodule_search_locations = list(module.__path__)
+    except AttributeError:
+        submodule_search_locations = None
+
+    spec = ModuleSpec(name, loader, origin=origin)
+    spec._set_fileattr = False if location is None else (origin == location)
+    spec.cached = cached
+    spec.submodule_search_locations = submodule_search_locations
+    return spec
+
+
+def _init_module_attrs(spec, module, *, override=False):
+    # The passed-in module may not support attribute assignment,
+    # in which case we simply don't set the attributes.
+    # __name__
+    if (override or getattr(module, '__name__', None) is None):
+        try:
+            module.__name__ = spec.name
+        except AttributeError:
+            pass
+    # __loader__
+    if override or getattr(module, '__loader__', None) is None:
+        loader = spec.loader
+        if loader is None:
+            # A backward compatibility hack.
+            if spec.submodule_search_locations is not None:
+                if _bootstrap_external is None:
+                    raise NotImplementedError
+                NamespaceLoader = _bootstrap_external.NamespaceLoader
+
+                loader = NamespaceLoader.__new__(NamespaceLoader)
+                loader._path = spec.submodule_search_locations
+                spec.loader = loader
+                # While the docs say that module.__file__ is not set for
+                # built-in modules, and the code below will avoid setting it if
+                # spec.has_location is false, this is incorrect for namespace
+                # packages. Namespace packages have no location, but their
+                # __spec__.origin is None, and thus their module.__file__
+                # should also be None for consistency. While a bit of a hack,
+                # this is the best place to ensure this consistency.
+                #
+                # See # https://docs.python.org/3/library/importlib.html#importlib.abc.Loader.load_module
+                # and bpo-32305
+                module.__file__ = None
+        try:
+            module.__loader__ = loader
+        except AttributeError:
+            pass
+    # __package__
+    if override or getattr(module, '__package__', None) is None:
+        try:
+            module.__package__ = spec.parent
+        except AttributeError:
+            pass
+    # __spec__
+    try:
+        module.__spec__ = spec
+    except AttributeError:
+        pass
+    # __path__
+    if override or getattr(module, '__path__', None) is None:
+        if spec.submodule_search_locations is not None:
+            # XXX We should extend __path__ if it's already a list.
+            try:
+                module.__path__ = spec.submodule_search_locations
+            except AttributeError:
+                pass
+    # __file__/__cached__
+    if spec.has_location:
+        if override or getattr(module, '__file__', None) is None:
+            try:
+                module.__file__ = spec.origin
+            except AttributeError:
+                pass
+
+        if override or getattr(module, '__cached__', None) is None:
+            if spec.cached is not None:
+                try:
+                    module.__cached__ = spec.cached
+                except AttributeError:
+                    pass
+    return module
+
+
+def module_from_spec(spec):
+    """Create a module based on the provided spec."""
+    # Typically loaders will not implement create_module().
+    module = None
+    if hasattr(spec.loader, 'create_module'):
+        # If create_module() returns `None` then it means default
+        # module creation should be used.
+        module = spec.loader.create_module(spec)
+    elif hasattr(spec.loader, 'exec_module'):
+        raise ImportError('loaders that define exec_module() '
+                          'must also define create_module()')
+    if module is None:
+        module = _new_module(spec.name)
+    _init_module_attrs(spec, module)
+    return module
+
+
+def _module_repr_from_spec(spec):
+    """Return the repr to use for the module."""
+    name = '?' if spec.name is None else spec.name
+    if spec.origin is None:
+        loader = spec.loader
+        if loader is None:
+            return f'<module {name!r}>'
+        elif (
+            _bootstrap_external is not None
+            and isinstance(loader, _bootstrap_external.NamespaceLoader)
+        ):
+            return f'<module {name!r} (namespace) from {list(loader._path)}>'
+        else:
+            return f'<module {name!r} ({loader!r})>'
+    else:
+        if spec.has_location:
+            return f'<module {name!r} from {spec.origin!r}>'
+        else:
+            return f'<module {name!r} ({spec.origin})>'
+
+
+# Used by importlib.reload() and _load_module_shim().
+def _exec(spec, module):
+    """Execute the spec's specified module in an existing module's namespace."""
+    name = spec.name
+    with _ModuleLockManager(name):
+        if sys.modules.get(name) is not module:
+            msg = f'module {name!r} not in sys.modules'
+            raise ImportError(msg, name=name)
+        try:
+            if spec.loader is None:
+                if spec.submodule_search_locations is None:
+                    raise ImportError('missing loader', name=spec.name)
+                # Namespace package.
+                _init_module_attrs(spec, module, override=True)
+            else:
+                _init_module_attrs(spec, module, override=True)
+                if not hasattr(spec.loader, 'exec_module'):
+                    msg = (f"{_object_name(spec.loader)}.exec_module() not found; "
+                           "falling back to load_module()")
+                    _warnings.warn(msg, ImportWarning)
+                    spec.loader.load_module(name)
+                else:
+                    spec.loader.exec_module(module)
+        finally:
+            # Update the order of insertion into sys.modules for module
+            # clean-up at shutdown.
+            module = sys.modules.pop(spec.name)
+            sys.modules[spec.name] = module
+    return module
+
+
+def _load_backward_compatible(spec):
+    # It is assumed that all callers have been warned about using load_module()
+    # appropriately before calling this function.
+    try:
+        spec.loader.load_module(spec.name)
+    except:
+        if spec.name in sys.modules:
+            module = sys.modules.pop(spec.name)
+            sys.modules[spec.name] = module
+        raise
+    # The module must be in sys.modules at this point!
+    # Move it to the end of sys.modules.
+    module = sys.modules.pop(spec.name)
+    sys.modules[spec.name] = module
+    if getattr(module, '__loader__', None) is None:
+        try:
+            module.__loader__ = spec.loader
+        except AttributeError:
+            pass
+    if getattr(module, '__package__', None) is None:
+        try:
+            # Since module.__path__ may not line up with
+            # spec.submodule_search_locations, we can't necessarily rely
+            # on spec.parent here.
+ module.__package__ = module.__name__ + if not hasattr(module, '__path__'): + module.__package__ = spec.name.rpartition('.')[0] + except AttributeError: + pass + if getattr(module, '__spec__', None) is None: + try: + module.__spec__ = spec + except AttributeError: + pass + return module + +def _load_unlocked(spec): + # A helper for direct use by the import system. + if spec.loader is not None: + # Not a namespace package. + if not hasattr(spec.loader, 'exec_module'): + msg = (f"{_object_name(spec.loader)}.exec_module() not found; " + "falling back to load_module()") + _warnings.warn(msg, ImportWarning) + return _load_backward_compatible(spec) + + module = module_from_spec(spec) + + # This must be done before putting the module in sys.modules + # (otherwise an optimization shortcut in import.c becomes + # wrong). + spec._initializing = True + try: + sys.modules[spec.name] = module + try: + if spec.loader is None: + if spec.submodule_search_locations is None: + raise ImportError('missing loader', name=spec.name) + # A namespace package so do nothing. + else: + spec.loader.exec_module(module) + except: + try: + del sys.modules[spec.name] + except KeyError: + pass + raise + # Move the module to the end of sys.modules. + # We don't ensure that the import-related module attributes get + # set in the sys.modules replacement case. Such modules are on + # their own. + module = sys.modules.pop(spec.name) + sys.modules[spec.name] = module + _verbose_message('import {!r} # {!r}', spec.name, spec.loader) + finally: + spec._initializing = False + + return module + +# A method used during testing of _load_unlocked() and by +# _load_module_shim(). +def _load(spec): + """Return a new module object, loaded by the spec's loader. + + The module is not added to its parent. + + If a module is already in sys.modules, that existing module gets + clobbered. + + """ + with _ModuleLockManager(spec.name): + return _load_unlocked(spec) + + +# Loaders ##################################################################### + +class BuiltinImporter: + + """Meta path import for built-in modules. + + All methods are either class or static methods to avoid the need to + instantiate the class. + + """ + + _ORIGIN = "built-in" + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + if _imp.is_builtin(fullname): + return spec_from_loader(fullname, cls, origin=cls._ORIGIN) + else: + return None + + @staticmethod + def create_module(spec): + """Create a built-in module""" + if spec.name not in sys.builtin_module_names: + raise ImportError(f'{spec.name!r} is not a built-in module', + name=spec.name) + return _call_with_frames_removed(_imp.create_builtin, spec) + + @staticmethod + def exec_module(module): + """Exec a built-in module""" + _call_with_frames_removed(_imp.exec_builtin, module) + + @classmethod + @_requires_builtin + def get_code(cls, fullname): + """Return None as built-in modules do not have code objects.""" + return None + + @classmethod + @_requires_builtin + def get_source(cls, fullname): + """Return None as built-in modules do not have source code.""" + return None + + @classmethod + @_requires_builtin + def is_package(cls, fullname): + """Return False as built-in modules are never packages.""" + return False + + load_module = classmethod(_load_module_shim) + + +class FrozenImporter: + + """Meta path import for frozen modules. + + All methods are either class or static methods to avoid the need to + instantiate the class. 
+ + """ + + _ORIGIN = "frozen" + + @classmethod + def _fix_up_module(cls, module): + spec = module.__spec__ + state = spec.loader_state + if state is None: + # The module is missing FrozenImporter-specific values. + + # Fix up the spec attrs. + origname = vars(module).pop('__origname__', None) + assert origname, 'see PyImport_ImportFrozenModuleObject()' + ispkg = hasattr(module, '__path__') + assert _imp.is_frozen_package(module.__name__) == ispkg, ispkg + filename, pkgdir = cls._resolve_filename(origname, spec.name, ispkg) + spec.loader_state = type(sys.implementation)( + filename=filename, + origname=origname, + ) + __path__ = spec.submodule_search_locations + if ispkg: + assert __path__ == [], __path__ + if pkgdir: + spec.submodule_search_locations.insert(0, pkgdir) + else: + assert __path__ is None, __path__ + + # Fix up the module attrs (the bare minimum). + assert not hasattr(module, '__file__'), module.__file__ + if filename: + try: + module.__file__ = filename + except AttributeError: + pass + if ispkg: + if module.__path__ != __path__: + assert module.__path__ == [], module.__path__ + module.__path__.extend(__path__) + else: + # These checks ensure that _fix_up_module() is only called + # in the right places. + __path__ = spec.submodule_search_locations + ispkg = __path__ is not None + # Check the loader state. + assert sorted(vars(state)) == ['filename', 'origname'], state + if state.origname: + # The only frozen modules with "origname" set are stdlib modules. + (__file__, pkgdir, + ) = cls._resolve_filename(state.origname, spec.name, ispkg) + assert state.filename == __file__, (state.filename, __file__) + if pkgdir: + assert __path__ == [pkgdir], (__path__, pkgdir) + else: + assert __path__ == ([] if ispkg else None), __path__ + else: + __file__ = None + assert state.filename is None, state.filename + assert __path__ == ([] if ispkg else None), __path__ + # Check the file attrs. + if __file__: + assert hasattr(module, '__file__') + assert module.__file__ == __file__, (module.__file__, __file__) + else: + assert not hasattr(module, '__file__'), module.__file__ + if ispkg: + assert hasattr(module, '__path__') + assert module.__path__ == __path__, (module.__path__, __path__) + else: + assert not hasattr(module, '__path__'), module.__path__ + assert not spec.has_location + + @classmethod + def _resolve_filename(cls, fullname, alias=None, ispkg=False): + if not fullname or not getattr(sys, '_stdlib_dir', None): + return None, None + try: + sep = cls._SEP + except AttributeError: + sep = cls._SEP = '\\' if sys.platform == 'win32' else '/' + + if fullname != alias: + if fullname.startswith('<'): + fullname = fullname[1:] + if not ispkg: + fullname = f'{fullname}.__init__' + else: + ispkg = False + relfile = fullname.replace('.', sep) + if ispkg: + pkgdir = f'{sys._stdlib_dir}{sep}{relfile}' + filename = f'{pkgdir}{sep}__init__.py' + else: + pkgdir = None + filename = f'{sys._stdlib_dir}{sep}{relfile}.py' + return filename, pkgdir + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + info = _call_with_frames_removed(_imp.find_frozen, fullname) + if info is None: + return None + # We get the marshaled data in exec_module() (the loader + # part of the importer), instead of here (the finder part). + # The loader is the usual place to get the data that will + # be loaded into the module. (For example, see _LoaderBasics + # in _bootstrap_external.py.) Most importantly, this importer + # is simpler if we wait to get the data. 
+ # However, getting as much data in the finder as possible + # to later load the module is okay, and sometimes important. + # (That's why ModuleSpec.loader_state exists.) This is + # especially true if it avoids throwing away expensive data + # the loader would otherwise duplicate later and can be done + # efficiently. In this case it isn't worth it. + _, ispkg, origname = info + spec = spec_from_loader(fullname, cls, + origin=cls._ORIGIN, + is_package=ispkg) + filename, pkgdir = cls._resolve_filename(origname, fullname, ispkg) + spec.loader_state = type(sys.implementation)( + filename=filename, + origname=origname, + ) + if pkgdir: + spec.submodule_search_locations.insert(0, pkgdir) + return spec + + @staticmethod + def create_module(spec): + """Set __file__, if able.""" + module = _new_module(spec.name) + try: + filename = spec.loader_state.filename + except AttributeError: + pass + else: + if filename: + module.__file__ = filename + return module + + @staticmethod + def exec_module(module): + spec = module.__spec__ + name = spec.name + code = _call_with_frames_removed(_imp.get_frozen_object, name) + exec(code, module.__dict__) + + @classmethod + def load_module(cls, fullname): + """Load a frozen module. + + This method is deprecated. Use exec_module() instead. + + """ + # Warning about deprecation implemented in _load_module_shim(). + module = _load_module_shim(cls, fullname) + info = _imp.find_frozen(fullname) + assert info is not None + _, ispkg, origname = info + module.__origname__ = origname + vars(module).pop('__file__', None) + if ispkg: + module.__path__ = [] + cls._fix_up_module(module) + return module + + @classmethod + @_requires_frozen + def get_code(cls, fullname): + """Return the code object for the frozen module.""" + return _imp.get_frozen_object(fullname) + + @classmethod + @_requires_frozen + def get_source(cls, fullname): + """Return None as frozen modules do not have source code.""" + return None + + @classmethod + @_requires_frozen + def is_package(cls, fullname): + """Return True if the frozen module is a package.""" + return _imp.is_frozen_package(fullname) + + +# Import itself ############################################################### + +class _ImportLockContext: + + """Context manager for the import lock.""" + + def __enter__(self): + """Acquire the import lock.""" + _imp.acquire_lock() + + def __exit__(self, exc_type, exc_value, exc_traceback): + """Release the import lock regardless of any raised exceptions.""" + _imp.release_lock() + + +def _resolve_name(name, package, level): + """Resolve a relative module name to an absolute one.""" + bits = package.rsplit('.', level - 1) + if len(bits) < level: + raise ImportError('attempted relative import beyond top-level package') + base = bits[0] + return f'{base}.{name}' if name else base + + +def _find_spec(name, path, target=None): + """Find a module's spec.""" + meta_path = sys.meta_path + if meta_path is None: + raise ImportError("sys.meta_path is None, Python is likely " + "shutting down") + + # gh-130094: Copy sys.meta_path so that we have a consistent view of the + # list while iterating over it. + meta_path = list(meta_path) + if not meta_path: + _warnings.warn('sys.meta_path is empty', ImportWarning) + + # We check sys.modules here for the reload case. While a passed-in + # target will usually indicate a reload there is no guarantee, whereas + # sys.modules provides one. 
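
Illustrative sketch (not part of the patched file): _resolve_name(), defined above, turns a relative import into an absolute name by dropping level - 1 trailing components from the package; the _find_spec() body that the comment above introduces continues below. The helper is private and imported here purely for demonstration:

    from importlib._bootstrap import _resolve_name

    # With __package__ == 'pkg.sub.leaf', 'from ..mod import x' arrives as level=2.
    assert _resolve_name('mod', 'pkg.sub.leaf', 2) == 'pkg.sub.mod'
    # A bare 'from . import sibling' resolves to the package itself (level=1).
    assert _resolve_name('', 'pkg.sub.leaf', 1) == 'pkg.sub.leaf'
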
+ is_reload = name in sys.modules + for finder in meta_path: + with _ImportLockContext(): + try: + find_spec = finder.find_spec + except AttributeError: + continue + else: + spec = find_spec(name, path, target) + if spec is not None: + # The parent import may have already imported this module. + if not is_reload and name in sys.modules: + module = sys.modules[name] + try: + __spec__ = module.__spec__ + except AttributeError: + # We use the found spec since that is the one that + # we would have used if the parent module hadn't + # beaten us to the punch. + return spec + else: + if __spec__ is None: + return spec + else: + return __spec__ + else: + return spec + else: + return None + + +def _sanity_check(name, package, level): + """Verify arguments are "sane".""" + if not isinstance(name, str): + raise TypeError(f'module name must be str, not {type(name)}') + if level < 0: + raise ValueError('level must be >= 0') + if level > 0: + if not isinstance(package, str): + raise TypeError('__package__ not set to a string') + elif not package: + raise ImportError('attempted relative import with no known parent ' + 'package') + if not name and level == 0: + raise ValueError('Empty module name') + + +_ERR_MSG_PREFIX = 'No module named ' + +def _find_and_load_unlocked(name, import_): + path = None + parent = name.rpartition('.')[0] + parent_spec = None + if parent: + if parent not in sys.modules: + _call_with_frames_removed(import_, parent) + # Crazy side-effects! + module = sys.modules.get(name) + if module is not None: + return module + parent_module = sys.modules[parent] + try: + path = parent_module.__path__ + except AttributeError: + msg = f'{_ERR_MSG_PREFIX}{name!r}; {parent!r} is not a package' + raise ModuleNotFoundError(msg, name=name) from None + parent_spec = parent_module.__spec__ + if getattr(parent_spec, '_initializing', False): + _call_with_frames_removed(import_, parent) + # Crazy side-effects (again)! + module = sys.modules.get(name) + if module is not None: + return module + child = name.rpartition('.')[2] + spec = _find_spec(name, path) + if spec is None: + raise ModuleNotFoundError(f'{_ERR_MSG_PREFIX}{name!r}', name=name) + else: + if parent_spec: + # Temporarily add child we are currently importing to parent's + # _uninitialized_submodules for circular import tracking. + parent_spec._uninitialized_submodules.append(child) + try: + module = _load_unlocked(spec) + finally: + if parent_spec: + parent_spec._uninitialized_submodules.pop() + if parent: + # Set the module as an attribute on its parent. + parent_module = sys.modules[parent] + try: + setattr(parent_module, child, module) + except AttributeError: + msg = f"Cannot set an attribute on {parent!r} for child module {child!r}" + _warnings.warn(msg, ImportWarning) + return module + + +_NEEDS_LOADING = object() + + +def _find_and_load(name, import_): + """Find and load the module.""" + + # Optimization: we avoid unneeded module locking if the module + # already exists in sys.modules and is fully initialized. + module = sys.modules.get(name, _NEEDS_LOADING) + if (module is _NEEDS_LOADING or + getattr(getattr(module, "__spec__", None), "_initializing", False)): + with _ModuleLockManager(name): + module = sys.modules.get(name, _NEEDS_LOADING) + if module is _NEEDS_LOADING: + return _find_and_load_unlocked(name, import_) + + # Optimization: only call _bootstrap._lock_unlock_module() if + # module.__spec__._initializing is True. + # NOTE: because of this, initializing must be set *before* + # putting the new module in sys.modules. 
+ _lock_unlock_module(name) + else: + # Verify the module is still in sys.modules. Another thread may have + # removed it (due to import failure) between our sys.modules.get() + # above and the _initializing check. If removed, we retry the import + # to preserve normal semantics: the caller gets the exception from + # the actual import failure rather than a synthetic error. + if sys.modules.get(name) is not module: + return _find_and_load(name, import_) + + if module is None: + message = f'import of {name} halted; None in sys.modules' + raise ModuleNotFoundError(message, name=name) + + return module + + +def _gcd_import(name, package=None, level=0): + """Import and return the module based on its name, the package the call is + being made from, and the level adjustment. + + This function represents the greatest common denominator of functionality + between import_module and __import__. This includes setting __package__ if + the loader did not. + + """ + _sanity_check(name, package, level) + if level > 0: + name = _resolve_name(name, package, level) + return _find_and_load(name, _gcd_import) + + +def _handle_fromlist(module, fromlist, import_, *, recursive=False): + """Figure out what __import__ should return. + + The import_ parameter is a callable which takes the name of module to + import. It is required to decouple the function from assuming importlib's + import implementation is desired. + + """ + # The hell that is fromlist ... + # If a package was imported, try to import stuff from fromlist. + for x in fromlist: + if not isinstance(x, str): + if recursive: + where = module.__name__ + '.__all__' + else: + where = "``from list''" + raise TypeError(f"Item in {where} must be str, " + f"not {type(x).__name__}") + elif x == '*': + if not recursive and hasattr(module, '__all__'): + _handle_fromlist(module, module.__all__, import_, + recursive=True) + elif not hasattr(module, x): + from_name = f'{module.__name__}.{x}' + try: + _call_with_frames_removed(import_, from_name) + except ModuleNotFoundError as exc: + # Backwards-compatibility dictates we ignore failed + # imports triggered by fromlist for modules that don't + # exist. + if (exc.name == from_name and + sys.modules.get(from_name, _NEEDS_LOADING) is not None): + continue + raise + return module + + +def _calc___package__(globals): + """Calculate what __package__ should be. + + __package__ is not guaranteed to be defined or could be set to None + to represent that its proper value is unknown. + + """ + package = globals.get('__package__') + spec = globals.get('__spec__') + if package is not None: + if spec is not None and package != spec.parent: + _warnings.warn("__package__ != __spec__.parent " + f"({package!r} != {spec.parent!r})", + DeprecationWarning, stacklevel=3) + return package + elif spec is not None: + return spec.parent + else: + _warnings.warn("can't resolve package from __spec__ or __package__, " + "falling back on __name__ and __path__", + ImportWarning, stacklevel=3) + package = globals['__name__'] + if '__path__' not in globals: + package = package.rpartition('.')[0] + return package + + +def __import__(name, globals=None, locals=None, fromlist=(), level=0): + """Import a module. + + The 'globals' argument is used to infer where the import is occurring from + to handle relative imports. The 'locals' argument is ignored. The + 'fromlist' argument specifies what should exist as attributes on the module + being imported (e.g. ``from module import <fromlist>``).
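A sketch of how the fromlist changes what __import__() returns (pkg/mod is a hypothetical package/submodule pair; this is the behavior _handle_fromlist() above implements):

    import builtins
    builtins.__import__('pkg.mod')                     # no fromlist -> returns top package 'pkg'
    builtins.__import__('pkg.mod', fromlist=['attr'])  # fromlist -> returns 'pkg.mod' itself
    # "from pkg.mod import attr" compiles to the second form and then
    # pulls 'attr' off the returned module.
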
The 'level' + argument represents the package location to import from in a relative + import (e.g. ``from ..pkg import mod`` would have a 'level' of 2). + + """ + if level == 0: + module = _gcd_import(name) + else: + globals_ = globals if globals is not None else {} + package = _calc___package__(globals_) + module = _gcd_import(name, package, level) + if not fromlist: + # Return up to the first dot in 'name'. This is complicated by the fact + # that 'name' may be relative. + if level == 0: + return _gcd_import(name.partition('.')[0]) + elif not name: + return module + else: + # Figure out where to slice the module's name up to the first dot + # in 'name'. + cut_off = len(name) - len(name.partition('.')[0]) + # Slice end needs to be positive to alleviate need to special-case + # when ``'.' not in name``. + return sys.modules[module.__name__[:len(module.__name__)-cut_off]] + elif hasattr(module, '__path__'): + return _handle_fromlist(module, fromlist, _gcd_import) + else: + return module + + +def _builtin_from_name(name): + spec = BuiltinImporter.find_spec(name) + if spec is None: + raise ImportError('no built-in module named ' + name) + return _load_unlocked(spec) + + +def _setup(sys_module, _imp_module): + """Setup importlib by importing needed built-in modules and injecting them + into the global namespace. + + As sys is needed for sys.modules access and _imp is needed to load built-in + modules, those two modules must be explicitly passed in. + + """ + global _imp, sys, _blocking_on + _imp = _imp_module + sys = sys_module + + # Set up the spec for existing builtin/frozen modules. + module_type = type(sys) + for name, module in sys.modules.items(): + if isinstance(module, module_type): + if name in sys.builtin_module_names: + loader = BuiltinImporter + elif _imp.is_frozen(name): + loader = FrozenImporter + else: + continue + spec = _spec_from_module(module, loader) + _init_module_attrs(spec, module) + if loader is FrozenImporter: + loader._fix_up_module(module) + + # Directly load built-in modules needed during bootstrap. + self_module = sys.modules[__name__] + for builtin_name in ('_thread', '_warnings', '_weakref'): + if builtin_name not in sys.modules: + builtin_module = _builtin_from_name(builtin_name) + else: + builtin_module = sys.modules[builtin_name] + setattr(self_module, builtin_name, builtin_module) + + # Instantiation requires _weakref to have been set. + _blocking_on = _WeakValueDictionary() + + +def _install(sys_module, _imp_module): + """Install importers for builtin and frozen modules""" + _setup(sys_module, _imp_module) + + sys.meta_path.append(BuiltinImporter) + sys.meta_path.append(FrozenImporter) + + +def _install_external_importers(): + """Install importers that require external filesystem access""" + global _bootstrap_external + import _frozen_importlib_external + _bootstrap_external = _frozen_importlib_external + _frozen_importlib_external._install(sys.modules[__name__]) diff --git a/Python314_4_x86_Template/Lib/importlib/_bootstrap_external.py b/Python314_4_x86_Template/Lib/importlib/_bootstrap_external.py new file mode 100644 index 00000000..6a828ae7 --- /dev/null +++ b/Python314_4_x86_Template/Lib/importlib/_bootstrap_external.py @@ -0,0 +1,1562 @@ +"""Core implementation of path-based import. + +This module is NOT meant to be directly imported! It has been designed such +that it can be bootstrapped into Python as the implementation of import. As +such it requires the injection of specific modules and attributes in order to +work. 
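After the _install() and _install_external_importers() functions shown just above run at interpreter startup, the default sys.meta_path holds the three finders in the order they were appended; roughly (illustrative output):

    import sys
    print(sys.meta_path)
    # [<class '_frozen_importlib.BuiltinImporter'>,
    #  <class '_frozen_importlib.FrozenImporter'>,
    #  <class '_frozen_importlib_external.PathFinder'>]
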
One should use importlib as the public-facing version of this module. + +""" +# IMPORTANT: Whenever making changes to this module, be sure to run a top-level +# `make regen-importlib` followed by `make` in order to get the frozen version +# of the module updated. Not doing so will result in the Makefile to fail for +# all others who don't have a ./python around to freeze the module in the early +# stages of compilation. +# + +# See importlib._setup() for what is injected into the global namespace. + +# When editing this code be aware that code executed at import time CANNOT +# reference any injected objects! This includes not only global code but also +# anything specified at the class level. + +# Module injected manually by _set_bootstrap_module() +_bootstrap = None + +# Import builtin modules +import _imp +import _io +import sys +import _warnings +import marshal + + +_MS_WINDOWS = (sys.platform == 'win32') +if _MS_WINDOWS: + import nt as _os + import winreg +else: + import posix as _os + + +if _MS_WINDOWS: + path_separators = ['\\', '/'] +else: + path_separators = ['/'] +# Assumption made in _path_join() +assert all(len(sep) == 1 for sep in path_separators) +path_sep = path_separators[0] +path_sep_tuple = tuple(path_separators) +path_separators = ''.join(path_separators) +_pathseps_with_colon = {f':{s}' for s in path_separators} + + +# Bootstrap-related code ###################################################### +_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win', +_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin', 'ios', 'tvos', 'watchos' +_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY + + _CASE_INSENSITIVE_PLATFORMS_STR_KEY) + + +def _make_relax_case(): + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY): + key = 'PYTHONCASEOK' + else: + key = b'PYTHONCASEOK' + + def _relax_case(): + """True if filenames must be checked case-insensitively and ignore environment flags are not set.""" + return not sys.flags.ignore_environment and key in _os.environ + else: + def _relax_case(): + """True if filenames must be checked case-insensitively.""" + return False + return _relax_case + +_relax_case = _make_relax_case() + + +def _pack_uint32(x): + """Convert a 32-bit integer to little-endian.""" + return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little') + + +def _unpack_uint64(data): + """Convert 8 bytes in little-endian to an integer.""" + assert len(data) == 8 + return int.from_bytes(data, 'little') + +def _unpack_uint32(data): + """Convert 4 bytes in little-endian to an integer.""" + assert len(data) == 4 + return int.from_bytes(data, 'little') + +def _unpack_uint16(data): + """Convert 2 bytes in little-endian to an integer.""" + assert len(data) == 2 + return int.from_bytes(data, 'little') + + +if _MS_WINDOWS: + def _path_join(*path_parts): + """Replacement for os.path.join().""" + if not path_parts: + return "" + if len(path_parts) == 1: + return path_parts[0] + root = "" + path = [] + for new_root, tail in map(_os._path_splitroot, path_parts): + if new_root.startswith(path_sep_tuple) or new_root.endswith(path_sep_tuple): + root = new_root.rstrip(path_separators) or root + path = [path_sep + tail] + elif new_root.endswith(':'): + if root.casefold() != new_root.casefold(): + # Drive relative paths have to be resolved by the OS, so we reset the + # tail but do not add a path_sep prefix. 
+ root = new_root + path = [tail] + else: + path.append(tail) + else: + root = new_root or root + path.append(tail) + path = [p.rstrip(path_separators) for p in path if p] + if len(path) == 1 and not path[0]: + # Avoid losing the root's trailing separator when joining with nothing + return root + path_sep + return root + path_sep.join(path) + +else: + def _path_join(*path_parts): + """Replacement for os.path.join().""" + return path_sep.join([part.rstrip(path_separators) + for part in path_parts if part]) + + +def _path_split(path): + """Replacement for os.path.split().""" + i = max(path.rfind(p) for p in path_separators) + if i < 0: + return '', path + return path[:i], path[i + 1:] + + +def _path_stat(path): + """Stat the path. + + Made a separate function to make it easier to override in experiments + (e.g. cache stat results). + + """ + return _os.stat(path) + + +def _path_is_mode_type(path, mode): + """Test whether the path is the specified mode type.""" + try: + stat_info = _path_stat(path) + except OSError: + return False + return (stat_info.st_mode & 0o170000) == mode + + +def _path_isfile(path): + """Replacement for os.path.isfile.""" + return _path_is_mode_type(path, 0o100000) + + +def _path_isdir(path): + """Replacement for os.path.isdir.""" + if not path: + path = _os.getcwd() + return _path_is_mode_type(path, 0o040000) + + +if _MS_WINDOWS: + def _path_isabs(path): + """Replacement for os.path.isabs.""" + if not path: + return False + root = _os._path_splitroot(path)[0].replace('/', '\\') + return len(root) > 1 and (root.startswith('\\\\') or root.endswith('\\')) + +else: + def _path_isabs(path): + """Replacement for os.path.isabs.""" + return path.startswith(path_separators) + + +def _path_abspath(path): + """Replacement for os.path.abspath.""" + if not _path_isabs(path): + for sep in path_separators: + path = path.removeprefix(f".{sep}") + return _path_join(_os.getcwd(), path) + else: + return path + + +def _write_atomic(path, data, mode=0o666): + """Best-effort function to write data to a path atomically. + Be prepared to handle a FileExistsError if concurrent writing of the + temporary file is attempted.""" + # id() is used to generate a pseudo-random filename. + path_tmp = f'{path}.{id(path)}' + fd = _os.open(path_tmp, + _os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666) + try: + # We first write data to a temporary file, and then use os.replace() to + # perform an atomic rename. + with _io.open(fd, 'wb') as file: + file.write(data) + _os.replace(path_tmp, path) + except OSError: + try: + _os.unlink(path_tmp) + except OSError: + pass + raise + + +_code_type = type(_write_atomic.__code__) + +MAGIC_NUMBER = _imp.pyc_magic_number_token.to_bytes(4, 'little') + +_PYCACHE = '__pycache__' +_OPT = 'opt-' + +SOURCE_SUFFIXES = ['.py'] +if _MS_WINDOWS: + SOURCE_SUFFIXES.append('.pyw') + +EXTENSION_SUFFIXES = _imp.extension_suffixes() + +BYTECODE_SUFFIXES = ['.pyc'] +# Deprecated. +DEBUG_BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES = BYTECODE_SUFFIXES + +def cache_from_source(path, debug_override=None, *, optimization=None): + """Given the path to a .py file, return the path to its .pyc file. + + The .py file does not need to exist; this simply returns the path to the + .pyc file calculated as if the .py file were imported. + + The 'optimization' parameter controls the presumed optimization level of + the bytecode file. If 'optimization' is not None, the string representation + of the argument is taken and verified to be alphanumeric (else ValueError + is raised). 
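The _write_atomic() helper above uses the usual write-then-rename pattern; a standalone sketch with the public os module (names invented for illustration):

    import os

    def write_atomic_sketch(path, data, mode=0o666):
        tmp = f'{path}.{os.getpid()}'   # quasi-unique temporary name
        fd = os.open(tmp, os.O_EXCL | os.O_CREAT | os.O_WRONLY, mode)
        try:
            with open(fd, 'wb') as f:
                f.write(data)
            os.replace(tmp, path)       # atomic swap; readers never see partial data
        except OSError:
            try:
                os.unlink(tmp)
            except OSError:
                pass
            raise
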
+ + The debug_override parameter is deprecated. If debug_override is not None, + a True value is the same as setting 'optimization' to the empty string + while a False value is equivalent to setting 'optimization' to '1'. + + If sys.implementation.cache_tag is None then NotImplementedError is raised. + + """ + if debug_override is not None: + _warnings.warn('the debug_override parameter is deprecated; use ' + "'optimization' instead", DeprecationWarning) + if optimization is not None: + message = 'debug_override or optimization must be set to None' + raise TypeError(message) + optimization = '' if debug_override else 1 + path = _os.fspath(path) + head, tail = _path_split(path) + base, sep, rest = tail.rpartition('.') + tag = sys.implementation.cache_tag + if tag is None: + raise NotImplementedError('sys.implementation.cache_tag is None') + almost_filename = ''.join([(base if base else rest), sep, tag]) + if optimization is None: + if sys.flags.optimize == 0: + optimization = '' + else: + optimization = sys.flags.optimize + optimization = str(optimization) + if optimization != '': + if not optimization.isalnum(): + raise ValueError(f'{optimization!r} is not alphanumeric') + almost_filename = f'{almost_filename}.{_OPT}{optimization}' + filename = almost_filename + BYTECODE_SUFFIXES[0] + if sys.pycache_prefix is not None: + # We need an absolute path to the py file to avoid the possibility of + # collisions within sys.pycache_prefix, if someone has two different + # `foo/bar.py` on their system and they import both of them using the + # same sys.pycache_prefix. Let's say sys.pycache_prefix is + # `C:\Bytecode`; the idea here is that if we get `Foo\Bar`, we first + # make it absolute (`C:\Somewhere\Foo\Bar`), then make it root-relative + # (`Somewhere\Foo\Bar`), so we end up placing the bytecode file in an + # unambiguous `C:\Bytecode\Somewhere\Foo\Bar\`. + head = _path_abspath(head) + + # Strip initial drive from a Windows path. We know we have an absolute + # path here, so the second part of the check rules out a POSIX path that + # happens to contain a colon at the second character. + # Slicing avoids issues with an empty (or short) `head`. + if head[1:2] == ':' and head[0:1] not in path_separators: + head = head[2:] + + # Strip initial path separator from `head` to complete the conversion + # back to a root-relative path before joining. + return _path_join( + sys.pycache_prefix, + head.lstrip(path_separators), + filename, + ) + return _path_join(head, _PYCACHE, filename) + + +def source_from_cache(path): + """Given the path to a .pyc. file, return the path to its .py file. + + The .pyc file does not need to exist; this simply returns the path to + the .py file calculated to correspond to the .pyc file. If path does + not conform to PEP 3147/488 format, ValueError will be raised. If + sys.implementation.cache_tag is None then NotImplementedError is raised. 
+ + """ + if sys.implementation.cache_tag is None: + raise NotImplementedError('sys.implementation.cache_tag is None') + path = _os.fspath(path) + head, pycache_filename = _path_split(path) + found_in_pycache_prefix = False + if sys.pycache_prefix is not None: + stripped_path = sys.pycache_prefix.rstrip(path_separators) + if head.startswith(stripped_path + path_sep): + head = head[len(stripped_path):] + found_in_pycache_prefix = True + if not found_in_pycache_prefix: + head, pycache = _path_split(head) + if pycache != _PYCACHE: + raise ValueError(f'{_PYCACHE} not bottom-level directory in ' + f'{path!r}') + dot_count = pycache_filename.count('.') + if dot_count not in {2, 3}: + raise ValueError(f'expected only 2 or 3 dots in {pycache_filename!r}') + elif dot_count == 3: + optimization = pycache_filename.rsplit('.', 2)[-2] + if not optimization.startswith(_OPT): + raise ValueError("optimization portion of filename does not start " + f"with {_OPT!r}") + opt_level = optimization[len(_OPT):] + if not opt_level.isalnum(): + raise ValueError(f"optimization level {optimization!r} is not an " + "alphanumeric value") + base_filename = pycache_filename.partition('.')[0] + return _path_join(head, base_filename + SOURCE_SUFFIXES[0]) + + +def _get_sourcefile(bytecode_path): + """Convert a bytecode file path to a source path (if possible). + + This function exists purely for backwards-compatibility for + PyImport_ExecCodeModuleWithFilenames() in the C API. + + """ + if len(bytecode_path) == 0: + return None + rest, _, extension = bytecode_path.rpartition('.') + if not rest or extension.lower()[-3:-1] != 'py': + return bytecode_path + try: + source_path = source_from_cache(bytecode_path) + except (NotImplementedError, ValueError): + source_path = bytecode_path[:-1] + return source_path if _path_isfile(source_path) else bytecode_path + + +def _get_cached(filename): + if filename.endswith(tuple(SOURCE_SUFFIXES)): + try: + return cache_from_source(filename) + except NotImplementedError: + pass + elif filename.endswith(tuple(BYTECODE_SUFFIXES)): + return filename + else: + return None + + +def _calc_mode(path): + """Calculate the mode permissions for a bytecode file.""" + try: + mode = _path_stat(path).st_mode + except OSError: + mode = 0o666 + # We always ensure write access so we can update cached files + # later even when the source files are read-only on Windows (#6074) + mode |= 0o200 + return mode + + +def _check_name(method): + """Decorator to verify that the module being requested matches the one the + loader can handle. + + The first argument (self) must define _name which the second argument is + compared against. If the comparison fails then ImportError is raised. + + """ + def _check_name_wrapper(self, name=None, *args, **kwargs): + if name is None: + name = self.name + elif self.name != name: + raise ImportError('loader for %s cannot handle %s' % + (self.name, name), name=name) + return method(self, name, *args, **kwargs) + + # FIXME: @_check_name is used to define class methods before the + # _bootstrap module is set by _set_bootstrap_module(). 
+ if _bootstrap is not None: + _wrap = _bootstrap._wrap + else: + def _wrap(new, old): + for replace in ['__module__', '__name__', '__qualname__', '__doc__']: + if hasattr(old, replace): + setattr(new, replace, getattr(old, replace)) + new.__dict__.update(old.__dict__) + + _wrap(_check_name_wrapper, method) + return _check_name_wrapper + + +def _classify_pyc(data, name, exc_details): + """Perform basic validity checking of a pyc header and return the flags field, + which determines how the pyc should be further validated against the source. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required, though.) + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + ImportError is raised when the magic number is incorrect or when the flags + field is invalid. EOFError is raised when the data is found to be truncated. + + """ + magic = data[:4] + if magic != MAGIC_NUMBER: + message = f'bad magic number in {name!r}: {magic!r}' + _bootstrap._verbose_message('{}', message) + raise ImportError(message, **exc_details) + if len(data) < 16: + message = f'reached EOF while reading pyc header of {name!r}' + _bootstrap._verbose_message('{}', message) + raise EOFError(message) + flags = _unpack_uint32(data[4:8]) + # Only the first two flags are defined. + if flags & ~0b11: + message = f'invalid flags {flags!r} in {name!r}' + raise ImportError(message, **exc_details) + return flags + + +def _validate_timestamp_pyc(data, source_mtime, source_size, name, + exc_details): + """Validate a pyc against the source last-modified time. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_mtime* is the last modified timestamp of the source file. + + *source_size* is None or the size of the source file in bytes. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + """ + if _unpack_uint32(data[8:12]) != (source_mtime & 0xFFFFFFFF): + message = f'bytecode is stale for {name!r}' + _bootstrap._verbose_message('{}', message) + raise ImportError(message, **exc_details) + if (source_size is not None and + _unpack_uint32(data[12:16]) != (source_size & 0xFFFFFFFF)): + raise ImportError(f'bytecode is stale for {name!r}', **exc_details) + + +def _validate_hash_pyc(data, source_hash, name, exc_details): + """Validate a hash-based pyc by checking the real source hash against the one in + the pyc header. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_hash* is the importlib.util.source_hash() of the source file. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. 
+ + """ + if data[8:16] != source_hash: + raise ImportError( + f'hash in bytecode doesn\'t match hash of source {name!r}', + **exc_details, + ) + + +def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None): + """Compile bytecode as found in a pyc.""" + code = marshal.loads(data) + if isinstance(code, _code_type): + _bootstrap._verbose_message('code object from {!r}', bytecode_path) + if source_path is not None: + _imp._fix_co_filename(code, source_path) + return code + else: + raise ImportError(f'Non-code object in {bytecode_path!r}', + name=name, path=bytecode_path) + + +def _code_to_timestamp_pyc(code, mtime=0, source_size=0): + "Produce the data for a timestamp-based pyc." + data = bytearray(MAGIC_NUMBER) + data.extend(_pack_uint32(0)) + data.extend(_pack_uint32(mtime)) + data.extend(_pack_uint32(source_size)) + data.extend(marshal.dumps(code)) + return data + + +def _code_to_hash_pyc(code, source_hash, checked=True): + "Produce the data for a hash-based pyc." + data = bytearray(MAGIC_NUMBER) + flags = 0b1 | checked << 1 + data.extend(_pack_uint32(flags)) + assert len(source_hash) == 8 + data.extend(source_hash) + data.extend(marshal.dumps(code)) + return data + + +def decode_source(source_bytes): + """Decode bytes representing source code and return the string. + + Universal newline support is used in the decoding. + """ + import tokenize # To avoid bootstrap issues. + source_bytes_readline = _io.BytesIO(source_bytes).readline + encoding = tokenize.detect_encoding(source_bytes_readline) + newline_decoder = _io.IncrementalNewlineDecoder(None, True) + return newline_decoder.decode(source_bytes.decode(encoding[0])) + + +# Module specifications ####################################################### + +_POPULATE = object() + + +def spec_from_file_location(name, location=None, *, loader=None, + submodule_search_locations=_POPULATE): + """Return a module spec based on a file location. + + To indicate that the module is a package, set + submodule_search_locations to a list of directory paths. An + empty list is sufficient, though its not otherwise useful to the + import system. + + The loader must take a spec as its only __init__() arg. + + """ + if location is None: + # The caller may simply want a partially populated location- + # oriented spec. So we set the location to a bogus value and + # fill in as much as we can. + location = '' + if hasattr(loader, 'get_filename'): + # ExecutionLoader + try: + location = loader.get_filename(name) + except ImportError: + pass + else: + location = _os.fspath(location) + try: + location = _path_abspath(location) + except OSError: + pass + + # If the location is on the filesystem, but doesn't actually exist, + # we could return None here, indicating that the location is not + # valid. However, we don't have a good way of testing since an + # indirect location (e.g. a zip file or URL) will look like a + # non-existent file relative to the filesystem. + + spec = _bootstrap.ModuleSpec(name, loader, origin=location) + spec._set_fileattr = True + + # Pick a loader if one wasn't provided. + if loader is None: + for loader_class, suffixes in _get_supported_file_loaders(): + if location.endswith(tuple(suffixes)): + loader = loader_class(name, location) + spec.loader = loader + break + else: + return None + + # Set submodule_search_paths appropriately. + if submodule_search_locations is _POPULATE: + # Check the loader. 
+ if hasattr(loader, 'is_package'): + try: + is_package = loader.is_package(name) + except ImportError: + pass + else: + if is_package: + spec.submodule_search_locations = [] + else: + spec.submodule_search_locations = submodule_search_locations + if spec.submodule_search_locations == []: + if location: + dirname = _path_split(location)[0] + spec.submodule_search_locations.append(dirname) + + return spec + + +def _bless_my_loader(module_globals): + """Helper function for _warnings.c + + See GH#97850 for details. + """ + # 2022-10-06(warsaw): For now, this helper is only used in _warnings.c and + # that use case only has the module globals. This function could be + # extended to accept either that or a module object. However, in the + # latter case, it would be better to raise certain exceptions when looking + # at a module, which should have either a __loader__ or __spec__.loader. + # For backward compatibility, it is possible that we'll get an empty + # dictionary for the module globals, and that cannot raise an exception. + if not isinstance(module_globals, dict): + return None + + missing = object() + loader = module_globals.get('__loader__', None) + spec = module_globals.get('__spec__', missing) + + if loader is None: + if spec is missing: + # If working with a module: + # raise AttributeError('Module globals is missing a __spec__') + return None + elif spec is None: + raise ValueError('Module globals is missing a __spec__.loader') + + spec_loader = getattr(spec, 'loader', missing) + + if spec_loader in (missing, None): + if loader is None: + exc = AttributeError if spec_loader is missing else ValueError + raise exc('Module globals is missing a __spec__.loader') + _warnings.warn( + 'Module globals is missing a __spec__.loader', + DeprecationWarning) + spec_loader = loader + + assert spec_loader is not None + if loader is not None and loader != spec_loader: + _warnings.warn( + 'Module globals; __loader__ != __spec__.loader', + DeprecationWarning) + return loader + + return spec_loader + + +# Loaders ##################################################################### + +class WindowsRegistryFinder: + + """Meta path finder for modules declared in the Windows registry.""" + + REGISTRY_KEY = ( + 'Software\\Python\\PythonCore\\{sys_version}' + '\\Modules\\{fullname}') + REGISTRY_KEY_DEBUG = ( + 'Software\\Python\\PythonCore\\{sys_version}' + '\\Modules\\{fullname}\\Debug') + DEBUG_BUILD = (_MS_WINDOWS and '_d.pyd' in EXTENSION_SUFFIXES) + + @staticmethod + def _open_registry(key): + try: + return winreg.OpenKey(winreg.HKEY_CURRENT_USER, key) + except OSError: + return winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key) + + @classmethod + def _search_registry(cls, fullname): + if cls.DEBUG_BUILD: + registry_key = cls.REGISTRY_KEY_DEBUG + else: + registry_key = cls.REGISTRY_KEY + key = registry_key.format(fullname=fullname, + sys_version='%d.%d' % sys.version_info[:2]) + try: + with cls._open_registry(key) as hkey: + filepath = winreg.QueryValue(hkey, '') + except OSError: + return None + return filepath + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + _warnings.warn('importlib.machinery.WindowsRegistryFinder is ' + 'deprecated; use site configuration instead. 
' + 'Future versions of Python may not enable this ' + 'finder by default.', + DeprecationWarning, stacklevel=2) + + filepath = cls._search_registry(fullname) + if filepath is None: + return None + try: + _path_stat(filepath) + except OSError: + return None + for loader, suffixes in _get_supported_file_loaders(): + if filepath.endswith(tuple(suffixes)): + spec = _bootstrap.spec_from_loader(fullname, + loader(fullname, filepath), + origin=filepath) + return spec + + +class _LoaderBasics: + + """Base class of common code needed by both SourceLoader and + SourcelessFileLoader.""" + + def is_package(self, fullname): + """Concrete implementation of InspectLoader.is_package by checking if + the path returned by get_filename has a filename of '__init__.py'.""" + filename = _path_split(self.get_filename(fullname))[1] + filename_base = filename.rsplit('.', 1)[0] + tail_name = fullname.rpartition('.')[2] + return filename_base == '__init__' and tail_name != '__init__' + + def create_module(self, spec): + """Use default semantics for module creation.""" + + def exec_module(self, module): + """Execute the module.""" + code = self.get_code(module.__name__) + if code is None: + raise ImportError(f'cannot load module {module.__name__!r} when ' + 'get_code() returns None') + _bootstrap._call_with_frames_removed(exec, code, module.__dict__) + + def load_module(self, fullname): + """This method is deprecated.""" + # Warning implemented in _load_module_shim(). + return _bootstrap._load_module_shim(self, fullname) + + +class SourceLoader(_LoaderBasics): + + def path_mtime(self, path): + """Optional method that returns the modification time (an int) for the + specified path (a str). + + Raises OSError when the path cannot be handled. + """ + raise OSError + + def path_stats(self, path): + """Optional method returning a metadata dict for the specified + path (a str). + + Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. + + Implementing this method allows the loader to read bytecode files. + Raises OSError when the path cannot be handled. + """ + return {'mtime': self.path_mtime(path)} + + def _cache_bytecode(self, source_path, cache_path, data): + """Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + + The source path is needed in order to correctly transfer permissions + """ + # For backwards compatibility, we delegate to set_data() + return self.set_data(cache_path, data) + + def set_data(self, path, data): + """Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + """ + + + def get_source(self, fullname): + """Concrete implementation of InspectLoader.get_source.""" + path = self.get_filename(fullname) + try: + source_bytes = self.get_data(path) + except OSError as exc: + raise ImportError('source not available through get_data()', + name=fullname) from exc + return decode_source(source_bytes) + + def source_to_code(self, data, path, *, _optimize=-1): + """Return the code object compiled from source. + + The 'data' argument can be any object type that compile() supports. + """ + return _bootstrap._call_with_frames_removed(compile, data, path, 'exec', + dont_inherit=True, optimize=_optimize) + + def get_code(self, fullname): + """Concrete implementation of InspectLoader.get_code. 
+ + Reading of bytecode requires path_stats to be implemented. To write + bytecode, set_data must also be implemented. + + """ + source_path = self.get_filename(fullname) + source_mtime = None + source_bytes = None + source_hash = None + hash_based = False + check_source = True + try: + bytecode_path = cache_from_source(source_path) + except NotImplementedError: + bytecode_path = None + else: + try: + st = self.path_stats(source_path) + except OSError: + pass + else: + source_mtime = int(st['mtime']) + try: + data = self.get_data(bytecode_path) + except OSError: + pass + else: + exc_details = { + 'name': fullname, + 'path': bytecode_path, + } + try: + flags = _classify_pyc(data, fullname, exc_details) + bytes_data = memoryview(data)[16:] + hash_based = flags & 0b1 != 0 + if hash_based: + check_source = flags & 0b10 != 0 + if (_imp.check_hash_based_pycs != 'never' and + (check_source or + _imp.check_hash_based_pycs == 'always')): + source_bytes = self.get_data(source_path) + source_hash = _imp.source_hash( + _imp.pyc_magic_number_token, + source_bytes, + ) + _validate_hash_pyc(data, source_hash, fullname, + exc_details) + else: + _validate_timestamp_pyc( + data, + source_mtime, + st['size'], + fullname, + exc_details, + ) + except (ImportError, EOFError): + pass + else: + _bootstrap._verbose_message('{} matches {}', bytecode_path, + source_path) + return _compile_bytecode(bytes_data, name=fullname, + bytecode_path=bytecode_path, + source_path=source_path) + if source_bytes is None: + source_bytes = self.get_data(source_path) + code_object = self.source_to_code(source_bytes, source_path) + _bootstrap._verbose_message('code object from {}', source_path) + if (not sys.dont_write_bytecode and bytecode_path is not None and + source_mtime is not None): + if hash_based: + if source_hash is None: + source_hash = _imp.source_hash(_imp.pyc_magic_number_token, + source_bytes) + data = _code_to_hash_pyc(code_object, source_hash, check_source) + else: + data = _code_to_timestamp_pyc(code_object, source_mtime, + len(source_bytes)) + try: + self._cache_bytecode(source_path, bytecode_path, data) + except NotImplementedError: + pass + return code_object + + +class FileLoader: + + """Base file loader class which implements the loader protocol methods that + require file system usage.""" + + def __init__(self, fullname, path): + """Cache the module name and the path to the file found by the + finder.""" + self.name = fullname + self.path = path + + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self.__dict__ == other.__dict__) + + def __hash__(self): + return hash(self.name) ^ hash(self.path) + + @_check_name + def load_module(self, fullname): + """Load a module from a file. + + This method is deprecated. Use exec_module() instead. + + """ + # The only reason for this method is for the name check. + # Issue #14857: Avoid the zero-argument form of super so the implementation + # of that form can be updated without breaking the frozen module. 
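The get_code() pipeline above is what a custom importlib.abc.SourceLoader subclass inherits for free; a minimal in-memory loader sketch (all names invented):

    from importlib.abc import SourceLoader
    from importlib.util import spec_from_loader, module_from_spec

    SOURCES = {'hello': "MSG = 'hi'\n"}

    class DictLoader(SourceLoader):
        def get_filename(self, fullname):
            return f'<memory:{fullname}>'
        def get_data(self, path):                  # must raise OSError on a miss
            name = path.removeprefix('<memory:').removesuffix('>')
            try:
                return SOURCES[name].encode('utf-8')
            except KeyError:
                raise OSError(path) from None

    spec = spec_from_loader('hello', DictLoader())
    mod = module_from_spec(spec)
    spec.loader.exec_module(mod)
    assert mod.MSG == 'hi'
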
+ return super(FileLoader, self).load_module(fullname) + + @_check_name + def get_filename(self, fullname): + """Return the path to the source file as found by the finder.""" + return self.path + + def get_data(self, path): + """Return the data from path as raw bytes.""" + if isinstance(self, (SourceLoader, SourcelessFileLoader, ExtensionFileLoader)): + with _io.open_code(str(path)) as file: + return file.read() + else: + with _io.FileIO(path, 'r') as file: + return file.read() + + @_check_name + def get_resource_reader(self, module): + from importlib.readers import FileReader + return FileReader(self) + + +class SourceFileLoader(FileLoader, SourceLoader): + + """Concrete implementation of SourceLoader using the file system.""" + + def path_stats(self, path): + """Return the metadata for the path.""" + st = _path_stat(path) + return {'mtime': st.st_mtime, 'size': st.st_size} + + def _cache_bytecode(self, source_path, bytecode_path, data): + # Adapt between the two APIs + mode = _calc_mode(source_path) + return self.set_data(bytecode_path, data, _mode=mode) + + def set_data(self, path, data, *, _mode=0o666): + """Write bytes data to a file.""" + parent, filename = _path_split(path) + path_parts = [] + # Figure out what directories are missing. + while parent and not _path_isdir(parent): + parent, part = _path_split(parent) + path_parts.append(part) + # Create needed directories. + for part in reversed(path_parts): + parent = _path_join(parent, part) + try: + _os.mkdir(parent) + except FileExistsError: + # Probably another Python process already created the dir. + continue + except OSError as exc: + # Could be a permission error, read-only filesystem: just forget + # about writing the data. + _bootstrap._verbose_message('could not create {!r}: {!r}', + parent, exc) + return + try: + _write_atomic(path, data, _mode) + _bootstrap._verbose_message('created {!r}', path) + except OSError as exc: + # Same as above: just don't write the bytecode. + _bootstrap._verbose_message('could not create {!r}: {!r}', path, + exc) + + +class SourcelessFileLoader(FileLoader, _LoaderBasics): + + """Loader which handles sourceless file imports.""" + + def get_code(self, fullname): + path = self.get_filename(fullname) + data = self.get_data(path) + # Call _classify_pyc to do basic validation of the pyc but ignore the + # result. There's no source to check against. + exc_details = { + 'name': fullname, + 'path': path, + } + _classify_pyc(data, fullname, exc_details) + return _compile_bytecode( + memoryview(data)[16:], + name=fullname, + bytecode_path=path, + ) + + def get_source(self, fullname): + """Return None as there is no source code.""" + return None + + +class ExtensionFileLoader(FileLoader, _LoaderBasics): + + """Loader for extension modules. + + The constructor is designed to work with FileFinder. 
+ + """ + + def __init__(self, name, path): + self.name = name + self.path = path + + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self.__dict__ == other.__dict__) + + def __hash__(self): + return hash(self.name) ^ hash(self.path) + + def create_module(self, spec): + """Create an uninitialized extension module""" + module = _bootstrap._call_with_frames_removed( + _imp.create_dynamic, spec) + _bootstrap._verbose_message('extension module {!r} loaded from {!r}', + spec.name, self.path) + return module + + def exec_module(self, module): + """Initialize an extension module""" + _bootstrap._call_with_frames_removed(_imp.exec_dynamic, module) + _bootstrap._verbose_message('extension module {!r} executed from {!r}', + self.name, self.path) + + def is_package(self, fullname): + """Return True if the extension module is a package.""" + file_name = _path_split(self.path)[1] + return any(file_name == '__init__' + suffix + for suffix in EXTENSION_SUFFIXES) + + def get_code(self, fullname): + """Return None as an extension module cannot create a code object.""" + return None + + def get_source(self, fullname): + """Return None as extension modules have no source code.""" + return None + + @_check_name + def get_filename(self, fullname): + """Return the path to the source file as found by the finder.""" + return self.path + + +class _NamespacePath: + """Represents a namespace package's path. It uses the module name + to find its parent module, and from there it looks up the parent's + __path__. When this changes, the module's own path is recomputed, + using path_finder. For top-level modules, the parent module's path + is sys.path.""" + + # When invalidate_caches() is called, this epoch is incremented + # https://bugs.python.org/issue45703 + _epoch = 0 + + def __init__(self, name, path, path_finder): + self._name = name + self._path = path + self._last_parent_path = tuple(self._get_parent_path()) + self._last_epoch = self._epoch + self._path_finder = path_finder + + def _find_parent_path_names(self): + """Returns a tuple of (parent-module-name, parent-path-attr-name)""" + parent, dot, me = self._name.rpartition('.') + if dot == '': + # This is a top-level module. sys.path contains the parent path. + return 'sys', 'path' + # Not a top-level module. parent-module.__path__ contains the + # parent path. 
+ return parent, '__path__' + + def _get_parent_path(self): + parent_module_name, path_attr_name = self._find_parent_path_names() + return getattr(sys.modules[parent_module_name], path_attr_name) + + def _recalculate(self): + # If the parent's path has changed, recalculate _path + parent_path = tuple(self._get_parent_path()) # Make a copy + if parent_path != self._last_parent_path or self._epoch != self._last_epoch: + spec = self._path_finder(self._name, parent_path) + # Note that no changes are made if a loader is returned, but we + # do remember the new parent path + if spec is not None and spec.loader is None: + if spec.submodule_search_locations: + self._path = spec.submodule_search_locations + self._last_parent_path = parent_path # Save the copy + self._last_epoch = self._epoch + return self._path + + def __iter__(self): + return iter(self._recalculate()) + + def __getitem__(self, index): + return self._recalculate()[index] + + def __setitem__(self, index, path): + self._path[index] = path + + def __len__(self): + return len(self._recalculate()) + + def __repr__(self): + return f'_NamespacePath({self._path!r})' + + def __contains__(self, item): + return item in self._recalculate() + + def append(self, item): + self._path.append(item) + + +# This class is actually exposed publicly in a namespace package's __loader__ +# attribute, so it should be available through a non-private name. +# https://github.com/python/cpython/issues/92054 +class NamespaceLoader: + def __init__(self, name, path, path_finder): + self._path = _NamespacePath(name, path, path_finder) + + def is_package(self, fullname): + return True + + def get_source(self, fullname): + return '' + + def get_code(self, fullname): + return compile('', '', 'exec', dont_inherit=True) + + def create_module(self, spec): + """Use default semantics for module creation.""" + + def exec_module(self, module): + pass + + def load_module(self, fullname): + """Load a namespace module. + + This method is deprecated. Use exec_module() instead. + + """ + # The import system never calls this method. + _bootstrap._verbose_message('namespace module loaded with path {!r}', + self._path) + # Warning implemented in _load_module_shim(). + return _bootstrap._load_module_shim(self, fullname) + + def get_resource_reader(self, module): + from importlib.readers import NamespaceReader + return NamespaceReader(self._path) + + +# We use this exclusively in module_from_spec() for backward-compatibility. +_NamespaceLoader = NamespaceLoader + + +# Finders ##################################################################### + +class PathFinder: + + """Meta path finder for sys.path and package __path__ attributes.""" + + @staticmethod + def invalidate_caches(): + """Call the invalidate_caches() method on all path entry finders + stored in sys.path_importer_cache (where implemented).""" + for name, finder in list(sys.path_importer_cache.items()): + # Drop entry if finder name is a relative path. The current + # working directory may have changed. 
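This cache-invalidation path is reached through the public importlib.invalidate_caches(); it matters when modules are created at runtime, because FileFinder (below) caches directory listings. A sketch, assuming a 'plugins' directory already on sys.path:

    import importlib, pathlib

    pathlib.Path('plugins/newmod.py').write_text('X = 1\n')  # created after startup
    importlib.invalidate_caches()      # drop stale FileFinder directory caches
    newmod = importlib.import_module('newmod')
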
+ if finder is None or not _path_isabs(name): + del sys.path_importer_cache[name] + elif hasattr(finder, 'invalidate_caches'): + finder.invalidate_caches() + # Also invalidate the caches of _NamespacePaths + # https://bugs.python.org/issue45703 + _NamespacePath._epoch += 1 + + from importlib.metadata import MetadataPathFinder + MetadataPathFinder.invalidate_caches() + + @staticmethod + def _path_hooks(path): + """Search sys.path_hooks for a finder for 'path'.""" + if sys.path_hooks is not None and not sys.path_hooks: + _warnings.warn('sys.path_hooks is empty', ImportWarning) + for hook in sys.path_hooks: + try: + return hook(path) + except ImportError: + continue + else: + return None + + @classmethod + def _path_importer_cache(cls, path): + """Get the finder for the path entry from sys.path_importer_cache. + + If the path entry is not in the cache, find the appropriate finder + and cache it. If no finder is available, store None. + + """ + if path == '': + try: + path = _os.getcwd() + except (FileNotFoundError, PermissionError): + # Don't cache the failure as the cwd can easily change to + # a valid directory later on. + return None + try: + finder = sys.path_importer_cache[path] + except KeyError: + finder = cls._path_hooks(path) + sys.path_importer_cache[path] = finder + return finder + + @classmethod + def _get_spec(cls, fullname, path, target=None): + """Find the loader or namespace_path for this module/package name.""" + # If this ends up being a namespace package, namespace_path is + # the list of paths that will become its __path__ + namespace_path = [] + for entry in path: + if not isinstance(entry, str): + continue + finder = cls._path_importer_cache(entry) + if finder is not None: + spec = finder.find_spec(fullname, target) + if spec is None: + continue + if spec.loader is not None: + return spec + portions = spec.submodule_search_locations + if portions is None: + raise ImportError('spec missing loader') + # This is possibly part of a namespace package. + # Remember these path entries (if any) for when we + # create a namespace package, and continue iterating + # on path. + namespace_path.extend(portions) + else: + spec = _bootstrap.ModuleSpec(fullname, None) + spec.submodule_search_locations = namespace_path + return spec + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + """Try to find a spec for 'fullname' on sys.path or 'path'. + + The search is based on sys.path_hooks and sys.path_importer_cache. + """ + if path is None: + path = sys.path + spec = cls._get_spec(fullname, path, target) + if spec is None: + return None + elif spec.loader is None: + namespace_path = spec.submodule_search_locations + if namespace_path: + # We found at least one namespace path. Return a spec which + # can create the namespace package. + spec.origin = None + spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec) + return spec + else: + return None + else: + return spec + + @staticmethod + def find_distributions(*args, **kwargs): + """ + Find distributions. + + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching ``context.name`` + (or all names if ``None`` indicated) along the paths in the list + of directories ``context.path``. + """ + from importlib.metadata import MetadataPathFinder + return MetadataPathFinder.find_distributions(*args, **kwargs) + + +class FileFinder: + + """File-based finder. 
+ + Interactions with the file system are cached for performance, being + refreshed when the directory the finder is handling has been modified. + + """ + + def __init__(self, path, *loader_details): + """Initialize with the path to search on and a variable number of + 2-tuples containing the loader and the file suffixes the loader + recognizes.""" + loaders = [] + for loader, suffixes in loader_details: + loaders.extend((suffix, loader) for suffix in suffixes) + self._loaders = loaders + # Base (directory) path + if not path or path == '.': + self.path = _os.getcwd() + else: + self.path = _path_abspath(path) + self._path_mtime = -1 + self._path_cache = set() + self._relaxed_path_cache = set() + + def invalidate_caches(self): + """Invalidate the directory mtime.""" + self._path_mtime = -1 + + def _get_spec(self, loader_class, fullname, path, smsl, target): + loader = loader_class(fullname, path) + return spec_from_file_location(fullname, path, loader=loader, + submodule_search_locations=smsl) + + def find_spec(self, fullname, target=None): + """Try to find a spec for the specified module. + + Returns the matching spec, or None if not found. + """ + is_namespace = False + tail_module = fullname.rpartition('.')[2] + try: + mtime = _path_stat(self.path or _os.getcwd()).st_mtime + except OSError: + mtime = -1 + if mtime != self._path_mtime: + self._fill_cache() + self._path_mtime = mtime + # tail_module keeps the original casing, for __file__ and friends + if _relax_case(): + cache = self._relaxed_path_cache + cache_module = tail_module.lower() + else: + cache = self._path_cache + cache_module = tail_module + # Check if the module is the name of a directory (and thus a package). + if cache_module in cache: + base_path = _path_join(self.path, tail_module) + for suffix, loader_class in self._loaders: + init_filename = '__init__' + suffix + full_path = _path_join(base_path, init_filename) + if _path_isfile(full_path): + return self._get_spec(loader_class, fullname, full_path, [base_path], target) + else: + # If a namespace package, return the path if we don't + # find a module in the next section. + is_namespace = _path_isdir(base_path) + # Check for a file w/ a proper suffix exists. + for suffix, loader_class in self._loaders: + try: + full_path = _path_join(self.path, tail_module + suffix) + except ValueError: + return None + _bootstrap._verbose_message('trying {}', full_path, verbosity=2) + if cache_module + suffix in cache: + if _path_isfile(full_path): + return self._get_spec(loader_class, fullname, full_path, + None, target) + if is_namespace: + _bootstrap._verbose_message('possible namespace for {}', base_path) + spec = _bootstrap.ModuleSpec(fullname, None) + spec.submodule_search_locations = [base_path] + return spec + return None + + def _fill_cache(self): + """Fill the cache of potential modules and packages for this directory.""" + path = self.path + try: + contents = _os.listdir(path or _os.getcwd()) + except (FileNotFoundError, PermissionError, NotADirectoryError): + # Directory has either been removed, turned into a file, or made + # unreadable. + contents = [] + # We store two cached versions, to handle runtime changes of the + # PYTHONCASEOK environment variable. + if not sys.platform.startswith('win'): + self._path_cache = set(contents) + else: + # Windows users can import modules with case-insensitive file + # suffixes (for legacy reasons). Make the suffix lowercase here + # so it's done once instead of for every import. 
This is safe as + # the specified suffixes to check against are always specified in a + # case-sensitive manner. + lower_suffix_contents = set() + for item in contents: + name, dot, suffix = item.partition('.') + if dot: + new_name = f'{name}.{suffix.lower()}' + else: + new_name = name + lower_suffix_contents.add(new_name) + self._path_cache = lower_suffix_contents + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): + self._relaxed_path_cache = {fn.lower() for fn in contents} + + @classmethod + def path_hook(cls, *loader_details): + """A class method which returns a closure to use on sys.path_hook + which will return an instance using the specified loaders and the path + called on the closure. + + If the path called on the closure is not a directory, ImportError is + raised. + + """ + def path_hook_for_FileFinder(path): + """Path hook for importlib.machinery.FileFinder.""" + if not _path_isdir(path): + raise ImportError('only directories are supported', path=path) + return cls(path, *loader_details) + + return path_hook_for_FileFinder + + def __repr__(self): + return f'FileFinder({self.path!r})' + + +class AppleFrameworkLoader(ExtensionFileLoader): + """A loader for modules that have been packaged as frameworks for + compatibility with Apple's iOS App Store policies. + """ + def create_module(self, spec): + # If the ModuleSpec has been created by the FileFinder, it will have + # been created with an origin pointing to the .fwork file. We need to + # redirect this to the location in the Frameworks folder, using the + # content of the .fwork file. + if spec.origin.endswith(".fwork"): + with _io.FileIO(spec.origin, 'r') as file: + framework_binary = file.read().decode().strip() + bundle_path = _path_split(sys.executable)[0] + spec.origin = _path_join(bundle_path, framework_binary) + + # If the loader is created based on the spec for a loaded module, the + # path will be pointing at the Framework location. If this occurs, + # get the original .fwork location to use as the module's __file__. + if self.path.endswith(".fwork"): + path = self.path + else: + with _io.FileIO(self.path + ".origin", 'r') as file: + origin = file.read().decode().strip() + bundle_path = _path_split(sys.executable)[0] + path = _path_join(bundle_path, origin) + + module = _bootstrap._call_with_frames_removed(_imp.create_dynamic, spec) + + _bootstrap._verbose_message( + "Apple framework extension module {!r} loaded from {!r} (path {!r})", + spec.name, + spec.origin, + path, + ) + + # Ensure that the __file__ points at the .fwork location + try: + module.__file__ = path + except AttributeError: + # Not important enough to report. + # (The error is also ignored in _bootstrap._init_module_attrs or + # import_run_extension in import.c) + pass + + return module + +# Import setup ############################################################### + +def _fix_up_module(ns, name, pathname, cpathname=None): + # This function is used by PyImport_ExecCodeModuleObject(). + loader = ns.get('__loader__') + spec = ns.get('__spec__') + if not loader: + if spec: + loader = spec.loader + elif pathname == cpathname: + loader = SourcelessFileLoader(name, pathname) + else: + loader = SourceFileLoader(name, pathname) + if not spec: + spec = spec_from_file_location(name, pathname, loader=loader) + if cpathname: + spec.cached = _path_abspath(cpathname) + try: + ns['__spec__'] = spec + ns['__loader__'] = loader + ns['__file__'] = pathname + ns['__cached__'] = cpathname + except Exception: + # Not important enough to report. 
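FileFinder.path_hook() above is also the public way to register a custom suffix/loader pairing; a sketch that installs a fresh hook built from the standard source loader:

    import sys
    from importlib.machinery import FileFinder, SourceFileLoader, SOURCE_SUFFIXES

    hook = FileFinder.path_hook((SourceFileLoader, SOURCE_SUFFIXES))
    sys.path_hooks.insert(0, hook)     # consulted before the default hooks
    sys.path_importer_cache.clear()    # forget finders built with the old hooks
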
+ pass + + +def _get_supported_file_loaders(): + """Returns a list of file-based module loaders. + + Each item is a tuple (loader, suffixes). + """ + extension_loaders = [] + if hasattr(_imp, 'create_dynamic'): + if sys.platform in {"ios", "tvos", "watchos"}: + extension_loaders = [(AppleFrameworkLoader, [ + suffix.replace(".so", ".fwork") + for suffix in _imp.extension_suffixes() + ])] + extension_loaders.append((ExtensionFileLoader, _imp.extension_suffixes())) + source = SourceFileLoader, SOURCE_SUFFIXES + bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES + return extension_loaders + [source, bytecode] + + +def _set_bootstrap_module(_bootstrap_module): + global _bootstrap + _bootstrap = _bootstrap_module + + +def _install(_bootstrap_module): + """Install the path-based import components.""" + _set_bootstrap_module(_bootstrap_module) + supported_loaders = _get_supported_file_loaders() + sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)]) + sys.meta_path.append(PathFinder) diff --git a/Python314_4_x86_Template/Lib/importlib/abc.py b/Python314_4_x86_Template/Lib/importlib/abc.py new file mode 100644 index 00000000..1e47495f --- /dev/null +++ b/Python314_4_x86_Template/Lib/importlib/abc.py @@ -0,0 +1,234 @@ +"""Abstract base classes related to import.""" +from . import _bootstrap_external +from . import machinery +try: + import _frozen_importlib +except ImportError as exc: + if exc.name != '_frozen_importlib': + raise + _frozen_importlib = None +try: + import _frozen_importlib_external +except ImportError: + _frozen_importlib_external = _bootstrap_external +from ._abc import Loader +import abc + + +__all__ = [ + 'Loader', 'MetaPathFinder', 'PathEntryFinder', + 'ResourceLoader', 'InspectLoader', 'ExecutionLoader', + 'FileLoader', 'SourceLoader', +] + + +def _register(abstract_cls, *classes): + for cls in classes: + abstract_cls.register(cls) + if _frozen_importlib is not None: + try: + frozen_cls = getattr(_frozen_importlib, cls.__name__) + except AttributeError: + frozen_cls = getattr(_frozen_importlib_external, cls.__name__) + abstract_cls.register(frozen_cls) + + +class MetaPathFinder(metaclass=abc.ABCMeta): + + """Abstract base class for import finders on sys.meta_path.""" + + # We don't define find_spec() here since that would break + # hasattr checks we do to support backward compatibility. + + def invalidate_caches(self): + """An optional method for clearing the finder's cache, if any. + This method is used by importlib.invalidate_caches(). + """ + +_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter, + machinery.PathFinder, machinery.WindowsRegistryFinder) + + +class PathEntryFinder(metaclass=abc.ABCMeta): + + """Abstract base class for path entry finders used by PathFinder.""" + + def invalidate_caches(self): + """An optional method for clearing the finder's cache, if any. + This method is used by PathFinder.invalidate_caches(). + """ + +_register(PathEntryFinder, machinery.FileFinder) + + +class ResourceLoader(Loader): + + """Abstract base class for loaders which can return data from their + back-end storage to facilitate reading data to perform an import. + + This ABC represents one of the optional protocols specified by PEP 302. + + For directly loading resources, use TraversableResources instead. This class + primarily exists for backwards compatibility with other ABCs in this module. + + """ + + @abc.abstractmethod + def get_data(self, path): + """Abstract method which when implemented should return the bytes for + the specified path. 
The path must be a str."""
+        raise OSError
+
+
+class InspectLoader(Loader):
+
+    """Abstract base class for loaders which support inspection about the
+    modules they can load.
+
+    This ABC represents one of the optional protocols specified by PEP 302.
+
+    """
+
+    def is_package(self, fullname):
+        """Optional method which when implemented should return whether the
+        module is a package. The fullname is a str. Returns a bool.
+
+        Raises ImportError if the module cannot be found.
+        """
+        raise ImportError
+
+    def get_code(self, fullname):
+        """Method which returns the code object for the module.
+
+        The fullname is a str. Returns a types.CodeType if possible, else
+        returns None if a code object does not make sense
+        (e.g. built-in module). Raises ImportError if the module cannot be
+        found.
+        """
+        source = self.get_source(fullname)
+        if source is None:
+            return None
+        return self.source_to_code(source)
+
+    @abc.abstractmethod
+    def get_source(self, fullname):
+        """Abstract method which should return the source code for the
+        module. The fullname is a str. Returns a str.
+
+        Raises ImportError if the module cannot be found.
+        """
+        raise ImportError
+
+    @staticmethod
+    def source_to_code(data, path='<string>'):
+        """Compile 'data' into a code object.
+
+        The 'data' argument can be anything that compile() can handle. The 'path'
+        argument should be where the data was retrieved (when applicable)."""
+        return compile(data, path, 'exec', dont_inherit=True)
+
+    exec_module = _bootstrap_external._LoaderBasics.exec_module
+    load_module = _bootstrap_external._LoaderBasics.load_module
+
+_register(InspectLoader, machinery.BuiltinImporter, machinery.FrozenImporter,
+          machinery.NamespaceLoader)
+
+
+class ExecutionLoader(InspectLoader):
+
+    """Abstract base class for loaders that wish to support the execution of
+    modules as scripts.
+
+    This ABC represents one of the optional protocols specified in PEP 302.
+
+    """
+
+    @abc.abstractmethod
+    def get_filename(self, fullname):
+        """Abstract method which should return the value that __file__ is to be
+        set to.
+
+        Raises ImportError if the module cannot be found.
+        """
+        raise ImportError
+
+    def get_code(self, fullname):
+        """Method to return the code object for fullname.
+
+        Should return None if not applicable (e.g. built-in module).
+        Raise ImportError if the module cannot be found.
+        """
+        source = self.get_source(fullname)
+        if source is None:
+            return None
+        try:
+            path = self.get_filename(fullname)
+        except ImportError:
+            return self.source_to_code(source)
+        else:
+            return self.source_to_code(source, path)
+
+_register(
+    ExecutionLoader,
+    machinery.ExtensionFileLoader,
+    machinery.AppleFrameworkLoader,
+)
+
+
+class FileLoader(_bootstrap_external.FileLoader, ResourceLoader, ExecutionLoader):
+
+    """Abstract base class partially implementing the ResourceLoader and
+    ExecutionLoader ABCs."""
+
+_register(FileLoader, machinery.SourceFileLoader,
+          machinery.SourcelessFileLoader)
+
+
+class SourceLoader(_bootstrap_external.SourceLoader, ResourceLoader, ExecutionLoader):
+
+    """Abstract base class for loading source code (and optionally any
+    corresponding bytecode).
+
+    To support loading from source code, the abstractmethods inherited from
+    ResourceLoader and ExecutionLoader need to be implemented. To also support
+    loading from bytecode, the optional methods specified directly by this ABC
+    are required.
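+    (Those optional methods, defined below, are path_stats() -- or the
+    deprecated path_mtime() -- and set_data().)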
+ + Inherited abstractmethods not implemented in this ABC: + + * ResourceLoader.get_data + * ExecutionLoader.get_filename + + """ + + def path_mtime(self, path): + """Return the (int) modification time for the path (str).""" + import warnings + warnings.warn('SourceLoader.path_mtime is deprecated in favour of ' + 'SourceLoader.path_stats().', + DeprecationWarning, stacklevel=2) + if self.path_stats.__func__ is SourceLoader.path_stats: + raise OSError + return int(self.path_stats(path)['mtime']) + + def path_stats(self, path): + """Return a metadata dict for the source pointed to by the path (str). + Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. + """ + if self.path_mtime.__func__ is SourceLoader.path_mtime: + raise OSError + return {'mtime': self.path_mtime(path)} + + def set_data(self, path, data): + """Write the bytes to the path (if possible). + + Accepts a str path and data as bytes. + + Any needed intermediary directories are to be created. If for some + reason the file cannot be written because of permissions, fail + silently. + """ + +_register(SourceLoader, machinery.SourceFileLoader) diff --git a/Python314_4_x86_Template/Lib/importlib/machinery.py b/Python314_4_x86_Template/Lib/importlib/machinery.py new file mode 100644 index 00000000..63d72644 --- /dev/null +++ b/Python314_4_x86_Template/Lib/importlib/machinery.py @@ -0,0 +1,50 @@ +"""The machinery of importlib: finders, loaders, hooks, etc.""" + +from ._bootstrap import ModuleSpec +from ._bootstrap import BuiltinImporter +from ._bootstrap import FrozenImporter +from ._bootstrap_external import ( + SOURCE_SUFFIXES, BYTECODE_SUFFIXES, EXTENSION_SUFFIXES, + DEBUG_BYTECODE_SUFFIXES as _DEBUG_BYTECODE_SUFFIXES, + OPTIMIZED_BYTECODE_SUFFIXES as _OPTIMIZED_BYTECODE_SUFFIXES +) +from ._bootstrap_external import WindowsRegistryFinder +from ._bootstrap_external import PathFinder +from ._bootstrap_external import FileFinder +from ._bootstrap_external import SourceFileLoader +from ._bootstrap_external import SourcelessFileLoader +from ._bootstrap_external import ExtensionFileLoader +from ._bootstrap_external import AppleFrameworkLoader +from ._bootstrap_external import NamespaceLoader + + +def all_suffixes(): + """Returns a list of all recognized module suffixes for this process""" + return SOURCE_SUFFIXES + BYTECODE_SUFFIXES + EXTENSION_SUFFIXES + + +__all__ = ['AppleFrameworkLoader', 'BYTECODE_SUFFIXES', 'BuiltinImporter', + 'DEBUG_BYTECODE_SUFFIXES', 'EXTENSION_SUFFIXES', + 'ExtensionFileLoader', 'FileFinder', 'FrozenImporter', 'ModuleSpec', + 'NamespaceLoader', 'OPTIMIZED_BYTECODE_SUFFIXES', 'PathFinder', + 'SOURCE_SUFFIXES', 'SourceFileLoader', 'SourcelessFileLoader', + 'WindowsRegistryFinder', 'all_suffixes'] + + +def __getattr__(name): + import warnings + + if name == 'DEBUG_BYTECODE_SUFFIXES': + warnings.warn('importlib.machinery.DEBUG_BYTECODE_SUFFIXES is ' + 'deprecated; use importlib.machinery.BYTECODE_SUFFIXES ' + 'instead.', + DeprecationWarning, stacklevel=2) + return _DEBUG_BYTECODE_SUFFIXES + elif name == 'OPTIMIZED_BYTECODE_SUFFIXES': + warnings.warn('importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES is ' + 'deprecated; use importlib.machinery.BYTECODE_SUFFIXES ' + 'instead.', + DeprecationWarning, stacklevel=2) + return _OPTIMIZED_BYTECODE_SUFFIXES + + raise AttributeError(f'module {__name__!r} has no attribute {name!r}') diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/__init__.py 
b/Python314_4_x86_Template/Lib/importlib/metadata/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/metadata/__init__.py rename to Python314_4_x86_Template/Lib/importlib/metadata/__init__.py diff --git a/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..ff59dedc Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-314.pyc new file mode 100644 index 00000000..e893c349 Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_adapters.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-314.pyc new file mode 100644 index 00000000..2e2edb7a Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_collections.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-314.pyc new file mode 100644 index 00000000..df143a44 Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_functools.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-314.pyc new file mode 100644 index 00000000..a6bb2a2b Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_itertools.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-314.pyc new file mode 100644 index 00000000..c9d6fe2e Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_meta.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_text.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_text.cpython-314.pyc new file mode 100644 index 00000000..12f2c10a Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/metadata/__pycache__/_text.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/_adapters.py b/Python314_4_x86_Template/Lib/importlib/metadata/_adapters.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/metadata/_adapters.py rename to Python314_4_x86_Template/Lib/importlib/metadata/_adapters.py diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/_collections.py b/Python314_4_x86_Template/Lib/importlib/metadata/_collections.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/metadata/_collections.py rename to Python314_4_x86_Template/Lib/importlib/metadata/_collections.py diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/_functools.py b/Python314_4_x86_Template/Lib/importlib/metadata/_functools.py similarity index 100% rename from 
Python313_13_x86_Template/Lib/importlib/metadata/_functools.py rename to Python314_4_x86_Template/Lib/importlib/metadata/_functools.py diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/_itertools.py b/Python314_4_x86_Template/Lib/importlib/metadata/_itertools.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/metadata/_itertools.py rename to Python314_4_x86_Template/Lib/importlib/metadata/_itertools.py diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/_meta.py b/Python314_4_x86_Template/Lib/importlib/metadata/_meta.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/metadata/_meta.py rename to Python314_4_x86_Template/Lib/importlib/metadata/_meta.py diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/_text.py b/Python314_4_x86_Template/Lib/importlib/metadata/_text.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/metadata/_text.py rename to Python314_4_x86_Template/Lib/importlib/metadata/_text.py diff --git a/Python313_13_x86_Template/Lib/importlib/metadata/diagnose.py b/Python314_4_x86_Template/Lib/importlib/metadata/diagnose.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/metadata/diagnose.py rename to Python314_4_x86_Template/Lib/importlib/metadata/diagnose.py diff --git a/Python313_13_x86_Template/Lib/importlib/readers.py b/Python314_4_x86_Template/Lib/importlib/readers.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/readers.py rename to Python314_4_x86_Template/Lib/importlib/readers.py diff --git a/Python313_13_x86_Template/Lib/importlib/resources/__init__.py b/Python314_4_x86_Template/Lib/importlib/resources/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/resources/__init__.py rename to Python314_4_x86_Template/Lib/importlib/resources/__init__.py diff --git a/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..0f97c0fd Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-314.pyc new file mode 100644 index 00000000..541dda2a Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_adapters.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_common.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_common.cpython-314.pyc new file mode 100644 index 00000000..9fa88cd3 Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_common.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_functional.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_functional.cpython-314.pyc new file mode 100644 index 00000000..59ba922e Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_functional.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-314.pyc new file mode 100644 index 00000000..5311fb4e 
Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/_itertools.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/abc.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/abc.cpython-314.pyc
new file mode 100644
index 00000000..d56726cb
Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/abc.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/readers.cpython-314.pyc b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/readers.cpython-314.pyc
new file mode 100644
index 00000000..8b27569c
Binary files /dev/null and b/Python314_4_x86_Template/Lib/importlib/resources/__pycache__/readers.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/importlib/resources/_adapters.py b/Python314_4_x86_Template/Lib/importlib/resources/_adapters.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/importlib/resources/_adapters.py
rename to Python314_4_x86_Template/Lib/importlib/resources/_adapters.py
diff --git a/Python314_4_x86_Template/Lib/importlib/resources/_common.py b/Python314_4_x86_Template/Lib/importlib/resources/_common.py
new file mode 100644
index 00000000..4e9014c4
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/importlib/resources/_common.py
@@ -0,0 +1,211 @@
+import os
+import pathlib
+import tempfile
+import functools
+import contextlib
+import types
+import importlib
+import inspect
+import warnings
+import itertools
+
+from typing import Union, Optional, cast
+from .abc import ResourceReader, Traversable
+
+Package = Union[types.ModuleType, str]
+Anchor = Package
+
+
+def package_to_anchor(func):
+    """
+    Replace 'package' parameter as 'anchor' and warn about the change.
+
+    Other errors should fall through.
+
+    >>> files('a', 'b')
+    Traceback (most recent call last):
+    TypeError: files() takes from 0 to 1 positional arguments but 2 were given
+
+    Remove this compatibility in Python 3.14.
+    """
+    undefined = object()
+
+    @functools.wraps(func)
+    def wrapper(anchor=undefined, package=undefined):
+        if package is not undefined:
+            if anchor is not undefined:
+                return func(anchor, package)
+            warnings.warn(
+                "First parameter to files is renamed to 'anchor'",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            return func(package)
+        elif anchor is undefined:
+            return func()
+        return func(anchor)
+
+    return wrapper
+
+
+@package_to_anchor
+def files(anchor: Optional[Anchor] = None) -> Traversable:
+    """
+    Get a Traversable resource for an anchor.
+    """
+    return from_package(resolve(anchor))
+
+
+def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]:
+    """
+    Return the package's loader if it's a ResourceReader.
+    """
+    # We can't use
+    # a issubclass() check here because apparently abc.ABCMeta's
+    # __subclasscheck__() hook wants to create a weak reference to the
+    # object, but zipimport.zipimporter does not support weak references,
+    # resulting in a TypeError. That seems terrible.
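+    # A plain getattr() probe avoids the problem: loaders that expose
+    # get_resource_reader() are used as-is, anything else yields None.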
+ spec = package.__spec__ + reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore[union-attr] + if reader is None: + return None + return reader(spec.name) # type: ignore[union-attr] + + +@functools.singledispatch +def resolve(cand: Optional[Anchor]) -> types.ModuleType: + return cast(types.ModuleType, cand) + + +@resolve.register +def _(cand: str) -> types.ModuleType: + return importlib.import_module(cand) + + +@resolve.register +def _(cand: None) -> types.ModuleType: + return resolve(_infer_caller().f_globals['__name__']) + + +def _infer_caller(): + """ + Walk the stack and find the frame of the first caller not in this module. + """ + + def is_this_file(frame_info): + return frame_info.filename == stack[0].filename + + def is_wrapper(frame_info): + return frame_info.function == 'wrapper' + + stack = inspect.stack() + not_this_file = itertools.filterfalse(is_this_file, stack) + # also exclude 'wrapper' due to singledispatch in the call stack + callers = itertools.filterfalse(is_wrapper, not_this_file) + return next(callers).frame + + +def from_package(package: types.ModuleType): + """ + Return a Traversable object for the given package. + + """ + # deferred for performance (python/cpython#109829) + from ._adapters import wrap_spec + + spec = wrap_spec(package) + reader = spec.loader.get_resource_reader(spec.name) + return reader.files() + + +@contextlib.contextmanager +def _tempfile( + reader, + suffix='', + # gh-93353: Keep a reference to call os.remove() in late Python + # finalization. + *, + _os_remove=os.remove, +): + # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' + # blocks due to the need to close the temporary file to work on Windows + # properly. + fd, raw_path = tempfile.mkstemp(suffix=suffix) + try: + try: + os.write(fd, reader()) + finally: + os.close(fd) + del reader + yield pathlib.Path(raw_path) + finally: + try: + _os_remove(raw_path) + except FileNotFoundError: + pass + + +def _temp_file(path): + return _tempfile(path.read_bytes, suffix=path.name) + + +def _is_present_dir(path: Traversable) -> bool: + """ + Some Traversables implement ``is_dir()`` to raise an + exception (i.e. ``FileNotFoundError``) when the + directory doesn't exist. This function wraps that call + to always return a boolean and only return True + if there's a dir and it exists. + """ + with contextlib.suppress(FileNotFoundError): + return path.is_dir() + return False + + +@functools.singledispatch +def as_file(path): + """ + Given a Traversable object, return that object as a + path on the local file system in a context manager. + """ + return _temp_dir(path) if _is_present_dir(path) else _temp_file(path) + + +@as_file.register(pathlib.Path) +@contextlib.contextmanager +def _(path): + """ + Degenerate behavior for pathlib.Path objects. + """ + yield path + + +@contextlib.contextmanager +def _temp_path(dir: tempfile.TemporaryDirectory): + """ + Wrap tempfile.TemporaryDirectory to return a pathlib object. + """ + with dir as result: + yield pathlib.Path(result) + + +@contextlib.contextmanager +def _temp_dir(path): + """ + Given a traversable dir, recursively replicate the whole tree + to the file system in a context manager. 
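+
+    as_file() dispatches here when the traversable reports itself as an
+    existing directory, e.g. a package directory stored in a zip archive.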
+ """ + assert path.is_dir() + with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: + yield _write_contents(temp_dir, path) + + +def _write_contents(target, source): + child = target.joinpath(source.name) + if source.is_dir(): + child.mkdir() + for item in source.iterdir(): + _write_contents(child, item) + else: + child.write_bytes(source.read_bytes()) + return child diff --git a/Python313_13_x86_Template/Lib/importlib/resources/_functional.py b/Python314_4_x86_Template/Lib/importlib/resources/_functional.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/resources/_functional.py rename to Python314_4_x86_Template/Lib/importlib/resources/_functional.py diff --git a/Python313_13_x86_Template/Lib/importlib/resources/_itertools.py b/Python314_4_x86_Template/Lib/importlib/resources/_itertools.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/resources/_itertools.py rename to Python314_4_x86_Template/Lib/importlib/resources/_itertools.py diff --git a/Python313_13_x86_Template/Lib/importlib/resources/abc.py b/Python314_4_x86_Template/Lib/importlib/resources/abc.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/resources/abc.py rename to Python314_4_x86_Template/Lib/importlib/resources/abc.py diff --git a/Python313_13_x86_Template/Lib/importlib/resources/readers.py b/Python314_4_x86_Template/Lib/importlib/resources/readers.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/resources/readers.py rename to Python314_4_x86_Template/Lib/importlib/resources/readers.py diff --git a/Python313_13_x86_Template/Lib/importlib/resources/simple.py b/Python314_4_x86_Template/Lib/importlib/resources/simple.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/resources/simple.py rename to Python314_4_x86_Template/Lib/importlib/resources/simple.py diff --git a/Python313_13_x86_Template/Lib/importlib/simple.py b/Python314_4_x86_Template/Lib/importlib/simple.py similarity index 100% rename from Python313_13_x86_Template/Lib/importlib/simple.py rename to Python314_4_x86_Template/Lib/importlib/simple.py diff --git a/Python314_4_x86_Template/Lib/importlib/util.py b/Python314_4_x86_Template/Lib/importlib/util.py new file mode 100644 index 00000000..2b564e9b --- /dev/null +++ b/Python314_4_x86_Template/Lib/importlib/util.py @@ -0,0 +1,279 @@ +"""Utility code for constructing importers, etc.""" +from ._abc import Loader +from ._bootstrap import module_from_spec +from ._bootstrap import _resolve_name +from ._bootstrap import spec_from_loader +from ._bootstrap import _find_spec +from ._bootstrap_external import MAGIC_NUMBER +from ._bootstrap_external import cache_from_source +from ._bootstrap_external import decode_source +from ._bootstrap_external import source_from_cache +from ._bootstrap_external import spec_from_file_location + +import _imp +import sys +import types + + +def source_hash(source_bytes): + "Return the hash of *source_bytes* as used in hash-based pyc files." 
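+    # _imp.pyc_magic_number_token keys the hash to this interpreter's
+    # bytecode magic number, so a hash recorded under a different bytecode
+    # format will not validate.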
+    return _imp.source_hash(_imp.pyc_magic_number_token, source_bytes)
+
+
+def resolve_name(name, package):
+    """Resolve a relative module name to an absolute one."""
+    if not name.startswith('.'):
+        return name
+    elif not package:
+        raise ImportError(f'no package specified for {repr(name)} '
+                          '(required for relative module names)')
+    level = 0
+    for character in name:
+        if character != '.':
+            break
+        level += 1
+    return _resolve_name(name[level:], package, level)
+
+
+def _find_spec_from_path(name, path=None):
+    """Return the spec for the specified module.
+
+    First, sys.modules is checked to see if the module was already imported. If
+    so, then sys.modules[name].__spec__ is returned. If that happens to be
+    set to None, then ValueError is raised. If the module is not in
+    sys.modules, then sys.meta_path is searched for a suitable spec with the
+    value of 'path' given to the finders. None is returned if no spec could
+    be found.
+
+    Dotted names do not have their parent packages implicitly imported. You will
+    most likely need to explicitly import all parent packages in the proper
+    order for a submodule to get the correct spec.
+
+    """
+    if name not in sys.modules:
+        return _find_spec(name, path)
+    else:
+        module = sys.modules[name]
+        if module is None:
+            return None
+        try:
+            spec = module.__spec__
+        except AttributeError:
+            raise ValueError(f'{name}.__spec__ is not set') from None
+        else:
+            if spec is None:
+                raise ValueError(f'{name}.__spec__ is None')
+            return spec
+
+
+def find_spec(name, package=None):
+    """Return the spec for the specified module.
+
+    First, sys.modules is checked to see if the module was already imported. If
+    so, then sys.modules[name].__spec__ is returned. If that happens to be
+    set to None, then ValueError is raised. If the module is not in
+    sys.modules, then sys.meta_path is searched for a suitable spec with the
+    value of 'path' given to the finders. None is returned if no spec could
+    be found.
+
+    If the name is for a submodule (contains a dot), the parent module is
+    automatically imported.
+
+    The name and package arguments work the same as importlib.import_module().
+    In other words, relative module names (with leading dots) work.
+
+    """
+    fullname = resolve_name(name, package) if name.startswith('.') else name
+    if fullname not in sys.modules:
+        parent_name = fullname.rpartition('.')[0]
+        if parent_name:
+            parent = __import__(parent_name, fromlist=['__path__'])
+            try:
+                parent_path = parent.__path__
+            except AttributeError as e:
+                raise ModuleNotFoundError(
+                    f"__path__ attribute not found on {parent_name!r} "
+                    f"while trying to find {fullname!r}", name=fullname) from e
+        else:
+            parent_path = None
+        return _find_spec(fullname, parent_path)
+    else:
+        module = sys.modules[fullname]
+        if module is None:
+            return None
+        try:
+            spec = module.__spec__
+        except AttributeError:
+            raise ValueError(f'{name}.__spec__ is not set') from None
+        else:
+            if spec is None:
+                raise ValueError(f'{name}.__spec__ is None')
+            return spec
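A quick sketch of how the two lookup helpers above behave, as an editor's illustration against the stock importlib rather than part of the vendored file:

    import importlib.util

    # An absolute dotted name returns a ModuleSpec; the parent package is
    # imported as needed, but the submodule itself is not.
    spec = importlib.util.find_spec("json.decoder")
    print(spec.name, spec.origin)

    # A relative name needs an anchor, either via resolve_name() or the
    # package= argument of find_spec().
    assert importlib.util.resolve_name(".decoder", "json") == "json.decoder"
    assert importlib.util.find_spec(".decoder", package="json") is not None

Neither call leaves "json.decoder" in sys.modules; only the parent package import has that side effect.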
+
+
+# Normally we would use contextlib.contextmanager. However, this module
+# is imported by runpy, which means we want to avoid any unnecessary
+# dependencies. Thus we use a class.
+
+class _incompatible_extension_module_restrictions:
+    """A context manager that can temporarily skip the compatibility check.
+
+    NOTE: This function is meant to accommodate an unusual case; one
+    which is likely to eventually go away. There is a pretty good
+    chance this is not what you were looking for.
+
+    WARNING: Using this function to disable the check can lead to
+    unexpected behavior and even crashes. It should only be used during
+    extension module development.
+
+    If "disable_check" is True then the compatibility check will not
+    happen while the context manager is active. Otherwise the check
+    *will* happen.
+
+    Normally, extensions that do not support multiple interpreters
+    may not be imported in a subinterpreter. That implies modules
+    that do not implement multi-phase init or that explicitly opt out.
+
+    Likewise for modules imported in a subinterpreter with its own GIL
+    when the extension does not support a per-interpreter GIL. This
+    implies the module does not have a Py_mod_multiple_interpreters slot
+    set to Py_MOD_PER_INTERPRETER_GIL_SUPPORTED.
+
+    In both cases, this context manager may be used to temporarily
+    disable the check for compatible extension modules.
+
+    You can get the same effect as this function by implementing the
+    basic interface of multi-phase init (PEP 489) and lying about
+    support for multiple interpreters (or per-interpreter GIL).
+    """
+
+    def __init__(self, *, disable_check):
+        self.disable_check = bool(disable_check)
+
+    def __enter__(self):
+        self.old = _imp._override_multi_interp_extensions_check(self.override)
+        return self
+
+    def __exit__(self, *args):
+        old = self.old
+        del self.old
+        _imp._override_multi_interp_extensions_check(old)
+
+    @property
+    def override(self):
+        return -1 if self.disable_check else 1
+
+
+class _LazyModule(types.ModuleType):
+
+    """A subclass of the module type which triggers loading upon attribute access."""
+
+    def __getattribute__(self, attr):
+        """Trigger the load of the module and return the attribute."""
+        __spec__ = object.__getattribute__(self, '__spec__')
+        loader_state = __spec__.loader_state
+        with loader_state['lock']:
+            # Only the first thread to get the lock should trigger the load
+            # and reset the module's class. The rest can now getattr().
+            if object.__getattribute__(self, '__class__') is _LazyModule:
+                __class__ = loader_state['__class__']
+
+                # Reentrant calls from the same thread must be allowed to proceed without
+                # triggering the load again.
+                # exec_module() and self-referential imports are the primary ways this can
+                # happen, but in any case we must return something to avoid deadlock.
+                if loader_state['is_loading']:
+                    return __class__.__getattribute__(self, attr)
+                loader_state['is_loading'] = True
+
+                __dict__ = __class__.__getattribute__(self, '__dict__')
+
+                # All module metadata must be gathered from __spec__ in order to avoid
+                # using mutated values.
+                # Get the original name to make sure no object substitution occurred
+                # in sys.modules.
+                original_name = __spec__.name
+                # Figure out exactly what attributes were mutated between the creation
+                # of the module and now.
+                attrs_then = loader_state['__dict__']
+                attrs_now = __dict__
+                attrs_updated = {}
+                for key, value in attrs_now.items():
+                    # Code that set an attribute may have kept a reference to the
+                    # assigned object, making identity more important than equality.
+                    if key not in attrs_then:
+                        attrs_updated[key] = value
+                    elif id(attrs_now[key]) != id(attrs_then[key]):
+                        attrs_updated[key] = value
+                __spec__.loader.exec_module(self)
+                # If exec_module() was used directly there is no guarantee the module
+                # object was put into sys.modules.
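+                # Compare against the name captured before exec_module() ran,
+                # so a substituted sys.modules entry is reported, not masked.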
+ if original_name in sys.modules: + if id(self) != id(sys.modules[original_name]): + raise ValueError(f"module object for {original_name!r} " + "substituted in sys.modules during a lazy " + "load") + # Update after loading since that's what would happen in an eager + # loading situation. + __dict__.update(attrs_updated) + # Finally, stop triggering this method, if the module did not + # already update its own __class__. + if isinstance(self, _LazyModule): + object.__setattr__(self, '__class__', __class__) + + return getattr(self, attr) + + def __delattr__(self, attr): + """Trigger the load and then perform the deletion.""" + # To trigger the load and raise an exception if the attribute + # doesn't exist. + self.__getattribute__(attr) + delattr(self, attr) + + +class LazyLoader(Loader): + + """A loader that creates a module which defers loading until attribute access.""" + + @staticmethod + def __check_eager_loader(loader): + if not hasattr(loader, 'exec_module'): + raise TypeError('loader must define exec_module()') + + @classmethod + def factory(cls, loader): + """Construct a callable which returns the eager loader made lazy.""" + cls.__check_eager_loader(loader) + return lambda *args, **kwargs: cls(loader(*args, **kwargs)) + + def __init__(self, loader): + self.__check_eager_loader(loader) + self.loader = loader + + def create_module(self, spec): + return self.loader.create_module(spec) + + def exec_module(self, module): + """Make the module load lazily.""" + # Threading is only needed for lazy loading, and importlib.util can + # be pulled in at interpreter startup, so defer until needed. + import threading + module.__spec__.loader = self.loader + module.__loader__ = self.loader + # Don't need to worry about deep-copying as trying to set an attribute + # on an object would have triggered the load, + # e.g. ``module.__spec__.loader = None`` would trigger a load from + # trying to access module.__spec__. + loader_state = {} + loader_state['__dict__'] = module.__dict__.copy() + loader_state['__class__'] = module.__class__ + loader_state['lock'] = threading.RLock() + loader_state['is_loading'] = False + module.__spec__.loader_state = loader_state + module.__class__ = _LazyModule + + +__all__ = ['LazyLoader', 'Loader', 'MAGIC_NUMBER', + 'cache_from_source', 'decode_source', 'find_spec', + 'module_from_spec', 'resolve_name', 'source_from_cache', + 'source_hash', 'spec_from_file_location', 'spec_from_loader'] diff --git a/Python314_4_x86_Template/Lib/inspect.py b/Python314_4_x86_Template/Lib/inspect.py new file mode 100644 index 00000000..2d229051 --- /dev/null +++ b/Python314_4_x86_Template/Lib/inspect.py @@ -0,0 +1,3409 @@ +"""Get useful information from live Python objects. + +This module encapsulates the interface provided by the internal special +attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion. +It also provides some help for examining source code and class layout. 
+
+Here are some of the useful functions provided by this module:
+
+    ismodule(), isclass(), ismethod(), ispackage(), isfunction(),
+        isgeneratorfunction(), isgenerator(), istraceback(), isframe(),
+        iscode(), isbuiltin(), isroutine() - check object types
+    getmembers() - get members of an object that satisfy a given condition
+
+    getfile(), getsourcefile(), getsource() - find an object's source code
+    getdoc(), getcomments() - get documentation on an object
+    getmodule() - determine the module that an object came from
+    getclasstree() - arrange classes so as to represent their hierarchy
+
+    getargvalues(), getcallargs() - get info about function arguments
+    getfullargspec() - same, with support for Python 3 features
+    formatargvalues() - format an argument spec
+    getouterframes(), getinnerframes() - get info about frames
+    currentframe() - get the current stack frame
+    stack(), trace() - get info about frames on the stack or in a traceback
+
+    signature() - get a Signature object for the callable
+"""
+
+# This module is in the public domain. No warranties.
+
+__author__ = ('Ka-Ping Yee <ping@lfw.org>',
+              'Yury Selivanov <yselivanov@sprymix.com>')
+
+__all__ = [
+    "AGEN_CLOSED",
+    "AGEN_CREATED",
+    "AGEN_RUNNING",
+    "AGEN_SUSPENDED",
+    "ArgInfo",
+    "Arguments",
+    "Attribute",
+    "BlockFinder",
+    "BoundArguments",
+    "BufferFlags",
+    "CORO_CLOSED",
+    "CORO_CREATED",
+    "CORO_RUNNING",
+    "CORO_SUSPENDED",
+    "CO_ASYNC_GENERATOR",
+    "CO_COROUTINE",
+    "CO_GENERATOR",
+    "CO_ITERABLE_COROUTINE",
+    "CO_NESTED",
+    "CO_NEWLOCALS",
+    "CO_NOFREE",
+    "CO_OPTIMIZED",
+    "CO_VARARGS",
+    "CO_VARKEYWORDS",
+    "CO_HAS_DOCSTRING",
+    "CO_METHOD",
+    "ClassFoundException",
+    "ClosureVars",
+    "EndOfBlock",
+    "FrameInfo",
+    "FullArgSpec",
+    "GEN_CLOSED",
+    "GEN_CREATED",
+    "GEN_RUNNING",
+    "GEN_SUSPENDED",
+    "Parameter",
+    "Signature",
+    "TPFLAGS_IS_ABSTRACT",
+    "Traceback",
+    "classify_class_attrs",
+    "cleandoc",
+    "currentframe",
+    "findsource",
+    "formatannotation",
+    "formatannotationrelativeto",
+    "formatargvalues",
+    "get_annotations",
+    "getabsfile",
+    "getargs",
+    "getargvalues",
+    "getasyncgenlocals",
+    "getasyncgenstate",
+    "getattr_static",
+    "getblock",
+    "getcallargs",
+    "getclasstree",
+    "getclosurevars",
+    "getcomments",
+    "getcoroutinelocals",
+    "getcoroutinestate",
+    "getdoc",
+    "getfile",
+    "getframeinfo",
+    "getfullargspec",
+    "getgeneratorlocals",
+    "getgeneratorstate",
+    "getinnerframes",
+    "getlineno",
+    "getmembers",
+    "getmembers_static",
+    "getmodule",
+    "getmodulename",
+    "getmro",
+    "getouterframes",
+    "getsource",
+    "getsourcefile",
+    "getsourcelines",
+    "indentsize",
+    "isabstract",
+    "isasyncgen",
+    "isasyncgenfunction",
+    "isawaitable",
+    "isbuiltin",
+    "isclass",
+    "iscode",
+    "iscoroutine",
+    "iscoroutinefunction",
+    "isdatadescriptor",
+    "isframe",
+    "isfunction",
+    "isgenerator",
+    "isgeneratorfunction",
+    "isgetsetdescriptor",
+    "ismemberdescriptor",
+    "ismethod",
+    "ismethoddescriptor",
+    "ismethodwrapper",
+    "ismodule",
+    "ispackage",
+    "isroutine",
+    "istraceback",
+    "markcoroutinefunction",
+    "signature",
+    "stack",
+    "trace",
+    "unwrap",
+    "walktree",
+]
+
+
+import abc
+from annotationlib import Format, ForwardRef
+from annotationlib import get_annotations  # re-exported
+import ast
+import dis
+import collections.abc
+import enum
+import importlib.machinery
+import itertools
+import linecache
+import os
+import re
+import sys
+import tokenize
+import token
+import types
+import functools
+import builtins
+from keyword import iskeyword
+from operator import attrgetter
+from collections import namedtuple,
OrderedDict +from weakref import ref as make_weakref + +# Create constants for the compiler flags in Include/code.h +# We try to get them from dis to avoid duplication +mod_dict = globals() +for k, v in dis.COMPILER_FLAG_NAMES.items(): + mod_dict["CO_" + v] = k +del k, v, mod_dict + +# See Include/object.h +TPFLAGS_IS_ABSTRACT = 1 << 20 + + +# ----------------------------------------------------------- type-checking +def ismodule(object): + """Return true if the object is a module.""" + return isinstance(object, types.ModuleType) + +def isclass(object): + """Return true if the object is a class.""" + return isinstance(object, type) + +def ismethod(object): + """Return true if the object is an instance method.""" + return isinstance(object, types.MethodType) + +def ispackage(object): + """Return true if the object is a package.""" + return ismodule(object) and hasattr(object, "__path__") + +def ismethoddescriptor(object): + """Return true if the object is a method descriptor. + + But not if ismethod() or isclass() or isfunction() are true. + + This is new in Python 2.2, and, for example, is true of int.__add__. + An object passing this test has a __get__ attribute, but not a + __set__ attribute or a __delete__ attribute. Beyond that, the set + of attributes varies; __name__ is usually sensible, and __doc__ + often is. + + Methods implemented via descriptors that also pass one of the other + tests return false from the ismethoddescriptor() test, simply because + the other tests promise more -- you can, e.g., count on having the + __func__ attribute (etc) when an object passes ismethod().""" + if isclass(object) or ismethod(object) or isfunction(object): + # mutual exclusion + return False + tp = type(object) + return (hasattr(tp, "__get__") + and not hasattr(tp, "__set__") + and not hasattr(tp, "__delete__")) + +def isdatadescriptor(object): + """Return true if the object is a data descriptor. + + Data descriptors have a __set__ or a __delete__ attribute. Examples are + properties (defined in Python) and getsets and members (defined in C). + Typically, data descriptors will also have __name__ and __doc__ attributes + (properties, getsets, and members have both of these attributes), but this + is not guaranteed.""" + if isclass(object) or ismethod(object) or isfunction(object): + # mutual exclusion + return False + tp = type(object) + return hasattr(tp, "__set__") or hasattr(tp, "__delete__") + +if hasattr(types, 'MemberDescriptorType'): + # CPython and equivalent + def ismemberdescriptor(object): + """Return true if the object is a member descriptor. + + Member descriptors are specialized descriptors defined in extension + modules.""" + return isinstance(object, types.MemberDescriptorType) +else: + # Other implementations + def ismemberdescriptor(object): + """Return true if the object is a member descriptor. + + Member descriptors are specialized descriptors defined in extension + modules.""" + return False + +if hasattr(types, 'GetSetDescriptorType'): + # CPython and equivalent + def isgetsetdescriptor(object): + """Return true if the object is a getset descriptor. + + getset descriptors are specialized descriptors defined in extension + modules.""" + return isinstance(object, types.GetSetDescriptorType) +else: + # Other implementations + def isgetsetdescriptor(object): + """Return true if the object is a getset descriptor. 
+ + getset descriptors are specialized descriptors defined in extension + modules.""" + return False + +def isfunction(object): + """Return true if the object is a user-defined function. + + Function objects provide these attributes: + __doc__ documentation string + __name__ name with which this function was defined + __qualname__ qualified name of this function + __module__ name of the module the function was defined in or None + __code__ code object containing compiled function bytecode + __defaults__ tuple of any default values for arguments + __globals__ global namespace in which this function was defined + __annotations__ dict of parameter annotations + __kwdefaults__ dict of keyword only parameters with defaults + __dict__ namespace which is supporting arbitrary function attributes + __closure__ a tuple of cells or None + __type_params__ tuple of type parameters""" + return isinstance(object, types.FunctionType) + +def _has_code_flag(f, flag): + """Return true if ``f`` is a function (or a method or functools.partial + wrapper wrapping a function or a functools.partialmethod wrapping a + function) whose code object has the given ``flag`` + set in its flags.""" + f = functools._unwrap_partialmethod(f) + while ismethod(f): + f = f.__func__ + f = functools._unwrap_partial(f) + if not (isfunction(f) or _signature_is_functionlike(f)): + return False + return bool(f.__code__.co_flags & flag) + +def isgeneratorfunction(obj): + """Return true if the object is a user-defined generator function. + + Generator function objects provide the same attributes as functions. + See help(isfunction) for a list of attributes.""" + return _has_code_flag(obj, CO_GENERATOR) + +# A marker for markcoroutinefunction and iscoroutinefunction. +_is_coroutine_mark = object() + +def _has_coroutine_mark(f): + while ismethod(f): + f = f.__func__ + f = functools._unwrap_partial(f) + return getattr(f, "_is_coroutine_marker", None) is _is_coroutine_mark + +def markcoroutinefunction(func): + """ + Decorator to ensure callable is recognised as a coroutine function. + """ + if hasattr(func, '__func__'): + func = func.__func__ + func._is_coroutine_marker = _is_coroutine_mark + return func + +def iscoroutinefunction(obj): + """Return true if the object is a coroutine function. + + Coroutine functions are normally defined with "async def" syntax, but may + be marked via markcoroutinefunction. + """ + return _has_code_flag(obj, CO_COROUTINE) or _has_coroutine_mark(obj) + +def isasyncgenfunction(obj): + """Return true if the object is an asynchronous generator function. + + Asynchronous generator functions are defined with "async def" + syntax and have "yield" expressions in their body. + """ + return _has_code_flag(obj, CO_ASYNC_GENERATOR) + +def isasyncgen(object): + """Return true if the object is an asynchronous generator.""" + return isinstance(object, types.AsyncGeneratorType) + +def isgenerator(object): + """Return true if the object is a generator. 
+ + Generator objects provide these attributes: + gi_code code object + gi_frame frame object or possibly None once the generator has + been exhausted + gi_running set to 1 when generator is executing, 0 otherwise + gi_suspended set to 1 when the generator is suspended at a yield point, 0 otherwise + gi_yieldfrom object being iterated by yield from or None + + __iter__() defined to support iteration over container + close() raises a new GeneratorExit exception inside the + generator to terminate the iteration + send() resumes the generator and "sends" a value that becomes + the result of the current yield-expression + throw() used to raise an exception inside the generator""" + return isinstance(object, types.GeneratorType) + +def iscoroutine(object): + """Return true if the object is a coroutine.""" + return isinstance(object, types.CoroutineType) + +def isawaitable(object): + """Return true if object can be passed to an ``await`` expression.""" + return (isinstance(object, types.CoroutineType) or + isinstance(object, types.GeneratorType) and + bool(object.gi_code.co_flags & CO_ITERABLE_COROUTINE) or + isinstance(object, collections.abc.Awaitable)) + +def istraceback(object): + """Return true if the object is a traceback. + + Traceback objects provide these attributes: + tb_frame frame object at this level + tb_lasti index of last attempted instruction in bytecode + tb_lineno current line number in Python source code + tb_next next inner traceback object (called by this level)""" + return isinstance(object, types.TracebackType) + +def isframe(object): + """Return true if the object is a frame object. + + Frame objects provide these attributes: + f_back next outer frame object (this frame's caller) + f_builtins built-in namespace seen by this frame + f_code code object being executed in this frame + f_globals global namespace seen by this frame + f_lasti index of last attempted instruction in bytecode + f_lineno current line number in Python source code + f_locals local namespace seen by this frame + f_trace tracing function for this frame, or None + f_trace_lines is a tracing event triggered for each source line? + f_trace_opcodes are per-opcode events being requested? + + clear() used to clear all references to local variables""" + return isinstance(object, types.FrameType) + +def iscode(object): + """Return true if the object is a code object. 
+
+    Code objects provide these attributes:
+        co_argcount         number of arguments (not including *, ** args
+                            or keyword only arguments)
+        co_code             string of raw compiled bytecode
+        co_cellvars         tuple of names of cell variables
+        co_consts           tuple of constants used in the bytecode
+        co_filename         name of file in which this code object was created
+        co_firstlineno      number of first line in Python source code
+        co_flags            bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
+                            | 16=nested | 32=generator | 64=nofree | 128=coroutine
+                            | 256=iterable_coroutine | 512=async_generator
+                            | 0x4000000=has_docstring
+        co_freevars         tuple of names of free variables
+        co_posonlyargcount  number of positional only arguments
+        co_kwonlyargcount   number of keyword only arguments (not including ** arg)
+        co_lnotab           encoded mapping of line numbers to bytecode indices
+        co_name             name with which this code object was defined
+        co_names            tuple of names other than arguments and function locals
+        co_nlocals          number of local variables
+        co_stacksize        virtual machine stack space required
+        co_varnames         tuple of names of arguments and local variables
+        co_qualname         fully qualified function name
+
+        co_lines()          returns an iterator that yields successive bytecode ranges
+        co_positions()      returns an iterator of source code positions for each bytecode instruction
+        replace()           returns a copy of the code object with new values"""
+    return isinstance(object, types.CodeType)
+
+def isbuiltin(object):
+    """Return true if the object is a built-in function or method.
+
+    Built-in functions and methods provide these attributes:
+        __doc__     documentation string
+        __name__    original name of this function or method
+        __self__    instance to which a method is bound, or None"""
+    return isinstance(object, types.BuiltinFunctionType)
+
+def ismethodwrapper(object):
+    """Return true if the object is a method wrapper."""
+    return isinstance(object, types.MethodWrapperType)
+
+def isroutine(object):
+    """Return true if the object is any kind of function or method."""
+    return (isbuiltin(object)
+            or isfunction(object)
+            or ismethod(object)
+            or ismethoddescriptor(object)
+            or ismethodwrapper(object)
+            or isinstance(object, functools._singledispatchmethod_get))
+
+def isabstract(object):
+    """Return true if the object is an abstract base class (ABC)."""
+    if not isinstance(object, type):
+        return False
+    if object.__flags__ & TPFLAGS_IS_ABSTRACT:
+        return True
+    if not issubclass(type(object), abc.ABCMeta):
+        return False
+    if hasattr(object, '__abstractmethods__'):
+        # It looks like ABCMeta.__new__ has finished running;
+        # TPFLAGS_IS_ABSTRACT should have been accurate.
+        return False
+    # It looks like ABCMeta.__new__ has not finished running yet; we're
+    # probably in __init_subclass__. We'll look for abstractmethods manually.
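+    # First scan the class's own namespace, then every name its bases still
+    # advertise as abstract; a single surviving abstract method is enough.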
+ for name, value in object.__dict__.items(): + if getattr(value, "__isabstractmethod__", False): + return True + for base in object.__bases__: + for name in getattr(base, "__abstractmethods__", ()): + value = getattr(object, name, None) + if getattr(value, "__isabstractmethod__", False): + return True + return False + +def _getmembers(object, predicate, getter): + results = [] + processed = set() + names = dir(object) + if isclass(object): + mro = getmro(object) + # add any DynamicClassAttributes to the list of names if object is a class; + # this may result in duplicate entries if, for example, a virtual + # attribute with the same name as a DynamicClassAttribute exists + try: + for base in object.__bases__: + for k, v in base.__dict__.items(): + if isinstance(v, types.DynamicClassAttribute): + names.append(k) + except AttributeError: + pass + else: + mro = () + for key in names: + # First try to get the value via getattr. Some descriptors don't + # like calling their __get__ (see bug #1785), so fall back to + # looking in the __dict__. + try: + value = getter(object, key) + # handle the duplicate key + if key in processed: + raise AttributeError + except AttributeError: + for base in mro: + if key in base.__dict__: + value = base.__dict__[key] + break + else: + # could be a (currently) missing slot member, or a buggy + # __dir__; discard and move on + continue + if not predicate or predicate(value): + results.append((key, value)) + processed.add(key) + results.sort(key=lambda pair: pair[0]) + return results + +def getmembers(object, predicate=None): + """Return all members of an object as (name, value) pairs sorted by name. + Optionally, only return members that satisfy a given predicate.""" + return _getmembers(object, predicate, getattr) + +def getmembers_static(object, predicate=None): + """Return all members of an object as (name, value) pairs sorted by name + without triggering dynamic lookup via the descriptor protocol, + __getattr__ or __getattribute__. Optionally, only return members that + satisfy a given predicate. + + Note: this function may not be able to retrieve all members + that getmembers can fetch (like dynamically created attributes) + and may find members that getmembers can't (like descriptors + that raise AttributeError). It can also return descriptor objects + instead of instance members in some cases. + """ + return _getmembers(object, predicate, getattr_static) + +Attribute = namedtuple('Attribute', 'name kind defining_class object') + +def classify_class_attrs(cls): + """Return list of attribute-descriptor tuples. + + For each name in dir(cls), the return list contains a 4-tuple + with these elements: + + 0. The name (a string). + + 1. The kind of attribute this is, one of these strings: + 'class method' created via classmethod() + 'static method' created via staticmethod() + 'property' created via property() + 'method' any other flavor of method or descriptor + 'data' not a method + + 2. The class which defined this attribute (a class). + + 3. The object as obtained by calling getattr; if this fails, or if the + resulting object does not live anywhere in the class' mro (including + metaclasses) then the object is looked up in the defining class's + dict (found by walking the mro). + + If one of the items in dir(cls) is stored in the metaclass it will now + be discovered and not have None be listed as the class in which it was + defined. Any items whose home class cannot be discovered are skipped. 
+    """
+
+    mro = getmro(cls)
+    metamro = getmro(type(cls))  # for attributes stored in the metaclass
+    metamro = tuple(cls for cls in metamro if cls not in (type, object))
+    class_bases = (cls,) + mro
+    all_bases = class_bases + metamro
+    names = dir(cls)
+    # add any DynamicClassAttributes to the list of names;
+    # this may result in duplicate entries if, for example, a virtual
+    # attribute with the same name as a DynamicClassAttribute exists.
+    for base in mro:
+        for k, v in base.__dict__.items():
+            if isinstance(v, types.DynamicClassAttribute) and v.fget is not None:
+                names.append(k)
+    result = []
+    processed = set()
+
+    for name in names:
+        # Get the object associated with the name, and where it was defined.
+        # Normal objects will be looked up with both getattr and directly in
+        # its class' dict (in case getattr fails [bug #1785], and also to look
+        # for a docstring).
+        # For DynamicClassAttributes on the second pass we only look in the
+        # class's dict.
+        #
+        # Getting an obj from the __dict__ sometimes reveals more than
+        # using getattr. Static and class methods are dramatic examples.
+        homecls = None
+        get_obj = None
+        dict_obj = None
+        if name not in processed:
+            try:
+                if name == '__dict__':
+                    raise Exception("__dict__ is special, don't want the proxy")
+                get_obj = getattr(cls, name)
+            except Exception:
+                pass
+            else:
+                homecls = getattr(get_obj, "__objclass__", homecls)
+                if homecls not in class_bases:
+                    # if the resulting object does not live somewhere in the
+                    # mro, drop it and search the mro manually
+                    homecls = None
+                    last_cls = None
+                    # first look in the classes
+                    for srch_cls in class_bases:
+                        srch_obj = getattr(srch_cls, name, None)
+                        if srch_obj is get_obj:
+                            last_cls = srch_cls
+                    # then check the metaclasses
+                    for srch_cls in metamro:
+                        try:
+                            srch_obj = srch_cls.__getattr__(cls, name)
+                        except AttributeError:
+                            continue
+                        if srch_obj is get_obj:
+                            last_cls = srch_cls
+                    if last_cls is not None:
+                        homecls = last_cls
+        for base in all_bases:
+            if name in base.__dict__:
+                dict_obj = base.__dict__[name]
+                if homecls not in metamro:
+                    homecls = base
+                break
+        if homecls is None:
+            # unable to locate the attribute anywhere, most likely due to
+            # buggy custom __dir__; discard and move on
+            continue
+        obj = get_obj if get_obj is not None else dict_obj
+        # Classify the object or its descriptor.
+        if isinstance(dict_obj, (staticmethod, types.BuiltinMethodType)):
+            kind = "static method"
+            obj = dict_obj
+        elif isinstance(dict_obj, (classmethod, types.ClassMethodDescriptorType)):
+            kind = "class method"
+            obj = dict_obj
+        elif isinstance(dict_obj, property):
+            kind = "property"
+            obj = dict_obj
+        elif isroutine(obj):
+            kind = "method"
+        else:
+            kind = "data"
+        result.append(Attribute(name, kind, homecls, obj))
+        processed.add(name)
+    return result
+
+# ----------------------------------------------------------- class helpers
+
+def getmro(cls):
+    "Return tuple of base classes (including cls) in method resolution order."
+    return cls.__mro__
+
+# -------------------------------------------------------- function helpers
+
+def unwrap(func, *, stop=None):
+    """Get the object wrapped by *func*.
+
+    Follows the chain of :attr:`__wrapped__` attributes returning the last
+    object in the chain.
+
+    *stop* is an optional callback accepting an object in the wrapper chain
+    as its sole argument that allows the unwrapping to be terminated early if
+    the callback returns a true value. If the callback never returns a true
+    value, the last object in the chain is returned as usual.
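+    A *stop* callback receives each candidate in turn, so a caller can halt
+    on a marker attribute:
+    ``unwrap(func, stop=lambda f: hasattr(f, '__signature__'))``.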
For example, + :func:`signature` uses this to stop unwrapping if any object in the + chain has a ``__signature__`` attribute defined. + + :exc:`ValueError` is raised if a cycle is encountered. + + """ + f = func # remember the original func for error reporting + # Memoise by id to tolerate non-hashable objects, but store objects to + # ensure they aren't destroyed, which would allow their IDs to be reused. + memo = {id(f): f} + recursion_limit = sys.getrecursionlimit() + while not isinstance(func, type) and hasattr(func, '__wrapped__'): + if stop is not None and stop(func): + break + func = func.__wrapped__ + id_func = id(func) + if (id_func in memo) or (len(memo) >= recursion_limit): + raise ValueError('wrapper loop when unwrapping {!r}'.format(f)) + memo[id_func] = func + return func + +# -------------------------------------------------- source code extraction +def indentsize(line): + """Return the indent size, in spaces, at the start of a line of text.""" + expline = line.expandtabs() + return len(expline) - len(expline.lstrip()) + +def _findclass(func): + cls = sys.modules.get(func.__module__) + if cls is None: + return None + for name in func.__qualname__.split('.')[:-1]: + cls = getattr(cls, name) + if not isclass(cls): + return None + return cls + +def _finddoc(obj): + if isclass(obj): + for base in obj.__mro__: + if base is not object: + try: + doc = base.__doc__ + except AttributeError: + continue + if doc is not None: + return doc + return None + + if ismethod(obj): + name = obj.__func__.__name__ + self = obj.__self__ + if (isclass(self) and + getattr(getattr(self, name, None), '__func__') is obj.__func__): + # classmethod + cls = self + else: + cls = self.__class__ + elif isfunction(obj): + name = obj.__name__ + cls = _findclass(obj) + if cls is None or getattr(cls, name) is not obj: + return None + elif isbuiltin(obj): + name = obj.__name__ + self = obj.__self__ + if (isclass(self) and + self.__qualname__ + '.' + name == obj.__qualname__): + # classmethod + cls = self + else: + cls = self.__class__ + # Should be tested before isdatadescriptor(). + elif isinstance(obj, property): + name = obj.__name__ + cls = _findclass(obj.fget) + if cls is None or getattr(cls, name) is not obj: + return None + elif ismethoddescriptor(obj) or isdatadescriptor(obj): + name = obj.__name__ + cls = obj.__objclass__ + if getattr(cls, name) is not obj: + return None + if ismemberdescriptor(obj): + slots = getattr(cls, '__slots__', None) + if isinstance(slots, dict) and name in slots: + return slots[name] + else: + return None + for base in cls.__mro__: + try: + doc = getattr(base, name).__doc__ + except AttributeError: + continue + if doc is not None: + return doc + return None + +def getdoc(object): + """Get the documentation string for an object. + + All tabs are expanded to spaces. To clean up docstrings that are + indented to line up with blocks of code, any whitespace than can be + uniformly removed from the second line onwards is removed.""" + try: + doc = object.__doc__ + except AttributeError: + return None + if doc is None: + try: + doc = _finddoc(object) + except (AttributeError, TypeError): + return None + if not isinstance(doc, str): + return None + return cleandoc(doc) + +def cleandoc(doc): + """Clean up indentation from docstrings. + + Any whitespace that can be uniformly removed from the second line + onwards is removed.""" + lines = doc.expandtabs().split('\n') + + # Find minimum indentation of any non-blank lines after first line. 
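+    # sys.maxsize doubles as the "no indented line seen yet" sentinel.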
+ margin = sys.maxsize + for line in lines[1:]: + content = len(line.lstrip(' ')) + if content: + indent = len(line) - content + margin = min(margin, indent) + # Remove indentation. + if lines: + lines[0] = lines[0].lstrip(' ') + if margin < sys.maxsize: + for i in range(1, len(lines)): + lines[i] = lines[i][margin:] + # Remove any trailing or leading blank lines. + while lines and not lines[-1]: + lines.pop() + while lines and not lines[0]: + lines.pop(0) + return '\n'.join(lines) + + +def getfile(object): + """Work out which source or compiled file an object was defined in.""" + if ismodule(object): + if getattr(object, '__file__', None): + return object.__file__ + raise TypeError('{!r} is a built-in module'.format(object)) + if isclass(object): + if hasattr(object, '__module__'): + module = sys.modules.get(object.__module__) + if getattr(module, '__file__', None): + return module.__file__ + if object.__module__ == '__main__': + raise OSError('source code not available') + raise TypeError('{!r} is a built-in class'.format(object)) + if ismethod(object): + object = object.__func__ + if isfunction(object): + object = object.__code__ + if istraceback(object): + object = object.tb_frame + if isframe(object): + object = object.f_code + if iscode(object): + return object.co_filename + raise TypeError('module, class, method, function, traceback, frame, or ' + 'code object was expected, got {}'.format( + type(object).__name__)) + +def getmodulename(path): + """Return the module name for a given file, or None.""" + fname = os.path.basename(path) + # Check for paths that look like an actual module file + suffixes = [(-len(suffix), suffix) + for suffix in importlib.machinery.all_suffixes()] + suffixes.sort() # try longest suffixes first, in case they overlap + for neglen, suffix in suffixes: + if fname.endswith(suffix): + return fname[:neglen] + return None + +def getsourcefile(object): + """Return the filename that can be used to locate an object's source. + Return None if no way can be identified to get the source. + """ + filename = getfile(object) + all_bytecode_suffixes = importlib.machinery.BYTECODE_SUFFIXES[:] + if any(filename.endswith(s) for s in all_bytecode_suffixes): + filename = (os.path.splitext(filename)[0] + + importlib.machinery.SOURCE_SUFFIXES[0]) + elif any(filename.endswith(s) for s in + importlib.machinery.EXTENSION_SUFFIXES): + return None + elif filename.endswith(".fwork"): + # Apple mobile framework markers are another type of non-source file + return None + + # return a filename found in the linecache even if it doesn't exist on disk + if filename in linecache.cache: + return filename + if os.path.exists(filename): + return filename + # only return a non-existent filename if the module has a PEP 302 loader + module = getmodule(object, filename) + if getattr(module, '__loader__', None) is not None: + return filename + elif getattr(getattr(module, "__spec__", None), "loader", None) is not None: + return filename + +def getabsfile(object, _filename=None): + """Return an absolute path to the source or compiled file for an object. 
+
+    The idea is for each object to have a unique origin, so this routine
+    normalizes the result as much as possible."""
+    if _filename is None:
+        _filename = getsourcefile(object) or getfile(object)
+    return os.path.normcase(os.path.abspath(_filename))
+
+modulesbyfile = {}
+_filesbymodname = {}
+
+def getmodule(object, _filename=None):
+    """Return the module an object was defined in, or None if not found."""
+    if ismodule(object):
+        return object
+    if hasattr(object, '__module__'):
+        return sys.modules.get(object.__module__)
+
+    # Try the filename to modulename cache
+    if _filename is not None and _filename in modulesbyfile:
+        return sys.modules.get(modulesbyfile[_filename])
+    # Try the cache again with the absolute file name
+    try:
+        file = getabsfile(object, _filename)
+    except (TypeError, FileNotFoundError):
+        return None
+    if file in modulesbyfile:
+        return sys.modules.get(modulesbyfile[file])
+    # Update the filename to module name cache and check yet again
+    # Copy sys.modules in order to cope with changes while iterating
+    for modname, module in sys.modules.copy().items():
+        if ismodule(module) and hasattr(module, '__file__'):
+            f = module.__file__
+            if f == _filesbymodname.get(modname, None):
+                # Have already mapped this module, so skip it
+                continue
+            _filesbymodname[modname] = f
+            f = getabsfile(module)
+            # Always map to the name the module knows itself by
+            modulesbyfile[f] = modulesbyfile[
+                os.path.realpath(f)] = module.__name__
+    if file in modulesbyfile:
+        return sys.modules.get(modulesbyfile[file])
+    # Check the main module
+    main = sys.modules['__main__']
+    if not hasattr(object, '__name__'):
+        return None
+    if hasattr(main, object.__name__):
+        mainobject = getattr(main, object.__name__)
+        if mainobject is object:
+            return main
+    # Check builtins
+    builtin = sys.modules['builtins']
+    if hasattr(builtin, object.__name__):
+        builtinobject = getattr(builtin, object.__name__)
+        if builtinobject is object:
+            return builtin
+
+
+class ClassFoundException(Exception):
+    pass
+
+
+def findsource(object):
+    """Return the entire source file and starting line number for an object.
+
+    The argument may be a module, class, method, function, traceback, frame,
+    or code object. The source code is returned as a list of all the lines
+    in the file and the line number indexes a line in that list. An OSError
+    is raised if the source code cannot be retrieved."""
+
+    file = getsourcefile(object)
+    if file:
+        # Invalidate cache if needed.
+        linecache.checkcache(file)
+    else:
+        file = getfile(object)
+        # Allow filenames in form of "<something>" to pass through.
+        # `doctest` monkeypatches `linecache` module to enable
+        # inspection, so let `linecache.getlines` be called.
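+        # For instance, doctest registers its examples with linecache under
+        # pseudo-filenames of the form "<doctest name[N]>"; such names must
+        # still reach linecache.getlines below.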
+ if (not (file.startswith('<') and file.endswith('>'))) or file.endswith('.fwork'): + raise OSError('source code not available') + + module = getmodule(object, file) + if module: + lines = linecache.getlines(file, module.__dict__) + if not lines and file.startswith('<') and hasattr(object, "__code__"): + lines = linecache._getlines_from_code(object.__code__) + else: + lines = linecache.getlines(file) + if not lines: + raise OSError('could not get source code') + + if ismodule(object): + return lines, 0 + + if isclass(object): + try: + lnum = vars(object)['__firstlineno__'] - 1 + except (TypeError, KeyError): + raise OSError('source code not available') + if lnum >= len(lines): + raise OSError('lineno is out of bounds') + return lines, lnum + + if ismethod(object): + object = object.__func__ + if isfunction(object): + object = object.__code__ + if istraceback(object): + object = object.tb_frame + if isframe(object): + object = object.f_code + if iscode(object): + if not hasattr(object, 'co_firstlineno'): + raise OSError('could not find function definition') + lnum = object.co_firstlineno - 1 + if lnum >= len(lines): + raise OSError('lineno is out of bounds') + return lines, lnum + raise OSError('could not find code object') + +def getcomments(object): + """Get lines of comments immediately preceding an object's source code. + + Returns None when source can't be found. + """ + try: + lines, lnum = findsource(object) + except (OSError, TypeError): + return None + + if ismodule(object): + # Look for a comment block at the top of the file. + start = 0 + if lines and lines[0][:2] == '#!': start = 1 + while start < len(lines) and lines[start].strip() in ('', '#'): + start = start + 1 + if start < len(lines) and lines[start][:1] == '#': + comments = [] + end = start + while end < len(lines) and lines[end][:1] == '#': + comments.append(lines[end].expandtabs()) + end = end + 1 + return ''.join(comments) + + # Look for a preceding block of comments at the same indentation. + elif lnum > 0: + indent = indentsize(lines[lnum]) + end = lnum - 1 + if end >= 0 and lines[end].lstrip()[:1] == '#' and \ + indentsize(lines[end]) == indent: + comments = [lines[end].expandtabs().lstrip()] + if end > 0: + end = end - 1 + comment = lines[end].expandtabs().lstrip() + while comment[:1] == '#' and indentsize(lines[end]) == indent: + comments[:0] = [comment] + end = end - 1 + if end < 0: break + comment = lines[end].expandtabs().lstrip() + while comments and comments[0].strip() == '#': + comments[:1] = [] + while comments and comments[-1].strip() == '#': + comments[-1:] = [] + return ''.join(comments) + +class EndOfBlock(Exception): pass + +class BlockFinder: + """Provide a tokeneater() method to detect the end of a code block.""" + def __init__(self): + self.indent = 0 + self.singleline = False + self.started = False + self.passline = False + self.indecorator = False + self.last = 1 + self.body_col0 = None + + def tokeneater(self, type, token, srowcol, erowcol, line): + if not self.started and not self.indecorator: + if type in (tokenize.INDENT, tokenize.COMMENT, tokenize.NL): + pass + elif token == "async": + pass + # skip any decorators + elif token == "@": + self.indecorator = True + else: + # For "def" and "class" scan to the end of the block. + # For "lambda" and generator expression scan to + # the end of the logical line. 
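+                # e.g. "spam = lambda x: x + 1" forms a complete single-line
+                # block, so scanning stops at the next NEWLINE token.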
+                self.singleline = token not in ("def", "class")
+                self.started = True
+                self.passline = True    # skip to the end of the line
+        elif type == tokenize.NEWLINE:
+            self.passline = False   # stop skipping when a NEWLINE is seen
+            self.last = srowcol[0]
+            if self.singleline:
+                raise EndOfBlock
+            # hitting a NEWLINE when in a decorator without args
+            # ends the decorator
+            if self.indecorator:
+                self.indecorator = False
+        elif self.passline:
+            pass
+        elif type == tokenize.INDENT:
+            if self.body_col0 is None and self.started:
+                self.body_col0 = erowcol[1]
+            self.indent = self.indent + 1
+            self.passline = True
+        elif type == tokenize.DEDENT:
+            self.indent = self.indent - 1
+            # the end of matching indent/dedent pairs ends a block
+            # (note that this only works for "def"/"class" blocks,
+            #  not e.g. for "if: else:" or "try: finally:" blocks)
+            if self.indent <= 0:
+                raise EndOfBlock
+        elif type == tokenize.COMMENT:
+            if self.body_col0 is not None and srowcol[1] >= self.body_col0:
+                # Include comments if indented at least as much as the block
+                self.last = srowcol[0]
+        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
+            # any other token on the same indentation level ends the previous
+            # block as well, except the pseudo-tokens COMMENT and NL.
+            raise EndOfBlock
+
+def getblock(lines):
+    """Extract the block of code at the top of the given list of lines."""
+    blockfinder = BlockFinder()
+    try:
+        tokens = tokenize.generate_tokens(iter(lines).__next__)
+        for _token in tokens:
+            blockfinder.tokeneater(*_token)
+    except (EndOfBlock, IndentationError):
+        pass
+    except SyntaxError as e:
+        if "unmatched" not in e.msg:
+            raise e from None
+        _, *_token_info = _token
+        try:
+            blockfinder.tokeneater(tokenize.NEWLINE, *_token_info)
+        except (EndOfBlock, IndentationError):
+            pass
+    return lines[:blockfinder.last]
+
+def getsourcelines(object):
+    """Return a list of source lines and starting line number for an object.
+
+    The argument may be a module, class, method, function, traceback, frame,
+    or code object. The source code is returned as a list of the lines
+    corresponding to the object and the line number indicates where in the
+    original source file the first line of code was found. An OSError is
+    raised if the source code cannot be retrieved."""
+    object = unwrap(object)
+    lines, lnum = findsource(object)
+
+    if istraceback(object):
+        object = object.tb_frame
+
+    # for module or frame that corresponds to module, return all source lines
+    if (ismodule(object) or
+            (isframe(object) and object.f_code.co_name == "<module>")):
+        return lines, 0
+    else:
+        return getblock(lines[lnum:]), lnum + 1
+
+def getsource(object):
+    """Return the text of the source code for an object.
+
+    The argument may be a module, class, method, function, traceback, frame,
+    or code object. The source code is returned as a single string. An
+    OSError is raised if the source code cannot be retrieved."""
+    lines, lnum = getsourcelines(object)
+    return ''.join(lines)
+
+# --------------------------------------------------- class tree extraction
+def walktree(classes, children, parent):
+    """Recursive helper function for getclasstree()."""
+    results = []
+    classes.sort(key=attrgetter('__module__', '__name__'))
+    for c in classes:
+        results.append((c, c.__bases__))
+        if c in children:
+            results.append(walktree(children[c], children, c))
+    return results
+
+def getclasstree(classes, unique=False):
+    """Arrange the given list of classes into a hierarchy of nested lists.
+
+    Where a nested list appears, it contains classes derived from the class
+    whose entry immediately precedes the list. Each entry is a 2-tuple
+    containing a class and a tuple of its base classes. If the 'unique'
+    argument is true, exactly one entry appears in the returned structure
+    for each class in the given list. Otherwise, classes using multiple
+    inheritance and their descendants will appear multiple times."""
+    children = {}
+    roots = []
+    for c in classes:
+        if c.__bases__:
+            for parent in c.__bases__:
+                if parent not in children:
+                    children[parent] = []
+                if c not in children[parent]:
+                    children[parent].append(c)
+                if unique and parent in classes: break
+        elif c not in roots:
+            roots.append(c)
+    for parent in children:
+        if parent not in classes:
+            roots.append(parent)
+    return walktree(roots, children, None)
+
+# ------------------------------------------------ argument list extraction
+Arguments = namedtuple('Arguments', 'args, varargs, varkw')
+
+def getargs(co):
+    """Get information about the arguments accepted by a code object.
+
+    Three things are returned: (args, varargs, varkw), where
+    'args' is the list of argument names. Keyword-only arguments are
+    appended. 'varargs' and 'varkw' are the names of the * and **
+    arguments or None."""
+    if not iscode(co):
+        raise TypeError('{!r} is not a code object'.format(co))
+
+    names = co.co_varnames
+    nargs = co.co_argcount
+    nkwargs = co.co_kwonlyargcount
+    args = list(names[:nargs])
+    kwonlyargs = list(names[nargs:nargs+nkwargs])
+
+    nargs += nkwargs
+    varargs = None
+    if co.co_flags & CO_VARARGS:
+        varargs = co.co_varnames[nargs]
+        nargs = nargs + 1
+    varkw = None
+    if co.co_flags & CO_VARKEYWORDS:
+        varkw = co.co_varnames[nargs]
+    return Arguments(args + kwonlyargs, varargs, varkw)
+
+
+FullArgSpec = namedtuple('FullArgSpec',
+    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
+
+def getfullargspec(func):
+    """Get the names and default values of a callable object's parameters.
+
+    A tuple of seven things is returned:
+    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
+    'args' is a list of the parameter names.
+    'varargs' and 'varkw' are the names of the * and ** parameters or None.
+    'defaults' is an n-tuple of the default values of the last n parameters.
+    'kwonlyargs' is a list of keyword-only parameter names.
+    'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
+    'annotations' is a dictionary mapping parameter names to annotations.
+
+    Notable differences from inspect.signature():
+      - the "self" parameter is always reported, even for bound methods
+      - wrapper chains defined by __wrapped__ are *not* unwrapped automatically
+    """
+    try:
+        # Re: `skip_bound_arg=False`
+        #
+        # There is a notable difference in behaviour between getfullargspec
+        # and Signature: the former always returns 'self' parameter for bound
+        # methods, whereas the Signature always shows the actual calling
+        # signature of the passed object.
+        #
+        # To simulate this behaviour, we "unbind" bound methods, to trick
+        # inspect.signature to always return their first parameter ("self",
+        # usually)
+
+        # Re: `follow_wrapper_chains=False`
+        #
+        # getfullargspec() historically ignored __wrapped__ attributes,
+        # so we ensure that remains the case in 3.3+
+
+        sig = _signature_from_callable(func,
+                                       follow_wrapper_chains=False,
+                                       skip_bound_arg=False,
+                                       sigcls=Signature,
+                                       eval_str=False)
+    except Exception as ex:
+        # Most of the time 'signature' will raise ValueError.
+ # But, it can also raise AttributeError, and, maybe something + # else. So to be fully backwards compatible, we catch all + # possible exceptions here, and reraise a TypeError. + raise TypeError('unsupported callable') from ex + + args = [] + varargs = None + varkw = None + posonlyargs = [] + kwonlyargs = [] + annotations = {} + defaults = () + kwdefaults = {} + + if sig.return_annotation is not sig.empty: + annotations['return'] = sig.return_annotation + + for param in sig.parameters.values(): + kind = param.kind + name = param.name + + if kind is _POSITIONAL_ONLY: + posonlyargs.append(name) + if param.default is not param.empty: + defaults += (param.default,) + elif kind is _POSITIONAL_OR_KEYWORD: + args.append(name) + if param.default is not param.empty: + defaults += (param.default,) + elif kind is _VAR_POSITIONAL: + varargs = name + elif kind is _KEYWORD_ONLY: + kwonlyargs.append(name) + if param.default is not param.empty: + kwdefaults[name] = param.default + elif kind is _VAR_KEYWORD: + varkw = name + + if param.annotation is not param.empty: + annotations[name] = param.annotation + + if not kwdefaults: + # compatibility with 'func.__kwdefaults__' + kwdefaults = None + + if not defaults: + # compatibility with 'func.__defaults__' + defaults = None + + return FullArgSpec(posonlyargs + args, varargs, varkw, defaults, + kwonlyargs, kwdefaults, annotations) + + +ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals') + +def getargvalues(frame): + """Get information about arguments passed into a particular frame. + + A tuple of four things is returned: (args, varargs, varkw, locals). + 'args' is a list of the argument names. + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'locals' is the locals dictionary of the given frame.""" + args, varargs, varkw = getargs(frame.f_code) + return ArgInfo(args, varargs, varkw, frame.f_locals) + +def formatannotation(annotation, base_module=None, *, quote_annotation_strings=True): + if not quote_annotation_strings and isinstance(annotation, str): + return annotation + if getattr(annotation, '__module__', None) == 'typing': + def repl(match): + text = match.group() + return text.removeprefix('typing.') + return re.sub(r'[\w\.]+', repl, repr(annotation)) + if isinstance(annotation, types.GenericAlias): + return str(annotation) + if isinstance(annotation, type): + if annotation.__module__ in ('builtins', base_module): + return annotation.__qualname__ + return annotation.__module__+'.'+annotation.__qualname__ + if isinstance(annotation, ForwardRef): + return annotation.__forward_arg__ + return repr(annotation) + +def formatannotationrelativeto(object): + module = getattr(object, '__module__', None) + def _formatannotation(annotation): + return formatannotation(annotation, module) + return _formatannotation + + +def formatargvalues(args, varargs, varkw, locals, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value)): + """Format an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. 
+    """
+    def convert(name, locals=locals,
+                formatarg=formatarg, formatvalue=formatvalue):
+        return formatarg(name) + formatvalue(locals[name])
+    specs = []
+    for i in range(len(args)):
+        specs.append(convert(args[i]))
+    if varargs:
+        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
+    if varkw:
+        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
+    return '(' + ', '.join(specs) + ')'
+
+def _missing_arguments(f_name, argnames, pos, values):
+    names = [repr(name) for name in argnames if name not in values]
+    missing = len(names)
+    if missing == 1:
+        s = names[0]
+    elif missing == 2:
+        s = "{} and {}".format(*names)
+    else:
+        tail = ", {} and {}".format(*names[-2:])
+        del names[-2:]
+        s = ", ".join(names) + tail
+    raise TypeError("%s() missing %i required %s argument%s: %s" %
+                    (f_name, missing,
+                     "positional" if pos else "keyword-only",
+                     "" if missing == 1 else "s", s))
+
+def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
+    atleast = len(args) - defcount
+    kwonly_given = len([arg for arg in kwonly if arg in values])
+    if varargs:
+        plural = atleast != 1
+        sig = "at least %d" % (atleast,)
+    elif defcount:
+        plural = True
+        sig = "from %d to %d" % (atleast, len(args))
+    else:
+        plural = len(args) != 1
+        sig = str(len(args))
+    kwonly_sig = ""
+    if kwonly_given:
+        msg = " positional argument%s (and %d keyword-only argument%s)"
+        kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
+                             "s" if kwonly_given != 1 else ""))
+    raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
+                    (f_name, sig, "s" if plural else "", given, kwonly_sig,
+                     "was" if given == 1 and not kwonly_given else "were"))
+
+def getcallargs(func, /, *positional, **named):
+    """Get the mapping of arguments to values.
+ + A dict is returned, with keys the function argument names (including the + names of the * and ** arguments, if any), and values the respective bound + values from 'positional' and 'named'.""" + spec = getfullargspec(func) + args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec + f_name = func.__name__ + arg2value = {} + + + if ismethod(func) and func.__self__ is not None: + # implicit 'self' (or 'cls' for classmethods) argument + positional = (func.__self__,) + positional + num_pos = len(positional) + num_args = len(args) + num_defaults = len(defaults) if defaults else 0 + + n = min(num_pos, num_args) + for i in range(n): + arg2value[args[i]] = positional[i] + if varargs: + arg2value[varargs] = tuple(positional[n:]) + possible_kwargs = set(args + kwonlyargs) + if varkw: + arg2value[varkw] = {} + for kw, value in named.items(): + if kw not in possible_kwargs: + if not varkw: + raise TypeError("%s() got an unexpected keyword argument %r" % + (f_name, kw)) + arg2value[varkw][kw] = value + continue + if kw in arg2value: + raise TypeError("%s() got multiple values for argument %r" % + (f_name, kw)) + arg2value[kw] = value + if num_pos > num_args and not varargs: + _too_many(f_name, args, kwonlyargs, varargs, num_defaults, + num_pos, arg2value) + if num_pos < num_args: + req = args[:num_args - num_defaults] + for arg in req: + if arg not in arg2value: + _missing_arguments(f_name, req, True, arg2value) + for i, arg in enumerate(args[num_args - num_defaults:]): + if arg not in arg2value: + arg2value[arg] = defaults[i] + missing = 0 + for kwarg in kwonlyargs: + if kwarg not in arg2value: + if kwonlydefaults and kwarg in kwonlydefaults: + arg2value[kwarg] = kwonlydefaults[kwarg] + else: + missing += 1 + if missing: + _missing_arguments(f_name, kwonlyargs, False, arg2value) + return arg2value + +ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound') + +def getclosurevars(func): + """ + Get the mapping of free variables to their current values. + + Returns a named tuple of dicts mapping the current nonlocal, global + and builtin references as seen by the body of the function. A final + set of unbound names that could not be resolved is also provided. 
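+
+    For example (an illustrative sketch):
+
+        offset = 10
+
+        def make_adder(n):
+            def adder(x):
+                return x + n + offset
+            return adder
+
+    Here getclosurevars(make_adder(1)) reports {'n': 1} as nonlocals,
+    {'offset': 10} as globals, no builtins used by the body, and no
+    unbound names.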
+ """ + + if ismethod(func): + func = func.__func__ + + if not isfunction(func): + raise TypeError("{!r} is not a Python function".format(func)) + + code = func.__code__ + # Nonlocal references are named in co_freevars and resolved + # by looking them up in __closure__ by positional index + if func.__closure__ is None: + nonlocal_vars = {} + else: + nonlocal_vars = { + var : cell.cell_contents + for var, cell in zip(code.co_freevars, func.__closure__) + } + + # Global and builtin references are named in co_names and resolved + # by looking them up in __globals__ or __builtins__ + global_ns = func.__globals__ + builtin_ns = global_ns.get("__builtins__", builtins.__dict__) + if ismodule(builtin_ns): + builtin_ns = builtin_ns.__dict__ + global_vars = {} + builtin_vars = {} + unbound_names = set() + global_names = set() + for instruction in dis.get_instructions(code): + opname = instruction.opname + name = instruction.argval + if opname == "LOAD_ATTR": + unbound_names.add(name) + elif opname == "LOAD_GLOBAL": + global_names.add(name) + for name in global_names: + try: + global_vars[name] = global_ns[name] + except KeyError: + try: + builtin_vars[name] = builtin_ns[name] + except KeyError: + unbound_names.add(name) + + return ClosureVars(nonlocal_vars, global_vars, + builtin_vars, unbound_names) + +# -------------------------------------------------- stack frame extraction + +_Traceback = namedtuple('_Traceback', 'filename lineno function code_context index') + +class Traceback(_Traceback): + def __new__(cls, filename, lineno, function, code_context, index, *, positions=None): + instance = super().__new__(cls, filename, lineno, function, code_context, index) + instance.positions = positions + return instance + + def __repr__(self): + return ('Traceback(filename={!r}, lineno={!r}, function={!r}, ' + 'code_context={!r}, index={!r}, positions={!r})'.format( + self.filename, self.lineno, self.function, self.code_context, + self.index, self.positions)) + +def _get_code_position_from_tb(tb): + code, instruction_index = tb.tb_frame.f_code, tb.tb_lasti + return _get_code_position(code, instruction_index) + +def _get_code_position(code, instruction_index): + if instruction_index < 0: + return (None, None, None, None) + positions_gen = code.co_positions() + # The nth entry in code.co_positions() corresponds to instruction (2*n)th since Python 3.10+ + return next(itertools.islice(positions_gen, instruction_index // 2, None)) + +def getframeinfo(frame, context=1): + """Get information about a frame or traceback object. + + A tuple of five things is returned: the filename, the line number of + the current line, the function name, a list of lines of context from + the source code, and the index of the current line within that list. 
+ The optional second argument specifies the number of lines of context + to return, which are centered around the current line.""" + if istraceback(frame): + positions = _get_code_position_from_tb(frame) + lineno = frame.tb_lineno + frame = frame.tb_frame + else: + lineno = frame.f_lineno + positions = _get_code_position(frame.f_code, frame.f_lasti) + + if positions[0] is None: + frame, *positions = (frame, lineno, *positions[1:]) + else: + frame, *positions = (frame, *positions) + + lineno = positions[0] + + if not isframe(frame): + raise TypeError('{!r} is not a frame or traceback object'.format(frame)) + + filename = getsourcefile(frame) or getfile(frame) + if context > 0: + start = lineno - 1 - context//2 + try: + lines, lnum = findsource(frame) + except OSError: + lines = index = None + else: + start = max(0, min(start, len(lines) - context)) + lines = lines[start:start+context] + index = lineno - 1 - start + else: + lines = index = None + + return Traceback(filename, lineno, frame.f_code.co_name, lines, + index, positions=dis.Positions(*positions)) + +def getlineno(frame): + """Get the line number from a frame object, allowing for optimization.""" + # FrameType.f_lineno is now a descriptor that grovels co_lnotab + return frame.f_lineno + +_FrameInfo = namedtuple('_FrameInfo', ('frame',) + Traceback._fields) +class FrameInfo(_FrameInfo): + def __new__(cls, frame, filename, lineno, function, code_context, index, *, positions=None): + instance = super().__new__(cls, frame, filename, lineno, function, code_context, index) + instance.positions = positions + return instance + + def __repr__(self): + return ('FrameInfo(frame={!r}, filename={!r}, lineno={!r}, function={!r}, ' + 'code_context={!r}, index={!r}, positions={!r})'.format( + self.frame, self.filename, self.lineno, self.function, + self.code_context, self.index, self.positions)) + +def getouterframes(frame, context=1): + """Get a list of records for a frame and all higher (calling) frames. + + Each record contains a frame object, filename, line number, function + name, a list of lines of context, and index within the context.""" + framelist = [] + while frame: + traceback_info = getframeinfo(frame, context) + frameinfo = (frame,) + traceback_info + framelist.append(FrameInfo(*frameinfo, positions=traceback_info.positions)) + frame = frame.f_back + return framelist + +def getinnerframes(tb, context=1): + """Get a list of records for a traceback's frame and all lower frames. 
+ + Each record contains a frame object, filename, line number, function + name, a list of lines of context, and index within the context.""" + framelist = [] + while tb: + traceback_info = getframeinfo(tb, context) + frameinfo = (tb.tb_frame,) + traceback_info + framelist.append(FrameInfo(*frameinfo, positions=traceback_info.positions)) + tb = tb.tb_next + return framelist + +def currentframe(): + """Return the frame of the caller or None if this is not possible.""" + return sys._getframe(1) if hasattr(sys, "_getframe") else None + +def stack(context=1): + """Return a list of records for the stack above the caller's frame.""" + return getouterframes(sys._getframe(1), context) + +def trace(context=1): + """Return a list of records for the stack below the current exception.""" + exc = sys.exception() + tb = None if exc is None else exc.__traceback__ + return getinnerframes(tb, context) + + +# ------------------------------------------------ static version of getattr + +_sentinel = object() +_static_getmro = type.__dict__['__mro__'].__get__ +_get_dunder_dict_of_class = type.__dict__["__dict__"].__get__ + + +def _check_instance(obj, attr): + instance_dict = {} + try: + instance_dict = object.__getattribute__(obj, "__dict__") + except AttributeError: + pass + return dict.get(instance_dict, attr, _sentinel) + + +def _check_class(klass, attr): + for entry in _static_getmro(klass): + if _shadowed_dict(type(entry)) is _sentinel and attr in entry.__dict__: + return entry.__dict__[attr] + return _sentinel + + +@functools.lru_cache() +def _shadowed_dict_from_weakref_mro_tuple(*weakref_mro): + for weakref_entry in weakref_mro: + # Normally we'd have to check whether the result of weakref_entry() + # is None here, in case the object the weakref is pointing to has died. + # In this specific case, however, we know that the only caller of this + # function is `_shadowed_dict()`, and that therefore this weakref is + # guaranteed to point to an object that is still alive. + entry = weakref_entry() + dunder_dict = _get_dunder_dict_of_class(entry) + if '__dict__' in dunder_dict: + class_dict = dunder_dict['__dict__'] + if not (type(class_dict) is types.GetSetDescriptorType and + class_dict.__name__ == "__dict__" and + class_dict.__objclass__ is entry): + return class_dict + return _sentinel + + +def _shadowed_dict(klass): + # gh-118013: the inner function here is decorated with lru_cache for + # performance reasons, *but* make sure not to pass strong references + # to the items in the mro. Doing so can lead to unexpected memory + # consumption in cases where classes are dynamically created and + # destroyed, and the dynamically created classes happen to be the only + # objects that hold strong references to other objects that take up a + # significant amount of memory. + return _shadowed_dict_from_weakref_mro_tuple( + *[make_weakref(entry) for entry in _static_getmro(klass)] + ) + + +def getattr_static(obj, attr, default=_sentinel): + """Retrieve attributes without triggering dynamic lookup via the + descriptor protocol, __getattr__ or __getattribute__. + + Note: this function may not be able to retrieve all attributes + that getattr can fetch (like dynamically created attributes) + and may find attributes that getattr can't (like descriptors + that raise AttributeError). It can also return descriptor objects + instead of instance members in some cases. See the + documentation for details. 
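+
+    For example (illustrative), given a property:
+
+        class Box:
+            @property
+            def value(self):
+                return 1
+
+    getattr(Box(), 'value') invokes the descriptor and returns 1, while
+    getattr_static(Box(), 'value') returns the property object itself.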
+ """ + instance_result = _sentinel + + objtype = type(obj) + if type not in _static_getmro(objtype): + klass = objtype + dict_attr = _shadowed_dict(klass) + if (dict_attr is _sentinel or + type(dict_attr) is types.MemberDescriptorType): + instance_result = _check_instance(obj, attr) + else: + klass = obj + + klass_result = _check_class(klass, attr) + + if instance_result is not _sentinel and klass_result is not _sentinel: + if _check_class(type(klass_result), "__get__") is not _sentinel and ( + _check_class(type(klass_result), "__set__") is not _sentinel + or _check_class(type(klass_result), "__delete__") is not _sentinel + ): + return klass_result + + if instance_result is not _sentinel: + return instance_result + if klass_result is not _sentinel: + return klass_result + + if obj is klass: + # for types we check the metaclass too + for entry in _static_getmro(type(klass)): + if ( + _shadowed_dict(type(entry)) is _sentinel + and attr in entry.__dict__ + ): + return entry.__dict__[attr] + if default is not _sentinel: + return default + raise AttributeError(attr) + + +# ------------------------------------------------ generator introspection + +GEN_CREATED = 'GEN_CREATED' +GEN_RUNNING = 'GEN_RUNNING' +GEN_SUSPENDED = 'GEN_SUSPENDED' +GEN_CLOSED = 'GEN_CLOSED' + +def getgeneratorstate(generator): + """Get current state of a generator-iterator. + + Possible states are: + GEN_CREATED: Waiting to start execution. + GEN_RUNNING: Currently being executed by the interpreter. + GEN_SUSPENDED: Currently suspended at a yield expression. + GEN_CLOSED: Execution has completed. + """ + if generator.gi_running: + return GEN_RUNNING + if generator.gi_suspended: + return GEN_SUSPENDED + if generator.gi_frame is None: + return GEN_CLOSED + return GEN_CREATED + + +def getgeneratorlocals(generator): + """ + Get the mapping of generator local variables to their current values. + + A dict is returned, with the keys the local variable names and values the + bound values.""" + + if not isgenerator(generator): + raise TypeError("{!r} is not a Python generator".format(generator)) + + frame = getattr(generator, "gi_frame", None) + if frame is not None: + return generator.gi_frame.f_locals + else: + return {} + + +# ------------------------------------------------ coroutine introspection + +CORO_CREATED = 'CORO_CREATED' +CORO_RUNNING = 'CORO_RUNNING' +CORO_SUSPENDED = 'CORO_SUSPENDED' +CORO_CLOSED = 'CORO_CLOSED' + +def getcoroutinestate(coroutine): + """Get current state of a coroutine object. + + Possible states are: + CORO_CREATED: Waiting to start execution. + CORO_RUNNING: Currently being executed by the interpreter. + CORO_SUSPENDED: Currently suspended at an await expression. + CORO_CLOSED: Execution has completed. + """ + if coroutine.cr_running: + return CORO_RUNNING + if coroutine.cr_suspended: + return CORO_SUSPENDED + if coroutine.cr_frame is None: + return CORO_CLOSED + return CORO_CREATED + + +def getcoroutinelocals(coroutine): + """ + Get the mapping of coroutine local variables to their current values. 
+
+    A dict is returned, with the keys the local variable names and values the
+    bound values."""
+    frame = getattr(coroutine, "cr_frame", None)
+    if frame is not None:
+        return frame.f_locals
+    else:
+        return {}
+
+
+# ----------------------------------- asynchronous generator introspection
+
+AGEN_CREATED = 'AGEN_CREATED'
+AGEN_RUNNING = 'AGEN_RUNNING'
+AGEN_SUSPENDED = 'AGEN_SUSPENDED'
+AGEN_CLOSED = 'AGEN_CLOSED'
+
+
+def getasyncgenstate(agen):
+    """Get current state of an asynchronous generator object.
+
+    Possible states are:
+      AGEN_CREATED: Waiting to start execution.
+      AGEN_RUNNING: Currently being executed by the interpreter.
+      AGEN_SUSPENDED: Currently suspended at a yield expression.
+      AGEN_CLOSED: Execution has completed.
+    """
+    if agen.ag_running:
+        return AGEN_RUNNING
+    if agen.ag_suspended:
+        return AGEN_SUSPENDED
+    if agen.ag_frame is None:
+        return AGEN_CLOSED
+    return AGEN_CREATED
+
+
+def getasyncgenlocals(agen):
+    """
+    Get the mapping of asynchronous generator local variables to their current
+    values.
+
+    A dict is returned, with the keys the local variable names and values the
+    bound values."""
+
+    if not isasyncgen(agen):
+        raise TypeError(f"{agen!r} is not a Python async generator")
+
+    frame = getattr(agen, "ag_frame", None)
+    if frame is not None:
+        return agen.ag_frame.f_locals
+    else:
+        return {}
+
+
+###############################################################################
+### Function Signature Object (PEP 362)
+###############################################################################
+
+
+_NonUserDefinedCallables = (types.WrapperDescriptorType,
+                            types.MethodWrapperType,
+                            types.ClassMethodDescriptorType,
+                            types.BuiltinFunctionType)
+
+
+def _signature_get_user_defined_method(cls, method_name, *, follow_wrapper_chains=True):
+    """Private helper. Checks if ``cls`` has an attribute
+    named ``method_name`` and returns it only if it is a
+    pure python function.
+    """
+    if method_name == '__new__':
+        meth = getattr(cls, method_name, None)
+    else:
+        meth = getattr_static(cls, method_name, None)
+    if meth is None:
+        return None
+
+    # NOTE: The meth may wrap a non-user-defined callable.
+    # In this case, we treat the meth as a non-user-defined callable too.
+    # (e.g. cls.__new__ generated by @warnings.deprecated)
+    unwrapped_meth = None
+    if follow_wrapper_chains:
+        unwrapped_meth = unwrap(meth, stop=(lambda m: hasattr(m, "__signature__")
+                                            or _signature_is_builtin(m)))
+
+    if (isinstance(meth, _NonUserDefinedCallables)
+            or isinstance(unwrapped_meth, _NonUserDefinedCallables)):
+        # Once '__signature__' is added to 'C'-level
+        # callables, this check won't be necessary
+        return None
+    if method_name != '__new__':
+        meth = _descriptor_get(meth, cls)
+    return meth
+
+
+def _signature_get_partial(wrapped_sig, partial, extra_args=()):
+    """Private helper to calculate what the 'wrapped_sig' signature will
+    look like after applying a 'functools.partial' object (or similar)
+    to it.
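+
+    For example (illustrative): given "def foo(a, b=10): ...", the
+    signature of functools.partial(foo, 1) becomes "(b=10)", while the
+    signature of functools.partial(foo, b=30) becomes "(a, *, b=30)".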
+    """
+
+    old_params = wrapped_sig.parameters
+    new_params = OrderedDict(old_params.items())
+
+    partial_args = partial.args or ()
+    partial_keywords = partial.keywords or {}
+
+    if extra_args:
+        partial_args = extra_args + partial_args
+
+    try:
+        ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)
+    except TypeError as ex:
+        msg = 'partial object {!r} has incorrect arguments'.format(partial)
+        raise ValueError(msg) from ex
+
+    transform_to_kwonly = False
+    for param_name, param in old_params.items():
+        try:
+            arg_value = ba.arguments[param_name]
+        except KeyError:
+            pass
+        else:
+            if param.kind is _POSITIONAL_ONLY:
+                # If a positional-only parameter is bound by partial,
+                # it effectively disappears from the signature.
+                # However, if it is a Placeholder it is not removed,
+                # and it also loses its default value.
+                if arg_value is functools.Placeholder:
+                    new_params[param_name] = param.replace(default=_empty)
+                else:
+                    new_params.pop(param_name)
+                continue
+
+            if param.kind is _POSITIONAL_OR_KEYWORD:
+                if param_name in partial_keywords:
+                    # This means that this parameter, and all parameters
+                    # after it should be keyword-only (and var-positional
+                    # should be removed). Here's why. Consider the following
+                    # function:
+                    #     foo(a, b, *args, c):
+                    #         pass
+                    #
+                    # "partial(foo, a='spam')" will have the following
+                    # signature: "(*, a='spam', b, c)". Because attempting
+                    # to call that partial with "(10, 20)" arguments will
+                    # raise a TypeError, saying that "a" argument received
+                    # multiple values.
+                    transform_to_kwonly = True
+                    # Set the new default value
+                    new_params[param_name] = param.replace(default=arg_value)
+                else:
+                    # was passed as a positional argument
+                    # Do not pop if it is a Placeholder
+                    #   also change kind to positional only
+                    #   and remove default
+                    if arg_value is functools.Placeholder:
+                        new_param = param.replace(
+                            kind=_POSITIONAL_ONLY,
+                            default=_empty
+                        )
+                        new_params[param_name] = new_param
+                    else:
+                        new_params.pop(param_name)
+                    continue
+
+            if param.kind is _KEYWORD_ONLY:
+                # Set the new default value
+                new_params[param_name] = param.replace(default=arg_value)
+
+        if transform_to_kwonly:
+            assert param.kind is not _POSITIONAL_ONLY
+
+            if param.kind is _POSITIONAL_OR_KEYWORD:
+                new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)
+                new_params[param_name] = new_param
+                new_params.move_to_end(param_name)
+            elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):
+                new_params.move_to_end(param_name)
+            elif param.kind is _VAR_POSITIONAL:
+                new_params.pop(param.name)
+
+    return wrapped_sig.replace(parameters=new_params.values())
+
+
+def _signature_bound_method(sig):
+    """Private helper to transform signatures for unbound
+    functions to bound methods.
+    """
+
+    params = tuple(sig.parameters.values())
+
+    if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
+        raise ValueError('invalid method signature')
+
+    kind = params[0].kind
+    if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
+        # Drop first parameter:
+        # '(p1, p2[, ...])' -> '(p2[, ...])'
+        params = params[1:]
+    else:
+        if kind is not _VAR_POSITIONAL:
+            # Unless we add a new parameter type we never
+            # get here
+            raise ValueError('invalid argument type')
+        # It's a var-positional parameter.
+        # Do nothing. '(*args[, ...])' -> '(*args[, ...])'
+
+    return sig.replace(parameters=params)
+
+
+def _signature_is_builtin(obj):
+    """Private helper to test if `obj` is a callable that might
+    support Argument Clinic's __text_signature__ protocol.
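+
+    For example (illustrative), len is such a callable: its
+    __text_signature__ is '(obj, /)'.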
+ """ + return (isbuiltin(obj) or + ismethoddescriptor(obj) or + isinstance(obj, _NonUserDefinedCallables) or + # Can't test 'isinstance(type)' here, as it would + # also be True for regular python classes. + # Can't use the `in` operator here, as it would + # invoke the custom __eq__ method. + obj is type or obj is object) + + +def _signature_is_functionlike(obj): + """Private helper to test if `obj` is a duck type of FunctionType. + A good example of such objects are functions compiled with + Cython, which have all attributes that a pure Python function + would have, but have their code statically compiled. + """ + + if not callable(obj) or isclass(obj): + # All function-like objects are obviously callables, + # and not classes. + return False + + name = getattr(obj, '__name__', None) + code = getattr(obj, '__code__', None) + defaults = getattr(obj, '__defaults__', _void) # Important to use _void ... + kwdefaults = getattr(obj, '__kwdefaults__', _void) # ... and not None here + + return (isinstance(code, types.CodeType) and + isinstance(name, str) and + (defaults is None or isinstance(defaults, tuple)) and + (kwdefaults is None or isinstance(kwdefaults, dict))) + + +def _signature_strip_non_python_syntax(signature): + """ + Private helper function. Takes a signature in Argument Clinic's + extended signature format. + + Returns a tuple of two things: + * that signature re-rendered in standard Python syntax, and + * the index of the "self" parameter (generally 0), or None if + the function does not have a "self" parameter. + """ + + if not signature: + return signature, None + + self_parameter = None + + lines = [l.encode('ascii') for l in signature.split('\n') if l] + generator = iter(lines).__next__ + token_stream = tokenize.tokenize(generator) + + text = [] + add = text.append + + current_parameter = 0 + OP = token.OP + ERRORTOKEN = token.ERRORTOKEN + + # token stream always starts with ENCODING token, skip it + t = next(token_stream) + assert t.type == tokenize.ENCODING + + for t in token_stream: + type, string = t.type, t.string + + if type == OP: + if string == ',': + current_parameter += 1 + + if (type == OP) and (string == '$'): + assert self_parameter is None + self_parameter = current_parameter + continue + + add(string) + if (string == ','): + add(' ') + clean_signature = ''.join(text).strip().replace("\n", "") + return clean_signature, self_parameter + + +def _signature_fromstr(cls, obj, s, skip_bound_arg=True): + """Private helper to parse content of '__text_signature__' + and return a Signature based on it. 
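+
+    For example (illustrative), a '__text_signature__' such as
+    "($self, /, key, default=None)" yields a positional-only 'self'
+    parameter (dropped for bound objects), plus 'key' and an optional
+    'default'.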
+ """ + Parameter = cls._parameter_cls + + clean_signature, self_parameter = _signature_strip_non_python_syntax(s) + + program = "def foo" + clean_signature + ": pass" + + try: + module = ast.parse(program) + except SyntaxError: + module = None + + if not isinstance(module, ast.Module): + raise ValueError("{!r} builtin has invalid signature".format(obj)) + + f = module.body[0] + + parameters = [] + empty = Parameter.empty + + module = None + module_dict = {} + + module_name = getattr(obj, '__module__', None) + if not module_name: + objclass = getattr(obj, '__objclass__', None) + module_name = getattr(objclass, '__module__', None) + + if module_name: + module = sys.modules.get(module_name, None) + if module: + module_dict = module.__dict__ + sys_module_dict = sys.modules.copy() + + def parse_name(node): + assert isinstance(node, ast.arg) + if node.annotation is not None: + raise ValueError("Annotations are not currently supported") + return node.arg + + def wrap_value(s): + try: + value = eval(s, module_dict) + except NameError: + try: + value = eval(s, sys_module_dict) + except NameError: + raise ValueError + + if isinstance(value, (str, int, float, bytes, bool, type(None))): + return ast.Constant(value) + raise ValueError + + class RewriteSymbolics(ast.NodeTransformer): + def visit_Attribute(self, node): + a = [] + n = node + while isinstance(n, ast.Attribute): + a.append(n.attr) + n = n.value + if not isinstance(n, ast.Name): + raise ValueError + a.append(n.id) + value = ".".join(reversed(a)) + return wrap_value(value) + + def visit_Name(self, node): + if not isinstance(node.ctx, ast.Load): + raise ValueError() + return wrap_value(node.id) + + def visit_BinOp(self, node): + # Support constant folding of a couple simple binary operations + # commonly used to define default values in text signatures + left = self.visit(node.left) + right = self.visit(node.right) + if not isinstance(left, ast.Constant) or not isinstance(right, ast.Constant): + raise ValueError + if isinstance(node.op, ast.Add): + return ast.Constant(left.value + right.value) + elif isinstance(node.op, ast.Sub): + return ast.Constant(left.value - right.value) + elif isinstance(node.op, ast.BitOr): + return ast.Constant(left.value | right.value) + raise ValueError + + def p(name_node, default_node, default=empty): + name = parse_name(name_node) + if default_node and default_node is not _empty: + try: + default_node = RewriteSymbolics().visit(default_node) + default = ast.literal_eval(default_node) + except ValueError: + raise ValueError("{!r} builtin has invalid signature".format(obj)) from None + parameters.append(Parameter(name, kind, default=default, annotation=empty)) + + # non-keyword-only parameters + total_non_kw_args = len(f.args.posonlyargs) + len(f.args.args) + required_non_kw_args = total_non_kw_args - len(f.args.defaults) + defaults = itertools.chain(itertools.repeat(None, required_non_kw_args), f.args.defaults) + + kind = Parameter.POSITIONAL_ONLY + for (name, default) in zip(f.args.posonlyargs, defaults): + p(name, default) + + kind = Parameter.POSITIONAL_OR_KEYWORD + for (name, default) in zip(f.args.args, defaults): + p(name, default) + + # *args + if f.args.vararg: + kind = Parameter.VAR_POSITIONAL + p(f.args.vararg, empty) + + # keyword-only arguments + kind = Parameter.KEYWORD_ONLY + for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults): + p(name, default) + + # **kwargs + if f.args.kwarg: + kind = Parameter.VAR_KEYWORD + p(f.args.kwarg, empty) + + if self_parameter is not None: + # Possibly strip 
the bound argument: + # - We *always* strip first bound argument if + # it is a module. + # - We don't strip first bound argument if + # skip_bound_arg is False. + assert parameters + _self = getattr(obj, '__self__', None) + self_isbound = _self is not None + self_ismodule = ismodule(_self) + if self_isbound and (self_ismodule or skip_bound_arg): + parameters.pop(0) + else: + # for builtins, self parameter is always positional-only! + p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY) + parameters[0] = p + + return cls(parameters, return_annotation=cls.empty) + + +def _signature_from_builtin(cls, func, skip_bound_arg=True): + """Private helper function to get signature for + builtin callables. + """ + + if not _signature_is_builtin(func): + raise TypeError("{!r} is not a Python builtin " + "function".format(func)) + + s = getattr(func, "__text_signature__", None) + if not s: + raise ValueError("no signature found for builtin {!r}".format(func)) + + return _signature_fromstr(cls, func, s, skip_bound_arg) + + +def _signature_from_function(cls, func, skip_bound_arg=True, + globals=None, locals=None, eval_str=False, + *, annotation_format=Format.VALUE): + """Private helper: constructs Signature for the given python function.""" + + is_duck_function = False + if not isfunction(func): + if _signature_is_functionlike(func): + is_duck_function = True + else: + # If it's not a pure Python function, and not a duck type + # of pure function: + raise TypeError('{!r} is not a Python function'.format(func)) + + s = getattr(func, "__text_signature__", None) + if s: + return _signature_fromstr(cls, func, s, skip_bound_arg) + + Parameter = cls._parameter_cls + + # Parameter information. + func_code = func.__code__ + pos_count = func_code.co_argcount + arg_names = func_code.co_varnames + posonly_count = func_code.co_posonlyargcount + positional = arg_names[:pos_count] + keyword_only_count = func_code.co_kwonlyargcount + keyword_only = arg_names[pos_count:pos_count + keyword_only_count] + annotations = get_annotations(func, globals=globals, locals=locals, eval_str=eval_str, + format=annotation_format) + defaults = func.__defaults__ + kwdefaults = func.__kwdefaults__ + + if defaults: + pos_default_count = len(defaults) + else: + pos_default_count = 0 + + parameters = [] + + non_default_count = pos_count - pos_default_count + posonly_left = posonly_count + + # Non-keyword-only parameters w/o defaults. + for name in positional[:non_default_count]: + kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=kind)) + if posonly_left: + posonly_left -= 1 + + # ... w/ defaults. + for offset, name in enumerate(positional[non_default_count:]): + kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=kind, + default=defaults[offset])) + if posonly_left: + posonly_left -= 1 + + # *args + if func_code.co_flags & CO_VARARGS: + name = arg_names[pos_count + keyword_only_count] + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_VAR_POSITIONAL)) + + # Keyword-only parameters. 
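+    # (illustrative: for "def f(*, a, b=1): ..." this appends KEYWORD_ONLY
+    # parameters 'a' with no default and 'b' with default 1)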
+    for name in keyword_only:
+        default = _empty
+        if kwdefaults is not None:
+            default = kwdefaults.get(name, _empty)
+
+        annotation = annotations.get(name, _empty)
+        parameters.append(Parameter(name, annotation=annotation,
+                                    kind=_KEYWORD_ONLY,
+                                    default=default))
+    # **kwargs
+    if func_code.co_flags & CO_VARKEYWORDS:
+        index = pos_count + keyword_only_count
+        if func_code.co_flags & CO_VARARGS:
+            index += 1
+
+        name = arg_names[index]
+        annotation = annotations.get(name, _empty)
+        parameters.append(Parameter(name, annotation=annotation,
+                                    kind=_VAR_KEYWORD))
+
+    # If 'func' is a pure Python function, don't validate the
+    # parameters list (for correct order and defaults), it should be OK.
+    return cls(parameters,
+               return_annotation=annotations.get('return', _empty),
+               __validate_parameters__=is_duck_function)
+
+
+def _descriptor_get(descriptor, obj):
+    if isclass(descriptor):
+        return descriptor
+    get = getattr(type(descriptor), '__get__', _sentinel)
+    if get is _sentinel:
+        return descriptor
+    return get(descriptor, obj, type(obj))
+
+
+def _signature_from_callable(obj, *,
+                             follow_wrapper_chains=True,
+                             skip_bound_arg=True,
+                             globals=None,
+                             locals=None,
+                             eval_str=False,
+                             sigcls,
+                             annotation_format=Format.VALUE):
+
+    """Private helper function to get signature for arbitrary
+    callable objects.
+    """
+
+    _get_signature_of = functools.partial(_signature_from_callable,
+                                follow_wrapper_chains=follow_wrapper_chains,
+                                skip_bound_arg=skip_bound_arg,
+                                globals=globals,
+                                locals=locals,
+                                sigcls=sigcls,
+                                eval_str=eval_str,
+                                annotation_format=annotation_format)
+
+    if not callable(obj):
+        raise TypeError('{!r} is not a callable object'.format(obj))
+
+    if isinstance(obj, types.MethodType):
+        # In this case we skip the first parameter of the underlying
+        # function (usually `self` or `cls`).
+        sig = _get_signature_of(obj.__func__)
+
+        if skip_bound_arg:
+            return _signature_bound_method(sig)
+        else:
+            return sig
+
+    # Was this function wrapped by a decorator?
+    if follow_wrapper_chains:
+        # Unwrap until we find an explicit signature or a MethodType (which will be
+        # handled explicitly below).
+        obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")
+                                or isinstance(f, types.MethodType)))
+        if isinstance(obj, types.MethodType):
+            # If the unwrapped object is a *method*, we might want to
+            # skip its first parameter (self).
+            # See test_signature_wrapped_bound_method for details.
+            return _get_signature_of(obj)
+
+    try:
+        sig = obj.__signature__
+    except AttributeError:
+        pass
+    else:
+        if sig is not None:
+            if not isinstance(sig, Signature):
+                raise TypeError(
+                    'unexpected object {!r} in __signature__ '
+                    'attribute'.format(sig))
+            return sig
+
+    try:
+        partialmethod = obj.__partialmethod__
+    except AttributeError:
+        pass
+    else:
+        if isinstance(partialmethod, functools.partialmethod):
+            # Unbound partialmethod (see functools.partialmethod).
+            # This means that we need to calculate the signature
+            # as if it's a regular partial object, but taking into
+            # account that the first positional argument
+            # (usually `self`, or `cls`) will not be passed
+            # automatically (as for boundmethods)
+
+            wrapped_sig = _get_signature_of(partialmethod.func)
+
+            sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
+            first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
+            if first_wrapped_param.kind is Parameter.VAR_POSITIONAL:
+                # First argument of the wrapped callable is `*args`, as in
+                # `partialmethod(lambda *args)`.
+                return sig
+            else:
+                sig_params = tuple(sig.parameters.values())
+                assert (not sig_params or
+                        first_wrapped_param is not sig_params[0])
+                # If there were placeholders set,
+                #   first param is transformed to positional only
+                if partialmethod.args.count(functools.Placeholder):
+                    first_wrapped_param = first_wrapped_param.replace(
+                        kind=Parameter.POSITIONAL_ONLY)
+                new_params = (first_wrapped_param,) + sig_params
+                return sig.replace(parameters=new_params)
+
+    if isinstance(obj, functools.partial):
+        wrapped_sig = _get_signature_of(obj.func)
+        return _signature_get_partial(wrapped_sig, obj)
+
+    if isfunction(obj) or _signature_is_functionlike(obj):
+        # If it's a pure Python function, or an object that is duck type
+        # of a Python function (Cython functions, for instance), then:
+        return _signature_from_function(sigcls, obj,
+                                        skip_bound_arg=skip_bound_arg,
+                                        globals=globals, locals=locals, eval_str=eval_str,
+                                        annotation_format=annotation_format)
+
+    if _signature_is_builtin(obj):
+        return _signature_from_builtin(sigcls, obj,
+                                       skip_bound_arg=skip_bound_arg)
+
+    if isinstance(obj, type):
+        # obj is a class or a metaclass
+
+        # First, let's see if it has an overloaded __call__ defined
+        # in its metaclass
+        call = _signature_get_user_defined_method(
+            type(obj),
+            '__call__',
+            follow_wrapper_chains=follow_wrapper_chains,
+        )
+        if call is not None:
+            return _get_signature_of(call)
+
+        # NOTE: The user-defined method can be a function with a thin wrapper
+        # around object.__new__ (e.g., generated by `@warnings.deprecated`)
+        new = _signature_get_user_defined_method(
+            obj,
+            '__new__',
+            follow_wrapper_chains=follow_wrapper_chains,
+        )
+        init = _signature_get_user_defined_method(
+            obj,
+            '__init__',
+            follow_wrapper_chains=follow_wrapper_chains,
+        )
+
+        # Go through the MRO and see if any class has a user-defined
+        # pure Python __new__ or __init__ method
+        for base in obj.__mro__:
+            # Now we check if the 'obj' class has its own '__new__' method
+            if new is not None and '__new__' in base.__dict__:
+                sig = _get_signature_of(new)
+                if skip_bound_arg:
+                    sig = _signature_bound_method(sig)
+                return sig
+            # or its own '__init__' method
+            elif init is not None and '__init__' in base.__dict__:
+                return _get_signature_of(init)
+
+        # At this point we know that `obj` is a class, with no user-
+        # defined '__init__', '__new__', or class-level '__call__'
+
+        for base in obj.__mro__[:-1]:
+            # Since '__text_signature__' is implemented as a
+            # descriptor that extracts text signature from the
+            # class docstring, if 'obj' is derived from a builtin
+            # class, its own '__text_signature__' may be 'None'.
+            # Therefore, we go through the MRO (except the last
+            # class in there, which is 'object') to find the first
+            # class with a non-empty text signature.
+            try:
+                text_sig = base.__text_signature__
+            except AttributeError:
+                pass
+            else:
+                if text_sig:
+                    # If 'base' class has a __text_signature__ attribute:
+                    # return a signature based on it
+                    return _signature_fromstr(sigcls, base, text_sig)
+
+        # No '__text_signature__' was found for the 'obj' class.
+        # Last option is to check if its '__init__' is
+        # object.__init__ or type.__init__.
+        if type not in obj.__mro__:
+            obj_init = obj.__init__
+            obj_new = obj.__new__
+            if follow_wrapper_chains:
+                obj_init = unwrap(obj_init)
+                obj_new = unwrap(obj_new)
+            # We have a class (not metaclass), but no user-defined
+            # __init__ or __new__ for it
+            if obj_init is object.__init__ and obj_new is object.__new__:
+                # Return a signature of 'object' builtin.
+ return sigcls.from_callable(object) + else: + raise ValueError( + 'no signature found for builtin type {!r}'.format(obj)) + + else: + # An object with __call__ + call = getattr_static(type(obj), '__call__', None) + if call is not None: + try: + text_sig = obj.__text_signature__ + except AttributeError: + pass + else: + if text_sig: + return _signature_fromstr(sigcls, obj, text_sig) + call = _descriptor_get(call, obj) + return _get_signature_of(call) + + raise ValueError('callable {!r} is not supported by signature'.format(obj)) + + +class _void: + """A private marker - used in Parameter & Signature.""" + + +class _empty: + """Marker object for Signature.empty and Parameter.empty.""" + + +class _ParameterKind(enum.IntEnum): + POSITIONAL_ONLY = 'positional-only' + POSITIONAL_OR_KEYWORD = 'positional or keyword' + VAR_POSITIONAL = 'variadic positional' + KEYWORD_ONLY = 'keyword-only' + VAR_KEYWORD = 'variadic keyword' + + def __new__(cls, description): + value = len(cls.__members__) + member = int.__new__(cls, value) + member._value_ = value + member.description = description + return member + + def __str__(self): + return self.name + +_POSITIONAL_ONLY = _ParameterKind.POSITIONAL_ONLY +_POSITIONAL_OR_KEYWORD = _ParameterKind.POSITIONAL_OR_KEYWORD +_VAR_POSITIONAL = _ParameterKind.VAR_POSITIONAL +_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY +_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD + + +class Parameter: + """Represents a parameter in a function signature. + + Has the following public attributes: + + * name : str + The name of the parameter as a string. + * default : object + The default value for the parameter if specified. If the + parameter has no default value, this attribute is set to + `Parameter.empty`. + * annotation + The annotation for the parameter if specified. If the + parameter has no annotation, this attribute is set to + `Parameter.empty`. + * kind + Describes how argument values are bound to the parameter. + Possible values: `Parameter.POSITIONAL_ONLY`, + `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`, + `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`. + Every value has a `description` attribute describing meaning. + """ + + __slots__ = ('_name', '_kind', '_default', '_annotation') + + POSITIONAL_ONLY = _POSITIONAL_ONLY + POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD + VAR_POSITIONAL = _VAR_POSITIONAL + KEYWORD_ONLY = _KEYWORD_ONLY + VAR_KEYWORD = _VAR_KEYWORD + + empty = _empty + + def __init__(self, name, kind, *, default=_empty, annotation=_empty): + try: + self._kind = _ParameterKind(kind) + except ValueError: + raise ValueError(f'value {kind!r} is not a valid Parameter.kind') + if default is not _empty: + if self._kind in (_VAR_POSITIONAL, _VAR_KEYWORD): + msg = '{} parameters cannot have default values' + msg = msg.format(self._kind.description) + raise ValueError(msg) + self._default = default + self._annotation = annotation + + if name is _empty: + raise ValueError('name is a required attribute for Parameter') + + if not isinstance(name, str): + msg = 'name must be a str, not a {}'.format(type(name).__name__) + raise TypeError(msg) + + if name[0] == '.' and name[1:].isdigit(): + # These are implicit arguments generated by comprehensions. In + # order to provide a friendlier interface to users, we recast + # their name as "implicitN" and treat them as positional-only. + # See issue 19611. 
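To make the class branch above concrete: for an ordinary class the signature comes from `__init__` (or `__new__`) found along the MRO, and for a plain instance it comes from `type(obj).__call__`. A small sketch with illustrative names:

```python
import inspect

class Point:
    def __init__(self, x, y=0):
        self.x, self.y = x, y

# No metaclass __call__ and no user-defined __new__ here, so __init__
# supplies the signature; its bound 'self' parameter is dropped.
print(inspect.signature(Point))       # (x, y=0)

class Dialer:
    def __call__(self, number, *, retry=False):
        return number

# For a plain instance, the signature is taken from type(obj).__call__.
print(inspect.signature(Dialer()))    # (number, *, retry=False)
```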
+ if self._kind != _POSITIONAL_OR_KEYWORD: + msg = ( + 'implicit arguments must be passed as ' + 'positional or keyword arguments, not {}' + ) + msg = msg.format(self._kind.description) + raise ValueError(msg) + self._kind = _POSITIONAL_ONLY + name = 'implicit{}'.format(name[1:]) + + # It's possible for C functions to have a positional-only parameter + # where the name is a keyword, so for compatibility we'll allow it. + is_keyword = iskeyword(name) and self._kind is not _POSITIONAL_ONLY + if is_keyword or not name.isidentifier(): + raise ValueError('{!r} is not a valid parameter name'.format(name)) + + self._name = name + + def __reduce__(self): + return (type(self), + (self._name, self._kind), + {'_default': self._default, + '_annotation': self._annotation}) + + def __setstate__(self, state): + self._default = state['_default'] + self._annotation = state['_annotation'] + + @property + def name(self): + return self._name + + @property + def default(self): + return self._default + + @property + def annotation(self): + return self._annotation + + @property + def kind(self): + return self._kind + + def replace(self, *, name=_void, kind=_void, + annotation=_void, default=_void): + """Creates a customized copy of the Parameter.""" + + if name is _void: + name = self._name + + if kind is _void: + kind = self._kind + + if annotation is _void: + annotation = self._annotation + + if default is _void: + default = self._default + + return type(self)(name, kind, default=default, annotation=annotation) + + def __str__(self): + return self._format() + + def _format(self, *, quote_annotation_strings=True): + kind = self.kind + formatted = self._name + + # Add annotation and default value + if self._annotation is not _empty: + annotation = formatannotation(self._annotation, + quote_annotation_strings=quote_annotation_strings) + formatted = '{}: {}'.format(formatted, annotation) + + if self._default is not _empty: + if self._annotation is not _empty: + formatted = '{} = {}'.format(formatted, repr(self._default)) + else: + formatted = '{}={}'.format(formatted, repr(self._default)) + + if kind == _VAR_POSITIONAL: + formatted = '*' + formatted + elif kind == _VAR_KEYWORD: + formatted = '**' + formatted + + return formatted + + __replace__ = replace + + def __repr__(self): + return '<{} "{}">'.format(self.__class__.__name__, self) + + def __hash__(self): + return hash((self._name, self._kind, self._annotation, self._default)) + + def __eq__(self, other): + if self is other: + return True + if not isinstance(other, Parameter): + return NotImplemented + return (self._name == other._name and + self._kind == other._kind and + self._default == other._default and + self._annotation == other._annotation) + + +class BoundArguments: + """Result of `Signature.bind` call. Holds the mapping of arguments + to the function's parameters. + + Has the following public attributes: + + * arguments : dict + An ordered mutable mapping of parameters' names to arguments' values. + Does not contain arguments' default values. + * signature : Signature + The Signature object that created this instance. + * args : tuple + Tuple of positional arguments values. + * kwargs : dict + Dict of keyword arguments values. 
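With the `Parameter` class in view, signatures can also be assembled by hand; a short sketch of the constructor and the kind constraint it enforces:

```python
from inspect import Parameter, Signature

sig = Signature([
    Parameter("x", Parameter.POSITIONAL_ONLY),
    Parameter("y", Parameter.POSITIONAL_OR_KEYWORD, default=1),
    Parameter("opts", Parameter.VAR_KEYWORD),
])
print(sig)  # (x, /, y=1, **opts)

# Variadic parameters cannot carry defaults, per Parameter.__init__ above.
try:
    Parameter("args", Parameter.VAR_POSITIONAL, default=())
except ValueError as exc:
    print(exc)  # variadic positional parameters cannot have default values
```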
+ """ + + __slots__ = ('arguments', '_signature', '__weakref__') + + def __init__(self, signature, arguments): + self.arguments = arguments + self._signature = signature + + @property + def signature(self): + return self._signature + + @property + def args(self): + args = [] + for param_name, param in self._signature.parameters.items(): + if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + break + + try: + arg = self.arguments[param_name] + except KeyError: + # We're done here. Other arguments + # will be mapped in 'BoundArguments.kwargs' + break + else: + if param.kind == _VAR_POSITIONAL: + # *args + args.extend(arg) + else: + # plain argument + args.append(arg) + + return tuple(args) + + @property + def kwargs(self): + kwargs = {} + kwargs_started = False + for param_name, param in self._signature.parameters.items(): + if not kwargs_started: + if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + kwargs_started = True + else: + if param_name not in self.arguments: + kwargs_started = True + continue + + if not kwargs_started: + continue + + try: + arg = self.arguments[param_name] + except KeyError: + pass + else: + if param.kind == _VAR_KEYWORD: + # **kwargs + kwargs.update(arg) + else: + # plain keyword argument + kwargs[param_name] = arg + + return kwargs + + def apply_defaults(self): + """Set default values for missing arguments. + + For variable-positional arguments (*args) the default is an + empty tuple. + + For variable-keyword arguments (**kwargs) the default is an + empty dict. + """ + arguments = self.arguments + new_arguments = [] + for name, param in self._signature.parameters.items(): + try: + new_arguments.append((name, arguments[name])) + except KeyError: + if param.default is not _empty: + val = param.default + elif param.kind is _VAR_POSITIONAL: + val = () + elif param.kind is _VAR_KEYWORD: + val = {} + else: + # This BoundArguments was likely produced by + # Signature.bind_partial(). + continue + new_arguments.append((name, val)) + self.arguments = dict(new_arguments) + + def __eq__(self, other): + if self is other: + return True + if not isinstance(other, BoundArguments): + return NotImplemented + return (self.signature == other.signature and + self.arguments == other.arguments) + + def __setstate__(self, state): + self._signature = state['_signature'] + self.arguments = state['arguments'] + + def __getstate__(self): + return {'_signature': self._signature, 'arguments': self.arguments} + + def __repr__(self): + args = [] + for arg, value in self.arguments.items(): + args.append('{}={!r}'.format(arg, value)) + return '<{} ({})>'.format(self.__class__.__name__, ', '.join(args)) + + +class Signature: + """A Signature object represents the overall signature of a function. + It stores a Parameter object for each parameter accepted by the + function, as well as information specific to the function itself. + + A Signature object has the following public attributes and methods: + + * parameters : OrderedDict + An ordered mapping of parameters' names to the corresponding + Parameter objects (keyword-only arguments are in the same order + as listed in `code.co_varnames`). + * return_annotation : object + The annotation for the return type of the function if specified. + If the function has no annotation for its return type, this + attribute is set to `Signature.empty`. + * bind(*args, **kwargs) -> BoundArguments + Creates a mapping from positional and keyword arguments to + parameters. 
+    * bind_partial(*args, **kwargs) -> BoundArguments
+        Creates a partial mapping from positional and keyword arguments
+        to parameters (simulating 'functools.partial' behavior.)
+    """
+
+    __slots__ = ('_return_annotation', '_parameters')
+
+    _parameter_cls = Parameter
+    _bound_arguments_cls = BoundArguments
+
+    empty = _empty
+
+    def __init__(self, parameters=None, *, return_annotation=_empty,
+                 __validate_parameters__=True):
+        """Constructs Signature from the given list of Parameter
+        objects and 'return_annotation'. All arguments are optional.
+        """
+
+        if parameters is None:
+            params = OrderedDict()
+        else:
+            if __validate_parameters__:
+                params = OrderedDict()
+                top_kind = _POSITIONAL_ONLY
+                seen_default = False
+                seen_var_parameters = set()
+
+                for param in parameters:
+                    kind = param.kind
+                    name = param.name
+
+                    if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
+                        if kind in seen_var_parameters:
+                            msg = f'more than one {kind.description} parameter'
+                            raise ValueError(msg)
+
+                        seen_var_parameters.add(kind)
+
+                    if kind < top_kind:
+                        msg = (
+                            'wrong parameter order: {} parameter before {} '
+                            'parameter'
+                        )
+                        msg = msg.format(top_kind.description,
+                                         kind.description)
+                        raise ValueError(msg)
+                    elif kind > top_kind:
+                        top_kind = kind
+
+                    if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):
+                        if param.default is _empty:
+                            if seen_default:
+                                # No default for this parameter, but the
+                                # previous parameter had a default
+                                msg = 'non-default argument follows default ' \
+                                      'argument'
+                                raise ValueError(msg)
+                        else:
+                            # There is a default for this parameter.
+                            seen_default = True
+
+                    if name in params:
+                        msg = 'duplicate parameter name: {!r}'.format(name)
+                        raise ValueError(msg)
+
+                    params[name] = param
+            else:
+                params = OrderedDict((param.name, param) for param in parameters)
+
+        self._parameters = types.MappingProxyType(params)
+        self._return_annotation = return_annotation
+
+    @classmethod
+    def from_callable(cls, obj, *,
+                      follow_wrapped=True, globals=None, locals=None, eval_str=False,
+                      annotation_format=Format.VALUE):
+        """Constructs Signature for the given callable object."""
+        return _signature_from_callable(obj, sigcls=cls,
+                                        follow_wrapper_chains=follow_wrapped,
+                                        globals=globals, locals=locals, eval_str=eval_str,
+                                        annotation_format=annotation_format)
+
+    @property
+    def parameters(self):
+        return self._parameters
+
+    @property
+    def return_annotation(self):
+        return self._return_annotation
+
+    def replace(self, *, parameters=_void, return_annotation=_void):
+        """Creates a customized copy of the Signature.
+        Pass 'parameters' and/or 'return_annotation' arguments
+        to override them in the new copy.
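The validation loop in `Signature.__init__` above enforces both the kind ordering and the default-value rule; two failing cases, for illustration:

```python
from inspect import Parameter, Signature

# Keyword-only before positional-or-keyword: wrong kind order.
try:
    Signature([Parameter("a", Parameter.KEYWORD_ONLY),
               Parameter("b", Parameter.POSITIONAL_OR_KEYWORD)])
except ValueError as exc:
    print(exc)  # wrong parameter order: keyword-only parameter before
                # positional or keyword parameter

# A required parameter after one with a default is also rejected.
try:
    Signature([Parameter("a", Parameter.POSITIONAL_OR_KEYWORD, default=0),
               Parameter("b", Parameter.POSITIONAL_OR_KEYWORD)])
except ValueError as exc:
    print(exc)  # non-default argument follows default argument
```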
+ """ + + if parameters is _void: + parameters = self.parameters.values() + + if return_annotation is _void: + return_annotation = self._return_annotation + + return type(self)(parameters, + return_annotation=return_annotation) + + __replace__ = replace + + def _hash_basis(self): + params = tuple(param for param in self.parameters.values() + if param.kind != _KEYWORD_ONLY) + + kwo_params = {param.name: param for param in self.parameters.values() + if param.kind == _KEYWORD_ONLY} + + return params, kwo_params, self.return_annotation + + def __hash__(self): + params, kwo_params, return_annotation = self._hash_basis() + kwo_params = frozenset(kwo_params.values()) + return hash((params, kwo_params, return_annotation)) + + def __eq__(self, other): + if self is other: + return True + if not isinstance(other, Signature): + return NotImplemented + return self._hash_basis() == other._hash_basis() + + def _bind(self, args, kwargs, *, partial=False): + """Private method. Don't use directly.""" + + arguments = {} + + parameters = iter(self.parameters.values()) + parameters_ex = () + arg_vals = iter(args) + + pos_only_param_in_kwargs = [] + + while True: + # Let's iterate through the positional arguments and corresponding + # parameters + try: + arg_val = next(arg_vals) + except StopIteration: + # No more positional arguments + try: + param = next(parameters) + except StopIteration: + # No more parameters. That's it. Just need to check that + # we have no `kwargs` after this while loop + break + else: + if param.kind == _VAR_POSITIONAL: + # That's OK, just empty *args. Let's start parsing + # kwargs + break + elif param.name in kwargs: + if param.kind == _POSITIONAL_ONLY: + if param.default is _empty: + msg = f'missing a required positional-only argument: {param.name!r}' + raise TypeError(msg) + # Raise a TypeError once we are sure there is no + # **kwargs param later. + pos_only_param_in_kwargs.append(param) + continue + parameters_ex = (param,) + break + elif (param.kind == _VAR_KEYWORD or + param.default is not _empty): + # That's fine too - we have a default value for this + # parameter. 
So, let's start parsing `kwargs`, starting
+                        # with the current parameter
+                        parameters_ex = (param,)
+                        break
+                    else:
+                        # No default, not VAR_KEYWORD, not VAR_POSITIONAL,
+                        # not in `kwargs`
+                        if partial:
+                            parameters_ex = (param,)
+                            break
+                        else:
+                            if param.kind == _KEYWORD_ONLY:
+                                argtype = ' keyword-only'
+                            else:
+                                argtype = ''
+                            msg = 'missing a required{argtype} argument: {arg!r}'
+                            msg = msg.format(arg=param.name, argtype=argtype)
+                            raise TypeError(msg) from None
+            else:
+                # We have a positional argument to process
+                try:
+                    param = next(parameters)
+                except StopIteration:
+                    raise TypeError('too many positional arguments') from None
+                else:
+                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
+                        # Looks like we have no parameter for this positional
+                        # argument
+                        raise TypeError(
+                            'too many positional arguments') from None
+
+                    if param.kind == _VAR_POSITIONAL:
+                        # We have an '*args'-like argument, let's fill it with
+                        # all positional arguments we have left and move on to
+                        # the next phase
+                        values = [arg_val]
+                        values.extend(arg_vals)
+                        arguments[param.name] = tuple(values)
+                        break
+
+                    if param.name in kwargs and param.kind != _POSITIONAL_ONLY:
+                        raise TypeError(
+                            'multiple values for argument {arg!r}'.format(
+                                arg=param.name)) from None
+
+                    arguments[param.name] = arg_val
+
+        # Now, we iterate through the remaining parameters to process
+        # keyword arguments
+        kwargs_param = None
+        for param in itertools.chain(parameters_ex, parameters):
+            if param.kind == _VAR_KEYWORD:
+                # Memorize that we have a '**kwargs'-like parameter
+                kwargs_param = param
+                continue
+
+            if param.kind == _VAR_POSITIONAL:
+                # Named arguments don't refer to '*args'-like parameters.
+                # We only arrive here if the positional arguments ended
+                # before reaching the last parameter before *args.
+                continue
+
+            param_name = param.name
+            try:
+                arg_val = kwargs.pop(param_name)
+            except KeyError:
+                # We have no value for this parameter. It's fine though,
+                # if it has a default value, or it is an '*args'-like
+                # parameter, left alone by the processing of positional
+                # arguments.
+                if (not partial and param.kind != _VAR_POSITIONAL and
+                        param.default is _empty):
+                    raise TypeError('missing a required argument: {arg!r}'. \
+                                    format(arg=param_name)) from None
+
+            else:
+                arguments[param_name] = arg_val
+
+        if kwargs:
+            if kwargs_param is not None:
+                # Process our '**kwargs'-like parameter
+                arguments[kwargs_param.name] = kwargs
+            elif pos_only_param_in_kwargs:
+                raise TypeError(
+                    'got some positional-only arguments passed as '
+                    'keyword arguments: {arg!r}'.format(
+                        arg=', '.join(
+                            param.name
+                            for param in pos_only_param_in_kwargs
+                        ),
+                    ),
+                )
+            else:
+                raise TypeError(
+                    'got an unexpected keyword argument {arg!r}'.format(
+                        arg=next(iter(kwargs))))
+
+        return self._bound_arguments_cls(self, arguments)
+
+    def bind(self, /, *args, **kwargs):
+        """Get a BoundArguments object, that maps the passed `args`
+        and `kwargs` to the function's signature. Raises `TypeError`
+        if the passed arguments can not be bound.
+        """
+        return self._bind(args, kwargs)
+
+    def bind_partial(self, /, *args, **kwargs):
+        """Get a BoundArguments object, that partially maps the
+        passed `args` and `kwargs` to the function's signature.
+        Raises `TypeError` if the passed arguments can not be bound.
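`_bind` above powers both public entry points; assuming a toy function (`fetch` is a made-up name), the difference between `bind` and `bind_partial` looks like this:

```python
import inspect

def fetch(url, retries=3, *, timeout=10.0):
    """Illustrative only."""

sig = inspect.signature(fetch)

ba = sig.bind("https://example.com", timeout=2.0)
ba.apply_defaults()
print(ba.arguments)
# {'url': 'https://example.com', 'retries': 3, 'timeout': 2.0}

# bind() raises for an incomplete call; bind_partial() does not.
print(sig.bind_partial(timeout=2.0).arguments)  # {'timeout': 2.0}
try:
    sig.bind(timeout=2.0)
except TypeError as exc:
    print(exc)  # missing a required argument: 'url'
```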
+ """ + return self._bind(args, kwargs, partial=True) + + def __reduce__(self): + return (type(self), + (tuple(self._parameters.values()),), + {'_return_annotation': self._return_annotation}) + + def __setstate__(self, state): + self._return_annotation = state['_return_annotation'] + + def __repr__(self): + return '<{} {}>'.format(self.__class__.__name__, self) + + def __str__(self): + return self.format() + + def format(self, *, max_width=None, quote_annotation_strings=True): + """Create a string representation of the Signature object. + + If *max_width* integer is passed, + signature will try to fit into the *max_width*. + If signature is longer than *max_width*, + all parameters will be on separate lines. + + If *quote_annotation_strings* is False, annotations + in the signature are displayed without opening and closing quotation + marks. This is useful when the signature was created with the + STRING format or when ``from __future__ import annotations`` was used. + """ + result = [] + render_pos_only_separator = False + render_kw_only_separator = True + for param in self.parameters.values(): + formatted = param._format(quote_annotation_strings=quote_annotation_strings) + + kind = param.kind + + if kind == _POSITIONAL_ONLY: + render_pos_only_separator = True + elif render_pos_only_separator: + # It's not a positional-only parameter, and the flag + # is set to 'True' (there were pos-only params before.) + result.append('/') + render_pos_only_separator = False + + if kind == _VAR_POSITIONAL: + # OK, we have an '*args'-like parameter, so we won't need + # a '*' to separate keyword-only arguments + render_kw_only_separator = False + elif kind == _KEYWORD_ONLY and render_kw_only_separator: + # We have a keyword-only parameter to render and we haven't + # rendered an '*args'-like parameter before, so add a '*' + # separator to the parameters list ("foo(arg1, *, arg2)" case) + result.append('*') + # This condition should be only triggered once, so + # reset the flag + render_kw_only_separator = False + + result.append(formatted) + + if render_pos_only_separator: + # There were only positional-only parameters, hence the + # flag was not reset to 'False' + result.append('/') + + rendered = '({})'.format(', '.join(result)) + if max_width is not None and len(rendered) > max_width: + rendered = '(\n {}\n)'.format(',\n '.join(result)) + + if self.return_annotation is not _empty: + anno = formatannotation(self.return_annotation, + quote_annotation_strings=quote_annotation_strings) + rendered += ' -> {}'.format(anno) + + return rendered + + +def signature(obj, *, follow_wrapped=True, globals=None, locals=None, eval_str=False, + annotation_format=Format.VALUE): + """Get a signature object for the passed callable.""" + return Signature.from_callable(obj, follow_wrapped=follow_wrapped, + globals=globals, locals=locals, eval_str=eval_str, + annotation_format=annotation_format) + + +class BufferFlags(enum.IntFlag): + SIMPLE = 0x0 + WRITABLE = 0x1 + FORMAT = 0x4 + ND = 0x8 + STRIDES = 0x10 | ND + C_CONTIGUOUS = 0x20 | STRIDES + F_CONTIGUOUS = 0x40 | STRIDES + ANY_CONTIGUOUS = 0x80 | STRIDES + INDIRECT = 0x100 | STRIDES + CONTIG = ND | WRITABLE + CONTIG_RO = ND + STRIDED = STRIDES | WRITABLE + STRIDED_RO = STRIDES + RECORDS = STRIDES | WRITABLE | FORMAT + RECORDS_RO = STRIDES | FORMAT + FULL = INDIRECT | WRITABLE | FORMAT + FULL_RO = INDIRECT | FORMAT + READ = 0x100 + WRITE = 0x200 + + +def _main(): + """ Logic for inspecting an object given at command line """ + import argparse + import importlib + + parser 
= argparse.ArgumentParser(color=True) + parser.add_argument( + 'object', + help="The object to be analysed. " + "It supports the 'module:qualname' syntax") + parser.add_argument( + '-d', '--details', action='store_true', + help='Display info about the module rather than its source code') + + args = parser.parse_args() + + target = args.object + mod_name, has_attrs, attrs = target.partition(":") + try: + obj = module = importlib.import_module(mod_name) + except Exception as exc: + msg = "Failed to import {} ({}: {})".format(mod_name, + type(exc).__name__, + exc) + print(msg, file=sys.stderr) + sys.exit(2) + + if has_attrs: + parts = attrs.split(".") + obj = module + for part in parts: + obj = getattr(obj, part) + + if module.__name__ in sys.builtin_module_names: + print("Can't get info for builtin modules.", file=sys.stderr) + sys.exit(1) + + if args.details: + print('Target: {}'.format(target)) + print('Origin: {}'.format(getsourcefile(module))) + print('Cached: {}'.format(module.__cached__)) + if obj is module: + print('Loader: {}'.format(repr(module.__loader__))) + if hasattr(module, '__path__'): + print('Submodule search path: {}'.format(module.__path__)) + else: + try: + __, lineno = findsource(obj) + except Exception: + pass + else: + print('Line: {}'.format(lineno)) + + print('\n') + else: + print(getsource(obj)) + + +if __name__ == "__main__": + _main() diff --git a/Python314_4_x86_Template/Lib/io.py b/Python314_4_x86_Template/Lib/io.py new file mode 100644 index 00000000..63ffadb1 --- /dev/null +++ b/Python314_4_x86_Template/Lib/io.py @@ -0,0 +1,150 @@ +"""The io module provides the Python interfaces to stream handling. The +builtin open function is defined in this module. + +At the top of the I/O hierarchy is the abstract base class IOBase. It +defines the basic interface to a stream. Note, however, that there is no +separation between reading and writing to streams; implementations are +allowed to raise an OSError if they do not support a given operation. + +Extending IOBase is RawIOBase which deals simply with the reading and +writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide +an interface to OS files. + +BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its +subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer +streams that are readable, writable, and both respectively. +BufferedRandom provides a buffered interface to random access +streams. BytesIO is a simple stream of in-memory bytes. + +Another IOBase subclass, TextIOBase, deals with the encoding and decoding +of streams into text. TextIOWrapper, which extends it, is a buffered text +interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO +is an in-memory stream for text. + +Argument names are not part of the specification, and only the arguments +of open() are intended to be used as keyword arguments. + +data: + +DEFAULT_BUFFER_SIZE + + An int containing the default buffer size used by the module's buffered + I/O classes. open() uses the file's blksize (as obtained by os.stat) if + possible. +""" +# New I/O library conforming to PEP 3116. 
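The hierarchy described in this docstring can be observed directly once the module is imported; a short sketch:

```python
import io

# BytesIO acts as a buffered binary stream, and TextIOWrapper layers
# encoding/decoding on top of it, as outlined above.
raw = io.BytesIO(b"hello")
print(isinstance(raw, io.BufferedIOBase))   # True

text = io.TextIOWrapper(raw, encoding="utf-8")
print(isinstance(text, io.TextIOBase))      # True
print(text.read())                          # hello

# StringIO is the purely in-memory text counterpart.
buf = io.StringIO()
buf.write("done\n")
print(buf.getvalue(), end="")               # done
```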
+ +__author__ = ("Guido van Rossum , " + "Mike Verdone , " + "Mark Russell , " + "Antoine Pitrou , " + "Amaury Forgeot d'Arc , " + "Benjamin Peterson ") + +__all__ = ["BlockingIOError", "open", "open_code", "IOBase", "RawIOBase", + "FileIO", "BytesIO", "StringIO", "BufferedIOBase", + "BufferedReader", "BufferedWriter", "BufferedRWPair", + "BufferedRandom", "TextIOBase", "TextIOWrapper", + "UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END", + "DEFAULT_BUFFER_SIZE", "text_encoding", "IncrementalNewlineDecoder", + "Reader", "Writer"] + + +import _io +import abc + +from _collections_abc import _check_methods +from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation, + open, open_code, FileIO, BytesIO, StringIO, BufferedReader, + BufferedWriter, BufferedRWPair, BufferedRandom, + IncrementalNewlineDecoder, text_encoding, TextIOWrapper) + + +# for seek() +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +# Declaring ABCs in C is tricky so we do it here. +# Method descriptions and default implementations are inherited from the C +# version however. +class IOBase(_io._IOBase, metaclass=abc.ABCMeta): + __doc__ = _io._IOBase.__doc__ + +class RawIOBase(_io._RawIOBase, IOBase): + __doc__ = _io._RawIOBase.__doc__ + +class BufferedIOBase(_io._BufferedIOBase, IOBase): + __doc__ = _io._BufferedIOBase.__doc__ + +class TextIOBase(_io._TextIOBase, IOBase): + __doc__ = _io._TextIOBase.__doc__ + +RawIOBase.register(FileIO) + +for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom, + BufferedRWPair): + BufferedIOBase.register(klass) + +for klass in (StringIO, TextIOWrapper): + TextIOBase.register(klass) +del klass + +try: + from _io import _WindowsConsoleIO +except ImportError: + pass +else: + RawIOBase.register(_WindowsConsoleIO) + +# +# Static Typing Support +# + +GenericAlias = type(list[int]) + + +class Reader(metaclass=abc.ABCMeta): + """Protocol for simple I/O reader instances. + + This protocol only supports blocking I/O. + """ + + __slots__ = () + + @abc.abstractmethod + def read(self, size=..., /): + """Read data from the input stream and return it. + + If *size* is specified, at most *size* items (bytes/characters) will be + read. + """ + + @classmethod + def __subclasshook__(cls, C): + if cls is Reader: + return _check_methods(C, "read") + return NotImplemented + + __class_getitem__ = classmethod(GenericAlias) + + +class Writer(metaclass=abc.ABCMeta): + """Protocol for simple I/O writer instances. + + This protocol only supports blocking I/O. + """ + + __slots__ = () + + @abc.abstractmethod + def write(self, data, /): + """Write *data* to the output stream and return the number of items written.""" + + @classmethod + def __subclasshook__(cls, C): + if cls is Writer: + return _check_methods(C, "write") + return NotImplemented + + __class_getitem__ = classmethod(GenericAlias) diff --git a/Python314_4_x86_Template/Lib/ipaddress.py b/Python314_4_x86_Template/Lib/ipaddress.py new file mode 100644 index 00000000..ca732e4f --- /dev/null +++ b/Python314_4_x86_Template/Lib/ipaddress.py @@ -0,0 +1,2417 @@ +# Copyright 2007 Google Inc. +# Licensed to PSF under a Contributor Agreement. + +"""A fast, lightweight IPv4/IPv6 manipulation library in Python. + +This library is used to create/poke/manipulate IPv4 and IPv6 addresses +and networks. 
+ +""" + +__version__ = '1.0' + + +import functools + +IPV4LENGTH = 32 +IPV6LENGTH = 128 + + +class AddressValueError(ValueError): + """A Value Error related to the address.""" + + +class NetmaskValueError(ValueError): + """A Value Error related to the netmask.""" + + +def ip_address(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Address or IPv6Address object. + + Raises: + ValueError: if the *address* passed isn't either a v4 or a v6 + address + + """ + try: + return IPv4Address(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Address(address) + except (AddressValueError, NetmaskValueError): + pass + + raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 address') + + +def ip_network(address, strict=True): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP network. Either IPv4 or + IPv6 networks may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Network or IPv6Network object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. Or if the network has host bits set. + + """ + try: + return IPv4Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 network') + + +def ip_interface(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Interface or IPv6Interface object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. + + Notes: + The IPv?Interface classes describe an Address on a particular + Network, so they're basically a combination of both the Address + and Network classes. + + """ + try: + return IPv4Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 interface') + + +def v4_int_to_packed(address): + """Represent an address as 4 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv4 IP address. + + Returns: + The integer address packed as 4 bytes in network (big-endian) order. + + Raises: + ValueError: If the integer is negative or too large to be an + IPv4 IP address. + + """ + try: + return address.to_bytes(4) # big endian + except OverflowError: + raise ValueError("Address negative or too large for IPv4") + + +def v6_int_to_packed(address): + """Represent an address as 16 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv6 IP address. + + Returns: + The integer address packed as 16 bytes in network (big-endian) order. 
+ + """ + try: + return address.to_bytes(16) # big endian + except OverflowError: + raise ValueError("Address negative or too large for IPv6") + + +def _split_optional_netmask(address): + """Helper to split the netmask and raise AddressValueError if needed""" + addr = str(address).split('/') + if len(addr) > 2: + raise AddressValueError(f"Only one '/' permitted in {address!r}") + return addr + + +def _find_address_range(addresses): + """Find a sequence of sorted deduplicated IPv#Address. + + Args: + addresses: a list of IPv#Address objects. + + Yields: + A tuple containing the first and last IP addresses in the sequence. + + """ + it = iter(addresses) + first = last = next(it) + for ip in it: + if ip._ip != last._ip + 1: + yield first, last + first = ip + last = ip + yield first, last + + +def _count_righthand_zero_bits(number, bits): + """Count the number of zero bits on the right hand side. + + Args: + number: an integer. + bits: maximum number of bits to count. + + Returns: + The number of zero bits on the right hand side of the number. + + """ + if number == 0: + return bits + return min(bits, (~number & (number-1)).bit_length()) + + +def summarize_address_range(first, last): + """Summarize a network range given the first and last IP addresses. + + Example: + >>> list(summarize_address_range(IPv4Address('192.0.2.0'), + ... IPv4Address('192.0.2.130'))) + ... #doctest: +NORMALIZE_WHITESPACE + [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), + IPv4Network('192.0.2.130/32')] + + Args: + first: the first IPv4Address or IPv6Address in the range. + last: the last IPv4Address or IPv6Address in the range. + + Returns: + An iterator of the summarized IPv(4|6) network objects. + + Raise: + TypeError: + If the first and last objects are not IP addresses. + If the first and last objects are not the same version. + ValueError: + If the last object is not greater than the first. + If the version of the first address is not 4 or 6. + + """ + if (not (isinstance(first, _BaseAddress) and + isinstance(last, _BaseAddress))): + raise TypeError('first and last must be IP addresses, not networks') + if first.version != last.version: + raise TypeError("%s and %s are not of the same version" % ( + first, last)) + if first > last: + raise ValueError('last IP address must be greater than first') + + if first.version == 4: + ip = IPv4Network + elif first.version == 6: + ip = IPv6Network + else: + raise ValueError('unknown IP version') + + ip_bits = first.max_prefixlen + first_int = first._ip + last_int = last._ip + while first_int <= last_int: + nbits = min(_count_righthand_zero_bits(first_int, ip_bits), + (last_int - first_int + 1).bit_length() - 1) + net = ip((first_int, ip_bits - nbits)) + yield net + first_int += 1 << nbits + if first_int - 1 == ip._ALL_ONES: + break + + +def _collapse_addresses_internal(addresses): + """Loops through the addresses, collapsing concurrent netblocks. + + Example: + + ip1 = IPv4Network('192.0.2.0/26') + ip2 = IPv4Network('192.0.2.64/26') + ip3 = IPv4Network('192.0.2.128/26') + ip4 = IPv4Network('192.0.2.192/26') + + _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> + [IPv4Network('192.0.2.0/24')] + + This shouldn't be called directly; it is called via + collapse_addresses([]). + + Args: + addresses: A list of IPv4Network's or IPv6Network's + + Returns: + A list of IPv4Network's or IPv6Network's depending on what we were + passed. 
+ + """ + # First merge + to_merge = list(addresses) + subnets = {} + while to_merge: + net = to_merge.pop() + supernet = net.supernet() + existing = subnets.get(supernet) + if existing is None: + subnets[supernet] = net + elif existing != net: + # Merge consecutive subnets + del subnets[supernet] + to_merge.append(supernet) + # Then iterate over resulting networks, skipping subsumed subnets + last = None + for net in sorted(subnets.values()): + if last is not None: + # Since they are sorted, last.network_address <= net.network_address + # is a given. + if last.broadcast_address >= net.broadcast_address: + continue + yield net + last = net + + +def collapse_addresses(addresses): + """Collapse a list of IP objects. + + Example: + collapse_addresses([IPv4Network('192.0.2.0/25'), + IPv4Network('192.0.2.128/25')]) -> + [IPv4Network('192.0.2.0/24')] + + Args: + addresses: An iterable of IPv4Network or IPv6Network objects. + + Returns: + An iterator of the collapsed IPv(4|6)Network objects. + + Raises: + TypeError: If passed a list of mixed version objects. + + """ + addrs = [] + ips = [] + nets = [] + + # split IP addresses and networks + for ip in addresses: + if isinstance(ip, _BaseAddress): + if ips and ips[-1].version != ip.version: + raise TypeError("%s and %s are not of the same version" % ( + ip, ips[-1])) + ips.append(ip) + elif ip._prefixlen == ip.max_prefixlen: + if ips and ips[-1].version != ip.version: + raise TypeError("%s and %s are not of the same version" % ( + ip, ips[-1])) + try: + ips.append(ip.ip) + except AttributeError: + ips.append(ip.network_address) + else: + if nets and nets[-1].version != ip.version: + raise TypeError("%s and %s are not of the same version" % ( + ip, nets[-1])) + nets.append(ip) + + # sort and dedup + ips = sorted(set(ips)) + + # find consecutive address ranges in the sorted sequence and summarize them + if ips: + for first, last in _find_address_range(ips): + addrs.extend(summarize_address_range(first, last)) + + return _collapse_addresses_internal(addrs + nets) + + +def get_mixed_type_key(obj): + """Return a key suitable for sorting between networks and addresses. + + Address and Network objects are not sortable by default; they're + fundamentally different so the expression + + IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') + + doesn't make any sense. There are some times however, where you may wish + to have ipaddress sort these for you anyway. If you need to do this, you + can use this function as the key= argument to sorted(). + + Args: + obj: either a Network or Address object. + Returns: + appropriate key. 
+ + """ + if isinstance(obj, _BaseNetwork): + return obj._get_networks_key() + elif isinstance(obj, _BaseAddress): + return obj._get_address_key() + return NotImplemented + + +class _IPAddressBase: + + """The mother class.""" + + __slots__ = () + + @property + def exploded(self): + """Return the longhand version of the IP address as a string.""" + return self._explode_shorthand_ip_string() + + @property + def compressed(self): + """Return the shorthand version of the IP address as a string.""" + return str(self) + + @property + def reverse_pointer(self): + """The name of the reverse DNS pointer for the IP address, e.g.: + >>> ipaddress.ip_address("127.0.0.1").reverse_pointer + '1.0.0.127.in-addr.arpa' + >>> ipaddress.ip_address("2001:db8::1").reverse_pointer + '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' + + """ + return self._reverse_pointer() + + def _check_int_address(self, address): + if address < 0: + msg = "%d (< 0) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, self.version)) + if address > self._ALL_ONES: + msg = "%d (>= 2**%d) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, self.max_prefixlen, + self.version)) + + def _check_packed_address(self, address, expected_len): + address_len = len(address) + if address_len != expected_len: + msg = "%r (len %d != %d) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, address_len, + expected_len, self.version)) + + @classmethod + def _ip_int_from_prefix(cls, prefixlen): + """Turn the prefix length into a bitwise netmask + + Args: + prefixlen: An integer, the prefix length. + + Returns: + An integer. + + """ + return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) + + @classmethod + def _prefix_from_ip_int(cls, ip_int): + """Return prefix length from the bitwise netmask. + + Args: + ip_int: An integer, the netmask in expanded bitwise format + + Returns: + An integer, the prefix length. + + Raises: + ValueError: If the input intermingles zeroes & ones + """ + trailing_zeroes = _count_righthand_zero_bits(ip_int, + cls.max_prefixlen) + prefixlen = cls.max_prefixlen - trailing_zeroes + leading_ones = ip_int >> trailing_zeroes + all_ones = (1 << prefixlen) - 1 + if leading_ones != all_ones: + byteslen = cls.max_prefixlen // 8 + details = ip_int.to_bytes(byteslen, 'big') + msg = 'Netmask pattern %r mixes zeroes & ones' + raise ValueError(msg % details) + return prefixlen + + @classmethod + def _report_invalid_netmask(cls, netmask_str): + msg = '%r is not a valid netmask' % netmask_str + raise NetmaskValueError(msg) from None + + @classmethod + def _prefix_from_prefix_string(cls, prefixlen_str): + """Return prefix length from a numeric string + + Args: + prefixlen_str: The string to be converted + + Returns: + An integer, the prefix length. 
+ + Raises: + NetmaskValueError: If the input is not a valid netmask + """ + # int allows a leading +/- as well as surrounding whitespace, + # so we ensure that isn't the case + if not (prefixlen_str.isascii() and prefixlen_str.isdigit()): + cls._report_invalid_netmask(prefixlen_str) + try: + prefixlen = int(prefixlen_str) + except ValueError: + cls._report_invalid_netmask(prefixlen_str) + if not (0 <= prefixlen <= cls.max_prefixlen): + cls._report_invalid_netmask(prefixlen_str) + return prefixlen + + @classmethod + def _prefix_from_ip_string(cls, ip_str): + """Turn a netmask/hostmask string into a prefix length + + Args: + ip_str: The netmask/hostmask to be converted + + Returns: + An integer, the prefix length. + + Raises: + NetmaskValueError: If the input is not a valid netmask/hostmask + """ + # Parse the netmask/hostmask like an IP address. + try: + ip_int = cls._ip_int_from_string(ip_str) + except AddressValueError: + cls._report_invalid_netmask(ip_str) + + # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). + # Note that the two ambiguous cases (all-ones and all-zeroes) are + # treated as netmasks. + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + pass + + # Invert the bits, and try matching a /0+1+/ hostmask instead. + ip_int ^= cls._ALL_ONES + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + cls._report_invalid_netmask(ip_str) + + @classmethod + def _split_addr_prefix(cls, address): + """Helper function to parse address of Network/Interface. + + Arg: + address: Argument of Network/Interface. + + Returns: + (addr, prefix) tuple. + """ + # a packed address or integer + if isinstance(address, (bytes, int)): + return address, cls.max_prefixlen + + if not isinstance(address, tuple): + # Assume input argument to be string or any object representation + # which converts into a formatted IP prefix string. + address = _split_optional_netmask(address) + + # Constructing from a tuple (addr, [mask]) + if len(address) > 1: + return address + return address[0], cls.max_prefixlen + + def __reduce__(self): + return self.__class__, (str(self),) + + +_address_fmt_re = None + +@functools.total_ordering +class _BaseAddress(_IPAddressBase): + + """A generic IP object. + + This IP class contains the version independent methods which are + used by single IP addresses. + """ + + __slots__ = () + + def __int__(self): + return self._ip + + def __eq__(self, other): + try: + return (self._ip == other._ip + and self.version == other.version) + except AttributeError: + return NotImplemented + + def __lt__(self, other): + if not isinstance(other, _BaseAddress): + return NotImplemented + if self.version != other.version: + raise TypeError('%s and %s are not of the same version' % ( + self, other)) + if self._ip != other._ip: + return self._ip < other._ip + return False + + # Shorthand for Integer addition and subtraction. This is not + # meant to ever support addition/subtraction of addresses. 
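A quick look at the comparison and arithmetic behaviour: equality and ordering are per-version, and the `+`/`-` shorthand defined just below offsets by plain integers, never by other addresses:

```python
import ipaddress

a = ipaddress.ip_address("192.0.2.1")
print(int(a))   # 3221225985
print(a + 5)    # 192.0.2.6
print(a - 1)    # 192.0.2.0

# Cross-version ordering is refused outright.
try:
    a < ipaddress.ip_address("2001:db8::1")
except TypeError as exc:
    print(exc)  # 192.0.2.1 and 2001:db8::1 are not of the same version
```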
+ def __add__(self, other): + if not isinstance(other, int): + return NotImplemented + return self.__class__(int(self) + other) + + def __sub__(self, other): + if not isinstance(other, int): + return NotImplemented + return self.__class__(int(self) - other) + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, str(self)) + + def __str__(self): + return str(self._string_from_ip_int(self._ip)) + + def __hash__(self): + return hash(hex(int(self._ip))) + + def _get_address_key(self): + return (self.version, self) + + def __reduce__(self): + return self.__class__, (self._ip,) + + def __format__(self, fmt): + """Returns an IP address as a formatted string. + + Supported presentation types are: + 's': returns the IP address as a string (default) + 'b': converts to binary and returns a zero-padded string + 'X' or 'x': converts to upper- or lower-case hex and returns a zero-padded string + 'n': the same as 'b' for IPv4 and 'x' for IPv6 + + For binary and hex presentation types, the alternate form specifier + '#' and the grouping option '_' are supported. + """ + + # Support string formatting + if not fmt or fmt[-1] == 's': + return format(str(self), fmt) + + # From here on down, support for 'bnXx' + global _address_fmt_re + if _address_fmt_re is None: + import re + _address_fmt_re = re.compile('(#?)(_?)([xbnX])') + + m = _address_fmt_re.fullmatch(fmt) + if not m: + return super().__format__(fmt) + + alternate, grouping, fmt_base = m.groups() + + # Set some defaults + if fmt_base == 'n': + if self.version == 4: + fmt_base = 'b' # Binary is default for ipv4 + else: + fmt_base = 'x' # Hex is default for ipv6 + + if fmt_base == 'b': + padlen = self.max_prefixlen + else: + padlen = self.max_prefixlen // 4 + + if grouping: + padlen += padlen // 4 - 1 + + if alternate: + padlen += 2 # 0b or 0x + + return format(int(self), f'{alternate}0{padlen}{grouping}{fmt_base}') + + +@functools.total_ordering +class _BaseNetwork(_IPAddressBase): + """A generic IP network object. + + This IP class contains the version independent methods which are + used by networks. + """ + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, str(self)) + + def __str__(self): + return '%s/%d' % (self.network_address, self.prefixlen) + + def hosts(self): + """Generate Iterator over usable hosts in a network. + + This is like __iter__ except it doesn't return the network + or broadcast addresses. 
+ + """ + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in range(network + 1, broadcast): + yield self._address_class(x) + + def __iter__(self): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in range(network, broadcast + 1): + yield self._address_class(x) + + def __getitem__(self, n): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + if n >= 0: + if network + n > broadcast: + raise IndexError('address out of range') + return self._address_class(network + n) + else: + n += 1 + if broadcast + n < network: + raise IndexError('address out of range') + return self._address_class(broadcast + n) + + def __lt__(self, other): + if not isinstance(other, _BaseNetwork): + return NotImplemented + if self.version != other.version: + raise TypeError('%s and %s are not of the same version' % ( + self, other)) + if self.network_address != other.network_address: + return self.network_address < other.network_address + if self.netmask != other.netmask: + return self.netmask < other.netmask + return False + + def __eq__(self, other): + try: + return (self.version == other.version and + self.network_address == other.network_address and + int(self.netmask) == int(other.netmask)) + except AttributeError: + return NotImplemented + + def __hash__(self): + return hash((int(self.network_address), int(self.netmask))) + + def __contains__(self, other): + # always false if one is v4 and the other is v6. + if self.version != other.version: + return False + # dealing with another network. + if isinstance(other, _BaseNetwork): + return False + # dealing with another address + else: + # address + return other._ip & self.netmask._ip == self.network_address._ip + + def overlaps(self, other): + """Tell if self is partly contained in other.""" + return self.network_address in other or ( + self.broadcast_address in other or ( + other.network_address in self or ( + other.broadcast_address in self))) + + @functools.cached_property + def broadcast_address(self): + return self._address_class(int(self.network_address) | + int(self.hostmask)) + + @functools.cached_property + def hostmask(self): + return self._address_class(int(self.netmask) ^ self._ALL_ONES) + + @property + def with_prefixlen(self): + return '%s/%d' % (self.network_address, self._prefixlen) + + @property + def with_netmask(self): + return '%s/%s' % (self.network_address, self.netmask) + + @property + def with_hostmask(self): + return '%s/%s' % (self.network_address, self.hostmask) + + @property + def num_addresses(self): + """Number of hosts in the current subnet.""" + return int(self.broadcast_address) - int(self.network_address) + 1 + + @property + def _address_class(self): + # Returning bare address objects (rather than interfaces) allows for + # more consistent behaviour across the network address, broadcast + # address and individual host addresses. + msg = '%200s has no associated address class' % (type(self),) + raise NotImplementedError(msg) + + @property + def prefixlen(self): + return self._prefixlen + + def address_exclude(self, other): + """Remove an address from a larger block. 
+ + For example: + + addr1 = ip_network('192.0.2.0/28') + addr2 = ip_network('192.0.2.1/32') + list(addr1.address_exclude(addr2)) = + [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), + IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] + + or IPv6: + + addr1 = ip_network('2001:db8::1/32') + addr2 = ip_network('2001:db8::1/128') + list(addr1.address_exclude(addr2)) = + [ip_network('2001:db8::1/128'), + ip_network('2001:db8::2/127'), + ip_network('2001:db8::4/126'), + ip_network('2001:db8::8/125'), + ... + ip_network('2001:db8:8000::/33')] + + Args: + other: An IPv4Network or IPv6Network object of the same type. + + Returns: + An iterator of the IPv(4|6)Network objects which is self + minus other. + + Raises: + TypeError: If self and other are of differing address + versions, or if other is not a network object. + ValueError: If other is not completely contained by self. + + """ + if not self.version == other.version: + raise TypeError("%s and %s are not of the same version" % ( + self, other)) + + if not isinstance(other, _BaseNetwork): + raise TypeError("%s is not a network object" % other) + + if not other.subnet_of(self): + raise ValueError('%s not contained in %s' % (other, self)) + if other == self: + return + + # Make sure we're comparing the network of other. + other = other.__class__('%s/%s' % (other.network_address, + other.prefixlen)) + + s1, s2 = self.subnets() + while s1 != other and s2 != other: + if other.subnet_of(s1): + yield s2 + s1, s2 = s1.subnets() + elif other.subnet_of(s2): + yield s1 + s1, s2 = s2.subnets() + else: + # If we got here, there's a bug somewhere. + raise AssertionError('Error performing exclusion: ' + 's1: %s s2: %s other: %s' % + (s1, s2, other)) + if s1 == other: + yield s2 + elif s2 == other: + yield s1 + else: + # If we got here, there's a bug somewhere. + raise AssertionError('Error performing exclusion: ' + 's1: %s s2: %s other: %s' % + (s1, s2, other)) + + def compare_networks(self, other): + """Compare two IP objects. + + This is only concerned about the comparison of the integer + representation of the network addresses. This means that the + host bits aren't considered at all in this method. If you want + to compare host bits, you can easily enough do a + 'HostA._ip < HostB._ip' + + Args: + other: An IP object. + + Returns: + If the IP versions of self and other are the same, returns: + + -1 if self < other: + eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') + IPv6Network('2001:db8::1000/124') < + IPv6Network('2001:db8::2000/124') + 0 if self == other + eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') + IPv6Network('2001:db8::1000/124') == + IPv6Network('2001:db8::1000/124') + 1 if self > other + eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') + IPv6Network('2001:db8::2000/124') > + IPv6Network('2001:db8::1000/124') + + Raises: + TypeError if the IP versions are different. + + """ + # does this need to raise a ValueError? + if self.version != other.version: + raise TypeError('%s and %s are not of the same type' % ( + self, other)) + # self.version == other.version below here: + if self.network_address < other.network_address: + return -1 + if self.network_address > other.network_address: + return 1 + # self.network_address == other.network_address below here: + if self.netmask < other.netmask: + return -1 + if self.netmask > other.netmask: + return 1 + return 0 + + def _get_networks_key(self): + """Network-only key function. 
+ + Returns an object that identifies this address' network and + netmask. This function is a suitable "key" argument for sorted() + and list.sort(). + + """ + return (self.version, self.network_address, self.netmask) + + def subnets(self, prefixlen_diff=1, new_prefix=None): + """The subnets which join to make the current subnet. + + In the case that self contains only one IP + (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 + for IPv6), yield an iterator with just ourself. + + Args: + prefixlen_diff: An integer, the amount the prefix length + should be increased by. This should not be set if + new_prefix is also set. + new_prefix: The desired new prefix length. This must be a + larger number (smaller prefix) than the existing prefix. + This should not be set if prefixlen_diff is also set. + + Returns: + An iterator of IPv(4|6) objects. + + Raises: + ValueError: The prefixlen_diff is too small or too large. + OR + prefixlen_diff and new_prefix are both set or new_prefix + is a smaller number than the current prefix (smaller + number means a larger network) + + """ + if self._prefixlen == self.max_prefixlen: + yield self + return + + if new_prefix is not None: + if new_prefix < self._prefixlen: + raise ValueError('new prefix must be longer') + if prefixlen_diff != 1: + raise ValueError('cannot set prefixlen_diff and new_prefix') + prefixlen_diff = new_prefix - self._prefixlen + + if prefixlen_diff < 0: + raise ValueError('prefix length diff must be > 0') + new_prefixlen = self._prefixlen + prefixlen_diff + + if new_prefixlen > self.max_prefixlen: + raise ValueError( + 'prefix length diff %d is invalid for netblock %s' % ( + new_prefixlen, self)) + + start = int(self.network_address) + end = int(self.broadcast_address) + 1 + step = (int(self.hostmask) + 1) >> prefixlen_diff + for new_addr in range(start, end, step): + current = self.__class__((new_addr, new_prefixlen)) + yield current + + def supernet(self, prefixlen_diff=1, new_prefix=None): + """The supernet containing the current network. + + Args: + prefixlen_diff: An integer, the amount the prefix length of + the network should be decreased by. For example, given a + /24 network and a prefixlen_diff of 3, a supernet with a + /21 netmask is returned. + + Returns: + An IPv4 network object. + + Raises: + ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have + a negative prefix length. + OR + If prefixlen_diff and new_prefix are both set or new_prefix is a + larger number than the current prefix (larger number means a + smaller network) + + """ + if self._prefixlen == 0: + return self + + if new_prefix is not None: + if new_prefix > self._prefixlen: + raise ValueError('new prefix must be shorter') + if prefixlen_diff != 1: + raise ValueError('cannot set prefixlen_diff and new_prefix') + prefixlen_diff = self._prefixlen - new_prefix + + new_prefixlen = self.prefixlen - prefixlen_diff + if new_prefixlen < 0: + raise ValueError( + 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % + (self.prefixlen, prefixlen_diff)) + return self.__class__(( + int(self.network_address) & (int(self.netmask) << prefixlen_diff), + new_prefixlen + )) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is a multicast address. + See RFC 2373 2.7 for details. 
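`subnets()` and `supernet()` above move the prefix length in opposite directions; for a /24 (matching the docstring's /24 -> /21 example):

```python
import ipaddress

net = ipaddress.ip_network("192.0.2.0/24")

print(list(net.subnets()))
# [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]
print(len(list(net.subnets(new_prefix=26))))  # 4

print(net.supernet(prefixlen_diff=3))         # 192.0.0.0/21
```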
+ + """ + return (self.network_address.is_multicast and + self.broadcast_address.is_multicast) + + @staticmethod + def _is_subnet_of(a, b): + try: + # Always false if one is v4 and the other is v6. + if a.version != b.version: + raise TypeError(f"{a} and {b} are not of the same version") + return (b.network_address <= a.network_address and + b.broadcast_address >= a.broadcast_address) + except AttributeError: + raise TypeError(f"Unable to test subnet containment " + f"between {a} and {b}") + + def subnet_of(self, other): + """Return True if this network is a subnet of other.""" + return self._is_subnet_of(self, other) + + def supernet_of(self, other): + """Return True if this network is a supernet of other.""" + return self._is_subnet_of(other, self) + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within one of the + reserved IPv6 Network ranges. + + """ + return (self.network_address.is_reserved and + self.broadcast_address.is_reserved) + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is reserved per RFC 4291. + + """ + return (self.network_address.is_link_local and + self.broadcast_address.is_link_local) + + @property + def is_private(self): + """Test if this network belongs to a private range. + + Returns: + A boolean, True if the network is reserved per + iana-ipv4-special-registry or iana-ipv6-special-registry. + + """ + return any(self.network_address in priv_network and + self.broadcast_address in priv_network + for priv_network in self._constants._private_networks) and all( + self.network_address not in network and + self.broadcast_address not in network + for network in self._constants._private_networks_exceptions + ) + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, True if the address is not reserved per + iana-ipv4-special-registry or iana-ipv6-special-registry. + + """ + return not self.is_private + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 2373 2.5.2. + + """ + return (self.network_address.is_unspecified and + self.broadcast_address.is_unspecified) + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback address as defined in + RFC 2373 2.5.3. + + """ + return (self.network_address.is_loopback and + self.broadcast_address.is_loopback) + + +class _BaseConstants: + + _private_networks = [] + + +_BaseNetwork._constants = _BaseConstants + + +class _BaseV4: + + """Base IPv4 object. + + The following methods are used by IPv4 objects in both single IP + addresses and networks. + + """ + + __slots__ = () + version = 4 + # Equivalent to 255.255.255.255 or 32 bits of 1's. + _ALL_ONES = (2**IPV4LENGTH) - 1 + + max_prefixlen = IPV4LENGTH + # There are only a handful of valid v4 netmasks, so we cache them all + # when constructed (see _make_netmask()). + _netmask_cache = {} + + def _explode_shorthand_ip_string(self): + return str(self) + + @classmethod + def _make_netmask(cls, arg): + """Make a (netmask, prefix_len) tuple from the given argument. + + Argument can be: + - an integer (the prefix length) + - a string representing the prefix length (e.g. "24") + - a string representing the prefix netmask (e.g. 
"255.255.255.0") + """ + if arg not in cls._netmask_cache: + if isinstance(arg, int): + prefixlen = arg + if not (0 <= prefixlen <= cls.max_prefixlen): + cls._report_invalid_netmask(prefixlen) + else: + try: + # Check for a netmask in prefix length form + prefixlen = cls._prefix_from_prefix_string(arg) + except NetmaskValueError: + # Check for a netmask or hostmask in dotted-quad form. + # This may raise NetmaskValueError. + prefixlen = cls._prefix_from_ip_string(arg) + netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) + cls._netmask_cache[arg] = netmask, prefixlen + return cls._netmask_cache[arg] + + @classmethod + def _ip_int_from_string(cls, ip_str): + """Turn the given IP string into an integer for comparison. + + Args: + ip_str: A string, the IP ip_str. + + Returns: + The IP ip_str as an integer. + + Raises: + AddressValueError: if ip_str isn't a valid IPv4 Address. + + """ + if not ip_str: + raise AddressValueError('Address cannot be empty') + + octets = ip_str.split('.') + if len(octets) != 4: + raise AddressValueError("Expected 4 octets in %r" % ip_str) + + try: + return int.from_bytes(map(cls._parse_octet, octets), 'big') + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) from None + + @classmethod + def _parse_octet(cls, octet_str): + """Convert a decimal octet into an integer. + + Args: + octet_str: A string, the number to parse. + + Returns: + The octet as an integer. + + Raises: + ValueError: if the octet isn't strictly a decimal from [0..255]. + + """ + if not octet_str: + raise ValueError("Empty octet not permitted") + # Reject non-ASCII digits. + if not (octet_str.isascii() and octet_str.isdigit()): + msg = "Only decimal digits permitted in %r" + raise ValueError(msg % octet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(octet_str) > 3: + msg = "At most 3 characters permitted in %r" + raise ValueError(msg % octet_str) + # Handle leading zeros as strict as glibc's inet_pton() + # See security bug bpo-36384 + if octet_str != '0' and octet_str[0] == '0': + msg = "Leading zeros are not permitted in %r" + raise ValueError(msg % octet_str) + # Convert to integer (we know digits are legal) + octet_int = int(octet_str, 10) + if octet_int > 255: + raise ValueError("Octet %d (> 255) not permitted" % octet_int) + return octet_int + + @classmethod + def _string_from_ip_int(cls, ip_int): + """Turns a 32-bit integer into dotted decimal notation. + + Args: + ip_int: An integer, the IP address. + + Returns: + The IP address as a string in dotted decimal notation. + + """ + return '.'.join(map(str, ip_int.to_bytes(4, 'big'))) + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv4 address. + + This implements the method described in RFC1035 3.5. + + """ + reverse_octets = str(self).split('.')[::-1] + return '.'.join(reverse_octets) + '.in-addr.arpa' + +class IPv4Address(_BaseV4, _BaseAddress): + + """Represent and manipulate single IPv4 Addresses.""" + + __slots__ = ('_ip', '__weakref__') + + def __init__(self, address): + + """ + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv4Address('192.0.2.1') == IPv4Address(3221225985). + or, more generally + IPv4Address(int(IPv4Address('192.0.2.1'))) == + IPv4Address('192.0.2.1') + + Raises: + AddressValueError: If ipaddress isn't a valid IPv4 address. + + """ + # Efficient constructor from integer. 
+ if isinstance(address, int): + self._check_int_address(address) + self._ip = address + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 4) + self._ip = int.from_bytes(address) # big endian + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = str(address) + if '/' in addr_str: + raise AddressValueError(f"Unexpected '/' in {address!r}") + self._ip = self._ip_int_from_string(addr_str) + + @property + def packed(self): + """The binary representation of this address.""" + return v4_int_to_packed(self._ip) + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within the + reserved IPv4 Network range. + + """ + return self in self._constants._reserved_network + + @property + @functools.lru_cache() + def is_private(self): + """``True`` if the address is defined as not globally reachable by + iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ + (for IPv6) with the following exceptions: + + * ``is_private`` is ``False`` for ``100.64.0.0/10`` + * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the + semantics of the underlying IPv4 addresses and the following condition holds + (see :attr:`IPv6Address.ipv4_mapped`):: + + address.is_private == address.ipv4_mapped.is_private + + ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10`` + IPv4 range where they are both ``False``. + """ + return ( + any(self in net for net in self._constants._private_networks) + and all(self not in net for net in self._constants._private_networks_exceptions) + ) + + @property + @functools.lru_cache() + def is_global(self): + """``True`` if the address is defined as globally reachable by + iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ + (for IPv6) with the following exception: + + For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the + semantics of the underlying IPv4 addresses and the following condition holds + (see :attr:`IPv6Address.ipv4_mapped`):: + + address.is_global == address.ipv4_mapped.is_global + + ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10`` + IPv4 range where they are both ``False``. + """ + return self not in self._constants._public_network and not self.is_private + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is multicast. + See RFC 3171 for details. + + """ + return self in self._constants._multicast_network + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 5735 3. + + """ + return self == self._constants._unspecified_address + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback per RFC 3330. + + """ + return self in self._constants._loopback_network + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is link-local per RFC 3927. + + """ + return self in self._constants._linklocal_network + + @property + def ipv6_mapped(self): + """Return the IPv4-mapped IPv6 address. 
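+
+        For example, IPv4Address('192.0.2.1').ipv6_mapped is
+        IPv6Address('::ffff:192.0.2.1').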
+
+        Returns:
+            The IPv4-mapped IPv6 address per RFC 4291.
+
+        """
+        return IPv6Address(f'::ffff:{self}')
+
+
+class IPv4Interface(IPv4Address):
+
+    def __init__(self, address):
+        addr, mask = self._split_addr_prefix(address)
+
+        IPv4Address.__init__(self, addr)
+        self.network = IPv4Network((addr, mask), strict=False)
+        self.netmask = self.network.netmask
+        self._prefixlen = self.network._prefixlen
+
+    @functools.cached_property
+    def hostmask(self):
+        return self.network.hostmask
+
+    def __str__(self):
+        return '%s/%d' % (self._string_from_ip_int(self._ip),
+                          self._prefixlen)
+
+    def __eq__(self, other):
+        address_equal = IPv4Address.__eq__(self, other)
+        if address_equal is NotImplemented or not address_equal:
+            return address_equal
+        try:
+            return self.network == other.network
+        except AttributeError:
+            # An interface with an associated network is NOT the
+            # same as an unassociated address. That's why the hash
+            # takes the extra info into account.
+            return False
+
+    def __lt__(self, other):
+        address_less = IPv4Address.__lt__(self, other)
+        if address_less is NotImplemented:
+            return NotImplemented
+        try:
+            return (self.network < other.network or
+                    self.network == other.network and address_less)
+        except AttributeError:
+            # We *do* allow addresses and interfaces to be sorted. The
+            # unassociated address is considered less than all interfaces.
+            return False
+
+    def __hash__(self):
+        return hash((self._ip, self._prefixlen, int(self.network.network_address)))
+
+    __reduce__ = _IPAddressBase.__reduce__
+
+    @property
+    def ip(self):
+        return IPv4Address(self._ip)
+
+    @property
+    def with_prefixlen(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self._prefixlen)
+
+    @property
+    def with_netmask(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self.netmask)
+
+    @property
+    def with_hostmask(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+    """This class represents and manipulates 32-bit IPv4 network
+    addresses.
+
+    Attributes: [examples for IPv4Network('192.0.2.0/27')]
+        .network_address: IPv4Address('192.0.2.0')
+        .hostmask: IPv4Address('0.0.0.31')
+        .broadcast_address: IPv4Address('192.0.2.31')
+        .netmask: IPv4Address('255.255.255.224')
+        .prefixlen: 27
+
+    """
+    # Class to use when creating address objects
+    _address_class = IPv4Address
+
+    def __init__(self, address, strict=True):
+        """Instantiate a new IPv4 network object.
+
+        Args:
+            address: A string or integer representing the IP [& network].
+              '192.0.2.0/24'
+              '192.0.2.0/255.255.255.0'
+              '192.0.2.0/0.0.0.255'
+              are all functionally the same in IPv4. Similarly,
+              '192.0.2.1'
+              '192.0.2.1/255.255.255.255'
+              '192.0.2.1/32'
+              are also functionally equivalent. That is to say, failing to
+              provide a subnetmask will create an object with a mask of /32.
+
+              If the mask (portion after the / in the argument) is given in
+              dotted quad form, it is treated as a netmask if it starts with a
+              non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+              starts with a zero field (e.g. 0.255.255.255 == /8), with the
+              single exception of an all-zero mask which is treated as a
+              netmask == /0. If no mask is given, a default of /32 is used.
+
+              Additionally, an integer can be passed, so
+              IPv4Network('192.0.2.1') == IPv4Network(3221225985)
+              or, more generally
+              IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
+                IPv4Interface('192.0.2.1')
+
+        Raises:
+            AddressValueError: If ipaddress isn't a valid IPv4 address.
+            NetmaskValueError: If the netmask isn't valid for
+              an IPv4 address.
+            ValueError: If strict is True and a network address is not
+              supplied.
+        """
+        addr, mask = self._split_addr_prefix(address)
+
+        self.network_address = IPv4Address(addr)
+        self.netmask, self._prefixlen = self._make_netmask(mask)
+        packed = int(self.network_address)
+        if packed & int(self.netmask) != packed:
+            if strict:
+                raise ValueError('%s has host bits set' % self)
+            else:
+                self.network_address = IPv4Address(packed &
+                                                   int(self.netmask))
+
+        if self._prefixlen == (self.max_prefixlen - 1):
+            self.hosts = self.__iter__
+        elif self._prefixlen == (self.max_prefixlen):
+            self.hosts = lambda: iter((IPv4Address(addr),))
+
+    @property
+    @functools.lru_cache()
+    def is_global(self):
+        """Test if this address is allocated for public networks.
+
+        Returns:
+            A boolean, True if the address is not reserved per
+            iana-ipv4-special-registry.
+
+        """
+        return (not (self.network_address in IPv4Network('100.64.0.0/10') and
+                     self.broadcast_address in IPv4Network('100.64.0.0/10')) and
+                not self.is_private)
+
+
+class _IPv4Constants:
+    _linklocal_network = IPv4Network('169.254.0.0/16')
+
+    _loopback_network = IPv4Network('127.0.0.0/8')
+
+    _multicast_network = IPv4Network('224.0.0.0/4')
+
+    _public_network = IPv4Network('100.64.0.0/10')
+
+    # Not globally reachable address blocks listed on
+    # https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
+    _private_networks = [
+        IPv4Network('0.0.0.0/8'),
+        IPv4Network('10.0.0.0/8'),
+        IPv4Network('127.0.0.0/8'),
+        IPv4Network('169.254.0.0/16'),
+        IPv4Network('172.16.0.0/12'),
+        IPv4Network('192.0.0.0/24'),
+        IPv4Network('192.0.0.170/31'),
+        IPv4Network('192.0.2.0/24'),
+        IPv4Network('192.168.0.0/16'),
+        IPv4Network('198.18.0.0/15'),
+        IPv4Network('198.51.100.0/24'),
+        IPv4Network('203.0.113.0/24'),
+        IPv4Network('240.0.0.0/4'),
+        IPv4Network('255.255.255.255/32'),
+    ]
+
+    _private_networks_exceptions = [
+        IPv4Network('192.0.0.9/32'),
+        IPv4Network('192.0.0.10/32'),
+    ]
+
+    _reserved_network = IPv4Network('240.0.0.0/4')
+
+    _unspecified_address = IPv4Address('0.0.0.0')
+
+
+IPv4Address._constants = _IPv4Constants
+IPv4Network._constants = _IPv4Constants
+
+
+class _BaseV6:
+
+    """Base IPv6 object.
+
+    The following methods are used by IPv6 objects in both single IP
+    addresses and networks.
+
+    """
+
+    __slots__ = ()
+    version = 6
+    _ALL_ONES = (2**IPV6LENGTH) - 1
+    _HEXTET_COUNT = 8
+    _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
+    max_prefixlen = IPV6LENGTH
+
+    # There are only a limited number of valid v6 netmasks, so we cache
+    # them all when constructed (see _make_netmask()).
+    _netmask_cache = {}
+
+    @classmethod
+    def _make_netmask(cls, arg):
+        """Make a (netmask, prefix_len) tuple from the given argument.
+
+        Argument can be:
+        - an integer (the prefix length)
+        - a string representing the prefix length (e.g. "64"); unlike the
+          IPv4 variant, dotted-quad netmask strings are not accepted here
+        """
+        if arg not in cls._netmask_cache:
+            if isinstance(arg, int):
+                prefixlen = arg
+                if not (0 <= prefixlen <= cls.max_prefixlen):
+                    cls._report_invalid_netmask(prefixlen)
+            else:
+                prefixlen = cls._prefix_from_prefix_string(arg)
+            netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
+            cls._netmask_cache[arg] = netmask, prefixlen
+        return cls._netmask_cache[arg]
+
+    @classmethod
+    def _ip_int_from_string(cls, ip_str):
+        """Turn an IPv6 ip_str into an integer.
+
+        Args:
+            ip_str: A string, the IPv6 ip_str.
+ + Returns: + An int, the IPv6 address + + Raises: + AddressValueError: if ip_str isn't a valid IPv6 Address. + + """ + if not ip_str: + raise AddressValueError('Address cannot be empty') + if len(ip_str) > 45: + shorten = ip_str + if len(shorten) > 100: + shorten = f'{ip_str[:45]}({len(ip_str)-90} chars elided){ip_str[-45:]}' + raise AddressValueError(f"At most 45 characters expected in " + f"{shorten!r}") + + # We want to allow more parts than the max to be 'split' + # to preserve the correct error message when there are + # too many parts combined with '::' + _max_parts = cls._HEXTET_COUNT + 1 + parts = ip_str.split(':', maxsplit=_max_parts) + + # An IPv6 address needs at least 2 colons (3 parts). + _min_parts = 3 + if len(parts) < _min_parts: + msg = "At least %d parts expected in %r" % (_min_parts, ip_str) + raise AddressValueError(msg) + + # If the address has an IPv4-style suffix, convert it to hexadecimal. + if '.' in parts[-1]: + try: + ipv4_int = IPv4Address(parts.pop())._ip + except AddressValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) from None + parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) + parts.append('%x' % (ipv4_int & 0xFFFF)) + + # An IPv6 address can't have more than 8 colons (9 parts). + # The extra colon comes from using the "::" notation for a single + # leading or trailing zero part. + if len(parts) > _max_parts: + msg = "At most %d colons permitted in %r" % (_max_parts-1, ip_str) + raise AddressValueError(msg) + + # Disregarding the endpoints, find '::' with nothing in between. + # This indicates that a run of zeroes has been skipped. + skip_index = None + for i in range(1, len(parts) - 1): + if not parts[i]: + if skip_index is not None: + # Can't have more than one '::' + msg = "At most one '::' permitted in %r" % ip_str + raise AddressValueError(msg) + skip_index = i + + # parts_hi is the number of parts to copy from above/before the '::' + # parts_lo is the number of parts to copy from below/after the '::' + if skip_index is not None: + # If we found a '::', then check if it also covers the endpoints. + parts_hi = skip_index + parts_lo = len(parts) - skip_index - 1 + if not parts[0]: + parts_hi -= 1 + if parts_hi: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + parts_lo -= 1 + if parts_lo: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) + if parts_skipped < 1: + msg = "Expected at most %d other parts with '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT-1, ip_str)) + else: + # Otherwise, allocate the entire address to parts_hi. The + # endpoints could still be empty, but _parse_hextet() will check + # for that. + if len(parts) != cls._HEXTET_COUNT: + msg = "Exactly %d parts expected without '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) + if not parts[0]: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_hi = len(parts) + parts_lo = 0 + parts_skipped = 0 + + try: + # Now, parse the hextets into a 128-bit integer. 
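+            # For '2001:db8::1', for instance, parts_hi covers ['2001', 'db8'],
+            # parts_skipped fills in five zero hextets for the '::' and
+            # parts_lo covers the trailing ['1'].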
+ ip_int = 0 + for i in range(parts_hi): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + ip_int <<= 16 * parts_skipped + for i in range(-parts_lo, 0): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + return ip_int + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) from None + + @classmethod + def _parse_hextet(cls, hextet_str): + """Convert an IPv6 hextet string into an integer. + + Args: + hextet_str: A string, the number to parse. + + Returns: + The hextet as an integer. + + Raises: + ValueError: if the input isn't strictly a hex number from + [0..FFFF]. + + """ + # Reject non-ASCII digits. + if not cls._HEX_DIGITS.issuperset(hextet_str): + raise ValueError("Only hex digits permitted in %r" % hextet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(hextet_str) > 4: + msg = "At most 4 characters permitted in %r" + raise ValueError(msg % hextet_str) + # Length check means we can skip checking the integer value + return int(hextet_str, 16) + + @classmethod + def _compress_hextets(cls, hextets): + """Compresses a list of hextets. + + Compresses a list of strings, replacing the longest continuous + sequence of "0" in the list with "" and adding empty strings at + the beginning or at the end of the string such that subsequently + calling ":".join(hextets) will produce the compressed version of + the IPv6 address. + + Args: + hextets: A list of strings, the hextets to compress. + + Returns: + A list of strings. + + """ + best_doublecolon_start = -1 + best_doublecolon_len = 0 + doublecolon_start = -1 + doublecolon_len = 0 + for index, hextet in enumerate(hextets): + if hextet == '0': + doublecolon_len += 1 + if doublecolon_start == -1: + # Start of a sequence of zeros. + doublecolon_start = index + if doublecolon_len > best_doublecolon_len: + # This is the longest sequence of zeros so far. + best_doublecolon_len = doublecolon_len + best_doublecolon_start = doublecolon_start + else: + doublecolon_len = 0 + doublecolon_start = -1 + + if best_doublecolon_len > 1: + best_doublecolon_end = (best_doublecolon_start + + best_doublecolon_len) + # For zeros at the end of the address. + if best_doublecolon_end == len(hextets): + hextets += [''] + hextets[best_doublecolon_start:best_doublecolon_end] = [''] + # For zeros at the beginning of the address. + if best_doublecolon_start == 0: + hextets = [''] + hextets + + return hextets + + @classmethod + def _string_from_ip_int(cls, ip_int=None): + """Turns a 128-bit integer into hexadecimal notation. + + Args: + ip_int: An integer, the IP address. + + Returns: + A string, the hexadecimal representation of the address. + + Raises: + ValueError: The address is bigger than 128 bits of all ones. + + """ + if ip_int is None: + ip_int = int(cls._ip) + + if ip_int > cls._ALL_ONES: + raise ValueError('IPv6 address is too large') + + hex_str = '%032x' % ip_int + hextets = ['%x' % int(hex_str[x:x+4], 16) for x in range(0, 32, 4)] + + hextets = cls._compress_hextets(hextets) + return ':'.join(hextets) + + def _explode_shorthand_ip_string(self): + """Expand a shortened IPv6 address. + + Returns: + A string, the expanded IPv6 address. 
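+            For example, '2001:db8::1' expands to
+            '2001:0db8:0000:0000:0000:0000:0000:0001'.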
+ + """ + if isinstance(self, IPv6Network): + ip_str = str(self.network_address) + elif isinstance(self, IPv6Interface): + ip_str = str(self.ip) + else: + ip_str = str(self) + + ip_int = self._ip_int_from_string(ip_str) + hex_str = '%032x' % ip_int + parts = [hex_str[x:x+4] for x in range(0, 32, 4)] + if isinstance(self, (_BaseNetwork, IPv6Interface)): + return '%s/%d' % (':'.join(parts), self._prefixlen) + return ':'.join(parts) + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv6 address. + + This implements the method described in RFC3596 2.5. + + """ + reverse_chars = self.exploded[::-1].replace(':', '') + return '.'.join(reverse_chars) + '.ip6.arpa' + + @staticmethod + def _split_scope_id(ip_str): + """Helper function to parse IPv6 string address with scope id. + + See RFC 4007 for details. + + Args: + ip_str: A string, the IPv6 address. + + Returns: + (addr, scope_id) tuple. + + """ + addr, sep, scope_id = ip_str.partition('%') + if not sep: + scope_id = None + elif not scope_id or '%' in scope_id: + raise AddressValueError('Invalid IPv6 address: "%r"' % ip_str) + return addr, scope_id + +class IPv6Address(_BaseV6, _BaseAddress): + + """Represent and manipulate single IPv6 Addresses.""" + + __slots__ = ('_ip', '_scope_id', '__weakref__') + + def __init__(self, address): + """Instantiate a new IPv6 address object. + + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv6Address('2001:db8::') == + IPv6Address(42540766411282592856903984951653826560) + or, more generally + IPv6Address(int(IPv6Address('2001:db8::'))) == + IPv6Address('2001:db8::') + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + + """ + # Efficient constructor from integer. + if isinstance(address, int): + self._check_int_address(address) + self._ip = address + self._scope_id = None + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 16) + self._ip = int.from_bytes(address, 'big') + self._scope_id = None + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = str(address) + if '/' in addr_str: + raise AddressValueError(f"Unexpected '/' in {address!r}") + addr_str, self._scope_id = self._split_scope_id(addr_str) + + self._ip = self._ip_int_from_string(addr_str) + + def _explode_shorthand_ip_string(self): + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is None: + return super()._explode_shorthand_ip_string() + prefix_len = 30 + raw_exploded_str = super()._explode_shorthand_ip_string() + return f"{raw_exploded_str[:prefix_len]}{ipv4_mapped!s}" + + def _reverse_pointer(self): + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is None: + return super()._reverse_pointer() + prefix_len = 30 + raw_exploded_str = super()._explode_shorthand_ip_string()[:prefix_len] + # ipv4 encoded using hexadecimal nibbles instead of decimals + ipv4_int = ipv4_mapped._ip + reverse_chars = f"{raw_exploded_str}{ipv4_int:008x}"[::-1].replace(':', '') + return '.'.join(reverse_chars) + '.ip6.arpa' + + def _ipv4_mapped_ipv6_to_str(self): + """Return convenient text representation of IPv4-mapped IPv6 address + + See RFC 4291 2.5.5.2, 2.2 p.3 for details. 
+ + Returns: + A string, 'x:x:x:x:x:x:d.d.d.d', where the 'x's are the hexadecimal values of + the six high-order 16-bit pieces of the address, and the 'd's are + the decimal values of the four low-order 8-bit pieces of the + address (standard IPv4 representation) as defined in RFC 4291 2.2 p.3. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is None: + raise AddressValueError("Can not apply to non-IPv4-mapped IPv6 address %s" % str(self)) + high_order_bits = self._ip >> 32 + return "%s:%s" % (self._string_from_ip_int(high_order_bits), str(ipv4_mapped)) + + def __str__(self): + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is None: + ip_str = super().__str__() + else: + ip_str = self._ipv4_mapped_ipv6_to_str() + return ip_str + '%' + self._scope_id if self._scope_id else ip_str + + def __hash__(self): + return hash((self._ip, self._scope_id)) + + def __eq__(self, other): + address_equal = super().__eq__(other) + if address_equal is NotImplemented: + return NotImplemented + if not address_equal: + return False + return self._scope_id == getattr(other, '_scope_id', None) + + def __reduce__(self): + return (self.__class__, (str(self),)) + + @property + def scope_id(self): + """Identifier of a particular zone of the address's scope. + + See RFC 4007 for details. + + Returns: + A string identifying the zone of the address if specified, else None. + + """ + return self._scope_id + + @property + def packed(self): + """The binary representation of this address.""" + return v6_int_to_packed(self._ip) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is a multicast address. + See RFC 2373 2.7 for details. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_multicast + return self in self._constants._multicast_network + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within one of the + reserved IPv6 Network ranges. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_reserved + return any(self in x for x in self._constants._reserved_networks) + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is reserved per RFC 4291. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_link_local + return self in self._constants._linklocal_network + + @property + def is_site_local(self): + """Test if the address is reserved for site-local. + + Note that the site-local address space has been deprecated by RFC 3879. + Use is_private to test if this address is in the space of unique local + addresses as defined by RFC 4193. + + Returns: + A boolean, True if the address is reserved per RFC 3513 2.5.6. 
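+
+            For example, IPv6Address('fec0::1').is_site_local is True.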
+ + """ + return self in self._constants._sitelocal_network + + @property + @functools.lru_cache() + def is_private(self): + """``True`` if the address is defined as not globally reachable by + iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ + (for IPv6) with the following exceptions: + + * ``is_private`` is ``False`` for ``100.64.0.0/10`` + * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the + semantics of the underlying IPv4 addresses and the following condition holds + (see :attr:`IPv6Address.ipv4_mapped`):: + + address.is_private == address.ipv4_mapped.is_private + + ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10`` + IPv4 range where they are both ``False``. + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_private + return ( + any(self in net for net in self._constants._private_networks) + and all(self not in net for net in self._constants._private_networks_exceptions) + ) + + @property + def is_global(self): + """``True`` if the address is defined as globally reachable by + iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_ + (for IPv6) with the following exception: + + For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the + semantics of the underlying IPv4 addresses and the following condition holds + (see :attr:`IPv6Address.ipv4_mapped`):: + + address.is_global == address.ipv4_mapped.is_global + + ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10`` + IPv4 range where they are both ``False``. + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_global + return not self.is_private + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 2373 2.5.2. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_unspecified + return self._ip == 0 + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback address as defined in + RFC 2373 2.5.3. + + """ + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_loopback + return self._ip == 1 + + @property + def ipv4_mapped(self): + """Return the IPv4 mapped address. + + Returns: + If the IPv6 address is a v4 mapped address, return the + IPv4 mapped address. Return None otherwise. + + """ + if (self._ip >> 32) != 0xFFFF: + return None + return IPv4Address(self._ip & 0xFFFFFFFF) + + @property + def teredo(self): + """Tuple of embedded teredo IPs. + + Returns: + Tuple of the (server, client) IPs or None if the address + doesn't appear to be a teredo address (doesn't start with + 2001::/32) + + """ + if (self._ip >> 96) != 0x20010000: + return None + return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), + IPv4Address(~self._ip & 0xFFFFFFFF)) + + @property + def sixtofour(self): + """Return the IPv4 6to4 embedded address. + + Returns: + The IPv4 6to4-embedded address if present or None if the + address doesn't appear to contain a 6to4 embedded address. 
+ + """ + if (self._ip >> 112) != 0x2002: + return None + return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) + + +class IPv6Interface(IPv6Address): + + def __init__(self, address): + addr, mask = self._split_addr_prefix(address) + + IPv6Address.__init__(self, addr) + self.network = IPv6Network((addr, mask), strict=False) + self.netmask = self.network.netmask + self._prefixlen = self.network._prefixlen + + @functools.cached_property + def hostmask(self): + return self.network.hostmask + + def __str__(self): + return '%s/%d' % (super().__str__(), + self._prefixlen) + + def __eq__(self, other): + address_equal = IPv6Address.__eq__(self, other) + if address_equal is NotImplemented or not address_equal: + return address_equal + try: + return self.network == other.network + except AttributeError: + # An interface with an associated network is NOT the + # same as an unassociated address. That's why the hash + # takes the extra info into account. + return False + + def __lt__(self, other): + address_less = IPv6Address.__lt__(self, other) + if address_less is NotImplemented: + return address_less + try: + return (self.network < other.network or + self.network == other.network and address_less) + except AttributeError: + # We *do* allow addresses and interfaces to be sorted. The + # unassociated address is considered less than all interfaces. + return False + + def __hash__(self): + return hash((self._ip, self._prefixlen, int(self.network.network_address))) + + __reduce__ = _IPAddressBase.__reduce__ + + @property + def ip(self): + return IPv6Address(self._ip) + + @property + def with_prefixlen(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self._prefixlen) + + @property + def with_netmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.netmask) + + @property + def with_hostmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.hostmask) + + @property + def is_unspecified(self): + return self._ip == 0 and self.network.is_unspecified + + @property + def is_loopback(self): + return super().is_loopback and self.network.is_loopback + + +class IPv6Network(_BaseV6, _BaseNetwork): + + """This class represents and manipulates 128-bit IPv6 networks. + + Attributes: [examples for IPv6('2001:db8::1000/124')] + .network_address: IPv6Address('2001:db8::1000') + .hostmask: IPv6Address('::f') + .broadcast_address: IPv6Address('2001:db8::100f') + .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') + .prefixlen: 124 + + """ + + # Class to use when creating address objects + _address_class = IPv6Address + + def __init__(self, address, strict=True): + """Instantiate a new IPv6 Network object. + + Args: + address: A string or integer representing the IPv6 network or the + IP and prefix/netmask. + '2001:db8::/128' + '2001:db8:0000:0000:0000:0000:0000:0000/128' + '2001:db8::' + are all functionally the same in IPv6. That is to say, + failing to provide a subnetmask will create an object with + a mask of /128. + + Additionally, an integer can be passed, so + IPv6Network('2001:db8::') == + IPv6Network(42540766411282592856903984951653826560) + or, more generally + IPv6Network(int(IPv6Network('2001:db8::'))) == + IPv6Network('2001:db8::') + + strict: A boolean. If true, ensure that we have been passed + A true network address, eg, 2001:db8::1000/124 and not an + IP address on a network, eg, 2001:db8::1/124. + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + NetmaskValueError: If the netmask isn't valid for + an IPv6 address. 
+ ValueError: If strict was True and a network address was not + supplied. + """ + addr, mask = self._split_addr_prefix(address) + + self.network_address = IPv6Address(addr) + self.netmask, self._prefixlen = self._make_netmask(mask) + packed = int(self.network_address) + if packed & int(self.netmask) != packed: + if strict: + raise ValueError('%s has host bits set' % self) + else: + self.network_address = IPv6Address(packed & + int(self.netmask)) + + if self._prefixlen == (self.max_prefixlen - 1): + self.hosts = self.__iter__ + elif self._prefixlen == self.max_prefixlen: + self.hosts = lambda: iter((IPv6Address(addr),)) + + def hosts(self): + """Generate Iterator over usable hosts in a network. + + This is like __iter__ except it doesn't return the + Subnet-Router anycast address. + + """ + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in range(network + 1, broadcast + 1): + yield self._address_class(x) + + @property + def is_site_local(self): + """Test if the address is reserved for site-local. + + Note that the site-local address space has been deprecated by RFC 3879. + Use is_private to test if this address is in the space of unique local + addresses as defined by RFC 4193. + + Returns: + A boolean, True if the address is reserved per RFC 3513 2.5.6. + + """ + return (self.network_address.is_site_local and + self.broadcast_address.is_site_local) + + +class _IPv6Constants: + + _linklocal_network = IPv6Network('fe80::/10') + + _multicast_network = IPv6Network('ff00::/8') + + # Not globally reachable address blocks listed on + # https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml + _private_networks = [ + IPv6Network('::1/128'), + IPv6Network('::/128'), + IPv6Network('::ffff:0:0/96'), + IPv6Network('64:ff9b:1::/48'), + IPv6Network('100::/64'), + IPv6Network('2001::/23'), + IPv6Network('2001:db8::/32'), + # IANA says N/A, let's consider it not globally reachable to be safe + IPv6Network('2002::/16'), + # RFC 9637: https://www.rfc-editor.org/rfc/rfc9637.html#section-6-2.2 + IPv6Network('3fff::/20'), + IPv6Network('fc00::/7'), + IPv6Network('fe80::/10'), + ] + + _private_networks_exceptions = [ + IPv6Network('2001:1::1/128'), + IPv6Network('2001:1::2/128'), + IPv6Network('2001:3::/32'), + IPv6Network('2001:4:112::/48'), + IPv6Network('2001:20::/28'), + IPv6Network('2001:30::/28'), + ] + + _reserved_networks = [ + IPv6Network('::/8'), IPv6Network('100::/8'), + IPv6Network('200::/7'), IPv6Network('400::/6'), + IPv6Network('800::/5'), IPv6Network('1000::/4'), + IPv6Network('4000::/3'), IPv6Network('6000::/3'), + IPv6Network('8000::/3'), IPv6Network('A000::/3'), + IPv6Network('C000::/3'), IPv6Network('E000::/4'), + IPv6Network('F000::/5'), IPv6Network('F800::/6'), + IPv6Network('FE00::/9'), + ] + + _sitelocal_network = IPv6Network('fec0::/10') + + +IPv6Address._constants = _IPv6Constants +IPv6Network._constants = _IPv6Constants diff --git a/Python314_4_x86_Template/Lib/json/__init__.py b/Python314_4_x86_Template/Lib/json/__init__.py new file mode 100644 index 00000000..9eaa4f3f --- /dev/null +++ b/Python314_4_x86_Template/Lib/json/__init__.py @@ -0,0 +1,365 @@ +r"""JSON (JavaScript Object Notation) is a subset of +JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data +interchange format. + +:mod:`json` exposes an API familiar to users of the standard library +:mod:`marshal` and :mod:`pickle` modules. It is derived from a +version of the externally maintained simplejson library. 
+ +Encoding basic Python object hierarchies:: + + >>> import json + >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) + '["foo", {"bar": ["baz", null, 1.0, 2]}]' + >>> print(json.dumps("\"foo\bar")) + "\"foo\bar" + >>> print(json.dumps('\u1234')) + "\u1234" + >>> print(json.dumps('\\')) + "\\" + >>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)) + {"a": 0, "b": 0, "c": 0} + >>> from io import StringIO + >>> io = StringIO() + >>> json.dump(['streaming API'], io) + >>> io.getvalue() + '["streaming API"]' + +Compact encoding:: + + >>> import json + >>> mydict = {'4': 5, '6': 7} + >>> json.dumps([1,2,3,mydict], separators=(',', ':')) + '[1,2,3,{"4":5,"6":7}]' + +Pretty printing:: + + >>> import json + >>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)) + { + "4": 5, + "6": 7 + } + +Decoding JSON:: + + >>> import json + >>> obj = ['foo', {'bar': ['baz', None, 1.0, 2]}] + >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj + True + >>> json.loads('"\\"foo\\bar"') == '"foo\x08ar' + True + >>> from io import StringIO + >>> io = StringIO('["streaming API"]') + >>> json.load(io)[0] == 'streaming API' + True + +Specializing JSON object decoding:: + + >>> import json + >>> def as_complex(dct): + ... if '__complex__' in dct: + ... return complex(dct['real'], dct['imag']) + ... return dct + ... + >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', + ... object_hook=as_complex) + (1+2j) + >>> from decimal import Decimal + >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1') + True + +Specializing JSON object encoding:: + + >>> import json + >>> def encode_complex(obj): + ... if isinstance(obj, complex): + ... return [obj.real, obj.imag] + ... raise TypeError(f'Object of type {obj.__class__.__name__} ' + ... f'is not JSON serializable') + ... + >>> json.dumps(2 + 1j, default=encode_complex) + '[2.0, 1.0]' + >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j) + '[2.0, 1.0]' + >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j)) + '[2.0, 1.0]' + + +Using json from the shell to validate and pretty-print:: + + $ echo '{"json":"obj"}' | python -m json + { + "json": "obj" + } + $ echo '{ 1.2:3.4}' | python -m json + Expecting property name enclosed in double quotes: line 1 column 3 (char 2) +""" +__version__ = '2.0.9' +__all__ = [ + 'dump', 'dumps', 'load', 'loads', + 'JSONDecoder', 'JSONDecodeError', 'JSONEncoder', +] + +__author__ = 'Bob Ippolito ' + +from .decoder import JSONDecoder, JSONDecodeError +from .encoder import JSONEncoder +import codecs + +_default_encoder = JSONEncoder( + skipkeys=False, + ensure_ascii=True, + check_circular=True, + allow_nan=True, + indent=None, + separators=None, + default=None, +) + +def dump(obj, fp, *, skipkeys=False, ensure_ascii=True, check_circular=True, + allow_nan=True, cls=None, indent=None, separators=None, + default=None, sort_keys=False, **kw): + """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a + ``.write()``-supporting file-like object). + + If ``skipkeys`` is true then ``dict`` keys that are not basic types + (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped + instead of raising a ``TypeError``. + + If ``ensure_ascii`` is false, then the strings written to ``fp`` can + contain non-ASCII and non-printable characters if they appear in strings + contained in ``obj``. Otherwise, all such characters are escaped in JSON + strings. 
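+
+    For example, with the default ``ensure_ascii=True`` the one-character
+    string ``'é'`` is written as ``"\\u00e9"``; with ``ensure_ascii=False``
+    it is written as ``"é"`` and ``fp`` must accept non-ASCII text.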
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in a ``RecursionError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
+    in strict compliance with the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If ``indent`` is a non-negative integer, then JSON array elements and
+    object members will be pretty-printed with that indent level. An indent
+    level of 0 will only insert newlines. ``None`` is the most compact
+    representation.
+
+    If specified, ``separators`` should be an ``(item_separator,
+    key_separator)`` tuple. The default is ``(', ', ': ')`` if *indent* is
+    ``None`` and ``(',', ': ')`` otherwise. To get the most compact JSON
+    representation, you should specify ``(',', ':')`` to eliminate
+    whitespace.
+
+    ``default(obj)`` is a function that should return a serializable version
+    of obj or raise TypeError. The default simply raises TypeError.
+
+    If *sort_keys* is true (default: ``False``), then the output of
+    dictionaries will be sorted by key.
+
+    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+    ``.default()`` method to serialize additional types), specify it with
+    the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
+
+    """
+    # cached encoder
+    if (not skipkeys and ensure_ascii and
+            check_circular and allow_nan and
+            cls is None and indent is None and separators is None and
+            default is None and not sort_keys and not kw):
+        iterable = _default_encoder.iterencode(obj)
+    else:
+        if cls is None:
+            cls = JSONEncoder
+        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+            separators=separators,
+            default=default, sort_keys=sort_keys, **kw).iterencode(obj)
+    # could accelerate with writelines in some versions of Python, at
+    # a debuggability cost
+    for chunk in iterable:
+        fp.write(chunk)
+
+
+def dumps(obj, *, skipkeys=False, ensure_ascii=True, check_circular=True,
+        allow_nan=True, cls=None, indent=None, separators=None,
+        default=None, sort_keys=False, **kw):
+    """Serialize ``obj`` to a JSON formatted ``str``.
+
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
+    (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
+    instead of raising a ``TypeError``.
+
+    If ``ensure_ascii`` is false, then the return value can contain
+    non-ASCII and non-printable characters if they appear in strings
+    contained in ``obj``. Otherwise, all such characters are escaped in
+    JSON strings.
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in a ``RecursionError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
+    strict compliance with the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If ``indent`` is a non-negative integer, then JSON array elements and
+    object members will be pretty-printed with that indent level. An indent
+    level of 0 will only insert newlines. ``None`` is the most compact
+    representation.
+
+    If specified, ``separators`` should be an ``(item_separator,
+    key_separator)`` tuple.
The default is ``(', ', ': ')`` if *indent* is + ``None`` and ``(',', ': ')`` otherwise. To get the most compact JSON + representation, you should specify ``(',', ':')`` to eliminate + whitespace. + + ``default(obj)`` is a function that should return a serializable version + of obj or raise TypeError. The default simply raises TypeError. + + If *sort_keys* is true (default: ``False``), then the output of + dictionaries will be sorted by key. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg; otherwise ``JSONEncoder`` is used. + + """ + # cached encoder + if (not skipkeys and ensure_ascii and + check_circular and allow_nan and + cls is None and indent is None and separators is None and + default is None and not sort_keys and not kw): + return _default_encoder.encode(obj) + if cls is None: + cls = JSONEncoder + return cls( + skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, allow_nan=allow_nan, indent=indent, + separators=separators, default=default, sort_keys=sort_keys, + **kw).encode(obj) + + +_default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None) + + +def detect_encoding(b): + bstartswith = b.startswith + if bstartswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)): + return 'utf-32' + if bstartswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)): + return 'utf-16' + if bstartswith(codecs.BOM_UTF8): + return 'utf-8-sig' + + if len(b) >= 4: + if not b[0]: + # 00 00 -- -- - utf-32-be + # 00 XX -- -- - utf-16-be + return 'utf-16-be' if b[1] else 'utf-32-be' + if not b[1]: + # XX 00 00 00 - utf-32-le + # XX 00 00 XX - utf-16-le + # XX 00 XX -- - utf-16-le + return 'utf-16-le' if b[2] or b[3] else 'utf-32-le' + elif len(b) == 2: + if not b[0]: + # 00 XX - utf-16-be + return 'utf-16-be' + if not b[1]: + # XX 00 - utf-16-le + return 'utf-16-le' + # default + return 'utf-8' + + +def load(fp, *, cls=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): + """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing + a JSON document) to a Python object. + + ``object_hook`` is an optional function that will be called with the + result of any object literal decode (a ``dict``). The return value of + ``object_hook`` will be used instead of the ``dict``. This feature + can be used to implement custom decoders (e.g. JSON-RPC class hinting). + + ``object_pairs_hook`` is an optional function that will be called with + the result of any object literal decoded with an ordered list of pairs. + The return value of ``object_pairs_hook`` will be used instead of the + ``dict``. This feature can be used to implement custom decoders. If + ``object_hook`` is also defined, the ``object_pairs_hook`` takes + priority. + + To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` + kwarg; otherwise ``JSONDecoder`` is used. + """ + return loads(fp.read(), + cls=cls, object_hook=object_hook, + parse_float=parse_float, parse_int=parse_int, + parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw) + + +def loads(s, *, cls=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): + """Deserialize ``s`` (a ``str``, ``bytes`` or ``bytearray`` instance + containing a JSON document) to a Python object. 
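+
+    Bytes input is decoded first, using the detect_encoding() helper above
+    to pick among the UTF-8 (default), UTF-16 and UTF-32 families.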
+ + ``object_hook`` is an optional function that will be called with the + result of any object literal decode (a ``dict``). The return value of + ``object_hook`` will be used instead of the ``dict``. This feature + can be used to implement custom decoders (e.g. JSON-RPC class hinting). + + ``object_pairs_hook`` is an optional function that will be called with + the result of any object literal decoded with an ordered list of pairs. + The return value of ``object_pairs_hook`` will be used instead of the + ``dict``. This feature can be used to implement custom decoders. If + ``object_hook`` is also defined, the ``object_pairs_hook`` takes + priority. + + ``parse_float``, if specified, will be called with the string + of every JSON float to be decoded. By default this is equivalent to + float(num_str). This can be used to use another datatype or parser + for JSON floats (e.g. decimal.Decimal). + + ``parse_int``, if specified, will be called with the string + of every JSON int to be decoded. By default this is equivalent to + int(num_str). This can be used to use another datatype or parser + for JSON integers (e.g. float). + + ``parse_constant``, if specified, will be called with one of the + following strings: -Infinity, Infinity, NaN. + This can be used to raise an exception if invalid JSON numbers + are encountered. + + To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` + kwarg; otherwise ``JSONDecoder`` is used. + """ + if isinstance(s, str): + if s.startswith('\ufeff'): + raise JSONDecodeError("Unexpected UTF-8 BOM (decode using utf-8-sig)", + s, 0) + else: + if not isinstance(s, (bytes, bytearray)): + raise TypeError(f'the JSON object must be str, bytes or bytearray, ' + f'not {s.__class__.__name__}') + s = s.decode(detect_encoding(s), 'surrogatepass') + + if (cls is None and object_hook is None and + parse_int is None and parse_float is None and + parse_constant is None and object_pairs_hook is None and not kw): + return _default_decoder.decode(s) + if cls is None: + cls = JSONDecoder + if object_hook is not None: + kw['object_hook'] = object_hook + if object_pairs_hook is not None: + kw['object_pairs_hook'] = object_pairs_hook + if parse_float is not None: + kw['parse_float'] = parse_float + if parse_int is not None: + kw['parse_int'] = parse_int + if parse_constant is not None: + kw['parse_constant'] = parse_constant + return cls(**kw).decode(s) diff --git a/Python314_4_x86_Template/Lib/json/__main__.py b/Python314_4_x86_Template/Lib/json/__main__.py new file mode 100644 index 00000000..1808eadd --- /dev/null +++ b/Python314_4_x86_Template/Lib/json/__main__.py @@ -0,0 +1,20 @@ +"""Command-line tool to validate and pretty-print JSON + +Usage:: + + $ echo '{"json":"obj"}' | python -m json + { + "json": "obj" + } + $ echo '{ 1.2:3.4}' | python -m json + Expecting property name enclosed in double quotes: line 1 column 3 (char 2) + +""" +import json.tool + + +if __name__ == '__main__': + try: + json.tool.main() + except BrokenPipeError as exc: + raise SystemExit(exc.errno) diff --git a/Python314_4_x86_Template/Lib/json/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/json/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..ea3156d2 Binary files /dev/null and b/Python314_4_x86_Template/Lib/json/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/json/__pycache__/decoder.cpython-314.pyc b/Python314_4_x86_Template/Lib/json/__pycache__/decoder.cpython-314.pyc new file mode 100644 index 
00000000..f286b55a Binary files /dev/null and b/Python314_4_x86_Template/Lib/json/__pycache__/decoder.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/json/__pycache__/encoder.cpython-314.pyc b/Python314_4_x86_Template/Lib/json/__pycache__/encoder.cpython-314.pyc new file mode 100644 index 00000000..9ba43b61 Binary files /dev/null and b/Python314_4_x86_Template/Lib/json/__pycache__/encoder.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/json/__pycache__/scanner.cpython-314.pyc b/Python314_4_x86_Template/Lib/json/__pycache__/scanner.cpython-314.pyc new file mode 100644 index 00000000..98f959fd Binary files /dev/null and b/Python314_4_x86_Template/Lib/json/__pycache__/scanner.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/json/decoder.py b/Python314_4_x86_Template/Lib/json/decoder.py similarity index 100% rename from Python313_13_x86_Template/Lib/json/decoder.py rename to Python314_4_x86_Template/Lib/json/decoder.py diff --git a/Python314_4_x86_Template/Lib/json/encoder.py b/Python314_4_x86_Template/Lib/json/encoder.py new file mode 100644 index 00000000..5cf6d64f --- /dev/null +++ b/Python314_4_x86_Template/Lib/json/encoder.py @@ -0,0 +1,461 @@ +"""Implementation of JSONEncoder +""" +import re + +try: + from _json import encode_basestring_ascii as c_encode_basestring_ascii +except ImportError: + c_encode_basestring_ascii = None +try: + from _json import encode_basestring as c_encode_basestring +except ImportError: + c_encode_basestring = None +try: + from _json import make_encoder as c_make_encoder +except ImportError: + c_make_encoder = None + +ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') +ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') +HAS_UTF8 = re.compile(b'[\x80-\xff]') +ESCAPE_DCT = { + '\\': '\\\\', + '"': '\\"', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +for i in range(0x20): + ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) + #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) +del i + +INFINITY = float('inf') + +def py_encode_basestring(s): + """Return a JSON representation of a Python string + + """ + def replace(match): + return ESCAPE_DCT[match.group(0)] + return '"' + ESCAPE.sub(replace, s) + '"' + + +encode_basestring = (c_encode_basestring or py_encode_basestring) + + +def py_encode_basestring_ascii(s): + """Return an ASCII-only JSON representation of a Python string + + """ + def replace(match): + s = match.group(0) + try: + return ESCAPE_DCT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + return '\\u{0:04x}'.format(n) + #return '\\u%04x' % (n,) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + s2 = 0xdc00 | (n & 0x3ff) + return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) + return '"' + ESCAPE_ASCII.sub(replace, s) + '"' + + +encode_basestring_ascii = ( + c_encode_basestring_ascii or py_encode_basestring_ascii) + +class JSONEncoder(object): + """Extensible JSON encoder for Python data structures. 
+
+    Supports the following objects and types by default:
+
+    +-------------------+---------------+
+    | Python            | JSON          |
+    +===================+===============+
+    | dict              | object        |
+    +-------------------+---------------+
+    | list, tuple       | array         |
+    +-------------------+---------------+
+    | str               | string        |
+    +-------------------+---------------+
+    | int, float        | number        |
+    +-------------------+---------------+
+    | True              | true          |
+    +-------------------+---------------+
+    | False             | false         |
+    +-------------------+---------------+
+    | None              | null          |
+    +-------------------+---------------+
+
+    To extend this to recognize other objects, subclass and implement a
+    ``.default()`` method that returns a serializable object for ``o`` if
+    possible, otherwise it should call the superclass implementation (to
+    raise ``TypeError``).
+
+    """
+    item_separator = ', '
+    key_separator = ': '
+    def __init__(self, *, skipkeys=False, ensure_ascii=True,
+            check_circular=True, allow_nan=True, sort_keys=False,
+            indent=None, separators=None, default=None):
+        """Constructor for JSONEncoder, with sensible defaults.
+
+        If skipkeys is false, then it is a TypeError to attempt
+        encoding of keys that are not str, int, float, bool or None.
+        If skipkeys is True, such items are simply skipped.
+
+        If ensure_ascii is true, the output is guaranteed to be str objects
+        with all incoming non-ASCII and non-printable characters escaped.
+        If ensure_ascii is false, the output can contain non-ASCII and
+        non-printable characters.
+
+        If check_circular is true, then lists, dicts, and custom encoded
+        objects will be checked for circular references during encoding to
+        prevent an infinite recursion (which would cause a RecursionError).
+        Otherwise, no such check takes place.
+
+        If allow_nan is true, then NaN, Infinity, and -Infinity will be
+        encoded as such. This behavior is not JSON specification compliant,
+        but is consistent with most JavaScript based encoders and decoders.
+        Otherwise, it will be a ValueError to encode such floats.
+
+        If sort_keys is true, then the output of dictionaries will be
+        sorted by key; this is useful for regression tests to ensure
+        that JSON serializations can be compared on a day-to-day basis.
+
+        If indent is a non-negative integer, then JSON array
+        elements and object members will be pretty-printed with that
+        indent level. An indent level of 0 will only insert newlines.
+        None is the most compact representation.
+
+        If specified, separators should be an (item_separator,
+        key_separator) tuple. The default is (', ', ': ') if *indent* is
+        ``None`` and (',', ': ') otherwise. To get the most compact JSON
+        representation, you should specify (',', ':') to eliminate
+        whitespace.
+
+        If specified, default is a function that gets called for objects
+        that can't otherwise be serialized. It should return a JSON
+        encodable version of the object or raise a ``TypeError``.
+
+        """
+
+        self.skipkeys = skipkeys
+        self.ensure_ascii = ensure_ascii
+        self.check_circular = check_circular
+        self.allow_nan = allow_nan
+        self.sort_keys = sort_keys
+        self.indent = indent
+        if separators is not None:
+            self.item_separator, self.key_separator = separators
+        elif indent is not None:
+            self.item_separator = ','
+        if default is not None:
+            self.default = default
+
+    def default(self, o):
+        """Implement this method in a subclass such that it returns
+        a serializable object for ``o``, or calls the base implementation
+        (to raise a ``TypeError``).
+ + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + # Let the base class default method raise the TypeError + return super().default(o) + + """ + raise TypeError(f'Object of type {o.__class__.__name__} ' + f'is not JSON serializable') + + def encode(self, o): + """Return a JSON string representation of a Python data structure. + + >>> from json.encoder import JSONEncoder + >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) + '{"foo": ["bar", "baz"]}' + + """ + # This is for extremely simple cases and benchmarks. + if isinstance(o, str): + if self.ensure_ascii: + return encode_basestring_ascii(o) + else: + return encode_basestring(o) + # This doesn't pass the iterator directly to ''.join() because the + # exceptions aren't as detailed. The list call should be roughly + # equivalent to the PySequence_Fast that ''.join() would do. + chunks = self.iterencode(o, _one_shot=True) + if not isinstance(chunks, (list, tuple)): + chunks = list(chunks) + return ''.join(chunks) + + def iterencode(self, o, _one_shot=False): + """Encode the given object and yield each string + representation as available. + + For example:: + + for chunk in JSONEncoder().iterencode(bigobject): + mysocket.write(chunk) + + """ + if self.check_circular: + markers = {} + else: + markers = None + if self.ensure_ascii: + _encoder = encode_basestring_ascii + else: + _encoder = encode_basestring + + def floatstr(o, allow_nan=self.allow_nan, + _repr=float.__repr__, _inf=INFINITY, _neginf=-INFINITY): + # Check for specials. Note that this type of test is processor + # and/or platform-specific, so do tests which don't depend on the + # internals. 
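+            # The `o != o` check below is the classic NaN test: NaN is the
+            # only float value that compares unequal to itself, so the test
+            # needs no math.isnan() import on this hot path.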
+ + if o != o: + text = 'NaN' + elif o == _inf: + text = 'Infinity' + elif o == _neginf: + text = '-Infinity' + else: + return _repr(o) + + if not allow_nan: + raise ValueError( + "Out of range float values are not JSON compliant: " + + repr(o)) + + return text + + + if self.indent is None or isinstance(self.indent, str): + indent = self.indent + else: + indent = ' ' * self.indent + if _one_shot and c_make_encoder is not None: + _iterencode = c_make_encoder( + markers, self.default, _encoder, indent, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, self.allow_nan) + else: + _iterencode = _make_iterencode( + markers, self.default, _encoder, indent, floatstr, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, _one_shot) + return _iterencode(o, 0) + +def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, + _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, + ## HACK: hand-optimized bytecode; turn globals into locals + ValueError=ValueError, + dict=dict, + float=float, + id=id, + int=int, + isinstance=isinstance, + list=list, + str=str, + tuple=tuple, + _intstr=int.__repr__, + ): + + def _iterencode_list(lst, _current_indent_level): + if not lst: + yield '[]' + return + if markers is not None: + markerid = id(lst) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = lst + buf = '[' + if _indent is not None: + _current_indent_level += 1 + newline_indent = '\n' + _indent * _current_indent_level + separator = _item_separator + newline_indent + buf += newline_indent + else: + newline_indent = None + separator = _item_separator + for i, value in enumerate(lst): + if i: + buf = separator + try: + if isinstance(value, str): + yield buf + _encoder(value) + elif value is None: + yield buf + 'null' + elif value is True: + yield buf + 'true' + elif value is False: + yield buf + 'false' + elif isinstance(value, int): + # Subclasses of int/float may override __repr__, but we still + # want to encode them as integers/floats in JSON. One example + # within the standard library is IntEnum. 
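+                    # For instance, http.HTTPStatus.OK is an IntEnum whose
+                    # repr() includes the member name, while int.__repr__
+                    # (bound here as _intstr) emits just the underlying
+                    # integer, 200.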
+ yield buf + _intstr(value) + elif isinstance(value, float): + # see comment above for int + yield buf + _floatstr(value) + else: + yield buf + if isinstance(value, (list, tuple)): + chunks = _iterencode_list(value, _current_indent_level) + elif isinstance(value, dict): + chunks = _iterencode_dict(value, _current_indent_level) + else: + chunks = _iterencode(value, _current_indent_level) + yield from chunks + except GeneratorExit: + raise + except BaseException as exc: + exc.add_note(f'when serializing {type(lst).__name__} item {i}') + raise + if newline_indent is not None: + _current_indent_level -= 1 + yield '\n' + _indent * _current_indent_level + yield ']' + if markers is not None: + del markers[markerid] + + def _iterencode_dict(dct, _current_indent_level): + if not dct: + yield '{}' + return + if markers is not None: + markerid = id(dct) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = dct + yield '{' + if _indent is not None: + _current_indent_level += 1 + newline_indent = '\n' + _indent * _current_indent_level + item_separator = _item_separator + newline_indent + else: + newline_indent = None + item_separator = _item_separator + first = True + if _sort_keys: + items = sorted(dct.items()) + else: + items = dct.items() + for key, value in items: + if isinstance(key, str): + pass + # JavaScript is weakly typed for these, so it makes sense to + # also allow them. Many encoders seem to do something like this. + elif isinstance(key, float): + # see comment for int/float in _make_iterencode + key = _floatstr(key) + elif key is True: + key = 'true' + elif key is False: + key = 'false' + elif key is None: + key = 'null' + elif isinstance(key, int): + # see comment for int/float in _make_iterencode + key = _intstr(key) + elif _skipkeys: + continue + else: + raise TypeError(f'keys must be str, int, float, bool or None, ' + f'not {key.__class__.__name__}') + if first: + first = False + if newline_indent is not None: + yield newline_indent + else: + yield item_separator + yield _encoder(key) + yield _key_separator + try: + if isinstance(value, str): + yield _encoder(value) + elif value is None: + yield 'null' + elif value is True: + yield 'true' + elif value is False: + yield 'false' + elif isinstance(value, int): + # see comment for int/float in _make_iterencode + yield _intstr(value) + elif isinstance(value, float): + # see comment for int/float in _make_iterencode + yield _floatstr(value) + else: + if isinstance(value, (list, tuple)): + chunks = _iterencode_list(value, _current_indent_level) + elif isinstance(value, dict): + chunks = _iterencode_dict(value, _current_indent_level) + else: + chunks = _iterencode(value, _current_indent_level) + yield from chunks + except GeneratorExit: + raise + except BaseException as exc: + exc.add_note(f'when serializing {type(dct).__name__} item {key!r}') + raise + if not first and newline_indent is not None: + _current_indent_level -= 1 + yield '\n' + _indent * _current_indent_level + yield '}' + if markers is not None: + del markers[markerid] + + def _iterencode(o, _current_indent_level): + if isinstance(o, str): + yield _encoder(o) + elif o is None: + yield 'null' + elif o is True: + yield 'true' + elif o is False: + yield 'false' + elif isinstance(o, int): + # see comment for int/float in _make_iterencode + yield _intstr(o) + elif isinstance(o, float): + # see comment for int/float in _make_iterencode + yield _floatstr(o) + elif isinstance(o, (list, tuple)): + yield from _iterencode_list(o, 
_current_indent_level)
+        elif isinstance(o, dict):
+            yield from _iterencode_dict(o, _current_indent_level)
+        else:
+            if markers is not None:
+                markerid = id(o)
+                if markerid in markers:
+                    raise ValueError("Circular reference detected")
+                markers[markerid] = o
+            newobj = _default(o)
+            try:
+                yield from _iterencode(newobj, _current_indent_level)
+            except GeneratorExit:
+                raise
+            except BaseException as exc:
+                exc.add_note(f'when serializing {type(o).__name__} object')
+                raise
+            if markers is not None:
+                del markers[markerid]
+    return _iterencode
diff --git a/Python313_13_x86_Template/Lib/json/scanner.py b/Python314_4_x86_Template/Lib/json/scanner.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/json/scanner.py
rename to Python314_4_x86_Template/Lib/json/scanner.py
diff --git a/Python314_4_x86_Template/Lib/json/tool.py b/Python314_4_x86_Template/Lib/json/tool.py
new file mode 100644
index 00000000..1967817a
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/json/tool.py
@@ -0,0 +1,121 @@
+"""Command-line tool to validate and pretty-print JSON
+
+See `json.__main__` for a usage example (invocation as
+`python -m json.tool` is supported for backwards compatibility).
+"""
+import argparse
+import json
+import re
+import sys
+from _colorize import get_theme, can_colorize
+
+
+# The string we are colorizing is valid JSON,
+# so we can use a looser but simpler regex to match
+# the various parts, most notably strings and numbers,
+# where the regex given by the spec is much more complex.
+_color_pattern = re.compile(r'''
+    (?P<key>"(\\.|[^"\\])*")(?=:)           |
+    (?P<string>"(\\.|[^"\\])*")             |
+    (?P<number>NaN|-?Infinity|[0-9\-+.Ee]+) |
+    (?P<boolean>true|false)                 |
+    (?P<null>null)
+''', re.VERBOSE)
+
+_group_to_theme_color = {
+    "key": "definition",
+    "string": "string",
+    "number": "number",
+    "boolean": "keyword",
+    "null": "keyword",
+}
+
+
+def _colorize_json(json_str, theme):
+    def _replace_match_callback(match):
+        for group, color in _group_to_theme_color.items():
+            if m := match.group(group):
+                return f"{theme[color]}{m}{theme.reset}"
+        return match.group()
+
+    return re.sub(_color_pattern, _replace_match_callback, json_str)
+
+
+def main():
+    description = ('A simple command line interface for json module '
+                   'to validate and pretty-print JSON objects.')
+    parser = argparse.ArgumentParser(description=description, color=True)
+    parser.add_argument('infile', nargs='?',
+                        help='a JSON file to be validated or pretty-printed',
+                        default='-')
+    parser.add_argument('outfile', nargs='?',
+                        help='write the output of infile to outfile',
+                        default=None)
+    parser.add_argument('--sort-keys', action='store_true', default=False,
+                        help='sort the output of dictionaries alphabetically by key')
+    parser.add_argument('--no-ensure-ascii', dest='ensure_ascii', action='store_false',
+                        help='disable escaping of non-ASCII characters')
+    parser.add_argument('--json-lines', action='store_true', default=False,
+                        help='parse input using the JSON Lines format. '
+                        'Use with --no-indent or --compact to produce valid JSON Lines output.')
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument('--indent', default=4, type=int,
+                       help='separate items with newlines and use this number '
+                       'of spaces for indentation')
+    group.add_argument('--tab', action='store_const', dest='indent',
+                       const='\t', help='separate items with newlines and use '
+                       'tabs for indentation')
+    group.add_argument('--no-indent', action='store_const', dest='indent',
+                       const=None,
+                       help='separate items with spaces rather than newlines')
+    group.add_argument('--compact', action='store_true',
+                       help='suppress all whitespace separation (most compact)')
+    options = parser.parse_args()
+
+    dump_args = {
+        'sort_keys': options.sort_keys,
+        'indent': options.indent,
+        'ensure_ascii': options.ensure_ascii,
+    }
+    if options.compact:
+        dump_args['indent'] = None
+        dump_args['separators'] = ',', ':'
+
+    try:
+        if options.infile == '-':
+            infile = sys.stdin
+        else:
+            infile = open(options.infile, encoding='utf-8')
+        try:
+            if options.json_lines:
+                objs = (json.loads(line) for line in infile)
+            else:
+                objs = (json.load(infile),)
+
+            if options.outfile is None:
+                outfile = sys.stdout
+            else:
+                outfile = open(options.outfile, 'w', encoding='utf-8')
+            with outfile:
+                if can_colorize(file=outfile):
+                    t = get_theme(tty_file=outfile).syntax
+                    for obj in objs:
+                        json_str = json.dumps(obj, **dump_args)
+                        outfile.write(_colorize_json(json_str, t))
+                        outfile.write('\n')
+                else:
+                    for obj in objs:
+                        json.dump(obj, outfile, **dump_args)
+                        outfile.write('\n')
+        finally:
+            # Close infile only once the output has been written: with
+            # --json-lines, `objs` is a lazy generator that still reads
+            # from infile while dumping.
+            if infile is not sys.stdin:
+                infile.close()
+    except ValueError as e:
+        raise SystemExit(e)
+
+
+if __name__ == '__main__':
+    try:
+        main()
+    except BrokenPipeError as exc:
+        raise SystemExit(exc.errno)
diff --git a/Python313_13_x86_Template/Lib/keyword.py b/Python314_4_x86_Template/Lib/keyword.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/keyword.py
rename to Python314_4_x86_Template/Lib/keyword.py
diff --git a/Python314_4_x86_Template/Lib/linecache.py b/Python314_4_x86_Template/Lib/linecache.py
new file mode 100644
index 00000000..ef3b2d91
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/linecache.py
@@ -0,0 +1,256 @@
+"""Cache lines from Python source files.
+
+This is intended to read lines from modules imported -- hence if a filename
+is not found, it will look down the module search path for a file by
+that name.
+"""
+
+__all__ = ["getline", "clearcache", "checkcache", "lazycache"]
+
+
+# The cache. Maps filenames to either a thunk which will provide source code,
+# or a tuple (size, mtime, lines, fullname) once loaded.
+cache = {}
+_interactive_cache = {}
+
+
+def clearcache():
+    """Clear the cache entirely."""
+    cache.clear()
+
+
+def getline(filename, lineno, module_globals=None):
+    """Get a line for a Python source file from the cache.
+    Update the cache if it doesn't contain an entry for this file already."""
+
+    lines = getlines(filename, module_globals)
+    if 1 <= lineno <= len(lines):
+        return lines[lineno - 1]
+    return ''
+
+
+def getlines(filename, module_globals=None):
+    """Get the lines for a Python source file from the cache.
+    Update the cache if it doesn't contain an entry for this file already."""
+
+    entry = cache.get(filename, None)
+    if entry is not None and len(entry) != 1:
+        return entry[2]
+
+    try:
+        return updatecache(filename, module_globals)
+    except MemoryError:
+        clearcache()
+        return []
+
+
+def _getline_from_code(filename, lineno):
+    lines = _getlines_from_code(filename)
+    if 1 <= lineno <= len(lines):
+        return lines[lineno - 1]
+    return ''
+
+def _make_key(code):
+    return (code.co_filename, code.co_qualname, code.co_firstlineno)
+
+def _getlines_from_code(code):
+    code_id = _make_key(code)
+    entry = _interactive_cache.get(code_id, None)
+    if entry is not None and len(entry) != 1:
+        return entry[2]
+    return []
+
+
+def _source_unavailable(filename):
+    """Return True if the source code is unavailable for such file name."""
+    return (
+        not filename
+        or (filename.startswith('<')
+            and filename.endswith('>')
+            and not filename.startswith('<frozen ')))
+
+
+def _make_lazycache_entry(filename, module_globals):
+    if not filename or (filename.startswith('<') and filename.endswith('>')):
+        return None
+    # Try for a __loader__, if available
+    if module_globals and '__name__' in module_globals:
+        spec = module_globals.get('__spec__')
+        name = getattr(spec, 'name', None) or module_globals['__name__']
+        loader = getattr(spec, 'loader', None)
+        if loader is None:
+            loader = module_globals.get('__loader__')
+        get_source = getattr(loader, 'get_source', None)
+
+        if name and get_source:
+            def get_lines(name=name, *args, **kwargs):
+                return get_source(name, *args, **kwargs)
+            return (get_lines,)
+    return None
+
+
+def _register_code(code, string, name):
+    entry = (len(string),
+             None,
+             [line + '\n' for line in string.splitlines()],
+             name)
+    stack = [code]
+    while stack:
+        code = stack.pop()
+        for const in code.co_consts:
+            if isinstance(const, type(code)):
+                stack.append(const)
+        key = _make_key(code)
+        _interactive_cache[key] = entry
diff --git a/Python314_4_x86_Template/Lib/locale.py b/Python314_4_x86_Template/Lib/locale.py
new file mode 100644
index 00000000..dfedc638
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/locale.py
@@ -0,0 +1,1783 @@
+"""Locale support module.
+
+The module provides low-level access to the C lib's locale APIs and adds high
+level number formatting APIs as well as a locale aliasing engine to complement
+these.
+
+The aliasing engine includes support for many commonly used locale names and
+maps them to values suitable for passing to the C lib's setlocale() function. It
+also includes default encodings for all supported locale names.
+
+"""
+
+import sys
+import encodings
+import encodings.aliases
+import _collections_abc
+from builtins import str as _builtin_str
+import functools
+
+# Try importing the _locale module.
+#
+# If this fails, fall back on a basic 'C' locale emulation.
+
+# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
+# trying the import. So __all__ is also fiddled at the end of the file.
+__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error",
+           "setlocale", "localeconv", "strcoll", "strxfrm",
+           "str", "atof", "atoi", "format_string", "currency",
+           "normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY",
+           "LC_NUMERIC", "LC_ALL", "CHAR_MAX", "getencoding"]
+
+def _strcoll(a,b):
+    """ strcoll(string,string) -> int.
+        Compares two strings according to the locale.
+    """
+    return (a > b) - (a < b)
+
+def _strxfrm(s):
+    """ strxfrm(string) -> string.
+        Returns a string that behaves for cmp locale-aware.
+ """ + return s + +try: + + from _locale import * + +except ImportError: + + # Locale emulation + + CHAR_MAX = 127 + LC_ALL = 6 + LC_COLLATE = 3 + LC_CTYPE = 0 + LC_MESSAGES = 5 + LC_MONETARY = 4 + LC_NUMERIC = 1 + LC_TIME = 2 + Error = ValueError + + def localeconv(): + """ localeconv() -> dict. + Returns numeric and monetary locale-specific parameters. + """ + # 'C' locale default values + return {'grouping': [127], + 'currency_symbol': '', + 'n_sign_posn': 127, + 'p_cs_precedes': 127, + 'n_cs_precedes': 127, + 'mon_grouping': [], + 'n_sep_by_space': 127, + 'decimal_point': '.', + 'negative_sign': '', + 'positive_sign': '', + 'p_sep_by_space': 127, + 'int_curr_symbol': '', + 'p_sign_posn': 127, + 'thousands_sep': '', + 'mon_thousands_sep': '', + 'frac_digits': 127, + 'mon_decimal_point': '', + 'int_frac_digits': 127} + + def setlocale(category, value=None): + """ setlocale(integer,string=None) -> string. + Activates/queries locale processing. + """ + if value not in (None, '', 'C'): + raise Error('_locale emulation only supports "C" locale') + return 'C' + +# These may or may not exist in _locale, so be sure to set them. +if 'strxfrm' not in globals(): + strxfrm = _strxfrm +if 'strcoll' not in globals(): + strcoll = _strcoll + + +_localeconv = localeconv + +# With this dict, you can override some items of localeconv's return value. +# This is useful for testing purposes. +_override_localeconv = {} + +@functools.wraps(_localeconv) +def localeconv(): + d = _localeconv() + if _override_localeconv: + d.update(_override_localeconv) + return d + + +### Number formatting APIs + +# Author: Martin von Loewis +# improved by Georg Brandl + +# Iterate over grouping intervals +def _grouping_intervals(grouping): + last_interval = None + for interval in grouping: + # if grouping is -1, we are done + if interval == CHAR_MAX: + return + # 0: re-use last group ad infinitum + if interval == 0: + if last_interval is None: + raise ValueError("invalid grouping") + while True: + yield last_interval + yield interval + last_interval = interval + +#perform the grouping from right to left +def _group(s, monetary=False): + conv = localeconv() + thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep'] + grouping = conv[monetary and 'mon_grouping' or 'grouping'] + if not grouping: + return (s, 0) + if s[-1] == ' ': + stripped = s.rstrip() + right_spaces = s[len(stripped):] + s = stripped + else: + right_spaces = '' + left_spaces = '' + groups = [] + for interval in _grouping_intervals(grouping): + if not s or s[-1] not in "0123456789": + # only non-digit characters remain (sign, spaces) + left_spaces = s + s = '' + break + groups.append(s[-interval:]) + s = s[:-interval] + if s: + groups.append(s) + groups.reverse() + return ( + left_spaces + thousands_sep.join(groups) + right_spaces, + len(thousands_sep) * (len(groups) - 1) + ) + +# Strip a given amount of excess padding from the given string +def _strip_padding(s, amount): + lpos = 0 + while amount and s[lpos] == ' ': + lpos += 1 + amount -= 1 + rpos = len(s) - 1 + while amount and s[rpos] == ' ': + rpos -= 1 + amount -= 1 + return s[lpos:rpos+1] + +_percent_re = None + +def _format(percent, value, grouping=False, monetary=False, *additional): + if additional: + formatted = percent % ((value,) + additional) + else: + formatted = percent % value + if percent[-1] in 'eEfFgGdiu': + formatted = _localize(formatted, grouping, monetary) + return formatted + +# Transform formatted as locale number according to the locale settings +def 
_localize(formatted, grouping=False, monetary=False):
+    # floats and decimal ints need special action!
+    if '.' in formatted:
+        seps = 0
+        parts = formatted.split('.')
+        if grouping:
+            parts[0], seps = _group(parts[0], monetary=monetary)
+        decimal_point = localeconv()[monetary and 'mon_decimal_point'
+                                              or 'decimal_point']
+        formatted = decimal_point.join(parts)
+        if seps:
+            formatted = _strip_padding(formatted, seps)
+    else:
+        seps = 0
+        if grouping:
+            formatted, seps = _group(formatted, monetary=monetary)
+        if seps:
+            formatted = _strip_padding(formatted, seps)
+    return formatted
+
+def format_string(f, val, grouping=False, monetary=False):
+    """Formats a string in the same way that the % formatting would use,
+    but takes the current locale into account.
+
+    Grouping is applied if the third parameter is true.
+    Conversion uses monetary thousands separator and grouping strings if
+    fourth parameter monetary is true."""
+    global _percent_re
+    if _percent_re is None:
+        import re
+        _percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
+
+    percents = list(_percent_re.finditer(f))
+    new_f = _percent_re.sub('%s', f)
+
+    if isinstance(val, _collections_abc.Mapping):
+        new_val = []
+        for perc in percents:
+            if perc.group()[-1]=='%':
+                new_val.append('%')
+            else:
+                new_val.append(_format(perc.group(), val, grouping, monetary))
+    else:
+        if not isinstance(val, tuple):
+            val = (val,)
+        new_val = []
+        i = 0
+        for perc in percents:
+            if perc.group()[-1]=='%':
+                new_val.append('%')
+            else:
+                starcount = perc.group('modifiers').count('*')
+                new_val.append(_format(perc.group(),
+                                       val[i],
+                                       grouping,
+                                       monetary,
+                                       *val[i+1:i+1+starcount]))
+                i += (1 + starcount)
+    val = tuple(new_val)
+
+    return new_f % val
+
+def currency(val, symbol=True, grouping=False, international=False):
+    """Formats val according to the currency settings
+    in the current locale."""
+    conv = localeconv()
+
+    # check for illegal values
+    digits = conv[international and 'int_frac_digits' or 'frac_digits']
+    if digits == 127:
+        raise ValueError("Currency formatting is not possible using "
+                         "the 'C' locale.")
+
+    s = _localize(f'{abs(val):.{digits}f}', grouping, monetary=True)
+    # '<' and '>' are markers if the sign must be inserted between symbol and value
+    s = '<' + s + '>'
+
+    if symbol:
+        smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
+        precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
+        separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
+
+        if precedes:
+            s = smb + (separated and ' ' or '') + s
+        else:
+            if international and smb[-1] == ' ':
+                smb = smb[:-1]
+            s = s + (separated and ' ' or '') + smb
+
+    sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
+    sign = conv[val<0 and 'negative_sign' or 'positive_sign']
+
+    if sign_pos == 0:
+        s = '(' + s + ')'
+    elif sign_pos == 1:
+        s = sign + s
+    elif sign_pos == 2:
+        s = s + sign
+    elif sign_pos == 3:
+        s = s.replace('<', sign)
+    elif sign_pos == 4:
+        s = s.replace('>', sign)
+    else:
+        # the default if nothing specified;
+        # this should be the most fitting sign position
+        s = sign + s
+
+    return s.replace('<', '').replace('>', '')
+
+def str(val):
+    """Convert float to string, taking the locale into account."""
+    return _format("%.12g", val)
+
+def delocalize(string):
+    "Parses a string as a normalized number according to the locale settings."
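+    # Illustrative sketch of the intended round trip (assumes a de_DE
+    # locale is available on the host; not part of the upstream module):
+    #
+    #   >>> import locale
+    #   >>> locale.setlocale(locale.LC_NUMERIC, 'de_DE.UTF-8')  # doctest: +SKIP
+    #   'de_DE.UTF-8'
+    #   >>> locale.delocalize('1.234,56')                       # doctest: +SKIP
+    #   '1234.56'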
+ + conv = localeconv() + + #First, get rid of the grouping + ts = conv['thousands_sep'] + if ts: + string = string.replace(ts, '') + + #next, replace the decimal point with a dot + dd = conv['decimal_point'] + if dd: + string = string.replace(dd, '.') + return string + +def localize(string, grouping=False, monetary=False): + """Parses a string as locale number according to the locale settings.""" + return _localize(string, grouping, monetary) + +def atof(string, func=float): + "Parses a string as a float according to the locale settings." + return func(delocalize(string)) + +def atoi(string): + "Converts a string to an integer according to the locale settings." + return int(delocalize(string)) + +def _test(): + setlocale(LC_ALL, "") + #do grouping + s1 = format_string("%d", 123456789,1) + print(s1, "is", atoi(s1)) + #standard formatting + s1 = str(3.14) + print(s1, "is", atof(s1)) + +### Locale name aliasing engine + +# Author: Marc-Andre Lemburg, mal@lemburg.com +# Various tweaks by Fredrik Lundh + +# store away the low-level version of setlocale (it's +# overridden below) +_setlocale = setlocale + +def _replace_encoding(code, encoding): + if '.' in code: + langname = code[:code.index('.')] + else: + langname = code + # Convert the encoding to a C lib compatible encoding string + norm_encoding = encodings.normalize_encoding(encoding) + #print('norm encoding: %r' % norm_encoding) + norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(), + norm_encoding) + #print('aliased encoding: %r' % norm_encoding) + encoding = norm_encoding + norm_encoding = norm_encoding.lower() + if norm_encoding in locale_encoding_alias: + encoding = locale_encoding_alias[norm_encoding] + else: + norm_encoding = norm_encoding.replace('_', '') + norm_encoding = norm_encoding.replace('-', '') + if norm_encoding in locale_encoding_alias: + encoding = locale_encoding_alias[norm_encoding] + #print('found encoding %r' % encoding) + return langname + '.' + encoding + +def _append_modifier(code, modifier): + if modifier == 'euro': + if '.' not in code: + return code + '.ISO8859-15' + _, _, encoding = code.partition('.') + if encoding in ('ISO8859-15', 'UTF-8'): + return code + if encoding == 'ISO8859-1': + return _replace_encoding(code, 'ISO8859-15') + return code + '@' + modifier + +def normalize(localename): + + """ Returns a normalized locale code for the given locale + name. + + The returned locale code is formatted for use with + setlocale(). + + If normalization fails, the original name is returned + unchanged. + + If the given encoding is not known, the function defaults to + the default encoding for the locale code just like setlocale() + does. + + """ + # Normalize the locale name and extract the encoding and modifier + code = localename.lower() + if ':' in code: + # ':' is sometimes used as encoding delimiter. + code = code.replace(':', '.') + if '@' in code: + code, modifier = code.split('@', 1) + else: + modifier = '' + if '.' in code: + langname, encoding = code.split('.')[:2] + else: + langname = code + encoding = '' + + # First lookup: fullname (possibly with encoding and modifier) + lang_enc = langname + if encoding: + norm_encoding = encoding.replace('-', '') + norm_encoding = norm_encoding.replace('_', '') + lang_enc += '.' 
+ norm_encoding + lookup_name = lang_enc + if modifier: + lookup_name += '@' + modifier + code = locale_alias.get(lookup_name, None) + if code is not None: + return code + #print('first lookup failed') + + if modifier: + # Second try: fullname without modifier (possibly with encoding) + code = locale_alias.get(lang_enc, None) + if code is not None: + #print('lookup without modifier succeeded') + if '@' not in code: + return _append_modifier(code, modifier) + if code.split('@', 1)[1].lower() == modifier: + return code + #print('second lookup failed') + + if encoding: + # Third try: langname (without encoding, possibly with modifier) + lookup_name = langname + if modifier: + lookup_name += '@' + modifier + code = locale_alias.get(lookup_name, None) + if code is not None: + #print('lookup without encoding succeeded') + if '@' not in code: + return _replace_encoding(code, encoding) + code, modifier = code.split('@', 1) + return _replace_encoding(code, encoding) + '@' + modifier + + if modifier: + # Fourth try: langname (without encoding and modifier) + code = locale_alias.get(langname, None) + if code is not None: + #print('lookup without modifier and encoding succeeded') + if '@' not in code: + code = _replace_encoding(code, encoding) + return _append_modifier(code, modifier) + code, defmod = code.split('@', 1) + if defmod.lower() == modifier: + return _replace_encoding(code, encoding) + '@' + defmod + + return localename + +def _parse_localename(localename): + + """ Parses the locale code for localename and returns the + result as tuple (language code, encoding). + + The localename is normalized and passed through the locale + alias engine. A ValueError is raised in case the locale name + cannot be parsed. + + The language code corresponds to RFC 1766. code and encoding + can be None in case the values cannot be determined or are + unknown to this implementation. + + """ + code = normalize(localename) + if '@' in code: + # Deal with locale modifiers + code, modifier = code.split('@', 1) + if modifier == 'euro' and '.' not in code: + # Assume Latin-9 for @euro locales. This is bogus, + # since some systems may use other encodings for these + # locales. Also, we ignore other modifiers. + return code, 'iso-8859-15' + + if '.' in code: + return tuple(code.split('.')[:2]) + elif code == 'C': + return None, None + elif code == 'UTF-8': + # On macOS "LC_CTYPE=UTF-8" is a valid locale setting + # for getting UTF-8 handling for text. + return None, 'UTF-8' + raise ValueError('unknown locale: %s' % localename) + +def _build_localename(localetuple): + + """ Builds a locale code from the given tuple (language code, + encoding). + + No aliasing or normalizing takes place. + + """ + try: + language, encoding = localetuple + + if language is None: + language = 'C' + if encoding is None: + return language + else: + return language + '.' + encoding + except (TypeError, ValueError): + raise TypeError('Locale must be None, a string, or an iterable of ' + 'two strings -- language code, encoding.') from None + +def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): + + """ Tries to determine the default locale settings and returns + them as tuple (language code, encoding). + + According to POSIX, a program which has not called + setlocale(LC_ALL, "") runs using the portable 'C' locale. + Calling setlocale(LC_ALL, "") lets it use the default locale as + defined by the LANG variable. 
Since we don't want to interfere + with the current locale setting we thus emulate the behavior + in the way described above. + + To maintain compatibility with other platforms, not only the + LANG variable is tested, but a list of variables given as + envvars parameter. The first found to be defined will be + used. envvars defaults to the search path used in GNU gettext; + it must always contain the variable name 'LANG'. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + """ + + import warnings + warnings._deprecated( + "locale.getdefaultlocale", + "{name!r} is deprecated and slated for removal in Python {remove}. " + "Use setlocale(), getencoding() and getlocale() instead.", + remove=(3, 15)) + return _getdefaultlocale(envvars) + + +def _getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): + try: + # check if it's supported by the _locale module + import _locale + code, encoding = _locale._getdefaultlocale() + except (ImportError, AttributeError): + pass + else: + # make sure the code/encoding values are valid + if sys.platform == "win32" and code and code[:2] == "0x": + # map windows language identifier to language name + code = windows_locale.get(int(code, 0)) + # ...add other platform-specific processing here, if + # necessary... + return code, encoding + + # fall back on POSIX behaviour + import os + lookup = os.environ.get + for variable in envvars: + localename = lookup(variable,None) + if localename: + if variable == 'LANGUAGE': + localename = localename.split(':')[0] + break + else: + localename = 'C' + return _parse_localename(localename) + + +def getlocale(category=LC_CTYPE): + + """ Returns the current setting for the given locale category as + tuple (language code, encoding). + + category may be one of the LC_* value except LC_ALL. It + defaults to LC_CTYPE. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + """ + localename = _setlocale(category) + if category == LC_ALL and ';' in localename: + raise TypeError('category LC_ALL is not supported') + return _parse_localename(localename) + +def setlocale(category, locale=None): + + """ Set the locale for the given category. The locale can be + a string, an iterable of two strings (language code and encoding), + or None. + + Iterables are converted to strings using the locale aliasing + engine. Locale strings are passed directly to the C lib. + + category may be given as one of the LC_* values. + + """ + if locale and not isinstance(locale, _builtin_str): + # convert to string + locale = normalize(_build_localename(locale)) + return _setlocale(category, locale) + + +try: + from _locale import getencoding +except ImportError: + # When _locale.getencoding() is missing, locale.getencoding() uses the + # Python filesystem encoding. + def getencoding(): + return sys.getfilesystemencoding() + + +try: + CODESET +except NameError: + def getpreferredencoding(do_setlocale=True): + """Return the charset that the user is likely using.""" + if sys.flags.warn_default_encoding: + import warnings + warnings.warn( + "UTF-8 Mode affects locale.getpreferredencoding(). Consider locale.getencoding() instead.", + EncodingWarning, 2) + if sys.flags.utf8_mode: + return 'utf-8' + return getencoding() +else: + # On Unix, if CODESET is available, use that. 
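+    # Illustrative sketch (assumes a typical glibc system configured for
+    # UTF-8; not part of the upstream module):
+    #
+    #   >>> import locale
+    #   >>> locale.getpreferredencoding()  # doctest: +SKIP
+    #   'UTF-8'
+    #   >>> locale.getencoding()           # doctest: +SKIP
+    #   'UTF-8'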
+ def getpreferredencoding(do_setlocale=True): + """Return the charset that the user is likely using, + according to the system configuration.""" + + if sys.flags.warn_default_encoding: + import warnings + warnings.warn( + "UTF-8 Mode affects locale.getpreferredencoding(). Consider locale.getencoding() instead.", + EncodingWarning, 2) + if sys.flags.utf8_mode: + return 'utf-8' + + if not do_setlocale: + return getencoding() + + old_loc = setlocale(LC_CTYPE) + try: + try: + setlocale(LC_CTYPE, "") + except Error: + pass + return getencoding() + finally: + setlocale(LC_CTYPE, old_loc) + + +### Database +# +# The following data was extracted from the locale.alias file which +# comes with X11 and then hand edited removing the explicit encoding +# definitions and adding some more aliases. The file is usually +# available as /usr/lib/X11/locale/locale.alias. +# + +# +# The local_encoding_alias table maps lowercase encoding alias names +# to C locale encoding names (case-sensitive). Note that normalize() +# first looks up the encoding in the encodings.aliases dictionary and +# then applies this mapping to find the correct C lib name for the +# encoding. +# +locale_encoding_alias = { + + # Mappings for non-standard encoding names used in locale names + '437': 'C', + 'c': 'C', + 'en': 'ISO8859-1', + 'jis': 'JIS7', + 'jis7': 'JIS7', + 'ajec': 'eucJP', + 'koi8c': 'KOI8-C', + 'microsoftcp1251': 'CP1251', + 'microsoftcp1255': 'CP1255', + 'microsoftcp1256': 'CP1256', + '88591': 'ISO8859-1', + '88592': 'ISO8859-2', + '88595': 'ISO8859-5', + '885915': 'ISO8859-15', + + # Mappings from Python codec names to C lib encoding names + 'ascii': 'ISO8859-1', + 'latin_1': 'ISO8859-1', + 'iso8859_1': 'ISO8859-1', + 'iso8859_10': 'ISO8859-10', + 'iso8859_11': 'ISO8859-11', + 'iso8859_13': 'ISO8859-13', + 'iso8859_14': 'ISO8859-14', + 'iso8859_15': 'ISO8859-15', + 'iso8859_16': 'ISO8859-16', + 'iso8859_2': 'ISO8859-2', + 'iso8859_3': 'ISO8859-3', + 'iso8859_4': 'ISO8859-4', + 'iso8859_5': 'ISO8859-5', + 'iso8859_6': 'ISO8859-6', + 'iso8859_7': 'ISO8859-7', + 'iso8859_8': 'ISO8859-8', + 'iso8859_9': 'ISO8859-9', + 'iso2022_jp': 'JIS7', + 'shift_jis': 'SJIS', + 'tactis': 'TACTIS', + 'euc_jp': 'eucJP', + 'euc_kr': 'eucKR', + 'utf_8': 'UTF-8', + 'koi8_r': 'KOI8-R', + 'koi8_t': 'KOI8-T', + 'koi8_u': 'KOI8-U', + 'kz1048': 'RK1048', + 'cp1251': 'CP1251', + 'cp1255': 'CP1255', + 'cp1256': 'CP1256', + + # XXX This list is still incomplete. If you know more + # mappings, please file a bug report. Thanks. +} + +for k, v in sorted(locale_encoding_alias.items()): + k = k.replace('_', '') + locale_encoding_alias.setdefault(k, v) +del k, v + +# +# The locale_alias table maps lowercase alias names to C locale names +# (case-sensitive). Encodings are always separated from the locale +# name using a dot ('.'); they should only be given in case the +# language name is needed to interpret the given encoding alias +# correctly (CJK codes often have this need). +# +# Note that the normalize() function which uses this tables +# removes '_' and '-' characters from the encoding part of the +# locale name before doing the lookup. This saves a lot of +# space in the table. +# +# MAL 2004-12-10: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. 
+# +# These are the differences compared to the old mapping (Python 2.4 +# and older): +# +# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' +# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1' +# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' +# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' +# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' +# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' +# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP' +# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13' +# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13' +# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' +# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' +# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11' +# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312' +# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5' +# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5' +# +# MAL 2008-05-30: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. +# +# These are the differences compared to the old mapping (Python 2.5 +# and older): +# +# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2' +# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2' +# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' +# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2' +# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' +# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8' +# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' +# +# AP 2010-04-12: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. 
+# +# These are the differences compared to the old mapping (Python 2.6.5 +# and older): +# +# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' +# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' +# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin' +# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' +# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin' +# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8' +# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' +# +# SS 2013-12-20: +# Updated alias mapping to most recent locale.alias file +# from X.org distribution using makelocalealias.py. +# +# These are the differences compared to the old mapping (Python 3.3.3 +# and older): +# +# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C' +# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' +# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' +# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8' +# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8' +# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin' +# +# SS 2014-10-01: +# Updated alias mapping with glibc 2.19 supported locales. +# +# SS 2018-05-05: +# Updated alias mapping with glibc 2.27 supported locales. +# +# These are the differences compared to the old mapping (Python 3.6.5 +# and older): +# +# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia' +# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154' +# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R' +# +# SS 2025-02-04: +# Updated alias mapping with glibc 2.41 supported locales and the latest +# X lib alias mapping. +# +# These are the differences compared to the old mapping (Python 3.13.1 +# and older): +# +# updated 'c.utf8' -> 'C.UTF-8' to 'en_US.UTF-8' +# updated 'de_it' -> 'de_IT.ISO8859-1' to 'de_IT.UTF-8' +# removed 'de_li.utf8' +# updated 'en_il' -> 'en_IL.UTF-8' to 'en_IL.ISO8859-1' +# removed 'english.iso88591' +# updated 'es_cu' -> 'es_CU.UTF-8' to 'es_CU.ISO8859-1' +# updated 'russian' -> 'ru_RU.KOI8-R' to 'ru_RU.ISO8859-5' +# updated 'sr@latn' -> 'sr_CS.UTF-8@latin' to 'sr_RS.UTF-8@latin' +# removed 'univ' +# removed 'universal' +# +# SS 2025-06-10: +# Remove 'c.utf8' -> 'en_US.UTF-8' because 'en_US.UTF-8' does not exist +# on all platforms. 
+ +locale_alias = { + 'a3': 'az_AZ.KOI8-C', + 'a3_az': 'az_AZ.KOI8-C', + 'a3_az.koic': 'az_AZ.KOI8-C', + 'aa_dj': 'aa_DJ.ISO8859-1', + 'aa_er': 'aa_ER.UTF-8', + 'aa_et': 'aa_ET.UTF-8', + 'af': 'af_ZA.ISO8859-1', + 'af_za': 'af_ZA.ISO8859-1', + 'agr_pe': 'agr_PE.UTF-8', + 'ak_gh': 'ak_GH.UTF-8', + 'am': 'am_ET.UTF-8', + 'am_et': 'am_ET.UTF-8', + 'american': 'en_US.ISO8859-1', + 'an_es': 'an_ES.ISO8859-15', + 'anp_in': 'anp_IN.UTF-8', + 'ar': 'ar_AA.ISO8859-6', + 'ar_aa': 'ar_AA.ISO8859-6', + 'ar_ae': 'ar_AE.ISO8859-6', + 'ar_bh': 'ar_BH.ISO8859-6', + 'ar_dz': 'ar_DZ.ISO8859-6', + 'ar_eg': 'ar_EG.ISO8859-6', + 'ar_in': 'ar_IN.UTF-8', + 'ar_iq': 'ar_IQ.ISO8859-6', + 'ar_jo': 'ar_JO.ISO8859-6', + 'ar_kw': 'ar_KW.ISO8859-6', + 'ar_lb': 'ar_LB.ISO8859-6', + 'ar_ly': 'ar_LY.ISO8859-6', + 'ar_ma': 'ar_MA.ISO8859-6', + 'ar_om': 'ar_OM.ISO8859-6', + 'ar_qa': 'ar_QA.ISO8859-6', + 'ar_sa': 'ar_SA.ISO8859-6', + 'ar_sd': 'ar_SD.ISO8859-6', + 'ar_ss': 'ar_SS.UTF-8', + 'ar_sy': 'ar_SY.ISO8859-6', + 'ar_tn': 'ar_TN.ISO8859-6', + 'ar_ye': 'ar_YE.ISO8859-6', + 'arabic': 'ar_AA.ISO8859-6', + 'as': 'as_IN.UTF-8', + 'as_in': 'as_IN.UTF-8', + 'ast_es': 'ast_ES.ISO8859-15', + 'ayc_pe': 'ayc_PE.UTF-8', + 'az': 'az_AZ.ISO8859-9E', + 'az_az': 'az_AZ.ISO8859-9E', + 'az_az.iso88599e': 'az_AZ.ISO8859-9E', + 'az_ir': 'az_IR.UTF-8', + 'be': 'be_BY.CP1251', + 'be@latin': 'be_BY.UTF-8@latin', + 'be_bg.utf8': 'bg_BG.UTF-8', + 'be_by': 'be_BY.CP1251', + 'be_by@latin': 'be_BY.UTF-8@latin', + 'bem_zm': 'bem_ZM.UTF-8', + 'ber_dz': 'ber_DZ.UTF-8', + 'ber_ma': 'ber_MA.UTF-8', + 'bg': 'bg_BG.CP1251', + 'bg_bg': 'bg_BG.CP1251', + 'bhb_in.utf8': 'bhb_IN.UTF-8', + 'bho_in': 'bho_IN.UTF-8', + 'bho_np': 'bho_NP.UTF-8', + 'bi_vu': 'bi_VU.UTF-8', + 'bn_bd': 'bn_BD.UTF-8', + 'bn_in': 'bn_IN.UTF-8', + 'bo_cn': 'bo_CN.UTF-8', + 'bo_in': 'bo_IN.UTF-8', + 'bokmal': 'nb_NO.ISO8859-1', + 'bokm\xe5l': 'nb_NO.ISO8859-1', + 'br': 'br_FR.ISO8859-1', + 'br_fr': 'br_FR.ISO8859-1', + 'brx_in': 'brx_IN.UTF-8', + 'bs': 'bs_BA.ISO8859-2', + 'bs_ba': 'bs_BA.ISO8859-2', + 'bulgarian': 'bg_BG.CP1251', + 'byn_er': 'byn_ER.UTF-8', + 'c': 'C', + 'c-french': 'fr_CA.ISO8859-1', + 'c.ascii': 'C', + 'c.en': 'C', + 'c.iso88591': 'en_US.ISO8859-1', + 'c_c': 'C', + 'c_c.c': 'C', + 'ca': 'ca_ES.ISO8859-1', + 'ca_ad': 'ca_AD.ISO8859-1', + 'ca_es': 'ca_ES.ISO8859-1', + 'ca_es@valencia': 'ca_ES.UTF-8@valencia', + 'ca_fr': 'ca_FR.ISO8859-1', + 'ca_it': 'ca_IT.ISO8859-1', + 'catalan': 'ca_ES.ISO8859-1', + 'ce_ru': 'ce_RU.UTF-8', + 'cextend': 'en_US.ISO8859-1', + 'chinese-s': 'zh_CN.eucCN', + 'chinese-t': 'zh_TW.eucTW', + 'chr_us': 'chr_US.UTF-8', + 'ckb_iq': 'ckb_IQ.UTF-8', + 'cmn_tw': 'cmn_TW.UTF-8', + 'crh_ru': 'crh_RU.UTF-8', + 'crh_ua': 'crh_UA.UTF-8', + 'croatian': 'hr_HR.ISO8859-2', + 'cs': 'cs_CZ.ISO8859-2', + 'cs_cs': 'cs_CZ.ISO8859-2', + 'cs_cz': 'cs_CZ.ISO8859-2', + 'csb_pl': 'csb_PL.UTF-8', + 'cv_ru': 'cv_RU.UTF-8', + 'cy': 'cy_GB.ISO8859-1', + 'cy_gb': 'cy_GB.ISO8859-1', + 'cz': 'cs_CZ.ISO8859-2', + 'cz_cz': 'cs_CZ.ISO8859-2', + 'czech': 'cs_CZ.ISO8859-2', + 'da': 'da_DK.ISO8859-1', + 'da_dk': 'da_DK.ISO8859-1', + 'danish': 'da_DK.ISO8859-1', + 'dansk': 'da_DK.ISO8859-1', + 'de': 'de_DE.ISO8859-1', + 'de_at': 'de_AT.ISO8859-1', + 'de_be': 'de_BE.ISO8859-1', + 'de_ch': 'de_CH.ISO8859-1', + 'de_de': 'de_DE.ISO8859-1', + 'de_it': 'de_IT.UTF-8', + 'de_li': 'de_LI.ISO8859-1', + 'de_lu': 'de_LU.ISO8859-1', + 'deutsch': 'de_DE.ISO8859-1', + 'doi_in': 'doi_IN.UTF-8', + 'dsb_de': 'dsb_DE.UTF-8', + 'dutch': 'nl_NL.ISO8859-1', + 'dutch.iso88591': 'nl_BE.ISO8859-1', + 
'dv_mv': 'dv_MV.UTF-8', + 'dz_bt': 'dz_BT.UTF-8', + 'ee': 'ee_EE.ISO8859-4', + 'ee_ee': 'ee_EE.ISO8859-4', + 'eesti': 'et_EE.ISO8859-1', + 'el': 'el_GR.ISO8859-7', + 'el_cy': 'el_CY.ISO8859-7', + 'el_gr': 'el_GR.ISO8859-7', + 'el_gr@euro': 'el_GR.ISO8859-15', + 'en': 'en_US.ISO8859-1', + 'en_ag': 'en_AG.UTF-8', + 'en_au': 'en_AU.ISO8859-1', + 'en_be': 'en_BE.ISO8859-1', + 'en_bw': 'en_BW.ISO8859-1', + 'en_ca': 'en_CA.ISO8859-1', + 'en_dk': 'en_DK.ISO8859-1', + 'en_dl.utf8': 'en_DL.UTF-8', + 'en_gb': 'en_GB.ISO8859-1', + 'en_hk': 'en_HK.ISO8859-1', + 'en_ie': 'en_IE.ISO8859-1', + 'en_il': 'en_IL.ISO8859-1', + 'en_in': 'en_IN.ISO8859-1', + 'en_ng': 'en_NG.UTF-8', + 'en_nz': 'en_NZ.ISO8859-1', + 'en_ph': 'en_PH.ISO8859-1', + 'en_sc.utf8': 'en_SC.UTF-8', + 'en_sg': 'en_SG.ISO8859-1', + 'en_uk': 'en_GB.ISO8859-1', + 'en_us': 'en_US.ISO8859-1', + 'en_us@euro@euro': 'en_US.ISO8859-15', + 'en_za': 'en_ZA.ISO8859-1', + 'en_zm': 'en_ZM.UTF-8', + 'en_zw': 'en_ZW.ISO8859-1', + 'en_zw.utf8': 'en_ZS.UTF-8', + 'eng_gb': 'en_GB.ISO8859-1', + 'english': 'en_EN.ISO8859-1', + 'english_uk': 'en_GB.ISO8859-1', + 'english_united-states': 'en_US.ISO8859-1', + 'english_united-states.437': 'C', + 'english_us': 'en_US.ISO8859-1', + 'eo': 'eo_XX.ISO8859-3', + 'eo.utf8': 'eo.UTF-8', + 'eo_eo': 'eo_EO.ISO8859-3', + 'eo_us.utf8': 'eo_US.UTF-8', + 'eo_xx': 'eo_XX.ISO8859-3', + 'es': 'es_ES.ISO8859-1', + 'es_ar': 'es_AR.ISO8859-1', + 'es_bo': 'es_BO.ISO8859-1', + 'es_cl': 'es_CL.ISO8859-1', + 'es_co': 'es_CO.ISO8859-1', + 'es_cr': 'es_CR.ISO8859-1', + 'es_cu': 'es_CU.ISO8859-1', + 'es_do': 'es_DO.ISO8859-1', + 'es_ec': 'es_EC.ISO8859-1', + 'es_es': 'es_ES.ISO8859-1', + 'es_gt': 'es_GT.ISO8859-1', + 'es_hn': 'es_HN.ISO8859-1', + 'es_mx': 'es_MX.ISO8859-1', + 'es_ni': 'es_NI.ISO8859-1', + 'es_pa': 'es_PA.ISO8859-1', + 'es_pe': 'es_PE.ISO8859-1', + 'es_pr': 'es_PR.ISO8859-1', + 'es_py': 'es_PY.ISO8859-1', + 'es_sv': 'es_SV.ISO8859-1', + 'es_us': 'es_US.ISO8859-1', + 'es_uy': 'es_UY.ISO8859-1', + 'es_ve': 'es_VE.ISO8859-1', + 'estonian': 'et_EE.ISO8859-1', + 'et': 'et_EE.ISO8859-15', + 'et_ee': 'et_EE.ISO8859-15', + 'eu': 'eu_ES.ISO8859-1', + 'eu_es': 'eu_ES.ISO8859-1', + 'eu_fr': 'eu_FR.ISO8859-1', + 'fa': 'fa_IR.UTF-8', + 'fa_ir': 'fa_IR.UTF-8', + 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342', + 'ff_sn': 'ff_SN.UTF-8', + 'fi': 'fi_FI.ISO8859-15', + 'fi_fi': 'fi_FI.ISO8859-15', + 'fil_ph': 'fil_PH.UTF-8', + 'finnish': 'fi_FI.ISO8859-1', + 'fo': 'fo_FO.ISO8859-1', + 'fo_fo': 'fo_FO.ISO8859-1', + 'fr': 'fr_FR.ISO8859-1', + 'fr_be': 'fr_BE.ISO8859-1', + 'fr_ca': 'fr_CA.ISO8859-1', + 'fr_ch': 'fr_CH.ISO8859-1', + 'fr_fr': 'fr_FR.ISO8859-1', + 'fr_lu': 'fr_LU.ISO8859-1', + 'fran\xe7ais': 'fr_FR.ISO8859-1', + 'fre_fr': 'fr_FR.ISO8859-1', + 'french': 'fr_FR.ISO8859-1', + 'french.iso88591': 'fr_CH.ISO8859-1', + 'french_france': 'fr_FR.ISO8859-1', + 'fur_it': 'fur_IT.UTF-8', + 'fy_de': 'fy_DE.UTF-8', + 'fy_nl': 'fy_NL.UTF-8', + 'ga': 'ga_IE.ISO8859-1', + 'ga_ie': 'ga_IE.ISO8859-1', + 'galego': 'gl_ES.ISO8859-1', + 'galician': 'gl_ES.ISO8859-1', + 'gbm_in': 'gbm_IN.UTF-8', + 'gd': 'gd_GB.ISO8859-1', + 'gd_gb': 'gd_GB.ISO8859-1', + 'ger_de': 'de_DE.ISO8859-1', + 'german': 'de_DE.ISO8859-1', + 'german.iso88591': 'de_CH.ISO8859-1', + 'german_germany': 'de_DE.ISO8859-1', + 'gez_er': 'gez_ER.UTF-8', + 'gez_et': 'gez_ET.UTF-8', + 'gl': 'gl_ES.ISO8859-1', + 'gl_es': 'gl_ES.ISO8859-1', + 'greek': 'el_GR.ISO8859-7', + 'gu_in': 'gu_IN.UTF-8', + 'gv': 'gv_GB.ISO8859-1', + 'gv_gb': 'gv_GB.ISO8859-1', + 'ha_ng': 'ha_NG.UTF-8', + 'hak_tw': 'hak_TW.UTF-8', 
+ 'he': 'he_IL.ISO8859-8', + 'he_il': 'he_IL.ISO8859-8', + 'hebrew': 'he_IL.ISO8859-8', + 'hi': 'hi_IN.ISCII-DEV', + 'hi_in': 'hi_IN.ISCII-DEV', + 'hi_in.isciidev': 'hi_IN.ISCII-DEV', + 'hif_fj': 'hif_FJ.UTF-8', + 'hne': 'hne_IN.UTF-8', + 'hne_in': 'hne_IN.UTF-8', + 'hr': 'hr_HR.ISO8859-2', + 'hr_hr': 'hr_HR.ISO8859-2', + 'hrvatski': 'hr_HR.ISO8859-2', + 'hsb_de': 'hsb_DE.ISO8859-2', + 'ht_ht': 'ht_HT.UTF-8', + 'hu': 'hu_HU.ISO8859-2', + 'hu_hu': 'hu_HU.ISO8859-2', + 'hungarian': 'hu_HU.ISO8859-2', + 'hy_am': 'hy_AM.UTF-8', + 'hy_am.armscii8': 'hy_AM.ARMSCII_8', + 'ia': 'ia.UTF-8', + 'ia_fr': 'ia_FR.UTF-8', + 'icelandic': 'is_IS.ISO8859-1', + 'id': 'id_ID.ISO8859-1', + 'id_id': 'id_ID.ISO8859-1', + 'ie': 'ie.UTF-8', + 'ig_ng': 'ig_NG.UTF-8', + 'ik_ca': 'ik_CA.UTF-8', + 'in': 'id_ID.ISO8859-1', + 'in_id': 'id_ID.ISO8859-1', + 'is': 'is_IS.ISO8859-1', + 'is_is': 'is_IS.ISO8859-1', + 'iso-8859-1': 'en_US.ISO8859-1', + 'iso-8859-15': 'en_US.ISO8859-15', + 'iso8859-1': 'en_US.ISO8859-1', + 'iso8859-15': 'en_US.ISO8859-15', + 'iso_8859_1': 'en_US.ISO8859-1', + 'iso_8859_15': 'en_US.ISO8859-15', + 'it': 'it_IT.ISO8859-1', + 'it_ch': 'it_CH.ISO8859-1', + 'it_it': 'it_IT.ISO8859-1', + 'italian': 'it_IT.ISO8859-1', + 'iu': 'iu_CA.NUNACOM-8', + 'iu_ca': 'iu_CA.NUNACOM-8', + 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8', + 'iw': 'he_IL.ISO8859-8', + 'iw_il': 'he_IL.ISO8859-8', + 'iw_il.utf8': 'iw_IL.UTF-8', + 'ja': 'ja_JP.eucJP', + 'ja_jp': 'ja_JP.eucJP', + 'ja_jp.euc': 'ja_JP.eucJP', + 'ja_jp.mscode': 'ja_JP.SJIS', + 'ja_jp.pck': 'ja_JP.SJIS', + 'japan': 'ja_JP.eucJP', + 'japanese': 'ja_JP.eucJP', + 'japanese-euc': 'ja_JP.eucJP', + 'japanese.euc': 'ja_JP.eucJP', + 'jp_jp': 'ja_JP.eucJP', + 'ka': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY', + 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS', + 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY', + 'kab_dz': 'kab_DZ.UTF-8', + 'kk_kz': 'kk_KZ.ptcp154', + 'kl': 'kl_GL.ISO8859-1', + 'kl_gl': 'kl_GL.ISO8859-1', + 'km_kh': 'km_KH.UTF-8', + 'kn': 'kn_IN.UTF-8', + 'kn_in': 'kn_IN.UTF-8', + 'ko': 'ko_KR.eucKR', + 'ko_kr': 'ko_KR.eucKR', + 'ko_kr.euc': 'ko_KR.eucKR', + 'kok_in': 'kok_IN.UTF-8', + 'korean': 'ko_KR.eucKR', + 'korean.euc': 'ko_KR.eucKR', + 'ks': 'ks_IN.UTF-8', + 'ks_in': 'ks_IN.UTF-8', + 'ks_in@devanagari.utf8': 'ks_IN.UTF-8@devanagari', + 'ku_tr': 'ku_TR.ISO8859-9', + 'kv_ru': 'kv_RU.UTF-8', + 'kw': 'kw_GB.ISO8859-1', + 'kw_gb': 'kw_GB.ISO8859-1', + 'ky': 'ky_KG.UTF-8', + 'ky_kg': 'ky_KG.UTF-8', + 'lb_lu': 'lb_LU.UTF-8', + 'lg_ug': 'lg_UG.ISO8859-10', + 'li_be': 'li_BE.UTF-8', + 'li_nl': 'li_NL.UTF-8', + 'lij_it': 'lij_IT.UTF-8', + 'lithuanian': 'lt_LT.ISO8859-13', + 'ln_cd': 'ln_CD.UTF-8', + 'lo': 'lo_LA.MULELAO-1', + 'lo_la': 'lo_LA.MULELAO-1', + 'lo_la.cp1133': 'lo_LA.IBM-CP1133', + 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133', + 'lo_la.mulelao1': 'lo_LA.MULELAO-1', + 'lt': 'lt_LT.ISO8859-13', + 'lt_lt': 'lt_LT.ISO8859-13', + 'ltg_lv.utf8': 'ltg_LV.UTF-8', + 'lv': 'lv_LV.ISO8859-13', + 'lv_lv': 'lv_LV.ISO8859-13', + 'lzh_tw': 'lzh_TW.UTF-8', + 'mag_in': 'mag_IN.UTF-8', + 'mai': 'mai_IN.UTF-8', + 'mai_in': 'mai_IN.UTF-8', + 'mai_np': 'mai_NP.UTF-8', + 'mdf_ru': 'mdf_RU.UTF-8', + 'mfe_mu': 'mfe_MU.UTF-8', + 'mg_mg': 'mg_MG.ISO8859-15', + 'mhr_ru': 'mhr_RU.UTF-8', + 'mi': 'mi_NZ.ISO8859-1', + 'mi_nz': 'mi_NZ.ISO8859-1', + 'miq_ni': 'miq_NI.UTF-8', + 'mjw_in': 'mjw_IN.UTF-8', + 'mk': 'mk_MK.ISO8859-5', + 'mk_mk': 'mk_MK.ISO8859-5', + 'ml': 'ml_IN.UTF-8', + 'ml_in': 'ml_IN.UTF-8', + 'mn_mn': 'mn_MN.UTF-8', + 
'mni_in': 'mni_IN.UTF-8', + 'mnw_mm': 'mnw_MM.UTF-8', + 'mr': 'mr_IN.UTF-8', + 'mr_in': 'mr_IN.UTF-8', + 'ms': 'ms_MY.ISO8859-1', + 'ms_my': 'ms_MY.ISO8859-1', + 'mt': 'mt_MT.ISO8859-3', + 'mt_mt': 'mt_MT.ISO8859-3', + 'my_mm': 'my_MM.UTF-8', + 'nan_tw': 'nan_TW.UTF-8', + 'nb': 'nb_NO.ISO8859-1', + 'nb_no': 'nb_NO.ISO8859-1', + 'nds_de': 'nds_DE.UTF-8', + 'nds_nl': 'nds_NL.UTF-8', + 'ne_np': 'ne_NP.UTF-8', + 'nhn_mx': 'nhn_MX.UTF-8', + 'niu_nu': 'niu_NU.UTF-8', + 'niu_nz': 'niu_NZ.UTF-8', + 'nl': 'nl_NL.ISO8859-1', + 'nl_aw': 'nl_AW.UTF-8', + 'nl_be': 'nl_BE.ISO8859-1', + 'nl_nl': 'nl_NL.ISO8859-1', + 'nn': 'nn_NO.ISO8859-1', + 'nn_no': 'nn_NO.ISO8859-1', + 'no': 'no_NO.ISO8859-1', + 'no@nynorsk': 'ny_NO.ISO8859-1', + 'no_no': 'no_NO.ISO8859-1', + 'no_no.iso88591@bokmal': 'no_NO.ISO8859-1', + 'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1', + 'norwegian': 'no_NO.ISO8859-1', + 'nr': 'nr_ZA.ISO8859-1', + 'nr_za': 'nr_ZA.ISO8859-1', + 'nso': 'nso_ZA.ISO8859-15', + 'nso_za': 'nso_ZA.ISO8859-15', + 'ny': 'ny_NO.ISO8859-1', + 'ny_no': 'ny_NO.ISO8859-1', + 'nynorsk': 'nn_NO.ISO8859-1', + 'oc': 'oc_FR.ISO8859-1', + 'oc_fr': 'oc_FR.ISO8859-1', + 'om_et': 'om_ET.UTF-8', + 'om_ke': 'om_KE.ISO8859-1', + 'or': 'or_IN.UTF-8', + 'or_in': 'or_IN.UTF-8', + 'os_ru': 'os_RU.UTF-8', + 'pa': 'pa_IN.UTF-8', + 'pa_in': 'pa_IN.UTF-8', + 'pa_pk': 'pa_PK.UTF-8', + 'pap_an': 'pap_AN.UTF-8', + 'pap_aw': 'pap_AW.UTF-8', + 'pap_cw': 'pap_CW.UTF-8', + 'pd': 'pd_US.ISO8859-1', + 'pd_de': 'pd_DE.ISO8859-1', + 'pd_us': 'pd_US.ISO8859-1', + 'ph': 'ph_PH.ISO8859-1', + 'ph_ph': 'ph_PH.ISO8859-1', + 'pl': 'pl_PL.ISO8859-2', + 'pl_pl': 'pl_PL.ISO8859-2', + 'polish': 'pl_PL.ISO8859-2', + 'portuguese': 'pt_PT.ISO8859-1', + 'portuguese_brazil': 'pt_BR.ISO8859-1', + 'posix': 'C', + 'posix-utf2': 'C', + 'pp': 'pp_AN.ISO8859-1', + 'pp_an': 'pp_AN.ISO8859-1', + 'ps_af': 'ps_AF.UTF-8', + 'pt': 'pt_PT.ISO8859-1', + 'pt_br': 'pt_BR.ISO8859-1', + 'pt_pt': 'pt_PT.ISO8859-1', + 'quz_pe': 'quz_PE.UTF-8', + 'raj_in': 'raj_IN.UTF-8', + 'rif_ma': 'rif_MA.UTF-8', + 'ro': 'ro_RO.ISO8859-2', + 'ro_ro': 'ro_RO.ISO8859-2', + 'romanian': 'ro_RO.ISO8859-2', + 'ru': 'ru_RU.UTF-8', + 'ru_ru': 'ru_RU.UTF-8', + 'ru_ua': 'ru_UA.KOI8-U', + 'rumanian': 'ro_RO.ISO8859-2', + 'russian': 'ru_RU.ISO8859-5', + 'rw': 'rw_RW.ISO8859-1', + 'rw_rw': 'rw_RW.ISO8859-1', + 'sa_in': 'sa_IN.UTF-8', + 'sah_ru': 'sah_RU.UTF-8', + 'sat_in': 'sat_IN.UTF-8', + 'sc_it': 'sc_IT.UTF-8', + 'scn_it': 'scn_IT.UTF-8', + 'sd': 'sd_IN.UTF-8', + 'sd_in': 'sd_IN.UTF-8', + 'sd_in@devanagari.utf8': 'sd_IN.UTF-8@devanagari', + 'sd_pk': 'sd_PK.UTF-8', + 'se_no': 'se_NO.UTF-8', + 'serbocroatian': 'sr_RS.UTF-8@latin', + 'sgs_lt': 'sgs_LT.UTF-8', + 'sh': 'sr_RS.UTF-8@latin', + 'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2', + 'sh_hr': 'sh_HR.ISO8859-2', + 'sh_hr.iso88592': 'hr_HR.ISO8859-2', + 'sh_sp': 'sr_CS.ISO8859-2', + 'sh_yu': 'sr_RS.UTF-8@latin', + 'shn_mm': 'shn_MM.UTF-8', + 'shs_ca': 'shs_CA.UTF-8', + 'si': 'si_LK.UTF-8', + 'si_lk': 'si_LK.UTF-8', + 'sid_et': 'sid_ET.UTF-8', + 'sinhala': 'si_LK.UTF-8', + 'sk': 'sk_SK.ISO8859-2', + 'sk_sk': 'sk_SK.ISO8859-2', + 'sl': 'sl_SI.ISO8859-2', + 'sl_cs': 'sl_CS.ISO8859-2', + 'sl_si': 'sl_SI.ISO8859-2', + 'slovak': 'sk_SK.ISO8859-2', + 'slovene': 'sl_SI.ISO8859-2', + 'slovenian': 'sl_SI.ISO8859-2', + 'sm_ws': 'sm_WS.UTF-8', + 'so_dj': 'so_DJ.ISO8859-1', + 'so_et': 'so_ET.UTF-8', + 'so_ke': 'so_KE.ISO8859-1', + 'so_so': 'so_SO.ISO8859-1', + 'sp': 'sr_CS.ISO8859-5', + 'sp_yu': 'sr_CS.ISO8859-5', + 'spanish': 'es_ES.ISO8859-1', + 'spanish_spain': 
'es_ES.ISO8859-1', + 'sq': 'sq_AL.ISO8859-2', + 'sq_al': 'sq_AL.ISO8859-2', + 'sq_mk': 'sq_MK.UTF-8', + 'sr': 'sr_RS.UTF-8', + 'sr@cyrillic': 'sr_RS.UTF-8', + 'sr@latn': 'sr_RS.UTF-8@latin', + 'sr_cs': 'sr_CS.UTF-8', + 'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2', + 'sr_cs@latn': 'sr_CS.UTF-8@latin', + 'sr_me': 'sr_ME.UTF-8', + 'sr_rs': 'sr_RS.UTF-8', + 'sr_rs@latn': 'sr_RS.UTF-8@latin', + 'sr_sp': 'sr_CS.ISO8859-2', + 'sr_yu': 'sr_RS.UTF-8@latin', + 'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251', + 'sr_yu.iso88592': 'sr_CS.ISO8859-2', + 'sr_yu.iso88595': 'sr_CS.ISO8859-5', + 'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5', + 'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251', + 'sr_yu.utf8': 'sr_RS.UTF-8', + 'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8', + 'sr_yu@cyrillic': 'sr_RS.UTF-8', + 'ss': 'ss_ZA.ISO8859-1', + 'ss_za': 'ss_ZA.ISO8859-1', + 'ssy_er': 'ssy_ER.UTF-8', + 'st': 'st_ZA.ISO8859-1', + 'st_za': 'st_ZA.ISO8859-1', + 'su_id': 'su_ID.UTF-8', + 'sv': 'sv_SE.ISO8859-1', + 'sv_fi': 'sv_FI.ISO8859-1', + 'sv_se': 'sv_SE.ISO8859-1', + 'sw_ke': 'sw_KE.UTF-8', + 'sw_tz': 'sw_TZ.UTF-8', + 'swedish': 'sv_SE.ISO8859-1', + 'syr': 'syr.UTF-8', + 'szl_pl': 'szl_PL.UTF-8', + 'ta': 'ta_IN.TSCII-0', + 'ta_in': 'ta_IN.TSCII-0', + 'ta_in.tscii': 'ta_IN.TSCII-0', + 'ta_in.tscii0': 'ta_IN.TSCII-0', + 'ta_lk': 'ta_LK.UTF-8', + 'tcy_in.utf8': 'tcy_IN.UTF-8', + 'te': 'te_IN.UTF-8', + 'te_in': 'te_IN.UTF-8', + 'tg': 'tg_TJ.KOI8-C', + 'tg_tj': 'tg_TJ.KOI8-C', + 'th': 'th_TH.ISO8859-11', + 'th_th': 'th_TH.ISO8859-11', + 'th_th.tactis': 'th_TH.TIS620', + 'th_th.tis620': 'th_TH.TIS620', + 'thai': 'th_TH.ISO8859-11', + 'the_np': 'the_NP.UTF-8', + 'ti_er': 'ti_ER.UTF-8', + 'ti_et': 'ti_ET.UTF-8', + 'tig_er': 'tig_ER.UTF-8', + 'tk_tm': 'tk_TM.UTF-8', + 'tl': 'tl_PH.ISO8859-1', + 'tl_ph': 'tl_PH.ISO8859-1', + 'tn': 'tn_ZA.ISO8859-15', + 'tn_za': 'tn_ZA.ISO8859-15', + 'to_to': 'to_TO.UTF-8', + 'tok': 'tok.UTF-8', + 'tpi_pg': 'tpi_PG.UTF-8', + 'tr': 'tr_TR.ISO8859-9', + 'tr_cy': 'tr_CY.ISO8859-9', + 'tr_tr': 'tr_TR.ISO8859-9', + 'ts': 'ts_ZA.ISO8859-1', + 'ts_za': 'ts_ZA.ISO8859-1', + 'tt': 'tt_RU.TATAR-CYR', + 'tt_ru': 'tt_RU.TATAR-CYR', + 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR', + 'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif', + 'turkish': 'tr_TR.ISO8859-9', + 'ug_cn': 'ug_CN.UTF-8', + 'uk': 'uk_UA.KOI8-U', + 'uk_ua': 'uk_UA.KOI8-U', + 'univ.utf8': 'en_US.UTF-8', + 'universal.utf8@ucs4': 'en_US.UTF-8', + 'unm_us': 'unm_US.UTF-8', + 'ur': 'ur_PK.CP1256', + 'ur_in': 'ur_IN.UTF-8', + 'ur_pk': 'ur_PK.CP1256', + 'uz': 'uz_UZ.UTF-8', + 'uz_uz': 'uz_UZ.UTF-8', + 'uz_uz@cyrillic': 'uz_UZ.UTF-8', + 've': 've_ZA.UTF-8', + 've_za': 've_ZA.UTF-8', + 'vi': 'vi_VN.TCVN', + 'vi_vn': 'vi_VN.TCVN', + 'vi_vn.tcvn': 'vi_VN.TCVN', + 'vi_vn.tcvn5712': 'vi_VN.TCVN', + 'vi_vn.viscii': 'vi_VN.VISCII', + 'vi_vn.viscii111': 'vi_VN.VISCII', + 'wa': 'wa_BE.ISO8859-1', + 'wa_be': 'wa_BE.ISO8859-1', + 'wae_ch': 'wae_CH.UTF-8', + 'wal_et': 'wal_ET.UTF-8', + 'wo_sn': 'wo_SN.UTF-8', + 'xh': 'xh_ZA.ISO8859-1', + 'xh_za': 'xh_ZA.ISO8859-1', + 'yi': 'yi_US.CP1255', + 'yi_us': 'yi_US.CP1255', + 'yo_ng': 'yo_NG.UTF-8', + 'yue_hk': 'yue_HK.UTF-8', + 'yuw_pg': 'yuw_PG.UTF-8', + 'zgh_ma': 'zgh_MA.UTF-8', + 'zh': 'zh_CN.eucCN', + 'zh_cn': 'zh_CN.gb2312', + 'zh_cn.big5': 'zh_TW.big5', + 'zh_cn.euc': 'zh_CN.eucCN', + 'zh_hk': 'zh_HK.big5hkscs', + 'zh_hk.big5hk': 'zh_HK.big5hkscs', + 'zh_sg': 'zh_SG.GB2312', + 'zh_sg.gbk': 'zh_SG.GBK', + 'zh_tw': 'zh_TW.big5', + 'zh_tw.euc': 'zh_TW.eucTW', + 'zh_tw.euctw': 'zh_TW.eucTW', + 'zu': 'zu_ZA.ISO8859-1', + 'zu_za': 'zu_ZA.ISO8859-1', +} + +# +# 
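The locale_alias table above is what locale.normalize() consults to expand a bare alias into a full language_country.encoding name. A minimal sketch of the lookup, with results read straight from the entries above:

    import locale

    # normalize() case-folds its argument and looks it up in locale_alias:
    locale.normalize('hu')            # -> 'hu_HU.ISO8859-2'
    locale.normalize('ja_jp.euc')     # -> 'ja_JP.eucJP'
    locale.normalize('iso8859-15')    # -> 'en_US.ISO8859-15'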
+# This maps Windows language identifiers to locale strings.
+#
+# This list has been updated from
+# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
+# to include every locale up to Windows Vista.
+#
+# NOTE: this mapping is incomplete. If your language is missing, please
+# submit a bug report as detailed in the Python devguide at:
+# https://devguide.python.org/triage/issue-tracker/
+# Make sure you include the missing language identifier and the suggested
+# locale code.
+#
+
+windows_locale = {
+    0x0436: "af_ZA", # Afrikaans
+    0x041c: "sq_AL", # Albanian
+    0x0484: "gsw_FR",# Alsatian - France
+    0x045e: "am_ET", # Amharic - Ethiopia
+    0x0401: "ar_SA", # Arabic - Saudi Arabia
+    0x0801: "ar_IQ", # Arabic - Iraq
+    0x0c01: "ar_EG", # Arabic - Egypt
+    0x1001: "ar_LY", # Arabic - Libya
+    0x1401: "ar_DZ", # Arabic - Algeria
+    0x1801: "ar_MA", # Arabic - Morocco
+    0x1c01: "ar_TN", # Arabic - Tunisia
+    0x2001: "ar_OM", # Arabic - Oman
+    0x2401: "ar_YE", # Arabic - Yemen
+    0x2801: "ar_SY", # Arabic - Syria
+    0x2c01: "ar_JO", # Arabic - Jordan
+    0x3001: "ar_LB", # Arabic - Lebanon
+    0x3401: "ar_KW", # Arabic - Kuwait
+    0x3801: "ar_AE", # Arabic - United Arab Emirates
+    0x3c01: "ar_BH", # Arabic - Bahrain
+    0x4001: "ar_QA", # Arabic - Qatar
+    0x042b: "hy_AM", # Armenian
+    0x044d: "as_IN", # Assamese - India
+    0x042c: "az_AZ", # Azeri - Latin
+    0x082c: "az_AZ", # Azeri - Cyrillic
+    0x046d: "ba_RU", # Bashkir
+    0x042d: "eu_ES", # Basque - Spain
+    0x0423: "be_BY", # Belarusian
+    0x0445: "bn_IN", # Bengali
+    0x201a: "bs_BA", # Bosnian - Cyrillic
+    0x141a: "bs_BA", # Bosnian - Latin
+    0x047e: "br_FR", # Breton - France
+    0x0402: "bg_BG", # Bulgarian
+#    0x0455: "my_MM", # Burmese - Not supported
+    0x0403: "ca_ES", # Catalan
+    0x0004: "zh_CHS",# Chinese - Simplified
+    0x0404: "zh_TW", # Chinese - Taiwan
+    0x0804: "zh_CN", # Chinese - PRC
+    0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
+    0x1004: "zh_SG", # Chinese - Singapore
+    0x1404: "zh_MO", # Chinese - Macao S.A.R.
+ 0x7c04: "zh_CHT",# Chinese - Traditional + 0x0483: "co_FR", # Corsican - France + 0x041a: "hr_HR", # Croatian + 0x101a: "hr_BA", # Croatian - Bosnia + 0x0405: "cs_CZ", # Czech + 0x0406: "da_DK", # Danish + 0x048c: "gbz_AF",# Dari - Afghanistan + 0x0465: "div_MV",# Divehi - Maldives + 0x0413: "nl_NL", # Dutch - The Netherlands + 0x0813: "nl_BE", # Dutch - Belgium + 0x0409: "en_US", # English - United States + 0x0809: "en_GB", # English - United Kingdom + 0x0c09: "en_AU", # English - Australia + 0x1009: "en_CA", # English - Canada + 0x1409: "en_NZ", # English - New Zealand + 0x1809: "en_IE", # English - Ireland + 0x1c09: "en_ZA", # English - South Africa + 0x2009: "en_JA", # English - Jamaica + 0x2409: "en_CB", # English - Caribbean + 0x2809: "en_BZ", # English - Belize + 0x2c09: "en_TT", # English - Trinidad + 0x3009: "en_ZW", # English - Zimbabwe + 0x3409: "en_PH", # English - Philippines + 0x4009: "en_IN", # English - India + 0x4409: "en_MY", # English - Malaysia + 0x4809: "en_IN", # English - Singapore + 0x0425: "et_EE", # Estonian + 0x0438: "fo_FO", # Faroese + 0x0464: "fil_PH",# Filipino + 0x040b: "fi_FI", # Finnish + 0x040c: "fr_FR", # French - France + 0x080c: "fr_BE", # French - Belgium + 0x0c0c: "fr_CA", # French - Canada + 0x100c: "fr_CH", # French - Switzerland + 0x140c: "fr_LU", # French - Luxembourg + 0x180c: "fr_MC", # French - Monaco + 0x0462: "fy_NL", # Frisian - Netherlands + 0x0456: "gl_ES", # Galician + 0x0437: "ka_GE", # Georgian + 0x0407: "de_DE", # German - Germany + 0x0807: "de_CH", # German - Switzerland + 0x0c07: "de_AT", # German - Austria + 0x1007: "de_LU", # German - Luxembourg + 0x1407: "de_LI", # German - Liechtenstein + 0x0408: "el_GR", # Greek + 0x046f: "kl_GL", # Greenlandic - Greenland + 0x0447: "gu_IN", # Gujarati + 0x0468: "ha_NG", # Hausa - Latin + 0x040d: "he_IL", # Hebrew + 0x0439: "hi_IN", # Hindi + 0x040e: "hu_HU", # Hungarian + 0x040f: "is_IS", # Icelandic + 0x0421: "id_ID", # Indonesian + 0x045d: "iu_CA", # Inuktitut - Syllabics + 0x085d: "iu_CA", # Inuktitut - Latin + 0x083c: "ga_IE", # Irish - Ireland + 0x0410: "it_IT", # Italian - Italy + 0x0810: "it_CH", # Italian - Switzerland + 0x0411: "ja_JP", # Japanese + 0x044b: "kn_IN", # Kannada - India + 0x043f: "kk_KZ", # Kazakh + 0x0453: "kh_KH", # Khmer - Cambodia + 0x0486: "qut_GT",# K'iche - Guatemala + 0x0487: "rw_RW", # Kinyarwanda - Rwanda + 0x0457: "kok_IN",# Konkani + 0x0412: "ko_KR", # Korean + 0x0440: "ky_KG", # Kyrgyz + 0x0454: "lo_LA", # Lao - Lao PDR + 0x0426: "lv_LV", # Latvian + 0x0427: "lt_LT", # Lithuanian + 0x082e: "dsb_DE",# Lower Sorbian - Germany + 0x046e: "lb_LU", # Luxembourgish + 0x042f: "mk_MK", # FYROM Macedonian + 0x043e: "ms_MY", # Malay - Malaysia + 0x083e: "ms_BN", # Malay - Brunei Darussalam + 0x044c: "ml_IN", # Malayalam - India + 0x043a: "mt_MT", # Maltese + 0x0481: "mi_NZ", # Maori + 0x047a: "arn_CL",# Mapudungun + 0x044e: "mr_IN", # Marathi + 0x047c: "moh_CA",# Mohawk - Canada + 0x0450: "mn_MN", # Mongolian - Cyrillic + 0x0850: "mn_CN", # Mongolian - PRC + 0x0461: "ne_NP", # Nepali + 0x0414: "nb_NO", # Norwegian - Bokmal + 0x0814: "nn_NO", # Norwegian - Nynorsk + 0x0482: "oc_FR", # Occitan - France + 0x0448: "or_IN", # Oriya - India + 0x0463: "ps_AF", # Pashto - Afghanistan + 0x0429: "fa_IR", # Persian + 0x0415: "pl_PL", # Polish + 0x0416: "pt_BR", # Portuguese - Brazil + 0x0816: "pt_PT", # Portuguese - Portugal + 0x0446: "pa_IN", # Punjabi + 0x046b: "quz_BO",# Quechua (Bolivia) + 0x086b: "quz_EC",# Quechua (Ecuador) + 0x0c6b: "quz_PE",# Quechua (Peru) + 0x0418: 
"ro_RO", # Romanian - Romania + 0x0417: "rm_CH", # Romansh + 0x0419: "ru_RU", # Russian + 0x243b: "smn_FI",# Sami Finland + 0x103b: "smj_NO",# Sami Norway + 0x143b: "smj_SE",# Sami Sweden + 0x043b: "se_NO", # Sami Northern Norway + 0x083b: "se_SE", # Sami Northern Sweden + 0x0c3b: "se_FI", # Sami Northern Finland + 0x203b: "sms_FI",# Sami Skolt + 0x183b: "sma_NO",# Sami Southern Norway + 0x1c3b: "sma_SE",# Sami Southern Sweden + 0x044f: "sa_IN", # Sanskrit + 0x0c1a: "sr_SP", # Serbian - Cyrillic + 0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic + 0x081a: "sr_SP", # Serbian - Latin + 0x181a: "sr_BA", # Serbian - Bosnia Latin + 0x045b: "si_LK", # Sinhala - Sri Lanka + 0x046c: "ns_ZA", # Northern Sotho + 0x0432: "tn_ZA", # Setswana - Southern Africa + 0x041b: "sk_SK", # Slovak + 0x0424: "sl_SI", # Slovenian + 0x040a: "es_ES", # Spanish - Spain + 0x080a: "es_MX", # Spanish - Mexico + 0x0c0a: "es_ES", # Spanish - Spain (Modern) + 0x100a: "es_GT", # Spanish - Guatemala + 0x140a: "es_CR", # Spanish - Costa Rica + 0x180a: "es_PA", # Spanish - Panama + 0x1c0a: "es_DO", # Spanish - Dominican Republic + 0x200a: "es_VE", # Spanish - Venezuela + 0x240a: "es_CO", # Spanish - Colombia + 0x280a: "es_PE", # Spanish - Peru + 0x2c0a: "es_AR", # Spanish - Argentina + 0x300a: "es_EC", # Spanish - Ecuador + 0x340a: "es_CL", # Spanish - Chile + 0x380a: "es_UR", # Spanish - Uruguay + 0x3c0a: "es_PY", # Spanish - Paraguay + 0x400a: "es_BO", # Spanish - Bolivia + 0x440a: "es_SV", # Spanish - El Salvador + 0x480a: "es_HN", # Spanish - Honduras + 0x4c0a: "es_NI", # Spanish - Nicaragua + 0x500a: "es_PR", # Spanish - Puerto Rico + 0x540a: "es_US", # Spanish - United States +# 0x0430: "", # Sutu - Not supported + 0x0441: "sw_KE", # Swahili + 0x041d: "sv_SE", # Swedish - Sweden + 0x081d: "sv_FI", # Swedish - Finland + 0x045a: "syr_SY",# Syriac + 0x0428: "tg_TJ", # Tajik - Cyrillic + 0x085f: "tmz_DZ",# Tamazight - Latin + 0x0449: "ta_IN", # Tamil + 0x0444: "tt_RU", # Tatar + 0x044a: "te_IN", # Telugu + 0x041e: "th_TH", # Thai + 0x0851: "bo_BT", # Tibetan - Bhutan + 0x0451: "bo_CN", # Tibetan - PRC + 0x041f: "tr_TR", # Turkish + 0x0442: "tk_TM", # Turkmen - Cyrillic + 0x0480: "ug_CN", # Uighur - Arabic + 0x0422: "uk_UA", # Ukrainian + 0x042e: "wen_DE",# Upper Sorbian - Germany + 0x0420: "ur_PK", # Urdu + 0x0820: "ur_IN", # Urdu - India + 0x0443: "uz_UZ", # Uzbek - Latin + 0x0843: "uz_UZ", # Uzbek - Cyrillic + 0x042a: "vi_VN", # Vietnamese + 0x0452: "cy_GB", # Welsh + 0x0488: "wo_SN", # Wolof - Senegal + 0x0434: "xh_ZA", # Xhosa - South Africa + 0x0485: "sah_RU",# Yakut - Cyrillic + 0x0478: "ii_CN", # Yi - PRC + 0x046a: "yo_NG", # Yoruba - Nigeria + 0x0435: "zu_ZA", # Zulu +} + +def _print_locale(): + + """ Test function. 
+ """ + categories = {} + def _init_categories(categories=categories): + for k,v in globals().items(): + if k[:3] == 'LC_': + categories[k] = v + _init_categories() + del categories['LC_ALL'] + + print('Locale defaults as determined by getdefaultlocale():') + print('-'*72) + lang, enc = getdefaultlocale() + print('Language: ', lang or '(undefined)') + print('Encoding: ', enc or '(undefined)') + print() + + print('Locale settings on startup:') + print('-'*72) + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + + try: + setlocale(LC_ALL, "") + except: + print('NOTE:') + print('setlocale(LC_ALL, "") does not support the default locale') + print('given in the OS environment variables.') + else: + print() + print('Locale settings after calling setlocale(LC_ALL, ""):') + print('-'*72) + for name,category in categories.items(): + print(name, '...') + lang, enc = getlocale(category) + print(' Language: ', lang or '(undefined)') + print(' Encoding: ', enc or '(undefined)') + print() + +### + +try: + LC_MESSAGES +except NameError: + pass +else: + __all__.append("LC_MESSAGES") + +if __name__=='__main__': + print('Locale aliasing:') + print() + _print_locale() + print() + print('Number formatting:') + print() + _test() diff --git a/Python313_13_x86_Template/Lib/logging/__init__.py b/Python314_4_x86_Template/Lib/logging/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/logging/__init__.py rename to Python314_4_x86_Template/Lib/logging/__init__.py diff --git a/Python314_4_x86_Template/Lib/logging/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/logging/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..49afedf8 Binary files /dev/null and b/Python314_4_x86_Template/Lib/logging/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/logging/__pycache__/config.cpython-314.pyc b/Python314_4_x86_Template/Lib/logging/__pycache__/config.cpython-314.pyc new file mode 100644 index 00000000..6b130fd1 Binary files /dev/null and b/Python314_4_x86_Template/Lib/logging/__pycache__/config.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/logging/__pycache__/handlers.cpython-314.pyc b/Python314_4_x86_Template/Lib/logging/__pycache__/handlers.cpython-314.pyc new file mode 100644 index 00000000..b972ee3b Binary files /dev/null and b/Python314_4_x86_Template/Lib/logging/__pycache__/handlers.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/logging/config.py b/Python314_4_x86_Template/Lib/logging/config.py new file mode 100644 index 00000000..3d9aa00f --- /dev/null +++ b/Python314_4_x86_Template/Lib/logging/config.py @@ -0,0 +1,1077 @@ +# Copyright 2001-2023 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Configuration functions for the logging package for Python. The core package +is based on PEP 282 and comments thereto in comp.lang.python, and influenced +by Apache's log4j system. + +Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import errno +import functools +import io +import logging +import logging.handlers +import os +import queue +import re +import struct +import threading +import traceback + +from socketserver import ThreadingTCPServer, StreamRequestHandler + + +DEFAULT_LOGGING_CONFIG_PORT = 9030 + +RESET_ERROR = errno.ECONNRESET + +# +# The following code implements a socket listener for on-the-fly +# reconfiguration of logging. +# +# _listener holds the server object doing the listening +_listener = None + +def fileConfig(fname, defaults=None, disable_existing_loggers=True, encoding=None): + """ + Read the logging configuration from a ConfigParser-format file. + + This can be called several times from an application, allowing an end user + the ability to select from various pre-canned configurations (if the + developer provides a mechanism to present the choices and load the chosen + configuration). + """ + import configparser + + if isinstance(fname, str): + if not os.path.exists(fname): + raise FileNotFoundError(f"{fname} doesn't exist") + elif not os.path.getsize(fname): + raise RuntimeError(f'{fname} is an empty file') + + if isinstance(fname, configparser.RawConfigParser): + cp = fname + else: + try: + cp = configparser.ConfigParser(defaults) + if hasattr(fname, 'readline'): + cp.read_file(fname) + else: + encoding = io.text_encoding(encoding) + cp.read(fname, encoding=encoding) + except configparser.ParsingError as e: + raise RuntimeError(f'{fname} is invalid: {e}') + + formatters = _create_formatters(cp) + + # critical section + with logging._lock: + _clearExistingHandlers() + + # Handlers add themselves to logging._handlers + handlers = _install_handlers(cp, formatters) + _install_loggers(cp, handlers, disable_existing_loggers) + + +def _resolve(name): + """Resolve a dotted name to a global object.""" + name = name.split('.') + used = name.pop(0) + found = __import__(used) + for n in name: + used = used + '.' 
+ n + try: + found = getattr(found, n) + except AttributeError: + __import__(used) + found = getattr(found, n) + return found + +def _strip_spaces(alist): + return map(str.strip, alist) + +def _create_formatters(cp): + """Create and return formatters""" + flist = cp["formatters"]["keys"] + if not len(flist): + return {} + flist = flist.split(",") + flist = _strip_spaces(flist) + formatters = {} + for form in flist: + sectname = "formatter_%s" % form + fs = cp.get(sectname, "format", raw=True, fallback=None) + dfs = cp.get(sectname, "datefmt", raw=True, fallback=None) + stl = cp.get(sectname, "style", raw=True, fallback='%') + defaults = cp.get(sectname, "defaults", raw=True, fallback=None) + + c = logging.Formatter + class_name = cp[sectname].get("class") + if class_name: + c = _resolve(class_name) + + if defaults is not None: + defaults = eval(defaults, vars(logging)) + f = c(fs, dfs, stl, defaults=defaults) + else: + f = c(fs, dfs, stl) + formatters[form] = f + return formatters + + +def _install_handlers(cp, formatters): + """Install and return handlers""" + hlist = cp["handlers"]["keys"] + if not len(hlist): + return {} + hlist = hlist.split(",") + hlist = _strip_spaces(hlist) + handlers = {} + fixups = [] #for inter-handler references + for hand in hlist: + section = cp["handler_%s" % hand] + klass = section["class"] + fmt = section.get("formatter", "") + try: + klass = eval(klass, vars(logging)) + except (AttributeError, NameError): + klass = _resolve(klass) + args = section.get("args", '()') + args = eval(args, vars(logging)) + kwargs = section.get("kwargs", '{}') + kwargs = eval(kwargs, vars(logging)) + h = klass(*args, **kwargs) + h.name = hand + if "level" in section: + level = section["level"] + h.setLevel(level) + if len(fmt): + h.setFormatter(formatters[fmt]) + if issubclass(klass, logging.handlers.MemoryHandler): + target = section.get("target", "") + if len(target): #the target handler may not be loaded yet, so keep for later... + fixups.append((h, target)) + handlers[hand] = h + #now all handlers are loaded, fixup inter-handler references... + for h, t in fixups: + h.setTarget(handlers[t]) + return handlers + +def _handle_existing_loggers(existing, child_loggers, disable_existing): + """ + When (re)configuring logging, handle loggers which were in the previous + configuration but are not in the new configuration. There's no point + deleting them as other threads may continue to hold references to them; + and by disabling them, you stop them doing any logging. + + However, don't disable children of named loggers, as that's probably not + what was intended by the user. Also, allow existing loggers to NOT be + disabled if disable_existing is false. 
+ """ + root = logging.root + for log in existing: + logger = root.manager.loggerDict[log] + if log in child_loggers: + if not isinstance(logger, logging.PlaceHolder): + logger.setLevel(logging.NOTSET) + logger.handlers = [] + logger.propagate = True + else: + logger.disabled = disable_existing + +def _install_loggers(cp, handlers, disable_existing): + """Create and install loggers""" + + # configure the root first + llist = cp["loggers"]["keys"] + llist = llist.split(",") + llist = list(_strip_spaces(llist)) + llist.remove("root") + section = cp["logger_root"] + root = logging.root + log = root + if "level" in section: + level = section["level"] + log.setLevel(level) + for h in root.handlers[:]: + root.removeHandler(h) + hlist = section["handlers"] + if len(hlist): + hlist = hlist.split(",") + hlist = _strip_spaces(hlist) + for hand in hlist: + log.addHandler(handlers[hand]) + + #and now the others... + #we don't want to lose the existing loggers, + #since other threads may have pointers to them. + #existing is set to contain all existing loggers, + #and as we go through the new configuration we + #remove any which are configured. At the end, + #what's left in existing is the set of loggers + #which were in the previous configuration but + #which are not in the new configuration. + existing = list(root.manager.loggerDict.keys()) + #The list needs to be sorted so that we can + #avoid disabling child loggers of explicitly + #named loggers. With a sorted list it is easier + #to find the child loggers. + existing.sort() + #We'll keep the list of existing loggers + #which are children of named loggers here... + child_loggers = [] + #now set up the new ones... + for log in llist: + section = cp["logger_%s" % log] + qn = section["qualname"] + propagate = section.getint("propagate", fallback=1) + logger = logging.getLogger(qn) + if qn in existing: + i = existing.index(qn) + 1 # start with the entry after qn + prefixed = qn + "." + pflen = len(prefixed) + num_existing = len(existing) + while i < num_existing: + if existing[i][:pflen] == prefixed: + child_loggers.append(existing[i]) + i += 1 + existing.remove(qn) + if "level" in section: + level = section["level"] + logger.setLevel(level) + for h in logger.handlers[:]: + logger.removeHandler(h) + logger.propagate = propagate + logger.disabled = 0 + hlist = section["handlers"] + if len(hlist): + hlist = hlist.split(",") + hlist = _strip_spaces(hlist) + for hand in hlist: + logger.addHandler(handlers[hand]) + + #Disable any old loggers. There's no point deleting + #them as other threads may continue to hold references + #and by disabling them, you stop them doing any logging. + #However, don't disable children of named loggers, as that's + #probably not what was intended by the user. 
+        #for log in existing:
+        #    logger = root.manager.loggerDict[log]
+        #    if log in child_loggers:
+        #        logger.level = logging.NOTSET
+        #        logger.handlers = []
+        #        logger.propagate = 1
+        #    elif disable_existing_loggers:
+        #        logger.disabled = 1
+    _handle_existing_loggers(existing, child_loggers, disable_existing)
+
+
+def _clearExistingHandlers():
+    """Clear and close existing handlers"""
+    logging._handlers.clear()
+    logging.shutdown(logging._handlerList[:])
+    del logging._handlerList[:]
+
+
+IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
+
+
+def valid_ident(s):
+    m = IDENTIFIER.match(s)
+    if not m:
+        raise ValueError('Not a valid Python identifier: %r' % s)
+    return True
+
+
+class ConvertingMixin(object):
+    """For ConvertingXXX's, this mixin class provides common functions"""
+
+    def convert_with_key(self, key, value, replace=True):
+        result = self.configurator.convert(value)
+        #If the converted value is different, save for next time
+        if value is not result:
+            if replace:
+                self[key] = result
+            if type(result) in (ConvertingDict, ConvertingList,
+                                ConvertingTuple):
+                result.parent = self
+                result.key = key
+        return result
+
+    def convert(self, value):
+        result = self.configurator.convert(value)
+        if value is not result:
+            if type(result) in (ConvertingDict, ConvertingList,
+                                ConvertingTuple):
+                result.parent = self
+        return result
+
+
+# The ConvertingXXX classes are wrappers around standard Python containers,
+# and they serve to convert any suitable values in the container. The
+# conversion converts base dicts, lists and tuples to their wrapped
+# equivalents, whereas strings which match a conversion format are converted
+# appropriately.
+#
+# Each wrapper should have a configurator attribute holding the actual
+# configurator to use for conversion.
+
+class ConvertingDict(dict, ConvertingMixin):
+    """A converting dictionary wrapper."""
+
+    def __getitem__(self, key):
+        value = dict.__getitem__(self, key)
+        return self.convert_with_key(key, value)
+
+    def get(self, key, default=None):
+        value = dict.get(self, key, default)
+        return self.convert_with_key(key, value)
+
+    def pop(self, key, default=None):
+        value = dict.pop(self, key, default)
+        return self.convert_with_key(key, value, replace=False)
+
+class ConvertingList(list, ConvertingMixin):
+    """A converting list wrapper."""
+    def __getitem__(self, key):
+        value = list.__getitem__(self, key)
+        return self.convert_with_key(key, value)
+
+    def pop(self, idx=-1):
+        value = list.pop(self, idx)
+        return self.convert(value)
+
+class ConvertingTuple(tuple, ConvertingMixin):
+    """A converting tuple wrapper."""
+    def __getitem__(self, key):
+        value = tuple.__getitem__(self, key)
+        # Can't replace a tuple entry.
+        return self.convert_with_key(key, value, replace=False)
+
+class BaseConfigurator(object):
+    """
+    The configurator base class which defines some useful defaults.
+    """
+
+    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
+
+    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
+    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
+    INDEX_PATTERN = re.compile(r'^\[([^\[\]]*)\]\s*')
+    DIGIT_PATTERN = re.compile(r'^\d+$')
+
+    value_converters = {
+        'ext' : 'ext_convert',
+        'cfg' : 'cfg_convert',
+    }
+
+    # We might want to use a different one, e.g. importlib
+    importer = staticmethod(__import__)
+
+    def __init__(self, config):
+        self.config = ConvertingDict(config)
+        self.config.configurator = self
+
+    def resolve(self, s):
+        """
+        Resolve strings to objects using standard import and attribute
+        syntax.
+ """ + name = s.split('.') + used = name.pop(0) + try: + found = self.importer(used) + for frag in name: + used += '.' + frag + try: + found = getattr(found, frag) + except AttributeError: + self.importer(used) + found = getattr(found, frag) + return found + except ImportError as e: + v = ValueError('Cannot resolve %r: %s' % (s, e)) + raise v from e + + def ext_convert(self, value): + """Default converter for the ext:// protocol.""" + return self.resolve(value) + + def cfg_convert(self, value): + """Default converter for the cfg:// protocol.""" + rest = value + m = self.WORD_PATTERN.match(rest) + if m is None: + raise ValueError("Unable to convert %r" % value) + else: + rest = rest[m.end():] + d = self.config[m.groups()[0]] + #print d, rest + while rest: + m = self.DOT_PATTERN.match(rest) + if m: + d = d[m.groups()[0]] + else: + m = self.INDEX_PATTERN.match(rest) + if m: + idx = m.groups()[0] + if not self.DIGIT_PATTERN.match(idx): + d = d[idx] + else: + try: + n = int(idx) # try as number first (most likely) + d = d[n] + except TypeError: + d = d[idx] + if m: + rest = rest[m.end():] + else: + raise ValueError('Unable to convert ' + '%r at %r' % (value, rest)) + #rest should be empty + return d + + def convert(self, value): + """ + Convert values to an appropriate type. dicts, lists and tuples are + replaced by their converting alternatives. Strings are checked to + see if they have a conversion format and are converted if they do. + """ + if not isinstance(value, ConvertingDict) and isinstance(value, dict): + value = ConvertingDict(value) + value.configurator = self + elif not isinstance(value, ConvertingList) and isinstance(value, list): + value = ConvertingList(value) + value.configurator = self + elif not isinstance(value, ConvertingTuple) and\ + isinstance(value, tuple) and not hasattr(value, '_fields'): + value = ConvertingTuple(value) + value.configurator = self + elif isinstance(value, str): # str for py3k + m = self.CONVERT_PATTERN.match(value) + if m: + d = m.groupdict() + prefix = d['prefix'] + converter = self.value_converters.get(prefix, None) + if converter: + suffix = d['suffix'] + converter = getattr(self, converter) + value = converter(suffix) + return value + + def configure_custom(self, config): + """Configure an object with a user-supplied factory.""" + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + # Check for valid identifiers + kwargs = {k: config[k] for k in config if (k != '.' and valid_ident(k))} + result = c(**kwargs) + props = config.pop('.', None) + if props: + for name, value in props.items(): + setattr(result, name, value) + return result + + def as_tuple(self, value): + """Utility function which converts lists to tuples.""" + if isinstance(value, list): + value = tuple(value) + return value + +def _is_queue_like_object(obj): + """Check that *obj* implements the Queue API.""" + if isinstance(obj, (queue.Queue, queue.SimpleQueue)): + return True + # defer importing multiprocessing as much as possible + from multiprocessing.queues import Queue as MPQueue + if isinstance(obj, MPQueue): + return True + # Depending on the multiprocessing start context, we cannot create + # a multiprocessing.managers.BaseManager instance 'mm' to get the + # runtime type of mm.Queue() or mm.JoinableQueue() (see gh-119819). + # + # Since we only need an object implementing the Queue API, we only + # do a protocol check, but we do not use typing.runtime_checkable() + # and typing.Protocol to reduce import time (see gh-121723). 
+ # + # Ideally, we would have wanted to simply use strict type checking + # instead of a protocol-based type checking since the latter does + # not check the method signatures. + # + # Note that only 'put_nowait' and 'get' are required by the logging + # queue handler and queue listener (see gh-124653) and that other + # methods are either optional or unused. + minimal_queue_interface = ['put_nowait', 'get'] + return all(callable(getattr(obj, method, None)) + for method in minimal_queue_interface) + +class DictConfigurator(BaseConfigurator): + """ + Configure logging using a dictionary-like object to describe the + configuration. + """ + + def configure(self): + """Do the configuration.""" + + config = self.config + if 'version' not in config: + raise ValueError("dictionary doesn't specify a version") + if config['version'] != 1: + raise ValueError("Unsupported version: %s" % config['version']) + incremental = config.pop('incremental', False) + EMPTY_DICT = {} + with logging._lock: + if incremental: + handlers = config.get('handlers', EMPTY_DICT) + for name in handlers: + if name not in logging._handlers: + raise ValueError('No handler found with ' + 'name %r' % name) + else: + try: + handler = logging._handlers[name] + handler_config = handlers[name] + level = handler_config.get('level', None) + if level: + handler.setLevel(logging._checkLevel(level)) + except Exception as e: + raise ValueError('Unable to configure handler ' + '%r' % name) from e + loggers = config.get('loggers', EMPTY_DICT) + for name in loggers: + try: + self.configure_logger(name, loggers[name], True) + except Exception as e: + raise ValueError('Unable to configure logger ' + '%r' % name) from e + root = config.get('root', None) + if root: + try: + self.configure_root(root, True) + except Exception as e: + raise ValueError('Unable to configure root ' + 'logger') from e + else: + disable_existing = config.pop('disable_existing_loggers', True) + + _clearExistingHandlers() + + # Do formatters first - they don't refer to anything else + formatters = config.get('formatters', EMPTY_DICT) + for name in formatters: + try: + formatters[name] = self.configure_formatter( + formatters[name]) + except Exception as e: + raise ValueError('Unable to configure ' + 'formatter %r' % name) from e + # Next, do filters - they don't refer to anything else, either + filters = config.get('filters', EMPTY_DICT) + for name in filters: + try: + filters[name] = self.configure_filter(filters[name]) + except Exception as e: + raise ValueError('Unable to configure ' + 'filter %r' % name) from e + + # Next, do handlers - they refer to formatters and filters + # As handlers can refer to other handlers, sort the keys + # to allow a deterministic order of configuration + handlers = config.get('handlers', EMPTY_DICT) + deferred = [] + for name in sorted(handlers): + try: + handler = self.configure_handler(handlers[name]) + handler.name = name + handlers[name] = handler + except Exception as e: + if ' not configured yet' in str(e.__cause__): + deferred.append(name) + else: + raise ValueError('Unable to configure handler ' + '%r' % name) from e + + # Now do any that were deferred + for name in deferred: + try: + handler = self.configure_handler(handlers[name]) + handler.name = name + handlers[name] = handler + except Exception as e: + raise ValueError('Unable to configure handler ' + '%r' % name) from e + + # Next, do loggers - they refer to handlers and filters + + #we don't want to lose the existing loggers, + #since other threads may have pointers to 
them. + #existing is set to contain all existing loggers, + #and as we go through the new configuration we + #remove any which are configured. At the end, + #what's left in existing is the set of loggers + #which were in the previous configuration but + #which are not in the new configuration. + root = logging.root + existing = list(root.manager.loggerDict.keys()) + #The list needs to be sorted so that we can + #avoid disabling child loggers of explicitly + #named loggers. With a sorted list it is easier + #to find the child loggers. + existing.sort() + #We'll keep the list of existing loggers + #which are children of named loggers here... + child_loggers = [] + #now set up the new ones... + loggers = config.get('loggers', EMPTY_DICT) + for name in loggers: + if name in existing: + i = existing.index(name) + 1 # look after name + prefixed = name + "." + pflen = len(prefixed) + num_existing = len(existing) + while i < num_existing: + if existing[i][:pflen] == prefixed: + child_loggers.append(existing[i]) + i += 1 + existing.remove(name) + try: + self.configure_logger(name, loggers[name]) + except Exception as e: + raise ValueError('Unable to configure logger ' + '%r' % name) from e + + #Disable any old loggers. There's no point deleting + #them as other threads may continue to hold references + #and by disabling them, you stop them doing any logging. + #However, don't disable children of named loggers, as that's + #probably not what was intended by the user. + #for log in existing: + # logger = root.manager.loggerDict[log] + # if log in child_loggers: + # logger.level = logging.NOTSET + # logger.handlers = [] + # logger.propagate = True + # elif disable_existing: + # logger.disabled = True + _handle_existing_loggers(existing, child_loggers, + disable_existing) + + # And finally, do the root logger + root = config.get('root', None) + if root: + try: + self.configure_root(root) + except Exception as e: + raise ValueError('Unable to configure root ' + 'logger') from e + + def configure_formatter(self, config): + """Configure a formatter from a dictionary.""" + if '()' in config: + factory = config['()'] # for use in exception handler + try: + result = self.configure_custom(config) + except TypeError as te: + if "'format'" not in str(te): + raise + # logging.Formatter and its subclasses expect the `fmt` + # parameter instead of `format`. Retry passing configuration + # with `fmt`. + config['fmt'] = config.pop('format') + config['()'] = factory + result = self.configure_custom(config) + else: + fmt = config.get('format', None) + dfmt = config.get('datefmt', None) + style = config.get('style', '%') + cname = config.get('class', None) + defaults = config.get('defaults', None) + + if not cname: + c = logging.Formatter + else: + c = _resolve(cname) + + kwargs = {} + + # Add defaults only if it exists. + # Prevents TypeError in custom formatter callables that do not + # accept it. 
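DictConfigurator.configure() above applies the schema in a fixed order (formatters, filters, handlers sorted by name, loggers, then root). A sketch of the equivalent dict form, which the module-level dictConfig() further below feeds into this class:

    import logging.config

    logging.config.dictConfig({
        'version': 1,                  # required by the version check above
        'disable_existing_loggers': False,
        'formatters': {
            'plain': {'format': '%(levelname)s %(name)s %(message)s'},
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',  # resolved via resolve()
                'formatter': 'plain',
                'level': 'DEBUG',
            },
        },
        'root': {'level': 'INFO', 'handlers': ['console']},
    })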
+ if defaults is not None: + kwargs['defaults'] = defaults + + # A TypeError would be raised if "validate" key is passed in with a formatter callable + # that does not accept "validate" as a parameter + if 'validate' in config: # if user hasn't mentioned it, the default will be fine + result = c(fmt, dfmt, style, config['validate'], **kwargs) + else: + result = c(fmt, dfmt, style, **kwargs) + + return result + + def configure_filter(self, config): + """Configure a filter from a dictionary.""" + if '()' in config: + result = self.configure_custom(config) + else: + name = config.get('name', '') + result = logging.Filter(name) + return result + + def add_filters(self, filterer, filters): + """Add filters to a filterer from a list of names.""" + for f in filters: + try: + if callable(f) or callable(getattr(f, 'filter', None)): + filter_ = f + else: + filter_ = self.config['filters'][f] + filterer.addFilter(filter_) + except Exception as e: + raise ValueError('Unable to add filter %r' % f) from e + + def _configure_queue_handler(self, klass, **kwargs): + if 'queue' in kwargs: + q = kwargs.pop('queue') + else: + q = queue.Queue() # unbounded + + rhl = kwargs.pop('respect_handler_level', False) + lklass = kwargs.pop('listener', logging.handlers.QueueListener) + handlers = kwargs.pop('handlers', []) + + listener = lklass(q, *handlers, respect_handler_level=rhl) + handler = klass(q, **kwargs) + handler.listener = listener + return handler + + def configure_handler(self, config): + """Configure a handler from a dictionary.""" + config_copy = dict(config) # for restoring in case of error + formatter = config.pop('formatter', None) + if formatter: + try: + formatter = self.config['formatters'][formatter] + except Exception as e: + raise ValueError('Unable to set formatter ' + '%r' % formatter) from e + level = config.pop('level', None) + filters = config.pop('filters', None) + if '()' in config: + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + factory = c + else: + cname = config.pop('class') + if callable(cname): + klass = cname + else: + klass = self.resolve(cname) + if issubclass(klass, logging.handlers.MemoryHandler): + if 'flushLevel' in config: + config['flushLevel'] = logging._checkLevel(config['flushLevel']) + if 'target' in config: + # Special case for handler which refers to another handler + try: + tn = config['target'] + th = self.config['handlers'][tn] + if not isinstance(th, logging.Handler): + config.update(config_copy) # restore for deferred cfg + raise TypeError('target not configured yet') + config['target'] = th + except Exception as e: + raise ValueError('Unable to set target handler %r' % tn) from e + elif issubclass(klass, logging.handlers.QueueHandler): + # Another special case for handler which refers to other handlers + # if 'handlers' not in config: + # raise ValueError('No handlers specified for a QueueHandler') + if 'queue' in config: + qspec = config['queue'] + + if isinstance(qspec, str): + q = self.resolve(qspec) + if not callable(q): + raise TypeError('Invalid queue specifier %r' % qspec) + config['queue'] = q() + elif isinstance(qspec, dict): + if '()' not in qspec: + raise TypeError('Invalid queue specifier %r' % qspec) + config['queue'] = self.configure_custom(dict(qspec)) + elif not _is_queue_like_object(qspec): + raise TypeError('Invalid queue specifier %r' % qspec) + + if 'listener' in config: + lspec = config['listener'] + if isinstance(lspec, type): + if not issubclass(lspec, logging.handlers.QueueListener): + raise TypeError('Invalid 
listener specifier %r' % lspec) + else: + if isinstance(lspec, str): + listener = self.resolve(lspec) + if isinstance(listener, type) and\ + not issubclass(listener, logging.handlers.QueueListener): + raise TypeError('Invalid listener specifier %r' % lspec) + elif isinstance(lspec, dict): + if '()' not in lspec: + raise TypeError('Invalid listener specifier %r' % lspec) + listener = self.configure_custom(dict(lspec)) + else: + raise TypeError('Invalid listener specifier %r' % lspec) + if not callable(listener): + raise TypeError('Invalid listener specifier %r' % lspec) + config['listener'] = listener + if 'handlers' in config: + hlist = [] + try: + for hn in config['handlers']: + h = self.config['handlers'][hn] + if not isinstance(h, logging.Handler): + config.update(config_copy) # restore for deferred cfg + raise TypeError('Required handler %r ' + 'is not configured yet' % hn) + hlist.append(h) + except Exception as e: + raise ValueError('Unable to set required handler %r' % hn) from e + config['handlers'] = hlist + elif issubclass(klass, logging.handlers.SMTPHandler) and\ + 'mailhost' in config: + config['mailhost'] = self.as_tuple(config['mailhost']) + elif issubclass(klass, logging.handlers.SysLogHandler) and\ + 'address' in config: + config['address'] = self.as_tuple(config['address']) + if issubclass(klass, logging.handlers.QueueHandler): + factory = functools.partial(self._configure_queue_handler, klass) + else: + factory = klass + kwargs = {k: config[k] for k in config if (k != '.' and valid_ident(k))} + # When deprecation ends for using the 'strm' parameter, remove the + # "except TypeError ..." + try: + result = factory(**kwargs) + except TypeError as te: + if "'stream'" not in str(te): + raise + #The argument name changed from strm to stream + #Retry with old name. + #This is so that code can be used with older Python versions + #(e.g. by Django) + kwargs['strm'] = kwargs.pop('stream') + result = factory(**kwargs) + + import warnings + warnings.warn( + "Support for custom logging handlers with the 'strm' argument " + "is deprecated and scheduled for removal in Python 3.16. " + "Define handlers with the 'stream' argument instead.", + DeprecationWarning, + stacklevel=2, + ) + if formatter: + result.setFormatter(formatter) + if level is not None: + result.setLevel(logging._checkLevel(level)) + if filters: + self.add_filters(result, filters) + props = config.pop('.', None) + if props: + for name, value in props.items(): + setattr(result, name, value) + return result + + def add_handlers(self, logger, handlers): + """Add handlers to a logger from a list of names.""" + for h in handlers: + try: + logger.addHandler(self.config['handlers'][h]) + except Exception as e: + raise ValueError('Unable to add handler %r' % h) from e + + def common_logger_config(self, logger, config, incremental=False): + """ + Perform configuration which is common to root and non-root loggers. 
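configure_handler() above special-cases QueueHandler: 'queue' may name a class, a '()' factory dict, or a ready queue-like object, and 'handlers' lists the downstream handlers that the implicitly created QueueListener fans out to. A sketch of that shape (handler names and the log file path are illustrative):

    import logging.config

    logging.config.dictConfig({
        'version': 1,
        'handlers': {
            'file': {'class': 'logging.FileHandler', 'filename': 'app.log'},
            'queued': {
                'class': 'logging.handlers.QueueHandler',
                'queue': {'()': 'queue.Queue', 'maxsize': 1000},
                'handlers': ['file'],          # resolved to handler objects
                'respect_handler_level': True,
            },
        },
        'root': {'level': 'INFO', 'handlers': ['queued']},
    })
    # The listener is attached as handler.listener; start it before logging:
    logging.getHandlerByName('queued').listener.start()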
+ """ + level = config.get('level', None) + if level is not None: + logger.setLevel(logging._checkLevel(level)) + if not incremental: + #Remove any existing handlers + for h in logger.handlers[:]: + logger.removeHandler(h) + handlers = config.get('handlers', None) + if handlers: + self.add_handlers(logger, handlers) + filters = config.get('filters', None) + if filters: + self.add_filters(logger, filters) + + def configure_logger(self, name, config, incremental=False): + """Configure a non-root logger from a dictionary.""" + logger = logging.getLogger(name) + self.common_logger_config(logger, config, incremental) + logger.disabled = False + propagate = config.get('propagate', None) + if propagate is not None: + logger.propagate = propagate + + def configure_root(self, config, incremental=False): + """Configure a root logger from a dictionary.""" + root = logging.getLogger() + self.common_logger_config(root, config, incremental) + +dictConfigClass = DictConfigurator + +def dictConfig(config): + """Configure logging using a dictionary.""" + dictConfigClass(config).configure() + + +def listen(port=DEFAULT_LOGGING_CONFIG_PORT, verify=None): + """ + Start up a socket server on the specified port, and listen for new + configurations. + + These will be sent as a file suitable for processing by fileConfig(). + Returns a Thread object on which you can call start() to start the server, + and which you can join() when appropriate. To stop the server, call + stopListening(). + + Use the ``verify`` argument to verify any bytes received across the wire + from a client. If specified, it should be a callable which receives a + single argument - the bytes of configuration data received across the + network - and it should return either ``None``, to indicate that the + passed in bytes could not be verified and should be discarded, or a + byte string which is then passed to the configuration machinery as + normal. Note that you can return transformed bytes, e.g. by decrypting + the bytes passed in. + """ + + class ConfigStreamHandler(StreamRequestHandler): + """ + Handler for a logging configuration request. + + It expects a completely new logging configuration and uses fileConfig + to install it. + """ + def handle(self): + """ + Handle a request. + + Each request is expected to be a 4-byte length, packed using + struct.pack(">L", n), followed by the config file. + Uses fileConfig() to do the grunt work. + """ + try: + conn = self.connection + chunk = conn.recv(4) + if len(chunk) == 4: + slen = struct.unpack(">L", chunk)[0] + chunk = self.connection.recv(slen) + while len(chunk) < slen: + chunk = chunk + conn.recv(slen - len(chunk)) + if self.server.verify is not None: + chunk = self.server.verify(chunk) + if chunk is not None: # verified, can process + chunk = chunk.decode("utf-8") + try: + import json + d =json.loads(chunk) + assert isinstance(d, dict) + dictConfig(d) + except Exception: + #Apply new configuration. + + file = io.StringIO(chunk) + try: + fileConfig(file) + except Exception: + traceback.print_exc() + if self.server.ready: + self.server.ready.set() + except OSError as e: + if e.errno != RESET_ERROR: + raise + + class ConfigSocketReceiver(ThreadingTCPServer): + """ + A simple TCP socket-based logging config receiver. 
+ """ + + allow_reuse_address = True + allow_reuse_port = False + + def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, + handler=None, ready=None, verify=None): + ThreadingTCPServer.__init__(self, (host, port), handler) + with logging._lock: + self.abort = 0 + self.timeout = 1 + self.ready = ready + self.verify = verify + + def serve_until_stopped(self): + import select + abort = 0 + while not abort: + rd, wr, ex = select.select([self.socket.fileno()], + [], [], + self.timeout) + if rd: + self.handle_request() + with logging._lock: + abort = self.abort + self.server_close() + + class Server(threading.Thread): + + def __init__(self, rcvr, hdlr, port, verify): + super(Server, self).__init__() + self.rcvr = rcvr + self.hdlr = hdlr + self.port = port + self.verify = verify + self.ready = threading.Event() + + def run(self): + server = self.rcvr(port=self.port, handler=self.hdlr, + ready=self.ready, + verify=self.verify) + if self.port == 0: + self.port = server.server_address[1] + self.ready.set() + global _listener + with logging._lock: + _listener = server + server.serve_until_stopped() + + return Server(ConfigSocketReceiver, ConfigStreamHandler, port, verify) + +def stopListening(): + """ + Stop the listening server which was created with a call to listen(). + """ + global _listener + with logging._lock: + if _listener: + _listener.abort = 1 + _listener = None diff --git a/Python314_4_x86_Template/Lib/logging/handlers.py b/Python314_4_x86_Template/Lib/logging/handlers.py new file mode 100644 index 00000000..4a07258f --- /dev/null +++ b/Python314_4_x86_Template/Lib/logging/handlers.py @@ -0,0 +1,1645 @@ +# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Additional handlers for the logging package for Python. The core package is +based on PEP 282 and comments thereto in comp.lang.python. + +Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging.handlers' and log away! +""" + +import copy +import io +import logging +import os +import pickle +import queue +import re +import socket +import struct +import threading +import time + +# +# Some constants... +# + +DEFAULT_TCP_LOGGING_PORT = 9020 +DEFAULT_UDP_LOGGING_PORT = 9021 +DEFAULT_HTTP_LOGGING_PORT = 9022 +DEFAULT_SOAP_LOGGING_PORT = 9023 +SYSLOG_UDP_PORT = 514 +SYSLOG_TCP_PORT = 514 + +_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day + +class BaseRotatingHandler(logging.FileHandler): + """ + Base class for handlers that rotate log files at a certain point. + Not meant to be instantiated directly. 
Instead, use RotatingFileHandler + or TimedRotatingFileHandler. + """ + namer = None + rotator = None + + def __init__(self, filename, mode, encoding=None, delay=False, errors=None): + """ + Use the specified filename for streamed logging + """ + logging.FileHandler.__init__(self, filename, mode=mode, + encoding=encoding, delay=delay, + errors=errors) + self.mode = mode + self.encoding = encoding + self.errors = errors + + def emit(self, record): + """ + Emit a record. + + Output the record to the file, catering for rollover as described + in doRollover(). + """ + try: + if self.shouldRollover(record): + self.doRollover() + logging.FileHandler.emit(self, record) + except Exception: + self.handleError(record) + + def rotation_filename(self, default_name): + """ + Modify the filename of a log file when rotating. + + This is provided so that a custom filename can be provided. + + The default implementation calls the 'namer' attribute of the + handler, if it's callable, passing the default name to + it. If the attribute isn't callable (the default is None), the name + is returned unchanged. + + :param default_name: The default name for the log file. + """ + if not callable(self.namer): + result = default_name + else: + result = self.namer(default_name) + return result + + def rotate(self, source, dest): + """ + When rotating, rotate the current log. + + The default implementation calls the 'rotator' attribute of the + handler, if it's callable, passing the source and dest arguments to + it. If the attribute isn't callable (the default is None), the source + is simply renamed to the destination. + + :param source: The source filename. This is normally the base + filename, e.g. 'test.log' + :param dest: The destination filename. This is normally + what the source is rotated to, e.g. 'test.log.1'. + """ + if not callable(self.rotator): + # Issue 18940: A file may not have been created if delay is True. + if os.path.exists(source): + os.rename(source, dest) + else: + self.rotator(source, dest) + +class RotatingFileHandler(BaseRotatingHandler): + """ + Handler for logging to a set of files, which switches from one file + to the next when the current file reaches a certain size. + """ + def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, + encoding=None, delay=False, errors=None): + """ + Open the specified file and use it as the stream for logging. + + By default, the file grows indefinitely. You can specify particular + values of maxBytes and backupCount to allow the file to rollover at + a predetermined size. + + Rollover occurs whenever the current log file is nearly maxBytes in + length. If backupCount is >= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + """ + # If rotation/rollover is wanted, it doesn't make sense to use another + # mode. 
If for example 'w' were specified, then if there were multiple
+        # runs of the calling application, the logs from previous runs would be
+        # lost if the 'w' is respected, because the log file would be truncated
+        # on each run.
+        if maxBytes > 0:
+            mode = 'a'
+        if "b" not in mode:
+            encoding = io.text_encoding(encoding)
+        BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
+                                     delay=delay, errors=errors)
+        self.maxBytes = maxBytes
+        self.backupCount = backupCount
+
+    def doRollover(self):
+        """
+        Do a rollover, as described in __init__().
+        """
+        if self.stream:
+            self.stream.close()
+            self.stream = None
+        if self.backupCount > 0:
+            for i in range(self.backupCount - 1, 0, -1):
+                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
+                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
+                                                        i + 1))
+                if os.path.exists(sfn):
+                    if os.path.exists(dfn):
+                        os.remove(dfn)
+                    os.rename(sfn, dfn)
+            dfn = self.rotation_filename(self.baseFilename + ".1")
+            if os.path.exists(dfn):
+                os.remove(dfn)
+            self.rotate(self.baseFilename, dfn)
+        if not self.delay:
+            self.stream = self._open()
+
+    def shouldRollover(self, record):
+        """
+        Determine if rollover should occur.
+
+        Basically, see if the supplied record would cause the file to exceed
+        the size limit we have.
+        """
+        if self.stream is None:                 # delay was set...
+            self.stream = self._open()
+        if self.maxBytes > 0:                   # are we rolling over?
+            try:
+                pos = self.stream.tell()
+            except io.UnsupportedOperation:
+                # gh-143237: Never rollover a named pipe.
+                return False
+            if not pos:
+                # gh-116263: Never rollover an empty file
+                return False
+            msg = "%s\n" % self.format(record)
+            if pos + len(msg) >= self.maxBytes:
+                # See bpo-45401: Never rollover anything other than regular files
+                if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
+                    return False
+                return True
+        return False
+
+class TimedRotatingFileHandler(BaseRotatingHandler):
+    """
+    Handler for logging to a file, rotating the log file at certain timed
+    intervals.
+
+    If backupCount is > 0, when rollover is done, no more than backupCount
+    files are kept - the oldest ones are deleted.
+    """
+    def __init__(self, filename, when='h', interval=1, backupCount=0,
+                 encoding=None, delay=False, utc=False, atTime=None,
+                 errors=None):
+        encoding = io.text_encoding(encoding)
+        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
+                                     delay=delay, errors=errors)
+        self.when = when.upper()
+        self.backupCount = backupCount
+        self.utc = utc
+        self.atTime = atTime
+        # Calculate the real rollover interval, which is just the number of
+        # seconds between rollovers.  Also set the filename suffix used when
+        # a rollover occurs.  Current 'when' events supported:
+        # S - Seconds
+        # M - Minutes
+        # H - Hours
+        # D - Days
+        # midnight - roll over at midnight
+        # W{0-6} - roll over on a certain day; 0 - Monday
+        #
+        # Case of the 'when' specifier is not important; lower or upper case
+        # will work.
+        if self.when == 'S':
+            self.interval = 1 # one second
+            self.suffix = "%Y-%m-%d_%H-%M-%S"
+            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(?!\d)"
+        elif self.when == 'M':
+            self.interval = 60 # one minute
+            self.suffix = "%Y-%m-%d_%H-%M"
+            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(?!\d)"
+        elif self.when == 'H':
+            self.interval = 60 * 60 # one hour
+            self.suffix = "%Y-%m-%d_%H"
+            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}(?!\d)"
+        elif self.when == 'D' or self.when == 'MIDNIGHT':
+            self.interval = 60 * 60 * 24 # one day
+            self.suffix = "%Y-%m-%d"
+            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}(?!\d)"
+        elif self.when.startswith('W'):
+            self.interval = 60 * 60 * 24 * 7 # one week
+            if len(self.when) != 2:
+                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
+            if self.when[1] < '0' or self.when[1] > '6':
+                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
+            self.dayOfWeek = int(self.when[1])
+            self.suffix = "%Y-%m-%d"
+            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}(?!\d)"
+        else:
+            raise ValueError("Invalid rollover interval specified: %s" % self.when)
+
+        self.extMatch = re.compile(extMatch, re.ASCII)
+        self.interval = self.interval * interval # multiply by units requested
+        # The following line added because the filename passed in could be a
+        # path object (see Issue #27493), but self.baseFilename will be a string
+        filename = self.baseFilename
+        if os.path.exists(filename):
+            t = int(os.stat(filename).st_mtime)
+        else:
+            t = int(time.time())
+        self.rolloverAt = self.computeRollover(t)
+
+    def computeRollover(self, currentTime):
+        """
+        Work out the rollover time based on the specified time.
+        """
+        result = currentTime + self.interval
+        # If we are rolling over at midnight or weekly, then the interval is already known.
+        # What we need to figure out is WHEN the next interval is.  In other words,
+        # if you are rolling over at midnight, then your base interval is 1 day,
+        # but you want to start that one day clock at midnight, not now.  So, we
+        # have to fudge the rolloverAt value in order to trigger the first rollover
+        # at the right time.  After that, the regular interval will take care of
+        # the rest.  Note that this code doesn't care about leap seconds. :)
+        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
+            # This could be done with less code, but I wanted it to be clear
+            if self.utc:
+                t = time.gmtime(currentTime)
+            else:
+                t = time.localtime(currentTime)
+            currentHour = t[3]
+            currentMinute = t[4]
+            currentSecond = t[5]
+            currentDay = t[6]
+            # r is the number of seconds left between now and the next rotation
+            if self.atTime is None:
+                rotate_ts = _MIDNIGHT
+            else:
+                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
+                    self.atTime.second)
+
+            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
+                currentSecond)
+            if r <= 0:
+                # Rotate time is before the current time (for example when
+                # self.rotateAt is 13:45 and it now 14:15), rotation is
+                # tomorrow.
+                r += _MIDNIGHT
+                currentDay = (currentDay + 1) % 7
+            result = currentTime + r
+            # If we are rolling over on a certain day, add in the number of days until
+            # the next rollover, but offset by 1 since we just calculated the time
+            # until the next day starts.  There are three cases:
+            # Case 1) The day to rollover is today; in this case, do nothing
+            # Case 2) The day to rollover is further in the interval (i.e., today is
+            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
+            #         next rollover is simply 6 - 2 - 1, or 3.
+            # Case 3) The day to rollover is behind us in the interval (i.e., today
+            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
+            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
+            #         number of days left in the current week (1) plus the number
+            #         of days in the next week until the rollover day (3).
+            if self.when.startswith('W'):
+                day = currentDay  # 0 is Monday
+                if day != self.dayOfWeek:
+                    if day < self.dayOfWeek:
+                        daysToWait = self.dayOfWeek - day
+                    else:
+                        daysToWait = 6 - day + self.dayOfWeek + 1
+                    result += daysToWait * _MIDNIGHT
+            if not self.utc:
+                dstNow = time.localtime(currentTime)[-1]
+                dstAtRollover = time.localtime(result)[-1]
+                if dstNow != dstAtRollover:
+                    # DST switches between now and rollover - adjust
+                    if not dstNow:  # DST kicks in before next rollover, so deduct an hour
+                        addend = -3600
+                    else:           # DST bows out before next rollover, so add an hour
+                        addend = 3600
+                    result += addend
+        return result
+
+    def shouldRollover(self, record):
+        """
+        Determine if rollover should occur.
+
+        record is not used, as we are just comparing times, but it is needed so
+        the method signatures are the same
+        """
+        t = int(time.time())
+        if t >= self.rolloverAt:
+            # See #89564: Never rollover anything other than regular files
+            if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
+                # The file is not a regular file, so do not rollover, but do
+                # set the next rollover time to avoid repeated checks.
+ self.rolloverAt = self.computeRollover(t) + return False + + return True + return False + + def getFilesToDelete(self): + """ + Determine the files to delete when rolling over. + + More specific than the earlier method, which just used glob.glob(). + """ + dirName, baseName = os.path.split(self.baseFilename) + fileNames = os.listdir(dirName) + result = [] + if self.namer is None: + prefix = baseName + '.' + plen = len(prefix) + for fileName in fileNames: + if fileName[:plen] == prefix: + suffix = fileName[plen:] + if self.extMatch.fullmatch(suffix): + result.append(os.path.join(dirName, fileName)) + else: + for fileName in fileNames: + # Our files could be just about anything after custom naming, + # but they should contain the datetime suffix. + # Try to find the datetime suffix in the file name and verify + # that the file name can be generated by this handler. + m = self.extMatch.search(fileName) + while m: + dfn = self.namer(self.baseFilename + "." + m[0]) + if os.path.basename(dfn) == fileName: + result.append(os.path.join(dirName, fileName)) + break + m = self.extMatch.search(fileName, m.start() + 1) + + if len(result) < self.backupCount: + result = [] + else: + result.sort() + result = result[:len(result) - self.backupCount] + return result + + def doRollover(self): + """ + do a rollover; in this case, a date/time stamp is appended to the filename + when the rollover happens. However, you want the file to be named for the + start of the interval, not the current time. If there is a backup count, + then we have to get a list of matching filenames, sort them and remove + the one with the oldest suffix. + """ + # get the time that this sequence started at and make it a TimeTuple + currentTime = int(time.time()) + t = self.rolloverAt - self.interval + if self.utc: + timeTuple = time.gmtime(t) + else: + timeTuple = time.localtime(t) + dstNow = time.localtime(currentTime)[-1] + dstThen = timeTuple[-1] + if dstNow != dstThen: + if dstNow: + addend = 3600 + else: + addend = -3600 + timeTuple = time.localtime(t + addend) + dfn = self.rotation_filename(self.baseFilename + "." + + time.strftime(self.suffix, timeTuple)) + if os.path.exists(dfn): + # Already rolled over. + return + + if self.stream: + self.stream.close() + self.stream = None + self.rotate(self.baseFilename, dfn) + if self.backupCount > 0: + for s in self.getFilesToDelete(): + os.remove(s) + if not self.delay: + self.stream = self._open() + self.rolloverAt = self.computeRollover(currentTime) + +class WatchedFileHandler(logging.FileHandler): + """ + A handler for logging to a file, which watches the file + to see if it has changed while in use. This can happen because of + usage of programs such as newsyslog and logrotate which perform + log file rotation. This handler, intended for use under Unix, + watches the file to see if it has changed since the last emit. + (A file has changed if its device or inode have changed.) + If it has changed, the old file stream is closed, and the file + opened to get a new stream. + + This handler is not appropriate for use under Windows, because + under Windows open files cannot be moved or renamed - logging + opens the files with exclusive locks - and so there is no need + for such a handler. + + This handler is based on a suggestion and patch by Chad J. + Schroeder. 
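The two rotation policies above plug into the standard logging API in the usual way. A minimal usage sketch; the logger name and file paths are placeholders:

    import logging
    from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler

    logger = logging.getLogger("demo")  # placeholder logger name
    logger.setLevel(logging.DEBUG)

    # Size-based policy: rotate near 1 MB, keep app.log.1 ... app.log.5.
    logger.addHandler(RotatingFileHandler("app.log", maxBytes=1_000_000,
                                          backupCount=5))

    # Time-based policy: rotate at midnight, keep seven dated backups.
    logger.addHandler(TimedRotatingFileHandler("timed.log", when="midnight",
                                               backupCount=7))

    logger.info("written to both files, each rotated by its own policy")

Each handler applies its rollover policy independently, so a single logger can fan out to both at once.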
+ """ + def __init__(self, filename, mode='a', encoding=None, delay=False, + errors=None): + if "b" not in mode: + encoding = io.text_encoding(encoding) + logging.FileHandler.__init__(self, filename, mode=mode, + encoding=encoding, delay=delay, + errors=errors) + self.dev, self.ino = -1, -1 + self._statstream() + + def _statstream(self): + if self.stream is None: + return + sres = os.fstat(self.stream.fileno()) + self.dev = sres.st_dev + self.ino = sres.st_ino + + def reopenIfNeeded(self): + """ + Reopen log file if needed. + + Checks if the underlying file has changed, and if it + has, close the old stream and reopen the file to get the + current stream. + """ + if self.stream is None: + return + + # Reduce the chance of race conditions by stat'ing by path only + # once and then fstat'ing our new fd if we opened a new log stream. + # See issue #14632: Thanks to John Mulligan for the problem report + # and patch. + try: + # stat the file by path, checking for existence + sres = os.stat(self.baseFilename) + + # compare file system stat with that of our stream file handle + reopen = (sres.st_dev != self.dev or sres.st_ino != self.ino) + except FileNotFoundError: + reopen = True + + if not reopen: + return + + # we have an open file handle, clean it up + self.stream.flush() + self.stream.close() + self.stream = None # See Issue #21742: _open () might fail. + + # open a new file handle and get new stat info from that fd + self.stream = self._open() + self._statstream() + + def emit(self, record): + """ + Emit a record. + + If underlying file has changed, reopen the file before emitting the + record to it. + """ + self.reopenIfNeeded() + logging.FileHandler.emit(self, record) + + +class SocketHandler(logging.Handler): + """ + A handler class which writes logging records, in pickle format, to + a streaming socket. The socket is kept open across logging calls. + If the peer resets it, an attempt is made to reconnect on the next call. + The pickle which is sent is that of the LogRecord's attribute dictionary + (__dict__), so that the receiver does not need to have the logging module + installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + """ + + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + + When the attribute *closeOnError* is set to True - if a socket error + occurs, the socket is silently closed and then reopened on the next + logging call. + """ + logging.Handler.__init__(self) + self.host = host + self.port = port + if port is None: + self.address = host + else: + self.address = (host, port) + self.sock = None + self.closeOnError = False + self.retryTime = None + # + # Exponential backoff parameters. + # + self.retryStart = 1.0 + self.retryMax = 30.0 + self.retryFactor = 2.0 + + def makeSocket(self, timeout=1): + """ + A factory method which allows subclasses to define the precise + type of socket they want. + """ + if self.port is not None: + result = socket.create_connection(self.address, timeout=timeout) + else: + result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + result.settimeout(timeout) + try: + result.connect(self.address) + except OSError: + result.close() # Issue 19182 + raise + return result + + def createSocket(self): + """ + Try to create a socket, using an exponential backoff with + a max retry time. Thanks to Robert Olson for the original patch + (SF #815911) which has been slightly refactored. 
+ """ + now = time.time() + # Either retryTime is None, in which case this + # is the first time back after a disconnect, or + # we've waited long enough. + if self.retryTime is None: + attempt = True + else: + attempt = (now >= self.retryTime) + if attempt: + try: + self.sock = self.makeSocket() + self.retryTime = None # next time, no delay before trying + except OSError: + #Creation failed, so set the retry time and return. + if self.retryTime is None: + self.retryPeriod = self.retryStart + else: + self.retryPeriod = self.retryPeriod * self.retryFactor + if self.retryPeriod > self.retryMax: + self.retryPeriod = self.retryMax + self.retryTime = now + self.retryPeriod + + def send(self, s): + """ + Send a pickled string to the socket. + + This function allows for partial sends which can happen when the + network is busy. + """ + if self.sock is None: + self.createSocket() + #self.sock can be None either because we haven't reached the retry + #time yet, or because we have reached the retry time and retried, + #but are still unable to connect. + if self.sock: + try: + self.sock.sendall(s) + except OSError: #pragma: no cover + self.sock.close() + self.sock = None # so we can call createSocket next time + + def makePickle(self, record): + """ + Pickles the record in binary format with a length prefix, and + returns it ready for transmission across the socket. + """ + ei = record.exc_info + if ei: + # just to get traceback text into record.exc_text ... + dummy = self.format(record) + # See issue #14436: If msg or args are objects, they may not be + # available on the receiving end. So we convert the msg % args + # to a string, save it as msg and zap the args. + d = dict(record.__dict__) + d['msg'] = record.getMessage() + d['args'] = None + d['exc_info'] = None + # Issue #25685: delete 'message' if present: redundant with 'msg' + d.pop('message', None) + s = pickle.dumps(d, 1) + slen = struct.pack(">L", len(s)) + return slen + s + + def handleError(self, record): + """ + Handle an error during logging. + + An error has occurred during logging. Most likely cause - + connection lost. Close the socket so that we can retry on the + next event. + """ + if self.closeOnError and self.sock: + self.sock.close() + self.sock = None #try to reconnect next time + else: + logging.Handler.handleError(self, record) + + def emit(self, record): + """ + Emit a record. + + Pickles the record and writes it to the socket in binary format. + If there is an error with the socket, silently drop the packet. + If there was a problem with the socket, re-establishes the + socket. + """ + try: + s = self.makePickle(record) + self.send(s) + except Exception: + self.handleError(record) + + def close(self): + """ + Closes the socket. + """ + with self.lock: + sock = self.sock + if sock: + self.sock = None + sock.close() + logging.Handler.close(self) + +class DatagramHandler(SocketHandler): + """ + A handler class which writes logging records, in pickle format, to + a datagram socket. The pickle which is sent is that of the LogRecord's + attribute dictionary (__dict__), so that the receiver does not need to + have the logging module installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + """ + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. 
+ """ + SocketHandler.__init__(self, host, port) + self.closeOnError = False + + def makeSocket(self): + """ + The factory method of SocketHandler is here overridden to create + a UDP socket (SOCK_DGRAM). + """ + if self.port is None: + family = socket.AF_UNIX + else: + family = socket.AF_INET + s = socket.socket(family, socket.SOCK_DGRAM) + return s + + def send(self, s): + """ + Send a pickled string to a socket. + + This function no longer allows for partial sends which can happen + when the network is busy - UDP does not guarantee delivery and + can deliver packets out of sequence. + """ + if self.sock is None: + self.createSocket() + self.sock.sendto(s, self.address) + +class SysLogHandler(logging.Handler): + """ + A handler class which sends formatted logging records to a syslog + server. Based on Sam Rushing's syslog module: + http://www.nightmare.com/squirl/python-ext/misc/syslog.py + Contributed by Nicolas Untz (after which minor refactoring changes + have been made). + """ + + # from : + # ====================================================================== + # priorities/facilities are encoded into a single 32-bit quantity, where + # the bottom 3 bits are the priority (0-7) and the top 28 bits are the + # facility (0-big number). Both the priorities and the facilities map + # roughly one-to-one to strings in the syslogd(8) source code. This + # mapping is included in this file. + # + # priorities (these are ordered) + + LOG_EMERG = 0 # system is unusable + LOG_ALERT = 1 # action must be taken immediately + LOG_CRIT = 2 # critical conditions + LOG_ERR = 3 # error conditions + LOG_WARNING = 4 # warning conditions + LOG_NOTICE = 5 # normal but significant condition + LOG_INFO = 6 # informational + LOG_DEBUG = 7 # debug-level messages + + # facility codes + LOG_KERN = 0 # kernel messages + LOG_USER = 1 # random user-level messages + LOG_MAIL = 2 # mail system + LOG_DAEMON = 3 # system daemons + LOG_AUTH = 4 # security/authorization messages + LOG_SYSLOG = 5 # messages generated internally by syslogd + LOG_LPR = 6 # line printer subsystem + LOG_NEWS = 7 # network news subsystem + LOG_UUCP = 8 # UUCP subsystem + LOG_CRON = 9 # clock daemon + LOG_AUTHPRIV = 10 # security/authorization messages (private) + LOG_FTP = 11 # FTP daemon + LOG_NTP = 12 # NTP subsystem + LOG_SECURITY = 13 # Log audit + LOG_CONSOLE = 14 # Log alert + LOG_SOLCRON = 15 # Scheduling daemon (Solaris) + + # other codes through 15 reserved for system use + LOG_LOCAL0 = 16 # reserved for local use + LOG_LOCAL1 = 17 # reserved for local use + LOG_LOCAL2 = 18 # reserved for local use + LOG_LOCAL3 = 19 # reserved for local use + LOG_LOCAL4 = 20 # reserved for local use + LOG_LOCAL5 = 21 # reserved for local use + LOG_LOCAL6 = 22 # reserved for local use + LOG_LOCAL7 = 23 # reserved for local use + + priority_names = { + "alert": LOG_ALERT, + "crit": LOG_CRIT, + "critical": LOG_CRIT, + "debug": LOG_DEBUG, + "emerg": LOG_EMERG, + "err": LOG_ERR, + "error": LOG_ERR, # DEPRECATED + "info": LOG_INFO, + "notice": LOG_NOTICE, + "panic": LOG_EMERG, # DEPRECATED + "warn": LOG_WARNING, # DEPRECATED + "warning": LOG_WARNING, + } + + facility_names = { + "auth": LOG_AUTH, + "authpriv": LOG_AUTHPRIV, + "console": LOG_CONSOLE, + "cron": LOG_CRON, + "daemon": LOG_DAEMON, + "ftp": LOG_FTP, + "kern": LOG_KERN, + "lpr": LOG_LPR, + "mail": LOG_MAIL, + "news": LOG_NEWS, + "ntp": LOG_NTP, + "security": LOG_SECURITY, + "solaris-cron": LOG_SOLCRON, + "syslog": LOG_SYSLOG, + "user": LOG_USER, + "uucp": LOG_UUCP, + "local0": LOG_LOCAL0, + 
"local1": LOG_LOCAL1, + "local2": LOG_LOCAL2, + "local3": LOG_LOCAL3, + "local4": LOG_LOCAL4, + "local5": LOG_LOCAL5, + "local6": LOG_LOCAL6, + "local7": LOG_LOCAL7, + } + + # Originally added to work around GH-43683. Unnecessary since GH-50043 but kept + # for backwards compatibility. + priority_map = { + "DEBUG" : "debug", + "INFO" : "info", + "WARNING" : "warning", + "ERROR" : "error", + "CRITICAL" : "critical" + } + + def __init__(self, address=('localhost', SYSLOG_UDP_PORT), + facility=LOG_USER, socktype=None, timeout=None): + """ + Initialize a handler. + + If address is specified as a string, a UNIX socket is used. To log to a + local syslogd, "SysLogHandler(address="/dev/log")" can be used. + If facility is not specified, LOG_USER is used. If socktype is + specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific + socket type will be used. For Unix sockets, you can also specify a + socktype of None, in which case socket.SOCK_DGRAM will be used, falling + back to socket.SOCK_STREAM. + """ + logging.Handler.__init__(self) + + self.address = address + self.facility = facility + self.socktype = socktype + self.timeout = timeout + self.socket = None + self.createSocket() + + def _connect_unixsocket(self, address): + use_socktype = self.socktype + if use_socktype is None: + use_socktype = socket.SOCK_DGRAM + self.socket = socket.socket(socket.AF_UNIX, use_socktype) + try: + self.socket.connect(address) + # it worked, so set self.socktype to the used type + self.socktype = use_socktype + except OSError: + self.socket.close() + if self.socktype is not None: + # user didn't specify falling back, so fail + raise + use_socktype = socket.SOCK_STREAM + self.socket = socket.socket(socket.AF_UNIX, use_socktype) + try: + self.socket.connect(address) + # it worked, so set self.socktype to the used type + self.socktype = use_socktype + except OSError: + self.socket.close() + raise + + def createSocket(self): + """ + Try to create a socket and, if it's not a datagram socket, connect it + to the other end. This method is called during handler initialization, + but it's not regarded as an error if the other end isn't listening yet + --- the method will be called again when emitting an event, + if there is no socket at that point. + """ + address = self.address + socktype = self.socktype + + if isinstance(address, str): + self.unixsocket = True + # Syslog server may be unavailable during handler initialisation. + # C's openlog() function also ignores connection errors. + # Moreover, we ignore these errors while logging, so it's not worse + # to ignore it also here. + try: + self._connect_unixsocket(address) + except OSError: + pass + else: + self.unixsocket = False + if socktype is None: + socktype = socket.SOCK_DGRAM + host, port = address + ress = socket.getaddrinfo(host, port, 0, socktype) + if not ress: + raise OSError("getaddrinfo returns an empty list") + for res in ress: + af, socktype, proto, _, sa = res + err = sock = None + try: + sock = socket.socket(af, socktype, proto) + if self.timeout: + sock.settimeout(self.timeout) + if socktype == socket.SOCK_STREAM: + sock.connect(sa) + break + except OSError as exc: + err = exc + if sock is not None: + sock.close() + if err is not None: + raise err + self.socket = sock + self.socktype = socktype + + def encodePriority(self, facility, priority): + """ + Encode the facility and priority. 
You can pass in strings or
+        integers - if strings are passed, the facility_names and
+        priority_names mapping dictionaries are used to convert them to
+        integers.
+        """
+        if isinstance(facility, str):
+            facility = self.facility_names[facility]
+        if isinstance(priority, str):
+            priority = self.priority_names[priority]
+        return (facility << 3) | priority
+
+    def close(self):
+        """
+        Closes the socket.
+        """
+        with self.lock:
+            sock = self.socket
+            if sock:
+                self.socket = None
+                sock.close()
+        logging.Handler.close(self)
+
+    def mapPriority(self, levelName):
+        """
+        Map a logging level name to a key in the priority_names map.
+        This is useful in two scenarios: when custom levels are being
+        used, and in the case where you can't do a straightforward
+        mapping by lowercasing the logging level name because of locale-
+        specific issues (see SF #1524081).
+        """
+        return self.priority_map.get(levelName, "warning")
+
+    ident = ''          # prepended to all messages
+    append_nul = True   # some old syslog daemons expect a NUL terminator
+
+    def emit(self, record):
+        """
+        Emit a record.
+
+        The record is formatted, and then sent to the syslog server. If
+        exception information is present, it is NOT sent to the server.
+        """
+        try:
+            msg = self.format(record)
+            if self.ident:
+                msg = self.ident + msg
+            if self.append_nul:
+                msg += '\000'
+
+            # We need to convert record level to lowercase, maybe this will
+            # change in the future.
+            prio = '<%d>' % self.encodePriority(self.facility,
+                                                self.mapPriority(record.levelname))
+            prio = prio.encode('utf-8')
+            # Message is a string. Convert to bytes as required by RFC 5424
+            msg = msg.encode('utf-8')
+            msg = prio + msg
+
+            if not self.socket:
+                self.createSocket()
+
+            if self.unixsocket:
+                try:
+                    self.socket.send(msg)
+                except OSError:
+                    self.socket.close()
+                    self._connect_unixsocket(self.address)
+                    self.socket.send(msg)
+            elif self.socktype == socket.SOCK_DGRAM:
+                self.socket.sendto(msg, self.address)
+            else:
+                self.socket.sendall(msg)
+        except Exception:
+            self.handleError(record)
+
+class SMTPHandler(logging.Handler):
+    """
+    A handler class which sends an SMTP email for each logging event.
+    """
+    def __init__(self, mailhost, fromaddr, toaddrs, subject,
+                 credentials=None, secure=None, timeout=5.0):
+        """
+        Initialize the handler.
+
+        Initialize the instance with the from and to addresses and subject
+        line of the email. To specify a non-standard SMTP port, use the
+        (host, port) tuple format for the mailhost argument. To specify
+        authentication credentials, supply a (username, password) tuple
+        for the credentials argument. To specify the use of a secure
+        protocol (TLS), pass in a tuple for the secure argument. This will
+        only be used when authentication credentials are supplied. The tuple
+        will be either an empty tuple, or a single-value tuple with the name
+        of a keyfile, or a 2-value tuple with the names of the keyfile and
+        certificate file. (This tuple is passed to the
+        `ssl.SSLContext.load_cert_chain` method).
+        A timeout in seconds can be specified for the SMTP connection (the
+        default is 5 seconds).
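The (facility << 3) | priority encoding above is what produces the <PRI> prefix that SysLogHandler.emit() puts on the wire. A small worked example; the address is a placeholder, and nothing needs to be listening for a UDP-based handler to be constructed:

    from logging.handlers import SysLogHandler

    # LOG_USER is 1 and "warning" maps to 4, so the PRI value is
    # (1 << 3) | 4 == 12 and the datagram starts with b"<12>".
    handler = SysLogHandler(address=("localhost", 514))
    assert handler.encodePriority("user", "warning") == 12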
+ """ + logging.Handler.__init__(self) + if isinstance(mailhost, (list, tuple)): + self.mailhost, self.mailport = mailhost + else: + self.mailhost, self.mailport = mailhost, None + if isinstance(credentials, (list, tuple)): + self.username, self.password = credentials + else: + self.username = None + self.fromaddr = fromaddr + if isinstance(toaddrs, str): + toaddrs = [toaddrs] + self.toaddrs = toaddrs + self.subject = subject + self.secure = secure + self.timeout = timeout + + def getSubject(self, record): + """ + Determine the subject for the email. + + If you want to specify a subject line which is record-dependent, + override this method. + """ + return self.subject + + def emit(self, record): + """ + Emit a record. + + Format the record and send it to the specified addressees. + """ + try: + import smtplib + from email.message import EmailMessage + import email.utils + + port = self.mailport + if not port: + port = smtplib.SMTP_PORT + smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout) + msg = EmailMessage() + msg['From'] = self.fromaddr + msg['To'] = ','.join(self.toaddrs) + msg['Subject'] = self.getSubject(record) + msg['Date'] = email.utils.localtime() + msg.set_content(self.format(record)) + if self.username: + if self.secure is not None: + import ssl + + try: + keyfile = self.secure[0] + except IndexError: + keyfile = None + + try: + certfile = self.secure[1] + except IndexError: + certfile = None + + context = ssl._create_stdlib_context( + certfile=certfile, keyfile=keyfile + ) + smtp.ehlo() + smtp.starttls(context=context) + smtp.ehlo() + smtp.login(self.username, self.password) + smtp.send_message(msg) + smtp.quit() + except Exception: + self.handleError(record) + +class NTEventLogHandler(logging.Handler): + """ + A handler class which sends events to the NT Event Log. Adds a + registry entry for the specified application name. If no dllname is + provided, win32service.pyd (which contains some basic message + placeholders) is used. Note that use of these placeholders will make + your event logs big, as the entire message source is held in the log. + If you want slimmer logs, you have to pass in the name of your own DLL + which contains the message definitions you want to use in the event log. + """ + def __init__(self, appname, dllname=None, logtype="Application"): + logging.Handler.__init__(self) + try: + import win32evtlogutil, win32evtlog + self.appname = appname + self._welu = win32evtlogutil + if not dllname: + dllname = os.path.split(self._welu.__file__) + dllname = os.path.split(dllname[0]) + dllname = os.path.join(dllname[0], r'win32service.pyd') + self.dllname = dllname + self.logtype = logtype + # Administrative privileges are required to add a source to the registry. + # This may not be available for a user that just wants to add to an + # existing source - handle this specific case. + try: + self._welu.AddSourceToRegistry(appname, dllname, logtype) + except Exception as e: + # This will probably be a pywintypes.error. 
Only raise if it's not + # an "access denied" error, else let it pass + if getattr(e, 'winerror', None) != 5: # not access denied + raise + self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE + self.typemap = { + logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, + logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, + logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, + } + except ImportError: + print("The Python Win32 extensions for NT (service, event "\ + "logging) appear not to be available.") + self._welu = None + + def getMessageID(self, record): + """ + Return the message ID for the event record. If you are using your + own messages, you could do this by having the msg passed to the + logger being an ID rather than a formatting string. Then, in here, + you could use a dictionary lookup to get the message ID. This + version returns 1, which is the base message ID in win32service.pyd. + """ + return 1 + + def getEventCategory(self, record): + """ + Return the event category for the record. + + Override this if you want to specify your own categories. This version + returns 0. + """ + return 0 + + def getEventType(self, record): + """ + Return the event type for the record. + + Override this if you want to specify your own types. This version does + a mapping using the handler's typemap attribute, which is set up in + __init__() to a dictionary which contains mappings for DEBUG, INFO, + WARNING, ERROR and CRITICAL. If you are using your own levels you will + either need to override this method or place a suitable dictionary in + the handler's typemap attribute. + """ + return self.typemap.get(record.levelno, self.deftype) + + def emit(self, record): + """ + Emit a record. + + Determine the message ID, event category and event type. Then + log the message in the NT event log. + """ + if self._welu: + try: + id = self.getMessageID(record) + cat = self.getEventCategory(record) + type = self.getEventType(record) + msg = self.format(record) + self._welu.ReportEvent(self.appname, id, cat, type, [msg]) + except Exception: + self.handleError(record) + + def close(self): + """ + Clean up this handler. + + You can remove the application name from the registry as a + source of event log entries. However, if you do this, you will + not be able to see the events as you intended in the Event Log + Viewer - it needs to be able to access the registry to get the + DLL name. + """ + #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) + logging.Handler.close(self) + +class HTTPHandler(logging.Handler): + """ + A class which sends records to a web server, using either GET or + POST semantics. + """ + def __init__(self, host, url, method="GET", secure=False, credentials=None, + context=None): + """ + Initialize the instance with the host, the request URL, and the method + ("GET" or "POST") + """ + logging.Handler.__init__(self) + method = method.upper() + if method not in ["GET", "POST"]: + raise ValueError("method must be GET or POST") + if not secure and context is not None: + raise ValueError("context parameter only makes sense " + "with secure=True") + self.host = host + self.url = url + self.method = method + self.secure = secure + self.credentials = credentials + self.context = context + + def mapLogRecord(self, record): + """ + Default implementation of mapping the log record into a dict + that is sent as the CGI data. Overwrite in your class. + Contributed by Franz Glasner. 
+ """ + return record.__dict__ + + def getConnection(self, host, secure): + """ + get a HTTP[S]Connection. + + Override when a custom connection is required, for example if + there is a proxy. + """ + import http.client + if secure: + connection = http.client.HTTPSConnection(host, context=self.context) + else: + connection = http.client.HTTPConnection(host) + return connection + + def emit(self, record): + """ + Emit a record. + + Send the record to the web server as a percent-encoded dictionary + """ + try: + import urllib.parse + host = self.host + h = self.getConnection(host, self.secure) + url = self.url + data = urllib.parse.urlencode(self.mapLogRecord(record)) + if self.method == "GET": + if (url.find('?') >= 0): + sep = '&' + else: + sep = '?' + url = url + "%c%s" % (sep, data) + h.putrequest(self.method, url) + # support multiple hosts on one IP address... + # need to strip optional :port from host, if present + i = host.find(":") + if i >= 0: + host = host[:i] + # See issue #30904: putrequest call above already adds this header + # on Python 3.x. + # h.putheader("Host", host) + if self.method == "POST": + h.putheader("Content-type", + "application/x-www-form-urlencoded") + h.putheader("Content-length", str(len(data))) + if self.credentials: + import base64 + s = ('%s:%s' % self.credentials).encode('utf-8') + s = 'Basic ' + base64.b64encode(s).strip().decode('ascii') + h.putheader('Authorization', s) + h.endheaders() + if self.method == "POST": + h.send(data.encode('utf-8')) + h.getresponse() #can't do anything with the result + except Exception: + self.handleError(record) + +class BufferingHandler(logging.Handler): + """ + A handler class which buffers logging records in memory. Whenever each + record is added to the buffer, a check is made to see if the buffer should + be flushed. If it should, then flush() is expected to do what's needed. + """ + def __init__(self, capacity): + """ + Initialize the handler with the buffer size. + """ + logging.Handler.__init__(self) + self.capacity = capacity + self.buffer = [] + + def shouldFlush(self, record): + """ + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. This method can be + overridden to implement custom flushing strategies. + """ + return (len(self.buffer) >= self.capacity) + + def emit(self, record): + """ + Emit a record. + + Append the record. If shouldFlush() tells us to, call flush() to process + the buffer. + """ + self.buffer.append(record) + if self.shouldFlush(record): + self.flush() + + def flush(self): + """ + Override to implement custom flushing behaviour. + + This version just zaps the buffer to empty. + """ + with self.lock: + self.buffer.clear() + + def close(self): + """ + Close the handler. + + This version just flushes and chains to the parent class' close(). + """ + try: + self.flush() + finally: + logging.Handler.close(self) + +class MemoryHandler(BufferingHandler): + """ + A handler class which buffers logging records in memory, periodically + flushing them to a target handler. Flushing occurs whenever the buffer + is full, or when an event of a certain severity or greater is seen. + """ + def __init__(self, capacity, flushLevel=logging.ERROR, target=None, + flushOnClose=True): + """ + Initialize the handler with the buffer size, the level at which + flushing should occur and an optional target. + + Note that without a target being set either here or via setTarget(), + a MemoryHandler is no use to anyone! 
+ + The ``flushOnClose`` argument is ``True`` for backward compatibility + reasons - the old behaviour is that when the handler is closed, the + buffer is flushed, even if the flush level hasn't been exceeded nor the + capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``. + """ + BufferingHandler.__init__(self, capacity) + self.flushLevel = flushLevel + self.target = target + # See Issue #26559 for why this has been added + self.flushOnClose = flushOnClose + + def shouldFlush(self, record): + """ + Check for buffer full or a record at the flushLevel or higher. + """ + return (len(self.buffer) >= self.capacity) or \ + (record.levelno >= self.flushLevel) + + def setTarget(self, target): + """ + Set the target handler for this handler. + """ + with self.lock: + self.target = target + + def flush(self): + """ + For a MemoryHandler, flushing means just sending the buffered + records to the target, if there is one. Override if you want + different behaviour. + + The record buffer is only cleared if a target has been set. + """ + with self.lock: + if self.target: + for record in self.buffer: + self.target.handle(record) + self.buffer.clear() + + def close(self): + """ + Flush, if appropriately configured, set the target to None and lose the + buffer. + """ + try: + if self.flushOnClose: + self.flush() + finally: + with self.lock: + self.target = None + BufferingHandler.close(self) + + +class QueueHandler(logging.Handler): + """ + This handler sends events to a queue. Typically, it would be used together + with a multiprocessing Queue to centralise logging to file in one process + (in a multi-process application), so as to avoid file write contention + between processes. + + This code is new in Python 3.2, but this class can be copy pasted into + user code for use with earlier Python versions. + """ + + def __init__(self, queue): + """ + Initialise an instance, using the passed queue. + """ + logging.Handler.__init__(self) + self.queue = queue + self.listener = None # will be set to listener if configured via dictConfig() + + def enqueue(self, record): + """ + Enqueue a record. + + The base implementation uses put_nowait. You may want to override + this method if you want to use blocking, timeouts or custom queue + implementations. + """ + self.queue.put_nowait(record) + + def prepare(self, record): + """ + Prepare a record for queuing. The object returned by this method is + enqueued. + + The base implementation formats the record to merge the message and + arguments, and removes unpickleable items from the record in-place. + Specifically, it overwrites the record's `msg` and + `message` attributes with the merged message (obtained by + calling the handler's `format` method), and sets the `args`, + `exc_info` and `exc_text` attributes to None. + + You might want to override this method if you want to convert + the record to a dict or JSON string, or send a modified copy + of the record while leaving the original intact. + """ + # The format operation gets traceback text into record.exc_text + # (if there's exception data), and also returns the formatted + # message. We can then use this to replace the original + # msg + args, as these might be unpickleable. We also zap the + # exc_info, exc_text and stack_info attributes, as they are no longer + # needed and, if not None, will typically not be pickleable. + msg = self.format(record) + # bpo-35726: make copy of record to avoid affecting other handlers in the chain. 
+ record = copy.copy(record) + record.message = msg + record.msg = msg + record.args = None + record.exc_info = None + record.exc_text = None + record.stack_info = None + return record + + def emit(self, record): + """ + Emit a record. + + Writes the LogRecord to the queue, preparing it for pickling first. + """ + try: + self.enqueue(self.prepare(record)) + except Exception: + self.handleError(record) + + +class QueueListener(object): + """ + This class implements an internal threaded listener which watches for + LogRecords being added to a queue, removes them and passes them to a + list of handlers for processing. + """ + _sentinel = None + + def __init__(self, queue, *handlers, respect_handler_level=False): + """ + Initialise an instance with the specified queue and + handlers. + """ + self.queue = queue + self.handlers = handlers + self._thread = None + self.respect_handler_level = respect_handler_level + + def __enter__(self): + """ + For use as a context manager. Starts the listener. + """ + self.start() + return self + + def __exit__(self, *args): + """ + For use as a context manager. Stops the listener. + """ + self.stop() + + def dequeue(self, block): + """ + Dequeue a record and return it, optionally blocking. + + The base implementation uses get. You may want to override this method + if you want to use timeouts or work with custom queue implementations. + """ + return self.queue.get(block) + + def start(self): + """ + Start the listener. + + This starts up a background thread to monitor the queue for + LogRecords to process. + """ + if self._thread is not None: + raise RuntimeError("Listener already started") + + self._thread = t = threading.Thread(target=self._monitor) + t.daemon = True + t.start() + + def prepare(self, record): + """ + Prepare a record for handling. + + This method just returns the passed-in record. You may want to + override this method if you need to do any custom marshalling or + manipulation of the record before passing it to the handlers. + """ + return record + + def handle(self, record): + """ + Handle a record. + + This just loops through the handlers offering them the record + to handle. + """ + record = self.prepare(record) + for handler in self.handlers: + if not self.respect_handler_level: + process = True + else: + process = record.levelno >= handler.level + if process: + handler.handle(record) + + def _monitor(self): + """ + Monitor the queue for records, and ask the handler + to deal with them. + + This method runs on a separate, internal thread. + The thread will terminate if it sees a sentinel object in the queue. + """ + q = self.queue + has_task_done = hasattr(q, 'task_done') + while True: + try: + record = self.dequeue(True) + if record is self._sentinel: + if has_task_done: + q.task_done() + break + self.handle(record) + if has_task_done: + q.task_done() + except queue.Empty: + break + + def enqueue_sentinel(self): + """ + This is used to enqueue the sentinel record. + + The base implementation uses put_nowait. You may want to override this + method if you want to use timeouts or work with custom queue + implementations. + """ + self.queue.put_nowait(self._sentinel) + + def stop(self): + """ + Stop the listener. + + This asks the thread to terminate, and then waits for it to do so. + Note that if you don't call this before your application exits, there + may be some records still left on the queue, which won't be processed. 
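The intended pairing of the two queue classes above: producers attach a QueueHandler, the listener owns the real handlers, and any blocking I/O happens on the listener's background thread. A minimal sketch:

    import logging
    import queue
    from logging.handlers import QueueHandler, QueueListener

    q = queue.SimpleQueue()
    console = logging.StreamHandler()
    listener = QueueListener(q, console, respect_handler_level=True)

    log = logging.getLogger("demo.queue")
    log.setLevel(logging.INFO)
    log.addHandler(QueueHandler(q))

    listener.start()
    log.info("handled on the listener's background thread")
    listener.stop()  # enqueues the sentinel and joins the thread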
+ """ + if self._thread: # see gh-114706 - allow calling this more than once + self.enqueue_sentinel() + self._thread.join() + self._thread = None diff --git a/Python314_4_x86_Template/Lib/lzma.py b/Python314_4_x86_Template/Lib/lzma.py new file mode 100644 index 00000000..316066d0 --- /dev/null +++ b/Python314_4_x86_Template/Lib/lzma.py @@ -0,0 +1,364 @@ +"""Interface to the liblzma compression library. + +This module provides a class for reading and writing compressed files, +classes for incremental (de)compression, and convenience functions for +one-shot (de)compression. + +These classes and functions support both the XZ and legacy LZMA +container formats, as well as raw compressed data streams. +""" + +__all__ = [ + "CHECK_NONE", "CHECK_CRC32", "CHECK_CRC64", "CHECK_SHA256", + "CHECK_ID_MAX", "CHECK_UNKNOWN", + "FILTER_LZMA1", "FILTER_LZMA2", "FILTER_DELTA", "FILTER_X86", "FILTER_IA64", + "FILTER_ARM", "FILTER_ARMTHUMB", "FILTER_POWERPC", "FILTER_SPARC", + "FORMAT_AUTO", "FORMAT_XZ", "FORMAT_ALONE", "FORMAT_RAW", + "MF_HC3", "MF_HC4", "MF_BT2", "MF_BT3", "MF_BT4", + "MODE_FAST", "MODE_NORMAL", "PRESET_DEFAULT", "PRESET_EXTREME", + + "LZMACompressor", "LZMADecompressor", "LZMAFile", "LZMAError", + "open", "compress", "decompress", "is_check_supported", +] + +import builtins +import io +import os +from compression._common import _streams +from _lzma import * +from _lzma import _encode_filter_properties, _decode_filter_properties # noqa: F401 + + +# Value 0 no longer used +_MODE_READ = 1 +# Value 2 no longer used +_MODE_WRITE = 3 + + +class LZMAFile(_streams.BaseStream): + + """A file object providing transparent LZMA (de)compression. + + An LZMAFile can act as a wrapper for an existing file object, or + refer directly to a named file on disk. + + Note that LZMAFile provides a *binary* file interface - data read + is returned as bytes, and data to be written must be given as bytes. + """ + + def __init__(self, filename=None, mode="r", *, + format=None, check=-1, preset=None, filters=None): + """Open an LZMA-compressed file in binary mode. + + filename can be either an actual file name (given as a str, + bytes, or PathLike object), in which case the named file is + opened, or it can be an existing file object to read from or + write to. + + mode can be "r" for reading (default), "w" for (over)writing, + "x" for creating exclusively, or "a" for appending. These can + equivalently be given as "rb", "wb", "xb" and "ab" respectively. + + format specifies the container format to use for the file. + If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the + default is FORMAT_XZ. + + check specifies the integrity check to use. This argument can + only be used when opening a file for writing. For FORMAT_XZ, + the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not + support integrity checks - for these formats, check must be + omitted, or be CHECK_NONE. + + When opening a file for reading, the *preset* argument is not + meaningful, and should be omitted. The *filters* argument should + also be omitted, except when format is FORMAT_RAW (in which case + it is required). + + When opening a file for writing, the settings used by the + compressor can be specified either as a preset compression + level (with the *preset* argument), or in detail as a custom + filter chain (with the *filters* argument). For FORMAT_XZ and + FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset + level. 
For FORMAT_RAW, the caller must always specify a filter + chain; the raw compressor does not support preset compression + levels. + + preset (if provided) should be an integer in the range 0-9, + optionally OR-ed with the constant PRESET_EXTREME. + + filters (if provided) should be a sequence of dicts. Each dict + should have an entry for "id" indicating ID of the filter, plus + additional entries for options to the filter. + """ + self._fp = None + self._closefp = False + self._mode = None + + if mode in ("r", "rb"): + if check != -1: + raise ValueError("Cannot specify an integrity check " + "when opening a file for reading") + if preset is not None: + raise ValueError("Cannot specify a preset compression " + "level when opening a file for reading") + if format is None: + format = FORMAT_AUTO + mode_code = _MODE_READ + elif mode in ("w", "wb", "a", "ab", "x", "xb"): + if format is None: + format = FORMAT_XZ + mode_code = _MODE_WRITE + self._compressor = LZMACompressor(format=format, check=check, + preset=preset, filters=filters) + self._pos = 0 + else: + raise ValueError("Invalid mode: {!r}".format(mode)) + + if isinstance(filename, (str, bytes, os.PathLike)): + if "b" not in mode: + mode += "b" + self._fp = builtins.open(filename, mode) + self._closefp = True + self._mode = mode_code + elif hasattr(filename, "read") or hasattr(filename, "write"): + self._fp = filename + self._mode = mode_code + else: + raise TypeError("filename must be a str, bytes, file or PathLike object") + + if self._mode == _MODE_READ: + raw = _streams.DecompressReader(self._fp, LZMADecompressor, + trailing_error=LZMAError, format=format, filters=filters) + self._buffer = io.BufferedReader(raw) + + def close(self): + """Flush and close the file. + + May be called more than once without error. Once the file is + closed, any other operation on it will raise a ValueError. + """ + if self.closed: + return + try: + if self._mode == _MODE_READ: + self._buffer.close() + self._buffer = None + elif self._mode == _MODE_WRITE: + self._fp.write(self._compressor.flush()) + self._compressor = None + finally: + try: + if self._closefp: + self._fp.close() + finally: + self._fp = None + self._closefp = False + + @property + def closed(self): + """True if this file is closed.""" + return self._fp is None + + @property + def name(self): + self._check_not_closed() + return self._fp.name + + @property + def mode(self): + return 'wb' if self._mode == _MODE_WRITE else 'rb' + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() and self._buffer.seekable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode == _MODE_READ + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + def peek(self, size=-1): + """Return buffered data without advancing the file position. + + Always returns at least one byte of data, unless at EOF. + The exact number of bytes returned is unspecified. + """ + self._check_can_read() + # Relies on the undocumented fact that BufferedReader.peek() always + # returns at least one byte (except at EOF) + return self._buffer.peek(size) + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. 
+ Returns b"" if the file is already at EOF. + """ + self._check_can_read() + return self._buffer.read(size) + + def read1(self, size=-1): + """Read up to size uncompressed bytes, while trying to avoid + making multiple reads from the underlying stream. Reads up to a + buffer's worth of data if size is negative. + + Returns b"" if the file is at EOF. + """ + self._check_can_read() + if size < 0: + size = io.DEFAULT_BUFFER_SIZE + return self._buffer.read1(size) + + def readline(self, size=-1): + """Read a line of uncompressed bytes from the file. + + The terminating newline (if present) is retained. If size is + non-negative, no more than size bytes will be read (in which + case the line may be incomplete). Returns b'' if already at EOF. + """ + self._check_can_read() + return self._buffer.readline(size) + + def write(self, data): + """Write a bytes object to the file. + + Returns the number of uncompressed bytes written, which is + always the length of data in bytes. Note that due to buffering, + the file on disk may not reflect the data written until close() + is called. + """ + self._check_can_write() + if isinstance(data, (bytes, bytearray)): + length = len(data) + else: + # accept any data that supports the buffer protocol + data = memoryview(data) + length = data.nbytes + + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += length + return length + + def seek(self, offset, whence=io.SEEK_SET): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Possible values for whence are: + + 0: start of stream (default): offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + """ + self._check_can_seek() + return self._buffer.seek(offset, whence) + + def tell(self): + """Return the current file position.""" + self._check_not_closed() + if self._mode == _MODE_READ: + return self._buffer.tell() + return self._pos + + +def open(filename, mode="rb", *, + format=None, check=-1, preset=None, filters=None, + encoding=None, errors=None, newline=None): + """Open an LZMA-compressed file in binary or text mode. + + filename can be either an actual file name (given as a str, bytes, + or PathLike object), in which case the named file is opened, or it + can be an existing file object to read from or write to. + + The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb", + "a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text + mode. + + The format, check, preset and filters arguments specify the + compression settings, as for LZMACompressor, LZMADecompressor and + LZMAFile. + + For binary mode, this function is equivalent to the LZMAFile + constructor: LZMAFile(filename, mode, ...). In this case, the + encoding, errors and newline arguments must not be provided. + + For text mode, an LZMAFile object is created, and wrapped in an + io.TextIOWrapper instance with the specified encoding, error + handling behavior, and line ending(s). 
+ + """ + if "t" in mode: + if "b" in mode: + raise ValueError("Invalid mode: %r" % (mode,)) + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if errors is not None: + raise ValueError("Argument 'errors' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + lz_mode = mode.replace("t", "") + binary_file = LZMAFile(filename, lz_mode, format=format, check=check, + preset=preset, filters=filters) + + if "t" in mode: + encoding = io.text_encoding(encoding) + return io.TextIOWrapper(binary_file, encoding, errors, newline) + else: + return binary_file + + +def compress(data, format=FORMAT_XZ, check=-1, preset=None, filters=None): + """Compress a block of data. + + Refer to LZMACompressor's docstring for a description of the + optional arguments *format*, *check*, *preset* and *filters*. + + For incremental compression, use an LZMACompressor instead. + """ + comp = LZMACompressor(format, check, preset, filters) + return comp.compress(data) + comp.flush() + + +def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None): + """Decompress a block of data. + + Refer to LZMADecompressor's docstring for a description of the + optional arguments *format*, *check* and *filters*. + + For incremental decompression, use an LZMADecompressor instead. + """ + results = [] + while True: + decomp = LZMADecompressor(format, memlimit, filters) + try: + res = decomp.decompress(data) + except LZMAError: + if results: + break # Leftover data is not a valid LZMA/XZ stream; ignore it. + else: + raise # Error on the first iteration; bail out. + results.append(res) + if not decomp.eof: + raise LZMAError("Compressed data ended before the " + "end-of-stream marker was reached") + data = decomp.unused_data + if not data: + break + return b"".join(results) diff --git a/Python313_13_x86_Template/Lib/mailbox.py b/Python314_4_x86_Template/Lib/mailbox.py similarity index 100% rename from Python313_13_x86_Template/Lib/mailbox.py rename to Python314_4_x86_Template/Lib/mailbox.py diff --git a/Python314_4_x86_Template/Lib/mimetypes.py b/Python314_4_x86_Template/Lib/mimetypes.py new file mode 100644 index 00000000..7d0f4c1f --- /dev/null +++ b/Python314_4_x86_Template/Lib/mimetypes.py @@ -0,0 +1,747 @@ +"""Guess the MIME type of a file. + +This module defines two useful functions: + +guess_type(url, strict=True) -- guess the MIME type and encoding of a URL. + +guess_extension(type, strict=True) -- guess the extension for a given MIME type. 
+ +It also contains the following, for tuning the behavior: + +Data: + +knownfiles -- list of files to parse +inited -- flag set when init() has been called +suffix_map -- dictionary mapping suffixes to suffixes +encodings_map -- dictionary mapping suffixes to encodings +types_map -- dictionary mapping suffixes to types + +Functions: + +init([files]) -- parse a list of files, default knownfiles (on Windows, the + default values are taken from the registry) +read_mime_types(file) -- parse one file, return a dictionary or None +""" + +try: + from _winapi import _mimetypes_read_windows_registry +except ImportError: + _mimetypes_read_windows_registry = None + +try: + import winreg as _winreg +except ImportError: + _winreg = None + +__all__ = [ + "knownfiles", "inited", "MimeTypes", + "guess_type", "guess_file_type", "guess_all_extensions", "guess_extension", + "add_type", "init", "read_mime_types", + "suffix_map", "encodings_map", "types_map", "common_types" +] + +knownfiles = [ + "/etc/mime.types", + "/etc/httpd/mime.types", # Mac OS X + "/etc/httpd/conf/mime.types", # Apache + "/etc/apache/mime.types", # Apache 1 + "/etc/apache2/mime.types", # Apache 2 + "/usr/local/etc/httpd/conf/mime.types", + "/usr/local/lib/netscape/mime.types", + "/usr/local/etc/httpd/conf/mime.types", # Apache 1.2 + "/usr/local/etc/mime.types", # Apache 1.3 + ] + +inited = False +_db = None + + +class MimeTypes: + """MIME-types datastore. + + This datastore can handle information from mime.types-style files + and supports basic determination of MIME type from a filename or + URL, and can guess a reasonable extension given a MIME type. + """ + + def __init__(self, filenames=(), strict=True): + if not inited: + init() + self.encodings_map = _encodings_map_default.copy() + self.suffix_map = _suffix_map_default.copy() + self.types_map = ({}, {}) # dict for (non-strict, strict) + self.types_map_inv = ({}, {}) + for (ext, type) in _types_map_default.items(): + self.add_type(type, ext, True) + for (ext, type) in _common_types_default.items(): + self.add_type(type, ext, False) + for name in filenames: + self.read(name, strict) + + def add_type(self, type, ext, strict=True): + """Add a mapping between a type and an extension. + + When the extension is already known, the new + type will replace the old one. When the type + is already known the extension will be added + to the list of known extensions. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + + Valid extensions are empty or start with a '.'. + """ + if ext and not ext.startswith('.'): + from warnings import _deprecated + + _deprecated( + "Undotted extensions", + "Using undotted extensions is deprecated and " + "will raise a ValueError in Python {remove}", + remove=(3, 16), + ) + + if not type: + return + self.types_map[strict][ext] = type + exts = self.types_map_inv[strict].setdefault(type, []) + if ext not in exts: + exts.append(ext) + + def guess_type(self, url, strict=True): + """Guess the type of a file which is either a URL or a path-like object. + + Return value is a tuple (type, encoding) where type is None if + the type can't be guessed (no or unknown suffix) or a string + of the form type/subtype, usable for a MIME Content-type + header; and encoding is None for no encoding or the name of + the program used to encode (e.g. compress or gzip). The + mappings are table driven. Encoding suffixes are case + sensitive; type suffixes are first tried case sensitive, then + case insensitive. 
+ + The suffixes .tgz, .taz and .tz (case sensitive!) are all + mapped to '.tar.gz'. (This is table-driven too, using the + dictionary suffix_map.) + + Optional 'strict' argument when False adds a bunch of commonly found, + but non-standard types. + """ + # Lazy import to improve module import time + import os + import urllib.parse + + # TODO: Deprecate accepting file paths (in particular path-like objects). + url = os.fspath(url) + p = urllib.parse.urlparse(url) + if p.scheme and len(p.scheme) > 1: + scheme = p.scheme + url = p.path + else: + return self.guess_file_type(url, strict=strict) + if scheme == 'data': + # syntax of data URLs: + # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data + # mediatype := [ type "/" subtype ] *( ";" parameter ) + # data := *urlchar + # parameter := attribute "=" value + # type/subtype defaults to "text/plain" + comma = url.find(',') + if comma < 0: + # bad data URL + return None, None + semi = url.find(';', 0, comma) + if semi >= 0: + type = url[:semi] + else: + type = url[:comma] + if '=' in type or '/' not in type: + type = 'text/plain' + return type, None # never compressed, so encoding is None + + # Lazy import to improve module import time + import posixpath + + return self._guess_file_type(url, strict, posixpath.splitext) + + def guess_file_type(self, path, *, strict=True): + """Guess the type of a file based on its path. + + Similar to guess_type(), but takes file path instead of URL. + """ + # Lazy import to improve module import time + import os + + path = os.fsdecode(path) + path = os.path.splitdrive(path)[1] + return self._guess_file_type(path, strict, os.path.splitext) + + def _guess_file_type(self, path, strict, splitext): + base, ext = splitext(path) + while (ext_lower := ext.lower()) in self.suffix_map: + base, ext = splitext(base + self.suffix_map[ext_lower]) + # encodings_map is case sensitive + if ext in self.encodings_map: + encoding = self.encodings_map[ext] + base, ext = splitext(base) + else: + encoding = None + ext = ext.lower() + types_map = self.types_map[True] + if ext in types_map: + return types_map[ext], encoding + elif strict: + return None, encoding + types_map = self.types_map[False] + if ext in types_map: + return types_map[ext], encoding + else: + return None, encoding + + def guess_all_extensions(self, type, strict=True): + """Guess the extensions for a file based on its MIME type. + + Return value is a list of strings giving the possible filename + extensions, including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data stream, + but would be mapped to the MIME type 'type' by guess_type(). + + Optional 'strict' argument when false adds a bunch of commonly found, + but non-standard types. + """ + type = type.lower() + extensions = list(self.types_map_inv[True].get(type, [])) + if not strict: + for ext in self.types_map_inv[False].get(type, []): + if ext not in extensions: + extensions.append(ext) + return extensions + + def guess_extension(self, type, strict=True): + """Guess the extension for a file based on its MIME type. + + Return value is a string giving a filename extension, + including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data + stream, but would be mapped to the MIME type 'type' by + guess_type(). If no extension can be guessed for 'type', None + is returned. + + Optional 'strict' argument when false adds a bunch of commonly found, + but non-standard types. 
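These lookups are normally reached through the module-level convenience functions defined further down. A few representative calls; exact results can vary once init() has read platform files or the Windows registry:

    import mimetypes

    # Compound suffixes are resolved through suffix_map, then encodings_map.
    print(mimetypes.guess_type("archive.tar.gz"))   # ('application/x-tar', 'gzip')
    print(mimetypes.guess_type("page.html"))        # ('text/html', None)
    print(mimetypes.guess_extension("text/plain"))  # commonly '.txt'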
+ """ + extensions = self.guess_all_extensions(type, strict) + if not extensions: + return None + return extensions[0] + + def read(self, filename, strict=True): + """ + Read a single mime.types-format file, specified by pathname. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + """ + with open(filename, encoding='utf-8') as fp: + self.readfp(fp, strict) + + def readfp(self, fp, strict=True): + """ + Read a single mime.types-format file. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + """ + while line := fp.readline(): + words = line.split() + for i in range(len(words)): + if words[i][0] == '#': + del words[i:] + break + if not words: + continue + type, suffixes = words[0], words[1:] + for suff in suffixes: + self.add_type(type, '.' + suff, strict) + + def read_windows_registry(self, strict=True): + """ + Load the MIME types database from Windows registry. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + """ + + if not _mimetypes_read_windows_registry and not _winreg: + return + + add_type = self.add_type + if strict: + add_type = lambda type, ext: self.add_type(type, ext, True) + + # Accelerated function if it is available + if _mimetypes_read_windows_registry: + _mimetypes_read_windows_registry(add_type) + elif _winreg: + self._read_windows_registry(add_type) + + @classmethod + def _read_windows_registry(cls, add_type): + def enum_types(mimedb): + i = 0 + while True: + try: + ctype = _winreg.EnumKey(mimedb, i) + except OSError: + break + else: + if '\0' not in ctype: + yield ctype + i += 1 + + with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '') as hkcr: + for subkeyname in enum_types(hkcr): + try: + with _winreg.OpenKey(hkcr, subkeyname) as subkey: + # Only check file extensions + if not subkeyname.startswith("."): + continue + # raises OSError if no 'Content Type' value + mimetype, datatype = _winreg.QueryValueEx( + subkey, 'Content Type') + if datatype != _winreg.REG_SZ: + continue + add_type(mimetype, subkeyname) + except OSError: + continue + +def guess_type(url, strict=True): + """Guess the type of a file based on its URL. + + Return value is a tuple (type, encoding) where type is None if the + type can't be guessed (no or unknown suffix) or a string of the + form type/subtype, usable for a MIME Content-type header; and + encoding is None for no encoding or the name of the program used + to encode (e.g. compress or gzip). The mappings are table + driven. Encoding suffixes are case sensitive; type suffixes are + first tried case sensitive, then case insensitive. + + The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped + to ".tar.gz". (This is table-driven too, using the dictionary + suffix_map). + + Optional 'strict' argument when false adds a bunch of commonly found, but + non-standard types. + """ + if _db is None: + init() + return _db.guess_type(url, strict) + + +def guess_file_type(path, *, strict=True): + """Guess the type of a file based on its path. + + Similar to guess_type(), but takes file path instead of URL. + """ + if _db is None: + init() + return _db.guess_file_type(path, strict=strict) + + +def guess_all_extensions(type, strict=True): + """Guess the extensions for a file based on its MIME type. + + Return value is a list of strings giving the possible filename + extensions, including the leading dot ('.'). 
The extension is not
+    guaranteed to have been associated with any particular data
+    stream, but would be mapped to the MIME type 'type' by
+    guess_type().  If no extension can be guessed for 'type', an
+    empty list is returned.
+
+    Optional 'strict' argument when false adds a bunch of commonly found,
+    but non-standard types.
+    """
+    if _db is None:
+        init()
+    return _db.guess_all_extensions(type, strict)
+
+def guess_extension(type, strict=True):
+    """Guess the extension for a file based on its MIME type.
+
+    Return value is a string giving a filename extension, including the
+    leading dot ('.').  The extension is not guaranteed to have been
+    associated with any particular data stream, but would be mapped to the
+    MIME type 'type' by guess_type().  If no extension can be guessed for
+    'type', None is returned.
+
+    Optional 'strict' argument when false adds a bunch of commonly found,
+    but non-standard types.
+    """
+    if _db is None:
+        init()
+    return _db.guess_extension(type, strict)
+
+def add_type(type, ext, strict=True):
+    """Add a mapping between a type and an extension.
+
+    When the extension is already known, the new
+    type will replace the old one. When the type
+    is already known the extension will be added
+    to the list of known extensions.
+
+    If strict is true, information will be added to
+    list of standard types, else to the list of non-standard
+    types.
+    """
+    if _db is None:
+        init()
+    return _db.add_type(type, ext, strict)
+
+
+def init(files=None):
+    global suffix_map, types_map, encodings_map, common_types
+    global inited, _db
+    inited = True    # so that MimeTypes.__init__() doesn't call us again
+
+    if files is None or _db is None:
+        db = MimeTypes()
+        # Quick return if not supported
+        db.read_windows_registry()
+
+        if files is None:
+            files = knownfiles
+        else:
+            files = knownfiles + list(files)
+    else:
+        db = _db
+
+    # Lazy import to improve module import time
+    import os
+
+    for file in files:
+        if os.path.isfile(file):
+            db.read(file)
+    encodings_map = db.encodings_map
+    suffix_map = db.suffix_map
+    types_map = db.types_map[True]
+    common_types = db.types_map[False]
+    # Make the DB a global variable now that it is fully initialized
+    _db = db
+
+
+def read_mime_types(file):
+    try:
+        f = open(file, encoding='utf-8')
+    except OSError:
+        return None
+    with f:
+        db = MimeTypes()
+        db.readfp(f, True)
+        return db.types_map[True]
+
+
+def _default_mime_types():
+    global suffix_map, _suffix_map_default
+    global encodings_map, _encodings_map_default
+    global types_map, _types_map_default
+    global common_types, _common_types_default
+
+    suffix_map = _suffix_map_default = {
+        '.svgz': '.svg.gz',
+        '.tgz': '.tar.gz',
+        '.taz': '.tar.gz',
+        '.tz': '.tar.gz',
+        '.tbz2': '.tar.bz2',
+        '.txz': '.tar.xz',
+        }
+
+    encodings_map = _encodings_map_default = {
+        '.gz': 'gzip',
+        '.Z': 'compress',
+        '.bz2': 'bzip2',
+        '.xz': 'xz',
+        '.br': 'br',
+        }
+
+    # Before adding new types, make sure they are either registered with IANA,
+    # at https://www.iana.org/assignments/media-types/media-types.xhtml
+    # or extensions, i.e. using the x- prefix
+
+    # If you add to these, please keep them sorted by mime type.
+    # Make sure the entry with the preferred file extension for a particular mime type
+    # appears before any others of the same mimetype.
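+    # Illustrative walk-through of how these tables combine: when
+    # guessing 'archive.tgz', suffix_map first rewrites '.tgz' to
+    # '.tar.gz', encodings_map then strips '.gz' as the 'gzip'
+    # encoding, and the remaining '.tar' suffix is looked up in
+    # types_map below, yielding ('application/x-tar', 'gzip').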
+ types_map = _types_map_default = { + '.js' : 'text/javascript', + '.mjs' : 'text/javascript', + '.epub' : 'application/epub+zip', + '.gz' : 'application/gzip', + '.json' : 'application/json', + '.webmanifest': 'application/manifest+json', + '.doc' : 'application/msword', + '.dot' : 'application/msword', + '.wiz' : 'application/msword', + '.nq' : 'application/n-quads', + '.nt' : 'application/n-triples', + '.bin' : 'application/octet-stream', + '.a' : 'application/octet-stream', + '.dll' : 'application/octet-stream', + '.exe' : 'application/octet-stream', + '.o' : 'application/octet-stream', + '.obj' : 'application/octet-stream', + '.so' : 'application/octet-stream', + '.oda' : 'application/oda', + '.ogx' : 'application/ogg', + '.pdf' : 'application/pdf', + '.p7c' : 'application/pkcs7-mime', + '.ps' : 'application/postscript', + '.ai' : 'application/postscript', + '.eps' : 'application/postscript', + '.trig' : 'application/trig', + '.m3u' : 'application/vnd.apple.mpegurl', + '.m3u8' : 'application/vnd.apple.mpegurl', + '.xls' : 'application/vnd.ms-excel', + '.xlb' : 'application/vnd.ms-excel', + '.eot' : 'application/vnd.ms-fontobject', + '.ppt' : 'application/vnd.ms-powerpoint', + '.pot' : 'application/vnd.ms-powerpoint', + '.ppa' : 'application/vnd.ms-powerpoint', + '.pps' : 'application/vnd.ms-powerpoint', + '.pwz' : 'application/vnd.ms-powerpoint', + '.odg' : 'application/vnd.oasis.opendocument.graphics', + '.odp' : 'application/vnd.oasis.opendocument.presentation', + '.ods' : 'application/vnd.oasis.opendocument.spreadsheet', + '.odt' : 'application/vnd.oasis.opendocument.text', + '.pptx' : 'application/vnd.openxmlformats-officedocument.presentationml.presentation', + '.xlsx' : 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + '.docx' : 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + '.rar' : 'application/vnd.rar', + '.wasm' : 'application/wasm', + '.7z' : 'application/x-7z-compressed', + '.bcpio' : 'application/x-bcpio', + '.cpio' : 'application/x-cpio', + '.csh' : 'application/x-csh', + '.deb' : 'application/x-debian-package', + '.dvi' : 'application/x-dvi', + '.gtar' : 'application/x-gtar', + '.hdf' : 'application/x-hdf', + '.h5' : 'application/x-hdf5', + '.latex' : 'application/x-latex', + '.mif' : 'application/x-mif', + '.cdf' : 'application/x-netcdf', + '.nc' : 'application/x-netcdf', + '.p12' : 'application/x-pkcs12', + '.php' : 'application/x-httpd-php', + '.pfx' : 'application/x-pkcs12', + '.ram' : 'application/x-pn-realaudio', + '.pyc' : 'application/x-python-code', + '.pyo' : 'application/x-python-code', + '.rpm' : 'application/x-rpm', + '.sh' : 'application/x-sh', + '.shar' : 'application/x-shar', + '.swf' : 'application/x-shockwave-flash', + '.sv4cpio': 'application/x-sv4cpio', + '.sv4crc' : 'application/x-sv4crc', + '.tar' : 'application/x-tar', + '.tcl' : 'application/x-tcl', + '.tex' : 'application/x-tex', + '.texi' : 'application/x-texinfo', + '.texinfo': 'application/x-texinfo', + '.roff' : 'application/x-troff', + '.t' : 'application/x-troff', + '.tr' : 'application/x-troff', + '.man' : 'application/x-troff-man', + '.me' : 'application/x-troff-me', + '.ms' : 'application/x-troff-ms', + '.ustar' : 'application/x-ustar', + '.src' : 'application/x-wais-source', + '.xsl' : 'application/xml', + '.rdf' : 'application/xml', + '.wsdl' : 'application/xml', + '.xpdl' : 'application/xml', + '.yaml' : 'application/yaml', + '.yml' : 'application/yaml', + '.zip' : 'application/zip', + '.3gp' : 'audio/3gpp', + '.3gpp' : 
'audio/3gpp', + '.3g2' : 'audio/3gpp2', + '.3gpp2' : 'audio/3gpp2', + '.aac' : 'audio/aac', + '.adts' : 'audio/aac', + '.loas' : 'audio/aac', + '.ass' : 'audio/aac', + '.au' : 'audio/basic', + '.snd' : 'audio/basic', + '.flac' : 'audio/flac', + '.mka' : 'audio/matroska', + '.m4a' : 'audio/mp4', + '.mp3' : 'audio/mpeg', + '.mp2' : 'audio/mpeg', + '.ogg' : 'audio/ogg', + '.opus' : 'audio/opus', + '.aif' : 'audio/x-aiff', + '.aifc' : 'audio/x-aiff', + '.aiff' : 'audio/x-aiff', + '.ra' : 'audio/x-pn-realaudio', + '.wav' : 'audio/vnd.wave', + '.otf' : 'font/otf', + '.ttf' : 'font/ttf', + '.weba' : 'audio/webm', + '.woff' : 'font/woff', + '.woff2' : 'font/woff2', + '.avif' : 'image/avif', + '.bmp' : 'image/bmp', + '.emf' : 'image/emf', + '.fits' : 'image/fits', + '.g3' : 'image/g3fax', + '.gif' : 'image/gif', + '.ief' : 'image/ief', + '.jp2' : 'image/jp2', + '.jpg' : 'image/jpeg', + '.jpe' : 'image/jpeg', + '.jpeg' : 'image/jpeg', + '.jpm' : 'image/jpm', + '.jpx' : 'image/jpx', + '.heic' : 'image/heic', + '.heif' : 'image/heif', + '.png' : 'image/png', + '.svg' : 'image/svg+xml', + '.t38' : 'image/t38', + '.tiff' : 'image/tiff', + '.tif' : 'image/tiff', + '.tfx' : 'image/tiff-fx', + '.ico' : 'image/vnd.microsoft.icon', + '.webp' : 'image/webp', + '.wmf' : 'image/wmf', + '.ras' : 'image/x-cmu-raster', + '.pnm' : 'image/x-portable-anymap', + '.pbm' : 'image/x-portable-bitmap', + '.pgm' : 'image/x-portable-graymap', + '.ppm' : 'image/x-portable-pixmap', + '.rgb' : 'image/x-rgb', + '.xbm' : 'image/x-xbitmap', + '.xpm' : 'image/x-xpixmap', + '.xwd' : 'image/x-xwindowdump', + '.eml' : 'message/rfc822', + '.mht' : 'message/rfc822', + '.mhtml' : 'message/rfc822', + '.nws' : 'message/rfc822', + '.gltf' : 'model/gltf+json', + '.glb' : 'model/gltf-binary', + '.stl' : 'model/stl', + '.css' : 'text/css', + '.csv' : 'text/csv', + '.html' : 'text/html', + '.htm' : 'text/html', + '.md' : 'text/markdown', + '.markdown': 'text/markdown', + '.n3' : 'text/n3', + '.txt' : 'text/plain', + '.bat' : 'text/plain', + '.c' : 'text/plain', + '.h' : 'text/plain', + '.ksh' : 'text/plain', + '.pl' : 'text/plain', + '.srt' : 'text/plain', + '.rtx' : 'text/richtext', + '.rtf' : 'text/rtf', + '.tsv' : 'text/tab-separated-values', + '.vtt' : 'text/vtt', + '.py' : 'text/x-python', + '.rst' : 'text/x-rst', + '.etx' : 'text/x-setext', + '.sgm' : 'text/x-sgml', + '.sgml' : 'text/x-sgml', + '.vcf' : 'text/x-vcard', + '.xml' : 'text/xml', + '.mkv' : 'video/matroska', + '.mk3d' : 'video/matroska-3d', + '.mp4' : 'video/mp4', + '.mpeg' : 'video/mpeg', + '.m1v' : 'video/mpeg', + '.mpa' : 'video/mpeg', + '.mpe' : 'video/mpeg', + '.mpg' : 'video/mpeg', + '.ogv' : 'video/ogg', + '.mov' : 'video/quicktime', + '.qt' : 'video/quicktime', + '.webm' : 'video/webm', + '.avi' : 'video/vnd.avi', + '.m4v' : 'video/x-m4v', + '.wmv' : 'video/x-ms-wmv', + '.movie' : 'video/x-sgi-movie', + } + + # These are non-standard types, commonly found in the wild. They will + # only match if strict=0 flag is given to the API methods. 
+ + # Please sort these too + common_types = _common_types_default = { + '.rtf' : 'application/rtf', + '.apk' : 'application/vnd.android.package-archive', + '.midi': 'audio/midi', + '.mid' : 'audio/midi', + '.jpg' : 'image/jpg', + '.pict': 'image/pict', + '.pct' : 'image/pict', + '.pic' : 'image/pict', + '.xul' : 'text/xul', + } + + +_default_mime_types() + + +def _parse_args(args): + from argparse import ArgumentParser + + parser = ArgumentParser( + description='map filename extensions to MIME types', color=True + ) + parser.add_argument( + '-e', '--extension', + action='store_true', + help='guess extension instead of type' + ) + parser.add_argument( + '-l', '--lenient', + action='store_true', + help='additionally search for common but non-standard types' + ) + parser.add_argument('type', nargs='+', help='a type to search') + args = parser.parse_args(args) + return args, parser.format_help() + + +def _main(args=None): + """Run the mimetypes command-line interface and return a text to print.""" + args, help_text = _parse_args(args) + + results = [] + if args.extension: + for gtype in args.type: + guess = guess_extension(gtype, not args.lenient) + if guess: + results.append(str(guess)) + else: + results.append(f"error: unknown type {gtype}") + return results + else: + for gtype in args.type: + guess, encoding = guess_type(gtype, not args.lenient) + if guess: + results.append(f"type: {guess} encoding: {encoding}") + else: + results.append(f"error: media type unknown for {gtype}") + return results + + +if __name__ == '__main__': + import sys + + results = _main() + print("\n".join(results)) + sys.exit(any(result.startswith("error: ") for result in results)) diff --git a/Python313_13_x86_Template/Lib/modulefinder.py b/Python314_4_x86_Template/Lib/modulefinder.py similarity index 100% rename from Python313_13_x86_Template/Lib/modulefinder.py rename to Python314_4_x86_Template/Lib/modulefinder.py diff --git a/Python313_13_x86_Template/Lib/multiprocessing/__init__.py b/Python314_4_x86_Template/Lib/multiprocessing/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/multiprocessing/__init__.py rename to Python314_4_x86_Template/Lib/multiprocessing/__init__.py diff --git a/Python314_4_x86_Template/Lib/multiprocessing/connection.py b/Python314_4_x86_Template/Lib/multiprocessing/connection.py new file mode 100644 index 00000000..a6e1b0c7 --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/connection.py @@ -0,0 +1,1229 @@ +# +# A higher level module for using sockets (or Windows named pipes) +# +# multiprocessing/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] + +import errno +import io +import itertools +import os +import sys +import socket +import struct +import tempfile +import time + + +from . import util + +from . import AuthenticationError, BufferTooShort +from .context import reduction +_ForkingPickler = reduction.ForkingPickler + +try: + import _multiprocessing + import _winapi + from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE +except ImportError: + if sys.platform == 'win32': + raise + _winapi = None + +# +# +# + +# 64 KiB is the default PIPE buffer size of most POSIX platforms. +BUFSIZE = 64 * 1024 + +# A very generous timeout when it comes to local connections... +CONNECTION_TIMEOUT = 20. 
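+
+# Minimal illustrative sketch of the public API defined below; the
+# address and authkey are arbitrary example values.  In one process:
+#
+#     from multiprocessing.connection import Listener
+#     with Listener(('localhost', 6000), authkey=b'secret') as listener:
+#         with listener.accept() as conn:
+#             print(conn.recv())
+#
+# and in another:
+#
+#     from multiprocessing.connection import Client
+#     with Client(('localhost', 6000), authkey=b'secret') as conn:
+#         conn.send('hello')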
+ +_mmap_counter = itertools.count() +_MAX_PIPE_ATTEMPTS = 100 + +default_family = 'AF_INET' +families = ['AF_INET'] + +if hasattr(socket, 'AF_UNIX'): + default_family = 'AF_UNIX' + families += ['AF_UNIX'] + +if sys.platform == 'win32': + default_family = 'AF_PIPE' + families += ['AF_PIPE'] + + +def _init_timeout(timeout=CONNECTION_TIMEOUT): + return time.monotonic() + timeout + +def _check_timeout(t): + return time.monotonic() > t + +# +# +# + +def arbitrary_address(family): + ''' + Return an arbitrary free address for the given family + ''' + if family == 'AF_INET': + return ('localhost', 0) + elif family == 'AF_UNIX': + return tempfile.mktemp(prefix='sock-', dir=util.get_temp_dir()) + elif family == 'AF_PIPE': + return (r'\\.\pipe\pyc-%d-%d-%s' % + (os.getpid(), next(_mmap_counter), os.urandom(8).hex())) + else: + raise ValueError('unrecognized family') + +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + + if sys.platform == 'win32' and family == 'AF_UNIX': + # double check + if not hasattr(socket, family): + raise ValueError('Family %s is not recognized.' % family) + +def address_type(address): + ''' + Return the types of the address + + This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' + ''' + if type(address) == tuple: + return 'AF_INET' + elif type(address) is str and address.startswith('\\\\'): + return 'AF_PIPE' + elif type(address) is str or util.is_abstract_socket_namespace(address): + return 'AF_UNIX' + else: + raise ValueError('address type of %r unrecognized' % address) + +# +# Connection classes +# + +class _ConnectionBase: + _handle = None + + def __init__(self, handle, readable=True, writable=True): + handle = handle.__index__() + if handle < 0: + raise ValueError("invalid handle") + if not readable and not writable: + raise ValueError( + "at least one of `readable` and `writable` must be True") + self._handle = handle + self._readable = readable + self._writable = writable + + # XXX should we use util.Finalize instead of a __del__? 
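+    # (util.Finalize would allow the handle to be closed explicitly and at
+    # a deterministic point during interpreter shutdown, whereas __del__
+    # depends on when the object happens to be collected.)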
+ + def __del__(self): + if self._handle is not None: + self._close() + + def _check_closed(self): + if self._handle is None: + raise OSError("handle is closed") + + def _check_readable(self): + if not self._readable: + raise OSError("connection is write-only") + + def _check_writable(self): + if not self._writable: + raise OSError("connection is read-only") + + def _bad_message_length(self): + if self._writable: + self._readable = False + else: + self.close() + raise OSError("bad message length") + + @property + def closed(self): + """True if the connection is closed""" + return self._handle is None + + @property + def readable(self): + """True if the connection is readable""" + return self._readable + + @property + def writable(self): + """True if the connection is writable""" + return self._writable + + def fileno(self): + """File descriptor or handle of the connection""" + self._check_closed() + return self._handle + + def close(self): + """Close the connection""" + if self._handle is not None: + try: + self._close() + finally: + self._handle = None + + def _detach(self): + """Stop managing the underlying file descriptor or handle.""" + self._handle = None + + def send_bytes(self, buf, offset=0, size=None): + """Send the bytes data from a bytes-like object""" + self._check_closed() + self._check_writable() + m = memoryview(buf) + if m.itemsize > 1: + m = m.cast('B') + n = m.nbytes + if offset < 0: + raise ValueError("offset is negative") + if n < offset: + raise ValueError("buffer length < offset") + if size is None: + size = n - offset + elif size < 0: + raise ValueError("size is negative") + elif offset + size > n: + raise ValueError("buffer length < offset + size") + self._send_bytes(m[offset:offset + size]) + + def send(self, obj): + """Send a (picklable) object""" + self._check_closed() + self._check_writable() + self._send_bytes(_ForkingPickler.dumps(obj)) + + def recv_bytes(self, maxlength=None): + """ + Receive bytes data as a bytes object. + """ + self._check_closed() + self._check_readable() + if maxlength is not None and maxlength < 0: + raise ValueError("negative maxlength") + buf = self._recv_bytes(maxlength) + if buf is None: + self._bad_message_length() + return buf.getvalue() + + def recv_bytes_into(self, buf, offset=0): + """ + Receive bytes data into a writeable bytes-like object. + Return the number of bytes read. + """ + self._check_closed() + self._check_readable() + with memoryview(buf) as m: + # Get bytesize of arbitrary buffer + itemsize = m.itemsize + bytesize = itemsize * len(m) + if offset < 0: + raise ValueError("negative offset") + elif offset > bytesize: + raise ValueError("offset too large") + result = self._recv_bytes() + size = result.tell() + if bytesize < offset + size: + raise BufferTooShort(result.getvalue()) + # Message can fit in dest + result.seek(0) + result.readinto(m[offset // itemsize : + (offset + size) // itemsize]) + return size + + def recv(self): + """Receive a (picklable) object""" + self._check_closed() + self._check_readable() + buf = self._recv_bytes() + return _ForkingPickler.loads(buf.getbuffer()) + + def poll(self, timeout=0.0): + """Whether there is any input available to be read""" + self._check_closed() + self._check_readable() + return self._poll(timeout) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +if _winapi: + + class PipeConnection(_ConnectionBase): + """ + Connection class based on a Windows named pipe. 
+ Overlapped I/O is used, so the handles must have been created + with FILE_FLAG_OVERLAPPED. + """ + _got_empty_message = False + _send_ov = None + + def _close(self, _CloseHandle=_winapi.CloseHandle): + ov = self._send_ov + if ov is not None: + # Interrupt WaitForMultipleObjects() in _send_bytes() + ov.cancel() + _CloseHandle(self._handle) + + def _send_bytes(self, buf): + if self._send_ov is not None: + # A connection should only be used by a single thread + raise ValueError("concurrent send_bytes() calls " + "are not supported") + ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) + self._send_ov = ov + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + self._send_ov = None + nwritten, err = ov.GetOverlappedResult(True) + if err == _winapi.ERROR_OPERATION_ABORTED: + # close() was called by another thread while + # WaitForMultipleObjects() was waiting for the overlapped + # operation. + raise OSError(errno.EPIPE, "handle is closed") + assert err == 0 + assert nwritten == len(buf) + + def _recv_bytes(self, maxsize=None): + if self._got_empty_message: + self._got_empty_message = False + return io.BytesIO() + else: + bsize = 128 if maxsize is None else min(maxsize, 128) + try: + ov, err = _winapi.ReadFile(self._handle, bsize, + overlapped=True) + + sentinel = object() + return_value = sentinel + try: + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nread, err = ov.GetOverlappedResult(True) + if err == 0: + f = io.BytesIO() + f.write(ov.getbuffer()) + return_value = f + elif err == _winapi.ERROR_MORE_DATA: + return_value = self._get_more_data(ov, maxsize) + except: + if return_value is sentinel: + raise + + if return_value is not sentinel: + return return_value + except OSError as e: + if e.winerror == _winapi.ERROR_BROKEN_PIPE: + raise EOFError + else: + raise + raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") + + def _poll(self, timeout): + if (self._got_empty_message or + _winapi.PeekNamedPipe(self._handle)[0] != 0): + return True + return bool(wait([self], timeout)) + + def _get_more_data(self, ov, maxsize): + buf = ov.getbuffer() + f = io.BytesIO() + f.write(buf) + left = _winapi.PeekNamedPipe(self._handle)[1] + assert left > 0 + if maxsize is not None and len(buf) + left > maxsize: + self._bad_message_length() + ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) + rbytes, err = ov.GetOverlappedResult(True) + assert err == 0 + assert rbytes == left + f.write(ov.getbuffer()) + return f + + +class Connection(_ConnectionBase): + """ + Connection class based on an arbitrary file descriptor (Unix only), or + a socket handle (Windows). 
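+
+    Messages are framed with a 4-byte big-endian length prefix; payloads
+    of more than 2**31 - 1 bytes are sent with a -1 marker followed by an
+    8-byte length (see _send_bytes()/_recv_bytes() below).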
+ """ + + if _winapi: + def _close(self, _close=_multiprocessing.closesocket): + _close(self._handle) + _write = _multiprocessing.send + _read = _multiprocessing.recv + else: + def _close(self, _close=os.close): + _close(self._handle) + _write = os.write + _read = os.read + + def _send(self, buf, write=_write): + remaining = len(buf) + while True: + n = write(self._handle, buf) + remaining -= n + if remaining == 0: + break + buf = buf[n:] + + def _recv(self, size, read=_read): + buf = io.BytesIO() + handle = self._handle + remaining = size + while remaining > 0: + to_read = min(BUFSIZE, remaining) + chunk = read(handle, to_read) + n = len(chunk) + if n == 0: + if remaining == size: + raise EOFError + else: + raise OSError("got end of file during message") + buf.write(chunk) + remaining -= n + return buf + + def _send_bytes(self, buf): + n = len(buf) + if n > 0x7fffffff: + pre_header = struct.pack("!i", -1) + header = struct.pack("!Q", n) + self._send(pre_header) + self._send(header) + self._send(buf) + else: + # For wire compatibility with 3.7 and lower + header = struct.pack("!i", n) + if n > 16384: + # The payload is large so Nagle's algorithm won't be triggered + # and we'd better avoid the cost of concatenation. + self._send(header) + self._send(buf) + else: + # Issue #20540: concatenate before sending, to avoid delays due + # to Nagle's algorithm on a TCP socket. + # Also note we want to avoid sending a 0-length buffer separately, + # to avoid "broken pipe" errors if the other end closed the pipe. + self._send(header + buf) + + def _recv_bytes(self, maxsize=None): + buf = self._recv(4) + size, = struct.unpack("!i", buf.getvalue()) + if size == -1: + buf = self._recv(8) + size, = struct.unpack("!Q", buf.getvalue()) + if maxsize is not None and size > maxsize: + return None + return self._recv(size) + + def _poll(self, timeout): + r = wait([self], timeout) + return bool(r) + + +# +# Public functions +# + +class Listener(object): + ''' + Returns a listener object. + + This is a wrapper for a bound socket which is 'listening' for + connections, or for a Windows named pipe. + ''' + def __init__(self, address=None, family=None, backlog=1, authkey=None): + family = family or (address and address_type(address)) \ + or default_family + _validate_family(family) + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + if family == 'AF_PIPE': + if address: + self._listener = PipeListener(address, backlog) + else: + for attempts in itertools.count(): + address = arbitrary_address(family) + try: + self._listener = PipeListener(address, backlog) + break + except OSError as e: + if attempts >= _MAX_PIPE_ATTEMPTS: + raise + if e.winerror not in (_winapi.ERROR_PIPE_BUSY, + _winapi.ERROR_ACCESS_DENIED): + raise + else: + address = address or arbitrary_address(family) + self._listener = SocketListener(address, family, backlog) + + self._authkey = authkey + + def accept(self): + ''' + Accept a connection on the bound socket or named pipe of `self`. + + Returns a `Connection` object. + ''' + if self._listener is None: + raise OSError('listener is closed') + + c = self._listener.accept() + if self._authkey is not None: + deliver_challenge(c, self._authkey) + answer_challenge(c, self._authkey) + return c + + def close(self): + ''' + Close the bound socket or named pipe of `self`. 
+ ''' + listener = self._listener + if listener is not None: + self._listener = None + listener.close() + + @property + def address(self): + return self._listener._address + + @property + def last_accepted(self): + return self._listener._last_accepted + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address, family=None, authkey=None): + ''' + Returns a connection to the address of a `Listener` + ''' + family = family or address_type(address) + _validate_family(family) + if family == 'AF_PIPE': + c = PipeClient(address) + else: + c = SocketClient(address) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + if authkey is not None: + answer_challenge(c, authkey) + deliver_challenge(c, authkey) + + return c + + +if sys.platform != 'win32': + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + if duplex: + s1, s2 = socket.socketpair() + s1.setblocking(True) + s2.setblocking(True) + c1 = Connection(s1.detach()) + c2 = Connection(s2.detach()) + else: + fd1, fd2 = os.pipe() + c1 = Connection(fd1, writable=False) + c2 = Connection(fd2, readable=False) + + return c1, c2 + +else: + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + if duplex: + openmode = _winapi.PIPE_ACCESS_DUPLEX + access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE + obsize, ibsize = BUFSIZE, BUFSIZE + else: + openmode = _winapi.PIPE_ACCESS_INBOUND + access = _winapi.GENERIC_WRITE + obsize, ibsize = 0, BUFSIZE + + for attempts in itertools.count(): + address = arbitrary_address('AF_PIPE') + try: + h1 = _winapi.CreateNamedPipe( + address, openmode | _winapi.FILE_FLAG_OVERLAPPED | + _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, + # default security descriptor: the handle cannot be inherited + _winapi.NULL + ) + break + except OSError as e: + if attempts >= _MAX_PIPE_ATTEMPTS: + raise + if e.winerror not in (_winapi.ERROR_PIPE_BUSY, + _winapi.ERROR_ACCESS_DENIED): + raise + h2 = _winapi.CreateFile( + address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + _winapi.SetNamedPipeHandleState( + h2, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + + overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) + _, err = overlapped.GetOverlappedResult(True) + assert err == 0 + + c1 = PipeConnection(h1, writable=duplex) + c2 = PipeConnection(h2, readable=duplex) + + return c1, c2 + +# +# Definitions for connections based on sockets +# + +class SocketListener(object): + ''' + Representation of a socket which is bound to an address and listening + ''' + def __init__(self, address, family, backlog=1): + self._socket = socket.socket(getattr(socket, family)) + try: + # SO_REUSEADDR has different semantics on Windows (issue #2550). 
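+            # (On Windows, setting it would allow an unrelated socket to
+            # bind to a port that is already in use, so it is restricted
+            # to POSIX platforms here.)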
+ if os.name == 'posix': + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + self._socket.setblocking(True) + self._socket.bind(address) + self._socket.listen(backlog) + self._address = self._socket.getsockname() + except OSError: + self._socket.close() + raise + self._family = family + self._last_accepted = None + + if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): + # Linux abstract socket namespaces do not need to be explicitly unlinked + self._unlink = util.Finalize( + self, os.unlink, args=(address,), exitpriority=0 + ) + else: + self._unlink = None + + def accept(self): + s, self._last_accepted = self._socket.accept() + s.setblocking(True) + return Connection(s.detach()) + + def close(self): + try: + self._socket.close() + finally: + unlink = self._unlink + if unlink is not None: + self._unlink = None + unlink() + + +def SocketClient(address): + ''' + Return a connection object connected to the socket given by `address` + ''' + family = address_type(address) + with socket.socket( getattr(socket, family) ) as s: + s.setblocking(True) + s.connect(address) + return Connection(s.detach()) + +# +# Definitions for connections based on named pipes +# + +if sys.platform == 'win32': + + class PipeListener(object): + ''' + Representation of a named pipe + ''' + def __init__(self, address, backlog=None): + self._address = address + self._handle_queue = [self._new_handle(first=True)] + + self._last_accepted = None + util.sub_debug('listener created with address=%r', self._address) + self.close = util.Finalize( + self, PipeListener._finalize_pipe_listener, + args=(self._handle_queue, self._address), exitpriority=0 + ) + + def _new_handle(self, first=False): + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + return _winapi.CreateNamedPipe( + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL + ) + + def accept(self): + self._handle_queue.append(self._new_handle()) + handle = self._handle_queue.pop(0) + try: + ov = _winapi.ConnectNamedPipe(handle, overlapped=True) + except OSError as e: + if e.winerror != _winapi.ERROR_NO_DATA: + raise + # ERROR_NO_DATA can occur if a client has already connected, + # written data and then disconnected -- see Issue 14725. 
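+                # In that case the pipe end is already connected, so the
+                # overlapped wait in the else-branch is skipped and the
+                # handle is returned directly as a PipeConnection.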
+ else: + try: + res = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + except: + ov.cancel() + _winapi.CloseHandle(handle) + raise + finally: + _, err = ov.GetOverlappedResult(True) + assert err == 0 + return PipeConnection(handle) + + @staticmethod + def _finalize_pipe_listener(queue, address): + util.sub_debug('closing listener with address=%r', address) + for handle in queue: + _winapi.CloseHandle(handle) + + def PipeClient(address): + ''' + Return a connection object connected to the pipe given by `address` + ''' + t = _init_timeout() + while 1: + try: + _winapi.WaitNamedPipe(address, 1000) + h = _winapi.CreateFile( + address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, + 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + except OSError as e: + if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): + raise + else: + break + else: + raise + + _winapi.SetNamedPipeHandleState( + h, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + return PipeConnection(h) + +# +# Authentication stuff +# + +MESSAGE_LENGTH = 40 # MUST be > 20 + +_CHALLENGE = b'#CHALLENGE#' +_WELCOME = b'#WELCOME#' +_FAILURE = b'#FAILURE#' + +# multiprocessing.connection Authentication Handshake Protocol Description +# (as documented for reference after reading the existing code) +# ============================================================================= +# +# On Windows: native pipes with "overlapped IO" are used to send the bytes, +# instead of the length prefix SIZE scheme described below. (ie: the OS deals +# with message sizes for us) +# +# Protocol error behaviors: +# +# On POSIX, any failure to receive the length prefix into SIZE, for SIZE greater +# than the requested maxsize to receive, or receiving fewer than SIZE bytes +# results in the connection being closed and auth to fail. +# +# On Windows, receiving too few bytes is never a low level _recv_bytes read +# error, receiving too many will trigger an error only if receive maxsize +# value was larger than 128 OR the if the data arrived in smaller pieces. +# +# Serving side Client side +# ------------------------------ --------------------------------------- +# 0. Open a connection on the pipe. +# 1. Accept connection. +# 2. Random 20+ bytes -> MESSAGE +# Modern servers always send +# more than 20 bytes and include +# a {digest} prefix on it with +# their preferred HMAC digest. +# Legacy ones send ==20 bytes. +# 3. send 4 byte length (net order) +# prefix followed by: +# b'#CHALLENGE#' + MESSAGE +# 4. Receive 4 bytes, parse as network byte +# order integer. If it is -1, receive an +# additional 8 bytes, parse that as network +# byte order. The result is the length of +# the data that follows -> SIZE. +# 5. Receive min(SIZE, 256) bytes -> M1 +# 6. Assert that M1 starts with: +# b'#CHALLENGE#' +# 7. Strip that prefix from M1 into -> M2 +# 7.1. Parse M2: if it is exactly 20 bytes in +# length this indicates a legacy server +# supporting only HMAC-MD5. Otherwise the +# 7.2. preferred digest is looked up from an +# expected "{digest}" prefix on M2. No prefix +# or unsupported digest? <- AuthenticationError +# 7.3. Put divined algorithm name in -> D_NAME +# 8. Compute HMAC-D_NAME of AUTHKEY, M2 -> C_DIGEST +# 9. Send 4 byte length prefix (net order) +# followed by C_DIGEST bytes. +# 10. Receive 4 or 4+8 byte length +# prefix (#4 dance) -> SIZE. +# 11. Receive min(SIZE, 256) -> C_D. +# 11.1. Parse C_D: legacy servers +# accept it as is, "md5" -> D_NAME +# 11.2. 
modern servers check the length +# of C_D, IF it is 16 bytes? +# 11.2.1. "md5" -> D_NAME +# and skip to step 12. +# 11.3. longer? expect and parse a "{digest}" +# prefix into -> D_NAME. +# Strip the prefix and store remaining +# bytes in -> C_D. +# 11.4. Don't like D_NAME? <- AuthenticationError +# 12. Compute HMAC-D_NAME of AUTHKEY, +# MESSAGE into -> M_DIGEST. +# 13. Compare M_DIGEST == C_D: +# 14a: Match? Send length prefix & +# b'#WELCOME#' +# <- RETURN +# 14b: Mismatch? Send len prefix & +# b'#FAILURE#' +# <- CLOSE & AuthenticationError +# 15. Receive 4 or 4+8 byte length prefix (net +# order) again as in #4 into -> SIZE. +# 16. Receive min(SIZE, 256) bytes -> M3. +# 17. Compare M3 == b'#WELCOME#': +# 17a. Match? <- RETURN +# 17b. Mismatch? <- CLOSE & AuthenticationError +# +# If this RETURNed, the connection remains open: it has been authenticated. +# +# Length prefixes are used consistently. Even on the legacy protocol, this +# was good fortune and allowed us to evolve the protocol by using the length +# of the opening challenge or length of the returned digest as a signal as +# to which protocol the other end supports. + +_ALLOWED_DIGESTS = frozenset( + {b'md5', b'sha256', b'sha384', b'sha3_256', b'sha3_384'}) +_MAX_DIGEST_LEN = max(len(_) for _ in _ALLOWED_DIGESTS) + +# Old hmac-md5 only server versions from Python <=3.11 sent a message of this +# length. It happens to not match the length of any supported digest so we can +# use a message of this length to indicate that we should work in backwards +# compatible md5-only mode without a {digest_name} prefix on our response. +_MD5ONLY_MESSAGE_LENGTH = 20 +_MD5_DIGEST_LEN = 16 +_LEGACY_LENGTHS = (_MD5ONLY_MESSAGE_LENGTH, _MD5_DIGEST_LEN) + + +def _get_digest_name_and_payload(message): # type: (bytes) -> tuple[str, bytes] + """Returns a digest name and the payload for a response hash. + + If a legacy protocol is detected based on the message length + or contents the digest name returned will be empty to indicate + legacy mode where MD5 and no digest prefix should be sent. + """ + # modern message format: b"{digest}payload" longer than 20 bytes + # legacy message format: 16 or 20 byte b"payload" + if len(message) in _LEGACY_LENGTHS: + # Either this was a legacy server challenge, or we're processing + # a reply from a legacy client that sent an unprefixed 16-byte + # HMAC-MD5 response. All messages using the modern protocol will + # be longer than either of these lengths. + return '', message + if (message.startswith(b'{') and + (curly := message.find(b'}', 1, _MAX_DIGEST_LEN+2)) > 0): + digest = message[1:curly] + if digest in _ALLOWED_DIGESTS: + payload = message[curly+1:] + return digest.decode('ascii'), payload + raise AuthenticationError( + 'unsupported message length, missing digest prefix, ' + f'or unsupported digest: {message=}') + + +def _create_response(authkey, message): + """Create a MAC based on authkey and message + + The MAC algorithm defaults to HMAC-MD5, unless MD5 is not available or + the message has a '{digest_name}' prefix. For legacy HMAC-MD5, the response + is the raw MAC, otherwise the response is prefixed with '{digest_name}', + e.g. b'{sha256}abcdefg...' + + Note: The MAC protects the entire message including the digest_name prefix. + """ + import hmac + digest_name = _get_digest_name_and_payload(message)[0] + # The MAC protects the entire message: digest header and payload. + if not digest_name: + # Legacy server without a {digest} prefix on message. + # Generate a legacy non-prefixed HMAC-MD5 reply. 
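+        # (The legacy reply is the raw 16-byte HMAC-MD5 digest, i.e.
+        # _MD5_DIGEST_LEN bytes, with no '{digest}' prefix.)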
+ try: + return hmac.new(authkey, message, 'md5').digest() + except ValueError: + # HMAC-MD5 is not available (FIPS mode?), fall back to + # HMAC-SHA2-256 modern protocol. The legacy server probably + # doesn't support it and will reject us anyways. :shrug: + digest_name = 'sha256' + # Modern protocol, indicate the digest used in the reply. + response = hmac.new(authkey, message, digest_name).digest() + return b'{%s}%s' % (digest_name.encode('ascii'), response) + + +def _verify_challenge(authkey, message, response): + """Verify MAC challenge + + If our message did not include a digest_name prefix, the client is allowed + to select a stronger digest_name from _ALLOWED_DIGESTS. + + In case our message is prefixed, a client cannot downgrade to a weaker + algorithm, because the MAC is calculated over the entire message + including the '{digest_name}' prefix. + """ + import hmac + response_digest, response_mac = _get_digest_name_and_payload(response) + response_digest = response_digest or 'md5' + try: + expected = hmac.new(authkey, message, response_digest).digest() + except ValueError: + raise AuthenticationError(f'{response_digest=} unsupported') + if len(expected) != len(response_mac): + raise AuthenticationError( + f'expected {response_digest!r} of length {len(expected)} ' + f'got {len(response_mac)}') + if not hmac.compare_digest(expected, response_mac): + raise AuthenticationError('digest received was wrong') + + +def deliver_challenge(connection, authkey: bytes, digest_name='sha256'): + if not isinstance(authkey, bytes): + raise ValueError( + "Authkey must be bytes, not {0!s}".format(type(authkey))) + assert MESSAGE_LENGTH > _MD5ONLY_MESSAGE_LENGTH, "protocol constraint" + message = os.urandom(MESSAGE_LENGTH) + message = b'{%s}%s' % (digest_name.encode('ascii'), message) + # Even when sending a challenge to a legacy client that does not support + # digest prefixes, they'll take the entire thing as a challenge and + # respond to it with a raw HMAC-MD5. 
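+    # With the default digest_name, the bytes on the wire are therefore
+    # b'#CHALLENGE#{sha256}' followed by MESSAGE_LENGTH random bytes.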
+    connection.send_bytes(_CHALLENGE + message)
+    response = connection.recv_bytes(256)        # reject large message
+    try:
+        _verify_challenge(authkey, message, response)
+    except AuthenticationError:
+        connection.send_bytes(_FAILURE)
+        raise
+    else:
+        connection.send_bytes(_WELCOME)
+
+
+def answer_challenge(connection, authkey: bytes):
+    if not isinstance(authkey, bytes):
+        raise ValueError(
+            "Authkey must be bytes, not {0!s}".format(type(authkey)))
+    message = connection.recv_bytes(256)         # reject large message
+    if not message.startswith(_CHALLENGE):
+        raise AuthenticationError(
+            f'Protocol error, expected challenge: {message=}')
+    message = message[len(_CHALLENGE):]
+    if len(message) < _MD5ONLY_MESSAGE_LENGTH:
+        raise AuthenticationError(f'challenge too short: {len(message)} bytes')
+    digest = _create_response(authkey, message)
+    connection.send_bytes(digest)
+    response = connection.recv_bytes(256)        # reject large message
+    if response != _WELCOME:
+        raise AuthenticationError('digest sent was rejected')
+
+#
+# Support for using xmlrpclib for serialization
+#
+
+class ConnectionWrapper(object):
+    def __init__(self, conn, dumps, loads):
+        self._conn = conn
+        self._dumps = dumps
+        self._loads = loads
+        for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
+            obj = getattr(conn, attr)
+            setattr(self, attr, obj)
+    def send(self, obj):
+        s = self._dumps(obj)
+        self._conn.send_bytes(s)
+    def recv(self):
+        s = self._conn.recv_bytes()
+        return self._loads(s)
+
+def _xml_dumps(obj):
+    return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')
+
+def _xml_loads(s):
+    (obj,), method = xmlrpclib.loads(s.decode('utf-8'))
+    return obj
+
+class XmlListener(Listener):
+    def accept(self):
+        global xmlrpclib
+        import xmlrpc.client as xmlrpclib
+        obj = Listener.accept(self)
+        return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
+
+def XmlClient(*args, **kwds):
+    global xmlrpclib
+    import xmlrpc.client as xmlrpclib
+    return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
+
+#
+# Wait
+#
+
+if sys.platform == 'win32':
+
+    def _exhaustive_wait(handles, timeout):
+        # Return ALL handles which are currently signalled.  (Only
+        # returning the first signalled might create starvation issues.)
+        L = list(handles)
+        ready = []
+        # Windows limits WaitForMultipleObjects at 64 handles, and we use a
+        # few for synchronisation, so we switch to batched waits at 60.
+        if len(L) > 60:
+            try:
+                res = _winapi.BatchedWaitForMultipleObjects(L, False, timeout)
+            except TimeoutError:
+                return []
+            ready.extend(L[i] for i in res)
+            if res:
+                # Keep only the handles after the first signalled one that
+                # were not themselves signalled.
+                L = [h for i, h in enumerate(L) if i > res[0] and i not in res]
+            timeout = 0
+        while L:
+            short_L = L[:60] if len(L) > 60 else L
+            res = _winapi.WaitForMultipleObjects(short_L, False, timeout)
+            if res == WAIT_TIMEOUT:
+                break
+            elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
+                res -= WAIT_OBJECT_0
+            elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
+                res -= WAIT_ABANDONED_0
+            else:
+                raise RuntimeError('Should not get here')
+            ready.append(L[res])
+            L = L[res+1:]
+            timeout = 0
+        return ready
+
+    _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}
+
+    def wait(object_list, timeout=None):
+        '''
+        Wait till an object in object_list is ready/readable.
+
+        Returns list of those objects in object_list which are ready/readable.
+ ''' + if timeout is None: + timeout = INFINITE + elif timeout < 0: + timeout = 0 + else: + timeout = int(timeout * 1000 + 0.5) + + object_list = list(object_list) + waithandle_to_obj = {} + ov_list = [] + ready_objects = set() + ready_handles = set() + + try: + for o in object_list: + try: + fileno = getattr(o, 'fileno') + except AttributeError: + waithandle_to_obj[o.__index__()] = o + else: + # start an overlapped read of length zero + try: + ov, err = _winapi.ReadFile(fileno(), 0, True) + except OSError as e: + ov, err = None, e.winerror + if err not in _ready_errors: + raise + if err == _winapi.ERROR_IO_PENDING: + ov_list.append(ov) + waithandle_to_obj[ov.event] = o + else: + # If o.fileno() is an overlapped pipe handle and + # err == 0 then there is a zero length message + # in the pipe, but it HAS NOT been consumed... + if ov and sys.getwindowsversion()[:2] >= (6, 2): + # ... except on Windows 8 and later, where + # the message HAS been consumed. + try: + _, err = ov.GetOverlappedResult(False) + except OSError as e: + err = e.winerror + if not err and hasattr(o, '_got_empty_message'): + o._got_empty_message = True + ready_objects.add(o) + timeout = 0 + + ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) + finally: + # request that overlapped reads stop + for ov in ov_list: + ov.cancel() + + # wait for all overlapped reads to stop + for ov in ov_list: + try: + _, err = ov.GetOverlappedResult(True) + except OSError as e: + err = e.winerror + if err not in _ready_errors: + raise + if err != _winapi.ERROR_OPERATION_ABORTED: + o = waithandle_to_obj[ov.event] + ready_objects.add(o) + if err == 0: + # If o.fileno() is an overlapped pipe handle then + # a zero length message HAS been consumed. + if hasattr(o, '_got_empty_message'): + o._got_empty_message = True + + ready_objects.update(waithandle_to_obj[h] for h in ready_handles) + return [o for o in object_list if o in ready_objects] + +else: + + import selectors + + # poll/select have the advantage of not requiring any extra file + # descriptor, contrarily to epoll/kqueue (also, they require a single + # syscall). + if hasattr(selectors, 'PollSelector'): + _WaitSelector = selectors.PollSelector + else: + _WaitSelector = selectors.SelectSelector + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. + ''' + with _WaitSelector() as selector: + for obj in object_list: + selector.register(obj, selectors.EVENT_READ) + + if timeout is not None: + deadline = time.monotonic() + timeout + + while True: + ready = selector.select(timeout) + if ready: + return [key.fileobj for (key, events) in ready] + else: + if timeout is not None: + timeout = deadline - time.monotonic() + if timeout < 0: + return ready + +# +# Make connection and socket objects shareable if possible +# + +if sys.platform == 'win32': + def reduce_connection(conn): + handle = conn.fileno() + with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: + from . 
import resource_sharer
+        ds = resource_sharer.DupSocket(s)
+        return rebuild_connection, (ds, conn.readable, conn.writable)
+    def rebuild_connection(ds, readable, writable):
+        sock = ds.detach()
+        return Connection(sock.detach(), readable, writable)
+    reduction.register(Connection, reduce_connection)
+
+    def reduce_pipe_connection(conn):
+        access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
+                  (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
+        dh = reduction.DupHandle(conn.fileno(), access)
+        return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
+    def rebuild_pipe_connection(dh, readable, writable):
+        handle = dh.detach()
+        return PipeConnection(handle, readable, writable)
+    reduction.register(PipeConnection, reduce_pipe_connection)
+
+else:
+    def reduce_connection(conn):
+        df = reduction.DupFd(conn.fileno())
+        return rebuild_connection, (df, conn.readable, conn.writable)
+    def rebuild_connection(df, readable, writable):
+        fd = df.detach()
+        return Connection(fd, readable, writable)
+    reduction.register(Connection, reduce_connection)
diff --git a/Python314_4_x86_Template/Lib/multiprocessing/context.py b/Python314_4_x86_Template/Lib/multiprocessing/context.py
new file mode 100644
index 00000000..5fa6d7e4
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/multiprocessing/context.py
@@ -0,0 +1,383 @@
+import os
+import sys
+import threading
+
+from . import process
+from . import reduction
+
+__all__ = ()
+
+#
+# Exceptions
+#
+
+class ProcessError(Exception):
+    pass
+
+class BufferTooShort(ProcessError):
+    pass
+
+class TimeoutError(ProcessError):
+    pass
+
+class AuthenticationError(ProcessError):
+    pass
+
+#
+# Base type for contexts. Bound methods of an instance of this type are
+# included in __all__ of __init__.py
+#
+
+class BaseContext(object):
+
+    ProcessError = ProcessError
+    BufferTooShort = BufferTooShort
+    TimeoutError = TimeoutError
+    AuthenticationError = AuthenticationError
+
+    current_process = staticmethod(process.current_process)
+    parent_process = staticmethod(process.parent_process)
+    active_children = staticmethod(process.active_children)
+
+    def cpu_count(self):
+        '''Returns the number of CPUs in the system'''
+        num = os.cpu_count()
+        if num is None:
+            raise NotImplementedError('cannot determine number of cpus')
+        else:
+            return num
+
+    def Manager(self):
+        '''Returns a manager associated with a running server process
+
+        The manager's methods such as `Lock()`, `Condition()` and `Queue()`
+        can be used to create shared objects.
+        '''
+        from .managers import SyncManager
+        m = SyncManager(ctx=self.get_context())
+        m.start()
+        return m
+
+    def Pipe(self, duplex=True):
+        '''Returns two connection objects connected by a pipe'''
+        from .connection import Pipe
+        return Pipe(duplex)
+
+    def Lock(self):
+        '''Returns a non-recursive lock object'''
+        from .synchronize import Lock
+        return Lock(ctx=self.get_context())
+
+    def RLock(self):
+        '''Returns a recursive lock object'''
+        from .synchronize import RLock
+        return RLock(ctx=self.get_context())
+
+    def Condition(self, lock=None):
+        '''Returns a condition object'''
+        from .synchronize import Condition
+        return Condition(lock, ctx=self.get_context())
+
+    def Semaphore(self, value=1):
+        '''Returns a semaphore object'''
+        from .synchronize import Semaphore
+        return Semaphore(value, ctx=self.get_context())
+
+    def BoundedSemaphore(self, value=1):
+        '''Returns a bounded semaphore object'''
+        from .synchronize import BoundedSemaphore
+        return BoundedSemaphore(value, ctx=self.get_context())
+
+    def Event(self):
+        '''Returns an event object'''
+        from .synchronize import Event
+        return Event(ctx=self.get_context())
+
+    def Barrier(self, parties, action=None, timeout=None):
+        '''Returns a barrier object'''
+        from .synchronize import Barrier
+        return Barrier(parties, action, timeout, ctx=self.get_context())
+
+    def Queue(self, maxsize=0):
+        '''Returns a queue object'''
+        from .queues import Queue
+        return Queue(maxsize, ctx=self.get_context())
+
+    def JoinableQueue(self, maxsize=0):
+        '''Returns a joinable queue object'''
+        from .queues import JoinableQueue
+        return JoinableQueue(maxsize, ctx=self.get_context())
+
+    def SimpleQueue(self):
+        '''Returns a simple queue object'''
+        from .queues import SimpleQueue
+        return SimpleQueue(ctx=self.get_context())
+
+    def Pool(self, processes=None, initializer=None, initargs=(),
+             maxtasksperchild=None):
+        '''Returns a process pool object'''
+        from .pool import Pool
+        return Pool(processes, initializer, initargs, maxtasksperchild,
+                    context=self.get_context())
+
+    def RawValue(self, typecode_or_type, *args):
+        '''Returns a shared object'''
+        from .sharedctypes import RawValue
+        return RawValue(typecode_or_type, *args)
+
+    def RawArray(self, typecode_or_type, size_or_initializer):
+        '''Returns a shared array'''
+        from .sharedctypes import RawArray
+        return RawArray(typecode_or_type, size_or_initializer)
+
+    def Value(self, typecode_or_type, *args, lock=True):
+        '''Returns a synchronized shared object'''
+        from .sharedctypes import Value
+        return Value(typecode_or_type, *args, lock=lock,
+                     ctx=self.get_context())
+
+    def Array(self, typecode_or_type, size_or_initializer, *, lock=True):
+        '''Returns a synchronized shared array'''
+        from .sharedctypes import Array
+        return Array(typecode_or_type, size_or_initializer, lock=lock,
+                     ctx=self.get_context())
+
+    def freeze_support(self):
+        '''Check whether this is a fake forked process in a frozen executable.
+        If so then run code specified by commandline and exit.
+        '''
+        # gh-140814: allow_none=True avoids locking in the default start
+        # method, which would cause a later set_start_method() to fail.
+        # None is safe to pass through: spawn.freeze_support()
+        # independently detects whether this process is a spawned
+        # child, so the start method check here is only an optimization.
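+        # A typical call site (illustrative) is the top of a frozen
+        # program's ``if __name__ == '__main__':`` block, i.e.
+        # multiprocessing.freeze_support().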
+ if (getattr(sys, 'frozen', False) + and self.get_start_method(allow_none=True) in ('spawn', None)): + from .spawn import freeze_support + freeze_support() + + def get_logger(self): + '''Return package logger -- if it does not already exist then + it is created. + ''' + from .util import get_logger + return get_logger() + + def log_to_stderr(self, level=None): + '''Turn on logging and add a handler which prints to stderr''' + from .util import log_to_stderr + return log_to_stderr(level) + + def allow_connection_pickling(self): + '''Install support for sending connections and sockets + between processes + ''' + # This is undocumented. In previous versions of multiprocessing + # its only effect was to make socket objects inheritable on Windows. + from . import connection # noqa: F401 + + def set_executable(self, executable): + '''Sets the path to a python.exe or pythonw.exe binary used to run + child processes instead of sys.executable when using the 'spawn' + start method. Useful for people embedding Python. + ''' + from .spawn import set_executable + set_executable(executable) + + def set_forkserver_preload(self, module_names): + '''Set list of module names to try to load in forkserver process. + This is really just a hint. + ''' + from .forkserver import set_forkserver_preload + set_forkserver_preload(module_names) + + def get_context(self, method=None): + if method is None: + return self + try: + ctx = _concrete_contexts[method] + except KeyError: + raise ValueError('cannot find context for %r' % method) from None + ctx._check_available() + return ctx + + def get_start_method(self, allow_none=False): + return self._name + + def set_start_method(self, method, force=False): + raise ValueError('cannot set start method of concrete context') + + @property + def reducer(self): + '''Controls how objects will be reduced to a form that can be + shared with other processes.''' + return globals().get('reduction') + + @reducer.setter + def reducer(self, reduction): + globals()['reduction'] = reduction + + def _check_available(self): + pass + +# +# Type of default context -- underlying context can be set at most once +# + +class Process(process.BaseProcess): + _start_method = None + @staticmethod + def _Popen(process_obj): + return _default_context.get_context().Process._Popen(process_obj) + + @staticmethod + def _after_fork(): + return _default_context.get_context().Process._after_fork() + +class DefaultContext(BaseContext): + Process = Process + + def __init__(self, context): + self._default_context = context + self._actual_context = None + + def get_context(self, method=None): + if method is None: + if self._actual_context is None: + self._actual_context = self._default_context + return self._actual_context + else: + return super().get_context(method) + + def set_start_method(self, method, force=False): + if self._actual_context is not None and not force: + raise RuntimeError('context has already been set') + if method is None and force: + self._actual_context = None + return + self._actual_context = self.get_context(method) + + def get_start_method(self, allow_none=False): + if self._actual_context is None: + if allow_none: + return None + self._actual_context = self._default_context + return self._actual_context._name + + def get_all_start_methods(self): + """Returns a list of the supported start methods, default first.""" + default = self._default_context.get_start_method() + start_method_names = [default] + start_method_names.extend( + name for name in _concrete_contexts if name != default + 
) + return start_method_names + + +# +# Context types for fixed start method +# + +if sys.platform != 'win32': + + class ForkProcess(process.BaseProcess): + _start_method = 'fork' + @staticmethod + def _Popen(process_obj): + from .popen_fork import Popen + return Popen(process_obj) + + class SpawnProcess(process.BaseProcess): + _start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_posix import Popen + return Popen(process_obj) + + @staticmethod + def _after_fork(): + # process is spawned, nothing to do + pass + + class ForkServerProcess(process.BaseProcess): + _start_method = 'forkserver' + @staticmethod + def _Popen(process_obj): + from .popen_forkserver import Popen + return Popen(process_obj) + + class ForkContext(BaseContext): + _name = 'fork' + Process = ForkProcess + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + class ForkServerContext(BaseContext): + _name = 'forkserver' + Process = ForkServerProcess + def _check_available(self): + if not reduction.HAVE_SEND_HANDLE: + raise ValueError('forkserver start method not available') + + _concrete_contexts = { + 'fork': ForkContext(), + 'spawn': SpawnContext(), + 'forkserver': ForkServerContext(), + } + # bpo-33725: running arbitrary code after fork() is no longer reliable + # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. + # gh-84559: We changed everyones default to a thread safeish one in 3.14. + if reduction.HAVE_SEND_HANDLE and sys.platform != 'darwin': + _default_context = DefaultContext(_concrete_contexts['forkserver']) + else: + _default_context = DefaultContext(_concrete_contexts['spawn']) + +else: # Windows + + class SpawnProcess(process.BaseProcess): + _start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_win32 import Popen + return Popen(process_obj) + + @staticmethod + def _after_fork(): + # process is spawned, nothing to do + pass + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + _concrete_contexts = { + 'spawn': SpawnContext(), + } + _default_context = DefaultContext(_concrete_contexts['spawn']) + +# +# Force the start method +# + +def _force_start_method(method): + _default_context._actual_context = _concrete_contexts[method] + +# +# Check that the current thread is spawning a child process +# + +_tls = threading.local() + +def get_spawning_popen(): + return getattr(_tls, 'spawning_popen', None) + +def set_spawning_popen(popen): + _tls.spawning_popen = popen + +def assert_spawning(obj): + if get_spawning_popen() is None: + raise RuntimeError( + '%s objects should only be shared between processes' + ' through inheritance' % type(obj).__name__ + ) diff --git a/Python314_4_x86_Template/Lib/multiprocessing/dummy/__init__.py b/Python314_4_x86_Template/Lib/multiprocessing/dummy/__init__.py new file mode 100644 index 00000000..7dc5d1c8 --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/dummy/__init__.py @@ -0,0 +1,126 @@ +# +# Support for the API of the multiprocessing package using threads +# +# multiprocessing/dummy/__init__.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
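The context classes above are what the public start-method API dispatches to. As a minimal sketch (not part of the patch) of exercising them through multiprocessing, assuming the template's Python:

    import multiprocessing as mp

    def work(q):
        q.put('hello')

    if __name__ == '__main__':
        ctx = mp.get_context('spawn')           # a concrete SpawnContext
        q = ctx.Queue()
        p = ctx.Process(target=work, args=(q,))
        p.start()
        print(q.get())                          # -> 'hello'
        print(mp.get_all_start_methods())       # default method listed first
        p.join()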
+# + +__all__ = [ + 'Process', 'current_process', 'active_children', 'freeze_support', + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', + 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' + ] + +# +# Imports +# + +import threading +import sys +import weakref +import array + +from .connection import Pipe +from threading import Lock, RLock, Semaphore, BoundedSemaphore +from threading import Event, Condition, Barrier +from queue import Queue + +# +# +# + +class DummyProcess(threading.Thread): + + def __init__(self, group=None, target=None, name=None, args=(), kwargs=None): + threading.Thread.__init__(self, group, target, name, args, kwargs) + self._pid = None + self._children = weakref.WeakKeyDictionary() + self._start_called = False + self._parent = current_process() + + def start(self): + if self._parent is not current_process(): + raise RuntimeError( + "Parent is {0!r} but current_process is {1!r}".format( + self._parent, current_process())) + self._start_called = True + if hasattr(self._parent, '_children'): + self._parent._children[self] = None + threading.Thread.start(self) + + @property + def exitcode(self): + if self._start_called and not self.is_alive(): + return 0 + else: + return None + +# +# +# + +Process = DummyProcess +current_process = threading.current_thread +current_process()._children = weakref.WeakKeyDictionary() + +def active_children(): + children = current_process()._children + for p in list(children): + if not p.is_alive(): + children.pop(p, None) + return list(children) + +def freeze_support(): + pass + +# +# +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +dict = dict +list = list + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + + @property + def value(self): + return self._value + + @value.setter + def value(self, value): + self._value = value + + def __repr__(self): + return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) + +def Manager(): + return sys.modules[__name__] + +def shutdown(): + pass + +def Pool(processes=None, initializer=None, initargs=()): + from ..pool import ThreadPool + return ThreadPool(processes, initializer, initargs) + +JoinableQueue = Queue diff --git a/Python313_13_x86_Template/Lib/multiprocessing/dummy/connection.py b/Python314_4_x86_Template/Lib/multiprocessing/dummy/connection.py similarity index 100% rename from Python313_13_x86_Template/Lib/multiprocessing/dummy/connection.py rename to Python314_4_x86_Template/Lib/multiprocessing/dummy/connection.py diff --git a/Python314_4_x86_Template/Lib/multiprocessing/forkserver.py b/Python314_4_x86_Template/Lib/multiprocessing/forkserver.py new file mode 100644 index 00000000..e431b3f1 --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/forkserver.py @@ -0,0 +1,429 @@ +import atexit +import errno +import os +import selectors +import signal +import socket +import struct +import sys +import threading +import warnings + +from . import AuthenticationError +from . import connection +from . import process +from .context import reduction +from . import resource_tracker +from . 
import spawn +from . import util + +__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', + 'set_forkserver_preload'] + +# +# +# + +MAXFDS_TO_SEND = 256 +SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t +_AUTHKEY_LEN = 32 # <= PIPEBUF so it fits a single write to an empty pipe. + +# +# Forkserver class +# + +class ForkServer(object): + + def __init__(self): + self._forkserver_authkey = None + self._forkserver_address = None + self._forkserver_alive_fd = None + self._forkserver_pid = None + self._inherited_fds = None + self._lock = threading.Lock() + self._preload_modules = ['__main__'] + + def _stop(self): + # Method used by unit tests to stop the server + with self._lock: + self._stop_unlocked() + + def _stop_unlocked(self): + if self._forkserver_pid is None: + return + + # close the "alive" file descriptor asks the server to stop + os.close(self._forkserver_alive_fd) + self._forkserver_alive_fd = None + + os.waitpid(self._forkserver_pid, 0) + self._forkserver_pid = None + + if not util.is_abstract_socket_namespace(self._forkserver_address): + os.unlink(self._forkserver_address) + self._forkserver_address = None + self._forkserver_authkey = None + + def set_forkserver_preload(self, modules_names): + '''Set list of module names to try to load in forkserver process.''' + if not all(type(mod) is str for mod in modules_names): + raise TypeError('module_names must be a list of strings') + self._preload_modules = modules_names + + def get_inherited_fds(self): + '''Return list of fds inherited from parent process. + + This returns None if the current process was not started by fork + server. + ''' + return self._inherited_fds + + def connect_to_new_process(self, fds): + '''Request forkserver to create a child process. + + Returns a pair of fds (status_r, data_w). The calling process can read + the child process's pid and (eventually) its returncode from status_r. + The calling process should write to data_w the pickled preparation and + process data. + ''' + self.ensure_running() + assert self._forkserver_authkey + if len(fds) + 4 >= MAXFDS_TO_SEND: + raise ValueError('too many fds') + with socket.socket(socket.AF_UNIX) as client: + client.connect(self._forkserver_address) + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + allfds = [child_r, child_w, self._forkserver_alive_fd, + resource_tracker.getfd()] + allfds += fds + try: + client.setblocking(True) + wrapped_client = connection.Connection(client.fileno()) + # The other side of this exchange happens in the child as + # implemented in main(). + try: + connection.answer_challenge( + wrapped_client, self._forkserver_authkey) + connection.deliver_challenge( + wrapped_client, self._forkserver_authkey) + finally: + wrapped_client._detach() + del wrapped_client + reduction.sendfds(client, allfds) + return parent_r, parent_w + except: + os.close(parent_r) + os.close(parent_w) + raise + finally: + os.close(child_r) + os.close(child_w) + + def ensure_running(self): + '''Make sure that a fork server is running. + + This can be called from any process. Note that usually a child + process will just reuse the forkserver started by its parent, so + ensure_running() will do nothing. + ''' + with self._lock: + resource_tracker.ensure_running() + if self._forkserver_pid is not None: + # forkserver was launched before, is it still running? 
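ensure_running() and set_forkserver_preload() are re-exported at the bottom of this module and reachable through the top-level multiprocessing API. A hedged sketch, where 'heavy_deps' is a placeholder module name (POSIX only, since the forkserver start method is unavailable on Windows):

    import multiprocessing as mp

    if __name__ == '__main__':
        mp.set_forkserver_preload(['heavy_deps'])   # hypothetical module
        ctx = mp.get_context('forkserver')          # server starts lazily
        p = ctx.Process(target=print, args=('forked from the server',))
        p.start()
        p.join()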
+ pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) + if not pid: + # still alive + return + # dead, launch it again + os.close(self._forkserver_alive_fd) + self._forkserver_authkey = None + self._forkserver_address = None + self._forkserver_alive_fd = None + self._forkserver_pid = None + + # gh-144503: sys_argv is passed as real argv elements after the + # ``-c cmd`` rather than repr'd into main_kws so that a large + # parent sys.argv cannot push the single ``-c`` command string + # over the OS per-argument length limit (MAX_ARG_STRLEN on Linux). + # The child sees them as sys.argv[1:]. + cmd = ('import sys; ' + 'from multiprocessing.forkserver import main; ' + 'main(%d, %d, %r, sys_argv=sys.argv[1:], **%r)') + + main_kws = {} + sys_argv = None + if self._preload_modules: + data = spawn.get_preparation_data('ignore') + if 'sys_path' in data: + main_kws['sys_path'] = data['sys_path'] + if 'init_main_from_path' in data: + main_kws['main_path'] = data['init_main_from_path'] + if 'sys_argv' in data: + sys_argv = data['sys_argv'] + + with socket.socket(socket.AF_UNIX) as listener: + address = connection.arbitrary_address('AF_UNIX') + listener.bind(address) + if not util.is_abstract_socket_namespace(address): + os.chmod(address, 0o600) + listener.listen() + + # all client processes own the write end of the "alive" pipe; + # when they all terminate the read end becomes ready. + alive_r, alive_w = os.pipe() + # A short lived pipe to initialize the forkserver authkey. + authkey_r, authkey_w = os.pipe() + try: + fds_to_pass = [listener.fileno(), alive_r, authkey_r] + main_kws['authkey_r'] = authkey_r + cmd %= (listener.fileno(), alive_r, self._preload_modules, + main_kws) + exe = spawn.get_executable() + args = [exe] + util._args_from_interpreter_flags() + args += ['-c', cmd] + if sys_argv is not None: + args += sys_argv + pid = util.spawnv_passfds(exe, args, fds_to_pass) + except: + os.close(alive_w) + os.close(authkey_w) + raise + finally: + os.close(alive_r) + os.close(authkey_r) + # Authenticate our control socket to prevent access from + # processes we have not shared this key with. 
+                try:
+                    self._forkserver_authkey = os.urandom(_AUTHKEY_LEN)
+                    os.write(authkey_w, self._forkserver_authkey)
+                finally:
+                    os.close(authkey_w)
+            self._forkserver_address = address
+            self._forkserver_alive_fd = alive_w
+            self._forkserver_pid = pid
+
+#
+#
+#
+
+def main(listener_fd, alive_r, preload, main_path=None, sys_path=None,
+         *, sys_argv=None, authkey_r=None):
+    """Run forkserver."""
+    if authkey_r is not None:
+        try:
+            authkey = os.read(authkey_r, _AUTHKEY_LEN)
+            assert len(authkey) == _AUTHKEY_LEN, f'{len(authkey)} < {_AUTHKEY_LEN}'
+        finally:
+            os.close(authkey_r)
+    else:
+        authkey = b''
+
+    if preload:
+        if sys_argv is not None:
+            sys.argv[:] = sys_argv
+        if sys_path is not None:
+            sys.path[:] = sys_path
+        if '__main__' in preload and main_path is not None:
+            process.current_process()._inheriting = True
+            try:
+                spawn.import_main_path(main_path)
+            finally:
+                del process.current_process()._inheriting
+        for modname in preload:
+            try:
+                __import__(modname)
+            except ImportError:
+                pass
+
+    # gh-135335: flush stdout/stderr in case any of the preloaded modules
+    # wrote to them, otherwise children might inherit buffered data
+    util._flush_std_streams()
+
+    util._close_stdin()
+
+    sig_r, sig_w = os.pipe()
+    os.set_blocking(sig_r, False)
+    os.set_blocking(sig_w, False)
+
+    def sigchld_handler(*_unused):
+        # Dummy signal handler, doesn't do anything
+        pass
+
+    handlers = {
+        # unblocking SIGCHLD allows the wakeup fd to notify our event loop
+        signal.SIGCHLD: sigchld_handler,
+        # protect the process from ^C
+        signal.SIGINT: signal.SIG_IGN,
+        }
+    old_handlers = {sig: signal.signal(sig, val)
+                    for (sig, val) in handlers.items()}
+
+    # calling os.write() in the Python signal handler is racy
+    signal.set_wakeup_fd(sig_w)
+
+    # map child pids to client fds
+    pid_to_fd = {}
+
+    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
+            selectors.DefaultSelector() as selector:
+        _forkserver._forkserver_address = listener.getsockname()
+
+        selector.register(listener, selectors.EVENT_READ)
+        selector.register(alive_r, selectors.EVENT_READ)
+        selector.register(sig_r, selectors.EVENT_READ)
+
+        while True:
+            try:
+                while True:
+                    rfds = [key.fileobj for (key, events) in selector.select()]
+                    if rfds:
+                        break
+
+                if alive_r in rfds:
+                    # EOF because no more client processes left
+                    assert os.read(alive_r, 1) == b'', "Not at EOF?"
+                    raise SystemExit
+
+                if sig_r in rfds:
+                    # Got SIGCHLD
+                    os.read(sig_r, 65536)  # exhaust
+                    while True:
+                        # Scan for child processes
+                        try:
+                            pid, sts = os.waitpid(-1, os.WNOHANG)
+                        except ChildProcessError:
+                            break
+                        if pid == 0:
+                            break
+                        child_w = pid_to_fd.pop(pid, None)
+                        if child_w is not None:
+                            returncode = os.waitstatus_to_exitcode(sts)
+
+                            # Send exit code to client process
+                            try:
+                                write_signed(child_w, returncode)
+                            except BrokenPipeError:
+                                # client vanished
+                                pass
+                            os.close(child_w)
+                        else:
+                            # This shouldn't happen really
+                            warnings.warn('forkserver: waitpid returned '
+                                          'unexpected pid %d' % pid)
+
+                if listener in rfds:
+                    # Incoming fork request
+                    with listener.accept()[0] as s:
+                        try:
+                            if authkey:
+                                wrapped_s = connection.Connection(s.fileno())
+                                # The other side of this exchange happens
+                                # in connect_to_new_process().
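The deliver/answer order in the next block mirrors connect_to_new_process() above, so each peer proves knowledge of the key before any fds change hands. The same HMAC challenge runs under the public connection API whenever an authkey is supplied; a minimal sketch with an arbitrary example port:

    import threading
    from multiprocessing.connection import Listener, Client

    address = ('localhost', 6001)         # arbitrary example address

    def serve():
        with Listener(address, authkey=b'secret') as listener:
            with listener.accept() as conn:   # challenge runs inside accept()
                conn.send('authenticated')

    t = threading.Thread(target=serve)
    t.start()
    with Client(address, authkey=b'secret') as conn:
        print(conn.recv())                # -> 'authenticated'
    t.join()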
+ try: + connection.deliver_challenge( + wrapped_s, authkey) + connection.answer_challenge( + wrapped_s, authkey) + finally: + wrapped_s._detach() + del wrapped_s + # Receive fds from client + fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) + except (EOFError, BrokenPipeError, AuthenticationError): + s.close() + continue + if len(fds) > MAXFDS_TO_SEND: + raise RuntimeError( + "Too many ({0:n}) fds to send".format( + len(fds))) + child_r, child_w, *fds = fds + s.close() + pid = os.fork() + if pid == 0: + # Child + code = 1 + try: + listener.close() + selector.close() + unused_fds = [alive_r, child_w, sig_r, sig_w] + unused_fds.extend(pid_to_fd.values()) + atexit._clear() + atexit.register(util._exit_function) + code = _serve_one(child_r, fds, + unused_fds, + old_handlers) + except Exception: + sys.excepthook(*sys.exc_info()) + sys.stderr.flush() + finally: + atexit._run_exitfuncs() + os._exit(code) + else: + # Send pid to client process + try: + write_signed(child_w, pid) + except BrokenPipeError: + # client vanished + pass + pid_to_fd[pid] = child_w + os.close(child_r) + for fd in fds: + os.close(fd) + + except OSError as e: + if e.errno != errno.ECONNABORTED: + raise + + +def _serve_one(child_r, fds, unused_fds, handlers): + # close unnecessary stuff and reset signal handlers + signal.set_wakeup_fd(-1) + for sig, val in handlers.items(): + signal.signal(sig, val) + for fd in unused_fds: + os.close(fd) + + (_forkserver._forkserver_alive_fd, + resource_tracker._resource_tracker._fd, + *_forkserver._inherited_fds) = fds + + # Run process object received over pipe + parent_sentinel = os.dup(child_r) + code = spawn._main(child_r, parent_sentinel) + + return code + + +# +# Read and write signed numbers +# + +def read_signed(fd): + data = bytearray(SIGNED_STRUCT.size) + unread = memoryview(data) + while unread: + count = os.readinto(fd, unread) + if count == 0: + raise EOFError('unexpected EOF') + unread = unread[count:] + + return SIGNED_STRUCT.unpack(data)[0] + +def write_signed(fd, n): + msg = SIGNED_STRUCT.pack(n) + while msg: + nbytes = os.write(fd, msg) + if nbytes == 0: + raise RuntimeError('should not get here') + msg = msg[nbytes:] + +# +# +# + +_forkserver = ForkServer() +ensure_running = _forkserver.ensure_running +get_inherited_fds = _forkserver.get_inherited_fds +connect_to_new_process = _forkserver.connect_to_new_process +set_forkserver_preload = _forkserver.set_forkserver_preload diff --git a/Python313_13_x86_Template/Lib/multiprocessing/heap.py b/Python314_4_x86_Template/Lib/multiprocessing/heap.py similarity index 100% rename from Python313_13_x86_Template/Lib/multiprocessing/heap.py rename to Python314_4_x86_Template/Lib/multiprocessing/heap.py diff --git a/Python314_4_x86_Template/Lib/multiprocessing/managers.py b/Python314_4_x86_Template/Lib/multiprocessing/managers.py new file mode 100644 index 00000000..91bcf243 --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/managers.py @@ -0,0 +1,1438 @@ +# +# Module providing manager classes for dealing +# with shared objects +# +# multiprocessing/managers.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] + +# +# Imports +# + +import sys +import threading +import signal +import array +import collections.abc +import queue +import time +import types +import os +from os import getpid + +from traceback import format_exc + +from . 
import connection +from .context import reduction, get_spawning_popen, ProcessError +from . import pool +from . import process +from . import util +from . import get_context +try: + from . import shared_memory +except ImportError: + HAS_SHMEM = False +else: + HAS_SHMEM = True + __all__.append('SharedMemoryManager') + +# +# Register some things for pickling +# + +def reduce_array(a): + return array.array, (a.typecode, a.tobytes()) +reduction.register(array.array, reduce_array) + +view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] +def rebuild_as_list(obj): + return list, (list(obj),) +for view_type in view_types: + reduction.register(view_type, rebuild_as_list) +del view_type, view_types + +# +# Type for identifying shared objects +# + +class Token(object): + ''' + Type to uniquely identify a shared object + ''' + __slots__ = ('typeid', 'address', 'id') + + def __init__(self, typeid, address, id): + (self.typeid, self.address, self.id) = (typeid, address, id) + + def __getstate__(self): + return (self.typeid, self.address, self.id) + + def __setstate__(self, state): + (self.typeid, self.address, self.id) = state + + def __repr__(self): + return '%s(typeid=%r, address=%r, id=%r)' % \ + (self.__class__.__name__, self.typeid, self.address, self.id) + +# +# Function for communication with a manager's server process +# + +def dispatch(c, id, methodname, args=(), kwds={}): + ''' + Send a message to manager using connection `c` and return response + ''' + c.send((id, methodname, args, kwds)) + kind, result = c.recv() + if kind == '#RETURN': + return result + try: + raise convert_to_error(kind, result) + finally: + del result # break reference cycle + +def convert_to_error(kind, result): + if kind == '#ERROR': + return result + elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): + if not isinstance(result, str): + raise TypeError( + "Result {0!r} (kind '{1}') type is {2}, not str".format( + result, kind, type(result))) + if kind == '#UNSERIALIZABLE': + return RemoteError('Unserializable message: %s\n' % result) + else: + return RemoteError(result) + else: + return ValueError('Unrecognized message type {!r}'.format(kind)) + +class RemoteError(Exception): + def __str__(self): + return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) + +# +# Functions for finding the method names of an object +# + +def all_methods(obj): + ''' + Return a list of names of methods of `obj` + ''' + temp = [] + for name in dir(obj): + func = getattr(obj, name) + if callable(func): + temp.append(name) + return temp + +def public_methods(obj): + ''' + Return a list of names of methods of `obj` which do not start with '_' + ''' + return [name for name in all_methods(obj) if name[0] != '_'] + +# +# Server which is run in a process controlled by a manager +# + +class Server(object): + ''' + Server class which runs in a process controlled by a manager object + ''' + public = ['shutdown', 'create', 'accept_connection', 'get_methods', + 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] + + def __init__(self, registry, address, authkey, serializer): + if not isinstance(authkey, bytes): + raise TypeError( + "Authkey {0!r} is type {1!s}, not bytes".format( + authkey, type(authkey))) + self.registry = registry + self.authkey = process.AuthenticationString(authkey) + Listener, Client = listener_client[serializer] + + # do authentication later + self.listener = Listener(address=address, backlog=128) + self.address = self.listener.address + + self.id_to_obj = {'0': (None, ())} + 
self.id_to_refcount = {} + self.id_to_local_proxy_obj = {} + self.mutex = threading.Lock() + + def serve_forever(self): + ''' + Run the server forever + ''' + self.stop_event = threading.Event() + process.current_process()._manager_server = self + try: + accepter = threading.Thread(target=self.accepter) + accepter.daemon = True + accepter.start() + try: + while not self.stop_event.is_set(): + self.stop_event.wait(1) + except (KeyboardInterrupt, SystemExit): + pass + finally: + if sys.stdout != sys.__stdout__: # what about stderr? + util.debug('resetting stdout, stderr') + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + sys.exit(0) + + def accepter(self): + while True: + try: + c = self.listener.accept() + except OSError: + continue + t = threading.Thread(target=self.handle_request, args=(c,)) + t.daemon = True + t.start() + + def _handle_request(self, c): + request = None + try: + connection.deliver_challenge(c, self.authkey) + connection.answer_challenge(c, self.authkey) + request = c.recv() + ignore, funcname, args, kwds = request + assert funcname in self.public, '%r unrecognized' % funcname + func = getattr(self, funcname) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + try: + result = func(c, *args, **kwds) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + msg = ('#RETURN', result) + + try: + c.send(msg) + except Exception as e: + try: + c.send(('#TRACEBACK', format_exc())) + except Exception: + pass + util.info('Failure to send message: %r', msg) + util.info(' ... request was %r', request) + util.info(' ... exception was %r', e) + + def handle_request(self, conn): + ''' + Handle a new connection + ''' + try: + self._handle_request(conn) + except SystemExit: + # Server.serve_client() calls sys.exit(0) on EOF + pass + finally: + conn.close() + + def serve_client(self, conn): + ''' + Handle requests from the proxies in a particular process/thread + ''' + util.debug('starting server thread to service %r', + threading.current_thread().name) + + recv = conn.recv + send = conn.send + id_to_obj = self.id_to_obj + + while not self.stop_event.is_set(): + + try: + methodname = obj = None + request = recv() + ident, methodname, args, kwds = request + try: + obj, exposed, gettypeid = id_to_obj[ident] + except KeyError as ke: + try: + obj, exposed, gettypeid = \ + self.id_to_local_proxy_obj[ident] + except KeyError: + raise ke + + if methodname not in exposed: + raise AttributeError( + 'method %r of %r object is not in exposed=%r' % + (methodname, type(obj), exposed) + ) + + function = getattr(obj, methodname) + + try: + res = function(*args, **kwds) + except Exception as e: + msg = ('#ERROR', e) + else: + typeid = gettypeid and gettypeid.get(methodname, None) + if typeid: + rident, rexposed = self.create(conn, typeid, res) + token = Token(typeid, self.address, rident) + msg = ('#PROXY', (rexposed, token)) + else: + msg = ('#RETURN', res) + + except AttributeError: + if methodname is None: + msg = ('#TRACEBACK', format_exc()) + else: + try: + fallback_func = self.fallback_mapping[methodname] + result = fallback_func( + self, conn, ident, obj, *args, **kwds + ) + msg = ('#RETURN', result) + except Exception: + msg = ('#TRACEBACK', format_exc()) + + except EOFError: + util.debug('got EOF -- exiting thread serving %r', + threading.current_thread().name) + sys.exit(0) + + except Exception: + msg = ('#TRACEBACK', format_exc()) + + try: + try: + send(msg) + except Exception: + send(('#UNSERIALIZABLE', format_exc())) + except Exception as e: + 
util.info('exception in thread serving %r', + threading.current_thread().name) + util.info(' ... message was %r', msg) + util.info(' ... exception was %r', e) + conn.close() + sys.exit(1) + + def fallback_getvalue(self, conn, ident, obj): + return obj + + def fallback_str(self, conn, ident, obj): + return str(obj) + + def fallback_repr(self, conn, ident, obj): + return repr(obj) + + fallback_mapping = { + '__str__':fallback_str, + '__repr__':fallback_repr, + '#GETVALUE':fallback_getvalue + } + + def dummy(self, c): + pass + + def debug_info(self, c): + ''' + Return some info --- useful to spot problems with refcounting + ''' + # Perhaps include debug info about 'c'? + with self.mutex: + result = [] + keys = list(self.id_to_refcount.keys()) + keys.sort() + for ident in keys: + if ident != '0': + result.append(' %s: refcount=%s\n %s' % + (ident, self.id_to_refcount[ident], + str(self.id_to_obj[ident][0])[:75])) + return '\n'.join(result) + + def number_of_objects(self, c): + ''' + Number of shared objects + ''' + # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' + return len(self.id_to_refcount) + + def shutdown(self, c): + ''' + Shutdown this process + ''' + try: + util.debug('manager received shutdown message') + c.send(('#RETURN', None)) + except: + import traceback + traceback.print_exc() + finally: + self.stop_event.set() + + def create(self, c, typeid, /, *args, **kwds): + ''' + Create a new shared object and return its id + ''' + with self.mutex: + callable, exposed, method_to_typeid, proxytype = \ + self.registry[typeid] + + if callable is None: + if kwds or (len(args) != 1): + raise ValueError( + "Without callable, must have one non-keyword argument") + obj = args[0] + else: + obj = callable(*args, **kwds) + + if exposed is None: + exposed = public_methods(obj) + if method_to_typeid is not None: + if not isinstance(method_to_typeid, dict): + raise TypeError( + "Method_to_typeid {0!r}: type {1!s}, not dict".format( + method_to_typeid, type(method_to_typeid))) + exposed = list(exposed) + list(method_to_typeid) + + ident = '%x' % id(obj) # convert to string because xmlrpclib + # only has 32 bit signed integers + util.debug('%r callable returned object with id %r', typeid, ident) + + self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) + if ident not in self.id_to_refcount: + self.id_to_refcount[ident] = 0 + + self.incref(c, ident) + return ident, tuple(exposed) + + def get_methods(self, c, token): + ''' + Return the methods of the shared object indicated by token + ''' + return tuple(self.id_to_obj[token.id][1]) + + def accept_connection(self, c, name): + ''' + Spawn a new thread to serve this connection + ''' + threading.current_thread().name = name + c.send(('#RETURN', None)) + self.serve_client(c) + + def incref(self, c, ident): + with self.mutex: + try: + self.id_to_refcount[ident] += 1 + except KeyError as ke: + # If no external references exist but an internal (to the + # manager) still does and a new external reference is created + # from it, restore the manager's tracking of it from the + # previously stashed internal ref. 
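This restore path is what keeps nested managed objects usable across pickling; a sketch (not part of the patch) of the pattern it exists for:

    from multiprocessing import Manager

    if __name__ == '__main__':
        with Manager() as m:
            outer = m.list()
            outer.append(m.list([1, 2]))   # a managed list of managed lists
            inner = outer[0]               # proxy rebuilt here, incref'd again
            print(inner[:])                # -> [1, 2]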
+ if ident in self.id_to_local_proxy_obj: + self.id_to_refcount[ident] = 1 + self.id_to_obj[ident] = \ + self.id_to_local_proxy_obj[ident] + util.debug('Server re-enabled tracking & INCREF %r', ident) + else: + raise ke + + def decref(self, c, ident): + if ident not in self.id_to_refcount and \ + ident in self.id_to_local_proxy_obj: + util.debug('Server DECREF skipping %r', ident) + return + + with self.mutex: + if self.id_to_refcount[ident] <= 0: + raise AssertionError( + "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( + ident, self.id_to_obj[ident], + self.id_to_refcount[ident])) + self.id_to_refcount[ident] -= 1 + if self.id_to_refcount[ident] == 0: + del self.id_to_refcount[ident] + + if ident not in self.id_to_refcount: + # Two-step process in case the object turns out to contain other + # proxy objects (e.g. a managed list of managed lists). + # Otherwise, deleting self.id_to_obj[ident] would trigger the + # deleting of the stored value (another managed object) which would + # in turn attempt to acquire the mutex that is already held here. + self.id_to_obj[ident] = (None, (), None) # thread-safe + util.debug('disposing of obj with id %r', ident) + with self.mutex: + del self.id_to_obj[ident] + + +# +# Class to represent state of a manager +# + +class State(object): + __slots__ = ['value'] + INITIAL = 0 + STARTED = 1 + SHUTDOWN = 2 + +# +# Mapping from serializer name to Listener and Client types +# + +listener_client = { + 'pickle' : (connection.Listener, connection.Client), + 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) + } + +# +# Definition of BaseManager +# + +class BaseManager(object): + ''' + Base class for managers + ''' + _registry = {} + _Server = Server + + def __init__(self, address=None, authkey=None, serializer='pickle', + ctx=None, *, shutdown_timeout=1.0): + if authkey is None: + authkey = process.current_process().authkey + self._address = address # XXX not final address if eg ('', 0) + self._authkey = process.AuthenticationString(authkey) + self._state = State() + self._state.value = State.INITIAL + self._serializer = serializer + self._Listener, self._Client = listener_client[serializer] + self._ctx = ctx or get_context() + self._shutdown_timeout = shutdown_timeout + + def get_server(self): + ''' + Return server object with serve_forever() method and address attribute + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return Server(self._registry, self._address, + self._authkey, self._serializer) + + def connect(self): + ''' + Connect manager object to the server process + ''' + Listener, Client = listener_client[self._serializer] + conn = Client(self._address, authkey=self._authkey) + dispatch(conn, None, 'dummy') + self._state.value = State.STARTED + + def start(self, initializer=None, initargs=()): + ''' + Spawn a server process for this manager object + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + # pipe over which we 
will retrieve address of server + reader, writer = connection.Pipe(duplex=False) + + # spawn process which runs a server + self._process = self._ctx.Process( + target=type(self)._run_server, + args=(self._registry, self._address, self._authkey, + self._serializer, writer, initializer, initargs), + ) + ident = ':'.join(str(i) for i in self._process._identity) + self._process.name = type(self).__name__ + '-' + ident + self._process.start() + + # get address of server + writer.close() + self._address = reader.recv() + reader.close() + + # register a finalizer + self._state.value = State.STARTED + self.shutdown = util.Finalize( + self, type(self)._finalize_manager, + args=(self._process, self._address, self._authkey, self._state, + self._Client, self._shutdown_timeout), + exitpriority=0 + ) + + @classmethod + def _run_server(cls, registry, address, authkey, serializer, writer, + initializer=None, initargs=()): + ''' + Create a server, report its address and run it + ''' + # bpo-36368: protect server process from KeyboardInterrupt signals + signal.signal(signal.SIGINT, signal.SIG_IGN) + + if initializer is not None: + initializer(*initargs) + + # create server + server = cls._Server(registry, address, authkey, serializer) + + # inform parent process of the server's address + writer.send(server.address) + writer.close() + + # run the manager + util.info('manager serving at %r', server.address) + server.serve_forever() + + def _create(self, typeid, /, *args, **kwds): + ''' + Create a new shared object; return the token and exposed tuple + ''' + assert self._state.value == State.STARTED, 'server not yet started' + conn = self._Client(self._address, authkey=self._authkey) + try: + id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) + finally: + conn.close() + return Token(typeid, self._address, id), exposed + + def join(self, timeout=None): + ''' + Join the manager process (if it has been spawned) + ''' + if self._process is not None: + self._process.join(timeout) + if not self._process.is_alive(): + self._process = None + + def _debug_info(self): + ''' + Return some info about the servers shared objects and connections + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'debug_info') + finally: + conn.close() + + def _number_of_objects(self): + ''' + Return the number of shared objects + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'number_of_objects') + finally: + conn.close() + + def __enter__(self): + if self._state.value == State.INITIAL: + self.start() + if self._state.value != State.STARTED: + if self._state.value == State.INITIAL: + raise ProcessError("Unable to start server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown() + + @staticmethod + def _finalize_manager(process, address, authkey, state, _Client, + shutdown_timeout): + ''' + Shutdown the manager process; will be registered as a finalizer + ''' + if process.is_alive(): + util.info('sending shutdown message to manager') + try: + conn = _Client(address, authkey=authkey) + try: + dispatch(conn, None, 'shutdown') + finally: + conn.close() + except Exception: + pass + + process.join(timeout=shutdown_timeout) + if process.is_alive(): + util.info('manager still alive') + if hasattr(process, 'terminate'): + 
util.info('trying to `terminate()` manager process') + process.terminate() + process.join(timeout=shutdown_timeout) + if process.is_alive(): + util.info('manager still alive after terminate') + process.kill() + process.join() + + state.value = State.SHUTDOWN + try: + del BaseProxy._address_to_local[address] + except KeyError: + pass + + @property + def address(self): + return self._address + + @classmethod + def register(cls, typeid, callable=None, proxytype=None, exposed=None, + method_to_typeid=None, create_method=True): + ''' + Register a typeid with the manager type + ''' + if '_registry' not in cls.__dict__: + cls._registry = cls._registry.copy() + + if proxytype is None: + proxytype = AutoProxy + + exposed = exposed or getattr(proxytype, '_exposed_', None) + + method_to_typeid = method_to_typeid or \ + getattr(proxytype, '_method_to_typeid_', None) + + if method_to_typeid: + for key, value in list(method_to_typeid.items()): # isinstance? + assert type(key) is str, '%r is not a string' % key + assert type(value) is str, '%r is not a string' % value + + cls._registry[typeid] = ( + callable, exposed, method_to_typeid, proxytype + ) + + if create_method: + def temp(self, /, *args, **kwds): + util.debug('requesting creation of a shared %r object', typeid) + token, exp = self._create(typeid, *args, **kwds) + proxy = proxytype( + token, self._serializer, manager=self, + authkey=self._authkey, exposed=exp + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + temp.__name__ = typeid + setattr(cls, typeid, temp) + +# +# Subclass of set which get cleared after a fork +# + +class ProcessLocalSet(set): + def __init__(self): + util.register_after_fork(self, lambda obj: obj.clear()) + def __reduce__(self): + return type(self), () + +# +# Definition of BaseProxy +# + +class BaseProxy(object): + ''' + A base for proxies of shared objects + ''' + _address_to_local = {} + _mutex = util.ForkAwareThreadLock() + + # Each instance gets a `_serial` number. Unlike `id(...)`, this number + # is never reused. + _next_serial = 1 + + def __init__(self, token, serializer, manager=None, + authkey=None, exposed=None, incref=True, manager_owned=False): + with BaseProxy._mutex: + tls_serials = BaseProxy._address_to_local.get(token.address, None) + if tls_serials is None: + tls_serials = util.ForkAwareLocal(), ProcessLocalSet() + BaseProxy._address_to_local[token.address] = tls_serials + + self._serial = BaseProxy._next_serial + BaseProxy._next_serial += 1 + + # self._tls is used to record the connection used by this + # thread to communicate with the manager at token.address + self._tls = tls_serials[0] + + # self._all_serials is a set used to record the identities of all + # shared objects for which the current process owns references and + # which are in the manager at token.address + self._all_serials = tls_serials[1] + + self._token = token + self._id = self._token.id + self._manager = manager + self._serializer = serializer + self._Client = listener_client[serializer][1] + + # Should be set to True only when a proxy object is being created + # on the manager server; primary use case: nested proxy objects. + # RebuildProxy detects when a proxy is being created on the manager + # and sets this value appropriately. 
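Proxies like this are normally obtained via BaseManager.register() rather than built by hand; a short sketch of that public pattern (Counter and MyManager are illustrative names only):

    from multiprocessing.managers import BaseManager

    class Counter:
        def __init__(self):
            self.n = 0
        def bump(self):
            self.n += 1
            return self.n

    class MyManager(BaseManager):
        pass

    MyManager.register('Counter', Counter)   # AutoProxy exposes bump()

    if __name__ == '__main__':
        with MyManager() as mgr:
            c = mgr.Counter()         # a BaseProxy; calls travel via _callmethod
            print(c.bump(), c.bump())   # -> 1 2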
+ self._owned_by_manager = manager_owned + + if authkey is not None: + self._authkey = process.AuthenticationString(authkey) + elif self._manager is not None: + self._authkey = self._manager._authkey + else: + self._authkey = process.current_process().authkey + + if incref: + self._incref() + + util.register_after_fork(self, BaseProxy._after_fork) + + def _connect(self): + util.debug('making connection to manager') + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'accept_connection', (name,)) + self._tls.connection = conn + + def _callmethod(self, methodname, args=(), kwds={}): + ''' + Try to call a method of the referent and return a copy of the result + ''' + try: + conn = self._tls.connection + except AttributeError: + util.debug('thread %r does not own a connection', + threading.current_thread().name) + self._connect() + conn = self._tls.connection + + conn.send((self._id, methodname, args, kwds)) + kind, result = conn.recv() + + if kind == '#RETURN': + return result + elif kind == '#PROXY': + exposed, token = result + proxytype = self._manager._registry[token.typeid][-1] + token.address = self._token.address + proxy = proxytype( + token, self._serializer, manager=self._manager, + authkey=self._authkey, exposed=exposed + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + try: + raise convert_to_error(kind, result) + finally: + del result # break reference cycle + + def _getvalue(self): + ''' + Get a copy of the value of the referent + ''' + return self._callmethod('#GETVALUE') + + def _incref(self): + if self._owned_by_manager: + util.debug('owned_by_manager skipped INCREF of %r', self._token.id) + return + + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'incref', (self._id,)) + util.debug('INCREF %r', self._token.id) + + self._all_serials.add(self._serial) + + state = self._manager and self._manager._state + + self._close = util.Finalize( + self, BaseProxy._decref, + args=(self._token, self._serial, self._authkey, state, + self._tls, self._all_serials, self._Client), + exitpriority=10 + ) + + @staticmethod + def _decref(token, serial, authkey, state, tls, idset, _Client): + idset.discard(serial) + + # check whether manager is still alive + if state is None or state.value == State.STARTED: + # tell manager this process no longer cares about referent + try: + util.debug('DECREF %r', token.id) + conn = _Client(token.address, authkey=authkey) + dispatch(conn, None, 'decref', (token.id,)) + except Exception as e: + util.debug('... 
decref failed %s', e) + + else: + util.debug('DECREF %r -- manager already shutdown', token.id) + + # check whether we can close this thread's connection because + # the process owns no more references to objects for this manager + if not idset and hasattr(tls, 'connection'): + util.debug('thread %r has no more proxies so closing conn', + threading.current_thread().name) + tls.connection.close() + del tls.connection + + def _after_fork(self): + self._manager = None + try: + self._incref() + except Exception as e: + # the proxy may just be for a manager which has shutdown + util.info('incref failed: %s' % e) + + def __reduce__(self): + kwds = {} + if get_spawning_popen() is not None: + kwds['authkey'] = self._authkey + + if getattr(self, '_isauto', False): + kwds['exposed'] = self._exposed_ + return (RebuildProxy, + (AutoProxy, self._token, self._serializer, kwds)) + else: + return (RebuildProxy, + (type(self), self._token, self._serializer, kwds)) + + def __deepcopy__(self, memo): + return self._getvalue() + + def __repr__(self): + return '<%s object, typeid %r at %#x>' % \ + (type(self).__name__, self._token.typeid, id(self)) + + def __str__(self): + ''' + Return representation of the referent (or a fall-back if that fails) + ''' + try: + return self._callmethod('__repr__') + except Exception: + return repr(self)[:-1] + "; '__str__()' failed>" + +# +# Function used for unpickling +# + +def RebuildProxy(func, token, serializer, kwds): + ''' + Function used for unpickling proxy objects. + ''' + server = getattr(process.current_process(), '_manager_server', None) + if server and server.address == token.address: + util.debug('Rebuild a proxy owned by manager, token=%r', token) + kwds['manager_owned'] = True + if token.id not in server.id_to_local_proxy_obj: + server.id_to_local_proxy_obj[token.id] = \ + server.id_to_obj[token.id] + incref = ( + kwds.pop('incref', True) and + not getattr(process.current_process(), '_inheriting', False) + ) + return func(token, serializer, incref=incref, **kwds) + +# +# Functions to create proxies and proxy types +# + +def MakeProxyType(name, exposed, _cache={}): + ''' + Return a proxy type whose methods are given by `exposed` + ''' + exposed = tuple(exposed) + try: + return _cache[(name, exposed)] + except KeyError: + pass + + dic = {} + + for meth in exposed: + exec('''def %s(self, /, *args, **kwds): + return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) + + ProxyType = type(name, (BaseProxy,), dic) + ProxyType._exposed_ = exposed + _cache[(name, exposed)] = ProxyType + return ProxyType + + +def AutoProxy(token, serializer, manager=None, authkey=None, + exposed=None, incref=True, manager_owned=False): + ''' + Return an auto-proxy for `token` + ''' + _Client = listener_client[serializer][1] + + if exposed is None: + conn = _Client(token.address, authkey=authkey) + try: + exposed = dispatch(conn, None, 'get_methods', (token,)) + finally: + conn.close() + + if authkey is None and manager is not None: + authkey = manager._authkey + if authkey is None: + authkey = process.current_process().authkey + + ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) + proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, + incref=incref, manager_owned=manager_owned) + proxy._isauto = True + return proxy + +# +# Types/callables which we will register with SyncManager +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for 
name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + def get(self): + return self._value + def set(self, value): + self._value = value + def __repr__(self): + return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) + value = property(get, set) + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +# +# Proxy types used by SyncManager +# + +class IteratorProxy(BaseProxy): + _exposed_ = ('__next__', 'send', 'throw', 'close') + def __iter__(self): + return self + def __next__(self, *args): + return self._callmethod('__next__', args) + def send(self, *args): + return self._callmethod('send', args) + def throw(self, *args): + return self._callmethod('throw', args) + def close(self, *args): + return self._callmethod('close', args) + + +class AcquirerProxy(BaseProxy): + _exposed_ = ('acquire', 'release', 'locked') + def acquire(self, blocking=True, timeout=None): + args = (blocking,) if timeout is None else (blocking, timeout) + return self._callmethod('acquire', args) + def release(self): + return self._callmethod('release') + def locked(self): + return self._callmethod('locked') + def __enter__(self): + return self._callmethod('acquire') + def __exit__(self, exc_type, exc_val, exc_tb): + return self._callmethod('release') + + +class ConditionProxy(AcquirerProxy): + _exposed_ = ('acquire', 'release', 'locked', 'wait', 'notify', 'notify_all') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def notify(self, n=1): + return self._callmethod('notify', (n,)) + def notify_all(self): + return self._callmethod('notify_all') + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = time.monotonic() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - time.monotonic() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + +class EventProxy(BaseProxy): + _exposed_ = ('is_set', 'set', 'clear', 'wait') + def is_set(self): + return self._callmethod('is_set') + def set(self): + return self._callmethod('set') + def clear(self): + return self._callmethod('clear') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + + +class BarrierProxy(BaseProxy): + _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def abort(self): + return self._callmethod('abort') + def reset(self): + return self._callmethod('reset') + @property + def parties(self): + return self._callmethod('__getattribute__', ('parties',)) + @property + def n_waiting(self): + return self._callmethod('__getattribute__', ('n_waiting',)) + @property + def broken(self): + return self._callmethod('__getattribute__', ('broken',)) + + +class NamespaceProxy(BaseProxy): + _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') + def __getattr__(self, key): + if key[0] == '_': + return object.__getattribute__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__getattribute__', (key,)) + def __setattr__(self, key, value): + if key[0] == '_': + return object.__setattr__(self, key, value) + callmethod = 
object.__getattribute__(self, '_callmethod')
+        return callmethod('__setattr__', (key, value))
+    def __delattr__(self, key):
+        if key[0] == '_':
+            return object.__delattr__(self, key)
+        callmethod = object.__getattribute__(self, '_callmethod')
+        return callmethod('__delattr__', (key,))
+
+
+class ValueProxy(BaseProxy):
+    _exposed_ = ('get', 'set')
+    def get(self):
+        return self._callmethod('get')
+    def set(self, value):
+        return self._callmethod('set', (value,))
+    value = property(get, set)
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+
+BaseListProxy = MakeProxyType('BaseListProxy', (
+    '__add__', '__contains__', '__delitem__', '__getitem__', '__imul__',
+    '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__',
+    'append', 'clear', 'copy', 'count', 'extend', 'index', 'insert', 'pop',
+    'remove', 'reverse', 'sort',
+    ))
+class ListProxy(BaseListProxy):
+    def __iadd__(self, value):
+        self._callmethod('extend', (value,))
+        return self
+    def __imul__(self, value):
+        self._callmethod('__imul__', (value,))
+        return self
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+collections.abc.MutableSequence.register(BaseListProxy)
+
+_BaseDictProxy = MakeProxyType('_BaseDictProxy', (
+    '__contains__', '__delitem__', '__getitem__', '__ior__', '__iter__',
+    '__len__', '__or__', '__reversed__', '__ror__',
+    '__setitem__', 'clear', 'copy', 'fromkeys', 'get', 'items',
+    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
+    ))
+_BaseDictProxy._method_to_typeid_ = {
+    '__iter__': 'Iterator',
+    }
+class DictProxy(_BaseDictProxy):
+    def __ior__(self, value):
+        self._callmethod('__ior__', (value,))
+        return self
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+collections.abc.MutableMapping.register(_BaseDictProxy)
+
+_BaseSetProxy = MakeProxyType("_BaseSetProxy", (
+    '__and__', '__class_getitem__', '__contains__', '__iand__', '__ior__',
+    '__isub__', '__iter__', '__ixor__', '__len__', '__or__', '__rand__',
+    '__ror__', '__rsub__', '__rxor__', '__sub__', '__xor__',
+    '__ge__', '__gt__', '__le__', '__lt__',
+    'add', 'clear', 'copy', 'difference', 'difference_update', 'discard',
+    'intersection', 'intersection_update', 'isdisjoint', 'issubset',
+    'issuperset', 'pop', 'remove', 'symmetric_difference',
+    'symmetric_difference_update', 'union', 'update',
+))
+
+class SetProxy(_BaseSetProxy):
+    def __ior__(self, value):
+        self._callmethod('__ior__', (value,))
+        return self
+    def __iand__(self, value):
+        self._callmethod('__iand__', (value,))
+        return self
+    def __ixor__(self, value):
+        self._callmethod('__ixor__', (value,))
+        return self
+    def __isub__(self, value):
+        self._callmethod('__isub__', (value,))
+        return self
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+collections.abc.MutableSet.register(_BaseSetProxy)
+
+
+ArrayProxy = MakeProxyType('ArrayProxy', (
+    '__len__', '__getitem__', '__setitem__'
+    ))
+
+
+BasePoolProxy = MakeProxyType('PoolProxy', (
+    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
+    'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
+    ))
+BasePoolProxy._method_to_typeid_ = {
+    'apply_async': 'AsyncResult',
+    'map_async': 'AsyncResult',
+    'starmap_async': 'AsyncResult',
+    'imap': 'Iterator',
+    'imap_unordered': 'Iterator'
+    }
+class PoolProxy(BasePoolProxy):
+    def __enter__(self):
+        return self
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.terminate()
+
+#
+# Definition of SyncManager
+#
+
+class SyncManager(BaseManager):
+    '''
+    Subclass of `BaseManager` which supports a number of shared
object types. + + The types registered are those intended for the synchronization + of threads, plus `dict`, `list` and `Namespace`. + + The `multiprocessing.Manager()` function creates started instances of + this class. + ''' + +SyncManager.register('Queue', queue.Queue) +SyncManager.register('JoinableQueue', queue.Queue) +SyncManager.register('Event', threading.Event, EventProxy) +SyncManager.register('Lock', threading.Lock, AcquirerProxy) +SyncManager.register('RLock', threading.RLock, AcquirerProxy) +SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) +SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, + AcquirerProxy) +SyncManager.register('Condition', threading.Condition, ConditionProxy) +SyncManager.register('Barrier', threading.Barrier, BarrierProxy) +SyncManager.register('Pool', pool.Pool, PoolProxy) +SyncManager.register('list', list, ListProxy) +SyncManager.register('dict', dict, DictProxy) +SyncManager.register('set', set, SetProxy) +SyncManager.register('Value', Value, ValueProxy) +SyncManager.register('Array', Array, ArrayProxy) +SyncManager.register('Namespace', Namespace, NamespaceProxy) + +# types returned by methods of PoolProxy +SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) +SyncManager.register('AsyncResult', create_method=False) + +# +# Definition of SharedMemoryManager and SharedMemoryServer +# + +if HAS_SHMEM: + class _SharedMemoryTracker: + "Manages one or more shared memory segments." + + def __init__(self, name, segment_names=[]): + self.shared_memory_context_name = name + self.segment_names = segment_names + + def register_segment(self, segment_name): + "Adds the supplied shared memory block name to tracker." + util.debug(f"Register segment {segment_name!r} in pid {getpid()}") + self.segment_names.append(segment_name) + + def destroy_segment(self, segment_name): + """Calls unlink() on the shared memory block with the supplied name + and removes it from the list of blocks being tracked.""" + util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") + self.segment_names.remove(segment_name) + segment = shared_memory.SharedMemory(segment_name) + segment.close() + segment.unlink() + + def unlink(self): + "Calls destroy_segment() on all tracked shared memory blocks." + for segment_name in self.segment_names[:]: + self.destroy_segment(segment_name) + + def __del__(self): + util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") + self.unlink() + + def __getstate__(self): + return (self.shared_memory_context_name, self.segment_names) + + def __setstate__(self, state): + self.__init__(*state) + + + class SharedMemoryServer(Server): + + public = Server.public + \ + ['track_segment', 'release_segment', 'list_segments'] + + def __init__(self, *args, **kwargs): + Server.__init__(self, *args, **kwargs) + address = self.address + # The address of Linux abstract namespaces can be bytes + if isinstance(address, bytes): + address = os.fsdecode(address) + self.shared_memory_context = \ + _SharedMemoryTracker(f"shm_{address}_{getpid()}") + util.debug(f"SharedMemoryServer started by pid {getpid()}") + + def create(self, c, typeid, /, *args, **kwargs): + """Create a new distributed-shared object (not backed by a shared + memory block) and return its id to be used in a Proxy Object.""" + # Unless set up as a shared proxy, don't make shared_memory_context + # a standard part of kwargs. This makes things easier for supplying + # simple functions. 
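For reference, the public surface this server backs, as a minimal sketch (not part of the patch):

    from multiprocessing.managers import SharedMemoryManager

    if __name__ == '__main__':
        with SharedMemoryManager() as smm:
            sl = smm.ShareableList([1, 2, 3])   # tracked via track_segment
            shm = smm.SharedMemory(size=64)
            sl[0] = 99
        # shutdown() unlinks every tracked segment on exit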
+ if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): + kwargs['shared_memory_context'] = self.shared_memory_context + return Server.create(self, c, typeid, *args, **kwargs) + + def shutdown(self, c): + "Call unlink() on all tracked shared memory, terminate the Server." + self.shared_memory_context.unlink() + return Server.shutdown(self, c) + + def track_segment(self, c, segment_name): + "Adds the supplied shared memory block name to Server's tracker." + self.shared_memory_context.register_segment(segment_name) + + def release_segment(self, c, segment_name): + """Calls unlink() on the shared memory block with the supplied name + and removes it from the tracker instance inside the Server.""" + self.shared_memory_context.destroy_segment(segment_name) + + def list_segments(self, c): + """Returns a list of names of shared memory blocks that the Server + is currently tracking.""" + return self.shared_memory_context.segment_names + + + class SharedMemoryManager(BaseManager): + """Like SyncManager but uses SharedMemoryServer instead of Server. + + It provides methods for creating and returning SharedMemory instances + and for creating a list-like object (ShareableList) backed by shared + memory. It also provides methods that create and return Proxy Objects + that support synchronization across processes (i.e. multi-process-safe + locks and semaphores). + """ + + _Server = SharedMemoryServer + + def __init__(self, *args, **kwargs): + if os.name == "posix": + # bpo-36867: Ensure the resource_tracker is running before + # launching the manager process, so that concurrent + # shared_memory manipulation both in the manager and in the + # current process does not create two resource_tracker + # processes. + from . import resource_tracker + resource_tracker.ensure_running() + BaseManager.__init__(self, *args, **kwargs) + util.debug(f"{self.__class__.__name__} created by pid {getpid()}") + + def __del__(self): + util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") + + def get_server(self): + 'Better than monkeypatching for now; merge into Server ultimately' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started SharedMemoryServer") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("SharedMemoryManager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self._Server(self._registry, self._address, + self._authkey, self._serializer) + + def SharedMemory(self, size): + """Returns a new SharedMemory instance with the specified size in + bytes, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sms = shared_memory.SharedMemory(None, create=True, size=size) + try: + dispatch(conn, None, 'track_segment', (sms.name,)) + except BaseException as e: + sms.unlink() + raise e + return sms + + def ShareableList(self, sequence): + """Returns a new ShareableList instance populated with the values + from the input sequence, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sl = shared_memory.ShareableList(sequence) + try: + dispatch(conn, None, 'track_segment', (sl.shm.name,)) + except BaseException as e: + sl.shm.unlink() + raise e + return sl diff --git a/Python313_13_x86_Template/Lib/multiprocessing/pool.py b/Python314_4_x86_Template/Lib/multiprocessing/pool.py similarity index 100% rename from Python313_13_x86_Template/Lib/multiprocessing/pool.py rename to 
Python314_4_x86_Template/Lib/multiprocessing/pool.py diff --git a/Python314_4_x86_Template/Lib/multiprocessing/popen_fork.py b/Python314_4_x86_Template/Lib/multiprocessing/popen_fork.py new file mode 100644 index 00000000..7affa1b9 --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/popen_fork.py @@ -0,0 +1,90 @@ +import atexit +import os +import signal + +from . import util + +__all__ = ['Popen'] + +# +# Start child process using fork +# + +class Popen(object): + method = 'fork' + + def __init__(self, process_obj): + util._flush_std_streams() + self.returncode = None + self.finalizer = None + self._launch(process_obj) + + def duplicate_for_child(self, fd): + return fd + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + try: + pid, sts = os.waitpid(self.pid, flag) + except OSError: + # Child process not yet created. See #1731717 + # e.errno == errno.ECHILD == 10 + return None + if pid == self.pid: + self.returncode = os.waitstatus_to_exitcode(sts) + return self.returncode + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is not None: + from multiprocessing.connection import wait + if not wait([self.sentinel], timeout): + return None + # This shouldn't block if wait() returned successfully. + return self.poll(os.WNOHANG if timeout == 0.0 else 0) + return self.returncode + + def _send_signal(self, sig): + if self.returncode is None: + try: + os.kill(self.pid, sig) + except ProcessLookupError: + pass + except OSError: + if self.wait(timeout=0.1) is None: + raise + + def interrupt(self): + self._send_signal(signal.SIGINT) + + def terminate(self): + self._send_signal(signal.SIGTERM) + + def kill(self): + self._send_signal(signal.SIGKILL) + + def _launch(self, process_obj): + code = 1 + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + self.pid = os.fork() + if self.pid == 0: + try: + atexit._clear() + atexit.register(util._exit_function) + os.close(parent_r) + os.close(parent_w) + code = process_obj._bootstrap(parent_sentinel=child_r) + finally: + atexit._run_exitfuncs() + os._exit(code) + else: + os.close(child_w) + os.close(child_r) + self.finalizer = util.Finalize(self, util.close_fds, + (parent_r, parent_w,)) + self.sentinel = parent_r + + def close(self): + if self.finalizer is not None: + self.finalizer() diff --git a/Python313_13_x86_Template/Lib/multiprocessing/popen_forkserver.py b/Python314_4_x86_Template/Lib/multiprocessing/popen_forkserver.py similarity index 100% rename from Python313_13_x86_Template/Lib/multiprocessing/popen_forkserver.py rename to Python314_4_x86_Template/Lib/multiprocessing/popen_forkserver.py diff --git a/Python313_13_x86_Template/Lib/multiprocessing/popen_spawn_posix.py b/Python314_4_x86_Template/Lib/multiprocessing/popen_spawn_posix.py similarity index 100% rename from Python313_13_x86_Template/Lib/multiprocessing/popen_spawn_posix.py rename to Python314_4_x86_Template/Lib/multiprocessing/popen_spawn_posix.py diff --git a/Python313_13_x86_Template/Lib/multiprocessing/popen_spawn_win32.py b/Python314_4_x86_Template/Lib/multiprocessing/popen_spawn_win32.py similarity index 100% rename from Python313_13_x86_Template/Lib/multiprocessing/popen_spawn_win32.py rename to Python314_4_x86_Template/Lib/multiprocessing/popen_spawn_win32.py diff --git a/Python314_4_x86_Template/Lib/multiprocessing/process.py b/Python314_4_x86_Template/Lib/multiprocessing/process.py new file mode 100644 index 00000000..262513f2 --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/process.py @@ -0,0 +1,443 @@ 
+# +# Module providing the `Process` class which emulates `threading.Thread` +# +# multiprocessing/process.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['BaseProcess', 'current_process', 'active_children', + 'parent_process'] + +# +# Imports +# + +import os +import sys +import signal +import itertools +import threading +from _weakrefset import WeakSet + +# +# +# + +try: + ORIGINAL_DIR = os.path.abspath(os.getcwd()) +except OSError: + ORIGINAL_DIR = None + +# +# Public functions +# + +def current_process(): + ''' + Return process object representing the current process + ''' + return _current_process + +def active_children(): + ''' + Return list of process objects corresponding to live child processes + ''' + _cleanup() + return list(_children) + + +def parent_process(): + ''' + Return process object representing the parent process + ''' + return _parent_process + +# +# +# + +def _cleanup(): + # check for processes which have finished + for p in list(_children): + if (child_popen := p._popen) and child_popen.poll() is not None: + _children.discard(p) + +# +# The `Process` class +# + +class BaseProcess(object): + ''' + Process objects represent activity that is run in a separate process + + The class is analogous to `threading.Thread` + ''' + def _Popen(self): + raise NotImplementedError + + def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, + *, daemon=None): + assert group is None, 'group argument must be None for now' + count = next(_process_counter) + self._identity = _current_process._identity + (count,) + self._config = _current_process._config.copy() + self._parent_pid = os.getpid() + self._parent_name = _current_process.name + self._popen = None + self._closed = False + self._target = target + self._args = tuple(args) + self._kwargs = dict(kwargs) if kwargs else {} + self._name = name or type(self).__name__ + '-' + \ + ':'.join(str(i) for i in self._identity) + if daemon is not None: + self.daemon = daemon + _dangling.add(self) + + def _check_closed(self): + if self._closed: + raise ValueError("process object is closed") + + def run(self): + ''' + Method to be run in sub-process; can be overridden in sub-class + ''' + if self._target: + self._target(*self._args, **self._kwargs) + + def start(self): + ''' + Start child process + ''' + self._check_closed() + assert self._popen is None, 'cannot start a process twice' + assert self._parent_pid == os.getpid(), \ + 'can only start a process object created by current process' + assert not _current_process._config.get('daemon'), \ + 'daemonic processes are not allowed to have children' + _cleanup() + self._popen = self._Popen(self) + self._sentinel = self._popen.sentinel + # Avoid a refcycle if the target function holds an indirect + # reference to the process object (see bpo-30775) + del self._target, self._args, self._kwargs + _children.add(self) + + def interrupt(self): + ''' + Terminate process; sends SIGINT signal + ''' + self._check_closed() + self._popen.interrupt() + + def terminate(self): + ''' + Terminate process; sends SIGTERM signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.terminate() + + def kill(self): + ''' + Terminate process; sends SIGKILL signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.kill() + + def join(self, timeout=None): + ''' + Wait until child process terminates + ''' + self._check_closed() + assert self._parent_pid == os.getpid(), 'can only join a child process' + 
assert self._popen is not None, 'can only join a started process' + res = self._popen.wait(timeout) + if res is not None: + _children.discard(self) + + def is_alive(self): + ''' + Return whether process is alive + ''' + self._check_closed() + if self is _current_process: + return True + assert self._parent_pid == os.getpid(), 'can only test a child process' + + if self._popen is None: + return False + + returncode = self._popen.poll() + if returncode is None: + return True + else: + _children.discard(self) + return False + + def close(self): + ''' + Close the Process object. + + This method releases resources held by the Process object. It is + an error to call this method if the child process is still running. + ''' + if self._popen is not None: + if self._popen.poll() is None: + raise ValueError("Cannot close a process while it is still running. " + "You should first call join() or terminate().") + self._popen.close() + self._popen = None + del self._sentinel + _children.discard(self) + self._closed = True + + @property + def name(self): + return self._name + + @name.setter + def name(self, name): + assert isinstance(name, str), 'name must be a string' + self._name = name + + @property + def daemon(self): + ''' + Return whether process is a daemon + ''' + return self._config.get('daemon', False) + + @daemon.setter + def daemon(self, daemonic): + ''' + Set whether process is a daemon + ''' + assert self._popen is None, 'process has already started' + self._config['daemon'] = daemonic + + @property + def authkey(self): + return self._config['authkey'] + + @authkey.setter + def authkey(self, authkey): + ''' + Set authorization key of process + ''' + self._config['authkey'] = AuthenticationString(authkey) + + @property + def exitcode(self): + ''' + Return exit code of process or `None` if it has yet to stop + ''' + self._check_closed() + if self._popen is None: + return self._popen + return self._popen.poll() + + @property + def ident(self): + ''' + Return identifier (PID) of process or `None` if it has yet to start + ''' + self._check_closed() + if self is _current_process: + return os.getpid() + else: + return self._popen and self._popen.pid + + pid = ident + + @property + def sentinel(self): + ''' + Return a file descriptor (Unix) or handle (Windows) suitable for + waiting for process termination. + ''' + self._check_closed() + try: + return self._sentinel + except AttributeError: + raise ValueError("process not started") from None + + def __repr__(self): + exitcode = None + if self is _current_process: + status = 'started' + elif self._closed: + status = 'closed' + elif self._parent_pid != os.getpid(): + status = 'unknown' + elif self._popen is None: + status = 'initial' + else: + exitcode = self._popen.poll() + if exitcode is not None: + status = 'stopped' + else: + status = 'started' + + info = [type(self).__name__, 'name=%r' % self._name] + if self._popen is not None: + info.append('pid=%s' % self._popen.pid) + info.append('parent=%s' % self._parent_pid) + info.append(status) + if exitcode is not None: + exitcode = _exitcode_to_name.get(exitcode, exitcode) + info.append('exitcode=%s' % exitcode) + if self.daemon: + info.append('daemon') + return '<%s>' % ' '.join(info) + + ## + + def _bootstrap(self, parent_sentinel=None): + from . 
import util, context + global _current_process, _parent_process, _process_counter, _children + + try: + if self._start_method is not None: + context._force_start_method(self._start_method) + _process_counter = itertools.count(1) + _children = set() + util._close_stdin() + old_process = _current_process + _current_process = self + _parent_process = _ParentProcess( + self._parent_name, self._parent_pid, parent_sentinel) + if threading._HAVE_THREAD_NATIVE_ID: + threading.main_thread()._set_native_id() + try: + self._after_fork() + finally: + # delay finalization of the old process object until after + # _run_after_forkers() is executed + del old_process + util.info('child process calling self.run()') + self.run() + exitcode = 0 + except SystemExit as e: + if e.code is None: + exitcode = 0 + elif isinstance(e.code, int): + exitcode = e.code + else: + sys.stderr.write(str(e.code) + '\n') + exitcode = 1 + except: + exitcode = 1 + import traceback + sys.stderr.write('Process %s:\n' % self.name) + traceback.print_exc() + finally: + threading._shutdown() + util.info('process exiting with exitcode %d' % exitcode) + util._flush_std_streams() + + return exitcode + + @staticmethod + def _after_fork(): + from . import util + util._finalizer_registry.clear() + util._run_after_forkers() + + +# +# We subclass bytes to avoid accidental transmission of auth keys over network +# + +class AuthenticationString(bytes): + def __reduce__(self): + from .context import get_spawning_popen + if get_spawning_popen() is None: + raise TypeError( + 'Pickling an AuthenticationString object is ' + 'disallowed for security reasons' + ) + return AuthenticationString, (bytes(self),) + + +# +# Create object representing the parent process +# + +class _ParentProcess(BaseProcess): + + def __init__(self, name, pid, sentinel): + self._identity = () + self._name = name + self._pid = pid + self._parent_pid = None + self._popen = None + self._closed = False + self._sentinel = sentinel + self._config = {} + + def is_alive(self): + from multiprocessing.connection import wait + return not wait([self._sentinel], timeout=0) + + @property + def ident(self): + return self._pid + + def join(self, timeout=None): + ''' + Wait until parent process terminates + ''' + from multiprocessing.connection import wait + wait([self._sentinel], timeout=timeout) + + pid = ident + +# +# Create object representing the main process +# + +class _MainProcess(BaseProcess): + + def __init__(self): + self._identity = () + self._name = 'MainProcess' + self._parent_pid = None + self._popen = None + self._closed = False + self._config = {'authkey': AuthenticationString(os.urandom(32)), + 'semprefix': '/mp'} + # Note that some versions of FreeBSD only allow named + # semaphores to have names of up to 14 characters. Therefore + # we choose a short prefix. + # + # On MacOSX in a sandbox it may be necessary to use a + # different prefix -- see #19478. + # + # Everything in self._config will be inherited by descendant + # processes. 
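+        #
+        # A rough sketch of what that inheritance means in practice
+        # (assuming only the documented multiprocessing API):
+        #
+        #     import multiprocessing as mp
+        #     key = mp.current_process().authkey   # bytes from os.urandom
+        #     # a child process copies this config, so both ends of a
+        #     # Connection can authenticate with the same authkey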
+ + def close(self): + pass + + +_parent_process = None +_current_process = _MainProcess() +_process_counter = itertools.count(1) +_children = set() +del _MainProcess + +# +# Give names to some return codes +# + +_exitcode_to_name = {} + +for name, signum in list(signal.__dict__.items()): + if name[:3]=='SIG' and '_' not in name: + _exitcode_to_name[-signum] = f'-{name}' +del name, signum + +# For debug and leak testing +_dangling = WeakSet() diff --git a/Python314_4_x86_Template/Lib/multiprocessing/queues.py b/Python314_4_x86_Template/Lib/multiprocessing/queues.py new file mode 100644 index 00000000..981599ac --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/queues.py @@ -0,0 +1,399 @@ +# +# Module implementing queues +# +# multiprocessing/queues.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] + +import sys +import os +import threading +import collections +import time +import types +import weakref +import errno + +from queue import Empty, Full + +from . import connection +from . import context +_ForkingPickler = context.reduction.ForkingPickler + +from .util import debug, info, Finalize, register_after_fork, is_exiting + +# +# Queue type using a pipe, buffer and thread +# + +class Queue(object): + + def __init__(self, maxsize=0, *, ctx): + if maxsize <= 0: + # Can raise ImportError (see issues #3770 and #23400) + from .synchronize import SEM_VALUE_MAX as maxsize + self._maxsize = maxsize + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._opid = os.getpid() + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + self._sem = ctx.BoundedSemaphore(maxsize) + # For use by concurrent.futures + self._ignore_epipe = False + self._reset() + + if sys.platform != 'win32': + register_after_fork(self, Queue._after_fork) + + def __getstate__(self): + context.assert_spawning(self) + return (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) + + def __setstate__(self, state): + (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) = state + self._reset() + + def _after_fork(self): + debug('Queue._after_fork()') + self._reset(after_fork=True) + + def _reset(self, after_fork=False): + if after_fork: + self._notempty._at_fork_reinit() + else: + self._notempty = threading.Condition(threading.Lock()) + self._buffer = collections.deque() + self._thread = None + self._jointhread = None + self._joincancelled = False + self._closed = False + self._close = None + self._send_bytes = self._writer.send_bytes + self._recv_bytes = self._reader.recv_bytes + self._poll = self._reader.poll + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._notempty.notify() + + def get(self, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if block and timeout is None: + with self._rlock: + res = self._recv_bytes() + self._sem.release() + else: + if block: + deadline = time.monotonic() + timeout + if not self._rlock.acquire(block, timeout): + raise Empty + try: + if block: + timeout = deadline - time.monotonic() + if not self._poll(timeout): + raise 
Empty + elif not self._poll(): + raise Empty + res = self._recv_bytes() + self._sem.release() + finally: + self._rlock.release() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def qsize(self): + # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() + return self._maxsize - self._sem.get_value() + + def empty(self): + return not self._poll() + + def full(self): + return self._sem._semlock._is_zero() + + def get_nowait(self): + return self.get(False) + + def put_nowait(self, obj): + return self.put(obj, False) + + def close(self): + self._closed = True + close = self._close + if close: + self._close = None + close() + + def join_thread(self): + debug('Queue.join_thread()') + assert self._closed, "Queue {0!r} not closed".format(self) + if self._jointhread: + self._jointhread() + + def cancel_join_thread(self): + debug('Queue.cancel_join_thread()') + self._joincancelled = True + try: + self._jointhread.cancel() + except AttributeError: + pass + + def _terminate_broken(self): + # Close a Queue on error. + + # gh-94777: Prevent queue writing to a pipe which is no longer read. + self._reader.close() + + # gh-107219: Close the connection writer which can unblock + # Queue._feed() if it was stuck in send_bytes(). + if sys.platform == 'win32': + self._writer.close() + + self.close() + self.join_thread() + + def _start_thread(self): + debug('Queue._start_thread()') + + # Start thread which transfers data from buffer to pipe + self._buffer.clear() + self._thread = threading.Thread( + target=Queue._feed, + args=(self._buffer, self._notempty, self._send_bytes, + self._wlock, self._reader.close, self._writer.close, + self._ignore_epipe, self._on_queue_feeder_error, + self._sem), + name='QueueFeederThread', + daemon=True, + ) + + try: + debug('doing self._thread.start()') + self._thread.start() + debug('... done self._thread.start()') + except: + # gh-109047: During Python finalization, creating a thread + # can fail with RuntimeError. + self._thread = None + raise + + if not self._joincancelled: + self._jointhread = Finalize( + self._thread, Queue._finalize_join, + [weakref.ref(self._thread)], + exitpriority=-5 + ) + + # Send sentinel to the thread queue object when garbage collected + self._close = Finalize( + self, Queue._finalize_close, + [self._buffer, self._notempty], + exitpriority=10 + ) + + @staticmethod + def _finalize_join(twr): + debug('joining queue thread') + thread = twr() + if thread is not None: + thread.join() + debug('... queue thread joined') + else: + debug('... 
queue thread already dead')
+
+    @staticmethod
+    def _finalize_close(buffer, notempty):
+        debug('telling queue thread to quit')
+        with notempty:
+            buffer.append(_sentinel)
+            notempty.notify()
+
+    @staticmethod
+    def _feed(buffer, notempty, send_bytes, writelock, reader_close,
+              writer_close, ignore_epipe, onerror, queue_sem):
+        debug('starting thread to feed data to pipe')
+        nacquire = notempty.acquire
+        nrelease = notempty.release
+        nwait = notempty.wait
+        bpopleft = buffer.popleft
+        sentinel = _sentinel
+        if sys.platform != 'win32':
+            wacquire = writelock.acquire
+            wrelease = writelock.release
+        else:
+            wacquire = None
+
+        while 1:
+            try:
+                nacquire()
+                try:
+                    if not buffer:
+                        nwait()
+                finally:
+                    nrelease()
+                try:
+                    while 1:
+                        obj = bpopleft()
+                        if obj is sentinel:
+                            debug('feeder thread got sentinel -- exiting')
+                            reader_close()
+                            writer_close()
+                            return
+
+                        # serialize the data before acquiring the lock
+                        obj = _ForkingPickler.dumps(obj)
+                        if wacquire is None:
+                            send_bytes(obj)
+                        else:
+                            wacquire()
+                            try:
+                                send_bytes(obj)
+                            finally:
+                                wrelease()
+                except IndexError:
+                    pass
+            except Exception as e:
+                if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
+                    return
+                # Since this runs in a daemon thread the resources it uses
+                # may become unusable while the process is cleaning up.
+                # We ignore errors which happen after the process has
+                # started to clean up.
+                if is_exiting():
+                    info('error in queue thread: %s', e)
+                    return
+                else:
+                    # Since the object has not been sent in the queue, we need
+                    # to decrease the size of the queue. The error acts as
+                    # if the object had been silently removed from the queue
+                    # and this step is necessary to have a properly working
+                    # queue.
+                    queue_sem.release()
+                    onerror(e, obj)
+
+    @staticmethod
+    def _on_queue_feeder_error(e, obj):
+        """
+        Private API hook called when feeding data in the background thread
+        raises an exception. For overriding by concurrent.futures.
+        """
+        import traceback
+        traceback.print_exc()
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+
+_sentinel = object()
+
+#
+# A queue type which also supports join() and task_done() methods
+#
+# Note that if you do not call task_done() for each finished task then
+# eventually the counter's semaphore may overflow causing Bad Things
+# to happen.
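+#
+# A rough usage sketch (assuming the documented public API): balance
+# every get() with a task_done() so that join() can return:
+#
+#     q = multiprocessing.JoinableQueue()
+#     q.put(job)                # producer side
+#     ...
+#     item = q.get()            # consumer side
+#     try:
+#         handle(item)          # hypothetical work function
+#     finally:
+#         q.task_done()
+#     ...
+#     q.join()    # returns once every put() has a matching task_done()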
+# + +class JoinableQueue(Queue): + + def __init__(self, maxsize=0, *, ctx): + Queue.__init__(self, maxsize, ctx=ctx) + self._unfinished_tasks = ctx.Semaphore(0) + self._cond = ctx.Condition() + + def __getstate__(self): + return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) + + def __setstate__(self, state): + Queue.__setstate__(self, state[:-2]) + self._cond, self._unfinished_tasks = state[-2:] + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty, self._cond: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._unfinished_tasks.release() + self._notempty.notify() + + def task_done(self): + with self._cond: + if not self._unfinished_tasks.acquire(False): + raise ValueError('task_done() called too many times') + if self._unfinished_tasks._semlock._is_zero(): + self._cond.notify_all() + + def join(self): + with self._cond: + if not self._unfinished_tasks._semlock._is_zero(): + self._cond.wait() + +# +# Simplified Queue type -- really just a locked pipe +# + +class SimpleQueue(object): + + def __init__(self, *, ctx): + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._poll = self._reader.poll + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + + def close(self): + self._reader.close() + self._writer.close() + + def empty(self): + return not self._poll() + + def __getstate__(self): + context.assert_spawning(self) + return (self._reader, self._writer, self._rlock, self._wlock) + + def __setstate__(self, state): + (self._reader, self._writer, self._rlock, self._wlock) = state + self._poll = self._reader.poll + + def get(self): + with self._rlock: + res = self._reader.recv_bytes() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def put(self, obj): + # serialize the data before acquiring the lock + obj = _ForkingPickler.dumps(obj) + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self._writer.send_bytes(obj) + else: + with self._wlock: + self._writer.send_bytes(obj) + + __class_getitem__ = classmethod(types.GenericAlias) diff --git a/Python314_4_x86_Template/Lib/multiprocessing/reduction.py b/Python314_4_x86_Template/Lib/multiprocessing/reduction.py new file mode 100644 index 00000000..fcccd3ee --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/reduction.py @@ -0,0 +1,281 @@ +# +# Module which deals with pickling of objects. +# +# multiprocessing/reduction.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +from abc import ABCMeta +import copyreg +import functools +import io +import os +import pickle +import socket +import sys + +from . 
import context + +__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] + + +HAVE_SEND_HANDLE = (sys.platform == 'win32' or + (hasattr(socket, 'CMSG_LEN') and + hasattr(socket, 'SCM_RIGHTS') and + hasattr(socket.socket, 'sendmsg'))) + +# +# Pickler subclass +# + +class ForkingPickler(pickle.Pickler): + '''Pickler subclass used by multiprocessing.''' + _extra_reducers = {} + _copyreg_dispatch_table = copyreg.dispatch_table + + def __init__(self, *args): + super().__init__(*args) + self.dispatch_table = self._copyreg_dispatch_table.copy() + self.dispatch_table.update(self._extra_reducers) + + @classmethod + def register(cls, type, reduce): + '''Register a reduce function for a type.''' + cls._extra_reducers[type] = reduce + + @classmethod + def dumps(cls, obj, protocol=None): + buf = io.BytesIO() + cls(buf, protocol).dump(obj) + return buf.getbuffer() + + loads = pickle.loads + +register = ForkingPickler.register + +def dump(obj, file, protocol=None): + '''Replacement for pickle.dump() using ForkingPickler.''' + ForkingPickler(file, protocol).dump(obj) + +# +# Platform specific definitions +# + +if sys.platform == 'win32': + # Windows + __all__ += ['DupHandle', 'duplicate', 'steal_handle'] + import _winapi + + def duplicate(handle, target_process=None, inheritable=False, + *, source_process=None): + '''Duplicate a handle. (target_process is a handle not a pid!)''' + current_process = _winapi.GetCurrentProcess() + if source_process is None: + source_process = current_process + if target_process is None: + target_process = current_process + return _winapi.DuplicateHandle( + source_process, handle, target_process, + 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) + + def steal_handle(source_pid, handle): + '''Steal a handle from process identified by source_pid.''' + source_process_handle = _winapi.OpenProcess( + _winapi.PROCESS_DUP_HANDLE, False, source_pid) + try: + return _winapi.DuplicateHandle( + source_process_handle, handle, + _winapi.GetCurrentProcess(), 0, False, + _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(source_process_handle) + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) + conn.send(dh) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + return conn.recv().detach() + + class DupHandle(object): + '''Picklable wrapper for a handle.''' + def __init__(self, handle, access, pid=None): + if pid is None: + # We just duplicate the handle in the current process and + # let the receiving process steal the handle. + pid = os.getpid() + proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) + try: + self._handle = _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), + handle, proc, access, False, 0) + finally: + _winapi.CloseHandle(proc) + self._access = access + self._pid = pid + + def detach(self): + '''Get the handle. This should only be called once.''' + # retrieve handle from process which currently owns it + if self._pid == os.getpid(): + # The handle has already been duplicated for this process. + return self._handle + # We must steal the handle from the process whose pid is self._pid. 
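+            # (DUPLICATE_CLOSE_SOURCE in the call below also closes the
+            # owning process's copy, so the handle is moved rather than
+            # shared.)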
+ proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, + self._pid) + try: + return _winapi.DuplicateHandle( + proc, self._handle, _winapi.GetCurrentProcess(), + self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(proc) + +else: + # Unix + __all__ += ['DupFd', 'sendfds', 'recvfds'] + import array + + def sendfds(sock, fds): + '''Send an array of fds over an AF_UNIX socket.''' + fds = array.array('i', fds) + msg = bytes([len(fds) % 256]) + sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + if sock.recv(1) != b'A': + raise RuntimeError('did not receive acknowledgement of fd') + + def recvfds(sock, size): + '''Receive an array of fds over an AF_UNIX socket.''' + a = array.array('i') + bytes_size = a.itemsize * size + msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) + if not msg and not ancdata: + raise EOFError + try: + # We send/recv an Ack byte after the fds to work around an old + # macOS bug; it isn't clear if this is still required but it + # makes unit testing fd sending easier. + # See: https://github.com/python/cpython/issues/58874 + sock.send(b'A') # Acknowledge + if len(ancdata) != 1: + raise RuntimeError('received %d items of ancdata' % + len(ancdata)) + cmsg_level, cmsg_type, cmsg_data = ancdata[0] + if (cmsg_level == socket.SOL_SOCKET and + cmsg_type == socket.SCM_RIGHTS): + if len(cmsg_data) % a.itemsize != 0: + raise ValueError + a.frombytes(cmsg_data) + if len(a) % 256 != msg[0]: + raise AssertionError( + "Len is {0:n} but msg[0] is {1!r}".format( + len(a), msg[0])) + return list(a) + except (ValueError, IndexError): + pass + raise RuntimeError('Invalid data received') + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + sendfds(s, [handle]) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + return recvfds(s, 1)[0] + + def DupFd(fd): + '''Return a wrapper for an fd.''' + popen_obj = context.get_spawning_popen() + if popen_obj is not None: + return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) + elif HAVE_SEND_HANDLE: + from . 
import resource_sharer + return resource_sharer.DupFd(fd) + else: + raise ValueError('SCM_RIGHTS appears not to be available') + +# +# Try making some callable types picklable +# + +def _reduce_method(m): + if m.__self__ is None: + return getattr, (m.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) +class _C: + def f(self): + pass +register(type(_C().f), _reduce_method) + + +def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) +register(type(list.append), _reduce_method_descriptor) +register(type(int.__add__), _reduce_method_descriptor) + + +def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) +def _rebuild_partial(func, args, keywords): + return functools.partial(func, *args, **keywords) +register(functools.partial, _reduce_partial) + +# +# Make sockets picklable +# + +if sys.platform == 'win32': + def _reduce_socket(s): + from .resource_sharer import DupSocket + return _rebuild_socket, (DupSocket(s),) + def _rebuild_socket(ds): + return ds.detach() + register(socket.socket, _reduce_socket) + +else: + def _reduce_socket(s): + df = DupFd(s.fileno()) + return _rebuild_socket, (df, s.family, s.type, s.proto) + def _rebuild_socket(df, family, type, proto): + fd = df.detach() + return socket.socket(family, type, proto, fileno=fd) + register(socket.socket, _reduce_socket) + + +class AbstractReducer(metaclass=ABCMeta): + '''Abstract base class for use in implementing a Reduction class + suitable for use in replacing the standard reduction mechanism + used in multiprocessing.''' + ForkingPickler = ForkingPickler + register = register + dump = dump + send_handle = send_handle + recv_handle = recv_handle + + if sys.platform == 'win32': + steal_handle = steal_handle + duplicate = duplicate + DupHandle = DupHandle + else: + sendfds = sendfds + recvfds = recvfds + DupFd = DupFd + + _reduce_method = _reduce_method + _reduce_method_descriptor = _reduce_method_descriptor + _rebuild_partial = _rebuild_partial + _reduce_socket = _reduce_socket + _rebuild_socket = _rebuild_socket + + def __init__(self, *args): + register(type(_C().f), _reduce_method) + register(type(list.append), _reduce_method_descriptor) + register(type(int.__add__), _reduce_method_descriptor) + register(functools.partial, _reduce_partial) + register(socket.socket, _reduce_socket) diff --git a/Python313_13_x86_Template/Lib/multiprocessing/resource_sharer.py b/Python314_4_x86_Template/Lib/multiprocessing/resource_sharer.py similarity index 100% rename from Python313_13_x86_Template/Lib/multiprocessing/resource_sharer.py rename to Python314_4_x86_Template/Lib/multiprocessing/resource_sharer.py diff --git a/Python314_4_x86_Template/Lib/multiprocessing/resource_tracker.py b/Python314_4_x86_Template/Lib/multiprocessing/resource_tracker.py new file mode 100644 index 00000000..2ef2d1ec --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/resource_tracker.py @@ -0,0 +1,416 @@ +############################################################################### +# Server process to keep track of unlinked resources (like shared memory +# segments, semaphores etc.) and clean them. +# +# On Unix we run a server process which keeps track of unlinked +# resources. The server ignores SIGINT and SIGTERM and reads from a +# pipe. Every other process of the program has a copy of the writable +# end of the pipe, so we get EOF when all other processes have exited. +# Then the server process unlinks any remaining resource names. 
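+#
+# A rough sketch of how the rest of multiprocessing talks to this module,
+# via the module-level helpers bound near the bottom of this file (the
+# segment name is hypothetical):
+#
+#     from multiprocessing import resource_tracker
+#     resource_tracker.register('/psm_d3adbeef', 'shared_memory')
+#     ...   # if the process dies here without unregistering, the tracker
+#     ...   # unlinks the leaked segment once all processes have exited
+#     resource_tracker.unregister('/psm_d3adbeef', 'shared_memory')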
+#
+# This is important because there may be system limits for such resources: for
+# instance, the system only supports a limited number of named semaphores, and
+# shared-memory segments live in RAM. If a Python process leaks such a
+# resource, it will not be removed until the next reboot. Without this
+# resource tracker process, "killall python" would probably leave unlinked
+# resources.
+
+import base64
+import os
+import signal
+import sys
+import threading
+import warnings
+from collections import deque
+
+import json
+
+from . import spawn
+from . import util
+
+__all__ = ['ensure_running', 'register', 'unregister']
+
+_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')
+_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
+
+def cleanup_noop(name):
+    raise RuntimeError('noop should never be registered or cleaned up')
+
+_CLEANUP_FUNCS = {
+    'noop': cleanup_noop,
+    'dummy': lambda name: None,  # Dummy resource used in tests
+}
+
+if os.name == 'posix':
+    import _multiprocessing
+    import _posixshmem
+
+    # Use sem_unlink() to clean up named semaphores.
+    #
+    # sem_unlink() may be missing if the Python build process detected the
+    # absence of POSIX named semaphores. In that case, no named semaphores were
+    # ever opened, so no cleanup would be necessary.
+    if hasattr(_multiprocessing, 'sem_unlink'):
+        _CLEANUP_FUNCS['semaphore'] = _multiprocessing.sem_unlink
+    _CLEANUP_FUNCS['shared_memory'] = _posixshmem.shm_unlink
+
+
+class ReentrantCallError(RuntimeError):
+    pass
+
+
+class ResourceTracker(object):
+
+    def __init__(self):
+        self._lock = threading.RLock()
+        self._fd = None
+        self._pid = None
+        self._exitcode = None
+        self._reentrant_messages = deque()
+
+        # True to use colon-separated lines, rather than JSON lines,
+        # for internal communication. (Mainly for testing.)
+        # Filenames not supported by the simple format will always be sent
+        # using JSON.
+        # The reader should understand all formats.
+        self._use_simple_format = True
+
+    def _reentrant_call_error(self):
+        # gh-109629: this happens if an explicit call to the ResourceTracker
+        # gets interrupted by a garbage collection, invoking a finalizer (*)
+        # that itself calls back into ResourceTracker.
+        # (*) for example the SemLock finalizer
+        raise ReentrantCallError(
+            "Reentrant call into the multiprocessing resource tracker")
+
+    def __del__(self):
+        # Make sure child processes are cleaned up before the
+        # ResourceTracker itself is destroyed.
+        # see https://github.com/python/cpython/issues/88887
+        self._stop(use_blocking_lock=False)
+
+    def _stop(self, use_blocking_lock=True):
+        if use_blocking_lock:
+            with self._lock:
+                self._stop_locked()
+        else:
+            acquired = self._lock.acquire(blocking=False)
+            try:
+                self._stop_locked()
+            finally:
+                if acquired:
+                    self._lock.release()
+
+    def _stop_locked(
+        self,
+        close=os.close,
+        waitpid=os.waitpid,
+        waitstatus_to_exitcode=os.waitstatus_to_exitcode,
+    ):
+        # This shouldn't happen (it might when called by a finalizer),
+        # so we check for it anyway.
+        if self._lock._recursion_count() > 1:
+            raise self._reentrant_call_error()
+        if self._fd is None:
+            # not running
+            return
+        if self._pid is None:
+            return
+
+        # closing the "alive" file descriptor stops main()
+        close(self._fd)
+        self._fd = None
+
+        try:
+            _, status = waitpid(self._pid, 0)
+        except ChildProcessError:
+            self._pid = None
+            self._exitcode = None
+            return
+
+        self._pid = None
+
+        try:
+            self._exitcode = waitstatus_to_exitcode(status)
+        except ValueError:
+            # os.waitstatus_to_exitcode may raise an exception for invalid values
+            self._exitcode = None
+
+    def getfd(self):
+        self.ensure_running()
+        return self._fd
+
+    def ensure_running(self):
+        '''Make sure that resource tracker process is running.
+
+        This can be run from any process. Usually a child process will use
+        the resource created by its parent.'''
+        return self._ensure_running_and_write()
+
+    def _teardown_dead_process(self):
+        os.close(self._fd)
+
+        # Clean-up to avoid dangling processes.
+        try:
+            # _pid can be None if this process is a child from another
+            # Python process, which has started the resource_tracker.
+            if self._pid is not None:
+                os.waitpid(self._pid, 0)
+        except ChildProcessError:
+            # The resource_tracker has already been terminated.
+            pass
+        self._fd = None
+        self._pid = None
+        self._exitcode = None
+
+        warnings.warn('resource_tracker: process died unexpectedly, '
+                      'relaunching. Some resources might leak.')
+
+    def _launch(self):
+        fds_to_pass = []
+        try:
+            fds_to_pass.append(sys.stderr.fileno())
+        except Exception:
+            pass
+        r, w = os.pipe()
+        try:
+            fds_to_pass.append(r)
+            # process will outlive us, so no need to wait on pid
+            exe = spawn.get_executable()
+            args = [
+                exe,
+                *util._args_from_interpreter_flags(),
+                '-c',
+                f'from multiprocessing.resource_tracker import main;main({r})',
+            ]
+            # bpo-33613: Register a signal mask that will block the signals.
+            # This signal mask will be inherited by the child that is going
+            # to be spawned and will protect the child from a race condition
+            # that can make the child die before it registers signal handlers
+            # for SIGINT and SIGTERM. The mask is unregistered after spawning
+            # the child.
+            prev_sigmask = None
+            try:
+                if _HAVE_SIGMASK:
+                    prev_sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)
+                pid = util.spawnv_passfds(exe, args, fds_to_pass)
+            finally:
+                if prev_sigmask is not None:
+                    signal.pthread_sigmask(signal.SIG_SETMASK, prev_sigmask)
+        except:
+            os.close(w)
+            raise
+        else:
+            self._fd = w
+            self._pid = pid
+        finally:
+            os.close(r)
+
+    def _make_probe_message(self):
+        """Return a probe message."""
+        if self._use_simple_format:
+            return b'PROBE:0:noop\n'
+        return (
+            json.dumps(
+                {"cmd": "PROBE", "rtype": "noop"},
+                ensure_ascii=True,
+                separators=(",", ":"),
+            )
+            + "\n"
+        ).encode("ascii")
+
+    def _ensure_running_and_write(self, msg=None):
+        with self._lock:
+            if self._lock._recursion_count() > 1:
+                # The code below is certainly not reentrant-safe, so bail out
+                if msg is None:
+                    raise self._reentrant_call_error()
+                return self._reentrant_messages.append(msg)
+
+            if self._fd is not None:
+                # resource tracker was launched before, is it still running?
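+                # (A probe is a no-op on the server side: main() simply
+                # ignores PROBE messages, so a successful write merely
+                # proves the pipe is still open.)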
+                if msg is None:
+                    to_send = self._make_probe_message()
+                else:
+                    to_send = msg
+                try:
+                    self._write(to_send)
+                except OSError:
+                    self._teardown_dead_process()
+                    self._launch()
+
+                msg = None  # message was sent in probe
+            else:
+                self._launch()
+
+            while True:
+                try:
+                    reentrant_msg = self._reentrant_messages.popleft()
+                except IndexError:
+                    break
+                self._write(reentrant_msg)
+            if msg is not None:
+                self._write(msg)
+
+    def _check_alive(self):
+        '''Check that the pipe has not been closed by sending a probe.'''
+        try:
+            # We cannot use send here as it calls ensure_running, creating
+            # a cycle.
+            os.write(self._fd, self._make_probe_message())
+        except OSError:
+            return False
+        else:
+            return True
+
+    def register(self, name, rtype):
+        '''Register name of resource with resource tracker.'''
+        self._send('REGISTER', name, rtype)
+
+    def unregister(self, name, rtype):
+        '''Unregister name of resource with resource tracker.'''
+        self._send('UNREGISTER', name, rtype)
+
+    def _write(self, msg):
+        nbytes = os.write(self._fd, msg)
+        assert nbytes == len(msg), f"{nbytes=} != {len(msg)=}"
+
+    def _send(self, cmd, name, rtype):
+        if self._use_simple_format and '\n' not in name:
+            msg = f"{cmd}:{name}:{rtype}\n".encode("ascii")
+            if len(msg) > 512:
+                # posix guarantees that writes to a pipe of less than PIPE_BUF
+                # bytes are atomic, and that PIPE_BUF >= 512
+                raise ValueError('msg too long')
+            self._ensure_running_and_write(msg)
+            return
+
+        # POSIX guarantees that writes to a pipe of less than PIPE_BUF (512 on Linux)
+        # bytes are atomic. Therefore, we want the message to be shorter than 512 bytes.
+        # POSIX shm_open() and sem_open() require the name, including its leading slash,
+        # to be at most NAME_MAX bytes (255 on Linux).
+        # With json.dumps(..., ensure_ascii=True) every non-ASCII byte becomes a 6-char
+        # escape like \uDC80.
+        # As we want the overall message to be kept atomic and therefore smaller than
+        # 512 bytes, we encode the raw name bytes with URL-safe Base64, so a 255-byte
+        # name will not exceed 340 bytes.
+        b = name.encode('utf-8', 'surrogateescape')
+        if len(b) > 255:
+            raise ValueError('shared memory name too long (max 255 bytes)')
+        b64 = base64.urlsafe_b64encode(b).decode('ascii')
+
+        payload = {"cmd": cmd, "rtype": rtype, "base64_name": b64}
+        msg = (json.dumps(payload, ensure_ascii=True, separators=(",", ":")) + "\n").encode("ascii")
+
+        # The entire JSON message is guaranteed < PIPE_BUF (512 bytes) by construction.
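+        # (Worked out: Base64 turns 255 name bytes into 4 * ceil(255 / 3)
+        # = 340 characters, and the fixed JSON scaffolding plus the
+        # longest rtype fits well within the remaining 172 bytes.)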
+ assert len(msg) <= 512, f"internal error: message too long ({len(msg)} bytes)" + assert msg.startswith(b'{') + + self._ensure_running_and_write(msg) + +_resource_tracker = ResourceTracker() +ensure_running = _resource_tracker.ensure_running +register = _resource_tracker.register +unregister = _resource_tracker.unregister +getfd = _resource_tracker.getfd + + +def _decode_message(line): + if line.startswith(b'{'): + try: + obj = json.loads(line.decode('ascii')) + except Exception as e: + raise ValueError("malformed resource_tracker message: %r" % (line,)) from e + + cmd = obj["cmd"] + rtype = obj["rtype"] + b64 = obj.get("base64_name", "") + + if not isinstance(cmd, str) or not isinstance(rtype, str) or not isinstance(b64, str): + raise ValueError("malformed resource_tracker fields: %r" % (obj,)) + + try: + name = base64.urlsafe_b64decode(b64).decode('utf-8', 'surrogateescape') + except ValueError as e: + raise ValueError("malformed resource_tracker base64_name: %r" % (b64,)) from e + else: + cmd, rest = line.strip().decode('ascii').split(':', maxsplit=1) + name, rtype = rest.rsplit(':', maxsplit=1) + return cmd, rtype, name + + +def main(fd): + '''Run resource tracker.''' + # protect the process from ^C and "killall python" etc + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + + for f in (sys.stdin, sys.stdout): + try: + f.close() + except Exception: + pass + + cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} + exit_code = 0 + + try: + # keep track of registered/unregistered resources + with open(fd, 'rb') as f: + for line in f: + try: + cmd, rtype, name = _decode_message(line) + cleanup_func = _CLEANUP_FUNCS.get(rtype, None) + if cleanup_func is None: + raise ValueError( + f'Cannot register {name} for automatic cleanup: ' + f'unknown resource type {rtype}') + + if cmd == 'REGISTER': + cache[rtype].add(name) + elif cmd == 'UNREGISTER': + cache[rtype].remove(name) + elif cmd == 'PROBE': + pass + else: + raise RuntimeError('unrecognized command %r' % cmd) + except Exception: + exit_code = 3 + try: + sys.excepthook(*sys.exc_info()) + except: + pass + finally: + # all processes have terminated; cleanup any remaining resources + for rtype, rtype_cache in cache.items(): + if rtype_cache: + try: + exit_code = 1 + if rtype == 'dummy': + # The test 'dummy' resource is expected to leak. + # We skip the warning (and *only* the warning) for it. + pass + else: + warnings.warn( + f'resource_tracker: There appear to be ' + f'{len(rtype_cache)} leaked {rtype} objects to ' + f'clean up at shutdown: {rtype_cache}' + ) + except Exception: + pass + for name in rtype_cache: + # For some reason the process which created and registered this + # resource has failed to unregister it. Presumably it has + # died. We therefore unlink it. + try: + try: + _CLEANUP_FUNCS[rtype](name) + except Exception as e: + exit_code = 2 + warnings.warn('resource_tracker: %r: %s' % (name, e)) + finally: + pass + + sys.exit(exit_code) diff --git a/Python314_4_x86_Template/Lib/multiprocessing/shared_memory.py b/Python314_4_x86_Template/Lib/multiprocessing/shared_memory.py new file mode 100644 index 00000000..99a8ce33 --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/shared_memory.py @@ -0,0 +1,544 @@ +"""Provides shared memory for direct access across processes. + +The API of this package is currently provisional. Refer to the +documentation for details. 
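+
+A minimal usage sketch (one process shown; a second process would attach
+to the same block with SharedMemory(name=shm.name)):
+
+    from multiprocessing import shared_memory
+
+    shm = shared_memory.SharedMemory(create=True, size=16)
+    shm.buf[:5] = b'hello'
+    bytes(shm.buf[:5])   # b'hello'
+    shm.close()          # release this handle
+    shm.unlink()         # destroy the block itself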
+""" + + +__all__ = [ 'SharedMemory', 'ShareableList' ] + + +from functools import partial +import mmap +import os +import errno +import struct +import secrets +import types + +if os.name == "nt": + import _winapi + _USE_POSIX = False +else: + import _posixshmem + _USE_POSIX = True + +from . import resource_tracker + +_O_CREX = os.O_CREAT | os.O_EXCL + +# FreeBSD (and perhaps other BSDs) limit names to 14 characters. +_SHM_SAFE_NAME_LENGTH = 14 + +# Shared memory block name prefix +if _USE_POSIX: + _SHM_NAME_PREFIX = '/psm_' +else: + _SHM_NAME_PREFIX = 'wnsm_' + + +def _make_filename(): + "Create a random filename for the shared memory object." + # number of random bytes to use for name + nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 + assert nbytes >= 2, '_SHM_NAME_PREFIX too long' + name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) + assert len(name) <= _SHM_SAFE_NAME_LENGTH + return name + + +class SharedMemory: + """Creates a new shared memory block or attaches to an existing + shared memory block. + + Every shared memory block is assigned a unique name. This enables + one process to create a shared memory block with a particular name + so that a different process can attach to that same shared memory + block using that same name. + + As a resource for sharing data across processes, shared memory blocks + may outlive the original process that created them. When one process + no longer needs access to a shared memory block that might still be + needed by other processes, the close() method should be called. + When a shared memory block is no longer needed by any process, the + unlink() method should be called to ensure proper cleanup.""" + + # Defaults; enables close() and unlink() to run without errors. + _name = None + _fd = -1 + _mmap = None + _buf = None + _flags = os.O_RDWR + _mode = 0o600 + _prepend_leading_slash = True if _USE_POSIX else False + _track = True + + def __init__(self, name=None, create=False, size=0, *, track=True): + if not size >= 0: + raise ValueError("'size' must be a positive integer") + if create: + self._flags = _O_CREX | os.O_RDWR + if size == 0: + raise ValueError("'size' must be a positive number different from zero") + if name is None and not self._flags & os.O_EXCL: + raise ValueError("'name' can only be None if create=True") + + self._track = track + if _USE_POSIX: + + # POSIX Shared Memory + + if name is None: + while True: + name = _make_filename() + try: + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + except FileExistsError: + continue + self._name = name + break + else: + name = "/" + name if self._prepend_leading_slash else name + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + self._name = name + try: + if create and size: + os.ftruncate(self._fd, size) + stats = os.fstat(self._fd) + size = stats.st_size + self._mmap = mmap.mmap(self._fd, size) + except OSError: + self.unlink() + raise + if self._track: + resource_tracker.register(self._name, "shared_memory") + + else: + + # Windows Named Shared Memory + + if create: + while True: + temp_name = _make_filename() if name is None else name + # Create and reserve shared memory block with this name + # until it can be attached to by mmap. 
+ h_map = _winapi.CreateFileMapping( + _winapi.INVALID_HANDLE_VALUE, + _winapi.NULL, + _winapi.PAGE_READWRITE, + (size >> 32) & 0xFFFFFFFF, + size & 0xFFFFFFFF, + temp_name + ) + try: + last_error_code = _winapi.GetLastError() + if last_error_code == _winapi.ERROR_ALREADY_EXISTS: + if name is not None: + raise FileExistsError( + errno.EEXIST, + os.strerror(errno.EEXIST), + name, + _winapi.ERROR_ALREADY_EXISTS + ) + else: + continue + self._mmap = mmap.mmap(-1, size, tagname=temp_name) + finally: + _winapi.CloseHandle(h_map) + self._name = temp_name + break + + else: + self._name = name + # Dynamically determine the existing named shared memory + # block's size which is likely a multiple of mmap.PAGESIZE. + h_map = _winapi.OpenFileMapping( + _winapi.FILE_MAP_READ, + False, + name + ) + try: + p_buf = _winapi.MapViewOfFile( + h_map, + _winapi.FILE_MAP_READ, + 0, + 0, + 0 + ) + finally: + _winapi.CloseHandle(h_map) + try: + size = _winapi.VirtualQuerySize(p_buf) + finally: + _winapi.UnmapViewOfFile(p_buf) + self._mmap = mmap.mmap(-1, size, tagname=name) + + self._size = size + self._buf = memoryview(self._mmap) + + def __del__(self): + try: + self.close() + except OSError: + pass + + def __reduce__(self): + return ( + self.__class__, + ( + self.name, + False, + self.size, + ), + ) + + def __repr__(self): + return f'{self.__class__.__name__}({self.name!r}, size={self.size})' + + @property + def buf(self): + "A memoryview of contents of the shared memory block." + return self._buf + + @property + def name(self): + "Unique name that identifies the shared memory block." + reported_name = self._name + if _USE_POSIX and self._prepend_leading_slash: + if self._name.startswith("/"): + reported_name = self._name[1:] + return reported_name + + @property + def size(self): + "Size in bytes." + return self._size + + def close(self): + """Closes access to the shared memory from this instance but does + not destroy the shared memory block.""" + if self._buf is not None: + self._buf.release() + self._buf = None + if self._mmap is not None: + self._mmap.close() + self._mmap = None + if _USE_POSIX and self._fd >= 0: + os.close(self._fd) + self._fd = -1 + + def unlink(self): + """Requests that the underlying shared memory block be destroyed. + + Unlink should be called once (and only once) across all handles + which have access to the shared memory block, even if these + handles belong to different processes. Closing and unlinking may + happen in any order, but trying to access data inside a shared + memory block after unlinking may result in memory errors, + depending on platform. + + This method has no effect on Windows, where the only way to + delete a shared memory block is to close all handles.""" + + if _USE_POSIX and self._name: + _posixshmem.shm_unlink(self._name) + if self._track: + resource_tracker.unregister(self._name, "shared_memory") + + +_encoding = "utf8" + +class ShareableList: + """Pattern for a mutable list-like object shareable via a shared + memory block. It differs from the built-in list type in that these + lists can not change their overall length (i.e. no append, insert, + etc.) 
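+
+    A rough usage sketch (each slot keeps its originally allocated size,
+    so str and bytes values may shrink but never grow):
+
+        sl = ShareableList([1, 2.0, 'hello', None])
+        sl[0] = 42        # ints always occupy an 8-byte slot
+        sl[2] = 'hi'      # fits: shorter than the original 'hello'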
+ + Because values are packed into a memoryview as bytes, the struct + packing format for any storable value must require no more than 8 + characters to describe its format.""" + + # The shared memory area is organized as follows: + # - 8 bytes: number of items (N) as a 64-bit integer + # - (N + 1) * 8 bytes: offsets of each element from the start of the + # data area + # - K bytes: the data area storing item values (with encoding and size + # depending on their respective types) + # - N * 8 bytes: `struct` format string for each element + # - N bytes: index into _back_transforms_mapping for each element + # (for reconstructing the corresponding Python value) + _types_mapping = { + int: "q", + float: "d", + bool: "xxxxxxx?", + str: "%ds", + bytes: "%ds", + None.__class__: "xxxxxx?x", + } + _alignment = 8 + _back_transforms_mapping = { + 0: lambda value: value, # int, float, bool + 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str + 2: lambda value: value.rstrip(b'\x00'), # bytes + 3: lambda _value: None, # None + } + + @staticmethod + def _extract_recreation_code(value): + """Used in concert with _back_transforms_mapping to convert values + into the appropriate Python objects when retrieving them from + the list as well as when storing them.""" + if not isinstance(value, (str, bytes, None.__class__)): + return 0 + elif isinstance(value, str): + return 1 + elif isinstance(value, bytes): + return 2 + else: + return 3 # NoneType + + def __init__(self, sequence=None, *, name=None): + if name is None or sequence is not None: + sequence = sequence or () + _formats = [ + self._types_mapping[type(item)] + if not isinstance(item, (str, bytes)) + else self._types_mapping[type(item)] % ( + self._alignment * (len(item) // self._alignment + 1), + ) + for item in sequence + ] + self._list_len = len(_formats) + assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len + offset = 0 + # The offsets of each list element into the shared memory's + # data area (0 meaning the start of the data area, not the start + # of the shared memory area). + self._allocated_offsets = [0] + for fmt in _formats: + offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) + self._allocated_offsets.append(offset) + _recreation_codes = [ + self._extract_recreation_code(item) for item in sequence + ] + requested_size = struct.calcsize( + "q" + self._format_size_metainfo + + "".join(_formats) + + self._format_packing_metainfo + + self._format_back_transform_codes + ) + + self.shm = SharedMemory(name, create=True, size=requested_size) + else: + self.shm = SharedMemory(name) + + if sequence is not None: + _enc = _encoding + struct.pack_into( + "q" + self._format_size_metainfo, + self.shm.buf, + 0, + self._list_len, + *(self._allocated_offsets) + ) + struct.pack_into( + "".join(_formats), + self.shm.buf, + self._offset_data_start, + *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) + ) + struct.pack_into( + self._format_packing_metainfo, + self.shm.buf, + self._offset_packing_formats, + *(v.encode(_enc) for v in _formats) + ) + struct.pack_into( + self._format_back_transform_codes, + self.shm.buf, + self._offset_back_transform_codes, + *(_recreation_codes) + ) + + else: + self._list_len = len(self) # Obtains size from offset 0 in buffer. + self._allocated_offsets = list( + struct.unpack_from( + self._format_size_metainfo, + self.shm.buf, + 1 * 8 + ) + ) + + def _get_packing_format(self, position): + "Gets the packing format for a single value stored in the list." 
+ position = position if position >= 0 else position + self._list_len + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + v = struct.unpack_from( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8 + )[0] + fmt = v.rstrip(b'\x00') + fmt_as_str = fmt.decode(_encoding) + + return fmt_as_str + + def _get_back_transform(self, position): + "Gets the back transformation function for a single value." + + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + transform_code = struct.unpack_from( + "b", + self.shm.buf, + self._offset_back_transform_codes + position + )[0] + transform_function = self._back_transforms_mapping[transform_code] + + return transform_function + + def _set_packing_format_and_transform(self, position, fmt_as_str, value): + """Sets the packing format and back transformation code for a + single value in the list at the specified position.""" + + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + struct.pack_into( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8, + fmt_as_str.encode(_encoding) + ) + + transform_code = self._extract_recreation_code(value) + struct.pack_into( + "b", + self.shm.buf, + self._offset_back_transform_codes + position, + transform_code + ) + + def __getitem__(self, position): + position = position if position >= 0 else position + self._list_len + try: + offset = self._offset_data_start + self._allocated_offsets[position] + (v,) = struct.unpack_from( + self._get_packing_format(position), + self.shm.buf, + offset + ) + except IndexError: + raise IndexError("index out of range") + + back_transform = self._get_back_transform(position) + v = back_transform(v) + + return v + + def __setitem__(self, position, value): + position = position if position >= 0 else position + self._list_len + try: + item_offset = self._allocated_offsets[position] + offset = self._offset_data_start + item_offset + current_format = self._get_packing_format(position) + except IndexError: + raise IndexError("assignment index out of range") + + if not isinstance(value, (str, bytes)): + new_format = self._types_mapping[type(value)] + encoded_value = value + else: + allocated_length = self._allocated_offsets[position + 1] - item_offset + + encoded_value = (value.encode(_encoding) + if isinstance(value, str) else value) + if len(encoded_value) > allocated_length: + raise ValueError("bytes/str item exceeds available storage") + if current_format[-1] == "s": + new_format = current_format + else: + new_format = self._types_mapping[str] % ( + allocated_length, + ) + + self._set_packing_format_and_transform( + position, + new_format, + value + ) + struct.pack_into(new_format, self.shm.buf, offset, encoded_value) + + def __reduce__(self): + return partial(self.__class__, name=self.shm.name), () + + def __len__(self): + return struct.unpack_from("q", self.shm.buf, 0)[0] + + def __repr__(self): + return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' + + @property + def format(self): + "The struct packing format used by all currently stored items." + return "".join( + self._get_packing_format(i) for i in range(self._list_len) + ) + + @property + def _format_size_metainfo(self): + "The struct packing format used for the items' storage offsets." 
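+        # (One "q" per stored offset: N item offsets plus the final
+        # end-of-data offset appended while building _allocated_offsets.)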
+ return "q" * (self._list_len + 1) + + @property + def _format_packing_metainfo(self): + "The struct packing format used for the items' packing formats." + return "8s" * self._list_len + + @property + def _format_back_transform_codes(self): + "The struct packing format used for the items' back transforms." + return "b" * self._list_len + + @property + def _offset_data_start(self): + # - 8 bytes for the list length + # - (N + 1) * 8 bytes for the element offsets + return (self._list_len + 2) * 8 + + @property + def _offset_packing_formats(self): + return self._offset_data_start + self._allocated_offsets[-1] + + @property + def _offset_back_transform_codes(self): + return self._offset_packing_formats + self._list_len * 8 + + def count(self, value): + "L.count(value) -> integer -- return number of occurrences of value." + + return sum(value == entry for entry in self) + + def index(self, value): + """L.index(value) -> integer -- return first index of value. + Raises ValueError if the value is not present.""" + + for position, entry in enumerate(self): + if value == entry: + return position + else: + raise ValueError("ShareableList.index(x): x not in list") + + __class_getitem__ = classmethod(types.GenericAlias) diff --git a/Python313_13_x86_Template/Lib/multiprocessing/sharedctypes.py b/Python314_4_x86_Template/Lib/multiprocessing/sharedctypes.py similarity index 100% rename from Python313_13_x86_Template/Lib/multiprocessing/sharedctypes.py rename to Python314_4_x86_Template/Lib/multiprocessing/sharedctypes.py diff --git a/Python313_13_x86_Template/Lib/multiprocessing/spawn.py b/Python314_4_x86_Template/Lib/multiprocessing/spawn.py similarity index 100% rename from Python313_13_x86_Template/Lib/multiprocessing/spawn.py rename to Python314_4_x86_Template/Lib/multiprocessing/spawn.py diff --git a/Python314_4_x86_Template/Lib/multiprocessing/synchronize.py b/Python314_4_x86_Template/Lib/multiprocessing/synchronize.py new file mode 100644 index 00000000..9188114a --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/synchronize.py @@ -0,0 +1,411 @@ +# +# Module implementing synchronization primitives +# +# multiprocessing/synchronize.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' + ] + +import threading +import sys +import tempfile +import _multiprocessing +import time + +from . import context +from . import process +from . import util + +# TODO: Do any platforms still lack a functioning sem_open? +try: + from _multiprocessing import SemLock, sem_unlink +except ImportError: + raise ImportError("This platform lacks a functioning sem_open" + + " implementation. 
https://github.com/python/cpython/issues/48020.") + +# +# Constants +# + +# These match the enum in Modules/_multiprocessing/semaphore.c +RECURSIVE_MUTEX = 0 +SEMAPHORE = 1 + +SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX + +# +# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` +# + +class SemLock(object): + + _rand = tempfile._RandomNameSequence() + + def __init__(self, kind, value, maxvalue, *, ctx): + if ctx is None: + ctx = context._default_context.get_context() + self._is_fork_ctx = ctx.get_start_method() == 'fork' + unlink_now = sys.platform == 'win32' or self._is_fork_ctx + for i in range(100): + try: + sl = self._semlock = _multiprocessing.SemLock( + kind, value, maxvalue, self._make_name(), + unlink_now) + except FileExistsError: + pass + else: + break + else: + raise FileExistsError('cannot find name for semaphore') + + util.debug('created semlock with handle %s' % sl.handle) + self._make_methods() + + if sys.platform != 'win32': + def _after_fork(obj): + obj._semlock._after_fork() + util.register_after_fork(self, _after_fork) + + if self._semlock.name is not None: + # We only get here if we are on Unix with forking + # disabled. When the object is garbage collected or the + # process shuts down we unlink the semaphore name + from .resource_tracker import register + register(self._semlock.name, "semaphore") + util.Finalize(self, SemLock._cleanup, (self._semlock.name,), + exitpriority=0) + + @staticmethod + def _cleanup(name): + from .resource_tracker import unregister + sem_unlink(name) + unregister(name, "semaphore") + + def _make_methods(self): + self.acquire = self._semlock.acquire + self.release = self._semlock.release + + def locked(self): + return self._semlock._is_zero() + + def __enter__(self): + return self._semlock.__enter__() + + def __exit__(self, *args): + return self._semlock.__exit__(*args) + + def __getstate__(self): + context.assert_spawning(self) + sl = self._semlock + if sys.platform == 'win32': + h = context.get_spawning_popen().duplicate_for_child(sl.handle) + else: + if self._is_fork_ctx: + raise RuntimeError('A SemLock created in a fork context is being ' + 'shared with a process in a spawn context. This is ' + 'not supported. Please use the same context to create ' + 'multiprocessing objects and Process.') + h = sl.handle + return (h, sl.kind, sl.maxvalue, sl.name) + + def __setstate__(self, state): + self._semlock = _multiprocessing.SemLock._rebuild(*state) + util.debug('recreated blocker with handle %r' % state[0]) + self._make_methods() + # Ensure that deserialized SemLock can be serialized again (gh-108520). + self._is_fork_ctx = False + + @staticmethod + def _make_name(): + return '%s-%s' % (process.current_process()._config['semprefix'], + next(SemLock._rand)) + +# +# Semaphore +# + +class Semaphore(SemLock): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) + + def get_value(self): + '''Returns current value of Semaphore. + + Raises NotImplementedError on Mac OSX + because of broken sem_getvalue(). 
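+
+        Illustrative example (editor's annotation, not upstream)::
+
+            import multiprocessing
+            sem = multiprocessing.Semaphore(2)
+            sem.acquire()
+            sem.get_value()  # 1 where sem_getvalue() works, e.g. Linux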
+ ''' + return self._semlock._get_value() + + def __repr__(self): + try: + value = self.get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s)>' % (self.__class__.__name__, value) + +# +# Bounded semaphore +# + +class BoundedSemaphore(Semaphore): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) + + def __repr__(self): + try: + value = self.get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s, maxvalue=%s)>' % \ + (self.__class__.__name__, value, self._semlock.maxvalue) + +# +# Non-recursive lock +# + +class Lock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + elif not self._semlock._is_zero(): + name = 'None' + elif self._semlock._count() > 0: + name = 'SomeOtherThread' + else: + name = 'SomeOtherProcess' + except Exception: + name = 'unknown' + return '<%s(owner=%s)>' % (self.__class__.__name__, name) + +# +# Recursive lock +# + +class RLock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + count = self._semlock._count() + elif not self._semlock._is_zero(): + name, count = 'None', 0 + elif self._semlock._count() > 0: + name, count = 'SomeOtherThread', 'nonzero' + else: + name, count = 'SomeOtherProcess', 'nonzero' + except Exception: + name, count = 'unknown', 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) + +# +# Condition variable +# + +class Condition(object): + + def __init__(self, lock=None, *, ctx): + self._lock = lock or ctx.RLock() + self._sleeping_count = ctx.Semaphore(0) + self._woken_count = ctx.Semaphore(0) + self._wait_semaphore = ctx.Semaphore(0) + self._make_methods() + + def __getstate__(self): + context.assert_spawning(self) + return (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) + + def __setstate__(self, state): + (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) = state + self._make_methods() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def _make_methods(self): + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __repr__(self): + try: + num_waiters = (self._sleeping_count.get_value() - + self._woken_count.get_value()) + except Exception: + num_waiters = 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) + + def wait(self, timeout=None): + assert self._lock._semlock._is_mine(), \ + 'must acquire() condition before using wait()' + + # indicate that this thread is going to sleep + self._sleeping_count.release() + + # release lock + count = self._lock._semlock._count() + for i in range(count): + self._lock.release() + + try: + # wait for notification or timeout + return self._wait_semaphore.acquire(True, timeout) + finally: + # indicate that this thread has woken + self._woken_count.release() + + # reacquire lock + for i in range(count): + self._lock.acquire() + + def notify(self, n=1): + assert self._lock._semlock._is_mine(), 'lock is not owned' + assert 
not self._wait_semaphore.acquire( + False), ('notify: Should not have been able to acquire ' + + '_wait_semaphore') + + # to take account of timeouts since last notify*() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res, ('notify: Bug in sleeping_count.acquire' + + '- res should not be False') + + sleepers = 0 + while sleepers < n and self._sleeping_count.acquire(False): + self._wait_semaphore.release() # wake up one sleeper + sleepers += 1 + + if sleepers: + for i in range(sleepers): + self._woken_count.acquire() # wait for a sleeper to wake + + # rezero wait_semaphore in case some timeouts just happened + while self._wait_semaphore.acquire(False): + pass + + def notify_all(self): + self.notify(n=sys.maxsize) + + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = time.monotonic() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - time.monotonic() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + +# +# Event +# + +class Event(object): + + def __init__(self, *, ctx): + self._cond = ctx.Condition(ctx.Lock()) + self._flag = ctx.Semaphore(0) + + def is_set(self): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + return True + return False + + def set(self): + with self._cond: + self._flag.acquire(False) + self._flag.release() + self._cond.notify_all() + + def clear(self): + with self._cond: + self._flag.acquire(False) + + def wait(self, timeout=None): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + else: + self._cond.wait(timeout) + + if self._flag.acquire(False): + self._flag.release() + return True + return False + + def __repr__(self): + set_status = 'set' if self.is_set() else 'unset' + return f"<{type(self).__qualname__} at {id(self):#x} {set_status}>" +# +# Barrier +# + +class Barrier(threading.Barrier): + + def __init__(self, parties, action=None, timeout=None, *, ctx): + import struct + from .heap import BufferWrapper + wrapper = BufferWrapper(struct.calcsize('i') * 2) + cond = ctx.Condition() + self.__setstate__((parties, action, timeout, cond, wrapper)) + self._state = 0 + self._count = 0 + + def __setstate__(self, state): + (self._parties, self._action, self._timeout, + self._cond, self._wrapper) = state + self._array = self._wrapper.create_memoryview().cast('i') + + def __getstate__(self): + return (self._parties, self._action, self._timeout, + self._cond, self._wrapper) + + @property + def _state(self): + return self._array[0] + + @_state.setter + def _state(self, value): + self._array[0] = value + + @property + def _count(self): + return self._array[1] + + @_count.setter + def _count(self, value): + self._array[1] = value diff --git a/Python314_4_x86_Template/Lib/multiprocessing/util.py b/Python314_4_x86_Template/Lib/multiprocessing/util.py new file mode 100644 index 00000000..549fb07c --- /dev/null +++ b/Python314_4_x86_Template/Lib/multiprocessing/util.py @@ -0,0 +1,560 @@ +# +# Module providing various facilities to other parts of the package +# +# multiprocessing/util.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
+# + +import os +import itertools +import sys +import weakref +import atexit +import threading # we want threading to install it's + # cleanup function before multiprocessing does +from subprocess import _args_from_interpreter_flags # noqa: F401 + +from . import process + +__all__ = [ + 'sub_debug', 'debug', 'info', 'sub_warning', 'warn', 'get_logger', + 'log_to_stderr', 'get_temp_dir', 'register_after_fork', + 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', + 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', + ] + +# +# Logging +# + +NOTSET = 0 +SUBDEBUG = 5 +DEBUG = 10 +INFO = 20 +SUBWARNING = 25 +WARNING = 30 + +LOGGER_NAME = 'multiprocessing' +DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' + +_logger = None +_log_to_stderr = False + +def sub_debug(msg, *args): + if _logger: + _logger.log(SUBDEBUG, msg, *args, stacklevel=2) + +def debug(msg, *args): + if _logger: + _logger.log(DEBUG, msg, *args, stacklevel=2) + +def info(msg, *args): + if _logger: + _logger.log(INFO, msg, *args, stacklevel=2) + +def warn(msg, *args): + if _logger: + _logger.log(WARNING, msg, *args, stacklevel=2) + +def sub_warning(msg, *args): + if _logger: + _logger.log(SUBWARNING, msg, *args, stacklevel=2) + +def get_logger(): + ''' + Returns logger used by multiprocessing + ''' + global _logger + import logging + + with logging._lock: + if not _logger: + + _logger = logging.getLogger(LOGGER_NAME) + _logger.propagate = 0 + + # XXX multiprocessing should cleanup before logging + if hasattr(atexit, 'unregister'): + atexit.unregister(_exit_function) + atexit.register(_exit_function) + else: + atexit._exithandlers.remove((_exit_function, (), {})) + atexit._exithandlers.append((_exit_function, (), {})) + + return _logger + +def log_to_stderr(level=None): + ''' + Turn on logging and add a handler which prints to stderr + ''' + global _log_to_stderr + import logging + + logger = get_logger() + formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + + if level: + logger.setLevel(level) + _log_to_stderr = True + return _logger + + +# Abstract socket support + +def _platform_supports_abstract_sockets(): + return sys.platform in ("linux", "android") + + +def is_abstract_socket_namespace(address): + if not address: + return False + if isinstance(address, bytes): + return address[0] == 0 + elif isinstance(address, str): + return address[0] == "\0" + raise TypeError(f'address type of {address!r} unrecognized') + + +abstract_sockets_supported = _platform_supports_abstract_sockets() + +# +# Function returning a temp directory which will be removed on exit +# + +# Maximum length of a NULL-terminated [1] socket file path is usually +# between 92 and 108 [2], but Linux is known to use a size of 108 [3]. +# BSD-based systems usually use a size of 104 or 108 and Windows does +# not create AF_UNIX sockets. +# +# [1]: https://github.com/python/cpython/issues/140734 +# [2]: https://pubs.opengroup.org/onlinepubs/9799919799/basedefs/sys_un.h.html +# [3]: https://man7.org/linux/man-pages/man7/unix.7.html + +if sys.platform == 'linux': + _SUN_PATH_MAX = 108 +elif sys.platform.startswith(('openbsd', 'freebsd')): + _SUN_PATH_MAX = 104 +else: + # On Windows platforms, we do not create AF_UNIX sockets. 
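+    # Editor's note (illustrative, not upstream): with the common "/tmp"
+    # TMPDIR, a listener path like "/tmp/pymp-XXXXXXXX/sock-XXXXXXXX" is
+    # only 32 characters, comfortably below even the conservative 92 limit.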
+ _SUN_PATH_MAX = None if os.name == 'nt' else 92 + +def _remove_temp_dir(rmtree, tempdir): + rmtree(tempdir) + + current_process = process.current_process() + # current_process() can be None if the finalizer is called + # late during Python finalization + if current_process is not None: + current_process._config['tempdir'] = None + +def _get_base_temp_dir(tempfile): + """Get a temporary directory where socket files will be created. + + To prevent additional imports, pass a pre-imported 'tempfile' module. + """ + if os.name == 'nt': + return None + # Most of the time, the default temporary directory is /tmp. Thus, + # listener sockets files "$TMPDIR/pymp-XXXXXXXX/sock-XXXXXXXX" do + # not have a path length exceeding SUN_PATH_MAX. + # + # If users specify their own temporary directory, we may be unable + # to create those files. Therefore, we fall back to the system-wide + # temporary directory /tmp, assumed to exist on POSIX systems. + # + # See https://github.com/python/cpython/issues/132124. + base_tempdir = tempfile.gettempdir() + # Files created in a temporary directory are suffixed by a string + # generated by tempfile._RandomNameSequence, which, by design, + # is 8 characters long. + # + # Thus, the socket file path length (without NULL terminator) will be: + # + # len(base_tempdir + '/pymp-XXXXXXXX' + '/sock-XXXXXXXX') + sun_path_len = len(base_tempdir) + 14 + 14 + # Strict inequality to account for the NULL terminator. + # See https://github.com/python/cpython/issues/140734. + if sun_path_len < _SUN_PATH_MAX: + return base_tempdir + # Fallback to the default system-wide temporary directory. + # This ignores user-defined environment variables. + # + # On POSIX systems, /tmp MUST be writable by any application [1]. + # We however emit a warning if this is not the case to prevent + # obscure errors later in the execution. + # + # On some legacy systems, /var/tmp and /usr/tmp can be present + # and will be used instead. + # + # [1]: https://refspecs.linuxfoundation.org/FHS_3.0/fhs/ch03s18.html + dirlist = ['/tmp', '/var/tmp', '/usr/tmp'] + try: + base_system_tempdir = tempfile._get_default_tempdir(dirlist) + except FileNotFoundError: + warn("Process-wide temporary directory %s will not be usable for " + "creating socket files and no usable system-wide temporary " + "directory was found in %s", base_tempdir, dirlist) + # At this point, the system-wide temporary directory is not usable + # but we may assume that the user-defined one is, even if we will + # not be able to write socket files out there. 
+ return base_tempdir + warn("Ignoring user-defined temporary directory: %s", base_tempdir) + # at most max(map(len, dirlist)) + 14 + 14 = 36 characters + assert len(base_system_tempdir) + 14 + 14 < _SUN_PATH_MAX + return base_system_tempdir + +def get_temp_dir(): + # get name of a temp directory which will be automatically cleaned up + tempdir = process.current_process()._config.get('tempdir') + if tempdir is None: + import shutil, tempfile + base_tempdir = _get_base_temp_dir(tempfile) + tempdir = tempfile.mkdtemp(prefix='pymp-', dir=base_tempdir) + info('created temp directory %s', tempdir) + # keep a strong reference to shutil.rmtree(), since the finalizer + # can be called late during Python shutdown + Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), + exitpriority=-100) + process.current_process()._config['tempdir'] = tempdir + return tempdir + +# +# Support for reinitialization of objects when bootstrapping a child process +# + +_afterfork_registry = weakref.WeakValueDictionary() +_afterfork_counter = itertools.count() + +def _run_after_forkers(): + items = list(_afterfork_registry.items()) + items.sort() + for (index, ident, func), obj in items: + try: + func(obj) + except Exception as e: + info('after forker raised exception %s', e) + +def register_after_fork(obj, func): + _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj + +# +# Finalization using weakrefs +# + +_finalizer_registry = {} +_finalizer_counter = itertools.count() + + +class Finalize(object): + ''' + Class which supports object finalization using weakrefs + ''' + def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): + if (exitpriority is not None) and not isinstance(exitpriority,int): + raise TypeError( + "Exitpriority ({0!r}) must be None or int, not {1!s}".format( + exitpriority, type(exitpriority))) + + if obj is not None: + self._weakref = weakref.ref(obj, self) + elif exitpriority is None: + raise ValueError("Without object, exitpriority cannot be None") + + self._callback = callback + self._args = args + self._kwargs = kwargs or {} + self._key = (exitpriority, next(_finalizer_counter)) + self._pid = os.getpid() + + _finalizer_registry[self._key] = self + + def __call__(self, wr=None, + # Need to bind these locally because the globals can have + # been cleared at shutdown + _finalizer_registry=_finalizer_registry, + sub_debug=sub_debug, getpid=os.getpid): + ''' + Run the callback unless it has already been called or cancelled + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + sub_debug('finalizer no longer registered') + else: + if self._pid != getpid(): + sub_debug('finalizer ignored because different process') + res = None + else: + sub_debug('finalizer calling %s with args %s and kwargs %s', + self._callback, self._args, self._kwargs) + res = self._callback(*self._args, **self._kwargs) + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + return res + + def cancel(self): + ''' + Cancel finalization of the object + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + pass + else: + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + + def still_active(self): + ''' + Return whether this finalizer is still waiting to invoke callback + ''' + return self._key in _finalizer_registry + + def __repr__(self): + try: + obj = self._weakref() + except (AttributeError, TypeError): + obj = None + + if obj is None: + return '<%s object, dead>' % self.__class__.__name__ + + 
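+        # Editor's note (illustrative, not upstream): a live finalizer such
+        # as the one registered by get_temp_dir() renders roughly as
+        # <Finalize object, callback=_remove_temp_dir, args=(...),
+        # exitpriority=-100>.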
x = '<%s object, callback=%s' % ( + self.__class__.__name__, + getattr(self._callback, '__name__', self._callback)) + if self._args: + x += ', args=' + str(self._args) + if self._kwargs: + x += ', kwargs=' + str(self._kwargs) + if self._key[0] is not None: + x += ', exitpriority=' + str(self._key[0]) + return x + '>' + + +def _run_finalizers(minpriority=None): + ''' + Run all finalizers whose exit priority is not None and at least minpriority + + Finalizers with highest priority are called first; finalizers with + the same priority will be called in reverse order of creation. + ''' + if _finalizer_registry is None: + # This function may be called after this module's globals are + # destroyed. See the _exit_function function in this module for more + # notes. + return + + if minpriority is None: + f = lambda p : p[0] is not None + else: + f = lambda p : p[0] is not None and p[0] >= minpriority + + # Careful: _finalizer_registry may be mutated while this function + # is running (either by a GC run or by another thread). + + # list(_finalizer_registry) should be atomic, while + # list(_finalizer_registry.items()) is not. + keys = [key for key in list(_finalizer_registry) if f(key)] + keys.sort(reverse=True) + + for key in keys: + finalizer = _finalizer_registry.get(key) + # key may have been removed from the registry + if finalizer is not None: + sub_debug('calling %s', finalizer) + try: + finalizer() + except Exception: + import traceback + traceback.print_exc() + + if minpriority is None: + _finalizer_registry.clear() + +# +# Clean up on exit +# + +def is_exiting(): + ''' + Returns true if the process is shutting down + ''' + return _exiting or _exiting is None + +_exiting = False + +def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, + active_children=process.active_children, + current_process=process.current_process): + # We hold on to references to functions in the arglist due to the + # situation described below, where this function is called after this + # module's globals are destroyed. + + global _exiting + + if not _exiting: + _exiting = True + + info('process shutting down') + debug('running all "atexit" finalizers with priority >= 0') + _run_finalizers(0) + + if current_process() is not None: + # We check if the current process is None here because if + # it's None, any call to ``active_children()`` will raise + # an AttributeError (active_children winds up trying to + # get attributes from util._current_process). One + # situation where this can happen is if someone has + # manipulated sys.modules, causing this module to be + # garbage collected. The destructor for the module type + # then replaces all values in the module dict with None. + # For instance, after setuptools runs a test it replaces + # sys.modules with a copy created earlier. See issues + # #9775 and #15881. Also related: #4106, #9205, and + # #9207. 
+ + for p in active_children(): + if p.daemon: + info('calling terminate() for daemon %s', p.name) + p._popen.terminate() + + for p in active_children(): + info('calling join() for process %s', p.name) + p.join() + + debug('running the remaining "atexit" finalizers') + _run_finalizers() + +atexit.register(_exit_function) + +# +# Some fork aware types +# + +class ForkAwareThreadLock(object): + def __init__(self): + self._lock = threading.Lock() + self.acquire = self._lock.acquire + self.release = self._lock.release + register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) + + def _at_fork_reinit(self): + self._lock._at_fork_reinit() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + +class ForkAwareLocal(threading.local): + def __init__(self): + register_after_fork(self, lambda obj : obj.__dict__.clear()) + def __reduce__(self): + return type(self), () + +# +# Close fds except those specified +# + +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except Exception: + MAXFD = 256 + +def close_all_fds_except(fds): + fds = list(fds) + [-1, MAXFD] + fds.sort() + assert fds[-1] == MAXFD, 'fd too large' + for i in range(len(fds) - 1): + os.closerange(fds[i]+1, fds[i+1]) +# +# Close sys.stdin and replace stdin with os.devnull +# + +def _close_stdin(): + if sys.stdin is None: + return + + try: + sys.stdin.close() + except (OSError, ValueError): + pass + + try: + fd = os.open(os.devnull, os.O_RDONLY) + try: + sys.stdin = open(fd, encoding="utf-8", closefd=False) + except: + os.close(fd) + raise + except (OSError, ValueError): + pass + +# +# Flush standard streams, if any +# + +def _flush_std_streams(): + try: + sys.stdout.flush() + except (AttributeError, ValueError): + pass + try: + sys.stderr.flush() + except (AttributeError, ValueError): + pass + +# +# Start a program with only specified fds kept open +# + +def spawnv_passfds(path, args, passfds): + import _posixsubprocess + passfds = tuple(sorted(map(int, passfds))) + errpipe_read, errpipe_write = os.pipe() + try: + return _posixsubprocess.fork_exec( + args, [path], True, passfds, None, None, + -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, + False, False, -1, None, None, None, -1, None) + finally: + os.close(errpipe_read) + os.close(errpipe_write) + + +def close_fds(*fds): + """Close each file descriptor given as an argument""" + for fd in fds: + os.close(fd) + + +def _cleanup_tests(): + """Cleanup multiprocessing resources when multiprocessing tests + completed.""" + + from test import support + + # cleanup multiprocessing + process._cleanup() + + # Stop the ForkServer process if it's running + from multiprocessing import forkserver + forkserver._forkserver._stop() + + # Stop the ResourceTracker process if it's running + from multiprocessing import resource_tracker + resource_tracker._resource_tracker._stop() + + # bpo-37421: Explicitly call _run_finalizers() to remove immediately + # temporary directories created by multiprocessing.util.get_temp_dir(). 
+ _run_finalizers() + support.gc_collect() + + support.reap_children() diff --git a/Python313_13_x86_Template/Lib/netrc.py b/Python314_4_x86_Template/Lib/netrc.py similarity index 100% rename from Python313_13_x86_Template/Lib/netrc.py rename to Python314_4_x86_Template/Lib/netrc.py diff --git a/Python313_13_x86_Template/Lib/ntpath.py b/Python314_4_x86_Template/Lib/ntpath.py similarity index 100% rename from Python313_13_x86_Template/Lib/ntpath.py rename to Python314_4_x86_Template/Lib/ntpath.py diff --git a/Python314_4_x86_Template/Lib/nturl2path.py b/Python314_4_x86_Template/Lib/nturl2path.py new file mode 100644 index 00000000..57c7858d --- /dev/null +++ b/Python314_4_x86_Template/Lib/nturl2path.py @@ -0,0 +1,74 @@ +"""Convert a NT pathname to a file URL and vice versa. + +This module only exists to provide OS-specific code +for urllib.requests, thus do not use directly. +""" +# Testing is done through test_nturl2path. + +import warnings + + +warnings._deprecated( + __name__, + message=f"{warnings._DEPRECATED_MSG}; use 'urllib.request' instead", + remove=(3, 19)) + +def url2pathname(url): + """OS-specific conversion from a relative URL of the 'file' scheme + to a file system path; not recommended for general use.""" + # e.g. + # ///C|/foo/bar/spam.foo + # and + # ///C:/foo/bar/spam.foo + # become + # C:\foo\bar\spam.foo + import urllib.parse + if url[:3] == '///': + # URL has an empty authority section, so the path begins on the third + # character. + url = url[2:] + elif url[:12] == '//localhost/': + # Skip past 'localhost' authority. + url = url[11:] + if url[:3] == '///': + # Skip past extra slash before UNC drive in URL path. + url = url[1:] + else: + if url[:1] == '/' and url[2:3] in (':', '|'): + # Skip past extra slash before DOS drive in URL path. + url = url[1:] + if url[1:2] == '|': + # Older URLs use a pipe after a drive letter + url = url[:1] + ':' + url[2:] + return urllib.parse.unquote(url.replace('/', '\\')) + +def pathname2url(p): + """OS-specific conversion from a file system path to a relative URL + of the 'file' scheme; not recommended for general use.""" + # e.g. + # C:\foo\bar\spam.foo + # becomes + # ///C:/foo/bar/spam.foo + import ntpath + import urllib.parse + # First, clean up some special forms. We are going to sacrifice + # the additional information anyway + p = p.replace('\\', '/') + if p[:4] == '//?/': + p = p[4:] + if p[:4].upper() == 'UNC/': + p = '//' + p[4:] + drive, root, tail = ntpath.splitroot(p) + if drive: + if drive[1:] == ':': + # DOS drive specified. Add three slashes to the start, producing + # an authority section with a zero-length authority, and a path + # section starting with a single slash. + drive = f'///{drive}' + drive = urllib.parse.quote(drive, safe='/:') + elif root: + # Add explicitly empty authority to path beginning with one slash. + root = f'//{root}' + + tail = urllib.parse.quote(tail) + return drive + root + tail diff --git a/Python313_13_x86_Template/Lib/numbers.py b/Python314_4_x86_Template/Lib/numbers.py similarity index 100% rename from Python313_13_x86_Template/Lib/numbers.py rename to Python314_4_x86_Template/Lib/numbers.py diff --git a/Python314_4_x86_Template/Lib/opcode.py b/Python314_4_x86_Template/Lib/opcode.py new file mode 100644 index 00000000..0e9520b6 --- /dev/null +++ b/Python314_4_x86_Template/Lib/opcode.py @@ -0,0 +1,122 @@ + +""" +opcode module - potentially shared between dis and other modules which +operate on bytecodes (e.g. peephole optimizers). 
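+
+Illustrative example (editor's annotation, not upstream)::
+
+    import opcode
+    op = opcode.opmap['RETURN_VALUE']       # name -> numeric opcode
+    assert opcode.opname[op] == 'RETURN_VALUE'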
+""" + + +__all__ = ["cmp_op", "stack_effect", "hascompare", "opname", "opmap", + "HAVE_ARGUMENT", "EXTENDED_ARG", "hasarg", "hasconst", "hasname", + "hasjump", "hasjrel", "hasjabs", "hasfree", "haslocal", "hasexc"] + +import builtins +import _opcode +from _opcode import stack_effect + +from _opcode_metadata import (_specializations, _specialized_opmap, opmap, # noqa: F401 + HAVE_ARGUMENT, MIN_INSTRUMENTED_OPCODE) # noqa: F401 +EXTENDED_ARG = opmap['EXTENDED_ARG'] + +opname = ['<%r>' % (op,) for op in range(max(opmap.values()) + 1)] +for m in (opmap, _specialized_opmap): + for op, i in m.items(): + opname[i] = op + +cmp_op = ('<', '<=', '==', '!=', '>', '>=') + +# These lists are documented as part of the dis module's API +hasarg = [op for op in opmap.values() if _opcode.has_arg(op)] +hasconst = [op for op in opmap.values() if _opcode.has_const(op)] +hasname = [op for op in opmap.values() if _opcode.has_name(op)] +hasjump = [op for op in opmap.values() if _opcode.has_jump(op)] +hasjrel = hasjump # for backward compatibility +hasjabs = [] +hasfree = [op for op in opmap.values() if _opcode.has_free(op)] +haslocal = [op for op in opmap.values() if _opcode.has_local(op)] +hasexc = [op for op in opmap.values() if _opcode.has_exc(op)] + + +_intrinsic_1_descs = _opcode.get_intrinsic1_descs() +_intrinsic_2_descs = _opcode.get_intrinsic2_descs() +_special_method_names = _opcode.get_special_method_names() +_common_constants = [builtins.AssertionError, builtins.NotImplementedError, + builtins.tuple, builtins.all, builtins.any] +_nb_ops = _opcode.get_nb_ops() + +hascompare = [opmap["COMPARE_OP"]] + +_cache_format = { + "LOAD_GLOBAL": { + "counter": 1, + "index": 1, + "module_keys_version": 1, + "builtin_keys_version": 1, + }, + "BINARY_OP": { + "counter": 1, + "descr": 4, + }, + "UNPACK_SEQUENCE": { + "counter": 1, + }, + "COMPARE_OP": { + "counter": 1, + }, + "CONTAINS_OP": { + "counter": 1, + }, + "FOR_ITER": { + "counter": 1, + }, + "LOAD_SUPER_ATTR": { + "counter": 1, + }, + "LOAD_ATTR": { + "counter": 1, + "version": 2, + "keys_version": 2, + "descr": 4, + }, + "STORE_ATTR": { + "counter": 1, + "version": 2, + "index": 1, + }, + "CALL": { + "counter": 1, + "func_version": 2, + }, + "CALL_KW": { + "counter": 1, + "func_version": 2, + }, + "STORE_SUBSCR": { + "counter": 1, + }, + "SEND": { + "counter": 1, + }, + "JUMP_BACKWARD": { + "counter": 1, + }, + "TO_BOOL": { + "counter": 1, + "version": 2, + }, + "POP_JUMP_IF_TRUE": { + "counter": 1, + }, + "POP_JUMP_IF_FALSE": { + "counter": 1, + }, + "POP_JUMP_IF_NONE": { + "counter": 1, + }, + "POP_JUMP_IF_NOT_NONE": { + "counter": 1, + }, +} + +_inline_cache_entries = { + name : sum(value.values()) for (name, value) in _cache_format.items() +} diff --git a/Python314_4_x86_Template/Lib/operator.py b/Python314_4_x86_Template/Lib/operator.py new file mode 100644 index 00000000..1b765522 --- /dev/null +++ b/Python314_4_x86_Template/Lib/operator.py @@ -0,0 +1,475 @@ +""" +Operator Interface + +This module exports a set of functions corresponding to the intrinsic +operators of Python. For example, operator.add(x, y) is equivalent +to the expression x+y. The function names are those used for special +methods; variants without leading and trailing '__' are also provided +for convenience. + +This is the pure Python implementation of the module. 
+""" + +__all__ = ['abs', 'add', 'and_', 'attrgetter', 'call', 'concat', 'contains', 'countOf', + 'delitem', 'eq', 'floordiv', 'ge', 'getitem', 'gt', 'iadd', 'iand', + 'iconcat', 'ifloordiv', 'ilshift', 'imatmul', 'imod', 'imul', + 'index', 'indexOf', 'inv', 'invert', 'ior', 'ipow', 'irshift', + 'is_', 'is_none', 'is_not', 'is_not_none', 'isub', 'itemgetter', 'itruediv', + 'ixor', 'le', 'length_hint', 'lshift', 'lt', 'matmul', 'methodcaller', 'mod', + 'mul', 'ne', 'neg', 'not_', 'or_', 'pos', 'pow', 'rshift', + 'setitem', 'sub', 'truediv', 'truth', 'xor'] + +from builtins import abs as _abs + + +# Comparison Operations *******************************************************# + +def lt(a, b): + "Same as a < b." + return a < b + +def le(a, b): + "Same as a <= b." + return a <= b + +def eq(a, b): + "Same as a == b." + return a == b + +def ne(a, b): + "Same as a != b." + return a != b + +def ge(a, b): + "Same as a >= b." + return a >= b + +def gt(a, b): + "Same as a > b." + return a > b + +# Logical Operations **********************************************************# + +def not_(a): + "Same as not a." + return not a + +def truth(a): + "Return True if a is true, False otherwise." + return True if a else False + +def is_(a, b): + "Same as a is b." + return a is b + +def is_not(a, b): + "Same as a is not b." + return a is not b + +def is_none(a): + "Same as a is None." + return a is None + +def is_not_none(a): + "Same as a is not None." + return a is not None + +# Mathematical/Bitwise Operations *********************************************# + +def abs(a): + "Same as abs(a)." + return _abs(a) + +def add(a, b): + "Same as a + b." + return a + b + +def and_(a, b): + "Same as a & b." + return a & b + +def floordiv(a, b): + "Same as a // b." + return a // b + +def index(a): + "Same as a.__index__()." + return a.__index__() + +def inv(a): + "Same as ~a." + return ~a +invert = inv + +def lshift(a, b): + "Same as a << b." + return a << b + +def mod(a, b): + "Same as a % b." + return a % b + +def mul(a, b): + "Same as a * b." + return a * b + +def matmul(a, b): + "Same as a @ b." + return a @ b + +def neg(a): + "Same as -a." + return -a + +def or_(a, b): + "Same as a | b." + return a | b + +def pos(a): + "Same as +a." + return +a + +def pow(a, b): + "Same as a ** b." + return a ** b + +def rshift(a, b): + "Same as a >> b." + return a >> b + +def sub(a, b): + "Same as a - b." + return a - b + +def truediv(a, b): + "Same as a / b." + return a / b + +def xor(a, b): + "Same as a ^ b." + return a ^ b + +# Sequence Operations *********************************************************# + +def concat(a, b): + "Same as a + b, for a and b sequences." + if not hasattr(a, '__getitem__'): + msg = "'%s' object can't be concatenated" % type(a).__name__ + raise TypeError(msg) + return a + b + +def contains(a, b): + "Same as b in a (note reversed operands)." + return b in a + +def countOf(a, b): + "Return the number of items in a which are, or which equal, b." + count = 0 + for i in a: + if i is b or i == b: + count += 1 + return count + +def delitem(a, b): + "Same as del a[b]." + del a[b] + +def getitem(a, b): + "Same as a[b]." + return a[b] + +def indexOf(a, b): + "Return the first index of b in a." + for i, j in enumerate(a): + if j is b or j == b: + return i + else: + raise ValueError('sequence.index(x): x not in sequence') + +def setitem(a, b, c): + "Same as a[b] = c." + a[b] = c + +def length_hint(obj, default=0): + """ + Return an estimate of the number of items in obj. 
+ This is useful for presizing containers when building from an iterable. + + If the object supports len(), the result will be exact. Otherwise, it may + over- or under-estimate by an arbitrary amount. The result will be an + integer >= 0. + """ + if not isinstance(default, int): + msg = ("'%s' object cannot be interpreted as an integer" % + type(default).__name__) + raise TypeError(msg) + + try: + return len(obj) + except TypeError: + pass + + try: + hint = type(obj).__length_hint__ + except AttributeError: + return default + + try: + val = hint(obj) + except TypeError: + return default + if val is NotImplemented: + return default + if not isinstance(val, int): + msg = ('__length_hint__ must be integer, not %s' % + type(val).__name__) + raise TypeError(msg) + if val < 0: + msg = '__length_hint__() should return >= 0' + raise ValueError(msg) + return val + +# Other Operations ************************************************************# + +def call(obj, /, *args, **kwargs): + """Same as obj(*args, **kwargs).""" + return obj(*args, **kwargs) + +# Generalized Lookup Objects **************************************************# + +class attrgetter: + """ + Return a callable object that fetches the given attribute(s) from its operand. + After f = attrgetter('name'), the call f(r) returns r.name. + After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date). + After h = attrgetter('name.first', 'name.last'), the call h(r) returns + (r.name.first, r.name.last). + """ + __slots__ = ('_attrs', '_call') + + def __init__(self, attr, /, *attrs): + if not attrs: + if not isinstance(attr, str): + raise TypeError('attribute name must be a string') + self._attrs = (attr,) + names = attr.split('.') + def func(obj): + for name in names: + obj = getattr(obj, name) + return obj + self._call = func + else: + self._attrs = (attr,) + attrs + getters = tuple(map(attrgetter, self._attrs)) + def func(obj): + return tuple(getter(obj) for getter in getters) + self._call = func + + def __call__(self, obj, /): + return self._call(obj) + + def __repr__(self): + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__qualname__, + ', '.join(map(repr, self._attrs))) + + def __reduce__(self): + return self.__class__, self._attrs + +class itemgetter: + """ + Return a callable object that fetches the given item(s) from its operand. + After f = itemgetter(2), the call f(r) returns r[2]. + After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) + """ + __slots__ = ('_items', '_call') + + def __init__(self, item, /, *items): + if not items: + self._items = (item,) + def func(obj): + return obj[item] + self._call = func + else: + self._items = items = (item,) + items + def func(obj): + return tuple(obj[i] for i in items) + self._call = func + + def __call__(self, obj, /): + return self._call(obj) + + def __repr__(self): + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__name__, + ', '.join(map(repr, self._items))) + + def __reduce__(self): + return self.__class__, self._items + +class methodcaller: + """ + Return a callable object that calls the given method on its operand. + After f = methodcaller('name'), the call f(r) returns r.name(). + After g = methodcaller('name', 'date', foo=1), the call g(r) returns + r.name('date', foo=1). 
+ """ + __slots__ = ('_name', '_args', '_kwargs') + + def __init__(self, name, /, *args, **kwargs): + self._name = name + if not isinstance(self._name, str): + raise TypeError('method name must be a string') + self._args = args + self._kwargs = kwargs + + def __call__(self, obj, /): + return getattr(obj, self._name)(*self._args, **self._kwargs) + + def __repr__(self): + args = [repr(self._name)] + args.extend(map(repr, self._args)) + args.extend('%s=%r' % (k, v) for k, v in self._kwargs.items()) + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__name__, + ', '.join(args)) + + def __reduce__(self): + if not self._kwargs: + return self.__class__, (self._name,) + self._args + else: + from functools import partial + return partial(self.__class__, self._name, **self._kwargs), self._args + + +# In-place Operations *********************************************************# + +def iadd(a, b): + "Same as a += b." + a += b + return a + +def iand(a, b): + "Same as a &= b." + a &= b + return a + +def iconcat(a, b): + "Same as a += b, for a and b sequences." + if not hasattr(a, '__getitem__'): + msg = "'%s' object can't be concatenated" % type(a).__name__ + raise TypeError(msg) + a += b + return a + +def ifloordiv(a, b): + "Same as a //= b." + a //= b + return a + +def ilshift(a, b): + "Same as a <<= b." + a <<= b + return a + +def imod(a, b): + "Same as a %= b." + a %= b + return a + +def imul(a, b): + "Same as a *= b." + a *= b + return a + +def imatmul(a, b): + "Same as a @= b." + a @= b + return a + +def ior(a, b): + "Same as a |= b." + a |= b + return a + +def ipow(a, b): + "Same as a **= b." + a **=b + return a + +def irshift(a, b): + "Same as a >>= b." + a >>= b + return a + +def isub(a, b): + "Same as a -= b." + a -= b + return a + +def itruediv(a, b): + "Same as a /= b." + a /= b + return a + +def ixor(a, b): + "Same as a ^= b." + a ^= b + return a + + +try: + from _operator import * +except ImportError: + pass +else: + from _operator import __doc__ # noqa: F401 + +# All of these "__func__ = func" assignments have to happen after importing +# from _operator to make sure they're set to the right function +__lt__ = lt +__le__ = le +__eq__ = eq +__ne__ = ne +__ge__ = ge +__gt__ = gt +__not__ = not_ +__abs__ = abs +__add__ = add +__and__ = and_ +__call__ = call +__floordiv__ = floordiv +__index__ = index +__inv__ = inv +__invert__ = invert +__lshift__ = lshift +__mod__ = mod +__mul__ = mul +__matmul__ = matmul +__neg__ = neg +__or__ = or_ +__pos__ = pos +__pow__ = pow +__rshift__ = rshift +__sub__ = sub +__truediv__ = truediv +__xor__ = xor +__concat__ = concat +__contains__ = contains +__delitem__ = delitem +__getitem__ = getitem +__setitem__ = setitem +__iadd__ = iadd +__iand__ = iand +__iconcat__ = iconcat +__ifloordiv__ = ifloordiv +__ilshift__ = ilshift +__imod__ = imod +__imul__ = imul +__imatmul__ = imatmul +__ior__ = ior +__ipow__ = ipow +__irshift__ = irshift +__isub__ = isub +__itruediv__ = itruediv +__ixor__ = ixor diff --git a/Python314_4_x86_Template/Lib/optparse.py b/Python314_4_x86_Template/Lib/optparse.py new file mode 100644 index 00000000..38cf16d2 --- /dev/null +++ b/Python314_4_x86_Template/Lib/optparse.py @@ -0,0 +1,1671 @@ +"""A powerful, extensible, and easy-to-use option parser. + +By Greg Ward + +Originally distributed as Optik. + +For support, use the optik-users@lists.sourceforge.net mailing list +(http://lists.sourceforge.net/lists/listinfo/optik-users). 
+ +Simple usage example: + + from optparse import OptionParser + + parser = OptionParser() + parser.add_option("-f", "--file", dest="filename", + help="write report to FILE", metavar="FILE") + parser.add_option("-q", "--quiet", + action="store_false", dest="verbose", default=True, + help="don't print status messages to stdout") + + (options, args) = parser.parse_args() +""" + +__version__ = "1.5.3" + +__all__ = ['Option', + 'make_option', + 'SUPPRESS_HELP', + 'SUPPRESS_USAGE', + 'Values', + 'OptionContainer', + 'OptionGroup', + 'OptionParser', + 'HelpFormatter', + 'IndentedHelpFormatter', + 'TitledHelpFormatter', + 'OptParseError', + 'OptionError', + 'OptionConflictError', + 'OptionValueError', + 'BadOptionError', + 'check_choice'] + +__copyright__ = """ +Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved. +Copyright (c) 2002 Python Software Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import sys, os +from gettext import gettext as _, ngettext + + +def _repr(self): + return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self) + + +# This file was generated from: +# Id: option_parser.py 527 2006-07-23 15:21:30Z greg +# Id: option.py 522 2006-06-11 16:22:03Z gward +# Id: help.py 527 2006-07-23 15:21:30Z greg +# Id: errors.py 509 2006-04-20 00:58:24Z gward + + +class OptParseError (Exception): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + + +class OptionError (OptParseError): + """ + Raised if an Option instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + +class OptionConflictError (OptionError): + """ + Raised if conflicting options are added to an OptionParser. + """ + +class OptionValueError (OptParseError): + """ + Raised if an invalid option value is encountered on the command + line. + """ + +class BadOptionError (OptParseError): + """ + Raised if an invalid option is seen on the command line. 
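+
+    Illustrative example (editor's annotation, not upstream)::
+
+        str(BadOptionError('-x'))   # 'no such option: -x'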
+ """ + def __init__(self, opt_str): + self.opt_str = opt_str + + def __str__(self): + return _("no such option: %s") % self.opt_str + +class AmbiguousOptionError (BadOptionError): + """ + Raised if an ambiguous option is seen on the command line. + """ + def __init__(self, opt_str, possibilities): + BadOptionError.__init__(self, opt_str) + self.possibilities = possibilities + + def __str__(self): + return (_("ambiguous option: %s (%s?)") + % (self.opt_str, ", ".join(self.possibilities))) + + +class HelpFormatter: + + """ + Abstract base class for formatting option help. OptionParser + instances should use one of the HelpFormatter subclasses for + formatting help; by default IndentedHelpFormatter is used. + + Instance attributes: + parser : OptionParser + the controlling OptionParser instance + indent_increment : int + the number of columns to indent per nesting level + max_help_position : int + the maximum starting column for option help text + help_position : int + the calculated starting column for option help text; + initially the same as the maximum + width : int + total number of columns for output (pass None to constructor for + this value to be taken from the $COLUMNS environment variable) + level : int + current indentation level + current_indent : int + current indentation level (in columns) + help_width : int + number of columns available for option help text (calculated) + default_tag : str + text to replace with each option's default value, "%default" + by default. Set to false value to disable default value expansion. + option_strings : { Option : str } + maps Option instances to the snippet of help text explaining + the syntax of that option, e.g. "-h, --help" or + "-fFILE, --file=FILE" + _short_opt_fmt : str + format string controlling how short options with values are + printed in help text. Must be either "%s%s" ("-fFILE") or + "%s %s" ("-f FILE"), because those are the two syntaxes that + Optik supports. + _long_opt_fmt : str + similar but for long options; must be either "%s %s" ("--file FILE") + or "%s=%s" ("--file=FILE"). + """ + + NO_DEFAULT_VALUE = "none" + + def __init__(self, + indent_increment, + max_help_position, + width, + short_first): + self.parser = None + self.indent_increment = indent_increment + if width is None: + try: + width = int(os.environ['COLUMNS']) + except (KeyError, ValueError): + width = 80 + width -= 2 + self.width = width + self.help_position = self.max_help_position = \ + min(max_help_position, max(width - 20, indent_increment * 2)) + self.current_indent = 0 + self.level = 0 + self.help_width = None # computed later + self.short_first = short_first + self.default_tag = "%default" + self.option_strings = {} + self._short_opt_fmt = "%s %s" + self._long_opt_fmt = "%s=%s" + + def set_parser(self, parser): + self.parser = parser + + def set_short_opt_delimiter(self, delim): + if delim not in ("", " "): + raise ValueError( + "invalid metavar delimiter for short options: %r" % delim) + self._short_opt_fmt = "%s" + delim + "%s" + + def set_long_opt_delimiter(self, delim): + if delim not in ("=", " "): + raise ValueError( + "invalid metavar delimiter for long options: %r" % delim) + self._long_opt_fmt = "%s" + delim + "%s" + + def indent(self): + self.current_indent += self.indent_increment + self.level += 1 + + def dedent(self): + self.current_indent -= self.indent_increment + assert self.current_indent >= 0, "Indent decreased below 0." 
+ self.level -= 1 + + def format_usage(self, usage): + raise NotImplementedError("subclasses must implement") + + def format_heading(self, heading): + raise NotImplementedError("subclasses must implement") + + def _format_text(self, text): + """ + Format a paragraph of free-form text for inclusion in the + help output at the current indentation level. + """ + import textwrap + text_width = max(self.width - self.current_indent, 11) + indent = " "*self.current_indent + return textwrap.fill(text, + text_width, + initial_indent=indent, + subsequent_indent=indent) + + def format_description(self, description): + if description: + return self._format_text(description) + "\n" + else: + return "" + + def format_epilog(self, epilog): + if epilog: + return "\n" + self._format_text(epilog) + "\n" + else: + return "" + + + def expand_default(self, option): + if self.parser is None or not self.default_tag: + return option.help + + default_value = self.parser.defaults.get(option.dest) + if default_value is NO_DEFAULT or default_value is None: + default_value = self.NO_DEFAULT_VALUE + + return option.help.replace(self.default_tag, str(default_value)) + + def format_option(self, option): + # The help for each option consists of two parts: + # * the opt strings and metavars + # eg. ("-x", or "-fFILENAME, --file=FILENAME") + # * the user-supplied help string + # eg. ("turn on expert mode", "read data from FILENAME") + # + # If possible, we write both of these on the same line: + # -x turn on expert mode + # + # But if the opt string list is too long, we put the help + # string on a second line, indented to the same column it would + # start in if it fit on the first line. + # -fFILENAME, --file=FILENAME + # read data from FILENAME + result = [] + opts = self.option_strings[option] + opt_width = self.help_position - self.current_indent - 2 + if len(opts) > opt_width: + opts = "%*s%s\n" % (self.current_indent, "", opts) + indent_first = self.help_position + else: # start help on same line as opts + opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) + indent_first = 0 + result.append(opts) + if option.help: + import textwrap + help_text = self.expand_default(option) + help_lines = textwrap.wrap(help_text, self.help_width) + result.append("%*s%s\n" % (indent_first, "", help_lines[0])) + result.extend(["%*s%s\n" % (self.help_position, "", line) + for line in help_lines[1:]]) + elif opts[-1] != "\n": + result.append("\n") + return "".join(result) + + def store_option_strings(self, parser): + self.indent() + max_len = 0 + for opt in parser.option_list: + strings = self.format_option_strings(opt) + self.option_strings[opt] = strings + max_len = max(max_len, len(strings) + self.current_indent) + self.indent() + for group in parser.option_groups: + for opt in group.option_list: + strings = self.format_option_strings(opt) + self.option_strings[opt] = strings + max_len = max(max_len, len(strings) + self.current_indent) + self.dedent() + self.dedent() + self.help_position = min(max_len + 2, self.max_help_position) + self.help_width = max(self.width - self.help_position, 11) + + def format_option_strings(self, option): + """Return a comma-separated list of option strings & metavariables.""" + if option.takes_value(): + metavar = option.metavar or option.dest.upper() + short_opts = [self._short_opt_fmt % (sopt, metavar) + for sopt in option._short_opts] + long_opts = [self._long_opt_fmt % (lopt, metavar) + for lopt in option._long_opts] + else: + short_opts = option._short_opts + long_opts = option._long_opts 
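+        # Editor's note (illustrative, not upstream): for
+        # Option('-f', '--file', dest='filename') this returns
+        # "-f FILENAME, --file=FILENAME" with the default delimiters.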
+ + if self.short_first: + opts = short_opts + long_opts + else: + opts = long_opts + short_opts + + return ", ".join(opts) + +class IndentedHelpFormatter (HelpFormatter): + """Format help with indented section bodies. + """ + + def __init__(self, + indent_increment=2, + max_help_position=24, + width=None, + short_first=1): + HelpFormatter.__init__( + self, indent_increment, max_help_position, width, short_first) + + def format_usage(self, usage): + return _("Usage: %s\n") % usage + + def format_heading(self, heading): + return "%*s%s:\n" % (self.current_indent, "", heading) + + +class TitledHelpFormatter (HelpFormatter): + """Format help with underlined section headers. + """ + + def __init__(self, + indent_increment=0, + max_help_position=24, + width=None, + short_first=0): + HelpFormatter.__init__ ( + self, indent_increment, max_help_position, width, short_first) + + def format_usage(self, usage): + return "%s %s\n" % (self.format_heading(_("Usage")), usage) + + def format_heading(self, heading): + return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading)) + + +def _parse_num(val, type): + if val[:2].lower() == "0x": # hexadecimal + radix = 16 + elif val[:2].lower() == "0b": # binary + radix = 2 + val = val[2:] or "0" # have to remove "0b" prefix + elif val[:1] == "0": # octal + radix = 8 + else: # decimal + radix = 10 + + return type(val, radix) + +def _parse_int(val): + return _parse_num(val, int) + +_builtin_cvt = { "int" : (_parse_int, _("integer")), + "long" : (_parse_int, _("integer")), + "float" : (float, _("floating-point")), + "complex" : (complex, _("complex")) } + +def check_builtin(option, opt, value): + (cvt, what) = _builtin_cvt[option.type] + try: + return cvt(value) + except ValueError: + raise OptionValueError( + _("option %s: invalid %s value: %r") % (opt, what, value)) + +def check_choice(option, opt, value): + if value in option.choices: + return value + else: + choices = ", ".join(map(repr, option.choices)) + raise OptionValueError( + _("option %s: invalid choice: %r (choose from %s)") + % (opt, value, choices)) + +# Not supplying a default is different from a default of None, +# so we need an explicit "not supplied" value. +NO_DEFAULT = ("NO", "DEFAULT") + + +class Option: + """ + Instance attributes: + _short_opts : [string] + _long_opts : [string] + + action : string + type : string + dest : string + default : any + nargs : int + const : any + choices : [string] + callback : function + callback_args : (any*) + callback_kwargs : { string : any } + help : string + metavar : string + """ + + # The list of instance attributes that may be set through + # keyword args to the constructor. + ATTRS = ['action', + 'type', + 'dest', + 'default', + 'nargs', + 'const', + 'choices', + 'callback', + 'callback_args', + 'callback_kwargs', + 'help', + 'metavar'] + + # The set of actions allowed by option parsers. Explicitly listed + # here so the constructor can validate its arguments. + ACTIONS = ("store", + "store_const", + "store_true", + "store_false", + "append", + "append_const", + "count", + "callback", + "help", + "version") + + # The set of actions that involve storing a value somewhere; + # also listed just for constructor argument validation. (If + # the action is one of these, there must be a destination.) + STORE_ACTIONS = ("store", + "store_const", + "store_true", + "store_false", + "append", + "append_const", + "count") + + # The set of actions for which it makes sense to supply a value + # type, ie. which may consume an argument from the command line. 
+ TYPED_ACTIONS = ("store", + "append", + "callback") + + # The set of actions which *require* a value type, ie. that + # always consume an argument from the command line. + ALWAYS_TYPED_ACTIONS = ("store", + "append") + + # The set of actions which take a 'const' attribute. + CONST_ACTIONS = ("store_const", + "append_const") + + # The set of known types for option parsers. Again, listed here for + # constructor argument validation. + TYPES = ("string", "int", "long", "float", "complex", "choice") + + # Dictionary of argument checking functions, which convert and + # validate option arguments according to the option type. + # + # Signature of checking functions is: + # check(option : Option, opt : string, value : string) -> any + # where + # option is the Option instance calling the checker + # opt is the actual option seen on the command-line + # (eg. "-a", "--file") + # value is the option argument seen on the command-line + # + # The return value should be in the appropriate Python type + # for option.type -- eg. an integer if option.type == "int". + # + # If no checker is defined for a type, arguments will be + # unchecked and remain strings. + TYPE_CHECKER = { "int" : check_builtin, + "long" : check_builtin, + "float" : check_builtin, + "complex": check_builtin, + "choice" : check_choice, + } + + + # CHECK_METHODS is a list of unbound method objects; they are called + # by the constructor, in order, after all attributes are + # initialized. The list is created and filled in later, after all + # the methods are actually defined. (I just put it here because I + # like to define and document all class attributes in the same + # place.) Subclasses that add another _check_*() method should + # define their own CHECK_METHODS list that adds their check method + # to those from this class. + CHECK_METHODS = None + + + # -- Constructor/initialization methods ---------------------------- + + def __init__(self, *opts, **attrs): + # Set _short_opts, _long_opts attrs from 'opts' tuple. + # Have to be set now, in case no option strings are supplied. + self._short_opts = [] + self._long_opts = [] + opts = self._check_opt_strings(opts) + self._set_opt_strings(opts) + + # Set all other attrs (action, type, etc.) from 'attrs' dict + self._set_attrs(attrs) + + # Check all the attributes we just set. There are lots of + # complicated interdependencies, but luckily they can be farmed + # out to the _check_*() methods listed in CHECK_METHODS -- which + # could be handy for subclasses! The one thing these all share + # is that they raise OptionError if they discover a problem. + for checker in self.CHECK_METHODS: + checker(self) + + def _check_opt_strings(self, opts): + # Filter out None because early versions of Optik had exactly + # one short option and one long option, either of which + # could be None. 
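+        # eg. Option("-f", None) or Option(None, "--file") were once valid.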
+ opts = [opt for opt in opts if opt] + if not opts: + raise TypeError("at least one option string must be supplied") + return opts + + def _set_opt_strings(self, opts): + for opt in opts: + if len(opt) < 2: + raise OptionError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise OptionError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise OptionError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def _set_attrs(self, attrs): + for attr in self.ATTRS: + if attr in attrs: + setattr(self, attr, attrs[attr]) + del attrs[attr] + else: + if attr == 'default': + setattr(self, attr, NO_DEFAULT) + else: + setattr(self, attr, None) + if attrs: + attrs = sorted(attrs.keys()) + raise OptionError( + "invalid keyword arguments: %s" % ", ".join(attrs), + self) + + + # -- Constructor validation methods -------------------------------- + + def _check_action(self): + if self.action is None: + self.action = "store" + elif self.action not in self.ACTIONS: + raise OptionError("invalid action: %r" % self.action, self) + + def _check_type(self): + if self.type is None: + if self.action in self.ALWAYS_TYPED_ACTIONS: + if self.choices is not None: + # The "choices" attribute implies "choice" type. + self.type = "choice" + else: + # No type given? "string" is the most sensible default. + self.type = "string" + else: + # Allow type objects or builtin type conversion functions + # (int, str, etc.) as an alternative to their names. + if isinstance(self.type, type): + self.type = self.type.__name__ + + if self.type == "str": + self.type = "string" + + if self.type not in self.TYPES: + raise OptionError("invalid option type: %r" % self.type, self) + if self.action not in self.TYPED_ACTIONS: + raise OptionError( + "must not supply a type for action %r" % self.action, self) + + def _check_choice(self): + if self.type == "choice": + if self.choices is None: + raise OptionError( + "must supply a list of choices for type 'choice'", self) + elif not isinstance(self.choices, (tuple, list)): + raise OptionError( + "choices must be a list of strings ('%s' supplied)" + % str(type(self.choices)).split("'")[1], self) + elif self.choices is not None: + raise OptionError( + "must not supply choices for type %r" % self.type, self) + + def _check_dest(self): + # No destination given, and we need one for this action. The + # self.type check is for callbacks that take a value. + takes_value = (self.action in self.STORE_ACTIONS or + self.type is not None) + if self.dest is None and takes_value: + + # Glean a destination from the first long option string, + # or from the first short option string if no long options. + if self._long_opts: + # eg. 
"--foo-bar" -> "foo_bar" + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + self.dest = self._short_opts[0][1] + + def _check_const(self): + if self.action not in self.CONST_ACTIONS and self.const is not None: + raise OptionError( + "'const' must not be supplied for action %r" % self.action, + self) + + def _check_nargs(self): + if self.action in self.TYPED_ACTIONS: + if self.nargs is None: + self.nargs = 1 + elif self.nargs is not None: + raise OptionError( + "'nargs' must not be supplied for action %r" % self.action, + self) + + def _check_callback(self): + if self.action == "callback": + if not callable(self.callback): + raise OptionError( + "callback not callable: %r" % self.callback, self) + if (self.callback_args is not None and + not isinstance(self.callback_args, tuple)): + raise OptionError( + "callback_args, if supplied, must be a tuple: not %r" + % self.callback_args, self) + if (self.callback_kwargs is not None and + not isinstance(self.callback_kwargs, dict)): + raise OptionError( + "callback_kwargs, if supplied, must be a dict: not %r" + % self.callback_kwargs, self) + else: + if self.callback is not None: + raise OptionError( + "callback supplied (%r) for non-callback option" + % self.callback, self) + if self.callback_args is not None: + raise OptionError( + "callback_args supplied for non-callback option", self) + if self.callback_kwargs is not None: + raise OptionError( + "callback_kwargs supplied for non-callback option", self) + + + CHECK_METHODS = [_check_action, + _check_type, + _check_choice, + _check_dest, + _check_const, + _check_nargs, + _check_callback] + + + # -- Miscellaneous methods ----------------------------------------- + + def __str__(self): + return "/".join(self._short_opts + self._long_opts) + + __repr__ = _repr + + def takes_value(self): + return self.type is not None + + def get_opt_string(self): + if self._long_opts: + return self._long_opts[0] + else: + return self._short_opts[0] + + + # -- Processing methods -------------------------------------------- + + def check_value(self, opt, value): + checker = self.TYPE_CHECKER.get(self.type) + if checker is None: + return value + else: + return checker(self, opt, value) + + def convert_value(self, opt, value): + if value is not None: + if self.nargs == 1: + return self.check_value(opt, value) + else: + return tuple([self.check_value(opt, v) for v in value]) + + def process(self, opt, value, values, parser): + + # First, convert the value(s) to the right type. Howl if any + # value(s) are bogus. + value = self.convert_value(opt, value) + + # And then take whatever action is expected of us. + # This is a separate method to make life easier for + # subclasses to add new actions. 
+ return self.take_action( + self.action, self.dest, opt, value, values, parser) + + def take_action(self, action, dest, opt, value, values, parser): + if action == "store": + setattr(values, dest, value) + elif action == "store_const": + setattr(values, dest, self.const) + elif action == "store_true": + setattr(values, dest, True) + elif action == "store_false": + setattr(values, dest, False) + elif action == "append": + values.ensure_value(dest, []).append(value) + elif action == "append_const": + values.ensure_value(dest, []).append(self.const) + elif action == "count": + setattr(values, dest, values.ensure_value(dest, 0) + 1) + elif action == "callback": + args = self.callback_args or () + kwargs = self.callback_kwargs or {} + self.callback(self, opt, value, parser, *args, **kwargs) + elif action == "help": + parser.print_help() + parser.exit() + elif action == "version": + parser.print_version() + parser.exit() + else: + raise ValueError("unknown action %r" % self.action) + + return 1 + +# class Option + + +SUPPRESS_HELP = "SUPPRESS"+"HELP" +SUPPRESS_USAGE = "SUPPRESS"+"USAGE" + +class Values: + + def __init__(self, defaults=None): + if defaults: + for (attr, val) in defaults.items(): + setattr(self, attr, val) + + def __str__(self): + return str(self.__dict__) + + __repr__ = _repr + + def __eq__(self, other): + if isinstance(other, Values): + return self.__dict__ == other.__dict__ + elif isinstance(other, dict): + return self.__dict__ == other + else: + return NotImplemented + + def _update_careful(self, dict): + """ + Update the option values from an arbitrary dictionary, but only + use keys from dict that already have a corresponding attribute + in self. Any keys in dict without a corresponding attribute + are silently ignored. + """ + for attr in dir(self): + if attr in dict: + dval = dict[attr] + if dval is not None: + setattr(self, attr, dval) + + def _update_loose(self, dict): + """ + Update the option values from an arbitrary dictionary, + using all keys from the dictionary regardless of whether + they have a corresponding attribute in self or not. + """ + self.__dict__.update(dict) + + def _update(self, dict, mode): + if mode == "careful": + self._update_careful(dict) + elif mode == "loose": + self._update_loose(dict) + else: + raise ValueError("invalid update mode: %r" % mode) + + def read_module(self, modname, mode="careful"): + __import__(modname) + mod = sys.modules[modname] + self._update(vars(mod), mode) + + def read_file(self, filename, mode="careful"): + vars = {} + exec(open(filename).read(), vars) + self._update(vars, mode) + + def ensure_value(self, attr, value): + if not hasattr(self, attr) or getattr(self, attr) is None: + setattr(self, attr, value) + return getattr(self, attr) + + +class OptionContainer: + + """ + Abstract base class. + + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). + + Instance attributes: + option_list : [Option] + the list of Option objects contained by this OptionContainer + _short_opt : { string : Option } + dictionary mapping short option strings, eg. "-f" or "-X", + to the Option instances that implement them. If an Option + has multiple short option strings, it will appear in this + dictionary multiple times. [1] + _long_opt : { string : Option } + dictionary mapping long option strings, eg. "--file" or + "--exclude", to the Option instances that implement them. 
+ Again, a given Option can occur multiple times in this + dictionary. [1] + defaults : { string : any } + dictionary mapping option destination names to default + values for each destination [1] + + [1] These mappings are common to (shared by) all components of the + controlling OptionParser, where they are initially created. + + """ + + def __init__(self, option_class, conflict_handler, description): + # Initialize the option list and related data structures. + # This method must be provided by subclasses, and it must + # initialize at least the following instance attributes: + # option_list, _short_opt, _long_opt, defaults. + self._create_option_list() + + self.option_class = option_class + self.set_conflict_handler(conflict_handler) + self.set_description(description) + + def _create_option_mappings(self): + # For use by OptionParser constructor -- create the main + # option mappings used by this OptionParser and all + # OptionGroups that it owns. + self._short_opt = {} # single letter -> Option instance + self._long_opt = {} # long option -> Option instance + self.defaults = {} # maps option dest -> default value + + + def _share_option_mappings(self, parser): + # For use by OptionGroup constructor -- use shared option + # mappings from the OptionParser that owns this OptionGroup. + self._short_opt = parser._short_opt + self._long_opt = parser._long_opt + self.defaults = parser.defaults + + def set_conflict_handler(self, handler): + if handler not in ("error", "resolve"): + raise ValueError("invalid conflict_resolution value %r" % handler) + self.conflict_handler = handler + + def set_description(self, description): + self.description = description + + def get_description(self): + return self.description + + + def destroy(self): + """see OptionParser.destroy().""" + del self._short_opt + del self._long_opt + del self.defaults + + + # -- Option-adding methods ----------------------------------------- + + def _check_conflict(self, option): + conflict_opts = [] + for opt in option._short_opts: + if opt in self._short_opt: + conflict_opts.append((opt, self._short_opt[opt])) + for opt in option._long_opts: + if opt in self._long_opt: + conflict_opts.append((opt, self._long_opt[opt])) + + if conflict_opts: + handler = self.conflict_handler + if handler == "error": + raise OptionConflictError( + "conflicting option string(s): %s" + % ", ".join([co[0] for co in conflict_opts]), + option) + elif handler == "resolve": + for (opt, c_option) in conflict_opts: + if opt.startswith("--"): + c_option._long_opts.remove(opt) + del self._long_opt[opt] + else: + c_option._short_opts.remove(opt) + del self._short_opt[opt] + if not (c_option._short_opts or c_option._long_opts): + c_option.container.option_list.remove(c_option) + + def add_option(self, *args, **kwargs): + """add_option(Option) + add_option(opt_str, ..., kwarg=val, ...) 
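+
+        Typical usage (illustrative):
+            parser.add_option("-f", "--file", dest="filename",
+                              help="write report to FILE", metavar="FILE")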
+ """ + if isinstance(args[0], str): + option = self.option_class(*args, **kwargs) + elif len(args) == 1 and not kwargs: + option = args[0] + if not isinstance(option, Option): + raise TypeError("not an Option instance: %r" % option) + else: + raise TypeError("invalid arguments") + + self._check_conflict(option) + + self.option_list.append(option) + option.container = self + for opt in option._short_opts: + self._short_opt[opt] = option + for opt in option._long_opts: + self._long_opt[opt] = option + + if option.dest is not None: # option has a dest, we need a default + if option.default is not NO_DEFAULT: + self.defaults[option.dest] = option.default + elif option.dest not in self.defaults: + self.defaults[option.dest] = None + + return option + + def add_options(self, option_list): + for option in option_list: + self.add_option(option) + + # -- Option query/removal methods ---------------------------------- + + def get_option(self, opt_str): + return (self._short_opt.get(opt_str) or + self._long_opt.get(opt_str)) + + def has_option(self, opt_str): + return (opt_str in self._short_opt or + opt_str in self._long_opt) + + def remove_option(self, opt_str): + option = self._short_opt.get(opt_str) + if option is None: + option = self._long_opt.get(opt_str) + if option is None: + raise ValueError("no such option %r" % opt_str) + + for opt in option._short_opts: + del self._short_opt[opt] + for opt in option._long_opts: + del self._long_opt[opt] + option.container.option_list.remove(option) + + + # -- Help-formatting methods --------------------------------------- + + def format_option_help(self, formatter): + if not self.option_list: + return "" + result = [] + for option in self.option_list: + if not option.help is SUPPRESS_HELP: + result.append(formatter.format_option(option)) + return "".join(result) + + def format_description(self, formatter): + return formatter.format_description(self.get_description()) + + def format_help(self, formatter): + result = [] + if self.description: + result.append(self.format_description(formatter)) + if self.option_list: + result.append(self.format_option_help(formatter)) + return "\n".join(result) + + +class OptionGroup (OptionContainer): + + def __init__(self, parser, title, description=None): + self.parser = parser + OptionContainer.__init__( + self, parser.option_class, parser.conflict_handler, description) + self.title = title + + def _create_option_list(self): + self.option_list = [] + self._share_option_mappings(self.parser) + + def set_title(self, title): + self.title = title + + def destroy(self): + """see OptionParser.destroy().""" + OptionContainer.destroy(self) + del self.option_list + + # -- Help-formatting methods --------------------------------------- + + def format_help(self, formatter): + result = formatter.format_heading(self.title) + formatter.indent() + result += OptionContainer.format_help(self, formatter) + formatter.dedent() + return result + + +class OptionParser (OptionContainer): + + """ + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). + + Instance attributes: + usage : string + a usage string for your program. Before it is displayed + to the user, "%prog" will be expanded to the name of + your program (self.prog or os.path.basename(sys.argv[0])). + prog : string + the name of the current program (to override + os.path.basename(sys.argv[0])). 
+ description : string + A paragraph of text giving a brief overview of your program. + optparse reformats this paragraph to fit the current terminal + width and prints it when the user requests help (after usage, + but before the list of options). + epilog : string + paragraph of help text to print after option help + + option_groups : [OptionGroup] + list of option groups in this parser (option groups are + irrelevant for parsing the command-line, but very useful + for generating help) + + allow_interspersed_args : bool = true + if true, positional arguments may be interspersed with options. + Assuming -a and -b each take a single argument, the command-line + -ablah foo bar -bboo baz + will be interpreted the same as + -ablah -bboo -- foo bar baz + If this flag were false, that command line would be interpreted as + -ablah -- foo bar -bboo baz + -- ie. we stop processing options as soon as we see the first + non-option argument. (This is the tradition followed by + Python's getopt module, Perl's Getopt::Std, and other argument- + parsing libraries, but it is generally annoying to users.) + + process_default_values : bool = true + if true, option default values are processed similarly to option + values from the command line: that is, they are passed to the + type-checking function for the option's type (as long as the + default value is a string). (This really only matters if you + have defined custom types; see SF bug #955889.) Set it to false + to restore the behaviour of Optik 1.4.1 and earlier. + + rargs : [string] + the argument list currently being parsed. Only set when + parse_args() is active, and continually trimmed down as + we consume arguments. Mainly there for the benefit of + callback options. + largs : [string] + the list of leftover arguments that we have skipped while + parsing options. If allow_interspersed_args is false, this + list is always empty. + values : Values + the set of option values currently being accumulated. Only + set when parse_args() is active. Also mainly for callbacks. + + Because of the 'rargs', 'largs', and 'values' attributes, + OptionParser is not thread-safe. If, for some perverse reason, you + need to parse command-line arguments simultaneously in different + threads, use different OptionParser instances. + + """ + + standard_option_list = [] + + def __init__(self, + usage=None, + option_list=None, + option_class=Option, + version=None, + conflict_handler="error", + description=None, + formatter=None, + add_help_option=True, + prog=None, + epilog=None): + OptionContainer.__init__( + self, option_class, conflict_handler, description) + self.set_usage(usage) + self.prog = prog + self.version = version + self.allow_interspersed_args = True + self.process_default_values = True + if formatter is None: + formatter = IndentedHelpFormatter() + self.formatter = formatter + self.formatter.set_parser(self) + self.epilog = epilog + + # Populate the option list; initial sources are the + # standard_option_list class attribute, the 'option_list' + # argument, and (if applicable) the _add_version_option() and + # _add_help_option() methods. + self._populate_option_list(option_list, + add_help=add_help_option) + + self._init_parsing_state() + + + def destroy(self): + """ + Declare that you are done with this OptionParser. This cleans up + reference cycles so the OptionParser (and all objects referenced by + it) can be garbage-collected promptly. After calling destroy(), the + OptionParser is unusable. 
+ """ + OptionContainer.destroy(self) + for group in self.option_groups: + group.destroy() + del self.option_list + del self.option_groups + del self.formatter + + + # -- Private methods ----------------------------------------------- + # (used by our or OptionContainer's constructor) + + def _create_option_list(self): + self.option_list = [] + self.option_groups = [] + self._create_option_mappings() + + def _add_help_option(self): + self.add_option("-h", "--help", + action="help", + help=_("show this help message and exit")) + + def _add_version_option(self): + self.add_option("--version", + action="version", + help=_("show program's version number and exit")) + + def _populate_option_list(self, option_list, add_help=True): + if self.standard_option_list: + self.add_options(self.standard_option_list) + if option_list: + self.add_options(option_list) + if self.version: + self._add_version_option() + if add_help: + self._add_help_option() + + def _init_parsing_state(self): + # These are set in parse_args() for the convenience of callbacks. + self.rargs = None + self.largs = None + self.values = None + + + # -- Simple modifier methods --------------------------------------- + + def set_usage(self, usage): + if usage is None: + self.usage = _("%prog [options]") + elif usage is SUPPRESS_USAGE: + self.usage = None + # For backwards compatibility with Optik 1.3 and earlier. + elif usage.lower().startswith("usage: "): + self.usage = usage[7:] + else: + self.usage = usage + + def enable_interspersed_args(self): + """Set parsing to not stop on the first non-option, allowing + interspersing switches with command arguments. This is the + default behavior. See also disable_interspersed_args() and the + class documentation description of the attribute + allow_interspersed_args.""" + self.allow_interspersed_args = True + + def disable_interspersed_args(self): + """Set parsing to stop on the first non-option. Use this if + you have a command processor which runs another command that + has options of its own and you want to make sure these options + don't get confused. + """ + self.allow_interspersed_args = False + + def set_process_default_values(self, process): + self.process_default_values = process + + def set_default(self, dest, value): + self.defaults[dest] = value + + def set_defaults(self, **kwargs): + self.defaults.update(kwargs) + + def _get_all_options(self): + options = self.option_list[:] + for group in self.option_groups: + options.extend(group.option_list) + return options + + def get_default_values(self): + if not self.process_default_values: + # Old, pre-Optik 1.5 behaviour. 
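+            # (string defaults are returned as-is, without type conversion)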
+ return Values(self.defaults) + + defaults = self.defaults.copy() + for option in self._get_all_options(): + default = defaults.get(option.dest) + if isinstance(default, str): + opt_str = option.get_opt_string() + defaults[option.dest] = option.check_value(opt_str, default) + + return Values(defaults) + + + # -- OptionGroup methods ------------------------------------------- + + def add_option_group(self, *args, **kwargs): + # XXX lots of overlap with OptionContainer.add_option() + if isinstance(args[0], str): + group = OptionGroup(self, *args, **kwargs) + elif len(args) == 1 and not kwargs: + group = args[0] + if not isinstance(group, OptionGroup): + raise TypeError("not an OptionGroup instance: %r" % group) + if group.parser is not self: + raise ValueError("invalid OptionGroup (wrong parser)") + else: + raise TypeError("invalid arguments") + + self.option_groups.append(group) + return group + + def get_option_group(self, opt_str): + option = (self._short_opt.get(opt_str) or + self._long_opt.get(opt_str)) + if option and option.container is not self: + return option.container + return None + + + # -- Option-parsing methods ---------------------------------------- + + def _get_args(self, args): + if args is None: + return sys.argv[1:] + else: + return args[:] # don't modify caller's list + + def parse_args(self, args=None, values=None): + """ + parse_args(args : [string] = sys.argv[1:], + values : Values = None) + -> (values : Values, args : [string]) + + Parse the command-line options found in 'args' (default: + sys.argv[1:]). Any errors result in a call to 'error()', which + by default prints the usage message to stderr and calls + sys.exit() with an error message. On success returns a pair + (values, args) where 'values' is a Values instance (with all + your option values) and 'args' is the list of arguments left + over after parsing options. + """ + rargs = self._get_args(args) + if values is None: + values = self.get_default_values() + + # Store the halves of the argument list as attributes for the + # convenience of callbacks: + # rargs + # the rest of the command-line (the "r" stands for + # "remaining" or "right-hand") + # largs + # the leftover arguments -- ie. what's left after removing + # options and their arguments (the "l" stands for "leftover" + # or "left-hand") + self.rargs = rargs + self.largs = largs = [] + self.values = values + + try: + stop = self._process_args(largs, rargs, values) + except (BadOptionError, OptionValueError) as err: + self.error(str(err)) + + args = largs + rargs + return self.check_values(values, args) + + def check_values(self, values, args): + """ + check_values(values : Values, args : [string]) + -> (values : Values, args : [string]) + + Check that the supplied option values and leftover arguments are + valid. Returns the option values and leftover arguments + (possibly adjusted, possibly completely new -- whatever you + like). Default implementation just returns the passed-in + values; subclasses may override as desired. + """ + return (values, args) + + def _process_args(self, largs, rargs, values): + """_process_args(largs : [string], + rargs : [string], + values : Values) + + Process command-line arguments and populate 'values', consuming + options and arguments from 'rargs'. If 'allow_interspersed_args' is + false, stop at the first non-option argument. If true, accumulate any + interspersed non-option arguments in 'largs'. 
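+
+        For example (illustrative), if -a and -b take no arguments and
+        interspersed args are allowed, ["-a", "foo", "-b"] leaves
+        largs == ["foo"] with both options processed.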
+ """ + while rargs: + arg = rargs[0] + # We handle bare "--" explicitly, and bare "-" is handled by the + # standard arg handler since the short arg case ensures that the + # len of the opt string is greater than 1. + if arg == "--": + del rargs[0] + return + elif arg[0:2] == "--": + # process a single long option (possibly with value(s)) + self._process_long_opt(rargs, values) + elif arg[:1] == "-" and len(arg) > 1: + # process a cluster of short options (possibly with + # value(s) for the last one only) + self._process_short_opts(rargs, values) + elif self.allow_interspersed_args: + largs.append(arg) + del rargs[0] + else: + return # stop now, leave this arg in rargs + + # Say this is the original argument list: + # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] + # ^ + # (we are about to process arg(i)). + # + # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of + # [arg0, ..., arg(i-1)] (any options and their arguments will have + # been removed from largs). + # + # The while loop will usually consume 1 or more arguments per pass. + # If it consumes 1 (eg. arg is an option that takes no arguments), + # then after _process_arg() is done the situation is: + # + # largs = subset of [arg0, ..., arg(i)] + # rargs = [arg(i+1), ..., arg(N-1)] + # + # If allow_interspersed_args is false, largs will always be + # *empty* -- still a subset of [arg0, ..., arg(i-1)], but + # not a very interesting subset! + + def _match_long_opt(self, opt): + """_match_long_opt(opt : string) -> string + + Determine which long option string 'opt' matches, ie. which one + it is an unambiguous abbreviation for. Raises BadOptionError if + 'opt' doesn't unambiguously match any long option string. + """ + return _match_abbrev(opt, self._long_opt) + + def _process_long_opt(self, rargs, values): + arg = rargs.pop(0) + + # Value explicitly attached to arg? Pretend it's the next + # argument. + if "=" in arg: + (opt, next_arg) = arg.split("=", 1) + rargs.insert(0, next_arg) + had_explicit_value = True + else: + opt = arg + had_explicit_value = False + + opt = self._match_long_opt(opt) + option = self._long_opt[opt] + if option.takes_value(): + nargs = option.nargs + if len(rargs) < nargs: + self.error(ngettext( + "%(option)s option requires %(number)d argument", + "%(option)s option requires %(number)d arguments", + nargs) % {"option": opt, "number": nargs}) + elif nargs == 1: + value = rargs.pop(0) + else: + value = tuple(rargs[0:nargs]) + del rargs[0:nargs] + + elif had_explicit_value: + self.error(_("%s option does not take a value") % opt) + + else: + value = None + + option.process(opt, value, values, self) + + def _process_short_opts(self, rargs, values): + arg = rargs.pop(0) + stop = False + i = 1 + for ch in arg[1:]: + opt = "-" + ch + option = self._short_opt.get(opt) + i += 1 # we have consumed a character + + if not option: + raise BadOptionError(opt) + if option.takes_value(): + # Any characters left in arg? Pretend they're the + # next arg, and stop consuming characters of arg. 
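+                # eg. "-fFILENAME" is treated as "-f" with value "FILENAME".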
+ if i < len(arg): + rargs.insert(0, arg[i:]) + stop = True + + nargs = option.nargs + if len(rargs) < nargs: + self.error(ngettext( + "%(option)s option requires %(number)d argument", + "%(option)s option requires %(number)d arguments", + nargs) % {"option": opt, "number": nargs}) + elif nargs == 1: + value = rargs.pop(0) + else: + value = tuple(rargs[0:nargs]) + del rargs[0:nargs] + + else: # option doesn't take a value + value = None + + option.process(opt, value, values, self) + + if stop: + break + + + # -- Feedback methods ---------------------------------------------- + + def get_prog_name(self): + if self.prog is None: + return os.path.basename(sys.argv[0]) + else: + return self.prog + + def expand_prog_name(self, s): + return s.replace("%prog", self.get_prog_name()) + + def get_description(self): + return self.expand_prog_name(self.description) + + def exit(self, status=0, msg=None): + if msg: + sys.stderr.write(msg) + sys.exit(status) + + def error(self, msg): + """error(msg : string) + + Print a usage message incorporating 'msg' to stderr and exit. + If you override this in a subclass, it should not return -- it + should either exit or raise an exception. + """ + self.print_usage(sys.stderr) + self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg)) + + def get_usage(self): + if self.usage: + return self.formatter.format_usage( + self.expand_prog_name(self.usage)) + else: + return "" + + def print_usage(self, file=None): + """print_usage(file : file = stdout) + + Print the usage message for the current program (self.usage) to + 'file' (default stdout). Any occurrence of the string "%prog" in + self.usage is replaced with the name of the current program + (basename of sys.argv[0]). Does nothing if self.usage is empty + or not defined. + """ + if self.usage: + print(self.get_usage(), file=file) + + def get_version(self): + if self.version: + return self.expand_prog_name(self.version) + else: + return "" + + def print_version(self, file=None): + """print_version(file : file = stdout) + + Print the version message for this program (self.version) to + 'file' (default stdout). As with print_usage(), any occurrence + of "%prog" in self.version is replaced by the current program's + name. Does nothing if self.version is empty or undefined. 
+ """ + if self.version: + print(self.get_version(), file=file) + + def format_option_help(self, formatter=None): + if formatter is None: + formatter = self.formatter + formatter.store_option_strings(self) + result = [] + result.append(formatter.format_heading(_("Options"))) + formatter.indent() + if self.option_list: + result.append(OptionContainer.format_option_help(self, formatter)) + result.append("\n") + for group in self.option_groups: + result.append(group.format_help(formatter)) + result.append("\n") + formatter.dedent() + # Drop the last "\n", or the header if no options or option groups: + return "".join(result[:-1]) + + def format_epilog(self, formatter): + return formatter.format_epilog(self.epilog) + + def format_help(self, formatter=None): + if formatter is None: + formatter = self.formatter + result = [] + if self.usage: + result.append(self.get_usage() + "\n") + if self.description: + result.append(self.format_description(formatter) + "\n") + result.append(self.format_option_help(formatter)) + result.append(self.format_epilog(formatter)) + return "".join(result) + + def print_help(self, file=None): + """print_help(file : file = stdout) + + Print an extended help message, listing all options and any + help text provided with them, to 'file' (default stdout). + """ + if file is None: + file = sys.stdout + file.write(self.format_help()) + +# class OptionParser + + +def _match_abbrev(s, wordmap): + """_match_abbrev(s : string, wordmap : {string : Option}) -> string + + Return the string key in 'wordmap' for which 's' is an unambiguous + abbreviation. If 's' is found to be ambiguous or doesn't match any of + 'words', raise BadOptionError. + """ + # Is there an exact match? + if s in wordmap: + return s + else: + # Isolate all words with s as a prefix. + possibilities = [word for word in wordmap.keys() + if word.startswith(s)] + # No exact match, so there had better be just one possibility. + if len(possibilities) == 1: + return possibilities[0] + elif not possibilities: + raise BadOptionError(s) + else: + # More than one possible completion: ambiguous prefix. + possibilities.sort() + raise AmbiguousOptionError(s, possibilities) + + +# Some day, there might be many Option classes. As of Optik 1.3, the +# preferred way to instantiate Options is indirectly, via make_option(), +# which will become a factory function when there are many Option +# classes. +make_option = Option diff --git a/Python314_4_x86_Template/Lib/os.py b/Python314_4_x86_Template/Lib/os.py new file mode 100644 index 00000000..ac03b416 --- /dev/null +++ b/Python314_4_x86_Template/Lib/os.py @@ -0,0 +1,1191 @@ +r"""OS routines for NT or Posix depending on what system we're on. + +This exports: + - all functions from posix or nt, e.g. unlink, stat, etc. + - os.path is either posixpath or ntpath + - os.name is either 'posix' or 'nt' + - os.curdir is a string representing the current directory (always '.') + - os.pardir is a string representing the parent directory (always '..') + - os.sep is the (or a most common) pathname separator ('/' or '\\') + - os.extsep is the extension separator (always '.') + - os.altsep is the alternate pathname separator (None or '/') + - os.pathsep is the component separator used in $PATH etc + - os.linesep is the line separator in text files ('\n' or '\r\n') + - os.defpath is the default search path for executables + - os.devnull is the file path of the null device ('/dev/null', etc.) 
+ +Programs that import and use 'os' stand a better chance of being +portable between different platforms. Of course, they must then +only use functions that are defined by all platforms (e.g., unlink +and opendir), and leave all pathname manipulation to os.path +(e.g., split and join). +""" + +#' +import abc +import sys +import stat as st + +from _collections_abc import _check_methods + +GenericAlias = type(list[int]) + +_names = sys.builtin_module_names + +# Note: more names are added to __all__ later. +__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", + "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", + "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", + "extsep"] + +def _exists(name): + return name in globals() + +def _get_exports_list(module): + try: + return list(module.__all__) + except AttributeError: + return [n for n in dir(module) if n[0] != '_'] + +# Any new dependencies of the os module and/or changes in path separator +# requires updating importlib as well. +if 'posix' in _names: + name = 'posix' + linesep = '\n' + from posix import * + try: + from posix import _exit + __all__.append('_exit') + except ImportError: + pass + import posixpath as path + + try: + from posix import _have_functions + except ImportError: + pass + try: + from posix import _create_environ + except ImportError: + pass + + import posix + __all__.extend(_get_exports_list(posix)) + del posix + +elif 'nt' in _names: + name = 'nt' + linesep = '\r\n' + from nt import * + try: + from nt import _exit + __all__.append('_exit') + except ImportError: + pass + import ntpath as path + + import nt + __all__.extend(_get_exports_list(nt)) + del nt + + try: + from nt import _have_functions + except ImportError: + pass + try: + from nt import _create_environ + except ImportError: + pass + +else: + raise ImportError('no os specific module found') + +sys.modules['os.path'] = path +from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, + devnull) + +del _names + + +if _exists("_have_functions"): + _globals = globals() + def _add(str, fn): + if (fn in _globals) and (str in _have_functions): + _set.add(_globals[fn]) + + _set = set() + _add("HAVE_FACCESSAT", "access") + _add("HAVE_FCHMODAT", "chmod") + _add("HAVE_FCHOWNAT", "chown") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_LSTAT", "lstat") + _add("HAVE_FUTIMESAT", "utime") + _add("HAVE_LINKAT", "link") + _add("HAVE_MKDIRAT", "mkdir") + _add("HAVE_MKFIFOAT", "mkfifo") + _add("HAVE_MKNODAT", "mknod") + _add("HAVE_OPENAT", "open") + _add("HAVE_READLINKAT", "readlink") + _add("HAVE_RENAMEAT", "rename") + _add("HAVE_SYMLINKAT", "symlink") + _add("HAVE_UNLINKAT", "unlink") + _add("HAVE_UNLINKAT", "rmdir") + _add("HAVE_UTIMENSAT", "utime") + supports_dir_fd = _set + + _set = set() + _add("HAVE_FACCESSAT", "access") + supports_effective_ids = _set + + _set = set() + _add("HAVE_FCHDIR", "chdir") + _add("HAVE_FCHMOD", "chmod") + _add("MS_WINDOWS", "chmod") + _add("HAVE_FCHOWN", "chown") + _add("HAVE_FDOPENDIR", "listdir") + _add("HAVE_FDOPENDIR", "scandir") + _add("HAVE_FEXECVE", "execve") + _set.add(stat) # fstat always works + _add("HAVE_FTRUNCATE", "truncate") + _add("HAVE_FUTIMENS", "utime") + _add("HAVE_FUTIMES", "utime") + _add("HAVE_FPATHCONF", "pathconf") + if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3 + _add("HAVE_FSTATVFS", "statvfs") + supports_fd = _set + + _set = set() + _add("HAVE_FACCESSAT", "access") + # Some platforms don't support lchmod(). 
Often the function exists + # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP. + # (No, I don't know why that's a good design.) ./configure will detect + # this and reject it--so HAVE_LCHMOD still won't be defined on such + # platforms. This is Very Helpful. + # + # However, sometimes platforms without a working lchmod() *do* have + # fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15, + # OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes + # it behave like lchmod(). So in theory it would be a suitable + # replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s + # flag doesn't work *either*. Sadly ./configure isn't sophisticated + # enough to detect this condition--it only determines whether or not + # fchmodat() minimally works. + # + # Therefore we simply ignore fchmodat() when deciding whether or not + # os.chmod supports follow_symlinks. Just checking lchmod() is + # sufficient. After all--if you have a working fchmodat(), your + # lchmod() almost certainly works too. + # + # _add("HAVE_FCHMODAT", "chmod") + _add("HAVE_FCHOWNAT", "chown") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_LCHFLAGS", "chflags") + _add("HAVE_LCHMOD", "chmod") + _add("MS_WINDOWS", "chmod") + if _exists("lchown"): # mac os x10.3 + _add("HAVE_LCHOWN", "chown") + _add("HAVE_LINKAT", "link") + _add("HAVE_LUTIMES", "utime") + _add("HAVE_LSTAT", "stat") + _add("HAVE_FSTATAT", "stat") + _add("HAVE_UTIMENSAT", "utime") + _add("MS_WINDOWS", "stat") + supports_follow_symlinks = _set + + del _set + del _have_functions + del _globals + del _add + + +# Python uses fixed values for the SEEK_ constants; they are mapped +# to native constants if necessary in posixmodule.c +# Other possible SEEK values are directly imported from posixmodule.c +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +# Super directory utilities. +# (Inspired by Eric Raymond; the doc strings are mostly his) + +def makedirs(name, mode=0o777, exist_ok=False): + """makedirs(name [, mode=0o777][, exist_ok=False]) + + Super-mkdir; create a leaf directory and all intermediate ones. Works like + mkdir, except that any intermediate path segment (not just the rightmost) + will be created if it does not exist. If the target directory already + exists, raise an OSError if exist_ok is False. Otherwise no exception is + raised. This is recursive. + + """ + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + if head and tail and not path.exists(head): + try: + makedirs(head, exist_ok=exist_ok) + except FileExistsError: + # Defeats race condition when another thread created the path + pass + cdir = curdir + if isinstance(tail, bytes): + cdir = bytes(curdir, 'ASCII') + if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists + return + try: + mkdir(name, mode) + except OSError: + # Cannot rely on checking for EEXIST, since the operating system + # could give priority to other errors like EACCES or EROFS + if not exist_ok or not path.isdir(name): + raise + +def removedirs(name): + """removedirs(name) + + Super-rmdir; remove a leaf directory and all empty intermediate + ones. Works like rmdir except that, if the leaf directory is + successfully removed, directories corresponding to rightmost path + segments will be pruned away until either the whole path is + consumed or an error occurs. Errors during this latter phase are + ignored -- they generally mean that a directory was not empty. 
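+
+    For example (illustrative), removedirs('foo/bar/baz') first removes
+    'foo/bar/baz' and then prunes 'foo/bar' and 'foo' if they are empty.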
+ + """ + rmdir(name) + head, tail = path.split(name) + if not tail: + head, tail = path.split(head) + while head and tail: + try: + rmdir(head) + except OSError: + break + head, tail = path.split(head) + +def renames(old, new): + """renames(old, new) + + Super-rename; create directories as necessary and delete any left + empty. Works like rename, except creation of any intermediate + directories needed to make the new pathname good is attempted + first. After the rename, directories corresponding to rightmost + path segments of the old name will be pruned until either the + whole path is consumed or a nonempty directory is found. + + Note: this function can fail with the new directory structure made + if you lack permissions needed to unlink the leaf directory or + file. + + """ + head, tail = path.split(new) + if head and tail and not path.exists(head): + makedirs(head) + rename(old, new) + head, tail = path.split(old) + if head and tail: + try: + removedirs(head) + except OSError: + pass + +__all__.extend(["makedirs", "removedirs", "renames"]) + +# Private sentinel that makes walk() classify all symlinks and junctions as +# regular files. +_walk_symlinks_as_files = object() + +def walk(top, topdown=True, onerror=None, followlinks=False): + """Directory tree generator. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), yields a 3-tuple + + dirpath, dirnames, filenames + + dirpath is a string, the path to the directory. dirnames is a list of + the names of the subdirectories in dirpath (including symlinks to directories, + and excluding '.' and '..'). + filenames is a list of the names of the non-directory files in dirpath. + Note that the names in the lists are just names, with no path components. + To get a full path (which begins with top) to a file or directory in + dirpath, do os.path.join(dirpath, name). + + If optional arg 'topdown' is true or not specified, the triple for a + directory is generated before the triples for any of its subdirectories + (directories are generated top down). If topdown is false, the triple + for a directory is generated after the triples for all of its + subdirectories (directories are generated bottom up). + + When topdown is true, the caller can modify the dirnames list in-place + (e.g., via del or slice assignment), and walk will only recurse into the + subdirectories whose names remain in dirnames; this can be used to prune the + search, or to impose a specific order of visiting. Modifying dirnames when + topdown is false has no effect on the behavior of os.walk(), since the + directories in dirnames have already been generated by the time dirnames + itself is generated. No matter the value of topdown, the list of + subdirectories is retrieved before the tuples for the directory and its + subdirectories are generated. + + By default errors from the os.scandir() call are ignored. If + optional arg 'onerror' is specified, it should be a function; it + will be called with one argument, an OSError instance. It can + report the error to continue with the walk, or raise the exception + to abort the walk. Note that the filename is available as the + filename attribute of the exception object. + + By default, os.walk does not follow symbolic links to subdirectories on + systems that support them. In order to get this functionality, set the + optional argument 'followlinks' to true. 
+
+    Caution:  if you pass a relative pathname for top, don't change the
+    current working directory between resumptions of walk.  walk never
+    changes the current directory, and assumes that the client doesn't
+    either.
+
+    Example:
+
+    import os
+    from os.path import join, getsize
+    for root, dirs, files in os.walk('python/Lib/xml'):
+        print(root, "consumes", end=" ")
+        print(sum(getsize(join(root, name)) for name in files), end=" ")
+        print("bytes in", len(files), "non-directory files")
+        if '__pycache__' in dirs:
+            dirs.remove('__pycache__')  # don't visit __pycache__ directories
+
+    """
+    sys.audit("os.walk", top, topdown, onerror, followlinks)
+
+    stack = [fspath(top)]
+    islink, join = path.islink, path.join
+    while stack:
+        top = stack.pop()
+        if isinstance(top, tuple):
+            yield top
+            continue
+
+        dirs = []
+        nondirs = []
+        walk_dirs = []
+
+        # We may not have read permission for top, in which case we can't
+        # get a list of the files the directory contains.
+        # We suppress the exception here, rather than blow up for a
+        # minor reason when (say) a thousand readable directories are still
+        # left to visit.
+        try:
+            with scandir(top) as entries:
+                for entry in entries:
+                    try:
+                        if followlinks is _walk_symlinks_as_files:
+                            is_dir = entry.is_dir(follow_symlinks=False) and not entry.is_junction()
+                        else:
+                            is_dir = entry.is_dir()
+                    except OSError:
+                        # If is_dir() raises an OSError, consider the entry not to
+                        # be a directory, same behaviour as os.path.isdir().
+                        is_dir = False
+
+                    if is_dir:
+                        dirs.append(entry.name)
+                    else:
+                        nondirs.append(entry.name)
+
+                    if not topdown and is_dir:
+                        # Bottom-up: traverse into sub-directory, but exclude
+                        # symlinks to directories if followlinks is False
+                        if followlinks:
+                            walk_into = True
+                        else:
+                            try:
+                                is_symlink = entry.is_symlink()
+                            except OSError:
+                                # If is_symlink() raises an OSError, consider the
+                                # entry not to be a symbolic link, same behaviour
+                                # as os.path.islink().
+                                is_symlink = False
+                            walk_into = not is_symlink
+
+                        if walk_into:
+                            walk_dirs.append(entry.path)
+        except OSError as error:
+            if onerror is not None:
+                onerror(error)
+            continue
+
+        if topdown:
+            # Yield before sub-directory traversal if going top down
+            yield top, dirs, nondirs
+            # Traverse into sub-directories
+            for dirname in reversed(dirs):
+                new_path = join(top, dirname)
+                # bpo-23605: os.path.islink() is used instead of caching
+                # entry.is_symlink() result during the loop on os.scandir() because
+                # the caller can replace the directory entry during the "yield"
+                # above.
+                if followlinks or not islink(new_path):
+                    stack.append(new_path)
+        else:
+            # Yield after sub-directory traversal if going bottom up
+            stack.append((top, dirs, nondirs))
+            # Traverse into sub-directories
+            for new_path in reversed(walk_dirs):
+                stack.append(new_path)
+
+__all__.append("walk")
+
+if {open, stat} <= supports_dir_fd and {scandir, stat} <= supports_fd:
+
+    def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
+        """Directory tree generator.
+
+        This behaves exactly like walk(), except that it yields a 4-tuple
+
+        dirpath, dirnames, filenames, dirfd
+
+        `dirpath`, `dirnames` and `filenames` are identical to walk() output,
+        and `dirfd` is a file descriptor referring to the directory `dirpath`.
+
+        The advantage of fwalk() over walk() is that it's safe against symlink
+        races (when follow_symlinks is False).
+
+        If dir_fd is not None, it should be a file descriptor open to a directory,
+        and top should be relative; top will then be relative to that directory.
+ (dir_fd is always supported for fwalk.) + + Caution: + Since fwalk() yields file descriptors, those are only valid until the + next iteration step, so you should dup() them if you want to keep them + for a longer period. + + Example: + + import os + for root, dirs, files, rootfd in os.fwalk('python/Lib/xml'): + print(root, "consumes", end="") + print(sum(os.stat(name, dir_fd=rootfd).st_size for name in files), + end="") + print("bytes in", len(files), "non-directory files") + if '__pycache__' in dirs: + dirs.remove('__pycache__') # don't visit __pycache__ directories + """ + sys.audit("os.fwalk", top, topdown, onerror, follow_symlinks, dir_fd) + top = fspath(top) + stack = [(_fwalk_walk, (True, dir_fd, top, top, None))] + isbytes = isinstance(top, bytes) + try: + while stack: + yield from _fwalk(stack, isbytes, topdown, onerror, follow_symlinks) + finally: + # Close any file descriptors still on the stack. + while stack: + action, value = stack.pop() + if action == _fwalk_close: + close(value) + + # Each item in the _fwalk() stack is a pair (action, args). + _fwalk_walk = 0 # args: (isroot, dirfd, toppath, topname, entry) + _fwalk_yield = 1 # args: (toppath, dirnames, filenames, topfd) + _fwalk_close = 2 # args: dirfd + + def _fwalk(stack, isbytes, topdown, onerror, follow_symlinks): + # Note: This uses O(depth of the directory tree) file descriptors: if + # necessary, it can be adapted to only require O(1) FDs, see issue + # #13734. + + action, value = stack.pop() + if action == _fwalk_close: + close(value) + return + elif action == _fwalk_yield: + yield value + return + assert action == _fwalk_walk + isroot, dirfd, toppath, topname, entry = value + try: + if not follow_symlinks: + # Note: To guard against symlink races, we use the standard + # lstat()/open()/fstat() trick. + if entry is None: + orig_st = stat(topname, follow_symlinks=False, dir_fd=dirfd) + else: + orig_st = entry.stat(follow_symlinks=False) + topfd = open(topname, O_RDONLY | O_NONBLOCK, dir_fd=dirfd) + except OSError as err: + if isroot: + raise + if onerror is not None: + onerror(err) + return + stack.append((_fwalk_close, topfd)) + if not follow_symlinks: + if isroot and not st.S_ISDIR(orig_st.st_mode): + return + if not path.samestat(orig_st, stat(topfd)): + return + + scandir_it = scandir(topfd) + dirs = [] + nondirs = [] + entries = None if topdown or follow_symlinks else [] + for entry in scandir_it: + name = entry.name + if isbytes: + name = fsencode(name) + try: + if entry.is_dir(): + dirs.append(name) + if entries is not None: + entries.append(entry) + else: + nondirs.append(name) + except OSError: + try: + # Add dangling symlinks, ignore disappeared files + if entry.is_symlink(): + nondirs.append(name) + except OSError: + pass + + if topdown: + yield toppath, dirs, nondirs, topfd + else: + stack.append((_fwalk_yield, (toppath, dirs, nondirs, topfd))) + + toppath = path.join(toppath, toppath[:0]) # Add trailing slash. + if entries is None: + stack.extend( + (_fwalk_walk, (False, topfd, toppath + name, name, None)) + for name in dirs[::-1]) + else: + stack.extend( + (_fwalk_walk, (False, topfd, toppath + name, name, entry)) + for name, entry in zip(dirs[::-1], entries[::-1])) + + __all__.append("fwalk") + +def execl(file, *args): + """execl(file, *args) + + Execute the executable file with argument list args, replacing the + current process. 
""" + execv(file, args) + +def execle(file, *args): + """execle(file, *args, env) + + Execute the executable file with argument list args and + environment env, replacing the current process. """ + env = args[-1] + execve(file, args[:-1], env) + +def execlp(file, *args): + """execlp(file, *args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. """ + execvp(file, args) + +def execlpe(file, *args): + """execlpe(file, *args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the current + process. """ + env = args[-1] + execvpe(file, args[:-1], env) + +def execvp(file, args): + """execvp(file, args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. + args may be a list or tuple of strings. """ + _execvpe(file, args) + +def execvpe(file, args, env): + """execvpe(file, args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the + current process. + args may be a list or tuple of strings. """ + _execvpe(file, args, env) + +__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) + +def _execvpe(file, args, env=None): + if env is not None: + exec_func = execve + argrest = (args, env) + else: + exec_func = execv + argrest = (args,) + env = environ + + if path.dirname(file): + exec_func(file, *argrest) + return + saved_exc = None + path_list = get_exec_path(env) + if name != 'nt': + file = fsencode(file) + path_list = map(fsencode, path_list) + for dir in path_list: + fullname = path.join(dir, file) + try: + exec_func(fullname, *argrest) + except (FileNotFoundError, NotADirectoryError) as e: + last_exc = e + except OSError as e: + last_exc = e + if saved_exc is None: + saved_exc = e + if saved_exc is not None: + raise saved_exc + raise last_exc + + +def get_exec_path(env=None): + """Returns the sequence of directories that will be searched for the + named executable (similar to a shell) when launching a process. + + *env* must be an environment variable dict or None. If *env* is None, + os.environ will be used. + """ + # Use a local import instead of a global import to limit the number of + # modules loaded at startup: the os module is always loaded at startup by + # Python. It may also avoid a bootstrap issue. 
+ import warnings + + if env is None: + env = environ + + # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a + # BytesWarning when using python -b or python -bb: ignore the warning + with warnings.catch_warnings(): + warnings.simplefilter("ignore", BytesWarning) + + try: + path_list = env.get('PATH') + except TypeError: + path_list = None + + if supports_bytes_environ: + try: + path_listb = env[b'PATH'] + except (KeyError, TypeError): + pass + else: + if path_list is not None: + raise ValueError( + "env cannot contain 'PATH' and b'PATH' keys") + path_list = path_listb + + if path_list is not None and isinstance(path_list, bytes): + path_list = fsdecode(path_list) + + if path_list is None: + path_list = defpath + return path_list.split(pathsep) + + +# Change environ to automatically call putenv() and unsetenv() +from _collections_abc import MutableMapping, Mapping + +class _Environ(MutableMapping): + def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue): + self.encodekey = encodekey + self.decodekey = decodekey + self.encodevalue = encodevalue + self.decodevalue = decodevalue + self._data = data + + def __getitem__(self, key): + try: + value = self._data[self.encodekey(key)] + except KeyError: + # raise KeyError with the original key value + raise KeyError(key) from None + return self.decodevalue(value) + + def __setitem__(self, key, value): + key = self.encodekey(key) + value = self.encodevalue(value) + putenv(key, value) + self._data[key] = value + + def __delitem__(self, key): + encodedkey = self.encodekey(key) + unsetenv(encodedkey) + try: + del self._data[encodedkey] + except KeyError: + # raise KeyError with the original key value + raise KeyError(key) from None + + def __iter__(self): + # list() from dict object is an atomic operation + keys = list(self._data) + for key in keys: + yield self.decodekey(key) + + def __len__(self): + return len(self._data) + + def __repr__(self): + formatted_items = ", ".join( + f"{self.decodekey(key)!r}: {self.decodevalue(value)!r}" + for key, value in self._data.items() + ) + return f"environ({{{formatted_items}}})" + + def copy(self): + return dict(self) + + def setdefault(self, key, value): + if key not in self: + self[key] = value + return self[key] + + def __ior__(self, other): + self.update(other) + return self + + def __or__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + new = dict(self) + new.update(other) + return new + + def __ror__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + new = dict(other) + new.update(self) + return new + +def _create_environ_mapping(): + if name == 'nt': + # Where Env Var Names Must Be UPPERCASE + def check_str(value): + if not isinstance(value, str): + raise TypeError("str expected, not %s" % type(value).__name__) + return value + encode = check_str + decode = str + def encodekey(key): + return encode(key).upper() + data = {} + for key, value in environ.items(): + data[encodekey(key)] = value + else: + # Where Env Var Names Can Be Mixed Case + encoding = sys.getfilesystemencoding() + def encode(value): + if not isinstance(value, str): + raise TypeError("str expected, not %s" % type(value).__name__) + return value.encode(encoding, 'surrogateescape') + def decode(value): + return value.decode(encoding, 'surrogateescape') + encodekey = encode + data = environ + return _Environ(data, + encodekey, decode, + encode, decode) + +# unicode environ +environ = _create_environ_mapping() +del _create_environ_mapping + + +if 
_exists("_create_environ"): + def reload_environ(): + data = _create_environ() + if name == 'nt': + encodekey = environ.encodekey + data = {encodekey(key): value + for key, value in data.items()} + + # modify in-place to keep os.environb in sync + env_data = environ._data + env_data.clear() + env_data.update(data) + + __all__.append("reload_environ") + +def getenv(key, default=None): + """Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are str.""" + return environ.get(key, default) + +supports_bytes_environ = (name != 'nt') +__all__.extend(("getenv", "supports_bytes_environ")) + +if supports_bytes_environ: + def _check_bytes(value): + if not isinstance(value, bytes): + raise TypeError("bytes expected, not %s" % type(value).__name__) + return value + + # bytes environ + environb = _Environ(environ._data, + _check_bytes, bytes, + _check_bytes, bytes) + del _check_bytes + + def getenvb(key, default=None): + """Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are bytes.""" + return environb.get(key, default) + + __all__.extend(("environb", "getenvb")) + +def _fscodec(): + encoding = sys.getfilesystemencoding() + errors = sys.getfilesystemencodeerrors() + + def fsencode(filename): + """Encode filename (an os.PathLike, bytes, or str) to the filesystem + encoding with 'surrogateescape' error handler, return bytes unchanged. + On Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + """ + filename = fspath(filename) # Does type-checking of `filename`. + if isinstance(filename, str): + return filename.encode(encoding, errors) + else: + return filename + + def fsdecode(filename): + """Decode filename (an os.PathLike, bytes, or str) from the filesystem + encoding with 'surrogateescape' error handler, return str unchanged. On + Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + """ + filename = fspath(filename) # Does type-checking of `filename`. + if isinstance(filename, bytes): + return filename.decode(encoding, errors) + else: + return filename + + return fsencode, fsdecode + +fsencode, fsdecode = _fscodec() +del _fscodec + +# Supply spawn*() (probably only for Unix) +if _exists("fork") and not _exists("spawnv") and _exists("execv"): + + P_WAIT = 0 + P_NOWAIT = P_NOWAITO = 1 + + __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"]) + + # XXX Should we support P_DETACH? I suppose it could fork()**2 + # and close the std I/O streams. Also, P_OVERLAY is the same + # as execv*()? + + def _spawnvef(mode, file, args, env, func): + # Internal helper; func is the exec*() function to use + if not isinstance(args, (tuple, list)): + raise TypeError('argv must be a tuple or a list') + if not args or not args[0]: + raise ValueError('argv first element cannot be empty') + pid = fork() + if not pid: + # Child + try: + if env is None: + func(file, args) + else: + func(file, args, env) + except: + _exit(127) + else: + # Parent + if mode == P_NOWAIT: + return pid # Caller is responsible for waiting! + while 1: + wpid, sts = waitpid(pid, 0) + if WIFSTOPPED(sts): + continue + + return waitstatus_to_exitcode(sts) + + def spawnv(mode, file, args): + """spawnv(mode, file, args) -> integer + +Execute file with arguments from args in a subprocess. 
+If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, None, execv) + + def spawnve(mode, file, args, env): + """spawnve(mode, file, args, env) -> integer + +Execute file with arguments from args in a subprocess with the +specified environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, env, execve) + + # Note: spawnvp[e] isn't currently supported on Windows + + def spawnvp(mode, file, args): + """spawnvp(mode, file, args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, None, execvp) + + def spawnvpe(mode, file, args, env): + """spawnvpe(mode, file, args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return _spawnvef(mode, file, args, env, execvpe) + + + __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"]) + + +if _exists("spawnv"): + # These aren't supplied by the basic Windows code + # but can be easily implemented in Python + + def spawnl(mode, file, *args): + """spawnl(mode, file, *args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnv(mode, file, args) + + def spawnle(mode, file, *args): + """spawnle(mode, file, *args, env) -> integer + +Execute file with arguments from args in a subprocess with the +supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + env = args[-1] + return spawnve(mode, file, args[:-1], env) + + + __all__.extend(["spawnl", "spawnle"]) + + +if _exists("spawnvp"): + # At the moment, Windows doesn't implement spawnvp[e], + # so it won't have spawnlp[e] either. + def spawnlp(mode, file, *args): + """spawnlp(mode, file, *args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + return spawnvp(mode, file, args) + + def spawnlpe(mode, file, *args): + """spawnlpe(mode, file, *args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. 
+If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. """ + env = args[-1] + return spawnvpe(mode, file, args[:-1], env) + + + __all__.extend(["spawnlp", "spawnlpe"]) + +# VxWorks has no user space shell provided. As a result, running +# command in a shell can't be supported. +if sys.platform != 'vxworks': + # Supply os.popen() + def popen(cmd, mode="r", buffering=-1): + if not isinstance(cmd, str): + raise TypeError("invalid cmd type (%s, expected string)" % type(cmd)) + if mode not in ("r", "w"): + raise ValueError("invalid mode %r" % mode) + if buffering == 0 or buffering is None: + raise ValueError("popen() does not support unbuffered streams") + import subprocess + if mode == "r": + proc = subprocess.Popen(cmd, + shell=True, text=True, + stdout=subprocess.PIPE, + bufsize=buffering) + return _wrap_close(proc.stdout, proc) + else: + proc = subprocess.Popen(cmd, + shell=True, text=True, + stdin=subprocess.PIPE, + bufsize=buffering) + return _wrap_close(proc.stdin, proc) + + # Helper for popen() -- a proxy for a file whose close waits for the process + class _wrap_close: + def __init__(self, stream, proc): + self._stream = stream + self._proc = proc + def close(self): + self._stream.close() + returncode = self._proc.wait() + if returncode == 0: + return None + if name == 'nt': + return returncode + else: + return returncode << 8 # Shift left to match old behavior + def __enter__(self): + return self + def __exit__(self, *args): + self.close() + def __getattr__(self, name): + return getattr(self._stream, name) + def __iter__(self): + return iter(self._stream) + + __all__.append("popen") + +# Supply os.fdopen() +def fdopen(fd, mode="r", buffering=-1, encoding=None, *args, **kwargs): + if not isinstance(fd, int): + raise TypeError("invalid fd type (%s, expected integer)" % type(fd)) + import io + if "b" not in mode: + encoding = io.text_encoding(encoding) + return io.open(fd, mode, buffering, encoding, *args, **kwargs) + + +# For testing purposes, make sure the function is available when the C +# implementation exists. +def _fspath(path): + """Return the path representation of a path-like object. + + If str or bytes is passed in, it is returned unchanged. Otherwise the + os.PathLike interface is used to get the path representation. If the + path representation is not str or bytes, TypeError is raised. If the + provided path is not str, bytes, or os.PathLike, TypeError is raised. + """ + if isinstance(path, (str, bytes)): + return path + + # Work from the object's type to match method resolution of other magic + # methods. + path_type = type(path) + try: + path_repr = path_type.__fspath__(path) + except AttributeError: + if hasattr(path_type, '__fspath__'): + raise + else: + raise TypeError("expected str, bytes or os.PathLike object, " + "not " + path_type.__name__) + except TypeError: + if path_type.__fspath__ is None: + raise TypeError("expected str, bytes or os.PathLike object, " + "not " + path_type.__name__) from None + else: + raise + if isinstance(path_repr, (str, bytes)): + return path_repr + else: + raise TypeError("expected {}.__fspath__() to return str or bytes, " + "not {}".format(path_type.__name__, + type(path_repr).__name__)) + +# If there is no C implementation, make the pure Python version the +# implementation as transparently as possible. 
+if not _exists('fspath'):
+    fspath = _fspath
+    fspath.__name__ = "fspath"
+
+
+class PathLike(abc.ABC):
+
+    """Abstract base class for implementing the file system path protocol."""
+
+    __slots__ = ()
+
+    @abc.abstractmethod
+    def __fspath__(self):
+        """Return the file system path representation of the object."""
+        raise NotImplementedError
+
+    @classmethod
+    def __subclasshook__(cls, subclass):
+        if cls is PathLike:
+            return _check_methods(subclass, '__fspath__')
+        return NotImplemented
+
+    __class_getitem__ = classmethod(GenericAlias)
+
+
+if name == 'nt':
+    class _AddedDllDirectory:
+        def __init__(self, path, cookie, remove_dll_directory):
+            self.path = path
+            self._cookie = cookie
+            self._remove_dll_directory = remove_dll_directory
+        def close(self):
+            self._remove_dll_directory(self._cookie)
+            self.path = None
+        def __enter__(self):
+            return self
+        def __exit__(self, *args):
+            self.close()
+        def __repr__(self):
+            if self.path:
+                return "<AddedDllDirectory({!r})>".format(self.path)
+            return "<AddedDllDirectory()>"
+
+    def add_dll_directory(path):
+        """Add a path to the DLL search path.
+
+        This search path is used when resolving dependencies for imported
+        extension modules (the module itself is resolved through sys.path),
+        and also by ctypes.
+
+        Remove the directory by calling close() on the returned object or
+        using it in a with statement.
+        """
+        import nt
+        cookie = nt._add_dll_directory(path)
+        return _AddedDllDirectory(
+            path,
+            cookie,
+            nt._remove_dll_directory
+        )
+
+
+if _exists('sched_getaffinity') and sys._get_cpu_count_config() < 0:
+    def process_cpu_count():
+        """
+        Get the number of CPUs of the current process.
+
+        Return the number of logical CPUs usable by the calling thread of the
+        current process. Return None if indeterminable.
+        """
+        return len(sched_getaffinity(0))
+else:
+    # Just an alias to cpu_count() (same docstring)
+    process_cpu_count = cpu_count
diff --git a/Python314_4_x86_Template/Lib/pathlib/__init__.py b/Python314_4_x86_Template/Lib/pathlib/__init__.py
new file mode 100644
index 00000000..0d763d1f
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/pathlib/__init__.py
@@ -0,0 +1,1307 @@
+"""Object-oriented filesystem paths.
+
+This module provides classes to represent abstract paths and concrete
+paths with operations that have semantics appropriate for different
+operating systems.
+"""
+
+import io
+import ntpath
+import operator
+import os
+import posixpath
+import sys
+from errno import *
+from glob import _StringGlobber, _no_recurse_symlinks
+from itertools import chain
+from stat import S_ISDIR, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
+from _collections_abc import Sequence
+
+try:
+    import pwd
+except ImportError:
+    pwd = None
+try:
+    import grp
+except ImportError:
+    grp = None
+
+from pathlib._os import (
+    PathInfo, DirEntryInfo,
+    ensure_different_files, ensure_distinct_paths,
+    copyfile2, copyfileobj, magic_open, copy_info,
+)
+
+
+__all__ = [
+    "UnsupportedOperation",
+    "PurePath", "PurePosixPath", "PureWindowsPath",
+    "Path", "PosixPath", "WindowsPath",
+    ]
+
+
+class UnsupportedOperation(NotImplementedError):
+    """An exception that is raised when an unsupported operation is attempted.
+    """
+    pass
+
+
+class _PathParents(Sequence):
+    """This object provides sequence-like access to the logical ancestors
+    of a path.
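+
+    For example (illustrative)::
+
+        >>> list(PurePosixPath('/a/b/c').parents)
+        [PurePosixPath('/a/b'), PurePosixPath('/a'), PurePosixPath('/')]
+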
Don't try to construct it yourself.""" + __slots__ = ('_path', '_drv', '_root', '_tail') + + def __init__(self, path): + self._path = path + self._drv = path.drive + self._root = path.root + self._tail = path._tail + + def __len__(self): + return len(self._tail) + + def __getitem__(self, idx): + if isinstance(idx, slice): + return tuple(self[i] for i in range(*idx.indices(len(self)))) + + if idx >= len(self) or idx < -len(self): + raise IndexError(idx) + if idx < 0: + idx += len(self) + return self._path._from_parsed_parts(self._drv, self._root, + self._tail[:-idx - 1]) + + def __repr__(self): + return "<{}.parents>".format(type(self._path).__name__) + + +class PurePath: + """Base class for manipulating paths without I/O. + + PurePath represents a filesystem path and offers operations which + don't imply any actual filesystem I/O. Depending on your system, + instantiating a PurePath will return either a PurePosixPath or a + PureWindowsPath object. You can also instantiate either of these classes + directly, regardless of your system. + """ + + __slots__ = ( + # The `_raw_paths` slot stores unjoined string paths. This is set in + # the `__init__()` method. + '_raw_paths', + + # The `_drv`, `_root` and `_tail_cached` slots store parsed and + # normalized parts of the path. They are set when any of the `drive`, + # `root` or `_tail` properties are accessed for the first time. The + # three-part division corresponds to the result of + # `os.path.splitroot()`, except that the tail is further split on path + # separators (i.e. it is a list of strings), and that the root and + # tail are normalized. + '_drv', '_root', '_tail_cached', + + # The `_str` slot stores the string representation of the path, + # computed from the drive, root and tail when `__str__()` is called + # for the first time. It's used to implement `_str_normcase` + '_str', + + # The `_str_normcase_cached` slot stores the string path with + # normalized case. It is set when the `_str_normcase` property is + # accessed for the first time. It's used to implement `__eq__()` + # `__hash__()`, and `_parts_normcase` + '_str_normcase_cached', + + # The `_parts_normcase_cached` slot stores the case-normalized + # string path after splitting on path separators. It's set when the + # `_parts_normcase` property is accessed for the first time. It's used + # to implement comparison methods like `__lt__()`. + '_parts_normcase_cached', + + # The `_hash` slot stores the hash of the case-normalized string + # path. It's set when `__hash__()` is called for the first time. + '_hash', + ) + parser = os.path + + def __new__(cls, *args, **kwargs): + """Construct a PurePath from one or several strings and or existing + PurePath objects. The strings and path objects are combined so as + to yield a canonicalized path, which is incorporated into the + new PurePath object. + """ + if cls is PurePath: + cls = PureWindowsPath if os.name == 'nt' else PurePosixPath + return object.__new__(cls) + + def __init__(self, *args): + paths = [] + for arg in args: + if isinstance(arg, PurePath): + if arg.parser is not self.parser: + # GH-103631: Convert separators for backwards compatibility. 
+ paths.append(arg.as_posix()) + else: + paths.extend(arg._raw_paths) + else: + try: + path = os.fspath(arg) + except TypeError: + path = arg + if not isinstance(path, str): + raise TypeError( + "argument should be a str or an os.PathLike " + "object where __fspath__ returns a str, " + f"not {type(path).__name__!r}") + paths.append(path) + self._raw_paths = paths + + def with_segments(self, *pathsegments): + """Construct a new path object from any number of path-like objects. + Subclasses may override this method to customize how new path objects + are created from methods like `iterdir()`. + """ + return type(self)(*pathsegments) + + def joinpath(self, *pathsegments): + """Combine this path with one or several arguments, and return a + new path representing either a subpath (if all arguments are relative + paths) or a totally different path (if one of the arguments is + anchored). + """ + return self.with_segments(self, *pathsegments) + + def __truediv__(self, key): + try: + return self.with_segments(self, key) + except TypeError: + return NotImplemented + + def __rtruediv__(self, key): + try: + return self.with_segments(key, self) + except TypeError: + return NotImplemented + + def __reduce__(self): + return self.__class__, tuple(self._raw_paths) + + def __repr__(self): + return "{}({!r})".format(self.__class__.__name__, self.as_posix()) + + def __fspath__(self): + return str(self) + + def __bytes__(self): + """Return the bytes representation of the path. This is only + recommended to use under Unix.""" + return os.fsencode(self) + + @property + def _str_normcase(self): + # String with normalized case, for hashing and equality checks + try: + return self._str_normcase_cached + except AttributeError: + if self.parser is posixpath: + self._str_normcase_cached = str(self) + else: + self._str_normcase_cached = str(self).lower() + return self._str_normcase_cached + + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self._str_normcase) + return self._hash + + def __eq__(self, other): + if not isinstance(other, PurePath): + return NotImplemented + return self._str_normcase == other._str_normcase and self.parser is other.parser + + @property + def _parts_normcase(self): + # Cached parts with normalized case, for comparisons. + try: + return self._parts_normcase_cached + except AttributeError: + self._parts_normcase_cached = self._str_normcase.split(self.parser.sep) + return self._parts_normcase_cached + + def __lt__(self, other): + if not isinstance(other, PurePath) or self.parser is not other.parser: + return NotImplemented + return self._parts_normcase < other._parts_normcase + + def __le__(self, other): + if not isinstance(other, PurePath) or self.parser is not other.parser: + return NotImplemented + return self._parts_normcase <= other._parts_normcase + + def __gt__(self, other): + if not isinstance(other, PurePath) or self.parser is not other.parser: + return NotImplemented + return self._parts_normcase > other._parts_normcase + + def __ge__(self, other): + if not isinstance(other, PurePath) or self.parser is not other.parser: + return NotImplemented + return self._parts_normcase >= other._parts_normcase + + def __str__(self): + """Return the string representation of the path, suitable for + passing to system calls.""" + try: + return self._str + except AttributeError: + self._str = self._format_parsed_parts(self.drive, self.root, + self._tail) or '.' 
+ return self._str + + @classmethod + def _format_parsed_parts(cls, drv, root, tail): + if drv or root: + return drv + root + cls.parser.sep.join(tail) + elif tail and cls.parser.splitdrive(tail[0])[0]: + tail = ['.'] + tail + return cls.parser.sep.join(tail) + + def _from_parsed_parts(self, drv, root, tail): + path = self._from_parsed_string(self._format_parsed_parts(drv, root, tail)) + path._drv = drv + path._root = root + path._tail_cached = tail + return path + + def _from_parsed_string(self, path_str): + path = self.with_segments(path_str) + path._str = path_str or '.' + return path + + @classmethod + def _parse_path(cls, path): + if not path: + return '', '', [] + sep = cls.parser.sep + altsep = cls.parser.altsep + if altsep: + path = path.replace(altsep, sep) + drv, root, rel = cls.parser.splitroot(path) + if not root and drv.startswith(sep) and not drv.endswith(sep): + drv_parts = drv.split(sep) + if len(drv_parts) == 4 and drv_parts[2] not in '?.': + # e.g. //server/share + root = sep + elif len(drv_parts) == 6: + # e.g. //?/unc/server/share + root = sep + return drv, root, [x for x in rel.split(sep) if x and x != '.'] + + @classmethod + def _parse_pattern(cls, pattern): + """Parse a glob pattern to a list of parts. This is much like + _parse_path, except: + + - Rather than normalizing and returning the drive and root, we raise + NotImplementedError if either are present. + - If the path has no real parts, we raise ValueError. + - If the path ends in a slash, then a final empty part is added. + """ + drv, root, rel = cls.parser.splitroot(pattern) + if root or drv: + raise NotImplementedError("Non-relative patterns are unsupported") + sep = cls.parser.sep + altsep = cls.parser.altsep + if altsep: + rel = rel.replace(altsep, sep) + parts = [x for x in rel.split(sep) if x and x != '.'] + if not parts: + raise ValueError(f"Unacceptable pattern: {str(pattern)!r}") + elif rel.endswith(sep): + # GH-65238: preserve trailing slash in glob patterns. + parts.append('') + return parts + + def as_posix(self): + """Return the string representation of the path with forward (/) + slashes.""" + return str(self).replace(self.parser.sep, '/') + + @property + def _raw_path(self): + paths = self._raw_paths + if len(paths) == 1: + return paths[0] + elif paths: + # Join path segments from the initializer. 
+            return self.parser.join(*paths)
+        else:
+            return ''
+
+    @property
+    def drive(self):
+        """The drive prefix (letter or UNC path), if any."""
+        try:
+            return self._drv
+        except AttributeError:
+            self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path)
+            return self._drv
+
+    @property
+    def root(self):
+        """The root of the path, if any."""
+        try:
+            return self._root
+        except AttributeError:
+            self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path)
+            return self._root
+
+    @property
+    def _tail(self):
+        try:
+            return self._tail_cached
+        except AttributeError:
+            self._drv, self._root, self._tail_cached = self._parse_path(self._raw_path)
+            return self._tail_cached
+
+    @property
+    def anchor(self):
+        """The concatenation of the drive and root, or ''."""
+        return self.drive + self.root
+
+    @property
+    def parts(self):
+        """An object providing sequence-like access to the
+        components in the filesystem path."""
+        if self.drive or self.root:
+            return (self.drive + self.root,) + tuple(self._tail)
+        else:
+            return tuple(self._tail)
+
+    @property
+    def parent(self):
+        """The logical parent of the path."""
+        drv = self.drive
+        root = self.root
+        tail = self._tail
+        if not tail:
+            return self
+        return self._from_parsed_parts(drv, root, tail[:-1])
+
+    @property
+    def parents(self):
+        """A sequence of this path's logical parents."""
+        # The value of this property should not be cached on the path object,
+        # as doing so would introduce a reference cycle.
+        return _PathParents(self)
+
+    @property
+    def name(self):
+        """The final path component, if any."""
+        tail = self._tail
+        if not tail:
+            return ''
+        return tail[-1]
+
+    def with_name(self, name):
+        """Return a new path with the file name changed."""
+        p = self.parser
+        if not name or p.sep in name or (p.altsep and p.altsep in name) or name == '.':
+            raise ValueError(f"Invalid name {name!r}")
+        tail = self._tail.copy()
+        if not tail:
+            raise ValueError(f"{self!r} has an empty name")
+        tail[-1] = name
+        return self._from_parsed_parts(self.drive, self.root, tail)
+
+    def with_stem(self, stem):
+        """Return a new path with the stem changed."""
+        suffix = self.suffix
+        if not suffix:
+            return self.with_name(stem)
+        elif not stem:
+            # If the suffix is non-empty, we can't make the stem empty.
+            raise ValueError(f"{self!r} has a non-empty suffix")
+        else:
+            return self.with_name(stem + suffix)
+
+    def with_suffix(self, suffix):
+        """Return a new path with the file suffix changed. If the path
+        has no suffix, add given suffix. If the given suffix is an empty
+        string, remove the suffix from the path.
+        """
+        stem = self.stem
+        if not stem:
+            # If the stem is empty, we can't make the suffix non-empty.
+            raise ValueError(f"{self!r} has an empty name")
+        elif suffix and not (suffix.startswith('.') and len(suffix) > 1):
+            # A lone '.' is not a valid suffix.
+            raise ValueError(f"Invalid suffix {suffix!r}")
+        else:
+            return self.with_name(stem + suffix)
+
+    @property
+    def stem(self):
+        """The final path component, minus its last suffix."""
+        name = self.name
+        i = name.rfind('.')
+        if i != -1:
+            stem = name[:i]
+            # Stem must contain at least one non-dot character.
+            if stem.lstrip('.'):
+                return stem
+        return name
+
+    @property
+    def suffix(self):
+        """
+        The final component's last suffix, if any.
+
+        This includes the leading period. For example: '.txt'
+        """
+        name = self.name.lstrip('.')
+        i = name.rfind('.')
+        if i != -1:
+            return name[i:]
+        return ''
+
+    @property
+    def suffixes(self):
+        """
+        A list of the final component's suffixes, if any.
+
+        These include the leading periods.
For example: ['.tar', '.gz'] + """ + return ['.' + ext for ext in self.name.lstrip('.').split('.')[1:]] + + def relative_to(self, other, *, walk_up=False): + """Return the relative path to another path identified by the passed + arguments. If the operation is not possible (because this is not + related to the other path), raise ValueError. + + The *walk_up* parameter controls whether `..` may be used to resolve + the path. + """ + if not hasattr(other, 'with_segments'): + other = self.with_segments(other) + for step, path in enumerate(chain([other], other.parents)): + if path == self or path in self.parents: + break + elif not walk_up: + raise ValueError(f"{str(self)!r} is not in the subpath of {str(other)!r}") + elif path.name == '..': + raise ValueError(f"'..' segment in {str(other)!r} cannot be walked") + else: + raise ValueError(f"{str(self)!r} and {str(other)!r} have different anchors") + parts = ['..'] * step + self._tail[len(path._tail):] + return self._from_parsed_parts('', '', parts) + + def is_relative_to(self, other): + """Return True if the path is relative to another path or False. + """ + if not hasattr(other, 'with_segments'): + other = self.with_segments(other) + return other == self or other in self.parents + + def is_absolute(self): + """True if the path is absolute (has both a root and, if applicable, + a drive).""" + if self.parser is posixpath: + # Optimization: work with raw paths on POSIX. + for path in self._raw_paths: + if path.startswith('/'): + return True + return False + return self.parser.isabs(self) + + def is_reserved(self): + """Return True if the path contains one of the special names reserved + by the system, if any.""" + import warnings + msg = ("pathlib.PurePath.is_reserved() is deprecated and scheduled " + "for removal in Python 3.15. Use os.path.isreserved() to " + "detect reserved paths on Windows.") + warnings._deprecated("pathlib.PurePath.is_reserved", msg, remove=(3, 15)) + if self.parser is ntpath: + return self.parser.isreserved(self) + return False + + def as_uri(self): + """Return the path as a URI.""" + import warnings + msg = ("pathlib.PurePath.as_uri() is deprecated and scheduled " + "for removal in Python 3.19. Use pathlib.Path.as_uri().") + warnings._deprecated("pathlib.PurePath.as_uri", msg, remove=(3, 19)) + if not self.is_absolute(): + raise ValueError("relative path can't be expressed as a file URI") + + drive = self.drive + if len(drive) == 2 and drive[1] == ':': + # It's a path on a local drive => 'file:///c:/a/b' + prefix = 'file:///' + drive + path = self.as_posix()[2:] + elif drive: + # It's a path on a network drive => 'file://host/share/a/b' + prefix = 'file:' + path = self.as_posix() + else: + # It's a posix path => 'file:///etc/hosts' + prefix = 'file://' + path = str(self) + from urllib.parse import quote_from_bytes + return prefix + quote_from_bytes(os.fsencode(path)) + + def full_match(self, pattern, *, case_sensitive=None): + """ + Return True if this path matches the given glob-style pattern. The + pattern is matched against the entire path. + """ + if not hasattr(pattern, 'with_segments'): + pattern = self.with_segments(pattern) + if case_sensitive is None: + case_sensitive = self.parser is posixpath + + # The string representation of an empty path is a single dot ('.'). Empty + # paths shouldn't match wildcards, so we change it to the empty string. 
+ path = str(self) if self.parts else '' + pattern = str(pattern) if pattern.parts else '' + globber = _StringGlobber(self.parser.sep, case_sensitive, recursive=True) + return globber.compile(pattern)(path) is not None + + def match(self, path_pattern, *, case_sensitive=None): + """ + Return True if this path matches the given pattern. If the pattern is + relative, matching is done from the right; otherwise, the entire path + is matched. The recursive wildcard '**' is *not* supported by this + method. + """ + if not hasattr(path_pattern, 'with_segments'): + path_pattern = self.with_segments(path_pattern) + if case_sensitive is None: + case_sensitive = self.parser is posixpath + path_parts = self.parts[::-1] + pattern_parts = path_pattern.parts[::-1] + if not pattern_parts: + raise ValueError("empty pattern") + if len(path_parts) < len(pattern_parts): + return False + if len(path_parts) > len(pattern_parts) and path_pattern.anchor: + return False + globber = _StringGlobber(self.parser.sep, case_sensitive) + for path_part, pattern_part in zip(path_parts, pattern_parts): + match = globber.compile(pattern_part) + if match(path_part) is None: + return False + return True + +# Subclassing os.PathLike makes isinstance() checks slower, +# which in turn makes Path construction slower. Register instead! +os.PathLike.register(PurePath) + + +class PurePosixPath(PurePath): + """PurePath subclass for non-Windows systems. + + On a POSIX system, instantiating a PurePath should return this object. + However, you can also instantiate it directly on any system. + """ + parser = posixpath + __slots__ = () + + +class PureWindowsPath(PurePath): + """PurePath subclass for Windows systems. + + On a Windows system, instantiating a PurePath should return this object. + However, you can also instantiate it directly on any system. + """ + parser = ntpath + __slots__ = () + + +class Path(PurePath): + """PurePath subclass that can make system calls. + + Path represents a filesystem path but unlike PurePath, also offers + methods to do system calls on path objects. Depending on your system, + instantiating a Path will return either a PosixPath or a WindowsPath + object. You can also instantiate a PosixPath or WindowsPath directly, + but cannot instantiate a WindowsPath on a POSIX system or vice versa. + """ + __slots__ = ('_info',) + + def __new__(cls, *args, **kwargs): + if cls is Path: + cls = WindowsPath if os.name == 'nt' else PosixPath + return object.__new__(cls) + + @property + def info(self): + """ + A PathInfo object that exposes the file type and other file attributes + of this path. + """ + try: + return self._info + except AttributeError: + self._info = PathInfo(self) + return self._info + + def stat(self, *, follow_symlinks=True): + """ + Return the result of the stat() system call on this path, like + os.stat() does. + """ + return os.stat(self, follow_symlinks=follow_symlinks) + + def lstat(self): + """ + Like stat(), except if the path points to a symlink, the symlink's + status information is returned, rather than its target's. + """ + return os.lstat(self) + + def exists(self, *, follow_symlinks=True): + """ + Whether this path exists. + + This method normally follows symlinks; to check whether a symlink exists, + add the argument follow_symlinks=False. + """ + if follow_symlinks: + return os.path.exists(self) + return os.path.lexists(self) + + def is_dir(self, *, follow_symlinks=True): + """ + Whether this path is a directory. 
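+
+        For example (illustrative; results depend on the local filesystem)::
+
+            >>> Path('/usr').is_dir()
+            True
+            >>> Path('/etc/hosts').is_dir()
+            False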
+ """ + if follow_symlinks: + return os.path.isdir(self) + try: + return S_ISDIR(self.stat(follow_symlinks=follow_symlinks).st_mode) + except (OSError, ValueError): + return False + + def is_file(self, *, follow_symlinks=True): + """ + Whether this path is a regular file (also True for symlinks pointing + to regular files). + """ + if follow_symlinks: + return os.path.isfile(self) + try: + return S_ISREG(self.stat(follow_symlinks=follow_symlinks).st_mode) + except (OSError, ValueError): + return False + + def is_mount(self): + """ + Check if this path is a mount point + """ + return os.path.ismount(self) + + def is_symlink(self): + """ + Whether this path is a symbolic link. + """ + return os.path.islink(self) + + def is_junction(self): + """ + Whether this path is a junction. + """ + return os.path.isjunction(self) + + def is_block_device(self): + """ + Whether this path is a block device. + """ + try: + return S_ISBLK(self.stat().st_mode) + except (OSError, ValueError): + return False + + def is_char_device(self): + """ + Whether this path is a character device. + """ + try: + return S_ISCHR(self.stat().st_mode) + except (OSError, ValueError): + return False + + def is_fifo(self): + """ + Whether this path is a FIFO. + """ + try: + return S_ISFIFO(self.stat().st_mode) + except (OSError, ValueError): + return False + + def is_socket(self): + """ + Whether this path is a socket. + """ + try: + return S_ISSOCK(self.stat().st_mode) + except (OSError, ValueError): + return False + + def samefile(self, other_path): + """Return whether other_path is the same or not as this file + (as returned by os.path.samefile()). + """ + st = self.stat() + try: + other_st = other_path.stat() + except AttributeError: + other_st = self.with_segments(other_path).stat() + return (st.st_ino == other_st.st_ino and + st.st_dev == other_st.st_dev) + + def open(self, mode='r', buffering=-1, encoding=None, + errors=None, newline=None): + """ + Open the file pointed to by this path and return a file object, as + the built-in open() function does. + """ + if "b" not in mode: + encoding = io.text_encoding(encoding) + return io.open(self, mode, buffering, encoding, errors, newline) + + def read_bytes(self): + """ + Open the file in bytes mode, read it, and close the file. + """ + with self.open(mode='rb', buffering=0) as f: + return f.read() + + def read_text(self, encoding=None, errors=None, newline=None): + """ + Open the file in text mode, read it, and close the file. + """ + # Call io.text_encoding() here to ensure any warning is raised at an + # appropriate stack level. + encoding = io.text_encoding(encoding) + with self.open(mode='r', encoding=encoding, errors=errors, newline=newline) as f: + return f.read() + + def write_bytes(self, data): + """ + Open the file in bytes mode, write to it, and close the file. + """ + # type-check for the buffer interface before truncating the file + view = memoryview(data) + with self.open(mode='wb') as f: + return f.write(view) + + def write_text(self, data, encoding=None, errors=None, newline=None): + """ + Open the file in text mode, write to it, and close the file. + """ + # Call io.text_encoding() here to ensure any warning is raised at an + # appropriate stack level. 
+ encoding = io.text_encoding(encoding) + if not isinstance(data, str): + raise TypeError('data must be str, not %s' % + data.__class__.__name__) + with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f: + return f.write(data) + + _remove_leading_dot = operator.itemgetter(slice(2, None)) + _remove_trailing_slash = operator.itemgetter(slice(-1)) + + def _filter_trailing_slash(self, paths): + sep = self.parser.sep + anchor_len = len(self.anchor) + for path_str in paths: + if len(path_str) > anchor_len and path_str[-1] == sep: + path_str = path_str[:-1] + yield path_str + + def _from_dir_entry(self, dir_entry, path_str): + path = self.with_segments(path_str) + path._str = path_str + path._info = DirEntryInfo(dir_entry) + return path + + def iterdir(self): + """Yield path objects of the directory contents. + + The children are yielded in arbitrary order, and the + special entries '.' and '..' are not included. + """ + root_dir = str(self) + with os.scandir(root_dir) as scandir_it: + entries = list(scandir_it) + if root_dir == '.': + return (self._from_dir_entry(e, e.name) for e in entries) + else: + return (self._from_dir_entry(e, e.path) for e in entries) + + def glob(self, pattern, *, case_sensitive=None, recurse_symlinks=False): + """Iterate over this subtree and yield all existing files (of any + kind, including directories) matching the given relative pattern. + """ + sys.audit("pathlib.Path.glob", self, pattern) + if case_sensitive is None: + case_sensitive = self.parser is posixpath + case_pedantic = False + else: + # The user has expressed a case sensitivity choice, but we don't + # know the case sensitivity of the underlying filesystem, so we + # must use scandir() for everything, including non-wildcard parts. + case_pedantic = True + parts = self._parse_pattern(pattern) + recursive = True if recurse_symlinks else _no_recurse_symlinks + globber = _StringGlobber(self.parser.sep, case_sensitive, case_pedantic, recursive) + select = globber.selector(parts[::-1]) + root = str(self) + paths = select(self.parser.join(root, '')) + + # Normalize results + if root == '.': + paths = map(self._remove_leading_dot, paths) + if parts[-1] == '': + paths = map(self._remove_trailing_slash, paths) + elif parts[-1] == '**': + paths = self._filter_trailing_slash(paths) + paths = map(self._from_parsed_string, paths) + return paths + + def rglob(self, pattern, *, case_sensitive=None, recurse_symlinks=False): + """Recursively yield all existing files (of any kind, including + directories) matching the given relative pattern, anywhere in + this subtree. + """ + sys.audit("pathlib.Path.rglob", self, pattern) + pattern = self.parser.join('**', pattern) + return self.glob(pattern, case_sensitive=case_sensitive, recurse_symlinks=recurse_symlinks) + + def walk(self, top_down=True, on_error=None, follow_symlinks=False): + """Walk the directory tree from this directory, similar to os.walk().""" + sys.audit("pathlib.Path.walk", self, on_error, follow_symlinks) + root_dir = str(self) + if not follow_symlinks: + follow_symlinks = os._walk_symlinks_as_files + results = os.walk(root_dir, top_down, on_error, follow_symlinks) + for path_str, dirnames, filenames in results: + if root_dir == '.': + path_str = path_str[2:] + yield self._from_parsed_string(path_str), dirnames, filenames + + def absolute(self): + """Return an absolute version of this path + No normalization or symlink resolution is performed. + + Use resolve() to resolve symlinks and remove '..' segments. 
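+
+        For example, assuming a current working directory of '/home/user'
+        (illustrative)::
+
+            >>> Path('docs/../src').absolute()
+            PosixPath('/home/user/docs/../src')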
+ """ + if self.is_absolute(): + return self + if self.root: + drive = os.path.splitroot(os.getcwd())[0] + return self._from_parsed_parts(drive, self.root, self._tail) + if self.drive: + # There is a CWD on each drive-letter drive. + cwd = os.path.abspath(self.drive) + else: + cwd = os.getcwd() + if not self._tail: + # Fast path for "empty" paths, e.g. Path("."), Path("") or Path(). + # We pass only one argument to with_segments() to avoid the cost + # of joining, and we exploit the fact that getcwd() returns a + # fully-normalized string by storing it in _str. This is used to + # implement Path.cwd(). + return self._from_parsed_string(cwd) + drive, root, rel = os.path.splitroot(cwd) + if not rel: + return self._from_parsed_parts(drive, root, self._tail) + tail = rel.split(self.parser.sep) + tail.extend(self._tail) + return self._from_parsed_parts(drive, root, tail) + + @classmethod + def cwd(cls): + """Return a new path pointing to the current working directory.""" + cwd = os.getcwd() + path = cls(cwd) + path._str = cwd # getcwd() returns a normalized path + return path + + def resolve(self, strict=False): + """ + Make the path absolute, resolving all symlinks on the way and also + normalizing it. + """ + + return self.with_segments(os.path.realpath(self, strict=strict)) + + if pwd: + def owner(self, *, follow_symlinks=True): + """ + Return the login name of the file owner. + """ + uid = self.stat(follow_symlinks=follow_symlinks).st_uid + return pwd.getpwuid(uid).pw_name + else: + def owner(self, *, follow_symlinks=True): + """ + Return the login name of the file owner. + """ + f = f"{type(self).__name__}.owner()" + raise UnsupportedOperation(f"{f} is unsupported on this system") + + if grp: + def group(self, *, follow_symlinks=True): + """ + Return the group name of the file gid. + """ + gid = self.stat(follow_symlinks=follow_symlinks).st_gid + return grp.getgrgid(gid).gr_name + else: + def group(self, *, follow_symlinks=True): + """ + Return the group name of the file gid. + """ + f = f"{type(self).__name__}.group()" + raise UnsupportedOperation(f"{f} is unsupported on this system") + + if hasattr(os, "readlink"): + def readlink(self): + """ + Return the path to which the symbolic link points. + """ + return self.with_segments(os.readlink(self)) + else: + def readlink(self): + """ + Return the path to which the symbolic link points. + """ + f = f"{type(self).__name__}.readlink()" + raise UnsupportedOperation(f"{f} is unsupported on this system") + + def touch(self, mode=0o666, exist_ok=True): + """ + Create this file with the given access mode, if it doesn't exist. + """ + + if exist_ok: + # First try to bump modification time + # Implementation note: GNU touch uses the UTIME_NOW option of + # the utimensat() / futimens() functions. + try: + os.utime(self, None) + except OSError: + # Avoid exception chaining + pass + else: + return + flags = os.O_CREAT | os.O_WRONLY + if not exist_ok: + flags |= os.O_EXCL + fd = os.open(self, flags, mode) + os.close(fd) + + def mkdir(self, mode=0o777, parents=False, exist_ok=False): + """ + Create a new directory at this given path. 
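+
+        For example, to create nested directories without raising when they
+        already exist (illustrative)::
+
+            >>> Path('build/artifacts').mkdir(parents=True, exist_ok=True)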
+ """ + try: + os.mkdir(self, mode) + except FileNotFoundError: + if not parents or self.parent == self: + raise + self.parent.mkdir(parents=True, exist_ok=True) + self.mkdir(mode, parents=False, exist_ok=exist_ok) + except OSError: + # Cannot rely on checking for EEXIST, since the operating system + # could give priority to other errors like EACCES or EROFS + if not exist_ok or not self.is_dir(): + raise + + def chmod(self, mode, *, follow_symlinks=True): + """ + Change the permissions of the path, like os.chmod(). + """ + os.chmod(self, mode, follow_symlinks=follow_symlinks) + + def lchmod(self, mode): + """ + Like chmod(), except if the path points to a symlink, the symlink's + permissions are changed, rather than its target's. + """ + self.chmod(mode, follow_symlinks=False) + + def unlink(self, missing_ok=False): + """ + Remove this file or link. + If the path is a directory, use rmdir() instead. + """ + try: + os.unlink(self) + except FileNotFoundError: + if not missing_ok: + raise + + def rmdir(self): + """ + Remove this directory. The directory must be empty. + """ + os.rmdir(self) + + def _delete(self): + """ + Delete this file or directory (including all sub-directories). + """ + if self.is_symlink() or self.is_junction(): + self.unlink() + elif self.is_dir(): + # Lazy import to improve module import time + import shutil + shutil.rmtree(self) + else: + self.unlink() + + def rename(self, target): + """ + Rename this path to the target path. + + The target path may be absolute or relative. Relative paths are + interpreted relative to the current working directory, *not* the + directory of the Path object. + + Returns the new Path instance pointing to the target path. + """ + os.rename(self, target) + if not hasattr(target, 'with_segments'): + target = self.with_segments(target) + return target + + def replace(self, target): + """ + Rename this path to the target path, overwriting if that path exists. + + The target path may be absolute or relative. Relative paths are + interpreted relative to the current working directory, *not* the + directory of the Path object. + + Returns the new Path instance pointing to the target path. + """ + os.replace(self, target) + if not hasattr(target, 'with_segments'): + target = self.with_segments(target) + return target + + def copy(self, target, **kwargs): + """ + Recursively copy this file or directory tree to the given destination. + """ + if not hasattr(target, 'with_segments'): + target = self.with_segments(target) + ensure_distinct_paths(self, target) + target._copy_from(self, **kwargs) + return target.joinpath() # Empty join to ensure fresh metadata. + + def copy_into(self, target_dir, **kwargs): + """ + Copy this file or directory tree into the given existing directory. + """ + name = self.name + if not name: + raise ValueError(f"{self!r} has an empty name") + elif hasattr(target_dir, 'with_segments'): + target = target_dir / name + else: + target = self.with_segments(target_dir, name) + return self.copy(target, **kwargs) + + def _copy_from(self, source, follow_symlinks=True, preserve_metadata=False): + """ + Recursively copy the given path to this path. 
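+
+        This is the target-side helper invoked by the public copy() method;
+        a typical caller would write (illustrative, on a POSIX system)::
+
+            >>> Path('src').copy('backup', preserve_metadata=True)
+            PosixPath('backup')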
+ """ + if not follow_symlinks and source.info.is_symlink(): + self._copy_from_symlink(source, preserve_metadata) + elif source.info.is_dir(): + children = source.iterdir() + os.mkdir(self) + for child in children: + self.joinpath(child.name)._copy_from( + child, follow_symlinks, preserve_metadata) + if preserve_metadata: + copy_info(source.info, self) + else: + self._copy_from_file(source, preserve_metadata) + + def _copy_from_file(self, source, preserve_metadata=False): + ensure_different_files(source, self) + with magic_open(source, 'rb') as source_f: + with open(self, 'wb') as target_f: + copyfileobj(source_f, target_f) + if preserve_metadata: + copy_info(source.info, self) + + if copyfile2: + # Use fast OS routine for local file copying where available. + _copy_from_file_fallback = _copy_from_file + def _copy_from_file(self, source, preserve_metadata=False): + try: + source = os.fspath(source) + except TypeError: + pass + else: + copyfile2(source, str(self)) + return + self._copy_from_file_fallback(source, preserve_metadata) + + if os.name == 'nt': + # If a directory-symlink is copied *before* its target, then + # os.symlink() incorrectly creates a file-symlink on Windows. Avoid + # this by passing *target_is_dir* to os.symlink() on Windows. + def _copy_from_symlink(self, source, preserve_metadata=False): + os.symlink(str(source.readlink()), self, source.info.is_dir()) + if preserve_metadata: + copy_info(source.info, self, follow_symlinks=False) + else: + def _copy_from_symlink(self, source, preserve_metadata=False): + os.symlink(str(source.readlink()), self) + if preserve_metadata: + copy_info(source.info, self, follow_symlinks=False) + + def move(self, target): + """ + Recursively move this file or directory tree to the given destination. + """ + # Use os.replace() if the target is os.PathLike and on the same FS. + try: + target = self.with_segments(target) + except TypeError: + pass + else: + ensure_different_files(self, target) + try: + os.replace(self, target) + except OSError as err: + if err.errno != EXDEV: + raise + else: + return target.joinpath() # Empty join to ensure fresh metadata. + # Fall back to copy+delete. + target = self.copy(target, follow_symlinks=False, preserve_metadata=True) + self._delete() + return target + + def move_into(self, target_dir): + """ + Move this file or directory tree into the given existing directory. + """ + name = self.name + if not name: + raise ValueError(f"{self!r} has an empty name") + elif hasattr(target_dir, 'with_segments'): + target = target_dir / name + else: + target = self.with_segments(target_dir, name) + return self.move(target) + + if hasattr(os, "symlink"): + def symlink_to(self, target, target_is_directory=False): + """ + Make this path a symlink pointing to the target path. + Note the order of arguments (link, target) is the reverse of os.symlink. + """ + os.symlink(target, self, target_is_directory) + else: + def symlink_to(self, target, target_is_directory=False): + """ + Make this path a symlink pointing to the target path. + Note the order of arguments (link, target) is the reverse of os.symlink. + """ + f = f"{type(self).__name__}.symlink_to()" + raise UnsupportedOperation(f"{f} is unsupported on this system") + + if hasattr(os, "link"): + def hardlink_to(self, target): + """ + Make this path a hard link pointing to the same file as *target*. + + Note the order of arguments (self, target) is the reverse of os.link's. 
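+
+        For example, this makes 'copy.txt' a second directory entry for the
+        existing file 'data.txt' (illustrative)::
+
+            >>> Path('copy.txt').hardlink_to('data.txt')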
+ """ + os.link(target, self) + else: + def hardlink_to(self, target): + """ + Make this path a hard link pointing to the same file as *target*. + + Note the order of arguments (self, target) is the reverse of os.link's. + """ + f = f"{type(self).__name__}.hardlink_to()" + raise UnsupportedOperation(f"{f} is unsupported on this system") + + def expanduser(self): + """ Return a new path with expanded ~ and ~user constructs + (as returned by os.path.expanduser) + """ + if (not (self.drive or self.root) and + self._tail and self._tail[0][:1] == '~'): + homedir = os.path.expanduser(self._tail[0]) + if homedir[:1] == "~": + raise RuntimeError("Could not determine home directory.") + drv, root, tail = self._parse_path(homedir) + return self._from_parsed_parts(drv, root, tail + self._tail[1:]) + + return self + + @classmethod + def home(cls): + """Return a new path pointing to expanduser('~'). + """ + homedir = os.path.expanduser("~") + if homedir == "~": + raise RuntimeError("Could not determine home directory.") + return cls(homedir) + + def as_uri(self): + """Return the path as a URI.""" + if not self.is_absolute(): + raise ValueError("relative paths can't be expressed as file URIs") + from urllib.request import pathname2url + return pathname2url(str(self), add_scheme=True) + + @classmethod + def from_uri(cls, uri): + """Return a new path from the given 'file' URI.""" + from urllib.error import URLError + from urllib.request import url2pathname + try: + path = cls(url2pathname(uri, require_scheme=True)) + except URLError as exc: + raise ValueError(exc.reason) from None + if not path.is_absolute(): + raise ValueError(f"URI is not absolute: {uri!r}") + return path + + +class PosixPath(Path, PurePosixPath): + """Path subclass for non-Windows systems. + + On a POSIX system, instantiating a Path should return this object. + """ + __slots__ = () + + if os.name == 'nt': + def __new__(cls, *args, **kwargs): + raise UnsupportedOperation( + f"cannot instantiate {cls.__name__!r} on your system") + +class WindowsPath(Path, PureWindowsPath): + """Path subclass for Windows systems. + + On a Windows system, instantiating a Path should return this object. + """ + __slots__ = () + + if os.name != 'nt': + def __new__(cls, *args, **kwargs): + raise UnsupportedOperation( + f"cannot instantiate {cls.__name__!r} on your system") diff --git a/Python314_4_x86_Template/Lib/pathlib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/pathlib/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..4a5dd0bf Binary files /dev/null and b/Python314_4_x86_Template/Lib/pathlib/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/pathlib/__pycache__/_os.cpython-314.pyc b/Python314_4_x86_Template/Lib/pathlib/__pycache__/_os.cpython-314.pyc new file mode 100644 index 00000000..82e92e40 Binary files /dev/null and b/Python314_4_x86_Template/Lib/pathlib/__pycache__/_os.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/pathlib/_local.py b/Python314_4_x86_Template/Lib/pathlib/_local.py new file mode 100644 index 00000000..58e137f2 --- /dev/null +++ b/Python314_4_x86_Template/Lib/pathlib/_local.py @@ -0,0 +1,12 @@ +""" +This module exists so that pathlib objects pickled under Python 3.13 can be +unpickled in 3.14+. 
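+
+For example, a pickle created on 3.13 refers to pathlib._local.PosixPath;
+the star-import below re-exports the public names so the load succeeds
+(illustrative; ``payload`` stands for hypothetical bytes pickled on 3.13)::
+
+    >>> import pickle
+    >>> pickle.loads(payload)
+    PosixPath('/tmp/example')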
+""" + +from pathlib import * + +__all__ = [ + "UnsupportedOperation", + "PurePath", "PurePosixPath", "PureWindowsPath", + "Path", "PosixPath", "WindowsPath", +] diff --git a/Python314_4_x86_Template/Lib/pathlib/_os.py b/Python314_4_x86_Template/Lib/pathlib/_os.py new file mode 100644 index 00000000..03983694 --- /dev/null +++ b/Python314_4_x86_Template/Lib/pathlib/_os.py @@ -0,0 +1,530 @@ +""" +Low-level OS functionality wrappers used by pathlib. +""" + +from errno import * +from io import TextIOWrapper, text_encoding +from stat import S_ISDIR, S_ISREG, S_ISLNK, S_IMODE +import os +import sys +try: + import fcntl +except ImportError: + fcntl = None +try: + import posix +except ImportError: + posix = None +try: + import _winapi +except ImportError: + _winapi = None + + +def _get_copy_blocksize(infd): + """Determine blocksize for fastcopying on Linux. + Hopefully the whole file will be copied in a single call. + The copying itself should be performed in a loop 'till EOF is + reached (0 return) so a blocksize smaller or bigger than the actual + file size should not make any difference, also in case the file + content changes while being copied. + """ + try: + blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8 MiB + except OSError: + blocksize = 2 ** 27 # 128 MiB + # On 32-bit architectures truncate to 1 GiB to avoid OverflowError, + # see gh-82500. + if sys.maxsize < 2 ** 32: + blocksize = min(blocksize, 2 ** 30) + return blocksize + + +if fcntl and hasattr(fcntl, 'FICLONE'): + def _ficlone(source_fd, target_fd): + """ + Perform a lightweight copy of two files, where the data blocks are + copied only when modified. This is known as Copy on Write (CoW), + instantaneous copy or reflink. + """ + fcntl.ioctl(target_fd, fcntl.FICLONE, source_fd) +else: + _ficlone = None + + +if posix and hasattr(posix, '_fcopyfile'): + def _fcopyfile(source_fd, target_fd): + """ + Copy a regular file content using high-performance fcopyfile(3) + syscall (macOS). + """ + posix._fcopyfile(source_fd, target_fd, posix._COPYFILE_DATA) +else: + _fcopyfile = None + + +if hasattr(os, 'copy_file_range'): + def _copy_file_range(source_fd, target_fd): + """ + Copy data from one regular mmap-like fd to another by using a + high-performance copy_file_range(2) syscall that gives filesystems + an opportunity to implement the use of reflinks or server-side + copy. + This should work on Linux >= 4.5 only. + """ + blocksize = _get_copy_blocksize(source_fd) + offset = 0 + while True: + sent = os.copy_file_range(source_fd, target_fd, blocksize, + offset_dst=offset) + if sent == 0: + break # EOF + offset += sent +else: + _copy_file_range = None + + +if hasattr(os, 'sendfile'): + def _sendfile(source_fd, target_fd): + """Copy data from one regular mmap-like fd to another by using + high-performance sendfile(2) syscall. + This should work on Linux >= 2.6.33 only. + """ + blocksize = _get_copy_blocksize(source_fd) + offset = 0 + while True: + sent = os.sendfile(target_fd, source_fd, offset, blocksize) + if sent == 0: + break # EOF + offset += sent +else: + _sendfile = None + + +if _winapi and hasattr(_winapi, 'CopyFile2'): + def copyfile2(source, target): + """ + Copy from one file to another using CopyFile2 (Windows only). + """ + _winapi.CopyFile2(source, target, 0) +else: + copyfile2 = None + + +def copyfileobj(source_f, target_f): + """ + Copy data from file-like object source_f to file-like object target_f. 
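+
+    The fast OS-level copy paths are attempted only when both objects expose
+    a real file descriptor; otherwise a chunked read()/write() loop is used.
+    For example (illustrative)::
+
+        >>> with open('a.bin', 'rb') as src, open('b.bin', 'wb') as dst:
+        ...     copyfileobj(src, dst)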
+    """
+    try:
+        source_fd = source_f.fileno()
+        target_fd = target_f.fileno()
+    except Exception:
+        pass  # Fall through to generic code.
+    else:
+        try:
+            # Use OS copy-on-write where available.
+            if _ficlone:
+                try:
+                    _ficlone(source_fd, target_fd)
+                    return
+                except OSError as err:
+                    if err.errno not in (EBADF, EOPNOTSUPP, ETXTBSY, EXDEV):
+                        raise err
+
+            # Use OS copy where available.
+            if _fcopyfile:
+                try:
+                    _fcopyfile(source_fd, target_fd)
+                    return
+                except OSError as err:
+                    if err.errno not in (EINVAL, ENOTSUP):
+                        raise err
+            if _copy_file_range:
+                try:
+                    _copy_file_range(source_fd, target_fd)
+                    return
+                except OSError as err:
+                    if err.errno not in (ETXTBSY, EXDEV):
+                        raise err
+            if _sendfile:
+                try:
+                    _sendfile(source_fd, target_fd)
+                    return
+                except OSError as err:
+                    if err.errno != ENOTSOCK:
+                        raise err
+        except OSError as err:
+            # Produce more useful error messages.
+            err.filename = source_f.name
+            err.filename2 = target_f.name
+            raise err
+
+    # Last resort: copy with fileobj read() and write().
+    read_source = source_f.read
+    write_target = target_f.write
+    while buf := read_source(1024 * 1024):
+        write_target(buf)
+
+
+def magic_open(path, mode='r', buffering=-1, encoding=None, errors=None,
+               newline=None):
+    """
+    Open the file pointed to by this path and return a file object, as
+    the built-in open() function does.
+    """
+    text = 'b' not in mode
+    if text:
+        # Call io.text_encoding() here to ensure any warning is raised at an
+        # appropriate stack level.
+        encoding = text_encoding(encoding)
+    try:
+        return open(path, mode, buffering, encoding, errors, newline)
+    except TypeError:
+        pass
+    cls = type(path)
+    mode = ''.join(sorted(c for c in mode if c not in 'bt'))
+    if text:
+        try:
+            attr = getattr(cls, f'__open_{mode}__')
+        except AttributeError:
+            pass
+        else:
+            return attr(path, buffering, encoding, errors, newline)
+    elif encoding is not None:
+        raise ValueError("binary mode doesn't take an encoding argument")
+    elif errors is not None:
+        raise ValueError("binary mode doesn't take an errors argument")
+    elif newline is not None:
+        raise ValueError("binary mode doesn't take a newline argument")
+
+    try:
+        attr = getattr(cls, f'__open_{mode}b__')
+    except AttributeError:
+        pass
+    else:
+        stream = attr(path, buffering)
+        if text:
+            stream = TextIOWrapper(stream, encoding, errors, newline)
+        return stream
+
+    raise TypeError(f"{cls.__name__} can't be opened with mode {mode!r}")
+
+
+def ensure_distinct_paths(source, target):
+    """
+    Raise OSError(EINVAL) if the other path is within this path.
+    """
+    # Note: there is no straightforward, foolproof algorithm to determine
+    # if one directory is within another (a particularly perverse example
+    # would be a single network share mounted in one location via NFS, and
+    # in another location via CIFS), so we simply check whether the
+    # other path is lexically equal to, or within, this path.
+    if source == target:
+        err = OSError(EINVAL, "Source and target are the same path")
+    elif source in target.parents:
+        err = OSError(EINVAL, "Source path is a parent of target path")
+    else:
+        return
+    err.filename = str(source)
+    err.filename2 = str(target)
+    raise err
+
+
+def ensure_different_files(source, target):
+    """
+    Raise OSError(EINVAL) if both paths refer to the same file.
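+
+    For example, given an existing file 'a.txt' (illustrative)::
+
+        >>> ensure_different_files(Path('a.txt'), Path('a.txt'))
+        Traceback (most recent call last):
+          ...
+        OSError: [Errno 22] Source and target are the same file: 'a.txt' -> 'a.txt'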
+ """ + try: + source_file_id = source.info._file_id + target_file_id = target.info._file_id + except AttributeError: + if source != target: + return + else: + try: + if source_file_id() != target_file_id(): + return + except (OSError, ValueError): + return + err = OSError(EINVAL, "Source and target are the same file") + err.filename = str(source) + err.filename2 = str(target) + raise err + + +def copy_info(info, target, follow_symlinks=True): + """Copy metadata from the given PathInfo to the given local path.""" + copy_times_ns = ( + hasattr(info, '_access_time_ns') and + hasattr(info, '_mod_time_ns') and + (follow_symlinks or os.utime in os.supports_follow_symlinks)) + if copy_times_ns: + t0 = info._access_time_ns(follow_symlinks=follow_symlinks) + t1 = info._mod_time_ns(follow_symlinks=follow_symlinks) + os.utime(target, ns=(t0, t1), follow_symlinks=follow_symlinks) + + # We must copy extended attributes before the file is (potentially) + # chmod()'ed read-only, otherwise setxattr() will error with -EACCES. + copy_xattrs = ( + hasattr(info, '_xattrs') and + hasattr(os, 'setxattr') and + (follow_symlinks or os.setxattr in os.supports_follow_symlinks)) + if copy_xattrs: + xattrs = info._xattrs(follow_symlinks=follow_symlinks) + for attr, value in xattrs: + try: + os.setxattr(target, attr, value, follow_symlinks=follow_symlinks) + except OSError as e: + if e.errno not in (EPERM, ENOTSUP, ENODATA, EINVAL, EACCES): + raise + + copy_posix_permissions = ( + hasattr(info, '_posix_permissions') and + (follow_symlinks or os.chmod in os.supports_follow_symlinks)) + if copy_posix_permissions: + posix_permissions = info._posix_permissions(follow_symlinks=follow_symlinks) + try: + os.chmod(target, posix_permissions, follow_symlinks=follow_symlinks) + except NotImplementedError: + # if we got a NotImplementedError, it's because + # * follow_symlinks=False, + # * lchown() is unavailable, and + # * either + # * fchownat() is unavailable or + # * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW. + # (it returned ENOSUP.) + # therefore we're out of options--we simply cannot chown the + # symlink. give up, suppress the error. + # (which is what shutil always did in this circumstance.) 
+ pass + + copy_bsd_flags = ( + hasattr(info, '_bsd_flags') and + hasattr(os, 'chflags') and + (follow_symlinks or os.chflags in os.supports_follow_symlinks)) + if copy_bsd_flags: + bsd_flags = info._bsd_flags(follow_symlinks=follow_symlinks) + try: + os.chflags(target, bsd_flags, follow_symlinks=follow_symlinks) + except OSError as why: + if why.errno not in (EOPNOTSUPP, ENOTSUP): + raise + + +class _PathInfoBase: + __slots__ = ('_path', '_stat_result', '_lstat_result') + + def __init__(self, path): + self._path = str(path) + + def __repr__(self): + path_type = "WindowsPath" if os.name == "nt" else "PosixPath" + return f"<{path_type}.info>" + + def _stat(self, *, follow_symlinks=True, ignore_errors=False): + """Return the status as an os.stat_result, or None if stat() fails and + ignore_errors is true.""" + if follow_symlinks: + try: + result = self._stat_result + except AttributeError: + pass + else: + if ignore_errors or result is not None: + return result + try: + self._stat_result = os.stat(self._path) + except (OSError, ValueError): + self._stat_result = None + if not ignore_errors: + raise + return self._stat_result + else: + try: + result = self._lstat_result + except AttributeError: + pass + else: + if ignore_errors or result is not None: + return result + try: + self._lstat_result = os.lstat(self._path) + except (OSError, ValueError): + self._lstat_result = None + if not ignore_errors: + raise + return self._lstat_result + + def _posix_permissions(self, *, follow_symlinks=True): + """Return the POSIX file permissions.""" + return S_IMODE(self._stat(follow_symlinks=follow_symlinks).st_mode) + + def _file_id(self, *, follow_symlinks=True): + """Returns the identifier of the file.""" + st = self._stat(follow_symlinks=follow_symlinks) + return st.st_dev, st.st_ino + + def _access_time_ns(self, *, follow_symlinks=True): + """Return the access time in nanoseconds.""" + return self._stat(follow_symlinks=follow_symlinks).st_atime_ns + + def _mod_time_ns(self, *, follow_symlinks=True): + """Return the modify time in nanoseconds.""" + return self._stat(follow_symlinks=follow_symlinks).st_mtime_ns + + if hasattr(os.stat_result, 'st_flags'): + def _bsd_flags(self, *, follow_symlinks=True): + """Return the flags.""" + return self._stat(follow_symlinks=follow_symlinks).st_flags + + if hasattr(os, 'listxattr'): + def _xattrs(self, *, follow_symlinks=True): + """Return the xattrs as a list of (attr, value) pairs, or an empty + list if extended attributes aren't supported.""" + try: + return [ + (attr, os.getxattr(self._path, attr, follow_symlinks=follow_symlinks)) + for attr in os.listxattr(self._path, follow_symlinks=follow_symlinks)] + except OSError as err: + if err.errno not in (EPERM, ENOTSUP, ENODATA, EINVAL, EACCES): + raise + return [] + + +class _WindowsPathInfo(_PathInfoBase): + """Implementation of pathlib.types.PathInfo that provides status + information for Windows paths. 
Don't try to construct it yourself.""" + __slots__ = ('_exists', '_is_dir', '_is_file', '_is_symlink') + + def exists(self, *, follow_symlinks=True): + """Whether this path exists.""" + if not follow_symlinks and self.is_symlink(): + return True + try: + return self._exists + except AttributeError: + if os.path.exists(self._path): + self._exists = True + return True + else: + self._exists = self._is_dir = self._is_file = False + return False + + def is_dir(self, *, follow_symlinks=True): + """Whether this path is a directory.""" + if not follow_symlinks and self.is_symlink(): + return False + try: + return self._is_dir + except AttributeError: + if os.path.isdir(self._path): + self._is_dir = self._exists = True + return True + else: + self._is_dir = False + return False + + def is_file(self, *, follow_symlinks=True): + """Whether this path is a regular file.""" + if not follow_symlinks and self.is_symlink(): + return False + try: + return self._is_file + except AttributeError: + if os.path.isfile(self._path): + self._is_file = self._exists = True + return True + else: + self._is_file = False + return False + + def is_symlink(self): + """Whether this path is a symbolic link.""" + try: + return self._is_symlink + except AttributeError: + self._is_symlink = os.path.islink(self._path) + return self._is_symlink + + +class _PosixPathInfo(_PathInfoBase): + """Implementation of pathlib.types.PathInfo that provides status + information for POSIX paths. Don't try to construct it yourself.""" + __slots__ = () + + def exists(self, *, follow_symlinks=True): + """Whether this path exists.""" + st = self._stat(follow_symlinks=follow_symlinks, ignore_errors=True) + if st is None: + return False + return True + + def is_dir(self, *, follow_symlinks=True): + """Whether this path is a directory.""" + st = self._stat(follow_symlinks=follow_symlinks, ignore_errors=True) + if st is None: + return False + return S_ISDIR(st.st_mode) + + def is_file(self, *, follow_symlinks=True): + """Whether this path is a regular file.""" + st = self._stat(follow_symlinks=follow_symlinks, ignore_errors=True) + if st is None: + return False + return S_ISREG(st.st_mode) + + def is_symlink(self): + """Whether this path is a symbolic link.""" + st = self._stat(follow_symlinks=False, ignore_errors=True) + if st is None: + return False + return S_ISLNK(st.st_mode) + + +PathInfo = _WindowsPathInfo if os.name == 'nt' else _PosixPathInfo + + +class DirEntryInfo(_PathInfoBase): + """Implementation of pathlib.types.PathInfo that provides status + information by querying a wrapped os.DirEntry object. 
Don't try to + construct it yourself.""" + __slots__ = ('_entry',) + + def __init__(self, entry): + super().__init__(entry.path) + self._entry = entry + + def _stat(self, *, follow_symlinks=True, ignore_errors=False): + try: + return self._entry.stat(follow_symlinks=follow_symlinks) + except OSError: + if not ignore_errors: + raise + return None + + def exists(self, *, follow_symlinks=True): + """Whether this path exists.""" + if not follow_symlinks: + return True + return self._stat(ignore_errors=True) is not None + + def is_dir(self, *, follow_symlinks=True): + """Whether this path is a directory.""" + try: + return self._entry.is_dir(follow_symlinks=follow_symlinks) + except OSError: + return False + + def is_file(self, *, follow_symlinks=True): + """Whether this path is a regular file.""" + try: + return self._entry.is_file(follow_symlinks=follow_symlinks) + except OSError: + return False + + def is_symlink(self): + """Whether this path is a symbolic link.""" + try: + return self._entry.is_symlink() + except OSError: + return False diff --git a/Python314_4_x86_Template/Lib/pathlib/types.py b/Python314_4_x86_Template/Lib/pathlib/types.py new file mode 100644 index 00000000..d8f5c34a --- /dev/null +++ b/Python314_4_x86_Template/Lib/pathlib/types.py @@ -0,0 +1,430 @@ +""" +Protocols for supporting classes in pathlib. +""" + +# This module also provides abstract base classes for rich path objects. +# These ABCs are a *private* part of the Python standard library, but they're +# made available as a PyPI package called "pathlib-abc". It's possible they'll +# become an official part of the standard library in future. +# +# Three ABCs are provided -- _JoinablePath, _ReadablePath and _WritablePath + + +from abc import ABC, abstractmethod +from glob import _PathGlobber +from io import text_encoding +from pathlib._os import magic_open, ensure_distinct_paths, ensure_different_files, copyfileobj +from pathlib import PurePath, Path +from typing import Optional, Protocol, runtime_checkable + + +def _explode_path(path, split): + """ + Split the path into a 2-tuple (anchor, parts), where *anchor* is the + uppermost parent of the path (equivalent to path.parents[-1]), and + *parts* is a reversed list of parts following the anchor. + """ + parent, name = split(path) + names = [] + while path != parent: + names.append(name) + path = parent + parent, name = split(path) + return path, names + + +@runtime_checkable +class _PathParser(Protocol): + """Protocol for path parsers, which do low-level path manipulation. + + Path parsers provide a subset of the os.path API, specifically those + functions needed to provide JoinablePath functionality. Each JoinablePath + subclass references its path parser via a 'parser' class attribute. + """ + + sep: str + altsep: Optional[str] + def split(self, path: str) -> tuple[str, str]: ... + def splitext(self, path: str) -> tuple[str, str]: ... + def normcase(self, path: str) -> str: ... + + +@runtime_checkable +class PathInfo(Protocol): + """Protocol for path info objects, which support querying the file type. + Methods may return cached results. + """ + def exists(self, *, follow_symlinks: bool = True) -> bool: ... + def is_dir(self, *, follow_symlinks: bool = True) -> bool: ... + def is_file(self, *, follow_symlinks: bool = True) -> bool: ... + def is_symlink(self) -> bool: ... + + +class _JoinablePath(ABC): + """Abstract base class for pure path objects. + + This class *does not* provide several magic methods that are defined in + its implementation PurePath. 
They are: __init__, __fspath__, __bytes__, + __reduce__, __hash__, __eq__, __lt__, __le__, __gt__, __ge__. + """ + __slots__ = () + + @property + @abstractmethod + def parser(self): + """Implementation of pathlib._types.Parser used for low-level path + parsing and manipulation. + """ + raise NotImplementedError + + @abstractmethod + def with_segments(self, *pathsegments): + """Construct a new path object from any number of path-like objects. + Subclasses may override this method to customize how new path objects + are created from methods like `iterdir()`. + """ + raise NotImplementedError + + @abstractmethod + def __str__(self): + """Return the string representation of the path, suitable for + passing to system calls.""" + raise NotImplementedError + + @property + def anchor(self): + """The concatenation of the drive and root, or ''.""" + return _explode_path(str(self), self.parser.split)[0] + + @property + def name(self): + """The final path component, if any.""" + return self.parser.split(str(self))[1] + + @property + def suffix(self): + """ + The final component's last suffix, if any. + + This includes the leading period. For example: '.txt' + """ + return self.parser.splitext(self.name)[1] + + @property + def suffixes(self): + """ + A list of the final component's suffixes, if any. + + These include the leading periods. For example: ['.tar', '.gz'] + """ + split = self.parser.splitext + stem, suffix = split(self.name) + suffixes = [] + while suffix: + suffixes.append(suffix) + stem, suffix = split(stem) + return suffixes[::-1] + + @property + def stem(self): + """The final path component, minus its last suffix.""" + return self.parser.splitext(self.name)[0] + + def with_name(self, name): + """Return a new path with the file name changed.""" + split = self.parser.split + if split(name)[0]: + raise ValueError(f"Invalid name {name!r}") + path = str(self) + path = path.removesuffix(split(path)[1]) + name + return self.with_segments(path) + + def with_stem(self, stem): + """Return a new path with the stem changed.""" + suffix = self.suffix + if not suffix: + return self.with_name(stem) + elif not stem: + # If the suffix is non-empty, we can't make the stem empty. + raise ValueError(f"{self!r} has a non-empty suffix") + else: + return self.with_name(stem + suffix) + + def with_suffix(self, suffix): + """Return a new path with the file suffix changed. If the path + has no suffix, add given suffix. If the given suffix is an empty + string, remove the suffix from the path. + """ + stem = self.stem + if not stem: + # If the stem is empty, we can't make the suffix non-empty. + raise ValueError(f"{self!r} has an empty name") + elif suffix and not suffix.startswith('.'): + raise ValueError(f"Invalid suffix {suffix!r}") + else: + return self.with_name(stem + suffix) + + @property + def parts(self): + """An object providing sequence-like access to the + components in the filesystem path.""" + anchor, parts = _explode_path(str(self), self.parser.split) + if anchor: + parts.append(anchor) + return tuple(reversed(parts)) + + def joinpath(self, *pathsegments): + """Combine this path with one or several arguments, and return a + new path representing either a subpath (if all arguments are relative + paths) or a totally different path (if one of the arguments is + anchored). 
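+
+        For example (an illustrative addition, not upstream docstring text):
+
+            >>> from pathlib import PurePosixPath
+            >>> PurePosixPath('/etc').joinpath('ssl', 'certs')
+            PurePosixPath('/etc/ssl/certs')
+            >>> PurePosixPath('/etc').joinpath('/usr/bin')
+            PurePosixPath('/usr/bin')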
+ """ + return self.with_segments(str(self), *pathsegments) + + def __truediv__(self, key): + try: + return self.with_segments(str(self), key) + except TypeError: + return NotImplemented + + def __rtruediv__(self, key): + try: + return self.with_segments(key, str(self)) + except TypeError: + return NotImplemented + + @property + def parent(self): + """The logical parent of the path.""" + path = str(self) + parent = self.parser.split(path)[0] + if path != parent: + return self.with_segments(parent) + return self + + @property + def parents(self): + """A sequence of this path's logical parents.""" + split = self.parser.split + path = str(self) + parent = split(path)[0] + parents = [] + while path != parent: + parents.append(self.with_segments(parent)) + path = parent + parent = split(path)[0] + return tuple(parents) + + def full_match(self, pattern): + """ + Return True if this path matches the given glob-style pattern. The + pattern is matched against the entire path. + """ + case_sensitive = self.parser.normcase('Aa') == 'Aa' + globber = _PathGlobber(self.parser.sep, case_sensitive, recursive=True) + match = globber.compile(pattern, altsep=self.parser.altsep) + return match(str(self)) is not None + + +class _ReadablePath(_JoinablePath): + """Abstract base class for readable path objects. + + The Path class implements this ABC for local filesystem paths. Users may + create subclasses to implement readable virtual filesystem paths, such as + paths in archive files or on remote storage systems. + """ + __slots__ = () + + @property + @abstractmethod + def info(self): + """ + A PathInfo object that exposes the file type and other file attributes + of this path. + """ + raise NotImplementedError + + @abstractmethod + def __open_rb__(self, buffering=-1): + """ + Open the file pointed to by this path for reading in binary mode and + return a file object, like open(mode='rb'). + """ + raise NotImplementedError + + def read_bytes(self): + """ + Open the file in bytes mode, read it, and close the file. + """ + with magic_open(self, mode='rb', buffering=0) as f: + return f.read() + + def read_text(self, encoding=None, errors=None, newline=None): + """ + Open the file in text mode, read it, and close the file. + """ + # Call io.text_encoding() here to ensure any warning is raised at an + # appropriate stack level. + encoding = text_encoding(encoding) + with magic_open(self, mode='r', encoding=encoding, errors=errors, newline=newline) as f: + return f.read() + + @abstractmethod + def iterdir(self): + """Yield path objects of the directory contents. + + The children are yielded in arbitrary order, and the + special entries '.' and '..' are not included. + """ + raise NotImplementedError + + def glob(self, pattern, *, recurse_symlinks=True): + """Iterate over this subtree and yield all existing files (of any + kind, including directories) matching the given relative pattern. 
+ """ + anchor, parts = _explode_path(pattern, self.parser.split) + if anchor: + raise NotImplementedError("Non-relative patterns are unsupported") + elif not parts: + raise ValueError(f"Unacceptable pattern: {pattern!r}") + elif not recurse_symlinks: + raise NotImplementedError("recurse_symlinks=False is unsupported") + case_sensitive = self.parser.normcase('Aa') == 'Aa' + globber = _PathGlobber(self.parser.sep, case_sensitive, recursive=True) + select = globber.selector(parts) + return select(self.joinpath('')) + + def walk(self, top_down=True, on_error=None, follow_symlinks=False): + """Walk the directory tree from this directory, similar to os.walk().""" + paths = [self] + while paths: + path = paths.pop() + if isinstance(path, tuple): + yield path + continue + dirnames = [] + filenames = [] + if not top_down: + paths.append((path, dirnames, filenames)) + try: + for child in path.iterdir(): + if child.info.is_dir(follow_symlinks=follow_symlinks): + if not top_down: + paths.append(child) + dirnames.append(child.name) + else: + filenames.append(child.name) + except OSError as error: + if on_error is not None: + on_error(error) + if not top_down: + while not isinstance(paths.pop(), tuple): + pass + continue + if top_down: + yield path, dirnames, filenames + paths += [path.joinpath(d) for d in reversed(dirnames)] + + @abstractmethod + def readlink(self): + """ + Return the path to which the symbolic link points. + """ + raise NotImplementedError + + def copy(self, target, **kwargs): + """ + Recursively copy this file or directory tree to the given destination. + """ + ensure_distinct_paths(self, target) + target._copy_from(self, **kwargs) + return target.joinpath() # Empty join to ensure fresh metadata. + + def copy_into(self, target_dir, **kwargs): + """ + Copy this file or directory tree into the given existing directory. + """ + name = self.name + if not name: + raise ValueError(f"{self!r} has an empty name") + return self.copy(target_dir / name, **kwargs) + + +class _WritablePath(_JoinablePath): + """Abstract base class for writable path objects. + + The Path class implements this ABC for local filesystem paths. Users may + create subclasses to implement writable virtual filesystem paths, such as + paths in archive files or on remote storage systems. + """ + __slots__ = () + + @abstractmethod + def symlink_to(self, target, target_is_directory=False): + """ + Make this path a symlink pointing to the target path. + Note the order of arguments (link, target) is the reverse of os.symlink. + """ + raise NotImplementedError + + @abstractmethod + def mkdir(self): + """ + Create a new directory at this given path. + """ + raise NotImplementedError + + @abstractmethod + def __open_wb__(self, buffering=-1): + """ + Open the file pointed to by this path for writing in binary mode and + return a file object, like open(mode='wb'). + """ + raise NotImplementedError + + def write_bytes(self, data): + """ + Open the file in bytes mode, write to it, and close the file. + """ + # type-check for the buffer interface before truncating the file + view = memoryview(data) + with magic_open(self, mode='wb') as f: + return f.write(view) + + def write_text(self, data, encoding=None, errors=None, newline=None): + """ + Open the file in text mode, write to it, and close the file. + """ + # Call io.text_encoding() here to ensure any warning is raised at an + # appropriate stack level. 
+        encoding = text_encoding(encoding)
+        if not isinstance(data, str):
+            raise TypeError('data must be str, not %s' %
+                            data.__class__.__name__)
+        with magic_open(self, mode='w', encoding=encoding, errors=errors, newline=newline) as f:
+            return f.write(data)
+
+    def _copy_from(self, source, follow_symlinks=True):
+        """
+        Recursively copy the given path to this path.
+        """
+        stack = [(source, self)]
+        while stack:
+            src, dst = stack.pop()
+            if not follow_symlinks and src.info.is_symlink():
+                dst.symlink_to(str(src.readlink()), src.info.is_dir())
+            elif src.info.is_dir():
+                children = src.iterdir()
+                dst.mkdir()
+                for child in children:
+                    stack.append((child, dst.joinpath(child.name)))
+            else:
+                ensure_different_files(src, dst)
+                with magic_open(src, 'rb') as source_f:
+                    with magic_open(dst, 'wb') as target_f:
+                        copyfileobj(source_f, target_f)
+
+
+_JoinablePath.register(PurePath)
+_ReadablePath.register(Path)
+_WritablePath.register(Path)
diff --git a/Python314_4_x86_Template/Lib/pdb.py b/Python314_4_x86_Template/Lib/pdb.py
new file mode 100644
index 00000000..903baeb8
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/pdb.py
@@ -0,0 +1,3678 @@
+"""
+The Python Debugger Pdb
+=======================
+
+To use the debugger in its simplest form:
+
+        >>> import pdb
+        >>> pdb.run('<a statement>')
+
+The debugger's prompt is '(Pdb) '. This will stop in the first
+function call in <a statement>.
+
+Alternatively, if a statement terminated with an unhandled exception,
+you can use pdb's post-mortem facility to inspect the contents of the
+traceback:
+
+        >>> <a statement>
+        <exception traceback>
+        >>> import pdb
+        >>> pdb.pm()
+
+The commands recognized by the debugger are listed in the next
+section. Most can be abbreviated as indicated; e.g., h(elp) means
+that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel',
+nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in
+square brackets. Alternatives in the command syntax are separated
+by a vertical bar (|).
+
+A blank line repeats the previous command literally, except for
+'list', where it lists the next 11 lines.
+
+Commands that the debugger doesn't recognize are assumed to be Python
+statements and are executed in the context of the program being
+debugged. Python statements can also be prefixed with an exclamation
+point ('!'). This is a powerful way to inspect the program being
+debugged; it is even possible to change variables or call functions.
+When an exception occurs in such a statement, the exception name is
+printed but the debugger's state is not changed.
+
+The debugger supports aliases, which can save typing. And aliases can
+have parameters (see the alias help entry) which allows one a certain
+level of adaptability to the context under examination.
+
+Multiple commands may be entered on a single line, separated by the
+pair ';;'. No intelligence is applied to separating the commands; the
+input is split at the first ';;', even if it is in the middle of a
+quoted string.
+
+If a file ".pdbrc" exists in your home directory or in the current
+directory, it is read in and executed as if it had been typed at the
+debugger prompt. This is particularly useful for aliases. If both
+files exist, the one in the home directory is read first and aliases
+defined there can be overridden by the local file. This behavior can be
+disabled by passing the "readrc=False" argument to the Pdb constructor.
+
+Aside from aliases, the debugger is not directly programmable; but it
+is implemented as a class from which you can derive your own debugger
+class, which you can make as fancy as you like.
+
+
+Debugger commands
+=================
+
+"""
+# NOTE: the actual command documentation is collected from docstrings of the
+# commands and is appended to __doc__ after the class has been defined.
+
+import os
+import io
+import re
+import sys
+import cmd
+import bdb
+import dis
+import code
+import glob
+import json
+import stat
+import token
+import types
+import atexit
+import codeop
+import pprint
+import signal
+import socket
+import typing
+import asyncio
+import inspect
+import weakref
+import builtins
+import tempfile
+import textwrap
+import tokenize
+import itertools
+import traceback
+import linecache
+import selectors
+import threading
+import _colorize
+import _pyrepl.utils
+
+from contextlib import ExitStack, closing, contextmanager
+from types import CodeType
+from warnings import deprecated
+
+
+class Restart(Exception):
+    """Causes a debugger to be restarted for the debugged python program."""
+    pass
+
+__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
+           "post_mortem", "set_default_backend", "get_default_backend", "help"]
+
+
+def find_first_executable_line(code):
+    """ Try to find the first executable line of the code object.
+
+    Equivalently, find the line number of the instruction that's
+    after RESUME
+
+    Return code.co_firstlineno if no executable line is found.
+    """
+    prev = None
+    for instr in dis.get_instructions(code):
+        if prev is not None and prev.opname == 'RESUME':
+            if instr.positions.lineno is not None:
+                return instr.positions.lineno
+            return code.co_firstlineno
+        prev = instr
+    return code.co_firstlineno
+
+def find_function(funcname, filename):
+    cre = re.compile(r'def\s+%s(\s*\[.+\])?\s*[(]' % re.escape(funcname))
+    try:
+        fp = tokenize.open(filename)
+    except OSError:
+        lines = linecache.getlines(filename)
+        if not lines:
+            return None
+        fp = io.StringIO(''.join(lines))
+    funcdef = ""
+    funcstart = 0
+    # consumer of this info expects the first line to be 1
+    with fp:
+        for lineno, line in enumerate(fp, start=1):
+            if cre.match(line):
+                funcstart, funcdef = lineno, line
+            elif funcdef:
+                funcdef += line
+
+            if funcdef:
+                try:
+                    code = compile(funcdef, filename, 'exec')
+                except SyntaxError:
+                    continue
+                # We should always be able to find the code object here
+                funccode = next(c for c in code.co_consts if
+                                isinstance(c, CodeType) and c.co_name == funcname)
+                lineno_offset = find_first_executable_line(funccode)
+                return funcname, filename, funcstart + lineno_offset - 1
+    return None
+
+def lasti2lineno(code, lasti):
+    linestarts = list(dis.findlinestarts(code))
+    linestarts.reverse()
+    for i, lineno in linestarts:
+        if lasti >= i:
+            return lineno
+    return 0
+
+
+class _rstr(str):
+    """String that doesn't quote its repr."""
+    def __repr__(self):
+        return self
+
+
+class _ExecutableTarget:
+    filename: str
+    code: CodeType | str
+    namespace: dict
+
+
+class _ScriptTarget(_ExecutableTarget):
+    def __init__(self, target):
+        self._check(target)
+        self._target = self._safe_realpath(target)
+
+        # If PYTHONSAFEPATH (-P) is not set, sys.path[0] is the directory
+        # of pdb, and we should replace it with the directory of the script
+        if not sys.flags.safe_path:
+            sys.path[0] = os.path.dirname(self._target)
+
+    @staticmethod
+    def _check(target):
+        """
+        Check that target is plausibly a script.
+ """ + if not os.path.exists(target): + print(f'Error: {target} does not exist') + sys.exit(1) + if os.path.isdir(target): + print(f'Error: {target} is a directory') + sys.exit(1) + + @staticmethod + def _safe_realpath(path): + """ + Return the canonical path (realpath) if it is accessible from the userspace. + Otherwise (for example, if the path is a symlink to an anonymous pipe), + return the original path. + + See GH-142315. + """ + realpath = os.path.realpath(path) + return realpath if os.path.exists(realpath) else path + + def __repr__(self): + return self._target + + @property + def filename(self): + return self._target + + @property + def code(self): + # Open the file each time because the file may be modified + with io.open_code(self._target) as fp: + return f"exec(compile({fp.read()!r}, {self._target!r}, 'exec'))" + + @property + def namespace(self): + return dict( + __name__='__main__', + __file__=self._target, + __builtins__=__builtins__, + __spec__=None, + ) + + +class _ModuleTarget(_ExecutableTarget): + def __init__(self, target): + self._target = target + + import runpy + try: + _, self._spec, self._code = runpy._get_module_details(self._target) + except ImportError as e: + print(f"ImportError: {e}") + sys.exit(1) + except Exception: + traceback.print_exc() + sys.exit(1) + + def __repr__(self): + return self._target + + @property + def filename(self): + return self._code.co_filename + + @property + def code(self): + return self._code + + @property + def namespace(self): + return dict( + __name__='__main__', + __file__=os.path.normcase(os.path.abspath(self.filename)), + __package__=self._spec.parent, + __loader__=self._spec.loader, + __spec__=self._spec, + __builtins__=__builtins__, + ) + + +class _ZipTarget(_ExecutableTarget): + def __init__(self, target): + import runpy + + self._target = os.path.realpath(target) + sys.path.insert(0, self._target) + try: + _, self._spec, self._code = runpy._get_main_module_details() + except ImportError as e: + print(f"ImportError: {e}") + sys.exit(1) + except Exception: + traceback.print_exc() + sys.exit(1) + + def __repr__(self): + return self._target + + @property + def filename(self): + return self._code.co_filename + + @property + def code(self): + return self._code + + @property + def namespace(self): + return dict( + __name__='__main__', + __file__=os.path.normcase(os.path.abspath(self.filename)), + __package__=self._spec.parent, + __loader__=self._spec.loader, + __spec__=self._spec, + __builtins__=__builtins__, + ) + + +class _PdbInteractiveConsole(code.InteractiveConsole): + def __init__(self, ns, message): + self._message = message + super().__init__(locals=ns, local_exit=True) + + def write(self, data): + self._message(data, end='') + + +# Interaction prompt line will separate file and call info from code +# text using value of line_prefix string. A newline and arrow may +# be to your liking. You can set it once pdb is imported using the +# command "pdb.line_prefix = '\n% '". 
+# line_prefix = ': '    # Use this to get the old situation back
+line_prefix = '\n-> '   # Probably a better default
+
+
+# The default backend to use for Pdb instances if not specified
+# Should be either 'settrace' or 'monitoring'
+_default_backend = 'settrace'
+
+
+def set_default_backend(backend):
+    """Set the default backend to use for Pdb instances."""
+    global _default_backend
+    if backend not in ('settrace', 'monitoring'):
+        raise ValueError("Invalid backend: %s" % backend)
+    _default_backend = backend
+
+
+def get_default_backend():
+    """Get the default backend to use for Pdb instances."""
+    return _default_backend
+
+
+class Pdb(bdb.Bdb, cmd.Cmd):
+
+    _previous_sigint_handler = None
+
+    # Limit the maximum depth of chained exceptions, we should be handling cycles,
+    # but in case there are recursions, we stop at 999.
+    MAX_CHAINED_EXCEPTION_DEPTH = 999
+
+    _file_mtime_table = {}
+
+    _last_pdb_instance = None
+
+    def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None,
+                 nosigint=False, readrc=True, mode=None, backend=None, colorize=False):
+        bdb.Bdb.__init__(self, skip=skip, backend=backend if backend else get_default_backend())
+        cmd.Cmd.__init__(self, completekey, stdin, stdout)
+        sys.audit("pdb.Pdb")
+        if stdout:
+            self.use_rawinput = 0
+        self.prompt = '(Pdb) '
+        self.aliases = {}
+        self.displaying = {}
+        self.mainpyfile = ''
+        self._wait_for_mainpyfile = False
+        self.tb_lineno = {}
+        self.mode = mode
+        self.colorize = colorize and _colorize.can_colorize(file=stdout or sys.stdout)
+        # Try to load readline if it exists
+        try:
+            import readline
+            # remove some common file name delimiters
+            readline.set_completer_delims(' \t\n`@#%^&*()=+[{]}\\|;:\'",<>?')
+        except ImportError:
+            pass
+
+        self.allow_kbdint = False
+        self.nosigint = nosigint
+        # Consider these characters as part of the command so when the user types
+        # c.a or c['a'], it won't be recognized as a c(ontinue) command
+        self.identchars = cmd.Cmd.identchars + '=.[](),"\'+-*/%@&|<>~^'
+
+        # Read ~/.pdbrc and ./.pdbrc
+        self.rcLines = []
+        if readrc:
+            try:
+                with open(os.path.expanduser('~/.pdbrc'), encoding='utf-8') as rcFile:
+                    self.rcLines.extend(rcFile)
+            except OSError:
+                pass
+            try:
+                with open(".pdbrc", encoding='utf-8') as rcFile:
+                    self.rcLines.extend(rcFile)
+            except OSError:
+                pass
+
+        self.commands = {}  # associates a command list to breakpoint numbers
+        self.commands_defining = False  # True while in the process of defining
+                                        # a command list
+        self.commands_bnum = None  # The breakpoint number for which we are
+                                   # defining a list
+
+        self.async_shim_frame = None
+        self.async_awaitable = None
+
+        self._chained_exceptions = tuple()
+        self._chained_exception_index = 0
+
+        self._current_task = None
+
+    def set_trace(self, frame=None, *, commands=None):
+        Pdb._last_pdb_instance = self
+        if frame is None:
+            frame = sys._getframe().f_back
+
+        if commands is not None:
+            self.rcLines.extend(commands)
+
+        super().set_trace(frame)
+
+    async def set_trace_async(self, frame=None, *, commands=None):
+        if self.async_awaitable is not None:
+            # We are already in a set_trace_async call, do not mess with it
+            return
+
+        if frame is None:
+            frame = sys._getframe().f_back
+
+        # We need set_trace to set up the basics, however, this will call
+        # set_stepinstr() which we need to compensate for, because we don't
+        # want to trigger on calls
+        self.set_trace(frame, commands=commands)
+        # Changing the stopframe will disable trace dispatch on calls
+        self.stopframe = frame
+        # We need to stop tracing because we don't have
the privilege to avoid + # triggering tracing functions as normal, as we are not already in + # tracing functions + self.stop_trace() + + self.async_shim_frame = sys._getframe() + self.async_awaitable = None + + while True: + self.async_awaitable = None + # Simulate a trace event + # This should bring up pdb and make pdb believe it's debugging the + # caller frame + self.trace_dispatch(frame, "opcode", None) + if self.async_awaitable is not None: + try: + if self.breaks: + with self.set_enterframe(frame): + # set_continue requires enterframe to work + self.set_continue() + self.start_trace() + await self.async_awaitable + except Exception: + self._error_exc() + else: + break + + self.async_shim_frame = None + + # start the trace (the actual command is already set by set_* calls) + if self.returnframe is None and self.stoplineno == -1 and not self.breaks: + # This means we did a continue without any breakpoints, we should not + # start the trace + return + + self.start_trace() + + def sigint_handler(self, signum, frame): + if self.allow_kbdint: + raise KeyboardInterrupt + self.message("\nProgram interrupted. (Use 'cont' to resume).") + self.set_step() + self.set_trace(frame) + + def reset(self): + bdb.Bdb.reset(self) + self.forget() + + def forget(self): + self.lineno = None + self.stack = [] + self.curindex = 0 + if hasattr(self, 'curframe') and self.curframe: + self.curframe.f_globals.pop('__pdb_convenience_variables', None) + self.curframe = None + self.tb_lineno.clear() + + def setup(self, f, tb): + self.forget() + self.stack, self.curindex = self.get_stack(f, tb) + while tb: + # when setting up post-mortem debugging with a traceback, save all + # the original line numbers to be displayed along the current line + # numbers (which can be different, e.g. due to finally clauses) + lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti) + self.tb_lineno[tb.tb_frame] = lineno + tb = tb.tb_next + self.curframe = self.stack[self.curindex][0] + self.set_convenience_variable(self.curframe, '_frame', self.curframe) + if self._current_task: + self.set_convenience_variable(self.curframe, '_asynctask', self._current_task) + self._save_initial_file_mtime(self.curframe) + + if self._chained_exceptions: + self.set_convenience_variable( + self.curframe, + '_exception', + self._chained_exceptions[self._chained_exception_index], + ) + + if self.rcLines: + self.cmdqueue = [ + line for line in self.rcLines + if line.strip() and not line.strip().startswith("#") + ] + self.rcLines = [] + + @property + @deprecated("The frame locals reference is no longer cached. Use 'curframe.f_locals' instead.") + def curframe_locals(self): + return self.curframe.f_locals + + @curframe_locals.setter + @deprecated("Setting 'curframe_locals' no longer has any effect. 
Update the contents of 'curframe.f_locals' instead.") + def curframe_locals(self, value): + pass + + # Override Bdb methods + + def user_call(self, frame, argument_list): + """This method is called when there is the remote possibility + that we ever need to stop in this function.""" + if self._wait_for_mainpyfile: + return + if self.stop_here(frame): + self.message('--Call--') + self.interaction(frame, None) + + def user_line(self, frame): + """This function is called when we stop or break at this line.""" + if self._wait_for_mainpyfile: + if (self.mainpyfile != self.canonic(frame.f_code.co_filename)): + return + self._wait_for_mainpyfile = False + if self.trace_opcodes: + # GH-127321 + # We want to avoid stopping at an opcode that does not have + # an associated line number because pdb does not like it + if frame.f_lineno is None: + self.set_stepinstr() + return + self.bp_commands(frame) + self.interaction(frame, None) + + user_opcode = user_line + + def bp_commands(self, frame): + """Call every command that was set for the current active breakpoint + (if there is one). + + Returns True if the normal interaction function must be called, + False otherwise.""" + # self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit + if getattr(self, "currentbp", False) and \ + self.currentbp in self.commands: + currentbp = self.currentbp + self.currentbp = 0 + for line in self.commands[currentbp]: + self.cmdqueue.append(line) + self.cmdqueue.append(f'_pdbcmd_restore_lastcmd {self.lastcmd}') + + def user_return(self, frame, return_value): + """This function is called when a return trap is set here.""" + if self._wait_for_mainpyfile: + return + frame.f_locals['__return__'] = return_value + self.set_convenience_variable(frame, '_retval', return_value) + self.message('--Return--') + self.interaction(frame, None) + + def user_exception(self, frame, exc_info): + """This function is called if an exception occurs, + but only if we are to stop at or just below this level.""" + if self._wait_for_mainpyfile: + return + exc_type, exc_value, exc_traceback = exc_info + frame.f_locals['__exception__'] = exc_type, exc_value + self.set_convenience_variable(frame, '_exception', exc_value) + + # An 'Internal StopIteration' exception is an exception debug event + # issued by the interpreter when handling a subgenerator run with + # 'yield from' or a generator controlled by a for loop. No exception has + # actually occurred in this case. The debugger uses this debug event to + # stop when the debuggee is returning from such generators. 
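+        # Illustrative repro (annotation, not upstream code): stepping with
+        # "next" over the last iteration of a generator-driven loop reports
+        # "Internal StopIteration" even though user code never sees one:
+        #   >>> def gen(): yield 1
+        #   >>> def demo():
+        #   ...     for _ in gen():   # exhausting gen() under pdb triggers
+        #   ...         pass          # the Internal StopIteration event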
+        prefix = 'Internal ' if (not exc_traceback
+                                    and exc_type is StopIteration) else ''
+        self.message('%s%s' % (prefix, self._format_exc(exc_value)))
+        self.interaction(frame, exc_traceback)
+
+    # General interaction function
+    def _cmdloop(self):
+        while True:
+            try:
+                # keyboard interrupts allow for an easy way to cancel
+                # the current command, so allow them during interactive input
+                self.allow_kbdint = True
+                self.cmdloop()
+                self.allow_kbdint = False
+                break
+            except KeyboardInterrupt:
+                self.message('--KeyboardInterrupt--')
+
+    def _save_initial_file_mtime(self, frame):
+        """save the mtime of all the files in the frame stack in the file mtime table
+        if they haven't been saved yet."""
+        while frame:
+            filename = frame.f_code.co_filename
+            if filename not in self._file_mtime_table:
+                try:
+                    self._file_mtime_table[filename] = os.path.getmtime(filename)
+                except Exception:
+                    pass
+            frame = frame.f_back
+
+    def _validate_file_mtime(self):
+        """Check if the source file of the current frame has been modified.
+        If so, give a warning and reset the modify time to current."""
+        try:
+            filename = self.curframe.f_code.co_filename
+            mtime = os.path.getmtime(filename)
+        except Exception:
+            return
+        if (filename in self._file_mtime_table and
+            mtime != self._file_mtime_table[filename]):
+            self.message(f"*** WARNING: file '{filename}' was edited, "
+                         "running stale code until the program is rerun")
+            self._file_mtime_table[filename] = mtime
+
+    # Called before loop, handles display expressions
+    # Set up convenience variable containers
+    def _show_display(self):
+        displaying = self.displaying.get(self.curframe)
+        if displaying:
+            for expr, oldvalue in displaying.items():
+                newvalue = self._getval_except(expr)
+                # check for identity first; this prevents custom __eq__ from
+                # being called at every loop, and also prevents instances whose
+                # fields are changed from being displayed
+                if newvalue is not oldvalue and newvalue != oldvalue:
+                    displaying[expr] = newvalue
+                    self.message('display %s: %s [old: %s]' %
+                                 (expr, self._safe_repr(newvalue, expr),
+                                  self._safe_repr(oldvalue, expr)))
+
+    def _get_tb_and_exceptions(self, tb_or_exc):
+        """
+        Given a traceback or an exception, return a tuple of chained exceptions
+        and current traceback to inspect.
+
+        This will deal with selecting the right ``__cause__`` or ``__context__``
+        as well as handling cycles, and return a flattened list of exceptions we
+        can jump to with do_exceptions.
+
+        """
+        _exceptions = []
+        if isinstance(tb_or_exc, BaseException):
+            traceback, current = tb_or_exc.__traceback__, tb_or_exc
+
+            while current is not None:
+                if current in _exceptions:
+                    break
+                _exceptions.append(current)
+                if current.__cause__ is not None:
+                    current = current.__cause__
+                elif (
+                    current.__context__ is not None and not current.__suppress_context__
+                ):
+                    current = current.__context__
+
+                if len(_exceptions) >= self.MAX_CHAINED_EXCEPTION_DEPTH:
+                    self.message(
+                        f"More than {self.MAX_CHAINED_EXCEPTION_DEPTH}"
+                        " chained exceptions found, not all exceptions"
+                        " will be browsable with `exceptions`."
+                    )
+                    break
+        else:
+            traceback = tb_or_exc
+        return tuple(reversed(_exceptions)), traceback
+
+    @contextmanager
+    def _hold_exceptions(self, exceptions):
+        """
+        Context manager to ensure proper cleaning of exception references
+
+        When given a chained exception instead of a traceback,
+        pdb may hold references to many objects which may leak memory.
+ + We use this context manager to make sure everything is properly cleaned + + """ + try: + self._chained_exceptions = exceptions + self._chained_exception_index = len(exceptions) - 1 + yield + finally: + # we can't put those in forget as otherwise they would + # be cleared on exception change + self._chained_exceptions = tuple() + self._chained_exception_index = 0 + + def _get_asyncio_task(self): + try: + task = asyncio.current_task() + except RuntimeError: + task = None + return task + + def interaction(self, frame, tb_or_exc): + # Restore the previous signal handler at the Pdb prompt. + if Pdb._previous_sigint_handler: + try: + signal.signal(signal.SIGINT, Pdb._previous_sigint_handler) + except ValueError: # ValueError: signal only works in main thread + pass + else: + Pdb._previous_sigint_handler = None + + self._current_task = self._get_asyncio_task() + + _chained_exceptions, tb = self._get_tb_and_exceptions(tb_or_exc) + if isinstance(tb_or_exc, BaseException): + assert tb is not None, "main exception must have a traceback" + with self._hold_exceptions(_chained_exceptions): + self.setup(frame, tb) + # We should print the stack entry if and only if the user input + # is expected, and we should print it right before the user input. + # We achieve this by appending _pdbcmd_print_frame_status to the + # command queue. If cmdqueue is not exhausted, the user input is + # not expected and we will not print the stack entry. + self.cmdqueue.append('_pdbcmd_print_frame_status') + self._cmdloop() + # If _pdbcmd_print_frame_status is not used, pop it out + if self.cmdqueue and self.cmdqueue[-1] == '_pdbcmd_print_frame_status': + self.cmdqueue.pop() + self.forget() + + def displayhook(self, obj): + """Custom displayhook for the exec in default(), which prevents + assignment of the _ variable in the builtins. + """ + # reproduce the behavior of the standard displayhook, not printing None + if obj is not None: + self.message(repr(obj)) + + @contextmanager + def _enable_multiline_input(self): + try: + import readline + except ImportError: + yield + return + + def input_auto_indent(): + last_index = readline.get_current_history_length() + last_line = readline.get_history_item(last_index) + if last_line: + if last_line.isspace(): + # If the last line is empty, we don't need to indent + return + + last_line = last_line.rstrip('\r\n') + indent = len(last_line) - len(last_line.lstrip()) + if last_line.endswith(":"): + indent += 4 + readline.insert_text(' ' * indent) + + completenames = self.completenames + try: + self.completenames = self.complete_multiline_names + readline.set_startup_hook(input_auto_indent) + yield + finally: + readline.set_startup_hook() + self.completenames = completenames + return + + def _exec_in_closure(self, source, globals, locals): + """ Run source code in closure so code object created within source + can find variables in locals correctly + + returns True if the source is executed, False otherwise + """ + + # Determine if the source should be executed in closure. Only when the + # source compiled to multiple code objects, we should use this feature. + # Otherwise, we can just raise an exception and normal exec will be used. 
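+        # A concrete illustration of the problem being solved here (sketch,
+        # not upstream code): a nested code object created by plain exec()
+        # cannot see the enclosing frame's locals:
+        #   >>> def f():
+        #   ...     x = 1
+        #   ...     exec("print((lambda: x)())")
+        #   >>> f()
+        #   Traceback (most recent call last):
+        #     ...
+        #   NameError: name 'x' is not defined
+        # Executing the source inside a synthetic closure, as below, gives
+        # the nested code object a real cell for x instead.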
+
+        code = compile(source, "<string>", "exec")
+        if not any(isinstance(const, CodeType) for const in code.co_consts):
+            return False
+
+        # locals could be a proxy which does not support pop
+        # copy it first to avoid modifying the original locals
+        locals_copy = dict(locals)
+
+        locals_copy["__pdb_eval__"] = {
+            "result": None,
+            "write_back": {}
+        }
+
+        # If the source is an expression, we need to print its value
+        try:
+            compile(source, "<string>", "eval")
+        except SyntaxError:
+            pass
+        else:
+            source = "__pdb_eval__['result'] = " + source
+
+        # Add write-back to update the locals
+        source = ("try:\n" +
+                  textwrap.indent(source, "    ") + "\n" +
+                  "finally:\n" +
+                  "    __pdb_eval__['write_back'] = locals()")
+
+        # Build a closure source code with freevars from locals like:
+        # def __pdb_outer():
+        #     var = None
+        #     def __pdb_scope():  # This is the code object we want to execute
+        #         nonlocal var
+        #         <source>
+        #     return __pdb_scope.__code__
+        source_with_closure = ("def __pdb_outer():\n" +
+                               "\n".join(f"    {var} = None" for var in locals_copy) + "\n" +
+                               "    def __pdb_scope():\n" +
+                               "\n".join(f"        nonlocal {var}" for var in locals_copy) + "\n" +
+                               textwrap.indent(source, "        ") + "\n" +
+                               "        return __pdb_scope.__code__"
+                               )
+
+        # Get the code object of __pdb_scope()
+        # The exec fills locals_copy with the __pdb_outer() function and we can call
+        # that to get the code object of __pdb_scope()
+        ns = {}
+        try:
+            exec(source_with_closure, {}, ns)
+        except Exception:
+            return False
+        code = ns["__pdb_outer"]()
+
+        cells = tuple(types.CellType(locals_copy.get(var)) for var in code.co_freevars)
+
+        try:
+            exec(code, globals, locals_copy, closure=cells)
+        except Exception:
+            return False
+
+        # get the data we need from the statement
+        pdb_eval = locals_copy["__pdb_eval__"]
+
+        # __pdb_eval__ should not be updated back to locals
+        pdb_eval["write_back"].pop("__pdb_eval__")
+
+        # Write all local variables back to locals
+        locals.update(pdb_eval["write_back"])
+        eval_result = pdb_eval["result"]
+        if eval_result is not None:
+            self.message(repr(eval_result))
+
+        return True
+
+    def _exec_await(self, source, globals, locals):
+        """ Run source code that contains await by playing with async shim frame"""
+        # Put the source in an async function
+        source_async = (
+            "async def __pdb_await():\n" +
+            textwrap.indent(source, "    ") + '\n' +
+            "    __pdb_locals.update(locals())"
+        )
+        ns = globals | locals
+        # We use __pdb_locals to do write back
+        ns["__pdb_locals"] = locals
+        exec(source_async, ns)
+        self.async_awaitable = ns["__pdb_await"]()
+
+    def _read_code(self, line):
+        buffer = line
+        is_await_code = False
+        code = None
+        try:
+            if (code := codeop.compile_command(line + '\n', '<stdin>', 'single')) is None:
+                # Multi-line mode
+                with self._enable_multiline_input():
+                    buffer = line
+                    continue_prompt = "...   "
+                    while (code := codeop.compile_command(buffer, '<stdin>', 'single')) is None:
+                        if self.use_rawinput:
+                            try:
+                                line = input(continue_prompt)
+                            except (EOFError, KeyboardInterrupt):
+                                self.lastcmd = ""
+                                print('\n')
+                                return None, None, False
+                        else:
+                            self.stdout.write(continue_prompt)
+                            self.stdout.flush()
+                            line = self.stdin.readline()
+                            if not len(line):
+                                self.lastcmd = ""
+                                self.stdout.write('\n')
+                                self.stdout.flush()
+                                return None, None, False
+                            else:
+                                line = line.rstrip('\r\n')
+                        if line.isspace():
+                            # empty line, just continue
+                            buffer += '\n'
+                        else:
+                            buffer += '\n' + line
+                    self.lastcmd = buffer
+        except SyntaxError as e:
+            # Maybe it's an await expression/statement
+            if (
+                self.async_shim_frame is not None
+                and e.msg == "'await' outside function"
+            ):
+                is_await_code = True
+            else:
+                raise
+
+        return code, buffer, is_await_code
+
+    def default(self, line):
+        if line[:1] == '!': line = line[1:].strip()
+        locals = self.curframe.f_locals
+        globals = self.curframe.f_globals
+        try:
+            code, buffer, is_await_code = self._read_code(line)
+            if buffer is None:
+                return
+            save_stdout = sys.stdout
+            save_stdin = sys.stdin
+            save_displayhook = sys.displayhook
+            try:
+                sys.stdin = self.stdin
+                sys.stdout = self.stdout
+                sys.displayhook = self.displayhook
+                if is_await_code:
+                    self._exec_await(buffer, globals, locals)
+                    return True
+                else:
+                    if not self._exec_in_closure(buffer, globals, locals):
+                        exec(code, globals, locals)
+            finally:
+                sys.stdout = save_stdout
+                sys.stdin = save_stdin
+                sys.displayhook = save_displayhook
+        except:
+            self._error_exc()
+
+    def _replace_convenience_variables(self, line):
+        """Replace the convenience variables in 'line' with their values.
+        e.g. $foo is replaced by __pdb_convenience_variables["foo"].
+        Note: such pattern in string literals will be skipped"""
+
+        if "$" not in line:
+            return line
+
+        dollar_start = dollar_end = (-1, -1)
+        replace_variables = []
+        try:
+            for t in tokenize.generate_tokens(io.StringIO(line).readline):
+                token_type, token_string, start, end, _ = t
+                if token_type == token.OP and token_string == '$':
+                    dollar_start, dollar_end = start, end
+                elif start == dollar_end and token_type == token.NAME:
+                    # line is a one-line command so we only care about column
+                    replace_variables.append((dollar_start[1], end[1], token_string))
+        except tokenize.TokenError:
+            return line
+
+        if not replace_variables:
+            return line
+
+        last_end = 0
+        line_pieces = []
+        for start, end, name in replace_variables:
+            line_pieces.append(line[last_end:start] + f'__pdb_convenience_variables["{name}"]')
+            last_end = end
+        line_pieces.append(line[last_end:])
+
+        return ''.join(line_pieces)
+
+    def precmd(self, line):
+        """Handle alias expansion and ';;' separator."""
+        if not line.strip():
+            return line
+        args = line.split()
+        while args[0] in self.aliases:
+            line = self.aliases[args[0]]
+            for idx in range(1, 10):
+                if f'%{idx}' in line:
+                    if idx >= len(args):
+                        self.error(f"Not enough arguments for alias '{args[0]}'")
+                        # This is a no-op
+                        return "!"
+                    line = line.replace(f'%{idx}', args[idx])
+                elif '%*' not in line:
+                    if idx < len(args):
+                        self.error(f"Too many arguments for alias '{args[0]}'")
+                        # This is a no-op
+                        return "!"
+ break + + line = line.replace("%*", ' '.join(args[1:])) + args = line.split() + # split into ';;' separated commands + # unless it's an alias command + if args[0] != 'alias': + marker = line.find(';;') + if marker >= 0: + # queue up everything after marker + next = line[marker+2:].lstrip() + self.cmdqueue.insert(0, next) + line = line[:marker].rstrip() + + # Replace all the convenience variables + line = self._replace_convenience_variables(line) + + return line + + def onecmd(self, line): + """Interpret the argument as though it had been typed in response + to the prompt. + + Checks whether this line is typed at the normal prompt or in + a breakpoint command list definition. + """ + if not self.commands_defining: + if line.startswith('_pdbcmd'): + command, arg, line = self.parseline(line) + if hasattr(self, command): + return getattr(self, command)(arg) + return cmd.Cmd.onecmd(self, line) + else: + return self.handle_command_def(line) + + def handle_command_def(self, line): + """Handles one command line during command list definition.""" + cmd, arg, line = self.parseline(line) + if not cmd: + return False + if cmd == 'end': + return True # end of cmd list + elif cmd == 'EOF': + self.message('') + return True # end of cmd list + cmdlist = self.commands[self.commands_bnum] + if cmd == 'silent': + cmdlist.append('_pdbcmd_silence_frame_status') + return False # continue to handle other cmd def in the cmd list + if arg: + cmdlist.append(cmd+' '+arg) + else: + cmdlist.append(cmd) + # Determine if we must stop + try: + func = getattr(self, 'do_' + cmd) + except AttributeError: + func = self.default + # one of the resuming commands + if func.__name__ in self.commands_resuming: + return True + return False + + def _colorize_code(self, code): + if self.colorize: + colors = list(_pyrepl.utils.gen_colors(code)) + chars, _ = _pyrepl.utils.disp_str(code, colors=colors, force_color=True) + code = "".join(chars) + return code + + # interface abstraction functions + + def message(self, msg, end='\n'): + print(msg, end=end, file=self.stdout) + + def error(self, msg): + print('***', msg, file=self.stdout) + + # convenience variables + + def set_convenience_variable(self, frame, name, value): + if '__pdb_convenience_variables' not in frame.f_globals: + frame.f_globals['__pdb_convenience_variables'] = {} + frame.f_globals['__pdb_convenience_variables'][name] = value + + # Generic completion functions. Individual complete_foo methods can be + # assigned below to one of these functions. + + @property + def rlcompleter(self): + """Return the `Completer` class from `rlcompleter`, while avoiding the + side effects of changing the completer from `import rlcompleter`. + + This is a compromise between GH-138860 and GH-139289. If GH-139289 is + fixed, then we don't need this and we can just `import rlcompleter` in + `Pdb.__init__`. 
+ """ + if not hasattr(self, "_rlcompleter"): + try: + import readline + except ImportError: + # readline is not available, just get the Completer + from rlcompleter import Completer + self._rlcompleter = Completer + else: + # importing rlcompleter could have side effect of changing + # the current completer, we need to restore it + prev_completer = readline.get_completer() + from rlcompleter import Completer + self._rlcompleter = Completer + readline.set_completer(prev_completer) + return self._rlcompleter + + def completenames(self, text, line, begidx, endidx): + # Overwrite completenames() of cmd so for the command completion, + # if no current command matches, check for expressions as well + commands = super().completenames(text, line, begidx, endidx) + for alias in self.aliases: + if alias.startswith(text): + commands.append(alias) + if commands: + return commands + else: + expressions = self._complete_expression(text, line, begidx, endidx) + if expressions: + return expressions + return self.completedefault(text, line, begidx, endidx) + + def _complete_location(self, text, line, begidx, endidx): + # Complete a file/module/function location for break/tbreak/clear. + if line.strip().endswith((':', ',')): + # Here comes a line number or a condition which we can't complete. + return [] + # First, try to find matching functions (i.e. expressions). + try: + ret = self._complete_expression(text, line, begidx, endidx) + except Exception: + ret = [] + # Then, try to complete file names as well. + globs = glob.glob(glob.escape(text) + '*') + for fn in globs: + if os.path.isdir(fn): + ret.append(fn + '/') + elif os.path.isfile(fn) and fn.lower().endswith(('.py', '.pyw')): + ret.append(fn + ':') + return ret + + def _complete_bpnumber(self, text, line, begidx, endidx): + # Complete a breakpoint number. (This would be more helpful if we could + # display additional info along with the completions, such as file/line + # of the breakpoint.) + return [str(i) for i, bp in enumerate(bdb.Breakpoint.bpbynumber) + if bp is not None and str(i).startswith(text)] + + def _complete_expression(self, text, line, begidx, endidx): + # Complete an arbitrary expression. + if not self.curframe: + return [] + # Collect globals and locals. It is usually not really sensible to also + # complete builtins, and they clutter the namespace quite heavily, so we + # leave them out. + ns = {**self.curframe.f_globals, **self.curframe.f_locals} + if '.' in text: + # Walk an attribute chain up to the last part, similar to what + # rlcompleter does. This will bail if any of the parts are not + # simple attribute access, which is what we want. + dotted = text.split('.') + try: + if dotted[0].startswith('$'): + obj = self.curframe.f_globals['__pdb_convenience_variables'][dotted[0][1:]] + else: + obj = ns[dotted[0]] + for part in dotted[1:-1]: + obj = getattr(obj, part) + except (KeyError, AttributeError): + return [] + prefix = '.'.join(dotted[:-1]) + '.' + return [prefix + n for n in dir(obj) if n.startswith(dotted[-1])] + else: + if text.startswith("$"): + # Complete convenience variables + conv_vars = self.curframe.f_globals.get('__pdb_convenience_variables', {}) + return [f"${name}" for name in conv_vars if name.startswith(text[1:])] + # Complete a simple name. 
+            return [n for n in ns.keys() if n.startswith(text)]
+
+    def _complete_indentation(self, text, line, begidx, endidx):
+        try:
+            import readline
+        except ImportError:
+            return []
+        # Fill in spaces to form a 4-space indent
+        return [' ' * (4 - readline.get_begidx() % 4)]
+
+    def complete_multiline_names(self, text, line, begidx, endidx):
+        # If text is space-only, the user entered <tab> before any text.
+        # That normally means they want to indent the current line.
+        if not text.strip():
+            return self._complete_indentation(text, line, begidx, endidx)
+        return self.completedefault(text, line, begidx, endidx)
+
+    def completedefault(self, text, line, begidx, endidx):
+        if text.startswith("$"):
+            # Complete convenience variables
+            conv_vars = self.curframe.f_globals.get('__pdb_convenience_variables', {})
+            return [f"${name}" for name in conv_vars if name.startswith(text[1:])]
+
+        state = 0
+        matches = []
+        completer = self.rlcompleter(self.curframe.f_globals | self.curframe.f_locals)
+        while (match := completer.complete(text, state)) is not None:
+            matches.append(match)
+            state += 1
+        return matches
+
+    @contextmanager
+    def _enable_rlcompleter(self, ns):
+        try:
+            import readline
+        except ImportError:
+            yield
+            return
+
+        try:
+            completer = self.rlcompleter(ns)
+            old_completer = readline.get_completer()
+            readline.set_completer(completer.complete)
+            yield
+        finally:
+            readline.set_completer(old_completer)
+
+    # Pdb meta commands, only intended to be used internally by pdb
+
+    def _pdbcmd_print_frame_status(self, arg):
+        self.print_stack_trace(0)
+        self._validate_file_mtime()
+        self._show_display()
+
+    def _pdbcmd_silence_frame_status(self, arg):
+        if self.cmdqueue and self.cmdqueue[-1] == '_pdbcmd_print_frame_status':
+            self.cmdqueue.pop()
+
+    def _pdbcmd_restore_lastcmd(self, arg):
+        self.lastcmd = arg
+
+    # Command definitions, called by cmdloop()
+    # The argument is the remaining string on the command line
+    # Return true to exit from the command loop
+
+    def do_commands(self, arg):
+        """(Pdb) commands [bpnumber]
+        (com) ...
+        (com) end
+        (Pdb)
+
+        Specify a list of commands for breakpoint number bpnumber.
+        The commands themselves are entered on the following lines.
+        Type a line containing just 'end' to terminate the commands.
+        The commands are executed when the breakpoint is hit.
+
+        To remove all commands from a breakpoint, type commands and
+        follow it immediately with end; that is, give no commands.
+
+        With no bpnumber argument, commands refers to the last
+        breakpoint set.
+
+        You can use breakpoint commands to start your program up
+        again. Simply use the continue command, or step, or any other
+        command that resumes execution.
+
+        Specifying any command resuming execution (currently continue,
+        step, next, return, jump, quit and their abbreviations)
+        terminates the command list (as if that command was
+        immediately followed by end). This is because any time you
+        resume execution (even with a simple next or step), you may
+        encounter another breakpoint -- which could have its own
+        command list, leading to ambiguities about which list to
+        execute.
+
+        If you use the 'silent' command in the command list, the usual
+        message about stopping at a breakpoint is not printed. This
+        may be desirable for breakpoints that are to print a specific
+        message and then continue. If none of the other commands
+        print anything, you will see no sign that the breakpoint was
+        reached.
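+
+        For example (an illustrative session):
+            (Pdb) commands 1
+            (com) silent
+            (com) p some_variable
+            (com) end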
+ """ + if not arg: + bnum = len(bdb.Breakpoint.bpbynumber) - 1 + else: + try: + bnum = int(arg) + except: + self._print_invalid_arg(arg) + return + try: + self.get_bpbynumber(bnum) + except ValueError as err: + self.error('cannot set commands: %s' % err) + return + + self.commands_bnum = bnum + # Save old definitions for the case of a keyboard interrupt. + if bnum in self.commands: + old_commands = self.commands[bnum] + else: + old_commands = None + self.commands[bnum] = [] + + prompt_back = self.prompt + self.prompt = '(com) ' + self.commands_defining = True + try: + self.cmdloop() + except KeyboardInterrupt: + # Restore old definitions. + if old_commands: + self.commands[bnum] = old_commands + else: + del self.commands[bnum] + self.error('command definition aborted, old commands restored') + finally: + self.commands_defining = False + self.prompt = prompt_back + + complete_commands = _complete_bpnumber + + def do_break(self, arg, temporary=False): + """b(reak) [ ([filename:]lineno | function) [, condition] ] + + Without argument, list all breaks. + + With a line number argument, set a break at this line in the + current file. With a function name, set a break at the first + executable line of that function. If a second argument is + present, it is a string specifying an expression which must + evaluate to true before the breakpoint is honored. + + The line number may be prefixed with a filename and a colon, + to specify a breakpoint in another file (probably one that + hasn't been loaded yet). The file is searched for on + sys.path; the .py suffix may be omitted. + """ + if not arg: + if self.breaks: # There's at least one + self.message("Num Type Disp Enb Where") + for bp in bdb.Breakpoint.bpbynumber: + if bp: + self.message(bp.bpformat()) + return + # parse arguments; comma has lowest precedence + # and cannot occur in filename + filename = None + lineno = None + cond = None + module_globals = None + comma = arg.find(',') + if comma > 0: + # parse stuff after comma: "condition" + cond = arg[comma+1:].lstrip() + if err := self._compile_error_message(cond): + self.error('Invalid condition %s: %r' % (cond, err)) + return + arg = arg[:comma].rstrip() + # parse stuff before comma: [filename:]lineno | function + colon = arg.rfind(':') + funcname = None + if colon >= 0: + filename = arg[:colon].rstrip() + f = self.lookupmodule(filename) + if not f: + self.error('%r not found from sys.path' % filename) + return + else: + filename = f + arg = arg[colon+1:].lstrip() + try: + lineno = int(arg) + except ValueError: + self.error('Bad lineno: %s' % arg) + return + else: + # no colon; can be lineno or function + try: + lineno = int(arg) + except ValueError: + try: + func = eval(arg, + self.curframe.f_globals, + self.curframe.f_locals) + except: + func = arg + try: + if hasattr(func, '__func__'): + func = func.__func__ + code = func.__code__ + #use co_name to identify the bkpt (function names + #could be aliased, but co_name is invariant) + funcname = code.co_name + lineno = find_first_executable_line(code) + filename = code.co_filename + module_globals = func.__globals__ + except: + # last thing to try + (ok, filename, ln) = self.lineinfo(arg) + if not ok: + self.error('The specified object %r is not a function ' + 'or was not found along sys.path.' 
% arg)
+                        return
+                    funcname = ok # ok contains a function name
+                    lineno = int(ln)
+        if not filename:
+            filename = self.defaultFile()
+        filename = self.canonic(filename)
+        # Check for reasonable breakpoint
+        line = self.checkline(filename, lineno, module_globals)
+        if line:
+            # now set the break point
+            err = self.set_break(filename, line, temporary, cond, funcname)
+            if err:
+                self.error(err)
+            else:
+                bp = self.get_breaks(filename, line)[-1]
+                self.message("Breakpoint %d at %s:%d" %
+                             (bp.number, bp.file, bp.line))
+
+    # To be overridden in derived debuggers
+    def defaultFile(self):
+        """Produce a reasonable default."""
+        filename = self.curframe.f_code.co_filename
+        if filename == '<string>' and self.mainpyfile:
+            filename = self.mainpyfile
+        return filename
+
+    do_b = do_break
+
+    complete_break = _complete_location
+    complete_b = _complete_location
+
+    def do_tbreak(self, arg):
+        """tbreak [ ([filename:]lineno | function) [, condition] ]
+
+        Same arguments as break, but sets a temporary breakpoint: it
+        is automatically deleted when first hit.
+        """
+        self.do_break(arg, True)
+
+    complete_tbreak = _complete_location
+
+    def lineinfo(self, identifier):
+        failed = (None, None, None)
+        # Input is identifier, may be in single quotes
+        idstring = identifier.split("'")
+        if len(idstring) == 1:
+            # not in single quotes
+            id = idstring[0].strip()
+        elif len(idstring) == 3:
+            # quoted
+            id = idstring[1].strip()
+        else:
+            return failed
+        if id == '': return failed
+        parts = id.split('.')
+        # Protection for derived debuggers
+        if parts[0] == 'self':
+            del parts[0]
+            if len(parts) == 0:
+                return failed
+        # Best first guess at file to look at
+        fname = self.defaultFile()
+        if len(parts) == 1:
+            item = parts[0]
+        else:
+            # More than one part.
+            # First is module, second is method/class
+            f = self.lookupmodule(parts[0])
+            if f:
+                fname = f
+                item = parts[1]
+            else:
+                return failed
+        answer = find_function(item, self.canonic(fname))
+        return answer or failed
+
+    def checkline(self, filename, lineno, module_globals=None):
+        """Check whether specified line seems to be executable.
+
+        Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
+        line or EOF). Warning: testing is not comprehensive.
+        """
+        # this method should be callable before starting debugging, so default
+        # to "no globals" if there is no current frame
+        frame = getattr(self, 'curframe', None)
+        if module_globals is None:
+            module_globals = frame.f_globals if frame else None
+        line = linecache.getline(filename, lineno, module_globals)
+        if not line:
+            self.message('End of file')
+            return 0
+        line = line.strip()
+        # Don't allow setting breakpoint at a blank line
+        if (not line or (line[0] == '#') or
+                (line[:3] == '"""') or line[:3] == "'''"):
+            self.error('Blank or comment')
+            return 0
+        return lineno
+
+    def do_enable(self, arg):
+        """enable bpnumber [bpnumber ...]
+
+        Enables the breakpoints given as a space separated list of
+        breakpoint numbers.
+        """
+        if not arg:
+            self._print_invalid_arg(arg)
+            return
+        args = arg.split()
+        for i in args:
+            try:
+                bp = self.get_bpbynumber(i)
+            except ValueError as err:
+                self.error(err)
+            else:
+                bp.enable()
+                self.message('Enabled %s' % bp)
+
+    complete_enable = _complete_bpnumber
+
+    def do_disable(self, arg):
+        """disable bpnumber [bpnumber ...]
+
+        Disables the breakpoints given as a space separated list of
+        breakpoint numbers.
Disabling a breakpoint means it cannot + cause the program to stop execution, but unlike clearing a + breakpoint, it remains in the list of breakpoints and can be + (re-)enabled. + """ + if not arg: + self._print_invalid_arg(arg) + return + args = arg.split() + for i in args: + try: + bp = self.get_bpbynumber(i) + except ValueError as err: + self.error(err) + else: + bp.disable() + self.message('Disabled %s' % bp) + + complete_disable = _complete_bpnumber + + def do_condition(self, arg): + """condition bpnumber [condition] + + Set a new condition for the breakpoint, an expression which + must evaluate to true before the breakpoint is honored. If + condition is absent, any existing condition is removed; i.e., + the breakpoint is made unconditional. + """ + if not arg: + self._print_invalid_arg(arg) + return + args = arg.split(' ', 1) + try: + cond = args[1] + if err := self._compile_error_message(cond): + self.error('Invalid condition %s: %r' % (cond, err)) + return + except IndexError: + cond = None + try: + bp = self.get_bpbynumber(args[0].strip()) + except IndexError: + self.error('Breakpoint number expected') + except ValueError as err: + self.error(err) + else: + bp.cond = cond + if not cond: + self.message('Breakpoint %d is now unconditional.' % bp.number) + else: + self.message('New condition set for breakpoint %d.' % bp.number) + + complete_condition = _complete_bpnumber + + def do_ignore(self, arg): + """ignore bpnumber [count] + + Set the ignore count for the given breakpoint number. If + count is omitted, the ignore count is set to 0. A breakpoint + becomes active when the ignore count is zero. When non-zero, + the count is decremented each time the breakpoint is reached + and the breakpoint is not disabled and any associated + condition evaluates to true. + """ + if not arg: + self._print_invalid_arg(arg) + return + args = arg.split() + if not args: + self.error('Breakpoint number expected') + return + if len(args) == 1: + count = 0 + elif len(args) == 2: + try: + count = int(args[1]) + except ValueError: + self._print_invalid_arg(arg) + return + else: + self._print_invalid_arg(arg) + return + try: + bp = self.get_bpbynumber(args[0].strip()) + except ValueError as err: + self.error(err) + else: + bp.ignore = count + if count > 0: + if count > 1: + countstr = '%d crossings' % count + else: + countstr = '1 crossing' + self.message('Will ignore next %s of breakpoint %d.' % + (countstr, bp.number)) + else: + self.message('Will stop next time breakpoint %d is reached.' + % bp.number) + + complete_ignore = _complete_bpnumber + + def _prompt_for_confirmation(self, prompt, default): + try: + reply = input(prompt) + except EOFError: + reply = default + return reply.strip().lower() + + def do_clear(self, arg): + """cl(ear) [filename:lineno | bpnumber ...] + + With a space separated list of breakpoint numbers, clear + those breakpoints. Without argument, clear all breaks (but + first ask confirmation). With a filename:lineno argument, + clear all breaks at that line in that file. + """ + if not arg: + reply = self._prompt_for_confirmation( + 'Clear all breaks? 
',
+                default='no',
+            )
+            if reply in ('y', 'yes'):
+                bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp]
+                self.clear_all_breaks()
+                for bp in bplist:
+                    self.message('Deleted %s' % bp)
+            return
+        if ':' in arg:
+            # Make sure it works for "clear C:\foo\bar.py:12"
+            i = arg.rfind(':')
+            filename = arg[:i]
+            arg = arg[i+1:]
+            try:
+                lineno = int(arg)
+            except ValueError:
+                err = "Invalid line number (%s)" % arg
+            else:
+                bplist = self.get_breaks(filename, lineno)[:]
+                err = self.clear_break(filename, lineno)
+            if err:
+                self.error(err)
+            else:
+                for bp in bplist:
+                    self.message('Deleted %s' % bp)
+            return
+        numberlist = arg.split()
+        for i in numberlist:
+            try:
+                bp = self.get_bpbynumber(i)
+            except ValueError as err:
+                self.error(err)
+            else:
+                self.clear_bpbynumber(i)
+                self.message('Deleted %s' % bp)
+    do_cl = do_clear # 'c' is already an abbreviation for 'continue'
+
+    complete_clear = _complete_location
+    complete_cl = _complete_location
+
+    def do_where(self, arg):
+        """w(here) [count]
+
+        Print a stack trace. If count is not specified, print the full stack.
+        If count is 0, print the current frame entry. If count is positive,
+        print count entries from the most recent frame. If count is negative,
+        print -count entries from the least recent frame.
+        An arrow indicates the "current frame", which determines the
+        context of most commands. 'bt' is an alias for this command.
+        """
+        if not arg:
+            count = None
+        else:
+            try:
+                count = int(arg)
+            except ValueError:
+                self.error('Invalid count (%s)' % arg)
+                return
+        self.print_stack_trace(count)
+    do_w = do_where
+    do_bt = do_where
+
+    def _select_frame(self, number):
+        assert 0 <= number < len(self.stack)
+        self.curindex = number
+        self.curframe = self.stack[self.curindex][0]
+        self.set_convenience_variable(self.curframe, '_frame', self.curframe)
+        self.print_stack_entry(self.stack[self.curindex])
+        self.lineno = None
+
+    def do_exceptions(self, arg):
+        """exceptions [number]
+
+        List or change current exception in an exception chain.
+
+        Without arguments, list all the current exceptions in the exception
+        chain. Exceptions will be numbered, with the current exception indicated
+        with an arrow.
+
+        If given an integer as argument, switch to the exception at that index.
+        """
+        if not self._chained_exceptions:
+            self.message(
+                "Did not find chained exceptions. To move between"
+                " exceptions, pdb/post_mortem must be given an exception"
+                " object rather than a traceback."
+            )
+            return
+        if not arg:
+            for ix, exc in enumerate(self._chained_exceptions):
+                prompt = ">" if ix == self._chained_exception_index else " "
+                rep = repr(exc)
+                if len(rep) > 80:
+                    rep = rep[:77] + "..."
+                indicator = (
+                    " -"
+                    if self._chained_exceptions[ix].__traceback__ is None
+                    else f"{ix:>3}"
+                )
+                self.message(f"{prompt} {indicator} {rep}")
+        else:
+            try:
+                number = int(arg)
+            except ValueError:
+                self.error("Argument must be an integer")
+                return
+            if 0 <= number < len(self._chained_exceptions):
+                if self._chained_exceptions[number].__traceback__ is None:
+                    self.error("This exception does not have a traceback, cannot jump to it")
+                    return
+
+                self._chained_exception_index = number
+                self.setup(None, self._chained_exceptions[number].__traceback__)
+                self.print_stack_entry(self.stack[self.curindex])
+            else:
+                self.error("No exception with that number")
+
+    def do_up(self, arg):
+        """u(p) [count]
+
+        Move the current frame count (default one) levels up in the
+        stack trace (to an older frame).
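+
+        For example (illustrative), 'up 2' moves two frames towards the
+        oldest frame, and a negative count jumps straight to the oldest
+        frame.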
+ """ + if self.curindex == 0: + self.error('Oldest frame') + return + try: + count = int(arg or 1) + except ValueError: + self.error('Invalid frame count (%s)' % arg) + return + if count < 0: + newframe = 0 + else: + newframe = max(0, self.curindex - count) + self._select_frame(newframe) + do_u = do_up + + def do_down(self, arg): + """d(own) [count] + + Move the current frame count (default one) levels down in the + stack trace (to a newer frame). + """ + if self.curindex + 1 == len(self.stack): + self.error('Newest frame') + return + try: + count = int(arg or 1) + except ValueError: + self.error('Invalid frame count (%s)' % arg) + return + if count < 0: + newframe = len(self.stack) - 1 + else: + newframe = min(len(self.stack) - 1, self.curindex + count) + self._select_frame(newframe) + do_d = do_down + + def do_until(self, arg): + """unt(il) [lineno] + + Without argument, continue execution until the line with a + number greater than the current one is reached. With a line + number, continue execution until a line with a number greater + or equal to that is reached. In both cases, also stop when + the current frame returns. + """ + if arg: + try: + lineno = int(arg) + except ValueError: + self.error('Error in argument: %r' % arg) + return + if lineno <= self.curframe.f_lineno: + self.error('"until" line number is smaller than current ' + 'line number') + return + else: + lineno = None + self.set_until(self.curframe, lineno) + return 1 + do_unt = do_until + + def do_step(self, arg): + """s(tep) + + Execute the current line, stop at the first possible occasion + (either in a function that is called or in the current + function). + """ + if arg: + self._print_invalid_arg(arg) + return + self.set_step() + return 1 + do_s = do_step + + def do_next(self, arg): + """n(ext) + + Continue execution until the next line in the current function + is reached or it returns. + """ + if arg: + self._print_invalid_arg(arg) + return + self.set_next(self.curframe) + return 1 + do_n = do_next + + def do_run(self, arg): + """run [args...] + + Restart the debugged python program. If a string is supplied + it is split with "shlex", and the result is used as the new + sys.argv. History, breakpoints, actions and debugger options + are preserved. "restart" is an alias for "run". + """ + if self.mode == 'inline': + self.error('run/restart command is disabled when pdb is running in inline mode.\n' + 'Use the command line interface to enable restarting your program\n' + 'e.g. "python -m pdb myscript.py"') + return + if arg: + import shlex + argv0 = sys.argv[0:1] + try: + sys.argv = shlex.split(arg) + except ValueError as e: + self.error('Cannot run %s: %s' % (arg, e)) + return + sys.argv[:0] = argv0 + # this is caught in the main debugger loop + raise Restart + + do_restart = do_run + + def do_return(self, arg): + """r(eturn) + + Continue execution until the current function returns. + """ + if arg: + self._print_invalid_arg(arg) + return + self.set_return(self.curframe) + return 1 + do_r = do_return + + def do_continue(self, arg): + """c(ont(inue)) + + Continue execution, only stop when a breakpoint is encountered. + """ + if arg: + self._print_invalid_arg(arg) + return + if not self.nosigint: + try: + Pdb._previous_sigint_handler = \ + signal.signal(signal.SIGINT, self.sigint_handler) + except ValueError: + # ValueError happens when do_continue() is invoked from + # a non-main thread in which case we just continue without + # SIGINT set. Would printing a message here (once) make + # sense? 
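+                # (signal.signal() is documented to raise ValueError when
+                # called outside the main thread of the main interpreter.)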
+ pass + self.set_continue() + return 1 + do_c = do_cont = do_continue + + def do_jump(self, arg): + """j(ump) lineno + + Set the next line that will be executed. Only available in + the bottom-most frame. This lets you jump back and execute + code again, or jump forward to skip code that you don't want + to run. + + It should be noted that not all jumps are allowed -- for + instance it is not possible to jump into the middle of a + for loop or out of a finally clause. + """ + if not arg: + self._print_invalid_arg(arg) + return + if self.curindex + 1 != len(self.stack): + self.error('You can only jump within the bottom frame') + return + try: + arg = int(arg) + except ValueError: + self.error("The 'jump' command requires a line number") + else: + try: + # Do the jump, fix up our copy of the stack, and display the + # new position + self.curframe.f_lineno = arg + self.stack[self.curindex] = self.stack[self.curindex][0], arg + self.print_stack_entry(self.stack[self.curindex]) + except ValueError as e: + self.error('Jump failed: %s' % e) + do_j = do_jump + + def _create_recursive_debugger(self): + return Pdb(self.completekey, self.stdin, self.stdout) + + def do_debug(self, arg): + """debug code + + Enter a recursive debugger that steps through the code + argument (which is an arbitrary expression or statement to be + executed in the current environment). + """ + if not arg: + self._print_invalid_arg(arg) + return + self.stop_trace() + globals = self.curframe.f_globals + locals = self.curframe.f_locals + p = self._create_recursive_debugger() + p.prompt = "(%s) " % self.prompt.strip() + self.message("ENTERING RECURSIVE DEBUGGER") + try: + sys.call_tracing(p.run, (arg, globals, locals)) + except Exception: + self._error_exc() + self.message("LEAVING RECURSIVE DEBUGGER") + self.start_trace() + self.lastcmd = p.lastcmd + + complete_debug = _complete_expression + + def do_quit(self, arg): + """q(uit) | exit + + Quit from the debugger. The program being executed is aborted. + """ + # Show prompt to kill process when in 'inline' mode and if pdb was not + # started from an interactive console. The attribute sys.ps1 is only + # defined if the interpreter is in interactive mode. + if self.mode == 'inline' and not hasattr(sys, 'ps1'): + while True: + try: + reply = input('Quitting pdb will kill the process. Quit anyway? [y/n] ') + reply = reply.lower().strip() + except EOFError: + reply = 'y' + self.message('') + if reply == 'y' or reply == '': + sys.exit(1) + elif reply.lower() == 'n': + return + + self._user_requested_quit = True + self.set_quit() + return 1 + + do_q = do_quit + do_exit = do_quit + + def do_EOF(self, arg): + """EOF + + Handles the receipt of EOF as a command. + """ + self.message('') + return self.do_quit(arg) + + def do_args(self, arg): + """a(rgs) + + Print the argument list of the current function. + """ + if arg: + self._print_invalid_arg(arg) + return + co = self.curframe.f_code + dict = self.curframe.f_locals + n = co.co_argcount + co.co_kwonlyargcount + if co.co_flags & inspect.CO_VARARGS: n = n+1 + if co.co_flags & inspect.CO_VARKEYWORDS: n = n+1 + for i in range(n): + name = co.co_varnames[i] + if name in dict: + self.message('%s = %s' % (name, self._safe_repr(dict[name], name))) + else: + self.message('%s = *** undefined ***' % (name,)) + do_a = do_args + + def do_retval(self, arg): + """retval + + Print the return value for the last return of a function. 
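+
+        (Internally this reads the '__return__' entry that the debugger
+        stores in the frame's locals when the function returns.)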
+ """ + if arg: + self._print_invalid_arg(arg) + return + if '__return__' in self.curframe.f_locals: + self.message(self._safe_repr(self.curframe.f_locals['__return__'], "retval")) + else: + self.error('Not yet returned!') + do_rv = do_retval + + def _getval(self, arg): + try: + return eval(arg, self.curframe.f_globals, self.curframe.f_locals) + except: + self._error_exc() + raise + + def _getval_except(self, arg, frame=None): + try: + if frame is None: + return eval(arg, self.curframe.f_globals, self.curframe.f_locals) + else: + return eval(arg, frame.f_globals, frame.f_locals) + except BaseException as exc: + return _rstr('** raised %s **' % self._format_exc(exc)) + + def _error_exc(self): + exc = sys.exception() + self.error(self._format_exc(exc)) + + def _msg_val_func(self, arg, func): + try: + val = self._getval(arg) + except: + return # _getval() has displayed the error + try: + self.message(func(val)) + except: + self._error_exc() + + def _safe_repr(self, obj, expr): + try: + return repr(obj) + except Exception as e: + return _rstr(f"*** repr({expr}) failed: {self._format_exc(e)} ***") + + def do_p(self, arg): + """p expression + + Print the value of the expression. + """ + if not arg: + self._print_invalid_arg(arg) + return + self._msg_val_func(arg, repr) + + def do_pp(self, arg): + """pp expression + + Pretty-print the value of the expression. + """ + if not arg: + self._print_invalid_arg(arg) + return + self._msg_val_func(arg, pprint.pformat) + + complete_print = _complete_expression + complete_p = _complete_expression + complete_pp = _complete_expression + + def do_list(self, arg): + """l(ist) [first[, last] | .] + + List source code for the current file. Without arguments, + list 11 lines around the current line or continue the previous + listing. With . as argument, list 11 lines around the current + line. With one argument, list 11 lines starting at that line. + With two arguments, list the given range; if the second + argument is less than the first, it is a count. + + The current line in the current frame is indicated by "->". + If an exception is being debugged, the line where the + exception was originally raised or propagated is indicated by + ">>", if it differs from the current line. + """ + self.lastcmd = 'list' + last = None + if arg and arg != '.': + try: + if ',' in arg: + first, last = arg.split(',') + first = int(first.strip()) + last = int(last.strip()) + if last < first: + # assume it's a count + last = first + last + else: + first = int(arg.strip()) + first = max(1, first - 5) + except ValueError: + self.error('Error in argument: %r' % arg) + return + elif self.lineno is None or arg == '.': + first = max(1, self.curframe.f_lineno - 5) + else: + first = self.lineno + 1 + if last is None: + last = first + 10 + filename = self.curframe.f_code.co_filename + breaklist = self.get_file_breaks(filename) + try: + lines = linecache.getlines(filename, self.curframe.f_globals) + self._print_lines(lines[first-1:last], first, breaklist, + self.curframe) + self.lineno = min(last, len(lines)) + if len(lines) < last: + self.message('[EOF]') + except KeyboardInterrupt: + pass + self._validate_file_mtime() + do_l = do_list + + def do_longlist(self, arg): + """ll | longlist + + List the whole source code for the current function or frame. 
+ """ + if arg: + self._print_invalid_arg(arg) + return + filename = self.curframe.f_code.co_filename + breaklist = self.get_file_breaks(filename) + try: + lines, lineno = self._getsourcelines(self.curframe) + except OSError as err: + self.error(err) + return + self._print_lines(lines, lineno, breaklist, self.curframe) + self._validate_file_mtime() + do_ll = do_longlist + + def do_source(self, arg): + """source expression + + Try to get source code for the given object and display it. + """ + if not arg: + self._print_invalid_arg(arg) + return + try: + obj = self._getval(arg) + except: + return + try: + lines, lineno = self._getsourcelines(obj) + except (OSError, TypeError) as err: + self.error(err) + return + self._print_lines(lines, lineno) + + complete_source = _complete_expression + + def _print_lines(self, lines, start, breaks=(), frame=None): + """Print a range of lines.""" + if frame: + current_lineno = frame.f_lineno + exc_lineno = self.tb_lineno.get(frame, -1) + else: + current_lineno = exc_lineno = -1 + for lineno, line in enumerate(lines, start): + s = str(lineno).rjust(3) + if len(s) < 4: + s += ' ' + if lineno in breaks: + s += 'B' + else: + s += ' ' + if lineno == current_lineno: + s += '->' + elif lineno == exc_lineno: + s += '>>' + if self.colorize: + line = self._colorize_code(line) + self.message(s + '\t' + line.rstrip()) + + def do_whatis(self, arg): + """whatis expression + + Print the type of the argument. + """ + if not arg: + self._print_invalid_arg(arg) + return + try: + value = self._getval(arg) + except: + # _getval() already printed the error + return + code = None + # Is it an instance method? + try: + code = value.__func__.__code__ + except Exception: + pass + if code: + self.message('Method %s' % code.co_name) + return + # Is it a function? + try: + code = value.__code__ + except Exception: + pass + if code: + self.message('Function %s' % code.co_name) + return + # Is it a class? + if value.__class__ is type: + self.message('Class %s.%s' % (value.__module__, value.__qualname__)) + return + # None of the above... + self.message(type(value)) + + complete_whatis = _complete_expression + + def do_display(self, arg): + """display [expression] + + Display the value of the expression if it changed, each time execution + stops in the current frame. + + Without expression, list all display expressions for the current frame. + """ + if not arg: + if self.displaying: + self.message('Currently displaying:') + for key, val in self.displaying.get(self.curframe, {}).items(): + self.message('%s: %s' % (key, self._safe_repr(val, key))) + else: + self.message('No expression is being displayed') + else: + if err := self._compile_error_message(arg): + self.error('Unable to display %s: %r' % (arg, err)) + else: + val = self._getval_except(arg) + self.displaying.setdefault(self.curframe, {})[arg] = val + self.message('display %s: %s' % (arg, self._safe_repr(val, arg))) + + complete_display = _complete_expression + + def do_undisplay(self, arg): + """undisplay [expression] + + Do not display the expression any more in the current frame. + + Without expression, clear all display expressions for the current frame. 
+ """ + if arg: + try: + del self.displaying.get(self.curframe, {})[arg] + except KeyError: + self.error('not displaying %s' % arg) + else: + self.displaying.pop(self.curframe, None) + + def complete_undisplay(self, text, line, begidx, endidx): + return [e for e in self.displaying.get(self.curframe, {}) + if e.startswith(text)] + + def do_interact(self, arg): + """interact + + Start an interactive interpreter whose global namespace + contains all the (global and local) names found in the current scope. + """ + ns = {**self.curframe.f_globals, **self.curframe.f_locals} + with self._enable_rlcompleter(ns): + console = _PdbInteractiveConsole(ns, message=self.message) + console.interact(banner="*pdb interact start*", + exitmsg="*exit from pdb interact command*") + + def do_alias(self, arg): + """alias [name [command]] + + Create an alias called 'name' that executes 'command'. The + command must *not* be enclosed in quotes. Replaceable + parameters can be indicated by %1, %2, and so on, while %* is + replaced by all the parameters. If no command is given, the + current alias for name is shown. If no name is given, all + aliases are listed. + + Aliases may be nested and can contain anything that can be + legally typed at the pdb prompt. Note! You *can* override + internal pdb commands with aliases! Those internal commands + are then hidden until the alias is removed. Aliasing is + recursively applied to the first word of the command line; all + other words in the line are left alone. + + As an example, here are two useful aliases (especially when + placed in the .pdbrc file): + + # Print instance variables (usage "pi classInst") + alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k]) + # Print instance variables in self + alias ps pi self + """ + args = arg.split() + if len(args) == 0: + keys = sorted(self.aliases.keys()) + for alias in keys: + self.message("%s = %s" % (alias, self.aliases[alias])) + return + if len(args) == 1: + if args[0] in self.aliases: + self.message("%s = %s" % (args[0], self.aliases[args[0]])) + else: + self.error(f"Unknown alias '{args[0]}'") + else: + # Do a validation check to make sure no replaceable parameters + # are skipped if %* is not used. + alias = ' '.join(args[1:]) + if '%*' not in alias: + consecutive = True + for idx in range(1, 10): + if f'%{idx}' not in alias: + consecutive = False + if f'%{idx}' in alias and not consecutive: + self.error("Replaceable parameters must be consecutive") + return + self.aliases[args[0]] = alias + + def do_unalias(self, arg): + """unalias name + + Delete the specified alias. + """ + args = arg.split() + if len(args) == 0: + self._print_invalid_arg(arg) + return + if args[0] in self.aliases: + del self.aliases[args[0]] + + def complete_unalias(self, text, line, begidx, endidx): + return [a for a in self.aliases if a.startswith(text)] + + # List of all the commands making the program resume execution. + commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return', + 'do_until', 'do_quit', 'do_jump'] + + # Print a traceback starting at the top stack frame. + # The most recently entered frame is printed last; + # this is different from dbx and gdb, but consistent with + # the Python interpreter's stack trace. + # It is also consistent with the up/down commands (which are + # compatible with dbx and gdb: up moves towards 'main()' + # and down moves towards the most recent stack frame). 
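+    # The count argument controls how much of the stack is printed: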
+ # * if count is None, prints the full stack + # * if count = 0, prints the current frame entry + # * if count < 0, prints -count least recent frame entries + # * if count > 0, prints count most recent frame entries + + def print_stack_trace(self, count=None): + if count is None: + stack_to_print = self.stack + elif count == 0: + stack_to_print = [self.stack[self.curindex]] + elif count < 0: + stack_to_print = self.stack[:-count] + else: + stack_to_print = self.stack[-count:] + try: + for frame_lineno in stack_to_print: + self.print_stack_entry(frame_lineno) + except KeyboardInterrupt: + pass + + def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix): + frame, lineno = frame_lineno + if frame is self.curframe: + prefix = '> ' + else: + prefix = ' ' + stack_entry = self.format_stack_entry(frame_lineno, prompt_prefix) + if self.colorize: + lines = stack_entry.split(prompt_prefix, 1) + if len(lines) > 1: + # We have some code to display + lines[1] = self._colorize_code(lines[1]) + stack_entry = prompt_prefix.join(lines) + self.message(prefix + stack_entry) + + # Provide help + + def do_help(self, arg): + """h(elp) + + Without argument, print the list of available commands. + With a command name as argument, print help about that command. + "help pdb" shows the full pdb documentation. + "help exec" gives help on the ! command. + """ + if not arg: + return cmd.Cmd.do_help(self, arg) + try: + try: + topic = getattr(self, 'help_' + arg) + return topic() + except AttributeError: + command = getattr(self, 'do_' + arg) + except AttributeError: + self.error('No help for %r' % arg) + else: + if sys.flags.optimize >= 2: + self.error('No help for %r; please do not run Python with -OO ' + 'if you need command help' % arg) + return + if command.__doc__ is None: + self.error('No help for %r; __doc__ string missing' % arg) + return + self.message(self._help_message_from_doc(command.__doc__)) + + do_h = do_help + + def help_exec(self): + """(!) statement + + Execute the (one-line) statement in the context of the current + stack frame. The exclamation point can be omitted unless the + first word of the statement resembles a debugger command, e.g.: + (Pdb) ! n=42 + (Pdb) + + To assign to a global variable you must always prefix the command with + a 'global' command, e.g.: + (Pdb) global list_options; list_options = ['-l'] + (Pdb) + """ + self.message((self.help_exec.__doc__ or '').strip()) + + def help_pdb(self): + help() + + # other helper functions + + def lookupmodule(self, filename): + """Helper function for break/clear parsing -- may be overridden. + + lookupmodule() translates (possibly incomplete) file or module name + into an absolute file name. + + filename could be in format of: + * an absolute path like '/path/to/file.py' + * a relative path like 'file.py' or 'dir/file.py' + * a module name like 'module' or 'package.module' + + files and modules will be searched in sys.path. 
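+
+        For example (illustrative), 'package.module' is first rewritten to
+        'package/module.py' and then searched for relative to each sys.path
+        entry.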
+        """
+        if not filename.endswith('.py'):
+            # A module is passed in so convert it to equivalent file
+            filename = filename.replace('.', os.sep) + '.py'
+
+        if os.path.isabs(filename):
+            if os.path.exists(filename):
+                return filename
+            return None
+
+        for dirname in sys.path:
+            while os.path.islink(dirname):
+                dirname = os.readlink(dirname)
+            fullname = os.path.join(dirname, filename)
+            if os.path.exists(fullname):
+                return fullname
+        return None
+
+    def _run(self, target: _ExecutableTarget):
+        # When bdb sets tracing, a number of call and line events happen
+        # BEFORE debugger even reaches user's code (and the exact sequence of
+        # events depends on python version). Take special measures to
+        # avoid stopping before reaching the main script (see user_line and
+        # user_call for details).
+        self._wait_for_mainpyfile = True
+        self._user_requested_quit = False
+
+        self.mainpyfile = self.canonic(target.filename)
+
+        # The target has to run in __main__ namespace (or imports from
+        # __main__ will break). Clear __main__ and replace with
+        # the target namespace.
+        import __main__
+        __main__.__dict__.clear()
+        __main__.__dict__.update(target.namespace)
+
+        # Clear the mtime table for program reruns, assume all the files
+        # are up to date.
+        self._file_mtime_table.clear()
+
+        self.run(target.code)
+
+    def _format_exc(self, exc: BaseException):
+        return traceback.format_exception_only(exc)[-1].strip()
+
+    def _compile_error_message(self, expr):
+        """Return the error message as string if compiling `expr` fails."""
+        try:
+            compile(expr, "<stdin>", "eval")
+        except SyntaxError as exc:
+            return _rstr(self._format_exc(exc))
+        return ""
+
+    def _getsourcelines(self, obj):
+        # GH-103319
+        # inspect.getsourcelines() returns lineno = 0 for
+        # module-level frame which breaks our code print line number
+        # This method should be replaced by inspect.getsourcelines(obj)
+        # once this bug is fixed in inspect
+        lines, lineno = inspect.getsourcelines(obj)
+        lineno = max(1, lineno)
+        return lines, lineno
+
+    def _help_message_from_doc(self, doc, usage_only=False):
+        lines = [line.strip() for line in doc.rstrip().splitlines()]
+        if not lines:
+            return "No help message found."
+        if "" in lines:
+            usage_end = lines.index("")
+        else:
+            usage_end = 1
+        formatted = []
+        indent = " " * len(self.prompt)
+        for i, line in enumerate(lines):
+            if i == 0:
+                prefix = "Usage: "
+            elif i < usage_end:
+                prefix = "       "
+            else:
+                if usage_only:
+                    break
+                prefix = ""
+            formatted.append(indent + prefix + line)
+        return "\n".join(formatted)
+
+    def _print_invalid_arg(self, arg):
+        """Return the usage string for a function."""
+
+        if not arg:
+            self.error("Argument is required for this command")
+        else:
+            self.error(f"Invalid argument: {arg}")
+
+        # Yes it's a bit hacky. Get the caller name, get the method based on
+        # that name, and get the docstring from that method.
+        # This should NOT fail if the caller is a method of this class.
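+        # For example (illustrative), when called from do_step the caller
+        # name is "do_step", so do_step's docstring supplies the usage text.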
+ doc = inspect.getdoc(getattr(self, sys._getframe(1).f_code.co_name)) + if doc is not None: + self.message(self._help_message_from_doc(doc, usage_only=True)) + +# Collect all command help into docstring, if not run with -OO + +if __doc__ is not None: + # unfortunately we can't guess this order from the class definition + _help_order = [ + 'help', 'where', 'down', 'up', 'break', 'tbreak', 'clear', 'disable', + 'enable', 'ignore', 'condition', 'commands', 'step', 'next', 'until', + 'jump', 'return', 'retval', 'run', 'continue', 'list', 'longlist', + 'args', 'p', 'pp', 'whatis', 'source', 'display', 'undisplay', + 'interact', 'alias', 'unalias', 'debug', 'quit', + ] + + for _command in _help_order: + __doc__ += getattr(Pdb, 'do_' + _command).__doc__.strip() + '\n\n' + __doc__ += Pdb.help_exec.__doc__ + + del _help_order, _command + + +# Simplified interface + +def run(statement, globals=None, locals=None): + """Execute the *statement* (given as a string or a code object) + under debugger control. + + The debugger prompt appears before any code is executed; you can set + breakpoints and type continue, or you can step through the statement + using step or next. + + The optional *globals* and *locals* arguments specify the + environment in which the code is executed; by default the + dictionary of the module __main__ is used (see the explanation of + the built-in exec() or eval() functions.). + """ + Pdb().run(statement, globals, locals) + +def runeval(expression, globals=None, locals=None): + """Evaluate the *expression* (given as a string or a code object) + under debugger control. + + When runeval() returns, it returns the value of the expression. + Otherwise this function is similar to run(). + """ + return Pdb().runeval(expression, globals, locals) + +def runctx(statement, globals, locals): + # B/W compatibility + run(statement, globals, locals) + +def runcall(*args, **kwds): + """Call the function (a function or method object, not a string) + with the given arguments. + + When runcall() returns, it returns whatever the function call + returned. The debugger prompt appears as soon as the function is + entered. + """ + return Pdb().runcall(*args, **kwds) + +def set_trace(*, header=None, commands=None): + """Enter the debugger at the calling stack frame. + + This is useful to hard-code a breakpoint at a given point in a + program, even if the code is not otherwise being debugged (e.g. when + an assertion fails). If given, *header* is printed to the console + just before debugging begins. *commands* is an optional list of + pdb commands to run when the debugger starts. + """ + if Pdb._last_pdb_instance is not None: + pdb = Pdb._last_pdb_instance + else: + pdb = Pdb(mode='inline', backend='monitoring', colorize=True) + if header is not None: + pdb.message(header) + pdb.set_trace(sys._getframe().f_back, commands=commands) + +async def set_trace_async(*, header=None, commands=None): + """Enter the debugger at the calling stack frame, but in async mode. + + This should be used as await pdb.set_trace_async(). Users can do await + if they enter the debugger with this function. Otherwise it's the same + as set_trace(). 
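+
+    Example (illustrative):
+
+        async def main():
+            await pdb.set_trace_async()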
+ """ + if Pdb._last_pdb_instance is not None: + pdb = Pdb._last_pdb_instance + else: + pdb = Pdb(mode='inline', backend='monitoring', colorize=True) + if header is not None: + pdb.message(header) + await pdb.set_trace_async(sys._getframe().f_back, commands=commands) + +# Remote PDB + +class _PdbServer(Pdb): + def __init__( + self, + sockfile, + signal_server=None, + owns_sockfile=True, + colorize=False, + **kwargs, + ): + self._owns_sockfile = owns_sockfile + self._interact_state = None + self._sockfile = sockfile + self._command_name_cache = [] + self._write_failed = False + if signal_server: + # Only started by the top level _PdbServer, not recursive ones. + self._start_signal_listener(signal_server) + # Override the `colorize` attribute set by the parent constructor, + # because it checks the server's stdout, rather than the client's. + super().__init__(colorize=False, **kwargs) + self.colorize = colorize + + @staticmethod + def protocol_version(): + # By default, assume a client and server are compatible if they run + # the same Python major.minor version. We'll try to keep backwards + # compatibility between patch versions of a minor version if possible. + # If we do need to change the protocol in a patch version, we'll change + # `revision` to the patch version where the protocol changed. + # We can ignore compatibility for pre-release versions; sys.remote_exec + # can't attach to a pre-release version except from that same version. + v = sys.version_info + revision = 0 + return int(f"{v.major:02X}{v.minor:02X}{revision:02X}F0", 16) + + def _ensure_valid_message(self, msg): + # Ensure the message conforms to our protocol. + # If anything needs to be changed here for a patch release of Python, + # the 'revision' in protocol_version() should be updated. + match msg: + case {"message": str(), "type": str()}: + # Have the client show a message. The client chooses how to + # format the message based on its type. The currently defined + # types are "info" and "error". If a message has a type the + # client doesn't recognize, it must be treated as "info". + pass + case {"help": str()}: + # Have the client show the help for a given argument. + pass + case {"prompt": str(), "state": str()}: + # Have the client display the given prompt and wait for a reply + # from the user. If the client recognizes the state it may + # enable mode-specific features like multi-line editing. + # If it doesn't recognize the state it must prompt for a single + # line only and send it directly to the server. A server won't + # progress until it gets a "reply" or "signal" message, but can + # process "complete" requests while waiting for the reply. + pass + case { + "completions": list(completions) + } if all(isinstance(c, str) for c in completions): + # Return valid completions for a client's "complete" request. + pass + case { + "command_list": list(command_list) + } if all(isinstance(c, str) for c in command_list): + # Report the list of legal PDB commands to the client. + # Due to aliases this list is not static, but the client + # needs to know it for multi-line editing. + pass + case _: + raise AssertionError( + f"PDB message doesn't follow the schema! {msg}" + ) + + @classmethod + def _start_signal_listener(cls, address): + def listener(sock): + with closing(sock): + # Check if the interpreter is finalizing every quarter of a second. + # Clean up and exit if so. 
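+                # (The 0.25s recv timeout set below is what bounds how long
+                # stop_thread() may wait for this loop to notice shutdown.)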
+                sock.settimeout(0.25)
+                sock.shutdown(socket.SHUT_WR)
+                while not shut_down.is_set():
+                    try:
+                        data = sock.recv(1024)
+                    except socket.timeout:
+                        continue
+                    if data == b"":
+                        return  # EOF
+                    signal.raise_signal(signal.SIGINT)
+
+        def stop_thread():
+            shut_down.set()
+            thread.join()
+
+        # Use a daemon thread so that we don't detach until after all non-daemon
+        # threads are done. Use an atexit handler to stop gracefully at that point,
+        # so that our thread is stopped before the interpreter is torn down.
+        shut_down = threading.Event()
+        thread = threading.Thread(
+            target=listener,
+            args=[socket.create_connection(address, timeout=5)],
+            daemon=True,
+        )
+        atexit.register(stop_thread)
+        thread.start()
+
+    def _send(self, **kwargs):
+        self._ensure_valid_message(kwargs)
+        json_payload = json.dumps(kwargs)
+        try:
+            self._sockfile.write(json_payload.encode() + b"\n")
+            self._sockfile.flush()
+        except (OSError, ValueError):
+            # We get an OSError if the network connection has dropped, and a
+            # ValueError if detach() has closed the sockfile. We'll
+            # handle this the next time we try to read from the client instead
+            # of trying to handle it from everywhere _send() may be called.
+            # Track this with a flag rather than assuming readline() will ever
+            # return an empty string because the socket may be half-closed.
+            self._write_failed = True
+
+    @typing.override
+    def message(self, msg, end="\n"):
+        self._send(message=str(msg) + end, type="info")
+
+    @typing.override
+    def error(self, msg):
+        self._send(message=str(msg), type="error")
+
+    def _get_input(self, prompt, state) -> str:
+        # Before displaying a (Pdb) prompt, send the list of PDB commands
+        # unless we've already sent an up-to-date list.
+        if state == "pdb" and not self._command_name_cache:
+            self._command_name_cache = self.completenames("", "", 0, 0)
+            self._send(command_list=self._command_name_cache)
+        self._send(prompt=prompt, state=state)
+        return self._read_reply()
+
+    def _read_reply(self):
+        # Loop until we get a 'reply' or 'signal' from the client,
+        # processing out-of-band 'complete' requests as they arrive.
+        while True:
+            if self._write_failed:
+                raise EOFError
+
+            msg = self._sockfile.readline()
+            if not msg:
+                raise EOFError
+
+            try:
+                payload = json.loads(msg)
+            except json.JSONDecodeError:
+                self.error(f"Disconnecting: client sent invalid JSON {msg!r}")
+                raise EOFError
+
+            match payload:
+                case {"reply": str(reply)}:
+                    return reply
+                case {"signal": str(signal)}:
+                    if signal == "INT":
+                        raise KeyboardInterrupt
+                    elif signal == "EOF":
+                        raise EOFError
+                    else:
+                        self.error(
+                            f"Received unrecognized signal: {signal}"
+                        )
+                        # Our best hope of recovering is to pretend we
+                        # got an EOF to exit whatever mode we're in.
+                        raise EOFError
+                case {
+                    "complete": {
+                        "text": str(text),
+                        "line": str(line),
+                        "begidx": int(begidx),
+                        "endidx": int(endidx),
+                    }
+                }:
+                    items = self._complete_any(text, line, begidx, endidx)
+                    self._send(completions=items)
+                    continue
+            # Valid JSON, but doesn't meet the schema.
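+            # (For example, a hypothetical {"unknown": 1} payload falls
+            # through every case above and is reported here.)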
+ self.error(f"Ignoring invalid message from client: {msg}") + + def _complete_any(self, text, line, begidx, endidx): + # If we're in 'interact' mode, we need to use the default completer + if self._interact_state: + compfunc = self.completedefault + else: + if begidx == 0: + return self.completenames(text, line, begidx, endidx) + + cmd = self.parseline(line)[0] + if cmd: + compfunc = getattr(self, "complete_" + cmd, self.completedefault) + else: + compfunc = self.completedefault + return compfunc(text, line, begidx, endidx) + + def cmdloop(self, intro=None): + self.preloop() + if intro is not None: + self.intro = intro + if self.intro: + self.message(str(self.intro)) + stop = None + while not stop: + if self._interact_state is not None: + try: + reply = self._get_input(prompt=">>> ", state="interact") + except KeyboardInterrupt: + # Match how KeyboardInterrupt is handled in a REPL + self.message("\nKeyboardInterrupt") + except EOFError: + self.message("\n*exit from pdb interact command*") + self._interact_state = None + else: + self._run_in_python_repl(reply) + continue + + if not self.cmdqueue: + try: + state = "commands" if self.commands_defining else "pdb" + reply = self._get_input(prompt=self.prompt, state=state) + except EOFError: + reply = "EOF" + + self.cmdqueue.append(reply) + + line = self.cmdqueue.pop(0) + line = self.precmd(line) + stop = self.onecmd(line) + stop = self.postcmd(stop, line) + self.postloop() + + def postloop(self): + super().postloop() + if self.quitting: + self.detach() + + def detach(self): + # Detach the debugger and close the socket without raising BdbQuit + self.quitting = False + if self._owns_sockfile: + # Don't try to reuse this instance, it's not valid anymore. + Pdb._last_pdb_instance = None + try: + self._sockfile.close() + except OSError: + # close() can fail if the connection was broken unexpectedly. + pass + + def do_debug(self, arg): + # Clear our cached list of valid commands; the recursive debugger might + # send its own differing list, and so ours needs to be re-sent. + self._command_name_cache = [] + return super().do_debug(arg) + + def do_alias(self, arg): + # Clear our cached list of valid commands; one might be added. + self._command_name_cache = [] + return super().do_alias(arg) + + def do_unalias(self, arg): + # Clear our cached list of valid commands; one might be removed. + self._command_name_cache = [] + return super().do_unalias(arg) + + def do_help(self, arg): + # Tell the client to render the help, since it might need a pager. + self._send(help=arg) + + do_h = do_help + + def _interact_displayhook(self, obj): + # Like the default `sys.displayhook` except sending a socket message. + if obj is not None: + self.message(repr(obj)) + builtins._ = obj + + def _run_in_python_repl(self, lines): + # Run one 'interact' mode code block against an existing namespace. + assert self._interact_state + save_displayhook = sys.displayhook + try: + sys.displayhook = self._interact_displayhook + code_obj = self._interact_state["compiler"](lines + "\n") + if code_obj is None: + raise SyntaxError("Incomplete command") + exec(code_obj, self._interact_state["ns"]) + except: + self._error_exc() + finally: + sys.displayhook = save_displayhook + + def do_interact(self, arg): + # Prepare to run 'interact' mode code blocks, and trigger the client + # to start treating all input as Python commands, not PDB ones. 
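+        # From here on, cmdloop() sees that _interact_state is not None and
+        # routes input through _run_in_python_repl() instead of onecmd().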
+        self.message("*pdb interact start*")
+        self._interact_state = dict(
+            compiler=codeop.CommandCompiler(),
+            ns={**self.curframe.f_globals, **self.curframe.f_locals},
+        )
+
+    @typing.override
+    def _create_recursive_debugger(self):
+        return _PdbServer(
+            self._sockfile,
+            owns_sockfile=False,
+            colorize=self.colorize,
+        )
+
+    @typing.override
+    def _prompt_for_confirmation(self, prompt, default):
+        try:
+            return self._get_input(prompt=prompt, state="confirm")
+        except (EOFError, KeyboardInterrupt):
+            return default
+
+    def do_run(self, arg):
+        self.error("remote PDB cannot restart the program")
+
+    do_restart = do_run
+
+    def _error_exc(self):
+        if self._interact_state and isinstance(sys.exception(), SystemExit):
+            # If we get a SystemExit in 'interact' mode, exit the REPL.
+            self._interact_state = None
+            ret = super()._error_exc()
+            self.message("*exit from pdb interact command*")
+            return ret
+        else:
+            return super()._error_exc()
+
+    def default(self, line):
+        # Unlike Pdb, don't prompt for more lines of a multi-line command.
+        # The remote needs to send us the whole block in one go.
+        try:
+            candidate = line.removeprefix("!") + "\n"
+            if codeop.compile_command(candidate, "<stdin>", "single") is None:
+                raise SyntaxError("Incomplete command")
+            return super().default(candidate)
+        except:
+            self._error_exc()
+
+
+class _PdbClient:
+    def __init__(self, pid, server_socket, interrupt_sock):
+        self.pid = pid
+        self.read_buf = b""
+        self.signal_read = None
+        self.signal_write = None
+        self.sigint_received = False
+        self.raise_on_sigint = False
+        self.server_socket = server_socket
+        self.interrupt_sock = interrupt_sock
+        self.pdb_instance = Pdb()
+        self.pdb_commands = set()
+        self.completion_matches = []
+        self.state = "dumb"
+        self.write_failed = False
+        self.multiline_block = False
+
+    def _ensure_valid_message(self, msg):
+        # Ensure the message conforms to our protocol.
+        # If anything needs to be changed here for a patch release of Python,
+        # the 'revision' in protocol_version() should be updated.
+        match msg:
+            case {"reply": str()}:
+                # Send input typed by a user at a prompt to the remote PDB.
+                pass
+            case {"signal": "EOF"}:
+                # Tell the remote PDB that the user pressed ^D at a prompt.
+                pass
+            case {"signal": "INT"}:
+                # Tell the remote PDB that the user pressed ^C at a prompt.
+                pass
+            case {
+                "complete": {
+                    "text": str(),
+                    "line": str(),
+                    "begidx": int(),
+                    "endidx": int(),
+                }
+            }:
+                # Ask the remote PDB what completions are valid for the given
+                # parameters, using readline's completion protocol.
+                pass
+            case _:
+                raise AssertionError(
+                    f"PDB message doesn't follow the schema! {msg}"
+                )
+
+    def _send(self, **kwargs):
+        self._ensure_valid_message(kwargs)
+        json_payload = json.dumps(kwargs)
+        try:
+            self.server_socket.sendall(json_payload.encode() + b"\n")
+        except OSError:
+            # This means that the server has abruptly disconnected, but we'll
+            # handle that the next time we try to read from the server instead
+            # of trying to handle it from everywhere _send() may be called.
+            # Track this with a flag rather than assuming readline() will ever
+            # return an empty string because the socket may be half-closed.
+            self.write_failed = True
+
+    def _readline(self):
+        if self.sigint_received:
+            # There's a pending unhandled SIGINT. Handle it now.
+            self.sigint_received = False
+            raise KeyboardInterrupt
+
+        # Wait for either a SIGINT or a line or EOF from the PDB server.
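+        # Using selectors lets a single wait cover both the SIGINT wakeup
+        # socket and the server socket without busy-polling.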
+        selector = selectors.DefaultSelector()
+        selector.register(self.signal_read, selectors.EVENT_READ)
+        selector.register(self.server_socket, selectors.EVENT_READ)
+
+        while b"\n" not in self.read_buf:
+            for key, _ in selector.select():
+                if key.fileobj == self.signal_read:
+                    self.signal_read.recv(1024)
+                    if self.sigint_received:
+                        # If not, we're reading wakeup events for sigints that
+                        # we've previously handled, and can ignore them.
+                        self.sigint_received = False
+                        raise KeyboardInterrupt
+                elif key.fileobj == self.server_socket:
+                    data = self.server_socket.recv(16 * 1024)
+                    self.read_buf += data
+                    if not data and b"\n" not in self.read_buf:
+                        # EOF without a full final line. Drop the partial line.
+                        self.read_buf = b""
+                        return b""
+
+        ret, sep, self.read_buf = self.read_buf.partition(b"\n")
+        return ret + sep
+
+    def read_input(self, prompt, multiline_block):
+        self.multiline_block = multiline_block
+        with self._sigint_raises_keyboard_interrupt():
+            return input(prompt)
+
+    def read_command(self, prompt):
+        reply = self.read_input(prompt, multiline_block=False)
+        if self.state == "dumb":
+            # No logic applied whatsoever, just pass the raw reply back.
+            return reply
+
+        prefix = ""
+        if self.state == "pdb":
+            # PDB command entry mode
+            cmd = self.pdb_instance.parseline(reply)[0]
+            if cmd in self.pdb_commands or reply.strip() == "":
+                # Recognized PDB command, or blank line repeating last command
+                return reply
+
+            # Otherwise, explicit or implicit exec command
+            if reply.startswith("!"):
+                prefix = "!"
+                reply = reply.removeprefix(prefix).lstrip()
+
+        if codeop.compile_command(reply + "\n", "<stdin>", "single") is not None:
+            # Valid single-line statement
+            return prefix + reply
+
+        # Otherwise, valid first line of a multi-line statement
+        more_prompt = "...".ljust(len(prompt))
+        while codeop.compile_command(reply, "<stdin>", "single") is None:
+            reply += "\n" + self.read_input(more_prompt, multiline_block=True)
+
+        return prefix + reply
+
+    @contextmanager
+    def readline_completion(self, completer):
+        try:
+            import readline
+        except ImportError:
+            yield
+            return
+
+        old_completer = readline.get_completer()
+        try:
+            readline.set_completer(completer)
+            if readline.backend == "editline":
+                # libedit uses "^I" instead of "tab"
+                command_string = "bind ^I rl_complete"
+            else:
+                command_string = "tab: complete"
+            readline.parse_and_bind(command_string)
+            yield
+        finally:
+            readline.set_completer(old_completer)
+
+    @contextmanager
+    def _sigint_handler(self):
+        # Signal handling strategy:
+        # - When we call input() we want a SIGINT to raise KeyboardInterrupt
+        # - Otherwise we want to write to the wakeup FD and set a flag.
+        #   We'll break out of select() when the wakeup FD is written to,
+        #   and we'll check the flag whenever we're about to accept input.
+        def handler(signum, frame):
+            self.sigint_received = True
+            if self.raise_on_sigint:
+                # One-shot; don't raise again until the flag is set again.
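+                # (The flag is re-armed by _sigint_raises_keyboard_interrupt()
+                # around each input() call.)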
+ self.raise_on_sigint = False + self.sigint_received = False + raise KeyboardInterrupt + + sentinel = object() + old_handler = sentinel + old_wakeup_fd = sentinel + + self.signal_read, self.signal_write = socket.socketpair() + with (closing(self.signal_read), closing(self.signal_write)): + self.signal_read.setblocking(False) + self.signal_write.setblocking(False) + + try: + old_handler = signal.signal(signal.SIGINT, handler) + + try: + old_wakeup_fd = signal.set_wakeup_fd( + self.signal_write.fileno(), + warn_on_full_buffer=False, + ) + yield + finally: + # Restore the old wakeup fd if we installed a new one + if old_wakeup_fd is not sentinel: + signal.set_wakeup_fd(old_wakeup_fd) + finally: + self.signal_read = self.signal_write = None + if old_handler is not sentinel: + # Restore the old handler if we installed a new one + signal.signal(signal.SIGINT, old_handler) + + @contextmanager + def _sigint_raises_keyboard_interrupt(self): + if self.sigint_received: + # There's a pending unhandled SIGINT. Handle it now. + self.sigint_received = False + raise KeyboardInterrupt + + try: + self.raise_on_sigint = True + yield + finally: + self.raise_on_sigint = False + + def cmdloop(self): + with ( + self._sigint_handler(), + self.readline_completion(self.complete), + ): + while not self.write_failed: + try: + if not (payload_bytes := self._readline()): + break + except KeyboardInterrupt: + self.send_interrupt() + continue + + try: + payload = json.loads(payload_bytes) + except json.JSONDecodeError: + print( + f"*** Invalid JSON from remote: {payload_bytes!r}", + flush=True, + ) + continue + + self.process_payload(payload) + + def send_interrupt(self): + if self.interrupt_sock is not None: + # Write to a socket that the PDB server listens on. This triggers + # the remote to raise a SIGINT for itself. We do this because + # Windows doesn't allow triggering SIGINT remotely. + # See https://stackoverflow.com/a/35792192 for many more details. + self.interrupt_sock.sendall(signal.SIGINT.to_bytes()) + else: + # On Unix we can just send a SIGINT to the remote process. + # This is preferable to using the signal thread approach that we + # use on Windows because it can interrupt IO in the main thread. 
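+            # For illustration: the Windows branch above writes the single
+            # byte signal.SIGINT.to_bytes() (b'\x02' on supported platforms),
+            # while here the signal can be delivered directly: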
+ os.kill(self.pid, signal.SIGINT) + + def process_payload(self, payload): + match payload: + case { + "command_list": command_list + } if all(isinstance(c, str) for c in command_list): + self.pdb_commands = set(command_list) + case {"message": str(msg), "type": str(msg_type)}: + if msg_type == "error": + print("***", msg, flush=True) + else: + print(msg, end="", flush=True) + case {"help": str(arg)}: + self.pdb_instance.do_help(arg) + case {"prompt": str(prompt), "state": str(state)}: + if state not in ("pdb", "interact"): + state = "dumb" + self.state = state + self.prompt_for_reply(prompt) + case _: + raise RuntimeError(f"Unrecognized payload {payload}") + + def prompt_for_reply(self, prompt): + while True: + try: + payload = {"reply": self.read_command(prompt)} + except EOFError: + payload = {"signal": "EOF"} + except KeyboardInterrupt: + payload = {"signal": "INT"} + except Exception as exc: + msg = traceback.format_exception_only(exc)[-1].strip() + print("***", msg, flush=True) + continue + + self._send(**payload) + return + + def complete(self, text, state): + import readline + + if state == 0: + self.completion_matches = [] + if self.state not in ("pdb", "interact"): + return None + + origline = readline.get_line_buffer() + line = origline.lstrip() + if self.multiline_block: + # We're completing a line contained in a multi-line block. + # Force the remote to treat it as a Python expression. + line = "! " + line + offset = len(origline) - len(line) + begidx = readline.get_begidx() - offset + endidx = readline.get_endidx() - offset + + msg = { + "complete": { + "text": text, + "line": line, + "begidx": begidx, + "endidx": endidx, + } + } + + self._send(**msg) + if self.write_failed: + return None + + payload = self._readline() + if not payload: + return None + + payload = json.loads(payload) + if "completions" not in payload: + raise RuntimeError( + f"Failed to get valid completions. Got: {payload}" + ) + + self.completion_matches = payload["completions"] + try: + return self.completion_matches[state] + except IndexError: + return None + + +def _connect( + *, + host, + port, + frame, + commands, + version, + signal_raising_thread, + colorize, +): + with closing(socket.create_connection((host, port))) as conn: + sockfile = conn.makefile("rwb") + + # The client requests this thread on Windows but not on Unix. + # Most tests don't request this thread, to keep them simpler. + if signal_raising_thread: + signal_server = (host, port) + else: + signal_server = None + + remote_pdb = _PdbServer( + sockfile, + signal_server=signal_server, + colorize=colorize, + ) + weakref.finalize(remote_pdb, sockfile.close) + + if Pdb._last_pdb_instance is not None: + remote_pdb.error("Another PDB instance is already attached.") + elif version != remote_pdb.protocol_version(): + target_ver = f"0x{remote_pdb.protocol_version():08X}" + attach_ver = f"0x{version:08X}" + remote_pdb.error( + f"The target process is running a Python version that is" + f" incompatible with this PDB module." 
+ f"\nTarget process pdb protocol version: {target_ver}" + f"\nLocal pdb module's protocol version: {attach_ver}" + ) + else: + remote_pdb.rcLines.extend(commands.splitlines()) + remote_pdb.set_trace(frame=frame) + + +def attach(pid, commands=()): + """Attach to a running process with the given PID.""" + with ExitStack() as stack: + server = stack.enter_context( + closing(socket.create_server(("localhost", 0))) + ) + port = server.getsockname()[1] + + connect_script = stack.enter_context( + tempfile.NamedTemporaryFile("w", delete_on_close=False) + ) + + use_signal_thread = sys.platform == "win32" + colorize = _colorize.can_colorize() + + connect_script.write( + textwrap.dedent( + f""" + import pdb, sys + pdb._connect( + host="localhost", + port={port}, + frame=sys._getframe(1), + commands={json.dumps("\n".join(commands))}, + version={_PdbServer.protocol_version()}, + signal_raising_thread={use_signal_thread!r}, + colorize={colorize!r}, + ) + """ + ) + ) + connect_script.close() + orig_mode = os.stat(connect_script.name).st_mode + os.chmod(connect_script.name, orig_mode | stat.S_IROTH | stat.S_IRGRP) + sys.remote_exec(pid, connect_script.name) + + # TODO Add a timeout? Or don't bother since the user can ^C? + client_sock, _ = server.accept() + stack.enter_context(closing(client_sock)) + + if use_signal_thread: + interrupt_sock, _ = server.accept() + stack.enter_context(closing(interrupt_sock)) + interrupt_sock.setblocking(False) + else: + interrupt_sock = None + + _PdbClient(pid, client_sock, interrupt_sock).cmdloop() + + +# Post-Mortem interface + +def post_mortem(t=None): + """Enter post-mortem debugging of the given *traceback*, or *exception* + object. + + If no traceback is given, it uses the one of the exception that is + currently being handled (an exception must be being handled if the + default is to be used). + + If `t` is an exception object, the `exceptions` command makes it possible to + list and inspect its chained exceptions (if any). + """ + return _post_mortem(t, Pdb()) + + +def _post_mortem(t, pdb_instance): + """ + Private version of post_mortem, which allow to pass a pdb instance + for testing purposes. + """ + # handling the default + if t is None: + exc = sys.exception() + if exc is not None: + t = exc.__traceback__ + + if t is None or (isinstance(t, BaseException) and t.__traceback__ is None): + raise ValueError("A valid traceback must be passed if no " + "exception is being handled") + + pdb_instance.reset() + pdb_instance.interaction(None, t) + + +def pm(): + """Enter post-mortem debugging of the traceback found in sys.last_exc.""" + post_mortem(sys.last_exc) + + +# Main program for testing + +TESTCMD = 'import x; x.main()' + +def test(): + run(TESTCMD) + +# print help +def help(): + import pydoc + pydoc.pager(__doc__) + +_usage = """\ +Debug the Python program given by pyfile. Alternatively, +an executable module or package to debug can be specified using +the -m switch. You can also attach to a running Python process +using the -p option with its PID. + +Initial commands are read from .pdbrc files in your home directory +and in the current directory, if they exist. Commands supplied with +-c are executed after commands from .pdbrc files. + +To let the script run until an exception occurs, use "-c continue". +To let the script run up to a given line X in the debugged file, use +"-c 'until X'".""" + + +def exit_with_permission_help_text(): + """ + Prints a message pointing to platform-specific permission help text and exits the program. 
+ This function is called when a PermissionError is encountered while trying + to attach to a process. + """ + print( + "Error: The specified process cannot be attached to due to insufficient permissions.\n" + "See the Python documentation for details on required privileges and troubleshooting:\n" + "https://docs.python.org/3.14/howto/remote_debugging.html#permission-requirements\n" + ) + sys.exit(1) + + +def parse_args(): + # We want pdb to be as intuitive as possible to users, so we need to do some + # heuristic parsing to deal with ambiguity. + # For example: + # "python -m pdb -m foo -p 1" should pass "-p 1" to "foo". + # "python -m pdb foo.py -m bar" should pass "-m bar" to "foo.py". + # "python -m pdb -m foo -m bar" should pass "-m bar" to "foo". + # This require some customized parsing logic to find the actual debug target. + + import argparse + + parser = argparse.ArgumentParser( + usage="%(prog)s [-h] [-c command] (-m module | -p pid | pyfile) [args ...]", + description=_usage, + formatter_class=argparse.RawDescriptionHelpFormatter, + allow_abbrev=False, + color=True, + ) + + # Get all the commands out first. For backwards compatibility, we allow + # -c commands to be after the target. + parser.add_argument('-c', '--command', action='append', default=[], metavar='command', dest='commands', + help='pdb commands to execute as if given in a .pdbrc file') + + opts, args = parser.parse_known_args() + + if not args: + # If no arguments were given (python -m pdb), print the whole help message. + # Without this check, argparse would only complain about missing required arguments. + # We need to add the arguments definitions here to get a proper help message. + parser.add_argument('-m', metavar='module', dest='module') + parser.add_argument('-p', '--pid', type=int, help="attach to the specified PID", default=None) + parser.print_help() + sys.exit(2) + elif args[0] == '-p' or args[0] == '--pid': + # Attach to a pid + parser.add_argument('-p', '--pid', type=int, help="attach to the specified PID", default=None) + opts, args = parser.parse_known_args() + if args: + # For --pid, any extra arguments are invalid. + parser.error(f"unrecognized arguments: {' '.join(args)}") + elif args[0] == '-m': + # Debug a module, we only need the first -m module argument. + # The rest is passed to the module itself. + parser.add_argument('-m', metavar='module', dest='module') + opt_module = parser.parse_args(args[:2]) + opts.module = opt_module.module + args = args[2:] + elif args[0].startswith('-'): + # Invalid argument before the script name. + invalid_args = list(itertools.takewhile(lambda a: a.startswith('-'), args)) + parser.error(f"unrecognized arguments: {' '.join(invalid_args)}") + + # Otherwise it's debugging a script and we already parsed all -c commands. + + return opts, args + +def main(): + opts, args = parse_args() + + if getattr(opts, 'pid', None) is not None: + try: + attach(opts.pid, opts.commands) + except PermissionError as e: + exit_with_permission_help_text() + return + elif getattr(opts, 'module', None) is not None: + file = opts.module + target = _ModuleTarget(file) + else: + file = args.pop(0) + if file.endswith('.pyz'): + target = _ZipTarget(file) + else: + target = _ScriptTarget(file) + + sys.argv[:] = [file] + args # Hide "pdb.py" and pdb options from argument list + + # Note on saving/restoring sys.argv: it's a good idea when sys.argv was + # modified by the script being debugged. It's a bad idea when it was + # changed by the user from the command line. 
There is a "restart" command + # which allows explicit specification of command line arguments. + pdb = Pdb(mode='cli', backend='monitoring', colorize=True) + pdb.rcLines.extend(opts.commands) + while True: + try: + pdb._run(target) + except Restart: + print("Restarting", target, "with arguments:") + print("\t" + " ".join(sys.argv[1:])) + except SystemExit as e: + # In most cases SystemExit does not warrant a post-mortem session. + print("The program exited via sys.exit(). Exit status:", end=' ') + print(e) + except BaseException as e: + traceback.print_exception(e, colorize=_colorize.can_colorize()) + print("Uncaught exception. Entering post mortem debugging") + print("Running 'cont' or 'step' will restart the program") + try: + pdb.interaction(None, e) + except Restart: + print("Restarting", target, "with arguments:") + print("\t" + " ".join(sys.argv[1:])) + continue + if pdb._user_requested_quit: + break + print("The program finished and will be restarted") + + +# When invoked as main program, invoke the debugger on a script +if __name__ == '__main__': + import pdb + pdb.main() diff --git a/Python314_4_x86_Template/Lib/pickle.py b/Python314_4_x86_Template/Lib/pickle.py new file mode 100644 index 00000000..beaefae0 --- /dev/null +++ b/Python314_4_x86_Template/Lib/pickle.py @@ -0,0 +1,1931 @@ +"""Create portable serialized representations of Python objects. + +See module copyreg for a mechanism for registering custom picklers. +See module pickletools source for extensive comments. + +Classes: + + Pickler + Unpickler + +Functions: + + dump(object, file) + dumps(object) -> string + load(file) -> object + loads(bytes) -> object + +Misc variables: + + __version__ + format_version + compatible_formats + +""" + +from types import FunctionType +from copyreg import dispatch_table +from copyreg import _extension_registry, _inverted_registry, _extension_cache +from itertools import batched +from functools import partial +import sys +from sys import maxsize +from struct import pack, unpack +import io +import codecs +import _compat_pickle + +__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler", + "Unpickler", "dump", "dumps", "load", "loads"] + +try: + from _pickle import PickleBuffer + __all__.append("PickleBuffer") + _HAVE_PICKLE_BUFFER = True +except ImportError: + _HAVE_PICKLE_BUFFER = False + + +# Shortcut for use in isinstance testing +bytes_types = (bytes, bytearray) + +# These are purely informational; no code uses these. +format_version = "5.0" # File format version we write +compatible_formats = ["1.0", # Original protocol 0 + "1.1", # Protocol 0 with INST added + "1.2", # Original protocol 1 + "1.3", # Protocol 1 with BINFLOAT added + "2.0", # Protocol 2 + "3.0", # Protocol 3 + "4.0", # Protocol 4 + "5.0", # Protocol 5 + ] # Old format versions we can read + +# This is the highest protocol number we know how to read. +HIGHEST_PROTOCOL = 5 + +# The protocol we write by default. May be less than HIGHEST_PROTOCOL. +# Only bump this if the oldest still supported version of Python already +# includes it. +DEFAULT_PROTOCOL = 5 + +class PickleError(Exception): + """A common base class for the other pickling exceptions.""" + pass + +class PicklingError(PickleError): + """This exception is raised when an unpicklable object is passed to the + dump() method. + + """ + pass + +class UnpicklingError(PickleError): + """This exception is raised when there is a problem unpickling an object, + such as a security violation. 
+ + Note that other exceptions may also be raised during unpickling, including + (but not necessarily limited to) AttributeError, EOFError, ImportError, + and IndexError. + + """ + pass + +# An instance of _Stop is raised by Unpickler.load_stop() in response to +# the STOP opcode, passing the object that is the result of unpickling. +class _Stop(Exception): + def __init__(self, value): + self.value = value + +# Pickle opcodes. See pickletools.py for extensive docs. The listing +# here is in kind-of alphabetical order of 1-character pickle code. +# pickletools groups them by purpose. + +MARK = b'(' # push special markobject on stack +STOP = b'.' # every pickle ends with STOP +POP = b'0' # discard topmost stack item +POP_MARK = b'1' # discard stack top through topmost markobject +DUP = b'2' # duplicate top stack item +FLOAT = b'F' # push float object; decimal string argument +INT = b'I' # push integer or bool; decimal string argument +BININT = b'J' # push four-byte signed int +BININT1 = b'K' # push 1-byte unsigned int +LONG = b'L' # push long; decimal string argument +BININT2 = b'M' # push 2-byte unsigned int +NONE = b'N' # push None +PERSID = b'P' # push persistent object; id is taken from string arg +BINPERSID = b'Q' # " " " ; " " " " stack +REDUCE = b'R' # apply callable to argtuple, both on stack +STRING = b'S' # push string; NL-terminated string argument +BINSTRING = b'T' # push string; counted binary string argument +SHORT_BINSTRING= b'U' # " " ; " " " " < 256 bytes +UNICODE = b'V' # push Unicode string; raw-unicode-escaped'd argument +BINUNICODE = b'X' # " " " ; counted UTF-8 string argument +APPEND = b'a' # append stack top to list below it +BUILD = b'b' # call __setstate__ or __dict__.update() +GLOBAL = b'c' # push self.find_class(modname, name); 2 string args +DICT = b'd' # build a dict from stack items +EMPTY_DICT = b'}' # push empty dict +APPENDS = b'e' # extend list on stack by topmost stack slice +GET = b'g' # push item from memo on stack; index is string arg +BINGET = b'h' # " " " " " " ; " " 1-byte arg +INST = b'i' # build & push class instance +LONG_BINGET = b'j' # push item from memo on stack; index is 4-byte arg +LIST = b'l' # build list from topmost stack items +EMPTY_LIST = b']' # push empty list +OBJ = b'o' # build & push class instance +PUT = b'p' # store stack top in memo; index is string arg +BINPUT = b'q' # " " " " " ; " " 1-byte arg +LONG_BINPUT = b'r' # " " " " " ; " " 4-byte arg +SETITEM = b's' # add key+value pair to dict +TUPLE = b't' # build tuple from topmost stack items +EMPTY_TUPLE = b')' # push empty tuple +SETITEMS = b'u' # modify dict by adding topmost key+value pairs +BINFLOAT = b'G' # push float; arg is 8-byte float encoding + +TRUE = b'I01\n' # not an opcode; see INT docs in pickletools.py +FALSE = b'I00\n' # not an opcode; see INT docs in pickletools.py + +# Protocol 2 + +PROTO = b'\x80' # identify pickle protocol +NEWOBJ = b'\x81' # build object by applying cls.__new__ to argtuple +EXT1 = b'\x82' # push object from extension registry; 1-byte index +EXT2 = b'\x83' # ditto, but 2-byte index +EXT4 = b'\x84' # ditto, but 4-byte index +TUPLE1 = b'\x85' # build 1-tuple from stack top +TUPLE2 = b'\x86' # build 2-tuple from two topmost stack items +TUPLE3 = b'\x87' # build 3-tuple from three topmost stack items +NEWTRUE = b'\x88' # push True +NEWFALSE = b'\x89' # push False +LONG1 = b'\x8a' # push long from < 256 bytes +LONG4 = b'\x8b' # push really big long + +_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3] + +# Protocol 3 (Python 3.x) + +BINBYTES 
= b'B'   # push bytes; counted binary string argument
+SHORT_BINBYTES = b'C'   #  "     "   ;    "      "       "      " < 256 bytes
+
+# Protocol 4
+
+SHORT_BINUNICODE = b'\x8c'  # push short string; UTF-8 length < 256 bytes
+BINUNICODE8      = b'\x8d'  # push very long string
+BINBYTES8        = b'\x8e'  # push very long bytes string
+EMPTY_SET        = b'\x8f'  # push empty set on the stack
+ADDITEMS         = b'\x90'  # modify set by adding topmost stack items
+FROZENSET        = b'\x91'  # build frozenset from topmost stack items
+NEWOBJ_EX        = b'\x92'  # like NEWOBJ but work with keyword only arguments
+STACK_GLOBAL     = b'\x93'  # same as GLOBAL but using names on the stacks
+MEMOIZE          = b'\x94'  # store top of the stack in memo
+FRAME            = b'\x95'  # indicate the beginning of a new frame
+
+# Protocol 5
+
+BYTEARRAY8       = b'\x96'  # push bytearray
+NEXT_BUFFER      = b'\x97'  # push next out-of-band buffer
+READONLY_BUFFER  = b'\x98'  # make top of stack readonly
+
+__all__.extend(x for x in dir() if x.isupper() and not x.startswith('_'))
+
+
+class _Framer:
+
+    _FRAME_SIZE_MIN = 4
+    _FRAME_SIZE_TARGET = 64 * 1024
+
+    def __init__(self, file_write):
+        self.file_write = file_write
+        self.current_frame = None
+
+    def start_framing(self):
+        self.current_frame = io.BytesIO()
+
+    def end_framing(self):
+        if self.current_frame and self.current_frame.tell() > 0:
+            self.commit_frame(force=True)
+            self.current_frame = None
+
+    def commit_frame(self, force=False):
+        if self.current_frame:
+            f = self.current_frame
+            if f.tell() >= self._FRAME_SIZE_TARGET or force:
+                data = f.getbuffer()
+                write = self.file_write
+                if len(data) >= self._FRAME_SIZE_MIN:
+                    # Issue a single call to the write method of the underlying
+                    # file object for the frame opcode with the size of the
+                    # frame. The concatenation is expected to be less expensive
+                    # than issuing an additional call to write.
+                    write(FRAME + pack("<Q", len(data)))
+
+                # Issue a separate call to write to append the frame
+                # contents without concatenation to the above or with the
+                # next frame.
+                write(data)
+
+                self.current_frame = io.BytesIO()
+
+    def write(self, data):
+        if self.current_frame:
+            return self.current_frame.write(data)
+        else:
+            return self.file_write(data)
+
+    def write_large_bytes(self, header, payload):
+        write = self.file_write
+        if self.current_frame:
+            # Terminate the current frame and flush it to the file.
+            self.commit_frame(force=True)
+
+        # Perform direct write of the header and payload of the large binary
+        # object. Be careful not to concatenate the header and the payload
+        # prior to calling 'write' as we do not want to allocate a large
+        # temporary bytes object.
+        # We intentionally do not insert a protocol 4 frame opcode to make
+        # it possible to optimize file.read calls in the loader.
+        write(header)
+        write(payload)
+
+
+class _Unframer:
+
+    def __init__(self, file_read, file_readline, file_tell=None):
+        self.file_read = file_read
+        self.file_readline = file_readline
+        self.current_frame = None
+
+    def readinto(self, buf):
+        if self.current_frame:
+            n = self.current_frame.readinto(buf)
+            if n == 0 and len(buf) != 0:
+                self.current_frame = None
+                n = len(buf)
+                buf[:] = self.file_read(n)
+                return n
+            if n < len(buf):
+                raise UnpicklingError(
+                    "pickle exhausted before end of frame")
+            return n
+        else:
+            n = len(buf)
+            buf[:] = self.file_read(n)
+            return n
+
+    def read(self, n):
+        if self.current_frame:
+            data = self.current_frame.read(n)
+            if not data and n != 0:
+                self.current_frame = None
+                return self.file_read(n)
+            if len(data) < n:
+                raise UnpicklingError(
+                    "pickle exhausted before end of frame")
+            return data
+        else:
+            return self.file_read(n)
+
+    def readline(self):
+        if self.current_frame:
+            data = self.current_frame.readline()
+            if not data:
+                self.current_frame = None
+                return self.file_readline()
+            if data[-1] != b'\n'[0]:
+                raise UnpicklingError(
+                    "pickle exhausted before end of frame")
+            return data
+        else:
+            return self.file_readline()
+
+    def load_frame(self, frame_size):
+        if self.current_frame and self.current_frame.read() != b'':
+            raise UnpicklingError(
+                "beginning of a new frame before end of current frame")
+        self.current_frame = io.BytesIO(self.file_read(frame_size))
+
+
+# Tools used for pickling.
+
+def _getattribute(obj, dotted_path):
+    for subpath in dotted_path:
+        obj = getattr(obj, subpath)
+    return obj
+
+def whichmodule(obj, name):
+    """Find the module an object belongs to."""
+    dotted_path = name.split('.')
+    module_name = getattr(obj, '__module__', None)
+    if '<locals>' in dotted_path:
+        raise PicklingError(f"Can't pickle local object {obj!r}")
+    if module_name is None:
+        # Protect the iteration by using a list copy of sys.modules against dynamic
+        # modules that trigger imports of other modules upon calls to getattr.
+        for module_name, module in sys.modules.copy().items():
+            if (module_name == '__main__'
+                    or module_name == '__mp_main__'  # bpo-42406
+                    or module is None):
+                continue
+            try:
+                if _getattribute(module, dotted_path) is obj:
+                    return module_name
+            except AttributeError:
+                pass
+        module_name = '__main__'
+
+    try:
+        __import__(module_name, level=0)
+        module = sys.modules[module_name]
+    except (ImportError, ValueError, KeyError) as exc:
+        raise PicklingError(f"Can't pickle {obj!r}: {exc!s}")
+    try:
+        if _getattribute(module, dotted_path) is obj:
+            return module_name
+    except AttributeError:
+        raise PicklingError(f"Can't pickle {obj!r}: "
+                            f"it's not found as {module_name}.{name}")
+
+    raise PicklingError(
+        f"Can't pickle {obj!r}: it's not the same object as {module_name}.{name}")
+
+def encode_long(x):
+    r"""Encode a long to a two's complement little-endian binary string.
+    Note that 0 is a special case, returning an empty string, to save a
+    byte in the LONG1 pickling context.
+ + >>> encode_long(0) + b'' + >>> encode_long(255) + b'\xff\x00' + >>> encode_long(32767) + b'\xff\x7f' + >>> encode_long(-256) + b'\x00\xff' + >>> encode_long(-32768) + b'\x00\x80' + >>> encode_long(-128) + b'\x80' + >>> encode_long(127) + b'\x7f' + >>> + """ + if x == 0: + return b'' + nbytes = (x.bit_length() >> 3) + 1 + result = x.to_bytes(nbytes, byteorder='little', signed=True) + if x < 0 and nbytes > 1: + if result[-1] == 0xff and (result[-2] & 0x80) != 0: + result = result[:-1] + return result + +def decode_long(data): + r"""Decode a long from a two's complement little-endian binary string. + + >>> decode_long(b'') + 0 + >>> decode_long(b"\xff\x00") + 255 + >>> decode_long(b"\xff\x7f") + 32767 + >>> decode_long(b"\x00\xff") + -256 + >>> decode_long(b"\x00\x80") + -32768 + >>> decode_long(b"\x80") + -128 + >>> decode_long(b"\x7f") + 127 + """ + return int.from_bytes(data, byteorder='little', signed=True) + +def _T(obj): + cls = type(obj) + module = cls.__module__ + if module in (None, 'builtins', '__main__'): + return cls.__qualname__ + return f'{module}.{cls.__qualname__}' + + +_NoValue = object() + +# Pickling machinery + +class _Pickler: + + def __init__(self, file, protocol=None, *, fix_imports=True, + buffer_callback=None): + """This takes a binary file for writing a pickle data stream. + + The optional *protocol* argument tells the pickler to use the + given protocol; supported protocols are 0, 1, 2, 3, 4 and 5. + The default protocol is 5. It was introduced in Python 3.8, and + is incompatible with previous versions. + + Specifying a negative protocol version selects the highest + protocol version supported. The higher the protocol used, the + more recent the version of Python needed to read the pickle + produced. + + The *file* argument must have a write() method that accepts a + single bytes argument. It can thus be a file object opened for + binary writing, an io.BytesIO instance, or any other custom + object that meets this interface. + + If *fix_imports* is True and *protocol* is less than 3, pickle + will try to map the new Python 3 names to the old module names + used in Python 2, so that the pickle data stream is readable + with Python 2. + + If *buffer_callback* is None (the default), buffer views are + serialized into *file* as part of the pickle stream. + + If *buffer_callback* is not None, then it can be called any number + of times with a buffer view. If the callback returns a false value + (such as None), the given buffer is out-of-band; otherwise the + buffer is serialized in-band, i.e. inside the pickle stream. + + It is an error if *buffer_callback* is not None and *protocol* + is None or smaller than 5. + """ + if protocol is None: + protocol = DEFAULT_PROTOCOL + if protocol < 0: + protocol = HIGHEST_PROTOCOL + elif not 0 <= protocol <= HIGHEST_PROTOCOL: + raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL) + if buffer_callback is not None and protocol < 5: + raise ValueError("buffer_callback needs protocol >= 5") + self._buffer_callback = buffer_callback + try: + self._file_write = file.write + except AttributeError: + raise TypeError("file must have a 'write' attribute") + self.framer = _Framer(self._file_write) + self.write = self.framer.write + self._write_large_bytes = self.framer.write_large_bytes + self.memo = {} + self.proto = int(protocol) + self.bin = protocol >= 1 + self.fast = 0 + self.fix_imports = fix_imports and protocol < 3 + + def clear_memo(self): + """Clears the pickler's "memo". 
+
+        The memo is the data structure that remembers which objects the
+        pickler has already seen, so that shared or recursive objects
+        are pickled by reference and not by value.  This method is
+        useful when re-using picklers.
+        """
+        self.memo.clear()
+
+    def dump(self, obj):
+        """Write a pickled representation of obj to the open file."""
+        # Check whether Pickler was initialized correctly. This is
+        # only needed to mimic the behavior of _pickle.Pickler.dump().
+        if not hasattr(self, "_file_write"):
+            raise PicklingError("Pickler.__init__() was not called by "
+                                "%s.__init__()" % (self.__class__.__name__,))
+        if self.proto >= 2:
+            self.write(PROTO + pack("<B", self.proto))
+        if self.proto >= 4:
+            self.framer.start_framing()
+        self.save(obj)
+        self.write(STOP)
+        self.framer.end_framing()
+
+    def memoize(self, obj):
+        """Store an object in the memo."""
+
+        # The Pickler memo is a dictionary mapping object ids to 2-tuples
+        # that contain the Unpickler memo key and the object being memoized.
+        # The memo key is written to the pickle and will become
+        # the key in the Unpickler's memo.  The object is stored in the
+        # Pickler memo so that transient objects are kept alive during
+        # pickling.
+
+        # The use of the Unpickler memo length as the memo key is just a
+        # convention.  The only requirement is that the memo values be unique.
+        # But there appears no advantage to any other scheme, and this
+        # scheme allows the Unpickler memo to be implemented as a plain (but
+        # growable) array, indexed by memo key.
+        if self.fast:
+            return
+        assert id(obj) not in self.memo
+        idx = len(self.memo)
+        self.write(self.put(idx))
+        self.memo[id(obj)] = idx, obj
+
+    # Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
+    def put(self, idx):
+        if self.proto >= 4:
+            return MEMOIZE
+        elif self.bin:
+            if idx < 256:
+                return BINPUT + pack("<B", idx)
+            else:
+                return LONG_BINPUT + pack("<I", idx)
+        else:
+            return PUT + repr(idx).encode("ascii") + b'\n'
+
+    # Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
+    def get(self, i):
+        if self.bin:
+            if i < 256:
+                return BINGET + pack("<B", i)
+            else:
+                return LONG_BINGET + pack("<I", i)
+
+        return GET + repr(i).encode("ascii") + b'\n'
+
+    def save(self, obj, save_persistent_id=True):
+        self.framer.commit_frame()
+
+        # Check for persistent id (defined by a subclass)
+        if save_persistent_id:
+            pid = self.persistent_id(obj)
+            if pid is not None:
+                self.save_pers(pid)
+                return
+
+        # Check the memo
+        x = self.memo.get(id(obj))
+        if x is not None:
+            self.write(self.get(x[0]))
+            return
+
+        rv = NotImplemented
+        reduce = getattr(self, 'reducer_override', _NoValue)
+        if reduce is not _NoValue:
+            rv = reduce(obj)
+
+        if rv is NotImplemented:
+            # Check the type dispatch table
+            t = type(obj)
+            f = self.dispatch.get(t)
+            if f is not None:
+                f(self, obj)  # Call unbound method with explicit self
+                return
+
+            # Check private dispatch table if any, or else
+            # copyreg.dispatch_table
+            reduce = getattr(self, 'dispatch_table', dispatch_table).get(
+                t, _NoValue)
+            if reduce is not _NoValue:
+                rv = reduce(obj)
+            else:
+                # Check for a class with a custom metaclass; treat as regular
+                # class
+                if issubclass(t, type):
+                    self.save_global(obj)
+                    return
+
+                # Check for instance __reduce_ex__ method first
+                reduce = getattr(obj, "__reduce_ex__", _NoValue)
+                if reduce is not _NoValue:
+                    rv = reduce(self.proto)
+                else:
+                    # Check for a __reduce__ method
+                    reduce = getattr(obj, "__reduce__", _NoValue)
+                    if reduce is not _NoValue:
+                        rv = reduce()
+                    else:
+                        raise PicklingError(
+                            f"Can't pickle {_T(obj)} object: {obj!r}")
+
+        # Check for string returned by reduce(), meaning "save as global"
+        if isinstance(rv, str):
+            self.save_global(obj, rv)
+            return
+
+        # Assert that reduce() returned a tuple
+        if not isinstance(rv, tuple):
+            raise PicklingError(f"__reduce__ must return a string or tuple, "
+                                f"not {_T(rv)}")
+
+        # Assert that it returned an appropriately sized tuple
+        l = len(rv)
+        if not (2 <= l <= 6):
+            raise PicklingError("tuple returned by __reduce__ "
+                                "must contain 2 through 6 elements")
+
+        # Save the reduce() output and finally memoize the object
+        self.save_reduce(obj=obj, *rv)
+
+    def persistent_id(self, obj):
+        # This exists so a subclass can override it
+        return None
+
+    def save_pers(self, pid):
+        # Save a persistent id reference
+        if self.bin:
+            self.save(pid, save_persistent_id=False)
+            self.write(BINPERSID)
+        else:
+            try:
+                self.write(PERSID + str(pid).encode("ascii") + b'\n')
+            except UnicodeEncodeError:
+                raise PicklingError(
+                    "persistent IDs in protocol 0 must be ASCII strings")
+
+    def save_reduce(self, func, args, state=None, listitems=None,
+                    dictitems=None, state_setter=None, *, obj=None):
+        # This API is called by some subclasses
+
+        if not isinstance(args, tuple):
+            raise PicklingError("args from save_reduce() must be a tuple")
+        if not callable(func):
+            raise PicklingError("func from save_reduce() must be callable")
+
+        save = self.save
+        write = self.write
+
+        func_name = getattr(func, "__name__", "")
+        if self.proto >= 2 and func_name == "__newobj_ex__":
+            cls, args, kwargs = args
+            if not hasattr(cls, "__new__"):
+                raise PicklingError("first argument to __newobj_ex__() has no __new__")
+            if obj is not None and cls is not obj.__class__:
+                raise PicklingError(f"first argument to __newobj_ex__() "
+                                    f"must be {obj.__class__!r}, not {cls!r}")
+            if self.proto >= 4:
+                try:
+                    save(cls)
+                except BaseException as exc:
+                    exc.add_note(f'when serializing {_T(obj)} class')
+                    raise
+                try:
+                    save(args)
+                    save(kwargs)
+                except BaseException as exc:
+                    exc.add_note(f'when serializing {_T(obj)} __new__ arguments')
+                    raise
+                write(NEWOBJ_EX)
+            else:
+                func = partial(cls.__new__, cls, *args, **kwargs)
+                try:
+                    save(func)
+                except BaseException as exc:
+                    exc.add_note(f'when serializing {_T(obj)} reconstructor')
+                    raise
+                save(())
+                write(REDUCE)
+        elif self.proto >= 2 and func_name == "__newobj__":
+            # A __reduce__ implementation can direct protocol 2 or newer to
+            # use the more efficient NEWOBJ opcode, while still
+            # allowing protocol 0 and 1 to work normally.  For this to
+            # work, the function returned by __reduce__ should be
+            # called __newobj__, and its first argument should be a
+            # class.  The implementation for __newobj__
+            # should be as follows, although pickle has no way to
+            # verify this:
+            #
+            # def __newobj__(cls, *args):
+            #     return cls.__new__(cls, *args)
+            #
+            # Protocols 0 and 1 will pickle a reference to __newobj__,
+            # while protocol 2 (and above) will pickle a reference to
+            # cls, the remaining args tuple, and the NEWOBJ code,
+            # which calls cls.__new__(cls, *args) at unpickling time
+            # (see load_newobj below).
If __reduce__ returns a + # three-tuple, the state from the third tuple item will be + # pickled regardless of the protocol, calling __setstate__ + # at unpickling time (see load_build below). + # + # Note that no standard __newobj__ implementation exists; + # you have to provide your own. This is to enforce + # compatibility with Python 2.2 (pickles written using + # protocol 0 or 1 in Python 2.3 should be unpicklable by + # Python 2.2). + cls = args[0] + if not hasattr(cls, "__new__"): + raise PicklingError("first argument to __newobj__() has no __new__") + if obj is not None and cls is not obj.__class__: + raise PicklingError(f"first argument to __newobj__() " + f"must be {obj.__class__!r}, not {cls!r}") + args = args[1:] + try: + save(cls) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} class') + raise + try: + save(args) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} __new__ arguments') + raise + write(NEWOBJ) + else: + try: + save(func) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} reconstructor') + raise + try: + save(args) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} reconstructor arguments') + raise + write(REDUCE) + + if obj is not None: + # If the object is already in the memo, this means it is + # recursive. In this case, throw away everything we put on the + # stack, and fetch the object back from the memo. + if id(obj) in self.memo: + write(POP + self.get(self.memo[id(obj)][0])) + else: + self.memoize(obj) + + # More new special cases (that work with older protocols as + # well): when __reduce__ returns a tuple with 4 or 5 items, + # the 4th and 5th item should be iterators that provide list + # items and dict items (as (key, value) tuples), or None. + + if listitems is not None: + self._batch_appends(listitems, obj) + + if dictitems is not None: + self._batch_setitems(dictitems, obj) + + if state is not None: + if state_setter is None: + try: + save(state) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} state') + raise + write(BUILD) + else: + # If a state_setter is specified, call it instead of load_build + # to update obj's with its previous state. + # First, push state_setter and its tuple of expected arguments + # (obj, state) onto the stack. + try: + save(state_setter) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} state setter') + raise + save(obj) # simple BINGET opcode as obj is already memoized. + try: + save(state) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} state') + raise + write(TUPLE2) + # Trigger a state_setter(obj, state) function call. + write(REDUCE) + # The purpose of state_setter is to carry-out an + # inplace modification of obj. We do not care about what the + # method might return, so its output is eventually removed from + # the stack. + write(POP) + + # Methods below this point are dispatched through the dispatch table + + dispatch = {} + + def save_none(self, obj): + self.write(NONE) + dispatch[type(None)] = save_none + + def save_bool(self, obj): + if self.proto >= 2: + self.write(NEWTRUE if obj else NEWFALSE) + else: + self.write(TRUE if obj else FALSE) + dispatch[bool] = save_bool + + def save_long(self, obj): + if self.bin: + # If the int is small enough to fit in a signed 4-byte 2's-comp + # format, we can store it more efficiently than the general + # case. 
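+            # For example (protocol 1, illustrative):
+            #   pickle.dumps(255, 1)   == b'K\xff.'              # BININT1
+            #   pickle.dumps(65535, 1) == b'M\xff\xff.'          # BININT2
+            #   pickle.dumps(-1, 1)    == b'J\xff\xff\xff\xff.'  # BININT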
+            # First one- and two-byte unsigned ints:
+            if obj >= 0:
+                if obj <= 0xff:
+                    self.write(BININT1 + pack("<B", obj))
+                    return
+                if obj <= 0xffff:
+                    self.write(BININT2 + pack("<H", obj))
+                    return
+            # Next check for 4-byte signed ints:
+            if -0x80000000 <= obj <= 0x7fffffff:
+                self.write(BININT + pack("<i", obj))
+                return
+        if self.proto >= 2:
+            encoded = encode_long(obj)
+            n = len(encoded)
+            if n < 256:
+                self.write(LONG1 + pack("<B", n) + encoded)
+            else:
+                self.write(LONG4 + pack("<i", n) + encoded)
+            return
+        if -0x80000000 <= obj <= 0x7fffffff:
+            self.write(INT + repr(obj).encode("ascii") + b'\n')
+        else:
+            self.write(LONG + repr(obj).encode("ascii") + b'L\n')
+    dispatch[int] = save_long
+
+    def save_float(self, obj):
+        if self.bin:
+            self.write(BINFLOAT + pack('>d', obj))
+        else:
+            self.write(FLOAT + repr(obj).encode("ascii") + b'\n')
+    dispatch[float] = save_float
+
+    def _save_bytes_no_memo(self, obj):
+        # helper for writing bytes objects for protocol >= 3
+        # without memoizing them
+        assert self.proto >= 3
+        n = len(obj)
+        if n <= 0xff:
+            self.write(SHORT_BINBYTES + pack("<B", n) + obj)
+        elif n > 0xffffffff and self.proto >= 4:
+            self._write_large_bytes(BINBYTES8 + pack("<Q", n), obj)
+        elif n >= self.framer._FRAME_SIZE_TARGET:
+            self._write_large_bytes(BINBYTES + pack("<I", n), obj)
+        else:
+            self.write(BINBYTES + pack("<I", n) + obj)
+
+    def save_bytes(self, obj):
+        if self.proto < 3:
+            if not obj:  # bytes object is empty
+                self.save_reduce(bytes, (), obj=obj)
+            else:
+                self.save_reduce(codecs.encode,
+                                 (str(obj, 'latin1'), 'latin1'), obj=obj)
+            return
+        self._save_bytes_no_memo(obj)
+        self.memoize(obj)
+    dispatch[bytes] = save_bytes
+
+    def _save_bytearray_no_memo(self, obj):
+        # helper for writing bytearray objects for protocol >= 5
+        # without memoizing them
+        assert self.proto >= 5
+        n = len(obj)
+        if n >= self.framer._FRAME_SIZE_TARGET:
+            self._write_large_bytes(BYTEARRAY8 + pack("<Q", n), obj)
+        else:
+            self.write(BYTEARRAY8 + pack("<Q", n) + bytes(obj))
+
+    def save_bytearray(self, obj):
+        if self.proto < 5:
+            if not obj:  # bytearray is empty
+                self.save_reduce(bytearray, (), obj=obj)
+            else:
+                self.save_reduce(bytearray, (bytes(obj),), obj=obj)
+            return
+        self._save_bytearray_no_memo(obj)
+        self.memoize(obj)
+    dispatch[bytearray] = save_bytearray
+
+    if _HAVE_PICKLE_BUFFER:
+        def save_picklebuffer(self, obj):
+            if self.proto < 5:
+                raise PicklingError("PickleBuffer can only be pickled with "
+                                    "protocol >= 5")
+            with obj.raw() as m:
+                if not m.contiguous:
+                    raise PicklingError("PickleBuffer can not be pickled when "
+                                        "pointing to a non-contiguous buffer")
+                in_band = True
+                if self._buffer_callback is not None:
+                    in_band = bool(self._buffer_callback(obj))
+                if in_band:
+                    # Write data in-band
+                    # XXX The C implementation avoids a copy here
+                    buf = m.tobytes()
+                    in_memo = id(buf) in self.memo
+                    if m.readonly:
+                        if in_memo:
+                            self._save_bytes_no_memo(buf)
+                        else:
+                            self.save_bytes(buf)
+                    else:
+                        if in_memo:
+                            self._save_bytearray_no_memo(buf)
+                        else:
+                            self.save_bytearray(buf)
+                else:
+                    # Write data out-of-band
+                    self.write(NEXT_BUFFER)
+                    if m.readonly:
+                        self.write(READONLY_BUFFER)
+
+        dispatch[PickleBuffer] = save_picklebuffer
+
+    def save_str(self, obj):
+        if self.bin:
+            encoded = obj.encode('utf-8', 'surrogatepass')
+            n = len(encoded)
+            if n <= 0xff and self.proto >= 4:
+                self.write(SHORT_BINUNICODE + pack("<B", n) + encoded)
+            elif n > 0xffffffff and self.proto >= 4:
+                self._write_large_bytes(BINUNICODE8 + pack("<Q", n), encoded)
+            elif n >= self.framer._FRAME_SIZE_TARGET:
+                self._write_large_bytes(BINUNICODE + pack("<I", n), encoded)
+            else:
+                self.write(BINUNICODE + pack("<I", n) + encoded)
+        else:
+            # Escape what raw-unicode-escape doesn't, but memoize not.
+            tmp = obj.replace("\\", "\\u005c")
+            tmp = tmp.replace("\0", "\\u0000")
+            tmp = tmp.replace("\n", "\\u000a")
+            tmp = tmp.replace("\r", "\\u000d")
+            tmp = tmp.replace("\x1a", "\\u001a")  # EOF on DOS
+            self.write(UNICODE + tmp.encode('raw-unicode-escape') + b'\n')
+        self.memoize(obj)
+    dispatch[str] = save_str
+
+    def save_tuple(self, obj):
+        if not obj:  # tuple is empty
+            if self.bin:
+                self.write(EMPTY_TUPLE)
+            else:
+                self.write(MARK + TUPLE)
+            return
+
+        n = len(obj)
+        save = self.save
+        memo = self.memo
+        if n <= 3 and self.proto >= 2:
+            for i, element in enumerate(obj):
+                try:
+                    save(element)
+                except BaseException as exc:
+                    exc.add_note(f'when serializing {_T(obj)} item {i}')
+                    raise
+            # Subtle.  Same as in the big comment below.
+            if id(obj) in memo:
+                get = self.get(memo[id(obj)][0])
+                self.write(POP * n + get)
+            else:
+                self.write(_tuplesize2code[n])
+                self.memoize(obj)
+            return
+
+        # proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
+        # has more than 3 elements.
+        write = self.write
+        write(MARK)
+        for i, element in enumerate(obj):
+            try:
+                save(element)
+            except BaseException as exc:
+                exc.add_note(f'when serializing {_T(obj)} item {i}')
+                raise
+
+        if id(obj) in memo:
+            # Subtle.  d was not in memo when we entered save_tuple(), so
+            # the process of saving the tuple's elements must have saved
+            # the tuple itself:  the tuple is recursive.  The proper action
+            # now is to throw away everything we put on the stack, and
+            # simply GET the tuple (it's already constructed).  This check
+            # could have been done in the "for element" loop instead, but
+            # recursive tuples are a rare thing.
+            get = self.get(memo[id(obj)][0])
+            if self.bin:
+                write(POP_MARK + get)
+            else:  # proto 0 -- POP_MARK not available
+                write(POP * (n+1) + get)
+            return
+
+        # No recursion.
+ write(TUPLE) + self.memoize(obj) + + dispatch[tuple] = save_tuple + + def save_list(self, obj): + if self.bin: + self.write(EMPTY_LIST) + else: # proto 0 -- can't use EMPTY_LIST + self.write(MARK + LIST) + + self.memoize(obj) + self._batch_appends(obj, obj) + + dispatch[list] = save_list + + _BATCHSIZE = 1000 + + def _batch_appends(self, items, obj): + # Helper to batch up APPENDS sequences + save = self.save + write = self.write + + if not self.bin: + for i, x in enumerate(items): + try: + save(x) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {i}') + raise + write(APPEND) + return + + start = 0 + for batch in batched(items, self._BATCHSIZE): + batch_len = len(batch) + if batch_len != 1: + write(MARK) + for i, x in enumerate(batch, start): + try: + save(x) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {i}') + raise + write(APPENDS) + else: + try: + save(batch[0]) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {start}') + raise + write(APPEND) + start += batch_len + + def save_dict(self, obj): + if self.bin: + self.write(EMPTY_DICT) + else: # proto 0 -- can't use EMPTY_DICT + self.write(MARK + DICT) + + self.memoize(obj) + self._batch_setitems(obj.items(), obj) + + dispatch[dict] = save_dict + + def _batch_setitems(self, items, obj): + # Helper to batch up SETITEMS sequences; proto >= 1 only + save = self.save + write = self.write + + if not self.bin: + for k, v in items: + save(k) + try: + save(v) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {k!r}') + raise + write(SETITEM) + return + + for batch in batched(items, self._BATCHSIZE): + if len(batch) != 1: + write(MARK) + for k, v in batch: + save(k) + try: + save(v) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {k!r}') + raise + write(SETITEMS) + else: + k, v = batch[0] + save(k) + try: + save(v) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} item {k!r}') + raise + write(SETITEM) + + def save_set(self, obj): + save = self.save + write = self.write + + if self.proto < 4: + self.save_reduce(set, (list(obj),), obj=obj) + return + + write(EMPTY_SET) + self.memoize(obj) + + for batch in batched(obj, self._BATCHSIZE): + write(MARK) + try: + for item in batch: + save(item) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} element') + raise + write(ADDITEMS) + dispatch[set] = save_set + + def save_frozenset(self, obj): + save = self.save + write = self.write + + if self.proto < 4: + self.save_reduce(frozenset, (list(obj),), obj=obj) + return + + write(MARK) + try: + for item in obj: + save(item) + except BaseException as exc: + exc.add_note(f'when serializing {_T(obj)} element') + raise + + if id(obj) in self.memo: + # If the object is already in the memo, this means it is + # recursive. In this case, throw away everything we put on the + # stack, and fetch the object back from the memo. 
+            write(POP_MARK + self.get(self.memo[id(obj)][0]))
+            return
+
+        write(FROZENSET)
+        self.memoize(obj)
+    dispatch[frozenset] = save_frozenset
+
+    def save_global(self, obj, name=None):
+        write = self.write
+        memo = self.memo
+
+        if name is None:
+            name = getattr(obj, '__qualname__', None)
+        if name is None:
+            name = obj.__name__
+
+        module_name = whichmodule(obj, name)
+        if self.proto >= 2:
+            code = _extension_registry.get((module_name, name), _NoValue)
+            if code is not _NoValue:
+                if code <= 0xff:
+                    data = pack("<B", code)
+                    write(EXT1 + data)
+                elif code <= 0xffff:
+                    write(EXT2 + pack("<H", code))
+                else:
+                    write(EXT4 + pack("<i", code))
+                return
+        if self.proto >= 4:
+            self.save(module_name)
+            self.save(name)
+            write(STACK_GLOBAL)
+        elif '.' in name:
+            # In protocol < 4, objects with multi-part __qualname__
+            # are represented as
+            # getattr(getattr(..., attrname1), attrname2).
+            dotted_path = name.split('.')
+            name = dotted_path.pop(0)
+            save = self.save
+            for attrname in dotted_path:
+                save(getattr)
+                if self.proto < 2:
+                    write(MARK)
+            self._save_toplevel_by_name(module_name, name)
+            for attrname in dotted_path:
+                save(attrname)
+                if self.proto < 2:
+                    write(TUPLE)
+                else:
+                    write(TUPLE2)
+                write(REDUCE)
+        else:
+            self._save_toplevel_by_name(module_name, name)
+
+        self.memoize(obj)
+
+    def _save_toplevel_by_name(self, module_name, name):
+        if self.proto >= 3:
+            # Non-ASCII identifiers are supported only with protocols >= 3.
+            encoding = "utf-8"
+        else:
+            if self.fix_imports:
+                r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
+                r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
+                if (module_name, name) in r_name_mapping:
+                    module_name, name = r_name_mapping[(module_name, name)]
+                elif module_name in r_import_mapping:
+                    module_name = r_import_mapping[module_name]
+            encoding = "ascii"
+        try:
+            self.write(GLOBAL + bytes(module_name, encoding) + b'\n')
+        except UnicodeEncodeError:
+            raise PicklingError(
+                f"can't pickle module identifier {module_name!r} using "
+                f"pickle protocol {self.proto}")
+        try:
+            self.write(bytes(name, encoding) + b'\n')
+        except UnicodeEncodeError:
+            raise PicklingError(
+                f"can't pickle global identifier {name!r} using "
+                f"pickle protocol {self.proto}")
+
+    def save_type(self, obj):
+        if obj is type(None):
+            return self.save_reduce(type, (None,), obj=obj)
+        elif obj is type(NotImplemented):
+            return self.save_reduce(type, (NotImplemented,), obj=obj)
+        elif obj is type(...):
+            return self.save_reduce(type, (...,), obj=obj)
+        return self.save_global(obj)
+
+    dispatch[FunctionType] = save_global
+    dispatch[type] = save_type
+
+
+# Unpickling machinery
+
+class _Unpickler:
+
+    def __init__(self, file, *, fix_imports=True,
+                 encoding="ASCII", errors="strict", buffers=None):
+        """This takes a binary file for reading a pickle data stream.
+
+        The protocol version of the pickle is detected automatically, so
+        no proto argument is needed.
+
+        The argument *file* must have two methods, a read() method that
+        takes an integer argument, and a readline() method that requires
+        no arguments.  Both methods should return bytes.  Thus *file*
+        can be a binary file object opened for reading, an io.BytesIO
+        object, or any other custom object that meets this interface.
+ + If *buffers* is not None, it should be an iterable of buffer-enabled + objects that is consumed each time the pickle stream references + an out-of-band buffer view. Such buffers have been given in order + to the *buffer_callback* of a Pickler object. + + If *buffers* is None (the default), then the buffers are taken + from the pickle stream, assuming they are serialized there. + It is an error for *buffers* to be None if the pickle stream + was produced with a non-None *buffer_callback*. + + Other optional arguments are *fix_imports*, *encoding* and + *errors*, which are used to control compatibility support for + pickle stream generated by Python 2. If *fix_imports* is True, + pickle will try to map the old Python 2 names to the new names + used in Python 3. The *encoding* and *errors* tell pickle how + to decode 8-bit string instances pickled by Python 2; these + default to 'ASCII' and 'strict', respectively. *encoding* can be + 'bytes' to read these 8-bit string instances as bytes objects. + """ + self._buffers = iter(buffers) if buffers is not None else None + self._file_readline = file.readline + self._file_read = file.read + self.memo = {} + self.encoding = encoding + self.errors = errors + self.proto = 0 + self.fix_imports = fix_imports + + def load(self): + """Read a pickled object representation from the open file. + + Return the reconstituted object hierarchy specified in the file. + """ + # Check whether Unpickler was initialized correctly. This is + # only needed to mimic the behavior of _pickle.Unpickler.dump(). + if not hasattr(self, "_file_read"): + raise UnpicklingError("Unpickler.__init__() was not called by " + "%s.__init__()" % (self.__class__.__name__,)) + self._unframer = _Unframer(self._file_read, self._file_readline) + self.read = self._unframer.read + self.readinto = self._unframer.readinto + self.readline = self._unframer.readline + self.metastack = [] + self.stack = [] + self.append = self.stack.append + self.proto = 0 + read = self.read + dispatch = self.dispatch + try: + while True: + key = read(1) + if not key: + raise EOFError + assert isinstance(key, bytes_types) + dispatch[key[0]](self) + except _Stop as stopinst: + return stopinst.value + + # Return a list of items pushed in the stack after last MARK instruction. 
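+    # For illustration: unpickling b'(K\x01K\x02t.' (MARK, BININT1 1,
+    # BININT1 2, TUPLE, STOP) pushes a fresh stack at the MARK, and
+    # pop_mark() below hands the accumulated items [1, 2] to the TUPLE
+    # loader, which leaves (1, 2) on the stack.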
+    def pop_mark(self):
+        items = self.stack
+        self.stack = self.metastack.pop()
+        self.append = self.stack.append
+        return items
+
+    def persistent_load(self, pid):
+        raise UnpicklingError("unsupported persistent id encountered")
+
+    dispatch = {}
+
+    def load_proto(self):
+        proto = self.read(1)[0]
+        if not 0 <= proto <= HIGHEST_PROTOCOL:
+            raise ValueError("unsupported pickle protocol: %d" % proto)
+        self.proto = proto
+    dispatch[PROTO[0]] = load_proto
+
+    def load_frame(self):
+        frame_size, = unpack('<Q', self.read(8))
+        if frame_size > sys.maxsize:
+            raise ValueError("frame size > sys.maxsize: %d" % frame_size)
+        self._unframer.load_frame(frame_size)
+    dispatch[FRAME[0]] = load_frame
+
+    def load_persid(self):
+        try:
+            pid = self.readline()[:-1].decode("ascii")
+        except UnicodeDecodeError:
+            raise UnpicklingError(
+                "persistent IDs in protocol 0 must be ASCII strings")
+        self.append(self.persistent_load(pid))
+    dispatch[PERSID[0]] = load_persid
+
+    def load_binpersid(self):
+        pid = self.stack.pop()
+        self.append(self.persistent_load(pid))
+    dispatch[BINPERSID[0]] = load_binpersid
+
+    def load_none(self):
+        self.append(None)
+    dispatch[NONE[0]] = load_none
+
+    def load_false(self):
+        self.append(False)
+    dispatch[NEWFALSE[0]] = load_false
+
+    def load_true(self):
+        self.append(True)
+    dispatch[NEWTRUE[0]] = load_true
+
+    def load_int(self):
+        data = self.readline()
+        if data == FALSE[1:]:
+            val = False
+        elif data == TRUE[1:]:
+            val = True
+        else:
+            val = int(data)
+        self.append(val)
+    dispatch[INT[0]] = load_int
+
+    def load_binint(self):
+        self.append(unpack('<i', self.read(4))[0])
+    dispatch[BININT[0]] = load_binint
+
+    def load_binint1(self):
+        self.append(self.read(1)[0])
+    dispatch[BININT1[0]] = load_binint1
+
+    def load_binint2(self):
+        self.append(unpack('<H', self.read(2))[0])
+    dispatch[BININT2[0]] = load_binint2
+
+    def load_long(self):
+        val = self.readline()[:-1]
+        if val and val[-1] == b'L'[0]:
+            val = val[:-1]
+        self.append(int(val, 0))
+    dispatch[LONG[0]] = load_long
+
+    def load_long1(self):
+        n = self.read(1)[0]
+        data = self.read(n)
+        self.append(decode_long(data))
+    dispatch[LONG1[0]] = load_long1
+
+    def load_long4(self):
+        n, = unpack('<i', self.read(4))
+        if n < 0:
+            # Corrupt or hostile pickle -- we never write one like this
+            raise UnpicklingError("LONG pickle has negative byte count")
+        data = self.read(n)
+        self.append(decode_long(data))
+    dispatch[LONG4[0]] = load_long4
+
+    def load_float(self):
+        self.append(float(self.readline()[:-1]))
+    dispatch[FLOAT[0]] = load_float
+
+    def load_binfloat(self):
+        self.append(unpack('>d', self.read(8))[0])
+    dispatch[BINFLOAT[0]] = load_binfloat
+
+    def _decode_string(self, value):
+        # Used to allow strings from Python 2 to be decoded either as
+        # bytes or Unicode strings.  This should be used only with the
+        # STRING, BINSTRING and SHORT_BINSTRING opcodes.
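+        # Example (illustrative): the protocol 0 pickle b"S'abc'\n." decodes
+        # to 'abc' with the default encoding, but stays b'abc' when the
+        # Unpickler was created with encoding="bytes".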
+        if self.encoding == "bytes":
+            return value
+        else:
+            return value.decode(self.encoding, self.errors)
+
+    def load_string(self):
+        data = self.readline()[:-1]
+        # Strip outermost quotes
+        if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'':
+            data = data[1:-1]
+        else:
+            raise UnpicklingError("the STRING opcode argument must be quoted")
+        self.append(self._decode_string(codecs.escape_decode(data)[0]))
+    dispatch[STRING[0]] = load_string
+
+    def load_binstring(self):
+        # Deprecated BINSTRING uses signed 32-bit length
+        len, = unpack('<i', self.read(4))
+        if len < 0:
+            raise UnpicklingError("BINSTRING pickle has negative byte count")
+        data = self.read(len)
+        self.append(self._decode_string(data))
+    dispatch[BINSTRING[0]] = load_binstring
+
+    def load_binbytes(self):
+        len, = unpack('<I', self.read(4))
+        if len > maxsize:
+            raise UnpicklingError("BINBYTES exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(self.read(len))
+    dispatch[BINBYTES[0]] = load_binbytes
+
+    def load_unicode(self):
+        self.append(str(self.readline()[:-1], 'raw-unicode-escape'))
+    dispatch[UNICODE[0]] = load_unicode
+
+    def load_binunicode(self):
+        len, = unpack('<I', self.read(4))
+        if len > maxsize:
+            raise UnpicklingError("BINUNICODE exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+    dispatch[BINUNICODE[0]] = load_binunicode
+
+    def load_binunicode8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+            raise UnpicklingError("BINUNICODE8 exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+    dispatch[BINUNICODE8[0]] = load_binunicode8
+
+    def load_binbytes8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+            raise UnpicklingError("BINBYTES8 exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        self.append(self.read(len))
+    dispatch[BINBYTES8[0]] = load_binbytes8
+
+    def load_bytearray8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+            raise UnpicklingError("BYTEARRAY8 exceeds system's maximum size "
+                                  "of %d bytes" % maxsize)
+        b = bytearray(len)
+        self.readinto(b)
+        self.append(b)
+    dispatch[BYTEARRAY8[0]] = load_bytearray8
+
+    def load_next_buffer(self):
+        if self._buffers is None:
+            raise UnpicklingError("pickle stream refers to out-of-band data "
+                                  "but no *buffers* argument was given")
+        try:
+            buf = next(self._buffers)
+        except StopIteration:
+            raise UnpicklingError("not enough out-of-band buffers")
+        self.append(buf)
+    dispatch[NEXT_BUFFER[0]] = load_next_buffer
+
+    def load_readonly_buffer(self):
+        buf = self.stack[-1]
+        with memoryview(buf) as m:
+            if not m.readonly:
+                self.stack[-1] = m.toreadonly()
+    dispatch[READONLY_BUFFER[0]] = load_readonly_buffer
+
+    def load_short_binstring(self):
+        len = self.read(1)[0]
+        data = self.read(len)
+        self.append(self._decode_string(data))
+    dispatch[SHORT_BINSTRING[0]] = load_short_binstring
+
+    def load_short_binbytes(self):
+        len = self.read(1)[0]
+        self.append(self.read(len))
+    dispatch[SHORT_BINBYTES[0]] = load_short_binbytes
+
+    def load_short_binunicode(self):
+        len = self.read(1)[0]
+        self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+    dispatch[SHORT_BINUNICODE[0]] = load_short_binunicode
+
+    def load_tuple(self):
+        items = self.pop_mark()
+        self.append(tuple(items))
+    dispatch[TUPLE[0]] = load_tuple
+
+    def load_empty_tuple(self):
+        self.append(())
+    dispatch[EMPTY_TUPLE[0]] = load_empty_tuple
+
+    def load_tuple1(self):
+        self.stack[-1] = (self.stack[-1],)
+    dispatch[TUPLE1[0]] = load_tuple1
+
+    def load_tuple2(self):
+        self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
+    dispatch[TUPLE2[0]] = load_tuple2
+
+    def load_tuple3(self):
+        self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
+    dispatch[TUPLE3[0]] = load_tuple3
+
+    def load_empty_list(self):
+        self.append([])
+    dispatch[EMPTY_LIST[0]] = load_empty_list
+
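+    # A small worked example (illustrative): pickle.dumps({}, 4) is
+    # b'\x80\x04}\x94.' -- PROTO 4, EMPTY_DICT, MEMOIZE, STOP -- so loading
+    # it runs load_proto, load_empty_dictionary (below), load_memoize and
+    # load_stop, in that order.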
+    def load_empty_dictionary(self):
+        self.append({})
+    dispatch[EMPTY_DICT[0]] = load_empty_dictionary
+
+    def load_empty_set(self):
+        self.append(set())
+    dispatch[EMPTY_SET[0]] = load_empty_set
+
+    def load_frozenset(self):
+        items = self.pop_mark()
+        self.append(frozenset(items))
+    dispatch[FROZENSET[0]] = load_frozenset
+
+    def load_list(self):
+        items = self.pop_mark()
+        self.append(items)
+    dispatch[LIST[0]] = load_list
+
+    def load_dict(self):
+        items = self.pop_mark()
+        d = {items[i]: items[i+1]
+             for i in range(0, len(items), 2)}
+        self.append(d)
+    dispatch[DICT[0]] = load_dict
+
+    # INST and OBJ differ only in how they get a class object.  It's not
+    # only sensible to do the rest in a common routine, the two routines
+    # previously diverged and grew different bugs.
+    # klass is the class to instantiate, and k points to the topmost mark
+    # object, following which are the arguments for klass.__init__.
+    def _instantiate(self, klass, args):
+        if (args or not isinstance(klass, type) or
+                hasattr(klass, "__getinitargs__")):
+            try:
+                value = klass(*args)
+            except TypeError as err:
+                raise TypeError("in constructor for %s: %s" %
+                                (klass.__name__, str(err)), err.__traceback__)
+        else:
+            value = klass.__new__(klass)
+        self.append(value)
+
+    def load_inst(self):
+        module = self.readline()[:-1].decode("ascii")
+        name = self.readline()[:-1].decode("ascii")
+        klass = self.find_class(module, name)
+        self._instantiate(klass, self.pop_mark())
+    dispatch[INST[0]] = load_inst
+
+    def load_obj(self):
+        # Stack is ... markobject classobject arg1 arg2 ...
+        args = self.pop_mark()
+        cls = args.pop(0)
+        self._instantiate(cls, args)
+    dispatch[OBJ[0]] = load_obj
+
+    def load_newobj(self):
+        args = self.stack.pop()
+        cls = self.stack.pop()
+        obj = cls.__new__(cls, *args)
+        self.append(obj)
+    dispatch[NEWOBJ[0]] = load_newobj
+
+    def load_newobj_ex(self):
+        kwargs = self.stack.pop()
+        args = self.stack.pop()
+        cls = self.stack.pop()
+        obj = cls.__new__(cls, *args, **kwargs)
+        self.append(obj)
+    dispatch[NEWOBJ_EX[0]] = load_newobj_ex
+
+    def load_global(self):
+        module = self.readline()[:-1].decode("utf-8")
+        name = self.readline()[:-1].decode("utf-8")
+        klass = self.find_class(module, name)
+        self.append(klass)
+    dispatch[GLOBAL[0]] = load_global
+
+    def load_stack_global(self):
+        name = self.stack.pop()
+        module = self.stack.pop()
+        if type(name) is not str or type(module) is not str:
+            raise UnpicklingError("STACK_GLOBAL requires str")
+        self.append(self.find_class(module, name))
+    dispatch[STACK_GLOBAL[0]] = load_stack_global
+
+    def load_ext1(self):
+        code = self.read(1)[0]
+        self.get_extension(code)
+    dispatch[EXT1[0]] = load_ext1
+
+    def load_ext2(self):
+        code, = unpack('<H', self.read(2))
+        self.get_extension(code)
+    dispatch[EXT2[0]] = load_ext2
+
+    def load_ext4(self):
+        code, = unpack('<i', self.read(4))
+        self.get_extension(code)
+    dispatch[EXT4[0]] = load_ext4
+
+    def get_extension(self, code):
+        obj = _extension_cache.get(code, _NoValue)
+        if obj is not _NoValue:
+            self.append(obj)
+            return
+        key = _inverted_registry.get(code)
+        if not key:
+            if code <= 0:  # note that 0 is forbidden
+                # Corrupt or hostile pickle.
+                raise UnpicklingError("EXT specifies code <= 0")
+            raise ValueError("unregistered extension code %d" % code)
+        obj = self.find_class(*key)
+        _extension_cache[code] = obj
+        self.append(obj)
+
+    def find_class(self, module, name):
+        # Subclasses may override this.
+        sys.audit('pickle.find_class', module, name)
+        if self.proto < 3 and self.fix_imports:
+            if (module, name) in _compat_pickle.NAME_MAPPING:
+                module, name = _compat_pickle.NAME_MAPPING[(module, name)]
+            elif module in _compat_pickle.IMPORT_MAPPING:
+                module = _compat_pickle.IMPORT_MAPPING[module]
+        __import__(module, level=0)
+        if self.proto >= 4 and '.' in name:
+            dotted_path = name.split('.')
+            try:
+                return _getattribute(sys.modules[module], dotted_path)
+            except AttributeError:
+                raise AttributeError(
+                    f"Can't resolve path {name!r} on module {module!r}")
+        else:
+            return getattr(sys.modules[module], name)
+
+    def load_reduce(self):
+        stack = self.stack
+        args = stack.pop()
+        func = stack[-1]
+        stack[-1] = func(*args)
+    dispatch[REDUCE[0]] = load_reduce
+
+    def load_pop(self):
+        if self.stack:
+            del self.stack[-1]
+        else:
+            self.pop_mark()
+    dispatch[POP[0]] = load_pop
+
+    def load_pop_mark(self):
+        self.pop_mark()
+    dispatch[POP_MARK[0]] = load_pop_mark
+
+    def load_dup(self):
+        self.append(self.stack[-1])
+    dispatch[DUP[0]] = load_dup
+
+    def load_get(self):
+        i = int(self.readline()[:-1])
+        try:
+            self.append(self.memo[i])
+        except KeyError:
+            msg = f'Memo value not found at index {i}'
+            raise UnpicklingError(msg) from None
+    dispatch[GET[0]] = load_get
+
+    def load_binget(self):
+        i = self.read(1)[0]
+        try:
+            self.append(self.memo[i])
+        except KeyError as exc:
+            msg = f'Memo value not found at index {i}'
+            raise UnpicklingError(msg) from None
+    dispatch[BINGET[0]] = load_binget
+
+    def load_long_binget(self):
+        i, = unpack('<I', self.read(4))
+        try:
+            self.append(self.memo[i])
+        except KeyError as exc:
+            msg = f'Memo value not found at index {i}'
+            raise UnpicklingError(msg) from None
+    dispatch[LONG_BINGET[0]] = load_long_binget
+
+    def load_put(self):
+        i = int(self.readline()[:-1])
+        if i < 0:
+            raise ValueError("negative PUT argument")
+        self.memo[i] = self.stack[-1]
+    dispatch[PUT[0]] = load_put
+
+    def load_binput(self):
+        i = self.read(1)[0]
+        if i < 0:
+            raise ValueError("negative BINPUT argument")
+        self.memo[i] = self.stack[-1]
+    dispatch[BINPUT[0]] = load_binput
+
+    def load_long_binput(self):
+        i, = unpack('<I', self.read(4))
+        if i > maxsize:
+            raise ValueError("negative LONG_BINPUT argument")
+        self.memo[i] = self.stack[-1]
+    dispatch[LONG_BINPUT[0]] = load_long_binput
+
+    def load_memoize(self):
+        memo = self.memo
+        memo[len(memo)] = self.stack[-1]
+    dispatch[MEMOIZE[0]] = load_memoize
+
+    def load_append(self):
+        stack = self.stack
+        value = stack.pop()
+        list = stack[-1]
+        list.append(value)
+    dispatch[APPEND[0]] = load_append
+
+    def load_appends(self):
+        items = self.pop_mark()
+        list_obj = self.stack[-1]
+        try:
+            extend = list_obj.extend
+        except AttributeError:
+            pass
+        else:
+            extend(items)
+            return
+        # Even if the PEP 307 requires extend() and append() methods,
+        # fall back on append() if the object has no extend() method
+        # for backward compatibility.
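+        # Illustrative case for the fallback: a list subclass whose
+        # extend attribute raises AttributeError, e.g.
+        #   class NoExtend(list):
+        #       extend = property()  # unreadable attribute
+        # is still filled one item at a time via append() below.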
+ append = list_obj.append + for item in items: + append(item) + dispatch[APPENDS[0]] = load_appends + + def load_setitem(self): + stack = self.stack + value = stack.pop() + key = stack.pop() + dict = stack[-1] + dict[key] = value + dispatch[SETITEM[0]] = load_setitem + + def load_setitems(self): + items = self.pop_mark() + dict = self.stack[-1] + for i in range(0, len(items), 2): + dict[items[i]] = items[i + 1] + dispatch[SETITEMS[0]] = load_setitems + + def load_additems(self): + items = self.pop_mark() + set_obj = self.stack[-1] + if isinstance(set_obj, set): + set_obj.update(items) + else: + add = set_obj.add + for item in items: + add(item) + dispatch[ADDITEMS[0]] = load_additems + + def load_build(self): + stack = self.stack + state = stack.pop() + inst = stack[-1] + setstate = getattr(inst, "__setstate__", _NoValue) + if setstate is not _NoValue: + setstate(state) + return + slotstate = None + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if state: + inst_dict = inst.__dict__ + intern = sys.intern + for k, v in state.items(): + if type(k) is str: + inst_dict[intern(k)] = v + else: + inst_dict[k] = v + if slotstate: + for k, v in slotstate.items(): + setattr(inst, k, v) + dispatch[BUILD[0]] = load_build + + def load_mark(self): + self.metastack.append(self.stack) + self.stack = [] + self.append = self.stack.append + dispatch[MARK[0]] = load_mark + + def load_stop(self): + value = self.stack.pop() + raise _Stop(value) + dispatch[STOP[0]] = load_stop + + +# Shorthands + +def _dump(obj, file, protocol=None, *, fix_imports=True, buffer_callback=None): + _Pickler(file, protocol, fix_imports=fix_imports, + buffer_callback=buffer_callback).dump(obj) + +def _dumps(obj, protocol=None, *, fix_imports=True, buffer_callback=None): + f = io.BytesIO() + _Pickler(f, protocol, fix_imports=fix_imports, + buffer_callback=buffer_callback).dump(obj) + res = f.getvalue() + assert isinstance(res, bytes_types) + return res + +def _load(file, *, fix_imports=True, encoding="ASCII", errors="strict", + buffers=None): + return _Unpickler(file, fix_imports=fix_imports, buffers=buffers, + encoding=encoding, errors=errors).load() + +def _loads(s, /, *, fix_imports=True, encoding="ASCII", errors="strict", + buffers=None): + if isinstance(s, str): + raise TypeError("Can't load pickle from unicode string") + file = io.BytesIO(s) + return _Unpickler(file, fix_imports=fix_imports, buffers=buffers, + encoding=encoding, errors=errors).load() + +# Use the faster _pickle if possible +try: + from _pickle import ( + PickleError, + PicklingError, + UnpicklingError, + Pickler, + Unpickler, + dump, + dumps, + load, + loads + ) +except ImportError: + Pickler, Unpickler = _Pickler, _Unpickler + dump, dumps, load, loads = _dump, _dumps, _load, _loads + + +def _main(args=None): + import argparse + import pprint + parser = argparse.ArgumentParser( + description='display contents of the pickle files', + color=True, + ) + parser.add_argument( + 'pickle_file', + nargs='+', help='the pickle file') + args = parser.parse_args(args) + for fn in args.pickle_file: + if fn == '-': + obj = load(sys.stdin.buffer) + else: + with open(fn, 'rb') as f: + obj = load(f) + pprint.pprint(obj) + + +if __name__ == "__main__": + _main() diff --git a/Python314_4_x86_Template/Lib/pickletools.py b/Python314_4_x86_Template/Lib/pickletools.py new file mode 100644 index 00000000..254b6c7f --- /dev/null +++ b/Python314_4_x86_Template/Lib/pickletools.py @@ -0,0 +1,2887 @@ +'''"Executable documentation" for the pickle module. 
+ +Extensive comments about the pickle protocols and pickle-machine opcodes +can be found here. Some functions meant for external use: + +genops(pickle) + Generate all the opcodes in a pickle, as (opcode, arg, position) triples. + +dis(pickle, out=None, memo=None, indentlevel=4) + Print a symbolic disassembly of a pickle. +''' + +import codecs +import io +import pickle +import re +import sys + +__all__ = ['dis', 'genops', 'optimize'] + +bytes_types = pickle.bytes_types + +# Other ideas: +# +# - A pickle verifier: read a pickle and check it exhaustively for +# well-formedness. dis() does a lot of this already. +# +# - A protocol identifier: examine a pickle and return its protocol number +# (== the highest .proto attr value among all the opcodes in the pickle). +# dis() already prints this info at the end. +# +# - A pickle optimizer: for example, tuple-building code is sometimes more +# elaborate than necessary, catering for the possibility that the tuple +# is recursive. Or lots of times a PUT is generated that's never accessed +# by a later GET. + + +# "A pickle" is a program for a virtual pickle machine (PM, but more accurately +# called an unpickling machine). It's a sequence of opcodes, interpreted by the +# PM, building an arbitrarily complex Python object. +# +# For the most part, the PM is very simple: there are no looping, testing, or +# conditional instructions, no arithmetic and no function calls. Opcodes are +# executed once each, from first to last, until a STOP opcode is reached. +# +# The PM has two data areas, "the stack" and "the memo". +# +# Many opcodes push Python objects onto the stack; e.g., INT pushes a Python +# integer object on the stack, whose value is gotten from a decimal string +# literal immediately following the INT opcode in the pickle bytestream. Other +# opcodes take Python objects off the stack. The result of unpickling is +# whatever object is left on the stack when the final STOP opcode is executed. +# +# The memo is simply an array of objects, or it can be implemented as a dict +# mapping little integers to objects. The memo serves as the PM's "long term +# memory", and the little integers indexing the memo are akin to variable +# names. Some opcodes pop a stack object into the memo at a given index, +# and others push a memo object at a given index onto the stack again. +# +# At heart, that's all the PM has. Subtleties arise for these reasons: +# +# + Object identity. Objects can be arbitrarily complex, and subobjects +# may be shared (for example, the list [a, a] refers to the same object a +# twice). It can be vital that unpickling recreate an isomorphic object +# graph, faithfully reproducing sharing. +# +# + Recursive objects. For example, after "L = []; L.append(L)", L is a +# list, and L[0] is the same list. This is related to the object identity +# point, and some sequences of pickle opcodes are subtle in order to +# get the right result in all cases. +# +# + Things pickle doesn't know everything about. Examples of things pickle +# does know everything about are Python's builtin scalar and container +# types, like ints and tuples. They generally have opcodes dedicated to +# them. For things like module references and instances of user-defined +# classes, pickle's knowledge is limited. Historically, many enhancements +# have been made to the pickle protocol in order to do a better (faster, +# and/or more compact) job on those. +# +# + Backward compatibility and micro-optimization. 
As explained below,
+#   pickle opcodes never go away, not even when better ways to do a thing
+#   get invented. The repertoire of the PM just keeps growing over time.
+#   For example, protocol 0 had two opcodes for building Python integers (INT
+#   and LONG), protocol 1 added three more for more-efficient pickling of short
+#   integers, and protocol 2 added two more for more-efficient pickling of
+#   long integers (before protocol 2, the only ways to pickle a Python long
+#   took time quadratic in the number of digits, for both pickling and
+#   unpickling). "Opcode bloat" isn't so much a subtlety as a source of
+#   wearying complication.
+#
+#
+# Pickle protocols:
+#
+# For compatibility, the meaning of a pickle opcode never changes. Instead new
+# pickle opcodes get added, and each version's unpickler can handle all the
+# pickle opcodes in all protocol versions to date. So old pickles continue to
+# be readable forever. The pickler can generally be told to restrict itself to
+# the subset of opcodes available under previous protocol versions too, so that
+# users can create pickles under the current version readable by older
+# versions. However, a pickle does not contain its version number embedded
+# within it. If an older unpickler tries to read a pickle using a later
+# protocol, the result is most likely an exception due to seeing an unknown (in
+# the older unpickler) opcode.
+#
+# The original pickle used what's now called "protocol 0", and what was called
+# "text mode" before Python 2.3. The entire pickle bytestream is made up of
+# printable 7-bit ASCII characters, plus the newline character, in protocol 0.
+# That's why it was called text mode. Protocol 0 is small and elegant, but
+# sometimes painfully inefficient.
+#
+# The second major set of additions is now called "protocol 1", and was called
+# "binary mode" before Python 2.3. This added many opcodes with arguments
+# consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
+# bytes. Binary mode pickles can be substantially smaller than equivalent
+# text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
+# int as 4 bytes following the opcode, which is cheaper to unpickle than the
+# (perhaps) 11-character decimal string attached to INT. Protocol 1 also added
+# a number of opcodes that operate on many stack elements at once (like APPENDS
+# and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
+#
+# The third major set of additions came in Python 2.3, and is called "protocol
+# 2". This added:
+#
+# - A better way to pickle instances of new-style classes (NEWOBJ).
+#
+# - A way for a pickle to identify its protocol (PROTO).
+#
+# - Time- and space- efficient pickling of long ints (LONG{1,4}).
+#
+# - Shortcuts for small tuples (TUPLE{1,2,3}).
+#
+# - Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
+#
+# - The "extension registry", a vector of popular objects that can be pushed
+#   efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
+#   the registry contents are predefined (there's nothing akin to the memo's
+#   PUT).
+#
+# Another independent change with Python 2.3 is the abandonment of any
+# pretense that it might be safe to load pickles received from untrusted
+# parties -- no sufficient security analysis has been done to guarantee
+# this and there isn't a use case that warrants the expense of such an
+# analysis.
+#
+# To this end, all tests for __safe_for_unpickling__ or for
+# copyreg.safe_constructors are removed from the unpickling code.
+# References to these variables in the descriptions below are to be seen
+# as describing unpickling in Python 2.2 and before.
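To make the protocol comparison above concrete, here is a minimal illustrative sketch (plain Python, not part of the vendored file; it uses only the public pickle and pickletools APIs) that disassembles the same small list under protocols 0 and 2:

    import pickle
    import pickletools

    # Protocol 0: printable ASCII opcodes -- MARK, LIST, PUT, then an
    # INT/APPEND pair per element, then STOP.
    pickletools.dis(pickle.dumps([1, 2], protocol=0))

    # Protocol 2: binary opcodes -- PROTO, EMPTY_LIST, BINPUT, MARK,
    # BININT1, BININT1, APPENDS, STOP -- a noticeably smaller bytestream.
    pickletools.dis(pickle.dumps([1, 2], protocol=2))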
+
+
+# Meta-rule: Descriptions are stored in instances of descriptor objects,
+# with plain constructors. No meta-language is defined from which
+# descriptors could be constructed. If you want, e.g., XML, write a little
+# program to generate XML from the objects.
+
+##############################################################################
+# Some pickle opcodes have an argument, following the opcode in the
+# bytestream. An argument is of a specific type, described by an instance
+# of ArgumentDescriptor. These are not to be confused with arguments taken
+# off the stack -- ArgumentDescriptor applies only to arguments embedded in
+# the opcode stream, immediately following an opcode.
+
+# Represents the number of bytes consumed by an argument delimited by the
+# next newline character.
+UP_TO_NEWLINE = -1
+
+# Represents the number of bytes consumed by a two-argument opcode where
+# the first argument gives the number of bytes in the second argument.
+TAKEN_FROM_ARGUMENT1  = -2   # num bytes is 1-byte unsigned int
+TAKEN_FROM_ARGUMENT4  = -3   # num bytes is 4-byte signed little-endian int
+TAKEN_FROM_ARGUMENT4U = -4   # num bytes is 4-byte unsigned little-endian int
+TAKEN_FROM_ARGUMENT8U = -5   # num bytes is 8-byte unsigned little-endian int
+
+class ArgumentDescriptor(object):
+    __slots__ = (
+        # name of descriptor record, also a module global name; a string
+        'name',
+
+        # length of argument, in bytes; an int; UP_TO_NEWLINE and
+        # TAKEN_FROM_ARGUMENT{1,4,8} are negative values for variable-length
+        # cases
+        'n',
+
+        # a function taking a file-like object, reading this kind of argument
+        # from the object at the current position, advancing the current
+        # position by n bytes, and returning the value of the argument
+        'reader',
+
+        # human-readable docs for this arg descriptor; a string
+        'doc',
+    )
+
+    def __init__(self, name, n, reader, doc):
+        assert isinstance(name, str)
+        self.name = name
+
+        assert isinstance(n, int) and (n >= 0 or
+                                       n in (UP_TO_NEWLINE,
+                                             TAKEN_FROM_ARGUMENT1,
+                                             TAKEN_FROM_ARGUMENT4,
+                                             TAKEN_FROM_ARGUMENT4U,
+                                             TAKEN_FROM_ARGUMENT8U))
+        self.n = n
+
+        self.reader = reader
+
+        assert isinstance(doc, str)
+        self.doc = doc
+
+from struct import unpack as _unpack
+
+def read_uint1(f):
+    r"""
+    >>> import io
+    >>> read_uint1(io.BytesIO(b'\xff'))
+    255
+    """
+
+    data = f.read(1)
+    if data:
+        return data[0]
+    raise ValueError("not enough data in stream to read uint1")
+
+uint1 = ArgumentDescriptor(
+            name='uint1',
+            n=1,
+            reader=read_uint1,
+            doc="One-byte unsigned integer.")
+
+
+def read_uint2(f):
+    r"""
+    >>> import io
+    >>> read_uint2(io.BytesIO(b'\xff\x00'))
+    255
+    >>> read_uint2(io.BytesIO(b'\xff\xff'))
+    65535
+    """
+
+    data = f.read(2)
+    if len(data) == 2:
+        return _unpack("<H", data)[0]
+    raise ValueError("not enough data in stream to read uint2")
+
+uint2 = ArgumentDescriptor(
+            name='uint2',
+            n=2,
+            reader=read_uint2,
+            doc="Two-byte unsigned integer, little-endian.")
+
+
+def read_int4(f):
+    r"""
+    >>> import io
+    >>> read_int4(io.BytesIO(b'\xff\x00\x00\x00'))
+    255
+    >>> read_int4(io.BytesIO(b'\x00\x00\x00\x80')) == -(2**31)
+    True
+    """
+
+    data = f.read(4)
+    if len(data) == 4:
+        return _unpack("<i", data)[0]
+    raise ValueError("not enough data in stream to read int4")
+
+int4 = ArgumentDescriptor(
+           name='int4',
+           n=4,
+           reader=read_int4,
+           doc="Four-byte signed integer, little-endian, 2's complement.")
+
+
+def read_uint4(f):
+    r"""
+    >>> import io
+    >>> read_uint4(io.BytesIO(b'\xff\x00\x00\x00'))
+    255
+    >>> read_uint4(io.BytesIO(b'\x00\x00\x00\x80')) == 2**31
+    True
+    """
+
+    data = f.read(4)
+    if len(data) == 4:
+        return _unpack("<I", data)[0]
+    raise ValueError("not enough data in stream to read uint4")
+
+uint4 = ArgumentDescriptor(
+            name='uint4',
+            n=4,
+            reader=read_uint4,
+            doc="Four-byte unsigned integer, little-endian.")
+
+
+def read_uint8(f):
+    r"""
+    >>> import io
+    >>> read_uint8(io.BytesIO(b'\xff\x00\x00\x00\x00\x00\x00\x00'))
+    255
+    >>> read_uint8(io.BytesIO(b'\xff' * 8)) == 2**64-1
+    True
+    """
+
+    data = f.read(8)
+    if len(data) == 8:
+        return _unpack("<Q", data)[0]
+    raise ValueError("not enough data in stream to read uint8")
+
+uint8 = ArgumentDescriptor(
+            name='uint8',
+            n=8,
+            reader=read_uint8,
+            doc="Eight-byte unsigned integer, little-endian.")
+
+
+def read_stringnl(f, decode=True, stripquotes=True, *, encoding='latin-1'):
+    r"""
+    >>> import io
+    >>>
read_stringnl(io.BytesIO(b"'abcd'\nefg\n")) + 'abcd' + + >>> read_stringnl(io.BytesIO(b"\n")) + Traceback (most recent call last): + ... + ValueError: no string quotes around b'' + + >>> read_stringnl(io.BytesIO(b"\n"), stripquotes=False) + '' + + >>> read_stringnl(io.BytesIO(b"''\n")) + '' + + >>> read_stringnl(io.BytesIO(b'"abcd"')) + Traceback (most recent call last): + ... + ValueError: no newline found when trying to read stringnl + + Embedded escapes are undone in the result. + >>> read_stringnl(io.BytesIO(br"'a\n\\b\x00c\td'" + b"\n'e'")) + 'a\n\\b\x00c\td' + """ + + data = f.readline() + if not data.endswith(b'\n'): + raise ValueError("no newline found when trying to read stringnl") + data = data[:-1] # lose the newline + + if stripquotes: + for q in (b'"', b"'"): + if data.startswith(q): + if not data.endswith(q): + raise ValueError("string quote %r not found at both " + "ends of %r" % (q, data)) + data = data[1:-1] + break + else: + raise ValueError("no string quotes around %r" % data) + + if decode: + data = codecs.escape_decode(data)[0].decode(encoding) + return data + +stringnl = ArgumentDescriptor( + name='stringnl', + n=UP_TO_NEWLINE, + reader=read_stringnl, + doc="""A newline-terminated string. + + This is a repr-style string, with embedded escapes, and + bracketing quotes. + """) + +def read_stringnl_noescape(f): + return read_stringnl(f, stripquotes=False, encoding='utf-8') + +stringnl_noescape = ArgumentDescriptor( + name='stringnl_noescape', + n=UP_TO_NEWLINE, + reader=read_stringnl_noescape, + doc="""A newline-terminated string. + + This is a str-style string, without embedded escapes, + or bracketing quotes. It should consist solely of + printable ASCII characters. + """) + +def read_stringnl_noescape_pair(f): + r""" + >>> import io + >>> read_stringnl_noescape_pair(io.BytesIO(b"Queue\nEmpty\njunk")) + 'Queue Empty' + """ + + return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f)) + +stringnl_noescape_pair = ArgumentDescriptor( + name='stringnl_noescape_pair', + n=UP_TO_NEWLINE, + reader=read_stringnl_noescape_pair, + doc="""A pair of newline-terminated strings. + + These are str-style strings, without embedded + escapes, or bracketing quotes. They should + consist solely of printable ASCII characters. + The pair is returned as a single string, with + a single blank separating the two strings. + """) + + +def read_string1(f): + r""" + >>> import io + >>> read_string1(io.BytesIO(b"\x00")) + '' + >>> read_string1(io.BytesIO(b"\x03abcdef")) + 'abc' + """ + + n = read_uint1(f) + assert n >= 0 + data = f.read(n) + if len(data) == n: + return data.decode("latin-1") + raise ValueError("expected %d bytes in a string1, but only %d remain" % + (n, len(data))) + +string1 = ArgumentDescriptor( + name="string1", + n=TAKEN_FROM_ARGUMENT1, + reader=read_string1, + doc="""A counted string. + + The first argument is a 1-byte unsigned int giving the number + of bytes in the string, and the second argument is that many + bytes. + """) + + +def read_string4(f): + r""" + >>> import io + >>> read_string4(io.BytesIO(b"\x00\x00\x00\x00abc")) + '' + >>> read_string4(io.BytesIO(b"\x03\x00\x00\x00abcdef")) + 'abc' + >>> read_string4(io.BytesIO(b"\x00\x00\x00\x03abcdef")) + Traceback (most recent call last): + ... 
+    ValueError: expected 50331648 bytes in a string4, but only 6 remain
+    """
+
+    n = read_int4(f)
+    if n < 0:
+        raise ValueError("string4 byte count < 0: %d" % n)
+    data = f.read(n)
+    if len(data) == n:
+        return data.decode("latin-1")
+    raise ValueError("expected %d bytes in a string4, but only %d remain" %
+                     (n, len(data)))
+
+string4 = ArgumentDescriptor(
+              name="string4",
+              n=TAKEN_FROM_ARGUMENT4,
+              reader=read_string4,
+              doc="""A counted string.
+
+              The first argument is a 4-byte little-endian signed int giving
+              the number of bytes in the string, and the second argument is
+              that many bytes.
+              """)
+
+
+def read_bytes1(f):
+    r"""
+    >>> import io
+    >>> read_bytes1(io.BytesIO(b"\x00"))
+    b''
+    >>> read_bytes1(io.BytesIO(b"\x03abcdef"))
+    b'abc'
+    """
+
+    n = read_uint1(f)
+    assert n >= 0
+    data = f.read(n)
+    if len(data) == n:
+        return data
+    raise ValueError("expected %d bytes in a bytes1, but only %d remain" %
+                     (n, len(data)))
+
+bytes1 = ArgumentDescriptor(
+              name="bytes1",
+              n=TAKEN_FROM_ARGUMENT1,
+              reader=read_bytes1,
+              doc="""A counted bytes string.
+
+              The first argument is a 1-byte unsigned int giving the number
+              of bytes, and the second argument is that many bytes.
+              """)
+
+
+def read_bytes4(f):
+    r"""
+    >>> import io
+    >>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x00abc"))
+    b''
+    >>> read_bytes4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
+    b'abc'
+    >>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
+    Traceback (most recent call last):
+    ...
+    ValueError: expected 50331648 bytes in a bytes4, but only 6 remain
+    """
+
+    n = read_uint4(f)
+    assert n >= 0
+    if n > sys.maxsize:
+        raise ValueError("bytes4 byte count > sys.maxsize: %d" % n)
+    data = f.read(n)
+    if len(data) == n:
+        return data
+    raise ValueError("expected %d bytes in a bytes4, but only %d remain" %
+                     (n, len(data)))
+
+bytes4 = ArgumentDescriptor(
+              name="bytes4",
+              n=TAKEN_FROM_ARGUMENT4U,
+              reader=read_bytes4,
+              doc="""A counted bytes string.
+
+              The first argument is a 4-byte little-endian unsigned int giving
+              the number of bytes, and the second argument is that many bytes.
+              """)
+
+
+def read_bytes8(f):
+    r"""
+    >>> import io, struct, sys
+    >>> read_bytes8(io.BytesIO(b"\x00\x00\x00\x00\x00\x00\x00\x00abc"))
+    b''
+    >>> read_bytes8(io.BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00abcdef"))
+    b'abc'
+    >>> bigsize8 = struct.pack("<Q", sys.maxsize//3)
+    >>> read_bytes8(io.BytesIO(bigsize8 + b"abcdef"))  #doctest: +ELLIPSIS
+    Traceback (most recent call last):
+    ...
+    ValueError: expected ... bytes in a bytes8, but only 6 remain
+    """
+
+    n = read_uint8(f)
+    assert n >= 0
+    if n > sys.maxsize:
+        raise ValueError("bytes8 byte count > sys.maxsize: %d" % n)
+    data = f.read(n)
+    if len(data) == n:
+        return data
+    raise ValueError("expected %d bytes in a bytes8, but only %d remain" %
+                     (n, len(data)))
+
+bytes8 = ArgumentDescriptor(
+              name="bytes8",
+              n=TAKEN_FROM_ARGUMENT8U,
+              reader=read_bytes8,
+              doc="""A counted bytes string.
+
+              The first argument is an 8-byte little-endian unsigned int giving
+              the number of bytes, and the second argument is that many bytes.
+              """)
+
+
+def read_bytearray8(f):
+    r"""
+    >>> import io, struct, sys
+    >>> read_bytearray8(io.BytesIO(b"\x00\x00\x00\x00\x00\x00\x00\x00abc"))
+    bytearray(b'')
+    >>> read_bytearray8(io.BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00abcdef"))
+    bytearray(b'abc')
+    >>> bigsize8 = struct.pack("<Q", sys.maxsize//3)
+    >>> read_bytearray8(io.BytesIO(bigsize8 + b"abcdef"))  #doctest: +ELLIPSIS
+    Traceback (most recent call last):
+    ...
+    ValueError: expected ...
bytes in a bytearray8, but only 6 remain + """ + + n = read_uint8(f) + assert n >= 0 + if n > sys.maxsize: + raise ValueError("bytearray8 byte count > sys.maxsize: %d" % n) + data = f.read(n) + if len(data) == n: + return bytearray(data) + raise ValueError("expected %d bytes in a bytearray8, but only %d remain" % + (n, len(data))) + +bytearray8 = ArgumentDescriptor( + name="bytearray8", + n=TAKEN_FROM_ARGUMENT8U, + reader=read_bytearray8, + doc="""A counted bytearray. + + The first argument is an 8-byte little-endian unsigned int giving + the number of bytes, and the second argument is that many bytes. + """) + +def read_unicodestringnl(f): + r""" + >>> import io + >>> read_unicodestringnl(io.BytesIO(b"abc\\uabcd\njunk")) == 'abc\uabcd' + True + """ + + data = f.readline() + if not data.endswith(b'\n'): + raise ValueError("no newline found when trying to read " + "unicodestringnl") + data = data[:-1] # lose the newline + return str(data, 'raw-unicode-escape') + +unicodestringnl = ArgumentDescriptor( + name='unicodestringnl', + n=UP_TO_NEWLINE, + reader=read_unicodestringnl, + doc="""A newline-terminated Unicode string. + + This is raw-unicode-escape encoded, so consists of + printable ASCII characters, and may contain embedded + escape sequences. + """) + + +def read_unicodestring1(f): + r""" + >>> import io + >>> s = 'abcd\uabcd' + >>> enc = s.encode('utf-8') + >>> enc + b'abcd\xea\xaf\x8d' + >>> n = bytes([len(enc)]) # little-endian 1-byte length + >>> t = read_unicodestring1(io.BytesIO(n + enc + b'junk')) + >>> s == t + True + + >>> read_unicodestring1(io.BytesIO(n + enc[:-1])) + Traceback (most recent call last): + ... + ValueError: expected 7 bytes in a unicodestring1, but only 6 remain + """ + + n = read_uint1(f) + assert n >= 0 + data = f.read(n) + if len(data) == n: + return str(data, 'utf-8', 'surrogatepass') + raise ValueError("expected %d bytes in a unicodestring1, but only %d " + "remain" % (n, len(data))) + +unicodestring1 = ArgumentDescriptor( + name="unicodestring1", + n=TAKEN_FROM_ARGUMENT1, + reader=read_unicodestring1, + doc="""A counted Unicode string. + + The first argument is a 1-byte little-endian signed int + giving the number of bytes in the string, and the second + argument-- the UTF-8 encoding of the Unicode string -- + contains that many bytes. + """) + + +def read_unicodestring4(f): + r""" + >>> import io + >>> s = 'abcd\uabcd' + >>> enc = s.encode('utf-8') + >>> enc + b'abcd\xea\xaf\x8d' + >>> n = bytes([len(enc), 0, 0, 0]) # little-endian 4-byte length + >>> t = read_unicodestring4(io.BytesIO(n + enc + b'junk')) + >>> s == t + True + + >>> read_unicodestring4(io.BytesIO(n + enc[:-1])) + Traceback (most recent call last): + ... + ValueError: expected 7 bytes in a unicodestring4, but only 6 remain + """ + + n = read_uint4(f) + assert n >= 0 + if n > sys.maxsize: + raise ValueError("unicodestring4 byte count > sys.maxsize: %d" % n) + data = f.read(n) + if len(data) == n: + return str(data, 'utf-8', 'surrogatepass') + raise ValueError("expected %d bytes in a unicodestring4, but only %d " + "remain" % (n, len(data))) + +unicodestring4 = ArgumentDescriptor( + name="unicodestring4", + n=TAKEN_FROM_ARGUMENT4U, + reader=read_unicodestring4, + doc="""A counted Unicode string. + + The first argument is a 4-byte little-endian signed int + giving the number of bytes in the string, and the second + argument-- the UTF-8 encoding of the Unicode string -- + contains that many bytes. 
+ """) + + +def read_unicodestring8(f): + r""" + >>> import io + >>> s = 'abcd\uabcd' + >>> enc = s.encode('utf-8') + >>> enc + b'abcd\xea\xaf\x8d' + >>> n = bytes([len(enc)]) + b'\0' * 7 # little-endian 8-byte length + >>> t = read_unicodestring8(io.BytesIO(n + enc + b'junk')) + >>> s == t + True + + >>> read_unicodestring8(io.BytesIO(n + enc[:-1])) + Traceback (most recent call last): + ... + ValueError: expected 7 bytes in a unicodestring8, but only 6 remain + """ + + n = read_uint8(f) + assert n >= 0 + if n > sys.maxsize: + raise ValueError("unicodestring8 byte count > sys.maxsize: %d" % n) + data = f.read(n) + if len(data) == n: + return str(data, 'utf-8', 'surrogatepass') + raise ValueError("expected %d bytes in a unicodestring8, but only %d " + "remain" % (n, len(data))) + +unicodestring8 = ArgumentDescriptor( + name="unicodestring8", + n=TAKEN_FROM_ARGUMENT8U, + reader=read_unicodestring8, + doc="""A counted Unicode string. + + The first argument is an 8-byte little-endian signed int + giving the number of bytes in the string, and the second + argument-- the UTF-8 encoding of the Unicode string -- + contains that many bytes. + """) + + +def read_decimalnl_short(f): + r""" + >>> import io + >>> read_decimalnl_short(io.BytesIO(b"1234\n56")) + 1234 + + >>> read_decimalnl_short(io.BytesIO(b"1234L\n56")) + Traceback (most recent call last): + ... + ValueError: invalid literal for int() with base 10: b'1234L' + """ + + s = read_stringnl(f, decode=False, stripquotes=False) + + # There's a hack for True and False here. + if s == b"00": + return False + elif s == b"01": + return True + + return int(s) + +def read_decimalnl_long(f): + r""" + >>> import io + + >>> read_decimalnl_long(io.BytesIO(b"1234L\n56")) + 1234 + + >>> read_decimalnl_long(io.BytesIO(b"123456789012345678901234L\n6")) + 123456789012345678901234 + """ + + s = read_stringnl(f, decode=False, stripquotes=False) + if s[-1:] == b'L': + s = s[:-1] + return int(s) + + +decimalnl_short = ArgumentDescriptor( + name='decimalnl_short', + n=UP_TO_NEWLINE, + reader=read_decimalnl_short, + doc="""A newline-terminated decimal integer literal. + + This never has a trailing 'L', and the integer fit + in a short Python int on the box where the pickle + was written -- but there's no guarantee it will fit + in a short Python int on the box where the pickle + is read. + """) + +decimalnl_long = ArgumentDescriptor( + name='decimalnl_long', + n=UP_TO_NEWLINE, + reader=read_decimalnl_long, + doc="""A newline-terminated decimal integer literal. + + This has a trailing 'L', and can represent integers + of any size. + """) + + +def read_floatnl(f): + r""" + >>> import io + >>> read_floatnl(io.BytesIO(b"-1.25\n6")) + -1.25 + """ + s = read_stringnl(f, decode=False, stripquotes=False) + return float(s) + +floatnl = ArgumentDescriptor( + name='floatnl', + n=UP_TO_NEWLINE, + reader=read_floatnl, + doc="""A newline-terminated decimal floating literal. + + In general this requires 17 significant digits for roundtrip + identity, and pickling then unpickling infinities, NaNs, and + minus zero doesn't work across boxes, or on some boxes even + on itself (e.g., Windows can't read the strings it produces + for infinities or NaNs). 
+ """) + +def read_float8(f): + r""" + >>> import io, struct + >>> raw = struct.pack(">d", -1.25) + >>> raw + b'\xbf\xf4\x00\x00\x00\x00\x00\x00' + >>> read_float8(io.BytesIO(raw + b"\n")) + -1.25 + """ + + data = f.read(8) + if len(data) == 8: + return _unpack(">d", data)[0] + raise ValueError("not enough data in stream to read float8") + + +float8 = ArgumentDescriptor( + name='float8', + n=8, + reader=read_float8, + doc="""An 8-byte binary representation of a float, big-endian. + + The format is unique to Python, and shared with the struct + module (format string '>d') "in theory" (the struct and pickle + implementations don't share the code -- they should). It's + strongly related to the IEEE-754 double format, and, in normal + cases, is in fact identical to the big-endian 754 double format. + On other boxes the dynamic range is limited to that of a 754 + double, and "add a half and chop" rounding is used to reduce + the precision to 53 bits. However, even on a 754 box, + infinities, NaNs, and minus zero may not be handled correctly + (may not survive roundtrip pickling intact). + """) + +# Protocol 2 formats + +from pickle import decode_long + +def read_long1(f): + r""" + >>> import io + >>> read_long1(io.BytesIO(b"\x00")) + 0 + >>> read_long1(io.BytesIO(b"\x02\xff\x00")) + 255 + >>> read_long1(io.BytesIO(b"\x02\xff\x7f")) + 32767 + >>> read_long1(io.BytesIO(b"\x02\x00\xff")) + -256 + >>> read_long1(io.BytesIO(b"\x02\x00\x80")) + -32768 + """ + + n = read_uint1(f) + data = f.read(n) + if len(data) != n: + raise ValueError("not enough data in stream to read long1") + return decode_long(data) + +long1 = ArgumentDescriptor( + name="long1", + n=TAKEN_FROM_ARGUMENT1, + reader=read_long1, + doc="""A binary long, little-endian, using 1-byte size. + + This first reads one byte as an unsigned size, then reads that + many bytes and interprets them as a little-endian 2's-complement long. + If the size is 0, that's taken as a shortcut for the long 0L. + """) + +def read_long4(f): + r""" + >>> import io + >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x00")) + 255 + >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x7f")) + 32767 + >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\xff")) + -256 + >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\x80")) + -32768 + >>> read_long1(io.BytesIO(b"\x00\x00\x00\x00")) + 0 + """ + + n = read_int4(f) + if n < 0: + raise ValueError("long4 byte count < 0: %d" % n) + data = f.read(n) + if len(data) != n: + raise ValueError("not enough data in stream to read long4") + return decode_long(data) + +long4 = ArgumentDescriptor( + name="long4", + n=TAKEN_FROM_ARGUMENT4, + reader=read_long4, + doc="""A binary representation of a long, little-endian. + + This first reads four bytes as a signed size (but requires the + size to be >= 0), then reads that many bytes and interprets them + as a little-endian 2's-complement long. If the size is 0, that's taken + as a shortcut for the int 0, although LONG1 should really be used + then instead (and in any case where # of bytes < 256). + """) + + +############################################################################## +# Object descriptors. The stack used by the pickle machine holds objects, +# and in the stack_before and stack_after attributes of OpcodeInfo +# descriptors we need names to describe the various types of objects that can +# appear on the stack. 
+ +class StackObject(object): + __slots__ = ( + # name of descriptor record, for info only + 'name', + + # type of object, or tuple of type objects (meaning the object can + # be of any type in the tuple) + 'obtype', + + # human-readable docs for this kind of stack object; a string + 'doc', + ) + + def __init__(self, name, obtype, doc): + assert isinstance(name, str) + self.name = name + + assert isinstance(obtype, type) or isinstance(obtype, tuple) + if isinstance(obtype, tuple): + for contained in obtype: + assert isinstance(contained, type) + self.obtype = obtype + + assert isinstance(doc, str) + self.doc = doc + + def __repr__(self): + return self.name + + +pyint = pylong = StackObject( + name='int', + obtype=int, + doc="A Python integer object.") + +pyinteger_or_bool = StackObject( + name='int_or_bool', + obtype=(int, bool), + doc="A Python integer or boolean object.") + +pybool = StackObject( + name='bool', + obtype=bool, + doc="A Python boolean object.") + +pyfloat = StackObject( + name='float', + obtype=float, + doc="A Python float object.") + +pybytes_or_str = pystring = StackObject( + name='bytes_or_str', + obtype=(bytes, str), + doc="A Python bytes or (Unicode) string object.") + +pybytes = StackObject( + name='bytes', + obtype=bytes, + doc="A Python bytes object.") + +pybytearray = StackObject( + name='bytearray', + obtype=bytearray, + doc="A Python bytearray object.") + +pyunicode = StackObject( + name='str', + obtype=str, + doc="A Python (Unicode) string object.") + +pynone = StackObject( + name="None", + obtype=type(None), + doc="The Python None object.") + +pytuple = StackObject( + name="tuple", + obtype=tuple, + doc="A Python tuple object.") + +pylist = StackObject( + name="list", + obtype=list, + doc="A Python list object.") + +pydict = StackObject( + name="dict", + obtype=dict, + doc="A Python dict object.") + +pyset = StackObject( + name="set", + obtype=set, + doc="A Python set object.") + +pyfrozenset = StackObject( + name="frozenset", + obtype=set, + doc="A Python frozenset object.") + +pybuffer = StackObject( + name='buffer', + obtype=object, + doc="A Python buffer-like object.") + +anyobject = StackObject( + name='any', + obtype=object, + doc="Any kind of object whatsoever.") + +markobject = StackObject( + name="mark", + obtype=StackObject, + doc="""'The mark' is a unique object. + +Opcodes that operate on a variable number of objects +generally don't embed the count of objects in the opcode, +or pull it off the stack. Instead the MARK opcode is used +to push a special marker object on the stack, and then +some other opcodes grab all the objects from the top of +the stack down to (but not including) the topmost marker +object. +""") + +stackslice = StackObject( + name="stackslice", + obtype=StackObject, + doc="""An object representing a contiguous slice of the stack. + +This is used in conjunction with markobject, to represent all +of the stack following the topmost markobject. For example, +the POP_MARK opcode changes the stack from + + [..., markobject, stackslice] +to + [...] + +No matter how many object are on the stack after the topmost +markobject, POP_MARK gets rid of all of them (including the +topmost markobject too). +""") + +############################################################################## +# Descriptors for pickle opcodes. 
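Before the opcode table itself, a short sketch of the mark in action (illustrative only; genops() is defined near the end of this module). Everything a list pickle pushes between MARK and APPENDS is exactly the "stackslice" that APPENDS consumes:

    import pickle
    import pickletools

    ops = [op.name for op, arg, pos in
           pickletools.genops(pickle.dumps([1, 2, 3], protocol=2))]
    print(ops)
    # ['PROTO', 'EMPTY_LIST', 'BINPUT', 'MARK',
    #  'BININT1', 'BININT1', 'BININT1', 'APPENDS', 'STOP']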
+ +class OpcodeInfo(object): + + __slots__ = ( + # symbolic name of opcode; a string + 'name', + + # the code used in a bytestream to represent the opcode; a + # one-character string + 'code', + + # If the opcode has an argument embedded in the byte string, an + # instance of ArgumentDescriptor specifying its type. Note that + # arg.reader(s) can be used to read and decode the argument from + # the bytestream s, and arg.doc documents the format of the raw + # argument bytes. If the opcode doesn't have an argument embedded + # in the bytestream, arg should be None. + 'arg', + + # what the stack looks like before this opcode runs; a list + 'stack_before', + + # what the stack looks like after this opcode runs; a list + 'stack_after', + + # the protocol number in which this opcode was introduced; an int + 'proto', + + # human-readable docs for this opcode; a string + 'doc', + ) + + def __init__(self, name, code, arg, + stack_before, stack_after, proto, doc): + assert isinstance(name, str) + self.name = name + + assert isinstance(code, str) + assert len(code) == 1 + self.code = code + + assert arg is None or isinstance(arg, ArgumentDescriptor) + self.arg = arg + + assert isinstance(stack_before, list) + for x in stack_before: + assert isinstance(x, StackObject) + self.stack_before = stack_before + + assert isinstance(stack_after, list) + for x in stack_after: + assert isinstance(x, StackObject) + self.stack_after = stack_after + + assert isinstance(proto, int) and 0 <= proto <= pickle.HIGHEST_PROTOCOL + self.proto = proto + + assert isinstance(doc, str) + self.doc = doc + +I = OpcodeInfo +opcodes = [ + + # Ways to spell integers. + + I(name='INT', + code='I', + arg=decimalnl_short, + stack_before=[], + stack_after=[pyinteger_or_bool], + proto=0, + doc="""Push an integer or bool. + + The argument is a newline-terminated decimal literal string. + + The intent may have been that this always fit in a short Python int, + but INT can be generated in pickles written on a 64-bit box that + require a Python long on a 32-bit box. The difference between this + and LONG then is that INT skips a trailing 'L', and produces a short + int whenever possible. + + Another difference is due to that, when bool was introduced as a + distinct type in 2.3, builtin names True and False were also added to + 2.2.2, mapping to ints 1 and 0. For compatibility in both directions, + True gets pickled as INT + "I01\\n", and False as INT + "I00\\n". + Leading zeroes are never produced for a genuine integer. The 2.3 + (and later) unpicklers special-case these and return bool instead; + earlier unpicklers ignore the leading "0" and return the int. + """), + + I(name='BININT', + code='J', + arg=int4, + stack_before=[], + stack_after=[pyint], + proto=1, + doc="""Push a four-byte signed integer. + + This handles the full range of Python (short) integers on a 32-bit + box, directly as binary bytes (1 for the opcode and 4 for the integer). + If the integer is non-negative and fits in 1 or 2 bytes, pickling via + BININT1 or BININT2 saves space. + """), + + I(name='BININT1', + code='K', + arg=uint1, + stack_before=[], + stack_after=[pyint], + proto=1, + doc="""Push a one-byte unsigned integer. + + This is a space optimization for pickling very small non-negative ints, + in range(256). + """), + + I(name='BININT2', + code='M', + arg=uint2, + stack_before=[], + stack_after=[pyint], + proto=1, + doc="""Push a two-byte unsigned integer. + + This is a space optimization for pickling small positive ints, in + range(256, 2**16). 
Integers in range(256) can also be pickled via + BININT2, but BININT1 instead saves a byte. + """), + + I(name='LONG', + code='L', + arg=decimalnl_long, + stack_before=[], + stack_after=[pyint], + proto=0, + doc="""Push a long integer. + + The same as INT, except that the literal ends with 'L', and always + unpickles to a Python long. There doesn't seem a real purpose to the + trailing 'L'. + + Note that LONG takes time quadratic in the number of digits when + unpickling (this is simply due to the nature of decimal->binary + conversion). Proto 2 added linear-time (in C; still quadratic-time + in Python) LONG1 and LONG4 opcodes. + """), + + I(name="LONG1", + code='\x8a', + arg=long1, + stack_before=[], + stack_after=[pyint], + proto=2, + doc="""Long integer using one-byte length. + + A more efficient encoding of a Python long; the long1 encoding + says it all."""), + + I(name="LONG4", + code='\x8b', + arg=long4, + stack_before=[], + stack_after=[pyint], + proto=2, + doc="""Long integer using four-byte length. + + A more efficient encoding of a Python long; the long4 encoding + says it all."""), + + # Ways to spell strings (8-bit, not Unicode). + + I(name='STRING', + code='S', + arg=stringnl, + stack_before=[], + stack_after=[pybytes_or_str], + proto=0, + doc="""Push a Python string object. + + The argument is a repr-style string, with bracketing quote characters, + and perhaps embedded escapes. The argument extends until the next + newline character. These are usually decoded into a str instance + using the encoding given to the Unpickler constructor. or the default, + 'ASCII'. If the encoding given was 'bytes' however, they will be + decoded as bytes object instead. + """), + + I(name='BINSTRING', + code='T', + arg=string4, + stack_before=[], + stack_after=[pybytes_or_str], + proto=1, + doc="""Push a Python string object. + + There are two arguments: the first is a 4-byte little-endian + signed int giving the number of bytes in the string, and the + second is that many bytes, which are taken literally as the string + content. These are usually decoded into a str instance using the + encoding given to the Unpickler constructor. or the default, + 'ASCII'. If the encoding given was 'bytes' however, they will be + decoded as bytes object instead. + """), + + I(name='SHORT_BINSTRING', + code='U', + arg=string1, + stack_before=[], + stack_after=[pybytes_or_str], + proto=1, + doc="""Push a Python string object. + + There are two arguments: the first is a 1-byte unsigned int giving + the number of bytes in the string, and the second is that many + bytes, which are taken literally as the string content. These are + usually decoded into a str instance using the encoding given to + the Unpickler constructor. or the default, 'ASCII'. If the + encoding given was 'bytes' however, they will be decoded as bytes + object instead. + """), + + # Bytes (protocol 3 and higher) + + I(name='BINBYTES', + code='B', + arg=bytes4, + stack_before=[], + stack_after=[pybytes], + proto=3, + doc="""Push a Python bytes object. + + There are two arguments: the first is a 4-byte little-endian unsigned int + giving the number of bytes, and the second is that many bytes, which are + taken literally as the bytes content. + """), + + I(name='SHORT_BINBYTES', + code='C', + arg=bytes1, + stack_before=[], + stack_after=[pybytes], + proto=3, + doc="""Push a Python bytes object. 
+ + There are two arguments: the first is a 1-byte unsigned int giving + the number of bytes, and the second is that many bytes, which are taken + literally as the string content. + """), + + I(name='BINBYTES8', + code='\x8e', + arg=bytes8, + stack_before=[], + stack_after=[pybytes], + proto=4, + doc="""Push a Python bytes object. + + There are two arguments: the first is an 8-byte unsigned int giving + the number of bytes in the string, and the second is that many bytes, + which are taken literally as the string content. + """), + + # Bytearray (protocol 5 and higher) + + I(name='BYTEARRAY8', + code='\x96', + arg=bytearray8, + stack_before=[], + stack_after=[pybytearray], + proto=5, + doc="""Push a Python bytearray object. + + There are two arguments: the first is an 8-byte unsigned int giving + the number of bytes in the bytearray, and the second is that many bytes, + which are taken literally as the bytearray content. + """), + + # Out-of-band buffer (protocol 5 and higher) + + I(name='NEXT_BUFFER', + code='\x97', + arg=None, + stack_before=[], + stack_after=[pybuffer], + proto=5, + doc="Push an out-of-band buffer object."), + + I(name='READONLY_BUFFER', + code='\x98', + arg=None, + stack_before=[pybuffer], + stack_after=[pybuffer], + proto=5, + doc="Make an out-of-band buffer object read-only."), + + # Ways to spell None. + + I(name='NONE', + code='N', + arg=None, + stack_before=[], + stack_after=[pynone], + proto=0, + doc="Push None on the stack."), + + # Ways to spell bools, starting with proto 2. See INT for how this was + # done before proto 2. + + I(name='NEWTRUE', + code='\x88', + arg=None, + stack_before=[], + stack_after=[pybool], + proto=2, + doc="Push True onto the stack."), + + I(name='NEWFALSE', + code='\x89', + arg=None, + stack_before=[], + stack_after=[pybool], + proto=2, + doc="Push False onto the stack."), + + # Ways to spell Unicode strings. + + I(name='UNICODE', + code='V', + arg=unicodestringnl, + stack_before=[], + stack_after=[pyunicode], + proto=0, # this may be pure-text, but it's a later addition + doc="""Push a Python Unicode string object. + + The argument is a raw-unicode-escape encoding of a Unicode string, + and so may contain embedded escape sequences. The argument extends + until the next newline character. + """), + + I(name='SHORT_BINUNICODE', + code='\x8c', + arg=unicodestring1, + stack_before=[], + stack_after=[pyunicode], + proto=4, + doc="""Push a Python Unicode string object. + + There are two arguments: the first is a 1-byte little-endian signed int + giving the number of bytes in the string. The second is that many + bytes, and is the UTF-8 encoding of the Unicode string. + """), + + I(name='BINUNICODE', + code='X', + arg=unicodestring4, + stack_before=[], + stack_after=[pyunicode], + proto=1, + doc="""Push a Python Unicode string object. + + There are two arguments: the first is a 4-byte little-endian unsigned int + giving the number of bytes in the string. The second is that many + bytes, and is the UTF-8 encoding of the Unicode string. + """), + + I(name='BINUNICODE8', + code='\x8d', + arg=unicodestring8, + stack_before=[], + stack_after=[pyunicode], + proto=4, + doc="""Push a Python Unicode string object. + + There are two arguments: the first is an 8-byte little-endian signed int + giving the number of bytes in the string. The second is that many + bytes, and is the UTF-8 encoding of the Unicode string. + """), + + # Ways to spell floats. 
+ + I(name='FLOAT', + code='F', + arg=floatnl, + stack_before=[], + stack_after=[pyfloat], + proto=0, + doc="""Newline-terminated decimal float literal. + + The argument is repr(a_float), and in general requires 17 significant + digits for roundtrip conversion to be an identity (this is so for + IEEE-754 double precision values, which is what Python float maps to + on most boxes). + + In general, FLOAT cannot be used to transport infinities, NaNs, or + minus zero across boxes (or even on a single box, if the platform C + library can't read the strings it produces for such things -- Windows + is like that), but may do less damage than BINFLOAT on boxes with + greater precision or dynamic range than IEEE-754 double. + """), + + I(name='BINFLOAT', + code='G', + arg=float8, + stack_before=[], + stack_after=[pyfloat], + proto=1, + doc="""Float stored in binary form, with 8 bytes of data. + + This generally requires less than half the space of FLOAT encoding. + In general, BINFLOAT cannot be used to transport infinities, NaNs, or + minus zero, raises an exception if the exponent exceeds the range of + an IEEE-754 double, and retains no more than 53 bits of precision (if + there are more than that, "add a half and chop" rounding is used to + cut it back to 53 significant bits). + """), + + # Ways to build lists. + + I(name='EMPTY_LIST', + code=']', + arg=None, + stack_before=[], + stack_after=[pylist], + proto=1, + doc="Push an empty list."), + + I(name='APPEND', + code='a', + arg=None, + stack_before=[pylist, anyobject], + stack_after=[pylist], + proto=0, + doc="""Append an object to a list. + + Stack before: ... pylist anyobject + Stack after: ... pylist+[anyobject] + + although pylist is really extended in-place. + """), + + I(name='APPENDS', + code='e', + arg=None, + stack_before=[pylist, markobject, stackslice], + stack_after=[pylist], + proto=1, + doc="""Extend a list by a slice of stack objects. + + Stack before: ... pylist markobject stackslice + Stack after: ... pylist+stackslice + + although pylist is really extended in-place. + """), + + I(name='LIST', + code='l', + arg=None, + stack_before=[markobject, stackslice], + stack_after=[pylist], + proto=0, + doc="""Build a list out of the topmost stack slice, after markobject. + + All the stack entries following the topmost markobject are placed into + a single Python list, which single list object replaces all of the + stack from the topmost markobject onward. For example, + + Stack before: ... markobject 1 2 3 'abc' + Stack after: ... [1, 2, 3, 'abc'] + """), + + # Ways to build tuples. + + I(name='EMPTY_TUPLE', + code=')', + arg=None, + stack_before=[], + stack_after=[pytuple], + proto=1, + doc="Push an empty tuple."), + + I(name='TUPLE', + code='t', + arg=None, + stack_before=[markobject, stackslice], + stack_after=[pytuple], + proto=0, + doc="""Build a tuple out of the topmost stack slice, after markobject. + + All the stack entries following the topmost markobject are placed into + a single Python tuple, which single tuple object replaces all of the + stack from the topmost markobject onward. For example, + + Stack before: ... markobject 1 2 3 'abc' + Stack after: ... (1, 2, 3, 'abc') + """), + + I(name='TUPLE1', + code='\x85', + arg=None, + stack_before=[anyobject], + stack_after=[pytuple], + proto=2, + doc="""Build a one-tuple out of the topmost item on the stack. + + This code pops one value off the stack and pushes a tuple of + length 1 whose one item is that value back onto it. 
In other + words: + + stack[-1] = tuple(stack[-1:]) + """), + + I(name='TUPLE2', + code='\x86', + arg=None, + stack_before=[anyobject, anyobject], + stack_after=[pytuple], + proto=2, + doc="""Build a two-tuple out of the top two items on the stack. + + This code pops two values off the stack and pushes a tuple of + length 2 whose items are those values back onto it. In other + words: + + stack[-2:] = [tuple(stack[-2:])] + """), + + I(name='TUPLE3', + code='\x87', + arg=None, + stack_before=[anyobject, anyobject, anyobject], + stack_after=[pytuple], + proto=2, + doc="""Build a three-tuple out of the top three items on the stack. + + This code pops three values off the stack and pushes a tuple of + length 3 whose items are those values back onto it. In other + words: + + stack[-3:] = [tuple(stack[-3:])] + """), + + # Ways to build dicts. + + I(name='EMPTY_DICT', + code='}', + arg=None, + stack_before=[], + stack_after=[pydict], + proto=1, + doc="Push an empty dict."), + + I(name='DICT', + code='d', + arg=None, + stack_before=[markobject, stackslice], + stack_after=[pydict], + proto=0, + doc="""Build a dict out of the topmost stack slice, after markobject. + + All the stack entries following the topmost markobject are placed into + a single Python dict, which single dict object replaces all of the + stack from the topmost markobject onward. The stack slice alternates + key, value, key, value, .... For example, + + Stack before: ... markobject 1 2 3 'abc' + Stack after: ... {1: 2, 3: 'abc'} + """), + + I(name='SETITEM', + code='s', + arg=None, + stack_before=[pydict, anyobject, anyobject], + stack_after=[pydict], + proto=0, + doc="""Add a key+value pair to an existing dict. + + Stack before: ... pydict key value + Stack after: ... pydict + + where pydict has been modified via pydict[key] = value. + """), + + I(name='SETITEMS', + code='u', + arg=None, + stack_before=[pydict, markobject, stackslice], + stack_after=[pydict], + proto=1, + doc="""Add an arbitrary number of key+value pairs to an existing dict. + + The slice of the stack following the topmost markobject is taken as + an alternating sequence of keys and values, added to the dict + immediately under the topmost markobject. Everything at and after the + topmost markobject is popped, leaving the mutated dict at the top + of the stack. + + Stack before: ... pydict markobject key_1 value_1 ... key_n value_n + Stack after: ... pydict + + where pydict has been modified via pydict[key_i] = value_i for i in + 1, 2, ..., n, and in that order. + """), + + # Ways to build sets + + I(name='EMPTY_SET', + code='\x8f', + arg=None, + stack_before=[], + stack_after=[pyset], + proto=4, + doc="Push an empty set."), + + I(name='ADDITEMS', + code='\x90', + arg=None, + stack_before=[pyset, markobject, stackslice], + stack_after=[pyset], + proto=4, + doc="""Add an arbitrary number of items to an existing set. + + The slice of the stack following the topmost markobject is taken as + a sequence of items, added to the set immediately under the topmost + markobject. Everything at and after the topmost markobject is popped, + leaving the mutated set at the top of the stack. + + Stack before: ... pyset markobject item_1 ... item_n + Stack after: ... pyset + + where pyset has been modified via pyset.add(item_i) = item_i for i in + 1, 2, ..., n, and in that order. 
+ """), + + # Way to build frozensets + + I(name='FROZENSET', + code='\x91', + arg=None, + stack_before=[markobject, stackslice], + stack_after=[pyfrozenset], + proto=4, + doc="""Build a frozenset out of the topmost slice, after markobject. + + All the stack entries following the topmost markobject are placed into + a single Python frozenset, which single frozenset object replaces all + of the stack from the topmost markobject onward. For example, + + Stack before: ... markobject 1 2 3 + Stack after: ... frozenset({1, 2, 3}) + """), + + # Stack manipulation. + + I(name='POP', + code='0', + arg=None, + stack_before=[anyobject], + stack_after=[], + proto=0, + doc="Discard the top stack item, shrinking the stack by one item."), + + I(name='DUP', + code='2', + arg=None, + stack_before=[anyobject], + stack_after=[anyobject, anyobject], + proto=0, + doc="Push the top stack item onto the stack again, duplicating it."), + + I(name='MARK', + code='(', + arg=None, + stack_before=[], + stack_after=[markobject], + proto=0, + doc="""Push markobject onto the stack. + + markobject is a unique object, used by other opcodes to identify a + region of the stack containing a variable number of objects for them + to work on. See markobject.doc for more detail. + """), + + I(name='POP_MARK', + code='1', + arg=None, + stack_before=[markobject, stackslice], + stack_after=[], + proto=1, + doc="""Pop all the stack objects at and above the topmost markobject. + + When an opcode using a variable number of stack objects is done, + POP_MARK is used to remove those objects, and to remove the markobject + that delimited their starting position on the stack. + """), + + # Memo manipulation. There are really only two operations (get and put), + # each in all-text, "short binary", and "long binary" flavors. + + I(name='GET', + code='g', + arg=decimalnl_short, + stack_before=[], + stack_after=[anyobject], + proto=0, + doc="""Read an object from the memo and push it on the stack. + + The index of the memo object to push is given by the newline-terminated + decimal string following. BINGET and LONG_BINGET are space-optimized + versions. + """), + + I(name='BINGET', + code='h', + arg=uint1, + stack_before=[], + stack_after=[anyobject], + proto=1, + doc="""Read an object from the memo and push it on the stack. + + The index of the memo object to push is given by the 1-byte unsigned + integer following. + """), + + I(name='LONG_BINGET', + code='j', + arg=uint4, + stack_before=[], + stack_after=[anyobject], + proto=1, + doc="""Read an object from the memo and push it on the stack. + + The index of the memo object to push is given by the 4-byte unsigned + little-endian integer following. + """), + + I(name='PUT', + code='p', + arg=decimalnl_short, + stack_before=[], + stack_after=[], + proto=0, + doc="""Store the stack top into the memo. The stack is not popped. + + The index of the memo location to write into is given by the newline- + terminated decimal string following. BINPUT and LONG_BINPUT are + space-optimized versions. + """), + + I(name='BINPUT', + code='q', + arg=uint1, + stack_before=[], + stack_after=[], + proto=1, + doc="""Store the stack top into the memo. The stack is not popped. + + The index of the memo location to write into is given by the 1-byte + unsigned integer following. + """), + + I(name='LONG_BINPUT', + code='r', + arg=uint4, + stack_before=[], + stack_after=[], + proto=1, + doc="""Store the stack top into the memo. The stack is not popped. 
+ + The index of the memo location to write into is given by the 4-byte + unsigned little-endian integer following. + """), + + I(name='MEMOIZE', + code='\x94', + arg=None, + stack_before=[anyobject], + stack_after=[anyobject], + proto=4, + doc="""Store the stack top into the memo. The stack is not popped. + + The index of the memo location to write is the number of + elements currently present in the memo. + """), + + # Access the extension registry (predefined objects). Akin to the GET + # family. + + I(name='EXT1', + code='\x82', + arg=uint1, + stack_before=[], + stack_after=[anyobject], + proto=2, + doc="""Extension code. + + This code and the similar EXT2 and EXT4 allow using a registry + of popular objects that are pickled by name, typically classes. + It is envisioned that through a global negotiation and + registration process, third parties can set up a mapping between + ints and object names. + + In order to guarantee pickle interchangeability, the extension + code registry ought to be global, although a range of codes may + be reserved for private use. + + EXT1 has a 1-byte integer argument. This is used to index into the + extension registry, and the object at that index is pushed on the stack. + """), + + I(name='EXT2', + code='\x83', + arg=uint2, + stack_before=[], + stack_after=[anyobject], + proto=2, + doc="""Extension code. + + See EXT1. EXT2 has a two-byte integer argument. + """), + + I(name='EXT4', + code='\x84', + arg=int4, + stack_before=[], + stack_after=[anyobject], + proto=2, + doc="""Extension code. + + See EXT1. EXT4 has a four-byte integer argument. + """), + + # Push a class object, or module function, on the stack, via its module + # and name. + + I(name='GLOBAL', + code='c', + arg=stringnl_noescape_pair, + stack_before=[], + stack_after=[anyobject], + proto=0, + doc="""Push a global object (module.attr) on the stack. + + Two newline-terminated strings follow the GLOBAL opcode. The first is + taken as a module name, and the second as a class name. The class + object module.class is pushed on the stack. More accurately, the + object returned by self.find_class(module, class) is pushed on the + stack, so unpickling subclasses can override this form of lookup. + """), + + I(name='STACK_GLOBAL', + code='\x93', + arg=None, + stack_before=[pyunicode, pyunicode], + stack_after=[anyobject], + proto=4, + doc="""Push a global object (module.attr) on the stack. + """), + + # Ways to build objects of classes pickle doesn't know about directly + # (user-defined classes). I despair of documenting this accurately + # and comprehensibly -- you really have to read the pickle code to + # find all the special cases. + + I(name='REDUCE', + code='R', + arg=None, + stack_before=[anyobject, anyobject], + stack_after=[anyobject], + proto=0, + doc="""Push an object built from a callable and an argument tuple. + + The opcode is named to remind of the __reduce__() method. + + Stack before: ... callable pytuple + Stack after: ... callable(*pytuple) + + The callable and the argument tuple are the first two items returned + by a __reduce__ method. Applying the callable to the argtuple is + supposed to reproduce the original object, or at least get it started. + If the __reduce__ method returns a 3-tuple, the last component is an + argument to be passed to the object's __setstate__, and then the REDUCE + opcode is followed by code to create setstate's argument, and then a + BUILD opcode to apply __setstate__ to that argument. 
+
+    If not isinstance(callable, type), REDUCE complains unless the
+    callable has been registered with the copyreg module's
+    safe_constructors dict, or the callable has a magic
+    '__safe_for_unpickling__' attribute with a true value. I'm not sure
+    why it does this, but I've sure seen this complaint often enough when
+    I didn't want to <wink>.
+    """),
+
+  I(name='BUILD',
+    code='b',
+    arg=None,
+    stack_before=[anyobject, anyobject],
+    stack_after=[anyobject],
+    proto=0,
+    doc="""Finish building an object, via __setstate__ or dict update.
+
+    Stack before: ... anyobject argument
+    Stack after:  ... anyobject
+
+    where anyobject may have been mutated, as follows:
+
+    If the object has a __setstate__ method,
+
+        anyobject.__setstate__(argument)
+
+    is called.
+
+    Else the argument must be a dict, the object must have a __dict__, and
+    the object is updated via
+
+        anyobject.__dict__.update(argument)
+    """),
+
+  I(name='INST',
+    code='i',
+    arg=stringnl_noescape_pair,
+    stack_before=[markobject, stackslice],
+    stack_after=[anyobject],
+    proto=0,
+    doc="""Build a class instance.
+
+    This is the protocol 0 version of protocol 1's OBJ opcode.
+    INST is followed by two newline-terminated strings, giving a
+    module and class name, just as for the GLOBAL opcode (and see
+    GLOBAL for more details about that). self.find_class(module, name)
+    is used to get a class object.
+
+    In addition, all the objects on the stack following the topmost
+    markobject are gathered into a tuple and popped (along with the
+    topmost markobject), just as for the TUPLE opcode.
+
+    Now it gets complicated. If all of these are true:
+
+      + The argtuple is empty (markobject was at the top of the stack
+        at the start).
+
+      + The class object does not have a __getinitargs__ attribute.
+
+    then we want to create an old-style class instance without invoking
+    its __init__() method (pickle has waffled on this over the years; not
+    calling __init__() is current wisdom). In this case, an instance of
+    an old-style dummy class is created, and then we try to rebind its
+    __class__ attribute to the desired class object. If this succeeds,
+    the new instance object is pushed on the stack, and we're done.
+
+    Else (the argtuple is not empty, it's not an old-style class object,
+    or the class object does have a __getinitargs__ attribute), the code
+    first insists that the class object have a __safe_for_unpickling__
+    attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
+    it doesn't matter whether this attribute has a true or false value, it
+    only matters whether it exists (XXX this is a bug). If
+    __safe_for_unpickling__ doesn't exist, UnpicklingError is raised.
+
+    Else (the class object does have a __safe_for_unpickling__ attr),
+    the class object obtained from INST's arguments is applied to the
+    argtuple obtained from the stack, and the resulting instance object
+    is pushed on the stack.
+
+    NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
+    NOTE: the distinction between old-style and new-style classes does
+          not make sense in Python 3.
+    """),
+
+  I(name='OBJ',
+    code='o',
+    arg=None,
+    stack_before=[markobject, anyobject, stackslice],
+    stack_after=[anyobject],
+    proto=1,
+    doc="""Build a class instance.
+
+    This is the protocol 1 version of protocol 0's INST opcode, and is
+    very much like it. The major difference is that the class object
+    is taken off the stack, allowing it to be retrieved from the memo
+    repeatedly if several instances of the same class are created.
This + can be much more efficient (in both time and space) than repeatedly + embedding the module and class names in INST opcodes. + + Unlike INST, OBJ takes no arguments from the opcode stream. Instead + the class object is taken off the stack, immediately above the + topmost markobject: + + Stack before: ... markobject classobject stackslice + Stack after: ... new_instance_object + + As for INST, the remainder of the stack above the markobject is + gathered into an argument tuple, and then the logic seems identical, + except that no __safe_for_unpickling__ check is done (XXX this is + a bug). See INST for the gory details. + + NOTE: In Python 2.3, INST and OBJ are identical except for how they + get the class object. That was always the intent; the implementations + had diverged for accidental reasons. + """), + + I(name='NEWOBJ', + code='\x81', + arg=None, + stack_before=[anyobject, anyobject], + stack_after=[anyobject], + proto=2, + doc="""Build an object instance. + + The stack before should be thought of as containing a class + object followed by an argument tuple (the tuple being the stack + top). Call these cls and args. They are popped off the stack, + and the value returned by cls.__new__(cls, *args) is pushed back + onto the stack. + """), + + I(name='NEWOBJ_EX', + code='\x92', + arg=None, + stack_before=[anyobject, anyobject, anyobject], + stack_after=[anyobject], + proto=4, + doc="""Build an object instance. + + The stack before should be thought of as containing a class + object followed by an argument tuple and by a keyword argument dict + (the dict being the stack top). Call these cls and args. They are + popped off the stack, and the value returned by + cls.__new__(cls, *args, *kwargs) is pushed back onto the stack. + """), + + # Machine control. + + I(name='PROTO', + code='\x80', + arg=uint1, + stack_before=[], + stack_after=[], + proto=2, + doc="""Protocol version indicator. + + For protocol 2 and above, a pickle must start with this opcode. + The argument is the protocol version, an int in range(2, 256). + """), + + I(name='STOP', + code='.', + arg=None, + stack_before=[anyobject], + stack_after=[], + proto=0, + doc="""Stop the unpickling machine. + + Every pickle ends with this opcode. The object at the top of the stack + is popped, and that's the result of unpickling. The stack should be + empty then. + """), + + # Framing support. + + I(name='FRAME', + code='\x95', + arg=uint8, + stack_before=[], + stack_after=[], + proto=4, + doc="""Indicate the beginning of a new frame. + + The unpickler may use this opcode to safely prefetch data from its + underlying stream. + """), + + # Ways to deal with persistent IDs. + + I(name='PERSID', + code='P', + arg=stringnl_noescape, + stack_before=[], + stack_after=[anyobject], + proto=0, + doc="""Push an object identified by a persistent ID. + + The pickle module doesn't define what a persistent ID means. PERSID's + argument is a newline-terminated str-style (no embedded escapes, no + bracketing quote characters) string, which *is* "the persistent ID". + The unpickler passes this string to self.persistent_load(). Whatever + object that returns is pushed on the stack. There is no implementation + of persistent_load() in Python's unpickler: it must be supplied by an + unpickler subclass. + """), + + I(name='BINPERSID', + code='Q', + arg=None, + stack_before=[anyobject], + stack_after=[anyobject], + proto=1, + doc="""Push an object identified by a persistent ID. 
+
+      Like PERSID, except the persistent ID is popped off the stack (instead
+      of being a string embedded in the opcode bytestream).  The persistent
+      ID is passed to self.persistent_load(), and whatever object that
+      returns is pushed on the stack.  See PERSID for more detail.
+      """),
+]
+del I
+
+# Verify uniqueness of .name and .code members.
+name2i = {}
+code2i = {}
+
+for i, d in enumerate(opcodes):
+    if d.name in name2i:
+        raise ValueError("repeated name %r at indices %d and %d" %
+                         (d.name, name2i[d.name], i))
+    if d.code in code2i:
+        raise ValueError("repeated code %r at indices %d and %d" %
+                         (d.code, code2i[d.code], i))
+
+    name2i[d.name] = i
+    code2i[d.code] = i
+
+del name2i, code2i, i, d
+
+##############################################################################
+# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
+# Also ensure we've got the same stuff as pickle.py, although the
+# introspection here is dicey.
+
+code2op = {}
+for d in opcodes:
+    code2op[d.code] = d
+del d
+
+def assure_pickle_consistency(verbose=False):
+
+    copy = code2op.copy()
+    for name in pickle.__all__:
+        if not re.match("[A-Z][A-Z0-9_]+$", name):
+            if verbose:
+                print("skipping %r: it doesn't look like an opcode name" % name)
+            continue
+        picklecode = getattr(pickle, name)
+        if not isinstance(picklecode, bytes) or len(picklecode) != 1:
+            if verbose:
+                print(("skipping %r: value %r doesn't look like a pickle "
+                       "code" % (name, picklecode)))
+            continue
+        picklecode = picklecode.decode("latin-1")
+        if picklecode in copy:
+            if verbose:
+                print("checking name %r w/ code %r for consistency" % (
+                      name, picklecode))
+            d = copy[picklecode]
+            if d.name != name:
+                raise ValueError("for pickle code %r, pickle.py uses name %r "
+                                 "but we're using name %r" % (picklecode,
+                                                              name,
+                                                              d.name))
+            # Forget this one.  Any left over in copy at the end are a problem
+            # of a different kind.
+            del copy[picklecode]
+        else:
+            raise ValueError("pickle.py appears to have a pickle opcode with "
+                             "name %r and code %r, but we don't" %
+                             (name, picklecode))
+    if copy:
+        msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
+        for code, d in copy.items():
+            msg.append("    name %r with code %r" % (d.name, code))
+        raise ValueError("\n".join(msg))
+
+assure_pickle_consistency()
+del assure_pickle_consistency
+
+##############################################################################
+# A pickle opcode generator.
+
+def _genops(data, yield_end_pos=False):
+    if isinstance(data, bytes_types):
+        data = io.BytesIO(data)
+
+    if hasattr(data, "tell"):
+        getpos = data.tell
+    else:
+        getpos = lambda: None
+
+    while True:
+        pos = getpos()
+        code = data.read(1)
+        opcode = code2op.get(code.decode("latin-1"))
+        if opcode is None:
+            if code == b"":
+                raise ValueError("pickle exhausted before seeing STOP")
+            else:
+                raise ValueError("at position %s, opcode %r unknown" % (
+                                 "<unknown>" if pos is None else pos,
+                                 code))
+        if opcode.arg is None:
+            arg = None
+        else:
+            arg = opcode.arg.reader(data)
+        if yield_end_pos:
+            yield opcode, arg, pos, getpos()
+        else:
+            yield opcode, arg, pos
+        if code == b'.':
+            assert opcode.name == 'STOP'
+            break
+
+def genops(pickle):
+    """Generate all the opcodes in a pickle.
+
+    'pickle' is a file-like object, or string, containing the pickle.
+
+    Each opcode in the pickle is generated, from the current pickle position,
+    stopping after a STOP opcode is delivered.
A triple is generated for + each opcode: + + opcode, arg, pos + + opcode is an OpcodeInfo record, describing the current opcode. + + If the opcode has an argument embedded in the pickle, arg is its decoded + value, as a Python object. If the opcode doesn't have an argument, arg + is None. + + If the pickle has a tell() method, pos was the value of pickle.tell() + before reading the current opcode. If the pickle is a bytes object, + it's wrapped in a BytesIO object, and the latter's tell() result is + used. Else (the pickle doesn't have a tell(), and it's not obvious how + to query its current position) pos is None. + """ + return _genops(pickle) + +############################################################################## +# A pickle optimizer. + +def optimize(p): + 'Optimize a pickle string by removing unused PUT opcodes' + put = 'PUT' + get = 'GET' + oldids = set() # set of all PUT ids + newids = {} # set of ids used by a GET opcode + opcodes = [] # (op, idx) or (pos, end_pos) + proto = 0 + protoheader = b'' + for opcode, arg, pos, end_pos in _genops(p, yield_end_pos=True): + if 'PUT' in opcode.name: + oldids.add(arg) + opcodes.append((put, arg)) + elif opcode.name == 'MEMOIZE': + idx = len(oldids) + oldids.add(idx) + opcodes.append((put, idx)) + elif 'FRAME' in opcode.name: + pass + elif 'GET' in opcode.name: + if opcode.proto > proto: + proto = opcode.proto + newids[arg] = None + opcodes.append((get, arg)) + elif opcode.name == 'PROTO': + if arg > proto: + proto = arg + if pos == 0: + protoheader = p[pos:end_pos] + else: + opcodes.append((pos, end_pos)) + else: + opcodes.append((pos, end_pos)) + del oldids + + # Copy the opcodes except for PUTS without a corresponding GET + out = io.BytesIO() + # Write the PROTO header before any framing + out.write(protoheader) + pickler = pickle._Pickler(out, proto) + if proto >= 4: + pickler.framer.start_framing() + idx = 0 + for op, arg in opcodes: + frameless = False + if op is put: + if arg not in newids: + continue + data = pickler.put(idx) + newids[arg] = idx + idx += 1 + elif op is get: + data = pickler.get(newids[arg]) + else: + data = p[op:arg] + frameless = len(data) > pickler.framer._FRAME_SIZE_TARGET + pickler.framer.commit_frame(force=frameless) + if frameless: + pickler.framer.file_write(data) + else: + pickler.write(data) + pickler.framer.end_framing() + return out.getvalue() + +############################################################################## +# A symbolic pickle disassembler. + +def dis(pickle, out=None, memo=None, indentlevel=4, annotate=0): + """Produce a symbolic disassembly of a pickle. + + 'pickle' is a file-like object, or string, containing a (at least one) + pickle. The pickle is disassembled from the current position, through + the first STOP opcode encountered. + + Optional arg 'out' is a file-like object to which the disassembly is + printed. It defaults to sys.stdout. + + Optional arg 'memo' is a Python dict, used as the pickle's memo. It + may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes. + Passing the same memo object to another dis() call then allows disassembly + to proceed across multiple pickles that were all created by the same + pickler with the same memo. Ordinarily you don't need to worry about this. + + Optional arg 'indentlevel' is the number of blanks by which to indent + a new MARK level. It defaults to 4. + + Optional arg 'annotate' if nonzero instructs dis() to add short + description of the opcode on each line of disassembled output. 
+ The value given to 'annotate' must be an integer and is used as a + hint for the column where annotation should start. The default + value is 0, meaning no annotations. + + In addition to printing the disassembly, some sanity checks are made: + + + All embedded opcode arguments "make sense". + + + Explicit and implicit pop operations have enough items on the stack. + + + When an opcode implicitly refers to a markobject, a markobject is + actually on the stack. + + + A memo entry isn't referenced before it's defined. + + + The markobject isn't stored in the memo. + """ + + # Most of the hair here is for sanity checks, but most of it is needed + # anyway to detect when a protocol 0 POP takes a MARK off the stack + # (which in turn is needed to indent MARK blocks correctly). + + stack = [] # crude emulation of unpickler stack + if memo is None: + memo = {} # crude emulation of unpickler memo + maxproto = -1 # max protocol number seen + markstack = [] # bytecode positions of MARK opcodes + indentchunk = ' ' * indentlevel + errormsg = None + annocol = annotate # column hint for annotations + for opcode, arg, pos in genops(pickle): + if pos is not None: + print("%5d:" % pos, end=' ', file=out) + + line = "%-4s %s%s" % (repr(opcode.code)[1:-1], + indentchunk * len(markstack), + opcode.name) + + maxproto = max(maxproto, opcode.proto) + before = opcode.stack_before # don't mutate + after = opcode.stack_after # don't mutate + numtopop = len(before) + + # See whether a MARK should be popped. + markmsg = None + if markobject in before or (opcode.name == "POP" and + stack and + stack[-1] is markobject): + assert markobject not in after + if __debug__: + if markobject in before: + assert before[-1] is stackslice + if markstack: + markpos = markstack.pop() + if markpos is None: + markmsg = "(MARK at unknown opcode offset)" + else: + markmsg = "(MARK at %d)" % markpos + # Pop everything at and after the topmost markobject. + while stack[-1] is not markobject: + stack.pop() + stack.pop() + # Stop later code from popping too much. + try: + numtopop = before.index(markobject) + except ValueError: + assert opcode.name == "POP" + numtopop = 0 + else: + errormsg = "no MARK exists on stack" + + # Check for correct memo usage. + if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT", "MEMOIZE"): + if opcode.name == "MEMOIZE": + memo_idx = len(memo) + markmsg = "(as %d)" % memo_idx + else: + assert arg is not None + memo_idx = arg + if not stack: + errormsg = "stack is empty -- can't store into memo" + elif stack[-1] is markobject: + errormsg = "can't store markobject in the memo" + else: + memo[memo_idx] = stack[-1] + elif opcode.name in ("GET", "BINGET", "LONG_BINGET"): + if arg in memo: + assert len(after) == 1 + after = [memo[arg]] # for better stack emulation + else: + errormsg = "memo key %r has never been stored into" % arg + + if arg is not None or markmsg: + # make a mild effort to align arguments + line += ' ' * (10 - len(opcode.name)) + if arg is not None: + if opcode.name in ("STRING", "BINSTRING", "SHORT_BINSTRING"): + line += ' ' + ascii(arg) + else: + line += ' ' + repr(arg) + if markmsg: + line += ' ' + markmsg + if annotate: + line += ' ' * (annocol - len(line)) + # make a mild effort to align annotations + annocol = len(line) + if annocol > 50: + annocol = annotate + line += ' ' + opcode.doc.split('\n', 1)[0] + print(line, file=out) + + if errormsg: + # Note that we delayed complaining until the offending opcode + # was printed. + raise ValueError(errormsg) + + # Emulate the stack effects. 
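+        # (Editorial note: numtopop was derived from opcode.stack_before
+        # above, and reduced to the slice above the MARK when a
+        # markobject was popped, so the deletion below consumes exactly
+        # as many items as a real unpickler would for this opcode.)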
+ if len(stack) < numtopop: + raise ValueError("tries to pop %d items from stack with " + "only %d items" % (numtopop, len(stack))) + if numtopop: + del stack[-numtopop:] + if markobject in after: + assert markobject not in before + markstack.append(pos) + + stack.extend(after) + + print("highest protocol among opcodes =", maxproto, file=out) + if stack: + raise ValueError("stack not empty after STOP: %r" % stack) + +# For use in the doctest, simply as an example of a class to pickle. +class _Example: + def __init__(self, value): + self.value = value + +_dis_test = r""" +>>> import pickle +>>> x = [1, 2, (3, 4), {b'abc': "def"}] +>>> pkl0 = pickle.dumps(x, 0) +>>> dis(pkl0) + 0: ( MARK + 1: l LIST (MARK at 0) + 2: p PUT 0 + 5: I INT 1 + 8: a APPEND + 9: I INT 2 + 12: a APPEND + 13: ( MARK + 14: I INT 3 + 17: I INT 4 + 20: t TUPLE (MARK at 13) + 21: p PUT 1 + 24: a APPEND + 25: ( MARK + 26: d DICT (MARK at 25) + 27: p PUT 2 + 30: c GLOBAL '_codecs encode' + 46: p PUT 3 + 49: ( MARK + 50: V UNICODE 'abc' + 55: p PUT 4 + 58: V UNICODE 'latin1' + 66: p PUT 5 + 69: t TUPLE (MARK at 49) + 70: p PUT 6 + 73: R REDUCE + 74: p PUT 7 + 77: V UNICODE 'def' + 82: p PUT 8 + 85: s SETITEM + 86: a APPEND + 87: . STOP +highest protocol among opcodes = 0 + +Try again with a "binary" pickle. + +>>> pkl1 = pickle.dumps(x, 1) +>>> dis(pkl1) + 0: ] EMPTY_LIST + 1: q BINPUT 0 + 3: ( MARK + 4: K BININT1 1 + 6: K BININT1 2 + 8: ( MARK + 9: K BININT1 3 + 11: K BININT1 4 + 13: t TUPLE (MARK at 8) + 14: q BINPUT 1 + 16: } EMPTY_DICT + 17: q BINPUT 2 + 19: c GLOBAL '_codecs encode' + 35: q BINPUT 3 + 37: ( MARK + 38: X BINUNICODE 'abc' + 46: q BINPUT 4 + 48: X BINUNICODE 'latin1' + 59: q BINPUT 5 + 61: t TUPLE (MARK at 37) + 62: q BINPUT 6 + 64: R REDUCE + 65: q BINPUT 7 + 67: X BINUNICODE 'def' + 75: q BINPUT 8 + 77: s SETITEM + 78: e APPENDS (MARK at 3) + 79: . STOP +highest protocol among opcodes = 1 + +Exercise the INST/OBJ/BUILD family. + +>>> import pickletools +>>> dis(pickle.dumps(pickletools.dis, 0)) + 0: c GLOBAL 'pickletools dis' + 17: p PUT 0 + 20: . STOP +highest protocol among opcodes = 0 + +>>> from pickletools import _Example +>>> x = [_Example(42)] * 2 +>>> dis(pickle.dumps(x, 0)) + 0: ( MARK + 1: l LIST (MARK at 0) + 2: p PUT 0 + 5: c GLOBAL 'copy_reg _reconstructor' + 30: p PUT 1 + 33: ( MARK + 34: c GLOBAL 'pickletools _Example' + 56: p PUT 2 + 59: c GLOBAL '__builtin__ object' + 79: p PUT 3 + 82: N NONE + 83: t TUPLE (MARK at 33) + 84: p PUT 4 + 87: R REDUCE + 88: p PUT 5 + 91: ( MARK + 92: d DICT (MARK at 91) + 93: p PUT 6 + 96: V UNICODE 'value' + 103: p PUT 7 + 106: I INT 42 + 110: s SETITEM + 111: b BUILD + 112: a APPEND + 113: g GET 5 + 116: a APPEND + 117: . STOP +highest protocol among opcodes = 0 + +>>> dis(pickle.dumps(x, 1)) + 0: ] EMPTY_LIST + 1: q BINPUT 0 + 3: ( MARK + 4: c GLOBAL 'copy_reg _reconstructor' + 29: q BINPUT 1 + 31: ( MARK + 32: c GLOBAL 'pickletools _Example' + 54: q BINPUT 2 + 56: c GLOBAL '__builtin__ object' + 76: q BINPUT 3 + 78: N NONE + 79: t TUPLE (MARK at 31) + 80: q BINPUT 4 + 82: R REDUCE + 83: q BINPUT 5 + 85: } EMPTY_DICT + 86: q BINPUT 6 + 88: X BINUNICODE 'value' + 98: q BINPUT 7 + 100: K BININT1 42 + 102: s SETITEM + 103: b BUILD + 104: h BINGET 5 + 106: e APPENDS (MARK at 3) + 107: . STOP +highest protocol among opcodes = 1 + +Try "the canonical" recursive-object test. 
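+(An editorial aside: L below contains a tuple whose only element is L
+itself, so the pickler must rely on the memo; watch for the GET/BINGET
+opcodes that close the cycle.)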
+ +>>> L = [] +>>> T = L, +>>> L.append(T) +>>> L[0] is T +True +>>> T[0] is L +True +>>> L[0][0] is L +True +>>> T[0][0] is T +True +>>> dis(pickle.dumps(L, 0)) + 0: ( MARK + 1: l LIST (MARK at 0) + 2: p PUT 0 + 5: ( MARK + 6: g GET 0 + 9: t TUPLE (MARK at 5) + 10: p PUT 1 + 13: a APPEND + 14: . STOP +highest protocol among opcodes = 0 + +>>> dis(pickle.dumps(L, 1)) + 0: ] EMPTY_LIST + 1: q BINPUT 0 + 3: ( MARK + 4: h BINGET 0 + 6: t TUPLE (MARK at 3) + 7: q BINPUT 1 + 9: a APPEND + 10: . STOP +highest protocol among opcodes = 1 + +Note that, in the protocol 0 pickle of the recursive tuple, the disassembler +has to emulate the stack in order to realize that the POP opcode at 16 gets +rid of the MARK at 0. + +>>> dis(pickle.dumps(T, 0)) + 0: ( MARK + 1: ( MARK + 2: l LIST (MARK at 1) + 3: p PUT 0 + 6: ( MARK + 7: g GET 0 + 10: t TUPLE (MARK at 6) + 11: p PUT 1 + 14: a APPEND + 15: 0 POP + 16: 0 POP (MARK at 0) + 17: g GET 1 + 20: . STOP +highest protocol among opcodes = 0 + +>>> dis(pickle.dumps(T, 1)) + 0: ( MARK + 1: ] EMPTY_LIST + 2: q BINPUT 0 + 4: ( MARK + 5: h BINGET 0 + 7: t TUPLE (MARK at 4) + 8: q BINPUT 1 + 10: a APPEND + 11: 1 POP_MARK (MARK at 0) + 12: h BINGET 1 + 14: . STOP +highest protocol among opcodes = 1 + +Try protocol 2. + +>>> dis(pickle.dumps(L, 2)) + 0: \x80 PROTO 2 + 2: ] EMPTY_LIST + 3: q BINPUT 0 + 5: h BINGET 0 + 7: \x85 TUPLE1 + 8: q BINPUT 1 + 10: a APPEND + 11: . STOP +highest protocol among opcodes = 2 + +>>> dis(pickle.dumps(T, 2)) + 0: \x80 PROTO 2 + 2: ] EMPTY_LIST + 3: q BINPUT 0 + 5: h BINGET 0 + 7: \x85 TUPLE1 + 8: q BINPUT 1 + 10: a APPEND + 11: 0 POP + 12: h BINGET 1 + 14: . STOP +highest protocol among opcodes = 2 + +Try protocol 3 with annotations: + +>>> dis(pickle.dumps(T, 3), annotate=1) + 0: \x80 PROTO 3 Protocol version indicator. + 2: ] EMPTY_LIST Push an empty list. + 3: q BINPUT 0 Store the stack top into the memo. The stack is not popped. + 5: h BINGET 0 Read an object from the memo and push it on the stack. + 7: \x85 TUPLE1 Build a one-tuple out of the topmost item on the stack. + 8: q BINPUT 1 Store the stack top into the memo. The stack is not popped. + 10: a APPEND Append an object to a list. + 11: 0 POP Discard the top stack item, shrinking the stack by one item. + 12: h BINGET 1 Read an object from the memo and push it on the stack. + 14: . STOP Stop the unpickling machine. +highest protocol among opcodes = 2 + +""" + +_memo_test = r""" +>>> import pickle +>>> import io +>>> f = io.BytesIO() +>>> p = pickle.Pickler(f, 2) +>>> x = [1, 2, 3] +>>> p.dump(x) +>>> p.dump(x) +>>> f.seek(0) +0 +>>> memo = {} +>>> dis(f, memo=memo) + 0: \x80 PROTO 2 + 2: ] EMPTY_LIST + 3: q BINPUT 0 + 5: ( MARK + 6: K BININT1 1 + 8: K BININT1 2 + 10: K BININT1 3 + 12: e APPENDS (MARK at 5) + 13: . STOP +highest protocol among opcodes = 2 +>>> dis(f, memo=memo) + 14: \x80 PROTO 2 + 16: h BINGET 0 + 18: . 
STOP
+highest protocol among opcodes = 2
+"""
+
+__test__ = {'disassembler_test': _dis_test,
+            'disassembler_memo_test': _memo_test,
+            }
+
+
+if __name__ == "__main__":
+    import argparse
+    parser = argparse.ArgumentParser(
+        description='disassemble one or more pickle files',
+        color=True,
+    )
+    parser.add_argument(
+        'pickle_file',
+        nargs='+', help='the pickle file')
+    parser.add_argument(
+        '-o', '--output',
+        help='the file where the output should be written')
+    parser.add_argument(
+        '-m', '--memo', action='store_true',
+        help='preserve memo between disassemblies')
+    parser.add_argument(
+        '-l', '--indentlevel', default=4, type=int,
+        help='the number of blanks by which to indent a new MARK level')
+    parser.add_argument(
+        '-a', '--annotate', action='store_true',
+        help='annotate each line with a short opcode description')
+    parser.add_argument(
+        '-p', '--preamble', default="==> {name} <==",
+        help='if more than one pickle file is specified, print this before'
+        ' each disassembly')
+    args = parser.parse_args()
+    annotate = 30 if args.annotate else 0
+    memo = {} if args.memo else None
+    if args.output is None:
+        output = sys.stdout
+    else:
+        output = open(args.output, 'w')
+    try:
+        for arg in args.pickle_file:
+            if len(args.pickle_file) > 1:
+                name = '<stdin>' if arg == '-' else arg
+                preamble = args.preamble.format(name=name)
+                output.write(preamble + '\n')
+            if arg == '-':
+                dis(sys.stdin.buffer, output, memo, args.indentlevel, annotate)
+            else:
+                with open(arg, 'rb') as f:
+                    dis(f, output, memo, args.indentlevel, annotate)
+    finally:
+        if output is not sys.stdout:
+            output.close()
diff --git a/Python314_4_x86_Template/Lib/pkgutil.py b/Python314_4_x86_Template/Lib/pkgutil.py
new file mode 100644
index 00000000..8772a667
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/pkgutil.py
@@ -0,0 +1,474 @@
+"""Utilities to support packages."""
+
+from collections import namedtuple
+from functools import singledispatch as simplegeneric
+import importlib
+import importlib.util
+import importlib.machinery
+import os
+import os.path
+import sys
+
+__all__ = [
+    'get_importer', 'iter_importers',
+    'walk_packages', 'iter_modules', 'get_data',
+    'read_code', 'extend_path',
+    'ModuleInfo',
+]
+
+
+ModuleInfo = namedtuple('ModuleInfo', 'module_finder name ispkg')
+ModuleInfo.__doc__ = 'A namedtuple with minimal info about a module.'
+
+
+def read_code(stream):
+    # This helper is needed in order for the PEP 302 emulation to
+    # correctly handle compiled files
+    import marshal
+
+    magic = stream.read(4)
+    if magic != importlib.util.MAGIC_NUMBER:
+        return None
+
+    stream.read(12)  # Skip rest of the header
+    return marshal.load(stream)
+
+
+def walk_packages(path=None, prefix='', onerror=None):
+    """Yields ModuleInfo for all modules recursively
+    on path, or, if path is None, all accessible modules.
+
+    'path' should be either None or a list of paths to look for
+    modules in.
+
+    'prefix' is a string to output on the front of every module name
+    on output.
+
+    Note that this function must import all *packages* (NOT all
+    modules!) on the given path, in order to access the __path__
+    attribute to find submodules.
+
+    'onerror' is a function which gets called with one argument (the
+    name of the package which was being imported) if any exception
+    occurs while trying to import a package.  If no onerror function is
+    supplied, ImportErrors are caught and ignored, while all other
+    exceptions are propagated, terminating the search.
+ + Examples: + + # list all modules python can access + walk_packages() + + # list all submodules of ctypes + walk_packages(ctypes.__path__, ctypes.__name__+'.') + """ + + def seen(p, m={}): + if p in m: + return True + m[p] = True + + for info in iter_modules(path, prefix): + yield info + + if info.ispkg: + try: + __import__(info.name) + except ImportError: + if onerror is not None: + onerror(info.name) + except Exception: + if onerror is not None: + onerror(info.name) + else: + raise + else: + path = getattr(sys.modules[info.name], '__path__', None) or [] + + # don't traverse path items we've seen before + path = [p for p in path if not seen(p)] + + yield from walk_packages(path, info.name+'.', onerror) + + +def iter_modules(path=None, prefix=''): + """Yields ModuleInfo for all submodules on path, + or, if path is None, all top-level modules on sys.path. + + 'path' should be either None or a list of paths to look for + modules in. + + 'prefix' is a string to output on the front of every module name + on output. + """ + if path is None: + importers = iter_importers() + elif isinstance(path, str): + raise ValueError("path must be None or list of paths to look for " + "modules in") + else: + importers = map(get_importer, path) + + yielded = {} + for i in importers: + for name, ispkg in iter_importer_modules(i, prefix): + if name not in yielded: + yielded[name] = 1 + yield ModuleInfo(i, name, ispkg) + + +@simplegeneric +def iter_importer_modules(importer, prefix=''): + if not hasattr(importer, 'iter_modules'): + return [] + return importer.iter_modules(prefix) + + +# Implement a file walker for the normal importlib path hook +def _iter_file_finder_modules(importer, prefix=''): + if importer.path is None or not os.path.isdir(importer.path): + return + + yielded = {} + import inspect + try: + filenames = os.listdir(importer.path) + except OSError: + # ignore unreadable directories like import does + filenames = [] + filenames.sort() # handle packages before same-named modules + + for fn in filenames: + modname = inspect.getmodulename(fn) + if modname=='__init__' or modname in yielded: + continue + + path = os.path.join(importer.path, fn) + ispkg = False + + if not modname and os.path.isdir(path) and '.' not in fn: + modname = fn + try: + dircontents = os.listdir(path) + except OSError: + # ignore unreadable directories like import does + dircontents = [] + for fn in dircontents: + subname = inspect.getmodulename(fn) + if subname=='__init__': + ispkg = True + break + else: + continue # not a package + + if modname and '.' not in modname: + yielded[modname] = 1 + yield prefix + modname, ispkg + +iter_importer_modules.register( + importlib.machinery.FileFinder, _iter_file_finder_modules) + + +try: + import zipimport + from zipimport import zipimporter + + def iter_zipimport_modules(importer, prefix=''): + dirlist = sorted(zipimport._zip_directory_cache[importer.archive]) + _prefix = importer.prefix + plen = len(_prefix) + yielded = {} + import inspect + for fn in dirlist: + if not fn.startswith(_prefix): + continue + + fn = fn[plen:].split(os.sep) + + if len(fn)==2 and fn[1].startswith('__init__.py'): + if fn[0] not in yielded: + yielded[fn[0]] = 1 + yield prefix + fn[0], True + + if len(fn)!=1: + continue + + modname = inspect.getmodulename(fn[0]) + if modname=='__init__': + continue + + if modname and '.' 
not in modname and modname not in yielded:
+                yielded[modname] = 1
+                yield prefix + modname, False
+
+    iter_importer_modules.register(zipimporter, iter_zipimport_modules)
+
+except ImportError:
+    pass
+
+
+def get_importer(path_item):
+    """Retrieve a finder for the given path item
+
+    The returned finder is cached in sys.path_importer_cache
+    if it was newly created by a path hook.
+
+    The cache (or part of it) can be cleared manually if a
+    rescan of sys.path_hooks is necessary.
+    """
+    path_item = os.fsdecode(path_item)
+    try:
+        importer = sys.path_importer_cache[path_item]
+    except KeyError:
+        for path_hook in sys.path_hooks:
+            try:
+                importer = path_hook(path_item)
+                sys.path_importer_cache.setdefault(path_item, importer)
+                break
+            except ImportError:
+                pass
+        else:
+            importer = None
+    return importer
+
+
+def iter_importers(fullname=""):
+    """Yield finders for the given module name
+
+    If fullname contains a '.', the finders will be for the package
+    containing fullname, otherwise they will be all registered top level
+    finders (i.e. those on both sys.meta_path and sys.path_hooks).
+
+    If the named module is in a package, that package is imported as a side
+    effect of invoking this function.
+
+    If no module name is specified, all top level finders are produced.
+    """
+    if fullname.startswith('.'):
+        msg = "Relative module name {!r} not supported".format(fullname)
+        raise ImportError(msg)
+    if '.' in fullname:
+        # Get the containing package's __path__
+        pkg_name = fullname.rpartition(".")[0]
+        pkg = importlib.import_module(pkg_name)
+        path = getattr(pkg, '__path__', None)
+        if path is None:
+            return
+    else:
+        yield from sys.meta_path
+        path = sys.path
+    for item in path:
+        yield get_importer(item)
+
+
+def extend_path(path, name):
+    """Extend a package's path.
+
+    Intended use is to place the following code in a package's __init__.py:
+
+        from pkgutil import extend_path
+        __path__ = extend_path(__path__, __name__)
+
+    For each directory on sys.path that has a subdirectory that
+    matches the package name, add the subdirectory to the package's
+    __path__.  This is useful if one wants to distribute different
+    parts of a single logical package as multiple directories.
+
+    It also looks for *.pkg files beginning where * matches the name
+    argument.  This feature is similar to *.pth files (see site.py),
+    except that it doesn't special-case lines starting with 'import'.
+    A *.pkg file is trusted at face value: apart from checking for
+    duplicates, all entries found in a *.pkg file are added to the
+    path, regardless of whether they exist on the filesystem.  (This
+    is a feature.)
+
+    If the input path is not a list (as is the case for frozen
+    packages) it is returned unchanged.  The input path is not
+    modified; an extended copy is returned.  Items are only appended
+    to the copy at the end.
+
+    It is assumed that sys.path is a sequence.  Items of sys.path that
+    are not (unicode or 8-bit) strings referring to existing
+    directories are ignored.  Unicode items of sys.path that cause
+    errors when used as filenames may cause this function to raise an
+    exception (in line with os.path.isdir() behavior).
+    """
+
+    if not isinstance(path, list):
+        # This could happen e.g. when this is called from inside a
+        # frozen package. Return the path unchanged in that case.
+ return path + + sname_pkg = name + ".pkg" + + path = path[:] # Start with a copy of the existing path + + parent_package, _, final_name = name.rpartition('.') + if parent_package: + try: + search_path = sys.modules[parent_package].__path__ + except (KeyError, AttributeError): + # We can't do anything: find_loader() returns None when + # passed a dotted name. + return path + else: + search_path = sys.path + + for dir in search_path: + if not isinstance(dir, str): + continue + + finder = get_importer(dir) + if finder is not None: + portions = [] + if hasattr(finder, 'find_spec'): + spec = finder.find_spec(final_name) + if spec is not None: + portions = spec.submodule_search_locations or [] + # Is this finder PEP 420 compliant? + elif hasattr(finder, 'find_loader'): + _, portions = finder.find_loader(final_name) + + for portion in portions: + # XXX This may still add duplicate entries to path on + # case-insensitive filesystems + if portion not in path: + path.append(portion) + + # XXX Is this the right thing for subpackages like zope.app? + # It looks for a file named "zope.app.pkg" + pkgfile = os.path.join(dir, sname_pkg) + if os.path.isfile(pkgfile): + try: + f = open(pkgfile) + except OSError as msg: + sys.stderr.write("Can't open %s: %s\n" % + (pkgfile, msg)) + else: + with f: + for line in f: + line = line.rstrip('\n') + if not line or line.startswith('#'): + continue + path.append(line) # Don't check for existence! + + return path + + +def get_data(package, resource): + """Get a resource from a package. + + This is a wrapper round the PEP 302 loader get_data API. The package + argument should be the name of a package, in standard module format + (foo.bar). The resource argument should be in the form of a relative + filename, using '/' as the path separator. The parent directory name '..' + is not allowed, and nor is a rooted name (starting with a '/'). + + The function returns a binary string, which is the contents of the + specified resource. + + For packages located in the filesystem, which have already been imported, + this is the rough equivalent of + + d = os.path.dirname(sys.modules[package].__file__) + data = open(os.path.join(d, resource), 'rb').read() + + If the package cannot be located or loaded, or it uses a PEP 302 loader + which does not support get_data(), then None is returned. + """ + + spec = importlib.util.find_spec(package) + if spec is None: + return None + loader = spec.loader + if loader is None or not hasattr(loader, 'get_data'): + return None + # XXX needs test + mod = (sys.modules.get(package) or + importlib._bootstrap._load(spec)) + if mod is None or not hasattr(mod, '__file__'): + return None + + # Modify the resource name to be compatible with the loader.get_data + # signature - an os.path format "filename" starting with the dirname of + # the package's __file__ + parts = resource.split('/') + parts.insert(0, os.path.dirname(mod.__file__)) + resource_name = os.path.join(*parts) + return loader.get_data(resource_name) + + +_NAME_PATTERN = None + +def resolve_name(name): + """ + Resolve a name to an object. + + It is expected that `name` will be a string in one of the following + formats, where W is shorthand for a valid Python identifier and dot stands + for a literal period in these pseudo-regexes: + + W(.W)* + W(.W)*:(W(.W)*)? + + The first form is intended for backward compatibility only. It assumes that + some part of the dotted name is a package, and the rest is an object + somewhere within that package, possibly nested inside other objects. 
+
+    Because the place where the package stops and the object hierarchy starts
+    can't be inferred by inspection, repeated attempts to import must be done
+    with this form.
+
+    In the second form, the caller makes the division point clear through the
+    provision of a single colon: the dotted name to the left of the colon is a
+    package to be imported, and the dotted name to the right is the object
+    hierarchy within that package. Only one import is needed in this form. If
+    it ends with the colon, then a module object is returned.
+
+    The function will return an object (which might be a module), or raise one
+    of the following exceptions:
+
+    ValueError - if `name` isn't in a recognised format
+    ImportError - if an import failed when it shouldn't have
+    AttributeError - if a failure occurred when traversing the object hierarchy
+    within the imported package to get to the desired object.
+    """
+    global _NAME_PATTERN
+    if _NAME_PATTERN is None:
+        # Lazy import to speedup Python startup time
+        import re
+        dotted_words = r'(?!\d)(\w+)(\.(?!\d)(\w+))*'
+        _NAME_PATTERN = re.compile(f'^(?P<pkg>{dotted_words})'
+                                   f'(?P<cln>:(?P<obj>{dotted_words})?)?$',
+                                   re.UNICODE)
+
+    m = _NAME_PATTERN.match(name)
+    if not m:
+        raise ValueError(f'invalid format: {name!r}')
+    gd = m.groupdict()
+    if gd.get('cln'):
+        # there is a colon - a one-step import is all that's needed
+        mod = importlib.import_module(gd['pkg'])
+        parts = gd.get('obj')
+        parts = parts.split('.') if parts else []
+    else:
+        # no colon - have to iterate to find the package boundary
+        parts = name.split('.')
+        modname = parts.pop(0)
+        # first part *must* be a module/package.
+        mod = importlib.import_module(modname)
+        while parts:
+            p = parts[0]
+            s = f'{modname}.{p}'
+            try:
+                mod = importlib.import_module(s)
+                parts.pop(0)
+                modname = s
+            except ImportError:
+                break
+    # if we reach this point, mod is the module, already imported, and
+    # parts is the list of parts in the object hierarchy to be traversed, or
+    # an empty list if just the module is wanted.
+    result = mod
+    for p in parts:
+        result = getattr(result, p)
+    return result
diff --git a/Python314_4_x86_Template/Lib/platform.py b/Python314_4_x86_Template/Lib/platform.py
new file mode 100644
index 00000000..b017b841
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/platform.py
@@ -0,0 +1,1515 @@
+""" This module tries to retrieve as much platform-identifying data as
+    possible. It makes this information available via function APIs.
+
+    If called from the command line, it prints the platform
+    information concatenated as single string to stdout. The output
+    format is usable as part of a filename.
+
+"""
+#    This module is maintained by Marc-Andre Lemburg <mal@lemburg.com>.
+#    If you find problems, please submit bug reports/patches via the
+#    Python issue tracker (https://github.com/python/cpython/issues) and
+#    mention "@malemburg".
+#
+#    Still needed:
+#    * support for MS-DOS (PythonDX ?)
+#    * support for Amiga and other still unsupported platforms running Python
+#    * support for additional Linux distributions
+#
+#    Many thanks to all those who helped adding platform-specific
+#    checks (in no particular order):
+#
+#      Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
+#      Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
+#      Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
+#      Andruk, Just van Rossum, Thomas Heller, Mark R.
Levinson, Mark +# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support), +# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter, Steve +# Dower +# +# History: +# +# +# +# 1.0.9 - added invalidate_caches() function to invalidate cached values +# 1.0.8 - changed Windows support to read version from kernel32.dll +# 1.0.7 - added DEV_NULL +# 1.0.6 - added linux_distribution() +# 1.0.5 - fixed Java support to allow running the module on Jython +# 1.0.4 - added IronPython support +# 1.0.3 - added normalization of Windows system name +# 1.0.2 - added more Windows support +# 1.0.1 - reformatted to make doc.py happy +# 1.0.0 - reformatted a bit and checked into Python CVS +# 0.8.0 - added sys.version parser and various new access +# APIs (python_version(), python_compiler(), etc.) +# 0.7.2 - fixed architecture() to use sizeof(pointer) where available +# 0.7.1 - added support for Caldera OpenLinux +# 0.7.0 - some fixes for WinCE; untabified the source file +# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and +# vms_lib.getsyi() configured +# 0.6.1 - added code to prevent 'uname -p' on platforms which are +# known not to support it +# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k; +# did some cleanup of the interfaces - some APIs have changed +# 0.5.5 - fixed another type in the MacOS code... should have +# used more coffee today ;-) +# 0.5.4 - fixed a few typos in the MacOS code +# 0.5.3 - added experimental MacOS support; added better popen() +# workarounds in _syscmd_ver() -- still not 100% elegant +# though +# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all +# return values (the system uname command tends to return +# 'unknown' instead of just leaving the field empty) +# 0.5.1 - included code for slackware dist; added exception handlers +# to cover up situations where platforms don't have os.popen +# (e.g. Mac) or fail on socket.gethostname(); fixed libc +# detection RE +# 0.5.0 - changed the API names referring to system commands to *syscmd*; +# added java_ver(); made syscmd_ver() a private +# API (was system_ver() in previous versions) -- use uname() +# instead; extended the win32_ver() to also return processor +# type information +# 0.4.0 - added win32_ver() and modified the platform() output for WinXX +# 0.3.4 - fixed a bug in _follow_symlinks() +# 0.3.3 - fixed popen() and "file" command invocation bugs +# 0.3.2 - added architecture() API and support for it in platform() +# 0.3.1 - fixed syscmd_ver() RE to support Windows NT +# 0.3.0 - added system alias support +# 0.2.3 - removed 'wince' again... oh well. +# 0.2.2 - added 'wince' to syscmd_ver() supported platforms +# 0.2.1 - added cache logic and changed the platform string format +# 0.2.0 - changed the API to use functions instead of module globals +# since some action take too long to be run on module import +# 0.1.0 - first release +# +# You can always get the latest version of this module at: +# +# http://www.egenix.com/files/python/platform.py +# +# If that URL should fail, try contacting the author. 
+ +__copyright__ = """ + Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com + Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com + + Permission to use, copy, modify, and distribute this software and its + documentation for any purpose and without fee or royalty is hereby granted, + provided that the above copyright notice appear in all copies and that + both that copyright notice and this permission notice appear in + supporting documentation or portions thereof, including modifications, + that you make. + + EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO + THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND + FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, + INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING + FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + WITH THE USE OR PERFORMANCE OF THIS SOFTWARE ! + +""" + +__version__ = '1.0.9' + +import collections +import os +import re +import sys +import functools +import itertools +try: + import _wmi +except ImportError: + _wmi = None + +### Globals & Constants + +# Helper for comparing two version number strings. +# Based on the description of the PHP's version_compare(): +# http://php.net/manual/en/function.version-compare.php + +_ver_stages = { + # any string not found in this dict, will get 0 assigned + 'dev': 10, + 'alpha': 20, 'a': 20, + 'beta': 30, 'b': 30, + 'c': 40, + 'RC': 50, 'rc': 50, + # number, will get 100 assigned + 'pl': 200, 'p': 200, +} + + +def _comparable_version(version): + component_re = re.compile(r'([0-9]+|[._+-])') + result = [] + for v in component_re.split(version): + if v not in '._+-': + try: + v = int(v, 10) + t = 100 + except ValueError: + t = _ver_stages.get(v, 0) + result.extend((t, v)) + return result + +### Platform specific APIs + + +def libc_ver(executable=None, lib='', version='', chunksize=16384): + + """ Tries to determine the libc version that the file executable + (which defaults to the Python interpreter) is linked against. + + Returns a tuple of strings (lib,version) which default to the + given parameters in case the lookup fails. + + Note that the function has intimate knowledge of how different + libc versions add symbols to the executable and thus is probably + only usable for executables compiled using gcc. + + The file is read and scanned in chunks of chunksize bytes. + + """ + if not executable: + if sys.platform == "emscripten": + # Emscripten's os.confstr reports that it is glibc, so special case + # it. + ver = ".".join(str(x) for x in sys._emscripten_info.emscripten_version) + return ("emscripten", ver) + try: + ver = os.confstr('CS_GNU_LIBC_VERSION') + # parse 'glibc 2.28' as ('glibc', '2.28') + parts = ver.split(maxsplit=1) + if len(parts) == 2: + return tuple(parts) + except (AttributeError, ValueError, OSError): + # os.confstr() or CS_GNU_LIBC_VERSION value not available + pass + + executable = sys.executable + + if not executable: + # sys.executable is not set. + return lib, version + + libc_search = re.compile(br""" + (__libc_init) + | (GLIBC_([0-9.]+)) + | (libc(_\w+)?\.so(?:\.(\d[0-9.]*))?) + | (musl-([0-9.]+)) + | ((?:libc\.|ld-)musl(?:-\w+)?.so(?:\.(\d[0-9.]*))?) 
+ """, + re.ASCII | re.VERBOSE) + + V = _comparable_version + # We use os.path.realpath() + # here to work around problems with Cygwin not being + # able to open symlinks for reading + executable = os.path.realpath(executable) + ver = None + with open(executable, 'rb') as f: + binary = f.read(chunksize) + pos = 0 + while pos < len(binary): + if b'libc' in binary or b'GLIBC' in binary or b'musl' in binary: + m = libc_search.search(binary, pos) + else: + m = None + if not m or m.end() == len(binary): + chunk = f.read(chunksize) + if chunk: + binary = binary[max(pos, len(binary) - 1000):] + chunk + pos = 0 + continue + if not m: + break + decoded_groups = [s.decode('latin1') if s is not None else s + for s in m.groups()] + (libcinit, glibc, glibcversion, so, threads, soversion, + musl, muslversion, musl_so, musl_sover) = decoded_groups + if libcinit and not lib: + lib = 'libc' + elif glibc: + if lib != 'glibc': + lib = 'glibc' + ver = glibcversion + elif V(glibcversion) > V(ver): + ver = glibcversion + elif so: + if lib not in ('glibc', 'musl'): + lib = 'libc' + if soversion and (not ver or V(soversion) > V(ver)): + ver = soversion + if threads and ver[-len(threads):] != threads: + ver = ver + threads + elif musl: + lib = 'musl' + if not ver or V(muslversion) > V(ver): + ver = muslversion + elif musl_so: + lib = 'musl' + if musl_sover and (not ver or V(musl_sover) > V(ver)): + ver = musl_sover + pos = m.end() + return lib, version if ver is None else ver + +def _norm_version(version, build=''): + + """ Normalize the version and build strings and return a single + version string using the format major.minor.build (or patchlevel). + """ + l = version.split('.') + if build: + l.append(build) + try: + strings = list(map(str, map(int, l))) + except ValueError: + strings = l + version = '.'.join(strings[:3]) + return version + + +# Examples of VER command output: +# +# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195] +# Windows XP: Microsoft Windows XP [Version 5.1.2600] +# Windows Vista: Microsoft Windows [Version 6.0.6002] +# +# Note that the "Version" string gets localized on different +# Windows versions. + +def _syscmd_ver(system='', release='', version='', + + supported_platforms=('win32', 'win16', 'dos')): + + """ Tries to figure out the OS version used and returns + a tuple (system, release, version). + + It uses the "ver" shell command for this which is known + to exists on Windows, DOS. XXX Others too ? + + In case this fails, the given parameters are used as + defaults. 
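+
+        A hedged example (editorial): given "ver" output such as
+        'Microsoft Windows [Version 10.0.22000.194]', the bracketed
+        number is captured and _norm_version() trims it to '10.0.22000'.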
+ + """ + if sys.platform not in supported_platforms: + return system, release, version + + # Try some common cmd strings + import subprocess + for cmd in ('ver', 'command /c ver', 'cmd /c ver'): + try: + info = subprocess.check_output(cmd, + stdin=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + text=True, + encoding="locale", + shell=True) + except (OSError, subprocess.CalledProcessError) as why: + #print('Command %s failed: %s' % (cmd, why)) + continue + else: + break + else: + return system, release, version + + ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) ' + r'.*' + r'\[.* ([\d.]+)\])') + + # Parse the output + info = info.strip() + m = ver_output.match(info) + if m is not None: + system, release, version = m.groups() + # Strip trailing dots from version and release + if release[-1] == '.': + release = release[:-1] + if version[-1] == '.': + version = version[:-1] + # Normalize the version and build strings (eliminating additional + # zeros) + version = _norm_version(version) + return system, release, version + + +def _wmi_query(table, *keys): + global _wmi + if not _wmi: + raise OSError("not supported") + table = { + "OS": "Win32_OperatingSystem", + "CPU": "Win32_Processor", + }[table] + try: + data = _wmi.exec_query("SELECT {} FROM {}".format( + ",".join(keys), + table, + )).split("\0") + except OSError: + _wmi = None + raise OSError("not supported") + split_data = (i.partition("=") for i in data) + dict_data = {i[0]: i[2] for i in split_data} + return (dict_data[k] for k in keys) + + +_WIN32_CLIENT_RELEASES = [ + ((10, 1, 0), "post11"), + ((10, 0, 22000), "11"), + ((6, 4, 0), "10"), + ((6, 3, 0), "8.1"), + ((6, 2, 0), "8"), + ((6, 1, 0), "7"), + ((6, 0, 0), "Vista"), + ((5, 2, 3790), "XP64"), + ((5, 2, 0), "XPMedia"), + ((5, 1, 0), "XP"), + ((5, 0, 0), "2000"), +] + +_WIN32_SERVER_RELEASES = [ + ((10, 1, 0), "post2025Server"), + ((10, 0, 26100), "2025Server"), + ((10, 0, 20348), "2022Server"), + ((10, 0, 17763), "2019Server"), + ((6, 4, 0), "2016Server"), + ((6, 3, 0), "2012ServerR2"), + ((6, 2, 0), "2012Server"), + ((6, 1, 0), "2008ServerR2"), + ((6, 0, 0), "2008Server"), + ((5, 2, 0), "2003Server"), + ((5, 0, 0), "2000Server"), +] + +def win32_is_iot(): + return win32_edition() in ('IoTUAP', 'NanoServer', 'WindowsCoreHeadless', 'IoTEdgeOS') + +def win32_edition(): + try: + import winreg + except ImportError: + pass + else: + try: + cvkey = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion' + with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, cvkey) as key: + return winreg.QueryValueEx(key, 'EditionId')[0] + except OSError: + pass + + return None + +def _win32_ver(version, csd, ptype): + # Try using WMI first, as this is the canonical source of data + try: + (version, product_type, ptype, spmajor, spminor) = _wmi_query( + 'OS', + 'Version', + 'ProductType', + 'BuildType', + 'ServicePackMajorVersion', + 'ServicePackMinorVersion', + ) + is_client = (int(product_type) == 1) + if spminor and spminor != '0': + csd = f'SP{spmajor}.{spminor}' + else: + csd = f'SP{spmajor}' + return version, csd, ptype, is_client + except OSError: + pass + + # Fall back to a combination of sys.getwindowsversion and "ver" + try: + from sys import getwindowsversion + except ImportError: + return version, csd, ptype, True + + winver = getwindowsversion() + is_client = (getattr(winver, 'product_type', 1) == 1) + try: + version = _syscmd_ver()[2] + major, minor, build = map(int, version.split('.')) + except ValueError: + major, minor, build = winver.platform_version or winver[:3] + version = 
'{0}.{1}.{2}'.format(major, minor, build) + + # getwindowsversion() reflect the compatibility mode Python is + # running under, and so the service pack value is only going to be + # valid if the versions match. + if winver[:2] == (major, minor): + try: + csd = 'SP{}'.format(winver.service_pack_major) + except AttributeError: + if csd[:13] == 'Service Pack ': + csd = 'SP' + csd[13:] + + try: + import winreg + except ImportError: + pass + else: + try: + cvkey = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion' + with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, cvkey) as key: + ptype = winreg.QueryValueEx(key, 'CurrentType')[0] + except OSError: + pass + + return version, csd, ptype, is_client + +def win32_ver(release='', version='', csd='', ptype=''): + is_client = False + + version, csd, ptype, is_client = _win32_ver(version, csd, ptype) + + if version: + intversion = tuple(map(int, version.split('.'))) + releases = _WIN32_CLIENT_RELEASES if is_client else _WIN32_SERVER_RELEASES + release = next((r for v, r in releases if v <= intversion), release) + + return release, version, csd, ptype + + +def _mac_ver_xml(): + fn = '/System/Library/CoreServices/SystemVersion.plist' + if not os.path.exists(fn): + return None + + try: + import plistlib + except ImportError: + return None + + with open(fn, 'rb') as f: + pl = plistlib.load(f) + release = pl['ProductVersion'] + versioninfo = ('', '', '') + machine = os.uname().machine + if machine in ('ppc', 'Power Macintosh'): + # Canonical name + machine = 'PowerPC' + + return release, versioninfo, machine + + +def mac_ver(release='', versioninfo=('', '', ''), machine=''): + + """ Get macOS version information and return it as tuple (release, + versioninfo, machine) with versioninfo being a tuple (version, + dev_stage, non_release_version). + + Entries which cannot be determined are set to the parameter values + which default to ''. All tuple entries are strings. + """ + + # First try reading the information from an XML file which should + # always be present + info = _mac_ver_xml() + if info is not None: + return info + + # If that also doesn't work return the default values + return release, versioninfo, machine + + +# A namedtuple for iOS version information. +IOSVersionInfo = collections.namedtuple( + "IOSVersionInfo", + ["system", "release", "model", "is_simulator"] +) + + +def ios_ver(system="", release="", model="", is_simulator=False): + """Get iOS version information, and return it as a namedtuple: + (system, release, model, is_simulator). + + If values can't be determined, they are set to values provided as + parameters. + """ + if sys.platform == "ios": + import _ios_support + result = _ios_support.get_platform_ios() + if result is not None: + return IOSVersionInfo(*result) + + return IOSVersionInfo(system, release, model, is_simulator) + + +def _java_getprop(name, default): + """This private helper is deprecated in 3.13 and will be removed in 3.15""" + from java.lang import System + try: + value = System.getProperty(name) + if value is None: + return default + return value + except AttributeError: + return default + +def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')): + + """ Version interface for Jython. + + Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being + a tuple (vm_name, vm_release, vm_vendor) and osinfo being a + tuple (os_name, os_version, os_arch). + + Values which cannot be determined are set to the defaults + given as parameters (which all default to ''). 
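+
+        Note (editorial): java_ver() is deprecated; the
+        warnings._deprecated() call below schedules its removal in
+        Python 3.15.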
+ + """ + import warnings + warnings._deprecated('java_ver', remove=(3, 15)) + # Import the needed APIs + try: + import java.lang # noqa: F401 + except ImportError: + return release, vendor, vminfo, osinfo + + vendor = _java_getprop('java.vendor', vendor) + release = _java_getprop('java.version', release) + vm_name, vm_release, vm_vendor = vminfo + vm_name = _java_getprop('java.vm.name', vm_name) + vm_vendor = _java_getprop('java.vm.vendor', vm_vendor) + vm_release = _java_getprop('java.vm.version', vm_release) + vminfo = vm_name, vm_release, vm_vendor + os_name, os_version, os_arch = osinfo + os_arch = _java_getprop('java.os.arch', os_arch) + os_name = _java_getprop('java.os.name', os_name) + os_version = _java_getprop('java.os.version', os_version) + osinfo = os_name, os_version, os_arch + + return release, vendor, vminfo, osinfo + + +AndroidVer = collections.namedtuple( + "AndroidVer", "release api_level manufacturer model device is_emulator") + +def android_ver(release="", api_level=0, manufacturer="", model="", device="", + is_emulator=False): + if sys.platform == "android": + try: + from ctypes import CDLL, c_char_p, create_string_buffer + except ImportError: + pass + else: + # An NDK developer confirmed that this is an officially-supported + # API (https://stackoverflow.com/a/28416743). Use `getattr` to avoid + # private name mangling. + system_property_get = getattr(CDLL("libc.so"), "__system_property_get") + system_property_get.argtypes = (c_char_p, c_char_p) + + def getprop(name, default): + # https://android.googlesource.com/platform/bionic/+/refs/tags/android-5.0.0_r1/libc/include/sys/system_properties.h#39 + PROP_VALUE_MAX = 92 + buffer = create_string_buffer(PROP_VALUE_MAX) + length = system_property_get(name.encode("UTF-8"), buffer) + if length == 0: + # This API doesn’t distinguish between an empty property and + # a missing one. + return default + else: + return buffer.value.decode("UTF-8", "backslashreplace") + + release = getprop("ro.build.version.release", release) + api_level = int(getprop("ro.build.version.sdk", api_level)) + manufacturer = getprop("ro.product.manufacturer", manufacturer) + model = getprop("ro.product.model", model) + device = getprop("ro.product.device", device) + is_emulator = getprop("ro.kernel.qemu", "0") == "1" + + return AndroidVer( + release, api_level, manufacturer, model, device, is_emulator) + + +### System name aliasing + +def system_alias(system, release, version): + + """ Returns (system, release, version) aliased to common + marketing names used for some systems. + + It also does some reordering of the information in some cases + where it would otherwise cause confusion. + + """ + if system == 'SunOS': + # Sun's OS + if release < '5': + # These releases use the old name SunOS + return system, release, version + # Modify release (marketing release = SunOS release - 3) + l = release.split('.') + if l: + try: + major = int(l[0]) + except ValueError: + pass + else: + major = major - 3 + l[0] = str(major) + release = '.'.join(l) + if release < '6': + system = 'Solaris' + else: + # XXX Whatever the new SunOS marketing name is... + system = 'Solaris' + + elif system in ('win32', 'win16'): + # In case one of the other tricks + system = 'Windows' + + # bpo-35516: Don't replace Darwin with macOS since input release and + # version arguments can be different than the currently running version. 
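+    # (Editorial example: system_alias('SunOS', '5.11', '11.4') returns
+    # ('Solaris', '2.11', '11.4') under the release-minus-3 rule above.)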
+ + return system, release, version + +### Various internal helpers + +def _platform(*args): + + """ Helper to format the platform string in a filename + compatible format e.g. "system-version-machine". + """ + # Format the platform string + platform = '-'.join(x.strip() for x in filter(len, args)) + + # Cleanup some possible filename obstacles... + platform = platform.replace(' ', '_') + platform = platform.replace('/', '-') + platform = platform.replace('\\', '-') + platform = platform.replace(':', '-') + platform = platform.replace(';', '-') + platform = platform.replace('"', '-') + platform = platform.replace('(', '-') + platform = platform.replace(')', '-') + + # No need to report 'unknown' information... + platform = platform.replace('unknown', '') + + # Fold '--'s and remove trailing '-' + while True: + cleaned = platform.replace('--', '-') + if cleaned == platform: + break + platform = cleaned + while platform and platform[-1] == '-': + platform = platform[:-1] + + return platform + +def _node(default=''): + + """ Helper to determine the node name of this machine. + """ + try: + import socket + except ImportError: + # No sockets... + return default + try: + return socket.gethostname() + except OSError: + # Still not working... + return default + +def _follow_symlinks(filepath): + + """ In case filepath is a symlink, follow it until a + real file is reached. + """ + filepath = os.path.abspath(filepath) + while os.path.islink(filepath): + filepath = os.path.normpath( + os.path.join(os.path.dirname(filepath), os.readlink(filepath))) + return filepath + + +def _syscmd_file(target, default=''): + + """ Interface to the system's file command. + + The function uses the -b option of the file command to have it + omit the filename in its output. Follow the symlinks. It returns + default in case the command should fail. + + """ + if sys.platform in {'dos', 'win32', 'win16', 'ios', 'tvos', 'watchos'}: + # XXX Others too ? + return default + + try: + import subprocess + except ImportError: + return default + target = _follow_symlinks(target) + # "file" output is locale dependent: force the usage of the C locale + # to get deterministic behavior. + env = dict(os.environ, LC_ALL='C') + try: + # -b: do not prepend filenames to output lines (brief mode) + output = subprocess.check_output(['file', '-b', target], + stderr=subprocess.DEVNULL, + env=env) + except (OSError, subprocess.CalledProcessError): + return default + if not output: + return default + # With the C locale, the output should be mostly ASCII-compatible. + # Decode from Latin-1 to prevent Unicode decode error. + return output.decode('latin-1') + +### Information about the used architecture + +# Default values for architecture; non-empty strings override the +# defaults given as parameters +_default_architecture = { + 'win32': ('', 'WindowsPE'), + 'win16': ('', 'Windows'), + 'dos': ('', 'MSDOS'), +} + +def architecture(executable=sys.executable, bits='', linkage=''): + + """ Queries the given executable (defaults to the Python interpreter + binary) for various architecture information. + + Returns a tuple (bits, linkage) which contains information about + the bit architecture and the linkage format used for the + executable. Both values are returned as strings. + + Values that cannot be determined are returned as given by the + parameter presets. If bits is given as '', the sizeof(pointer) + (or sizeof(long) on Python version < 1.5.2) is used as + indicator for the supported pointer size. 
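+
+        For example (editorial note): on a typical 64-bit CPython build
+        struct.calcsize('P') == 8, so the default bits value becomes
+        '64bit'.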
+ + The function relies on the system's "file" command to do the + actual work. This is available on most if not all Unix + platforms. On some non-Unix platforms where the "file" command + does not exist and the executable is set to the Python interpreter + binary defaults from _default_architecture are used. + + """ + # Use the sizeof(pointer) as default number of bits if nothing + # else is given as default. + if not bits: + import struct + size = struct.calcsize('P') + bits = str(size * 8) + 'bit' + + # Get data from the 'file' system command + if executable: + fileout = _syscmd_file(executable, '') + else: + fileout = '' + + if not fileout and \ + executable == sys.executable: + # "file" command did not return anything; we'll try to provide + # some sensible defaults then... + if sys.platform in _default_architecture: + b, l = _default_architecture[sys.platform] + if b: + bits = b + if l: + linkage = l + return bits, linkage + + if 'executable' not in fileout and 'shared object' not in fileout: + # Format not supported + return bits, linkage + + # Bits + if '32-bit' in fileout: + bits = '32bit' + elif '64-bit' in fileout: + bits = '64bit' + + # Linkage + if 'ELF' in fileout: + linkage = 'ELF' + elif 'Mach-O' in fileout: + linkage = "Mach-O" + elif 'PE' in fileout: + # E.g. Windows uses this format + if 'Windows' in fileout: + linkage = 'WindowsPE' + else: + linkage = 'PE' + elif 'COFF' in fileout: + linkage = 'COFF' + elif 'MS-DOS' in fileout: + linkage = 'MSDOS' + else: + # XXX the A.OUT format also falls under this class... + pass + + return bits, linkage + + +def _get_machine_win32(): + # Try to use the PROCESSOR_* environment variables + # available on Win XP and later; see + # http://support.microsoft.com/kb/888731 and + # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM + + # WOW64 processes mask the native architecture + try: + [arch, *_] = _wmi_query('CPU', 'Architecture') + except OSError: + pass + else: + try: + arch = ['x86', 'MIPS', 'Alpha', 'PowerPC', None, + 'ARM', 'ia64', None, None, + 'AMD64', None, None, 'ARM64', + ][int(arch)] + except (ValueError, IndexError): + pass + else: + if arch: + return arch + return ( + os.environ.get('PROCESSOR_ARCHITEW6432', '') or + os.environ.get('PROCESSOR_ARCHITECTURE', '') + ) + + +class _Processor: + @classmethod + def get(cls): + func = getattr(cls, f'get_{sys.platform}', cls.from_subprocess) + return func() or '' + + def get_win32(): + try: + manufacturer, caption = _wmi_query('CPU', 'Manufacturer', 'Caption') + except OSError: + return os.environ.get('PROCESSOR_IDENTIFIER', _get_machine_win32()) + else: + return f'{caption}, {manufacturer}' + + def get_OpenVMS(): + try: + import vms_lib + except ImportError: + pass + else: + csid, cpu_number = vms_lib.getsyi('SYI$_CPU', 0) + return 'Alpha' if cpu_number >= 128 else 'VAX' + + # On the iOS simulator, os.uname returns the architecture as uname.machine. + # On device it returns the model name for some reason; but there's only one + # CPU architecture for iOS devices, so we know the right answer. 
+ def get_ios(): + if sys.implementation._multiarch.endswith("simulator"): + return os.uname().machine + return 'arm64' + + def from_subprocess(): + """ + Fall back to `uname -p` + """ + try: + import subprocess + except ImportError: + return None + try: + return subprocess.check_output( + ['uname', '-p'], + stderr=subprocess.DEVNULL, + text=True, + encoding="utf8", + ).strip() + except (OSError, subprocess.CalledProcessError): + pass + + +def _unknown_as_blank(val): + return '' if val == 'unknown' else val + + +### Portable uname() interface + +class uname_result( + collections.namedtuple( + "uname_result_base", + "system node release version machine") + ): + """ + A uname_result that's largely compatible with a + simple namedtuple except that 'processor' is + resolved late and cached to avoid calling "uname" + except when needed. + """ + + _fields = ('system', 'node', 'release', 'version', 'machine', 'processor') + + @functools.cached_property + def processor(self): + return _unknown_as_blank(_Processor.get()) + + def __iter__(self): + return itertools.chain( + super().__iter__(), + (self.processor,) + ) + + @classmethod + def _make(cls, iterable): + # override factory to affect length check + num_fields = len(cls._fields) - 1 + result = cls.__new__(cls, *iterable) + if len(result) != num_fields + 1: + msg = f'Expected {num_fields} arguments, got {len(result)}' + raise TypeError(msg) + return result + + def __getitem__(self, key): + return tuple(self)[key] + + def __len__(self): + return len(tuple(iter(self))) + + def __reduce__(self): + return uname_result, tuple(self)[:len(self._fields) - 1] + + +_uname_cache = None + + +def uname(): + + """ Fairly portable uname interface. Returns a tuple + of strings (system, node, release, version, machine, processor) + identifying the underlying platform. + + Note that unlike the os.uname function this also returns + possible processor information as an additional tuple entry. + + Entries which cannot be determined are set to ''. + + """ + global _uname_cache + + if _uname_cache is not None: + return _uname_cache + + # Get some infos from the builtin os.uname API... + try: + system, node, release, version, machine = infos = os.uname() + except AttributeError: + system = sys.platform + node = _node() + release = version = machine = '' + infos = () + + if not any(infos): + # uname is not available + + # Try win32_ver() on win32 platforms + if system == 'win32': + release, version, csd, ptype = win32_ver() + machine = machine or _get_machine_win32() + + # Try the 'ver' system command available on some + # platforms + if not (release and version): + system, release, version = _syscmd_ver(system) + # Normalize system to what win32_ver() normally returns + # (_syscmd_ver() tends to return the vendor name as well) + if system == 'Microsoft Windows': + system = 'Windows' + elif system == 'Microsoft' and release == 'Windows': + # Under Windows Vista and Windows Server 2008, + # Microsoft changed the output of the ver command. The + # release is no longer printed. This causes the + # system and release to be misidentified. 
+ system = 'Windows' + if '6.0' == version[:3]: + release = 'Vista' + else: + release = '' + + # In case we still don't know anything useful, we'll try to + # help ourselves + if system in ('win32', 'win16'): + if not version: + if system == 'win32': + version = '32bit' + else: + version = '16bit' + system = 'Windows' + + elif system[:4] == 'java': + release, vendor, vminfo, osinfo = java_ver() + system = 'Java' + version = ', '.join(vminfo) + if not version: + version = vendor + + # System specific extensions + if system == 'OpenVMS': + # OpenVMS seems to have release and version mixed up + if not release or release == '0': + release = version + version = '' + + # normalize name + if system == 'Microsoft' and release == 'Windows': + system = 'Windows' + release = 'Vista' + + # On Android, return the name and version of the OS rather than the kernel. + if sys.platform == 'android': + system = 'Android' + release = android_ver().release + + # Normalize responses on iOS + if sys.platform == 'ios': + system, release, _, _ = ios_ver() + + vals = system, node, release, version, machine + # Replace 'unknown' values with the more portable '' + _uname_cache = uname_result(*map(_unknown_as_blank, vals)) + return _uname_cache + +### Direct interfaces to some of the uname() return values + +def system(): + + """ Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'. + + An empty string is returned if the value cannot be determined. + + """ + return uname().system + +def node(): + + """ Returns the computer's network name (which may not be fully + qualified) + + An empty string is returned if the value cannot be determined. + + """ + return uname().node + +def release(): + + """ Returns the system's release, e.g. '2.2.0' or 'NT' + + An empty string is returned if the value cannot be determined. + + """ + return uname().release + +def version(): + + """ Returns the system's release version, e.g. '#3 on degas' + + An empty string is returned if the value cannot be determined. + + """ + return uname().version + +def machine(): + + """ Returns the machine type, e.g. 'i386' + + An empty string is returned if the value cannot be determined. + + """ + return uname().machine + +def processor(): + + """ Returns the (true) processor name, e.g. 'amdk6' + + An empty string is returned if the value cannot be + determined. Note that many platforms do not provide this + information or simply return the same value as for machine(), + e.g. NetBSD does this. + + """ + return uname().processor + +### Various APIs for extracting information from sys.version + +_sys_version_cache = {} + +def _sys_version(sys_version=None): + + """ Returns a parsed version of Python's sys.version as tuple + (name, version, branch, revision, buildno, builddate, compiler) + referring to the Python implementation name, version, branch, + revision, build number, build date/time as string and the compiler + identification string. + + Note that unlike the Python sys.version, the returned value + for the Python version will always include the patchlevel (it + defaults to '.0'). + + The function returns empty strings for tuple entries that + cannot be determined. + + sys_version may be given to parse an alternative version + string, e.g. if the version was read from a different Python + interpreter. 
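+
+        Example (illustrative): a CPython sys.version such as
+        '3.14.0 (main, Oct  7 2025, 10:00:00) [GCC 13.2.0]' parses to
+        ('CPython', '3.14.0', '', '', 'main', 'Oct  7 2025 10:00:00',
+        'GCC 13.2.0'); branch and revision are filled in from sys._git
+        or sys._mercurial when available.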
+ + """ + # Get the Python version + if sys_version is None: + sys_version = sys.version + + # Try the cache first + result = _sys_version_cache.get(sys_version, None) + if result is not None: + return result + + if sys.platform.startswith('java'): + # Jython + jython_sys_version_parser = re.compile( + r'([\w.+]+)\s*' # "version" + r'\(#?([^,]+)' # "(#buildno" + r'(?:,\s*([\w ]*)' # ", builddate" + r'(?:,\s*([\w :]*))?)?\)\s*' # ", buildtime)" + r'\[([^\]]+)\]?', re.ASCII) # "[compiler]" + name = 'Jython' + match = jython_sys_version_parser.match(sys_version) + if match is None: + raise ValueError( + 'failed to parse Jython sys.version: %s' % + repr(sys_version)) + version, buildno, builddate, buildtime, _ = match.groups() + if builddate is None: + builddate = '' + compiler = sys.platform + + elif "PyPy" in sys_version: + # PyPy + pypy_sys_version_parser = re.compile( + r'([\w.+]+)\s*' + r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*' + r'\[PyPy [^\]]+\]?') + + name = "PyPy" + match = pypy_sys_version_parser.match(sys_version) + if match is None: + raise ValueError("failed to parse PyPy sys.version: %s" % + repr(sys_version)) + version, buildno, builddate, buildtime = match.groups() + compiler = "" + + else: + # CPython + cpython_sys_version_parser = re.compile( + r'([\w.+]+)\s*' # "version" + r'(?:free-threading build\s+)?' # "free-threading-build" + r'\(#?([^,]+)' # "(#buildno" + r'(?:,\s*([\w ]*)' # ", builddate" + r'(?:,\s*([\w :]*))?)?\)\s*' # ", buildtime)" + r'\[([^\]]+)\]?', re.ASCII) # "[compiler]" + match = cpython_sys_version_parser.match(sys_version) + if match is None: + raise ValueError( + 'failed to parse CPython sys.version: %s' % + repr(sys_version)) + version, buildno, builddate, buildtime, compiler = \ + match.groups() + name = 'CPython' + if builddate is None: + builddate = '' + elif buildtime: + builddate = builddate + ' ' + buildtime + + if hasattr(sys, '_git'): + _, branch, revision = sys._git + elif hasattr(sys, '_mercurial'): + _, branch, revision = sys._mercurial + else: + branch = '' + revision = '' + + # Add the patchlevel version if missing + l = version.split('.') + if len(l) == 2: + l.append('0') + version = '.'.join(l) + + # Build and cache the result + result = (name, version, branch, revision, buildno, builddate, compiler) + _sys_version_cache[sys_version] = result + return result + +def python_implementation(): + + """ Returns a string identifying the Python implementation. + + Currently, the following implementations are identified: + 'CPython' (C implementation of Python), + 'Jython' (Java implementation of Python), + 'PyPy' (Python implementation of Python). + + """ + return _sys_version()[0] + +def python_version(): + + """ Returns the Python version as string 'major.minor.patchlevel' + + Note that unlike the Python sys.version, the returned value + will always include the patchlevel (it defaults to 0). + + """ + return _sys_version()[1] + +def python_version_tuple(): + + """ Returns the Python version as tuple (major, minor, patchlevel) + of strings. + + Note that unlike the Python sys.version, the returned value + will always include the patchlevel (it defaults to 0). + + """ + return tuple(_sys_version()[1].split('.')) + +def python_branch(): + + """ Returns a string identifying the Python implementation + branch. + + For CPython this is the SCM branch from which the + Python binary was built. + + If not available, an empty string is returned. 
+ + """ + + return _sys_version()[2] + +def python_revision(): + + """ Returns a string identifying the Python implementation + revision. + + For CPython this is the SCM revision from which the + Python binary was built. + + If not available, an empty string is returned. + + """ + return _sys_version()[3] + +def python_build(): + + """ Returns a tuple (buildno, builddate) stating the Python + build number and date as strings. + + """ + return _sys_version()[4:6] + +def python_compiler(): + + """ Returns a string identifying the compiler used for compiling + Python. + + """ + return _sys_version()[6] + +### The Opus Magnum of platform strings :-) + +_platform_cache = {} + +def platform(aliased=False, terse=False): + + """ Returns a single string identifying the underlying platform + with as much useful information as possible (but no more :). + + The output is intended to be human readable rather than + machine parseable. It may look different on different + platforms and this is intended. + + If "aliased" is true, the function will use aliases for + various platforms that report system names which differ from + their common names, e.g. SunOS will be reported as + Solaris. The system_alias() function is used to implement + this. + + Setting terse to true causes the function to return only the + absolute minimum information needed to identify the platform. + + """ + result = _platform_cache.get((aliased, terse), None) + if result is not None: + return result + + # Get uname information and then apply platform specific cosmetics + # to it... + system, node, release, version, machine, processor = uname() + if machine == processor: + processor = '' + if aliased: + system, release, version = system_alias(system, release, version) + + if system == 'Darwin': + # macOS and iOS both report as a "Darwin" kernel + if sys.platform == "ios": + system, release, _, _ = ios_ver() + else: + macos_release = mac_ver()[0] + if macos_release: + system = 'macOS' + release = macos_release + + if system == 'Windows': + # MS platforms + rel, vers, csd, ptype = win32_ver(version) + if terse: + platform = _platform(system, release) + else: + platform = _platform(system, release, version, csd) + + elif system == 'Linux': + # check for libc vs. glibc + libcname, libcversion = libc_ver() + platform = _platform(system, release, machine, processor, + 'with', + libcname+libcversion) + elif system == 'Java': + # Java platforms + r, v, vminfo, (os_name, os_version, os_arch) = java_ver() + if terse or not os_name: + platform = _platform(system, release, version) + else: + platform = _platform(system, release, version, + 'on', + os_name, os_version, os_arch) + + else: + # Generic handler + if terse: + platform = _platform(system, release) + else: + bits, linkage = architecture(sys.executable) + platform = _platform(system, release, machine, + processor, bits, linkage) + + _platform_cache[(aliased, terse)] = platform + return platform + +### freedesktop.org os-release standard +# https://www.freedesktop.org/software/systemd/man/os-release.html + +# /etc takes precedence over /usr/lib +_os_release_candidates = ("/etc/os-release", "/usr/lib/os-release") +_os_release_cache = None + + +def _parse_os_release(lines): + # These fields are mandatory fields with well-known defaults + # in practice all Linux distributions override NAME, ID, and PRETTY_NAME. + info = { + "NAME": "Linux", + "ID": "linux", + "PRETTY_NAME": "Linux", + } + + # NAME=value with optional quotes (' or "). 
The regular expression is less
+    # strict than shell lexer, but that's ok.
+    os_release_line = re.compile(
+        "^(?P<name>[a-zA-Z0-9_]+)=(?P<quote>[\"\']?)(?P<value>.*)(?P=quote)$"
+    )
+    # unescape five special characters mentioned in the standard
+    os_release_unescape = re.compile(r"\\([\\\$\"\'`])")
+
+    for line in lines:
+        mo = os_release_line.match(line)
+        if mo is not None:
+            info[mo.group('name')] = os_release_unescape.sub(
+                r"\1", mo.group('value')
+            )
+
+    return info
+
+
+def freedesktop_os_release():
+    """Return operating system identification from freedesktop.org os-release
+    """
+    global _os_release_cache
+
+    if _os_release_cache is None:
+        errno = None
+        for candidate in _os_release_candidates:
+            try:
+                with open(candidate, encoding="utf-8") as f:
+                    _os_release_cache = _parse_os_release(f)
+                break
+            except OSError as e:
+                errno = e.errno
+        else:
+            raise OSError(
+                errno,
+                f"Unable to read files {', '.join(_os_release_candidates)}"
+            )
+
+    return _os_release_cache.copy()
+
+
+def invalidate_caches():
+    """Invalidate the cached results."""
+    global _uname_cache
+    _uname_cache = None
+
+    global _os_release_cache
+    _os_release_cache = None
+
+    _sys_version_cache.clear()
+    _platform_cache.clear()
+
+
+### Command line interface
+
+def _parse_args(args: list[str] | None):
+    import argparse
+
+    parser = argparse.ArgumentParser(color=True)
+    parser.add_argument("args", nargs="*", choices=["nonaliased", "terse"])
+    parser.add_argument(
+        "--terse",
+        action="store_true",
+        help=(
+            "return only the absolute minimum information needed "
+            "to identify the platform"
+        ),
+    )
+    parser.add_argument(
+        "--nonaliased",
+        dest="aliased",
+        action="store_false",
+        help=(
+            "disable system/OS name aliasing. If aliasing is enabled, "
+            "some platforms report system names different from "
+            "their common names, e.g. SunOS is reported as Solaris"
+        ),
+    )
+
+    return parser.parse_args(args)
+
+
+def _main(args: list[str] | None = None):
+    args = _parse_args(args)
+
+    terse = args.terse or ("terse" in args.args)
+    aliased = args.aliased and ('nonaliased' not in args.args)
+
+    print(platform(aliased, terse))
+
+
+if __name__ == "__main__":
+    _main()
diff --git a/Python313_13_x86_Template/Lib/plistlib.py b/Python314_4_x86_Template/Lib/plistlib.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/plistlib.py
rename to Python314_4_x86_Template/Lib/plistlib.py
diff --git a/Python313_13_x86_Template/Lib/poplib.py b/Python314_4_x86_Template/Lib/poplib.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/poplib.py
rename to Python314_4_x86_Template/Lib/poplib.py
diff --git a/Python314_4_x86_Template/Lib/posixpath.py b/Python314_4_x86_Template/Lib/posixpath.py
new file mode 100644
index 00000000..ad86cc06
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/posixpath.py
@@ -0,0 +1,592 @@
+"""Common operations on Posix pathnames.
+
+Instead of importing this module directly, import os and refer to
+this module as os.path. The "os.path" name is an alias for this
+module on Posix systems; on other systems (e.g. Windows),
+os.path provides the same operations in a manner specific to that
+platform, and is an alias to another module (e.g. ntpath).
+
+Some of this can actually be useful on non-Posix systems too, e.g.
+for manipulation of the pathname component of URLs.
+"""
+
+# Strings representing various path-related bits and pieces.
+# These are primarily for export; internally, they are hardcoded.
+# Should be set before imports for resolving cyclic dependency.
+curdir = '.'
+pardir = '..'
+extsep = '.'
+sep = '/' +pathsep = ':' +defpath = '/bin:/usr/bin' +altsep = None +devnull = '/dev/null' + +import errno +import os +import sys +import stat +import genericpath +from genericpath import * + +__all__ = ["normcase","isabs","join","splitdrive","splitroot","split","splitext", + "basename","dirname","commonprefix","getsize","getmtime", + "getatime","getctime","islink","exists","lexists","isdir","isfile", + "ismount", "expanduser","expandvars","normpath","abspath", + "samefile","sameopenfile","samestat", + "curdir","pardir","sep","pathsep","defpath","altsep","extsep", + "devnull","realpath","supports_unicode_filenames","relpath", + "commonpath", "isjunction","isdevdrive","ALLOW_MISSING"] + + +def _get_sep(path): + if isinstance(path, bytes): + return b'/' + else: + return '/' + +# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac. +# On MS-DOS this may also turn slashes into backslashes; however, other +# normalizations (such as optimizing '../' away) are not allowed +# (another function should be defined to do that). + +def normcase(s): + """Normalize case of pathname. Has no effect under Posix""" + return os.fspath(s) + + +# Return whether a path is absolute. +# Trivial in Posix, harder on the Mac or MS-DOS. + +def isabs(s): + """Test whether a path is absolute""" + s = os.fspath(s) + sep = _get_sep(s) + return s.startswith(sep) + + +# Join pathnames. +# Ignore the previous parts if a part is absolute. +# Insert a '/' unless the first part is empty or already ends in '/'. + +def join(a, *p): + """Join two or more pathname components, inserting '/' as needed. + If any component is an absolute path, all previous path components + will be discarded. An empty last part will result in a path that + ends with a separator.""" + a = os.fspath(a) + sep = _get_sep(a) + path = a + try: + for b in p: + b = os.fspath(b) + if b.startswith(sep) or not path: + path = b + elif path.endswith(sep): + path += b + else: + path += sep + b + except (TypeError, AttributeError, BytesWarning): + genericpath._check_arg_types('join', a, *p) + raise + return path + + +# Split a path in head (everything up to the last '/') and tail (the +# rest). If the path ends in '/', tail will be empty. If there is no +# '/' in the path, head will be empty. +# Trailing '/'es are stripped from head unless it is the root. + +def split(p): + """Split a pathname. Returns tuple "(head, tail)" where "tail" is + everything after the final slash. Either part may be empty.""" + p = os.fspath(p) + sep = _get_sep(p) + i = p.rfind(sep) + 1 + head, tail = p[:i], p[i:] + if head and head != sep*len(head): + head = head.rstrip(sep) + return head, tail + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. + +def splitext(p): + p = os.fspath(p) + if isinstance(p, bytes): + sep = b'/' + extsep = b'.' + else: + sep = '/' + extsep = '.' + return genericpath._splitext(p, sep, None, extsep) +splitext.__doc__ = genericpath._splitext.__doc__ + +# Split a pathname into a drive specification and the rest of the +# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty. + +def splitdrive(p): + """Split a pathname into drive and path. On Posix, drive is always + empty.""" + p = os.fspath(p) + return p[:0], p + + +try: + from posix import _path_splitroot_ex as splitroot +except ImportError: + def splitroot(p): + """Split a pathname into drive, root and tail. 
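+
+        On Posix the drive is always empty. For example (illustrative,
+        derived from the branches below):
+
+            splitroot('foo/bar')  -> ('', '', 'foo/bar')
+            splitroot('/foo/bar') -> ('', '/', 'foo/bar')
+            splitroot('//foo')    -> ('', '//', 'foo')
+            splitroot('///foo')   -> ('', '/', '//foo')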
+ + The tail contains anything after the root.""" + p = os.fspath(p) + if isinstance(p, bytes): + sep = b'/' + empty = b'' + else: + sep = '/' + empty = '' + if p[:1] != sep: + # Relative path, e.g.: 'foo' + return empty, empty, p + elif p[1:2] != sep or p[2:3] == sep: + # Absolute path, e.g.: '/foo', '///foo', '////foo', etc. + return empty, sep, p[1:] + else: + # Precisely two leading slashes, e.g.: '//foo'. Implementation defined per POSIX, see + # https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13 + return empty, p[:2], p[2:] + + +# Return the tail (basename) part of a path, same as split(path)[1]. + +def basename(p): + """Returns the final component of a pathname""" + p = os.fspath(p) + sep = _get_sep(p) + i = p.rfind(sep) + 1 + return p[i:] + + +# Return the head (dirname) part of a path, same as split(path)[0]. + +def dirname(p): + """Returns the directory component of a pathname""" + p = os.fspath(p) + sep = _get_sep(p) + i = p.rfind(sep) + 1 + head = p[:i] + if head and head != sep*len(head): + head = head.rstrip(sep) + return head + + +# Is a path a mount point? +# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?) + +def ismount(path): + """Test whether a path is a mount point""" + try: + s1 = os.lstat(path) + except (OSError, ValueError): + # It doesn't exist -- so not a mount point. :-) + return False + else: + # A symlink can never be a mount point + if stat.S_ISLNK(s1.st_mode): + return False + + path = os.fspath(path) + if isinstance(path, bytes): + parent = join(path, b'..') + else: + parent = join(path, '..') + try: + s2 = os.lstat(parent) + except OSError: + parent = realpath(parent) + try: + s2 = os.lstat(parent) + except OSError: + return False + + # path/.. on a different device as path or the same i-node as path + return s1.st_dev != s2.st_dev or s1.st_ino == s2.st_ino + + +# Expand paths beginning with '~' or '~user'. +# '~' means $HOME; '~user' means that user's home directory. +# If the path doesn't begin with '~', or if the user or $HOME is unknown, +# the path is returned unchanged (leaving error reporting to whatever +# function is called with the expanded path as argument). +# See also module 'glob' for expansion of *, ? and [...] in pathnames. +# (A function should also be defined to do full *sh-style environment +# variable expansion.) + +def expanduser(path): + """Expand ~ and ~user constructions. 
If user or $HOME is unknown, + do nothing.""" + path = os.fspath(path) + if isinstance(path, bytes): + tilde = b'~' + else: + tilde = '~' + if not path.startswith(tilde): + return path + sep = _get_sep(path) + i = path.find(sep, 1) + if i < 0: + i = len(path) + if i == 1: + if 'HOME' not in os.environ: + try: + import pwd + except ImportError: + # pwd module unavailable, return path unchanged + return path + try: + userhome = pwd.getpwuid(os.getuid()).pw_dir + except KeyError: + # bpo-10496: if the current user identifier doesn't exist in the + # password database, return the path unchanged + return path + else: + userhome = os.environ['HOME'] + else: + try: + import pwd + except ImportError: + # pwd module unavailable, return path unchanged + return path + name = path[1:i] + if isinstance(name, bytes): + name = os.fsdecode(name) + try: + pwent = pwd.getpwnam(name) + except KeyError: + # bpo-10496: if the user name from the path doesn't exist in the + # password database, return the path unchanged + return path + userhome = pwent.pw_dir + # if no user home, return the path unchanged on VxWorks + if userhome is None and sys.platform == "vxworks": + return path + if isinstance(path, bytes): + userhome = os.fsencode(userhome) + userhome = userhome.rstrip(sep) + return (userhome + path[i:]) or sep + + +# Expand paths containing shell variable substitutions. +# This expands the forms $variable and ${variable} only. +# Non-existent variables are left unchanged. + +_varpattern = r'\$(\w+|\{[^}]*\}?)' +_varsub = None +_varsubb = None + +def expandvars(path): + """Expand shell variables of form $var and ${var}. Unknown variables + are left unchanged.""" + path = os.fspath(path) + global _varsub, _varsubb + if isinstance(path, bytes): + if b'$' not in path: + return path + if not _varsubb: + import re + _varsubb = re.compile(_varpattern.encode(), re.ASCII).sub + sub = _varsubb + start = b'{' + end = b'}' + environ = getattr(os, 'environb', None) + else: + if '$' not in path: + return path + if not _varsub: + import re + _varsub = re.compile(_varpattern, re.ASCII).sub + sub = _varsub + start = '{' + end = '}' + environ = os.environ + + def repl(m): + name = m[1] + if name.startswith(start): + if not name.endswith(end): + return m[0] + name = name[1:-1] + try: + if environ is None: + value = os.fsencode(os.environ[os.fsdecode(name)]) + else: + value = environ[name] + except KeyError: + return m[0] + else: + return value + + return sub(repl, path) + + +# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B. +# It should be understood that this may change the meaning of the path +# if it contains symbolic links! + +try: + from posix import _path_normpath as normpath + +except ImportError: + def normpath(path): + """Normalize path, eliminating double slashes, etc.""" + path = os.fspath(path) + if isinstance(path, bytes): + sep = b'/' + dot = b'.' + dotdot = b'..' + else: + sep = '/' + dot = '.' + dotdot = '..' 
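+        # Illustration (added comment): normpath('A//B/./C/..') collapses
+        # to 'A/B', while a leading '//' is preserved, matching splitroot.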
+ if not path: + return dot + _, initial_slashes, path = splitroot(path) + comps = path.split(sep) + new_comps = [] + for comp in comps: + if not comp or comp == dot: + continue + if (comp != dotdot or (not initial_slashes and not new_comps) or + (new_comps and new_comps[-1] == dotdot)): + new_comps.append(comp) + elif new_comps: + new_comps.pop() + comps = new_comps + path = initial_slashes + sep.join(comps) + return path or dot + + +def abspath(path): + """Return an absolute path.""" + path = os.fspath(path) + if isinstance(path, bytes): + if not path.startswith(b'/'): + path = join(os.getcwdb(), path) + else: + if not path.startswith('/'): + path = join(os.getcwd(), path) + return normpath(path) + + +# Return a canonical path (i.e. the absolute location of a file on the +# filesystem). + +def realpath(filename, *, strict=False): + """Return the canonical path of the specified filename, eliminating any +symbolic links encountered in the path.""" + filename = os.fspath(filename) + if isinstance(filename, bytes): + sep = b'/' + curdir = b'.' + pardir = b'..' + getcwd = os.getcwdb + else: + sep = '/' + curdir = '.' + pardir = '..' + getcwd = os.getcwd + if strict is ALLOW_MISSING: + ignored_error = FileNotFoundError + strict = True + elif strict: + ignored_error = () + else: + ignored_error = OSError + + lstat = os.lstat + readlink = os.readlink + maxlinks = None + + # The stack of unresolved path parts. When popped, a special value of None + # indicates that a symlink target has been resolved, and that the original + # symlink path can be retrieved by popping again. The [::-1] slice is a + # very fast way of spelling list(reversed(...)). + rest = filename.split(sep)[::-1] + + # Number of unprocessed parts in 'rest'. This can differ from len(rest) + # later, because 'rest' might contain markers for unresolved symlinks. + part_count = len(rest) + + # The resolved path, which is absolute throughout this function. + # Note: getcwd() returns a normalized and symlink-free path. + path = sep if filename.startswith(sep) else getcwd() + + # Mapping from symlink paths to *fully resolved* symlink targets. If a + # symlink is encountered but not yet resolved, the value is None. This is + # used both to detect symlink loops and to speed up repeated traversals of + # the same links. + seen = {} + + # Number of symlinks traversed. When the number of traversals is limited + # by *maxlinks*, this is used instead of *seen* to detect symlink loops. + link_count = 0 + + while part_count: + name = rest.pop() + if name is None: + # resolved symlink target + seen[rest.pop()] = path + continue + part_count -= 1 + if not name or name == curdir: + # current dir + continue + if name == pardir: + # parent dir + path = path[:path.rindex(sep)] or sep + continue + if path == sep: + newpath = path + name + else: + newpath = path + sep + name + try: + st_mode = lstat(newpath).st_mode + if not stat.S_ISLNK(st_mode): + if strict and part_count and not stat.S_ISDIR(st_mode): + raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), + newpath) + path = newpath + continue + elif maxlinks is not None: + link_count += 1 + if link_count > maxlinks: + if strict: + raise OSError(errno.ELOOP, os.strerror(errno.ELOOP), + newpath) + path = newpath + continue + elif newpath in seen: + # Already seen this path + path = seen[newpath] + if path is not None: + # use cached value + continue + # The symlink is not resolved, so we must have a symlink loop. 
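+                # For instance (added comment): with links a -> b and
+                # b -> a, resolving 'a' reaches 'a' again while
+                # seen['a'] is still None, which is the loop signal.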
+ if strict: + raise OSError(errno.ELOOP, os.strerror(errno.ELOOP), + newpath) + path = newpath + continue + target = readlink(newpath) + except ignored_error: + pass + else: + # Resolve the symbolic link + if target.startswith(sep): + # Symlink target is absolute; reset resolved path. + path = sep + if maxlinks is None: + # Mark this symlink as seen but not fully resolved. + seen[newpath] = None + # Push the symlink path onto the stack, and signal its specialness + # by also pushing None. When these entries are popped, we'll + # record the fully-resolved symlink target in the 'seen' mapping. + rest.append(newpath) + rest.append(None) + # Push the unresolved symlink target parts onto the stack. + target_parts = target.split(sep)[::-1] + rest.extend(target_parts) + part_count += len(target_parts) + continue + # An error occurred and was ignored. + path = newpath + + return path + + +supports_unicode_filenames = (sys.platform == 'darwin') + +def relpath(path, start=None): + """Return a relative version of a path""" + + path = os.fspath(path) + if not path: + raise ValueError("no path specified") + + if isinstance(path, bytes): + curdir = b'.' + sep = b'/' + pardir = b'..' + else: + curdir = '.' + sep = '/' + pardir = '..' + + if start is None: + start = curdir + else: + start = os.fspath(start) + + try: + start_tail = abspath(start).lstrip(sep) + path_tail = abspath(path).lstrip(sep) + start_list = start_tail.split(sep) if start_tail else [] + path_list = path_tail.split(sep) if path_tail else [] + # Work out how much of the filepath is shared by start and path. + i = len(commonprefix([start_list, path_list])) + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return sep.join(rel_list) + except (TypeError, AttributeError, BytesWarning, DeprecationWarning): + genericpath._check_arg_types('relpath', path, start) + raise + + +# Return the longest common sub-path of the sequence of paths given as input. +# The paths are not normalized before comparing them (this is the +# responsibility of the caller). Any trailing separator is stripped from the +# returned path. + +def commonpath(paths): + """Given a sequence of path names, returns the longest common sub-path.""" + + paths = tuple(map(os.fspath, paths)) + + if not paths: + raise ValueError('commonpath() arg is an empty sequence') + + if isinstance(paths[0], bytes): + sep = b'/' + curdir = b'.' + else: + sep = '/' + curdir = '.' + + try: + split_paths = [path.split(sep) for path in paths] + + try: + isabs, = {p.startswith(sep) for p in paths} + except ValueError: + raise ValueError("Can't mix absolute and relative paths") from None + + split_paths = [[c for c in s if c and c != curdir] for s in split_paths] + s1 = min(split_paths) + s2 = max(split_paths) + common = s1 + for i, c in enumerate(s1): + if c != s2[i]: + common = s1[:i] + break + + prefix = sep if isabs else sep[:0] + return prefix + sep.join(common) + except (TypeError, AttributeError): + genericpath._check_arg_types('commonpath', *paths) + raise diff --git a/Python314_4_x86_Template/Lib/pprint.py b/Python314_4_x86_Template/Lib/pprint.py new file mode 100644 index 00000000..dc0953ce --- /dev/null +++ b/Python314_4_x86_Template/Lib/pprint.py @@ -0,0 +1,675 @@ +# Author: Fred L. Drake, Jr. +# fdrake@acm.org +# +# This is a simple little module I wrote to make life easier. I didn't +# see anything quite like it in the library, though I may have overlooked +# something. 
I wrote this when I was trying to read some heavily nested +# tuples with fairly non-descriptive content. This is modeled very much +# after Lisp/Scheme - style pretty-printing of lists. If you find it +# useful, thank small children who sleep at night. + +"""Support to pretty-print lists, tuples, & dictionaries recursively. + +Very simple, but useful, especially in debugging data structures. + +Classes +------- + +PrettyPrinter() + Handle pretty-printing operations onto a stream using a configured + set of formatting parameters. + +Functions +--------- + +pformat() + Format a Python object into a pretty-printed representation. + +pprint() + Pretty-print a Python object to a stream [default is sys.stdout]. + +saferepr() + Generate a 'standard' repr()-like value, but protect against recursive + data structures. + +""" + +import collections as _collections +import sys as _sys +import types as _types +from io import StringIO as _StringIO + +__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr", + "PrettyPrinter", "pp"] + + +def pprint(object, stream=None, indent=1, width=80, depth=None, *, + compact=False, sort_dicts=True, underscore_numbers=False): + """Pretty-print a Python object to a stream [default is sys.stdout].""" + printer = PrettyPrinter( + stream=stream, indent=indent, width=width, depth=depth, + compact=compact, sort_dicts=sort_dicts, + underscore_numbers=underscore_numbers) + printer.pprint(object) + + +def pformat(object, indent=1, width=80, depth=None, *, + compact=False, sort_dicts=True, underscore_numbers=False): + """Format a Python object into a pretty-printed representation.""" + return PrettyPrinter(indent=indent, width=width, depth=depth, + compact=compact, sort_dicts=sort_dicts, + underscore_numbers=underscore_numbers).pformat(object) + + +def pp(object, *args, sort_dicts=False, **kwargs): + """Pretty-print a Python object""" + pprint(object, *args, sort_dicts=sort_dicts, **kwargs) + + +def saferepr(object): + """Version of repr() which can handle recursive data structures.""" + return PrettyPrinter()._safe_repr(object, {}, None, 0)[0] + + +def isreadable(object): + """Determine if saferepr(object) is readable by eval().""" + return PrettyPrinter()._safe_repr(object, {}, None, 0)[1] + + +def isrecursive(object): + """Determine if object requires a recursive representation.""" + return PrettyPrinter()._safe_repr(object, {}, None, 0)[2] + + +class _safe_key: + """Helper function for key functions when sorting unorderable objects. + + The wrapped-object will fallback to a Py2.x style comparison for + unorderable types (sorting first comparing the type name and then by + the obj ids). Does not work recursively, so dict.items() must have + _safe_key applied to both the key and the value. + + """ + + __slots__ = ['obj'] + + def __init__(self, obj): + self.obj = obj + + def __lt__(self, other): + try: + return self.obj < other.obj + except TypeError: + return ((str(type(self.obj)), id(self.obj)) < \ + (str(type(other.obj)), id(other.obj))) + + +def _safe_tuple(t): + "Helper function for comparing 2-tuples" + return _safe_key(t[0]), _safe_key(t[1]) + + +class PrettyPrinter: + def __init__(self, indent=1, width=80, depth=None, stream=None, *, + compact=False, sort_dicts=True, underscore_numbers=False): + """Handle pretty printing operations onto a stream using a set of + configured parameters. + + indent + Number of spaces to indent for each level of nesting. + + width + Attempted maximum number of columns in the output. 
+ + depth + The maximum depth to print out nested structures. + + stream + The desired output stream. If omitted (or false), the standard + output stream available at construction will be used. + + compact + If true, several items will be combined in one line. + + sort_dicts + If true, dict keys are sorted. + + underscore_numbers + If true, digit groups are separated with underscores. + + """ + indent = int(indent) + width = int(width) + if indent < 0: + raise ValueError('indent must be >= 0') + if depth is not None and depth <= 0: + raise ValueError('depth must be > 0') + if not width: + raise ValueError('width must be != 0') + self._depth = depth + self._indent_per_level = indent + self._width = width + if stream is not None: + self._stream = stream + else: + self._stream = _sys.stdout + self._compact = bool(compact) + self._sort_dicts = sort_dicts + self._underscore_numbers = underscore_numbers + + def pprint(self, object): + if self._stream is not None: + self._format(object, self._stream, 0, 0, {}, 0) + self._stream.write("\n") + + def pformat(self, object): + sio = _StringIO() + self._format(object, sio, 0, 0, {}, 0) + return sio.getvalue() + + def isrecursive(self, object): + return self.format(object, {}, 0, 0)[2] + + def isreadable(self, object): + s, readable, recursive = self.format(object, {}, 0, 0) + return readable and not recursive + + def _format(self, object, stream, indent, allowance, context, level): + objid = id(object) + if objid in context: + stream.write(_recursion(object)) + self._recursive = True + self._readable = False + return + rep = self._repr(object, context, level) + max_width = self._width - indent - allowance + if len(rep) > max_width: + p = self._dispatch.get(type(object).__repr__, None) + # Lazy import to improve module import time + from dataclasses import is_dataclass + + if p is not None: + context[objid] = 1 + p(self, object, stream, indent, allowance, context, level + 1) + del context[objid] + return + elif (is_dataclass(object) and + not isinstance(object, type) and + object.__dataclass_params__.repr and + # Check dataclass has generated repr method. 
+ hasattr(object.__repr__, "__wrapped__") and + "__create_fn__" in object.__repr__.__wrapped__.__qualname__): + context[objid] = 1 + self._pprint_dataclass(object, stream, indent, allowance, context, level + 1) + del context[objid] + return + stream.write(rep) + + def _pprint_dataclass(self, object, stream, indent, allowance, context, level): + # Lazy import to improve module import time + from dataclasses import fields as dataclass_fields + + cls_name = object.__class__.__name__ + indent += len(cls_name) + 1 + items = [(f.name, getattr(object, f.name)) for f in dataclass_fields(object) if f.repr] + stream.write(cls_name + '(') + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(')') + + _dispatch = {} + + def _pprint_dict(self, object, stream, indent, allowance, context, level): + write = stream.write + write('{') + if self._indent_per_level > 1: + write((self._indent_per_level - 1) * ' ') + length = len(object) + if length: + if self._sort_dicts: + items = sorted(object.items(), key=_safe_tuple) + else: + items = object.items() + self._format_dict_items(items, stream, indent, allowance + 1, + context, level) + write('}') + + _dispatch[dict.__repr__] = _pprint_dict + + def _pprint_ordered_dict(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '(') + self._format(list(object.items()), stream, + indent + len(cls.__name__) + 1, allowance + 1, + context, level) + stream.write(')') + + _dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict + + def _pprint_list(self, object, stream, indent, allowance, context, level): + stream.write('[') + self._format_items(object, stream, indent, allowance + 1, + context, level) + stream.write(']') + + _dispatch[list.__repr__] = _pprint_list + + def _pprint_tuple(self, object, stream, indent, allowance, context, level): + stream.write('(') + endchar = ',)' if len(object) == 1 else ')' + self._format_items(object, stream, indent, allowance + len(endchar), + context, level) + stream.write(endchar) + + _dispatch[tuple.__repr__] = _pprint_tuple + + def _pprint_set(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + typ = object.__class__ + if typ is set: + stream.write('{') + endchar = '}' + else: + stream.write(typ.__name__ + '({') + endchar = '})' + indent += len(typ.__name__) + 1 + object = sorted(object, key=_safe_key) + self._format_items(object, stream, indent, allowance + len(endchar), + context, level) + stream.write(endchar) + + _dispatch[set.__repr__] = _pprint_set + _dispatch[frozenset.__repr__] = _pprint_set + + def _pprint_str(self, object, stream, indent, allowance, context, level): + write = stream.write + if not len(object): + write(repr(object)) + return + chunks = [] + lines = object.splitlines(True) + if level == 1: + indent += 1 + allowance += 1 + max_width1 = max_width = self._width - indent + for i, line in enumerate(lines): + rep = repr(line) + if i == len(lines) - 1: + max_width1 -= allowance + if len(rep) <= max_width1: + chunks.append(rep) + else: + # Lazy import to improve module import time + import re + + # A list of alternating (non-space, space) strings + parts = re.findall(r'\S*\s*', line) + assert parts + assert not parts[-1] + parts.pop() # drop empty last part + max_width2 = max_width + current = '' + for j, part in enumerate(parts): + candidate = current + part + if j == len(parts) - 
1 and i == len(lines) - 1: + max_width2 -= allowance + if len(repr(candidate)) > max_width2: + if current: + chunks.append(repr(current)) + current = part + else: + current = candidate + if current: + chunks.append(repr(current)) + if len(chunks) == 1: + write(rep) + return + if level == 1: + write('(') + for i, rep in enumerate(chunks): + if i > 0: + write('\n' + ' '*indent) + write(rep) + if level == 1: + write(')') + + _dispatch[str.__repr__] = _pprint_str + + def _pprint_bytes(self, object, stream, indent, allowance, context, level): + write = stream.write + if len(object) <= 4: + write(repr(object)) + return + parens = level == 1 + if parens: + indent += 1 + allowance += 1 + write('(') + delim = '' + for rep in _wrap_bytes_repr(object, self._width - indent, allowance): + write(delim) + write(rep) + if not delim: + delim = '\n' + ' '*indent + if parens: + write(')') + + _dispatch[bytes.__repr__] = _pprint_bytes + + def _pprint_bytearray(self, object, stream, indent, allowance, context, level): + write = stream.write + write('bytearray(') + self._pprint_bytes(bytes(object), stream, indent + 10, + allowance + 1, context, level + 1) + write(')') + + _dispatch[bytearray.__repr__] = _pprint_bytearray + + def _pprint_mappingproxy(self, object, stream, indent, allowance, context, level): + stream.write('mappingproxy(') + self._format(object.copy(), stream, indent + 13, allowance + 1, + context, level) + stream.write(')') + + _dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy + + def _pprint_simplenamespace(self, object, stream, indent, allowance, context, level): + if type(object) is _types.SimpleNamespace: + # The SimpleNamespace repr is "namespace" instead of the class + # name, so we do the same here. For subclasses; use the class name. + cls_name = 'namespace' + else: + cls_name = object.__class__.__name__ + indent += len(cls_name) + 1 + items = object.__dict__.items() + stream.write(cls_name + '(') + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(')') + + _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace + + def _format_dict_items(self, items, stream, indent, allowance, context, + level): + write = stream.write + indent += self._indent_per_level + delimnl = ',\n' + ' ' * indent + last_index = len(items) - 1 + for i, (key, ent) in enumerate(items): + last = i == last_index + rep = self._repr(key, context, level) + write(rep) + write(': ') + self._format(ent, stream, indent + len(rep) + 2, + allowance if last else 1, + context, level) + if not last: + write(delimnl) + + def _format_namespace_items(self, items, stream, indent, allowance, context, level): + write = stream.write + delimnl = ',\n' + ' ' * indent + last_index = len(items) - 1 + for i, (key, ent) in enumerate(items): + last = i == last_index + write(key) + write('=') + if id(ent) in context: + # Special-case representation of recursion to match standard + # recursive dataclass repr. 
+ write("...") + else: + self._format(ent, stream, indent + len(key) + 1, + allowance if last else 1, + context, level) + if not last: + write(delimnl) + + def _format_items(self, items, stream, indent, allowance, context, level): + write = stream.write + indent += self._indent_per_level + if self._indent_per_level > 1: + write((self._indent_per_level - 1) * ' ') + delimnl = ',\n' + ' ' * indent + delim = '' + width = max_width = self._width - indent + 1 + it = iter(items) + try: + next_ent = next(it) + except StopIteration: + return + last = False + while not last: + ent = next_ent + try: + next_ent = next(it) + except StopIteration: + last = True + max_width -= allowance + width -= allowance + if self._compact: + rep = self._repr(ent, context, level) + w = len(rep) + 2 + if width < w: + width = max_width + if delim: + delim = delimnl + if width >= w: + width -= w + write(delim) + delim = ', ' + write(rep) + continue + write(delim) + delim = delimnl + self._format(ent, stream, indent, + allowance if last else 1, + context, level) + + def _repr(self, object, context, level): + repr, readable, recursive = self.format(object, context.copy(), + self._depth, level) + if not readable: + self._readable = False + if recursive: + self._recursive = True + return repr + + def format(self, object, context, maxlevels, level): + """Format object for a specific context, returning a string + and flags indicating whether the representation is 'readable' + and whether the object represents a recursive construct. + """ + return self._safe_repr(object, context, maxlevels, level) + + def _pprint_default_dict(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + rdf = self._repr(object.default_factory, context, level) + cls = object.__class__ + indent += len(cls.__name__) + 1 + stream.write('%s(%s,\n%s' % (cls.__name__, rdf, ' ' * indent)) + self._pprint_dict(object, stream, indent, allowance + 1, context, level) + stream.write(')') + + _dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict + + def _pprint_counter(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '({') + if self._indent_per_level > 1: + stream.write((self._indent_per_level - 1) * ' ') + items = object.most_common() + self._format_dict_items(items, stream, + indent + len(cls.__name__) + 1, allowance + 2, + context, level) + stream.write('})') + + _dispatch[_collections.Counter.__repr__] = _pprint_counter + + def _pprint_chain_map(self, object, stream, indent, allowance, context, level): + if not len(object.maps): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '(') + indent += len(cls.__name__) + 1 + for i, m in enumerate(object.maps): + if i == len(object.maps) - 1: + self._format(m, stream, indent, allowance + 1, context, level) + stream.write(')') + else: + self._format(m, stream, indent, 1, context, level) + stream.write(',\n' + ' ' * indent) + + _dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map + + def _pprint_deque(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '(') + indent += len(cls.__name__) + 1 + stream.write('[') + if object.maxlen is None: + self._format_items(object, stream, indent, allowance + 2, + context, level) + stream.write('])') + else: + 
self._format_items(object, stream, indent, 2,
+                               context, level)
+            rml = self._repr(object.maxlen, context, level)
+            stream.write('],\n%smaxlen=%s)' % (' ' * indent, rml))
+
+    _dispatch[_collections.deque.__repr__] = _pprint_deque
+
+    def _pprint_user_dict(self, object, stream, indent, allowance, context, level):
+        self._format(object.data, stream, indent, allowance, context, level - 1)
+
+    _dispatch[_collections.UserDict.__repr__] = _pprint_user_dict
+
+    def _pprint_user_list(self, object, stream, indent, allowance, context, level):
+        self._format(object.data, stream, indent, allowance, context, level - 1)
+
+    _dispatch[_collections.UserList.__repr__] = _pprint_user_list
+
+    def _pprint_user_string(self, object, stream, indent, allowance, context, level):
+        self._format(object.data, stream, indent, allowance, context, level - 1)
+
+    _dispatch[_collections.UserString.__repr__] = _pprint_user_string
+
+    def _safe_repr(self, object, context, maxlevels, level):
+        # Return triple (repr_string, isreadable, isrecursive).
+        typ = type(object)
+        if typ in _builtin_scalars:
+            return repr(object), True, False
+
+        r = getattr(typ, "__repr__", None)
+
+        if issubclass(typ, int) and r is int.__repr__:
+            if self._underscore_numbers:
+                return f"{object:_d}", True, False
+            else:
+                return repr(object), True, False
+
+        if issubclass(typ, dict) and r is dict.__repr__:
+            if not object:
+                return "{}", True, False
+            objid = id(object)
+            if maxlevels and level >= maxlevels:
+                return "{...}", False, objid in context
+            if objid in context:
+                return _recursion(object), False, True
+            context[objid] = 1
+            readable = True
+            recursive = False
+            components = []
+            append = components.append
+            level += 1
+            if self._sort_dicts:
+                items = sorted(object.items(), key=_safe_tuple)
+            else:
+                items = object.items()
+            for k, v in items:
+                krepr, kreadable, krecur = self.format(
+                    k, context, maxlevels, level)
+                vrepr, vreadable, vrecur = self.format(
+                    v, context, maxlevels, level)
+                append("%s: %s" % (krepr, vrepr))
+                readable = readable and kreadable and vreadable
+                if krecur or vrecur:
+                    recursive = True
+            del context[objid]
+            return "{%s}" % ", ".join(components), readable, recursive
+
+        if (issubclass(typ, list) and r is list.__repr__) or \
+           (issubclass(typ, tuple) and r is tuple.__repr__):
+            if issubclass(typ, list):
+                if not object:
+                    return "[]", True, False
+                format = "[%s]"
+            elif len(object) == 1:
+                format = "(%s,)"
+            else:
+                if not object:
+                    return "()", True, False
+                format = "(%s)"
+            objid = id(object)
+            if maxlevels and level >= maxlevels:
+                return format % "...", False, objid in context
+            if objid in context:
+                return _recursion(object), False, True
+            context[objid] = 1
+            readable = True
+            recursive = False
+            components = []
+            append = components.append
+            level += 1
+            for o in object:
+                orepr, oreadable, orecur = self.format(
+                    o, context, maxlevels, level)
+                append(orepr)
+                if not oreadable:
+                    readable = False
+                if orecur:
+                    recursive = True
+            del context[objid]
+            return format % ", ".join(components), readable, recursive
+
+        rep = repr(object)
+        return rep, (rep and not rep.startswith('<')), False
+
+
+_builtin_scalars = frozenset({str, bytes, bytearray, float, complex,
+                              bool, type(None)})
+
+
+def _recursion(object):
+    return ("<Recursion on %s with id=%s>"
+            % (type(object).__name__, id(object)))
+
+
+def _wrap_bytes_repr(object, width, allowance):
+    current = b''
+    last = len(object) // 4 * 4
+    for i in range(0, len(object), 4):
+        part = object[i: i+4]
+        candidate = current + part
+        if i == last:
+            width -= allowance
+        if len(repr(candidate)) > width:
+            if current:
+                yield repr(current)
+            current = part
+        else:
+            current = candidate
+    if current:
+        yield repr(current)
diff --git a/Python314_4_x86_Template/Lib/profile.py b/Python314_4_x86_Template/Lib/profile.py
new file mode 100644
index 00000000..a5afb12c
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/profile.py
@@ -0,0 +1,615 @@
+#
+# Class for profiling python code. rev 1.0 6/2/94
+#
+# Written by James Roskind
+# Based on prior profile module by Sjoerd Mullender...
+#   which was hacked somewhat by: Guido van Rossum
+
+"""Class for profiling Python code."""
+
+# Copyright Disney Enterprises, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific language
+# governing permissions and limitations under the License.
+
+
+import importlib.machinery
+import io
+import sys
+import time
+import marshal
+
+__all__ = ["run", "runctx", "Profile"]
+
+# Sample timer for use with
+#i_count = 0
+#def integer_timer():
+#       global i_count
+#       i_count = i_count + 1
+#       return i_count
+#itimes = integer_timer # replace with C coded timer returning integers
+
+class _Utils:
+    """Support class for utility functions which are shared by
+    profile.py and cProfile.py modules.
+    Not supposed to be used directly.
+    """
+
+    def __init__(self, profiler):
+        self.profiler = profiler
+
+    def run(self, statement, filename, sort):
+        prof = self.profiler()
+        try:
+            prof.run(statement)
+        except SystemExit:
+            pass
+        finally:
+            self._show(prof, filename, sort)
+
+    def runctx(self, statement, globals, locals, filename, sort):
+        prof = self.profiler()
+        try:
+            prof.runctx(statement, globals, locals)
+        except SystemExit:
+            pass
+        finally:
+            self._show(prof, filename, sort)
+
+    def _show(self, prof, filename, sort):
+        if filename is not None:
+            prof.dump_stats(filename)
+        else:
+            prof.print_stats(sort)
+
+
+#**************************************************************************
+# The following are the static member functions for the profiler class
+# Note that an instance of Profile() is *not* needed to call them.
+#**************************************************************************
+
+def run(statement, filename=None, sort=-1):
+    """Run statement under profiler optionally saving results in filename
+
+    This function takes a single argument that can be passed to the
+    "exec" statement, and an optional file name. In all cases this
+    routine attempts to "exec" its first argument and gather profiling
+    statistics from the execution. If no file name is present, then this
+    function automatically prints a simple profiling report, sorted by the
+    standard name string (file/line/function-name) that is presented in
+    each line.
+    """
+    return _Utils(Profile).run(statement, filename, sort)
+
+def runctx(statement, globals, locals, filename=None, sort=-1):
+    """Run statement under profiler, supplying your own globals and locals,
+    optionally saving results in filename.
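+
+    Example (illustrative; 'fib' stands in for any callable):
+
+        import profile
+        profile.runctx('r = fib(20)', globals(), {'fib': fib})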
+ + statement and filename have the same semantics as profile.run + """ + return _Utils(Profile).runctx(statement, globals, locals, filename, sort) + + +class Profile: + """Profiler class. + + self.cur is always a tuple. Each such tuple corresponds to a stack + frame that is currently active (self.cur[-2]). The following are the + definitions of its members. We use this external "parallel stack" to + avoid contaminating the program that we are profiling. (old profiler + used to write into the frames local dictionary!!) Derived classes + can change the definition of some entries, as long as they leave + [-2:] intact (frame and previous tuple). In case an internal error is + detected, the -3 element is used as the function name. + + [ 0] = Time that needs to be charged to the parent frame's function. + It is used so that a function call will not have to access the + timing data for the parent frame. + [ 1] = Total time spent in this frame's function, excluding time in + subfunctions (this latter is tallied in cur[2]). + [ 2] = Total time spent in subfunctions, excluding time executing the + frame's function (this latter is tallied in cur[1]). + [-3] = Name of the function that corresponds to this frame. + [-2] = Actual frame that we correspond to (used to sync exception handling). + [-1] = Our parent 6-tuple (corresponds to frame.f_back). + + Timing data for each function is stored as a 5-tuple in the dictionary + self.timings[]. The index is always the name stored in self.cur[-3]. + The following are the definitions of the members: + + [0] = The number of times this function was called, not counting direct + or indirect recursion, + [1] = Number of times this function appears on the stack, minus one + [2] = Total time spent internal to this function + [3] = Cumulative time that this function was present on the stack. In + non-recursive functions, this is the total execution time from start + to finish of each invocation of a function, including time spent in + all subfunctions. + [4] = A dictionary indicating for each function name, the number of times + it was called by us. + """ + + bias = 0 # calibration constant + + def __init__(self, timer=None, bias=None): + self.timings = {} + self.cur = None + self.cmd = "" + self.c_func_name = "" + + if bias is None: + bias = self.bias + self.bias = bias # Materialize in local dict for lookup speed. + + if not timer: + self.timer = self.get_time = time.process_time + self.dispatcher = self.trace_dispatch_i + else: + self.timer = timer + t = self.timer() # test out timer function + try: + length = len(t) + except TypeError: + self.get_time = timer + self.dispatcher = self.trace_dispatch_i + else: + if length == 2: + self.dispatcher = self.trace_dispatch + else: + self.dispatcher = self.trace_dispatch_l + # This get_time() implementation needs to be defined + # here to capture the passed-in timer in the parameter + # list (for performance). Note that we can't assume + # the timer() result contains two values in all + # cases. 
+ def get_time_timer(timer=timer, sum=sum): + return sum(timer()) + self.get_time = get_time_timer + self.t = self.get_time() + self.simulate_call('profiler') + + # Heavily optimized dispatch routine for time.process_time() timer + + def trace_dispatch(self, frame, event, arg): + timer = self.timer + t = timer() + t = t[0] + t[1] - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame,t): + t = timer() + self.t = t[0] + t[1] + else: + r = timer() + self.t = r[0] + r[1] - t # put back unrecorded delta + + # Dispatch routine for best timer program (return = scalar, fastest if + # an integer but float works too -- and time.process_time() relies on that). + + def trace_dispatch_i(self, frame, event, arg): + timer = self.timer + t = timer() - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = timer() + else: + self.t = timer() - t # put back unrecorded delta + + # Dispatch routine for macintosh (timer returns time in ticks of + # 1/60th second) + + def trace_dispatch_mac(self, frame, event, arg): + timer = self.timer + t = timer()/60.0 - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = timer()/60.0 + else: + self.t = timer()/60.0 - t # put back unrecorded delta + + # SLOW generic dispatch routine for timer returning lists of numbers + + def trace_dispatch_l(self, frame, event, arg): + get_time = self.get_time + t = get_time() - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = get_time() + else: + self.t = get_time() - t # put back unrecorded delta + + # In the event handlers, the first 3 elements of self.cur are unpacked + # into vrbls w/ 3-letter names. 
The last two characters are meant to be + # mnemonic: + # _pt self.cur[0] "parent time" time to be charged to parent frame + # _it self.cur[1] "internal time" time spent directly in the function + # _et self.cur[2] "external time" time spent in subfunctions + + def trace_dispatch_exception(self, frame, t): + rpt, rit, ret, rfn, rframe, rcur = self.cur + if (rframe is not frame) and rcur: + return self.trace_dispatch_return(rframe, t) + self.cur = rpt, rit+t, ret, rfn, rframe, rcur + return 1 + + + def trace_dispatch_call(self, frame, t): + if self.cur and frame.f_back is not self.cur[-2]: + rpt, rit, ret, rfn, rframe, rcur = self.cur + if not isinstance(rframe, Profile.fake_frame): + assert rframe.f_back is frame.f_back, ("Bad call", rfn, + rframe, rframe.f_back, + frame, frame.f_back) + self.trace_dispatch_return(rframe, 0) + assert (self.cur is None or \ + frame.f_back is self.cur[-2]), ("Bad call", + self.cur[-3]) + fcode = frame.f_code + fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name) + self.cur = (t, 0, 0, fn, frame, self.cur) + timings = self.timings + if fn in timings: + cc, ns, tt, ct, callers = timings[fn] + timings[fn] = cc, ns + 1, tt, ct, callers + else: + timings[fn] = 0, 0, 0, 0, {} + return 1 + + def trace_dispatch_c_call (self, frame, t): + fn = ("", 0, self.c_func_name) + self.cur = (t, 0, 0, fn, frame, self.cur) + timings = self.timings + if fn in timings: + cc, ns, tt, ct, callers = timings[fn] + timings[fn] = cc, ns+1, tt, ct, callers + else: + timings[fn] = 0, 0, 0, 0, {} + return 1 + + def trace_dispatch_return(self, frame, t): + if frame is not self.cur[-2]: + assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3]) + self.trace_dispatch_return(self.cur[-2], 0) + + # Prefix "r" means part of the Returning or exiting frame. + # Prefix "p" means part of the Previous or Parent or older frame. + + rpt, rit, ret, rfn, frame, rcur = self.cur + rit = rit + t + frame_total = rit + ret + + ppt, pit, pet, pfn, pframe, pcur = rcur + self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur + + timings = self.timings + cc, ns, tt, ct, callers = timings[rfn] + if not ns: + # This is the only occurrence of the function on the stack. + # Else this is a (directly or indirectly) recursive call, and + # its cumulative time will get updated when the topmost call to + # it returns. + ct = ct + frame_total + cc = cc + 1 + + if pfn in callers: + callers[pfn] = callers[pfn] + 1 # hack: gather more + # stats such as the amount of time added to ct courtesy + # of this specific call, and the contribution to cc + # courtesy of this call. + else: + callers[pfn] = 1 + + timings[rfn] = cc, ns - 1, tt + rit, ct, callers + + return 1 + + + dispatch = { + "call": trace_dispatch_call, + "exception": trace_dispatch_exception, + "return": trace_dispatch_return, + "c_call": trace_dispatch_c_call, + "c_exception": trace_dispatch_return, # the C function returned + "c_return": trace_dispatch_return, + } + + + # The next few functions play with self.cmd. By carefully preloading + # our parallel stack, we can force the profiled result to include + # an arbitrary string as the name of the calling function. + # We use self.cmd as that string, and the resulting stats look + # very nice :-). 
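# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the upstream profile.py vendored by this
# diff.]  The dispatch table above maps the event names delivered by
# sys.setprofile() to the trace_dispatch_* handlers.  A minimal standalone
# illustration of that event stream (all names below are local to this
# hypothetical example):
import sys

def _show_events():
    events = []

    def tracer(frame, event, arg):
        # Receives the same (frame, event, arg) triples Profile's
        # dispatcher consumes; the callback itself is not re-traced.
        events.append((event, frame.f_code.co_name))

    def work():
        return len("abc")      # the builtin triggers a c_call/c_return pair

    sys.setprofile(tracer)
    try:
        work()
    finally:
        sys.setprofile(None)
    return events              # e.g. [('call', 'work'), ('c_call', 'work'), ...]
# ---------------------------------------------------------------------------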
+ + def set_cmd(self, cmd): + if self.cur[-1]: return # already set + self.cmd = cmd + self.simulate_call(cmd) + + class fake_code: + def __init__(self, filename, line, name): + self.co_filename = filename + self.co_line = line + self.co_name = name + self.co_firstlineno = 0 + + def __repr__(self): + return repr((self.co_filename, self.co_line, self.co_name)) + + class fake_frame: + def __init__(self, code, prior): + self.f_code = code + self.f_back = prior + + def simulate_call(self, name): + code = self.fake_code('profile', 0, name) + if self.cur: + pframe = self.cur[-2] + else: + pframe = None + frame = self.fake_frame(code, pframe) + self.dispatch['call'](self, frame, 0) + + # collect stats from pending stack, including getting final + # timings for self.cmd frame. + + def simulate_cmd_complete(self): + get_time = self.get_time + t = get_time() - self.t + while self.cur[-1]: + # We *can* cause assertion errors here if + # dispatch_trace_return checks for a frame match! + self.dispatch['return'](self, self.cur[-2], t) + t = 0 + self.t = get_time() - t + + + def print_stats(self, sort=-1): + import pstats + if not isinstance(sort, tuple): + sort = (sort,) + pstats.Stats(self).strip_dirs().sort_stats(*sort).print_stats() + + def dump_stats(self, file): + with open(file, 'wb') as f: + self.create_stats() + marshal.dump(self.stats, f) + + def create_stats(self): + self.simulate_cmd_complete() + self.snapshot_stats() + + def snapshot_stats(self): + self.stats = {} + for func, (cc, ns, tt, ct, callers) in self.timings.items(): + callers = callers.copy() + nc = 0 + for callcnt in callers.values(): + nc += callcnt + self.stats[func] = cc, nc, tt, ct, callers + + + # The following two methods can be called by clients to use + # a profiler to profile a statement, given as a string. + + def run(self, cmd): + import __main__ + dict = __main__.__dict__ + return self.runctx(cmd, dict, dict) + + def runctx(self, cmd, globals, locals): + self.set_cmd(cmd) + sys.setprofile(self.dispatcher) + try: + exec(cmd, globals, locals) + finally: + sys.setprofile(None) + return self + + # This method is more useful to profile a single function call. + def runcall(self, func, /, *args, **kw): + self.set_cmd(repr(func)) + sys.setprofile(self.dispatcher) + try: + return func(*args, **kw) + finally: + sys.setprofile(None) + + + #****************************************************************** + # The following calculates the overhead for using a profiler. The + # problem is that it takes a fair amount of time for the profiler + # to stop the stopwatch (from the time it receives an event). + # Similarly, there is a delay from the time that the profiler + # re-starts the stopwatch before the user's code really gets to + # continue. The following code tries to measure the difference on + # a per-event basis. + # + # Note that this difference is only significant if there are a lot of + # events, and relatively little user code per event. For example, + # code with small functions will typically benefit from having the + # profiler calibrated for the current platform. This *could* be + # done on the fly during init() time, but it is not worth the + # effort. Also note that if too large a value specified, then + # execution time on some functions will actually appear as a + # negative number. It is *normal* for some functions (with very + # low call counts) to have such negative stats, even if the + # calibration figure is "correct." 
+    #
+    # One alternative to profile-time calibration adjustments (i.e.,
+    # adding in the magic little delta during each event) is to track
+    # more carefully the number of events (and cumulatively, the number
+    # of events during sub functions) that are seen.  If this were
+    # done, then the arithmetic could be done after the fact (i.e., at
+    # display time).  Currently, we track only call/return events.
+    # These values can be deduced by examining the callees and callers
+    # vectors for each function.  Hence we *can* almost correct the
+    # internal time figure at print time (note that we currently don't
+    # track exception event processing counts).  Unfortunately, there
+    # is currently no similar information for cumulative sub-function
+    # time.  It would not be hard to "get all this info" at profiler
+    # time.  Specifically, we would have to extend the tuples to keep
+    # counts of this in each frame, and then extend the defs of timing
+    # tuples to include the significant two figures.  I'm a bit fearful
+    # that this additional feature will slow the heavily optimized
+    # event/time ratio (i.e., the profiler would run slower, for a very
+    # low "value added" feature.)
+    #**************************************************************
+
+    def calibrate(self, m, verbose=0):
+        if self.__class__ is not Profile:
+            raise TypeError("Subclasses must override .calibrate().")
+
+        saved_bias = self.bias
+        self.bias = 0
+        try:
+            return self._calibrate_inner(m, verbose)
+        finally:
+            self.bias = saved_bias
+
+    def _calibrate_inner(self, m, verbose):
+        get_time = self.get_time
+
+        # Set up a test case to be run with and without profiling.  Include
+        # lots of calls, because we're trying to quantify stopwatch overhead.
+        # Do not raise any exceptions, though, because we want to know
+        # exactly how many profile events are generated (one call event,
+        # one return event, per Python-level call).
+
+        def f1(n):
+            for i in range(n):
+                x = 1
+
+        def f(m, f1=f1):
+            for i in range(m):
+                f1(100)
+
+        f(m)    # warm up the cache
+
+        # elapsed_noprofile <- time f(m) takes without profiling.
+        t0 = get_time()
+        f(m)
+        t1 = get_time()
+        elapsed_noprofile = t1 - t0
+        if verbose:
+            print("elapsed time without profiling =", elapsed_noprofile)
+
+        # elapsed_profile <- time f(m) takes with profiling.  The difference
+        # is profiling overhead, only some of which the profiler subtracts
+        # out on its own.
+        p = Profile()
+        t0 = get_time()
+        p.runctx('f(m)', globals(), locals())
+        t1 = get_time()
+        elapsed_profile = t1 - t0
+        if verbose:
+            print("elapsed time with profiling =", elapsed_profile)
+
+        # reported_time <- "CPU seconds" the profiler charged to f and f1.
+        total_calls = 0.0
+        reported_time = 0.0
+        for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
+                p.timings.items():
+            if funcname in ("f", "f1"):
+                total_calls += cc
+                reported_time += tt
+
+        if verbose:
+            print("'CPU seconds' profiler reported =", reported_time)
+            print("total # calls =", total_calls)
+        if total_calls != m + 1:
+            raise ValueError("internal error: total calls = %d" % total_calls)
+
+        # reported_time - elapsed_noprofile = overhead the profiler wasn't
+        # able to measure.  Divide by twice the number of calls (since there
+        # are two profiler events per call in this test) to get the hidden
+        # overhead per event.
+        mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
+        if verbose:
+            print("mean stopwatch overhead per profile event =", mean)
+        return mean
+
+#****************************************************************************
+
+def main():
+    import os
+    from optparse import OptionParser
+
+    usage = "profile.py [-o output_file_path] [-s sort] [-m module | scriptfile] [arg] ..."
+    parser = OptionParser(usage=usage)
+    parser.allow_interspersed_args = False
+    parser.add_option('-o', '--outfile', dest="outfile",
+        help="Save stats to <outfile>", default=None)
+    parser.add_option('-m', dest="module", action="store_true",
+        help="Profile a library module.", default=False)
+    parser.add_option('-s', '--sort', dest="sort",
+        help="Sort order when printing to stdout, based on pstats.Stats class",
+        default=-1)
+
+    if not sys.argv[1:]:
+        parser.print_usage()
+        sys.exit(2)
+
+    (options, args) = parser.parse_args()
+    sys.argv[:] = args
+
+    # The script that we're profiling may chdir, so capture the absolute path
+    # to the output file at startup.
+    if options.outfile is not None:
+        options.outfile = os.path.abspath(options.outfile)
+
+    if len(args) > 0:
+        if options.module:
+            import runpy
+            code = "run_module(modname, run_name='__main__')"
+            globs = {
+                'run_module': runpy.run_module,
+                'modname': args[0]
+            }
+        else:
+            progname = args[0]
+            sys.path.insert(0, os.path.dirname(progname))
+            with io.open_code(progname) as fp:
+                code = compile(fp.read(), progname, 'exec')
+            spec = importlib.machinery.ModuleSpec(name='__main__', loader=None,
+                                                  origin=progname)
+            globs = {
+                '__spec__': spec,
+                '__file__': spec.origin,
+                '__name__': spec.name,
+                '__package__': None,
+                '__cached__': None,
+            }
+        try:
+            runctx(code, globs, None, options.outfile, options.sort)
+        except BrokenPipeError as exc:
+            # Prevent "Exception ignored" during interpreter shutdown.
+            sys.stdout = None
+            sys.exit(exc.errno)
+    else:
+        parser.print_usage()
+    return parser
+
+# When invoked as main program, invoke the profiler on a script
+if __name__ == '__main__':
+    main()
diff --git a/Python314_4_x86_Template/Lib/pstats.py b/Python314_4_x86_Template/Lib/pstats.py
new file mode 100644
index 00000000..becaf355
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/pstats.py
@@ -0,0 +1,777 @@
+"""Class for printing reports on profiled python code."""
+
+# Written by James Roskind
+# Based on prior profile module by Sjoerd Mullender...
+#   which was hacked somewhat by: Guido van Rossum
+
+# Copyright Disney Enterprises, Inc.  All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied.  See the License for the specific language
+# governing permissions and limitations under the License.
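# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the upstream pstats.py vendored by this
# diff.]  The intended round trip between profile.py (above) and this module:
# profile a statement, dump the marshalled timings to a file, then load and
# report them.  The file name "demo.prof" is just an example.
import profile
import pstats

profile.run("sum(i * i for i in range(10_000))", "demo.prof")
stats = pstats.Stats("demo.prof")
# Chainable report API: strip directory noise, sort by cumulative time,
# print the five most expensive entries.
stats.strip_dirs().sort_stats("cumulative").print_stats(5)
# ---------------------------------------------------------------------------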
+ + +import sys +import os +import time +import marshal +import re + +from enum import StrEnum, _simple_enum +from functools import cmp_to_key +from dataclasses import dataclass + +__all__ = ["Stats", "SortKey", "FunctionProfile", "StatsProfile"] + +@_simple_enum(StrEnum) +class SortKey: + CALLS = 'calls', 'ncalls' + CUMULATIVE = 'cumulative', 'cumtime' + FILENAME = 'filename', 'module' + LINE = 'line' + NAME = 'name' + NFL = 'nfl' + PCALLS = 'pcalls' + STDNAME = 'stdname' + TIME = 'time', 'tottime' + + def __new__(cls, *values): + value = values[0] + obj = str.__new__(cls, value) + obj._value_ = value + for other_value in values[1:]: + cls._value2member_map_[other_value] = obj + obj._all_values = values + return obj + + +@dataclass(unsafe_hash=True) +class FunctionProfile: + ncalls: str + tottime: float + percall_tottime: float + cumtime: float + percall_cumtime: float + file_name: str + line_number: int + +@dataclass(unsafe_hash=True) +class StatsProfile: + '''Class for keeping track of an item in inventory.''' + total_tt: float + func_profiles: dict[str, FunctionProfile] + +class Stats: + """This class is used for creating reports from data generated by the + Profile class. It is a "friend" of that class, and imports data either + by direct access to members of Profile class, or by reading in a dictionary + that was emitted (via marshal) from the Profile class. + + The big change from the previous Profiler (in terms of raw functionality) + is that an "add()" method has been provided to combine Stats from + several distinct profile runs. Both the constructor and the add() + method now take arbitrarily many file names as arguments. + + All the print methods now take an argument that indicates how many lines + to print. If the arg is a floating-point number between 0 and 1.0, then + it is taken as a decimal percentage of the available lines to be printed + (e.g., .1 means print 10% of all available lines). If it is an integer, + it is taken to mean the number of lines of data that you wish to have + printed. + + The sort_stats() method now processes some additional options (i.e., in + addition to the old -1, 0, 1, or 2 that are respectively interpreted as + 'stdname', 'calls', 'time', and 'cumulative'). It takes either an + arbitrary number of quoted strings or SortKey enum to select the sort + order. + + For example sort_stats('time', 'name') or sort_stats(SortKey.TIME, + SortKey.NAME) sorts on the major key of 'internal function time', and on + the minor key of 'the name of the function'. Look at the two tables in + sort_stats() and get_sort_arg_defs(self) for more examples. 
+ + All methods return self, so you can string together commands like: + Stats('foo', 'goo').strip_dirs().sort_stats('calls').\ + print_stats(5).print_callers(5) + """ + + def __init__(self, *args, stream=None): + self.stream = stream or sys.stdout + if not len(args): + arg = None + else: + arg = args[0] + args = args[1:] + self.init(arg) + self.add(*args) + + def init(self, arg): + self.all_callees = None # calc only if needed + self.files = [] + self.fcn_list = None + self.total_tt = 0 + self.total_calls = 0 + self.prim_calls = 0 + self.max_name_len = 0 + self.top_level = set() + self.stats = {} + self.sort_arg_dict = {} + self.load_stats(arg) + try: + self.get_top_level_stats() + except Exception: + print("Invalid timing data %s" % + (self.files[-1] if self.files else ''), file=self.stream) + raise + + def load_stats(self, arg): + if arg is None: + self.stats = {} + return + elif isinstance(arg, str): + with open(arg, 'rb') as f: + self.stats = marshal.load(f) + try: + file_stats = os.stat(arg) + arg = time.ctime(file_stats.st_mtime) + " " + arg + except: # in case this is not unix + pass + self.files = [arg] + elif hasattr(arg, 'create_stats'): + arg.create_stats() + self.stats = arg.stats + arg.stats = {} + if not self.stats: + raise TypeError("Cannot create or construct a %r object from %r" + % (self.__class__, arg)) + return + + def get_top_level_stats(self): + for func, (cc, nc, tt, ct, callers) in self.stats.items(): + self.total_calls += nc + self.prim_calls += cc + self.total_tt += tt + if ("jprofile", 0, "profiler") in callers: + self.top_level.add(func) + if len(func_std_string(func)) > self.max_name_len: + self.max_name_len = len(func_std_string(func)) + + def add(self, *arg_list): + if not arg_list: + return self + for item in reversed(arg_list): + if type(self) != type(item): + item = Stats(item) + self.files += item.files + self.total_calls += item.total_calls + self.prim_calls += item.prim_calls + self.total_tt += item.total_tt + for func in item.top_level: + self.top_level.add(func) + + if self.max_name_len < item.max_name_len: + self.max_name_len = item.max_name_len + + self.fcn_list = None + + for func, stat in item.stats.items(): + if func in self.stats: + old_func_stat = self.stats[func] + else: + old_func_stat = (0, 0, 0, 0, {},) + self.stats[func] = add_func_stats(old_func_stat, stat) + return self + + def dump_stats(self, filename): + """Write the profile data to a file we know how to load back.""" + with open(filename, 'wb') as f: + marshal.dump(self.stats, f) + + # list the tuple indices and directions for sorting, + # along with some printable description + sort_arg_dict_default = { + "calls" : (((1,-1), ), "call count"), + "ncalls" : (((1,-1), ), "call count"), + "cumtime" : (((3,-1), ), "cumulative time"), + "cumulative": (((3,-1), ), "cumulative time"), + "filename" : (((4, 1), ), "file name"), + "line" : (((5, 1), ), "line number"), + "module" : (((4, 1), ), "file name"), + "name" : (((6, 1), ), "function name"), + "nfl" : (((6, 1),(4, 1),(5, 1),), "name/file/line"), + "pcalls" : (((0,-1), ), "primitive call count"), + "stdname" : (((7, 1), ), "standard name"), + "time" : (((2,-1), ), "internal time"), + "tottime" : (((2,-1), ), "internal time"), + } + + def get_sort_arg_defs(self): + """Expand all abbreviations that are unique.""" + if not self.sort_arg_dict: + self.sort_arg_dict = dict = {} + bad_list = {} + for word, tup in self.sort_arg_dict_default.items(): + fragment = word + while fragment: + if fragment in dict: + bad_list[fragment] = 0 + break + 
dict[fragment] = tup
+                    fragment = fragment[:-1]
+            for word in bad_list:
+                del dict[word]
+        return self.sort_arg_dict
+
+    def sort_stats(self, *field):
+        if not field:
+            self.fcn_list = 0
+            return self
+        if len(field) == 1 and isinstance(field[0], int):
+            # Be compatible with old profiler
+            field = [ {-1: "stdname",
+                       0:  "calls",
+                       1:  "time",
+                       2:  "cumulative"}[field[0]] ]
+        elif len(field) >= 2:
+            for arg in field[1:]:
+                if type(arg) != type(field[0]):
+                    raise TypeError("Can't have mixed argument type")
+
+        sort_arg_defs = self.get_sort_arg_defs()
+
+        sort_tuple = ()
+        self.sort_type = ""
+        connector = ""
+        for word in field:
+            if isinstance(word, SortKey):
+                word = word.value
+            sort_tuple = sort_tuple + sort_arg_defs[word][0]
+            self.sort_type += connector + sort_arg_defs[word][1]
+            connector = ", "
+
+        stats_list = []
+        for func, (cc, nc, tt, ct, callers) in self.stats.items():
+            stats_list.append((cc, nc, tt, ct) + func +
+                              (func_std_string(func), func))
+
+        stats_list.sort(key=cmp_to_key(TupleComp(sort_tuple).compare))
+
+        self.fcn_list = fcn_list = []
+        for tuple in stats_list:
+            fcn_list.append(tuple[-1])
+        return self
+
+    def reverse_order(self):
+        if self.fcn_list:
+            self.fcn_list.reverse()
+        return self
+
+    def strip_dirs(self):
+        oldstats = self.stats
+        self.stats = newstats = {}
+        max_name_len = 0
+        for func, (cc, nc, tt, ct, callers) in oldstats.items():
+            newfunc = func_strip_path(func)
+            if len(func_std_string(newfunc)) > max_name_len:
+                max_name_len = len(func_std_string(newfunc))
+            newcallers = {}
+            for func2, caller in callers.items():
+                newcallers[func_strip_path(func2)] = caller
+
+            if newfunc in newstats:
+                newstats[newfunc] = add_func_stats(
+                    newstats[newfunc],
+                    (cc, nc, tt, ct, newcallers))
+            else:
+                newstats[newfunc] = (cc, nc, tt, ct, newcallers)
+        old_top = self.top_level
+        self.top_level = new_top = set()
+        for func in old_top:
+            new_top.add(func_strip_path(func))
+
+        self.max_name_len = max_name_len
+
+        self.fcn_list = None
+        self.all_callees = None
+        return self
+
+    def calc_callees(self):
+        if self.all_callees:
+            return
+        self.all_callees = all_callees = {}
+        for func, (cc, nc, tt, ct, callers) in self.stats.items():
+            if not func in all_callees:
+                all_callees[func] = {}
+            for func2, caller in callers.items():
+                if not func2 in all_callees:
+                    all_callees[func2] = {}
+                all_callees[func2][func] = caller
+        return
+
+    #******************************************************************
+    # The following functions support actual printing of reports
+    #******************************************************************
+
+    # Optional "amount" is either a line count, or a percentage of lines.
+
+    def eval_print_amount(self, sel, list, msg):
+        new_list = list
+        if isinstance(sel, str):
+            try:
+                rex = re.compile(sel)
+            except re.PatternError:
+                msg += "   <Invalid regular expression %r>\n" % sel
+                return new_list, msg
+            new_list = []
+            for func in list:
+                if rex.search(func_std_string(func)):
+                    new_list.append(func)
+        else:
+            count = len(list)
+            if isinstance(sel, float) and 0.0 <= sel < 1.0:
+                count = int(count * sel + .5)
+                new_list = list[:count]
+            elif isinstance(sel, int) and 0 <= sel < count:
+                count = sel
+                new_list = list[:count]
+        if len(list) != len(new_list):
+            msg += "   List reduced from %r to %r due to restriction <%r>\n" % (
+                len(list), len(new_list), sel)
+
+        return new_list, msg
+
+    def get_stats_profile(self):
+        """This method returns an instance of StatsProfile, which contains a mapping
+        of function names to instances of FunctionProfile. 
Each FunctionProfile + instance holds information related to the function's profile such as how + long the function took to run, how many times it was called, etc... + """ + func_list = self.fcn_list[:] if self.fcn_list else list(self.stats.keys()) + if not func_list: + return StatsProfile(0, {}) + + total_tt = float(f8(self.total_tt)) + func_profiles = {} + stats_profile = StatsProfile(total_tt, func_profiles) + + for func in func_list: + cc, nc, tt, ct, callers = self.stats[func] + file_name, line_number, func_name = func + ncalls = str(nc) if nc == cc else (str(nc) + '/' + str(cc)) + tottime = float(f8(tt)) + percall_tottime = -1 if nc == 0 else float(f8(tt/nc)) + cumtime = float(f8(ct)) + percall_cumtime = -1 if cc == 0 else float(f8(ct/cc)) + func_profile = FunctionProfile( + ncalls, + tottime, # time spent in this function alone + percall_tottime, + cumtime, # time spent in the function plus all functions that this function called, + percall_cumtime, + file_name, + line_number + ) + func_profiles[func_name] = func_profile + + return stats_profile + + def get_print_list(self, sel_list): + width = self.max_name_len + if self.fcn_list: + stat_list = self.fcn_list[:] + msg = " Ordered by: " + self.sort_type + '\n' + else: + stat_list = list(self.stats.keys()) + msg = " Random listing order was used\n" + + for selection in sel_list: + stat_list, msg = self.eval_print_amount(selection, stat_list, msg) + + count = len(stat_list) + + if not stat_list: + return 0, stat_list + print(msg, file=self.stream) + if count < len(self.stats): + width = 0 + for func in stat_list: + if len(func_std_string(func)) > width: + width = len(func_std_string(func)) + return width+2, stat_list + + def print_stats(self, *amount): + for filename in self.files: + print(filename, file=self.stream) + if self.files: + print(file=self.stream) + indent = ' ' * 8 + for func in self.top_level: + print(indent, func_get_function_name(func), file=self.stream) + + print(indent, self.total_calls, "function calls", end=' ', file=self.stream) + if self.total_calls != self.prim_calls: + print("(%d primitive calls)" % self.prim_calls, end=' ', file=self.stream) + print("in %.3f seconds" % self.total_tt, file=self.stream) + print(file=self.stream) + width, list = self.get_print_list(amount) + if list: + self.print_title() + for func in list: + self.print_line(func) + print(file=self.stream) + print(file=self.stream) + return self + + def print_callees(self, *amount): + width, list = self.get_print_list(amount) + if list: + self.calc_callees() + + self.print_call_heading(width, "called...") + for func in list: + if func in self.all_callees: + self.print_call_line(width, func, self.all_callees[func]) + else: + self.print_call_line(width, func, {}) + print(file=self.stream) + print(file=self.stream) + return self + + def print_callers(self, *amount): + width, list = self.get_print_list(amount) + if list: + self.print_call_heading(width, "was called by...") + for func in list: + cc, nc, tt, ct, callers = self.stats[func] + self.print_call_line(width, func, callers, "<-") + print(file=self.stream) + print(file=self.stream) + return self + + def print_call_heading(self, name_size, column_title): + print("Function ".ljust(name_size) + column_title, file=self.stream) + # print sub-header only if we have new-style callers + subheader = False + for cc, nc, tt, ct, callers in self.stats.values(): + if callers: + value = next(iter(callers.values())) + subheader = isinstance(value, tuple) + break + if subheader: + print(" "*name_size + " ncalls 
tottime cumtime", file=self.stream) + + def print_call_line(self, name_size, source, call_dict, arrow="->"): + print(func_std_string(source).ljust(name_size) + arrow, end=' ', file=self.stream) + if not call_dict: + print(file=self.stream) + return + clist = sorted(call_dict.keys()) + indent = "" + for func in clist: + name = func_std_string(func) + value = call_dict[func] + if isinstance(value, tuple): + nc, cc, tt, ct = value + if nc != cc: + substats = '%d/%d' % (nc, cc) + else: + substats = '%d' % (nc,) + substats = '%s %s %s %s' % (substats.rjust(7+2*len(indent)), + f8(tt), f8(ct), name) + left_width = name_size + 1 + else: + substats = '%s(%r) %s' % (name, value, f8(self.stats[func][3])) + left_width = name_size + 3 + print(indent*left_width + substats, file=self.stream) + indent = " " + + def print_title(self): + print(' ncalls tottime percall cumtime percall', end=' ', file=self.stream) + print('filename:lineno(function)', file=self.stream) + + def print_line(self, func): # hack: should print percentages + cc, nc, tt, ct, callers = self.stats[func] + c = str(nc) + if nc != cc: + c = c + '/' + str(cc) + print(c.rjust(9), end=' ', file=self.stream) + print(f8(tt), end=' ', file=self.stream) + if nc == 0: + print(' '*8, end=' ', file=self.stream) + else: + print(f8(tt/nc), end=' ', file=self.stream) + print(f8(ct), end=' ', file=self.stream) + if cc == 0: + print(' '*8, end=' ', file=self.stream) + else: + print(f8(ct/cc), end=' ', file=self.stream) + print(func_std_string(func), file=self.stream) + +class TupleComp: + """This class provides a generic function for comparing any two tuples. + Each instance records a list of tuple-indices (from most significant + to least significant), and sort direction (ascending or descending) for + each tuple-index. The compare functions can then be used as the function + argument to the system sort() function when a list of tuples need to be + sorted in the instances order.""" + + def __init__(self, comp_select_list): + self.comp_select_list = comp_select_list + + def compare (self, left, right): + for index, direction in self.comp_select_list: + l = left[index] + r = right[index] + if l < r: + return -direction + if l > r: + return direction + return 0 + + +#************************************************************************** +# func_name is a triple (file:string, line:int, name:string) + +def func_strip_path(func_name): + filename, line, name = func_name + return os.path.basename(filename), line, name + +def func_get_function_name(func): + return func[2] + +def func_std_string(func_name): # match what old profile produced + if func_name[:2] == ('~', 0): + # special case for built-in functions + name = func_name[2] + if name.startswith('<') and name.endswith('>'): + return '{%s}' % name[1:-1] + else: + return name + else: + return "%s:%d(%s)" % func_name + +#************************************************************************** +# The following functions combine statistics for pairs functions. +# The bulk of the processing involves correctly handling "call" lists, +# such as callers and callees. 
+#************************************************************************** + +def add_func_stats(target, source): + """Add together all the stats for two profile entries.""" + cc, nc, tt, ct, callers = source + t_cc, t_nc, t_tt, t_ct, t_callers = target + return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct, + add_callers(t_callers, callers)) + +def add_callers(target, source): + """Combine two caller lists in a single list.""" + new_callers = {} + for func, caller in target.items(): + new_callers[func] = caller + for func, caller in source.items(): + if func in new_callers: + if isinstance(caller, tuple): + # format used by cProfile + new_callers[func] = tuple(i + j for i, j in zip(caller, new_callers[func])) + else: + # format used by profile + new_callers[func] += caller + else: + new_callers[func] = caller + return new_callers + +def count_calls(callers): + """Sum the caller statistics to get total number of calls received.""" + nc = 0 + for calls in callers.values(): + nc += calls + return nc + +#************************************************************************** +# The following functions support printing of reports +#************************************************************************** + +def f8(x): + return "%8.3f" % x + +#************************************************************************** +# Statistics browser added by ESR, April 2001 +#************************************************************************** + +if __name__ == '__main__': + import cmd + try: + import readline # noqa: F401 + except ImportError: + pass + + class ProfileBrowser(cmd.Cmd): + def __init__(self, profile=None): + cmd.Cmd.__init__(self) + self.prompt = "% " + self.stats = None + self.stream = sys.stdout + if profile is not None: + self.do_read(profile) + + def generic(self, fn, line): + args = line.split() + processed = [] + for term in args: + try: + processed.append(int(term)) + continue + except ValueError: + pass + try: + frac = float(term) + if frac > 1 or frac < 0: + print("Fraction argument must be in [0, 1]", file=self.stream) + continue + processed.append(frac) + continue + except ValueError: + pass + processed.append(term) + if self.stats: + getattr(self.stats, fn)(*processed) + else: + print("No statistics object is loaded.", file=self.stream) + return 0 + def generic_help(self): + print("Arguments may be:", file=self.stream) + print("* An integer maximum number of entries to print.", file=self.stream) + print("* A decimal fractional number between 0 and 1, controlling", file=self.stream) + print(" what fraction of selected entries to print.", file=self.stream) + print("* A regular expression; only entries with function names", file=self.stream) + print(" that match it are printed.", file=self.stream) + + def do_add(self, line): + if self.stats: + try: + self.stats.add(line) + except OSError as e: + print("Failed to load statistics for %s: %s" % (line, e), file=self.stream) + else: + print("No statistics object is loaded.", file=self.stream) + return 0 + def help_add(self): + print("Add profile info from given file to current statistics object.", file=self.stream) + + def do_callees(self, line): + return self.generic('print_callees', line) + def help_callees(self): + print("Print callees statistics from the current stat object.", file=self.stream) + self.generic_help() + + def do_callers(self, line): + return self.generic('print_callers', line) + def help_callers(self): + print("Print callers statistics from the current stat object.", file=self.stream) + self.generic_help() + + def 
do_EOF(self, line): + print("", file=self.stream) + return 1 + def help_EOF(self): + print("Leave the profile browser.", file=self.stream) + + def do_quit(self, line): + return 1 + def help_quit(self): + print("Leave the profile browser.", file=self.stream) + + def do_read(self, line): + if line: + try: + self.stats = Stats(line) + except OSError as err: + print(err.args[1], file=self.stream) + return + except Exception as err: + print(err.__class__.__name__ + ':', err, file=self.stream) + return + self.prompt = line + "% " + elif len(self.prompt) > 2: + line = self.prompt[:-2] + self.do_read(line) + else: + print("No statistics object is current -- cannot reload.", file=self.stream) + return 0 + def help_read(self): + print("Read in profile data from a specified file.", file=self.stream) + print("Without argument, reload the current file.", file=self.stream) + + def do_reverse(self, line): + if self.stats: + self.stats.reverse_order() + else: + print("No statistics object is loaded.", file=self.stream) + return 0 + def help_reverse(self): + print("Reverse the sort order of the profiling report.", file=self.stream) + + def do_sort(self, line): + if not self.stats: + print("No statistics object is loaded.", file=self.stream) + return + abbrevs = self.stats.get_sort_arg_defs() + if line and all((x in abbrevs) for x in line.split()): + self.stats.sort_stats(*line.split()) + else: + print("Valid sort keys (unique prefixes are accepted):", file=self.stream) + for (key, value) in Stats.sort_arg_dict_default.items(): + print("%s -- %s" % (key, value[1]), file=self.stream) + return 0 + def help_sort(self): + print("Sort profile data according to specified keys.", file=self.stream) + print("(Typing `sort' without arguments lists valid keys.)", file=self.stream) + def complete_sort(self, text, *args): + return [a for a in Stats.sort_arg_dict_default if a.startswith(text)] + + def do_stats(self, line): + return self.generic('print_stats', line) + def help_stats(self): + print("Print statistics from the current stat object.", file=self.stream) + self.generic_help() + + def do_strip(self, line): + if self.stats: + self.stats.strip_dirs() + else: + print("No statistics object is loaded.", file=self.stream) + def help_strip(self): + print("Strip leading path information from filenames in the report.", file=self.stream) + + def help_help(self): + print("Show help for a given command.", file=self.stream) + + def postcmd(self, stop, line): + if stop: + return stop + return None + + if len(sys.argv) > 1: + initprofile = sys.argv[1] + else: + initprofile = None + try: + browser = ProfileBrowser(initprofile) + for profile in sys.argv[2:]: + browser.do_add(profile) + print("Welcome to the profile statistics browser.", file=browser.stream) + browser.cmdloop() + print("Goodbye.", file=browser.stream) + except KeyboardInterrupt: + pass + +# That's all, folks. diff --git a/Python314_4_x86_Template/Lib/pty.py b/Python314_4_x86_Template/Lib/pty.py new file mode 100644 index 00000000..4b25ac32 --- /dev/null +++ b/Python314_4_x86_Template/Lib/pty.py @@ -0,0 +1,182 @@ +"""Pseudo terminal utilities.""" + +# Bugs: No signal handling. Doesn't set slave termios and window size. +# Only tested on Linux, FreeBSD, and macOS. +# See: W. Richard Stevens. 1992. Advanced Programming in the +# UNIX Environment. Chapter 19. +# Author: Steen Lumholt -- with additions by Guido. 
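# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the upstream pty.py vendored by this diff.]
# Typical use of this module is pty.spawn(), which runs a command in a fresh
# pseudo-terminal and mirrors its I/O onto ours.  Note that pty is
# POSIX-only, so this will not run on the Windows images this repository
# builds; "ls" is just an example command.
import pty

def run_in_pty():
    # spawn() returns the exit status delivered by os.waitpid().
    status = pty.spawn(["ls", "-l"])
    return status
# ---------------------------------------------------------------------------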
+ +from select import select +import os +import sys +import tty + +# names imported directly for test mocking purposes +from os import close, waitpid +from tty import setraw, tcgetattr, tcsetattr + +__all__ = ["openpty", "fork", "spawn"] + +STDIN_FILENO = 0 +STDOUT_FILENO = 1 +STDERR_FILENO = 2 + +CHILD = 0 + +def openpty(): + """openpty() -> (master_fd, slave_fd) + Open a pty master/slave pair, using os.openpty() if possible.""" + + try: + return os.openpty() + except (AttributeError, OSError): + pass + master_fd, slave_name = _open_terminal() + + slave_fd = os.open(slave_name, os.O_RDWR) + try: + from fcntl import ioctl, I_PUSH + except ImportError: + return master_fd, slave_fd + try: + ioctl(slave_fd, I_PUSH, "ptem") + ioctl(slave_fd, I_PUSH, "ldterm") + except OSError: + pass + return master_fd, slave_fd + +def _open_terminal(): + """Open pty master and return (master_fd, tty_name).""" + for x in 'pqrstuvwxyzPQRST': + for y in '0123456789abcdef': + pty_name = '/dev/pty' + x + y + try: + fd = os.open(pty_name, os.O_RDWR) + except OSError: + continue + return (fd, '/dev/tty' + x + y) + raise OSError('out of pty devices') + + +def fork(): + """fork() -> (pid, master_fd) + Fork and make the child a session leader with a controlling terminal.""" + + try: + pid, fd = os.forkpty() + except (AttributeError, OSError): + pass + else: + if pid == CHILD: + try: + os.setsid() + except OSError: + # os.forkpty() already set us session leader + pass + return pid, fd + + master_fd, slave_fd = openpty() + pid = os.fork() + if pid == CHILD: + os.close(master_fd) + os.login_tty(slave_fd) + else: + os.close(slave_fd) + + # Parent and child process. + return pid, master_fd + +def _read(fd): + """Default read function.""" + return os.read(fd, 1024) + +def _copy(master_fd, master_read=_read, stdin_read=_read): + """Parent copy loop. + Copies + pty master -> standard output (master_read) + standard input -> pty master (stdin_read)""" + if os.get_blocking(master_fd): + # If we write more than tty/ndisc is willing to buffer, we may block + # indefinitely. So we set master_fd to non-blocking temporarily during + # the copy operation. + os.set_blocking(master_fd, False) + try: + _copy(master_fd, master_read=master_read, stdin_read=stdin_read) + finally: + # restore blocking mode for backwards compatibility + os.set_blocking(master_fd, True) + return + high_waterlevel = 4096 + stdin_avail = master_fd != STDIN_FILENO + stdout_avail = master_fd != STDOUT_FILENO + i_buf = b'' + o_buf = b'' + while 1: + rfds = [] + wfds = [] + if stdin_avail and len(i_buf) < high_waterlevel: + rfds.append(STDIN_FILENO) + if stdout_avail and len(o_buf) < high_waterlevel: + rfds.append(master_fd) + if stdout_avail and len(o_buf) > 0: + wfds.append(STDOUT_FILENO) + if len(i_buf) > 0: + wfds.append(master_fd) + + rfds, wfds, _xfds = select(rfds, wfds, []) + + if STDOUT_FILENO in wfds: + try: + n = os.write(STDOUT_FILENO, o_buf) + o_buf = o_buf[n:] + except OSError: + stdout_avail = False + + if master_fd in rfds: + # Some OSes signal EOF by returning an empty byte string, + # some throw OSErrors. + try: + data = master_read(master_fd) + except OSError: + data = b"" + if not data: # Reached EOF. + return # Assume the child process has exited and is + # unreachable, so we clean up. 
+ o_buf += data + + if master_fd in wfds: + n = os.write(master_fd, i_buf) + i_buf = i_buf[n:] + + if stdin_avail and STDIN_FILENO in rfds: + data = stdin_read(STDIN_FILENO) + if not data: + stdin_avail = False + else: + i_buf += data + +def spawn(argv, master_read=_read, stdin_read=_read): + """Create a spawned process.""" + if isinstance(argv, str): + argv = (argv,) + sys.audit('pty.spawn', argv) + + pid, master_fd = fork() + if pid == CHILD: + os.execlp(argv[0], *argv) + + try: + mode = tcgetattr(STDIN_FILENO) + setraw(STDIN_FILENO) + restore = True + except tty.error: # This is the same as termios.error + restore = False + + try: + _copy(master_fd, master_read, stdin_read) + finally: + if restore: + tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode) + + close(master_fd) + return waitpid(pid, 0)[1] diff --git a/Python314_4_x86_Template/Lib/py_compile.py b/Python314_4_x86_Template/Lib/py_compile.py new file mode 100644 index 00000000..43d8ec90 --- /dev/null +++ b/Python314_4_x86_Template/Lib/py_compile.py @@ -0,0 +1,212 @@ +"""Routine to "compile" a .py file to a .pyc file. + +This module has intimate knowledge of the format of .pyc files. +""" + +import enum +import importlib._bootstrap_external +import importlib.machinery +import importlib.util +import os +import os.path +import sys +import traceback + +__all__ = ["compile", "main", "PyCompileError", "PycInvalidationMode"] + + +class PyCompileError(Exception): + """Exception raised when an error occurs while attempting to + compile the file. + + To raise this exception, use + + raise PyCompileError(exc_type,exc_value,file[,msg]) + + where + + exc_type: exception type to be used in error message + type name can be accesses as class variable + 'exc_type_name' + + exc_value: exception value to be used in error message + can be accesses as class variable 'exc_value' + + file: name of file being compiled to be used in error message + can be accesses as class variable 'file' + + msg: string message to be written as error message + If no value is given, a default exception message will be + given, consistent with 'standard' py_compile output. + message (or default) can be accesses as class variable + 'msg' + + """ + + def __init__(self, exc_type, exc_value, file, msg=''): + exc_type_name = exc_type.__name__ + if exc_type is SyntaxError: + tbtext = ''.join(traceback.format_exception_only( + exc_type, exc_value)) + errmsg = tbtext.replace('File ""', 'File "%s"' % file) + else: + errmsg = "Sorry: %s: %s" % (exc_type_name,exc_value) + + Exception.__init__(self,msg or errmsg,exc_type_name,exc_value,file) + + self.exc_type_name = exc_type_name + self.exc_value = exc_value + self.file = file + self.msg = msg or errmsg + + def __str__(self): + return self.msg + + +class PycInvalidationMode(enum.Enum): + TIMESTAMP = 1 + CHECKED_HASH = 2 + UNCHECKED_HASH = 3 + + +def _get_default_invalidation_mode(): + if os.environ.get('SOURCE_DATE_EPOCH'): + return PycInvalidationMode.CHECKED_HASH + else: + return PycInvalidationMode.TIMESTAMP + + +def compile(file, cfile=None, dfile=None, doraise=False, optimize=-1, + invalidation_mode=None, quiet=0): + """Byte-compile one Python source file to Python bytecode. + + :param file: The source file name. + :param cfile: The target byte compiled file name. When not given, this + defaults to the PEP 3147/PEP 488 location. + :param dfile: Purported file name, i.e. the file name that shows up in + error messages. Defaults to the source file name. 
+ :param doraise: Flag indicating whether or not an exception should be + raised when a compile error is found. If an exception occurs and this + flag is set to False, a string indicating the nature of the exception + will be printed, and the function will return to the caller. If an + exception occurs and this flag is set to True, a PyCompileError + exception will be raised. + :param optimize: The optimization level for the compiler. Valid values + are -1, 0, 1 and 2. A value of -1 means to use the optimization + level of the current interpreter, as given by -O command line options. + :param invalidation_mode: + :param quiet: Return full output with False or 0, errors only with 1, + and no output with 2. + + :return: Path to the resulting byte compiled file. + + Note that it isn't necessary to byte-compile Python modules for + execution efficiency -- Python itself byte-compiles a module when + it is loaded, and if it can, writes out the bytecode to the + corresponding .pyc file. + + However, if a Python installation is shared between users, it is a + good idea to byte-compile all modules upon installation, since + other users may not be able to write in the source directories, + and thus they won't be able to write the .pyc file, and then + they would be byte-compiling every module each time it is loaded. + This can slow down program start-up considerably. + + See compileall.py for a script/module that uses this module to + byte-compile all installed files (or all files in selected + directories). + + Do note that FileExistsError is raised if cfile ends up pointing at a + non-regular file or symlink. Because the compilation uses a file renaming, + the resulting file would be regular and thus not the same type of file as + it was previously. + """ + if invalidation_mode is None: + invalidation_mode = _get_default_invalidation_mode() + if cfile is None: + if optimize >= 0: + optimization = optimize if optimize >= 1 else '' + cfile = importlib.util.cache_from_source(file, + optimization=optimization) + else: + cfile = importlib.util.cache_from_source(file) + if os.path.islink(cfile): + msg = ('{} is a symlink and will be changed into a regular file if ' + 'import writes a byte-compiled file to it') + raise FileExistsError(msg.format(cfile)) + elif os.path.exists(cfile) and not os.path.isfile(cfile): + msg = ('{} is a non-regular file and will be changed into a regular ' + 'one if import writes a byte-compiled file to it') + raise FileExistsError(msg.format(cfile)) + loader = importlib.machinery.SourceFileLoader('', file) + source_bytes = loader.get_data(file) + try: + code = loader.source_to_code(source_bytes, dfile or file, + _optimize=optimize) + except Exception as err: + py_exc = PyCompileError(err.__class__, err, dfile or file) + if quiet < 2: + if doraise: + raise py_exc + else: + sys.stderr.write(py_exc.msg + '\n') + return + try: + dirname = os.path.dirname(cfile) + if dirname: + os.makedirs(dirname) + except FileExistsError: + pass + if invalidation_mode == PycInvalidationMode.TIMESTAMP: + source_stats = loader.path_stats(file) + bytecode = importlib._bootstrap_external._code_to_timestamp_pyc( + code, source_stats['mtime'], source_stats['size']) + else: + source_hash = importlib.util.source_hash(source_bytes) + bytecode = importlib._bootstrap_external._code_to_hash_pyc( + code, + source_hash, + (invalidation_mode == PycInvalidationMode.CHECKED_HASH), + ) + mode = importlib._bootstrap_external._calc_mode(file) + importlib._bootstrap_external._write_atomic(cfile, bytecode, mode) + 
return cfile
+
+
+def main():
+    import argparse
+
+    description = 'A simple command-line interface for py_compile module.'
+    parser = argparse.ArgumentParser(description=description, color=True)
+    parser.add_argument(
+        '-q', '--quiet',
+        action='store_true',
+        help='Suppress error output',
+    )
+    parser.add_argument(
+        'filenames',
+        nargs='+',
+        help='Files to compile',
+    )
+    args = parser.parse_args()
+    if args.filenames == ['-']:
+        filenames = [filename.rstrip('\n') for filename in sys.stdin.readlines()]
+    else:
+        filenames = args.filenames
+    for filename in filenames:
+        try:
+            compile(filename, doraise=True)
+        except PyCompileError as error:
+            if args.quiet:
+                parser.exit(1)
+            else:
+                parser.exit(1, error.msg)
+        except OSError as error:
+            if args.quiet:
+                parser.exit(1)
+            else:
+                parser.exit(1, str(error))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/Python313_13_x86_Template/Lib/pyclbr.py b/Python314_4_x86_Template/Lib/pyclbr.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/pyclbr.py
rename to Python314_4_x86_Template/Lib/pyclbr.py
diff --git a/Python314_4_x86_Template/Lib/pydoc.py b/Python314_4_x86_Template/Lib/pydoc.py
new file mode 100644
index 00000000..1f8a6ef3
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/pydoc.py
@@ -0,0 +1,2887 @@
+"""Generate Python documentation in HTML or text for interactive use.
+
+At the Python interactive prompt, calling help(thing) on a Python object
+documents the object, and calling help() starts up an interactive
+help session.
+
+Or, at the shell command line outside of Python:
+
+Run "pydoc <name>" to show documentation on something.  <name> may be
+the name of a function, module, package, or a dotted reference to a
+class or function within a module or module in a package.  If the
+argument contains a path segment delimiter (e.g. slash on Unix,
+backslash on Windows) it is treated as the path to a Python source file.
+
+Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
+of all available modules.
+
+Run "pydoc -n <hostname>" to start an HTTP server with the given
+hostname (default: localhost) on the local machine.
+
+Run "pydoc -p <port>" to start an HTTP server on the given port on the
+local machine.  Port number 0 can be used to get an arbitrary unused port.
+
+Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
+open a web browser to interactively browse documentation.  Combine with
+the -n and -p options to control the hostname and port used.
+
+Run "pydoc -w <name>" to write out the HTML documentation for a module
+to a file named "<name>.html".
+
+Module docs for core modules are assumed to be in
+
+    https://docs.python.org/X.Y/library/
+
+This can be overridden by setting the PYTHONDOCS environment variable
+to a different URL or to a local directory containing the Library
+Reference Manual pages.
+"""
+__all__ = ['help']
+__author__ = "Ka-Ping Yee <ping@lfw.org>"
+__date__ = "26 February 2001"
+
+__credits__ = """Guido van Rossum, for an excellent programming language.
+Tommy Burnette, the original creator of manpy.
+Paul Prescod, for all his work on onlinehelp.
+Richard Chamberlain, for the first implementation of textdoc.
+"""
+
+# Known bugs that can't be fixed here:
+#   - synopsis() cannot be prevented from clobbering existing
+#     loaded modules.
+#   - If the __file__ attribute on a module is a relative path and
+#     the current directory is changed with os.chdir(), an incorrect
+#     path will be displayed.
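# ---------------------------------------------------------------------------
# [Editor's sketch -- not part of the upstream pydoc.py vendored by this
# diff.]  Programmatic use of the module: render the same plain-text
# documentation that help() would print, without paging it.
import pydoc

def doc_first_line(thing):
    # render_doc() builds the full text document for `thing`; the plaintext
    # renderer omits the terminal bold/underline escape sequences.
    text = pydoc.render_doc(thing, renderer=pydoc.plaintext)
    return text.splitlines()[0]   # e.g. "Python Library Documentation: ..."
# ---------------------------------------------------------------------------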
+ +import ast +import __future__ +import builtins +import importlib._bootstrap +import importlib._bootstrap_external +import importlib.machinery +import importlib.util +import inspect +import io +import os +import pkgutil +import platform +import re +import sys +import sysconfig +import textwrap +import time +import tokenize +import urllib.parse +import warnings +from annotationlib import Format +from collections import deque +from reprlib import Repr +from traceback import format_exception_only + +from _pyrepl.pager import (get_pager, pipe_pager, + plain_pager, tempfile_pager, tty_pager) + +# Expose plain() as pydoc.plain() +from _pyrepl.pager import plain # noqa: F401 + + +# --------------------------------------------------------- old names + +getpager = get_pager +pipepager = pipe_pager +plainpager = plain_pager +tempfilepager = tempfile_pager +ttypager = tty_pager + + +# --------------------------------------------------------- common routines + +def pathdirs(): + """Convert sys.path into a list of absolute, existing, unique paths.""" + dirs = [] + normdirs = [] + for dir in sys.path: + dir = os.path.abspath(dir or '.') + normdir = os.path.normcase(dir) + if normdir not in normdirs and os.path.isdir(dir): + dirs.append(dir) + normdirs.append(normdir) + return dirs + +def _findclass(func): + cls = sys.modules.get(func.__module__) + if cls is None: + return None + for name in func.__qualname__.split('.')[:-1]: + cls = getattr(cls, name) + if not inspect.isclass(cls): + return None + return cls + +def _finddoc(obj): + if inspect.ismethod(obj): + name = obj.__func__.__name__ + self = obj.__self__ + if (inspect.isclass(self) and + getattr(getattr(self, name, None), '__func__') is obj.__func__): + # classmethod + cls = self + else: + cls = self.__class__ + elif inspect.isfunction(obj): + name = obj.__name__ + cls = _findclass(obj) + if cls is None or getattr(cls, name) is not obj: + return None + elif inspect.isbuiltin(obj): + name = obj.__name__ + self = obj.__self__ + if (inspect.isclass(self) and + self.__qualname__ + '.' + name == obj.__qualname__): + # classmethod + cls = self + else: + cls = self.__class__ + # Should be tested before isdatadescriptor(). + elif isinstance(obj, property): + name = obj.__name__ + cls = _findclass(obj.fget) + if cls is None or getattr(cls, name) is not obj: + return None + elif inspect.ismethoddescriptor(obj) or inspect.isdatadescriptor(obj): + name = obj.__name__ + cls = obj.__objclass__ + if getattr(cls, name) is not obj: + return None + if inspect.ismemberdescriptor(obj): + slots = getattr(cls, '__slots__', None) + if isinstance(slots, dict) and name in slots: + return slots[name] + else: + return None + for base in cls.__mro__: + try: + doc = _getowndoc(getattr(base, name)) + except AttributeError: + continue + if doc is not None: + return doc + return None + +def _getowndoc(obj): + """Get the documentation string for an object if it is not + inherited from its class.""" + try: + doc = object.__getattribute__(obj, '__doc__') + if doc is None: + return None + if obj is not type: + typedoc = type(obj).__doc__ + if isinstance(typedoc, str) and typedoc == doc: + return None + return doc + except AttributeError: + return None + +def _getdoc(object): + """Get the documentation string for an object. + + All tabs are expanded to spaces. 
To clean up docstrings that are + indented to line up with blocks of code, any whitespace than can be + uniformly removed from the second line onwards is removed.""" + doc = _getowndoc(object) + if doc is None: + try: + doc = _finddoc(object) + except (AttributeError, TypeError): + return None + if not isinstance(doc, str): + return None + return inspect.cleandoc(doc) + +def getdoc(object): + """Get the doc string or comments for an object.""" + result = _getdoc(object) or inspect.getcomments(object) + return result and re.sub('^ *\n', '', result.rstrip()) or '' + +def splitdoc(doc): + """Split a doc string into a synopsis line (if any) and the rest.""" + lines = doc.strip().split('\n') + if len(lines) == 1: + return lines[0], '' + elif len(lines) >= 2 and not lines[1].rstrip(): + return lines[0], '\n'.join(lines[2:]) + return '', '\n'.join(lines) + +def _getargspec(object): + try: + signature = inspect.signature(object, annotation_format=Format.STRING) + if signature: + name = getattr(object, '__name__', '') + # function are always single-line and should not be formatted + max_width = (80 - len(name)) if name != '' else None + return signature.format(max_width=max_width, quote_annotation_strings=False) + except (ValueError, TypeError): + argspec = getattr(object, '__text_signature__', None) + if argspec: + if argspec[:2] == '($': + argspec = '(' + argspec[2:] + if getattr(object, '__self__', None) is not None: + # Strip the bound argument. + m = re.match(r'\(\w+(?:(?=\))|,\s*(?:/(?:(?=\))|,\s*))?)', argspec) + if m: + argspec = '(' + argspec[m.end():] + return argspec + return None + +def classname(object, modname): + """Get a class name and qualify it with a module name if necessary.""" + name = object.__name__ + if object.__module__ != modname: + name = object.__module__ + '.' + name + return name + +def parentname(object, modname): + """Get a name of the enclosing class (qualified it with a module name + if necessary) or module.""" + if '.' in object.__qualname__: + name = object.__qualname__.rpartition('.')[0] + if object.__module__ != modname and object.__module__ is not None: + return object.__module__ + '.' + name + else: + return name + else: + if object.__module__ != modname: + return object.__module__ + +def isdata(object): + """Check if an object is of a type that probably means it's data.""" + return not (inspect.ismodule(object) or inspect.isclass(object) or + inspect.isroutine(object) or inspect.isframe(object) or + inspect.istraceback(object) or inspect.iscode(object)) + +def replace(text, *pairs): + """Do a series of global replacements on a string.""" + while pairs: + text = pairs[1].join(text.split(pairs[0])) + pairs = pairs[2:] + return text + +def cram(text, maxlen): + """Omit part of a string if needed to make it fit in a maximum length.""" + if len(text) > maxlen: + pre = max(0, (maxlen-3)//2) + post = max(0, maxlen-3-pre) + return text[:pre] + '...' + text[len(text)-post:] + return text + +_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE) +def stripid(text): + """Remove the hexadecimal id from a Python object representation.""" + # The behaviour of %p is implementation-dependent in terms of case. + return _re_stripid.sub(r'\1', text) + +def _is_bound_method(fn): + """ + Returns True if fn is a bound method, regardless of whether + fn was implemented in Python or in C. 
+ """ + if inspect.ismethod(fn): + return True + if inspect.isbuiltin(fn): + self = getattr(fn, '__self__', None) + return not (inspect.ismodule(self) or (self is None)) + return False + + +def allmethods(cl): + methods = {} + for key, value in inspect.getmembers(cl, inspect.isroutine): + methods[key] = 1 + for base in cl.__bases__: + methods.update(allmethods(base)) # all your base are belong to us + for key in methods.keys(): + methods[key] = getattr(cl, key) + return methods + +def _split_list(s, predicate): + """Split sequence s via predicate, and return pair ([true], [false]). + + The return value is a 2-tuple of lists, + ([x for x in s if predicate(x)], + [x for x in s if not predicate(x)]) + """ + + yes = [] + no = [] + for x in s: + if predicate(x): + yes.append(x) + else: + no.append(x) + return yes, no + +_future_feature_names = set(__future__.all_feature_names) + +def visiblename(name, all=None, obj=None): + """Decide whether to show documentation on a variable.""" + # Certain special names are redundant or internal. + # XXX Remove __initializing__? + if name in {'__author__', '__builtins__', '__cached__', '__credits__', + '__date__', '__doc__', '__file__', '__spec__', + '__loader__', '__module__', '__name__', '__package__', + '__path__', '__qualname__', '__slots__', '__version__', + '__static_attributes__', '__firstlineno__', + '__annotate_func__', '__annotations_cache__'}: + return 0 + # Private names are hidden, but special names are displayed. + if name.startswith('__') and name.endswith('__'): return 1 + # Namedtuples have public fields and methods with a single leading underscore + if name.startswith('_') and hasattr(obj, '_fields'): + return True + # Ignore __future__ imports. + if obj is not __future__ and name in _future_feature_names: + if isinstance(getattr(obj, name, None), __future__._Feature): + return False + if all is not None: + # only document that which the programmer exported in __all__ + return name in all + else: + return not name.startswith('_') + +def classify_class_attrs(object): + """Wrap inspect.classify_class_attrs, with fixup for data descriptors and bound methods.""" + results = [] + for (name, kind, cls, value) in inspect.classify_class_attrs(object): + if inspect.isdatadescriptor(value): + kind = 'data descriptor' + if isinstance(value, property) and value.fset is None: + kind = 'readonly property' + elif kind == 'method' and _is_bound_method(value): + kind = 'static method' + results.append((name, kind, cls, value)) + return results + +def sort_attributes(attrs, object): + 'Sort the attrs list in-place by _fields and then alphabetically by name' + # This allows data descriptors to be ordered according + # to a _fields attribute if present. 
+ fields = getattr(object, '_fields', []) + try: + field_order = {name : i-len(fields) for (i, name) in enumerate(fields)} + except TypeError: + field_order = {} + keyfunc = lambda attr: (field_order.get(attr[0], 0), attr[0]) + attrs.sort(key=keyfunc) + +# ----------------------------------------------------- module manipulation + +def ispackage(path): + """Guess whether a path refers to a package directory.""" + warnings.warn('The pydoc.ispackage() function is deprecated', + DeprecationWarning, stacklevel=2) + if os.path.isdir(path): + for ext in ('.py', '.pyc'): + if os.path.isfile(os.path.join(path, '__init__' + ext)): + return True + return False + +def source_synopsis(file): + """Return the one-line summary of a file object, if present""" + + string = '' + try: + tokens = tokenize.generate_tokens(file.readline) + for tok_type, tok_string, _, _, _ in tokens: + if tok_type == tokenize.STRING: + string += tok_string + elif tok_type == tokenize.NEWLINE: + with warnings.catch_warnings(): + # Ignore the "invalid escape sequence" warning. + warnings.simplefilter("ignore", SyntaxWarning) + docstring = ast.literal_eval(string) + if not isinstance(docstring, str): + return None + return docstring.strip().split('\n')[0].strip() + elif tok_type == tokenize.OP and tok_string in ('(', ')'): + string += tok_string + elif tok_type not in (tokenize.COMMENT, tokenize.NL, tokenize.ENCODING): + return None + except (tokenize.TokenError, UnicodeDecodeError, SyntaxError): + return None + return None + +def synopsis(filename, cache={}): + """Get the one-line summary out of a module file.""" + mtime = os.stat(filename).st_mtime + lastupdate, result = cache.get(filename, (None, None)) + if lastupdate is None or lastupdate < mtime: + # Look for binary suffixes first, falling back to source. + if filename.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)): + loader_cls = importlib.machinery.SourcelessFileLoader + elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)): + loader_cls = importlib.machinery.ExtensionFileLoader + else: + loader_cls = None + # Now handle the choice. + if loader_cls is None: + # Must be a source file. + try: + file = tokenize.open(filename) + except OSError: + # module can't be opened, so skip it + return None + # text modules can be directly examined + with file: + result = source_synopsis(file) + else: + # Must be a binary module, which has to be imported. + loader = loader_cls('__temp__', filename) + # XXX We probably don't need to pass in the loader here. + spec = importlib.util.spec_from_file_location('__temp__', filename, + loader=loader) + try: + module = importlib._bootstrap._load(spec) + except: + return None + del sys.modules['__temp__'] + result = module.__doc__.splitlines()[0] if module.__doc__ else None + # Cache the result. 
+ cache[filename] = (mtime, result) + return result + +class ErrorDuringImport(Exception): + """Errors that occurred while trying to import something to document it.""" + def __init__(self, filename, exc_info): + if not isinstance(exc_info, tuple): + assert isinstance(exc_info, BaseException) + self.exc = type(exc_info) + self.value = exc_info + self.tb = exc_info.__traceback__ + else: + warnings.warn("A tuple value for exc_info is deprecated, use an exception instance", + DeprecationWarning) + + self.exc, self.value, self.tb = exc_info + self.filename = filename + + def __str__(self): + exc = self.exc.__name__ + return 'problem in %s - %s: %s' % (self.filename, exc, self.value) + +def importfile(path): + """Import a Python source file or compiled file given its path.""" + magic = importlib.util.MAGIC_NUMBER + with open(path, 'rb') as file: + is_bytecode = magic == file.read(len(magic)) + filename = os.path.basename(path) + name, ext = os.path.splitext(filename) + if is_bytecode: + loader = importlib._bootstrap_external.SourcelessFileLoader(name, path) + else: + loader = importlib._bootstrap_external.SourceFileLoader(name, path) + # XXX We probably don't need to pass in the loader here. + spec = importlib.util.spec_from_file_location(name, path, loader=loader) + try: + return importlib._bootstrap._load(spec) + except BaseException as err: + raise ErrorDuringImport(path, err) + +def safeimport(path, forceload=0, cache={}): + """Import a module; handle errors; return None if the module isn't found. + + If the module *is* found but an exception occurs, it's wrapped in an + ErrorDuringImport exception and reraised. Unlike __import__, if a + package path is specified, the module at the end of the path is returned, + not the package at the beginning. If the optional 'forceload' argument + is 1, we reload the module from disk (unless it's a dynamic extension).""" + try: + # If forceload is 1 and the module has been previously loaded from + # disk, we always have to reload the module. Checking the file's + # mtime isn't good enough (e.g. the module could contain a class + # that inherits from another module that has changed). + if forceload and path in sys.modules: + if path not in sys.builtin_module_names: + # Remove the module from sys.modules and re-import to try + # and avoid problems with partially loaded modules. + # Also remove any submodules because they won't appear + # in the newly loaded module's namespace if they're already + # in sys.modules. + subs = [m for m in sys.modules if m.startswith(path + '.')] + for key in [path] + subs: + # Prevent garbage collection. + cache[key] = sys.modules[key] + del sys.modules[key] + module = importlib.import_module(path) + except BaseException as err: + # Did the error occur before or after the module was found? + if path in sys.modules: + # An error occurred while executing the imported module. + raise ErrorDuringImport(sys.modules[path].__file__, err) + elif type(err) is SyntaxError: + # A SyntaxError occurred before we could execute the module. + raise ErrorDuringImport(err.filename, err) + elif isinstance(err, ImportError) and err.name == path: + # No such module in the path. + return None + else: + # Some other error occurred during the importing process. 
+ raise ErrorDuringImport(path, err) + return module + +# ---------------------------------------------------- formatter base class + +class Doc: + + PYTHONDOCS = os.environ.get("PYTHONDOCS", + "https://docs.python.org/%d.%d/library" + % sys.version_info[:2]) + + def document(self, object, name=None, *args): + """Generate documentation for an object.""" + args = (object, name) + args + # 'try' clause is to attempt to handle the possibility that inspect + # identifies something in a way that pydoc itself has issues handling; + # think 'super' and how it is a descriptor (which raises the exception + # by lacking a __name__ attribute) and an instance. + try: + if inspect.ismodule(object): return self.docmodule(*args) + if inspect.isclass(object): return self.docclass(*args) + if inspect.isroutine(object): return self.docroutine(*args) + except AttributeError: + pass + if inspect.isdatadescriptor(object): return self.docdata(*args) + return self.docother(*args) + + def fail(self, object, name=None, *args): + """Raise an exception for unimplemented types.""" + message = "don't know how to document object%s of type %s" % ( + name and ' ' + repr(name), type(object).__name__) + raise TypeError(message) + + docmodule = docclass = docroutine = docother = docproperty = docdata = fail + + def getdocloc(self, object, basedir=sysconfig.get_path('stdlib')): + """Return the location of module docs or None""" + + try: + file = inspect.getabsfile(object) + except TypeError: + file = '(built-in)' + + docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS) + + basedir = os.path.normcase(basedir) + if (isinstance(object, type(os)) and + (object.__name__ in ('errno', 'exceptions', 'gc', + 'marshal', 'posix', 'signal', 'sys', + '_thread', 'zipimport') or + (file.startswith(basedir) and + not file.startswith(os.path.join(basedir, 'site-packages')))) and + object.__name__ not in ('xml.etree', 'test.test_pydoc.pydoc_mod')): + + try: + from pydoc_data import module_docs + except ImportError: + module_docs = None + + if module_docs and object.__name__ in module_docs.module_docs: + doc_name = module_docs.module_docs[object.__name__] + if docloc.startswith(("http://", "https://")): + docloc = "{}/{}".format(docloc.rstrip("/"), doc_name) + else: + docloc = os.path.join(docloc, doc_name) + else: + docloc = None + else: + docloc = None + return docloc + +# -------------------------------------------- HTML documentation generator + +class HTMLRepr(Repr): + """Class for safely making an HTML representation of a Python object.""" + def __init__(self): + Repr.__init__(self) + self.maxlist = self.maxtuple = 20 + self.maxdict = 10 + self.maxstring = self.maxother = 100 + + def escape(self, text): + return replace(text, '&', '&', '<', '<', '>', '>') + + def repr(self, object): + return Repr.repr(self, object) + + def repr1(self, x, level): + if hasattr(type(x), '__name__'): + methodname = 'repr_' + '_'.join(type(x).__name__.split()) + if hasattr(self, methodname): + return getattr(self, methodname)(x, level) + return self.escape(cram(stripid(repr(x)), self.maxother)) + + def repr_string(self, x, level): + test = cram(x, self.maxstring) + testrepr = repr(test) + if '\\' in test and '\\' not in replace(testrepr, r'\\', ''): + # Backslashes are only literal in the string and are never + # needed to make any special characters, so show a raw string. 
+ return 'r' + testrepr[0] + self.escape(test) + testrepr[0] + return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)', + r'\1', + self.escape(testrepr)) + + repr_str = repr_string + + def repr_instance(self, x, level): + try: + return self.escape(cram(stripid(repr(x)), self.maxstring)) + except: + return self.escape('<%s instance>' % x.__class__.__name__) + + repr_unicode = repr_string + +class HTMLDoc(Doc): + """Formatter class for HTML documentation.""" + + # ------------------------------------------- HTML formatting utilities + + _repr_instance = HTMLRepr() + repr = _repr_instance.repr + escape = _repr_instance.escape + + def page(self, title, contents): + """Format an HTML page.""" + return '''\ + + + + +Python: %s + +%s +''' % (title, contents) + + def heading(self, title, extras=''): + """Format a page heading.""" + return ''' + + + +
 
+<td class="title">&nbsp;<br>%s</td>
+<td class="extra">%s</td></tr></table>
+    ''' % (title, extras or '&nbsp;')
+
+    def section(self, title, cls, contents, width=6,
+                prelude='', marginalia=None, gap='&nbsp;'):
+        """Format a section with a heading."""
+        if marginalia is None:
+            marginalia = '<span class="code">' + '&nbsp;' * width + '</span>'
+        result = '''<p>
+<table class="section">
+<tr class="decor %s-decor heading-text">
+<td class="section-title" colspan=3>&nbsp;<br>%s</td></tr>
+    ''' % (cls, title)
+        if prelude:
+            result = result + '''
+<tr><td class="decor %s-decor" rowspan=2>%s</td>
+<td class="decor %s-decor" colspan=2>%s</td></tr>
+<tr><td>%s</td>''' % (cls, marginalia, cls, prelude, gap)
+        else:
+            result = result + '''
+<tr><td class="decor %s-decor">%s</td><td>%s</td>''' % (cls, marginalia, gap)
+
+        return result + '\n<td class="singlecolumn">%s</td></tr></table>
' % contents + + def bigsection(self, title, *args): + """Format a section with a big heading.""" + title = '%s' % title + return self.section(title, *args) + + def preformat(self, text): + """Format literal preformatted text.""" + text = self.escape(text.expandtabs()) + return replace(text, '\n\n', '\n \n', '\n\n', '\n \n', + ' ', ' ', '\n', '
<br>\n')
+
+    def multicolumn(self, list, format):
+        """Format a list of items into a multi-column list."""
+        result = ''
+        rows = (len(list) + 3) // 4
+        for col in range(4):
+            result = result + '<td class="multicolumn">'
+            for i in range(rows*col, rows*col+rows):
+                if i < len(list):
+                    result = result + format(list[i]) + '<br>\n'
+            result = result + '</td>'
+        return '<table><tr>%s</tr></table>
' % result + + def grey(self, text): return '%s' % text + + def namelink(self, name, *dicts): + """Make a link for an identifier, given name-to-URL mappings.""" + for dict in dicts: + if name in dict: + return '
%s' % (dict[name], name) + return name + + def classlink(self, object, modname): + """Make a link for a class.""" + name, module = object.__name__, sys.modules.get(object.__module__) + if hasattr(module, name) and getattr(module, name) is object: + return '%s' % ( + module.__name__, name, classname(object, modname)) + return classname(object, modname) + + def parentlink(self, object, modname): + """Make a link for the enclosing class or module.""" + link = None + name, module = object.__name__, sys.modules.get(object.__module__) + if hasattr(module, name) and getattr(module, name) is object: + if '.' in object.__qualname__: + name = object.__qualname__.rpartition('.')[0] + if object.__module__ != modname: + link = '%s.html#%s' % (module.__name__, name) + else: + link = '#%s' % name + else: + if object.__module__ != modname: + link = '%s.html' % module.__name__ + if link: + return '%s' % (link, parentname(object, modname)) + else: + return parentname(object, modname) + + def modulelink(self, object): + """Make a link for a module.""" + return '%s' % (object.__name__, object.__name__) + + def modpkglink(self, modpkginfo): + """Make a link for a module or package to display in an index.""" + name, path, ispackage, shadowed = modpkginfo + if shadowed: + return self.grey(name) + if path: + url = '%s.%s.html' % (path, name) + else: + url = '%s.html' % name + if ispackage: + text = '%s (package)' % name + else: + text = name + return '%s' % (url, text) + + def filelink(self, url, path): + """Make a link to source file.""" + return '%s' % (url, path) + + def markup(self, text, escape=None, funcs={}, classes={}, methods={}): + """Mark up some plain text, given a context of symbols to look for. + Each context dictionary maps object names to anchor names.""" + escape = escape or self.escape + results = [] + here = 0 + pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|' + r'RFC[- ]?(\d+)|' + r'PEP[- ]?(\d+)|' + r'(self\.)?(\w+))') + while match := pattern.search(text, here): + start, end = match.span() + results.append(escape(text[here:start])) + + all, scheme, rfc, pep, selfdot, name = match.groups() + if scheme: + url = escape(all).replace('"', '"') + results.append('%s' % (url, url)) + elif rfc: + url = 'https://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc) + results.append('%s' % (url, escape(all))) + elif pep: + url = 'https://peps.python.org/pep-%04d/' % int(pep) + results.append('%s' % (url, escape(all))) + elif selfdot: + # Create a link for methods like 'self.method(...)' + # and use for attributes like 'self.attr' + if text[end:end+1] == '(': + results.append('self.' + self.namelink(name, methods)) + else: + results.append('self.%s' % name) + elif text[end:end+1] == '(': + results.append(self.namelink(name, methods, funcs, classes)) + else: + results.append(self.namelink(name, classes)) + here = end + results.append(escape(text[here:])) + return ''.join(results) + + # ---------------------------------------------- type-specific routines + + def formattree(self, tree, modname, parent=None): + """Produce HTML for a class tree as given by inspect.getclasstree().""" + result = '' + for entry in tree: + if isinstance(entry, tuple): + c, bases = entry + result = result + '

<dt class="heading-text">'
+                result = result + self.classlink(c, modname)
+                if bases and bases != (parent,):
+                    parents = []
+                    for base in bases:
+                        parents.append(self.classlink(base, modname))
+                    result = result + '(' + ', '.join(parents) + ')'
+                result = result + '\n</dt>'
+            elif isinstance(entry, list):
+                result = result + '<dd>\n%s</dd>\n' % self.formattree(
+                    entry, modname, c)
+        return '<dl>\n%s</dl>
\n' % result + + def docmodule(self, object, name=None, mod=None, *ignored): + """Produce HTML documentation for a module object.""" + name = object.__name__ # ignore the passed-in name + try: + all = object.__all__ + except AttributeError: + all = None + parts = name.split('.') + links = [] + for i in range(len(parts)-1): + links.append( + '%s' % + ('.'.join(parts[:i+1]), parts[i])) + linkedname = '.'.join(links + parts[-1:]) + head = '%s' % linkedname + try: + path = inspect.getabsfile(object) + url = urllib.parse.quote(path) + filelink = self.filelink(url, path) + except TypeError: + filelink = '(built-in)' + info = [] + if hasattr(object, '__version__'): + version = str(object.__version__) + if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': + version = version[11:-1].strip() + info.append('version %s' % self.escape(version)) + if hasattr(object, '__date__'): + info.append(self.escape(str(object.__date__))) + if info: + head = head + ' (%s)' % ', '.join(info) + docloc = self.getdocloc(object) + if docloc is not None: + docloc = '
<br><a href="%(docloc)s">Module Reference</a>' % locals()
+        else:
+            docloc = ''
+        result = self.heading(head, '<a href=".">index</a><br>
' + filelink + docloc) + + modules = inspect.getmembers(object, inspect.ismodule) + + classes, cdict = [], {} + for key, value in inspect.getmembers(object, inspect.isclass): + # if __all__ exists, believe it. Otherwise use old heuristic. + if (all is not None or + (inspect.getmodule(value) or object) is object): + if visiblename(key, all, object): + classes.append((key, value)) + cdict[key] = cdict[value] = '#' + key + for key, value in classes: + for base in value.__bases__: + key, modname = base.__name__, base.__module__ + module = sys.modules.get(modname) + if modname != name and module and hasattr(module, key): + if getattr(module, key) is base: + if not key in cdict: + cdict[key] = cdict[base] = modname + '.html#' + key + funcs, fdict = [], {} + for key, value in inspect.getmembers(object, inspect.isroutine): + # if __all__ exists, believe it. Otherwise use a heuristic. + if (all is not None + or inspect.isbuiltin(value) + or (inspect.getmodule(value) or object) is object): + if visiblename(key, all, object): + funcs.append((key, value)) + fdict[key] = '#-' + key + if inspect.isfunction(value): fdict[value] = fdict[key] + data = [] + for key, value in inspect.getmembers(object, isdata): + if visiblename(key, all, object): + data.append((key, value)) + + doc = self.markup(getdoc(object), self.preformat, fdict, cdict) + doc = doc and '%s' % doc + result = result + '

%s

\n' % doc + + if hasattr(object, '__path__'): + modpkgs = [] + for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): + modpkgs.append((modname, name, ispkg, 0)) + modpkgs.sort() + contents = self.multicolumn(modpkgs, self.modpkglink) + result = result + self.bigsection( + 'Package Contents', 'pkg-content', contents) + elif modules: + contents = self.multicolumn( + modules, lambda t: self.modulelink(t[1])) + result = result + self.bigsection( + 'Modules', 'pkg-content', contents) + + if classes: + classlist = [value for (key, value) in classes] + contents = [ + self.formattree(inspect.getclasstree(classlist, 1), name)] + for key, value in classes: + contents.append(self.document(value, key, name, fdict, cdict)) + result = result + self.bigsection( + 'Classes', 'index', ' '.join(contents)) + if funcs: + contents = [] + for key, value in funcs: + contents.append(self.document(value, key, name, fdict, cdict)) + result = result + self.bigsection( + 'Functions', 'functions', ' '.join(contents)) + if data: + contents = [] + for key, value in data: + contents.append(self.document(value, key)) + result = result + self.bigsection( + 'Data', 'data', '
\n'.join(contents)) + if hasattr(object, '__author__'): + contents = self.markup(str(object.__author__), self.preformat) + result = result + self.bigsection('Author', 'author', contents) + if hasattr(object, '__credits__'): + contents = self.markup(str(object.__credits__), self.preformat) + result = result + self.bigsection('Credits', 'credits', contents) + + return result + + def docclass(self, object, name=None, mod=None, funcs={}, classes={}, + *ignored): + """Produce HTML documentation for a class object.""" + realname = object.__name__ + name = name or realname + bases = object.__bases__ + + contents = [] + push = contents.append + + # Cute little class to pump out a horizontal rule between sections. + class HorizontalRule: + def __init__(self): + self.needone = 0 + def maybe(self): + if self.needone: + push('
<hr>\n')
+                self.needone = 1
+        hr = HorizontalRule()
+
+        # List the mro, if non-trivial.
+        mro = deque(inspect.getmro(object))
+        if len(mro) > 2:
+            hr.maybe()
+            push('<dl><dt>Method resolution order:</dt>\n')
+            for base in mro:
+                push('<dd>%s</dd>\n' % self.classlink(base,
+                                                      object.__module__))
+            push('</dl>
\n') + + def spill(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + try: + value = getattr(object, name) + except Exception: + # Some descriptors may meet a failure in their __get__. + # (bug #1785) + push(self.docdata(value, name, mod)) + else: + push(self.document(value, name, mod, + funcs, classes, mdict, object, homecls)) + push('\n') + return attrs + + def spilldescriptors(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + push(self.docdata(value, name, mod)) + return attrs + + def spilldata(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + base = self.docother(getattr(object, name), name, mod) + doc = getdoc(value) + if not doc: + push('
<dl><dt>%s</dl>\n' % base)
+                    else:
+                        doc = self.markup(getdoc(value), self.preformat,
+                                          funcs, classes, mdict)
+                        doc = '<dd><span class="code">%s</span>' % doc
+                        push('<dl><dt>%s%s</dl>
\n' % (base, doc)) + push('\n') + return attrs + + attrs = [(name, kind, cls, value) + for name, kind, cls, value in classify_class_attrs(object) + if visiblename(name, obj=object)] + + mdict = {} + for key, kind, homecls, value in attrs: + mdict[key] = anchor = '#' + name + '-' + key + try: + value = getattr(object, name) + except Exception: + # Some descriptors may meet a failure in their __get__. + # (bug #1785) + pass + try: + # The value may not be hashable (e.g., a data attr with + # a dict or list value). + mdict[value] = anchor + except TypeError: + pass + + while attrs: + if mro: + thisclass = mro.popleft() + else: + thisclass = attrs[0][2] + attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) + + if object is not builtins.object and thisclass is builtins.object: + attrs = inherited + continue + elif thisclass is object: + tag = 'defined here' + else: + tag = 'inherited from %s' % self.classlink(thisclass, + object.__module__) + tag += ':
\n' + + sort_attributes(attrs, object) + + # Pump out the attrs, segregated by kind. + attrs = spill('Methods %s' % tag, attrs, + lambda t: t[1] == 'method') + attrs = spill('Class methods %s' % tag, attrs, + lambda t: t[1] == 'class method') + attrs = spill('Static methods %s' % tag, attrs, + lambda t: t[1] == 'static method') + attrs = spilldescriptors("Readonly properties %s" % tag, attrs, + lambda t: t[1] == 'readonly property') + attrs = spilldescriptors('Data descriptors %s' % tag, attrs, + lambda t: t[1] == 'data descriptor') + attrs = spilldata('Data and other attributes %s' % tag, attrs, + lambda t: t[1] == 'data') + assert attrs == [] + attrs = inherited + + contents = ''.join(contents) + + if name == realname: + title = 'class %s' % ( + name, realname) + else: + title = '%s = class %s' % ( + name, name, realname) + if bases: + parents = [] + for base in bases: + parents.append(self.classlink(base, object.__module__)) + title = title + '(%s)' % ', '.join(parents) + + decl = '' + argspec = _getargspec(object) + if argspec and argspec != '()': + decl = name + self.escape(argspec) + '\n\n' + + doc = getdoc(object) + if decl: + doc = decl + (doc or '') + doc = self.markup(doc, self.preformat, funcs, classes, mdict) + doc = doc and '%s
 
' % doc + + return self.section(title, 'title', contents, 3, doc) + + def formatvalue(self, object): + """Format an argument default value as text.""" + return self.grey('=' + self.repr(object)) + + def docroutine(self, object, name=None, mod=None, + funcs={}, classes={}, methods={}, cl=None, homecls=None): + """Produce HTML documentation for a function or method object.""" + realname = object.__name__ + name = name or realname + if homecls is None: + homecls = cl + anchor = ('' if cl is None else cl.__name__) + '-' + name + note = '' + skipdocs = False + imfunc = None + if _is_bound_method(object): + imself = object.__self__ + if imself is cl: + imfunc = getattr(object, '__func__', None) + elif inspect.isclass(imself): + note = ' class method of %s' % self.classlink(imself, mod) + else: + note = ' method of %s instance' % self.classlink( + imself.__class__, mod) + elif (inspect.ismethoddescriptor(object) or + inspect.ismethodwrapper(object)): + try: + objclass = object.__objclass__ + except AttributeError: + pass + else: + if cl is None: + note = ' unbound %s method' % self.classlink(objclass, mod) + elif objclass is not homecls: + note = ' from ' + self.classlink(objclass, mod) + else: + imfunc = object + if inspect.isfunction(imfunc) and homecls is not None and ( + imfunc.__module__ != homecls.__module__ or + imfunc.__qualname__ != homecls.__qualname__ + '.' + realname): + pname = self.parentlink(imfunc, mod) + if pname: + note = ' from %s' % pname + + if (inspect.iscoroutinefunction(object) or + inspect.isasyncgenfunction(object)): + asyncqualifier = 'async ' + else: + asyncqualifier = '' + + if name == realname: + title = '%s' % (anchor, realname) + else: + if (cl is not None and + inspect.getattr_static(cl, realname, []) is object): + reallink = '%s' % ( + cl.__name__ + '-' + realname, realname) + skipdocs = True + if note.startswith(' from '): + note = '' + else: + reallink = realname + title = '%s = %s' % ( + anchor, name, reallink) + argspec = None + if inspect.isroutine(object): + argspec = _getargspec(object) + if argspec and realname == '': + title = '%s lambda ' % name + # XXX lambda's won't usually have func_annotations['return'] + # since the syntax doesn't support but it is possible. + # So removing parentheses isn't truly safe. + if not object.__annotations__: + argspec = argspec[1:-1] # remove parentheses + if not argspec: + argspec = '(...)' + + decl = asyncqualifier + title + self.escape(argspec) + (note and + self.grey('%s' % note)) + + if skipdocs: + return '
%s
\n' % decl + else: + doc = self.markup( + getdoc(object), self.preformat, funcs, classes, methods) + doc = doc and '
%s
' % doc + return '
%s
%s
\n' % (decl, doc) + + def docdata(self, object, name=None, mod=None, cl=None, *ignored): + """Produce html documentation for a data descriptor.""" + results = [] + push = results.append + + if name: + push('
<dl><dt><strong>%s</strong></dt>\n' % name)
+        doc = self.markup(getdoc(object), self.preformat)
+        if doc:
+            push('<dd>%s</dd>\n' % doc)
+        push('</dl>
\n') + + return ''.join(results) + + docproperty = docdata + + def docother(self, object, name=None, mod=None, *ignored): + """Produce HTML documentation for a data object.""" + lhs = name and '%s = ' % name or '' + return lhs + self.repr(object) + + def index(self, dir, shadowed=None): + """Generate an HTML index for a directory of modules.""" + modpkgs = [] + if shadowed is None: shadowed = {} + for importer, name, ispkg in pkgutil.iter_modules([dir]): + if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name): + # ignore a module if its name contains a surrogate character + continue + modpkgs.append((name, '', ispkg, name in shadowed)) + shadowed[name] = 1 + + modpkgs.sort() + contents = self.multicolumn(modpkgs, self.modpkglink) + return self.bigsection(dir, 'index', contents) + +# -------------------------------------------- text documentation generator + +class TextRepr(Repr): + """Class for safely making a text representation of a Python object.""" + def __init__(self): + Repr.__init__(self) + self.maxlist = self.maxtuple = 20 + self.maxdict = 10 + self.maxstring = self.maxother = 100 + + def repr1(self, x, level): + if hasattr(type(x), '__name__'): + methodname = 'repr_' + '_'.join(type(x).__name__.split()) + if hasattr(self, methodname): + return getattr(self, methodname)(x, level) + return cram(stripid(repr(x)), self.maxother) + + def repr_string(self, x, level): + test = cram(x, self.maxstring) + testrepr = repr(test) + if '\\' in test and '\\' not in replace(testrepr, r'\\', ''): + # Backslashes are only literal in the string and are never + # needed to make any special characters, so show a raw string. + return 'r' + testrepr[0] + test + testrepr[0] + return testrepr + + repr_str = repr_string + + def repr_instance(self, x, level): + try: + return cram(stripid(repr(x)), self.maxstring) + except: + return '<%s instance>' % x.__class__.__name__ + +class TextDoc(Doc): + """Formatter class for text documentation.""" + + # ------------------------------------------- text formatting utilities + + _repr_instance = TextRepr() + repr = _repr_instance.repr + + def bold(self, text): + """Format a string in bold by overstriking.""" + return ''.join(ch + '\b' + ch for ch in text) + + def indent(self, text, prefix=' '): + """Indent text by prepending a given prefix to each line.""" + if not text: return '' + lines = [(prefix + line).rstrip() for line in text.split('\n')] + return '\n'.join(lines) + + def section(self, title, contents): + """Format a section with a given heading.""" + clean_contents = self.indent(contents).rstrip() + return self.bold(title) + '\n' + clean_contents + '\n\n' + + # ---------------------------------------------- type-specific routines + + def formattree(self, tree, modname, parent=None, prefix=''): + """Render in text a class tree as returned by inspect.getclasstree().""" + result = '' + for entry in tree: + if isinstance(entry, tuple): + c, bases = entry + result = result + prefix + classname(c, modname) + if bases and bases != (parent,): + parents = (classname(c, modname) for c in bases) + result = result + '(%s)' % ', '.join(parents) + result = result + '\n' + elif isinstance(entry, list): + result = result + self.formattree( + entry, modname, c, prefix + ' ') + return result + + def docmodule(self, object, name=None, mod=None, *ignored): + """Produce text documentation for a given module object.""" + name = object.__name__ # ignore the passed-in name + synop, desc = splitdoc(getdoc(object)) + result = self.section('NAME', name + (synop and ' - ' + synop)) + all 
= getattr(object, '__all__', None) + docloc = self.getdocloc(object) + if docloc is not None: + result = result + self.section('MODULE REFERENCE', docloc + """ + +The following documentation is automatically generated from the Python +source files. It may be incomplete, incorrect or include features that +are considered implementation detail and may vary between Python +implementations. When in doubt, consult the module reference at the +location listed above. +""") + + if desc: + result = result + self.section('DESCRIPTION', desc) + + classes = [] + for key, value in inspect.getmembers(object, inspect.isclass): + # if __all__ exists, believe it. Otherwise use old heuristic. + if (all is not None + or (inspect.getmodule(value) or object) is object): + if visiblename(key, all, object): + classes.append((key, value)) + funcs = [] + for key, value in inspect.getmembers(object, inspect.isroutine): + # if __all__ exists, believe it. Otherwise use a heuristic. + if (all is not None + or inspect.isbuiltin(value) + or (inspect.getmodule(value) or object) is object): + if visiblename(key, all, object): + funcs.append((key, value)) + data = [] + for key, value in inspect.getmembers(object, isdata): + if visiblename(key, all, object): + data.append((key, value)) + + modpkgs = [] + modpkgs_names = set() + if hasattr(object, '__path__'): + for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): + modpkgs_names.add(modname) + if ispkg: + modpkgs.append(modname + ' (package)') + else: + modpkgs.append(modname) + + modpkgs.sort() + result = result + self.section( + 'PACKAGE CONTENTS', '\n'.join(modpkgs)) + + # Detect submodules as sometimes created by C extensions + submodules = [] + for key, value in inspect.getmembers(object, inspect.ismodule): + if value.__name__.startswith(name + '.') and key not in modpkgs_names: + submodules.append(key) + if submodules: + submodules.sort() + result = result + self.section( + 'SUBMODULES', '\n'.join(submodules)) + + if classes: + classlist = [value for key, value in classes] + contents = [self.formattree( + inspect.getclasstree(classlist, 1), name)] + for key, value in classes: + contents.append(self.document(value, key, name)) + result = result + self.section('CLASSES', '\n'.join(contents)) + + if funcs: + contents = [] + for key, value in funcs: + contents.append(self.document(value, key, name)) + result = result + self.section('FUNCTIONS', '\n'.join(contents)) + + if data: + contents = [] + for key, value in data: + contents.append(self.docother(value, key, name, maxlen=70)) + result = result + self.section('DATA', '\n'.join(contents)) + + if hasattr(object, '__version__'): + version = str(object.__version__) + if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': + version = version[11:-1].strip() + result = result + self.section('VERSION', version) + if hasattr(object, '__date__'): + result = result + self.section('DATE', str(object.__date__)) + if hasattr(object, '__author__'): + result = result + self.section('AUTHOR', str(object.__author__)) + if hasattr(object, '__credits__'): + result = result + self.section('CREDITS', str(object.__credits__)) + try: + file = inspect.getabsfile(object) + except TypeError: + file = '(built-in)' + result = result + self.section('FILE', file) + return result + + def docclass(self, object, name=None, mod=None, *ignored): + """Produce text documentation for a given class object.""" + realname = object.__name__ + name = name or realname + bases = object.__bases__ + + def makename(c, m=object.__module__): + 
return classname(c, m) + + if name == realname: + title = 'class ' + self.bold(realname) + else: + title = self.bold(name) + ' = class ' + realname + if bases: + parents = map(makename, bases) + title = title + '(%s)' % ', '.join(parents) + + contents = [] + push = contents.append + + argspec = _getargspec(object) + if argspec and argspec != '()': + push(name + argspec + '\n') + + doc = getdoc(object) + if doc: + push(doc + '\n') + + # List the mro, if non-trivial. + mro = deque(inspect.getmro(object)) + if len(mro) > 2: + push("Method resolution order:") + for base in mro: + push(' ' + makename(base)) + push('') + + # List the built-in subclasses, if any: + subclasses = sorted( + (str(cls.__name__) for cls in type.__subclasses__(object) + if (not cls.__name__.startswith("_") and + getattr(cls, '__module__', '') == "builtins")), + key=str.lower + ) + no_of_subclasses = len(subclasses) + MAX_SUBCLASSES_TO_DISPLAY = 4 + if subclasses: + push("Built-in subclasses:") + for subclassname in subclasses[:MAX_SUBCLASSES_TO_DISPLAY]: + push(' ' + subclassname) + if no_of_subclasses > MAX_SUBCLASSES_TO_DISPLAY: + push(' ... and ' + + str(no_of_subclasses - MAX_SUBCLASSES_TO_DISPLAY) + + ' other subclasses') + push('') + + # Cute little class to pump out a horizontal rule between sections. + class HorizontalRule: + def __init__(self): + self.needone = 0 + def maybe(self): + if self.needone: + push('-' * 70) + self.needone = 1 + hr = HorizontalRule() + + def spill(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + try: + value = getattr(object, name) + except Exception: + # Some descriptors may meet a failure in their __get__. + # (bug #1785) + push(self.docdata(value, name, mod)) + else: + push(self.document(value, + name, mod, object, homecls)) + return attrs + + def spilldescriptors(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + push(self.docdata(value, name, mod)) + return attrs + + def spilldata(msg, attrs, predicate): + ok, attrs = _split_list(attrs, predicate) + if ok: + hr.maybe() + push(msg) + for name, kind, homecls, value in ok: + doc = getdoc(value) + try: + obj = getattr(object, name) + except AttributeError: + obj = homecls.__dict__[name] + push(self.docother(obj, name, mod, maxlen=70, doc=doc) + + '\n') + return attrs + + attrs = [(name, kind, cls, value) + for name, kind, cls, value in classify_class_attrs(object) + if visiblename(name, obj=object)] + + while attrs: + if mro: + thisclass = mro.popleft() + else: + thisclass = attrs[0][2] + attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) + + if object is not builtins.object and thisclass is builtins.object: + attrs = inherited + continue + elif thisclass is object: + tag = "defined here" + else: + tag = "inherited from %s" % classname(thisclass, + object.__module__) + + sort_attributes(attrs, object) + + # Pump out the attrs, segregated by kind. 
+ attrs = spill("Methods %s:\n" % tag, attrs, + lambda t: t[1] == 'method') + attrs = spill("Class methods %s:\n" % tag, attrs, + lambda t: t[1] == 'class method') + attrs = spill("Static methods %s:\n" % tag, attrs, + lambda t: t[1] == 'static method') + attrs = spilldescriptors("Readonly properties %s:\n" % tag, attrs, + lambda t: t[1] == 'readonly property') + attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs, + lambda t: t[1] == 'data descriptor') + attrs = spilldata("Data and other attributes %s:\n" % tag, attrs, + lambda t: t[1] == 'data') + + assert attrs == [] + attrs = inherited + + contents = '\n'.join(contents) + if not contents: + return title + '\n' + return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n' + + def formatvalue(self, object): + """Format an argument default value as text.""" + return '=' + self.repr(object) + + def docroutine(self, object, name=None, mod=None, cl=None, homecls=None): + """Produce text documentation for a function or method object.""" + realname = object.__name__ + name = name or realname + if homecls is None: + homecls = cl + note = '' + skipdocs = False + imfunc = None + if _is_bound_method(object): + imself = object.__self__ + if imself is cl: + imfunc = getattr(object, '__func__', None) + elif inspect.isclass(imself): + note = ' class method of %s' % classname(imself, mod) + else: + note = ' method of %s instance' % classname( + imself.__class__, mod) + elif (inspect.ismethoddescriptor(object) or + inspect.ismethodwrapper(object)): + try: + objclass = object.__objclass__ + except AttributeError: + pass + else: + if cl is None: + note = ' unbound %s method' % classname(objclass, mod) + elif objclass is not homecls: + note = ' from ' + classname(objclass, mod) + else: + imfunc = object + if inspect.isfunction(imfunc) and homecls is not None and ( + imfunc.__module__ != homecls.__module__ or + imfunc.__qualname__ != homecls.__qualname__ + '.' + realname): + pname = parentname(imfunc, mod) + if pname: + note = ' from %s' % pname + + if (inspect.iscoroutinefunction(object) or + inspect.isasyncgenfunction(object)): + asyncqualifier = 'async ' + else: + asyncqualifier = '' + + if name == realname: + title = self.bold(realname) + else: + if (cl is not None and + inspect.getattr_static(cl, realname, []) is object): + skipdocs = True + if note.startswith(' from '): + note = '' + title = self.bold(name) + ' = ' + realname + argspec = None + + if inspect.isroutine(object): + argspec = _getargspec(object) + if argspec and realname == '': + title = self.bold(name) + ' lambda ' + # XXX lambda's won't usually have func_annotations['return'] + # since the syntax doesn't support but it is possible. + # So removing parentheses isn't truly safe. 
+ if not object.__annotations__: + argspec = argspec[1:-1] + if not argspec: + argspec = '(...)' + decl = asyncqualifier + title + argspec + note + + if skipdocs: + return decl + '\n' + else: + doc = getdoc(object) or '' + return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n') + + def docdata(self, object, name=None, mod=None, cl=None, *ignored): + """Produce text documentation for a data descriptor.""" + results = [] + push = results.append + + if name: + push(self.bold(name)) + push('\n') + doc = getdoc(object) or '' + if doc: + push(self.indent(doc)) + push('\n') + return ''.join(results) + + docproperty = docdata + + def docother(self, object, name=None, mod=None, parent=None, *ignored, + maxlen=None, doc=None): + """Produce text documentation for a data object.""" + repr = self.repr(object) + if maxlen: + line = (name and name + ' = ' or '') + repr + chop = maxlen - len(line) + if chop < 0: repr = repr[:chop] + '...' + line = (name and self.bold(name) + ' = ' or '') + repr + if not doc: + doc = getdoc(object) + if doc: + line += '\n' + self.indent(str(doc)) + '\n' + return line + +class _PlainTextDoc(TextDoc): + """Subclass of TextDoc which overrides string styling""" + def bold(self, text): + return text + +# --------------------------------------------------------- user interfaces + +def pager(text, title=''): + """The first time this is called, determine what kind of pager to use.""" + global pager + pager = get_pager() + pager(text, title) + +def describe(thing): + """Produce a short description of the given thing.""" + if inspect.ismodule(thing): + if thing.__name__ in sys.builtin_module_names: + return 'built-in module ' + thing.__name__ + if hasattr(thing, '__path__'): + return 'package ' + thing.__name__ + else: + return 'module ' + thing.__name__ + if inspect.isbuiltin(thing): + return 'built-in function ' + thing.__name__ + if inspect.isgetsetdescriptor(thing): + return 'getset descriptor %s.%s.%s' % ( + thing.__objclass__.__module__, thing.__objclass__.__name__, + thing.__name__) + if inspect.ismemberdescriptor(thing): + return 'member descriptor %s.%s.%s' % ( + thing.__objclass__.__module__, thing.__objclass__.__name__, + thing.__name__) + if inspect.isclass(thing): + return 'class ' + thing.__name__ + if inspect.isfunction(thing): + return 'function ' + thing.__name__ + if inspect.ismethod(thing): + return 'method ' + thing.__name__ + if inspect.ismethodwrapper(thing): + return 'method wrapper ' + thing.__name__ + if inspect.ismethoddescriptor(thing): + try: + return 'method descriptor ' + thing.__name__ + except AttributeError: + pass + return type(thing).__name__ + +def locate(path, forceload=0): + """Locate an object by name or dotted path, importing as necessary.""" + parts = [part for part in path.split('.') if part] + module, n = None, 0 + while n < len(parts): + nextmodule = safeimport('.'.join(parts[:n+1]), forceload) + if nextmodule: module, n = nextmodule, n + 1 + else: break + if module: + object = module + else: + object = builtins + for part in parts[n:]: + try: + object = getattr(object, part) + except AttributeError: + return None + return object + +# --------------------------------------- interactive interpreter interface + +text = TextDoc() +plaintext = _PlainTextDoc() +html = HTMLDoc() + +def resolve(thing, forceload=0): + """Given an object or a path to an object, get the object and its name.""" + if isinstance(thing, str): + object = locate(thing, forceload) + if object is None: + raise ImportError('''\ +No Python documentation found for %r. 
+Use help() to get the interactive help utility. +Use help(str) for help on the str class.''' % thing) + return object, thing + else: + name = getattr(thing, '__name__', None) + return thing, name if isinstance(name, str) else None + +def render_doc(thing, title='Python Library Documentation: %s', forceload=0, + renderer=None): + """Render text documentation, given an object or a path to an object.""" + if renderer is None: + renderer = text + object, name = resolve(thing, forceload) + desc = describe(object) + module = inspect.getmodule(object) + if name and '.' in name: + desc += ' in ' + name[:name.rfind('.')] + elif module and module is not object: + desc += ' in module ' + module.__name__ + + if not (inspect.ismodule(object) or + inspect.isclass(object) or + inspect.isroutine(object) or + inspect.isdatadescriptor(object) or + _getdoc(object)): + # If the passed object is a piece of data or an instance, + # document its available methods instead of its value. + if hasattr(object, '__origin__'): + object = object.__origin__ + else: + object = type(object) + desc += ' object' + return title % desc + '\n\n' + renderer.document(object, name) + +def doc(thing, title='Python Library Documentation: %s', forceload=0, + output=None, is_cli=False): + """Display text documentation, given an object or a path to an object.""" + if output is None: + try: + if isinstance(thing, str): + what = thing + else: + what = getattr(thing, '__qualname__', None) + if not isinstance(what, str): + what = getattr(thing, '__name__', None) + if not isinstance(what, str): + what = type(thing).__name__ + ' object' + pager(render_doc(thing, title, forceload), f'Help on {what!s}') + except ImportError as exc: + if is_cli: + raise + print(exc) + else: + try: + s = render_doc(thing, title, forceload, plaintext) + except ImportError as exc: + s = str(exc) + output.write(s) + +def writedoc(thing, forceload=0): + """Write HTML documentation to a file in the current directory.""" + object, name = resolve(thing, forceload) + page = html.page(describe(object), html.document(object, name)) + with open(name + '.html', 'w', encoding='utf-8') as file: + file.write(page) + print('wrote', name + '.html') + +def writedocs(dir, pkgpath='', done=None): + """Write out HTML documentation for all modules in a directory tree.""" + if done is None: done = {} + for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath): + writedoc(modname) + return + + +def _introdoc(): + import textwrap + ver = '%d.%d' % sys.version_info[:2] + if os.environ.get('PYTHON_BASIC_REPL'): + pyrepl_keys = '' + else: + # Additional help for keyboard shortcuts if enhanced REPL is used. + pyrepl_keys = ''' + You can use the following keyboard shortcuts at the main interpreter prompt. + F1: enter interactive help, F2: enter history browsing mode, F3: enter paste + mode (press again to exit). + ''' + return textwrap.dedent(f'''\ + Welcome to Python {ver}'s help utility! If this is your first time using + Python, you should definitely check out the tutorial at + https://docs.python.org/{ver}/tutorial/. + + Enter the name of any module, keyword, or topic to get help on writing + Python programs and using Python modules. To get a list of available + modules, keywords, symbols, or topics, enter "modules", "keywords", + "symbols", or "topics". + {pyrepl_keys} + Each module also comes with a one-line summary of what it does; to list + the modules whose name or summary contain a given string such as "spam", + enter "modules spam". 
+ + To quit this help utility and return to the interpreter, + enter "q", "quit" or "exit". + ''') + +class Helper: + + # These dictionaries map a topic name to either an alias, or a tuple + # (label, seealso-items). The "label" is the label of the corresponding + # section in the .rst file under Doc/ and an index into the dictionary + # in pydoc_data/topics.py. + # + # CAUTION: if you change one of these dictionaries, be sure to adapt the + # list of needed labels in Doc/tools/extensions/pyspecific.py and + # regenerate the pydoc_data/topics.py file by running + # make pydoc-topics + # in Doc/ and copying the output file into the Lib/ directory. + + keywords = { + 'False': '', + 'None': '', + 'True': '', + 'and': 'BOOLEAN', + 'as': 'with', + 'assert': ('assert', ''), + 'async': ('async', ''), + 'await': ('await', ''), + 'break': ('break', 'while for'), + 'class': ('class', 'CLASSES SPECIALMETHODS'), + 'continue': ('continue', 'while for'), + 'def': ('function', ''), + 'del': ('del', 'BASICMETHODS'), + 'elif': 'if', + 'else': ('else', 'while for'), + 'except': 'try', + 'finally': 'try', + 'for': ('for', 'break continue while'), + 'from': 'import', + 'global': ('global', 'nonlocal NAMESPACES'), + 'if': ('if', 'TRUTHVALUE'), + 'import': ('import', 'MODULES'), + 'in': ('in', 'SEQUENCEMETHODS'), + 'is': 'COMPARISON', + 'lambda': ('lambda', 'FUNCTIONS'), + 'nonlocal': ('nonlocal', 'global NAMESPACES'), + 'not': 'BOOLEAN', + 'or': 'BOOLEAN', + 'pass': ('pass', ''), + 'raise': ('raise', 'EXCEPTIONS'), + 'return': ('return', 'FUNCTIONS'), + 'try': ('try', 'EXCEPTIONS'), + 'while': ('while', 'break continue if TRUTHVALUE'), + 'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'), + 'yield': ('yield', ''), + } + # Either add symbols to this dictionary or to the symbols dictionary + # directly: Whichever is easier. They are merged later. 
+ _strprefixes = [p + q for p in ('b', 'f', 'r', 'u') for q in ("'", '"')] + _symbols_inverse = { + 'STRINGS' : ("'", "'''", '"', '"""', *_strprefixes), + 'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&', + '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'), + 'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'), + 'UNARY' : ('-', '~'), + 'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=', + '^=', '<<=', '>>=', '**=', '//='), + 'BITWISE' : ('<<', '>>', '&', '|', '^', '~'), + 'COMPLEX' : ('j', 'J') + } + symbols = { + '%': 'OPERATORS FORMATTING', + '**': 'POWER', + ',': 'TUPLES LISTS FUNCTIONS', + '.': 'ATTRIBUTES FLOAT MODULES OBJECTS', + '...': 'ELLIPSIS', + ':': 'SLICINGS DICTIONARYLITERALS', + '@': 'def class', + '\\': 'STRINGS', + ':=': 'ASSIGNMENTEXPRESSIONS', + '_': 'PRIVATENAMES', + '__': 'PRIVATENAMES SPECIALMETHODS', + '`': 'BACKQUOTES', + '(': 'TUPLES FUNCTIONS CALLS', + ')': 'TUPLES FUNCTIONS CALLS', + '[': 'LISTS SUBSCRIPTS SLICINGS', + ']': 'LISTS SUBSCRIPTS SLICINGS' + } + for topic, symbols_ in _symbols_inverse.items(): + for symbol in symbols_: + topics = symbols.get(symbol, topic) + if topic not in topics: + topics = topics + ' ' + topic + symbols[symbol] = topics + del topic, symbols_, symbol, topics + + topics = { + 'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS ' + 'FUNCTIONS CLASSES MODULES FILES inspect'), + 'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS ' + 'FORMATTING TYPES'), + 'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'), + 'FORMATTING': ('formatstrings', 'OPERATORS'), + 'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS ' + 'FORMATTING TYPES'), + 'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'), + 'INTEGER': ('integers', 'int range'), + 'FLOAT': ('floating', 'float math'), + 'COMPLEX': ('imaginary', 'complex cmath'), + 'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'), + 'MAPPINGS': 'DICTIONARIES', + 'FUNCTIONS': ('typesfunctions', 'def TYPES'), + 'METHODS': ('typesmethods', 'class def CLASSES TYPES'), + 'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'), + 'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'), + 'FRAMEOBJECTS': 'TYPES', + 'TRACEBACKS': 'TYPES', + 'NONE': ('bltin-null-object', ''), + 'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'), + 'SPECIALATTRIBUTES': ('specialattrs', ''), + 'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'), + 'MODULES': ('typesmodules', 'import'), + 'PACKAGES': 'import', + 'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN ' + 'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER ' + 'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES ' + 'LISTS DICTIONARIES'), + 'OPERATORS': 'EXPRESSIONS', + 'PRECEDENCE': 'EXPRESSIONS', + 'OBJECTS': ('objects', 'TYPES'), + 'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS ' + 'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS ' + 'NUMBERMETHODS CLASSES'), + 'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'), + 'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'), + 'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'), + 'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS ' + 'SPECIALMETHODS'), + 'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'), + 'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT ' + 'SPECIALMETHODS'), + 'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'), + 'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT 
DELETION DYNAMICFEATURES'), + 'DYNAMICFEATURES': ('dynamic-features', ''), + 'SCOPING': 'NAMESPACES', + 'FRAMES': 'NAMESPACES', + 'EXCEPTIONS': ('exceptions', 'try except finally raise'), + 'CONVERSIONS': ('conversions', ''), + 'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'), + 'SPECIALIDENTIFIERS': ('id-classes', ''), + 'PRIVATENAMES': ('atom-identifiers', ''), + 'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS ' + 'LISTLITERALS DICTIONARYLITERALS'), + 'TUPLES': 'SEQUENCES', + 'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'), + 'LISTS': ('typesseq-mutable', 'LISTLITERALS'), + 'LISTLITERALS': ('lists', 'LISTS LITERALS'), + 'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'), + 'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'), + 'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'), + 'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'), + 'SLICINGS': ('slicings', 'SEQUENCEMETHODS'), + 'CALLS': ('calls', 'EXPRESSIONS'), + 'POWER': ('power', 'EXPRESSIONS'), + 'UNARY': ('unary', 'EXPRESSIONS'), + 'BINARY': ('binary', 'EXPRESSIONS'), + 'SHIFTING': ('shifting', 'EXPRESSIONS'), + 'BITWISE': ('bitwise', 'EXPRESSIONS'), + 'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'), + 'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'), + 'ASSERTION': 'assert', + 'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'), + 'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'), + 'ASSIGNMENTEXPRESSIONS': ('assignment-expressions', ''), + 'DELETION': 'del', + 'RETURNING': 'return', + 'IMPORTING': 'import', + 'CONDITIONAL': 'if', + 'LOOPING': ('compound', 'for while break continue'), + 'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'), + 'DEBUGGING': ('debugger', 'pdb'), + 'CONTEXTMANAGERS': ('context-managers', 'with'), + } + + def __init__(self, input=None, output=None): + self._input = input + self._output = output + + @property + def input(self): + return self._input or sys.stdin + + @property + def output(self): + return self._output or sys.stdout + + def __repr__(self): + if inspect.stack()[1][3] == '?': + self() + return '' + return '<%s.%s instance>' % (self.__class__.__module__, + self.__class__.__qualname__) + + _GoInteractive = object() + def __call__(self, request=_GoInteractive): + if request is not self._GoInteractive: + try: + self.help(request) + except ImportError as err: + self.output.write(f'{err}\n') + else: + self.intro() + self.interact() + self.output.write(''' +You are now leaving help and returning to the Python interpreter. +If you want to ask for help on a particular object directly from the +interpreter, you can type "help(object)". Executing "help('string')" +has the same effect as typing a particular string at the help> prompt. 
+''') + + def interact(self): + self.output.write('\n') + while True: + try: + request = self.getline('help> ') + except (KeyboardInterrupt, EOFError): + break + request = request.strip() + if not request: + continue # back to the prompt + + # Make sure significant trailing quoting marks of literals don't + # get deleted while cleaning input + if (len(request) > 2 and request[0] == request[-1] in ("'", '"') + and request[0] not in request[1:-1]): + request = request[1:-1] + if request.lower() in ('q', 'quit', 'exit'): break + if request == 'help': + self.intro() + else: + self.help(request) + + def getline(self, prompt): + """Read one line, using input() when appropriate.""" + if self.input is sys.stdin: + return input(prompt) + else: + self.output.write(prompt) + self.output.flush() + return self.input.readline() + + def help(self, request, is_cli=False): + if isinstance(request, str): + request = request.strip() + if request == 'keywords': self.listkeywords() + elif request == 'symbols': self.listsymbols() + elif request == 'topics': self.listtopics() + elif request == 'modules': self.listmodules() + elif request[:8] == 'modules ': + self.listmodules(request.split()[1]) + elif request in self.symbols: self.showsymbol(request) + elif request in ['True', 'False', 'None']: + # special case these keywords since they are objects too + doc(eval(request), 'Help on %s:', output=self._output, is_cli=is_cli) + elif request in self.keywords: self.showtopic(request) + elif request in self.topics: self.showtopic(request) + elif request: doc(request, 'Help on %s:', output=self._output, is_cli=is_cli) + else: doc(str, 'Help on %s:', output=self._output, is_cli=is_cli) + elif isinstance(request, Helper): self() + else: doc(request, 'Help on %s:', output=self._output, is_cli=is_cli) + self.output.write('\n') + + def intro(self): + self.output.write(_introdoc()) + + def list(self, items, columns=4, width=80): + items = sorted(items) + colw = width // columns + rows = (len(items) + columns - 1) // columns + for row in range(rows): + for col in range(columns): + i = col * rows + row + if i < len(items): + self.output.write(items[i]) + if col < columns - 1: + self.output.write(' ' + ' ' * (colw - 1 - len(items[i]))) + self.output.write('\n') + + def listkeywords(self): + self.output.write(''' +Here is a list of the Python keywords. Enter any keyword to get more help. + +''') + self.list(self.keywords.keys()) + + def listsymbols(self): + self.output.write(''' +Here is a list of the punctuation symbols which Python assigns special meaning +to. Enter any symbol to get more help. + +''') + self.list(self.symbols.keys()) + + def listtopics(self): + self.output.write(''' +Here is a list of available topics. Enter any topic name to get more help. + +''') + self.list(self.topics.keys(), columns=3) + + def showtopic(self, topic, more_xrefs=''): + try: + import pydoc_data.topics + except ImportError: + self.output.write(''' +Sorry, topic and keyword documentation is not available because the +module "pydoc_data.topics" could not be found. 
+''') + return + target = self.topics.get(topic, self.keywords.get(topic)) + if not target: + self.output.write('no documentation found for %s\n' % repr(topic)) + return + if isinstance(target, str): + return self.showtopic(target, more_xrefs) + + label, xrefs = target + try: + doc = pydoc_data.topics.topics[label] + except KeyError: + self.output.write('no documentation found for %s\n' % repr(topic)) + return + doc = doc.strip() + '\n' + if more_xrefs: + xrefs = (xrefs or '') + ' ' + more_xrefs + if xrefs: + import textwrap + text = 'Related help topics: ' + ', '.join(xrefs.split()) + '\n' + wrapped_text = textwrap.wrap(text, 72) + doc += '\n%s\n' % '\n'.join(wrapped_text) + + if self._output is None: + pager(doc, f'Help on {topic!s}') + else: + self.output.write(doc) + + def _gettopic(self, topic, more_xrefs=''): + """Return unbuffered tuple of (topic, xrefs). + + If an error occurs here, the exception is caught and displayed by + the url handler. + + This function duplicates the showtopic method but returns its + result directly so it can be formatted for display in an html page. + """ + try: + import pydoc_data.topics + except ImportError: + return(''' +Sorry, topic and keyword documentation is not available because the +module "pydoc_data.topics" could not be found. +''' , '') + target = self.topics.get(topic, self.keywords.get(topic)) + if not target: + raise ValueError('could not find topic') + if isinstance(target, str): + return self._gettopic(target, more_xrefs) + label, xrefs = target + doc = pydoc_data.topics.topics[label] + if more_xrefs: + xrefs = (xrefs or '') + ' ' + more_xrefs + return doc, xrefs + + def showsymbol(self, symbol): + target = self.symbols[symbol] + topic, _, xrefs = target.partition(' ') + self.showtopic(topic, xrefs) + + def listmodules(self, key=''): + if key: + self.output.write(''' +Here is a list of modules whose name or summary contains '{}'. +If there are any, enter a module name to get more help. + +'''.format(key)) + apropos(key) + else: + self.output.write(''' +Please wait a moment while I gather a list of all available modules... + +''') + modules = {} + def callback(path, modname, desc, modules=modules): + if modname and modname[-9:] == '.__init__': + modname = modname[:-9] + ' (package)' + if modname.find('.') < 0: + modules[modname] = 1 + def onerror(modname): + callback(None, modname, None) + ModuleScanner().run(callback, onerror=onerror) + self.list(modules.keys()) + self.output.write(''' +Enter any module name to get more help. Or, type "modules spam" to search +for modules whose name or summary contain the string "spam". 
+''') + +help = Helper() + +class ModuleScanner: + """An interruptible scanner that searches module synopses.""" + + def run(self, callback, key=None, completer=None, onerror=None): + if key: key = key.lower() + self.quit = False + seen = {} + + for modname in sys.builtin_module_names: + if modname != '__main__': + seen[modname] = 1 + if key is None: + callback(None, modname, '') + else: + name = __import__(modname).__doc__ or '' + desc = name.split('\n')[0] + name = modname + ' - ' + desc + if name.lower().find(key) >= 0: + callback(None, modname, desc) + + for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror): + if self.quit: + break + + if key is None: + callback(None, modname, '') + else: + try: + spec = importer.find_spec(modname) + except SyntaxError: + # raised by tests for bad coding cookies or BOM + continue + loader = spec.loader + if hasattr(loader, 'get_source'): + try: + source = loader.get_source(modname) + except Exception: + if onerror: + onerror(modname) + continue + desc = source_synopsis(io.StringIO(source)) or '' + if hasattr(loader, 'get_filename'): + path = loader.get_filename(modname) + else: + path = None + else: + try: + module = importlib._bootstrap._load(spec) + except ImportError: + if onerror: + onerror(modname) + continue + desc = module.__doc__.splitlines()[0] if module.__doc__ else '' + path = getattr(module,'__file__',None) + name = modname + ' - ' + desc + if name.lower().find(key) >= 0: + callback(path, modname, desc) + + if completer: + completer() + +def apropos(key): + """Print all the one-line module summaries that contain a substring.""" + def callback(path, modname, desc): + if modname[-9:] == '.__init__': + modname = modname[:-9] + ' (package)' + print(modname, desc and '- ' + desc) + def onerror(modname): + pass + with warnings.catch_warnings(): + warnings.filterwarnings('ignore') # ignore problems during import + ModuleScanner().run(callback, key, onerror=onerror) + +# --------------------------------------- enhanced web browser interface + +def _start_server(urlhandler, hostname, port): + """Start an HTTP server thread on a specific port. + + Start an HTML/text server thread, so HTML or text documents can be + browsed dynamically and interactively with a web browser. Example use: + + >>> import time + >>> import pydoc + + Define a URL handler. To determine what the client is asking + for, check the URL and content_type. + + Then get or generate some text or HTML code and return it. + + >>> def my_url_handler(url, content_type): + ... text = 'the URL sent was: (%s, %s)' % (url, content_type) + ... return text + + Start server thread on port 0. + If you use port 0, the server will pick a random port number. + You can then use serverthread.port to get the port number. + + >>> port = 0 + >>> serverthread = pydoc._start_server(my_url_handler, port) + + Check that the server is really started. If it is, open browser + and get first page. Use serverthread.url as the starting page. + + >>> if serverthread.serving: + ... import webbrowser + + The next two lines are commented out so a browser doesn't open if + doctest is run on this module. + + #... webbrowser.open(serverthread.url) + #True + + Let the server do its thing. We just need to monitor its status. + Use time.sleep so the loop doesn't hog the CPU. + + >>> starttime = time.monotonic() + >>> timeout = 1 #seconds + + This is a short timeout for testing purposes. + + >>> while serverthread.serving: + ... time.sleep(.01) + ... 
if serverthread.serving and time.monotonic() - starttime > timeout:
+    ...          serverthread.stop()
+    ...          break
+
+    Print any errors that may have occurred.
+
+    >>> print(serverthread.error)
+    None
+    """
+    import http.server
+    import email.message
+    import select
+    import threading
+
+    class DocHandler(http.server.BaseHTTPRequestHandler):
+
+        def do_GET(self):
+            """Process a request from an HTML browser.
+
+            The URL received is in self.path.
+            Get an HTML page from self.urlhandler and send it.
+            """
+            if self.path.endswith('.css'):
+                content_type = 'text/css'
+            else:
+                content_type = 'text/html'
+            self.send_response(200)
+            self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
+            self.end_headers()
+            self.wfile.write(self.urlhandler(
+                self.path, content_type).encode('utf-8'))
+
+        def log_message(self, *args):
+            # Don't log messages.
+            pass
+
+    class DocServer(http.server.HTTPServer):
+
+        def __init__(self, host, port, callback):
+            self.host = host
+            self.address = (self.host, port)
+            self.callback = callback
+            self.base.__init__(self, self.address, self.handler)
+            self.quit = False
+
+        def serve_until_quit(self):
+            while not self.quit:
+                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
+                if rd:
+                    self.handle_request()
+            self.server_close()
+
+        def server_activate(self):
+            self.base.server_activate(self)
+            if self.callback:
+                self.callback(self)
+
+    class ServerThread(threading.Thread):
+
+        def __init__(self, urlhandler, host, port):
+            self.urlhandler = urlhandler
+            self.host = host
+            self.port = int(port)
+            threading.Thread.__init__(self)
+            self.serving = False
+            self.error = None
+            self.docserver = None
+
+        def run(self):
+            """Start the server."""
+            try:
+                DocServer.base = http.server.HTTPServer
+                DocServer.handler = DocHandler
+                DocHandler.MessageClass = email.message.Message
+                DocHandler.urlhandler = staticmethod(self.urlhandler)
+                docsvr = DocServer(self.host, self.port, self.ready)
+                self.docserver = docsvr
+                docsvr.serve_until_quit()
+            except Exception as err:
+                self.error = err
+
+        def ready(self, server):
+            self.serving = True
+            self.host = server.host
+            self.port = server.server_port
+            self.url = 'http://%s:%d/' % (self.host, self.port)
+
+        def stop(self):
+            """Stop the server and this thread nicely"""
+            self.docserver.quit = True
+            self.join()
+            # explicitly break a reference cycle: DocServer.callback
+            # has indirectly a reference to ServerThread.
+            self.docserver = None
+            self.serving = False
+            self.url = None
+
+    thread = ServerThread(urlhandler, hostname, port)
+    thread.start()
+    # Wait until thread.serving is True and thread.docserver is set
+    # to make sure we are really up before returning.
+    while not thread.error and not (thread.serving and thread.docserver):
+        time.sleep(.01)
+    return thread
+
+
+def _url_handler(url, content_type="text/html"):
+    """The pydoc url handler for use with the pydoc server.
+
+    If the content_type is 'text/css', the _pydoc.css style
+    sheet is read and returned if it exists.
+
+    If the content_type is 'text/html', then the result of
+    get_html_page(url) is returned.
+    """
+    class _HTMLDoc(HTMLDoc):
+
+        def page(self, title, contents):
+            """Format an HTML page."""
+            css_path = "pydoc_data/_pydoc.css"
+            css_link = (
+                '<link rel="stylesheet" type="text/css" href="%s">' %
+                css_path)
+            return '''\
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Pydoc: %s</title>
+%s</head><body>%s<div style="clear:both;padding-top:.5em;">%s</div>
+</body></html>''' % (title, css_link, html_navbar(), contents)
+
+    html = _HTMLDoc()
+
+    def html_navbar():
+        version = html.escape("%s [%s, %s]" % (platform.python_version(),
+                                               platform.python_build()[0],
+                                               platform.python_compiler()))
+        return """
+            <div style='float:left'>
+                Python %s<br>%s
+            </div>
+            <div style='float:right'>
+                <div style='text-align:right; padding-bottom:.7em;'>
+                  <a href="index.html">Module Index</a>
+                  : <a href="topics.html">Topics</a>
+                  : <a href="keywords.html">Keywords</a>
+                </div>
+                <div>
+                    <form action="get" style='display:inline;'>
+                      <input class="input-search" type=text name=key size="15">
+                      <input class="submit-search" type=submit value="Get">
+                   </form>&nbsp;
+                    <form action="search" style='display:inline;'>
+                      <input class="input-search" type=text name=key size="15">
+                      <input class="submit-search" type=submit value="Search">
+                     </form>
+                </div>
+            </div>
+            """ % (version, html.escape(platform.platform(terse=True)))
+
+    def html_index():
+        """Module Index page."""
+
+        def bltinlink(name):
+            return '<a href="%s.html">%s</a>' % (name, name)
+
+        heading = html.heading(
+            '<strong class="title">Index of Modules</strong>'
+        )
+        names = [name for name in sys.builtin_module_names
+                 if name != '__main__']
+        contents = html.multicolumn(names, bltinlink)
+        contents = [heading, '<p>' + html.bigsection(
+            'Built-in Modules', 'index', contents)]
+
+        seen = {}
+        for dir in sys.path:
+            contents.append(html.index(dir, seen))
+
+        contents.append(
+            '<p align=right class="heading-text"><small><small>pydoc by Ka-Ping Yee'
+            '&lt;ping@lfw.org&gt;</small></small></p>')
+        return 'Index of Modules', ''.join(contents)
+
+    def html_search(key):
+        """Search results page."""
+        # scan for modules
+        search_result = []
+
+        def callback(path, modname, desc):
+            if modname[-9:] == '.__init__':
+                modname = modname[:-9] + ' (package)'
+            search_result.append((modname, desc and '- ' + desc))
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings('ignore')  # ignore problems during import
+            def onerror(modname):
+                pass
+            ModuleScanner().run(callback, key, onerror=onerror)
+
+        # format page
+        def bltinlink(name):
+            return '<a href="%s.html">%s</a>' % (name, name)
+
+        results = []
+        heading = html.heading(
+            '<strong class="title">Search Results</strong>',
+        )
+        for name, desc in search_result:
+            results.append(bltinlink(name) + desc)
+        contents = heading + html.bigsection(
+            'key = %s' % key, 'index', '<br>'.join(results))
+        return 'Search Results', contents
+
+    def html_topics():
+        """Index of topic texts available."""
+
+        def bltinlink(name):
+            return '<a href="topic?key=%s">%s</a>' % (name, name)
+
+        heading = html.heading(
+            '<strong class="title">INDEX</strong>',
+        )
+        names = sorted(Helper.topics.keys())
+
+        contents = html.multicolumn(names, bltinlink)
+        contents = heading + html.bigsection(
+            'Topics', 'index', contents)
+        return 'Topics', contents
+
+    def html_keywords():
+        """Index of keywords."""
+        heading = html.heading(
+            '<strong class="title">INDEX</strong>',
+        )
+        names = sorted(Helper.keywords.keys())
+
+        def bltinlink(name):
+            return '<a href="topic?key=%s">%s</a>' % (name, name)
+
+        contents = html.multicolumn(names, bltinlink)
+        contents = heading + html.bigsection(
+            'Keywords', 'index', contents)
+        return 'Keywords', contents
+
+    def html_topicpage(topic):
+        """Topic or keyword help page."""
+        buf = io.StringIO()
+        htmlhelp = Helper(buf, buf)
+        contents, xrefs = htmlhelp._gettopic(topic)
+        if topic in htmlhelp.keywords:
+            title = 'KEYWORD'
+        else:
+            title = 'TOPIC'
+        heading = html.heading(
+            '<strong class="title">%s</strong>' % title,
+        )
+        contents = '<pre>%s</pre>' % html.markup(contents)
+        contents = html.bigsection(topic , 'index', contents)
+        if xrefs:
+            xrefs = sorted(xrefs.split())
+
+            def bltinlink(name):
+                return '<a href="topic?key=%s">%s</a>' % (name, name)
+
+            xrefs = html.multicolumn(xrefs, bltinlink)
+            xrefs = html.section('Related help topics: ', 'index', xrefs)
+        return ('%s %s' % (title, topic),
+                ''.join((heading, contents, xrefs)))
+
+    def html_getobj(url):
+        obj = locate(url, forceload=1)
+        if obj is None and url != 'None':
+            raise ValueError('could not find object')
+        title = describe(obj)
+        content = html.document(obj, url)
+        return title, content
+
+    def html_error(url, exc):
+        heading = html.heading(
+            '<strong class="title">Error</strong>',
+        )
+        contents = '<br>'.join(html.escape(line) for line in
+                               format_exception_only(type(exc), exc))
+        contents = heading + html.bigsection(url, 'error', contents)
+        return "Error - %s" % url, contents
+
+    def get_html_page(url):
+        """Generate an HTML page for url."""
+        complete_url = url
+        if url.endswith('.html'):
+            url = url[:-5]
+        try:
+            if url in ("", "index"):
+                title, content = html_index()
+            elif url == "topics":
+                title, content = html_topics()
+            elif url == "keywords":
+                title, content = html_keywords()
+            elif '=' in url:
+                op, _, url = url.partition('=')
+                if op == "search?key":
+                    title, content = html_search(url)
+                elif op == "topic?key":
+                    # try topics first, then objects.
+                    try:
+                        title, content = html_topicpage(url)
+                    except ValueError:
+                        title, content = html_getobj(url)
+                elif op == "get?key":
+                    # try objects first, then topics.
+                    if url in ("", "index"):
+                        title, content = html_index()
+                    else:
+                        try:
+                            title, content = html_getobj(url)
+                        except ValueError:
+                            title, content = html_topicpage(url)
+                else:
+                    raise ValueError('bad pydoc url')
+            else:
+                title, content = html_getobj(url)
+        except Exception as exc:
+            # Catch any errors and display them in an error page.
+            title, content = html_error(complete_url, exc)
+        return html.page(title, content)
+
+    if url.startswith('/'):
+        url = url[1:]
+    if content_type == 'text/css':
+        path_here = os.path.dirname(os.path.realpath(__file__))
+        css_path = os.path.join(path_here, url)
+        with open(css_path) as fp:
+            return ''.join(fp.readlines())
+    elif content_type == 'text/html':
+        return get_html_page(url)
+    # Errors outside the url handler are caught by the server.
+    raise TypeError('unknown content type %r for url %s' % (content_type, url))
+
+
+def browse(port=0, *, open_browser=True, hostname='localhost'):
+    """Start the enhanced pydoc web server and open a web browser.
+
+    Use port '0' to start the server on an arbitrary port.
+    Set open_browser to False to suppress opening a browser.
+    """
+    import webbrowser
+    serverthread = _start_server(_url_handler, hostname, port)
+    if serverthread.error:
+        print(serverthread.error)
+        return
+    if serverthread.serving:
+        server_help_msg = 'Server commands: [b]rowser, [q]uit'
+        if open_browser:
+            webbrowser.open(serverthread.url)
+        try:
+            print('Server ready at', serverthread.url)
+            print(server_help_msg)
+            while serverthread.serving:
+                cmd = input('server> ')
+                cmd = cmd.lower()
+                if cmd == 'q':
+                    break
+                elif cmd == 'b':
+                    webbrowser.open(serverthread.url)
+                else:
+                    print(server_help_msg)
+        except (KeyboardInterrupt, EOFError):
+            print()
+        finally:
+            if serverthread.serving:
+                serverthread.stop()
+                print('Server stopped')
+
+
+# -------------------------------------------------- command-line interface
+
+def ispath(x):
+    return isinstance(x, str) and x.find(os.sep) >= 0
+
+def _get_revised_path(given_path, argv0):
+    """Ensures current directory is on returned path, and argv0 directory is not
+
+    Exception: argv0 dir is left alone if it's also pydoc's directory.
+
+    Returns a new path entry list, or None if no adjustment is needed.
+    """
+    # Scripts may get the current directory in their path by default if they're
+    # run with the -m switch, or directly from the current directory.
+    # The interactive prompt also allows imports from the current directory.
+
+    # Accordingly, if the current directory is already present, don't make
+    # any changes to the given_path
+    if '' in given_path or os.curdir in given_path or os.getcwd() in given_path:
+        return None
+
+    # Otherwise, add the current directory to the given path, and remove the
+    # script directory (as long as the latter isn't also pydoc's directory).
+    stdlib_dir = os.path.dirname(__file__)
+    script_dir = os.path.dirname(argv0)
+    revised_path = given_path.copy()
+    if script_dir in given_path and not os.path.samefile(script_dir, stdlib_dir):
+        revised_path.remove(script_dir)
+    revised_path.insert(0, os.getcwd())
+    return revised_path
+
+
+# Note: the tests only cover _get_revised_path, not _adjust_cli_sys_path itself
+def _adjust_cli_sys_path():
+    """Ensures current directory is on sys.path, and __main__ directory is not.
+
+    Exception: __main__ dir is left alone if it's also pydoc's directory.
+    """
+    revised_path = _get_revised_path(sys.path, sys.argv[0])
+    if revised_path is not None:
+        sys.path[:] = revised_path
+
+
+def cli():
+    """Command-line interface (looks at sys.argv to decide what to do)."""
+    import getopt
+    class BadUsage(Exception): pass
+
+    _adjust_cli_sys_path()
+
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'bk:n:p:w')
+        writing = False
+        start_server = False
+        open_browser = False
+        port = 0
+        hostname = 'localhost'
+        for opt, val in opts:
+            if opt == '-b':
+                start_server = True
+                open_browser = True
+            if opt == '-k':
+                apropos(val)
+                return
+            if opt == '-p':
+                start_server = True
+                port = val
+            if opt == '-w':
+                writing = True
+            if opt == '-n':
+                start_server = True
+                hostname = val
+
+        if start_server:
+            browse(port, hostname=hostname, open_browser=open_browser)
+            return
+
+        if not args: raise BadUsage
+        for arg in args:
+            if ispath(arg) and not os.path.exists(arg):
+                print('file %r does not exist' % arg)
+                sys.exit(1)
+            try:
+                if ispath(arg) and os.path.isfile(arg):
+                    arg = importfile(arg)
+                if writing:
+                    if ispath(arg) and os.path.isdir(arg):
+                        writedocs(arg)
+                    else:
+                        writedoc(arg)
+                else:
+                    help.help(arg, is_cli=True)
+            except (ImportError, ErrorDuringImport) as value:
+                print(value)
+                sys.exit(1)
+
+    except (getopt.error, BadUsage):
+        cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
+        print("""pydoc - the Python documentation tool
+
+{cmd} <name> ...
+    Show text documentation on something. <name> may be the name of a
+    Python keyword, topic, function, module, or package, or a dotted
+    reference to a class or function within a module or module in a
+    package. If <name> contains a '{sep}', it is used as the path to a
+    Python source file to document. If name is 'keywords', 'topics',
+    or 'modules', a listing of these things is displayed.
+
+{cmd} -k <keyword>
+    Search for a keyword in the synopsis lines of all available modules.
+
+{cmd} -n <hostname>
+    Start an HTTP server with the given hostname (default: localhost).
+
+{cmd} -p <port>
+    Start an HTTP server on the given port on the local machine. Port
+    number 0 can be used to get an arbitrary unused port.
+
+{cmd} -b
+    Start an HTTP server on an arbitrary unused port and open a web browser
+    to interactively browse documentation. This option can be used in
+    combination with -n and/or -p.
+
+{cmd} -w <name> ...
+    Write out the HTML documentation for a module to a file in the current
+    directory. If <name> contains a '{sep}', it is treated as a filename; if
+    it names a directory, documentation is written for all the contents.
+""".format(cmd=cmd, sep=os.sep)) + +if __name__ == '__main__': + cli() diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__init__.py b/Python314_4_x86_Template/Lib/pydoc_data/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__init__.py rename to Python314_4_x86_Template/Lib/pydoc_data/__init__.py diff --git a/Python313_13_x86_Template/Lib/pydoc_data/_pydoc.css b/Python314_4_x86_Template/Lib/pydoc_data/_pydoc.css similarity index 100% rename from Python313_13_x86_Template/Lib/pydoc_data/_pydoc.css rename to Python314_4_x86_Template/Lib/pydoc_data/_pydoc.css diff --git a/Python314_4_x86_Template/Lib/pydoc_data/module_docs.py b/Python314_4_x86_Template/Lib/pydoc_data/module_docs.py new file mode 100644 index 00000000..d6583783 --- /dev/null +++ b/Python314_4_x86_Template/Lib/pydoc_data/module_docs.py @@ -0,0 +1,320 @@ +# Autogenerated by Sphinx on Tue Apr 7 16:13:12 2026 +# as part of the release process. + +module_docs = { + '__future__': '__future__#module-__future__', + '__main__': '__main__#module-__main__', + '_thread': '_thread#module-_thread', + '_tkinter': 'tkinter#module-_tkinter', + 'abc': 'abc#module-abc', + 'aifc': 'aifc#module-aifc', + 'annotationlib': 'annotationlib#module-annotationlib', + 'argparse': 'argparse#module-argparse', + 'array': 'array#module-array', + 'ast': 'ast#module-ast', + 'asynchat': 'asynchat#module-asynchat', + 'asyncio': 'asyncio#module-asyncio', + 'asyncore': 'asyncore#module-asyncore', + 'atexit': 'atexit#module-atexit', + 'audioop': 'audioop#module-audioop', + 'base64': 'base64#module-base64', + 'bdb': 'bdb#module-bdb', + 'binascii': 'binascii#module-binascii', + 'bisect': 'bisect#module-bisect', + 'builtins': 'builtins#module-builtins', + 'bz2': 'bz2#module-bz2', + 'cProfile': 'profile#module-cProfile', + 'calendar': 'calendar#module-calendar', + 'cgi': 'cgi#module-cgi', + 'cgitb': 'cgitb#module-cgitb', + 'chunk': 'chunk#module-chunk', + 'cmath': 'cmath#module-cmath', + 'cmd': 'cmd#module-cmd', + 'code': 'code#module-code', + 'codecs': 'codecs#module-codecs', + 'codeop': 'codeop#module-codeop', + 'collections': 'collections#module-collections', + 'collections.abc': 'collections.abc#module-collections.abc', + 'colorsys': 'colorsys#module-colorsys', + 'compileall': 'compileall#module-compileall', + 'compression': 'compression#module-compression', + 'compression.zstd': 'compression.zstd#module-compression.zstd', + 'concurrent.futures': 'concurrent.futures#module-concurrent.futures', + 'concurrent.interpreters': 'concurrent.interpreters#module-concurrent.interpreters', + 'configparser': 'configparser#module-configparser', + 'contextlib': 'contextlib#module-contextlib', + 'contextvars': 'contextvars#module-contextvars', + 'copy': 'copy#module-copy', + 'copyreg': 'copyreg#module-copyreg', + 'crypt': 'crypt#module-crypt', + 'csv': 'csv#module-csv', + 'ctypes': 'ctypes#module-ctypes', + 'curses': 'curses#module-curses', + 'curses.ascii': 'curses.ascii#module-curses.ascii', + 'curses.panel': 'curses.panel#module-curses.panel', + 'curses.textpad': 'curses#module-curses.textpad', + 'dataclasses': 'dataclasses#module-dataclasses', + 'datetime': 'datetime#module-datetime', + 'dbm': 'dbm#module-dbm', + 'dbm.dumb': 'dbm#module-dbm.dumb', + 'dbm.gnu': 'dbm#module-dbm.gnu', + 'dbm.ndbm': 'dbm#module-dbm.ndbm', + 'dbm.sqlite3': 'dbm#module-dbm.sqlite3', + 'decimal': 'decimal#module-decimal', + 'difflib': 'difflib#module-difflib', + 'dis': 'dis#module-dis', + 
'distutils': 'distutils#module-distutils', + 'doctest': 'doctest#module-doctest', + 'email': 'email#module-email', + 'email.charset': 'email.charset#module-email.charset', + 'email.contentmanager': 'email.contentmanager#module-email.contentmanager', + 'email.encoders': 'email.encoders#module-email.encoders', + 'email.errors': 'email.errors#module-email.errors', + 'email.generator': 'email.generator#module-email.generator', + 'email.header': 'email.header#module-email.header', + 'email.headerregistry': 'email.headerregistry#module-email.headerregistry', + 'email.iterators': 'email.iterators#module-email.iterators', + 'email.message': 'email.message#module-email.message', + 'email.mime': 'email.mime#module-email.mime', + 'email.mime.application': 'email.mime#module-email.mime.application', + 'email.mime.audio': 'email.mime#module-email.mime.audio', + 'email.mime.base': 'email.mime#module-email.mime.base', + 'email.mime.image': 'email.mime#module-email.mime.image', + 'email.mime.message': 'email.mime#module-email.mime.message', + 'email.mime.multipart': 'email.mime#module-email.mime.multipart', + 'email.mime.nonmultipart': 'email.mime#module-email.mime.nonmultipart', + 'email.mime.text': 'email.mime#module-email.mime.text', + 'email.parser': 'email.parser#module-email.parser', + 'email.policy': 'email.policy#module-email.policy', + 'email.utils': 'email.utils#module-email.utils', + 'encodings': 'codecs#module-encodings', + 'encodings.idna': 'codecs#module-encodings.idna', + 'encodings.mbcs': 'codecs#module-encodings.mbcs', + 'encodings.utf_8_sig': 'codecs#module-encodings.utf_8_sig', + 'ensurepip': 'ensurepip#module-ensurepip', + 'enum': 'enum#module-enum', + 'errno': 'errno#module-errno', + 'faulthandler': 'faulthandler#module-faulthandler', + 'fcntl': 'fcntl#module-fcntl', + 'filecmp': 'filecmp#module-filecmp', + 'fileinput': 'fileinput#module-fileinput', + 'fnmatch': 'fnmatch#module-fnmatch', + 'fractions': 'fractions#module-fractions', + 'ftplib': 'ftplib#module-ftplib', + 'functools': 'functools#module-functools', + 'gc': 'gc#module-gc', + 'getopt': 'getopt#module-getopt', + 'getpass': 'getpass#module-getpass', + 'gettext': 'gettext#module-gettext', + 'glob': 'glob#module-glob', + 'graphlib': 'graphlib#module-graphlib', + 'grp': 'grp#module-grp', + 'gzip': 'gzip#module-gzip', + 'hashlib': 'hashlib#module-hashlib', + 'heapq': 'heapq#module-heapq', + 'hmac': 'hmac#module-hmac', + 'html': 'html#module-html', + 'html.entities': 'html.entities#module-html.entities', + 'html.parser': 'html.parser#module-html.parser', + 'http': 'http#module-http', + 'http.client': 'http.client#module-http.client', + 'http.cookiejar': 'http.cookiejar#module-http.cookiejar', + 'http.cookies': 'http.cookies#module-http.cookies', + 'http.server': 'http.server#module-http.server', + 'idlelib': 'idle#module-idlelib', + 'imaplib': 'imaplib#module-imaplib', + 'imghdr': 'imghdr#module-imghdr', + 'imp': 'imp#module-imp', + 'importlib': 'importlib#module-importlib', + 'importlib.abc': 'importlib#module-importlib.abc', + 'importlib.machinery': 'importlib#module-importlib.machinery', + 'importlib.metadata': 'importlib.metadata#module-importlib.metadata', + 'importlib.resources': 'importlib.resources#module-importlib.resources', + 'importlib.resources.abc': 'importlib.resources.abc#module-importlib.resources.abc', + 'importlib.util': 'importlib#module-importlib.util', + 'inspect': 'inspect#module-inspect', + 'io': 'io#module-io', + 'ipaddress': 'ipaddress#module-ipaddress', + 'itertools': 'itertools#module-itertools', + 
'json': 'json#module-json', + 'json.tool': 'json#module-json.tool', + 'keyword': 'keyword#module-keyword', + 'linecache': 'linecache#module-linecache', + 'locale': 'locale#module-locale', + 'logging': 'logging#module-logging', + 'logging.config': 'logging.config#module-logging.config', + 'logging.handlers': 'logging.handlers#module-logging.handlers', + 'lzma': 'lzma#module-lzma', + 'mailbox': 'mailbox#module-mailbox', + 'mailcap': 'mailcap#module-mailcap', + 'marshal': 'marshal#module-marshal', + 'math': 'math#module-math', + 'mimetypes': 'mimetypes#module-mimetypes', + 'mmap': 'mmap#module-mmap', + 'modulefinder': 'modulefinder#module-modulefinder', + 'msilib': 'msilib#module-msilib', + 'msvcrt': 'msvcrt#module-msvcrt', + 'multiprocessing': 'multiprocessing#module-multiprocessing', + 'multiprocessing.connection': 'multiprocessing#module-multiprocessing.connection', + 'multiprocessing.dummy': 'multiprocessing#module-multiprocessing.dummy', + 'multiprocessing.managers': 'multiprocessing#module-multiprocessing.managers', + 'multiprocessing.pool': 'multiprocessing#module-multiprocessing.pool', + 'multiprocessing.shared_memory': 'multiprocessing.shared_memory#module-multiprocessing.shared_memory', + 'multiprocessing.sharedctypes': 'multiprocessing#module-multiprocessing.sharedctypes', + 'netrc': 'netrc#module-netrc', + 'nis': 'nis#module-nis', + 'nntplib': 'nntplib#module-nntplib', + 'numbers': 'numbers#module-numbers', + 'operator': 'operator#module-operator', + 'optparse': 'optparse#module-optparse', + 'os': 'os#module-os', + 'os.path': 'os.path#module-os.path', + 'ossaudiodev': 'ossaudiodev#module-ossaudiodev', + 'pathlib': 'pathlib#module-pathlib', + 'pathlib.types': 'pathlib#module-pathlib.types', + 'pdb': 'pdb#module-pdb', + 'pickle': 'pickle#module-pickle', + 'pickletools': 'pickletools#module-pickletools', + 'pipes': 'pipes#module-pipes', + 'pkgutil': 'pkgutil#module-pkgutil', + 'platform': 'platform#module-platform', + 'plistlib': 'plistlib#module-plistlib', + 'poplib': 'poplib#module-poplib', + 'posix': 'posix#module-posix', + 'pprint': 'pprint#module-pprint', + 'profile': 'profile#module-profile', + 'pstats': 'profile#module-pstats', + 'pty': 'pty#module-pty', + 'pwd': 'pwd#module-pwd', + 'py_compile': 'py_compile#module-py_compile', + 'pyclbr': 'pyclbr#module-pyclbr', + 'pydoc': 'pydoc#module-pydoc', + 'queue': 'queue#module-queue', + 'quopri': 'quopri#module-quopri', + 'random': 'random#module-random', + 're': 're#module-re', + 'readline': 'readline#module-readline', + 'reprlib': 'reprlib#module-reprlib', + 'resource': 'resource#module-resource', + 'rlcompleter': 'rlcompleter#module-rlcompleter', + 'runpy': 'runpy#module-runpy', + 'sched': 'sched#module-sched', + 'secrets': 'secrets#module-secrets', + 'select': 'select#module-select', + 'selectors': 'selectors#module-selectors', + 'shelve': 'shelve#module-shelve', + 'shlex': 'shlex#module-shlex', + 'shutil': 'shutil#module-shutil', + 'signal': 'signal#module-signal', + 'site': 'site#module-site', + 'sitecustomize': 'site#module-sitecustomize', + 'smtpd': 'smtpd#module-smtpd', + 'smtplib': 'smtplib#module-smtplib', + 'sndhdr': 'sndhdr#module-sndhdr', + 'socket': 'socket#module-socket', + 'socketserver': 'socketserver#module-socketserver', + 'spwd': 'spwd#module-spwd', + 'sqlite3': 'sqlite3#module-sqlite3', + 'ssl': 'ssl#module-ssl', + 'stat': 'stat#module-stat', + 'statistics': 'statistics#module-statistics', + 'string': 'string#module-string', + 'string.templatelib': 'string.templatelib#module-string.templatelib', + 'stringprep': 
'stringprep#module-stringprep', + 'struct': 'struct#module-struct', + 'subprocess': 'subprocess#module-subprocess', + 'sunau': 'sunau#module-sunau', + 'symtable': 'symtable#module-symtable', + 'sys': 'sys#module-sys', + 'sys.monitoring': 'sys.monitoring#module-sys.monitoring', + 'sysconfig': 'sysconfig#module-sysconfig', + 'syslog': 'syslog#module-syslog', + 'tabnanny': 'tabnanny#module-tabnanny', + 'tarfile': 'tarfile#module-tarfile', + 'telnetlib': 'telnetlib#module-telnetlib', + 'tempfile': 'tempfile#module-tempfile', + 'termios': 'termios#module-termios', + 'test': 'test#module-test', + 'test.regrtest': 'test#module-test.regrtest', + 'test.support': 'test#module-test.support', + 'test.support.bytecode_helper': 'test#module-test.support.bytecode_helper', + 'test.support.import_helper': 'test#module-test.support.import_helper', + 'test.support.os_helper': 'test#module-test.support.os_helper', + 'test.support.script_helper': 'test#module-test.support.script_helper', + 'test.support.socket_helper': 'test#module-test.support.socket_helper', + 'test.support.threading_helper': 'test#module-test.support.threading_helper', + 'test.support.warnings_helper': 'test#module-test.support.warnings_helper', + 'textwrap': 'textwrap#module-textwrap', + 'threading': 'threading#module-threading', + 'time': 'time#module-time', + 'timeit': 'timeit#module-timeit', + 'tkinter': 'tkinter#module-tkinter', + 'tkinter.colorchooser': 'tkinter.colorchooser#module-tkinter.colorchooser', + 'tkinter.commondialog': 'dialog#module-tkinter.commondialog', + 'tkinter.dnd': 'tkinter.dnd#module-tkinter.dnd', + 'tkinter.filedialog': 'dialog#module-tkinter.filedialog', + 'tkinter.font': 'tkinter.font#module-tkinter.font', + 'tkinter.messagebox': 'tkinter.messagebox#module-tkinter.messagebox', + 'tkinter.scrolledtext': 'tkinter.scrolledtext#module-tkinter.scrolledtext', + 'tkinter.simpledialog': 'dialog#module-tkinter.simpledialog', + 'tkinter.ttk': 'tkinter.ttk#module-tkinter.ttk', + 'token': 'token#module-token', + 'tokenize': 'tokenize#module-tokenize', + 'tomllib': 'tomllib#module-tomllib', + 'trace': 'trace#module-trace', + 'traceback': 'traceback#module-traceback', + 'tracemalloc': 'tracemalloc#module-tracemalloc', + 'tty': 'tty#module-tty', + 'turtle': 'turtle#module-turtle', + 'turtledemo': 'turtle#module-turtledemo', + 'types': 'types#module-types', + 'typing': 'typing#module-typing', + 'unicodedata': 'unicodedata#module-unicodedata', + 'unittest': 'unittest#module-unittest', + 'unittest.mock': 'unittest.mock#module-unittest.mock', + 'urllib': 'urllib#module-urllib', + 'urllib.error': 'urllib.error#module-urllib.error', + 'urllib.parse': 'urllib.parse#module-urllib.parse', + 'urllib.request': 'urllib.request#module-urllib.request', + 'urllib.response': 'urllib.request#module-urllib.response', + 'urllib.robotparser': 'urllib.robotparser#module-urllib.robotparser', + 'usercustomize': 'site#module-usercustomize', + 'uu': 'uu#module-uu', + 'uuid': 'uuid#module-uuid', + 'venv': 'venv#module-venv', + 'warnings': 'warnings#module-warnings', + 'wave': 'wave#module-wave', + 'weakref': 'weakref#module-weakref', + 'webbrowser': 'webbrowser#module-webbrowser', + 'winreg': 'winreg#module-winreg', + 'winsound': 'winsound#module-winsound', + 'wsgiref': 'wsgiref#module-wsgiref', + 'wsgiref.handlers': 'wsgiref#module-wsgiref.handlers', + 'wsgiref.headers': 'wsgiref#module-wsgiref.headers', + 'wsgiref.simple_server': 'wsgiref#module-wsgiref.simple_server', + 'wsgiref.types': 'wsgiref#module-wsgiref.types', + 'wsgiref.util': 
'wsgiref#module-wsgiref.util', + 'wsgiref.validate': 'wsgiref#module-wsgiref.validate', + 'xdrlib': 'xdrlib#module-xdrlib', + 'xml': 'xml#module-xml', + 'xml.dom': 'xml.dom#module-xml.dom', + 'xml.dom.minidom': 'xml.dom.minidom#module-xml.dom.minidom', + 'xml.dom.pulldom': 'xml.dom.pulldom#module-xml.dom.pulldom', + 'xml.etree.ElementInclude': 'xml.etree.elementtree#module-xml.etree.ElementInclude', + 'xml.etree.ElementTree': 'xml.etree.elementtree#module-xml.etree.ElementTree', + 'xml.parsers.expat': 'pyexpat#module-xml.parsers.expat', + 'xml.parsers.expat.errors': 'pyexpat#module-xml.parsers.expat.errors', + 'xml.parsers.expat.model': 'pyexpat#module-xml.parsers.expat.model', + 'xml.sax': 'xml.sax#module-xml.sax', + 'xml.sax.handler': 'xml.sax.handler#module-xml.sax.handler', + 'xml.sax.saxutils': 'xml.sax.utils#module-xml.sax.saxutils', + 'xml.sax.xmlreader': 'xml.sax.reader#module-xml.sax.xmlreader', + 'xmlrpc': 'xmlrpc#module-xmlrpc', + 'xmlrpc.client': 'xmlrpc.client#module-xmlrpc.client', + 'xmlrpc.server': 'xmlrpc.server#module-xmlrpc.server', + 'zipapp': 'zipapp#module-zipapp', + 'zipfile': 'zipfile#module-zipfile', + 'zipimport': 'zipimport#module-zipimport', + 'zlib': 'zlib#module-zlib', + 'zoneinfo': 'zoneinfo#module-zoneinfo', +} diff --git a/Python314_4_x86_Template/Lib/pydoc_data/topics.py b/Python314_4_x86_Template/Lib/pydoc_data/topics.py new file mode 100644 index 00000000..6dca99ce --- /dev/null +++ b/Python314_4_x86_Template/Lib/pydoc_data/topics.py @@ -0,0 +1,14506 @@ +# Autogenerated by Sphinx on Tue Apr 7 16:13:12 2026 +# as part of the release process. + +topics = { + 'assert': r'''The "assert" statement +********************** + +Assert statements are a convenient way to insert debugging assertions +into a program: + + assert_stmt: "assert" expression ["," expression] + +The simple form, "assert expression", is equivalent to + + if __debug__: + if not expression: raise AssertionError + +The extended form, "assert expression1, expression2", is equivalent to + + if __debug__: + if not expression1: raise AssertionError(expression2) + +These equivalences assume that "__debug__" and "AssertionError" refer +to the built-in variables with those names. In the current +implementation, the built-in variable "__debug__" is "True" under +normal circumstances, "False" when optimization is requested (command +line option "-O"). The current code generator emits no code for an +"assert" statement when optimization is requested at compile time. +Note that it is unnecessary to include the source code for the +expression that failed in the error message; it will be displayed as +part of the stack trace. + +Assignments to "__debug__" are illegal. The value for the built-in +variable is determined when the interpreter starts. +''', + 'assignment': r'''Assignment statements +********************* + +Assignment statements are used to (re)bind names to values and to +modify attributes or items of mutable objects: + + assignment_stmt: (target_list "=")+ (starred_expression | yield_expression) + target_list: target ("," target)* [","] + target: identifier + | "(" [target_list] ")" + | "[" [target_list] "]" + | attributeref + | subscription + | "*" target + +(See section Primaries for the syntax definitions for *attributeref* +and *subscription*.) 
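+
+For example, the repetition in the grammar above permits chained
+assignment, which binds one and the same object to every target list
+(a small illustrative snippet):
+
+   >>> a = b = [1, 2]
+   >>> a is b
+   True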
+ +An assignment statement evaluates the expression list (remember that +this can be a single expression or a comma-separated list, the latter +yielding a tuple) and assigns the single resulting object to each of +the target lists, from left to right. + +Assignment is defined recursively depending on the form of the target +(list). When a target is part of a mutable object (an attribute +reference or subscription), the mutable object must ultimately perform +the assignment and decide about its validity, and may raise an +exception if the assignment is unacceptable. The rules observed by +various types and the exceptions raised are given with the definition +of the object types (see section The standard type hierarchy). + +Assignment of an object to a target list, optionally enclosed in +parentheses or square brackets, is recursively defined as follows. + +* If the target list is a single target with no trailing comma, + optionally in parentheses, the object is assigned to that target. + +* Else: + + * If the target list contains one target prefixed with an asterisk, + called a “starred” target: The object must be an iterable with at + least as many items as there are targets in the target list, minus + one. The first items of the iterable are assigned, from left to + right, to the targets before the starred target. The final items + of the iterable are assigned to the targets after the starred + target. A list of the remaining items in the iterable is then + assigned to the starred target (the list can be empty). + + * Else: The object must be an iterable with the same number of items + as there are targets in the target list, and the items are + assigned, from left to right, to the corresponding targets. + +Assignment of an object to a single target is recursively defined as +follows. + +* If the target is an identifier (name): + + * If the name does not occur in a "global" or "nonlocal" statement + in the current code block: the name is bound to the object in the + current local namespace. + + * Otherwise: the name is bound to the object in the global namespace + or the outer namespace determined by "nonlocal", respectively. + + The name is rebound if it was already bound. This may cause the + reference count for the object previously bound to the name to reach + zero, causing the object to be deallocated and its destructor (if it + has one) to be called. + +* If the target is an attribute reference: The primary expression in + the reference is evaluated. It should yield an object with + assignable attributes; if this is not the case, "TypeError" is + raised. That object is then asked to assign the assigned object to + the given attribute; if it cannot perform the assignment, it raises + an exception (usually but not necessarily "AttributeError"). + + Note: If the object is a class instance and the attribute reference + occurs on both sides of the assignment operator, the right-hand side + expression, "a.x" can access either an instance attribute or (if no + instance attribute exists) a class attribute. The left-hand side + target "a.x" is always set as an instance attribute, creating it if + necessary. 
Thus, the two occurrences of "a.x" do not necessarily + refer to the same attribute: if the right-hand side expression + refers to a class attribute, the left-hand side creates a new + instance attribute as the target of the assignment: + + class Cls: + x = 3 # class variable + inst = Cls() + inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3 + + This description does not necessarily apply to descriptor + attributes, such as properties created with "property()". + +* If the target is a subscription: The primary expression in the + reference is evaluated. Next, the subscript expression is evaluated. + Then, the primary’s "__setitem__()" method is called with two + arguments: the subscript and the assigned object. + + Typically, "__setitem__()" is defined on mutable sequence objects + (such as lists) and mapping objects (such as dictionaries), and + behaves as follows. + + If the primary is a mutable sequence object (such as a list), the + subscript must yield an integer. If it is negative, the sequence’s + length is added to it. The resulting value must be a nonnegative + integer less than the sequence’s length, and the sequence is asked + to assign the assigned object to its item with that index. If the + index is out of range, "IndexError" is raised (assignment to a + subscripted sequence cannot add new items to a list). + + If the primary is a mapping object (such as a dictionary), the + subscript must have a type compatible with the mapping’s key type, + and the mapping is then asked to create a key/value pair which maps + the subscript to the assigned object. This can either replace an + existing key/value pair with the same key value, or insert a new + key/value pair (if no key with the same value existed). + + If the target is a slicing: The primary expression should evaluate + to a mutable sequence object (such as a list). The assigned object + should be *iterable*. The slicing’s lower and upper bounds should be + integers; if they are "None" (or not present), the defaults are zero + and the sequence’s length. If either bound is negative, the + sequence’s length is added to it. The resulting bounds are clipped + to lie between zero and the sequence’s length, inclusive. Finally, + the sequence object is asked to replace the slice with the items of + the assigned sequence. The length of the slice may be different + from the length of the assigned sequence, thus changing the length + of the target sequence, if the target sequence allows it. + +Although the definition of assignment implies that overlaps between +the left-hand side and the right-hand side are ‘simultaneous’ (for +example "a, b = b, a" swaps two variables), overlaps *within* the +collection of assigned-to variables occur left-to-right, sometimes +resulting in confusion. For instance, the following program prints +"[0, 2]": + + x = [0, 1] + i = 0 + i, x[i] = 1, 2 # i is updated, then x[i] is updated + print(x) + +See also: + + **PEP 3132** - Extended Iterable Unpacking + The specification for the "*target" feature. 
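+
+For example, a starred target receives a list of the items left over
+by the other targets (illustrative snippet):
+
+   >>> first, *middle, last = [1, 2, 3, 4, 5]
+   >>> first, middle, last
+   (1, [2, 3, 4], 5)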
+ + +Augmented assignment statements +=============================== + +Augmented assignment is the combination, in a single statement, of a +binary operation and an assignment statement: + + augmented_assignment_stmt: augtarget augop (expression_list | yield_expression) + augtarget: identifier | attributeref | subscription + augop: "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**=" + | ">>=" | "<<=" | "&=" | "^=" | "|=" + +(See section Primaries for the syntax definitions of the last three +symbols.) + +An augmented assignment evaluates the target (which, unlike normal +assignment statements, cannot be an unpacking) and the expression +list, performs the binary operation specific to the type of assignment +on the two operands, and assigns the result to the original target. +The target is only evaluated once. + +An augmented assignment statement like "x += 1" can be rewritten as "x += x + 1" to achieve a similar, but not exactly equal effect. In the +augmented version, "x" is only evaluated once. Also, when possible, +the actual operation is performed *in-place*, meaning that rather than +creating a new object and assigning that to the target, the old object +is modified instead. + +Unlike normal assignments, augmented assignments evaluate the left- +hand side *before* evaluating the right-hand side. For example, "a[i] ++= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs +the addition, and lastly, it writes the result back to "a[i]". + +With the exception of assigning to tuples and multiple targets in a +single statement, the assignment done by augmented assignment +statements is handled the same way as normal assignments. Similarly, +with the exception of the possible *in-place* behavior, the binary +operation performed by augmented assignment is the same as the normal +binary operations. + +For targets which are attribute references, the same caveat about +class and instance attributes applies as for regular assignments. + + +Annotated assignment statements +=============================== + +*Annotation* assignment is the combination, in a single statement, of +a variable or attribute annotation and an optional assignment +statement: + + annotated_assignment_stmt: augtarget ":" expression + ["=" (starred_expression | yield_expression)] + +The difference from normal Assignment statements is that only a single +target is allowed. + +The assignment target is considered “simple” if it consists of a +single name that is not enclosed in parentheses. For simple assignment +targets, if in class or module scope, the annotations are gathered in +a lazily evaluated annotation scope. The annotations can be evaluated +using the "__annotations__" attribute of a class or module, or using +the facilities in the "annotationlib" module. + +If the assignment target is not simple (an attribute, subscript node, +or parenthesized name), the annotation is never evaluated. + +If a name is annotated in a function scope, then this name is local +for that scope. Annotations are never evaluated and stored in function +scopes. + +If the right hand side is present, an annotated assignment performs +the actual assignment as if there was no annotation present. If the +right hand side is not present for an expression target, then the +interpreter evaluates the target except for the last "__setitem__()" +or "__setattr__()" call. 
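+
+A small illustrative sketch:
+
+   count: int = 0   # simple target: "count" is bound and annotated
+   name: str        # annotation only: no value is bound to "name"
+
+Accessing "name" before a later assignment raises "NameError".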
+ +See also: + + **PEP 526** - Syntax for Variable Annotations + The proposal that added syntax for annotating the types of + variables (including class variables and instance variables), + instead of expressing them through comments. + + **PEP 484** - Type hints + The proposal that added the "typing" module to provide a standard + syntax for type annotations that can be used in static analysis + tools and IDEs. + +Changed in version 3.8: Now annotated assignments allow the same +expressions in the right hand side as regular assignments. Previously, +some expressions (like un-parenthesized tuple expressions) caused a +syntax error. + +Changed in version 3.14: Annotations are now lazily evaluated in a +separate annotation scope. If the assignment target is not simple, +annotations are never evaluated. +''', + 'assignment-expressions': r'''Assignment expressions +********************** + + assignment_expression: [identifier ":="] expression + +An assignment expression (sometimes also called a “named expression” +or “walrus”) assigns an "expression" to an "identifier", while also +returning the value of the "expression". + +One common use case is when handling matched regular expressions: + + if matching := pattern.search(data): + do_something(matching) + +Or, when processing a file stream in chunks: + + while chunk := file.read(9000): + process(chunk) + +Assignment expressions must be surrounded by parentheses when used as +expression statements and when used as sub-expressions in slicing, +conditional, lambda, keyword-argument, and comprehension-if +expressions and in "assert", "with", and "assignment" statements. In +all other places where they can be used, parentheses are not required, +including in "if" and "while" statements. + +Added in version 3.8: See **PEP 572** for more details about +assignment expressions. +''', + 'async': r'''Coroutines +********** + +Added in version 3.5. + + +Coroutine function definition +============================= + + async_funcdef: [decorators] "async" "def" funcname "(" [parameter_list] ")" + ["->" expression] ":" suite + +Execution of Python coroutines can be suspended and resumed at many +points (see *coroutine*). "await" expressions, "async for" and "async +with" can only be used in the body of a coroutine function. + +Functions defined with "async def" syntax are always coroutine +functions, even if they do not contain "await" or "async" keywords. + +It is a "SyntaxError" to use a "yield from" expression inside the body +of a coroutine function. + +An example of a coroutine function: + + async def func(param1, param2): + do_stuff() + await some_coroutine() + +Changed in version 3.7: "await" and "async" are now keywords; +previously they were only treated as such inside the body of a +coroutine function. + + +The "async for" statement +========================= + + async_for_stmt: "async" for_stmt + +An *asynchronous iterable* provides an "__aiter__" method that +directly returns an *asynchronous iterator*, which can call +asynchronous code in its "__anext__" method. + +The "async for" statement allows convenient iteration over +asynchronous iterables. + +The following code: + + async for TARGET in ITER: + SUITE + else: + SUITE2 + +Is semantically equivalent to: + + iter = (ITER).__aiter__() + running = True + + while running: + try: + TARGET = await iter.__anext__() + except StopAsyncIteration: + running = False + else: + SUITE + else: + SUITE2 + +except that implicit special method lookup is used for "__aiter__()" +and "__anext__()". 
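+
+For example (using an illustrative asynchronous generator named
+"ticker"), "async for" consumes the values it yields:
+
+   async def ticker():
+       for i in range(3):
+           yield i
+
+   async def total():
+       result = 0
+       async for n in ticker():
+           result += n
+       return result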
+ +It is a "SyntaxError" to use an "async for" statement outside the body +of a coroutine function. + + +The "async with" statement +========================== + + async_with_stmt: "async" with_stmt + +An *asynchronous context manager* is a *context manager* that is able +to suspend execution in its *enter* and *exit* methods. + +The following code: + + async with EXPRESSION as TARGET: + SUITE + +is semantically equivalent to: + + manager = (EXPRESSION) + aenter = manager.__aenter__ + aexit = manager.__aexit__ + value = await aenter() + hit_except = False + + try: + TARGET = value + SUITE + except: + hit_except = True + if not await aexit(*sys.exc_info()): + raise + finally: + if not hit_except: + await aexit(None, None, None) + +except that implicit special method lookup is used for "__aenter__()" +and "__aexit__()". + +It is a "SyntaxError" to use an "async with" statement outside the +body of a coroutine function. + +See also: + + **PEP 492** - Coroutines with async and await syntax + The proposal that made coroutines a proper standalone concept in + Python, and added supporting syntax. +''', + 'atom-identifiers': r'''Identifiers (Names) +******************* + +An identifier occurring as an atom is a name. See section Names +(identifiers and keywords) for lexical definition and section Naming +and binding for documentation of naming and binding. + +When the name is bound to an object, evaluation of the atom yields +that object. When a name is not bound, an attempt to evaluate it +raises a "NameError" exception. + + +Private name mangling +===================== + +When an identifier that textually occurs in a class definition begins +with two or more underscore characters and does not end in two or more +underscores, it is considered a *private name* of that class. + +See also: The class specifications. + +More precisely, private names are transformed to a longer form before +code is generated for them. If the transformed name is longer than +255 characters, implementation-defined truncation may happen. + +The transformation is independent of the syntactical context in which +the identifier is used but only the following private identifiers are +mangled: + +* Any name used as the name of a variable that is assigned or read or + any name of an attribute being accessed. + + The "__name__" attribute of nested functions, classes, and type + aliases is however not mangled. + +* The name of imported modules, e.g., "__spam" in "import __spam". If + the module is part of a package (i.e., its name contains a dot), the + name is *not* mangled, e.g., the "__foo" in "import __foo.bar" is + not mangled. + +* The name of an imported member, e.g., "__f" in "from spam import + __f". + +The transformation rule is defined as follows: + +* The class name, with leading underscores removed and a single + leading underscore inserted, is inserted in front of the identifier, + e.g., the identifier "__spam" occurring in a class named "Foo", + "_Foo" or "__Foo" is transformed to "_Foo__spam". + +* If the class name consists only of underscores, the transformation + is the identity, e.g., the identifier "__spam" occurring in a class + named "_" or "__" is left as is. +''', + 'atom-literals': r'''Literals +******** + +A *literal* is a textual representation of a value. Python supports +numeric, string and bytes literals. Format strings and template +strings are treated as string literals. + +Numeric literals consist of a single "NUMBER" token, which names an +integer, floating-point number, or an imaginary number. 
See the
+Numeric literals section in Lexical analysis documentation for
+details.
+
+String and bytes literals may consist of several tokens. See section
+String literal concatenation for details.
+
+Note that negative and complex numbers, like "-3" or "3+4.2j", are
+syntactically not literals, but unary or binary arithmetic operations
+involving the "-" or "+" operator.
+
+Evaluation of a literal yields an object of the given type ("int",
+"float", "complex", "str", "bytes", or "Template") with the given
+value. The value may be approximated in the case of floating-point and
+imaginary literals.
+
+The formal grammar for literals is:
+
+   literal: strings | NUMBER
+
+
+Literals and object identity
+============================
+
+All literals correspond to immutable data types, and hence the
+object’s identity is less important than its value. Multiple
+evaluations of literals with the same value (either the same
+occurrence in the program text or a different occurrence) may obtain
+the same object or a different object with the same value.
+
+**CPython implementation detail:** For example, in CPython, *small*
+integers with the same value evaluate to the same object:
+
+   >>> x = 7
+   >>> y = 7
+   >>> x is y
+   True
+
+However, large integers evaluate to different objects:
+
+   >>> x = 123456789
+   >>> y = 123456789
+   >>> x is y
+   False
+
+This behavior may change in future versions of CPython. In particular,
+the boundary between “small” and “large” integers has already changed
+in the past.
+
+CPython will emit a "SyntaxWarning" when you compare literals using
+"is":
+
+   >>> x = 7
+   >>> x is 7
+   <stdin>:1: SyntaxWarning: "is" with 'int' literal. Did you mean "=="?
+   True
+
+See When can I rely on identity tests with the is operator? for more
+information.
+
+Template strings are immutable but may reference mutable objects as
+"Interpolation" values. For the purposes of this section, two
+t-strings have the “same value” if both their structure and the
+*identity* of the values match.
+
+**CPython implementation detail:** Currently, each evaluation of a
+template string results in a different object.
+
+
+String literal concatenation
+============================
+
+Multiple adjacent string or bytes literals, possibly using different
+quoting conventions, are allowed, and their meaning is the same as
+their concatenation:
+
+   >>> "hello" 'world'
+   'helloworld'
+
+This feature is defined at the syntactical level, so it only works
+with literals. To concatenate string expressions at run time, the ‘+’
+operator may be used:
+
+   >>> greeting = "Hello"
+   >>> space = " "
+   >>> name = "Blaise"
+   >>> print(greeting + space + name)  # not: print(greeting space name)
+   Hello Blaise
+
+Literal concatenation can freely mix raw strings, triple-quoted
+strings, and formatted string literals. For example:
+
+   >>> "Hello" r', ' f"{name}!"
+   'Hello, Blaise!'
+
+This feature can be used to reduce the number of backslashes needed,
+to split long strings conveniently across long lines, or even to add
+comments to parts of strings. For example:
+
+   re.compile("[A-Za-z_]"       # letter or underscore
+              "[A-Za-z0-9_]*"   # letter, digit or underscore
+              )
+
+However, bytes literals may only be combined with other byte literals;
+not with string literals of any kind. Also, template string literals
+may only be combined with other template string literals:
+
+   >>> t"Hello" t"{name}!"
+ Template(strings=('Hello', '!'), interpolations=(...)) + +Formally: + + strings: (STRING | fstring)+ | tstring+ +''', + 'attribute-access': r'''Customizing attribute access +**************************** + +The following methods can be defined to customize the meaning of +attribute access (use of, assignment to, or deletion of "x.name") for +class instances. + +object.__getattr__(self, name) + + Called when the default attribute access fails with an + "AttributeError" (either "__getattribute__()" raises an + "AttributeError" because *name* is not an instance attribute or an + attribute in the class tree for "self"; or "__get__()" of a *name* + property raises "AttributeError"). This method should either + return the (computed) attribute value or raise an "AttributeError" + exception. The "object" class itself does not provide this method. + + Note that if the attribute is found through the normal mechanism, + "__getattr__()" is not called. (This is an intentional asymmetry + between "__getattr__()" and "__setattr__()".) This is done both for + efficiency reasons and because otherwise "__getattr__()" would have + no way to access other attributes of the instance. Note that at + least for instance variables, you can take total control by not + inserting any values in the instance attribute dictionary (but + instead inserting them in another object). See the + "__getattribute__()" method below for a way to actually get total + control over attribute access. + +object.__getattribute__(self, name) + + Called unconditionally to implement attribute accesses for + instances of the class. If the class also defines "__getattr__()", + the latter will not be called unless "__getattribute__()" either + calls it explicitly or raises an "AttributeError". This method + should return the (computed) attribute value or raise an + "AttributeError" exception. In order to avoid infinite recursion in + this method, its implementation should always call the base class + method with the same name to access any attributes it needs, for + example, "object.__getattribute__(self, name)". + + Note: + + This method may still be bypassed when looking up special methods + as the result of implicit invocation via language syntax or + built-in functions. See Special method lookup. + + For certain sensitive attribute accesses, raises an auditing event + "object.__getattr__" with arguments "obj" and "name". + +object.__setattr__(self, name, value) + + Called when an attribute assignment is attempted. This is called + instead of the normal mechanism (i.e. store the value in the + instance dictionary). *name* is the attribute name, *value* is the + value to be assigned to it. + + If "__setattr__()" wants to assign to an instance attribute, it + should call the base class method with the same name, for example, + "object.__setattr__(self, name, value)". + + For certain sensitive attribute assignments, raises an auditing + event "object.__setattr__" with arguments "obj", "name", "value". + +object.__delattr__(self, name) + + Like "__setattr__()" but for attribute deletion instead of + assignment. This should only be implemented if "del obj.name" is + meaningful for the object. + + For certain sensitive attribute deletions, raises an auditing event + "object.__delattr__" with arguments "obj" and "name". + +object.__dir__(self) + + Called when "dir()" is called on the object. An iterable must be + returned. "dir()" converts the returned iterable to a list and + sorts it. 
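+
+As an illustrative sketch of the "__getattr__()" fallback (the class
+and the computed attribute here are invented for this example):
+
+   >>> class Celsius:
+   ...     def __init__(self, temperature):
+   ...         self.temperature = temperature
+   ...     def __getattr__(self, name):
+   ...         # Called only after normal attribute lookup fails.
+   ...         if name == "fahrenheit":
+   ...             return self.temperature * 9 / 5 + 32
+   ...         raise AttributeError(name)
+   ...
+   >>> c = Celsius(100.0)
+   >>> c.temperature  # found normally, so __getattr__() is not called
+   100.0
+   >>> c.fahrenheit   # normal lookup fails, so __getattr__() computes it
+   212.0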
+ + +Customizing module attribute access +=================================== + +module.__getattr__() +module.__dir__() + +Special names "__getattr__" and "__dir__" can be also used to +customize access to module attributes. The "__getattr__" function at +the module level should accept one argument which is the name of an +attribute and return the computed value or raise an "AttributeError". +If an attribute is not found on a module object through the normal +lookup, i.e. "object.__getattribute__()", then "__getattr__" is +searched in the module "__dict__" before raising an "AttributeError". +If found, it is called with the attribute name and the result is +returned. + +The "__dir__" function should accept no arguments, and return an +iterable of strings that represents the names accessible on module. If +present, this function overrides the standard "dir()" search on a +module. + +module.__class__ + +For a more fine grained customization of the module behavior (setting +attributes, properties, etc.), one can set the "__class__" attribute +of a module object to a subclass of "types.ModuleType". For example: + + import sys + from types import ModuleType + + class VerboseModule(ModuleType): + def __repr__(self): + return f'Verbose {self.__name__}' + + def __setattr__(self, attr, value): + print(f'Setting {attr}...') + super().__setattr__(attr, value) + + sys.modules[__name__].__class__ = VerboseModule + +Note: + + Defining module "__getattr__" and setting module "__class__" only + affect lookups made using the attribute access syntax – directly + accessing the module globals (whether by code within the module, or + via a reference to the module’s globals dictionary) is unaffected. + +Changed in version 3.5: "__class__" module attribute is now writable. + +Added in version 3.7: "__getattr__" and "__dir__" module attributes. + +See also: + + **PEP 562** - Module __getattr__ and __dir__ + Describes the "__getattr__" and "__dir__" functions on modules. + + +Implementing Descriptors +======================== + +The following methods only apply when an instance of the class +containing the method (a so-called *descriptor* class) appears in an +*owner* class (the descriptor must be in either the owner’s class +dictionary or in the class dictionary for one of its parents). In the +examples below, “the attribute” refers to the attribute whose name is +the key of the property in the owner class’ "__dict__". The "object" +class itself does not implement any of these protocols. + +object.__get__(self, instance, owner=None) + + Called to get the attribute of the owner class (class attribute + access) or of an instance of that class (instance attribute + access). The optional *owner* argument is the owner class, while + *instance* is the instance that the attribute was accessed through, + or "None" when the attribute is accessed through the *owner*. + + This method should return the computed attribute value or raise an + "AttributeError" exception. + + **PEP 252** specifies that "__get__()" is callable with one or two + arguments. Python’s own built-in descriptors support this + specification; however, it is likely that some third-party tools + have descriptors that require both arguments. Python’s own + "__getattribute__()" implementation always passes in both arguments + whether they are required or not. + +object.__set__(self, instance, value) + + Called to set the attribute on an instance *instance* of the owner + class to a new value, *value*. 
+ + Note, adding "__set__()" or "__delete__()" changes the kind of + descriptor to a “data descriptor”. See Invoking Descriptors for + more details. + +object.__delete__(self, instance) + + Called to delete the attribute on an instance *instance* of the + owner class. + +Instances of descriptors may also have the "__objclass__" attribute +present: + +object.__objclass__ + + The attribute "__objclass__" is interpreted by the "inspect" module + as specifying the class where this object was defined (setting this + appropriately can assist in runtime introspection of dynamic class + attributes). For callables, it may indicate that an instance of the + given type (or a subclass) is expected or required as the first + positional argument (for example, CPython sets this attribute for + unbound methods that are implemented in C). + + +Invoking Descriptors +==================== + +In general, a descriptor is an object attribute with “binding +behavior”, one whose attribute access has been overridden by methods +in the descriptor protocol: "__get__()", "__set__()", and +"__delete__()". If any of those methods are defined for an object, it +is said to be a descriptor. + +The default behavior for attribute access is to get, set, or delete +the attribute from an object’s dictionary. For instance, "a.x" has a +lookup chain starting with "a.__dict__['x']", then +"type(a).__dict__['x']", and continuing through the base classes of +"type(a)" excluding metaclasses. + +However, if the looked-up value is an object defining one of the +descriptor methods, then Python may override the default behavior and +invoke the descriptor method instead. Where this occurs in the +precedence chain depends on which descriptor methods were defined and +how they were called. + +The starting point for descriptor invocation is a binding, "a.x". How +the arguments are assembled depends on "a": + +Direct Call + The simplest and least common call is when user code directly + invokes a descriptor method: "x.__get__(a)". + +Instance Binding + If binding to an object instance, "a.x" is transformed into the + call: "type(a).__dict__['x'].__get__(a, type(a))". + +Class Binding + If binding to a class, "A.x" is transformed into the call: + "A.__dict__['x'].__get__(None, A)". + +Super Binding + A dotted lookup such as "super(A, a).x" searches + "a.__class__.__mro__" for a base class "B" following "A" and then + returns "B.__dict__['x'].__get__(a, A)". If not a descriptor, "x" + is returned unchanged. + +For instance bindings, the precedence of descriptor invocation depends +on which descriptor methods are defined. A descriptor can define any +combination of "__get__()", "__set__()" and "__delete__()". If it +does not define "__get__()", then accessing the attribute will return +the descriptor object itself unless there is a value in the object’s +instance dictionary. If the descriptor defines "__set__()" and/or +"__delete__()", it is a data descriptor; if it defines neither, it is +a non-data descriptor. Normally, data descriptors define both +"__get__()" and "__set__()", while non-data descriptors have just the +"__get__()" method. Data descriptors with "__get__()" and "__set__()" +(and/or "__delete__()") defined always override a redefinition in an +instance dictionary. In contrast, non-data descriptors can be +overridden by instances. + +Python methods (including those decorated with "@staticmethod" and +"@classmethod") are implemented as non-data descriptors. Accordingly, +instances can redefine and override methods. 
This allows individual +instances to acquire behaviors that differ from other instances of the +same class. + +The "property()" function is implemented as a data descriptor. +Accordingly, instances cannot override the behavior of a property. + + +__slots__ +========= + +*__slots__* allow us to explicitly declare data members (like +properties) and deny the creation of "__dict__" and *__weakref__* +(unless explicitly declared in *__slots__* or available in a parent.) + +The space saved over using "__dict__" can be significant. Attribute +lookup speed can be significantly improved as well. + +object.__slots__ + + This class variable can be assigned a string, iterable, or sequence + of strings with variable names used by instances. *__slots__* + reserves space for the declared variables and prevents the + automatic creation of "__dict__" and *__weakref__* for each + instance. + +Notes on using *__slots__*: + +* When inheriting from a class without *__slots__*, the "__dict__" and + *__weakref__* attribute of the instances will always be accessible. + +* Without a "__dict__" variable, instances cannot be assigned new + variables not listed in the *__slots__* definition. Attempts to + assign to an unlisted variable name raises "AttributeError". If + dynamic assignment of new variables is desired, then add + "'__dict__'" to the sequence of strings in the *__slots__* + declaration. + +* Without a *__weakref__* variable for each instance, classes defining + *__slots__* do not support "weak references" to its instances. If + weak reference support is needed, then add "'__weakref__'" to the + sequence of strings in the *__slots__* declaration. + +* *__slots__* are implemented at the class level by creating + descriptors for each variable name. As a result, class attributes + cannot be used to set default values for instance variables defined + by *__slots__*; otherwise, the class attribute would overwrite the + descriptor assignment. + +* The action of a *__slots__* declaration is not limited to the class + where it is defined. *__slots__* declared in parents are available + in child classes. However, instances of a child subclass will get a + "__dict__" and *__weakref__* unless the subclass also defines + *__slots__* (which should only contain names of any *additional* + slots). + +* If a class defines a slot also defined in a base class, the instance + variable defined by the base class slot is inaccessible (except by + retrieving its descriptor directly from the base class). This + renders the meaning of the program undefined. In the future, a + check may be added to prevent this. + +* "TypeError" will be raised if nonempty *__slots__* are defined for a + class derived from a ""variable-length" built-in type" such as + "int", "bytes", and "tuple". + +* Any non-string *iterable* may be assigned to *__slots__*. + +* If a "dictionary" is used to assign *__slots__*, the dictionary keys + will be used as the slot names. The values of the dictionary can be + used to provide per-attribute docstrings that will be recognised by + "inspect.getdoc()" and displayed in the output of "help()". + +* "__class__" assignment works only if both classes have the same + *__slots__*. + +* Multiple inheritance with multiple slotted parent classes can be + used, but only one parent is allowed to have attributes created by + slots (the other bases must have empty slot layouts) - violations + raise "TypeError". + +* If an *iterator* is used for *__slots__* then a *descriptor* is + created for each of the iterator’s values. 
However, the *__slots__*
+  attribute will be an empty iterator.
+''',
+ 'attribute-references': r'''Attribute references
+********************
+
+An attribute reference is a primary followed by a period and a name:
+
+   attributeref: primary "." identifier
+
+The primary must evaluate to an object of a type that supports
+attribute references, which most objects do. This object is then
+asked to produce the attribute whose name is the identifier. The type
+and value produced is determined by the object. Multiple evaluations
+of the same attribute reference may yield different objects.
+
+This production can be customized by overriding the
+"__getattribute__()" method or the "__getattr__()" method. The
+"__getattribute__()" method is called first and either returns a value
+or raises "AttributeError" if the attribute is not available.
+
+If an "AttributeError" is raised and the object has a "__getattr__()"
+method, that method is called as a fallback.
+''',
+ 'augassign': r'''Augmented assignment statements
+*******************************
+
+Augmented assignment is the combination, in a single statement, of a
+binary operation and an assignment statement:
+
+   augmented_assignment_stmt: augtarget augop (expression_list | yield_expression)
+   augtarget: identifier | attributeref | subscription | slicing
+   augop: "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**="
+          | ">>=" | "<<=" | "&=" | "^=" | "|="
+
+(See section Primaries for the syntax definitions of the last three
+symbols.)
+
+An augmented assignment evaluates the target (which, unlike normal
+assignment statements, cannot be an unpacking) and the expression
+list, performs the binary operation specific to the type of assignment
+on the two operands, and assigns the result to the original target.
+The target is only evaluated once.
+
+An augmented assignment statement like "x += 1" can be rewritten as
+"x = x + 1" to achieve a similar, but not exactly equal effect. In the
+augmented version, "x" is only evaluated once. Also, when possible,
+the actual operation is performed *in-place*, meaning that rather than
+creating a new object and assigning that to the target, the old object
+is modified instead.
+
+Unlike normal assignments, augmented assignments evaluate the left-
+hand side *before* evaluating the right-hand side. For example, "a[i]
++= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs
+the addition, and lastly, it writes the result back to "a[i]".
+
+With the exception of assigning to tuples and multiple targets in a
+single statement, the assignment done by augmented assignment
+statements is handled the same way as normal assignments. Similarly,
+with the exception of the possible *in-place* behavior, the binary
+operation performed by augmented assignment is the same as the normal
+binary operations.
+
+For targets which are attribute references, the same caveat about
+class and instance attributes applies as for regular assignments.
+''',
+ 'await': r'''Await expression
+****************
+
+Suspend the execution of *coroutine* on an *awaitable* object. Can
+only be used inside a *coroutine function*.
+
+   await_expr: "await" primary
+
+Added in version 3.5.
+''',
+ 'binary': r'''Binary arithmetic operations
+****************************
+
+The binary arithmetic operations have the conventional priority
+levels. Note that some of these operations also apply to certain non-
+numeric types.
Apart from the power operator, there are only two +levels, one for multiplicative operators and one for additive +operators: + + m_expr: u_expr | m_expr "*" u_expr | m_expr "@" m_expr | + m_expr "//" u_expr | m_expr "/" u_expr | + m_expr "%" u_expr + a_expr: m_expr | a_expr "+" m_expr | a_expr "-" m_expr + +The "*" (multiplication) operator yields the product of its arguments. +The arguments must either both be numbers, or one argument must be an +integer and the other must be a sequence. In the former case, the +numbers are converted to a common real type and then multiplied +together. In the latter case, sequence repetition is performed; a +negative repetition factor yields an empty sequence. + +This operation can be customized using the special "__mul__()" and +"__rmul__()" methods. + +Changed in version 3.14: If only one operand is a complex number, the +other operand is converted to a floating-point number. + +The "@" (at) operator is intended to be used for matrix +multiplication. No builtin Python types implement this operator. + +This operation can be customized using the special "__matmul__()" and +"__rmatmul__()" methods. + +Added in version 3.5. + +The "/" (division) and "//" (floor division) operators yield the +quotient of their arguments. The numeric arguments are first +converted to a common type. Division of integers yields a float, while +floor division of integers results in an integer; the result is that +of mathematical division with the ‘floor’ function applied to the +result. Division by zero raises the "ZeroDivisionError" exception. + +The division operation can be customized using the special +"__truediv__()" and "__rtruediv__()" methods. The floor division +operation can be customized using the special "__floordiv__()" and +"__rfloordiv__()" methods. + +The "%" (modulo) operator yields the remainder from the division of +the first argument by the second. The numeric arguments are first +converted to a common type. A zero right argument raises the +"ZeroDivisionError" exception. The arguments may be floating-point +numbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 + +0.34".) The modulo operator always yields a result with the same sign +as its second operand (or zero); the absolute value of the result is +strictly smaller than the absolute value of the second operand [1]. + +The floor division and modulo operators are connected by the following +identity: "x == (x//y)*y + (x%y)". Floor division and modulo are also +connected with the built-in function "divmod()": "divmod(x, y) == +(x//y, x%y)". [2]. + +In addition to performing the modulo operation on numbers, the "%" +operator is also overloaded by string objects to perform old-style +string formatting (also known as interpolation). The syntax for +string formatting is described in the Python Library Reference, +section printf-style String Formatting. + +The *modulo* operation can be customized using the special "__mod__()" +and "__rmod__()" methods. + +The floor division operator, the modulo operator, and the "divmod()" +function are not defined for complex numbers. Instead, convert to a +floating-point number using the "abs()" function if appropriate. + +The "+" (addition) operator yields the sum of its arguments. The +arguments must either both be numbers or both be sequences of the same +type. In the former case, the numbers are converted to a common real +type and then added together. In the latter case, the sequences are +concatenated. 
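+
+For instance, in an illustrative session:
+
+   >>> 3 + 4.25        # numbers are converted to a common type
+   7.25
+   >>> [1, 2] + [3]    # sequences of the same type are concatenated
+   [1, 2, 3]
+   >>> (1, 2) + [3]    # mixing sequence types fails
+   Traceback (most recent call last):
+     ...
+   TypeError: can only concatenate tuple (not "list") to tuple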
+ +This operation can be customized using the special "__add__()" and +"__radd__()" methods. + +Changed in version 3.14: If only one operand is a complex number, the +other operand is converted to a floating-point number. + +The "-" (subtraction) operator yields the difference of its arguments. +The numeric arguments are first converted to a common real type. + +This operation can be customized using the special "__sub__()" and +"__rsub__()" methods. + +Changed in version 3.14: If only one operand is a complex number, the +other operand is converted to a floating-point number. +''', + 'bitwise': r'''Binary bitwise operations +************************* + +Each of the three bitwise operations has a different priority level: + + and_expr: shift_expr | and_expr "&" shift_expr + xor_expr: and_expr | xor_expr "^" and_expr + or_expr: xor_expr | or_expr "|" xor_expr + +The "&" operator yields the bitwise AND of its arguments, which must +be integers or one of them must be a custom object overriding +"__and__()" or "__rand__()" special methods. + +The "^" operator yields the bitwise XOR (exclusive OR) of its +arguments, which must be integers or one of them must be a custom +object overriding "__xor__()" or "__rxor__()" special methods. + +The "|" operator yields the bitwise (inclusive) OR of its arguments, +which must be integers or one of them must be a custom object +overriding "__or__()" or "__ror__()" special methods. +''', + 'bltin-code-objects': r'''Code Objects +************ + +Code objects are used by the implementation to represent “pseudo- +compiled” executable Python code such as a function body. They differ +from function objects because they don’t contain a reference to their +global execution environment. Code objects are returned by the built- +in "compile()" function and can be extracted from function objects +through their "__code__" attribute. See also the "code" module. + +Accessing "__code__" raises an auditing event "object.__getattr__" +with arguments "obj" and ""__code__"". + +A code object can be executed or evaluated by passing it (instead of a +source string) to the "exec()" or "eval()" built-in functions. + +See The standard type hierarchy for more information. +''', + 'bltin-ellipsis-object': r'''The Ellipsis Object +******************* + +This object is commonly used to indicate that something is omitted. It +supports no special operations. There is exactly one ellipsis object, +named "Ellipsis" (a built-in name). "type(Ellipsis)()" produces the +"Ellipsis" singleton. + +It is written as "Ellipsis" or "...". + +In typical use, "..." as the "Ellipsis" object appears in a few +different places, for instance: + +* In type annotations, such as callable arguments or tuple elements. + +* As the body of a function instead of a pass statement. + +* In third-party libraries, such as Numpy’s slicing and striding. + +Python also uses three dots in ways that are not "Ellipsis" objects, +for instance: + +* Doctest’s "ELLIPSIS", as a pattern for missing content. + +* The default Python prompt of the *interactive* shell when partial + input is incomplete. + +Lastly, the Python documentation often uses three dots in conventional +English usage to mean omitted content, even in code examples that also +use them as the "Ellipsis". +''', + 'bltin-null-object': r'''The Null Object +*************** + +This object is returned by functions that don’t explicitly return a +value. It supports no special operations. There is exactly one null +object, named "None" (a built-in name). 
"type(None)()" produces the +same singleton. + +It is written as "None". +''', + 'bltin-type-objects': r'''Type Objects +************ + +Type objects represent the various object types. An object’s type is +accessed by the built-in function "type()". There are no special +operations on types. The standard module "types" defines names for +all standard built-in types. + +Types are written like this: "". +''', + 'booleans': r'''Boolean operations +****************** + + or_test: and_test | or_test "or" and_test + and_test: not_test | and_test "and" not_test + not_test: comparison | "not" not_test + +In the context of Boolean operations, and also when expressions are +used by control flow statements, the following values are interpreted +as false: "False", "None", numeric zero of all types, and empty +strings and containers (including strings, tuples, lists, +dictionaries, sets and frozensets). All other values are interpreted +as true. User-defined objects can customize their truth value by +providing a "__bool__()" method. + +The operator "not" yields "True" if its argument is false, "False" +otherwise. + +The expression "x and y" first evaluates *x*; if *x* is false, its +value is returned; otherwise, *y* is evaluated and the resulting value +is returned. + +The expression "x or y" first evaluates *x*; if *x* is true, its value +is returned; otherwise, *y* is evaluated and the resulting value is +returned. + +Note that neither "and" nor "or" restrict the value and type they +return to "False" and "True", but rather return the last evaluated +argument. This is sometimes useful, e.g., if "s" is a string that +should be replaced by a default value if it is empty, the expression +"s or 'foo'" yields the desired value. Because "not" has to create a +new value, it returns a boolean value regardless of the type of its +argument (for example, "not 'foo'" produces "False" rather than "''".) +''', + 'break': r'''The "break" statement +********************* + + break_stmt: "break" + +"break" may only occur syntactically nested in a "for" or "while" +loop, but not nested in a function or class definition within that +loop. + +It terminates the nearest enclosing loop, skipping the optional "else" +clause if the loop has one. + +If a "for" loop is terminated by "break", the loop control target +keeps its current value. + +When "break" passes control out of a "try" statement with a "finally" +clause, that "finally" clause is executed before really leaving the +loop. +''', + 'callable-types': r'''Emulating callable objects +************************** + +object.__call__(self[, args...]) + + Called when the instance is “called” as a function; if this method + is defined, "x(arg1, arg2, ...)" roughly translates to + "type(x).__call__(x, arg1, ...)". The "object" class itself does + not provide this method. 
+''', + 'calls': r'''Calls +***** + +A call calls a callable object (e.g., a *function*) with a possibly +empty series of *arguments*: + + call: primary "(" [argument_list [","] | comprehension] ")" + argument_list: positional_arguments ["," starred_and_keywords] + ["," keywords_arguments] + | starred_and_keywords ["," keywords_arguments] + | keywords_arguments + positional_arguments: positional_item ("," positional_item)* + positional_item: assignment_expression | "*" expression + starred_and_keywords: ("*" expression | keyword_item) + ("," "*" expression | "," keyword_item)* + keywords_arguments: (keyword_item | "**" expression) + ("," keyword_item | "," "**" expression)* + keyword_item: identifier "=" expression + +An optional trailing comma may be present after the positional and +keyword arguments but does not affect the semantics. + +The primary must evaluate to a callable object (user-defined +functions, built-in functions, methods of built-in objects, class +objects, methods of class instances, and all objects having a +"__call__()" method are callable). All argument expressions are +evaluated before the call is attempted. Please refer to section +Function definitions for the syntax of formal *parameter* lists. + +If keyword arguments are present, they are first converted to +positional arguments, as follows. First, a list of unfilled slots is +created for the formal parameters. If there are N positional +arguments, they are placed in the first N slots. Next, for each +keyword argument, the identifier is used to determine the +corresponding slot (if the identifier is the same as the first formal +parameter name, the first slot is used, and so on). If the slot is +already filled, a "TypeError" exception is raised. Otherwise, the +argument is placed in the slot, filling it (even if the expression is +"None", it fills the slot). When all arguments have been processed, +the slots that are still unfilled are filled with the corresponding +default value from the function definition. (Default values are +calculated, once, when the function is defined; thus, a mutable object +such as a list or dictionary used as default value will be shared by +all calls that don’t specify an argument value for the corresponding +slot; this should usually be avoided.) If there are any unfilled +slots for which no default value is specified, a "TypeError" exception +is raised. Otherwise, the list of filled slots is used as the +argument list for the call. + +**CPython implementation detail:** An implementation may provide +built-in functions whose positional parameters do not have names, even +if they are ‘named’ for the purpose of documentation, and which +therefore cannot be supplied by keyword. In CPython, this is the case +for functions implemented in C that use "PyArg_ParseTuple()" to parse +their arguments. + +If there are more positional arguments than there are formal parameter +slots, a "TypeError" exception is raised, unless a formal parameter +using the syntax "*identifier" is present; in this case, that formal +parameter receives a tuple containing the excess positional arguments +(or an empty tuple if there were no excess positional arguments). 
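+
+For example, in an illustrative session (the function name is
+arbitrary):
+
+   >>> def g(a, *rest):
+   ...     print(a, rest)
+   ...
+   >>> g(1, 2, 3)   # the excess arguments 2 and 3 go into rest
+   1 (2, 3)
+   >>> g(1)         # no excess arguments: rest is an empty tuple
+   1 ()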
+
+If any keyword argument does not correspond to a formal parameter
+name, a "TypeError" exception is raised, unless a formal parameter
+using the syntax "**identifier" is present; in this case, that formal
+parameter receives a dictionary containing the excess keyword
+arguments (using the keywords as keys and the argument values as
+corresponding values), or a (new) empty dictionary if there were no
+excess keyword arguments.
+
+If the syntax "*expression" appears in the function call, "expression"
+must evaluate to an *iterable*. Elements from these iterables are
+treated as if they were additional positional arguments. For the call
+"f(x1, x2, *y, x3, x4)", if *y* evaluates to a sequence *y1*, …, *yM*,
+this is equivalent to a call with M+4 positional arguments *x1*, *x2*,
+*y1*, …, *yM*, *x3*, *x4*.
+
+A consequence of this is that although the "*expression" syntax may
+appear *after* explicit keyword arguments, it is processed *before*
+the keyword arguments (and any "**expression" arguments – see below).
+So:
+
+   >>> def f(a, b):
+   ...     print(a, b)
+   ...
+   >>> f(b=1, *(2,))
+   2 1
+   >>> f(a=1, *(2,))
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   TypeError: f() got multiple values for keyword argument 'a'
+   >>> f(1, *(2,))
+   1 2
+
+It is unusual for both keyword arguments and the "*expression" syntax
+to be used in the same call, so in practice this confusion does not
+often arise.
+
+If the syntax "**expression" appears in the function call,
+"expression" must evaluate to a *mapping*, the contents of which are
+treated as additional keyword arguments. If a parameter matching a key
+has already been given a value (by an explicit keyword argument, or
+from another unpacking), a "TypeError" exception is raised.
+
+When "**expression" is used, each key in this mapping must be a
+string. Each value from the mapping is assigned to the first formal
+parameter eligible for keyword assignment whose name is equal to the
+key. A key need not be a Python identifier (e.g. ""max-temp °F"" is
+acceptable, although it will not match any formal parameter that could
+be declared). If there is no match to a formal parameter the key-value
+pair is collected by the "**" parameter, if there is one, or if there
+is not, a "TypeError" exception is raised.
+
+Formal parameters using the syntax "*identifier" or "**identifier"
+cannot be used as positional argument slots or as keyword argument
+names.
+
+Changed in version 3.5: Function calls accept any number of "*" and
+"**" unpackings, positional arguments may follow iterable unpackings
+("*"), and keyword arguments may follow dictionary unpackings ("**").
+Originally proposed by **PEP 448**.
+
+A call always returns some value, possibly "None", unless it raises an
+exception. How this value is computed depends on the type of the
+callable object.
+
+If it is—
+
+a user-defined function:
+   The code block for the function is executed, passing it the
+   argument list. The first thing the code block will do is bind the
+   formal parameters to the arguments; this is described in section
+   Function definitions. When the code block executes a "return"
+   statement, this specifies the return value of the function call.
+   If execution reaches the end of the code block without executing a
+   "return" statement, the return value is "None".
+
+a built-in function or method:
+   The result is up to the interpreter; see Built-in Functions for the
+   descriptions of built-in functions and methods.
+
+a class object:
+   A new instance of that class is returned.
+ +a class instance method: + The corresponding user-defined function is called, with an argument + list that is one longer than the argument list of the call: the + instance becomes the first argument. + +a class instance: + The class must define a "__call__()" method; the effect is then the + same as if that method was called. +''', + 'class': r'''Class definitions +***************** + +A class definition defines a class object (see section The standard +type hierarchy): + + classdef: [decorators] "class" classname [type_params] [inheritance] ":" suite + inheritance: "(" [argument_list] ")" + classname: identifier + +A class definition is an executable statement. The inheritance list +usually gives a list of base classes (see Metaclasses for more +advanced uses), so each item in the list should evaluate to a class +object which allows subclassing. Classes without an inheritance list +inherit, by default, from the base class "object"; hence, + + class Foo: + pass + +is equivalent to + + class Foo(object): + pass + +The class’s suite is then executed in a new execution frame (see +Naming and binding), using a newly created local namespace and the +original global namespace. (Usually, the suite contains mostly +function definitions.) When the class’s suite finishes execution, its +execution frame is discarded but its local namespace is saved. [5] A +class object is then created using the inheritance list for the base +classes and the saved local namespace for the attribute dictionary. +The class name is bound to this class object in the original local +namespace. + +The order in which attributes are defined in the class body is +preserved in the new class’s "__dict__". Note that this is reliable +only right after the class is created and only for classes that were +defined using the definition syntax. + +Class creation can be customized heavily using metaclasses. + +Classes can also be decorated: just like when decorating functions, + + @f1(arg) + @f2 + class Foo: pass + +is roughly equivalent to + + class Foo: pass + Foo = f1(arg)(f2(Foo)) + +The evaluation rules for the decorator expressions are the same as for +function decorators. The result is then bound to the class name. + +Changed in version 3.9: Classes may be decorated with any valid +"assignment_expression". Previously, the grammar was much more +restrictive; see **PEP 614** for details. + +A list of type parameters may be given in square brackets immediately +after the class’s name. This indicates to static type checkers that +the class is generic. At runtime, the type parameters can be retrieved +from the class’s "__type_params__" attribute. See Generic classes for +more. + +Changed in version 3.12: Type parameter lists are new in Python 3.12. + +**Programmer’s note:** Variables defined in the class definition are +class attributes; they are shared by instances. Instance attributes +can be set in a method with "self.name = value". Both class and +instance attributes are accessible through the notation “"self.name"”, +and an instance attribute hides a class attribute with the same name +when accessed in this way. Class attributes can be used as defaults +for instance attributes, but using mutable values there can lead to +unexpected results. Descriptors can be used to create instance +variables with different implementation details. 
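+
+The following sketch (the class name is invented) illustrates why
+mutable values are problematic as class-attribute defaults:
+
+   >>> class Dog:
+   ...     tricks = []                  # class attribute, shared
+   ...     def add_trick(self, trick):
+   ...         self.tricks.append(trick)
+   ...
+   >>> a, b = Dog(), Dog()
+   >>> a.add_trick('roll over')
+   >>> b.tricks                         # unexpectedly shared by b
+   ['roll over']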
+ +See also: + + **PEP 3115** - Metaclasses in Python 3000 + The proposal that changed the declaration of metaclasses to the + current syntax, and the semantics for how classes with + metaclasses are constructed. + + **PEP 3129** - Class Decorators + The proposal that added class decorators. Function and method + decorators were introduced in **PEP 318**. +''', + 'comparisons': r'''Comparisons +*********** + +Unlike C, all comparison operations in Python have the same priority, +which is lower than that of any arithmetic, shifting or bitwise +operation. Also unlike C, expressions like "a < b < c" have the +interpretation that is conventional in mathematics: + + comparison: or_expr (comp_operator or_expr)* + comp_operator: "<" | ">" | "==" | ">=" | "<=" | "!=" + | "is" ["not"] | ["not"] "in" + +Comparisons yield boolean values: "True" or "False". Custom *rich +comparison methods* may return non-boolean values. In this case Python +will call "bool()" on such value in boolean contexts. + +Comparisons can be chained arbitrarily, e.g., "x < y <= z" is +equivalent to "x < y and y <= z", except that "y" is evaluated only +once (but in both cases "z" is not evaluated at all when "x < y" is +found to be false). + +Formally, if *a*, *b*, *c*, …, *y*, *z* are expressions and *op1*, +*op2*, …, *opN* are comparison operators, then "a op1 b op2 c ... y +opN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except +that each expression is evaluated at most once. + +Note that "a op1 b op2 c" doesn’t imply any kind of comparison between +*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though +perhaps not pretty). + + +Value comparisons +================= + +The operators "<", ">", "==", ">=", "<=", and "!=" compare the values +of two objects. The objects do not need to have the same type. + +Chapter Objects, values and types states that objects have a value (in +addition to type and identity). The value of an object is a rather +abstract notion in Python: For example, there is no canonical access +method for an object’s value. Also, there is no requirement that the +value of an object should be constructed in a particular way, e.g. +comprised of all its data attributes. Comparison operators implement a +particular notion of what the value of an object is. One can think of +them as defining the value of an object indirectly, by means of their +comparison implementation. + +Because all types are (direct or indirect) subtypes of "object", they +inherit the default comparison behavior from "object". Types can +customize their comparison behavior by implementing *rich comparison +methods* like "__lt__()", described in Basic customization. + +The default behavior for equality comparison ("==" and "!=") is based +on the identity of the objects. Hence, equality comparison of +instances with the same identity results in equality, and equality +comparison of instances with different identities results in +inequality. A motivation for this default behavior is the desire that +all objects should be reflexive (i.e. "x is y" implies "x == y"). + +A default order comparison ("<", ">", "<=", and ">=") is not provided; +an attempt raises "TypeError". A motivation for this default behavior +is the lack of a similar invariant as for equality. + +The behavior of the default equality comparison, that instances with +different identities are always unequal, may be in contrast to what +types will need that have a sensible definition of object value and +value-based equality. 
Such types will need to customize their +comparison behavior, and in fact, a number of built-in types have done +that. + +The following list describes the comparison behavior of the most +important built-in types. + +* Numbers of built-in numeric types (Numeric Types — int, float, + complex) and of the standard library types "fractions.Fraction" and + "decimal.Decimal" can be compared within and across their types, + with the restriction that complex numbers do not support order + comparison. Within the limits of the types involved, they compare + mathematically (algorithmically) correct without loss of precision. + + The not-a-number values "float('NaN')" and "decimal.Decimal('NaN')" + are special. Any ordered comparison of a number to a not-a-number + value is false. A counter-intuitive implication is that not-a-number + values are not equal to themselves. For example, if "x = + float('NaN')", "3 < x", "x < 3" and "x == x" are all false, while "x + != x" is true. This behavior is compliant with IEEE 754. + +* "None" and "NotImplemented" are singletons. **PEP 8** advises that + comparisons for singletons should always be done with "is" or "is + not", never the equality operators. + +* Binary sequences (instances of "bytes" or "bytearray") can be + compared within and across their types. They compare + lexicographically using the numeric values of their elements. + +* Strings (instances of "str") compare lexicographically using the + numerical Unicode code points (the result of the built-in function + "ord()") of their characters. [3] + + Strings and binary sequences cannot be directly compared. + +* Sequences (instances of "tuple", "list", or "range") can be compared + only within each of their types, with the restriction that ranges do + not support order comparison. Equality comparison across these + types results in inequality, and ordering comparison across these + types raises "TypeError". + + Sequences compare lexicographically using comparison of + corresponding elements. The built-in containers typically assume + identical objects are equal to themselves. That lets them bypass + equality tests for identical objects to improve performance and to + maintain their internal invariants. + + Lexicographical comparison between built-in collections works as + follows: + + * For two collections to compare equal, they must be of the same + type, have the same length, and each pair of corresponding + elements must compare equal (for example, "[1,2] == (1,2)" is + false because the type is not the same). + + * Collections that support order comparison are ordered the same as + their first unequal elements (for example, "[1,2,x] <= [1,2,y]" + has the same value as "x <= y"). If a corresponding element does + not exist, the shorter collection is ordered first (for example, + "[1,2] < [1,2,3]" is true). + +* Mappings (instances of "dict") compare equal if and only if they + have equal "(key, value)" pairs. Equality comparison of the keys and + values enforces reflexivity. + + Order comparisons ("<", ">", "<=", and ">=") raise "TypeError". + +* Sets (instances of "set" or "frozenset") can be compared within and + across their types. + + They define order comparison operators to mean subset and superset + tests. Those relations do not define total orderings (for example, + the two sets "{1,2}" and "{2,3}" are not equal, nor subsets of one + another, nor supersets of one another). 
Accordingly, sets are not + appropriate arguments for functions which depend on total ordering + (for example, "min()", "max()", and "sorted()" produce undefined + results given a list of sets as inputs). + + Comparison of sets enforces reflexivity of its elements. + +* Most other built-in types have no comparison methods implemented, so + they inherit the default comparison behavior. + +User-defined classes that customize their comparison behavior should +follow some consistency rules, if possible: + +* Equality comparison should be reflexive. In other words, identical + objects should compare equal: + + "x is y" implies "x == y" + +* Comparison should be symmetric. In other words, the following + expressions should have the same result: + + "x == y" and "y == x" + + "x != y" and "y != x" + + "x < y" and "y > x" + + "x <= y" and "y >= x" + +* Comparison should be transitive. The following (non-exhaustive) + examples illustrate that: + + "x > y and y > z" implies "x > z" + + "x < y and y <= z" implies "x < z" + +* Inverse comparison should result in the boolean negation. In other + words, the following expressions should have the same result: + + "x == y" and "not x != y" + + "x < y" and "not x >= y" (for total ordering) + + "x > y" and "not x <= y" (for total ordering) + + The last two expressions apply to totally ordered collections (e.g. + to sequences, but not to sets or mappings). See also the + "total_ordering()" decorator. + +* The "hash()" result should be consistent with equality. Objects that + are equal should either have the same hash value, or be marked as + unhashable. + +Python does not enforce these consistency rules. In fact, the +not-a-number values are an example for not following these rules. + + +Membership test operations +========================== + +The operators "in" and "not in" test for membership. "x in s" +evaluates to "True" if *x* is a member of *s*, and "False" otherwise. +"x not in s" returns the negation of "x in s". All built-in sequences +and set types support this as well as dictionary, for which "in" tests +whether the dictionary has a given key. For container types such as +list, tuple, set, frozenset, dict, or collections.deque, the +expression "x in y" is equivalent to "any(x is e or x == e for e in +y)". + +For the string and bytes types, "x in y" is "True" if and only if *x* +is a substring of *y*. An equivalent test is "y.find(x) != -1". +Empty strings are always considered to be a substring of any other +string, so """ in "abc"" will return "True". + +For user-defined classes which define the "__contains__()" method, "x +in y" returns "True" if "y.__contains__(x)" returns a true value, and +"False" otherwise. + +For user-defined classes which do not define "__contains__()" but do +define "__iter__()", "x in y" is "True" if some value "z", for which +the expression "x is z or x == z" is true, is produced while iterating +over "y". If an exception is raised during the iteration, it is as if +"in" raised that exception. + +Lastly, the old-style iteration protocol is tried: if a class defines +"__getitem__()", "x in y" is "True" if and only if there is a non- +negative integer index *i* such that "x is y[i] or x == y[i]", and no +lower integer index raises the "IndexError" exception. (If any other +exception is raised, it is as if "in" raised that exception). + +The operator "not in" is defined to have the inverse truth value of +"in". 
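+
+As a sketch of the "__contains__()" case (the class is invented for
+this example):
+
+   >>> class EvenNumbers:
+   ...     def __contains__(self, x):
+   ...         return isinstance(x, int) and x % 2 == 0
+   ...
+   >>> 4 in EvenNumbers()
+   True
+   >>> 5 not in EvenNumbers()
+   True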
+ + +Identity comparisons +==================== + +The operators "is" and "is not" test for an object’s identity: "x is +y" is true if and only if *x* and *y* are the same object. An +Object’s identity is determined using the "id()" function. "x is not +y" yields the inverse truth value. [4] +''', + 'compound': r'''Compound statements +******************* + +Compound statements contain (groups of) other statements; they affect +or control the execution of those other statements in some way. In +general, compound statements span multiple lines, although in simple +incarnations a whole compound statement may be contained in one line. + +The "if", "while" and "for" statements implement traditional control +flow constructs. "try" specifies exception handlers and/or cleanup +code for a group of statements, while the "with" statement allows the +execution of initialization and finalization code around a block of +code. Function and class definitions are also syntactically compound +statements. + +A compound statement consists of one or more ‘clauses.’ A clause +consists of a header and a ‘suite.’ The clause headers of a +particular compound statement are all at the same indentation level. +Each clause header begins with a uniquely identifying keyword and ends +with a colon. A suite is a group of statements controlled by a +clause. A suite can be one or more semicolon-separated simple +statements on the same line as the header, following the header’s +colon, or it can be one or more indented statements on subsequent +lines. Only the latter form of a suite can contain nested compound +statements; the following is illegal, mostly because it wouldn’t be +clear to which "if" clause a following "else" clause would belong: + + if test1: if test2: print(x) + +Also note that the semicolon binds tighter than the colon in this +context, so that in the following example, either all or none of the +"print()" calls are executed: + + if x < y < z: print(x); print(y); print(z) + +Summarizing: + + compound_stmt: if_stmt + | while_stmt + | for_stmt + | try_stmt + | with_stmt + | match_stmt + | funcdef + | classdef + | async_with_stmt + | async_for_stmt + | async_funcdef + suite: stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT + statement: stmt_list NEWLINE | compound_stmt + stmt_list: simple_stmt (";" simple_stmt)* [";"] + +Note that statements always end in a "NEWLINE" possibly followed by a +"DEDENT". Also note that optional continuation clauses always begin +with a keyword that cannot start a statement, thus there are no +ambiguities (the ‘dangling "else"’ problem is solved in Python by +requiring nested "if" statements to be indented). + +The formatting of the grammar rules in the following sections places +each clause on a separate line for clarity. + + +The "if" statement +================== + +The "if" statement is used for conditional execution: + + if_stmt: "if" assignment_expression ":" suite + ("elif" assignment_expression ":" suite)* + ["else" ":" suite] + +It selects exactly one of the suites by evaluating the expressions one +by one until one is found to be true (see section Boolean operations +for the definition of true and false); then that suite is executed +(and no other part of the "if" statement is executed or evaluated). +If all expressions are false, the suite of the "else" clause, if +present, is executed. 
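+
+For example, exactly one suite runs here (an illustrative snippet):
+
+   >>> x = 0
+   >>> if x < 0:
+   ...     sign = -1
+   ... elif x == 0:
+   ...     sign = 0
+   ... else:
+   ...     sign = 1
+   ...
+   >>> sign
+   0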
+ + +The "while" statement +===================== + +The "while" statement is used for repeated execution as long as an +expression is true: + + while_stmt: "while" assignment_expression ":" suite + ["else" ":" suite] + +This repeatedly tests the expression and, if it is true, executes the +first suite; if the expression is false (which may be the first time +it is tested) the suite of the "else" clause, if present, is executed +and the loop terminates. + +A "break" statement executed in the first suite terminates the loop +without executing the "else" clause’s suite. A "continue" statement +executed in the first suite skips the rest of the suite and goes back +to testing the expression. + + +The "for" statement +=================== + +The "for" statement is used to iterate over the elements of a sequence +(such as a string, tuple or list) or other iterable object: + + for_stmt: "for" target_list "in" starred_expression_list ":" suite + ["else" ":" suite] + +The "starred_expression_list" expression is evaluated once; it should +yield an *iterable* object. An *iterator* is created for that +iterable. The first item provided by the iterator is then assigned to +the target list using the standard rules for assignments (see +Assignment statements), and the suite is executed. This repeats for +each item provided by the iterator. When the iterator is exhausted, +the suite in the "else" clause, if present, is executed, and the loop +terminates. + +A "break" statement executed in the first suite terminates the loop +without executing the "else" clause’s suite. A "continue" statement +executed in the first suite skips the rest of the suite and continues +with the next item, or with the "else" clause if there is no next +item. + +The for-loop makes assignments to the variables in the target list. +This overwrites all previous assignments to those variables including +those made in the suite of the for-loop: + + for i in range(10): + print(i) + i = 5 # this will not affect the for-loop + # because i will be overwritten with the next + # index in the range + +Names in the target list are not deleted when the loop is finished, +but if the sequence is empty, they will not have been assigned to at +all by the loop. Hint: the built-in type "range()" represents +immutable arithmetic sequences of integers. For instance, iterating +"range(3)" successively yields 0, 1, and then 2. + +Changed in version 3.11: Starred elements are now allowed in the +expression list. + + +The "try" statement +=================== + +The "try" statement specifies exception handlers and/or cleanup code +for a group of statements: + + try_stmt: try1_stmt | try2_stmt | try3_stmt + try1_stmt: "try" ":" suite + ("except" [expression ["as" identifier]] ":" suite)+ + ["else" ":" suite] + ["finally" ":" suite] + try2_stmt: "try" ":" suite + ("except" "*" expression ["as" identifier] ":" suite)+ + ["else" ":" suite] + ["finally" ":" suite] + try3_stmt: "try" ":" suite + "finally" ":" suite + +Additional information on exceptions can be found in section +Exceptions, and information on using the "raise" statement to generate +exceptions may be found in section The raise statement. + +Changed in version 3.14: Support for optionally dropping grouping +parentheses when using multiple exception types. See **PEP 758**. + + +"except" clause +--------------- + +The "except" clause(s) specify one or more exception handlers. When no +exception occurs in the "try" clause, no exception handler is +executed. 
When an exception occurs in the "try" suite, a search for an
+exception handler is started. This search inspects the "except"
+clauses in turn until one is found that matches the exception. An
+expression-less "except" clause, if present, must be last; it matches
+any exception.
+
+For an "except" clause with an expression, the expression must
+evaluate to an exception type or a tuple of exception types.
+Parentheses can be dropped if multiple exception types are provided
+and the "as" clause is not used. The raised exception matches an
+"except" clause whose expression evaluates to the class or a *non-
+virtual base class* of the exception object, or to a tuple that
+contains such a class.
+
+If no "except" clause matches the exception, the search for an
+exception handler continues in the surrounding code and on the
+invocation stack. [1]
+
+If the evaluation of an expression in the header of an "except" clause
+raises an exception, the original search for a handler is canceled and
+a search starts for the new exception in the surrounding code and on
+the call stack (it is treated as if the entire "try" statement raised
+the exception).
+
+When a matching "except" clause is found, the exception is assigned to
+the target specified after the "as" keyword in that "except" clause,
+if present, and the "except" clause’s suite is executed. All "except"
+clauses must have an executable block. When the end of this block is
+reached, execution continues normally after the entire "try"
+statement. (This means that if two nested handlers exist for the same
+exception, and the exception occurs in the "try" clause of the inner
+handler, the outer handler will not handle the exception.)
+
+When an exception has been assigned using "as target", it is cleared
+at the end of the "except" clause. This is as if
+
+   except E as N:
+       foo
+
+was translated to
+
+   except E as N:
+       try:
+           foo
+       finally:
+           del N
+
+This means the exception must be assigned to a different name to be
+able to refer to it after the "except" clause. Exceptions are cleared
+because with the traceback attached to them, they form a reference
+cycle with the stack frame, keeping all locals in that frame alive
+until the next garbage collection occurs.
+
+Before an "except" clause’s suite is executed, the exception is stored
+in the "sys" module, where it can be accessed from within the body of
+the "except" clause by calling "sys.exception()". When leaving an
+exception handler, the exception stored in the "sys" module is reset
+to its previous value:
+
+   >>> print(sys.exception())
+   None
+   >>> try:
+   ...     raise TypeError
+   ... except:
+   ...     print(repr(sys.exception()))
+   ...     try:
+   ...          raise ValueError
+   ...     except:
+   ...         print(repr(sys.exception()))
+   ...     print(repr(sys.exception()))
+   ...
+   TypeError()
+   ValueError()
+   TypeError()
+   >>> print(sys.exception())
+   None
+
+
+"except*" clause
+----------------
+
+The "except*" clause(s) specify one or more handlers for groups of
+exceptions ("BaseExceptionGroup" instances). A "try" statement can
+have either "except" or "except*" clauses, but not both. The exception
+type for matching is mandatory in the case of "except*", so "except*:"
+is a syntax error. The type is interpreted as in the case of "except",
+but matching is performed on the exceptions contained in the group
+that is being handled. A "TypeError" is raised if a matching type is
+a subclass of "BaseExceptionGroup", because that would have ambiguous
+semantics.
+
+When an exception group is raised in the try block, each "except*"
+clause splits (see "split()") it into the subgroups of matching and
+non-matching exceptions. If the matching subgroup is not empty, it
+becomes the handled exception (the value returned from
+"sys.exception()") and assigned to the target of the "except*" clause
+(if there is one). Then, the body of the "except*" clause executes. If
+the non-matching subgroup is not empty, it is processed by the next
+"except*" in the same manner. This continues until all exceptions in
+the group have been matched, or the last "except*" clause has run.
+
+After all "except*" clauses execute, the group of unhandled exceptions
+is merged with any exceptions that were raised or re-raised from
+within "except*" clauses. This merged exception group propagates on:
+
+   >>> try:
+   ...     raise ExceptionGroup("eg",
+   ...         [ValueError(1), TypeError(2), OSError(3), OSError(4)])
+   ... except* TypeError as e:
+   ...     print(f'caught {type(e)} with nested {e.exceptions}')
+   ... except* OSError as e:
+   ...     print(f'caught {type(e)} with nested {e.exceptions}')
+   ...
+   caught <class 'ExceptionGroup'> with nested (TypeError(2),)
+   caught <class 'ExceptionGroup'> with nested (OSError(3), OSError(4))
+     + Exception Group Traceback (most recent call last):
+     |   File "<stdin>", line 2, in <module>
+     |     raise ExceptionGroup("eg",
+     |         [ValueError(1), TypeError(2), OSError(3), OSError(4)])
+     | ExceptionGroup: eg (1 sub-exception)
+     +-+---------------- 1 ----------------
+       | ValueError: 1
+       +------------------------------------
+
+If the exception raised from the "try" block is not an exception group
+and its type matches one of the "except*" clauses, it is caught and
+wrapped by an exception group with an empty message string. This
+ensures that the type of the target "e" is consistently
+"BaseExceptionGroup":
+
+   >>> try:
+   ...     raise BlockingIOError
+   ... except* BlockingIOError as e:
+   ...     print(repr(e))
+   ...
+   ExceptionGroup('', (BlockingIOError(),))
+
+"break", "continue" and "return" cannot appear in an "except*" clause.
+
+
+"else" clause
+-------------
+
+The optional "else" clause is executed if the control flow leaves the
+"try" suite, no exception was raised, and no "return", "continue", or
+"break" statement was executed. Exceptions in the "else" clause are
+not handled by the preceding "except" clauses.
+
+
+"finally" clause
+----------------
+
+If "finally" is present, it specifies a ‘cleanup’ handler. The "try"
+clause is executed, including any "except" and "else" clauses. If an
+exception occurs in any of the clauses and is not handled, the
+exception is temporarily saved. The "finally" clause is executed. If
+there is a saved exception it is re-raised at the end of the "finally"
+clause. If the "finally" clause raises another exception, the saved
+exception is set as the context of the new exception. If the "finally"
+clause executes a "return", "break" or "continue" statement, the saved
+exception is discarded. For example, this function returns 42.
+
+   def f():
+       try:
+           1/0
+       finally:
+           return 42
+
+The exception information is not available to the program during
+execution of the "finally" clause.
+
+When a "return", "break" or "continue" statement is executed in the
+"try" suite of a "try"…"finally" statement, the "finally" clause is
+also executed ‘on the way out.’
+
+The return value of a function is determined by the last "return"
+statement executed. Since the "finally" clause always executes, a
+"return" statement executed in the "finally" clause will always be the
+last one executed.
The following function returns ‘finally’. + + def foo(): + try: + return 'try' + finally: + return 'finally' + +Changed in version 3.8: Prior to Python 3.8, a "continue" statement +was illegal in the "finally" clause due to a problem with the +implementation. + +Changed in version 3.14: The compiler emits a "SyntaxWarning" when a +"return", "break" or "continue" appears in a "finally" block (see +**PEP 765**). + + +The "with" statement +==================== + +The "with" statement is used to wrap the execution of a block with +methods defined by a context manager (see section With Statement +Context Managers). This allows common "try"…"except"…"finally" usage +patterns to be encapsulated for convenient reuse. + + with_stmt: "with" ( "(" with_stmt_contents ","? ")" | with_stmt_contents ) ":" suite + with_stmt_contents: with_item ("," with_item)* + with_item: expression ["as" target] + +The execution of the "with" statement with one “item” proceeds as +follows: + +1. The context expression (the expression given in the "with_item") is + evaluated to obtain a context manager. + +2. The context manager’s "__enter__()" is loaded for later use. + +3. The context manager’s "__exit__()" is loaded for later use. + +4. The context manager’s "__enter__()" method is invoked. + +5. If a target was included in the "with" statement, the return value + from "__enter__()" is assigned to it. + + Note: + + The "with" statement guarantees that if the "__enter__()" method + returns without an error, then "__exit__()" will always be + called. Thus, if an error occurs during the assignment to the + target list, it will be treated the same as an error occurring + within the suite would be. See step 7 below. + +6. The suite is executed. + +7. The context manager’s "__exit__()" method is invoked. If an + exception caused the suite to be exited, its type, value, and + traceback are passed as arguments to "__exit__()". Otherwise, three + "None" arguments are supplied. + + If the suite was exited due to an exception, and the return value + from the "__exit__()" method was false, the exception is reraised. + If the return value was true, the exception is suppressed, and + execution continues with the statement following the "with" + statement. + + If the suite was exited for any reason other than an exception, the + return value from "__exit__()" is ignored, and execution proceeds + at the normal location for the kind of exit that was taken. + +The following code: + + with EXPRESSION as TARGET: + SUITE + +is semantically equivalent to: + + manager = (EXPRESSION) + enter = manager.__enter__ + exit = manager.__exit__ + value = enter() + hit_except = False + + try: + TARGET = value + SUITE + except: + hit_except = True + if not exit(*sys.exc_info()): + raise + finally: + if not hit_except: + exit(None, None, None) + +except that implicit special method lookup is used for "__enter__()" +and "__exit__()". + +With more than one item, the context managers are processed as if +multiple "with" statements were nested: + + with A() as a, B() as b: + SUITE + +is semantically equivalent to: + + with A() as a: + with B() as b: + SUITE + +You can also write multi-item context managers in multiple lines if +the items are surrounded by parentheses. For example: + + with ( + A() as a, + B() as b, + ): + SUITE + +Changed in version 3.1: Support for multiple context expressions. + +Changed in version 3.10: Support for using grouping parentheses to +break the statement in multiple lines. 
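+
+For illustration only (this sketch is not part of the specification;
+the class name "Tracer" is invented for this example), a minimal
+user-defined context manager implementing the protocol described
+above:
+
+   class Tracer:
+       def __enter__(self):
+           # The value returned here is bound by "as", if present.
+           print("entering")
+           return self
+
+       def __exit__(self, exc_type, exc_value, traceback):
+           # Called on the way out; returning a false value lets any
+           # exception propagate.
+           print("exiting")
+           return False
+
+   with Tracer() as t:
+       print("inside the suite")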
+ +See also: + + **PEP 343** - The “with” statement + The specification, background, and examples for the Python "with" + statement. + + +The "match" statement +===================== + +Added in version 3.10. + +The match statement is used for pattern matching. Syntax: + + match_stmt: 'match' subject_expr ":" NEWLINE INDENT case_block+ DEDENT + subject_expr: `!star_named_expression` "," `!star_named_expressions`? + | `!named_expression` + case_block: 'case' patterns [guard] ":" `!block` + +Note: + + This section uses single quotes to denote soft keywords. + +Pattern matching takes a pattern as input (following "case") and a +subject value (following "match"). The pattern (which may contain +subpatterns) is matched against the subject value. The outcomes are: + +* A match success or failure (also termed a pattern success or + failure). + +* Possible binding of matched values to a name. The prerequisites for + this are further discussed below. + +The "match" and "case" keywords are soft keywords. + +See also: + + * **PEP 634** – Structural Pattern Matching: Specification + + * **PEP 636** – Structural Pattern Matching: Tutorial + + +Overview +-------- + +Here’s an overview of the logical flow of a match statement: + +1. The subject expression "subject_expr" is evaluated and a resulting + subject value obtained. If the subject expression contains a comma, + a tuple is constructed using the standard rules. + +2. Each pattern in a "case_block" is attempted to match with the + subject value. The specific rules for success or failure are + described below. The match attempt can also bind some or all of the + standalone names within the pattern. The precise pattern binding + rules vary per pattern type and are specified below. **Name + bindings made during a successful pattern match outlive the + executed block and can be used after the match statement**. + + Note: + + During failed pattern matches, some subpatterns may succeed. Do + not rely on bindings being made for a failed match. Conversely, + do not rely on variables remaining unchanged after a failed + match. The exact behavior is dependent on implementation and may + vary. This is an intentional decision made to allow different + implementations to add optimizations. + +3. If the pattern succeeds, the corresponding guard (if present) is + evaluated. In this case all name bindings are guaranteed to have + happened. + + * If the guard evaluates as true or is missing, the "block" inside + "case_block" is executed. + + * Otherwise, the next "case_block" is attempted as described above. + + * If there are no further case blocks, the match statement is + completed. + +Note: + + Users should generally never rely on a pattern being evaluated. + Depending on implementation, the interpreter may cache values or use + other optimizations which skip repeated evaluations. + +A sample match statement: + + >>> flag = False + >>> match (100, 200): + ... case (100, 300): # Mismatch: 200 != 300 + ... print('Case 1') + ... case (100, 200) if flag: # Successful match, but guard fails + ... print('Case 2') + ... case (100, y): # Matches and binds y to 200 + ... print(f'Case 3, y: {y}') + ... case _: # Pattern not attempted + ... print('Case 4, I match anything!') + ... + Case 3, y: 200 + +In this case, "if flag" is a guard. Read more about that in the next +section. + + +Guards +------ + + guard: "if" `!named_expression` + +A "guard" (which is part of the "case") must succeed for code inside +the "case" block to execute. 
It takes the form: "if" followed by an
+expression.
+
+The logical flow of a "case" block with a "guard" follows:
+
+1. Check that the pattern in the "case" block succeeded. If the
+   pattern failed, the "guard" is not evaluated and the next "case"
+   block is checked.
+
+2. If the pattern succeeded, evaluate the "guard".
+
+   * If the "guard" condition evaluates as true, the case block is
+     selected.
+
+   * If the "guard" condition evaluates as false, the case block is
+     not selected.
+
+   * If the "guard" raises an exception during evaluation, the
+     exception bubbles up.
+
+Guards are allowed to have side effects as they are expressions.
+Guard evaluation must proceed from the first to the last case block,
+one at a time, skipping case blocks whose pattern(s) don’t all
+succeed. (I.e., guard evaluation must happen in order.) Guard
+evaluation must stop once a case block is selected.
+
+
+Irrefutable Case Blocks
+-----------------------
+
+An irrefutable case block is a match-all case block. A match
+statement may have at most one irrefutable case block, and it must be
+last.
+
+A case block is considered irrefutable if it has no guard and its
+pattern is irrefutable. A pattern is considered irrefutable if we can
+prove from its syntax alone that it will always succeed. Only the
+following patterns are irrefutable:
+
+* AS Patterns whose left-hand side is irrefutable
+
+* OR Patterns containing at least one irrefutable pattern
+
+* Capture Patterns
+
+* Wildcard Patterns
+
+* parenthesized irrefutable patterns
+
+
+Patterns
+--------
+
+Note:
+
+  This section uses grammar notations beyond standard EBNF:
+
+  * the notation "SEP.RULE+" is shorthand for "RULE (SEP RULE)*"
+
+  * the notation "!RULE" is shorthand for a negative lookahead
+    assertion
+
+The top-level syntax for "patterns" is:
+
+   patterns: open_sequence_pattern | pattern
+   pattern: as_pattern | or_pattern
+   closed_pattern: | literal_pattern
+                   | capture_pattern
+                   | wildcard_pattern
+                   | value_pattern
+                   | group_pattern
+                   | sequence_pattern
+                   | mapping_pattern
+                   | class_pattern
+
+The descriptions below will include a description “in simple terms” of
+what a pattern does for illustration purposes (credits to Raymond
+Hettinger for a document that inspired most of the descriptions). Note
+that these descriptions are purely for illustration purposes and **may
+not** reflect the underlying implementation. Furthermore, they do not
+cover all valid forms.
+
+
+OR Patterns
+~~~~~~~~~~~
+
+An OR pattern is two or more patterns separated by vertical bars "|".
+Syntax:
+
+   or_pattern: "|".closed_pattern+
+
+Only the final subpattern may be irrefutable, and each subpattern must
+bind the same set of names to avoid ambiguity.
+
+An OR pattern matches each of its subpatterns in turn to the subject
+value, until one succeeds. The OR pattern is then considered
+successful. Otherwise, if none of the subpatterns succeed, the OR
+pattern fails.
+
+In simple terms, "P1 | P2 | ..." will try to match "P1", if it fails
+it will try to match "P2", succeeding immediately if any succeeds,
+failing otherwise.
+
+
+AS Patterns
+~~~~~~~~~~~
+
+An AS pattern matches an OR pattern on the left of the "as" keyword
+against a subject. Syntax:
+
+   as_pattern: or_pattern "as" capture_pattern
+
+If the OR pattern fails, the AS pattern fails. Otherwise, the AS
+pattern binds the subject to the name on the right of the as keyword
+and succeeds. "capture_pattern" cannot be a "_".
+
+In simple terms "P as NAME" will match with "P", and on success it
+will set "NAME = <subject>".
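+
+For illustration only (this example is not part of the specification;
+the function name "describe" is invented for this sketch), OR and AS
+patterns are often combined:
+
+   def describe(command):
+       match command.split():
+           # The parenthesized OR pattern is bound as a whole by "as".
+           case ["go", ("north" | "south" | "east" | "west") as direction]:
+               return f"moving {direction}"
+           case ["quit" | "exit"]:
+               return "goodbye"
+           case _:
+               return "unknown command"
+
+   assert describe("go north") == "moving north"
+   assert describe("exit") == "goodbye"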
+
+
+Literal Patterns
+~~~~~~~~~~~~~~~~
+
+A literal pattern corresponds to most literals in Python. Syntax:
+
+   literal_pattern: signed_number
+                    | signed_number "+" NUMBER
+                    | signed_number "-" NUMBER
+                    | strings
+                    | "None"
+                    | "True"
+                    | "False"
+   signed_number: ["-"] NUMBER
+
+The rule "strings" and the token "NUMBER" are defined in the standard
+Python grammar. Triple-quoted strings are supported. Raw strings and
+byte strings are supported. f-strings and t-strings are not
+supported.
+
+The forms "signed_number '+' NUMBER" and "signed_number '-' NUMBER"
+are for expressing complex numbers; they require a real number on the
+left and an imaginary number on the right. E.g. "3 + 4j".
+
+In simple terms, "LITERAL" will succeed only if "<subject> ==
+LITERAL". For the singletons "None", "True" and "False", the "is"
+operator is used.
+
+
+Capture Patterns
+~~~~~~~~~~~~~~~~
+
+A capture pattern binds the subject value to a name. Syntax:
+
+   capture_pattern: !'_' NAME
+
+A single underscore "_" is not a capture pattern (this is what "!'_'"
+expresses). It is instead treated as a "wildcard_pattern".
+
+In a given pattern, a given name can only be bound once. E.g. "case
+x, x: ..." is invalid while "case [x] | x: ..." is allowed.
+
+Capture patterns always succeed. The binding follows scoping rules
+established by the assignment expression operator in **PEP 572**; the
+name becomes a local variable in the closest containing function scope
+unless there’s an applicable "global" or "nonlocal" statement.
+
+In simple terms "NAME" will always succeed and it will set "NAME =
+<subject>".
+
+
+Wildcard Patterns
+~~~~~~~~~~~~~~~~~
+
+A wildcard pattern always succeeds (matches anything) and binds no
+name. Syntax:
+
+   wildcard_pattern: '_'
+
+"_" is a soft keyword within any pattern, but only within patterns.
+It is an identifier, as usual, even within "match" subject
+expressions, "guard"s, and "case" blocks.
+
+In simple terms, "_" will always succeed.
+
+
+Value Patterns
+~~~~~~~~~~~~~~
+
+A value pattern represents a named value in Python. Syntax:
+
+   value_pattern: attr
+   attr: name_or_attr "." NAME
+   name_or_attr: attr | NAME
+
+The dotted name in the pattern is looked up using standard Python name
+resolution rules. The pattern succeeds if the value found compares
+equal to the subject value (using the "==" equality operator).
+
+In simple terms "NAME1.NAME2" will succeed only if "<subject> ==
+NAME1.NAME2".
+
+Note:
+
+  If the same value occurs multiple times in the same match statement,
+  the interpreter may cache the first value found and reuse it rather
+  than repeat the same lookup. This cache is strictly tied to a given
+  execution of a given match statement.
+
+
+Group Patterns
+~~~~~~~~~~~~~~
+
+A group pattern allows users to add parentheses around patterns to
+emphasize the intended grouping. Otherwise, it has no additional
+syntax. Syntax:
+
+   group_pattern: "(" pattern ")"
+
+In simple terms "(P)" has the same effect as "P".
+
+
+Sequence Patterns
+~~~~~~~~~~~~~~~~~
+
+A sequence pattern contains several subpatterns to be matched against
+sequence elements. The syntax is similar to the unpacking of a list or
+tuple.
+
+   sequence_pattern: "[" [maybe_sequence_pattern] "]"
+                     | "(" [open_sequence_pattern] ")"
+   open_sequence_pattern: maybe_star_pattern "," [maybe_sequence_pattern]
+   maybe_sequence_pattern: ",".maybe_star_pattern+ ","?
+   maybe_star_pattern: star_pattern | pattern
+   star_pattern: "*" (capture_pattern | wildcard_pattern)
+
+There is no difference if parentheses or square brackets are used for
+sequence patterns (i.e. "(...)" vs "[...]" ).
+
+Note:
+
+  A single pattern enclosed in parentheses without a trailing comma
+  (e.g. "(3 | 4)") is a group pattern. While a single pattern enclosed
+  in square brackets (e.g. "[3 | 4]") is still a sequence pattern.
+
+At most one star subpattern may be in a sequence pattern. The star
+subpattern may occur in any position. If no star subpattern is
+present, the sequence pattern is a fixed-length sequence pattern;
+otherwise it is a variable-length sequence pattern.
+
+The following is the logical flow for matching a sequence pattern
+against a subject value:
+
+1. If the subject value is not a sequence [2], the sequence pattern
+   fails.
+
+2. If the subject value is an instance of "str", "bytes" or
+   "bytearray", the sequence pattern fails.
+
+3. The subsequent steps depend on whether the sequence pattern is
+   fixed or variable-length.
+
+   If the sequence pattern is fixed-length:
+
+   1. If the length of the subject sequence is not equal to the number
+      of subpatterns, the sequence pattern fails.
+
+   2. Subpatterns in the sequence pattern are matched to their
+      corresponding items in the subject sequence from left to right.
+      Matching stops as soon as a subpattern fails. If all
+      subpatterns succeed in matching their corresponding item, the
+      sequence pattern succeeds.
+
+   Otherwise, if the sequence pattern is variable-length:
+
+   1. If the length of the subject sequence is less than the number of
+      non-star subpatterns, the sequence pattern fails.
+
+   2. The leading non-star subpatterns are matched to their
+      corresponding items as for fixed-length sequences.
+
+   3. If the previous step succeeds, the star subpattern matches a
+      list formed of the remaining subject items, excluding the
+      remaining items corresponding to non-star subpatterns following
+      the star subpattern.
+
+   4. Remaining non-star subpatterns are matched to their
+      corresponding subject items, as for a fixed-length sequence.
+
+   Note:
+
+     The length of the subject sequence is obtained via "len()" (i.e.
+     via the "__len__()" protocol). This length may be cached by the
+     interpreter in a similar manner as value patterns.
+
+In simple terms "[P1, P2, P3," … ", P<N>]" matches only if all the
+following happens:
+
+* check "<subject>" is a sequence
+
+* "len(subject) == <N>"
+
+* "P1" matches "<subject>[0]" (note that this match can also bind
+  names)
+
+* "P2" matches "<subject>[1]" (note that this match can also bind
+  names)
+
+* … and so on for the corresponding pattern/element.
+
+
+Mapping Patterns
+~~~~~~~~~~~~~~~~
+
+A mapping pattern contains one or more key-value patterns. The syntax
+is similar to the construction of a dictionary. Syntax:
+
+   mapping_pattern: "{" [items_pattern] "}"
+   items_pattern: ",".key_value_pattern+ ","?
+   key_value_pattern: (literal_pattern | value_pattern) ":" pattern
+                      | double_star_pattern
+   double_star_pattern: "**" capture_pattern
+
+At most one double star pattern may be in a mapping pattern. The
+double star pattern must be the last subpattern in the mapping
+pattern.
+
+Duplicate keys in mapping patterns are disallowed. Duplicate literal
+keys will raise a "SyntaxError". Two keys that otherwise have the same
+value will raise a "ValueError" at runtime.
+
+The following is the logical flow for matching a mapping pattern
+against a subject value:
+
+1. If the subject value is not a mapping [3], the mapping pattern
+   fails.
+
+2. If every key given in the mapping pattern is present in the subject
+   mapping, and the pattern for each key matches the corresponding
+   item of the subject mapping, the mapping pattern succeeds.
+
+3. If duplicate keys are detected in the mapping pattern, the pattern
+   is considered invalid. A "SyntaxError" is raised for duplicate
+   literal values; or a "ValueError" for named keys of the same value.
+
+Note:
+
+  Key-value pairs are matched using the two-argument form of the
+  mapping subject’s "get()" method. Matched key-value pairs must
+  already be present in the mapping, and not created on-the-fly via
+  "__missing__()" or "__getitem__()".
+
+In simple terms "{KEY1: P1, KEY2: P2, ... }" matches only if all the
+following happens:
+
+* check "<subject>" is a mapping
+
+* "KEY1 in <subject>"
+
+* "P1" matches "<subject>[KEY1]"
+
+* … and so on for the corresponding KEY/pattern pair.
+
+
+Class Patterns
+~~~~~~~~~~~~~~
+
+A class pattern represents a class and its positional and keyword
+arguments (if any). Syntax:
+
+   class_pattern: name_or_attr "(" [pattern_arguments ","?] ")"
+   pattern_arguments: positional_patterns ["," keyword_patterns]
+                      | keyword_patterns
+   positional_patterns: ",".pattern+
+   keyword_patterns: ",".keyword_pattern+
+   keyword_pattern: NAME "=" pattern
+
+The same keyword should not be repeated in class patterns.
+
+The following is the logical flow for matching a class pattern against
+a subject value:
+
+1. If "name_or_attr" is not an instance of the builtin "type", raise
+   "TypeError".
+
+2. If the subject value is not an instance of "name_or_attr" (tested
+   via "isinstance()"), the class pattern fails.
+
+3. If no pattern arguments are present, the pattern succeeds.
+   Otherwise, the subsequent steps depend on whether keyword or
+   positional argument patterns are present.
+
+   For a number of built-in types (specified below), a single
+   positional subpattern is accepted which will match the entire
+   subject; for these types keyword patterns also work as for other
+   types.
+
+   If only keyword patterns are present, they are processed as
+   follows, one by one:
+
+   1. The keyword is looked up as an attribute on the subject.
+
+      * If this raises an exception other than "AttributeError", the
+        exception bubbles up.
+
+      * If this raises "AttributeError", the class pattern has failed.
+
+      * Else, the subpattern associated with the keyword pattern is
+        matched against the subject’s attribute value. If this fails,
+        the class pattern fails; if this succeeds, the match proceeds
+        to the next keyword.
+
+   2. If all keyword patterns succeed, the class pattern succeeds.
+
+   If any positional patterns are present, they are converted to
+   keyword patterns using the "__match_args__" attribute on the class
+   "name_or_attr" before matching:
+
+   1. The equivalent of "getattr(cls, "__match_args__", ())" is
+      called.
+
+      * If this raises an exception, the exception bubbles up.
+
+      * If the returned value is not a tuple, the conversion fails and
+        "TypeError" is raised.
+
+      * If there are more positional patterns than
+        "len(cls.__match_args__)", "TypeError" is raised.
+
+      * Otherwise, positional pattern "i" is converted to a keyword
+        pattern using "__match_args__[i]" as the keyword.
+        "__match_args__[i]" must be a string; if not "TypeError" is
+        raised.
+
+      * If there are duplicate keywords, "TypeError" is raised.
+
+      See also:
+
+        Customizing positional arguments in class pattern matching
+
+   2. Once all positional patterns have been converted to keyword
+      patterns, the match proceeds as if there were only keyword
+      patterns.
+
+   For the following built-in types the handling of positional
+   subpatterns is different:
+
+   * "bool"
+
+   * "bytearray"
+
+   * "bytes"
+
+   * "dict"
+
+   * "float"
+
+   * "frozenset"
+
+   * "int"
+
+   * "list"
+
+   * "set"
+
+   * "str"
+
+   * "tuple"
+
+   These classes accept a single positional argument, and the pattern
+   there is matched against the whole object rather than an attribute.
+   For example "int(0|1)" matches the value "0", but not the value
+   "0.0".
+
+In simple terms "CLS(P1, attr=P2)" matches only if the following
+happens:
+
+* "isinstance(<subject>, CLS)"
+
+* convert "P1" to a keyword pattern using "CLS.__match_args__"
+
+* For each keyword argument "attr=P2":
+
+  * "hasattr(<subject>, "attr")"
+
+  * "P2" matches "<subject>.attr"
+
+* … and so on for the corresponding keyword argument/pattern pair.
+
+See also:
+
+  * **PEP 634** – Structural Pattern Matching: Specification
+
+  * **PEP 636** – Structural Pattern Matching: Tutorial
+
+
+Function definitions
+====================
+
+A function definition defines a user-defined function object (see
+section The standard type hierarchy):
+
+   funcdef: [decorators] "def" funcname [type_params] "(" [parameter_list] ")"
+            ["->" expression] ":" suite
+   decorators: decorator+
+   decorator: "@" assignment_expression NEWLINE
+   parameter_list: defparameter ("," defparameter)* "," "/" ["," [parameter_list_no_posonly]]
+                   | parameter_list_no_posonly
+   parameter_list_no_posonly: defparameter ("," defparameter)* ["," [parameter_list_starargs]]
+                              | parameter_list_starargs
+   parameter_list_starargs: "*" [star_parameter] ("," defparameter)* ["," [parameter_star_kwargs]]
+                            | "*" ("," defparameter)+ ["," [parameter_star_kwargs]]
+                            | parameter_star_kwargs
+   parameter_star_kwargs: "**" parameter [","]
+   parameter: identifier [":" expression]
+   star_parameter: identifier [":" ["*"] expression]
+   defparameter: parameter ["=" expression]
+   funcname: identifier
+
+A function definition is an executable statement. Its execution binds
+the function name in the current local namespace to a function object
+(a wrapper around the executable code for the function). This
+function object contains a reference to the current global namespace
+as the global namespace to be used when the function is called.
+
+The function definition does not execute the function body; this gets
+executed only when the function is called. [4]
+
+A function definition may be wrapped by one or more *decorator*
+expressions. Decorator expressions are evaluated when the function is
+defined, in the scope that contains the function definition. The
+result must be a callable, which is invoked with the function object
+as the only argument. The returned value is bound to the function name
+instead of the function object. Multiple decorators are applied in
+nested fashion. For example, the following code
+
+   @f1(arg)
+   @f2
+   def func(): pass
+
+is roughly equivalent to
+
+   def func(): pass
+   func = f1(arg)(f2(func))
+
+except that the original function is not temporarily bound to the name
+"func".
+
+Changed in version 3.9: Functions may be decorated with any valid
+"assignment_expression". Previously, the grammar was much more
+restrictive; see **PEP 614** for details.
+
+A list of type parameters may be given in square brackets between the
+function’s name and the opening parenthesis for its parameter list.
+This indicates to static type checkers that the function is generic.
+At runtime, the type parameters can be retrieved from the function’s +"__type_params__" attribute. See Generic functions for more. + +Changed in version 3.12: Type parameter lists are new in Python 3.12. + +When one or more *parameters* have the form *parameter* "=" +*expression*, the function is said to have “default parameter values.” +For a parameter with a default value, the corresponding *argument* may +be omitted from a call, in which case the parameter’s default value is +substituted. If a parameter has a default value, all following +parameters up until the “"*"” must also have a default value — this is +a syntactic restriction that is not expressed by the grammar. + +**Default parameter values are evaluated from left to right when the +function definition is executed.** This means that the expression is +evaluated once, when the function is defined, and that the same “pre- +computed” value is used for each call. This is especially important +to understand when a default parameter value is a mutable object, such +as a list or a dictionary: if the function modifies the object (e.g. +by appending an item to a list), the default parameter value is in +effect modified. This is generally not what was intended. A way +around this is to use "None" as the default, and explicitly test for +it in the body of the function, e.g.: + + def whats_on_the_telly(penguin=None): + if penguin is None: + penguin = [] + penguin.append("property of the zoo") + return penguin + +Function call semantics are described in more detail in section Calls. +A function call always assigns values to all parameters mentioned in +the parameter list, either from positional arguments, from keyword +arguments, or from default values. If the form “"*identifier"” is +present, it is initialized to a tuple receiving any excess positional +parameters, defaulting to the empty tuple. If the form +“"**identifier"” is present, it is initialized to a new ordered +mapping receiving any excess keyword arguments, defaulting to a new +empty mapping of the same type. Parameters after “"*"” or +“"*identifier"” are keyword-only parameters and may only be passed by +keyword arguments. Parameters before “"/"” are positional-only +parameters and may only be passed by positional arguments. + +Changed in version 3.8: The "/" function parameter syntax may be used +to indicate positional-only parameters. See **PEP 570** for details. + +Parameters may have an *annotation* of the form “": expression"” +following the parameter name. Any parameter may have an annotation, +even those of the form "*identifier" or "**identifier". (As a special +case, parameters of the form "*identifier" may have an annotation “": +*expression"”.) Functions may have “return” annotation of the form +“"-> expression"” after the parameter list. These annotations can be +any valid Python expression. The presence of annotations does not +change the semantics of a function. See Annotations for more +information on annotations. + +Changed in version 3.11: Parameters of the form “"*identifier"” may +have an annotation “": *expression"”. See **PEP 646**. + +It is also possible to create anonymous functions (functions not bound +to a name), for immediate use in expressions. This uses lambda +expressions, described in section Lambdas. Note that the lambda +expression is merely a shorthand for a simplified function definition; +a function defined in a “"def"” statement can be passed around or +assigned to another name just like a function defined by a lambda +expression. 
The “"def"” form is actually more powerful since it +allows the execution of multiple statements and annotations. + +**Programmer’s note:** Functions are first-class objects. A “"def"” +statement executed inside a function definition defines a local +function that can be returned or passed around. Free variables used +in the nested function can access the local variables of the function +containing the def. See section Naming and binding for details. + +See also: + + **PEP 3107** - Function Annotations + The original specification for function annotations. + + **PEP 484** - Type Hints + Definition of a standard meaning for annotations: type hints. + + **PEP 526** - Syntax for Variable Annotations + Ability to type hint variable declarations, including class + variables and instance variables. + + **PEP 563** - Postponed Evaluation of Annotations + Support for forward references within annotations by preserving + annotations in a string form at runtime instead of eager + evaluation. + + **PEP 318** - Decorators for Functions and Methods + Function and method decorators were introduced. Class decorators + were introduced in **PEP 3129**. + + +Class definitions +================= + +A class definition defines a class object (see section The standard +type hierarchy): + + classdef: [decorators] "class" classname [type_params] [inheritance] ":" suite + inheritance: "(" [argument_list] ")" + classname: identifier + +A class definition is an executable statement. The inheritance list +usually gives a list of base classes (see Metaclasses for more +advanced uses), so each item in the list should evaluate to a class +object which allows subclassing. Classes without an inheritance list +inherit, by default, from the base class "object"; hence, + + class Foo: + pass + +is equivalent to + + class Foo(object): + pass + +The class’s suite is then executed in a new execution frame (see +Naming and binding), using a newly created local namespace and the +original global namespace. (Usually, the suite contains mostly +function definitions.) When the class’s suite finishes execution, its +execution frame is discarded but its local namespace is saved. [5] A +class object is then created using the inheritance list for the base +classes and the saved local namespace for the attribute dictionary. +The class name is bound to this class object in the original local +namespace. + +The order in which attributes are defined in the class body is +preserved in the new class’s "__dict__". Note that this is reliable +only right after the class is created and only for classes that were +defined using the definition syntax. + +Class creation can be customized heavily using metaclasses. + +Classes can also be decorated: just like when decorating functions, + + @f1(arg) + @f2 + class Foo: pass + +is roughly equivalent to + + class Foo: pass + Foo = f1(arg)(f2(Foo)) + +The evaluation rules for the decorator expressions are the same as for +function decorators. The result is then bound to the class name. + +Changed in version 3.9: Classes may be decorated with any valid +"assignment_expression". Previously, the grammar was much more +restrictive; see **PEP 614** for details. + +A list of type parameters may be given in square brackets immediately +after the class’s name. This indicates to static type checkers that +the class is generic. At runtime, the type parameters can be retrieved +from the class’s "__type_params__" attribute. See Generic classes for +more. 
+ +Changed in version 3.12: Type parameter lists are new in Python 3.12. + +**Programmer’s note:** Variables defined in the class definition are +class attributes; they are shared by instances. Instance attributes +can be set in a method with "self.name = value". Both class and +instance attributes are accessible through the notation “"self.name"”, +and an instance attribute hides a class attribute with the same name +when accessed in this way. Class attributes can be used as defaults +for instance attributes, but using mutable values there can lead to +unexpected results. Descriptors can be used to create instance +variables with different implementation details. + +See also: + + **PEP 3115** - Metaclasses in Python 3000 + The proposal that changed the declaration of metaclasses to the + current syntax, and the semantics for how classes with + metaclasses are constructed. + + **PEP 3129** - Class Decorators + The proposal that added class decorators. Function and method + decorators were introduced in **PEP 318**. + + +Coroutines +========== + +Added in version 3.5. + + +Coroutine function definition +----------------------------- + + async_funcdef: [decorators] "async" "def" funcname "(" [parameter_list] ")" + ["->" expression] ":" suite + +Execution of Python coroutines can be suspended and resumed at many +points (see *coroutine*). "await" expressions, "async for" and "async +with" can only be used in the body of a coroutine function. + +Functions defined with "async def" syntax are always coroutine +functions, even if they do not contain "await" or "async" keywords. + +It is a "SyntaxError" to use a "yield from" expression inside the body +of a coroutine function. + +An example of a coroutine function: + + async def func(param1, param2): + do_stuff() + await some_coroutine() + +Changed in version 3.7: "await" and "async" are now keywords; +previously they were only treated as such inside the body of a +coroutine function. + + +The "async for" statement +------------------------- + + async_for_stmt: "async" for_stmt + +An *asynchronous iterable* provides an "__aiter__" method that +directly returns an *asynchronous iterator*, which can call +asynchronous code in its "__anext__" method. + +The "async for" statement allows convenient iteration over +asynchronous iterables. + +The following code: + + async for TARGET in ITER: + SUITE + else: + SUITE2 + +Is semantically equivalent to: + + iter = (ITER).__aiter__() + running = True + + while running: + try: + TARGET = await iter.__anext__() + except StopAsyncIteration: + running = False + else: + SUITE + else: + SUITE2 + +except that implicit special method lookup is used for "__aiter__()" +and "__anext__()". + +It is a "SyntaxError" to use an "async for" statement outside the body +of a coroutine function. + + +The "async with" statement +-------------------------- + + async_with_stmt: "async" with_stmt + +An *asynchronous context manager* is a *context manager* that is able +to suspend execution in its *enter* and *exit* methods. + +The following code: + + async with EXPRESSION as TARGET: + SUITE + +is semantically equivalent to: + + manager = (EXPRESSION) + aenter = manager.__aenter__ + aexit = manager.__aexit__ + value = await aenter() + hit_except = False + + try: + TARGET = value + SUITE + except: + hit_except = True + if not await aexit(*sys.exc_info()): + raise + finally: + if not hit_except: + await aexit(None, None, None) + +except that implicit special method lookup is used for "__aenter__()" +and "__aexit__()". 
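+
+For illustration only (this sketch is not part of the specification;
+the names "AsyncResource" and "main" are invented for this example),
+a minimal asynchronous context manager and its use:
+
+   import asyncio
+
+   class AsyncResource:
+       async def __aenter__(self):
+           await asyncio.sleep(0)  # e.g. acquire a connection
+           return self
+
+       async def __aexit__(self, exc_type, exc_value, traceback):
+           await asyncio.sleep(0)  # e.g. release the connection
+           return False            # do not suppress exceptions
+
+   async def main():
+       async with AsyncResource() as res:
+           print("resource in use:", res)
+
+   asyncio.run(main())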
+ +It is a "SyntaxError" to use an "async with" statement outside the +body of a coroutine function. + +See also: + + **PEP 492** - Coroutines with async and await syntax + The proposal that made coroutines a proper standalone concept in + Python, and added supporting syntax. + + +Type parameter lists +==================== + +Added in version 3.12. + +Changed in version 3.13: Support for default values was added (see +**PEP 696**). + + type_params: "[" type_param ("," type_param)* "]" + type_param: typevar | typevartuple | paramspec + typevar: identifier (":" expression)? ("=" expression)? + typevartuple: "*" identifier ("=" expression)? + paramspec: "**" identifier ("=" expression)? + +Functions (including coroutines), classes and type aliases may contain +a type parameter list: + + def max[T](args: list[T]) -> T: + ... + + async def amax[T](args: list[T]) -> T: + ... + + class Bag[T]: + def __iter__(self) -> Iterator[T]: + ... + + def add(self, arg: T) -> None: + ... + + type ListOrSet[T] = list[T] | set[T] + +Semantically, this indicates that the function, class, or type alias +is generic over a type variable. This information is primarily used by +static type checkers, and at runtime, generic objects behave much like +their non-generic counterparts. + +Type parameters are declared in square brackets ("[]") immediately +after the name of the function, class, or type alias. The type +parameters are accessible within the scope of the generic object, but +not elsewhere. Thus, after a declaration "def func[T](): pass", the +name "T" is not available in the module scope. Below, the semantics of +generic objects are described with more precision. The scope of type +parameters is modeled with a special function (technically, an +annotation scope) that wraps the creation of the generic object. + +Generic functions, classes, and type aliases have a "__type_params__" +attribute listing their type parameters. + +Type parameters come in three kinds: + +* "typing.TypeVar", introduced by a plain name (e.g., "T"). + Semantically, this represents a single type to a type checker. + +* "typing.TypeVarTuple", introduced by a name prefixed with a single + asterisk (e.g., "*Ts"). Semantically, this stands for a tuple of any + number of types. + +* "typing.ParamSpec", introduced by a name prefixed with two asterisks + (e.g., "**P"). Semantically, this stands for the parameters of a + callable. + +"typing.TypeVar" declarations can define *bounds* and *constraints* +with a colon (":") followed by an expression. A single expression +after the colon indicates a bound (e.g. "T: int"). Semantically, this +means that the "typing.TypeVar" can only represent types that are a +subtype of this bound. A parenthesized tuple of expressions after the +colon indicates a set of constraints (e.g. "T: (str, bytes)"). Each +member of the tuple should be a type (again, this is not enforced at +runtime). Constrained type variables can only take on one of the types +in the list of constraints. + +For "typing.TypeVar"s declared using the type parameter list syntax, +the bound and constraints are not evaluated when the generic object is +created, but only when the value is explicitly accessed through the +attributes "__bound__" and "__constraints__". To accomplish this, the +bounds or constraints are evaluated in a separate annotation scope. + +"typing.TypeVarTuple"s and "typing.ParamSpec"s cannot have bounds or +constraints. 
+ +All three flavors of type parameters can also have a *default value*, +which is used when the type parameter is not explicitly provided. This +is added by appending a single equals sign ("=") followed by an +expression. Like the bounds and constraints of type variables, the +default value is not evaluated when the object is created, but only +when the type parameter’s "__default__" attribute is accessed. To this +end, the default value is evaluated in a separate annotation scope. If +no default value is specified for a type parameter, the "__default__" +attribute is set to the special sentinel object "typing.NoDefault". + +The following example indicates the full set of allowed type parameter +declarations: + + def overly_generic[ + SimpleTypeVar, + TypeVarWithDefault = int, + TypeVarWithBound: int, + TypeVarWithConstraints: (str, bytes), + *SimpleTypeVarTuple = (int, float), + **SimpleParamSpec = (str, bytearray), + ]( + a: SimpleTypeVar, + b: TypeVarWithDefault, + c: TypeVarWithBound, + d: Callable[SimpleParamSpec, TypeVarWithConstraints], + *e: SimpleTypeVarTuple, + ): ... + + +Generic functions +----------------- + +Generic functions are declared as follows: + + def func[T](arg: T): ... + +This syntax is equivalent to: + + annotation-def TYPE_PARAMS_OF_func(): + T = typing.TypeVar("T") + def func(arg: T): ... + func.__type_params__ = (T,) + return func + func = TYPE_PARAMS_OF_func() + +Here "annotation-def" indicates an annotation scope, which is not +actually bound to any name at runtime. (One other liberty is taken in +the translation: the syntax does not go through attribute access on +the "typing" module, but creates an instance of "typing.TypeVar" +directly.) + +The annotations of generic functions are evaluated within the +annotation scope used for declaring the type parameters, but the +function’s defaults and decorators are not. + +The following example illustrates the scoping rules for these cases, +as well as for additional flavors of type parameters: + + @decorator + def func[T: int, *Ts, **P](*args: *Ts, arg: Callable[P, T] = some_default): + ... + +Except for the lazy evaluation of the "TypeVar" bound, this is +equivalent to: + + DEFAULT_OF_arg = some_default + + annotation-def TYPE_PARAMS_OF_func(): + + annotation-def BOUND_OF_T(): + return int + # In reality, BOUND_OF_T() is evaluated only on demand. + T = typing.TypeVar("T", bound=BOUND_OF_T()) + + Ts = typing.TypeVarTuple("Ts") + P = typing.ParamSpec("P") + + def func(*args: *Ts, arg: Callable[P, T] = DEFAULT_OF_arg): + ... + + func.__type_params__ = (T, Ts, P) + return func + func = decorator(TYPE_PARAMS_OF_func()) + +The capitalized names like "DEFAULT_OF_arg" are not actually bound at +runtime. + + +Generic classes +--------------- + +Generic classes are declared as follows: + + class Bag[T]: ... + +This syntax is equivalent to: + + annotation-def TYPE_PARAMS_OF_Bag(): + T = typing.TypeVar("T") + class Bag(typing.Generic[T]): + __type_params__ = (T,) + ... + return Bag + Bag = TYPE_PARAMS_OF_Bag() + +Here again "annotation-def" (not a real keyword) indicates an +annotation scope, and the name "TYPE_PARAMS_OF_Bag" is not actually +bound at runtime. + +Generic classes implicitly inherit from "typing.Generic". The base +classes and keyword arguments of generic classes are evaluated within +the type scope for the type parameters, and decorators are evaluated +outside that scope. This is illustrated by this example: + + @decorator + class Bag(Base[T], arg=T): ... 
+ +This is equivalent to: + + annotation-def TYPE_PARAMS_OF_Bag(): + T = typing.TypeVar("T") + class Bag(Base[T], typing.Generic[T], arg=T): + __type_params__ = (T,) + ... + return Bag + Bag = decorator(TYPE_PARAMS_OF_Bag()) + + +Generic type aliases +-------------------- + +The "type" statement can also be used to create a generic type alias: + + type ListOrSet[T] = list[T] | set[T] + +Except for the lazy evaluation of the value, this is equivalent to: + + annotation-def TYPE_PARAMS_OF_ListOrSet(): + T = typing.TypeVar("T") + + annotation-def VALUE_OF_ListOrSet(): + return list[T] | set[T] + # In reality, the value is lazily evaluated + return typing.TypeAliasType("ListOrSet", VALUE_OF_ListOrSet(), type_params=(T,)) + ListOrSet = TYPE_PARAMS_OF_ListOrSet() + +Here, "annotation-def" (not a real keyword) indicates an annotation +scope. The capitalized names like "TYPE_PARAMS_OF_ListOrSet" are not +actually bound at runtime. + + +Annotations +=========== + +Changed in version 3.14: Annotations are now lazily evaluated by +default. + +Variables and function parameters may carry *annotations*, created by +adding a colon after the name, followed by an expression: + + x: annotation = 1 + def f(param: annotation): ... + +Functions may also carry a return annotation following an arrow: + + def f() -> annotation: ... + +Annotations are conventionally used for *type hints*, but this is not +enforced by the language, and in general annotations may contain +arbitrary expressions. The presence of annotations does not change the +runtime semantics of the code, except if some mechanism is used that +introspects and uses the annotations (such as "dataclasses" or +"functools.singledispatch()"). + +By default, annotations are lazily evaluated in an annotation scope. +This means that they are not evaluated when the code containing the +annotation is evaluated. Instead, the interpreter saves information +that can be used to evaluate the annotation later if requested. The +"annotationlib" module provides tools for evaluating annotations. + +If the future statement "from __future__ import annotations" is +present, all annotations are instead stored as strings: + + >>> from __future__ import annotations + >>> def f(param: annotation): ... + >>> f.__annotations__ + {'param': 'annotation'} + +This future statement will be deprecated and removed in a future +version of Python, but not before Python 3.13 reaches its end of life +(see **PEP 749**). When it is used, introspection tools like +"annotationlib.get_annotations()" and "typing.get_type_hints()" are +less likely to be able to resolve annotations at runtime. + +-[ Footnotes ]- + +[1] The exception is propagated to the invocation stack unless there + is a "finally" clause which happens to raise another exception. + That new exception causes the old one to be lost. + +[2] In pattern matching, a sequence is defined as one of the + following: + + * a class that inherits from "collections.abc.Sequence" + + * a Python class that has been registered as + "collections.abc.Sequence" + + * a builtin class that has its (CPython) "Py_TPFLAGS_SEQUENCE" bit + set + + * a class that inherits from any of the above + + The following standard library classes are sequences: + + * "array.array" + + * "collections.deque" + + * "list" + + * "memoryview" + + * "range" + + * "tuple" + + Note: + + Subject values of type "str", "bytes", and "bytearray" do not + match sequence patterns. 
+ +[3] In pattern matching, a mapping is defined as one of the following: + + * a class that inherits from "collections.abc.Mapping" + + * a Python class that has been registered as + "collections.abc.Mapping" + + * a builtin class that has its (CPython) "Py_TPFLAGS_MAPPING" bit + set + + * a class that inherits from any of the above + + The standard library classes "dict" and "types.MappingProxyType" + are mappings. + +[4] A string literal appearing as the first statement in the function + body is transformed into the function’s "__doc__" attribute and + therefore the function’s *docstring*. + +[5] A string literal appearing as the first statement in the class + body is transformed into the namespace’s "__doc__" item and + therefore the class’s *docstring*. +''', + 'context-managers': r'''With Statement Context Managers +******************************* + +A *context manager* is an object that defines the runtime context to +be established when executing a "with" statement. The context manager +handles the entry into, and the exit from, the desired runtime context +for the execution of the block of code. Context managers are normally +invoked using the "with" statement (described in section The with +statement), but can also be used by directly invoking their methods. + +Typical uses of context managers include saving and restoring various +kinds of global state, locking and unlocking resources, closing opened +files, etc. + +For more information on context managers, see Context Manager Types. +The "object" class itself does not provide the context manager +methods. + +object.__enter__(self) + + Enter the runtime context related to this object. The "with" + statement will bind this method’s return value to the target(s) + specified in the "as" clause of the statement, if any. + +object.__exit__(self, exc_type, exc_value, traceback) + + Exit the runtime context related to this object. The parameters + describe the exception that caused the context to be exited. If the + context was exited without an exception, all three arguments will + be "None". + + If an exception is supplied, and the method wishes to suppress the + exception (i.e., prevent it from being propagated), it should + return a true value. Otherwise, the exception will be processed + normally upon exit from this method. + + Note that "__exit__()" methods should not reraise the passed-in + exception; this is the caller’s responsibility. + +See also: + + **PEP 343** - The “with” statement + The specification, background, and examples for the Python "with" + statement. +''', + 'continue': r'''The "continue" statement +************************ + + continue_stmt: "continue" + +"continue" may only occur syntactically nested in a "for" or "while" +loop, but not nested in a function or class definition within that +loop. It continues with the next cycle of the nearest enclosing loop. + +When "continue" passes control out of a "try" statement with a +"finally" clause, that "finally" clause is executed before really +starting the next loop cycle. +''', + 'conversions': r'''Arithmetic conversions +********************** + +When a description of an arithmetic operator below uses the phrase +“the numeric arguments are converted to a common real type”, this +means that the operator implementation for built-in numeric types +works as described in the Numeric Types section of the standard +library documentation. 
+ +Some additional rules apply for certain operators and non-numeric +operands (for example, a string as a left argument to the "%" +operator). Extensions must define their own conversion behavior. +''', + 'customization': r'''Basic customization +******************* + +object.__new__(cls[, ...]) + + Called to create a new instance of class *cls*. "__new__()" is a + static method (special-cased so you need not declare it as such) + that takes the class of which an instance was requested as its + first argument. The remaining arguments are those passed to the + object constructor expression (the call to the class). The return + value of "__new__()" should be the new object instance (usually an + instance of *cls*). + + Typical implementations create a new instance of the class by + invoking the superclass’s "__new__()" method using + "super().__new__(cls[, ...])" with appropriate arguments and then + modifying the newly created instance as necessary before returning + it. + + If "__new__()" is invoked during object construction and it returns + an instance of *cls*, then the new instance’s "__init__()" method + will be invoked like "__init__(self[, ...])", where *self* is the + new instance and the remaining arguments are the same as were + passed to the object constructor. + + If "__new__()" does not return an instance of *cls*, then the new + instance’s "__init__()" method will not be invoked. + + "__new__()" is intended mainly to allow subclasses of immutable + types (like int, str, or tuple) to customize instance creation. It + is also commonly overridden in custom metaclasses in order to + customize class creation. + +object.__init__(self[, ...]) + + Called after the instance has been created (by "__new__()"), but + before it is returned to the caller. The arguments are those + passed to the class constructor expression. If a base class has an + "__init__()" method, the derived class’s "__init__()" method, if + any, must explicitly call it to ensure proper initialization of the + base class part of the instance; for example: + "super().__init__([args...])". + + Because "__new__()" and "__init__()" work together in constructing + objects ("__new__()" to create it, and "__init__()" to customize + it), no non-"None" value may be returned by "__init__()"; doing so + will cause a "TypeError" to be raised at runtime. + +object.__del__(self) + + Called when the instance is about to be destroyed. This is also + called a finalizer or (improperly) a destructor. If a base class + has a "__del__()" method, the derived class’s "__del__()" method, + if any, must explicitly call it to ensure proper deletion of the + base class part of the instance. + + It is possible (though not recommended!) for the "__del__()" method + to postpone destruction of the instance by creating a new reference + to it. This is called object *resurrection*. It is + implementation-dependent whether "__del__()" is called a second + time when a resurrected object is about to be destroyed; the + current *CPython* implementation only calls it once. + + It is not guaranteed that "__del__()" methods are called for + objects that still exist when the interpreter exits. + "weakref.finalize" provides a straightforward way to register a + cleanup function to be called when an object is garbage collected. + + Note: + + "del x" doesn’t directly call "x.__del__()" — the former + decrements the reference count for "x" by one, and the latter is + only called when "x"’s reference count reaches zero. 
+ + **CPython implementation detail:** It is possible for a reference + cycle to prevent the reference count of an object from going to + zero. In this case, the cycle will be later detected and deleted + by the *cyclic garbage collector*. A common cause of reference + cycles is when an exception has been caught in a local variable. + The frame’s locals then reference the exception, which references + its own traceback, which references the locals of all frames caught + in the traceback. + + See also: Documentation for the "gc" module. + + Warning: + + Due to the precarious circumstances under which "__del__()" + methods are invoked, exceptions that occur during their execution + are ignored, and a warning is printed to "sys.stderr" instead. + In particular: + + * "__del__()" can be invoked when arbitrary code is being + executed, including from any arbitrary thread. If "__del__()" + needs to take a lock or invoke any other blocking resource, it + may deadlock as the resource may already be taken by the code + that gets interrupted to execute "__del__()". + + * "__del__()" can be executed during interpreter shutdown. As a + consequence, the global variables it needs to access (including + other modules) may already have been deleted or set to "None". + Python guarantees that globals whose name begins with a single + underscore are deleted from their module before other globals + are deleted; if no other references to such globals exist, this + may help in assuring that imported modules are still available + at the time when the "__del__()" method is called. + +object.__repr__(self) + + Called by the "repr()" built-in function to compute the “official” + string representation of an object. If at all possible, this + should look like a valid Python expression that could be used to + recreate an object with the same value (given an appropriate + environment). If this is not possible, a string of the form + "<...some useful description...>" should be returned. The return + value must be a string object. If a class defines "__repr__()" but + not "__str__()", then "__repr__()" is also used when an “informal” + string representation of instances of that class is required. + + This is typically used for debugging, so it is important that the + representation is information-rich and unambiguous. A default + implementation is provided by the "object" class itself. + +object.__str__(self) + + Called by "str(object)", the default "__format__()" implementation, + and the built-in function "print()", to compute the “informal” or + nicely printable string representation of an object. The return + value must be a str object. + + This method differs from "object.__repr__()" in that there is no + expectation that "__str__()" return a valid Python expression: a + more convenient or concise representation can be used. + + The default implementation defined by the built-in type "object" + calls "object.__repr__()". + +object.__bytes__(self) + + Called by bytes to compute a byte-string representation of an + object. This should return a "bytes" object. The "object" class + itself does not provide this method. + +object.__format__(self, format_spec) + + Called by the "format()" built-in function, and by extension, + evaluation of formatted string literals and the "str.format()" + method, to produce a “formatted” string representation of an + object. The *format_spec* argument is a string that contains a + description of the formatting options desired. 
The interpretation
+   of the *format_spec* argument is up to the type implementing
+   "__format__()", however most classes will either delegate
+   formatting to one of the built-in types, or use a similar
+   formatting option syntax.
+
+   See Format specification mini-language for a description of the
+   standard formatting syntax.
+
+   The return value must be a string object.
+
+   The default implementation by the "object" class should be given
+   an empty *format_spec* string. It delegates to "__str__()".
+
+   Changed in version 3.4: The __format__ method of "object" itself
+   raises a "TypeError" if passed any non-empty string.
+
+   Changed in version 3.7: "object.__format__(x, '')" is now
+   equivalent to "str(x)" rather than "format(str(x), '')".
+
+object.__lt__(self, other)
+object.__le__(self, other)
+object.__eq__(self, other)
+object.__ne__(self, other)
+object.__gt__(self, other)
+object.__ge__(self, other)
+
+   These are the so-called “rich comparison” methods. The
+   correspondence between operator symbols and method names is as
+   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",
+   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y"
+   calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".
+
+   A rich comparison method may return the singleton "NotImplemented"
+   if it does not implement the operation for a given pair of
+   arguments. By convention, "False" and "True" are returned for a
+   successful comparison. However, these methods can return any
+   value, so if the comparison operator is used in a Boolean context
+   (e.g., in the condition of an "if" statement), Python will call
+   "bool()" on the value to determine if the result is true or false.
+
+   By default, "object" implements "__eq__()" by using "is",
+   returning "NotImplemented" in the case of a false comparison:
+   "True if x is y else NotImplemented". For "__ne__()", by default
+   it delegates to "__eq__()" and inverts the result unless it is
+   "NotImplemented". There are no other implied relationships among
+   the comparison operators or default implementations; for example,
+   the truth of "(x<y or x==y)" does not imply "x<=y". To
+   automatically generate ordering operations from a single root
+   operation, see "functools.total_ordering()".
+
+   See the paragraph on "__hash__()" for some important notes on
+   creating *hashable* objects which support custom comparison
+   operations and are usable as dictionary keys.
+
+   There is no swapped-argument version of these methods (to be used
+   when the left argument does not support the operation but the
+   right argument does); rather, "__lt__()" and "__gt__()" are each
+   other’s reflection, and so are "__le__()" and "__ge__()". If the
+   operands are of different types, and the right operand’s type is a
+   direct or indirect subclass of the left operand’s type, the
+   reflected method of the right operand has priority, otherwise the
+   left operand’s method has priority. Virtual subclassing is not
+   considered.
+
+   When no appropriate method returns any value other than
+   "NotImplemented", the "==" and "!=" operators will fall back to
+   "is" and "is not", respectively.
+
+object.__hash__(self)
+
+   Called by built-in function "hash()" and for operations on members
+   of hashed collections including "set", "frozenset", and "dict".
+   The "__hash__()" method should return an integer. The only
+   required property is that objects which compare equal have the
+   same hash value; it is advised to mix together the hash values of
+   the components of the object that also play a part in comparison
+   of objects by packing them into a tuple and hashing the tuple.
+   Example:
+
+      def __hash__(self):
+          return hash((self.name, self.nick, self.color))
+
+   Note:
+
+     "hash()" truncates the value returned from an object’s custom
+     "__hash__()" method to the size of a "Py_ssize_t". This is
+     typically 8 bytes on 64-bit builds and 4 bytes on 32-bit builds.
+     If an object’s "__hash__()" must interoperate on builds of
+     different bit sizes, be sure to check the width on all supported
+     builds. An easy way to do this is with "python -c "import sys;
+     print(sys.hash_info.width)"".
+
+   If a class does not define an "__eq__()" method it should not
+   define a "__hash__()" operation either; if it defines "__eq__()"
+   but not "__hash__()", its instances will not be usable as items in
+   hashable collections. If a class defines mutable objects and
+   implements an "__eq__()" method, it should not implement
+   "__hash__()", since the implementation of *hashable* collections
+   requires that a key’s hash value is immutable (if the object’s
+   hash value changes, it will be in the wrong hash bucket).
+
+   User-defined classes have "__eq__()" and "__hash__()" methods by
+   default; with them, all objects compare unequal (except with
+   themselves) and "x.__hash__()" returns an appropriate value such
+   that "x == y" implies both that "x is y" and "hash(x) == hash(y)".
+
+   A class that overrides "__eq__()" and does not define "__hash__()"
+   will have its "__hash__()" implicitly set to "None". When the
+   "__hash__()" method of a class is "None", instances of the class
+   will raise an appropriate "TypeError" when a program attempts to
+   retrieve their hash value, and will also be correctly identified
+   as unhashable when checking "isinstance(obj,
+   collections.abc.Hashable)".
+
+   If a class that overrides "__eq__()" needs to retain the hash
+   implementation from a parent class, the interpreter must be told
+   this explicitly by setting "__hash__ = <ParentClass>.__hash__".
+
+   If a class that does not override "__eq__()" wishes to suppress
+   hash support, it should include "__hash__ = None" in the class
+   definition. A class which defines its own "__hash__()" that
+   explicitly raises a "TypeError" would be incorrectly identified as
+   hashable by an "isinstance(obj, collections.abc.Hashable)" call.
+
+   Note:
+
+     By default, the "__hash__()" values of str and bytes objects are
+     “salted” with an unpredictable random value. Although they
+     remain constant within an individual Python process, they are
+     not predictable between repeated invocations of Python.
+
+     This is intended to provide protection against a denial-of-
+     service caused by carefully chosen inputs that exploit the worst
+     case performance of a dict insertion, *O*(*n*^2) complexity. See
+     https://ocert.org/advisories/ocert-2011-003.html for details.
+
+     Changing hash values affects the iteration order of sets. Python
+     has never made guarantees about this ordering (and it typically
+     varies between 32-bit and 64-bit builds).
+
+     See also "PYTHONHASHSEED".
+
+   Changed in version 3.3: Hash randomization is enabled by default.
+
+object.__bool__(self)
+
+   Called to implement truth value testing and the built-in operation
+   "bool()"; should return "False" or "True". When this method is not
+   defined, "__len__()" is called, if it is defined, and the object
+   is considered true if its result is nonzero. If a class defines
+   neither "__len__()" nor "__bool__()" (which is true of the
+   "object" class itself), all its instances are considered true.
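+
+As a combined illustration of several of these methods, consider this
+minimal sketch (the class and attribute names are hypothetical, and
+instances are treated as immutable by convention):
+
+   class Vector:
+       def __init__(self, x, y):
+           self.x = x
+           self.y = y
+
+       def __repr__(self):
+           return f"Vector({self.x!r}, {self.y!r})"
+
+       def __eq__(self, other):
+           if not isinstance(other, Vector):
+               return NotImplemented
+           return (self.x, self.y) == (other.x, other.y)
+
+       def __hash__(self):
+           return hash((self.x, self.y))
+
+       def __bool__(self):
+           # A zero-length vector is considered false.
+           return self.x != 0 or self.y != 0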
+''', + 'debugger': r'''"pdb" — The Python Debugger +*************************** + +**Source code:** Lib/pdb.py + +====================================================================== + +The module "pdb" defines an interactive source code debugger for +Python programs. It supports setting (conditional) breakpoints and +single stepping at the source line level, inspection of stack frames, +source code listing, and evaluation of arbitrary Python code in the +context of any stack frame. It also supports post-mortem debugging +and can be called under program control. + +The debugger is extensible – it is actually defined as the class +"Pdb". This is currently undocumented but easily understood by reading +the source. The extension interface uses the modules "bdb" and "cmd". + +See also: + + Module "faulthandler" + Used to dump Python tracebacks explicitly, on a fault, after a + timeout, or on a user signal. + + Module "traceback" + Standard interface to extract, format and print stack traces of + Python programs. + +The typical usage to break into the debugger is to insert: + + import pdb; pdb.set_trace() + +Or: + + breakpoint() + +at the location you want to break into the debugger, and then run the +program. You can then step through the code following this statement, +and continue running without the debugger using the "continue" +command. + +Changed in version 3.7: The built-in "breakpoint()", when called with +defaults, can be used instead of "import pdb; pdb.set_trace()". + + def double(x): + breakpoint() + return x * 2 + val = 3 + print(f"{val} * 2 is {double(val)}") + +The debugger’s prompt is "(Pdb)", which is the indicator that you are +in debug mode: + + > ...(2)double() + -> breakpoint() + (Pdb) p x + 3 + (Pdb) continue + 3 * 2 is 6 + +Changed in version 3.3: Tab-completion via the "readline" module is +available for commands and command arguments, e.g. the current global +and local names are offered as arguments of the "p" command. + + +Command-line interface +====================== + +You can also invoke "pdb" from the command line to debug other +scripts. For example: + + python -m pdb [-c command] (-m module | -p pid | pyfile) [args ...] + +When invoked as a module, pdb will automatically enter post-mortem +debugging if the program being debugged exits abnormally. After post- +mortem debugging (or after normal exit of the program), pdb will +restart the program. Automatic restarting preserves pdb’s state (such +as breakpoints) and in most cases is more useful than quitting the +debugger upon program’s exit. + +-c, --command + + To execute commands as if given in a ".pdbrc" file; see Debugger + commands. + + Changed in version 3.2: Added the "-c" option. + +-m + + To execute modules similar to the way "python -m" does. As with a + script, the debugger will pause execution just before the first + line of the module. + + Changed in version 3.7: Added the "-m" option. + +-p, --pid + + Attach to the process with the specified PID. + + Added in version 3.14. + +To attach to a running Python process for remote debugging, use the +"-p" or "--pid" option with the target process’s PID: + + python -m pdb -p 1234 + +Note: + + Attaching to a process that is blocked in a system call or waiting + for I/O will only work once the next bytecode instruction is + executed or when the process receives a signal. + +Typical usage to execute a statement under control of the debugger is: + + >>> import pdb + >>> def f(x): + ... 
print(1 / x)
+   >>> pdb.run("f(2)")
+   > <string>(1)<module>()
+   (Pdb) continue
+   0.5
+   >>>
+
+The typical usage to inspect a crashed program is:
+
+   >>> import pdb
+   >>> def f(x):
+   ...     print(1 / x)
+   ...
+   >>> f(0)
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+     File "<stdin>", line 2, in f
+   ZeroDivisionError: division by zero
+   >>> pdb.pm()
+   > <stdin>(2)f()
+   (Pdb) p x
+   0
+   (Pdb)
+
+Changed in version 3.13: The implementation of **PEP 667** means that
+name assignments made via "pdb" will immediately affect the active
+scope, even when running inside an *optimized scope*.
+
+The module defines the following functions; each enters the debugger
+in a slightly different way:
+
+pdb.run(statement, globals=None, locals=None)
+
+   Execute the *statement* (given as a string or a code object) under
+   debugger control. The debugger prompt appears before any code is
+   executed; you can set breakpoints and type "continue", or you can
+   step through the statement using "step" or "next" (all these
+   commands are explained below). The optional *globals* and
+   *locals* arguments specify the environment in which the code is
+   executed; by default the dictionary of the module "__main__" is
+   used. (See the explanation of the built-in "exec()" or "eval()"
+   functions.)
+
+pdb.runeval(expression, globals=None, locals=None)
+
+   Evaluate the *expression* (given as a string or a code object)
+   under debugger control. When "runeval()" returns, it returns the
+   value of the *expression*. Otherwise this function is similar to
+   "run()".
+
+pdb.runcall(function, *args, **kwds)
+
+   Call the *function* (a function or method object, not a string)
+   with the given arguments. When "runcall()" returns, it returns
+   whatever the function call returned. The debugger prompt appears
+   as soon as the function is entered.
+
+pdb.set_trace(*, header=None, commands=None)
+
+   Enter the debugger at the calling stack frame. This is useful to
+   hard-code a breakpoint at a given point in a program, even if the
+   code is not otherwise being debugged (e.g. when an assertion
+   fails). If given, *header* is printed to the console just before
+   debugging begins. The *commands* argument, if given, is a list of
+   commands to execute when the debugger starts.
+
+   Changed in version 3.7: The keyword-only argument *header*.
+
+   Changed in version 3.13: "set_trace()" will enter the debugger
+   immediately, rather than on the next line of code to be executed.
+
+   Added in version 3.14: The *commands* argument.
+
+awaitable pdb.set_trace_async(*, header=None, commands=None)
+
+   async version of "set_trace()". This function should be used
+   inside an async function with "await".
+
+      async def f():
+          await pdb.set_trace_async()
+
+   "await" statements are supported if the debugger is invoked by
+   this function.
+
+   Added in version 3.14.
+
+pdb.post_mortem(t=None)
+
+   Enter post-mortem debugging of the given exception or traceback
+   object. If no value is given, it uses the exception that is
+   currently being handled, or raises "ValueError" if there isn’t
+   one.
+
+   Changed in version 3.13: Support for exception objects was added.
+
+pdb.pm()
+
+   Enter post-mortem debugging of the exception found in
+   "sys.last_exc".
+
+pdb.set_default_backend(backend)
+
+   There are two supported backends for pdb: "'settrace'" and
+   "'monitoring'". See "bdb.Bdb" for details. The user can set the
+   default backend to use if none is specified when instantiating
+   "Pdb". If no backend is specified, the default is "'settrace'".
+ + Note: + + "breakpoint()" and "set_trace()" will not be affected by this + function. They always use "'monitoring'" backend. + + Added in version 3.14. + +pdb.get_default_backend() + + Returns the default backend for pdb. + + Added in version 3.14. + +The "run*" functions and "set_trace()" are aliases for instantiating +the "Pdb" class and calling the method of the same name. If you want +to access further features, you have to do this yourself: + +class pdb.Pdb(completekey='tab', stdin=None, stdout=None, skip=None, nosigint=False, readrc=True, mode=None, backend=None, colorize=False) + + "Pdb" is the debugger class. + + The *completekey*, *stdin* and *stdout* arguments are passed to the + underlying "cmd.Cmd" class; see the description there. + + The *skip* argument, if given, must be an iterable of glob-style + module name patterns. The debugger will not step into frames that + originate in a module that matches one of these patterns. [1] + + By default, Pdb sets a handler for the SIGINT signal (which is sent + when the user presses "Ctrl"-"C" on the console) when you give a + "continue" command. This allows you to break into the debugger + again by pressing "Ctrl"-"C". If you want Pdb not to touch the + SIGINT handler, set *nosigint* to true. + + The *readrc* argument defaults to true and controls whether Pdb + will load .pdbrc files from the filesystem. + + The *mode* argument specifies how the debugger was invoked. It + impacts the workings of some debugger commands. Valid values are + "'inline'" (used by the breakpoint() builtin), "'cli'" (used by the + command line invocation) or "None" (for backwards compatible + behaviour, as before the *mode* argument was added). + + The *backend* argument specifies the backend to use for the + debugger. If "None" is passed, the default backend will be used. + See "set_default_backend()". Otherwise the supported backends are + "'settrace'" and "'monitoring'". + + The *colorize* argument, if set to "True", will enable colorized + output in the debugger, if color is supported. This will highlight + source code displayed in pdb. + + Example call to enable tracing with *skip*: + + import pdb; pdb.Pdb(skip=['django.*']).set_trace() + + Raises an auditing event "pdb.Pdb" with no arguments. + + Changed in version 3.1: Added the *skip* parameter. + + Changed in version 3.2: Added the *nosigint* parameter. Previously, + a SIGINT handler was never set by Pdb. + + Changed in version 3.6: The *readrc* argument. + + Added in version 3.14: Added the *mode* argument. + + Added in version 3.14: Added the *backend* argument. + + Added in version 3.14: Added the *colorize* argument. + + Changed in version 3.14: Inline breakpoints like "breakpoint()" or + "pdb.set_trace()" will always stop the program at calling frame, + ignoring the *skip* pattern (if any). + + run(statement, globals=None, locals=None) + runeval(expression, globals=None, locals=None) + runcall(function, *args, **kwds) + set_trace() + + See the documentation for the functions explained above. + + +Debugger commands +================= + +The commands recognized by the debugger are listed below. Most +commands can be abbreviated to one or two letters as indicated; e.g. +"h(elp)" means that either "h" or "help" can be used to enter the help +command (but not "he" or "hel", nor "H" or "Help" or "HELP"). +Arguments to commands must be separated by whitespace (spaces or +tabs). Optional arguments are enclosed in square brackets ("[]") in +the command syntax; the square brackets must not be typed. 
+Alternatives in the command syntax are separated by a vertical bar +("|"). + +Entering a blank line repeats the last command entered. Exception: if +the last command was a "list" command, the next 11 lines are listed. + +Commands that the debugger doesn’t recognize are assumed to be Python +statements and are executed in the context of the program being +debugged. Python statements can also be prefixed with an exclamation +point ("!"). This is a powerful way to inspect the program being +debugged; it is even possible to change a variable or call a function. +When an exception occurs in such a statement, the exception name is +printed but the debugger’s state is not changed. + +Changed in version 3.13: Expressions/Statements whose prefix is a pdb +command are now correctly identified and executed. + +The debugger supports aliases. Aliases can have parameters which +allows one a certain level of adaptability to the context under +examination. + +Multiple commands may be entered on a single line, separated by ";;". +(A single ";" is not used as it is the separator for multiple commands +in a line that is passed to the Python parser.) No intelligence is +applied to separating the commands; the input is split at the first +";;" pair, even if it is in the middle of a quoted string. A +workaround for strings with double semicolons is to use implicit +string concatenation "';'';'" or "";"";"". + +To set a temporary global variable, use a *convenience variable*. A +*convenience variable* is a variable whose name starts with "$". For +example, "$foo = 1" sets a global variable "$foo" which you can use in +the debugger session. The *convenience variables* are cleared when +the program resumes execution so it’s less likely to interfere with +your program compared to using normal variables like "foo = 1". + +There are four preset *convenience variables*: + +* "$_frame": the current frame you are debugging + +* "$_retval": the return value if the frame is returning + +* "$_exception": the exception if the frame is raising an exception + +* "$_asynctask": the asyncio task if pdb stops in an async function + +Added in version 3.12: Added the *convenience variable* feature. + +Added in version 3.14: Added the "$_asynctask" convenience variable. + +If a file ".pdbrc" exists in the user’s home directory or in the +current directory, it is read with "'utf-8'" encoding and executed as +if it had been typed at the debugger prompt, with the exception that +empty lines and lines starting with "#" are ignored. This is +particularly useful for aliases. If both files exist, the one in the +home directory is read first and aliases defined there can be +overridden by the local file. + +Changed in version 3.2: ".pdbrc" can now contain commands that +continue debugging, such as "continue" or "next". Previously, these +commands had no effect. + +Changed in version 3.11: ".pdbrc" is now read with "'utf-8'" encoding. +Previously, it was read with the system locale encoding. + +h(elp) [command] + + Without argument, print the list of available commands. With a + *command* as argument, print help about that command. "help pdb" + displays the full documentation (the docstring of the "pdb" + module). Since the *command* argument must be an identifier, "help + exec" must be entered to get help on the "!" command. + +w(here) [count] + + Print a stack trace, with the most recent frame at the bottom. if + *count* is 0, print the current frame entry. If *count* is + negative, print the least recent - *count* frames. 
If *count* is + positive, print the most recent *count* frames. An arrow (">") + indicates the current frame, which determines the context of most + commands. + + Changed in version 3.14: *count* argument is added. + +d(own) [count] + + Move the current frame *count* (default one) levels down in the + stack trace (to a newer frame). + +u(p) [count] + + Move the current frame *count* (default one) levels up in the stack + trace (to an older frame). + +b(reak) [([filename:]lineno | function) [, condition]] + + With a *lineno* argument, set a break at line *lineno* in the + current file. The line number may be prefixed with a *filename* and + a colon, to specify a breakpoint in another file (possibly one that + hasn’t been loaded yet). The file is searched on "sys.path". + Acceptable forms of *filename* are "/abspath/to/file.py", + "relpath/file.py", "module" and "package.module". + + With a *function* argument, set a break at the first executable + statement within that function. *function* can be any expression + that evaluates to a function in the current namespace. + + If a second argument is present, it is an expression which must + evaluate to true before the breakpoint is honored. + + Without argument, list all breaks, including for each breakpoint, + the number of times that breakpoint has been hit, the current + ignore count, and the associated condition if any. + + Each breakpoint is assigned a number to which all the other + breakpoint commands refer. + +tbreak [([filename:]lineno | function) [, condition]] + + Temporary breakpoint, which is removed automatically when it is + first hit. The arguments are the same as for "break". + +cl(ear) [filename:lineno | bpnumber ...] + + With a *filename:lineno* argument, clear all the breakpoints at + this line. With a space separated list of breakpoint numbers, clear + those breakpoints. Without argument, clear all breaks (but first + ask confirmation). + +disable bpnumber [bpnumber ...] + + Disable the breakpoints given as a space separated list of + breakpoint numbers. Disabling a breakpoint means it cannot cause + the program to stop execution, but unlike clearing a breakpoint, it + remains in the list of breakpoints and can be (re-)enabled. + +enable bpnumber [bpnumber ...] + + Enable the breakpoints specified. + +ignore bpnumber [count] + + Set the ignore count for the given breakpoint number. If *count* + is omitted, the ignore count is set to 0. A breakpoint becomes + active when the ignore count is zero. When non-zero, the *count* + is decremented each time the breakpoint is reached and the + breakpoint is not disabled and any associated condition evaluates + to true. + +condition bpnumber [condition] + + Set a new *condition* for the breakpoint, an expression which must + evaluate to true before the breakpoint is honored. If *condition* + is absent, any existing condition is removed; i.e., the breakpoint + is made unconditional. + +commands [bpnumber] + + Specify a list of commands for breakpoint number *bpnumber*. The + commands themselves appear on the following lines. Type a line + containing just "end" to terminate the commands. An example: + + (Pdb) commands 1 + (com) p some_variable + (com) end + (Pdb) + + To remove all commands from a breakpoint, type "commands" and + follow it immediately with "end"; that is, give no commands. + + With no *bpnumber* argument, "commands" refers to the last + breakpoint set. + + You can use breakpoint commands to start your program up again. 
+ Simply use the "continue" command, or "step", or any other command + that resumes execution. + + Specifying any command resuming execution (currently "continue", + "step", "next", "return", "until", "jump", "quit" and their + abbreviations) terminates the command list (as if that command was + immediately followed by end). This is because any time you resume + execution (even with a simple next or step), you may encounter + another breakpoint—which could have its own command list, leading + to ambiguities about which list to execute. + + If the list of commands contains the "silent" command, or a command + that resumes execution, then the breakpoint message containing + information about the frame is not displayed. + + Changed in version 3.14: Frame information will not be displayed if + a command that resumes execution is present in the command list. + +s(tep) + + Execute the current line, stop at the first possible occasion + (either in a function that is called or on the next line in the + current function). + +n(ext) + + Continue execution until the next line in the current function is + reached or it returns. (The difference between "next" and "step" + is that "step" stops inside a called function, while "next" + executes called functions at (nearly) full speed, only stopping at + the next line in the current function.) + +unt(il) [lineno] + + Without argument, continue execution until the line with a number + greater than the current one is reached. + + With *lineno*, continue execution until a line with a number + greater or equal to *lineno* is reached. In both cases, also stop + when the current frame returns. + + Changed in version 3.2: Allow giving an explicit line number. + +r(eturn) + + Continue execution until the current function returns. + +c(ont(inue)) + + Continue execution, only stop when a breakpoint is encountered. + +j(ump) lineno + + Set the next line that will be executed. Only available in the + bottom-most frame. This lets you jump back and execute code again, + or jump forward to skip code that you don’t want to run. + + It should be noted that not all jumps are allowed – for instance it + is not possible to jump into the middle of a "for" loop or out of a + "finally" clause. + +l(ist) [first[, last]] + + List source code for the current file. Without arguments, list 11 + lines around the current line or continue the previous listing. + With "." as argument, list 11 lines around the current line. With + one argument, list 11 lines around at that line. With two + arguments, list the given range; if the second argument is less + than the first, it is interpreted as a count. + + The current line in the current frame is indicated by "->". If an + exception is being debugged, the line where the exception was + originally raised or propagated is indicated by ">>", if it differs + from the current line. + + Changed in version 3.2: Added the ">>" marker. + +ll | longlist + + List all source code for the current function or frame. + Interesting lines are marked as for "list". + + Added in version 3.2. + +a(rgs) + + Print the arguments of the current function and their current + values. + +p expression + + Evaluate *expression* in the current context and print its value. + + Note: + + "print()" can also be used, but is not a debugger command — this + executes the Python "print()" function. + +pp expression + + Like the "p" command, except the value of *expression* is pretty- + printed using the "pprint" module. + +whatis expression + + Print the type of *expression*. 
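+
+   For example, an illustrative session (the variable "config" and
+   the output shown are hypothetical):
+
+      (Pdb) p config
+      {'host': 'localhost', 'port': 8080, 'retries': 3, 'timeout': 30, 'verbose': True}
+      (Pdb) pp config
+      {'host': 'localhost',
+       'port': 8080,
+       'retries': 3,
+       'timeout': 30,
+       'verbose': True}
+      (Pdb) whatis config
+      <class 'dict'>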
+ +source expression + + Try to get source code of *expression* and display it. + + Added in version 3.2. + +display [expression] + + Display the value of *expression* if it changed, each time + execution stops in the current frame. + + Without *expression*, list all display expressions for the current + frame. + + Note: + + Display evaluates *expression* and compares to the result of the + previous evaluation of *expression*, so when the result is + mutable, display may not be able to pick up the changes. + + Example: + + lst = [] + breakpoint() + pass + lst.append(1) + print(lst) + + Display won’t realize "lst" has been changed because the result of + evaluation is modified in place by "lst.append(1)" before being + compared: + + > example.py(3)() + -> pass + (Pdb) display lst + display lst: [] + (Pdb) n + > example.py(4)() + -> lst.append(1) + (Pdb) n + > example.py(5)() + -> print(lst) + (Pdb) + + You can do some tricks with copy mechanism to make it work: + + > example.py(3)() + -> pass + (Pdb) display lst[:] + display lst[:]: [] + (Pdb) n + > example.py(4)() + -> lst.append(1) + (Pdb) n + > example.py(5)() + -> print(lst) + display lst[:]: [1] [old: []] + (Pdb) + + Added in version 3.2. + +undisplay [expression] + + Do not display *expression* anymore in the current frame. Without + *expression*, clear all display expressions for the current frame. + + Added in version 3.2. + +interact + + Start an interactive interpreter (using the "code" module) in a new + global namespace initialised from the local and global namespaces + for the current scope. Use "exit()" or "quit()" to exit the + interpreter and return to the debugger. + + Note: + + As "interact" creates a new dedicated namespace for code + execution, assignments to variables will not affect the original + namespaces. However, modifications to any referenced mutable + objects will be reflected in the original namespaces as usual. + + Added in version 3.2. + + Changed in version 3.13: "exit()" and "quit()" can be used to exit + the "interact" command. + + Changed in version 3.13: "interact" directs its output to the + debugger’s output channel rather than "sys.stderr". + +alias [name [command]] + + Create an alias called *name* that executes *command*. The + *command* must *not* be enclosed in quotes. Replaceable parameters + can be indicated by "%1", "%2", … and "%9", while "%*" is replaced + by all the parameters. If *command* is omitted, the current alias + for *name* is shown. If no arguments are given, all aliases are + listed. + + Aliases may be nested and can contain anything that can be legally + typed at the pdb prompt. Note that internal pdb commands *can* be + overridden by aliases. Such a command is then hidden until the + alias is removed. Aliasing is recursively applied to the first + word of the command line; all other words in the line are left + alone. + + As an example, here are two useful aliases (especially when placed + in the ".pdbrc" file): + + # Print instance variables (usage "pi classInst") + alias pi for k in %1.__dict__.keys(): print(f"%1.{k} = {%1.__dict__[k]}") + # Print instance variables in self + alias ps pi self + +unalias name + + Delete the specified alias *name*. + +! statement + + Execute the (one-line) *statement* in the context of the current + stack frame. The exclamation point can be omitted unless the first + word of the statement resembles a debugger command, e.g.: + + (Pdb) ! 
n=42
+   (Pdb)
+
+   To set a global variable, you can prefix the assignment command
+   with a "global" statement on the same line, e.g.:
+
+      (Pdb) global list_options; list_options = ['-l']
+      (Pdb)
+
+run [args ...]
+restart [args ...]
+
+   Restart the debugged Python program. If *args* is supplied, it is
+   split with "shlex" and the result is used as the new "sys.argv".
+   History, breakpoints, actions and debugger options are preserved.
+   "restart" is an alias for "run".
+
+   Changed in version 3.14: "run" and "restart" commands are disabled
+   when the debugger is invoked in "'inline'" mode.
+
+q(uit)
+
+   Quit from the debugger. The program being executed is aborted. An
+   end-of-file input is equivalent to "quit".
+
+   A confirmation prompt will be shown if the debugger is invoked in
+   "'inline'" mode. Either "y", "Y", "<Enter>" or "EOF" will confirm
+   the quit.
+
+   Changed in version 3.14: A confirmation prompt will be shown if
+   the debugger is invoked in "'inline'" mode. After the
+   confirmation, the debugger will call "sys.exit()" immediately,
+   instead of raising "bdb.BdbQuit" in the next trace event.
+
+debug code
+
+   Enter a recursive debugger that steps through *code* (which is an
+   arbitrary expression or statement to be executed in the current
+   environment).
+
+retval
+
+   Print the return value for the last return of the current
+   function.
+
+exceptions [excnumber]
+
+   List or jump between chained exceptions.
+
+   When using "pdb.pm()" or "Pdb.post_mortem(...)" with a chained
+   exception instead of a traceback, it allows the user to move
+   between the chained exceptions using the "exceptions" command to
+   list exceptions, and "exceptions <number>" to switch to that
+   exception.
+
+   Example:
+
+      def out():
+          try:
+              middle()
+          except Exception as e:
+              raise ValueError("reraise middle() error") from e
+
+      def middle():
+          try:
+              return inner(0)
+          except Exception as e:
+              raise ValueError("Middle fail")
+
+      def inner(x):
+          1 / x
+
+      out()
+
+   Calling "pdb.pm()" will allow you to move between exceptions:
+
+      > example.py(5)out()
+      -> raise ValueError("reraise middle() error") from e
+
+      (Pdb) exceptions
+        0 ZeroDivisionError('division by zero')
+        1 ValueError('Middle fail')
+      > 2 ValueError('reraise middle() error')
+
+      (Pdb) exceptions 0
+      > example.py(16)inner()
+      -> 1 / x
+
+      (Pdb) up
+      > example.py(10)middle()
+      -> return inner(0)
+
+   Added in version 3.13.
+
+-[ Footnotes ]-
+
+[1] Whether a frame is considered to originate in a certain module is
+    determined by the "__name__" in the frame globals.
+''',
+ 'del': r'''The "del" statement
+*******************
+
+   del_stmt: "del" target_list
+
+Deletion is recursively defined very similarly to the way assignment
+is defined. Rather than spelling it out in full detail, here are some
+hints.
+
+Deletion of a target list recursively deletes each target, from left
+to right.
+
+Deletion of a name removes the binding of that name from the local or
+global namespace, depending on whether the name occurs in a "global"
+statement in the same code block. Trying to delete an unbound name
+raises a "NameError" exception.
+
+Deletion of attribute references and subscriptions is passed to the
+primary object involved; deletion of a slicing is in general
+equivalent to assignment of an empty slice of the right type (but
+even this is determined by the sliced object).
+
+Changed in version 3.2: Previously it was illegal to delete a name
+from the local namespace if it occurs as a free variable in a nested
+block.
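+
+For example, deleting each kind of target (an illustrative
+transcript):
+
+   >>> x = [1, 2, 3, 4]
+   >>> del x[0]      # deletion of a subscription
+   >>> x
+   [2, 3, 4]
+   >>> del x[1:]     # deletion of a slicing
+   >>> x
+   [2]
+   >>> del x         # deletion of a name
+   >>> x
+   Traceback (most recent call last):
+     ...
+   NameError: name 'x' is not defined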
+''', + 'dict': r'''Dictionary displays +******************* + +A dictionary display is a possibly empty series of dict items +(key/value pairs) enclosed in curly braces: + + dict_display: "{" [dict_item_list | dict_comprehension] "}" + dict_item_list: dict_item ("," dict_item)* [","] + dict_item: expression ":" expression | "**" or_expr + dict_comprehension: expression ":" expression comp_for + +A dictionary display yields a new dictionary object. + +If a comma-separated sequence of dict items is given, they are +evaluated from left to right to define the entries of the dictionary: +each key object is used as a key into the dictionary to store the +corresponding value. This means that you can specify the same key +multiple times in the dict item list, and the final dictionary’s value +for that key will be the last one given. + +A double asterisk "**" denotes *dictionary unpacking*. Its operand +must be a *mapping*. Each mapping item is added to the new +dictionary. Later values replace values already set by earlier dict +items and earlier dictionary unpackings. + +Added in version 3.5: Unpacking into dictionary displays, originally +proposed by **PEP 448**. + +A dict comprehension, in contrast to list and set comprehensions, +needs two expressions separated with a colon followed by the usual +“for” and “if” clauses. When the comprehension is run, the resulting +key and value elements are inserted in the new dictionary in the order +they are produced. + +Restrictions on the types of the key values are listed earlier in +section The standard type hierarchy. (To summarize, the key type +should be *hashable*, which excludes all mutable objects.) Clashes +between duplicate keys are not detected; the last value (textually +rightmost in the display) stored for a given key value prevails. + +Changed in version 3.8: Prior to Python 3.8, in dict comprehensions, +the evaluation order of key and value was not well-defined. In +CPython, the value was evaluated before the key. Starting with 3.8, +the key is evaluated before the value, as proposed by **PEP 572**. +''', + 'dynamic-features': r'''Interaction with dynamic features +********************************* + +Name resolution of free variables occurs at runtime, not at compile +time. This means that the following code will print 42: + + i = 10 + def f(): + print(i) + i = 42 + f() + +The "eval()" and "exec()" functions do not have access to the full +environment for resolving names. Names may be resolved in the local +and global namespaces of the caller. Free variables are not resolved +in the nearest enclosing namespace, but in the global namespace. [1] +The "exec()" and "eval()" functions have optional arguments to +override the global and local namespace. If only one namespace is +specified, it is used for both. +''', + 'else': r'''The "if" statement +****************** + +The "if" statement is used for conditional execution: + + if_stmt: "if" assignment_expression ":" suite + ("elif" assignment_expression ":" suite)* + ["else" ":" suite] + +It selects exactly one of the suites by evaluating the expressions one +by one until one is found to be true (see section Boolean operations +for the definition of true and false); then that suite is executed +(and no other part of the "if" statement is executed or evaluated). +If all expressions are false, the suite of the "else" clause, if +present, is executed. 
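+
+For example (an illustrative sketch):
+
+   x = 0
+   if x > 0:
+       print("positive")
+   elif x < 0:
+       print("negative")
+   else:
+       print("zero")   # this branch runs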
+''', + 'exceptions': r'''Exceptions +********** + +Exceptions are a means of breaking out of the normal flow of control +of a code block in order to handle errors or other exceptional +conditions. An exception is *raised* at the point where the error is +detected; it may be *handled* by the surrounding code block or by any +code block that directly or indirectly invoked the code block where +the error occurred. + +The Python interpreter raises an exception when it detects a run-time +error (such as division by zero). A Python program can also +explicitly raise an exception with the "raise" statement. Exception +handlers are specified with the "try" … "except" statement. The +"finally" clause of such a statement can be used to specify cleanup +code which does not handle the exception, but is executed whether an +exception occurred or not in the preceding code. + +Python uses the “termination” model of error handling: an exception +handler can find out what happened and continue execution at an outer +level, but it cannot repair the cause of the error and retry the +failing operation (except by re-entering the offending piece of code +from the top). + +When an exception is not handled at all, the interpreter terminates +execution of the program, or returns to its interactive main loop. In +either case, it prints a stack traceback, except when the exception is +"SystemExit". + +Exceptions are identified by class instances. The "except" clause is +selected depending on the class of the instance: it must reference the +class of the instance or a *non-virtual base class* thereof. The +instance can be received by the handler and can carry additional +information about the exceptional condition. + +Note: + + Exception messages are not part of the Python API. Their contents + may change from one version of Python to the next without warning + and should not be relied on by code which will run under multiple + versions of the interpreter. + +See also the description of the "try" statement in section The try +statement and "raise" statement in section The raise statement. +''', + 'execmodel': r'''Execution model +*************** + + +Structure of a program +====================== + +A Python program is constructed from code blocks. A *block* is a piece +of Python program text that is executed as a unit. The following are +blocks: a module, a function body, and a class definition. Each +command typed interactively is a block. A script file (a file given +as standard input to the interpreter or specified as a command line +argument to the interpreter) is a code block. A script command (a +command specified on the interpreter command line with the "-c" +option) is a code block. A module run as a top level script (as module +"__main__") from the command line using a "-m" argument is also a code +block. The string argument passed to the built-in functions "eval()" +and "exec()" is a code block. + +A code block is executed in an *execution frame*. A frame contains +some administrative information (used for debugging) and determines +where and how execution continues after the code block’s execution has +completed. + + +Naming and binding +================== + + +Binding of names +---------------- + +*Names* refer to objects. Names are introduced by name binding +operations. 
+ +The following constructs bind names: + +* formal parameters to functions, + +* class definitions, + +* function definitions, + +* assignment expressions, + +* targets that are identifiers if occurring in an assignment: + + * "for" loop header, + + * after "as" in a "with" statement, "except" clause, "except*" + clause, or in the as-pattern in structural pattern matching, + + * in a capture pattern in structural pattern matching + +* "import" statements. + +* "type" statements. + +* type parameter lists. + +The "import" statement of the form "from ... import *" binds all names +defined in the imported module, except those beginning with an +underscore. This form may only be used at the module level. + +A target occurring in a "del" statement is also considered bound for +this purpose (though the actual semantics are to unbind the name). + +Each assignment or import statement occurs within a block defined by a +class or function definition or at the module level (the top-level +code block). + +If a name is bound in a block, it is a local variable of that block, +unless declared as "nonlocal" or "global". If a name is bound at the +module level, it is a global variable. (The variables of the module +code block are local and global.) If a variable is used in a code +block but not defined there, it is a *free variable*. + +Each occurrence of a name in the program text refers to the *binding* +of that name established by the following name resolution rules. + + +Resolution of names +------------------- + +A *scope* defines the visibility of a name within a block. If a local +variable is defined in a block, its scope includes that block. If the +definition occurs in a function block, the scope extends to any blocks +contained within the defining one, unless a contained block introduces +a different binding for the name. + +When a name is used in a code block, it is resolved using the nearest +enclosing scope. The set of all such scopes visible to a code block +is called the block’s *environment*. + +When a name is not found at all, a "NameError" exception is raised. If +the current scope is a function scope, and the name refers to a local +variable that has not yet been bound to a value at the point where the +name is used, an "UnboundLocalError" exception is raised. +"UnboundLocalError" is a subclass of "NameError". + +If a name binding operation occurs anywhere within a code block, all +uses of the name within the block are treated as references to the +current block. This can lead to errors when a name is used within a +block before it is bound. This rule is subtle. Python lacks +declarations and allows name binding operations to occur anywhere +within a code block. The local variables of a code block can be +determined by scanning the entire text of the block for name binding +operations. See the FAQ entry on UnboundLocalError for examples. + +If the "global" statement occurs within a block, all uses of the names +specified in the statement refer to the bindings of those names in the +top-level namespace. Names are resolved in the top-level namespace by +searching the global namespace, i.e. the namespace of the module +containing the code block, and the builtins namespace, the namespace +of the module "builtins". The global namespace is searched first. If +the names are not found there, the builtins namespace is searched +next. If the names are also not found in the builtins namespace, new +variables are created in the global namespace. 
The global statement
+must precede all uses of the listed names.
+
+The "global" statement has the same scope as a name binding operation
+in the same block. If the nearest enclosing scope for a free variable
+contains a global statement, the free variable is treated as a
+global.
+
+The "nonlocal" statement causes corresponding names to refer to
+previously bound variables in the nearest enclosing function scope.
+"SyntaxError" is raised at compile time if the given name does not
+exist in any enclosing function scope. Type parameters cannot be
+rebound with the "nonlocal" statement.
+
+The namespace for a module is automatically created the first time a
+module is imported. The main module for a script is always called
+"__main__".
+
+Class definition blocks and arguments to "exec()" and "eval()" are
+special in the context of name resolution. A class definition is an
+executable statement that may use and define names. These references
+follow the normal rules for name resolution with an exception that
+unbound local variables are looked up in the global namespace. The
+namespace of the class definition becomes the attribute dictionary of
+the class. The scope of names defined in a class block is limited to
+the class block; it does not extend to the code blocks of methods.
+This includes comprehensions and generator expressions, but it does
+not include annotation scopes, which have access to their enclosing
+class scopes. This means that the following will fail:
+
+   class A:
+       a = 42
+       b = list(a + i for i in range(10))
+
+However, the following will succeed:
+
+   class A:
+       type Alias = Nested
+       class Nested: pass
+
+   print(A.Alias.__value__)  # <class 'A.Nested'>
+
+
+Annotation scopes
+-----------------
+
+*Annotations*, type parameter lists and "type" statements introduce
+*annotation scopes*, which behave mostly like function scopes, but
+with some exceptions discussed below.
+
+Annotation scopes are used in the following contexts:
+
+* *Function annotations*.
+
+* *Variable annotations*.
+
+* Type parameter lists for generic type aliases.
+
+* Type parameter lists for generic functions. A generic function’s
+  annotations are executed within the annotation scope, but its
+  defaults and decorators are not.
+
+* Type parameter lists for generic classes. A generic class’s base
+  classes and keyword arguments are executed within the annotation
+  scope, but its decorators are not.
+
+* The bounds, constraints, and default values for type parameters
+  (lazily evaluated).
+
+* The value of type aliases (lazily evaluated).
+
+Annotation scopes differ from function scopes in the following ways:
+
+* Annotation scopes have access to their enclosing class namespace.
+  If an annotation scope is immediately within a class scope, or
+  within another annotation scope that is immediately within a class
+  scope, the code in the annotation scope can use names defined in
+  the class scope as if it were executed directly within the class
+  body. This contrasts with regular functions defined within classes,
+  which cannot access names defined in the class scope.
+
+* Expressions in annotation scopes cannot contain "yield", "yield
+  from", "await", or ":=" expressions. (These expressions are allowed
+  in other scopes contained within the annotation scope.)
+
+* Names defined in annotation scopes cannot be rebound with
+  "nonlocal" statements in inner scopes. This includes only type
+  parameters, as no other syntactic elements that can appear within
+  annotation scopes can introduce new names.
+ +* While annotation scopes have an internal name, that name is not + reflected in the *qualified name* of objects defined within the + scope. Instead, the "__qualname__" of such objects is as if the + object were defined in the enclosing scope. + +Added in version 3.12: Annotation scopes were introduced in Python +3.12 as part of **PEP 695**. + +Changed in version 3.13: Annotation scopes are also used for type +parameter defaults, as introduced by **PEP 696**. + +Changed in version 3.14: Annotation scopes are now also used for +annotations, as specified in **PEP 649** and **PEP 749**. + + +Lazy evaluation +--------------- + +Most annotation scopes are *lazily evaluated*. This includes +annotations, the values of type aliases created through the "type" +statement, and the bounds, constraints, and default values of type +variables created through the type parameter syntax. This means that +they are not evaluated when the type alias or type variable is +created, or when the object carrying annotations is created. Instead, +they are only evaluated when necessary, for example when the +"__value__" attribute on a type alias is accessed. + +Example: + + >>> type Alias = 1/0 + >>> Alias.__value__ + Traceback (most recent call last): + ... + ZeroDivisionError: division by zero + >>> def func[T: 1/0](): pass + >>> T = func.__type_params__[0] + >>> T.__bound__ + Traceback (most recent call last): + ... + ZeroDivisionError: division by zero + +Here the exception is raised only when the "__value__" attribute of +the type alias or the "__bound__" attribute of the type variable is +accessed. + +This behavior is primarily useful for references to types that have +not yet been defined when the type alias or type variable is created. +For example, lazy evaluation enables creation of mutually recursive +type aliases: + + from typing import Literal + + type SimpleExpr = int | Parenthesized + type Parenthesized = tuple[Literal["("], Expr, Literal[")"]] + type Expr = SimpleExpr | tuple[SimpleExpr, Literal["+", "-"], Expr] + +Lazily evaluated values are evaluated in annotation scope, which means +that names that appear inside the lazily evaluated value are looked up +as if they were used in the immediately enclosing scope. + +Added in version 3.12. + + +Builtins and restricted execution +--------------------------------- + +**CPython implementation detail:** Users should not touch +"__builtins__"; it is strictly an implementation detail. Users +wanting to override values in the builtins namespace should "import" +the "builtins" module and modify its attributes appropriately. + +The builtins namespace associated with the execution of a code block +is actually found by looking up the name "__builtins__" in its global +namespace; this should be a dictionary or a module (in the latter case +the module’s dictionary is used). By default, when in the "__main__" +module, "__builtins__" is the built-in module "builtins"; when in any +other module, "__builtins__" is an alias for the dictionary of the +"builtins" module itself. + + +Interaction with dynamic features +--------------------------------- + +Name resolution of free variables occurs at runtime, not at compile +time. This means that the following code will print 42: + + i = 10 + def f(): + print(i) + i = 42 + f() + +The "eval()" and "exec()" functions do not have access to the full +environment for resolving names. Names may be resolved in the local +and global namespaces of the caller. 
Free variables are not resolved +in the nearest enclosing namespace, but in the global namespace. [1] +The "exec()" and "eval()" functions have optional arguments to +override the global and local namespace. If only one namespace is +specified, it is used for both. + + +Exceptions +========== + +Exceptions are a means of breaking out of the normal flow of control +of a code block in order to handle errors or other exceptional +conditions. An exception is *raised* at the point where the error is +detected; it may be *handled* by the surrounding code block or by any +code block that directly or indirectly invoked the code block where +the error occurred. + +The Python interpreter raises an exception when it detects a run-time +error (such as division by zero). A Python program can also +explicitly raise an exception with the "raise" statement. Exception +handlers are specified with the "try" … "except" statement. The +"finally" clause of such a statement can be used to specify cleanup +code which does not handle the exception, but is executed whether an +exception occurred or not in the preceding code. + +Python uses the “termination” model of error handling: an exception +handler can find out what happened and continue execution at an outer +level, but it cannot repair the cause of the error and retry the +failing operation (except by re-entering the offending piece of code +from the top). + +When an exception is not handled at all, the interpreter terminates +execution of the program, or returns to its interactive main loop. In +either case, it prints a stack traceback, except when the exception is +"SystemExit". + +Exceptions are identified by class instances. The "except" clause is +selected depending on the class of the instance: it must reference the +class of the instance or a *non-virtual base class* thereof. The +instance can be received by the handler and can carry additional +information about the exceptional condition. + +Note: + + Exception messages are not part of the Python API. Their contents + may change from one version of Python to the next without warning + and should not be relied on by code which will run under multiple + versions of the interpreter. + +See also the description of the "try" statement in section The try +statement and "raise" statement in section The raise statement. + + +Runtime Components +================== + + +General Computing Model +----------------------- + +Python’s execution model does not operate in a vacuum. It runs on a +host machine and through that host’s runtime environment, including +its operating system (OS), if there is one. When a program runs, the +conceptual layers of how it runs on the host look something like this: + + **host machine** + **process** (global resources) + **thread** (runs machine code) + +Each process represents a program running on the host. Think of each +process itself as the data part of its program. Think of the process’ +threads as the execution part of the program. This distinction will +be important to understand the conceptual Python runtime. + +The process, as the data part, is the execution context in which the +program runs. It mostly consists of the set of resources assigned to +the program by the host, including memory, signals, file handles, +sockets, and environment variables. + +Processes are isolated and independent from one another. (The same is +true for hosts.) The host manages the process’ access to its assigned +resources, in addition to coordinating between processes. 
+
+Each thread represents the actual execution of the program’s machine
+code, running relative to the resources assigned to the program’s
+process. It’s strictly up to the host how and when that execution
+takes place.
+
+From the point of view of Python, a program always starts with exactly
+one thread. However, the program may grow to run in multiple
+simultaneous threads. Not all hosts support multiple threads per
+process, but most do. Unlike processes, threads in a process are not
+isolated and independent from one another. Specifically, all threads
+in a process share all of the process’ resources.
+
+The fundamental point of threads is that each one does *run*
+independently, at the same time as the others. That may be only
+conceptually at the same time (“concurrently”) or physically (“in
+parallel”). Either way, the threads effectively run at a non-
+synchronized rate.
+
+Note:
+
+  That non-synchronized rate means none of the process’ memory is
+  guaranteed to stay consistent for the code running in any given
+  thread. Thus multi-threaded programs must take care to coordinate
+  access to intentionally shared resources. Likewise, they must take
+  care to be absolutely diligent about not accessing any *other*
+  resources in multiple threads; otherwise two threads running at the
+  same time might accidentally interfere with each other’s use of some
+  shared data. All this is true for both Python programs and the
+  Python runtime. The cost of this broad, unstructured requirement is
+  the tradeoff for the kind of raw concurrency that threads provide.
+  The alternative to the required discipline generally means dealing
+  with non-deterministic bugs and data corruption.
+
+
+Python Runtime Model
+--------------------
+
+The same conceptual layers apply to each Python program, with some
+extra data layers specific to Python:
+
+   **host machine**
+      **process** (global resources)
+         Python global runtime (*state*)
+            Python interpreter (*state*)
+               **thread** (runs Python bytecode and “C-API”)
+                  Python thread *state*
+
+At the conceptual level: when a Python program starts, it looks
+exactly like that diagram, with one of each. The runtime may grow to
+include multiple interpreters, and each interpreter may grow to
+include multiple thread states.
+
+Note:
+
+  A Python implementation won’t necessarily implement the runtime
+  layers distinctly or even concretely. The only exception is places
+  where distinct layers are directly specified or exposed to users,
+  like through the "threading" module.
+
+Note:
+
+  The initial interpreter is typically called the “main” interpreter.
+  Some Python implementations, like CPython, assign special roles to
+  the main interpreter. Likewise, the host thread where the runtime
+  was initialized is known as the “main” thread. It may be different
+  from the process’ initial thread, though they are often the same.
+  In some cases “main thread” may be even more specific and refer to
+  the initial thread state. A Python runtime might assign specific
+  responsibilities to the main thread, such as handling signals.
+
+As a whole, the Python runtime consists of the global runtime state,
+interpreters, and thread states. The runtime ensures all that state
+stays consistent over its lifetime, particularly when used with
+multiple host threads.
+
+The global runtime, at the conceptual level, is just a set of
+interpreters. While those interpreters are otherwise isolated and
+independent from one another, they may share some data or other
+resources. The runtime is responsible for managing these global
+resources safely. The actual nature and management of these resources
+is implementation-specific. Ultimately, the external utility of the
+global runtime is limited to managing interpreters.
+
+In contrast, an “interpreter” is conceptually what we would normally
+think of as the (full-featured) “Python runtime”. When machine code
+executing in a host thread interacts with the Python runtime, it calls
+into Python in the context of a specific interpreter.
+
+Note:
+
+  The term “interpreter” here is not the same as the “bytecode
+  interpreter”, which is what regularly runs in threads, executing
+  compiled Python code. In an ideal world, “Python runtime” would
+  refer to what we currently call “interpreter”. However, it’s been
+  called “interpreter” at least since it was introduced in 1997
+  (CPython:a027efa5b).
+
+Each interpreter completely encapsulates all of the non-process-
+global, non-thread-specific state needed for the Python runtime to
+work. Notably, the interpreter’s state persists between uses. It
+includes fundamental data like "sys.modules". The runtime ensures
+multiple threads using the same interpreter will safely share it
+between them.
+
+A Python implementation may support using multiple interpreters at the
+same time in the same process. They are independent and isolated from
+one another. For example, each interpreter has its own "sys.modules".
+
+For thread-specific runtime state, each interpreter has a set of
+thread states, which it manages, in the same way the global runtime
+contains a set of interpreters. It can have thread states for as many
+host threads as it needs. It may even have multiple thread states for
+the same host thread, though that isn’t as common.
+
+Each thread state, conceptually, has all the thread-specific runtime
+data an interpreter needs to operate in one host thread. The thread
+state includes the current raised exception and the thread’s Python
+call stack. It may include other thread-specific resources.
+
+Note:
+
+  The term “Python thread” can sometimes refer to a thread state, but
+  normally it means a thread created using the "threading" module.
+
+Each thread state, over its lifetime, is always tied to exactly one
+interpreter and exactly one host thread. It will only ever be used in
+that thread and with that interpreter.
+
+Multiple thread states may be tied to the same host thread, whether
+for different interpreters or even the same interpreter. However, for
+any given host thread, only one of the thread states tied to it can be
+used by the thread at a time.
+
+Thread states are isolated and independent from one another and don’t
+share any data, except for possibly sharing an interpreter and objects
+or other resources belonging to that interpreter.
+
+Once a program is running, new Python threads can be created using the
+"threading" module (on platforms and Python implementations that
+support threads). Additional processes can be created using the "os",
+"subprocess", and "multiprocessing" modules. Interpreters can be
+created and used with the "concurrent.interpreters" module. Coroutines
+(async) can be run using "asyncio" in each interpreter, typically only
+in a single thread (often the main thread).
+
+-[ Footnotes ]-
+
+[1] This limitation occurs because the code that is executed by these
+    operations is not available at the time the module is compiled. 
+''', + 'exprlists': r'''Expression lists +**************** + + starred_expression: "*" or_expr | expression + flexible_expression: assignment_expression | starred_expression + flexible_expression_list: flexible_expression ("," flexible_expression)* [","] + starred_expression_list: starred_expression ("," starred_expression)* [","] + expression_list: expression ("," expression)* [","] + yield_list: expression_list | starred_expression "," [starred_expression_list] + +Except when part of a list or set display, an expression list +containing at least one comma yields a tuple. The length of the tuple +is the number of expressions in the list. The expressions are +evaluated from left to right. + +An asterisk "*" denotes *iterable unpacking*. Its operand must be an +*iterable*. The iterable is expanded into a sequence of items, which +are included in the new tuple, list, or set, at the site of the +unpacking. + +Added in version 3.5: Iterable unpacking in expression lists, +originally proposed by **PEP 448**. + +Added in version 3.11: Any item in an expression list may be starred. +See **PEP 646**. + +A trailing comma is required only to create a one-item tuple, such as +"1,"; it is optional in all other cases. A single expression without a +trailing comma doesn’t create a tuple, but rather yields the value of +that expression. (To create an empty tuple, use an empty pair of +parentheses: "()".) +''', + 'floating': r'''Floating-point literals +*********************** + +Floating-point (float) literals, such as "3.14" or "1.5", denote +approximations of real numbers. + +They consist of *integer* and *fraction* parts, each composed of +decimal digits. The parts are separated by a decimal point, ".": + + 2.71828 + 4.0 + +Unlike in integer literals, leading zeros are allowed. For example, +"077.010" is legal, and denotes the same number as "77.01". + +As in integer literals, single underscores may occur between digits to +help readability: + + 96_485.332_123 + 3.14_15_93 + +Either of these parts, but not both, can be empty. For example: + + 10. # (equivalent to 10.0) + .001 # (equivalent to 0.001) + +Optionally, the integer and fraction may be followed by an *exponent*: +the letter "e" or "E", followed by an optional sign, "+" or "-", and a +number in the same format as the integer and fraction parts. The "e" +or "E" represents “times ten raised to the power of”: + + 1.0e3 # (represents 1.0×10³, or 1000.0) + 1.166e-5 # (represents 1.166×10⁻⁵, or 0.00001166) + 6.02214076e+23 # (represents 6.02214076×10²³, or 602214076000000000000000.) + +In floats with only integer and exponent parts, the decimal point may +be omitted: + + 1e3 # (equivalent to 1.e3 and 1.0e3) + 0e0 # (equivalent to 0.) + +Formally, floating-point literals are described by the following +lexical definitions: + + floatnumber: + | digitpart "." [digitpart] [exponent] + | "." digitpart [exponent] + | digitpart exponent + digitpart: digit (["_"] digit)* + exponent: ("e" | "E") ["+" | "-"] digitpart + +Changed in version 3.6: Underscores are now allowed for grouping +purposes in literals. +''', + 'for': r'''The "for" statement +******************* + +The "for" statement is used to iterate over the elements of a sequence +(such as a string, tuple or list) or other iterable object: + + for_stmt: "for" target_list "in" starred_expression_list ":" suite + ["else" ":" suite] + +The "starred_expression_list" expression is evaluated once; it should +yield an *iterable* object. An *iterator* is created for that +iterable. 
The first item provided by the iterator is then assigned to
+the target list using the standard rules for assignments (see
+Assignment statements), and the suite is executed. This repeats for
+each item provided by the iterator. When the iterator is exhausted,
+the suite in the "else" clause, if present, is executed, and the loop
+terminates.
+
+A "break" statement executed in the first suite terminates the loop
+without executing the "else" clause’s suite. A "continue" statement
+executed in the first suite skips the rest of the suite and continues
+with the next item, or with the "else" clause if there is no next
+item.
+
+The for-loop makes assignments to the variables in the target list.
+This overwrites all previous assignments to those variables including
+those made in the suite of the for-loop:
+
+   for i in range(10):
+       print(i)
+       i = 5              # this will not affect the for-loop
+                          # because i will be overwritten with the next
+                          # index in the range
+
+Names in the target list are not deleted when the loop is finished,
+but if the sequence is empty, they will not have been assigned to at
+all by the loop. Hint: the built-in type "range()" represents
+immutable arithmetic sequences of integers. For instance, iterating
+"range(3)" successively yields 0, 1, and then 2.
+
+Changed in version 3.11: Starred elements are now allowed in the
+expression list.
+''',
+ 'formatstrings': r'''Format string syntax
+********************
+
+The "str.format()" method and the "Formatter" class share the same
+syntax for format strings (although in the case of "Formatter",
+subclasses can define their own format string syntax). The syntax is
+related to that of formatted string literals and template string
+literals, but it is less sophisticated and, in particular, does not
+support arbitrary expressions in interpolations.
+
+Format strings contain “replacement fields” surrounded by curly braces
+"{}". Anything that is not contained in braces is considered literal
+text, which is copied unchanged to the output. If you need to include
+a brace character in the literal text, it can be escaped by doubling:
+"{{" and "}}".
+
+The grammar for a replacement field is as follows:
+
+   replacement_field: "{" [field_name] ["!" conversion] [":" format_spec] "}"
+   field_name: arg_name ("." attribute_name | "[" element_index "]")*
+   arg_name: [identifier | digit+]
+   attribute_name: identifier
+   element_index: digit+ | index_string
+   index_string: <any source character except "]"> +
+   conversion: "r" | "s" | "a"
+   format_spec: <described in the next section>
+
+In less formal terms, the replacement field can start with a
+*field_name* that specifies the object whose value is to be formatted
+and inserted into the output instead of the replacement field. The
+*field_name* is optionally followed by a *conversion* field, which is
+preceded by an exclamation point "'!'", and a *format_spec*, which is
+preceded by a colon "':'". These specify a non-default format for the
+replacement value.
+
+See also the Format specification mini-language section.
+
+The *field_name* itself begins with an *arg_name* that is either a
+number or a keyword. If it’s a number, it refers to a positional
+argument, and if it’s a keyword, it refers to a named keyword
+argument. An *arg_name* is treated as a number if a call to
+"str.isdecimal()" on the string would return true. If the numerical
+arg_names in a format string are 0, 1, 2, … in sequence, they can all
+be omitted (not just some) and the numbers 0, 1, 2, … will be
+automatically inserted in that order. 
Because *arg_name* is not quote- +delimited, it is not possible to specify arbitrary dictionary keys +(e.g., the strings "'10'" or "':-]'") within a format string. The +*arg_name* can be followed by any number of index or attribute +expressions. An expression of the form "'.name'" selects the named +attribute using "getattr()", while an expression of the form +"'[index]'" does an index lookup using "__getitem__()". + +Changed in version 3.1: The positional argument specifiers can be +omitted for "str.format()", so "'{} {}'.format(a, b)" is equivalent to +"'{0} {1}'.format(a, b)". + +Changed in version 3.4: The positional argument specifiers can be +omitted for "Formatter". + +Some simple format string examples: + + "First, thou shalt count to {0}" # References first positional argument + "Bring me a {}" # Implicitly references the first positional argument + "From {} to {}" # Same as "From {0} to {1}" + "My quest is {name}" # References keyword argument 'name' + "Weight in tons {0.weight}" # 'weight' attribute of first positional arg + "Units destroyed: {players[0]}" # First element of keyword argument 'players'. + +The *conversion* field causes a type coercion before formatting. +Normally, the job of formatting a value is done by the "__format__()" +method of the value itself. However, in some cases it is desirable to +force a type to be formatted as a string, overriding its own +definition of formatting. By converting the value to a string before +calling "__format__()", the normal formatting logic is bypassed. + +Three conversion flags are currently supported: "'!s'" which calls +"str()" on the value, "'!r'" which calls "repr()" and "'!a'" which +calls "ascii()". + +Some examples: + + "Harold's a clever {0!s}" # Calls str() on the argument first + "Bring out the holy {name!r}" # Calls repr() on the argument first + "More {!a}" # Calls ascii() on the argument first + +The *format_spec* field contains a specification of how the value +should be presented, including such details as field width, alignment, +padding, decimal precision and so on. Each value type can define its +own “formatting mini-language” or interpretation of the *format_spec*. + +Most built-in types support a common formatting mini-language, which +is described in the next section. + +A *format_spec* field can also include nested replacement fields +within it. These nested replacement fields may contain a field name, +conversion flag and format specification, but deeper nesting is not +allowed. The replacement fields within the format_spec are +substituted before the *format_spec* string is interpreted. This +allows the formatting of a value to be dynamically specified. + +See the Format examples section for some examples. + + +Format specification mini-language +================================== + +“Format specifications” are used within replacement fields contained +within a format string to define how individual values are presented +(see Format string syntax, f-strings, and t-strings). They can also be +passed directly to the built-in "format()" function. Each formattable +type may define how the format specification is to be interpreted. + +Most built-in types implement the following options for format +specifications, although some of the formatting options are only +supported by the numeric types. + +A general convention is that an empty format specification produces +the same result as if you had called "str()" on the value. A non-empty +format specification typically modifies the result. 
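+
+For example, using the built-in "format()" function, which accepts a
+bare format specification:
+
+   >>> format(3.14159, '')      # empty spec: same as str()
+   '3.14159'
+   >>> format(3.14159, '.2f')   # non-empty spec modifies the result
+   '3.14'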
+
+The general form of a *standard format specifier* is:
+
+   format_spec: [options][width_and_precision][type]
+   options: [[fill]align][sign]["z"]["#"]["0"]
+   fill: <any character>
+   align: "<" | ">" | "=" | "^"
+   sign: "+" | "-" | " "
+   width_and_precision: [width_with_grouping][precision_with_grouping]
+   width_with_grouping: [width][grouping]
+   precision_with_grouping: "." [precision][grouping] | "." grouping
+   width: digit+
+   precision: digit+
+   grouping: "," | "_"
+   type: "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g"
+         | "G" | "n" | "o" | "s" | "x" | "X" | "%"
+
+If a valid *align* value is specified, it can be preceded by a *fill*
+character that can be any character and defaults to a space if
+omitted. It is not possible to use a literal curly brace (“"{"” or
+“"}"”) as the *fill* character in a formatted string literal or when
+using the "str.format()" method. However, it is possible to insert a
+curly brace with a nested replacement field. This limitation doesn’t
+affect the "format()" function.
+
+The meaning of the various alignment options is as follows:
+
++-----------+------------------------------------------------------------+
+| Option    | Meaning                                                    |
+|===========|============================================================|
+| "'<'"     | Forces the field to be left-aligned within the available   |
+|           | space (this is the default for most objects).              |
++-----------+------------------------------------------------------------+
+| "'>'"     | Forces the field to be right-aligned within the available  |
+|           | space (this is the default for numbers).                   |
++-----------+------------------------------------------------------------+
+| "'='"     | Forces the padding to be placed after the sign (if any)    |
+|           | but before the digits. This is used for printing fields    |
+|           | in the form ‘+000000120’. This alignment option is only    |
+|           | valid for numeric types, excluding "complex". It becomes   |
+|           | the default for numbers when ‘0’ immediately precedes the  |
+|           | field width.                                               |
++-----------+------------------------------------------------------------+
+| "'^'"     | Forces the field to be centered within the available       |
+|           | space.                                                     |
++-----------+------------------------------------------------------------+
+
+Note that unless a minimum field width is defined, the field width
+will always be the same size as the data to fill it, so that the
+alignment option has no meaning in this case.
+
+The *sign* option is only valid for number types, and can be one of
+the following:
+
++-----------+------------------------------------------------------------+
+| Option    | Meaning                                                    |
+|===========|============================================================|
+| "'+'"     | Indicates that a sign should be used for both positive as  |
+|           | well as negative numbers.                                  |
++-----------+------------------------------------------------------------+
+| "'-'"     | Indicates that a sign should be used only for negative     |
+|           | numbers (this is the default behavior).                    |
++-----------+------------------------------------------------------------+
+| space     | Indicates that a leading space should be used on positive  |
+|           | numbers, and a minus sign on negative numbers.             |
++-----------+------------------------------------------------------------+
+
+The "'z'" option coerces negative zero floating-point values to
+positive zero after rounding to the format precision. This option is
+only valid for floating-point presentation types.
+
+Changed in version 3.11: Added the "'z'" option (see also **PEP
+682**). 
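+
+For example (Python 3.11 or later), without "'z'" a negative value
+that rounds to zero keeps its sign, while "'z'" drops it:
+
+   >>> format(-0.0001, '.2f')
+   '-0.00'
+   >>> format(-0.0001, 'z.2f')
+   '0.00'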
+ +The "'#'" option causes the “alternate form” to be used for the +conversion. The alternate form is defined differently for different +types. This option is only valid for integer, float and complex +types. For integers, when binary, octal, or hexadecimal output is +used, this option adds the respective prefix "'0b'", "'0o'", "'0x'", +or "'0X'" to the output value. For float and complex the alternate +form causes the result of the conversion to always contain a decimal- +point character, even if no digits follow it. Normally, a decimal- +point character appears in the result of these conversions only if a +digit follows it. In addition, for "'g'" and "'G'" conversions, +trailing zeros are not removed from the result. + +The *width* is a decimal integer defining the minimum total field +width, including any prefixes, separators, and other formatting +characters. If not specified, then the field width will be determined +by the content. + +When no explicit alignment is given, preceding the *width* field by a +zero ("'0'") character enables sign-aware zero-padding for numeric +types, excluding "complex". This is equivalent to a *fill* character +of "'0'" with an *alignment* type of "'='". + +Changed in version 3.10: Preceding the *width* field by "'0'" no +longer affects the default alignment for strings. + +The *precision* is a decimal integer indicating how many digits should +be displayed after the decimal point for presentation types "'f'" and +"'F'", or before and after the decimal point for presentation types +"'g'" or "'G'". For string presentation types the field indicates the +maximum field size - in other words, how many characters will be used +from the field content. The *precision* is not allowed for integer +presentation types. + +The *grouping* option after *width* and *precision* fields specifies a +digit group separator for the integral and fractional parts of a +number respectively. It can be one of the following: + ++-----------+------------------------------------------------------------+ +| Option | Meaning | +|===========|============================================================| +| "','" | Inserts a comma every 3 digits for integer presentation | +| | type "'d'" and floating-point presentation types, | +| | excluding "'n'". For other presentation types, this option | +| | is not supported. | ++-----------+------------------------------------------------------------+ +| "'_'" | Inserts an underscore every 3 digits for integer | +| | presentation type "'d'" and floating-point presentation | +| | types, excluding "'n'". For integer presentation types | +| | "'b'", "'o'", "'x'", and "'X'", underscores are inserted | +| | every 4 digits. For other presentation types, this option | +| | is not supported. | ++-----------+------------------------------------------------------------+ + +For a locale aware separator, use the "'n'" presentation type instead. + +Changed in version 3.1: Added the "','" option (see also **PEP 378**). + +Changed in version 3.6: Added the "'_'" option (see also **PEP 515**). + +Changed in version 3.14: Support the *grouping* option for the +fractional part. + +Finally, the *type* determines how the data should be presented. + +The available string presentation types are: + + +-----------+------------------------------------------------------------+ + | Type | Meaning | + |===========|============================================================| + | "'s'" | String format. This is the default type for strings and | + | | may be omitted. 
| + +-----------+------------------------------------------------------------+ + | None | The same as "'s'". | + +-----------+------------------------------------------------------------+ + +The available integer presentation types are: + + +-----------+------------------------------------------------------------+ + | Type | Meaning | + |===========|============================================================| + | "'b'" | Binary format. Outputs the number in base 2. | + +-----------+------------------------------------------------------------+ + | "'c'" | Character. Converts the integer to the corresponding | + | | unicode character before printing. | + +-----------+------------------------------------------------------------+ + | "'d'" | Decimal Integer. Outputs the number in base 10. | + +-----------+------------------------------------------------------------+ + | "'o'" | Octal format. Outputs the number in base 8. | + +-----------+------------------------------------------------------------+ + | "'x'" | Hex format. Outputs the number in base 16, using lower- | + | | case letters for the digits above 9. | + +-----------+------------------------------------------------------------+ + | "'X'" | Hex format. Outputs the number in base 16, using upper- | + | | case letters for the digits above 9. In case "'#'" is | + | | specified, the prefix "'0x'" will be upper-cased to "'0X'" | + | | as well. | + +-----------+------------------------------------------------------------+ + | "'n'" | Number. This is the same as "'d'", except that it uses the | + | | current locale setting to insert the appropriate digit | + | | group separators. | + +-----------+------------------------------------------------------------+ + | None | The same as "'d'". | + +-----------+------------------------------------------------------------+ + +In addition to the above presentation types, integers can be formatted +with the floating-point presentation types listed below (except "'n'" +and "None"). When doing so, "float()" is used to convert the integer +to a floating-point number before formatting. + +The available presentation types for "float" and "Decimal" values are: + + +-----------+------------------------------------------------------------+ + | Type | Meaning | + |===========|============================================================| + | "'e'" | Scientific notation. For a given precision "p", formats | + | | the number in scientific notation with the letter ‘e’ | + | | separating the coefficient from the exponent. The | + | | coefficient has one digit before and "p" digits after the | + | | decimal point, for a total of "p + 1" significant digits. | + | | With no precision given, uses a precision of "6" digits | + | | after the decimal point for "float", and shows all | + | | coefficient digits for "Decimal". If "p=0", the decimal | + | | point is omitted unless the "#" option is used. For | + | | "float", the exponent always contains at least two digits, | + | | and is zero if the value is zero. | + +-----------+------------------------------------------------------------+ + | "'E'" | Scientific notation. Same as "'e'" except it uses an upper | + | | case ‘E’ as the separator character. | + +-----------+------------------------------------------------------------+ + | "'f'" | Fixed-point notation. For a given precision "p", formats | + | | the number as a decimal number with exactly "p" digits | + | | following the decimal point. 
With no precision given, uses | + | | a precision of "6" digits after the decimal point for | + | | "float", and uses a precision large enough to show all | + | | coefficient digits for "Decimal". If "p=0", the decimal | + | | point is omitted unless the "#" option is used. | + +-----------+------------------------------------------------------------+ + | "'F'" | Fixed-point notation. Same as "'f'", but converts "nan" to | + | | "NAN" and "inf" to "INF". | + +-----------+------------------------------------------------------------+ + | "'g'" | General format. For a given precision "p >= 1", this | + | | rounds the number to "p" significant digits and then | + | | formats the result in either fixed-point format or in | + | | scientific notation, depending on its magnitude. A | + | | precision of "0" is treated as equivalent to a precision | + | | of "1". The precise rules are as follows: suppose that | + | | the result formatted with presentation type "'e'" and | + | | precision "p-1" would have exponent "exp". Then, if "m <= | + | | exp < p", where "m" is -4 for floats and -6 for | + | | "Decimals", the number is formatted with presentation type | + | | "'f'" and precision "p-1-exp". Otherwise, the number is | + | | formatted with presentation type "'e'" and precision | + | | "p-1". In both cases insignificant trailing zeros are | + | | removed from the significand, and the decimal point is | + | | also removed if there are no remaining digits following | + | | it, unless the "'#'" option is used. With no precision | + | | given, uses a precision of "6" significant digits for | + | | "float". For "Decimal", the coefficient of the result is | + | | formed from the coefficient digits of the value; | + | | scientific notation is used for values smaller than "1e-6" | + | | in absolute value and values where the place value of the | + | | least significant digit is larger than 1, and fixed-point | + | | notation is used otherwise. Positive and negative | + | | infinity, positive and negative zero, and nans, are | + | | formatted as "inf", "-inf", "0", "-0" and "nan" | + | | respectively, regardless of the precision. | + +-----------+------------------------------------------------------------+ + | "'G'" | General format. Same as "'g'" except switches to "'E'" if | + | | the number gets too large. The representations of infinity | + | | and NaN are uppercased, too. | + +-----------+------------------------------------------------------------+ + | "'n'" | Number. This is the same as "'g'", except that it uses the | + | | current locale setting to insert the appropriate digit | + | | group separators for the integral part of a number. | + +-----------+------------------------------------------------------------+ + | "'%'" | Percentage. Multiplies the number by 100 and displays in | + | | fixed ("'f'") format, followed by a percent sign. | + +-----------+------------------------------------------------------------+ + | None | For "float" this is like the "'g'" type, except that when | + | | fixed- point notation is used to format the result, it | + | | always includes at least one digit past the decimal point, | + | | and switches to the scientific notation when "exp >= p - | + | | 1". When the precision is not specified, the latter will | + | | be as large as needed to represent the given value | + | | faithfully. For "Decimal", this is the same as either | + | | "'g'" or "'G'" depending on the value of | + | | "context.capitals" for the current decimal context. 
The | + | | overall effect is to match the output of "str()" as | + | | altered by the other format modifiers. | + +-----------+------------------------------------------------------------+ + +The result should be correctly rounded to a given precision "p" of +digits after the decimal point. The rounding mode for "float" matches +that of the "round()" builtin. For "Decimal", the rounding mode of +the current context will be used. + +The available presentation types for "complex" are the same as those +for "float" ("'%'" is not allowed). Both the real and imaginary +components of a complex number are formatted as floating-point +numbers, according to the specified presentation type. They are +separated by the mandatory sign of the imaginary part, the latter +being terminated by a "j" suffix. If the presentation type is +missing, the result will match the output of "str()" (complex numbers +with a non-zero real part are also surrounded by parentheses), +possibly altered by other format modifiers. + + +Format examples +=============== + +This section contains examples of the "str.format()" syntax and +comparison with the old "%"-formatting. + +In most of the cases the syntax is similar to the old "%"-formatting, +with the addition of the "{}" and with ":" used instead of "%". For +example, "'%03.2f'" can be translated to "'{:03.2f}'". + +The new format syntax also supports new and different options, shown +in the following examples. + +Accessing arguments by position: + + >>> '{0}, {1}, {2}'.format('a', 'b', 'c') + 'a, b, c' + >>> '{}, {}, {}'.format('a', 'b', 'c') # 3.1+ only + 'a, b, c' + >>> '{2}, {1}, {0}'.format('a', 'b', 'c') + 'c, b, a' + >>> '{2}, {1}, {0}'.format(*'abc') # unpacking argument sequence + 'c, b, a' + >>> '{0}{1}{0}'.format('abra', 'cad') # arguments' indices can be repeated + 'abracadabra' + +Accessing arguments by name: + + >>> 'Coordinates: {latitude}, {longitude}'.format(latitude='37.24N', longitude='-115.81W') + 'Coordinates: 37.24N, -115.81W' + >>> coord = {'latitude': '37.24N', 'longitude': '-115.81W'} + >>> 'Coordinates: {latitude}, {longitude}'.format(**coord) + 'Coordinates: 37.24N, -115.81W' + +Accessing arguments’ attributes: + + >>> c = 3-5j + >>> ('The complex number {0} is formed from the real part {0.real} ' + ... 'and the imaginary part {0.imag}.').format(c) + 'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.' + >>> class Point: + ... def __init__(self, x, y): + ... self.x, self.y = x, y + ... def __str__(self): + ... return 'Point({self.x}, {self.y})'.format(self=self) + ... 
+ >>> str(Point(4, 2)) + 'Point(4, 2)' + +Accessing arguments’ items: + + >>> coord = (3, 5) + >>> 'X: {0[0]}; Y: {0[1]}'.format(coord) + 'X: 3; Y: 5' + +Replacing "%s" and "%r": + + >>> "repr() shows quotes: {!r}; str() doesn't: {!s}".format('test1', 'test2') + "repr() shows quotes: 'test1'; str() doesn't: test2" + +Aligning the text and specifying a width: + + >>> '{:<30}'.format('left aligned') + 'left aligned ' + >>> '{:>30}'.format('right aligned') + ' right aligned' + >>> '{:^30}'.format('centered') + ' centered ' + >>> '{:*^30}'.format('centered') # use '*' as a fill char + '***********centered***********' + +Replacing "%+f", "%-f", and "% f" and specifying a sign: + + >>> '{:+f}; {:+f}'.format(3.14, -3.14) # show it always + '+3.140000; -3.140000' + >>> '{: f}; {: f}'.format(3.14, -3.14) # show a space for positive numbers + ' 3.140000; -3.140000' + >>> '{:-f}; {:-f}'.format(3.14, -3.14) # show only the minus -- same as '{:f}; {:f}' + '3.140000; -3.140000' + +Replacing "%x" and "%o" and converting the value to different bases: + + >>> # format also supports binary numbers + >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42) + 'int: 42; hex: 2a; oct: 52; bin: 101010' + >>> # with 0x, 0o, or 0b as prefix: + >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42) + 'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010' + +Using the comma or the underscore as a digit group separator: + + >>> '{:,}'.format(1234567890) + '1,234,567,890' + >>> '{:_}'.format(1234567890) + '1_234_567_890' + >>> '{:_b}'.format(1234567890) + '100_1001_1001_0110_0000_0010_1101_0010' + >>> '{:_x}'.format(1234567890) + '4996_02d2' + >>> '{:_}'.format(123456789.123456789) + '123_456_789.12345679' + >>> '{:.,}'.format(123456789.123456789) + '123456789.123,456,79' + >>> '{:,._}'.format(123456789.123456789) + '123,456,789.123_456_79' + +Expressing a percentage: + + >>> points = 19 + >>> total = 22 + >>> 'Correct answers: {:.2%}'.format(points/total) + 'Correct answers: 86.36%' + +Using type-specific formatting: + + >>> import datetime as dt + >>> d = dt.datetime(2010, 7, 4, 12, 15, 58) + >>> '{:%Y-%m-%d %H:%M:%S}'.format(d) + '2010-07-04 12:15:58' + +Nesting arguments and more complex examples: + + >>> for align, text in zip('<^>', ['left', 'center', 'right']): + ... '{0:{fill}{align}16}'.format(text, fill=align, align=align) + ... + 'left<<<<<<<<<<<<' + '^^^^^center^^^^^' + '>>>>>>>>>>>right' + >>> + >>> octets = [192, 168, 0, 1] + >>> '{:02X}{:02X}{:02X}{:02X}'.format(*octets) + 'C0A80001' + >>> int(_, 16) + 3232235521 + >>> + >>> width = 5 + >>> for num in range(5,12): + ... for base in 'dXob': + ... print('{0:{width}{base}}'.format(num, base=base, width=width), end=' ') + ... print() + ... 
+ 5 5 5 101 + 6 6 6 110 + 7 7 7 111 + 8 8 10 1000 + 9 9 11 1001 + 10 A 12 1010 + 11 B 13 1011 +''', + 'function': r'''Function definitions +******************** + +A function definition defines a user-defined function object (see +section The standard type hierarchy): + + funcdef: [decorators] "def" funcname [type_params] "(" [parameter_list] ")" + ["->" expression] ":" suite + decorators: decorator+ + decorator: "@" assignment_expression NEWLINE + parameter_list: defparameter ("," defparameter)* "," "/" ["," [parameter_list_no_posonly]] + | parameter_list_no_posonly + parameter_list_no_posonly: defparameter ("," defparameter)* ["," [parameter_list_starargs]] + | parameter_list_starargs + parameter_list_starargs: "*" [star_parameter] ("," defparameter)* ["," [parameter_star_kwargs]] + | "*" ("," defparameter)+ ["," [parameter_star_kwargs]] + | parameter_star_kwargs + parameter_star_kwargs: "**" parameter [","] + parameter: identifier [":" expression] + star_parameter: identifier [":" ["*"] expression] + defparameter: parameter ["=" expression] + funcname: identifier + +A function definition is an executable statement. Its execution binds +the function name in the current local namespace to a function object +(a wrapper around the executable code for the function). This +function object contains a reference to the current global namespace +as the global namespace to be used when the function is called. + +The function definition does not execute the function body; this gets +executed only when the function is called. [4] + +A function definition may be wrapped by one or more *decorator* +expressions. Decorator expressions are evaluated when the function is +defined, in the scope that contains the function definition. The +result must be a callable, which is invoked with the function object +as the only argument. The returned value is bound to the function name +instead of the function object. Multiple decorators are applied in +nested fashion. For example, the following code + + @f1(arg) + @f2 + def func(): pass + +is roughly equivalent to + + def func(): pass + func = f1(arg)(f2(func)) + +except that the original function is not temporarily bound to the name +"func". + +Changed in version 3.9: Functions may be decorated with any valid +"assignment_expression". Previously, the grammar was much more +restrictive; see **PEP 614** for details. + +A list of type parameters may be given in square brackets between the +function’s name and the opening parenthesis for its parameter list. +This indicates to static type checkers that the function is generic. +At runtime, the type parameters can be retrieved from the function’s +"__type_params__" attribute. See Generic functions for more. + +Changed in version 3.12: Type parameter lists are new in Python 3.12. + +When one or more *parameters* have the form *parameter* "=" +*expression*, the function is said to have “default parameter values.” +For a parameter with a default value, the corresponding *argument* may +be omitted from a call, in which case the parameter’s default value is +substituted. If a parameter has a default value, all following +parameters up until the “"*"” must also have a default value — this is +a syntactic restriction that is not expressed by the grammar. + +**Default parameter values are evaluated from left to right when the +function definition is executed.** This means that the expression is +evaluated once, when the function is defined, and that the same “pre- +computed” value is used for each call. 
This is especially important +to understand when a default parameter value is a mutable object, such +as a list or a dictionary: if the function modifies the object (e.g. +by appending an item to a list), the default parameter value is in +effect modified. This is generally not what was intended. A way +around this is to use "None" as the default, and explicitly test for +it in the body of the function, e.g.: + + def whats_on_the_telly(penguin=None): + if penguin is None: + penguin = [] + penguin.append("property of the zoo") + return penguin + +Function call semantics are described in more detail in section Calls. +A function call always assigns values to all parameters mentioned in +the parameter list, either from positional arguments, from keyword +arguments, or from default values. If the form “"*identifier"” is +present, it is initialized to a tuple receiving any excess positional +parameters, defaulting to the empty tuple. If the form +“"**identifier"” is present, it is initialized to a new ordered +mapping receiving any excess keyword arguments, defaulting to a new +empty mapping of the same type. Parameters after “"*"” or +“"*identifier"” are keyword-only parameters and may only be passed by +keyword arguments. Parameters before “"/"” are positional-only +parameters and may only be passed by positional arguments. + +Changed in version 3.8: The "/" function parameter syntax may be used +to indicate positional-only parameters. See **PEP 570** for details. + +Parameters may have an *annotation* of the form “": expression"” +following the parameter name. Any parameter may have an annotation, +even those of the form "*identifier" or "**identifier". (As a special +case, parameters of the form "*identifier" may have an annotation “": +*expression"”.) Functions may have “return” annotation of the form +“"-> expression"” after the parameter list. These annotations can be +any valid Python expression. The presence of annotations does not +change the semantics of a function. See Annotations for more +information on annotations. + +Changed in version 3.11: Parameters of the form “"*identifier"” may +have an annotation “": *expression"”. See **PEP 646**. + +It is also possible to create anonymous functions (functions not bound +to a name), for immediate use in expressions. This uses lambda +expressions, described in section Lambdas. Note that the lambda +expression is merely a shorthand for a simplified function definition; +a function defined in a “"def"” statement can be passed around or +assigned to another name just like a function defined by a lambda +expression. The “"def"” form is actually more powerful since it +allows the execution of multiple statements and annotations. + +**Programmer’s note:** Functions are first-class objects. A “"def"” +statement executed inside a function definition defines a local +function that can be returned or passed around. Free variables used +in the nested function can access the local variables of the function +containing the def. See section Naming and binding for details. + +See also: + + **PEP 3107** - Function Annotations + The original specification for function annotations. + + **PEP 484** - Type Hints + Definition of a standard meaning for annotations: type hints. + + **PEP 526** - Syntax for Variable Annotations + Ability to type hint variable declarations, including class + variables and instance variables. 
+
+  **PEP 563** - Postponed Evaluation of Annotations
+     Support for forward references within annotations by preserving
+     annotations in a string form at runtime instead of eager
+     evaluation.
+
+  **PEP 318** - Decorators for Functions and Methods
+     Function and method decorators were introduced. Class decorators
+     were introduced in **PEP 3129**.
+''',
+ 'global': r'''The "global" statement
+**********************
+
+   global_stmt: "global" identifier ("," identifier)*
+
+The "global" statement causes the listed identifiers to be interpreted
+as globals. It would be impossible to assign to a global variable
+without "global", although free variables may refer to globals without
+being declared global.
+
+The "global" statement applies to the entire current scope (module,
+function body or class definition). A "SyntaxError" is raised if a
+variable is used or assigned to prior to its global declaration in the
+scope.
+
+At the module level, all variables are global, so a "global" statement
+has no effect. However, variables must still not be used or assigned
+to prior to their "global" declaration. This requirement is relaxed in
+the interactive prompt (*REPL*).
+
+**Programmer’s note:** "global" is a directive to the parser. It
+applies only to code parsed at the same time as the "global"
+statement. In particular, a "global" statement contained in a string
+or code object supplied to the built-in "exec()" function does not
+affect the code block *containing* the function call, and code
+contained in such a string is unaffected by "global" statements in the
+code containing the function call. The same applies to the "eval()"
+and "compile()" functions.
+''',
+ 'id-classes': r'''Reserved classes of identifiers
+*******************************
+
+Certain classes of identifiers (besides keywords) have special
+meanings. These classes are identified by the patterns of leading and
+trailing underscore characters:
+
+"_*"
+   Not imported by "from module import *".
+
+"_"
+   In a "case" pattern within a "match" statement, "_" is a soft
+   keyword that denotes a wildcard.
+
+   Separately, the interactive interpreter makes the result of the
+   last evaluation available in the variable "_". (It is stored in the
+   "builtins" module, alongside built-in functions like "print".)
+
+   Elsewhere, "_" is a regular identifier. It is often used to name
+   “special” items, but it is not special to Python itself.
+
+   Note:
+
+     The name "_" is often used in conjunction with
+     internationalization; refer to the documentation for the
+     "gettext" module for more information on this convention. It is
+     also commonly used for unused variables.
+
+"__*__"
+   System-defined names, informally known as “dunder” names. These
+   names are defined by the interpreter and its implementation
+   (including the standard library). Current system names are
+   discussed in the Special method names section and elsewhere. More
+   will likely be defined in future versions of Python. *Any* use of
+   "__*__" names, in any context, that does not follow explicitly
+   documented use, is subject to breakage without warning.
+
+"__*"
+   Class-private names. Names in this category, when used within the
+   context of a class definition, are re-written to use a mangled form
+   to help avoid name clashes between “private” attributes of base and
+   derived classes. See section Identifiers (Names).
+''',
+ 'identifiers': r'''Names (identifiers and keywords)
+********************************
+
+"NAME" tokens represent *identifiers*, *keywords*, and *soft
+keywords*. 
+
+Names are composed of the following characters:
+
+* uppercase and lowercase letters ("A-Z" and "a-z"),
+
+* the underscore ("_"),
+
+* digits ("0" through "9"), which cannot appear as the first
+  character, and
+
+* non-ASCII characters. Valid names may only contain “letter-like” and
+  “digit-like” characters; see Non-ASCII characters in names for
+  details.
+
+Names must contain at least one character, but have no upper length
+limit. Case is significant.
+
+Formally, names are described by the following lexical definitions:
+
+   NAME: name_start name_continue*
+   name_start: "a"..."z" | "A"..."Z" | "_" | <any non-ASCII character>
+   name_continue: name_start | "0"..."9"
+   identifier: <NAME, except keywords>
+
+Note that not all names matched by this grammar are valid; see Non-
+ASCII characters in names for details.
+
+
+Keywords
+========
+
+The following names are used as reserved words, or *keywords* of the
+language, and cannot be used as ordinary identifiers. They must be
+spelled exactly as written here:
+
+   False      await      else       import     pass
+   None       break      except     in         raise
+   True       class      finally    is         return
+   and        continue   for        lambda     try
+   as         def        from       nonlocal   while
+   assert     del        global     not        with
+   async      elif       if         or         yield
+
+
+Soft Keywords
+=============
+
+Added in version 3.10.
+
+Some names are only reserved under specific contexts. These are known
+as *soft keywords*:
+
+* "match", "case", and "_", when used in the "match" statement.
+
+* "type", when used in the "type" statement.
+
+These syntactically act as keywords in their specific contexts, but
+this distinction is done at the parser level, not when tokenizing.
+
+As soft keywords, their use in the grammar is possible while still
+preserving compatibility with existing code that uses these names as
+identifier names.
+
+Changed in version 3.12: "type" is now a soft keyword.
+
+
+Reserved classes of identifiers
+===============================
+
+Certain classes of identifiers (besides keywords) have special
+meanings. These classes are identified by the patterns of leading and
+trailing underscore characters:
+
+"_*"
+   Not imported by "from module import *".
+
+"_"
+   In a "case" pattern within a "match" statement, "_" is a soft
+   keyword that denotes a wildcard.
+
+   Separately, the interactive interpreter makes the result of the
+   last evaluation available in the variable "_". (It is stored in the
+   "builtins" module, alongside built-in functions like "print".)
+
+   Elsewhere, "_" is a regular identifier. It is often used to name
+   “special” items, but it is not special to Python itself.
+
+   Note:
+
+     The name "_" is often used in conjunction with
+     internationalization; refer to the documentation for the
+     "gettext" module for more information on this convention. It is
+     also commonly used for unused variables.
+
+"__*__"
+   System-defined names, informally known as “dunder” names. These
+   names are defined by the interpreter and its implementation
+   (including the standard library). Current system names are
+   discussed in the Special method names section and elsewhere. More
+   will likely be defined in future versions of Python. *Any* use of
+   "__*__" names, in any context, that does not follow explicitly
+   documented use, is subject to breakage without warning.
+
+"__*"
+   Class-private names. Names in this category, when used within the
+   context of a class definition, are re-written to use a mangled form
+   to help avoid name clashes between “private” attributes of base and
+   derived classes. See section Identifiers (Names). 
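+
+For example (the class name "Widget" here is purely illustrative), a
+name of the form "__secret" assigned in a class body is stored under
+its mangled name:
+
+   >>> class Widget:
+   ...     def __init__(self):
+   ...         self.__secret = 42   # stored as _Widget__secret
+   ...
+   >>> Widget()._Widget__secret
+   42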
+
+
+Non-ASCII characters in names
+=============================
+
+Names that contain non-ASCII characters need additional normalization
+and validation beyond the rules and grammar explained above. For
+example, "ř_1", "蛇", or "साँप" are valid names, but "r〰2", "€", or
+"🐍" are not.
+
+This section explains the exact rules.
+
+All names are converted into the normalization form NFKC while
+parsing. This means that, for example, some typographic variants of
+characters are converted to their “basic” form. For example,
+"fiⁿₐˡᵢᶻₐᵗᵢᵒₙ" normalizes to "finalization", so Python treats them as
+the same name:
+
+   >>> fiⁿₐˡᵢᶻₐᵗᵢᵒₙ = 3
+   >>> finalization
+   3
+
+Note:
+
+  Normalization is done at the lexical level only. Run-time functions
+  that take names as *strings* generally do not normalize their
+  arguments. For example, the variable defined above is accessible at
+  run time in the "globals()" dictionary as
+  "globals()["finalization"]" but not "globals()["fiⁿₐˡᵢᶻₐᵗᵢᵒₙ"]".
+
+Similarly to how ASCII-only names must contain only letters, digits
+and the underscore, and cannot start with a digit, a valid name must
+start with a character in the “letter-like” set "xid_start", and the
+remaining characters must be in the “letter- and digit-like” set
+"xid_continue".
+
+These sets are based on the *XID_Start* and *XID_Continue* sets as
+defined by the Unicode standard annex UAX-31. Python’s "xid_start"
+additionally includes the underscore ("_"). Note that Python does not
+necessarily conform to UAX-31.
+
+A non-normative listing of characters in the *XID_Start* and
+*XID_Continue* sets as defined by Unicode is available in the
+DerivedCoreProperties.txt file in the Unicode Character Database. For
+reference, the construction rules for the "xid_*" sets are given
+below.
+
+The set "id_start" is defined as the union of:
+
+* Unicode category "Lu" - uppercase letters (includes "A" to "Z")
+
+* Unicode category "Ll" - lowercase letters (includes "a" to "z")
+
+* Unicode category "Lt" - titlecase letters
+
+* Unicode category "Lm" - modifier letters
+
+* Unicode category "Lo" - other letters
+
+* Unicode category "Nl" - letter numbers
+
+* {"_"} - the underscore
+
+* "Other_ID_Start" - an explicit set of characters in PropList.txt
+  to support backwards compatibility
+
+The set "xid_start" then closes this set under NFKC normalization, by
+removing all characters whose normalization is not of the form
+"id_start id_continue*".
+
+The set "id_continue" is defined as the union of:
+
+* "id_start" (see above)
+
+* Unicode category "Nd" - decimal numbers (includes "0" to "9")
+
+* Unicode category "Pc" - connector punctuations
+
+* Unicode category "Mn" - nonspacing marks
+
+* Unicode category "Mc" - spacing combining marks
+
+* "Other_ID_Continue" - another explicit set of characters in
+  PropList.txt to support backwards compatibility
+
+Again, "xid_continue" closes this set under NFKC normalization.
+
+Unicode categories use the version of the Unicode Character Database
+as included in the "unicodedata" module. 
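+
+As an illustration, the "unicodedata" module can be used to inspect
+both the normalization and the categories involved:
+
+   >>> import unicodedata
+   >>> unicodedata.normalize('NFKC', 'fiⁿₐˡᵢᶻₐᵗᵢᵒₙ')
+   'finalization'
+   >>> unicodedata.category('A')   # uppercase letter
+   'Lu'
+   >>> unicodedata.category('_')   # connector punctuation
+   'Pc'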
+
+See also:
+
+  * **PEP 3131** – Supporting Non-ASCII Identifiers
+
+  * **PEP 672** – Unicode-related Security Considerations for Python
+''',
+ 'if': r'''The "if" statement
+******************
+
+The "if" statement is used for conditional execution:
+
+   if_stmt: "if" assignment_expression ":" suite
+            ("elif" assignment_expression ":" suite)*
+            ["else" ":" suite]
+
+It selects exactly one of the suites by evaluating the expressions one
+by one until one is found to be true (see section Boolean operations
+for the definition of true and false); then that suite is executed
+(and no other part of the "if" statement is executed or evaluated).
+If all expressions are false, the suite of the "else" clause, if
+present, is executed.
+''',
+ 'imaginary': r'''Imaginary literals
+******************
+
+Python has complex number objects, but no complex literals. Instead,
+*imaginary literals* denote complex numbers with a zero real part.
+
+For example, in math, the complex number 3+4.2*i* is written as the
+real number 3 added to the imaginary number 4.2*i*. Python uses a
+similar syntax, except the imaginary unit is written as "j" rather
+than *i*:
+
+   3+4.2j
+
+This is an expression composed of the integer literal "3", the
+operator ‘"+"’, and the imaginary literal "4.2j". Since these are
+three separate tokens, whitespace is allowed between them:
+
+   3 + 4.2j
+
+No whitespace is allowed *within* each token. In particular, the "j"
+suffix may not be separated from the number before it.
+
+The number before the "j" has the same syntax as a floating-point
+literal. Thus, the following are valid imaginary literals:
+
+   4.2j
+   3.14j
+   10.j
+   .001j
+   1e100j
+   3.14e-10j
+   3.14_15_93j
+
+Unlike in a floating-point literal, the decimal point can be omitted
+if the imaginary number only has an integer part. The number is still
+evaluated as a floating-point number, not an integer:
+
+   10j
+   0j
+   1000000000000000000000000j   # equivalent to 1e+24j
+
+The "j" suffix is case-insensitive. That means you can use "J"
+instead:
+
+   3.14J   # equivalent to 3.14j
+
+Formally, imaginary literals are described by the following lexical
+definition:
+
+   imagnumber: (floatnumber | digitpart) ("j" | "J")
+''',
+ 'import': r'''The "import" statement
+**********************
+
+   import_stmt: "import" module ["as" identifier] ("," module ["as" identifier])*
+                | "from" relative_module "import" identifier ["as" identifier]
+                ("," identifier ["as" identifier])*
+                | "from" relative_module "import" "(" identifier ["as" identifier]
+                ("," identifier ["as" identifier])* [","] ")"
+                | "from" relative_module "import" "*"
+   module: (identifier ".")* identifier
+   relative_module: "."* module | "."+
+
+The basic import statement (no "from" clause) is executed in two
+steps:
+
+1. find a module, loading and initializing it if necessary
+
+2. define a name or names in the local namespace for the scope where
+   the "import" statement occurs.
+
+When the statement contains multiple clauses (separated by commas) the
+two steps are carried out separately for each clause, just as though
+the clauses had been separated out into individual import statements.
+
+The details of the first step, finding and loading modules, are
+described in greater detail in the section on the import system, which
+also describes the various types of packages and modules that can be
+imported, as well as all the hooks that can be used to customize the
+import system. 
Note that failures in this step may indicate either +that the module could not be located, *or* that an error occurred +while initializing the module, which includes execution of the +module’s code. + +If the requested module is retrieved successfully, it will be made +available in the local namespace in one of three ways: + +* If the module name is followed by "as", then the name following "as" + is bound directly to the imported module. + +* If no other name is specified, and the module being imported is a + top level module, the module’s name is bound in the local namespace + as a reference to the imported module + +* If the module being imported is *not* a top level module, then the + name of the top level package that contains the module is bound in + the local namespace as a reference to the top level package. The + imported module must be accessed using its full qualified name + rather than directly + +The "from" form uses a slightly more complex process: + +1. find the module specified in the "from" clause, loading and + initializing it if necessary; + +2. for each of the identifiers specified in the "import" clauses: + + 1. check if the imported module has an attribute by that name + + 2. if not, attempt to import a submodule with that name and then + check the imported module again for that attribute + + 3. if the attribute is not found, "ImportError" is raised. + + 4. otherwise, a reference to that value is stored in the local + namespace, using the name in the "as" clause if it is present, + otherwise using the attribute name + +Examples: + + import foo # foo imported and bound locally + import foo.bar.baz # foo, foo.bar, and foo.bar.baz imported, foo bound locally + import foo.bar.baz as fbb # foo, foo.bar, and foo.bar.baz imported, foo.bar.baz bound as fbb + from foo.bar import baz # foo, foo.bar, and foo.bar.baz imported, foo.bar.baz bound as baz + from foo import attr # foo imported and foo.attr bound as attr + +If the list of identifiers is replaced by a star ("'*'"), all public +names defined in the module are bound in the local namespace for the +scope where the "import" statement occurs. + +The *public names* defined by a module are determined by checking the +module’s namespace for a variable named "__all__"; if defined, it must +be a sequence of strings which are names defined or imported by that +module. Names containing non-ASCII characters must be in the +normalization form NFKC; see Non-ASCII characters in names for +details. The names given in "__all__" are all considered public and +are required to exist. If "__all__" is not defined, the set of public +names includes all names found in the module’s namespace which do not +begin with an underscore character ("'_'"). "__all__" should contain +the entire public API. It is intended to avoid accidentally exporting +items that are not part of the API (such as library modules which were +imported and used within the module). + +The wild card form of import — "from module import *" — is only +allowed at the module level. Attempting to use it in class or +function definitions will raise a "SyntaxError". + +When specifying what module to import you do not have to specify the +absolute name of the module. When a module or package is contained +within another package it is possible to make a relative import within +the same top package without having to mention the package name. 
By +using leading dots in the specified module or package after "from" you +can specify how high to traverse up the current package hierarchy +without specifying exact names. One leading dot means the current +package where the module making the import exists. Two dots means up +one package level. Three dots is up two levels, etc. So if you execute +"from . import mod" from a module in the "pkg" package then you will +end up importing "pkg.mod". If you execute "from ..subpkg2 import mod" +from within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The +specification for relative imports is contained in the Package +Relative Imports section. + +"importlib.import_module()" is provided to support applications that +determine dynamically the modules to be loaded. + +Raises an auditing event "import" with arguments "module", "filename", +"sys.path", "sys.meta_path", "sys.path_hooks". + + +Future statements +================= + +A *future statement* is a directive to the compiler that a particular +module should be compiled using syntax or semantics that will be +available in a specified future release of Python where the feature +becomes standard. + +The future statement is intended to ease migration to future versions +of Python that introduce incompatible changes to the language. It +allows use of the new features on a per-module basis before the +release in which the feature becomes standard. + + future_stmt: "from" "__future__" "import" feature ["as" identifier] + ("," feature ["as" identifier])* + | "from" "__future__" "import" "(" feature ["as" identifier] + ("," feature ["as" identifier])* [","] ")" + feature: identifier + +A future statement must appear near the top of the module. The only +lines that can appear before a future statement are: + +* the module docstring (if any), + +* comments, + +* blank lines, and + +* other future statements. + +The only feature that requires using the future statement is +"annotations" (see **PEP 563**). + +All historical features enabled by the future statement are still +recognized by Python 3. The list includes "absolute_import", +"division", "generators", "generator_stop", "unicode_literals", +"print_function", "nested_scopes" and "with_statement". They are all +redundant because they are always enabled, and only kept for backwards +compatibility. + +A future statement is recognized and treated specially at compile +time: Changes to the semantics of core constructs are often +implemented by generating different code. It may even be the case +that a new feature introduces new incompatible syntax (such as a new +reserved word), in which case the compiler may need to parse the +module differently. Such decisions cannot be pushed off until +runtime. + +For any given release, the compiler knows which feature names have +been defined, and raises a compile-time error if a future statement +contains a feature not known to it. + +The direct runtime semantics are the same as for any import statement: +there is a standard module "__future__", described later, and it will +be imported in the usual way at the time the future statement is +executed. + +The interesting runtime semantics depend on the specific feature +enabled by the future statement. + +Note that there is nothing special about the statement: + + import __future__ [as name] + +That is not a future statement; it’s an ordinary import statement with +no special semantics or syntax restrictions. 
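+
+As an illustrative sketch, the features defined by "__future__" can be
+inspected like ordinary module attributes (the release tuple shown is
+the one defined by the module itself):
+
+   >>> import __future__
+   >>> __future__.division.getMandatoryRelease()
+   (3, 0, 0, 'alpha', 0)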
+ +Code compiled by calls to the built-in functions "exec()" and +"compile()" that occur in a module "M" containing a future statement +will, by default, use the new syntax or semantics associated with the +future statement. This can be controlled by optional arguments to +"compile()" — see the documentation of that function for details. + +A future statement typed at an interactive interpreter prompt will +take effect for the rest of the interpreter session. If an +interpreter is started with the "-i" option, is passed a script name +to execute, and the script includes a future statement, it will be in +effect in the interactive session started after the script is +executed. + +See also: + + **PEP 236** - Back to the __future__ + The original proposal for the __future__ mechanism. +''', + 'in': r'''Membership test operations +************************** + +The operators "in" and "not in" test for membership. "x in s" +evaluates to "True" if *x* is a member of *s*, and "False" otherwise. +"x not in s" returns the negation of "x in s". All built-in sequences +and set types support this as well as dictionary, for which "in" tests +whether the dictionary has a given key. For container types such as +list, tuple, set, frozenset, dict, or collections.deque, the +expression "x in y" is equivalent to "any(x is e or x == e for e in +y)". + +For the string and bytes types, "x in y" is "True" if and only if *x* +is a substring of *y*. An equivalent test is "y.find(x) != -1". +Empty strings are always considered to be a substring of any other +string, so """ in "abc"" will return "True". + +For user-defined classes which define the "__contains__()" method, "x +in y" returns "True" if "y.__contains__(x)" returns a true value, and +"False" otherwise. + +For user-defined classes which do not define "__contains__()" but do +define "__iter__()", "x in y" is "True" if some value "z", for which +the expression "x is z or x == z" is true, is produced while iterating +over "y". If an exception is raised during the iteration, it is as if +"in" raised that exception. + +Lastly, the old-style iteration protocol is tried: if a class defines +"__getitem__()", "x in y" is "True" if and only if there is a non- +negative integer index *i* such that "x is y[i] or x == y[i]", and no +lower integer index raises the "IndexError" exception. (If any other +exception is raised, it is as if "in" raised that exception). + +The operator "not in" is defined to have the inverse truth value of +"in". +''', + 'integers': r'''Integer literals +**************** + +Integer literals denote whole numbers. For example: + + 7 + 3 + 2147483647 + +There is no limit for the length of integer literals apart from what +can be stored in available memory: + + 7922816251426433759354395033679228162514264337593543950336 + +Underscores can be used to group digits for enhanced readability, and +are ignored for determining the numeric value of the literal. For +example, the following literals are equivalent: + + 100_000_000_000 + 100000000000 + 1_00_00_00_00_000 + +Underscores can only occur between digits. For example, "_123", +"321_", and "123__321" are *not* valid literals. + +Integers can be specified in binary (base 2), octal (base 8), or +hexadecimal (base 16) using the prefixes "0b", "0o" and "0x", +respectively. Hexadecimal digits 10 through 15 are represented by +letters "A"-"F", case-insensitive. For example: + + 0b100110111 + 0b_1110_0101 + 0o177 + 0o377 + 0xdeadbeef + 0xDead_Beef + +An underscore can follow the base specifier. 
For example, "0x_1f" is a +valid literal, but "0_x1f" and "0x__1f" are not. + +Leading zeros in a non-zero decimal number are not allowed. For +example, "0123" is not a valid literal. This is for disambiguation +with C-style octal literals, which Python used before version 3.0. + +Formally, integer literals are described by the following lexical +definitions: + + integer: decinteger | bininteger | octinteger | hexinteger | zerointeger + decinteger: nonzerodigit (["_"] digit)* + bininteger: "0" ("b" | "B") (["_"] bindigit)+ + octinteger: "0" ("o" | "O") (["_"] octdigit)+ + hexinteger: "0" ("x" | "X") (["_"] hexdigit)+ + zerointeger: "0"+ (["_"] "0")* + nonzerodigit: "1"..."9" + digit: "0"..."9" + bindigit: "0" | "1" + octdigit: "0"..."7" + hexdigit: digit | "a"..."f" | "A"..."F" + +Changed in version 3.6: Underscores are now allowed for grouping +purposes in literals. +''', + 'lambda': r'''Lambdas +******* + + lambda_expr: "lambda" [parameter_list] ":" expression + +Lambda expressions (sometimes called lambda forms) are used to create +anonymous functions. The expression "lambda parameters: expression" +yields a function object. The unnamed object behaves like a function +object defined with: + + def (parameters): + return expression + +See section Function definitions for the syntax of parameter lists. +Note that functions created with lambda expressions cannot contain +statements or annotations. +''', + 'lists': r'''List displays +************* + +A list display is a possibly empty series of expressions enclosed in +square brackets: + + list_display: "[" [flexible_expression_list | comprehension] "]" + +A list display yields a new list object, the contents being specified +by either a list of expressions or a comprehension. When a comma- +separated list of expressions is supplied, its elements are evaluated +from left to right and placed into the list object in that order. +When a comprehension is supplied, the list is constructed from the +elements resulting from the comprehension. +''', + 'naming': r'''Naming and binding +****************** + + +Binding of names +================ + +*Names* refer to objects. Names are introduced by name binding +operations. + +The following constructs bind names: + +* formal parameters to functions, + +* class definitions, + +* function definitions, + +* assignment expressions, + +* targets that are identifiers if occurring in an assignment: + + * "for" loop header, + + * after "as" in a "with" statement, "except" clause, "except*" + clause, or in the as-pattern in structural pattern matching, + + * in a capture pattern in structural pattern matching + +* "import" statements. + +* "type" statements. + +* type parameter lists. + +The "import" statement of the form "from ... import *" binds all names +defined in the imported module, except those beginning with an +underscore. This form may only be used at the module level. + +A target occurring in a "del" statement is also considered bound for +this purpose (though the actual semantics are to unbind the name). + +Each assignment or import statement occurs within a block defined by a +class or function definition or at the module level (the top-level +code block). + +If a name is bound in a block, it is a local variable of that block, +unless declared as "nonlocal" or "global". If a name is bound at the +module level, it is a global variable. (The variables of the module +code block are local and global.) If a variable is used in a code +block but not defined there, it is a *free variable*. 
+ +Each occurrence of a name in the program text refers to the *binding* +of that name established by the following name resolution rules. + + +Resolution of names +=================== + +A *scope* defines the visibility of a name within a block. If a local +variable is defined in a block, its scope includes that block. If the +definition occurs in a function block, the scope extends to any blocks +contained within the defining one, unless a contained block introduces +a different binding for the name. + +When a name is used in a code block, it is resolved using the nearest +enclosing scope. The set of all such scopes visible to a code block +is called the block’s *environment*. + +When a name is not found at all, a "NameError" exception is raised. If +the current scope is a function scope, and the name refers to a local +variable that has not yet been bound to a value at the point where the +name is used, an "UnboundLocalError" exception is raised. +"UnboundLocalError" is a subclass of "NameError". + +If a name binding operation occurs anywhere within a code block, all +uses of the name within the block are treated as references to the +current block. This can lead to errors when a name is used within a +block before it is bound. This rule is subtle. Python lacks +declarations and allows name binding operations to occur anywhere +within a code block. The local variables of a code block can be +determined by scanning the entire text of the block for name binding +operations. See the FAQ entry on UnboundLocalError for examples. + +If the "global" statement occurs within a block, all uses of the names +specified in the statement refer to the bindings of those names in the +top-level namespace. Names are resolved in the top-level namespace by +searching the global namespace, i.e. the namespace of the module +containing the code block, and the builtins namespace, the namespace +of the module "builtins". The global namespace is searched first. If +the names are not found there, the builtins namespace is searched +next. If the names are also not found in the builtins namespace, new +variables are created in the global namespace. The global statement +must precede all uses of the listed names. + +The "global" statement has the same scope as a name binding operation +in the same block. If the nearest enclosing scope for a free variable +contains a global statement, the free variable is treated as a global. + +The "nonlocal" statement causes corresponding names to refer to +previously bound variables in the nearest enclosing function scope. +"SyntaxError" is raised at compile time if the given name does not +exist in any enclosing function scope. Type parameters cannot be +rebound with the "nonlocal" statement. + +The namespace for a module is automatically created the first time a +module is imported. The main module for a script is always called +"__main__". + +Class definition blocks and arguments to "exec()" and "eval()" are +special in the context of name resolution. A class definition is an +executable statement that may use and define names. These references +follow the normal rules for name resolution with an exception that +unbound local variables are looked up in the global namespace. The +namespace of the class definition becomes the attribute dictionary of +the class. The scope of names defined in a class block is limited to +the class block; it does not extend to the code blocks of methods. 
+This includes comprehensions and generator expressions, but it does
+not include annotation scopes, which have access to their enclosing
+class scopes. This means that the following will fail:
+
+   class A:
+       a = 42
+       b = list(a + i for i in range(10))
+
+However, the following will succeed:
+
+   class A:
+       type Alias = Nested
+       class Nested: pass
+
+   print(A.Alias.__value__)  # <class '__main__.A.Nested'>
+
+
+Annotation scopes
+=================
+
+*Annotations*, type parameter lists and "type" statements introduce
+*annotation scopes*, which behave mostly like function scopes, but
+with some exceptions discussed below.
+
+Annotation scopes are used in the following contexts:
+
+* *Function annotations*.
+
+* *Variable annotations*.
+
+* Type parameter lists for generic type aliases.
+
+* Type parameter lists for generic functions. A generic function’s
+  annotations are executed within the annotation scope, but its
+  defaults and decorators are not.
+
+* Type parameter lists for generic classes. A generic class’s base
+  classes and keyword arguments are executed within the annotation
+  scope, but its decorators are not.
+
+* The bounds, constraints, and default values for type parameters
+  (lazily evaluated).
+
+* The value of type aliases (lazily evaluated).
+
+Annotation scopes differ from function scopes in the following ways:
+
+* Annotation scopes have access to their enclosing class namespace. If
+  an annotation scope is immediately within a class scope, or within
+  another annotation scope that is immediately within a class scope,
+  the code in the annotation scope can use names defined in the class
+  scope as if it were executed directly within the class body. This
+  contrasts with regular functions defined within classes, which
+  cannot access names defined in the class scope.
+
+* Expressions in annotation scopes cannot contain "yield", "yield
+  from", "await", or ":=" expressions. (These expressions are allowed
+  in other scopes contained within the annotation scope.)
+
+* Names defined in annotation scopes cannot be rebound with "nonlocal"
+  statements in inner scopes. This includes only type parameters, as
+  no other syntactic elements that can appear within annotation scopes
+  can introduce new names.
+
+* While annotation scopes have an internal name, that name is not
+  reflected in the *qualified name* of objects defined within the
+  scope. Instead, the "__qualname__" of such objects is as if the
+  object were defined in the enclosing scope.
+
+Added in version 3.12: Annotation scopes were introduced in Python
+3.12 as part of **PEP 695**.
+
+Changed in version 3.13: Annotation scopes are also used for type
+parameter defaults, as introduced by **PEP 696**.
+
+Changed in version 3.14: Annotation scopes are now also used for
+annotations, as specified in **PEP 649** and **PEP 749**.
+
+
+Lazy evaluation
+===============
+
+Most annotation scopes are *lazily evaluated*. This includes
+annotations, the values of type aliases created through the "type"
+statement, and the bounds, constraints, and default values of type
+variables created through the type parameter syntax. This means that
+they are not evaluated when the type alias or type variable is
+created, or when the object carrying annotations is created. Instead,
+they are only evaluated when necessary, for example when the
+"__value__" attribute on a type alias is accessed.
+
+Example:
+
+   >>> type Alias = 1/0
+   >>> Alias.__value__
+   Traceback (most recent call last):
+     ...
+ ZeroDivisionError: division by zero + >>> def func[T: 1/0](): pass + >>> T = func.__type_params__[0] + >>> T.__bound__ + Traceback (most recent call last): + ... + ZeroDivisionError: division by zero + +Here the exception is raised only when the "__value__" attribute of +the type alias or the "__bound__" attribute of the type variable is +accessed. + +This behavior is primarily useful for references to types that have +not yet been defined when the type alias or type variable is created. +For example, lazy evaluation enables creation of mutually recursive +type aliases: + + from typing import Literal + + type SimpleExpr = int | Parenthesized + type Parenthesized = tuple[Literal["("], Expr, Literal[")"]] + type Expr = SimpleExpr | tuple[SimpleExpr, Literal["+", "-"], Expr] + +Lazily evaluated values are evaluated in annotation scope, which means +that names that appear inside the lazily evaluated value are looked up +as if they were used in the immediately enclosing scope. + +Added in version 3.12. + + +Builtins and restricted execution +================================= + +**CPython implementation detail:** Users should not touch +"__builtins__"; it is strictly an implementation detail. Users +wanting to override values in the builtins namespace should "import" +the "builtins" module and modify its attributes appropriately. + +The builtins namespace associated with the execution of a code block +is actually found by looking up the name "__builtins__" in its global +namespace; this should be a dictionary or a module (in the latter case +the module’s dictionary is used). By default, when in the "__main__" +module, "__builtins__" is the built-in module "builtins"; when in any +other module, "__builtins__" is an alias for the dictionary of the +"builtins" module itself. + + +Interaction with dynamic features +================================= + +Name resolution of free variables occurs at runtime, not at compile +time. This means that the following code will print 42: + + i = 10 + def f(): + print(i) + i = 42 + f() + +The "eval()" and "exec()" functions do not have access to the full +environment for resolving names. Names may be resolved in the local +and global namespaces of the caller. Free variables are not resolved +in the nearest enclosing namespace, but in the global namespace. [1] +The "exec()" and "eval()" functions have optional arguments to +override the global and local namespace. If only one namespace is +specified, it is used for both. +''', + 'nonlocal': r'''The "nonlocal" statement +************************ + + nonlocal_stmt: "nonlocal" identifier ("," identifier)* + +When the definition of a function or class is nested (enclosed) within +the definitions of other functions, its nonlocal scopes are the local +scopes of the enclosing functions. The "nonlocal" statement causes the +listed identifiers to refer to names previously bound in nonlocal +scopes. It allows encapsulated code to rebind such nonlocal +identifiers. If a name is bound in more than one nonlocal scope, the +nearest binding is used. If a name is not bound in any nonlocal scope, +or if there is no nonlocal scope, a "SyntaxError" is raised. + +The "nonlocal" statement applies to the entire scope of a function or +class body. A "SyntaxError" is raised if a variable is used or +assigned to prior to its nonlocal declaration in the scope. + +See also: + + **PEP 3104** - Access to Names in Outer Scopes + The specification for the "nonlocal" statement. 
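+
+By way of example, a minimal sketch of "nonlocal" rebinding a variable
+of an enclosing function (the names are arbitrary):
+
+   def make_counter():
+       count = 0
+       def bump():
+           nonlocal count    # rebinds count in make_counter's scope
+           count += 1
+           return count
+       return bump
+
+   bump = make_counter()
+   bump()   # returns 1
+   bump()   # returns 2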
+ +**Programmer’s note:** "nonlocal" is a directive to the parser and +applies only to code parsed along with it. See the note for the +"global" statement. +''', + 'numbers': r'''Numeric literals +**************** + +"NUMBER" tokens represent numeric literals, of which there are three +types: integers, floating-point numbers, and imaginary numbers. + + NUMBER: integer | floatnumber | imagnumber + +The numeric value of a numeric literal is the same as if it were +passed as a string to the "int", "float" or "complex" class +constructor, respectively. Note that not all valid inputs for those +constructors are also valid literals. + +Numeric literals do not include a sign; a phrase like "-1" is actually +an expression composed of the unary operator ‘"-"’ and the literal +"1". + + +Integer literals +================ + +Integer literals denote whole numbers. For example: + + 7 + 3 + 2147483647 + +There is no limit for the length of integer literals apart from what +can be stored in available memory: + + 7922816251426433759354395033679228162514264337593543950336 + +Underscores can be used to group digits for enhanced readability, and +are ignored for determining the numeric value of the literal. For +example, the following literals are equivalent: + + 100_000_000_000 + 100000000000 + 1_00_00_00_00_000 + +Underscores can only occur between digits. For example, "_123", +"321_", and "123__321" are *not* valid literals. + +Integers can be specified in binary (base 2), octal (base 8), or +hexadecimal (base 16) using the prefixes "0b", "0o" and "0x", +respectively. Hexadecimal digits 10 through 15 are represented by +letters "A"-"F", case-insensitive. For example: + + 0b100110111 + 0b_1110_0101 + 0o177 + 0o377 + 0xdeadbeef + 0xDead_Beef + +An underscore can follow the base specifier. For example, "0x_1f" is a +valid literal, but "0_x1f" and "0x__1f" are not. + +Leading zeros in a non-zero decimal number are not allowed. For +example, "0123" is not a valid literal. This is for disambiguation +with C-style octal literals, which Python used before version 3.0. + +Formally, integer literals are described by the following lexical +definitions: + + integer: decinteger | bininteger | octinteger | hexinteger | zerointeger + decinteger: nonzerodigit (["_"] digit)* + bininteger: "0" ("b" | "B") (["_"] bindigit)+ + octinteger: "0" ("o" | "O") (["_"] octdigit)+ + hexinteger: "0" ("x" | "X") (["_"] hexdigit)+ + zerointeger: "0"+ (["_"] "0")* + nonzerodigit: "1"..."9" + digit: "0"..."9" + bindigit: "0" | "1" + octdigit: "0"..."7" + hexdigit: digit | "a"..."f" | "A"..."F" + +Changed in version 3.6: Underscores are now allowed for grouping +purposes in literals. + + +Floating-point literals +======================= + +Floating-point (float) literals, such as "3.14" or "1.5", denote +approximations of real numbers. + +They consist of *integer* and *fraction* parts, each composed of +decimal digits. The parts are separated by a decimal point, ".": + + 2.71828 + 4.0 + +Unlike in integer literals, leading zeros are allowed. For example, +"077.010" is legal, and denotes the same number as "77.01". + +As in integer literals, single underscores may occur between digits to +help readability: + + 96_485.332_123 + 3.14_15_93 + +Either of these parts, but not both, can be empty. For example: + + 10. 
# (equivalent to 10.0)
+   .001   # (equivalent to 0.001)
+
+Optionally, the integer and fraction may be followed by an *exponent*:
+the letter "e" or "E", followed by an optional sign, "+" or "-", and a
+number in the same format as the integer and fraction parts. The "e"
+or "E" represents “times ten raised to the power of”:
+
+   1.0e3   # (represents 1.0×10³, or 1000.0)
+   1.166e-5   # (represents 1.166×10⁻⁵, or 0.00001166)
+   6.02214076e+23   # (represents 6.02214076×10²³, or 602214076000000000000000.)
+
+In floats with only integer and exponent parts, the decimal point may
+be omitted:
+
+   1e3   # (equivalent to 1.e3 and 1.0e3)
+   0e0   # (equivalent to 0.)
+
+Formally, floating-point literals are described by the following
+lexical definitions:
+
+   floatnumber:
+      | digitpart "." [digitpart] [exponent]
+      | "." digitpart [exponent]
+      | digitpart exponent
+   digitpart: digit (["_"] digit)*
+   exponent: ("e" | "E") ["+" | "-"] digitpart
+
+Changed in version 3.6: Underscores are now allowed for grouping
+purposes in literals.
+
+
+Imaginary literals
+==================
+
+Python has complex number objects, but no complex literals. Instead,
+*imaginary literals* denote complex numbers with a zero real part.
+
+For example, in math, the complex number 3+4.2*i* is written as the
+real number 3 added to the imaginary number 4.2*i*. Python uses a
+similar syntax, except the imaginary unit is written as "j" rather
+than *i*:
+
+   3+4.2j
+
+This is an expression composed of the integer literal "3", the
+operator ‘"+"’, and the imaginary literal "4.2j". Since these are
+three separate tokens, whitespace is allowed between them:
+
+   3 + 4.2j
+
+No whitespace is allowed *within* each token. In particular, the "j"
+suffix may not be separated from the number before it.
+
+The number before the "j" has the same syntax as a floating-point
+literal. Thus, the following are valid imaginary literals:
+
+   4.2j
+   3.14j
+   10.j
+   .001j
+   1e100j
+   3.14e-10j
+   3.14_15_93j
+
+Unlike in a floating-point literal the decimal point can be omitted if
+the imaginary number only has an integer part. The number is still
+evaluated as a floating-point number, not an integer:
+
+   10j
+   0j
+   1000000000000000000000000j   # equivalent to 1e+24j
+
+The "j" suffix is case-insensitive. That means you can use "J"
+instead:
+
+   3.14J   # equivalent to 3.14j
+
+Formally, imaginary literals are described by the following lexical
+definition:
+
+   imagnumber: (floatnumber | digitpart) ("j" | "J")
+''',
+ 'numeric-types': r'''Emulating numeric types
+***********************
+
+The following methods can be defined to emulate numeric objects.
+Methods corresponding to operations that are not supported by the
+particular kind of number implemented (e.g., bitwise operations for
+non-integral numbers) should be left undefined.
+
+object.__add__(self, other)
+object.__sub__(self, other)
+object.__mul__(self, other)
+object.__matmul__(self, other)
+object.__truediv__(self, other)
+object.__floordiv__(self, other)
+object.__mod__(self, other)
+object.__divmod__(self, other)
+object.__pow__(self, other[, modulo])
+object.__lshift__(self, other)
+object.__rshift__(self, other)
+object.__and__(self, other)
+object.__xor__(self, other)
+object.__or__(self, other)
+
+   These methods are called to implement the binary arithmetic
+   operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",
+   "pow()", "**", "<<", ">>", "&", "^", "|").
For instance, to
+   evaluate the expression "x + y", where *x* is an instance of a
+   class that has an "__add__()" method, "type(x).__add__(x, y)" is
+   called. The "__divmod__()" method should be equivalent to
+   using "__floordiv__()" and "__mod__()"; it should not be related to
+   "__truediv__()". Note that "__pow__()" should be defined to accept
+   an optional third argument if the three-argument version of the
+   built-in "pow()" function is to be supported.
+
+   If one of those methods does not support the operation with the
+   supplied arguments, it should return "NotImplemented".
+
+object.__radd__(self, other)
+object.__rsub__(self, other)
+object.__rmul__(self, other)
+object.__rmatmul__(self, other)
+object.__rtruediv__(self, other)
+object.__rfloordiv__(self, other)
+object.__rmod__(self, other)
+object.__rdivmod__(self, other)
+object.__rpow__(self, other[, modulo])
+object.__rlshift__(self, other)
+object.__rrshift__(self, other)
+object.__rand__(self, other)
+object.__rxor__(self, other)
+object.__ror__(self, other)
+
+   These methods are called to implement the binary arithmetic
+   operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",
+   "pow()", "**", "<<", ">>", "&", "^", "|") with reflected (swapped)
+   operands. These functions are only called if the operands are of
+   different types, when the left operand does not support the
+   corresponding operation [3], or the right operand’s class is
+   derived from the left operand’s class. [4] For instance, to
+   evaluate the expression "x - y", where *y* is an instance of a
+   class that has an "__rsub__()" method, "type(y).__rsub__(y, x)" is
+   called if "type(x).__sub__(x, y)" returns "NotImplemented" or
+   "type(y)" is a subclass of "type(x)". [5]
+
+   Note that "__rpow__()" should be defined to accept an optional
+   third argument if the three-argument version of the built-in
+   "pow()" function is to be supported.
+
+   Changed in version 3.14: Three-argument "pow()" now tries calling
+   "__rpow__()" if necessary. Previously it was only called in two-
+   argument "pow()" and the binary power operator.
+
+   Note:
+
+     If the right operand’s type is a subclass of the left operand’s
+     type and that subclass provides a different implementation of the
+     reflected method for the operation, this method will be called
+     before the left operand’s non-reflected method. This behavior
+     allows subclasses to override their ancestors’ operations.
+
+object.__iadd__(self, other)
+object.__isub__(self, other)
+object.__imul__(self, other)
+object.__imatmul__(self, other)
+object.__itruediv__(self, other)
+object.__ifloordiv__(self, other)
+object.__imod__(self, other)
+object.__ipow__(self, other[, modulo])
+object.__ilshift__(self, other)
+object.__irshift__(self, other)
+object.__iand__(self, other)
+object.__ixor__(self, other)
+object.__ior__(self, other)
+
+   These methods are called to implement the augmented arithmetic
+   assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", "**=",
+   "<<=", ">>=", "&=", "^=", "|="). These methods should attempt to
+   do the operation in-place (modifying *self*) and return the result
+   (which could be, but does not have to be, *self*). If a specific
+   method is not defined, or if that method returns "NotImplemented",
+   the augmented assignment falls back to the normal methods. For
+   instance, if *x* is an instance of a class with an "__iadd__()"
+   method, "x += y" is equivalent to "x = x.__iadd__(y)".
If + "__iadd__()" does not exist, or if "x.__iadd__(y)" returns + "NotImplemented", "x.__add__(y)" and "y.__radd__(x)" are + considered, as with the evaluation of "x + y". In certain + situations, augmented assignment can result in unexpected errors + (see Why does a_tuple[i] += [‘item’] raise an exception when the + addition works?), but this behavior is in fact part of the data + model. + +object.__neg__(self) +object.__pos__(self) +object.__abs__(self) +object.__invert__(self) + + Called to implement the unary arithmetic operations ("-", "+", + "abs()" and "~"). + +object.__complex__(self) +object.__int__(self) +object.__float__(self) + + Called to implement the built-in functions "complex()", "int()" and + "float()". Should return a value of the appropriate type. + +object.__index__(self) + + Called to implement "operator.index()", and whenever Python needs + to losslessly convert the numeric object to an integer object (such + as in slicing, or in the built-in "bin()", "hex()" and "oct()" + functions). Presence of this method indicates that the numeric + object is an integer type. Must return an integer. + + If "__int__()", "__float__()" and "__complex__()" are not defined + then corresponding built-in functions "int()", "float()" and + "complex()" fall back to "__index__()". + +object.__round__(self[, ndigits]) +object.__trunc__(self) +object.__floor__(self) +object.__ceil__(self) + + Called to implement the built-in function "round()" and "math" + functions "trunc()", "floor()" and "ceil()". Unless *ndigits* is + passed to "__round__()" all these methods should return the value + of the object truncated to an "Integral" (typically an "int"). + + Changed in version 3.14: "int()" no longer delegates to the + "__trunc__()" method. +''', + 'objects': r'''Objects, values and types +************************* + +*Objects* are Python’s abstraction for data. All data in a Python +program is represented by objects or by relations between objects. +Even code is represented by objects. + +Every object has an identity, a type and a value. An object’s +*identity* never changes once it has been created; you may think of it +as the object’s address in memory. The "is" operator compares the +identity of two objects; the "id()" function returns an integer +representing its identity. + +**CPython implementation detail:** For CPython, "id(x)" is the memory +address where "x" is stored. + +An object’s type determines the operations that the object supports +(e.g., “does it have a length?”) and also defines the possible values +for objects of that type. The "type()" function returns an object’s +type (which is an object itself). Like its identity, an object’s +*type* is also unchangeable. [1] + +The *value* of some objects can change. Objects whose value can +change are said to be *mutable*; objects whose value is unchangeable +once they are created are called *immutable*. (The value of an +immutable container object that contains a reference to a mutable +object can change when the latter’s value is changed; however the +container is still considered immutable, because the collection of +objects it contains cannot be changed. So, immutability is not +strictly the same as having an unchangeable value, it is more subtle.) +An object’s mutability is determined by its type; for instance, +numbers, strings and tuples are immutable, while dictionaries and +lists are mutable. + +Objects are never explicitly destroyed; however, when they become +unreachable they may be garbage-collected. 
An implementation is +allowed to postpone garbage collection or omit it altogether — it is a +matter of implementation quality how garbage collection is +implemented, as long as no objects are collected that are still +reachable. + +**CPython implementation detail:** CPython currently uses a reference- +counting scheme with (optional) delayed detection of cyclically linked +garbage, which collects most objects as soon as they become +unreachable, but is not guaranteed to collect garbage containing +circular references. See the documentation of the "gc" module for +information on controlling the collection of cyclic garbage. Other +implementations act differently and CPython may change. Do not depend +on immediate finalization of objects when they become unreachable (so +you should always close files explicitly). + +Note that the use of the implementation’s tracing or debugging +facilities may keep objects alive that would normally be collectable. +Also note that catching an exception with a "try"…"except" statement +may keep objects alive. + +Some objects contain references to “external” resources such as open +files or windows. It is understood that these resources are freed +when the object is garbage-collected, but since garbage collection is +not guaranteed to happen, such objects also provide an explicit way to +release the external resource, usually a "close()" method. Programs +are strongly recommended to explicitly close such objects. The +"try"…"finally" statement and the "with" statement provide convenient +ways to do this. + +Some objects contain references to other objects; these are called +*containers*. Examples of containers are tuples, lists and +dictionaries. The references are part of a container’s value. In +most cases, when we talk about the value of a container, we imply the +values, not the identities of the contained objects; however, when we +talk about the mutability of a container, only the identities of the +immediately contained objects are implied. So, if an immutable +container (like a tuple) contains a reference to a mutable object, its +value changes if that mutable object is changed. + +Types affect almost all aspects of object behavior. Even the +importance of object identity is affected in some sense: for immutable +types, operations that compute new values may actually return a +reference to any existing object with the same type and value, while +for mutable objects this is not allowed. For example, after "a = 1; b += 1", *a* and *b* may or may not refer to the same object with the +value one, depending on the implementation. This is because "int" is +an immutable type, so the reference to "1" can be reused. This +behaviour depends on the implementation used, so should not be relied +upon, but is something to be aware of when making use of object +identity tests. However, after "c = []; d = []", *c* and *d* are +guaranteed to refer to two different, unique, newly created empty +lists. (Note that "e = f = []" assigns the *same* object to both *e* +and *f*.) +''', + 'operator-summary': r'''Operator precedence +******************* + +The following table summarizes the operator precedence in Python, from +highest precedence (most binding) to lowest precedence (least +binding). Operators in the same box have the same precedence. Unless +the syntax is explicitly given, operators are binary. Operators in +the same box group left to right (except for exponentiation and +conditional expressions, which group from right to left). 
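+
+As a quick illustration, the grouping rules can be observed directly
+(a small sketch; the results follow from the table below):
+
+   >>> 2 ** 3 ** 2     # exponentiation groups right to left: 2 ** (3 ** 2)
+   512
+   >>> 100 / 10 / 5    # most operators group left to right: (100 / 10) / 5
+   2.0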
+ +Note that comparisons, membership tests, and identity tests, all have +the same precedence and have a left-to-right chaining feature as +described in the Comparisons section. + ++-------------------------------------------------+---------------------------------------+ +| Operator | Description | +|=================================================|=======================================| +| "(expressions...)", "[expressions...]", "{key: | Binding or parenthesized expression, | +| value...}", "{expressions...}" | list display, dictionary display, set | +| | display | ++-------------------------------------------------+---------------------------------------+ +| "x[index]", "x[index:index]" "x(arguments...)", | Subscription (including slicing), | +| "x.attribute" | call, attribute reference | ++-------------------------------------------------+---------------------------------------+ +| "await x" | Await expression | ++-------------------------------------------------+---------------------------------------+ +| "**" | Exponentiation [5] | ++-------------------------------------------------+---------------------------------------+ +| "+x", "-x", "~x" | Positive, negative, bitwise NOT | ++-------------------------------------------------+---------------------------------------+ +| "*", "@", "/", "//", "%" | Multiplication, matrix | +| | multiplication, division, floor | +| | division, remainder [6] | ++-------------------------------------------------+---------------------------------------+ +| "+", "-" | Addition and subtraction | ++-------------------------------------------------+---------------------------------------+ +| "<<", ">>" | Shifts | ++-------------------------------------------------+---------------------------------------+ +| "&" | Bitwise AND | ++-------------------------------------------------+---------------------------------------+ +| "^" | Bitwise XOR | ++-------------------------------------------------+---------------------------------------+ +| "|" | Bitwise OR | ++-------------------------------------------------+---------------------------------------+ +| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership | +| ">=", "!=", "==" | tests and identity tests | ++-------------------------------------------------+---------------------------------------+ +| "not x" | Boolean NOT | ++-------------------------------------------------+---------------------------------------+ +| "and" | Boolean AND | ++-------------------------------------------------+---------------------------------------+ +| "or" | Boolean OR | ++-------------------------------------------------+---------------------------------------+ +| "if" – "else" | Conditional expression | ++-------------------------------------------------+---------------------------------------+ +| "lambda" | Lambda expression | ++-------------------------------------------------+---------------------------------------+ +| ":=" | Assignment expression | ++-------------------------------------------------+---------------------------------------+ + +-[ Footnotes ]- + +[1] While "abs(x%y) < abs(y)" is true mathematically, for floats it + may not be true numerically due to roundoff. For example, and + assuming a platform on which a Python float is an IEEE 754 double- + precision number, in order that "-1e-100 % 1e100" have the same + sign as "1e100", the computed result is "-1e-100 + 1e100", which + is numerically exactly equal to "1e100". 
The function + "math.fmod()" returns a result whose sign matches the sign of the + first argument instead, and so returns "-1e-100" in this case. + Which approach is more appropriate depends on the application. + +[2] If x is very close to an exact integer multiple of y, it’s + possible for "x//y" to be one larger than "(x-x%y)//y" due to + rounding. In such cases, Python returns the latter result, in + order to preserve that "divmod(x,y)[0] * y + x % y" be very close + to "x". + +[3] The Unicode standard distinguishes between *code points* (e.g. + U+0041) and *abstract characters* (e.g. “LATIN CAPITAL LETTER A”). + While most abstract characters in Unicode are only represented + using one code point, there is a number of abstract characters + that can in addition be represented using a sequence of more than + one code point. For example, the abstract character “LATIN + CAPITAL LETTER C WITH CEDILLA” can be represented as a single + *precomposed character* at code position U+00C7, or as a sequence + of a *base character* at code position U+0043 (LATIN CAPITAL + LETTER C), followed by a *combining character* at code position + U+0327 (COMBINING CEDILLA). + + The comparison operators on strings compare at the level of + Unicode code points. This may be counter-intuitive to humans. For + example, ""\u00C7" == "\u0043\u0327"" is "False", even though both + strings represent the same abstract character “LATIN CAPITAL + LETTER C WITH CEDILLA”. + + To compare strings at the level of abstract characters (that is, + in a way intuitive to humans), use "unicodedata.normalize()". + +[4] Due to automatic garbage-collection, free lists, and the dynamic + nature of descriptors, you may notice seemingly unusual behaviour + in certain uses of the "is" operator, like those involving + comparisons between instance methods, or constants. Check their + documentation for more info. + +[5] The power operator "**" binds less tightly than an arithmetic or + bitwise unary operator on its right, that is, "2**-1" is "0.5". + +[6] The "%" operator is also used for string formatting; the same + precedence applies. +''', + 'pass': r'''The "pass" statement +******************** + + pass_stmt: "pass" + +"pass" is a null operation — when it is executed, nothing happens. It +is useful as a placeholder when a statement is required syntactically, +but no code needs to be executed, for example: + + def f(arg): pass # a function that does nothing (yet) + + class C: pass # a class with no methods (yet) +''', + 'power': r'''The power operator +****************** + +The power operator binds more tightly than unary operators on its +left; it binds less tightly than unary operators on its right. The +syntax is: + + power: (await_expr | primary) ["**" u_expr] + +Thus, in an unparenthesized sequence of power and unary operators, the +operators are evaluated from right to left (this does not constrain +the evaluation order for the operands): "-1**2" results in "-1". + +The power operator has the same semantics as the built-in "pow()" +function, when called with two arguments: it yields its left argument +raised to the power of its right argument. Numeric arguments are first +converted to a common type, and the result is of that type. + +For int operands, the result has the same type as the operands unless +the second argument is negative; in that case, all arguments are +converted to float and a float result is delivered. For example, +"10**2" returns "100", but "10**-2" returns "0.01". 
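+
+For illustration, a short REPL sketch of the behaviour just described:
+
+   >>> 10 ** 2         # int ** non-negative int gives an int
+   100
+   >>> 10 ** -2        # a negative exponent gives a float
+   0.01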
+
+Raising "0.0" to a negative power results in a "ZeroDivisionError".
+Raising a negative number to a fractional power results in a "complex"
+number. (In earlier versions it raised a "ValueError".)
+
+This operation can be customized using the special "__pow__()" and
+"__rpow__()" methods.
+''',
+ 'raise': r'''The "raise" statement
+*********************
+
+   raise_stmt: "raise" [expression ["from" expression]]
+
+If no expressions are present, "raise" re-raises the exception that is
+currently being handled, which is also known as the *active
+exception*. If there isn’t currently an active exception, a
+"RuntimeError" exception is raised indicating that this is an error.
+
+Otherwise, "raise" evaluates the first expression as the exception
+object. It must be either a subclass or an instance of
+"BaseException". If it is a class, the exception instance will be
+obtained when needed by instantiating the class with no arguments.
+
+The *type* of the exception is the exception instance’s class, the
+*value* is the instance itself.
+
+A traceback object is normally created automatically when an exception
+is raised and attached to it as the "__traceback__" attribute. You can
+create an exception and set your own traceback in one step using the
+"with_traceback()" exception method (which returns the same exception
+instance, with its traceback set to its argument), like so:
+
+   raise Exception("foo occurred").with_traceback(tracebackobj)
+
+The "from" clause is used for exception chaining: if given, the second
+*expression* must be another exception class or instance. If the
+second expression is an exception instance, it will be attached to the
+raised exception as the "__cause__" attribute (which is writable). If
+the expression is an exception class, the class will be instantiated
+and the resulting exception instance will be attached to the raised
+exception as the "__cause__" attribute. If the raised exception is not
+handled, both exceptions will be printed:
+
+   >>> try:
+   ...     print(1 / 0)
+   ... except Exception as exc:
+   ...     raise RuntimeError("Something bad happened") from exc
+   ...
+   Traceback (most recent call last):
+     File "<stdin>", line 2, in <module>
+       print(1 / 0)
+       ~~^~~
+   ZeroDivisionError: division by zero
+
+   The above exception was the direct cause of the following exception:
+
+   Traceback (most recent call last):
+     File "<stdin>", line 4, in <module>
+       raise RuntimeError("Something bad happened") from exc
+   RuntimeError: Something bad happened
+
+A similar mechanism works implicitly if a new exception is raised when
+an exception is already being handled. An exception may be handled
+when an "except" or "finally" clause, or a "with" statement, is used.
+The previous exception is then attached as the new exception’s
+"__context__" attribute:
+
+   >>> try:
+   ...     print(1 / 0)
+   ... except:
+   ...     raise RuntimeError("Something bad happened")
+   ...
+   Traceback (most recent call last):
+     File "<stdin>", line 2, in <module>
+       print(1 / 0)
+       ~~^~~
+   ZeroDivisionError: division by zero
+
+   During handling of the above exception, another exception occurred:
+
+   Traceback (most recent call last):
+     File "<stdin>", line 4, in <module>
+       raise RuntimeError("Something bad happened")
+   RuntimeError: Something bad happened
+
+Exception chaining can be explicitly suppressed by specifying "None"
+in the "from" clause:
+
+   >>> try:
+   ...     print(1 / 0)
+   ... except:
+   ...     raise RuntimeError("Something bad happened") from None
+   ...
+
+   Traceback (most recent call last):
+     File "<stdin>", line 4, in <module>
+   RuntimeError: Something bad happened
+
+Additional information on exceptions can be found in section
+Exceptions, and information about handling exceptions is in section
+The try statement.
+
+Changed in version 3.3: "None" is now permitted as "Y" in "raise X
+from Y". Added the "__suppress_context__" attribute to suppress
+automatic display of the exception context.
+
+Changed in version 3.11: If the traceback of the active exception is
+modified in an "except" clause, a subsequent "raise" statement re-
+raises the exception with the modified traceback. Previously, the
+exception was re-raised with the traceback it had when it was caught.
+''',
+ 'return': r'''The "return" statement
+**********************
+
+   return_stmt: "return" [expression_list]
+
+"return" may only occur syntactically nested in a function definition,
+not within a nested class definition.
+
+If an expression list is present, it is evaluated, else "None" is
+substituted.
+
+"return" leaves the current function call with the expression list (or
+"None") as return value.
+
+When "return" passes control out of a "try" statement with a "finally"
+clause, that "finally" clause is executed before really leaving the
+function.
+
+In a generator function, the "return" statement indicates that the
+generator is done and will cause "StopIteration" to be raised. The
+returned value (if any) is used as an argument to construct
+"StopIteration" and becomes the "StopIteration.value" attribute.
+
+In an asynchronous generator function, an empty "return" statement
+indicates that the asynchronous generator is done and will cause
+"StopAsyncIteration" to be raised. A non-empty "return" statement is
+a syntax error in an asynchronous generator function.
+''',
+ 'sequence-types': r'''Emulating container types
+*************************
+
+The following methods can be defined to implement container objects.
+None of them are provided by the "object" class itself. Containers
+usually are *sequences* (such as "lists" or "tuples") or *mappings*
+(like *dictionaries*), but can represent other containers as well.
+The first set of methods is used either to emulate a sequence or to
+emulate a mapping; the difference is that for a sequence, the
+allowable keys should be the integers *k* for which "0 <= k < N" where
+*N* is the length of the sequence, or "slice" objects, which define a
+range of items. It is also recommended that mappings provide the
+methods "keys()", "values()", "items()", "get()", "clear()",
+"setdefault()", "pop()", "popitem()", "copy()", and "update()"
+behaving similarly to those for Python’s standard "dictionary"
+objects. The "collections.abc" module provides a "MutableMapping"
+*abstract base class* to help create those methods from a base set of
+"__getitem__()", "__setitem__()", "__delitem__()", and "keys()".
+
+Mutable sequences should provide methods "append()", "clear()",
+"count()", "extend()", "index()", "insert()", "pop()", "remove()", and
+"reverse()", like Python standard "list" objects. Finally, sequence
+types should implement addition (meaning concatenation) and
+multiplication (meaning repetition) by defining the methods
+"__add__()", "__radd__()", "__iadd__()", "__mul__()", "__rmul__()" and
+"__imul__()" described below; they should not define other numerical
+operators.
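+
+For illustration, a minimal read-only sequence built from the methods
+described below (a sketch; the class name is arbitrary):
+
+   class Squares:
+       """Read-only sequence of the first n square numbers."""
+       def __init__(self, n):
+           self._n = n
+       def __len__(self):
+           return self._n
+       def __getitem__(self, index):
+           if not isinstance(index, int):
+               raise TypeError(index)    # slices are not supported here
+           if not 0 <= index < self._n:
+               raise IndexError(index)   # ends iteration in for loops
+           return index * index
+
+   list(Squares(4))   # [0, 1, 4, 9]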
+
+It is recommended that both mappings and sequences implement the
+"__contains__()" method to allow efficient use of the "in" operator;
+for mappings, "in" should search the mapping’s keys; for sequences, it
+should search through the values. It is further recommended that both
+mappings and sequences implement the "__iter__()" method to allow
+efficient iteration through the container; for mappings, "__iter__()"
+should iterate through the object’s keys; for sequences, it should
+iterate through the values.
+
+object.__len__(self)
+
+   Called to implement the built-in function "len()". Should return
+   the length of the object, an integer ">=" 0. Also, an object that
+   doesn’t define a "__bool__()" method and whose "__len__()" method
+   returns zero is considered to be false in a Boolean context.
+
+   **CPython implementation detail:** In CPython, the length is
+   required to be at most "sys.maxsize". If the length is larger than
+   "sys.maxsize" some features (such as "len()") may raise
+   "OverflowError". To prevent raising "OverflowError" by truth value
+   testing, an object must define a "__bool__()" method.
+
+object.__length_hint__(self)
+
+   Called to implement "operator.length_hint()". Should return an
+   estimated length for the object (which may be greater or less than
+   the actual length). The length must be an integer ">=" 0. The
+   return value may also be "NotImplemented", which is treated the
+   same as if the "__length_hint__" method didn’t exist at all. This
+   method is purely an optimization and is never required for
+   correctness.
+
+   Added in version 3.4.
+
+object.__getitem__(self, subscript)
+
+   Called to implement *subscription*, that is, "self[subscript]". See
+   Subscriptions and slicings for details on the syntax.
+
+   There are two types of built-in objects that support subscription
+   via "__getitem__()":
+
+   * **sequences**, where *subscript* (also called *index*) should be
+     an integer or a "slice" object. See the sequence documentation
+     for the expected behavior, including handling "slice" objects and
+     negative indices.
+
+   * **mappings**, where *subscript* is also called the *key*. See
+     mapping documentation for the expected behavior.
+
+   If *subscript* is of an inappropriate type, "__getitem__()" should
+   raise "TypeError". If *subscript* has an inappropriate value,
+   "__getitem__()" should raise a "LookupError" or one of its
+   subclasses ("IndexError" for sequences; "KeyError" for mappings).
+
+   Note:
+
+     Slicing is handled by "__getitem__()", "__setitem__()", and
+     "__delitem__()". A call like
+
+        a[1:2] = b
+
+     is translated to
+
+        a[slice(1, 2, None)] = b
+
+     and so forth. Missing slice items are always filled in with
+     "None".
+
+   Note:
+
+     The sequence iteration protocol (used, for example, in "for"
+     loops) expects that an "IndexError" will be raised for illegal
+     indexes to allow proper detection of the end of a sequence.
+
+   Note:
+
+     When subscripting a *class*, the special class method
+     "__class_getitem__()" may be called instead of "__getitem__()".
+     See __class_getitem__ versus __getitem__ for more details.
+
+object.__setitem__(self, key, value)
+
+   Called to implement assignment to "self[key]". Same note as for
+   "__getitem__()". This should only be implemented for mappings if
+   the objects support changes to the values for keys, or if new keys
+   can be added, or for sequences if elements can be replaced. The
+   same exceptions should be raised for improper *key* values as for
+   the "__getitem__()" method.
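+
+   For illustration, a minimal sketch of a "dict" subclass that logs
+   subscript assignments (the class name is arbitrary; note that in
+   CPython other "dict" methods such as "update()" do not call it):
+
+      class LoggingDict(dict):
+          def __setitem__(self, key, value):
+              print(f"setting {key!r} to {value!r}")
+              super().__setitem__(key, value)
+
+      d = LoggingDict()
+      d["spam"] = 42   # prints: setting 'spam' to 42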
+ +object.__delitem__(self, key) + + Called to implement deletion of "self[key]". Same note as for + "__getitem__()". This should only be implemented for mappings if + the objects support removal of keys, or for sequences if elements + can be removed from the sequence. The same exceptions should be + raised for improper *key* values as for the "__getitem__()" method. + +object.__missing__(self, key) + + Called by "dict"."__getitem__()" to implement "self[key]" for dict + subclasses when key is not in the dictionary. + +object.__iter__(self) + + This method is called when an *iterator* is required for a + container. This method should return a new iterator object that can + iterate over all the objects in the container. For mappings, it + should iterate over the keys of the container. + +object.__reversed__(self) + + Called (if present) by the "reversed()" built-in to implement + reverse iteration. It should return a new iterator object that + iterates over all the objects in the container in reverse order. + + If the "__reversed__()" method is not provided, the "reversed()" + built-in will fall back to using the sequence protocol ("__len__()" + and "__getitem__()"). Objects that support the sequence protocol + should only provide "__reversed__()" if they can provide an + implementation that is more efficient than the one provided by + "reversed()". + +The membership test operators ("in" and "not in") are normally +implemented as an iteration through a container. However, container +objects can supply the following special method with a more efficient +implementation, which also does not require the object be iterable. + +object.__contains__(self, item) + + Called to implement membership test operators. Should return true + if *item* is in *self*, false otherwise. For mapping objects, this + should consider the keys of the mapping rather than the values or + the key-item pairs. + + For objects that don’t define "__contains__()", the membership test + first tries iteration via "__iter__()", then the old sequence + iteration protocol via "__getitem__()", see this section in the + language reference. +''', + 'shifting': r'''Shifting operations +******************* + +The shifting operations have lower priority than the arithmetic +operations: + + shift_expr: a_expr | shift_expr ("<<" | ">>") a_expr + +These operators accept integers as arguments. They shift the first +argument to the left or right by the number of bits given by the +second argument. + +The left shift operation can be customized using the special +"__lshift__()" and "__rlshift__()" methods. The right shift operation +can be customized using the special "__rshift__()" and "__rrshift__()" +methods. + +A right shift by *n* bits is defined as floor division by "pow(2,n)". +A left shift by *n* bits is defined as multiplication with "pow(2,n)". +''', + 'slicings': r'''Slicings +******** + +A more advanced form of subscription, *slicing*, is commonly used to +extract a portion of a sequence. In this form, the subscript is a +*slice*: up to three expressions separated by colons. 
Any of the +expressions may be omitted, but a slice must contain at least one +colon: + + >>> number_names = ['zero', 'one', 'two', 'three', 'four', 'five'] + >>> number_names[1:3] + ['one', 'two'] + >>> number_names[1:] + ['one', 'two', 'three', 'four', 'five'] + >>> number_names[:3] + ['zero', 'one', 'two'] + >>> number_names[:] + ['zero', 'one', 'two', 'three', 'four', 'five'] + >>> number_names[::2] + ['zero', 'two', 'four'] + >>> number_names[:-3] + ['zero', 'one', 'two'] + >>> del number_names[4:] + >>> number_names + ['zero', 'one', 'two', 'three'] + +When a slice is evaluated, the interpreter constructs a "slice" object +whose "start", "stop" and "step" attributes, respectively, are the +results of the expressions between the colons. Any missing expression +evaluates to "None". This "slice" object is then passed to the +"__getitem__()" or "__class_getitem__()" *special method*, as above. + + # continuing with the SubscriptionDemo instance defined above: + >>> demo[2:3] + subscripted with: slice(2, 3, None) + >>> demo[::'spam'] + subscripted with: slice(None, None, 'spam') +''', + 'specialattrs': r'''Special Attributes +****************** + +The implementation adds a few special read-only attributes to several +object types, where they are relevant. Some of these are not reported +by the "dir()" built-in function. + +definition.__name__ + + The name of the class, function, method, descriptor, or generator + instance. + +definition.__qualname__ + + The *qualified name* of the class, function, method, descriptor, or + generator instance. + + Added in version 3.3. + +definition.__module__ + + The name of the module in which a class or function was defined. + +definition.__doc__ + + The documentation string of a class or function, or "None" if + undefined. + +definition.__type_params__ + + The type parameters of generic classes, functions, and type + aliases. For classes and functions that are not generic, this will + be an empty tuple. + + Added in version 3.12. +''', + 'specialnames': r'''Special method names +******************** + +A class can implement certain operations that are invoked by special +syntax (such as arithmetic operations or subscripting and slicing) by +defining methods with special names. This is Python’s approach to +*operator overloading*, allowing classes to define their own behavior +with respect to language operators. For instance, if a class defines +a method named "__getitem__()", and "x" is an instance of this class, +then "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)". +Except where mentioned, attempts to execute an operation raise an +exception when no appropriate method is defined (typically +"AttributeError" or "TypeError"). + +Setting a special method to "None" indicates that the corresponding +operation is not available. For example, if a class sets "__iter__()" +to "None", the class is not iterable, so calling "iter()" on its +instances will raise a "TypeError" (without falling back to +"__getitem__()"). [2] + +When implementing a class that emulates any built-in type, it is +important that the emulation only be implemented to the degree that it +makes sense for the object being modelled. For example, some +sequences may work well with retrieval of individual elements, but +extracting a slice may not make sense. (One example of this is the +NodeList interface in the W3C’s Document Object Model.) + + +Basic customization +=================== + +object.__new__(cls[, ...]) + + Called to create a new instance of class *cls*. 
"__new__()" is a + static method (special-cased so you need not declare it as such) + that takes the class of which an instance was requested as its + first argument. The remaining arguments are those passed to the + object constructor expression (the call to the class). The return + value of "__new__()" should be the new object instance (usually an + instance of *cls*). + + Typical implementations create a new instance of the class by + invoking the superclass’s "__new__()" method using + "super().__new__(cls[, ...])" with appropriate arguments and then + modifying the newly created instance as necessary before returning + it. + + If "__new__()" is invoked during object construction and it returns + an instance of *cls*, then the new instance’s "__init__()" method + will be invoked like "__init__(self[, ...])", where *self* is the + new instance and the remaining arguments are the same as were + passed to the object constructor. + + If "__new__()" does not return an instance of *cls*, then the new + instance’s "__init__()" method will not be invoked. + + "__new__()" is intended mainly to allow subclasses of immutable + types (like int, str, or tuple) to customize instance creation. It + is also commonly overridden in custom metaclasses in order to + customize class creation. + +object.__init__(self[, ...]) + + Called after the instance has been created (by "__new__()"), but + before it is returned to the caller. The arguments are those + passed to the class constructor expression. If a base class has an + "__init__()" method, the derived class’s "__init__()" method, if + any, must explicitly call it to ensure proper initialization of the + base class part of the instance; for example: + "super().__init__([args...])". + + Because "__new__()" and "__init__()" work together in constructing + objects ("__new__()" to create it, and "__init__()" to customize + it), no non-"None" value may be returned by "__init__()"; doing so + will cause a "TypeError" to be raised at runtime. + +object.__del__(self) + + Called when the instance is about to be destroyed. This is also + called a finalizer or (improperly) a destructor. If a base class + has a "__del__()" method, the derived class’s "__del__()" method, + if any, must explicitly call it to ensure proper deletion of the + base class part of the instance. + + It is possible (though not recommended!) for the "__del__()" method + to postpone destruction of the instance by creating a new reference + to it. This is called object *resurrection*. It is + implementation-dependent whether "__del__()" is called a second + time when a resurrected object is about to be destroyed; the + current *CPython* implementation only calls it once. + + It is not guaranteed that "__del__()" methods are called for + objects that still exist when the interpreter exits. + "weakref.finalize" provides a straightforward way to register a + cleanup function to be called when an object is garbage collected. + + Note: + + "del x" doesn’t directly call "x.__del__()" — the former + decrements the reference count for "x" by one, and the latter is + only called when "x"’s reference count reaches zero. + + **CPython implementation detail:** It is possible for a reference + cycle to prevent the reference count of an object from going to + zero. In this case, the cycle will be later detected and deleted + by the *cyclic garbage collector*. A common cause of reference + cycles is when an exception has been caught in a local variable. 
+ The frame’s locals then reference the exception, which references + its own traceback, which references the locals of all frames caught + in the traceback. + + See also: Documentation for the "gc" module. + + Warning: + + Due to the precarious circumstances under which "__del__()" + methods are invoked, exceptions that occur during their execution + are ignored, and a warning is printed to "sys.stderr" instead. + In particular: + + * "__del__()" can be invoked when arbitrary code is being + executed, including from any arbitrary thread. If "__del__()" + needs to take a lock or invoke any other blocking resource, it + may deadlock as the resource may already be taken by the code + that gets interrupted to execute "__del__()". + + * "__del__()" can be executed during interpreter shutdown. As a + consequence, the global variables it needs to access (including + other modules) may already have been deleted or set to "None". + Python guarantees that globals whose name begins with a single + underscore are deleted from their module before other globals + are deleted; if no other references to such globals exist, this + may help in assuring that imported modules are still available + at the time when the "__del__()" method is called. + +object.__repr__(self) + + Called by the "repr()" built-in function to compute the “official” + string representation of an object. If at all possible, this + should look like a valid Python expression that could be used to + recreate an object with the same value (given an appropriate + environment). If this is not possible, a string of the form + "<...some useful description...>" should be returned. The return + value must be a string object. If a class defines "__repr__()" but + not "__str__()", then "__repr__()" is also used when an “informal” + string representation of instances of that class is required. + + This is typically used for debugging, so it is important that the + representation is information-rich and unambiguous. A default + implementation is provided by the "object" class itself. + +object.__str__(self) + + Called by "str(object)", the default "__format__()" implementation, + and the built-in function "print()", to compute the “informal” or + nicely printable string representation of an object. The return + value must be a str object. + + This method differs from "object.__repr__()" in that there is no + expectation that "__str__()" return a valid Python expression: a + more convenient or concise representation can be used. + + The default implementation defined by the built-in type "object" + calls "object.__repr__()". + +object.__bytes__(self) + + Called by bytes to compute a byte-string representation of an + object. This should return a "bytes" object. The "object" class + itself does not provide this method. + +object.__format__(self, format_spec) + + Called by the "format()" built-in function, and by extension, + evaluation of formatted string literals and the "str.format()" + method, to produce a “formatted” string representation of an + object. The *format_spec* argument is a string that contains a + description of the formatting options desired. The interpretation + of the *format_spec* argument is up to the type implementing + "__format__()", however most classes will either delegate + formatting to one of the built-in types, or use a similar + formatting option syntax. + + See Format specification mini-language for a description of the + standard formatting syntax. + + The return value must be a string object. 
+
+   The default implementation by the "object" class should be given an
+   empty *format_spec* string. It delegates to "__str__()".
+
+   Changed in version 3.4: The __format__ method of "object" itself
+   raises a "TypeError" if passed any non-empty string.
+
+   Changed in version 3.7: "object.__format__(x, '')" is now
+   equivalent to "str(x)" rather than "format(str(x), '')".
+
+object.__lt__(self, other)
+object.__le__(self, other)
+object.__eq__(self, other)
+object.__ne__(self, other)
+object.__gt__(self, other)
+object.__ge__(self, other)
+
+   These are the so-called “rich comparison” methods. The
+   correspondence between operator symbols and method names is as
+   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",
+   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls
+   "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".
+
+   A rich comparison method may return the singleton "NotImplemented"
+   if it does not implement the operation for a given pair of
+   arguments. By convention, "False" and "True" are returned for a
+   successful comparison. However, these methods can return any value,
+   so if the comparison operator is used in a Boolean context (e.g.,
+   in the condition of an "if" statement), Python will call "bool()"
+   on the value to determine if the result is true or false.
+
+   By default, "object" implements "__eq__()" by using "is", returning
+   "NotImplemented" in the case of a false comparison: "True if x is y
+   else NotImplemented". For "__ne__()", by default it delegates to
+   "__eq__()" and inverts the result unless it is "NotImplemented".
+   There are no other implied relationships among the comparison
+   operators or default implementations; for example, the truth of
+   "(x<y or x==y)" does not imply "x<=y". To automatically generate
+   ordering operations from a single root operation, see
+   "functools.total_ordering()".
+
+object.__hash__(self)
+
+   Called by built-in function "hash()" and for operations on members
+   of hashed collections including "set", "frozenset", and "dict".
+   The "__hash__()" method should return an integer. The only required
+   property is that objects which compare equal have the same hash
+   value; it is advised to mix together the hash values of the
+   components of the object that also play a part in comparison of
+   objects by packing them into a tuple and hashing the tuple.
+   Example:
+
+      def __hash__(self):
+          return hash((self.name, self.nick, self.color))
+
+   A class that overrides "__eq__()" and does not define "__hash__()"
+   will have its "__hash__()" implicitly set to "None". When the
+   "__hash__()" method of a class is "None", instances of the class
+   will raise an appropriate "TypeError" when a program attempts to
+   retrieve their hash value, and will also be correctly identified as
+   unhashable when checking "isinstance(obj,
+   collections.abc.Hashable)".
+
+   If a class that overrides "__eq__()" needs to retain the hash
+   implementation from a parent class, the interpreter must be told
+   this explicitly by setting "__hash__ = <ParentClass>.__hash__".
+
+   If a class that does not override "__eq__()" wishes to suppress
+   hash support, it should include "__hash__ = None" in the class
+   definition. A class which defines its own "__hash__()" that
+   explicitly raises a "TypeError" would be incorrectly identified as
+   hashable by an "isinstance(obj, collections.abc.Hashable)" call.
+
+   Note:
+
+     By default, the "__hash__()" values of str and bytes objects are
+     “salted” with an unpredictable random value. Although they
+     remain constant within an individual Python process, they are not
+     predictable between repeated invocations of Python. This is
+     intended to provide protection against a denial-of-service caused
+     by carefully chosen inputs that exploit the worst case
+     performance of a dict insertion, *O*(*n*^2) complexity. See
+     https://ocert.org/advisories/ocert-2011-003.html for details.
+
+     Changing hash values affects the iteration order of sets. Python
+     has never made guarantees about this ordering (and it typically
+     varies between 32-bit and 64-bit builds).
+
+     See also "PYTHONHASHSEED".
+
+   Changed in version 3.3: Hash randomization is enabled by default.
+
+object.__bool__(self)
+
+   Called to implement truth value testing and the built-in operation
+   "bool()"; should return "False" or "True". When this method is not
+   defined, "__len__()" is called, if it is defined, and the object is
+   considered true if its result is nonzero. If a class defines
+   neither "__len__()" nor "__bool__()" (which is true of the "object"
+   class itself), all its instances are considered true.
+
+
+Customizing attribute access
+============================
+
+The following methods can be defined to customize the meaning of
+attribute access (use of, assignment to, or deletion of "x.name") for
+class instances.
+ +object.__getattr__(self, name) + + Called when the default attribute access fails with an + "AttributeError" (either "__getattribute__()" raises an + "AttributeError" because *name* is not an instance attribute or an + attribute in the class tree for "self"; or "__get__()" of a *name* + property raises "AttributeError"). This method should either + return the (computed) attribute value or raise an "AttributeError" + exception. The "object" class itself does not provide this method. + + Note that if the attribute is found through the normal mechanism, + "__getattr__()" is not called. (This is an intentional asymmetry + between "__getattr__()" and "__setattr__()".) This is done both for + efficiency reasons and because otherwise "__getattr__()" would have + no way to access other attributes of the instance. Note that at + least for instance variables, you can take total control by not + inserting any values in the instance attribute dictionary (but + instead inserting them in another object). See the + "__getattribute__()" method below for a way to actually get total + control over attribute access. + +object.__getattribute__(self, name) + + Called unconditionally to implement attribute accesses for + instances of the class. If the class also defines "__getattr__()", + the latter will not be called unless "__getattribute__()" either + calls it explicitly or raises an "AttributeError". This method + should return the (computed) attribute value or raise an + "AttributeError" exception. In order to avoid infinite recursion in + this method, its implementation should always call the base class + method with the same name to access any attributes it needs, for + example, "object.__getattribute__(self, name)". + + Note: + + This method may still be bypassed when looking up special methods + as the result of implicit invocation via language syntax or + built-in functions. See Special method lookup. + + For certain sensitive attribute accesses, raises an auditing event + "object.__getattr__" with arguments "obj" and "name". + +object.__setattr__(self, name, value) + + Called when an attribute assignment is attempted. This is called + instead of the normal mechanism (i.e. store the value in the + instance dictionary). *name* is the attribute name, *value* is the + value to be assigned to it. + + If "__setattr__()" wants to assign to an instance attribute, it + should call the base class method with the same name, for example, + "object.__setattr__(self, name, value)". + + For certain sensitive attribute assignments, raises an auditing + event "object.__setattr__" with arguments "obj", "name", "value". + +object.__delattr__(self, name) + + Like "__setattr__()" but for attribute deletion instead of + assignment. This should only be implemented if "del obj.name" is + meaningful for the object. + + For certain sensitive attribute deletions, raises an auditing event + "object.__delattr__" with arguments "obj" and "name". + +object.__dir__(self) + + Called when "dir()" is called on the object. An iterable must be + returned. "dir()" converts the returned iterable to a list and + sorts it. + + +Customizing module attribute access +----------------------------------- + +module.__getattr__() +module.__dir__() + +Special names "__getattr__" and "__dir__" can be also used to +customize access to module attributes. The "__getattr__" function at +the module level should accept one argument which is the name of an +attribute and return the computed value or raise an "AttributeError". 
+If an attribute is not found on a module object through the normal +lookup, i.e. "object.__getattribute__()", then "__getattr__" is +searched in the module "__dict__" before raising an "AttributeError". +If found, it is called with the attribute name and the result is +returned. + +The "__dir__" function should accept no arguments, and return an +iterable of strings that represents the names accessible on module. If +present, this function overrides the standard "dir()" search on a +module. + +module.__class__ + +For a more fine grained customization of the module behavior (setting +attributes, properties, etc.), one can set the "__class__" attribute +of a module object to a subclass of "types.ModuleType". For example: + + import sys + from types import ModuleType + + class VerboseModule(ModuleType): + def __repr__(self): + return f'Verbose {self.__name__}' + + def __setattr__(self, attr, value): + print(f'Setting {attr}...') + super().__setattr__(attr, value) + + sys.modules[__name__].__class__ = VerboseModule + +Note: + + Defining module "__getattr__" and setting module "__class__" only + affect lookups made using the attribute access syntax – directly + accessing the module globals (whether by code within the module, or + via a reference to the module’s globals dictionary) is unaffected. + +Changed in version 3.5: "__class__" module attribute is now writable. + +Added in version 3.7: "__getattr__" and "__dir__" module attributes. + +See also: + + **PEP 562** - Module __getattr__ and __dir__ + Describes the "__getattr__" and "__dir__" functions on modules. + + +Implementing Descriptors +------------------------ + +The following methods only apply when an instance of the class +containing the method (a so-called *descriptor* class) appears in an +*owner* class (the descriptor must be in either the owner’s class +dictionary or in the class dictionary for one of its parents). In the +examples below, “the attribute” refers to the attribute whose name is +the key of the property in the owner class’ "__dict__". The "object" +class itself does not implement any of these protocols. + +object.__get__(self, instance, owner=None) + + Called to get the attribute of the owner class (class attribute + access) or of an instance of that class (instance attribute + access). The optional *owner* argument is the owner class, while + *instance* is the instance that the attribute was accessed through, + or "None" when the attribute is accessed through the *owner*. + + This method should return the computed attribute value or raise an + "AttributeError" exception. + + **PEP 252** specifies that "__get__()" is callable with one or two + arguments. Python’s own built-in descriptors support this + specification; however, it is likely that some third-party tools + have descriptors that require both arguments. Python’s own + "__getattribute__()" implementation always passes in both arguments + whether they are required or not. + +object.__set__(self, instance, value) + + Called to set the attribute on an instance *instance* of the owner + class to a new value, *value*. + + Note, adding "__set__()" or "__delete__()" changes the kind of + descriptor to a “data descriptor”. See Invoking Descriptors for + more details. + +object.__delete__(self, instance) + + Called to delete the attribute on an instance *instance* of the + owner class. 
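+
+As an illustration of the protocol above (a minimal sketch, not
+normative; "PositiveNumber" and "Order" are hypothetical names), a
+data descriptor can combine "__get__()", "__set__()" and
+"__set_name__()" to validate an attribute:
+
+   class PositiveNumber:
+       """A data descriptor that rejects values <= 0."""
+
+       def __set_name__(self, owner, name):
+           # Remember a private name to use in the instance dict.
+           self._name = "_" + name
+
+       def __get__(self, instance, owner=None):
+           if instance is None:
+               return self            # accessed on the owner class
+           return getattr(instance, self._name)
+
+       def __set__(self, instance, value):
+           if value <= 0:
+               raise ValueError("value must be positive")
+           setattr(instance, self._name, value)
+
+   class Order:
+       quantity = PositiveNumber()
+
+       def __init__(self, quantity):
+           self.quantity = quantity   # routed through __set__()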
+ +Instances of descriptors may also have the "__objclass__" attribute +present: + +object.__objclass__ + + The attribute "__objclass__" is interpreted by the "inspect" module + as specifying the class where this object was defined (setting this + appropriately can assist in runtime introspection of dynamic class + attributes). For callables, it may indicate that an instance of the + given type (or a subclass) is expected or required as the first + positional argument (for example, CPython sets this attribute for + unbound methods that are implemented in C). + + +Invoking Descriptors +-------------------- + +In general, a descriptor is an object attribute with “binding +behavior”, one whose attribute access has been overridden by methods +in the descriptor protocol: "__get__()", "__set__()", and +"__delete__()". If any of those methods are defined for an object, it +is said to be a descriptor. + +The default behavior for attribute access is to get, set, or delete +the attribute from an object’s dictionary. For instance, "a.x" has a +lookup chain starting with "a.__dict__['x']", then +"type(a).__dict__['x']", and continuing through the base classes of +"type(a)" excluding metaclasses. + +However, if the looked-up value is an object defining one of the +descriptor methods, then Python may override the default behavior and +invoke the descriptor method instead. Where this occurs in the +precedence chain depends on which descriptor methods were defined and +how they were called. + +The starting point for descriptor invocation is a binding, "a.x". How +the arguments are assembled depends on "a": + +Direct Call + The simplest and least common call is when user code directly + invokes a descriptor method: "x.__get__(a)". + +Instance Binding + If binding to an object instance, "a.x" is transformed into the + call: "type(a).__dict__['x'].__get__(a, type(a))". + +Class Binding + If binding to a class, "A.x" is transformed into the call: + "A.__dict__['x'].__get__(None, A)". + +Super Binding + A dotted lookup such as "super(A, a).x" searches + "a.__class__.__mro__" for a base class "B" following "A" and then + returns "B.__dict__['x'].__get__(a, A)". If not a descriptor, "x" + is returned unchanged. + +For instance bindings, the precedence of descriptor invocation depends +on which descriptor methods are defined. A descriptor can define any +combination of "__get__()", "__set__()" and "__delete__()". If it +does not define "__get__()", then accessing the attribute will return +the descriptor object itself unless there is a value in the object’s +instance dictionary. If the descriptor defines "__set__()" and/or +"__delete__()", it is a data descriptor; if it defines neither, it is +a non-data descriptor. Normally, data descriptors define both +"__get__()" and "__set__()", while non-data descriptors have just the +"__get__()" method. Data descriptors with "__get__()" and "__set__()" +(and/or "__delete__()") defined always override a redefinition in an +instance dictionary. In contrast, non-data descriptors can be +overridden by instances. + +Python methods (including those decorated with "@staticmethod" and +"@classmethod") are implemented as non-data descriptors. Accordingly, +instances can redefine and override methods. This allows individual +instances to acquire behaviors that differ from other instances of the +same class. + +The "property()" function is implemented as a data descriptor. +Accordingly, instances cannot override the behavior of a property. 
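+
+One way to observe this difference is to shadow both kinds of
+attribute in an instance dictionary. This is a hypothetical sketch
+for illustration, not doctest output from the reference:
+
+   class C:
+       def method(self):          # functions are non-data descriptors
+           return "from the class"
+
+       @property
+       def prop(self):            # properties are data descriptors
+           return "from the property"
+
+   c = C()
+   c.__dict__['method'] = lambda: "from the instance"
+   print(c.method())              # the instance dict wins
+   c.__dict__['prop'] = "shadowed"
+   print(c.prop)                  # the property still wins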
+ + +__slots__ +--------- + +*__slots__* allow us to explicitly declare data members (like +properties) and deny the creation of "__dict__" and *__weakref__* +(unless explicitly declared in *__slots__* or available in a parent.) + +The space saved over using "__dict__" can be significant. Attribute +lookup speed can be significantly improved as well. + +object.__slots__ + + This class variable can be assigned a string, iterable, or sequence + of strings with variable names used by instances. *__slots__* + reserves space for the declared variables and prevents the + automatic creation of "__dict__" and *__weakref__* for each + instance. + +Notes on using *__slots__*: + +* When inheriting from a class without *__slots__*, the "__dict__" and + *__weakref__* attribute of the instances will always be accessible. + +* Without a "__dict__" variable, instances cannot be assigned new + variables not listed in the *__slots__* definition. Attempts to + assign to an unlisted variable name raises "AttributeError". If + dynamic assignment of new variables is desired, then add + "'__dict__'" to the sequence of strings in the *__slots__* + declaration. + +* Without a *__weakref__* variable for each instance, classes defining + *__slots__* do not support "weak references" to its instances. If + weak reference support is needed, then add "'__weakref__'" to the + sequence of strings in the *__slots__* declaration. + +* *__slots__* are implemented at the class level by creating + descriptors for each variable name. As a result, class attributes + cannot be used to set default values for instance variables defined + by *__slots__*; otherwise, the class attribute would overwrite the + descriptor assignment. + +* The action of a *__slots__* declaration is not limited to the class + where it is defined. *__slots__* declared in parents are available + in child classes. However, instances of a child subclass will get a + "__dict__" and *__weakref__* unless the subclass also defines + *__slots__* (which should only contain names of any *additional* + slots). + +* If a class defines a slot also defined in a base class, the instance + variable defined by the base class slot is inaccessible (except by + retrieving its descriptor directly from the base class). This + renders the meaning of the program undefined. In the future, a + check may be added to prevent this. + +* "TypeError" will be raised if nonempty *__slots__* are defined for a + class derived from a ""variable-length" built-in type" such as + "int", "bytes", and "tuple". + +* Any non-string *iterable* may be assigned to *__slots__*. + +* If a "dictionary" is used to assign *__slots__*, the dictionary keys + will be used as the slot names. The values of the dictionary can be + used to provide per-attribute docstrings that will be recognised by + "inspect.getdoc()" and displayed in the output of "help()". + +* "__class__" assignment works only if both classes have the same + *__slots__*. + +* Multiple inheritance with multiple slotted parent classes can be + used, but only one parent is allowed to have attributes created by + slots (the other bases must have empty slot layouts) - violations + raise "TypeError". + +* If an *iterator* is used for *__slots__* then a *descriptor* is + created for each of the iterator’s values. However, the *__slots__* + attribute will be an empty iterator. + + +Customizing class creation +========================== + +Whenever a class inherits from another class, "__init_subclass__()" is +called on the parent class. 
This way, it is possible to write classes +which change the behavior of subclasses. This is closely related to +class decorators, but where class decorators only affect the specific +class they’re applied to, "__init_subclass__" solely applies to future +subclasses of the class defining the method. + +classmethod object.__init_subclass__(cls) + + This method is called whenever the containing class is subclassed. + *cls* is then the new subclass. If defined as a normal instance + method, this method is implicitly converted to a class method. + + Keyword arguments which are given to a new class are passed to the + parent class’s "__init_subclass__". For compatibility with other + classes using "__init_subclass__", one should take out the needed + keyword arguments and pass the others over to the base class, as + in: + + class Philosopher: + def __init_subclass__(cls, /, default_name, **kwargs): + super().__init_subclass__(**kwargs) + cls.default_name = default_name + + class AustralianPhilosopher(Philosopher, default_name="Bruce"): + pass + + The default implementation "object.__init_subclass__" does nothing, + but raises an error if it is called with any arguments. + + Note: + + The metaclass hint "metaclass" is consumed by the rest of the + type machinery, and is never passed to "__init_subclass__" + implementations. The actual metaclass (rather than the explicit + hint) can be accessed as "type(cls)". + + Added in version 3.6. + +When a class is created, "type.__new__()" scans the class variables +and makes callbacks to those with a "__set_name__()" hook. + +object.__set_name__(self, owner, name) + + Automatically called at the time the owning class *owner* is + created. The object has been assigned to *name* in that class: + + class A: + x = C() # Automatically calls: x.__set_name__(A, 'x') + + If the class variable is assigned after the class is created, + "__set_name__()" will not be called automatically. If needed, + "__set_name__()" can be called directly: + + class A: + pass + + c = C() + A.x = c # The hook is not called + c.__set_name__(A, 'x') # Manually invoke the hook + + See Creating the class object for more details. + + Added in version 3.6. + + +Metaclasses +----------- + +By default, classes are constructed using "type()". The class body is +executed in a new namespace and the class name is bound locally to the +result of "type(name, bases, namespace)". + +The class creation process can be customized by passing the +"metaclass" keyword argument in the class definition line, or by +inheriting from an existing class that included such an argument. In +the following example, both "MyClass" and "MySubclass" are instances +of "Meta": + + class Meta(type): + pass + + class MyClass(metaclass=Meta): + pass + + class MySubclass(MyClass): + pass + +Any other keyword arguments that are specified in the class definition +are passed through to all metaclass operations described below. + +When a class definition is executed, the following steps occur: + +* MRO entries are resolved; + +* the appropriate metaclass is determined; + +* the class namespace is prepared; + +* the class body is executed; + +* the class object is created. + + +Resolving MRO entries +--------------------- + +object.__mro_entries__(self, bases) + + If a base that appears in a class definition is not an instance of + "type", then an "__mro_entries__()" method is searched on the base. 
+ If an "__mro_entries__()" method is found, the base is substituted + with the result of a call to "__mro_entries__()" when creating the + class. The method is called with the original bases tuple passed to + the *bases* parameter, and must return a tuple of classes that will + be used instead of the base. The returned tuple may be empty: in + these cases, the original base is ignored. + +See also: + + "types.resolve_bases()" + Dynamically resolve bases that are not instances of "type". + + "types.get_original_bases()" + Retrieve a class’s “original bases” prior to modifications by + "__mro_entries__()". + + **PEP 560** + Core support for typing module and generic types. + + +Determining the appropriate metaclass +------------------------------------- + +The appropriate metaclass for a class definition is determined as +follows: + +* if no bases and no explicit metaclass are given, then "type()" is + used; + +* if an explicit metaclass is given and it is *not* an instance of + "type()", then it is used directly as the metaclass; + +* if an instance of "type()" is given as the explicit metaclass, or + bases are defined, then the most derived metaclass is used. + +The most derived metaclass is selected from the explicitly specified +metaclass (if any) and the metaclasses (i.e. "type(cls)") of all +specified base classes. The most derived metaclass is one which is a +subtype of *all* of these candidate metaclasses. If none of the +candidate metaclasses meets that criterion, then the class definition +will fail with "TypeError". + + +Preparing the class namespace +----------------------------- + +Once the appropriate metaclass has been identified, then the class +namespace is prepared. If the metaclass has a "__prepare__" attribute, +it is called as "namespace = metaclass.__prepare__(name, bases, +**kwds)" (where the additional keyword arguments, if any, come from +the class definition). The "__prepare__" method should be implemented +as a "classmethod". The namespace returned by "__prepare__" is passed +in to "__new__", but when the final class object is created the +namespace is copied into a new "dict". + +If the metaclass has no "__prepare__" attribute, then the class +namespace is initialised as an empty ordered mapping. + +See also: + + **PEP 3115** - Metaclasses in Python 3000 + Introduced the "__prepare__" namespace hook + + +Executing the class body +------------------------ + +The class body is executed (approximately) as "exec(body, globals(), +namespace)". The key difference from a normal call to "exec()" is that +lexical scoping allows the class body (including any methods) to +reference names from the current and outer scopes when the class +definition occurs inside a function. + +However, even when the class definition occurs inside the function, +methods defined inside the class still cannot see names defined at the +class scope. Class variables must be accessed through the first +parameter of instance or class methods, or through the implicit +lexically scoped "__class__" reference described in the next section. + + +Creating the class object +------------------------- + +Once the class namespace has been populated by executing the class +body, the class object is created by calling "metaclass(name, bases, +namespace, **kwds)" (the additional keywords passed here are the same +as those passed to "__prepare__"). + +This class object is the one that will be referenced by the zero- +argument form of "super()". 
"__class__" is an implicit closure +reference created by the compiler if any methods in a class body refer +to either "__class__" or "super". This allows the zero argument form +of "super()" to correctly identify the class being defined based on +lexical scoping, while the class or instance that was used to make the +current call is identified based on the first argument passed to the +method. + +**CPython implementation detail:** In CPython 3.6 and later, the +"__class__" cell is passed to the metaclass as a "__classcell__" entry +in the class namespace. If present, this must be propagated up to the +"type.__new__" call in order for the class to be initialised +correctly. Failing to do so will result in a "RuntimeError" in Python +3.8. + +When using the default metaclass "type", or any metaclass that +ultimately calls "type.__new__", the following additional +customization steps are invoked after creating the class object: + +1. The "type.__new__" method collects all of the attributes in the + class namespace that define a "__set_name__()" method; + +2. Those "__set_name__" methods are called with the class being + defined and the assigned name of that particular attribute; + +3. The "__init_subclass__()" hook is called on the immediate parent of + the new class in its method resolution order. + +After the class object is created, it is passed to the class +decorators included in the class definition (if any) and the resulting +object is bound in the local namespace as the defined class. + +When a new class is created by "type.__new__", the object provided as +the namespace parameter is copied to a new ordered mapping and the +original object is discarded. The new copy is wrapped in a read-only +proxy, which becomes the "__dict__" attribute of the class object. + +See also: + + **PEP 3135** - New super + Describes the implicit "__class__" closure reference + + +Uses for metaclasses +-------------------- + +The potential uses for metaclasses are boundless. Some ideas that have +been explored include enum, logging, interface checking, automatic +delegation, automatic property creation, proxies, frameworks, and +automatic resource locking/synchronization. + + +Customizing instance and subclass checks +======================================== + +The following methods are used to override the default behavior of the +"isinstance()" and "issubclass()" built-in functions. + +In particular, the metaclass "abc.ABCMeta" implements these methods in +order to allow the addition of Abstract Base Classes (ABCs) as +“virtual base classes” to any class or type (including built-in +types), including other ABCs. + +type.__instancecheck__(self, instance) + + Return true if *instance* should be considered a (direct or + indirect) instance of *class*. If defined, called to implement + "isinstance(instance, class)". + +type.__subclasscheck__(self, subclass) + + Return true if *subclass* should be considered a (direct or + indirect) subclass of *class*. If defined, called to implement + "issubclass(subclass, class)". + +Note that these methods are looked up on the type (metaclass) of a +class. They cannot be defined as class methods in the actual class. +This is consistent with the lookup of special methods that are called +on instances, only in this case the instance is itself a class. 
+ +See also: + + **PEP 3119** - Introducing Abstract Base Classes + Includes the specification for customizing "isinstance()" and + "issubclass()" behavior through "__instancecheck__()" and + "__subclasscheck__()", with motivation for this functionality in + the context of adding Abstract Base Classes (see the "abc" + module) to the language. + + +Emulating generic types +======================= + +When using *type annotations*, it is often useful to *parameterize* a +*generic type* using Python’s square-brackets notation. For example, +the annotation "list[int]" might be used to signify a "list" in which +all the elements are of type "int". + +See also: + + **PEP 484** - Type Hints + Introducing Python’s framework for type annotations + + Generic Alias Types + Documentation for objects representing parameterized generic + classes + + Generics, user-defined generics and "typing.Generic" + Documentation on how to implement generic classes that can be + parameterized at runtime and understood by static type-checkers. + +A class can *generally* only be parameterized if it defines the +special class method "__class_getitem__()". + +classmethod object.__class_getitem__(cls, key) + + Return an object representing the specialization of a generic class + by type arguments found in *key*. + + When defined on a class, "__class_getitem__()" is automatically a + class method. As such, there is no need for it to be decorated with + "@classmethod" when it is defined. + + +The purpose of *__class_getitem__* +---------------------------------- + +The purpose of "__class_getitem__()" is to allow runtime +parameterization of standard-library generic classes in order to more +easily apply *type hints* to these classes. + +To implement custom generic classes that can be parameterized at +runtime and understood by static type-checkers, users should either +inherit from a standard library class that already implements +"__class_getitem__()", or inherit from "typing.Generic", which has its +own implementation of "__class_getitem__()". + +Custom implementations of "__class_getitem__()" on classes defined +outside of the standard library may not be understood by third-party +type-checkers such as mypy. Using "__class_getitem__()" on any class +for purposes other than type hinting is discouraged. + + +*__class_getitem__* versus *__getitem__* +---------------------------------------- + +Usually, the subscription of an object using square brackets will call +the "__getitem__()" instance method defined on the object’s class. +However, if the object being subscribed is itself a class, the class +method "__class_getitem__()" may be called instead. +"__class_getitem__()" should return a GenericAlias object if it is +properly defined. 
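+
+A minimal sketch of such a class (hypothetical, for illustration
+only) can delegate the work to "types.GenericAlias", the same type
+of object that parameterizing "list" returns:
+
+   from types import GenericAlias
+
+   class Queue:
+       """A container class supporting Queue[int] style hints."""
+
+       def __class_getitem__(cls, key):
+           # No runtime checking of key, matching list[int] behaviour.
+           return GenericAlias(cls, key)
+
+   print(Queue[int])                  # prints "__main__.Queue[int]"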
+
+Presented with the *expression* "obj[x]", the Python interpreter
+follows something like the following process to decide whether
+"__getitem__()" or "__class_getitem__()" should be called:
+
+   from inspect import isclass
+
+   def subscribe(obj, x):
+       """Return the result of the expression 'obj[x]'"""
+
+       class_of_obj = type(obj)
+
+       # If the class of obj defines __getitem__,
+       # call class_of_obj.__getitem__(obj, x)
+       if hasattr(class_of_obj, '__getitem__'):
+           return class_of_obj.__getitem__(obj, x)
+
+       # Else, if obj is a class and defines __class_getitem__,
+       # call obj.__class_getitem__(x)
+       elif isclass(obj) and hasattr(obj, '__class_getitem__'):
+           return obj.__class_getitem__(x)
+
+       # Else, raise an exception
+       else:
+           raise TypeError(
+               f"'{class_of_obj.__name__}' object is not subscriptable"
+           )
+
+In Python, all classes are themselves instances of other classes. The
+class of a class is known as that class’s *metaclass*, and most
+classes have the "type" class as their metaclass. "type" does not
+define "__getitem__()", meaning that expressions such as "list[int]",
+"dict[str, float]" and "tuple[str, bytes]" all result in
+"__class_getitem__()" being called:
+
+   >>> # list has class "type" as its metaclass, like most classes:
+   >>> type(list)
+   <class 'type'>
+   >>> type(dict) == type(list) == type(tuple) == type(str) == type(bytes)
+   True
+   >>> # "list[int]" calls "list.__class_getitem__(int)"
+   >>> list[int]
+   list[int]
+   >>> # list.__class_getitem__ returns a GenericAlias object:
+   >>> type(list[int])
+   <class 'types.GenericAlias'>
+
+However, if a class has a custom metaclass that defines
+"__getitem__()", subscribing the class may result in different
+behaviour. An example of this can be found in the "enum" module:
+
+   >>> from enum import Enum
+   >>> class Menu(Enum):
+   ...     """A breakfast menu"""
+   ...     SPAM = 'spam'
+   ...     BACON = 'bacon'
+   ...
+   >>> # Enum classes have a custom metaclass:
+   >>> type(Menu)
+   <class 'enum.EnumMeta'>
+   >>> # EnumMeta defines __getitem__,
+   >>> # so __class_getitem__ is not called,
+   >>> # and the result is not a GenericAlias object:
+   >>> Menu['SPAM']
+   <Menu.SPAM: 'spam'>
+   >>> type(Menu['SPAM'])
+   <enum 'Menu'>
+
+See also:
+
+  **PEP 560** - Core Support for typing module and generic types
+     Introducing "__class_getitem__()", and outlining when a
+     subscription results in "__class_getitem__()" being called
+     instead of "__getitem__()"
+
+
+Emulating callable objects
+==========================
+
+object.__call__(self[, args...])
+
+   Called when the instance is “called” as a function; if this method
+   is defined, "x(arg1, arg2, ...)" roughly translates to
+   "type(x).__call__(x, arg1, ...)". The "object" class itself does
+   not provide this method.
+
+
+Emulating container types
+=========================
+
+The following methods can be defined to implement container objects.
+None of them are provided by the "object" class itself. Containers
+usually are *sequences* (such as "lists" or "tuples") or *mappings*
+(like *dictionaries*), but can represent other containers as well.
+The first set of methods is used either to emulate a sequence or to
+emulate a mapping; the difference is that for a sequence, the
+allowable keys should be the integers *k* for which "0 <= k < N" where
+*N* is the length of the sequence, or "slice" objects, which define a
+range of items. It is also recommended that mappings provide the
+methods "keys()", "values()", "items()", "get()", "clear()",
+"setdefault()", "pop()", "popitem()", "copy()", and "update()"
+behaving similar to those for Python’s standard "dictionary" objects.
+The "collections.abc" module provides a "MutableMapping" *abstract +base class* to help create those methods from a base set of +"__getitem__()", "__setitem__()", "__delitem__()", and "keys()". + +Mutable sequences should provide methods "append()", "clear()", +"count()", "extend()", "index()", "insert()", "pop()", "remove()", and +"reverse()", like Python standard "list" objects. Finally, sequence +types should implement addition (meaning concatenation) and +multiplication (meaning repetition) by defining the methods +"__add__()", "__radd__()", "__iadd__()", "__mul__()", "__rmul__()" and +"__imul__()" described below; they should not define other numerical +operators. + +It is recommended that both mappings and sequences implement the +"__contains__()" method to allow efficient use of the "in" operator; +for mappings, "in" should search the mapping’s keys; for sequences, it +should search through the values. It is further recommended that both +mappings and sequences implement the "__iter__()" method to allow +efficient iteration through the container; for mappings, "__iter__()" +should iterate through the object’s keys; for sequences, it should +iterate through the values. + +object.__len__(self) + + Called to implement the built-in function "len()". Should return + the length of the object, an integer ">=" 0. Also, an object that + doesn’t define a "__bool__()" method and whose "__len__()" method + returns zero is considered to be false in a Boolean context. + + **CPython implementation detail:** In CPython, the length is + required to be at most "sys.maxsize". If the length is larger than + "sys.maxsize" some features (such as "len()") may raise + "OverflowError". To prevent raising "OverflowError" by truth value + testing, an object must define a "__bool__()" method. + +object.__length_hint__(self) + + Called to implement "operator.length_hint()". Should return an + estimated length for the object (which may be greater or less than + the actual length). The length must be an integer ">=" 0. The + return value may also be "NotImplemented", which is treated the + same as if the "__length_hint__" method didn’t exist at all. This + method is purely an optimization and is never required for + correctness. + + Added in version 3.4. + +object.__getitem__(self, subscript) + + Called to implement *subscription*, that is, "self[subscript]". See + Subscriptions and slicings for details on the syntax. + + There are two types of built-in objects that support subscription + via "__getitem__()": + + * **sequences**, where *subscript* (also called *index*) should be + an integer or a "slice" object. See the sequence documentation + for the expected behavior, including handling "slice" objects and + negative indices. + + * **mappings**, where *subscript* is also called the *key*. See + mapping documentation for the expected behavior. + + If *subscript* is of an inappropriate type, "__getitem__()" should + raise "TypeError". If *subscript* has an inappropriate value, + "__getitem__()" should raise an "LookupError" or one of its + subclasses ("IndexError" for sequences; "KeyError" for mappings). + + Note: + + Slicing is handled by "__getitem__()", "__setitem__()", and + "__delitem__()". A call like + + a[1:2] = b + + is translated to + + a[slice(1, 2, None)] = b + + and so forth. Missing slice items are always filled in with + "None". 
+
+   Note:
+
+     The sequence iteration protocol (used, for example, in "for"
+     loops) expects that an "IndexError" will be raised for illegal
+     indexes to allow proper detection of the end of a sequence.
+
+   Note:
+
+     When subscripting a *class*, the special class method
+     "__class_getitem__()" may be called instead of "__getitem__()".
+     See __class_getitem__ versus __getitem__ for more details.
+
+object.__setitem__(self, key, value)
+
+   Called to implement assignment to "self[key]". Same note as for
+   "__getitem__()". This should only be implemented for mappings if
+   the objects support changes to the values for keys, or if new keys
+   can be added, or for sequences if elements can be replaced. The
+   same exceptions should be raised for improper *key* values as for
+   the "__getitem__()" method.
+
+object.__delitem__(self, key)
+
+   Called to implement deletion of "self[key]". Same note as for
+   "__getitem__()". This should only be implemented for mappings if
+   the objects support removal of keys, or for sequences if elements
+   can be removed from the sequence. The same exceptions should be
+   raised for improper *key* values as for the "__getitem__()" method.
+
+object.__missing__(self, key)
+
+   Called by "dict"."__getitem__()" to implement "self[key]" for dict
+   subclasses when key is not in the dictionary.
+
+object.__iter__(self)
+
+   This method is called when an *iterator* is required for a
+   container. This method should return a new iterator object that can
+   iterate over all the objects in the container. For mappings, it
+   should iterate over the keys of the container.
+
+object.__reversed__(self)
+
+   Called (if present) by the "reversed()" built-in to implement
+   reverse iteration. It should return a new iterator object that
+   iterates over all the objects in the container in reverse order.
+
+   If the "__reversed__()" method is not provided, the "reversed()"
+   built-in will fall back to using the sequence protocol ("__len__()"
+   and "__getitem__()"). Objects that support the sequence protocol
+   should only provide "__reversed__()" if they can provide an
+   implementation that is more efficient than the one provided by
+   "reversed()".
+
+The membership test operators ("in" and "not in") are normally
+implemented as an iteration through a container. However, container
+objects can supply the following special method with a more efficient
+implementation, which also does not require the object be iterable.
+
+object.__contains__(self, item)
+
+   Called to implement membership test operators. Should return true
+   if *item* is in *self*, false otherwise. For mapping objects, this
+   should consider the keys of the mapping rather than the values or
+   the key-item pairs.
+
+   For objects that don’t define "__contains__()", the membership test
+   first tries iteration via "__iter__()", then the old sequence
+   iteration protocol via "__getitem__()", see this section in the
+   language reference.
+
+
+Emulating numeric types
+=======================
+
+The following methods can be defined to emulate numeric objects.
+Methods corresponding to operations that are not supported by the
+particular kind of number implemented (e.g., bitwise operations for
+non-integral numbers) should be left undefined.
+
+object.__add__(self, other)
+object.__sub__(self, other)
+object.__mul__(self, other)
+object.__matmul__(self, other)
+object.__truediv__(self, other)
+object.__floordiv__(self, other)
+object.__mod__(self, other)
+object.__divmod__(self, other)
+object.__pow__(self, other[, modulo])
+object.__lshift__(self, other)
+object.__rshift__(self, other)
+object.__and__(self, other)
+object.__xor__(self, other)
+object.__or__(self, other)
+
+   These methods are called to implement the binary arithmetic
+   operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",
+   "pow()", "**", "<<", ">>", "&", "^", "|"). For instance, to
+   evaluate the expression "x + y", where *x* is an instance of a
+   class that has an "__add__()" method, "type(x).__add__(x, y)" is
+   called. The "__divmod__()" method should be the equivalent to
+   using "__floordiv__()" and "__mod__()"; it should not be related to
+   "__truediv__()". Note that "__pow__()" should be defined to accept
+   an optional third argument if the three-argument version of the
+   built-in "pow()" function is to be supported.
+
+   If one of those methods does not support the operation with the
+   supplied arguments, it should return "NotImplemented".
+
+object.__radd__(self, other)
+object.__rsub__(self, other)
+object.__rmul__(self, other)
+object.__rmatmul__(self, other)
+object.__rtruediv__(self, other)
+object.__rfloordiv__(self, other)
+object.__rmod__(self, other)
+object.__rdivmod__(self, other)
+object.__rpow__(self, other[, modulo])
+object.__rlshift__(self, other)
+object.__rrshift__(self, other)
+object.__rand__(self, other)
+object.__rxor__(self, other)
+object.__ror__(self, other)
+
+   These methods are called to implement the binary arithmetic
+   operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",
+   "pow()", "**", "<<", ">>", "&", "^", "|") with reflected (swapped)
+   operands. These functions are only called if the operands are of
+   different types, when the left operand does not support the
+   corresponding operation [3], or the right operand’s class is
+   derived from the left operand’s class. [4] For instance, to
+   evaluate the expression "x - y", where *y* is an instance of a
+   class that has an "__rsub__()" method, "type(y).__rsub__(y, x)" is
+   called if "type(x).__sub__(x, y)" returns "NotImplemented" or
+   "type(y)" is a subclass of "type(x)". [5]
+
+   Note that "__rpow__()" should be defined to accept an optional
+   third argument if the three-argument version of the built-in
+   "pow()" function is to be supported.
+
+   Changed in version 3.14: Three-argument "pow()" now tries calling
+   "__rpow__()" if necessary. Previously it was only called in two-
+   argument "pow()" and the binary power operator.
+
+   Note:
+
+     If the right operand’s type is a subclass of the left operand’s
+     type and that subclass provides a different implementation of the
+     reflected method for the operation, this method will be called
+     before the left operand’s non-reflected method. This behavior
+     allows subclasses to override their ancestors’ operations.
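+
+The conventional pattern, shown in this hypothetical sketch (the
+"Money" class is illustrative only), is to return "NotImplemented"
+for unsupported operand types so that Python can go on to try the
+reflected method of the other operand:
+
+   class Money:
+       def __init__(self, cents):
+           self.cents = cents
+
+       def __add__(self, other):
+           if isinstance(other, Money):
+               return Money(self.cents + other.cents)
+           if isinstance(other, int):     # allow Money + int
+               return Money(self.cents + other)
+           return NotImplemented          # let Python try __radd__
+
+       def __radd__(self, other):
+           # Reached for int + Money, after int.__add__ gives up.
+           return self.__add__(other)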
+ +object.__iadd__(self, other) +object.__isub__(self, other) +object.__imul__(self, other) +object.__imatmul__(self, other) +object.__itruediv__(self, other) +object.__ifloordiv__(self, other) +object.__imod__(self, other) +object.__ipow__(self, other[, modulo]) +object.__ilshift__(self, other) +object.__irshift__(self, other) +object.__iand__(self, other) +object.__ixor__(self, other) +object.__ior__(self, other) + + These methods are called to implement the augmented arithmetic + assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", "**=", + "<<=", ">>=", "&=", "^=", "|="). These methods should attempt to + do the operation in-place (modifying *self*) and return the result + (which could be, but does not have to be, *self*). If a specific + method is not defined, or if that method returns "NotImplemented", + the augmented assignment falls back to the normal methods. For + instance, if *x* is an instance of a class with an "__iadd__()" + method, "x += y" is equivalent to "x = x.__iadd__(y)" . If + "__iadd__()" does not exist, or if "x.__iadd__(y)" returns + "NotImplemented", "x.__add__(y)" and "y.__radd__(x)" are + considered, as with the evaluation of "x + y". In certain + situations, augmented assignment can result in unexpected errors + (see Why does a_tuple[i] += [‘item’] raise an exception when the + addition works?), but this behavior is in fact part of the data + model. + +object.__neg__(self) +object.__pos__(self) +object.__abs__(self) +object.__invert__(self) + + Called to implement the unary arithmetic operations ("-", "+", + "abs()" and "~"). + +object.__complex__(self) +object.__int__(self) +object.__float__(self) + + Called to implement the built-in functions "complex()", "int()" and + "float()". Should return a value of the appropriate type. + +object.__index__(self) + + Called to implement "operator.index()", and whenever Python needs + to losslessly convert the numeric object to an integer object (such + as in slicing, or in the built-in "bin()", "hex()" and "oct()" + functions). Presence of this method indicates that the numeric + object is an integer type. Must return an integer. + + If "__int__()", "__float__()" and "__complex__()" are not defined + then corresponding built-in functions "int()", "float()" and + "complex()" fall back to "__index__()". + +object.__round__(self[, ndigits]) +object.__trunc__(self) +object.__floor__(self) +object.__ceil__(self) + + Called to implement the built-in function "round()" and "math" + functions "trunc()", "floor()" and "ceil()". Unless *ndigits* is + passed to "__round__()" all these methods should return the value + of the object truncated to an "Integral" (typically an "int"). + + Changed in version 3.14: "int()" no longer delegates to the + "__trunc__()" method. + + +With Statement Context Managers +=============================== + +A *context manager* is an object that defines the runtime context to +be established when executing a "with" statement. The context manager +handles the entry into, and the exit from, the desired runtime context +for the execution of the block of code. Context managers are normally +invoked using the "with" statement (described in section The with +statement), but can also be used by directly invoking their methods. + +Typical uses of context managers include saving and restoring various +kinds of global state, locking and unlocking resources, closing opened +files, etc. + +For more information on context managers, see Context Manager Types. 
+The "object" class itself does not provide the context manager +methods. + +object.__enter__(self) + + Enter the runtime context related to this object. The "with" + statement will bind this method’s return value to the target(s) + specified in the "as" clause of the statement, if any. + +object.__exit__(self, exc_type, exc_value, traceback) + + Exit the runtime context related to this object. The parameters + describe the exception that caused the context to be exited. If the + context was exited without an exception, all three arguments will + be "None". + + If an exception is supplied, and the method wishes to suppress the + exception (i.e., prevent it from being propagated), it should + return a true value. Otherwise, the exception will be processed + normally upon exit from this method. + + Note that "__exit__()" methods should not reraise the passed-in + exception; this is the caller’s responsibility. + +See also: + + **PEP 343** - The “with” statement + The specification, background, and examples for the Python "with" + statement. + + +Customizing positional arguments in class pattern matching +========================================================== + +When using a class name in a pattern, positional arguments in the +pattern are not allowed by default, i.e. "case MyClass(x, y)" is +typically invalid without special support in "MyClass". To be able to +use that kind of pattern, the class needs to define a *__match_args__* +attribute. + +object.__match_args__ + + This class variable can be assigned a tuple of strings. When this + class is used in a class pattern with positional arguments, each + positional argument will be converted into a keyword argument, + using the corresponding value in *__match_args__* as the keyword. + The absence of this attribute is equivalent to setting it to "()". + +For example, if "MyClass.__match_args__" is "("left", "center", +"right")" that means that "case MyClass(x, y)" is equivalent to "case +MyClass(left=x, center=y)". Note that the number of arguments in the +pattern must be smaller than or equal to the number of elements in +*__match_args__*; if it is larger, the pattern match attempt will +raise a "TypeError". + +Added in version 3.10. + +See also: + + **PEP 634** - Structural Pattern Matching + The specification for the Python "match" statement. + + +Emulating buffer types +====================== + +The buffer protocol provides a way for Python objects to expose +efficient access to a low-level memory array. This protocol is +implemented by builtin types such as "bytes" and "memoryview", and +third-party libraries may define additional buffer types. + +While buffer types are usually implemented in C, it is also possible +to implement the protocol in Python. + +object.__buffer__(self, flags) + + Called when a buffer is requested from *self* (for example, by the + "memoryview" constructor). The *flags* argument is an integer + representing the kind of buffer requested, affecting for example + whether the returned buffer is read-only or writable. + "inspect.BufferFlags" provides a convenient way to interpret the + flags. The method must return a "memoryview" object. + + **Thread safety:** In *free-threaded* Python, implementations must + manage any internal export counter using atomic operations. The + method must be safe to call concurrently from multiple threads, and + the returned buffer’s underlying data must remain valid until the + corresponding "__release_buffer__()" call completes. 
See Thread safety for memoryview objects for details.
+
+object.__release_buffer__(self, buffer)
+
+   Called when a buffer is no longer needed. The *buffer* argument is
+   a "memoryview" object that was previously returned by
+   "__buffer__()". The method must release any resources associated
+   with the buffer. This method should return "None".
+
+   **Thread safety:** In *free-threaded* Python, any export counter
+   decrement must use atomic operations. Resource cleanup must be
+   thread-safe, as the final release may race with concurrent releases
+   from other threads.
+
+   Buffer objects that do not need to perform any cleanup are not
+   required to implement this method.
+
+Added in version 3.12.
+
+See also:
+
+  **PEP 688** - Making the buffer protocol accessible in Python
+     Introduces the Python "__buffer__" and "__release_buffer__"
+     methods.
+
+  "collections.abc.Buffer"
+     ABC for buffer types.
+
+
+Annotations
+===========
+
+Functions, classes, and modules may contain *annotations*, which are a
+way to associate information (usually *type hints*) with a symbol.
+
+object.__annotations__
+
+   This attribute contains the annotations for an object. It is lazily
+   evaluated, so accessing the attribute may execute arbitrary code
+   and raise exceptions. If evaluation is successful, the attribute is
+   set to a dictionary mapping from variable names to annotations.
+
+   Changed in version 3.14: Annotations are now lazily evaluated.
+
+object.__annotate__(format)
+
+   An *annotate function*. Returns a new dictionary object mapping
+   attribute/parameter names to their annotation values.
+
+   Takes a format parameter specifying the format in which annotation
+   values should be provided. It must be a member of the
+   "annotationlib.Format" enum, or an integer with a value
+   corresponding to a member of the enum.
+
+   If an annotate function doesn’t support the requested format, it
+   must raise "NotImplementedError". Annotate functions must always
+   support "VALUE" format; they must not raise "NotImplementedError()"
+   when called with this format.
+
+   When called with "VALUE" format, an annotate function may raise
+   "NameError"; it must not raise "NameError" when called requesting
+   any other format.
+
+   If an object does not have any annotations, "__annotate__" should
+   preferably be set to "None" (it can’t be deleted), rather than set
+   to a function that returns an empty dict.
+
+   Added in version 3.14.
+
+See also:
+
+  **PEP 649** — Deferred evaluation of annotations using descriptors
+     Introduces lazy evaluation of annotations and the "__annotate__"
+     function.
+
+
+Special method lookup
+=====================
+
+For custom classes, implicit invocations of special methods are only
+guaranteed to work correctly if defined on an object’s type, not in
+the object’s instance dictionary. That behaviour is the reason why
+the following code raises an exception:
+
+   >>> class C:
+   ...     pass
+   ...
+   >>> c = C()
+   >>> c.__len__ = lambda: 5
+   >>> len(c)
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   TypeError: object of type 'C' has no len()
+
+The rationale behind this behaviour lies with a number of special
+methods such as "__hash__()" and "__repr__()" that are implemented by
+all objects, including type objects.
+If the implicit lookup of these methods used the conventional lookup
+process, they would fail when invoked on the type object itself:
+
+   >>> 1 .__hash__() == hash(1)
+   True
+   >>> int.__hash__() == hash(int)
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   TypeError: descriptor '__hash__' of 'int' object needs an argument
+
+Incorrectly attempting to invoke an unbound method of a class in this
+way is sometimes referred to as ‘metaclass confusion’, and is avoided
+by bypassing the instance when looking up special methods:
+
+   >>> type(1).__hash__(1) == hash(1)
+   True
+   >>> type(int).__hash__(int) == hash(int)
+   True
+
+In addition to bypassing any instance attributes in the interest of
+correctness, implicit special method lookup generally also bypasses
+the "__getattribute__()" method even of the object’s metaclass:
+
+   >>> class Meta(type):
+   ...     def __getattribute__(*args):
+   ...         print("Metaclass getattribute invoked")
+   ...         return type.__getattribute__(*args)
+   ...
+   >>> class C(object, metaclass=Meta):
+   ...     def __len__(self):
+   ...         return 10
+   ...     def __getattribute__(*args):
+   ...         print("Class getattribute invoked")
+   ...         return object.__getattribute__(*args)
+   ...
+   >>> c = C()
+   >>> c.__len__()                 # Explicit lookup via instance
+   Class getattribute invoked
+   10
+   >>> type(c).__len__(c)          # Explicit lookup via type
+   Metaclass getattribute invoked
+   10
+   >>> len(c)                      # Implicit lookup
+   10
+
+Bypassing the "__getattribute__()" machinery in this fashion provides
+significant scope for speed optimisations within the interpreter, at
+the cost of some flexibility in the handling of special methods (the
+special method *must* be set on the class object itself in order to be
+consistently invoked by the interpreter).
+''',
+ 'string-methods': r'''String Methods
+**************
+
+Strings implement all of the common sequence operations, along with
+the additional methods described below.
+
+Strings also support two styles of string formatting, one providing a
+large degree of flexibility and customization (see "str.format()",
+Format string syntax and Custom string formatting) and the other based
+on C "printf" style formatting that handles a narrower range of types
+and is slightly harder to use correctly, but is often faster for the
+cases it can handle (printf-style String Formatting).
+
+The Text Processing Services section of the standard library covers a
+number of other modules that provide various text related utilities
+(including regular expression support in the "re" module).
+
+str.capitalize()
+
+   Return a copy of the string with its first character capitalized
+   and the rest lowercased.
+
+   Changed in version 3.8: The first character is now put into
+   titlecase rather than uppercase. This means that characters like
+   digraphs will only have their first letter capitalized, instead of
+   the full character.
+
+str.casefold()
+
+   Return a casefolded copy of the string. Casefolded strings may be
+   used for caseless matching.
+
+   Casefolding is similar to lowercasing but more aggressive because
+   it is intended to remove all case distinctions in a string. For
+   example, the German lowercase letter "'ß'" is equivalent to ""ss"".
+   Since it is already lowercase, "lower()" would do nothing to "'ß'";
+   "casefold()" converts it to ""ss"". For example:
+
+      >>> 'straße'.lower()
+      'straße'
+      >>> 'straße'.casefold()
+      'strasse'
+
+   The casefolding algorithm is described in section 3.13 ‘Default
+   Case Folding’ of the Unicode Standard.
+
+   Added in version 3.3.
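+
+For illustration of "str.capitalize()" above, which has no inline
+example:
+
+   >>> 'python IS fun'.capitalize()
+   'Python is fun'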
+
+str.center(width, fillchar=' ', /)
+
+   Return the string centered in a string of length *width*. Padding
+   is done using the specified *fillchar* (default is an ASCII space).
+   The original string is returned if *width* is less than or equal to
+   "len(s)". For example:
+
+      >>> 'Python'.center(10)
+      '  Python  '
+      >>> 'Python'.center(10, '-')
+      '--Python--'
+      >>> 'Python'.center(4)
+      'Python'
+
+str.count(sub[, start[, end]])
+
+   Return the number of non-overlapping occurrences of substring *sub*
+   in the range [*start*, *end*]. Optional arguments *start* and
+   *end* are interpreted as in slice notation.
+
+   If *sub* is empty, returns the number of empty strings between
+   characters which is the length of the string plus one. For example:
+
+      >>> 'spam, spam, spam'.count('spam')
+      3
+      >>> 'spam, spam, spam'.count('spam', 5)
+      2
+      >>> 'spam, spam, spam'.count('spam', 5, 10)
+      1
+      >>> 'spam, spam, spam'.count('eggs')
+      0
+      >>> 'spam, spam, spam'.count('')
+      17
+
+str.encode(encoding='utf-8', errors='strict')
+
+   Return the string encoded to "bytes".
+
+   *encoding* defaults to "'utf-8'"; see Standard Encodings for
+   possible values.
+
+   *errors* controls how encoding errors are handled. If "'strict'"
+   (the default), a "UnicodeError" exception is raised. Other possible
+   values are "'ignore'", "'replace'", "'xmlcharrefreplace'",
+   "'backslashreplace'" and any other name registered via
+   "codecs.register_error()". See Error Handlers for details.
+
+   For performance reasons, the value of *errors* is not checked for
+   validity unless an encoding error actually occurs, Python
+   Development Mode is enabled or a debug build is used. For example:
+
+      >>> encoded_str_to_bytes = 'Python'.encode()
+      >>> type(encoded_str_to_bytes)
+      <class 'bytes'>
+      >>> encoded_str_to_bytes
+      b'Python'
+
+   Changed in version 3.1: Added support for keyword arguments.
+
+   Changed in version 3.9: The value of the *errors* argument is now
+   checked in Python Development Mode and in debug mode.
+
+str.endswith(suffix[, start[, end]])
+
+   Return "True" if the string ends with the specified *suffix*,
+   otherwise return "False". *suffix* can also be a tuple of suffixes
+   to look for. With optional *start*, test beginning at that
+   position. With optional *end*, stop comparing at that position.
+   Using *start* and *end* is equivalent to
+   "str[start:end].endswith(suffix)". For example:
+
+      >>> 'Python'.endswith('on')
+      True
+      >>> 'a tuple of suffixes'.endswith(('at', 'in'))
+      False
+      >>> 'a tuple of suffixes'.endswith(('at', 'es'))
+      True
+      >>> 'Python is amazing'.endswith('is', 0, 9)
+      True
+
+   See also "startswith()" and "removesuffix()".
+
+str.expandtabs(tabsize=8)
+
+   Return a copy of the string where all tab characters are replaced
+   by one or more spaces, depending on the current column and the
+   given tab size. Tab positions occur every *tabsize* characters
+   (default is 8, giving tab positions at columns 0, 8, 16 and so on).
+   To expand the string, the current column is set to zero and the
+   string is examined character by character. If the character is a
+   tab ("\t"), one or more space characters are inserted in the result
+   until the current column is equal to the next tab position. (The
+   tab character itself is not copied.) If the character is a newline
+   ("\n") or return ("\r"), it is copied and the current column is
+   reset to zero. Any other character is copied unchanged and the
+   current column is incremented by one regardless of how the
+   character is represented when printed. For example:
+
+      >>> '01\t012\t0123\t01234'.expandtabs()
+      '01      012     0123    01234'
+      >>> '01\t012\t0123\t01234'.expandtabs(4)
+      '01  012 0123    01234'
+      >>> print('01\t012\n0123\t01234'.expandtabs(4))
+      01  012
+      0123    01234
+
+str.find(sub[, start[, end]])
+
+   Return the lowest index in the string where substring *sub* is
+   found within the slice "s[start:end]". Optional arguments *start*
+   and *end* are interpreted as in slice notation. Return "-1" if
+   *sub* is not found. For example:
+
+      >>> 'spam, spam, spam'.find('sp')
+      0
+      >>> 'spam, spam, spam'.find('sp', 5)
+      6
+
+   See also "rfind()" and "index()".
+
+   Note:
+
+     The "find()" method should be used only if you need to know the
+     position of *sub*. To check if *sub* is a substring or not, use
+     the "in" operator:
+
+        >>> 'Py' in 'Python'
+        True
+
+str.format(*args, **kwargs)
+
+   Perform a string formatting operation. The string on which this
+   method is called can contain literal text or replacement fields
+   delimited by braces "{}". Each replacement field contains either
+   the numeric index of a positional argument, or the name of a
+   keyword argument. Returns a copy of the string where each
+   replacement field is replaced with the string value of the
+   corresponding argument. For example:
+
+      >>> "The sum of 1 + 2 is {0}".format(1+2)
+      'The sum of 1 + 2 is 3'
+      >>> "The sum of {a} + {b} is {answer}".format(answer=1+2, a=1, b=2)
+      'The sum of 1 + 2 is 3'
+      >>> "{1} expects the {0} Inquisition!".format("Spanish", "Nobody")
+      'Nobody expects the Spanish Inquisition!'
+
+   See Format string syntax for a description of the various
+   formatting options that can be specified in format strings.
+
+   Note:
+
+     When formatting a number ("int", "float", "complex",
+     "decimal.Decimal" and subclasses) with the "n" type (ex:
+     "'{:n}'.format(1234)"), the function temporarily sets the
+     "LC_CTYPE" locale to the "LC_NUMERIC" locale to decode
+     "decimal_point" and "thousands_sep" fields of "localeconv()" if
+     they are non-ASCII or longer than 1 byte, and the "LC_NUMERIC"
+     locale is different than the "LC_CTYPE" locale. This temporary
+     change affects other threads.
+
+   Changed in version 3.7: When formatting a number with the "n" type,
+   the function sets temporarily the "LC_CTYPE" locale to the
+   "LC_NUMERIC" locale in some cases.
+
+str.format_map(mapping, /)
+
+   Similar to "str.format(**mapping)", except that "mapping" is used
+   directly and not copied to a "dict". This is useful if for example
+   "mapping" is a dict subclass:
+
+      >>> class Default(dict):
+      ...     def __missing__(self, key):
+      ...         return key
+      ...
+      >>> '{name} was born in {country}'.format_map(Default(name='Guido'))
+      'Guido was born in country'
+
+   Added in version 3.2.
+
+str.index(sub[, start[, end]])
+
+   Like "find()", but raise "ValueError" when the substring is not
+   found. For example:
+
+      >>> 'spam, spam, spam'.index('spam')
+      0
+      >>> 'spam, spam, spam'.index('eggs')
+      Traceback (most recent call last):
+        File "<stdin>", line 1, in <module>
+        'spam, spam, spam'.index('eggs')
+        ~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^
+      ValueError: substring not found
+
+   See also "rindex()".
+
+str.isalnum()
+
+   Return "True" if all characters in the string are alphanumeric and
+   there is at least one character, "False" otherwise. A character
+   "c" is alphanumeric if one of the following returns "True":
+   "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".
+ For example: + + >>> 'abc123'.isalnum() + True + >>> 'abc123!@#'.isalnum() + False + >>> ''.isalnum() + False + >>> ' '.isalnum() + False + +str.isalpha() + + Return "True" if all characters in the string are alphabetic and + there is at least one character, "False" otherwise. Alphabetic + characters are those characters defined in the Unicode character + database as “Letter”, i.e., those with general category property + being one of “Lm”, “Lt”, “Lu”, “Ll”, or “Lo”. Note that this is + different from the Alphabetic property defined in the section 4.10 + ‘Letters, Alphabetic, and Ideographic’ of the Unicode Standard. For + example: + + >>> 'Letters and spaces'.isalpha() + False + >>> 'LettersOnly'.isalpha() + True + >>> 'µ'.isalpha() # non-ASCII characters can be considered alphabetical too + True + + See Unicode Properties. + +str.isascii() + + Return "True" if the string is empty or all characters in the + string are ASCII, "False" otherwise. ASCII characters have code + points in the range U+0000-U+007F. For example: + + >>> 'ASCII characters'.isascii() + True + >>> 'µ'.isascii() + False + + Added in version 3.7. + +str.isdecimal() + + Return "True" if all characters in the string are decimal + characters and there is at least one character, "False" otherwise. + Decimal characters are those that can be used to form numbers in + base 10, such as U+0660, ARABIC-INDIC DIGIT ZERO. Formally a + decimal character is a character in the Unicode General Category + “Nd”. For example: + + >>> '0123456789'.isdecimal() + True + >>> '٠١٢٣٤٥٦٧٨٩'.isdecimal() # Arabic-Indic digits zero to nine + True + >>> 'alphabetic'.isdecimal() + False + +str.isdigit() + + Return "True" if all characters in the string are digits and there + is at least one character, "False" otherwise. Digits include + decimal characters and digits that need special handling, such as + the compatibility superscript digits. This covers digits which + cannot be used to form numbers in base 10, like the Kharosthi + numbers. Formally, a digit is a character that has the property + value Numeric_Type=Digit or Numeric_Type=Decimal. + +str.isidentifier() + + Return "True" if the string is a valid identifier according to the + language definition, section Names (identifiers and keywords). + + "keyword.iskeyword()" can be used to test whether string "s" is a + reserved identifier, such as "def" and "class". + + Example: + + >>> from keyword import iskeyword + + >>> 'hello'.isidentifier(), iskeyword('hello') + (True, False) + >>> 'def'.isidentifier(), iskeyword('def') + (True, True) + +str.islower() + + Return "True" if all cased characters [4] in the string are + lowercase and there is at least one cased character, "False" + otherwise. + +str.isnumeric() + + Return "True" if all characters in the string are numeric + characters, and there is at least one character, "False" otherwise. + Numeric characters include digit characters, and all characters + that have the Unicode numeric value property, e.g. U+2155, VULGAR + FRACTION ONE FIFTH. Formally, numeric characters are those with + the property value Numeric_Type=Digit, Numeric_Type=Decimal or + Numeric_Type=Numeric. For example: + + >>> '0123456789'.isnumeric() + True + >>> '٠١٢٣٤٥٦٧٨٩'.isnumeric() # Arabic-indic digit zero to nine + True + >>> '⅕'.isnumeric() # Vulgar fraction one fifth + True + >>> '²'.isdecimal(), '²'.isdigit(), '²'.isnumeric() + (False, True, True) + + See also "isdecimal()" and "isdigit()". Numeric characters are a + superset of decimal numbers. 
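+
+For illustration of the "isdigit()" and "islower()" entries above,
+which have no inline examples:
+
+   >>> '²'.isdigit()    # superscript two: Numeric_Type=Digit
+   True
+   >>> '½'.isdigit()    # vulgar fraction: Numeric_Type=Numeric only
+   False
+   >>> 'banana'.islower()
+   True
+   >>> 'baNana'.islower()
+   False
+   >>> '123'.islower()  # no cased characters
+   False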
+ +str.isprintable() + + Return "True" if all characters in the string are printable, + "False" if it contains at least one non-printable character. + + Here “printable” means the character is suitable for "repr()" to + use in its output; “non-printable” means that "repr()" on built-in + types will hex-escape the character. It has no bearing on the + handling of strings written to "sys.stdout" or "sys.stderr". + + The printable characters are those which in the Unicode character + database (see "unicodedata") have a general category in group + Letter, Mark, Number, Punctuation, or Symbol (L, M, N, P, or S); + plus the ASCII space 0x20. Nonprintable characters are those in + group Separator or Other (Z or C), except the ASCII space. + + For example: + + >>> ''.isprintable(), ' '.isprintable() + (True, True) + >>> '\t'.isprintable(), '\n'.isprintable() + (False, False) + + See also "isspace()". + +str.isspace() + + Return "True" if there are only whitespace characters in the string + and there is at least one character, "False" otherwise. + + For example: + + >>> ''.isspace() + False + >>> ' '.isspace() + True + >>> '\t\n'.isspace() # TAB and BREAK LINE + True + >>> '\u3000'.isspace() # IDEOGRAPHIC SPACE + True + + A character is *whitespace* if in the Unicode character database + (see "unicodedata"), either its general category is "Zs" + (“Separator, space”), or its bidirectional class is one of "WS", + "B", or "S". + + See also "isprintable()". + +str.istitle() + + Return "True" if the string is a titlecased string and there is at + least one character, for example uppercase characters may only + follow uncased characters and lowercase characters only cased ones. + Return "False" otherwise. + + For example: + + >>> 'Spam, Spam, Spam'.istitle() + True + >>> 'spam, spam, spam'.istitle() + False + >>> 'SPAM, SPAM, SPAM'.istitle() + False + + See also "title()". + +str.isupper() + + Return "True" if all cased characters [4] in the string are + uppercase and there is at least one cased character, "False" + otherwise. + + >>> 'BANANA'.isupper() + True + >>> 'banana'.isupper() + False + >>> 'baNana'.isupper() + False + >>> ' '.isupper() + False + +str.join(iterable, /) + + Return a string which is the concatenation of the strings in + *iterable*. A "TypeError" will be raised if there are any non- + string values in *iterable*, including "bytes" objects. The + separator between elements is the string providing this method. For + example: + + >>> ', '.join(['spam', 'spam', 'spam']) + 'spam, spam, spam' + >>> '-'.join('Python') + 'P-y-t-h-o-n' + + See also "split()". + +str.ljust(width, fillchar=' ', /) + + Return the string left justified in a string of length *width*. + Padding is done using the specified *fillchar* (default is an ASCII + space). The original string is returned if *width* is less than or + equal to "len(s)". + + For example: + + >>> 'Python'.ljust(10) + 'Python ' + >>> 'Python'.ljust(10, '.') + 'Python....' + >>> 'Monty Python'.ljust(10, '.') + 'Monty Python' + + See also "rjust()". + +str.lower() + + Return a copy of the string with all the cased characters [4] + converted to lowercase. For example: + + >>> 'Lower Method Example'.lower() + 'lower method example' + + The lowercasing algorithm used is described in section 3.13 + ‘Default Case Folding’ of the Unicode Standard. + +str.lstrip(chars=None, /) + + Return a copy of the string with leading characters removed. The + *chars* argument is a string specifying the set of characters to be + removed. 
If omitted or "None", the *chars* argument defaults to + removing whitespace. The *chars* argument is not a prefix; rather, + all combinations of its values are stripped: + + >>> ' spacious '.lstrip() + 'spacious ' + >>> 'www.example.com'.lstrip('cmowz.') + 'example.com' + + See "str.removeprefix()" for a method that will remove a single + prefix string rather than all of a set of characters. For example: + + >>> 'Arthur: three!'.lstrip('Arthur: ') + 'ee!' + >>> 'Arthur: three!'.removeprefix('Arthur: ') + 'three!' + +static str.maketrans(dict, /) +static str.maketrans(from, to, remove='', /) + + This static method returns a translation table usable for + "str.translate()". + + If there is only one argument, it must be a dictionary mapping + Unicode ordinals (integers) or characters (strings of length 1) to + Unicode ordinals, strings (of arbitrary lengths) or "None". + Character keys will then be converted to ordinals. + + If there are two arguments, they must be strings of equal length, + and in the resulting dictionary, each character in *from* will be + mapped to the character at the same position in *to*. If there is + a third argument, it must be a string, whose characters will be + mapped to "None" in the result. + +str.partition(sep, /) + + Split the string at the first occurrence of *sep*, and return a + 3-tuple containing the part before the separator, the separator + itself, and the part after the separator. If the separator is not + found, return a 3-tuple containing the string itself, followed by + two empty strings. + + For example: + + >>> 'Monty Python'.partition(' ') + ('Monty', ' ', 'Python') + >>> "Monty Python's Flying Circus".partition(' ') + ('Monty', ' ', "Python's Flying Circus") + >>> 'Monty Python'.partition('-') + ('Monty Python', '', '') + + See also "rpartition()". + +str.removeprefix(prefix, /) + + If the string starts with the *prefix* string, return + "string[len(prefix):]". Otherwise, return a copy of the original + string: + + >>> 'TestHook'.removeprefix('Test') + 'Hook' + >>> 'BaseTestCase'.removeprefix('Test') + 'BaseTestCase' + + Added in version 3.9. + + See also "removesuffix()" and "startswith()". + +str.removesuffix(suffix, /) + + If the string ends with the *suffix* string and that *suffix* is + not empty, return "string[:-len(suffix)]". Otherwise, return a copy + of the original string: + + >>> 'MiscTests'.removesuffix('Tests') + 'Misc' + >>> 'TmpDirMixin'.removesuffix('Tests') + 'TmpDirMixin' + + Added in version 3.9. + + See also "removeprefix()" and "endswith()". + +str.replace(old, new, /, count=-1) + + Return a copy of the string with all occurrences of substring *old* + replaced by *new*. If *count* is given, only the first *count* + occurrences are replaced. If *count* is not specified or "-1", then + all occurrences are replaced. For example: + + >>> 'spam, spam, spam'.replace('spam', 'eggs') + 'eggs, eggs, eggs' + >>> 'spam, spam, spam'.replace('spam', 'eggs', 1) + 'eggs, spam, spam' + + Changed in version 3.13: *count* is now supported as a keyword + argument. + +str.rfind(sub[, start[, end]]) + + Return the highest index in the string where substring *sub* is + found, such that *sub* is contained within "s[start:end]". + Optional arguments *start* and *end* are interpreted as in slice + notation. Return "-1" on failure. For example: + + >>> 'spam, spam, spam'.rfind('sp') + 12 + >>> 'spam, spam, spam'.rfind('sp', 0, 10) + 6 + + See also "find()" and "rindex()". 
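+
+For illustration of the "str.maketrans()" entry above, which has no
+inline example, paired with "str.translate()" (documented below):
+
+   >>> table = str.maketrans('abc', 'xyz', '!')
+   >>> 'a b c!'.translate(table)
+   'x y z'
+   >>> str.maketrans({'s': 'z', 'm': None})
+   {115: 'z', 109: None}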
+
+str.rindex(sub[, start[, end]])
+
+   Like "rfind()" but raises "ValueError" when the substring *sub* is
+   not found. For example:
+
+      >>> 'spam, spam, spam'.rindex('spam')
+      12
+      >>> 'spam, spam, spam'.rindex('eggs')
+      Traceback (most recent call last):
+        File "<stdin>", line 1, in <module>
+        'spam, spam, spam'.rindex('eggs')
+        ~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^
+      ValueError: substring not found
+
+   See also "index()" and "find()".
+
+str.rjust(width, fillchar=' ', /)
+
+   Return the string right justified in a string of length *width*.
+   Padding is done using the specified *fillchar* (default is an ASCII
+   space). The original string is returned if *width* is less than or
+   equal to "len(s)".
+
+   For example:
+
+      >>> 'Python'.rjust(10)
+      '    Python'
+      >>> 'Python'.rjust(10, '.')
+      '....Python'
+      >>> 'Monty Python'.rjust(10, '.')
+      'Monty Python'
+
+   See also "ljust()" and "zfill()".
+
+str.rpartition(sep, /)
+
+   Split the string at the last occurrence of *sep*, and return a
+   3-tuple containing the part before the separator, the separator
+   itself, and the part after the separator. If the separator is not
+   found, return a 3-tuple containing two empty strings, followed by
+   the string itself.
+
+   For example:
+
+      >>> 'Monty Python'.rpartition(' ')
+      ('Monty', ' ', 'Python')
+      >>> "Monty Python's Flying Circus".rpartition(' ')
+      ("Monty Python's Flying", ' ', 'Circus')
+      >>> 'Monty Python'.rpartition('-')
+      ('', '', 'Monty Python')
+
+   See also "partition()".
+
+str.rsplit(sep=None, maxsplit=-1)
+
+   Return a list of the words in the string, using *sep* as the
+   delimiter string. If *maxsplit* is given, at most *maxsplit* splits
+   are done, the *rightmost* ones. If *sep* is not specified or
+   "None", any whitespace string is a separator. Except for splitting
+   from the right, "rsplit()" behaves like "split()" which is
+   described in detail below.
+
+str.rstrip(chars=None, /)
+
+   Return a copy of the string with trailing characters removed. The
+   *chars* argument is a string specifying the set of characters to be
+   removed. If omitted or "None", the *chars* argument defaults to
+   removing whitespace. The *chars* argument is not a suffix; rather,
+   all combinations of its values are stripped. For example:
+
+      >>> '   spacious   '.rstrip()
+      '   spacious'
+      >>> 'mississippi'.rstrip('ipz')
+      'mississ'
+
+   See "removesuffix()" for a method that will remove a single suffix
+   string rather than all of a set of characters. For example:
+
+      >>> 'Monty Python'.rstrip(' Python')
+      'M'
+      >>> 'Monty Python'.removesuffix(' Python')
+      'Monty'
+
+   See also "strip()".
+
+str.split(sep=None, maxsplit=-1)
+
+   Return a list of the words in the string, using *sep* as the
+   delimiter string. If *maxsplit* is given, at most *maxsplit*
+   splits are done (thus, the list will have at most "maxsplit+1"
+   elements). If *maxsplit* is not specified or "-1", then there is
+   no limit on the number of splits (all possible splits are made).
+
+   If *sep* is given, consecutive delimiters are not grouped together
+   and are deemed to delimit empty strings (for example,
+   "'1,,2'.split(',')" returns "['1', '', '2']"). The *sep* argument
+   may consist of multiple characters as a single delimiter (to split
+   with multiple delimiters, use "re.split()"). Splitting an empty
+   string with a specified separator returns "['']".
+ + For example: + + >>> '1,2,3'.split(',') + ['1', '2', '3'] + >>> '1,2,3'.split(',', maxsplit=1) + ['1', '2,3'] + >>> '1,2,,3,'.split(',') + ['1', '2', '', '3', ''] + >>> '1<>2<>3<4'.split('<>') + ['1', '2', '3<4'] + + If *sep* is not specified or is "None", a different splitting + algorithm is applied: runs of consecutive whitespace are regarded + as a single separator, and the result will contain no empty strings + at the start or end if the string has leading or trailing + whitespace. Consequently, splitting an empty string or a string + consisting of just whitespace with a "None" separator returns "[]". + + For example: + + >>> '1 2 3'.split() + ['1', '2', '3'] + >>> '1 2 3'.split(maxsplit=1) + ['1', '2 3'] + >>> ' 1 2 3 '.split() + ['1', '2', '3'] + + If *sep* is not specified or is "None" and *maxsplit* is "0", only + leading runs of consecutive whitespace are considered. + + For example: + + >>> "".split(None, 0) + [] + >>> " ".split(None, 0) + [] + >>> " foo ".split(maxsplit=0) + ['foo '] + + See also "join()". + +str.splitlines(keepends=False) + + Return a list of the lines in the string, breaking at line + boundaries. Line breaks are not included in the resulting list + unless *keepends* is given and true. + + This method splits on the following line boundaries. In + particular, the boundaries are a superset of *universal newlines*. + + +-------------------------+-------------------------------+ + | Representation | Description | + |=========================|===============================| + | "\n" | Line Feed | + +-------------------------+-------------------------------+ + | "\r" | Carriage Return | + +-------------------------+-------------------------------+ + | "\r\n" | Carriage Return + Line Feed | + +-------------------------+-------------------------------+ + | "\v" or "\x0b" | Line Tabulation | + +-------------------------+-------------------------------+ + | "\f" or "\x0c" | Form Feed | + +-------------------------+-------------------------------+ + | "\x1c" | File Separator | + +-------------------------+-------------------------------+ + | "\x1d" | Group Separator | + +-------------------------+-------------------------------+ + | "\x1e" | Record Separator | + +-------------------------+-------------------------------+ + | "\x85" | Next Line (C1 Control Code) | + +-------------------------+-------------------------------+ + | "\u2028" | Line Separator | + +-------------------------+-------------------------------+ + | "\u2029" | Paragraph Separator | + +-------------------------+-------------------------------+ + + Changed in version 3.2: "\v" and "\f" added to list of line + boundaries. + + For example: + + >>> 'ab c\n\nde fg\rkl\r\n'.splitlines() + ['ab c', '', 'de fg', 'kl'] + >>> 'ab c\n\nde fg\rkl\r\n'.splitlines(keepends=True) + ['ab c\n', '\n', 'de fg\r', 'kl\r\n'] + + Unlike "split()" when a delimiter string *sep* is given, this + method returns an empty list for the empty string, and a terminal + line break does not result in an extra line: + + >>> "".splitlines() + [] + >>> "One line\n".splitlines() + ['One line'] + + For comparison, "split('\n')" gives: + + >>> ''.split('\n') + [''] + >>> 'Two lines\n'.split('\n') + ['Two lines', ''] + +str.startswith(prefix[, start[, end]]) + + Return "True" if string starts with the *prefix*, otherwise return + "False". *prefix* can also be a tuple of prefixes to look for. + With optional *start*, test string beginning at that position. + With optional *end*, stop comparing string at that position. 
+ + For example: + + >>> 'Python'.startswith('Py') + True + >>> 'a tuple of prefixes'.startswith(('at', 'a')) + True + >>> 'Python is amazing'.startswith('is', 7) + True + + See also "endswith()" and "removeprefix()". + +str.strip(chars=None, /) + + Return a copy of the string with the leading and trailing + characters removed. The *chars* argument is a string specifying the + set of characters to be removed. If omitted or "None", the *chars* + argument defaults to removing whitespace. The *chars* argument is + not a prefix or suffix; rather, all combinations of its values are + stripped. + + For example: + + >>> ' spacious '.strip() + 'spacious' + >>> 'www.example.com'.strip('cmowz.') + 'example' + + The outermost leading and trailing *chars* argument values are + stripped from the string. Characters are removed from the leading + end until reaching a string character that is not contained in the + set of characters in *chars*. A similar action takes place on the + trailing end. + + For example: + + >>> comment_string = '#....... Section 3.2.1 Issue #32 .......' + >>> comment_string.strip('.#! ') + 'Section 3.2.1 Issue #32' + + See also "rstrip()". + +str.swapcase() + + Return a copy of the string with uppercase characters converted to + lowercase and vice versa. Note that it is not necessarily true that + "s.swapcase().swapcase() == s". + +str.title() + + Return a titlecased version of the string where words start with an + uppercase character and the remaining characters are lowercase. + + For example: + + >>> 'Hello world'.title() + 'Hello World' + + The algorithm uses a simple language-independent definition of a + word as groups of consecutive letters. The definition works in + many contexts but it means that apostrophes in contractions and + possessives form word boundaries, which may not be the desired + result: + + >>> "they're bill's friends from the UK".title() + "They'Re Bill'S Friends From The Uk" + + The "string.capwords()" function does not have this problem, as it + splits words on spaces only. + + Alternatively, a workaround for apostrophes can be constructed + using regular expressions: + + >>> import re + >>> def titlecase(s): + ... return re.sub(r"[A-Za-z]+('[A-Za-z]+)?", + ... lambda mo: mo.group(0).capitalize(), + ... s) + ... + >>> titlecase("they're bill's friends.") + "They're Bill's Friends." + + See also "istitle()". + +str.translate(table, /) + + Return a copy of the string in which each character has been mapped + through the given translation table. The table must be an object + that implements indexing via "__getitem__()", typically a *mapping* + or *sequence*. When indexed by a Unicode ordinal (an integer), the + table object can do any of the following: return a Unicode ordinal + or a string, to map the character to one or more other characters; + return "None", to delete the character from the return string; or + raise a "LookupError" exception, to map the character to itself. + + You can use "str.maketrans()" to create a translation map from + character-to-character mappings in different formats. + + See also the "codecs" module for a more flexible approach to custom + character mappings. + +str.upper() + + Return a copy of the string with all the cased characters [4] + converted to uppercase. Note that "s.upper().isupper()" might be + "False" if "s" contains uncased characters or if the Unicode + category of the resulting character(s) is not “Lu” (Letter, + uppercase), but e.g. “Lt” (Letter, titlecase). 
+ + The uppercasing algorithm used is described in section 3.13 + ‘Default Case Folding’ of the Unicode Standard. + +str.zfill(width, /) + + Return a copy of the string left filled with ASCII "'0'" digits to + make a string of length *width*. A leading sign prefix + ("'+'"/"'-'") is handled by inserting the padding *after* the sign + character rather than before. The original string is returned if + *width* is less than or equal to "len(s)". + + For example: + + >>> "42".zfill(5) + '00042' + >>> "-42".zfill(5) + '-0042' + + See also "rjust()". +''', + 'strings': '''String and Bytes literals +************************* + +String literals are text enclosed in single quotes ("'") or double +quotes ("""). For example: + + "spam" + 'eggs' + +The quote used to start the literal also terminates it, so a string +literal can only contain the other quote (except with escape +sequences, see below). For example: + + 'Say "Hello", please.' + "Don't do that!" + +Except for this limitation, the choice of quote character ("'" or """) +does not affect how the literal is parsed. + +Inside a string literal, the backslash ("\\") character introduces an +*escape sequence*, which has special meaning depending on the +character after the backslash. For example, "\\"" denotes the double +quote character, and does *not* end the string: + + >>> print("Say \\"Hello\\" to everyone!") + Say "Hello" to everyone! + +See escape sequences below for a full list of such sequences, and more +details. + + +Triple-quoted strings +===================== + +Strings can also be enclosed in matching groups of three single or +double quotes. These are generally referred to as *triple-quoted +strings*: + + """This is a triple-quoted string.""" + +In triple-quoted literals, unescaped quotes are allowed (and are +retained), except that three unescaped quotes in a row terminate the +literal, if they are of the same kind ("'" or """) used at the start: + + """This string has "quotes" inside.""" + +Unescaped newlines are also allowed and retained: + + \'\'\'This triple-quoted string + continues on the next line.\'\'\' + + +String prefixes +=============== + +String literals can have an optional *prefix* that influences how the +content of the literal is parsed, for example: + + b"data" + f'{result=}' + +The allowed prefixes are: + +* "b": Bytes literal + +* "r": Raw string + +* "f": Formatted string literal (“f-string”) + +* "t": Template string literal (“t-string”) + +* "u": No effect (allowed for backwards compatibility) + +See the linked sections for details on each type. + +Prefixes are case-insensitive (for example, ‘"B"’ works the same as +‘"b"’). The ‘"r"’ prefix can be combined with ‘"f"’, ‘"t"’ or ‘"b"’, +so ‘"fr"’, ‘"rf"’, ‘"tr"’, ‘"rt"’, ‘"br"’, and ‘"rb"’ are also valid +prefixes. + +Added in version 3.3: The "'rb'" prefix of raw bytes literals has been +added as a synonym of "'br'".Support for the unicode legacy literal +("u'value'") was reintroduced to simplify the maintenance of dual +Python 2.x and 3.x codebases. See **PEP 414** for more information. + + +Formal grammar +============== + +String literals, except “f-strings” and “t-strings”, are described by +the following lexical definitions. + +These definitions use negative lookaheads ("!") to indicate that an +ending quote ends the literal. 
+
+   STRING: [stringprefix] (stringcontent)
+   stringprefix: <("r" | "u" | "b" | "br" | "rb"), case-insensitive>
+   stringcontent:
+      | "\'\'\'" ( !"\'\'\'" longstringitem)* "\'\'\'"
+      | '"""' ( !'"""' longstringitem)* '"""'
+      | "'" ( !"'" stringitem)* "'"
+      | '"' ( !'"' stringitem)* '"'
+   stringitem: stringchar | stringescapeseq
+   stringchar: <any source_character, except backslash, newline or the quote>
+   longstringitem: stringitem | newline
+   stringescapeseq: "\\" <any source_character>
+
+Note that as in all lexical definitions, whitespace is significant. In
+particular, the prefix (if any) must be immediately followed by the
+starting quote.
+
+
+Escape sequences
+================
+
+Unless an ‘"r"’ or ‘"R"’ prefix is present, escape sequences in string
+and bytes literals are interpreted according to rules similar to those
+used by Standard C. The recognized escape sequences are:
+
++----------------------------------------------------+----------------------------------------------------+
+| Escape Sequence                                    | Meaning                                            |
+|====================================================|====================================================|
+| "\\"<newline>                                      | Ignored end of line                                |
++----------------------------------------------------+----------------------------------------------------+
+| "\\\\"                                             | Backslash                                          |
++----------------------------------------------------+----------------------------------------------------+
+| "\\'"                                              | Single quote                                       |
++----------------------------------------------------+----------------------------------------------------+
+| "\\""                                              | Double quote                                       |
++----------------------------------------------------+----------------------------------------------------+
+| "\\a"                                              | ASCII Bell (BEL)                                   |
++----------------------------------------------------+----------------------------------------------------+
+| "\\b"                                              | ASCII Backspace (BS)                               |
++----------------------------------------------------+----------------------------------------------------+
+| "\\f"                                              | ASCII Formfeed (FF)                                |
++----------------------------------------------------+----------------------------------------------------+
+| "\\n"                                              | ASCII Linefeed (LF)                                |
++----------------------------------------------------+----------------------------------------------------+
+| "\\r"                                              | ASCII Carriage Return (CR)                         |
++----------------------------------------------------+----------------------------------------------------+
+| "\\t"                                              | ASCII Horizontal Tab (TAB)                         |
++----------------------------------------------------+----------------------------------------------------+
+| "\\v"                                              | ASCII Vertical Tab (VT)                            |
++----------------------------------------------------+----------------------------------------------------+
+| "\\*ooo*"                                          | Octal character                                    |
++----------------------------------------------------+----------------------------------------------------+
+| "\\x*hh*"                                          | Hexadecimal character                              |
++----------------------------------------------------+----------------------------------------------------+
+| "\\N{*name*}"                                      | Named Unicode character                            |
++----------------------------------------------------+----------------------------------------------------+
+| "\\u*xxxx*"                                        | Hexadecimal Unicode character                      |
++----------------------------------------------------+----------------------------------------------------+
+| "\\U*xxxxxxxx*"                                    | Hexadecimal Unicode character                      |
++----------------------------------------------------+----------------------------------------------------+
+
+
+Ignored end of line
+-------------------
+
+A backslash can be added at the end of a line to ignore the newline:
+
+   >>> 'This string will not include \\
+   ...
backslashes or newline characters.' + 'This string will not include backslashes or newline characters.' + +The same result can be achieved using triple-quoted strings, or +parentheses and string literal concatenation. + + +Escaped characters +------------------ + +To include a backslash in a non-raw Python string literal, it must be +doubled. The "\\\\" escape sequence denotes a single backslash +character: + + >>> print('C:\\\\Program Files') + C:\\Program Files + +Similarly, the "\\'" and "\\"" sequences denote the single and double +quote character, respectively: + + >>> print('\\' and \\"') + ' and " + + +Octal character +--------------- + +The sequence "\\*ooo*" denotes a *character* with the octal (base 8) +value *ooo*: + + >>> '\\120' + 'P' + +Up to three octal digits (0 through 7) are accepted. + +In a bytes literal, *character* means a *byte* with the given value. +In a string literal, it means a Unicode character with the given +value. + +Changed in version 3.11: Octal escapes with value larger than "0o377" +(255) produce a "DeprecationWarning". + +Changed in version 3.12: Octal escapes with value larger than "0o377" +(255) produce a "SyntaxWarning". In a future Python version they will +raise a "SyntaxError". + + +Hexadecimal character +--------------------- + +The sequence "\\x*hh*" denotes a *character* with the hex (base 16) +value *hh*: + + >>> '\\x50' + 'P' + +Unlike in Standard C, exactly two hex digits are required. + +In a bytes literal, *character* means a *byte* with the given value. +In a string literal, it means a Unicode character with the given +value. + + +Named Unicode character +----------------------- + +The sequence "\\N{*name*}" denotes a Unicode character with the given +*name*: + + >>> '\\N{LATIN CAPITAL LETTER P}' + 'P' + >>> '\\N{SNAKE}' + '🐍' + +This sequence cannot appear in bytes literals. + +Changed in version 3.3: Support for name aliases has been added. + + +Hexadecimal Unicode characters +------------------------------ + +These sequences "\\u*xxxx*" and "\\U*xxxxxxxx*" denote the Unicode +character with the given hex (base 16) value. Exactly four digits are +required for "\\u"; exactly eight digits are required for "\\U". The +latter can encode any Unicode character. + + >>> '\\u1234' + 'ሴ' + >>> '\\U0001f40d' + '🐍' + +These sequences cannot appear in bytes literals. + + +Unrecognized escape sequences +----------------------------- + +Unlike in Standard C, all unrecognized escape sequences are left in +the string unchanged, that is, *the backslash is left in the result*: + + >>> print('\\q') + \\q + >>> list('\\q') + ['\\\\', 'q'] + +Note that for bytes literals, the escape sequences only recognized in +string literals ("\\N...", "\\u...", "\\U...") fall into the category of +unrecognized escapes. + +Changed in version 3.6: Unrecognized escape sequences produce a +"DeprecationWarning". + +Changed in version 3.12: Unrecognized escape sequences produce a +"SyntaxWarning". In a future Python version they will raise a +"SyntaxError". + + +Bytes literals +============== + +*Bytes literals* are always prefixed with ‘"b"’ or ‘"B"’; they produce +an instance of the "bytes" type instead of the "str" type. 
They may +only contain ASCII characters; bytes with a numeric value of 128 or +greater must be expressed with escape sequences (typically Hexadecimal +character or Octal character): + + >>> b'\\x89PNG\\r\\n\\x1a\\n' + b'\\x89PNG\\r\\n\\x1a\\n' + >>> list(b'\\x89PNG\\r\\n\\x1a\\n') + [137, 80, 78, 71, 13, 10, 26, 10] + +Similarly, a zero byte must be expressed using an escape sequence +(typically "\\0" or "\\x00"). + + +Raw string literals +=================== + +Both string and bytes literals may optionally be prefixed with a +letter ‘"r"’ or ‘"R"’; such constructs are called *raw string +literals* and *raw bytes literals* respectively and treat backslashes +as literal characters. As a result, in raw string literals, escape +sequences are not treated specially: + + >>> r'\\d{4}-\\d{2}-\\d{2}' + '\\\\d{4}-\\\\d{2}-\\\\d{2}' + +Even in a raw literal, quotes can be escaped with a backslash, but the +backslash remains in the result; for example, "r"\\""" is a valid +string literal consisting of two characters: a backslash and a double +quote; "r"\\"" is not a valid string literal (even a raw string cannot +end in an odd number of backslashes). Specifically, *a raw literal +cannot end in a single backslash* (since the backslash would escape +the following quote character). Note also that a single backslash +followed by a newline is interpreted as those two characters as part +of the literal, *not* as a line continuation. + + +f-strings +========= + +Added in version 3.6. + +Changed in version 3.7: The "await" and "async for" can be used in +expressions within f-strings. + +Changed in version 3.8: Added the debug specifier ("=") + +Changed in version 3.12: Many restrictions on expressions within +f-strings have been removed. Notably, nested strings, comments, and +backslashes are now permitted. + +A *formatted string literal* or *f-string* is a string literal that is +prefixed with ‘"f"’ or ‘"F"’. Unlike other string literals, f-strings +do not have a constant value. They may contain *replacement fields* +delimited by curly braces "{}". Replacement fields contain expressions +which are evaluated at run time. For example: + + >>> who = 'nobody' + >>> nationality = 'Spanish' + >>> f'{who.title()} expects the {nationality} Inquisition!' + 'Nobody expects the Spanish Inquisition!' + +Any doubled curly braces ("{{" or "}}") outside replacement fields are +replaced with the corresponding single curly brace: + + >>> print(f'{{...}}') + {...} + +Other characters outside replacement fields are treated like in +ordinary string literals. This means that escape sequences are decoded +(except when a literal is also marked as a raw string), and newlines +are possible in triple-quoted f-strings: + + >>> name = 'Galahad' + >>> favorite_color = 'blue' + >>> print(f'{name}:\\t{favorite_color}') + Galahad: blue + >>> print(rf"C:\\Users\\{name}") + C:\\Users\\Galahad + >>> print(f\'\'\'Three shall be the number of the counting + ... and the number of the counting shall be three.\'\'\') + Three shall be the number of the counting + and the number of the counting shall be three. + +Expressions in formatted string literals are treated like regular +Python expressions. Each expression is evaluated in the context where +the formatted string literal appears, in order from left to right. 
An +empty expression is not allowed, and both "lambda" and assignment +expressions ":=" must be surrounded by explicit parentheses: + + >>> f'{(half := 1/2)}, {half * 42}' + '0.5, 21.0' + +Reusing the outer f-string quoting type inside a replacement field is +permitted: + + >>> a = dict(x=2) + >>> f"abc {a["x"]} def" + 'abc 2 def' + +Backslashes are also allowed in replacement fields and are evaluated +the same way as in any other context: + + >>> a = ["a", "b", "c"] + >>> print(f"List a contains:\\n{"\\n".join(a)}") + List a contains: + a + b + c + +It is possible to nest f-strings: + + >>> name = 'world' + >>> f'Repeated:{f' hello {name}' * 3}' + 'Repeated: hello world hello world hello world' + +Portable Python programs should not use more than 5 levels of nesting. + +**CPython implementation detail:** CPython does not limit nesting of +f-strings. + +Replacement expressions can contain newlines in both single-quoted and +triple-quoted f-strings and they can contain comments. Everything that +comes after a "#" inside a replacement field is a comment (even +closing braces and quotes). This means that replacement fields with +comments must be closed in a different line: + + >>> a = 2 + >>> f"abc{a # This comment }" continues until the end of the line + ... + 3}" + 'abc5' + +After the expression, replacement fields may optionally contain: + +* a *debug specifier* – an equal sign ("="), optionally surrounded by + whitespace on one or both sides; + +* a *conversion specifier* – "!s", "!r" or "!a"; and/or + +* a *format specifier* prefixed with a colon (":"). + +See the Standard Library section on f-strings for details on how these +fields are evaluated. + +As that section explains, *format specifiers* are passed as the second +argument to the "format()" function to format a replacement field +value. For example, they can be used to specify a field width and +padding characters using the Format Specification Mini-Language: + + >>> number = 14.3 + >>> f'{number:20.7f}' + ' 14.3000000' + +Top-level format specifiers may include nested replacement fields: + + >>> field_size = 20 + >>> precision = 7 + >>> f'{number:{field_size}.{precision}f}' + ' 14.3000000' + +These nested fields may include their own conversion fields and format +specifiers: + + >>> number = 3 + >>> f'{number:{field_size}}' + ' 3' + >>> f'{number:{field_size:05}}' + '00000000000000000003' + +However, these nested fields may not include more deeply nested +replacement fields. + +Formatted string literals cannot be used as *docstrings*, even if they +do not include expressions: + + >>> def foo(): + ... f"Not a docstring" + ... + >>> print(foo.__doc__) + None + +See also: + + * **PEP 498** – Literal String Interpolation + + * **PEP 701** – Syntactic formalization of f-strings + + * "str.format()", which uses a related format string mechanism. + + +t-strings +========= + +Added in version 3.14. + +A *template string literal* or *t-string* is a string literal that is +prefixed with ‘"t"’ or ‘"T"’. These strings follow the same syntax +rules as formatted string literals. For differences in evaluation +rules, see the Standard Library section on t-strings + + +Formal grammar for f-strings +============================ + +F-strings are handled partly by the *lexical analyzer*, which produces +the tokens "FSTRING_START", "FSTRING_MIDDLE" and "FSTRING_END", and +partly by the parser, which handles expressions in the replacement +field. The exact way the work is split is a CPython implementation +detail. 
+
+Correspondingly, the f-string grammar is a mix of lexical and
+syntactic definitions.
+
+Whitespace is significant in these situations:
+
+* There may be no whitespace in "FSTRING_START" (between the prefix
+  and quote).
+
+* Whitespace in "FSTRING_MIDDLE" is part of the literal string
+  contents.
+
+* In "fstring_replacement_field", if "f_debug_specifier" is present,
+  all whitespace after the opening brace until the
+  "f_debug_specifier", as well as whitespace immediately following
+  "f_debug_specifier", is retained as part of the expression.
+
+  **CPython implementation detail:** The expression is not handled in
+  the tokenization phase; it is retrieved from the source code using
+  locations of the "{" token and the token after "=".
+
+The "FSTRING_MIDDLE" definition uses negative lookaheads ("!") to
+indicate special characters (backslash, newline, "{", "}") and
+sequences ("f_quote").
+
+   fstring: FSTRING_START fstring_middle* FSTRING_END
+
+   FSTRING_START: fstringprefix ("'" | '"' | "\'\'\'" | '"""')
+   FSTRING_END: f_quote
+   fstringprefix: <("f" | "fr" | "rf"), case-insensitive>
+   f_debug_specifier: '='
+   f_quote: <the quote used in the enclosing FSTRING_START>
+
+   fstring_middle:
+      | fstring_replacement_field
+      | FSTRING_MIDDLE
+   FSTRING_MIDDLE:
+      | (!"\\" !newline !'{' !'}' !f_quote) source_character
+      | stringescapeseq
+      | "{{"
+      | "}}"
+      |
+   fstring_replacement_field:
+      | '{' f_expression [f_debug_specifier] [fstring_conversion]
+        [fstring_full_format_spec] '}'
+   fstring_conversion:
+      | "!" ("s" | "r" | "a")
+   fstring_full_format_spec:
+      | ':' fstring_format_spec*
+   fstring_format_spec:
+      | FSTRING_MIDDLE
+      | fstring_replacement_field
+   f_expression:
+      | ','.(conditional_expression | "*" or_expr)+ [","]
+      | yield_expression
+
+Note:
+
+  In the above grammar snippet, the "f_quote" and "FSTRING_MIDDLE"
+  rules are context-sensitive – they depend on the contents of
+  "FSTRING_START" of the nearest enclosing "fstring". Constructing a
+  more traditional formal grammar from this template is left as an
+  exercise for the reader.
+
+The grammar for t-strings is identical to the one for f-strings, with
+*t* instead of *f* at the beginning of rule and token names and in the
+prefix.
+
+   tstring: TSTRING_START tstring_middle* TSTRING_END
+
+
+''',
+ 'subscriptions': r'''Subscriptions and slicings
+**************************
+
+The *subscription* syntax is usually used for selecting an element
+from a container – for example, to get a value from a "dict":
+
+   >>> digits_by_name = {'one': 1, 'two': 2}
+   >>> digits_by_name['two']  # Subscripting a dictionary using the key 'two'
+   2
+
+In the subscription syntax, the object being subscribed – a primary –
+is followed by a *subscript* in square brackets. In the simplest case,
+the subscript is a single expression.
+
+Depending on the type of the object being subscribed, the subscript is
+sometimes called a *key* (for mappings), *index* (for sequences), or
+*type argument* (for *generic types*). Syntactically, these are all
+equivalent:
+
+   >>> colors = ['red', 'blue', 'green', 'black']
+   >>> colors[3]  # Subscripting a list using the index 3
+   'black'
+
+   >>> list[str]  # Parameterizing the list type using the type argument str
+   list[str]
+
+At runtime, the interpreter will evaluate the primary and the
+subscript, and call the primary’s "__getitem__()" or
+"__class_getitem__()" *special method* with the subscript as argument.
+For more details on which of these methods is called, see
+__class_getitem__ versus __getitem__.
+ +To show how subscription works, we can define a custom object that +implements "__getitem__()" and prints out the value of the subscript: + + >>> class SubscriptionDemo: + ... def __getitem__(self, key): + ... print(f'subscripted with: {key!r}') + ... + >>> demo = SubscriptionDemo() + >>> demo[1] + subscripted with: 1 + >>> demo['a' * 3] + subscripted with: 'aaa' + +See "__getitem__()" documentation for how built-in types handle +subscription. + +Subscriptions may also be used as targets in assignment or deletion +statements. In these cases, the interpreter will call the subscripted +object’s "__setitem__()" or "__delitem__()" *special method*, +respectively, instead of "__getitem__()". + + >>> colors = ['red', 'blue', 'green', 'black'] + >>> colors[3] = 'white' # Setting item at index + >>> colors + ['red', 'blue', 'green', 'white'] + >>> del colors[3] # Deleting item at index 3 + >>> colors + ['red', 'blue', 'green'] + +All advanced forms of *subscript* documented in the following sections +are also usable for assignment and deletion. + + +Slicings +======== + +A more advanced form of subscription, *slicing*, is commonly used to +extract a portion of a sequence. In this form, the subscript is a +*slice*: up to three expressions separated by colons. Any of the +expressions may be omitted, but a slice must contain at least one +colon: + + >>> number_names = ['zero', 'one', 'two', 'three', 'four', 'five'] + >>> number_names[1:3] + ['one', 'two'] + >>> number_names[1:] + ['one', 'two', 'three', 'four', 'five'] + >>> number_names[:3] + ['zero', 'one', 'two'] + >>> number_names[:] + ['zero', 'one', 'two', 'three', 'four', 'five'] + >>> number_names[::2] + ['zero', 'two', 'four'] + >>> number_names[:-3] + ['zero', 'one', 'two'] + >>> del number_names[4:] + >>> number_names + ['zero', 'one', 'two', 'three'] + +When a slice is evaluated, the interpreter constructs a "slice" object +whose "start", "stop" and "step" attributes, respectively, are the +results of the expressions between the colons. Any missing expression +evaluates to "None". This "slice" object is then passed to the +"__getitem__()" or "__class_getitem__()" *special method*, as above. + + # continuing with the SubscriptionDemo instance defined above: + >>> demo[2:3] + subscripted with: slice(2, 3, None) + >>> demo[::'spam'] + subscripted with: slice(None, None, 'spam') + + +Comma-separated subscripts +========================== + +The subscript can also be given as two or more comma-separated +expressions or slices: + + # continuing with the SubscriptionDemo instance defined above: + >>> demo[1, 2, 3] + subscripted with: (1, 2, 3) + >>> demo[1:2, 3] + subscripted with: (slice(1, 2, None), 3) + +This form is commonly used with numerical libraries for slicing multi- +dimensional data. In this case, the interpreter constructs a "tuple" +of the results of the expressions or slices, and passes this tuple to +the "__getitem__()" or "__class_getitem__()" *special method*, as +above. + +The subscript may also be given as a single expression or slice +followed by a comma, to specify a one-element tuple: + + >>> demo['spam',] + subscripted with: ('spam',) + + +“Starred” subscriptions +======================= + +Added in version 3.11: Expressions in *tuple_slices* may be starred. +See **PEP 646**. + +The subscript can also contain a starred expression. 
In this case, the +interpreter unpacks the result into a tuple, and passes this tuple to +"__getitem__()" or "__class_getitem__()": + + # continuing with the SubscriptionDemo instance defined above: + >>> demo[*range(10)] + subscripted with: (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) + +Starred expressions may be combined with comma-separated expressions +and slices: + + >>> demo['a', 'b', *range(3), 'c'] + subscripted with: ('a', 'b', 0, 1, 2, 'c') + + +Formal subscription grammar +=========================== + + subscription: primary '[' subscript ']' + subscript: single_subscript | tuple_subscript + single_subscript: proper_slice | assignment_expression + proper_slice: [expression] ":" [expression] [ ":" [expression] ] + tuple_subscript: ','.(single_subscript | starred_expression)+ [','] + +Recall that the "|" operator denotes ordered choice. Specifically, in +"subscript", if both alternatives would match, the first +("single_subscript") has priority. +''', + 'truth': r'''Truth Value Testing +******************* + +Any object can be tested for truth value, for use in an "if" or +"while" condition or as operand of the Boolean operations below. + +By default, an object is considered true unless its class defines +either a "__bool__()" method that returns "False" or a "__len__()" +method that returns zero, when called with the object. [1] If one of +the methods raises an exception when called, the exception is +propagated and the object does not have a truth value (for example, +"NotImplemented"). Here are most of the built-in objects considered +false: + +* constants defined to be false: "None" and "False" + +* zero of any numeric type: "0", "0.0", "0j", "Decimal(0)", + "Fraction(0, 1)" + +* empty sequences and collections: "''", "()", "[]", "{}", "set()", + "range(0)" + +Operations and built-in functions that have a Boolean result always +return "0" or "False" for false and "1" or "True" for true, unless +otherwise stated. (Important exception: the Boolean operations "or" +and "and" always return one of their operands.) +''', + 'try': r'''The "try" statement +******************* + +The "try" statement specifies exception handlers and/or cleanup code +for a group of statements: + + try_stmt: try1_stmt | try2_stmt | try3_stmt + try1_stmt: "try" ":" suite + ("except" [expression ["as" identifier]] ":" suite)+ + ["else" ":" suite] + ["finally" ":" suite] + try2_stmt: "try" ":" suite + ("except" "*" expression ["as" identifier] ":" suite)+ + ["else" ":" suite] + ["finally" ":" suite] + try3_stmt: "try" ":" suite + "finally" ":" suite + +Additional information on exceptions can be found in section +Exceptions, and information on using the "raise" statement to generate +exceptions may be found in section The raise statement. + +Changed in version 3.14: Support for optionally dropping grouping +parentheses when using multiple exception types. See **PEP 758**. + + +"except" clause +=============== + +The "except" clause(s) specify one or more exception handlers. When no +exception occurs in the "try" clause, no exception handler is +executed. When an exception occurs in the "try" suite, a search for an +exception handler is started. This search inspects the "except" +clauses in turn until one is found that matches the exception. An +expression-less "except" clause, if present, must be last; it matches +any exception. + +For an "except" clause with an expression, the expression must +evaluate to an exception type or a tuple of exception types. 
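+
+For example (an illustrative sketch), a parenthesized tuple of
+exception types matches any of its members:
+
+   >>> try:
+   ...     int('spam')
+   ... except (TypeError, ValueError):
+   ...     print('conversion failed')
+   ...
+   conversion failed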
+Parentheses can be dropped if multiple exception types are provided +and the "as" clause is not used. The raised exception matches an +"except" clause whose expression evaluates to the class or a *non- virtual base class* of the exception object, or to a tuple that +contains such a class. + +If no "except" clause matches the exception, the search for an +exception handler continues in the surrounding code and on the +invocation stack. [1] + +If the evaluation of an expression in the header of an "except" clause +raises an exception, the original search for a handler is canceled and +a search starts for the new exception in the surrounding code and on +the call stack (it is treated as if the entire "try" statement raised +the exception). + +When a matching "except" clause is found, the exception is assigned to +the target specified after the "as" keyword in that "except" clause, +if present, and the "except" clause’s suite is executed. All "except" +clauses must have an executable block. When the end of this block is +reached, execution continues normally after the entire "try" +statement. (This means that if two nested handlers exist for the same +exception, and the exception occurs in the "try" clause of the inner +handler, the outer handler will not handle the exception.) + +When an exception has been assigned using "as target", it is cleared +at the end of the "except" clause. This is as if + + except E as N: + foo + +was translated to + + except E as N: + try: + foo + finally: + del N + +This means the exception must be assigned to a different name to be +able to refer to it after the "except" clause. Exceptions are cleared +because with the traceback attached to them, they form a reference +cycle with the stack frame, keeping all locals in that frame alive +until the next garbage collection occurs. + +Before an "except" clause’s suite is executed, the exception is stored +in the "sys" module, where it can be accessed from within the body of +the "except" clause by calling "sys.exception()". When leaving an +exception handler, the exception stored in the "sys" module is reset +to its previous value: + + >>> print(sys.exception()) + None + >>> try: + ... raise TypeError + ... except: + ... print(repr(sys.exception())) + ... try: + ... raise ValueError + ... except: + ... print(repr(sys.exception())) + ... print(repr(sys.exception())) + ... + TypeError() + ValueError() + TypeError() + >>> print(sys.exception()) + None + + +"except*" clause +================ + +The "except*" clause(s) specify one or more handlers for groups of +exceptions ("BaseExceptionGroup" instances). A "try" statement can +have either "except" or "except*" clauses, but not both. The exception +type for matching is mandatory in the case of "except*", so "except*:" +is a syntax error. The type is interpreted as in the case of "except", +but matching is performed on the exceptions contained in the group +that is being handled. A "TypeError" is raised if a matching type is +a subclass of "BaseExceptionGroup", because that would have ambiguous +semantics. + +When an exception group is raised in the try block, each "except*" +clause splits (see "split()") it into the subgroups of matching and +non-matching exceptions. If the matching subgroup is not empty, it +becomes the handled exception (the value returned from +"sys.exception()") and is assigned to the target of the "except*" clause +(if there is one). Then, the body of the "except*" clause executes.
If +the non-matching subgroup is not empty, it is processed by the next +"except*" in the same manner. This continues until all exceptions in +the group have been matched, or the last "except*" clause has run. + +After all "except*" clauses execute, the group of unhandled exceptions +is merged with any exceptions that were raised or re-raised from +within "except*" clauses. This merged exception group propagates on: + + >>> try: + ... raise ExceptionGroup("eg", + ... [ValueError(1), TypeError(2), OSError(3), OSError(4)]) + ... except* TypeError as e: + ... print(f'caught {type(e)} with nested {e.exceptions}') + ... except* OSError as e: + ... print(f'caught {type(e)} with nested {e.exceptions}') + ... + caught <class 'ExceptionGroup'> with nested (TypeError(2),) + caught <class 'ExceptionGroup'> with nested (OSError(3), OSError(4)) + + Exception Group Traceback (most recent call last): + | File "<stdin>", line 2, in <module> + | raise ExceptionGroup("eg", + | [ValueError(1), TypeError(2), OSError(3), OSError(4)]) + | ExceptionGroup: eg (1 sub-exception) + +-+---------------- 1 ---------------- + | ValueError: 1 + +------------------------------------ + +If the exception raised from the "try" block is not an exception group +and its type matches one of the "except*" clauses, it is caught and +wrapped by an exception group with an empty message string. This +ensures that the type of the target "e" is consistently +"BaseExceptionGroup": + + >>> try: + ... raise BlockingIOError + ... except* BlockingIOError as e: + ... print(repr(e)) + ... + ExceptionGroup('', (BlockingIOError(),)) + +"break", "continue" and "return" cannot appear in an "except*" clause. + + +"else" clause +============= + +The optional "else" clause is executed if the control flow leaves the +"try" suite, no exception was raised, and no "return", "continue", or +"break" statement was executed. Exceptions in the "else" clause are +not handled by the preceding "except" clauses. + + +"finally" clause +================ + +If "finally" is present, it specifies a ‘cleanup’ handler. The "try" +clause is executed, including any "except" and "else" clauses. If an +exception occurs in any of the clauses and is not handled, the +exception is temporarily saved. The "finally" clause is executed. If +there is a saved exception it is re-raised at the end of the "finally" +clause. If the "finally" clause raises another exception, the saved +exception is set as the context of the new exception. If the "finally" +clause executes a "return", "break" or "continue" statement, the saved +exception is discarded. For example, this function returns 42. + + def f(): + try: + 1/0 + finally: + return 42 + +The exception information is not available to the program during +execution of the "finally" clause. + +When a "return", "break" or "continue" statement is executed in the +"try" suite of a "try"…"finally" statement, the "finally" clause is +also executed ‘on the way out.’ + +The return value of a function is determined by the last "return" +statement executed. Since the "finally" clause always executes, a +"return" statement executed in the "finally" clause will always be the +last one executed. The following function returns ‘finally’. + + def foo(): + try: + return 'try' + finally: + return 'finally' + +Changed in version 3.8: Prior to Python 3.8, a "continue" statement +was illegal in the "finally" clause due to a problem with the +implementation. + +Changed in version 3.14: The compiler emits a "SyntaxWarning" when a +"return", "break" or "continue" appears in a "finally" block (see +**PEP 765**).
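+
+To tie the clauses together, a minimal illustrative sketch (the
+function name "classify" is arbitrary):
+
+   >>> def classify(n):
+   ...     try:
+   ...         10 / n
+   ...     except ZeroDivisionError:
+   ...         print('division failed')
+   ...     else:
+   ...         print('division succeeded')
+   ...     finally:
+   ...         print('done')
+   ...
+   >>> classify(2)
+   division succeeded
+   done
+   >>> classify(0)
+   division failed
+   done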
+''', + 'types': r'''The standard type hierarchy +*************************** + +Below is a list of the types that are built into Python. Extension +modules (written in C, Java, or other languages, depending on the +implementation) can define additional types. Future versions of +Python may add types to the type hierarchy (e.g., rational numbers, +efficiently stored arrays of integers, etc.), although such additions +will often be provided via the standard library instead. + +Some of the type descriptions below contain a paragraph listing +‘special attributes.’ These are attributes that provide access to the +implementation and are not intended for general use. Their definition +may change in the future. + + +None +==== + +This type has a single value. There is a single object with this +value. This object is accessed through the built-in name "None". It is +used to signify the absence of a value in many situations, e.g., it is +returned from functions that don’t explicitly return anything. Its +truth value is false. + + +NotImplemented +============== + +This type has a single value. There is a single object with this +value. This object is accessed through the built-in name +"NotImplemented". Numeric methods and rich comparison methods should +return this value if they do not implement the operation for the +operands provided. (The interpreter will then try the reflected +operation, or some other fallback, depending on the operator.) It +should not be evaluated in a boolean context. + +See Implementing the arithmetic operations for more details. + +Changed in version 3.9: Evaluating "NotImplemented" in a boolean +context was deprecated. + +Changed in version 3.14: Evaluating "NotImplemented" in a boolean +context now raises a "TypeError". It previously evaluated to "True" +and emitted a "DeprecationWarning" since Python 3.9. + + +Ellipsis +======== + +This type has a single value. There is a single object with this +value. This object is accessed through the literal "..." or the built- +in name "Ellipsis". Its truth value is true. + + +"numbers.Number" +================ + +These are created by numeric literals and returned as results by +arithmetic operators and arithmetic built-in functions. Numeric +objects are immutable; once created their value never changes. Python +numbers are of course strongly related to mathematical numbers, but +subject to the limitations of numerical representation in computers. + +The string representations of the numeric classes, computed by +"__repr__()" and "__str__()", have the following properties: + +* They are valid numeric literals which, when passed to their class + constructor, produce an object having the value of the original + numeric. + +* The representation is in base 10, when possible. + +* Leading zeros, possibly excepting a single zero before a decimal + point, are not shown. + +* Trailing zeros, possibly excepting a single zero after a decimal + point, are not shown. + +* A sign is shown only when the number is negative. + +Python distinguishes between integers, floating-point numbers, and +complex numbers: + + +"numbers.Integral" +------------------ + +These represent elements from the mathematical set of integers +(positive and negative). + +Note: + + The rules for integer representation are intended to give the most + meaningful interpretation of shift and mask operations involving + negative integers. 
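+
+For instance (an illustrative sketch), masking and shifting a negative
+integer behave as if the sign bits extended indefinitely to the left:
+
+   >>> -1 & 0xFF
+   255
+   >>> -8 >> 1
+   -4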
+ +There are two types of integers: + +Integers ("int") + These represent numbers in an unlimited range, subject to available + (virtual) memory only. For the purpose of shift and mask + operations, a binary representation is assumed, and negative + numbers are represented in a variant of 2’s complement which gives + the illusion of an infinite string of sign bits extending to the + left. + +Booleans ("bool") + These represent the truth values False and True. The two objects + representing the values "False" and "True" are the only Boolean + objects. The Boolean type is a subtype of the integer type, and + Boolean values behave like the values 0 and 1, respectively, in + almost all contexts, the exception being that when converted to a + string, the strings ""False"" or ""True"" are returned, + respectively. + + +"numbers.Real" ("float") +------------------------ + +These represent machine-level double precision floating-point numbers. +You are at the mercy of the underlying machine architecture (and C or +Java implementation) for the accepted range and handling of overflow. +Python does not support single-precision floating-point numbers; the +savings in processor and memory usage that are usually the reason for +using these are dwarfed by the overhead of using objects in Python, so +there is no reason to complicate the language with two kinds of +floating-point numbers. + + +"numbers.Complex" ("complex") +----------------------------- + +These represent complex numbers as a pair of machine-level double +precision floating-point numbers. The same caveats apply as for +floating-point numbers. The real and imaginary parts of a complex +number "z" can be retrieved through the read-only attributes "z.real" +and "z.imag". + + +Sequences +========= + +These represent finite ordered sets indexed by non-negative numbers. +The built-in function "len()" returns the number of items of a +sequence. When the length of a sequence is *n*, the index set contains +the numbers 0, 1, …, *n*-1. Item *i* of sequence *a* is selected by +"a[i]". Some sequences, including built-in sequences, interpret +negative subscripts by adding the sequence length. For example, +"a[-2]" equals "a[n-2]", the second to last item of sequence a with +length "n". + +The resulting value must be a nonnegative integer less than the number +of items in the sequence. If it is not, an "IndexError" is raised. + +Sequences also support slicing: "a[start:stop]" selects all items with +index *k* such that *start* "<=" *k* "<" *stop*. When used as an +expression, a slice is a sequence of the same type. The comment above +about negative subscripts also applies to negative slice positions. +Note that no error is raised if a slice position is less than zero or +larger than the length of the sequence. + +If *start* is missing or "None", slicing behaves as if *start* was +zero. If *stop* is missing or "None", slicing behaves as if *stop* was +equal to the length of the sequence. + +Some sequences also support “extended slicing” with a third “step” +parameter: "a[i:j:k]" selects all items of *a* with index *x* where "x += i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*. + +Sequences are distinguished according to their mutability: + + +Immutable sequences +------------------- + +An object of an immutable sequence type cannot change once it is +created. 
(If the object contains references to other objects, these +other objects may be mutable and may be changed; however, the +collection of objects directly referenced by an immutable object +cannot change.) + +The following types are immutable sequences: + +Strings + A string ("str") is a sequence of values that represent + *characters*, or more formally, *Unicode code points*. All the code + points in the range "0" to "0x10FFFF" can be represented in a + string. + + Python doesn’t have a dedicated *character* type. Instead, every + code point in the string is represented as a string object with + length "1". + + The built-in function "ord()" converts a code point from its string + form to an integer in the range "0" to "0x10FFFF"; "chr()" converts + an integer in the range "0" to "0x10FFFF" to the corresponding + length "1" string object. "str.encode()" can be used to convert a + "str" to "bytes" using the given text encoding, and + "bytes.decode()" can be used to achieve the opposite. + +Tuples + The items of a "tuple" are arbitrary Python objects. Tuples of two + or more items are formed by comma-separated lists of expressions. + A tuple of one item (a ‘singleton’) can be formed by affixing a + comma to an expression (an expression by itself does not create a + tuple, since parentheses must be usable for grouping of + expressions). An empty tuple can be formed by an empty pair of + parentheses. + +Bytes + A "bytes" object is an immutable array. The items are 8-bit bytes, + represented by integers in the range 0 <= x < 256. Bytes literals + (like "b'abc'") and the built-in "bytes()" constructor can be used + to create bytes objects. Also, bytes objects can be decoded to + strings via the "decode()" method. + + +Mutable sequences +----------------- + +Mutable sequences can be changed after they are created. The +subscription and slicing notations can be used as the target of +assignment and "del" (delete) statements. + +Note: + + The "collections" and "array" modules provide additional examples of + mutable sequence types. + +There are currently two intrinsic mutable sequence types: + +Lists + The items of a list are arbitrary Python objects. Lists are formed + by placing a comma-separated list of expressions in square + brackets. (Note that there are no special cases needed to form + lists of length 0 or 1.) + +Byte Arrays + A bytearray object is a mutable array. They are created by the + built-in "bytearray()" constructor. Aside from being mutable (and + hence unhashable), byte arrays otherwise provide the same interface + and functionality as immutable "bytes" objects. + + +Set types +========= + +These represent unordered, finite sets of unique, immutable objects. +As such, they cannot be indexed by any subscript. However, they can be +iterated over, and the built-in function "len()" returns the number of +items in a set. Common uses for sets are fast membership testing, +removing duplicates from a sequence, and computing mathematical +operations such as intersection, union, difference, and symmetric +difference. + +For set elements, the same immutability rules apply as for dictionary +keys. Note that numeric types obey the normal rules for numeric +comparison: if two numbers compare equal (e.g., "1" and "1.0"), only +one of them can be contained in a set. + +There are currently two intrinsic set types: + +Sets + These represent a mutable set. They are created by the built-in + "set()" constructor and can be modified afterwards by several + methods, such as "add()".
+ +Frozen sets + These represent an immutable set. They are created by the built-in + "frozenset()" constructor. As a frozenset is immutable and + *hashable*, it can be used again as an element of another set, or + as a dictionary key. + + +Mappings +======== + +These represent finite sets of objects indexed by arbitrary index +sets. The subscript notation "a[k]" selects the item indexed by "k" +from the mapping "a"; this can be used in expressions and as the +target of assignments or "del" statements. The built-in function +"len()" returns the number of items in a mapping. + +There is currently a single intrinsic mapping type: + + +Dictionaries +------------ + +These represent finite sets of objects indexed by nearly arbitrary +values. The only types of values not acceptable as keys are values +containing lists or dictionaries or other mutable types that are +compared by value rather than by object identity, the reason being +that the efficient implementation of dictionaries requires a key’s +hash value to remain constant. Numeric types used for keys obey the +normal rules for numeric comparison: if two numbers compare equal +(e.g., "1" and "1.0") then they can be used interchangeably to index +the same dictionary entry. + +Dictionaries preserve insertion order, meaning that keys will be +produced in the same order they were added sequentially over the +dictionary. Replacing an existing key does not change the order, +however removing a key and re-inserting it will add it to the end +instead of keeping its old place. + +Dictionaries are mutable; they can be created by the "{}" notation +(see section Dictionary displays). + +The extension modules "dbm.ndbm" and "dbm.gnu" provide additional +examples of mapping types, as does the "collections" module. + +Changed in version 3.7: Dictionaries did not preserve insertion order +in versions of Python before 3.6. In CPython 3.6, insertion order was +preserved, but it was considered an implementation detail at that time +rather than a language guarantee. + + +Callable types +============== + +These are the types to which the function call operation (see section +Calls) can be applied: + + +User-defined functions +---------------------- + +A user-defined function object is created by a function definition +(see section Function definitions). It should be called with an +argument list containing the same number of items as the function’s +formal parameter list. + + +Special read-only attributes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ++----------------------------------------------------+----------------------------------------------------+ +| Attribute | Meaning | +|====================================================|====================================================| +| function.__builtins__ | A reference to the "dictionary" that holds the | +| | function’s builtins namespace. Added in version | +| | 3.10. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__globals__ | A reference to the "dictionary" that holds the | +| | function’s global variables – the global namespace | +| | of the module in which the function was defined. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__closure__ | "None" or a "tuple" of cells that contain bindings | +| | for the names specified in the "co_freevars" | +| | attribute of the function’s "code object". A cell | +| | object has the attribute "cell_contents". 
This can | +| | be used to get the value of the cell, as well as | +| | set the value. | ++----------------------------------------------------+----------------------------------------------------+ + + +Special writable attributes +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Most of these attributes check the type of the assigned value: + ++----------------------------------------------------+----------------------------------------------------+ +| Attribute | Meaning | +|====================================================|====================================================| +| function.__doc__ | The function’s documentation string, or "None" if | +| | unavailable. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__name__ | The function’s name. See also: "__name__ | +| | attributes". | ++----------------------------------------------------+----------------------------------------------------+ +| function.__qualname__ | The function’s *qualified name*. See also: | +| | "__qualname__ attributes". Added in version 3.3. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__module__ | The name of the module the function was defined | +| | in, or "None" if unavailable. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__defaults__ | A "tuple" containing default *parameter* values | +| | for those parameters that have defaults, or "None" | +| | if no parameters have a default value. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__code__ | The code object representing the compiled function | +| | body. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__dict__ | The namespace supporting arbitrary function | +| | attributes. See also: "__dict__ attributes". | ++----------------------------------------------------+----------------------------------------------------+ +| function.__annotations__ | A "dictionary" containing annotations of | +| | *parameters*. The keys of the dictionary are the | +| | parameter names, and "'return'" for the return | +| | annotation, if provided. See also: | +| | "object.__annotations__". Changed in version | +| | 3.14: Annotations are now lazily evaluated. See | +| | **PEP 649**. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__annotate__ | The *annotate function* for this function, or | +| | "None" if the function has no annotations. See | +| | "object.__annotate__". Added in version 3.14. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__kwdefaults__ | A "dictionary" containing defaults for keyword- | +| | only *parameters*. | ++----------------------------------------------------+----------------------------------------------------+ +| function.__type_params__ | A "tuple" containing the type parameters of a | +| | generic function. Added in version 3.12. | ++----------------------------------------------------+----------------------------------------------------+ + +Function objects also support getting and setting arbitrary +attributes, which can be used, for example, to attach metadata to +functions. 
Regular attribute dot-notation is used to get and set such +attributes. + +**CPython implementation detail:** CPython’s current implementation +only supports function attributes on user-defined functions. Function +attributes on built-in functions may be supported in the future. + +Additional information about a function’s definition can be retrieved +from its code object (accessible via the "__code__" attribute). + + +Instance methods +---------------- + +An instance method object combines a class, a class instance and any +callable object (normally a user-defined function). + +Special read-only attributes: + ++----------------------------------------------------+----------------------------------------------------+ +| method.__self__ | Refers to the class instance object to which the | +| | method is bound | ++----------------------------------------------------+----------------------------------------------------+ +| method.__func__ | Refers to the original function object | ++----------------------------------------------------+----------------------------------------------------+ +| method.__doc__ | The method’s documentation (same as | +| | "method.__func__.__doc__"). A "string" if the | +| | original function had a docstring, else "None". | ++----------------------------------------------------+----------------------------------------------------+ +| method.__name__ | The name of the method (same as | +| | "method.__func__.__name__") | ++----------------------------------------------------+----------------------------------------------------+ +| method.__module__ | The name of the module the method was defined in, | +| | or "None" if unavailable. | ++----------------------------------------------------+----------------------------------------------------+ + +Methods also support accessing (but not setting) the arbitrary +function attributes on the underlying function object. + +User-defined method objects may be created when getting an attribute +of a class (perhaps via an instance of that class), if that attribute +is a user-defined function object or a "classmethod" object. + +When an instance method object is created by retrieving a user-defined +function object from a class via one of its instances, its "__self__" +attribute is the instance, and the method object is said to be +*bound*. The new method’s "__func__" attribute is the original +function object. + +When an instance method object is created by retrieving a +"classmethod" object from a class or instance, its "__self__" +attribute is the class itself, and its "__func__" attribute is the +function object underlying the class method. + +When an instance method object is called, the underlying function +("__func__") is called, inserting the class instance ("__self__") in +front of the argument list. For instance, when "C" is a class which +contains a definition for a function "f()", and "x" is an instance of +"C", calling "x.f(1)" is equivalent to calling "C.f(x, 1)". + +When an instance method object is derived from a "classmethod" object, +the “class instance” stored in "__self__" will actually be the class +itself, so that calling either "x.f(1)" or "C.f(1)" is equivalent to +calling "f(C,1)" where "f" is the underlying function. + +It is important to note that user-defined functions which are +attributes of a class instance are not converted to bound methods; +this *only* happens when the function is an attribute of the class. 
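+
+For illustration, a minimal sketch of the bound-method attributes
+described above (the names "C", "c" and "f" are arbitrary):
+
+   >>> class C:
+   ...     def f(self):
+   ...         return 'method'
+   ...
+   >>> c = C()
+   >>> c.f.__self__ is c
+   True
+   >>> c.f.__func__ is C.__dict__['f']
+   True
+   >>> c.g = lambda: 'plain'  # attribute of the instance: not bound
+   >>> c.g()
+   'plain'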
+ + +Generator functions +------------------- + +A function or method which uses the "yield" statement (see section The +yield statement) is called a *generator function*. Such a function, +when called, always returns an *iterator* object which can be used to +execute the body of the function: calling the iterator’s +"iterator.__next__()" method will cause the function to execute until +it provides a value using the "yield" statement. When the function +executes a "return" statement or falls off the end, a "StopIteration" +exception is raised and the iterator will have reached the end of the +set of values to be returned. + + +Coroutine functions +------------------- + +A function or method which is defined using "async def" is called a +*coroutine function*. Such a function, when called, returns a +*coroutine* object. It may contain "await" expressions, as well as +"async with" and "async for" statements. See also the Coroutine +Objects section. + + +Asynchronous generator functions +-------------------------------- + +A function or method which is defined using "async def" and which uses +the "yield" statement is called an *asynchronous generator function*. +Such a function, when called, returns an *asynchronous iterator* +object which can be used in an "async for" statement to execute the +body of the function. + +Calling the asynchronous iterator’s "aiterator.__anext__" method will +return an *awaitable* which when awaited will execute until it +provides a value using the "yield" expression. When the function +executes an empty "return" statement or falls off the end, a +"StopAsyncIteration" exception is raised and the asynchronous iterator +will have reached the end of the set of values to be yielded. + + +Built-in functions +------------------ + +A built-in function object is a wrapper around a C function. Examples +of built-in functions are "len()" and "math.sin()" ("math" is a +standard built-in module). The number and type of the arguments are +determined by the C function. Special read-only attributes: + +* "__doc__" is the function’s documentation string, or "None" if + unavailable. See "function.__doc__". + +* "__name__" is the function’s name. See "function.__name__". + +* "__self__" is set to "None" (but see the next item). + +* "__module__" is the name of the module the function was defined in + or "None" if unavailable. See "function.__module__". + + +Built-in methods +---------------- + +This is really a different disguise of a built-in function, this time +containing an object passed to the C function as an implicit extra +argument. An example of a built-in method is "alist.append()", +assuming *alist* is a list object. In this case, the special read-only +attribute "__self__" is set to the object denoted by *alist*. (The +attribute has the same semantics as it does with "other instance +methods".) + + +Classes +------- + +Classes are callable. These objects normally act as factories for new +instances of themselves, but variations are possible for class types +that override "__new__()". The arguments of the call are passed to +"__new__()" and, in the typical case, to "__init__()" to initialize +the new instance. + + +Class Instances +--------------- + +Instances of arbitrary classes can be made callable by defining a +"__call__()" method in their class.
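+
+For example (an illustrative sketch; the class name "Adder" is
+arbitrary):
+
+   >>> class Adder:
+   ...     def __init__(self, n):
+   ...         self.n = n
+   ...     def __call__(self, x):
+   ...         return x + self.n
+   ...
+   >>> add_five = Adder(5)
+   >>> add_five(10)
+   15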
+ + +Modules +======= + +Modules are a basic organizational unit of Python code, and are +created by the import system as invoked either by the "import" +statement, or by calling functions such as "importlib.import_module()" +and built-in "__import__()". A module object has a namespace +implemented by a "dictionary" object (this is the dictionary +referenced by the "__globals__" attribute of functions defined in the +module). Attribute references are translated to lookups in this +dictionary, e.g., "m.x" is equivalent to "m.__dict__["x"]". A module +object does not contain the code object used to initialize the module +(since it isn’t needed once the initialization is done). + +Attribute assignment updates the module’s namespace dictionary, e.g., +"m.x = 1" is equivalent to "m.__dict__["x"] = 1". + + +Import-related attributes on module objects +------------------------------------------- + +Module objects have the following attributes that relate to the import +system. When a module is created using the machinery associated with +the import system, these attributes are filled in based on the +module’s *spec*, before the *loader* executes and loads the module. + +To create a module dynamically rather than using the import system, +it’s recommended to use "importlib.util.module_from_spec()", which +will set the various import-controlled attributes to appropriate +values. It’s also possible to use the "types.ModuleType" constructor +to create modules directly, but this technique is more error-prone, as +most attributes must be manually set on the module object after it has +been created when using this approach. + +Caution: + + With the exception of "__name__", it is **strongly** recommended + that you rely on "__spec__" and its attributes instead of any of the + other individual attributes listed in this subsection. Note that + updating an attribute on "__spec__" will not update the + corresponding attribute on the module itself: + + >>> import typing + >>> typing.__name__, typing.__spec__.name + ('typing', 'typing') + >>> typing.__spec__.name = 'spelling' + >>> typing.__name__, typing.__spec__.name + ('typing', 'spelling') + >>> typing.__name__ = 'keyboard_smashing' + >>> typing.__name__, typing.__spec__.name + ('keyboard_smashing', 'spelling') + +module.__name__ + + The name used to uniquely identify the module in the import system. + For a directly executed module, this will be set to ""__main__"". + + This attribute must be set to the fully qualified name of the + module. It is expected to match the value of + "module.__spec__.name". + +module.__spec__ + + A record of the module’s import-system-related state. + + Set to the "module spec" that was used when importing the module. + See Module specs for more details. + + Added in version 3.4. + +module.__package__ + + The *package* a module belongs to. + + If the module is top-level (that is, not a part of any specific + package) then the attribute should be set to "''" (the empty + string). Otherwise, it should be set to the name of the module’s + package (which can be equal to "module.__name__" if the module + itself is a package). See **PEP 366** for further details. + + This attribute is used instead of "__name__" to calculate explicit + relative imports for main modules. It defaults to "None" for + modules created dynamically using the "types.ModuleType" + constructor; use "importlib.util.module_from_spec()" instead to + ensure the attribute is set to a "str". 
+ + It is **strongly** recommended that you use + "module.__spec__.parent" instead of "module.__package__". + "__package__" is now only used as a fallback if "__spec__.parent" + is not set, and this fallback path is deprecated. + + Changed in version 3.4: This attribute now defaults to "None" for + modules created dynamically using the "types.ModuleType" + constructor. Previously the attribute was optional. + + Changed in version 3.6: The value of "__package__" is expected to + be the same as "__spec__.parent". "__package__" is now only used as + a fallback during import resolution if "__spec__.parent" is not + defined. + + Changed in version 3.10: "ImportWarning" is raised if an import + resolution falls back to "__package__" instead of + "__spec__.parent". + + Changed in version 3.12: Raise "DeprecationWarning" instead of + "ImportWarning" when falling back to "__package__" during import + resolution. + + Deprecated since version 3.13, will be removed in version 3.15: + "__package__" will cease to be set or taken into consideration by + the import system or standard library. + +module.__loader__ + + The *loader* object that the import machinery used to load the + module. + + This attribute is mostly useful for introspection, but can be used + for additional loader-specific functionality, for example getting + data associated with a loader. + + "__loader__" defaults to "None" for modules created dynamically + using the "types.ModuleType" constructor; use + "importlib.util.module_from_spec()" instead to ensure the attribute + is set to a *loader* object. + + It is **strongly** recommended that you use + "module.__spec__.loader" instead of "module.__loader__". + + Changed in version 3.4: This attribute now defaults to "None" for + modules created dynamically using the "types.ModuleType" + constructor. Previously the attribute was optional. + + Deprecated since version 3.12, will be removed in version 3.16: + Setting "__loader__" on a module while failing to set + "__spec__.loader" is deprecated. In Python 3.16, "__loader__" will + cease to be set or taken into consideration by the import system or + the standard library. + +module.__path__ + + A (possibly empty) *sequence* of strings enumerating the locations + where the package’s submodules will be found. Non-package modules + should not have a "__path__" attribute. See __path__ attributes on + modules for more details. + + It is **strongly** recommended that you use + "module.__spec__.submodule_search_locations" instead of + "module.__path__". + +module.__file__ + +module.__cached__ + + "__file__" and "__cached__" are both optional attributes that may + or may not be set. Both attributes should be a "str" when they are + available. + + "__file__" indicates the pathname of the file from which the module + was loaded (if loaded from a file), or the pathname of the shared + library file for extension modules loaded dynamically from a shared + library. It might be missing for certain types of modules, such as + C modules that are statically linked into the interpreter, and the + import system may opt to leave it unset if it has no semantic + meaning (for example, a module loaded from a database). + + If "__file__" is set then the "__cached__" attribute might also be + set, which is the path to any compiled version of the code (for + example, a byte-compiled file). The file does not need to exist to + set this attribute; the path can simply point to where the compiled + file *would* exist (see **PEP 3147**). 
+ + Note that "__cached__" may be set even if "__file__" is not set. + However, that scenario is quite atypical. Ultimately, the *loader* + is what makes use of the module spec provided by the *finder* (from + which "__file__" and "__cached__" are derived). So if a loader can + load from a cached module but otherwise does not load from a file, + that atypical scenario may be appropriate. + + It is **strongly** recommended that you use + "module.__spec__.cached" instead of "module.__cached__". + + Deprecated since version 3.13, will be removed in version 3.15: + Setting "__cached__" on a module while failing to set + "__spec__.cached" is deprecated. In Python 3.15, "__cached__" will + cease to be set or taken into consideration by the import system or + standard library. + + +Other writable attributes on module objects +------------------------------------------- + +As well as the import-related attributes listed above, module objects +also have the following writable attributes: + +module.__doc__ + + The module’s documentation string, or "None" if unavailable. See + also: "__doc__ attributes". + +module.__annotations__ + + A dictionary containing *variable annotations* collected during + module body execution. For best practices on working with + "__annotations__", see "annotationlib". + + Changed in version 3.14: Annotations are now lazily evaluated. See + **PEP 649**. + +module.__annotate__ + + The *annotate function* for this module, or "None" if the module + has no annotations. See also: "__annotate__" attributes. + + Added in version 3.14. + + +Module dictionaries +------------------- + +Module objects also have the following special read-only attribute: + +module.__dict__ + + The module’s namespace as a dictionary object. Uniquely among the + attributes listed here, "__dict__" cannot be accessed as a global + variable from within a module; it can only be accessed as an + attribute on module objects. + + **CPython implementation detail:** Because of the way CPython + clears module dictionaries, the module dictionary will be cleared + when the module falls out of scope even if the dictionary still has + live references. To avoid this, copy the dictionary or keep the + module around while using its dictionary directly. + + +Custom classes +============== + +Custom class types are typically created by class definitions (see +section Class definitions). A class has a namespace implemented by a +dictionary object. Class attribute references are translated to +lookups in this dictionary, e.g., "C.x" is translated to +"C.__dict__["x"]" (although there are a number of hooks which allow +for other means of locating attributes). When the attribute name is +not found there, the attribute search continues in the base classes. +This search of the base classes uses the C3 method resolution order +which behaves correctly even in the presence of ‘diamond’ inheritance +structures where there are multiple inheritance paths leading back to +a common ancestor. Additional details on the C3 MRO used by Python can +be found at The Python 2.3 Method Resolution Order. + +When a class attribute reference (for class "C", say) would yield a +class method object, it is transformed into an instance method object +whose "__self__" attribute is "C". When it would yield a +"staticmethod" object, it is transformed into the object wrapped by +the static method object. 
See section Implementing Descriptors for +another way in which attributes retrieved from a class may differ from +those actually contained in its "__dict__". + +Class attribute assignments update the class’s dictionary, never the +dictionary of a base class. + +A class object can be called (see above) to yield a class instance +(see below). + + +Special attributes +------------------ + ++----------------------------------------------------+----------------------------------------------------+ +| Attribute | Meaning | +|====================================================|====================================================| +| type.__name__ | The class’s name. See also: "__name__ attributes". | ++----------------------------------------------------+----------------------------------------------------+ +| type.__qualname__ | The class’s *qualified name*. See also: | +| | "__qualname__ attributes". | ++----------------------------------------------------+----------------------------------------------------+ +| type.__module__ | The name of the module in which the class was | +| | defined. | ++----------------------------------------------------+----------------------------------------------------+ +| type.__dict__ | A "mapping proxy" providing a read-only view of | +| | the class’s namespace. See also: "__dict__ | +| | attributes". | ++----------------------------------------------------+----------------------------------------------------+ +| type.__bases__ | A "tuple" containing the class’s bases. In most | +| | cases, for a class defined as "class X(A, B, C)", | +| | "X.__bases__" will be exactly equal to "(A, B, | +| | C)". | ++----------------------------------------------------+----------------------------------------------------+ +| type.__base__ | **CPython implementation detail:** The single base | +| | class in the inheritance chain that is responsible | +| | for the memory layout of instances. This attribute | +| | corresponds to "tp_base" at the C level. | ++----------------------------------------------------+----------------------------------------------------+ +| type.__doc__ | The class’s documentation string, or "None" if | +| | undefined. Not inherited by subclasses. | ++----------------------------------------------------+----------------------------------------------------+ +| type.__annotations__ | A dictionary containing *variable annotations* | +| | collected during class body execution. See also: | +| | "__annotations__ attributes". For best practices | +| | on working with "__annotations__", please see | +| | "annotationlib". Use | +| | "annotationlib.get_annotations()" instead of | +| | accessing this attribute directly. Warning: | +| | Accessing the "__annotations__" attribute directly | +| | on a class object may return annotations for the | +| | wrong class, specifically in certain cases where | +| | the class, its base class, or a metaclass is | +| | defined under "from __future__ import | +| | annotations". See **PEP 749** for details. This | +| | attribute does not exist on certain builtin | +| | classes. On user-defined classes without | +| | "__annotations__", it is an empty dictionary. | +| | Changed in version 3.14: Annotations are now | +| | lazily evaluated. See **PEP 649**. | ++----------------------------------------------------+----------------------------------------------------+ +| type.__annotate__() | The *annotate function* for this class, or "None" | +| | if the class has no annotations. See also: | +| | "__annotate__ attributes".
Added in version 3.14. | ++----------------------------------------------------+----------------------------------------------------+ +| type.__type_params__ | A "tuple" containing the type parameters of a | +| | generic class. Added in version 3.12. | ++----------------------------------------------------+----------------------------------------------------+ +| type.__static_attributes__ | A "tuple" containing names of attributes of this | +| | class which are assigned through "self.X" from any | +| | function in its body. Added in version 3.13. | ++----------------------------------------------------+----------------------------------------------------+ +| type.__firstlineno__ | The line number of the first line of the class | +| | definition, including decorators. Setting the | +| | "__module__" attribute removes the | +| | "__firstlineno__" item from the type’s dictionary. | +| | Added in version 3.13. | ++----------------------------------------------------+----------------------------------------------------+ +| type.__mro__ | The "tuple" of classes that are considered when | +| | looking for base classes during method resolution. | ++----------------------------------------------------+----------------------------------------------------+ + + +Special methods +--------------- + +In addition to the special attributes described above, all Python +classes also have the following two methods available: + +type.mro() + + This method can be overridden by a metaclass to customize the + method resolution order for its instances. It is called at class + instantiation, and its result is stored in "__mro__". + +type.__subclasses__() + + Each class keeps a list of weak references to its immediate + subclasses. This method returns a list of all those references + still alive. The list is in definition order. Example: + + >>> class A: pass + >>> class B(A): pass + >>> A.__subclasses__() + [<class 'B'>] + + +Class instances +=============== + +A class instance is created by calling a class object (see above). A +class instance has a namespace implemented as a dictionary which is +the first place in which attribute references are searched. When an +attribute is not found there, and the instance’s class has an +attribute by that name, the search continues with the class +attributes. If a class attribute is found that is a user-defined +function object, it is transformed into an instance method object +whose "__self__" attribute is the instance. Static method and class +method objects are also transformed; see above under “Classes”. See +section Implementing Descriptors for another way in which attributes +of a class retrieved via its instances may differ from the objects +actually stored in the class’s "__dict__". If no class attribute is +found, and the object’s class has a "__getattr__()" method, that is +called to satisfy the lookup. + +Attribute assignments and deletions update the instance’s dictionary, +never a class’s dictionary. If the class has a "__setattr__()" or +"__delattr__()" method, this is called instead of updating the +instance dictionary directly. + +Class instances can pretend to be numbers, sequences, or mappings if +they have methods with certain special names. See section Special +method names. + + +Special attributes +------------------ + +object.__class__ + + The class to which a class instance belongs. + +object.__dict__ + + A dictionary or other mapping object used to store an object’s + (writable) attributes.
Not all instances have a "__dict__" + attribute; see the section on __slots__ for more details. + + +I/O objects (also known as file objects) +======================================== + +A *file object* represents an open file. Various shortcuts are +available to create file objects: the "open()" built-in function, and +also "os.popen()", "os.fdopen()", and the "makefile()" method of +socket objects (and perhaps by other functions or methods provided by +extension modules). + +File objects implement common methods, listed below, to simplify usage +in generic code. They are expected to be With Statement Context +Managers. + +The objects "sys.stdin", "sys.stdout" and "sys.stderr" are initialized +to file objects corresponding to the interpreter’s standard input, +output and error streams; they are all open in text mode and therefore +follow the interface defined by the "io.TextIOBase" abstract class. + +file.read(size=-1, /) + + Retrieve up to *size* data from the file. As a convenience if + *size* is unspecified or -1 retrieve all data available. + +file.write(data, /) + + Store *data* to the file. + +file.close() + + Flush any buffers and close the underlying file. + + +Internal types +============== + +A few types used internally by the interpreter are exposed to the +user. Their definitions may change with future versions of the +interpreter, but they are mentioned here for completeness. + + +Code objects +------------ + +Code objects represent *byte-compiled* executable Python code, or +*bytecode*. The difference between a code object and a function object +is that the function object contains an explicit reference to the +function’s globals (the module in which it was defined), while a code +object contains no context; also the default argument values are +stored in the function object, not in the code object (because they +represent values calculated at run-time). Unlike function objects, +code objects are immutable and contain no references (directly or +indirectly) to mutable objects. + + +Special read-only attributes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_name | The function name | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_qualname | The fully qualified function name Added in | +| | version 3.11. 
| ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_argcount | The total number of positional *parameters* | +| | (including positional-only parameters and | +| | parameters with default values) that the function | +| | has | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_posonlyargcount | The number of positional-only *parameters* | +| | (including arguments with default values) that the | +| | function has | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_kwonlyargcount | The number of keyword-only *parameters* (including | +| | arguments with default values) that the function | +| | has | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_nlocals | The number of local variables used by the function | +| | (including parameters) | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_varnames | A "tuple" containing the names of the local | +| | variables in the function (starting with the | +| | parameter names) | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_cellvars | A "tuple" containing the names of local variables | +| | that are referenced from at least one *nested | +| | scope* inside the function | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_freevars | A "tuple" containing the names of *free (closure) | +| | variables* that a *nested scope* references in an | +| | outer scope. See also "function.__closure__". | +| | Note: references to global and builtin names are | +| | *not* included. | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_code | A string representing the sequence of *bytecode* | +| | instructions in the function | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_consts | A "tuple" containing the literals used by the | +| | *bytecode* in the function | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_names | A "tuple" containing the names used by the | +| | *bytecode* in the function | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_filename | The name of the file from which the code was | +| | compiled | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_firstlineno | The line number of the first line of the function | ++----------------------------------------------------+----------------------------------------------------+ +| codeobject.co_lnotab | A string encoding the mapping from *bytecode* | +| | offsets to line numbers. For details, see the | +| | source code of the interpreter. Deprecated since | +| | version 3.12: This attribute of code objects is | +| | deprecated, and may be removed in Python 3.15. 
|
++----------------------------------------------------+----------------------------------------------------+
+| codeobject.co_stacksize                            | The required stack size of the code object         |
++----------------------------------------------------+----------------------------------------------------+
+| codeobject.co_flags                                | An "integer" encoding a number of flags for the    |
+|                                                    | interpreter.                                       |
++----------------------------------------------------+----------------------------------------------------+
+
+The following flag bits are defined for "co_flags": bit "0x04" is set
+if the function uses the "*arguments" syntax to accept an arbitrary
+number of positional arguments; bit "0x08" is set if the function uses
+the "**keywords" syntax to accept arbitrary keyword arguments; bit
+"0x20" is set if the function is a generator. See Code Objects Bit
+Flags for details on the semantics of each flag that might be
+present.
+
+Future feature declarations (for example, "from __future__ import
+division") also use bits in "co_flags" to indicate whether a code
+object was compiled with a particular feature enabled. See
+"compiler_flag".
+
+Other bits in "co_flags" are reserved for internal use.
+
+If a code object represents a function and has a docstring, the
+"CO_HAS_DOCSTRING" bit is set in "co_flags" and the first item in
+"co_consts" is the docstring of the function.
+
+
+Methods on code objects
+~~~~~~~~~~~~~~~~~~~~~~~
+
+codeobject.co_positions()
+
+   Returns an iterable over the source code positions of each
+   *bytecode* instruction in the code object.
+
+   The iterator returns "tuple"s containing the "(start_line,
+   end_line, start_column, end_column)". The *i-th* tuple corresponds
+   to the position of the source code that compiled to the *i-th* code
+   unit. Column information is 0-indexed utf-8 byte offsets on the
+   given source line.
+
+   This positional information can be missing. A non-exhaustive list
+   of cases where this may happen:
+
+   * Running the interpreter with "-X" "no_debug_ranges".
+
+   * Loading a pyc file compiled while using "-X" "no_debug_ranges".
+
+   * Position tuples corresponding to artificial instructions.
+
+   * Line and column numbers that can’t be represented due to
+     implementation specific limitations.
+
+   When this occurs, some or all of the tuple elements can be "None".
+
+   Added in version 3.11.
+
+   Note:
+
+     This feature requires storing column positions in code objects
+     which may result in a small increase of disk usage of compiled
+     Python files or interpreter memory usage. To avoid storing the
+     extra information and/or deactivate printing the extra traceback
+     information, the "-X" "no_debug_ranges" command line flag or the
+     "PYTHONNODEBUGRANGES" environment variable can be used.
+
+codeobject.co_lines()
+
+   Returns an iterator that yields information about successive ranges
+   of *bytecode*s. Each item yielded is a "(start, end, lineno)"
+   "tuple":
+
+   * "start" (an "int") represents the offset (inclusive) of the start
+     of the *bytecode* range
+
+   * "end" (an "int") represents the offset (exclusive) of the end of
+     the *bytecode* range
+
+   * "lineno" is an "int" representing the line number of the
+     *bytecode* range, or "None" if the bytecodes in the given range
+     have no line number
+
+   The items yielded will have the following properties:
+
+   * The first range yielded will have a "start" of 0.
+
+   * The "(start, end)" ranges will be non-decreasing and consecutive. 
+ That is, for any pair of "tuple"s, the "start" of the second will + be equal to the "end" of the first. + + * No range will be backwards: "end >= start" for all triples. + + * The last "tuple" yielded will have "end" equal to the size of the + *bytecode*. + + Zero-width ranges, where "start == end", are allowed. Zero-width + ranges are used for lines that are present in the source code, but + have been eliminated by the *bytecode* compiler. + + Added in version 3.10. + + See also: + + **PEP 626** - Precise line numbers for debugging and other tools. + The PEP that introduced the "co_lines()" method. + +codeobject.replace(**kwargs) + + Return a copy of the code object with new values for the specified + fields. + + Code objects are also supported by the generic function + "copy.replace()". + + Added in version 3.8. + + +Frame objects +------------- + +Frame objects represent execution frames. They may occur in traceback +objects, and are also passed to registered trace functions. + + +Special read-only attributes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_back | Points to the previous stack frame (towards the | +| | caller), or "None" if this is the bottom stack | +| | frame | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_code | The code object being executed in this frame. | +| | Accessing this attribute raises an auditing event | +| | "object.__getattr__" with arguments "obj" and | +| | ""f_code"". | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_locals | The mapping used by the frame to look up local | +| | variables. If the frame refers to an *optimized | +| | scope*, this may return a write-through proxy | +| | object. Changed in version 3.13: Return a proxy | +| | for optimized scopes. | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_globals | The dictionary used by the frame to look up global | +| | variables | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_builtins | The dictionary used by the frame to look up built- | +| | in (intrinsic) names | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_lasti | The “precise instruction” of the frame object | +| | (this is an index into the *bytecode* string of | +| | the code object) | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_generator | The *generator* or *coroutine* object that owns | +| | this frame, or "None" if the frame is a normal | +| | function. Added in version 3.14. | ++----------------------------------------------------+----------------------------------------------------+ + + +Special writable attributes +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_trace | If not "None", this is a function called for | +| | various events during code execution (this is used | +| | by debuggers). Normally an event is triggered for | +| | each new source line (see "f_trace_lines"). 
| ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_trace_lines | Set this attribute to "False" to disable | +| | triggering a tracing event for each source line. | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_trace_opcodes | Set this attribute to "True" to allow per-opcode | +| | events to be requested. Note that this may lead to | +| | undefined interpreter behaviour if exceptions | +| | raised by the trace function escape to the | +| | function being traced. | ++----------------------------------------------------+----------------------------------------------------+ +| frame.f_lineno | The current line number of the frame – writing to | +| | this from within a trace function jumps to the | +| | given line (only for the bottom-most frame). A | +| | debugger can implement a Jump command (aka Set | +| | Next Statement) by writing to this attribute. | ++----------------------------------------------------+----------------------------------------------------+ + + +Frame object methods +~~~~~~~~~~~~~~~~~~~~ + +Frame objects support one method: + +frame.clear() + + This method clears all references to local variables held by the + frame. Also, if the frame belonged to a *generator*, the generator + is finalized. This helps break reference cycles involving frame + objects (for example when catching an exception and storing its + traceback for later use). + + "RuntimeError" is raised if the frame is currently executing or + suspended. + + Added in version 3.4. + + Changed in version 3.13: Attempting to clear a suspended frame + raises "RuntimeError" (as has always been the case for executing + frames). + + +Traceback objects +----------------- + +Traceback objects represent the stack trace of an exception. A +traceback object is implicitly created when an exception occurs, and +may also be explicitly created by calling "types.TracebackType". + +Changed in version 3.7: Traceback objects can now be explicitly +instantiated from Python code. + +For implicitly created tracebacks, when the search for an exception +handler unwinds the execution stack, at each unwound level a traceback +object is inserted in front of the current traceback. When an +exception handler is entered, the stack trace is made available to the +program. (See section The try statement.) It is accessible as the +third item of the tuple returned by "sys.exc_info()", and as the +"__traceback__" attribute of the caught exception. + +When the program contains no suitable handler, the stack trace is +written (nicely formatted) to the standard error stream; if the +interpreter is interactive, it is also made available to the user as +"sys.last_traceback". + +For explicitly created tracebacks, it is up to the creator of the +traceback to determine how the "tb_next" attributes should be linked +to form a full stack trace. + +Special read-only attributes: + ++----------------------------------------------------+----------------------------------------------------+ +| traceback.tb_frame | Points to the execution frame of the current | +| | level. Accessing this attribute raises an | +| | auditing event "object.__getattr__" with arguments | +| | "obj" and ""tb_frame"". 
| ++----------------------------------------------------+----------------------------------------------------+ +| traceback.tb_lineno | Gives the line number where the exception occurred | ++----------------------------------------------------+----------------------------------------------------+ +| traceback.tb_lasti | Indicates the “precise instruction”. | ++----------------------------------------------------+----------------------------------------------------+ + +The line number and last instruction in the traceback may differ from +the line number of its frame object if the exception occurred in a +"try" statement with no matching except clause or with a "finally" +clause. + +traceback.tb_next + + The special writable attribute "tb_next" is the next level in the + stack trace (towards the frame where the exception occurred), or + "None" if there is no next level. + + Changed in version 3.7: This attribute is now writable + + +Slice objects +------------- + +Slice objects are used to represent slices for "__getitem__()" +methods. They are also created by the built-in "slice()" function. + +Special read-only attributes: "start" is the lower bound; "stop" is +the upper bound; "step" is the step value; each is "None" if omitted. +These attributes can have any type. + +Slice objects support one method: + +slice.indices(self, length) + + This method takes a single integer argument *length* and computes + information about the slice that the slice object would describe if + applied to a sequence of *length* items. It returns a tuple of + three integers; respectively these are the *start* and *stop* + indices and the *step* or stride length of the slice. Missing or + out-of-bounds indices are handled in a manner consistent with + regular slices. + + +Static method objects +--------------------- + +Static method objects provide a way of defeating the transformation of +function objects to method objects described above. A static method +object is a wrapper around any other object, usually a user-defined +method object. When a static method object is retrieved from a class +or a class instance, the object actually returned is the wrapped +object, which is not subject to any further transformation. Static +method objects are also callable. Static method objects are created by +the built-in "staticmethod()" constructor. + + +Class method objects +-------------------- + +A class method object, like a static method object, is a wrapper +around another object that alters the way in which that object is +retrieved from classes and class instances. The behaviour of class +method objects upon such retrieval is described above, under “instance +methods”. Class method objects are created by the built-in +"classmethod()" constructor. +''', + 'typesfunctions': r'''Functions +********* + +Function objects are created by function definitions. The only +operation on a function object is to call it: "func(argument-list)". + +There are really two flavors of function objects: built-in functions +and user-defined functions. Both support the same operation (to call +the function), but the implementation is different, hence the +different object types. + +See Function definitions for more information. +''', + 'typesmapping': r'''Mapping Types — "dict" +********************** + +A *mapping* object maps *hashable* values to arbitrary objects. +Mappings are mutable objects. There is currently only one standard +mapping type, the *dictionary*. 
(For other containers see the built- +in "list", "set", and "tuple" classes, and the "collections" module.) + +A dictionary’s keys are *almost* arbitrary values. Values that are +not *hashable*, that is, values containing lists, dictionaries or +other mutable types (that are compared by value rather than by object +identity) may not be used as keys. Values that compare equal (such as +"1", "1.0", and "True") can be used interchangeably to index the same +dictionary entry. + +class dict(**kwargs) +class dict(mapping, /, **kwargs) +class dict(iterable, /, **kwargs) + + Return a new dictionary initialized from an optional positional + argument and a possibly empty set of keyword arguments. + + Dictionaries can be created by several means: + + * Use a comma-separated list of "key: value" pairs within braces: + "{'jack': 4098, 'sjoerd': 4127}" or "{4098: 'jack', 4127: + 'sjoerd'}" + + * Use a dict comprehension: "{}", "{x: x ** 2 for x in range(10)}" + + * Use the type constructor: "dict()", "dict([('foo', 100), ('bar', + 200)])", "dict(foo=100, bar=200)" + + If no positional argument is given, an empty dictionary is created. + If a positional argument is given and it defines a "keys()" method, + a dictionary is created by calling "__getitem__()" on the argument + with each returned key from the method. Otherwise, the positional + argument must be an *iterable* object. Each item in the iterable + must itself be an iterable with exactly two elements. The first + element of each item becomes a key in the new dictionary, and the + second element the corresponding value. If a key occurs more than + once, the last value for that key becomes the corresponding value + in the new dictionary. + + If keyword arguments are given, the keyword arguments and their + values are added to the dictionary created from the positional + argument. If a key being added is already present, the value from + the keyword argument replaces the value from the positional + argument. + + Dictionaries compare equal if and only if they have the same "(key, + value)" pairs (regardless of ordering). Order comparisons (‘<’, + ‘<=’, ‘>=’, ‘>’) raise "TypeError". To illustrate dictionary + creation and equality, the following examples all return a + dictionary equal to "{"one": 1, "two": 2, "three": 3}": + + >>> a = dict(one=1, two=2, three=3) + >>> b = {'one': 1, 'two': 2, 'three': 3} + >>> c = dict(zip(['one', 'two', 'three'], [1, 2, 3])) + >>> d = dict([('two', 2), ('one', 1), ('three', 3)]) + >>> e = dict({'three': 3, 'one': 1, 'two': 2}) + >>> f = dict({'one': 1, 'three': 3}, two=2) + >>> a == b == c == d == e == f + True + + Providing keyword arguments as in the first example only works for + keys that are valid Python identifiers. Otherwise, any valid keys + can be used. + + Dictionaries preserve insertion order. Note that updating a key + does not affect the order. Keys added after deletion are inserted + at the end. + + >>> d = {"one": 1, "two": 2, "three": 3, "four": 4} + >>> d + {'one': 1, 'two': 2, 'three': 3, 'four': 4} + >>> list(d) + ['one', 'two', 'three', 'four'] + >>> list(d.values()) + [1, 2, 3, 4] + >>> d["one"] = 42 + >>> d + {'one': 42, 'two': 2, 'three': 3, 'four': 4} + >>> del d["two"] + >>> d["two"] = None + >>> d + {'one': 42, 'three': 3, 'four': 4, 'two': None} + + Changed in version 3.7: Dictionary order is guaranteed to be + insertion order. This behavior was an implementation detail of + CPython from 3.6. 
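+
+   The "keys()"/"__getitem__()" construction protocol described above
+   can be illustrated with a minimal sketch (the class name
+   "PairHolder" is purely illustrative, not part of any API):
+
+      >>> class PairHolder:
+      ...     def keys(self):
+      ...         return ['a', 'b']
+      ...     def __getitem__(self, key):
+      ...         return ord(key)
+      ...
+      >>> dict(PairHolder())
+      {'a': 97, 'b': 98}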
+ + These are the operations that dictionaries support (and therefore, + custom mapping types should support too): + + list(d) + + Return a list of all the keys used in the dictionary *d*. + + len(d) + + Return the number of items in the dictionary *d*. + + d[key] + + Return the item of *d* with key *key*. Raises a "KeyError" if + *key* is not in the map. + + If a subclass of dict defines a method "__missing__()" and *key* + is not present, the "d[key]" operation calls that method with + the key *key* as argument. The "d[key]" operation then returns + or raises whatever is returned or raised by the + "__missing__(key)" call. No other operations or methods invoke + "__missing__()". If "__missing__()" is not defined, "KeyError" + is raised. "__missing__()" must be a method; it cannot be an + instance variable: + + >>> class Counter(dict): + ... def __missing__(self, key): + ... return 0 + ... + >>> c = Counter() + >>> c['red'] + 0 + >>> c['red'] += 1 + >>> c['red'] + 1 + + The example above shows part of the implementation of + "collections.Counter". A different "__missing__()" method is + used by "collections.defaultdict". + + d[key] = value + + Set "d[key]" to *value*. + + del d[key] + + Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not + in the map. + + key in d + + Return "True" if *d* has a key *key*, else "False". + + key not in d + + Equivalent to "not key in d". + + iter(d) + + Return an iterator over the keys of the dictionary. This is a + shortcut for "iter(d.keys())". + + clear() + + Remove all items from the dictionary. + + copy() + + Return a shallow copy of the dictionary. + + classmethod fromkeys(iterable, value=None, /) + + Create a new dictionary with keys from *iterable* and values set + to *value*. + + "fromkeys()" is a class method that returns a new dictionary. + *value* defaults to "None". All of the values refer to just a + single instance, so it generally doesn’t make sense for *value* + to be a mutable object such as an empty list. To get distinct + values, use a dict comprehension instead. + + get(key, default=None, /) + + Return the value for *key* if *key* is in the dictionary, else + *default*. If *default* is not given, it defaults to "None", so + that this method never raises a "KeyError". + + items() + + Return a new view of the dictionary’s items ("(key, value)" + pairs). See the documentation of view objects. + + keys() + + Return a new view of the dictionary’s keys. See the + documentation of view objects. + + pop(key, /) + pop(key, default, /) + + If *key* is in the dictionary, remove it and return its value, + else return *default*. If *default* is not given and *key* is + not in the dictionary, a "KeyError" is raised. + + popitem() + + Remove and return a "(key, value)" pair from the dictionary. + Pairs are returned in LIFO (last-in, first-out) order. + + "popitem()" is useful to destructively iterate over a + dictionary, as often used in set algorithms. If the dictionary + is empty, calling "popitem()" raises a "KeyError". + + Changed in version 3.7: LIFO order is now guaranteed. In prior + versions, "popitem()" would return an arbitrary key/value pair. + + reversed(d) + + Return a reverse iterator over the keys of the dictionary. This + is a shortcut for "reversed(d.keys())". + + Added in version 3.8. + + setdefault(key, default=None, /) + + If *key* is in the dictionary, return its value. If not, insert + *key* with a value of *default* and return *default*. *default* + defaults to "None". 
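+
+      A common idiom (sketched here with illustrative data) uses
+      "setdefault()" to group values into lists without first checking
+      whether the key is present:
+
+         >>> groups = {}
+         >>> for word in ["ant", "bee", "cat", "ape"]:
+         ...     groups.setdefault(word[0], []).append(word)
+         ...
+         >>> groups
+         {'a': ['ant', 'ape'], 'b': ['bee'], 'c': ['cat']}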
+ + update(**kwargs) + update(mapping, /, **kwargs) + update(iterable, /, **kwargs) + + Update the dictionary with the key/value pairs from *mapping* or + *iterable* and *kwargs*, overwriting existing keys. Return + "None". + + "update()" accepts either another object with a "keys()" method + (in which case "__getitem__()" is called with every key returned + from the method) or an iterable of key/value pairs (as tuples or + other iterables of length two). If keyword arguments are + specified, the dictionary is then updated with those key/value + pairs: "d.update(red=1, blue=2)". + + values() + + Return a new view of the dictionary’s values. See the + documentation of view objects. + + An equality comparison between one "dict.values()" view and + another will always return "False". This also applies when + comparing "dict.values()" to itself: + + >>> d = {'a': 1} + >>> d.values() == d.values() + False + + d | other + + Create a new dictionary with the merged keys and values of *d* + and *other*, which must both be dictionaries. The values of + *other* take priority when *d* and *other* share keys. + + Added in version 3.9. + + d |= other + + Update the dictionary *d* with keys and values from *other*, + which may be either a *mapping* or an *iterable* of key/value + pairs. The values of *other* take priority when *d* and *other* + share keys. + + Added in version 3.9. + + Dictionaries and dictionary views are reversible. + + >>> d = {"one": 1, "two": 2, "three": 3, "four": 4} + >>> d + {'one': 1, 'two': 2, 'three': 3, 'four': 4} + >>> list(reversed(d)) + ['four', 'three', 'two', 'one'] + >>> list(reversed(d.values())) + [4, 3, 2, 1] + >>> list(reversed(d.items())) + [('four', 4), ('three', 3), ('two', 2), ('one', 1)] + + Changed in version 3.8: Dictionaries are now reversible. + +See also: + + "types.MappingProxyType" can be used to create a read-only view of a + "dict". + +See also: + + For detailed information on thread-safety guarantees for "dict" + objects, see Thread safety for dict objects. + + +Dictionary view objects +======================= + +The objects returned by "dict.keys()", "dict.values()" and +"dict.items()" are *view objects*. They provide a dynamic view on the +dictionary’s entries, which means that when the dictionary changes, +the view reflects these changes. + +Dictionary views can be iterated over to yield their respective data, +and support membership tests: + +len(dictview) + + Return the number of entries in the dictionary. + +iter(dictview) + + Return an iterator over the keys, values or items (represented as + tuples of "(key, value)") in the dictionary. + + Keys and values are iterated over in insertion order. This allows + the creation of "(value, key)" pairs using "zip()": "pairs = + zip(d.values(), d.keys())". Another way to create the same list is + "pairs = [(v, k) for (k, v) in d.items()]". + + Iterating views while adding or deleting entries in the dictionary + may raise a "RuntimeError" or fail to iterate over all entries. + + Changed in version 3.7: Dictionary order is guaranteed to be + insertion order. + +x in dictview + + Return "True" if *x* is in the underlying dictionary’s keys, values + or items (in the latter case, *x* should be a "(key, value)" + tuple). + +reversed(dictview) + + Return a reverse iterator over the keys, values or items of the + dictionary. The view will be iterated in reverse order of the + insertion. + + Changed in version 3.8: Dictionary views are now reversible. 
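+
+   For instance (a minimal sketch):
+
+      >>> d = {'x': 1, 'y': 2}
+      >>> list(reversed(d.keys()))
+      ['y', 'x']
+      >>> list(reversed(d.items()))
+      [('y', 2), ('x', 1)]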
+
+dictview.mapping
+
+   Return a "types.MappingProxyType" that wraps the original
+   dictionary to which the view refers.
+
+   Added in version 3.10.
+
+Keys views are set-like since their entries are unique and *hashable*.
+Items views also have set-like operations since the (key, value) pairs
+are unique and the keys are hashable. If all values in an items view
+are hashable as well, then the items view can interoperate with other
+sets. (Values views are not treated as set-like since the entries are
+generally not unique.) For set-like views, all of the operations
+defined for the abstract base class "collections.abc.Set" are
+available (for example, "==", "<", or "^"). While using set
+operators, set-like views accept any iterable as the other operand,
+unlike sets which only accept sets as the input.
+
+An example of dictionary view usage:
+
+   >>> dishes = {'eggs': 2, 'sausage': 1, 'bacon': 1, 'spam': 500}
+   >>> keys = dishes.keys()
+   >>> values = dishes.values()
+
+   >>> # iteration
+   >>> n = 0
+   >>> for val in values:
+   ...     n += val
+   ...
+   >>> print(n)
+   504
+
+   >>> # keys and values are iterated over in the same order (insertion order)
+   >>> list(keys)
+   ['eggs', 'sausage', 'bacon', 'spam']
+   >>> list(values)
+   [2, 1, 1, 500]
+
+   >>> # view objects are dynamic and reflect dict changes
+   >>> del dishes['eggs']
+   >>> del dishes['sausage']
+   >>> list(keys)
+   ['bacon', 'spam']
+
+   >>> # set operations
+   >>> keys & {'eggs', 'bacon', 'salad'}
+   {'bacon'}
+   >>> keys ^ {'sausage', 'juice'} == {'juice', 'sausage', 'bacon', 'spam'}
+   True
+   >>> keys | ['juice', 'juice', 'juice'] == {'bacon', 'spam', 'juice'}
+   True
+
+   >>> # get back a read-only proxy for the original dictionary
+   >>> values.mapping
+   mappingproxy({'bacon': 1, 'spam': 500})
+   >>> values.mapping['spam']
+   500
+''',
+ 'typesmethods': r'''Methods
+*******
+
+Methods are functions that are called using the attribute notation.
+There are two flavors: built-in methods (such as "append()" on lists)
+and class instance methods. Built-in methods are described with the
+types that support them.
+
+If you access a method (a function defined in a class namespace)
+through an instance, you get a special object: a *bound method* (also
+called instance method) object. When called, it will add the "self"
+argument to the argument list. Bound methods have two special read-
+only attributes: "m.__self__" is the object on which the method
+operates, and "m.__func__" is the function implementing the method.
+Calling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to
+calling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".
+
+Like function objects, bound method objects support getting arbitrary
+attributes. However, since method attributes are actually stored on
+the underlying function object ("method.__func__"), setting method
+attributes on bound methods is disallowed. Attempting to set an
+attribute on a method results in an "AttributeError" being raised. In
+order to set a method attribute, you need to explicitly set it on the
+underlying function object:
+
+   >>> class C:
+   ...     def method(self):
+   ...         pass
+   ...
+   >>> c = C()
+   >>> c.method.whoami = 'my name is method'  # can't set on the method
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   AttributeError: 'method' object has no attribute 'whoami'
+   >>> c.method.__func__.whoami = 'my name is method'
+   >>> c.method.whoami
+   'my name is method'
+
+See Instance methods for more information. 
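+
+As a final sketch of the "m.__func__(m.__self__, ...)" equivalence
+described above (the class name "Greeter" is illustrative only):
+
+   >>> class Greeter:
+   ...     def hello(self, name):
+   ...         return 'hello, ' + name
+   ...
+   >>> g = Greeter()
+   >>> m = g.hello
+   >>> m('world') == m.__func__(m.__self__, 'world')
+   True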
+''',
+ 'typesmodules': r'''Modules
+*******
+
+The only special operation on a module is attribute access: "m.name",
+where *m* is a module and *name* accesses a name defined in *m*’s
+symbol table. Module attributes can be assigned to. (Note that the
+"import" statement is not, strictly speaking, an operation on a module
+object; "import foo" does not require a module object named *foo* to
+exist, rather it requires an (external) *definition* for a module
+named *foo* somewhere.)
+
+A special attribute of every module is "__dict__". This is the
+dictionary containing the module’s symbol table. Modifying this
+dictionary will actually change the module’s symbol table, but direct
+assignment to the "__dict__" attribute is not possible (you can write
+"m.__dict__['a'] = 1", which defines "m.a" to be "1", but you can’t
+write "m.__dict__ = {}"). Modifying "__dict__" directly is not
+recommended.
+
+Modules built into the interpreter are written like this: "<module
+'sys' (built-in)>". If loaded from a file, they are written as
+"<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>".
+''',
+ 'typesseq': r'''Sequence Types — "list", "tuple", "range"
+*****************************************
+
+There are three basic sequence types: lists, tuples, and range
+objects. Additional sequence types tailored for processing of binary
+data and text strings are described in dedicated sections.
+
+
+Common Sequence Operations
+==========================
+
+The operations in the following table are supported by most sequence
+types, both mutable and immutable. The "collections.abc.Sequence" ABC
+is provided to make it easier to correctly implement these operations
+on custom sequence types.
+
+This table lists the sequence operations sorted in ascending priority.
+In the table, *s* and *t* are sequences of the same type, *n*, *i*,
+*j* and *k* are integers and *x* is an arbitrary object that meets any
+type and value restrictions imposed by *s*.
+
+The "in" and "not in" operations have the same priorities as the
+comparison operations. The "+" (concatenation) and "*" (repetition)
+operations have the same priority as the corresponding numeric
+operations. 
[3] + ++----------------------------+----------------------------------+------------+ +| Operation | Result | Notes | +|============================|==================================|============| +| "x in s" | "True" if an item of *s* is | (1) | +| | equal to *x*, else "False" | | ++----------------------------+----------------------------------+------------+ +| "x not in s" | "False" if an item of *s* is | (1) | +| | equal to *x*, else "True" | | ++----------------------------+----------------------------------+------------+ +| "s + t" | the concatenation of *s* and *t* | (6)(7) | ++----------------------------+----------------------------------+------------+ +| "s * n" or "n * s" | equivalent to adding *s* to | (2)(7) | +| | itself *n* times | | ++----------------------------+----------------------------------+------------+ +| "s[i]" | *i*th item of *s*, origin 0 | (3)(8) | ++----------------------------+----------------------------------+------------+ +| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) | ++----------------------------+----------------------------------+------------+ +| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) | +| | with step *k* | | ++----------------------------+----------------------------------+------------+ +| "len(s)" | length of *s* | | ++----------------------------+----------------------------------+------------+ +| "min(s)" | smallest item of *s* | | ++----------------------------+----------------------------------+------------+ +| "max(s)" | largest item of *s* | | ++----------------------------+----------------------------------+------------+ + +Sequences of the same type also support comparisons. In particular, +tuples and lists are compared lexicographically by comparing +corresponding elements. This means that to compare equal, every +element must compare equal and the two sequences must be of the same +type and have the same length. (For full details see Comparisons in +the language reference.) + +Forward and reversed iterators over mutable sequences access values +using an index. That index will continue to march forward (or +backward) even if the underlying sequence is mutated. The iterator +terminates only when an "IndexError" or a "StopIteration" is +encountered (or when the index drops below zero). + +Notes: + +1. While the "in" and "not in" operations are used only for simple + containment testing in the general case, some specialised sequences + (such as "str", "bytes" and "bytearray") also use them for + subsequence testing: + + >>> "gg" in "eggs" + True + +2. Values of *n* less than "0" are treated as "0" (which yields an + empty sequence of the same type as *s*). Note that items in the + sequence *s* are not copied; they are referenced multiple times. + This often haunts new Python programmers; consider: + + >>> lists = [[]] * 3 + >>> lists + [[], [], []] + >>> lists[0].append(3) + >>> lists + [[3], [3], [3]] + + What has happened is that "[[]]" is a one-element list containing + an empty list, so all three elements of "[[]] * 3" are references + to this single empty list. Modifying any of the elements of + "lists" modifies this single list. You can create a list of + different lists this way: + + >>> lists = [[] for i in range(3)] + >>> lists[0].append(3) + >>> lists[1].append(5) + >>> lists[2].append(7) + >>> lists + [[3], [5], [7]] + + Further explanation is available in the FAQ entry How do I create a + multidimensional list?. + +3. 
If *i* or *j* is negative, the index is relative to the end of + sequence *s*: "len(s) + i" or "len(s) + j" is substituted. But + note that "-0" is still "0". + +4. The slice of *s* from *i* to *j* is defined as the sequence of + items with index *k* such that "i <= k < j". + + * If *i* is omitted or "None", use "0". + + * If *j* is omitted or "None", use "len(s)". + + * If *i* or *j* is less than "-len(s)", use "0". + + * If *i* or *j* is greater than "len(s)", use "len(s)". + + * If *i* is greater than or equal to *j*, the slice is empty. + +5. The slice of *s* from *i* to *j* with step *k* is defined as the + sequence of items with index "x = i + n*k" such that "0 <= n < + (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k", + "i+3*k" and so on, stopping when *j* is reached (but never + including *j*). When *k* is positive, *i* and *j* are reduced to + "len(s)" if they are greater. When *k* is negative, *i* and *j* are + reduced to "len(s) - 1" if they are greater. If *i* or *j* are + omitted or "None", they become “end” values (which end depends on + the sign of *k*). Note, *k* cannot be zero. If *k* is "None", it + is treated like "1". + +6. Concatenating immutable sequences always results in a new object. + This means that building up a sequence by repeated concatenation + will have a quadratic runtime cost in the total sequence length. + To get a linear runtime cost, you must switch to one of the + alternatives below: + + * if concatenating "str" objects, you can build a list and use + "str.join()" at the end or else write to an "io.StringIO" + instance and retrieve its value when complete + + * if concatenating "bytes" objects, you can similarly use + "bytes.join()" or "io.BytesIO", or you can do in-place + concatenation with a "bytearray" object. "bytearray" objects are + mutable and have an efficient overallocation mechanism + + * if concatenating "tuple" objects, extend a "list" instead + + * for other types, investigate the relevant class documentation + +7. Some sequence types (such as "range") only support item sequences + that follow specific patterns, and hence don’t support sequence + concatenation or repetition. + +8. An "IndexError" is raised if *i* is outside the sequence range. + +-[ Sequence Methods ]- + +Sequence types also support the following methods: + +sequence.count(value, /) + + Return the total number of occurrences of *value* in *sequence*. + +sequence.index(value[, start[, stop]]) + + Return the index of the first occurrence of *value* in *sequence*. + + Raises "ValueError" if *value* is not found in *sequence*. + + The *start* or *stop* arguments allow for efficient searching of + subsections of the sequence, beginning at *start* and ending at + *stop*. This is roughly equivalent to "start + + sequence[start:stop].index(value)", only without copying any data. + + Caution: + + Not all sequence types support passing the *start* and *stop* + arguments. + + +Immutable Sequence Types +======================== + +The only operation that immutable sequence types generally implement +that is not also implemented by mutable sequence types is support for +the "hash()" built-in. + +This support allows immutable sequences, such as "tuple" instances, to +be used as "dict" keys and stored in "set" and "frozenset" instances. + +Attempting to hash an immutable sequence that contains unhashable +values will result in "TypeError". + + +Mutable Sequence Types +====================== + +The operations in the following table are defined on mutable sequence +types. 
The "collections.abc.MutableSequence" ABC is provided to make +it easier to correctly implement these operations on custom sequence +types. + +In the table *s* is an instance of a mutable sequence type, *t* is any +iterable object and *x* is an arbitrary object that meets any type and +value restrictions imposed by *s* (for example, "bytearray" only +accepts integers that meet the value restriction "0 <= x <= 255"). + ++--------------------------------+----------------------------------+-----------------------+ +| Operation | Result | Notes | +|================================|==================================|=======================| +| "s[i] = x" | item *i* of *s* is replaced by | | +| | *x* | | ++--------------------------------+----------------------------------+-----------------------+ +| "del s[i]" | removes item *i* of *s* | | ++--------------------------------+----------------------------------+-----------------------+ +| "s[i:j] = t" | slice of *s* from *i* to *j* is | | +| | replaced by the contents of the | | +| | iterable *t* | | ++--------------------------------+----------------------------------+-----------------------+ +| "del s[i:j]" | removes the elements of "s[i:j]" | | +| | from the list (same as "s[i:j] = | | +| | []") | | ++--------------------------------+----------------------------------+-----------------------+ +| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) | +| | replaced by those of *t* | | ++--------------------------------+----------------------------------+-----------------------+ +| "del s[i:j:k]" | removes the elements of | | +| | "s[i:j:k]" from the list | | ++--------------------------------+----------------------------------+-----------------------+ +| "s += t" | extends *s* with the contents of | | +| | *t* (for the most part the same | | +| | as "s[len(s):len(s)] = t") | | ++--------------------------------+----------------------------------+-----------------------+ +| "s *= n" | updates *s* with its contents | (2) | +| | repeated *n* times | | ++--------------------------------+----------------------------------+-----------------------+ + +Notes: + +1. If *k* is not equal to "1", *t* must have the same length as the + slice it is replacing. + +2. The value *n* is an integer, or an object implementing + "__index__()". Zero and negative values of *n* clear the sequence. + Items in the sequence are not copied; they are referenced multiple + times, as explained for "s * n" under Common Sequence Operations. + +-[ Mutable Sequence Methods ]- + +Mutable sequence types also support the following methods: + +sequence.append(value, /) + + Append *value* to the end of the sequence. This is equivalent to + writing "seq[len(seq):len(seq)] = [value]". + +sequence.clear() + + Added in version 3.3. + + Remove all items from *sequence*. This is equivalent to writing + "del sequence[:]". + +sequence.copy() + + Added in version 3.3. + + Create a shallow copy of *sequence*. This is equivalent to writing + "sequence[:]". + + Hint: + + The "copy()" method is not part of the "MutableSequence" "ABC", + but most concrete mutable sequence types provide it. + +sequence.extend(iterable, /) + + Extend *sequence* with the contents of *iterable*. For the most + part, this is the same as writing "seq[len(seq):len(seq)] = + iterable". + +sequence.insert(index, value, /) + + Insert *value* into *sequence* at the given *index*. This is + equivalent to writing "sequence[index:index] = [value]". 
+ +sequence.pop(index=-1, /) + + Retrieve the item at *index* and also removes it from *sequence*. + By default, the last item in *sequence* is removed and returned. + +sequence.remove(value, /) + + Remove the first item from *sequence* where "sequence[i] == value". + + Raises "ValueError" if *value* is not found in *sequence*. + +sequence.reverse() + + Reverse the items of *sequence* in place. This method maintains + economy of space when reversing a large sequence. To remind users + that it operates by side-effect, it returns "None". + + +Lists +===== + +Lists are mutable sequences, typically used to store collections of +homogeneous items (where the precise degree of similarity will vary by +application). + +class list(iterable=(), /) + + Lists may be constructed in several ways: + + * Using a pair of square brackets to denote the empty list: "[]" + + * Using square brackets, separating items with commas: "[a]", "[a, + b, c]" + + * Using a list comprehension: "[x for x in iterable]" + + * Using the type constructor: "list()" or "list(iterable)" + + The constructor builds a list whose items are the same and in the + same order as *iterable*’s items. *iterable* may be either a + sequence, a container that supports iteration, or an iterator + object. If *iterable* is already a list, a copy is made and + returned, similar to "iterable[:]". For example, "list('abc')" + returns "['a', 'b', 'c']" and "list( (1, 2, 3) )" returns "[1, 2, + 3]". If no argument is given, the constructor creates a new empty + list, "[]". + + Many other operations also produce lists, including the "sorted()" + built-in. + + Lists implement all of the common and mutable sequence operations. + Lists also provide the following additional method: + + sort(*, key=None, reverse=False) + + This method sorts the list in place, using only "<" comparisons + between items. Exceptions are not suppressed - if any comparison + operations fail, the entire sort operation will fail (and the + list will likely be left in a partially modified state). + + "sort()" accepts two arguments that can only be passed by + keyword (keyword-only arguments): + + *key* specifies a function of one argument that is used to + extract a comparison key from each list element (for example, + "key=str.lower"). The key corresponding to each item in the list + is calculated once and then used for the entire sorting process. + The default value of "None" means that list items are sorted + directly without calculating a separate key value. + + The "functools.cmp_to_key()" utility is available to convert a + 2.x style *cmp* function to a *key* function. + + *reverse* is a boolean value. If set to "True", then the list + elements are sorted as if each comparison were reversed. + + This method modifies the sequence in place for economy of space + when sorting a large sequence. To remind users that it operates + by side effect, it does not return the sorted sequence (use + "sorted()" to explicitly request a new sorted list instance). + + The "sort()" method is guaranteed to be stable. A sort is + stable if it guarantees not to change the relative order of + elements that compare equal — this is helpful for sorting in + multiple passes (for example, sort by department, then by salary + grade). + + For sorting examples and a brief sorting tutorial, see Sorting + Techniques. + + **CPython implementation detail:** While a list is being sorted, + the effect of attempting to mutate, or even inspect, the list is + undefined. 
The C implementation of Python makes the list appear + empty for the duration, and raises "ValueError" if it can detect + that the list has been mutated during a sort. + +See also: + + For detailed information on thread-safety guarantees for "list" + objects, see Thread safety for list objects. + + +Tuples +====== + +Tuples are immutable sequences, typically used to store collections of +heterogeneous data (such as the 2-tuples produced by the "enumerate()" +built-in). Tuples are also used for cases where an immutable sequence +of homogeneous data is needed (such as allowing storage in a "set" or +"dict" instance). + +class tuple(iterable=(), /) + + Tuples may be constructed in a number of ways: + + * Using a pair of parentheses to denote the empty tuple: "()" + + * Using a trailing comma for a singleton tuple: "a," or "(a,)" + + * Separating items with commas: "a, b, c" or "(a, b, c)" + + * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)" + + The constructor builds a tuple whose items are the same and in the + same order as *iterable*’s items. *iterable* may be either a + sequence, a container that supports iteration, or an iterator + object. If *iterable* is already a tuple, it is returned + unchanged. For example, "tuple('abc')" returns "('a', 'b', 'c')" + and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is + given, the constructor creates a new empty tuple, "()". + + Note that it is actually the comma which makes a tuple, not the + parentheses. The parentheses are optional, except in the empty + tuple case, or when they are needed to avoid syntactic ambiguity. + For example, "f(a, b, c)" is a function call with three arguments, + while "f((a, b, c))" is a function call with a 3-tuple as the sole + argument. + + Tuples implement all of the common sequence operations. + +For heterogeneous collections of data where access by name is clearer +than access by index, "collections.namedtuple()" may be a more +appropriate choice than a simple tuple object. + + +Ranges +====== + +The "range" type represents an immutable sequence of numbers and is +commonly used for looping a specific number of times in "for" loops. + +class range(stop, /) +class range(start, stop, step=1, /) + + The arguments to the range constructor must be integers (either + built-in "int" or any object that implements the "__index__()" + special method). If the *step* argument is omitted, it defaults to + "1". If the *start* argument is omitted, it defaults to "0". If + *step* is zero, "ValueError" is raised. + + For a positive *step*, the contents of a range "r" are determined + by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] < + stop". + + For a negative *step*, the contents of the range are still + determined by the formula "r[i] = start + step*i", but the + constraints are "i >= 0" and "r[i] > stop". + + A range object will be empty if "r[0]" does not meet the value + constraint. Ranges do support negative indices, but these are + interpreted as indexing from the end of the sequence determined by + the positive indices. + + Ranges containing absolute values larger than "sys.maxsize" are + permitted but some features (such as "len()") may raise + "OverflowError". 
+ + Range examples: + + >>> list(range(10)) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> list(range(1, 11)) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> list(range(0, 30, 5)) + [0, 5, 10, 15, 20, 25] + >>> list(range(0, 10, 3)) + [0, 3, 6, 9] + >>> list(range(0, -10, -1)) + [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] + >>> list(range(0)) + [] + >>> list(range(1, 0)) + [] + + Ranges implement all of the common sequence operations except + concatenation and repetition (due to the fact that range objects + can only represent sequences that follow a strict pattern and + repetition and concatenation will usually violate that pattern). + + start + + The value of the *start* parameter (or "0" if the parameter was + not supplied) + + stop + + The value of the *stop* parameter + + step + + The value of the *step* parameter (or "1" if the parameter was + not supplied) + +The advantage of the "range" type over a regular "list" or "tuple" is +that a "range" object will always take the same (small) amount of +memory, no matter the size of the range it represents (as it only +stores the "start", "stop" and "step" values, calculating individual +items and subranges as needed). + +Range objects implement the "collections.abc.Sequence" ABC, and +provide features such as containment tests, element index lookup, +slicing and support for negative indices (see Sequence Types — list, +tuple, range): + +>>> r = range(0, 20, 2) +>>> r +range(0, 20, 2) +>>> 11 in r +False +>>> 10 in r +True +>>> r.index(10) +5 +>>> r[5] +10 +>>> r[:5] +range(0, 10, 2) +>>> r[-1] +18 + +Testing range objects for equality with "==" and "!=" compares them as +sequences. That is, two range objects are considered equal if they +represent the same sequence of values. (Note that two range objects +that compare equal might have different "start", "stop" and "step" +attributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3, +2) == range(0, 4, 2)".) + +Changed in version 3.2: Implement the Sequence ABC. Support slicing +and negative indices. Test "int" objects for membership in constant +time instead of iterating through all items. + +Changed in version 3.3: Define ‘==’ and ‘!=’ to compare range objects +based on the sequence of values they define (instead of comparing +based on object identity).Added the "start", "stop" and "step" +attributes. + +See also: + + * The linspace recipe shows how to implement a lazy version of range + suitable for floating-point applications. +''', + 'typesseq-mutable': r'''Mutable Sequence Types +********************** + +The operations in the following table are defined on mutable sequence +types. The "collections.abc.MutableSequence" ABC is provided to make +it easier to correctly implement these operations on custom sequence +types. + +In the table *s* is an instance of a mutable sequence type, *t* is any +iterable object and *x* is an arbitrary object that meets any type and +value restrictions imposed by *s* (for example, "bytearray" only +accepts integers that meet the value restriction "0 <= x <= 255"). 
+ ++--------------------------------+----------------------------------+-----------------------+ +| Operation | Result | Notes | +|================================|==================================|=======================| +| "s[i] = x" | item *i* of *s* is replaced by | | +| | *x* | | ++--------------------------------+----------------------------------+-----------------------+ +| "del s[i]" | removes item *i* of *s* | | ++--------------------------------+----------------------------------+-----------------------+ +| "s[i:j] = t" | slice of *s* from *i* to *j* is | | +| | replaced by the contents of the | | +| | iterable *t* | | ++--------------------------------+----------------------------------+-----------------------+ +| "del s[i:j]" | removes the elements of "s[i:j]" | | +| | from the list (same as "s[i:j] = | | +| | []") | | ++--------------------------------+----------------------------------+-----------------------+ +| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) | +| | replaced by those of *t* | | ++--------------------------------+----------------------------------+-----------------------+ +| "del s[i:j:k]" | removes the elements of | | +| | "s[i:j:k]" from the list | | ++--------------------------------+----------------------------------+-----------------------+ +| "s += t" | extends *s* with the contents of | | +| | *t* (for the most part the same | | +| | as "s[len(s):len(s)] = t") | | ++--------------------------------+----------------------------------+-----------------------+ +| "s *= n" | updates *s* with its contents | (2) | +| | repeated *n* times | | ++--------------------------------+----------------------------------+-----------------------+ + +Notes: + +1. If *k* is not equal to "1", *t* must have the same length as the + slice it is replacing. + +2. The value *n* is an integer, or an object implementing + "__index__()". Zero and negative values of *n* clear the sequence. + Items in the sequence are not copied; they are referenced multiple + times, as explained for "s * n" under Common Sequence Operations. + +-[ Mutable Sequence Methods ]- + +Mutable sequence types also support the following methods: + +sequence.append(value, /) + + Append *value* to the end of the sequence. This is equivalent to + writing "seq[len(seq):len(seq)] = [value]". + +sequence.clear() + + Added in version 3.3. + + Remove all items from *sequence*. This is equivalent to writing + "del sequence[:]". + +sequence.copy() + + Added in version 3.3. + + Create a shallow copy of *sequence*. This is equivalent to writing + "sequence[:]". + + Hint: + + The "copy()" method is not part of the "MutableSequence" "ABC", + but most concrete mutable sequence types provide it. + +sequence.extend(iterable, /) + + Extend *sequence* with the contents of *iterable*. For the most + part, this is the same as writing "seq[len(seq):len(seq)] = + iterable". + +sequence.insert(index, value, /) + + Insert *value* into *sequence* at the given *index*. This is + equivalent to writing "sequence[index:index] = [value]". + +sequence.pop(index=-1, /) + + Retrieve the item at *index* and also removes it from *sequence*. + By default, the last item in *sequence* is removed and returned. + +sequence.remove(value, /) + + Remove the first item from *sequence* where "sequence[i] == value". + + Raises "ValueError" if *value* is not found in *sequence*. + +sequence.reverse() + + Reverse the items of *sequence* in place. This method maintains + economy of space when reversing a large sequence. 
To remind users + that it operates by side-effect, it returns "None". +''', + 'unary': r'''Unary arithmetic and bitwise operations +*************************************** + +All unary arithmetic and bitwise operations have the same priority: + + u_expr: power | "-" u_expr | "+" u_expr | "~" u_expr + +The unary "-" (minus) operator yields the negation of its numeric +argument; the operation can be overridden with the "__neg__()" special +method. + +The unary "+" (plus) operator yields its numeric argument unchanged; +the operation can be overridden with the "__pos__()" special method. + +The unary "~" (invert) operator yields the bitwise inversion of its +integer argument. The bitwise inversion of "x" is defined as +"-(x+1)". It only applies to integral numbers or to custom objects +that override the "__invert__()" special method. + +In all three cases, if the argument does not have the proper type, a +"TypeError" exception is raised. +''', + 'while': r'''The "while" statement +********************* + +The "while" statement is used for repeated execution as long as an +expression is true: + + while_stmt: "while" assignment_expression ":" suite + ["else" ":" suite] + +This repeatedly tests the expression and, if it is true, executes the +first suite; if the expression is false (which may be the first time +it is tested) the suite of the "else" clause, if present, is executed +and the loop terminates. + +A "break" statement executed in the first suite terminates the loop +without executing the "else" clause’s suite. A "continue" statement +executed in the first suite skips the rest of the suite and goes back +to testing the expression. +''', + 'with': r'''The "with" statement +******************** + +The "with" statement is used to wrap the execution of a block with +methods defined by a context manager (see section With Statement +Context Managers). This allows common "try"…"except"…"finally" usage +patterns to be encapsulated for convenient reuse. + + with_stmt: "with" ( "(" with_stmt_contents ","? ")" | with_stmt_contents ) ":" suite + with_stmt_contents: with_item ("," with_item)* + with_item: expression ["as" target] + +The execution of the "with" statement with one “item” proceeds as +follows: + +1. The context expression (the expression given in the "with_item") is + evaluated to obtain a context manager. + +2. The context manager’s "__enter__()" is loaded for later use. + +3. The context manager’s "__exit__()" is loaded for later use. + +4. The context manager’s "__enter__()" method is invoked. + +5. If a target was included in the "with" statement, the return value + from "__enter__()" is assigned to it. + + Note: + + The "with" statement guarantees that if the "__enter__()" method + returns without an error, then "__exit__()" will always be + called. Thus, if an error occurs during the assignment to the + target list, it will be treated the same as an error occurring + within the suite would be. See step 7 below. + +6. The suite is executed. + +7. The context manager’s "__exit__()" method is invoked. If an + exception caused the suite to be exited, its type, value, and + traceback are passed as arguments to "__exit__()". Otherwise, three + "None" arguments are supplied. + + If the suite was exited due to an exception, and the return value + from the "__exit__()" method was false, the exception is reraised. + If the return value was true, the exception is suppressed, and + execution continues with the statement following the "with" + statement. 
+
+   If the suite was exited for any reason other than an exception, the
+   return value from "__exit__()" is ignored, and execution proceeds
+   at the normal location for the kind of exit that was taken.
+
+The following code:
+
+   with EXPRESSION as TARGET:
+       SUITE
+
+is semantically equivalent to:
+
+   manager = (EXPRESSION)
+   enter = manager.__enter__
+   exit = manager.__exit__
+   value = enter()
+   hit_except = False
+
+   try:
+       TARGET = value
+       SUITE
+   except:
+       hit_except = True
+       if not exit(*sys.exc_info()):
+           raise
+   finally:
+       if not hit_except:
+           exit(None, None, None)
+
+except that implicit special method lookup is used for "__enter__()"
+and "__exit__()".
+
+With more than one item, the context managers are processed as if
+multiple "with" statements were nested:
+
+   with A() as a, B() as b:
+       SUITE
+
+is semantically equivalent to:
+
+   with A() as a:
+       with B() as b:
+           SUITE
+
+You can also write multi-item context managers in multiple lines if
+the items are surrounded by parentheses. For example:
+
+   with (
+       A() as a,
+       B() as b,
+   ):
+       SUITE
+
+Changed in version 3.1: Support for multiple context expressions.
+
+Changed in version 3.10: Support for using grouping parentheses to
+break the statement in multiple lines.
+
+See also:
+
+  **PEP 343** - The “with” statement
+     The specification, background, and examples for the Python "with"
+     statement.
+''',
+ 'yield': r'''The "yield" statement
+*********************
+
+   yield_stmt: yield_expression
+
+A "yield" statement is semantically equivalent to a yield expression.
+The "yield" statement can be used to omit the parentheses that would
+otherwise be required in the equivalent yield expression statement.
+For example, the yield statements
+
+   yield <expr>
+   yield from <expr>
+
+are equivalent to the yield expression statements
+
+   (yield <expr>)
+   (yield from <expr>)
+
+Yield expressions and statements are only used when defining a
+*generator* function, and are only used in the body of the generator
+function. Using "yield" in a function definition is sufficient to
+cause that definition to create a generator function instead of a
+normal function.
+
+For full details of "yield" semantics, refer to the Yield expressions
+section.
+''',
+}
diff --git a/Python313_13_x86_Template/Lib/queue.py b/Python314_4_x86_Template/Lib/queue.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/queue.py
rename to Python314_4_x86_Template/Lib/queue.py
diff --git a/Python314_4_x86_Template/Lib/quopri.py b/Python314_4_x86_Template/Lib/quopri.py
new file mode 100644
index 00000000..129fd2f5
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/quopri.py
@@ -0,0 +1,235 @@
+"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
+
+# (Dec 1991 version).
+
+__all__ = ["encode", "decode", "encodestring", "decodestring"]
+
+ESCAPE = b'='
+MAXLINESIZE = 76
+HEX = b'0123456789ABCDEF'
+EMPTYSTRING = b''
+
+try:
+    from binascii import a2b_qp, b2a_qp
+except ImportError:
+    a2b_qp = None
+    b2a_qp = None
+
+
+def needsquoting(c, quotetabs, header):
+    """Decide whether a particular byte ordinal needs to be quoted.
+
+    The 'quotetabs' flag indicates whether embedded tabs and spaces should be
+    quoted. Note that line-ending tabs and spaces are always encoded, as per
+    RFC 1521.
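+
+    When 'header' is true, '_' also needs quoting, because it is used to
+    encode spaces as per RFC 1522.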
+ """ + assert isinstance(c, bytes) + if c in b' \t': + return quotetabs + # if header, we have to escape _ because _ is used to escape space + if c == b'_': + return header + return c == ESCAPE or not (b' ' <= c <= b'~') + +def quote(c): + """Quote a single character.""" + assert isinstance(c, bytes) and len(c)==1 + c = ord(c) + return ESCAPE + bytes((HEX[c//16], HEX[c%16])) + + + +def encode(input, output, quotetabs, header=False): + """Read 'input', apply quoted-printable encoding, and write to 'output'. + + 'input' and 'output' are binary file objects. The 'quotetabs' flag + indicates whether embedded tabs and spaces should be quoted. Note that + line-ending tabs and spaces are always encoded, as per RFC 1521. + The 'header' flag indicates whether we are encoding spaces as _ as per RFC + 1522.""" + + if b2a_qp is not None: + data = input.read() + odata = b2a_qp(data, quotetabs=quotetabs, header=header) + output.write(odata) + return + + def write(s, output=output, lineEnd=b'\n'): + # RFC 1521 requires that the line ending in a space or tab must have + # that trailing character encoded. + if s and s[-1:] in b' \t': + output.write(s[:-1] + quote(s[-1:]) + lineEnd) + elif s == b'.': + output.write(quote(s) + lineEnd) + else: + output.write(s + lineEnd) + + prevline = None + while line := input.readline(): + outline = [] + # Strip off any readline induced trailing newline + stripped = b'' + if line[-1:] == b'\n': + line = line[:-1] + stripped = b'\n' + # Calculate the un-length-limited encoded line + for c in line: + c = bytes((c,)) + if needsquoting(c, quotetabs, header): + c = quote(c) + if header and c == b' ': + outline.append(b'_') + else: + outline.append(c) + # First, write out the previous line + if prevline is not None: + write(prevline) + # Now see if we need any soft line breaks because of RFC-imposed + # length limitations. Then do the thisline->prevline dance. + thisline = EMPTYSTRING.join(outline) + while len(thisline) > MAXLINESIZE: + # Don't forget to include the soft line break `=' sign in the + # length calculation! + write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n') + thisline = thisline[MAXLINESIZE-1:] + # Write out the current line + prevline = thisline + # Write out the last line, without a trailing newline + if prevline is not None: + write(prevline, lineEnd=stripped) + +def encodestring(s, quotetabs=False, header=False): + if b2a_qp is not None: + return b2a_qp(s, quotetabs=quotetabs, header=header) + from io import BytesIO + infp = BytesIO(s) + outfp = BytesIO() + encode(infp, outfp, quotetabs, header) + return outfp.getvalue() + + + +def decode(input, output, header=False): + """Read 'input', apply quoted-printable decoding, and write to 'output'. + 'input' and 'output' are binary file objects. 
+ If 'header' is true, decode underscore as space (per RFC 1522).""" + + if a2b_qp is not None: + data = input.read() + odata = a2b_qp(data, header=header) + output.write(odata) + return + + new = b'' + while line := input.readline(): + i, n = 0, len(line) + if n > 0 and line[n-1:n] == b'\n': + partial = 0; n = n-1 + # Strip trailing whitespace + while n > 0 and line[n-1:n] in b" \t\r": + n = n-1 + else: + partial = 1 + while i < n: + c = line[i:i+1] + if c == b'_' and header: + new = new + b' '; i = i+1 + elif c != ESCAPE: + new = new + c; i = i+1 + elif i+1 == n and not partial: + partial = 1; break + elif i+1 < n and line[i+1:i+2] == ESCAPE: + new = new + ESCAPE; i = i+2 + elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]): + new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3 + else: # Bad escape sequence -- leave it in + new = new + c; i = i+1 + if not partial: + output.write(new + b'\n') + new = b'' + if new: + output.write(new) + +def decodestring(s, header=False): + if a2b_qp is not None: + return a2b_qp(s, header=header) + from io import BytesIO + infp = BytesIO(s) + outfp = BytesIO() + decode(infp, outfp, header=header) + return outfp.getvalue() + + + +# Other helper functions +def ishex(c): + """Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII.""" + assert isinstance(c, bytes) + return b'0' <= c <= b'9' or b'a' <= c <= b'f' or b'A' <= c <= b'F' + +def unhex(s): + """Get the integer value of a hexadecimal number.""" + bits = 0 + for c in s: + c = bytes((c,)) + if b'0' <= c <= b'9': + i = ord('0') + elif b'a' <= c <= b'f': + i = ord('a')-10 + elif b'A' <= c <= b'F': + i = ord(b'A')-10 + else: + assert False, "non-hex digit "+repr(c) + bits = bits*16 + (ord(c) - i) + return bits + + + +def main(): + import sys + import getopt + try: + opts, args = getopt.getopt(sys.argv[1:], 'td') + except getopt.error as msg: + sys.stdout = sys.stderr + print(msg) + print("usage: quopri [-t | -d] [file] ...") + print("-t: quote tabs") + print("-d: decode; default encode") + sys.exit(2) + deco = False + tabs = False + for o, a in opts: + if o == '-t': tabs = True + if o == '-d': deco = True + if tabs and deco: + sys.stdout = sys.stderr + print("-t and -d are mutually exclusive") + sys.exit(2) + if not args: args = ['-'] + sts = 0 + for file in args: + if file == '-': + fp = sys.stdin.buffer + else: + try: + fp = open(file, "rb") + except OSError as msg: + sys.stderr.write("%s: can't open (%s)\n" % (file, msg)) + sts = 1 + continue + try: + if deco: + decode(fp, sys.stdout.buffer) + else: + encode(fp, sys.stdout.buffer, tabs) + finally: + if file != '-': + fp.close() + if sts: + sys.exit(sts) + + + +if __name__ == '__main__': + main() diff --git a/Python314_4_x86_Template/Lib/random.py b/Python314_4_x86_Template/Lib/random.py new file mode 100644 index 00000000..86d562f0 --- /dev/null +++ b/Python314_4_x86_Template/Lib/random.py @@ -0,0 +1,1078 @@ +"""Random variable generators. 
+ + bytes + ----- + uniform bytes (values between 0 and 255) + + integers + -------- + uniform within range + + sequences + --------- + pick random element + pick random sample + pick weighted random sample + generate random permutation + + distributions on the real line: + ------------------------------ + uniform + triangular + normal (Gaussian) + lognormal + negative exponential + gamma + beta + pareto + Weibull + + distributions on the circle (angles 0 to 2pi) + --------------------------------------------- + circular uniform + von Mises + + discrete distributions + ---------------------- + binomial + + +General notes on the underlying Mersenne Twister core generator: + +* The period is 2**19937-1. +* It is one of the most extensively tested generators in existence. +* The random() method is implemented in C, executes in a single Python step, + and is, therefore, threadsafe. + +""" + +# Translated by Guido van Rossum from C source provided by +# Adrian Baddeley. Adapted by Raymond Hettinger for use with +# the Mersenne Twister and os.urandom() core generators. + +from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil +from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin +from math import tau as TWOPI, floor as _floor, isfinite as _isfinite +from math import lgamma as _lgamma, fabs as _fabs, log2 as _log2 +from os import urandom as _urandom +from _collections_abc import Sequence as _Sequence +from operator import index as _index +from itertools import accumulate as _accumulate, repeat as _repeat +from bisect import bisect as _bisect +import os as _os +import _random + +__all__ = [ + "Random", + "SystemRandom", + "betavariate", + "binomialvariate", + "choice", + "choices", + "expovariate", + "gammavariate", + "gauss", + "getrandbits", + "getstate", + "lognormvariate", + "normalvariate", + "paretovariate", + "randbytes", + "randint", + "random", + "randrange", + "sample", + "seed", + "setstate", + "shuffle", + "triangular", + "uniform", + "vonmisesvariate", + "weibullvariate", +] + +NV_MAGICCONST = 4 * _exp(-0.5) / _sqrt(2.0) +LOG4 = _log(4.0) +SG_MAGICCONST = 1.0 + _log(4.5) +BPF = 53 # Number of bits in a float +RECIP_BPF = 2 ** -BPF +_ONE = 1 +_sha512 = None + + +class Random(_random.Random): + """Random number generator base class used by bound module functions. + + Used to instantiate instances of Random to get generators that don't + share state. + + Class Random can also be subclassed if you want to use a different basic + generator of your own devising: in that case, override the following + methods: random(), seed(), getstate(), and setstate(). + Optionally, implement a getrandbits() method so that randrange() + can cover arbitrarily large ranges. + + """ + + VERSION = 3 # used by getstate/setstate + + def __init__(self, x=None): + """Initialize an instance. + + Optional argument x controls seeding, as for Random.seed(). + """ + + self.seed(x) + self.gauss_next = None + + def seed(self, a=None, version=2): + """Initialize internal state from a seed. + + The only supported seed types are None, int, float, + str, bytes, and bytearray. + + None or no argument seeds from current time or from an operating + system specific randomness source if available. + + If *a* is an int, all bits are used. + + For version 2 (the default), all of the bits are used if *a* is a str, + bytes, or bytearray. 
For version 1 (provided for reproducing random + sequences from older versions of Python), the algorithm for str and + bytes generates a narrower range of seeds. + + """ + + if version == 1 and isinstance(a, (str, bytes)): + a = a.decode('latin-1') if isinstance(a, bytes) else a + x = ord(a[0]) << 7 if a else 0 + for c in map(ord, a): + x = ((1000003 * x) ^ c) & 0xFFFFFFFFFFFFFFFF + x ^= len(a) + a = -2 if x == -1 else x + + elif version == 2 and isinstance(a, (str, bytes, bytearray)): + global _sha512 + if _sha512 is None: + try: + # hashlib is pretty heavy to load, try lean internal + # module first + from _sha2 import sha512 as _sha512 + except ImportError: + # fallback to official implementation + from hashlib import sha512 as _sha512 + + if isinstance(a, str): + a = a.encode() + a = int.from_bytes(a + _sha512(a).digest()) + + elif not isinstance(a, (type(None), int, float, str, bytes, bytearray)): + raise TypeError('The only supported seed types are:\n' + 'None, int, float, str, bytes, and bytearray.') + + super().seed(a) + self.gauss_next = None + + def getstate(self): + """Return internal state; can be passed to setstate() later.""" + return self.VERSION, super().getstate(), self.gauss_next + + def setstate(self, state): + """Restore internal state from object returned by getstate().""" + version = state[0] + if version == 3: + version, internalstate, self.gauss_next = state + super().setstate(internalstate) + elif version == 2: + version, internalstate, self.gauss_next = state + # In version 2, the state was saved as signed ints, which causes + # inconsistencies between 32/64-bit systems. The state is + # really unsigned 32-bit ints, so we convert negative ints from + # version 2 to positive longs for version 3. + try: + internalstate = tuple(x % (2 ** 32) for x in internalstate) + except ValueError as e: + raise TypeError from e + super().setstate(internalstate) + else: + raise ValueError("state with version %s passed to " + "Random.setstate() of version %s" % + (version, self.VERSION)) + + + ## ------------------------------------------------------- + ## ---- Methods below this point do not need to be overridden or extended + ## ---- when subclassing for the purpose of using a different core generator. + + + ## -------------------- pickle support ------------------- + + # Issue 17489: Since __reduce__ was defined to fix #759889 this is no + # longer called; we leave it here because it has been here since random was + # rewritten back in 2001 and why risk breaking something. + def __getstate__(self): # for pickle + return self.getstate() + + def __setstate__(self, state): # for pickle + self.setstate(state) + + def __reduce__(self): + return self.__class__, (), self.getstate() + + + ## ---- internal support method for evenly distributed integers ---- + + def __init_subclass__(cls, /, **kwargs): + """Control how subclasses generate random integers. + + The algorithm a subclass can use depends on the random() and/or + getrandbits() implementation available to it and determines + whether it can generate random integers from arbitrarily large + ranges. + """ + + for c in cls.__mro__: + if '_randbelow' in c.__dict__: + # just inherit it + break + if 'getrandbits' in c.__dict__: + cls._randbelow = cls._randbelow_with_getrandbits + break + if 'random' in c.__dict__: + cls._randbelow = cls._randbelow_without_getrandbits + break + + def _randbelow_with_getrandbits(self, n): + "Return a random int in the range [0,n). Defined for n > 0." 
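+        # Rejection sampling: draw k random bits and retry until the value
+        # falls below n.  Since 2**(k-1) <= n < 2**k, each draw succeeds
+        # with probability at least 0.5, so on average at most two
+        # getrandbits() calls are needed.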
+
+        k = n.bit_length()
+        r = self.getrandbits(k)  # 0 <= r < 2**k
+        while r >= n:
+            r = self.getrandbits(k)
+        return r
+
+    def _randbelow_without_getrandbits(self, n, maxsize=1<<BPF):
+        """Return a random int in the range [0,n).  Defined for n > 0.
+
+        The implementation does not use getrandbits, but only random.
+        """
+
+        random = self.random
+        if n >= maxsize:
+            from warnings import warn
+            warn("Underlying random() generator does not supply \n"
+                 "enough bits to choose from a population range this large.\n"
+                 "To remove the range limitation, add a getrandbits() method.")
+            return _floor(random() * n)
+        rem = maxsize % n
+        limit = (maxsize - rem) / maxsize  # int(limit * maxsize) % n == 0
+        r = random()
+        while r >= limit:
+            r = random()
+        return _floor(r * maxsize) % n
+
+    _randbelow = _randbelow_with_getrandbits
+
+
+    ## --------------------------------------------------------
+    ## ---- Methods below this point generate custom distributions
+    ## ---- based on the methods defined above. They do not
+    ## ---- directly touch the underlying generator and only
+    ## ---- access randomness through the methods: random(),
+    ## ---- getrandbits(), or _randbelow().
+
+
+    ## -------------------- bytes methods ---------------------
+
+    def randbytes(self, n):
+        """Generate n random bytes."""
+        return self.getrandbits(n * 8).to_bytes(n, 'little')
+
+
+    ## -------------------- integer methods -------------------
+
+    def randrange(self, start, stop=None, step=_ONE):
+        """Choose a random item from range(stop) or range(start, stop[, step]).
+
+        Roughly equivalent to ``choice(range(start, stop, step))`` but
+        supports arbitrarily large ranges and is optimized for common cases.
+
+        """
+
+        # This code is a bit messy to make it fast for the
+        # common case while still doing adequate error checking.
+        istart = _index(start)
+        if stop is None:
+            # We don't check for "step != 1" because it hasn't been
+            # type checked and converted to an integer yet.
+            if step is not _ONE:
+                raise TypeError("Missing a non-None stop argument")
+            if istart > 0:
+                return self._randbelow(istart)
+            raise ValueError("empty range for randrange()")
+
+        # Stop argument supplied.
+        istop = _index(stop)
+        width = istop - istart
+        istep = _index(step)
+        # Fast path.
+        if istep == 1:
+            if width > 0:
+                return istart + self._randbelow(width)
+            raise ValueError(f"empty range in randrange({start}, {stop})")
+
+        # Non-unit step argument supplied.
+        if istep > 0:
+            n = (width + istep - 1) // istep
+        elif istep < 0:
+            n = (width + istep + 1) // istep
+        else:
+            raise ValueError("zero step for randrange()")
+        if n <= 0:
+            raise ValueError(f"empty range in randrange({start}, {stop}, {step})")
+        return istart + istep * self._randbelow(n)
+
+    def randint(self, a, b):
+        """Return random integer in range [a, b], including both end points.
+        """
+        a = _index(a)
+        b = _index(b)
+        if b < a:
+            raise ValueError(f"empty range in randint({a}, {b})")
+        return a + self._randbelow(b - a + 1)
+
+
+    ## -------------------- sequence methods  -------------------
+
+    def choice(self, seq):
+        """Choose a random element from a non-empty sequence."""
+
+        # As an accommodation for NumPy, we don't use "if not seq"
+        # because bool(numpy.array()) raises a ValueError.
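+        # len() works for NumPy arrays too, and _randbelow() supplies an
+        # unbiased index in [0, len(seq)).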
+ if not len(seq): + raise IndexError('Cannot choose from an empty sequence') + return seq[self._randbelow(len(seq))] + + def shuffle(self, x): + """Shuffle list x in place, and return None.""" + + randbelow = self._randbelow + for i in reversed(range(1, len(x))): + # pick an element in x[:i+1] with which to exchange x[i] + j = randbelow(i + 1) + x[i], x[j] = x[j], x[i] + + def sample(self, population, k, *, counts=None): + """Chooses k unique random elements from a population sequence. + + Returns a new list containing elements from the population while + leaving the original population unchanged. The resulting list is + in selection order so that all sub-slices will also be valid random + samples. This allows raffle winners (the sample) to be partitioned + into grand prize and second place winners (the subslices). + + Members of the population need not be hashable or unique. If the + population contains repeats, then each occurrence is a possible + selection in the sample. + + Repeated elements can be specified one at a time or with the optional + counts parameter. For example: + + sample(['red', 'blue'], counts=[4, 2], k=5) + + is equivalent to: + + sample(['red', 'red', 'red', 'red', 'blue', 'blue'], k=5) + + To choose a sample from a range of integers, use range() for the + population argument. This is especially fast and space efficient + for sampling from a large population: + + sample(range(10000000), 60) + + """ + + # Sampling without replacement entails tracking either potential + # selections (the pool) in a list or previous selections in a set. + + # When the number of selections is small compared to the + # population, then tracking selections is efficient, requiring + # only a small set and an occasional reselection. For + # a larger number of selections, the pool tracking method is + # preferred since the list takes less space than the + # set and it doesn't suffer from frequent reselections. + + # The number of calls to _randbelow() is kept at or near k, the + # theoretical minimum. This is important because running time + # is dominated by _randbelow() and because it extracts the + # least entropy from the underlying random number generators. + + # Memory requirements are kept to the smaller of a k-length + # set or an n-length list. + + # There are other sampling algorithms that do not require + # auxiliary memory, but they were rejected because they made + # too many calls to _randbelow(), making them slower and + # causing them to eat more entropy than necessary. + + if not isinstance(population, _Sequence): + raise TypeError("Population must be a sequence. " + "For dicts or sets, use sorted(d).") + n = len(population) + if counts is not None: + cum_counts = list(_accumulate(counts)) + if len(cum_counts) != n: + raise ValueError('The number of counts does not match the population') + total = cum_counts.pop() if cum_counts else 0 + if not isinstance(total, int): + raise TypeError('Counts must be integers') + if total < 0: + raise ValueError('Counts must be non-negative') + selections = self.sample(range(total), k=k) + bisect = _bisect + return [population[bisect(cum_counts, s)] for s in selections] + randbelow = self._randbelow + if not 0 <= k <= n: + raise ValueError("Sample larger than population or is negative") + result = [None] * k + setsize = 21 # size of a small set minus size of an empty list + if k > 5: + setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets + if n <= setsize: + # An n-length list is smaller than a k-length set. 
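+            # Each pick is then overwritten by the last unselected item, so
+            # the pool shrinks from the tail (a partial Fisher-Yates pass).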
+ # Invariant: non-selected at pool[0 : n-i] + pool = list(population) + for i in range(k): + j = randbelow(n - i) + result[i] = pool[j] + pool[j] = pool[n - i - 1] # move non-selected item into vacancy + else: + selected = set() + selected_add = selected.add + for i in range(k): + j = randbelow(n) + while j in selected: + j = randbelow(n) + selected_add(j) + result[i] = population[j] + return result + + def choices(self, population, weights=None, *, cum_weights=None, k=1): + """Return a k sized list of population elements chosen with replacement. + + If the relative weights or cumulative weights are not specified, + the selections are made with equal probability. + + """ + random = self.random + n = len(population) + if cum_weights is None: + if weights is None: + floor = _floor + n += 0.0 # convert to float for a small speed improvement + return [population[floor(random() * n)] for i in _repeat(None, k)] + try: + cum_weights = list(_accumulate(weights)) + except TypeError: + if not isinstance(weights, int): + raise + k = weights + raise TypeError( + f'The number of choices must be a keyword argument: {k=}' + ) from None + elif weights is not None: + raise TypeError('Cannot specify both weights and cumulative weights') + if len(cum_weights) != n: + raise ValueError('The number of weights does not match the population') + total = cum_weights[-1] + 0.0 # convert to float + if total <= 0.0: + raise ValueError('Total of weights must be greater than zero') + if not _isfinite(total): + raise ValueError('Total of weights must be finite') + bisect = _bisect + hi = n - 1 + return [population[bisect(cum_weights, random() * total, 0, hi)] + for i in _repeat(None, k)] + + + ## -------------------- real-valued distributions ------------------- + + def uniform(self, a, b): + """Get a random number in the range [a, b) or [a, b] depending on rounding. + + The mean (expected value) and variance of the random variable are: + + E[X] = (a + b) / 2 + Var[X] = (b - a) ** 2 / 12 + + """ + return a + (b - a) * self.random() + + def triangular(self, low=0.0, high=1.0, mode=None): + """Triangular distribution. + + Continuous distribution bounded by given lower and upper limits, + and having a given mode value in-between. + + http://en.wikipedia.org/wiki/Triangular_distribution + + The mean (expected value) and variance of the random variable are: + + E[X] = (low + high + mode) / 3 + Var[X] = (low**2 + high**2 + mode**2 - low*high - low*mode - high*mode) / 18 + + """ + u = self.random() + try: + c = 0.5 if mode is None else (mode - low) / (high - low) + except ZeroDivisionError: + return low + if u > c: + u = 1.0 - u + c = 1.0 - c + low, high = high, low + return low + (high - low) * _sqrt(u * c) + + def normalvariate(self, mu=0.0, sigma=1.0): + """Normal distribution. + + mu is the mean, and sigma is the standard deviation. + + """ + # Uses Kinderman and Monahan method. Reference: Kinderman, + # A.J. and Monahan, J.F., "Computer generation of random + # variables using the ratio of uniform deviates", ACM Trans + # Math Software, 3, (1977), pp257-260. + + random = self.random + while True: + u1 = random() + u2 = 1.0 - random() + z = NV_MAGICCONST * (u1 - 0.5) / u2 + zz = z * z / 4.0 + if zz <= -_log(u2): + break + return mu + z * sigma + + def gauss(self, mu=0.0, sigma=1.0): + """Gaussian distribution. + + mu is the mean, and sigma is the standard deviation. This is + slightly faster than the normalvariate() function. + + Not thread-safe without a lock around calls. 
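+
+        Values are generated in pairs via the Box-Muller transform; the
+        second value is cached in gauss_next and returned by the next call.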
+ + """ + # When x and y are two variables from [0, 1), uniformly + # distributed, then + # + # cos(2*pi*x)*sqrt(-2*log(1-y)) + # sin(2*pi*x)*sqrt(-2*log(1-y)) + # + # are two *independent* variables with normal distribution + # (mu = 0, sigma = 1). + # (Lambert Meertens) + # (corrected version; bug discovered by Mike Miller, fixed by LM) + + # Multithreading note: When two threads call this function + # simultaneously, it is possible that they will receive the + # same return value. The window is very small though. To + # avoid this, you have to use a lock around all calls. (I + # didn't want to slow this down in the serial case by using a + # lock here.) + + random = self.random + z = self.gauss_next + self.gauss_next = None + if z is None: + x2pi = random() * TWOPI + g2rad = _sqrt(-2.0 * _log(1.0 - random())) + z = _cos(x2pi) * g2rad + self.gauss_next = _sin(x2pi) * g2rad + + return mu + z * sigma + + def lognormvariate(self, mu, sigma): + """Log normal distribution. + + If you take the natural logarithm of this distribution, you'll get a + normal distribution with mean mu and standard deviation sigma. + mu can have any value, and sigma must be greater than zero. + + """ + return _exp(self.normalvariate(mu, sigma)) + + def expovariate(self, lambd=1.0): + """Exponential distribution. + + lambd is 1.0 divided by the desired mean. It should be + nonzero. (The parameter would be called "lambda", but that is + a reserved word in Python.) Returned values range from 0 to + positive infinity if lambd is positive, and from negative + infinity to 0 if lambd is negative. + + The mean (expected value) and variance of the random variable are: + + E[X] = 1 / lambd + Var[X] = 1 / lambd ** 2 + + """ + # we use 1-random() instead of random() to preclude the + # possibility of taking the log of zero. + + return -_log(1.0 - self.random()) / lambd + + def vonmisesvariate(self, mu, kappa): + """Circular data distribution. + + mu is the mean angle, expressed in radians between 0 and 2*pi, and + kappa is the concentration parameter, which must be greater than or + equal to zero. If kappa is equal to zero, this distribution reduces + to a uniform random angle over the range 0 to 2*pi. + + """ + # Based upon an algorithm published in: Fisher, N.I., + # "Statistical Analysis of Circular Data", Cambridge + # University Press, 1993. + + # Thanks to Magnus Kessler for a correction to the + # implementation of step 4. + + random = self.random + if kappa <= 1e-6: + return TWOPI * random() + + s = 0.5 / kappa + r = s + _sqrt(1.0 + s * s) + + while True: + u1 = random() + z = _cos(_pi * u1) + + d = z / (r + z) + u2 = random() + if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): + break + + q = 1.0 / r + f = (q + z) / (1.0 + q * z) + u3 = random() + if u3 > 0.5: + theta = (mu + _acos(f)) % TWOPI + else: + theta = (mu - _acos(f)) % TWOPI + + return theta + + def gammavariate(self, alpha, beta): + """Gamma distribution. Not the gamma function! + + Conditions on the parameters are alpha > 0 and beta > 0. 
+ + The probability distribution function is: + + x ** (alpha - 1) * math.exp(-x / beta) + pdf(x) = -------------------------------------- + math.gamma(alpha) * beta ** alpha + + The mean (expected value) and variance of the random variable are: + + E[X] = alpha * beta + Var[X] = alpha * beta ** 2 + + """ + + # Warning: a few older sources define the gamma distribution in terms + # of alpha > -1.0 + if alpha <= 0.0 or beta <= 0.0: + raise ValueError('gammavariate: alpha and beta must be > 0.0') + + random = self.random + if alpha > 1.0: + + # Uses R.C.H. Cheng, "The generation of Gamma + # variables with non-integral shape parameters", + # Applied Statistics, (1977), 26, No. 1, p71-74 + + ainv = _sqrt(2.0 * alpha - 1.0) + bbb = alpha - LOG4 + ccc = alpha + ainv + + while True: + u1 = random() + if not 1e-7 < u1 < 0.9999999: + continue + u2 = 1.0 - random() + v = _log(u1 / (1.0 - u1)) / ainv + x = alpha * _exp(v) + z = u1 * u1 * u2 + r = bbb + ccc * v - x + if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z): + return x * beta + + elif alpha == 1.0: + # expovariate(1/beta) + return -_log(1.0 - random()) * beta + + else: + # alpha is between 0 and 1 (exclusive) + # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle + while True: + u = random() + b = (_e + alpha) / _e + p = b * u + if p <= 1.0: + x = p ** (1.0 / alpha) + else: + x = -_log((b - p) / alpha) + u1 = random() + if p > 1.0: + if u1 <= x ** (alpha - 1.0): + break + elif u1 <= _exp(-x): + break + return x * beta + + def betavariate(self, alpha, beta): + """Beta distribution. + + Conditions on the parameters are alpha > 0 and beta > 0. + Returned values range between 0 and 1. + + The mean (expected value) and variance of the random variable are: + + E[X] = alpha / (alpha + beta) + Var[X] = alpha * beta / ((alpha + beta)**2 * (alpha + beta + 1)) + + """ + ## See + ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html + ## for Ivan Frohne's insightful analysis of why the original implementation: + ## + ## def betavariate(self, alpha, beta): + ## # Discrete Event Simulation in C, pp 87-88. + ## + ## y = self.expovariate(alpha) + ## z = self.expovariate(1.0/beta) + ## return z/(y+z) + ## + ## was dead wrong, and how it probably got that way. + + # This version due to Janne Sinkkonen, and matches all the std + # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). + y = self.gammavariate(alpha, 1.0) + if y: + return y / (y + self.gammavariate(beta, 1.0)) + return 0.0 + + def paretovariate(self, alpha): + """Pareto distribution. alpha is the shape parameter.""" + # Jain, pg. 495 + + u = 1.0 - self.random() + return u ** (-1.0 / alpha) + + def weibullvariate(self, alpha, beta): + """Weibull distribution. + + alpha is the scale parameter and beta is the shape parameter. + + """ + # Jain, pg. 499; bug fix courtesy Bill Arms + + u = 1.0 - self.random() + return alpha * (-_log(u)) ** (1.0 / beta) + + + ## -------------------- discrete distributions --------------------- + + def binomialvariate(self, n=1, p=0.5): + """Binomial random variable. 
+ + Gives the number of successes for *n* independent trials + with the probability of success in each trial being *p*: + + sum(random() < p for i in range(n)) + + Returns an integer in the range: + + 0 <= X <= n + + The integer is chosen with the probability: + + P(X == k) = math.comb(n, k) * p ** k * (1 - p) ** (n - k) + + The mean (expected value) and variance of the random variable are: + + E[X] = n * p + Var[X] = n * p * (1 - p) + + """ + # Error check inputs and handle edge cases + if n < 0: + raise ValueError("n must be non-negative") + if p <= 0.0 or p >= 1.0: + if p == 0.0: + return 0 + if p == 1.0: + return n + raise ValueError("p must be in the range 0.0 <= p <= 1.0") + + random = self.random + + # Fast path for a common case + if n == 1: + return _index(random() < p) + + # Exploit symmetry to establish: p <= 0.5 + if p > 0.5: + return n - self.binomialvariate(n, 1.0 - p) + + if n * p < 10.0: + # BG: Geometric method by Devroye with running time of O(np). + # https://dl.acm.org/doi/pdf/10.1145/42372.42381 + x = y = 0 + c = _log2(1.0 - p) + if not c: + return x + while True: + y += _floor(_log2(random()) / c) + 1 + if y > n: + return x + x += 1 + + # BTRS: Transformed rejection with squeeze method by Wolfgang Hörmann + # https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.47.8407&rep=rep1&type=pdf + assert n*p >= 10.0 and p <= 0.5 + setup_complete = False + + spq = _sqrt(n * p * (1.0 - p)) # Standard deviation of the distribution + b = 1.15 + 2.53 * spq + a = -0.0873 + 0.0248 * b + 0.01 * p + c = n * p + 0.5 + vr = 0.92 - 4.2 / b + + while True: + + u = random() + u -= 0.5 + us = 0.5 - _fabs(u) + k = _floor((2.0 * a / us + b) * u + c) + if k < 0 or k > n: + continue + + # The early-out "squeeze" test substantially reduces + # the number of acceptance condition evaluations. + v = random() + if us >= 0.07 and v <= vr: + return k + + # Acceptance-rejection test. + # Note, the original paper erroneously omits the call to log(v) + # when comparing to the log of the rescaled binomial distribution. + if not setup_complete: + alpha = (2.83 + 5.1 / b) * spq + lpq = _log(p / (1.0 - p)) + m = _floor((n + 1) * p) # Mode of the distribution + h = _lgamma(m + 1) + _lgamma(n - m + 1) + setup_complete = True # Only needs to be done once + v *= alpha / (a / (us * us) + b) + if _log(v) <= h - _lgamma(k + 1) - _lgamma(n - k + 1) + (k - m) * lpq: + return k + + +## ------------------------------------------------------------------ +## --------------- Operating System Random Source ------------------ + + +class SystemRandom(Random): + """Alternate random number generator using sources provided + by the operating system (such as /dev/urandom on Unix or + CryptGenRandom on Windows). + + Not available on all systems (see os.urandom() for details). + + """ + + def random(self): + """Get the next random number in the range 0.0 <= X < 1.0.""" + return (int.from_bytes(_urandom(7)) >> 3) * RECIP_BPF + + def getrandbits(self, k): + """getrandbits(k) -> x. Generates an int with k random bits.""" + if k < 0: + raise ValueError('number of bits must be non-negative') + numbytes = (k + 7) // 8 # bits / 8 and rounded up + x = int.from_bytes(_urandom(numbytes)) + return x >> (numbytes * 8 - k) # trim excess bits + + def randbytes(self, n): + """Generate n random bytes.""" + # os.urandom(n) fails with ValueError for n < 0 + # and returns an empty bytes string for n == 0. + return _urandom(n) + + def seed(self, *args, **kwds): + "Stub method. Not used for a system random number generator." 
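+        # Entropy comes from os.urandom() on every call, so there is no
+        # internal state to seed; this is intentionally a no-op.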
+ return None + + def _notimplemented(self, *args, **kwds): + "Method should not be called for a system random number generator." + raise NotImplementedError('System entropy source does not have state.') + getstate = setstate = _notimplemented + + +# ---------------------------------------------------------------------- +# Create one instance, seeded from current time, and export its methods +# as module-level functions. The functions share state across all uses +# (both in the user's code and in the Python libraries), but that's fine +# for most programs and is easier for the casual user than making them +# instantiate their own Random() instance. + +_inst = Random() +seed = _inst.seed +random = _inst.random +uniform = _inst.uniform +triangular = _inst.triangular +randint = _inst.randint +choice = _inst.choice +randrange = _inst.randrange +sample = _inst.sample +shuffle = _inst.shuffle +choices = _inst.choices +normalvariate = _inst.normalvariate +lognormvariate = _inst.lognormvariate +expovariate = _inst.expovariate +vonmisesvariate = _inst.vonmisesvariate +gammavariate = _inst.gammavariate +gauss = _inst.gauss +betavariate = _inst.betavariate +binomialvariate = _inst.binomialvariate +paretovariate = _inst.paretovariate +weibullvariate = _inst.weibullvariate +getstate = _inst.getstate +setstate = _inst.setstate +getrandbits = _inst.getrandbits +randbytes = _inst.randbytes + + +## ------------------------------------------------------ +## ----------------- test program ----------------------- + +def _test_generator(n, func, args): + from statistics import stdev, fmean as mean + from time import perf_counter + + t0 = perf_counter() + data = [func(*args) for i in _repeat(None, n)] + t1 = perf_counter() + + xbar = mean(data) + sigma = stdev(data, xbar) + low = min(data) + high = max(data) + + print(f'{t1 - t0:.3f} sec, {n} times {func.__name__}{args!r}') + print('avg %g, stddev %g, min %g, max %g\n' % (xbar, sigma, low, high)) + + +def _test(N=10_000): + _test_generator(N, random, ()) + _test_generator(N, normalvariate, (0.0, 1.0)) + _test_generator(N, lognormvariate, (0.0, 1.0)) + _test_generator(N, vonmisesvariate, (0.0, 1.0)) + _test_generator(N, binomialvariate, (15, 0.60)) + _test_generator(N, binomialvariate, (100, 0.75)) + _test_generator(N, gammavariate, (0.01, 1.0)) + _test_generator(N, gammavariate, (0.1, 1.0)) + _test_generator(N, gammavariate, (0.1, 2.0)) + _test_generator(N, gammavariate, (0.5, 1.0)) + _test_generator(N, gammavariate, (0.9, 1.0)) + _test_generator(N, gammavariate, (1.0, 1.0)) + _test_generator(N, gammavariate, (2.0, 1.0)) + _test_generator(N, gammavariate, (20.0, 1.0)) + _test_generator(N, gammavariate, (200.0, 1.0)) + _test_generator(N, gauss, (0.0, 1.0)) + _test_generator(N, betavariate, (3.0, 3.0)) + _test_generator(N, triangular, (0.0, 1.0, 1.0 / 3.0)) + + +## ------------------------------------------------------ +## ------------------ fork support --------------------- + +if hasattr(_os, "fork"): + _os.register_at_fork(after_in_child=_inst.seed) + + +# ------------------------------------------------------ +# -------------- command-line interface ---------------- + + +def _parse_args(arg_list: list[str] | None): + import argparse + parser = argparse.ArgumentParser( + formatter_class=argparse.RawTextHelpFormatter, color=True) + group = parser.add_mutually_exclusive_group() + group.add_argument( + "-c", "--choice", nargs="+", + help="print a random choice") + group.add_argument( + "-i", "--integer", type=int, metavar="N", + help="print a random integer 
between 1 and N inclusive") + group.add_argument( + "-f", "--float", type=float, metavar="N", + help="print a random floating-point number between 0 and N inclusive") + group.add_argument( + "--test", type=int, const=10_000, nargs="?", + help=argparse.SUPPRESS) + parser.add_argument("input", nargs="*", + help="""\ +if no options given, output depends on the input + string or multiple: same as --choice + integer: same as --integer + float: same as --float""") + args = parser.parse_args(arg_list) + return args, parser.format_help() + + +def main(arg_list: list[str] | None = None) -> int | str: + args, help_text = _parse_args(arg_list) + + # Explicit arguments + if args.choice: + return choice(args.choice) + + if args.integer is not None: + return randint(1, args.integer) + + if args.float is not None: + return uniform(0, args.float) + + if args.test: + _test(args.test) + return "" + + # No explicit argument, select based on input + if len(args.input) == 1: + val = args.input[0] + try: + # Is it an integer? + val = int(val) + return randint(1, val) + except ValueError: + try: + # Is it a float? + val = float(val) + return uniform(0, val) + except ValueError: + # Split in case of space-separated string: "a b c" + return choice(val.split()) + + if len(args.input) >= 2: + return choice(args.input) + + return help_text + + +if __name__ == '__main__': + print(main()) diff --git a/Python314_4_x86_Template/Lib/re/__init__.py b/Python314_4_x86_Template/Lib/re/__init__.py new file mode 100644 index 00000000..af2808a7 --- /dev/null +++ b/Python314_4_x86_Template/Lib/re/__init__.py @@ -0,0 +1,428 @@ +# +# Secret Labs' Regular Expression Engine +# +# re-compatible interface for the sre matching engine +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# This version of the SRE library can be redistributed under CNRI's +# Python 1.6 license. For any other use, please contact Secret Labs +# AB (info@pythonware.com). +# +# Portions of this engine have been developed in cooperation with +# CNRI. Hewlett-Packard provided funding for 1.6 integration and +# other compatibility work. +# + +r"""Support for regular expressions (RE). + +This module provides regular expression matching operations similar to +those found in Perl. It supports both 8-bit and Unicode strings; both +the pattern and the strings being processed can contain null bytes and +characters outside the US ASCII range. + +Regular expressions can contain both special and ordinary characters. +Most ordinary characters, like "A", "a", or "0", are the simplest +regular expressions; they simply match themselves. You can +concatenate ordinary characters, so last matches the string 'last'. + +The special characters are: + "." Matches any character except a newline. + "^" Matches the start of the string. + "$" Matches the end of the string or just before the newline at + the end of the string. + "*" Matches 0 or more (greedy) repetitions of the preceding RE. + Greedy means that it will match as many repetitions as possible. + "+" Matches 1 or more (greedy) repetitions of the preceding RE. + "?" Matches 0 or 1 (greedy) of the preceding RE. + *?,+?,?? Non-greedy versions of the previous three special characters. + {m,n} Matches from m to n repetitions of the preceding RE. + {m,n}? Non-greedy version of the above. + "\\" Either escapes special characters or signals a special sequence. + [] Indicates a set of characters. + A "^" as the first character indicates a complementing set. + "|" A|B, creates an RE that will match either A or B. 
+    (...)    Matches the RE inside the parentheses.
+             The contents can be retrieved or matched later in the string.
+    (?aiLmsux) The letters set the corresponding flags defined below.
+    (?:...)  Non-grouping version of regular parentheses.
+    (?P<name>...) The substring matched by the group is accessible by name.
+    (?P=name) Matches the text matched earlier by the group named name.
+    (?#...)  A comment; ignored.
+    (?=...)  Matches if ... matches next, but doesn't consume the string.
+    (?!...)  Matches if ... doesn't match next.
+    (?<=...) Matches if preceded by ... (must be fixed length).
+    (?<!...) Matches if not preceded by ... (must be fixed length).
+    if len(_cache) >= _MAXCACHE:
+        # Drop the least recently used item.
+        # next(iter(_cache)) is known to have linear amortized time,
+        # but it is used here to avoid a dependency from using OrderedDict.
+        # For the small _MAXCACHE value it doesn't make much of a difference.
+        try:
+            del _cache[next(iter(_cache))]
+        except (StopIteration, RuntimeError, KeyError):
+            pass
+        # Append to the end.
+        _cache[key] = p
+
+    if len(_cache2) >= _MAXCACHE2:
+        # Drop the oldest item.
+        try:
+            del _cache2[next(iter(_cache2))]
+        except (StopIteration, RuntimeError, KeyError):
+            pass
+    _cache2[key] = p
+    return p
+
+@functools.lru_cache(_MAXCACHE)
+def _compile_template(pattern, repl):
+    # internal: compile replacement pattern
+    return _sre.template(pattern, _parser.parse_template(repl, pattern))
+
+# register myself for pickling
+
+import copyreg
+
+def _pickle(p):
+    return _compile, (p.pattern, p.flags)
+
+copyreg.pickle(Pattern, _pickle, _compile)
+
+# --------------------------------------------------------------------
+# experimental stuff (see python-dev discussions for details)
+
+class Scanner:
+    def __init__(self, lexicon, flags=0):
+        from ._constants import BRANCH, SUBPATTERN
+        if isinstance(flags, RegexFlag):
+            flags = flags.value
+        self.lexicon = lexicon
+        # combine phrases into a compound pattern
+        p = []
+        s = _parser.State()
+        s.flags = flags
+        for phrase, action in lexicon:
+            gid = s.opengroup()
+            p.append(_parser.SubPattern(s, [
+                (SUBPATTERN, (gid, 0, 0, _parser.parse(phrase, flags))),
+            ]))
+            s.closegroup(gid, p[-1])
+        p = _parser.SubPattern(s, [(BRANCH, (None, p))])
+        self.scanner = _compiler.compile(p)
+    def scan(self, string):
+        result = []
+        append = result.append
+        match = self.scanner.scanner(string).match
+        i = 0
+        while True:
+            m = match()
+            if not m:
+                break
+            j = m.end()
+            if i == j:
+                break
+            action = self.lexicon[m.lastindex-1][1]
+            if callable(action):
+                self.match = m
+                action = action(self, m.group())
+            if action is not None:
+                append(action)
+            i = j
+        return result, string[i:]
diff --git a/Python314_4_x86_Template/Lib/re/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/re/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..c116b7ce
Binary files /dev/null and b/Python314_4_x86_Template/Lib/re/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/re/__pycache__/_casefix.cpython-314.pyc b/Python314_4_x86_Template/Lib/re/__pycache__/_casefix.cpython-314.pyc
new file mode 100644
index 00000000..36a1f378
Binary files /dev/null and b/Python314_4_x86_Template/Lib/re/__pycache__/_casefix.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/re/__pycache__/_compiler.cpython-314.pyc b/Python314_4_x86_Template/Lib/re/__pycache__/_compiler.cpython-314.pyc
new file mode 100644
index 00000000..ff4d7bba
Binary files /dev/null and b/Python314_4_x86_Template/Lib/re/__pycache__/_compiler.cpython-314.pyc differ
diff --git 
a/Python314_4_x86_Template/Lib/re/__pycache__/_constants.cpython-314.pyc b/Python314_4_x86_Template/Lib/re/__pycache__/_constants.cpython-314.pyc new file mode 100644 index 00000000..79aae925 Binary files /dev/null and b/Python314_4_x86_Template/Lib/re/__pycache__/_constants.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/re/__pycache__/_parser.cpython-314.pyc b/Python314_4_x86_Template/Lib/re/__pycache__/_parser.cpython-314.pyc new file mode 100644 index 00000000..cc6aaf8a Binary files /dev/null and b/Python314_4_x86_Template/Lib/re/__pycache__/_parser.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/re/_casefix.py b/Python314_4_x86_Template/Lib/re/_casefix.py similarity index 100% rename from Python313_13_x86_Template/Lib/re/_casefix.py rename to Python314_4_x86_Template/Lib/re/_casefix.py diff --git a/Python314_4_x86_Template/Lib/re/_compiler.py b/Python314_4_x86_Template/Lib/re/_compiler.py new file mode 100644 index 00000000..20dd561d --- /dev/null +++ b/Python314_4_x86_Template/Lib/re/_compiler.py @@ -0,0 +1,782 @@ +# +# Secret Labs' Regular Expression Engine +# +# convert template to internal format +# +# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. +# +# See the __init__.py file for information on usage and redistribution. +# + +"""Internal support module for sre""" + +import _sre +from . import _parser +from ._constants import * +from ._casefix import _EXTRA_CASES + +assert _sre.MAGIC == MAGIC, "SRE module mismatch" + +_LITERAL_CODES = {LITERAL, NOT_LITERAL} +_SUCCESS_CODES = {SUCCESS, FAILURE} +_ASSERT_CODES = {ASSERT, ASSERT_NOT} +_UNIT_CODES = _LITERAL_CODES | {ANY, IN} + +_REPEATING_CODES = { + MIN_REPEAT: (REPEAT, MIN_UNTIL, MIN_REPEAT_ONE), + MAX_REPEAT: (REPEAT, MAX_UNTIL, REPEAT_ONE), + POSSESSIVE_REPEAT: (POSSESSIVE_REPEAT, SUCCESS, POSSESSIVE_REPEAT_ONE), +} + +_CHARSET_ALL = [(NEGATE, None)] + +def _combine_flags(flags, add_flags, del_flags, + TYPE_FLAGS=_parser.TYPE_FLAGS): + if add_flags & TYPE_FLAGS: + flags &= ~TYPE_FLAGS + return (flags | add_flags) & ~del_flags + +def _compile(code, pattern, flags): + # internal: compile a (sub)pattern + emit = code.append + _len = len + LITERAL_CODES = _LITERAL_CODES + REPEATING_CODES = _REPEATING_CODES + SUCCESS_CODES = _SUCCESS_CODES + ASSERT_CODES = _ASSERT_CODES + iscased = None + tolower = None + fixes = None + if flags & SRE_FLAG_IGNORECASE and not flags & SRE_FLAG_LOCALE: + if flags & SRE_FLAG_UNICODE: + iscased = _sre.unicode_iscased + tolower = _sre.unicode_tolower + fixes = _EXTRA_CASES + else: + iscased = _sre.ascii_iscased + tolower = _sre.ascii_tolower + for op, av in pattern: + if op in LITERAL_CODES: + if not flags & SRE_FLAG_IGNORECASE: + emit(op) + emit(av) + elif flags & SRE_FLAG_LOCALE: + emit(OP_LOCALE_IGNORE[op]) + emit(av) + elif not iscased(av): + emit(op) + emit(av) + else: + lo = tolower(av) + if not fixes: # ascii + emit(OP_IGNORE[op]) + emit(lo) + elif lo not in fixes: + emit(OP_UNICODE_IGNORE[op]) + emit(lo) + else: + emit(IN_UNI_IGNORE) + skip = _len(code); emit(0) + if op is NOT_LITERAL: + emit(NEGATE) + for k in (lo,) + fixes[lo]: + emit(LITERAL) + emit(k) + emit(FAILURE) + code[skip] = _len(code) - skip + elif op is IN: + charset, hascased = _optimize_charset(av, iscased, tolower, fixes) + if not charset: + emit(FAILURE) + elif charset == _CHARSET_ALL: + emit(ANY_ALL) + else: + if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE: + emit(IN_LOC_IGNORE) + elif not hascased: + emit(IN) + elif not fixes: # ascii + emit(IN_IGNORE) + else: + 
emit(IN_UNI_IGNORE) + skip = _len(code); emit(0) + _compile_charset(charset, flags, code) + code[skip] = _len(code) - skip + elif op is ANY: + if flags & SRE_FLAG_DOTALL: + emit(ANY_ALL) + else: + emit(ANY) + elif op in REPEATING_CODES: + if _simple(av[2]): + emit(REPEATING_CODES[op][2]) + skip = _len(code); emit(0) + emit(av[0]) + emit(av[1]) + _compile(code, av[2], flags) + emit(SUCCESS) + code[skip] = _len(code) - skip + else: + emit(REPEATING_CODES[op][0]) + skip = _len(code); emit(0) + emit(av[0]) + emit(av[1]) + _compile(code, av[2], flags) + code[skip] = _len(code) - skip + emit(REPEATING_CODES[op][1]) + elif op is SUBPATTERN: + group, add_flags, del_flags, p = av + if group: + emit(MARK) + emit((group-1)*2) + # _compile_info(code, p, _combine_flags(flags, add_flags, del_flags)) + _compile(code, p, _combine_flags(flags, add_flags, del_flags)) + if group: + emit(MARK) + emit((group-1)*2+1) + elif op is ATOMIC_GROUP: + # Atomic Groups are handled by starting with an Atomic + # Group op code, then putting in the atomic group pattern + # and finally a success op code to tell any repeat + # operations within the Atomic Group to stop eating and + # pop their stack if they reach it + emit(ATOMIC_GROUP) + skip = _len(code); emit(0) + _compile(code, av, flags) + emit(SUCCESS) + code[skip] = _len(code) - skip + elif op in SUCCESS_CODES: + emit(op) + elif op in ASSERT_CODES: + emit(op) + skip = _len(code); emit(0) + if av[0] >= 0: + emit(0) # look ahead + else: + lo, hi = av[1].getwidth() + if lo > MAXCODE: + raise error("looks too much behind") + if lo != hi: + raise PatternError("look-behind requires fixed-width pattern") + emit(lo) # look behind + _compile(code, av[1], flags) + emit(SUCCESS) + code[skip] = _len(code) - skip + elif op is AT: + emit(op) + if flags & SRE_FLAG_MULTILINE: + av = AT_MULTILINE.get(av, av) + if flags & SRE_FLAG_LOCALE: + av = AT_LOCALE.get(av, av) + elif flags & SRE_FLAG_UNICODE: + av = AT_UNICODE.get(av, av) + emit(av) + elif op is BRANCH: + emit(op) + tail = [] + tailappend = tail.append + for av in av[1]: + skip = _len(code); emit(0) + # _compile_info(code, av, flags) + _compile(code, av, flags) + emit(JUMP) + tailappend(_len(code)); emit(0) + code[skip] = _len(code) - skip + emit(FAILURE) # end of branch + for tail in tail: + code[tail] = _len(code) - tail + elif op is CATEGORY: + emit(op) + if flags & SRE_FLAG_LOCALE: + av = CH_LOCALE[av] + elif flags & SRE_FLAG_UNICODE: + av = CH_UNICODE[av] + emit(av) + elif op is GROUPREF: + if not flags & SRE_FLAG_IGNORECASE: + emit(op) + elif flags & SRE_FLAG_LOCALE: + emit(GROUPREF_LOC_IGNORE) + elif not fixes: # ascii + emit(GROUPREF_IGNORE) + else: + emit(GROUPREF_UNI_IGNORE) + emit(av-1) + elif op is GROUPREF_EXISTS: + emit(op) + emit(av[0]-1) + skipyes = _len(code); emit(0) + _compile(code, av[1], flags) + if av[2]: + emit(JUMP) + skipno = _len(code); emit(0) + code[skipyes] = _len(code) - skipyes + 1 + _compile(code, av[2], flags) + code[skipno] = _len(code) - skipno + else: + code[skipyes] = _len(code) - skipyes + 1 + else: + raise PatternError(f"internal: unsupported operand type {op!r}") + +def _compile_charset(charset, flags, code): + # compile charset subprogram + emit = code.append + for op, av in charset: + emit(op) + if op is NEGATE: + pass + elif op is LITERAL: + emit(av) + elif op is RANGE or op is RANGE_UNI_IGNORE: + emit(av[0]) + emit(av[1]) + elif op is CHARSET: + code.extend(av) + elif op is BIGCHARSET: + code.extend(av) + elif op is CATEGORY: + if flags & SRE_FLAG_LOCALE: + emit(CH_LOCALE[av]) + elif 
flags & SRE_FLAG_UNICODE: + emit(CH_UNICODE[av]) + else: + emit(av) + else: + raise PatternError(f"internal: unsupported set operator {op!r}") + emit(FAILURE) + +def _optimize_charset(charset, iscased=None, fixup=None, fixes=None): + # internal: optimize character set + out = [] + tail = [] + charmap = bytearray(256) + hascased = False + for op, av in charset: + while True: + try: + if op is LITERAL: + if fixup: # IGNORECASE and not LOCALE + av = fixup(av) + charmap[av] = 1 + if fixes and av in fixes: + for k in fixes[av]: + charmap[k] = 1 + if not hascased and iscased(av): + hascased = True + else: + charmap[av] = 1 + elif op is RANGE: + r = range(av[0], av[1]+1) + if fixup: # IGNORECASE and not LOCALE + if fixes: + for i in map(fixup, r): + charmap[i] = 1 + if i in fixes: + for k in fixes[i]: + charmap[k] = 1 + else: + for i in map(fixup, r): + charmap[i] = 1 + if not hascased: + hascased = any(map(iscased, r)) + else: + for i in r: + charmap[i] = 1 + elif op is NEGATE: + out.append((op, av)) + elif op is CATEGORY and tail and (CATEGORY, CH_NEGATE[av]) in tail: + # Optimize [\s\S] etc. + out = [] if out else _CHARSET_ALL + return out, False + else: + tail.append((op, av)) + except IndexError: + if len(charmap) == 256: + # character set contains non-UCS1 character codes + charmap += b'\0' * 0xff00 + continue + # Character set contains non-BMP character codes. + # For range, all BMP characters in the range are already + # processed. + if fixup: # IGNORECASE and not LOCALE + # For now, IN_UNI_IGNORE+LITERAL and + # IN_UNI_IGNORE+RANGE_UNI_IGNORE work for all non-BMP + # characters, because two characters (at least one of + # which is not in the BMP) match case-insensitively + # if and only if: + # 1) c1.lower() == c2.lower() + # 2) c1.lower() == c2 or c1.lower().upper() == c2 + # Also, both c.lower() and c.lower().upper() are single + # characters for every non-BMP character. + if op is RANGE: + if fixes: # not ASCII + op = RANGE_UNI_IGNORE + hascased = True + else: + assert op is LITERAL + if not hascased and iscased(av): + hascased = True + tail.append((op, av)) + break + + # compress character map + runs = [] + q = 0 + while True: + p = charmap.find(1, q) + if p < 0: + break + if len(runs) >= 2: + runs = None + break + q = charmap.find(0, p) + if q < 0: + runs.append((p, len(charmap))) + break + runs.append((p, q)) + if runs is not None: + # use literal/range + for p, q in runs: + if q - p == 1: + out.append((LITERAL, p)) + else: + out.append((RANGE, (p, q - 1))) + out += tail + # if the case was changed or new representation is more compact + if hascased or len(out) < len(charset): + return out, hascased + # else original character set is good enough + return charset, hascased + + # use bitmap + if len(charmap) == 256: + data = _mk_bitmap(charmap) + out.append((CHARSET, data)) + out += tail + return out, hascased + + # To represent a big charset, first a bitmap of all characters in the + # set is constructed. Then, this bitmap is sliced into chunks of 256 + # characters, duplicate chunks are eliminated, and each chunk is + # given a number. In the compiled expression, the charset is + # represented by a 32-bit word sequence, consisting of one word for + # the number of different chunks, a sequence of 256 bytes (64 words) + # of chunk numbers indexed by their original chunk position, and a + # sequence of 256-bit chunks (8 words each). + + # Compression is normally good: in a typical charset, large ranges of + # Unicode will be either completely excluded (e.g.
if only Cyrillic + # letters are to be matched), or completely included (e.g. if large + # subranges of Kanji match). These ranges will be represented by + # chunks of all one-bits or all zero-bits. + + # Matching can also be done efficiently: the more significant byte of + # the Unicode character is an index into the chunk-number mapping, and the + # less significant byte is a bit index in the chunk (just like the + # CHARSET matching). + + charmap = bytes(charmap) # should be hashable + comps = {} + mapping = bytearray(256) + block = 0 + data = bytearray() + for i in range(0, 65536, 256): + chunk = charmap[i: i + 256] + if chunk in comps: + mapping[i // 256] = comps[chunk] + else: + mapping[i // 256] = comps[chunk] = block + block += 1 + data += chunk + data = _mk_bitmap(data) + data[0:0] = [block] + _bytes_to_codes(mapping) + out.append((BIGCHARSET, data)) + out += tail + return out, hascased + +_CODEBITS = _sre.CODESIZE * 8 +MAXCODE = (1 << _CODEBITS) - 1 +_BITS_TRANS = b'0' + b'1' * 255 +def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int): + s = bits.translate(_BITS_TRANS)[::-1] + return [_int(s[i - _CODEBITS: i], 2) + for i in range(len(s), 0, -_CODEBITS)] + +def _bytes_to_codes(b): + # Convert block indices to word array + a = memoryview(b).cast('I') + assert a.itemsize == _sre.CODESIZE + assert len(a) * a.itemsize == len(b) + return a.tolist() + +def _simple(p): + # check if this subpattern is a "simple" operator + if len(p) != 1: + return False + op, av = p[0] + if op is SUBPATTERN: + return av[0] is None and _simple(av[-1]) + return op in _UNIT_CODES + +def _generate_overlap_table(prefix): + """ + Generate an overlap table for the following prefix. + An overlap table is a table of the same size as the prefix which + informs about the potential self-overlap for each index in the prefix: + - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...]
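The word layout consumed by the CHARSET and BIGCHARSET opcodes can be checked by hand: `_mk_bitmap` turns a bytearray of 0/1 flags into code-sized integer words, lowest 256-bit chunk first. A small worked example, assuming 32-bit code words (`_sre.CODESIZE == 4`):

    CODEBITS = 32                       # assumes _sre.CODESIZE == 4
    bits = bytearray(256)
    for ch in b"abc":                   # set bits 97, 98 and 99
        bits[ch] = 1
    s = bits.translate(b'0' + b'1' * 255)[::-1]
    words = [int(s[i - CODEBITS:i], 2) for i in range(len(s), 0, -CODEBITS)]
    assert len(words) == 8              # 256 bits / 32 bits per word
    assert words[3] == (1 << 1) | (1 << 2) | (1 << 3)   # 97..99 sit in word 3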
+ - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with + prefix[0:k] + """ + table = [0] * len(prefix) + for i in range(1, len(prefix)): + idx = table[i - 1] + while prefix[i] != prefix[idx]: + if idx == 0: + table[i] = 0 + break + idx = table[idx - 1] + else: + table[i] = idx + 1 + return table + +def _get_iscased(flags): + if not flags & SRE_FLAG_IGNORECASE: + return None + elif flags & SRE_FLAG_UNICODE: + return _sre.unicode_iscased + else: + return _sre.ascii_iscased + +def _get_literal_prefix(pattern, flags): + # look for literal prefix + prefix = [] + prefixappend = prefix.append + prefix_skip = None + iscased = _get_iscased(flags) + for op, av in pattern.data: + if op is LITERAL: + if iscased and iscased(av): + break + prefixappend(av) + elif op is SUBPATTERN: + group, add_flags, del_flags, p = av + flags1 = _combine_flags(flags, add_flags, del_flags) + if flags1 & SRE_FLAG_IGNORECASE and flags1 & SRE_FLAG_LOCALE: + break + prefix1, prefix_skip1, got_all = _get_literal_prefix(p, flags1) + if prefix_skip is None: + if group is not None: + prefix_skip = len(prefix) + elif prefix_skip1 is not None: + prefix_skip = len(prefix) + prefix_skip1 + prefix.extend(prefix1) + if not got_all: + break + else: + break + else: + return prefix, prefix_skip, True + return prefix, prefix_skip, False + +def _get_charset_prefix(pattern, flags): + while True: + if not pattern.data: + return None + op, av = pattern.data[0] + if op is not SUBPATTERN: + break + group, add_flags, del_flags, pattern = av + flags = _combine_flags(flags, add_flags, del_flags) + if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE: + return None + + iscased = _get_iscased(flags) + if op is LITERAL: + if iscased and iscased(av): + return None + return [(op, av)] + elif op is BRANCH: + charset = [] + charsetappend = charset.append + for p in av[1]: + if not p: + return None + op, av = p[0] + if op is LITERAL and not (iscased and iscased(av)): + charsetappend((op, av)) + else: + return None + return charset + elif op is IN: + charset = av + if iscased: + for op, av in charset: + if op is LITERAL: + if iscased(av): + return None + elif op is RANGE: + if av[1] > 0xffff: + return None + if any(map(iscased, range(av[0], av[1]+1))): + return None + return charset + return None + +def _compile_info(code, pattern, flags): + # internal: compile an info block. 
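The table built by `_generate_overlap_table` above is the classic Knuth-Morris-Pratt failure function: `table[i]` is the length of the longest proper prefix of `prefix[:i+1]` that is also its suffix, which lets the matcher resume mid-prefix instead of rescanning after a partial match on the literal prefix. A standalone copy of the loop with hand-checked cases:

    def overlap_table(prefix):          # same construction as above
        table = [0] * len(prefix)
        for i in range(1, len(prefix)):
            idx = table[i - 1]
            while prefix[i] != prefix[idx]:
                if idx == 0:
                    table[i] = 0
                    break
                idx = table[idx - 1]
            else:
                table[i] = idx + 1
        return table

    assert overlap_table(list(b"ababc")) == [0, 0, 1, 2, 0]
    assert overlap_table(list(b"aaaa")) == [0, 1, 2, 3]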
in the current version, + # this contains min/max pattern width, and an optional literal + # prefix or a character map + lo, hi = pattern.getwidth() + if hi > MAXCODE: + hi = MAXCODE + if lo == 0: + code.extend([INFO, 4, 0, lo, hi]) + return + # look for a literal prefix + prefix = [] + prefix_skip = 0 + charset = None # not used + if not (flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE): + # look for literal prefix + prefix, prefix_skip, got_all = _get_literal_prefix(pattern, flags) + # if no prefix, look for charset prefix + if not prefix: + charset = _get_charset_prefix(pattern, flags) + if charset: + charset, hascased = _optimize_charset(charset) + assert not hascased + if charset == _CHARSET_ALL: + charset = None +## if prefix: +## print("*** PREFIX", prefix, prefix_skip) +## if charset: +## print("*** CHARSET", charset) + # add an info block + emit = code.append + emit(INFO) + skip = len(code); emit(0) + # literal flag + mask = 0 + if prefix: + mask = SRE_INFO_PREFIX + if prefix_skip is None and got_all: + mask = mask | SRE_INFO_LITERAL + elif charset: + mask = mask | SRE_INFO_CHARSET + emit(mask) + # pattern length + if lo < MAXCODE: + emit(lo) + else: + emit(MAXCODE) + prefix = prefix[:MAXCODE] + emit(hi) + # add literal prefix + if prefix: + emit(len(prefix)) # length + if prefix_skip is None: + prefix_skip = len(prefix) + emit(prefix_skip) # skip + code.extend(prefix) + # generate overlap table + code.extend(_generate_overlap_table(prefix)) + elif charset: + _compile_charset(charset, flags, code) + code[skip] = len(code) - skip + +def isstring(obj): + return isinstance(obj, (str, bytes)) + +def _code(p, flags): + + flags = p.state.flags | flags + code = [] + + # compile info block + _compile_info(code, p, flags) + + # compile the pattern + _compile(code, p.data, flags) + + code.append(SUCCESS) + + return code + +def _hex_code(code): + return '[%s]' % ', '.join('%#0*x' % (_sre.CODESIZE*2+2, x) for x in code) + +def dis(code): + import sys + + labels = set() + level = 0 + offset_width = len(str(len(code) - 1)) + + def dis_(start, end): + def print_(*args, to=None): + if to is not None: + labels.add(to) + args += ('(to %d)' % (to,),) + print('%*d%s ' % (offset_width, start, ':' if start in labels else '.'), + end=' '*(level-1)) + print(*args) + + def print_2(*args): + print(end=' '*(offset_width + 2*level)) + print(*args) + + nonlocal level + level += 1 + i = start + while i < end: + start = i + op = code[i] + i += 1 + op = OPCODES[op] + if op in (SUCCESS, FAILURE, ANY, ANY_ALL, + MAX_UNTIL, MIN_UNTIL, NEGATE): + print_(op) + elif op in (LITERAL, NOT_LITERAL, + LITERAL_IGNORE, NOT_LITERAL_IGNORE, + LITERAL_UNI_IGNORE, NOT_LITERAL_UNI_IGNORE, + LITERAL_LOC_IGNORE, NOT_LITERAL_LOC_IGNORE): + arg = code[i] + i += 1 + print_(op, '%#02x (%r)' % (arg, chr(arg))) + elif op is AT: + arg = code[i] + i += 1 + arg = str(ATCODES[arg]) + assert arg[:3] == 'AT_' + print_(op, arg[3:]) + elif op is CATEGORY: + arg = code[i] + i += 1 + arg = str(CHCODES[arg]) + assert arg[:9] == 'CATEGORY_' + print_(op, arg[9:]) + elif op in (IN, IN_IGNORE, IN_UNI_IGNORE, IN_LOC_IGNORE): + skip = code[i] + print_(op, skip, to=i+skip) + dis_(i+1, i+skip) + i += skip + elif op in (RANGE, RANGE_UNI_IGNORE): + lo, hi = code[i: i+2] + i += 2 + print_(op, '%#02x %#02x (%r-%r)' % (lo, hi, chr(lo), chr(hi))) + elif op is CHARSET: + print_(op, _hex_code(code[i: i + 256//_CODEBITS])) + i += 256//_CODEBITS + elif op is BIGCHARSET: + arg = code[i] + i += 1 + mapping = list(b''.join(x.to_bytes(_sre.CODESIZE, 
sys.byteorder) + for x in code[i: i + 256//_sre.CODESIZE])) + print_(op, arg, mapping) + i += 256//_sre.CODESIZE + level += 1 + for j in range(arg): + print_2(_hex_code(code[i: i + 256//_CODEBITS])) + i += 256//_CODEBITS + level -= 1 + elif op in (MARK, GROUPREF, GROUPREF_IGNORE, GROUPREF_UNI_IGNORE, + GROUPREF_LOC_IGNORE): + arg = code[i] + i += 1 + print_(op, arg) + elif op is JUMP: + skip = code[i] + print_(op, skip, to=i+skip) + i += 1 + elif op is BRANCH: + skip = code[i] + print_(op, skip, to=i+skip) + while skip: + dis_(i+1, i+skip) + i += skip + start = i + skip = code[i] + if skip: + print_('branch', skip, to=i+skip) + else: + print_(FAILURE) + i += 1 + elif op in (REPEAT, REPEAT_ONE, MIN_REPEAT_ONE, + POSSESSIVE_REPEAT, POSSESSIVE_REPEAT_ONE): + skip, min, max = code[i: i+3] + if max == MAXREPEAT: + max = 'MAXREPEAT' + print_(op, skip, min, max, to=i+skip) + dis_(i+3, i+skip) + i += skip + elif op is GROUPREF_EXISTS: + arg, skip = code[i: i+2] + print_(op, arg, skip, to=i+skip) + i += 2 + elif op in (ASSERT, ASSERT_NOT): + skip, arg = code[i: i+2] + print_(op, skip, arg, to=i+skip) + dis_(i+2, i+skip) + i += skip + elif op is ATOMIC_GROUP: + skip = code[i] + print_(op, skip, to=i+skip) + dis_(i+1, i+skip) + i += skip + elif op is INFO: + skip, flags, min, max = code[i: i+4] + if max == MAXREPEAT: + max = 'MAXREPEAT' + print_(op, skip, bin(flags), min, max, to=i+skip) + start = i+4 + if flags & SRE_INFO_PREFIX: + prefix_len, prefix_skip = code[i+4: i+6] + print_2(' prefix_skip', prefix_skip) + start = i + 6 + prefix = code[start: start+prefix_len] + print_2(' prefix', + '[%s]' % ', '.join('%#02x' % x for x in prefix), + '(%r)' % ''.join(map(chr, prefix))) + start += prefix_len + print_2(' overlap', code[start: start+prefix_len]) + start += prefix_len + if flags & SRE_INFO_CHARSET: + level += 1 + print_2('in') + dis_(start, i+skip) + level -= 1 + i += skip + else: + raise ValueError(op) + + level -= 1 + + dis_(0, len(code)) + + +def compile(p, flags=0): + # internal: convert pattern list to internal format + + if isstring(p): + pattern = p + p = _parser.parse(p, flags) + else: + pattern = None + + code = _code(p, flags) + + if flags & SRE_FLAG_DEBUG: + print() + dis(code) + + # map in either direction + groupindex = p.state.groupdict + indexgroup = [None] * p.state.groups + for k, i in groupindex.items(): + indexgroup[i] = k + + return _sre.compile( + pattern, flags | p.state.flags, code, + p.state.groups-1, + groupindex, tuple(indexgroup) + ) diff --git a/Python314_4_x86_Template/Lib/re/_constants.py b/Python314_4_x86_Template/Lib/re/_constants.py new file mode 100644 index 00000000..d6f32302 --- /dev/null +++ b/Python314_4_x86_Template/Lib/re/_constants.py @@ -0,0 +1,224 @@ +# +# Secret Labs' Regular Expression Engine +# +# various symbols used by the regular expression engine. +# run this script to update the _sre include files! +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# See the __init__.py file for information on usage and redistribution. +# + +"""Internal support module for sre""" + +# update when constants are added or removed + +MAGIC = 20230612 + +from _sre import MAXREPEAT, MAXGROUPS # noqa: F401 + +# SRE standard exception (access as sre.error) +# should this really be here? + +class PatternError(Exception): + """Exception raised for invalid regular expressions. 
+ + Attributes: + + msg: The unformatted error message + pattern: The regular expression pattern + pos: The index in the pattern where compilation failed (may be None) + lineno: The line corresponding to pos (may be None) + colno: The column corresponding to pos (may be None) + """ + + __module__ = 're' + + def __init__(self, msg, pattern=None, pos=None): + self.msg = msg + self.pattern = pattern + self.pos = pos + if pattern is not None and pos is not None: + msg = '%s at position %d' % (msg, pos) + if isinstance(pattern, str): + newline = '\n' + else: + newline = b'\n' + self.lineno = pattern.count(newline, 0, pos) + 1 + self.colno = pos - pattern.rfind(newline, 0, pos) + if newline in pattern: + msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno) + else: + self.lineno = self.colno = None + super().__init__(msg) + + +# Backward compatibility after renaming in 3.13 +error = PatternError + +class _NamedIntConstant(int): + def __new__(cls, value, name): + self = super(_NamedIntConstant, cls).__new__(cls, value) + self.name = name + return self + + def __repr__(self): + return self.name + + __reduce__ = None + +MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT') + +def _makecodes(*names): + items = [_NamedIntConstant(i, name) for i, name in enumerate(names)] + globals().update({item.name: item for item in items}) + return items + +# operators +OPCODES = _makecodes( + # failure=0 success=1 (just because it looks better that way :-) + 'FAILURE', 'SUCCESS', + + 'ANY', 'ANY_ALL', + 'ASSERT', 'ASSERT_NOT', + 'AT', + 'BRANCH', + 'CATEGORY', + 'CHARSET', 'BIGCHARSET', + 'GROUPREF', 'GROUPREF_EXISTS', + 'IN', + 'INFO', + 'JUMP', + 'LITERAL', + 'MARK', + 'MAX_UNTIL', + 'MIN_UNTIL', + 'NOT_LITERAL', + 'NEGATE', + 'RANGE', + 'REPEAT', + 'REPEAT_ONE', + 'SUBPATTERN', + 'MIN_REPEAT_ONE', + 'ATOMIC_GROUP', + 'POSSESSIVE_REPEAT', + 'POSSESSIVE_REPEAT_ONE', + + 'GROUPREF_IGNORE', + 'IN_IGNORE', + 'LITERAL_IGNORE', + 'NOT_LITERAL_IGNORE', + + 'GROUPREF_LOC_IGNORE', + 'IN_LOC_IGNORE', + 'LITERAL_LOC_IGNORE', + 'NOT_LITERAL_LOC_IGNORE', + + 'GROUPREF_UNI_IGNORE', + 'IN_UNI_IGNORE', + 'LITERAL_UNI_IGNORE', + 'NOT_LITERAL_UNI_IGNORE', + 'RANGE_UNI_IGNORE', + + # The following opcodes only occur in the parser output, + # but not in the compiled code.
+ 'MIN_REPEAT', 'MAX_REPEAT', +) +del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT + +# positions +ATCODES = _makecodes( + 'AT_BEGINNING', 'AT_BEGINNING_LINE', 'AT_BEGINNING_STRING', + 'AT_BOUNDARY', 'AT_NON_BOUNDARY', + 'AT_END', 'AT_END_LINE', 'AT_END_STRING', + + 'AT_LOC_BOUNDARY', 'AT_LOC_NON_BOUNDARY', + + 'AT_UNI_BOUNDARY', 'AT_UNI_NON_BOUNDARY', +) + +# categories +CHCODES = _makecodes( + 'CATEGORY_DIGIT', 'CATEGORY_NOT_DIGIT', + 'CATEGORY_SPACE', 'CATEGORY_NOT_SPACE', + 'CATEGORY_WORD', 'CATEGORY_NOT_WORD', + 'CATEGORY_LINEBREAK', 'CATEGORY_NOT_LINEBREAK', + + 'CATEGORY_LOC_WORD', 'CATEGORY_LOC_NOT_WORD', + + 'CATEGORY_UNI_DIGIT', 'CATEGORY_UNI_NOT_DIGIT', + 'CATEGORY_UNI_SPACE', 'CATEGORY_UNI_NOT_SPACE', + 'CATEGORY_UNI_WORD', 'CATEGORY_UNI_NOT_WORD', + 'CATEGORY_UNI_LINEBREAK', 'CATEGORY_UNI_NOT_LINEBREAK', +) + + +# replacement operations for "ignore case" mode +OP_IGNORE = { + LITERAL: LITERAL_IGNORE, + NOT_LITERAL: NOT_LITERAL_IGNORE, +} + +OP_LOCALE_IGNORE = { + LITERAL: LITERAL_LOC_IGNORE, + NOT_LITERAL: NOT_LITERAL_LOC_IGNORE, +} + +OP_UNICODE_IGNORE = { + LITERAL: LITERAL_UNI_IGNORE, + NOT_LITERAL: NOT_LITERAL_UNI_IGNORE, +} + +AT_MULTILINE = { + AT_BEGINNING: AT_BEGINNING_LINE, + AT_END: AT_END_LINE +} + +AT_LOCALE = { + AT_BOUNDARY: AT_LOC_BOUNDARY, + AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY +} + +AT_UNICODE = { + AT_BOUNDARY: AT_UNI_BOUNDARY, + AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY +} + +CH_LOCALE = { + CATEGORY_DIGIT: CATEGORY_DIGIT, + CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, + CATEGORY_SPACE: CATEGORY_SPACE, + CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, + CATEGORY_WORD: CATEGORY_LOC_WORD, + CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, + CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, + CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK +} + +CH_UNICODE = { + CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, + CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT, + CATEGORY_SPACE: CATEGORY_UNI_SPACE, + CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, + CATEGORY_WORD: CATEGORY_UNI_WORD, + CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, + CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, + CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK +} + +CH_NEGATE = dict(zip(CHCODES[::2] + CHCODES[1::2], CHCODES[1::2] + CHCODES[::2])) + +# flags +SRE_FLAG_IGNORECASE = 2 # case insensitive +SRE_FLAG_LOCALE = 4 # honour system locale +SRE_FLAG_MULTILINE = 8 # treat target as multiline string +SRE_FLAG_DOTALL = 16 # treat target as a single string +SRE_FLAG_UNICODE = 32 # use unicode "locale" +SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments +SRE_FLAG_DEBUG = 128 # debugging +SRE_FLAG_ASCII = 256 # use ascii "locale" + +# flags for INFO primitive +SRE_INFO_PREFIX = 1 # has prefix +SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) +SRE_INFO_CHARSET = 4 # pattern starts with character from given set diff --git a/Python314_4_x86_Template/Lib/re/_parser.py b/Python314_4_x86_Template/Lib/re/_parser.py new file mode 100644 index 00000000..35ab7ede --- /dev/null +++ b/Python314_4_x86_Template/Lib/re/_parser.py @@ -0,0 +1,1066 @@ +# +# Secret Labs' Regular Expression Engine +# +# convert re-style regular expression to sre pattern +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# See the __init__.py file for information on usage and redistribution. 
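The SRE_FLAG_* bit values in _constants.py above are the same integers the public re module exposes as its flag constants, so the two layers can be cross-checked directly:

    import re

    # Same bit values as SRE_FLAG_IGNORECASE .. SRE_FLAG_ASCII above.
    assert re.IGNORECASE == 2 and re.LOCALE == 4 and re.MULTILINE == 8
    assert re.DOTALL == 16 and re.UNICODE == 32 and re.VERBOSE == 64
    assert re.DEBUG == 128 and re.ASCII == 256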
+# + +"""Internal support module for sre""" + +# XXX: show string offset and offending character for all errors + +from ._constants import * + +SPECIAL_CHARS = ".\\[{()*+?^$|" +REPEAT_CHARS = "*+?{" + +DIGITS = frozenset("0123456789") + +OCTDIGITS = frozenset("01234567") +HEXDIGITS = frozenset("0123456789abcdefABCDEF") +ASCIILETTERS = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +WHITESPACE = frozenset(" \t\n\r\v\f") + +_REPEATCODES = frozenset({MIN_REPEAT, MAX_REPEAT, POSSESSIVE_REPEAT}) +_UNITCODES = frozenset({ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY}) + +ESCAPES = { + r"\a": (LITERAL, ord("\a")), + r"\b": (LITERAL, ord("\b")), + r"\f": (LITERAL, ord("\f")), + r"\n": (LITERAL, ord("\n")), + r"\r": (LITERAL, ord("\r")), + r"\t": (LITERAL, ord("\t")), + r"\v": (LITERAL, ord("\v")), + r"\\": (LITERAL, ord("\\")) +} + +CATEGORIES = { + r"\A": (AT, AT_BEGINNING_STRING), # start of string + r"\b": (AT, AT_BOUNDARY), + r"\B": (AT, AT_NON_BOUNDARY), + r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]), + r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]), + r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]), + r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]), + r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]), + r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]), + r"\z": (AT, AT_END_STRING), # end of string + r"\Z": (AT, AT_END_STRING), # end of string (obsolete) +} + +FLAGS = { + # standard flags + "i": SRE_FLAG_IGNORECASE, + "L": SRE_FLAG_LOCALE, + "m": SRE_FLAG_MULTILINE, + "s": SRE_FLAG_DOTALL, + "x": SRE_FLAG_VERBOSE, + # extensions + "a": SRE_FLAG_ASCII, + "u": SRE_FLAG_UNICODE, +} + +TYPE_FLAGS = SRE_FLAG_ASCII | SRE_FLAG_LOCALE | SRE_FLAG_UNICODE +GLOBAL_FLAGS = SRE_FLAG_DEBUG + +# Maximal value returned by SubPattern.getwidth(). +# Must be larger than MAXREPEAT, MAXCODE and sys.maxsize. 
+MAXWIDTH = 1 << 64 + +class State: + # keeps track of state for parsing + def __init__(self): + self.flags = 0 + self.groupdict = {} + self.groupwidths = [None] # group 0 + self.lookbehindgroups = None + self.grouprefpos = {} + @property + def groups(self): + return len(self.groupwidths) + def opengroup(self, name=None): + gid = self.groups + self.groupwidths.append(None) + if self.groups > MAXGROUPS: + raise error("too many groups") + if name is not None: + ogid = self.groupdict.get(name, None) + if ogid is not None: + raise error("redefinition of group name %r as group %d; " + "was group %d" % (name, gid, ogid)) + self.groupdict[name] = gid + return gid + def closegroup(self, gid, p): + self.groupwidths[gid] = p.getwidth() + def checkgroup(self, gid): + return gid < self.groups and self.groupwidths[gid] is not None + + def checklookbehindgroup(self, gid, source): + if self.lookbehindgroups is not None: + if not self.checkgroup(gid): + raise source.error('cannot refer to an open group') + if gid >= self.lookbehindgroups: + raise source.error('cannot refer to group defined in the same ' + 'lookbehind subpattern') + +class SubPattern: + # a subpattern, in intermediate form + def __init__(self, state, data=None): + self.state = state + if data is None: + data = [] + self.data = data + self.width = None + + def dump(self, level=0): + seqtypes = (tuple, list) + for op, av in self.data: + print(level*" " + str(op), end='') + if op is IN: + # member sublanguage + print() + for op, a in av: + print((level+1)*" " + str(op), a) + elif op is BRANCH: + print() + for i, a in enumerate(av[1]): + if i: + print(level*" " + "OR") + a.dump(level+1) + elif op is GROUPREF_EXISTS: + condgroup, item_yes, item_no = av + print('', condgroup) + item_yes.dump(level+1) + if item_no: + print(level*" " + "ELSE") + item_no.dump(level+1) + elif isinstance(av, SubPattern): + print() + av.dump(level+1) + elif isinstance(av, seqtypes): + nl = False + for a in av: + if isinstance(a, SubPattern): + if not nl: + print() + a.dump(level+1) + nl = True + else: + if not nl: + print(' ', end='') + print(a, end='') + nl = False + if not nl: + print() + else: + print('', av) + def __repr__(self): + return repr(self.data) + def __len__(self): + return len(self.data) + def __delitem__(self, index): + del self.data[index] + def __getitem__(self, index): + if isinstance(index, slice): + return SubPattern(self.state, self.data[index]) + return self.data[index] + def __setitem__(self, index, code): + self.data[index] = code + def insert(self, index, code): + self.data.insert(index, code) + def append(self, code): + self.data.append(code) + def getwidth(self): + # determine the width (min, max) for this subpattern + if self.width is not None: + return self.width + lo = hi = 0 + for op, av in self.data: + if op is BRANCH: + i = MAXWIDTH + j = 0 + for av in av[1]: + l, h = av.getwidth() + i = min(i, l) + j = max(j, h) + lo = lo + i + hi = hi + j + elif op is ATOMIC_GROUP: + i, j = av.getwidth() + lo = lo + i + hi = hi + j + elif op is SUBPATTERN: + i, j = av[-1].getwidth() + lo = lo + i + hi = hi + j + elif op in _REPEATCODES: + i, j = av[2].getwidth() + lo = lo + i * av[0] + if av[1] == MAXREPEAT and j: + hi = MAXWIDTH + else: + hi = hi + j * av[1] + elif op in _UNITCODES: + lo = lo + 1 + hi = hi + 1 + elif op is GROUPREF: + i, j = self.state.groupwidths[av] + lo = lo + i + hi = hi + j + elif op is GROUPREF_EXISTS: + i, j = av[1].getwidth() + if av[2] is not None: + l, h = av[2].getwidth() + i = min(i, l) + j = max(j, h) + else: + i = 0 + 
lo = lo + i + hi = hi + j + elif op is SUCCESS: + break + self.width = min(lo, MAXWIDTH), min(hi, MAXWIDTH) + return self.width + +class Tokenizer: + def __init__(self, string): + self.istext = isinstance(string, str) + self.string = string + if not self.istext: + string = str(string, 'latin1') + self.decoded_string = string + self.index = 0 + self.next = None + self.__next() + def __next(self): + index = self.index + try: + char = self.decoded_string[index] + except IndexError: + self.next = None + return + if char == "\\": + index += 1 + try: + char += self.decoded_string[index] + except IndexError: + raise error("bad escape (end of pattern)", + self.string, len(self.string) - 1) from None + self.index = index + 1 + self.next = char + def match(self, char): + if char == self.next: + self.__next() + return True + return False + def get(self): + this = self.next + self.__next() + return this + def getwhile(self, n, charset): + result = '' + for _ in range(n): + c = self.next + if c not in charset: + break + result += c + self.__next() + return result + def getuntil(self, terminator, name): + result = '' + while True: + c = self.next + self.__next() + if c is None: + if not result: + raise self.error("missing " + name) + raise self.error("missing %s, unterminated name" % terminator, + len(result)) + if c == terminator: + if not result: + raise self.error("missing " + name, 1) + break + result += c + return result + @property + def pos(self): + return self.index - len(self.next or '') + def tell(self): + return self.index - len(self.next or '') + def seek(self, index): + self.index = index + self.__next() + + def error(self, msg, offset=0): + if not self.istext: + msg = msg.encode('ascii', 'backslashreplace').decode('ascii') + return error(msg, self.string, self.tell() - offset) + + def checkgroupname(self, name, offset): + if not (self.istext or name.isascii()): + msg = "bad character in group name %a" % name + raise self.error(msg, len(name) + offset) + if not name.isidentifier(): + msg = "bad character in group name %r" % name + raise self.error(msg, len(name) + offset) + +def _class_escape(source, escape): + # handle escape code inside character class + code = ESCAPES.get(escape) + if code: + return code + code = CATEGORIES.get(escape) + if code and code[0] is IN: + return code + try: + c = escape[1:2] + if c == "x": + # hexadecimal escape (exactly two digits) + escape += source.getwhile(2, HEXDIGITS) + if len(escape) != 4: + raise source.error("incomplete escape %s" % escape, len(escape)) + return LITERAL, int(escape[2:], 16) + elif c == "u" and source.istext: + # unicode escape (exactly four digits) + escape += source.getwhile(4, HEXDIGITS) + if len(escape) != 6: + raise source.error("incomplete escape %s" % escape, len(escape)) + return LITERAL, int(escape[2:], 16) + elif c == "U" and source.istext: + # unicode escape (exactly eight digits) + escape += source.getwhile(8, HEXDIGITS) + if len(escape) != 10: + raise source.error("incomplete escape %s" % escape, len(escape)) + c = int(escape[2:], 16) + chr(c) # raise ValueError for invalid code + return LITERAL, c + elif c == "N" and source.istext: + import unicodedata + # named unicode escape e.g. 
\N{EM DASH} + if not source.match('{'): + raise source.error("missing {") + charname = source.getuntil('}', 'character name') + try: + c = ord(unicodedata.lookup(charname)) + except (KeyError, TypeError): + raise source.error("undefined character name %r" % charname, + len(charname) + len(r'\N{}')) from None + return LITERAL, c + elif c in OCTDIGITS: + # octal escape (up to three digits) + escape += source.getwhile(2, OCTDIGITS) + c = int(escape[1:], 8) + if c > 0o377: + raise source.error('octal escape value %s outside of ' + 'range 0-0o377' % escape, len(escape)) + return LITERAL, c + elif c in DIGITS: + raise ValueError + if len(escape) == 2: + if c in ASCIILETTERS: + raise source.error('bad escape %s' % escape, len(escape)) + return LITERAL, ord(escape[1]) + except ValueError: + pass + raise source.error("bad escape %s" % escape, len(escape)) + +def _escape(source, escape, state): + # handle escape code in expression + code = CATEGORIES.get(escape) + if code: + return code + code = ESCAPES.get(escape) + if code: + return code + try: + c = escape[1:2] + if c == "x": + # hexadecimal escape + escape += source.getwhile(2, HEXDIGITS) + if len(escape) != 4: + raise source.error("incomplete escape %s" % escape, len(escape)) + return LITERAL, int(escape[2:], 16) + elif c == "u" and source.istext: + # unicode escape (exactly four digits) + escape += source.getwhile(4, HEXDIGITS) + if len(escape) != 6: + raise source.error("incomplete escape %s" % escape, len(escape)) + return LITERAL, int(escape[2:], 16) + elif c == "U" and source.istext: + # unicode escape (exactly eight digits) + escape += source.getwhile(8, HEXDIGITS) + if len(escape) != 10: + raise source.error("incomplete escape %s" % escape, len(escape)) + c = int(escape[2:], 16) + chr(c) # raise ValueError for invalid code + return LITERAL, c + elif c == "N" and source.istext: + import unicodedata + # named unicode escape e.g. 
\N{EM DASH} + if not source.match('{'): + raise source.error("missing {") + charname = source.getuntil('}', 'character name') + try: + c = ord(unicodedata.lookup(charname)) + except (KeyError, TypeError): + raise source.error("undefined character name %r" % charname, + len(charname) + len(r'\N{}')) from None + return LITERAL, c + elif c == "0": + # octal escape + escape += source.getwhile(2, OCTDIGITS) + return LITERAL, int(escape[1:], 8) + elif c in DIGITS: + # octal escape *or* decimal group reference (sigh) + if source.next in DIGITS: + escape += source.get() + if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and + source.next in OCTDIGITS): + # got three octal digits; this is an octal escape + escape += source.get() + c = int(escape[1:], 8) + if c > 0o377: + raise source.error('octal escape value %s outside of ' + 'range 0-0o377' % escape, + len(escape)) + return LITERAL, c + # not an octal escape, so this is a group reference + group = int(escape[1:]) + if group < state.groups: + if not state.checkgroup(group): + raise source.error("cannot refer to an open group", + len(escape)) + state.checklookbehindgroup(group, source) + return GROUPREF, group + raise source.error("invalid group reference %d" % group, len(escape) - 1) + if len(escape) == 2: + if c in ASCIILETTERS: + raise source.error("bad escape %s" % escape, len(escape)) + return LITERAL, ord(escape[1]) + except ValueError: + pass + raise source.error("bad escape %s" % escape, len(escape)) + +def _uniq(items): + return list(dict.fromkeys(items)) + +def _parse_sub(source, state, verbose, nested): + # parse an alternation: a|b|c + + items = [] + itemsappend = items.append + sourcematch = source.match + start = source.tell() + while True: + itemsappend(_parse(source, state, verbose, nested + 1, + not nested and not items)) + if not sourcematch("|"): + break + if not nested: + verbose = state.flags & SRE_FLAG_VERBOSE + + if len(items) == 1: + return items[0] + + subpattern = SubPattern(state) + + # check if all items share a common prefix + while True: + prefix = None + for item in items: + if not item: + break + if prefix is None: + prefix = item[0] + elif item[0] != prefix: + break + else: + # all subitems start with a common "prefix". 
+ # move it out of the branch + for item in items: + del item[0] + subpattern.append(prefix) + continue # check next one + break + + # check if the branch can be replaced by a character set + set = [] + for item in items: + if len(item) != 1: + break + op, av = item[0] + if op is LITERAL: + set.append((op, av)) + elif op is IN and av[0][0] is not NEGATE: + set.extend(av) + else: + break + else: + # we can store this as a character set instead of a + # branch (the compiler may optimize this even more) + subpattern.append((IN, _uniq(set))) + return subpattern + + subpattern.append((BRANCH, (None, items))) + return subpattern + +def _parse(source, state, verbose, nested, first=False): + # parse a simple pattern + subpattern = SubPattern(state) + + # precompute constants into local variables + subpatternappend = subpattern.append + sourceget = source.get + sourcematch = source.match + _len = len + _ord = ord + + while True: + + this = source.next + if this is None: + break # end of pattern + if this in "|)": + break # end of subpattern + sourceget() + + if verbose: + # skip whitespace and comments + if this in WHITESPACE: + continue + if this == "#": + while True: + this = sourceget() + if this is None or this == "\n": + break + continue + + if this[0] == "\\": + code = _escape(source, this, state) + subpatternappend(code) + + elif this not in SPECIAL_CHARS: + subpatternappend((LITERAL, _ord(this))) + + elif this == "[": + here = source.tell() - 1 + # character set + set = [] + setappend = set.append +## if sourcematch(":"): +## pass # handle character classes + if source.next == '[': + import warnings + warnings.warn( + 'Possible nested set at position %d' % source.tell(), + FutureWarning, stacklevel=nested + 6 + ) + negate = sourcematch("^") + # check remaining characters + while True: + this = sourceget() + if this is None: + raise source.error("unterminated character set", + source.tell() - here) + if this == "]" and set: + break + elif this[0] == "\\": + code1 = _class_escape(source, this) + else: + if set and this in '-&~|' and source.next == this: + import warnings + warnings.warn( + 'Possible set %s at position %d' % ( + 'difference' if this == '-' else + 'intersection' if this == '&' else + 'symmetric difference' if this == '~' else + 'union', + source.tell() - 1), + FutureWarning, stacklevel=nested + 6 + ) + code1 = LITERAL, _ord(this) + if sourcematch("-"): + # potential range + that = sourceget() + if that is None: + raise source.error("unterminated character set", + source.tell() - here) + if that == "]": + if code1[0] is IN: + code1 = code1[1][0] + setappend(code1) + setappend((LITERAL, _ord("-"))) + break + if that[0] == "\\": + code2 = _class_escape(source, that) + else: + if that == '-': + import warnings + warnings.warn( + 'Possible set difference at position %d' % ( + source.tell() - 2), + FutureWarning, stacklevel=nested + 6 + ) + code2 = LITERAL, _ord(that) + if code1[0] != LITERAL or code2[0] != LITERAL: + msg = "bad character range %s-%s" % (this, that) + raise source.error(msg, len(this) + 1 + len(that)) + lo = code1[1] + hi = code2[1] + if hi < lo: + msg = "bad character range %s-%s" % (this, that) + raise source.error(msg, len(this) + 1 + len(that)) + setappend((RANGE, (lo, hi))) + else: + if code1[0] is IN: + code1 = code1[1][0] + setappend(code1) + + set = _uniq(set) + # XXX: should move set optimization to compiler! 
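The `_parse_sub` rewrites above (common-prefix extraction, and collapsing an alternation of single characters into one IN set) are observable from the outside, since `re.DEBUG` makes `parse()` dump the tree via `SubPattern.dump()` before the compiled code is disassembled. On current CPython, for example:

    import re

    re.compile("a|b|c", re.DEBUG)
    # Dump output (followed by the dis() listing of the compiled code):
    # IN
    #   LITERAL 97
    #   LITERAL 98
    #   LITERAL 99

No BRANCH node survives: the three one-character alternatives are stored as a single character set, which `_optimize_charset` can then compress further.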
+ if _len(set) == 1 and set[0][0] is LITERAL: + # optimization + if negate: + subpatternappend((NOT_LITERAL, set[0][1])) + else: + subpatternappend(set[0]) + else: + if negate: + set.insert(0, (NEGATE, None)) + # charmap optimization can't be added here because + # global flags still are not known + subpatternappend((IN, set)) + + elif this in REPEAT_CHARS: + # repeat previous item + here = source.tell() + if this == "?": + min, max = 0, 1 + elif this == "*": + min, max = 0, MAXREPEAT + + elif this == "+": + min, max = 1, MAXREPEAT + elif this == "{": + if source.next == "}": + subpatternappend((LITERAL, _ord(this))) + continue + + min, max = 0, MAXREPEAT + lo = hi = "" + while source.next in DIGITS: + lo += sourceget() + if sourcematch(","): + while source.next in DIGITS: + hi += sourceget() + else: + hi = lo + if not sourcematch("}"): + subpatternappend((LITERAL, _ord(this))) + source.seek(here) + continue + + if lo: + min = int(lo) + if min >= MAXREPEAT: + raise OverflowError("the repetition number is too large") + if hi: + max = int(hi) + if max >= MAXREPEAT: + raise OverflowError("the repetition number is too large") + if max < min: + raise source.error("min repeat greater than max repeat", + source.tell() - here) + else: + raise AssertionError("unsupported quantifier %r" % (char,)) + # figure out which item to repeat + if subpattern: + item = subpattern[-1:] + else: + item = None + if not item or item[0][0] is AT: + raise source.error("nothing to repeat", + source.tell() - here + len(this)) + if item[0][0] in _REPEATCODES: + raise source.error("multiple repeat", + source.tell() - here + len(this)) + if item[0][0] is SUBPATTERN: + group, add_flags, del_flags, p = item[0][1] + if group is None and not add_flags and not del_flags: + item = p + if sourcematch("?"): + # Non-Greedy Match + subpattern[-1] = (MIN_REPEAT, (min, max, item)) + elif sourcematch("+"): + # Possessive Match (Always Greedy) + subpattern[-1] = (POSSESSIVE_REPEAT, (min, max, item)) + else: + # Greedy Match + subpattern[-1] = (MAX_REPEAT, (min, max, item)) + + elif this == ".": + subpatternappend((ANY, None)) + + elif this == "(": + start = source.tell() - 1 + capture = True + atomic = False + name = None + add_flags = 0 + del_flags = 0 + if sourcematch("?"): + # options + char = sourceget() + if char is None: + raise source.error("unexpected end of pattern") + if char == "P": + # python extensions + if sourcematch("<"): + # named group: skip forward to end of name + name = source.getuntil(">", "group name") + source.checkgroupname(name, 1) + elif sourcematch("="): + # named backreference + name = source.getuntil(")", "group name") + source.checkgroupname(name, 1) + gid = state.groupdict.get(name) + if gid is None: + msg = "unknown group name %r" % name + raise source.error(msg, len(name) + 1) + if not state.checkgroup(gid): + raise source.error("cannot refer to an open group", + len(name) + 1) + state.checklookbehindgroup(gid, source) + subpatternappend((GROUPREF, gid)) + continue + + else: + char = sourceget() + if char is None: + raise source.error("unexpected end of pattern") + raise source.error("unknown extension ?P" + char, + len(char) + 2) + elif char == ":": + # non-capturing group + capture = False + elif char == "#": + # comment + while True: + if source.next is None: + raise source.error("missing ), unterminated comment", + source.tell() - start) + if sourceget() == ")": + break + continue + + elif char in "=!<": + # lookahead assertions + dir = 1 + if char == "<": + char = sourceget() + if char is None: + 
raise source.error("unexpected end of pattern") + if char not in "=!": + raise source.error("unknown extension ?<" + char, + len(char) + 2) + dir = -1 # lookbehind + lookbehindgroups = state.lookbehindgroups + if lookbehindgroups is None: + state.lookbehindgroups = state.groups + p = _parse_sub(source, state, verbose, nested + 1) + if dir < 0: + if lookbehindgroups is None: + state.lookbehindgroups = None + if not sourcematch(")"): + raise source.error("missing ), unterminated subpattern", + source.tell() - start) + if char == "=": + subpatternappend((ASSERT, (dir, p))) + elif p: + subpatternappend((ASSERT_NOT, (dir, p))) + else: + subpatternappend((FAILURE, ())) + continue + + elif char == "(": + # conditional backreference group + condname = source.getuntil(")", "group name") + if not (condname.isdecimal() and condname.isascii()): + source.checkgroupname(condname, 1) + condgroup = state.groupdict.get(condname) + if condgroup is None: + msg = "unknown group name %r" % condname + raise source.error(msg, len(condname) + 1) + else: + condgroup = int(condname) + if not condgroup: + raise source.error("bad group number", + len(condname) + 1) + if condgroup >= MAXGROUPS: + msg = "invalid group reference %d" % condgroup + raise source.error(msg, len(condname) + 1) + if condgroup not in state.grouprefpos: + state.grouprefpos[condgroup] = ( + source.tell() - len(condname) - 1 + ) + state.checklookbehindgroup(condgroup, source) + item_yes = _parse(source, state, verbose, nested + 1) + if source.match("|"): + item_no = _parse(source, state, verbose, nested + 1) + if source.next == "|": + raise source.error("conditional backref with more than two branches") + else: + item_no = None + if not source.match(")"): + raise source.error("missing ), unterminated subpattern", + source.tell() - start) + subpatternappend((GROUPREF_EXISTS, (condgroup, item_yes, item_no))) + continue + + elif char == ">": + # non-capturing, atomic group + capture = False + atomic = True + elif char in FLAGS or char == "-": + # flags + flags = _parse_flags(source, state, char) + if flags is None: # global flags + if not first or subpattern: + raise source.error('global flags not at the start ' + 'of the expression', + source.tell() - start) + verbose = state.flags & SRE_FLAG_VERBOSE + continue + + add_flags, del_flags = flags + capture = False + else: + raise source.error("unknown extension ?" 
+ char, + len(char) + 1) + + # parse group contents + if capture: + try: + group = state.opengroup(name) + except error as err: + raise source.error(err.msg, len(name) + 1) from None + else: + group = None + sub_verbose = ((verbose or (add_flags & SRE_FLAG_VERBOSE)) and + not (del_flags & SRE_FLAG_VERBOSE)) + p = _parse_sub(source, state, sub_verbose, nested + 1) + if not source.match(")"): + raise source.error("missing ), unterminated subpattern", + source.tell() - start) + if group is not None: + state.closegroup(group, p) + if atomic: + assert group is None + subpatternappend((ATOMIC_GROUP, p)) + else: + subpatternappend((SUBPATTERN, (group, add_flags, del_flags, p))) + + elif this == "^": + subpatternappend((AT, AT_BEGINNING)) + + elif this == "$": + subpatternappend((AT, AT_END)) + + else: + raise AssertionError("unsupported special character %r" % (char,)) + + # unpack non-capturing groups + for i in range(len(subpattern))[::-1]: + op, av = subpattern[i] + if op is SUBPATTERN: + group, add_flags, del_flags, p = av + if group is None and not add_flags and not del_flags: + subpattern[i: i+1] = p + + return subpattern + +def _parse_flags(source, state, char): + sourceget = source.get + add_flags = 0 + del_flags = 0 + if char != "-": + while True: + flag = FLAGS[char] + if source.istext: + if char == 'L': + msg = "bad inline flags: cannot use 'L' flag with a str pattern" + raise source.error(msg) + else: + if char == 'u': + msg = "bad inline flags: cannot use 'u' flag with a bytes pattern" + raise source.error(msg) + add_flags |= flag + if (flag & TYPE_FLAGS) and (add_flags & TYPE_FLAGS) != flag: + msg = "bad inline flags: flags 'a', 'u' and 'L' are incompatible" + raise source.error(msg) + char = sourceget() + if char is None: + raise source.error("missing -, : or )") + if char in ")-:": + break + if char not in FLAGS: + msg = "unknown flag" if char.isalpha() else "missing -, : or )" + raise source.error(msg, len(char)) + if char == ")": + state.flags |= add_flags + return None + if add_flags & GLOBAL_FLAGS: + raise source.error("bad inline flags: cannot turn on global flag", 1) + if char == "-": + char = sourceget() + if char is None: + raise source.error("missing flag") + if char not in FLAGS: + msg = "unknown flag" if char.isalpha() else "missing flag" + raise source.error(msg, len(char)) + while True: + flag = FLAGS[char] + if flag & TYPE_FLAGS: + msg = "bad inline flags: cannot turn off flags 'a', 'u' and 'L'" + raise source.error(msg) + del_flags |= flag + char = sourceget() + if char is None: + raise source.error("missing :") + if char == ":": + break + if char not in FLAGS: + msg = "unknown flag" if char.isalpha() else "missing :" + raise source.error(msg, len(char)) + assert char == ":" + if del_flags & GLOBAL_FLAGS: + raise source.error("bad inline flags: cannot turn off global flag", 1) + if add_flags & del_flags: + raise source.error("bad inline flags: flag turned on and off", 1) + return add_flags, del_flags + +def fix_flags(src, flags): + # Check and fix flags according to the type of pattern (str or bytes) + if isinstance(src, str): + if flags & SRE_FLAG_LOCALE: + raise ValueError("cannot use LOCALE flag with a str pattern") + if not flags & SRE_FLAG_ASCII: + flags |= SRE_FLAG_UNICODE + elif flags & SRE_FLAG_UNICODE: + raise ValueError("ASCII and UNICODE flags are incompatible") + else: + if flags & SRE_FLAG_UNICODE: + raise ValueError("cannot use UNICODE flag with a bytes pattern") + if flags & SRE_FLAG_LOCALE and flags & SRE_FLAG_ASCII: + raise ValueError("ASCII and 
LOCALE flags are incompatible") + return flags + +def parse(str, flags=0, state=None): + # parse 're' pattern into list of (opcode, argument) tuples + + source = Tokenizer(str) + + if state is None: + state = State() + state.flags = flags + state.str = str + + p = _parse_sub(source, state, flags & SRE_FLAG_VERBOSE, 0) + p.state.flags = fix_flags(str, p.state.flags) + + if source.next is not None: + assert source.next == ")" + raise source.error("unbalanced parenthesis") + + for g in p.state.grouprefpos: + if g >= p.state.groups: + msg = "invalid group reference %d" % g + raise error(msg, str, p.state.grouprefpos[g]) + + if flags & SRE_FLAG_DEBUG: + p.dump() + + return p + +def parse_template(source, pattern): + # parse 're' replacement string into list of literals and + # group references + s = Tokenizer(source) + sget = s.get + result = [] + literal = [] + lappend = literal.append + def addliteral(): + if s.istext: + result.append(''.join(literal)) + else: + # The tokenizer implicitly decodes bytes objects as latin-1, we must + # therefore re-encode the final representation. + result.append(''.join(literal).encode('latin-1')) + del literal[:] + def addgroup(index, pos): + if index > pattern.groups: + raise s.error("invalid group reference %d" % index, pos) + addliteral() + result.append(index) + groupindex = pattern.groupindex + while True: + this = sget() + if this is None: + break # end of replacement string + if this[0] == "\\": + # group + c = this[1] + if c == "g": + if not s.match("<"): + raise s.error("missing <") + name = s.getuntil(">", "group name") + if not (name.isdecimal() and name.isascii()): + s.checkgroupname(name, 1) + try: + index = groupindex[name] + except KeyError: + raise IndexError("unknown group name %r" % name) from None + else: + index = int(name) + if index >= MAXGROUPS: + raise s.error("invalid group reference %d" % index, + len(name) + 1) + addgroup(index, len(name) + 1) + elif c == "0": + if s.next in OCTDIGITS: + this += sget() + if s.next in OCTDIGITS: + this += sget() + lappend(chr(int(this[1:], 8) & 0xff)) + elif c in DIGITS: + isoctal = False + if s.next in DIGITS: + this += sget() + if (c in OCTDIGITS and this[2] in OCTDIGITS and + s.next in OCTDIGITS): + this += sget() + isoctal = True + c = int(this[1:], 8) + if c > 0o377: + raise s.error('octal escape value %s outside of ' + 'range 0-0o377' % this, len(this)) + lappend(chr(c)) + if not isoctal: + addgroup(int(this[1:]), len(this) - 1) + else: + try: + this = chr(ESCAPES[this][1]) + except KeyError: + if c in ASCIILETTERS: + raise s.error('bad escape %s' % this, len(this)) from None + lappend(this) + else: + lappend(this) + addliteral() + return result diff --git a/Python314_4_x86_Template/Lib/reprlib.py b/Python314_4_x86_Template/Lib/reprlib.py new file mode 100644 index 00000000..ab182476 --- /dev/null +++ b/Python314_4_x86_Template/Lib/reprlib.py @@ -0,0 +1,230 @@ +"""Redo the builtin repr() (representation) but with limits on most sizes.""" + +__all__ = ["Repr", "repr", "recursive_repr"] + +import builtins +from itertools import islice +from _thread import get_ident + +def recursive_repr(fillvalue='...'): + 'Decorator to make a repr function return fillvalue for a recursive call' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here 
because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__qualname__ = getattr(user_function, '__qualname__') + wrapper.__annotate__ = getattr(user_function, '__annotate__', None) + wrapper.__type_params__ = getattr(user_function, '__type_params__', ()) + wrapper.__wrapped__ = user_function + return wrapper + + return decorating_function + +class Repr: + _lookup = { + 'tuple': 'builtins', + 'list': 'builtins', + 'array': 'array', + 'set': 'builtins', + 'frozenset': 'builtins', + 'deque': 'collections', + 'dict': 'builtins', + 'str': 'builtins', + 'int': 'builtins' + } + + def __init__( + self, *, maxlevel=6, maxtuple=6, maxlist=6, maxarray=5, maxdict=4, + maxset=6, maxfrozenset=6, maxdeque=6, maxstring=30, maxlong=40, + maxother=30, fillvalue='...', indent=None, + ): + self.maxlevel = maxlevel + self.maxtuple = maxtuple + self.maxlist = maxlist + self.maxarray = maxarray + self.maxdict = maxdict + self.maxset = maxset + self.maxfrozenset = maxfrozenset + self.maxdeque = maxdeque + self.maxstring = maxstring + self.maxlong = maxlong + self.maxother = maxother + self.fillvalue = fillvalue + self.indent = indent + + def repr(self, x): + return self.repr1(x, self.maxlevel) + + def repr1(self, x, level): + cls = type(x) + typename = cls.__name__ + + if ' ' in typename: + parts = typename.split() + typename = '_'.join(parts) + + method = getattr(self, 'repr_' + typename, None) + if method: + # not defined in this class + if typename not in self._lookup: + return method(x, level) + module = getattr(cls, '__module__', None) + # defined in this class and is the module intended + if module == self._lookup[typename]: + return method(x, level) + + return self.repr_instance(x, level) + + def _join(self, pieces, level): + if self.indent is None: + return ', '.join(pieces) + if not pieces: + return '' + indent = self.indent + if isinstance(indent, int): + if indent < 0: + raise ValueError( + f'Repr.indent cannot be negative int (was {indent!r})' + ) + indent *= ' ' + try: + sep = ',\n' + (self.maxlevel - level + 1) * indent + except TypeError as error: + raise TypeError( + f'Repr.indent must be a str, int or None, not {type(indent)}' + ) from error + return sep.join(('', *pieces, ''))[1:-len(indent) or None] + + def _repr_iterable(self, x, level, left, right, maxiter, trail=''): + n = len(x) + if level <= 0 and n: + s = self.fillvalue + else: + newlevel = level - 1 + repr1 = self.repr1 + pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)] + if n > maxiter: + pieces.append(self.fillvalue) + s = self._join(pieces, level) + if n == 1 and trail and self.indent is None: + right = trail + right + return '%s%s%s' % (left, s, right) + + def repr_tuple(self, x, level): + return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',') + + def repr_list(self, x, level): + return self._repr_iterable(x, level, '[', ']', self.maxlist) + + def repr_array(self, x, level): + if not x: + return "array('%s')" % x.typecode + header = "array('%s', [" % x.typecode + return self._repr_iterable(x, level, header, '])', self.maxarray) + + def repr_set(self, x, level): + if not x: + return 'set()' + x = _possibly_sorted(x) + return self._repr_iterable(x, level, '{', '}', self.maxset) + + def repr_frozenset(self, x, level): + if not x: + return 'frozenset()' + x = _possibly_sorted(x) + return self._repr_iterable(x, level, 'frozenset({', '})', + self.maxfrozenset) + + 
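A short usage sketch for the Repr class: the size limits are per type and, per the keyword-only `__init__` shown above (older Python versions accepted no constructor arguments), each instance can be configured independently of the module-level `aRepr`/`repr` helpers defined at the bottom of the file:

    import reprlib

    r = reprlib.Repr(maxlist=3, maxstring=12)
    print(r.repr(list(range(100))))     # [0, 1, 2, ...]
    print(r.repr("x" * 50))             # 'xxx...xxxx'
    print(reprlib.repr({i: i for i in range(10)}))  # {0: 0, 1: 1, 2: 2, 3: 3, ...}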
def repr_deque(self, x, level): + return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque) + + def repr_dict(self, x, level): + n = len(x) + if n == 0: + return '{}' + if level <= 0: + return '{' + self.fillvalue + '}' + newlevel = level - 1 + repr1 = self.repr1 + pieces = [] + for key in islice(_possibly_sorted(x), self.maxdict): + keyrepr = repr1(key, newlevel) + valrepr = repr1(x[key], newlevel) + pieces.append('%s: %s' % (keyrepr, valrepr)) + if n > self.maxdict: + pieces.append(self.fillvalue) + s = self._join(pieces, level) + return '{%s}' % (s,) + + def repr_str(self, x, level): + s = builtins.repr(x[:self.maxstring]) + if len(s) > self.maxstring: + i = max(0, (self.maxstring-3)//2) + j = max(0, self.maxstring-3-i) + s = builtins.repr(x[:i] + x[len(x)-j:]) + s = s[:i] + self.fillvalue + s[len(s)-j:] + return s + + def repr_int(self, x, level): + try: + s = builtins.repr(x) + except ValueError as exc: + assert 'sys.set_int_max_str_digits()' in str(exc) + # Those imports must be deferred due to Python's build system + # where the reprlib module is imported before the math module. + import math, sys + # Integers with more than sys.get_int_max_str_digits() digits + # are rendered differently as their repr() raises a ValueError. + # See https://github.com/python/cpython/issues/135487. + k = 1 + int(math.log10(abs(x))) + # Note: math.log10(abs(x)) may be overestimated or underestimated, + # but for simplicity, we do not compute the exact number of digits. + max_digits = sys.get_int_max_str_digits() + return (f'<{x.__class__.__name__} instance with roughly {k} ' + f'digits (limit at {max_digits}) at 0x{id(x):x}>') + if len(s) > self.maxlong: + i = max(0, (self.maxlong-3)//2) + j = max(0, self.maxlong-3-i) + s = s[:i] + self.fillvalue + s[len(s)-j:] + return s + + def repr_instance(self, x, level): + try: + s = builtins.repr(x) + # Bugs in x.__repr__() can cause arbitrary + # exceptions -- then make up something + except Exception: + return '<%s instance at %#x>' % (x.__class__.__name__, id(x)) + if len(s) > self.maxother: + i = max(0, (self.maxother-3)//2) + j = max(0, self.maxother-3-i) + s = s[:i] + self.fillvalue + s[len(s)-j:] + return s + + +def _possibly_sorted(x): + # Since not all sequences of items can be sorted and comparison + # functions may raise arbitrary exceptions, return an unsorted + # sequence in that case. 
+ try: + return sorted(x) + except Exception: + return list(x) + +aRepr = Repr() +repr = aRepr.repr diff --git a/Python313_13_x86_Template/Lib/rlcompleter.py b/Python314_4_x86_Template/Lib/rlcompleter.py similarity index 100% rename from Python313_13_x86_Template/Lib/rlcompleter.py rename to Python314_4_x86_Template/Lib/rlcompleter.py diff --git a/Python313_13_x86_Template/Lib/runpy.py b/Python314_4_x86_Template/Lib/runpy.py similarity index 100% rename from Python313_13_x86_Template/Lib/runpy.py rename to Python314_4_x86_Template/Lib/runpy.py diff --git a/Python313_13_x86_Template/Lib/sched.py b/Python314_4_x86_Template/Lib/sched.py similarity index 100% rename from Python313_13_x86_Template/Lib/sched.py rename to Python314_4_x86_Template/Lib/sched.py diff --git a/Python313_13_x86_Template/Lib/secrets.py b/Python314_4_x86_Template/Lib/secrets.py similarity index 100% rename from Python313_13_x86_Template/Lib/secrets.py rename to Python314_4_x86_Template/Lib/secrets.py diff --git a/Python313_13_x86_Template/Lib/selectors.py b/Python314_4_x86_Template/Lib/selectors.py similarity index 100% rename from Python313_13_x86_Template/Lib/selectors.py rename to Python314_4_x86_Template/Lib/selectors.py diff --git a/Python313_13_x86_Template/Lib/shelve.py b/Python314_4_x86_Template/Lib/shelve.py similarity index 100% rename from Python313_13_x86_Template/Lib/shelve.py rename to Python314_4_x86_Template/Lib/shelve.py diff --git a/Python314_4_x86_Template/Lib/shlex.py b/Python314_4_x86_Template/Lib/shlex.py new file mode 100644 index 00000000..5959f52d --- /dev/null +++ b/Python314_4_x86_Template/Lib/shlex.py @@ -0,0 +1,351 @@ +"""A lexical analyzer class for simple shell-like syntaxes.""" + +# Module and documentation by Eric S. Raymond, 21 Dec 1998 +# Input stacking and error message cleanup added by ESR, March 2000 +# push_source() and pop_source() made explicit by ESR, January 2001. +# Posix compliance, split(), string arguments, and +# iterator interface by Gustavo Niemeyer, April 2003. +# changes to tokenize more like Posix shells by Vinay Sajip, July 2016. + +import sys +from io import StringIO + +__all__ = ["shlex", "split", "quote", "join"] + +class shlex: + "A lexical analyzer class for simple shell-like syntaxes." 
+ def __init__(self, instream=None, infile=None, posix=False, + punctuation_chars=False): + from collections import deque # deferred import for performance + + if isinstance(instream, str): + instream = StringIO(instream) + if instream is not None: + self.instream = instream + self.infile = infile + else: + self.instream = sys.stdin + self.infile = None + self.posix = posix + if posix: + self.eof = None + else: + self.eof = '' + self.commenters = '#' + self.wordchars = ('abcdfeghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_') + if self.posix: + self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ' + 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ') + self.whitespace = ' \t\r\n' + self.whitespace_split = False + self.quotes = '\'"' + self.escape = '\\' + self.escapedquotes = '"' + self.state = ' ' + self.pushback = deque() + self.lineno = 1 + self.debug = 0 + self.token = '' + self.filestack = deque() + self.source = None + if not punctuation_chars: + punctuation_chars = '' + elif punctuation_chars is True: + punctuation_chars = '();<>|&' + self._punctuation_chars = punctuation_chars + if punctuation_chars: + # _pushback_chars is a push back queue used by lookahead logic + self._pushback_chars = deque() + # these chars added because allowed in file names, args, wildcards + self.wordchars += '~-./*?=' + #remove any punctuation chars from wordchars + t = self.wordchars.maketrans(dict.fromkeys(punctuation_chars)) + self.wordchars = self.wordchars.translate(t) + + @property + def punctuation_chars(self): + return self._punctuation_chars + + def push_token(self, tok): + "Push a token onto the stack popped by the get_token method" + if self.debug >= 1: + print("shlex: pushing token " + repr(tok)) + self.pushback.appendleft(tok) + + def push_source(self, newstream, newfile=None): + "Push an input source onto the lexer's input source stack." + if isinstance(newstream, str): + newstream = StringIO(newstream) + self.filestack.appendleft((self.infile, self.instream, self.lineno)) + self.infile = newfile + self.instream = newstream + self.lineno = 1 + if self.debug: + if newfile is not None: + print('shlex: pushing to file %s' % (self.infile,)) + else: + print('shlex: pushing to stream %s' % (self.instream,)) + + def pop_source(self): + "Pop the input source stack." + self.instream.close() + (self.infile, self.instream, self.lineno) = self.filestack.popleft() + if self.debug: + print('shlex: popping to %s, line %d' \ + % (self.instream, self.lineno)) + self.state = ' ' + + def get_token(self): + "Get a token from the input stream (or from stack if it's nonempty)" + if self.pushback: + tok = self.pushback.popleft() + if self.debug >= 1: + print("shlex: popping token " + repr(tok)) + return tok + # No pushback. Get a token. + raw = self.read_token() + # Handle inclusions + if self.source is not None: + while raw == self.source: + spec = self.sourcehook(self.read_token()) + if spec: + (newfile, newstream) = spec + self.push_source(newstream, newfile) + raw = self.get_token() + # Maybe we got EOF instead? 
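+        # When a stream pushed via push_source() runs dry, pop back to the
+        # including stream and keep reading, so nested "source"-style
+        # inclusions drain in last-in, first-out order.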
+ while raw == self.eof: + if not self.filestack: + return self.eof + else: + self.pop_source() + raw = self.get_token() + # Neither inclusion nor EOF + if self.debug >= 1: + if raw != self.eof: + print("shlex: token=" + repr(raw)) + else: + print("shlex: token=EOF") + return raw + + def read_token(self): + quoted = False + escapedstate = ' ' + while True: + if self.punctuation_chars and self._pushback_chars: + nextchar = self._pushback_chars.pop() + else: + nextchar = self.instream.read(1) + if nextchar == '\n': + self.lineno += 1 + if self.debug >= 3: + print("shlex: in state %r I see character: %r" % (self.state, + nextchar)) + if self.state is None: + self.token = '' # past end of file + break + elif self.state == ' ': + if not nextchar: + self.state = None # end of file + break + elif nextchar in self.whitespace: + if self.debug >= 2: + print("shlex: I see whitespace in whitespace state") + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif nextchar in self.commenters: + self.instream.readline() + self.lineno += 1 + elif self.posix and nextchar in self.escape: + escapedstate = 'a' + self.state = nextchar + elif nextchar in self.wordchars: + self.token = nextchar + self.state = 'a' + elif nextchar in self.punctuation_chars: + self.token = nextchar + self.state = 'c' + elif nextchar in self.quotes: + if not self.posix: + self.token = nextchar + self.state = nextchar + elif self.whitespace_split: + self.token = nextchar + self.state = 'a' + else: + self.token = nextchar + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif self.state in self.quotes: + quoted = True + if not nextchar: # end of file + if self.debug >= 2: + print("shlex: I see EOF in quotes state") + # XXX what error should be raised here? + raise ValueError("No closing quotation") + if nextchar == self.state: + if not self.posix: + self.token += nextchar + self.state = ' ' + break + else: + self.state = 'a' + elif (self.posix and nextchar in self.escape and self.state + in self.escapedquotes): + escapedstate = self.state + self.state = nextchar + else: + self.token += nextchar + elif self.state in self.escape: + if not nextchar: # end of file + if self.debug >= 2: + print("shlex: I see EOF in escape state") + # XXX what error should be raised here? + raise ValueError("No escaped character") + # In posix shells, only the quote itself or the escape + # character may be escaped within quotes. 
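+                # For example, in POSIX mode split(r'"a\"b"') yields ['a"b']
+                # (the backslash escapes the quote), while split(r'"a\nb"')
+                # yields [r'a\nb'] -- the backslash is kept because 'n' is
+                # neither the quote nor the escape character.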
+ if (escapedstate in self.quotes and + nextchar != self.state and nextchar != escapedstate): + self.token += self.state + self.token += nextchar + self.state = escapedstate + elif self.state in ('a', 'c'): + if not nextchar: + self.state = None # end of file + break + elif nextchar in self.whitespace: + if self.debug >= 2: + print("shlex: I see whitespace in word state") + self.state = ' ' + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif nextchar in self.commenters: + self.instream.readline() + self.lineno += 1 + if self.posix: + self.state = ' ' + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif self.state == 'c': + if nextchar in self.punctuation_chars: + self.token += nextchar + else: + if nextchar not in self.whitespace: + self._pushback_chars.append(nextchar) + self.state = ' ' + break + elif self.posix and nextchar in self.quotes: + self.state = nextchar + elif self.posix and nextchar in self.escape: + escapedstate = 'a' + self.state = nextchar + elif (nextchar in self.wordchars or nextchar in self.quotes + or (self.whitespace_split and + nextchar not in self.punctuation_chars)): + self.token += nextchar + else: + if self.punctuation_chars: + self._pushback_chars.append(nextchar) + else: + self.pushback.appendleft(nextchar) + if self.debug >= 2: + print("shlex: I see punctuation in word state") + self.state = ' ' + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + result = self.token + self.token = '' + if self.posix and not quoted and result == '': + result = None + if self.debug > 1: + if result: + print("shlex: raw token=" + repr(result)) + else: + print("shlex: raw token=EOF") + return result + + def sourcehook(self, newfile): + "Hook called on a filename to be sourced." + import os.path + if newfile[0] == '"': + newfile = newfile[1:-1] + # This implements cpp-like semantics for relative-path inclusion. + if isinstance(self.infile, str) and not os.path.isabs(newfile): + newfile = os.path.join(os.path.dirname(self.infile), newfile) + return (newfile, open(newfile, "r")) + + def error_leader(self, infile=None, lineno=None): + "Emit a C-compiler-like, Emacs-friendly error-message leader." 
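+        # For example, error_leader('conf.d/main.conf', 3) returns
+        # '"conf.d/main.conf", line 3: ', the '"file", line N:' shape
+        # that compilers emit and Emacs can parse.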
+ if infile is None: + infile = self.infile + if lineno is None: + lineno = self.lineno + return "\"%s\", line %d: " % (infile, lineno) + + def __iter__(self): + return self + + def __next__(self): + token = self.get_token() + if token == self.eof: + raise StopIteration + return token + +def split(s, comments=False, posix=True): + """Split the string *s* using shell-like syntax.""" + if s is None: + raise ValueError("s argument must not be None") + lex = shlex(s, posix=posix) + lex.whitespace_split = True + if not comments: + lex.commenters = '' + return list(lex) + + +def join(split_command): + """Return a shell-escaped string from *split_command*.""" + return ' '.join(quote(arg) for arg in split_command) + + +def quote(s): + """Return a shell-escaped version of the string *s*.""" + if not s: + return "''" + + if not isinstance(s, str): + raise TypeError(f"expected string object, got {type(s).__name__!r}") + + # Use bytes.translate() for performance + safe_chars = (b'%+,-./0123456789:=@' + b'ABCDEFGHIJKLMNOPQRSTUVWXYZ_' + b'abcdefghijklmnopqrstuvwxyz') + # No quoting is needed if `s` is an ASCII string consisting only of `safe_chars` + if s.isascii() and not s.encode().translate(None, delete=safe_chars): + return s + + # use single quotes, and put single quotes into double quotes + # the string $'b is then quoted as '$'"'"'b' + return "'" + s.replace("'", "'\"'\"'") + "'" + + +def _print_tokens(lexer): + while tt := lexer.get_token(): + print("Token: " + repr(tt)) + +if __name__ == '__main__': + if len(sys.argv) == 1: + _print_tokens(shlex()) + else: + fn = sys.argv[1] + with open(fn) as f: + _print_tokens(shlex(f, fn)) diff --git a/Python314_4_x86_Template/Lib/shutil.py b/Python314_4_x86_Template/Lib/shutil.py new file mode 100644 index 00000000..8d8fe145 --- /dev/null +++ b/Python314_4_x86_Template/Lib/shutil.py @@ -0,0 +1,1667 @@ +"""Utility functions for copying and archiving files and directory trees. + +XXX The functions here don't copy the resource fork or other metadata on Mac. 
+ +""" + +import os +import sys +import stat +import fnmatch +import collections +import errno + +try: + import zlib + del zlib + _ZLIB_SUPPORTED = True +except ImportError: + _ZLIB_SUPPORTED = False + +try: + import bz2 + del bz2 + _BZ2_SUPPORTED = True +except ImportError: + _BZ2_SUPPORTED = False + +try: + import lzma + del lzma + _LZMA_SUPPORTED = True +except ImportError: + _LZMA_SUPPORTED = False + +try: + from compression import zstd + del zstd + _ZSTD_SUPPORTED = True +except ImportError: + _ZSTD_SUPPORTED = False + +_WINDOWS = os.name == 'nt' +posix = nt = None +if os.name == 'posix': + import posix +elif _WINDOWS: + import nt + +if sys.platform == 'win32': + import _winapi +else: + _winapi = None + +COPY_BUFSIZE = 1024 * 1024 if _WINDOWS else 256 * 1024 +# This should never be removed, see rationale in: +# https://bugs.python.org/issue43743#msg393429 +_USE_CP_SENDFILE = (hasattr(os, "sendfile") + and sys.platform.startswith(("linux", "android", "sunos"))) +_USE_CP_COPY_FILE_RANGE = hasattr(os, "copy_file_range") +_HAS_FCOPYFILE = posix and hasattr(posix, "_fcopyfile") # macOS + +# CMD defaults in Windows 10 +_WIN_DEFAULT_PATHEXT = ".COM;.EXE;.BAT;.CMD;.VBS;.JS;.WS;.MSC" + +__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", + "copytree", "move", "rmtree", "Error", "SpecialFileError", + "make_archive", "get_archive_formats", + "register_archive_format", "unregister_archive_format", + "get_unpack_formats", "register_unpack_format", + "unregister_unpack_format", "unpack_archive", + "ignore_patterns", "chown", "which", "get_terminal_size", + "SameFileError"] + # disk_usage is added later, if available on the platform + +class Error(OSError): + pass + +class SameFileError(Error): + """Raised when source and destination are the same file.""" + +class SpecialFileError(OSError): + """Raised when trying to do a kind of operation (e.g. copying) which is + not supported on a special file (e.g. a named pipe)""" + + +class ReadError(OSError): + """Raised when an archive cannot be read""" + +class RegistryError(Exception): + """Raised when a registry operation with the archiving + and unpacking registries fails""" + +class _GiveupOnFastCopy(Exception): + """Raised as a signal to fallback on using raw read()/write() + file copy when fast-copy functions fail to do so. + """ + +def _fastcopy_fcopyfile(fsrc, fdst, flags): + """Copy a regular file content or metadata by using high-performance + fcopyfile(3) syscall (macOS). + """ + try: + infd = fsrc.fileno() + outfd = fdst.fileno() + except Exception as err: + raise _GiveupOnFastCopy(err) # not a regular file + + try: + posix._fcopyfile(infd, outfd, flags) + except OSError as err: + err.filename = fsrc.name + err.filename2 = fdst.name + if err.errno in {errno.EINVAL, errno.ENOTSUP}: + raise _GiveupOnFastCopy(err) + else: + raise err from None + +def _determine_linux_fastcopy_blocksize(infd): + """Determine blocksize for fastcopying on Linux. + + Hopefully the whole file will be copied in a single call. + The copying itself should be performed in a loop 'till EOF is + reached (0 return) so a blocksize smaller or bigger than the actual + file size should not make any difference, also in case the file + content changes while being copied. + """ + try: + blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8 MiB + except OSError: + blocksize = 2 ** 27 # 128 MiB + # On 32-bit architectures truncate to 1 GiB to avoid OverflowError, + # see gh-82500. 
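+    # For example, an 80 MiB file yields an 80 MiB blocksize (one call can
+    # copy it whole), a tiny file still gets the 8 MiB floor, and a failed
+    # fstat() falls back to 128 MiB.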
+    if sys.maxsize < 2 ** 32:
+        blocksize = min(blocksize, 2 ** 30)
+    return blocksize
+
+def _fastcopy_copy_file_range(fsrc, fdst):
+    """Copy data from one regular mmap-like fd to another by using
+    a high-performance copy_file_range(2) syscall that gives filesystems
+    an opportunity to implement the use of reflinks or server-side copy.
+
+    This should work on Linux >= 4.5 only.
+    """
+    try:
+        infd = fsrc.fileno()
+        outfd = fdst.fileno()
+    except Exception as err:
+        raise _GiveupOnFastCopy(err)  # not a regular file
+
+    blocksize = _determine_linux_fastcopy_blocksize(infd)
+    offset = 0
+    while True:
+        try:
+            n_copied = os.copy_file_range(infd, outfd, blocksize, offset_dst=offset)
+        except OSError as err:
+            # ...in order to have a more informative exception.
+            err.filename = fsrc.name
+            err.filename2 = fdst.name
+
+            if err.errno == errno.ENOSPC:  # filesystem is full
+                raise err from None
+
+            # Give up on first call and if no data was copied.
+            if offset == 0 and os.lseek(outfd, 0, os.SEEK_CUR) == 0:
+                raise _GiveupOnFastCopy(err)
+
+            raise err
+        else:
+            if n_copied == 0:
+                # If no bytes have been copied yet, copy_file_range
+                # might silently fail.
+                # https://lore.kernel.org/linux-fsdevel/20210126233840.GG4626@dread.disaster.area/T/#m05753578c7f7882f6e9ffe01f981bc223edef2b0
+                if offset == 0:
+                    raise _GiveupOnFastCopy()
+                break
+            offset += n_copied
+
+def _fastcopy_sendfile(fsrc, fdst):
+    """Copy data from one regular mmap-like fd to another by using
+    high-performance sendfile(2) syscall.
+    This should work on Linux >= 2.6.33, Android and Solaris.
+    """
+    # Note: copyfileobj() is left alone in order to not introduce any
+    # unexpected breakage. Possible risks by using zero-copy calls
+    # in copyfileobj() are:
+    # - fdst cannot be open in "a"(ppend) mode
+    # - fsrc and fdst may be open in "t"(ext) mode
+    # - fsrc may be a BufferedReader (which hides unread data in a buffer),
+    #   GzipFile (which decompresses data), HTTPResponse (which decodes
+    #   chunks).
+    # - possibly others (e.g. encrypted fs/partition?)
+    global _USE_CP_SENDFILE
+    try:
+        infd = fsrc.fileno()
+        outfd = fdst.fileno()
+    except Exception as err:
+        raise _GiveupOnFastCopy(err)  # not a regular file
+
+    blocksize = _determine_linux_fastcopy_blocksize(infd)
+    offset = 0
+    while True:
+        try:
+            sent = os.sendfile(outfd, infd, offset, blocksize)
+        except OSError as err:
+            # ...in order to have a more informative exception.
+            err.filename = fsrc.name
+            err.filename2 = fdst.name
+
+            if err.errno == errno.ENOTSOCK:
+                # sendfile() on this platform (probably Linux < 2.6.33)
+                # does not support copies between regular files (only
+                # sockets).
+                _USE_CP_SENDFILE = False
+                raise _GiveupOnFastCopy(err)
+
+            if err.errno == errno.ENOSPC:  # filesystem is full
+                raise err from None
+
+            # Give up on first call and if no data was copied.
+            if offset == 0 and os.lseek(outfd, 0, os.SEEK_CUR) == 0:
+                raise _GiveupOnFastCopy(err)
+
+            raise err
+        else:
+            if sent == 0:
+                break  # EOF
+            offset += sent
+
+def _copyfileobj_readinto(fsrc, fdst, length=COPY_BUFSIZE):
+    """readinto()/memoryview() based variant of copyfileobj().
+    *fsrc* must support readinto() method and both files must be
+    open in binary mode.
+    """
+    # Localize variable access to minimize overhead.
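+    # The bytearray is allocated once and reused; when a read comes up short
+    # (n < length), slicing the memoryview first ensures only the n freshly
+    # read bytes are written out, not stale data from a previous iteration.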
+ fsrc_readinto = fsrc.readinto + fdst_write = fdst.write + with memoryview(bytearray(length)) as mv: + while True: + n = fsrc_readinto(mv) + if not n: + break + elif n < length: + with mv[:n] as smv: + fdst_write(smv) + break + else: + fdst_write(mv) + +def copyfileobj(fsrc, fdst, length=0): + """copy data from file-like object fsrc to file-like object fdst""" + if not length: + length = COPY_BUFSIZE + # Localize variable access to minimize overhead. + fsrc_read = fsrc.read + fdst_write = fdst.write + while buf := fsrc_read(length): + fdst_write(buf) + +def _samefile(src, dst): + # Macintosh, Unix. + if isinstance(src, os.DirEntry) and hasattr(os.path, 'samestat'): + try: + return os.path.samestat(src.stat(), os.stat(dst)) + except OSError: + return False + + if hasattr(os.path, 'samefile'): + try: + return os.path.samefile(src, dst) + except OSError: + return False + + # All other platforms: check for same pathname. + return (os.path.normcase(os.path.abspath(src)) == + os.path.normcase(os.path.abspath(dst))) + +def _stat(fn): + return fn.stat() if isinstance(fn, os.DirEntry) else os.stat(fn) + +def _islink(fn): + return fn.is_symlink() if isinstance(fn, os.DirEntry) else os.path.islink(fn) + +def copyfile(src, dst, *, follow_symlinks=True): + """Copy data from src to dst in the most efficient way possible. + + If follow_symlinks is not set and src is a symbolic link, a new + symlink will be created instead of copying the file it points to. + + """ + sys.audit("shutil.copyfile", src, dst) + + if _samefile(src, dst): + raise SameFileError("{!r} and {!r} are the same file".format(src, dst)) + + file_size = 0 + for i, fn in enumerate([src, dst]): + try: + st = _stat(fn) + except OSError: + # File most likely does not exist + pass + else: + # XXX What about other special files? (sockets, devices...) + if stat.S_ISFIFO(st.st_mode): + fn = fn.path if isinstance(fn, os.DirEntry) else fn + raise SpecialFileError("`%s` is a named pipe" % fn) + if _WINDOWS and i == 0: + file_size = st.st_size + + if not follow_symlinks and _islink(src): + os.symlink(os.readlink(src), dst) + else: + with open(src, 'rb') as fsrc: + try: + with open(dst, 'wb') as fdst: + # macOS + if _HAS_FCOPYFILE: + try: + _fastcopy_fcopyfile(fsrc, fdst, posix._COPYFILE_DATA) + return dst + except _GiveupOnFastCopy: + pass + # Linux / Android / Solaris + elif _USE_CP_SENDFILE or _USE_CP_COPY_FILE_RANGE: + # reflink may be implicit in copy_file_range. + if _USE_CP_COPY_FILE_RANGE: + try: + _fastcopy_copy_file_range(fsrc, fdst) + return dst + except _GiveupOnFastCopy: + pass + if _USE_CP_SENDFILE: + try: + _fastcopy_sendfile(fsrc, fdst) + return dst + except _GiveupOnFastCopy: + pass + # Windows, see: + # https://github.com/python/cpython/pull/7160#discussion_r195405230 + elif _WINDOWS and file_size > 0: + _copyfileobj_readinto(fsrc, fdst, min(file_size, COPY_BUFSIZE)) + return dst + + copyfileobj(fsrc, fdst) + + # Issue 43219, raise a less confusing exception + except IsADirectoryError as e: + if not os.path.exists(dst): + raise FileNotFoundError(f'Directory does not exist: {dst}') from e + else: + raise + + return dst + +def copymode(src, dst, *, follow_symlinks=True): + """Copy mode bits from src to dst. + + If follow_symlinks is not set, symlinks aren't followed if and only + if both `src` and `dst` are symlinks. If `lchmod` isn't available + (e.g. Linux) this method does nothing. 
+
+    """
+    sys.audit("shutil.copymode", src, dst)
+
+    if not follow_symlinks and _islink(src) and os.path.islink(dst):
+        if hasattr(os, 'lchmod'):
+            stat_func, chmod_func = os.lstat, os.lchmod
+        else:
+            return
+    else:
+        stat_func = _stat
+        if os.name == 'nt' and os.path.islink(dst):
+            def chmod_func(*args):
+                os.chmod(*args, follow_symlinks=True)
+        else:
+            chmod_func = os.chmod
+
+    st = stat_func(src)
+    chmod_func(dst, stat.S_IMODE(st.st_mode))
+
+if hasattr(os, 'listxattr'):
+    def _copyxattr(src, dst, *, follow_symlinks=True):
+        """Copy extended filesystem attributes from `src` to `dst`.
+
+        Overwrite existing attributes.
+
+        If `follow_symlinks` is false, symlinks won't be followed.
+
+        """
+
+        try:
+            names = os.listxattr(src, follow_symlinks=follow_symlinks)
+        except OSError as e:
+            if e.errno not in (errno.ENOTSUP, errno.ENODATA, errno.EINVAL):
+                raise
+            return
+        for name in names:
+            try:
+                value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
+                os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
+            except OSError as e:
+                if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA,
+                                   errno.EINVAL, errno.EACCES):
+                    raise
+else:
+    def _copyxattr(*args, **kwargs):
+        pass
+
+def copystat(src, dst, *, follow_symlinks=True):
+    """Copy file metadata
+
+    Copy the permission bits, last access time, last modification time, and
+    flags from `src` to `dst`. On Linux, copystat() also copies the "extended
+    attributes" where possible. The file contents, owner, and group are
+    unaffected. `src` and `dst` are path-like objects or path names given as
+    strings.
+
+    If the optional flag `follow_symlinks` is not set, symlinks aren't
+    followed if and only if both `src` and `dst` are symlinks.
+    """
+    sys.audit("shutil.copystat", src, dst)
+
+    def _nop(*args, ns=None, follow_symlinks=None):
+        pass
+
+    # follow symlinks (aka don't not follow symlinks)
+    follow = follow_symlinks or not (_islink(src) and os.path.islink(dst))
+    if follow:
+        # use the real function if it exists
+        def lookup(name):
+            return getattr(os, name, _nop)
+    else:
+        # use the real function only if it exists
+        # *and* it supports follow_symlinks
+        def lookup(name):
+            fn = getattr(os, name, _nop)
+            if fn in os.supports_follow_symlinks:
+                return fn
+            return _nop
+
+    if isinstance(src, os.DirEntry):
+        st = src.stat(follow_symlinks=follow)
+    else:
+        st = lookup("stat")(src, follow_symlinks=follow)
+    mode = stat.S_IMODE(st.st_mode)
+    lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
+                    follow_symlinks=follow)
+    # We must copy extended attributes before the file is (potentially)
+    # chmod()'ed read-only, otherwise setxattr() will error with -EACCES.
+    _copyxattr(src, dst, follow_symlinks=follow)
+    try:
+        lookup("chmod")(dst, mode, follow_symlinks=follow)
+    except NotImplementedError:
+        # if we got a NotImplementedError, it's because
+        #   * follow_symlinks=False,
+        #   * lchmod() is unavailable, and
+        #   * either
+        #       * fchmodat() is unavailable or
+        #       * fchmodat() doesn't implement AT_SYMLINK_NOFOLLOW.
+        #         (it returned ENOTSUP.)
+        # therefore we're out of options--we simply cannot chmod the
+        # symlink. give up, suppress the error.
+        # (which is what shutil always did in this circumstance.)
+ pass + if hasattr(st, 'st_flags'): + try: + lookup("chflags")(dst, st.st_flags, follow_symlinks=follow) + except OSError as why: + for err in 'EOPNOTSUPP', 'ENOTSUP': + if hasattr(errno, err) and why.errno == getattr(errno, err): + break + else: + raise + +def copy(src, dst, *, follow_symlinks=True): + """Copy data and mode bits ("cp src dst"). Return the file's destination. + + The destination may be a directory. + + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + + If source and destination are the same file, a SameFileError will be + raised. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst, follow_symlinks=follow_symlinks) + copymode(src, dst, follow_symlinks=follow_symlinks) + return dst + +def copy2(src, dst, *, follow_symlinks=True): + """Copy data and metadata. Return the file's destination. + + Metadata is copied with copystat(). Please see the copystat function + for more information. + + The destination may be a directory. + + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + + if hasattr(_winapi, "CopyFile2"): + src_ = os.fsdecode(src) + dst_ = os.fsdecode(dst) + flags = _winapi.COPY_FILE_ALLOW_DECRYPTED_DESTINATION # for compat + if not follow_symlinks: + flags |= _winapi.COPY_FILE_COPY_SYMLINK + try: + _winapi.CopyFile2(src_, dst_, flags) + return dst + except OSError as exc: + if (exc.winerror == _winapi.ERROR_PRIVILEGE_NOT_HELD + and not follow_symlinks): + # Likely encountered a symlink we aren't allowed to create. + # Fall back on the old code + pass + elif exc.winerror == _winapi.ERROR_ACCESS_DENIED: + # Possibly encountered a hidden or readonly file we can't + # overwrite. Fall back on old code + pass + else: + raise + + copyfile(src, dst, follow_symlinks=follow_symlinks) + copystat(src, dst, follow_symlinks=follow_symlinks) + return dst + +def ignore_patterns(*patterns): + """Function that can be used as copytree() ignore parameter. + + Patterns is a sequence of glob-style patterns + that are used to exclude files""" + def _ignore_patterns(path, names): + ignored_names = [] + for pattern in patterns: + ignored_names.extend(fnmatch.filter(names, pattern)) + return set(ignored_names) + return _ignore_patterns + +def _copytree(entries, src, dst, symlinks, ignore, copy_function, + ignore_dangling_symlinks, dirs_exist_ok=False): + if ignore is not None: + ignored_names = ignore(os.fspath(src), [x.name for x in entries]) + else: + ignored_names = () + + os.makedirs(dst, exist_ok=dirs_exist_ok) + errors = [] + use_srcentry = copy_function is copy2 or copy_function is copy + + for srcentry in entries: + if srcentry.name in ignored_names: + continue + srcname = os.path.join(src, srcentry.name) + dstname = os.path.join(dst, srcentry.name) + srcobj = srcentry if use_srcentry else srcname + try: + is_symlink = srcentry.is_symlink() + if is_symlink and os.name == 'nt': + # Special check for directory junctions, which appear as + # symlinks but we want to recurse. + lstat = srcentry.stat(follow_symlinks=False) + if lstat.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT: + is_symlink = False + if is_symlink: + linkto = os.readlink(srcname) + if symlinks: + # We can't just leave it to `copy_function` because legacy + # code with a custom `copy_function` may rely on copytree + # doing the right thing. 
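+                        # With symlinks=True the link itself is recreated and
+                        # copystat() runs with follow_symlinks=False, so the
+                        # metadata lands on the new link (where the platform
+                        # allows), much like "cp -P" preserves links.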
+ os.symlink(linkto, dstname) + copystat(srcobj, dstname, follow_symlinks=not symlinks) + else: + # ignore dangling symlink if the flag is on + if not os.path.exists(linkto) and ignore_dangling_symlinks: + continue + # otherwise let the copy occur. copy2 will raise an error + if srcentry.is_dir(): + copytree(srcobj, dstname, symlinks, ignore, + copy_function, ignore_dangling_symlinks, + dirs_exist_ok) + else: + copy_function(srcobj, dstname) + elif srcentry.is_dir(): + copytree(srcobj, dstname, symlinks, ignore, copy_function, + ignore_dangling_symlinks, dirs_exist_ok) + else: + # Will raise a SpecialFileError for unsupported file types + copy_function(srcobj, dstname) + # catch the Error from the recursive copytree so that we can + # continue with other files + except Error as err: + errors.extend(err.args[0]) + except OSError as why: + errors.append((srcname, dstname, str(why))) + try: + copystat(src, dst) + except OSError as why: + # Copying file access times may fail on Windows + if getattr(why, 'winerror', None) is None: + errors.append((src, dst, str(why))) + if errors: + raise Error(errors) + return dst + +def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, + ignore_dangling_symlinks=False, dirs_exist_ok=False): + """Recursively copy a directory tree and return the destination directory. + + If exception(s) occur, an Error is raised with a list of reasons. + + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. If the file pointed to by the symlink doesn't + exist, an exception will be added in the list of errors raised in + an Error exception at the end of the copy process. + + You can set the optional ignore_dangling_symlinks flag to true if you + want to silence this exception. Notice that this has no effect on + platforms that don't support os.symlink. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + The optional copy_function argument is a callable that will be used + to copy each file. It will be called with the source path and the + destination path as arguments. By default, copy2() is used, but any + function that supports the same signature (like copy()) can be used. + + If dirs_exist_ok is false (the default) and `dst` already exists, a + `FileExistsError` is raised. If `dirs_exist_ok` is true, the copying + operation will continue if it encounters existing directories, and files + within the `dst` tree will be overwritten by corresponding files from the + `src` tree. 
+ """ + sys.audit("shutil.copytree", src, dst) + with os.scandir(src) as itr: + entries = list(itr) + return _copytree(entries=entries, src=src, dst=dst, symlinks=symlinks, + ignore=ignore, copy_function=copy_function, + ignore_dangling_symlinks=ignore_dangling_symlinks, + dirs_exist_ok=dirs_exist_ok) + +if hasattr(os.stat_result, 'st_file_attributes'): + def _rmtree_islink(st): + return (stat.S_ISLNK(st.st_mode) or + (st.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT + and st.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT)) +else: + def _rmtree_islink(st): + return stat.S_ISLNK(st.st_mode) + +# version vulnerable to race conditions +def _rmtree_unsafe(path, dir_fd, onexc): + if dir_fd is not None: + raise NotImplementedError("dir_fd unavailable on this platform") + try: + st = os.lstat(path) + except OSError as err: + onexc(os.lstat, path, err) + return + try: + if _rmtree_islink(st): + # symlinks to directories are forbidden, see bug #1669 + raise OSError("Cannot call rmtree on a symbolic link") + except OSError as err: + onexc(os.path.islink, path, err) + # can't continue even if onexc hook returns + return + def onerror(err): + if not isinstance(err, FileNotFoundError): + onexc(os.scandir, err.filename, err) + results = os.walk(path, topdown=False, onerror=onerror, followlinks=os._walk_symlinks_as_files) + for dirpath, dirnames, filenames in results: + for name in dirnames: + fullname = os.path.join(dirpath, name) + try: + os.rmdir(fullname) + except FileNotFoundError: + continue + except OSError as err: + onexc(os.rmdir, fullname, err) + for name in filenames: + fullname = os.path.join(dirpath, name) + try: + os.unlink(fullname) + except FileNotFoundError: + continue + except OSError as err: + onexc(os.unlink, fullname, err) + try: + os.rmdir(path) + except FileNotFoundError: + pass + except OSError as err: + onexc(os.rmdir, path, err) + +# Version using fd-based APIs to protect against races +def _rmtree_safe_fd(path, dir_fd, onexc): + # While the unsafe rmtree works fine on bytes, the fd based does not. + if isinstance(path, bytes): + path = os.fsdecode(path) + stack = [(os.lstat, dir_fd, path, None)] + try: + while stack: + _rmtree_safe_fd_step(stack, onexc) + finally: + # Close any file descriptors still on the stack. + while stack: + func, fd, path, entry = stack.pop() + if func is not os.close: + continue + try: + os.close(fd) + except OSError as err: + onexc(os.close, path, err) + +def _rmtree_safe_fd_step(stack, onexc): + # Each stack item has four elements: + # * func: The first operation to perform: os.lstat, os.close or os.rmdir. + # Walking a directory starts with an os.lstat() to detect symlinks; in + # this case, func is updated before subsequent operations and passed to + # onexc() if an error occurs. + # * dirfd: Open file descriptor, or None if we're processing the top-level + # directory given to rmtree() and the user didn't supply dir_fd. + # * path: Path of file to operate upon. This is passed to onexc() if an + # error occurs. + # * orig_entry: os.DirEntry, or None if we're processing the top-level + # directory given to rmtree(). We used the cached stat() of the entry to + # save a call to os.lstat() when walking subdirectories. + func, dirfd, path, orig_entry = stack.pop() + name = path if orig_entry is None else orig_entry.name + try: + if func is os.close: + os.close(dirfd) + return + if func is os.rmdir: + os.rmdir(name, dir_fd=dirfd) + return + + # Note: To guard against symlink races, we use the standard + # lstat()/open()/fstat() trick. 
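+        # Concretely: lstat() records the expected identity, open() gets an
+        # fd, and os.path.samestat(orig_st, os.fstat(topfd)) below verifies
+        # the device/inode pair is unchanged -- if the directory was swapped
+        # for a symlink in between, the check fails and rmtree refuses it.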
+ assert func is os.lstat + if orig_entry is None: + orig_st = os.lstat(name, dir_fd=dirfd) + else: + orig_st = orig_entry.stat(follow_symlinks=False) + + func = os.open # For error reporting. + topfd = os.open(name, os.O_RDONLY | os.O_NONBLOCK, dir_fd=dirfd) + + func = os.path.islink # For error reporting. + try: + if not os.path.samestat(orig_st, os.fstat(topfd)): + # Symlinks to directories are forbidden, see GH-46010. + raise OSError("Cannot call rmtree on a symbolic link") + stack.append((os.rmdir, dirfd, path, orig_entry)) + finally: + stack.append((os.close, topfd, path, orig_entry)) + + func = os.scandir # For error reporting. + with os.scandir(topfd) as scandir_it: + entries = list(scandir_it) + for entry in entries: + fullname = os.path.join(path, entry.name) + try: + if entry.is_dir(follow_symlinks=False): + # Traverse into sub-directory. + stack.append((os.lstat, topfd, fullname, entry)) + continue + except FileNotFoundError: + continue + except OSError: + pass + try: + os.unlink(entry.name, dir_fd=topfd) + except FileNotFoundError: + continue + except OSError as err: + onexc(os.unlink, fullname, err) + except FileNotFoundError as err: + if orig_entry is None or func is os.close: + err.filename = path + onexc(func, path, err) + except OSError as err: + err.filename = path + onexc(func, path, err) + +_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <= + os.supports_dir_fd and + os.scandir in os.supports_fd and + os.stat in os.supports_follow_symlinks) +_rmtree_impl = _rmtree_safe_fd if _use_fd_functions else _rmtree_unsafe + +def rmtree(path, ignore_errors=False, onerror=None, *, onexc=None, dir_fd=None): + """Recursively delete a directory tree. + + If dir_fd is not None, it should be a file descriptor open to a directory; + path will then be relative to that directory. + dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError. + + If ignore_errors is set, errors are ignored; otherwise, if onexc or + onerror is set, it is called to handle the error with arguments (func, + path, exc_info) where func is platform and implementation dependent; + path is the argument to that function that caused it to fail; and + the value of exc_info describes the exception. For onexc it is the + exception instance, and for onerror it is a tuple as returned by + sys.exc_info(). If ignore_errors is false and both onexc and + onerror are None, the exception is reraised. + + onerror is deprecated and only remains for backwards compatibility. + If both onerror and onexc are set, onerror is ignored and onexc is used. + """ + + sys.audit("shutil.rmtree", path, dir_fd) + if ignore_errors: + def onexc(*args): + pass + elif onerror is None and onexc is None: + def onexc(*args): + raise + elif onexc is None: + if onerror is None: + def onexc(*args): + raise + else: + # delegate to onerror + def onexc(*args): + func, path, exc = args + if exc is None: + exc_info = None, None, None + else: + exc_info = type(exc), exc, exc.__traceback__ + return onerror(func, path, exc_info) + + _rmtree_impl(path, dir_fd, onexc) + +# Allow introspection of whether or not the hardening against symlink +# attacks is supported on the current platform +rmtree.avoids_symlink_attacks = _use_fd_functions + +def _basename(path): + """A basename() variant which first strips the trailing slash, if present. + Thus we always get the last component of the path, even for directories. + + path: Union[PathLike, str] + + e.g. 
+ >>> os.path.basename('/bar/foo') + 'foo' + >>> os.path.basename('/bar/foo/') + '' + >>> _basename('/bar/foo/') + 'foo' + """ + path = os.fspath(path) + sep = os.path.sep + (os.path.altsep or '') + return os.path.basename(path.rstrip(sep)) + +def move(src, dst, copy_function=copy2): + """Recursively move a file or directory to another location. This is + similar to the Unix "mv" command. Return the file or directory's + destination. + + If dst is an existing directory or a symlink to a directory, then src is + moved inside that directory. The destination path in that directory must + not already exist. + + If dst already exists but is not a directory, it may be overwritten + depending on os.rename() semantics. + + If the destination is on our current filesystem, then rename() is used. + Otherwise, src is copied to the destination and then removed. Symlinks are + recreated under the new name if os.rename() fails because of cross + filesystem renames. + + The optional `copy_function` argument is a callable that will be used + to copy the source or it will be delegated to `copytree`. + By default, copy2() is used, but any function that supports the same + signature (like copy()) can be used. + + A lot more could be done here... A look at a mv.c shows a lot of + the issues this implementation glosses over. + + """ + sys.audit("shutil.move", src, dst) + real_dst = dst + if os.path.isdir(dst): + if _samefile(src, dst) and not os.path.islink(src): + # We might be on a case insensitive filesystem, + # perform the rename anyway. + os.rename(src, dst) + return + + # Using _basename instead of os.path.basename is important, as we must + # ignore any trailing slash to avoid the basename returning '' + real_dst = os.path.join(dst, _basename(src)) + + if os.path.exists(real_dst): + raise Error("Destination path '%s' already exists" % real_dst) + try: + os.rename(src, real_dst) + except OSError: + if os.path.islink(src): + linkto = os.readlink(src) + os.symlink(linkto, real_dst) + os.unlink(src) + elif os.path.isdir(src): + if _destinsrc(src, dst): + raise Error("Cannot move a directory '%s' into itself" + " '%s'." % (src, dst)) + if (_is_immutable(src) + or (not os.access(src, os.W_OK) and os.listdir(src) + and sys.platform == 'darwin')): + raise PermissionError("Cannot move the non-empty directory " + "'%s': Lacking write permission to '%s'." 
+ % (src, src)) + copytree(src, real_dst, copy_function=copy_function, + symlinks=True) + rmtree(src) + else: + copy_function(src, real_dst) + os.unlink(src) + return real_dst + +def _destinsrc(src, dst): + src = os.path.abspath(src) + dst = os.path.abspath(dst) + if not src.endswith(os.path.sep): + src += os.path.sep + if not dst.endswith(os.path.sep): + dst += os.path.sep + return dst.startswith(src) + +def _is_immutable(src): + st = _stat(src) + immutable_states = [stat.UF_IMMUTABLE, stat.SF_IMMUTABLE] + return hasattr(st, 'st_flags') and st.st_flags in immutable_states + +def _get_gid(name): + """Returns a gid, given a group name.""" + if name is None: + return None + + try: + from grp import getgrnam + except ImportError: + return None + + try: + result = getgrnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _get_uid(name): + """Returns an uid, given a user name.""" + if name is None: + return None + + try: + from pwd import getpwnam + except ImportError: + return None + + try: + result = getpwnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, + owner=None, group=None, logger=None, root_dir=None): + """Create a (possibly compressed) tar file from all the files under + 'base_dir'. + + 'compress' must be "gzip" (the default), "bzip2", "xz", "zst", or None. + + 'owner' and 'group' can be used to define an owner and a group for the + archive that is being built. If not provided, the current owner and group + will be used. + + The output tar file will be named 'base_name' + ".tar", possibly plus + the appropriate compression extension (".gz", ".bz2", ".xz", or ".zst"). + + Returns the output filename. + """ + if compress is None: + tar_compression = '' + elif _ZLIB_SUPPORTED and compress == 'gzip': + tar_compression = 'gz' + elif _BZ2_SUPPORTED and compress == 'bzip2': + tar_compression = 'bz2' + elif _LZMA_SUPPORTED and compress == 'xz': + tar_compression = 'xz' + elif _ZSTD_SUPPORTED and compress == 'zst': + tar_compression = 'zst' + else: + raise ValueError("bad value for 'compress', or compression format not " + "supported : {0}".format(compress)) + + import tarfile # late import for breaking circular dependency + + compress_ext = '.' + tar_compression if compress else '' + archive_name = base_name + '.tar' + compress_ext + archive_dir = os.path.dirname(archive_name) + + if archive_dir and not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # creating the tarball + if logger is not None: + logger.info('Creating tar archive') + + uid = _get_uid(owner) + gid = _get_gid(group) + + def _set_uid_gid(tarinfo): + if gid is not None: + tarinfo.gid = gid + tarinfo.gname = group + if uid is not None: + tarinfo.uid = uid + tarinfo.uname = owner + return tarinfo + + if not dry_run: + tar = tarfile.open(archive_name, 'w|%s' % tar_compression) + arcname = base_dir + if root_dir is not None: + base_dir = os.path.join(root_dir, base_dir) + try: + tar.add(base_dir, arcname, filter=_set_uid_gid) + finally: + tar.close() + + if root_dir is not None: + archive_name = os.path.abspath(archive_name) + return archive_name + +def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, + logger=None, owner=None, group=None, root_dir=None): + """Create a zip file from all the files under 'base_dir'. 
+ + The output zip file will be named 'base_name' + ".zip". Returns the + name of the output zip file. + """ + import zipfile # late import for breaking circular dependency + + zip_filename = base_name + ".zip" + archive_dir = os.path.dirname(base_name) + + if archive_dir and not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + if logger is not None: + logger.info("creating '%s' and adding '%s' to it", + zip_filename, base_dir) + + if not dry_run: + with zipfile.ZipFile(zip_filename, "w", + compression=zipfile.ZIP_DEFLATED) as zf: + arcname = os.path.normpath(base_dir) + if root_dir is not None: + base_dir = os.path.join(root_dir, base_dir) + base_dir = os.path.normpath(base_dir) + if arcname != os.curdir: + zf.write(base_dir, arcname) + if logger is not None: + logger.info("adding '%s'", base_dir) + for dirpath, dirnames, filenames in os.walk(base_dir): + arcdirpath = dirpath + if root_dir is not None: + arcdirpath = os.path.relpath(arcdirpath, root_dir) + arcdirpath = os.path.normpath(arcdirpath) + for name in sorted(dirnames): + path = os.path.join(dirpath, name) + arcname = os.path.join(arcdirpath, name) + zf.write(path, arcname) + if logger is not None: + logger.info("adding '%s'", path) + for name in filenames: + path = os.path.join(dirpath, name) + path = os.path.normpath(path) + if os.path.isfile(path): + arcname = os.path.join(arcdirpath, name) + zf.write(path, arcname) + if logger is not None: + logger.info("adding '%s'", path) + + if root_dir is not None: + zip_filename = os.path.abspath(zip_filename) + return zip_filename + +_make_tarball.supports_root_dir = True +_make_zipfile.supports_root_dir = True + +# Maps the name of the archive format to a tuple containing: +# * the archiving function +# * extra keyword arguments +# * description +_ARCHIVE_FORMATS = { + 'tar': (_make_tarball, [('compress', None)], + "uncompressed tar file"), +} + +if _ZLIB_SUPPORTED: + _ARCHIVE_FORMATS['gztar'] = (_make_tarball, [('compress', 'gzip')], + "gzip'ed tar-file") + _ARCHIVE_FORMATS['zip'] = (_make_zipfile, [], "ZIP file") + +if _BZ2_SUPPORTED: + _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], + "bzip2'ed tar-file") + +if _LZMA_SUPPORTED: + _ARCHIVE_FORMATS['xztar'] = (_make_tarball, [('compress', 'xz')], + "xz'ed tar-file") + +if _ZSTD_SUPPORTED: + _ARCHIVE_FORMATS['zstdtar'] = (_make_tarball, [('compress', 'zst')], + "zstd'ed tar-file") + +def get_archive_formats(): + """Returns a list of supported formats for archiving and unarchiving. + + Each element of the returned sequence is a tuple (name, description) + """ + formats = [(name, registry[2]) for name, registry in + _ARCHIVE_FORMATS.items()] + formats.sort() + return formats + +def register_archive_format(name, function, extra_args=None, description=''): + """Registers an archive format. + + name is the name of the format. function is the callable that will be + used to create archives. If provided, extra_args is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_archive_formats() function. 
+ """ + if extra_args is None: + extra_args = [] + if not callable(function): + raise TypeError('The %s object is not callable' % function) + if not isinstance(extra_args, (tuple, list)): + raise TypeError('extra_args needs to be a sequence') + for element in extra_args: + if not isinstance(element, (tuple, list)) or len(element) !=2: + raise TypeError('extra_args elements are : (arg_name, value)') + + _ARCHIVE_FORMATS[name] = (function, extra_args, description) + +def unregister_archive_format(name): + del _ARCHIVE_FORMATS[name] + +def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, + dry_run=0, owner=None, group=None, logger=None): + """Create an archive file (eg. zip or tar). + + 'base_name' is the name of the file to create, minus any format-specific + extension; 'format' is the archive format: one of "zip", "tar", "gztar", + "bztar", "xztar", or "zstdtar". Or any other registered format. + + 'root_dir' is a directory that will be the root directory of the + archive; ie. we typically chdir into 'root_dir' before creating the + archive. 'base_dir' is the directory where we start archiving from; + ie. 'base_dir' will be the common prefix of all files and + directories in the archive. 'root_dir' and 'base_dir' both default + to the current directory. Returns the name of the archive file. + + 'owner' and 'group' are used when creating a tar archive. By default, + uses the current owner and group. + """ + sys.audit("shutil.make_archive", base_name, format, root_dir, base_dir) + try: + format_info = _ARCHIVE_FORMATS[format] + except KeyError: + raise ValueError("unknown archive format '%s'" % format) from None + + kwargs = {'dry_run': dry_run, 'logger': logger, + 'owner': owner, 'group': group} + + func = format_info[0] + for arg, val in format_info[1]: + kwargs[arg] = val + + if base_dir is None: + base_dir = os.curdir + + supports_root_dir = getattr(func, 'supports_root_dir', False) + save_cwd = None + if root_dir is not None: + stmd = os.stat(root_dir).st_mode + if not stat.S_ISDIR(stmd): + raise NotADirectoryError(errno.ENOTDIR, 'Not a directory', root_dir) + + if supports_root_dir: + # Support path-like base_name here for backwards-compatibility. + base_name = os.fspath(base_name) + kwargs['root_dir'] = root_dir + else: + save_cwd = os.getcwd() + if logger is not None: + logger.debug("changing into '%s'", root_dir) + base_name = os.path.abspath(base_name) + if not dry_run: + os.chdir(root_dir) + + try: + filename = func(base_name, base_dir, **kwargs) + finally: + if save_cwd is not None: + if logger is not None: + logger.debug("changing back to '%s'", save_cwd) + os.chdir(save_cwd) + + return filename + + +def get_unpack_formats(): + """Returns a list of supported formats for unpacking. 
+ + Each element of the returned sequence is a tuple + (name, extensions, description) + """ + formats = [(name, info[0], info[3]) for name, info in + _UNPACK_FORMATS.items()] + formats.sort() + return formats + +def _check_unpack_options(extensions, function, extra_args): + """Checks what gets registered as an unpacker.""" + # first make sure no other unpacker is registered for this extension + existing_extensions = {} + for name, info in _UNPACK_FORMATS.items(): + for ext in info[0]: + existing_extensions[ext] = name + + for extension in extensions: + if extension in existing_extensions: + msg = '%s is already registered for "%s"' + raise RegistryError(msg % (extension, + existing_extensions[extension])) + + if not callable(function): + raise TypeError('The registered function must be a callable') + + +def register_unpack_format(name, extensions, function, extra_args=None, + description=''): + """Registers an unpack format. + + `name` is the name of the format. `extensions` is a list of extensions + corresponding to the format. + + `function` is the callable that will be + used to unpack archives. The callable will receive archives to unpack. + If it's unable to handle an archive, it needs to raise a ReadError + exception. + + If provided, `extra_args` is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_unpack_formats() function. + """ + if extra_args is None: + extra_args = [] + _check_unpack_options(extensions, function, extra_args) + _UNPACK_FORMATS[name] = extensions, function, extra_args, description + +def unregister_unpack_format(name): + """Removes the pack format from the registry.""" + del _UNPACK_FORMATS[name] + +def _ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + +def _unpack_zipfile(filename, extract_dir): + """Unpack zip `filename` to `extract_dir` + """ + import zipfile # late import for breaking circular dependency + + if not zipfile.is_zipfile(filename): + raise ReadError("%s is not a zip file" % filename) + + zip = zipfile.ZipFile(filename) + try: + for info in zip.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' 
in name: + continue + + targetpath = os.path.join(extract_dir, *name.split('/')) + if not targetpath: + continue + + _ensure_directory(targetpath) + if not name.endswith('/'): + # file + with zip.open(name, 'r') as source, \ + open(targetpath, 'wb') as target: + copyfileobj(source, target) + finally: + zip.close() + +def _unpack_tarfile(filename, extract_dir, *, filter=None): + """Unpack tar/tar.gz/tar.bz2/tar.xz/tar.zst `filename` to `extract_dir` + """ + import tarfile # late import for breaking circular dependency + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise ReadError( + "%s is not a compressed or uncompressed tar file" % filename) + try: + tarobj.extractall(extract_dir, filter=filter) + finally: + tarobj.close() + +# Maps the name of the unpack format to a tuple containing: +# * extensions +# * the unpacking function +# * extra keyword arguments +# * description +_UNPACK_FORMATS = { + 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), + 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file"), +} + +if _ZLIB_SUPPORTED: + _UNPACK_FORMATS['gztar'] = (['.tar.gz', '.tgz'], _unpack_tarfile, [], + "gzip'ed tar-file") + +if _BZ2_SUPPORTED: + _UNPACK_FORMATS['bztar'] = (['.tar.bz2', '.tbz2'], _unpack_tarfile, [], + "bzip2'ed tar-file") + +if _LZMA_SUPPORTED: + _UNPACK_FORMATS['xztar'] = (['.tar.xz', '.txz'], _unpack_tarfile, [], + "xz'ed tar-file") + +if _ZSTD_SUPPORTED: + _UNPACK_FORMATS['zstdtar'] = (['.tar.zst', '.tzst'], _unpack_tarfile, [], + "zstd'ed tar-file") + +def _find_unpack_format(filename): + for name, info in _UNPACK_FORMATS.items(): + for extension in info[0]: + if filename.endswith(extension): + return name + return None + +def unpack_archive(filename, extract_dir=None, format=None, *, filter=None): + """Unpack an archive. + + `filename` is the name of the archive. + + `extract_dir` is the name of the target directory, where the archive + is unpacked. If not provided, the current working directory is used. + + `format` is the archive format: one of "zip", "tar", "gztar", "bztar", + "xztar", or "zstdtar". Or any other registered format. If not provided, + unpack_archive will use the filename extension and see if an unpacker + was registered for that extension. + + In case none is found, a ValueError is raised. + + If `filter` is given, it is passed to the underlying + extraction function. 
+ """ + sys.audit("shutil.unpack_archive", filename, extract_dir, format) + + if extract_dir is None: + extract_dir = os.getcwd() + + extract_dir = os.fspath(extract_dir) + filename = os.fspath(filename) + + if filter is None: + filter_kwargs = {} + else: + filter_kwargs = {'filter': filter} + if format is not None: + try: + format_info = _UNPACK_FORMATS[format] + except KeyError: + raise ValueError("Unknown unpack format '{0}'".format(format)) from None + + func = format_info[1] + func(filename, extract_dir, **dict(format_info[2]), **filter_kwargs) + else: + # we need to look at the registered unpackers supported extensions + format = _find_unpack_format(filename) + if format is None: + raise ReadError("Unknown archive format '{0}'".format(filename)) + + func = _UNPACK_FORMATS[format][1] + kwargs = dict(_UNPACK_FORMATS[format][2]) | filter_kwargs + func(filename, extract_dir, **kwargs) + + +if hasattr(os, 'statvfs'): + + __all__.append('disk_usage') + _ntuple_diskusage = collections.namedtuple('usage', 'total used free') + _ntuple_diskusage.total.__doc__ = 'Total space in bytes' + _ntuple_diskusage.used.__doc__ = 'Used space in bytes' + _ntuple_diskusage.free.__doc__ = 'Free space in bytes' + + def disk_usage(path): + """Return disk usage statistics about the given path. + + Returned value is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + """ + st = os.statvfs(path) + free = st.f_bavail * st.f_frsize + total = st.f_blocks * st.f_frsize + used = (st.f_blocks - st.f_bfree) * st.f_frsize + return _ntuple_diskusage(total, used, free) + +elif _WINDOWS: + + __all__.append('disk_usage') + _ntuple_diskusage = collections.namedtuple('usage', 'total used free') + + def disk_usage(path): + """Return disk usage statistics about the given path. + + Returned values is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + """ + total, free = nt._getdiskusage(path) + used = total - free + return _ntuple_diskusage(total, used, free) + + +def chown(path, user=None, group=None, *, dir_fd=None, follow_symlinks=True): + """Change owner user and group of the given path. + + user and group can be the uid/gid or the user/group names, and in that case, + they are converted to their respective uid/gid. + + If dir_fd is set, it should be an open file descriptor to the directory to + be used as the root of *path* if it is relative. + + If follow_symlinks is set to False and the last element of the path is a + symbolic link, chown will modify the link itself and not the file being + referenced by the link. + """ + sys.audit('shutil.chown', path, user, group) + + if user is None and group is None: + raise ValueError("user and/or group must be set") + + _user = user + _group = group + + # -1 means don't change it + if user is None: + _user = -1 + # user can either be an int (the uid) or a string (the system username) + elif isinstance(user, str): + _user = _get_uid(user) + if _user is None: + raise LookupError("no such user: {!r}".format(user)) + + if group is None: + _group = -1 + elif not isinstance(group, int): + _group = _get_gid(group) + if _group is None: + raise LookupError("no such group: {!r}".format(group)) + + os.chown(path, _user, _group, dir_fd=dir_fd, + follow_symlinks=follow_symlinks) + +def get_terminal_size(fallback=(80, 24)): + """Get the size of the terminal window. 
+
+    For each of the two dimensions, the environment variable, COLUMNS
+    and LINES respectively, is checked. If the variable is defined and
+    the value is a positive integer, it is used.
+
+    When COLUMNS or LINES is not defined, which is the common case,
+    the terminal connected to sys.__stdout__ is queried
+    by invoking os.get_terminal_size.
+
+    If the terminal size cannot be successfully queried, either because
+    the system doesn't support querying, or because we are not
+    connected to a terminal, the value given in fallback parameter
+    is used. Fallback defaults to (80, 24) which is the default
+    size used by many terminal emulators.
+
+    The value returned is a named tuple of type os.terminal_size.
+    """
+    # columns, lines are the working values
+    try:
+        columns = int(os.environ['COLUMNS'])
+    except (KeyError, ValueError):
+        columns = 0
+
+    try:
+        lines = int(os.environ['LINES'])
+    except (KeyError, ValueError):
+        lines = 0
+
+    # only query if necessary
+    if columns <= 0 or lines <= 0:
+        try:
+            size = os.get_terminal_size(sys.__stdout__.fileno())
+        except (AttributeError, ValueError, OSError):
+            # stdout is None, closed, detached, or not a terminal, or
+            # os.get_terminal_size() is unsupported
+            size = os.terminal_size(fallback)
+        if columns <= 0:
+            columns = size.columns or fallback[0]
+        if lines <= 0:
+            lines = size.lines or fallback[1]
+
+    return os.terminal_size((columns, lines))
+
+
+# Check that a given file can be accessed with the correct mode.
+# Additionally check that `file` is not a directory, as on Windows
+# directories pass the os.access check.
+def _access_check(fn, mode):
+    return (os.path.exists(fn) and os.access(fn, mode)
+            and not os.path.isdir(fn))
+
+
+def _win_path_needs_curdir(cmd, mode):
+    """
+    On Windows, we can use NeedCurrentDirectoryForExePath to figure out
+    if we should add the cwd to PATH when searching for executables if
+    the mode is executable.
+    """
+    return (not (mode & os.X_OK)) or _winapi.NeedCurrentDirectoryForExePath(
+        os.fsdecode(cmd))
+
+
+def which(cmd, mode=os.F_OK | os.X_OK, path=None):
+    """Given a command, mode, and a PATH string, return the path which
+    conforms to the given mode on the PATH, or None if there is no such
+    file.
+
+    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
+    of os.environ.get("PATH"), or can be overridden with a custom search
+    path.
+
+    """
+    use_bytes = isinstance(cmd, bytes)
+
+    # If we're given a path with a directory part, look it up directly rather
+    # than referring to PATH directories. This includes checking relative to
+    # the current directory, e.g. ./script
+    dirname, cmd = os.path.split(cmd)
+    if dirname:
+        path = [dirname]
+    else:
+        if path is None:
+            path = os.environ.get("PATH", None)
+            if path is None:
+                try:
+                    path = os.confstr("CS_PATH")
+                except (AttributeError, ValueError):
+                    # os.confstr() or CS_PATH is not available
+                    path = os.defpath
+            # bpo-35755: Don't use os.defpath if the PATH environment variable
+            # is set to an empty string
+
+        # PATH='' doesn't match, whereas PATH=':' looks in the current
+        # directory
+        if not path:
+            return None
+
+        if use_bytes:
+            path = os.fsencode(path)
+            path = path.split(os.fsencode(os.pathsep))
+        else:
+            path = os.fsdecode(path)
+            path = path.split(os.pathsep)
+
+        if sys.platform == "win32" and _win_path_needs_curdir(cmd, mode):
+            curdir = os.curdir
+            if use_bytes:
+                curdir = os.fsencode(curdir)
+            path.insert(0, curdir)
+
+    if sys.platform == "win32":
+        # PATHEXT is necessary to check on Windows.
+        pathext_source = os.getenv("PATHEXT") or _WIN_DEFAULT_PATHEXT
+        pathext = pathext_source.split(os.pathsep)
+        pathext = [ext.rstrip('.') for ext in pathext if ext]
+
+        if use_bytes:
+            pathext = [os.fsencode(ext) for ext in pathext]
+
+        files = [cmd + ext for ext in pathext]
+
+        # If X_OK in mode, simulate the cmd.exe behavior: look at direct
+        # match if and only if the extension is in PATHEXT.
+        # If X_OK not in mode, simulate the first result of where.exe:
+        # always look at direct match before a PATHEXT match.
+        normcmd = cmd.upper()
+        if not (mode & os.X_OK) or any(normcmd.endswith(ext.upper()) for ext in pathext):
+            files.insert(0, cmd)
+    else:
+        # On other platforms you don't have things like PATHEXT to tell you
+        # what file suffixes are executable, so just pass on cmd as-is.
+        files = [cmd]
+
+    seen = set()
+    for dir in path:
+        normdir = os.path.normcase(dir)
+        if normdir not in seen:
+            seen.add(normdir)
+            for thefile in files:
+                name = os.path.join(dir, thefile)
+                if _access_check(name, mode):
+                    return name
+    return None
+
+def __getattr__(name):
+    if name == "ExecError":
+        import warnings
+        warnings._deprecated(
+            "shutil.ExecError",
+            f"{warnings._DEPRECATED_MSG}; it "
+            "isn't raised by any shutil function.",
+            remove=(3, 16)
+        )
+        return RuntimeError
+    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
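Note: the shutil.py bundled in this template registers the new "zstdtar" unpack format (Python 3.14) alongside the existing tar/zip handlers, and unpack_archive forwards a tar extraction filter. A minimal usage sketch, assuming a hypothetical archive example.tar.zst and target directory out_dir:

    import shutil

    # Which formats are actually registered depends on the compression
    # libraries (zlib/bz2/lzma/zstd) this interpreter build was compiled with.
    print(shutil.get_unpack_formats())

    # For tar-based formats, filter="data" applies the stdlib tar extraction
    # filter, rejecting absolute paths and members that would escape the
    # target directory.
    shutil.unpack_archive("example.tar.zst", "out_dir", filter="data")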
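The Windows branch of which() resolves bare command names through PATHEXT, which is why lookups succeed without spelling out .exe. A small sketch of the resulting behavior (the resolved paths are illustrative):

    import os
    import shutil

    # With the default mode (os.F_OK | os.X_OK), "python" also matches
    # "python.exe", "python.bat", ... per the PATHEXT environment variable.
    print(shutil.which("python"))          # e.g. C:\Python314_4\python.exe

    # Without X_OK, the bare name is tried before any PATHEXT variant,
    # mimicking the first result of where.exe.
    print(shutil.which("python", mode=os.F_OK))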
diff --git a/Python313_13_x86_Template/Lib/signal.py b/Python314_4_x86_Template/Lib/signal.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/signal.py
rename to Python314_4_x86_Template/Lib/signal.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/INSTALLER b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/INSTALLER
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/INSTALLER
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/INSTALLER
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/METADATA b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/METADATA
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/METADATA
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/METADATA
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD
new file mode 100644
index 00000000..f37513d2
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/RECORD
@@ -0,0 +1,878 @@
+../../Scripts/pip.exe,sha256=NlQzunciK_04cYI_aNbXieP_dnjvl6v4CzpjA3KEoQM,98163
+../../Scripts/pip3.14.exe,sha256=NlQzunciK_04cYI_aNbXieP_dnjvl6v4CzpjA3KEoQM,98163
+../../Scripts/pip3.exe,sha256=NlQzunciK_04cYI_aNbXieP_dnjvl6v4CzpjA3KEoQM,98163
+pip-26.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pip-26.0.1.dist-info/METADATA,sha256=ZqIZuNGsG6l2gHiKlQjVQghFQhgSWfhEDHuCVPW3aN8,4675
+pip-26.0.1.dist-info/RECORD,,
+pip-26.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip-26.0.1.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+pip-26.0.1.dist-info/entry_points.txt,sha256=Vhf8s0IYgX37mtd4vGL73BPcxdKnqeCFPzB5-d30x8o,84
+pip-26.0.1.dist-info/licenses/AUTHORS.txt,sha256=grSl9YDNOpOFFJTX8ZYKSdgfouXi_DzlRyYGE2-u5aI,11731
+pip-26.0.1.dist-info/licenses/LICENSE.txt,sha256=Y0MApmnUmurmWxLGxIySTFGkzfPR_whtw0VtyLyqIQQ,1093
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt,sha256=hu7uh74qQ_P_H1ZJb0UfaSQ5JvAl_tuwM2ZsMExMFhs,558
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt,sha256=GrNuPipLqGMWJThPh-ngkdsfrtA0xbIzJbMjmr8sxSU,1099
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt,sha256=gI4QyKarjesUn_mz-xn0R6gICUYG1xKpylf-rTVSWZ0,14531
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md,sha256=t6M2q_OwThgOwGXN0W5wXQeeHMehT5EKpukYfza5zYc,1541
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING,sha256=SS3tuoXaWHL3jmCRvNH-pHTWYNNay03ulkuKqz8AdCc,614
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE,sha256=qdZvHVJt8C4p3Oc0NtNOVuhjL0bCdbvf_HBWnogvnxc,1331
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE,sha256=GyKwSbUmfW38I6Z79KhNjsBLn9-xpR02DkK0NCyLQVQ,1081
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE,sha256=84j9OMrRMRLB3A9mm76A5_hFQe26-3LzAw0sp2QsPJ0,751
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE,sha256=3u18F6QxgVgZCj6iOcyHmlpQJxzruYrnAl9I--WNyhU,1056
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE,sha256=M757fo-k_Rmxdg4ajtimaL2rhSyRtpLdQUJLy3Jan8o,1086
+pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115
+pip/__init__.py,sha256=3EhKF2588Ab15tmBszgD3Bp0N26sJx7VhS2Akn_qY38,355
+pip/__main__.py,sha256=WzbhHXTbSE6gBY19mNN9m4s5o_365LOvTYSgqgbdBhE,854
+pip/__pip-runner__.py,sha256=JOoEZTwrtv7jRaXBkgSQKAE04yNyfFmGHxqpHiGHvL0,1450
+pip/__pycache__/__init__.cpython-314.pyc,,
+pip/__pycache__/__main__.cpython-314.pyc,,
+pip/__pycache__/__pip-runner__.cpython-314.pyc,,
+pip/_internal/__init__.py,sha256=S7i9Dn9aSZS0MG-2Wrve3dV9TImPzvQn5jjhp9t_uf0,511
+pip/_internal/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/__pycache__/build_env.cpython-314.pyc,,
+pip/_internal/__pycache__/cache.cpython-314.pyc,,
+pip/_internal/__pycache__/configuration.cpython-314.pyc,,
+pip/_internal/__pycache__/exceptions.cpython-314.pyc,,
+pip/_internal/__pycache__/main.cpython-314.pyc,,
+pip/_internal/__pycache__/pyproject.cpython-314.pyc,,
+pip/_internal/__pycache__/self_outdated_check.cpython-314.pyc,,
+pip/_internal/__pycache__/wheel_builder.cpython-314.pyc,,
+pip/_internal/build_env.py,sha256=XpgOIlTQLgz3PvDT2n7j2NzX_rVFZLCIG7t7b2ddhcM,21911
+pip/_internal/cache.py,sha256=nMh48Yv3yu1HS1yCdscouu6B6B5zYBWdV6bhqs7gL-E,10345
+pip/_internal/cli/__init__.py,sha256=Iqg_tKA771XuMO1P4t_sDHnSKPzkUb9D0DqunAmw_ko,131
+pip/_internal/cli/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/autocompletion.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/base_command.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/cmdoptions.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/command_context.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/index_command.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/main.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/main_parser.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/parser.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/progress_bars.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/req_command.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/spinners.cpython-314.pyc,,
+pip/_internal/cli/__pycache__/status_codes.cpython-314.pyc,,
+pip/_internal/cli/autocompletion.py,sha256=ZG2cM03nlcNrs-WG_SFTW46isx9s2Go5lUD_8-iv70o,7193
+pip/_internal/cli/base_command.py,sha256=6OW75PSGzkH8Fz761WZ3OSz1TsuO3-suc6iap-sQjTM,9168
+pip/_internal/cli/cmdoptions.py,sha256=hfA9B29Nnq2vYMWhFVg7EcWjdlfdPBPU4WwWT2Lkq4A,36164
+pip/_internal/cli/command_context.py,sha256=kmu3EWZbfBega1oDamnGJTA_UaejhIQNuMj2CVmMXu0,817
+pip/_internal/cli/index_command.py,sha256=s3x75lpDXWJtCkBacTQ3qAAprldHMJCniEQ5qkQ0FiI,6484
+pip/_internal/cli/main.py,sha256=ljDQBkvBtC8xTjOdb6rDJzJUNi1s-PnVR_W5C-Mq0Dk,3137
+pip/_internal/cli/main_parser.py,sha256=YjzJAjqf78ARNsLlnJT9l6fNbpyDPJA-arOIXYsK5Ik,4403
+pip/_internal/cli/parser.py,sha256=EIFExrWX_1nrl1Ib--GOor70WYqLtduHByenb1u9xH4,13827
+pip/_internal/cli/progress_bars.py,sha256=IW1PH5n2FPqUBTP7ULQ5Yu-wyNNO9XGY3g1PT4RMu44,4706
+pip/_internal/cli/req_command.py,sha256=QjDXId0hFdopwE8hNx2eustumxUNbnOCvG_ORmUC7vM,16482
+pip/_internal/cli/spinners.py,sha256=EJzZIZNyUtJljp3-WjcsyIrqxW-HUsfWzhuW84n_Tqw,7362
+pip/_internal/cli/status_codes.py,sha256=sEFHUaUJbqv8iArL3HAtcztWZmGOFX01hTesSytDEh0,116
+pip/_internal/commands/__init__.py,sha256=aNeCbQurGWihfhQq7BqaLXHqWDQ0i3I04OS7kxK6plQ,4026
+pip/_internal/commands/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/cache.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/check.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/completion.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/configuration.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/debug.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/download.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/freeze.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/hash.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/help.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/index.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/inspect.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/install.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/list.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/lock.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/search.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/show.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/uninstall.cpython-314.pyc,,
+pip/_internal/commands/__pycache__/wheel.cpython-314.pyc,,
+pip/_internal/commands/cache.py,sha256=XjT7kjY8GSISMksFHsLvjS9Ogfi5extNlUUv-dUoWCM,9142
+pip/_internal/commands/check.py,sha256=hVFBQezQ3zj4EydoWbFQj_afPUppMt7r9JPAlY22U6Y,2244
+pip/_internal/commands/completion.py,sha256=LjvRIZ6QUiDXJL3IOMFeD-_J97HfjMGgEk0j2tWGu1U,4565
+pip/_internal/commands/configuration.py,sha256=6gNOGrVWnOLU15zUnAiNuOMhf76RRIZvCdVD0degPRk,10105
+pip/_internal/commands/debug.py,sha256=_8IqM8Fx1_lY2STu_qspr63tufF7zyFJCyYAXtxz0N4,6805
+pip/_internal/commands/download.py,sha256=LUNVobuvCdagjLBuPBaxHeBiHEiIe03fTO2m6ahC8qw,5178
+pip/_internal/commands/freeze.py,sha256=fxoW8AAc-bAqB_fXdNq2VnZ3JfWkFMg-bR6LcdDVO7A,3099
+pip/_internal/commands/hash.py,sha256=GO9pRN3wXC2kQaovK57TaLYBMc3IltOH92O6QEw6YE0,1679
+pip/_internal/commands/help.py,sha256=Bz3LcjNQXkz4Cu__pL4CZ86o4-HNLZj1NZWdlJhjuu0,1108
+pip/_internal/commands/index.py,sha256=kDpx2MO6ZxTt5PpeY4jqcssVbYhzxpkpreDe_6PPhks,5520
+pip/_internal/commands/inspect.py,sha256=ogm4UT7LRo8bIQcWUS1IiA25QdD4VHLa7JaPAodDttM,3177
+pip/_internal/commands/install.py,sha256=L6X1qi49ROVTGABhwwxDgBBTijlOpVn6XSDVZ7QW1Kc,30588
+pip/_internal/commands/list.py,sha256=L5nWuwawqSrBNsuxfyHLAagfz7XJP86tC9nK3L9YiI8,13497
+pip/_internal/commands/lock.py,sha256=145ihjUK_-7gP8O65XPDi_xMhlh5hne1ptkHdfnbAnQ,6027
+pip/_internal/commands/search.py,sha256=zbMsX_YASj6kXA6XIBgTDv0bGK51xG-CV3IynZJcE-c,5782
+pip/_internal/commands/show.py,sha256=oLVJIfKWmDKm0SsQGEi3pozNiqrXjTras_fbBSYKpBA,8066
+pip/_internal/commands/uninstall.py,sha256=CsOihqvb6ZA6O67L70oXeoLHeOfNzMM88H9g-9aocgw,3868
+pip/_internal/commands/wheel.py,sha256=L9vEzJ_E42scF_Hgh5X4Hk39nqJDKxGg4u7glDYbNWc,5880
+pip/_internal/configuration.py,sha256=WxwwSwY_Bm6QzDgf32BsujEyO8dgRedegCpgbUfDvM8,14568
+pip/_internal/distributions/__init__.py,sha256=Hq6kt6gXBgjNit5hTTWLAzeCNOKoB-N0pGYSqehrli8,858
+pip/_internal/distributions/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/distributions/__pycache__/base.cpython-314.pyc,,
+pip/_internal/distributions/__pycache__/installed.cpython-314.pyc,,
+pip/_internal/distributions/__pycache__/sdist.cpython-314.pyc,,
+pip/_internal/distributions/__pycache__/wheel.cpython-314.pyc,,
+pip/_internal/distributions/base.py,sha256=l-OTCAIs25lsapejA6IYpPZxSM5-BET4sdZDkql8jiY,1830
+pip/_internal/distributions/installed.py,sha256=kgIEE_1NzjZxLBSC-v5s64uOFZlVEt3aPrjTtL6x2XY,929
+pip/_internal/distributions/sdist.py,sha256=RYwQIbuxpKy6OjlBZCAefxpMDaoocUQ4dFtheGsiTOQ,6627
+pip/_internal/distributions/wheel.py,sha256=_HbG0OehF8dwj4UX-xV__tXLwgPus9OjMEf2NTRqBbE,1364
+pip/_internal/exceptions.py,sha256=JdPCrQ9iTLvE-GBebzBEeGP3hoTffWEKqbYEsa6cEZc,32165
+pip/_internal/index/__init__.py,sha256=tzwMH_fhQeubwMqHdSivasg1cRgTSbNg2CiMVnzMmyU,29
+pip/_internal/index/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/index/__pycache__/collector.cpython-314.pyc,,
+pip/_internal/index/__pycache__/package_finder.cpython-314.pyc,,
+pip/_internal/index/__pycache__/sources.cpython-314.pyc,,
+pip/_internal/index/collector.py,sha256=R7Gcx_4GEoSEI-iazfAZVEPG3Lp6mbZT4lbAD6NjAc0,16144
+pip/_internal/index/package_finder.py,sha256=a3_L4FDNsuDf3y8Af9J7sfsHR1ahs8o13Ths-WYwFh0,41776
+pip/_internal/index/sources.py,sha256=nXJkOjhLy-O2FsrKU9RIqCOqgY2PsoKWybtZjjRgqU0,8639
+pip/_internal/locations/__init__.py,sha256=Sd67ap1LIemvXArUDFqm8U-HuZvj9i3ApEuiIwUc9UE,14157
+pip/_internal/locations/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/locations/__pycache__/_distutils.cpython-314.pyc,,
+pip/_internal/locations/__pycache__/_sysconfig.cpython-314.pyc,,
+pip/_internal/locations/__pycache__/base.cpython-314.pyc,,
+pip/_internal/locations/_distutils.py,sha256=jpFj4V00rD9IR3vA9TqrGkwcdNVFc58LsChZavge9JY,5975
+pip/_internal/locations/_sysconfig.py,sha256=8CpTjtxaCzHSCrKpaxWnHE7aKcJrRJRmntR1ZLVysLk,7779
+pip/_internal/locations/base.py,sha256=AImjYJWxOtDkc0KKc6Y4Gz677cg91caMA4L94B9FZEg,2550
+pip/_internal/main.py,sha256=1cHqjsfFCrMFf3B5twzocxTJUdHMLoXUpy5lJoFqUi8,338
+pip/_internal/metadata/__init__.py,sha256=vp-JAxiWg_-l5F8AT0Jcey72uUnh8CDwwol9-KktHZ8,5824
+pip/_internal/metadata/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/metadata/__pycache__/_json.cpython-314.pyc,,
+pip/_internal/metadata/__pycache__/base.cpython-314.pyc,,
+pip/_internal/metadata/__pycache__/pkg_resources.cpython-314.pyc,,
+pip/_internal/metadata/_json.py,sha256=hNvnMHOXLAyNlzirWhPL9Nx2CvCqa1iRma6Osq1YfV8,2711
+pip/_internal/metadata/base.py,sha256=BGuMenlcQT8i7j9iclrfdC3vSwgvhr8gjn955cCy16s,25420
+pip/_internal/metadata/importlib/__init__.py,sha256=jUUidoxnHcfITHHaAWG1G2i5fdBYklv_uJcjo2x7VYE,135
+pip/_internal/metadata/importlib/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/metadata/importlib/__pycache__/_compat.cpython-314.pyc,,
+pip/_internal/metadata/importlib/__pycache__/_dists.cpython-314.pyc,,
+pip/_internal/metadata/importlib/__pycache__/_envs.cpython-314.pyc,,
+pip/_internal/metadata/importlib/_compat.py,sha256=sneVh4_6WxQZK4ljdl3ylVuP-q0ttSqbgl9mWt0HnOg,2804
+pip/_internal/metadata/importlib/_dists.py,sha256=znZD7MN4RC73-87KXAn6tKZv9lAQRI0AxxK2bubDvPw,8420
+pip/_internal/metadata/importlib/_envs.py,sha256=H3qVLXVh4LWvrPvu_ekXf3dfbtwnlhNJQP2pxXpccfU,5333
+pip/_internal/metadata/pkg_resources.py,sha256=NO76ZrfR2-LKJTyaXrmQoGhmJMArALvacrlZHViSDT8,10544
+pip/_internal/models/__init__.py,sha256=AjmCEBxX_MH9f_jVjIGNCFJKYCYeSEe18yyvNx4uRKQ,62
+pip/_internal/models/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/models/__pycache__/candidate.cpython-314.pyc,,
+pip/_internal/models/__pycache__/direct_url.cpython-314.pyc,,
+pip/_internal/models/__pycache__/format_control.cpython-314.pyc,,
+pip/_internal/models/__pycache__/index.cpython-314.pyc,,
+pip/_internal/models/__pycache__/installation_report.cpython-314.pyc,,
+pip/_internal/models/__pycache__/link.cpython-314.pyc,,
+pip/_internal/models/__pycache__/release_control.cpython-314.pyc,,
+pip/_internal/models/__pycache__/scheme.cpython-314.pyc,,
+pip/_internal/models/__pycache__/search_scope.cpython-314.pyc,,
+pip/_internal/models/__pycache__/selection_prefs.cpython-314.pyc,,
+pip/_internal/models/__pycache__/target_python.cpython-314.pyc,,
+pip/_internal/models/__pycache__/wheel.cpython-314.pyc,,
+pip/_internal/models/candidate.py,sha256=zzgFRuw_kWPjKpGw7LC0ZUMD2CQ2EberUIYs8izjdCA,753
+pip/_internal/models/direct_url.py,sha256=4NMWacu_QzPPWREC1te7v6Wfv-2HkI4tvSJF-CBgLh4,6555
+pip/_internal/models/format_control.py,sha256=PwemYG1L27BM0f1KP61rm24wShENFyxqlD1TWu34alc,2471
+pip/_internal/models/index.py,sha256=tYnL8oxGi4aSNWur0mG8DAP7rC6yuha_MwJO8xw0crI,1030
+pip/_internal/models/installation_report.py,sha256=cqfWJ93ThCxjcacqSWryOCD2XtIn1CZrgzZxAv5FQZ0,2839
+pip/_internal/models/link.py,sha256=zti5UCx1hT03etYqm6MCqFd714clmTgX8rTZT9CKZDQ,21992
+pip/_internal/models/release_control.py,sha256=XD14Hy_XLh9xWR1p7JHqPZPEv3Nnb1BZGMpClk76sLs,3403
+pip/_internal/models/scheme.py,sha256=PakmHJM3e8OOWSZFtfz1Az7f1meONJnkGuQxFlt3wBE,575
+pip/_internal/models/search_scope.py,sha256=1hxU2IVsAaLZVjp0CbzJbYaYzCxv72_Qbg3JL0qhXo0,4507
+pip/_internal/models/selection_prefs.py,sha256=IDOA3euRtyqWUyIK7lX2bzIZasYiEvunKA6H3Mngk-M,2221
+pip/_internal/models/target_python.py,sha256=I0eFS-eia3kwhrOvgsphFZtNAB2IwXZ9Sr9fp6IjBP4,4243
+pip/_internal/models/wheel.py,sha256=1SdfDvN7ALTsbyZ9EOsNy1GPirP1n6EjHyzPrZyLSh8,2920
+pip/_internal/network/__init__.py,sha256=FMy06P__y6jMjUc8z3ZcQdKF-pmZ2zM14_vBeHPGhUI,49
+pip/_internal/network/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/network/__pycache__/auth.cpython-314.pyc,,
+pip/_internal/network/__pycache__/cache.cpython-314.pyc,,
+pip/_internal/network/__pycache__/download.cpython-314.pyc,,
+pip/_internal/network/__pycache__/lazy_wheel.cpython-314.pyc,,
+pip/_internal/network/__pycache__/session.cpython-314.pyc,,
+pip/_internal/network/__pycache__/utils.cpython-314.pyc,,
+pip/_internal/network/__pycache__/xmlrpc.cpython-314.pyc,,
+pip/_internal/network/auth.py,sha256=azFp14I9cyWAAzkxF2VM0Q_xtHnbNz3_NQXszy87KQo,20806
+pip/_internal/network/cache.py,sha256=kmRXKQrG9E26xQRj211LHeEGpDg_SlYU9Dn1fJ-AMeI,4862
+pip/_internal/network/download.py,sha256=8sVwIc9MWwpGlMPYCkO1S9U-FD8TA2utw42tj00skjM,12667
+pip/_internal/network/lazy_wheel.py,sha256=y9gVksdJCSjnLfYzs_m3DYUAtl3hc_k-xFPDBd9DgOs,7646
+pip/_internal/network/session.py,sha256=7zK7EeQCSRFipu4ZzcWl1V3AMKkiXdtGqFr7GvU2LrY,19555
+pip/_internal/network/utils.py,sha256=ACsXd1msqNCidHVXsu7LHUSr8NgaypcOKQ4KG-Z_wJM,4091
+pip/_internal/network/xmlrpc.py,sha256=_-Rnk3vOff8uF9hAGmT6SLALflY1gMBcbGwS12fb_Y4,1830
+pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/operations/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/operations/__pycache__/check.cpython-314.pyc,,
+pip/_internal/operations/__pycache__/freeze.cpython-314.pyc,,
+pip/_internal/operations/__pycache__/prepare.cpython-314.pyc,,
+pip/_internal/operations/build/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/operations/build/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/operations/build/__pycache__/build_tracker.cpython-314.pyc,,
+pip/_internal/operations/build/__pycache__/metadata.cpython-314.pyc,,
+pip/_internal/operations/build/__pycache__/metadata_editable.cpython-314.pyc,,
+pip/_internal/operations/build/__pycache__/wheel.cpython-314.pyc,,
+pip/_internal/operations/build/__pycache__/wheel_editable.cpython-314.pyc,,
+pip/_internal/operations/build/build_tracker.py,sha256=W3b5cmkMWPaE6QIwfzsTayJo7-OlxFHWDxfPuax1KcE,4771
+pip/_internal/operations/build/metadata.py,sha256=INHaeiRfOiLYCXApfDNRo9Cw2xI4VwTc0KItvfdfOjk,1421
+pip/_internal/operations/build/metadata_editable.py,sha256=oWudMsnjy4loO_Jy7g4N9nxsnaEX_iDlVRgCy7pu1rs,1509
+pip/_internal/operations/build/wheel.py,sha256=3bP-nNiJ4S8JvMaBnyessXQUBhxTqt1GBx6DQ1iPJDY,1136
+pip/_internal/operations/build/wheel_editable.py,sha256=q3kfElclM6FutVbFwE87JOTpVWt5ixDf3_UkHAIVfz4,1478
+pip/_internal/operations/check.py,sha256=yC2XWth6iehGGE_fj7XRJLjVKBsTIG3ZoWRkFi3rOwc,5894
+pip/_internal/operations/freeze.py,sha256=PDdY-y_ZtZZJLAKcaWPIGRKAGW7DXR48f0aMRU0j7BA,9854
+pip/_internal/operations/install/__init__.py,sha256=ak-UETcQPKlFZaWoYKWu5QVXbpFBvg0sXc3i0O4vSYY,50
+pip/_internal/operations/install/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/operations/install/__pycache__/wheel.cpython-314.pyc,,
+pip/_internal/operations/install/wheel.py,sha256=FQIl2AnNadHV5YGGOVEmOHtUUNO8lpzj3Icoo4S2xis,27923
+pip/_internal/operations/prepare.py,sha256=ptVsmQf0Mo6jirk1Q5Djdse_wJw5Zdh1Fla2iL9HAJM,28830
+pip/_internal/pyproject.py,sha256=J-sTWqC-XfsKQgz9m1bypMWZPHItsSHzIN_NWeIRmhM,4555
+pip/_internal/req/__init__.py,sha256=WcY9z7D3rlIKX1QY8_tRnAsS_poebiGGdtQ7EJ5JQQo,3041
+pip/_internal/req/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/req/__pycache__/constructors.cpython-314.pyc,,
+pip/_internal/req/__pycache__/pep723.cpython-314.pyc,,
+pip/_internal/req/__pycache__/req_dependency_group.cpython-314.pyc,,
+pip/_internal/req/__pycache__/req_file.cpython-314.pyc,,
+pip/_internal/req/__pycache__/req_install.cpython-314.pyc,,
+pip/_internal/req/__pycache__/req_set.cpython-314.pyc,,
+pip/_internal/req/__pycache__/req_uninstall.cpython-314.pyc,,
+pip/_internal/req/constructors.py,sha256=R-6n8irjnaa2DMMXlR4YMouXzykFBlzUFjhOZ1NcUUg,18688
+pip/_internal/req/pep723.py,sha256=olZL3tLmHWJhyLNfbD6U9UuikuzTcLDB06qd9WavTjs,1225
+pip/_internal/req/req_dependency_group.py,sha256=0yEQCUaO5Bza66Y3D5o9JRf0qII5QgCRugn1x5aRivA,2618
+pip/_internal/req/req_file.py,sha256=e32ZQ3kJaL_Sdtf32twGKqIau_AqR43MeSycl0iS2Mw,20685
+pip/_internal/req/req_install.py,sha256=vv5cbs3P5gf43e_1v72gwSQ2N_D_qpsfuXOyerMhDuI,31273
+pip/_internal/req/req_set.py,sha256=awkqIXnYA4Prmsj0Qb3zhqdbYUmXd-1o0P-KZ3mvRQs,2828
+pip/_internal/req/req_uninstall.py,sha256=dCmOHt-9RaJBq921L4tMH3PmIBDetGplnbjRKXmGt00,24099
+pip/_internal/resolution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/resolution/__pycache__/base.cpython-314.pyc,,
+pip/_internal/resolution/base.py,sha256=RIsqSP79olPdOgtPKW-oOQ364ICVopehA6RfGkRfe2s,577
+pip/_internal/resolution/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/legacy/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/resolution/legacy/__pycache__/resolver.cpython-314.pyc,,
+pip/_internal/resolution/legacy/resolver.py,sha256=bwUqE66etz2bcPabqxed18-iyqqb-kx3Er2aT6GeUJY,24060
+pip/_internal/resolution/resolvelib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/base.cpython-314.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-314.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-314.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-314.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-314.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-314.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-314.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-314.pyc,,
+pip/_internal/resolution/resolvelib/base.py,sha256=_AoP0ZWlaSct8CRDn2ol3CbNn4zDtnh_0zQGjXASDKI,5047
+pip/_internal/resolution/resolvelib/candidates.py,sha256=50AN7BfB-pCfEmbKNlFZSXtdC0C8ms1waJrF2arknQE,20454
+pip/_internal/resolution/resolvelib/factory.py,sha256=82mLwnPlig37mMrDwcgKHJTE9mPczVuJIxeaUb7CQ0Y,34028
+pip/_internal/resolution/resolvelib/found_candidates.py,sha256=8bZYDCZLXSdLHy_s1o5f4r15HmKvqFUhzBUQOF21Lr4,6018
+pip/_internal/resolution/resolvelib/provider.py,sha256=tbVPfFv4Vg780yZ2_XGoGFP5LVo0U2bFnZov3jpSAIk,11441
+pip/_internal/resolution/resolvelib/reporter.py,sha256=faSgjqme0k_uzv1fvM5T0ZatPQ2eEktNvKBqfvXeGjc,3909
+pip/_internal/resolution/resolvelib/requirements.py,sha256=Izl9n8nc188lA1BSPS8QxfudfDQPHgngw-ij6hXt0nQ,8239
+pip/_internal/resolution/resolvelib/resolver.py,sha256=wQ94Hkep-7kWEHAc-NbMJhmzeEzgEAtxeBxyKVzZoeo,13437
+pip/_internal/self_outdated_check.py,sha256=zDKsyLMufFHuEZY16WRu129FBbBp-ADuxyWMIN4ihPE,8284
+pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/utils/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/_jaraco_text.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/_log.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/appdirs.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/compat.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/compatibility_tags.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/datetime.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/deprecation.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/direct_url_helpers.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/egg_link.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/entrypoints.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/filesystem.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/filetypes.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/glibc.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/hashes.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/logging.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/misc.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/packaging.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/pylock.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/retry.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/subprocess.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/temp_dir.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/unpacking.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/urls.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/virtualenv.cpython-314.pyc,,
+pip/_internal/utils/__pycache__/wheel.cpython-314.pyc,,
+pip/_internal/utils/_jaraco_text.py,sha256=M15uUPIh5NpP1tdUGBxRau6q1ZAEtI8-XyLEETscFfE,3350
+pip/_internal/utils/_log.py,sha256=-jHLOE_THaZz5BFcCnoSL9EYAtJ0nXem49s9of4jvKw,1015
+pip/_internal/utils/appdirs.py,sha256=LrzDPZMKVh0rubtCx9vu3XlZbLCSug6VSj4Qsvt66BA,1681
+pip/_internal/utils/compat.py,sha256=C9LHXJAKkwAH8Hn3nPkz9EYK3rqPBeO_IXkOG2zzsdQ,2514
+pip/_internal/utils/compatibility_tags.py,sha256=DiNSLqpuruXUamGQwOJ2WZByDGLTGaXi9O-Xf8fOi34,6630
+pip/_internal/utils/datetime.py,sha256=kuJOf1mW8G5tRFN6jWardddS-9qSaR53lK1jmx3NTZY,868
+pip/_internal/utils/deprecation.py,sha256=HVhvyO5qiRFcG88PhZlp_87qdKQNwPTUIIHWtsTR2yI,3696
+pip/_internal/utils/direct_url_helpers.py,sha256=ttKv4GMUqlRwPPog9_CUopy6SDgoxVILzeBJzgfn2tg,3200
+pip/_internal/utils/egg_link.py,sha256=YWfsrbmfcrfWgqQYy6OuIjsyb9IfL1q_2v4zsms1WjI,2459
+pip/_internal/utils/entrypoints.py,sha256=uPjAyShKObdotjQjJUzprQ6r3xQvDIZwUYfHHqZ7Dok,3324
+pip/_internal/utils/filesystem.py,sha256=mJ_PP8z1V1x4HMhydWIWDyEmWikLX0f-NXPCXEcjiLo,6892
+pip/_internal/utils/filetypes.py,sha256=sEMa38qaqjvx1Zid3OCAUja31BOBU-USuSMPBvU3yjo,689
+pip/_internal/utils/glibc.py,sha256=sEh8RJJLYSdRvTqAO4THVPPA-YSDVLD4SI9So-bxX1U,3726
+pip/_internal/utils/hashes.py,sha256=d32UI1en8nyqZzdZQvxUVdfeBoe4ADWx7HtrIM4-XQ4,4998
+pip/_internal/utils/logging.py,sha256=6lJWMC6c7_aD_i4sdgaaeb-Tm3kWpYg0hba_V1-OLnE,13414
+pip/_internal/utils/misc.py,sha256=phFIbHm2kmliHDXJ0eNPxgGP423ZpvZoMKKtJ1_Zvjs,23722
+pip/_internal/utils/packaging.py,sha256=s5tpUmFumwV0H9JSTzryrIY4JwQM8paGt7Sm7eNwt2Y,1601
+pip/_internal/utils/pylock.py,sha256=nKQknZgyswWgzi--hRQX_DLUYQ3g5wGTCwVNQNdoJ54,3817
+pip/_internal/utils/retry.py,sha256=83wReEB2rcntMZ5VLd7ascaYSjn_kLdlQCqxILxWkPM,1461
+pip/_internal/utils/subprocess.py,sha256=r4-Ba_Yc3uZXQpi0K4pZFsCT_QqdSvtF3XJ-204QWaA,8983
+pip/_internal/utils/temp_dir.py,sha256=D9c8D7WOProOO8GGDqpBeVSj10NGFmunG0o2TodjjIU,9307
+pip/_internal/utils/unpacking.py,sha256=4hNg6dqHOn_KzGCzSC76nChG97d_UjtF9AnLSof672o,12972
+pip/_internal/utils/urls.py,sha256=aF_eg9ul5d8bMCxfSSSxQcfs-OpJdbStYqZHoy2K1RE,1601
+pip/_internal/utils/virtualenv.py,sha256=mX-UPyw1MPxhwUxKhbqWWX70J6PHXAJjVVrRnG0h9mc,3455
+pip/_internal/utils/wheel.py,sha256=YdRuj6MicG-Q9Mg03FbUv1WTLam6Lc7AgijY4voVyis,4468
+pip/_internal/vcs/__init__.py,sha256=UAqvzpbi0VbZo3Ub6skEeZAw-ooIZR-zX_WpCbxyCoU,596
+pip/_internal/vcs/__pycache__/__init__.cpython-314.pyc,,
+pip/_internal/vcs/__pycache__/bazaar.cpython-314.pyc,,
+pip/_internal/vcs/__pycache__/git.cpython-314.pyc,,
+pip/_internal/vcs/__pycache__/mercurial.cpython-314.pyc,,
+pip/_internal/vcs/__pycache__/subversion.cpython-314.pyc,,
+pip/_internal/vcs/__pycache__/versioncontrol.cpython-314.pyc,,
+pip/_internal/vcs/bazaar.py,sha256=3W1eHjkYx2vc6boeb2NBh4I_rlGAXM-vrzfNhLm1Rxg,3734
+pip/_internal/vcs/git.py,sha256=TTeqDuzS-_BFSNuUStVWmE2nGDpKuvUhBBJk_CCQXV0,19144
+pip/_internal/vcs/mercurial.py,sha256=w1ZJWLKqNP1onEjkfjlwBVnMqPZNSIER8ayjQcnTq4w,5575
+pip/_internal/vcs/subversion.py,sha256=uUgdPvxmvEB8Qwtjr0Hc0XgFjbiNi5cbvI4vARLOJXo,11787
+pip/_internal/vcs/versioncontrol.py,sha256=Ma_HMZBVveSkeYvxacvqeujnkSIaF1XjxTsS3BwcJ8E,22599
+pip/_internal/wheel_builder.py,sha256=yvEULStZtty9Kplp89tDis3hGdyKQ-2BUbFLmJ_5ink,9010
+pip/_vendor/README.rst,sha256=pKKBwCWhu3M3qQ9dDnsmxb3KdsRr-nWmMq2srbH_Bi0,9394
+pip/_vendor/__init__.py,sha256=WzusPTGWIMeQQWSVJ0h2rafGkVTa9WKJ2HT-2-EoZrU,4907
+pip/_vendor/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/cachecontrol/LICENSE.txt,sha256=hu7uh74qQ_P_H1ZJb0UfaSQ5JvAl_tuwM2ZsMExMFhs,558
+pip/_vendor/cachecontrol/__init__.py,sha256=GxwRkm_TQBtPZpfpVK9r6S9dAy2DVnVgDVHJKTiPZ1k,820
+pip/_vendor/cachecontrol/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-314.pyc,,
+pip/_vendor/cachecontrol/__pycache__/adapter.cpython-314.pyc,,
+pip/_vendor/cachecontrol/__pycache__/cache.cpython-314.pyc,,
+pip/_vendor/cachecontrol/__pycache__/controller.cpython-314.pyc,,
+pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-314.pyc,,
+pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-314.pyc,,
+pip/_vendor/cachecontrol/__pycache__/serialize.cpython-314.pyc,,
+pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-314.pyc,,
+pip/_vendor/cachecontrol/_cmd.py,sha256=iist2EpzJvDVIhMAxXq8iFnTBsiZAd6iplxfmNboNyk,1737
+pip/_vendor/cachecontrol/adapter.py,sha256=W-HW-l01gyCsnxkOyCbqx7sxrWYoBbKrDsKkVVQN6NE,6586
+pip/_vendor/cachecontrol/cache.py,sha256=OXwv7Fn2AwnKNiahJHnjtvaKLndvVLv_-zO-ltlV9qI,1953
+pip/_vendor/cachecontrol/caches/__init__.py,sha256=dtrrroK5BnADR1GWjCZ19aZ0tFsMfvFBtLQQU1sp_ag,303
+pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-314.pyc,,
+pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-314.pyc,,
+pip/_vendor/cachecontrol/caches/file_cache.py,sha256=d8upFmy_zwaCmlbWEVBlLXFddt8Zw8c5SFpxeOZsdfw,4117
+pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=9rmqwtYu_ljVkW6_oLqbC7EaX_a8YT_yLuna-eS0dgo,1386
+pip/_vendor/cachecontrol/controller.py,sha256=xBauC-vUSu5GsJsxD4-W-JaKqqbBz0MN6Zv8PA2N8hI,19102
+pip/_vendor/cachecontrol/filewrapper.py,sha256=DhxC_rSk-beKdbsYhfvBUDovQHX9r3gHH_jP9-q_mKk,4354
+pip/_vendor/cachecontrol/heuristics.py,sha256=gqMXU8w0gQuEQiSdu3Yg-0vd9kW7nrWKbLca75rheGE,4881
+pip/_vendor/cachecontrol/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/cachecontrol/serialize.py,sha256=HQd2IllQ05HzPkVLMXTF2uX5mjEQjDBkxCqUJUODpZk,5163
+pip/_vendor/cachecontrol/wrapper.py,sha256=hsGc7g8QGQTT-4f8tgz3AM5qwScg6FO0BSdLSRdEvpU,1417
+pip/_vendor/certifi/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989
+pip/_vendor/certifi/__init__.py,sha256=969deMMS7Uchipr0oO4dbRBUvRi0uNYCn07VmG1aTrg,94
+pip/_vendor/certifi/__main__.py,sha256=1k3Cr95vCxxGRGDljrW3wMdpZdL3Nhf0u1n-k2qdsCY,255
+pip/_vendor/certifi/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/certifi/__pycache__/__main__.cpython-314.pyc,,
+pip/_vendor/certifi/__pycache__/core.cpython-314.pyc,,
+pip/_vendor/certifi/cacert.pem,sha256=Tzl1_zCrvzVEO0hgZK6Ly0Hf9wf_31dsdtKS-0WKoKk,270954
+pip/_vendor/certifi/core.py,sha256=gu_ECVI1m3Rq0ytpsNE61hgQGcKaOAt9Rs9G8KsTCOI,3442
+pip/_vendor/certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/dependency_groups/LICENSE.txt,sha256=GrNuPipLqGMWJThPh-ngkdsfrtA0xbIzJbMjmr8sxSU,1099
+pip/_vendor/dependency_groups/__init__.py,sha256=C3OFu0NGwDzQ4LOmmSOFPsRSvkbBn-mdd4j_5YqJw-s,250
+pip/_vendor/dependency_groups/__main__.py,sha256=UNTM7P5mfVtT7wDi9kOTXWgV3fu3e8bTrt1Qp1jvjKo,1709
+pip/_vendor/dependency_groups/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/dependency_groups/__pycache__/__main__.cpython-314.pyc,,
+pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-314.pyc,,
+pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-314.pyc,,
+pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-314.pyc,,
+pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-314.pyc,,
+pip/_vendor/dependency_groups/_implementation.py,sha256=Gqb2DlQELRakeHlKf6QtQSW0M-bcEomxHw4JsvID1ls,8041
+pip/_vendor/dependency_groups/_lint_dependency_groups.py,sha256=yp-DDqKXtbkDTNa0ifa-FmOA8ra24lPZEXftW-R5AuI,1710
+pip/_vendor/dependency_groups/_pip_wrapper.py,sha256=nuVW_w_ntVxpE26ELEvngMY0N04sFLsijXRyZZROFG8,1865
+pip/_vendor/dependency_groups/_toml_compat.py,sha256=BHnXnFacm3DeolsA35GjI6qkDApvua-1F20kv3BfZWE,285
+pip/_vendor/dependency_groups/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/distlib/LICENSE.txt,sha256=gI4QyKarjesUn_mz-xn0R6gICUYG1xKpylf-rTVSWZ0,14531
+pip/_vendor/distlib/__init__.py,sha256=Deo3uo98aUyIfdKJNqofeSEFWwDzrV2QeGLXLsgq0Ag,625
+pip/_vendor/distlib/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/distlib/__pycache__/compat.cpython-314.pyc,,
+pip/_vendor/distlib/__pycache__/resources.cpython-314.pyc,,
+pip/_vendor/distlib/__pycache__/scripts.cpython-314.pyc,,
+pip/_vendor/distlib/__pycache__/util.cpython-314.pyc,,
+pip/_vendor/distlib/compat.py,sha256=2jRSjRI4o-vlXeTK2BCGIUhkc6e9ZGhSsacRM5oseTw,41467
+pip/_vendor/distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820
+pip/_vendor/distlib/scripts.py,sha256=Qvp76E9Jc3IgyYubnpqI9fS7eseGOe4FjpeVKqKt9Iw,18612
+pip/_vendor/distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792
+pip/_vendor/distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784
+pip/_vendor/distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032
+pip/_vendor/distlib/util.py,sha256=vMPGvsS4j9hF6Y9k3Tyom1aaHLb0rFmZAEyzeAdel9w,66682
+pip/_vendor/distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648
+pip/_vendor/distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448
+pip/_vendor/distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888
+pip/_vendor/distro/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325
+pip/_vendor/distro/__init__.py,sha256=2fHjF-SfgPvjyNZ1iHh_wjqWdR_Yo5ODHwZC0jLBPhc,981
+pip/_vendor/distro/__main__.py,sha256=bu9d3TifoKciZFcqRBuygV3GSuThnVD_m2IK4cz96Vs,64
+pip/_vendor/distro/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/distro/__pycache__/__main__.cpython-314.pyc,,
+pip/_vendor/distro/__pycache__/distro.cpython-314.pyc,,
+pip/_vendor/distro/distro.py,sha256=XqbefacAhDT4zr_trnbA15eY8vdK4GTghgmvUGrEM_4,49430
+pip/_vendor/distro/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/idna/LICENSE.md,sha256=t6M2q_OwThgOwGXN0W5wXQeeHMehT5EKpukYfza5zYc,1541
+pip/_vendor/idna/__init__.py,sha256=MPqNDLZbXqGaNdXxAFhiqFPKEQXju2jNQhCey6-5eJM,868
+pip/_vendor/idna/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/idna/__pycache__/codec.cpython-314.pyc,,
+pip/_vendor/idna/__pycache__/compat.cpython-314.pyc,,
+pip/_vendor/idna/__pycache__/core.cpython-314.pyc,,
+pip/_vendor/idna/__pycache__/idnadata.cpython-314.pyc,,
+pip/_vendor/idna/__pycache__/intranges.cpython-314.pyc,,
+pip/_vendor/idna/__pycache__/package_data.cpython-314.pyc,,
+pip/_vendor/idna/__pycache__/uts46data.cpython-314.pyc,,
+pip/_vendor/idna/codec.py,sha256=M2SGWN7cs_6B32QmKTyTN6xQGZeYQgQ2wiX3_DR6loE,3438
+pip/_vendor/idna/compat.py,sha256=RzLy6QQCdl9784aFhb2EX9EKGCJjg0P3PilGdeXXcx8,316
+pip/_vendor/idna/core.py,sha256=P26_XVycuMTZ1R2mNK1ZREVzM5mvTzdabBXfyZVU1Lc,13246
+pip/_vendor/idna/idnadata.py,sha256=SG8jhaGE53iiD6B49pt2pwTv_UvClciWE-N54oR2p4U,79623
+pip/_vendor/idna/intranges.py,sha256=amUtkdhYcQG8Zr-CoMM_kVRacxkivC1WgxN1b63KKdU,1898
+pip/_vendor/idna/package_data.py,sha256=_CUavOxobnbyNG2FLyHoN8QHP3QM9W1tKuw7eq9QwBk,21
+pip/_vendor/idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/idna/uts46data.py,sha256=H9J35VkD0F9L9mKOqjeNGd2A-Va6FlPoz6Jz4K7h-ps,243725
+pip/_vendor/msgpack/COPYING,sha256=SS3tuoXaWHL3jmCRvNH-pHTWYNNay03ulkuKqz8AdCc,614
+pip/_vendor/msgpack/__init__.py,sha256=RA8gcqK17YpkxBnNwXJVa1oa2LygWDgfF1nA1NPw3mo,1109
+pip/_vendor/msgpack/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/msgpack/__pycache__/exceptions.cpython-314.pyc,,
+pip/_vendor/msgpack/__pycache__/ext.cpython-314.pyc,,
+pip/_vendor/msgpack/__pycache__/fallback.cpython-314.pyc,,
+pip/_vendor/msgpack/exceptions.py,sha256=dCTWei8dpkrMsQDcjQk74ATl9HsIBH0ybt8zOPNqMYc,1081
+pip/_vendor/msgpack/ext.py,sha256=kteJv03n9tYzd5oo3xYopVTo4vRaAxonBQQJhXohZZo,5726
+pip/_vendor/msgpack/fallback.py,sha256=0g1Pzp0vtmBEmJ5w9F3s_-JMVURP8RS4G1cc5TRaAsI,32390
+pip/_vendor/packaging/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197
+pip/_vendor/packaging/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174
+pip/_vendor/packaging/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344
+pip/_vendor/packaging/__init__.py,sha256=y4lVbpeBzCGk-IPDw5BGBZ_b0P3ukEEJZAbGYc6Ey8c,494
+pip/_vendor/packaging/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/_elffile.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/_manylinux.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/_musllinux.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/_parser.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/_structures.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/_tokenizer.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/markers.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/metadata.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/pylock.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/requirements.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/specifiers.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/tags.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/utils.cpython-314.pyc,,
+pip/_vendor/packaging/__pycache__/version.cpython-314.pyc,,
+pip/_vendor/packaging/_elffile.py,sha256=-sKkptYqzYw2-x3QByJa5mB4rfPWu1pxkZHRx1WAFCY,3211
+pip/_vendor/packaging/_manylinux.py,sha256=Hf6nB0cOrayEs96-p3oIXAgGnFquv20DO5l-o2_Xnv0,9559
+pip/_vendor/packaging/_musllinux.py,sha256=Z6swjH3MA7XS3qXnmMN7QPhqP3fnoYI0eQ18e9-HgAE,2707
+pip/_vendor/packaging/_parser.py,sha256=U_DajsEx2VoC_F46fSVV3hDKNCWoQYkPkasO3dld0ig,10518
+pip/_vendor/packaging/_structures.py,sha256=Hn49Ta8zV9Wo8GiCL8Nl2ARZY983Un3pruZGVNldPwE,1514
+pip/_vendor/packaging/_tokenizer.py,sha256=M8EwNIdXeL9NMFuFrQtiOKwjka_xFx8KjRQnfE8O_z8,5421
+pip/_vendor/packaging/licenses/__init__.py,sha256=TwXLHZCXwSgdFwRLPxW602T6mSieunSFHM6fp8pgW78,5819
+pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-314.pyc,,
+pip/_vendor/packaging/licenses/_spdx.py,sha256=WW7DXiyg68up_YND_wpRYlr1SHhiV4FfJLQffghhMxQ,51122
+pip/_vendor/packaging/markers.py,sha256=ZX-cLvW1S3cZcEc0fHI4z7zSx5U2T19yMpDP_mE-CYw,12771
+pip/_vendor/packaging/metadata.py,sha256=CWVZpN_HfoYMSSDuCP7igOvGgqA9AOmpW8f3qTisfnc,39360
+pip/_vendor/packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/packaging/pylock.py,sha256=-R1uNfJ4PaLto7Mg62YsGOHgvskuiIEqPwxOywl42Jk,22537
+pip/_vendor/packaging/requirements.py,sha256=PMCAWD8aNMnVD-6uZMedhBuAVX2573eZ4yPBLXmz04I,2870
+pip/_vendor/packaging/specifiers.py,sha256=tF2nC-jwW94FYe6So9dNGenQx1Hdif7ErmWlVp1QiXE,40821
+pip/_vendor/packaging/tags.py,sha256=cXLV1pJD3UtJlDg7Wz3zrfdQhRZqr8jumSAKKAAd2xE,22856
+pip/_vendor/packaging/utils.py,sha256=N4c6oZzFJy6klTZ3AnkNz7sSkJesuFWPp68LA3B5dAo,5040
+pip/_vendor/packaging/version.py,sha256=RVRKq8_GD5Bcak6E1kGG8K7siNZYW9n_XK8M2ZLl0H8,23284
+pip/_vendor/pkg_resources/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023
+pip/_vendor/pkg_resources/__init__.py,sha256=vbTJ0_ruUgGxQjlEqsruFmiNPVyh2t9q-zyTDT053xI,124451
+pip/_vendor/pkg_resources/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/platformdirs/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089
+pip/_vendor/platformdirs/__init__.py,sha256=UfeSHWl8AeTtbOBOoHAxK4dODOWkZtfy-m_i7cWdJ8c,22344
+pip/_vendor/platformdirs/__main__.py,sha256=jBJ8zb7Mpx5ebcqF83xrpO94MaeCpNGHVf9cvDN2JLg,1505
+pip/_vendor/platformdirs/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/platformdirs/__pycache__/__main__.cpython-314.pyc,,
+pip/_vendor/platformdirs/__pycache__/android.cpython-314.pyc,,
+pip/_vendor/platformdirs/__pycache__/api.cpython-314.pyc,,
+pip/_vendor/platformdirs/__pycache__/macos.cpython-314.pyc,,
+pip/_vendor/platformdirs/__pycache__/unix.cpython-314.pyc,,
+pip/_vendor/platformdirs/__pycache__/version.cpython-314.pyc,,
+pip/_vendor/platformdirs/__pycache__/windows.cpython-314.pyc,,
+pip/_vendor/platformdirs/android.py,sha256=r0DshVBf-RO1jXJGX8C4Til7F1XWt-bkdWMgmvEiaYg,9013
+pip/_vendor/platformdirs/api.py,sha256=wPHOlwOsfz2oqQZ6A2FcCu5kEAj-JondzoNOHYFQ0h8,9281
+pip/_vendor/platformdirs/macos.py,sha256=0XoOgin1NK7Qki7iskD-oS8xKxw6bXgoKEgdqpCRAFQ,6322
+pip/_vendor/platformdirs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/platformdirs/unix.py,sha256=WZmkUA--L3JNRGmz32s35YfoD3ica6xKIPdCV_HhLcs,10458
+pip/_vendor/platformdirs/version.py,sha256=BI_dKLSMwlkl57vlxZnT8oVjPiUC2W_sdx_8_h99HeQ,704
+pip/_vendor/platformdirs/windows.py,sha256=XvCfklGUMVxJbXit51jpYMN-lNeScPB82qS1CAeplL0,10362
+pip/_vendor/pygments/LICENSE,sha256=qdZvHVJt8C4p3Oc0NtNOVuhjL0bCdbvf_HBWnogvnxc,1331
+pip/_vendor/pygments/__init__.py,sha256=8uNqJCCwXqbEx5aSsBr0FykUQOBDKBihO5mPqiw1aqo,2983
+pip/_vendor/pygments/__main__.py,sha256=WrndpSe6i1ckX_SQ1KaxD9CTKGzD0EuCOFxcbwFpoLU,353
+pip/_vendor/pygments/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/__main__.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/console.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/filter.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/formatter.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/lexer.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/modeline.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/plugin.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/regexopt.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/scanner.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/sphinxext.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/style.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/token.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/unistring.cpython-314.pyc,,
+pip/_vendor/pygments/__pycache__/util.cpython-314.pyc,,
+pip/_vendor/pygments/console.py,sha256=AagDWqwea2yBWf10KC9ptBgMpMjxKp8yABAmh-NQOVk,1718
+pip/_vendor/pygments/filter.py,sha256=YLtpTnZiu07nY3oK9nfR6E9Y1FBHhP5PX8gvkJWcfag,1910
+pip/_vendor/pygments/filters/__init__.py,sha256=4U4jtA0X3iP83uQnB9-TI-HDSw8E8y8zMYHa0UjbbaI,40392
+pip/_vendor/pygments/filters/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/pygments/formatter.py,sha256=KZQMmyo_xkOIkQG8g66LYEkBh1bx7a0HyGCBcvhI9Ew,4390
+pip/_vendor/pygments/formatters/__init__.py,sha256=KTwBmnXlaopJhQDOemVHYHskiDghuq-08YtP6xPNJPg,5385
+pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-314.pyc,,
+pip/_vendor/pygments/formatters/_mapping.py,sha256=1Cw37FuQlNacnxRKmtlPX4nyLoX9_ttko5ZwscNUZZ4,4176
+pip/_vendor/pygments/lexer.py,sha256=_kBrOJ_NT5Tl0IVM0rA9c8eysP6_yrlGzEQI0eVYB-A,35349
+pip/_vendor/pygments/lexers/__init__.py,sha256=wbIME35GH7bI1B9rNPJFqWT-ij_RApZDYPUlZycaLzA,12115
+pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-314.pyc,,
+pip/_vendor/pygments/lexers/__pycache__/python.cpython-314.pyc,,
+pip/_vendor/pygments/lexers/_mapping.py,sha256=l4tCXM8e9aPC2BD6sjIr0deT-J-z5tHgCwL-p1fS0PE,77602
+pip/_vendor/pygments/lexers/python.py,sha256=vxjn1cOHclIKJKxoyiBsQTY65GHbkZtZRuKQ2AVCKaw,53853
+pip/_vendor/pygments/modeline.py,sha256=K5eSkR8GS1r5OkXXTHOcV0aM_6xpk9eWNEIAW-OOJ2g,1005
+pip/_vendor/pygments/plugin.py,sha256=tPx0rJCTIZ9ioRgLNYG4pifCbAwTRUZddvLw-NfAk2w,1891
+pip/_vendor/pygments/regexopt.py,sha256=wXaP9Gjp_hKAdnICqoDkRxAOQJSc4v3X6mcxx3z-TNs,3072
+pip/_vendor/pygments/scanner.py,sha256=nNcETRR1tRuiTaHmHSTTECVYFPcLf6mDZu1e4u91A9E,3092
+pip/_vendor/pygments/sphinxext.py,sha256=5x7Zh9YlU6ISJ31dMwduiaanb5dWZnKg3MyEQsseNnQ,7981
+pip/_vendor/pygments/style.py,sha256=PlOZqlsnTVd58RGy50vkA2cXQ_lP5bF5EGMEBTno6DA,6420
+pip/_vendor/pygments/styles/__init__.py,sha256=x9ebctfyvCAFpMTlMJ5YxwcNYBzjgq6zJaKkNm78r4M,2042
+pip/_vendor/pygments/styles/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-314.pyc,,
+pip/_vendor/pygments/styles/_mapping.py,sha256=6lovFUE29tz6EsV3XYY4hgozJ7q1JL7cfO3UOlgnS8w,3312
+pip/_vendor/pygments/token.py,sha256=WbdWGhYm_Vosb0DDxW9lHNPgITXfWTsQmHt6cy9RbcM,6226
+pip/_vendor/pygments/unistring.py,sha256=al-_rBemRuGvinsrM6atNsHTmJ6DUbw24q2O2Ru1cBc,63208
+pip/_vendor/pygments/util.py,sha256=oRtSpiAo5jM9ulntkvVbgXUdiAW57jnuYGB7t9fYuhc,10031
+pip/_vendor/pyproject_hooks/LICENSE,sha256=GyKwSbUmfW38I6Z79KhNjsBLn9-xpR02DkK0NCyLQVQ,1081
+pip/_vendor/pyproject_hooks/__init__.py,sha256=cPB_a9LXz5xvsRbX1o2qyAdjLatZJdQ_Lc5McNX-X7Y,691
+pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-314.pyc,,
+pip/_vendor/pyproject_hooks/_impl.py,sha256=jY-raxnmyRyB57ruAitrJRUzEexuAhGTpgMygqx67Z4,14936
+pip/_vendor/pyproject_hooks/_in_process/__init__.py,sha256=MJNPpfIxcO-FghxpBbxkG1rFiQf6HOUbV4U5mq0HFns,557
+pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-314.pyc,,
+pip/_vendor/pyproject_hooks/_in_process/_in_process.py,sha256=qcXMhmx__MIJq10gGHW3mA4Tl8dy8YzHMccwnNoKlw0,12216
+pip/_vendor/pyproject_hooks/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/requests/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+pip/_vendor/requests/__init__.py,sha256=HlB_HzhrzGtfD_aaYUwUh1zWXLZ75_YCLyit75d0Vz8,5057
+pip/_vendor/requests/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/__version__.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/_internal_utils.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/adapters.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/api.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/auth.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/certs.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/compat.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/cookies.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/exceptions.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/help.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/hooks.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/models.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/packages.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/sessions.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/status_codes.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/structures.cpython-314.pyc,,
+pip/_vendor/requests/__pycache__/utils.cpython-314.pyc,,
+pip/_vendor/requests/__version__.py,sha256=QKDceK8K_ujqwDDc3oYrR0odOBYgKVOQQ5vFap_G_cg,435
+pip/_vendor/requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495
+pip/_vendor/requests/adapters.py,sha256=2MLFOK9GpYNhiTd6zLDUrAgSkIB-76i6pmSuUJjHC2w,26429
+pip/_vendor/requests/api.py,sha256=_Zb9Oa7tzVIizTKwFrPjDEY9ejtm_OnSRERnADxGsQs,6449
+pip/_vendor/requests/auth.py,sha256=kF75tqnLctZ9Mf_hm9TZIj4cQWnN5uxRz8oWsx5wmR0,10186
+pip/_vendor/requests/certs.py,sha256=kHDlkK_beuHXeMPc5jta2wgl8gdKeUWt5f2nTDVrvt8,441
+pip/_vendor/requests/compat.py,sha256=QfbmdTFiZzjSHMXiMrd4joCRU6RabtQ9zIcPoVaHIus,1822
+pip/_vendor/requests/cookies.py,sha256=bNi-iqEj4NPZ00-ob-rHvzkvObzN3lEpgw3g6paS3Xw,18590
+pip/_vendor/requests/exceptions.py,sha256=D1wqzYWne1mS2rU43tP9CeN1G7QAy7eqL9o1god6Ejw,4272
+pip/_vendor/requests/help.py,sha256=hRKaf9u0G7fdwrqMHtF3oG16RKktRf6KiwtSq2Fo1_0,3813
+pip/_vendor/requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733
+pip/_vendor/requests/models.py,sha256=taljlg6vJ4b-xMu2TaMNFFkaiwMex_VsEQ6qUTN3wzY,35575
+pip/_vendor/requests/packages.py,sha256=_ZQDCJTJ8SP3kVWunSqBsRZNPzj2c1WFVqbdr08pz3U,1057
+pip/_vendor/requests/sessions.py,sha256=Cl1dpEnOfwrzzPbku-emepNeN4Rt_0_58Iy2x-JGTm8,30503
+pip/_vendor/requests/status_codes.py,sha256=iJUAeA25baTdw-6PfD0eF4qhpINDJRJI-yaMqxs4LEI,4322
+pip/_vendor/requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912
+pip/_vendor/requests/utils.py,sha256=WS3wHSQaaEfceu1syiFo5jf4e_CWKUTep_IabOVI_J0,33225
+pip/_vendor/resolvelib/LICENSE,sha256=84j9OMrRMRLB3A9mm76A5_hFQe26-3LzAw0sp2QsPJ0,751
+pip/_vendor/resolvelib/__init__.py,sha256=yoX-d4STvwGGCiQRE5cJC9Cter69SgVgqClxOCvSP7M,541
+pip/_vendor/resolvelib/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/resolvelib/__pycache__/providers.cpython-314.pyc,,
+pip/_vendor/resolvelib/__pycache__/reporters.cpython-314.pyc,,
+pip/_vendor/resolvelib/__pycache__/structs.cpython-314.pyc,,
+pip/_vendor/resolvelib/providers.py,sha256=pIWJbIdJJ9GFtNbtwTH0Ia43Vj6hYCEJj2DOLue15FM,8914
+pip/_vendor/resolvelib/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/resolvelib/reporters.py,sha256=pNJf4nFxLpAeKxlBUi2GEj0a2Ij1nikY0UabTKXesT4,2037
+pip/_vendor/resolvelib/resolvers/__init__.py,sha256=728M3EvmnPbVXS7ExXlv2kMu6b7wEsoPutEfl-uVk_I,640
+pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-314.pyc,,
+pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-314.pyc,,
+pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-314.pyc,,
+pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-314.pyc,,
+pip/_vendor/resolvelib/resolvers/abstract.py,sha256=CNeQPnpAudY77nmzOkONSmAgRlzIf06X-X9mvRYODms,1543
+pip/_vendor/resolvelib/resolvers/criterion.py,sha256=lcmZGv5sKHOnFD_RzZwvlGSj19MeA-5rCMpdf2Sgw7Y,1768
+pip/_vendor/resolvelib/resolvers/exceptions.py,sha256=ln_jaQtgLlRUSFY627yiHG2gD7AgaXzRKaElFVh7fDQ,1768
+pip/_vendor/resolvelib/resolvers/resolution.py,sha256=3J_zkW-sD3EY-BlNXjyln__njpyH5n0UZJT6uV7CheA,24212
+pip/_vendor/resolvelib/structs.py,sha256=pu-EJiR2IBITr2SQeNPRa0rXhjlStfmO_GEgAhr3004,6420
+pip/_vendor/rich/LICENSE,sha256=3u18F6QxgVgZCj6iOcyHmlpQJxzruYrnAl9I--WNyhU,1056
+pip/_vendor/rich/__init__.py,sha256=dRxjIL-SbFVY0q3IjSMrfgBTHrm1LZDgLOygVBwiYZc,6090
+pip/_vendor/rich/__main__.py,sha256=e_aVC-tDzarWQW9SuZMuCgBr6ODV_iDNV2Wh2xkxOlw,7896
+pip/_vendor/rich/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/__main__.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_cell_widths.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_emoji_codes.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_emoji_replace.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_export_format.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_extension.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_fileno.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_inspect.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_log_render.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_loop.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_null_file.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_palettes.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_pick.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_ratio.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_spinners.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_stack.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_timer.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_win32_console.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_windows.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_windows_renderer.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/_wrap.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/abc.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/align.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/ansi.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/bar.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/box.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/cells.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/color.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/color_triplet.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/columns.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/console.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/constrain.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/containers.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/control.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/default_styles.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/diagnose.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/emoji.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/errors.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/file_proxy.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/filesize.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/highlighter.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/json.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/jupyter.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/layout.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/live.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/live_render.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/logging.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/markup.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/measure.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/padding.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/pager.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/palette.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/panel.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/pretty.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/progress.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/progress_bar.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/prompt.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/protocol.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/region.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/repr.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/rule.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/scope.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/screen.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/segment.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/spinner.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/status.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/style.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/styled.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/syntax.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/table.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/terminal_theme.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/text.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/theme.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/themes.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/traceback.cpython-314.pyc,,
+pip/_vendor/rich/__pycache__/tree.cpython-314.pyc,,
+pip/_vendor/rich/_cell_widths.py,sha256=fbmeyetEdHjzE_Vx2l1uK7tnPOhMs2X1lJfO3vsKDpA,10209
+pip/_vendor/rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235
+pip/_vendor/rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064
+pip/_vendor/rich/_export_format.py,sha256=RI08pSrm5tBSzPMvnbTqbD9WIalaOoN5d4M1RTmLq1Y,2128
+pip/_vendor/rich/_extension.py,sha256=Xt47QacCKwYruzjDi-gOBq724JReDj9Cm9xUi5fr-34,265
+pip/_vendor/rich/_fileno.py,sha256=HWZxP5C2ajMbHryvAQZseflVfQoGzsKOHzKGsLD8ynQ,799
+pip/_vendor/rich/_inspect.py,sha256=ROT0PLC2GMWialWZkqJIjmYq7INRijQQkoSokWTaAiI,9656
+pip/_vendor/rich/_log_render.py,sha256=1ByI0PA1ZpxZY3CGJOK54hjlq4X-Bz_boIjIqCd8Kns,3225
+pip/_vendor/rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236
+pip/_vendor/rich/_null_file.py,sha256=ADGKp1yt-k70FMKV6tnqCqecB-rSJzp-WQsD7LPL-kg,1394
+pip/_vendor/rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063
+pip/_vendor/rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423
+pip/_vendor/rich/_ratio.py,sha256=IOtl78sQCYZsmHyxhe45krkb68u9xVz7zFsXVJD-b2Y,5325
+pip/_vendor/rich/_spinners.py,sha256=U2r1_g_1zSjsjiUdAESc2iAMc3i4ri_S8PYP6kQ5z1I,19919
+pip/_vendor/rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351
+pip/_vendor/rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417
+pip/_vendor/rich/_win32_console.py,sha256=BSaDRIMwBLITn_m0mTRLPqME5q-quGdSMuYMpYeYJwc,22755
+pip/_vendor/rich/_windows.py,sha256=aBwaD_S56SbgopIvayVmpk0Y28uwY2C5Bab1wl3Bp-I,1925
+pip/_vendor/rich/_windows_renderer.py,sha256=t74ZL3xuDCP3nmTp9pH1L5LiI2cakJuQRQleHCJerlk,2783
+pip/_vendor/rich/_wrap.py,sha256=FlSsom5EX0LVkA3KWy34yHnCfLtqX-ZIepXKh-70rpc,3404
+pip/_vendor/rich/abc.py,sha256=ON-E-ZqSSheZ88VrKX2M3PXpFbGEUUZPMa_Af0l-4f0,890
+pip/_vendor/rich/align.py,sha256=dg-7uY0ukMLLlUEsBDRLva22_sQgIJD4BK0dmZHFHug,10324
+pip/_vendor/rich/ansi.py,sha256=Avs1LHbSdcyOvDOdpELZUoULcBiYewY76eNBp6uFBhs,6921
+pip/_vendor/rich/bar.py,sha256=ldbVHOzKJOnflVNuv1xS7g6dLX2E3wMnXkdPbpzJTcs,3263
+pip/_vendor/rich/box.py,sha256=kmavBc_dn73L_g_8vxWSwYJD2uzBXOUFTtJOfpbczcM,10686
+pip/_vendor/rich/cells.py,sha256=KrQkj5-LghCCpJLSNQIyAZjndc4bnEqOEmi5YuZ9UCY,5130
+pip/_vendor/rich/color.py,sha256=3HSULVDj7qQkXUdFWv78JOiSZzfy5y1nkcYhna296V0,18211
+pip/_vendor/rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054
+pip/_vendor/rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131
+pip/_vendor/rich/console.py,sha256=t9azZpmRMVU5cphVBZSShNsmBxd2-IAWcTTlhor-E1s,100849
+pip/_vendor/rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288
+pip/_vendor/rich/containers.py,sha256=c_56TxcedGYqDepHBMTuZdUIijitAQgnox-Qde0Z1qo,5502
+pip/_vendor/rich/control.py,sha256=EUTSUFLQbxY6Zmo_sdM-5Ls323vIHTBfN8TPulqeHUY,6487
+pip/_vendor/rich/default_styles.py,sha256=khQFqqaoDs3bprMqWpHw8nO5UpG2DN6QtuTd6LzZwYc,8257
+pip/_vendor/rich/diagnose.py,sha256=fJl1TItRn19gGwouqTg-8zPUW3YqQBqGltrfPQs1H9w,1025
+pip/_vendor/rich/emoji.py,sha256=Wd4bQubZdSy6-PyrRQNuMHtn2VkljK9uPZPVlu2cmx0,2367
+pip/_vendor/rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642
+pip/_vendor/rich/file_proxy.py,sha256=Tl9THMDZ-Pk5Wm8sI1gGg_U5DhusmxD-FZ0fUbcU0W0,1683
+pip/_vendor/rich/filesize.py,sha256=_iz9lIpRgvW7MNSeCZnLg-HwzbP4GETg543WqD8SFs0,2484
+pip/_vendor/rich/highlighter.py,sha256=G_sn-8DKjM1sEjLG_oc4ovkWmiUpWvj8bXi0yed2LnY,9586
+pip/_vendor/rich/json.py,sha256=vVEoKdawoJRjAFayPwXkMBPLy7RSTs-f44wSQDR2nJ0,5031
+pip/_vendor/rich/jupyter.py,sha256=QyoKoE_8IdCbrtiSHp9TsTSNyTHY0FO5whE7jOTd9UE,3252
+pip/_vendor/rich/layout.py,sha256=ajkSFAtEVv9EFTcFs-w4uZfft7nEXhNzL7ZVdgrT5rI,14004
+pip/_vendor/rich/live.py,sha256=tF3ukAAJZ_N2ZbGclqZ-iwLoIoZ8f0HHUz79jAyJqj8,15180
+pip/_vendor/rich/live_render.py,sha256=It_39YdzrBm8o3LL0kaGorPFg-BfZWAcrBjLjFokbx4,3521
+pip/_vendor/rich/logging.py,sha256=5KaPPSMP9FxcXPBcKM4cGd_zW78PMgf-YbMVnvfSw0o,12468
+pip/_vendor/rich/markup.py,sha256=3euGKP5s41NCQwaSjTnJxus5iZMHjxpIM0W6fCxra38,8451
+pip/_vendor/rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305
+pip/_vendor/rich/padding.py,sha256=KVEI3tOwo9sgK1YNSuH__M1_jUWmLZwRVV_KmOtVzyM,4908
+pip/_vendor/rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828
+pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396
+pip/_vendor/rich/panel.py,sha256=9sQl00hPIqH5G2gALQo4NepFwpP0k9wT-s_gOms5pIc,11157
+pip/_vendor/rich/pretty.py,sha256=gy3S72u4FRg2ytoo7N1ZDWDIvB4unbzd5iUGdgm-8fc,36391
+pip/_vendor/rich/progress.py,sha256=CUc2lkU-X59mVdGfjMCBkZeiGPL3uxdONjhNJF2T7wY,60408
+pip/_vendor/rich/progress_bar.py,sha256=mZTPpJUwcfcdgQCTTz3kyY-fc79ddLwtx6Ghhxfo064,8162
+pip/_vendor/rich/prompt.py,sha256=l0RhQU-0UVTV9e08xW1BbIj0Jq2IXyChX4lC0lFNzt4,12447
+pip/_vendor/rich/protocol.py,sha256=5hHHDDNHckdk8iWH5zEbi-zuIVSF5hbU2jIo47R7lTE,1391
+pip/_vendor/rich/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166
+pip/_vendor/rich/repr.py,sha256=5MZJZmONgC6kud-QW-_m1okXwL2aR6u6y-pUcUCJz28,4431
+pip/_vendor/rich/rule.py,sha256=0fNaS_aERa3UMRc3T5WMpN_sumtDxfaor2y3of1ftBk,4602
+pip/_vendor/rich/scope.py,sha256=TMUU8qo17thyqQCPqjDLYpg_UU1k5qVd-WwiJvnJVas,2843
+pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591
+pip/_vendor/rich/segment.py,sha256=otnKeKGEV-WRlQVosfJVeFDcDxAKHpvJ_hLzSu5lumM,24743
+pip/_vendor/rich/spinner.py,sha256=onIhpKlljRHppTZasxO8kXgtYyCHUkpSgKglRJ3o51g,4214
+pip/_vendor/rich/status.py,sha256=kkPph3YeAZBo-X-4wPp8gTqZyU466NLwZBA4PZTTewo,4424
+pip/_vendor/rich/style.py,sha256=W9Ccy8Py8lNICtlfcp-ryzMTuQaGxAU3av7-g5fHu0s,26990
+pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258
+pip/_vendor/rich/syntax.py,sha256=eDKIRwl--eZ0Lwo2da2RRtfutXGavrJO61Cl5OkS59U,36371
+pip/_vendor/rich/table.py,sha256=ZmT7V7MMCOYKw7TGY9SZLyYDf6JdM-WVf07FdVuVhTI,40049
+pip/_vendor/rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370
+pip/_vendor/rich/text.py,sha256=AO7JPCz6-gaN1thVLXMBntEmDPVYFgFNG1oM61_sanU,47552
+pip/_vendor/rich/theme.py,sha256=oNyhXhGagtDlbDye3tVu3esWOWk0vNkuxFw-_unlaK0,3771
+pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102
+pip/_vendor/rich/traceback.py,sha256=c0WmB_L04_UfZbLaoH982_U_s7eosxKMUiAVmDPdRYU,35861
+pip/_vendor/rich/tree.py,sha256=yWnQ6rAvRGJ3qZGqBrxS2SW2TKBTNrP0SdY8QxOFPuw,9451
+pip/_vendor/tomli/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072
+pip/_vendor/tomli/__init__.py,sha256=qzEGl8QHhqgQPCuLzfKyPIuH3KKPspf-UVPbZ0ppBD4,314
+pip/_vendor/tomli/__pycache__/__init__.cpython-314.pyc,,
+pip/_vendor/tomli/__pycache__/_parser.cpython-314.pyc,,
+pip/_vendor/tomli/__pycache__/_re.cpython-314.pyc,,
+pip/_vendor/tomli/__pycache__/_types.cpython-314.pyc,,
+pip/_vendor/tomli/_parser.py,sha256=bO8tUYmnyA2K6m4TnbQbfUqmIFcDv7mG1KuC9gqRVmA,25778
+pip/_vendor/tomli/_re.py,sha256=n8-Io8ZK1U-F6jzlg7Pabc40hLFJsawE2uNLKH9w7iU,3235
+pip/_vendor/tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254
+pip/_vendor/tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26
+pip/_vendor/tomli_w/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072
+pip/_vendor/tomli_w/__init__.py,sha256=0F8yDtXx3Uunhm874KrAcP76srsM98y7WyHQwCulZbo,169
+pip/_vendor/rich/logging.py,sha256=5KaPPSMP9FxcXPBcKM4cGd_zW78PMgf-YbMVnvfSw0o,12468 +pip/_vendor/rich/markup.py,sha256=3euGKP5s41NCQwaSjTnJxus5iZMHjxpIM0W6fCxra38,8451 +pip/_vendor/rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305 +pip/_vendor/rich/padding.py,sha256=KVEI3tOwo9sgK1YNSuH__M1_jUWmLZwRVV_KmOtVzyM,4908 +pip/_vendor/rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828 +pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396 +pip/_vendor/rich/panel.py,sha256=9sQl00hPIqH5G2gALQo4NepFwpP0k9wT-s_gOms5pIc,11157 +pip/_vendor/rich/pretty.py,sha256=gy3S72u4FRg2ytoo7N1ZDWDIvB4unbzd5iUGdgm-8fc,36391 +pip/_vendor/rich/progress.py,sha256=CUc2lkU-X59mVdGfjMCBkZeiGPL3uxdONjhNJF2T7wY,60408 +pip/_vendor/rich/progress_bar.py,sha256=mZTPpJUwcfcdgQCTTz3kyY-fc79ddLwtx6Ghhxfo064,8162 +pip/_vendor/rich/prompt.py,sha256=l0RhQU-0UVTV9e08xW1BbIj0Jq2IXyChX4lC0lFNzt4,12447 +pip/_vendor/rich/protocol.py,sha256=5hHHDDNHckdk8iWH5zEbi-zuIVSF5hbU2jIo47R7lTE,1391 +pip/_vendor/rich/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166 +pip/_vendor/rich/repr.py,sha256=5MZJZmONgC6kud-QW-_m1okXwL2aR6u6y-pUcUCJz28,4431 +pip/_vendor/rich/rule.py,sha256=0fNaS_aERa3UMRc3T5WMpN_sumtDxfaor2y3of1ftBk,4602 +pip/_vendor/rich/scope.py,sha256=TMUU8qo17thyqQCPqjDLYpg_UU1k5qVd-WwiJvnJVas,2843 +pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591 +pip/_vendor/rich/segment.py,sha256=otnKeKGEV-WRlQVosfJVeFDcDxAKHpvJ_hLzSu5lumM,24743 +pip/_vendor/rich/spinner.py,sha256=onIhpKlljRHppTZasxO8kXgtYyCHUkpSgKglRJ3o51g,4214 +pip/_vendor/rich/status.py,sha256=kkPph3YeAZBo-X-4wPp8gTqZyU466NLwZBA4PZTTewo,4424 +pip/_vendor/rich/style.py,sha256=W9Ccy8Py8lNICtlfcp-ryzMTuQaGxAU3av7-g5fHu0s,26990 +pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258 +pip/_vendor/rich/syntax.py,sha256=eDKIRwl--eZ0Lwo2da2RRtfutXGavrJO61Cl5OkS59U,36371 +pip/_vendor/rich/table.py,sha256=ZmT7V7MMCOYKw7TGY9SZLyYDf6JdM-WVf07FdVuVhTI,40049 +pip/_vendor/rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370 +pip/_vendor/rich/text.py,sha256=AO7JPCz6-gaN1thVLXMBntEmDPVYFgFNG1oM61_sanU,47552 +pip/_vendor/rich/theme.py,sha256=oNyhXhGagtDlbDye3tVu3esWOWk0vNkuxFw-_unlaK0,3771 +pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102 +pip/_vendor/rich/traceback.py,sha256=c0WmB_L04_UfZbLaoH982_U_s7eosxKMUiAVmDPdRYU,35861 +pip/_vendor/rich/tree.py,sha256=yWnQ6rAvRGJ3qZGqBrxS2SW2TKBTNrP0SdY8QxOFPuw,9451 +pip/_vendor/tomli/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 +pip/_vendor/tomli/__init__.py,sha256=qzEGl8QHhqgQPCuLzfKyPIuH3KKPspf-UVPbZ0ppBD4,314 +pip/_vendor/tomli/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/tomli/__pycache__/_parser.cpython-314.pyc,, +pip/_vendor/tomli/__pycache__/_re.cpython-314.pyc,, +pip/_vendor/tomli/__pycache__/_types.cpython-314.pyc,, +pip/_vendor/tomli/_parser.py,sha256=bO8tUYmnyA2K6m4TnbQbfUqmIFcDv7mG1KuC9gqRVmA,25778 +pip/_vendor/tomli/_re.py,sha256=n8-Io8ZK1U-F6jzlg7Pabc40hLFJsawE2uNLKH9w7iU,3235 +pip/_vendor/tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254 +pip/_vendor/tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +pip/_vendor/tomli_w/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 +pip/_vendor/tomli_w/__init__.py,sha256=0F8yDtXx3Uunhm874KrAcP76srsM98y7WyHQwCulZbo,169 
+pip/_vendor/tomli_w/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/tomli_w/__pycache__/_writer.cpython-314.pyc,, +pip/_vendor/tomli_w/_writer.py,sha256=dsifFS2xYf1i76mmRyfz9y125xC7Z_HQ845ZKhJsYXs,6961 +pip/_vendor/tomli_w/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +pip/_vendor/truststore/LICENSE,sha256=M757fo-k_Rmxdg4ajtimaL2rhSyRtpLdQUJLy3Jan8o,1086 +pip/_vendor/truststore/__init__.py,sha256=Bu7kqkmpunhLsj5xCu8gT_25ktoPXcSnwe8VHk1GmJo,1320 +pip/_vendor/truststore/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/truststore/__pycache__/_api.cpython-314.pyc,, +pip/_vendor/truststore/__pycache__/_macos.cpython-314.pyc,, +pip/_vendor/truststore/__pycache__/_openssl.cpython-314.pyc,, +pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-314.pyc,, +pip/_vendor/truststore/__pycache__/_windows.cpython-314.pyc,, +pip/_vendor/truststore/_api.py,sha256=CYJCV5BTfttZYfqY3movdMBE-8az7uhET_LYbKT2Nn4,11413 +pip/_vendor/truststore/_macos.py,sha256=nZlLkOmszUE0g6ryRwBVGY5COzPyudcsiJtDWarM5LQ,20503 +pip/_vendor/truststore/_openssl.py,sha256=zB-SQvJydks7tQ0yIwrP6GD3fQNSSaPiq7zw4yF5T40,2412 +pip/_vendor/truststore/_ssl_constants.py,sha256=NUD4fVKdSD02ri7-db0tnO0VqLP9aHuzmStcW7tAl08,1130 +pip/_vendor/truststore/_windows.py,sha256=rAHyKYD8M7t-bXfG8VgOVa3TpfhVhbt4rZQlO45YuP8,17993 +pip/_vendor/truststore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115 +pip/_vendor/urllib3/__init__.py,sha256=iXLcYiJySn0GNbWOOZDDApgBL1JgP44EZ8i1760S8Mc,3333 +pip/_vendor/urllib3/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/_collections.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/_version.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/connection.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/connectionpool.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/exceptions.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/fields.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/filepost.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/poolmanager.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/request.cpython-314.pyc,, +pip/_vendor/urllib3/__pycache__/response.cpython-314.pyc,, +pip/_vendor/urllib3/_collections.py,sha256=pyASJJhW7wdOpqJj9QJA8FyGRfr8E8uUUhqUvhF0728,11372 +pip/_vendor/urllib3/_version.py,sha256=t9wGB6ooOTXXgiY66K1m6BZS1CJyXHAU8EoWDTe6Shk,64 +pip/_vendor/urllib3/connection.py,sha256=ttIA909BrbTUzwkqEe_TzZVh4JOOj7g61Ysei2mrwGg,20314 +pip/_vendor/urllib3/connectionpool.py,sha256=e2eiAwNbFNCKxj4bwDKNK-w7HIdSz3OmMxU_TIt-evQ,40408 +pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957 +pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-314.pyc,, 
+pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-314.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632 +pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922 +pip/_vendor/urllib3/contrib/appengine.py,sha256=VR68eAVE137lxTgjBDwCna5UiBZTOKa01Aj_-5BaCz4,11036 +pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=NlfkW7WMdW8ziqudopjHoW299og1BTWi0IeIibquFwk,4528 +pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=hDJh4MhyY_p-oKlFcYcQaVQRDv6GMmBGuW9yjxyeejM,17081 +pip/_vendor/urllib3/contrib/securetransport.py,sha256=Fef1IIUUFHqpevzXiDPbIGkDKchY2FVKeVeLGR1Qq3g,34446 +pip/_vendor/urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097 +pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217 +pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579 +pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440 +pip/_vendor/urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/urllib3/packages/__pycache__/six.cpython-314.pyc,, +pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-314.pyc,, +pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-314.pyc,, +pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417 +pip/_vendor/urllib3/packages/backports/weakref_finalize.py,sha256=tRCal5OAhNSRyb0DhHp-38AtIlCsRP8BxF3NX-6rqIA,5343 +pip/_vendor/urllib3/packages/six.py,sha256=b9LM0wBXv7E7SrbCjAm4wwN-hrH-iNxv18LgWNMMKPo,34665 +pip/_vendor/urllib3/poolmanager.py,sha256=aWyhXRtNO4JUnCSVVqKTKQd8EXTvUm1VN9pgs2bcONo,19990 +pip/_vendor/urllib3/request.py,sha256=YTWFNr7QIwh7E1W9dde9LM77v2VWTJ5V78XuTTw7D1A,6691 +pip/_vendor/urllib3/response.py,sha256=fmDJAFkG71uFTn-sVSTh2Iw0WmcXQYqkbRjihvwBjU8,30641 +pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155 +pip/_vendor/urllib3/util/__pycache__/__init__.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/connection.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/proxy.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/queue.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/request.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/response.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/retry.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/timeout.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/url.cpython-314.pyc,, +pip/_vendor/urllib3/util/__pycache__/wait.cpython-314.pyc,, +pip/_vendor/urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901 +pip/_vendor/urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605 +pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498 
+pip/_vendor/urllib3/util/request.py,sha256=C0OUt2tcU6LRiQJ7YYNP9GvPrSvl7ziIBekQ-5nlBZk,3997
+pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510
+pip/_vendor/urllib3/util/retry.py,sha256=6ENvOZ8PBDzh8kgixpql9lIrb2dxH-k7ZmBanJF2Ng4,22050
+pip/_vendor/urllib3/util/ssl_.py,sha256=QDuuTxPSCj1rYtZ4xpD7Ux-r20TD50aHyqKyhQ7Bq4A,17460
+pip/_vendor/urllib3/util/ssl_match_hostname.py,sha256=Ir4cZVEjmAk8gUAIHWSi7wtOO83UCYABY2xFD1Ql_WA,5758
+pip/_vendor/urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895
+pip/_vendor/urllib3/util/timeout.py,sha256=cwq4dMk87mJHSBktK1miYJ-85G-3T3RmT20v7SFCpno,10168
+pip/_vendor/urllib3/util/url.py,sha256=lCAE7M5myA8EDdW0sJuyyZhVB9K_j38ljWhHAnFaWoE,14296
+pip/_vendor/urllib3/util/wait.py,sha256=fOX0_faozG2P7iVojQoE1mbydweNyTcm-hXEfFrTtLI,5403
+pip/_vendor/vendor.txt,sha256=f2msFLZ-chXWIZSKW31NLGyMWmt_-Vfy7sY5dHYgmnw,342
+pip/py.typed,sha256=EBVvvPRTn_eIpz5e5QztSCdrMX7Qwd7VP93RSoIlZ2I,286
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/REQUESTED b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/REQUESTED
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/REQUESTED
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/REQUESTED
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/WHEEL b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/WHEEL
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/WHEEL
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/WHEEL
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/entry_points.txt b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/entry_points.txt
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/entry_points.txt
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/entry_points.txt
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/AUTHORS.txt b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/AUTHORS.txt
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/AUTHORS.txt
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/AUTHORS.txt
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/LICENSE.txt b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/LICENSE.txt
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/LICENSE.txt
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/LICENSE.txt
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/cachecontrol/LICENSE.txt
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/certifi/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/dependency_groups/LICENSE.txt
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distlib/LICENSE.txt
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/distro/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/idna/LICENSE.md
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/msgpack/COPYING
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.APACHE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/packaging/LICENSE.BSD
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pkg_resources/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/platformdirs/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pygments/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/pyproject_hooks/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/requests/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/resolvelib/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/rich/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/tomli_w/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/truststore/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt b/Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt
rename to Python314_4_x86_Template/Lib/site-packages/pip-26.0.1.dist-info/licenses/src/pip/_vendor/urllib3/LICENSE.txt
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/__init__.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/__main__.py b/Python314_4_x86_Template/Lib/site-packages/pip/__main__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/__main__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/__main__.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/__pip-runner__.py b/Python314_4_x86_Template/Lib/site-packages/pip/__pip-runner__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/__pip-runner__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/__pip-runner__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..20f065e7
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-314.pyc
new file mode 100644
index 00000000..37ed3309
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/__pycache__/__main__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-314.pyc
new file mode 100644
index 00000000..515e4a26
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..6faecc69
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-314.pyc
new file mode 100644
index 00000000..4a3291d4
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-314.pyc
new file mode 100644
index 00000000..2f5a64a9
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-314.pyc
new file mode 100644
index 00000000..a89d6b17
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-314.pyc
new file mode 100644
index 00000000..ecbbed3a
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-314.pyc
new file mode 100644
index 00000000..02b96096
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/main.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-314.pyc
new file mode 100644
index 00000000..0b1f5a67
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-314.pyc
new file mode 100644
index 00000000..effc141e
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-314.pyc
new file mode 100644
index 00000000..1dc00e06
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/build_env.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/build_env.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/build_env.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/build_env.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cache.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cache.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cache.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cache.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..3165c8c8
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-314.pyc
new file mode 100644
index 00000000..2b248ae3
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-314.pyc
new file mode 100644
index 00000000..3f459cd5
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-314.pyc
new file mode 100644
index 00000000..be036591
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-314.pyc
new file mode 100644
index 00000000..f4150e37
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-314.pyc
new file mode 100644
index 00000000..18710367
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-314.pyc
new file mode 100644
index 00000000..30188d55
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-314.pyc
new file mode 100644
index 00000000..66295cae
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-314.pyc
new file mode 100644
index 00000000..0fc44b6c
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-314.pyc
new file mode 100644
index 00000000..f33cc9f6
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-314.pyc
new file mode 100644
index 00000000..efc406f0
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-314.pyc
new file mode 100644
index 00000000..3fdd0101
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-314.pyc
new file mode 100644
index 00000000..77ac2dd5
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/autocompletion.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/autocompletion.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/autocompletion.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/autocompletion.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/base_command.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/base_command.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/base_command.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/base_command.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/cmdoptions.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/cmdoptions.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/cmdoptions.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/cmdoptions.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/command_context.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/command_context.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/command_context.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/command_context.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/index_command.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/index_command.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/index_command.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/index_command.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/main.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/main.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/main.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/main.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/main_parser.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/main_parser.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/main_parser.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/main_parser.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/parser.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/parser.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/parser.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/parser.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/progress_bars.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/progress_bars.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/progress_bars.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/progress_bars.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/req_command.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/req_command.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/req_command.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/req_command.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/spinners.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/spinners.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/spinners.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/spinners.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/status_codes.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/status_codes.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/cli/status_codes.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/cli/status_codes.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..728ad9bc
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-314.pyc
new file mode 100644
index 00000000..bca3ab68
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-314.pyc
new file mode 100644
index 00000000..fe8cd81c
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-314.pyc
new file mode 100644
index 00000000..72d4ec2d
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-314.pyc
new file mode 100644
index 00000000..15dd0674
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-314.pyc
new file mode 100644
index 00000000..8a6c405b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-314.pyc
new file mode 100644
index 00000000..53c8b46b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-314.pyc
new file mode 100644
index 00000000..34d4c384
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-314.pyc
new file mode 100644
index 00000000..02527f80
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-314.pyc
new file mode 100644
index 00000000..4feaa8e1
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-314.pyc
new file mode 100644
index 00000000..e0a73d50
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/index.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-314.pyc
new file mode 100644
index 00000000..39401311
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/inspect.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-314.pyc
new file mode 100644
index 00000000..c23e94ee
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-314.pyc
new file mode 100644
index 00000000..2bf6c697
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-314.pyc
new file mode 100644
index 00000000..84c115f6
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/lock.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-314.pyc
new file mode 100644
index 00000000..145c1d8f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-314.pyc
new file mode 100644
index 00000000..188fbb9b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-314.pyc
new file mode 100644
index 00000000..d8d07e12
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-314.pyc
new file mode 100644
index 00000000..07ed6078
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/cache.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/cache.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/cache.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/cache.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/check.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/check.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/check.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/check.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/completion.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/completion.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/completion.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/completion.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/configuration.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/configuration.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/configuration.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/configuration.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/debug.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/debug.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/debug.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/debug.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/download.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/download.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/download.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/download.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/freeze.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/freeze.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/freeze.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/freeze.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/hash.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/hash.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/hash.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/hash.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/help.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/help.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/help.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/help.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/index.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/index.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/index.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/index.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/inspect.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/inspect.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/inspect.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/inspect.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/install.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/install.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/install.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/install.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/list.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/list.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/list.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/list.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/lock.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/lock.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/lock.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/lock.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/search.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/search.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/search.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/search.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/show.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/show.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/show.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/show.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/uninstall.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/uninstall.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/uninstall.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/uninstall.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/wheel.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/wheel.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/commands/wheel.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/commands/wheel.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/configuration.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/configuration.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/configuration.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/configuration.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..8c46711a
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-314.pyc
new file mode 100644
index 00000000..d0663618
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-314.pyc
new file mode 100644
index 00000000..9f6a328d
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-314.pyc
new file mode 100644
index 00000000..4df7cc8b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-314.pyc
new file mode 100644
index 00000000..5e64f22d
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/base.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/base.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/base.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/base.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/installed.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/installed.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/installed.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/installed.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/sdist.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/sdist.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/sdist.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/sdist.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/wheel.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/wheel.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/distributions/wheel.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/distributions/wheel.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/exceptions.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/exceptions.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/exceptions.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/exceptions.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..3ac9f801
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-314.pyc
new file mode 100644
index 00000000..0cb6ff02
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-314.pyc
new file mode 100644
index 00000000..146091c6
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-314.pyc
new file mode 100644
index 00000000..4a2cef0e
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/collector.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/collector.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/collector.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/collector.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/package_finder.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/package_finder.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/package_finder.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/package_finder.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/sources.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/sources.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/index/sources.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/index/sources.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-314.pyc
b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..baebd6ed Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-314.pyc new file mode 100644 index 00000000..6abe3af0 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-314.pyc new file mode 100644 index 00000000..071789ee Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-314.pyc new file mode 100644 index 00000000..1cd71d4a Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/_distutils.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/_distutils.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/_distutils.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/_distutils.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/_sysconfig.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/_sysconfig.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/_sysconfig.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/_sysconfig.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/base.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/base.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/locations/base.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/locations/base.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/main.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/main.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/main.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/main.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-314.pyc 
b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..9b56489d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-314.pyc new file mode 100644 index 00000000..08557a66 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/_json.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-314.pyc new file mode 100644 index 00000000..99fa9f68 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-314.pyc new file mode 100644 index 00000000..5825ec22 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/_json.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/_json.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/_json.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/_json.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/base.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/base.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/base.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/base.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..277bbfe9 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-314.pyc new file mode 100644 index 00000000..cba28461 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_compat.cpython-314.pyc differ diff --git 
a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-314.pyc new file mode 100644 index 00000000..ec9f5d88 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_dists.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-314.pyc new file mode 100644 index 00000000..755f10d6 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/__pycache__/_envs.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/pkg_resources.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/pkg_resources.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/metadata/pkg_resources.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/metadata/pkg_resources.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..28e2ddec Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-314.pyc new file mode 100644 index 00000000..3e308cc5 Binary files /dev/null and 
b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-314.pyc new file mode 100644 index 00000000..86d02094 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-314.pyc new file mode 100644 index 00000000..bffba57c Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-314.pyc new file mode 100644 index 00000000..350d461a Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-314.pyc new file mode 100644 index 00000000..71a21c1d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-314.pyc new file mode 100644 index 00000000..dac18010 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-314.pyc new file mode 100644 index 00000000..ba569053 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/release_control.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-314.pyc new file mode 100644 index 00000000..4a83fc5e Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-314.pyc new file mode 100644 index 00000000..e286de18 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-314.pyc 
b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-314.pyc new file mode 100644 index 00000000..f26121b5 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-314.pyc new file mode 100644 index 00000000..b5484bbb Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-314.pyc new file mode 100644 index 00000000..a6f9e909 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/candidate.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/candidate.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/candidate.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/candidate.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/direct_url.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/direct_url.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/direct_url.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/direct_url.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/format_control.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/format_control.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/format_control.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/format_control.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/index.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/index.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/index.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/index.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/installation_report.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/installation_report.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/installation_report.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/installation_report.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/link.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/link.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/link.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/link.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/release_control.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/release_control.py similarity index 100% 
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/release_control.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/release_control.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/scheme.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/scheme.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/scheme.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/scheme.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/search_scope.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/search_scope.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/search_scope.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/search_scope.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/selection_prefs.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/selection_prefs.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/selection_prefs.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/selection_prefs.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/target_python.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/target_python.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/target_python.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/target_python.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/wheel.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/wheel.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/models/wheel.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/models/wheel.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..7b326cf4 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-314.pyc new file mode 100644 index 00000000..e7f3bfc6 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/auth.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-314.pyc new file mode 100644 index 00000000..b9752d24 Binary files /dev/null and 
b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/cache.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-314.pyc new file mode 100644 index 00000000..d25ec2ce Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/download.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-314.pyc new file mode 100644 index 00000000..a9a614db Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-314.pyc new file mode 100644 index 00000000..0bb67c58 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/session.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-314.pyc new file mode 100644 index 00000000..3780e432 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/utils.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-314.pyc new file mode 100644 index 00000000..571c9be1 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/__pycache__/xmlrpc.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/auth.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/auth.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/auth.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/auth.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/cache.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/cache.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/cache.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/cache.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/download.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/download.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/download.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/download.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/lazy_wheel.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/lazy_wheel.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/lazy_wheel.py rename to 
Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/lazy_wheel.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/session.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/session.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/session.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/session.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/utils.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/utils.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/utils.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/utils.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/xmlrpc.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/xmlrpc.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/network/xmlrpc.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/network/xmlrpc.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..304b578e Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-314.pyc new file mode 100644 index 00000000..37929e18 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-314.pyc new file mode 100644 index 00000000..db9cd14f Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-314.pyc new file mode 100644 index 00000000..6fb8608b Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/__init__.py rename to 
Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..d4eae62e Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-314.pyc new file mode 100644 index 00000000..d6f4282a Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-314.pyc new file mode 100644 index 00000000..0bc88950 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-314.pyc new file mode 100644 index 00000000..b488cef0 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_editable.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-314.pyc new file mode 100644 index 00000000..a08332ef Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-314.pyc new file mode 100644 index 00000000..87173629 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/build_tracker.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/build_tracker.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/build_tracker.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/build_tracker.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/metadata.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/metadata.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/metadata.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/metadata.py diff 
--git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/metadata_editable.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/metadata_editable.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/metadata_editable.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/metadata_editable.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/wheel.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/wheel.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/wheel.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/wheel.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/wheel_editable.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/wheel_editable.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/build/wheel_editable.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/build/wheel_editable.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/check.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/check.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/check.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/check.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/freeze.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/freeze.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/freeze.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/freeze.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/install/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/install/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/install/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/install/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..1a157446 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-314.pyc new file mode 100644 index 00000000..bc38d4df Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/install/wheel.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/install/wheel.py similarity index 100% rename from 
Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/install/wheel.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/install/wheel.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/prepare.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/prepare.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/operations/prepare.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/operations/prepare.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/pyproject.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/pyproject.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/pyproject.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/pyproject.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..41cd0839 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-314.pyc new file mode 100644 index 00000000..20d3e104 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-314.pyc new file mode 100644 index 00000000..7c0b7326 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/pep723.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-314.pyc new file mode 100644 index 00000000..25934d97 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_dependency_group.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-314.pyc new file mode 100644 index 00000000..ac601913 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-314.pyc new file mode 100644 index 00000000..e61d189e Binary files /dev/null and 
b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-314.pyc new file mode 100644 index 00000000..2879526a Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-314.pyc new file mode 100644 index 00000000..126a05b3 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/constructors.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/constructors.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/constructors.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/constructors.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/pep723.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/pep723.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/pep723.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/pep723.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/req_dependency_group.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/req_dependency_group.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/req_dependency_group.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/req_dependency_group.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/req_file.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/req_file.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/req_file.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/req_file.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/req_install.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/req_install.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/req_install.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/req_install.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/req_set.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/req_set.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/req_set.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/req_set.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/req_uninstall.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/req_uninstall.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/req/req_uninstall.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/req/req_uninstall.py diff --git 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..912b9742 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-314.pyc new file mode 100644 index 00000000..ed87a1fc Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/base.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/base.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/base.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/base.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..6a6f04fb Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-314.pyc new file mode 100644 index 00000000..2a009d44 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__init__.py similarity index 100% rename from 
Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..dd1a32b3
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-314.pyc
new file mode 100644
index 00000000..4f949f9f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-314.pyc
new file mode 100644
index 00000000..00178a8f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-314.pyc
new file mode 100644
index 00000000..274effa9
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-314.pyc
new file mode 100644
index 00000000..c1baa655
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-314.pyc
new file mode 100644
index 00000000..bdf93885
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-314.pyc
new file mode 100644
index 00000000..842d69ab
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-314.pyc
new file mode 100644
index 00000000..b482959f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-314.pyc
new file mode 100644
index 00000000..09ebe594
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/self_outdated_check.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/self_outdated_check.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/self_outdated_check.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/self_outdated_check.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..051369e4
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-314.pyc
new file mode 100644
index 00000000..61e2e805
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_log.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_log.cpython-314.pyc
new file mode 100644
index 00000000..cd283a14
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/_log.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-314.pyc
new file mode 100644
index 00000000..f5189294
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-314.pyc
new file mode 100644
index 00000000..cf1a98d8
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-314.pyc
new file mode 100644
index 00000000..1caa49b4
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-314.pyc
new file mode 100644
index 00000000..d7a76133
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-314.pyc
new file mode 100644
index 00000000..870722b7
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-314.pyc
new file mode 100644
index 00000000..3bb0ecac
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-314.pyc
new file mode 100644
index 00000000..d14b2e0a
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-314.pyc
new file mode 100644
index 00000000..a34da882
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-314.pyc
new file mode 100644
index 00000000..73c6b76f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-314.pyc
new file mode 100644
index 00000000..3b167c4c
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-314.pyc
new file mode 100644
index 00000000..209ed8c6
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-314.pyc
new file mode 100644
index 00000000..dd168803
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-314.pyc
new file mode 100644
index 00000000..8b148f81
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-314.pyc
new file mode 100644
index 00000000..a7bb9c23
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-314.pyc
new file mode 100644
index 00000000..247ffb2d
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/pylock.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/pylock.cpython-314.pyc
new file mode 100644
index 00000000..d4ec13b4
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/pylock.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/retry.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/retry.cpython-314.pyc
new file mode 100644
index 00000000..e5c5ec67
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/retry.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-314.pyc
new file mode 100644
index 00000000..fdd740e8
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-314.pyc
new file mode 100644
index 00000000..181f5f7b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-314.pyc
new file mode 100644
index 00000000..489f8cb5
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-314.pyc
new file mode 100644
index 00000000..3083c0a8
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-314.pyc
new file mode 100644
index 00000000..57f6afef
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-314.pyc
new file mode 100644
index 00000000..e1f47dca
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/_jaraco_text.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/_jaraco_text.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/_jaraco_text.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/_jaraco_text.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/_log.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/_log.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/_log.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/_log.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/appdirs.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/appdirs.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/appdirs.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/appdirs.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/compat.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/compat.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/compat.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/compat.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/compatibility_tags.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/compatibility_tags.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/compatibility_tags.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/compatibility_tags.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/datetime.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/datetime.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/datetime.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/datetime.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/deprecation.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/deprecation.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/deprecation.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/deprecation.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/egg_link.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/egg_link.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/egg_link.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/egg_link.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/entrypoints.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/entrypoints.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/entrypoints.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/entrypoints.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/filesystem.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/filesystem.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/filesystem.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/filesystem.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/filetypes.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/filetypes.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/filetypes.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/filetypes.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/glibc.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/glibc.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/glibc.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/glibc.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/hashes.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/hashes.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/hashes.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/hashes.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/logging.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/logging.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/logging.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/logging.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/misc.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/misc.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/misc.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/misc.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/packaging.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/packaging.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/packaging.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/packaging.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/pylock.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/pylock.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/pylock.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/pylock.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/retry.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/retry.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/retry.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/retry.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/subprocess.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/subprocess.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/subprocess.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/subprocess.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/temp_dir.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/temp_dir.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/temp_dir.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/temp_dir.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/unpacking.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/unpacking.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/unpacking.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/unpacking.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/urls.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/urls.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/urls.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/urls.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/virtualenv.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/virtualenv.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/virtualenv.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/virtualenv.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/wheel.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/wheel.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/utils/wheel.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/utils/wheel.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..26d208bb
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-314.pyc
new file mode 100644
index 00000000..2c2923a3
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-314.pyc
new file mode 100644
index 00000000..416b7bea
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-314.pyc
new file mode 100644
index 00000000..5d978e83
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-314.pyc
new file mode 100644
index 00000000..1c1cd602
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-314.pyc
new file mode 100644
index 00000000..fe5216d2
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/bazaar.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/bazaar.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/bazaar.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/bazaar.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/git.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/git.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/git.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/git.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/mercurial.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/mercurial.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/mercurial.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/mercurial.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/subversion.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/subversion.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/subversion.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/subversion.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/versioncontrol.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/versioncontrol.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/vcs/versioncontrol.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/vcs/versioncontrol.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_internal/wheel_builder.py b/Python314_4_x86_Template/Lib/site-packages/pip/_internal/wheel_builder.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_internal/wheel_builder.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_internal/wheel_builder.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/README.rst b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/README.rst
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/README.rst
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/README.rst
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..2865e652
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/LICENSE.txt b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/LICENSE.txt
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/LICENSE.txt
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/LICENSE.txt
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..d0b994bc
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-314.pyc
new file mode 100644
index 00000000..8da31faa
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-314.pyc
new file mode 100644
index 00000000..c1e8262e
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-314.pyc
new file mode 100644
index 00000000..3edb44ad
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-314.pyc
new file mode 100644
index 00000000..e7a40858
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-314.pyc
new file mode 100644
index 00000000..10221e94
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-314.pyc
new file mode 100644
index 00000000..d7634882
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-314.pyc
new file mode 100644
index 00000000..d852ad48
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-314.pyc
new file mode 100644
index 00000000..bdcde97b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/cache.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/cache.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/cache.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/cache.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..40be53a0
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-314.pyc
new file mode 100644
index 00000000..65e47567
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-314.pyc
new file mode 100644
index 00000000..f309d29b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/controller.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/py.typed
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/py.typed
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/py.typed
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__init__.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__main__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__main__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/__main__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__main__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..9c197a39
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-314.pyc
new file mode 100644
index 00000000..c1738d64
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-314.pyc
new file mode 100644
index 00000000..de52231d
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/cacert.pem b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/cacert.pem
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/cacert.pem
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/cacert.pem
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/core.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/core.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/core.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/core.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/py.typed
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/certifi/py.typed
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/certifi/py.typed
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/LICENSE.txt b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/LICENSE.txt
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/LICENSE.txt
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/LICENSE.txt
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__init__.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__main__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__main__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__main__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__main__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..6e82de5f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-314.pyc
new file mode 100644
index 00000000..01dd3c82
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/__main__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-314.pyc
new file mode 100644
index 00000000..1a6d93a0
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_implementation.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-314.pyc
new file mode 100644
index 00000000..997eeecc
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_lint_dependency_groups.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-314.pyc
new file mode 100644
index 00000000..7d52f0e6
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_pip_wrapper.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-314.pyc
new file mode 100644
index 00000000..ff65f9d4
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/__pycache__/_toml_compat.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_implementation.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_implementation.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_implementation.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_implementation.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_lint_dependency_groups.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_pip_wrapper.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_toml_compat.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_toml_compat.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_toml_compat.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/_toml_compat.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/py.typed
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/py.typed
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/dependency_groups/py.typed
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/LICENSE.txt b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/LICENSE.txt
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/LICENSE.txt
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/LICENSE.txt
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..112758a2
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-314.pyc
new file mode 100644
index 00000000..566c396e
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-314.pyc
new file mode 100644
index 00000000..eae00659
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-314.pyc
new file mode 100644
index 00000000..0555b6d1
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-314.pyc
new file mode 100644
index 00000000..198748a1
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/compat.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/compat.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/compat.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/compat.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/resources.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/resources.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/resources.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/resources.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/scripts.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/scripts.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/scripts.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/scripts.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/t32.exe b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/t32.exe
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/t32.exe
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/t32.exe
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/t64-arm.exe b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/t64-arm.exe
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/t64-arm.exe
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/t64-arm.exe
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/t64.exe b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/t64.exe
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/t64.exe
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/t64.exe
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/util.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/util.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/util.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/util.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/w32.exe b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/w32.exe
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/w32.exe
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/w32.exe
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/w64-arm.exe b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/w64-arm.exe
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/w64-arm.exe
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/w64-arm.exe
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/w64.exe b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/w64.exe
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distlib/w64.exe
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distlib/w64.exe
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__init__.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__main__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__main__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/__main__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__main__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..297bc549
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-314.pyc
new file mode 100644
index 00000000..88d20421
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/__main__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-314.pyc
new file mode 100644
index 00000000..2b7e1def
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/__pycache__/distro.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/distro.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/distro.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/distro.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/distro.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/py.typed
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/distro/py.typed
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/distro/py.typed
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/LICENSE.md b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/LICENSE.md
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/LICENSE.md
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/LICENSE.md
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..02cccb71
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-314.pyc
new file mode 100644
index 00000000..01bf9bba
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-314.pyc
new file mode 100644
index 00000000..0ddd0a6b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-314.pyc
new file mode 100644
index 00000000..c99d8971
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-314.pyc
new file mode 100644
index 00000000..bb233272
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-314.pyc
new file mode 100644
index 00000000..bac5191e
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-314.pyc
new file mode 100644
index 00000000..cd49577f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-314.pyc
new file mode 100644
index 00000000..44bcd9ef
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/codec.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/codec.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/codec.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/codec.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/compat.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/compat.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/compat.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/compat.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/core.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/core.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/core.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/core.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/idnadata.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/idnadata.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/idnadata.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/idnadata.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/intranges.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/intranges.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/intranges.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/intranges.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/package_data.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/package_data.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/package_data.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/package_data.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/py.typed
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/py.typed
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/py.typed
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/uts46data.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/uts46data.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/idna/uts46data.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/idna/uts46data.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/COPYING b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/COPYING
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/COPYING
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/COPYING
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..f9d4cbbc
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-314.pyc
new file mode 100644
index 00000000..d9e67907
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-314.pyc
new file mode 100644
index 00000000..518d12f7
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-314.pyc
new file mode 100644
index 00000000..b5b475fa
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/exceptions.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/exceptions.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/exceptions.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/exceptions.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/ext.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/ext.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/ext.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/ext.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/fallback.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/fallback.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/msgpack/fallback.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/msgpack/fallback.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.APACHE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.APACHE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.APACHE
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.APACHE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.BSD b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.BSD
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.BSD
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/LICENSE.BSD
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-314.pyc
b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..716e1ad3 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-314.pyc new file mode 100644 index 00000000..b0e5c6ce Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_elffile.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-314.pyc new file mode 100644 index 00000000..40c93025 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_manylinux.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-314.pyc new file mode 100644 index 00000000..d5ff37a1 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_musllinux.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-314.pyc new file mode 100644 index 00000000..474eaa5b Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_parser.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-314.pyc new file mode 100644 index 00000000..5f8aa33e Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-314.pyc new file mode 100644 index 00000000..8eccc43f Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/_tokenizer.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-314.pyc new file mode 100644 index 00000000..c16e76d0 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-314.pyc new file mode 100644 index 00000000..8f628603 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/metadata.cpython-314.pyc differ diff --git 
a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/pylock.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/pylock.cpython-314.pyc new file mode 100644 index 00000000..23bc3861 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/pylock.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-314.pyc new file mode 100644 index 00000000..c1d32ab6 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-314.pyc new file mode 100644 index 00000000..171e5a9b Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-314.pyc new file mode 100644 index 00000000..d181260d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-314.pyc new file mode 100644 index 00000000..431f08ff Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-314.pyc new file mode 100644 index 00000000..b881d22b Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_elffile.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_elffile.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_elffile.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_elffile.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_manylinux.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_manylinux.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_manylinux.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_manylinux.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_musllinux.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_musllinux.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_musllinux.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_musllinux.py diff --git 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_parser.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_parser.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_parser.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_parser.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_structures.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_structures.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_structures.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_structures.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_tokenizer.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_tokenizer.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/_tokenizer.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/_tokenizer.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..9acd61c2 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-314.pyc new file mode 100644 index 00000000..f36a84de Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/_spdx.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/_spdx.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/_spdx.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/licenses/_spdx.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/markers.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/markers.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/markers.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/markers.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/metadata.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/metadata.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/metadata.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/metadata.py diff --git 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/py.typed similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/py.typed rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/py.typed diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/pylock.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/pylock.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/pylock.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/pylock.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/requirements.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/requirements.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/requirements.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/requirements.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/specifiers.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/specifiers.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/specifiers.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/specifiers.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/tags.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/tags.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/tags.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/tags.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/utils.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/utils.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/utils.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/utils.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/version.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/version.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/packaging/version.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/packaging/version.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/LICENSE similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/LICENSE rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/LICENSE diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 
00000000..800c4409 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/LICENSE similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/LICENSE rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/LICENSE diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__init__.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__main__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__main__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__main__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__main__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..1136a33d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-314.pyc new file mode 100644 index 00000000..af6b172e Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/__main__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-314.pyc new file mode 100644 index 00000000..bb874e9d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/android.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-314.pyc new file mode 100644 index 00000000..cafae26d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/api.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-314.pyc new file mode 100644 index 00000000..292f97d4 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/macos.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-314.pyc new file mode 
100644 index 00000000..6cbabf7d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/unix.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-314.pyc new file mode 100644 index 00000000..f0fe2852 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/version.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-314.pyc new file mode 100644 index 00000000..a5c2aeb5 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/__pycache__/windows.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/android.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/android.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/android.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/android.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/api.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/api.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/api.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/api.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/macos.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/macos.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/macos.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/macos.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/py.typed similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/py.typed rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/py.typed diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/unix.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/unix.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/unix.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/unix.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/version.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/version.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/version.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/version.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/windows.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/windows.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/windows.py rename to 
Python314_4_x86_Template/Lib/site-packages/pip/_vendor/platformdirs/windows.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/LICENSE similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/LICENSE rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/LICENSE diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__init__.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__main__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__main__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/__main__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__main__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..9679759f Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-314.pyc new file mode 100644 index 00000000..8ad3dd1c Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-314.pyc new file mode 100644 index 00000000..9b80814d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-314.pyc new file mode 100644 index 00000000..3f8978a3 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-314.pyc new file mode 100644 index 00000000..8d18fee8 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-314.pyc new file mode 100644 index 00000000..2a0bdaf1 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-314.pyc differ diff --git 
a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-314.pyc new file mode 100644 index 00000000..031f5f4b Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-314.pyc new file mode 100644 index 00000000..286e5c3f Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-314.pyc new file mode 100644 index 00000000..cd4c3e92 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-314.pyc new file mode 100644 index 00000000..eea150e9 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-314.pyc new file mode 100644 index 00000000..81ae8558 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-314.pyc new file mode 100644 index 00000000..5de9b3c5 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-314.pyc new file mode 100644 index 00000000..d7c820b0 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-314.pyc new file mode 100644 index 00000000..2b245d7d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-314.pyc new file mode 100644 index 00000000..c8e9dcfe Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-314.pyc 
differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/console.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/console.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/console.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/console.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/filter.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/filter.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/filter.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/filter.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..95eddcee Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatter.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatter.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatter.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatter.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..8abf4874 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-314.pyc new file mode 100644 index 00000000..d2b37321 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py rename to 
Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexer.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexer.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexer.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexer.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..c5f7e62e Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-314.pyc new file mode 100644 index 00000000..f93e234d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-314.pyc new file mode 100644 index 00000000..0cc6189e Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/_mapping.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/_mapping.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/_mapping.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/_mapping.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/python.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/python.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/python.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/lexers/python.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/modeline.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/modeline.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/modeline.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/modeline.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/plugin.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/plugin.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/plugin.py rename to 
Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/plugin.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/regexopt.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/regexopt.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/regexopt.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/regexopt.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/scanner.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/scanner.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/scanner.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/scanner.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/sphinxext.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/sphinxext.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/sphinxext.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/sphinxext.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/style.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/style.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/style.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/style.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..57d58e66 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-314.pyc new file mode 100644 index 00000000..697b9173 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/_mapping.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/_mapping.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/_mapping.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/styles/_mapping.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/token.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/token.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/token.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/token.py diff --git 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/unistring.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/unistring.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/unistring.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/unistring.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/util.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/util.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pygments/util.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pygments/util.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/LICENSE similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/LICENSE rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/LICENSE diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..2509a127 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-314.pyc new file mode 100644 index 00000000..848dfde8 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_impl.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_impl.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_impl.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_impl.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..72052c83 Binary files /dev/null and 
b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-314.pyc new file mode 100644 index 00000000..1d2d70c0 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/py.typed similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/py.typed rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/pyproject_hooks/py.typed diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/LICENSE similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/LICENSE rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/LICENSE diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..0e03b787 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-314.pyc new file mode 100644 index 00000000..c12d9242 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-314.pyc new file mode 100644 index 00000000..5cd9e958 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-314.pyc 
b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-314.pyc
new file mode 100644
index 00000000..964dfd54
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/api.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/api.cpython-314.pyc
new file mode 100644
index 00000000..efbe44d2
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/api.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-314.pyc
new file mode 100644
index 00000000..7a6c2e95
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-314.pyc
new file mode 100644
index 00000000..1904e304
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-314.pyc
new file mode 100644
index 00000000..1784ae9c
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-314.pyc
new file mode 100644
index 00000000..d44498b1
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-314.pyc
new file mode 100644
index 00000000..1b37b7a4
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/help.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/help.cpython-314.pyc
new file mode 100644
index 00000000..9957b01c
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/help.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-314.pyc
new file mode 100644
index 00000000..c5a3754d
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/models.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/models.cpython-314.pyc
new file mode 100644
index 00000000..bf866712
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/models.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-314.pyc
new file mode 100644
index 00000000..22700e6f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-314.pyc
new file mode 100644
index 00000000..f628d977
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-314.pyc
new file mode 100644
index 00000000..459bd976
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-314.pyc
new file mode 100644
index 00000000..afe184a0
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-314.pyc
new file mode 100644
index 00000000..a220e06b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/__version__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__version__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/__version__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/__version__.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/_internal_utils.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/_internal_utils.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/_internal_utils.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/_internal_utils.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/adapters.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/adapters.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/adapters.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/adapters.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/api.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/api.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/api.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/api.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/auth.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/auth.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/auth.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/auth.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/certs.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/certs.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/certs.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/certs.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/compat.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/compat.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/compat.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/compat.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/cookies.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/cookies.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/cookies.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/cookies.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/exceptions.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/exceptions.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/exceptions.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/exceptions.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/help.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/help.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/help.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/help.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/hooks.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/hooks.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/hooks.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/hooks.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/models.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/models.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/models.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/models.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/packages.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/packages.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/packages.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/packages.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/sessions.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/sessions.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/sessions.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/sessions.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/status_codes.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/status_codes.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/status_codes.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/status_codes.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/structures.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/structures.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/structures.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/structures.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/utils.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/utils.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/requests/utils.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/requests/utils.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..195f60f3
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-314.pyc
new file mode 100644
index 00000000..3554ac00
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-314.pyc
new file mode 100644
index 00000000..b9598e5a
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-314.pyc
new file mode 100644
index 00000000..b94436c2
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/providers.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/providers.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/providers.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/providers.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/py.typed
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/py.typed
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/py.typed
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/reporters.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/reporters.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/reporters.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/reporters.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__init__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..8ed66a98
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-314.pyc
new file mode 100644
index 00000000..174e2713
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/abstract.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-314.pyc
new file mode 100644
index 00000000..b4829bc9
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/criterion.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-314.pyc
new file mode 100644
index 00000000..a1cb2a23
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/exceptions.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-314.pyc
new file mode 100644
index 00000000..600b8663
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/__pycache__/resolution.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/abstract.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/criterion.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/exceptions.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/resolvers/resolution.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/structs.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/structs.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/structs.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/resolvelib/structs.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/LICENSE
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/LICENSE
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/LICENSE
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/__init__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__init__.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/__main__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__main__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/__main__.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__main__.py
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..c4086724
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__main__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__main__.cpython-314.pyc
new file mode 100644
index 00000000..4a1a7a5f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/__main__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_cell_widths.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_cell_widths.cpython-314.pyc
new file mode 100644
index 00000000..533f0ff5
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_cell_widths.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-314.pyc
new file mode 100644
index 00000000..7faaa945
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_replace.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_replace.cpython-314.pyc
new file mode 100644
index 00000000..c674f31c
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_emoji_replace.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_export_format.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_export_format.cpython-314.pyc
new file mode 100644
index 00000000..83e6985d
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_export_format.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_extension.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_extension.cpython-314.pyc
new file mode 100644
index 00000000..d7f273d0
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_extension.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_fileno.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_fileno.cpython-314.pyc
new file mode 100644
index 00000000..1f0d13d7
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_fileno.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_inspect.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_inspect.cpython-314.pyc
new file mode 100644
index 00000000..8b0a9cb7
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_inspect.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_log_render.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_log_render.cpython-314.pyc
new file mode 100644
index 00000000..4785b894
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_log_render.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_loop.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_loop.cpython-314.pyc
new file mode 100644
index 00000000..128b2bd0
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_loop.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_null_file.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_null_file.cpython-314.pyc
new file mode 100644
index 00000000..89fb2eaf
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_null_file.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_palettes.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_palettes.cpython-314.pyc
new file mode 100644
index 00000000..fb566381
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_palettes.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_pick.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_pick.cpython-314.pyc
new file mode 100644
index 00000000..565e4eca
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_pick.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_ratio.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_ratio.cpython-314.pyc
new file mode 100644
index 00000000..6577a620
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_ratio.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_spinners.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_spinners.cpython-314.pyc
new file mode 100644
index 00000000..f275b5a9
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_spinners.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_stack.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_stack.cpython-314.pyc
new file mode 100644
index 00000000..3bb8c5a3
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_stack.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_timer.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_timer.cpython-314.pyc
new file mode 100644
index 00000000..e5d36abf
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_timer.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_win32_console.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_win32_console.cpython-314.pyc
new file mode 100644
index 00000000..bece4f9a
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_win32_console.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows.cpython-314.pyc
new file mode 100644
index 00000000..acd27acd
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows_renderer.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows_renderer.cpython-314.pyc
new file mode 100644
index 00000000..e9f2e774
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_windows_renderer.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_wrap.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_wrap.cpython-314.pyc
new file mode 100644
index 00000000..e77de5a8
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/_wrap.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/abc.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/abc.cpython-314.pyc
new file mode 100644
index 00000000..cc061c1e
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/abc.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/align.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/align.cpython-314.pyc
new file mode 100644
index 00000000..0f0f5451
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/align.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/ansi.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/ansi.cpython-314.pyc
new file mode 100644
index 00000000..496dc005
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/ansi.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/bar.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/bar.cpython-314.pyc
new file mode 100644
index 00000000..b8bfcf66
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/bar.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/box.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/box.cpython-314.pyc
new file mode 100644
index 00000000..e64457f3
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/box.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-314.pyc
new file mode 100644
index 00000000..759e10c1
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color.cpython-314.pyc
new file mode 100644
index 00000000..089bbcc5
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color_triplet.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color_triplet.cpython-314.pyc
new file mode 100644
index 00000000..7df4f2fe
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/color_triplet.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/columns.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/columns.cpython-314.pyc
new file mode 100644
index 00000000..6f425664
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/columns.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/console.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/console.cpython-314.pyc
new file mode 100644
index 00000000..a764b592
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/console.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/constrain.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/constrain.cpython-314.pyc
new file mode 100644
index 00000000..ab8c639f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/constrain.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/containers.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/containers.cpython-314.pyc
new file mode 100644
index 00000000..1db804d3
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/containers.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/control.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/control.cpython-314.pyc
new file mode 100644
index 00000000..06d65aa1
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/control.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/default_styles.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/default_styles.cpython-314.pyc
new file mode 100644
index 00000000..eb95744c
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/default_styles.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/diagnose.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/diagnose.cpython-314.pyc
new file mode 100644
index 00000000..29408af0
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/diagnose.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/emoji.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/emoji.cpython-314.pyc
new file mode 100644
index 00000000..43f7c090
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/emoji.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/errors.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/errors.cpython-314.pyc
new file mode 100644
index 00000000..2d5f2655
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/errors.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/file_proxy.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/file_proxy.cpython-314.pyc
new file mode 100644
index 00000000..ad169bde
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/file_proxy.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/filesize.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/filesize.cpython-314.pyc
new file mode 100644
index 00000000..0b66bd82
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/filesize.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/highlighter.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/highlighter.cpython-314.pyc
new file mode 100644
index 00000000..20bd6713
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/highlighter.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/json.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/json.cpython-314.pyc
new file mode 100644
index 00000000..7d285bf8
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/json.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/jupyter.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/jupyter.cpython-314.pyc
new file mode 100644
index 00000000..3f34b324
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/jupyter.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/layout.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/layout.cpython-314.pyc
new file mode 100644
index 00000000..9a934d3f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/layout.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live.cpython-314.pyc
new file mode 100644
index 00000000..8547c14d
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live_render.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live_render.cpython-314.pyc
new file mode 100644
index 00000000..1de6220b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/live_render.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/logging.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/logging.cpython-314.pyc
new file mode 100644
index 00000000..3fcba560
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/logging.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/markup.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/markup.cpython-314.pyc
new file mode 100644
index 00000000..9d235776
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/markup.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/measure.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/measure.cpython-314.pyc
new file mode 100644
index 00000000..038dcd45
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/measure.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/padding.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/padding.cpython-314.pyc
new file mode 100644
index 00000000..1972709d
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/padding.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pager.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pager.cpython-314.pyc
new file mode 100644
index 00000000..af4c18e9
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pager.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/palette.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/palette.cpython-314.pyc
new file mode 100644
index 00000000..86da4d13
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/palette.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/panel.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/panel.cpython-314.pyc
new file mode 100644
index 00000000..aadd31c5
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/panel.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pretty.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pretty.cpython-314.pyc
new file mode 100644
index 00000000..8ac6d0a7
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/pretty.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress.cpython-314.pyc
new file mode 100644
index 00000000..09d09a67
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress_bar.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress_bar.cpython-314.pyc
new file mode 100644
index 00000000..7fb30ef3
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/progress_bar.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/prompt.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/prompt.cpython-314.pyc
new file mode 100644
index 00000000..5f41d0c4
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/prompt.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/protocol.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/protocol.cpython-314.pyc
new file mode 100644
index 00000000..c87d6a98
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/protocol.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/region.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/region.cpython-314.pyc
new file mode 100644
index 00000000..879268b2
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/region.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/repr.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/repr.cpython-314.pyc
new file mode 100644
index 00000000..158ef06b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/repr.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/rule.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/rule.cpython-314.pyc
new file mode 100644
index 00000000..18577c49
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/rule.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/scope.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/scope.cpython-314.pyc
new file mode 100644
index 00000000..cb9db11d
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/scope.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/screen.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/screen.cpython-314.pyc
new file mode 100644
index 00000000..36d03095
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/screen.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/segment.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/segment.cpython-314.pyc
new file mode 100644
index 00000000..83e92d1e
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/segment.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/spinner.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/spinner.cpython-314.pyc
new file mode 100644
index 00000000..cb45d476
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/spinner.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/status.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/status.cpython-314.pyc
new file mode 100644
index 00000000..1344788f
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/status.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/style.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/style.cpython-314.pyc
new file mode 100644
index 00000000..cd304f4b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/style.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/styled.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/styled.cpython-314.pyc
new file mode 100644
index 00000000..e5cea4f8
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/styled.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/syntax.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/syntax.cpython-314.pyc
new file mode 100644
index 00000000..381dab6b
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/syntax.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/table.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/table.cpython-314.pyc
new file mode 100644
index 00000000..51bc02bd
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/table.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/terminal_theme.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/terminal_theme.cpython-314.pyc
new file mode 100644
index 00000000..11b29e6a
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/terminal_theme.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/text.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/text.cpython-314.pyc
new file mode 100644
index 00000000..24595968
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/text.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/theme.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/theme.cpython-314.pyc
new file mode 100644
index 00000000..e4ac89ce
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/theme.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/themes.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/themes.cpython-314.pyc
new file mode 100644
index 00000000..f931bd3e
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/themes.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/traceback.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/traceback.cpython-314.pyc
new file mode 100644
index 00000000..1f1c3dab
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/traceback.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/tree.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/tree.cpython-314.pyc
new file mode 100644
index 00000000..9aacab0e
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/__pycache__/tree.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_cell_widths.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_cell_widths.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_cell_widths.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_cell_widths.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_emoji_codes.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_emoji_codes.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_emoji_codes.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_emoji_codes.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_emoji_replace.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_emoji_replace.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_emoji_replace.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_emoji_replace.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_export_format.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_export_format.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_export_format.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_export_format.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_extension.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_extension.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_extension.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_extension.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_fileno.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_fileno.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_fileno.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_fileno.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_inspect.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_inspect.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_inspect.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_inspect.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_log_render.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_log_render.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_log_render.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_log_render.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_loop.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_loop.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_loop.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_loop.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_null_file.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_null_file.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_null_file.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_null_file.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_palettes.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_palettes.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_palettes.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_palettes.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_pick.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_pick.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_pick.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_pick.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_ratio.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_ratio.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_ratio.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_ratio.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_spinners.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_spinners.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_spinners.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_spinners.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_stack.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_stack.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_stack.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_stack.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_timer.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_timer.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_timer.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_timer.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_win32_console.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_win32_console.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_win32_console.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_win32_console.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_windows.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_windows.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_windows.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_windows.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_windows_renderer.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_windows_renderer.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_windows_renderer.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_windows_renderer.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_wrap.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_wrap.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/_wrap.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/_wrap.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/abc.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/abc.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/abc.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/abc.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/align.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/align.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/align.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/align.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/ansi.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/ansi.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/ansi.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/ansi.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/bar.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/bar.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/bar.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/bar.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/box.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/box.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/box.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/box.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/cells.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/cells.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/cells.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/cells.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/color.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/color.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/color.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/color.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/color_triplet.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/color_triplet.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/color_triplet.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/color_triplet.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/columns.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/columns.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/columns.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/columns.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/console.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/console.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/console.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/console.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/constrain.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/constrain.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/constrain.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/constrain.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/containers.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/containers.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/containers.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/containers.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/control.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/control.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/control.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/control.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/default_styles.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/default_styles.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/default_styles.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/default_styles.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/diagnose.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/diagnose.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/diagnose.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/diagnose.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/emoji.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/emoji.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/emoji.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/emoji.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/errors.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/errors.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/errors.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/errors.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/file_proxy.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/file_proxy.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/file_proxy.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/file_proxy.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/filesize.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/filesize.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/filesize.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/filesize.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/highlighter.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/highlighter.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/highlighter.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/highlighter.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/json.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/json.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/json.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/json.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/jupyter.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/jupyter.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/jupyter.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/jupyter.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/layout.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/layout.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/layout.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/layout.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/live.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/live.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/live.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/live.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/live_render.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/live_render.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/live_render.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/live_render.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/logging.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/logging.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/logging.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/logging.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/markup.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/markup.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/markup.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/markup.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/measure.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/measure.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/measure.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/measure.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/padding.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/padding.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/padding.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/padding.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/pager.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/pager.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/pager.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/pager.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/palette.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/palette.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/palette.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/palette.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/panel.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/panel.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/panel.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/panel.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/pretty.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/pretty.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/pretty.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/pretty.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/progress.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/progress.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/progress.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/progress.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/progress_bar.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/progress_bar.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/progress_bar.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/progress_bar.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/prompt.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/prompt.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/prompt.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/prompt.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/protocol.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/protocol.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/protocol.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/protocol.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/py.typed
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/py.typed
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/py.typed
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/region.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/region.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/region.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/region.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/repr.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/repr.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/repr.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/repr.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/rule.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/rule.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/rule.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/rule.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/scope.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/scope.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/scope.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/scope.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/screen.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/screen.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/screen.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/screen.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/segment.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/segment.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/segment.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/segment.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/spinner.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/spinner.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/spinner.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/spinner.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/status.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/status.py
similarity index 100%
rename from
Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/status.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/status.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/style.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/style.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/style.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/style.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/styled.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/styled.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/styled.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/styled.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/syntax.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/syntax.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/syntax.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/syntax.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/table.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/table.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/table.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/table.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/terminal_theme.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/terminal_theme.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/terminal_theme.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/terminal_theme.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/text.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/text.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/text.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/text.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/theme.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/theme.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/theme.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/theme.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/themes.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/themes.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/themes.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/themes.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/traceback.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/traceback.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/traceback.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/traceback.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/tree.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/tree.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/rich/tree.py rename to 
Python314_4_x86_Template/Lib/site-packages/pip/_vendor/rich/tree.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/LICENSE similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/LICENSE rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/LICENSE diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..35b419e9 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_parser.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_parser.cpython-314.pyc new file mode 100644 index 00000000..6a660b40 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_parser.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_re.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_re.cpython-314.pyc new file mode 100644 index 00000000..b3347d7c Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_re.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_types.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_types.cpython-314.pyc new file mode 100644 index 00000000..a6851261 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/__pycache__/_types.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/_parser.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/_parser.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/_parser.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/_parser.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/_re.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/_re.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/_re.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/_re.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/_types.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/_types.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/_types.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/_types.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/py.typed similarity index 100% rename from 
Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli/py.typed rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli/py.typed diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/LICENSE similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/LICENSE rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/LICENSE diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..d7c89271 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/_writer.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/_writer.cpython-314.pyc new file mode 100644 index 00000000..c5dc409d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/__pycache__/_writer.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/_writer.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/_writer.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/_writer.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/_writer.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/py.typed similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/py.typed rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/tomli_w/py.typed diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/LICENSE b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/LICENSE similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/LICENSE rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/LICENSE diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..3ccef896 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/__init__.cpython-314.pyc differ diff --git 
a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_api.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_api.cpython-314.pyc new file mode 100644 index 00000000..023f5142 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_api.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_macos.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_macos.cpython-314.pyc new file mode 100644 index 00000000..a23e81b9 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_macos.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_openssl.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_openssl.cpython-314.pyc new file mode 100644 index 00000000..22e280c2 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_openssl.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-314.pyc new file mode 100644 index 00000000..54e38586 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_windows.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_windows.cpython-314.pyc new file mode 100644 index 00000000..d6050408 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/__pycache__/_windows.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/_api.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/_api.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/_api.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/_api.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/_macos.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/_macos.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/_macos.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/_macos.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/_openssl.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/_openssl.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/_openssl.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/_openssl.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/_ssl_constants.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/_ssl_constants.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/_ssl_constants.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/_ssl_constants.py diff --git 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/_windows.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/_windows.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/_windows.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/_windows.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/py.typed similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/truststore/py.typed rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/truststore/py.typed diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/LICENSE.txt b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/LICENSE.txt similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/LICENSE.txt rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/LICENSE.txt diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..d31954f8 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-314.pyc new file mode 100644 index 00000000..4d1f9524 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-314.pyc new file mode 100644 index 00000000..54a5be1e Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-314.pyc new file mode 100644 index 00000000..5c93e21d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-314.pyc new file mode 100644 index 00000000..62215679 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-314.pyc differ diff --git 
a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-314.pyc new file mode 100644 index 00000000..337834c2 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-314.pyc new file mode 100644 index 00000000..73676fbd Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-314.pyc new file mode 100644 index 00000000..2d4729e0 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-314.pyc new file mode 100644 index 00000000..26fe9816 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-314.pyc new file mode 100644 index 00000000..4b09681b Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-314.pyc new file mode 100644 index 00000000..4abc3394 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/_collections.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/_collections.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/_collections.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/_collections.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/_version.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/_version.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/_version.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/_version.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/connection.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/connection.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/connection.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/connection.py diff --git 
a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py diff --git a/Python313_13_x86_Template/Lib/urllib/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/urllib/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..60ff1d45 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-314.pyc new file mode 100644 index 00000000..28cc5538 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-314.pyc new file mode 100644 index 00000000..f5b9cda4 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-314.pyc new file mode 100644 index 00000000..5e51379d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-314.pyc new file mode 100644 index 00000000..54454c96 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-314.pyc new file mode 100644 index 00000000..b73400d3 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-314.pyc new file mode 100644 index 00000000..3a5ea6ce 
Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..09e8b11c Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-314.pyc new file mode 100644 index 00000000..fc5788b3 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-314.pyc new file mode 100644 index 00000000..e8c3bb5c Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py rename to 
Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/exceptions.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/exceptions.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/exceptions.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/exceptions.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/fields.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/fields.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/fields.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/fields.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/filepost.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/filepost.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/filepost.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/filepost.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..02d1ded4 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-314.pyc 
b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-314.pyc new file mode 100644 index 00000000..bfd2bd59 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..e218c45d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-314.pyc new file mode 100644 index 00000000..53de2e4c Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-314.pyc new file mode 100644 index 00000000..e49ac381 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/weakref_finalize.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/weakref_finalize.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/weakref_finalize.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/backports/weakref_finalize.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/six.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/six.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/six.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/packages/six.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py rename to 
Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/request.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/request.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/request.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/request.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/response.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/response.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/response.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/response.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__init__.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__init__.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__init__.py diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..a19d9958 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-314.pyc new file mode 100644 index 00000000..f6da5a54 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-314.pyc new file mode 100644 index 00000000..be753227 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-314.pyc new file mode 100644 index 00000000..7153353c Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-314.pyc new file mode 100644 index 00000000..c68c6d7f Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-314.pyc new file mode 100644 index 00000000..33c378bf Binary files /dev/null and 
b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-314.pyc new file mode 100644 index 00000000..f4b41337 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-314.pyc new file mode 100644 index 00000000..19314c4d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-314.pyc new file mode 100644 index 00000000..e7977839 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-314.pyc new file mode 100644 index 00000000..05148f1e Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-314.pyc new file mode 100644 index 00000000..0b4b4f1d Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-314.pyc new file mode 100644 index 00000000..b9bcbeae Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-314.pyc b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-314.pyc new file mode 100644 index 00000000..b8682bb5 Binary files /dev/null and b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/connection.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/connection.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/connection.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/connection.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py 
similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/queue.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/queue.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/queue.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/queue.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/request.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/request.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/request.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/request.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/response.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/response.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/response.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/response.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/retry.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/retry.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/retry.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/retry.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/url.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/url.py similarity index 100% rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/url.py rename to 
Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/url.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/wait.py b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/wait.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/wait.py
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/urllib3/util/wait.py
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/_vendor/vendor.txt b/Python314_4_x86_Template/Lib/site-packages/pip/_vendor/vendor.txt
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/_vendor/vendor.txt
rename to Python314_4_x86_Template/Lib/site-packages/pip/_vendor/vendor.txt
diff --git a/Python313_13_x86_Template/Lib/site-packages/pip/py.typed b/Python314_4_x86_Template/Lib/site-packages/pip/py.typed
similarity index 100%
rename from Python313_13_x86_Template/Lib/site-packages/pip/py.typed
rename to Python314_4_x86_Template/Lib/site-packages/pip/py.typed
diff --git a/Python314_4_x86_Template/Lib/site.py b/Python314_4_x86_Template/Lib/site.py
new file mode 100644
index 00000000..aeb7c6cf
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/site.py
@@ -0,0 +1,779 @@
+"""Append module search paths for third-party packages to sys.path.
+
+****************************************************************
+* This module is automatically imported during initialization. *
+****************************************************************
+
+This will append site-specific paths to the module search path. On
+Unix (including Mac OSX), it starts with sys.prefix and
+sys.exec_prefix (if different) and appends
+lib/python<version>/site-packages.
+On other platforms (such as Windows), it tries each of the
+prefixes directly, as well as with lib/site-packages appended. The
+resulting directories, if they exist, are appended to sys.path, and
+also inspected for path configuration files.
+
+If a file named "pyvenv.cfg" exists one directory above sys.executable,
+sys.prefix and sys.exec_prefix are set to that directory and
+it is also checked for site-packages (sys.base_prefix and
+sys.base_exec_prefix will always be the "real" prefixes of the Python
+installation). If "pyvenv.cfg" (a bootstrap configuration file) contains
+the key "include-system-site-packages" set to anything other than "false"
+(case-insensitive), the system-level prefixes will still also be
+searched for site-packages; otherwise they won't.
+
+All of the resulting site-specific directories, if they exist, are
+appended to sys.path, and also inspected for path configuration
+files.
+
+A path configuration file is a file whose name has the form
+<package>.pth; its contents are additional directories (one per line)
+to be added to sys.path. Non-existing directories (or
+non-directories) are never added to sys.path; no directory is added to
+sys.path more than once. Blank lines and lines beginning with
+'#' are skipped. Lines starting with 'import' are executed.
+
+For example, suppose sys.prefix and sys.exec_prefix are set to
+/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
+with three subdirectories, foo, bar and spam, and two path
+configuration files, foo.pth and bar.pth. Assume foo.pth contains the
+following:
+
+  # foo package configuration
+  foo
+  bar
+  bletch
+
+and bar.pth contains:
+
+  # bar package configuration
+  bar
+
+Then the following directories are added to sys.path, in this order:
+
+  /usr/local/lib/python2.5/site-packages/bar
+  /usr/local/lib/python2.5/site-packages/foo
+
+Note that bletch is omitted because it doesn't exist; bar precedes foo
+because bar.pth comes alphabetically before foo.pth; and spam is
+omitted because it is not mentioned in either path configuration file.
+
+The readline module is also automatically configured to enable
+completion for systems that support it. This can be overridden in
+sitecustomize, usercustomize or PYTHONSTARTUP. Starting Python in
+isolated mode (-I) disables automatic readline configuration.
+
+After these operations, an attempt is made to import a module
+named sitecustomize, which can perform arbitrary additional
+site-specific customizations. If this import fails with an
+ImportError exception, it is silently ignored.
+"""
+
+import sys
+import os
+import builtins
+import _sitebuiltins
+import _io as io
+import stat
+import errno
+
+# Prefixes for site-packages; add additional prefixes like /usr/local here
+PREFIXES = [sys.prefix, sys.exec_prefix]
+# Enable per user site-packages directory
+# set it to False to disable the feature or True to force the feature
+ENABLE_USER_SITE = None
+
+# for distutils.commands.install
+# These values are initialized by the getuserbase() and getusersitepackages()
+# functions, through the main() function when Python starts.
+USER_SITE = None
+USER_BASE = None
+
+
+def _trace(message):
+    if sys.flags.verbose:
+        print(message, file=sys.stderr)
+
+
+def _warn(*args, **kwargs):
+    import warnings
+
+    warnings.warn(*args, **kwargs)
+
+
+def makepath(*paths):
+    dir = os.path.join(*paths)
+    try:
+        dir = os.path.abspath(dir)
+    except OSError:
+        pass
+    return dir, os.path.normcase(dir)
+
+
+def abs_paths():
+    """Set all module __file__ and __cached__ attributes to an absolute path"""
+    for m in set(sys.modules.values()):
+        loader_module = None
+        try:
+            loader_module = m.__loader__.__module__
+        except AttributeError:
+            try:
+                loader_module = m.__spec__.loader.__module__
+            except AttributeError:
+                pass
+        if loader_module not in {'_frozen_importlib', '_frozen_importlib_external'}:
+            continue   # don't mess with a PEP 302-supplied __file__
+        try:
+            m.__file__ = os.path.abspath(m.__file__)
+        except (AttributeError, OSError, TypeError):
+            pass
+        try:
+            m.__cached__ = os.path.abspath(m.__cached__)
+        except (AttributeError, OSError, TypeError):
+            pass
+
+
+def removeduppaths():
+    """ Remove duplicate entries from sys.path along with making them
+    absolute"""
+    # This ensures that the initial path provided by the interpreter contains
+    # only absolute pathnames, even if we're running from the build directory.
+    L = []
+    known_paths = set()
+    for dir in sys.path:
+        # Filter out duplicate paths (on case-insensitive file systems also
+        # if they only differ in case); turn relative paths into absolute
+        # paths.
+        dir, dircase = makepath(dir)
+        if dircase not in known_paths:
+            L.append(dir)
+            known_paths.add(dircase)
+    sys.path[:] = L
+    return known_paths
+
+
+def _init_pathinfo():
+    """Return a set containing all existing file system items from sys.path."""
+    d = set()
+    for item in sys.path:
+        try:
+            if os.path.exists(item):
+                _, itemcase = makepath(item)
+                d.add(itemcase)
+        except TypeError:
+            continue
+    return d
+
+
+def addpackage(sitedir, name, known_paths):
+    """Process a .pth file within the site-packages directory:
+       For each line in the file, either combine it with sitedir to a path
+       and add that to known_paths, or execute it if it starts with 'import '.
+    """
+    if known_paths is None:
+        known_paths = _init_pathinfo()
+        reset = True
+    else:
+        reset = False
+    fullname = os.path.join(sitedir, name)
+    try:
+        st = os.lstat(fullname)
+    except OSError:
+        return
+    if ((getattr(st, 'st_flags', 0) & stat.UF_HIDDEN) or
+        (getattr(st, 'st_file_attributes', 0) & stat.FILE_ATTRIBUTE_HIDDEN)):
+        _trace(f"Skipping hidden .pth file: {fullname!r}")
+        return
+    _trace(f"Processing .pth file: {fullname!r}")
+    try:
+        with io.open_code(fullname) as f:
+            pth_content = f.read()
+    except OSError:
+        return
+
+    try:
+        # Accept BOM markers in .pth files as we do in source files
+        # (Windows PowerShell 5.1 makes it hard to emit UTF-8 files without a BOM)
+        pth_content = pth_content.decode("utf-8-sig")
+    except UnicodeDecodeError:
+        # Fallback to locale encoding for backward compatibility.
+        # We will deprecate this fallback in the future.
+        import locale
+        pth_content = pth_content.decode(locale.getencoding())
+        _trace(f"Cannot read {fullname!r} as UTF-8. "
+               f"Using fallback encoding {locale.getencoding()!r}")
+
+    for n, line in enumerate(pth_content.splitlines(), 1):
+        if line.startswith("#"):
+            continue
+        if line.strip() == "":
+            continue
+        try:
+            if line.startswith(("import ", "import\t")):
+                exec(line)
+                continue
+            line = line.rstrip()
+            dir, dircase = makepath(sitedir, line)
+            if dircase not in known_paths and os.path.exists(dir):
+                sys.path.append(dir)
+                known_paths.add(dircase)
+        except Exception as exc:
+            print(f"Error processing line {n:d} of {fullname}:\n",
+                  file=sys.stderr)
+            import traceback
+            for record in traceback.format_exception(exc):
+                for line in record.splitlines():
+                    print('  '+line, file=sys.stderr)
+            print("\nRemainder of file ignored", file=sys.stderr)
+            break
+    if reset:
+        known_paths = None
+    return known_paths
+
+
+def addsitedir(sitedir, known_paths=None):
+    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
+    'sitedir'"""
+    _trace(f"Adding directory: {sitedir!r}")
+    if known_paths is None:
+        known_paths = _init_pathinfo()
+        reset = True
+    else:
+        reset = False
+    sitedir, sitedircase = makepath(sitedir)
+    if not sitedircase in known_paths:
+        sys.path.append(sitedir)        # Add path component
+        known_paths.add(sitedircase)
+    try:
+        names = os.listdir(sitedir)
+    except OSError:
+        return
+    names = [name for name in names
+             if name.endswith(".pth") and not name.startswith(".")]
+    for name in sorted(names):
+        addpackage(sitedir, name, known_paths)
+    if reset:
+        known_paths = None
+    return known_paths
+
+
+def check_enableusersite():
+    """Check if user site directory is safe for inclusion
+
+    The function tests for the command line flag (including the environment
+    variable) and checks that the process uid/gid equal the effective uid/gid.
+
+    None: Disabled for security reasons
+    False: Disabled by user (command line option)
+    True: Safe and enabled
+    """
+    if sys.flags.no_user_site:
+        return False
+
+    if hasattr(os, "getuid") and hasattr(os, "geteuid"):
+        # check process uid == effective uid
+        if os.geteuid() != os.getuid():
+            return None
+    if hasattr(os, "getgid") and hasattr(os, "getegid"):
+        # check process gid == effective gid
+        if os.getegid() != os.getgid():
+            return None
+
+    return True
+
+
+# NOTE: sysconfig and its dependencies are relatively large, but the site
+# module needs only a very limited part of them.
+# To speed up startup time, we keep a copy of them here.
+#
+# See https://bugs.python.org/issue29585
+
+# Copy of sysconfig._get_implementation()
+def _get_implementation():
+    return 'Python'
+
+# Copy of sysconfig._getuserbase()
+def _getuserbase():
+    env_base = os.environ.get("PYTHONUSERBASE", None)
+    if env_base:
+        return env_base
+
+    # Emscripten, iOS, tvOS, VxWorks, WASI, and watchOS have no home directories
+    if sys.platform in {"emscripten", "ios", "tvos", "vxworks", "wasi", "watchos"}:
+        return None
+
+    def joinuser(*args):
+        return os.path.expanduser(os.path.join(*args))
+
+    if os.name == "nt":
+        base = os.environ.get("APPDATA") or "~"
+        return joinuser(base, _get_implementation())
+
+    if sys.platform == "darwin" and sys._framework:
+        return joinuser("~", "Library", sys._framework,
+                        "%d.%d" % sys.version_info[:2])
+
+    return joinuser("~", ".local")
+
+
+# Same as sysconfig.get_path('purelib', os.name+'_user')
+def _get_path(userbase):
+    version = sys.version_info
+    if hasattr(sys, 'abiflags') and 't' in sys.abiflags:
+        abi_thread = 't'
+    else:
+        abi_thread = ''
+
+    implementation = _get_implementation()
+    implementation_lower = implementation.lower()
+    if os.name == 'nt':
+        ver_nodot = sys.winver.replace('.', '')
+        return f'{userbase}\\{implementation}{ver_nodot}\\site-packages'
+
+    if sys.platform == 'darwin' and sys._framework:
+        return f'{userbase}/lib/{implementation_lower}/site-packages'
+
+    return f'{userbase}/lib/python{version[0]}.{version[1]}{abi_thread}/site-packages'
+
+
+def getuserbase():
+    """Returns the `user base` directory path.
+
+    The `user base` directory can be used to store data. If the global
+    variable ``USER_BASE`` is not initialized yet, this function will also set
+    it.
+    """
+    global USER_BASE
+    if USER_BASE is None:
+        USER_BASE = _getuserbase()
+    return USER_BASE
+
+
+def getusersitepackages():
+    """Returns the user-specific site-packages directory path.
+
+    If the global variable ``USER_SITE`` is not initialized yet, this
+    function will also set it.
+    """
+    global USER_SITE, ENABLE_USER_SITE
+    userbase = getuserbase()  # this will also set USER_BASE
+
+    if USER_SITE is None:
+        if userbase is None:
+            ENABLE_USER_SITE = False  # disable user site and return None
+        else:
+            USER_SITE = _get_path(userbase)
+
+    return USER_SITE
+
+def addusersitepackages(known_paths):
+    """Add a per user site-package to sys.path
+
+    Each user has its own python directory with site-packages in the
+    home directory.
+    """
+    # get the per user site-package path
+    # this call will also make sure USER_BASE and USER_SITE are set
+    _trace("Processing user site-packages")
+    user_site = getusersitepackages()
+
+    if ENABLE_USER_SITE and os.path.isdir(user_site):
+        addsitedir(user_site, known_paths)
+    return known_paths
+
+def getsitepackages(prefixes=None):
+    """Returns a list containing all global site-packages directories.
+ + For each directory present in ``prefixes`` (or the global ``PREFIXES``), + this function will find its `site-packages` subdirectory depending on the + system environment, and will return a list of full paths. + """ + sitepackages = [] + seen = set() + + if prefixes is None: + prefixes = PREFIXES + + for prefix in prefixes: + if not prefix or prefix in seen: + continue + seen.add(prefix) + + implementation = _get_implementation().lower() + ver = sys.version_info + if hasattr(sys, 'abiflags') and 't' in sys.abiflags: + abi_thread = 't' + else: + abi_thread = '' + if os.sep == '/': + libdirs = [sys.platlibdir] + if sys.platlibdir != "lib": + libdirs.append("lib") + + for libdir in libdirs: + path = os.path.join(prefix, libdir, + f"{implementation}{ver[0]}.{ver[1]}{abi_thread}", + "site-packages") + sitepackages.append(path) + else: + sitepackages.append(prefix) + sitepackages.append(os.path.join(prefix, "Lib", "site-packages")) + return sitepackages + +def addsitepackages(known_paths, prefixes=None): + """Add site-packages to sys.path""" + _trace("Processing global site-packages") + for sitedir in getsitepackages(prefixes): + if os.path.isdir(sitedir): + addsitedir(sitedir, known_paths) + + return known_paths + +def setquit(): + """Define new builtins 'quit' and 'exit'. + + These are objects which make the interpreter exit when called. + The repr of each object contains a hint at how it works. + + """ + if os.sep == '\\': + eof = 'Ctrl-Z plus Return' + else: + eof = 'Ctrl-D (i.e. EOF)' + + builtins.quit = _sitebuiltins.Quitter('quit', eof) + builtins.exit = _sitebuiltins.Quitter('exit', eof) + + +def setcopyright(): + """Set 'copyright' and 'credits' in builtins""" + builtins.copyright = _sitebuiltins._Printer("copyright", sys.copyright) + builtins.credits = _sitebuiltins._Printer("credits", """\ +Thanks to CWI, CNRI, BeOpen, Zope Corporation, the Python Software +Foundation, and a cast of thousands for supporting Python +development. See www.python.org for more information.""") + files, dirs = [], [] + # Not all modules are required to have a __file__ attribute. See + # PEP 420 for more details. + here = getattr(sys, '_stdlib_dir', None) + if not here and hasattr(os, '__file__'): + here = os.path.dirname(os.__file__) + if here: + files.extend(["LICENSE.txt", "LICENSE"]) + dirs.extend([os.path.join(here, os.pardir), here, os.curdir]) + builtins.license = _sitebuiltins._Printer( + "license", + "See https://www.python.org/psf/license/", + files, dirs) + + +def sethelper(): + builtins.help = _sitebuiltins._Helper() + + +def gethistoryfile(): + """Check if the PYTHON_HISTORY environment variable is set and define + it as the .python_history file. If PYTHON_HISTORY is not set, use the + default .python_history file. + """ + if not sys.flags.ignore_environment: + history = os.environ.get("PYTHON_HISTORY") + if history: + return history + return os.path.join(os.path.expanduser('~'), + '.python_history') + + +def enablerlcompleter(): + """Enable default readline configuration on interactive prompts, by + registering a sys.__interactivehook__. + """ + sys.__interactivehook__ = register_readline + + +def register_readline(): + """Configure readline completion on interactive prompts. + + If the readline module can be imported, the hook will set the Tab key + as completion key and register ~/.python_history as history file. + This can be overridden in the sitecustomize or usercustomize module, + or in a PYTHONSTARTUP file. 
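+ + As a minimal illustration, a sitecustomize module (imported near the + end of main() below) can replace this hook before the first interactive + prompt is shown; the module body here is only a sketch: + + # sitecustomize.py -- any module named sitecustomize on sys.path + import sys + sys.__interactivehook__ = lambda: None # skip readline/pyrepl setup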
+ """ + if not sys.flags.ignore_environment: + PYTHON_BASIC_REPL = os.getenv("PYTHON_BASIC_REPL") + else: + PYTHON_BASIC_REPL = False + + import atexit + + try: + try: + import readline + except ImportError: + readline = None + else: + import rlcompleter # noqa: F401 + except ImportError: + return + + try: + if PYTHON_BASIC_REPL: + CAN_USE_PYREPL = False + else: + original_path = sys.path + sys.path = [p for p in original_path if p != ''] + try: + import _pyrepl.readline + if os.name == "nt": + import _pyrepl.windows_console + console_errors = (_pyrepl.windows_console._error,) + else: + import _pyrepl.unix_console + console_errors = _pyrepl.unix_console._error + from _pyrepl.main import CAN_USE_PYREPL + finally: + sys.path = original_path + except ImportError: + return + + if readline is not None: + # Reading the initialization (config) file may not be enough to set a + # completion key, so we set one first and then read the file. + if readline.backend == 'editline': + readline.parse_and_bind('bind ^I rl_complete') + else: + readline.parse_and_bind('tab: complete') + + try: + readline.read_init_file() + except OSError: + # An OSError here could have many causes, but the most likely one + # is that there's no .inputrc file (or .editrc file in the case of + # Mac OS X + libedit) in the expected location. In that case, we + # want to ignore the exception. + pass + + if readline is None or readline.get_current_history_length() == 0: + # If no history was loaded, default to .python_history, + # or PYTHON_HISTORY. + # The guard is necessary to avoid doubling history size at + # each interpreter exit when readline was already configured + # through a PYTHONSTARTUP hook, see: + # http://bugs.python.org/issue5845#msg198636 + history = gethistoryfile() + + if CAN_USE_PYREPL: + readline_module = _pyrepl.readline + exceptions = (OSError, *console_errors) + else: + if readline is None: + return + readline_module = readline + exceptions = OSError + + try: + readline_module.read_history_file(history) + except exceptions: + pass + + def write_history(): + try: + readline_module.write_history_file(history) + except FileNotFoundError, PermissionError: + # home directory does not exist or is not writable + # https://bugs.python.org/issue19891 + pass + except OSError: + if errno.EROFS: + pass # gh-128066: read-only file system + else: + raise + + atexit.register(write_history) + + +def venv(known_paths): + global PREFIXES, ENABLE_USER_SITE + + env = os.environ + if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env: + executable = sys._base_executable = os.environ['__PYVENV_LAUNCHER__'] + else: + executable = sys.executable + exe_dir = os.path.dirname(os.path.abspath(executable)) + site_prefix = os.path.dirname(exe_dir) + sys._home = None + conf_basename = 'pyvenv.cfg' + candidate_conf = next( + ( + conffile for conffile in ( + os.path.join(exe_dir, conf_basename), + os.path.join(site_prefix, conf_basename) + ) + if os.path.isfile(conffile) + ), + None + ) + + if candidate_conf: + virtual_conf = candidate_conf + system_site = "true" + # Issue 25185: Use UTF-8, as that's what the venv module uses when + # writing the file. 
+ with open(virtual_conf, encoding='utf-8') as f: + for line in f: + if '=' in line: + key, _, value = line.partition('=') + key = key.strip().lower() + value = value.strip() + if key == 'include-system-site-packages': + system_site = value.lower() + elif key == 'home': + sys._home = value + + if sys.prefix != site_prefix: + _warn(f'Unexpected value in sys.prefix, expected {site_prefix}, got {sys.prefix}', RuntimeWarning) + if sys.exec_prefix != site_prefix: + _warn(f'Unexpected value in sys.exec_prefix, expected {site_prefix}, got {sys.exec_prefix}', RuntimeWarning) + + # Doing this here ensures venv takes precedence over user-site + addsitepackages(known_paths, [sys.prefix]) + + if system_site == "true": + PREFIXES += [sys.base_prefix, sys.base_exec_prefix] + else: + ENABLE_USER_SITE = False + + return known_paths + + +def execsitecustomize(): + """Run custom site specific code, if available.""" + try: + try: + import sitecustomize # noqa: F401 + except ImportError as exc: + if exc.name == 'sitecustomize': + pass + else: + raise + except Exception as err: + if sys.flags.verbose: + sys.excepthook(*sys.exc_info()) + else: + sys.stderr.write( + "Error in sitecustomize; set PYTHONVERBOSE for traceback:\n" + "%s: %s\n" % + (err.__class__.__name__, err)) + + +def execusercustomize(): + """Run custom user specific code, if available.""" + try: + try: + import usercustomize # noqa: F401 + except ImportError as exc: + if exc.name == 'usercustomize': + pass + else: + raise + except Exception as err: + if sys.flags.verbose: + sys.excepthook(*sys.exc_info()) + else: + sys.stderr.write( + "Error in usercustomize; set PYTHONVERBOSE for traceback:\n" + "%s: %s\n" % + (err.__class__.__name__, err)) + + +def main(): + """Add standard site-specific directories to the module search path. + + This function is called automatically when this module is imported, + unless the python interpreter was started with the -S flag. + """ + global ENABLE_USER_SITE + + orig_path = sys.path[:] + known_paths = removeduppaths() + if orig_path != sys.path: + # removeduppaths() might make sys.path absolute. + # fix __file__ and __cached__ of already imported modules too. + abs_paths() + + known_paths = venv(known_paths) + if ENABLE_USER_SITE is None: + ENABLE_USER_SITE = check_enableusersite() + known_paths = addusersitepackages(known_paths) + known_paths = addsitepackages(known_paths) + setquit() + setcopyright() + sethelper() + if not sys.flags.isolated: + enablerlcompleter() + execsitecustomize() + if ENABLE_USER_SITE: + execusercustomize() + +# Prevent extending of sys.path when python was started with -S and +# site is imported later. +if not sys.flags.no_site: + main() + +def _script(): + help = """\ + %s [--user-base] [--user-site] + + Without arguments print some useful information + With arguments print the value of USER_BASE and/or USER_SITE separated + by '%s'. 
+ + Exit codes with --user-base or --user-site: + 0 - user site directory is enabled + 1 - user site directory is disabled by user + 2 - user site directory is disabled by super user + or for security reasons + >2 - unknown error + """ + args = sys.argv[1:] + if not args: + user_base = getuserbase() + user_site = getusersitepackages() + print("sys.path = [") + for dir in sys.path: + print(" %r," % (dir,)) + print("]") + def exists(path): + if path is not None and os.path.isdir(path): + return "exists" + else: + return "doesn't exist" + print(f"USER_BASE: {user_base!r} ({exists(user_base)})") + print(f"USER_SITE: {user_site!r} ({exists(user_site)})") + print(f"ENABLE_USER_SITE: {ENABLE_USER_SITE!r}") + sys.exit(0) + + buffer = [] + if '--user-base' in args: + buffer.append(USER_BASE) + if '--user-site' in args: + buffer.append(USER_SITE) + + if buffer: + print(os.pathsep.join(buffer)) + if ENABLE_USER_SITE: + sys.exit(0) + elif ENABLE_USER_SITE is False: + sys.exit(1) + elif ENABLE_USER_SITE is None: + sys.exit(2) + else: + sys.exit(3) + else: + import textwrap + print(textwrap.dedent(help % (sys.argv[0], os.pathsep))) + sys.exit(10) + +if __name__ == '__main__': + _script() diff --git a/Python314_4_x86_Template/Lib/smtplib.py b/Python314_4_x86_Template/Lib/smtplib.py new file mode 100644 index 00000000..72093f7f --- /dev/null +++ b/Python314_4_x86_Template/Lib/smtplib.py @@ -0,0 +1,1121 @@ +'''SMTP/ESMTP client class. + +This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP +Authentication) and RFC 2487 (Secure SMTP over TLS). + +Notes: + +Please remember, when doing ESMTP, that the names of the SMTP service +extensions are NOT the same thing as the option keywords for the RCPT +and MAIL commands! + +Example: + + >>> import smtplib + >>> s=smtplib.SMTP("localhost") + >>> print(s.help()) + This is Sendmail version 8.8.4 + Topics: + HELO EHLO MAIL RCPT DATA + RSET NOOP QUIT HELP VRFY + EXPN VERB ETRN DSN + For more info use "HELP <topic>". + To report bugs in the implementation send email to + sendmail-bugs@sendmail.org. + For local information send email to Postmaster at your site. + End of HELP info + >>> s.putcmd("vrfy","someone@here") + >>> s.getreply() + (250, "Somebody OverHere <somebody@here.my.org>") + >>> s.quit() +''' + +# Author: The Dragon De Monsyne +# ESMTP support, test code and doc fixes added by +# Eric S. Raymond +# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data) +# by Carey Evans, for picky mail servers. +# RFC 2554 (authentication) support by Gerhard Haering. +# +# This was modified from the Python 1.5 library HTTP lib. + +import socket +import io +import re +import email.utils +import email.message +import email.generator +import base64 +import hmac +import copy +import datetime +import sys +from email.base64mime import body_encode as encode_base64 + +__all__ = ["SMTPException", "SMTPNotSupportedError", "SMTPServerDisconnected", "SMTPResponseException", + "SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError", + "SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError", + "quoteaddr", "quotedata", "SMTP"] + +SMTP_PORT = 25 +SMTP_SSL_PORT = 465 +CRLF = "\r\n" +bCRLF = b"\r\n" +_MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3 +_MAXCHALLENGE = 5 # Maximum number of AUTH challenges sent + +OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I) + +# Exception classes used by this module.
+class SMTPException(OSError): + """Base class for all exceptions raised by this module.""" + +class SMTPNotSupportedError(SMTPException): + """The command or option is not supported by the SMTP server. + + This exception is raised when an attempt is made to run a command or a + command with an option which is not supported by the server. + """ + +class SMTPServerDisconnected(SMTPException): + """Not connected to any SMTP server. + + This exception is raised when the server unexpectedly disconnects, + or when an attempt is made to use the SMTP instance before + connecting it to a server. + """ + +class SMTPResponseException(SMTPException): + """Base class for all exceptions that include an SMTP error code. + + These exceptions are generated in some instances when the SMTP + server returns an error code. The error code is stored in the + `smtp_code' attribute of the error, and the `smtp_error' attribute + is set to the error message. + """ + + def __init__(self, code, msg): + self.smtp_code = code + self.smtp_error = msg + self.args = (code, msg) + +class SMTPSenderRefused(SMTPResponseException): + """Sender address refused. + + In addition to the attributes set by on all SMTPResponseException + exceptions, this sets 'sender' to the string that the SMTP refused. + """ + + def __init__(self, code, msg, sender): + self.smtp_code = code + self.smtp_error = msg + self.sender = sender + self.args = (code, msg, sender) + +class SMTPRecipientsRefused(SMTPException): + """All recipient addresses refused. + + The errors for each recipient are accessible through the attribute + 'recipients', which is a dictionary of exactly the same sort as + SMTP.sendmail() returns. + """ + + def __init__(self, recipients): + self.recipients = recipients + self.args = (recipients,) + + +class SMTPDataError(SMTPResponseException): + """The SMTP server didn't accept the data.""" + +class SMTPConnectError(SMTPResponseException): + """Error during connection establishment.""" + +class SMTPHeloError(SMTPResponseException): + """The server refused our HELO reply.""" + +class SMTPAuthenticationError(SMTPResponseException): + """Authentication error. + + Most probably the server didn't accept the username/password + combination provided. + """ + +def quoteaddr(addrstring): + """Quote a subset of the email addresses defined by RFC 821. + + Should be able to handle anything email.utils.parseaddr can handle. + """ + displayname, addr = email.utils.parseaddr(addrstring) + if (displayname, addr) == ('', ''): + # parseaddr couldn't parse it, use it as is and hope for the best. + if addrstring.strip().startswith('<'): + return addrstring + return "<%s>" % addrstring + return "<%s>" % addr + +def _addr_only(addrstring): + displayname, addr = email.utils.parseaddr(addrstring) + if (displayname, addr) == ('', ''): + # parseaddr couldn't parse it, so use it as is. + return addrstring + return addr + +# Legacy method kept for backward compatibility. +def quotedata(data): + """Quote data for email. + + Double leading '.', and change Unix newline '\\n', or Mac '\\r' into + internet CRLF end-of-line. 
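+ + For example, as a doctest-style sketch (with backslashes escaped as in + the lines above): + + >>> quotedata('.hi\\nthere') + '..hi\\r\\nthere'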
+ """ + return re.sub(r'(?m)^\.', '..', + re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data)) + +def _quote_periods(bindata): + return re.sub(br'(?m)^\.', b'..', bindata) + +def _fix_eols(data): + return re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data) + + +try: + hmac.digest(b'', b'', 'md5') +except ValueError: + _have_cram_md5_support = False +else: + _have_cram_md5_support = True + + +try: + import ssl +except ImportError: + _have_ssl = False +else: + _have_ssl = True + + +class SMTP: + """This class manages a connection to an SMTP or ESMTP server. + SMTP Objects: + SMTP objects have the following attributes: + helo_resp + This is the message given by the server in response to the + most recent HELO command. + + ehlo_resp + This is the message given by the server in response to the + most recent EHLO command. This is usually multiline. + + does_esmtp + This is a True value _after you do an EHLO command_, if the + server supports ESMTP. + + esmtp_features + This is a dictionary, which, if the server supports ESMTP, + will _after you do an EHLO command_, contain the names of the + SMTP service extensions this server supports, and their + parameters (if any). + + Note, all extension names are mapped to lower case in the + dictionary. + + See each method's docstrings for details. In general, there is a + method of the same name to perform each SMTP command. There is also a + method called 'sendmail' that will do an entire mail transaction. + """ + debuglevel = 0 + + sock = None + file = None + helo_resp = None + ehlo_msg = "ehlo" + ehlo_resp = None + does_esmtp = False + default_port = SMTP_PORT + + def __init__(self, host='', port=0, local_hostname=None, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + """Initialize a new instance. + + If specified, `host` is the name of the remote host to which to + connect. If specified, `port` specifies the port to which to connect. + By default, smtplib.SMTP_PORT is used. If a host is specified the + connect method is called, and if it returns anything other than a + success code an SMTPConnectError is raised. If specified, + `local_hostname` is used as the FQDN of the local host in the HELO/EHLO + command. Otherwise, the local hostname is found using + socket.getfqdn(). The `source_address` parameter takes a 2-tuple (host, + port) for the socket to bind to as its source address before + connecting. If the host is '' and port is 0, the OS default behavior + will be used. + + """ + self._host = host + self.timeout = timeout + self.esmtp_features = {} + self.command_encoding = 'ascii' + self.source_address = source_address + self._auth_challenge_count = 0 + + if host: + (code, msg) = self.connect(host, port) + if code != 220: + self.close() + raise SMTPConnectError(code, msg) + if local_hostname is not None: + self.local_hostname = local_hostname + else: + # RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and + # if that can't be calculated, that we should use a domain literal + # instead (essentially an encoded IP address like [A.B.C.D]). + fqdn = socket.getfqdn() + if '.' 
in fqdn: + self.local_hostname = fqdn + else: + # We can't find an fqdn hostname, so use a domain literal + addr = '127.0.0.1' + try: + addr = socket.gethostbyname(socket.gethostname()) + except socket.gaierror: + pass + self.local_hostname = '[%s]' % addr + + def __enter__(self): + return self + + def __exit__(self, *args): + try: + code, message = self.docmd("QUIT") + if code != 221: + raise SMTPResponseException(code, message) + except SMTPServerDisconnected: + pass + finally: + self.close() + + def set_debuglevel(self, debuglevel): + """Set the debug output level. + + A non-false value results in debug messages for connection and for all + messages sent to and received from the server. + + """ + self.debuglevel = debuglevel + + def _print_debug(self, *args): + if self.debuglevel > 1: + print(datetime.datetime.now().time(), *args, file=sys.stderr) + else: + print(*args, file=sys.stderr) + + def _get_socket(self, host, port, timeout): + # This makes it simpler for SMTP_SSL to use the SMTP connect code + # and just alter the socket connection bit. + if timeout is not None and not timeout: + raise ValueError('Non-blocking socket (timeout=0) is not supported') + if self.debuglevel > 0: + self._print_debug('connect: to', (host, port), self.source_address) + return socket.create_connection((host, port), timeout, + self.source_address) + + def connect(self, host='localhost', port=0, source_address=None): + """Connect to a host on a given port. + + If the hostname ends with a colon (':') followed by a number, and + there is no port specified, that suffix will be stripped off and the + number interpreted as the port number to use. + + Note: This method is automatically invoked by __init__, if a host is + specified during instantiation. + + """ + + if source_address: + self.source_address = source_address + + if not port and (host.find(':') == host.rfind(':')): + i = host.rfind(':') + if i >= 0: + host, port = host[:i], host[i + 1:] + try: + port = int(port) + except ValueError: + raise OSError("nonnumeric port") + if not port: + port = self.default_port + sys.audit("smtplib.connect", self, host, port) + self.sock = self._get_socket(host, port, self.timeout) + self.file = None + (code, msg) = self.getreply() + if self.debuglevel > 0: + self._print_debug('connect:', repr(msg)) + return (code, msg) + + def send(self, s): + """Send 's' to the server.""" + if self.debuglevel > 0: + self._print_debug('send:', repr(s)) + if self.sock: + if isinstance(s, str): + # send is used by the 'data' command, where command_encoding + # should not be used, but 'data' needs to convert the string to + # binary itself anyway, so that's not a problem. + s = s.encode(self.command_encoding) + sys.audit("smtplib.send", self, s) + try: + self.sock.sendall(s) + except OSError: + self.close() + raise SMTPServerDisconnected('Server not connected') + else: + raise SMTPServerDisconnected('please run connect() first') + + def putcmd(self, cmd, args=""): + """Send a command to the server.""" + if args == "": + s = cmd + else: + s = f'{cmd} {args}' + if '\r' in s or '\n' in s: + s = s.replace('\n', '\\n').replace('\r', '\\r') + raise ValueError( + f'command and arguments contain prohibited newline characters: {s}' + ) + self.send(f'{s}{CRLF}') + + def getreply(self): + """Get a reply from the server. + + Returns a tuple consisting of: + + - server response code (e.g. '250', or such, if all goes well) + Note: returns -1 if it can't read response code. 
+ + - server response string corresponding to response code (multiline + responses are converted to a single, multiline string). + + Raises SMTPServerDisconnected if end-of-file is reached. + """ + resp = [] + if self.file is None: + self.file = self.sock.makefile('rb') + while 1: + try: + line = self.file.readline(_MAXLINE + 1) + except OSError as e: + self.close() + raise SMTPServerDisconnected("Connection unexpectedly closed: " + + str(e)) + if not line: + self.close() + raise SMTPServerDisconnected("Connection unexpectedly closed") + if self.debuglevel > 0: + self._print_debug('reply:', repr(line)) + if len(line) > _MAXLINE: + self.close() + raise SMTPResponseException(500, "Line too long.") + resp.append(line[4:].strip(b' \t\r\n')) + code = line[:3] + # Check that the error code is syntactically correct. + # Don't attempt to read a continuation line if it is broken. + try: + errcode = int(code) + except ValueError: + errcode = -1 + break + # Check if multiline response. + if line[3:4] != b"-": + break + + errmsg = b"\n".join(resp) + if self.debuglevel > 0: + self._print_debug('reply: retcode (%s); Msg: %a' % (errcode, errmsg)) + return errcode, errmsg + + def docmd(self, cmd, args=""): + """Send a command, and return its response code.""" + self.putcmd(cmd, args) + return self.getreply() + + # std smtp commands + def helo(self, name=''): + """SMTP 'helo' command. + Hostname to send for this command defaults to the FQDN of the local + host. + """ + self.putcmd("helo", name or self.local_hostname) + (code, msg) = self.getreply() + self.helo_resp = msg + return (code, msg) + + def ehlo(self, name=''): + """ SMTP 'ehlo' command. + Hostname to send for this command defaults to the FQDN of the local + host. + """ + self.esmtp_features = {} + self.putcmd(self.ehlo_msg, name or self.local_hostname) + (code, msg) = self.getreply() + # According to RFC1869 some (badly written) + # MTA's will disconnect on an ehlo. Toss an exception if + # that happens -ddm + if code == -1 and len(msg) == 0: + self.close() + raise SMTPServerDisconnected("Server not connected") + self.ehlo_resp = msg + if code != 250: + return (code, msg) + self.does_esmtp = True + #parse the ehlo response -ddm + assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp) + resp = self.ehlo_resp.decode("latin-1").split('\n') + del resp[0] + for each in resp: + # To be able to communicate with as many SMTP servers as possible, + # we have to take the old-style auth advertisement into account, + # because: + # 1) Else our SMTP feature parser gets confused. + # 2) There are some servers that only advertise the auth methods we + # support using the old style. + auth_match = OLDSTYLE_AUTH.match(each) + if auth_match: + # This doesn't remove duplicates, but that's no problem + self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \ + + " " + auth_match.groups(0)[0] + continue + + # RFC 1869 requires a space between ehlo keyword and parameters. + # It's actually stricter, in that only spaces are allowed between + # parameters, but were not going to check for that here. Note + # that the space isn't present if there are no parameters. 
+ m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each) + if m: + feature = m.group("feature").lower() + params = m.string[m.end("feature"):].strip() + if feature == "auth": + self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \ + + " " + params + else: + self.esmtp_features[feature] = params + return (code, msg) + + def has_extn(self, opt): + """Does the server support a given SMTP service extension?""" + return opt.lower() in self.esmtp_features + + def help(self, args=''): + """SMTP 'help' command. + Returns help text from server.""" + self.putcmd("help", args) + return self.getreply()[1] + + def rset(self): + """SMTP 'rset' command -- resets session.""" + self.command_encoding = 'ascii' + return self.docmd("rset") + + def _rset(self): + """Internal 'rset' command which ignores any SMTPServerDisconnected error. + + Used internally in the library, since the server disconnected error + should appear to the application when the *next* command is issued, if + we are doing an internal "safety" reset. + """ + try: + self.rset() + except SMTPServerDisconnected: + pass + + def noop(self): + """SMTP 'noop' command -- doesn't do anything :>""" + return self.docmd("noop") + + def mail(self, sender, options=()): + """SMTP 'mail' command -- begins mail xfer session. + + This method may raise the following exceptions: + + SMTPNotSupportedError The options parameter includes 'SMTPUTF8' + but the SMTPUTF8 extension is not supported by + the server. + """ + optionlist = '' + if options and self.does_esmtp: + if any(x.lower()=='smtputf8' for x in options): + if self.has_extn('smtputf8'): + self.command_encoding = 'utf-8' + else: + raise SMTPNotSupportedError( + 'SMTPUTF8 not supported by server') + optionlist = ' ' + ' '.join(options) + self.putcmd("mail", "from:%s%s" % (quoteaddr(sender), optionlist)) + return self.getreply() + + def rcpt(self, recip, options=()): + """SMTP 'rcpt' command -- indicates 1 recipient for this mail.""" + optionlist = '' + if options and self.does_esmtp: + optionlist = ' ' + ' '.join(options) + self.putcmd("rcpt", "to:%s%s" % (quoteaddr(recip), optionlist)) + return self.getreply() + + def data(self, msg): + """SMTP 'DATA' command -- sends message data to server. + + Automatically quotes lines beginning with a period per rfc821. + Raises SMTPDataError if there is an unexpected reply to the + DATA command; the return value from this method is the final + response code received when the all data is sent. If msg + is a string, lone '\\r' and '\\n' characters are converted to + '\\r\\n' characters. If msg is bytes, it is transmitted as is. + """ + self.putcmd("data") + (code, repl) = self.getreply() + if self.debuglevel > 0: + self._print_debug('data:', (code, repl)) + if code != 354: + raise SMTPDataError(code, repl) + else: + if isinstance(msg, str): + msg = _fix_eols(msg).encode('ascii') + q = _quote_periods(msg) + if q[-2:] != bCRLF: + q = q + bCRLF + q = q + b"." + bCRLF + self.send(q) + (code, msg) = self.getreply() + if self.debuglevel > 0: + self._print_debug('data:', (code, msg)) + return (code, msg) + + def verify(self, address): + """SMTP 'verify' command -- checks for address validity.""" + self.putcmd("vrfy", _addr_only(address)) + return self.getreply() + # a.k.a. + vrfy = verify + + def expn(self, address): + """SMTP 'expn' command -- expands a mailing list.""" + self.putcmd("expn", _addr_only(address)) + return self.getreply() + + # some useful methods + + def ehlo_or_helo_if_needed(self): + """Call self.ehlo() and/or self.helo() if needed.
+ + If there has been no previous EHLO or HELO command this session, this + method tries ESMTP EHLO first. + + This method may raise the following exceptions: + + SMTPHeloError The server didn't reply properly to + the helo greeting. + """ + if self.helo_resp is None and self.ehlo_resp is None: + if not (200 <= self.ehlo()[0] <= 299): + (code, resp) = self.helo() + if not (200 <= code <= 299): + raise SMTPHeloError(code, resp) + + def auth(self, mechanism, authobject, *, initial_response_ok=True): + """Authentication command - requires response processing. + + 'mechanism' specifies which authentication mechanism is to + be used - the valid values are those listed in the 'auth' + element of 'esmtp_features'. + + 'authobject' must be a callable object taking a single argument: + + data = authobject(challenge) + + It will be called to process the server's challenge response; the + challenge argument it is passed will be a bytes. It should return + an ASCII string that will be base64 encoded and sent to the server. + + Keyword arguments: + - initial_response_ok: Allow sending the RFC 4954 initial-response + to the AUTH command, if the authentication methods supports it. + """ + # RFC 4954 allows auth methods to provide an initial response. Not all + # methods support it. By definition, if they return something other + # than None when challenge is None, then they do. See issue #15014. + mechanism = mechanism.upper() + initial_response = (authobject() if initial_response_ok else None) + if initial_response is not None: + response = encode_base64(initial_response.encode('ascii'), eol='') + (code, resp) = self.docmd("AUTH", mechanism + " " + response) + self._auth_challenge_count = 1 + else: + (code, resp) = self.docmd("AUTH", mechanism) + self._auth_challenge_count = 0 + # If server responds with a challenge, send the response. + while code == 334: + self._auth_challenge_count += 1 + challenge = base64.decodebytes(resp) + response = encode_base64( + authobject(challenge).encode('ascii'), eol='') + (code, resp) = self.docmd(response) + # If server keeps sending challenges, something is wrong. + if self._auth_challenge_count > _MAXCHALLENGE: + raise SMTPException( + "Server AUTH mechanism infinite loop. Last response: " + + repr((code, resp)) + ) + if code in (235, 503): + return (code, resp) + raise SMTPAuthenticationError(code, resp) + + def auth_cram_md5(self, challenge=None): + """ Authobject to use with CRAM-MD5 authentication. Requires self.user + and self.password to be set.""" + # CRAM-MD5 does not support initial-response. + if challenge is None: + return None + if not _have_cram_md5_support: + raise SMTPException("CRAM-MD5 is not supported") + password = self.password.encode('ascii') + authcode = hmac.HMAC(password, challenge, 'md5') + return f"{self.user} {authcode.hexdigest()}" + + def auth_plain(self, challenge=None): + """ Authobject to use with PLAIN authentication. Requires self.user and + self.password to be set.""" + return "\0%s\0%s" % (self.user, self.password) + + def auth_login(self, challenge=None): + """ Authobject to use with LOGIN authentication. Requires self.user and + self.password to be set.""" + if challenge is None or self._auth_challenge_count < 2: + return self.user + else: + return self.password + + def login(self, user, password, *, initial_response_ok=True): + """Log in on an SMTP server that requires authentication. + + The arguments are: + - user: The user name to authenticate with. + - password: The password for the authentication. 
+ + Keyword arguments: + - initial_response_ok: Allow sending the RFC 4954 initial-response + to the AUTH command, if the authentication methods supports it. + + If there has been no previous EHLO or HELO command this session, this + method tries ESMTP EHLO first. + + This method will return normally if the authentication was successful. + + This method may raise the following exceptions: + + SMTPHeloError The server didn't reply properly to + the helo greeting. + SMTPAuthenticationError The server didn't accept the username/ + password combination. + SMTPNotSupportedError The AUTH command is not supported by the + server. + SMTPException No suitable authentication method was + found. + """ + + self.ehlo_or_helo_if_needed() + if not self.has_extn("auth"): + raise SMTPNotSupportedError( + "SMTP AUTH extension not supported by server.") + + # Authentication methods the server claims to support + advertised_authlist = self.esmtp_features["auth"].split() + + # Authentication methods we can handle in our preferred order: + if _have_cram_md5_support: + preferred_auths = ['CRAM-MD5', 'PLAIN', 'LOGIN'] + else: + preferred_auths = ['PLAIN', 'LOGIN'] + # We try the supported authentications in our preferred order, if + # the server supports them. + authlist = [auth for auth in preferred_auths + if auth in advertised_authlist] + if not authlist: + raise SMTPException("No suitable authentication method found.") + + # Some servers advertise authentication methods they don't really + # support, so if authentication fails, we continue until we've tried + # all methods. + self.user, self.password = user, password + for authmethod in authlist: + method_name = 'auth_' + authmethod.lower().replace('-', '_') + try: + (code, resp) = self.auth( + authmethod, getattr(self, method_name), + initial_response_ok=initial_response_ok) + # 235 == 'Authentication successful' + # 503 == 'Error: already authenticated' + if code in (235, 503): + return (code, resp) + except SMTPAuthenticationError as e: + last_exception = e + + # We could not login successfully. Return result of last attempt. + raise last_exception + + def starttls(self, *, context=None): + """Puts the connection to the SMTP server into TLS mode. + + If there has been no previous EHLO or HELO command this session, this + method tries ESMTP EHLO first. + + If the server supports TLS, this will encrypt the rest of the SMTP + session. If you provide the context parameter, + the identity of the SMTP server and client can be checked. This, + however, depends on whether the socket module really checks the + certificates. + + This method may raise the following exceptions: + + SMTPHeloError The server didn't reply properly to + the helo greeting. + """ + self.ehlo_or_helo_if_needed() + if not self.has_extn("starttls"): + raise SMTPNotSupportedError( + "STARTTLS extension not supported by server.") + (resp, reply) = self.docmd("STARTTLS") + if resp == 220: + if not _have_ssl: + raise RuntimeError("No SSL support included in this Python") + if context is None: + context = ssl._create_stdlib_context() + self.sock = context.wrap_socket(self.sock, + server_hostname=self._host) + self.file = None + # RFC 3207: + # The client MUST discard any knowledge obtained from + # the server, such as the list of SMTP service extensions, + # which was not obtained from the TLS negotiation itself. 
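+ # In practice a caller must therefore issue ehlo() again after a + # successful starttls() before consulting esmtp_features or has_extn().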
+ self.helo_resp = None + self.ehlo_resp = None + self.esmtp_features = {} + self.does_esmtp = False + else: + # RFC 3207: + # 501 Syntax error (no parameters allowed) + # 454 TLS not available due to temporary reason + raise SMTPResponseException(resp, reply) + return (resp, reply) + + def sendmail(self, from_addr, to_addrs, msg, mail_options=(), + rcpt_options=()): + """This command performs an entire mail transaction. + + The arguments are: + - from_addr : The address sending this mail. + - to_addrs : A list of addresses to send this mail to. A bare + string will be treated as a list with 1 address. + - msg : The message to send. + - mail_options : List of ESMTP options (such as 8bitmime) for the + mail command. + - rcpt_options : List of ESMTP options (such as DSN commands) for + all the rcpt commands. + + msg may be a string containing characters in the ASCII range, or a byte + string. A string is encoded to bytes using the ascii codec, and lone + \\r and \\n characters are converted to \\r\\n characters. + + If there has been no previous EHLO or HELO command this session, this + method tries ESMTP EHLO first. If the server does ESMTP, message size + and each of the specified options will be passed to it. If EHLO + fails, HELO will be tried and ESMTP options suppressed. + + This method will return normally if the mail is accepted for at least + one recipient. It returns a dictionary, with one entry for each + recipient that was refused. Each entry contains a tuple of the SMTP + error code and the accompanying error message sent by the server. + + This method may raise the following exceptions: + + SMTPHeloError The server didn't reply properly to + the helo greeting. + SMTPRecipientsRefused The server rejected ALL recipients + (no mail was sent). + SMTPSenderRefused The server didn't accept the from_addr. + SMTPDataError The server replied with an unexpected + error code (other than a refusal of + a recipient). + SMTPNotSupportedError The mail_options parameter includes 'SMTPUTF8' + but the SMTPUTF8 extension is not supported by + the server. + + Note: the connection will be open even after an exception is raised. + + Example: + + >>> import smtplib + >>> s=smtplib.SMTP("localhost") + >>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"] + >>> msg = '''\\ + ... From: Me@my.org + ... Subject: testin'... + ... + ... This is a test ''' + >>> s.sendmail("me@my.org",tolist,msg) + { "three@three.org" : ( 550 ,"User unknown" ) } + >>> s.quit() + + In the above example, the message was accepted for delivery to three + of the four addresses, and one was rejected, with the error code + 550. If all addresses are accepted, then the method will return an + empty dictionary. 
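+ + A caller would typically inspect the returned dictionary; as a minimal + sketch continuing the session above: + + >>> refused = s.sendmail("me@my.org", tolist, msg) + >>> for rcpt, (code, resp) in refused.items(): + ... print(rcpt, code, resp)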
+ + """ + self.ehlo_or_helo_if_needed() + esmtp_opts = [] + if isinstance(msg, str): + msg = _fix_eols(msg).encode('ascii') + if self.does_esmtp: + if self.has_extn('size'): + esmtp_opts.append("size=%d" % len(msg)) + for option in mail_options: + esmtp_opts.append(option) + (code, resp) = self.mail(from_addr, esmtp_opts) + if code != 250: + if code == 421: + self.close() + else: + self._rset() + raise SMTPSenderRefused(code, resp, from_addr) + senderrs = {} + if isinstance(to_addrs, str): + to_addrs = [to_addrs] + for each in to_addrs: + (code, resp) = self.rcpt(each, rcpt_options) + if (code != 250) and (code != 251): + senderrs[each] = (code, resp) + if code == 421: + self.close() + raise SMTPRecipientsRefused(senderrs) + if len(senderrs) == len(to_addrs): + # the server refused all our recipients + self._rset() + raise SMTPRecipientsRefused(senderrs) + (code, resp) = self.data(msg) + if code != 250: + if code == 421: + self.close() + else: + self._rset() + raise SMTPDataError(code, resp) + #if we got here then somebody got our mail + return senderrs + + def send_message(self, msg, from_addr=None, to_addrs=None, + mail_options=(), rcpt_options=()): + """Converts message to a bytestring and passes it to sendmail. + + The arguments are as for sendmail, except that msg is an + email.message.Message object. If from_addr is None or to_addrs is + None, these arguments are taken from the headers of the Message as + described in RFC 5322 (a ValueError is raised if there is more than + one set of 'Resent-' headers). Regardless of the values of from_addr and + to_addr, any Bcc field (or Resent-Bcc field, when the Message is a + resent) of the Message object won't be transmitted. The Message + object is then serialized using email.generator.BytesGenerator and + sendmail is called to transmit the message. If the sender or any of + the recipient addresses contain non-ASCII and the server advertises the + SMTPUTF8 capability, the policy is cloned with utf8 set to True for the + serialization, and SMTPUTF8 and BODY=8BITMIME are asserted on the send. + If the server does not support SMTPUTF8, an SMTPNotSupported error is + raised. Otherwise the generator is called without modifying the + policy. + + """ + # 'Resent-Date' is a mandatory field if the Message is resent (RFC 5322 + # Section 3.6.6). In such a case, we use the 'Resent-*' fields. However, + # if there is more than one 'Resent-' block there's no way to + # unambiguously determine which one is the most recent in all cases, + # so rather than guess we raise a ValueError in that case. + # + # TODO implement heuristics to guess the correct Resent-* block with an + # option allowing the user to enable the heuristics. (It should be + # possible to guess correctly almost all of the time.) + + self.ehlo_or_helo_if_needed() + resent = msg.get_all('Resent-Date') + if resent is None: + header_prefix = '' + elif len(resent) == 1: + header_prefix = 'Resent-' + else: + raise ValueError("message has more than one 'Resent-' header block") + if from_addr is None: + # Prefer the sender field per RFC 5322 section 3.6.2. 
+ from_addr = (msg[header_prefix + 'Sender'] + if (header_prefix + 'Sender') in msg + else msg[header_prefix + 'From']) + from_addr = email.utils.getaddresses([from_addr])[0][1] + if to_addrs is None: + addr_fields = [f for f in (msg[header_prefix + 'To'], + msg[header_prefix + 'Bcc'], + msg[header_prefix + 'Cc']) + if f is not None] + to_addrs = [a[1] for a in email.utils.getaddresses(addr_fields)] + # Make a local copy so we can delete the bcc headers. + msg_copy = copy.copy(msg) + del msg_copy['Bcc'] + del msg_copy['Resent-Bcc'] + international = False + try: + ''.join([from_addr, *to_addrs]).encode('ascii') + except UnicodeEncodeError: + if not self.has_extn('smtputf8'): + raise SMTPNotSupportedError( + "One or more source or delivery addresses require" + " internationalized email support, but the server" + " does not advertise the required SMTPUTF8 capability") + international = True + with io.BytesIO() as bytesmsg: + if international: + g = email.generator.BytesGenerator( + bytesmsg, policy=msg.policy.clone(utf8=True)) + mail_options = (*mail_options, 'SMTPUTF8', 'BODY=8BITMIME') + else: + g = email.generator.BytesGenerator(bytesmsg) + g.flatten(msg_copy, linesep='\r\n') + flatmsg = bytesmsg.getvalue() + return self.sendmail(from_addr, to_addrs, flatmsg, mail_options, + rcpt_options) + + def close(self): + """Close the connection to the SMTP server.""" + try: + file = self.file + self.file = None + if file: + file.close() + finally: + sock = self.sock + self.sock = None + if sock: + sock.close() + + def quit(self): + """Terminate the SMTP session.""" + res = self.docmd("quit") + # A new EHLO is required after reconnecting with connect() + self.ehlo_resp = self.helo_resp = None + self.esmtp_features = {} + self.does_esmtp = False + self.close() + return res + +if _have_ssl: + + class SMTP_SSL(SMTP): + """ This is a subclass derived from SMTP that connects over an SSL + encrypted socket (to use this class you need a socket module that was + compiled with SSL support). If host is not specified, '' (the local + host) is used. If port is omitted, the standard SMTP-over-SSL port + (465) is used. local_hostname and source_address have the same meaning + as they do in the SMTP class. context also optional, can contain a + SSLContext. + + """ + + default_port = SMTP_SSL_PORT + + def __init__(self, host='', port=0, local_hostname=None, + *, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None, context=None): + if context is None: + context = ssl._create_stdlib_context() + self.context = context + SMTP.__init__(self, host, port, local_hostname, timeout, + source_address) + + def _get_socket(self, host, port, timeout): + if self.debuglevel > 0: + self._print_debug('connect:', (host, port)) + new_socket = super()._get_socket(host, port, timeout) + new_socket = self.context.wrap_socket(new_socket, + server_hostname=self._host) + return new_socket + + __all__.append("SMTP_SSL") + +# +# LMTP extension +# +LMTP_PORT = 2003 + +class LMTP(SMTP): + """LMTP - Local Mail Transfer Protocol + + The LMTP protocol, which is very similar to ESMTP, is heavily based + on the standard SMTP client. It's common to use Unix sockets for + LMTP, so our connect() method must support that as well as a regular + host:port server. local_hostname and source_address have the same + meaning as they do in the SMTP class. To specify a Unix socket, + you must use an absolute path as the host, starting with a '/'. + + Authentication is supported, using the regular SMTP mechanism. 
When + using a Unix socket, LMTP generally don't support or require any + authentication, but your mileage might vary.""" + + ehlo_msg = "lhlo" + + def __init__(self, host='', port=LMTP_PORT, local_hostname=None, + source_address=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): + """Initialize a new instance.""" + super().__init__(host, port, local_hostname=local_hostname, + source_address=source_address, timeout=timeout) + + def connect(self, host='localhost', port=0, source_address=None): + """Connect to the LMTP daemon, on either a Unix or a TCP socket.""" + if host[0] != '/': + return super().connect(host, port, source_address=source_address) + + if self.timeout is not None and not self.timeout: + raise ValueError('Non-blocking socket (timeout=0) is not supported') + + # Handle Unix-domain sockets. + try: + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: + self.sock.settimeout(self.timeout) + self.file = None + self.sock.connect(host) + except OSError: + if self.debuglevel > 0: + self._print_debug('connect fail:', host) + if self.sock: + self.sock.close() + self.sock = None + raise + (code, msg) = self.getreply() + if self.debuglevel > 0: + self._print_debug('connect:', msg) + return (code, msg) + + +# Test the sendmail method, which tests most of the others. +# Note: This always sends to localhost. +if __name__ == '__main__': + def prompt(prompt): + sys.stdout.write(prompt + ": ") + sys.stdout.flush() + return sys.stdin.readline().strip() + + fromaddr = prompt("From") + toaddrs = prompt("To").split(',') + print("Enter message, end with ^D:") + msg = '' + while line := sys.stdin.readline(): + msg = msg + line + print("Message length is %d" % len(msg)) + + server = SMTP('localhost') + server.set_debuglevel(1) + server.sendmail(fromaddr, toaddrs, msg) + server.quit() diff --git a/Python314_4_x86_Template/Lib/socket.py b/Python314_4_x86_Template/Lib/socket.py new file mode 100644 index 00000000..727b0e75 --- /dev/null +++ b/Python314_4_x86_Template/Lib/socket.py @@ -0,0 +1,988 @@ +# Wrapper module for _socket, providing some additional facilities +# implemented in Python. + +"""\ +This module provides socket operations and some related functions. +On Unix, it supports IP (Internet Protocol) and Unix domain sockets. +On other systems, it only supports IP. Functions specific for a +socket are available as methods of the socket object. + +Functions: + +socket() -- create a new socket object +socketpair() -- create a pair of new socket objects [*] +fromfd() -- create a socket object from an open file descriptor [*] +send_fds() -- Send file descriptor to the socket. +recv_fds() -- Receive file descriptors from the socket. +fromshare() -- create a socket object from data received from socket.share() [*] +gethostname() -- return the current hostname +gethostbyname() -- map a hostname to its IP number +gethostbyaddr() -- map an IP number or hostname to DNS info +getservbyname() -- map a service name and a protocol name to a port number +getprotobyname() -- map a protocol name (e.g. 
'tcp') to a number +ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order +htons(), htonl() -- convert 16, 32 bit int from host to network byte order +inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format +inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) +socket.getdefaulttimeout() -- get the default timeout value +socket.setdefaulttimeout() -- set the default timeout value +create_connection() -- connects to an address, with an optional timeout and + optional source address. +create_server() -- create a TCP socket and bind it to a specified address. + + [*] not available on all platforms! + +Special objects: + +SocketType -- type object for socket objects +error -- exception raised for I/O errors +has_ipv6 -- boolean value indicating if IPv6 is supported + +IntEnum constants: + +AF_INET, AF_UNIX -- socket domains (first argument to socket() call) +SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) + +Integer constants: + +Many other constants may be defined; these may be used in calls to +the setsockopt() and getsockopt() methods. +""" + +import _socket +from _socket import * + +import io +import os +import sys +from enum import IntEnum, IntFlag + +try: + import errno +except ImportError: + errno = None +EBADF = getattr(errno, 'EBADF', 9) +EAGAIN = getattr(errno, 'EAGAIN', 11) +EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11) + +__all__ = ["fromfd", "getfqdn", "create_connection", "create_server", + "has_dualstack_ipv6", "AddressFamily", "SocketKind"] +__all__.extend(os._get_exports_list(_socket)) + +# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for +# nicer string representations. +# Note that _socket only knows about the integer values. The public interface +# in this module understands the enums and translates them back from integers +# where needed (e.g. .family property of a socket object). + +IntEnum._convert_( + 'AddressFamily', + __name__, + lambda C: C.isupper() and C.startswith('AF_')) + +IntEnum._convert_( + 'SocketKind', + __name__, + lambda C: C.isupper() and C.startswith('SOCK_')) + +IntFlag._convert_( + 'MsgFlag', + __name__, + lambda C: C.isupper() and C.startswith('MSG_')) + +IntFlag._convert_( + 'AddressInfo', + __name__, + lambda C: C.isupper() and C.startswith('AI_')) + +_LOCALHOST = '127.0.0.1' +_LOCALHOST_V6 = '::1' + + +def _intenum_converter(value, enum_klass): + """Convert a numeric family value to an IntEnum member. + + If it's not a known member, return the numeric value itself. 
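+ + For example, as a doctest-style sketch (numeric values vary by + platform, though AF_INET is 2 on most systems): + + >>> _intenum_converter(2, AddressFamily) + <AddressFamily.AF_INET: 2> + >>> _intenum_converter(9999, AddressFamily) + 9999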
+ """ + try: + return enum_klass(value) + except ValueError: + return value + + +# WSA error codes +if sys.platform.lower().startswith("win"): + errorTab = { + 6: "Specified event object handle is invalid.", + 8: "Insufficient memory available.", + 87: "One or more parameters are invalid.", + 995: "Overlapped operation aborted.", + 996: "Overlapped I/O event object not in signaled state.", + 997: "Overlapped operation will complete later.", + 10004: "The operation was interrupted.", + 10009: "A bad file handle was passed.", + 10013: "Permission denied.", + 10014: "A fault occurred on the network??", + 10022: "An invalid operation was attempted.", + 10024: "Too many open files.", + 10035: "The socket operation would block.", + 10036: "A blocking operation is already in progress.", + 10037: "Operation already in progress.", + 10038: "Socket operation on nonsocket.", + 10039: "Destination address required.", + 10040: "Message too long.", + 10041: "Protocol wrong type for socket.", + 10042: "Bad protocol option.", + 10043: "Protocol not supported.", + 10044: "Socket type not supported.", + 10045: "Operation not supported.", + 10046: "Protocol family not supported.", + 10047: "Address family not supported by protocol family.", + 10048: "The network address is in use.", + 10049: "Cannot assign requested address.", + 10050: "Network is down.", + 10051: "Network is unreachable.", + 10052: "Network dropped connection on reset.", + 10053: "Software caused connection abort.", + 10054: "The connection has been reset.", + 10055: "No buffer space available.", + 10056: "Socket is already connected.", + 10057: "Socket is not connected.", + 10058: "The network has been shut down.", + 10059: "Too many references.", + 10060: "The operation timed out.", + 10061: "Connection refused.", + 10062: "Cannot translate name.", + 10063: "The name is too long.", + 10064: "The host is down.", + 10065: "The host is unreachable.", + 10066: "Directory not empty.", + 10067: "Too many processes.", + 10068: "User quota exceeded.", + 10069: "Disk quota exceeded.", + 10070: "Stale file handle reference.", + 10071: "Item is remote.", + 10091: "Network subsystem is unavailable.", + 10092: "Winsock.dll version out of range.", + 10093: "Successful WSAStartup not yet performed.", + 10101: "Graceful shutdown in progress.", + 10102: "No more results from WSALookupServiceNext.", + 10103: "Call has been canceled.", + 10104: "Procedure call table is invalid.", + 10105: "Service provider is invalid.", + 10106: "Service provider failed to initialize.", + 10107: "System call failure.", + 10108: "Service not found.", + 10109: "Class type not found.", + 10110: "No more results from WSALookupServiceNext.", + 10111: "Call was canceled.", + 10112: "Database query was refused.", + 11001: "Host not found.", + 11002: "Nonauthoritative host not found.", + 11003: "This is a nonrecoverable error.", + 11004: "Valid name, no data record requested type.", + 11005: "QoS receivers.", + 11006: "QoS senders.", + 11007: "No QoS senders.", + 11008: "QoS no receivers.", + 11009: "QoS request confirmed.", + 11010: "QoS admission error.", + 11011: "QoS policy failure.", + 11012: "QoS bad style.", + 11013: "QoS bad object.", + 11014: "QoS traffic control error.", + 11015: "QoS generic error.", + 11016: "QoS service type error.", + 11017: "QoS flowspec error.", + 11018: "Invalid QoS provider buffer.", + 11019: "Invalid QoS filter style.", + 11020: "Invalid QoS filter style.", + 11021: "Incorrect QoS filter count.", + 11022: "Invalid QoS object length.", + 11023: 
"Incorrect QoS flow count.", + 11024: "Unrecognized QoS object.", + 11025: "Invalid QoS policy object.", + 11026: "Invalid QoS flow descriptor.", + 11027: "Invalid QoS provider-specific flowspec.", + 11028: "Invalid QoS provider-specific filterspec.", + 11029: "Invalid QoS shape discard mode object.", + 11030: "Invalid QoS shaping rate object.", + 11031: "Reserved policy QoS element type." + } + __all__.append("errorTab") + + +class _GiveupOnSendfile(Exception): pass + + +class socket(_socket.socket): + + """A subclass of _socket.socket adding the makefile() method.""" + + __slots__ = ["__weakref__", "_io_refs", "_closed"] + + def __init__(self, family=-1, type=-1, proto=-1, fileno=None): + # For user code address family and type values are IntEnum members, but + # for the underlying _socket.socket they're just integers. The + # constructor of _socket.socket converts the given argument to an + # integer automatically. + if fileno is None: + if family == -1: + family = AF_INET + if type == -1: + type = SOCK_STREAM + if proto == -1: + proto = 0 + _socket.socket.__init__(self, family, type, proto, fileno) + self._io_refs = 0 + self._closed = False + + def __enter__(self): + return self + + def __exit__(self, *args): + if not self._closed: + self.close() + + def __repr__(self): + """Wrap __repr__() to reveal the real class name and socket + address(es). + """ + closed = getattr(self, '_closed', False) + s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \ + % (self.__class__.__module__, + self.__class__.__qualname__, + " [closed]" if closed else "", + self.fileno(), + self.family, + self.type, + self.proto) + if not closed: + # getsockname and getpeername may not be available on WASI. + try: + laddr = self.getsockname() + if laddr: + s += ", laddr=%s" % str(laddr) + except (error, AttributeError): + pass + try: + raddr = self.getpeername() + if raddr: + s += ", raddr=%s" % str(raddr) + except (error, AttributeError): + pass + s += '>' + return s + + def __getstate__(self): + raise TypeError(f"cannot pickle {self.__class__.__name__!r} object") + + def dup(self): + """dup() -> socket object + + Duplicate the socket. Return a new socket object connected to the same + system resource. The new socket is non-inheritable. + """ + fd = dup(self.fileno()) + sock = self.__class__(self.family, self.type, self.proto, fileno=fd) + sock.settimeout(self.gettimeout()) + return sock + + def accept(self): + """accept() -> (socket object, address info) + + Wait for an incoming connection. Return a new socket + representing the connection, and the address of the client. + For IP sockets, the address info is a pair (hostaddr, port). + """ + fd, addr = self._accept() + sock = socket(self.family, self.type, self.proto, fileno=fd) + # Issue #7995: if no default timeout is set and the listening + # socket had a (non-zero) timeout, force the new socket in blocking + # mode to override platform-specific socket flags inheritance. + if getdefaulttimeout() is None and self.gettimeout(): + sock.setblocking(True) + return sock, addr + + def makefile(self, mode="r", buffering=None, *, + encoding=None, errors=None, newline=None): + """makefile(...) -> an I/O stream connected to the socket + + The arguments are as for io.open() after the filename, except the only + supported mode values are 'r' (default), 'w', 'b', or a combination of + those. + """ + # XXX refactor to share code? 
+ if not set(mode) <= {"r", "w", "b"}: + raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,)) + writing = "w" in mode + reading = "r" in mode or not writing + assert reading or writing + binary = "b" in mode + rawmode = "" + if reading: + rawmode += "r" + if writing: + rawmode += "w" + raw = SocketIO(self, rawmode) + self._io_refs += 1 + if buffering is None: + buffering = -1 + if buffering < 0: + buffering = io.DEFAULT_BUFFER_SIZE + if buffering == 0: + if not binary: + raise ValueError("unbuffered streams must be binary") + return raw + if reading and writing: + buffer = io.BufferedRWPair(raw, raw, buffering) + elif reading: + buffer = io.BufferedReader(raw, buffering) + else: + assert writing + buffer = io.BufferedWriter(raw, buffering) + if binary: + return buffer + encoding = io.text_encoding(encoding) + text = io.TextIOWrapper(buffer, encoding, errors, newline) + text.mode = mode + return text + + if hasattr(os, 'sendfile'): + + def _sendfile_use_sendfile(self, file, offset=0, count=None): + # Lazy import to improve module import time + import selectors + + self._check_sendfile_params(file, offset, count) + sockno = self.fileno() + try: + fileno = file.fileno() + except (AttributeError, io.UnsupportedOperation) as err: + raise _GiveupOnSendfile(err) # not a regular file + try: + fsize = os.fstat(fileno).st_size + except OSError as err: + raise _GiveupOnSendfile(err) # not a regular file + if not fsize: + return 0 # empty file + # Truncate to 1GiB to avoid OverflowError, see bpo-38319. + blocksize = min(count or fsize, 2 ** 30) + timeout = self.gettimeout() + if timeout == 0: + raise ValueError("non-blocking sockets are not supported") + # poll/select have the advantage of not requiring any + # extra file descriptor, contrarily to epoll/kqueue + # (also, they require a single syscall). + if hasattr(selectors, 'PollSelector'): + selector = selectors.PollSelector() + else: + selector = selectors.SelectSelector() + selector.register(sockno, selectors.EVENT_WRITE) + + total_sent = 0 + # localize variable access to minimize overhead + selector_select = selector.select + os_sendfile = os.sendfile + try: + while True: + if timeout and not selector_select(timeout): + raise TimeoutError('timed out') + if count: + blocksize = min(count - total_sent, blocksize) + if blocksize <= 0: + break + try: + sent = os_sendfile(sockno, fileno, offset, blocksize) + except BlockingIOError: + if not timeout: + # Block until the socket is ready to send some + # data; avoids hogging CPU resources. + selector_select() + continue + except OSError as err: + if total_sent == 0: + # We can get here for different reasons, the main + # one being 'file' is not a regular mmap(2)-like + # file, in which case we'll fall back on using + # plain send(). 
+ raise _GiveupOnSendfile(err) + raise err from None + else: + if sent == 0: + break # EOF + offset += sent + total_sent += sent + return total_sent + finally: + if total_sent > 0 and hasattr(file, 'seek'): + file.seek(offset) + else: + def _sendfile_use_sendfile(self, file, offset=0, count=None): + raise _GiveupOnSendfile( + "os.sendfile() not available on this platform") + + def _sendfile_use_send(self, file, offset=0, count=None): + self._check_sendfile_params(file, offset, count) + if self.gettimeout() == 0: + raise ValueError("non-blocking sockets are not supported") + if offset: + file.seek(offset) + blocksize = min(count, 8192) if count else 8192 + total_sent = 0 + # localize variable access to minimize overhead + file_read = file.read + sock_send = self.send + try: + while True: + if count: + blocksize = min(count - total_sent, blocksize) + if blocksize <= 0: + break + data = memoryview(file_read(blocksize)) + if not data: + break # EOF + while True: + try: + sent = sock_send(data) + except BlockingIOError: + continue + else: + total_sent += sent + if sent < len(data): + data = data[sent:] + else: + break + return total_sent + finally: + if total_sent > 0 and hasattr(file, 'seek'): + file.seek(offset + total_sent) + + def _check_sendfile_params(self, file, offset, count): + if 'b' not in getattr(file, 'mode', 'b'): + raise ValueError("file should be opened in binary mode") + if not self.type & SOCK_STREAM: + raise ValueError("only SOCK_STREAM type sockets are supported") + if count is not None: + if not isinstance(count, int): + raise TypeError( + "count must be a positive integer (got {!r})".format(count)) + if count <= 0: + raise ValueError( + "count must be a positive integer (got {!r})".format(count)) + + def sendfile(self, file, offset=0, count=None): + """sendfile(file[, offset[, count]]) -> sent + + Send a file until EOF is reached by using high-performance + os.sendfile() and return the total number of bytes which + were sent. + *file* must be a regular file object opened in binary mode. + If os.sendfile() is not available (e.g. Windows) or file is + not a regular file socket.send() will be used instead. + *offset* tells from where to start reading the file. + If specified, *count* is the total number of bytes to transmit + as opposed to sending the file until EOF is reached. + File position is updated on return or also in case of error in + which case file.tell() can be used to figure out the number of + bytes which were sent. + The socket must be of SOCK_STREAM type. + Non-blocking sockets are not supported. + """ + try: + return self._sendfile_use_sendfile(file, offset, count) + except _GiveupOnSendfile: + return self._sendfile_use_send(file, offset, count) + + def _decref_socketios(self): + if self._io_refs > 0: + self._io_refs -= 1 + if self._closed: + self.close() + + def _real_close(self, _ss=_socket.socket): + # This function should not reference any globals. See issue #808164. + _ss.close(self) + + def close(self): + # This function should not reference any globals. See issue #808164. + self._closed = True + if self._io_refs <= 0: + self._real_close() + + def detach(self): + """detach() -> file descriptor + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. The file descriptor is returned. + """ + self._closed = True + return super().detach() + + @property + def family(self): + """Read-only access to the address family for this socket. 
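+
+        For example (illustrative), AddressFamily.AF_INET for an IPv4 socket.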
+ """ + return _intenum_converter(super().family, AddressFamily) + + @property + def type(self): + """Read-only access to the socket type. + """ + return _intenum_converter(super().type, SocketKind) + + if os.name == 'nt': + def get_inheritable(self): + return os.get_handle_inheritable(self.fileno()) + def set_inheritable(self, inheritable): + os.set_handle_inheritable(self.fileno(), inheritable) + else: + def get_inheritable(self): + return os.get_inheritable(self.fileno()) + def set_inheritable(self, inheritable): + os.set_inheritable(self.fileno(), inheritable) + get_inheritable.__doc__ = "Get the inheritable flag of the socket" + set_inheritable.__doc__ = "Set the inheritable flag of the socket" + +def fromfd(fd, family, type, proto=0): + """ fromfd(fd, family, type[, proto]) -> socket object + + Create a socket object from a duplicate of the given file + descriptor. The remaining arguments are the same as for socket(). + """ + nfd = dup(fd) + return socket(family, type, proto, nfd) + +if hasattr(_socket.socket, "sendmsg"): + def send_fds(sock, buffers, fds, flags=0, address=None): + """ send_fds(sock, buffers, fds[, flags[, address]]) -> integer + + Send the list of file descriptors fds over an AF_UNIX socket. + """ + import array + + return sock.sendmsg(buffers, [(_socket.SOL_SOCKET, + _socket.SCM_RIGHTS, array.array("i", fds))]) + __all__.append("send_fds") + +if hasattr(_socket.socket, "recvmsg"): + def recv_fds(sock, bufsize, maxfds, flags=0): + """ recv_fds(sock, bufsize, maxfds[, flags]) -> (data, list of file + descriptors, msg_flags, address) + + Receive up to maxfds file descriptors returning the message + data and a list containing the descriptors. + """ + import array + + # Array of ints + fds = array.array("i") + msg, ancdata, flags, addr = sock.recvmsg(bufsize, + _socket.CMSG_LEN(maxfds * fds.itemsize)) + for cmsg_level, cmsg_type, cmsg_data in ancdata: + if (cmsg_level == _socket.SOL_SOCKET and cmsg_type == _socket.SCM_RIGHTS): + fds.frombytes(cmsg_data[: + len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) + + return msg, list(fds), flags, addr + __all__.append("recv_fds") + +if hasattr(_socket.socket, "share"): + def fromshare(info): + """ fromshare(info) -> socket object + + Create a socket object from the bytes object returned by + socket.share(pid). + """ + return socket(0, 0, 0, info) + __all__.append("fromshare") + +# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. +# This is used if _socket doesn't natively provide socketpair. It's +# always defined so that it can be patched in for testing purposes. +def _fallback_socketpair(family=AF_INET, type=SOCK_STREAM, proto=0): + if family == AF_INET: + host = _LOCALHOST + elif family == AF_INET6: + host = _LOCALHOST_V6 + else: + raise ValueError("Only AF_INET and AF_INET6 socket address families " + "are supported") + if type != SOCK_STREAM: + raise ValueError("Only SOCK_STREAM socket type is supported") + if proto != 0: + raise ValueError("Only protocol zero is supported") + + # We create a connected TCP socket. Note the trick with + # setblocking(False) that prevents us from having to create a thread. 
+    lsock = socket(family, type, proto)
+    try:
+        lsock.bind((host, 0))
+        lsock.listen()
+        # On IPv6, ignore flow_info and scope_id
+        addr, port = lsock.getsockname()[:2]
+        csock = socket(family, type, proto)
+        try:
+            csock.setblocking(False)
+            try:
+                csock.connect((addr, port))
+            except (BlockingIOError, InterruptedError):
+                pass
+            csock.setblocking(True)
+            ssock, _ = lsock.accept()
+        except:
+            csock.close()
+            raise
+    finally:
+        lsock.close()
+
+    # Authenticating avoids using a connection from something else
+    # able to connect to {host}:{port} instead of us.
+    # We expect only AF_INET and AF_INET6 families.
+    try:
+        if (
+            ssock.getsockname() != csock.getpeername()
+            or csock.getsockname() != ssock.getpeername()
+        ):
+            raise ConnectionError("Unexpected peer connection")
+    except:
+        # getsockname() and getpeername() can fail
+        # if either socket isn't connected.
+        ssock.close()
+        csock.close()
+        raise
+
+    return (ssock, csock)
+
+if hasattr(_socket, "socketpair"):
+    def socketpair(family=None, type=SOCK_STREAM, proto=0):
+        if family is None:
+            try:
+                family = AF_UNIX
+            except NameError:
+                family = AF_INET
+        a, b = _socket.socketpair(family, type, proto)
+        a = socket(family, type, proto, a.detach())
+        b = socket(family, type, proto, b.detach())
+        return a, b
+
+else:
+    socketpair = _fallback_socketpair
+    __all__.append("socketpair")
+
+socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
+Create a pair of socket objects from the sockets returned by the platform
+socketpair() function.
+The arguments are the same as for socket() except the default family is AF_UNIX
+if defined on the platform; otherwise, the default is AF_INET.
+"""
+
+_blocking_errnos = { EAGAIN, EWOULDBLOCK }
+
+class SocketIO(io.RawIOBase):
+
+    """Raw I/O implementation for stream sockets.
+
+    This class supports the makefile() method on sockets. It provides
+    the raw I/O interface on top of a socket object.
+    """
+
+    # One might wonder why not let FileIO do the job instead. There are two
+    # main reasons why FileIO is not adapted:
+    # - it wouldn't work under Windows (where you can't use read() and
+    #   write() on a socket handle)
+    # - it wouldn't work with socket timeouts (FileIO would ignore the
+    #   timeout and consider the socket non-blocking)
+
+    # XXX More docs
+
+    def __init__(self, sock, mode):
+        if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
+            raise ValueError("invalid mode: %r" % mode)
+        io.RawIOBase.__init__(self)
+        self._sock = sock
+        if "b" not in mode:
+            mode += "b"
+        self._mode = mode
+        self._reading = "r" in mode
+        self._writing = "w" in mode
+        self._timeout_occurred = False
+
+    def readinto(self, b):
+        """Read up to len(b) bytes into the writable buffer *b* and return
+        the number of bytes read. If the socket is non-blocking and no bytes
+        are available, None is returned.
+
+        If *b* is non-empty, a 0 return value indicates that the connection
+        was shutdown at the other end.
+        """
+        self._checkClosed()
+        self._checkReadable()
+        if self._timeout_occurred:
+            raise OSError("cannot read from timed out object")
+        try:
+            return self._sock.recv_into(b)
+        except timeout:
+            self._timeout_occurred = True
+            raise
+        except error as e:
+            if e.errno in _blocking_errnos:
+                return None
+            raise
+
+    def write(self, b):
+        """Write the given bytes or bytearray object *b* to the socket
+        and return the number of bytes written. This can be less than
+        len(b) if not all data could be written. If the socket is
+        non-blocking and no bytes could be written None is returned.
+ """ + self._checkClosed() + self._checkWritable() + try: + return self._sock.send(b) + except error as e: + # XXX what about EINTR? + if e.errno in _blocking_errnos: + return None + raise + + def readable(self): + """True if the SocketIO is open for reading. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._reading + + def writable(self): + """True if the SocketIO is open for writing. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._writing + + def seekable(self): + """True if the SocketIO is open for seeking. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return super().seekable() + + def fileno(self): + """Return the file descriptor of the underlying socket. + """ + self._checkClosed() + return self._sock.fileno() + + @property + def name(self): + if not self.closed: + return self.fileno() + else: + return -1 + + @property + def mode(self): + return self._mode + + def close(self): + """Close the SocketIO object. This doesn't close the underlying + socket, except if all references to it have disappeared. + """ + if self.closed: + return + io.RawIOBase.close(self) + self._sock._decref_socketios() + self._sock = None + + +def getfqdn(name=''): + """Get fully qualified domain name from name. + + An empty argument is interpreted as meaning the local host. + + First the hostname returned by gethostbyaddr() is checked, then + possibly existing aliases. In case no FQDN is available and `name` + was given, it is returned unchanged. If `name` was empty, '0.0.0.0' or '::', + hostname from gethostname() is returned. + """ + name = name.strip() + if not name or name in ('0.0.0.0', '::'): + name = gethostname() + try: + hostname, aliases, ipaddrs = gethostbyaddr(name) + except error: + pass + else: + aliases.insert(0, hostname) + for name in aliases: + if '.' in name: + break + else: + name = hostname + return name + + +_GLOBAL_DEFAULT_TIMEOUT = object() + +def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, + source_address=None, *, all_errors=False): + """Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + A host of '' or port 0 tells the OS to use the default. When a connection + cannot be created, raises the last error if *all_errors* is False, + and an ExceptionGroup of all errors if *all_errors* is True. 
+ """ + + host, port = address + exceptions = [] + for res in getaddrinfo(host, port, 0, SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket(af, socktype, proto) + if timeout is not _GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + # Break explicitly a reference cycle + exceptions.clear() + return sock + + except error as exc: + if not all_errors: + exceptions.clear() # raise only the last error + exceptions.append(exc) + if sock is not None: + sock.close() + + if len(exceptions): + try: + if not all_errors: + raise exceptions[0] + raise ExceptionGroup("create_connection failed", exceptions) + finally: + # Break explicitly a reference cycle + exceptions.clear() + else: + raise error("getaddrinfo returns an empty list") + + +def has_dualstack_ipv6(): + """Return True if the platform supports creating a SOCK_STREAM socket + which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections. + """ + if not has_ipv6 \ + or not hasattr(_socket, 'IPPROTO_IPV6') \ + or not hasattr(_socket, 'IPV6_V6ONLY'): + return False + try: + with socket(AF_INET6, SOCK_STREAM) as sock: + sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) + return True + except error: + return False + + +def create_server(address, *, family=AF_INET, backlog=None, reuse_port=False, + dualstack_ipv6=False): + """Convenience function which creates a SOCK_STREAM type socket + bound to *address* (a 2-tuple (host, port)) and return the socket + object. + + *family* should be either AF_INET or AF_INET6. + *backlog* is the queue size passed to socket.listen(). + *reuse_port* dictates whether to use the SO_REUSEPORT socket option. + *dualstack_ipv6*: if true and the platform supports it, it will + create an AF_INET6 socket able to accept both IPv4 or IPv6 + connections. When false it will explicitly disable this option on + platforms that enable it by default (e.g. Linux). + + >>> with create_server(('', 8000)) as server: + ... while True: + ... conn, addr = server.accept() + ... # handle new connection + """ + if reuse_port and not hasattr(_socket, "SO_REUSEPORT"): + raise ValueError("SO_REUSEPORT not supported on this platform") + if dualstack_ipv6: + if not has_dualstack_ipv6(): + raise ValueError("dualstack_ipv6 not supported on this platform") + if family != AF_INET6: + raise ValueError("dualstack_ipv6 requires AF_INET6 family") + sock = socket(family, SOCK_STREAM) + try: + # Note about Windows. We don't set SO_REUSEADDR because: + # 1) It's unnecessary: bind() will succeed even in case of a + # previous closed socket on the same address and still in + # TIME_WAIT state. + # 2) If set, another socket is free to bind() on the same + # address, effectively preventing this one from accepting + # connections. Also, it may set the process in a state where + # it'll no longer respond to any signals or graceful kills. + # See: https://learn.microsoft.com/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse + if os.name not in ('nt', 'cygwin') and \ + hasattr(_socket, 'SO_REUSEADDR'): + try: + sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) + except error: + # Fail later on bind(), for platforms which may not + # support this option. + pass + # Since Linux 6.12.9, SO_REUSEPORT is not allowed + # on other address families than AF_INET/AF_INET6. 
+ if reuse_port and family in (AF_INET, AF_INET6): + sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1) + if has_ipv6 and family == AF_INET6: + if dualstack_ipv6: + sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) + elif hasattr(_socket, "IPV6_V6ONLY") and \ + hasattr(_socket, "IPPROTO_IPV6"): + sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1) + try: + sock.bind(address) + except error as err: + msg = '%s (while attempting to bind on address %r)' % \ + (err.strerror, address) + raise error(err.errno, msg) from None + if backlog is None: + sock.listen() + else: + sock.listen(backlog) + return sock + except error: + sock.close() + raise + + +def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): + """Resolve host and port into list of address info entries. + + Translate the host/port argument into a sequence of 5-tuples that contain + all the necessary arguments for creating a socket connected to that service. + host is a domain name, a string representation of an IPv4/v6 address or + None. port is a string service name such as 'http', a numeric port number or + None. By passing None as the value of host and port, you can pass NULL to + the underlying C API. + + The family, type and proto arguments can be optionally specified in order to + narrow the list of addresses returned. Passing zero as a value for each of + these arguments selects the full range of results. + """ + # We override this function since we want to translate the numeric family + # and socket type values to enum constants. + addrlist = [] + for res in _socket.getaddrinfo(host, port, family, type, proto, flags): + af, socktype, proto, canonname, sa = res + addrlist.append((_intenum_converter(af, AddressFamily), + _intenum_converter(socktype, SocketKind), + proto, canonname, sa)) + return addrlist diff --git a/Python313_13_x86_Template/Lib/socketserver.py b/Python314_4_x86_Template/Lib/socketserver.py similarity index 100% rename from Python313_13_x86_Template/Lib/socketserver.py rename to Python314_4_x86_Template/Lib/socketserver.py diff --git a/Python314_4_x86_Template/Lib/sqlite3/__init__.py b/Python314_4_x86_Template/Lib/sqlite3/__init__.py new file mode 100644 index 00000000..ed727fae --- /dev/null +++ b/Python314_4_x86_Template/Lib/sqlite3/__init__.py @@ -0,0 +1,57 @@ +# pysqlite2/__init__.py: the pysqlite2 package. +# +# Copyright (C) 2005 Gerhard Häring +# +# This file is part of pysqlite. +# +# This software is provided 'as-is', without any express or implied +# warranty. In no event will the authors be held liable for any damages +# arising from the use of this software. +# +# Permission is granted to anyone to use this software for any purpose, +# including commercial applications, and to alter it and redistribute it +# freely, subject to the following restrictions: +# +# 1. The origin of this software must not be misrepresented; you must not +# claim that you wrote the original software. If you use this software +# in a product, an acknowledgment in the product documentation would be +# appreciated but is not required. +# 2. Altered source versions must be plainly marked as such, and must not be +# misrepresented as being the original software. +# 3. This notice may not be removed or altered from any source distribution. + +""" +The sqlite3 extension module provides a DB-API 2.0 (PEP 249) compliant +interface to the SQLite library, and requires SQLite 3.15.2 or newer. 
+ +To use the module, start by creating a database Connection object: + + import sqlite3 + cx = sqlite3.connect("test.db") # test.db will be created or opened + +The special path name ":memory:" can be provided to connect to a transient +in-memory database: + + cx = sqlite3.connect(":memory:") # connect to a database in RAM + +Once a connection has been established, create a Cursor object and call +its execute() method to perform SQL queries: + + cu = cx.cursor() + + # create a table + cu.execute("create table lang(name, first_appeared)") + + # insert values into a table + cu.execute("insert into lang values (?, ?)", ("C", 1972)) + + # execute a query and iterate over the result + for row in cu.execute("select * from lang"): + print(row) + + cx.close() + +The sqlite3 module is written by Gerhard Häring . +""" + +from sqlite3.dbapi2 import * diff --git a/Python314_4_x86_Template/Lib/sqlite3/__main__.py b/Python314_4_x86_Template/Lib/sqlite3/__main__.py new file mode 100644 index 00000000..4ccf292d --- /dev/null +++ b/Python314_4_x86_Template/Lib/sqlite3/__main__.py @@ -0,0 +1,139 @@ +"""A simple SQLite CLI for the sqlite3 module. + +Apart from using 'argparse' for the command-line interface, +this module implements the REPL as a thin wrapper around +the InteractiveConsole class from the 'code' stdlib module. +""" +import sqlite3 +import sys + +from argparse import ArgumentParser +from code import InteractiveConsole +from textwrap import dedent + + +def execute(c, sql, suppress_errors=True): + """Helper that wraps execution of SQL code. + + This is used both by the REPL and by direct execution from the CLI. + + 'c' may be a cursor or a connection. + 'sql' is the SQL string to execute. + """ + + try: + for row in c.execute(sql): + print(row) + except sqlite3.Error as e: + tp = type(e).__name__ + try: + print(f"{tp} ({e.sqlite_errorname}): {e}", file=sys.stderr) + except AttributeError: + print(f"{tp}: {e}", file=sys.stderr) + if not suppress_errors: + sys.exit(1) + + +class SqliteInteractiveConsole(InteractiveConsole): + """A simple SQLite REPL.""" + + def __init__(self, connection): + super().__init__() + self._con = connection + self._cur = connection.cursor() + + def runsource(self, source, filename="", symbol="single"): + """Override runsource, the core of the InteractiveConsole REPL. + + Return True if more input is needed; buffering is done automatically. + Return False if input is a complete statement ready for execution. + """ + if not source or source.isspace(): + return False + if source[0] == ".": + match source[1:].strip(): + case "version": + print(f"{sqlite3.sqlite_version}") + case "help": + print("Enter SQL code and press enter.") + case "quit": + sys.exit(0) + case "": + pass + case _ as unknown: + self.write("Error: unknown command or invalid arguments:" + f' "{unknown}".\n') + else: + if not sqlite3.complete_statement(source): + return True + execute(self._cur, source) + return False + + +def main(*args): + parser = ArgumentParser( + description="Python sqlite3 CLI", + color=True, + ) + parser.add_argument( + "filename", type=str, default=":memory:", nargs="?", + help=( + "SQLite database to open (defaults to ':memory:'). " + "A new database is created if the file does not previously exist." + ), + ) + parser.add_argument( + "sql", type=str, nargs="?", + help=( + "An SQL query to execute. " + "Any returned rows are printed to stdout." 
+ ), + ) + parser.add_argument( + "-v", "--version", action="version", + version=f"SQLite version {sqlite3.sqlite_version}", + help="Print underlying SQLite library version", + ) + args = parser.parse_args(*args) + + if args.filename == ":memory:": + db_name = "a transient in-memory database" + else: + db_name = repr(args.filename) + + # Prepare REPL banner and prompts. + if sys.platform == "win32" and "idlelib.run" not in sys.modules: + eofkey = "CTRL-Z" + else: + eofkey = "CTRL-D" + banner = dedent(f""" + sqlite3 shell, running on SQLite version {sqlite3.sqlite_version} + Connected to {db_name} + + Each command will be run using execute() on the cursor. + Type ".help" for more information; type ".quit" or {eofkey} to quit. + """).strip() + sys.ps1 = "sqlite> " + sys.ps2 = " ... " + + con = sqlite3.connect(args.filename, isolation_level=None) + try: + if args.sql: + # SQL statement provided on the command-line; execute it directly. + execute(con, args.sql, suppress_errors=False) + else: + # No SQL provided; start the REPL. + console = SqliteInteractiveConsole(con) + try: + import readline # noqa: F401 + except ImportError: + pass + console.interact(banner, exitmsg="") + finally: + con.close() + + sys.exit(0) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/Python314_4_x86_Template/Lib/sqlite3/dbapi2.py b/Python314_4_x86_Template/Lib/sqlite3/dbapi2.py new file mode 100644 index 00000000..03157605 --- /dev/null +++ b/Python314_4_x86_Template/Lib/sqlite3/dbapi2.py @@ -0,0 +1,96 @@ +# pysqlite2/dbapi2.py: the DB-API 2.0 interface +# +# Copyright (C) 2004-2005 Gerhard Häring +# +# This file is part of pysqlite. +# +# This software is provided 'as-is', without any express or implied +# warranty. In no event will the authors be held liable for any damages +# arising from the use of this software. +# +# Permission is granted to anyone to use this software for any purpose, +# including commercial applications, and to alter it and redistribute it +# freely, subject to the following restrictions: +# +# 1. The origin of this software must not be misrepresented; you must not +# claim that you wrote the original software. If you use this software +# in a product, an acknowledgment in the product documentation would be +# appreciated but is not required. +# 2. Altered source versions must be plainly marked as such, and must not be +# misrepresented as being the original software. +# 3. This notice may not be removed or altered from any source distribution. 
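+
+# Illustrative DB-API usage (added commentary, not upstream source). The
+# "qmark" paramstyle declared below means SQL parameters are written as "?":
+#
+#     import sqlite3
+#     con = sqlite3.connect(":memory:")
+#     con.execute("CREATE TABLE t (x)")
+#     con.execute("INSERT INTO t VALUES (?)", (42,))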
+ +import datetime +import time +import collections.abc + +from _sqlite3 import * + +paramstyle = "qmark" + +apilevel = "2.0" + +Date = datetime.date + +Time = datetime.time + +Timestamp = datetime.datetime + +def DateFromTicks(ticks): + return Date(*time.localtime(ticks)[:3]) + +def TimeFromTicks(ticks): + return Time(*time.localtime(ticks)[3:6]) + +def TimestampFromTicks(ticks): + return Timestamp(*time.localtime(ticks)[:6]) + + +sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")]) + +Binary = memoryview +collections.abc.Sequence.register(Row) + +def register_adapters_and_converters(): + from warnings import warn + + msg = ("The default {what} is deprecated as of Python 3.12; " + "see the sqlite3 documentation for suggested replacement recipes") + + def adapt_date(val): + warn(msg.format(what="date adapter"), DeprecationWarning, stacklevel=2) + return val.isoformat() + + def adapt_datetime(val): + warn(msg.format(what="datetime adapter"), DeprecationWarning, stacklevel=2) + return val.isoformat(" ") + + def convert_date(val): + warn(msg.format(what="date converter"), DeprecationWarning, stacklevel=2) + return datetime.date(*map(int, val.split(b"-"))) + + def convert_timestamp(val): + warn(msg.format(what="timestamp converter"), DeprecationWarning, stacklevel=2) + datepart, timepart = val.split(b" ") + year, month, day = map(int, datepart.split(b"-")) + timepart_full = timepart.split(b".") + hours, minutes, seconds = map(int, timepart_full[0].split(b":")) + if len(timepart_full) == 2: + microseconds = int('{:0<6.6}'.format(timepart_full[1].decode())) + else: + microseconds = 0 + + val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds) + return val + + + register_adapter(datetime.date, adapt_date) + register_adapter(datetime.datetime, adapt_datetime) + register_converter("date", convert_date) + register_converter("timestamp", convert_timestamp) + +register_adapters_and_converters() + +# Clean up namespace + +del(register_adapters_and_converters) diff --git a/Python313_13_x86_Template/Lib/sqlite3/dump.py b/Python314_4_x86_Template/Lib/sqlite3/dump.py similarity index 100% rename from Python313_13_x86_Template/Lib/sqlite3/dump.py rename to Python314_4_x86_Template/Lib/sqlite3/dump.py diff --git a/Python313_13_x86_Template/Lib/sre_compile.py b/Python314_4_x86_Template/Lib/sre_compile.py similarity index 100% rename from Python313_13_x86_Template/Lib/sre_compile.py rename to Python314_4_x86_Template/Lib/sre_compile.py diff --git a/Python313_13_x86_Template/Lib/sre_constants.py b/Python314_4_x86_Template/Lib/sre_constants.py similarity index 100% rename from Python313_13_x86_Template/Lib/sre_constants.py rename to Python314_4_x86_Template/Lib/sre_constants.py diff --git a/Python313_13_x86_Template/Lib/sre_parse.py b/Python314_4_x86_Template/Lib/sre_parse.py similarity index 100% rename from Python313_13_x86_Template/Lib/sre_parse.py rename to Python314_4_x86_Template/Lib/sre_parse.py diff --git a/Python314_4_x86_Template/Lib/ssl.py b/Python314_4_x86_Template/Lib/ssl.py new file mode 100644 index 00000000..8889aff9 --- /dev/null +++ b/Python314_4_x86_Template/Lib/ssl.py @@ -0,0 +1,1529 @@ +# Wrapper module for _ssl, providing some additional facilities +# implemented in Python. Written by Bill Janssen. + +"""This module provides some more Pythonic support for SSL. 
+ +Object types: + + SSLSocket -- subtype of socket.socket which does SSL over the socket + +Exceptions: + + SSLError -- exception raised for I/O errors + +Functions: + + cert_time_to_seconds -- convert time string used for certificate + notBefore and notAfter functions to integer + seconds past the Epoch (the time values + returned from time.time()) + + get_server_certificate (addr, ssl_version, ca_certs, timeout) -- Retrieve the + certificate from the server at the specified + address and return it as a PEM-encoded string + + +Integer constants: + +SSL_ERROR_ZERO_RETURN +SSL_ERROR_WANT_READ +SSL_ERROR_WANT_WRITE +SSL_ERROR_WANT_X509_LOOKUP +SSL_ERROR_SYSCALL +SSL_ERROR_SSL +SSL_ERROR_WANT_CONNECT + +SSL_ERROR_EOF +SSL_ERROR_INVALID_ERROR_CODE + +The following group define certificate requirements that one side is +allowing/requiring from the other side: + +CERT_NONE - no certificates from the other side are required (or will + be looked at if provided) +CERT_OPTIONAL - certificates are not required, but if provided will be + validated, and if validation fails, the connection will + also fail +CERT_REQUIRED - certificates are required, and will be validated, and + if validation fails, the connection will also fail + +The following constants identify various SSL protocol variants: + +PROTOCOL_SSLv2 +PROTOCOL_SSLv3 +PROTOCOL_SSLv23 +PROTOCOL_TLS +PROTOCOL_TLS_CLIENT +PROTOCOL_TLS_SERVER +PROTOCOL_TLSv1 +PROTOCOL_TLSv1_1 +PROTOCOL_TLSv1_2 + +The following constants identify various SSL alert message descriptions as per +http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-6 + +ALERT_DESCRIPTION_CLOSE_NOTIFY +ALERT_DESCRIPTION_UNEXPECTED_MESSAGE +ALERT_DESCRIPTION_BAD_RECORD_MAC +ALERT_DESCRIPTION_RECORD_OVERFLOW +ALERT_DESCRIPTION_DECOMPRESSION_FAILURE +ALERT_DESCRIPTION_HANDSHAKE_FAILURE +ALERT_DESCRIPTION_BAD_CERTIFICATE +ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE +ALERT_DESCRIPTION_CERTIFICATE_REVOKED +ALERT_DESCRIPTION_CERTIFICATE_EXPIRED +ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN +ALERT_DESCRIPTION_ILLEGAL_PARAMETER +ALERT_DESCRIPTION_UNKNOWN_CA +ALERT_DESCRIPTION_ACCESS_DENIED +ALERT_DESCRIPTION_DECODE_ERROR +ALERT_DESCRIPTION_DECRYPT_ERROR +ALERT_DESCRIPTION_PROTOCOL_VERSION +ALERT_DESCRIPTION_INSUFFICIENT_SECURITY +ALERT_DESCRIPTION_INTERNAL_ERROR +ALERT_DESCRIPTION_USER_CANCELLED +ALERT_DESCRIPTION_NO_RENEGOTIATION +ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION +ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +ALERT_DESCRIPTION_UNRECOGNIZED_NAME +ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE +ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE +ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY +""" + +import sys +import os +from collections import namedtuple +from enum import Enum as _Enum, IntEnum as _IntEnum, IntFlag as _IntFlag +from enum import _simple_enum + +import _ssl # if we can't import it, let the error propagate + +from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION +from _ssl import _SSLContext, MemoryBIO, SSLSession +from _ssl import ( + SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError, + SSLSyscallError, SSLEOFError, SSLCertVerificationError + ) +from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj +from _ssl import RAND_status, RAND_add, RAND_bytes +try: + from _ssl import RAND_egd +except ImportError: + # RAND_egd is not supported on some platforms + pass + + +from _ssl import ( + HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN, HAS_SSLv2, HAS_SSLv3, HAS_TLSv1, + HAS_TLSv1_1, HAS_TLSv1_2, HAS_TLSv1_3, HAS_PSK, HAS_PHA +) +from 
_ssl import _DEFAULT_CIPHERS, _OPENSSL_API_VERSION + +_IntEnum._convert_( + '_SSLMethod', __name__, + lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23', + source=_ssl) + +_IntFlag._convert_( + 'Options', __name__, + lambda name: name.startswith('OP_'), + source=_ssl) + +_IntEnum._convert_( + 'AlertDescription', __name__, + lambda name: name.startswith('ALERT_DESCRIPTION_'), + source=_ssl) + +_IntEnum._convert_( + 'SSLErrorNumber', __name__, + lambda name: name.startswith('SSL_ERROR_'), + source=_ssl) + +_IntFlag._convert_( + 'VerifyFlags', __name__, + lambda name: name.startswith('VERIFY_'), + source=_ssl) + +_IntEnum._convert_( + 'VerifyMode', __name__, + lambda name: name.startswith('CERT_'), + source=_ssl) + +PROTOCOL_SSLv23 = _SSLMethod.PROTOCOL_SSLv23 = _SSLMethod.PROTOCOL_TLS +_PROTOCOL_NAMES = {value: name for name, value in _SSLMethod.__members__.items()} + +_SSLv2_IF_EXISTS = getattr(_SSLMethod, 'PROTOCOL_SSLv2', None) + + +@_simple_enum(_IntEnum) +class TLSVersion: + MINIMUM_SUPPORTED = _ssl.PROTO_MINIMUM_SUPPORTED + SSLv3 = _ssl.PROTO_SSLv3 + TLSv1 = _ssl.PROTO_TLSv1 + TLSv1_1 = _ssl.PROTO_TLSv1_1 + TLSv1_2 = _ssl.PROTO_TLSv1_2 + TLSv1_3 = _ssl.PROTO_TLSv1_3 + MAXIMUM_SUPPORTED = _ssl.PROTO_MAXIMUM_SUPPORTED + + +@_simple_enum(_IntEnum) +class _TLSContentType: + """Content types (record layer) + + See RFC 8446, section B.1 + """ + CHANGE_CIPHER_SPEC = 20 + ALERT = 21 + HANDSHAKE = 22 + APPLICATION_DATA = 23 + # pseudo content types + HEADER = 0x100 + INNER_CONTENT_TYPE = 0x101 + + +@_simple_enum(_IntEnum) +class _TLSAlertType: + """Alert types for TLSContentType.ALERT messages + + See RFC 8446, section B.2 + """ + CLOSE_NOTIFY = 0 + UNEXPECTED_MESSAGE = 10 + BAD_RECORD_MAC = 20 + DECRYPTION_FAILED = 21 + RECORD_OVERFLOW = 22 + DECOMPRESSION_FAILURE = 30 + HANDSHAKE_FAILURE = 40 + NO_CERTIFICATE = 41 + BAD_CERTIFICATE = 42 + UNSUPPORTED_CERTIFICATE = 43 + CERTIFICATE_REVOKED = 44 + CERTIFICATE_EXPIRED = 45 + CERTIFICATE_UNKNOWN = 46 + ILLEGAL_PARAMETER = 47 + UNKNOWN_CA = 48 + ACCESS_DENIED = 49 + DECODE_ERROR = 50 + DECRYPT_ERROR = 51 + EXPORT_RESTRICTION = 60 + PROTOCOL_VERSION = 70 + INSUFFICIENT_SECURITY = 71 + INTERNAL_ERROR = 80 + INAPPROPRIATE_FALLBACK = 86 + USER_CANCELED = 90 + NO_RENEGOTIATION = 100 + MISSING_EXTENSION = 109 + UNSUPPORTED_EXTENSION = 110 + CERTIFICATE_UNOBTAINABLE = 111 + UNRECOGNIZED_NAME = 112 + BAD_CERTIFICATE_STATUS_RESPONSE = 113 + BAD_CERTIFICATE_HASH_VALUE = 114 + UNKNOWN_PSK_IDENTITY = 115 + CERTIFICATE_REQUIRED = 116 + NO_APPLICATION_PROTOCOL = 120 + + +@_simple_enum(_IntEnum) +class _TLSMessageType: + """Message types (handshake protocol) + + See RFC 8446, section B.3 + """ + HELLO_REQUEST = 0 + CLIENT_HELLO = 1 + SERVER_HELLO = 2 + HELLO_VERIFY_REQUEST = 3 + NEWSESSION_TICKET = 4 + END_OF_EARLY_DATA = 5 + HELLO_RETRY_REQUEST = 6 + ENCRYPTED_EXTENSIONS = 8 + CERTIFICATE = 11 + SERVER_KEY_EXCHANGE = 12 + CERTIFICATE_REQUEST = 13 + SERVER_DONE = 14 + CERTIFICATE_VERIFY = 15 + CLIENT_KEY_EXCHANGE = 16 + FINISHED = 20 + CERTIFICATE_URL = 21 + CERTIFICATE_STATUS = 22 + SUPPLEMENTAL_DATA = 23 + KEY_UPDATE = 24 + NEXT_PROTO = 67 + MESSAGE_HASH = 254 + CHANGE_CIPHER_SPEC = 0x0101 + + +if sys.platform == "win32": + from _ssl import enum_certificates, enum_crls + +from socket import socket, SOCK_STREAM, create_connection +from socket import SOL_SOCKET, SO_TYPE, _GLOBAL_DEFAULT_TIMEOUT +import socket as _socket +import base64 # for DER-to-PEM translation +import errno +import warnings + + +socket_error = OSError # keep that public 
name in module namespace
+
+CHANNEL_BINDING_TYPES = ['tls-unique']
+
+HAS_NEVER_CHECK_COMMON_NAME = hasattr(_ssl, 'HOSTFLAG_NEVER_CHECK_SUBJECT')
+
+
+_RESTRICTED_SERVER_CIPHERS = _DEFAULT_CIPHERS
+
+CertificateError = SSLCertVerificationError
+
+
+def _dnsname_match(dn, hostname):
+    """Matching according to RFC 6125, section 6.4.3
+
+    - Hostnames are compared lower-case.
+    - For IDNA, both dn and hostname must be encoded as IDN A-label (ACE).
+    - Partial wildcards like 'www*.example.org', multiple wildcards, a sole
+      wildcard or wildcards in labels other than the left-most label are not
+      supported and a CertificateError is raised.
+    - A wildcard must match at least one character.
+    """
+    if not dn:
+        return False
+
+    wildcards = dn.count('*')
+    # speed up common case w/o wildcards
+    if not wildcards:
+        return dn.lower() == hostname.lower()
+
+    if wildcards > 1:
+        raise CertificateError(
+            "too many wildcards in certificate DNS name: {!r}.".format(dn))
+
+    dn_leftmost, sep, dn_remainder = dn.partition('.')
+
+    if '*' in dn_remainder:
+        # Only match wildcard in leftmost segment.
+        raise CertificateError(
+            "wildcard can only be present in the leftmost label: "
+            "{!r}.".format(dn))
+
+    if not sep:
+        # no right side
+        raise CertificateError(
+            "a sole wildcard without additional labels is not supported: "
+            "{!r}.".format(dn))
+
+    if dn_leftmost != '*':
+        # no partial wildcard matching
+        raise CertificateError(
+            "partial wildcards in leftmost label are not supported: "
+            "{!r}.".format(dn))
+
+    hostname_leftmost, sep, hostname_remainder = hostname.partition('.')
+    if not hostname_leftmost or not sep:
+        # wildcard must match at least one char
+        return False
+    return dn_remainder.lower() == hostname_remainder.lower()
+
+
+def _inet_paton(ipname):
+    """Try to convert an IP address to packed binary form
+
+    Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6
+    support.
+    """
+    # inet_aton() also accepts strings like '1', '127.1', some also trailing
+    # data like '127.0.0.1 whatever'.
+    try:
+        addr = _socket.inet_aton(ipname)
+    except OSError:
+        # not an IPv4 address
+        pass
+    else:
+        if _socket.inet_ntoa(addr) == ipname:
+            # only accept injective ipnames
+            return addr
+        else:
+            # refuse for short IPv4 notation and additional trailing data
+            raise ValueError(
+                "{!r} is not a quad-dotted IPv4 address.".format(ipname)
+            )
+
+    try:
+        return _socket.inet_pton(_socket.AF_INET6, ipname)
+    except OSError:
+        raise ValueError("{!r} is neither an IPv4 nor an IPv6 "
+                         "address.".format(ipname))
+    except AttributeError:
+        # AF_INET6 not available
+        pass
+
+    raise ValueError("{!r} is not an IPv4 address.".format(ipname))
+
+
+def _ipaddress_match(cert_ipaddress, host_ip):
+    """Exact matching of IP addresses.
+
+    RFC 6125 explicitly doesn't define an algorithm for this
+    (section 1.7.2 - "Out of Scope").
+    """
+    # OpenSSL may add a trailing newline to a subjectAltName's IP address,
+    # commonly with IPv6 addresses. Strip off trailing \n.
+    ip = _inet_paton(cert_ipaddress.rstrip())
+    return ip == host_ip
+
+
+DefaultVerifyPaths = namedtuple("DefaultVerifyPaths",
+    "cafile capath openssl_cafile_env openssl_cafile openssl_capath_env "
+    "openssl_capath")
+
+def get_default_verify_paths():
+    """Return paths to default cafile and capath.
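+
+    For example (illustrative; the actual paths vary by platform and OpenSSL
+    build), the result may look like:
+
+        DefaultVerifyPaths(cafile='/usr/lib/ssl/cert.pem', capath=None,
+                           openssl_cafile_env='SSL_CERT_FILE', ...)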
+ """ + parts = _ssl.get_default_verify_paths() + + # environment vars shadow paths + cafile = os.environ.get(parts[0], parts[1]) + capath = os.environ.get(parts[2], parts[3]) + + return DefaultVerifyPaths(cafile if os.path.isfile(cafile) else None, + capath if os.path.isdir(capath) else None, + *parts) + + +class _ASN1Object(namedtuple("_ASN1Object", "nid shortname longname oid")): + """ASN.1 object identifier lookup + """ + __slots__ = () + + def __new__(cls, oid): + return super().__new__(cls, *_txt2obj(oid, name=False)) + + @classmethod + def fromnid(cls, nid): + """Create _ASN1Object from OpenSSL numeric ID + """ + return super().__new__(cls, *_nid2obj(nid)) + + @classmethod + def fromname(cls, name): + """Create _ASN1Object from short name, long name or OID + """ + return super().__new__(cls, *_txt2obj(name, name=True)) + + +class Purpose(_ASN1Object, _Enum): + """SSLContext purpose flags with X509v3 Extended Key Usage objects + """ + SERVER_AUTH = '1.3.6.1.5.5.7.3.1' + CLIENT_AUTH = '1.3.6.1.5.5.7.3.2' + + +class SSLContext(_SSLContext): + """An SSLContext holds various SSL-related configuration options and + data, such as certificates and possibly a private key.""" + _windows_cert_stores = ("CA", "ROOT") + + sslsocket_class = None # SSLSocket is assigned later. + sslobject_class = None # SSLObject is assigned later. + + def __new__(cls, protocol=None, *args, **kwargs): + if protocol is None: + warnings.warn( + "ssl.SSLContext() without protocol argument is deprecated.", + category=DeprecationWarning, + stacklevel=2 + ) + protocol = PROTOCOL_TLS + self = _SSLContext.__new__(cls, protocol) + return self + + def _encode_hostname(self, hostname): + if hostname is None: + return None + elif isinstance(hostname, str): + return hostname.encode('idna').decode('ascii') + else: + return hostname.decode('ascii') + + def wrap_socket(self, sock, server_side=False, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, + server_hostname=None, session=None): + # SSLSocket class handles server_hostname encoding before it calls + # ctx._wrap_socket() + return self.sslsocket_class._create( + sock=sock, + server_side=server_side, + do_handshake_on_connect=do_handshake_on_connect, + suppress_ragged_eofs=suppress_ragged_eofs, + server_hostname=server_hostname, + context=self, + session=session + ) + + def wrap_bio(self, incoming, outgoing, server_side=False, + server_hostname=None, session=None): + # Need to encode server_hostname here because _wrap_bio() can only + # handle ASCII str. 
+ return self.sslobject_class._create( + incoming, outgoing, server_side=server_side, + server_hostname=self._encode_hostname(server_hostname), + session=session, context=self, + ) + + def set_npn_protocols(self, npn_protocols): + warnings.warn( + "ssl NPN is deprecated, use ALPN instead", + DeprecationWarning, + stacklevel=2 + ) + protos = bytearray() + for protocol in npn_protocols: + b = bytes(protocol, 'ascii') + if len(b) == 0 or len(b) > 255: + raise SSLError('NPN protocols must be 1 to 255 in length') + protos.append(len(b)) + protos.extend(b) + + self._set_npn_protocols(protos) + + def set_servername_callback(self, server_name_callback): + if server_name_callback is None: + self.sni_callback = None + else: + if not callable(server_name_callback): + raise TypeError("not a callable object") + + def shim_cb(sslobj, servername, sslctx): + servername = self._encode_hostname(servername) + return server_name_callback(sslobj, servername, sslctx) + + self.sni_callback = shim_cb + + def set_alpn_protocols(self, alpn_protocols): + protos = bytearray() + for protocol in alpn_protocols: + b = bytes(protocol, 'ascii') + if len(b) == 0 or len(b) > 255: + raise SSLError('ALPN protocols must be 1 to 255 in length') + protos.append(len(b)) + protos.extend(b) + + self._set_alpn_protocols(protos) + + def _load_windows_store_certs(self, storename, purpose): + try: + for cert, encoding, trust in enum_certificates(storename): + # CA certs are never PKCS#7 encoded + if encoding == "x509_asn": + if trust is True or purpose.oid in trust: + try: + self.load_verify_locations(cadata=cert) + except SSLError as exc: + warnings.warn(f"Bad certificate in Windows certificate store: {exc!s}") + except PermissionError: + warnings.warn("unable to enumerate Windows certificate store") + + def load_default_certs(self, purpose=Purpose.SERVER_AUTH): + if not isinstance(purpose, _ASN1Object): + raise TypeError(purpose) + if sys.platform == "win32": + for storename in self._windows_cert_stores: + self._load_windows_store_certs(storename, purpose) + self.set_default_verify_paths() + + if hasattr(_SSLContext, 'minimum_version'): + @property + def minimum_version(self): + return TLSVersion(super().minimum_version) + + @minimum_version.setter + def minimum_version(self, value): + if value == TLSVersion.SSLv3: + self.options &= ~Options.OP_NO_SSLv3 + super(SSLContext, SSLContext).minimum_version.__set__(self, value) + + @property + def maximum_version(self): + return TLSVersion(super().maximum_version) + + @maximum_version.setter + def maximum_version(self, value): + super(SSLContext, SSLContext).maximum_version.__set__(self, value) + + @property + def options(self): + return Options(super().options) + + @options.setter + def options(self, value): + super(SSLContext, SSLContext).options.__set__(self, value) + + if hasattr(_ssl, 'HOSTFLAG_NEVER_CHECK_SUBJECT'): + @property + def hostname_checks_common_name(self): + ncs = self._host_flags & _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT + return ncs != _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT + + @hostname_checks_common_name.setter + def hostname_checks_common_name(self, value): + if value: + self._host_flags &= ~_ssl.HOSTFLAG_NEVER_CHECK_SUBJECT + else: + self._host_flags |= _ssl.HOSTFLAG_NEVER_CHECK_SUBJECT + else: + @property + def hostname_checks_common_name(self): + return True + + @property + def _msg_callback(self): + """TLS message callback + + The message callback provides a debugging hook to analyze TLS + connections. 
The callback is called for any TLS protocol message + (header, handshake, alert, and more), but not for application data. + Due to technical limitations, the callback can't be used to filter + traffic or to abort a connection. Any exception raised in the + callback is delayed until the handshake, read, or write operation + has been performed. + + def msg_cb(conn, direction, version, content_type, msg_type, data): + pass + + conn + :class:`SSLSocket` or :class:`SSLObject` instance + direction + ``read`` or ``write`` + version + :class:`TLSVersion` enum member or int for unknown version. For a + frame header, it's the header version. + content_type + :class:`_TLSContentType` enum member or int for unsupported + content type. + msg_type + Either a :class:`_TLSContentType` enum number for a header + message, a :class:`_TLSAlertType` enum member for an alert + message, a :class:`_TLSMessageType` enum member for other + messages, or int for unsupported message types. + data + Raw, decrypted message content as bytes + """ + inner = super()._msg_callback + if inner is not None: + return inner.user_function + else: + return None + + @_msg_callback.setter + def _msg_callback(self, callback): + if callback is None: + super(SSLContext, SSLContext)._msg_callback.__set__(self, None) + return + + if not hasattr(callback, '__call__'): + raise TypeError(f"{callback} is not callable.") + + def inner(conn, direction, version, content_type, msg_type, data): + try: + version = TLSVersion(version) + except ValueError: + pass + + try: + content_type = _TLSContentType(content_type) + except ValueError: + pass + + if content_type == _TLSContentType.HEADER: + msg_enum = _TLSContentType + elif content_type == _TLSContentType.ALERT: + msg_enum = _TLSAlertType + else: + msg_enum = _TLSMessageType + try: + msg_type = msg_enum(msg_type) + except ValueError: + pass + + return callback(conn, direction, version, + content_type, msg_type, data) + + inner.user_function = callback + + super(SSLContext, SSLContext)._msg_callback.__set__(self, inner) + + @property + def protocol(self): + return _SSLMethod(super().protocol) + + @property + def verify_flags(self): + return VerifyFlags(super().verify_flags) + + @verify_flags.setter + def verify_flags(self, value): + super(SSLContext, SSLContext).verify_flags.__set__(self, value) + + @property + def verify_mode(self): + value = super().verify_mode + try: + return VerifyMode(value) + except ValueError: + return value + + @verify_mode.setter + def verify_mode(self, value): + super(SSLContext, SSLContext).verify_mode.__set__(self, value) + + +def create_default_context(purpose=Purpose.SERVER_AUTH, *, cafile=None, + capath=None, cadata=None): + """Create a SSLContext object with default settings. + + NOTE: The protocol and settings may change anytime without prior + deprecation. The values represent a fair balance between maximum + compatibility and security. + """ + if not isinstance(purpose, _ASN1Object): + raise TypeError(purpose) + + # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION, + # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE + # by default. 
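+    # Typical call pattern (added commentary, not upstream source):
+    #     ctx = create_default_context()
+    #     with ctx.wrap_socket(sock, server_hostname="example.com") as tls:
+    #         tls.sendall(b"...")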
+    if purpose == Purpose.SERVER_AUTH:
+        # verify certs and host name in client mode
+        context = SSLContext(PROTOCOL_TLS_CLIENT)
+        context.verify_mode = CERT_REQUIRED
+        context.check_hostname = True
+    elif purpose == Purpose.CLIENT_AUTH:
+        context = SSLContext(PROTOCOL_TLS_SERVER)
+    else:
+        raise ValueError(purpose)
+
+    # `VERIFY_X509_PARTIAL_CHAIN` makes OpenSSL's chain building behave more
+    # like RFC 3280 and 5280, which specify that chain building stops with the
+    # first trust anchor, even if that anchor is not self-signed.
+    #
+    # `VERIFY_X509_STRICT` makes OpenSSL more conservative about the
+    # certificates it accepts, including "disabling workarounds for
+    # some broken certificates."
+    context.verify_flags |= (_ssl.VERIFY_X509_PARTIAL_CHAIN |
+                             _ssl.VERIFY_X509_STRICT)
+
+    if cafile or capath or cadata:
+        context.load_verify_locations(cafile, capath, cadata)
+    elif context.verify_mode != CERT_NONE:
+        # no explicit cafile, capath or cadata but the verify mode is
+        # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
+        # root CA certificates for the given purpose. This may fail silently.
+        context.load_default_certs(purpose)
+    # OpenSSL 1.1.1 keylog file
+    if hasattr(context, 'keylog_filename'):
+        keylogfile = os.environ.get('SSLKEYLOGFILE')
+        if keylogfile and not sys.flags.ignore_environment:
+            context.keylog_filename = keylogfile
+    return context
+
+def _create_unverified_context(protocol=None, *, cert_reqs=CERT_NONE,
+                               check_hostname=False, purpose=Purpose.SERVER_AUTH,
+                               certfile=None, keyfile=None,
+                               cafile=None, capath=None, cadata=None):
+    """Create a SSLContext object for Python stdlib modules
+
+    All Python stdlib modules shall use this function to create SSLContext
+    objects in order to keep common settings in one place. The configuration
+    is less restrictive than create_default_context()'s to increase backward
+    compatibility.
+    """
+    if not isinstance(purpose, _ASN1Object):
+        raise TypeError(purpose)
+
+    # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION,
+    # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE
+    # by default.
+    if purpose == Purpose.SERVER_AUTH:
+        # verify certs and host name in client mode
+        if protocol is None:
+            protocol = PROTOCOL_TLS_CLIENT
+    elif purpose == Purpose.CLIENT_AUTH:
+        if protocol is None:
+            protocol = PROTOCOL_TLS_SERVER
+    else:
+        raise ValueError(purpose)
+
+    context = SSLContext(protocol)
+    context.check_hostname = check_hostname
+    if cert_reqs is not None:
+        context.verify_mode = cert_reqs
+    if check_hostname:
+        context.check_hostname = True
+
+    if keyfile and not certfile:
+        raise ValueError("certfile must be specified")
+    if certfile or keyfile:
+        context.load_cert_chain(certfile, keyfile)
+
+    # load CA root certs
+    if cafile or capath or cadata:
+        context.load_verify_locations(cafile, capath, cadata)
+    elif context.verify_mode != CERT_NONE:
+        # no explicit cafile, capath or cadata but the verify mode is
+        # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
+        # root CA certificates for the given purpose. This may fail silently.
+        context.load_default_certs(purpose)
+    # OpenSSL 1.1.1 keylog file
+    if hasattr(context, 'keylog_filename'):
+        keylogfile = os.environ.get('SSLKEYLOGFILE')
+        if keylogfile and not sys.flags.ignore_environment:
+            context.keylog_filename = keylogfile
+    return context
+
+# Used by http.client if no context is explicitly passed.
+_create_default_https_context = create_default_context
+
+
+# Backwards compatibility alias, even though it's not a public name.
+_create_stdlib_context = _create_unverified_context
+
+
+class SSLObject:
+    """This class implements an interface on top of a low-level SSL object as
+    implemented by OpenSSL. This object captures the state of an SSL connection
+    but does not provide any network IO itself. IO needs to be performed
+    through separate "BIO" objects which are OpenSSL's IO abstraction layer.
+
+    This class does not have a public constructor. Instances are returned by
+    ``SSLContext.wrap_bio``. This class is typically used by framework authors
+    that want to implement asynchronous IO for SSL through memory buffers.
+
+    When compared to ``SSLSocket``, this object lacks the following features:
+
+     * Any form of network IO, including methods such as ``recv`` and ``send``.
+     * The ``do_handshake_on_connect`` and ``suppress_ragged_eofs`` machinery.
+    """
+    def __init__(self, *args, **kwargs):
+        raise TypeError(
+            f"{self.__class__.__name__} does not have a public "
+            f"constructor. Instances are returned by SSLContext.wrap_bio()."
+        )
+
+    @classmethod
+    def _create(cls, incoming, outgoing, server_side=False,
+                server_hostname=None, session=None, context=None):
+        self = cls.__new__(cls)
+        sslobj = context._wrap_bio(
+            incoming, outgoing, server_side=server_side,
+            server_hostname=server_hostname,
+            owner=self, session=session
+        )
+        self._sslobj = sslobj
+        return self
+
+    @property
+    def context(self):
+        """The SSLContext that is currently in use."""
+        return self._sslobj.context
+
+    @context.setter
+    def context(self, ctx):
+        self._sslobj.context = ctx
+
+    @property
+    def session(self):
+        """The SSLSession for client socket."""
+        return self._sslobj.session
+
+    @session.setter
+    def session(self, session):
+        self._sslobj.session = session
+
+    @property
+    def session_reused(self):
+        """Was the client session reused during handshake"""
+        return self._sslobj.session_reused
+
+    @property
+    def server_side(self):
+        """Whether this is a server-side socket."""
+        return self._sslobj.server_side
+
+    @property
+    def server_hostname(self):
+        """The currently set server hostname (for SNI), or ``None`` if no
+        server hostname is set."""
+        return self._sslobj.server_hostname
+
+    def read(self, len=1024, buffer=None):
+        """Read up to 'len' bytes from the SSL object and return them.
+
+        If 'buffer' is provided, read into this buffer and return the number of
+        bytes read.
+        """
+        if buffer is not None:
+            v = self._sslobj.read(len, buffer)
+        else:
+            v = self._sslobj.read(len)
+        return v
+
+    def write(self, data):
+        """Write 'data' to the SSL object and return the number of bytes
+        written.
+
+        The 'data' argument must support the buffer interface.
+        """
+        return self._sslobj.write(data)
+
+    def getpeercert(self, binary_form=False):
+        """Returns a formatted version of the data in the certificate provided
+        by the other end of the SSL channel.
+
+        Return None if no certificate was provided, {} if a certificate was
+        provided, but not validated.
+        """
+        return self._sslobj.getpeercert(binary_form)
+
+    def get_verified_chain(self):
+        """Returns verified certificate chain provided by the other
+        end of the SSL channel as a list of DER-encoded bytes.
+
+        If certificate verification was disabled, this method acts the same
+        as ``SSLSocket.get_unverified_chain``.
+ """ + chain = self._sslobj.get_verified_chain() + + if chain is None: + return [] + + return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] + + def get_unverified_chain(self): + """Returns raw certificate chain provided by the other + end of the SSL channel as a list of DER-encoded bytes. + """ + chain = self._sslobj.get_unverified_chain() + + if chain is None: + return [] + + return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] + + def selected_npn_protocol(self): + """Return the currently selected NPN protocol as a string, or ``None`` + if a next protocol was not negotiated or if NPN is not supported by one + of the peers.""" + warnings.warn( + "ssl NPN is deprecated, use ALPN instead", + DeprecationWarning, + stacklevel=2 + ) + + def selected_alpn_protocol(self): + """Return the currently selected ALPN protocol as a string, or ``None`` + if a next protocol was not negotiated or if ALPN is not supported by one + of the peers.""" + return self._sslobj.selected_alpn_protocol() + + def cipher(self): + """Return the currently selected cipher as a 3-tuple ``(name, + ssl_version, secret_bits)``.""" + return self._sslobj.cipher() + + def shared_ciphers(self): + """Return a list of ciphers shared by the client during the handshake or + None if this is not a valid server connection. + """ + return self._sslobj.shared_ciphers() + + def compression(self): + """Return the current compression algorithm in use, or ``None`` if + compression was not negotiated or not supported by one of the peers.""" + return self._sslobj.compression() + + def pending(self): + """Return the number of bytes that can be read immediately.""" + return self._sslobj.pending() + + def do_handshake(self): + """Start the SSL/TLS handshake.""" + self._sslobj.do_handshake() + + def unwrap(self): + """Start the SSL shutdown handshake.""" + return self._sslobj.shutdown() + + def get_channel_binding(self, cb_type="tls-unique"): + """Get channel binding data for current connection. Raise ValueError + if the requested `cb_type` is not supported. Return bytes of the data + or None if the data is not available (e.g. before the handshake).""" + return self._sslobj.get_channel_binding(cb_type) + + def version(self): + """Return a string identifying the protocol version used by the + current SSL channel. """ + return self._sslobj.version() + + def verify_client_post_handshake(self): + return self._sslobj.verify_client_post_handshake() + + +def _sslcopydoc(func): + """Copy docstring from SSLObject to SSLSocket""" + func.__doc__ = getattr(SSLObject, func.__name__).__doc__ + return func + + +class SSLSocket(socket): + """This class implements a subtype of socket.socket that wraps + the underlying OS socket in an SSL context when necessary, and + provides read and write methods over that channel. """ + + def __init__(self, *args, **kwargs): + raise TypeError( + f"{self.__class__.__name__} does not have a public " + f"constructor. Instances are returned by " + f"SSLContext.wrap_socket()." 
+ ) + + @classmethod + def _create(cls, sock, server_side=False, do_handshake_on_connect=True, + suppress_ragged_eofs=True, server_hostname=None, + context=None, session=None): + if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM: + raise NotImplementedError("only stream sockets are supported") + if server_side: + if server_hostname: + raise ValueError("server_hostname can only be specified " + "in client mode") + if session is not None: + raise ValueError("session can only be specified in " + "client mode") + if context.check_hostname and not server_hostname: + raise ValueError("check_hostname requires server_hostname") + + sock_timeout = sock.gettimeout() + kwargs = dict( + family=sock.family, type=sock.type, proto=sock.proto, + fileno=sock.fileno() + ) + self = cls.__new__(cls, **kwargs) + super(SSLSocket, self).__init__(**kwargs) + sock.detach() + # Now SSLSocket is responsible for closing the file descriptor. + try: + self._context = context + self._session = session + self._closed = False + self._sslobj = None + self.server_side = server_side + self.server_hostname = context._encode_hostname(server_hostname) + self.do_handshake_on_connect = do_handshake_on_connect + self.suppress_ragged_eofs = suppress_ragged_eofs + + # See if we are connected + try: + self.getpeername() + except OSError as e: + if e.errno != errno.ENOTCONN: + raise + connected = False + blocking = self.getblocking() + self.setblocking(False) + try: + # We are not connected so this is not supposed to block, but + # testing revealed otherwise on macOS and Windows so we do + # the non-blocking dance regardless. Our raise when any data + # is found means consuming the data is harmless. + notconn_pre_handshake_data = self.recv(1) + except OSError as e: + # EINVAL occurs for recv(1) on non-connected on unix sockets. + if e.errno not in (errno.ENOTCONN, errno.EINVAL): + raise + notconn_pre_handshake_data = b'' + self.setblocking(blocking) + if notconn_pre_handshake_data: + # This prevents pending data sent to the socket before it was + # closed from escaping to the caller who could otherwise + # presume it came through a successful TLS connection. + reason = "Closed before TLS handshake with data in recv buffer." + notconn_pre_handshake_data_error = SSLError(e.errno, reason) + # Add the SSLError attributes that _ssl.c always adds. + notconn_pre_handshake_data_error.reason = reason + notconn_pre_handshake_data_error.library = None + try: + raise notconn_pre_handshake_data_error + finally: + # Explicitly break the reference cycle. + notconn_pre_handshake_data_error = None + else: + connected = True + + self.settimeout(sock_timeout) # Must come after setblocking() calls. 
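+            # If the OS socket was already connected, wrap it in the SSL
+            # object right away (optionally running the handshake below);
+            # otherwise wrapping is deferred until connect()/connect_ex().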
+ self._connected = connected + if connected: + # create the SSL object + self._sslobj = self._context._wrap_socket( + self, server_side, self.server_hostname, + owner=self, session=self._session, + ) + if do_handshake_on_connect: + timeout = self.gettimeout() + if timeout == 0.0: + # non-blocking + raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets") + self.do_handshake() + except: + try: + self.close() + except OSError: + pass + raise + return self + + @property + @_sslcopydoc + def context(self): + return self._context + + @context.setter + def context(self, ctx): + self._context = ctx + self._sslobj.context = ctx + + @property + @_sslcopydoc + def session(self): + if self._sslobj is not None: + return self._sslobj.session + + @session.setter + def session(self, session): + self._session = session + if self._sslobj is not None: + self._sslobj.session = session + + @property + @_sslcopydoc + def session_reused(self): + if self._sslobj is not None: + return self._sslobj.session_reused + + def dup(self): + raise NotImplementedError("Can't dup() %s instances" % + self.__class__.__name__) + + def _checkClosed(self, msg=None): + # raise an exception here if you wish to check for spurious closes + pass + + def _check_connected(self): + if not self._connected: + # getpeername() will raise ENOTCONN if the socket is really + # not connected; note that we can be connected even without + # _connected being set, e.g. if connect() first returned + # EAGAIN. + self.getpeername() + + def read(self, len=1024, buffer=None): + """Read up to LEN bytes and return them. + Return zero-length string on EOF.""" + + self._checkClosed() + if self._sslobj is None: + raise ValueError("Read on closed or unwrapped SSL socket.") + try: + if buffer is not None: + return self._sslobj.read(len, buffer) + else: + return self._sslobj.read(len) + except SSLError as x: + if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs: + if buffer is not None: + return 0 + else: + return b'' + else: + raise + + def write(self, data): + """Write DATA to the underlying SSL channel. 
Returns + number of bytes of DATA actually transmitted.""" + + self._checkClosed() + if self._sslobj is None: + raise ValueError("Write on closed or unwrapped SSL socket.") + return self._sslobj.write(data) + + @_sslcopydoc + def getpeercert(self, binary_form=False): + self._checkClosed() + self._check_connected() + return self._sslobj.getpeercert(binary_form) + + @_sslcopydoc + def get_verified_chain(self): + chain = self._sslobj.get_verified_chain() + + if chain is None: + return [] + + return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] + + @_sslcopydoc + def get_unverified_chain(self): + chain = self._sslobj.get_unverified_chain() + + if chain is None: + return [] + + return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain] + + @_sslcopydoc + def selected_npn_protocol(self): + self._checkClosed() + warnings.warn( + "ssl NPN is deprecated, use ALPN instead", + DeprecationWarning, + stacklevel=2 + ) + return None + + @_sslcopydoc + def selected_alpn_protocol(self): + self._checkClosed() + if self._sslobj is None or not _ssl.HAS_ALPN: + return None + else: + return self._sslobj.selected_alpn_protocol() + + @_sslcopydoc + def cipher(self): + self._checkClosed() + if self._sslobj is None: + return None + else: + return self._sslobj.cipher() + + @_sslcopydoc + def shared_ciphers(self): + self._checkClosed() + if self._sslobj is None: + return None + else: + return self._sslobj.shared_ciphers() + + @_sslcopydoc + def compression(self): + self._checkClosed() + if self._sslobj is None: + return None + else: + return self._sslobj.compression() + + def send(self, data, flags=0): + self._checkClosed() + if self._sslobj is not None: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to send() on %s" % + self.__class__) + return self._sslobj.write(data) + else: + return super().send(data, flags) + + def sendto(self, data, flags_or_addr, addr=None): + self._checkClosed() + if self._sslobj is not None: + raise ValueError("sendto not allowed on instances of %s" % + self.__class__) + elif addr is None: + return super().sendto(data, flags_or_addr) + else: + return super().sendto(data, flags_or_addr, addr) + + def sendmsg(self, *args, **kwargs): + # Ensure programs don't send data unencrypted if they try to + # use this method. + raise NotImplementedError("sendmsg not allowed on instances of %s" % + self.__class__) + + def sendall(self, data, flags=0): + self._checkClosed() + if self._sslobj is not None: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to sendall() on %s" % + self.__class__) + count = 0 + with memoryview(data) as view, view.cast("B") as byte_view: + amount = len(byte_view) + while count < amount: + v = self.send(byte_view[count:]) + count += v + else: + return super().sendall(data, flags) + + def sendfile(self, file, offset=0, count=None): + """Send a file, possibly by using os.sendfile() if this is a + clear-text socket. Return the total number of bytes sent. 
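+        On a TLS-wrapped socket the data is sent through regular send()
+        calls instead, since os.sendfile() works with plain sockets only.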
+ """ + if self._sslobj is not None: + return self._sendfile_use_send(file, offset, count) + else: + # os.sendfile() works with plain sockets only + return super().sendfile(file, offset, count) + + def recv(self, buflen=1024, flags=0): + self._checkClosed() + if self._sslobj is not None: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to recv() on %s" % + self.__class__) + return self.read(buflen) + else: + return super().recv(buflen, flags) + + def recv_into(self, buffer, nbytes=None, flags=0): + self._checkClosed() + if nbytes is None: + if buffer is not None: + with memoryview(buffer) as view: + nbytes = view.nbytes + if not nbytes: + nbytes = 1024 + else: + nbytes = 1024 + if self._sslobj is not None: + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to recv_into() on %s" % + self.__class__) + return self.read(nbytes, buffer) + else: + return super().recv_into(buffer, nbytes, flags) + + def recvfrom(self, buflen=1024, flags=0): + self._checkClosed() + if self._sslobj is not None: + raise ValueError("recvfrom not allowed on instances of %s" % + self.__class__) + else: + return super().recvfrom(buflen, flags) + + def recvfrom_into(self, buffer, nbytes=None, flags=0): + self._checkClosed() + if self._sslobj is not None: + raise ValueError("recvfrom_into not allowed on instances of %s" % + self.__class__) + else: + return super().recvfrom_into(buffer, nbytes, flags) + + def recvmsg(self, *args, **kwargs): + raise NotImplementedError("recvmsg not allowed on instances of %s" % + self.__class__) + + def recvmsg_into(self, *args, **kwargs): + raise NotImplementedError("recvmsg_into not allowed on instances of " + "%s" % self.__class__) + + @_sslcopydoc + def pending(self): + self._checkClosed() + if self._sslobj is not None: + return self._sslobj.pending() + else: + return 0 + + def shutdown(self, how): + self._checkClosed() + self._sslobj = None + super().shutdown(how) + + @_sslcopydoc + def unwrap(self): + if self._sslobj: + s = self._sslobj.shutdown() + self._sslobj = None + return s + else: + raise ValueError("No SSL wrapper around " + str(self)) + + @_sslcopydoc + def verify_client_post_handshake(self): + if self._sslobj: + return self._sslobj.verify_client_post_handshake() + else: + raise ValueError("No SSL wrapper around " + str(self)) + + def _real_close(self): + self._sslobj = None + super()._real_close() + + @_sslcopydoc + def do_handshake(self, block=False): + self._check_connected() + timeout = self.gettimeout() + try: + if timeout == 0.0 and block: + self.settimeout(None) + self._sslobj.do_handshake() + finally: + self.settimeout(timeout) + + def _real_connect(self, addr, connect_ex): + if self.server_side: + raise ValueError("can't connect in server-side mode") + # Here we assume that the socket is client-side, and not + # connected at the time of the call. We connect it, then wrap it. 
+ if self._connected or self._sslobj is not None: + raise ValueError("attempt to connect already-connected SSLSocket!") + self._sslobj = self.context._wrap_socket( + self, False, self.server_hostname, + owner=self, session=self._session + ) + try: + if connect_ex: + rc = super().connect_ex(addr) + else: + rc = None + super().connect(addr) + if not rc: + self._connected = True + if self.do_handshake_on_connect: + self.do_handshake() + return rc + except (OSError, ValueError): + self._sslobj = None + raise + + def connect(self, addr): + """Connects to remote ADDR, and then wraps the connection in + an SSL channel.""" + self._real_connect(addr, False) + + def connect_ex(self, addr): + """Connects to remote ADDR, and then wraps the connection in + an SSL channel.""" + return self._real_connect(addr, True) + + def accept(self): + """Accepts a new connection from a remote client, and returns + a tuple containing that new connection wrapped with a server-side + SSL channel, and the address of the remote client.""" + + newsock, addr = super().accept() + newsock = self.context.wrap_socket(newsock, + do_handshake_on_connect=self.do_handshake_on_connect, + suppress_ragged_eofs=self.suppress_ragged_eofs, + server_side=True) + return newsock, addr + + @_sslcopydoc + def get_channel_binding(self, cb_type="tls-unique"): + if self._sslobj is not None: + return self._sslobj.get_channel_binding(cb_type) + else: + if cb_type not in CHANNEL_BINDING_TYPES: + raise ValueError( + "{0} channel binding type not implemented".format(cb_type) + ) + return None + + @_sslcopydoc + def version(self): + if self._sslobj is not None: + return self._sslobj.version() + else: + return None + + +# Python does not support forward declaration of types. +SSLContext.sslsocket_class = SSLSocket +SSLContext.sslobject_class = SSLObject + + +# some utility functions + +def cert_time_to_seconds(cert_time): + """Return the time in seconds since the Epoch, given the timestring + representing the "notBefore" or "notAfter" date from a certificate + in ``"%b %d %H:%M:%S %Y %Z"`` strptime format (C locale). + + "notBefore" or "notAfter" dates must use UTC (RFC 5280). + + Month is one of: Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec + UTC should be specified as GMT (see ASN1_TIME_print()) + """ + from time import strptime + from calendar import timegm + + months = ( + "Jan","Feb","Mar","Apr","May","Jun", + "Jul","Aug","Sep","Oct","Nov","Dec" + ) + time_format = ' %d %H:%M:%S %Y GMT' # NOTE: no month, fixed GMT + try: + month_number = months.index(cert_time[:3].title()) + 1 + except ValueError: + raise ValueError('time data %r does not match ' + 'format "%%b%s"' % (cert_time, time_format)) + else: + # found valid month + tt = strptime(cert_time[3:], time_format) + # return an integer, the previous mktime()-based implementation + # returned a float (fractional seconds are always zero here). 
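+        # timegm() (unlike the previous mktime()) interprets the time
+        # tuple as UTC, matching the GMT-only format parsed above.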
+ return timegm((tt[0], month_number) + tt[2:6]) + +PEM_HEADER = "-----BEGIN CERTIFICATE-----" +PEM_FOOTER = "-----END CERTIFICATE-----" + +def DER_cert_to_PEM_cert(der_cert_bytes): + """Takes a certificate in binary DER format and returns the + PEM version of it as a string.""" + + f = str(base64.standard_b64encode(der_cert_bytes), 'ASCII', 'strict') + ss = [PEM_HEADER] + ss += [f[i:i+64] for i in range(0, len(f), 64)] + ss.append(PEM_FOOTER + '\n') + return '\n'.join(ss) + +def PEM_cert_to_DER_cert(pem_cert_string): + """Takes a certificate in ASCII PEM format and returns the + DER-encoded version of it as a byte sequence""" + + if not pem_cert_string.startswith(PEM_HEADER): + raise ValueError("Invalid PEM encoding; must start with %s" + % PEM_HEADER) + if not pem_cert_string.strip().endswith(PEM_FOOTER): + raise ValueError("Invalid PEM encoding; must end with %s" + % PEM_FOOTER) + d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)] + return base64.decodebytes(d.encode('ASCII', 'strict')) + +def get_server_certificate(addr, ssl_version=PROTOCOL_TLS_CLIENT, + ca_certs=None, timeout=_GLOBAL_DEFAULT_TIMEOUT): + """Retrieve the certificate from the server at the specified address, + and return it as a PEM-encoded string. + If 'ca_certs' is specified, validate the server cert against it. + If 'ssl_version' is specified, use it in the connection attempt. + If 'timeout' is specified, use it in the connection attempt. + """ + + host, port = addr + if ca_certs is not None: + cert_reqs = CERT_REQUIRED + else: + cert_reqs = CERT_NONE + context = _create_stdlib_context(ssl_version, + cert_reqs=cert_reqs, + cafile=ca_certs) + with create_connection(addr, timeout=timeout) as sock: + with context.wrap_socket(sock, server_hostname=host) as sslsock: + dercert = sslsock.getpeercert(True) + return DER_cert_to_PEM_cert(dercert) + +def get_protocol_name(protocol_code): + return _PROTOCOL_NAMES.get(protocol_code, '') diff --git a/Python313_13_x86_Template/Lib/stat.py b/Python314_4_x86_Template/Lib/stat.py similarity index 100% rename from Python313_13_x86_Template/Lib/stat.py rename to Python314_4_x86_Template/Lib/stat.py diff --git a/Python314_4_x86_Template/Lib/statistics.py b/Python314_4_x86_Template/Lib/statistics.py new file mode 100644 index 00000000..26cf9255 --- /dev/null +++ b/Python314_4_x86_Template/Lib/statistics.py @@ -0,0 +1,1879 @@ +""" +Basic statistics module. + +This module provides functions for calculating statistics of data, including +averages, variance, and standard deviation. + +Calculating averages +-------------------- + +================== ================================================== +Function Description +================== ================================================== +mean Arithmetic mean (average) of data. +fmean Fast, floating-point arithmetic mean. +geometric_mean Geometric mean of data. +harmonic_mean Harmonic mean of data. +median Median (middle value) of data. +median_low Low median of data. +median_high High median of data. +median_grouped Median, or 50th percentile, of grouped data. +mode Mode (most common value) of data. +multimode List of modes (most common values of data). +quantiles Divide data into intervals with equal probability. 
+================== ================================================== + +Calculate the arithmetic mean ("the average") of data: + +>>> mean([-1.0, 2.5, 3.25, 5.75]) +2.625 + + +Calculate the standard median of discrete data: + +>>> median([2, 3, 4, 5]) +3.5 + + +Calculate the median, or 50th percentile, of data grouped into class intervals +centred on the data values provided. E.g. if your data points are rounded to +the nearest whole number: + +>>> median_grouped([2, 2, 3, 3, 3, 4]) #doctest: +ELLIPSIS +2.8333333333... + +This should be interpreted in this way: you have two data points in the class +interval 1.5-2.5, three data points in the class interval 2.5-3.5, and one in +the class interval 3.5-4.5. The median of these data points is 2.8333... + + +Calculating variability or spread +--------------------------------- + +================== ============================================= +Function Description +================== ============================================= +pvariance Population variance of data. +variance Sample variance of data. +pstdev Population standard deviation of data. +stdev Sample standard deviation of data. +================== ============================================= + +Calculate the standard deviation of sample data: + +>>> stdev([2.5, 3.25, 5.5, 11.25, 11.75]) #doctest: +ELLIPSIS +4.38961843444... + +If you have previously calculated the mean, you can pass it as the optional +second argument to the four "spread" functions to avoid recalculating it: + +>>> data = [1, 2, 2, 4, 4, 4, 5, 6] +>>> mu = mean(data) +>>> pvariance(data, mu) +2.5 + + +Statistics for relations between two inputs +------------------------------------------- + +================== ==================================================== +Function Description +================== ==================================================== +covariance Sample covariance for two variables. +correlation Pearson's correlation coefficient for two variables. +linear_regression Intercept and slope for simple linear regression. +================== ==================================================== + +Calculate covariance, Pearson's correlation, and simple linear regression +for two inputs: + +>>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9] +>>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3] +>>> covariance(x, y) +0.75 +>>> correlation(x, y) #doctest: +ELLIPSIS +0.31622776601... +>>> linear_regression(x, y) #doctest: +LinearRegression(slope=0.1, intercept=1.5) + + +Exceptions +---------- + +A single exception is defined: StatisticsError is a subclass of ValueError. 
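+
+For example, requesting the mean of an empty dataset raises it:
+
+>>> mean([])
+Traceback (most recent call last):
+    ...
+statistics.StatisticsError: mean requires at least one data point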
+ +""" + +__all__ = [ + 'NormalDist', + 'StatisticsError', + 'correlation', + 'covariance', + 'fmean', + 'geometric_mean', + 'harmonic_mean', + 'kde', + 'kde_random', + 'linear_regression', + 'mean', + 'median', + 'median_grouped', + 'median_high', + 'median_low', + 'mode', + 'multimode', + 'pstdev', + 'pvariance', + 'quantiles', + 'stdev', + 'variance', +] + +import math +import numbers +import random +import sys + +from fractions import Fraction +from decimal import Decimal +from itertools import count, groupby, repeat +from bisect import bisect_left, bisect_right +from math import hypot, sqrt, fabs, exp, erfc, tau, log, fsum, sumprod +from math import isfinite, isinf, pi, cos, sin, tan, cosh, asin, atan, acos +from functools import reduce +from operator import itemgetter +from collections import Counter, namedtuple, defaultdict + +_SQRT2 = sqrt(2.0) +_random = random + +## Exceptions ############################################################## + +class StatisticsError(ValueError): + pass + + +## Measures of central tendency (averages) ################################# + +def mean(data): + """Return the sample arithmetic mean of data. + + >>> mean([1, 2, 3, 4, 4]) + 2.8 + + >>> from fractions import Fraction as F + >>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)]) + Fraction(13, 21) + + >>> from decimal import Decimal as D + >>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")]) + Decimal('0.5625') + + If ``data`` is empty, StatisticsError will be raised. + + """ + T, total, n = _sum(data) + if n < 1: + raise StatisticsError('mean requires at least one data point') + return _convert(total / n, T) + + +def fmean(data, weights=None): + """Convert data to floats and compute the arithmetic mean. + + This runs faster than the mean() function and it always returns a float. + If the input dataset is empty, it raises a StatisticsError. + + >>> fmean([3.5, 4.0, 5.25]) + 4.25 + + """ + if weights is None: + + try: + n = len(data) + except TypeError: + # Handle iterators that do not define __len__(). + counter = count() + total = fsum(map(itemgetter(0), zip(data, counter))) + n = next(counter) + else: + total = fsum(data) + + if not n: + raise StatisticsError('fmean requires at least one data point') + + return total / n + + if not isinstance(weights, (list, tuple)): + weights = list(weights) + + try: + num = sumprod(data, weights) + except ValueError: + raise StatisticsError('data and weights must be the same length') + + den = fsum(weights) + + if not den: + raise StatisticsError('sum of weights must be non-zero') + + return num / den + + +def geometric_mean(data): + """Convert data to floats and compute the geometric mean. + + Raises a StatisticsError if the input dataset is empty + or if it contains a negative value. + + Returns zero if the product of inputs is zero. + + No special efforts are made to achieve exact results. + (However, this may change in the future.) 
+ + >>> round(geometric_mean([54, 24, 36]), 9) + 36.0 + + """ + n = 0 + found_zero = False + + def count_positive(iterable): + nonlocal n, found_zero + for n, x in enumerate(iterable, start=1): + if x > 0.0 or math.isnan(x): + yield x + elif x == 0.0: + found_zero = True + else: + raise StatisticsError('No negative inputs allowed', x) + + total = fsum(map(log, count_positive(data))) + + if not n: + raise StatisticsError('Must have a non-empty dataset') + if math.isnan(total): + return math.nan + if found_zero: + return math.nan if total == math.inf else 0.0 + + return exp(total / n) + + +def harmonic_mean(data, weights=None): + """Return the harmonic mean of data. + + The harmonic mean is the reciprocal of the arithmetic mean of the + reciprocals of the data. It can be used for averaging ratios or + rates, for example speeds. + + Suppose a car travels 40 km/hr for 5 km and then speeds-up to + 60 km/hr for another 5 km. What is the average speed? + + >>> harmonic_mean([40, 60]) + 48.0 + + Suppose a car travels 40 km/hr for 5 km, and when traffic clears, + speeds-up to 60 km/hr for the remaining 30 km of the journey. What + is the average speed? + + >>> harmonic_mean([40, 60], weights=[5, 30]) + 56.0 + + If ``data`` is empty, or any element is less than zero, + ``harmonic_mean`` will raise ``StatisticsError``. + + """ + if iter(data) is data: + data = list(data) + + errmsg = 'harmonic mean does not support negative values' + + n = len(data) + if n < 1: + raise StatisticsError('harmonic_mean requires at least one data point') + elif n == 1 and weights is None: + x = data[0] + if isinstance(x, (numbers.Real, Decimal)): + if x < 0: + raise StatisticsError(errmsg) + return x + else: + raise TypeError('unsupported type') + + if weights is None: + weights = repeat(1, n) + sum_weights = n + else: + if iter(weights) is weights: + weights = list(weights) + if len(weights) != n: + raise StatisticsError('Number of weights does not match data size') + _, sum_weights, _ = _sum(w for w in _fail_neg(weights, errmsg)) + + try: + data = _fail_neg(data, errmsg) + T, total, count = _sum(w / x if w else 0 for w, x in zip(weights, data)) + except ZeroDivisionError: + return 0 + + if total <= 0: + raise StatisticsError('Weighted sum must be positive') + + return _convert(sum_weights / total, T) + + +def median(data): + """Return the median (middle value) of numeric data. + + When the number of data points is odd, return the middle data point. + When the number of data points is even, the median is interpolated by + taking the average of the two middle values: + + >>> median([1, 3, 5]) + 3 + >>> median([1, 3, 5, 7]) + 4.0 + + """ + data = sorted(data) + n = len(data) + if n == 0: + raise StatisticsError("no median for empty data") + if n % 2 == 1: + return data[n // 2] + else: + i = n // 2 + return (data[i - 1] + data[i]) / 2 + + +def median_low(data): + """Return the low median of numeric data. + + When the number of data points is odd, the middle value is returned. + When it is even, the smaller of the two middle values is returned. + + >>> median_low([1, 3, 5]) + 3 + >>> median_low([1, 3, 5, 7]) + 3 + + """ + # Potentially the sorting step could be replaced with a quickselect. + # However, it would require an excellent implementation to beat our + # highly optimized builtin sort. 
+    data = sorted(data)
+    n = len(data)
+    if n == 0:
+        raise StatisticsError("no median for empty data")
+    if n % 2 == 1:
+        return data[n // 2]
+    else:
+        return data[n // 2 - 1]
+
+
+def median_high(data):
+    """Return the high median of data.
+
+    When the number of data points is odd, the middle value is returned.
+    When it is even, the larger of the two middle values is returned.
+
+    >>> median_high([1, 3, 5])
+    3
+    >>> median_high([1, 3, 5, 7])
+    5
+
+    """
+    data = sorted(data)
+    n = len(data)
+    if n == 0:
+        raise StatisticsError("no median for empty data")
+    return data[n // 2]
+
+
+def median_grouped(data, interval=1.0):
+    """Estimates the median for numeric data binned around the midpoints
+    of consecutive, fixed-width intervals.
+
+    The *data* can be any iterable of numeric data with each value being
+    exactly the midpoint of a bin. At least one value must be present.
+
+    The *interval* is the width of each bin.
+
+    For example, demographic information may have been summarized into
+    consecutive ten-year age groups with each group being represented
+    by the 5-year midpoints of the intervals:
+
+    >>> demographics = Counter({
+    ...    25: 172,   # 20 to 30 years old
+    ...    35: 484,   # 30 to 40 years old
+    ...    45: 387,   # 40 to 50 years old
+    ...    55:  22,   # 50 to 60 years old
+    ...    65:   6,   # 60 to 70 years old
+    ... })
+
+    The 50th percentile (median) is the 536th person out of the 1071
+    member cohort. That person is in the 30 to 40 year old age group.
+
+    The regular median() function would assume that everyone in the
+    tricenarian age group was exactly 35 years old. A more tenable
+    assumption is that the 484 members of that age group are evenly
+    distributed between 30 and 40. For that, we use median_grouped().
+
+    >>> data = list(demographics.elements())
+    >>> median(data)
+    35
+    >>> round(median_grouped(data, interval=10), 1)
+    37.5
+
+    The caller is responsible for making sure the data points are separated
+    by exact multiples of *interval*. This is essential for getting a
+    correct result. The function does not check this precondition.
+
+    Inputs may be any numeric type that can be coerced to a float during
+    the interpolation step.
+
+    """
+    data = sorted(data)
+    n = len(data)
+    if not n:
+        raise StatisticsError("no median for empty data")
+
+    # Find the value at the midpoint. Remember this corresponds to the
+    # midpoint of the class interval.
+    x = data[n // 2]
+
+    # Using O(log n) bisection, find where all the x values occur in the data.
+    # All x will lie within data[i:j].
+    i = bisect_left(data, x)
+    j = bisect_right(data, x, lo=i)
+
+    # Coerce to floats, raising a TypeError if not possible
+    try:
+        interval = float(interval)
+        x = float(x)
+    except ValueError:
+        raise TypeError('Value cannot be converted to a float')
+
+    # Interpolate the median using the formula found at:
+    # https://www.cuemath.com/data/median-of-grouped-data/
+    L = x - interval / 2.0    # Lower limit of the median interval
+    cf = i                    # Cumulative frequency of the preceding interval
+    f = j - i                 # Number of elements in the median interval
+    return L + interval * (n / 2 - cf) / f
+
+
+def mode(data):
+    """Return the most common data point from discrete or nominal data.
+
+    ``mode`` assumes discrete data, and returns a single value.
+    This is the standard treatment of the mode as commonly taught in schools:
+
+    >>> mode([1, 1, 2, 3, 3, 3, 3, 4])
+    3
+
+    This also works with nominal (non-numeric) data:
+
+    >>> mode(["red", "blue", "blue", "red", "green", "red", "red"])
+    'red'
+
+    If there are multiple modes with the same frequency, return the first one
+    encountered:
+
+    >>> mode(['red', 'red', 'green', 'blue', 'blue'])
+    'red'
+
+    If *data* is empty, ``mode`` raises StatisticsError.
+
+    """
+    pairs = Counter(iter(data)).most_common(1)
+    try:
+        return pairs[0][0]
+    except IndexError:
+        raise StatisticsError('no mode for empty data') from None
+
+
+def multimode(data):
+    """Return a list of the most frequently occurring values.
+
+    Will return more than one result if there are multiple modes
+    or an empty list if *data* is empty.
+
+    >>> multimode('aabbbbbbbbcc')
+    ['b']
+    >>> multimode('aabbbbccddddeeffffgg')
+    ['b', 'd', 'f']
+    >>> multimode('')
+    []
+
+    """
+    counts = Counter(iter(data))
+    if not counts:
+        return []
+    maxcount = max(counts.values())
+    return [value for value, count in counts.items() if count == maxcount]
+
+
+## Measures of spread ######################################################
+
+def variance(data, xbar=None):
+    """Return the sample variance of data.
+
+    data should be an iterable of Real-valued numbers, with at least two
+    values. The optional argument xbar, if given, should be the mean of
+    the data. If it is missing or None, the mean is automatically calculated.
+
+    Use this function when your data is a sample from a population. To
+    calculate the variance from the entire population, see ``pvariance``.
+
+    Examples:
+
+    >>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
+    >>> variance(data)
+    1.3720238095238095
+
+    If you have already calculated the mean of your data, you can pass it as
+    the optional second argument ``xbar`` to avoid recalculating it:
+
+    >>> m = mean(data)
+    >>> variance(data, m)
+    1.3720238095238095
+
+    This function does not check that ``xbar`` is actually the mean of
+    ``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
+    impossible results.
+
+    Decimals and Fractions are supported:
+
+    >>> from decimal import Decimal as D
+    >>> variance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
+    Decimal('31.01875')
+
+    >>> from fractions import Fraction as F
+    >>> variance([F(1, 6), F(1, 2), F(5, 3)])
+    Fraction(67, 108)
+
+    """
+    # http://mathworld.wolfram.com/SampleVariance.html
+
+    T, ss, c, n = _ss(data, xbar)
+    if n < 2:
+        raise StatisticsError('variance requires at least two data points')
+    return _convert(ss / (n - 1), T)
+
+
+def pvariance(data, mu=None):
+    """Return the population variance of ``data``.
+
+    data should be a sequence or iterable of Real-valued numbers, with at
+    least one value. The optional argument mu, if given, should be the mean
+    of the data. If it is missing or None, the mean is automatically calculated.
+
+    Use this function to calculate the variance from the entire population.
+    To estimate the variance from a sample, the ``variance`` function is
+    usually a better choice.
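+
+    (``pstdev`` returns the square root of the value computed here.)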
+ + Examples: + + >>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25] + >>> pvariance(data) + 1.25 + + If you have already calculated the mean of the data, you can pass it as + the optional second argument to avoid recalculating it: + + >>> mu = mean(data) + >>> pvariance(data, mu) + 1.25 + + Decimals and Fractions are supported: + + >>> from decimal import Decimal as D + >>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")]) + Decimal('24.815') + + >>> from fractions import Fraction as F + >>> pvariance([F(1, 4), F(5, 4), F(1, 2)]) + Fraction(13, 72) + + """ + # http://mathworld.wolfram.com/Variance.html + + T, ss, c, n = _ss(data, mu) + if n < 1: + raise StatisticsError('pvariance requires at least one data point') + return _convert(ss / n, T) + + +def stdev(data, xbar=None): + """Return the square root of the sample variance. + + See ``variance`` for arguments and other details. + + >>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75]) + 1.0810874155219827 + + """ + T, ss, c, n = _ss(data, xbar) + if n < 2: + raise StatisticsError('stdev requires at least two data points') + mss = ss / (n - 1) + try: + mss_numerator = mss.numerator + mss_denominator = mss.denominator + except AttributeError: + raise ValueError('inf or nan encountered in data') + if issubclass(T, Decimal): + return _decimal_sqrt_of_frac(mss_numerator, mss_denominator) + return _float_sqrt_of_frac(mss_numerator, mss_denominator) + + +def pstdev(data, mu=None): + """Return the square root of the population variance. + + See ``pvariance`` for arguments and other details. + + >>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75]) + 0.986893273527251 + + """ + T, ss, c, n = _ss(data, mu) + if n < 1: + raise StatisticsError('pstdev requires at least one data point') + mss = ss / n + try: + mss_numerator = mss.numerator + mss_denominator = mss.denominator + except AttributeError: + raise ValueError('inf or nan encountered in data') + if issubclass(T, Decimal): + return _decimal_sqrt_of_frac(mss_numerator, mss_denominator) + return _float_sqrt_of_frac(mss_numerator, mss_denominator) + + +## Statistics for relations between two inputs ############################# + +def covariance(x, y, /): + """Covariance + + Return the sample covariance of two inputs *x* and *y*. Covariance + is a measure of the joint variability of two inputs. + + >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3] + >>> covariance(x, y) + 0.75 + >>> z = [9, 8, 7, 6, 5, 4, 3, 2, 1] + >>> covariance(x, z) + -7.5 + >>> covariance(z, x) + -7.5 + + """ + # https://en.wikipedia.org/wiki/Covariance + n = len(x) + if len(y) != n: + raise StatisticsError('covariance requires that both inputs have same number of data points') + if n < 2: + raise StatisticsError('covariance requires at least two data points') + xbar = fsum(x) / n + ybar = fsum(y) / n + sxy = sumprod((xi - xbar for xi in x), (yi - ybar for yi in y)) + return sxy / (n - 1) + + +def correlation(x, y, /, *, method='linear'): + """Pearson's correlation coefficient + + Return the Pearson's correlation coefficient for two inputs. Pearson's + correlation coefficient *r* takes values between -1 and +1. It measures + the strength and direction of a linear relationship. + + >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> y = [9, 8, 7, 6, 5, 4, 3, 2, 1] + >>> correlation(x, x) + 1.0 + >>> correlation(x, y) + -1.0 + + If *method* is "ranked", computes Spearman's rank correlation coefficient + for two inputs. The data is replaced by ranks. 
Ties are averaged + so that equal values receive the same rank. The resulting coefficient + measures the strength of a monotonic relationship. + + Spearman's rank correlation coefficient is appropriate for ordinal + data or for continuous data that doesn't meet the linear proportion + requirement for Pearson's correlation coefficient. + + """ + # https://en.wikipedia.org/wiki/Pearson_correlation_coefficient + # https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient + n = len(x) + if len(y) != n: + raise StatisticsError('correlation requires that both inputs have same number of data points') + if n < 2: + raise StatisticsError('correlation requires at least two data points') + if method not in {'linear', 'ranked'}: + raise ValueError(f'Unknown method: {method!r}') + + if method == 'ranked': + start = (n - 1) / -2 # Center rankings around zero + x = _rank(x, start=start) + y = _rank(y, start=start) + + else: + xbar = fsum(x) / n + ybar = fsum(y) / n + x = [xi - xbar for xi in x] + y = [yi - ybar for yi in y] + + sxy = sumprod(x, y) + sxx = sumprod(x, x) + syy = sumprod(y, y) + + try: + return sxy / _sqrtprod(sxx, syy) + except ZeroDivisionError: + raise StatisticsError('at least one of the inputs is constant') + + +LinearRegression = namedtuple('LinearRegression', ('slope', 'intercept')) + + +def linear_regression(x, y, /, *, proportional=False): + """Slope and intercept for simple linear regression. + + Return the slope and intercept of simple linear regression + parameters estimated using ordinary least squares. Simple linear + regression describes relationship between an independent variable + *x* and a dependent variable *y* in terms of a linear function: + + y = slope * x + intercept + noise + + where *slope* and *intercept* are the regression parameters that are + estimated, and noise represents the variability of the data that was + not explained by the linear regression (it is equal to the + difference between predicted and actual values of the dependent + variable). + + The parameters are returned as a named tuple. + + >>> x = [1, 2, 3, 4, 5] + >>> noise = NormalDist().samples(5, seed=42) + >>> y = [3 * x[i] + 2 + noise[i] for i in range(5)] + >>> linear_regression(x, y) #doctest: +ELLIPSIS + LinearRegression(slope=3.17495..., intercept=1.00925...) + + If *proportional* is true, the independent variable *x* and the + dependent variable *y* are assumed to be directly proportional. + The data is fit to a line passing through the origin. 
+ + Since the *intercept* will always be 0.0, the underlying linear + function simplifies to: + + y = slope * x + noise + + >>> y = [3 * x[i] + noise[i] for i in range(5)] + >>> linear_regression(x, y, proportional=True) #doctest: +ELLIPSIS + LinearRegression(slope=2.90475..., intercept=0.0) + + """ + # https://en.wikipedia.org/wiki/Simple_linear_regression + n = len(x) + if len(y) != n: + raise StatisticsError('linear regression requires that both inputs have same number of data points') + if n < 2: + raise StatisticsError('linear regression requires at least two data points') + + if not proportional: + xbar = fsum(x) / n + ybar = fsum(y) / n + x = [xi - xbar for xi in x] # List because used three times below + y = (yi - ybar for yi in y) # Generator because only used once below + + sxy = sumprod(x, y) + 0.0 # Add zero to coerce result to a float + sxx = sumprod(x, x) + + try: + slope = sxy / sxx # equivalent to: covariance(x, y) / variance(x) + except ZeroDivisionError: + raise StatisticsError('x is constant') + + intercept = 0.0 if proportional else ybar - slope * xbar + return LinearRegression(slope=slope, intercept=intercept) + + +## Kernel Density Estimation ############################################### + +_kernel_specs = {} + +def register(*kernels): + "Load the kernel's pdf, cdf, invcdf, and support into _kernel_specs." + def deco(builder): + spec = dict(zip(('pdf', 'cdf', 'invcdf', 'support'), builder())) + for kernel in kernels: + _kernel_specs[kernel] = spec + return builder + return deco + +@register('normal', 'gauss') +def normal_kernel(): + sqrt2pi = sqrt(2 * pi) + neg_sqrt2 = -sqrt(2) + pdf = lambda t: exp(-1/2 * t * t) / sqrt2pi + cdf = lambda t: 1/2 * erfc(t / neg_sqrt2) + invcdf = lambda t: _normal_dist_inv_cdf(t, 0.0, 1.0) + support = None + return pdf, cdf, invcdf, support + +@register('logistic') +def logistic_kernel(): + # 1.0 / (exp(t) + 2.0 + exp(-t)) + pdf = lambda t: 1/2 / (1.0 + cosh(t)) + cdf = lambda t: 1.0 - 1.0 / (exp(t) + 1.0) + invcdf = lambda p: log(p / (1.0 - p)) + support = None + return pdf, cdf, invcdf, support + +@register('sigmoid') +def sigmoid_kernel(): + # (2/pi) / (exp(t) + exp(-t)) + c1 = 1 / pi + c2 = 2 / pi + c3 = pi / 2 + pdf = lambda t: c1 / cosh(t) + cdf = lambda t: c2 * atan(exp(t)) + invcdf = lambda p: log(tan(p * c3)) + support = None + return pdf, cdf, invcdf, support + +@register('rectangular', 'uniform') +def rectangular_kernel(): + pdf = lambda t: 1/2 + cdf = lambda t: 1/2 * t + 1/2 + invcdf = lambda p: 2.0 * p - 1.0 + support = 1.0 + return pdf, cdf, invcdf, support + +@register('triangular') +def triangular_kernel(): + pdf = lambda t: 1.0 - abs(t) + cdf = lambda t: t*t * (1/2 if t < 0.0 else -1/2) + t + 1/2 + invcdf = lambda p: sqrt(2.0*p) - 1.0 if p < 1/2 else 1.0 - sqrt(2.0 - 2.0*p) + support = 1.0 + return pdf, cdf, invcdf, support + +@register('parabolic', 'epanechnikov') +def parabolic_kernel(): + pdf = lambda t: 3/4 * (1.0 - t * t) + cdf = lambda t: sumprod((-1/4, 3/4, 1/2), (t**3, t, 1.0)) + invcdf = lambda p: 2.0 * cos((acos(2.0*p - 1.0) + pi) / 3.0) + support = 1.0 + return pdf, cdf, invcdf, support + +def _newton_raphson(f_inv_estimate, f, f_prime, tolerance=1e-12): + def f_inv(y): + "Return x such that f(x) ≈ y within the specified tolerance." + x = f_inv_estimate(y) + while abs(diff := f(x) - y) > tolerance: + x -= diff / f_prime(x) + return x + return f_inv + +def _quartic_invcdf_estimate(p): + # A handrolled piecewise approximation. There is no magic here. 
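+    # The estimate only needs to be good enough to seed _newton_raphson(),
+    # which refines it against the exact cdf/pdf pair registered for the
+    # quartic kernel below.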
+ sign, p = (1.0, p) if p <= 1/2 else (-1.0, 1.0 - p) + if p < 0.0106: + return ((2.0 * p) ** 0.3838 - 1.0) * sign + x = (2.0 * p) ** 0.4258865685331 - 1.0 + if p < 0.499: + x += 0.026818732 * sin(7.101753784 * p + 2.73230839482953) + return x * sign + +@register('quartic', 'biweight') +def quartic_kernel(): + pdf = lambda t: 15/16 * (1.0 - t * t) ** 2 + cdf = lambda t: sumprod((3/16, -5/8, 15/16, 1/2), + (t**5, t**3, t, 1.0)) + invcdf = _newton_raphson(_quartic_invcdf_estimate, f=cdf, f_prime=pdf) + support = 1.0 + return pdf, cdf, invcdf, support + +def _triweight_invcdf_estimate(p): + # A handrolled piecewise approximation. There is no magic here. + sign, p = (1.0, p) if p <= 1/2 else (-1.0, 1.0 - p) + x = (2.0 * p) ** 0.3400218741872791 - 1.0 + if 0.00001 < p < 0.499: + x -= 0.033 * sin(1.07 * tau * (p - 0.035)) + return x * sign + +@register('triweight') +def triweight_kernel(): + pdf = lambda t: 35/32 * (1.0 - t * t) ** 3 + cdf = lambda t: sumprod((-5/32, 21/32, -35/32, 35/32, 1/2), + (t**7, t**5, t**3, t, 1.0)) + invcdf = _newton_raphson(_triweight_invcdf_estimate, f=cdf, f_prime=pdf) + support = 1.0 + return pdf, cdf, invcdf, support + +@register('cosine') +def cosine_kernel(): + c1 = pi / 4 + c2 = pi / 2 + pdf = lambda t: c1 * cos(c2 * t) + cdf = lambda t: 1/2 * sin(c2 * t) + 1/2 + invcdf = lambda p: 2.0 * asin(2.0 * p - 1.0) / pi + support = 1.0 + return pdf, cdf, invcdf, support + +del register, normal_kernel, logistic_kernel, sigmoid_kernel +del rectangular_kernel, triangular_kernel, parabolic_kernel +del quartic_kernel, triweight_kernel, cosine_kernel + + +def kde(data, h, kernel='normal', *, cumulative=False): + """Kernel Density Estimation: Create a continuous probability density + function or cumulative distribution function from discrete samples. + + The basic idea is to smooth the data using a kernel function + to help draw inferences about a population from a sample. + + The degree of smoothing is controlled by the scaling parameter h + which is called the bandwidth. Smaller values emphasize local + features while larger values give smoother results. + + The kernel determines the relative weights of the sample data + points. Generally, the choice of kernel shape does not matter + as much as the more influential bandwidth smoothing parameter. + + Kernels that give some weight to every sample point: + + normal (gauss) + logistic + sigmoid + + Kernels that only give weight to sample points within + the bandwidth: + + rectangular (uniform) + triangular + parabolic (epanechnikov) + quartic (biweight) + triweight + cosine + + If *cumulative* is true, will return a cumulative distribution function. + + A StatisticsError will be raised if the data sequence is empty. + + Example + ------- + + Given a sample of six data points, construct a continuous + function that estimates the underlying probability density: + + >>> sample = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2] + >>> f_hat = kde(sample, h=1.5) + + Compute the area under the curve: + + >>> area = sum(f_hat(x) for x in range(-20, 20)) + >>> round(area, 4) + 1.0 + + Plot the estimated probability density function at + evenly spaced points from -6 to 10: + + >>> for x in range(-6, 11): + ... density = f_hat(x) + ... plot = ' ' * int(density * 400) + 'x' + ... print(f'{x:2}: {density:.3f} {plot}') + ... 
+ -6: 0.002 x + -5: 0.009 x + -4: 0.031 x + -3: 0.070 x + -2: 0.111 x + -1: 0.125 x + 0: 0.110 x + 1: 0.086 x + 2: 0.068 x + 3: 0.059 x + 4: 0.066 x + 5: 0.082 x + 6: 0.082 x + 7: 0.058 x + 8: 0.028 x + 9: 0.009 x + 10: 0.002 x + + Estimate P(4.5 < X <= 7.5), the probability that a new sample value + will be between 4.5 and 7.5: + + >>> cdf = kde(sample, h=1.5, cumulative=True) + >>> round(cdf(7.5) - cdf(4.5), 2) + 0.22 + + References + ---------- + + Kernel density estimation and its application: + https://www.itm-conferences.org/articles/itmconf/pdf/2018/08/itmconf_sam2018_00037.pdf + + Kernel functions in common use: + https://en.wikipedia.org/wiki/Kernel_(statistics)#kernel_functions_in_common_use + + Interactive graphical demonstration and exploration: + https://demonstrations.wolfram.com/KernelDensityEstimation/ + + Kernel estimation of cumulative distribution function of a random variable with bounded support + https://www.econstor.eu/bitstream/10419/207829/1/10.21307_stattrans-2016-037.pdf + + """ + + n = len(data) + if not n: + raise StatisticsError('Empty data sequence') + + if not isinstance(data[0], (int, float)): + raise TypeError('Data sequence must contain ints or floats') + + if h <= 0.0: + raise StatisticsError(f'Bandwidth h must be positive, not {h=!r}') + + kernel_spec = _kernel_specs.get(kernel) + if kernel_spec is None: + raise StatisticsError(f'Unknown kernel name: {kernel!r}') + K = kernel_spec['pdf'] + W = kernel_spec['cdf'] + support = kernel_spec['support'] + + if support is None: + + def pdf(x): + return sum(K((x - x_i) / h) for x_i in data) / (len(data) * h) + + def cdf(x): + return sum(W((x - x_i) / h) for x_i in data) / len(data) + + else: + + sample = sorted(data) + bandwidth = h * support + + def pdf(x): + nonlocal n, sample + if len(data) != n: + sample = sorted(data) + n = len(data) + i = bisect_left(sample, x - bandwidth) + j = bisect_right(sample, x + bandwidth) + supported = sample[i : j] + return sum(K((x - x_i) / h) for x_i in supported) / (n * h) + + def cdf(x): + nonlocal n, sample + if len(data) != n: + sample = sorted(data) + n = len(data) + i = bisect_left(sample, x - bandwidth) + j = bisect_right(sample, x + bandwidth) + supported = sample[i : j] + return sum((W((x - x_i) / h) for x_i in supported), i) / n + + if cumulative: + cdf.__doc__ = f'CDF estimate with {h=!r} and {kernel=!r}' + return cdf + + else: + pdf.__doc__ = f'PDF estimate with {h=!r} and {kernel=!r}' + return pdf + + +def kde_random(data, h, kernel='normal', *, seed=None): + """Return a function that makes a random selection from the estimated + probability density function created by kde(data, h, kernel). + + Providing a *seed* allows reproducible selections within a single + thread. The seed may be an integer, float, str, or bytes. + + A StatisticsError will be raised if the *data* sequence is empty. 
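+
+    Internally, each call picks a random sample point and perturbs it with
+    kernel-shaped noise: ``choice(data) + h * invcdf(random())``.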
+
+    Example:
+
+        >>> data = [-2.1, -1.3, -0.4, 1.9, 5.1, 6.2]
+        >>> rand = kde_random(data, h=1.5, seed=8675309)
+        >>> new_selections = [rand() for i in range(10)]
+        >>> [round(x, 1) for x in new_selections]
+        [0.7, 6.2, 1.2, 6.9, 7.0, 1.8, 2.5, -0.5, -1.8, 5.6]
+
+    """
+    n = len(data)
+    if not n:
+        raise StatisticsError('Empty data sequence')
+
+    if not isinstance(data[0], (int, float)):
+        raise TypeError('Data sequence must contain ints or floats')
+
+    if h <= 0.0:
+        raise StatisticsError(f'Bandwidth h must be positive, not {h=!r}')
+
+    kernel_spec = _kernel_specs.get(kernel)
+    if kernel_spec is None:
+        raise StatisticsError(f'Unknown kernel name: {kernel!r}')
+    invcdf = kernel_spec['invcdf']
+
+    prng = _random.Random(seed)
+    random = prng.random
+    choice = prng.choice
+
+    def rand():
+        return choice(data) + h * invcdf(random())
+
+    rand.__doc__ = f'Random KDE selection with {h=!r} and {kernel=!r}'
+
+    return rand
+
+
+## Quantiles ###############################################################
+
+# There is no one perfect way to compute quantiles.  Here we offer
+# two methods that serve common needs.  Most other packages
+# surveyed offered at least one or both of these two, making them
+# "standard" in the sense of "widely-adopted and reproducible".
+# They are also easy to explain, easy to compute manually, and have
+# straight-forward interpretations that aren't surprising.
+
+# The default method is known as "R6", "PERCENTILE.EXC", or "expected
+# value of rank order statistics".  The alternative method is known as
+# "R7", "PERCENTILE.INC", or "mode of rank order statistics".
+
+# For sample data where there is a positive probability for values
+# beyond the range of the data, the R6 exclusive method is a
+# reasonable choice.  Consider a random sample of nine values from a
+# population with a uniform distribution from 0.0 to 1.0.  The
+# distribution of the third ranked sample point is described by
+# betavariate(alpha=3, beta=7) which has mode=0.250, median=0.286, and
+# mean=0.300.  Only the latter (which corresponds with R6) gives the
+# desired cut point with 30% of the population falling below that
+# value, making it comparable to a result from an inv_cdf() function.
+# The R6 exclusive method is also idempotent.
+
+# For describing population data where the end points are known to
+# be included in the data, the R7 inclusive method is a reasonable
+# choice.  Instead of the mean, it uses the mode of the beta
+# distribution for the interior points.  Per Hyndman & Fan, "One nice
+# property is that the vertices of Q7(p) divide the range into n - 1
+# intervals, and exactly 100p% of the intervals lie to the left of
+# Q7(p) and 100(1 - p)% of the intervals lie to the right of Q7(p)."
+
+# If needed, other methods could be added.  However, for now, the
+# position is that fewer options make for easier choices and that
+# external packages can be used for anything more advanced.
+
+def quantiles(data, *, n=4, method='exclusive'):
+    """Divide *data* into *n* continuous intervals with equal probability.
+
+    Returns a list of (n - 1) cut points separating the intervals.
+
+    Set *n* to 4 for quartiles (the default).  Set *n* to 10 for deciles.
+    Set *n* to 100 for percentiles which gives the 99 cut points that
+    separate *data* into 100 equal sized groups.
+
+    The *data* can be any iterable containing sample data.
+    The cut points are linearly interpolated between data points.
+
+    If *method* is set to *inclusive*, *data* is treated as population
+    data.  The minimum value is treated as the 0th percentile and the
+    maximum value is treated as the 100th percentile.
+
+    """
+    if n < 1:
+        raise StatisticsError('n must be at least 1')
+
+    data = sorted(data)
+
+    ld = len(data)
+    if ld < 2:
+        if ld == 1:
+            return data * (n - 1)
+        raise StatisticsError('must have at least one data point')
+
+    if method == 'inclusive':
+        m = ld - 1
+        result = []
+        for i in range(1, n):
+            j, delta = divmod(i * m, n)
+            interpolated = (data[j] * (n - delta) + data[j + 1] * delta) / n
+            result.append(interpolated)
+        return result
+
+    if method == 'exclusive':
+        m = ld + 1
+        result = []
+        for i in range(1, n):
+            j = i * m // n                               # rescale i to m/n
+            j = 1 if j < 1 else ld-1 if j > ld-1 else j  # clamp to 1 .. ld-1
+            delta = i*m - j*n                            # exact integer math
+            interpolated = (data[j - 1] * (n - delta) + data[j] * delta) / n
+            result.append(interpolated)
+        return result
+
+    raise ValueError(f'Unknown method: {method!r}')
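+
+# NOTE: illustrative sketch, not part of the upstream module.  A worked
+# example of the two methods for data = [1, 2, 3, 4, 5] and n=4: the
+# exclusive method interpolates beyond the extremes, while the inclusive
+# method treats them as the 0th and 100th percentiles:
+#
+#     >>> quantiles([1, 2, 3, 4, 5])                      # R6, the default
+#     [1.5, 3.0, 4.5]
+#     >>> quantiles([1, 2, 3, 4, 5], method='inclusive')  # R7
+#     [2.0, 3.0, 4.0]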
+
+
+## Normal Distribution #####################################################
+
+class NormalDist:
+    "Normal distribution of a random variable"
+    # https://en.wikipedia.org/wiki/Normal_distribution
+    # https://en.wikipedia.org/wiki/Variance#Properties
+
+    __slots__ = {
+        '_mu': 'Arithmetic mean of a normal distribution',
+        '_sigma': 'Standard deviation of a normal distribution',
+    }
+
+    def __init__(self, mu=0.0, sigma=1.0):
+        "NormalDist where mu is the mean and sigma is the standard deviation."
+        if sigma < 0.0:
+            raise StatisticsError('sigma must be non-negative')
+        self._mu = float(mu)
+        self._sigma = float(sigma)
+
+    @classmethod
+    def from_samples(cls, data):
+        "Make a normal distribution instance from sample data."
+        return cls(*_mean_stdev(data))
+
+    def samples(self, n, *, seed=None):
+        "Generate *n* samples for a given mean and standard deviation."
+        rnd = random.random if seed is None else random.Random(seed).random
+        inv_cdf = _normal_dist_inv_cdf
+        mu = self._mu
+        sigma = self._sigma
+        return [inv_cdf(rnd(), mu, sigma) for _ in repeat(None, n)]
+
+    def pdf(self, x):
+        "Probability density function.  P(x <= X < x+dx) / dx"
+        variance = self._sigma * self._sigma
+        if not variance:
+            raise StatisticsError('pdf() not defined when sigma is zero')
+        diff = x - self._mu
+        return exp(diff * diff / (-2.0 * variance)) / sqrt(tau * variance)
+
+    def cdf(self, x):
+        "Cumulative distribution function.  P(X <= x)"
+        if not self._sigma:
+            raise StatisticsError('cdf() not defined when sigma is zero')
+        return 0.5 * erfc((self._mu - x) / (self._sigma * _SQRT2))
+
+    def inv_cdf(self, p):
+        """Inverse cumulative distribution function.  x : P(X <= x) = p
+
+        Finds the value of the random variable such that the probability of
+        the variable being less than or equal to that value equals the given
+        probability.
+
+        This function is also called the percent point function or quantile
+        function.
+        """
+        if p <= 0.0 or p >= 1.0:
+            raise StatisticsError('p must be in the range 0.0 < p < 1.0')
+        return _normal_dist_inv_cdf(p, self._mu, self._sigma)
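+
+    # NOTE: illustrative sketch, not part of the upstream module.  cdf()
+    # and inv_cdf() are inverses of one another, e.g. for the standard
+    # normal distribution:
+    #
+    #     >>> Z = NormalDist()          # mu=0.0, sigma=1.0
+    #     >>> Z.inv_cdf(0.5)
+    #     0.0
+    #     >>> round(Z.cdf(Z.inv_cdf(0.975)), 3)
+    #     0.975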
+
+    def quantiles(self, n=4):
+        """Divide into *n* continuous intervals with equal probability.
+
+        Returns a list of (n - 1) cut points separating the intervals.
+
+        Set *n* to 4 for quartiles (the default).  Set *n* to 10 for deciles.
+        Set *n* to 100 for percentiles which gives the 99 cut points that
+        separate the normal distribution into 100 equal sized groups.
+        """
+        return [self.inv_cdf(i / n) for i in range(1, n)]
+
+    def overlap(self, other):
+        """Compute the overlapping coefficient (OVL) between two normal distributions.
+
+        Measures the agreement between two normal probability distributions.
+        Returns a value between 0.0 and 1.0 giving the overlapping area in
+        the two underlying probability density functions.
+
+        >>> N1 = NormalDist(2.4, 1.6)
+        >>> N2 = NormalDist(3.2, 2.0)
+        >>> N1.overlap(N2)
+        0.8035050657330205
+        """
+        # See: "The overlapping coefficient as a measure of agreement between
+        # probability distributions and point estimation of the overlap of two
+        # normal densities" -- Henry F. Inman and Edwin L. Bradley Jr
+        # http://dx.doi.org/10.1080/03610928908830127
+        if not isinstance(other, NormalDist):
+            raise TypeError('Expected another NormalDist instance')
+        X, Y = self, other
+        if (Y._sigma, Y._mu) < (X._sigma, X._mu):  # sort to assure commutativity
+            X, Y = Y, X
+        X_var, Y_var = X.variance, Y.variance
+        if not X_var or not Y_var:
+            raise StatisticsError('overlap() not defined when sigma is zero')
+        dv = Y_var - X_var
+        dm = fabs(Y._mu - X._mu)
+        if not dv:
+            return erfc(dm / (2.0 * X._sigma * _SQRT2))
+        a = X._mu * Y_var - Y._mu * X_var
+        b = X._sigma * Y._sigma * sqrt(dm * dm + dv * log(Y_var / X_var))
+        x1 = (a + b) / dv
+        x2 = (a - b) / dv
+        return 1.0 - (fabs(Y.cdf(x1) - X.cdf(x1)) + fabs(Y.cdf(x2) - X.cdf(x2)))
+
+    def zscore(self, x):
+        """Compute the Standard Score.  (x - mean) / stdev
+
+        Describes *x* in terms of the number of standard deviations
+        above or below the mean of the normal distribution.
+        """
+        # https://www.statisticshowto.com/probability-and-statistics/z-score/
+        if not self._sigma:
+            raise StatisticsError('zscore() not defined when sigma is zero')
+        return (x - self._mu) / self._sigma
+
+    @property
+    def mean(self):
+        "Arithmetic mean of the normal distribution."
+        return self._mu
+
+    @property
+    def median(self):
+        "Return the median of the normal distribution"
+        return self._mu
+
+    @property
+    def mode(self):
+        """Return the mode of the normal distribution
+
+        The mode is the value x where the probability density
+        function (pdf) takes its maximum value.
+        """
+        return self._mu
+
+    @property
+    def stdev(self):
+        "Standard deviation of the normal distribution."
+        return self._sigma
+
+    @property
+    def variance(self):
+        "Square of the standard deviation."
+        return self._sigma * self._sigma
+
+    def __add__(x1, x2):
+        """Add a constant or another NormalDist instance.
+
+        If *other* is a constant, translate mu by the constant,
+        leaving sigma unchanged.
+
+        If *other* is a NormalDist, add both the means and the variances.
+        Mathematically, this works only if the two distributions are
+        independent or if they are jointly normally distributed.
+        """
+        if isinstance(x2, NormalDist):
+            return NormalDist(x1._mu + x2._mu, hypot(x1._sigma, x2._sigma))
+        return NormalDist(x1._mu + x2, x1._sigma)
+
+    def __sub__(x1, x2):
+        """Subtract a constant or another NormalDist instance.
+
+        If *other* is a constant, translate mu by the constant,
+        leaving sigma unchanged.
+
+        If *other* is a NormalDist, subtract the means and add the variances.
+        Mathematically, this works only if the two distributions are
+        independent or if they are jointly normally distributed.
+        """
+        if isinstance(x2, NormalDist):
+            return NormalDist(x1._mu - x2._mu, hypot(x1._sigma, x2._sigma))
+        return NormalDist(x1._mu - x2, x1._sigma)
+
+    def __mul__(x1, x2):
+        """Multiply both mu and sigma by a constant.
+
+        Used for rescaling, perhaps to change measurement units.
+        Sigma is scaled with the absolute value of the constant.
+        """
+        return NormalDist(x1._mu * x2, x1._sigma * fabs(x2))
+
+    def __truediv__(x1, x2):
+        """Divide both mu and sigma by a constant.
+
+        Used for rescaling, perhaps to change measurement units.
+        Sigma is scaled with the absolute value of the constant.
+        """
+        return NormalDist(x1._mu / x2, x1._sigma / fabs(x2))
+
+    def __pos__(x1):
+        "Return a copy of the instance."
+        return NormalDist(x1._mu, x1._sigma)
+
+    def __neg__(x1):
+        "Negates mu while keeping sigma the same."
+        return NormalDist(-x1._mu, x1._sigma)
+
+    __radd__ = __add__
+
+    def __rsub__(x1, x2):
+        "Subtract a NormalDist from a constant or another NormalDist."
+        return -(x1 - x2)
+
+    __rmul__ = __mul__
+
+    def __eq__(x1, x2):
+        "Two NormalDist objects are equal if their mu and sigma are both equal."
+        if not isinstance(x2, NormalDist):
+            return NotImplemented
+        return x1._mu == x2._mu and x1._sigma == x2._sigma
+
+    def __hash__(self):
+        "NormalDist objects hash equal if their mu and sigma are both equal."
+        return hash((self._mu, self._sigma))
+
+    def __repr__(self):
+        return f'{type(self).__name__}(mu={self._mu!r}, sigma={self._sigma!r})'
+
+    def __getstate__(self):
+        return self._mu, self._sigma
+
+    def __setstate__(self, state):
+        self._mu, self._sigma = state
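+
+# NOTE: illustrative sketch, not part of the upstream module.  NormalDist
+# supports constant shifts and rescaling of independent random variables:
+#
+#     >>> exam = NormalDist(mu=100, sigma=15)
+#     >>> curved = exam + 5                  # shift mu, sigma unchanged
+#     >>> (curved.mean, curved.stdev)
+#     (105.0, 15.0)
+#     >>> exam.zscore(130)
+#     2.0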
+
+
+## Private utilities #######################################################
+
+def _sum(data):
+    """_sum(data) -> (type, sum, count)
+
+    Return a high-precision sum of the given numeric data as a fraction,
+    together with the type to be converted to and the count of items.
+
+    Examples
+    --------
+
+    >>> _sum([3, 2.25, 4.5, -0.5, 0.25])
+    (<class 'float'>, Fraction(19, 2), 5)
+
+    Some sources of round-off error will be avoided:
+
+    # Built-in sum returns zero.
+    >>> _sum([1e50, 1, -1e50] * 1000)
+    (<class 'float'>, Fraction(1000, 1), 3000)
+
+    Fractions and Decimals are also supported:
+
+    >>> from fractions import Fraction as F
+    >>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)])
+    (<class 'fractions.Fraction'>, Fraction(63, 20), 4)
+
+    >>> from decimal import Decimal as D
+    >>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")]
+    >>> _sum(data)
+    (<class 'decimal.Decimal'>, Fraction(6963, 10000), 4)
+
+    Mixed types are currently treated as an error, except that int is
+    allowed.
+
+    """
+    count = 0
+    types = set()
+    types_add = types.add
+    partials = {}
+    partials_get = partials.get
+
+    for typ, values in groupby(data, type):
+        types_add(typ)
+        for n, d in map(_exact_ratio, values):
+            count += 1
+            partials[d] = partials_get(d, 0) + n
+
+    if None in partials:
+        # The sum will be a NAN or INF. We can ignore all the finite
+        # partials, and just look at this special one.
+        total = partials[None]
+        assert not _isfinite(total)
+    else:
+        # Sum all the partial sums using builtin sum.
+        total = sum(Fraction(n, d) for d, n in partials.items())
+
+    T = reduce(_coerce, types, int)  # or raise TypeError
+    return (T, total, count)
+
+
+def _ss(data, c=None):
+    """Return the exact mean and sum of square deviations of sequence data.
+
+    Calculations are done in a single pass, allowing the input to be an iterator.
+
+    If given, *c* is used as the mean; otherwise, the mean is calculated
+    from the data.  Use the *c* argument with care, as it can lead to
+    garbage results.
+ + """ + if c is not None: + T, ssd, count = _sum((d := x - c) * d for x in data) + return (T, ssd, c, count) + + count = 0 + types = set() + types_add = types.add + sx_partials = defaultdict(int) + sxx_partials = defaultdict(int) + + for typ, values in groupby(data, type): + types_add(typ) + for n, d in map(_exact_ratio, values): + count += 1 + sx_partials[d] += n + sxx_partials[d] += n * n + + if not count: + ssd = c = Fraction(0) + + elif None in sx_partials: + # The sum will be a NAN or INF. We can ignore all the finite + # partials, and just look at this special one. + ssd = c = sx_partials[None] + assert not _isfinite(ssd) + + else: + sx = sum(Fraction(n, d) for d, n in sx_partials.items()) + sxx = sum(Fraction(n, d*d) for d, n in sxx_partials.items()) + # This formula has poor numeric properties for floats, + # but with fractions it is exact. + ssd = (count * sxx - sx * sx) / count + c = sx / count + + T = reduce(_coerce, types, int) # or raise TypeError + return (T, ssd, c, count) + + +def _isfinite(x): + try: + return x.is_finite() # Likely a Decimal. + except AttributeError: + return math.isfinite(x) # Coerces to float first. + + +def _coerce(T, S): + """Coerce types T and S to a common type, or raise TypeError. + + Coercion rules are currently an implementation detail. See the CoerceTest + test class in test_statistics for details. + + """ + # See http://bugs.python.org/issue24068. + assert T is not bool, "initial type T is bool" + # If the types are the same, no need to coerce anything. Put this + # first, so that the usual case (no coercion needed) happens as soon + # as possible. + if T is S: return T + # Mixed int & other coerce to the other type. + if S is int or S is bool: return T + if T is int: return S + # If one is a (strict) subclass of the other, coerce to the subclass. + if issubclass(S, T): return S + if issubclass(T, S): return T + # Ints coerce to the other type. + if issubclass(T, int): return S + if issubclass(S, int): return T + # Mixed fraction & float coerces to float (or float subclass). + if issubclass(T, Fraction) and issubclass(S, float): + return S + if issubclass(T, float) and issubclass(S, Fraction): + return T + # Any other combination is disallowed. + msg = "don't know how to coerce %s and %s" + raise TypeError(msg % (T.__name__, S.__name__)) + + +def _exact_ratio(x): + """Return Real number x to exact (numerator, denominator) pair. + + >>> _exact_ratio(0.25) + (1, 4) + + x is expected to be an int, Fraction, Decimal or float. + + """ + try: + return x.as_integer_ratio() + except AttributeError: + pass + except (OverflowError, ValueError): + # float NAN or INF. + assert not _isfinite(x) + return (x, None) + + try: + # x may be an Integral ABC. + return (x.numerator, x.denominator) + except AttributeError: + msg = f"can't convert type '{type(x).__name__}' to numerator/denominator" + raise TypeError(msg) + + +def _convert(value, T): + """Convert value to given numeric type T.""" + if type(value) is T: + # This covers the cases where T is Fraction, or where value is + # a NAN or INF (Decimal or float). + return value + + if issubclass(T, int) and value.denominator != 1: + T = float + + try: + # FIXME: what do we do if this overflows? 
+ return T(value) + except TypeError: + if issubclass(T, Decimal): + return T(value.numerator) / T(value.denominator) + else: + raise + + +def _fail_neg(values, errmsg='negative value'): + """Iterate over values, failing if any are less than zero.""" + for x in values: + if x < 0: + raise StatisticsError(errmsg) + yield x + + +def _rank(data, /, *, key=None, reverse=False, ties='average', start=1) -> list[float]: + """Rank order a dataset. The lowest value has rank 1. + + Ties are averaged so that equal values receive the same rank: + + >>> data = [31, 56, 31, 25, 75, 18] + >>> _rank(data) + [3.5, 5.0, 3.5, 2.0, 6.0, 1.0] + + The operation is idempotent: + + >>> _rank([3.5, 5.0, 3.5, 2.0, 6.0, 1.0]) + [3.5, 5.0, 3.5, 2.0, 6.0, 1.0] + + It is possible to rank the data in reverse order so that the + highest value has rank 1. Also, a key-function can extract + the field to be ranked: + + >>> goals = [('eagles', 45), ('bears', 48), ('lions', 44)] + >>> _rank(goals, key=itemgetter(1), reverse=True) + [2.0, 1.0, 3.0] + + Ranks are conventionally numbered starting from one; however, + setting *start* to zero allows the ranks to be used as array indices: + + >>> prize = ['Gold', 'Silver', 'Bronze', 'Certificate'] + >>> scores = [8.1, 7.3, 9.4, 8.3] + >>> [prize[int(i)] for i in _rank(scores, start=0, reverse=True)] + ['Bronze', 'Certificate', 'Gold', 'Silver'] + + """ + # If this function becomes public at some point, more thought + # needs to be given to the signature. A list of ints is + # plausible when ties is "min" or "max". When ties is "average", + # either list[float] or list[Fraction] is plausible. + + # Default handling of ties matches scipy.stats.mstats.spearmanr. + if ties != 'average': + raise ValueError(f'Unknown tie resolution method: {ties!r}') + if key is not None: + data = map(key, data) + val_pos = sorted(zip(data, count()), reverse=reverse) + i = start - 1 + result = [0] * len(val_pos) + for _, g in groupby(val_pos, key=itemgetter(0)): + group = list(g) + size = len(group) + rank = i + (size + 1) / 2 + for value, orig_pos in group: + result[orig_pos] = rank + i += size + return result + + +def _integer_sqrt_of_frac_rto(n: int, m: int) -> int: + """Square root of n/m, rounded to the nearest integer using round-to-odd.""" + # Reference: https://www.lri.fr/~melquion/doc/05-imacs17_1-expose.pdf + a = math.isqrt(n // m) + return a | (a*a*m != n) + + +# For 53 bit precision floats, the bit width used in +# _float_sqrt_of_frac() is 109. +_sqrt_bit_width: int = 2 * sys.float_info.mant_dig + 3 + + +def _float_sqrt_of_frac(n: int, m: int) -> float: + """Square root of n/m as a float, correctly rounded.""" + # See principle and proof sketch at: https://bugs.python.org/msg407078 + q = (n.bit_length() - m.bit_length() - _sqrt_bit_width) // 2 + if q >= 0: + numerator = _integer_sqrt_of_frac_rto(n, m << 2 * q) << q + denominator = 1 + else: + numerator = _integer_sqrt_of_frac_rto(n << -2 * q, m) + denominator = 1 << -q + return numerator / denominator # Convert to float + + +def _decimal_sqrt_of_frac(n: int, m: int) -> Decimal: + """Square root of n/m as a Decimal, correctly rounded.""" + # Premise: For decimal, computing (n/m).sqrt() can be off + # by 1 ulp from the correctly rounded result. + # Method: Check the result, moving up or down a step if needed. 
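+    #
+    # NOTE: illustrative sketch, not part of the upstream module.  With the
+    # default context precision of 28, Decimal(2).sqrt() evaluates to
+    # Decimal('1.414213562373095048801688724'); the exact-integer midpoint
+    # tests below either keep that value or step to next_plus()/next_minus()
+    # so the returned root is correctly rounded for the fraction n/m.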
+ if n <= 0: + if not n: + return Decimal('0.0') + n, m = -n, -m + + root = (Decimal(n) / Decimal(m)).sqrt() + nr, dr = root.as_integer_ratio() + + plus = root.next_plus() + np, dp = plus.as_integer_ratio() + # test: n / m > ((root + plus) / 2) ** 2 + if 4 * n * (dr*dp)**2 > m * (dr*np + dp*nr)**2: + return plus + + minus = root.next_minus() + nm, dm = minus.as_integer_ratio() + # test: n / m < ((root + minus) / 2) ** 2 + if 4 * n * (dr*dm)**2 < m * (dr*nm + dm*nr)**2: + return minus + + return root + + +def _mean_stdev(data): + """In one pass, compute the mean and sample standard deviation as floats.""" + T, ss, xbar, n = _ss(data) + if n < 2: + raise StatisticsError('stdev requires at least two data points') + mss = ss / (n - 1) + try: + return float(xbar), _float_sqrt_of_frac(mss.numerator, mss.denominator) + except AttributeError: + # Handle Nans and Infs gracefully + return float(xbar), float(xbar) / float(ss) + + +def _sqrtprod(x: float, y: float) -> float: + "Return sqrt(x * y) computed with improved accuracy and without overflow/underflow." + + h = sqrt(x * y) + + if not isfinite(h): + if isinf(h) and not isinf(x) and not isinf(y): + # Finite inputs overflowed, so scale down, and recompute. + scale = 2.0 ** -512 # sqrt(1 / sys.float_info.max) + return _sqrtprod(scale * x, scale * y) / scale + return h + + if not h: + if x and y: + # Non-zero inputs underflowed, so scale up, and recompute. + # Scale: 1 / sqrt(sys.float_info.min * sys.float_info.epsilon) + scale = 2.0 ** 537 + return _sqrtprod(scale * x, scale * y) / scale + return h + + # Improve accuracy with a differential correction. + # https://www.wolframalpha.com/input/?i=Maclaurin+series+sqrt%28h**2+%2B+x%29+at+x%3D0 + d = sumprod((x, h), (y, -h)) + return h + d / (2.0 * h) + + +def _normal_dist_inv_cdf(p, mu, sigma): + # There is no closed-form solution to the inverse CDF for the normal + # distribution, so we use a rational approximation instead: + # Wichura, M.J. (1988). "Algorithm AS241: The Percentage Points of the + # Normal Distribution". Applied Statistics. Blackwell Publishing. 37 + # (3): 477–484. doi:10.2307/2347330. JSTOR 2347330. 
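+    #
+    # NOTE: illustrative sketch, not part of the upstream module.  The code
+    # below selects one of three rational approximations: a central fit for
+    # |p - 0.5| <= 0.425 and two progressively farther tail fits.  As a
+    # spot check, _normal_dist_inv_cdf(0.975, 0.0, 1.0) should come out
+    # near 1.96, the familiar two-sided 95% z value.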
+ q = p - 0.5 + + if fabs(q) <= 0.425: + r = 0.180625 - q * q + # Hash sum: 55.88319_28806_14901_4439 + num = (((((((2.50908_09287_30122_6727e+3 * r + + 3.34305_75583_58812_8105e+4) * r + + 6.72657_70927_00870_0853e+4) * r + + 4.59219_53931_54987_1457e+4) * r + + 1.37316_93765_50946_1125e+4) * r + + 1.97159_09503_06551_4427e+3) * r + + 1.33141_66789_17843_7745e+2) * r + + 3.38713_28727_96366_6080e+0) * q + den = (((((((5.22649_52788_52854_5610e+3 * r + + 2.87290_85735_72194_2674e+4) * r + + 3.93078_95800_09271_0610e+4) * r + + 2.12137_94301_58659_5867e+4) * r + + 5.39419_60214_24751_1077e+3) * r + + 6.87187_00749_20579_0830e+2) * r + + 4.23133_30701_60091_1252e+1) * r + + 1.0) + x = num / den + return mu + (x * sigma) + + r = p if q <= 0.0 else 1.0 - p + r = sqrt(-log(r)) + if r <= 5.0: + r = r - 1.6 + # Hash sum: 49.33206_50330_16102_89036 + num = (((((((7.74545_01427_83414_07640e-4 * r + + 2.27238_44989_26918_45833e-2) * r + + 2.41780_72517_74506_11770e-1) * r + + 1.27045_82524_52368_38258e+0) * r + + 3.64784_83247_63204_60504e+0) * r + + 5.76949_72214_60691_40550e+0) * r + + 4.63033_78461_56545_29590e+0) * r + + 1.42343_71107_49683_57734e+0) + den = (((((((1.05075_00716_44416_84324e-9 * r + + 5.47593_80849_95344_94600e-4) * r + + 1.51986_66563_61645_71966e-2) * r + + 1.48103_97642_74800_74590e-1) * r + + 6.89767_33498_51000_04550e-1) * r + + 1.67638_48301_83803_84940e+0) * r + + 2.05319_16266_37758_82187e+0) * r + + 1.0) + else: + r = r - 5.0 + # Hash sum: 47.52583_31754_92896_71629 + num = (((((((2.01033_43992_92288_13265e-7 * r + + 2.71155_55687_43487_57815e-5) * r + + 1.24266_09473_88078_43860e-3) * r + + 2.65321_89526_57612_30930e-2) * r + + 2.96560_57182_85048_91230e-1) * r + + 1.78482_65399_17291_33580e+0) * r + + 5.46378_49111_64114_36990e+0) * r + + 6.65790_46435_01103_77720e+0) + den = (((((((2.04426_31033_89939_78564e-15 * r + + 1.42151_17583_16445_88870e-7) * r + + 1.84631_83175_10054_68180e-5) * r + + 7.86869_13114_56132_59100e-4) * r + + 1.48753_61290_85061_48525e-2) * r + + 1.36929_88092_27358_05310e-1) * r + + 5.99832_20655_58879_37690e-1) * r + + 1.0) + + x = num / den + if q < 0.0: + x = -x + + return mu + (x * sigma) + + +# If available, use C implementation +try: + from _statistics import _normal_dist_inv_cdf +except ImportError: + pass diff --git a/Python314_4_x86_Template/Lib/string/__init__.py b/Python314_4_x86_Template/Lib/string/__init__.py new file mode 100644 index 00000000..eab5067c --- /dev/null +++ b/Python314_4_x86_Template/Lib/string/__init__.py @@ -0,0 +1,325 @@ +"""A collection of string constants. 
+ +Public module variables: + +whitespace -- a string containing all ASCII whitespace +ascii_lowercase -- a string containing all ASCII lowercase letters +ascii_uppercase -- a string containing all ASCII uppercase letters +ascii_letters -- a string containing all ASCII letters +digits -- a string containing all ASCII decimal digits +hexdigits -- a string containing all ASCII hexadecimal digits +octdigits -- a string containing all ASCII octal digits +punctuation -- a string containing all ASCII punctuation characters +printable -- a string containing all ASCII characters considered printable + +""" + +__all__ = ["ascii_letters", "ascii_lowercase", "ascii_uppercase", "capwords", + "digits", "hexdigits", "octdigits", "printable", "punctuation", + "whitespace", "Formatter", "Template"] + +import _string + +# Some strings for ctype-style character classification +whitespace = ' \t\n\r\v\f' +ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz' +ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' +ascii_letters = ascii_lowercase + ascii_uppercase +digits = '0123456789' +hexdigits = digits + 'abcdef' + 'ABCDEF' +octdigits = '01234567' +punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" +printable = digits + ascii_letters + punctuation + whitespace + +# Functions which aren't available as string methods. + +# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def". +def capwords(s, sep=None): + """capwords(s [,sep]) -> string + + Split the argument into words using split, capitalize each + word using capitalize, and join the capitalized words using + join. If the optional second argument sep is absent or None, + runs of whitespace characters are replaced by a single space + and leading and trailing whitespace are removed, otherwise + sep is used to split and join the words. + + """ + return (sep or ' ').join(map(str.capitalize, s.split(sep))) + + +#################################################################### +_sentinel_dict = {} + + +class _TemplatePattern: + # This descriptor is overwritten in ``Template._compile_pattern()``. + def __get__(self, instance, cls=None): + if cls is None: + return self + return cls._compile_pattern() +_TemplatePattern = _TemplatePattern() + + +class Template: + """A string class for supporting $-substitutions.""" + + delimiter = '$' + # r'[a-z]' matches to non-ASCII letters when used with IGNORECASE, but + # without the ASCII flag. We can't add re.ASCII to flags because of + # backward compatibility. So we use the ?a local flag and [a-z] pattern. 
+    # See https://bugs.python.org/issue31672
+    idpattern = r'(?a:[_a-z][_a-z0-9]*)'
+    braceidpattern = None
+    flags = None  # default: re.IGNORECASE
+
+    pattern = _TemplatePattern  # use a descriptor to compile the pattern
+
+    def __init_subclass__(cls):
+        super().__init_subclass__()
+        cls._compile_pattern()
+
+    @classmethod
+    def _compile_pattern(cls):
+        import re  # deferred import, for performance
+
+        pattern = cls.__dict__.get('pattern', _TemplatePattern)
+        if pattern is _TemplatePattern:
+            delim = re.escape(cls.delimiter)
+            id = cls.idpattern
+            bid = cls.braceidpattern or cls.idpattern
+            pattern = fr"""
+            {delim}(?:
+              (?P<escaped>{delim})  |   # Escape sequence of two delimiters
+              (?P<named>{id})       |   # delimiter and a Python identifier
+              {{(?P<braced>{bid})}} |   # delimiter and a braced identifier
+              (?P<invalid>)             # Other ill-formed delimiter exprs
+            )
+            """
+        if cls.flags is None:
+            cls.flags = re.IGNORECASE
+        pat = cls.pattern = re.compile(pattern, cls.flags | re.VERBOSE)
+        return pat
+
+    def __init__(self, template):
+        self.template = template
+
+    # Search for $$, $identifier, ${identifier}, and any bare $'s
+
+    def _invalid(self, mo):
+        i = mo.start('invalid')
+        lines = self.template[:i].splitlines(keepends=True)
+        if not lines:
+            colno = 1
+            lineno = 1
+        else:
+            colno = i - len(''.join(lines[:-1]))
+            lineno = len(lines)
+        raise ValueError('Invalid placeholder in string: line %d, col %d' %
+                         (lineno, colno))
+
+    def substitute(self, mapping=_sentinel_dict, /, **kws):
+        if mapping is _sentinel_dict:
+            mapping = kws
+        elif kws:
+            from collections import ChainMap
+            mapping = ChainMap(kws, mapping)
+        # Helper function for .sub()
+        def convert(mo):
+            # Check the most common path first.
+            named = mo.group('named') or mo.group('braced')
+            if named is not None:
+                return str(mapping[named])
+            if mo.group('escaped') is not None:
+                return self.delimiter
+            if mo.group('invalid') is not None:
+                self._invalid(mo)
+            raise ValueError('Unrecognized named group in pattern',
+                             self.pattern)
+        return self.pattern.sub(convert, self.template)
+
+    def safe_substitute(self, mapping=_sentinel_dict, /, **kws):
+        if mapping is _sentinel_dict:
+            mapping = kws
+        elif kws:
+            from collections import ChainMap
+            mapping = ChainMap(kws, mapping)
+        # Helper function for .sub()
+        def convert(mo):
+            named = mo.group('named') or mo.group('braced')
+            if named is not None:
+                try:
+                    return str(mapping[named])
+                except KeyError:
+                    return mo.group()
+            if mo.group('escaped') is not None:
+                return self.delimiter
+            if mo.group('invalid') is not None:
+                return mo.group()
+            raise ValueError('Unrecognized named group in pattern',
+                             self.pattern)
+        return self.pattern.sub(convert, self.template)
+
+    def is_valid(self):
+        for mo in self.pattern.finditer(self.template):
+            if mo.group('invalid') is not None:
+                return False
+            if (mo.group('named') is None
+                and mo.group('braced') is None
+                and mo.group('escaped') is None):
+                # If all the groups are None, there must be
+                # another group we're not expecting
+                raise ValueError('Unrecognized named group in pattern',
+                                 self.pattern)
+        return True
+
+    def get_identifiers(self):
+        ids = []
+        for mo in self.pattern.finditer(self.template):
+            named = mo.group('named') or mo.group('braced')
+            if named is not None and named not in ids:
+                # add a named group only the first time it appears
+                ids.append(named)
+            elif (named is None
+                  and mo.group('invalid') is None
+                  and mo.group('escaped') is None):
+                # If all the groups are None, there must be
+                # another group we're not expecting
+                raise ValueError('Unrecognized named group in pattern',
self.pattern) + return ids + + +######################################################################## +# the Formatter class +# see PEP 3101 for details and purpose of this class + +# The hard parts are reused from the C implementation. They're exposed as "_" +# prefixed methods of str. + +# The overall parser is implemented in _string.formatter_parser. +# The field name parser is implemented in _string.formatter_field_name_split + +class Formatter: + def format(self, format_string, /, *args, **kwargs): + return self.vformat(format_string, args, kwargs) + + def vformat(self, format_string, args, kwargs): + used_args = set() + result, _ = self._vformat(format_string, args, kwargs, used_args, 2) + self.check_unused_args(used_args, args, kwargs) + return result + + def _vformat(self, format_string, args, kwargs, used_args, recursion_depth, + auto_arg_index=0): + if recursion_depth < 0: + raise ValueError('Max string recursion exceeded') + result = [] + for literal_text, field_name, format_spec, conversion in \ + self.parse(format_string): + + # output the literal text + if literal_text: + result.append(literal_text) + + # if there's a field, output it + if field_name is not None: + # this is some markup, find the object and do + # the formatting + + # handle arg indexing when empty field first parts are given. + field_first, _ = _string.formatter_field_name_split(field_name) + if field_first == '': + if auto_arg_index is False: + raise ValueError('cannot switch from manual field ' + 'specification to automatic field ' + 'numbering') + field_name = str(auto_arg_index) + field_name + auto_arg_index += 1 + elif isinstance(field_first, int): + if auto_arg_index: + raise ValueError('cannot switch from automatic field ' + 'numbering to manual field ' + 'specification') + # disable auto arg incrementing, if it gets + # used later on, then an exception will be raised + auto_arg_index = False + + # given the field_name, find the object it references + # and the argument it came from + obj, arg_used = self.get_field(field_name, args, kwargs) + used_args.add(arg_used) + + # do any conversion on the resulting object + obj = self.convert_field(obj, conversion) + + # expand the format spec, if needed + format_spec, auto_arg_index = self._vformat( + format_spec, args, kwargs, + used_args, recursion_depth-1, + auto_arg_index=auto_arg_index) + + # format the object and append to the result + result.append(self.format_field(obj, format_spec)) + + return ''.join(result), auto_arg_index + + + def get_value(self, key, args, kwargs): + if isinstance(key, int): + return args[key] + else: + return kwargs[key] + + + def check_unused_args(self, used_args, args, kwargs): + pass + + + def format_field(self, value, format_spec): + return format(value, format_spec) + + + def convert_field(self, value, conversion): + # do any conversion on the resulting object + if conversion is None: + return value + elif conversion == 's': + return str(value) + elif conversion == 'r': + return repr(value) + elif conversion == 'a': + return ascii(value) + raise ValueError("Unknown conversion specifier {0!s}".format(conversion)) + + + # returns an iterable that contains tuples of the form: + # (literal_text, field_name, format_spec, conversion) + # literal_text can be zero length + # field_name can be None, in which case there's no + # object to format and output + # if field_name is not None, it is looked up, formatted + # with format_spec and conversion and then used + def parse(self, format_string): + return 
_string.formatter_parser(format_string) + + + # given a field_name, find the object it references. + # field_name: the field being looked up, e.g. "0.name" + # or "lookup[3]" + # used_args: a set of which args have been used + # args, kwargs: as passed in to vformat + def get_field(self, field_name, args, kwargs): + first, rest = _string.formatter_field_name_split(field_name) + + obj = self.get_value(first, args, kwargs) + + # loop through the rest of the field_name, doing + # getattr or getitem as needed + for is_attr, i in rest: + if is_attr: + obj = getattr(obj, i) + else: + obj = obj[i] + + return obj, first diff --git a/Python314_4_x86_Template/Lib/string/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/string/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..83a7dc90 Binary files /dev/null and b/Python314_4_x86_Template/Lib/string/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/string/templatelib.py b/Python314_4_x86_Template/Lib/string/templatelib.py new file mode 100644 index 00000000..81648724 --- /dev/null +++ b/Python314_4_x86_Template/Lib/string/templatelib.py @@ -0,0 +1,33 @@ +"""Support for template string literals (t-strings).""" + +t = t"{0}" +Template = type(t) +Interpolation = type(t.interpolations[0]) +del t + +def convert(obj, /, conversion): + """Convert *obj* using formatted string literal semantics.""" + if conversion is None: + return obj + if conversion == 'r': + return repr(obj) + if conversion == 's': + return str(obj) + if conversion == 'a': + return ascii(obj) + raise ValueError(f'invalid conversion specifier: {conversion}') + +def _template_unpickle(*args): + import itertools + + if len(args) != 2: + raise ValueError('Template expects tuple of length 2 to unpickle') + + strings, interpolations = args + parts = [] + for string, interpolation in itertools.zip_longest(strings, interpolations): + if string is not None: + parts.append(string) + if interpolation is not None: + parts.append(interpolation) + return Template(*parts) diff --git a/Python313_13_x86_Template/Lib/stringprep.py b/Python314_4_x86_Template/Lib/stringprep.py similarity index 100% rename from Python313_13_x86_Template/Lib/stringprep.py rename to Python314_4_x86_Template/Lib/stringprep.py diff --git a/Python314_4_x86_Template/Lib/struct.py b/Python314_4_x86_Template/Lib/struct.py new file mode 100644 index 00000000..ff98e8c4 --- /dev/null +++ b/Python314_4_x86_Template/Lib/struct.py @@ -0,0 +1,15 @@ +__all__ = [ + # Functions + 'calcsize', 'pack', 'pack_into', 'unpack', 'unpack_from', + 'iter_unpack', + + # Classes + 'Struct', + + # Exceptions + 'error' + ] + +from _struct import * +from _struct import _clearcache # noqa: F401 +from _struct import __doc__ # noqa: F401 diff --git a/Python314_4_x86_Template/Lib/subprocess.py b/Python314_4_x86_Template/Lib/subprocess.py new file mode 100644 index 00000000..52b7b711 --- /dev/null +++ b/Python314_4_x86_Template/Lib/subprocess.py @@ -0,0 +1,2257 @@ +# subprocess - Subprocesses with accessible I/O streams +# +# For more information about this module, see PEP 324. +# +# Copyright (c) 2003-2005 by Peter Astrand +# +# Licensed to PSF under a Contributor Agreement. + +r"""Subprocesses with accessible I/O streams + +This module allows you to spawn processes, connect to their +input/output/error pipes, and obtain their return codes. + +For a complete description of this module see the Python documentation. 
+ +Main API +======== +run(...): Runs a command, waits for it to complete, then returns a + CompletedProcess instance. +Popen(...): A class for flexibly executing a command in a new process + +Constants +--------- +DEVNULL: Special value that indicates that os.devnull should be used +PIPE: Special value that indicates a pipe should be created +STDOUT: Special value that indicates that stderr should go to stdout + + +Older API +========= +call(...): Runs a command, waits for it to complete, then returns + the return code. +check_call(...): Same as call() but raises CalledProcessError() + if return code is not 0 +check_output(...): Same as check_call() but returns the contents of + stdout instead of a return code +getoutput(...): Runs a command in the shell, waits for it to complete, + then returns the output +getstatusoutput(...): Runs a command in the shell, waits for it to complete, + then returns a (exitcode, output) tuple +""" + +import builtins +import errno +import io +import locale +import os +import time +import signal +import sys +import threading +import warnings +import contextlib +from time import monotonic as _time +import types + +try: + import fcntl +except ImportError: + fcntl = None + + +__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput", + "getoutput", "check_output", "run", "CalledProcessError", "DEVNULL", + "SubprocessError", "TimeoutExpired", "CompletedProcess"] + # NOTE: We intentionally exclude list2cmdline as it is + # considered an internal implementation detail. issue10838. + +# use presence of msvcrt to detect Windows-like platforms (see bpo-8110) +try: + import msvcrt +except ModuleNotFoundError: + _mswindows = False +else: + _mswindows = True + +# some platforms do not support subprocesses +_can_fork_exec = sys.platform not in {"emscripten", "wasi", "ios", "tvos", "watchos"} + +if _mswindows: + import _winapi + from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP, # noqa: F401 + STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, + STD_ERROR_HANDLE, SW_HIDE, + STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW, + STARTF_FORCEONFEEDBACK, STARTF_FORCEOFFFEEDBACK, + ABOVE_NORMAL_PRIORITY_CLASS, BELOW_NORMAL_PRIORITY_CLASS, + HIGH_PRIORITY_CLASS, IDLE_PRIORITY_CLASS, + NORMAL_PRIORITY_CLASS, REALTIME_PRIORITY_CLASS, + CREATE_NO_WINDOW, DETACHED_PROCESS, + CREATE_DEFAULT_ERROR_MODE, CREATE_BREAKAWAY_FROM_JOB) + + __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP", + "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE", + "STD_ERROR_HANDLE", "SW_HIDE", + "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW", + "STARTF_FORCEONFEEDBACK", "STARTF_FORCEOFFFEEDBACK", + "STARTUPINFO", + "ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS", + "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS", + "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS", + "CREATE_NO_WINDOW", "DETACHED_PROCESS", + "CREATE_DEFAULT_ERROR_MODE", "CREATE_BREAKAWAY_FROM_JOB"]) +else: + if _can_fork_exec: + from _posixsubprocess import fork_exec as _fork_exec + # used in methods that are called by __del__ + class _del_safe: + waitpid = os.waitpid + waitstatus_to_exitcode = os.waitstatus_to_exitcode + WIFSTOPPED = os.WIFSTOPPED + WSTOPSIG = os.WSTOPSIG + WNOHANG = os.WNOHANG + ECHILD = errno.ECHILD + else: + class _del_safe: + waitpid = None + waitstatus_to_exitcode = None + WIFSTOPPED = None + WSTOPSIG = None + WNOHANG = None + ECHILD = errno.ECHILD + + import select + import selectors + + +# Exception classes used by this module. 
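+
+# NOTE: illustrative usage sketch, not part of the upstream module.  It
+# assumes a POSIX system where the `false` utility is on PATH:
+#
+#     >>> import subprocess
+#     >>> try:
+#     ...     subprocess.run(['false'], check=True)
+#     ... except subprocess.CalledProcessError as exc:
+#     ...     exc.returncode
+#     ...
+#     1
+#
+# TimeoutExpired is raised analogously when a timeout= argument expires.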
+class SubprocessError(Exception): pass + + +class CalledProcessError(SubprocessError): + """Raised when run() is called with check=True and the process + returns a non-zero exit status. + + Attributes: + cmd, returncode, stdout, stderr, output + """ + def __init__(self, returncode, cmd, output=None, stderr=None): + self.returncode = returncode + self.cmd = cmd + self.output = output + self.stderr = stderr + + def __str__(self): + if self.returncode and self.returncode < 0: + try: + return "Command '%s' died with %r." % ( + self.cmd, signal.Signals(-self.returncode)) + except ValueError: + return "Command '%s' died with unknown signal %d." % ( + self.cmd, -self.returncode) + else: + return "Command '%s' returned non-zero exit status %d." % ( + self.cmd, self.returncode) + + @property + def stdout(self): + """Alias for output attribute, to match stderr""" + return self.output + + @stdout.setter + def stdout(self, value): + # There's no obvious reason to set this, but allow it anyway so + # .stdout is a transparent alias for .output + self.output = value + + +class TimeoutExpired(SubprocessError): + """This exception is raised when the timeout expires while waiting for a + child process. + + Attributes: + cmd, output, stdout, stderr, timeout + """ + def __init__(self, cmd, timeout, output=None, stderr=None): + self.cmd = cmd + self.timeout = timeout + self.output = output + self.stderr = stderr + + def __str__(self): + return ("Command '%s' timed out after %s seconds" % + (self.cmd, self.timeout)) + + @property + def stdout(self): + return self.output + + @stdout.setter + def stdout(self, value): + # There's no obvious reason to set this, but allow it anyway so + # .stdout is a transparent alias for .output + self.output = value + + +if _mswindows: + class STARTUPINFO: + def __init__(self, *, dwFlags=0, hStdInput=None, hStdOutput=None, + hStdError=None, wShowWindow=0, lpAttributeList=None): + self.dwFlags = dwFlags + self.hStdInput = hStdInput + self.hStdOutput = hStdOutput + self.hStdError = hStdError + self.wShowWindow = wShowWindow + self.lpAttributeList = lpAttributeList or {"handle_list": []} + + def copy(self): + attr_list = self.lpAttributeList.copy() + if 'handle_list' in attr_list: + attr_list['handle_list'] = list(attr_list['handle_list']) + + return STARTUPINFO(dwFlags=self.dwFlags, + hStdInput=self.hStdInput, + hStdOutput=self.hStdOutput, + hStdError=self.hStdError, + wShowWindow=self.wShowWindow, + lpAttributeList=attr_list) + + + class Handle(int): + closed = False + + def Close(self, CloseHandle=_winapi.CloseHandle): + if not self.closed: + self.closed = True + CloseHandle(self) + + def Detach(self): + if not self.closed: + self.closed = True + return int(self) + raise ValueError("already closed") + + def __repr__(self): + return "%s(%d)" % (self.__class__.__name__, int(self)) + + __del__ = Close +else: + # When select or poll has indicated that the file is writable, + # we can write up to _PIPE_BUF bytes without risk of blocking. + # POSIX defines PIPE_BUF as >= 512. + _PIPE_BUF = getattr(select, 'PIPE_BUF', 512) + + # poll/select have the advantage of not requiring any extra file + # descriptor, contrarily to epoll/kqueue (also, they require a single + # syscall). + if hasattr(selectors, 'PollSelector'): + _PopenSelector = selectors.PollSelector + else: + _PopenSelector = selectors.SelectSelector + + +if _mswindows: + # On Windows we just need to close `Popen._handle` when we no longer need + # it, so that the kernel can free it. 
`Popen._handle` gets closed + # implicitly when the `Popen` instance is finalized (see `Handle.__del__`, + # which is calling `CloseHandle` as requested in [1]), so there is nothing + # for `_cleanup` to do. + # + # [1] https://docs.microsoft.com/en-us/windows/desktop/ProcThread/ + # creating-processes + _active = None + + def _cleanup(): + pass +else: + # This lists holds Popen instances for which the underlying process had not + # exited at the time its __del__ method got called: those processes are + # wait()ed for synchronously from _cleanup() when a new Popen object is + # created, to avoid zombie processes. + _active = [] + + def _cleanup(): + if _active is None: + return + for inst in _active[:]: + res = inst._internal_poll(_deadstate=sys.maxsize) + if res is not None: + try: + _active.remove(inst) + except ValueError: + # This can happen if two threads create a new Popen instance. + # It's harmless that it was already removed, so ignore. + pass + +PIPE = -1 +STDOUT = -2 +DEVNULL = -3 + + +# XXX This function is only used by multiprocessing and the test suite, +# but it's here so that it can be imported when Python is compiled without +# threads. + +def _optim_args_from_interpreter_flags(): + """Return a list of command-line arguments reproducing the current + optimization settings in sys.flags.""" + args = [] + value = sys.flags.optimize + if value > 0: + args.append('-' + 'O' * value) + return args + + +def _args_from_interpreter_flags(): + """Return a list of command-line arguments reproducing the current + settings in sys.flags, sys.warnoptions and sys._xoptions.""" + flag_opt_map = { + 'debug': 'd', + # 'inspect': 'i', + # 'interactive': 'i', + 'dont_write_bytecode': 'B', + 'no_site': 'S', + 'verbose': 'v', + 'bytes_warning': 'b', + 'quiet': 'q', + # -O is handled in _optim_args_from_interpreter_flags() + } + args = _optim_args_from_interpreter_flags() + for flag, opt in flag_opt_map.items(): + v = getattr(sys.flags, flag) + if v > 0: + args.append('-' + opt * v) + + if sys.flags.isolated: + args.append('-I') + else: + if sys.flags.ignore_environment: + args.append('-E') + if sys.flags.no_user_site: + args.append('-s') + if sys.flags.safe_path: + args.append('-P') + + # -W options + warnopts = sys.warnoptions[:] + xoptions = getattr(sys, '_xoptions', {}) + bytes_warning = sys.flags.bytes_warning + dev_mode = sys.flags.dev_mode + + if bytes_warning > 1: + warnopts.remove("error::BytesWarning") + elif bytes_warning: + warnopts.remove("default::BytesWarning") + if dev_mode: + warnopts.remove('default') + for opt in warnopts: + args.append('-W' + opt) + + # -X options + if dev_mode: + args.extend(('-X', 'dev')) + for opt in sorted(xoptions): + if opt == 'dev': + # handled above via sys.flags.dev_mode + continue + value = xoptions[opt] + if value is True: + arg = opt + else: + arg = '%s=%s' % (opt, value) + args.extend(('-X', arg)) + + return args + + +def _text_encoding(): + # Return default text encoding and emit EncodingWarning if + # sys.flags.warn_default_encoding is true. + if sys.flags.warn_default_encoding: + f = sys._getframe() + filename = f.f_code.co_filename + stacklevel = 2 + while f := f.f_back: + if f.f_code.co_filename != filename: + break + stacklevel += 1 + warnings.warn("'encoding' argument not specified.", + EncodingWarning, stacklevel) + + if sys.flags.utf8_mode: + return "utf-8" + else: + return locale.getencoding() + + +def call(*popenargs, timeout=None, **kwargs): + """Run command with arguments. 
Wait for command to complete or + for timeout seconds, then return the returncode attribute. + + The arguments are the same as for the Popen constructor. Example: + + retcode = call(["ls", "-l"]) + """ + with Popen(*popenargs, **kwargs) as p: + try: + return p.wait(timeout=timeout) + except: # Including KeyboardInterrupt, wait handled that. + p.kill() + # We don't call p.wait() again as p.__exit__ does that for us. + raise + + +def check_call(*popenargs, **kwargs): + """Run command with arguments. Wait for command to complete. If + the exit code was zero then return, otherwise raise + CalledProcessError. The CalledProcessError object will have the + return code in the returncode attribute. + + The arguments are the same as for the call function. Example: + + check_call(["ls", "-l"]) + """ + retcode = call(*popenargs, **kwargs) + if retcode: + cmd = kwargs.get("args") + if cmd is None: + cmd = popenargs[0] + raise CalledProcessError(retcode, cmd) + return 0 + + +def check_output(*popenargs, timeout=None, **kwargs): + r"""Run command with arguments and return its output. + + If the exit code was non-zero it raises a CalledProcessError. The + CalledProcessError object will have the return code in the returncode + attribute and output in the output attribute. + + The arguments are the same as for the Popen constructor. Example: + + >>> check_output(["ls", "-l", "/dev/null"]) + b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' + + The stdout argument is not allowed as it is used internally. + To capture standard error in the result, use stderr=STDOUT. + + >>> check_output(["/bin/sh", "-c", + ... "ls -l non_existent_file ; exit 0"], + ... stderr=STDOUT) + b'ls: non_existent_file: No such file or directory\n' + + There is an additional optional argument, "input", allowing you to + pass a string to the subprocess's stdin. If you use this argument + you may not also use the Popen constructor's "stdin" argument, as + it too will be used internally. Example: + + >>> check_output(["sed", "-e", "s/foo/bar/"], + ... input=b"when in the course of fooman events\n") + b'when in the course of barman events\n' + + By default, all communication is in bytes, and therefore any "input" + should be bytes, and the return value will be bytes. If in text mode, + any "input" should be a string, and the return value will be a string + decoded according to locale encoding, or by "encoding" if set. Text mode + is triggered by setting any of text, encoding, errors or universal_newlines. + """ + for kw in ('stdout', 'check'): + if kw in kwargs: + raise ValueError(f'{kw} argument not allowed, it will be overridden.') + + if 'input' in kwargs and kwargs['input'] is None: + # Explicitly passing input=None was previously equivalent to passing an + # empty string. That is maintained here for backwards compatibility. + if kwargs.get('universal_newlines') or kwargs.get('text') or kwargs.get('encoding') \ + or kwargs.get('errors'): + empty = '' + else: + empty = b'' + kwargs['input'] = empty + + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + **kwargs).stdout + + +class CompletedProcess(object): + """A process that has finished running. + + This is returned by run(). + + Attributes: + args: The list or str args passed to run(). + returncode: The exit code of the process, negative for signals. + stdout: The standard output (None if not captured). + stderr: The standard error (None if not captured). 
+ """ + def __init__(self, args, returncode, stdout=None, stderr=None): + self.args = args + self.returncode = returncode + self.stdout = stdout + self.stderr = stderr + + def __repr__(self): + args = ['args={!r}'.format(self.args), + 'returncode={!r}'.format(self.returncode)] + if self.stdout is not None: + args.append('stdout={!r}'.format(self.stdout)) + if self.stderr is not None: + args.append('stderr={!r}'.format(self.stderr)) + return "{}({})".format(type(self).__name__, ', '.join(args)) + + __class_getitem__ = classmethod(types.GenericAlias) + + + def check_returncode(self): + """Raise CalledProcessError if the exit code is non-zero.""" + if self.returncode: + raise CalledProcessError(self.returncode, self.args, self.stdout, + self.stderr) + + +def run(*popenargs, + input=None, capture_output=False, timeout=None, check=False, **kwargs): + """Run command with arguments and return a CompletedProcess instance. + + The returned instance will have attributes args, returncode, stdout and + stderr. By default, stdout and stderr are not captured, and those attributes + will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them, + or pass capture_output=True to capture both. + + If check is True and the exit code was non-zero, it raises a + CalledProcessError. The CalledProcessError object will have the return code + in the returncode attribute, and output & stderr attributes if those streams + were captured. + + If timeout (seconds) is given and the process takes too long, + a TimeoutExpired exception will be raised. + + There is an optional argument "input", allowing you to + pass bytes or a string to the subprocess's stdin. If you use this argument + you may not also use the Popen constructor's "stdin" argument, as + it will be used internally. + + By default, all communication is in bytes, and therefore any "input" should + be bytes, and the stdout and stderr will be bytes. If in text mode, any + "input" should be a string, and stdout and stderr will be strings decoded + according to locale encoding, or by "encoding" if set. Text mode is + triggered by setting any of text, encoding, errors or universal_newlines. + + The other arguments are the same as for the Popen constructor. + """ + if input is not None: + if kwargs.get('stdin') is not None: + raise ValueError('stdin and input arguments may not both be used.') + kwargs['stdin'] = PIPE + + if capture_output: + if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None: + raise ValueError('stdout and stderr arguments may not be used ' + 'with capture_output.') + kwargs['stdout'] = PIPE + kwargs['stderr'] = PIPE + + with Popen(*popenargs, **kwargs) as process: + try: + stdout, stderr = process.communicate(input, timeout=timeout) + except TimeoutExpired as exc: + process.kill() + if _mswindows: + # Windows accumulates the output in a single blocking + # read() call run on child threads, with the timeout + # being done in a join() on those threads. communicate() + # _after_ kill() is required to collect that and add it + # to the exception. + exc.stdout, exc.stderr = process.communicate() + else: + # POSIX _communicate already populated the output so + # far into the TimeoutExpired exception. + process.wait() + raise + except: # Including KeyboardInterrupt, communicate handled that. + process.kill() + # We don't call process.wait() as .__exit__ does that for us. 
+ raise + retcode = process.poll() + if check and retcode: + raise CalledProcessError(retcode, process.args, + output=stdout, stderr=stderr) + return CompletedProcess(process.args, retcode, stdout, stderr) + + +def list2cmdline(seq): + """ + Translate a sequence of arguments into a command line + string, using the same rules as the MS C runtime: + + 1) Arguments are delimited by white space, which is either a + space or a tab. + + 2) A string surrounded by double quotation marks is + interpreted as a single argument, regardless of white space + contained within. A quoted string can be embedded in an + argument. + + 3) A double quotation mark preceded by a backslash is + interpreted as a literal double quotation mark. + + 4) Backslashes are interpreted literally, unless they + immediately precede a double quotation mark. + + 5) If backslashes immediately precede a double quotation mark, + every pair of backslashes is interpreted as a literal + backslash. If the number of backslashes is odd, the last + backslash escapes the next double quotation mark as + described in rule 3. + """ + + # See + # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx + # or search http://msdn.microsoft.com for + # "Parsing C++ Command-Line Arguments" + result = [] + needquote = False + for arg in map(os.fsdecode, seq): + bs_buf = [] + + # Add a space to separate this argument from the others + if result: + result.append(' ') + + needquote = (" " in arg) or ("\t" in arg) or not arg + if needquote: + result.append('"') + + for c in arg: + if c == '\\': + # Don't know if we need to double yet. + bs_buf.append(c) + elif c == '"': + # Double backslashes. + result.append('\\' * len(bs_buf)*2) + bs_buf = [] + result.append('\\"') + else: + # Normal char + if bs_buf: + result.extend(bs_buf) + bs_buf = [] + result.append(c) + + # Add remaining backslashes, if any. + if bs_buf: + result.extend(bs_buf) + + if needquote: + result.extend(bs_buf) + result.append('"') + + return ''.join(result) + + +# Various tools for executing commands and looking at their output and status. +# + +def getstatusoutput(cmd, *, encoding=None, errors=None): + """Return (exitcode, output) of executing cmd in a shell. + + Execute the string 'cmd' in a shell with 'check_output' and + return a 2-tuple (status, output). The locale encoding is used + to decode the output and process newlines. + + A trailing newline is stripped from the output. + The exit status for the command can be interpreted + according to the rules for the function 'wait'. Example: + + >>> import subprocess + >>> subprocess.getstatusoutput('ls /bin/ls') + (0, '/bin/ls') + >>> subprocess.getstatusoutput('cat /bin/junk') + (1, 'cat: /bin/junk: No such file or directory') + >>> subprocess.getstatusoutput('/bin/junk') + (127, 'sh: /bin/junk: not found') + >>> subprocess.getstatusoutput('/bin/kill $$') + (-15, '') + """ + try: + data = check_output(cmd, shell=True, text=True, stderr=STDOUT, + encoding=encoding, errors=errors) + exitcode = 0 + except CalledProcessError as ex: + data = ex.output + exitcode = ex.returncode + if data[-1:] == '\n': + data = data[:-1] + return exitcode, data + +def getoutput(cmd, *, encoding=None, errors=None): + """Return output (stdout or stderr) of executing cmd in a shell. + + Like getstatusoutput(), except the exit status is ignored and the return + value is a string containing the command's output. 
Example: + + >>> import subprocess + >>> subprocess.getoutput('ls /bin/ls') + '/bin/ls' + """ + return getstatusoutput(cmd, encoding=encoding, errors=errors)[1] + + + +def _use_posix_spawn(): + """Check if posix_spawn() can be used for subprocess. + + subprocess requires a posix_spawn() implementation that properly reports + errors to the parent process, & sets errno on the following failures: + + * Process attribute actions failed. + * File actions failed. + * exec() failed. + + Prefer an implementation which can use vfork() in some cases for best + performance. + """ + if _mswindows or not hasattr(os, 'posix_spawn'): + # os.posix_spawn() is not available + return False + + if ((_env := os.environ.get('_PYTHON_SUBPROCESS_USE_POSIX_SPAWN')) in ('0', '1')): + return bool(int(_env)) + + if sys.platform in ('darwin', 'sunos5'): + # posix_spawn() is a syscall on both macOS and Solaris, + # and properly reports errors + return True + + # Check libc name and runtime libc version + try: + ver = os.confstr('CS_GNU_LIBC_VERSION') + # parse 'glibc 2.28' as ('glibc', (2, 28)) + parts = ver.split(maxsplit=1) + if len(parts) != 2: + # reject unknown format + raise ValueError + libc = parts[0] + version = tuple(map(int, parts[1].split('.'))) + + if sys.platform == 'linux' and libc == 'glibc' and version >= (2, 24): + # glibc 2.24 has a new Linux posix_spawn implementation using vfork + # which properly reports errors to the parent process. + return True + # Note: Don't use the implementation in earlier glibc because it doesn't + # use vfork (even if glibc 2.26 added a pipe to properly report errors + # to the parent process). + except (AttributeError, ValueError, OSError): + # os.confstr() or CS_GNU_LIBC_VERSION value not available + pass + + # By default, assume that posix_spawn() does not properly report errors. + return False + + +# These are primarily fail-safe knobs for negatives. A True value does not +# guarantee the given libc/syscall API will be used. +_USE_POSIX_SPAWN = _use_posix_spawn() +_HAVE_POSIX_SPAWN_CLOSEFROM = hasattr(os, 'POSIX_SPAWN_CLOSEFROM') + + +class Popen: + """ Execute a child program in a new process. + + For a complete description of the arguments see the Python documentation. + + Arguments: + args: A string, or a sequence of program arguments. + + bufsize: supplied as the buffering argument to the open() function when + creating the stdin/stdout/stderr pipe file objects + + executable: A replacement program to execute. + + stdin, stdout and stderr: These specify the executed programs' standard + input, standard output and standard error file handles, respectively. + + preexec_fn: (POSIX only) An object to be called in the child process + just before the child is executed. + + close_fds: Controls closing or inheriting of file descriptors. + + shell: If true, the command will be executed through the shell. + + cwd: Sets the current directory before the child is executed. + + env: Defines the environment variables for the new process. + + text: If true, decode stdin, stdout and stderr using the given encoding + (if set) or the system default otherwise. + + universal_newlines: Alias of text, provided for backwards compatibility. 
+ + startupinfo and creationflags (Windows only) + + restore_signals (POSIX only) + + start_new_session (POSIX only) + + process_group (POSIX only) + + group (POSIX only) + + extra_groups (POSIX only) + + user (POSIX only) + + umask (POSIX only) + + pass_fds (POSIX only) + + encoding and errors: Text mode encoding and error handling to use for + file objects stdin, stdout and stderr. + + Attributes: + stdin, stdout, stderr, pid, returncode + """ + _child_created = False # Set here since __del__ checks it + + def __init__(self, args, bufsize=-1, executable=None, + stdin=None, stdout=None, stderr=None, + preexec_fn=None, close_fds=True, + shell=False, cwd=None, env=None, universal_newlines=None, + startupinfo=None, creationflags=0, + restore_signals=True, start_new_session=False, + pass_fds=(), *, user=None, group=None, extra_groups=None, + encoding=None, errors=None, text=None, umask=-1, pipesize=-1, + process_group=None): + """Create new Popen instance.""" + if not _can_fork_exec: + raise OSError( + errno.ENOTSUP, f"{sys.platform} does not support processes." + ) + + _cleanup() + # Held while anything is calling waitpid before returncode has been + # updated to prevent clobbering returncode if wait() or poll() are + # called from multiple threads at once. After acquiring the lock, + # code must re-check self.returncode to see if another thread just + # finished a waitpid() call. + self._waitpid_lock = threading.Lock() + + self._input = None + self._communication_started = False + if bufsize is None: + bufsize = -1 # Restore default + if not isinstance(bufsize, int): + raise TypeError("bufsize must be an integer") + + if stdout is STDOUT: + raise ValueError("STDOUT can only be used for stderr") + + if pipesize is None: + pipesize = -1 # Restore default + if not isinstance(pipesize, int): + raise TypeError("pipesize must be an integer") + + if _mswindows: + if preexec_fn is not None: + raise ValueError("preexec_fn is not supported on Windows " + "platforms") + else: + # POSIX + if pass_fds and not close_fds: + warnings.warn("pass_fds overriding close_fds.", RuntimeWarning) + close_fds = True + if startupinfo is not None: + raise ValueError("startupinfo is only supported on Windows " + "platforms") + if creationflags != 0: + raise ValueError("creationflags is only supported on Windows " + "platforms") + + self.args = args + self.stdin = None + self.stdout = None + self.stderr = None + self.pid = None + self.returncode = None + self.encoding = encoding + self.errors = errors + self.pipesize = pipesize + + # Validate the combinations of text and universal_newlines + if (text is not None and universal_newlines is not None + and bool(universal_newlines) != bool(text)): + raise SubprocessError('Cannot disambiguate when both text ' + 'and universal_newlines are supplied but ' + 'different. Pass one or the other.') + + self.text_mode = encoding or errors or text or universal_newlines + if self.text_mode and encoding is None: + self.encoding = encoding = _text_encoding() + + # How long to resume waiting on a child after the first ^C. + # There is no right value for this. The purpose is to be polite + # yet remain good for interactive users trying to exit a tool. + self._sigint_wait_secs = 0.25 # 1/xkcd221.getRandomNumber() + + self._closed_child_pipe_fds = False + + if self.text_mode: + if bufsize == 1: + line_buffering = True + # Use the default buffer size for the underlying binary streams + # since they don't support line buffering. 
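+                # Illustrative sketch (hypothetical command):
+                #   Popen(["cat"], stdin=PIPE, stdout=PIPE, bufsize=1, text=True)
+                # gives line-buffered text wrappers, while the binary pipes
+                # beneath them fall back to the default size chosen below.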
+ bufsize = -1 + else: + line_buffering = False + + if process_group is None: + process_group = -1 # The internal APIs are int-only + + gid = None + if group is not None: + if not hasattr(os, 'setregid'): + raise ValueError("The 'group' parameter is not supported on the " + "current platform") + + elif isinstance(group, str): + try: + import grp + except ImportError: + raise ValueError("The group parameter cannot be a string " + "on systems without the grp module") + + gid = grp.getgrnam(group).gr_gid + elif isinstance(group, int): + gid = group + else: + raise TypeError("Group must be a string or an integer, not {}" + .format(type(group))) + + if gid < 0: + raise ValueError(f"Group ID cannot be negative, got {gid}") + + gids = None + if extra_groups is not None: + if not hasattr(os, 'setgroups'): + raise ValueError("The 'extra_groups' parameter is not " + "supported on the current platform") + + elif isinstance(extra_groups, str): + raise ValueError("Groups must be a list, not a string") + + gids = [] + for extra_group in extra_groups: + if isinstance(extra_group, str): + try: + import grp + except ImportError: + raise ValueError("Items in extra_groups cannot be " + "strings on systems without the " + "grp module") + + gids.append(grp.getgrnam(extra_group).gr_gid) + elif isinstance(extra_group, int): + gids.append(extra_group) + else: + raise TypeError("Items in extra_groups must be a string " + "or integer, not {}" + .format(type(extra_group))) + + # make sure that the gids are all positive here so we can do less + # checking in the C code + for gid_check in gids: + if gid_check < 0: + raise ValueError(f"Group ID cannot be negative, got {gid_check}") + + uid = None + if user is not None: + if not hasattr(os, 'setreuid'): + raise ValueError("The 'user' parameter is not supported on " + "the current platform") + + elif isinstance(user, str): + try: + import pwd + except ImportError: + raise ValueError("The user parameter cannot be a string " + "on systems without the pwd module") + uid = pwd.getpwnam(user).pw_uid + elif isinstance(user, int): + uid = user + else: + raise TypeError("User must be a string or an integer") + + if uid < 0: + raise ValueError(f"User ID cannot be negative, got {uid}") + + # Input and output objects. The general principle is like + # this: + # + # Parent Child + # ------ ----- + # p2cwrite ---stdin---> p2cread + # c2pread <--stdout--- c2pwrite + # errread <--stderr--- errwrite + # + # On POSIX, the child objects are file descriptors. On + # Windows, these are Windows file handles. The parent objects + # are file descriptors on both platforms. The parent objects + # are -1 when not using PIPEs. The child objects are -1 + # when not redirecting. + + (p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) = self._get_handles(stdin, stdout, stderr) + + # From here on, raising exceptions may cause file descriptor leakage + + # We wrap OS handles *before* launching the child, otherwise a + # quickly terminating child could make our fds unwrappable + # (see #8458). 
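+        # Illustrative sketch (hypothetical command): after
+        #   p = Popen(["cmd"], stdin=PIPE)
+        # p2cwrite arrives as a Windows Handle and must go through
+        # msvcrt.open_osfhandle() before io.open() can wrap it; on POSIX
+        # it is already a plain integer fd.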
+ + if _mswindows: + if p2cwrite != -1: + p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0) + if c2pread != -1: + c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0) + if errread != -1: + errread = msvcrt.open_osfhandle(errread.Detach(), 0) + + try: + if p2cwrite != -1: + self.stdin = io.open(p2cwrite, 'wb', bufsize) + if self.text_mode: + self.stdin = io.TextIOWrapper(self.stdin, write_through=True, + line_buffering=line_buffering, + encoding=encoding, errors=errors) + if c2pread != -1: + self.stdout = io.open(c2pread, 'rb', bufsize) + if self.text_mode: + self.stdout = io.TextIOWrapper(self.stdout, + encoding=encoding, errors=errors) + if errread != -1: + self.stderr = io.open(errread, 'rb', bufsize) + if self.text_mode: + self.stderr = io.TextIOWrapper(self.stderr, + encoding=encoding, errors=errors) + + self._execute_child(args, executable, preexec_fn, close_fds, + pass_fds, cwd, env, + startupinfo, creationflags, shell, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite, + restore_signals, + gid, gids, uid, umask, + start_new_session, process_group) + except: + # Cleanup if the child failed starting. + for f in filter(None, (self.stdin, self.stdout, self.stderr)): + try: + f.close() + except OSError: + pass # Ignore EBADF or other errors. + + if not self._closed_child_pipe_fds: + to_close = [] + if stdin == PIPE: + to_close.append(p2cread) + if stdout == PIPE: + to_close.append(c2pwrite) + if stderr == PIPE: + to_close.append(errwrite) + if hasattr(self, '_devnull'): + to_close.append(self._devnull) + for fd in to_close: + try: + if _mswindows and isinstance(fd, Handle): + fd.Close() + else: + os.close(fd) + except OSError: + pass + + raise + + def __repr__(self): + obj_repr = ( + f"<{self.__class__.__name__}: " + f"returncode: {self.returncode} args: {self.args!r}>" + ) + if len(obj_repr) > 80: + obj_repr = obj_repr[:76] + "...>" + return obj_repr + + __class_getitem__ = classmethod(types.GenericAlias) + + @property + def universal_newlines(self): + # universal_newlines as retained as an alias of text_mode for API + # compatibility. bpo-31756 + return self.text_mode + + @universal_newlines.setter + def universal_newlines(self, universal_newlines): + self.text_mode = bool(universal_newlines) + + def _translate_newlines(self, data, encoding, errors): + data = data.decode(encoding, errors) + return data.replace("\r\n", "\n").replace("\r", "\n") + + def __enter__(self): + return self + + def __exit__(self, exc_type, value, traceback): + if self.stdout: + self.stdout.close() + if self.stderr: + self.stderr.close() + try: # Flushing a BufferedWriter may raise an error + if self.stdin: + self.stdin.close() + finally: + if exc_type == KeyboardInterrupt: + # https://bugs.python.org/issue25942 + # In the case of a KeyboardInterrupt we assume the SIGINT + # was also already sent to our child processes. We can't + # block indefinitely as that is not user friendly. + # If we have not already waited a brief amount of time in + # an interrupted .wait() or .communicate() call, do so here + # for consistency. + if self._sigint_wait_secs > 0: + try: + self._wait(timeout=self._sigint_wait_secs) + except TimeoutExpired: + pass + self._sigint_wait_secs = 0 # Note that this has been done. + else: + # Wait for the process to terminate, to avoid zombies. + self.wait() + + def __del__(self, _maxsize=sys.maxsize, _warn=warnings.warn): + if not self._child_created: + # We didn't get to successfully create a child process. 
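+            # Illustrative sketch: Popen(["x"], bufsize="big") raises
+            # TypeError before any child is spawned, so _child_created is
+            # still False and this early return is taken.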
+ return + if self.returncode is None: + # Not reading subprocess exit status creates a zombie process which + # is only destroyed at the parent python process exit + _warn("subprocess %s is still running" % self.pid, + ResourceWarning, source=self) + # In case the child hasn't been waited on, check if it's done. + self._internal_poll(_deadstate=_maxsize) + if self.returncode is None and _active is not None: + # Child is still running, keep us alive until we can wait on it. + _active.append(self) + + def _get_devnull(self): + if not hasattr(self, '_devnull'): + self._devnull = os.open(os.devnull, os.O_RDWR) + return self._devnull + + def _stdin_write(self, input): + if input: + try: + self.stdin.write(input) + except BrokenPipeError: + pass # communicate() must ignore broken pipe errors. + except OSError as exc: + if exc.errno == errno.EINVAL: + # bpo-19612, bpo-30418: On Windows, stdin.write() fails + # with EINVAL if the child process exited or if the child + # process is still running but closed the pipe. + pass + else: + raise + + try: + self.stdin.close() + except BrokenPipeError: + pass # communicate() must ignore broken pipe errors. + except OSError as exc: + if exc.errno == errno.EINVAL: + pass + else: + raise + + def communicate(self, input=None, timeout=None): + """Interact with process: Send data to stdin and close it. + Read data from stdout and stderr, until end-of-file is + reached. Wait for process to terminate. + + The optional "input" argument should be data to be sent to the + child process, or None, if no data should be sent to the child. + communicate() returns a tuple (stdout, stderr). + + By default, all communication is in bytes, and therefore any + "input" should be bytes, and the (stdout, stderr) will be bytes. + If in text mode (indicated by self.text_mode), any "input" should + be a string, and (stdout, stderr) will be strings decoded + according to locale encoding, or by "encoding" if set. Text mode + is triggered by setting any of text, encoding, errors or + universal_newlines. + """ + + if self._communication_started and input: + raise ValueError("Cannot send input after starting communication") + + # Optimization: If we are not worried about timeouts, we haven't + # started communicating, and we have one or zero pipes, using select() + # or threads is unnecessary. + if (timeout is None and not self._communication_started and + [self.stdin, self.stdout, self.stderr].count(None) >= 2): + stdout = None + stderr = None + if self.stdin: + self._stdin_write(input) + elif self.stdout: + stdout = self.stdout.read() + self.stdout.close() + elif self.stderr: + stderr = self.stderr.read() + self.stderr.close() + self.wait() + else: + if timeout is not None: + endtime = _time() + timeout + else: + endtime = None + + try: + stdout, stderr = self._communicate(input, endtime, timeout) + except KeyboardInterrupt: + # https://bugs.python.org/issue25942 + # See the detailed comment in .wait(). + if timeout is not None: + sigint_timeout = min(self._sigint_wait_secs, + self._remaining_time(endtime)) + else: + sigint_timeout = self._sigint_wait_secs + self._sigint_wait_secs = 0 # nothing else should wait. 
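+                # Illustrative sketch: a Ctrl-C during
+                #   p.communicate(timeout=30)
+                # gives the child at most min(0.25, time left) seconds to
+                # exit on its own SIGINT before the interrupt is re-raised.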
+ try: + self._wait(timeout=sigint_timeout) + except TimeoutExpired: + pass + raise # resume the KeyboardInterrupt + + finally: + self._communication_started = True + try: + sts = self.wait(timeout=self._remaining_time(endtime)) + except TimeoutExpired as exc: + exc.timeout = timeout + raise + + return (stdout, stderr) + + + def poll(self): + """Check if child process has terminated. Set and return returncode + attribute.""" + return self._internal_poll() + + + def _remaining_time(self, endtime): + """Convenience for _communicate when computing timeouts.""" + if endtime is None: + return None + else: + return endtime - _time() + + + def _check_timeout(self, endtime, orig_timeout, stdout_seq, stderr_seq, + skip_check_and_raise=False): + """Convenience for checking if a timeout has expired.""" + if endtime is None: + return + if skip_check_and_raise or _time() > endtime: + raise TimeoutExpired( + self.args, orig_timeout, + output=b''.join(stdout_seq) if stdout_seq else None, + stderr=b''.join(stderr_seq) if stderr_seq else None) + + + def wait(self, timeout=None): + """Wait for child process to terminate; returns self.returncode.""" + if timeout is not None: + endtime = _time() + timeout + try: + return self._wait(timeout=timeout) + except KeyboardInterrupt: + # https://bugs.python.org/issue25942 + # The first keyboard interrupt waits briefly for the child to + # exit under the common assumption that it also received the ^C + # generated SIGINT and will exit rapidly. + if timeout is not None: + sigint_timeout = min(self._sigint_wait_secs, + self._remaining_time(endtime)) + else: + sigint_timeout = self._sigint_wait_secs + self._sigint_wait_secs = 0 # nothing else should wait. + try: + self._wait(timeout=sigint_timeout) + except TimeoutExpired: + pass + raise # resume the KeyboardInterrupt + + def _close_pipe_fds(self, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite): + # self._devnull is not always defined. + devnull_fd = getattr(self, '_devnull', None) + + with contextlib.ExitStack() as stack: + if _mswindows: + if p2cread != -1: + stack.callback(p2cread.Close) + if c2pwrite != -1: + stack.callback(c2pwrite.Close) + if errwrite != -1: + stack.callback(errwrite.Close) + else: + if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd: + stack.callback(os.close, p2cread) + if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd: + stack.callback(os.close, c2pwrite) + if errwrite != -1 and errread != -1 and errwrite != devnull_fd: + stack.callback(os.close, errwrite) + + if devnull_fd is not None: + stack.callback(os.close, devnull_fd) + + # Prevent a double close of these handles/fds from __init__ on error. 
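+        # Illustrative sketch (hypothetical command): for
+        #   Popen(["cmd"], stdin=PIPE, stdout=PIPE)
+        # the ExitStack above closes the child's ends (p2cread, c2pwrite),
+        # leaving self.stdin and self.stdout as the parent's only open ends.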
+ self._closed_child_pipe_fds = True + + @contextlib.contextmanager + def _on_error_fd_closer(self): + """Helper to ensure file descriptors opened in _get_handles are closed""" + to_close = [] + try: + yield to_close + except: + if hasattr(self, '_devnull'): + to_close.append(self._devnull) + del self._devnull + for fd in to_close: + try: + if _mswindows and isinstance(fd, Handle): + fd.Close() + else: + os.close(fd) + except OSError: + pass + raise + + if _mswindows: + # + # Windows methods + # + def _get_handles(self, stdin, stdout, stderr): + """Construct and return tuple with IO objects: + p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite + """ + if stdin is None and stdout is None and stderr is None: + return (-1, -1, -1, -1, -1, -1) + + p2cread, p2cwrite = -1, -1 + c2pread, c2pwrite = -1, -1 + errread, errwrite = -1, -1 + + with self._on_error_fd_closer() as err_close_fds: + if stdin is None: + p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE) + if p2cread is None: + p2cread, _ = _winapi.CreatePipe(None, 0) + p2cread = Handle(p2cread) + err_close_fds.append(p2cread) + _winapi.CloseHandle(_) + elif stdin == PIPE: + p2cread, p2cwrite = _winapi.CreatePipe(None, 0) + p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite) + err_close_fds.extend((p2cread, p2cwrite)) + elif stdin == DEVNULL: + p2cread = msvcrt.get_osfhandle(self._get_devnull()) + elif isinstance(stdin, int): + p2cread = msvcrt.get_osfhandle(stdin) + else: + # Assuming file-like object + p2cread = msvcrt.get_osfhandle(stdin.fileno()) + p2cread = self._make_inheritable(p2cread) + + if stdout is None: + c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE) + if c2pwrite is None: + _, c2pwrite = _winapi.CreatePipe(None, 0) + c2pwrite = Handle(c2pwrite) + err_close_fds.append(c2pwrite) + _winapi.CloseHandle(_) + elif stdout == PIPE: + c2pread, c2pwrite = _winapi.CreatePipe(None, 0) + c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite) + err_close_fds.extend((c2pread, c2pwrite)) + elif stdout == DEVNULL: + c2pwrite = msvcrt.get_osfhandle(self._get_devnull()) + elif isinstance(stdout, int): + c2pwrite = msvcrt.get_osfhandle(stdout) + else: + # Assuming file-like object + c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) + c2pwrite = self._make_inheritable(c2pwrite) + + if stderr is None: + errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE) + if errwrite is None: + _, errwrite = _winapi.CreatePipe(None, 0) + errwrite = Handle(errwrite) + err_close_fds.append(errwrite) + _winapi.CloseHandle(_) + elif stderr == PIPE: + errread, errwrite = _winapi.CreatePipe(None, 0) + errread, errwrite = Handle(errread), Handle(errwrite) + err_close_fds.extend((errread, errwrite)) + elif stderr == STDOUT: + errwrite = c2pwrite + elif stderr == DEVNULL: + errwrite = msvcrt.get_osfhandle(self._get_devnull()) + elif isinstance(stderr, int): + errwrite = msvcrt.get_osfhandle(stderr) + else: + # Assuming file-like object + errwrite = msvcrt.get_osfhandle(stderr.fileno()) + errwrite = self._make_inheritable(errwrite) + + return (p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + + + def _make_inheritable(self, handle): + """Return a duplicate of handle, which is inheritable""" + h = _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), handle, + _winapi.GetCurrentProcess(), 0, 1, + _winapi.DUPLICATE_SAME_ACCESS) + return Handle(h) + + + def _filter_handle_list(self, handle_list): + """Filter out console handles that can't be used + in lpAttributeList["handle_list"] and make sure the list + isn't empty. 
This also removes duplicate handles.""" + # An handle with it's lowest two bits set might be a special console + # handle that if passed in lpAttributeList["handle_list"], will + # cause it to fail. + return list({handle for handle in handle_list + if handle & 0x3 != 0x3 + or _winapi.GetFileType(handle) != + _winapi.FILE_TYPE_CHAR}) + + + def _execute_child(self, args, executable, preexec_fn, close_fds, + pass_fds, cwd, env, + startupinfo, creationflags, shell, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite, + unused_restore_signals, + unused_gid, unused_gids, unused_uid, + unused_umask, + unused_start_new_session, unused_process_group): + """Execute program (MS Windows version)""" + + assert not pass_fds, "pass_fds not supported on Windows." + + if isinstance(args, str): + pass + elif isinstance(args, bytes): + if shell: + raise TypeError('bytes args is not allowed on Windows') + args = list2cmdline([args]) + elif isinstance(args, os.PathLike): + if shell: + raise TypeError('path-like args is not allowed when ' + 'shell is true') + args = list2cmdline([args]) + else: + args = list2cmdline(args) + + if executable is not None: + executable = os.fsdecode(executable) + + # Process startup details + if startupinfo is None: + startupinfo = STARTUPINFO() + else: + # bpo-34044: Copy STARTUPINFO since it is modified above, + # so the caller can reuse it multiple times. + startupinfo = startupinfo.copy() + + use_std_handles = -1 not in (p2cread, c2pwrite, errwrite) + if use_std_handles: + startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES + startupinfo.hStdInput = p2cread + startupinfo.hStdOutput = c2pwrite + startupinfo.hStdError = errwrite + + attribute_list = startupinfo.lpAttributeList + have_handle_list = bool(attribute_list and + "handle_list" in attribute_list and + attribute_list["handle_list"]) + + # If we were given an handle_list or need to create one + if have_handle_list or (use_std_handles and close_fds): + if attribute_list is None: + attribute_list = startupinfo.lpAttributeList = {} + handle_list = attribute_list["handle_list"] = \ + list(attribute_list.get("handle_list", [])) + + if use_std_handles: + handle_list += [int(p2cread), int(c2pwrite), int(errwrite)] + + handle_list[:] = self._filter_handle_list(handle_list) + + if handle_list: + if not close_fds: + warnings.warn("startupinfo.lpAttributeList['handle_list'] " + "overriding close_fds", RuntimeWarning) + + # When using the handle_list we always request to inherit + # handles but the only handles that will be inherited are + # the ones in the handle_list + close_fds = False + + if shell: + startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW + startupinfo.wShowWindow = _winapi.SW_HIDE + if not executable: + # gh-101283: without a fully-qualified path, before Windows + # checks the system directories, it first looks in the + # application directory, and also the current directory if + # NeedCurrentDirectoryForExePathW(ExeName) is true, so try + # to avoid executing unqualified "cmd.exe". 
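+                # Illustrative sketch: Popen("dir", shell=True) resolves the
+                # shell to an absolute path, typically
+                #   C:\Windows\System32\cmd.exe /c "dir"
+                # instead of trusting a relative "cmd.exe" lookup.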
+ comspec = os.environ.get('ComSpec') + if not comspec: + system_root = os.environ.get('SystemRoot', '') + comspec = os.path.join(system_root, 'System32', 'cmd.exe') + if not os.path.isabs(comspec): + raise FileNotFoundError('shell not found: neither %ComSpec% nor %SystemRoot% is set') + if os.path.isabs(comspec): + executable = comspec + else: + comspec = executable + + args = '{} /c "{}"'.format (comspec, args) + + if cwd is not None: + cwd = os.fsdecode(cwd) + + sys.audit("subprocess.Popen", executable, args, cwd, env) + + # Start the process + try: + hp, ht, pid, tid = _winapi.CreateProcess(executable, args, + # no special security + None, None, + int(not close_fds), + creationflags, + env, + cwd, + startupinfo) + finally: + # Child is launched. Close the parent's copy of those pipe + # handles that only the child should have open. You need + # to make sure that no handles to the write end of the + # output pipe are maintained in this process or else the + # pipe will not close when the child process exits and the + # ReadFile will hang. + self._close_pipe_fds(p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + + # Retain the process handle, but close the thread handle + self._child_created = True + self._handle = Handle(hp) + self.pid = pid + _winapi.CloseHandle(ht) + + def _internal_poll(self, _deadstate=None, + _WaitForSingleObject=_winapi.WaitForSingleObject, + _WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0, + _GetExitCodeProcess=_winapi.GetExitCodeProcess): + """Check if child process has terminated. Returns returncode + attribute. + + This method is called by __del__, so it can only refer to objects + in its local scope. + + """ + if self.returncode is None: + if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0: + self.returncode = _GetExitCodeProcess(self._handle) + return self.returncode + + + def _wait(self, timeout): + """Internal implementation of wait() on Windows.""" + if timeout is None: + timeout_millis = _winapi.INFINITE + elif timeout <= 0: + timeout_millis = 0 + else: + timeout_millis = int(timeout * 1000) + if self.returncode is None: + # API note: Returns immediately if timeout_millis == 0. + result = _winapi.WaitForSingleObject(self._handle, + timeout_millis) + if result == _winapi.WAIT_TIMEOUT: + raise TimeoutExpired(self.args, timeout) + self.returncode = _winapi.GetExitCodeProcess(self._handle) + return self.returncode + + + def _readerthread(self, fh, buffer): + buffer.append(fh.read()) + fh.close() + + + def _writerthread(self, input): + self._stdin_write(input) + + + def _communicate(self, input, endtime, orig_timeout): + # Start reader threads feeding into a list hanging off of this + # object, unless they've already been started. + if self.stdout and not hasattr(self, "_stdout_buff"): + self._stdout_buff = [] + self.stdout_thread = \ + threading.Thread(target=self._readerthread, + args=(self.stdout, self._stdout_buff)) + self.stdout_thread.daemon = True + self.stdout_thread.start() + if self.stderr and not hasattr(self, "_stderr_buff"): + self._stderr_buff = [] + self.stderr_thread = \ + threading.Thread(target=self._readerthread, + args=(self.stderr, self._stderr_buff)) + self.stderr_thread.daemon = True + self.stderr_thread.start() + + # Start writer thread to send input to stdin, unless already + # started. The thread writes input and closes stdin when done, + # or continues in the background on timeout. 
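+        # Illustrative sketch: p.communicate(b"data", timeout=5) runs one
+        # writer and up to two reader threads here; after a TimeoutExpired
+        # they stay alive so a later communicate() can finish the exchange.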
+ if self.stdin and not hasattr(self, "_stdin_thread"): + self._stdin_thread = \ + threading.Thread(target=self._writerthread, + args=(input,)) + self._stdin_thread.daemon = True + self._stdin_thread.start() + + # Wait for the writer thread, or time out. If we time out, the + # thread remains writing and the fd left open in case the user + # calls communicate again. + if hasattr(self, "_stdin_thread"): + self._stdin_thread.join(self._remaining_time(endtime)) + if self._stdin_thread.is_alive(): + raise TimeoutExpired(self.args, orig_timeout) + + # Wait for the reader threads, or time out. If we time out, the + # threads remain reading and the fds left open in case the user + # calls communicate again. + if self.stdout is not None: + self.stdout_thread.join(self._remaining_time(endtime)) + if self.stdout_thread.is_alive(): + raise TimeoutExpired(self.args, orig_timeout) + if self.stderr is not None: + self.stderr_thread.join(self._remaining_time(endtime)) + if self.stderr_thread.is_alive(): + raise TimeoutExpired(self.args, orig_timeout) + + # Collect the output from and close both pipes, now that we know + # both have been read successfully. + stdout = None + stderr = None + if self.stdout: + stdout = self._stdout_buff + self.stdout.close() + if self.stderr: + stderr = self._stderr_buff + self.stderr.close() + + # All data exchanged. Translate lists into strings. + stdout = stdout[0] if stdout else None + stderr = stderr[0] if stderr else None + + return (stdout, stderr) + + def send_signal(self, sig): + """Send a signal to the process.""" + # Don't signal a process that we know has already died. + if self.returncode is not None: + return + if sig == signal.SIGTERM: + self.terminate() + elif sig == signal.CTRL_C_EVENT: + os.kill(self.pid, signal.CTRL_C_EVENT) + elif sig == signal.CTRL_BREAK_EVENT: + os.kill(self.pid, signal.CTRL_BREAK_EVENT) + else: + raise ValueError("Unsupported signal: {}".format(sig)) + + def terminate(self): + """Terminates the process.""" + # Don't terminate a process that we know has already died. + if self.returncode is not None: + return + try: + _winapi.TerminateProcess(self._handle, 1) + except PermissionError: + # ERROR_ACCESS_DENIED (winerror 5) is received when the + # process already died. 
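+            # Illustrative sketch: calling p.terminate() twice can land
+            # here, as the second TerminateProcess() is denied once the
+            # process has exited; the exit-code check below disambiguates.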
+ rc = _winapi.GetExitCodeProcess(self._handle) + if rc == _winapi.STILL_ACTIVE: + raise + self.returncode = rc + + kill = terminate + + else: + # + # POSIX methods + # + def _get_handles(self, stdin, stdout, stderr): + """Construct and return tuple with IO objects: + p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite + """ + p2cread, p2cwrite = -1, -1 + c2pread, c2pwrite = -1, -1 + errread, errwrite = -1, -1 + + with self._on_error_fd_closer() as err_close_fds: + if stdin is None: + pass + elif stdin == PIPE: + p2cread, p2cwrite = os.pipe() + err_close_fds.extend((p2cread, p2cwrite)) + if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): + fcntl.fcntl(p2cwrite, fcntl.F_SETPIPE_SZ, self.pipesize) + elif stdin == DEVNULL: + p2cread = self._get_devnull() + elif isinstance(stdin, int): + p2cread = stdin + else: + # Assuming file-like object + p2cread = stdin.fileno() + + if stdout is None: + pass + elif stdout == PIPE: + c2pread, c2pwrite = os.pipe() + err_close_fds.extend((c2pread, c2pwrite)) + if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): + fcntl.fcntl(c2pwrite, fcntl.F_SETPIPE_SZ, self.pipesize) + elif stdout == DEVNULL: + c2pwrite = self._get_devnull() + elif isinstance(stdout, int): + c2pwrite = stdout + else: + # Assuming file-like object + c2pwrite = stdout.fileno() + + if stderr is None: + pass + elif stderr == PIPE: + errread, errwrite = os.pipe() + err_close_fds.extend((errread, errwrite)) + if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"): + fcntl.fcntl(errwrite, fcntl.F_SETPIPE_SZ, self.pipesize) + elif stderr == STDOUT: + if c2pwrite != -1: + errwrite = c2pwrite + else: # child's stdout is not set, use parent's stdout + errwrite = sys.__stdout__.fileno() + elif stderr == DEVNULL: + errwrite = self._get_devnull() + elif isinstance(stderr, int): + errwrite = stderr + else: + # Assuming file-like object + errwrite = stderr.fileno() + + return (p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + + + def _posix_spawn(self, args, executable, env, restore_signals, close_fds, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite): + """Execute program using os.posix_spawn().""" + kwargs = {} + if restore_signals: + # See _Py_RestoreSignals() in Python/pylifecycle.c + sigset = [] + for signame in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'): + signum = getattr(signal, signame, None) + if signum is not None: + sigset.append(signum) + kwargs['setsigdef'] = sigset + + file_actions = [] + for fd in (p2cwrite, c2pread, errread): + if fd != -1: + file_actions.append((os.POSIX_SPAWN_CLOSE, fd)) + for fd, fd2 in ( + (p2cread, 0), + (c2pwrite, 1), + (errwrite, 2), + ): + if fd != -1: + file_actions.append((os.POSIX_SPAWN_DUP2, fd, fd2)) + + if close_fds: + file_actions.append((os.POSIX_SPAWN_CLOSEFROM, 3)) + + if file_actions: + kwargs['file_actions'] = file_actions + + self.pid = os.posix_spawn(executable, args, env, **kwargs) + self._child_created = True + + self._close_pipe_fds(p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + + def _execute_child(self, args, executable, preexec_fn, close_fds, + pass_fds, cwd, env, + startupinfo, creationflags, shell, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite, + restore_signals, + gid, gids, uid, umask, + start_new_session, process_group): + """Execute program (POSIX version)""" + + if isinstance(args, (str, bytes)): + args = [args] + elif isinstance(args, os.PathLike): + if shell: + raise TypeError('path-like args is not allowed when ' + 'shell is true') + args = [args] + else: + args = 
list(args) + + if shell: + # On Android the default shell is at '/system/bin/sh'. + unix_shell = ('/system/bin/sh' if + hasattr(sys, 'getandroidapilevel') else '/bin/sh') + args = [unix_shell, "-c"] + args + if executable: + args[0] = executable + + if executable is None: + executable = args[0] + + sys.audit("subprocess.Popen", executable, args, cwd, env) + + if (_USE_POSIX_SPAWN + and os.path.dirname(executable) + and preexec_fn is None + and (not close_fds or _HAVE_POSIX_SPAWN_CLOSEFROM) + and not pass_fds + and cwd is None + and (p2cread == -1 or p2cread > 2) + and (c2pwrite == -1 or c2pwrite > 2) + and (errwrite == -1 or errwrite > 2) + and not start_new_session + and process_group == -1 + and gid is None + and gids is None + and uid is None + and umask < 0): + self._posix_spawn(args, executable, env, restore_signals, close_fds, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + return + + orig_executable = executable + + # For transferring possible exec failure from child to parent. + # Data format: "exception name:hex errno:description" + # Pickle is not used; it is complex and involves memory allocation. + errpipe_read, errpipe_write = os.pipe() + # errpipe_write must not be in the standard io 0, 1, or 2 fd range. + low_fds_to_close = [] + while errpipe_write < 3: + low_fds_to_close.append(errpipe_write) + errpipe_write = os.dup(errpipe_write) + for low_fd in low_fds_to_close: + os.close(low_fd) + try: + try: + # We must avoid complex work that could involve + # malloc or free in the child process to avoid + # potential deadlocks, thus we do all this here. + # and pass it to fork_exec() + + if env is not None: + env_list = [] + for k, v in env.items(): + k = os.fsencode(k) + if b'=' in k: + raise ValueError("illegal environment variable name") + env_list.append(k + b'=' + os.fsencode(v)) + else: + env_list = None # Use execv instead of execve. + executable = os.fsencode(executable) + if os.path.dirname(executable): + executable_list = (executable,) + else: + # This matches the behavior of os._execvpe(). 
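+                    # Illustrative sketch: for Popen(["ls"]) with
+                    # PATH=/usr/local/bin:/usr/bin this yields
+                    #   (b'/usr/local/bin/ls', b'/usr/bin/ls')
+                    # and fork_exec() tries each candidate in order.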
+ executable_list = tuple( + os.path.join(os.fsencode(dir), executable) + for dir in os.get_exec_path(env)) + fds_to_keep = set(pass_fds) + fds_to_keep.add(errpipe_write) + self.pid = _fork_exec( + args, executable_list, + close_fds, tuple(sorted(map(int, fds_to_keep))), + cwd, env_list, + p2cread, p2cwrite, c2pread, c2pwrite, + errread, errwrite, + errpipe_read, errpipe_write, + restore_signals, start_new_session, + process_group, gid, gids, uid, umask, + preexec_fn) + self._child_created = True + finally: + # be sure the FD is closed no matter what + os.close(errpipe_write) + + self._close_pipe_fds(p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) + + # Wait for exec to fail or succeed; possibly raising an + # exception (limited in size) + errpipe_data = bytearray() + while True: + part = os.read(errpipe_read, 50000) + errpipe_data += part + if not part or len(errpipe_data) > 50000: + break + finally: + # be sure the FD is closed no matter what + os.close(errpipe_read) + + if errpipe_data: + try: + pid, sts = os.waitpid(self.pid, 0) + if pid == self.pid: + self._handle_exitstatus(sts) + else: + self.returncode = sys.maxsize + except ChildProcessError: + pass + + try: + exception_name, hex_errno, err_msg = ( + errpipe_data.split(b':', 2)) + # The encoding here should match the encoding + # written in by the subprocess implementations + # like _posixsubprocess + err_msg = err_msg.decode() + except ValueError: + exception_name = b'SubprocessError' + hex_errno = b'0' + err_msg = 'Bad exception data from child: {!r}'.format( + bytes(errpipe_data)) + child_exception_type = getattr( + builtins, exception_name.decode('ascii'), + SubprocessError) + if issubclass(child_exception_type, OSError) and hex_errno: + errno_num = int(hex_errno, 16) + if err_msg == "noexec:chdir": + err_msg = "" + # The error must be from chdir(cwd). + err_filename = cwd + elif err_msg == "noexec": + err_msg = "" + err_filename = None + else: + err_filename = orig_executable + if errno_num != 0: + err_msg = os.strerror(errno_num) + if err_filename is not None: + raise child_exception_type(errno_num, err_msg, err_filename) + else: + raise child_exception_type(errno_num, err_msg) + raise child_exception_type(err_msg) + + + def _handle_exitstatus(self, sts, _del_safe=_del_safe): + """All callers to this function MUST hold self._waitpid_lock.""" + # This method is called (indirectly) by __del__, so it cannot + # refer to anything outside of its local scope. + if _del_safe.WIFSTOPPED(sts): + self.returncode = -_del_safe.WSTOPSIG(sts) + else: + self.returncode = _del_safe.waitstatus_to_exitcode(sts) + + def _internal_poll(self, _deadstate=None, _del_safe=_del_safe): + """Check if child process has terminated. Returns returncode + attribute. + + This method is called by __del__, so it cannot reference anything + outside of the local scope (nor can any methods it calls). + + """ + if self.returncode is None: + if not self._waitpid_lock.acquire(False): + # Something else is busy calling waitpid. Don't allow two + # at once. We know nothing yet. + return None + try: + if self.returncode is not None: + return self.returncode # Another thread waited. + pid, sts = _del_safe.waitpid(self.pid, _del_safe.WNOHANG) + if pid == self.pid: + self._handle_exitstatus(sts) + except OSError as e: + if _deadstate is not None: + self.returncode = _deadstate + elif e.errno == _del_safe.ECHILD: + # This happens if SIGCLD is set to be ignored or + # waiting for child processes has otherwise been + # disabled for our process. 
This child is dead, we + # can't get the status. + # http://bugs.python.org/issue15756 + self.returncode = 0 + finally: + self._waitpid_lock.release() + return self.returncode + + + def _try_wait(self, wait_flags): + """All callers to this function MUST hold self._waitpid_lock.""" + try: + (pid, sts) = os.waitpid(self.pid, wait_flags) + except ChildProcessError: + # This happens if SIGCLD is set to be ignored or waiting + # for child processes has otherwise been disabled for our + # process. This child is dead, we can't get the status. + pid = self.pid + sts = 0 + return (pid, sts) + + + def _wait(self, timeout): + """Internal implementation of wait() on POSIX.""" + if self.returncode is not None: + return self.returncode + + if timeout is not None: + endtime = _time() + timeout + # Enter a busy loop if we have a timeout. This busy loop was + # cribbed from Lib/threading.py in Thread.wait() at r71065. + delay = 0.0005 # 500 us -> initial delay of 1 ms + while True: + if self._waitpid_lock.acquire(False): + try: + if self.returncode is not None: + break # Another thread waited. + (pid, sts) = self._try_wait(os.WNOHANG) + assert pid == self.pid or pid == 0 + if pid == self.pid: + self._handle_exitstatus(sts) + break + finally: + self._waitpid_lock.release() + remaining = self._remaining_time(endtime) + if remaining <= 0: + raise TimeoutExpired(self.args, timeout) + delay = min(delay * 2, remaining, .05) + time.sleep(delay) + else: + while self.returncode is None: + with self._waitpid_lock: + if self.returncode is not None: + break # Another thread waited. + (pid, sts) = self._try_wait(0) + # Check the pid and loop as waitpid has been known to + # return 0 even without WNOHANG in odd situations. + # http://bugs.python.org/issue14396. + if pid == self.pid: + self._handle_exitstatus(sts) + return self.returncode + + + def _communicate(self, input, endtime, orig_timeout): + if self.stdin and not self._communication_started: + # Flush stdio buffer. This might block, if the user has + # been writing to .stdin in an uncontrolled fashion. + try: + self.stdin.flush() + except BrokenPipeError: + pass # communicate() must ignore BrokenPipeError. + except ValueError: + # ignore ValueError: I/O operation on closed file. + if not self.stdin.closed: + raise + if not input: + try: + self.stdin.close() + except BrokenPipeError: + pass # communicate() must ignore BrokenPipeError. + + stdout = None + stderr = None + + # Only create this mapping if we haven't already. 
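+        # Illustrative sketch: if communicate(timeout=1) raises
+        # TimeoutExpired, a retry reuses self._fileobj2output, so bytes
+        # read before the timeout are not lost.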
+ if not self._communication_started: + self._fileobj2output = {} + if self.stdout: + self._fileobj2output[self.stdout] = [] + if self.stderr: + self._fileobj2output[self.stderr] = [] + + if self.stdout: + stdout = self._fileobj2output[self.stdout] + if self.stderr: + stderr = self._fileobj2output[self.stderr] + + self._save_input(input) + + if self._input: + if not isinstance(self._input, memoryview): + input_view = memoryview(self._input) + else: + input_view = self._input.cast("b") # byte input required + + with _PopenSelector() as selector: + if self.stdin and not self.stdin.closed and self._input: + selector.register(self.stdin, selectors.EVENT_WRITE) + if self.stdout and not self.stdout.closed: + selector.register(self.stdout, selectors.EVENT_READ) + if self.stderr and not self.stderr.closed: + selector.register(self.stderr, selectors.EVENT_READ) + + while selector.get_map(): + timeout = self._remaining_time(endtime) + if timeout is not None and timeout <= 0: + self._check_timeout(endtime, orig_timeout, + stdout, stderr, + skip_check_and_raise=True) + raise RuntimeError( # Impossible :) + '_check_timeout(..., skip_check_and_raise=True) ' + 'failed to raise TimeoutExpired.') + + ready = selector.select(timeout) + self._check_timeout(endtime, orig_timeout, stdout, stderr) + + # XXX Rewrite these to use non-blocking I/O on the file + # objects; they are no longer using C stdio! + + for key, events in ready: + if key.fileobj is self.stdin: + chunk = input_view[self._input_offset : + self._input_offset + _PIPE_BUF] + try: + self._input_offset += os.write(key.fd, chunk) + except BrokenPipeError: + selector.unregister(key.fileobj) + key.fileobj.close() + else: + if self._input_offset >= len(input_view): + selector.unregister(key.fileobj) + key.fileobj.close() + elif key.fileobj in (self.stdout, self.stderr): + data = os.read(key.fd, 32768) + if not data: + selector.unregister(key.fileobj) + key.fileobj.close() + self._fileobj2output[key.fileobj].append(data) + try: + self.wait(timeout=self._remaining_time(endtime)) + except TimeoutExpired as exc: + exc.timeout = orig_timeout + raise + + # All data exchanged. Translate lists into strings. + if stdout is not None: + stdout = b''.join(stdout) + if stderr is not None: + stderr = b''.join(stderr) + + # Translate newlines, if requested. + # This also turns bytes into strings. + if self.text_mode: + if stdout is not None: + stdout = self._translate_newlines(stdout, + self.stdout.encoding, + self.stdout.errors) + if stderr is not None: + stderr = self._translate_newlines(stderr, + self.stderr.encoding, + self.stderr.errors) + + return (stdout, stderr) + + + def _save_input(self, input): + # This method is called from the _communicate_with_*() methods + # so that if we time out while communicating, we can continue + # sending input if we retry. + if self.stdin and self._input is None: + self._input_offset = 0 + self._input = input + if input is not None and self.text_mode: + self._input = self._input.encode(self.stdin.encoding, + self.stdin.errors) + + + def send_signal(self, sig): + """Send a signal to the process.""" + # bpo-38630: Polling reduces the risk of sending a signal to the + # wrong process if the process completed, the Popen.returncode + # attribute is still None, and the pid has been reassigned + # (recycled) to a new different process. This race condition can + # happens in two cases. + # + # Case 1. Thread A calls Popen.poll(), thread B calls + # Popen.send_signal(). In thread A, waitpid() succeed and returns + # the exit status. 
Thread B calls kill() because poll() in thread A + # did not set returncode yet. Calling poll() in thread B prevents + # the race condition thanks to Popen._waitpid_lock. + # + # Case 2. waitpid(pid, 0) has been called directly, without + # using Popen methods: returncode is still None is this case. + # Calling Popen.poll() will set returncode to a default value, + # since waitpid() fails with ProcessLookupError. + self.poll() + if self.returncode is not None: + # Skip signalling a process that we know has already died. + return + + # The race condition can still happen if the race condition + # described above happens between the returncode test + # and the kill() call. + try: + os.kill(self.pid, sig) + except ProcessLookupError: + # Suppress the race condition error; bpo-40550. + pass + + def terminate(self): + """Terminate the process with SIGTERM + """ + self.send_signal(signal.SIGTERM) + + def kill(self): + """Kill the process with SIGKILL + """ + self.send_signal(signal.SIGKILL) diff --git a/Python314_4_x86_Template/Lib/symtable.py b/Python314_4_x86_Template/Lib/symtable.py new file mode 100644 index 00000000..7a30e1ac --- /dev/null +++ b/Python314_4_x86_Template/Lib/symtable.py @@ -0,0 +1,451 @@ +"""Interface to the compiler's internal symbol tables""" + +import _symtable +from _symtable import ( + USE, + DEF_GLOBAL, # noqa: F401 + DEF_NONLOCAL, DEF_LOCAL, + DEF_PARAM, DEF_TYPE_PARAM, DEF_FREE_CLASS, + DEF_IMPORT, DEF_BOUND, DEF_ANNOT, + DEF_COMP_ITER, DEF_COMP_CELL, + SCOPE_OFF, SCOPE_MASK, + FREE, LOCAL, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL +) + +import weakref +from enum import StrEnum + +__all__ = ["symtable", "SymbolTableType", "SymbolTable", "Class", "Function", "Symbol"] + +def symtable(code, filename, compile_type): + """ Return the toplevel *SymbolTable* for the source code. + + *filename* is the name of the file with the code + and *compile_type* is the *compile()* mode argument. + """ + top = _symtable.symtable(code, filename, compile_type) + return _newSymbolTable(top, filename) + +class SymbolTableFactory: + def __init__(self): + self.__memo = weakref.WeakValueDictionary() + + def new(self, table, filename): + if table.type == _symtable.TYPE_FUNCTION: + return Function(table, filename) + if table.type == _symtable.TYPE_CLASS: + return Class(table, filename) + return SymbolTable(table, filename) + + def __call__(self, table, filename): + key = table, filename + obj = self.__memo.get(key, None) + if obj is None: + obj = self.__memo[key] = self.new(table, filename) + return obj + +_newSymbolTable = SymbolTableFactory() + + +class SymbolTableType(StrEnum): + MODULE = "module" + FUNCTION = "function" + CLASS = "class" + ANNOTATION = "annotation" + TYPE_ALIAS = "type alias" + TYPE_PARAMETERS = "type parameters" + TYPE_VARIABLE = "type variable" + + +class SymbolTable: + + def __init__(self, raw_table, filename): + self._table = raw_table + self._filename = filename + self._symbols = {} + + def __repr__(self): + if self.__class__ == SymbolTable: + kind = "" + else: + kind = "%s " % self.__class__.__name__ + + if self._table.name == "top": + return "<{0}SymbolTable for module {1}>".format(kind, self._filename) + else: + return "<{0}SymbolTable for {1} in {2}>".format(kind, + self._table.name, + self._filename) + + def get_type(self): + """Return the type of the symbol table. + + The value returned is one of the values in + the ``SymbolTableType`` enumeration. 
+ """ + if self._table.type == _symtable.TYPE_MODULE: + return SymbolTableType.MODULE + if self._table.type == _symtable.TYPE_FUNCTION: + return SymbolTableType.FUNCTION + if self._table.type == _symtable.TYPE_CLASS: + return SymbolTableType.CLASS + if self._table.type == _symtable.TYPE_ANNOTATION: + return SymbolTableType.ANNOTATION + if self._table.type == _symtable.TYPE_TYPE_ALIAS: + return SymbolTableType.TYPE_ALIAS + if self._table.type == _symtable.TYPE_TYPE_PARAMETERS: + return SymbolTableType.TYPE_PARAMETERS + if self._table.type == _symtable.TYPE_TYPE_VARIABLE: + return SymbolTableType.TYPE_VARIABLE + assert False, f"unexpected type: {self._table.type}" + + def get_id(self): + """Return an identifier for the table. + """ + return self._table.id + + def get_name(self): + """Return the table's name. + + This corresponds to the name of the class, function + or 'top' if the table is for a class, function or + global respectively. + """ + return self._table.name + + def get_lineno(self): + """Return the number of the first line in the + block for the table. + """ + return self._table.lineno + + def is_optimized(self): + """Return *True* if the locals in the table + are optimizable. + """ + return bool(self._table.type == _symtable.TYPE_FUNCTION) + + def is_nested(self): + """Return *True* if the block is a nested class + or function.""" + return bool(self._table.nested) + + def has_children(self): + """Return *True* if the block has nested namespaces. + """ + return bool(self._table.children) + + def get_identifiers(self): + """Return a view object containing the names of symbols in the table. + """ + return self._table.symbols.keys() + + def lookup(self, name): + """Lookup a *name* in the table. + + Returns a *Symbol* instance. + """ + sym = self._symbols.get(name) + if sym is None: + flags = self._table.symbols[name] + namespaces = self.__check_children(name) + module_scope = (self._table.name == "top") + sym = self._symbols[name] = Symbol(name, flags, namespaces, + module_scope=module_scope) + return sym + + def get_symbols(self): + """Return a list of *Symbol* instances for + names in the table. + """ + return [self.lookup(ident) for ident in self.get_identifiers()] + + def __check_children(self, name): + return [_newSymbolTable(st, self._filename) + for st in self._table.children + if st.name == name] + + def get_children(self): + """Return a list of the nested symbol tables. + """ + return [_newSymbolTable(st, self._filename) + for st in self._table.children] + + +def _get_scope(flags): # like _PyST_GetScope() + return (flags >> SCOPE_OFF) & SCOPE_MASK + + +class Function(SymbolTable): + + # Default values for instance variables + __params = None + __locals = None + __frees = None + __globals = None + __nonlocals = None + + def __idents_matching(self, test_func): + return tuple(ident for ident in self.get_identifiers() + if test_func(self._table.symbols[ident])) + + def get_parameters(self): + """Return a tuple of parameters to the function. + """ + if self.__params is None: + self.__params = self.__idents_matching(lambda x:x & DEF_PARAM) + return self.__params + + def get_locals(self): + """Return a tuple of locals in the function. + """ + if self.__locals is None: + locs = (LOCAL, CELL) + test = lambda x: _get_scope(x) in locs + self.__locals = self.__idents_matching(test) + return self.__locals + + def get_globals(self): + """Return a tuple of globals in the function. 
+ """ + if self.__globals is None: + glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT) + test = lambda x: _get_scope(x) in glob + self.__globals = self.__idents_matching(test) + return self.__globals + + def get_nonlocals(self): + """Return a tuple of nonlocals in the function. + """ + if self.__nonlocals is None: + self.__nonlocals = self.__idents_matching(lambda x:x & DEF_NONLOCAL) + return self.__nonlocals + + def get_frees(self): + """Return a tuple of free variables in the function. + """ + if self.__frees is None: + is_free = lambda x: _get_scope(x) == FREE + self.__frees = self.__idents_matching(is_free) + return self.__frees + + +class Class(SymbolTable): + + __methods = None + + def get_methods(self): + """Return a tuple of methods declared in the class. + """ + import warnings + typename = f'{self.__class__.__module__}.{self.__class__.__name__}' + warnings.warn(f'{typename}.get_methods() is deprecated ' + f'and will be removed in Python 3.16.', + DeprecationWarning, stacklevel=2) + + if self.__methods is None: + d = {} + + def is_local_symbol(ident): + flags = self._table.symbols.get(ident, 0) + return ((flags >> SCOPE_OFF) & SCOPE_MASK) == LOCAL + + for st in self._table.children: + # pick the function-like symbols that are local identifiers + if is_local_symbol(st.name): + match st.type: + case _symtable.TYPE_FUNCTION: + # generators are of type TYPE_FUNCTION with a ".0" + # parameter as a first parameter (which makes them + # distinguishable from a function named 'genexpr') + if st.name == 'genexpr' and '.0' in st.varnames: + continue + d[st.name] = 1 + case _symtable.TYPE_TYPE_PARAMETERS: + # Get the function-def block in the annotation + # scope 'st' with the same identifier, if any. + scope_name = st.name + for c in st.children: + if c.name == scope_name and c.type == _symtable.TYPE_FUNCTION: + # A generic generator of type TYPE_FUNCTION + # cannot be a direct child of 'st' (but it + # can be a descendant), e.g.: + # + # class A: + # type genexpr[genexpr] = (x for x in []) + assert scope_name != 'genexpr' or '.0' not in c.varnames + d[scope_name] = 1 + break + self.__methods = tuple(d) + return self.__methods + + +class Symbol: + + def __init__(self, name, flags, namespaces=None, *, module_scope=False): + self.__name = name + self.__flags = flags + self.__scope = _get_scope(flags) + self.__namespaces = namespaces or () + self.__module_scope = module_scope + + def __repr__(self): + flags_str = '|'.join(self._flags_str()) + return f'' + + def _scope_str(self): + return _scopes_value_to_name.get(self.__scope) or str(self.__scope) + + def _flags_str(self): + for flagname, flagvalue in _flags: + if self.__flags & flagvalue == flagvalue: + yield flagname + + def get_name(self): + """Return a name of a symbol. + """ + return self.__name + + def is_referenced(self): + """Return *True* if the symbol is used in + its block. + """ + return bool(self.__flags & USE) + + def is_parameter(self): + """Return *True* if the symbol is a parameter. + """ + return bool(self.__flags & DEF_PARAM) + + def is_type_parameter(self): + """Return *True* if the symbol is a type parameter. + """ + return bool(self.__flags & DEF_TYPE_PARAM) + + def is_global(self): + """Return *True* if the symbol is global. 
+ """ + return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT) + or (self.__module_scope and self.__flags & DEF_BOUND)) + + def is_nonlocal(self): + """Return *True* if the symbol is nonlocal.""" + return bool(self.__flags & DEF_NONLOCAL) + + def is_declared_global(self): + """Return *True* if the symbol is declared global + with a global statement.""" + return bool(self.__scope == GLOBAL_EXPLICIT) + + def is_local(self): + """Return *True* if the symbol is local. + """ + return bool(self.__scope in (LOCAL, CELL) + or (self.__module_scope and self.__flags & DEF_BOUND)) + + def is_annotated(self): + """Return *True* if the symbol is annotated. + """ + return bool(self.__flags & DEF_ANNOT) + + def is_free(self): + """Return *True* if a referenced symbol is + not assigned to. + """ + return bool(self.__scope == FREE) + + def is_free_class(self): + """Return *True* if a class-scoped symbol is free from + the perspective of a method.""" + return bool(self.__flags & DEF_FREE_CLASS) + + def is_imported(self): + """Return *True* if the symbol is created from + an import statement. + """ + return bool(self.__flags & DEF_IMPORT) + + def is_assigned(self): + """Return *True* if a symbol is assigned to.""" + return bool(self.__flags & DEF_LOCAL) + + def is_comp_iter(self): + """Return *True* if the symbol is a comprehension iteration variable. + """ + return bool(self.__flags & DEF_COMP_ITER) + + def is_comp_cell(self): + """Return *True* if the symbol is a cell in an inlined comprehension. + """ + return bool(self.__flags & DEF_COMP_CELL) + + def is_namespace(self): + """Returns *True* if name binding introduces new namespace. + + If the name is used as the target of a function or class + statement, this will be true. + + Note that a single name can be bound to multiple objects. If + is_namespace() is true, the name may also be bound to other + objects, like an int or list, that does not introduce a new + namespace. + """ + return bool(self.__namespaces) + + def get_namespaces(self): + """Return a list of namespaces bound to this name""" + return self.__namespaces + + def get_namespace(self): + """Return the single namespace bound to this name. + + Raises ValueError if the name is bound to multiple namespaces + or no namespace. 
+ """ + if len(self.__namespaces) == 0: + raise ValueError("name is not bound to any namespaces") + elif len(self.__namespaces) > 1: + raise ValueError("name is bound to multiple namespaces") + else: + return self.__namespaces[0] + + +_flags = [('USE', USE)] +_flags.extend(kv for kv in globals().items() if kv[0].startswith('DEF_')) +_scopes_names = ('FREE', 'LOCAL', 'GLOBAL_IMPLICIT', 'GLOBAL_EXPLICIT', 'CELL') +_scopes_value_to_name = {globals()[n]: n for n in _scopes_names} + + +def main(args): + import sys + def print_symbols(table, level=0): + indent = ' ' * level + nested = "nested " if table.is_nested() else "" + if table.get_type() == 'module': + what = f'from file {table._filename!r}' + else: + what = f'{table.get_name()!r}' + print(f'{indent}symbol table for {nested}{table.get_type()} {what}:') + for ident in table.get_identifiers(): + symbol = table.lookup(ident) + flags = ', '.join(symbol._flags_str()).lower() + print(f' {indent}{symbol._scope_str().lower()} symbol {symbol.get_name()!r}: {flags}') + print() + + for table2 in table.get_children(): + print_symbols(table2, level + 1) + + for filename in args or ['-']: + if filename == '-': + src = sys.stdin.read() + filename = '' + else: + with open(filename, 'rb') as f: + src = f.read() + mod = symtable(src, filename, 'exec') + print_symbols(mod) + + +if __name__ == "__main__": + import sys + main(sys.argv[1:]) diff --git a/Python314_4_x86_Template/Lib/sysconfig/__init__.py b/Python314_4_x86_Template/Lib/sysconfig/__init__.py new file mode 100644 index 00000000..faf8273b --- /dev/null +++ b/Python314_4_x86_Template/Lib/sysconfig/__init__.py @@ -0,0 +1,797 @@ +"""Access to Python's configuration information.""" + +import os +import sys +import threading +from os.path import realpath + +__all__ = [ + 'get_config_h_filename', + 'get_config_var', + 'get_config_vars', + 'get_makefile_filename', + 'get_path', + 'get_path_names', + 'get_paths', + 'get_platform', + 'get_python_version', + 'get_scheme_names', + 'parse_config_h', +] + +# Keys for get_config_var() that are never converted to Python integers. 
+_ALWAYS_STR = { + 'IPHONEOS_DEPLOYMENT_TARGET', + 'MACOSX_DEPLOYMENT_TARGET', +} + +_INSTALL_SCHEMES = { + 'posix_prefix': { + 'stdlib': '{installed_base}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'platstdlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'purelib': '{base}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'include': + '{installed_base}/include/{implementation_lower}{py_version_short}{abiflags}', + 'platinclude': + '{installed_platbase}/include/{implementation_lower}{py_version_short}{abiflags}', + 'scripts': '{base}/bin', + 'data': '{base}', + }, + 'posix_home': { + 'stdlib': '{installed_base}/lib/{implementation_lower}', + 'platstdlib': '{base}/lib/{implementation_lower}', + 'purelib': '{base}/lib/{implementation_lower}', + 'platlib': '{base}/lib/{implementation_lower}', + 'include': '{installed_base}/include/{implementation_lower}', + 'platinclude': '{installed_base}/include/{implementation_lower}', + 'scripts': '{base}/bin', + 'data': '{base}', + }, + 'nt': { + 'stdlib': '{installed_base}/Lib', + 'platstdlib': '{base}/Lib', + 'purelib': '{base}/Lib/site-packages', + 'platlib': '{base}/Lib/site-packages', + 'include': '{installed_base}/Include', + 'platinclude': '{installed_base}/Include', + 'scripts': '{base}/Scripts', + 'data': '{base}', + }, + + # Downstream distributors can overwrite the default install scheme. + # This is done to support downstream modifications where distributors change + # the installation layout (eg. different site-packages directory). + # So, distributors will change the default scheme to one that correctly + # represents their layout. + # This presents an issue for projects/people that need to bootstrap virtual + # environments, like virtualenv. As distributors might now be customizing + # the default install scheme, there is no guarantee that the information + # returned by sysconfig.get_default_scheme/get_paths is correct for + # a virtual environment, the only guarantee we have is that it is correct + # for the *current* environment. When bootstrapping a virtual environment, + # we need to know its layout, so that we can place the files in the + # correct locations. + # The "*_venv" install scheme is a scheme to bootstrap virtual environments, + # essentially identical to the default posix_prefix/nt schemes. 
+ # Downstream distributors who patch posix_prefix/nt scheme are encouraged to + # leave the following schemes unchanged + 'posix_venv': { + 'stdlib': '{installed_base}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'platstdlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'purelib': '{base}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'include': + '{installed_base}/include/{implementation_lower}{py_version_short}{abiflags}', + 'platinclude': + '{installed_platbase}/include/{implementation_lower}{py_version_short}{abiflags}', + 'scripts': '{base}/bin', + 'data': '{base}', + }, + 'nt_venv': { + 'stdlib': '{installed_base}/Lib', + 'platstdlib': '{base}/Lib', + 'purelib': '{base}/Lib/site-packages', + 'platlib': '{base}/Lib/site-packages', + 'include': '{installed_base}/Include', + 'platinclude': '{installed_base}/Include', + 'scripts': '{base}/Scripts', + 'data': '{base}', + }, + } + +# For the OS-native venv scheme, we essentially provide an alias: +if os.name == 'nt': + _INSTALL_SCHEMES['venv'] = _INSTALL_SCHEMES['nt_venv'] +else: + _INSTALL_SCHEMES['venv'] = _INSTALL_SCHEMES['posix_venv'] + +def _get_implementation(): + return 'Python' + +# NOTE: site.py has copy of this function. +# Sync it when modify this function. +def _getuserbase(): + env_base = os.environ.get("PYTHONUSERBASE", None) + if env_base: + return env_base + + # Emscripten, iOS, tvOS, VxWorks, WASI, and watchOS have no home directories. + # Use _PYTHON_HOST_PLATFORM to get the correct platform when cross-compiling. + system_name = os.environ.get('_PYTHON_HOST_PLATFORM', sys.platform).split('-')[0] + if system_name in {"emscripten", "ios", "tvos", "vxworks", "wasi", "watchos"}: + return None + + def joinuser(*args): + return os.path.expanduser(os.path.join(*args)) + + if os.name == "nt": + base = os.environ.get("APPDATA") or "~" + return joinuser(base, _get_implementation()) + + if sys.platform == "darwin" and sys._framework: + return joinuser("~", "Library", sys._framework, + f"{sys.version_info[0]}.{sys.version_info[1]}") + + return joinuser("~", ".local") + +_HAS_USER_BASE = (_getuserbase() is not None) + +if _HAS_USER_BASE: + _INSTALL_SCHEMES |= { + # NOTE: When modifying "purelib" scheme, update site._get_path() too. 
+ 'nt_user': { + 'stdlib': '{userbase}/{implementation}{py_version_nodot_plat}', + 'platstdlib': '{userbase}/{implementation}{py_version_nodot_plat}', + 'purelib': '{userbase}/{implementation}{py_version_nodot_plat}/site-packages', + 'platlib': '{userbase}/{implementation}{py_version_nodot_plat}/site-packages', + 'include': '{userbase}/{implementation}{py_version_nodot_plat}/Include', + 'scripts': '{userbase}/{implementation}{py_version_nodot_plat}/Scripts', + 'data': '{userbase}', + }, + 'posix_user': { + 'stdlib': '{userbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'platstdlib': '{userbase}/{platlibdir}/{implementation_lower}{py_version_short}{abi_thread}', + 'purelib': '{userbase}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'platlib': '{userbase}/lib/{implementation_lower}{py_version_short}{abi_thread}/site-packages', + 'include': '{userbase}/include/{implementation_lower}{py_version_short}{abi_thread}', + 'scripts': '{userbase}/bin', + 'data': '{userbase}', + }, + 'osx_framework_user': { + 'stdlib': '{userbase}/lib/{implementation_lower}', + 'platstdlib': '{userbase}/lib/{implementation_lower}', + 'purelib': '{userbase}/lib/{implementation_lower}/site-packages', + 'platlib': '{userbase}/lib/{implementation_lower}/site-packages', + 'include': '{userbase}/include/{implementation_lower}{py_version_short}', + 'scripts': '{userbase}/bin', + 'data': '{userbase}', + }, + } + +_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include', + 'scripts', 'data') + +_PY_VERSION = sys.version.split()[0] +_PY_VERSION_SHORT = f'{sys.version_info[0]}.{sys.version_info[1]}' +_PY_VERSION_SHORT_NO_DOT = f'{sys.version_info[0]}{sys.version_info[1]}' +_BASE_PREFIX = os.path.normpath(sys.base_prefix) +_BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) +# Mutex guarding initialization of _CONFIG_VARS. +_CONFIG_VARS_LOCK = threading.RLock() +_CONFIG_VARS = None +# True iff _CONFIG_VARS has been fully initialized. +_CONFIG_VARS_INITIALIZED = False +_USER_BASE = None + + +def _safe_realpath(path): + try: + return realpath(path) + except OSError: + return path + +if sys.executable: + _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) +else: + # sys.executable can be empty if argv[0] has been changed and Python is + # unable to retrieve the real program name + _PROJECT_BASE = _safe_realpath(os.getcwd()) + +# In a virtual environment, `sys._home` gives us the target directory +# `_PROJECT_BASE` for the executable that created it when the virtual +# python is an actual executable ('venv --copies' or Windows). +_sys_home = getattr(sys, '_home', None) +if _sys_home: + _PROJECT_BASE = _sys_home + +if os.name == 'nt': + # In a source build, the executable is in a subdirectory of the root + # that we want (\PCbuild\). + # `_BASE_PREFIX` is used as the base installation is where the source + # will be. The realpath is needed to prevent mount point confusion + # that can occur with just string comparisons. + if _safe_realpath(_PROJECT_BASE).startswith( + _safe_realpath(f'{_BASE_PREFIX}\\PCbuild')): + _PROJECT_BASE = _BASE_PREFIX + +# set for cross builds +if "_PYTHON_PROJECT_BASE" in os.environ: + _PROJECT_BASE = _safe_realpath(os.environ["_PYTHON_PROJECT_BASE"]) + +def is_python_build(check_home=None): + if check_home is not None: + import warnings + warnings.warn( + ( + 'The check_home argument of sysconfig.is_python_build is ' + 'deprecated and its value is ignored. ' + 'It will be removed in Python 3.15.' 
+ ), + DeprecationWarning, + stacklevel=2, + ) + for fn in ("Setup", "Setup.local"): + if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): + return True + return False + +_PYTHON_BUILD = is_python_build() + +if _PYTHON_BUILD: + for scheme in ('posix_prefix', 'posix_home'): + # On POSIX-y platforms, Python will: + # - Build from .h files in 'headers' (which is only added to the + # scheme when building CPython) + # - Install .h files to 'include' + scheme = _INSTALL_SCHEMES[scheme] + scheme['headers'] = scheme['include'] + scheme['include'] = '{srcdir}/Include' + scheme['platinclude'] = '{projectbase}/.' + del scheme + + +def _subst_vars(s, local_vars): + try: + return s.format(**local_vars) + except KeyError as var: + try: + return s.format(**os.environ) + except KeyError: + raise AttributeError(f'{var}') from None + +def _extend_dict(target_dict, other_dict): + target_keys = target_dict.keys() + for key, value in other_dict.items(): + if key in target_keys: + continue + target_dict[key] = value + + +def _expand_vars(scheme, vars): + res = {} + if vars is None: + vars = {} + _extend_dict(vars, get_config_vars()) + if os.name == 'nt': + # On Windows we want to substitute 'lib' for schemes rather + # than the native value (without modifying vars, in case it + # was passed in) + vars = vars | {'platlibdir': 'lib'} + + for key, value in _INSTALL_SCHEMES[scheme].items(): + if os.name in ('posix', 'nt'): + value = os.path.expanduser(value) + res[key] = os.path.normpath(_subst_vars(value, vars)) + return res + + +def _get_preferred_schemes(): + if os.name == 'nt': + return { + 'prefix': 'nt', + 'home': 'posix_home', + 'user': 'nt_user', + } + if sys.platform == 'darwin' and sys._framework: + return { + 'prefix': 'posix_prefix', + 'home': 'posix_home', + 'user': 'osx_framework_user', + } + + return { + 'prefix': 'posix_prefix', + 'home': 'posix_home', + 'user': 'posix_user', + } + + +def get_preferred_scheme(key): + if key == 'prefix' and sys.prefix != sys.base_prefix: + return 'venv' + scheme = _get_preferred_schemes()[key] + if scheme not in _INSTALL_SCHEMES: + raise ValueError( + f"{key!r} returned {scheme!r}, which is not a valid scheme " + f"on this platform" + ) + return scheme + + +def get_default_scheme(): + return get_preferred_scheme('prefix') + + +def get_makefile_filename(): + """Return the path of the Makefile.""" + + # GH-127429: When cross-compiling, use the Makefile from the target, instead of the host Python. 
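+    # Illustrative example (the path is an assumption, not upstream
+    # output): with _PYTHON_PROJECT_BASE=/build/target set in the
+    # environment, the walrus expression below short-circuits the normal
+    # lookup and the function returns '/build/target/Makefile'.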
+ if cross_base := os.environ.get('_PYTHON_PROJECT_BASE'): + return os.path.join(cross_base, 'Makefile') + + if _PYTHON_BUILD: + return os.path.join(_PROJECT_BASE, "Makefile") + + if hasattr(sys, 'abiflags'): + config_dir_name = f'config-{_PY_VERSION_SHORT}{sys.abiflags}' + else: + config_dir_name = 'config' + + if hasattr(sys.implementation, '_multiarch'): + config_dir_name += f'-{sys.implementation._multiarch}' + + return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') + + +def _import_from_directory(path, name): + if name not in sys.modules: + import importlib.machinery + import importlib.util + + spec = importlib.machinery.PathFinder.find_spec(name, [path]) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + sys.modules[name] = module + return sys.modules[name] + + +def _get_sysconfigdata_name(): + multiarch = getattr(sys.implementation, '_multiarch', '') + return os.environ.get( + '_PYTHON_SYSCONFIGDATA_NAME', + f'_sysconfigdata_{sys.abiflags}_{sys.platform}_{multiarch}', + ) + + +def _get_sysconfigdata(): + import importlib + + name = _get_sysconfigdata_name() + path = os.environ.get('_PYTHON_SYSCONFIGDATA_PATH') + module = _import_from_directory(path, name) if path else importlib.import_module(name) + + return module.build_time_vars + + +def _installation_is_relocated(): + """Is the Python installation running from a different prefix than what was targetted when building?""" + if os.name != 'posix': + raise NotImplementedError('sysconfig._installation_is_relocated() is currently only supported on POSIX') + + data = _get_sysconfigdata() + return ( + data['prefix'] != getattr(sys, 'base_prefix', '') + or data['exec_prefix'] != getattr(sys, 'base_exec_prefix', '') + ) + + +def _init_posix(vars): + """Initialize the module as appropriate for POSIX systems.""" + # GH-126920: Make sure we don't overwrite any of the keys already set + vars.update(_get_sysconfigdata() | vars) + + +def _init_non_posix(vars): + """Initialize the module as appropriate for NT""" + # set basic install directories + import _winapi + import _sysconfig + vars['LIBDEST'] = get_path('stdlib') + vars['BINLIBDEST'] = get_path('platstdlib') + vars['INCLUDEPY'] = get_path('include') + + # Add EXT_SUFFIX, SOABI, Py_DEBUG, and Py_GIL_DISABLED + vars.update(_sysconfig.config_vars()) + + # NOTE: ABIFLAGS is only an emulated value. It is not present during build + # on Windows. sys.abiflags is absent on Windows and vars['abiflags'] + # is already widely used to calculate paths, so it should remain an + # empty string. + vars['ABIFLAGS'] = ''.join( + ( + 't' if vars['Py_GIL_DISABLED'] else '', + '_d' if vars['Py_DEBUG'] else '', + ), + ) + + vars['LIBDIR'] = _safe_realpath(os.path.join(get_config_var('installed_base'), 'libs')) + if hasattr(sys, 'dllhandle'): + dllhandle = _winapi.GetModuleFileName(sys.dllhandle) + vars['LIBRARY'] = os.path.basename(_safe_realpath(dllhandle)) + vars['LDLIBRARY'] = vars['LIBRARY'] + vars['EXE'] = '.exe' + vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT + vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) + vars['TZPATH'] = '' + +# +# public APIs +# + + +def parse_config_h(fp, vars=None): + """Parse a config.h-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. 
+ """ + if vars is None: + vars = {} + import re + define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") + undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") + + while True: + line = fp.readline() + if not line: + break + m = define_rx.match(line) + if m: + n, v = m.group(1, 2) + try: + if n in _ALWAYS_STR: + raise ValueError + v = int(v) + except ValueError: + pass + vars[n] = v + else: + m = undef_rx.match(line) + if m: + vars[m.group(1)] = 0 + return vars + + +def get_config_h_filename(): + """Return the path of pyconfig.h.""" + if _PYTHON_BUILD: + if os.name == "nt": + inc_dir = os.path.join(_PROJECT_BASE, 'PC') + else: + inc_dir = _PROJECT_BASE + else: + inc_dir = get_path('platinclude') + return os.path.join(inc_dir, 'pyconfig.h') + + +def get_scheme_names(): + """Return a tuple containing the schemes names.""" + return tuple(sorted(_INSTALL_SCHEMES)) + + +def get_path_names(): + """Return a tuple containing the paths names.""" + return _SCHEME_KEYS + + +def get_paths(scheme=get_default_scheme(), vars=None, expand=True): + """Return a mapping containing an install scheme. + + ``scheme`` is the install scheme name. If not provided, it will + return the default scheme for the current platform. + """ + if expand: + return _expand_vars(scheme, vars) + else: + return _INSTALL_SCHEMES[scheme] + + +def get_path(name, scheme=get_default_scheme(), vars=None, expand=True): + """Return a path corresponding to the scheme. + + ``scheme`` is the install scheme name. + """ + return get_paths(scheme, vars, expand)[name] + + +def _init_config_vars(): + global _CONFIG_VARS + _CONFIG_VARS = {} + + prefix = os.path.normpath(sys.prefix) + exec_prefix = os.path.normpath(sys.exec_prefix) + base_prefix = _BASE_PREFIX + base_exec_prefix = _BASE_EXEC_PREFIX + + try: + abiflags = sys.abiflags + except AttributeError: + abiflags = '' + + if os.name == 'posix': + _init_posix(_CONFIG_VARS) + # If we are cross-compiling, load the prefixes from the Makefile instead. + if '_PYTHON_PROJECT_BASE' in os.environ: + prefix = _CONFIG_VARS['host_prefix'] + exec_prefix = _CONFIG_VARS['host_exec_prefix'] + base_prefix = _CONFIG_VARS['host_prefix'] + base_exec_prefix = _CONFIG_VARS['host_exec_prefix'] + abiflags = _CONFIG_VARS['ABIFLAGS'] + + # Normalized versions of prefix and exec_prefix are handy to have; + # in fact, these are the standard versions used most places in the + # Distutils. + _CONFIG_VARS['prefix'] = prefix + _CONFIG_VARS['exec_prefix'] = exec_prefix + _CONFIG_VARS['py_version'] = _PY_VERSION + _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT + _CONFIG_VARS['py_version_nodot'] = _PY_VERSION_SHORT_NO_DOT + _CONFIG_VARS['installed_base'] = base_prefix + _CONFIG_VARS['base'] = prefix + _CONFIG_VARS['installed_platbase'] = base_exec_prefix + _CONFIG_VARS['platbase'] = exec_prefix + _CONFIG_VARS['projectbase'] = _PROJECT_BASE + _CONFIG_VARS['platlibdir'] = sys.platlibdir + _CONFIG_VARS['implementation'] = _get_implementation() + _CONFIG_VARS['implementation_lower'] = _get_implementation().lower() + _CONFIG_VARS['abiflags'] = abiflags + try: + _CONFIG_VARS['py_version_nodot_plat'] = sys.winver.replace('.', '') + except AttributeError: + _CONFIG_VARS['py_version_nodot_plat'] = '' + + if os.name == 'nt': + _init_non_posix(_CONFIG_VARS) + _CONFIG_VARS['VPATH'] = sys._vpath + if _HAS_USER_BASE: + # Setting 'userbase' is done below the call to the + # init function to enable using 'get_config_var' in + # the init-function. 
+ _CONFIG_VARS['userbase'] = _getuserbase() + + # e.g., 't' for free-threaded or '' for default build + _CONFIG_VARS['abi_thread'] = 't' if _CONFIG_VARS.get('Py_GIL_DISABLED') else '' + + # Always convert srcdir to an absolute path + srcdir = _CONFIG_VARS.get('srcdir', _PROJECT_BASE) + if os.name == 'posix': + if _PYTHON_BUILD: + # If srcdir is a relative path (typically '.' or '..') + # then it should be interpreted relative to the directory + # containing Makefile. + base = os.path.dirname(get_makefile_filename()) + srcdir = os.path.join(base, srcdir) + else: + # srcdir is not meaningful since the installation is + # spread about the filesystem. We choose the + # directory containing the Makefile since we know it + # exists. + srcdir = os.path.dirname(get_makefile_filename()) + _CONFIG_VARS['srcdir'] = _safe_realpath(srcdir) + + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers + if sys.platform == 'darwin': + import _osx_support + _osx_support.customize_config_vars(_CONFIG_VARS) + + global _CONFIG_VARS_INITIALIZED + _CONFIG_VARS_INITIALIZED = True + + +def get_config_vars(*args): + """With no arguments, return a dictionary of all configuration + variables relevant for the current platform. + + On Unix, this means every variable defined in Python's installed Makefile; + On Windows it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + """ + global _CONFIG_VARS_INITIALIZED + + # Avoid claiming the lock once initialization is complete. + if _CONFIG_VARS_INITIALIZED: + # GH-126789: If sys.prefix or sys.exec_prefix were updated, invalidate the cache. + prefix = os.path.normpath(sys.prefix) + exec_prefix = os.path.normpath(sys.exec_prefix) + if _CONFIG_VARS['prefix'] != prefix or _CONFIG_VARS['exec_prefix'] != exec_prefix: + with _CONFIG_VARS_LOCK: + _CONFIG_VARS_INITIALIZED = False + _init_config_vars() + else: + # Initialize the config_vars cache. + with _CONFIG_VARS_LOCK: + # Test again with the lock held to avoid races. Note that + # we test _CONFIG_VARS here, not _CONFIG_VARS_INITIALIZED, + # to ensure that recursive calls to get_config_vars() + # don't re-enter init_config_vars(). + if _CONFIG_VARS is None: + _init_config_vars() + + if args: + vals = [] + for name in args: + vals.append(_CONFIG_VARS.get(name)) + return vals + else: + return _CONFIG_VARS + + +def get_config_var(name): + """Return the value of a single variable using the dictionary returned by + 'get_config_vars()'. + + Equivalent to get_config_vars().get(name) + """ + return get_config_vars().get(name) + + +def get_platform(): + """Return a string that identifies the current platform. + + This is used mainly to distinguish platform-specific build directories and + platform-specific built distributions. Typically includes the OS name and + version and the architecture (as supplied by 'os.uname()'), although the + exact information included depends on the OS; on Linux, the kernel version + isn't particularly important. 
+ + Examples of returned values: + + + Windows: + + - win-amd64 (64-bit Windows on AMD64, aka x86_64, Intel64, and EM64T) + - win-arm64 (64-bit Windows on ARM64, aka AArch64) + - win32 (all others - specifically, sys.platform is returned) + + POSIX based OS: + + - linux-x86_64 + - macosx-15.5-arm64 + - macosx-26.0-universal2 (macOS on Apple Silicon or Intel) + - android-24-arm64_v8a + + For other non-POSIX platforms, currently just returns :data:`sys.platform`.""" + if os.name == 'nt': + if 'amd64' in sys.version.lower(): + return 'win-amd64' + if '(arm)' in sys.version.lower(): + return 'win-arm32' + if '(arm64)' in sys.version.lower(): + return 'win-arm64' + return sys.platform + + if os.name != "posix" or not hasattr(os, 'uname'): + # XXX what about the architecture? NT is Intel or Alpha + return sys.platform + + # Set for cross builds explicitly + if "_PYTHON_HOST_PLATFORM" in os.environ: + osname, _, machine = os.environ["_PYTHON_HOST_PLATFORM"].partition('-') + release = None + else: + # Try to distinguish various flavours of Unix + osname, host, release, version, machine = os.uname() + + # Convert the OS name to lowercase, remove '/' characters, and translate + # spaces (for "Power Macintosh") + osname = osname.lower().replace('/', '') + machine = machine.replace(' ', '_') + machine = machine.replace('/', '-') + + if osname == "android" or sys.platform == "android": + osname = "android" + release = get_config_var("ANDROID_API_LEVEL") + + # Wheel tags use the ABI names from Android's own tools. + # When Python is running on 32-bit ARM Android on a 64-bit ARM kernel, + # 'os.uname().machine' is 'armv8l'. Such devices run the same userspace + # code as 'armv7l' devices. + # During the build process of the Android testbed when targeting 32-bit ARM, + # '_PYTHON_HOST_PLATFORM' is 'arm-linux-androideabi', so 'machine' becomes + # 'arm'. + machine = { + "aarch64": "arm64_v8a", + "arm": "armeabi_v7a", + "armv7l": "armeabi_v7a", + "armv8l": "armeabi_v7a", + "i686": "x86", + "x86_64": "x86_64", + }[machine] + elif osname == "linux": + # At least on Linux/Intel, 'machine' is the processor -- + # i386, etc. + # XXX what about Alpha, SPARC, etc? + return f"{osname}-{machine}" + elif osname[:5] == "sunos": + if release[0] >= "5": # SunOS 5 == Solaris 2 + osname = "solaris" + release = f"{int(release[0]) - 3}.{release[2:]}" + # We can't use "platform.architecture()[0]" because a + # bootstrap problem. We use a dict to get an error + # if some suspicious happens. 
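+            # Worked example (illustrative only): on 64-bit Solaris 11,
+            # sys.maxsize == 2**63 - 1 selects "64bit", so a machine of
+            # "i86pc" becomes "i86pc.64bit" and the final result is
+            # "solaris-2.11-i86pc.64bit".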
+ bitness = {2147483647:"32bit", 9223372036854775807:"64bit"} + machine += f".{bitness[sys.maxsize]}" + # fall through to standard osname-release-machine representation + elif osname[:3] == "aix": + from _aix_support import aix_platform + return aix_platform() + elif osname[:6] == "cygwin": + osname = "cygwin" + import re + rel_re = re.compile(r'[\d.]+') + m = rel_re.match(release) + if m: + release = m.group() + elif osname[:6] == "darwin": + if sys.platform == "ios": + release = get_config_vars().get("IPHONEOS_DEPLOYMENT_TARGET", "13.0") + osname = sys.platform + machine = sys.implementation._multiarch + else: + import _osx_support + osname, release, machine = _osx_support.get_platform_osx( + get_config_vars(), + osname, release, machine) + + return '-'.join(map(str, filter(None, (osname, release, machine)))) + + +def get_python_version(): + return _PY_VERSION_SHORT + + +def _get_python_version_abi(): + return _PY_VERSION_SHORT + get_config_var("abi_thread") + + +def expand_makefile_vars(s, vars): + """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in + 'string' according to 'vars' (a dictionary mapping variable names to + values). Variables not present in 'vars' are silently expanded to the + empty string. The variable values in 'vars' should not contain further + variable expansions; if 'vars' is the output of 'parse_makefile()', + you're fine. Returns a variable-expanded version of 's'. + """ + + import warnings + warnings.warn( + 'sysconfig.expand_makefile_vars is deprecated and will be removed in ' + 'Python 3.16. Use sysconfig.get_paths(vars=...) instead.', + DeprecationWarning, + stacklevel=2, + ) + + import re + + _findvar1_rx = r"\$\(([A-Za-z][A-Za-z0-9_]*)\)" + _findvar2_rx = r"\${([A-Za-z][A-Za-z0-9_]*)}" + + # This algorithm does multiple expansion, so if vars['foo'] contains + # "${bar}", it will expand ${foo} to ${bar}, and then expand + # ${bar}... and so forth. This is fine as long as 'vars' comes from + # 'parse_makefile()', which takes care of such expansions eagerly, + # according to make's variable expansion semantics. + + while True: + m = re.search(_findvar1_rx, s) or re.search(_findvar2_rx, s) + if m: + (beg, end) = m.span() + s = s[0:beg] + vars.get(m.group(1)) + s[end:] + else: + break + return s diff --git a/Python314_4_x86_Template/Lib/sysconfig/__main__.py b/Python314_4_x86_Template/Lib/sysconfig/__main__.py new file mode 100644 index 00000000..bc2197cf --- /dev/null +++ b/Python314_4_x86_Template/Lib/sysconfig/__main__.py @@ -0,0 +1,276 @@ +import json +import os +import sys +import types +from sysconfig import ( + _ALWAYS_STR, + _PYTHON_BUILD, + _get_sysconfigdata_name, + get_config_h_filename, + get_config_var, + get_config_vars, + get_default_scheme, + get_makefile_filename, + get_paths, + get_platform, + get_python_version, + parse_config_h, +) + + +# Regexes needed for parsing Makefile (and similar syntaxes, +# like old-style Setup files). +_variable_rx = r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)" +_findvar1_rx = r"\$\(([A-Za-z][A-Za-z0-9_]*)\)" +_findvar2_rx = r"\${([A-Za-z][A-Za-z0-9_]*)}" + + +def _parse_makefile(filename, vars=None, keep_unresolved=True): + """Parse a Makefile-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. 
+ """ + import re + + if vars is None: + vars = {} + done = {} + notdone = {} + + with open(filename, encoding=sys.getfilesystemencoding(), + errors="surrogateescape") as f: + lines = f.readlines() + + for line in lines: + if line.startswith('#') or line.strip() == '': + continue + m = re.match(_variable_rx, line) + if m: + n, v = m.group(1, 2) + v = v.strip() + # `$$' is a literal `$' in make + tmpv = v.replace('$$', '') + + if "$" in tmpv: + notdone[n] = v + else: + try: + if n in _ALWAYS_STR: + raise ValueError + + v = int(v) + except ValueError: + # insert literal `$' + done[n] = v.replace('$$', '$') + else: + done[n] = v + + # do variable interpolation here + variables = list(notdone.keys()) + + # Variables with a 'PY_' prefix in the makefile. These need to + # be made available without that prefix through sysconfig. + # Special care is needed to ensure that variable expansion works, even + # if the expansion uses the name without a prefix. + renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') + + while len(variables) > 0: + for name in tuple(variables): + value = notdone[name] + m1 = re.search(_findvar1_rx, value) + m2 = re.search(_findvar2_rx, value) + if m1 and m2: + m = m1 if m1.start() < m2.start() else m2 + else: + m = m1 if m1 else m2 + if m is not None: + n = m.group(1) + found = True + if n in done: + item = str(done[n]) + elif n in notdone: + # get it on a subsequent round + found = False + elif n in os.environ: + # do it like make: fall back to environment + item = os.environ[n] + + elif n in renamed_variables: + if (name.startswith('PY_') and + name[3:] in renamed_variables): + item = "" + + elif 'PY_' + n in notdone: + found = False + + else: + item = str(done['PY_' + n]) + + else: + done[n] = item = "" + + if found: + after = value[m.end():] + value = value[:m.start()] + item + after + if "$" in after: + notdone[name] = value + else: + try: + if name in _ALWAYS_STR: + raise ValueError + value = int(value) + except ValueError: + done[name] = value.strip() + else: + done[name] = value + variables.remove(name) + + if name.startswith('PY_') \ + and name[3:] in renamed_variables: + + name = name[3:] + if name not in done: + done[name] = value + + else: + # Adds unresolved variables to the done dict. + # This is disabled when called from distutils.sysconfig + if keep_unresolved: + done[name] = value + # bogus variable reference (e.g. 
"prefix=$/opt/python"); + # just drop it since we can't deal + variables.remove(name) + + # strip spurious spaces + for k, v in done.items(): + if isinstance(v, str): + done[k] = v.strip() + + # save the results in the global dictionary + vars.update(done) + return vars + + +def _print_config_dict(d, stream): + print ("{", file=stream) + for k, v in sorted(d.items()): + print(f" {k!r}: {v!r},", file=stream) + print ("}", file=stream) + + +def _get_pybuilddir(): + pybuilddir = f'build/lib.{get_platform()}-{get_python_version()}' + if get_config_var('Py_DEBUG') == '1': + pybuilddir += '-pydebug' + return pybuilddir + + +def _get_json_data_name(): + name = _get_sysconfigdata_name() + assert name.startswith('_sysconfigdata') + return name.replace('_sysconfigdata', '_sysconfig_vars') + '.json' + + +def _generate_posix_vars(): + """Generate the Python module containing build-time variables.""" + vars = {} + # load the installed Makefile: + makefile = get_makefile_filename() + try: + _parse_makefile(makefile, vars) + except OSError as e: + msg = f"invalid Python installation: unable to open {makefile}" + if hasattr(e, "strerror"): + msg = f"{msg} ({e.strerror})" + raise OSError(msg) + # load the installed pyconfig.h: + config_h = get_config_h_filename() + try: + with open(config_h, encoding="utf-8") as f: + parse_config_h(f, vars) + except OSError as e: + msg = f"invalid Python installation: unable to open {config_h}" + if hasattr(e, "strerror"): + msg = f"{msg} ({e.strerror})" + raise OSError(msg) + # On AIX, there are wrong paths to the linker scripts in the Makefile + # -- these paths are relative to the Python source, but when installed + # the scripts are in another directory. + if _PYTHON_BUILD: + vars['BLDSHARED'] = vars['LDSHARED'] + + name = _get_sysconfigdata_name() + + # There's a chicken-and-egg situation on OS X with regards to the + # _sysconfigdata module after the changes introduced by #15298: + # get_config_vars() is called by get_platform() as part of the + # `make pybuilddir.txt` target -- which is a precursor to the + # _sysconfigdata.py module being constructed. Unfortunately, + # get_config_vars() eventually calls _init_posix(), which attempts + # to import _sysconfigdata, which we won't have built yet. In order + # for _init_posix() to work, if we're on Darwin, just mock up the + # _sysconfigdata module manually and populate it with the build vars. + # This is more than sufficient for ensuring the subsequent call to + # get_platform() succeeds. + # GH-127178: Since we started generating a .json file, we also need this to + # be able to run sysconfig.get_config_vars(). 
+ module = types.ModuleType(name) + module.build_time_vars = vars + sys.modules[name] = module + + pybuilddir = _get_pybuilddir() + os.makedirs(pybuilddir, exist_ok=True) + destfile = os.path.join(pybuilddir, name + '.py') + + with open(destfile, 'w', encoding='utf8') as f: + f.write('# system configuration generated and used by' + ' the sysconfig module\n') + f.write('build_time_vars = ') + _print_config_dict(vars, stream=f) + + print(f'Written {destfile}') + + install_vars = get_config_vars() + # Fix config vars to match the values after install (of the default environment) + install_vars['projectbase'] = install_vars['BINDIR'] + install_vars['srcdir'] = install_vars['LIBPL'] + # Write a JSON file with the output of sysconfig.get_config_vars + jsonfile = os.path.join(pybuilddir, _get_json_data_name()) + with open(jsonfile, 'w') as f: + json.dump(install_vars, f, indent=2) + + print(f'Written {jsonfile}') + + # Create file used for sys.path fixup -- see Modules/getpath.c + with open('pybuilddir.txt', 'w', encoding='utf8') as f: + f.write(pybuilddir) + + +def _print_dict(title, data): + for index, (key, value) in enumerate(sorted(data.items())): + if index == 0: + print(f'{title}: ') + print(f'\t{key} = "{value}"') + + +def _main(): + """Display all information sysconfig detains.""" + if '--generate-posix-vars' in sys.argv: + _generate_posix_vars() + return + print(f'Platform: "{get_platform()}"') + print(f'Python version: "{get_python_version()}"') + print(f'Current installation scheme: "{get_default_scheme()}"') + print() + _print_dict('Paths', get_paths()) + print() + _print_dict('Variables', get_config_vars()) + + +if __name__ == '__main__': + try: + _main() + except BrokenPipeError: + pass diff --git a/Python314_4_x86_Template/Lib/sysconfig/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/sysconfig/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..ce36e061 Binary files /dev/null and b/Python314_4_x86_Template/Lib/sysconfig/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/tabnanny.py b/Python314_4_x86_Template/Lib/tabnanny.py new file mode 100644 index 00000000..c0097351 --- /dev/null +++ b/Python314_4_x86_Template/Lib/tabnanny.py @@ -0,0 +1,338 @@ +"""The Tab Nanny despises ambiguous indentation. She knows no mercy. + +tabnanny -- Detection of ambiguous indentation + +For the time being this module is intended to be called as a script. +However it is possible to import it into an IDE and use the function +check() described below. + +Warning: The API provided by this module is likely to change in future +releases; such changes may not be backward compatible. +""" + +# Released to the public domain, by Tim Peters, 15 April 1998. + +# XXX Note: this is now a standard library module. +# XXX The API needs to undergo changes however; the current code is too +# XXX script-like. This will be addressed later. 
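+# Example usage (illustrative, not part of the upstream module):
+#     python -m tabnanny -v some_package/
+# or, from code:
+#     import tabnanny
+#     tabnanny.check("some_module.py")   # prints any offending lines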
+ +__version__ = "6" + +import os +import sys +import tokenize + +__all__ = ["check", "NannyNag", "process_tokens"] + +verbose = 0 +filename_only = 0 + +def errprint(*args): + sep = "" + for arg in args: + sys.stderr.write(sep + str(arg)) + sep = " " + sys.stderr.write("\n") + sys.exit(1) + +def main(): + import getopt + + global verbose, filename_only + try: + opts, args = getopt.getopt(sys.argv[1:], "qv") + except getopt.error as msg: + errprint(msg) + for o, a in opts: + if o == '-q': + filename_only = filename_only + 1 + if o == '-v': + verbose = verbose + 1 + if not args: + errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...") + for arg in args: + check(arg) + +class NannyNag(Exception): + """ + Raised by process_tokens() if detecting an ambiguous indent. + Captured and handled in check(). + """ + def __init__(self, lineno, msg, line): + self.lineno, self.msg, self.line = lineno, msg, line + def get_lineno(self): + return self.lineno + def get_msg(self): + return self.msg + def get_line(self): + return self.line + +def check(file): + """check(file_or_dir) + + If file_or_dir is a directory and not a symbolic link, then recursively + descend the directory tree named by file_or_dir, checking all .py files + along the way. If file_or_dir is an ordinary Python source file, it is + checked for whitespace related problems. The diagnostic messages are + written to standard output using the print statement. + """ + + if os.path.isdir(file) and not os.path.islink(file): + if verbose: + print("%r: listing directory" % (file,)) + names = os.listdir(file) + for name in names: + fullname = os.path.join(file, name) + if (os.path.isdir(fullname) and + not os.path.islink(fullname) or + os.path.normcase(name[-3:]) == ".py"): + check(fullname) + return + + try: + f = tokenize.open(file) + except OSError as msg: + errprint("%r: I/O Error: %s" % (file, msg)) + return + + if verbose > 1: + print("checking %r ..." % file) + + try: + process_tokens(tokenize.generate_tokens(f.readline)) + + except tokenize.TokenError as msg: + errprint("%r: Token Error: %s" % (file, msg)) + return + + except IndentationError as msg: + errprint("%r: Indentation Error: %s" % (file, msg)) + return + + except SyntaxError as msg: + errprint("%r: Syntax Error: %s" % (file, msg)) + return + + except NannyNag as nag: + badline = nag.get_lineno() + line = nag.get_line() + if verbose: + print("%r: *** Line %d: trouble in tab city! ***" % (file, badline)) + print("offending line: %r" % (line,)) + print(nag.get_msg()) + else: + if ' ' in file: file = '"' + file + '"' + if filename_only: print(file) + else: print(file, badline, repr(line)) + return + + finally: + f.close() + + if verbose: + print("%r: Clean bill of health." % (file,)) + +class Whitespace: + # the characters used for space and tab + S, T = ' \t' + + # members: + # raw + # the original string + # n + # the number of leading whitespace characters in raw + # nt + # the number of tabs in raw[:n] + # norm + # the normal form as a pair (count, trailing), where: + # count + # a tuple such that raw[:n] contains count[i] + # instances of S * i + T + # trailing + # the number of trailing spaces in raw[:n] + # It's A Theorem that m.indent_level(t) == + # n.indent_level(t) for all t >= 1 iff m.norm == n.norm. 
+ # is_simple + # true iff raw[:n] is of the form (T*)(S*) + + def __init__(self, ws): + self.raw = ws + S, T = Whitespace.S, Whitespace.T + count = [] + b = n = nt = 0 + for ch in self.raw: + if ch == S: + n = n + 1 + b = b + 1 + elif ch == T: + n = n + 1 + nt = nt + 1 + if b >= len(count): + count = count + [0] * (b - len(count) + 1) + count[b] = count[b] + 1 + b = 0 + else: + break + self.n = n + self.nt = nt + self.norm = tuple(count), b + self.is_simple = len(count) <= 1 + + # return length of longest contiguous run of spaces (whether or not + # preceding a tab) + def longest_run_of_spaces(self): + count, trailing = self.norm + return max(len(count)-1, trailing) + + def indent_level(self, tabsize): + # count, il = self.norm + # for i in range(len(count)): + # if count[i]: + # il = il + (i//tabsize + 1)*tabsize * count[i] + # return il + + # quicker: + # il = trailing + sum (i//ts + 1)*ts*count[i] = + # trailing + ts * sum (i//ts + 1)*count[i] = + # trailing + ts * sum i//ts*count[i] + count[i] = + # trailing + ts * [(sum i//ts*count[i]) + (sum count[i])] = + # trailing + ts * [(sum i//ts*count[i]) + num_tabs] + # and note that i//ts*count[i] is 0 when i < ts + + count, trailing = self.norm + il = 0 + for i in range(tabsize, len(count)): + il = il + i//tabsize * count[i] + return trailing + tabsize * (il + self.nt) + + # return true iff self.indent_level(t) == other.indent_level(t) + # for all t >= 1 + def equal(self, other): + return self.norm == other.norm + + # return a list of tuples (ts, i1, i2) such that + # i1 == self.indent_level(ts) != other.indent_level(ts) == i2. + # Intended to be used after not self.equal(other) is known, in which + # case it will return at least one witnessing tab size. + def not_equal_witness(self, other): + n = max(self.longest_run_of_spaces(), + other.longest_run_of_spaces()) + 1 + a = [] + for ts in range(1, n+1): + if self.indent_level(ts) != other.indent_level(ts): + a.append( (ts, + self.indent_level(ts), + other.indent_level(ts)) ) + return a + + # Return True iff self.indent_level(t) < other.indent_level(t) + # for all t >= 1. + # The algorithm is due to Vincent Broman. + # Easy to prove it's correct. + # XXXpost that. + # Trivial to prove n is sharp (consider T vs ST). + # Unknown whether there's a faster general way. I suspected so at + # first, but no longer. + # For the special (but common!) case where M and N are both of the + # form (T*)(S*), M.less(N) iff M.len() < N.len() and + # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded. + # XXXwrite that up. + # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1. + def less(self, other): + if self.n >= other.n: + return False + if self.is_simple and other.is_simple: + return self.nt <= other.nt + n = max(self.longest_run_of_spaces(), + other.longest_run_of_spaces()) + 1 + # the self.n >= other.n test already did it for ts=1 + for ts in range(2, n+1): + if self.indent_level(ts) >= other.indent_level(ts): + return False + return True + + # return a list of tuples (ts, i1, i2) such that + # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2. + # Intended to be used after not self.less(other) is known, in which + # case it will return at least one witnessing tab size. 
+ def not_less_witness(self, other): + n = max(self.longest_run_of_spaces(), + other.longest_run_of_spaces()) + 1 + a = [] + for ts in range(1, n+1): + if self.indent_level(ts) >= other.indent_level(ts): + a.append( (ts, + self.indent_level(ts), + other.indent_level(ts)) ) + return a + +def format_witnesses(w): + firsts = (str(tup[0]) for tup in w) + prefix = "at tab size" + if len(w) > 1: + prefix = prefix + "s" + return prefix + " " + ', '.join(firsts) + +def process_tokens(tokens): + try: + _process_tokens(tokens) + except TabError as e: + raise NannyNag(e.lineno, e.msg, e.text) + +def _process_tokens(tokens): + INDENT = tokenize.INDENT + DEDENT = tokenize.DEDENT + NEWLINE = tokenize.NEWLINE + JUNK = tokenize.COMMENT, tokenize.NL + indents = [Whitespace("")] + check_equal = 0 + + for (type, token, start, end, line) in tokens: + if type == NEWLINE: + # a program statement, or ENDMARKER, will eventually follow, + # after some (possibly empty) run of tokens of the form + # (NL | COMMENT)* (INDENT | DEDENT+)? + # If an INDENT appears, setting check_equal is wrong, and will + # be undone when we see the INDENT. + check_equal = 1 + + elif type == INDENT: + check_equal = 0 + thisguy = Whitespace(token) + if not indents[-1].less(thisguy): + witness = indents[-1].not_less_witness(thisguy) + msg = "indent not greater e.g. " + format_witnesses(witness) + raise NannyNag(start[0], msg, line) + indents.append(thisguy) + + elif type == DEDENT: + # there's nothing we need to check here! what's important is + # that when the run of DEDENTs ends, the indentation of the + # program statement (or ENDMARKER) that triggered the run is + # equal to what's left at the top of the indents stack + + # Ouch! This assert triggers if the last line of the source + # is indented *and* lacks a newline -- then DEDENTs pop out + # of thin air. + # assert check_equal # else no earlier NEWLINE, or an earlier INDENT + check_equal = 1 + + del indents[-1] + + elif check_equal and type not in JUNK: + # this is the first "real token" following a NEWLINE, so it + # must be the first token of the next program statement, or an + # ENDMARKER; the "line" argument exposes the leading whitespace + # for this statement; in the case of ENDMARKER, line is an empty + # string, so will properly match the empty string with which the + # "indents" stack was seeded + check_equal = 0 + thisguy = Whitespace(line) + if not indents[-1].equal(thisguy): + witness = indents[-1].not_equal_witness(thisguy) + msg = "indent not equal e.g. " + format_witnesses(witness) + raise NannyNag(start[0], msg, line) + + +if __name__ == '__main__': + main() diff --git a/Python314_4_x86_Template/Lib/tarfile.py b/Python314_4_x86_Template/Lib/tarfile.py new file mode 100644 index 00000000..414aefe9 --- /dev/null +++ b/Python314_4_x86_Template/Lib/tarfile.py @@ -0,0 +1,3157 @@ +#------------------------------------------------------------------- +# tarfile.py +#------------------------------------------------------------------- +# Copyright (C) 2002 Lars Gustaebel +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +"""Read from and write to tar format archives. +""" + +version = "0.9.0" +__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" +__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." + +#--------- +# Imports +#--------- +from builtins import open as bltn_open +import sys +import os +import io +import shutil +import stat +import time +import struct +import copy +import re + +try: + import pwd +except ImportError: + pwd = None +try: + import grp +except ImportError: + grp = None + +# os.symlink on Windows prior to 6.0 raises NotImplementedError +# OSError (winerror=1314) will be raised if the caller does not hold the +# SeCreateSymbolicLinkPrivilege privilege +symlink_exception = (AttributeError, NotImplementedError, OSError) + +# from tarfile import * +__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError", "ReadError", + "CompressionError", "StreamError", "ExtractError", "HeaderError", + "ENCODING", "USTAR_FORMAT", "GNU_FORMAT", "PAX_FORMAT", + "DEFAULT_FORMAT", "open","fully_trusted_filter", "data_filter", + "tar_filter", "FilterError", "AbsoluteLinkError", + "OutsideDestinationError", "SpecialFileError", "AbsolutePathError", + "LinkOutsideDestinationError", "LinkFallbackError"] + + +#--------------------------------------------------------- +# tar constants +#--------------------------------------------------------- +NUL = b"\0" # the null character +BLOCKSIZE = 512 # length of processing blocks +RECORDSIZE = BLOCKSIZE * 20 # length of records +GNU_MAGIC = b"ustar \0" # magic gnu tar string +POSIX_MAGIC = b"ustar\x0000" # magic posix tar string + +LENGTH_NAME = 100 # maximum length of a filename +LENGTH_LINK = 100 # maximum length of a linkname +LENGTH_PREFIX = 155 # maximum length of the prefix field + +REGTYPE = b"0" # regular file +AREGTYPE = b"\0" # regular file +LNKTYPE = b"1" # link (inside tarfile) +SYMTYPE = b"2" # symbolic link +CHRTYPE = b"3" # character special device +BLKTYPE = b"4" # block special device +DIRTYPE = b"5" # directory +FIFOTYPE = b"6" # fifo special device +CONTTYPE = b"7" # contiguous file + +GNUTYPE_LONGNAME = b"L" # GNU tar longname +GNUTYPE_LONGLINK = b"K" # GNU tar longlink +GNUTYPE_SPARSE = b"S" # GNU tar sparse file + +XHDTYPE = b"x" # POSIX.1-2001 extended header +XGLTYPE = b"g" # POSIX.1-2001 global header +SOLARIS_XHDTYPE = b"X" # Solaris extended header + +USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format +GNU_FORMAT = 1 # GNU tar format +PAX_FORMAT = 2 # 
POSIX.1-2001 (pax) format +DEFAULT_FORMAT = PAX_FORMAT + +#--------------------------------------------------------- +# tarfile constants +#--------------------------------------------------------- +# File types that tarfile supports: +SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, + SYMTYPE, DIRTYPE, FIFOTYPE, + CONTTYPE, CHRTYPE, BLKTYPE, + GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# File types that will be treated as a regular file. +REGULAR_TYPES = (REGTYPE, AREGTYPE, + CONTTYPE, GNUTYPE_SPARSE) + +# File types that are part of the GNU tar format. +GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# Fields from a pax header that override a TarInfo attribute. +PAX_FIELDS = ("path", "linkpath", "size", "mtime", + "uid", "gid", "uname", "gname") + +# Fields from a pax header that are affected by hdrcharset. +PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"} + +# Fields in a pax header that are numbers, all other fields +# are treated as strings. +PAX_NUMBER_FIELDS = { + "atime": float, + "ctime": float, + "mtime": float, + "uid": int, + "gid": int, + "size": int +} + +#--------------------------------------------------------- +# initialization +#--------------------------------------------------------- +if os.name == "nt": + ENCODING = "utf-8" +else: + ENCODING = sys.getfilesystemencoding() + +#--------------------------------------------------------- +# Some useful functions +#--------------------------------------------------------- + +def stn(s, length, encoding, errors): + """Convert a string to a null-terminated bytes object. + """ + if s is None: + raise ValueError("metadata cannot contain None") + s = s.encode(encoding, errors) + return s[:length] + (length - len(s)) * NUL + +def nts(s, encoding, errors): + """Convert a null-terminated bytes object to a string. + """ + p = s.find(b"\0") + if p != -1: + s = s[:p] + return s.decode(encoding, errors) + +def nti(s): + """Convert a number field to a python number. + """ + # There are two possible encodings for a number field, see + # itn() below. + if s[0] in (0o200, 0o377): + n = 0 + for i in range(len(s) - 1): + n <<= 8 + n += s[i + 1] + if s[0] == 0o377: + n = -(256 ** (len(s) - 1) - n) + else: + try: + s = nts(s, "ascii", "strict") + n = int(s.strip() or "0", 8) + except ValueError: + raise InvalidHeaderError("invalid header") + return n + +def itn(n, digits=8, format=DEFAULT_FORMAT): + """Convert a python number to a number field. + """ + # POSIX 1003.1-1988 requires numbers to be encoded as a string of + # octal digits followed by a null-byte, this allows values up to + # (8**(digits-1))-1. GNU tar allows storing numbers greater than + # that if necessary. A leading 0o200 or 0o377 byte indicate this + # particular encoding, the following digits-1 bytes are a big-endian + # base-256 representation. This allows values up to (256**(digits-1))-1. + # A 0o200 byte indicates a positive number, a 0o377 byte a negative + # number. 
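+    # Worked examples (illustrative only):
+    #     itn(511)               == b"0000777\x00"  (octal, NUL-terminated)
+    #     itn(-1, 8, GNU_FORMAT) == b"\xff" * 8     (base-256, 0o377 marker)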
+ original_n = n + n = int(n) + if 0 <= n < 8 ** (digits - 1): + s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL + elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1): + if n >= 0: + s = bytearray([0o200]) + else: + s = bytearray([0o377]) + n = 256 ** digits + n + + for i in range(digits - 1): + s.insert(1, n & 0o377) + n >>= 8 + else: + raise ValueError("overflow in number field") + + return s + +def calc_chksums(buf): + """Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. + """ + unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf)) + signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf)) + return unsigned_chksum, signed_chksum + +def copyfileobj(src, dst, length=None, exception=OSError, bufsize=None): + """Copy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. + """ + bufsize = bufsize or 16 * 1024 + if length == 0: + return + if length is None: + shutil.copyfileobj(src, dst, bufsize) + return + + blocks, remainder = divmod(length, bufsize) + for b in range(blocks): + buf = src.read(bufsize) + if len(buf) < bufsize: + raise exception("unexpected end of data") + dst.write(buf) + + if remainder != 0: + buf = src.read(remainder) + if len(buf) < remainder: + raise exception("unexpected end of data") + dst.write(buf) + return + +def _safe_print(s): + encoding = getattr(sys.stdout, 'encoding', None) + if encoding is not None: + s = s.encode(encoding, 'backslashreplace').decode(encoding) + print(s, end=' ') + + +class TarError(Exception): + """Base exception.""" + pass +class ExtractError(TarError): + """General exception for extract errors.""" + pass +class ReadError(TarError): + """Exception for unreadable tar archives.""" + pass +class CompressionError(TarError): + """Exception for unavailable compression methods.""" + pass +class StreamError(TarError): + """Exception for unsupported operations on stream-like TarFiles.""" + pass +class HeaderError(TarError): + """Base exception for header errors.""" + pass +class EmptyHeaderError(HeaderError): + """Exception for empty headers.""" + pass +class TruncatedHeaderError(HeaderError): + """Exception for truncated headers.""" + pass +class EOFHeaderError(HeaderError): + """Exception for end of file headers.""" + pass +class InvalidHeaderError(HeaderError): + """Exception for invalid headers.""" + pass +class SubsequentHeaderError(HeaderError): + """Exception for missing and invalid extended headers.""" + pass + +#--------------------------- +# internal stream interface +#--------------------------- +class _LowLevelFile: + """Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + """ + + def __init__(self, name, mode): + mode = { + "r": os.O_RDONLY, + "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, + }[mode] + if hasattr(os, "O_BINARY"): + mode |= os.O_BINARY + self.fd = os.open(name, mode, 0o666) + + def close(self): + os.close(self.fd) + + def read(self, size): + return os.read(self.fd, size) + + def write(self, s): + os.write(self.fd, s) + +class _Stream: + """Class that serves as an adapter between TarFile and + a stream-like object. 
The stream-like object only
+       needs to have a read() or write() method that works with bytes,
+       and the method is accessed blockwise.
+       Use of gzip or bzip2 compression is possible.
+       A stream-like object could be for example: sys.stdin.buffer,
+       sys.stdout.buffer, a socket, a tape device etc.
+
+       _Stream is intended to be used only internally.
+    """
+
+    def __init__(self, name, mode, comptype, fileobj, bufsize,
+                 compresslevel, preset):
+        """Construct a _Stream object.
+        """
+        self._extfileobj = True
+        if fileobj is None:
+            fileobj = _LowLevelFile(name, mode)
+            self._extfileobj = False
+
+        if comptype == '*':
+            # Enable transparent compression detection for the
+            # stream interface
+            fileobj = _StreamProxy(fileobj)
+            comptype = fileobj.getcomptype()
+
+        self.name = os.fspath(name) if name is not None else ""
+        self.mode = mode
+        self.comptype = comptype
+        self.fileobj = fileobj
+        self.bufsize = bufsize
+        self.buf = b""
+        self.pos = 0
+        self.closed = False
+
+        try:
+            if comptype == "gz":
+                try:
+                    import zlib
+                except ImportError:
+                    raise CompressionError("zlib module is not available") from None
+                self.zlib = zlib
+                self.crc = zlib.crc32(b"")
+                if mode == "r":
+                    self.exception = zlib.error
+                    self._init_read_gz()
+                else:
+                    self._init_write_gz(compresslevel)
+
+            elif comptype == "bz2":
+                try:
+                    import bz2
+                except ImportError:
+                    raise CompressionError("bz2 module is not available") from None
+                if mode == "r":
+                    self.dbuf = b""
+                    self.cmp = bz2.BZ2Decompressor()
+                    self.exception = OSError
+                else:
+                    self.cmp = bz2.BZ2Compressor(compresslevel)
+
+            elif comptype == "xz":
+                try:
+                    import lzma
+                except ImportError:
+                    raise CompressionError("lzma module is not available") from None
+                if mode == "r":
+                    self.dbuf = b""
+                    self.cmp = lzma.LZMADecompressor()
+                    self.exception = lzma.LZMAError
+                else:
+                    self.cmp = lzma.LZMACompressor(preset=preset)
+            elif comptype == "zst":
+                try:
+                    from compression import zstd
+                except ImportError:
+                    raise CompressionError("compression.zstd module is not available") from None
+                if mode == "r":
+                    self.dbuf = b""
+                    self.cmp = zstd.ZstdDecompressor()
+                    self.exception = zstd.ZstdError
+                else:
+                    self.cmp = zstd.ZstdCompressor()
+            elif comptype != "tar":
+                raise CompressionError("unknown compression type %r" % comptype)
+
+        except:
+            if not self._extfileobj:
+                self.fileobj.close()
+            self.closed = True
+            raise
+
+    def __del__(self):
+        if hasattr(self, "closed") and not self.closed:
+            self.close()
+
+    def _init_write_gz(self, compresslevel):
+        """Initialize for writing with gzip compression.
+        """
+        self.cmp = self.zlib.compressobj(compresslevel,
+                                         self.zlib.DEFLATED,
+                                         -self.zlib.MAX_WBITS,
+                                         self.zlib.DEF_MEM_LEVEL,
+                                         0)
+        timestamp = struct.pack("<L", int(time.time()))
+        self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+        if self.name.endswith(".gz"):
+            self.name = self.name[:-3]
+        # Honor "directory components removed" from RFC 1952
+        self.name = os.path.basename(self.name)
+        # RFC 1952 says we must use ISO-8859-1 for the FNAME field.
+        self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+    def write(self, s):
+        """Write string s to the stream.
+        """
+        if self.comptype == "gz":
+            self.crc = self.zlib.crc32(s, self.crc)
+        self.pos += len(s)
+        if self.comptype != "tar":
+            s = self.cmp.compress(s)
+        self.__write(s)
+
+    def __write(self, s):
+        """Write string s to the stream if a whole new block
+           is ready to be written.
+        """
+        self.buf += s
+        while len(self.buf) > self.bufsize:
+            self.fileobj.write(self.buf[:self.bufsize])
+            self.buf = self.buf[self.bufsize:]
+
+    def close(self):
+        """Close the _Stream object. No operation should be
+           done on it afterwards.
+        """
+        if self.closed:
+            return
+
+        self.closed = True
+        try:
+            if self.mode == "w" and self.comptype != "tar":
+                self.buf += self.cmp.flush()
+
+            if self.mode == "w" and self.buf:
+                self.fileobj.write(self.buf)
+                self.buf = b""
+                if self.comptype == "gz":
+                    self.fileobj.write(struct.pack("<L", self.crc))
+                    self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
+        finally:
+            if not self._extfileobj:
+                self.fileobj.close()
+
+    def _init_read_gz(self):
+        """Initialize for reading a gzip compressed fileobj.
+        """
+        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
+        self.dbuf = b""
+
+        # taken from gzip.GzipFile with some alterations
+        if self.__read(2) != b"\037\213":
+            raise ReadError("not a gzip file")
+        if self.__read(1) != b"\010":
+            raise CompressionError("unsupported compression method")
+
+        flag = ord(self.__read(1))
+        self.__read(6)
+
+        if flag & 4:
+            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
+            self.read(xlen)
+        if flag & 8:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 16:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 2:
+            self.__read(2)
+
+    def tell(self):
+        """Return the stream's file pointer position.
+        """
+        return self.pos
+
+    def seek(self, pos=0):
+        """Set the stream's file pointer to pos. Negative seeking
+           is forbidden.
+        """
+        if pos - self.pos >= 0:
+            blocks, remainder = divmod(pos - self.pos, self.bufsize)
+            for i in range(blocks):
+                self.read(self.bufsize)
+            self.read(remainder)
+        else:
+            raise StreamError("seeking backwards is not allowed")
+        return self.pos
+
+    def read(self, size):
+        """Return the next size number of bytes from the stream."""
+        assert size is not None
+        buf = self._read(size)
+        self.pos += len(buf)
+        return buf
+
+    def _read(self, size):
+        """Return size bytes from the stream.
+        """
+        if self.comptype == "tar":
+            return self.__read(size)
+
+        c = len(self.dbuf)
+        t = [self.dbuf]
+        while c < size:
+            # Skip underlying buffer to avoid unaligned double buffering.
+            if self.buf:
+                buf = self.buf
+                self.buf = b""
+            else:
+                buf = self.fileobj.read(self.bufsize)
+                if not buf:
+                    break
+            try:
+                buf = self.cmp.decompress(buf)
+            except self.exception as e:
+                raise ReadError("invalid compressed data") from e
+            t.append(buf)
+            c += len(buf)
+        t = b"".join(t)
+        self.dbuf = t[size:]
+        return t[:size]
+
+    def __read(self, size):
+        """Return size bytes from stream. If internal buffer is empty,
+           read another block from the stream.
+        """
+        c = len(self.buf)
+        t = [self.buf]
+        while c < size:
+            buf = self.fileobj.read(self.bufsize)
+            if not buf:
+                break
+            t.append(buf)
+            c += len(buf)
+        t = b"".join(t)
+        self.buf = t[size:]
+        return t[:size]
+# class _Stream
+
+class _StreamProxy(object):
+    """Small proxy class that enables transparent compression
+       detection for the Stream interface (mode 'r|*').
+    """
+
+    def __init__(self, fileobj):
+        self.fileobj = fileobj
+        self.buf = self.fileobj.read(BLOCKSIZE)
+
+    def read(self, size):
+        self.read = self.fileobj.read
+        return self.buf
+
+    def getcomptype(self):
+        if self.buf.startswith(b"\x1f\x8b\x08"):
+            return "gz"
+        elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY":
+            return "bz2"
+        elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")):
+            return "xz"
+        elif self.buf.startswith(b"\x28\xb5\x2f\xfd"):
+            return "zst"
+        else:
+            return "tar"
+
+    def close(self):
+        self.fileobj.close()
+# class StreamProxy
+
+#------------------------
+# Extraction file object
+#------------------------
+class _FileInFile(object):
+    """A thin wrapper around an existing file object that
+       provides a part of its data as an individual file
+       object.
+    """
+
+    def __init__(self, fileobj, offset, size, name, blockinfo=None):
+        self.fileobj = fileobj
+        self.offset = offset
+        self.size = size
+        self.position = 0
+        self.name = name
+        self.closed = False
+
+        if blockinfo is None:
+            blockinfo = [(0, size)]
+
+        # Construct a map with data and zero blocks.
+        self.map_index = 0
+        self.map = []
+        lastpos = 0
+        realpos = self.offset
+        for offset, size in blockinfo:
+            if offset > lastpos:
+                self.map.append((False, lastpos, offset, None))
+            self.map.append((True, offset, offset + size, realpos))
+            realpos += size
+            lastpos = offset + size
+        if lastpos < self.size:
+            self.map.append((False, lastpos, self.size, None))
+
+    def flush(self):
+        pass
+
+    @property
+    def mode(self):
+        return 'rb'
+
+    def readable(self):
+        return True
+
+    def writable(self):
+        return False
+
+    def seekable(self):
+        return self.fileobj.seekable()
+
+    def tell(self):
+        """Return the current file position.
+ """ + return self.position + + def seek(self, position, whence=io.SEEK_SET): + """Seek to a position in the file. + """ + if whence == io.SEEK_SET: + self.position = min(max(position, 0), self.size) + elif whence == io.SEEK_CUR: + if position < 0: + self.position = max(self.position + position, 0) + else: + self.position = min(self.position + position, self.size) + elif whence == io.SEEK_END: + self.position = max(min(self.size + position, self.size), 0) + else: + raise ValueError("Invalid argument") + return self.position + + def read(self, size=None): + """Read data from the file. + """ + if size is None: + size = self.size - self.position + else: + size = min(size, self.size - self.position) + + buf = b"" + while size > 0: + while True: + data, start, stop, offset = self.map[self.map_index] + if start <= self.position < stop: + break + else: + self.map_index += 1 + if self.map_index == len(self.map): + self.map_index = 0 + length = min(size, stop - self.position) + if data: + self.fileobj.seek(offset + (self.position - start)) + b = self.fileobj.read(length) + if len(b) != length: + raise ReadError("unexpected end of data") + buf += b + else: + buf += NUL * length + size -= length + self.position += length + return buf + + def readinto(self, b): + buf = self.read(len(b)) + b[:len(buf)] = buf + return len(buf) + + def close(self): + self.closed = True +#class _FileInFile + +class ExFileObject(io.BufferedReader): + + def __init__(self, tarfile, tarinfo): + fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, + tarinfo.size, tarinfo.name, tarinfo.sparse) + super().__init__(fileobj) +#class ExFileObject + + +#----------------------------- +# extraction filters (PEP 706) +#----------------------------- + +class FilterError(TarError): + pass + +class AbsolutePathError(FilterError): + def __init__(self, tarinfo): + self.tarinfo = tarinfo + super().__init__(f'member {tarinfo.name!r} has an absolute path') + +class OutsideDestinationError(FilterError): + def __init__(self, tarinfo, path): + self.tarinfo = tarinfo + self._path = path + super().__init__(f'{tarinfo.name!r} would be extracted to {path!r}, ' + + 'which is outside the destination') + +class SpecialFileError(FilterError): + def __init__(self, tarinfo): + self.tarinfo = tarinfo + super().__init__(f'{tarinfo.name!r} is a special file') + +class AbsoluteLinkError(FilterError): + def __init__(self, tarinfo): + self.tarinfo = tarinfo + super().__init__(f'{tarinfo.name!r} is a link to an absolute path') + +class LinkOutsideDestinationError(FilterError): + def __init__(self, tarinfo, path): + self.tarinfo = tarinfo + self._path = path + super().__init__(f'{tarinfo.name!r} would link to {path!r}, ' + + 'which is outside the destination') + +class LinkFallbackError(FilterError): + def __init__(self, tarinfo, path): + self.tarinfo = tarinfo + self._path = path + super().__init__(f'link {tarinfo.name!r} would be extracted as a ' + + f'copy of {path!r}, which was rejected') + +# Errors caused by filters -- both "fatal" and "non-fatal" -- that +# we consider to be issues with the argument, rather than a bug in the +# filter function +_FILTER_ERRORS = (FilterError, OSError, ExtractError) + +def _get_filtered_attrs(member, dest_path, for_data=True): + new_attrs = {} + name = member.name + dest_path = os.path.realpath(dest_path, strict=os.path.ALLOW_MISSING) + # Strip leading / (tar's directory separator) from filenames. + # Include os.sep (target OS directory separator) as well. 
+ if name.startswith(('/', os.sep)): + name = new_attrs['name'] = member.path.lstrip('/' + os.sep) + if os.path.isabs(name): + # Path is absolute even after stripping. + # For example, 'C:/foo' on Windows. + raise AbsolutePathError(member) + # Ensure we stay in the destination + target_path = os.path.realpath(os.path.join(dest_path, name), + strict=os.path.ALLOW_MISSING) + if os.path.commonpath([target_path, dest_path]) != dest_path: + raise OutsideDestinationError(member, target_path) + # Limit permissions (no high bits, and go-w) + mode = member.mode + if mode is not None: + # Strip high bits & group/other write bits + mode = mode & 0o755 + if for_data: + # For data, handle permissions & file types + if member.isreg() or member.islnk(): + if not mode & 0o100: + # Clear executable bits if not executable by user + mode &= ~0o111 + # Ensure owner can read & write + mode |= 0o600 + elif member.isdir() or member.issym(): + # Ignore mode for directories & symlinks + mode = None + else: + # Reject special files + raise SpecialFileError(member) + if mode != member.mode: + new_attrs['mode'] = mode + if for_data: + # Ignore ownership for 'data' + if member.uid is not None: + new_attrs['uid'] = None + if member.gid is not None: + new_attrs['gid'] = None + if member.uname is not None: + new_attrs['uname'] = None + if member.gname is not None: + new_attrs['gname'] = None + # Check link destination for 'data' + if member.islnk() or member.issym(): + if os.path.isabs(member.linkname): + raise AbsoluteLinkError(member) + normalized = os.path.normpath(member.linkname) + if normalized != member.linkname: + new_attrs['linkname'] = normalized + if member.issym(): + target_path = os.path.join(dest_path, + os.path.dirname(name), + member.linkname) + else: + target_path = os.path.join(dest_path, + member.linkname) + target_path = os.path.realpath(target_path, + strict=os.path.ALLOW_MISSING) + if os.path.commonpath([target_path, dest_path]) != dest_path: + raise LinkOutsideDestinationError(member, target_path) + return new_attrs + +def fully_trusted_filter(member, dest_path): + return member + +def tar_filter(member, dest_path): + new_attrs = _get_filtered_attrs(member, dest_path, False) + if new_attrs: + return member.replace(**new_attrs, deep=False) + return member + +def data_filter(member, dest_path): + new_attrs = _get_filtered_attrs(member, dest_path, True) + if new_attrs: + return member.replace(**new_attrs, deep=False) + return member + +_NAMED_FILTERS = { + "fully_trusted": fully_trusted_filter, + "tar": tar_filter, + "data": data_filter, +} + +#------------------ +# Exported Classes +#------------------ + +# Sentinel for replace() defaults, meaning "don't change the attribute" +_KEEP = object() + +# Header length is digits followed by a space. +_header_length_prefix_re = re.compile(br"([0-9]{1,20}) ") + +class TarInfo(object): + """Informational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + """ + + __slots__ = dict( + name = 'Name of the archive member.', + mode = 'Permission bits.', + uid = 'User ID of the user who originally stored this member.', + gid = 'Group ID of the user who originally stored this member.', + size = 'Size in bytes.', + mtime = 'Time of last modification.', + chksum = 'Header checksum.', + type = ('File type. 
type is usually one of these constants: ' + 'REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, ' + 'CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_SPARSE.'), + linkname = ('Name of the target file name, which is only present ' + 'in TarInfo objects of type LNKTYPE and SYMTYPE.'), + uname = 'User name.', + gname = 'Group name.', + devmajor = 'Device major number.', + devminor = 'Device minor number.', + offset = 'The tar header starts here.', + offset_data = "The file's data starts here.", + pax_headers = ('A dictionary containing key-value pairs of an ' + 'associated pax extended header.'), + sparse = 'Sparse member information.', + _tarfile = None, + _sparse_structs = None, + _link_target = None, + ) + + def __init__(self, name=""): + """Construct a TarInfo object. name is the optional name + of the member. + """ + self.name = name # member name + self.mode = 0o644 # file permissions + self.uid = 0 # user id + self.gid = 0 # group id + self.size = 0 # file size + self.mtime = 0 # modification time + self.chksum = 0 # header checksum + self.type = REGTYPE # member type + self.linkname = "" # link name + self.uname = "" # user name + self.gname = "" # group name + self.devmajor = 0 # device major number + self.devminor = 0 # device minor number + + self.offset = 0 # the tar header starts here + self.offset_data = 0 # the file's data starts here + + self.sparse = None # sparse member information + self.pax_headers = {} # pax header information + + @property + def tarfile(self): + import warnings + warnings.warn( + 'The undocumented "tarfile" attribute of TarInfo objects ' + + 'is deprecated and will be removed in Python 3.16', + DeprecationWarning, stacklevel=2) + return self._tarfile + + @tarfile.setter + def tarfile(self, tarfile): + import warnings + warnings.warn( + 'The undocumented "tarfile" attribute of TarInfo objects ' + + 'is deprecated and will be removed in Python 3.16', + DeprecationWarning, stacklevel=2) + self._tarfile = tarfile + + @property + def path(self): + 'In pax headers, "name" is called "path".' + return self.name + + @path.setter + def path(self, name): + self.name = name + + @property + def linkpath(self): + 'In pax headers, "linkname" is called "linkpath".' + return self.linkname + + @linkpath.setter + def linkpath(self, linkname): + self.linkname = linkname + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) + + def replace(self, *, + name=_KEEP, mtime=_KEEP, mode=_KEEP, linkname=_KEEP, + uid=_KEEP, gid=_KEEP, uname=_KEEP, gname=_KEEP, + deep=True, _KEEP=_KEEP): + """Return a deep copy of self with the given attributes replaced. + """ + if deep: + result = copy.deepcopy(self) + else: + result = copy.copy(self) + if name is not _KEEP: + result.name = name + if mtime is not _KEEP: + result.mtime = mtime + if mode is not _KEEP: + result.mode = mode + if linkname is not _KEEP: + result.linkname = linkname + if uid is not _KEEP: + result.uid = uid + if gid is not _KEEP: + result.gid = gid + if uname is not _KEEP: + result.uname = uname + if gname is not _KEEP: + result.gname = gname + return result + + def get_info(self): + """Return the TarInfo's attributes as a dictionary. 
+ """ + if self.mode is None: + mode = None + else: + mode = self.mode & 0o7777 + info = { + "name": self.name, + "mode": mode, + "uid": self.uid, + "gid": self.gid, + "size": self.size, + "mtime": self.mtime, + "chksum": self.chksum, + "type": self.type, + "linkname": self.linkname, + "uname": self.uname, + "gname": self.gname, + "devmajor": self.devmajor, + "devminor": self.devminor + } + + if info["type"] == DIRTYPE and not info["name"].endswith("/"): + info["name"] += "/" + + return info + + def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): + """Return a tar header as a string of 512 byte blocks. + """ + info = self.get_info() + for name, value in info.items(): + if value is None: + raise ValueError("%s may not be None" % name) + + if format == USTAR_FORMAT: + return self.create_ustar_header(info, encoding, errors) + elif format == GNU_FORMAT: + return self.create_gnu_header(info, encoding, errors) + elif format == PAX_FORMAT: + return self.create_pax_header(info, encoding) + else: + raise ValueError("invalid format") + + def create_ustar_header(self, info, encoding, errors): + """Return the object as a ustar header block. + """ + info["magic"] = POSIX_MAGIC + + if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK: + raise ValueError("linkname is too long") + + if len(info["name"].encode(encoding, errors)) > LENGTH_NAME: + info["prefix"], info["name"] = self._posix_split_name(info["name"], encoding, errors) + + return self._create_header(info, USTAR_FORMAT, encoding, errors) + + def create_gnu_header(self, info, encoding, errors): + """Return the object as a GNU header block sequence. + """ + info["magic"] = GNU_MAGIC + + buf = b"" + if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK: + buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) + + if len(info["name"].encode(encoding, errors)) > LENGTH_NAME: + buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) + + return buf + self._create_header(info, GNU_FORMAT, encoding, errors) + + def create_pax_header(self, info, encoding): + """Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + """ + info["magic"] = POSIX_MAGIC + pax_headers = self.pax_headers.copy() + + # Test string fields for values that exceed the field length or cannot + # be represented in ASCII encoding. + for name, hname, length in ( + ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), + ("uname", "uname", 32), ("gname", "gname", 32)): + + if hname in pax_headers: + # The pax header has priority. + continue + + # Try to encode the string as ASCII. + try: + info[name].encode("ascii", "strict") + except UnicodeEncodeError: + pax_headers[hname] = info[name] + continue + + if len(info[name]) > length: + pax_headers[hname] = info[name] + + # Test number fields for values that exceed the field limit or values + # that like to be stored as float. + for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): + needs_pax = False + + val = info[name] + val_is_float = isinstance(val, float) + val_int = round(val) if val_is_float else val + if not 0 <= val_int < 8 ** (digits - 1): + # Avoid overflow. + info[name] = 0 + needs_pax = True + elif val_is_float: + # Put rounded value in ustar header, and full + # precision value in pax header. + info[name] = val_int + needs_pax = True + + # The existing pax header has priority. 
+ if needs_pax and name not in pax_headers: + pax_headers[name] = str(val) + + # Create a pax extended header if necessary. + if pax_headers: + buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) + else: + buf = b"" + + return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") + + @classmethod + def create_pax_global_header(cls, pax_headers): + """Return the object as a pax global header block sequence. + """ + return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8") + + def _posix_split_name(self, name, encoding, errors): + """Split a name longer than 100 chars into a prefix + and a name part. + """ + components = name.split("/") + for i in range(1, len(components)): + prefix = "/".join(components[:i]) + name = "/".join(components[i:]) + if len(prefix.encode(encoding, errors)) <= LENGTH_PREFIX and \ + len(name.encode(encoding, errors)) <= LENGTH_NAME: + break + else: + raise ValueError("name is too long") + + return prefix, name + + @staticmethod + def _create_header(info, format, encoding, errors): + """Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. + """ + has_device_fields = info.get("type") in (CHRTYPE, BLKTYPE) + if has_device_fields: + devmajor = itn(info.get("devmajor", 0), 8, format) + devminor = itn(info.get("devminor", 0), 8, format) + else: + devmajor = stn("", 8, encoding, errors) + devminor = stn("", 8, encoding, errors) + + # None values in metadata should cause ValueError. + # itn()/stn() do this for all fields except type. + filetype = info.get("type", REGTYPE) + if filetype is None: + raise ValueError("TarInfo.type must not be None") + + parts = [ + stn(info.get("name", ""), 100, encoding, errors), + itn(info.get("mode", 0) & 0o7777, 8, format), + itn(info.get("uid", 0), 8, format), + itn(info.get("gid", 0), 8, format), + itn(info.get("size", 0), 12, format), + itn(info.get("mtime", 0), 12, format), + b" ", # checksum field + filetype, + stn(info.get("linkname", ""), 100, encoding, errors), + info.get("magic", POSIX_MAGIC), + stn(info.get("uname", ""), 32, encoding, errors), + stn(info.get("gname", ""), 32, encoding, errors), + devmajor, + devminor, + stn(info.get("prefix", ""), 155, encoding, errors) + ] + + buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) + chksum = calc_chksums(buf[-BLOCKSIZE:])[0] + buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:] + return buf + + @staticmethod + def _create_payload(payload): + """Return the string payload filled with zero bytes + up to the next 512 byte border. + """ + blocks, remainder = divmod(len(payload), BLOCKSIZE) + if remainder > 0: + payload += (BLOCKSIZE - remainder) * NUL + return payload + + @classmethod + def _create_gnu_long_header(cls, name, type, encoding, errors): + """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + """ + name = name.encode(encoding, errors) + NUL + + info = {} + info["name"] = "././@LongLink" + info["type"] = type + info["size"] = len(name) + info["magic"] = GNU_MAGIC + + # create extended header + name blocks. + return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ + cls._create_payload(name) + + @classmethod + def _create_pax_generic_header(cls, pax_headers, type, encoding): + """Return a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. 
+ """ + # Check if one of the fields contains surrogate characters and thereby + # forces hdrcharset=BINARY, see _proc_pax() for more information. + binary = False + for keyword, value in pax_headers.items(): + try: + value.encode("utf-8", "strict") + except UnicodeEncodeError: + binary = True + break + + records = b"" + if binary: + # Put the hdrcharset field at the beginning of the header. + records += b"21 hdrcharset=BINARY\n" + + for keyword, value in pax_headers.items(): + keyword = keyword.encode("utf-8") + if binary: + # Try to restore the original byte representation of 'value'. + # Needless to say, that the encoding must match the string. + value = value.encode(encoding, "surrogateescape") + else: + value = value.encode("utf-8") + + l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' + n = p = 0 + while True: + n = l + len(str(p)) + if n == p: + break + p = n + records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" + + # We use a hardcoded "././@PaxHeader" name like star does + # instead of the one that POSIX recommends. + info = {} + info["name"] = "././@PaxHeader" + info["type"] = type + info["size"] = len(records) + info["magic"] = POSIX_MAGIC + + # Create pax header + record blocks. + return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ + cls._create_payload(records) + + @classmethod + def frombuf(cls, buf, encoding, errors): + """Construct a TarInfo object from a 512 byte bytes object. + + To support the old v7 tar format AREGTYPE headers are + transformed to DIRTYPE headers if their name ends in '/'. + """ + return cls._frombuf(buf, encoding, errors) + + @classmethod + def _frombuf(cls, buf, encoding, errors, *, dircheck=True): + """Construct a TarInfo object from a 512 byte bytes object. + + If ``dircheck`` is set to ``True`` then ``AREGTYPE`` headers will + be normalized to ``DIRTYPE`` if the name ends in a trailing slash. + ``dircheck`` must be set to ``False`` if this function is called + on a follow-up header such as ``GNUTYPE_LONGNAME``. + """ + if len(buf) == 0: + raise EmptyHeaderError("empty header") + if len(buf) != BLOCKSIZE: + raise TruncatedHeaderError("truncated header") + if buf.count(NUL) == BLOCKSIZE: + raise EOFHeaderError("end of file header") + + chksum = nti(buf[148:156]) + if chksum not in calc_chksums(buf): + raise InvalidHeaderError("bad checksum") + + obj = cls() + obj.name = nts(buf[0:100], encoding, errors) + obj.mode = nti(buf[100:108]) + obj.uid = nti(buf[108:116]) + obj.gid = nti(buf[116:124]) + obj.size = nti(buf[124:136]) + obj.mtime = nti(buf[136:148]) + obj.chksum = chksum + obj.type = buf[156:157] + obj.linkname = nts(buf[157:257], encoding, errors) + obj.uname = nts(buf[265:297], encoding, errors) + obj.gname = nts(buf[297:329], encoding, errors) + obj.devmajor = nti(buf[329:337]) + obj.devminor = nti(buf[337:345]) + prefix = nts(buf[345:500], encoding, errors) + + # Old V7 tar format represents a directory as a regular + # file with a trailing slash. + if dircheck and obj.type == AREGTYPE and obj.name.endswith("/"): + obj.type = DIRTYPE + + # The old GNU sparse format occupies some of the unused + # space in the buffer for up to 4 sparse structures. + # Save them for later processing in _proc_sparse(). 
+ if obj.type == GNUTYPE_SPARSE: + pos = 386 + structs = [] + for i in range(4): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[482]) + origsize = nti(buf[483:495]) + obj._sparse_structs = (structs, isextended, origsize) + + # Remove redundant slashes from directories. + if obj.isdir(): + obj.name = obj.name.rstrip("/") + + # Reconstruct a ustar longname. + if prefix and obj.type not in GNU_TYPES: + obj.name = prefix + "/" + obj.name + return obj + + @classmethod + def fromtarfile(cls, tarfile): + """Return the next TarInfo object from TarFile object + tarfile. + """ + return cls._fromtarfile(tarfile) + + @classmethod + def _fromtarfile(cls, tarfile, *, dircheck=True): + """ + See dircheck documentation in _frombuf(). + """ + buf = tarfile.fileobj.read(BLOCKSIZE) + obj = cls._frombuf(buf, tarfile.encoding, tarfile.errors, dircheck=dircheck) + obj.offset = tarfile.fileobj.tell() - BLOCKSIZE + return obj._proc_member(tarfile) + + #-------------------------------------------------------------------------- + # The following are methods that are called depending on the type of a + # member. The entry point is _proc_member() which can be overridden in a + # subclass to add custom _proc_*() methods. A _proc_*() method MUST + # implement the following + # operations: + # 1. Set self.offset_data to the position where the data blocks begin, + # if there is data that follows. + # 2. Set tarfile.offset to the position where the next member's header will + # begin. + # 3. Return self or another valid TarInfo object. + def _proc_member(self, tarfile): + """Choose the right processing method depending on + the type and call it. + """ + if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): + return self._proc_gnulong(tarfile) + elif self.type == GNUTYPE_SPARSE: + return self._proc_sparse(tarfile) + elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): + return self._proc_pax(tarfile) + else: + return self._proc_builtin(tarfile) + + def _proc_builtin(self, tarfile): + """Process a builtin type or an unknown type which + will be treated as a regular file. + """ + self.offset_data = tarfile.fileobj.tell() + offset = self.offset_data + if self.isreg() or self.type not in SUPPORTED_TYPES: + # Skip the following data blocks. + offset += self._block(self.size) + tarfile.offset = offset + + # Patch the TarInfo object with saved global + # header information. + self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) + + # Remove redundant slashes from directories. This is to be consistent + # with frombuf(). + if self.isdir(): + self.name = self.name.rstrip("/") + + return self + + def _proc_gnulong(self, tarfile): + """Process the blocks that hold a GNU longname + or longlink member. + """ + buf = tarfile.fileobj.read(self._block(self.size)) + + # Fetch the next header and process it. + try: + next = self._fromtarfile(tarfile, dircheck=False) + except HeaderError as e: + raise SubsequentHeaderError(str(e)) from None + + # Patch the TarInfo object from the next header with + # the longname information. + next.offset = self.offset + if self.type == GNUTYPE_LONGNAME: + next.name = nts(buf, tarfile.encoding, tarfile.errors) + elif self.type == GNUTYPE_LONGLINK: + next.linkname = nts(buf, tarfile.encoding, tarfile.errors) + + # Remove redundant slashes from directories. This is to be consistent + # with frombuf(). 
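+        # Editor's note, illustrative and not part of the vendored module:
+        # a name longer than the 100-byte ustar field arrives as a
+        # '././@LongLink' pseudo-member of type GNUTYPE_LONGNAME whose
+        # payload (read into 'buf' above) is the real, NUL-terminated name.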
+ if next.isdir(): + next.name = next.name.removesuffix("/") + + return next + + def _proc_sparse(self, tarfile): + """Process a GNU sparse header plus extra headers. + """ + # We already collected some sparse structures in frombuf(). + structs, isextended, origsize = self._sparse_structs + del self._sparse_structs + + # Collect sparse structures from extended header blocks. + while isextended: + buf = tarfile.fileobj.read(BLOCKSIZE) + pos = 0 + for i in range(21): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + if offset and numbytes: + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[504]) + self.sparse = structs + + self.offset_data = tarfile.fileobj.tell() + tarfile.offset = self.offset_data + self._block(self.size) + self.size = origsize + return self + + def _proc_pax(self, tarfile): + """Process an extended or global header as described in + POSIX.1-2008. + """ + # Read the header information. + buf = tarfile.fileobj.read(self._block(self.size)) + + # A pax header stores supplemental information for either + # the following file (extended) or all following files + # (global). + if self.type == XGLTYPE: + pax_headers = tarfile.pax_headers + else: + pax_headers = tarfile.pax_headers.copy() + + # Parse pax header information. A record looks like that: + # "%d %s=%s\n" % (length, keyword, value). length is the size + # of the complete record including the length field itself and + # the newline. + pos = 0 + encoding = None + raw_headers = [] + while len(buf) > pos and buf[pos] != 0x00: + if not (match := _header_length_prefix_re.match(buf, pos)): + raise InvalidHeaderError("invalid header") + try: + length = int(match.group(1)) + except ValueError: + raise InvalidHeaderError("invalid header") + # Headers must be at least 5 bytes, shortest being '5 x=\n'. + # Value is allowed to be empty. + if length < 5: + raise InvalidHeaderError("invalid header") + if pos + length > len(buf): + raise InvalidHeaderError("invalid header") + + header_value_end_offset = match.start(1) + length - 1 # Last byte of the header + keyword_and_value = buf[match.end(1) + 1:header_value_end_offset] + raw_keyword, equals, raw_value = keyword_and_value.partition(b"=") + + # Check the framing of the header. The last character must be '\n' (0x0A) + if not raw_keyword or equals != b"=" or buf[header_value_end_offset] != 0x0A: + raise InvalidHeaderError("invalid header") + raw_headers.append((length, raw_keyword, raw_value)) + + # Check if the pax header contains a hdrcharset field. This tells us + # the encoding of the path, linkpath, uname and gname fields. Normally, + # these fields are UTF-8 encoded but since POSIX.1-2008 tar + # implementations are allowed to store them as raw binary strings if + # the translation to UTF-8 fails. For the time being, we don't care about + # anything other than "BINARY". The only other value that is currently + # allowed by the standard is "ISO-IR 10646 2000 UTF-8" in other words UTF-8. + # Note that we only follow the initial 'hdrcharset' setting to preserve + # the initial behavior of the 'tarfile' module. + if raw_keyword == b"hdrcharset" and encoding is None: + if raw_value == b"BINARY": + encoding = tarfile.encoding + else: # This branch ensures only the first 'hdrcharset' header is used. + encoding = "utf-8" + + pos += length + + # If no explicit hdrcharset is set, we use UTF-8 as a default. 
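+        # Editor's note, illustrative and not part of the vendored module:
+        # a raw record looks like b"30 mtime=1501755683.282176001\n"; the
+        # leading decimal counts the whole record, including the length
+        # digits, the separating space and the trailing newline.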
+ if encoding is None: + encoding = "utf-8" + + # After parsing the raw headers we can decode them to text. + for length, raw_keyword, raw_value in raw_headers: + # Normally, we could just use "utf-8" as the encoding and "strict" + # as the error handler, but we better not take the risk. For + # example, GNU tar <= 1.23 is known to store filenames it cannot + # translate to UTF-8 as raw strings (unfortunately without a + # hdrcharset=BINARY header). + # We first try the strict standard encoding, and if that fails we + # fall back on the user's encoding and error handler. + keyword = self._decode_pax_field(raw_keyword, "utf-8", "utf-8", + tarfile.errors) + if keyword in PAX_NAME_FIELDS: + value = self._decode_pax_field(raw_value, encoding, tarfile.encoding, + tarfile.errors) + else: + value = self._decode_pax_field(raw_value, "utf-8", "utf-8", + tarfile.errors) + + pax_headers[keyword] = value + + # Fetch the next header. + try: + next = self._fromtarfile(tarfile, dircheck=False) + except HeaderError as e: + raise SubsequentHeaderError(str(e)) from None + + # Process GNU sparse information. + if "GNU.sparse.map" in pax_headers: + # GNU extended sparse format version 0.1. + self._proc_gnusparse_01(next, pax_headers) + + elif "GNU.sparse.size" in pax_headers: + # GNU extended sparse format version 0.0. + self._proc_gnusparse_00(next, raw_headers) + + elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": + # GNU extended sparse format version 1.0. + self._proc_gnusparse_10(next, pax_headers, tarfile) + + if self.type in (XHDTYPE, SOLARIS_XHDTYPE): + # Patch the TarInfo object with the extended header info. + next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) + next.offset = self.offset + + if "size" in pax_headers: + # If the extended header replaces the size field, + # we need to recalculate the offset where the next + # header starts. + offset = next.offset_data + if next.isreg() or next.type not in SUPPORTED_TYPES: + offset += next._block(next.size) + tarfile.offset = offset + + return next + + def _proc_gnusparse_00(self, next, raw_headers): + """Process a GNU tar extended sparse header, version 0.0. + """ + offsets = [] + numbytes = [] + for _, keyword, value in raw_headers: + if keyword == b"GNU.sparse.offset": + try: + offsets.append(int(value.decode())) + except ValueError: + raise InvalidHeaderError("invalid header") + + elif keyword == b"GNU.sparse.numbytes": + try: + numbytes.append(int(value.decode())) + except ValueError: + raise InvalidHeaderError("invalid header") + + next.sparse = list(zip(offsets, numbytes)) + + def _proc_gnusparse_01(self, next, pax_headers): + """Process a GNU tar extended sparse header, version 0.1. + """ + sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _proc_gnusparse_10(self, next, pax_headers, tarfile): + """Process a GNU tar extended sparse header, version 1.0. + """ + fields = None + sparse = [] + buf = tarfile.fileobj.read(BLOCKSIZE) + fields, buf = buf.split(b"\n", 1) + fields = int(fields) + while len(sparse) < fields * 2: + if b"\n" not in buf: + buf += tarfile.fileobj.read(BLOCKSIZE) + number, buf = buf.split(b"\n", 1) + sparse.append(int(number)) + next.offset_data = tarfile.fileobj.tell() + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _apply_pax_info(self, pax_headers, encoding, errors): + """Replace fields with supplemental information from a previous + pax extended or global header. 
+ """ + for keyword, value in pax_headers.items(): + if keyword == "GNU.sparse.name": + setattr(self, "path", value) + elif keyword == "GNU.sparse.size": + setattr(self, "size", int(value)) + elif keyword == "GNU.sparse.realsize": + setattr(self, "size", int(value)) + elif keyword in PAX_FIELDS: + if keyword in PAX_NUMBER_FIELDS: + try: + value = PAX_NUMBER_FIELDS[keyword](value) + except ValueError: + value = 0 + if keyword == "path": + value = value.rstrip("/") + setattr(self, keyword, value) + + self.pax_headers = pax_headers.copy() + + def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): + """Decode a single field from a pax record. + """ + try: + return value.decode(encoding, "strict") + except UnicodeDecodeError: + return value.decode(fallback_encoding, fallback_errors) + + def _block(self, count): + """Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + """ + # Only non-negative offsets are allowed + if count < 0: + raise InvalidHeaderError("invalid offset") + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 + return blocks * BLOCKSIZE + + def isreg(self): + 'Return True if the Tarinfo object is a regular file.' + return self.type in REGULAR_TYPES + + def isfile(self): + 'Return True if the Tarinfo object is a regular file.' + return self.isreg() + + def isdir(self): + 'Return True if it is a directory.' + return self.type == DIRTYPE + + def issym(self): + 'Return True if it is a symbolic link.' + return self.type == SYMTYPE + + def islnk(self): + 'Return True if it is a hard link.' + return self.type == LNKTYPE + + def ischr(self): + 'Return True if it is a character device.' + return self.type == CHRTYPE + + def isblk(self): + 'Return True if it is a block device.' + return self.type == BLKTYPE + + def isfifo(self): + 'Return True if it is a FIFO.' + return self.type == FIFOTYPE + + def issparse(self): + return self.sparse is not None + + def isdev(self): + 'Return True if it is one of character device, block device or FIFO.' + return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) +# class TarInfo + +class TarFile(object): + """The TarFile Class provides an interface to tar archives. + """ + + debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) + + dereference = False # If true, add content of linked file to the + # tar file, else the link. + + ignore_zeros = False # If true, skips empty or invalid blocks and + # continues processing. + + errorlevel = 1 # If 0, fatal errors only appear in debug + # messages (if debug >= 0). If > 0, errors + # are passed to the caller as exceptions. + + format = DEFAULT_FORMAT # The format to use when creating an archive. + + encoding = ENCODING # Encoding for 8-bit character strings. + + errors = None # Error handler for unicode conversion. + + tarinfo = TarInfo # The default TarInfo class to use. + + fileobject = ExFileObject # The file-object for extractfile(). + + extraction_filter = None # The default filter for extraction. + + def __init__(self, name=None, mode="r", fileobj=None, format=None, + tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, + errors="surrogateescape", pax_headers=None, debug=None, + errorlevel=None, copybufsize=None, stream=False): + """Open an (uncompressed) tar archive 'name'. 'mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. 'mode' + defaults to 'r'. + If 'fileobj' is given, it is used for reading or writing data. 
If it + can be determined, 'mode' is overridden by 'fileobj's mode. + 'fileobj' is not closed, when TarFile is closed. + """ + modes = {"r": "rb", "a": "r+b", "w": "wb", "x": "xb"} + if mode not in modes: + raise ValueError("mode must be 'r', 'a', 'w' or 'x'") + self.mode = mode + self._mode = modes[mode] + + if not fileobj: + if self.mode == "a" and not os.path.exists(name): + # Create nonexistent files in append mode. + self.mode = "w" + self._mode = "wb" + fileobj = bltn_open(name, self._mode) + self._extfileobj = False + else: + if (name is None and hasattr(fileobj, "name") and + isinstance(fileobj.name, (str, bytes))): + name = fileobj.name + if hasattr(fileobj, "mode"): + self._mode = fileobj.mode + self._extfileobj = True + self.name = os.path.abspath(name) if name else None + self.fileobj = fileobj + + self.stream = stream + + # Init attributes. + if format is not None: + self.format = format + if tarinfo is not None: + self.tarinfo = tarinfo + if dereference is not None: + self.dereference = dereference + if ignore_zeros is not None: + self.ignore_zeros = ignore_zeros + if encoding is not None: + self.encoding = encoding + self.errors = errors + + if pax_headers is not None and self.format == PAX_FORMAT: + self.pax_headers = pax_headers + else: + self.pax_headers = {} + + if debug is not None: + self.debug = debug + if errorlevel is not None: + self.errorlevel = errorlevel + + # Init datastructures. + self.copybufsize = copybufsize + self.closed = False + self.members = [] # list of members as TarInfo objects + self._loaded = False # flag if all members have been read + self.offset = self.fileobj.tell() + # current position in the archive file + self.inodes = {} # dictionary caching the inodes of + # archive members already added + self._unames = {} # Cached mappings of uid -> uname + self._gnames = {} # Cached mappings of gid -> gname + + try: + if self.mode == "r": + self.firstmember = None + self.firstmember = self.next() + + if self.mode == "a": + # Move to the end of the archive, + # before the first empty block. + while True: + self.fileobj.seek(self.offset) + try: + tarinfo = self.tarinfo.fromtarfile(self) + self.members.append(tarinfo) + except EOFHeaderError: + self.fileobj.seek(self.offset) + break + except HeaderError as e: + raise ReadError(str(e)) from None + + if self.mode in ("a", "w", "x"): + self._loaded = True + + if self.pax_headers: + buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) + self.fileobj.write(buf) + self.offset += len(buf) + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + #-------------------------------------------------------------------------- + # Below are the classmethods which act as alternate constructors to the + # TarFile class. The open() method is the only one that is needed for + # public use; it is the "super"-constructor and is able to select an + # adequate "sub"-constructor for a particular compression using the mapping + # from OPEN_METH. + # + # This concept allows one to subclass TarFile without losing the comfort of + # the super-constructor. A sub-constructor is registered and made available + # by adding it to the mapping in OPEN_METH. + + @classmethod + def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): + """Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. 
+ + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'r:xz' open for reading with lzma compression + 'r:zst' open for reading with zstd compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + 'w:xz' open for writing with lzma compression + 'w:zst' open for writing with zstd compression + + 'x' or 'x:' create a tarfile exclusively without compression, raise + an exception if the file is already created + 'x:gz' create a gzip compressed tarfile, raise an exception + if the file is already created + 'x:bz2' create a bzip2 compressed tarfile, raise an exception + if the file is already created + 'x:xz' create an lzma compressed tarfile, raise an exception + if the file is already created + 'x:zst' create a zstd compressed tarfile, raise an exception + if the file is already created + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'r|xz' open an lzma compressed stream of tar blocks + 'r|zst' open a zstd compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + 'w|xz' open an lzma compressed stream for writing + 'w|zst' open a zstd compressed stream for writing + """ + + if not name and not fileobj: + raise ValueError("nothing to open") + + if mode in ("r", "r:*"): + # Find out which *open() is appropriate for opening the file. + def not_compressed(comptype): + return cls.OPEN_METH[comptype] == 'taropen' + error_msgs = [] + for comptype in sorted(cls.OPEN_METH, key=not_compressed): + func = getattr(cls, cls.OPEN_METH[comptype]) + if fileobj is not None: + saved_pos = fileobj.tell() + try: + return func(name, "r", fileobj, **kwargs) + except (ReadError, CompressionError) as e: + error_msgs.append(f'- method {comptype}: {e!r}') + if fileobj is not None: + fileobj.seek(saved_pos) + continue + error_msgs_summary = '\n'.join(error_msgs) + raise ReadError(f"file could not be opened successfully:\n{error_msgs_summary}") + + elif ":" in mode: + filemode, comptype = mode.split(":", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + # Select the *open() function according to + # given compression. 
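+            # Editor's note, illustrative and not part of the vendored
+            # module: mode "r:gz" splits into filemode "r" and comptype
+            # "gz", which OPEN_METH resolves to cls.gzopen; a bare "w:"
+            # falls back to comptype "tar" and plain taropen.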
+ if comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + else: + raise CompressionError("unknown compression type %r" % comptype) + return func(name, filemode, fileobj, **kwargs) + + elif "|" in mode: + filemode, comptype = mode.split("|", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + if filemode not in ("r", "w"): + raise ValueError("mode must be 'r' or 'w'") + if "compresslevel" in kwargs and comptype not in ("gz", "bz2"): + raise ValueError( + "compresslevel is only valid for w|gz and w|bz2 modes" + ) + if "preset" in kwargs and comptype not in ("xz",): + raise ValueError("preset is only valid for w|xz mode") + + compresslevel = kwargs.pop("compresslevel", 9) + preset = kwargs.pop("preset", None) + stream = _Stream(name, filemode, comptype, fileobj, bufsize, + compresslevel, preset) + try: + t = cls(name, filemode, stream, **kwargs) + except: + stream.close() + raise + t._extfileobj = False + return t + + elif mode in ("a", "w", "x"): + return cls.taropen(name, mode, fileobj, **kwargs) + + raise ValueError("undiscernible mode") + + @classmethod + def taropen(cls, name, mode="r", fileobj=None, **kwargs): + """Open uncompressed tar archive name for reading or writing. + """ + if mode not in ("r", "a", "w", "x"): + raise ValueError("mode must be 'r', 'a', 'w' or 'x'") + return cls(name, mode, fileobj, **kwargs) + + @classmethod + def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if mode not in ("r", "w", "x"): + raise ValueError("mode must be 'r', 'w' or 'x'") + + try: + from gzip import GzipFile + except ImportError: + raise CompressionError("gzip module is not available") from None + + try: + fileobj = GzipFile(name, mode + "b", compresslevel, fileobj) + except OSError as e: + if fileobj is not None and mode == 'r': + raise ReadError("not a gzip file") from e + raise + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except OSError as e: + fileobj.close() + if mode == 'r': + raise ReadError("not a gzip file") from e + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + @classmethod + def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open bzip2 compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if mode not in ("r", "w", "x"): + raise ValueError("mode must be 'r', 'w' or 'x'") + + try: + from bz2 import BZ2File + except ImportError: + raise CompressionError("bz2 module is not available") from None + + fileobj = BZ2File(fileobj or name, mode, compresslevel=compresslevel) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (OSError, EOFError) as e: + fileobj.close() + if mode == 'r': + raise ReadError("not a bzip2 file") from e + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + @classmethod + def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs): + """Open lzma compressed tar archive name for reading or writing. + Appending is not allowed. 
+ """ + if mode not in ("r", "w", "x"): + raise ValueError("mode must be 'r', 'w' or 'x'") + + try: + from lzma import LZMAFile, LZMAError + except ImportError: + raise CompressionError("lzma module is not available") from None + + fileobj = LZMAFile(fileobj or name, mode, preset=preset) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (LZMAError, EOFError) as e: + fileobj.close() + if mode == 'r': + raise ReadError("not an lzma file") from e + raise + except: + fileobj.close() + raise + t._extfileobj = False + return t + + @classmethod + def zstopen(cls, name, mode="r", fileobj=None, level=None, options=None, + zstd_dict=None, **kwargs): + """Open zstd compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if mode not in ("r", "w", "x"): + raise ValueError("mode must be 'r', 'w' or 'x'") + + try: + from compression.zstd import ZstdFile, ZstdError + except ImportError: + raise CompressionError("compression.zstd module is not available") from None + + fileobj = ZstdFile( + fileobj or name, + mode, + level=level, + options=options, + zstd_dict=zstd_dict + ) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (ZstdError, EOFError) as e: + fileobj.close() + if mode == 'r': + raise ReadError("not a zstd file") from e + raise + except Exception: + fileobj.close() + raise + t._extfileobj = False + return t + + # All *open() methods are registered here. + OPEN_METH = { + "tar": "taropen", # uncompressed tar + "gz": "gzopen", # gzip compressed tar + "bz2": "bz2open", # bzip2 compressed tar + "xz": "xzopen", # lzma compressed tar + "zst": "zstopen", # zstd compressed tar + } + + #-------------------------------------------------------------------------- + # The public methods which TarFile provides: + + def close(self): + """Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + """ + if self.closed: + return + + self.closed = True + try: + if self.mode in ("a", "w", "x"): + self.fileobj.write(NUL * (BLOCKSIZE * 2)) + self.offset += (BLOCKSIZE * 2) + # fill up the end with zero-blocks + # (like option -b20 for tar does) + blocks, remainder = divmod(self.offset, RECORDSIZE) + if remainder > 0: + self.fileobj.write(NUL * (RECORDSIZE - remainder)) + finally: + if not self._extfileobj: + self.fileobj.close() + + def getmember(self, name): + """Return a TarInfo object for member 'name'. If 'name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + """ + tarinfo = self._getmember(name.rstrip('/')) + if tarinfo is None: + raise KeyError("filename %r not found" % name) + return tarinfo + + def getmembers(self): + """Return the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. + """ + self._check() + if not self._loaded: # if we want to obtain a list of + self._load() # all members, we first have to + # scan the whole archive. + return self.members + + def getnames(self): + """Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + """ + return [tarinfo.name for tarinfo in self.getmembers()] + + def gettarinfo(self, name=None, arcname=None, fileobj=None): + """Create a TarInfo object from the result of os.stat or equivalent + on an existing file. The file is either named by 'name', or + specified as a file object 'fileobj' with a file descriptor. 
If + given, 'arcname' specifies an alternative name for the file in the + archive, otherwise, the name is taken from the 'name' attribute of + 'fileobj', or the 'name' argument. The name should be a text + string. + """ + self._check("awx") + + # When fileobj is given, replace name by + # fileobj's real name. + if fileobj is not None: + name = fileobj.name + + # Building the name of the member in the archive. + # Backward slashes are converted to forward slashes, + # Absolute paths are turned to relative paths. + if arcname is None: + arcname = name + drv, arcname = os.path.splitdrive(arcname) + arcname = arcname.replace(os.sep, "/") + arcname = arcname.lstrip("/") + + # Now, fill the TarInfo object with + # information specific for the file. + tarinfo = self.tarinfo() + tarinfo._tarfile = self # To be removed in 3.16. + + # Use os.stat or os.lstat, depending on if symlinks shall be resolved. + if fileobj is None: + if not self.dereference: + statres = os.lstat(name) + else: + statres = os.stat(name) + else: + statres = os.fstat(fileobj.fileno()) + linkname = "" + + stmd = statres.st_mode + if stat.S_ISREG(stmd): + inode = (statres.st_ino, statres.st_dev) + if not self.dereference and statres.st_nlink > 1 and \ + inode in self.inodes and arcname != self.inodes[inode]: + # Is it a hardlink to an already + # archived file? + type = LNKTYPE + linkname = self.inodes[inode] + else: + # The inode is added only if its valid. + # For win32 it is always 0. + type = REGTYPE + if inode[0]: + self.inodes[inode] = arcname + elif stat.S_ISDIR(stmd): + type = DIRTYPE + elif stat.S_ISFIFO(stmd): + type = FIFOTYPE + elif stat.S_ISLNK(stmd): + type = SYMTYPE + linkname = os.readlink(name) + elif stat.S_ISCHR(stmd): + type = CHRTYPE + elif stat.S_ISBLK(stmd): + type = BLKTYPE + else: + return None + + # Fill the TarInfo object with all + # information we can get. + tarinfo.name = arcname + tarinfo.mode = stmd + tarinfo.uid = statres.st_uid + tarinfo.gid = statres.st_gid + if type == REGTYPE: + tarinfo.size = statres.st_size + else: + tarinfo.size = 0 + tarinfo.mtime = statres.st_mtime + tarinfo.type = type + tarinfo.linkname = linkname + + # Calls to pwd.getpwuid() and grp.getgrgid() tend to be expensive. To + # speed things up, cache the resolved usernames and group names. + if pwd: + if tarinfo.uid not in self._unames: + try: + self._unames[tarinfo.uid] = pwd.getpwuid(tarinfo.uid)[0] + except KeyError: + self._unames[tarinfo.uid] = '' + tarinfo.uname = self._unames[tarinfo.uid] + if grp: + if tarinfo.gid not in self._gnames: + try: + self._gnames[tarinfo.gid] = grp.getgrgid(tarinfo.gid)[0] + except KeyError: + self._gnames[tarinfo.gid] = '' + tarinfo.gname = self._gnames[tarinfo.gid] + + if type in (CHRTYPE, BLKTYPE): + if hasattr(os, "major") and hasattr(os, "minor"): + tarinfo.devmajor = os.major(statres.st_rdev) + tarinfo.devminor = os.minor(statres.st_rdev) + return tarinfo + + def list(self, verbose=True, *, members=None): + """Print a table of contents to sys.stdout. If 'verbose' is False, only + the names of the members are printed. If it is True, an 'ls -l'-like + output is produced. 'members' is optional and must be a subset of the + list returned by getmembers(). + """ + # Convert tarinfo type to stat type. 
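+        # Editor's note, illustrative and not part of the vendored module:
+        # mapping member types onto stat constants lets stat.filemode()
+        # render ls-style strings, e.g. a DIRTYPE member with mode 0o755
+        # prints as 'drwxr-xr-x'.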
+ type2mode = {REGTYPE: stat.S_IFREG, SYMTYPE: stat.S_IFLNK, + FIFOTYPE: stat.S_IFIFO, CHRTYPE: stat.S_IFCHR, + DIRTYPE: stat.S_IFDIR, BLKTYPE: stat.S_IFBLK} + self._check() + + if members is None: + members = self + for tarinfo in members: + if verbose: + if tarinfo.mode is None: + _safe_print("??????????") + else: + modetype = type2mode.get(tarinfo.type, 0) + _safe_print(stat.filemode(modetype | tarinfo.mode)) + _safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid, + tarinfo.gname or tarinfo.gid)) + if tarinfo.ischr() or tarinfo.isblk(): + _safe_print("%10s" % + ("%d,%d" % (tarinfo.devmajor, tarinfo.devminor))) + else: + _safe_print("%10d" % tarinfo.size) + if tarinfo.mtime is None: + _safe_print("????-??-?? ??:??:??") + else: + _safe_print("%d-%02d-%02d %02d:%02d:%02d" \ + % time.localtime(tarinfo.mtime)[:6]) + + _safe_print(tarinfo.name + ("/" if tarinfo.isdir() else "")) + + if verbose: + if tarinfo.issym(): + _safe_print("-> " + tarinfo.linkname) + if tarinfo.islnk(): + _safe_print("link to " + tarinfo.linkname) + print() + + def add(self, name, arcname=None, recursive=True, *, filter=None): + """Add the file 'name' to the archive. 'name' may be any type of file + (directory, fifo, symbolic link, etc.). If given, 'arcname' + specifies an alternative name for the file in the archive. + Directories are added recursively by default. This can be avoided by + setting 'recursive' to False. 'filter' is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + """ + self._check("awx") + + if arcname is None: + arcname = name + + # Skip if somebody tries to archive the archive... + if self.name is not None and os.path.abspath(name) == self.name: + self._dbg(2, "tarfile: Skipped %r" % name) + return + + self._dbg(1, name) + + # Create a TarInfo object from the file. + tarinfo = self.gettarinfo(name, arcname) + + if tarinfo is None: + self._dbg(1, "tarfile: Unsupported type %r" % name) + return + + # Change or exclude the TarInfo object. + if filter is not None: + tarinfo = filter(tarinfo) + if tarinfo is None: + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Append the tar header and data to the archive. + if tarinfo.isreg(): + with bltn_open(name, "rb") as f: + self.addfile(tarinfo, f) + + elif tarinfo.isdir(): + self.addfile(tarinfo) + if recursive: + for f in sorted(os.listdir(name)): + self.add(os.path.join(name, f), os.path.join(arcname, f), + recursive, filter=filter) + + else: + self.addfile(tarinfo) + + def addfile(self, tarinfo, fileobj=None): + """Add the TarInfo object 'tarinfo' to the archive. If 'tarinfo' represents + a non zero-size regular file, the 'fileobj' argument should be a binary file, + and tarinfo.size bytes are read from it and added to the archive. + You can create TarInfo objects directly, or by using gettarinfo(). + """ + self._check("awx") + + if fileobj is None and tarinfo.isreg() and tarinfo.size != 0: + raise ValueError("fileobj not provided for non zero-size regular file") + + tarinfo = copy.copy(tarinfo) + + buf = tarinfo.tobuf(self.format, self.encoding, self.errors) + self.fileobj.write(buf) + self.offset += len(buf) + bufsize=self.copybufsize + # If there's data to follow, append it. 
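+        # Editor's note, illustrative and not part of the vendored module:
+        # member data is padded to the 512-byte block grid, so a 600-byte
+        # file occupies 1024 bytes after its header -- 600 bytes of data
+        # plus 424 NUL bytes of padding.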
+ if fileobj is not None: + copyfileobj(fileobj, self.fileobj, tarinfo.size, bufsize=bufsize) + blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) + if remainder > 0: + self.fileobj.write(NUL * (BLOCKSIZE - remainder)) + blocks += 1 + self.offset += blocks * BLOCKSIZE + + self.members.append(tarinfo) + + def _get_filter_function(self, filter): + if filter is None: + filter = self.extraction_filter + if filter is None: + return data_filter + if isinstance(filter, str): + raise TypeError( + 'String names are not supported for ' + + 'TarFile.extraction_filter. Use a function such as ' + + 'tarfile.data_filter directly.') + return filter + if callable(filter): + return filter + try: + return _NAMED_FILTERS[filter] + except KeyError: + raise ValueError(f"filter {filter!r} not found") from None + + def extractall(self, path=".", members=None, *, numeric_owner=False, + filter=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. 'path' specifies a different directory + to extract to. 'members' is optional and must be a subset of the + list returned by getmembers(). If 'numeric_owner' is True, only + the numbers for user/group names are used and not the names. + + The 'filter' function will be called on each member just + before extraction. + It can return a changed TarInfo or None to skip the member. + String names of common filters are accepted. + """ + directories = [] + + filter_function = self._get_filter_function(filter) + if members is None: + members = self + + for member in members: + tarinfo, unfiltered = self._get_extract_tarinfo( + member, filter_function, path) + if tarinfo is None: + continue + if tarinfo.isdir(): + # For directories, delay setting attributes until later, + # since permissions can interfere with extraction and + # extracting contents can reset mtime. + directories.append(unfiltered) + self._extract_one(tarinfo, path, set_attrs=not tarinfo.isdir(), + numeric_owner=numeric_owner, + filter_function=filter_function) + + # Reverse sort directories. + directories.sort(key=lambda a: a.name, reverse=True) + + + # Set correct owner, mtime and filemode on directories. + for unfiltered in directories: + try: + # Need to re-apply any filter, to take the *current* filesystem + # state into account. + try: + tarinfo = filter_function(unfiltered, path) + except _FILTER_ERRORS as exc: + self._log_no_directory_fixup(unfiltered, repr(exc)) + continue + if tarinfo is None: + self._log_no_directory_fixup(unfiltered, + 'excluded by filter') + continue + dirpath = os.path.join(path, tarinfo.name) + try: + lstat = os.lstat(dirpath) + except FileNotFoundError: + self._log_no_directory_fixup(tarinfo, 'missing') + continue + if not stat.S_ISDIR(lstat.st_mode): + # This is no longer a directory; presumably a later + # member overwrote the entry. + self._log_no_directory_fixup(tarinfo, 'not a directory') + continue + self.chown(tarinfo, dirpath, numeric_owner=numeric_owner) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError as e: + self._handle_nonfatal_error(e) + + def _log_no_directory_fixup(self, member, reason): + self._dbg(2, "tarfile: Not fixing up directory %r (%s)" % + (member.name, reason)) + + def extract(self, member, path="", set_attrs=True, *, numeric_owner=False, + filter=None): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. 
'member' may be a filename or a TarInfo object. You can + specify a different directory using 'path'. File attributes (owner, + mtime, mode) are set unless 'set_attrs' is False. If 'numeric_owner' + is True, only the numbers for user/group names are used and not + the names. + + The 'filter' function will be called before extraction. + It can return a changed TarInfo or None to skip the member. + String names of common filters are accepted. + """ + filter_function = self._get_filter_function(filter) + tarinfo, unfiltered = self._get_extract_tarinfo( + member, filter_function, path) + if tarinfo is not None: + self._extract_one(tarinfo, path, set_attrs, numeric_owner) + + def _get_extract_tarinfo(self, member, filter_function, path): + """Get (filtered, unfiltered) TarInfos from *member* + + *member* might be a string. + + Return (None, None) if not found. + """ + + if isinstance(member, str): + unfiltered = self.getmember(member) + else: + unfiltered = member + + filtered = None + try: + filtered = filter_function(unfiltered, path) + except (OSError, UnicodeEncodeError, FilterError) as e: + self._handle_fatal_error(e) + except ExtractError as e: + self._handle_nonfatal_error(e) + if filtered is None: + self._dbg(2, "tarfile: Excluded %r" % unfiltered.name) + return None, None + + # Prepare the link target for makelink(). + if filtered.islnk(): + filtered = copy.copy(filtered) + filtered._link_target = os.path.join(path, filtered.linkname) + return filtered, unfiltered + + def _extract_one(self, tarinfo, path, set_attrs, numeric_owner, + filter_function=None): + """Extract from filtered tarinfo to disk. + + filter_function is only used when extracting a *different* + member (e.g. as fallback to creating a symlink) + """ + self._check("r") + + try: + self._extract_member(tarinfo, os.path.join(path, tarinfo.name), + set_attrs=set_attrs, + numeric_owner=numeric_owner, + filter_function=filter_function, + extraction_root=path) + except (OSError, UnicodeEncodeError) as e: + self._handle_fatal_error(e) + except ExtractError as e: + self._handle_nonfatal_error(e) + + def _handle_nonfatal_error(self, e): + """Handle non-fatal error (ExtractError) according to errorlevel""" + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def _handle_fatal_error(self, e): + """Handle "fatal" error according to self.errorlevel""" + if self.errorlevel > 0: + raise + elif isinstance(e, OSError): + if e.filename is None: + self._dbg(1, "tarfile: %s" % e.strerror) + else: + self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) + else: + self._dbg(1, "tarfile: %s %s" % (type(e).__name__, e)) + + def extractfile(self, member): + """Extract a member from the archive as a file object. 'member' may be + a filename or a TarInfo object. If 'member' is a regular file or + a link, an io.BufferedReader object is returned. For all other + existing members, None is returned. If 'member' does not appear + in the archive, KeyError is raised. + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES: + # Members with unknown types are treated as regular files. + return self.fileobject(self, tarinfo) + + elif tarinfo.islnk() or tarinfo.issym(): + if isinstance(self.fileobj, _Stream): + # A small but ugly workaround for the case that someone tries + # to extract a (sym)link as a file-object from a non-seekable + # stream of tar blocks. 
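+                # (Resolving the link target would require seeking backwards
+                # in the archive, which a compressed or piped _Stream cannot
+                # do, hence the error instead of a silently wrong result.)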
+ raise StreamError("cannot extract (sym)link as file object") + else: + # A (sym)link's file object is its target's file object. + return self.extractfile(self._find_link_target(tarinfo)) + else: + # If there's no data associated with the member (directory, chrdev, + # blkdev, etc.), return None instead of a file object. + return None + + def _extract_member(self, tarinfo, targetpath, set_attrs=True, + numeric_owner=False, *, filter_function=None, + extraction_root=None): + """Extract the filtered TarInfo object tarinfo to a physical + file called targetpath. + + filter_function is only used when extracting a *different* + member (e.g. as fallback to creating a symlink) + """ + # Fetch the TarInfo object for the given name + # and build the destination pathname, replacing + # forward slashes to platform specific separators. + targetpath = targetpath.rstrip("/") + targetpath = targetpath.replace("/", os.sep) + + # Create all upper directories. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + # Create directories that are not part of the archive with + # default permissions. + os.makedirs(upperdirs, exist_ok=True) + + if tarinfo.islnk() or tarinfo.issym(): + self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) + else: + self._dbg(1, tarinfo.name) + + if tarinfo.isreg(): + self.makefile(tarinfo, targetpath) + elif tarinfo.isdir(): + self.makedir(tarinfo, targetpath) + elif tarinfo.isfifo(): + self.makefifo(tarinfo, targetpath) + elif tarinfo.ischr() or tarinfo.isblk(): + self.makedev(tarinfo, targetpath) + elif tarinfo.islnk() or tarinfo.issym(): + self.makelink_with_filter( + tarinfo, targetpath, + filter_function=filter_function, + extraction_root=extraction_root) + elif tarinfo.type not in SUPPORTED_TYPES: + self.makeunknown(tarinfo, targetpath) + else: + self.makefile(tarinfo, targetpath) + + if set_attrs: + self.chown(tarinfo, targetpath, numeric_owner) + if not tarinfo.issym(): + self.chmod(tarinfo, targetpath) + self.utime(tarinfo, targetpath) + + #-------------------------------------------------------------------------- + # Below are the different file methods. They are called via + # _extract_member() when extract() is called. They can be replaced in a + # subclass to implement other functionality. + + def makedir(self, tarinfo, targetpath): + """Make a directory called targetpath. + """ + try: + if tarinfo.mode is None: + # Use the system's default mode + os.mkdir(targetpath) + else: + # Use a safe mode for the directory, the real mode is set + # later in _extract_member(). + os.mkdir(targetpath, 0o700) + except FileExistsError: + if not os.path.isdir(targetpath): + raise + + def makefile(self, tarinfo, targetpath): + """Make a file called targetpath. + """ + source = self.fileobj + source.seek(tarinfo.offset_data) + bufsize = self.copybufsize + with bltn_open(targetpath, "wb") as target: + if tarinfo.sparse is not None: + for offset, size in tarinfo.sparse: + target.seek(offset) + copyfileobj(source, target, size, ReadError, bufsize) + target.seek(tarinfo.size) + target.truncate() + else: + copyfileobj(source, target, tarinfo.size, ReadError, bufsize) + + def makeunknown(self, tarinfo, targetpath): + """Make a file from a TarInfo object with an unknown type + at targetpath. + """ + self.makefile(tarinfo, targetpath) + self._dbg(1, "tarfile: Unknown file type %r, " \ + "extracted as regular file." % tarinfo.type) + + def makefifo(self, tarinfo, targetpath): + """Make a fifo called targetpath. 
+ """ + if hasattr(os, "mkfifo"): + os.mkfifo(targetpath) + else: + raise ExtractError("fifo not supported by system") + + def makedev(self, tarinfo, targetpath): + """Make a character or block device called targetpath. + """ + if not hasattr(os, "mknod") or not hasattr(os, "makedev"): + raise ExtractError("special devices not supported by system") + + mode = tarinfo.mode + if mode is None: + # Use mknod's default + mode = 0o600 + if tarinfo.isblk(): + mode |= stat.S_IFBLK + else: + mode |= stat.S_IFCHR + + os.mknod(targetpath, mode, + os.makedev(tarinfo.devmajor, tarinfo.devminor)) + + def makelink(self, tarinfo, targetpath): + return self.makelink_with_filter(tarinfo, targetpath, None, None) + + def makelink_with_filter(self, tarinfo, targetpath, + filter_function, extraction_root): + """Make a (symbolic) link called targetpath. If it cannot be created + (platform limitation), we try to make a copy of the referenced file + instead of a link. + + filter_function is only used when extracting a *different* + member (e.g. as fallback to creating a link). + """ + keyerror_to_extracterror = False + try: + # For systems that support symbolic and hard links. + if tarinfo.issym(): + if os.path.lexists(targetpath): + # Avoid FileExistsError on following os.symlink. + os.unlink(targetpath) + os.symlink(tarinfo.linkname, targetpath) + return + else: + if os.path.exists(tarinfo._link_target): + if os.path.lexists(targetpath): + # Avoid FileExistsError on following os.link. + os.unlink(targetpath) + os.link(tarinfo._link_target, targetpath) + return + except symlink_exception: + keyerror_to_extracterror = True + + try: + unfiltered = self._find_link_target(tarinfo) + except KeyError: + if keyerror_to_extracterror: + raise ExtractError( + "unable to resolve link inside archive") from None + else: + raise + + if filter_function is None: + filtered = unfiltered + else: + if extraction_root is None: + raise ExtractError( + "makelink_with_filter: if filter_function is not None, " + + "extraction_root must also not be None") + try: + filtered = filter_function(unfiltered, extraction_root) + except _FILTER_ERRORS as cause: + raise LinkFallbackError(tarinfo, unfiltered.name) from cause + if filtered is not None: + self._extract_member(filtered, targetpath, + filter_function=filter_function, + extraction_root=extraction_root) + + def chown(self, tarinfo, targetpath, numeric_owner): + """Set owner of targetpath according to tarinfo. If numeric_owner + is True, use .gid/.uid instead of .gname/.uname. If numeric_owner + is False, fall back to .gid/.uid when the search based on name + fails. + """ + if hasattr(os, "geteuid") and os.geteuid() == 0: + # We have to be root to do so. + g = tarinfo.gid + u = tarinfo.uid + if not numeric_owner: + try: + if grp and tarinfo.gname: + g = grp.getgrnam(tarinfo.gname)[2] + except KeyError: + pass + try: + if pwd and tarinfo.uname: + u = pwd.getpwnam(tarinfo.uname)[2] + except KeyError: + pass + if g is None: + g = -1 + if u is None: + u = -1 + try: + if tarinfo.issym() and hasattr(os, "lchown"): + os.lchown(targetpath, u, g) + else: + os.chown(targetpath, u, g) + except (OSError, OverflowError) as e: + # OverflowError can be raised if an ID doesn't fit in 'id_t' + raise ExtractError("could not change owner") from e + + def chmod(self, tarinfo, targetpath): + """Set file permissions of targetpath according to tarinfo. 
+ """ + if tarinfo.mode is None: + return + try: + os.chmod(targetpath, tarinfo.mode) + except OSError as e: + raise ExtractError("could not change mode") from e + + def utime(self, tarinfo, targetpath): + """Set modification time of targetpath according to tarinfo. + """ + mtime = tarinfo.mtime + if mtime is None: + return + if not hasattr(os, 'utime'): + return + try: + os.utime(targetpath, (mtime, mtime)) + except OSError as e: + raise ExtractError("could not change modification time") from e + + #-------------------------------------------------------------------------- + def next(self): + """Return the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + """ + self._check("ra") + if self.firstmember is not None: + m = self.firstmember + self.firstmember = None + return m + + # Advance the file pointer. + if self.offset != self.fileobj.tell(): + if self.offset == 0: + return None + self.fileobj.seek(self.offset - 1) + if not self.fileobj.read(1): + raise ReadError("unexpected end of data") + + # Read the next block. + tarinfo = None + while True: + try: + tarinfo = self.tarinfo.fromtarfile(self) + except EOFHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + except InvalidHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + elif self.offset == 0: + raise ReadError(str(e)) from None + except EmptyHeaderError: + if self.offset == 0: + raise ReadError("empty file") from None + except TruncatedHeaderError as e: + if self.offset == 0: + raise ReadError(str(e)) from None + except SubsequentHeaderError as e: + raise ReadError(str(e)) from None + except Exception as e: + try: + import zlib + if isinstance(e, zlib.error): + raise ReadError(f'zlib error: {e}') from None + else: + raise e + except ImportError: + raise e + break + + if tarinfo is not None: + # if streaming the file we do not want to cache the tarinfo + if not self.stream: + self.members.append(tarinfo) + else: + self._loaded = True + + return tarinfo + + #-------------------------------------------------------------------------- + # Little helper methods: + + def _getmember(self, name, tarinfo=None, normalize=False): + """Find an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + """ + # Ensure that all members have been loaded. + members = self.getmembers() + + # Limit the member search list up to tarinfo. + skipping = False + if tarinfo is not None: + try: + index = members.index(tarinfo) + except ValueError: + # The given starting point might be a (modified) copy. + # We'll later skip members until we find an equivalent. + skipping = True + else: + # Happy fast path + members = members[:index] + + if normalize: + name = os.path.normpath(name) + + for member in reversed(members): + if skipping: + if tarinfo.offset == member.offset: + skipping = False + continue + if normalize: + member_name = os.path.normpath(member.name) + else: + member_name = member.name + + if name == member_name: + return member + + if skipping: + # Starting point was not found + raise ValueError(tarinfo) + + def _load(self): + """Read through the entire archive file and look for readable + members. This should not run if the file is set to stream. 
+ """ + if not self.stream: + while self.next() is not None: + pass + self._loaded = True + + def _check(self, mode=None): + """Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. + """ + if self.closed: + raise OSError("%s is closed" % self.__class__.__name__) + if mode is not None and self.mode not in mode: + raise OSError("bad operation for mode %r" % self.mode) + + def _find_link_target(self, tarinfo): + """Find the target member of a symlink or hardlink member in the + archive. + """ + if tarinfo.issym(): + # Always search the entire archive. + linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname))) + limit = None + else: + # Search the archive before the link, because a hard link is + # just a reference to an already archived file. + linkname = tarinfo.linkname + limit = tarinfo + + member = self._getmember(linkname, tarinfo=limit, normalize=True) + if member is None: + raise KeyError("linkname %r not found" % linkname) + return member + + def __iter__(self): + """Provide an iterator object. + """ + if self._loaded: + yield from self.members + return + + # Yield items using TarFile's next() method. + # When all members have been read, set TarFile as _loaded. + index = 0 + # Fix for SF #1100429: Under rare circumstances it can + # happen that getmembers() is called during iteration, + # which will have already exhausted the next() method. + if self.firstmember is not None: + tarinfo = self.next() + index += 1 + yield tarinfo + + while True: + if index < len(self.members): + tarinfo = self.members[index] + elif not self._loaded: + tarinfo = self.next() + if not tarinfo: + self._loaded = True + return + else: + return + index += 1 + yield tarinfo + + def _dbg(self, level, msg): + """Write debugging output to sys.stderr. + """ + if level <= self.debug: + print(msg, file=sys.stderr) + + def __enter__(self): + self._check() + return self + + def __exit__(self, type, value, traceback): + if type is None: + self.close() + else: + # An exception occurred. We must not call close() because + # it would try to write end-of-archive blocks and padding. + if not self._extfileobj: + self.fileobj.close() + self.closed = True + +#-------------------- +# exported functions +#-------------------- + +def is_tarfile(name): + """Return True if name points to a tar archive that we + are able to handle, else return False. + + 'name' should be a string, file, or file-like object. + """ + try: + if hasattr(name, "read"): + pos = name.tell() + t = open(fileobj=name) + name.seek(pos) + else: + t = open(name) + t.close() + return True + except TarError: + return False + +open = TarFile.open + + +def main(): + import argparse + + description = 'A simple command-line interface for tarfile module.' 
+    parser = argparse.ArgumentParser(description=description, color=True)
+    parser.add_argument('-v', '--verbose', action='store_true', default=False,
+                        help='Verbose output')
+    parser.add_argument('--filter', metavar='<filtername>',
+                        choices=_NAMED_FILTERS,
+                        help='Filter for extraction')
+
+    group = parser.add_mutually_exclusive_group(required=True)
+    group.add_argument('-l', '--list', metavar='<tarfile>',
+                       help='Show listing of a tarfile')
+    group.add_argument('-e', '--extract', nargs='+',
+                       metavar=('<tarfile>', '<output_dir>'),
+                       help='Extract tarfile into target dir')
+    group.add_argument('-c', '--create', nargs='+',
+                       metavar=('<name>', '<file>'),
+                       help='Create tarfile from sources')
+    group.add_argument('-t', '--test', metavar='<tarfile>',
+                       help='Test if a tarfile is valid')
+
+    args = parser.parse_args()
+
+    if args.filter and args.extract is None:
+        parser.exit(1, '--filter is only valid for extraction\n')
+
+    if args.test is not None:
+        src = args.test
+        if is_tarfile(src):
+            with open(src, 'r') as tar:
+                tar.getmembers()
+                print(tar.getmembers(), file=sys.stderr)
+            if args.verbose:
+                print('{!r} is a tar archive.'.format(src))
+        else:
+            parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
+
+    elif args.list is not None:
+        src = args.list
+        if is_tarfile(src):
+            with TarFile.open(src, 'r:*') as tf:
+                tf.list(verbose=args.verbose)
+        else:
+            parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
+
+    elif args.extract is not None:
+        if len(args.extract) == 1:
+            src = args.extract[0]
+            curdir = os.curdir
+        elif len(args.extract) == 2:
+            src, curdir = args.extract
+        else:
+            parser.exit(1, parser.format_help())
+
+        if is_tarfile(src):
+            with TarFile.open(src, 'r:*') as tf:
+                tf.extractall(path=curdir, filter=args.filter)
+            if args.verbose:
+                if curdir == '.':
+                    msg = '{!r} file is extracted.'.format(src)
+                else:
+                    msg = ('{!r} file is extracted '
+                           'into {!r} directory.').format(src, curdir)
+                print(msg)
+        else:
+            parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
+
+    elif args.create is not None:
+        tar_name = args.create.pop(0)
+        _, ext = os.path.splitext(tar_name)
+        compressions = {
+            # gz
+            '.gz': 'gz',
+            '.tgz': 'gz',
+            # xz
+            '.xz': 'xz',
+            '.txz': 'xz',
+            # bz2
+            '.bz2': 'bz2',
+            '.tbz': 'bz2',
+            '.tbz2': 'bz2',
+            '.tb2': 'bz2',
+            # zstd
+            '.zst': 'zst',
+            '.tzst': 'zst',
+        }
+        tar_mode = 'w:' + compressions[ext] if ext in compressions else 'w'
+        tar_files = args.create
+
+        with TarFile.open(tar_name, tar_mode) as tf:
+            for file_name in tar_files:
+                tf.add(file_name)
+
+        if args.verbose:
+            print('{!r} file created.'.format(tar_name))
+
+if __name__ == '__main__':
+    main()
diff --git a/Python314_4_x86_Template/Lib/tempfile.py b/Python314_4_x86_Template/Lib/tempfile.py
new file mode 100644
index 00000000..a34e062f
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/tempfile.py
@@ -0,0 +1,977 @@
+"""Temporary files.
+
+This module provides generic, low- and high-level interfaces for
+creating temporary files and directories. All of the interfaces
+provided by this module can be used without fear of race conditions
+except for 'mktemp'. 'mktemp' is subject to race conditions and
+should not be used; it is provided for backward compatibility only.
+
+The default path names are returned as str. If you supply bytes as
+input, all return values will be in bytes. Ex:
+
+    >>> tempfile.mkstemp()
+    (4, '/tmp/tmptpu9nin8')
+    >>> tempfile.mkdtemp(suffix=b'')
+    b'/tmp/tmppbi8f0hy'
+
+This module also provides some data items to the user:
+
+  TMP_MAX  - maximum number of names that will be tried before
+             giving up.
+ tempdir - If this is set to a string before the first use of + any routine from this module, it will be considered as + another candidate location to store temporary files. +""" + +__all__ = [ + "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces + "SpooledTemporaryFile", "TemporaryDirectory", + "mkstemp", "mkdtemp", # low level safe interfaces + "mktemp", # deprecated unsafe interface + "TMP_MAX", "gettempprefix", # constants + "tempdir", "gettempdir", + "gettempprefixb", "gettempdirb", + ] + + +# Imports. + +import functools as _functools +import warnings as _warnings +import io as _io +import os as _os +import shutil as _shutil +import errno as _errno +from random import Random as _Random +import sys as _sys +import types as _types +import weakref as _weakref +import _thread +_allocate_lock = _thread.allocate_lock + +_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL +if hasattr(_os, 'O_NOFOLLOW'): + _text_openflags |= _os.O_NOFOLLOW + +_bin_openflags = _text_openflags +if hasattr(_os, 'O_BINARY'): + _bin_openflags |= _os.O_BINARY + +# This is more than enough. +# Each name contains over 40 random bits. Even with a million temporary +# files, the chance of a conflict is less than 1 in a million, and with +# 20 attempts, it is less than 1e-120. +TMP_MAX = 20 + +# This variable _was_ unused for legacy reasons, see issue 10354. +# But as of 3.5 we actually use it at runtime so changing it would +# have a possibly desirable side effect... But we do not want to support +# that as an API. It is undocumented on purpose. Do not depend on this. +template = "tmp" + +# Internal routines. + +_once_lock = _allocate_lock() + + +def _exists(fn): + try: + _os.lstat(fn) + except OSError: + return False + else: + return True + + +def _infer_return_type(*args): + """Look at the type of all args and divine their implied return type.""" + return_type = None + for arg in args: + if arg is None: + continue + + if isinstance(arg, _os.PathLike): + arg = _os.fspath(arg) + + if isinstance(arg, bytes): + if return_type is str: + raise TypeError("Can't mix bytes and non-bytes in " + "path components.") + return_type = bytes + else: + if return_type is bytes: + raise TypeError("Can't mix bytes and non-bytes in " + "path components.") + return_type = str + if return_type is None: + if tempdir is None or isinstance(tempdir, str): + return str # tempfile APIs return a str by default. + else: + # we could check for bytes but it'll fail later on anyway + return bytes + return return_type + + +def _sanitize_params(prefix, suffix, dir): + """Common parameter processing for most APIs in this module.""" + output_type = _infer_return_type(prefix, suffix, dir) + if suffix is None: + suffix = output_type() + if prefix is None: + if output_type is str: + prefix = template + else: + prefix = _os.fsencode(template) + if dir is None: + if output_type is str: + dir = gettempdir() + else: + dir = gettempdirb() + return prefix, suffix, dir, output_type + + +class _RandomNameSequence: + """An instance of _RandomNameSequence generates an endless + sequence of unpredictable strings which can safely be incorporated + into file names. Each string is eight characters long. Multiple + threads can safely use the same instance at the same time. 
+ + _RandomNameSequence is an iterator.""" + + characters = "abcdefghijklmnopqrstuvwxyz0123456789_" + + @property + def rng(self): + cur_pid = _os.getpid() + if cur_pid != getattr(self, '_rng_pid', None): + self._rng = _Random() + self._rng_pid = cur_pid + return self._rng + + def __iter__(self): + return self + + def __next__(self): + return ''.join(self.rng.choices(self.characters, k=8)) + +def _candidate_tempdir_list(): + """Generate a list of candidate temporary directories which + _get_default_tempdir will try.""" + + dirlist = [] + + # First, try the environment. + for envname in 'TMPDIR', 'TEMP', 'TMP': + dirname = _os.getenv(envname) + if dirname: dirlist.append(dirname) + + # Failing that, try OS-specific locations. + if _os.name == 'nt': + dirlist.extend([ _os.path.expanduser(r'~\AppData\Local\Temp'), + _os.path.expandvars(r'%SYSTEMROOT%\Temp'), + r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ]) + else: + dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ]) + + # As a last resort, the current directory. + try: + dirlist.append(_os.getcwd()) + except (AttributeError, OSError): + dirlist.append(_os.curdir) + + return dirlist + +def _get_default_tempdir(dirlist=None): + """Calculate the default directory to use for temporary files. + This routine should be called exactly once. + + We determine whether or not a candidate temp dir is usable by + trying to create and write to a file in that directory. If this + is successful, the test file is deleted. To prevent denial of + service, the name of the test file must be randomized.""" + + namer = _RandomNameSequence() + if dirlist is None: + dirlist = _candidate_tempdir_list() + + for dir in dirlist: + if dir != _os.curdir: + dir = _os.path.abspath(dir) + for seq in range(TMP_MAX): + name = next(namer) + filename = _os.path.join(dir, name) + try: + fd = _os.open(filename, _bin_openflags, 0o600) + try: + try: + _os.write(fd, b'blat') + finally: + _os.close(fd) + finally: + _os.unlink(filename) + return dir + except FileExistsError: + pass + except PermissionError: + # See the comment in mkdtemp(). + if _os.name == 'nt' and _os.path.isdir(dir): + continue + break # no point trying more names in this directory + except OSError: + break # no point trying more names in this directory + raise FileNotFoundError(_errno.ENOENT, + "No usable temporary directory found in %s" % + dirlist) + +_name_sequence = None + +def _get_candidate_names(): + """Common setup sequence for all user-callable interfaces.""" + + global _name_sequence + if _name_sequence is None: + _once_lock.acquire() + try: + if _name_sequence is None: + _name_sequence = _RandomNameSequence() + finally: + _once_lock.release() + return _name_sequence + + +def _mkstemp_inner(dir, pre, suf, flags, output_type): + """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.""" + + dir = _os.path.abspath(dir) + names = _get_candidate_names() + if output_type is bytes: + names = map(_os.fsencode, names) + + for seq in range(TMP_MAX): + name = next(names) + file = _os.path.join(dir, pre + name + suf) + _sys.audit("tempfile.mkstemp", file) + try: + fd = _os.open(file, flags, 0o600) + except FileExistsError: + continue # try again + except PermissionError: + # See the comment in mkdtemp(). 
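+            # (On Windows, PermissionError can also mean the candidate name
+            # is already taken by a directory, so retrying a new name helps.)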
+ if _os.name == 'nt' and _os.path.isdir(dir) and seq < TMP_MAX - 1: + continue + else: + raise + return fd, file + + raise FileExistsError(_errno.EEXIST, + "No usable temporary file name found") + +def _dont_follow_symlinks(func, path, *args): + # Pass follow_symlinks=False, unless not supported on this platform. + if func in _os.supports_follow_symlinks: + func(path, *args, follow_symlinks=False) + elif not _os.path.islink(path): + func(path, *args) + +def _resetperms(path): + try: + chflags = _os.chflags + except AttributeError: + pass + else: + _dont_follow_symlinks(chflags, path, 0) + _dont_follow_symlinks(_os.chmod, path, 0o700) + + +# User visible interfaces. + +def gettempprefix(): + """The default prefix for temporary directories as string.""" + return _os.fsdecode(template) + +def gettempprefixb(): + """The default prefix for temporary directories as bytes.""" + return _os.fsencode(template) + +tempdir = None + +def _gettempdir(): + """Private accessor for tempfile.tempdir.""" + global tempdir + if tempdir is None: + _once_lock.acquire() + try: + if tempdir is None: + tempdir = _get_default_tempdir() + finally: + _once_lock.release() + return tempdir + +def gettempdir(): + """Returns tempfile.tempdir as str.""" + return _os.fsdecode(_gettempdir()) + +def gettempdirb(): + """Returns tempfile.tempdir as bytes.""" + return _os.fsencode(_gettempdir()) + +def mkstemp(suffix=None, prefix=None, dir=None, text=False): + """User-callable function to create and return a unique temporary + file. The return value is a pair (fd, name) where fd is the + file descriptor returned by os.open, and name is the filename. + + If 'suffix' is not None, the file name will end with that suffix, + otherwise there will be no suffix. + + If 'prefix' is not None, the file name will begin with that prefix, + otherwise a default prefix is used. + + If 'dir' is not None, the file will be created in that directory, + otherwise a default directory is used. + + If 'text' is specified and true, the file is opened in text + mode. Else (the default) the file is opened in binary mode. + + If any of 'suffix', 'prefix' and 'dir' are not None, they must be the + same type. If they are bytes, the returned name will be bytes; str + otherwise. + + The file is readable and writable only by the creating user ID. + If the operating system uses permission bits to indicate whether a + file is executable, the file is executable by no one. The file + descriptor is not inherited by children of this process. + + Caller is responsible for deleting the file when done with it. + """ + + prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) + + if text: + flags = _text_openflags + else: + flags = _bin_openflags + + return _mkstemp_inner(dir, prefix, suffix, flags, output_type) + + +def mkdtemp(suffix=None, prefix=None, dir=None): + """User-callable function to create and return a unique temporary + directory. The return value is the pathname of the directory. + + Arguments are as for mkstemp, except that the 'text' argument is + not accepted. + + The directory is readable, writable, and searchable only by the + creating user. + + Caller is responsible for deleting the directory when done with it. 
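+
+    For example (the random part of the returned name is illustrative):
+
+        path = mkdtemp(prefix='example_')   # e.g. '/tmp/example_a1b2c3d4'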
+ """ + + prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) + + names = _get_candidate_names() + if output_type is bytes: + names = map(_os.fsencode, names) + + for seq in range(TMP_MAX): + name = next(names) + file = _os.path.join(dir, prefix + name + suffix) + _sys.audit("tempfile.mkdtemp", file) + try: + _os.mkdir(file, 0o700) + except FileExistsError: + continue # try again + except PermissionError: + # On Posix, this exception is raised when the user has no + # write access to the parent directory. + # On Windows, it is also raised when a directory with + # the chosen name already exists, or if the parent directory + # is not a directory. + # We cannot distinguish between "directory-exists-error" and + # "access-denied-error". + if _os.name == 'nt' and _os.path.isdir(dir) and seq < TMP_MAX - 1: + continue + else: + raise + return _os.path.abspath(file) + + raise FileExistsError(_errno.EEXIST, + "No usable temporary directory name found") + +def mktemp(suffix="", prefix=template, dir=None): + """User-callable function to return a unique temporary file name. The + file is not created. + + Arguments are similar to mkstemp, except that the 'text' argument is + not accepted, and suffix=None, prefix=None and bytes file names are not + supported. + + THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may + refer to a file that did not exist at some point, but by the time + you get around to creating it, someone else may have beaten you to + the punch. + """ + +## from warnings import warn as _warn +## _warn("mktemp is a potential security risk to your program", +## RuntimeWarning, stacklevel=2) + + if dir is None: + dir = gettempdir() + + names = _get_candidate_names() + for seq in range(TMP_MAX): + name = next(names) + file = _os.path.join(dir, prefix + name + suffix) + if not _exists(file): + return file + + raise FileExistsError(_errno.EEXIST, + "No usable temporary filename found") + + +class _TemporaryFileCloser: + """A separate object allowing proper closing of a temporary file's + underlying file object, without adding a __del__ method to the + temporary file.""" + + cleanup_called = False + close_called = False + + def __init__( + self, + file, + name, + delete=True, + delete_on_close=True, + warn_message="Implicitly cleaning up unknown file", + ): + self.file = file + self.name = name + self.delete = delete + self.delete_on_close = delete_on_close + self.warn_message = warn_message + + def cleanup(self, windows=(_os.name == 'nt'), unlink=_os.unlink): + if not self.cleanup_called: + self.cleanup_called = True + try: + if not self.close_called: + self.close_called = True + self.file.close() + finally: + # Windows provides delete-on-close as a primitive, in which + # case the file was deleted by self.file.close(). + if self.delete and not (windows and self.delete_on_close): + try: + unlink(self.name) + except FileNotFoundError: + pass + + def close(self): + if not self.close_called: + self.close_called = True + try: + self.file.close() + finally: + if self.delete and self.delete_on_close: + self.cleanup() + + def __del__(self): + close_called = self.close_called + self.cleanup() + if not close_called: + _warnings.warn(self.warn_message, ResourceWarning) + + +class _TemporaryFileWrapper: + """Temporary file wrapper + + This class provides a wrapper around files opened for + temporary use. In particular, it seeks to automatically + remove the file when it is no longer needed. 
+ """ + + def __init__(self, file, name, delete=True, delete_on_close=True): + self.file = file + self.name = name + self._closer = _TemporaryFileCloser( + file, + name, + delete, + delete_on_close, + warn_message=f"Implicitly cleaning up {self!r}", + ) + + def __repr__(self): + file = self.__dict__['file'] + return f"<{type(self).__name__} {file=}>" + + def __getattr__(self, name): + # Attribute lookups are delegated to the underlying file + # and cached for non-numeric results + # (i.e. methods are cached, closed and friends are not) + file = self.__dict__['file'] + a = getattr(file, name) + if hasattr(a, '__call__'): + func = a + @_functools.wraps(func) + def func_wrapper(*args, **kwargs): + return func(*args, **kwargs) + # Avoid closing the file as long as the wrapper is alive, + # see issue #18879. + func_wrapper._closer = self._closer + a = func_wrapper + if not isinstance(a, int): + setattr(self, name, a) + return a + + # The underlying __enter__ method returns the wrong object + # (self.file) so override it to return the wrapper + def __enter__(self): + self.file.__enter__() + return self + + # Need to trap __exit__ as well to ensure the file gets + # deleted when used in a with statement + def __exit__(self, exc, value, tb): + result = self.file.__exit__(exc, value, tb) + self._closer.cleanup() + return result + + def close(self): + """ + Close the temporary file, possibly deleting it. + """ + self._closer.close() + + # iter() doesn't use __getattr__ to find the __iter__ method + def __iter__(self): + # Don't return iter(self.file), but yield from it to avoid closing + # file as long as it's being used as iterator (see issue #23700). We + # can't use 'yield from' here because iter(file) returns the file + # object itself, which has a close method, and thus the file would get + # closed when the generator is finalized, due to PEP380 semantics. + for line in self.file: + yield line + +def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None, + newline=None, suffix=None, prefix=None, + dir=None, delete=True, *, errors=None, + delete_on_close=True): + """Create and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + 'delete' -- whether the file is automatically deleted (default True). + 'delete_on_close' -- if 'delete', whether the file is deleted on close + (default True) or otherwise either on context manager exit + (if context manager was used) or on object finalization. . + 'errors' -- the errors argument to io.open (default None) + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface; the name of the file + is accessible as its 'name' attribute. The file will be automatically + deleted when it is closed unless the 'delete' argument is set to False. + + On POSIX, NamedTemporaryFiles cannot be automatically deleted if + the creating process is terminated abruptly with a SIGKILL signal. + Windows can delete the file even in this case. + """ + + prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) + + flags = _bin_openflags + + # Setting O_TEMPORARY in the flags causes the OS to delete + # the file when it is closed. This is only supported by Windows. 
+ if _os.name == 'nt' and delete and delete_on_close: + flags |= _os.O_TEMPORARY + + if "b" not in mode: + encoding = _io.text_encoding(encoding) + + name = None + def opener(*args): + nonlocal name + fd, name = _mkstemp_inner(dir, prefix, suffix, flags, output_type) + return fd + try: + file = _io.open(dir, mode, buffering=buffering, + newline=newline, encoding=encoding, errors=errors, + opener=opener) + try: + raw = getattr(file, 'buffer', file) + raw = getattr(raw, 'raw', raw) + raw.name = name + return _TemporaryFileWrapper(file, name, delete, delete_on_close) + except: + file.close() + raise + except: + if name is not None and not ( + _os.name == 'nt' and delete and delete_on_close): + _os.unlink(name) + raise + +if _os.name != 'posix' or _sys.platform == 'cygwin': + # On non-POSIX and Cygwin systems, assume that we cannot unlink a file + # while it is open. + TemporaryFile = NamedTemporaryFile + +else: + # Is the O_TMPFILE flag available and does it work? + # The flag is set to False if os.open(dir, os.O_TMPFILE) raises an + # IsADirectoryError exception + _O_TMPFILE_WORKS = hasattr(_os, 'O_TMPFILE') + + def TemporaryFile(mode='w+b', buffering=-1, encoding=None, + newline=None, suffix=None, prefix=None, + dir=None, *, errors=None): + """Create and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + 'errors' -- the errors argument to io.open (default None) + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface. The file has no + name, and will cease to exist when it is closed. + """ + global _O_TMPFILE_WORKS + + if "b" not in mode: + encoding = _io.text_encoding(encoding) + + prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir) + + flags = _bin_openflags + if _O_TMPFILE_WORKS: + fd = None + def opener(*args): + nonlocal fd + flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT + fd = _os.open(dir, flags2, 0o600) + return fd + try: + file = _io.open(dir, mode, buffering=buffering, + newline=newline, encoding=encoding, + errors=errors, opener=opener) + raw = getattr(file, 'buffer', file) + raw = getattr(raw, 'raw', raw) + raw.name = fd + return file + except IsADirectoryError: + # Linux kernel older than 3.11 ignores the O_TMPFILE flag: + # O_TMPFILE is read as O_DIRECTORY. Trying to open a directory + # with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a + # directory cannot be open to write. Set flag to False to not + # try again. + _O_TMPFILE_WORKS = False + except OSError: + # The filesystem of the directory does not support O_TMPFILE. + # For example, OSError(95, 'Operation not supported'). + # + # On Linux kernel older than 3.11, trying to open a regular + # file (or a symbolic link to a regular file) with O_TMPFILE + # fails with NotADirectoryError, because O_TMPFILE is read as + # O_DIRECTORY. + pass + # Fallback to _mkstemp_inner(). 
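+            # (It opens a named file and unlinks it immediately, which gives
+            # the same anonymous-file behaviour as O_TMPFILE.)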
+ + fd = None + def opener(*args): + nonlocal fd + fd, name = _mkstemp_inner(dir, prefix, suffix, flags, output_type) + try: + _os.unlink(name) + except BaseException as e: + _os.close(fd) + raise + return fd + file = _io.open(dir, mode, buffering=buffering, + newline=newline, encoding=encoding, errors=errors, + opener=opener) + raw = getattr(file, 'buffer', file) + raw = getattr(raw, 'raw', raw) + raw.name = fd + return file + +class SpooledTemporaryFile(_io.IOBase): + """Temporary file wrapper, specialized to switch from BytesIO + or StringIO to a real file when it exceeds a certain size or + when a fileno is needed. + """ + _rolled = False + + def __init__(self, max_size=0, mode='w+b', buffering=-1, + encoding=None, newline=None, + suffix=None, prefix=None, dir=None, *, errors=None): + if 'b' in mode: + self._file = _io.BytesIO() + else: + encoding = _io.text_encoding(encoding) + self._file = _io.TextIOWrapper(_io.BytesIO(), + encoding=encoding, errors=errors, + newline=newline) + self._max_size = max_size + self._rolled = False + self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering, + 'suffix': suffix, 'prefix': prefix, + 'encoding': encoding, 'newline': newline, + 'dir': dir, 'errors': errors} + + __class_getitem__ = classmethod(_types.GenericAlias) + + def _check(self, file): + if self._rolled: return + max_size = self._max_size + if max_size and file.tell() > max_size: + self.rollover() + + def rollover(self): + if self._rolled: return + file = self._file + newfile = self._file = TemporaryFile(**self._TemporaryFileArgs) + del self._TemporaryFileArgs + + pos = file.tell() + if hasattr(newfile, 'buffer'): + newfile.buffer.write(file.detach().getvalue()) + else: + newfile.write(file.getvalue()) + newfile.seek(pos, 0) + + self._rolled = True + + # The method caching trick from NamedTemporaryFile + # won't work here, because _file may change from a + # BytesIO/StringIO instance to a real file. So we list + # all the methods directly. 
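+    #
+    # A minimal usage sketch (the size is illustrative): data stays in
+    # memory until max_size is exceeded, then rollover() moves it to a
+    # real temporary file:
+    #
+    #     with SpooledTemporaryFile(max_size=1024) as f:
+    #         f.write(b'x' * 2048)   # exceeds max_size -> rollover()
+    #         f.seek(0)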
+ + # Context management protocol + def __enter__(self): + if self._file.closed: + raise ValueError("Cannot enter context with closed file") + return self + + def __exit__(self, exc, value, tb): + self._file.close() + + # file protocol + def __iter__(self): + return self._file.__iter__() + + def __del__(self): + if not self.closed: + _warnings.warn( + "Unclosed file {!r}".format(self), + ResourceWarning, + stacklevel=2, + source=self + ) + self.close() + + def close(self): + self._file.close() + + @property + def closed(self): + return self._file.closed + + @property + def encoding(self): + return self._file.encoding + + @property + def errors(self): + return self._file.errors + + def fileno(self): + self.rollover() + return self._file.fileno() + + def flush(self): + self._file.flush() + + def isatty(self): + return self._file.isatty() + + @property + def mode(self): + try: + return self._file.mode + except AttributeError: + return self._TemporaryFileArgs['mode'] + + @property + def name(self): + try: + return self._file.name + except AttributeError: + return None + + @property + def newlines(self): + return self._file.newlines + + def readable(self): + return self._file.readable() + + def read(self, *args): + return self._file.read(*args) + + def read1(self, *args): + return self._file.read1(*args) + + def readinto(self, b): + return self._file.readinto(b) + + def readinto1(self, b): + return self._file.readinto1(b) + + def readline(self, *args): + return self._file.readline(*args) + + def readlines(self, *args): + return self._file.readlines(*args) + + def seekable(self): + return self._file.seekable() + + def seek(self, *args): + return self._file.seek(*args) + + def tell(self): + return self._file.tell() + + def truncate(self, size=None): + if size is None: + return self._file.truncate() + else: + if size > self._max_size: + self.rollover() + return self._file.truncate(size) + + def writable(self): + return self._file.writable() + + def write(self, s): + file = self._file + rv = file.write(s) + self._check(file) + return rv + + def writelines(self, iterable): + if self._max_size == 0 or self._rolled: + return self._file.writelines(iterable) + + it = iter(iterable) + for line in it: + self.write(line) + if self._rolled: + return self._file.writelines(it) + + def detach(self): + return self._file.detach() + + +class TemporaryDirectory: + """Create and return a temporary directory. This has the same + behavior as mkdtemp but can be used as a context manager. For + example: + + with TemporaryDirectory() as tmpdir: + ... + + Upon exiting the context, the directory and everything contained + in it are removed (unless delete=False is passed or an exception + is raised during cleanup and ignore_cleanup_errors is not True). + + Optional Arguments: + suffix - A str suffix for the directory name. (see mkdtemp) + prefix - A str prefix for the directory name. (see mkdtemp) + dir - A directory to create this temp dir in. (see mkdtemp) + ignore_cleanup_errors - False; ignore exceptions during cleanup? + delete - True; whether the directory is automatically deleted. 
+ """ + + def __init__(self, suffix=None, prefix=None, dir=None, + ignore_cleanup_errors=False, *, delete=True): + self.name = mkdtemp(suffix, prefix, dir) + self._ignore_cleanup_errors = ignore_cleanup_errors + self._delete = delete + self._finalizer = _weakref.finalize( + self, self._cleanup, self.name, + warn_message="Implicitly cleaning up {!r}".format(self), + ignore_errors=self._ignore_cleanup_errors, delete=self._delete) + + @classmethod + def _rmtree(cls, name, ignore_errors=False, repeated=False): + def onexc(func, path, exc): + if isinstance(exc, PermissionError): + if repeated and path == name: + if ignore_errors: + return + raise + + try: + if path != name: + _resetperms(_os.path.dirname(path)) + _resetperms(path) + + try: + _os.unlink(path) + except IsADirectoryError: + cls._rmtree(path, ignore_errors=ignore_errors) + except PermissionError: + # The PermissionError handler was originally added for + # FreeBSD in directories, but it seems that it is raised + # on Windows too. + # bpo-43153: Calling _rmtree again may + # raise NotADirectoryError and mask the PermissionError. + # So we must re-raise the current PermissionError if + # path is not a directory. + if not _os.path.isdir(path) or _os.path.isjunction(path): + if ignore_errors: + return + raise + cls._rmtree(path, ignore_errors=ignore_errors, + repeated=(path == name)) + except FileNotFoundError: + pass + elif isinstance(exc, FileNotFoundError): + pass + else: + if not ignore_errors: + raise + + _shutil.rmtree(name, onexc=onexc) + + @classmethod + def _cleanup(cls, name, warn_message, ignore_errors=False, delete=True): + if delete: + cls._rmtree(name, ignore_errors=ignore_errors) + _warnings.warn(warn_message, ResourceWarning) + + def __repr__(self): + return "<{} {!r}>".format(self.__class__.__name__, self.name) + + def __enter__(self): + return self.name + + def __exit__(self, exc, value, tb): + if self._delete: + self.cleanup() + + def cleanup(self): + if self._finalizer.detach() or _os.path.exists(self.name): + self._rmtree(self.name, ignore_errors=self._ignore_cleanup_errors) + + __class_getitem__ = classmethod(_types.GenericAlias) diff --git a/Python314_4_x86_Template/Lib/textwrap.py b/Python314_4_x86_Template/Lib/textwrap.py new file mode 100644 index 00000000..41366fbf --- /dev/null +++ b/Python314_4_x86_Template/Lib/textwrap.py @@ -0,0 +1,475 @@ +"""Text wrapping and filling. +""" + +# Copyright (C) 1999-2001 Gregory P. Ward. +# Copyright (C) 2002 Python Software Foundation. +# Written by Greg Ward + +import re + +__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent', 'shorten'] + +# Hardcode the recognized whitespace characters to the US-ASCII +# whitespace characters. The main reason for doing this is that +# some Unicode spaces (like \u00a0) are non-breaking whitespaces. +_whitespace = '\t\n\x0b\x0c\r ' + +class TextWrapper: + """ + Object for wrapping/filling text. The public interface consists of + the wrap() and fill() methods; the other methods are just there for + subclasses to override in order to tweak the default behaviour. + If you want to completely replace the main wrapping algorithm, + you'll probably have to override _wrap_chunks(). + + Several instance attributes control various aspects of wrapping: + width (default: 70) + the maximum width of wrapped lines (unless break_long_words + is false) + initial_indent (default: "") + string that will be prepended to the first line of wrapped + output. Counts towards the line's width. 
+ subsequent_indent (default: "") + string that will be prepended to all lines save the first + of wrapped output; also counts towards each line's width. + expand_tabs (default: true) + Expand tabs in input text to spaces before further processing. + Each tab will become 0 .. 'tabsize' spaces, depending on its position + in its line. If false, each tab is treated as a single character. + tabsize (default: 8) + Expand tabs in input text to 0 .. 'tabsize' spaces, unless + 'expand_tabs' is false. + replace_whitespace (default: true) + Replace all whitespace characters in the input text by spaces + after tab expansion. Note that if expand_tabs is false and + replace_whitespace is true, every tab will be converted to a + single space! + fix_sentence_endings (default: false) + Ensure that sentence-ending punctuation is always followed + by two spaces. Off by default because the algorithm is + (unavoidably) imperfect. + break_long_words (default: true) + Break words longer than 'width'. If false, those words will not + be broken, and some lines might be longer than 'width'. + break_on_hyphens (default: true) + Allow breaking hyphenated words. If true, wrapping will occur + preferably on whitespaces and right after hyphens part of + compound words. + drop_whitespace (default: true) + Drop leading and trailing whitespace from lines. + max_lines (default: None) + Truncate wrapped lines. + placeholder (default: ' [...]') + Append to the last line of truncated text. + """ + + unicode_whitespace_trans = dict.fromkeys(map(ord, _whitespace), ord(' ')) + + # This funky little regex is just the trick for splitting + # text up into word-wrappable chunks. E.g. + # "Hello there -- you goof-ball, use the -b option!" + # splits into + # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! + # (after stripping out empty strings). + word_punct = r'[\w!"\'&.,?]' + letter = r'[^\d\W]' + whitespace = r'[%s]' % re.escape(_whitespace) + nowhitespace = '[^' + whitespace[1:] + wordsep_re = re.compile(r''' + ( # any whitespace + %(ws)s+ + | # em-dash between words + (?<=%(wp)s) -{2,} (?=\w) + | # word, possibly hyphenated + %(nws)s+? (?: + # hyphenated word + -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-)) + (?= %(lt)s -? %(lt)s) + | # end of word + (?=%(ws)s|\z) + | # em-dash + (?<=%(wp)s) (?=-{2,}\w) + ) + )''' % {'wp': word_punct, 'lt': letter, + 'ws': whitespace, 'nws': nowhitespace}, + re.VERBOSE) + del word_punct, letter, nowhitespace + + # This less funky little regex just split on recognized spaces. E.g. + # "Hello there -- you goof-ball, use the -b option!" + # splits into + # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/ + wordsep_simple_re = re.compile(r'(%s+)' % whitespace) + del whitespace + + # XXX this is not locale- or charset-aware -- string.lowercase + # is US-ASCII only (and therefore English-only) + sentence_end_re = re.compile(r'[a-z]' # lowercase letter + r'[\.\!\?]' # sentence-ending punct. + r'[\"\']?' 
# optional end-of-quote
+                                 r'\z')               # end of chunk
+
+    def __init__(self,
+                 width=70,
+                 initial_indent="",
+                 subsequent_indent="",
+                 expand_tabs=True,
+                 replace_whitespace=True,
+                 fix_sentence_endings=False,
+                 break_long_words=True,
+                 drop_whitespace=True,
+                 break_on_hyphens=True,
+                 tabsize=8,
+                 *,
+                 max_lines=None,
+                 placeholder=' [...]'):
+        self.width = width
+        self.initial_indent = initial_indent
+        self.subsequent_indent = subsequent_indent
+        self.expand_tabs = expand_tabs
+        self.replace_whitespace = replace_whitespace
+        self.fix_sentence_endings = fix_sentence_endings
+        self.break_long_words = break_long_words
+        self.drop_whitespace = drop_whitespace
+        self.break_on_hyphens = break_on_hyphens
+        self.tabsize = tabsize
+        self.max_lines = max_lines
+        self.placeholder = placeholder
+
+
+    # -- Private methods -----------------------------------------------
+    # (possibly useful for subclasses to override)
+
+    def _munge_whitespace(self, text):
+        """_munge_whitespace(text : string) -> string
+
+        Munge whitespace in text: expand tabs and convert all other
+        whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz"
+        becomes " foo bar baz".
+        """
+        if self.expand_tabs:
+            text = text.expandtabs(self.tabsize)
+        if self.replace_whitespace:
+            text = text.translate(self.unicode_whitespace_trans)
+        return text
+
+
+    def _split(self, text):
+        """_split(text : string) -> [string]
+
+        Split the text to wrap into indivisible chunks. Chunks are
+        not quite the same as words; see _wrap_chunks() for full
+        details. As an example, the text
+          Look, goof-ball -- use the -b option!
+        breaks into the following chunks:
+          'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
+          'use', ' ', 'the', ' ', '-b', ' ', 'option!'
+        if break_on_hyphens is True, or into:
+          'Look,', ' ', 'goof-ball', ' ', '--', ' ',
+          'use', ' ', 'the', ' ', '-b', ' ', 'option!'
+        otherwise.
+        """
+        if self.break_on_hyphens is True:
+            chunks = self.wordsep_re.split(text)
+        else:
+            chunks = self.wordsep_simple_re.split(text)
+        chunks = [c for c in chunks if c]
+        return chunks
+
+    def _fix_sentence_endings(self, chunks):
+        """_fix_sentence_endings(chunks : [string])
+
+        Correct for sentence endings buried in 'chunks'. Eg. when the
+        original text contains "... foo.\\nBar ...", _munge_whitespace()
+        and _split() will convert that to [..., "foo.", " ", "Bar", ...]
+        which has one too few spaces; this method simply changes the one
+        space to two.
+        """
+        i = 0
+        patsearch = self.sentence_end_re.search
+        while i < len(chunks)-1:
+            if chunks[i+1] == " " and patsearch(chunks[i]):
+                chunks[i+1] = "  "
+                i += 2
+            else:
+                i += 1
+
+    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+        """_handle_long_word(chunks : [string],
+                             cur_line : [string],
+                             cur_len : int, width : int)
+
+        Handle a chunk of text (most likely a word, not whitespace) that
+        is too long to fit in any line.
+        """
+        # Figure out when indent is larger than the specified width, and make
+        # sure at least one character is stripped off on every pass
+        if width < 1:
+            space_left = 1
+        else:
+            space_left = width - cur_len
+
+        # If we're allowed to break long words, then do so: put as much
+        # of the next chunk onto the current line as will fit.
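+        # (e.g. with width 10 and an empty current line, the 15-character
+        # chunk 'extraordinarily' is split into 'extraordin' now, with
+        # 'arily' pushed back for the next line, as there is no hyphen.)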
+ if self.break_long_words and space_left > 0: + end = space_left + chunk = reversed_chunks[-1] + if self.break_on_hyphens and len(chunk) > space_left: + # break after last hyphen, but only if there are + # non-hyphens before it + hyphen = chunk.rfind('-', 0, space_left) + if hyphen > 0 and any(c != '-' for c in chunk[:hyphen]): + end = hyphen + 1 + cur_line.append(chunk[:end]) + reversed_chunks[-1] = chunk[end:] + + # Otherwise, we have to preserve the long word intact. Only add + # it to the current line if there's nothing already there -- + # that minimizes how much we violate the width constraint. + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + # If we're not allowed to break long words, and there's already + # text on the current line, do nothing. Next time through the + # main loop of _wrap_chunks(), we'll wind up here again, but + # cur_len will be zero, so the next line will be entirely + # devoted to the long word that we can't handle right now. + + def _wrap_chunks(self, chunks): + """_wrap_chunks(chunks : [string]) -> [string] + + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; ie. a chunk is either all whitespace or a "word". + Whitespace chunks will be removed from the beginning and end of + lines, but apart from that whitespace is preserved. + """ + lines = [] + if self.width <= 0: + raise ValueError("invalid width %r (must be > 0)" % self.width) + if self.max_lines is not None: + if self.max_lines > 1: + indent = self.subsequent_indent + else: + indent = self.initial_indent + if len(indent) + len(self.placeholder.lstrip()) > self.width: + raise ValueError("placeholder too large for max width") + + # Arrange in reverse order so items can be efficiently popped + # from a stack of chucks. + chunks.reverse() + + while chunks: + + # Start the list of chunks that will make up the current line. + # cur_len is just the length of all the chunks in cur_line. + cur_line = [] + cur_len = 0 + + # Figure out which static string will prefix this line. + if lines: + indent = self.subsequent_indent + else: + indent = self.initial_indent + + # Maximum width for this line. + width = self.width - len(indent) + + # First chunk on line is whitespace -- drop it, unless this + # is the very beginning of the text (ie. no lines started yet). + if self.drop_whitespace and chunks[-1].strip() == '' and lines: + del chunks[-1] + + while chunks: + l = len(chunks[-1]) + + # Can at least squeeze this chunk onto the current line. + if cur_len + l <= width: + cur_line.append(chunks.pop()) + cur_len += l + + # Nope, this line is full. + else: + break + + # The current line is full, and the next chunk is too big to + # fit on *any* line (not just this one). + if chunks and len(chunks[-1]) > width: + self._handle_long_word(chunks, cur_line, cur_len, width) + cur_len = sum(map(len, cur_line)) + + # If the last chunk on this line is all whitespace, drop it. 
+ if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': + cur_len -= len(cur_line[-1]) + del cur_line[-1] + + if cur_line: + if (self.max_lines is None or + len(lines) + 1 < self.max_lines or + (not chunks or + self.drop_whitespace and + len(chunks) == 1 and + not chunks[0].strip()) and cur_len <= width): + # Convert current line back to a string and store it in + # list of all lines (return value). + lines.append(indent + ''.join(cur_line)) + else: + while cur_line: + if (cur_line[-1].strip() and + cur_len + len(self.placeholder) <= width): + cur_line.append(self.placeholder) + lines.append(indent + ''.join(cur_line)) + break + cur_len -= len(cur_line[-1]) + del cur_line[-1] + else: + if lines: + prev_line = lines[-1].rstrip() + if (len(prev_line) + len(self.placeholder) <= + self.width): + lines[-1] = prev_line + self.placeholder + break + lines.append(indent + self.placeholder.lstrip()) + break + + return lines + + def _split_chunks(self, text): + text = self._munge_whitespace(text) + return self._split(text) + + # -- Public interface ---------------------------------------------- + + def wrap(self, text): + """wrap(text : string) -> [string] + + Reformat the single paragraph in 'text' so it fits in lines of + no more than 'self.width' columns, and return a list of wrapped + lines. Tabs in 'text' are expanded with string.expandtabs(), + and all other whitespace characters (including newline) are + converted to space. + """ + chunks = self._split_chunks(text) + if self.fix_sentence_endings: + self._fix_sentence_endings(chunks) + return self._wrap_chunks(chunks) + + def fill(self, text): + """fill(text : string) -> string + + Reformat the single paragraph in 'text' to fit in lines of no + more than 'self.width' columns, and return a new string + containing the entire wrapped paragraph. + """ + return "\n".join(self.wrap(text)) + + +# -- Convenience interface --------------------------------------------- + +def wrap(text, width=70, **kwargs): + """Wrap a single paragraph of text, returning a list of wrapped lines. + + Reformat the single paragraph in 'text' so it fits in lines of no + more than 'width' columns, and return a list of wrapped lines. By + default, tabs in 'text' are expanded with string.expandtabs(), and + all other whitespace characters (including newline) are converted to + space. See TextWrapper class for available keyword args to customize + wrapping behaviour. + """ + w = TextWrapper(width=width, **kwargs) + return w.wrap(text) + +def fill(text, width=70, **kwargs): + """Fill a single paragraph of text, returning a new string. + + Reformat the single paragraph in 'text' to fit in lines of no more + than 'width' columns, and return a new string containing the entire + wrapped paragraph. As with wrap(), tabs are expanded and other + whitespace characters converted to space. See TextWrapper class for + available keyword args to customize wrapping behaviour. + """ + w = TextWrapper(width=width, **kwargs) + return w.fill(text) + +def shorten(text, width, **kwargs): + """Collapse and truncate the given text to fit in the given width. + + The text first has its whitespace collapsed. If it then fits in + the *width*, it is returned as is. Otherwise, as many words + as possible are joined and then the placeholder is appended:: + + >>> textwrap.shorten("Hello world!", width=12) + 'Hello world!' 
+ >>> textwrap.shorten("Hello world!", width=11) + 'Hello [...]' + """ + w = TextWrapper(width=width, max_lines=1, **kwargs) + return w.fill(' '.join(text.strip().split())) + + +# -- Loosely related functionality ------------------------------------- + +def dedent(text): + """Remove any common leading whitespace from every line in `text`. + + This can be used to make triple-quoted strings line up with the left + edge of the display, while still presenting them in the source code + in indented form. + + Note that tabs and spaces are both treated as whitespace, but they + are not equal: the lines " hello" and "\\thello" are + considered to have no common leading whitespace. + + Entirely blank lines are normalized to a newline character. + """ + try: + lines = text.split('\n') + except (AttributeError, TypeError): + msg = f'expected str object, not {type(text).__qualname__!r}' + raise TypeError(msg) from None + + # Get length of leading whitespace, inspired by ``os.path.commonprefix()``. + non_blank_lines = [l for l in lines if l and not l.isspace()] + l1 = min(non_blank_lines, default='') + l2 = max(non_blank_lines, default='') + margin = 0 + for margin, c in enumerate(l1): + if c != l2[margin] or c not in ' \t': + break + + return '\n'.join([l[margin:] if not l.isspace() else '' for l in lines]) + + +def indent(text, prefix, predicate=None): + """Adds 'prefix' to the beginning of selected lines in 'text'. + + If 'predicate' is provided, 'prefix' will only be added to the lines + where 'predicate(line)' is True. If 'predicate' is not provided, + it will default to adding 'prefix' to all non-empty lines that do not + consist solely of whitespace characters. + """ + prefixed_lines = [] + if predicate is None: + # str.splitlines(keepends=True) doesn't produce the empty string, + # so we need to use `str.isspace()` rather than a truth test. + # Inlining the predicate leads to a ~30% performance improvement. + for line in text.splitlines(True): + if not line.isspace(): + prefixed_lines.append(prefix) + prefixed_lines.append(line) + else: + for line in text.splitlines(True): + if predicate(line): + prefixed_lines.append(prefix) + prefixed_lines.append(line) + return ''.join(prefixed_lines) + + +if __name__ == "__main__": + #print dedent("\tfoo\n\tbar") + #print dedent(" \thello there\n \t how are you?") + print(dedent("Hello there.\n This is indented.")) diff --git a/Python313_13_x86_Template/Lib/this.py b/Python314_4_x86_Template/Lib/this.py similarity index 100% rename from Python313_13_x86_Template/Lib/this.py rename to Python314_4_x86_Template/Lib/this.py diff --git a/Python314_4_x86_Template/Lib/threading.py b/Python314_4_x86_Template/Lib/threading.py new file mode 100644 index 00000000..c03b0b53 --- /dev/null +++ b/Python314_4_x86_Template/Lib/threading.py @@ -0,0 +1,1642 @@ +"""Thread module emulating a subset of Java's threading model.""" + +import os as _os +import sys as _sys +import _thread +import _contextvars + +from time import monotonic as _time +from _weakrefset import WeakSet +from itertools import count as _count +try: + from _collections import deque as _deque +except ImportError: + from collections import deque as _deque + +# Note regarding PEP 8 compliant names +# This threading model was originally inspired by Java, and inherited +# the convention of camelCase function and method names from that +# language. 
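For reference, a minimal sketch of the `dedent()`/`indent()` pair vendored above (illustrative values, standard library only):

```python
import textwrap

# A triple-quoted block indented to line up with surrounding source code.
source = """\
    def greet():
        print("hi")
"""

# dedent() strips the 4-space margin common to every non-blank line.
flush_left = textwrap.dedent(source)
assert flush_left.startswith("def greet():")

# indent() adds a prefix; by default only to lines that are not all whitespace.
print(textwrap.indent(flush_left, "# "))
```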
Those original names are not in any imminent danger of +# being deprecated (even for Py3k),so this module provides them as an +# alias for the PEP 8 compliant names +# Note that using the new PEP 8 compliant names facilitates substitution +# with the multiprocessing module, which doesn't provide the old +# Java inspired names. + +__all__ = ['get_ident', 'active_count', 'Condition', 'current_thread', + 'enumerate', 'main_thread', 'TIMEOUT_MAX', + 'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', + 'Barrier', 'BrokenBarrierError', 'Timer', 'ThreadError', + 'setprofile', 'settrace', 'local', 'stack_size', + 'excepthook', 'ExceptHookArgs', 'gettrace', 'getprofile', + 'setprofile_all_threads','settrace_all_threads'] + +# Rename some stuff so "from threading import *" is safe +_start_joinable_thread = _thread.start_joinable_thread +_daemon_threads_allowed = _thread.daemon_threads_allowed +_allocate_lock = _thread.allocate_lock +_LockType = _thread.LockType +_thread_shutdown = _thread._shutdown +_make_thread_handle = _thread._make_thread_handle +_ThreadHandle = _thread._ThreadHandle +get_ident = _thread.get_ident +_get_main_thread_ident = _thread._get_main_thread_ident +_is_main_interpreter = _thread._is_main_interpreter +try: + get_native_id = _thread.get_native_id + _HAVE_THREAD_NATIVE_ID = True + __all__.append('get_native_id') +except AttributeError: + _HAVE_THREAD_NATIVE_ID = False +try: + _set_name = _thread.set_name +except AttributeError: + _set_name = None +ThreadError = _thread.error +try: + _CRLock = _thread.RLock +except AttributeError: + _CRLock = None +TIMEOUT_MAX = _thread.TIMEOUT_MAX +del _thread + +# get thread-local implementation, either from the thread +# module, or from the python fallback + +try: + from _thread import _local as local +except ImportError: + from _threading_local import local + +# Support for profile and trace hooks + +_profile_hook = None +_trace_hook = None + +def setprofile(func): + """Set a profile function for all threads started from the threading module. + + The func will be passed to sys.setprofile() for each thread, before its + run() method is called. + """ + global _profile_hook + _profile_hook = func + +def setprofile_all_threads(func): + """Set a profile function for all threads started from the threading module + and all Python threads that are currently executing. + + The func will be passed to sys.setprofile() for each thread, before its + run() method is called. + """ + setprofile(func) + _sys._setprofileallthreads(func) + +def getprofile(): + """Get the profiler function as set by threading.setprofile().""" + return _profile_hook + +def settrace(func): + """Set a trace function for all threads started from the threading module. + + The func will be passed to sys.settrace() for each thread, before its run() + method is called. + """ + global _trace_hook + _trace_hook = func + +def settrace_all_threads(func): + """Set a trace function for all threads started from the threading module + and all Python threads that are currently executing. + + The func will be passed to sys.settrace() for each thread, before its run() + method is called. + """ + settrace(func) + _sys._settraceallthreads(func) + +def gettrace(): + """Get the trace function as set by threading.settrace().""" + return _trace_hook + +# Synchronization classes + +Lock = _LockType + +def RLock(*args, **kwargs): + """Factory function that returns a new reentrant lock. + + A reentrant lock must be released by the thread that acquired it. 
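A quick sketch of the hook functions above: `setprofile()` installs a profile function that each later-started thread passes to `sys.setprofile()` before its `run()` is called (the `profiler` callback is invented for the example):

```python
import threading

def profiler(frame, event, arg):
    # Invoked for 'call', 'return', 'c_call', ... events in the new thread.
    if event == "call":
        print("call:", frame.f_code.co_name)

threading.setprofile(profiler)
assert threading.getprofile() is profiler

t = threading.Thread(target=sum, args=([1, 2, 3],))
t.start()
t.join()
```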
Once a + thread has acquired a reentrant lock, the same thread may acquire it again + without blocking; the thread must release it once for each time it has + acquired it. + + """ + if args or kwargs: + import warnings + warnings.warn( + 'Passing arguments to RLock is deprecated and will be removed in 3.15', + DeprecationWarning, + stacklevel=2, + ) + if _CRLock is None: + return _PyRLock(*args, **kwargs) + return _CRLock(*args, **kwargs) + +class _RLock: + """This class implements reentrant lock objects. + + A reentrant lock must be released by the thread that acquired it. Once a + thread has acquired a reentrant lock, the same thread may acquire it + again without blocking; the thread must release it once for each time it + has acquired it. + + """ + + def __init__(self): + self._block = _allocate_lock() + self._owner = None + self._count = 0 + + def __repr__(self): + owner = self._owner + try: + owner = _active[owner].name + except KeyError: + pass + return "<%s %s.%s object owner=%r count=%d at %s>" % ( + "locked" if self.locked() else "unlocked", + self.__class__.__module__, + self.__class__.__qualname__, + owner, + self._count, + hex(id(self)) + ) + + def _at_fork_reinit(self): + self._block._at_fork_reinit() + self._owner = None + self._count = 0 + + def acquire(self, blocking=True, timeout=-1): + """Acquire a lock, blocking or non-blocking. + + When invoked without arguments: if this thread already owns the lock, + increment the recursion level by one, and return immediately. Otherwise, + if another thread owns the lock, block until the lock is unlocked. Once + the lock is unlocked (not owned by any thread), then grab ownership, set + the recursion level to one, and return. If more than one thread is + blocked waiting until the lock is unlocked, only one at a time will be + able to grab ownership of the lock. There is no return value in this + case. + + When invoked with the blocking argument set to true, do the same thing + as when called without arguments, and return true. + + When invoked with the blocking argument set to false, do not block. If a + call without an argument would block, return false immediately; + otherwise, do the same thing as when called without arguments, and + return true. + + When invoked with the floating-point timeout argument set to a positive + value, block for at most the number of seconds specified by timeout + and as long as the lock cannot be acquired. Return true if the lock has + been acquired, false if the timeout has elapsed. + + """ + me = get_ident() + if self._owner == me: + self._count += 1 + return 1 + rc = self._block.acquire(blocking, timeout) + if rc: + self._owner = me + self._count = 1 + return rc + + __enter__ = acquire + + def release(self): + """Release a lock, decrementing the recursion level. + + If after the decrement it is zero, reset the lock to unlocked (not owned + by any thread), and if any other threads are blocked waiting for the + lock to become unlocked, allow exactly one of them to proceed. If after + the decrement the recursion level is still nonzero, the lock remains + locked and owned by the calling thread. + + Only call this method when the calling thread owns the lock. A + RuntimeError is raised if this method is called when the lock is + unlocked. + + There is no return value. 
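A minimal sketch of the reentrancy semantics described above, assuming nothing beyond the public `RLock` API:

```python
import threading

rlock = threading.RLock()
rlock.acquire()
rlock.acquire()            # same thread: recursion level is now 2

# Another thread cannot take the lock while we still own it.
grabbed = []
t = threading.Thread(target=lambda: grabbed.append(rlock.acquire(timeout=0.1)))
t.start()
t.join()
assert grabbed == [False]

rlock.release()            # level 2 -> 1: still owned
rlock.release()            # level 1 -> 0: free for other threads
```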
+
+        """
+        if self._owner != get_ident():
+            raise RuntimeError("cannot release un-acquired lock")
+        self._count = count = self._count - 1
+        if not count:
+            self._owner = None
+            self._block.release()
+
+    def __exit__(self, t, v, tb):
+        self.release()
+
+    def locked(self):
+        """Return whether this object is locked."""
+        return self._block.locked()
+
+    # Internal methods used by condition variables
+
+    def _acquire_restore(self, state):
+        self._block.acquire()
+        self._count, self._owner = state
+
+    def _release_save(self):
+        if self._count == 0:
+            raise RuntimeError("cannot release un-acquired lock")
+        count = self._count
+        self._count = 0
+        owner = self._owner
+        self._owner = None
+        self._block.release()
+        return (count, owner)
+
+    def _is_owned(self):
+        return self._owner == get_ident()
+
+    # Internal method used for reentrancy checks
+
+    def _recursion_count(self):
+        if self._owner != get_ident():
+            return 0
+        return self._count
+
+_PyRLock = _RLock
+
+
+class Condition:
+    """Class that implements a condition variable.
+
+    A condition variable allows one or more threads to wait until they are
+    notified by another thread.
+
+    If the lock argument is given and not None, it must be a Lock or RLock
+    object, and it is used as the underlying lock. Otherwise, a new RLock object
+    is created and used as the underlying lock.
+
+    """
+
+    def __init__(self, lock=None):
+        if lock is None:
+            lock = RLock()
+        self._lock = lock
+        # Export the lock's acquire(), release(), and locked() methods
+        self.acquire = lock.acquire
+        self.release = lock.release
+        self.locked = lock.locked
+        # If the lock defines _release_save() and/or _acquire_restore(),
+        # these override the default implementations (which just call
+        # release() and acquire() on the lock). Ditto for _is_owned().
+        if hasattr(lock, '_release_save'):
+            self._release_save = lock._release_save
+        if hasattr(lock, '_acquire_restore'):
+            self._acquire_restore = lock._acquire_restore
+        if hasattr(lock, '_is_owned'):
+            self._is_owned = lock._is_owned
+        self._waiters = _deque()
+
+    def _at_fork_reinit(self):
+        self._lock._at_fork_reinit()
+        self._waiters.clear()
+
+    def __enter__(self):
+        return self._lock.__enter__()
+
+    def __exit__(self, *args):
+        return self._lock.__exit__(*args)
+
+    def __repr__(self):
+        return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
+
+    def _release_save(self):
+        self._lock.release()           # No state to save
+
+    def _acquire_restore(self, x):
+        self._lock.acquire()           # Ignore saved state
+
+    def _is_owned(self):
+        # Return True if lock is owned by current_thread.
+        # This method is called only if _lock doesn't have _is_owned().
+        if self._lock.acquire(False):
+            self._lock.release()
+            return False
+        else:
+            return True
+
+    def wait(self, timeout=None):
+        """Wait until notified or until a timeout occurs.
+
+        If the calling thread has not acquired the lock when this method is
+        called, a RuntimeError is raised.
+
+        This method releases the underlying lock, and then blocks until it is
+        awakened by a notify() or notify_all() call for the same condition
+        variable in another thread, or until the optional timeout occurs. Once
+        awakened or timed out, it re-acquires the lock and returns.
+
+        When the timeout argument is present and not None, it should be a
+        floating-point number specifying a timeout for the operation in seconds
+        (or fractions thereof).
+
+        When the underlying lock is an RLock, it is not released using its
+        release() method, since this may not actually unlock the lock when it
+        was acquired multiple times recursively.
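For reference, the classic way the `Condition` class above is used, sketched with illustrative values:

```python
import threading

items = []
cond = threading.Condition()

def consumer():
    with cond:
        # wait_for() re-checks the predicate each time we are notified.
        cond.wait_for(lambda: items)
        print("got", items.pop())

t = threading.Thread(target=consumer)
t.start()

with cond:
    items.append(42)
    cond.notify()          # wake one waiter; it re-acquires the lock first

t.join()
```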
Instead, an internal interface + of the RLock class is used, which really unlocks it even when it has + been recursively acquired several times. Another internal interface is + then used to restore the recursion level when the lock is reacquired. + + """ + if not self._is_owned(): + raise RuntimeError("cannot wait on un-acquired lock") + waiter = _allocate_lock() + waiter.acquire() + self._waiters.append(waiter) + saved_state = self._release_save() + gotit = False + try: # restore state no matter what (e.g., KeyboardInterrupt) + if timeout is None: + waiter.acquire() + gotit = True + else: + if timeout > 0: + gotit = waiter.acquire(True, timeout) + else: + gotit = waiter.acquire(False) + return gotit + finally: + self._acquire_restore(saved_state) + if not gotit: + try: + self._waiters.remove(waiter) + except ValueError: + pass + + def wait_for(self, predicate, timeout=None): + """Wait until a condition evaluates to True. + + predicate should be a callable which result will be interpreted as a + boolean value. A timeout may be provided giving the maximum time to + wait. + + """ + endtime = None + waittime = timeout + result = predicate() + while not result: + if waittime is not None: + if endtime is None: + endtime = _time() + waittime + else: + waittime = endtime - _time() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + def notify(self, n=1): + """Wake up one or more threads waiting on this condition, if any. + + If the calling thread has not acquired the lock when this method is + called, a RuntimeError is raised. + + This method wakes up at most n of the threads waiting for the condition + variable; it is a no-op if no threads are waiting. + + """ + if not self._is_owned(): + raise RuntimeError("cannot notify on un-acquired lock") + waiters = self._waiters + while waiters and n > 0: + waiter = waiters[0] + try: + waiter.release() + except RuntimeError: + # gh-92530: The previous call of notify() released the lock, + # but was interrupted before removing it from the queue. + # It can happen if a signal handler raises an exception, + # like CTRL+C which raises KeyboardInterrupt. + pass + else: + n -= 1 + try: + waiters.remove(waiter) + except ValueError: + pass + + def notify_all(self): + """Wake up all threads waiting on this condition. + + If the calling thread has not acquired the lock when this method + is called, a RuntimeError is raised. + + """ + self.notify(len(self._waiters)) + + def notifyAll(self): + """Wake up all threads waiting on this condition. + + This method is deprecated, use notify_all() instead. + + """ + import warnings + warnings.warn('notifyAll() is deprecated, use notify_all() instead', + DeprecationWarning, stacklevel=2) + self.notify_all() + + +class Semaphore: + """This class implements semaphore objects. + + Semaphores manage a counter representing the number of release() calls minus + the number of acquire() calls, plus an initial value. The acquire() method + blocks if necessary until it can return without making the counter + negative. If not given, value defaults to 1. 
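A small sketch of the counting behaviour just described, using a `Semaphore` to cap how many threads run a section concurrently (illustrative values):

```python
import threading

sem = threading.Semaphore(3)   # at most 3 holders at once
active = []

def worker(i):
    with sem:                  # acquire() on entry, release() on exit
        active.append(i)
        assert len(active) <= 3
        active.remove(i)

threads = [threading.Thread(target=worker, args=(i,)) for i in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```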
+ + """ + + # After Tim Peters' semaphore class, but not quite the same (no maximum) + + def __init__(self, value=1): + if value < 0: + raise ValueError("semaphore initial value must be >= 0") + self._cond = Condition(Lock()) + self._value = value + + def __repr__(self): + cls = self.__class__ + return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" + f" value={self._value}>") + + def acquire(self, blocking=True, timeout=None): + """Acquire a semaphore, decrementing the internal counter by one. + + When invoked without arguments: if the internal counter is larger than + zero on entry, decrement it by one and return immediately. If it is zero + on entry, block, waiting until some other thread has called release() to + make it larger than zero. This is done with proper interlocking so that + if multiple acquire() calls are blocked, release() will wake exactly one + of them up. The implementation may pick one at random, so the order in + which blocked threads are awakened should not be relied on. There is no + return value in this case. + + When invoked with blocking set to true, do the same thing as when called + without arguments, and return true. + + When invoked with blocking set to false, do not block. If a call without + an argument would block, return false immediately; otherwise, do the + same thing as when called without arguments, and return true. + + When invoked with a timeout other than None, it will block for at + most timeout seconds. If acquire does not complete successfully in + that interval, return false. Return true otherwise. + + """ + if not blocking and timeout is not None: + raise ValueError("can't specify timeout for non-blocking acquire") + rc = False + endtime = None + with self._cond: + while self._value == 0: + if not blocking: + break + if timeout is not None: + if endtime is None: + endtime = _time() + timeout + else: + timeout = endtime - _time() + if timeout <= 0: + break + self._cond.wait(timeout) + else: + self._value -= 1 + rc = True + return rc + + __enter__ = acquire + + def release(self, n=1): + """Release a semaphore, incrementing the internal counter by one or more. + + When the counter is zero on entry and another thread is waiting for it + to become larger than zero again, wake up that thread. + + """ + if n < 1: + raise ValueError('n must be one or more') + with self._cond: + self._value += n + self._cond.notify(n) + + def __exit__(self, t, v, tb): + self.release() + + +class BoundedSemaphore(Semaphore): + """Implements a bounded semaphore. + + A bounded semaphore checks to make sure its current value doesn't exceed its + initial value. If it does, ValueError is raised. In most situations + semaphores are used to guard resources with limited capacity. + + If the semaphore is released too many times it's a sign of a bug. If not + given, value defaults to 1. + + Like regular semaphores, bounded semaphores manage a counter representing + the number of release() calls minus the number of acquire() calls, plus an + initial value. The acquire() method blocks if necessary until it can return + without making the counter negative. If not given, value defaults to 1. + + """ + + def __init__(self, value=1): + super().__init__(value) + self._initial_value = value + + def __repr__(self): + cls = self.__class__ + return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" + f" value={self._value}/{self._initial_value}>") + + def release(self, n=1): + """Release a semaphore, incrementing the internal counter by one or more. 
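The over-release check described above can be exercised directly; a minimal sketch:

```python
import threading

bsem = threading.BoundedSemaphore(1)
bsem.acquire()
bsem.release()

# A release beyond the initial value signals a bookkeeping bug.
try:
    bsem.release()
except ValueError as exc:
    print("caught:", exc)
```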
+ + When the counter is zero on entry and another thread is waiting for it + to become larger than zero again, wake up that thread. + + If the number of releases exceeds the number of acquires, + raise a ValueError. + + """ + if n < 1: + raise ValueError('n must be one or more') + with self._cond: + if self._value + n > self._initial_value: + raise ValueError("Semaphore released too many times") + self._value += n + self._cond.notify(n) + + +class Event: + """Class implementing event objects. + + Events manage a flag that can be set to true with the set() method and reset + to false with the clear() method. The wait() method blocks until the flag is + true. The flag is initially false. + + """ + + # After Tim Peters' event class (without is_posted()) + + def __init__(self): + self._cond = Condition(Lock()) + self._flag = False + + def __repr__(self): + cls = self.__class__ + status = 'set' if self._flag else 'unset' + return f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}: {status}>" + + def _at_fork_reinit(self): + # Private method called by Thread._after_fork() + self._cond._at_fork_reinit() + + def is_set(self): + """Return true if and only if the internal flag is true.""" + return self._flag + + def isSet(self): + """Return true if and only if the internal flag is true. + + This method is deprecated, use is_set() instead. + + """ + import warnings + warnings.warn('isSet() is deprecated, use is_set() instead', + DeprecationWarning, stacklevel=2) + return self.is_set() + + def set(self): + """Set the internal flag to true. + + All threads waiting for it to become true are awakened. Threads + that call wait() once the flag is true will not block at all. + + """ + with self._cond: + self._flag = True + self._cond.notify_all() + + def clear(self): + """Reset the internal flag to false. + + Subsequently, threads calling wait() will block until set() is called to + set the internal flag to true again. + + """ + with self._cond: + self._flag = False + + def wait(self, timeout=None): + """Block until the internal flag is true. + + If the internal flag is true on entry, return immediately. Otherwise, + block until another thread calls set() to set the flag to true, or until + the optional timeout occurs. + + When the timeout argument is present and not None, it should be a + floating-point number specifying a timeout for the operation in seconds + (or fractions thereof). + + This method returns the internal flag on exit, so it will always return + ``True`` except if a timeout is given and the operation times out, when + it will return ``False``. + + """ + with self._cond: + signaled = self._flag + if not signaled: + signaled = self._cond.wait(timeout) + return signaled + + +# A barrier class. Inspired in part by the pthread_barrier_* api and +# the CyclicBarrier class from Java. See +# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and +# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/ +# CyclicBarrier.html +# for information. +# We maintain two main states, 'filling' and 'draining' enabling the barrier +# to be cyclic. Threads are not allowed into it until it has fully drained +# since the previous cycle. In addition, a 'resetting' state exists which is +# similar to 'draining' except that threads leave with a BrokenBarrierError, +# and a 'broken' state in which all threads get the exception. +class Barrier: + """Implements a Barrier. + + Useful for synchronizing a fixed number of threads at known synchronization + points. 
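A minimal sketch of the `Event` flag defined above, signalling one thread from another:

```python
import threading

ready = threading.Event()

def waiter():
    # wait() returns True if the flag was set, False only on timeout.
    if ready.wait(timeout=5.0):
        print("flag was set")

t = threading.Thread(target=waiter)
t.start()
ready.set()        # wakes every thread blocked in wait()
t.join()
```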
Threads block on 'wait()' and are simultaneously awoken once they + have all made that call. + + """ + + def __init__(self, parties, action=None, timeout=None): + """Create a barrier, initialised to 'parties' threads. + + 'action' is a callable which, when supplied, will be called by one of + the threads after they have all entered the barrier and just prior to + releasing them all. If a 'timeout' is provided, it is used as the + default for all subsequent 'wait()' calls. + + """ + if parties < 1: + raise ValueError("parties must be >= 1") + self._cond = Condition(Lock()) + self._action = action + self._timeout = timeout + self._parties = parties + self._state = 0 # 0 filling, 1 draining, -1 resetting, -2 broken + self._count = 0 + + def __repr__(self): + cls = self.__class__ + if self.broken: + return f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}: broken>" + return (f"<{cls.__module__}.{cls.__qualname__} at {id(self):#x}:" + f" waiters={self.n_waiting}/{self.parties}>") + + def wait(self, timeout=None): + """Wait for the barrier. + + When the specified number of threads have started waiting, they are all + simultaneously awoken. If an 'action' was provided for the barrier, one + of the threads will have executed that callback prior to returning. + Returns an individual index number from 0 to 'parties-1'. + + """ + if timeout is None: + timeout = self._timeout + with self._cond: + self._enter() # Block while the barrier drains. + index = self._count + self._count += 1 + try: + if index + 1 == self._parties: + # We release the barrier + self._release() + else: + # We wait until someone releases us + self._wait(timeout) + return index + finally: + self._count -= 1 + # Wake up any threads waiting for barrier to drain. + self._exit() + + # Block until the barrier is ready for us, or raise an exception + # if it is broken. + def _enter(self): + while self._state in (-1, 1): + # It is draining or resetting, wait until done + self._cond.wait() + #see if the barrier is in a broken state + if self._state < 0: + raise BrokenBarrierError + assert self._state == 0 + + # Optionally run the 'action' and release the threads waiting + # in the barrier. + def _release(self): + try: + if self._action: + self._action() + # enter draining state + self._state = 1 + self._cond.notify_all() + except: + #an exception during the _action handler. Break and reraise + self._break() + raise + + # Wait in the barrier until we are released. Raise an exception + # if the barrier is reset or broken. + def _wait(self, timeout): + if not self._cond.wait_for(lambda : self._state != 0, timeout): + #timed out. Break the barrier + self._break() + raise BrokenBarrierError + if self._state < 0: + raise BrokenBarrierError + assert self._state == 1 + + # If we are the last thread to exit the barrier, signal any threads + # waiting for the barrier to drain. + def _exit(self): + if self._count == 0: + if self._state in (-1, 1): + #resetting or draining + self._state = 0 + self._cond.notify_all() + + def reset(self): + """Reset the barrier to the initial state. + + Any threads currently waiting will get the BrokenBarrier exception + raised. + + """ + with self._cond: + if self._count > 0: + if self._state == 0: + #reset the barrier, waking up threads + self._state = -1 + elif self._state == -2: + #was broken, set it to reset state + #which clears when the last thread exits + self._state = -1 + else: + self._state = 0 + self._cond.notify_all() + + def abort(self): + """Place the barrier into a 'broken' state. 
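For reference, a minimal sketch of the `Barrier` protocol above: all parties block in `wait()`, one of them runs the optional `action`, and each receives a distinct index:

```python
import threading

barrier = threading.Barrier(3, action=lambda: print("all arrived"))

def party():
    index = barrier.wait()     # returns a value in range(parties)
    print("released with index", index)

threads = [threading.Thread(target=party) for _ in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```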
+ + Useful in case of error. Any currently waiting threads and threads + attempting to 'wait()' will have BrokenBarrierError raised. + + """ + with self._cond: + self._break() + + def _break(self): + # An internal error was detected. The barrier is set to + # a broken state all parties awakened. + self._state = -2 + self._cond.notify_all() + + @property + def parties(self): + """Return the number of threads required to trip the barrier.""" + return self._parties + + @property + def n_waiting(self): + """Return the number of threads currently waiting at the barrier.""" + # We don't need synchronization here since this is an ephemeral result + # anyway. It returns the correct value in the steady state. + if self._state == 0: + return self._count + return 0 + + @property + def broken(self): + """Return True if the barrier is in a broken state.""" + return self._state == -2 + +# exception raised by the Barrier class +class BrokenBarrierError(RuntimeError): + pass + + +# Helper to generate new thread names +_counter = _count(1).__next__ +def _newname(name_template): + return name_template % _counter() + +# Active thread administration. +# +# bpo-44422: Use a reentrant lock to allow reentrant calls to functions like +# threading.enumerate(). +_active_limbo_lock = RLock() +_active = {} # maps thread id to Thread object +_limbo = {} +_dangling = WeakSet() + + +# Main class for threads + +class Thread: + """A class that represents a thread of control. + + This class can be safely subclassed in a limited fashion. There are two ways + to specify the activity: by passing a callable object to the constructor, or + by overriding the run() method in a subclass. + + """ + + _initialized = False + + def __init__(self, group=None, target=None, name=None, + args=(), kwargs=None, *, daemon=None, context=None): + """This constructor should always be called with keyword arguments. Arguments are: + + *group* should be None; reserved for future extension when a ThreadGroup + class is implemented. + + *target* is the callable object to be invoked by the run() + method. Defaults to None, meaning nothing is called. + + *name* is the thread name. By default, a unique name is constructed of + the form "Thread-N" where N is a small decimal number. + + *args* is a list or tuple of arguments for the target invocation. Defaults to (). + + *kwargs* is a dictionary of keyword arguments for the target + invocation. Defaults to {}. + + *context* is the contextvars.Context value to use for the thread. + The default value is None, which means to check + sys.flags.thread_inherit_context. If that flag is true, use a copy + of the context of the caller. If false, use an empty context. To + explicitly start with an empty context, pass a new instance of + contextvars.Context(). To explicitly start with a copy of the current + context, pass the value from contextvars.copy_context(). + + If a subclass overrides the constructor, it must make sure to invoke + the base class constructor (Thread.__init__()) before doing anything + else to the thread. 
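The two ways of specifying a thread's activity mentioned in the docstring above, sketched side by side (the `Greeter` class is invented for the example):

```python
import threading

# Way 1: pass a callable as 'target'.
t1 = threading.Thread(target=print, args=("hello from target",))

# Way 2: override run() in a subclass, invoking Thread.__init__() first.
class Greeter(threading.Thread):
    def __init__(self, whom):
        super().__init__()
        self.whom = whom

    def run(self):
        print("hello,", self.whom)

t2 = Greeter("subclass")
for t in (t1, t2):
    t.start()
for t in (t1, t2):
    t.join()
```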
+ + """ + assert group is None, "group argument must be None for now" + if kwargs is None: + kwargs = {} + if name: + name = str(name) + else: + name = _newname("Thread-%d") + if target is not None: + try: + target_name = target.__name__ + name += f" ({target_name})" + except AttributeError: + pass + + self._target = target + self._name = name + self._args = args + self._kwargs = kwargs + if daemon is not None: + if daemon and not _daemon_threads_allowed(): + raise RuntimeError('daemon threads are disabled in this (sub)interpreter') + self._daemonic = daemon + else: + self._daemonic = current_thread().daemon + self._context = context + self._ident = None + if _HAVE_THREAD_NATIVE_ID: + self._native_id = None + self._os_thread_handle = _ThreadHandle() + self._started = Event() + self._initialized = True + # Copy of sys.stderr used by self._invoke_excepthook() + self._stderr = _sys.stderr + self._invoke_excepthook = _make_invoke_excepthook() + # For debugging and _after_fork() + _dangling.add(self) + + def _after_fork(self, new_ident=None): + # Private! Called by threading._after_fork(). + self._started._at_fork_reinit() + if new_ident is not None: + # This thread is alive. + self._ident = new_ident + assert self._os_thread_handle.ident == new_ident + if _HAVE_THREAD_NATIVE_ID: + self._set_native_id() + else: + # Otherwise, the thread is dead, Jim. _PyThread_AfterFork() + # already marked our handle done. + pass + + def __repr__(self): + assert self._initialized, "Thread.__init__() was not called" + status = "initial" + if self._started.is_set(): + status = "started" + if self._os_thread_handle.is_done(): + status = "stopped" + if self._daemonic: + status += " daemon" + if self._ident is not None: + status += " %s" % self._ident + return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status) + + def start(self): + """Start the thread's activity. + + It must be called at most once per thread object. It arranges for the + object's run() method to be invoked in a separate thread of control. + + This method will raise a RuntimeError if called more than once on the + same thread object. + + """ + if not self._initialized: + raise RuntimeError("thread.__init__() not called") + + if self._started.is_set(): + raise RuntimeError("threads can only be started once") + + with _active_limbo_lock: + _limbo[self] = self + + if self._context is None: + # No context provided + if _sys.flags.thread_inherit_context: + # start with a copy of the context of the caller + self._context = _contextvars.copy_context() + else: + # start with an empty context + self._context = _contextvars.Context() + + try: + # Start joinable thread + _start_joinable_thread(self._bootstrap, handle=self._os_thread_handle, + daemon=self.daemon) + except Exception: + with _active_limbo_lock: + del _limbo[self] + raise + self._started.wait() # Will set ident and native_id + + def run(self): + """Method representing the thread's activity. + + You may override this method in a subclass. The standard run() method + invokes the callable object passed to the object's constructor as the + target argument, if any, with sequential and keyword arguments taken + from the args and kwargs arguments, respectively. + + """ + try: + if self._target is not None: + self._target(*self._args, **self._kwargs) + finally: + # Avoid a refcycle if the thread is running a function with + # an argument that has a member that points to the thread. 
+ del self._target, self._args, self._kwargs + + def _bootstrap(self): + # Wrapper around the real bootstrap code that ignores + # exceptions during interpreter cleanup. Those typically + # happen when a daemon thread wakes up at an unfortunate + # moment, finds the world around it destroyed, and raises some + # random exception *** while trying to report the exception in + # _bootstrap_inner() below ***. Those random exceptions + # don't help anybody, and they confuse users, so we suppress + # them. We suppress them only when it appears that the world + # indeed has already been destroyed, so that exceptions in + # _bootstrap_inner() during normal business hours are properly + # reported. Also, we only suppress them for daemonic threads; + # if a non-daemonic encounters this, something else is wrong. + try: + self._bootstrap_inner() + except: + if self._daemonic and _sys is None: + return + raise + + def _set_ident(self): + self._ident = get_ident() + + if _HAVE_THREAD_NATIVE_ID: + def _set_native_id(self): + self._native_id = get_native_id() + + def _set_os_name(self): + if _set_name is None or not self._name: + return + try: + _set_name(self._name) + except OSError: + pass + + def _bootstrap_inner(self): + try: + self._set_ident() + if _HAVE_THREAD_NATIVE_ID: + self._set_native_id() + self._set_os_name() + self._started.set() + with _active_limbo_lock: + _active[self._ident] = self + del _limbo[self] + + if _trace_hook: + _sys.settrace(_trace_hook) + if _profile_hook: + _sys.setprofile(_profile_hook) + + try: + self._context.run(self.run) + except: + self._invoke_excepthook(self) + finally: + self._delete() + + def _delete(self): + "Remove current thread from the dict of currently running threads." + with _active_limbo_lock: + del _active[get_ident()] + # There must not be any python code between the previous line + # and after the lock is released. Otherwise a tracing function + # could try to acquire the lock again in the same thread, (in + # current_thread()), and would block. + + def join(self, timeout=None): + """Wait until the thread terminates. + + This blocks the calling thread until the thread whose join() method is + called terminates -- either normally or through an unhandled exception + or until the optional timeout occurs. + + When the timeout argument is present and not None, it should be a + floating-point number specifying a timeout for the operation in seconds + (or fractions thereof). As join() always returns None, you must call + is_alive() after join() to decide whether a timeout happened -- if the + thread is still alive, the join() call timed out. + + When the timeout argument is not present or None, the operation will + block until the thread terminates. + + A thread can be join()ed many times. + + join() raises a RuntimeError if an attempt is made to join the current + thread as that would cause a deadlock. It is also an error to join() a + thread before it has been started and attempts to do so raises the same + exception. 
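Because `join()` always returns None, a timeout has to be detected with `is_alive()`, as the docstring above notes; a minimal sketch:

```python
import threading
import time

t = threading.Thread(target=time.sleep, args=(10,), daemon=True)
t.start()

t.join(timeout=0.1)        # returns None whether or not the thread finished
if t.is_alive():
    print("join timed out; thread still running")
```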
+ + """ + if not self._initialized: + raise RuntimeError("Thread.__init__() not called") + if not self._started.is_set(): + raise RuntimeError("cannot join thread before it is started") + if self is current_thread(): + raise RuntimeError("cannot join current thread") + + # the behavior of a negative timeout isn't documented, but + # historically .join(timeout=x) for x<0 has acted as if timeout=0 + if timeout is not None: + timeout = max(timeout, 0) + + self._os_thread_handle.join(timeout) + + @property + def name(self): + """A string used for identification purposes only. + + It has no semantics. Multiple threads may be given the same name. The + initial name is set by the constructor. + + """ + assert self._initialized, "Thread.__init__() not called" + return self._name + + @name.setter + def name(self, name): + assert self._initialized, "Thread.__init__() not called" + self._name = str(name) + if get_ident() == self._ident: + self._set_os_name() + + @property + def ident(self): + """Thread identifier of this thread or None if it has not been started. + + This is a nonzero integer. See the get_ident() function. Thread + identifiers may be recycled when a thread exits and another thread is + created. The identifier is available even after the thread has exited. + + """ + assert self._initialized, "Thread.__init__() not called" + return self._ident + + if _HAVE_THREAD_NATIVE_ID: + @property + def native_id(self): + """Native integral thread ID of this thread, or None if it has not been started. + + This is a non-negative integer. See the get_native_id() function. + This represents the Thread ID as reported by the kernel. + + """ + assert self._initialized, "Thread.__init__() not called" + return self._native_id + + def is_alive(self): + """Return whether the thread is alive. + + This method returns True just before the run() method starts until just + after the run() method terminates. See also the module function + enumerate(). + + """ + assert self._initialized, "Thread.__init__() not called" + return self._started.is_set() and not self._os_thread_handle.is_done() + + @property + def daemon(self): + """A boolean value indicating whether this thread is a daemon thread. + + This must be set before start() is called, otherwise RuntimeError is + raised. Its initial value is inherited from the creating thread; the + main thread is not a daemon thread and therefore all threads created in + the main thread default to daemon = False. + + The entire Python program exits when only daemon threads are left. + + """ + assert self._initialized, "Thread.__init__() not called" + return self._daemonic + + @daemon.setter + def daemon(self, daemonic): + if not self._initialized: + raise RuntimeError("Thread.__init__() not called") + if daemonic and not _daemon_threads_allowed(): + raise RuntimeError('daemon threads are disabled in this interpreter') + if self._started.is_set(): + raise RuntimeError("cannot set daemon status of active thread") + self._daemonic = daemonic + + def isDaemon(self): + """Return whether this thread is a daemon. + + This method is deprecated, use the daemon attribute instead. + + """ + import warnings + warnings.warn('isDaemon() is deprecated, get the daemon attribute instead', + DeprecationWarning, stacklevel=2) + return self.daemon + + def setDaemon(self, daemonic): + """Set whether this thread is a daemon. + + This method is deprecated, use the .daemon property instead. 
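A short sketch of the `daemon` property rules described above: it must be set before `start()`, and flipping it afterwards raises RuntimeError:

```python
import threading
import time

t = threading.Thread(target=time.sleep, args=(60,))
t.daemon = True            # preferred over the deprecated setDaemon()
t.start()

try:
    t.daemon = False
except RuntimeError as exc:
    print("caught:", exc)
```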
+ + """ + import warnings + warnings.warn('setDaemon() is deprecated, set the daemon attribute instead', + DeprecationWarning, stacklevel=2) + self.daemon = daemonic + + def getName(self): + """Return a string used for identification purposes only. + + This method is deprecated, use the name attribute instead. + + """ + import warnings + warnings.warn('getName() is deprecated, get the name attribute instead', + DeprecationWarning, stacklevel=2) + return self.name + + def setName(self, name): + """Set the name string for this thread. + + This method is deprecated, use the name attribute instead. + + """ + import warnings + warnings.warn('setName() is deprecated, set the name attribute instead', + DeprecationWarning, stacklevel=2) + self.name = name + + +try: + from _thread import (_excepthook as excepthook, + _ExceptHookArgs as ExceptHookArgs) +except ImportError: + # Simple Python implementation if _thread._excepthook() is not available + from traceback import print_exception as _print_exception + from collections import namedtuple + + _ExceptHookArgs = namedtuple( + 'ExceptHookArgs', + 'exc_type exc_value exc_traceback thread') + + def ExceptHookArgs(args): + return _ExceptHookArgs(*args) + + def excepthook(args, /): + """ + Handle uncaught Thread.run() exception. + """ + if args.exc_type == SystemExit: + # silently ignore SystemExit + return + + if _sys is not None and _sys.stderr is not None: + stderr = _sys.stderr + elif args.thread is not None: + stderr = args.thread._stderr + if stderr is None: + # do nothing if sys.stderr is None and sys.stderr was None + # when the thread was created + return + else: + # do nothing if sys.stderr is None and args.thread is None + return + + if args.thread is not None: + name = args.thread.name + else: + name = get_ident() + print(f"Exception in thread {name}:", + file=stderr, flush=True) + _print_exception(args.exc_type, args.exc_value, args.exc_traceback, + file=stderr) + stderr.flush() + + +# Original value of threading.excepthook +__excepthook__ = excepthook + + +def _make_invoke_excepthook(): + # Create a local namespace to ensure that variables remain alive + # when _invoke_excepthook() is called, even if it is called late during + # Python shutdown. It is mostly needed for daemon threads. 
+ + old_excepthook = excepthook + old_sys_excepthook = _sys.excepthook + if old_excepthook is None: + raise RuntimeError("threading.excepthook is None") + if old_sys_excepthook is None: + raise RuntimeError("sys.excepthook is None") + + sys_exc_info = _sys.exc_info + local_print = print + local_sys = _sys + + def invoke_excepthook(thread): + global excepthook + try: + hook = excepthook + if hook is None: + hook = old_excepthook + + args = ExceptHookArgs([*sys_exc_info(), thread]) + + hook(args) + except Exception as exc: + exc.__suppress_context__ = True + del exc + + if local_sys is not None and local_sys.stderr is not None: + stderr = local_sys.stderr + else: + stderr = thread._stderr + + local_print("Exception in threading.excepthook:", + file=stderr, flush=True) + + if local_sys is not None and local_sys.excepthook is not None: + sys_excepthook = local_sys.excepthook + else: + sys_excepthook = old_sys_excepthook + + sys_excepthook(*sys_exc_info()) + finally: + # Break reference cycle (exception stored in a variable) + args = None + + return invoke_excepthook + + +# The timer class was contributed by Itamar Shtull-Trauring + +class Timer(Thread): + """Call a function after a specified number of seconds: + + t = Timer(30.0, f, args=None, kwargs=None) + t.start() + t.cancel() # stop the timer's action if it's still waiting + + """ + + def __init__(self, interval, function, args=None, kwargs=None): + Thread.__init__(self) + self.interval = interval + self.function = function + self.args = args if args is not None else [] + self.kwargs = kwargs if kwargs is not None else {} + self.finished = Event() + + def cancel(self): + """Stop the timer if it hasn't finished yet.""" + self.finished.set() + + def run(self): + self.finished.wait(self.interval) + if not self.finished.is_set(): + self.function(*self.args, **self.kwargs) + self.finished.set() + + +# Special thread class to represent the main thread + +class _MainThread(Thread): + + def __init__(self): + Thread.__init__(self, name="MainThread", daemon=False) + self._started.set() + self._ident = _get_main_thread_ident() + self._os_thread_handle = _make_thread_handle(self._ident) + if _HAVE_THREAD_NATIVE_ID: + self._set_native_id() + with _active_limbo_lock: + _active[self._ident] = self + + +# Helper thread-local instance to detect when a _DummyThread +# is collected. Not a part of the public API. +_thread_local_info = local() + + +class _DeleteDummyThreadOnDel: + ''' + Helper class to remove a dummy thread from threading._active on __del__. + ''' + + def __init__(self, dummy_thread): + self._dummy_thread = dummy_thread + self._tident = dummy_thread.ident + # Put the thread on a thread local variable so that when + # the related thread finishes this instance is collected. + # + # Note: no other references to this instance may be created. + # If any client code creates a reference to this instance, + # the related _DummyThread will be kept forever! + _thread_local_info._track_dummy_thread_ref = self + + def __del__(self, _active_limbo_lock=_active_limbo_lock, _active=_active): + with _active_limbo_lock: + if _active.get(self._tident) is self._dummy_thread: + _active.pop(self._tident, None) + + +# Dummy thread class to represent threads not started here. +# These should be added to `_active` and removed automatically +# when they die, although they can't be waited for. +# Their purpose is to return *something* from current_thread(). +# They are marked as daemon threads so we won't wait for them +# when we exit (conform previous semantics). 
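For reference, a minimal sketch of replacing `threading.excepthook` (whose invocation machinery appears above) with a custom handler; the `log_hook` name is invented for the example:

```python
import threading

def log_hook(args):
    # args carries exc_type, exc_value, exc_traceback and thread.
    print(f"unhandled {args.exc_type.__name__} in {args.thread.name}")

threading.excepthook = log_hook

def boom():
    raise RuntimeError("oops")

t = threading.Thread(target=boom)
t.start()
t.join()
```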
+ +class _DummyThread(Thread): + + def __init__(self): + Thread.__init__(self, name=_newname("Dummy-%d"), + daemon=_daemon_threads_allowed()) + self._started.set() + self._set_ident() + self._os_thread_handle = _make_thread_handle(self._ident) + if _HAVE_THREAD_NATIVE_ID: + self._set_native_id() + with _active_limbo_lock: + _active[self._ident] = self + _DeleteDummyThreadOnDel(self) + + def is_alive(self): + if not self._os_thread_handle.is_done() and self._started.is_set(): + return True + raise RuntimeError("thread is not alive") + + def join(self, timeout=None): + raise RuntimeError("cannot join a dummy thread") + + def _after_fork(self, new_ident=None): + if new_ident is not None: + self.__class__ = _MainThread + self._name = 'MainThread' + self._daemonic = False + Thread._after_fork(self, new_ident=new_ident) + + +# Global API functions + +def current_thread(): + """Return the current Thread object, corresponding to the caller's thread of control. + + If the caller's thread of control was not created through the threading + module, a dummy thread object with limited functionality is returned. + + """ + try: + return _active[get_ident()] + except KeyError: + return _DummyThread() + +def currentThread(): + """Return the current Thread object, corresponding to the caller's thread of control. + + This function is deprecated, use current_thread() instead. + + """ + import warnings + warnings.warn('currentThread() is deprecated, use current_thread() instead', + DeprecationWarning, stacklevel=2) + return current_thread() + +def active_count(): + """Return the number of Thread objects currently alive. + + The returned count is equal to the length of the list returned by + enumerate(). + + """ + # NOTE: if the logic in here ever changes, update Modules/posixmodule.c + # warn_about_fork_with_threads() to match. + with _active_limbo_lock: + return len(_active) + len(_limbo) + +def activeCount(): + """Return the number of Thread objects currently alive. + + This function is deprecated, use active_count() instead. + + """ + import warnings + warnings.warn('activeCount() is deprecated, use active_count() instead', + DeprecationWarning, stacklevel=2) + return active_count() + +def _enumerate(): + # Same as enumerate(), but without the lock. Internal use only. + return list(_active.values()) + list(_limbo.values()) + +def enumerate(): + """Return a list of all Thread objects currently alive. + + The list includes daemonic threads, dummy thread objects created by + current_thread(), and the main thread. It excludes terminated threads and + threads that have not yet been started. + + """ + with _active_limbo_lock: + return list(_active.values()) + list(_limbo.values()) + + +_threading_atexits = [] +_SHUTTING_DOWN = False + +def _register_atexit(func, *arg, **kwargs): + """CPython internal: register *func* to be called before joining threads. + + The registered *func* is called with its arguments just before all + non-daemon threads are joined in `_shutdown()`. It provides a similar + purpose to `atexit.register()`, but its functions are called prior to + threading shutdown instead of interpreter shutdown. + + For similarity to atexit, the registered functions are called in reverse. + """ + if _SHUTTING_DOWN: + raise RuntimeError("can't register atexit after shutdown") + + _threading_atexits.append(lambda: func(*arg, **kwargs)) + + +from _thread import stack_size + +# Create the main thread object, +# and make it available for the interpreter +# (Py_Main) as threading._shutdown. 
+ +_main_thread = _MainThread() + +def _shutdown(): + """ + Wait until the Python thread state of all non-daemon threads get deleted. + """ + # Obscure: other threads may be waiting to join _main_thread. That's + # dubious, but some code does it. We can't wait for it to be marked as done + # normally - that won't happen until the interpreter is nearly dead. So + # mark it done here. + if _main_thread._os_thread_handle.is_done() and _is_main_interpreter(): + # _shutdown() was already called + return + + global _SHUTTING_DOWN + _SHUTTING_DOWN = True + + # Call registered threading atexit functions before threads are joined. + # Order is reversed, similar to atexit. + for atexit_call in reversed(_threading_atexits): + atexit_call() + + if _is_main_interpreter(): + _main_thread._os_thread_handle._set_done() + + # Wait for all non-daemon threads to exit. + _thread_shutdown() + + +def main_thread(): + """Return the main thread object. + + In normal conditions, the main thread is the thread from which the + Python interpreter was started. + """ + # XXX Figure this out for subinterpreters. (See gh-75698.) + return _main_thread + + +def _after_fork(): + """ + Cleanup threading module state that should not exist after a fork. + """ + # Reset _active_limbo_lock, in case we forked while the lock was held + # by another (non-forked) thread. http://bugs.python.org/issue874900 + global _active_limbo_lock, _main_thread + _active_limbo_lock = RLock() + + # fork() only copied the current thread; clear references to others. + new_active = {} + + try: + current = _active[get_ident()] + except KeyError: + # fork() was called in a thread which was not spawned + # by threading.Thread. For example, a thread spawned + # by thread.start_new_thread(). + current = _MainThread() + + _main_thread = current + + with _active_limbo_lock: + # Dangling thread instances must still have their locks reset, + # because someone may join() them. + threads = set(_enumerate()) + threads.update(_dangling) + for thread in threads: + # Any lock/condition variable may be currently locked or in an + # invalid state, so we reinitialize them. + if thread is current: + # This is the one and only active thread. + ident = get_ident() + thread._after_fork(new_ident=ident) + new_active[ident] = thread + else: + # All the others are already stopped. + thread._after_fork() + + _limbo.clear() + _active.clear() + _active.update(new_active) + assert len(_active) == 1 + + +if hasattr(_os, "register_at_fork"): + _os.register_at_fork(after_in_child=_after_fork) diff --git a/Python314_4_x86_Template/Lib/timeit.py b/Python314_4_x86_Template/Lib/timeit.py new file mode 100644 index 00000000..e767f018 --- /dev/null +++ b/Python314_4_x86_Template/Lib/timeit.py @@ -0,0 +1,378 @@ +"""Tool for measuring execution time of small code snippets. + +This module avoids a number of common traps for measuring execution +times. See also Tim Peters' introduction to the Algorithms chapter in +the Python Cookbook, published by O'Reilly. + +Library usage: see the Timer class. + +Command line usage: + python timeit.py [-n N] [-r N] [-s S] [-p] [-h] [--] [statement] + +Options: + -n/--number N: how many times to execute 'statement' (default: see below) + -r/--repeat N: how many times to repeat the timer (default 5) + -s/--setup S: statement to be executed once initially (default 'pass'). + Execution time of this setup statement is NOT timed. 
+    -p/--process: use time.process_time() (default is time.perf_counter())
+    -v/--verbose: print raw timing results; repeat for more digits precision
+    -u/--unit: set the output time unit (nsec, usec, msec, or sec)
+    -h/--help: print this usage message and exit
+    --: separate options from statement, use when statement starts with -
+    statement: statement to be timed (default 'pass')
+
+A multi-line statement may be given by specifying each line as a
+separate argument; indented lines are possible by enclosing an
+argument in quotes and using leading spaces. Multiple -s options are
+treated similarly.
+
+If -n is not given, a suitable number of loops is calculated by trying
+increasing numbers from the sequence 1, 2, 5, 10, 20, 50, ... until the
+total time is at least 0.2 seconds.
+
+Note: there is a certain baseline overhead associated with executing a
+pass statement. It differs between versions. The code here doesn't try
+to hide it, but you should be aware of it. The baseline overhead can be
+measured by invoking the program without arguments.
+
+Classes:
+
+    Timer
+
+Functions:
+
+    timeit(string, string) -> float
+    repeat(string, string) -> list
+    default_timer() -> float
+"""
+
+import gc
+import itertools
+import sys
+import time
+
+__all__ = ["Timer", "timeit", "repeat", "default_timer"]
+
+dummy_src_name = "<timeit-src>"
+default_number = 1000000
+default_repeat = 5
+default_timer = time.perf_counter
+
+_globals = globals
+
+# Don't change the indentation of the template; the reindent() calls
+# in Timer.__init__() depend on setup being indented 4 spaces and stmt
+# being indented 8 spaces.
+template = """
+def inner(_it, _timer{init}):
+    {setup}
+    _t0 = _timer()
+    for _i in _it:
+        {stmt}
+        pass
+    _t1 = _timer()
+    return _t1 - _t0
+"""
+
+
+def reindent(src, indent):
+    """Helper to reindent a multi-line statement."""
+    return src.replace("\n", "\n" + " " * indent)
+
+
+class Timer:
+    """Class for timing execution speed of small code snippets.
+
+    The constructor takes a statement to be timed, an additional
+    statement used for setup, and a timer function. Both statements
+    default to 'pass'; the timer function is platform-dependent (see
+    module doc string). If 'globals' is specified, the code will be
+    executed within that namespace (as opposed to inside timeit's
+    namespace).
+
+    To measure the execution time of the first statement, use the
+    timeit() method. The repeat() method is a convenience to call
+    timeit() multiple times and return a list of results.
+
+    The statements may contain newlines, as long as they don't contain
+    multi-line string literals.
+    """
+
+    def __init__(self, stmt="pass", setup="pass", timer=default_timer,
+                 globals=None):
+        """Constructor.
See class doc string."""
+        self.timer = timer
+        local_ns = {}
+        global_ns = _globals() if globals is None else globals
+        init = ''
+        if isinstance(setup, str):
+            # Check that the code can be compiled outside a function
+            compile(setup, dummy_src_name, "exec")
+            stmtprefix = setup + '\n'
+            setup = reindent(setup, 4)
+        elif callable(setup):
+            local_ns['_setup'] = setup
+            init += ', _setup=_setup'
+            stmtprefix = ''
+            setup = '_setup()'
+        else:
+            raise ValueError("setup is neither a string nor callable")
+        if isinstance(stmt, str):
+            # Check that the code can be compiled outside a function
+            compile(stmtprefix + stmt, dummy_src_name, "exec")
+            stmt = reindent(stmt, 8)
+        elif callable(stmt):
+            local_ns['_stmt'] = stmt
+            init += ', _stmt=_stmt'
+            stmt = '_stmt()'
+        else:
+            raise ValueError("stmt is neither a string nor callable")
+        src = template.format(stmt=stmt, setup=setup, init=init)
+        self.src = src  # Save for traceback display
+        code = compile(src, dummy_src_name, "exec")
+        exec(code, global_ns, local_ns)
+        self.inner = local_ns["inner"]
+
+    def print_exc(self, file=None):
+        """Helper to print a traceback from the timed code.
+
+        Typical use:
+
+            t = Timer(...)       # outside the try/except
+            try:
+                t.timeit(...)    # or t.repeat(...)
+            except:
+                t.print_exc()
+
+        The advantage over the standard traceback is that source lines
+        in the compiled template will be displayed.
+
+        The optional file argument directs where the traceback is
+        sent; it defaults to sys.stderr.
+        """
+        import linecache, traceback
+        if self.src is not None:
+            linecache.cache[dummy_src_name] = (len(self.src),
+                                               None,
+                                               self.src.split("\n"),
+                                               dummy_src_name)
+        # else the source is already stored somewhere else
+
+        traceback.print_exc(file=file)
+
+    def timeit(self, number=default_number):
+        """Time 'number' executions of the main statement.
+
+        To be precise, this executes the setup statement once, and
+        then returns the time it takes to execute the main statement
+        a number of times, as float seconds if using the default timer. The
+        argument is the number of times through the loop, defaulting
+        to one million. The main statement, the setup statement and
+        the timer function to be used are passed to the constructor.
+        """
+        it = itertools.repeat(None, number)
+        gcold = gc.isenabled()
+        gc.disable()
+        try:
+            timing = self.inner(it, self.timer)
+        finally:
+            if gcold:
+                gc.enable()
+        return timing
+
+    def repeat(self, repeat=default_repeat, number=default_number):
+        """Call timeit() a few times.
+
+        This is a convenience function that calls timeit()
+        repeatedly, returning a list of results. The first argument
+        specifies how many times to call timeit(), defaulting to 5;
+        the second argument specifies the 'number' argument of
+        timeit(), defaulting to one million.
+
+        Note: it's tempting to calculate mean and standard deviation
+        from the result vector and report these. However, this is not
+        very useful. In a typical case, the lowest value gives a
+        lower bound for how fast your machine can run the given code
+        snippet; higher values in the result vector are typically not
+        caused by variability in Python's speed, but by other
+        processes interfering with your timing accuracy. So the min()
+        of the result is probably the only number you should be
+        interested in. After that, you should look at the entire
+        vector and apply common sense rather than statistics.
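+
+        A minimal usage sketch (absolute timings vary by machine):
+
+            t = Timer("sum(range(100))")
+            best = min(t.repeat(repeat=5, number=10000))
+            # 'best' approximates seconds per 10000 loops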
+ """ + r = [] + for i in range(repeat): + t = self.timeit(number) + r.append(t) + return r + + def autorange(self, callback=None): + """Return the number of loops and time taken so that total time >= 0.2. + + Calls the timeit method with increasing numbers from the sequence + 1, 2, 5, 10, 20, 50, ... until the time taken is at least 0.2 + second. Returns (number, time_taken). + + If *callback* is given and is not None, it will be called after + each trial with two arguments: ``callback(number, time_taken)``. + """ + i = 1 + while True: + for j in 1, 2, 5: + number = i * j + time_taken = self.timeit(number) + if callback: + callback(number, time_taken) + if time_taken >= 0.2: + return (number, time_taken) + i *= 10 + + +def timeit(stmt="pass", setup="pass", timer=default_timer, + number=default_number, globals=None): + """Convenience function to create Timer object and call timeit method.""" + return Timer(stmt, setup, timer, globals).timeit(number) + + +def repeat(stmt="pass", setup="pass", timer=default_timer, + repeat=default_repeat, number=default_number, globals=None): + """Convenience function to create Timer object and call repeat method.""" + return Timer(stmt, setup, timer, globals).repeat(repeat, number) + + +def main(args=None, *, _wrap_timer=None): + """Main program, used when run as a script. + + The optional 'args' argument specifies the command line to be parsed, + defaulting to sys.argv[1:]. + + The return value is an exit code to be passed to sys.exit(); it + may be None to indicate success. + + When an exception happens during timing, a traceback is printed to + stderr and the return value is 1. Exceptions at other times + (including the template compilation) are not caught. + + '_wrap_timer' is an internal interface used for unit testing. If it + is not None, it must be a callable that accepts a timer function + and returns another timer function (used for unit testing). + """ + if args is None: + args = sys.argv[1:] + import getopt + try: + opts, args = getopt.getopt(args, "n:u:s:r:pvh", + ["number=", "setup=", "repeat=", + "process", "verbose", "unit=", "help"]) + except getopt.error as err: + print(err) + print("use -h/--help for command line help") + return 2 + + timer = default_timer + stmt = "\n".join(args) or "pass" + number = 0 # auto-determine + setup = [] + repeat = default_repeat + verbose = 0 + time_unit = None + units = {"nsec": 1e-9, "usec": 1e-6, "msec": 1e-3, "sec": 1.0} + precision = 3 + for o, a in opts: + if o in ("-n", "--number"): + number = int(a) + if o in ("-s", "--setup"): + setup.append(a) + if o in ("-u", "--unit"): + if a in units: + time_unit = a + else: + print("Unrecognized unit. 
Please select nsec, usec, msec, or sec.", + file=sys.stderr) + return 2 + if o in ("-r", "--repeat"): + repeat = int(a) + if repeat <= 0: + repeat = 1 + if o in ("-p", "--process"): + timer = time.process_time + if o in ("-v", "--verbose"): + if verbose: + precision += 1 + verbose += 1 + if o in ("-h", "--help"): + print(__doc__, end="") + return 0 + setup = "\n".join(setup) or "pass" + + # Include the current directory, so that local imports work (sys.path + # contains the directory of this script, rather than the current + # directory) + import os + sys.path.insert(0, os.curdir) + if _wrap_timer is not None: + timer = _wrap_timer(timer) + + t = Timer(stmt, setup, timer) + if number == 0: + # determine number so that 0.2 <= total time < 2.0 + callback = None + if verbose: + def callback(number, time_taken): + msg = "{num} loop{s} -> {secs:.{prec}g} secs" + plural = (number != 1) + print(msg.format(num=number, s='s' if plural else '', + secs=time_taken, prec=precision)) + try: + number, _ = t.autorange(callback) + except: + t.print_exc() + return 1 + + if verbose: + print() + + try: + raw_timings = t.repeat(repeat, number) + except: + t.print_exc() + return 1 + + def format_time(dt): + unit = time_unit + + if unit is not None: + scale = units[unit] + else: + scales = [(scale, unit) for unit, scale in units.items()] + scales.sort(reverse=True) + for scale, unit in scales: + if dt >= scale: + break + + return "%.*g %s" % (precision, dt / scale, unit) + + if verbose: + print("raw times: %s" % ", ".join(map(format_time, raw_timings))) + print() + timings = [dt / number for dt in raw_timings] + + best = min(timings) + print("%d loop%s, best of %d: %s per loop" + % (number, 's' if number != 1 else '', + repeat, format_time(best))) + + best = min(timings) + worst = max(timings) + if worst >= best * 4: + import warnings + warnings.warn_explicit("The test results are likely unreliable. " + "The worst time (%s) was more than four times " + "slower than the best time (%s)." 
+ % (format_time(worst), format_time(best)), + UserWarning, '', 0) + return None + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/Python314_4_x86_Template/Lib/token.py b/Python314_4_x86_Template/Lib/token.py new file mode 100644 index 00000000..f61723cc --- /dev/null +++ b/Python314_4_x86_Template/Lib/token.py @@ -0,0 +1,144 @@ +"""Token constants.""" +# Auto-generated by Tools/build/generate_token.py + +__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF', + 'EXACT_TOKEN_TYPES'] + +ENDMARKER = 0 +NAME = 1 +NUMBER = 2 +STRING = 3 +NEWLINE = 4 +INDENT = 5 +DEDENT = 6 +LPAR = 7 +RPAR = 8 +LSQB = 9 +RSQB = 10 +COLON = 11 +COMMA = 12 +SEMI = 13 +PLUS = 14 +MINUS = 15 +STAR = 16 +SLASH = 17 +VBAR = 18 +AMPER = 19 +LESS = 20 +GREATER = 21 +EQUAL = 22 +DOT = 23 +PERCENT = 24 +LBRACE = 25 +RBRACE = 26 +EQEQUAL = 27 +NOTEQUAL = 28 +LESSEQUAL = 29 +GREATEREQUAL = 30 +TILDE = 31 +CIRCUMFLEX = 32 +LEFTSHIFT = 33 +RIGHTSHIFT = 34 +DOUBLESTAR = 35 +PLUSEQUAL = 36 +MINEQUAL = 37 +STAREQUAL = 38 +SLASHEQUAL = 39 +PERCENTEQUAL = 40 +AMPEREQUAL = 41 +VBAREQUAL = 42 +CIRCUMFLEXEQUAL = 43 +LEFTSHIFTEQUAL = 44 +RIGHTSHIFTEQUAL = 45 +DOUBLESTAREQUAL = 46 +DOUBLESLASH = 47 +DOUBLESLASHEQUAL = 48 +AT = 49 +ATEQUAL = 50 +RARROW = 51 +ELLIPSIS = 52 +COLONEQUAL = 53 +EXCLAMATION = 54 +OP = 55 +TYPE_IGNORE = 56 +TYPE_COMMENT = 57 +SOFT_KEYWORD = 58 +FSTRING_START = 59 +FSTRING_MIDDLE = 60 +FSTRING_END = 61 +TSTRING_START = 62 +TSTRING_MIDDLE = 63 +TSTRING_END = 64 +COMMENT = 65 +NL = 66 +# These aren't used by the C tokenizer but are needed for tokenize.py +ERRORTOKEN = 67 +ENCODING = 68 +N_TOKENS = 69 +# Special definitions for cooperation with parser +NT_OFFSET = 256 + +tok_name = {value: name + for name, value in globals().items() + if isinstance(value, int) and not name.startswith('_')} +__all__.extend(tok_name.values()) + +EXACT_TOKEN_TYPES = { + '!': EXCLAMATION, + '!=': NOTEQUAL, + '%': PERCENT, + '%=': PERCENTEQUAL, + '&': AMPER, + '&=': AMPEREQUAL, + '(': LPAR, + ')': RPAR, + '*': STAR, + '**': DOUBLESTAR, + '**=': DOUBLESTAREQUAL, + '*=': STAREQUAL, + '+': PLUS, + '+=': PLUSEQUAL, + ',': COMMA, + '-': MINUS, + '-=': MINEQUAL, + '->': RARROW, + '.': DOT, + '...': ELLIPSIS, + '/': SLASH, + '//': DOUBLESLASH, + '//=': DOUBLESLASHEQUAL, + '/=': SLASHEQUAL, + ':': COLON, + ':=': COLONEQUAL, + ';': SEMI, + '<': LESS, + '<<': LEFTSHIFT, + '<<=': LEFTSHIFTEQUAL, + '<=': LESSEQUAL, + '=': EQUAL, + '==': EQEQUAL, + '>': GREATER, + '>=': GREATEREQUAL, + '>>': RIGHTSHIFT, + '>>=': RIGHTSHIFTEQUAL, + '@': AT, + '@=': ATEQUAL, + '[': LSQB, + ']': RSQB, + '^': CIRCUMFLEX, + '^=': CIRCUMFLEXEQUAL, + '{': LBRACE, + '|': VBAR, + '|=': VBAREQUAL, + '}': RBRACE, + '~': TILDE, +} + +def ISTERMINAL(x: int) -> bool: + return x < NT_OFFSET + +def ISNONTERMINAL(x: int) -> bool: + return x >= NT_OFFSET + +def ISEOF(x: int) -> bool: + return x == ENDMARKER diff --git a/Python314_4_x86_Template/Lib/tokenize.py b/Python314_4_x86_Template/Lib/tokenize.py new file mode 100644 index 00000000..1f31258c --- /dev/null +++ b/Python314_4_x86_Template/Lib/tokenize.py @@ -0,0 +1,598 @@ +"""Tokenization help for Python programs. + +tokenize(readline) is a generator that breaks a stream of bytes into +Python tokens. It decodes the bytes according to PEP-0263 for +determining source file encoding. + +It accepts a readline-like method which is called repeatedly to get the +next line of input (or b"" for EOF). 
It generates 5-tuples with these
+members:
+
+    the token type (see token.py)
+    the token (a string)
+    the starting (row, column) indices of the token (a 2-tuple of ints)
+    the ending (row, column) indices of the token (a 2-tuple of ints)
+    the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators. Additionally, all token lists start with an ENCODING token
+which tells you which encoding was used to decode the bytes stream.
+"""
+
+__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
+__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
+               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
+               'Michael Foord')
+from builtins import open as _builtin_open
+from codecs import lookup, BOM_UTF8
+import collections
+import functools
+from io import TextIOWrapper
+import itertools as _itertools
+import re
+import sys
+from token import *
+from token import EXACT_TOKEN_TYPES
+import _tokenize
+
+cookie_re = re.compile(br'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
+blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
+
+import token
+__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
+                           "untokenize", "TokenInfo", "open", "TokenError"]
+del token
+
+class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
+    def __repr__(self):
+        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
+        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
+                self._replace(type=annotated_type))
+
+    @property
+    def exact_type(self):
+        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
+            return EXACT_TOKEN_TYPES[self.string]
+        else:
+            return self.type
+
+def group(*choices): return '(' + '|'.join(choices) + ')'
+def any(*choices): return group(*choices) + '*'
+def maybe(*choices): return group(*choices) + '?'
+
+# Note: we use unicode matching for names ("\w") but ascii matching for
+# number literals.
+Whitespace = r'[ \f\t]*'
+Comment = r'#[^\r\n]*'
+Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+Name = r'\w+'
+
+Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
+Binnumber = r'0[bB](?:_?[01])+'
+Octnumber = r'0[oO](?:_?[0-7])+'
+Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
+Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
+Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
+Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
+                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
+Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
+Floatnumber = group(Pointfloat, Expfloat)
+Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
+Number = group(Imagnumber, Floatnumber, Intnumber)
+
+# Return the empty string, plus all of the valid string prefixes.
+def _all_string_prefixes():
+    # The valid string prefixes. Only contain the lower case versions,
+    # and don't contain any permutations (include 'fr', but not
+    # 'rf'). The various permutations will be generated.
+ _valid_string_prefixes = ['b', 'r', 'u', 'f', 't', 'br', 'fr', 'tr'] + # if we add binary f-strings, add: ['fb', 'fbr'] + result = {''} + for prefix in _valid_string_prefixes: + for t in _itertools.permutations(prefix): + # create a list with upper and lower versions of each + # character + for u in _itertools.product(*[(c, c.upper()) for c in t]): + result.add(''.join(u)) + return result + +@functools.lru_cache +def _compile(expr): + return re.compile(expr, re.UNICODE) + +# Note that since _all_string_prefixes includes the empty string, +# StringPrefix can be the empty string (making it optional). +StringPrefix = group(*_all_string_prefixes()) + +# Tail end of ' string. +Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. +Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +Triple = group(StringPrefix + "'''", StringPrefix + '"""') +# Single-line ' or " string. +String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", + StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') + +# Sorting in reverse order puts the long operators before their prefixes. +# Otherwise if = came before ==, == would get recognized as two instances +# of =. +Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True))) +Funny = group(r'\r?\n', Special) + +PlainToken = group(Number, Funny, String, Name) +Token = Ignore + PlainToken + +# First (or only) line of ' or " string. +ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + + group("'", r'\\\r?\n'), + StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + + group('"', r'\\\r?\n')) +PseudoExtras = group(r'\\\r?\n|\z', Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +# For a given string prefix plus quotes, endpats maps it to a regex +# to match the remainder of that string. _prefix can be empty, for +# a normal single or triple quoted string (with no prefix). +endpats = {} +for _prefix in _all_string_prefixes(): + endpats[_prefix + "'"] = Single + endpats[_prefix + '"'] = Double + endpats[_prefix + "'''"] = Single3 + endpats[_prefix + '"""'] = Double3 +del _prefix + +# A set of all of the single and triple quoted string prefixes, +# including the opening quotes. +single_quoted = set() +triple_quoted = set() +for t in _all_string_prefixes(): + for u in (t + '"', t + "'"): + single_quoted.add(u) + for u in (t + '"""', t + "'''"): + triple_quoted.add(u) +del t, u + +tabsize = 8 + +class TokenError(Exception): pass + + +class Untokenizer: + + def __init__(self): + self.tokens = [] + self.prev_row = 1 + self.prev_col = 0 + self.prev_type = None + self.prev_line = "" + self.encoding = None + + def add_whitespace(self, start): + row, col = start + if row < self.prev_row or row == self.prev_row and col < self.prev_col: + raise ValueError("start ({},{}) precedes previous end ({},{})" + .format(row, col, self.prev_row, self.prev_col)) + self.add_backslash_continuation(start) + col_offset = col - self.prev_col + if col_offset: + self.tokens.append(" " * col_offset) + + def add_backslash_continuation(self, start): + """Add backslash continuation characters if the row has increased + without encountering a newline token. + + This also inserts the correct amount of whitespace before the backslash. 
+ """ + row = start[0] + row_offset = row - self.prev_row + if row_offset == 0: + return + + newline = '\r\n' if self.prev_line.endswith('\r\n') else '\n' + line = self.prev_line.rstrip('\\\r\n') + ws = ''.join(_itertools.takewhile(str.isspace, reversed(line))) + self.tokens.append(ws + f"\\{newline}" * row_offset) + self.prev_col = 0 + + def escape_brackets(self, token): + characters = [] + consume_until_next_bracket = False + for character in token: + if character == "}": + if consume_until_next_bracket: + consume_until_next_bracket = False + else: + characters.append(character) + if character == "{": + n_backslashes = sum( + 1 for char in _itertools.takewhile( + "\\".__eq__, + characters[-2::-1] + ) + ) + if n_backslashes % 2 == 0 or characters[-1] != "N": + characters.append(character) + else: + consume_until_next_bracket = True + characters.append(character) + return "".join(characters) + + def untokenize(self, iterable): + it = iter(iterable) + indents = [] + startline = False + for t in it: + if len(t) == 2: + self.compat(t, it) + break + tok_type, token, start, end, line = t + if tok_type == ENCODING: + self.encoding = token + continue + if tok_type == ENDMARKER: + break + if tok_type == INDENT: + indents.append(token) + continue + elif tok_type == DEDENT: + indents.pop() + self.prev_row, self.prev_col = end + continue + elif tok_type in (NEWLINE, NL): + startline = True + elif startline and indents: + indent = indents[-1] + if start[1] >= len(indent): + self.tokens.append(indent) + self.prev_col = len(indent) + startline = False + elif tok_type in {FSTRING_MIDDLE, TSTRING_MIDDLE}: + if '{' in token or '}' in token: + token = self.escape_brackets(token) + last_line = token.splitlines()[-1] + end_line, end_col = end + extra_chars = last_line.count("{{") + last_line.count("}}") + end = (end_line, end_col + extra_chars) + + self.add_whitespace(start) + self.tokens.append(token) + self.prev_row, self.prev_col = end + if tok_type in (NEWLINE, NL): + self.prev_row += 1 + self.prev_col = 0 + self.prev_type = tok_type + self.prev_line = line + return "".join(self.tokens) + + def compat(self, token, iterable): + indents = [] + toks_append = self.tokens.append + startline = token[0] in (NEWLINE, NL) + prevstring = False + in_fstring_or_tstring = 0 + + for tok in _itertools.chain([token], iterable): + toknum, tokval = tok[:2] + if toknum == ENCODING: + self.encoding = tokval + continue + + if toknum in (NAME, NUMBER): + tokval += ' ' + + # Insert a space between two consecutive strings + if toknum == STRING: + if prevstring: + tokval = ' ' + tokval + prevstring = True + else: + prevstring = False + + if toknum in {FSTRING_START, TSTRING_START}: + in_fstring_or_tstring += 1 + elif toknum in {FSTRING_END, TSTRING_END}: + in_fstring_or_tstring -= 1 + if toknum == INDENT: + indents.append(tokval) + continue + elif toknum == DEDENT: + indents.pop() + continue + elif toknum in (NEWLINE, NL): + startline = True + elif startline and indents: + toks_append(indents[-1]) + startline = False + elif toknum in {FSTRING_MIDDLE, TSTRING_MIDDLE}: + tokval = self.escape_brackets(tokval) + + # Insert a space between two consecutive brackets if we are in an f-string or t-string + if tokval in {"{", "}"} and self.tokens and self.tokens[-1] == tokval and in_fstring_or_tstring: + tokval = ' ' + tokval + + # Insert a space between two consecutive f-strings + if toknum in (STRING, FSTRING_START) and self.prev_type in (STRING, FSTRING_END): + self.tokens.append(" ") + + toks_append(tokval) + self.prev_type = toknum + 
+ +def untokenize(iterable): + """Transform tokens back into Python source code. + It returns a bytes object, encoded using the ENCODING + token, which is the first token sequence output by tokenize. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + The result is guaranteed to tokenize back to match the input so + that the conversion is lossless and round-trips are assured. + The guarantee applies only to the token type and token string as + the spacing between tokens (column positions) may change. + """ + ut = Untokenizer() + out = ut.untokenize(iterable) + if ut.encoding is not None: + out = out.encode(ut.encoding) + return out + + +def _get_normal_name(orig_enc): + """Imitates get_normal_name in Parser/tokenizer/helpers.c.""" + # Only care about the first 12 characters. + enc = orig_enc[:12].lower().replace("_", "-") + if enc == "utf-8" or enc.startswith("utf-8-"): + return "utf-8" + if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ + enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): + return "iso-8859-1" + return orig_enc + +def detect_encoding(readline): + """ + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + """ + try: + filename = readline.__self__.name + except AttributeError: + filename = None + bom_found = False + encoding = None + default = 'utf-8' + def read_or_stop(): + try: + return readline() + except StopIteration: + return b'' + + def check(line, encoding): + # Check if the line matches the encoding. 
+ if 0 in line: + raise SyntaxError("source code cannot contain null bytes") + try: + line.decode(encoding) + except UnicodeDecodeError: + msg = "invalid or missing encoding declaration" + if filename is not None: + msg = '{} for {!r}'.format(msg, filename) + raise SyntaxError(msg) + + def find_cookie(line): + match = cookie_re.match(line) + if not match: + return None + encoding = _get_normal_name(match.group(1).decode()) + try: + codec = lookup(encoding) + except LookupError: + # This behaviour mimics the Python interpreter + if filename is None: + msg = "unknown encoding: " + encoding + else: + msg = "unknown encoding for {!r}: {}".format(filename, + encoding) + raise SyntaxError(msg) + + if bom_found: + if encoding != 'utf-8': + # This behaviour mimics the Python interpreter + if filename is None: + msg = 'encoding problem: utf-8' + else: + msg = 'encoding problem for {!r}: utf-8'.format(filename) + raise SyntaxError(msg) + encoding += '-sig' + return encoding + + first = read_or_stop() + if first.startswith(BOM_UTF8): + bom_found = True + first = first[3:] + default = 'utf-8-sig' + if not first: + return default, [] + + encoding = find_cookie(first) + if encoding: + check(first, encoding) + return encoding, [first] + if not blank_re.match(first): + check(first, default) + return default, [first] + + second = read_or_stop() + if not second: + check(first, default) + return default, [first] + + encoding = find_cookie(second) + if encoding: + check(first + second, encoding) + return encoding, [first, second] + + check(first + second, default) + return default, [first, second] + + +def open(filename): + """Open a file in read only mode using the encoding detected by + detect_encoding(). + """ + buffer = _builtin_open(filename, 'rb') + try: + encoding, lines = detect_encoding(buffer.readline) + buffer.seek(0) + text = TextIOWrapper(buffer, encoding, line_buffering=True) + text.mode = 'r' + return text + except: + buffer.close() + raise + +def tokenize(readline): + """ + The tokenize() generator requires one argument, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as bytes. Alternatively, readline + can be a callable function terminating with StopIteration: + readline = open(myfile, 'rb').__next__ # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + physical line. + + The first token sequence will always be an ENCODING token + which tells you which encoding was used to decode the bytes stream. + """ + encoding, consumed = detect_encoding(readline) + rl_gen = _itertools.chain(consumed, iter(readline, b"")) + if encoding is not None: + if encoding == "utf-8-sig": + # BOM will already have been stripped. + encoding = "utf-8" + yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') + yield from _generate_tokens_from_c_tokenizer(rl_gen.__next__, encoding, extra_tokens=True) + +def generate_tokens(readline): + """Tokenize a source reading Python code as unicode strings. + + This has the same API as tokenize(), except that it expects the *readline* + callable to return str objects instead of bytes. 
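+
+    A minimal sketch ('example.py' is a hypothetical file, opened in text
+    mode so that readline() yields str):
+
+        with open("example.py", encoding="utf-8") as f:
+            for tok in generate_tokens(f.readline):
+                print(tok)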
+ """ + return _generate_tokens_from_c_tokenizer(readline, extra_tokens=True) + +def _main(args=None): + import argparse + + # Helper error handling routines + def perror(message): + sys.stderr.write(message) + sys.stderr.write('\n') + + def error(message, filename=None, location=None): + if location: + args = (filename,) + location + (message,) + perror("%s:%d:%d: error: %s" % args) + elif filename: + perror("%s: error: %s" % (filename, message)) + else: + perror("error: %s" % message) + sys.exit(1) + + # Parse the arguments and options + parser = argparse.ArgumentParser(color=True) + parser.add_argument(dest='filename', nargs='?', + metavar='filename.py', + help='the file to tokenize; defaults to stdin') + parser.add_argument('-e', '--exact', dest='exact', action='store_true', + help='display token names using the exact type') + args = parser.parse_args(args) + + try: + # Tokenize the input + if args.filename: + filename = args.filename + with _builtin_open(filename, 'rb') as f: + tokens = list(tokenize(f.readline)) + else: + filename = "" + tokens = _generate_tokens_from_c_tokenizer( + sys.stdin.readline, extra_tokens=True) + + + # Output the tokenization + for token in tokens: + token_type = token.type + if args.exact: + token_type = token.exact_type + token_range = "%d,%d-%d,%d:" % (token.start + token.end) + print("%-20s%-15s%-15r" % + (token_range, tok_name[token_type], token.string)) + except IndentationError as err: + line, column = err.args[1][1:3] + error(err.args[0], filename, (line, column)) + except TokenError as err: + line, column = err.args[1] + error(err.args[0], filename, (line, column)) + except SyntaxError as err: + error(err, filename) + except OSError as err: + error(err) + except KeyboardInterrupt: + print("interrupted\n") + except Exception as err: + perror("unexpected error: %s" % err) + raise + +def _transform_msg(msg): + """Transform error messages from the C tokenizer into the Python tokenize + + The C tokenizer is more picky than the Python one, so we need to massage + the error messages a bit for backwards compatibility. 
+ """ + if "unterminated triple-quoted string literal" in msg: + return "EOF in multi-line string" + return msg + +def _generate_tokens_from_c_tokenizer(source, encoding=None, extra_tokens=False): + """Tokenize a source reading Python code as unicode strings using the internal C tokenizer""" + if encoding is None: + it = _tokenize.TokenizerIter(source, extra_tokens=extra_tokens) + else: + it = _tokenize.TokenizerIter(source, encoding=encoding, extra_tokens=extra_tokens) + try: + for info in it: + yield TokenInfo._make(info) + except SyntaxError as e: + if type(e) != SyntaxError: + raise e from None + msg = _transform_msg(e.msg) + raise TokenError(msg, (e.lineno, e.offset)) from None + + +if __name__ == "__main__": + _main() diff --git a/Python313_13_x86_Template/Lib/tomllib/__init__.py b/Python314_4_x86_Template/Lib/tomllib/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/tomllib/__init__.py rename to Python314_4_x86_Template/Lib/tomllib/__init__.py diff --git a/Python314_4_x86_Template/Lib/tomllib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/tomllib/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..0ef51cef Binary files /dev/null and b/Python314_4_x86_Template/Lib/tomllib/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/tomllib/__pycache__/_parser.cpython-314.pyc b/Python314_4_x86_Template/Lib/tomllib/__pycache__/_parser.cpython-314.pyc new file mode 100644 index 00000000..1d5d11d5 Binary files /dev/null and b/Python314_4_x86_Template/Lib/tomllib/__pycache__/_parser.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/tomllib/__pycache__/_re.cpython-314.pyc b/Python314_4_x86_Template/Lib/tomllib/__pycache__/_re.cpython-314.pyc new file mode 100644 index 00000000..532b1d12 Binary files /dev/null and b/Python314_4_x86_Template/Lib/tomllib/__pycache__/_re.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/tomllib/_parser.py b/Python314_4_x86_Template/Lib/tomllib/_parser.py new file mode 100644 index 00000000..3ee47aa9 --- /dev/null +++ b/Python314_4_x86_Template/Lib/tomllib/_parser.py @@ -0,0 +1,753 @@ +# SPDX-License-Identifier: MIT +# SPDX-FileCopyrightText: 2021 Taneli Hukkinen +# Licensed to PSF under a Contributor Agreement. + +from __future__ import annotations + +from types import MappingProxyType + +from ._re import ( + RE_DATETIME, + RE_LOCALTIME, + RE_NUMBER, + match_to_datetime, + match_to_localtime, + match_to_number, +) + +TYPE_CHECKING = False +if TYPE_CHECKING: + from collections.abc import Iterable + from typing import IO, Any + + from ._types import Key, ParseFloat, Pos + +ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) + +# Neither of these sets include quotation mark or backslash. They are +# currently handled as separate cases in the parser functions. 
+ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t") +ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n") + +ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS +ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS + +ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS + +TOML_WS = frozenset(" \t") +TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n") +BARE_KEY_CHARS = frozenset( + "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789" "-_" +) +KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'") +HEXDIGIT_CHARS = frozenset("abcdef" "ABCDEF" "0123456789") + +BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( + { + "\\b": "\u0008", # backspace + "\\t": "\u0009", # tab + "\\n": "\u000A", # linefeed + "\\f": "\u000C", # form feed + "\\r": "\u000D", # carriage return + '\\"': "\u0022", # quote + "\\\\": "\u005C", # backslash + } +) + + +class DEPRECATED_DEFAULT: + """Sentinel to be used as default arg during deprecation + period of TOMLDecodeError's free-form arguments.""" + + +class TOMLDecodeError(ValueError): + """An error raised if a document is not valid TOML. + + Adds the following attributes to ValueError: + msg: The unformatted error message + doc: The TOML document being parsed + pos: The index of doc where parsing failed + lineno: The line corresponding to pos + colno: The column corresponding to pos + """ + + def __init__( + self, + msg: str = DEPRECATED_DEFAULT, # type: ignore[assignment] + doc: str = DEPRECATED_DEFAULT, # type: ignore[assignment] + pos: Pos = DEPRECATED_DEFAULT, # type: ignore[assignment] + *args: Any, + ): + if ( + args + or not isinstance(msg, str) + or not isinstance(doc, str) + or not isinstance(pos, int) + ): + import warnings + + warnings.warn( + "Free-form arguments for TOMLDecodeError are deprecated. " + "Please set 'msg' (str), 'doc' (str) and 'pos' (int) arguments only.", + DeprecationWarning, + stacklevel=2, + ) + if pos is not DEPRECATED_DEFAULT: # type: ignore[comparison-overlap] + args = pos, *args + if doc is not DEPRECATED_DEFAULT: # type: ignore[comparison-overlap] + args = doc, *args + if msg is not DEPRECATED_DEFAULT: # type: ignore[comparison-overlap] + args = msg, *args + ValueError.__init__(self, *args) + return + + lineno = doc.count("\n", 0, pos) + 1 + if lineno == 1: + colno = pos + 1 + else: + colno = pos - doc.rindex("\n", 0, pos) + + if pos >= len(doc): + coord_repr = "end of document" + else: + coord_repr = f"line {lineno}, column {colno}" + errmsg = f"{msg} (at {coord_repr})" + ValueError.__init__(self, errmsg) + + self.msg = msg + self.doc = doc + self.pos = pos + self.lineno = lineno + self.colno = colno + + +def load(fp: IO[bytes], /, *, parse_float: ParseFloat = float) -> dict[str, Any]: + """Parse TOML from a binary file object.""" + b = fp.read() + try: + s = b.decode() + except AttributeError: + raise TypeError( + "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`" + ) from None + return loads(s, parse_float=parse_float) + + +def loads(s: str, /, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901 + """Parse TOML from a string.""" + + # The spec allows converting "\r\n" to "\n", even in string + # literals. Let's do so to simplify parsing. 
+ try: + src = s.replace("\r\n", "\n") + except (AttributeError, TypeError): + raise TypeError( + f"Expected str object, not '{type(s).__qualname__}'" + ) from None + pos = 0 + out = Output() + header: Key = () + parse_float = make_safe_parse_float(parse_float) + + # Parse one statement at a time + # (typically means one line in TOML source) + while True: + # 1. Skip line leading whitespace + pos = skip_chars(src, pos, TOML_WS) + + # 2. Parse rules. Expect one of the following: + # - end of file + # - end of line + # - comment + # - key/value pair + # - append dict to list (and move to its namespace) + # - create dict (and move to its namespace) + # Skip trailing whitespace when applicable. + try: + char = src[pos] + except IndexError: + break + if char == "\n": + pos += 1 + continue + if char in KEY_INITIAL_CHARS: + pos = key_value_rule(src, pos, out, header, parse_float) + pos = skip_chars(src, pos, TOML_WS) + elif char == "[": + try: + second_char: str | None = src[pos + 1] + except IndexError: + second_char = None + out.flags.finalize_pending() + if second_char == "[": + pos, header = create_list_rule(src, pos, out) + else: + pos, header = create_dict_rule(src, pos, out) + pos = skip_chars(src, pos, TOML_WS) + elif char != "#": + raise TOMLDecodeError("Invalid statement", src, pos) + + # 3. Skip comment + pos = skip_comment(src, pos) + + # 4. Expect end of line or end of file + try: + char = src[pos] + except IndexError: + break + if char != "\n": + raise TOMLDecodeError( + "Expected newline or end of document after a statement", src, pos + ) + pos += 1 + + return out.data.dict + + +class Flags: + """Flags that map to parsed keys/namespaces.""" + + # Marks an immutable namespace (inline array or inline table). + FROZEN = 0 + # Marks a nest that has been explicitly created and can no longer + # be opened using the "[table]" syntax. 
+ EXPLICIT_NEST = 1 + + def __init__(self) -> None: + self._flags: dict[str, dict[Any, Any]] = {} + self._pending_flags: set[tuple[Key, int]] = set() + + def add_pending(self, key: Key, flag: int) -> None: + self._pending_flags.add((key, flag)) + + def finalize_pending(self) -> None: + for key, flag in self._pending_flags: + self.set(key, flag, recursive=False) + self._pending_flags.clear() + + def unset_all(self, key: Key) -> None: + cont = self._flags + for k in key[:-1]: + if k not in cont: + return + cont = cont[k]["nested"] + cont.pop(key[-1], None) + + def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 + cont = self._flags + key_parent, key_stem = key[:-1], key[-1] + for k in key_parent: + if k not in cont: + cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + if key_stem not in cont: + cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag) + + def is_(self, key: Key, flag: int) -> bool: + if not key: + return False # document root has no flags + cont = self._flags + for k in key[:-1]: + if k not in cont: + return False + inner_cont = cont[k] + if flag in inner_cont["recursive_flags"]: + return True + cont = inner_cont["nested"] + key_stem = key[-1] + if key_stem in cont: + cont = cont[key_stem] + return flag in cont["flags"] or flag in cont["recursive_flags"] + return False + + +class NestedDict: + def __init__(self) -> None: + # The parsed content of the TOML document + self.dict: dict[str, Any] = {} + + def get_or_create_nest( + self, + key: Key, + *, + access_lists: bool = True, + ) -> dict[str, Any]: + cont: Any = self.dict + for k in key: + if k not in cont: + cont[k] = {} + cont = cont[k] + if access_lists and isinstance(cont, list): + cont = cont[-1] + if not isinstance(cont, dict): + raise KeyError("There is no nest behind this key") + return cont # type: ignore[no-any-return] + + def append_nest_to_list(self, key: Key) -> None: + cont = self.get_or_create_nest(key[:-1]) + last_key = key[-1] + if last_key in cont: + list_ = cont[last_key] + if not isinstance(list_, list): + raise KeyError("An object other than list found behind this key") + list_.append({}) + else: + cont[last_key] = [{}] + + +class Output: + def __init__(self) -> None: + self.data = NestedDict() + self.flags = Flags() + + +def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: + try: + while src[pos] in chars: + pos += 1 + except IndexError: + pass + return pos + + +def skip_until( + src: str, + pos: Pos, + expect: str, + *, + error_on: frozenset[str], + error_on_eof: bool, +) -> Pos: + try: + new_pos = src.index(expect, pos) + except ValueError: + new_pos = len(src) + if error_on_eof: + raise TOMLDecodeError(f"Expected {expect!r}", src, new_pos) from None + + if not error_on.isdisjoint(src[pos:new_pos]): + while src[pos] not in error_on: + pos += 1 + raise TOMLDecodeError(f"Found invalid character {src[pos]!r}", src, pos) + return new_pos + + +def skip_comment(src: str, pos: Pos) -> Pos: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char == "#": + return skip_until( + src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False + ) + return pos + + +def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: + while True: + pos_before_skip = pos + pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) + pos = skip_comment(src, pos) + if pos == pos_before_skip: + return pos + + +def create_dict_rule(src: 
str, pos: Pos, out: Output) -> tuple[Pos, Key]: + pos += 1 # Skip "[" + pos = skip_chars(src, pos, TOML_WS) + pos, key = parse_key(src, pos) + + if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): + raise TOMLDecodeError(f"Cannot declare {key} twice", src, pos) + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.get_or_create_nest(key) + except KeyError: + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None + + if not src.startswith("]", pos): + raise TOMLDecodeError( + "Expected ']' at the end of a table declaration", src, pos + ) + return pos + 1, key + + +def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: + pos += 2 # Skip "[[" + pos = skip_chars(src, pos, TOML_WS) + pos, key = parse_key(src, pos) + + if out.flags.is_(key, Flags.FROZEN): + raise TOMLDecodeError(f"Cannot mutate immutable namespace {key}", src, pos) + # Free the namespace now that it points to another empty list item... + out.flags.unset_all(key) + # ...but this key precisely is still prohibited from table declaration + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.append_nest_to_list(key) + except KeyError: + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None + + if not src.startswith("]]", pos): + raise TOMLDecodeError( + "Expected ']]' at the end of an array declaration", src, pos + ) + return pos + 2, key + + +def key_value_rule( + src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat +) -> Pos: + pos, key, value = parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + abs_key_parent = header + key_parent + + relative_path_cont_keys = (header + key[:i] for i in range(1, len(key))) + for cont_key in relative_path_cont_keys: + # Check that dotted key syntax does not redefine an existing table + if out.flags.is_(cont_key, Flags.EXPLICIT_NEST): + raise TOMLDecodeError(f"Cannot redefine namespace {cont_key}", src, pos) + # Containers in the relative path can't be opened with the table syntax or + # dotted key/value syntax in following table sections. 
+ out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST) + + if out.flags.is_(abs_key_parent, Flags.FROZEN): + raise TOMLDecodeError( + f"Cannot mutate immutable namespace {abs_key_parent}", src, pos + ) + + try: + nest = out.data.get_or_create_nest(abs_key_parent) + except KeyError: + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None + if key_stem in nest: + raise TOMLDecodeError("Cannot overwrite a value", src, pos) + # Mark inline table and array namespaces recursively immutable + if isinstance(value, (dict, list)): + out.flags.set(header + key, Flags.FROZEN, recursive=True) + nest[key_stem] = value + return pos + + +def parse_key_value_pair( + src: str, pos: Pos, parse_float: ParseFloat +) -> tuple[Pos, Key, Any]: + pos, key = parse_key(src, pos) + try: + char: str | None = src[pos] + except IndexError: + char = None + if char != "=": + raise TOMLDecodeError("Expected '=' after a key in a key/value pair", src, pos) + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + pos, value = parse_value(src, pos, parse_float) + return pos, key, value + + +def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]: + pos, key_part = parse_key_part(src, pos) + key: Key = (key_part,) + pos = skip_chars(src, pos, TOML_WS) + while True: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char != ".": + return pos, key + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + pos, key_part = parse_key_part(src, pos) + key += (key_part,) + pos = skip_chars(src, pos, TOML_WS) + + +def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char in BARE_KEY_CHARS: + start_pos = pos + pos = skip_chars(src, pos, BARE_KEY_CHARS) + return pos, src[start_pos:pos] + if char == "'": + return parse_literal_str(src, pos) + if char == '"': + return parse_one_line_basic_str(src, pos) + raise TOMLDecodeError("Invalid initial character for a key part", src, pos) + + +def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]: + pos += 1 + return parse_basic_str(src, pos, multiline=False) + + +def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list[Any]]: + pos += 1 + array: list[Any] = [] + + pos = skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + while True: + pos, val = parse_value(src, pos, parse_float) + array.append(val) + pos = skip_comments_and_array_ws(src, pos) + + c = src[pos : pos + 1] + if c == "]": + return pos + 1, array + if c != ",": + raise TOMLDecodeError("Unclosed array", src, pos) + pos += 1 + + pos = skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + + +def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict[str, Any]]: + pos += 1 + nested_dict = NestedDict() + flags = Flags() + + pos = skip_chars(src, pos, TOML_WS) + if src.startswith("}", pos): + return pos + 1, nested_dict.dict + while True: + pos, key, value = parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + if flags.is_(key, Flags.FROZEN): + raise TOMLDecodeError(f"Cannot mutate immutable namespace {key}", src, pos) + try: + nest = nested_dict.get_or_create_nest(key_parent, access_lists=False) + except KeyError: + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None + if key_stem in nest: + raise TOMLDecodeError(f"Duplicate inline table key {key_stem!r}", src, pos) + nest[key_stem] = value + pos = skip_chars(src, pos, TOML_WS) + c = 
src[pos : pos + 1] + if c == "}": + return pos + 1, nested_dict.dict + if c != ",": + raise TOMLDecodeError("Unclosed inline table", src, pos) + if isinstance(value, (dict, list)): + flags.set(key, Flags.FROZEN, recursive=True) + pos += 1 + pos = skip_chars(src, pos, TOML_WS) + + +def parse_basic_str_escape( + src: str, pos: Pos, *, multiline: bool = False +) -> tuple[Pos, str]: + escape_id = src[pos : pos + 2] + pos += 2 + if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: + # Skip whitespace until next non-whitespace character or end of + # the doc. Error if non-whitespace is found before newline. + if escape_id != "\\\n": + pos = skip_chars(src, pos, TOML_WS) + try: + char = src[pos] + except IndexError: + return pos, "" + if char != "\n": + raise TOMLDecodeError("Unescaped '\\' in a string", src, pos) + pos += 1 + pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) + return pos, "" + if escape_id == "\\u": + return parse_hex_char(src, pos, 4) + if escape_id == "\\U": + return parse_hex_char(src, pos, 8) + try: + return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] + except KeyError: + raise TOMLDecodeError("Unescaped '\\' in a string", src, pos) from None + + +def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]: + return parse_basic_str_escape(src, pos, multiline=True) + + +def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: + hex_str = src[pos : pos + hex_len] + if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): + raise TOMLDecodeError("Invalid hex value", src, pos) + pos += hex_len + hex_int = int(hex_str, 16) + if not is_unicode_scalar_value(hex_int): + raise TOMLDecodeError( + "Escaped character is not a Unicode scalar value", src, pos + ) + return pos, chr(hex_int) + + +def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]: + pos += 1 # Skip starting apostrophe + start_pos = pos + pos = skip_until( + src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True + ) + return pos + 1, src[start_pos:pos] # Skip ending apostrophe + + +def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]: + pos += 3 + if src.startswith("\n", pos): + pos += 1 + + if literal: + delim = "'" + end_pos = skip_until( + src, + pos, + "'''", + error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS, + error_on_eof=True, + ) + result = src[pos:end_pos] + pos = end_pos + 3 + else: + delim = '"' + pos, result = parse_basic_str(src, pos, multiline=True) + + # Add at maximum two extra apostrophes/quotes if the end sequence + # is 4 or 5 chars long instead of just 3. 
+ if not src.startswith(delim, pos): + return pos, result + pos += 1 + if not src.startswith(delim, pos): + return pos, result + delim + pos += 1 + return pos, result + (delim * 2) + + +def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: + if multiline: + error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS + parse_escapes = parse_basic_str_escape_multiline + else: + error_on = ILLEGAL_BASIC_STR_CHARS + parse_escapes = parse_basic_str_escape + result = "" + start_pos = pos + while True: + try: + char = src[pos] + except IndexError: + raise TOMLDecodeError("Unterminated string", src, pos) from None + if char == '"': + if not multiline: + return pos + 1, result + src[start_pos:pos] + if src.startswith('"""', pos): + return pos + 3, result + src[start_pos:pos] + pos += 1 + continue + if char == "\\": + result += src[start_pos:pos] + pos, parsed_escape = parse_escapes(src, pos) + result += parsed_escape + start_pos = pos + continue + if char in error_on: + raise TOMLDecodeError(f"Illegal character {char!r}", src, pos) + pos += 1 + + +def parse_value( # noqa: C901 + src: str, pos: Pos, parse_float: ParseFloat +) -> tuple[Pos, Any]: + try: + char: str | None = src[pos] + except IndexError: + char = None + + # IMPORTANT: order conditions based on speed of checking and likelihood + + # Basic strings + if char == '"': + if src.startswith('"""', pos): + return parse_multiline_str(src, pos, literal=False) + return parse_one_line_basic_str(src, pos) + + # Literal strings + if char == "'": + if src.startswith("'''", pos): + return parse_multiline_str(src, pos, literal=True) + return parse_literal_str(src, pos) + + # Booleans + if char == "t": + if src.startswith("true", pos): + return pos + 4, True + if char == "f": + if src.startswith("false", pos): + return pos + 5, False + + # Arrays + if char == "[": + return parse_array(src, pos, parse_float) + + # Inline tables + if char == "{": + return parse_inline_table(src, pos, parse_float) + + # Dates and times + datetime_match = RE_DATETIME.match(src, pos) + if datetime_match: + try: + datetime_obj = match_to_datetime(datetime_match) + except ValueError as e: + raise TOMLDecodeError("Invalid date or datetime", src, pos) from e + return datetime_match.end(), datetime_obj + localtime_match = RE_LOCALTIME.match(src, pos) + if localtime_match: + return localtime_match.end(), match_to_localtime(localtime_match) + + # Integers and "normal" floats. + # The regex will greedily match any type starting with a decimal + # char, so needs to be located after handling of dates and times. + number_match = RE_NUMBER.match(src, pos) + if number_match: + return number_match.end(), match_to_number(number_match, parse_float) + + # Special floats + first_three = src[pos : pos + 3] + if first_three in {"inf", "nan"}: + return pos + 3, parse_float(first_three) + first_four = src[pos : pos + 4] + if first_four in {"-inf", "+inf", "-nan", "+nan"}: + return pos + 4, parse_float(first_four) + + raise TOMLDecodeError("Invalid value", src, pos) + + +def is_unicode_scalar_value(codepoint: int) -> bool: + return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111) + + +def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat: + """A decorator to make `parse_float` safe. + + `parse_float` must not return dicts or lists, because these types + would be mixed with parsed TOML tables and arrays, thus confusing + the parser. The returned decorated callable raises `ValueError` + instead of returning illegal types. 
+ """ + # The default `float` callable never returns illegal types. Optimize it. + if parse_float is float: + return float + + def safe_parse_float(float_str: str) -> Any: + float_value = parse_float(float_str) + if isinstance(float_value, (dict, list)): + raise ValueError("parse_float must not return dicts or lists") + return float_value + + return safe_parse_float diff --git a/Python314_4_x86_Template/Lib/tomllib/_re.py b/Python314_4_x86_Template/Lib/tomllib/_re.py new file mode 100644 index 00000000..eb8beb19 --- /dev/null +++ b/Python314_4_x86_Template/Lib/tomllib/_re.py @@ -0,0 +1,113 @@ +# SPDX-License-Identifier: MIT +# SPDX-FileCopyrightText: 2021 Taneli Hukkinen +# Licensed to PSF under a Contributor Agreement. + +from __future__ import annotations + +from datetime import date, datetime, time, timedelta, timezone, tzinfo +from functools import lru_cache +import re + +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Any + + from ._types import ParseFloat + +# E.g. +# - 00:32:00.999999 +# - 00:32:00 +_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?" + +RE_NUMBER = re.compile( + r""" +0 +(?: + x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex + | + b[01](?:_?[01])* # bin + | + o[0-7](?:_?[0-7])* # oct +) +| +[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part +(?P + (?:\.[0-9](?:_?[0-9])*)? # optional fractional part + (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part +) +""", + flags=re.VERBOSE, +) +RE_LOCALTIME = re.compile(_TIME_RE_STR) +RE_DATETIME = re.compile( + rf""" +([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27 +(?: + [Tt ] + {_TIME_RE_STR} + (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset +)? +""", + flags=re.VERBOSE, +) + + +def match_to_datetime(match: re.Match[str]) -> datetime | date: + """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`. + + Raises ValueError if the match does not correspond to a valid date + or datetime. + """ + ( + year_str, + month_str, + day_str, + hour_str, + minute_str, + sec_str, + micros_str, + zulu_time, + offset_sign_str, + offset_hour_str, + offset_minute_str, + ) = match.groups() + year, month, day = int(year_str), int(month_str), int(day_str) + if hour_str is None: + return date(year, month, day) + hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) + micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + if offset_sign_str: + tz: tzinfo | None = cached_tz( + offset_hour_str, offset_minute_str, offset_sign_str + ) + elif zulu_time: + tz = timezone.utc + else: # local date-time + tz = None + return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) + + +# No need to limit cache size. This is only ever called on input +# that matched RE_DATETIME, so there is an implicit bound of +# 24 (hours) * 60 (minutes) * 2 (offset direction) = 2880. 
+@lru_cache(maxsize=None) +def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: + sign = 1 if sign_str == "+" else -1 + return timezone( + timedelta( + hours=sign * int(hour_str), + minutes=sign * int(minute_str), + ) + ) + + +def match_to_localtime(match: re.Match[str]) -> time: + hour_str, minute_str, sec_str, micros_str = match.groups() + micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + return time(int(hour_str), int(minute_str), int(sec_str), micros) + + +def match_to_number(match: re.Match[str], parse_float: ParseFloat) -> Any: + if match.group("floatpart"): + return parse_float(match.group()) + return int(match.group(), 0) diff --git a/Python313_13_x86_Template/Lib/tomllib/_types.py b/Python314_4_x86_Template/Lib/tomllib/_types.py similarity index 100% rename from Python313_13_x86_Template/Lib/tomllib/_types.py rename to Python314_4_x86_Template/Lib/tomllib/_types.py diff --git a/Python313_13_x86_Template/Lib/tomllib/mypy.ini b/Python314_4_x86_Template/Lib/tomllib/mypy.ini similarity index 100% rename from Python313_13_x86_Template/Lib/tomllib/mypy.ini rename to Python314_4_x86_Template/Lib/tomllib/mypy.ini diff --git a/Python314_4_x86_Template/Lib/trace.py b/Python314_4_x86_Template/Lib/trace.py new file mode 100644 index 00000000..cf8817f4 --- /dev/null +++ b/Python314_4_x86_Template/Lib/trace.py @@ -0,0 +1,751 @@ +# portions copyright 2001, Autonomous Zones Industries, Inc., all rights... +# err... reserved and offered to the public under the terms of the +# Python 2.2 license. +# Author: Zooko O'Whielacronx +# http://zooko.com/ +# mailto:zooko@zooko.com +# +# Copyright 2000, Mojam Media, Inc., all rights reserved. +# Author: Skip Montanaro +# +# Copyright 1999, Bioreason, Inc., all rights reserved. +# Author: Andrew Dalke +# +# Copyright 1995-1997, Automatrix, Inc., all rights reserved. +# Author: Skip Montanaro +# +# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved. +# +# +# Permission to use, copy, modify, and distribute this Python software and +# its associated documentation for any purpose without fee is hereby +# granted, provided that the above copyright notice appears in all copies, +# and that both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of neither Automatrix, +# Bioreason or Mojam Media be used in advertising or publicity pertaining to +# distribution of the software without specific, written prior permission. +# +"""program/module to trace Python program or function execution + +Sample use, command line: + trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs + trace.py -t --ignore-dir '$prefix' spam.py eggs + trace.py --trackcalls spam.py eggs + +Sample use, programmatically + import sys + + # create a Trace object, telling it what to ignore, and whether to + # do tracing or line-counting or both. 
+  tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,],
+                       trace=0, count=1)
+  # run the new command using the given tracer
+  tracer.run('main()')
+  # make a report, placing output in /tmp
+  r = tracer.results()
+  r.write_results(show_missing=True, coverdir="/tmp")
+"""
+__all__ = ['Trace', 'CoverageResults']
+
+import io
+import linecache
+import os
+import sys
+import sysconfig
+import token
+import tokenize
+import inspect
+import gc
+import dis
+import pickle
+from time import monotonic as _time
+
+import threading
+
+PRAGMA_NOCOVER = "#pragma NO COVER"
+
+class _Ignore:
+    def __init__(self, modules=None, dirs=None):
+        self._mods = set() if not modules else set(modules)
+        self._dirs = [] if not dirs else [os.path.normpath(d)
+                                          for d in dirs]
+        self._ignore = { '<string>' : 1 }
+
+    def names(self, filename, modulename):
+        if modulename in self._ignore:
+            return self._ignore[modulename]
+
+        # haven't seen this one before, so see if the module name is
+        # on the ignore list.
+        if modulename in self._mods:  # Identical names, so ignore
+            self._ignore[modulename] = 1
+            return 1
+
+        # check if the module is a proper submodule of something on
+        # the ignore list
+        for mod in self._mods:
+            # Need to take some care since ignoring
+            # "cmp" mustn't mean ignoring "cmpcache" but ignoring
+            # "Spam" must also mean ignoring "Spam.Eggs".
+            if modulename.startswith(mod + '.'):
+                self._ignore[modulename] = 1
+                return 1
+
+        # Now check that filename isn't in one of the directories
+        if filename is None:
+            # must be a built-in, so we must ignore
+            self._ignore[modulename] = 1
+            return 1
+
+        # Ignore a file when it contains one of the ignorable paths
+        for d in self._dirs:
+            # The '+ os.sep' is to ensure that d is a parent directory,
+            # as compared to cases like:
+            #  d = "/usr/local"
+            #  filename = "/usr/local.py"
+            # or
+            #  d = "/usr/local.py"
+            #  filename = "/usr/local.py"
+            if filename.startswith(d + os.sep):
+                self._ignore[modulename] = 1
+                return 1
+
+        # Tried the different ways, so we don't ignore this module
+        self._ignore[modulename] = 0
+        return 0
+
+def _modname(path):
+    """Return a plausible module name for the path."""
+
+    base = os.path.basename(path)
+    filename, ext = os.path.splitext(base)
+    return filename
+
+def _fullmodname(path):
+    """Return a plausible module name for the path."""
+
+    # If the file 'path' is part of a package, then the filename isn't
+    # enough to uniquely identify it.  Try to do the right thing by
+    # looking in sys.path for the longest matching prefix.  We'll
+    # assume that the rest is the package name.
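+    # For example, with "/usr/lib/python3.14" on sys.path,
+    # _fullmodname("/usr/lib/python3.14/html/parser.py") returns
+    # "html.parser", whereas _modname() above returns just "parser".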
+ + comparepath = os.path.normcase(path) + longest = "" + for dir in sys.path: + dir = os.path.normcase(dir) + if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep: + if len(dir) > len(longest): + longest = dir + + if longest: + base = path[len(longest) + 1:] + else: + base = path + # the drive letter is never part of the module name + drive, base = os.path.splitdrive(base) + base = base.replace(os.sep, ".") + if os.altsep: + base = base.replace(os.altsep, ".") + filename, ext = os.path.splitext(base) + return filename.lstrip(".") + +class CoverageResults: + def __init__(self, counts=None, calledfuncs=None, infile=None, + callers=None, outfile=None): + self.counts = counts + if self.counts is None: + self.counts = {} + self.counter = self.counts.copy() # map (filename, lineno) to count + self.calledfuncs = calledfuncs + if self.calledfuncs is None: + self.calledfuncs = {} + self.calledfuncs = self.calledfuncs.copy() + self.callers = callers + if self.callers is None: + self.callers = {} + self.callers = self.callers.copy() + self.infile = infile + self.outfile = outfile + if self.infile: + # Try to merge existing counts file. + try: + with open(self.infile, 'rb') as f: + counts, calledfuncs, callers = pickle.load(f) + self.update(self.__class__(counts, calledfuncs, callers=callers)) + except (OSError, EOFError, ValueError) as err: + print(("Skipping counts file %r: %s" + % (self.infile, err)), file=sys.stderr) + + def is_ignored_filename(self, filename): + """Return True if the filename does not refer to a file + we want to have reported. + """ + return filename.startswith('<') and filename.endswith('>') + + def update(self, other): + """Merge in the data from another CoverageResults""" + counts = self.counts + calledfuncs = self.calledfuncs + callers = self.callers + other_counts = other.counts + other_calledfuncs = other.calledfuncs + other_callers = other.callers + + for key in other_counts: + counts[key] = counts.get(key, 0) + other_counts[key] + + for key in other_calledfuncs: + calledfuncs[key] = 1 + + for key in other_callers: + callers[key] = 1 + + def write_results(self, show_missing=True, summary=False, coverdir=None, *, + ignore_missing_files=False): + """ + Write the coverage results. + + :param show_missing: Show lines that had no hits. + :param summary: Include coverage summary per module. + :param coverdir: If None, the results of each module are placed in its + directory, otherwise it is included in the directory + specified. + :param ignore_missing_files: If True, counts for files that no longer + exist are silently ignored. Otherwise, a missing file + will raise a FileNotFoundError. 
+ """ + if self.calledfuncs: + print() + print("functions called:") + calls = self.calledfuncs + for filename, modulename, funcname in sorted(calls): + print(("filename: %s, modulename: %s, funcname: %s" + % (filename, modulename, funcname))) + + if self.callers: + print() + print("calling relationships:") + lastfile = lastcfile = "" + for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) \ + in sorted(self.callers): + if pfile != lastfile: + print() + print("***", pfile, "***") + lastfile = pfile + lastcfile = "" + if cfile != pfile and lastcfile != cfile: + print(" -->", cfile) + lastcfile = cfile + print(" %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)) + + # turn the counts data ("(filename, lineno) = count") into something + # accessible on a per-file basis + per_file = {} + for filename, lineno in self.counts: + lines_hit = per_file[filename] = per_file.get(filename, {}) + lines_hit[lineno] = self.counts[(filename, lineno)] + + # accumulate summary info, if needed + sums = {} + + for filename, count in per_file.items(): + if self.is_ignored_filename(filename): + continue + + if filename.endswith(".pyc"): + filename = filename[:-1] + + if ignore_missing_files and not os.path.isfile(filename): + continue + + if coverdir is None: + dir = os.path.dirname(os.path.abspath(filename)) + modulename = _modname(filename) + else: + dir = coverdir + os.makedirs(dir, exist_ok=True) + modulename = _fullmodname(filename) + + # If desired, get a list of the line numbers which represent + # executable content (returned as a dict for better lookup speed) + if show_missing: + lnotab = _find_executable_linenos(filename) + else: + lnotab = {} + source = linecache.getlines(filename) + coverpath = os.path.join(dir, modulename + ".cover") + with open(filename, 'rb') as fp: + encoding, _ = tokenize.detect_encoding(fp.readline) + n_hits, n_lines = self.write_results_file(coverpath, source, + lnotab, count, encoding) + if summary and n_lines: + sums[modulename] = n_lines, n_hits, modulename, filename + + if summary and sums: + print("lines cov% module (path)") + for m in sorted(sums): + n_lines, n_hits, modulename, filename = sums[m] + print(f"{n_lines:5d} {n_hits/n_lines:.1%} {modulename} ({filename})") + + if self.outfile: + # try and store counts and module info into self.outfile + try: + with open(self.outfile, 'wb') as f: + pickle.dump((self.counts, self.calledfuncs, self.callers), + f, 1) + except OSError as err: + print("Can't save counts files because %s" % err, file=sys.stderr) + + def write_results_file(self, path, lines, lnotab, lines_hit, encoding=None): + """Return a coverage results file in path.""" + # ``lnotab`` is a dict of executable lines, or a line number "table" + + try: + outfile = open(path, "w", encoding=encoding) + except OSError as err: + print(("trace: Could not open %r for writing: %s " + "- skipping" % (path, err)), file=sys.stderr) + return 0, 0 + + n_lines = 0 + n_hits = 0 + with outfile: + for lineno, line in enumerate(lines, 1): + # do the blank/comment match to try to mark more lines + # (help the reader find stuff that hasn't been covered) + if lineno in lines_hit: + outfile.write("%5d: " % lines_hit[lineno]) + n_hits += 1 + n_lines += 1 + elif lineno in lnotab and not PRAGMA_NOCOVER in line: + # Highlight never-executed lines, unless the line contains + # #pragma: NO COVER + outfile.write(">>>>>> ") + n_lines += 1 + else: + outfile.write(" ") + outfile.write(line.expandtabs(8)) + + return n_hits, n_lines + +def _find_lines_from_code(code, strs): + """Return dict where keys are lines 
in the line number table.""" + linenos = {} + + for _, lineno in dis.findlinestarts(code): + if lineno not in strs: + linenos[lineno] = 1 + + return linenos + +def _find_lines(code, strs): + """Return lineno dict for all code objects reachable from code.""" + # get all of the lineno information from the code of this scope level + linenos = _find_lines_from_code(code, strs) + + # and check the constants for references to other code objects + for c in code.co_consts: + if inspect.iscode(c): + # find another code object, so recurse into it + linenos.update(_find_lines(c, strs)) + return linenos + +def _find_strings(filename, encoding=None): + """Return a dict of possible docstring positions. + + The dict maps line numbers to strings. There is an entry for + line that contains only a string or a part of a triple-quoted + string. + """ + d = {} + # If the first token is a string, then it's the module docstring. + # Add this special case so that the test in the loop passes. + prev_ttype = token.INDENT + with open(filename, encoding=encoding) as f: + tok = tokenize.generate_tokens(f.readline) + for ttype, tstr, start, end, line in tok: + if ttype == token.STRING: + if prev_ttype == token.INDENT: + sline, scol = start + eline, ecol = end + for i in range(sline, eline + 1): + d[i] = 1 + prev_ttype = ttype + return d + +def _find_executable_linenos(filename): + """Return dict where keys are line numbers in the line number table.""" + try: + with tokenize.open(filename) as f: + prog = f.read() + encoding = f.encoding + except OSError as err: + print(("Not printing coverage data for %r: %s" + % (filename, err)), file=sys.stderr) + return {} + code = compile(prog, filename, "exec") + strs = _find_strings(filename, encoding) + return _find_lines(code, strs) + +class Trace: + def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0, + ignoremods=(), ignoredirs=(), infile=None, outfile=None, + timing=False): + """ + @param count true iff it should count number of times each + line is executed + @param trace true iff it should print out each line that is + being counted + @param countfuncs true iff it should just output a list of + (filename, modulename, funcname,) for functions + that were called at least once; This overrides + 'count' and 'trace' + @param ignoremods a list of the names of modules to ignore + @param ignoredirs a list of the names of directories to ignore + all of the (recursive) contents of + @param infile file from which to read stored counts to be + added into the results + @param outfile file in which to write the results + @param timing true iff timing information be displayed + """ + self.infile = infile + self.outfile = outfile + self.ignore = _Ignore(ignoremods, ignoredirs) + self.counts = {} # keys are (filename, linenumber) + self.pathtobasename = {} # for memoizing os.path.basename + self.donothing = 0 + self.trace = trace + self._calledfuncs = {} + self._callers = {} + self._caller_cache = {} + self.start_time = None + if timing: + self.start_time = _time() + if countcallers: + self.globaltrace = self.globaltrace_trackcallers + elif countfuncs: + self.globaltrace = self.globaltrace_countfuncs + elif trace and count: + self.globaltrace = self.globaltrace_lt + self.localtrace = self.localtrace_trace_and_count + elif trace: + self.globaltrace = self.globaltrace_lt + self.localtrace = self.localtrace_trace + elif count: + self.globaltrace = self.globaltrace_lt + self.localtrace = self.localtrace_count + else: + # Ahem -- do nothing? Okay. 
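+            # (no tracing, counting, or call tracking was requested;
+            # run()/runctx()/runfunc() check this flag and install no hooks)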
+ self.donothing = 1 + + def run(self, cmd): + import __main__ + dict = __main__.__dict__ + self.runctx(cmd, dict, dict) + + def runctx(self, cmd, globals=None, locals=None): + if globals is None: globals = {} + if locals is None: locals = {} + if not self.donothing: + threading.settrace(self.globaltrace) + sys.settrace(self.globaltrace) + try: + exec(cmd, globals, locals) + finally: + if not self.donothing: + sys.settrace(None) + threading.settrace(None) + + def runfunc(self, func, /, *args, **kw): + result = None + if not self.donothing: + sys.settrace(self.globaltrace) + try: + result = func(*args, **kw) + finally: + if not self.donothing: + sys.settrace(None) + return result + + def file_module_function_of(self, frame): + code = frame.f_code + filename = code.co_filename + if filename: + modulename = _modname(filename) + else: + modulename = None + + funcname = code.co_name + clsname = None + if code in self._caller_cache: + if self._caller_cache[code] is not None: + clsname = self._caller_cache[code] + else: + self._caller_cache[code] = None + ## use of gc.get_referrers() was suggested by Michael Hudson + # all functions which refer to this code object + funcs = [f for f in gc.get_referrers(code) + if inspect.isfunction(f)] + # require len(func) == 1 to avoid ambiguity caused by calls to + # new.function(): "In the face of ambiguity, refuse the + # temptation to guess." + if len(funcs) == 1: + dicts = [d for d in gc.get_referrers(funcs[0]) + if isinstance(d, dict)] + if len(dicts) == 1: + classes = [c for c in gc.get_referrers(dicts[0]) + if hasattr(c, "__bases__")] + if len(classes) == 1: + # ditto for new.classobj() + clsname = classes[0].__name__ + # cache the result - assumption is that new.* is + # not called later to disturb this relationship + # _caller_cache could be flushed if functions in + # the new module get called. + self._caller_cache[code] = clsname + if clsname is not None: + funcname = "%s.%s" % (clsname, funcname) + + return filename, modulename, funcname + + def globaltrace_trackcallers(self, frame, why, arg): + """Handler for call events. + + Adds information about who called who to the self._callers dict. + """ + if why == 'call': + # XXX Should do a better job of identifying methods + this_func = self.file_module_function_of(frame) + parent_func = self.file_module_function_of(frame.f_back) + self._callers[(parent_func, this_func)] = 1 + + def globaltrace_countfuncs(self, frame, why, arg): + """Handler for call events. + + Adds (filename, modulename, funcname) to the self._calledfuncs dict. + """ + if why == 'call': + this_func = self.file_module_function_of(frame) + self._calledfuncs[this_func] = 1 + + def globaltrace_lt(self, frame, why, arg): + """Handler for call events. + + If the code block being entered is to be ignored, returns 'None', + else returns self.localtrace. 
+        """
+        if why == 'call':
+            code = frame.f_code
+            filename = frame.f_globals.get('__file__', None)
+            if filename:
+                # XXX _modname() doesn't work right for packages, so
+                # the ignore support won't work right for packages
+                modulename = _modname(filename)
+                if modulename is not None:
+                    ignore_it = self.ignore.names(filename, modulename)
+                    if not ignore_it:
+                        if self.trace:
+                            print((" --- modulename: %s, funcname: %s"
+                                   % (modulename, code.co_name)))
+                        return self.localtrace
+            else:
+                return None
+
+    def localtrace_trace_and_count(self, frame, why, arg):
+        if why == "line":
+            # record the file name and line number of every trace
+            filename = frame.f_code.co_filename
+            lineno = frame.f_lineno
+            key = filename, lineno
+            self.counts[key] = self.counts.get(key, 0) + 1
+
+            if self.start_time:
+                print('%.2f' % (_time() - self.start_time), end=' ')
+            bname = os.path.basename(filename)
+            line = linecache.getline(filename, lineno)
+            print("%s(%d)" % (bname, lineno), end='')
+            if line:
+                print(": ", line, end='')
+            else:
+                print()
+        return self.localtrace
+
+    def localtrace_trace(self, frame, why, arg):
+        if why == "line":
+            # record the file name and line number of every trace
+            filename = frame.f_code.co_filename
+            lineno = frame.f_lineno
+
+            if self.start_time:
+                print('%.2f' % (_time() - self.start_time), end=' ')
+            bname = os.path.basename(filename)
+            line = linecache.getline(filename, lineno)
+            print("%s(%d)" % (bname, lineno), end='')
+            if line:
+                print(": ", line, end='')
+            else:
+                print()
+        return self.localtrace
+
+    def localtrace_count(self, frame, why, arg):
+        if why == "line":
+            filename = frame.f_code.co_filename
+            lineno = frame.f_lineno
+            key = filename, lineno
+            self.counts[key] = self.counts.get(key, 0) + 1
+        return self.localtrace
+
+    def results(self):
+        return CoverageResults(self.counts, infile=self.infile,
+                               outfile=self.outfile,
+                               calledfuncs=self._calledfuncs,
+                               callers=self._callers)
+
+def main():
+    import argparse
+
+    parser = argparse.ArgumentParser(color=True)
+    parser.add_argument('--version', action='version', version='trace 2.0')
+
+    grp = parser.add_argument_group('Main options',
+            'One of these (or --report) must be given')
+
+    grp.add_argument('-c', '--count', action='store_true',
+            help='Count the number of times each line is executed and write '
+                 'the counts to <module>.cover for each module executed, in '
+                 'the module\'s directory. See also --coverdir, --file, '
+                 '--no-report below.')
+    grp.add_argument('-t', '--trace', action='store_true',
+            help='Print each line to sys.stdout before it is executed')
+    grp.add_argument('-l', '--listfuncs', action='store_true',
+            help='Keep track of which functions are executed at least once '
+                 'and write the results to sys.stdout after the program exits. '
+                 'Cannot be specified alongside --trace or --count.')
+    grp.add_argument('-T', '--trackcalls', action='store_true',
+            help='Keep track of caller/called pairs and write the results to '
+                 'sys.stdout after the program exits.')
+
+    grp = parser.add_argument_group('Modifiers')
+
+    _grp = grp.add_mutually_exclusive_group()
+    _grp.add_argument('-r', '--report', action='store_true',
+            help='Generate a report from a counts file; does not execute any '
+                 'code. --file must specify the results file to read, which '
+                 'must have been created in a previous run with --count '
+                 '--file=FILE')
+    _grp.add_argument('-R', '--no-report', action='store_true',
+            help='Do not generate the coverage report files. '
+                 'Useful if you want to accumulate over several runs.')
+
+    grp.add_argument('-f', '--file',
+            help='File to accumulate counts over several runs')
+    grp.add_argument('-C', '--coverdir',
+            help='Directory where the report files go. The coverage report '
+                 'for <package>.<module> will be written to file '
+                 '<dir>/<package>/<module>.cover')
+    grp.add_argument('-m', '--missing', action='store_true',
+            help='Annotate executable lines that were not executed with '
+                 '">>>>>> "')
+    grp.add_argument('-s', '--summary', action='store_true',
+            help='Write a brief summary for each file to sys.stdout. '
+                 'Can only be used with --count or --report')
+    grp.add_argument('-g', '--timing', action='store_true',
+            help='Prefix each line with the time since the program started. '
+                 'Only used while tracing')
+
+    grp = parser.add_argument_group('Filters',
+            'Can be specified multiple times')
+    grp.add_argument('--ignore-module', action='append', default=[],
+            help='Ignore the given module(s) and its submodules '
+                 '(if it is a package). Accepts comma separated list of '
+                 'module names.')
+    grp.add_argument('--ignore-dir', action='append', default=[],
+            help='Ignore files in the given directory '
+                 '(multiple directories can be joined by os.pathsep).')
+
+    parser.add_argument('--module', action='store_true', default=False,
+                        help='Trace a module. ')
+    parser.add_argument('progname', nargs='?',
+            help='file to run as main program')
+    parser.add_argument('arguments', nargs=argparse.REMAINDER,
+            help='arguments to the program')
+
+    opts = parser.parse_args()
+
+    if opts.ignore_dir:
+        _prefix = sysconfig.get_path("stdlib")
+        _exec_prefix = sysconfig.get_path("platstdlib")
+
+    def parse_ignore_dir(s):
+        s = os.path.expanduser(os.path.expandvars(s))
+        s = s.replace('$prefix', _prefix).replace('$exec_prefix', _exec_prefix)
+        return os.path.normpath(s)
+
+    opts.ignore_module = [mod.strip()
+                          for i in opts.ignore_module for mod in i.split(',')]
+    opts.ignore_dir = [parse_ignore_dir(s)
+                       for i in opts.ignore_dir for s in i.split(os.pathsep)]
+
+    if opts.report:
+        if not opts.file:
+            parser.error('-r/--report requires -f/--file')
+        results = CoverageResults(infile=opts.file, outfile=opts.file)
+        return results.write_results(opts.missing, opts.summary, opts.coverdir)
+
+    if not any([opts.trace, opts.count, opts.listfuncs, opts.trackcalls]):
+        parser.error('must specify one of --trace, --count, --report, '
+                     '--listfuncs, or --trackcalls')
+
+    if opts.listfuncs and (opts.count or opts.trace):
+        parser.error('cannot specify both --listfuncs and (--trace or --count)')
+
+    if opts.summary and not opts.count:
+        parser.error('--summary can only be used with --count or --report')
+
+    if opts.progname is None:
+        parser.error('progname is missing: required with the main options')
+
+    t = Trace(opts.count, opts.trace, countfuncs=opts.listfuncs,
+              countcallers=opts.trackcalls, ignoremods=opts.ignore_module,
+              ignoredirs=opts.ignore_dir, infile=opts.file,
+              outfile=opts.file, timing=opts.timing)
+    try:
+        if opts.module:
+            import runpy
+            module_name = opts.progname
+            mod_name, mod_spec, code = runpy._get_module_details(module_name)
+            sys.argv = [code.co_filename, *opts.arguments]
+            globs = {
+                '__name__': '__main__',
+                '__file__': code.co_filename,
+                '__package__': mod_spec.parent,
+                '__loader__': mod_spec.loader,
+                '__spec__': mod_spec,
+                '__cached__': None,
+            }
+        else:
+            sys.argv = [opts.progname, *opts.arguments]
+            sys.path[0] = os.path.dirname(opts.progname)
+
+            with io.open_code(opts.progname) as fp:
+                code = compile(fp.read(), opts.progname, 'exec')
+            # try to
emulate __main__ namespace as much as possible + globs = { + '__file__': opts.progname, + '__name__': '__main__', + '__package__': None, + '__cached__': None, + } + t.runctx(code, globs, globs) + except OSError as err: + sys.exit("Cannot run file %r because: %s" % (sys.argv[0], err)) + except SystemExit: + pass + + results = t.results() + + if not opts.no_report: + results.write_results(opts.missing, opts.summary, opts.coverdir) + +if __name__=='__main__': + main() diff --git a/Python314_4_x86_Template/Lib/traceback.py b/Python314_4_x86_Template/Lib/traceback.py new file mode 100644 index 00000000..79f67b98 --- /dev/null +++ b/Python314_4_x86_Template/Lib/traceback.py @@ -0,0 +1,1745 @@ +"""Extract, format and print information about Python stack traces.""" + +import collections.abc +import itertools +import linecache +import sys +import textwrap +import warnings +import codeop +import keyword +import tokenize +import io +import _colorize + +from contextlib import suppress + +__all__ = ['extract_stack', 'extract_tb', 'format_exception', + 'format_exception_only', 'format_list', 'format_stack', + 'format_tb', 'print_exc', 'format_exc', 'print_exception', + 'print_last', 'print_stack', 'print_tb', 'clear_frames', + 'FrameSummary', 'StackSummary', 'TracebackException', + 'walk_stack', 'walk_tb', 'print_list'] + +# +# Formatting and printing lists of traceback lines. +# + + +def print_list(extracted_list, file=None): + """Print the list of tuples as returned by extract_tb() or + extract_stack() as a formatted stack trace to the given file.""" + if file is None: + file = sys.stderr + for item in StackSummary.from_list(extracted_list).format(): + print(item, file=file, end="") + +def format_list(extracted_list): + """Format a list of tuples or FrameSummary objects for printing. + + Given a list of tuples or FrameSummary objects as returned by + extract_tb() or extract_stack(), return a list of strings ready + for printing. + + Each string in the resulting list corresponds to the item with the + same index in the argument list. Each string ends in a newline; + the strings may contain internal newlines as well, for those items + whose source text line is not None. + """ + return StackSummary.from_list(extracted_list).format() + +# +# Printing and Extracting Tracebacks. +# + +def print_tb(tb, limit=None, file=None): + """Print up to 'limit' stack trace entries from the traceback 'tb'. + + If 'limit' is omitted or None, all entries are printed. If 'file' + is omitted or None, the output goes to sys.stderr; otherwise + 'file' should be an open file or file-like object with a write() + method. + """ + print_list(extract_tb(tb, limit=limit), file=file) + +def format_tb(tb, limit=None): + """A shorthand for 'format_list(extract_tb(tb, limit))'.""" + return extract_tb(tb, limit=limit).format() + +def extract_tb(tb, limit=None): + """ + Return a StackSummary object representing a list of + pre-processed entries from traceback. + + This is useful for alternate formatting of stack traces. If + 'limit' is omitted or None, all entries are extracted. A + pre-processed stack trace entry is a FrameSummary object + containing attributes filename, lineno, name, and line + representing the information that is usually printed for a stack + trace. The line is a string with leading and trailing + whitespace stripped; if the source is not available it is None. + """ + return StackSummary._extract_from_extended_frame_gen( + _walk_tb_with_full_positions(tb), limit=limit) + +# +# Exception formatting and output. 
+#
+
+_cause_message = (
+    "\nThe above exception was the direct cause "
+    "of the following exception:\n\n")
+
+_context_message = (
+    "\nDuring handling of the above exception, "
+    "another exception occurred:\n\n")
+
+
+class _Sentinel:
+    def __repr__(self):
+        return "<implicit>"
+
+_sentinel = _Sentinel()
+
+def _parse_value_tb(exc, value, tb):
+    if (value is _sentinel) != (tb is _sentinel):
+        raise ValueError("Both or neither of value and tb must be given")
+    if value is tb is _sentinel:
+        if exc is not None:
+            if isinstance(exc, BaseException):
+                return exc, exc.__traceback__
+
+            raise TypeError(f'Exception expected for value, '
+                            f'{type(exc).__name__} found')
+        else:
+            return None, None
+    return value, tb
+
+
+def print_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \
+                    file=None, chain=True, **kwargs):
+    """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
+
+    This differs from print_tb() in the following ways: (1) if
+    traceback is not None, it prints a header "Traceback (most recent
+    call last):"; (2) it prints the exception type and value after the
+    stack trace; (3) if type is SyntaxError and value has the
+    appropriate format, it prints the line where the syntax error
+    occurred with a caret on the next line indicating the approximate
+    position of the error.
+    """
+    colorize = kwargs.get("colorize", False)
+    value, tb = _parse_value_tb(exc, value, tb)
+    te = TracebackException(type(value), value, tb, limit=limit, compact=True)
+    te.print(file=file, chain=chain, colorize=colorize)
+
+
+BUILTIN_EXCEPTION_LIMIT = object()
+
+
+def _print_exception_bltin(exc, /):
+    file = sys.stderr if sys.stderr is not None else sys.__stderr__
+    colorize = _colorize.can_colorize(file=file)
+    return print_exception(exc, limit=BUILTIN_EXCEPTION_LIMIT, file=file, colorize=colorize)
+
+
+def format_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \
+                     chain=True, **kwargs):
+    """Format a stack trace and the exception information.
+
+    The arguments have the same meaning as the corresponding arguments
+    to print_exception().  The return value is a list of strings, each
+    ending in a newline and some containing internal newlines.  When
+    these lines are concatenated and printed, exactly the same text is
+    printed as does print_exception().
+    """
+    colorize = kwargs.get("colorize", False)
+    value, tb = _parse_value_tb(exc, value, tb)
+    te = TracebackException(type(value), value, tb, limit=limit, compact=True)
+    return list(te.format(chain=chain, colorize=colorize))
+
+
+def format_exception_only(exc, /, value=_sentinel, *, show_group=False, **kwargs):
+    """Format the exception part of a traceback.
+
+    The return value is a list of strings, each ending in a newline.
+
+    The list contains the exception's message, which is
+    normally a single string; however, for :exc:`SyntaxError` exceptions, it
+    contains several lines that (when printed) display detailed information
+    about where the syntax error occurred. Following the message, the list
+    contains the exception's ``__notes__``.
+
+    When *show_group* is ``True``, and the exception is an instance of
+    :exc:`BaseExceptionGroup`, the nested exceptions are included as
+    well, recursively, with indentation relative to their nesting depth.
+ """ + colorize = kwargs.get("colorize", False) + if value is _sentinel: + value = exc + te = TracebackException(type(value), value, None, compact=True) + return list(te.format_exception_only(show_group=show_group, colorize=colorize)) + + +# -- not official API but folk probably use these two functions. + +def _format_final_exc_line(etype, value, *, insert_final_newline=True, colorize=False): + valuestr = _safe_string(value, 'exception') + end_char = "\n" if insert_final_newline else "" + if colorize: + theme = _colorize.get_theme(force_color=True).traceback + else: + theme = _colorize.get_theme(force_no_color=True).traceback + if value is None or not valuestr: + line = f"{theme.type}{etype}{theme.reset}{end_char}" + else: + line = f"{theme.type}{etype}{theme.reset}: {theme.message}{valuestr}{theme.reset}{end_char}" + return line + + +def _safe_string(value, what, func=str): + try: + return func(value) + except: + return f'<{what} {func.__name__}() failed>' + +# -- + +def print_exc(limit=None, file=None, chain=True): + """Shorthand for 'print_exception(sys.exception(), limit=limit, file=file, chain=chain)'.""" + print_exception(sys.exception(), limit=limit, file=file, chain=chain) + +def format_exc(limit=None, chain=True): + """Like print_exc() but return a string.""" + return "".join(format_exception(sys.exception(), limit=limit, chain=chain)) + +def print_last(limit=None, file=None, chain=True): + """This is a shorthand for 'print_exception(sys.last_exc, limit=limit, file=file, chain=chain)'.""" + if not hasattr(sys, "last_exc") and not hasattr(sys, "last_type"): + raise ValueError("no last exception") + + if hasattr(sys, "last_exc"): + print_exception(sys.last_exc, limit=limit, file=file, chain=chain) + else: + print_exception(sys.last_type, sys.last_value, sys.last_traceback, + limit=limit, file=file, chain=chain) + + +# +# Printing and Extracting Stacks. +# + +def print_stack(f=None, limit=None, file=None): + """Print a stack trace from its invocation point. + + The optional 'f' argument can be used to specify an alternate + stack frame at which to start. The optional 'limit' and 'file' + arguments have the same meaning as for print_exception(). + """ + if f is None: + f = sys._getframe().f_back + print_list(extract_stack(f, limit=limit), file=file) + + +def format_stack(f=None, limit=None): + """Shorthand for 'format_list(extract_stack(f, limit))'.""" + if f is None: + f = sys._getframe().f_back + return format_list(extract_stack(f, limit=limit)) + + +def extract_stack(f=None, limit=None): + """Extract the raw traceback from the current stack frame. + + The return value has the same format as for extract_tb(). The + optional 'f' and 'limit' arguments have the same meaning as for + print_stack(). Each item in the list is a quadruple (filename, + line number, function name, text), and the entries are in order + from oldest to newest stack frame. + """ + if f is None: + f = sys._getframe().f_back + stack = StackSummary.extract(walk_stack(f), limit=limit) + stack.reverse() + return stack + + +def clear_frames(tb): + "Clear all references to local variables in the frames of a traceback." + while tb is not None: + try: + tb.tb_frame.clear() + except RuntimeError: + # Ignore the exception raised if the frame is still executing. + pass + tb = tb.tb_next + + +class FrameSummary: + """Information about a single frame from a traceback. + + - :attr:`filename` The filename for the frame. + - :attr:`lineno` The line within filename for the frame that was + active when the frame was captured. 
+    - :attr:`name` The name of the function or method that was executing
+      when the frame was captured.
+    - :attr:`line` The text from the linecache module for the line
+      of code that was running when the frame was captured.
+    - :attr:`locals` Either None if locals were not supplied, or a dict
+      mapping the name to the repr() of the variable.
+    """
+
+    __slots__ = ('filename', 'lineno', 'end_lineno', 'colno', 'end_colno',
+                 'name', '_lines', '_lines_dedented', 'locals', '_code')
+
+    def __init__(self, filename, lineno, name, *, lookup_line=True,
+                 locals=None, line=None,
+                 end_lineno=None, colno=None, end_colno=None, **kwargs):
+        """Construct a FrameSummary.
+
+        :param lookup_line: If True, `linecache` is consulted for the source
+            code line. Otherwise, the line will be looked up when first needed.
+        :param locals: If supplied the frame locals, which will be captured as
+            object representations.
+        :param line: If provided, use this instead of looking up the line in
+            the linecache.
+        """
+        self.filename = filename
+        self.lineno = lineno
+        self.end_lineno = lineno if end_lineno is None else end_lineno
+        self.colno = colno
+        self.end_colno = end_colno
+        self.name = name
+        self._code = kwargs.get("_code")
+        self._lines = line
+        self._lines_dedented = None
+        if lookup_line:
+            self.line
+        self.locals = {k: _safe_string(v, 'local', func=repr)
+                       for k, v in locals.items()} if locals else None
+
+    def __eq__(self, other):
+        if isinstance(other, FrameSummary):
+            return (self.filename == other.filename and
+                    self.lineno == other.lineno and
+                    self.name == other.name and
+                    self.locals == other.locals)
+        if isinstance(other, tuple):
+            return (self.filename, self.lineno, self.name, self.line) == other
+        return NotImplemented
+
+    def __getitem__(self, pos):
+        return (self.filename, self.lineno, self.name, self.line)[pos]
+
+    def __iter__(self):
+        return iter([self.filename, self.lineno, self.name, self.line])
+
+    def __repr__(self):
+        return "<FrameSummary file {filename}, line {lineno} in {name}>".format(
+            filename=self.filename, lineno=self.lineno, name=self.name)
+
+    def __len__(self):
+        return 4
+
+    def _set_lines(self):
+        if (
+            self._lines is None
+            and self.lineno is not None
+            and self.end_lineno is not None
+        ):
+            lines = []
+            for lineno in range(self.lineno, self.end_lineno + 1):
+                # treat errors (empty string) and empty lines (newline) as the same
+                line = linecache.getline(self.filename, lineno).rstrip()
+                if not line and self._code is not None and self.filename.startswith("<"):
+                    line = linecache._getline_from_code(self._code, lineno).rstrip()
+                lines.append(line)
+            self._lines = "\n".join(lines) + "\n"
+
+    @property
+    def _original_lines(self):
+        # Returns the line as-is from the source, without modifying whitespace.
+        self._set_lines()
+        return self._lines
+
+    @property
+    def _dedented_lines(self):
+        # Returns _original_lines, but dedented
+        self._set_lines()
+        if self._lines_dedented is None and self._lines is not None:
+            self._lines_dedented = textwrap.dedent(self._lines)
+        return self._lines_dedented
+
+    @property
+    def line(self):
+        self._set_lines()
+        if self._lines is None:
+            return None
+        # return only the first line, stripped
+        return self._lines.partition("\n")[0].strip()
+
+
+def walk_stack(f):
+    """Walk a stack yielding the frame and line number for each frame.
+
+    This will follow f.f_back from the given frame. If no frame is given, the
+    current stack is used. Usually used with StackSummary.extract.
+ """ + if f is None: + f = sys._getframe().f_back + + def walk_stack_generator(frame): + while frame is not None: + yield frame, frame.f_lineno + frame = frame.f_back + + return walk_stack_generator(f) + + +def walk_tb(tb): + """Walk a traceback yielding the frame and line number for each frame. + + This will follow tb.tb_next (and thus is in the opposite order to + walk_stack). Usually used with StackSummary.extract. + """ + while tb is not None: + yield tb.tb_frame, tb.tb_lineno + tb = tb.tb_next + + +def _walk_tb_with_full_positions(tb): + # Internal version of walk_tb that yields full code positions including + # end line and column information. + while tb is not None: + positions = _get_code_position(tb.tb_frame.f_code, tb.tb_lasti) + # Yield tb_lineno when co_positions does not have a line number to + # maintain behavior with walk_tb. + if positions[0] is None: + yield tb.tb_frame, (tb.tb_lineno, ) + positions[1:] + else: + yield tb.tb_frame, positions + tb = tb.tb_next + + +def _get_code_position(code, instruction_index): + if instruction_index < 0: + return (None, None, None, None) + positions_gen = code.co_positions() + return next(itertools.islice(positions_gen, instruction_index // 2, None)) + + +_RECURSIVE_CUTOFF = 3 # Also hardcoded in traceback.c. + + +class StackSummary(list): + """A list of FrameSummary objects, representing a stack of frames.""" + + @classmethod + def extract(klass, frame_gen, *, limit=None, lookup_lines=True, + capture_locals=False): + """Create a StackSummary from a traceback or stack object. + + :param frame_gen: A generator that yields (frame, lineno) tuples + whose summaries are to be included in the stack. + :param limit: None to include all frames or the number of frames to + include. + :param lookup_lines: If True, lookup lines for each frame immediately, + otherwise lookup is deferred until the frame is rendered. + :param capture_locals: If True, the local variables from each frame will + be captured as object representations into the FrameSummary. + """ + def extended_frame_gen(): + for f, lineno in frame_gen: + yield f, (lineno, None, None, None) + + return klass._extract_from_extended_frame_gen( + extended_frame_gen(), limit=limit, lookup_lines=lookup_lines, + capture_locals=capture_locals) + + @classmethod + def _extract_from_extended_frame_gen(klass, frame_gen, *, limit=None, + lookup_lines=True, capture_locals=False): + # Same as extract but operates on a frame generator that yields + # (frame, (lineno, end_lineno, colno, end_colno)) in the stack. + # Only lineno is required, the remaining fields can be None if the + # information is not available. + builtin_limit = limit is BUILTIN_EXCEPTION_LIMIT + if limit is None or builtin_limit: + limit = getattr(sys, 'tracebacklimit', None) + if limit is not None and limit < 0: + limit = 0 + if limit is not None: + if builtin_limit: + frame_gen = tuple(frame_gen) + frame_gen = frame_gen[len(frame_gen) - limit:] + elif limit >= 0: + frame_gen = itertools.islice(frame_gen, limit) + else: + frame_gen = collections.deque(frame_gen, maxlen=-limit) + + result = klass() + fnames = set() + for f, (lineno, end_lineno, colno, end_colno) in frame_gen: + co = f.f_code + filename = co.co_filename + name = co.co_name + fnames.add(filename) + linecache.lazycache(filename, f.f_globals) + # Must defer line lookups until we have called checkcache. 
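+            # (checkcache() below can evict stale cache entries, so looking
+            # lines up eagerly here could capture out-of-date source text)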
+            if capture_locals:
+                f_locals = f.f_locals
+            else:
+                f_locals = None
+            result.append(
+                FrameSummary(filename, lineno, name,
+                             lookup_line=False, locals=f_locals,
+                             end_lineno=end_lineno, colno=colno, end_colno=end_colno,
+                             _code=f.f_code,
+                             )
+            )
+        for filename in fnames:
+            linecache.checkcache(filename)
+
+        # If immediate lookup was desired, trigger lookups now.
+        if lookup_lines:
+            for f in result:
+                f.line
+        return result
+
+    @classmethod
+    def from_list(klass, a_list):
+        """
+        Create a StackSummary object from a supplied list of
+        FrameSummary objects or old-style list of tuples.
+        """
+        # While doing a fast-path check for isinstance(a_list, StackSummary) is
+        # appealing, idlelib.run.cleanup_traceback and other similar code may
+        # break this by making arbitrary frames plain tuples, so we need to
+        # check on a frame by frame basis.
+        result = StackSummary()
+        for frame in a_list:
+            if isinstance(frame, FrameSummary):
+                result.append(frame)
+            else:
+                filename, lineno, name, line = frame
+                result.append(FrameSummary(filename, lineno, name, line=line))
+        return result
+
+    def format_frame_summary(self, frame_summary, **kwargs):
+        """Format the lines for a single FrameSummary.
+
+        Returns a string representing one frame involved in the stack. This
+        gets called for every frame to be printed in the stack summary.
+        """
+        colorize = kwargs.get("colorize", False)
+        row = []
+        filename = frame_summary.filename
+        if frame_summary.filename.startswith("<python-input-"):
+            filename = "<stdin>"
+        if colorize:
+            theme = _colorize.get_theme(force_color=True).traceback
+        else:
+            theme = _colorize.get_theme(force_no_color=True).traceback
+        row.append(
+            '  File {}"{}"{}, line {}{}{}, in {}{}{}\n'.format(
+                theme.filename,
+                filename,
+                theme.reset,
+                theme.line_no,
+                frame_summary.lineno,
+                theme.reset,
+                theme.frame,
+                frame_summary.name,
+                theme.reset,
+            )
+        )
+        if frame_summary._dedented_lines and frame_summary._dedented_lines.strip():
+            if (
+                frame_summary.colno is None or
+                frame_summary.end_colno is None
+            ):
+                # only output first line if column information is missing
+                row.append(textwrap.indent(frame_summary.line, '    ') + "\n")
+            else:
+                # get first and last line
+                all_lines_original = frame_summary._original_lines.splitlines()
+                first_line = all_lines_original[0]
+                # assume all_lines_original has enough lines (since we constructed it)
+                last_line = all_lines_original[frame_summary.end_lineno - frame_summary.lineno]
+
+                # character index of the start/end of the instruction
+                start_offset = _byte_offset_to_character_offset(first_line, frame_summary.colno)
+                end_offset = _byte_offset_to_character_offset(last_line, frame_summary.end_colno)
+
+                all_lines = frame_summary._dedented_lines.splitlines()[
+                    :frame_summary.end_lineno - frame_summary.lineno + 1
+                ]
+
+                # adjust start/end offset based on dedent
+                dedent_characters = len(first_line) - len(all_lines[0])
+                start_offset = max(0, start_offset - dedent_characters)
+                end_offset = max(0, end_offset - dedent_characters)
+
+                # When showing this on a terminal, some of the non-ASCII characters
+                # might be rendered as double-width characters, so we need to take
+                # that into account when calculating the length of the line.
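+                # For example, _display_width("x = '世界'") counts each CJK
+                # character as two columns, keeping the caret row aligned
+                # with what the terminal renders.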
+ dp_start_offset = _display_width(all_lines[0], offset=start_offset) + dp_end_offset = _display_width(all_lines[-1], offset=end_offset) + + # get exact code segment corresponding to the instruction + segment = "\n".join(all_lines) + segment = segment[start_offset:len(segment) - (len(all_lines[-1]) - end_offset)] + + # attempt to parse for anchors + anchors = None + show_carets = False + with suppress(Exception): + anchors = _extract_caret_anchors_from_line_segment(segment) + show_carets = self._should_show_carets(start_offset, end_offset, all_lines, anchors) + + result = [] + + # only display first line, last line, and lines around anchor start/end + significant_lines = {0, len(all_lines) - 1} + + anchors_left_end_offset = 0 + anchors_right_start_offset = 0 + primary_char = "^" + secondary_char = "^" + if anchors: + anchors_left_end_offset = anchors.left_end_offset + anchors_right_start_offset = anchors.right_start_offset + # computed anchor positions do not take start_offset into account, + # so account for it here + if anchors.left_end_lineno == 0: + anchors_left_end_offset += start_offset + if anchors.right_start_lineno == 0: + anchors_right_start_offset += start_offset + + # account for display width + anchors_left_end_offset = _display_width( + all_lines[anchors.left_end_lineno], offset=anchors_left_end_offset + ) + anchors_right_start_offset = _display_width( + all_lines[anchors.right_start_lineno], offset=anchors_right_start_offset + ) + + primary_char = anchors.primary_char + secondary_char = anchors.secondary_char + significant_lines.update( + range(anchors.left_end_lineno - 1, anchors.left_end_lineno + 2) + ) + significant_lines.update( + range(anchors.right_start_lineno - 1, anchors.right_start_lineno + 2) + ) + + # remove bad line numbers + significant_lines.discard(-1) + significant_lines.discard(len(all_lines)) + + def output_line(lineno): + """output all_lines[lineno] along with carets""" + result.append(all_lines[lineno] + "\n") + if not show_carets: + return + num_spaces = len(all_lines[lineno]) - len(all_lines[lineno].lstrip()) + carets = [] + num_carets = dp_end_offset if lineno == len(all_lines) - 1 else _display_width(all_lines[lineno]) + # compute caret character for each position + for col in range(num_carets): + if col < num_spaces or (lineno == 0 and col < dp_start_offset): + # before first non-ws char of the line, or before start of instruction + carets.append(' ') + elif anchors and ( + lineno > anchors.left_end_lineno or + (lineno == anchors.left_end_lineno and col >= anchors_left_end_offset) + ) and ( + lineno < anchors.right_start_lineno or + (lineno == anchors.right_start_lineno and col < anchors_right_start_offset) + ): + # within anchors + carets.append(secondary_char) + else: + carets.append(primary_char) + if colorize: + # Replace the previous line with a red version of it only in the parts covered + # by the carets. 
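+                        # Roughly, for `1 + None` the caret row under the
+                        # source is "~~^~~~~~": "^" runs take
+                        # theme.error_highlight, "~" runs take theme.error_range.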
+ line = result[-1] + colorized_line_parts = [] + colorized_carets_parts = [] + + for color, group in itertools.groupby(itertools.zip_longest(line, carets, fillvalue=""), key=lambda x: x[1]): + caret_group = list(group) + if color == "^": + colorized_line_parts.append(theme.error_highlight + "".join(char for char, _ in caret_group) + theme.reset) + colorized_carets_parts.append(theme.error_highlight + "".join(caret for _, caret in caret_group) + theme.reset) + elif color == "~": + colorized_line_parts.append(theme.error_range + "".join(char for char, _ in caret_group) + theme.reset) + colorized_carets_parts.append(theme.error_range + "".join(caret for _, caret in caret_group) + theme.reset) + else: + colorized_line_parts.append("".join(char for char, _ in caret_group)) + colorized_carets_parts.append("".join(caret for _, caret in caret_group)) + + colorized_line = "".join(colorized_line_parts) + colorized_carets = "".join(colorized_carets_parts) + result[-1] = colorized_line + result.append(colorized_carets + "\n") + else: + result.append("".join(carets) + "\n") + + # display significant lines + sig_lines_list = sorted(significant_lines) + for i, lineno in enumerate(sig_lines_list): + if i: + linediff = lineno - sig_lines_list[i - 1] + if linediff == 2: + # 1 line in between - just output it + output_line(lineno - 1) + elif linediff > 2: + # > 1 line in between - abbreviate + result.append(f"...<{linediff - 1} lines>...\n") + output_line(lineno) + + row.append( + textwrap.indent(textwrap.dedent("".join(result)), ' ', lambda line: True) + ) + if frame_summary.locals: + for name, value in sorted(frame_summary.locals.items()): + row.append(' {name} = {value}\n'.format(name=name, value=value)) + + return ''.join(row) + + def _should_show_carets(self, start_offset, end_offset, all_lines, anchors): + with suppress(SyntaxError, ImportError): + import ast + tree = ast.parse('\n'.join(all_lines)) + if not tree.body: + return False + statement = tree.body[0] + value = None + def _spawns_full_line(value): + return ( + value.lineno == 1 + and value.end_lineno == len(all_lines) + and value.col_offset == start_offset + and value.end_col_offset == end_offset + ) + match statement: + case ast.Return(value=ast.Call()): + if isinstance(statement.value.func, ast.Name): + value = statement.value + case ast.Assign(value=ast.Call()): + if ( + len(statement.targets) == 1 and + isinstance(statement.targets[0], ast.Name) + ): + value = statement.value + if value is not None and _spawns_full_line(value): + return False + if anchors: + return True + if all_lines[0][:start_offset].lstrip() or all_lines[-1][end_offset:].rstrip(): + return True + return False + + def format(self, **kwargs): + """Format the stack ready for printing. + + Returns a list of strings ready for printing. Each string in the + resulting list corresponds to a single frame from the stack. + Each string ends in a newline; the strings may contain internal + newlines as well, for those items with source text lines. + + For long sequences of the same frame and line, the first few + repetitions are shown, followed by a summary line stating the exact + number of further repetitions. 
+ """ + colorize = kwargs.get("colorize", False) + result = [] + last_file = None + last_line = None + last_name = None + count = 0 + for frame_summary in self: + formatted_frame = self.format_frame_summary(frame_summary, colorize=colorize) + if formatted_frame is None: + continue + if (last_file is None or last_file != frame_summary.filename or + last_line is None or last_line != frame_summary.lineno or + last_name is None or last_name != frame_summary.name): + if count > _RECURSIVE_CUTOFF: + count -= _RECURSIVE_CUTOFF + result.append( + f' [Previous line repeated {count} more ' + f'time{"s" if count > 1 else ""}]\n' + ) + last_file = frame_summary.filename + last_line = frame_summary.lineno + last_name = frame_summary.name + count = 0 + count += 1 + if count > _RECURSIVE_CUTOFF: + continue + result.append(formatted_frame) + + if count > _RECURSIVE_CUTOFF: + count -= _RECURSIVE_CUTOFF + result.append( + f' [Previous line repeated {count} more ' + f'time{"s" if count > 1 else ""}]\n' + ) + return result + + +def _byte_offset_to_character_offset(str, offset): + as_utf8 = str.encode('utf-8') + return len(as_utf8[:offset].decode("utf-8", errors="replace")) + + +_Anchors = collections.namedtuple( + "_Anchors", + [ + "left_end_lineno", + "left_end_offset", + "right_start_lineno", + "right_start_offset", + "primary_char", + "secondary_char", + ], + defaults=["~", "^"] +) + +def _extract_caret_anchors_from_line_segment(segment): + """ + Given source code `segment` corresponding to a FrameSummary, determine: + - for binary ops, the location of the binary op + - for indexing and function calls, the location of the brackets. + `segment` is expected to be a valid Python expression. + """ + import ast + + try: + # Without parentheses, `segment` is parsed as a statement. + # Binary ops, subscripts, and calls are expressions, so + # we can wrap them with parentheses to parse them as + # (possibly multi-line) expressions. + # e.g. if we try to highlight the addition in + # x = ( + # a + + # b + # ) + # then we would ast.parse + # a + + # b + # which is not a valid statement because of the newline. + # Adding brackets makes it a valid expression. + # ( + # a + + # b + # ) + # Line locations will be different than the original, + # which is taken into account later on. + tree = ast.parse(f"(\n{segment}\n)") + except SyntaxError: + return None + + if len(tree.body) != 1: + return None + + lines = segment.splitlines() + + def normalize(lineno, offset): + """Get character index given byte offset""" + return _byte_offset_to_character_offset(lines[lineno], offset) + + def next_valid_char(lineno, col): + """Gets the next valid character index in `lines`, if + the current location is not valid. Handles empty lines. 
+ """ + while lineno < len(lines) and col >= len(lines[lineno]): + col = 0 + lineno += 1 + assert lineno < len(lines) and col < len(lines[lineno]) + return lineno, col + + def increment(lineno, col): + """Get the next valid character index in `lines`.""" + col += 1 + lineno, col = next_valid_char(lineno, col) + return lineno, col + + def nextline(lineno, col): + """Get the next valid character at least on the next line""" + col = 0 + lineno += 1 + lineno, col = next_valid_char(lineno, col) + return lineno, col + + def increment_until(lineno, col, stop): + """Get the next valid non-"\\#" character that satisfies the `stop` predicate""" + while True: + ch = lines[lineno][col] + if ch in "\\#": + lineno, col = nextline(lineno, col) + elif not stop(ch): + lineno, col = increment(lineno, col) + else: + break + return lineno, col + + def setup_positions(expr, force_valid=True): + """Get the lineno/col position of the end of `expr`. If `force_valid` is True, + forces the position to be a valid character (e.g. if the position is beyond the + end of the line, move to the next line) + """ + # -2 since end_lineno is 1-indexed and because we added an extra + # bracket + newline to `segment` when calling ast.parse + lineno = expr.end_lineno - 2 + col = normalize(lineno, expr.end_col_offset) + return next_valid_char(lineno, col) if force_valid else (lineno, col) + + statement = tree.body[0] + match statement: + case ast.Expr(expr): + match expr: + case ast.BinOp(): + # ast gives these locations for BinOp subexpressions + # ( left_expr ) + ( right_expr ) + # left^^^^^ right^^^^^ + lineno, col = setup_positions(expr.left) + + # First operator character is the first non-space/')' character + lineno, col = increment_until(lineno, col, lambda x: not x.isspace() and x != ')') + + # binary op is 1 or 2 characters long, on the same line, + # before the right subexpression + right_col = col + 1 + if ( + right_col < len(lines[lineno]) + and ( + # operator char should not be in the right subexpression + expr.right.lineno - 2 > lineno or + right_col < normalize(expr.right.lineno - 2, expr.right.col_offset) + ) + and not (ch := lines[lineno][right_col]).isspace() + and ch not in "\\#" + ): + right_col += 1 + + # right_col can be invalid since it is exclusive + return _Anchors(lineno, col, lineno, right_col) + case ast.Subscript(): + # ast gives these locations for value and slice subexpressions + # ( value_expr ) [ slice_expr ] + # value^^^^^ slice^^^^^ + # subscript^^^^^^^^^^^^^^^^^^^^ + + # find left bracket + left_lineno, left_col = setup_positions(expr.value) + left_lineno, left_col = increment_until(left_lineno, left_col, lambda x: x == '[') + # find right bracket (final character of expression) + right_lineno, right_col = setup_positions(expr, force_valid=False) + return _Anchors(left_lineno, left_col, right_lineno, right_col) + case ast.Call(): + # ast gives these locations for function call expressions + # ( func_expr ) (args, kwargs) + # func^^^^^ + # call^^^^^^^^^^^^^^^^^^^^^^^^ + + # find left bracket + left_lineno, left_col = setup_positions(expr.func) + left_lineno, left_col = increment_until(left_lineno, left_col, lambda x: x == '(') + # find right bracket (final character of expression) + right_lineno, right_col = setup_positions(expr, force_valid=False) + return _Anchors(left_lineno, left_col, right_lineno, right_col) + + return None + +_WIDE_CHAR_SPECIFIERS = "WF" + +def _display_width(line, offset=None): + """Calculate the extra amount of width space the given source + code segment might take if it 
were to be displayed on a fixed + width output device. Supports wide unicode characters and emojis.""" + + if offset is None: + offset = len(line) + + # Fast track for ASCII-only strings + if line.isascii(): + return offset + + import unicodedata + + return sum( + 2 if unicodedata.east_asian_width(char) in _WIDE_CHAR_SPECIFIERS else 1 + for char in line[:offset] + ) + + + +class _ExceptionPrintContext: + def __init__(self): + self.seen = set() + self.exception_group_depth = 0 + self.need_close = False + + def indent(self): + return ' ' * (2 * self.exception_group_depth) + + def emit(self, text_gen, margin_char=None): + if margin_char is None: + margin_char = '|' + indent_str = self.indent() + if self.exception_group_depth: + indent_str += margin_char + ' ' + + if isinstance(text_gen, str): + yield textwrap.indent(text_gen, indent_str, lambda line: True) + else: + for text in text_gen: + yield textwrap.indent(text, indent_str, lambda line: True) + + +class TracebackException: + """An exception ready for rendering. + + The traceback module captures enough attributes from the original exception + to this intermediary form to ensure that no references are held, while + still being able to fully print or format it. + + max_group_width and max_group_depth control the formatting of exception + groups. The depth refers to the nesting level of the group, and the width + refers to the size of a single exception group's exceptions array. The + formatted output is truncated when either limit is exceeded. + + Use `from_exception` to create TracebackException instances from exception + objects, or the constructor to create TracebackException instances from + individual components. + + - :attr:`__cause__` A TracebackException of the original *__cause__*. + - :attr:`__context__` A TracebackException of the original *__context__*. + - :attr:`exceptions` For exception groups - a list of TracebackException + instances for the nested *exceptions*. ``None`` for other exceptions. + - :attr:`__suppress_context__` The *__suppress_context__* value from the + original exception. + - :attr:`stack` A `StackSummary` representing the traceback. + - :attr:`exc_type` (deprecated) The class of the original traceback. + - :attr:`exc_type_str` String display of exc_type + - :attr:`filename` For syntax errors - the filename where the error + occurred. + - :attr:`lineno` For syntax errors - the linenumber where the error + occurred. + - :attr:`end_lineno` For syntax errors - the end linenumber where the error + occurred. Can be `None` if not present. + - :attr:`text` For syntax errors - the text where the error + occurred. + - :attr:`offset` For syntax errors - the offset into the text where the + error occurred. + - :attr:`end_offset` For syntax errors - the end offset into the text where + the error occurred. Can be `None` if not present. + - :attr:`msg` For syntax errors - the compiler error message. + """ + + def __init__(self, exc_type, exc_value, exc_traceback, *, limit=None, + lookup_lines=True, capture_locals=False, compact=False, + max_group_width=15, max_group_depth=10, save_exc_type=True, _seen=None): + # NB: we need to accept exc_traceback, exc_value, exc_traceback to + # permit backwards compat with the existing API, otherwise we + # need stub thunk objects just to glue it together. + # Handle loops in __cause__ or __context__. 
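+        # Illustrative usage sketch (not part of the upstream module): the
+        # usual entry point is the from_exception() classmethod rather than
+        # this constructor, e.g.
+        #
+        #     try:
+        #         1 / 0
+        #     except ZeroDivisionError as e:
+        #         te = TracebackException.from_exception(e)
+        #         print("".join(te.format()))
+        #
+        # The _seen set of id()s below breaks reference cycles, e.g. an
+        # exception whose __cause__/__context__ chain loops back onto itself.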
+ is_recursive_call = _seen is not None + if _seen is None: + _seen = set() + _seen.add(id(exc_value)) + + self.max_group_width = max_group_width + self.max_group_depth = max_group_depth + + self.stack = StackSummary._extract_from_extended_frame_gen( + _walk_tb_with_full_positions(exc_traceback), + limit=limit, lookup_lines=lookup_lines, + capture_locals=capture_locals) + + self._exc_type = exc_type if save_exc_type else None + + # Capture now to permit freeing resources: only complication is in the + # unofficial API _format_final_exc_line + self._str = _safe_string(exc_value, 'exception') + try: + self.__notes__ = getattr(exc_value, '__notes__', None) + except Exception as e: + self.__notes__ = [ + f'Ignored error getting __notes__: {_safe_string(e, '__notes__', repr)}'] + + self._is_syntax_error = False + self._have_exc_type = exc_type is not None + if exc_type is not None: + self.exc_type_qualname = exc_type.__qualname__ + self.exc_type_module = exc_type.__module__ + else: + self.exc_type_qualname = None + self.exc_type_module = None + + if exc_type and issubclass(exc_type, SyntaxError): + # Handle SyntaxError's specially + self.filename = exc_value.filename + lno = exc_value.lineno + self.lineno = str(lno) if lno is not None else None + end_lno = exc_value.end_lineno + self.end_lineno = str(end_lno) if end_lno is not None else None + self.text = exc_value.text + self.offset = exc_value.offset + self.end_offset = exc_value.end_offset + self.msg = exc_value.msg + self._is_syntax_error = True + self._exc_metadata = getattr(exc_value, "_metadata", None) + elif exc_type and issubclass(exc_type, ImportError) and \ + getattr(exc_value, "name_from", None) is not None: + wrong_name = getattr(exc_value, "name_from", None) + suggestion = _compute_suggestion_error(exc_value, exc_traceback, wrong_name) + if suggestion: + self._str += f". Did you mean: '{suggestion}'?" + elif exc_type and issubclass(exc_type, (NameError, AttributeError)) and \ + getattr(exc_value, "name", None) is not None: + wrong_name = getattr(exc_value, "name", None) + suggestion = _compute_suggestion_error(exc_value, exc_traceback, wrong_name) + if suggestion: + self._str += f". Did you mean: '{suggestion}'?" + if issubclass(exc_type, NameError): + wrong_name = getattr(exc_value, "name", None) + if wrong_name is not None and wrong_name in sys.stdlib_module_names: + if suggestion: + self._str += f" Or did you forget to import '{wrong_name}'?" + else: + self._str += f". Did you forget to import '{wrong_name}'?" 
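+        # Example renderings produced by the suggestion logic above
+        # (illustrative):
+        #     NameError: name 'prnt' is not defined. Did you mean: 'print'?
+        #     NameError: name 'os' is not defined. Did you forget to import 'os'?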
+ if lookup_lines: + self._load_lines() + self.__suppress_context__ = \ + exc_value.__suppress_context__ if exc_value is not None else False + + # Convert __cause__ and __context__ to `TracebackExceptions`s, use a + # queue to avoid recursion (only the top-level call gets _seen == None) + if not is_recursive_call: + queue = [(self, exc_value)] + while queue: + te, e = queue.pop() + if (e is not None and e.__cause__ is not None + and id(e.__cause__) not in _seen): + cause = TracebackException( + type(e.__cause__), + e.__cause__, + e.__cause__.__traceback__, + limit=limit, + lookup_lines=lookup_lines, + capture_locals=capture_locals, + max_group_width=max_group_width, + max_group_depth=max_group_depth, + _seen=_seen) + else: + cause = None + + if compact: + need_context = (cause is None and + e is not None and + not e.__suppress_context__) + else: + need_context = True + if (e is not None and e.__context__ is not None + and need_context and id(e.__context__) not in _seen): + context = TracebackException( + type(e.__context__), + e.__context__, + e.__context__.__traceback__, + limit=limit, + lookup_lines=lookup_lines, + capture_locals=capture_locals, + max_group_width=max_group_width, + max_group_depth=max_group_depth, + _seen=_seen) + else: + context = None + + if e is not None and isinstance(e, BaseExceptionGroup): + exceptions = [] + for exc in e.exceptions: + texc = TracebackException( + type(exc), + exc, + exc.__traceback__, + limit=limit, + lookup_lines=lookup_lines, + capture_locals=capture_locals, + max_group_width=max_group_width, + max_group_depth=max_group_depth, + _seen=_seen) + exceptions.append(texc) + else: + exceptions = None + + te.__cause__ = cause + te.__context__ = context + te.exceptions = exceptions + if cause: + queue.append((te.__cause__, e.__cause__)) + if context: + queue.append((te.__context__, e.__context__)) + if exceptions: + queue.extend(zip(te.exceptions, e.exceptions)) + + @classmethod + def from_exception(cls, exc, *args, **kwargs): + """Create a TracebackException from an exception.""" + return cls(type(exc), exc, exc.__traceback__, *args, **kwargs) + + @property + def exc_type(self): + warnings.warn('Deprecated in 3.13. Use exc_type_str instead.', + DeprecationWarning, stacklevel=2) + return self._exc_type + + @property + def exc_type_str(self): + if not self._have_exc_type: + return None + stype = self.exc_type_qualname + smod = self.exc_type_module + if smod not in ("__main__", "builtins"): + if not isinstance(smod, str): + smod = "" + stype = smod + '.' + stype + return stype + + def _load_lines(self): + """Private API. force all lines in the stack to be loaded.""" + for frame in self.stack: + frame.line + + def __eq__(self, other): + if isinstance(other, TracebackException): + return self.__dict__ == other.__dict__ + return NotImplemented + + def __str__(self): + return self._str + + def format_exception_only(self, *, show_group=False, _depth=0, **kwargs): + """Format the exception part of the traceback. + + The return value is a generator of strings, each ending in a newline. + + Generator yields the exception message. + For :exc:`SyntaxError` exceptions, it + also yields (before the exception message) + several lines that (when printed) + display detailed information about where the syntax error occurred. + Following the message, generator also yields + all the exception's ``__notes__``. 
+ + When *show_group* is ``True``, and the exception is an instance of + :exc:`BaseExceptionGroup`, the nested exceptions are included as + well, recursively, with indentation relative to their nesting depth. + """ + colorize = kwargs.get("colorize", False) + + indent = 3 * _depth * ' ' + if not self._have_exc_type: + yield indent + _format_final_exc_line(None, self._str, colorize=colorize) + return + + stype = self.exc_type_str + if not self._is_syntax_error: + if _depth > 0: + # Nested exceptions needs correct handling of multiline messages. + formatted = _format_final_exc_line( + stype, self._str, insert_final_newline=False, colorize=colorize + ).split('\n') + yield from [ + indent + l + '\n' + for l in formatted + ] + else: + yield _format_final_exc_line(stype, self._str, colorize=colorize) + else: + yield from [indent + l for l in self._format_syntax_error(stype, colorize=colorize)] + + if ( + isinstance(self.__notes__, collections.abc.Sequence) + and not isinstance(self.__notes__, (str, bytes)) + ): + for note in self.__notes__: + note = _safe_string(note, 'note') + yield from [indent + l + '\n' for l in note.split('\n')] + elif self.__notes__ is not None: + yield indent + "{}\n".format(_safe_string(self.__notes__, '__notes__', func=repr)) + + if self.exceptions and show_group: + for ex in self.exceptions: + yield from ex.format_exception_only(show_group=show_group, _depth=_depth+1, colorize=colorize) + + def _find_keyword_typos(self): + assert self._is_syntax_error + try: + import _suggestions + except ImportError: + _suggestions = None + + # Only try to find keyword typos if there is no custom message + if self.msg != "invalid syntax" and "Perhaps you forgot a comma" not in self.msg: + return + + if not self._exc_metadata: + return + + line, offset, source = self._exc_metadata + end_line = int(self.lineno) if self.lineno is not None else 0 + lines = None + from_filename = False + + if source is None: + if self.filename: + try: + with open(self.filename) as f: + lines = f.read().splitlines() + except Exception: + line, end_line, offset = 0,1,0 + else: + from_filename = True + lines = lines if lines is not None else self.text.splitlines() + else: + lines = source.splitlines() + + error_code = lines[line -1 if line > 0 else 0:end_line] + error_code = textwrap.dedent('\n'.join(error_code)) + + # Do not continue if the source is too large + if len(error_code) > 1024: + return + + error_lines = error_code.splitlines() + tokens = tokenize.generate_tokens(io.StringIO(error_code).readline) + tokens_left_to_process = 10 + import difflib + for token in tokens: + start, end = token.start, token.end + if token.type != tokenize.NAME: + continue + # Only consider NAME tokens on the same line as the error + the_end = end_line if line == 0 else end_line + 1 + if from_filename and token.start[0]+line != the_end: + continue + wrong_name = token.string + if wrong_name in keyword.kwlist: + continue + + # Limit the number of valid tokens to consider to not spend + # to much time in this function + tokens_left_to_process -= 1 + if tokens_left_to_process < 0: + break + # Limit the number of possible matches to try + max_matches = 3 + matches = [] + if _suggestions is not None: + suggestion = _suggestions._generate_suggestions(keyword.kwlist, wrong_name) + if suggestion: + matches.append(suggestion) + matches.extend(difflib.get_close_matches(wrong_name, keyword.kwlist, n=max_matches, cutoff=0.5)) + matches = matches[:max_matches] + for suggestion in matches: + if not suggestion or suggestion == wrong_name: 
+ continue + # Try to replace the token with the keyword + the_lines = error_lines.copy() + the_line = the_lines[start[0] - 1][:] + chars = list(the_line) + chars[token.start[1]:token.end[1]] = suggestion + the_lines[start[0] - 1] = ''.join(chars) + code = '\n'.join(the_lines) + + # Check if it works + try: + codeop.compile_command(code, symbol="exec", flags=codeop.PyCF_ONLY_AST) + except SyntaxError: + continue + + # Keep token.line but handle offsets correctly + self.text = token.line + self.offset = token.start[1] + 1 + self.end_offset = token.end[1] + 1 + self.lineno = start[0] + self.end_lineno = end[0] + self.msg = f"invalid syntax. Did you mean '{suggestion}'?" + return + + + def _format_syntax_error(self, stype, **kwargs): + """Format SyntaxError exceptions (internal helper).""" + # Show exactly where the problem was found. + colorize = kwargs.get("colorize", False) + if colorize: + theme = _colorize.get_theme(force_color=True).traceback + else: + theme = _colorize.get_theme(force_no_color=True).traceback + filename_suffix = '' + if self.lineno is not None: + yield ' File {}"{}"{}, line {}{}{}\n'.format( + theme.filename, + self.filename or "", + theme.reset, + theme.line_no, + self.lineno, + theme.reset, + ) + elif self.filename is not None: + filename_suffix = ' ({})'.format(self.filename) + + text = self.text + if isinstance(text, str): + # text = " foo\n" + # rtext = " foo" + # ltext = "foo" + with suppress(Exception): + self._find_keyword_typos() + text = self.text + rtext = text.rstrip('\n') + ltext = rtext.lstrip(' \n\f') + spaces = len(rtext) - len(ltext) + if self.offset is None: + yield ' {}\n'.format(ltext) + elif isinstance(self.offset, int): + offset = self.offset + if self.lineno == self.end_lineno: + end_offset = ( + self.end_offset + if ( + isinstance(self.end_offset, int) + and self.end_offset != 0 + ) + else offset + ) + else: + end_offset = len(rtext) + 1 + + if self.text and offset > len(self.text): + offset = len(rtext) + 1 + if self.text and end_offset > len(self.text): + end_offset = len(rtext) + 1 + if offset >= end_offset or end_offset < 0: + end_offset = offset + 1 + + # Convert 1-based column offset to 0-based index into stripped text + colno = offset - 1 - spaces + end_colno = end_offset - 1 - spaces + caretspace = ' ' + if colno >= 0: + # non-space whitespace (likes tabs) must be kept for alignment + caretspace = ((c if c.isspace() else ' ') for c in ltext[:colno]) + start_color = end_color = "" + if colorize: + # colorize from colno to end_colno + ltext = ( + ltext[:colno] + + theme.error_highlight + ltext[colno:end_colno] + theme.reset + + ltext[end_colno:] + ) + start_color = theme.error_highlight + end_color = theme.reset + yield ' {}\n'.format(ltext) + yield ' {}{}{}{}\n'.format( + "".join(caretspace), + start_color, + ('^' * (end_colno - colno)), + end_color, + ) + else: + yield ' {}\n'.format(ltext) + msg = self.msg or "" + yield "{}{}{}: {}{}{}{}\n".format( + theme.type, + stype, + theme.reset, + theme.message, + msg, + theme.reset, + filename_suffix, + ) + + def format(self, *, chain=True, _ctx=None, **kwargs): + """Format the exception. + + If chain is not *True*, *__cause__* and *__context__* will not be formatted. + + The return value is a generator of strings, each ending in a newline and + some containing internal newlines. `print_exception` is a wrapper around + this method which just prints the lines to a file. + + The message indicating which exception occurred is always the last + string in the output. 
+ """ + colorize = kwargs.get("colorize", False) + if _ctx is None: + _ctx = _ExceptionPrintContext() + + output = [] + exc = self + if chain: + while exc: + if exc.__cause__ is not None: + chained_msg = _cause_message + chained_exc = exc.__cause__ + elif (exc.__context__ is not None and + not exc.__suppress_context__): + chained_msg = _context_message + chained_exc = exc.__context__ + else: + chained_msg = None + chained_exc = None + + output.append((chained_msg, exc)) + exc = chained_exc + else: + output.append((None, exc)) + + for msg, exc in reversed(output): + if msg is not None: + yield from _ctx.emit(msg) + if exc.exceptions is None: + if exc.stack: + yield from _ctx.emit('Traceback (most recent call last):\n') + yield from _ctx.emit(exc.stack.format(colorize=colorize)) + yield from _ctx.emit(exc.format_exception_only(colorize=colorize)) + elif _ctx.exception_group_depth > self.max_group_depth: + # exception group, but depth exceeds limit + yield from _ctx.emit( + f"... (max_group_depth is {self.max_group_depth})\n") + else: + # format exception group + is_toplevel = (_ctx.exception_group_depth == 0) + if is_toplevel: + _ctx.exception_group_depth += 1 + + if exc.stack: + yield from _ctx.emit( + 'Exception Group Traceback (most recent call last):\n', + margin_char = '+' if is_toplevel else None) + yield from _ctx.emit(exc.stack.format(colorize=colorize)) + + yield from _ctx.emit(exc.format_exception_only(colorize=colorize)) + num_excs = len(exc.exceptions) + if num_excs <= self.max_group_width: + n = num_excs + else: + n = self.max_group_width + 1 + _ctx.need_close = False + for i in range(n): + last_exc = (i == n-1) + if last_exc: + # The closing frame may be added by a recursive call + _ctx.need_close = True + + if self.max_group_width is not None: + truncated = (i >= self.max_group_width) + else: + truncated = False + title = f'{i+1}' if not truncated else '...' + yield (_ctx.indent() + + ('+-' if i==0 else ' ') + + f'+---------------- {title} ----------------\n') + _ctx.exception_group_depth += 1 + if not truncated: + yield from exc.exceptions[i].format(chain=chain, _ctx=_ctx, colorize=colorize) + else: + remaining = num_excs - self.max_group_width + plural = 's' if remaining > 1 else '' + yield from _ctx.emit( + f"and {remaining} more exception{plural}\n") + + if last_exc and _ctx.need_close: + yield (_ctx.indent() + + "+------------------------------------\n") + _ctx.need_close = False + _ctx.exception_group_depth -= 1 + + if is_toplevel: + assert _ctx.exception_group_depth == 1 + _ctx.exception_group_depth = 0 + + + def print(self, *, file=None, chain=True, **kwargs): + """Print the result of self.format(chain=chain) to 'file'.""" + colorize = kwargs.get("colorize", False) + if file is None: + file = sys.stderr + for line in self.format(chain=chain, colorize=colorize): + print(line, file=file, end="") + + +_MAX_CANDIDATE_ITEMS = 750 +_MAX_STRING_SIZE = 40 +_MOVE_COST = 2 +_CASE_COST = 1 + + +def _substitution_cost(ch_a, ch_b): + if ch_a == ch_b: + return 0 + if ch_a.lower() == ch_b.lower(): + return _CASE_COST + return _MOVE_COST + + +def _get_safe___dir__(obj): + # Use obj.__dir__() to avoid a TypeError when calling dir(obj). + # See gh-131001 and gh-139933. 
+ try: + d = obj.__dir__() + except TypeError: # when obj is a class + d = type(obj).__dir__(obj) + return sorted(x for x in d if isinstance(x, str)) + + +def _compute_suggestion_error(exc_value, tb, wrong_name): + if wrong_name is None or not isinstance(wrong_name, str): + return None + if isinstance(exc_value, AttributeError): + obj = exc_value.obj + try: + d = _get_safe___dir__(obj) + hide_underscored = (wrong_name[:1] != '_') + if hide_underscored and tb is not None: + while tb.tb_next is not None: + tb = tb.tb_next + frame = tb.tb_frame + if 'self' in frame.f_locals and frame.f_locals['self'] is obj: + hide_underscored = False + if hide_underscored: + d = [x for x in d if x[:1] != '_'] + except Exception: + return None + elif isinstance(exc_value, ImportError): + try: + mod = __import__(exc_value.name) + d = _get_safe___dir__(mod) + if wrong_name[:1] != '_': + d = [x for x in d if x[:1] != '_'] + except Exception: + return None + else: + assert isinstance(exc_value, NameError) + # find most recent frame + if tb is None: + return None + while tb.tb_next is not None: + tb = tb.tb_next + frame = tb.tb_frame + d = ( + list(frame.f_locals) + + list(frame.f_globals) + + list(frame.f_builtins) + ) + d = [x for x in d if isinstance(x, str)] + + # Check first if we are in a method and the instance + # has the wrong name as attribute + if 'self' in frame.f_locals: + self = frame.f_locals['self'] + try: + has_wrong_name = hasattr(self, wrong_name) + except Exception: + has_wrong_name = False + if has_wrong_name: + return f"self.{wrong_name}" + + try: + import _suggestions + except ImportError: + pass + else: + return _suggestions._generate_suggestions(d, wrong_name) + + # Compute closest match + + if len(d) > _MAX_CANDIDATE_ITEMS: + return None + wrong_name_len = len(wrong_name) + if wrong_name_len > _MAX_STRING_SIZE: + return None + best_distance = wrong_name_len + suggestion = None + for possible_name in d: + if possible_name == wrong_name: + # A missing attribute is "found". Don't suggest it (see GH-88821). + continue + # No more than 1/3 of the involved characters should need changed. + max_distance = (len(possible_name) + wrong_name_len + 3) * _MOVE_COST // 6 + # Don't take matches we've already beaten. + max_distance = min(max_distance, best_distance - 1) + current_distance = _levenshtein_distance(wrong_name, possible_name, max_distance) + if current_distance > max_distance: + continue + if not suggestion or current_distance < best_distance: + suggestion = possible_name + best_distance = current_distance + return suggestion + + +def _levenshtein_distance(a, b, max_cost): + # A Python implementation of Python/suggestions.c:levenshtein_distance. + + # Both strings are the same + if a == b: + return 0 + + # Trim away common affixes + pre = 0 + while a[pre:] and b[pre:] and a[pre] == b[pre]: + pre += 1 + a = a[pre:] + b = b[pre:] + post = 0 + while a[:post or None] and b[:post or None] and a[post-1] == b[post-1]: + post -= 1 + a = a[:post or None] + b = b[:post or None] + if not a or not b: + return _MOVE_COST * (len(a) + len(b)) + if len(a) > _MAX_STRING_SIZE or len(b) > _MAX_STRING_SIZE: + return max_cost + 1 + + # Prefer shorter buffer + if len(b) < len(a): + a, b = b, a + + # Quick fail when a match is impossible + if (len(b) - len(a)) * _MOVE_COST > max_cost: + return max_cost + 1 + + # Instead of producing the whole traditional len(a)-by-len(b) + # matrix, we can update just one row in place. 
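+    # Worked example (illustrative), with _MOVE_COST=2 and _CASE_COST=1:
+    #     _levenshtein_distance("cat", "cut", 10) -> 2  (one full substitution)
+    #     _levenshtein_distance("cat", "Cat", 10) -> 1  (case-only change)
+    # The common prefix/suffix trimming above means the row buffer only ever
+    # spans the differing middle portion of the two strings.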
+ # Initialize the buffer row + row = list(range(_MOVE_COST, _MOVE_COST * (len(a) + 1), _MOVE_COST)) + + result = 0 + for bindex in range(len(b)): + bchar = b[bindex] + distance = result = bindex * _MOVE_COST + minimum = sys.maxsize + for index in range(len(a)): + # 1) Previous distance in this row is cost(b[:b_index], a[:index]) + substitute = distance + _substitution_cost(bchar, a[index]) + # 2) cost(b[:b_index], a[:index+1]) from previous row + distance = row[index] + # 3) existing result is cost(b[:b_index+1], a[index]) + + insert_delete = min(result, distance) + _MOVE_COST + result = min(insert_delete, substitute) + + # cost(b[:b_index+1], a[:index+1]) + row[index] = result + if result < minimum: + minimum = result + if minimum > max_cost: + # Everything in this row is too big, so bail early. + return max_cost + 1 + return result diff --git a/Python313_13_x86_Template/Lib/tracemalloc.py b/Python314_4_x86_Template/Lib/tracemalloc.py similarity index 100% rename from Python313_13_x86_Template/Lib/tracemalloc.py rename to Python314_4_x86_Template/Lib/tracemalloc.py diff --git a/Python313_13_x86_Template/Lib/tty.py b/Python314_4_x86_Template/Lib/tty.py similarity index 100% rename from Python313_13_x86_Template/Lib/tty.py rename to Python314_4_x86_Template/Lib/tty.py diff --git a/Python314_4_x86_Template/Lib/types.py b/Python314_4_x86_Template/Lib/types.py new file mode 100644 index 00000000..fa6324fb --- /dev/null +++ b/Python314_4_x86_Template/Lib/types.py @@ -0,0 +1,344 @@ +""" +Define names for built-in types that aren't directly accessible as a builtin. +""" + +# Iterators in Python aren't a matter of type but of protocol. A large +# and changing number of builtin types implement *some* flavor of +# iterator. Don't check the type! Use hasattr to check for both +# "__iter__" and "__next__" attributes instead. + +try: + from _types import * +except ImportError: + import sys + + def _f(): pass + FunctionType = type(_f) + LambdaType = type(lambda: None) # Same as FunctionType + CodeType = type(_f.__code__) + MappingProxyType = type(type.__dict__) + SimpleNamespace = type(sys.implementation) + + def _cell_factory(): + a = 1 + def f(): + nonlocal a + return f.__closure__[0] + CellType = type(_cell_factory()) + + def _g(): + yield 1 + GeneratorType = type(_g()) + + async def _c(): pass + _c = _c() + CoroutineType = type(_c) + _c.close() # Prevent ResourceWarning + + async def _ag(): + yield + _ag = _ag() + AsyncGeneratorType = type(_ag) + + class _C: + def _m(self): pass + MethodType = type(_C()._m) + + BuiltinFunctionType = type(len) + BuiltinMethodType = type([].append) # Same as BuiltinFunctionType + + WrapperDescriptorType = type(object.__init__) + MethodWrapperType = type(object().__str__) + MethodDescriptorType = type(str.join) + ClassMethodDescriptorType = type(dict.__dict__['fromkeys']) + + ModuleType = type(sys) + + try: + raise TypeError + except TypeError as exc: + TracebackType = type(exc.__traceback__) + FrameType = type(exc.__traceback__.tb_frame) + + GetSetDescriptorType = type(FunctionType.__code__) + MemberDescriptorType = type(FunctionType.__globals__) + + GenericAlias = type(list[int]) + UnionType = type(int | str) + + EllipsisType = type(Ellipsis) + NoneType = type(None) + NotImplementedType = type(NotImplemented) + + # CapsuleType cannot be accessed from pure Python, + # so there is no fallback definition. 
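+    # Quick sanity checks of the fallback definitions (illustrative):
+    #     >>> isinstance(len, BuiltinFunctionType)
+    #     True
+    #     >>> NoneType is type(None)
+    #     True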
+ + del sys, _f, _g, _C, _c, _ag, _cell_factory # Not for export + + +# Provide a PEP 3115 compliant mechanism for class creation +def new_class(name, bases=(), kwds=None, exec_body=None): + """Create a class object dynamically using the appropriate metaclass.""" + resolved_bases = resolve_bases(bases) + meta, ns, kwds = prepare_class(name, resolved_bases, kwds) + if exec_body is not None: + exec_body(ns) + if resolved_bases is not bases: + ns['__orig_bases__'] = bases + return meta(name, resolved_bases, ns, **kwds) + +def resolve_bases(bases): + """Resolve MRO entries dynamically as specified by PEP 560.""" + new_bases = list(bases) + updated = False + shift = 0 + for i, base in enumerate(bases): + if isinstance(base, type): + continue + if not hasattr(base, "__mro_entries__"): + continue + new_base = base.__mro_entries__(bases) + updated = True + if not isinstance(new_base, tuple): + raise TypeError("__mro_entries__ must return a tuple") + else: + new_bases[i+shift:i+shift+1] = new_base + shift += len(new_base) - 1 + if not updated: + return bases + return tuple(new_bases) + +def prepare_class(name, bases=(), kwds=None): + """Call the __prepare__ method of the appropriate metaclass. + + Returns (metaclass, namespace, kwds) as a 3-tuple + + *metaclass* is the appropriate metaclass + *namespace* is the prepared class namespace + *kwds* is an updated copy of the passed in kwds argument with any + 'metaclass' entry removed. If no kwds argument is passed in, this will + be an empty dict. + """ + if kwds is None: + kwds = {} + else: + kwds = dict(kwds) # Don't alter the provided mapping + if 'metaclass' in kwds: + meta = kwds.pop('metaclass') + else: + if bases: + meta = type(bases[0]) + else: + meta = type + if isinstance(meta, type): + # when meta is a type, we first determine the most-derived metaclass + # instead of invoking the initial candidate directly + meta = _calculate_meta(meta, bases) + if hasattr(meta, '__prepare__'): + ns = meta.__prepare__(name, bases, **kwds) + else: + ns = {} + return meta, ns, kwds + +def _calculate_meta(meta, bases): + """Calculate the most derived metaclass.""" + winner = meta + for base in bases: + base_meta = type(base) + if issubclass(winner, base_meta): + continue + if issubclass(base_meta, winner): + winner = base_meta + continue + # else: + raise TypeError("metaclass conflict: " + "the metaclass of a derived class " + "must be a (non-strict) subclass " + "of the metaclasses of all its bases") + return winner + + +def get_original_bases(cls, /): + """Return the class's "original" bases prior to modification by `__mro_entries__`. + + Examples:: + + from typing import TypeVar, Generic, NamedTuple, TypedDict + + T = TypeVar("T") + class Foo(Generic[T]): ... + class Bar(Foo[int], float): ... + class Baz(list[str]): ... + Eggs = NamedTuple("Eggs", [("a", int), ("b", str)]) + Spam = TypedDict("Spam", {"a": int, "b": str}) + + assert get_original_bases(Bar) == (Foo[int], float) + assert get_original_bases(Baz) == (list[str],) + assert get_original_bases(Eggs) == (NamedTuple,) + assert get_original_bases(Spam) == (TypedDict,) + assert get_original_bases(int) == (object,) + """ + try: + return cls.__dict__.get("__orig_bases__", cls.__bases__) + except AttributeError: + raise TypeError( + f"Expected an instance of type, not {type(cls).__name__!r}" + ) from None + + +class DynamicClassAttribute: + """Route attribute access on a class to __getattr__. 
+ + This is a descriptor, used to define attributes that act differently when + accessed through an instance and through a class. Instance access remains + normal, but access to an attribute through a class will be routed to the + class's __getattr__ method; this is done by raising AttributeError. + + This allows one to have properties active on an instance, and have virtual + attributes on the class with the same name. (Enum used this between Python + versions 3.4 - 3.9 .) + + Subclass from this to use a different method of accessing virtual attributes + and still be treated properly by the inspect module. (Enum uses this since + Python 3.10 .) + + """ + def __init__(self, fget=None, fset=None, fdel=None, doc=None): + self.fget = fget + self.fset = fset + self.fdel = fdel + # next two lines make DynamicClassAttribute act the same as property + self.__doc__ = doc or fget.__doc__ + self.overwrite_doc = doc is None + # support for abstract methods + self.__isabstractmethod__ = bool(getattr(fget, '__isabstractmethod__', False)) + + def __get__(self, instance, ownerclass=None): + if instance is None: + if self.__isabstractmethod__: + return self + raise AttributeError() + elif self.fget is None: + raise AttributeError("unreadable attribute") + return self.fget(instance) + + def __set__(self, instance, value): + if self.fset is None: + raise AttributeError("can't set attribute") + self.fset(instance, value) + + def __delete__(self, instance): + if self.fdel is None: + raise AttributeError("can't delete attribute") + self.fdel(instance) + + def getter(self, fget): + fdoc = fget.__doc__ if self.overwrite_doc else None + result = type(self)(fget, self.fset, self.fdel, fdoc or self.__doc__) + result.overwrite_doc = self.overwrite_doc + return result + + def setter(self, fset): + result = type(self)(self.fget, fset, self.fdel, self.__doc__) + result.overwrite_doc = self.overwrite_doc + return result + + def deleter(self, fdel): + result = type(self)(self.fget, self.fset, fdel, self.__doc__) + result.overwrite_doc = self.overwrite_doc + return result + + +class _GeneratorWrapper: + # TODO: Implement this in C. + def __init__(self, gen): + self.__wrapped = gen + self.__isgen = gen.__class__ is GeneratorType + self.__name__ = getattr(gen, '__name__', None) + self.__qualname__ = getattr(gen, '__qualname__', None) + def send(self, val): + return self.__wrapped.send(val) + def throw(self, tp, *rest): + return self.__wrapped.throw(tp, *rest) + def close(self): + return self.__wrapped.close() + @property + def gi_code(self): + return self.__wrapped.gi_code + @property + def gi_frame(self): + return self.__wrapped.gi_frame + @property + def gi_running(self): + return self.__wrapped.gi_running + @property + def gi_yieldfrom(self): + return self.__wrapped.gi_yieldfrom + @property + def gi_suspended(self): + return self.__wrapped.gi_suspended + cr_code = gi_code + cr_frame = gi_frame + cr_running = gi_running + cr_await = gi_yieldfrom + cr_suspended = gi_suspended + def __next__(self): + return next(self.__wrapped) + def __iter__(self): + if self.__isgen: + return self.__wrapped + return self + __await__ = __iter__ + +def coroutine(func): + """Convert regular generator function to a coroutine.""" + + if not callable(func): + raise TypeError('types.coroutine() expects a callable') + + if (func.__class__ is FunctionType and + getattr(func, '__code__', None).__class__ is CodeType): + + co_flags = func.__code__.co_flags + + # Check if 'func' is a coroutine function. 
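+        # The hex masks below mirror the CO_* constants that are also exposed
+        # as inspect.CO_COROUTINE (0x80), inspect.CO_ITERABLE_COROUTINE (0x100)
+        # and inspect.CO_GENERATOR (0x20) -- cross-reference for readers.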
+ # (0x180 == CO_COROUTINE | CO_ITERABLE_COROUTINE) + if co_flags & 0x180: + return func + + # Check if 'func' is a generator function. + # (0x20 == CO_GENERATOR) + if co_flags & 0x20: + # TODO: Implement this in C. + co = func.__code__ + # 0x100 == CO_ITERABLE_COROUTINE + func.__code__ = co.replace(co_flags=co.co_flags | 0x100) + return func + + # The following code is primarily to support functions that + # return generator-like objects (for instance generators + # compiled with Cython). + + # Delay functools and _collections_abc import for speeding up types import. + import functools + import _collections_abc + @functools.wraps(func) + def wrapped(*args, **kwargs): + coro = func(*args, **kwargs) + if (coro.__class__ is CoroutineType or + coro.__class__ is GeneratorType and coro.gi_code.co_flags & 0x100): + # 'coro' is a native coroutine object or an iterable coroutine + return coro + if (isinstance(coro, _collections_abc.Generator) and + not isinstance(coro, _collections_abc.Coroutine)): + # 'coro' is either a pure Python generator iterator, or it + # implements collections.abc.Generator (and does not implement + # collections.abc.Coroutine). + return _GeneratorWrapper(coro) + # 'coro' is either an instance of collections.abc.Coroutine or + # some other object -- pass it through. + return coro + + return wrapped + +__all__ = [n for n in globals() if not n.startswith('_')] # for pydoc diff --git a/Python314_4_x86_Template/Lib/typing.py b/Python314_4_x86_Template/Lib/typing.py new file mode 100644 index 00000000..38021118 --- /dev/null +++ b/Python314_4_x86_Template/Lib/typing.py @@ -0,0 +1,3854 @@ +""" +The typing module: Support for gradual typing as defined by PEP 484 and subsequent PEPs. + +Among other things, the module includes the following: +* Generic, Protocol, and internal machinery to support generic aliases. + All subscripted types like X[int], Union[int, str] are generic aliases. +* Various "special forms" that have unique meanings in type annotations: + NoReturn, Never, ClassVar, Self, Concatenate, Unpack, and others. +* Classes whose instances can be type arguments to generic classes and functions: + TypeVar, ParamSpec, TypeVarTuple. +* Public helper functions: get_type_hints, overload, cast, final, and others. +* Several protocols to support duck-typing: + SupportsFloat, SupportsIndex, SupportsAbs, and others. +* Special types: NewType, NamedTuple, TypedDict. +* Deprecated aliases for builtin types and collections.abc ABCs. + +Any name not present in __all__ is an implementation detail +that may be changed without notice. Use at your own risk! +""" + +from abc import abstractmethod, ABCMeta +import collections +from collections import defaultdict +import collections.abc +import copyreg +import functools +import operator +import sys +import types +from types import GenericAlias + +from _typing import ( + _idfunc, + TypeVar, + ParamSpec, + TypeVarTuple, + ParamSpecArgs, + ParamSpecKwargs, + TypeAliasType, + Generic, + Union, + NoDefault, +) + +# Please keep __all__ alphabetized within each category. +__all__ = [ + # Super-special typing primitives. + 'Annotated', + 'Any', + 'Callable', + 'ClassVar', + 'Concatenate', + 'Final', + 'ForwardRef', + 'Generic', + 'Literal', + 'Optional', + 'ParamSpec', + 'Protocol', + 'Tuple', + 'Type', + 'TypeVar', + 'TypeVarTuple', + 'Union', + + # ABCs (from collections.abc). + 'AbstractSet', # collections.abc.Set. 
+ 'ByteString', + 'Container', + 'ContextManager', + 'Hashable', + 'ItemsView', + 'Iterable', + 'Iterator', + 'KeysView', + 'Mapping', + 'MappingView', + 'MutableMapping', + 'MutableSequence', + 'MutableSet', + 'Sequence', + 'Sized', + 'ValuesView', + 'Awaitable', + 'AsyncIterator', + 'AsyncIterable', + 'Coroutine', + 'Collection', + 'AsyncGenerator', + 'AsyncContextManager', + + # Structural checks, a.k.a. protocols. + 'Reversible', + 'SupportsAbs', + 'SupportsBytes', + 'SupportsComplex', + 'SupportsFloat', + 'SupportsIndex', + 'SupportsInt', + 'SupportsRound', + + # Concrete collection types. + 'ChainMap', + 'Counter', + 'Deque', + 'Dict', + 'DefaultDict', + 'List', + 'OrderedDict', + 'Set', + 'FrozenSet', + 'NamedTuple', # Not really a type. + 'TypedDict', # Not really a type. + 'Generator', + + # Other concrete types. + 'BinaryIO', + 'IO', + 'Match', + 'Pattern', + 'TextIO', + + # One-off things. + 'AnyStr', + 'assert_type', + 'assert_never', + 'cast', + 'clear_overloads', + 'dataclass_transform', + 'evaluate_forward_ref', + 'final', + 'get_args', + 'get_origin', + 'get_overloads', + 'get_protocol_members', + 'get_type_hints', + 'is_protocol', + 'is_typeddict', + 'LiteralString', + 'Never', + 'NewType', + 'no_type_check', + 'no_type_check_decorator', + 'NoDefault', + 'NoReturn', + 'NotRequired', + 'overload', + 'override', + 'ParamSpecArgs', + 'ParamSpecKwargs', + 'ReadOnly', + 'Required', + 'reveal_type', + 'runtime_checkable', + 'Self', + 'Text', + 'TYPE_CHECKING', + 'TypeAlias', + 'TypeGuard', + 'TypeIs', + 'TypeAliasType', + 'Unpack', +] + +class _LazyAnnotationLib: + def __getattr__(self, attr): + global _lazy_annotationlib + import annotationlib + _lazy_annotationlib = annotationlib + return getattr(annotationlib, attr) + +_lazy_annotationlib = _LazyAnnotationLib() + + +def _type_convert(arg, module=None, *, allow_special_forms=False, owner=None): + """For converting None to type(None), and strings to ForwardRef.""" + if arg is None: + return type(None) + if isinstance(arg, str): + return _make_forward_ref(arg, module=module, is_class=allow_special_forms, owner=owner) + return arg + + +def _type_check(arg, msg, is_argument=True, module=None, *, allow_special_forms=False, owner=None): + """Check that the argument is a type, and return it (internal helper). + + As a special case, accept None and return type(None) instead. Also wrap strings + into ForwardRef instances. Consider several corner cases, for example plain + special forms like Union are not valid, while Union[int, str] is OK, etc. + The msg argument is a human-readable error message, e.g.:: + + "Union[arg, ...]: arg should be a type." + + We append the repr() of the actual value (truncated to 100 chars). + """ + invalid_generic_forms = (Generic, Protocol) + if not allow_special_forms: + invalid_generic_forms += (ClassVar,) + if is_argument: + invalid_generic_forms += (Final,) + + arg = _type_convert(arg, module=module, allow_special_forms=allow_special_forms, owner=owner) + if (isinstance(arg, _GenericAlias) and + arg.__origin__ in invalid_generic_forms): + raise TypeError(f"{arg} is not valid as type argument") + if arg in (Any, LiteralString, NoReturn, Never, Self, TypeAlias): + return arg + if allow_special_forms and arg in (ClassVar, Final): + return arg + if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol): + raise TypeError(f"Plain {arg} is not valid as type argument") + if type(arg) is tuple: + raise TypeError(f"{msg} Got {arg!r:.100}.") + return arg + + +def _is_param_expr(arg): + return arg is ... 
or isinstance(arg, + (tuple, list, ParamSpec, _ConcatenateGenericAlias)) + + +def _should_unflatten_callable_args(typ, args): + """Internal helper for munging collections.abc.Callable's __args__. + + The canonical representation for a Callable's __args__ flattens the + argument types, see https://github.com/python/cpython/issues/86361. + + For example:: + + >>> import collections.abc + >>> P = ParamSpec('P') + >>> collections.abc.Callable[[int, int], str].__args__ == (int, int, str) + True + >>> collections.abc.Callable[P, str].__args__ == (P, str) + True + + As a result, if we need to reconstruct the Callable from its __args__, + we need to unflatten it. + """ + return ( + typ.__origin__ is collections.abc.Callable + and not (len(args) == 2 and _is_param_expr(args[0])) + ) + + +def _type_repr(obj): + """Return the repr() of an object, special-casing types (internal helper). + + If obj is a type, we return a shorter version than the default + type.__repr__, based on the module and qualified name, which is + typically enough to uniquely identify a type. For everything + else, we fall back on repr(obj). + """ + if isinstance(obj, tuple): + # Special case for `repr` of types with `ParamSpec`: + return '[' + ', '.join(_type_repr(t) for t in obj) + ']' + return _lazy_annotationlib.type_repr(obj) + + +def _collect_type_parameters(args, *, enforce_default_ordering: bool = True): + """Collect all type parameters in args + in order of first appearance (lexicographic order). + + For example:: + + >>> P = ParamSpec('P') + >>> T = TypeVar('T') + >>> _collect_type_parameters((T, Callable[P, T])) + (~T, ~P) + """ + # required type parameter cannot appear after parameter with default + default_encountered = False + # or after TypeVarTuple + type_var_tuple_encountered = False + parameters = [] + for t in args: + if isinstance(t, type): + # We don't want __parameters__ descriptor of a bare Python class. + pass + elif isinstance(t, tuple): + # `t` might be a tuple, when `ParamSpec` is substituted with + # `[T, int]`, or `[int, *Ts]`, etc. + for x in t: + for collected in _collect_type_parameters([x]): + if collected not in parameters: + parameters.append(collected) + elif hasattr(t, '__typing_subst__'): + if t not in parameters: + if enforce_default_ordering: + if type_var_tuple_encountered and t.has_default(): + raise TypeError('Type parameter with a default' + ' follows TypeVarTuple') + + if t.has_default(): + default_encountered = True + elif default_encountered: + raise TypeError(f'Type parameter {t!r} without a default' + ' follows type parameter with a default') + + parameters.append(t) + else: + if _is_unpacked_typevartuple(t): + type_var_tuple_encountered = True + for x in getattr(t, '__parameters__', ()): + if x not in parameters: + parameters.append(x) + return tuple(parameters) + + +def _check_generic_specialization(cls, arguments): + """Check correct count for parameters of a generic cls (internal helper). + + This gives a nice error message in case of count mismatch. + """ + expected_len = len(cls.__parameters__) + if not expected_len: + raise TypeError(f"{cls} is not a generic class") + actual_len = len(arguments) + if actual_len != expected_len: + # deal with defaults + if actual_len < expected_len: + # If the parameter at index `actual_len` in the parameters list + # has a default, then all parameters after it must also have + # one, because we validated as much in _collect_type_parameters(). 
+ # That means that no error needs to be raised here, despite + # the number of arguments being passed not matching the number + # of parameters: all parameters that aren't explicitly + # specialized in this call are parameters with default values. + if cls.__parameters__[actual_len].has_default(): + return + + expected_len -= sum(p.has_default() for p in cls.__parameters__) + expect_val = f"at least {expected_len}" + else: + expect_val = expected_len + + raise TypeError(f"Too {'many' if actual_len > expected_len else 'few'} arguments" + f" for {cls}; actual {actual_len}, expected {expect_val}") + + +def _unpack_args(*args): + newargs = [] + for arg in args: + subargs = getattr(arg, '__typing_unpacked_tuple_args__', None) + if subargs is not None and not (subargs and subargs[-1] is ...): + newargs.extend(subargs) + else: + newargs.append(arg) + return newargs + +def _deduplicate(params, *, unhashable_fallback=False): + # Weed out strict duplicates, preserving the first of each occurrence. + try: + return dict.fromkeys(params) + except TypeError: + if not unhashable_fallback: + raise + # Happens for cases like `Annotated[dict, {'x': IntValidator()}]` + new_unhashable = [] + for t in params: + if t not in new_unhashable: + new_unhashable.append(t) + return new_unhashable + +def _flatten_literal_params(parameters): + """Internal helper for Literal creation: flatten Literals among parameters.""" + params = [] + for p in parameters: + if isinstance(p, _LiteralGenericAlias): + params.extend(p.__args__) + else: + params.append(p) + return tuple(params) + + +_cleanups = [] +_caches = {} + + +def _tp_cache(func=None, /, *, typed=False): + """Internal wrapper caching __getitem__ of generic types. + + For non-hashable arguments, the original function is used as a fallback. + """ + def decorator(func): + # The callback 'inner' references the newly created lru_cache + # indirectly by performing a lookup in the global '_caches' dictionary. + # This breaks a reference that can be problematic when combined with + # C API extensions that leak references to types. See GH-98253. + + cache = functools.lru_cache(typed=typed)(func) + _caches[func] = cache + _cleanups.append(cache.cache_clear) + del cache + + @functools.wraps(func) + def inner(*args, **kwds): + try: + return _caches[func](*args, **kwds) + except TypeError: + pass # All real errors (not unhashable args) are raised below. + return func(*args, **kwds) + return inner + + if func is not None: + return decorator(func) + + return decorator + + +def _deprecation_warning_for_no_type_params_passed(funcname: str) -> None: + import warnings + + depr_message = ( + f"Failing to pass a value to the 'type_params' parameter " + f"of {funcname!r} is deprecated, as it leads to incorrect behaviour " + f"when calling {funcname} on a stringified annotation " + f"that references a PEP 695 type parameter. " + f"It will be disallowed in Python 3.15." + ) + warnings.warn(depr_message, category=DeprecationWarning, stacklevel=3) + + +class _Sentinel: + __slots__ = () + def __repr__(self): + return '' + + +_sentinel = _Sentinel() + + +def _eval_type(t, globalns, localns, type_params=_sentinel, *, recursive_guard=frozenset(), + format=None, owner=None, parent_fwdref=None, prefer_fwd_module=False): + """Evaluate all forward references in the given type t. + + For use of globalns and localns see the docstring for get_type_hints(). + recursive_guard is used to prevent infinite recursion with a recursive + ForwardRef. 
+ """ + if type_params is _sentinel: + _deprecation_warning_for_no_type_params_passed("typing._eval_type") + type_params = () + if isinstance(t, _lazy_annotationlib.ForwardRef): + # If the forward_ref has __forward_module__ set, evaluate() infers the globals + # from the module, and it will probably pick better than the globals we have here. + # We do this only for calls from get_type_hints() (which opts in through the + # prefer_fwd_module flag), so that the default behavior remains more straightforward. + if prefer_fwd_module and t.__forward_module__ is not None: + globalns = None + # If there are type params on the owner, we need to add them back, because + # annotationlib won't. + if owner_type_params := getattr(owner, "__type_params__", None): + globalns = getattr( + sys.modules.get(t.__forward_module__, None), "__dict__", None + ) + if globalns is not None: + globalns = dict(globalns) + for type_param in owner_type_params: + globalns[type_param.__name__] = type_param + return evaluate_forward_ref(t, globals=globalns, locals=localns, + type_params=type_params, owner=owner, + _recursive_guard=recursive_guard, format=format) + if isinstance(t, (_GenericAlias, GenericAlias, Union)): + if isinstance(t, GenericAlias): + args = tuple( + _make_forward_ref(arg, parent_fwdref=parent_fwdref) if isinstance(arg, str) else arg + for arg in t.__args__ + ) + is_unpacked = t.__unpacked__ + if _should_unflatten_callable_args(t, args): + t = t.__origin__[(args[:-1], args[-1])] + else: + t = t.__origin__[args] + if is_unpacked: + t = Unpack[t] + + ev_args = tuple( + _eval_type( + a, globalns, localns, type_params, recursive_guard=recursive_guard, + format=format, owner=owner, prefer_fwd_module=prefer_fwd_module, + ) + for a in t.__args__ + ) + if ev_args == t.__args__: + return t + if isinstance(t, GenericAlias): + return GenericAlias(t.__origin__, ev_args) + if isinstance(t, Union): + return functools.reduce(operator.or_, ev_args) + else: + return t.copy_with(ev_args) + return t + + +class _Final: + """Mixin to prohibit subclassing.""" + + __slots__ = ('__weakref__',) + + def __init_subclass__(cls, /, *args, **kwds): + if '_root' not in kwds: + raise TypeError("Cannot subclass special typing classes") + + +class _NotIterable: + """Mixin to prevent iteration, without being compatible with Iterable. + + That is, we could do:: + + def __iter__(self): raise TypeError() + + But this would make users of this mixin duck type-compatible with + collections.abc.Iterable - isinstance(foo, Iterable) would be True. + + Luckily, we can instead prevent iteration by setting __iter__ to None, which + is treated specially. + """ + + __slots__ = () + __iter__ = None + + +# Internal indicator of special typing constructs. +# See __doc__ instance attribute for specific docs. +class _SpecialForm(_Final, _NotIterable, _root=True): + __slots__ = ('_name', '__doc__', '_getitem') + + def __init__(self, getitem): + self._getitem = getitem + self._name = getitem.__name__ + self.__doc__ = getitem.__doc__ + + def __getattr__(self, item): + if item in {'__name__', '__qualname__'}: + return self._name + + raise AttributeError(item) + + def __mro_entries__(self, bases): + raise TypeError(f"Cannot subclass {self!r}") + + def __repr__(self): + return 'typing.' 
+ self._name + + def __reduce__(self): + return self._name + + def __call__(self, *args, **kwds): + raise TypeError(f"Cannot instantiate {self!r}") + + def __or__(self, other): + return Union[self, other] + + def __ror__(self, other): + return Union[other, self] + + def __instancecheck__(self, obj): + raise TypeError(f"{self} cannot be used with isinstance()") + + def __subclasscheck__(self, cls): + raise TypeError(f"{self} cannot be used with issubclass()") + + @_tp_cache + def __getitem__(self, parameters): + return self._getitem(self, parameters) + + +class _TypedCacheSpecialForm(_SpecialForm, _root=True): + def __getitem__(self, parameters): + if not isinstance(parameters, tuple): + parameters = (parameters,) + return self._getitem(self, *parameters) + + +class _AnyMeta(type): + def __instancecheck__(self, obj): + if self is Any: + raise TypeError("typing.Any cannot be used with isinstance()") + return super().__instancecheck__(obj) + + def __repr__(self): + if self is Any: + return "typing.Any" + return super().__repr__() # respect to subclasses + + +class Any(metaclass=_AnyMeta): + """Special type indicating an unconstrained type. + + - Any is compatible with every type. + - Any assumed to have all methods. + - All values assumed to be instances of Any. + + Note that all the above statements are true from the point of view of + static type checkers. At runtime, Any should not be used with instance + checks. + """ + + def __new__(cls, *args, **kwargs): + if cls is Any: + raise TypeError("Any cannot be instantiated") + return super().__new__(cls) + + +@_SpecialForm +def NoReturn(self, parameters): + """Special type indicating functions that never return. + + Example:: + + from typing import NoReturn + + def stop() -> NoReturn: + raise Exception('no way') + + NoReturn can also be used as a bottom type, a type that + has no values. Starting in Python 3.11, the Never type should + be used for this concept instead. Type checkers should treat the two + equivalently. + """ + raise TypeError(f"{self} is not subscriptable") + +# This is semantically identical to NoReturn, but it is implemented +# separately so that type checkers can distinguish between the two +# if they want. +@_SpecialForm +def Never(self, parameters): + """The bottom type, a type that has no members. + + This can be used to define a function that should never be + called, or a function that never returns:: + + from typing import Never + + def never_call_me(arg: Never) -> None: + pass + + def int_or_str(arg: int | str) -> None: + never_call_me(arg) # type checker error + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + never_call_me(arg) # OK, arg is of type Never + """ + raise TypeError(f"{self} is not subscriptable") + + +@_SpecialForm +def Self(self, parameters): + """Used to spell the type of "self" in classes. + + Example:: + + from typing import Self + + class Foo: + def return_self(self) -> Self: + ... + return self + + This is especially useful for: + - classmethods that are used as alternative constructors + - annotating an `__enter__` method which returns self + """ + raise TypeError(f"{self} is not subscriptable") + + +@_SpecialForm +def LiteralString(self, parameters): + """Represents an arbitrary literal string. + + Example:: + + from typing import LiteralString + + def run_query(sql: LiteralString) -> None: + ... 
+ + def caller(arbitrary_string: str, literal_string: LiteralString) -> None: + run_query("SELECT * FROM students") # OK + run_query(literal_string) # OK + run_query("SELECT * FROM " + literal_string) # OK + run_query(arbitrary_string) # type checker error + run_query( # type checker error + f"SELECT * FROM students WHERE name = {arbitrary_string}" + ) + + Only string literals and other LiteralStrings are compatible + with LiteralString. This provides a tool to help prevent + security issues such as SQL injection. + """ + raise TypeError(f"{self} is not subscriptable") + + +@_SpecialForm +def ClassVar(self, parameters): + """Special type construct to mark class variables. + + An annotation wrapped in ClassVar indicates that a given + attribute is intended to be used as a class variable and + should not be set on instances of that class. + + Usage:: + + class Starship: + stats: ClassVar[dict[str, int]] = {} # class variable + damage: int = 10 # instance variable + + ClassVar accepts only types and cannot be further subscribed. + + Note that ClassVar is not a class itself, and should not + be used with isinstance() or issubclass(). + """ + item = _type_check(parameters, f'{self} accepts only single type.', allow_special_forms=True) + return _GenericAlias(self, (item,)) + +@_SpecialForm +def Final(self, parameters): + """Special typing construct to indicate final names to type checkers. + + A final name cannot be re-assigned or overridden in a subclass. + + For example:: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties. + """ + item = _type_check(parameters, f'{self} accepts only single type.', allow_special_forms=True) + return _GenericAlias(self, (item,)) + +@_SpecialForm +def Optional(self, parameters): + """Optional[X] is equivalent to Union[X, None].""" + arg = _type_check(parameters, f"{self} requires a single type.") + return Union[arg, type(None)] + +@_TypedCacheSpecialForm +@_tp_cache(typed=True) +def Literal(self, *parameters): + """Special typing form to define literal types (a.k.a. value types). + + This form can be used to indicate to type checkers that the corresponding + variable or function parameter has a value equivalent to the provided + literal (or one of several literals):: + + def validate_simple(data: Any) -> Literal[True]: # always returns True + ... + + MODE = Literal['r', 'rb', 'w', 'wb'] + def open_helper(file: str, mode: MODE) -> str: + ... + + open_helper('/some/path', 'r') # Passes type check + open_helper('/other/path', 'typo') # Error in type checker + + Literal[...] cannot be subclassed. At runtime, an arbitrary value + is allowed as type argument to Literal[...], but type checkers may + impose restrictions. + """ + # There is no '_type_check' call because arguments to Literal[...] are + # values, not types. + parameters = _flatten_literal_params(parameters) + + try: + parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters)))) + except TypeError: # unhashable parameters + pass + + return _LiteralGenericAlias(self, parameters) + + +@_SpecialForm +def TypeAlias(self, parameters): + """Special form for marking type aliases. + + Use TypeAlias to indicate that an assignment should + be recognized as a proper type alias definition by type + checkers. 
+
+    For example::
+
+        Predicate: TypeAlias = Callable[..., bool]
+
+    It's invalid when used anywhere except as in the example above.
+    """
+    raise TypeError(f"{self} is not subscriptable")
+
+
+@_SpecialForm
+def Concatenate(self, parameters):
+    """Special form for annotating higher-order functions.
+
+    ``Concatenate`` can be used in conjunction with ``ParamSpec`` and
+    ``Callable`` to represent a higher-order function which adds, removes or
+    transforms the parameters of a callable.
+
+    For example::
+
+        Callable[Concatenate[int, P], int]
+
+    See PEP 612 for detailed information.
+    """
+    if parameters == ():
+        raise TypeError("Cannot take a Concatenate of no types.")
+    if not isinstance(parameters, tuple):
+        parameters = (parameters,)
+    if not (parameters[-1] is ... or isinstance(parameters[-1], ParamSpec)):
+        raise TypeError("The last parameter to Concatenate should be a "
+                        "ParamSpec variable or ellipsis.")
+    msg = "Concatenate[arg, ...]: each arg must be a type."
+    parameters = (*(_type_check(p, msg) for p in parameters[:-1]), parameters[-1])
+    return _ConcatenateGenericAlias(self, parameters)
+
+
+@_SpecialForm
+def TypeGuard(self, parameters):
+    """Special typing construct for marking user-defined type predicate functions.
+
+    ``TypeGuard`` can be used to annotate the return type of a user-defined
+    type predicate function. ``TypeGuard`` only accepts a single type argument.
+    At runtime, functions marked this way should return a boolean.
+
+    ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+    type checkers to determine a more precise type of an expression within a
+    program's code flow. Usually type narrowing is done by analyzing
+    conditional code flow and applying the narrowing to a block of code. The
+    conditional expression here is sometimes referred to as a "type predicate".
+
+    Sometimes it would be convenient to use a user-defined boolean function
+    as a type predicate. Such a function should use ``TypeGuard[...]`` or
+    ``TypeIs[...]`` as its return type to alert static type checkers to
+    this intention. ``TypeGuard`` should be used over ``TypeIs`` when narrowing
+    from an incompatible type (e.g., ``list[object]`` to ``list[int]``) or when
+    the function does not return ``True`` for all instances of the narrowed type.
+
+    Using ``-> TypeGuard[NarrowedType]`` tells the static type checker that
+    for a given function:
+
+    1. The return value is a boolean.
+    2. If the return value is ``True``, the type of its argument
+       is ``NarrowedType``.
+
+    For example::
+
+        def is_str_list(val: list[object]) -> TypeGuard[list[str]]:
+            '''Determines whether all objects in the list are strings'''
+            return all(isinstance(x, str) for x in val)
+
+        def func1(val: list[object]):
+            if is_str_list(val):
+                # Type of ``val`` is narrowed to ``list[str]``.
+                print(" ".join(val))
+            else:
+                # Type of ``val`` remains as ``list[object]``.
+                print("Not a list of strings!")
+
+    Strict type narrowing is not enforced -- the type inside ``TypeGuard``
+    need not be a narrower form of the argument's declared type (it can
+    even be a wider form) and this may lead to type-unsafe results.  The
+    main reason is to allow for things like narrowing ``list[object]`` to
+    ``list[str]`` even though the latter is not a subtype of the former,
+    since ``list`` is invariant. The responsibility of writing type-safe
+    type predicates is left to the user.
+
+    ``TypeGuard`` also works with type variables. For more information, see
+    PEP 647 (User-Defined Type Guards).
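+
+    For example, unlike ``TypeIs``, ``TypeGuard`` permits widening
+    (an illustrative sketch)::
+
+        from collections.abc import Sequence
+
+        def as_int_sequence(val: list[int]) -> TypeGuard[Sequence[int]]:
+            return True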
+ """ + item = _type_check(parameters, f'{self} accepts only single type.') + return _GenericAlias(self, (item,)) + + +@_SpecialForm +def TypeIs(self, parameters): + """Special typing construct for marking user-defined type predicate functions. + + ``TypeIs`` can be used to annotate the return type of a user-defined + type predicate function. ``TypeIs`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean and accept + at least one argument. + + ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type predicate". + + Sometimes it would be convenient to use a user-defined boolean function + as a type predicate. Such a function should use ``TypeIs[...]`` or + ``TypeGuard[...]`` as its return type to alert static type checkers to + this intention. ``TypeIs`` usually has more intuitive behavior than + ``TypeGuard``, but it cannot be used when the input and output types + are incompatible (e.g., ``list[object]`` to ``list[int]``) or when the + function does not return ``True`` for all instances of the narrowed type. + + Using ``-> TypeIs[NarrowedType]`` tells the static type checker that for + a given function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the intersection of the argument's original type and + ``NarrowedType``. + 3. If the return value is ``False``, the type of its argument + is narrowed to exclude ``NarrowedType``. + + For example:: + + from typing import assert_type, final, TypeIs + + class Parent: pass + class Child(Parent): pass + @final + class Unrelated: pass + + def is_parent(val: object) -> TypeIs[Parent]: + return isinstance(val, Parent) + + def run(arg: Child | Unrelated): + if is_parent(arg): + # Type of ``arg`` is narrowed to the intersection + # of ``Parent`` and ``Child``, which is equivalent to + # ``Child``. + assert_type(arg, Child) + else: + # Type of ``arg`` is narrowed to exclude ``Parent``, + # so only ``Unrelated`` is left. + assert_type(arg, Unrelated) + + The type inside ``TypeIs`` must be consistent with the type of the + function's argument; if it is not, static type checkers will raise + an error. An incorrectly written ``TypeIs`` function can lead to + unsound behavior in the type system; it is the user's responsibility + to write such functions in a type-safe manner. + + ``TypeIs`` also works with type variables. For more information, see + PEP 742 (Narrowing types with ``TypeIs``). + """ + item = _type_check(parameters, f'{self} accepts only single type.') + return _GenericAlias(self, (item,)) + + +def _make_forward_ref(code, *, parent_fwdref=None, **kwargs): + if parent_fwdref is not None: + if parent_fwdref.__forward_module__ is not None: + kwargs['module'] = parent_fwdref.__forward_module__ + if parent_fwdref.__owner__ is not None: + kwargs['owner'] = parent_fwdref.__owner__ + forward_ref = _lazy_annotationlib.ForwardRef(code, **kwargs) + # For compatibility, eagerly compile the forwardref's code. 
+ forward_ref.__forward_code__ + return forward_ref + + +def evaluate_forward_ref( + forward_ref, + *, + owner=None, + globals=None, + locals=None, + type_params=None, + format=None, + _recursive_guard=frozenset(), +): + """Evaluate a forward reference as a type hint. + + This is similar to calling the ForwardRef.evaluate() method, + but unlike that method, evaluate_forward_ref() also + recursively evaluates forward references nested within the type hint. + + *forward_ref* must be an instance of ForwardRef. *owner*, if given, + should be the object that holds the annotations that the forward reference + derived from, such as a module, class object, or function. It is used to + infer the namespaces to use for looking up names. *globals* and *locals* + can also be explicitly given to provide the global and local namespaces. + *type_params* is a tuple of type parameters that are in scope when + evaluating the forward reference. This parameter should be provided (though + it may be an empty tuple) if *owner* is not given and the forward reference + does not already have an owner set. *format* specifies the format of the + annotation and is a member of the annotationlib.Format enum, defaulting to + VALUE. + + """ + if format == _lazy_annotationlib.Format.STRING: + return forward_ref.__forward_arg__ + if forward_ref.__forward_arg__ in _recursive_guard: + return forward_ref + + if format is None: + format = _lazy_annotationlib.Format.VALUE + value = forward_ref.evaluate(globals=globals, locals=locals, + type_params=type_params, owner=owner, format=format) + + if (isinstance(value, _lazy_annotationlib.ForwardRef) + and format == _lazy_annotationlib.Format.FORWARDREF): + return value + + if isinstance(value, str): + value = _make_forward_ref(value, module=forward_ref.__forward_module__, + owner=owner or forward_ref.__owner__, + is_argument=forward_ref.__forward_is_argument__, + is_class=forward_ref.__forward_is_class__) + if owner is None: + owner = forward_ref.__owner__ + return _eval_type( + value, + globals, + locals, + type_params, + recursive_guard=_recursive_guard | {forward_ref.__forward_arg__}, + format=format, + owner=owner, + parent_fwdref=forward_ref, + ) + + +def _is_unpacked_typevartuple(x: Any) -> bool: + # Need to check 'is True' here + # See: https://github.com/python/cpython/issues/137706 + return ((not isinstance(x, type)) and + getattr(x, '__typing_is_unpacked_typevartuple__', False) is True) + + +def _is_typevar_like(x: Any) -> bool: + return isinstance(x, (TypeVar, ParamSpec)) or _is_unpacked_typevartuple(x) + + +def _typevar_subst(self, arg): + msg = "Parameters to generic types must be types." 
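+    # A plain TypeVar stands for exactly one type, so unpacked forms such as
+    # Unpack[Ts] or *tuple[int, ...] are rejected as substitutions below.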
+ arg = _type_check(arg, msg, is_argument=True) + if ((isinstance(arg, _GenericAlias) and arg.__origin__ is Unpack) or + (isinstance(arg, GenericAlias) and getattr(arg, '__unpacked__', False))): + raise TypeError(f"{arg} is not valid as type argument") + return arg + + +def _typevartuple_prepare_subst(self, alias, args): + params = alias.__parameters__ + typevartuple_index = params.index(self) + for param in params[typevartuple_index + 1:]: + if isinstance(param, TypeVarTuple): + raise TypeError(f"More than one TypeVarTuple parameter in {alias}") + + alen = len(args) + plen = len(params) + left = typevartuple_index + right = plen - typevartuple_index - 1 + var_tuple_index = None + fillarg = None + for k, arg in enumerate(args): + if not isinstance(arg, type): + subargs = getattr(arg, '__typing_unpacked_tuple_args__', None) + if subargs and len(subargs) == 2 and subargs[-1] is ...: + if var_tuple_index is not None: + raise TypeError("More than one unpacked arbitrary-length tuple argument") + var_tuple_index = k + fillarg = subargs[0] + if var_tuple_index is not None: + left = min(left, var_tuple_index) + right = min(right, alen - var_tuple_index - 1) + elif left + right > alen: + raise TypeError(f"Too few arguments for {alias};" + f" actual {alen}, expected at least {plen-1}") + if left == alen - right and self.has_default(): + replacement = _unpack_args(self.__default__) + else: + replacement = args[left: alen - right] + + return ( + *args[:left], + *([fillarg]*(typevartuple_index - left)), + replacement, + *([fillarg]*(plen - right - left - typevartuple_index - 1)), + *args[alen - right:], + ) + + +def _paramspec_subst(self, arg): + if isinstance(arg, (list, tuple)): + arg = tuple(_type_check(a, "Expected a type.") for a in arg) + elif not _is_param_expr(arg): + raise TypeError(f"Expected a list of types, an ellipsis, " + f"ParamSpec, or Concatenate. Got {arg}") + return arg + + +def _paramspec_prepare_subst(self, alias, args): + params = alias.__parameters__ + i = params.index(self) + if i == len(args) and self.has_default(): + args = (*args, self.__default__) + if i >= len(args): + raise TypeError(f"Too few arguments for {alias}") + # Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612. + if len(params) == 1 and not _is_param_expr(args[0]): + assert i == 0 + args = (args,) + # Convert lists to tuples to help other libraries cache the results. + elif isinstance(args[i], list): + args = (*args[:i], tuple(args[i]), *args[i+1:]) + return args + + +@_tp_cache +def _generic_class_getitem(cls, args): + """Parameterizes a generic class. + + At least, parameterizing a generic class is the *main* thing this method + does. For example, for some generic class `Foo`, this is called when we + do `Foo[int]` - there, with `cls=Foo` and `args=int`. + + However, note that this method is also called when defining generic + classes in the first place with `class Foo(Generic[T]): ...`. + """ + if not isinstance(args, tuple): + args = (args,) + + args = tuple(_type_convert(p) for p in args) + is_generic_or_protocol = cls in (Generic, Protocol) + + if is_generic_or_protocol: + # Generic and Protocol can only be subscripted with unique type variables. + if not args: + raise TypeError( + f"Parameter list to {cls.__qualname__}[...] cannot be empty" + ) + if not all(_is_typevar_like(p) for p in args): + raise TypeError( + f"Parameters to {cls.__name__}[...] 
must all be type variables "
+                f"or parameter specification variables.")
+        if len(set(args)) != len(args):
+            raise TypeError(
+                f"Parameters to {cls.__name__}[...] must all be unique")
+    else:
+        # Subscripting a regular Generic subclass.
+        try:
+            parameters = cls.__parameters__
+        except AttributeError as e:
+            init_subclass = getattr(cls, '__init_subclass__', None)
+            if init_subclass not in {None, Generic.__init_subclass__}:
+                e.add_note(
+                    f"Note: this exception may have been caused by "
+                    f"{init_subclass.__qualname__!r} (or the "
+                    f"'__init_subclass__' method on a superclass) not "
+                    f"calling 'super().__init_subclass__()'"
+                )
+            raise
+        for param in parameters:
+            prepare = getattr(param, '__typing_prepare_subst__', None)
+            if prepare is not None:
+                args = prepare(cls, args)
+        _check_generic_specialization(cls, args)
+
+        new_args = []
+        for param, new_arg in zip(parameters, args):
+            if isinstance(param, TypeVarTuple):
+                new_args.extend(new_arg)
+            else:
+                new_args.append(new_arg)
+        args = tuple(new_args)
+
+    return _GenericAlias(cls, args)
+
+
+def _generic_init_subclass(cls, *args, **kwargs):
+    super(Generic, cls).__init_subclass__(*args, **kwargs)
+    tvars = []
+    if '__orig_bases__' in cls.__dict__:
+        error = Generic in cls.__orig_bases__
+    else:
+        error = (Generic in cls.__bases__ and
+                 cls.__name__ != 'Protocol' and
+                 type(cls) != _TypedDictMeta)
+    if error:
+        raise TypeError("Cannot inherit from plain Generic")
+    if '__orig_bases__' in cls.__dict__:
+        tvars = _collect_type_parameters(cls.__orig_bases__)
+        # Look for Generic[T1, ..., Tn].
+        # If found, tvars must be a subset of it.
+        # If not found, tvars is it.
+        # Also check for and reject plain Generic,
+        # and reject multiple Generic[...].
+        gvars = None
+        for base in cls.__orig_bases__:
+            if (isinstance(base, _GenericAlias) and
+                    base.__origin__ is Generic):
+                if gvars is not None:
+                    raise TypeError(
+                        "Cannot inherit from Generic[...] multiple times.")
+                gvars = base.__parameters__
+        if gvars is not None:
+            tvarset = set(tvars)
+            gvarset = set(gvars)
+            if not tvarset <= gvarset:
+                s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
+                s_args = ', '.join(str(g) for g in gvars)
+                raise TypeError(f"Some type variables ({s_vars}) are"
+                                f" not listed in Generic[{s_args}]")
+            tvars = gvars
+    cls.__parameters__ = tuple(tvars)
+
+
+def _is_dunder(attr):
+    return attr.startswith('__') and attr.endswith('__')
+
+class _BaseGenericAlias(_Final, _root=True):
+    """The central part of the internal API.
+
+    This represents a generic version of type 'origin' with type arguments 'params'.
+    There are two kinds of these aliases: user defined and special. The special ones
+    are wrappers around builtin collections and ABCs in collections.abc. These must
+    have 'name' always set. If 'inst' is False, then the alias can't be instantiated;
+    this is used by e.g. typing.List and typing.Dict.
+    """
+
+    def __init__(self, origin, *, inst=True, name=None):
+        self._inst = inst
+        self._name = name
+        self.__origin__ = origin
+        self.__slots__ = None  # This is not documented.
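+
+    # NOTE: instances of this class are aliases such as List[int]; the
+    # methods below make them behave like the origin class where that is
+    # safe (calling, attribute access) while blocking misuse (isinstance).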
+ + def __call__(self, *args, **kwargs): + if not self._inst: + raise TypeError(f"Type {self._name} cannot be instantiated; " + f"use {self.__origin__.__name__}() instead") + result = self.__origin__(*args, **kwargs) + try: + result.__orig_class__ = self + # Some objects raise TypeError (or something even more exotic) + # if you try to set attributes on them; we guard against that here + except Exception: + pass + return result + + def __mro_entries__(self, bases): + res = [] + if self.__origin__ not in bases: + res.append(self.__origin__) + + # Check if any base that occurs after us in `bases` is either itself a + # subclass of Generic, or something which will add a subclass of Generic + # to `__bases__` via its `__mro_entries__`. If not, add Generic + # ourselves. The goal is to ensure that Generic (or a subclass) will + # appear exactly once in the final bases tuple. If we let it appear + # multiple times, we risk "can't form a consistent MRO" errors. + i = bases.index(self) + for b in bases[i+1:]: + if isinstance(b, _BaseGenericAlias): + break + if not isinstance(b, type): + meth = getattr(b, "__mro_entries__", None) + new_bases = meth(bases) if meth else None + if ( + isinstance(new_bases, tuple) and + any( + isinstance(b2, type) and issubclass(b2, Generic) + for b2 in new_bases + ) + ): + break + elif issubclass(b, Generic): + break + else: + res.append(Generic) + return tuple(res) + + def __getattr__(self, attr): + if attr in {'__name__', '__qualname__'}: + return self._name or self.__origin__.__name__ + + # We are careful for copy and pickle. + # Also for simplicity we don't relay any dunder names + if '__origin__' in self.__dict__ and not _is_dunder(attr): + return getattr(self.__origin__, attr) + raise AttributeError(attr) + + def __setattr__(self, attr, val): + if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams', '_defaults'}: + super().__setattr__(attr, val) + else: + setattr(self.__origin__, attr, val) + + def __instancecheck__(self, obj): + return self.__subclasscheck__(type(obj)) + + def __subclasscheck__(self, cls): + raise TypeError("Subscripted generics cannot be used with" + " class and instance checks") + + def __dir__(self): + return list(set(super().__dir__() + + [attr for attr in dir(self.__origin__) if not _is_dunder(attr)])) + + +# Special typing constructs Union, Optional, Generic, Callable and Tuple +# use three special attributes for internal bookkeeping of generic types: +# * __parameters__ is a tuple of unique free type parameters of a generic +# type, for example, Dict[T, T].__parameters__ == (T,); +# * __origin__ keeps a reference to a type that was subscripted, +# e.g., Union[T, int].__origin__ == Union, or the non-generic version of +# the type. +# * __args__ is a tuple of all arguments used in subscripting, +# e.g., Dict[T, int].__args__ == (T, int). + + +class _GenericAlias(_BaseGenericAlias, _root=True): + # The type of parameterized generics. + # + # That is, for example, `type(List[int])` is `_GenericAlias`. + # + # Objects which are instances of this class include: + # * Parameterized container types, e.g. `Tuple[int]`, `List[int]`. + # * Note that native container types, e.g. `tuple`, `list`, use + # `types.GenericAlias` instead. + # * Parameterized classes: + # class C[T]: pass + # # C[int] is a _GenericAlias + # * `Callable` aliases, generic `Callable` aliases, and + # parameterized `Callable` aliases: + # T = TypeVar('T') + # # _CallableGenericAlias inherits from _GenericAlias. 
+ # A = Callable[[], None] # _CallableGenericAlias + # B = Callable[[T], None] # _CallableGenericAlias + # C = B[int] # _CallableGenericAlias + # * Parameterized `Final`, `ClassVar`, `TypeGuard`, and `TypeIs`: + # # All _GenericAlias + # Final[int] + # ClassVar[float] + # TypeGuard[bool] + # TypeIs[range] + + def __init__(self, origin, args, *, inst=True, name=None): + super().__init__(origin, inst=inst, name=name) + if not isinstance(args, tuple): + args = (args,) + self.__args__ = tuple(... if a is _TypingEllipsis else + a for a in args) + enforce_default_ordering = origin in (Generic, Protocol) + self.__parameters__ = _collect_type_parameters( + args, + enforce_default_ordering=enforce_default_ordering, + ) + if not name: + self.__module__ = origin.__module__ + + def __eq__(self, other): + if not isinstance(other, _GenericAlias): + return NotImplemented + return (self.__origin__ == other.__origin__ + and self.__args__ == other.__args__) + + def __hash__(self): + return hash((self.__origin__, self.__args__)) + + def __or__(self, right): + return Union[self, right] + + def __ror__(self, left): + return Union[left, self] + + @_tp_cache + def __getitem__(self, args): + # Parameterizes an already-parameterized object. + # + # For example, we arrive here doing something like: + # T1 = TypeVar('T1') + # T2 = TypeVar('T2') + # T3 = TypeVar('T3') + # class A(Generic[T1]): pass + # B = A[T2] # B is a _GenericAlias + # C = B[T3] # Invokes _GenericAlias.__getitem__ + # + # We also arrive here when parameterizing a generic `Callable` alias: + # T = TypeVar('T') + # C = Callable[[T], None] + # C[int] # Invokes _GenericAlias.__getitem__ + + if self.__origin__ in (Generic, Protocol): + # Can't subscript Generic[...] or Protocol[...]. + raise TypeError(f"Cannot subscript already-subscripted {self}") + if not self.__parameters__: + raise TypeError(f"{self} is not a generic class") + + # Preprocess `args`. + if not isinstance(args, tuple): + args = (args,) + args = _unpack_args(*(_type_convert(p) for p in args)) + new_args = self._determine_new_args(args) + r = self.copy_with(new_args) + return r + + def _determine_new_args(self, args): + # Determines new __args__ for __getitem__. + # + # For example, suppose we had: + # T1 = TypeVar('T1') + # T2 = TypeVar('T2') + # class A(Generic[T1, T2]): pass + # T3 = TypeVar('T3') + # B = A[int, T3] + # C = B[str] + # `B.__args__` is `(int, T3)`, so `C.__args__` should be `(int, str)`. + # Unfortunately, this is harder than it looks, because if `T3` is + # anything more exotic than a plain `TypeVar`, we need to consider + # edge cases. 
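+        # (For instance, a ParamSpec or TypeVarTuple among the parameters may
+        # absorb several of the incoming arguments; the
+        # __typing_prepare_subst__ hooks consulted below handle that.)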
+ + params = self.__parameters__ + # In the example above, this would be {T3: str} + for param in params: + prepare = getattr(param, '__typing_prepare_subst__', None) + if prepare is not None: + args = prepare(self, args) + alen = len(args) + plen = len(params) + if alen != plen: + raise TypeError(f"Too {'many' if alen > plen else 'few'} arguments for {self};" + f" actual {alen}, expected {plen}") + new_arg_by_param = dict(zip(params, args)) + return tuple(self._make_substitution(self.__args__, new_arg_by_param)) + + def _make_substitution(self, args, new_arg_by_param): + """Create a list of new type arguments.""" + new_args = [] + for old_arg in args: + if isinstance(old_arg, type): + new_args.append(old_arg) + continue + + substfunc = getattr(old_arg, '__typing_subst__', None) + if substfunc: + new_arg = substfunc(new_arg_by_param[old_arg]) + else: + subparams = getattr(old_arg, '__parameters__', ()) + if not subparams: + new_arg = old_arg + else: + subargs = [] + for x in subparams: + if isinstance(x, TypeVarTuple): + subargs.extend(new_arg_by_param[x]) + else: + subargs.append(new_arg_by_param[x]) + new_arg = old_arg[tuple(subargs)] + + if self.__origin__ == collections.abc.Callable and isinstance(new_arg, tuple): + # Consider the following `Callable`. + # C = Callable[[int], str] + # Here, `C.__args__` should be (int, str) - NOT ([int], str). + # That means that if we had something like... + # P = ParamSpec('P') + # T = TypeVar('T') + # C = Callable[P, T] + # D = C[[int, str], float] + # ...we need to be careful; `new_args` should end up as + # `(int, str, float)` rather than `([int, str], float)`. + new_args.extend(new_arg) + elif _is_unpacked_typevartuple(old_arg): + # Consider the following `_GenericAlias`, `B`: + # class A(Generic[*Ts]): ... + # B = A[T, *Ts] + # If we then do: + # B[float, int, str] + # The `new_arg` corresponding to `T` will be `float`, and the + # `new_arg` corresponding to `*Ts` will be `(int, str)`. We + # should join all these types together in a flat list + # `(float, int, str)` - so again, we should `extend`. + new_args.extend(new_arg) + elif isinstance(old_arg, tuple): + # Corner case: + # P = ParamSpec('P') + # T = TypeVar('T') + # class Base(Generic[P]): ... + # Can be substituted like this: + # X = Base[[int, T]] + # In this case, `old_arg` will be a tuple: + new_args.append( + tuple(self._make_substitution(old_arg, new_arg_by_param)), + ) + else: + new_args.append(new_arg) + return new_args + + def copy_with(self, args): + return self.__class__(self.__origin__, args, name=self._name, inst=self._inst) + + def __repr__(self): + if self._name: + name = 'typing.' + self._name + else: + name = _type_repr(self.__origin__) + if self.__args__: + args = ", ".join([_type_repr(a) for a in self.__args__]) + else: + # To ensure the repr is eval-able. 
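+            # (an empty __args__ would otherwise render as "name[]",
+            # which is not valid syntax)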
+ args = "()" + return f'{name}[{args}]' + + def __reduce__(self): + if self._name: + origin = globals()[self._name] + else: + origin = self.__origin__ + args = tuple(self.__args__) + if len(args) == 1 and not isinstance(args[0], tuple): + args, = args + return operator.getitem, (origin, args) + + def __mro_entries__(self, bases): + if isinstance(self.__origin__, _SpecialForm): + raise TypeError(f"Cannot subclass {self!r}") + + if self._name: # generic version of an ABC or built-in class + return super().__mro_entries__(bases) + if self.__origin__ is Generic: + if Protocol in bases: + return () + i = bases.index(self) + for b in bases[i+1:]: + if isinstance(b, _BaseGenericAlias) and b is not self: + return () + return (self.__origin__,) + + def __iter__(self): + yield Unpack[self] + + +# _nparams is the number of accepted parameters, e.g. 0 for Hashable, +# 1 for List and 2 for Dict. It may be -1 if variable number of +# parameters are accepted (needs custom __getitem__). + +class _SpecialGenericAlias(_NotIterable, _BaseGenericAlias, _root=True): + def __init__(self, origin, nparams, *, inst=True, name=None, defaults=()): + if name is None: + name = origin.__name__ + super().__init__(origin, inst=inst, name=name) + self._nparams = nparams + self._defaults = defaults + if origin.__module__ == 'builtins': + self.__doc__ = f'Deprecated alias to {origin.__qualname__}.' + else: + self.__doc__ = f'Deprecated alias to {origin.__module__}.{origin.__qualname__}.' + + @_tp_cache + def __getitem__(self, params): + if not isinstance(params, tuple): + params = (params,) + msg = "Parameters to generic types must be types." + params = tuple(_type_check(p, msg) for p in params) + if (self._defaults + and len(params) < self._nparams + and len(params) + len(self._defaults) >= self._nparams + ): + params = (*params, *self._defaults[len(params) - self._nparams:]) + actual_len = len(params) + + if actual_len != self._nparams: + if self._defaults: + expected = f"at least {self._nparams - len(self._defaults)}" + else: + expected = str(self._nparams) + if not self._nparams: + raise TypeError(f"{self} is not a generic class") + raise TypeError(f"Too {'many' if actual_len > self._nparams else 'few'} arguments for {self};" + f" actual {actual_len}, expected {expected}") + return self.copy_with(params) + + def copy_with(self, params): + return _GenericAlias(self.__origin__, params, + name=self._name, inst=self._inst) + + def __repr__(self): + return 'typing.' 
+ self._name + + def __subclasscheck__(self, cls): + if isinstance(cls, _SpecialGenericAlias): + return issubclass(cls.__origin__, self.__origin__) + if not isinstance(cls, _GenericAlias): + return issubclass(cls, self.__origin__) + return super().__subclasscheck__(cls) + + def __reduce__(self): + return self._name + + def __or__(self, right): + return Union[self, right] + + def __ror__(self, left): + return Union[left, self] + + +class _DeprecatedGenericAlias(_SpecialGenericAlias, _root=True): + def __init__( + self, origin, nparams, *, removal_version, inst=True, name=None + ): + super().__init__(origin, nparams, inst=inst, name=name) + self._removal_version = removal_version + + def __instancecheck__(self, inst): + import warnings + warnings._deprecated( + f"{self.__module__}.{self._name}", remove=self._removal_version + ) + return super().__instancecheck__(inst) + + +class _CallableGenericAlias(_NotIterable, _GenericAlias, _root=True): + def __repr__(self): + assert self._name == 'Callable' + args = self.__args__ + if len(args) == 2 and _is_param_expr(args[0]): + return super().__repr__() + return (f'typing.Callable' + f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], ' + f'{_type_repr(args[-1])}]') + + def __reduce__(self): + args = self.__args__ + if not (len(args) == 2 and _is_param_expr(args[0])): + args = list(args[:-1]), args[-1] + return operator.getitem, (Callable, args) + + +class _CallableType(_SpecialGenericAlias, _root=True): + def copy_with(self, params): + return _CallableGenericAlias(self.__origin__, params, + name=self._name, inst=self._inst) + + def __getitem__(self, params): + if not isinstance(params, tuple) or len(params) != 2: + raise TypeError("Callable must be used as " + "Callable[[arg, ...], result].") + args, result = params + # This relaxes what args can be on purpose to allow things like + # PEP 612 ParamSpec. Responsibility for whether a user is using + # Callable[...] properly is deferred to static type checkers. + if isinstance(args, list): + params = (tuple(args), result) + else: + params = (args, result) + return self.__getitem_inner__(params) + + @_tp_cache + def __getitem_inner__(self, params): + args, result = params + msg = "Callable[args, result]: result must be a type." + result = _type_check(result, msg) + if args is Ellipsis: + return self.copy_with((_TypingEllipsis, result)) + if not isinstance(args, tuple): + args = (args,) + args = tuple(_type_convert(arg) for arg in args) + params = args + (result,) + return self.copy_with(params) + + +class _TupleType(_SpecialGenericAlias, _root=True): + @_tp_cache + def __getitem__(self, params): + if not isinstance(params, tuple): + params = (params,) + if len(params) >= 2 and params[-1] is ...: + msg = "Tuple[t, ...]: t must be a type." + params = tuple(_type_check(p, msg) for p in params[:-1]) + return self.copy_with((*params, _TypingEllipsis)) + msg = "Tuple[t0, t1, ...]: each t must be a type." 
+ params = tuple(_type_check(p, msg) for p in params) + return self.copy_with(params) + + +class _UnionGenericAliasMeta(type): + def __instancecheck__(self, inst: object) -> bool: + import warnings + warnings._deprecated("_UnionGenericAlias", remove=(3, 17)) + return isinstance(inst, Union) + + def __subclasscheck__(self, inst: type) -> bool: + import warnings + warnings._deprecated("_UnionGenericAlias", remove=(3, 17)) + return issubclass(inst, Union) + + def __eq__(self, other): + import warnings + warnings._deprecated("_UnionGenericAlias", remove=(3, 17)) + if other is _UnionGenericAlias or other is Union: + return True + return NotImplemented + + def __hash__(self): + return hash(Union) + + +class _UnionGenericAlias(metaclass=_UnionGenericAliasMeta): + """Compatibility hack. + + A class named _UnionGenericAlias used to be used to implement + typing.Union. This class exists to serve as a shim to preserve + the meaning of some code that used to use _UnionGenericAlias + directly. + + """ + def __new__(cls, self_cls, parameters, /, *, name=None): + import warnings + warnings._deprecated("_UnionGenericAlias", remove=(3, 17)) + return Union[parameters] + + +def _value_and_type_iter(parameters): + return ((p, type(p)) for p in parameters) + + +class _LiteralGenericAlias(_GenericAlias, _root=True): + def __eq__(self, other): + if not isinstance(other, _LiteralGenericAlias): + return NotImplemented + + return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__)) + + def __hash__(self): + return hash(frozenset(_value_and_type_iter(self.__args__))) + + +class _ConcatenateGenericAlias(_GenericAlias, _root=True): + def copy_with(self, params): + if isinstance(params[-1], (list, tuple)): + return (*params[:-1], *params[-1]) + if isinstance(params[-1], _ConcatenateGenericAlias): + params = (*params[:-1], *params[-1].__args__) + return super().copy_with(params) + + +@_SpecialForm +def Unpack(self, parameters): + """Type unpack operator. + + The type unpack operator takes the child types from some container type, + such as `tuple[int, str]` or a `TypeVarTuple`, and 'pulls them out'. + + For example:: + + # For some generic class `Foo`: + Foo[Unpack[tuple[int, str]]] # Equivalent to Foo[int, str] + + Ts = TypeVarTuple('Ts') + # Specifies that `Bar` is generic in an arbitrary number of types. + # (Think of `Ts` as a tuple of an arbitrary number of individual + # `TypeVar`s, which the `Unpack` is 'pulling out' directly into the + # `Generic[]`.) + class Bar(Generic[Unpack[Ts]]): ... + Bar[int] # Valid + Bar[int, str] # Also valid + + From Python 3.11, this can also be done using the `*` operator:: + + Foo[*tuple[int, str]] + class Bar(Generic[*Ts]): ... + + And from Python 3.12, it can be done using built-in syntax for generics:: + + Foo[*tuple[int, str]] + class Bar[*Ts]: ... + + The operator can also be used along with a `TypedDict` to annotate + `**kwargs` in a function signature:: + + class Movie(TypedDict): + name: str + year: int + + # This function expects two keyword arguments - *name* of type `str` and + # *year* of type `int`. + def foo(**kwargs: Unpack[Movie]): ... + + Note that there is only some runtime checking of this operator. Not + everything the runtime allows may be accepted by static type checkers. + + For more information, see PEPs 646 and 692. 
+ """ + item = _type_check(parameters, f'{self} accepts only single type.') + return _UnpackGenericAlias(origin=self, args=(item,)) + + +class _UnpackGenericAlias(_GenericAlias, _root=True): + def __repr__(self): + # `Unpack` only takes one argument, so __args__ should contain only + # a single item. + return f'typing.Unpack[{_type_repr(self.__args__[0])}]' + + def __getitem__(self, args): + if self.__typing_is_unpacked_typevartuple__: + return args + return super().__getitem__(args) + + @property + def __typing_unpacked_tuple_args__(self): + assert self.__origin__ is Unpack + assert len(self.__args__) == 1 + arg, = self.__args__ + if isinstance(arg, (_GenericAlias, types.GenericAlias)): + if arg.__origin__ is not tuple: + raise TypeError("Unpack[...] must be used with a tuple type") + return arg.__args__ + return None + + @property + def __typing_is_unpacked_typevartuple__(self): + assert self.__origin__ is Unpack + assert len(self.__args__) == 1 + return isinstance(self.__args__[0], TypeVarTuple) + + +class _TypingEllipsis: + """Internal placeholder for ... (ellipsis).""" + + +_TYPING_INTERNALS = frozenset({ + '__parameters__', '__orig_bases__', '__orig_class__', + '_is_protocol', '_is_runtime_protocol', '__protocol_attrs__', + '__non_callable_proto_members__', '__type_params__', +}) + +_SPECIAL_NAMES = frozenset({ + '__abstractmethods__', '__annotations__', '__dict__', '__doc__', + '__init__', '__module__', '__new__', '__slots__', + '__subclasshook__', '__weakref__', '__class_getitem__', + '__match_args__', '__static_attributes__', '__firstlineno__', + '__annotate__', '__annotate_func__', '__annotations_cache__', +}) + +# These special attributes will be not collected as protocol members. +EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS | _SPECIAL_NAMES | {'_MutableMapping__marker'} + + +def _get_protocol_attrs(cls): + """Collect protocol members from a protocol class objects. + + This includes names actually defined in the class dictionary, as well + as names that appear in annotations. Special names (above) are skipped. + """ + attrs = set() + for base in cls.__mro__[:-1]: # without object + if base.__name__ in {'Protocol', 'Generic'}: + continue + try: + annotations = base.__annotations__ + except Exception: + # Only go through annotationlib to handle deferred annotations if we need to + annotations = _lazy_annotationlib.get_annotations( + base, format=_lazy_annotationlib.Format.FORWARDREF + ) + for attr in (*base.__dict__, *annotations): + if not attr.startswith('_abc_') and attr not in EXCLUDED_ATTRIBUTES: + attrs.add(attr) + return attrs + + +def _no_init_or_replace_init(self, *args, **kwargs): + cls = type(self) + + if cls._is_protocol: + raise TypeError('Protocols cannot be instantiated') + + # Already using a custom `__init__`. No need to calculate correct + # `__init__` to call. This can lead to RecursionError. See bpo-45121. + if cls.__init__ is not _no_init_or_replace_init: + return + + # Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`. + # The first instantiation of the subclass will call `_no_init_or_replace_init` which + # searches for a proper new `__init__` in the MRO. The new `__init__` + # replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent + # instantiation of the protocol subclass will thus use the new + # `__init__` and no longer call `_no_init_or_replace_init`. 
+    for base in cls.__mro__:
+        init = base.__dict__.get('__init__', _no_init_or_replace_init)
+        if init is not _no_init_or_replace_init:
+            cls.__init__ = init
+            break
+    else:
+        # should not happen
+        cls.__init__ = object.__init__
+
+    cls.__init__(self, *args, **kwargs)
+
+
+def _caller(depth=1, default='__main__'):
+    try:
+        return sys._getframemodulename(depth + 1) or default
+    except AttributeError:  # For platforms without _getframemodulename()
+        pass
+    try:
+        return sys._getframe(depth + 1).f_globals.get('__name__', default)
+    except (AttributeError, ValueError):  # For platforms without _getframe()
+        pass
+    return None
+
+def _allow_reckless_class_checks(depth=2):
+    """Allow instance and class checks for special stdlib modules.
+
+    The abc and functools modules indiscriminately call isinstance() and
+    issubclass() on the whole MRO of a user class, which may contain protocols.
+    """
+    return _caller(depth) in {'abc', 'functools', None}
+
+
+_PROTO_ALLOWLIST = {
+    'collections.abc': [
+        'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
+        'AsyncIterator', 'Hashable', 'Sized', 'Container', 'Collection',
+        'Reversible', 'Buffer',
+    ],
+    'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
+    'io': ['Reader', 'Writer'],
+    'os': ['PathLike'],
+}
+
+
+@functools.cache
+def _lazy_load_getattr_static():
+    # Import getattr_static lazily so as not to slow down the import of typing.py
+    # Cache the result so we don't slow down _ProtocolMeta.__instancecheck__ unnecessarily
+    from inspect import getattr_static
+    return getattr_static
+
+
+_cleanups.append(_lazy_load_getattr_static.cache_clear)
+
+def _pickle_psargs(psargs):
+    return ParamSpecArgs, (psargs.__origin__,)
+
+copyreg.pickle(ParamSpecArgs, _pickle_psargs)
+
+def _pickle_pskwargs(pskwargs):
+    return ParamSpecKwargs, (pskwargs.__origin__,)
+
+copyreg.pickle(ParamSpecKwargs, _pickle_pskwargs)
+
+del _pickle_psargs, _pickle_pskwargs
+
+
+# Preload these once, as globals, as a micro-optimisation.
+# This makes a significant difference to the time it takes
+# to do `isinstance()`/`issubclass()` checks
+# against runtime-checkable protocols with only one callable member.
+_abc_instancecheck = ABCMeta.__instancecheck__
+_abc_subclasscheck = ABCMeta.__subclasscheck__
+
+
+def _type_check_issubclass_arg_1(arg):
+    """Raise TypeError if `arg` is not an instance of `type`
+    in `issubclass(arg, <protocol>)`.
+
+    In most cases, this is verified by type.__subclasscheck__.
+    Checking it again unnecessarily would slow down issubclass() checks,
+    so, we don't perform this check unless we absolutely have to.
+
+    For various error paths, however,
+    we want to ensure that *this* error message is shown to the user
+    where relevant, rather than a typing.py-specific error message.
+    """
+    if not isinstance(arg, type):
+        # Same error message as for issubclass(1, int).
+        raise TypeError('issubclass() arg 1 must be a class')
+
+
+class _ProtocolMeta(ABCMeta):
+    # This metaclass is somewhat unfortunate,
+    # but is necessary for several reasons...
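+    # (Among those reasons: it validates protocol bases at class creation,
+    # precomputes __protocol_attrs__, and implements the special
+    # isinstance()/issubclass() behaviour for @runtime_checkable protocols.)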
+    def __new__(mcls, name, bases, namespace, /, **kwargs):
+        if name == "Protocol" and bases == (Generic,):
+            pass
+        elif Protocol in bases:
+            for base in bases:
+                if not (
+                    base in {object, Generic}
+                    or base.__name__ in _PROTO_ALLOWLIST.get(base.__module__, [])
+                    or (
+                        issubclass(base, Generic)
+                        and getattr(base, "_is_protocol", False)
+                    )
+                ):
+                    raise TypeError(
+                        f"Protocols can only inherit from other protocols, "
+                        f"got {base!r}"
+                    )
+        return super().__new__(mcls, name, bases, namespace, **kwargs)
+
+    def __init__(cls, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if getattr(cls, "_is_protocol", False):
+            cls.__protocol_attrs__ = _get_protocol_attrs(cls)
+
+    def __subclasscheck__(cls, other):
+        if cls is Protocol:
+            return type.__subclasscheck__(cls, other)
+        if (
+            getattr(cls, '_is_protocol', False)
+            and not _allow_reckless_class_checks()
+        ):
+            if not getattr(cls, '_is_runtime_protocol', False):
+                _type_check_issubclass_arg_1(other)
+                raise TypeError(
+                    "Instance and class checks can only be used with "
+                    "@runtime_checkable protocols"
+                )
+            if (
+                # this attribute is set by @runtime_checkable:
+                cls.__non_callable_proto_members__
+                and cls.__dict__.get("__subclasshook__") is _proto_hook
+            ):
+                _type_check_issubclass_arg_1(other)
+                non_method_attrs = sorted(cls.__non_callable_proto_members__)
+                raise TypeError(
+                    "Protocols with non-method members don't support issubclass()."
+                    f" Non-method members: {str(non_method_attrs)[1:-1]}."
+                )
+        return _abc_subclasscheck(cls, other)
+
+    def __instancecheck__(cls, instance):
+        # We need this method for situations where attributes are
+        # assigned in __init__.
+        if cls is Protocol:
+            return type.__instancecheck__(cls, instance)
+        if not getattr(cls, "_is_protocol", False):
+            # i.e., it's a concrete subclass of a protocol
+            return _abc_instancecheck(cls, instance)
+
+        if (
+            not getattr(cls, '_is_runtime_protocol', False) and
+            not _allow_reckless_class_checks()
+        ):
+            raise TypeError("Instance and class checks can only be used with"
+                            " @runtime_checkable protocols")
+
+        if _abc_instancecheck(cls, instance):
+            return True
+
+        getattr_static = _lazy_load_getattr_static()
+        for attr in cls.__protocol_attrs__:
+            try:
+                val = getattr_static(instance, attr)
+            except AttributeError:
+                break
+            # this attribute is set by @runtime_checkable:
+            if val is None and attr not in cls.__non_callable_proto_members__:
+                break
+        else:
+            return True
+
+        return False
+
+
+@classmethod
+def _proto_hook(cls, other):
+    if not cls.__dict__.get('_is_protocol', False):
+        return NotImplemented
+
+    for attr in cls.__protocol_attrs__:
+        for base in other.__mro__:
+            # Check if the member appears in the class dictionary...
+            if attr in base.__dict__:
+                if base.__dict__[attr] is None:
+                    return NotImplemented
+                break
+
+            # ...or in annotations, if it is a sub-protocol.
+            if issubclass(other, Generic) and getattr(other, "_is_protocol", False):
+                # We avoid the slower path through annotationlib here because in most
+                # cases it should be unnecessary.
+                try:
+                    annos = base.__annotations__
+                except Exception:
+                    annos = _lazy_annotationlib.get_annotations(
+                        base, format=_lazy_annotationlib.Format.FORWARDREF
+                    )
+                if attr in annos:
+                    break
+        else:
+            return NotImplemented
+    return True
+
+
+class Protocol(Generic, metaclass=_ProtocolMeta):
+    """Base class for protocol classes.
+
+    Protocol classes are defined as::
+
+        class Proto(Protocol):
+            def meth(self) -> int:
+                ...
+
+    Such classes are primarily used with static type checkers that recognize
+    structural subtyping (static duck-typing).
+
+    For example::
+
+        class C:
+            def meth(self) -> int:
+                return 0
+
+        def func(x: Proto) -> int:
+            return x.meth()
+
+        func(C())  # Passes static type check
+
+    See PEP 544 for details. Protocol classes decorated with
+    @typing.runtime_checkable act as simple-minded runtime protocols that check
+    only the presence of given attributes, ignoring their type signatures.
+    Protocol classes can be generic; they are defined as::
+
+        class GenProto[T](Protocol):
+            def meth(self) -> T:
+                ...
+    """
+
+    __slots__ = ()
+    _is_protocol = True
+    _is_runtime_protocol = False
+
+    def __init_subclass__(cls, *args, **kwargs):
+        super().__init_subclass__(*args, **kwargs)
+
+        # Determine if this is a protocol or a concrete subclass.
+        if not cls.__dict__.get('_is_protocol', False):
+            cls._is_protocol = any(b is Protocol for b in cls.__bases__)
+
+        # Set (or override) the protocol subclass hook.
+        if '__subclasshook__' not in cls.__dict__:
+            cls.__subclasshook__ = _proto_hook
+
+        # Prohibit instantiation for protocol classes
+        if cls._is_protocol and cls.__init__ is Protocol.__init__:
+            cls.__init__ = _no_init_or_replace_init
+
+
+class _AnnotatedAlias(_NotIterable, _GenericAlias, _root=True):
+    """Runtime representation of an annotated type.
+
+    At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
+    with extra metadata. The alias behaves like a normal typing alias.
+    Instantiating is the same as instantiating the underlying type; binding
+    it to types is also the same.
+
+    The metadata itself is stored in a '__metadata__' attribute as a tuple.
+    """
+
+    def __init__(self, origin, metadata):
+        if isinstance(origin, _AnnotatedAlias):
+            metadata = origin.__metadata__ + metadata
+            origin = origin.__origin__
+        super().__init__(origin, origin, name='Annotated')
+        self.__metadata__ = metadata
+
+    def copy_with(self, params):
+        assert len(params) == 1
+        new_type = params[0]
+        return _AnnotatedAlias(new_type, self.__metadata__)
+
+    def __repr__(self):
+        return "typing.Annotated[{}, {}]".format(
+            _type_repr(self.__origin__),
+            ", ".join(repr(a) for a in self.__metadata__)
+        )
+
+    def __reduce__(self):
+        return operator.getitem, (
+            Annotated, (self.__origin__,) + self.__metadata__
+        )
+
+    def __eq__(self, other):
+        if not isinstance(other, _AnnotatedAlias):
+            return NotImplemented
+        return (self.__origin__ == other.__origin__
+                and self.__metadata__ == other.__metadata__)
+
+    def __hash__(self):
+        return hash((self.__origin__, self.__metadata__))
+
+    def __getattr__(self, attr):
+        if attr in {'__name__', '__qualname__'}:
+            return 'Annotated'
+        return super().__getattr__(attr)
+
+    def __mro_entries__(self, bases):
+        return (self.__origin__,)
+
+
+@_TypedCacheSpecialForm
+@_tp_cache(typed=True)
+def Annotated(self, *params):
+    """Add context-specific metadata to a type.
+
+    Example: Annotated[int, runtime_check.Unsigned] indicates to the
+    hypothetical runtime_check module that this type is an unsigned int.
+    Every other consumer of this type can ignore this metadata and treat
+    this type as int.
+
+    The first argument to Annotated must be a valid type.
+
+    Details:
+
+    - It's an error to call `Annotated` with less than two arguments.
+    - Access the metadata via the ``__metadata__`` attribute::
+
+        assert Annotated[int, '$'].__metadata__ == ('$',)
+
+    - Nested Annotated types are flattened::
+
+        assert Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
+
+    - Instantiating an annotated type is equivalent to instantiating the
+      underlying type::
+
+        assert Annotated[C, Ann1](5) == C(5)
+
+    - Annotated can be used as a generic type alias::
+
+        type Optimized[T] = Annotated[T, runtime.Optimize()]
+        # type checker will treat Optimized[int]
+        # as equivalent to Annotated[int, runtime.Optimize()]
+
+        type OptimizedList[T] = Annotated[list[T], runtime.Optimize()]
+        # type checker will treat OptimizedList[int]
+        # as equivalent to Annotated[list[int], runtime.Optimize()]
+
+    - Annotated cannot be used with an unpacked TypeVarTuple::
+
+        type Variadic[*Ts] = Annotated[*Ts, Ann1]  # NOT valid
+
+      This would be equivalent to::
+
+        Annotated[T1, T2, T3, ..., Ann1]
+
+      where T1, T2 etc. are TypeVars, which would be invalid, because
+      only one type should be passed to Annotated.
+    """
+    if len(params) < 2:
+        raise TypeError("Annotated[...] should be used "
+                        "with at least two arguments (a type and an "
+                        "annotation).")
+    if _is_unpacked_typevartuple(params[0]):
+        raise TypeError("Annotated[...] should not be used with an "
+                        "unpacked TypeVarTuple")
+    msg = "Annotated[t, ...]: t must be a type."
+    origin = _type_check(params[0], msg, allow_special_forms=True)
+    metadata = tuple(params[1:])
+    return _AnnotatedAlias(origin, metadata)
+
+
+def runtime_checkable(cls):
+    """Mark a protocol class as a runtime protocol.
+
+    Such a protocol can be used with isinstance() and issubclass().
+    Raise TypeError if applied to a non-protocol class.
+    This allows a simple-minded structural check very similar to
+    one-trick ponies in collections.abc such as Iterable.
+
+    For example::
+
+        @runtime_checkable
+        class Closable(Protocol):
+            def close(self): ...
+
+        assert isinstance(open('/some/file'), Closable)
+
+    Warning: this will check only the presence of the required methods,
+    not their type signatures!
+    """
+    if not issubclass(cls, Generic) or not getattr(cls, '_is_protocol', False):
+        raise TypeError('@runtime_checkable can only be applied to protocol classes,'
+                        ' got %r' % cls)
+    cls._is_runtime_protocol = True
+    # PEP 544 prohibits using issubclass()
+    # with protocols that have non-method members.
+    # See gh-113320 for why we compute this attribute here,
+    # rather than in `_ProtocolMeta.__init__`
+    cls.__non_callable_proto_members__ = set()
+    for attr in cls.__protocol_attrs__:
+        try:
+            is_callable = callable(getattr(cls, attr, None))
+        except Exception as e:
+            raise TypeError(
+                f"Failed to determine whether protocol member {attr!r} "
+                "is a method member"
+            ) from e
+        else:
+            if not is_callable:
+                cls.__non_callable_proto_members__.add(attr)
+    return cls
+
+
+def cast(typ, val):
+    """Cast a value to a type.
+
+    This returns the value unchanged. To the type checker this
+    signals that the return value has the designated type, but at
+    runtime we intentionally don't check anything (we want this
+    to be as fast as possible).
+    """
+    return val
+
+
+def assert_type(val, typ, /):
+    """Ask a static type checker to confirm that the value is of the given type.
+
+    At runtime this does nothing: it returns the first argument unchanged with no
+    checks or side effects, no matter the actual type of the argument.
+
+    When a static type checker encounters a call to assert_type(), it
+    emits an error if the value is not of the specified type::
+
+        def greet(name: str) -> None:
+            assert_type(name, str)  # OK
+            assert_type(name, int)  # type checker error
+    """
+    return val
+
+
+def get_type_hints(obj, globalns=None, localns=None, include_extras=False,
+                   *, format=None):
+    """Return type hints for an object.
+
+    This is often the same as obj.__annotations__, but it handles
+    forward references encoded as string literals and recursively replaces all
+    'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
+
+    The argument may be a module, class, method, or function. The annotations
+    are returned as a dictionary. For classes, annotations also include
+    inherited members.
+
+    TypeError is raised if the argument is not of a type that can contain
+    annotations, and an empty dictionary is returned if no annotations are
+    present.
+
+    BEWARE -- the behavior of globalns and localns is counterintuitive
+    (unless you are familiar with how eval() and exec() work). The
+    search order is locals first, then globals.
+
+    - If no dict arguments are passed, an attempt is made to use the
+      globals from obj (or the respective module's globals for classes),
+      and these are also used as the locals. If the object does not appear
+      to have globals, an empty dictionary is used. For classes, the search
+      order is globals first then locals.
+
+    - If one dict argument is passed, it is used for both globals and
+      locals.
+
+    - If two dict arguments are passed, they specify globals and
+      locals, respectively.
+    """
+    if getattr(obj, '__no_type_check__', None):
+        return {}
+    Format = _lazy_annotationlib.Format
+    if format is None:
+        format = Format.VALUE
+    # Classes require a special treatment.
+    if isinstance(obj, type):
+        hints = {}
+        for base in reversed(obj.__mro__):
+            ann = _lazy_annotationlib.get_annotations(base, format=format)
+            if format == Format.STRING:
+                hints.update(ann)
+                continue
+            if globalns is None:
+                base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {})
+            else:
+                base_globals = globalns
+            base_locals = dict(vars(base)) if localns is None else localns
+            if localns is None and globalns is None:
+                # This is surprising, but required. Before Python 3.10,
+                # get_type_hints only evaluated the globalns of
+                # a class. To maintain backwards compatibility, we reverse
+                # the globalns and localns order so that eval() looks into
+                # *base_globals* first rather than *base_locals*.
+                # This only affects ForwardRefs.
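+                # (Sketch: if a name is defined both at module level and in
+                # the class body, the annotation resolves to the module-level
+                # one, as it did before 3.10.)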
+ base_globals, base_locals = base_locals, base_globals + type_params = base.__type_params__ + base_globals, base_locals = _add_type_params_to_scope( + type_params, base_globals, base_locals, True) + for name, value in ann.items(): + if isinstance(value, str): + value = _make_forward_ref(value, is_argument=False, is_class=True) + value = _eval_type(value, base_globals, base_locals, (), + format=format, owner=obj, prefer_fwd_module=True) + if value is None: + value = type(None) + hints[name] = value + if include_extras or format == Format.STRING: + return hints + else: + return {k: _strip_annotations(t) for k, t in hints.items()} + + hints = _lazy_annotationlib.get_annotations(obj, format=format) + if ( + not hints + and not isinstance(obj, types.ModuleType) + and not callable(obj) + and not hasattr(obj, '__annotations__') + and not hasattr(obj, '__annotate__') + ): + raise TypeError(f"{obj!r} is not a module, class, or callable.") + if format == Format.STRING: + return hints + + if globalns is None: + if isinstance(obj, types.ModuleType): + globalns = obj.__dict__ + else: + nsobj = obj + # Find globalns for the unwrapped object. + while hasattr(nsobj, '__wrapped__'): + nsobj = nsobj.__wrapped__ + globalns = getattr(nsobj, '__globals__', {}) + if localns is None: + localns = globalns + elif localns is None: + localns = globalns + type_params = getattr(obj, "__type_params__", ()) + globalns, localns = _add_type_params_to_scope(type_params, globalns, localns, False) + for name, value in hints.items(): + if isinstance(value, str): + # class-level forward refs were handled above, this must be either + # a module-level annotation or a function argument annotation + value = _make_forward_ref( + value, + is_argument=not isinstance(obj, types.ModuleType), + is_class=False, + ) + value = _eval_type(value, globalns, localns, (), format=format, owner=obj, prefer_fwd_module=True) + if value is None: + value = type(None) + hints[name] = value + return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()} + + +# Add type parameters to the globals and locals scope. This is needed for +# compatibility. +def _add_type_params_to_scope(type_params, globalns, localns, is_class): + if not type_params: + return globalns, localns + globalns = dict(globalns) + localns = dict(localns) + for param in type_params: + if not is_class or param.__name__ not in globalns: + globalns[param.__name__] = param + localns.pop(param.__name__, None) + return globalns, localns + + +def _strip_annotations(t): + """Strip the annotations from a given type.""" + if isinstance(t, _AnnotatedAlias): + return _strip_annotations(t.__origin__) + if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired, ReadOnly): + return _strip_annotations(t.__args__[0]) + if isinstance(t, _GenericAlias): + stripped_args = tuple(_strip_annotations(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return t.copy_with(stripped_args) + if isinstance(t, GenericAlias): + stripped_args = tuple(_strip_annotations(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return GenericAlias(t.__origin__, stripped_args) + if isinstance(t, Union): + stripped_args = tuple(_strip_annotations(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return functools.reduce(operator.or_, stripped_args) + + return t + + +def get_origin(tp): + """Get the unsubscripted version of a type. 
+ + This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar, + Annotated, and others. Return None for unsupported types. + + Examples:: + + >>> P = ParamSpec('P') + >>> assert get_origin(Literal[42]) is Literal + >>> assert get_origin(int) is None + >>> assert get_origin(ClassVar[int]) is ClassVar + >>> assert get_origin(Generic) is Generic + >>> assert get_origin(Generic[T]) is Generic + >>> assert get_origin(Union[T, int]) is Union + >>> assert get_origin(List[Tuple[T, T]][int]) is list + >>> assert get_origin(P.args) is P + """ + if isinstance(tp, _AnnotatedAlias): + return Annotated + if isinstance(tp, (_BaseGenericAlias, GenericAlias, + ParamSpecArgs, ParamSpecKwargs)): + return tp.__origin__ + if tp is Generic: + return Generic + if isinstance(tp, Union): + return Union + return None + + +def get_args(tp): + """Get type arguments with all substitutions performed. + + For unions, basic simplifications used by Union constructor are performed. + + Examples:: + + >>> T = TypeVar('T') + >>> assert get_args(Dict[str, int]) == (str, int) + >>> assert get_args(int) == () + >>> assert get_args(Union[int, Union[T, int], str][int]) == (int, str) + >>> assert get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + >>> assert get_args(Callable[[], T][int]) == ([], int) + """ + if isinstance(tp, _AnnotatedAlias): + return (tp.__origin__,) + tp.__metadata__ + if isinstance(tp, (_GenericAlias, GenericAlias)): + res = tp.__args__ + if _should_unflatten_callable_args(tp, res): + res = (list(res[:-1]), res[-1]) + return res + if isinstance(tp, Union): + return tp.__args__ + return () + + +def is_typeddict(tp): + """Check if an annotation is a TypedDict class. + + For example:: + + >>> from typing import TypedDict + >>> class Film(TypedDict): + ... title: str + ... year: int + ... + >>> is_typeddict(Film) + True + >>> is_typeddict(dict) + False + """ + return isinstance(tp, _TypedDictMeta) + + +_ASSERT_NEVER_REPR_MAX_LENGTH = 100 + + +def assert_never(arg: Never, /) -> Never: + """Statically assert that a line of code is unreachable. + + Example:: + + def int_or_str(arg: int | str) -> None: + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + assert_never(arg) + + If a type checker finds that a call to assert_never() is + reachable, it will emit an error. + + At runtime, this throws an exception when called. + """ + value = repr(arg) + if len(value) > _ASSERT_NEVER_REPR_MAX_LENGTH: + value = value[:_ASSERT_NEVER_REPR_MAX_LENGTH] + '...' + raise AssertionError(f"Expected code to be unreachable, but got: {value}") + + +def no_type_check(arg): + """Decorator to indicate that annotations are not type hints. + + The argument must be a class or function; if it is a class, it + applies recursively to all methods and classes defined in that class + (but not to methods defined in its superclasses or subclasses). + + This mutates the function(s) or class(es) in place. + """ + if isinstance(arg, type): + for key in dir(arg): + obj = getattr(arg, key) + if ( + not hasattr(obj, '__qualname__') + or obj.__qualname__ != f'{arg.__qualname__}.{obj.__name__}' + or getattr(obj, '__module__', None) != arg.__module__ + ): + # We only modify objects that are defined in this type directly. + # If classes / methods are nested in multiple layers, + # we will modify them when processing their direct holders. 
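+                # (An inherited method, for example, fails the __qualname__
+                # check above and is left untouched.)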
+ continue + # Instance, class, and static methods: + if isinstance(obj, types.FunctionType): + obj.__no_type_check__ = True + if isinstance(obj, types.MethodType): + obj.__func__.__no_type_check__ = True + # Nested types: + if isinstance(obj, type): + no_type_check(obj) + try: + arg.__no_type_check__ = True + except TypeError: # built-in classes + pass + return arg + + +def no_type_check_decorator(decorator): + """Decorator to give another decorator the @no_type_check effect. + + This wraps the decorator with something that wraps the decorated + function in @no_type_check. + """ + import warnings + warnings._deprecated("typing.no_type_check_decorator", remove=(3, 15)) + @functools.wraps(decorator) + def wrapped_decorator(*args, **kwds): + func = decorator(*args, **kwds) + func = no_type_check(func) + return func + + return wrapped_decorator + + +def _overload_dummy(*args, **kwds): + """Helper for @overload to raise when called.""" + raise NotImplementedError( + "You should not call an overloaded function. " + "A series of @overload-decorated functions " + "outside a stub module should always be followed " + "by an implementation that is not @overload-ed.") + + +# {module: {qualname: {firstlineno: func}}} +_overload_registry = defaultdict(functools.partial(defaultdict, dict)) + + +def overload(func): + """Decorator for overloaded functions/methods. + + In a stub file, place two or more stub definitions for the same + function in a row, each decorated with @overload. + + For example:: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + + In a non-stub file (i.e. a regular .py file), do the same but + follow it with an implementation. The implementation should *not* + be decorated with @overload:: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + def utf8(value): + ... # implementation goes here + + The overloads for a function can be retrieved at runtime using the + get_overloads() function. + """ + # classmethod and staticmethod + f = getattr(func, "__func__", func) + try: + _overload_registry[f.__module__][f.__qualname__][f.__code__.co_firstlineno] = func + except AttributeError: + # Not a normal function; ignore. + pass + return _overload_dummy + + +def get_overloads(func): + """Return all defined overloads for *func* as a sequence.""" + # classmethod and staticmethod + f = getattr(func, "__func__", func) + if f.__module__ not in _overload_registry: + return [] + mod_dict = _overload_registry[f.__module__] + if f.__qualname__ not in mod_dict: + return [] + return list(mod_dict[f.__qualname__].values()) + + +def clear_overloads(): + """Clear all overloads in the registry.""" + _overload_registry.clear() + + +def final(f): + """Decorator to indicate final methods and final classes. + + Use this decorator to indicate to type checkers that the decorated + method cannot be overridden, and decorated class cannot be subclassed. + + For example:: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... + + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties. The decorator + attempts to set the ``__final__`` attribute to ``True`` on the decorated + object to allow runtime introspection. 
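+
+# Usage sketch for the overload registry above (function names are
+# illustrative): each @overload stub is recorded by module, qualified name
+# and first line number, so the stubs can be recovered at runtime from the
+# implementation via get_overloads().
+
+from typing import get_overloads, overload
+
+@overload
+def scale(x: int) -> int: ...
+@overload
+def scale(x: float) -> float: ...
+def scale(x):
+    return x * 2
+
+assert len(get_overloads(scale)) == 2
+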
+ """ + try: + f.__final__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. + pass + return f + + +# Some unconstrained type variables. These were initially used by the container types. +# They were never meant for export and are now unused, but we keep them around to +# avoid breaking compatibility with users who import them. +T = TypeVar('T') # Any type. +KT = TypeVar('KT') # Key type. +VT = TypeVar('VT') # Value type. +T_co = TypeVar('T_co', covariant=True) # Any type covariant containers. +V_co = TypeVar('V_co', covariant=True) # Any type covariant containers. +VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers. +T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant. +# Internal type variable used for Type[]. +CT_co = TypeVar('CT_co', covariant=True, bound=type) + + +# A useful type variable with constraints. This represents string types. +# (This one *is* for export!) +AnyStr = TypeVar('AnyStr', bytes, str) + + +# Various ABCs mimicking those in collections.abc. +_alias = _SpecialGenericAlias + +Hashable = _alias(collections.abc.Hashable, 0) # Not generic. +Awaitable = _alias(collections.abc.Awaitable, 1) +Coroutine = _alias(collections.abc.Coroutine, 3) +AsyncIterable = _alias(collections.abc.AsyncIterable, 1) +AsyncIterator = _alias(collections.abc.AsyncIterator, 1) +Iterable = _alias(collections.abc.Iterable, 1) +Iterator = _alias(collections.abc.Iterator, 1) +Reversible = _alias(collections.abc.Reversible, 1) +Sized = _alias(collections.abc.Sized, 0) # Not generic. +Container = _alias(collections.abc.Container, 1) +Collection = _alias(collections.abc.Collection, 1) +Callable = _CallableType(collections.abc.Callable, 2) +Callable.__doc__ = \ + """Deprecated alias to collections.abc.Callable. + + Callable[[int], str] signifies a function that takes a single + parameter of type int and returns a str. + + The subscription syntax must always be used with exactly two + values: the argument list and the return type. + The argument list must be a list of types, a ParamSpec, + Concatenate or ellipsis. The return type must be a single type. + + There is no syntax to indicate optional or keyword arguments; + such function types are rarely used as callback types. + """ +AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet') +MutableSet = _alias(collections.abc.MutableSet, 1) +# NOTE: Mapping is only covariant in the value type. +Mapping = _alias(collections.abc.Mapping, 2) +MutableMapping = _alias(collections.abc.MutableMapping, 2) +Sequence = _alias(collections.abc.Sequence, 1) +MutableSequence = _alias(collections.abc.MutableSequence, 1) +ByteString = _DeprecatedGenericAlias( + collections.abc.ByteString, 0, removal_version=(3, 17) # Not generic. +) +# Tuple accepts variable number of parameters. +Tuple = _TupleType(tuple, -1, inst=False, name='Tuple') +Tuple.__doc__ = \ + """Deprecated alias to builtins.tuple. + + Tuple[X, Y] is the cross-product type of X and Y. + + Example: Tuple[T1, T2] is a tuple of two elements corresponding + to type variables T1 and T2. Tuple[int, float, str] is a tuple + of an int, a float and a string. + + To specify a variable-length tuple of homogeneous type, use Tuple[T, ...]. 
+ """ +List = _alias(list, 1, inst=False, name='List') +Deque = _alias(collections.deque, 1, name='Deque') +Set = _alias(set, 1, inst=False, name='Set') +FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet') +MappingView = _alias(collections.abc.MappingView, 1) +KeysView = _alias(collections.abc.KeysView, 1) +ItemsView = _alias(collections.abc.ItemsView, 2) +ValuesView = _alias(collections.abc.ValuesView, 1) +Dict = _alias(dict, 2, inst=False, name='Dict') +DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict') +OrderedDict = _alias(collections.OrderedDict, 2) +Counter = _alias(collections.Counter, 1) +ChainMap = _alias(collections.ChainMap, 2) +Generator = _alias(collections.abc.Generator, 3, defaults=(types.NoneType, types.NoneType)) +AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2, defaults=(types.NoneType,)) +Type = _alias(type, 1, inst=False, name='Type') +Type.__doc__ = \ + """Deprecated alias to builtins.type. + + builtins.type or typing.Type can be used to annotate class objects. + For example, suppose we have the following classes:: + + class User: ... # Abstract base for User classes + class BasicUser(User): ... + class ProUser(User): ... + class TeamUser(User): ... + + And a function that takes a class argument that's a subclass of + User and returns an instance of the corresponding class:: + + def new_user[U](user_class: Type[U]) -> U: + user = user_class() + # (Here we could write the user object to a database) + return user + + joe = new_user(BasicUser) + + At this point the type checker knows that joe has type BasicUser. + """ + + +@runtime_checkable +class SupportsInt(Protocol): + """An ABC with one abstract method __int__.""" + + __slots__ = () + + @abstractmethod + def __int__(self) -> int: + pass + + +@runtime_checkable +class SupportsFloat(Protocol): + """An ABC with one abstract method __float__.""" + + __slots__ = () + + @abstractmethod + def __float__(self) -> float: + pass + + +@runtime_checkable +class SupportsComplex(Protocol): + """An ABC with one abstract method __complex__.""" + + __slots__ = () + + @abstractmethod + def __complex__(self) -> complex: + pass + + +@runtime_checkable +class SupportsBytes(Protocol): + """An ABC with one abstract method __bytes__.""" + + __slots__ = () + + @abstractmethod + def __bytes__(self) -> bytes: + pass + + +@runtime_checkable +class SupportsIndex(Protocol): + """An ABC with one abstract method __index__.""" + + __slots__ = () + + @abstractmethod + def __index__(self) -> int: + pass + + +@runtime_checkable +class SupportsAbs[T](Protocol): + """An ABC with one abstract method __abs__ that is covariant in its return type.""" + + __slots__ = () + + @abstractmethod + def __abs__(self) -> T: + pass + + +@runtime_checkable +class SupportsRound[T](Protocol): + """An ABC with one abstract method __round__ that is covariant in its return type.""" + + __slots__ = () + + @abstractmethod + def __round__(self, ndigits: int = 0) -> T: + pass + + +def _make_nmtuple(name, fields, annotate_func, module, defaults = ()): + nm_tpl = collections.namedtuple(name, fields, + defaults=defaults, module=module) + nm_tpl.__annotate__ = nm_tpl.__new__.__annotate__ = annotate_func + return nm_tpl + + +def _make_eager_annotate(types): + checked_types = {key: _type_check(val, f"field {key} annotation must be a type") + for key, val in types.items()} + def annotate(format): + match format: + case _lazy_annotationlib.Format.VALUE | _lazy_annotationlib.Format.FORWARDREF: + return checked_types + case 
_lazy_annotationlib.Format.STRING: + return _lazy_annotationlib.annotations_to_string(types) + case _: + raise NotImplementedError(format) + return annotate + + +# attributes prohibited to set in NamedTuple class syntax +_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__', + '_fields', '_field_defaults', + '_make', '_replace', '_asdict', '_source'}) + +_special = frozenset({'__module__', '__name__', '__annotations__', '__annotate__', + '__annotate_func__', '__annotations_cache__'}) + + +class NamedTupleMeta(type): + def __new__(cls, typename, bases, ns): + assert _NamedTuple in bases + if "__classcell__" in ns: + raise TypeError( + "uses of super() and __class__ are unsupported in methods of NamedTuple subclasses") + for base in bases: + if base is not _NamedTuple and base is not Generic: + raise TypeError( + 'can only inherit from a NamedTuple type and Generic') + bases = tuple(tuple if base is _NamedTuple else base for base in bases) + if "__annotations__" in ns: + types = ns["__annotations__"] + field_names = list(types) + annotate = _make_eager_annotate(types) + elif (original_annotate := _lazy_annotationlib.get_annotate_from_class_namespace(ns)) is not None: + types = _lazy_annotationlib.call_annotate_function( + original_annotate, _lazy_annotationlib.Format.FORWARDREF) + field_names = list(types) + + # For backward compatibility, type-check all the types at creation time + for typ in types.values(): + _type_check(typ, "field annotation must be a type") + + def annotate(format): + annos = _lazy_annotationlib.call_annotate_function( + original_annotate, format) + if format != _lazy_annotationlib.Format.STRING: + return {key: _type_check(val, f"field {key} annotation must be a type") + for key, val in annos.items()} + return annos + else: + # Empty NamedTuple + field_names = [] + annotate = lambda format: {} + default_names = [] + for field_name in field_names: + if field_name in ns: + default_names.append(field_name) + elif default_names: + raise TypeError(f"Non-default namedtuple field {field_name} " + f"cannot follow default field" + f"{'s' if len(default_names) > 1 else ''} " + f"{', '.join(default_names)}") + nm_tpl = _make_nmtuple(typename, field_names, annotate, + defaults=[ns[n] for n in default_names], + module=ns['__module__']) + nm_tpl.__bases__ = bases + if Generic in bases: + class_getitem = _generic_class_getitem + nm_tpl.__class_getitem__ = classmethod(class_getitem) + # update from user namespace without overriding special namedtuple attributes + for key, val in ns.items(): + if key in _prohibited: + raise AttributeError("Cannot overwrite NamedTuple attribute " + key) + elif key not in _special: + if key not in nm_tpl._fields: + setattr(nm_tpl, key, val) + try: + set_name = type(val).__set_name__ + except AttributeError: + pass + else: + try: + set_name(val, nm_tpl, key) + except BaseException as e: + e.add_note( + f"Error calling __set_name__ on {type(val).__name__!r} " + f"instance {key!r} in {typename!r}" + ) + raise + + if Generic in bases: + nm_tpl.__init_subclass__() + return nm_tpl + + +def NamedTuple(typename, fields=_sentinel, /, **kwargs): + """Typed version of namedtuple. + + Usage:: + + class Employee(NamedTuple): + name: str + id: int + + This is equivalent to:: + + Employee = collections.namedtuple('Employee', ['name', 'id']) + + The resulting class has an extra __annotations__ attribute, giving a + dict that maps field names to types. (The field names are also in + the _fields attribute, which is part of the namedtuple API.) 
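+
+# Usage sketch for NamedTupleMeta above (names illustrative): fields with
+# defaults must follow fields without them, exactly as for namedtuple(),
+# and the defaults are exposed through the usual namedtuple API.
+
+from typing import NamedTuple
+
+class Employee(NamedTuple):
+    name: str
+    id: int = 0
+
+assert Employee("Ann") == ("Ann", 0)
+assert Employee._field_defaults == {"id": 0}
+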
+ An alternative equivalent functional syntax is also accepted:: + + Employee = NamedTuple('Employee', [('name', str), ('id', int)]) + """ + if fields is _sentinel: + if kwargs: + deprecated_thing = "Creating NamedTuple classes using keyword arguments" + deprecation_msg = ( + "{name} is deprecated and will be disallowed in Python {remove}. " + "Use the class-based or functional syntax instead." + ) + else: + deprecated_thing = "Failing to pass a value for the 'fields' parameter" + example = f"`{typename} = NamedTuple({typename!r}, [])`" + deprecation_msg = ( + "{name} is deprecated and will be disallowed in Python {remove}. " + "To create a NamedTuple class with 0 fields " + "using the functional syntax, " + "pass an empty list, e.g. " + ) + example + "." + elif fields is None: + if kwargs: + raise TypeError( + "Cannot pass `None` as the 'fields' parameter " + "and also specify fields using keyword arguments" + ) + else: + deprecated_thing = "Passing `None` as the 'fields' parameter" + example = f"`{typename} = NamedTuple({typename!r}, [])`" + deprecation_msg = ( + "{name} is deprecated and will be disallowed in Python {remove}. " + "To create a NamedTuple class with 0 fields " + "using the functional syntax, " + "pass an empty list, e.g. " + ) + example + "." + elif kwargs: + raise TypeError("Either list of fields or keywords" + " can be provided to NamedTuple, not both") + if fields is _sentinel or fields is None: + import warnings + warnings._deprecated(deprecated_thing, message=deprecation_msg, remove=(3, 15)) + fields = kwargs.items() + types = {n: _type_check(t, f"field {n} annotation must be a type") + for n, t in fields} + field_names = [n for n, _ in fields] + + nt = _make_nmtuple(typename, field_names, _make_eager_annotate(types), module=_caller()) + nt.__orig_bases__ = (NamedTuple,) + return nt + +_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {}) + +def _namedtuple_mro_entries(bases): + assert NamedTuple in bases + return (_NamedTuple,) + +NamedTuple.__mro_entries__ = _namedtuple_mro_entries + + +def _get_typeddict_qualifiers(annotation_type): + while True: + annotation_origin = get_origin(annotation_type) + if annotation_origin is Annotated: + annotation_args = get_args(annotation_type) + if annotation_args: + annotation_type = annotation_args[0] + else: + break + elif annotation_origin is Required: + yield Required + (annotation_type,) = get_args(annotation_type) + elif annotation_origin is NotRequired: + yield NotRequired + (annotation_type,) = get_args(annotation_type) + elif annotation_origin is ReadOnly: + yield ReadOnly + (annotation_type,) = get_args(annotation_type) + else: + break + + +class _TypedDictMeta(type): + def __new__(cls, name, bases, ns, total=True): + """Create a new typed dict class object. + + This method is called when TypedDict is subclassed, + or when TypedDict is instantiated. This way + TypedDict supports all three syntax forms described in its docstring. + Subclasses and instances of TypedDict return actual dictionaries. 
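+
+# Sketch of what _get_typeddict_qualifiers() above peels apart (the
+# annotation is illustrative): Required/NotRequired/ReadOnly and Annotated
+# may nest in any order around the underlying value type.
+
+from typing import Annotated, NotRequired, ReadOnly, get_args, get_origin
+
+anno = NotRequired[Annotated[ReadOnly[int], "meta"]]
+assert get_origin(anno) is NotRequired
+assert get_origin(get_args(anno)[0]) is Annotated
+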
+ """ + for base in bases: + if type(base) is not _TypedDictMeta and base is not Generic: + raise TypeError('cannot inherit from both a TypedDict type ' + 'and a non-TypedDict base class') + + if any(issubclass(b, Generic) for b in bases): + generic_base = (Generic,) + else: + generic_base = () + + ns_annotations = ns.pop('__annotations__', None) + + tp_dict = type.__new__(_TypedDictMeta, name, (*generic_base, dict), ns) + + if not hasattr(tp_dict, '__orig_bases__'): + tp_dict.__orig_bases__ = bases + + if ns_annotations is not None: + own_annotate = None + own_annotations = ns_annotations + elif (own_annotate := _lazy_annotationlib.get_annotate_from_class_namespace(ns)) is not None: + own_annotations = _lazy_annotationlib.call_annotate_function( + own_annotate, _lazy_annotationlib.Format.FORWARDREF, owner=tp_dict + ) + else: + own_annotate = None + own_annotations = {} + msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" + own_checked_annotations = { + n: _type_check(tp, msg, owner=tp_dict, module=tp_dict.__module__) + for n, tp in own_annotations.items() + } + required_keys = set() + optional_keys = set() + readonly_keys = set() + mutable_keys = set() + + for base in bases: + base_required = base.__dict__.get('__required_keys__', set()) + required_keys |= base_required + optional_keys -= base_required + + base_optional = base.__dict__.get('__optional_keys__', set()) + required_keys -= base_optional + optional_keys |= base_optional + + readonly_keys.update(base.__dict__.get('__readonly_keys__', ())) + mutable_keys.update(base.__dict__.get('__mutable_keys__', ())) + + for annotation_key, annotation_type in own_checked_annotations.items(): + qualifiers = set(_get_typeddict_qualifiers(annotation_type)) + if Required in qualifiers: + is_required = True + elif NotRequired in qualifiers: + is_required = False + else: + is_required = total + + if is_required: + required_keys.add(annotation_key) + optional_keys.discard(annotation_key) + else: + optional_keys.add(annotation_key) + required_keys.discard(annotation_key) + + if ReadOnly in qualifiers: + if annotation_key in mutable_keys: + raise TypeError( + f"Cannot override mutable key {annotation_key!r}" + " with read-only key" + ) + readonly_keys.add(annotation_key) + else: + mutable_keys.add(annotation_key) + readonly_keys.discard(annotation_key) + + assert required_keys.isdisjoint(optional_keys), ( + f"Required keys overlap with optional keys in {name}:" + f" {required_keys=}, {optional_keys=}" + ) + + def __annotate__(format): + annos = {} + for base in bases: + if base is Generic: + continue + base_annotate = base.__annotate__ + if base_annotate is None: + continue + base_annos = _lazy_annotationlib.call_annotate_function( + base_annotate, format, owner=base) + annos.update(base_annos) + if own_annotate is not None: + own = _lazy_annotationlib.call_annotate_function( + own_annotate, format, owner=tp_dict) + if format != _lazy_annotationlib.Format.STRING: + own = { + n: _type_check(tp, msg, module=tp_dict.__module__) + for n, tp in own.items() + } + elif format == _lazy_annotationlib.Format.STRING: + own = _lazy_annotationlib.annotations_to_string(own_annotations) + elif format in (_lazy_annotationlib.Format.FORWARDREF, _lazy_annotationlib.Format.VALUE): + own = own_checked_annotations + else: + raise NotImplementedError(format) + annos.update(own) + return annos + + tp_dict.__annotate__ = __annotate__ + tp_dict.__required_keys__ = frozenset(required_keys) + tp_dict.__optional_keys__ = frozenset(optional_keys) + 
tp_dict.__readonly_keys__ = frozenset(readonly_keys) + tp_dict.__mutable_keys__ = frozenset(mutable_keys) + tp_dict.__total__ = total + return tp_dict + + __call__ = dict # static method + + def __subclasscheck__(cls, other): + # Typed dicts are only for static structural subtyping. + raise TypeError('TypedDict does not support instance and class checks') + + __instancecheck__ = __subclasscheck__ + + +def TypedDict(typename, fields=_sentinel, /, *, total=True): + """A simple typed namespace. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type such that a type checker will expect all + instances to have a certain set of keys, where each key is + associated with a value of a consistent type. This expectation + is not checked at runtime. + + Usage:: + + >>> class Point2D(TypedDict): + ... x: int + ... y: int + ... label: str + ... + >>> a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + >>> b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + >>> Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + True + + The type info can be accessed via the Point2D.__annotations__ dict, and + the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. + TypedDict supports an additional equivalent form:: + + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + + By default, all keys must be present in a TypedDict. It is possible + to override this by specifying totality:: + + class Point2D(TypedDict, total=False): + x: int + y: int + + This means that a Point2D TypedDict can have any of the keys omitted. A type + checker is only expected to support a literal False or True as the value of + the total argument. True is the default, and makes all items defined in the + class body be required. + + The Required and NotRequired special forms can also be used to mark + individual keys as being required or not required:: + + class Point2D(TypedDict): + x: int # the "x" key must always be present (Required is the default) + y: NotRequired[int] # the "y" key can be omitted + + See PEP 655 for more details on Required and NotRequired. + + The ReadOnly special form can be used + to mark individual keys as immutable for type checkers:: + + class DatabaseUser(TypedDict): + id: ReadOnly[int] # the "id" key must not be modified + username: str # the "username" key can be changed + + """ + if fields is _sentinel or fields is None: + import warnings + + if fields is _sentinel: + deprecated_thing = "Failing to pass a value for the 'fields' parameter" + else: + deprecated_thing = "Passing `None` as the 'fields' parameter" + + example = f"`{typename} = TypedDict({typename!r}, {{{{}}}})`" + deprecation_msg = ( + "{name} is deprecated and will be disallowed in Python {remove}. " + "To create a TypedDict class with 0 fields " + "using the functional syntax, " + "pass an empty dictionary, e.g. " + ) + example + "." + warnings._deprecated(deprecated_thing, message=deprecation_msg, remove=(3, 15)) + fields = {} + + ns = {'__annotations__': dict(fields)} + module = _caller() + if module is not None: + # Setting correct module is necessary to make typed dict classes pickleable. + ns['__module__'] = module + + td = _TypedDictMeta(typename, (), ns, total=total) + td.__orig_bases__ = (TypedDict,) + return td + +_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {}) +TypedDict.__mro_entries__ = lambda bases: (_TypedDict,) + + +@_SpecialForm +def Required(self, parameters): + """Special typing construct to mark a TypedDict key as required. 
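+
+# Usage sketch for the key bookkeeping in _TypedDictMeta above (the class
+# is illustrative): totality, Required/NotRequired and ReadOnly all land
+# in frozensets on the resulting class.
+
+from typing import NotRequired, ReadOnly, Required, TypedDict
+
+class MovieRecord(TypedDict, total=False):
+    title: Required[str]
+    year: int
+    rating: ReadOnly[NotRequired[float]]
+
+assert MovieRecord.__required_keys__ == {"title"}
+assert MovieRecord.__optional_keys__ == {"year", "rating"}
+assert MovieRecord.__readonly_keys__ == {"rating"}
+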
+ + This is mainly useful for total=False TypedDicts. + + For example:: + + class Movie(TypedDict, total=False): + title: Required[str] + year: int + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. + """ + item = _type_check(parameters, f'{self._name} accepts only a single type.') + return _GenericAlias(self, (item,)) + + +@_SpecialForm +def NotRequired(self, parameters): + """Special typing construct to mark a TypedDict key as potentially missing. + + For example:: + + class Movie(TypedDict): + title: str + year: NotRequired[int] + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + """ + item = _type_check(parameters, f'{self._name} accepts only a single type.') + return _GenericAlias(self, (item,)) + + +@_SpecialForm +def ReadOnly(self, parameters): + """A special typing construct to mark an item of a TypedDict as read-only. + + For example:: + + class Movie(TypedDict): + title: ReadOnly[str] + year: int + + def mutate_movie(m: Movie) -> None: + m["year"] = 1992 # allowed + m["title"] = "The Matrix" # typechecker error + + There is no runtime checking for this property. + """ + item = _type_check(parameters, f'{self._name} accepts only a single type.') + return _GenericAlias(self, (item,)) + + +class NewType: + """NewType creates simple unique types with almost zero runtime overhead. + + NewType(name, tp) is considered a subtype of tp + by static type checkers. At runtime, NewType(name, tp) returns + a dummy callable that simply returns its argument. + + Usage:: + + UserId = NewType('UserId', int) + + def name_by_id(user_id: UserId) -> str: + ... + + UserId('user') # Fails type check + + name_by_id(42) # Fails type check + name_by_id(UserId(42)) # OK + + num = UserId(5) + 1 # type: int + """ + + __call__ = _idfunc + + def __init__(self, name, tp): + self.__qualname__ = name + if '.' in name: + name = name.rpartition('.')[-1] + self.__name__ = name + self.__supertype__ = tp + def_mod = _caller() + if def_mod != 'typing': + self.__module__ = def_mod + + def __mro_entries__(self, bases): + # We defined __mro_entries__ to get a better error message + # if a user attempts to subclass a NewType instance. bpo-46170 + superclass_name = self.__name__ + + class Dummy: + def __init_subclass__(cls): + subclass_name = cls.__name__ + raise TypeError( + f"Cannot subclass an instance of NewType. Perhaps you were looking for: " + f"`{subclass_name} = NewType({subclass_name!r}, {superclass_name})`" + ) + + return (Dummy,) + + def __repr__(self): + return f'{self.__module__}.{self.__qualname__}' + + def __reduce__(self): + return self.__qualname__ + + def __or__(self, other): + return Union[self, other] + + def __ror__(self, other): + return Union[other, self] + + +# Python-version-specific alias (Python 2: unicode; Python 3: str) +Text = str + + +# Constant that's True when type checking, but False here. +TYPE_CHECKING = False + + +class IO(Generic[AnyStr]): + """Generic base class for TextIO and BinaryIO. + + This is an abstract, generic version of the return of open(). + + NOTE: This does not distinguish between the different possible + classes (text vs. binary, read vs. write vs. read/write, + append-only, unbuffered). The TextIO and BinaryIO subclasses + below capture the distinctions between text vs. 
binary, which is + pervasive in the interface; however we currently do not offer a + way to track the other distinctions in the type system. + """ + + __slots__ = () + + @property + @abstractmethod + def mode(self) -> str: + pass + + @property + @abstractmethod + def name(self) -> str: + pass + + @abstractmethod + def close(self) -> None: + pass + + @property + @abstractmethod + def closed(self) -> bool: + pass + + @abstractmethod + def fileno(self) -> int: + pass + + @abstractmethod + def flush(self) -> None: + pass + + @abstractmethod + def isatty(self) -> bool: + pass + + @abstractmethod + def read(self, n: int = -1) -> AnyStr: + pass + + @abstractmethod + def readable(self) -> bool: + pass + + @abstractmethod + def readline(self, limit: int = -1) -> AnyStr: + pass + + @abstractmethod + def readlines(self, hint: int = -1) -> list[AnyStr]: + pass + + @abstractmethod + def seek(self, offset: int, whence: int = 0) -> int: + pass + + @abstractmethod + def seekable(self) -> bool: + pass + + @abstractmethod + def tell(self) -> int: + pass + + @abstractmethod + def truncate(self, size: int | None = None) -> int: + pass + + @abstractmethod + def writable(self) -> bool: + pass + + @abstractmethod + def write(self, s: AnyStr) -> int: + pass + + @abstractmethod + def writelines(self, lines: list[AnyStr]) -> None: + pass + + @abstractmethod + def __enter__(self) -> IO[AnyStr]: + pass + + @abstractmethod + def __exit__(self, type, value, traceback) -> None: + pass + + +class BinaryIO(IO[bytes]): + """Typed version of the return of open() in binary mode.""" + + __slots__ = () + + @abstractmethod + def write(self, s: bytes | bytearray) -> int: + pass + + @abstractmethod + def __enter__(self) -> BinaryIO: + pass + + +class TextIO(IO[str]): + """Typed version of the return of open() in text mode.""" + + __slots__ = () + + @property + @abstractmethod + def buffer(self) -> BinaryIO: + pass + + @property + @abstractmethod + def encoding(self) -> str: + pass + + @property + @abstractmethod + def errors(self) -> str | None: + pass + + @property + @abstractmethod + def line_buffering(self) -> bool: + pass + + @property + @abstractmethod + def newlines(self) -> Any: + pass + + @abstractmethod + def __enter__(self) -> TextIO: + pass + + +def reveal_type[T](obj: T, /) -> T: + """Ask a static type checker to reveal the inferred type of an expression. + + When a static type checker encounters a call to ``reveal_type()``, + it will emit the inferred type of the argument:: + + x: int = 1 + reveal_type(x) + + Running a static type checker (e.g., mypy) on this example + will produce output similar to 'Revealed type is "builtins.int"'. + + At runtime, the function prints the runtime type of the + argument and returns the argument unchanged. + """ + print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr) + return obj + + +class _IdentityCallable(Protocol): + def __call__[T](self, arg: T, /) -> T: + ... + + +def dataclass_transform( + *, + eq_default: bool = True, + order_default: bool = False, + kw_only_default: bool = False, + frozen_default: bool = False, + field_specifiers: tuple[type[Any] | Callable[..., Any], ...] = (), + **kwargs: Any, +) -> _IdentityCallable: + """Decorator to mark an object as providing dataclass-like behaviour. + + The decorator can be applied to a function, class, or metaclass. + + Example usage with a decorator function:: + + @dataclass_transform() + def create_model[T](cls: type[T]) -> type[T]: + ... 
+ return cls + + @create_model + class CustomerModel: + id: int + name: str + + On a base class:: + + @dataclass_transform() + class ModelBase: ... + + class CustomerModel(ModelBase): + id: int + name: str + + On a metaclass:: + + @dataclass_transform() + class ModelMeta(type): ... + + class ModelBase(metaclass=ModelMeta): ... + + class CustomerModel(ModelBase): + id: int + name: str + + The ``CustomerModel`` classes defined above will + be treated by type checkers similarly to classes created with + ``@dataclasses.dataclass``. + For example, type checkers will assume these classes have + ``__init__`` methods that accept ``id`` and ``name``. + + The arguments to this decorator can be used to customize this behavior: + - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be + ``True`` or ``False`` if it is omitted by the caller. + - ``order_default`` indicates whether the ``order`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``kw_only_default`` indicates whether the ``kw_only`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``frozen_default`` indicates whether the ``frozen`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``field_specifiers`` specifies a static list of supported classes + or functions that describe fields, similar to ``dataclasses.field()``. + - Arbitrary other keyword arguments are accepted in order to allow for + possible future extensions. + + At runtime, this decorator records its arguments in the + ``__dataclass_transform__`` attribute on the decorated object. + It has no other runtime effect. + + See PEP 681 for more details. + """ + def decorator(cls_or_fn): + cls_or_fn.__dataclass_transform__ = { + "eq_default": eq_default, + "order_default": order_default, + "kw_only_default": kw_only_default, + "frozen_default": frozen_default, + "field_specifiers": field_specifiers, + "kwargs": kwargs, + } + return cls_or_fn + return decorator + + +type _Func = Callable[..., Any] + + +def override[F: _Func](method: F, /) -> F: + """Indicate that a method is intended to override a method in a base class. + + Usage:: + + class Base: + def method(self) -> None: + pass + + class Child(Base): + @override + def method(self) -> None: + super().method() + + When this decorator is applied to a method, the type checker will + validate that it overrides a method or attribute with the same name on a + base class. This helps prevent bugs that may occur when a base class is + changed without an equivalent change to a child class. + + There is no runtime checking of this property. The decorator attempts to + set the ``__override__`` attribute to ``True`` on the decorated object to + allow runtime introspection. + + See PEP 698 for details. + """ + try: + method.__override__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. + pass + return method + + +def is_protocol(tp: type, /) -> bool: + """Return True if the given type is a Protocol. + + Example:: + + >>> from typing import Protocol, is_protocol + >>> class P(Protocol): + ... def a(self) -> str: ... + ... 
b: int + >>> is_protocol(P) + True + >>> is_protocol(int) + False + """ + return ( + isinstance(tp, type) + and getattr(tp, '_is_protocol', False) + and tp != Protocol + ) + + +def get_protocol_members(tp: type, /) -> frozenset[str]: + """Return the set of members defined in a Protocol. + + Example:: + + >>> from typing import Protocol, get_protocol_members + >>> class P(Protocol): + ... def a(self) -> str: ... + ... b: int + >>> get_protocol_members(P) == frozenset({'a', 'b'}) + True + + Raise a TypeError for arguments that are not Protocols. + """ + if not is_protocol(tp): + raise TypeError(f'{tp!r} is not a Protocol') + return frozenset(tp.__protocol_attrs__) + + +def __getattr__(attr): + """Improve the import time of the typing module. + + Soft-deprecated objects which are costly to create + are only created on-demand here. + """ + if attr == "ForwardRef": + obj = _lazy_annotationlib.ForwardRef + elif attr in {"Pattern", "Match"}: + import re + obj = _alias(getattr(re, attr), 1) + elif attr in {"ContextManager", "AsyncContextManager"}: + import contextlib + obj = _alias(getattr(contextlib, f"Abstract{attr}"), 2, name=attr, defaults=(bool | None,)) + elif attr == "_collect_parameters": + import warnings + + depr_message = ( + "The private _collect_parameters function is deprecated and will be" + " removed in a future version of Python. Any use of private functions" + " is discouraged and may break in the future." + ) + warnings.warn(depr_message, category=DeprecationWarning, stacklevel=2) + obj = _collect_type_parameters + else: + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") + globals()[attr] = obj + return obj diff --git a/Python314_4_x86_Template/Lib/unittest/__init__.py b/Python314_4_x86_Template/Lib/unittest/__init__.py new file mode 100644 index 00000000..78ff6bb4 --- /dev/null +++ b/Python314_4_x86_Template/Lib/unittest/__init__.py @@ -0,0 +1,80 @@ +""" +Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's +Smalltalk testing framework (used with permission). + +This module contains the core framework classes that form the basis of +specific test cases and suites (TestCase, TestSuite etc.), and also a +text-based utility class for running the tests and reporting the results + (TextTestRunner). + +Simple usage: + + import unittest + + class IntegerArithmeticTestCase(unittest.TestCase): + def testAdd(self): # test method names begin with 'test' + self.assertEqual((1 + 2), 3) + self.assertEqual(0 + 1, 1) + def testMultiply(self): + self.assertEqual((0 * 10), 0) + self.assertEqual((5 * 8), 40) + + if __name__ == '__main__': + unittest.main() + +Further information is available in the bundled documentation, and from + + http://docs.python.org/library/unittest.html + +Copyright (c) 1999-2003 Steve Purcell +Copyright (c) 2003 Python Software Foundation +This module is free software, and you may redistribute it and/or modify +it under the same terms as Python itself, so long as this copyright message +and disclaimer are retained in their original form. + +IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, +SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF +THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE. 
THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, +AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, +SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. +""" + +__all__ = ['TestResult', 'TestCase', 'IsolatedAsyncioTestCase', 'TestSuite', + 'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main', + 'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless', + 'expectedFailure', 'TextTestResult', 'installHandler', + 'registerResult', 'removeResult', 'removeHandler', + 'addModuleCleanup', 'doModuleCleanups', 'enterModuleContext'] + +__unittest = True + +from .result import TestResult +from .case import (addModuleCleanup, TestCase, FunctionTestCase, SkipTest, skip, + skipIf, skipUnless, expectedFailure, doModuleCleanups, + enterModuleContext) +from .suite import BaseTestSuite, TestSuite # noqa: F401 +from .loader import TestLoader, defaultTestLoader +from .main import TestProgram, main # noqa: F401 +from .runner import TextTestRunner, TextTestResult +from .signals import installHandler, registerResult, removeResult, removeHandler +# IsolatedAsyncioTestCase will be imported lazily. + + +# Lazy import of IsolatedAsyncioTestCase from .async_case +# It imports asyncio, which is relatively heavy, but most tests +# do not need it. + +def __dir__(): + return globals().keys() | {'IsolatedAsyncioTestCase'} + +def __getattr__(name): + if name == 'IsolatedAsyncioTestCase': + global IsolatedAsyncioTestCase + from .async_case import IsolatedAsyncioTestCase + return IsolatedAsyncioTestCase + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/Python313_13_x86_Template/Lib/unittest/__main__.py b/Python314_4_x86_Template/Lib/unittest/__main__.py similarity index 100% rename from Python313_13_x86_Template/Lib/unittest/__main__.py rename to Python314_4_x86_Template/Lib/unittest/__main__.py diff --git a/Python313_13_x86_Template/Lib/unittest/_log.py b/Python314_4_x86_Template/Lib/unittest/_log.py similarity index 100% rename from Python313_13_x86_Template/Lib/unittest/_log.py rename to Python314_4_x86_Template/Lib/unittest/_log.py diff --git a/Python314_4_x86_Template/Lib/unittest/async_case.py b/Python314_4_x86_Template/Lib/unittest/async_case.py new file mode 100644 index 00000000..a1c0d6c3 --- /dev/null +++ b/Python314_4_x86_Template/Lib/unittest/async_case.py @@ -0,0 +1,158 @@ +import asyncio +import contextvars +import inspect +import warnings + +from .case import TestCase + +__unittest = True + +class IsolatedAsyncioTestCase(TestCase): + # Names intentionally have a long prefix + # to reduce a chance of clashing with user-defined attributes + # from inherited test case + # + # The class doesn't call loop.run_until_complete(self.setUp()) and family + # but uses a different approach: + # 1. create a long-running task that reads self.setUp() + # awaitable from queue along with a future + # 2. await the awaitable object passing in and set the result + # into the future object + # 3. Outer code puts the awaitable and the future object into a queue + # with waiting for the future + # The trick is necessary because every run_until_complete() call + # creates a new task with embedded ContextVar context. + # To share contextvars between setUp(), test and tearDown() we need to execute + # them inside the same task. + + # Note: the test case modifies event loop policy if the policy was not instantiated + # yet, unless loop_factory=asyncio.EventLoop is set. 
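+    #
+    # For example, a test class can opt out of the policy machinery
+    # entirely by pinning the factory (illustrative sketch):
+    #
+    #     class MyTests(IsolatedAsyncioTestCase):
+    #         loop_factory = asyncio.EventLoop
+    #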
+ # asyncio.get_event_loop_policy() creates a default policy on demand but never + # returns None + # I believe this is not an issue in user level tests but python itself for testing + # should reset a policy in every test module + # by calling asyncio.set_event_loop_policy(None) in tearDownModule() + # or set loop_factory=asyncio.EventLoop + + loop_factory = None + + def __init__(self, methodName='runTest'): + super().__init__(methodName) + self._asyncioRunner = None + self._asyncioTestContext = contextvars.copy_context() + + async def asyncSetUp(self): + pass + + async def asyncTearDown(self): + pass + + def addAsyncCleanup(self, func, /, *args, **kwargs): + # A trivial trampoline to addCleanup() + # the function exists because it has a different semantics + # and signature: + # addCleanup() accepts regular functions + # but addAsyncCleanup() accepts coroutines + # + # We intentionally don't add inspect.iscoroutinefunction() check + # for func argument because there is no way + # to check for async function reliably: + # 1. It can be "async def func()" itself + # 2. Class can implement "async def __call__()" method + # 3. Regular "def func()" that returns awaitable object + self.addCleanup(*(func, *args), **kwargs) + + async def enterAsyncContext(self, cm): + """Enters the supplied asynchronous context manager. + + If successful, also adds its __aexit__ method as a cleanup + function and returns the result of the __aenter__ method. + """ + # We look up the special methods on the type to match the with + # statement. + cls = type(cm) + try: + enter = cls.__aenter__ + exit = cls.__aexit__ + except AttributeError: + msg = (f"'{cls.__module__}.{cls.__qualname__}' object does " + "not support the asynchronous context manager protocol") + try: + cls.__enter__ + cls.__exit__ + except AttributeError: + pass + else: + msg += (" but it supports the context manager protocol. " + "Did you mean to use enterContext()?") + raise TypeError(msg) from None + result = await enter(cm) + self.addAsyncCleanup(exit, cm, None, None, None) + return result + + def _callSetUp(self): + # Force loop to be initialized and set as the current loop + # so that setUp functions can use get_event_loop() and get the + # correct loop instance. 
+ self._asyncioRunner.get_loop() + self._asyncioTestContext.run(self.setUp) + self._callAsync(self.asyncSetUp) + + def _callTestMethod(self, method): + result = self._callMaybeAsync(method) + if result is not None: + msg = ( + f'It is deprecated to return a value that is not None ' + f'from a test case ({method} returned {type(result).__name__!r})', + ) + warnings.warn(msg, DeprecationWarning, stacklevel=4) + + def _callTearDown(self): + self._callAsync(self.asyncTearDown) + self._asyncioTestContext.run(self.tearDown) + + def _callCleanup(self, function, *args, **kwargs): + self._callMaybeAsync(function, *args, **kwargs) + + def _callAsync(self, func, /, *args, **kwargs): + assert self._asyncioRunner is not None, 'asyncio runner is not initialized' + assert inspect.iscoroutinefunction(func), f'{func!r} is not an async function' + return self._asyncioRunner.run( + func(*args, **kwargs), + context=self._asyncioTestContext + ) + + def _callMaybeAsync(self, func, /, *args, **kwargs): + assert self._asyncioRunner is not None, 'asyncio runner is not initialized' + if inspect.iscoroutinefunction(func): + return self._asyncioRunner.run( + func(*args, **kwargs), + context=self._asyncioTestContext, + ) + else: + return self._asyncioTestContext.run(func, *args, **kwargs) + + def _setupAsyncioRunner(self): + assert self._asyncioRunner is None, 'asyncio runner is already initialized' + runner = asyncio.Runner(debug=True, loop_factory=self.loop_factory) + self._asyncioRunner = runner + + def _tearDownAsyncioRunner(self): + runner = self._asyncioRunner + runner.close() + + def run(self, result=None): + self._setupAsyncioRunner() + try: + return super().run(result) + finally: + self._tearDownAsyncioRunner() + + def debug(self): + self._setupAsyncioRunner() + super().debug() + self._tearDownAsyncioRunner() + + def __del__(self): + if self._asyncioRunner is not None: + self._tearDownAsyncioRunner() diff --git a/Python314_4_x86_Template/Lib/unittest/case.py b/Python314_4_x86_Template/Lib/unittest/case.py new file mode 100644 index 00000000..884fc1b2 --- /dev/null +++ b/Python314_4_x86_Template/Lib/unittest/case.py @@ -0,0 +1,1628 @@ +"""Test case implementation""" + +import sys +import functools +import difflib +import pprint +import re +import warnings +import collections +import contextlib +import traceback +import time +import types + +from . import result +from .util import (strclass, safe_repr, _count_diff_all_purpose, + _count_diff_hashable, _common_shorten_repr) + +__unittest = True + +_subtest_msg_sentinel = object() + +DIFF_OMITTED = ('\nDiff is %s characters long. ' + 'Set self.maxDiff to None to see it.') + +class SkipTest(Exception): + """ + Raise this exception in a test to skip it. + + Usually you can use TestCase.skipTest() or one of the skipping decorators + instead of raising this directly. + """ + +class _ShouldStop(Exception): + """ + The test should stop. + """ + +class _UnexpectedSuccess(Exception): + """ + The test was supposed to fail, but it didn't! 
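+
+# Usage sketch for the IsolatedAsyncioTestCase machinery defined in
+# async_case.py above (test names are illustrative): asyncSetUp, the test
+# body and asyncTearDown run in the same task, so contextvars set in
+# asyncSetUp are visible to the test method.
+
+import asyncio
+import unittest
+
+class PingTest(unittest.IsolatedAsyncioTestCase):
+    loop_factory = asyncio.EventLoop   # bypass the event loop policy system
+
+    async def asyncSetUp(self):
+        self.started = True
+
+    async def test_ping(self):
+        await asyncio.sleep(0)
+        self.assertTrue(self.started)
+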
+ """ + + +class _Outcome(object): + def __init__(self, result=None): + self.expecting_failure = False + self.result = result + self.result_supports_subtests = hasattr(result, "addSubTest") + self.success = True + self.expectedFailure = None + + @contextlib.contextmanager + def testPartExecutor(self, test_case, subTest=False): + old_success = self.success + self.success = True + try: + yield + except KeyboardInterrupt: + raise + except SkipTest as e: + self.success = False + _addSkip(self.result, test_case, str(e)) + except _ShouldStop: + pass + except: + exc_info = sys.exc_info() + if self.expecting_failure: + self.expectedFailure = exc_info + else: + self.success = False + if subTest: + self.result.addSubTest(test_case.test_case, test_case, exc_info) + else: + _addError(self.result, test_case, exc_info) + # explicitly break a reference cycle: + # exc_info -> frame -> exc_info + exc_info = None + else: + if subTest and self.success: + self.result.addSubTest(test_case.test_case, test_case, None) + finally: + self.success = self.success and old_success + + +def _addSkip(result, test_case, reason): + addSkip = getattr(result, 'addSkip', None) + if addSkip is not None: + addSkip(test_case, reason) + else: + warnings.warn("TestResult has no addSkip method, skips not reported", + RuntimeWarning, 2) + result.addSuccess(test_case) + +def _addError(result, test, exc_info): + if result is not None and exc_info is not None: + if issubclass(exc_info[0], test.failureException): + result.addFailure(test, exc_info) + else: + result.addError(test, exc_info) + +def _id(obj): + return obj + + +def _enter_context(cm, addcleanup): + # We look up the special methods on the type to match the with + # statement. + cls = type(cm) + try: + enter = cls.__enter__ + exit = cls.__exit__ + except AttributeError: + msg = (f"'{cls.__module__}.{cls.__qualname__}' object does " + "not support the context manager protocol") + try: + cls.__aenter__ + cls.__aexit__ + except AttributeError: + pass + else: + msg += (" but it supports the asynchronous context manager " + "protocol. Did you mean to use enterAsyncContext()?") + raise TypeError(msg) from None + result = enter(cm) + addcleanup(exit, cm, None, None, None) + return result + + +_module_cleanups = [] +def addModuleCleanup(function, /, *args, **kwargs): + """Same as addCleanup, except the cleanup items are called even if + setUpModule fails (unlike tearDownModule).""" + _module_cleanups.append((function, args, kwargs)) + +def enterModuleContext(cm): + """Same as enterContext, but module-wide.""" + return _enter_context(cm, addModuleCleanup) + + +def doModuleCleanups(): + """Execute all module cleanup functions. Normally called for you after + tearDownModule.""" + exceptions = [] + while _module_cleanups: + function, args, kwargs = _module_cleanups.pop() + try: + function(*args, **kwargs) + except Exception as exc: + exceptions.append(exc) + if exceptions: + # Swallows all but first exception. If a multi-exception handler + # gets written we should use that here instead. + raise exceptions[0] + + +def skip(reason): + """ + Unconditionally skip a test. 
+ """ + def decorator(test_item): + if not isinstance(test_item, type): + @functools.wraps(test_item) + def skip_wrapper(*args, **kwargs): + raise SkipTest(reason) + test_item = skip_wrapper + + test_item.__unittest_skip__ = True + test_item.__unittest_skip_why__ = reason + return test_item + if isinstance(reason, types.FunctionType): + test_item = reason + reason = '' + return decorator(test_item) + return decorator + +def skipIf(condition, reason): + """ + Skip a test if the condition is true. + """ + if condition: + return skip(reason) + return _id + +def skipUnless(condition, reason): + """ + Skip a test unless the condition is true. + """ + if not condition: + return skip(reason) + return _id + +def expectedFailure(test_item): + test_item.__unittest_expecting_failure__ = True + return test_item + +def _is_subtype(expected, basetype): + if isinstance(expected, tuple): + return all(_is_subtype(e, basetype) for e in expected) + return isinstance(expected, type) and issubclass(expected, basetype) + +class _BaseTestCaseContext: + + def __init__(self, test_case): + self.test_case = test_case + + def _raiseFailure(self, standardMsg): + msg = self.test_case._formatMessage(self.msg, standardMsg) + raise self.test_case.failureException(msg) + +class _AssertRaisesBaseContext(_BaseTestCaseContext): + + def __init__(self, expected, test_case, expected_regex=None): + _BaseTestCaseContext.__init__(self, test_case) + self.expected = expected + self.test_case = test_case + if expected_regex is not None: + expected_regex = re.compile(expected_regex) + self.expected_regex = expected_regex + self.obj_name = None + self.msg = None + + def handle(self, name, args, kwargs): + """ + If args is empty, assertRaises/Warns is being used as a + context manager, so check for a 'msg' kwarg and return self. + If args is not empty, call a callable passing positional and keyword + arguments. 
+ """ + try: + if not _is_subtype(self.expected, self._base_type): + raise TypeError('%s() arg 1 must be %s' % + (name, self._base_type_str)) + if not args: + self.msg = kwargs.pop('msg', None) + if kwargs: + raise TypeError('%r is an invalid keyword argument for ' + 'this function' % (next(iter(kwargs)),)) + return self + + callable_obj, *args = args + try: + self.obj_name = callable_obj.__name__ + except AttributeError: + self.obj_name = str(callable_obj) + with self: + callable_obj(*args, **kwargs) + finally: + # bpo-23890: manually break a reference cycle + self = None + + +class _AssertRaisesContext(_AssertRaisesBaseContext): + """A context manager used to implement TestCase.assertRaises* methods.""" + + _base_type = BaseException + _base_type_str = 'an exception type or tuple of exception types' + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + if exc_type is None: + try: + exc_name = self.expected.__name__ + except AttributeError: + exc_name = str(self.expected) + if self.obj_name: + self._raiseFailure("{} not raised by {}".format(exc_name, + self.obj_name)) + else: + self._raiseFailure("{} not raised".format(exc_name)) + else: + traceback.clear_frames(tb) + if not issubclass(exc_type, self.expected): + # let unexpected exceptions pass through + return False + # store exception, without traceback, for later retrieval + self.exception = exc_value.with_traceback(None) + if self.expected_regex is None: + return True + + expected_regex = self.expected_regex + if not expected_regex.search(str(exc_value)): + self._raiseFailure('"{}" does not match "{}"'.format( + expected_regex.pattern, str(exc_value))) + return True + + __class_getitem__ = classmethod(types.GenericAlias) + + +class _AssertWarnsContext(_AssertRaisesBaseContext): + """A context manager used to implement TestCase.assertWarns* methods.""" + + _base_type = Warning + _base_type_str = 'a warning type or tuple of warning types' + + def __enter__(self): + # The __warningregistry__'s need to be in a pristine state for tests + # to work properly. 
+ for v in list(sys.modules.values()): + if getattr(v, '__warningregistry__', None): + v.__warningregistry__ = {} + self.warnings_manager = warnings.catch_warnings(record=True) + self.warnings = self.warnings_manager.__enter__() + warnings.simplefilter("always", self.expected) + return self + + def __exit__(self, exc_type, exc_value, tb): + self.warnings_manager.__exit__(exc_type, exc_value, tb) + if exc_type is not None: + # let unexpected exceptions pass through + return + try: + exc_name = self.expected.__name__ + except AttributeError: + exc_name = str(self.expected) + first_matching = None + for m in self.warnings: + w = m.message + if not isinstance(w, self.expected): + continue + if first_matching is None: + first_matching = w + if (self.expected_regex is not None and + not self.expected_regex.search(str(w))): + continue + # store warning for later retrieval + self.warning = w + self.filename = m.filename + self.lineno = m.lineno + return + # Now we simply try to choose a helpful failure message + if first_matching is not None: + self._raiseFailure('"{}" does not match "{}"'.format( + self.expected_regex.pattern, str(first_matching))) + if self.obj_name: + self._raiseFailure("{} not triggered by {}".format(exc_name, + self.obj_name)) + else: + self._raiseFailure("{} not triggered".format(exc_name)) + + +class _AssertNotWarnsContext(_AssertWarnsContext): + + def __exit__(self, exc_type, exc_value, tb): + self.warnings_manager.__exit__(exc_type, exc_value, tb) + if exc_type is not None: + # let unexpected exceptions pass through + return + try: + exc_name = self.expected.__name__ + except AttributeError: + exc_name = str(self.expected) + for m in self.warnings: + w = m.message + if isinstance(w, self.expected): + self._raiseFailure(f"{exc_name} triggered") + + +class _OrderedChainMap(collections.ChainMap): + def __iter__(self): + seen = set() + for mapping in self.maps: + for k in mapping: + if k not in seen: + seen.add(k) + yield k + + +class TestCase(object): + """A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. 
It is looked up as an instance + attribute so can be configured by individual tests if required. + """ + + failureException = AssertionError + + longMessage = True + + maxDiff = 80*8 + + # If a string is longer than _diffThreshold, use normal comparison instead + # of difflib. See #11763. + _diffThreshold = 2**16 + + def __init_subclass__(cls, *args, **kwargs): + # Attribute used by TestSuite for classSetUp + cls._classSetupFailed = False + cls._class_cleanups = [] + super().__init_subclass__(*args, **kwargs) + + def __init__(self, methodName='runTest'): + """Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + """ + self._testMethodName = methodName + self._outcome = None + self._testMethodDoc = 'No test' + try: + testMethod = getattr(self, methodName) + except AttributeError: + if methodName != 'runTest': + # we allow instantiation with no explicit method name + # but not an *incorrect* or missing method name + raise ValueError("no such test method in %s: %s" % + (self.__class__, methodName)) + else: + self._testMethodDoc = testMethod.__doc__ + self._cleanups = [] + self._subtest = None + + # Map types to custom assertEqual functions that will compare + # instances of said type in more detail to generate a more useful + # error message. + self._type_equality_funcs = {} + self.addTypeEqualityFunc(dict, 'assertDictEqual') + self.addTypeEqualityFunc(list, 'assertListEqual') + self.addTypeEqualityFunc(tuple, 'assertTupleEqual') + self.addTypeEqualityFunc(set, 'assertSetEqual') + self.addTypeEqualityFunc(frozenset, 'assertSetEqual') + self.addTypeEqualityFunc(str, 'assertMultiLineEqual') + + def addTypeEqualityFunc(self, typeobj, function): + """Add a type specific assertEqual style function to compare a type. + + This method is for use by TestCase subclasses that need to register + their own type equality functions to provide nicer error messages. + + Args: + typeobj: The data type to call this function on when both values + are of the same type in assertEqual(). + function: The callable taking two arguments and an optional + msg= argument that raises self.failureException with a + useful error message when the two arguments are not equal. + """ + self._type_equality_funcs[typeobj] = function + + def addCleanup(self, function, /, *args, **kwargs): + """Add a function, with arguments, to be called when the test is + completed. Functions added are called on a LIFO basis and are + called after tearDown on test failure or success. + + Cleanup items are called even if setUp fails (unlike tearDown).""" + self._cleanups.append((function, args, kwargs)) + + def enterContext(self, cm): + """Enters the supplied context manager. + + If successful, also adds its __exit__ method as a cleanup + function and returns the result of the __enter__ method. + """ + return _enter_context(cm, self.addCleanup) + + @classmethod + def addClassCleanup(cls, function, /, *args, **kwargs): + """Same as addCleanup, except the cleanup items are called even if + setUpClass fails (unlike tearDownClass).""" + cls._class_cleanups.append((function, args, kwargs)) + + @classmethod + def enterClassContext(cls, cm): + """Same as enterContext, but class-wide.""" + return _enter_context(cm, cls.addClassCleanup) + + def setUp(self): + "Hook method for setting up the test fixture before exercising it." + pass + + def tearDown(self): + "Hook method for deconstructing the test fixture after testing it." 
+ pass + + @classmethod + def setUpClass(cls): + "Hook method for setting up class fixture before running tests in the class." + + @classmethod + def tearDownClass(cls): + "Hook method for deconstructing the class fixture after running all tests in the class." + + def countTestCases(self): + return 1 + + def defaultTestResult(self): + return result.TestResult() + + def shortDescription(self): + """Returns a one-line description of the test, or None if no + description has been provided. + + The default implementation of this method returns the first line of + the specified test method's docstring. + """ + doc = self._testMethodDoc + return doc.strip().split("\n")[0].strip() if doc else None + + + def id(self): + return "%s.%s" % (strclass(self.__class__), self._testMethodName) + + def __eq__(self, other): + if type(self) is not type(other): + return NotImplemented + + return self._testMethodName == other._testMethodName + + def __hash__(self): + return hash((type(self), self._testMethodName)) + + def __str__(self): + return "%s (%s.%s)" % (self._testMethodName, strclass(self.__class__), self._testMethodName) + + def __repr__(self): + return "<%s testMethod=%s>" % \ + (strclass(self.__class__), self._testMethodName) + + @contextlib.contextmanager + def subTest(self, msg=_subtest_msg_sentinel, **params): + """Return a context manager that will return the enclosed block + of code in a subtest identified by the optional message and + keyword parameters. A failure in the subtest marks the test + case as failed but resumes execution at the end of the enclosed + block, allowing further test code to be executed. + """ + if self._outcome is None or not self._outcome.result_supports_subtests: + yield + return + parent = self._subtest + if parent is None: + params_map = _OrderedChainMap(params) + else: + params_map = parent.params.new_child(params) + self._subtest = _SubTest(self, msg, params_map) + try: + with self._outcome.testPartExecutor(self._subtest, subTest=True): + yield + if not self._outcome.success: + result = self._outcome.result + if result is not None and result.failfast: + raise _ShouldStop + elif self._outcome.expectedFailure: + # If the test is expecting a failure, we really want to + # stop now and register the expected failure. + raise _ShouldStop + finally: + self._subtest = parent + + def _addExpectedFailure(self, result, exc_info): + try: + addExpectedFailure = result.addExpectedFailure + except AttributeError: + warnings.warn("TestResult has no addExpectedFailure method, reporting as passes", + RuntimeWarning) + result.addSuccess(self) + else: + addExpectedFailure(self, exc_info) + + def _addUnexpectedSuccess(self, result): + try: + addUnexpectedSuccess = result.addUnexpectedSuccess + except AttributeError: + warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failure", + RuntimeWarning) + # We need to pass an actual exception and traceback to addFailure, + # otherwise the legacy result can choke. 
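+            # Raising and immediately catching _UnexpectedSuccess below gives
+            # sys.exc_info() a real (type, value, traceback) triple to hand
+            # to addFailure.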
+ try: + raise _UnexpectedSuccess from None + except _UnexpectedSuccess: + result.addFailure(self, sys.exc_info()) + else: + addUnexpectedSuccess(self) + + def _addDuration(self, result, elapsed): + try: + addDuration = result.addDuration + except AttributeError: + warnings.warn("TestResult has no addDuration method", + RuntimeWarning) + else: + addDuration(self, elapsed) + + def _callSetUp(self): + self.setUp() + + def _callTestMethod(self, method): + result = method() + if result is not None: + import inspect + msg = ( + f'It is deprecated to return a value that is not None ' + f'from a test case ({method} returned {type(result).__name__!r})' + ) + if inspect.iscoroutine(result): + msg += ( + '. Maybe you forgot to use IsolatedAsyncioTestCase as the base class?' + ) + warnings.warn(msg, DeprecationWarning, stacklevel=3) + + def _callTearDown(self): + self.tearDown() + + def _callCleanup(self, function, /, *args, **kwargs): + function(*args, **kwargs) + + def run(self, result=None): + if result is None: + result = self.defaultTestResult() + startTestRun = getattr(result, 'startTestRun', None) + stopTestRun = getattr(result, 'stopTestRun', None) + if startTestRun is not None: + startTestRun() + else: + stopTestRun = None + + result.startTest(self) + try: + testMethod = getattr(self, self._testMethodName) + if (getattr(self.__class__, "__unittest_skip__", False) or + getattr(testMethod, "__unittest_skip__", False)): + # If the class or method was skipped. + skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') + or getattr(testMethod, '__unittest_skip_why__', '')) + _addSkip(result, self, skip_why) + return result + + expecting_failure = ( + getattr(self, "__unittest_expecting_failure__", False) or + getattr(testMethod, "__unittest_expecting_failure__", False) + ) + outcome = _Outcome(result) + start_time = time.perf_counter() + try: + self._outcome = outcome + + with outcome.testPartExecutor(self): + self._callSetUp() + if outcome.success: + outcome.expecting_failure = expecting_failure + with outcome.testPartExecutor(self): + self._callTestMethod(testMethod) + outcome.expecting_failure = False + with outcome.testPartExecutor(self): + self._callTearDown() + self.doCleanups() + self._addDuration(result, (time.perf_counter() - start_time)) + + if outcome.success: + if expecting_failure: + if outcome.expectedFailure: + self._addExpectedFailure(result, outcome.expectedFailure) + else: + self._addUnexpectedSuccess(result) + else: + result.addSuccess(self) + return result + finally: + # explicitly break reference cycle: + # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure + outcome.expectedFailure = None + outcome = None + + # clear the outcome, no more needed + self._outcome = None + + finally: + result.stopTest(self) + if stopTestRun is not None: + stopTestRun() + + def doCleanups(self): + """Execute all cleanup functions. Normally called for you after + tearDown.""" + outcome = self._outcome or _Outcome() + while self._cleanups: + function, args, kwargs = self._cleanups.pop() + with outcome.testPartExecutor(self): + self._callCleanup(function, *args, **kwargs) + + # return this for backwards compatibility + # even though we no longer use it internally + return outcome.success + + @classmethod + def doClassCleanups(cls): + """Execute all class cleanup functions. 
Normally called for you after + tearDownClass.""" + cls.tearDown_exceptions = [] + while cls._class_cleanups: + function, args, kwargs = cls._class_cleanups.pop() + try: + function(*args, **kwargs) + except Exception: + cls.tearDown_exceptions.append(sys.exc_info()) + + def __call__(self, *args, **kwds): + return self.run(*args, **kwds) + + def debug(self): + """Run the test without collecting errors in a TestResult""" + testMethod = getattr(self, self._testMethodName) + if (getattr(self.__class__, "__unittest_skip__", False) or + getattr(testMethod, "__unittest_skip__", False)): + # If the class or method was skipped. + skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') + or getattr(testMethod, '__unittest_skip_why__', '')) + raise SkipTest(skip_why) + + self._callSetUp() + self._callTestMethod(testMethod) + self._callTearDown() + while self._cleanups: + function, args, kwargs = self._cleanups.pop() + self._callCleanup(function, *args, **kwargs) + + def skipTest(self, reason): + """Skip this test.""" + raise SkipTest(reason) + + def fail(self, msg=None): + """Fail immediately, with the given message.""" + raise self.failureException(msg) + + def assertFalse(self, expr, msg=None): + """Check that the expression is false.""" + if expr: + msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr)) + raise self.failureException(msg) + + def assertTrue(self, expr, msg=None): + """Check that the expression is true.""" + if not expr: + msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr)) + raise self.failureException(msg) + + def _formatMessage(self, msg, standardMsg): + """Honour the longMessage attribute when generating failure messages. + If longMessage is False this means: + * Use only an explicit message if it is provided + * Otherwise use the standard message for the assert + + If longMessage is True: + * Use the standard message + * If an explicit message is provided, plus ' : ' and the explicit message + """ + if not self.longMessage: + return msg or standardMsg + if msg is None: + return standardMsg + try: + # don't switch to '{}' formatting in Python 2.X + # it changes the way unicode input is handled + return '%s : %s' % (standardMsg, msg) + except UnicodeDecodeError: + return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg)) + + def assertRaises(self, expected_exception, *args, **kwargs): + """Fail unless an exception of class expected_exception is raised + by the callable when invoked with specified positional and + keyword arguments. If a different type of exception is + raised, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + + If called with the callable and arguments omitted, will return a + context object used like this:: + + with self.assertRaises(SomeException): + do_something() + + An optional keyword argument 'msg' can be provided when assertRaises + is used as a context object. + + The context manager keeps a reference to the exception as + the 'exception' attribute. 
This allows you to inspect the + exception after the assertion:: + + with self.assertRaises(SomeException) as cm: + do_something() + the_exception = cm.exception + self.assertEqual(the_exception.error_code, 3) + """ + context = _AssertRaisesContext(expected_exception, self) + try: + return context.handle('assertRaises', args, kwargs) + finally: + # bpo-23890: manually break a reference cycle + context = None + + def assertWarns(self, expected_warning, *args, **kwargs): + """Fail unless a warning of class warnClass is triggered + by the callable when invoked with specified positional and + keyword arguments. If a different type of warning is + triggered, it will not be handled: depending on the other + warning filtering rules in effect, it might be silenced, printed + out, or raised as an exception. + + If called with the callable and arguments omitted, will return a + context object used like this:: + + with self.assertWarns(SomeWarning): + do_something() + + An optional keyword argument 'msg' can be provided when assertWarns + is used as a context object. + + The context manager keeps a reference to the first matching + warning as the 'warning' attribute; similarly, the 'filename' + and 'lineno' attributes give you information about the line + of Python code from which the warning was triggered. + This allows you to inspect the warning after the assertion:: + + with self.assertWarns(SomeWarning) as cm: + do_something() + the_warning = cm.warning + self.assertEqual(the_warning.some_attribute, 147) + """ + context = _AssertWarnsContext(expected_warning, self) + return context.handle('assertWarns', args, kwargs) + + def _assertNotWarns(self, expected_warning, *args, **kwargs): + """The opposite of assertWarns. Private due to low demand.""" + context = _AssertNotWarnsContext(expected_warning, self) + return context.handle('_assertNotWarns', args, kwargs) + + def assertLogs(self, logger=None, level=None): + """Fail unless a log message of level *level* or higher is emitted + on *logger_name* or its children. If omitted, *level* defaults to + INFO and *logger* defaults to the root logger. + + This method must be used as a context manager, and will yield + a recording object with two attributes: `output` and `records`. + At the end of the context manager, the `output` attribute will + be a list of the matching formatted log messages and the + `records` attribute will be a list of the corresponding LogRecord + objects. + + Example:: + + with self.assertLogs('foo', level='INFO') as cm: + logging.getLogger('foo').info('first message') + logging.getLogger('foo.bar').error('second message') + self.assertEqual(cm.output, ['INFO:foo:first message', + 'ERROR:foo.bar:second message']) + """ + # Lazy import to avoid importing logging if it is not needed. + from ._log import _AssertLogsContext + return _AssertLogsContext(self, logger, level, no_logs=False) + + def assertNoLogs(self, logger=None, level=None): + """ Fail unless no log messages of level *level* or higher are emitted + on *logger_name* or its children. + + This method must be used as a context manager. + """ + from ._log import _AssertLogsContext + return _AssertLogsContext(self, logger, level, no_logs=True) + + def _getAssertEqualityFunc(self, first, second): + """Get a detailed comparison function for the types of the two args. + + Returns: A callable accepting (first, second, msg=None) that will + raise a failure exception if first != second with a useful human + readable error message for those types. 
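+
+        By default, __init__ registers such functions for dict, list,
+        tuple, set, frozenset and str (via addTypeEqualityFunc).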
+ """ + # + # NOTE(gregory.p.smith): I considered isinstance(first, type(second)) + # and vice versa. I opted for the conservative approach in case + # subclasses are not intended to be compared in detail to their super + # class instances using a type equality func. This means testing + # subtypes won't automagically use the detailed comparison. Callers + # should use their type specific assertSpamEqual method to compare + # subclasses if the detailed comparison is desired and appropriate. + # See the discussion in http://bugs.python.org/issue2578. + # + if type(first) is type(second): + asserter = self._type_equality_funcs.get(type(first)) + if asserter is not None: + if isinstance(asserter, str): + asserter = getattr(self, asserter) + return asserter + + return self._baseAssertEqual + + def _baseAssertEqual(self, first, second, msg=None): + """The default assertEqual implementation, not type specific.""" + if not first == second: + standardMsg = '%s != %s' % _common_shorten_repr(first, second) + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def assertEqual(self, first, second, msg=None): + """Fail if the two objects are unequal as determined by the '==' + operator. + """ + assertion_func = self._getAssertEqualityFunc(first, second) + assertion_func(first, second, msg=msg) + + def assertNotEqual(self, first, second, msg=None): + """Fail if the two objects are equal as determined by the '!=' + operator. + """ + if not first != second: + msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first), + safe_repr(second))) + raise self.failureException(msg) + + def assertAlmostEqual(self, first, second, places=None, msg=None, + delta=None): + """Fail if the two objects are unequal as determined by their + difference rounded to the given number of decimal places + (default 7) and comparing to zero, or by comparing that the + difference between the two objects is more than the given + delta. + + Note that decimal places (from zero) are usually not the same + as significant digits (measured from the most significant digit). + + If the two objects compare equal then they will automatically + compare almost equal. + """ + if first == second: + # shortcut + return + if delta is not None and places is not None: + raise TypeError("specify delta or places not both") + + diff = abs(first - second) + if delta is not None: + if diff <= delta: + return + + standardMsg = '%s != %s within %s delta (%s difference)' % ( + safe_repr(first), + safe_repr(second), + safe_repr(delta), + safe_repr(diff)) + else: + if places is None: + places = 7 + + if round(diff, places) == 0: + return + + standardMsg = '%s != %s within %r places (%s difference)' % ( + safe_repr(first), + safe_repr(second), + places, + safe_repr(diff)) + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def assertNotAlmostEqual(self, first, second, places=None, msg=None, + delta=None): + """Fail if the two objects are equal as determined by their + difference rounded to the given number of decimal places + (default 7) and comparing to zero, or by comparing that the + difference between the two objects is less than the given delta. + + Note that decimal places (from zero) are usually not the same + as significant digits (measured from the most significant digit). + + Objects that are equal automatically fail. 
+ """ + if delta is not None and places is not None: + raise TypeError("specify delta or places not both") + diff = abs(first - second) + if delta is not None: + if not (first == second) and diff > delta: + return + standardMsg = '%s == %s within %s delta (%s difference)' % ( + safe_repr(first), + safe_repr(second), + safe_repr(delta), + safe_repr(diff)) + else: + if places is None: + places = 7 + if not (first == second) and round(diff, places) != 0: + return + standardMsg = '%s == %s within %r places' % (safe_repr(first), + safe_repr(second), + places) + + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None): + """An equality assertion for ordered sequences (like lists and tuples). + + For the purposes of this function, a valid ordered sequence type is one + which can be indexed, has a length, and has an equality operator. + + Args: + seq1: The first sequence to compare. + seq2: The second sequence to compare. + seq_type: The expected datatype of the sequences, or None if no + datatype should be enforced. + msg: Optional message to use on failure instead of a list of + differences. + """ + if seq_type is not None: + seq_type_name = seq_type.__name__ + if not isinstance(seq1, seq_type): + raise self.failureException('First sequence is not a %s: %s' + % (seq_type_name, safe_repr(seq1))) + if not isinstance(seq2, seq_type): + raise self.failureException('Second sequence is not a %s: %s' + % (seq_type_name, safe_repr(seq2))) + else: + seq_type_name = "sequence" + + differing = None + try: + len1 = len(seq1) + except (TypeError, NotImplementedError): + differing = 'First %s has no length. Non-sequence?' % ( + seq_type_name) + + if differing is None: + try: + len2 = len(seq2) + except (TypeError, NotImplementedError): + differing = 'Second %s has no length. Non-sequence?' % ( + seq_type_name) + + if differing is None: + if seq1 == seq2: + return + + differing = '%ss differ: %s != %s\n' % ( + (seq_type_name.capitalize(),) + + _common_shorten_repr(seq1, seq2)) + + for i in range(min(len1, len2)): + try: + item1 = seq1[i] + except (TypeError, IndexError, NotImplementedError): + differing += ('\nUnable to index element %d of first %s\n' % + (i, seq_type_name)) + break + + try: + item2 = seq2[i] + except (TypeError, IndexError, NotImplementedError): + differing += ('\nUnable to index element %d of second %s\n' % + (i, seq_type_name)) + break + + if item1 != item2: + differing += ('\nFirst differing element %d:\n%s\n%s\n' % + ((i,) + _common_shorten_repr(item1, item2))) + break + else: + if (len1 == len2 and seq_type is None and + type(seq1) != type(seq2)): + # The sequences are the same, but have differing types. 
+ return + + if len1 > len2: + differing += ('\nFirst %s contains %d additional ' + 'elements.\n' % (seq_type_name, len1 - len2)) + try: + differing += ('First extra element %d:\n%s\n' % + (len2, safe_repr(seq1[len2]))) + except (TypeError, IndexError, NotImplementedError): + differing += ('Unable to index element %d ' + 'of first %s\n' % (len2, seq_type_name)) + elif len1 < len2: + differing += ('\nSecond %s contains %d additional ' + 'elements.\n' % (seq_type_name, len2 - len1)) + try: + differing += ('First extra element %d:\n%s\n' % + (len1, safe_repr(seq2[len1]))) + except (TypeError, IndexError, NotImplementedError): + differing += ('Unable to index element %d ' + 'of second %s\n' % (len1, seq_type_name)) + standardMsg = differing + diffMsg = '\n' + '\n'.join( + difflib.ndiff(pprint.pformat(seq1).splitlines(), + pprint.pformat(seq2).splitlines())) + + standardMsg = self._truncateMessage(standardMsg, diffMsg) + msg = self._formatMessage(msg, standardMsg) + self.fail(msg) + + def _truncateMessage(self, message, diff): + max_diff = self.maxDiff + if max_diff is None or len(diff) <= max_diff: + return message + diff + return message + (DIFF_OMITTED % len(diff)) + + def assertListEqual(self, list1, list2, msg=None): + """A list-specific equality assertion. + + Args: + list1: The first list to compare. + list2: The second list to compare. + msg: Optional message to use on failure instead of a list of + differences. + + """ + self.assertSequenceEqual(list1, list2, msg, seq_type=list) + + def assertTupleEqual(self, tuple1, tuple2, msg=None): + """A tuple-specific equality assertion. + + Args: + tuple1: The first tuple to compare. + tuple2: The second tuple to compare. + msg: Optional message to use on failure instead of a list of + differences. + """ + self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple) + + def assertSetEqual(self, set1, set2, msg=None): + """A set-specific equality assertion. + + Args: + set1: The first set to compare. + set2: The second set to compare. + msg: Optional message to use on failure instead of a list of + differences. + + assertSetEqual uses ducktyping to support different types of sets, and + is optimized for sets specifically (parameters must support a + difference method). 
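+
+        For example::
+
+            self.assertSetEqual({1, 2}, frozenset({1, 2}))  # passes
+            self.assertSetEqual({1, 2}, {1, 3})             # fails on 2 and 3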
+ """ + try: + difference1 = set1.difference(set2) + except TypeError as e: + self.fail('invalid type when attempting set difference: %s' % e) + except AttributeError as e: + self.fail('first argument does not support set difference: %s' % e) + + try: + difference2 = set2.difference(set1) + except TypeError as e: + self.fail('invalid type when attempting set difference: %s' % e) + except AttributeError as e: + self.fail('second argument does not support set difference: %s' % e) + + if not (difference1 or difference2): + return + + lines = [] + if difference1: + lines.append('Items in the first set but not the second:') + for item in difference1: + lines.append(repr(item)) + if difference2: + lines.append('Items in the second set but not the first:') + for item in difference2: + lines.append(repr(item)) + + standardMsg = '\n'.join(lines) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIn(self, member, container, msg=None): + """Just like self.assertTrue(a in b), but with a nicer default message.""" + if member not in container: + standardMsg = '%s not found in %s' % (safe_repr(member), + safe_repr(container)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotIn(self, member, container, msg=None): + """Just like self.assertTrue(a not in b), but with a nicer default message.""" + if member in container: + standardMsg = '%s unexpectedly found in %s' % (safe_repr(member), + safe_repr(container)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIs(self, expr1, expr2, msg=None): + """Just like self.assertTrue(a is b), but with a nicer default message.""" + if expr1 is not expr2: + standardMsg = '%s is not %s' % (safe_repr(expr1), + safe_repr(expr2)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIsNot(self, expr1, expr2, msg=None): + """Just like self.assertTrue(a is not b), but with a nicer default message.""" + if expr1 is expr2: + standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertDictEqual(self, d1, d2, msg=None): + self.assertIsInstance(d1, dict, 'First argument is not a dictionary') + self.assertIsInstance(d2, dict, 'Second argument is not a dictionary') + + if d1 != d2: + standardMsg = '%s != %s' % _common_shorten_repr(d1, d2) + diff = ('\n' + '\n'.join(difflib.ndiff( + pprint.pformat(d1).splitlines(), + pprint.pformat(d2).splitlines()))) + standardMsg = self._truncateMessage(standardMsg, diff) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertCountEqual(self, first, second, msg=None): + """Asserts that two iterables have the same elements, the same number of + times, without regard to order. + + self.assertEqual(Counter(list(first)), + Counter(list(second))) + + Example: + - [0, 1, 1] and [1, 0, 1] compare equal. + - [0, 0, 1] and [0, 1] compare unequal. 
+ + """ + first_seq, second_seq = list(first), list(second) + try: + first = collections.Counter(first_seq) + second = collections.Counter(second_seq) + except TypeError: + # Handle case with unhashable elements + differences = _count_diff_all_purpose(first_seq, second_seq) + else: + if first == second: + return + differences = _count_diff_hashable(first_seq, second_seq) + + if differences: + standardMsg = 'Element counts were not equal:\n' + lines = ['First has %d, Second has %d: %r' % diff for diff in differences] + diffMsg = '\n'.join(lines) + standardMsg = self._truncateMessage(standardMsg, diffMsg) + msg = self._formatMessage(msg, standardMsg) + self.fail(msg) + + def assertMultiLineEqual(self, first, second, msg=None): + """Assert that two multi-line strings are equal.""" + self.assertIsInstance(first, str, "First argument is not a string") + self.assertIsInstance(second, str, "Second argument is not a string") + + if first != second: + # Don't use difflib if the strings are too long + if (len(first) > self._diffThreshold or + len(second) > self._diffThreshold): + self._baseAssertEqual(first, second, msg) + + # Append \n to both strings if either is missing the \n. + # This allows the final ndiff to show the \n difference. The + # exception here is if the string is empty, in which case no + # \n should be added + first_presplit = first + second_presplit = second + if first and second: + if first[-1] != '\n' or second[-1] != '\n': + first_presplit += '\n' + second_presplit += '\n' + elif second and second[-1] != '\n': + second_presplit += '\n' + elif first and first[-1] != '\n': + first_presplit += '\n' + + firstlines = first_presplit.splitlines(keepends=True) + secondlines = second_presplit.splitlines(keepends=True) + + # Generate the message and diff, then raise the exception + standardMsg = '%s != %s' % _common_shorten_repr(first, second) + diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines)) + standardMsg = self._truncateMessage(standardMsg, diff) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertLess(self, a, b, msg=None): + """Just like self.assertTrue(a < b), but with a nicer default message.""" + if not a < b: + standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertLessEqual(self, a, b, msg=None): + """Just like self.assertTrue(a <= b), but with a nicer default message.""" + if not a <= b: + standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertGreater(self, a, b, msg=None): + """Just like self.assertTrue(a > b), but with a nicer default message.""" + if not a > b: + standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertGreaterEqual(self, a, b, msg=None): + """Just like self.assertTrue(a >= b), but with a nicer default message.""" + if not a >= b: + standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIsNone(self, obj, msg=None): + """Same as self.assertTrue(obj is None), with a nicer default message.""" + if obj is not None: + standardMsg = '%s is not None' % (safe_repr(obj),) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIsNotNone(self, obj, msg=None): + """Included for symmetry with assertIsNone.""" + if obj is None: + standardMsg = 'unexpectedly None' + self.fail(self._formatMessage(msg, 
standardMsg)) + + def assertIsInstance(self, obj, cls, msg=None): + """Same as self.assertTrue(isinstance(obj, cls)), with a nicer + default message.""" + if not isinstance(obj, cls): + if isinstance(cls, tuple): + standardMsg = f'{safe_repr(obj)} is not an instance of any of {cls!r}' + else: + standardMsg = f'{safe_repr(obj)} is not an instance of {cls!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotIsInstance(self, obj, cls, msg=None): + """Included for symmetry with assertIsInstance.""" + if isinstance(obj, cls): + if isinstance(cls, tuple): + for x in cls: + if isinstance(obj, x): + cls = x + break + standardMsg = f'{safe_repr(obj)} is an instance of {cls!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertIsSubclass(self, cls, superclass, msg=None): + try: + if issubclass(cls, superclass): + return + except TypeError: + if not isinstance(cls, type): + self.fail(self._formatMessage(msg, f'{cls!r} is not a class')) + raise + if isinstance(superclass, tuple): + standardMsg = f'{cls!r} is not a subclass of any of {superclass!r}' + else: + standardMsg = f'{cls!r} is not a subclass of {superclass!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotIsSubclass(self, cls, superclass, msg=None): + try: + if not issubclass(cls, superclass): + return + except TypeError: + if not isinstance(cls, type): + self.fail(self._formatMessage(msg, f'{cls!r} is not a class')) + raise + if isinstance(superclass, tuple): + for x in superclass: + if issubclass(cls, x): + superclass = x + break + standardMsg = f'{cls!r} is a subclass of {superclass!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertHasAttr(self, obj, name, msg=None): + if not hasattr(obj, name): + if isinstance(obj, types.ModuleType): + standardMsg = f'module {obj.__name__!r} has no attribute {name!r}' + elif isinstance(obj, type): + standardMsg = f'type object {obj.__name__!r} has no attribute {name!r}' + else: + standardMsg = f'{type(obj).__name__!r} object has no attribute {name!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotHasAttr(self, obj, name, msg=None): + if hasattr(obj, name): + if isinstance(obj, types.ModuleType): + standardMsg = f'module {obj.__name__!r} has unexpected attribute {name!r}' + elif isinstance(obj, type): + standardMsg = f'type object {obj.__name__!r} has unexpected attribute {name!r}' + else: + standardMsg = f'{type(obj).__name__!r} object has unexpected attribute {name!r}' + self.fail(self._formatMessage(msg, standardMsg)) + + def assertRaisesRegex(self, expected_exception, expected_regex, + *args, **kwargs): + """Asserts that the message in a raised exception matches a regex. + + Args: + expected_exception: Exception class expected to be raised. + expected_regex: Regex (re.Pattern object or string) expected + to be found in error message. + args: Function to be called and extra positional args. + kwargs: Extra kwargs. + msg: Optional message used in case of failure. Can only be used + when assertRaisesRegex is used as a context manager. + """ + context = _AssertRaisesContext(expected_exception, self, expected_regex) + return context.handle('assertRaisesRegex', args, kwargs) + + def assertWarnsRegex(self, expected_warning, expected_regex, + *args, **kwargs): + """Asserts that the message in a triggered warning matches a regexp. + Basic functioning is similar to assertWarns() with the addition + that only warnings whose messages also match the regular expression + are considered successful matches. 
+ + Args: + expected_warning: Warning class expected to be triggered. + expected_regex: Regex (re.Pattern object or string) expected + to be found in error message. + args: Function to be called and extra positional args. + kwargs: Extra kwargs. + msg: Optional message used in case of failure. Can only be used + when assertWarnsRegex is used as a context manager. + """ + context = _AssertWarnsContext(expected_warning, self, expected_regex) + return context.handle('assertWarnsRegex', args, kwargs) + + def assertRegex(self, text, expected_regex, msg=None): + """Fail the test unless the text matches the regular expression.""" + if isinstance(expected_regex, (str, bytes)): + assert expected_regex, "expected_regex must not be empty." + expected_regex = re.compile(expected_regex) + if not expected_regex.search(text): + standardMsg = "Regex didn't match: %r not found in %r" % ( + expected_regex.pattern, text) + # _formatMessage ensures the longMessage option is respected + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def assertNotRegex(self, text, unexpected_regex, msg=None): + """Fail the test if the text matches the regular expression.""" + if isinstance(unexpected_regex, (str, bytes)): + unexpected_regex = re.compile(unexpected_regex) + match = unexpected_regex.search(text) + if match: + standardMsg = 'Regex matched: %r matches %r in %r' % ( + text[match.start() : match.end()], + unexpected_regex.pattern, + text) + # _formatMessage ensures the longMessage option is respected + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def _tail_type_check(self, s, tails, msg): + if not isinstance(tails, tuple): + tails = (tails,) + for tail in tails: + if isinstance(tail, str): + if not isinstance(s, str): + self.fail(self._formatMessage(msg, + f'Expected str, not {type(s).__name__}')) + elif isinstance(tail, (bytes, bytearray)): + if not isinstance(s, (bytes, bytearray)): + self.fail(self._formatMessage(msg, + f'Expected bytes, not {type(s).__name__}')) + + def assertStartsWith(self, s, prefix, msg=None): + try: + if s.startswith(prefix): + return + except (AttributeError, TypeError): + self._tail_type_check(s, prefix, msg) + raise + a = safe_repr(s, short=True) + b = safe_repr(prefix) + if isinstance(prefix, tuple): + standardMsg = f"{a} doesn't start with any of {b}" + else: + standardMsg = f"{a} doesn't start with {b}" + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotStartsWith(self, s, prefix, msg=None): + try: + if not s.startswith(prefix): + return + except (AttributeError, TypeError): + self._tail_type_check(s, prefix, msg) + raise + if isinstance(prefix, tuple): + for x in prefix: + if s.startswith(x): + prefix = x + break + a = safe_repr(s, short=True) + b = safe_repr(prefix) + self.fail(self._formatMessage(msg, f"{a} starts with {b}")) + + def assertEndsWith(self, s, suffix, msg=None): + try: + if s.endswith(suffix): + return + except (AttributeError, TypeError): + self._tail_type_check(s, suffix, msg) + raise + a = safe_repr(s, short=True) + b = safe_repr(suffix) + if isinstance(suffix, tuple): + standardMsg = f"{a} doesn't end with any of {b}" + else: + standardMsg = f"{a} doesn't end with {b}" + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotEndsWith(self, s, suffix, msg=None): + try: + if not s.endswith(suffix): + return + except (AttributeError, TypeError): + self._tail_type_check(s, suffix, msg) + raise + if isinstance(suffix, tuple): + for x in suffix: + if s.endswith(x): + 
suffix = x + break + a = safe_repr(s, short=True) + b = safe_repr(suffix) + self.fail(self._formatMessage(msg, f"{a} ends with {b}")) + + +class FunctionTestCase(TestCase): + """A test case that wraps a test function. + + This is useful for slipping pre-existing test functions into the + unittest framework. Optionally, set-up and tidy-up functions can be + supplied. As with TestCase, the tidy-up ('tearDown') function will + always be called if the set-up ('setUp') function ran successfully. + """ + + def __init__(self, testFunc, setUp=None, tearDown=None, description=None): + super(FunctionTestCase, self).__init__() + self._setUpFunc = setUp + self._tearDownFunc = tearDown + self._testFunc = testFunc + self._description = description + + def setUp(self): + if self._setUpFunc is not None: + self._setUpFunc() + + def tearDown(self): + if self._tearDownFunc is not None: + self._tearDownFunc() + + def runTest(self): + self._testFunc() + + def id(self): + return self._testFunc.__name__ + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + + return self._setUpFunc == other._setUpFunc and \ + self._tearDownFunc == other._tearDownFunc and \ + self._testFunc == other._testFunc and \ + self._description == other._description + + def __hash__(self): + return hash((type(self), self._setUpFunc, self._tearDownFunc, + self._testFunc, self._description)) + + def __str__(self): + return "%s (%s)" % (strclass(self.__class__), + self._testFunc.__name__) + + def __repr__(self): + return "<%s tec=%s>" % (strclass(self.__class__), + self._testFunc) + + def shortDescription(self): + if self._description is not None: + return self._description + doc = self._testFunc.__doc__ + return doc and doc.split("\n")[0].strip() or None + + +class _SubTest(TestCase): + + def __init__(self, test_case, message, params): + super().__init__() + self._message = message + self.test_case = test_case + self.params = params + self.failureException = test_case.failureException + + def runTest(self): + raise NotImplementedError("subtests cannot be run directly") + + def _subDescription(self): + parts = [] + if self._message is not _subtest_msg_sentinel: + parts.append("[{}]".format(self._message)) + if self.params: + params_desc = ', '.join( + "{}={!r}".format(k, v) + for (k, v) in self.params.items()) + parts.append("({})".format(params_desc)) + return " ".join(parts) or '()' + + def id(self): + return "{} {}".format(self.test_case.id(), self._subDescription()) + + def shortDescription(self): + """Returns a one-line description of the subtest, or None if no + description has been provided. + """ + return self.test_case.shortDescription() + + def __str__(self): + return "{} {}".format(self.test_case, self._subDescription()) diff --git a/Python314_4_x86_Template/Lib/unittest/loader.py b/Python314_4_x86_Template/Lib/unittest/loader.py new file mode 100644 index 00000000..a52950da --- /dev/null +++ b/Python314_4_x86_Template/Lib/unittest/loader.py @@ -0,0 +1,484 @@ +"""Loading unittests.""" + +import os +import re +import sys +import traceback +import types +import functools + +from fnmatch import fnmatch, fnmatchcase + +from . 
import case, suite, util + +__unittest = True + +# what about .pyc (etc) +# we would need to avoid loading the same tests multiple times +# from '.py', *and* '.pyc' +VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE) + + +class _FailedTest(case.TestCase): + _testMethodName = None + + def __init__(self, method_name, exception): + self._exception = exception + super(_FailedTest, self).__init__(method_name) + + def __getattr__(self, name): + if name != self._testMethodName: + return super(_FailedTest, self).__getattr__(name) + def testFailure(): + raise self._exception + return testFailure + + +def _make_failed_import_test(name, suiteClass): + message = 'Failed to import test module: %s\n%s' % ( + name, traceback.format_exc()) + return _make_failed_test(name, ImportError(message), suiteClass, message) + +def _make_failed_load_tests(name, exception, suiteClass): + message = 'Failed to call load_tests:\n%s' % (traceback.format_exc(),) + return _make_failed_test( + name, exception, suiteClass, message) + +def _make_failed_test(methodname, exception, suiteClass, message): + test = _FailedTest(methodname, exception) + return suiteClass((test,)), message + +def _make_skipped_test(methodname, exception, suiteClass): + @case.skip(str(exception)) + def testSkipped(self): + pass + attrs = {methodname: testSkipped} + TestClass = type("ModuleSkipped", (case.TestCase,), attrs) + return suiteClass((TestClass(methodname),)) + +def _splitext(path): + return os.path.splitext(path)[0] + + +class TestLoader(object): + """ + This class is responsible for loading tests according to various criteria + and returning them wrapped in a TestSuite + """ + testMethodPrefix = 'test' + sortTestMethodsUsing = staticmethod(util.three_way_cmp) + testNamePatterns = None + suiteClass = suite.TestSuite + _top_level_dir = None + + def __init__(self): + super(TestLoader, self).__init__() + self.errors = [] + # Tracks packages which we have called into via load_tests, to + # avoid infinite re-entrancy. + self._loading_packages = set() + + def loadTestsFromTestCase(self, testCaseClass): + """Return a suite of all test cases contained in testCaseClass""" + if issubclass(testCaseClass, suite.TestSuite): + raise TypeError("Test cases should not be derived from " + "TestSuite. Maybe you meant to derive from " + "TestCase?") + if testCaseClass in (case.TestCase, case.FunctionTestCase): + # We don't load any tests from base types that should not be loaded. 
+                testCaseNames = []
+        else:
+            testCaseNames = self.getTestCaseNames(testCaseClass)
+            if not testCaseNames and hasattr(testCaseClass, 'runTest'):
+                testCaseNames = ['runTest']
+        loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
+        return loaded_suite
+
+    def loadTestsFromModule(self, module, *, pattern=None):
+        """Return a suite of all test cases contained in the given module"""
+        tests = []
+        for name in dir(module):
+            obj = getattr(module, name)
+            if (
+                isinstance(obj, type)
+                and issubclass(obj, case.TestCase)
+                and obj not in (case.TestCase, case.FunctionTestCase)
+            ):
+                tests.append(self.loadTestsFromTestCase(obj))
+
+        load_tests = getattr(module, 'load_tests', None)
+        tests = self.suiteClass(tests)
+        if load_tests is not None:
+            try:
+                return load_tests(self, tests, pattern)
+            except Exception as e:
+                error_case, error_message = _make_failed_load_tests(
+                    module.__name__, e, self.suiteClass)
+                self.errors.append(error_message)
+                return error_case
+        return tests
+
+    def loadTestsFromName(self, name, module=None):
+        """Return a suite of all test cases given a string specifier.
+
+        The name may resolve either to a module, a test case class, a
+        test method within a test case class, or a callable object which
+        returns a TestCase or TestSuite instance.
+
+        The method optionally resolves the names relative to a given module.
+        """
+        parts = name.split('.')
+        error_case, error_message = None, None
+        if module is None:
+            parts_copy = parts[:]
+            while parts_copy:
+                try:
+                    module_name = '.'.join(parts_copy)
+                    module = __import__(module_name)
+                    break
+                except ImportError:
+                    next_attribute = parts_copy.pop()
+                    # Last error so we can give it to the user if needed.
+                    error_case, error_message = _make_failed_import_test(
+                        next_attribute, self.suiteClass)
+                    if not parts_copy:
+                        # Even the top level import failed: report that error.
+                        self.errors.append(error_message)
+                        return error_case
+            parts = parts[1:]
+        obj = module
+        for part in parts:
+            try:
+                parent, obj = obj, getattr(obj, part)
+            except AttributeError as e:
+                # We can't traverse some part of the name.
+                if (getattr(obj, '__path__', None) is not None
+                        and error_case is not None):
+                    # This is a package (it has a __path__, per the importlib
+                    # docs), and we encountered an error importing something.
+                    # We cannot tell the difference between
+                    # package.WrongNameTestClass and package.wrong_module_name
+                    # so we just report the ImportError - it is more
+                    # informative.
+                    self.errors.append(error_message)
+                    return error_case
+                else:
+                    # Otherwise, we signal that an AttributeError has occurred.
+ error_case, error_message = _make_failed_test( + part, e, self.suiteClass, + 'Failed to access attribute:\n%s' % ( + traceback.format_exc(),)) + self.errors.append(error_message) + return error_case + + if isinstance(obj, types.ModuleType): + return self.loadTestsFromModule(obj) + elif ( + isinstance(obj, type) + and issubclass(obj, case.TestCase) + and obj not in (case.TestCase, case.FunctionTestCase) + ): + return self.loadTestsFromTestCase(obj) + elif (isinstance(obj, types.FunctionType) and + isinstance(parent, type) and + issubclass(parent, case.TestCase)): + name = parts[-1] + inst = parent(name) + # static methods follow a different path + if not isinstance(getattr(inst, name), types.FunctionType): + return self.suiteClass([inst]) + elif isinstance(obj, suite.TestSuite): + return obj + if callable(obj): + test = obj() + if isinstance(test, suite.TestSuite): + return test + elif isinstance(test, case.TestCase): + return self.suiteClass([test]) + else: + raise TypeError("calling %s returned %s, not a test" % + (obj, test)) + else: + raise TypeError("don't know how to make test from: %s" % obj) + + def loadTestsFromNames(self, names, module=None): + """Return a suite of all test cases found using the given sequence + of string specifiers. See 'loadTestsFromName()'. + """ + suites = [self.loadTestsFromName(name, module) for name in names] + return self.suiteClass(suites) + + def getTestCaseNames(self, testCaseClass): + """Return a sorted sequence of method names found within testCaseClass + """ + def shouldIncludeMethod(attrname): + if not attrname.startswith(self.testMethodPrefix): + return False + testFunc = getattr(testCaseClass, attrname) + if not callable(testFunc): + return False + fullName = f'%s.%s.%s' % ( + testCaseClass.__module__, testCaseClass.__qualname__, attrname + ) + return self.testNamePatterns is None or \ + any(fnmatchcase(fullName, pattern) for pattern in self.testNamePatterns) + testFnNames = list(filter(shouldIncludeMethod, dir(testCaseClass))) + if self.sortTestMethodsUsing: + testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing)) + return testFnNames + + def discover(self, start_dir, pattern='test*.py', top_level_dir=None): + """Find and return all test modules from the specified start + directory, recursing into subdirectories to find them and return all + tests found within them. Only test files that match the pattern will + be loaded. (Using shell style pattern matching.) + + All test modules must be importable from the top level of the project. + If the start directory is not the top level directory then the top + level directory must be specified separately. + + If a test package name (directory with '__init__.py') matches the + pattern then the package will be checked for a 'load_tests' function. If + this exists then it will be called with (loader, tests, pattern) unless + the package has already had load_tests called from the same discovery + invocation, in which case the package module object is not scanned for + tests - this ensures that when a package uses discover to further + discover child tests that infinite recursion does not happen. + + If load_tests exists then discovery does *not* recurse into the package, + load_tests is responsible for loading all tests in the package. + + The pattern is deliberately not stored as a loader attribute so that + packages can continue discovery themselves. top_level_dir is stored so + load_tests does not need to pass this argument in to loader.discover(). 
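+
+        For example, loader.discover('tests', pattern='test_*.py') collects
+        all matching test modules under the (illustrative) 'tests' directory.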
+ + Paths are sorted before being imported to ensure reproducible execution + order even on filesystems with non-alphabetical ordering like ext3/4. + """ + original_top_level_dir = self._top_level_dir + set_implicit_top = False + if top_level_dir is None and self._top_level_dir is not None: + # make top_level_dir optional if called from load_tests in a package + top_level_dir = self._top_level_dir + elif top_level_dir is None: + set_implicit_top = True + top_level_dir = start_dir + + top_level_dir = os.path.abspath(top_level_dir) + + if not top_level_dir in sys.path: + # all test modules must be importable from the top level directory + # should we *unconditionally* put the start directory in first + # in sys.path to minimise likelihood of conflicts between installed + # modules and development versions? + sys.path.insert(0, top_level_dir) + self._top_level_dir = top_level_dir + + is_not_importable = False + is_namespace = False + tests = [] + if os.path.isdir(os.path.abspath(start_dir)): + start_dir = os.path.abspath(start_dir) + if start_dir != top_level_dir: + is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py')) + else: + # support for discovery from dotted module names + try: + __import__(start_dir) + except ImportError: + is_not_importable = True + else: + the_module = sys.modules[start_dir] + if not hasattr(the_module, "__file__") or the_module.__file__ is None: + # look for namespace packages + try: + spec = the_module.__spec__ + except AttributeError: + spec = None + + if spec and spec.submodule_search_locations is not None: + is_namespace = True + + for path in the_module.__path__: + if (not set_implicit_top and + not path.startswith(top_level_dir)): + continue + self._top_level_dir = \ + (path.split(the_module.__name__ + .replace(".", os.path.sep))[0]) + tests.extend(self._find_tests(path, pattern, namespace=True)) + elif the_module.__name__ in sys.builtin_module_names: + # builtin module + raise TypeError('Can not use builtin modules ' + 'as dotted module names') from None + else: + raise TypeError( + f"don't know how to discover from {the_module!r}" + ) from None + + else: + top_part = start_dir.split('.')[0] + start_dir = os.path.abspath(os.path.dirname((the_module.__file__))) + + if set_implicit_top: + if not is_namespace: + if sys.modules[top_part].__file__ is None: + self._top_level_dir = os.path.dirname(the_module.__file__) + if self._top_level_dir not in sys.path: + sys.path.insert(0, self._top_level_dir) + else: + self._top_level_dir = \ + self._get_directory_containing_module(top_part) + sys.path.remove(top_level_dir) + + if is_not_importable: + raise ImportError('Start directory is not importable: %r' % start_dir) + + if not is_namespace: + tests = list(self._find_tests(start_dir, pattern)) + + self._top_level_dir = original_top_level_dir + return self.suiteClass(tests) + + def _get_directory_containing_module(self, module_name): + module = sys.modules[module_name] + full_path = os.path.abspath(module.__file__) + + if os.path.basename(full_path).lower().startswith('__init__.py'): + return os.path.dirname(os.path.dirname(full_path)) + else: + # here we have been given a module rather than a package - so + # all we can do is search the *same* directory the module is in + # should an exception be raised instead + return os.path.dirname(full_path) + + def _get_name_from_path(self, path): + if path == self._top_level_dir: + return '.' 
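+        # Otherwise map the path to a dotted module name relative to the
+        # top level directory, e.g. 'pkg/test_mod.py' -> 'pkg.test_mod'.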
+        path = _splitext(os.path.normpath(path))
+
+        _relpath = os.path.relpath(path, self._top_level_dir)
+        assert not os.path.isabs(_relpath), "Path must be within the project"
+        assert not _relpath.startswith('..'), "Path must be within the project"
+
+        name = _relpath.replace(os.path.sep, '.')
+        return name
+
+    def _get_module_from_name(self, name):
+        __import__(name)
+        return sys.modules[name]
+
+    def _match_path(self, path, full_path, pattern):
+        # override this method to use alternative matching strategy
+        return fnmatch(path, pattern)
+
+    def _find_tests(self, start_dir, pattern, namespace=False):
+        """Used by discovery. Yields test suites it loads."""
+        # Handle the __init__ in this package
+        name = self._get_name_from_path(start_dir)
+        # name is '.' when start_dir == top_level_dir (and top_level_dir is by
+        # definition not a package).
+        if name != '.' and name not in self._loading_packages:
+            # name is in self._loading_packages while we have called into
+            # loadTestsFromModule with name.
+            tests, should_recurse = self._find_test_path(
+                start_dir, pattern, namespace)
+            if tests is not None:
+                yield tests
+            if not should_recurse:
+                # Either an error occurred, or load_tests was used by the
+                # package.
+                return
+        # Handle the contents.
+        paths = sorted(os.listdir(start_dir))
+        for path in paths:
+            full_path = os.path.join(start_dir, path)
+            tests, should_recurse = self._find_test_path(
+                full_path, pattern, False)
+            if tests is not None:
+                yield tests
+            if should_recurse:
+                # we found a package that didn't use load_tests.
+                name = self._get_name_from_path(full_path)
+                self._loading_packages.add(name)
+                try:
+                    yield from self._find_tests(full_path, pattern, False)
+                finally:
+                    self._loading_packages.discard(name)
+
+    def _find_test_path(self, full_path, pattern, namespace=False):
+        """Used by discovery.
+
+        Loads tests from a single file, or a directory's __init__.py when
+        passed the directory.
+
+        Returns a tuple (None_or_tests_from_file, should_recurse).
+        """
+        basename = os.path.basename(full_path)
+        if os.path.isfile(full_path):
+            if not VALID_MODULE_NAME.match(basename):
+                # valid Python identifiers only
+                return None, False
+            if not self._match_path(basename, full_path, pattern):
+                return None, False
+            # if the test file matches, load it
+            name = self._get_name_from_path(full_path)
+            try:
+                module = self._get_module_from_name(name)
+            except case.SkipTest as e:
+                return _make_skipped_test(name, e, self.suiteClass), False
+            except:
+                error_case, error_message = \
+                    _make_failed_import_test(name, self.suiteClass)
+                self.errors.append(error_message)
+                return error_case, False
+            else:
+                mod_file = os.path.abspath(
+                    getattr(module, '__file__', full_path))
+                realpath = _splitext(
+                    os.path.realpath(mod_file))
+                fullpath_noext = _splitext(
+                    os.path.realpath(full_path))
+                if realpath.lower() != fullpath_noext.lower():
+                    module_dir = os.path.dirname(realpath)
+                    mod_name = _splitext(
+                        os.path.basename(full_path))
+                    expected_dir = os.path.dirname(full_path)
+                    msg = ("%r module incorrectly imported from %r. Expected "
Is this module globally installed?") + raise ImportError( + msg % (mod_name, module_dir, expected_dir)) + return self.loadTestsFromModule(module, pattern=pattern), False + elif os.path.isdir(full_path): + if (not namespace and + not os.path.isfile(os.path.join(full_path, '__init__.py'))): + return None, False + + load_tests = None + tests = None + name = self._get_name_from_path(full_path) + try: + package = self._get_module_from_name(name) + except case.SkipTest as e: + return _make_skipped_test(name, e, self.suiteClass), False + except: + error_case, error_message = \ + _make_failed_import_test(name, self.suiteClass) + self.errors.append(error_message) + return error_case, False + else: + load_tests = getattr(package, 'load_tests', None) + # Mark this package as being in load_tests (possibly ;)) + self._loading_packages.add(name) + try: + tests = self.loadTestsFromModule(package, pattern=pattern) + if load_tests is not None: + # loadTestsFromModule(package) has loaded tests for us. + return tests, False + return tests, True + finally: + self._loading_packages.discard(name) + else: + return None, False + + +defaultTestLoader = TestLoader() diff --git a/Python314_4_x86_Template/Lib/unittest/main.py b/Python314_4_x86_Template/Lib/unittest/main.py new file mode 100644 index 00000000..be99d93c --- /dev/null +++ b/Python314_4_x86_Template/Lib/unittest/main.py @@ -0,0 +1,280 @@ +"""Unittest main program""" + +import sys +import argparse +import os + +from . import loader, runner +from .signals import installHandler + +__unittest = True +_NO_TESTS_EXITCODE = 5 + +MAIN_EXAMPLES = """\ +Examples: + %(prog)s test_module - run tests from test_module + %(prog)s module.TestClass - run tests from module.TestClass + %(prog)s module.Class.test_method - run specified test method + %(prog)s path/to/test_file.py - run tests from test_file.py +""" + +MODULE_EXAMPLES = """\ +Examples: + %(prog)s - run default set of tests + %(prog)s MyTestSuite - run suite 'MyTestSuite' + %(prog)s MyTestCase.testSomething - run MyTestCase.testSomething + %(prog)s MyTestCase - run all 'test*' test methods + in MyTestCase +""" + +def _convert_name(name): + # on Linux / Mac OS X 'foo.PY' is not importable, but on + # Windows it is. Simpler to do a case insensitive match + # a better check would be to check that the name is a + # valid Python module name. + if os.path.isfile(name) and name.lower().endswith('.py'): + if os.path.isabs(name): + rel_path = os.path.relpath(name, os.getcwd()) + if os.path.isabs(rel_path) or rel_path.startswith(os.pardir): + return name + name = rel_path + # on Windows both '\' and '/' are used as path + # separators. Better to replace both than rely on os.path.sep + return os.path.normpath(name)[:-3].replace('\\', '.').replace('/', '.') + return name + +def _convert_names(names): + return [_convert_name(name) for name in names] + + +def _convert_select_pattern(pattern): + if not '*' in pattern: + pattern = '*%s*' % pattern + return pattern + + +class TestProgram(object): + """A command-line program that runs a set of tests; this is primarily + for making test modules conveniently executable. 
+ """ + # defaults for testing + module=None + verbosity = 1 + failfast = catchbreak = buffer = progName = warnings = testNamePatterns = None + _discovery_parser = None + + def __init__(self, module='__main__', defaultTest=None, argv=None, + testRunner=None, testLoader=loader.defaultTestLoader, + exit=True, verbosity=1, failfast=None, catchbreak=None, + buffer=None, warnings=None, *, tb_locals=False, + durations=None): + if isinstance(module, str): + self.module = __import__(module) + for part in module.split('.')[1:]: + self.module = getattr(self.module, part) + else: + self.module = module + if argv is None: + argv = sys.argv + + self.exit = exit + self.failfast = failfast + self.catchbreak = catchbreak + self.verbosity = verbosity + self.buffer = buffer + self.tb_locals = tb_locals + self.durations = durations + if warnings is None and not sys.warnoptions: + # even if DeprecationWarnings are ignored by default + # print them anyway unless other warnings settings are + # specified by the warnings arg or the -W python flag + self.warnings = 'default' + else: + # here self.warnings is set either to the value passed + # to the warnings args or to None. + # If the user didn't pass a value self.warnings will + # be None. This means that the behavior is unchanged + # and depends on the values passed to -W. + self.warnings = warnings + self.defaultTest = defaultTest + self.testRunner = testRunner + self.testLoader = testLoader + self.progName = os.path.basename(argv[0]) + self.parseArgs(argv) + self.runTests() + + def _print_help(self, *args, **kwargs): + if self.module is None: + print(self._main_parser.format_help()) + print(MAIN_EXAMPLES % {'prog': self.progName}) + self._discovery_parser.print_help() + else: + print(self._main_parser.format_help()) + print(MODULE_EXAMPLES % {'prog': self.progName}) + + def parseArgs(self, argv): + self._initArgParsers() + if self.module is None: + if len(argv) > 1 and argv[1].lower() == 'discover': + self._do_discovery(argv[2:]) + return + self._main_parser.parse_args(argv[1:], self) + if not self.tests: + # this allows "python -m unittest -v" to still work for + # test discovery. + self._do_discovery([]) + return + else: + self._main_parser.parse_args(argv[1:], self) + + if self.tests: + self.testNames = _convert_names(self.tests) + if __name__ == '__main__': + # to support python -m unittest ... 
+ self.module = None + elif self.defaultTest is None: + # createTests will load tests from self.module + self.testNames = None + elif isinstance(self.defaultTest, str): + self.testNames = (self.defaultTest,) + else: + self.testNames = list(self.defaultTest) + self.createTests() + + def createTests(self, from_discovery=False, Loader=None): + if self.testNamePatterns: + self.testLoader.testNamePatterns = self.testNamePatterns + if from_discovery: + loader = self.testLoader if Loader is None else Loader() + self.test = loader.discover(self.start, self.pattern, self.top) + elif self.testNames is None: + self.test = self.testLoader.loadTestsFromModule(self.module) + else: + self.test = self.testLoader.loadTestsFromNames(self.testNames, + self.module) + + def _initArgParsers(self): + parent_parser = self._getParentArgParser() + self._main_parser = self._getMainArgParser(parent_parser) + self._discovery_parser = self._getDiscoveryArgParser(parent_parser) + + def _getParentArgParser(self): + parser = argparse.ArgumentParser(add_help=False) + + parser.add_argument('-v', '--verbose', dest='verbosity', + action='store_const', const=2, + help='Verbose output') + parser.add_argument('-q', '--quiet', dest='verbosity', + action='store_const', const=0, + help='Quiet output') + parser.add_argument('--locals', dest='tb_locals', + action='store_true', + help='Show local variables in tracebacks') + parser.add_argument('--durations', dest='durations', type=int, + default=None, metavar="N", + help='Show the N slowest test cases (N=0 for all)') + if self.failfast is None: + parser.add_argument('-f', '--failfast', dest='failfast', + action='store_true', + help='Stop on first fail or error') + self.failfast = False + if self.catchbreak is None: + parser.add_argument('-c', '--catch', dest='catchbreak', + action='store_true', + help='Catch Ctrl-C and display results so far') + self.catchbreak = False + if self.buffer is None: + parser.add_argument('-b', '--buffer', dest='buffer', + action='store_true', + help='Buffer stdout and stderr during tests') + self.buffer = False + if self.testNamePatterns is None: + parser.add_argument('-k', dest='testNamePatterns', + action='append', type=_convert_select_pattern, + help='Only run tests which match the given substring') + self.testNamePatterns = [] + + return parser + + def _getMainArgParser(self, parent): + parser = argparse.ArgumentParser(parents=[parent], color=True) + parser.prog = self.progName + parser.print_help = self._print_help + + parser.add_argument('tests', nargs='*', + help='a list of any number of test modules, ' + 'classes and test methods.') + + return parser + + def _getDiscoveryArgParser(self, parent): + parser = argparse.ArgumentParser(parents=[parent], color=True) + parser.prog = '%s discover' % self.progName + parser.epilog = ('For test discovery all test modules must be ' + 'importable from the top level directory of the ' + 'project.') + + parser.add_argument('-s', '--start-directory', dest='start', + help="Directory to start discovery ('.' default)") + parser.add_argument('-p', '--pattern', dest='pattern', + help="Pattern to match tests ('test*.py' default)") + parser.add_argument('-t', '--top-level-directory', dest='top', + help='Top level directory of project (defaults to ' + 'start directory)') + for arg in ('start', 'pattern', 'top'): + parser.add_argument(arg, nargs='?', + default=argparse.SUPPRESS, + help=argparse.SUPPRESS) + + return parser + + def _do_discovery(self, argv, Loader=None): + self.start = '.' 
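+        # These defaults correspond to
+        # `python -m unittest discover -s . -p "test*.py"` (a sketch of the
+        # equivalent command line).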
+ self.pattern = 'test*.py' + self.top = None + if argv is not None: + # handle command line args for test discovery + if self._discovery_parser is None: + # for testing + self._initArgParsers() + self._discovery_parser.parse_args(argv, self) + + self.createTests(from_discovery=True, Loader=Loader) + + def runTests(self): + if self.catchbreak: + installHandler() + if self.testRunner is None: + self.testRunner = runner.TextTestRunner + if isinstance(self.testRunner, type): + try: + try: + testRunner = self.testRunner(verbosity=self.verbosity, + failfast=self.failfast, + buffer=self.buffer, + warnings=self.warnings, + tb_locals=self.tb_locals, + durations=self.durations) + except TypeError: + # didn't accept the tb_locals or durations argument + testRunner = self.testRunner(verbosity=self.verbosity, + failfast=self.failfast, + buffer=self.buffer, + warnings=self.warnings) + except TypeError: + # didn't accept the verbosity, buffer or failfast arguments + testRunner = self.testRunner() + else: + # it is assumed to be a TestRunner instance + testRunner = self.testRunner + self.result = testRunner.run(self.test) + if self.exit: + if not self.result.wasSuccessful(): + sys.exit(1) + elif self.result.testsRun == 0 and len(self.result.skipped) == 0: + sys.exit(_NO_TESTS_EXITCODE) + else: + sys.exit(0) + + +main = TestProgram diff --git a/Python314_4_x86_Template/Lib/unittest/mock.py b/Python314_4_x86_Template/Lib/unittest/mock.py new file mode 100644 index 00000000..92b81d15 --- /dev/null +++ b/Python314_4_x86_Template/Lib/unittest/mock.py @@ -0,0 +1,3204 @@ +# mock.py +# Test tools for mocking and patching. +# Maintained by Michael Foord +# Backport for other versions of Python available from +# https://pypi.org/project/mock + +__all__ = ( + 'Mock', + 'MagicMock', + 'patch', + 'sentinel', + 'DEFAULT', + 'ANY', + 'call', + 'create_autospec', + 'AsyncMock', + 'ThreadingMock', + 'FILTER_DIR', + 'NonCallableMock', + 'NonCallableMagicMock', + 'mock_open', + 'PropertyMock', + 'seal', +) + + +import asyncio +import contextlib +import io +import inspect +import pprint +import sys +import builtins +import pkgutil +from inspect import iscoroutinefunction +import threading +from annotationlib import Format +from dataclasses import fields, is_dataclass +from types import CodeType, ModuleType, MethodType +from unittest.util import safe_repr +from functools import wraps, partial +from threading import RLock + + +class InvalidSpecError(Exception): + """Indicates that an invalid value was used as a mock spec.""" + + +_builtins = {name for name in dir(builtins) if not name.startswith('_')} + +FILTER_DIR = True + +# Workaround for issue #12370 +# Without this, the __class__ properties wouldn't be set correctly +_safe_super = super + +def _is_async_obj(obj): + if _is_instance_mock(obj) and not isinstance(obj, AsyncMock): + return False + if hasattr(obj, '__func__'): + obj = getattr(obj, '__func__') + return iscoroutinefunction(obj) or inspect.isawaitable(obj) + + +def _is_async_func(func): + if getattr(func, '__code__', None): + return iscoroutinefunction(func) + else: + return False + + +def _is_instance_mock(obj): + # can't use isinstance on Mock objects because they override __class__ + # The base class for all mocks is NonCallableMock + return issubclass(type(obj), NonCallableMock) + + +def _is_exception(obj): + return ( + isinstance(obj, BaseException) or + isinstance(obj, type) and issubclass(obj, BaseException) + ) + + +def _extract_mock(obj): + # Autospecced functions will return a FunctionType with "mock" 
attribute + # which is the actual mock object that needs to be used. + if isinstance(obj, FunctionTypes) and hasattr(obj, 'mock'): + return obj.mock + else: + return obj + + +def _get_signature_object(func, as_instance, eat_self): + """ + Given an arbitrary, possibly callable object, try to create a suitable + signature object. + Return a (reduced func, signature) tuple, or None. + """ + if isinstance(func, type) and not as_instance: + # If it's a type and should be modelled as a type, use __init__. + func = func.__init__ + # Skip the `self` argument in __init__ + eat_self = True + elif isinstance(func, (classmethod, staticmethod)): + if isinstance(func, classmethod): + # Skip the `cls` argument of a class method + eat_self = True + # Use the original decorated method to extract the correct function signature + func = func.__func__ + elif not isinstance(func, FunctionTypes): + # If we really want to model an instance of the passed type, + # __call__ should be looked up, not __init__. + try: + func = func.__call__ + except AttributeError: + return None + if eat_self: + sig_func = partial(func, None) + else: + sig_func = func + try: + return func, inspect.signature(sig_func, annotation_format=Format.FORWARDREF) + except ValueError: + # Certain callable types are not supported by inspect.signature() + return None + + +def _check_signature(func, mock, skipfirst, instance=False): + sig = _get_signature_object(func, instance, skipfirst) + if sig is None: + return + func, sig = sig + def checksig(self, /, *args, **kwargs): + sig.bind(*args, **kwargs) + _copy_func_details(func, checksig) + type(mock)._mock_check_sig = checksig + type(mock).__signature__ = sig + + +def _copy_func_details(func, funcopy): + # we explicitly don't copy func.__dict__ into this copy as it would + # expose original attributes that should be mocked + for attribute in ( + '__name__', '__doc__', '__text_signature__', + '__module__', '__defaults__', '__kwdefaults__', + ): + try: + setattr(funcopy, attribute, getattr(func, attribute)) + except AttributeError: + pass + + +def _callable(obj): + if isinstance(obj, type): + return True + if isinstance(obj, (staticmethod, classmethod, MethodType)): + return _callable(obj.__func__) + if getattr(obj, '__call__', None) is not None: + return True + return False + + +def _is_list(obj): + # checks for list or tuples + # XXXX badly named! + return type(obj) in (list, tuple) + + +def _instance_callable(obj): + """Given an object, return True if the object is callable. + For classes, return True if instances would be callable.""" + if not isinstance(obj, type): + # already an instance + return getattr(obj, '__call__', None) is not None + + # *could* be broken by a class overriding __mro__ or __dict__ via + # a metaclass + for base in (obj,) + obj.__mro__: + if base.__dict__.get('__call__') is not None: + return True + return False + + +def _set_signature(mock, original, instance=False): + # creates a function with signature (*args, **kwargs) that delegates to a + # mock. It still does signature checking by calling a lambda with the same + # signature as the original. 
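+    # e.g. wrapping a mock around `def f(a, b=1)` yields a copy for which
+    # f(1, 2, 3) raises TypeError from sig.bind() before the underlying mock
+    # is ever called (the signature here is illustrative, not from this file).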
+ + skipfirst = isinstance(original, type) + result = _get_signature_object(original, instance, skipfirst) + if result is None: + return mock + func, sig = result + def checksig(*args, **kwargs): + sig.bind(*args, **kwargs) + _copy_func_details(func, checksig) + + name = original.__name__ + if not name.isidentifier(): + name = 'funcopy' + context = {'_checksig_': checksig, 'mock': mock} + src = """def %s(*args, **kwargs): + _checksig_(*args, **kwargs) + return mock(*args, **kwargs)""" % name + exec (src, context) + funcopy = context[name] + _setup_func(funcopy, mock, sig) + return funcopy + +def _set_async_signature(mock, original, instance=False, is_async_mock=False): + # creates an async function with signature (*args, **kwargs) that delegates to a + # mock. It still does signature checking by calling a lambda with the same + # signature as the original. + + skipfirst = isinstance(original, type) + func, sig = _get_signature_object(original, instance, skipfirst) + def checksig(*args, **kwargs): + sig.bind(*args, **kwargs) + _copy_func_details(func, checksig) + + name = original.__name__ + context = {'_checksig_': checksig, 'mock': mock} + src = """async def %s(*args, **kwargs): + _checksig_(*args, **kwargs) + return await mock(*args, **kwargs)""" % name + exec (src, context) + funcopy = context[name] + _setup_func(funcopy, mock, sig) + _setup_async_mock(funcopy) + return funcopy + + +def _setup_func(funcopy, mock, sig): + funcopy.mock = mock + + def assert_called_with(*args, **kwargs): + return mock.assert_called_with(*args, **kwargs) + def assert_called(*args, **kwargs): + return mock.assert_called(*args, **kwargs) + def assert_not_called(*args, **kwargs): + return mock.assert_not_called(*args, **kwargs) + def assert_called_once(*args, **kwargs): + return mock.assert_called_once(*args, **kwargs) + def assert_called_once_with(*args, **kwargs): + return mock.assert_called_once_with(*args, **kwargs) + def assert_has_calls(*args, **kwargs): + return mock.assert_has_calls(*args, **kwargs) + def assert_any_call(*args, **kwargs): + return mock.assert_any_call(*args, **kwargs) + def reset_mock(): + funcopy.method_calls = _CallList() + funcopy.mock_calls = _CallList() + mock.reset_mock() + ret = funcopy.return_value + if _is_instance_mock(ret) and not ret is mock: + ret.reset_mock() + + funcopy.called = False + funcopy.call_count = 0 + funcopy.call_args = None + funcopy.call_args_list = _CallList() + funcopy.method_calls = _CallList() + funcopy.mock_calls = _CallList() + + funcopy.return_value = mock.return_value + funcopy.side_effect = mock.side_effect + funcopy._mock_children = mock._mock_children + + funcopy.assert_called_with = assert_called_with + funcopy.assert_called_once_with = assert_called_once_with + funcopy.assert_has_calls = assert_has_calls + funcopy.assert_any_call = assert_any_call + funcopy.reset_mock = reset_mock + funcopy.assert_called = assert_called + funcopy.assert_not_called = assert_not_called + funcopy.assert_called_once = assert_called_once + funcopy.__signature__ = sig + + mock._mock_delegate = funcopy + + +def _setup_async_mock(mock): + mock._is_coroutine = asyncio.coroutines._is_coroutine + mock.await_count = 0 + mock.await_args = None + mock.await_args_list = _CallList() + + # Mock is not configured yet so the attributes are set + # to a function and then the corresponding mock helper function + # is called when the helper is accessed similar to _setup_func. 
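+    # e.g. mock.assert_awaited_once() resolves, via the partial set up in
+    # the loop below, to getattr(mock.mock, 'assert_awaited_once')().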
+ def wrapper(attr, /, *args, **kwargs): + return getattr(mock.mock, attr)(*args, **kwargs) + + for attribute in ('assert_awaited', + 'assert_awaited_once', + 'assert_awaited_with', + 'assert_awaited_once_with', + 'assert_any_await', + 'assert_has_awaits', + 'assert_not_awaited'): + + # setattr(mock, attribute, wrapper) causes late binding + # hence attribute will always be the last value in the loop + # Use partial(wrapper, attribute) to ensure the attribute is bound + # correctly. + setattr(mock, attribute, partial(wrapper, attribute)) + + +def _is_magic(name): + return '__%s__' % name[2:-2] == name + + +class _SentinelObject(object): + "A unique, named, sentinel object." + def __init__(self, name): + self.name = name + + def __repr__(self): + return 'sentinel.%s' % self.name + + def __reduce__(self): + return 'sentinel.%s' % self.name + + +class _Sentinel(object): + """Access attributes to return a named object, usable as a sentinel.""" + def __init__(self): + self._sentinels = {} + + def __getattr__(self, name): + if name == '__bases__': + # Without this help(unittest.mock) raises an exception + raise AttributeError + return self._sentinels.setdefault(name, _SentinelObject(name)) + + def __reduce__(self): + return 'sentinel' + + +sentinel = _Sentinel() + +DEFAULT = sentinel.DEFAULT +_missing = sentinel.MISSING +_deleted = sentinel.DELETED + + +_allowed_names = { + 'return_value', '_mock_return_value', 'side_effect', + '_mock_side_effect', '_mock_parent', '_mock_new_parent', + '_mock_name', '_mock_new_name' +} + + +def _delegating_property(name): + _allowed_names.add(name) + _the_name = '_mock_' + name + def _get(self, name=name, _the_name=_the_name): + sig = self._mock_delegate + if sig is None: + return getattr(self, _the_name) + return getattr(sig, name) + def _set(self, value, name=name, _the_name=_the_name): + sig = self._mock_delegate + if sig is None: + self.__dict__[_the_name] = value + else: + setattr(sig, name, value) + + return property(_get, _set) + + + +class _CallList(list): + + def __contains__(self, value): + if not isinstance(value, list): + return list.__contains__(self, value) + len_value = len(value) + len_self = len(self) + if len_value > len_self: + return False + + for i in range(0, len_self - len_value + 1): + sub_list = self[i:i+len_value] + if sub_list == value: + return True + return False + + def __repr__(self): + return pprint.pformat(list(self)) + + +def _check_and_set_parent(parent, value, name, new_name): + value = _extract_mock(value) + + if not _is_instance_mock(value): + return False + if ((value._mock_name or value._mock_new_name) or + (value._mock_parent is not None) or + (value._mock_new_parent is not None)): + return False + + _parent = parent + while _parent is not None: + # setting a mock (value) as a child or return value of itself + # should not modify the mock + if _parent is value: + return False + _parent = _parent._mock_new_parent + + if new_name: + value._mock_new_parent = parent + value._mock_new_name = new_name + if name: + value._mock_parent = parent + value._mock_name = name + return True + +# Internal class to identify if we wrapped an iterator object or not. 
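+# An iterable side_effect gets wrapped in it, so successive calls consume
+# successive values, e.g. (a sketch):
+#
+#     m = Mock(side_effect=[1, 2])
+#     m(), m()   # returns 1, then 2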
+class _MockIter(object): + def __init__(self, obj): + self.obj = iter(obj) + def __next__(self): + return next(self.obj) + +class Base(object): + _mock_return_value = DEFAULT + _mock_side_effect = None + def __init__(self, /, *args, **kwargs): + pass + + + +class NonCallableMock(Base): + """A non-callable version of `Mock`""" + + # Store a mutex as a class attribute in order to protect concurrent access + # to mock attributes. Using a class attribute allows all NonCallableMock + # instances to share the mutex for simplicity. + # + # See https://github.com/python/cpython/issues/98624 for why this is + # necessary. + _lock = RLock() + + def __new__( + cls, spec=None, wraps=None, name=None, spec_set=None, + parent=None, _spec_state=None, _new_name='', _new_parent=None, + _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs + ): + # every instance has its own class + # so we can create magic methods on the + # class without stomping on other mocks + bases = (cls,) + if not issubclass(cls, AsyncMockMixin): + # Check if spec is an async object or function + spec_arg = spec_set or spec + if spec_arg is not None and _is_async_obj(spec_arg): + bases = (AsyncMockMixin, cls) + new = type(cls.__name__, bases, {'__doc__': cls.__doc__}) + instance = _safe_super(NonCallableMock, cls).__new__(new) + return instance + + + def __init__( + self, spec=None, wraps=None, name=None, spec_set=None, + parent=None, _spec_state=None, _new_name='', _new_parent=None, + _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs + ): + if _new_parent is None: + _new_parent = parent + + __dict__ = self.__dict__ + __dict__['_mock_parent'] = parent + __dict__['_mock_name'] = name + __dict__['_mock_new_name'] = _new_name + __dict__['_mock_new_parent'] = _new_parent + __dict__['_mock_sealed'] = False + + if spec_set is not None: + spec = spec_set + spec_set = True + if _eat_self is None: + _eat_self = parent is not None + + self._mock_add_spec(spec, spec_set, _spec_as_instance, _eat_self) + + __dict__['_mock_children'] = {} + __dict__['_mock_wraps'] = wraps + __dict__['_mock_delegate'] = None + + __dict__['_mock_called'] = False + __dict__['_mock_call_args'] = None + __dict__['_mock_call_count'] = 0 + __dict__['_mock_call_args_list'] = _CallList() + __dict__['_mock_mock_calls'] = _CallList() + + __dict__['method_calls'] = _CallList() + __dict__['_mock_unsafe'] = unsafe + + if kwargs: + self.configure_mock(**kwargs) + + _safe_super(NonCallableMock, self).__init__( + spec, wraps, name, spec_set, parent, + _spec_state + ) + + + def attach_mock(self, mock, attribute): + """ + Attach a mock as an attribute of this one, replacing its name and + parent. Calls to the attached mock will be recorded in the + `method_calls` and `mock_calls` attributes of this one.""" + inner_mock = _extract_mock(mock) + + inner_mock._mock_parent = None + inner_mock._mock_new_parent = None + inner_mock._mock_name = '' + inner_mock._mock_new_name = None + + setattr(self, attribute, mock) + + + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. + + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + + + def _mock_add_spec(self, spec, spec_set, _spec_as_instance=False, + _eat_self=False): + if _is_instance_mock(spec): + raise InvalidSpecError(f'Cannot spec a Mock object. 
[object={spec!r}]') + + _spec_class = None + _spec_signature = None + _spec_asyncs = [] + + if spec is not None and not _is_list(spec): + if isinstance(spec, type): + _spec_class = spec + else: + _spec_class = type(spec) + res = _get_signature_object(spec, + _spec_as_instance, _eat_self) + _spec_signature = res and res[1] + + spec_list = dir(spec) + + for attr in spec_list: + static_attr = inspect.getattr_static(spec, attr, None) + unwrapped_attr = static_attr + try: + unwrapped_attr = inspect.unwrap(unwrapped_attr) + except ValueError: + pass + if iscoroutinefunction(unwrapped_attr): + _spec_asyncs.append(attr) + + spec = spec_list + + __dict__ = self.__dict__ + __dict__['_spec_class'] = _spec_class + __dict__['_spec_set'] = spec_set + __dict__['_spec_signature'] = _spec_signature + __dict__['_mock_methods'] = spec + __dict__['_spec_asyncs'] = _spec_asyncs + + def _mock_extend_spec_methods(self, spec_methods): + methods = self.__dict__.get('_mock_methods') or [] + methods.extend(spec_methods) + self.__dict__['_mock_methods'] = methods + + def __get_return_value(self): + ret = self._mock_return_value + if self._mock_delegate is not None: + ret = self._mock_delegate.return_value + + if ret is DEFAULT and self._mock_wraps is None: + ret = self._get_child_mock( + _new_parent=self, _new_name='()' + ) + self.return_value = ret + return ret + + + def __set_return_value(self, value): + if self._mock_delegate is not None: + self._mock_delegate.return_value = value + else: + self._mock_return_value = value + _check_and_set_parent(self, value, None, '()') + + __return_value_doc = "The value to be returned when the mock is called." + return_value = property(__get_return_value, __set_return_value, + __return_value_doc) + + + @property + def __class__(self): + if self._spec_class is None: + return type(self) + return self._spec_class + + called = _delegating_property('called') + call_count = _delegating_property('call_count') + call_args = _delegating_property('call_args') + call_args_list = _delegating_property('call_args_list') + mock_calls = _delegating_property('mock_calls') + + + def __get_side_effect(self): + delegated = self._mock_delegate + if delegated is None: + return self._mock_side_effect + sf = delegated.side_effect + if (sf is not None and not callable(sf) + and not isinstance(sf, _MockIter) and not _is_exception(sf)): + sf = _MockIter(sf) + delegated.side_effect = sf + return sf + + def __set_side_effect(self, value): + value = _try_iter(value) + delegated = self._mock_delegate + if delegated is None: + self._mock_side_effect = value + else: + delegated.side_effect = value + + side_effect = property(__get_side_effect, __set_side_effect) + + + def reset_mock(self, visited=None, *, + return_value: bool = False, + side_effect: bool = False): + "Restore the mock object to its initial state." 
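+        # e.g. after m(1); m.reset_mock(), m.called is False and m.call_args
+        # is None again, while a configured return_value or side_effect is
+        # kept unless the matching keyword flag is True (a sketch).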
+ if visited is None: + visited = [] + if id(self) in visited: + return + visited.append(id(self)) + + self.called = False + self.call_args = None + self.call_count = 0 + self.mock_calls = _CallList() + self.call_args_list = _CallList() + self.method_calls = _CallList() + + if return_value: + self._mock_return_value = DEFAULT + if side_effect: + self._mock_side_effect = None + + for child in self._mock_children.values(): + if isinstance(child, _SpecState) or child is _deleted: + continue + child.reset_mock(visited, return_value=return_value, side_effect=side_effect) + + ret = self._mock_return_value + if _is_instance_mock(ret) and ret is not self: + ret.reset_mock(visited) + + + def configure_mock(self, /, **kwargs): + """Set attributes on the mock through keyword arguments. + + Attributes plus return values and side effects can be set on child + mocks using standard dot notation and unpacking a dictionary in the + method call: + + >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError} + >>> mock.configure_mock(**attrs)""" + for arg, val in sorted(kwargs.items(), + # we sort on the number of dots so that + # attributes are set before we set attributes on + # attributes + key=lambda entry: entry[0].count('.')): + args = arg.split('.') + final = args.pop() + obj = self + for entry in args: + obj = getattr(obj, entry) + setattr(obj, final, val) + + + def __getattr__(self, name): + if name in {'_mock_methods', '_mock_unsafe'}: + raise AttributeError(name) + elif self._mock_methods is not None: + if name not in self._mock_methods or name in _all_magics: + raise AttributeError("Mock object has no attribute %r" % name) + elif _is_magic(name): + raise AttributeError(name) + if not self._mock_unsafe and (not self._mock_methods or name not in self._mock_methods): + if name.startswith(('assert', 'assret', 'asert', 'aseert', 'assrt')) or name in _ATTRIB_DENY_LIST: + raise AttributeError( + f"{name!r} is not a valid assertion. Use a spec " + f"for the mock if {name!r} is meant to be an attribute.") + + with NonCallableMock._lock: + result = self._mock_children.get(name) + if result is _deleted: + raise AttributeError(name) + elif result is None: + wraps = None + if self._mock_wraps is not None: + # XXXX should we get the attribute without triggering code + # execution? + wraps = getattr(self._mock_wraps, name) + + result = self._get_child_mock( + parent=self, name=name, wraps=wraps, _new_name=name, + _new_parent=self + ) + self._mock_children[name] = result + + elif isinstance(result, _SpecState): + try: + result = create_autospec( + result.spec, result.spec_set, result.instance, + result.parent, result.name + ) + except InvalidSpecError: + target_name = self.__dict__['_mock_name'] or self + raise InvalidSpecError( + f'Cannot autospec attr {name!r} from target ' + f'{target_name!r} as it has already been mocked out. ' + f'[target={self!r}, attr={result.spec!r}]') + self._mock_children[name] = result + + return result + + + def _extract_mock_name(self): + _name_list = [self._mock_new_name] + _parent = self._mock_new_parent + last = self + + dot = '.' + if _name_list == ['()']: + dot = '' + + while _parent is not None: + last = _parent + + _name_list.append(_parent._mock_new_name + dot) + dot = '.' + if _parent._mock_new_name == '()': + dot = '' + + _parent = _parent._mock_new_parent + + _name_list = list(reversed(_name_list)) + _first = last._mock_name or 'mock' + if len(_name_list) > 1: + if _name_list[1] not in ('()', '().'): + _first += '.' 
+ _name_list[0] = _first + return ''.join(_name_list) + + def __repr__(self): + name = self._extract_mock_name() + + name_string = '' + if name not in ('mock', 'mock.'): + name_string = ' name=%r' % name + + spec_string = '' + if self._spec_class is not None: + spec_string = ' spec=%r' + if self._spec_set: + spec_string = ' spec_set=%r' + spec_string = spec_string % self._spec_class.__name__ + return "<%s%s%s id='%s'>" % ( + type(self).__name__, + name_string, + spec_string, + id(self) + ) + + + def __dir__(self): + """Filter the output of `dir(mock)` to only useful members.""" + if not FILTER_DIR: + return object.__dir__(self) + + extras = self._mock_methods or [] + from_type = dir(type(self)) + from_dict = list(self.__dict__) + from_child_mocks = [ + m_name for m_name, m_value in self._mock_children.items() + if m_value is not _deleted] + + from_type = [e for e in from_type if not e.startswith('_')] + from_dict = [e for e in from_dict if not e.startswith('_') or + _is_magic(e)] + return sorted(set(extras + from_type + from_dict + from_child_mocks)) + + + def __setattr__(self, name, value): + if name in _allowed_names: + # property setters go through here + return object.__setattr__(self, name, value) + elif (self._spec_set and self._mock_methods is not None and + name not in self._mock_methods and + name not in self.__dict__): + raise AttributeError("Mock object has no attribute '%s'" % name) + elif name in _unsupported_magics: + msg = 'Attempting to set unsupported magic method %r.' % name + raise AttributeError(msg) + elif name in _all_magics: + if self._mock_methods is not None and name not in self._mock_methods: + raise AttributeError("Mock object has no attribute '%s'" % name) + + if not _is_instance_mock(value): + setattr(type(self), name, _get_method(name, value)) + original = value + value = lambda *args, **kw: original(self, *args, **kw) + else: + # only set _new_name and not name so that mock_calls is tracked + # but not method calls + _check_and_set_parent(self, value, None, name) + setattr(type(self), name, value) + self._mock_children[name] = value + elif name == '__class__': + self._spec_class = value + return + else: + if _check_and_set_parent(self, value, name, name): + self._mock_children[name] = value + + if self._mock_sealed and not hasattr(self, name): + mock_name = f'{self._extract_mock_name()}.{name}' + raise AttributeError(f'Cannot set {mock_name}') + + if isinstance(value, PropertyMock): + self.__dict__[name] = value + return + return object.__setattr__(self, name, value) + + + def __delattr__(self, name): + if name in _all_magics and name in type(self).__dict__: + delattr(type(self), name) + if name not in self.__dict__: + # for magic methods that are still MagicProxy objects and + # not set on the instance itself + return + + obj = self._mock_children.get(name, _missing) + if name in self.__dict__: + _safe_super(NonCallableMock, self).__delattr__(name) + elif obj is _deleted: + raise AttributeError(name) + if obj is not _missing: + del self._mock_children[name] + self._mock_children[name] = _deleted + + + def _format_mock_call_signature(self, args, kwargs): + name = self._mock_name or 'mock' + return _format_call_signature(name, args, kwargs) + + + def _format_mock_failure_message(self, args, kwargs, action='call'): + message = 'expected %s not found.\nExpected: %s\n Actual: %s' + expected_string = self._format_mock_call_signature(args, kwargs) + call_args = self.call_args + actual_string = self._format_mock_call_signature(*call_args) + return message % (action, 
expected_string, actual_string) + + + def _get_call_signature_from_name(self, name): + """ + * If call objects are asserted against a method/function like obj.meth1 + then there could be no name for the call object to lookup. Hence just + return the spec_signature of the method/function being asserted against. + * If the name is not empty then remove () and split by '.' to get + list of names to iterate through the children until a potential + match is found. A child mock is created only during attribute access + so if we get a _SpecState then no attributes of the spec were accessed + and can be safely exited. + """ + if not name: + return self._spec_signature + + sig = None + names = name.replace('()', '').split('.') + children = self._mock_children + + for name in names: + child = children.get(name) + if child is None or isinstance(child, _SpecState): + break + else: + # If an autospecced object is attached using attach_mock the + # child would be a function with mock object as attribute from + # which signature has to be derived. + child = _extract_mock(child) + children = child._mock_children + sig = child._spec_signature + + return sig + + + def _call_matcher(self, _call): + """ + Given a call (or simply an (args, kwargs) tuple), return a + comparison key suitable for matching with other calls. + This is a best effort method which relies on the spec's signature, + if available, or falls back on the arguments themselves. + """ + + if isinstance(_call, tuple) and len(_call) > 2: + sig = self._get_call_signature_from_name(_call[0]) + else: + sig = self._spec_signature + + if sig is not None: + if len(_call) == 2: + name = '' + args, kwargs = _call + else: + name, args, kwargs = _call + try: + bound_call = sig.bind(*args, **kwargs) + return call(name, bound_call.args, bound_call.kwargs) + except TypeError as e: + return e.with_traceback(None) + else: + return _call + + def assert_not_called(self): + """assert that the mock was never called. + """ + if self.call_count != 0: + msg = ("Expected '%s' to not have been called. Called %s times.%s" + % (self._mock_name or 'mock', + self.call_count, + self._calls_repr())) + raise AssertionError(msg) + + def assert_called(self): + """assert that the mock was called at least once + """ + if self.call_count == 0: + msg = ("Expected '%s' to have been called." % + (self._mock_name or 'mock')) + raise AssertionError(msg) + + def assert_called_once(self): + """assert that the mock was called only once. + """ + if not self.call_count == 1: + msg = ("Expected '%s' to have been called once. Called %s times.%s" + % (self._mock_name or 'mock', + self.call_count, + self._calls_repr())) + raise AssertionError(msg) + + def assert_called_with(self, /, *args, **kwargs): + """assert that the last call was made with the specified arguments. + + Raises an AssertionError if the args and keyword args passed in are + different to the last call to the mock.""" + if self.call_args is None: + expected = self._format_mock_call_signature(args, kwargs) + actual = 'not called.' 
+ error_message = ('expected call not found.\nExpected: %s\n Actual: %s' + % (expected, actual)) + raise AssertionError(error_message) + + def _error_message(): + msg = self._format_mock_failure_message(args, kwargs) + return msg + expected = self._call_matcher(_Call((args, kwargs), two=True)) + actual = self._call_matcher(self.call_args) + if actual != expected: + cause = expected if isinstance(expected, Exception) else None + raise AssertionError(_error_message()) from cause + + + def assert_called_once_with(self, /, *args, **kwargs): + """assert that the mock was called exactly once and that that call was + with the specified arguments.""" + if not self.call_count == 1: + msg = ("Expected '%s' to be called once. Called %s times.%s" + % (self._mock_name or 'mock', + self.call_count, + self._calls_repr())) + raise AssertionError(msg) + return self.assert_called_with(*args, **kwargs) + + + def assert_has_calls(self, calls, any_order=False): + """assert the mock has been called with the specified calls. + The `mock_calls` list is checked for the calls. + + If `any_order` is False (the default) then the calls must be + sequential. There can be extra calls before or after the + specified calls. + + If `any_order` is True then the calls can be in any order, but + they must all appear in `mock_calls`.""" + expected = [self._call_matcher(c) for c in calls] + cause = next((e for e in expected if isinstance(e, Exception)), None) + all_calls = _CallList(self._call_matcher(c) for c in self.mock_calls) + if not any_order: + if expected not in all_calls: + if cause is None: + problem = 'Calls not found.' + else: + problem = ('Error processing expected calls.\n' + 'Errors: {}').format( + [e if isinstance(e, Exception) else None + for e in expected]) + raise AssertionError( + f'{problem}\n' + f'Expected: {_CallList(calls)}\n' + f' Actual: {safe_repr(self.mock_calls)}' + ) from cause + return + + all_calls = list(all_calls) + + not_found = [] + for kall in expected: + try: + all_calls.remove(kall) + except ValueError: + not_found.append(kall) + if not_found: + raise AssertionError( + '%r does not contain all of %r in its call list, ' + 'found %r instead' % (self._mock_name or 'mock', + tuple(not_found), all_calls) + ) from cause + + + def assert_any_call(self, /, *args, **kwargs): + """assert the mock has been called with the specified arguments. + + The assert passes if the mock has *ever* been called, unlike + `assert_called_with` and `assert_called_once_with` that only pass if + the call is the most recent one.""" + expected = self._call_matcher(_Call((args, kwargs), two=True)) + cause = expected if isinstance(expected, Exception) else None + actual = [self._call_matcher(c) for c in self.call_args_list] + if cause or expected not in _AnyComparer(actual): + expected_string = self._format_mock_call_signature(args, kwargs) + raise AssertionError( + '%s call not found' % expected_string + ) from cause + + + def _get_child_mock(self, /, **kw): + """Create the child mocks for attributes and return value. + By default child mocks will be the same type as the parent. + Subclasses of Mock may want to override this to customize the way + child mocks are made. 
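+
+        For example, a subclass could force plain Mock children
+        (a hypothetical sketch)::
+
+            class FlatMock(Mock):
+                def _get_child_mock(self, /, **kw):
+                    return Mock(**kw)
+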
+ + For non-callable mocks the callable variant will be used (rather than + any custom subclass).""" + if self._mock_sealed: + attribute = f".{kw['name']}" if "name" in kw else "()" + mock_name = self._extract_mock_name() + attribute + raise AttributeError(mock_name) + + _new_name = kw.get("_new_name") + if _new_name in self.__dict__['_spec_asyncs']: + return AsyncMock(**kw) + + _type = type(self) + if issubclass(_type, MagicMock) and _new_name in _async_method_magics: + # Any asynchronous magic becomes an AsyncMock + klass = AsyncMock + elif issubclass(_type, AsyncMockMixin): + if (_new_name in _all_sync_magics or + self._mock_methods and _new_name in self._mock_methods): + # Any synchronous method on AsyncMock becomes a MagicMock + klass = MagicMock + else: + klass = AsyncMock + elif not issubclass(_type, CallableMixin): + if issubclass(_type, NonCallableMagicMock): + klass = MagicMock + elif issubclass(_type, NonCallableMock): + klass = Mock + else: + klass = _type.__mro__[1] + return klass(**kw) + + + def _calls_repr(self): + """Renders self.mock_calls as a string. + + Example: "\nCalls: [call(1), call(2)]." + + If self.mock_calls is empty, an empty string is returned. The + output will be truncated if very long. + """ + if not self.mock_calls: + return "" + return f"\nCalls: {safe_repr(self.mock_calls)}." + + +# Denylist for forbidden attribute names in safe mode +_ATTRIB_DENY_LIST = frozenset({ + name.removeprefix("assert_") + for name in dir(NonCallableMock) + if name.startswith("assert_") +}) + + +class _AnyComparer(list): + """A list which checks if it contains a call which may have an + argument of ANY, flipping the components of item and self from + their traditional locations so that ANY is guaranteed to be on + the left.""" + def __contains__(self, item): + for _call in self: + assert len(item) == len(_call) + if all([ + expected == actual + for expected, actual in zip(item, _call) + ]): + return True + return False + + +def _try_iter(obj): + if obj is None: + return obj + if _is_exception(obj): + return obj + if _callable(obj): + return obj + try: + return iter(obj) + except TypeError: + # XXXX backwards compatibility + # but this will blow up on first call - so maybe we should fail early? 
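+        # e.g. Mock(side_effect=42) keeps 42 unchanged here; the failure then
+        # surfaces on the first call as next(42) raising
+        # "TypeError: 'int' object is not an iterator" (a sketch).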
+ return obj + + +class CallableMixin(Base): + + def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, + wraps=None, name=None, spec_set=None, parent=None, + _spec_state=None, _new_name='', _new_parent=None, **kwargs): + self.__dict__['_mock_return_value'] = return_value + _safe_super(CallableMixin, self).__init__( + spec, wraps, name, spec_set, parent, + _spec_state, _new_name, _new_parent, **kwargs + ) + + self.side_effect = side_effect + + + def _mock_check_sig(self, /, *args, **kwargs): + # stub method that can be replaced with one with a specific signature + pass + + + def __call__(self, /, *args, **kwargs): + # can't use self in-case a function / method we are mocking uses self + # in the signature + self._mock_check_sig(*args, **kwargs) + self._increment_mock_call(*args, **kwargs) + return self._mock_call(*args, **kwargs) + + + def _mock_call(self, /, *args, **kwargs): + return self._execute_mock_call(*args, **kwargs) + + def _increment_mock_call(self, /, *args, **kwargs): + self.called = True + + # handle call_args + # needs to be set here so assertions on call arguments pass before + # execution in the case of awaited calls + with NonCallableMock._lock: + # Lock is used here so that call_args_list and call_count are + # set atomically otherwise it is possible that by the time call_count + # is set another thread may have appended to call_args_list. + # The rest of this function relies on list.append being atomic and + # skips locking. + _call = _Call((args, kwargs), two=True) + self.call_args = _call + self.call_args_list.append(_call) + self.call_count = len(self.call_args_list) + + # initial stuff for method_calls: + do_method_calls = self._mock_parent is not None + method_call_name = self._mock_name + + # initial stuff for mock_calls: + mock_call_name = self._mock_new_name + is_a_call = mock_call_name == '()' + self.mock_calls.append(_Call(('', args, kwargs))) + + # follow up the chain of mocks: + _new_parent = self._mock_new_parent + while _new_parent is not None: + + # handle method_calls: + if do_method_calls: + _new_parent.method_calls.append(_Call((method_call_name, args, kwargs))) + do_method_calls = _new_parent._mock_parent is not None + if do_method_calls: + method_call_name = _new_parent._mock_name + '.' + method_call_name + + # handle mock_calls: + this_mock_call = _Call((mock_call_name, args, kwargs)) + _new_parent.mock_calls.append(this_mock_call) + + if _new_parent._mock_new_name: + if is_a_call: + dot = '' + else: + dot = '.' 
+ is_a_call = _new_parent._mock_new_name == '()' + mock_call_name = _new_parent._mock_new_name + dot + mock_call_name + + # follow the parental chain: + _new_parent = _new_parent._mock_new_parent + + def _execute_mock_call(self, /, *args, **kwargs): + # separate from _increment_mock_call so that awaited functions are + # executed separately from their call, also AsyncMock overrides this method + + effect = self.side_effect + if effect is not None: + if _is_exception(effect): + raise effect + elif not _callable(effect): + result = next(effect) + if _is_exception(result): + raise result + else: + result = effect(*args, **kwargs) + + if result is not DEFAULT: + return result + + if self._mock_return_value is not DEFAULT: + return self.return_value + + if self._mock_delegate and self._mock_delegate.return_value is not DEFAULT: + return self.return_value + + if self._mock_wraps is not None: + return self._mock_wraps(*args, **kwargs) + + return self.return_value + + + +class Mock(CallableMixin, NonCallableMock): + """ + Create a new `Mock` object. `Mock` takes several optional arguments + that specify the behaviour of the Mock object: + + * `spec`: This can be either a list of strings or an existing object (a + class or instance) that acts as the specification for the mock object. If + you pass in an object then a list of strings is formed by calling dir on + the object (excluding unsupported magic attributes and methods). Accessing + any attribute not in this list will raise an `AttributeError`. + + If `spec` is an object (rather than a list of strings) then + `mock.__class__` returns the class of the spec object. This allows mocks + to pass `isinstance` tests. + + * `spec_set`: A stricter variant of `spec`. If used, attempting to *set* + or get an attribute on the mock that isn't on the object passed as + `spec_set` will raise an `AttributeError`. + + * `side_effect`: A function to be called whenever the Mock is called. See + the `side_effect` attribute. Useful for raising exceptions or + dynamically changing return values. The function is called with the same + arguments as the mock, and unless it returns `DEFAULT`, the return + value of this function is used as the return value. + + If `side_effect` is an iterable then each call to the mock will return + the next value from the iterable. If any of the members of the iterable + are exceptions they will be raised instead of returned. + + * `return_value`: The value returned when the mock is called. By default + this is a new Mock (created on first access). See the + `return_value` attribute. + + * `unsafe`: By default, accessing any attribute whose name starts with + *assert*, *assret*, *asert*, *aseert*, or *assrt* raises an AttributeError. + Additionally, an AttributeError is raised when accessing + attributes that match the name of an assertion method without the prefix + `assert_`, e.g. accessing `called_once` instead of `assert_called_once`. + Passing `unsafe=True` will allow access to these attributes. + + * `wraps`: Item for the mock object to wrap. If `wraps` is not None then + calling the Mock will pass the call through to the wrapped object + (returning the real result). Attribute access on the mock will return a + Mock object that wraps the corresponding attribute of the wrapped object + (so attempting to access an attribute that doesn't exist will raise an + `AttributeError`). + + If the mock has an explicit `return_value` set then calls are not passed + to the wrapped object and the `return_value` is returned instead. 
+ + * `name`: If the mock has a name then it will be used in the repr of the + mock. This can be useful for debugging. The name is propagated to child + mocks. + + Mocks can also be called with arbitrary keyword arguments. These will be + used to set attributes on the mock after it is created. + """ + + +# _check_spec_arg_typos takes kwargs from commands like patch and checks that +# they don't contain common misspellings of arguments related to autospeccing. +def _check_spec_arg_typos(kwargs_to_check): + typos = ("autospect", "auto_spec", "set_spec") + for typo in typos: + if typo in kwargs_to_check: + raise RuntimeError( + f"{typo!r} might be a typo; use unsafe=True if this is intended" + ) + + +class _patch(object): + + attribute_name = None + _active_patches = [] + + def __init__( + self, getter, attribute, new, spec, create, + spec_set, autospec, new_callable, kwargs, *, unsafe=False + ): + if new_callable is not None: + if new is not DEFAULT: + raise ValueError( + "Cannot use 'new' and 'new_callable' together" + ) + if autospec is not None: + raise ValueError( + "Cannot use 'autospec' and 'new_callable' together" + ) + if not unsafe: + _check_spec_arg_typos(kwargs) + if _is_instance_mock(spec): + raise InvalidSpecError( + f'Cannot spec attr {attribute!r} as the spec ' + f'has already been mocked out. [spec={spec!r}]') + if _is_instance_mock(spec_set): + raise InvalidSpecError( + f'Cannot spec attr {attribute!r} as the spec_set ' + f'target has already been mocked out. [spec_set={spec_set!r}]') + + self.getter = getter + self.attribute = attribute + self.new = new + self.new_callable = new_callable + self.spec = spec + self.create = create + self.has_local = False + self.spec_set = spec_set + self.autospec = autospec + self.kwargs = kwargs + self.additional_patchers = [] + self.is_started = False + + + def copy(self): + patcher = _patch( + self.getter, self.attribute, self.new, self.spec, + self.create, self.spec_set, + self.autospec, self.new_callable, self.kwargs + ) + patcher.attribute_name = self.attribute_name + patcher.additional_patchers = [ + p.copy() for p in self.additional_patchers + ] + return patcher + + + def __call__(self, func): + if isinstance(func, type): + return self.decorate_class(func) + if inspect.iscoroutinefunction(func): + return self.decorate_async_callable(func) + return self.decorate_callable(func) + + + def decorate_class(self, klass): + for attr in dir(klass): + if not attr.startswith(patch.TEST_PREFIX): + continue + + attr_value = getattr(klass, attr) + if not hasattr(attr_value, "__call__"): + continue + + patcher = self.copy() + setattr(klass, attr, patcher(attr_value)) + return klass + + + @contextlib.contextmanager + def decoration_helper(self, patched, args, keywargs): + extra_args = [] + with contextlib.ExitStack() as exit_stack: + for patching in patched.patchings: + arg = exit_stack.enter_context(patching) + if patching.attribute_name is not None: + keywargs.update(arg) + elif patching.new is DEFAULT: + extra_args.append(arg) + + args += tuple(extra_args) + yield (args, keywargs) + + + def decorate_callable(self, func): + # NB. Keep the method in sync with decorate_async_callable() + if hasattr(func, 'patchings'): + func.patchings.append(self) + return func + + @wraps(func) + def patched(*args, **keywargs): + with self.decoration_helper(patched, + args, + keywargs) as (newargs, newkeywargs): + return func(*newargs, **newkeywargs) + + patched.patchings = [self] + return patched + + + def decorate_async_callable(self, func): + # NB. 
Keep the method in sync with decorate_callable() + if hasattr(func, 'patchings'): + func.patchings.append(self) + return func + + @wraps(func) + async def patched(*args, **keywargs): + with self.decoration_helper(patched, + args, + keywargs) as (newargs, newkeywargs): + return await func(*newargs, **newkeywargs) + + patched.patchings = [self] + return patched + + + def get_original(self): + target = self.getter() + name = self.attribute + + original = DEFAULT + local = False + + try: + original = target.__dict__[name] + except (AttributeError, KeyError): + original = getattr(target, name, DEFAULT) + else: + local = True + + if name in _builtins and isinstance(target, ModuleType): + self.create = True + + if not self.create and original is DEFAULT: + raise AttributeError( + "%s does not have the attribute %r" % (target, name) + ) + return original, local + + + def __enter__(self): + """Perform the patch.""" + if self.is_started: + raise RuntimeError("Patch is already started") + + new, spec, spec_set = self.new, self.spec, self.spec_set + autospec, kwargs = self.autospec, self.kwargs + new_callable = self.new_callable + self.target = self.getter() + + # normalise False to None + if spec is False: + spec = None + if spec_set is False: + spec_set = None + if autospec is False: + autospec = None + + if spec is not None and autospec is not None: + raise TypeError("Can't specify spec and autospec") + if ((spec is not None or autospec is not None) and + spec_set not in (True, None)): + raise TypeError("Can't provide explicit spec_set *and* spec or autospec") + + original, local = self.get_original() + + if new is DEFAULT and autospec is None: + inherit = False + if spec is True: + # set spec to the object we are replacing + spec = original + if spec_set is True: + spec_set = original + spec = None + elif spec is not None: + if spec_set is True: + spec_set = spec + spec = None + elif spec_set is True: + spec_set = original + + if spec is not None or spec_set is not None: + if original is DEFAULT: + raise TypeError("Can't use 'spec' with create=True") + if isinstance(original, type): + # If we're patching out a class and there is a spec + inherit = True + + # Determine the Klass to use + if new_callable is not None: + Klass = new_callable + elif spec is None and _is_async_obj(original): + Klass = AsyncMock + elif spec is not None or spec_set is not None: + this_spec = spec + if spec_set is not None: + this_spec = spec_set + if _is_list(this_spec): + not_callable = '__call__' not in this_spec + else: + not_callable = not callable(this_spec) + if _is_async_obj(this_spec): + Klass = AsyncMock + elif not_callable: + Klass = NonCallableMagicMock + else: + Klass = MagicMock + else: + Klass = MagicMock + + _kwargs = {} + if spec is not None: + _kwargs['spec'] = spec + if spec_set is not None: + _kwargs['spec_set'] = spec_set + + # add a name to mocks + if (isinstance(Klass, type) and + issubclass(Klass, NonCallableMock) and self.attribute): + _kwargs['name'] = self.attribute + + _kwargs.update(kwargs) + new = Klass(**_kwargs) + + if inherit and _is_instance_mock(new): + # we can only tell if the instance should be callable if the + # spec is not a list + this_spec = spec + if spec_set is not None: + this_spec = spec_set + if (not _is_list(this_spec) and not + _instance_callable(this_spec)): + Klass = NonCallableMagicMock + + _kwargs.pop('name') + new.return_value = Klass(_new_parent=new, _new_name='()', + **_kwargs) + elif autospec is not None: + # spec is ignored, new *must* be default, spec_set is 
treated + # as a boolean. Should we check spec is not None and that spec_set + # is a bool? + if new is not DEFAULT: + raise TypeError( + "autospec creates the mock for you. Can't specify " + "autospec and new." + ) + if original is DEFAULT: + raise TypeError("Can't use 'autospec' with create=True") + spec_set = bool(spec_set) + if autospec is True: + autospec = original + + if _is_instance_mock(self.target): + raise InvalidSpecError( + f'Cannot autospec attr {self.attribute!r} as the patch ' + f'target has already been mocked out. ' + f'[target={self.target!r}, attr={autospec!r}]') + if _is_instance_mock(autospec): + target_name = getattr(self.target, '__name__', self.target) + raise InvalidSpecError( + f'Cannot autospec attr {self.attribute!r} from target ' + f'{target_name!r} as it has already been mocked out. ' + f'[target={self.target!r}, attr={autospec!r}]') + + new = create_autospec(autospec, spec_set=spec_set, + _name=self.attribute, **kwargs) + elif kwargs: + # can't set keyword args when we aren't creating the mock + # XXXX If new is a Mock we could call new.configure_mock(**kwargs) + raise TypeError("Can't pass kwargs to a mock we aren't creating") + + new_attr = new + + self.temp_original = original + self.is_local = local + self._exit_stack = contextlib.ExitStack() + self.is_started = True + try: + setattr(self.target, self.attribute, new_attr) + if self.attribute_name is not None: + extra_args = {} + if self.new is DEFAULT: + extra_args[self.attribute_name] = new + for patching in self.additional_patchers: + arg = self._exit_stack.enter_context(patching) + if patching.new is DEFAULT: + extra_args.update(arg) + return extra_args + + return new + except: + if not self.__exit__(*sys.exc_info()): + raise + + def __exit__(self, *exc_info): + """Undo the patch.""" + if not self.is_started: + return + + if self.is_local and self.temp_original is not DEFAULT: + setattr(self.target, self.attribute, self.temp_original) + else: + delattr(self.target, self.attribute) + if not self.create and (not hasattr(self.target, self.attribute) or + self.attribute in ('__doc__', '__module__', + '__defaults__', '__annotations__', + '__kwdefaults__')): + # needed for proxy objects like django settings + setattr(self.target, self.attribute, self.temp_original) + + del self.temp_original + del self.is_local + del self.target + exit_stack = self._exit_stack + del self._exit_stack + self.is_started = False + return exit_stack.__exit__(*exc_info) + + + def start(self): + """Activate a patch, returning any created mock.""" + result = self.__enter__() + self._active_patches.append(self) + return result + + + def stop(self): + """Stop an active patch.""" + try: + self._active_patches.remove(self) + except ValueError: + # If the patch hasn't been started this will fail + return None + + return self.__exit__(None, None, None) + + + +def _get_target(target): + try: + target, attribute = target.rsplit('.', 1) + except (TypeError, ValueError, AttributeError): + raise TypeError( + f"Need a valid target to patch. You supplied: {target!r}") + return partial(pkgutil.resolve_name, target), attribute + + +def _patch_object( + target, attribute, new=DEFAULT, spec=None, + create=False, spec_set=None, autospec=None, + new_callable=None, *, unsafe=False, **kwargs + ): + """ + patch the named member (`attribute`) on an object (`target`) with a mock + object. + + `patch.object` can be used as a decorator, class decorator or a context + manager. 
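+
+    For example (a sketch; SomeClass and its method are illustrative)::
+
+        with patch.object(SomeClass, 'method', return_value=3) as mock_method:
+            assert SomeClass().method() == 3
+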
Arguments `new`, `spec`, `create`, `spec_set`, + `autospec` and `new_callable` have the same meaning as for `patch`. Like + `patch`, `patch.object` takes arbitrary keyword arguments for configuring + the mock object it creates. + + When used as a class decorator `patch.object` honours `patch.TEST_PREFIX` + for choosing which methods to wrap. + """ + if type(target) is str: + raise TypeError( + f"{target!r} must be the actual object to be patched, not a str" + ) + getter = lambda: target + return _patch( + getter, attribute, new, spec, create, + spec_set, autospec, new_callable, kwargs, unsafe=unsafe + ) + + +def _patch_multiple(target, spec=None, create=False, spec_set=None, + autospec=None, new_callable=None, **kwargs): + """Perform multiple patches in a single call. It takes the object to be + patched (either as an object or a string to fetch the object by importing) + and keyword arguments for the patches:: + + with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'): + ... + + Use `DEFAULT` as the value if you want `patch.multiple` to create + mocks for you. In this case the created mocks are passed into a decorated + function by keyword, and a dictionary is returned when `patch.multiple` is + used as a context manager. + + `patch.multiple` can be used as a decorator, class decorator or a context + manager. The arguments `spec`, `spec_set`, `create`, + `autospec` and `new_callable` have the same meaning as for `patch`. These + arguments will be applied to *all* patches done by `patch.multiple`. + + When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX` + for choosing which methods to wrap. + """ + if type(target) is str: + getter = partial(pkgutil.resolve_name, target) + else: + getter = lambda: target + + if not kwargs: + raise ValueError( + 'Must supply at least one keyword argument with patch.multiple' + ) + # need to wrap in a list for python 3, where items is a view + items = list(kwargs.items()) + attribute, new = items[0] + patcher = _patch( + getter, attribute, new, spec, create, spec_set, + autospec, new_callable, {} + ) + patcher.attribute_name = attribute + for attribute, new in items[1:]: + this_patcher = _patch( + getter, attribute, new, spec, create, spec_set, + autospec, new_callable, {} + ) + this_patcher.attribute_name = attribute + patcher.additional_patchers.append(this_patcher) + return patcher + + +def patch( + target, new=DEFAULT, spec=None, create=False, + spec_set=None, autospec=None, new_callable=None, *, unsafe=False, **kwargs + ): + """ + `patch` acts as a function decorator, class decorator or a context + manager. Inside the body of the function or with statement, the `target` + is patched with a `new` object. When the function/with statement exits + the patch is undone. + + If `new` is omitted, then the target is replaced with an + `AsyncMock` if the patched object is an async function or a + `MagicMock` otherwise. If `patch` is used as a decorator and `new` is + omitted, the created mock is passed in as an extra argument to the + decorated function. If `patch` is used as a context manager the created + mock is returned by the context manager. + + `target` should be a string in the form `'package.module.ClassName'`. The + `target` is imported and the specified object replaced with the `new` + object, so the `target` must be importable from the environment you are + calling `patch` from. The target is imported when the decorated function + is executed, not at decoration time. 
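+
+    For illustration only (the dotted path is a placeholder)::
+
+        with patch('package.module.ClassName') as MockClass:
+            instance = MockClass.return_value
+            instance.method.return_value = 'the result'
+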
+ + The `spec` and `spec_set` keyword arguments are passed to the `MagicMock` + if patch is creating one for you. + + In addition you can pass `spec=True` or `spec_set=True`, which causes + patch to pass in the object being mocked as the spec/spec_set object. + + `new_callable` allows you to specify a different class, or callable object, + that will be called to create the `new` object. By default `AsyncMock` is + used for async functions and `MagicMock` for the rest. + + A more powerful form of `spec` is `autospec`. If you set `autospec=True` + then the mock will be created with a spec from the object being replaced. + All attributes of the mock will also have the spec of the corresponding + attribute of the object being replaced. Methods and functions being + mocked will have their arguments checked and will raise a `TypeError` if + they are called with the wrong signature. For mocks replacing a class, + their return value (the 'instance') will have the same spec as the class. + + Instead of `autospec=True` you can pass `autospec=some_object` to use an + arbitrary object as the spec instead of the one being replaced. + + By default `patch` will fail to replace attributes that don't exist. If + you pass in `create=True`, and the attribute doesn't exist, patch will + create the attribute for you when the patched function is called, and + delete it again afterwards. This is useful for writing tests against + attributes that your production code creates at runtime. It is off by + default because it can be dangerous. With it switched on you can write + passing tests against APIs that don't actually exist! + + Patch can be used as a `TestCase` class decorator. It works by + decorating each test method in the class. This reduces the boilerplate + code when your test methods share a common patchings set. `patch` finds + tests by looking for method names that start with `patch.TEST_PREFIX`. + By default this is `test`, which matches the way `unittest` finds tests. + You can specify an alternative prefix by setting `patch.TEST_PREFIX`. + + Patch can be used as a context manager, with the with statement. Here the + patching applies to the indented block after the with statement. If you + use "as" then the patched object will be bound to the name after the + "as"; very useful if `patch` is creating a mock object for you. + + Patch will raise a `RuntimeError` if passed some common misspellings of + the arguments autospec and spec_set. Pass the argument `unsafe` with the + value True to disable that check. + + `patch` takes arbitrary keyword arguments. These will be passed to + `AsyncMock` if the patched object is asynchronous, to `MagicMock` + otherwise or to `new_callable` if specified. + + `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are + available for alternate use-cases. + """ + getter, attribute = _get_target(target) + return _patch( + getter, attribute, new, spec, create, + spec_set, autospec, new_callable, kwargs, unsafe=unsafe + ) + + +class _patch_dict(object): + """ + Patch a dictionary, or dictionary like object, and restore the dictionary + to its original state after the test, where the restored dictionary is + a copy of the dictionary as it was before the test. + + `in_dict` can be a dictionary or a mapping like container. If it is a + mapping then it must at least support getting, setting and deleting items + plus iterating over keys. + + `in_dict` can also be a string specifying the name of the dictionary, which + will then be fetched by importing it. 
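+
+    A small sketch of typical use (names are illustrative)::
+
+        foo = {}
+        with patch.dict(foo, {'newkey': 'newvalue'}):
+            assert foo == {'newkey': 'newvalue'}
+        assert foo == {}
+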
+ + `values` can be a dictionary of values to set in the dictionary. `values` + can also be an iterable of `(key, value)` pairs. + + If `clear` is True then the dictionary will be cleared before the new + values are set. + + `patch.dict` can also be called with arbitrary keyword arguments to set + values in the dictionary:: + + with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()): + ... + + `patch.dict` can be used as a context manager, decorator or class + decorator. When used as a class decorator `patch.dict` honours + `patch.TEST_PREFIX` for choosing which methods to wrap. + """ + + def __init__(self, in_dict, values=(), clear=False, **kwargs): + self.in_dict = in_dict + # support any argument supported by dict(...) constructor + self.values = dict(values) + self.values.update(kwargs) + self.clear = clear + self._original = None + + + def __call__(self, f): + if isinstance(f, type): + return self.decorate_class(f) + if inspect.iscoroutinefunction(f): + return self.decorate_async_callable(f) + return self.decorate_callable(f) + + + def decorate_callable(self, f): + @wraps(f) + def _inner(*args, **kw): + self._patch_dict() + try: + return f(*args, **kw) + finally: + self._unpatch_dict() + + return _inner + + + def decorate_async_callable(self, f): + @wraps(f) + async def _inner(*args, **kw): + self._patch_dict() + try: + return await f(*args, **kw) + finally: + self._unpatch_dict() + + return _inner + + + def decorate_class(self, klass): + for attr in dir(klass): + attr_value = getattr(klass, attr) + if (attr.startswith(patch.TEST_PREFIX) and + hasattr(attr_value, "__call__")): + decorator = _patch_dict(self.in_dict, self.values, self.clear) + decorated = decorator(attr_value) + setattr(klass, attr, decorated) + return klass + + + def __enter__(self): + """Patch the dict.""" + self._patch_dict() + return self.in_dict + + + def _patch_dict(self): + values = self.values + if isinstance(self.in_dict, str): + self.in_dict = pkgutil.resolve_name(self.in_dict) + in_dict = self.in_dict + clear = self.clear + + try: + original = in_dict.copy() + except AttributeError: + # dict like object with no copy method + # must support iteration over keys + original = {} + for key in in_dict: + original[key] = in_dict[key] + self._original = original + + if clear: + _clear_dict(in_dict) + + try: + in_dict.update(values) + except AttributeError: + # dict like object with no update method + for key in values: + in_dict[key] = values[key] + + + def _unpatch_dict(self): + in_dict = self.in_dict + original = self._original + + _clear_dict(in_dict) + + try: + in_dict.update(original) + except AttributeError: + for key in original: + in_dict[key] = original[key] + + + def __exit__(self, *args): + """Unpatch the dict.""" + if self._original is not None: + self._unpatch_dict() + return False + + + def start(self): + """Activate a patch, returning any created mock.""" + result = self.__enter__() + _patch._active_patches.append(self) + return result + + + def stop(self): + """Stop an active patch.""" + try: + _patch._active_patches.remove(self) + except ValueError: + # If the patch hasn't been started this will fail + return None + + return self.__exit__(None, None, None) + + +def _clear_dict(in_dict): + try: + in_dict.clear() + except AttributeError: + keys = list(in_dict) + for key in keys: + del in_dict[key] + + +def _patch_stopall(): + """Stop all active patches. 
LIFO to unroll nested patches.""" + for patch in reversed(_patch._active_patches): + patch.stop() + + +patch.object = _patch_object +patch.dict = _patch_dict +patch.multiple = _patch_multiple +patch.stopall = _patch_stopall +patch.TEST_PREFIX = 'test' + +magic_methods = ( + "lt le gt ge eq ne " + "getitem setitem delitem " + "len contains iter " + "hash str sizeof " + "enter exit " + # we added divmod and rdivmod here instead of numerics + # because there is no idivmod + "divmod rdivmod neg pos abs invert " + "complex int float index " + "round trunc floor ceil " + "bool next " + "fspath " + "aiter " +) + +numerics = ( + "add sub mul matmul truediv floordiv mod lshift rshift and xor or pow" +) +inplace = ' '.join('i%s' % n for n in numerics.split()) +right = ' '.join('r%s' % n for n in numerics.split()) + +# not including __prepare__, __instancecheck__, __subclasscheck__ +# (as they are metaclass methods) +# __del__ is not supported at all as it causes problems if it exists + +_non_defaults = { + '__get__', '__set__', '__delete__', '__reversed__', '__missing__', + '__reduce__', '__reduce_ex__', '__getinitargs__', '__getnewargs__', + '__getstate__', '__setstate__', '__getformat__', + '__repr__', '__dir__', '__subclasses__', '__format__', + '__getnewargs_ex__', +} + + +def _get_method(name, func): + "Turns a callable object (like a mock) into a real function" + def method(self, /, *args, **kw): + return func(self, *args, **kw) + method.__name__ = name + return method + + +_magics = { + '__%s__' % method for method in + ' '.join([magic_methods, numerics, inplace, right]).split() +} + +# Magic methods used for async `with` statements +_async_method_magics = {"__aenter__", "__aexit__", "__anext__"} +# Magic methods that are only used with async calls but are synchronous functions themselves +_sync_async_magics = {"__aiter__"} +_async_magics = _async_method_magics | _sync_async_magics + +_all_sync_magics = _magics | _non_defaults +_all_magics = _all_sync_magics | _async_magics + +_unsupported_magics = { + '__getattr__', '__setattr__', + '__init__', '__new__', '__prepare__', + '__instancecheck__', '__subclasscheck__', + '__del__' +} + +_calculate_return_value = { + '__hash__': lambda self: object.__hash__(self), + '__str__': lambda self: object.__str__(self), + '__sizeof__': lambda self: object.__sizeof__(self), + '__fspath__': lambda self: f"{type(self).__name__}/{self._extract_mock_name()}/{id(self)}", +} + +_return_values = { + '__lt__': NotImplemented, + '__gt__': NotImplemented, + '__le__': NotImplemented, + '__ge__': NotImplemented, + '__int__': 1, + '__contains__': False, + '__len__': 0, + '__exit__': False, + '__complex__': 1j, + '__float__': 1.0, + '__bool__': True, + '__index__': 1, + '__aexit__': False, +} + + +def _get_eq(self): + def __eq__(other): + ret_val = self.__eq__._mock_return_value + if ret_val is not DEFAULT: + return ret_val + if self is other: + return True + return NotImplemented + return __eq__ + +def _get_ne(self): + def __ne__(other): + if self.__ne__._mock_return_value is not DEFAULT: + return DEFAULT + if self is other: + return False + return NotImplemented + return __ne__ + +def _get_iter(self): + def __iter__(): + ret_val = self.__iter__._mock_return_value + if ret_val is DEFAULT: + return iter([]) + # if ret_val was already an iterator, then calling iter on it should + # return the iterator unchanged + return iter(ret_val) + return __iter__ + +def _get_async_iter(self): + def __aiter__(): + ret_val = self.__aiter__._mock_return_value + if ret_val is DEFAULT: + 
return _AsyncIterator(iter([])) + return _AsyncIterator(iter(ret_val)) + return __aiter__ + +_side_effect_methods = { + '__eq__': _get_eq, + '__ne__': _get_ne, + '__iter__': _get_iter, + '__aiter__': _get_async_iter +} + + + +def _set_return_value(mock, method, name): + fixed = _return_values.get(name, DEFAULT) + if fixed is not DEFAULT: + method.return_value = fixed + return + + return_calculator = _calculate_return_value.get(name) + if return_calculator is not None: + return_value = return_calculator(mock) + method.return_value = return_value + return + + side_effector = _side_effect_methods.get(name) + if side_effector is not None: + method.side_effect = side_effector(mock) + + + +class MagicMixin(Base): + def __init__(self, /, *args, **kw): + self._mock_set_magics() # make magic work for kwargs in init + _safe_super(MagicMixin, self).__init__(*args, **kw) + self._mock_set_magics() # fix magic broken by upper level init + + + def _mock_set_magics(self): + orig_magics = _magics | _async_method_magics + these_magics = orig_magics + + if getattr(self, "_mock_methods", None) is not None: + these_magics = orig_magics.intersection(self._mock_methods) + remove_magics = orig_magics - these_magics + + for entry in remove_magics: + if entry in type(self).__dict__: + # remove unneeded magic methods + delattr(self, entry) + + # don't overwrite existing attributes if called a second time + these_magics = these_magics - set(type(self).__dict__) + + _type = type(self) + for entry in these_magics: + setattr(_type, entry, MagicProxy(entry, self)) + + + +class NonCallableMagicMock(MagicMixin, NonCallableMock): + """A version of `MagicMock` that isn't callable.""" + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. + + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + self._mock_set_magics() + + +class AsyncMagicMixin(MagicMixin): + pass + + +class MagicMock(MagicMixin, Mock): + """ + MagicMock is a subclass of Mock with default implementations + of most of the magic methods. You can use MagicMock without having to + configure the magic methods yourself. + + If you use the `spec` or `spec_set` arguments then *only* magic + methods that exist in the spec will be created. + + Attributes and the return value of a `MagicMock` will also be `MagicMocks`. + """ + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. + + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + self._mock_set_magics() + + def reset_mock(self, /, *args, return_value: bool = False, **kwargs): + if ( + return_value + and self._mock_name + and _is_magic(self._mock_name) + ): + # Don't reset return values for magic methods, + # otherwise `m.__str__` will start + # to return `MagicMock` instances, instead of `str` instances. 
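+            # (e.g. str(m) must keep returning a real str after
+            # m.reset_mock(return_value=True); a reset __str__ that returned
+            # a MagicMock would make str(m) raise TypeError)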
+ return_value = False + super().reset_mock(*args, return_value=return_value, **kwargs) + + +class MagicProxy(Base): + def __init__(self, name, parent): + self.name = name + self.parent = parent + + def create_mock(self): + entry = self.name + parent = self.parent + m = parent._get_child_mock(name=entry, _new_name=entry, + _new_parent=parent) + setattr(parent, entry, m) + _set_return_value(parent, m, entry) + return m + + def __get__(self, obj, _type=None): + return self.create_mock() + + +try: + _CODE_SIG = inspect.signature(partial(CodeType.__init__, None)) + _CODE_ATTRS = dir(CodeType) +except ValueError: + _CODE_SIG = None + + +class AsyncMockMixin(Base): + await_count = _delegating_property('await_count') + await_args = _delegating_property('await_args') + await_args_list = _delegating_property('await_args_list') + + def __init__(self, /, *args, **kwargs): + super().__init__(*args, **kwargs) + # iscoroutinefunction() checks _is_coroutine property to say if an + # object is a coroutine. Without this check it looks to see if it is a + # function/method, which in this case it is not (since it is an + # AsyncMock). + # It is set through __dict__ because when spec_set is True, this + # attribute is likely undefined. + self.__dict__['_is_coroutine'] = asyncio.coroutines._is_coroutine + self.__dict__['_mock_await_count'] = 0 + self.__dict__['_mock_await_args'] = None + self.__dict__['_mock_await_args_list'] = _CallList() + if _CODE_SIG: + code_mock = NonCallableMock(spec_set=_CODE_ATTRS) + code_mock.__dict__["_spec_class"] = CodeType + code_mock.__dict__["_spec_signature"] = _CODE_SIG + else: + code_mock = NonCallableMock(spec_set=CodeType) + code_mock.co_flags = ( + inspect.CO_COROUTINE + + inspect.CO_VARARGS + + inspect.CO_VARKEYWORDS + ) + code_mock.co_argcount = 0 + code_mock.co_varnames = ('args', 'kwargs') + code_mock.co_posonlyargcount = 0 + code_mock.co_kwonlyargcount = 0 + self.__dict__['__code__'] = code_mock + self.__dict__['__name__'] = 'AsyncMock' + self.__dict__['__defaults__'] = tuple() + self.__dict__['__kwdefaults__'] = {} + self.__dict__['__annotations__'] = None + + async def _execute_mock_call(self, /, *args, **kwargs): + # This is nearly just like super(), except for special handling + # of coroutines + + _call = _Call((args, kwargs), two=True) + self.await_count += 1 + self.await_args = _call + self.await_args_list.append(_call) + + effect = self.side_effect + if effect is not None: + if _is_exception(effect): + raise effect + elif not _callable(effect): + try: + result = next(effect) + except StopIteration: + # It is impossible to propagate a StopIteration + # through coroutines because of PEP 479 + raise StopAsyncIteration + if _is_exception(result): + raise result + elif iscoroutinefunction(effect): + result = await effect(*args, **kwargs) + else: + result = effect(*args, **kwargs) + + if result is not DEFAULT: + return result + + if self._mock_return_value is not DEFAULT: + return self.return_value + + if self._mock_wraps is not None: + if iscoroutinefunction(self._mock_wraps): + return await self._mock_wraps(*args, **kwargs) + return self._mock_wraps(*args, **kwargs) + + return self.return_value + + def assert_awaited(self): + """ + Assert that the mock was awaited at least once. + """ + if self.await_count == 0: + msg = f"Expected {self._mock_name or 'mock'} to have been awaited." + raise AssertionError(msg) + + def assert_awaited_once(self): + """ + Assert that the mock was awaited exactly once. 
+ """ + if not self.await_count == 1: + msg = (f"Expected {self._mock_name or 'mock'} to have been awaited once." + f" Awaited {self.await_count} times.") + raise AssertionError(msg) + + def assert_awaited_with(self, /, *args, **kwargs): + """ + Assert that the last await was with the specified arguments. + """ + if self.await_args is None: + expected = self._format_mock_call_signature(args, kwargs) + raise AssertionError(f'Expected await: {expected}\nNot awaited') + + def _error_message(): + msg = self._format_mock_failure_message(args, kwargs, action='await') + return msg + + expected = self._call_matcher(_Call((args, kwargs), two=True)) + actual = self._call_matcher(self.await_args) + if actual != expected: + cause = expected if isinstance(expected, Exception) else None + raise AssertionError(_error_message()) from cause + + def assert_awaited_once_with(self, /, *args, **kwargs): + """ + Assert that the mock was awaited exactly once and with the specified + arguments. + """ + if not self.await_count == 1: + msg = (f"Expected {self._mock_name or 'mock'} to have been awaited once." + f" Awaited {self.await_count} times.") + raise AssertionError(msg) + return self.assert_awaited_with(*args, **kwargs) + + def assert_any_await(self, /, *args, **kwargs): + """ + Assert the mock has ever been awaited with the specified arguments. + """ + expected = self._call_matcher(_Call((args, kwargs), two=True)) + cause = expected if isinstance(expected, Exception) else None + actual = [self._call_matcher(c) for c in self.await_args_list] + if cause or expected not in _AnyComparer(actual): + expected_string = self._format_mock_call_signature(args, kwargs) + raise AssertionError( + '%s await not found' % expected_string + ) from cause + + def assert_has_awaits(self, calls, any_order=False): + """ + Assert the mock has been awaited with the specified calls. + The :attr:`await_args_list` list is checked for the awaits. + + If `any_order` is False (the default) then the awaits must be + sequential. There can be extra calls before or after the + specified awaits. + + If `any_order` is True then the awaits can be in any order, but + they must all appear in :attr:`await_args_list`. + """ + expected = [self._call_matcher(c) for c in calls] + cause = next((e for e in expected if isinstance(e, Exception)), None) + all_awaits = _CallList(self._call_matcher(c) for c in self.await_args_list) + if not any_order: + if expected not in all_awaits: + if cause is None: + problem = 'Awaits not found.' + else: + problem = ('Error processing expected awaits.\n' + 'Errors: {}').format( + [e if isinstance(e, Exception) else None + for e in expected]) + raise AssertionError( + f'{problem}\n' + f'Expected: {_CallList(calls)}\n' + f'Actual: {self.await_args_list}' + ) from cause + return + + all_awaits = list(all_awaits) + + not_found = [] + for kall in expected: + try: + all_awaits.remove(kall) + except ValueError: + not_found.append(kall) + if not_found: + raise AssertionError( + '%r not all found in await list' % (tuple(not_found),) + ) from cause + + def assert_not_awaited(self): + """ + Assert that the mock was never awaited. + """ + if self.await_count != 0: + msg = (f"Expected {self._mock_name or 'mock'} to not have been awaited." 
+ f" Awaited {self.await_count} times.") + raise AssertionError(msg) + + def reset_mock(self, /, *args, **kwargs): + """ + See :func:`.Mock.reset_mock()` + """ + super().reset_mock(*args, **kwargs) + self.await_count = 0 + self.await_args = None + self.await_args_list = _CallList() + + +class AsyncMock(AsyncMockMixin, AsyncMagicMixin, Mock): + """ + Enhance :class:`Mock` with features allowing to mock + an async function. + + The :class:`AsyncMock` object will behave so the object is + recognized as an async function, and the result of a call is an awaitable: + + >>> mock = AsyncMock() + >>> inspect.iscoroutinefunction(mock) + True + >>> inspect.isawaitable(mock()) + True + + + The result of ``mock()`` is an async function which will have the outcome + of ``side_effect`` or ``return_value``: + + - if ``side_effect`` is a function, the async function will return the + result of that function, + - if ``side_effect`` is an exception, the async function will raise the + exception, + - if ``side_effect`` is an iterable, the async function will return the + next value of the iterable, however, if the sequence of result is + exhausted, ``StopIteration`` is raised immediately, + - if ``side_effect`` is not defined, the async function will return the + value defined by ``return_value``, hence, by default, the async function + returns a new :class:`AsyncMock` object. + + If the outcome of ``side_effect`` or ``return_value`` is an async function, + the mock async function obtained when the mock object is called will be this + async function itself (and not an async function returning an async + function). + + The test author can also specify a wrapped object with ``wraps``. In this + case, the :class:`Mock` object behavior is the same as with an + :class:`.Mock` object: the wrapped object may have methods + defined as async function functions. + + Based on Martin Richard's asynctest project. + """ + + +class _ANY(object): + "A helper object that compares equal to everything." + + def __eq__(self, other): + return True + + def __ne__(self, other): + return False + + def __repr__(self): + return '' + +ANY = _ANY() + + + +def _format_call_signature(name, args, kwargs): + message = '%s(%%s)' % name + formatted_args = '' + args_string = ', '.join([repr(arg) for arg in args]) + kwargs_string = ', '.join([ + '%s=%r' % (key, value) for key, value in kwargs.items() + ]) + if args_string: + formatted_args = args_string + if kwargs_string: + if formatted_args: + formatted_args += ', ' + formatted_args += kwargs_string + + return message % formatted_args + + + +class _Call(tuple): + """ + A tuple for holding the results of a call to a mock, either in the form + `(args, kwargs)` or `(name, args, kwargs)`. + + If args or kwargs are empty then a call tuple will compare equal to + a tuple without those values. This makes comparisons less verbose:: + + _Call(('name', (), {})) == ('name',) + _Call(('name', (1,), {})) == ('name', (1,)) + _Call(((), {'a': 'b'})) == ({'a': 'b'},) + + The `_Call` object provides a useful shortcut for comparing with call:: + + _Call(((1, 2), {'a': 3})) == call(1, 2, a=3) + _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3) + + If the _Call has no name then it will match any name. 
+ """ + def __new__(cls, value=(), name='', parent=None, two=False, + from_kall=True): + args = () + kwargs = {} + _len = len(value) + if _len == 3: + name, args, kwargs = value + elif _len == 2: + first, second = value + if isinstance(first, str): + name = first + if isinstance(second, tuple): + args = second + else: + kwargs = second + else: + args, kwargs = first, second + elif _len == 1: + value, = value + if isinstance(value, str): + name = value + elif isinstance(value, tuple): + args = value + else: + kwargs = value + + if two: + return tuple.__new__(cls, (args, kwargs)) + + return tuple.__new__(cls, (name, args, kwargs)) + + + def __init__(self, value=(), name=None, parent=None, two=False, + from_kall=True): + self._mock_name = name + self._mock_parent = parent + self._mock_from_kall = from_kall + + + def __eq__(self, other): + try: + len_other = len(other) + except TypeError: + return NotImplemented + + self_name = '' + if len(self) == 2: + self_args, self_kwargs = self + else: + self_name, self_args, self_kwargs = self + + if (getattr(self, '_mock_parent', None) and getattr(other, '_mock_parent', None) + and self._mock_parent != other._mock_parent): + return False + + other_name = '' + if len_other == 0: + other_args, other_kwargs = (), {} + elif len_other == 3: + other_name, other_args, other_kwargs = other + elif len_other == 1: + value, = other + if isinstance(value, tuple): + other_args = value + other_kwargs = {} + elif isinstance(value, str): + other_name = value + other_args, other_kwargs = (), {} + else: + other_args = () + other_kwargs = value + elif len_other == 2: + # could be (name, args) or (name, kwargs) or (args, kwargs) + first, second = other + if isinstance(first, str): + other_name = first + if isinstance(second, tuple): + other_args, other_kwargs = second, {} + else: + other_args, other_kwargs = (), second + else: + other_args, other_kwargs = first, second + else: + return False + + if self_name and other_name != self_name: + return False + + # this order is important for ANY to work! 
+        return (other_args, other_kwargs) == (self_args, self_kwargs)
+
+
+    __ne__ = object.__ne__
+
+
+    def __call__(self, /, *args, **kwargs):
+        if self._mock_name is None:
+            return _Call(('', args, kwargs), name='()')
+
+        name = self._mock_name + '()'
+        return _Call((self._mock_name, args, kwargs), name=name, parent=self)
+
+
+    def __getattr__(self, attr):
+        if self._mock_name is None:
+            return _Call(name=attr, from_kall=False)
+        name = '%s.%s' % (self._mock_name, attr)
+        return _Call(name=name, parent=self, from_kall=False)
+
+
+    def __getattribute__(self, attr):
+        if attr in tuple.__dict__:
+            raise AttributeError
+        return tuple.__getattribute__(self, attr)
+
+
+    def _get_call_arguments(self):
+        if len(self) == 2:
+            args, kwargs = self
+        else:
+            name, args, kwargs = self
+
+        return args, kwargs
+
+    @property
+    def args(self):
+        return self._get_call_arguments()[0]
+
+    @property
+    def kwargs(self):
+        return self._get_call_arguments()[1]
+
+    def __repr__(self):
+        if not self._mock_from_kall:
+            name = self._mock_name or 'call'
+            if name.startswith('()'):
+                name = 'call%s' % name
+            return name
+
+        if len(self) == 2:
+            name = 'call'
+            args, kwargs = self
+        else:
+            name, args, kwargs = self
+            if not name:
+                name = 'call'
+            elif not name.startswith('()'):
+                name = 'call.%s' % name
+            else:
+                name = 'call%s' % name
+        return _format_call_signature(name, args, kwargs)
+
+
+    def call_list(self):
+        """For a call object that represents multiple calls, `call_list`
+        returns a list of all the intermediate calls as well as the
+        final call."""
+        vals = []
+        thing = self
+        while thing is not None:
+            if thing._mock_from_kall:
+                vals.append(thing)
+            thing = thing._mock_parent
+        return _CallList(reversed(vals))
+
+
+call = _Call(from_kall=False)
+
+
+def create_autospec(spec, spec_set=False, instance=False, _parent=None,
+                    _name=None, *, unsafe=False, **kwargs):
+    """Create a mock object using another object as a spec. Attributes on the
+    mock will use the corresponding attribute on the `spec` object as their
+    spec.
+
+    Functions or methods being mocked will have their arguments checked
+    to ensure that they are called with the correct signature.
+
+    If `spec_set` is True then attempting to set attributes that don't exist
+    on the spec object will raise an `AttributeError`.
+
+    If a class is used as a spec then the return value of the mock (the
+    instance of the class) will have the same spec. You can use a class as the
+    spec for an instance object by passing `instance=True`. The returned mock
+    will only be callable if instances of the mock are callable.
+
+    `create_autospec` will raise a `RuntimeError` if passed some common
+    misspellings of the arguments autospec and spec_set. Pass the argument
+    `unsafe` with the value True to disable that check.
+
+    `create_autospec` also takes arbitrary keyword arguments that are passed to
+    the constructor of the created mock."""
+    if _is_list(spec):
+        # can't pass a list instance to the mock constructor as it will be
+        # interpreted as a list of strings
+        spec = type(spec)
+
+    is_type = isinstance(spec, type)
+    if _is_instance_mock(spec):
+        raise InvalidSpecError(f'Cannot autospec a Mock object.
' + f'[object={spec!r}]') + is_async_func = _is_async_func(spec) + _kwargs = {'spec': spec} + + entries = [(entry, _missing) for entry in dir(spec)] + if is_type and instance and is_dataclass(spec): + is_dataclass_spec = True + dataclass_fields = fields(spec) + entries.extend((f.name, f.type) for f in dataclass_fields) + dataclass_spec_list = [f.name for f in dataclass_fields] + else: + is_dataclass_spec = False + + if spec_set: + _kwargs = {'spec_set': spec} + elif spec is None: + # None we mock with a normal mock without a spec + _kwargs = {} + if _kwargs and instance: + _kwargs['_spec_as_instance'] = True + if not unsafe: + _check_spec_arg_typos(kwargs) + + _name = kwargs.pop('name', _name) + _new_name = _name + if _parent is None: + # for a top level object no _new_name should be set + _new_name = '' + + _kwargs.update(kwargs) + + Klass = MagicMock + if inspect.isdatadescriptor(spec): + # descriptors don't have a spec + # because we don't know what type they return + _kwargs = {} + elif is_async_func: + if instance: + raise RuntimeError("Instance can not be True when create_autospec " + "is mocking an async function") + Klass = AsyncMock + elif not _callable(spec): + Klass = NonCallableMagicMock + elif is_type and instance and not _instance_callable(spec): + Klass = NonCallableMagicMock + + mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name, + name=_name, **_kwargs) + if is_dataclass_spec: + mock._mock_extend_spec_methods(dataclass_spec_list) + + if isinstance(spec, FunctionTypes): + # should only happen at the top level because we don't + # recurse for functions + if is_async_func: + mock = _set_async_signature(mock, spec) + else: + mock = _set_signature(mock, spec) + else: + _check_signature(spec, mock, is_type, instance) + + if _parent is not None and not instance: + _parent._mock_children[_name] = mock + + # Pop wraps from kwargs because it must not be passed to configure_mock. + wrapped = kwargs.pop('wraps', None) + if is_type and not instance and 'return_value' not in kwargs: + mock.return_value = create_autospec(spec, spec_set, instance=True, + _name='()', _parent=mock, + wraps=wrapped) + + for entry, original in entries: + if _is_magic(entry): + # MagicMock already does the useful magic methods for us + continue + + # XXXX do we need a better way of getting attributes without + # triggering code execution (?) Probably not - we need the actual + # object to mock it so we would rather trigger a property than mock + # the property descriptor. Likewise we want to mock out dynamically + # provided attributes. + # XXXX what about attributes that raise exceptions other than + # AttributeError on being fetched? + # we could be resilient against it, or catch and propagate the + # exception when the attribute is fetched from the mock + if original is _missing: + try: + original = getattr(spec, entry) + except AttributeError: + continue + + child_kwargs = {'spec': original} + # Wrap child attributes also. 
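+        # If the object passed via `wraps` also provides this attribute,
+        # let the child mock delegate to the original attribute.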
+        if wrapped and hasattr(wrapped, entry):
+            child_kwargs.update(wraps=original)
+        if spec_set:
+            child_kwargs = {'spec_set': original}
+
+        if not isinstance(original, FunctionTypes):
+            new = _SpecState(original, spec_set, mock, entry, instance)
+            mock._mock_children[entry] = new
+        else:
+            parent = mock
+            if isinstance(spec, FunctionTypes):
+                parent = mock.mock
+
+            skipfirst = _must_skip(spec, entry, is_type)
+            child_kwargs['_eat_self'] = skipfirst
+            if iscoroutinefunction(original):
+                child_klass = AsyncMock
+            else:
+                child_klass = MagicMock
+            new = child_klass(parent=parent, name=entry, _new_name=entry,
+                              _new_parent=parent, **child_kwargs)
+            mock._mock_children[entry] = new
+            new.return_value = child_klass()
+            _check_signature(original, new, skipfirst=skipfirst)
+
+        # so functions created with _set_signature become instance attributes,
+        # *plus* their underlying mock exists in _mock_children of the parent
+        # mock. Adding to _mock_children may be unnecessary where we are also
+        # setting as an instance attribute?
+        if isinstance(new, FunctionTypes):
+            setattr(mock, entry, new)
+    # kwargs are passed with respect to the parent mock so they are not used
+    # for creating return_value of the parent mock. So, this condition
+    # should be true only for the parent mock if kwargs are given.
+    if _is_instance_mock(mock) and kwargs:
+        mock.configure_mock(**kwargs)
+
+    return mock
+
+
+def _must_skip(spec, entry, is_type):
+    """
+    Return whether we should skip the first argument on spec's `entry`
+    attribute.
+    """
+    if not isinstance(spec, type):
+        if entry in getattr(spec, '__dict__', {}):
+            # instance attribute - shouldn't skip
+            return False
+        spec = spec.__class__
+
+    for klass in spec.__mro__:
+        result = klass.__dict__.get(entry, DEFAULT)
+        if result is DEFAULT:
+            continue
+        if isinstance(result, (staticmethod, classmethod)):
+            return False
+        elif isinstance(result, FunctionTypes):
+            # Normal method => skip if looked up on type
+            # (if looked up on instance, self is already skipped)
+            return is_type
+        else:
+            return False
+
+    # function is a dynamically provided attribute
+    return is_type
+
+
+class _SpecState(object):
+
+    def __init__(self, spec, spec_set=False, parent=None,
+                 name=None, ids=None, instance=False):
+        self.spec = spec
+        self.ids = ids
+        self.spec_set = spec_set
+        self.parent = parent
+        self.instance = instance
+        self.name = name
+
+
+FunctionTypes = (
+    # python function
+    type(create_autospec),
+    # instance method
+    type(ANY.__eq__),
+)
+
+
+file_spec = None
+open_spec = None
+
+
+def _to_stream(read_data):
+    if isinstance(read_data, bytes):
+        return io.BytesIO(read_data)
+    else:
+        return io.StringIO(read_data)
+
+
+def mock_open(mock=None, read_data=''):
+    """
+    A helper function to create a mock to replace the use of `open`. It works
+    for `open` called directly or used as a context manager.
+
+    The `mock` argument is the mock object to configure. If `None` (the
+    default) then a `MagicMock` will be created for you, with the API limited
+    to methods or attributes available on standard file handles.
+
+    `read_data` is a string for the `read`, `readline` and `readlines` methods
+    of the file handle to return. This is an empty string by default.
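+
+    A typical sketch (the file name is arbitrary)::
+
+        m = mock_open(read_data='data')
+        with patch('builtins.open', m):
+            with open('somefile') as f:
+                assert f.read() == 'data'
+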
+ """ + _read_data = _to_stream(read_data) + _state = [_read_data, None] + + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return _state[0].readlines(*args, **kwargs) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return _state[0].read(*args, **kwargs) + + def _readline_side_effect(*args, **kwargs): + yield from _iter_side_effect() + while True: + yield _state[0].readline(*args, **kwargs) + + def _iter_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _state[0]: + yield line + + def _next_side_effect(): + if handle.readline.return_value is not None: + return handle.readline.return_value + return next(_state[0]) + + def _exit_side_effect(exctype, excinst, exctb): + handle.close() + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + global open_spec + if open_spec is None: + import _io + open_spec = list(set(dir(_io.open))) + if mock is None: + mock = MagicMock(name='open', spec=open_spec) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + _state[1] = _readline_side_effect() + handle.readline.side_effect = _state[1] + handle.readlines.side_effect = _readlines_side_effect + handle.__iter__.side_effect = _iter_side_effect + handle.__next__.side_effect = _next_side_effect + handle.__exit__.side_effect = _exit_side_effect + + def reset_data(*args, **kwargs): + _state[0] = _to_stream(read_data) + if handle.readline.side_effect == _state[1]: + # Only reset the side effect if the user hasn't overridden it. + _state[1] = _readline_side_effect() + handle.readline.side_effect = _state[1] + return DEFAULT + + mock.side_effect = reset_data + mock.return_value = handle + return mock + + +class PropertyMock(Mock): + """ + A mock intended to be used as a property, or other descriptor, on a class. + `PropertyMock` provides `__get__` and `__set__` methods so you can specify + a return value when it is fetched. + + Fetching a `PropertyMock` instance from an object calls the mock, with + no args. Setting it calls the mock with the value being set. 
+ """ + def _get_child_mock(self, /, **kwargs): + return MagicMock(**kwargs) + + def __get__(self, obj, obj_type=None): + return self() + def __set__(self, obj, val): + self(val) + + +_timeout_unset = sentinel.TIMEOUT_UNSET + +class ThreadingMixin(Base): + + DEFAULT_TIMEOUT = None + + def _get_child_mock(self, /, **kw): + if isinstance(kw.get("parent"), ThreadingMixin): + kw["timeout"] = kw["parent"]._mock_wait_timeout + elif isinstance(kw.get("_new_parent"), ThreadingMixin): + kw["timeout"] = kw["_new_parent"]._mock_wait_timeout + return super()._get_child_mock(**kw) + + def __init__(self, *args, timeout=_timeout_unset, **kwargs): + super().__init__(*args, **kwargs) + if timeout is _timeout_unset: + timeout = self.DEFAULT_TIMEOUT + self.__dict__["_mock_event"] = threading.Event() # Event for any call + self.__dict__["_mock_calls_events"] = [] # Events for each of the calls + self.__dict__["_mock_calls_events_lock"] = threading.Lock() + self.__dict__["_mock_wait_timeout"] = timeout + + def reset_mock(self, /, *args, **kwargs): + """ + See :func:`.Mock.reset_mock()` + """ + super().reset_mock(*args, **kwargs) + self.__dict__["_mock_event"] = threading.Event() + self.__dict__["_mock_calls_events"] = [] + + def __get_event(self, expected_args, expected_kwargs): + with self._mock_calls_events_lock: + for args, kwargs, event in self._mock_calls_events: + if (args, kwargs) == (expected_args, expected_kwargs): + return event + new_event = threading.Event() + self._mock_calls_events.append((expected_args, expected_kwargs, new_event)) + return new_event + + def _mock_call(self, *args, **kwargs): + ret_value = super()._mock_call(*args, **kwargs) + + call_event = self.__get_event(args, kwargs) + call_event.set() + + self._mock_event.set() + + return ret_value + + def wait_until_called(self, *, timeout=_timeout_unset): + """Wait until the mock object is called. + + `timeout` - time to wait for in seconds, waits forever otherwise. + Defaults to the constructor provided timeout. + Use None to block undefinetively. + """ + if timeout is _timeout_unset: + timeout = self._mock_wait_timeout + if not self._mock_event.wait(timeout=timeout): + msg = (f"{self._mock_name or 'mock'} was not called before" + f" timeout({timeout}).") + raise AssertionError(msg) + + def wait_until_any_call_with(self, *args, **kwargs): + """Wait until the mock object is called with given args. + + Waits for the timeout in seconds provided in the constructor. + """ + event = self.__get_event(args, kwargs) + if not event.wait(timeout=self._mock_wait_timeout): + expected_string = self._format_mock_call_signature(args, kwargs) + raise AssertionError(f'{expected_string} call not found') + + +class ThreadingMock(ThreadingMixin, MagicMixin, Mock): + """ + A mock that can be used to wait until on calls happening + in a different thread. + + The constructor can take a `timeout` argument which + controls the timeout in seconds for all `wait` calls of the mock. + + You can change the default timeout of all instances via the + `ThreadingMock.DEFAULT_TIMEOUT` attribute. + + If no timeout is set, it will block undefinetively. + """ + pass + + +def seal(mock): + """Disable the automatic generation of child mocks. + + Given an input Mock, seals it to ensure no further mocks will be generated + when accessing an attribute that was not already defined. 
+ + The operation recursively seals the mock passed in, meaning that + the mock itself, any mocks generated by accessing one of its attributes, + and all assigned mocks without a name or spec will be sealed. + """ + mock._mock_sealed = True + for attr in dir(mock): + try: + m = getattr(mock, attr) + except AttributeError: + continue + if not isinstance(m, NonCallableMock): + continue + if isinstance(m._mock_children.get(attr), _SpecState): + continue + if m._mock_new_parent is mock: + seal(m) + + +class _AsyncIterator: + """ + Wraps an iterator in an asynchronous iterator. + """ + def __init__(self, iterator): + self.iterator = iterator + code_mock = NonCallableMock(spec_set=CodeType) + code_mock.co_flags = inspect.CO_ITERABLE_COROUTINE + self.__dict__['__code__'] = code_mock + + async def __anext__(self): + try: + return next(self.iterator) + except StopIteration: + pass + raise StopAsyncIteration diff --git a/Python314_4_x86_Template/Lib/unittest/result.py b/Python314_4_x86_Template/Lib/unittest/result.py new file mode 100644 index 00000000..b8ea396d --- /dev/null +++ b/Python314_4_x86_Template/Lib/unittest/result.py @@ -0,0 +1,259 @@ +"""Test result object""" + +import io +import sys +import traceback + +from . import util +from functools import wraps + +__unittest = True + +def failfast(method): + @wraps(method) + def inner(self, *args, **kw): + if getattr(self, 'failfast', False): + self.stop() + return method(self, *args, **kw) + return inner + +STDOUT_LINE = '\nStdout:\n%s' +STDERR_LINE = '\nStderr:\n%s' + + +class TestResult(object): + """Holder for test result information. + + Test results are automatically managed by the TestCase and TestSuite + classes, and do not need to be explicitly manipulated by writers of tests. + + Each instance holds the total number of tests run, and collections of + failures and errors that occurred among those test runs. The collections + contain tuples of (testcase, exceptioninfo), where exceptioninfo is the + formatted traceback of the error that occurred. + """ + _previousTestClass = None + _testRunEntered = False + _moduleSetUpFailed = False + def __init__(self, stream=None, descriptions=None, verbosity=None): + self.failfast = False + self.failures = [] + self.errors = [] + self.testsRun = 0 + self.skipped = [] + self.expectedFailures = [] + self.unexpectedSuccesses = [] + self.collectedDurations = [] + self.shouldStop = False + self.buffer = False + self.tb_locals = False + self._stdout_buffer = None + self._stderr_buffer = None + self._original_stdout = sys.stdout + self._original_stderr = sys.stderr + self._mirrorOutput = False + + def printErrors(self): + "Called by TestRunner after test run" + + def startTest(self, test): + "Called when the given test is about to be run" + self.testsRun += 1 + self._mirrorOutput = False + self._setupStdout() + + def _setupStdout(self): + if self.buffer: + if self._stderr_buffer is None: + self._stderr_buffer = io.StringIO() + self._stdout_buffer = io.StringIO() + sys.stdout = self._stdout_buffer + sys.stderr = self._stderr_buffer + + def startTestRun(self): + """Called once before any tests are executed. + + See startTest for a method called before each test. 
+ """ + + def stopTest(self, test): + """Called when the given test has been run""" + self._restoreStdout() + self._mirrorOutput = False + + def _restoreStdout(self): + if self.buffer: + if self._mirrorOutput: + output = sys.stdout.getvalue() + error = sys.stderr.getvalue() + if output: + if not output.endswith('\n'): + output += '\n' + self._original_stdout.write(STDOUT_LINE % output) + if error: + if not error.endswith('\n'): + error += '\n' + self._original_stderr.write(STDERR_LINE % error) + + sys.stdout = self._original_stdout + sys.stderr = self._original_stderr + self._stdout_buffer.seek(0) + self._stdout_buffer.truncate() + self._stderr_buffer.seek(0) + self._stderr_buffer.truncate() + + def stopTestRun(self): + """Called once after all tests are executed. + + See stopTest for a method called after each test. + """ + + @failfast + def addError(self, test, err): + """Called when an error has occurred. 'err' is a tuple of values as + returned by sys.exc_info(). + """ + self.errors.append((test, self._exc_info_to_string(err, test))) + self._mirrorOutput = True + + @failfast + def addFailure(self, test, err): + """Called when an error has occurred. 'err' is a tuple of values as + returned by sys.exc_info().""" + self.failures.append((test, self._exc_info_to_string(err, test))) + self._mirrorOutput = True + + def addSubTest(self, test, subtest, err): + """Called at the end of a subtest. + 'err' is None if the subtest ended successfully, otherwise it's a + tuple of values as returned by sys.exc_info(). + """ + # By default, we don't do anything with successful subtests, but + # more sophisticated test results might want to record them. + if err is not None: + if getattr(self, 'failfast', False): + self.stop() + if issubclass(err[0], test.failureException): + errors = self.failures + else: + errors = self.errors + errors.append((subtest, self._exc_info_to_string(err, test))) + self._mirrorOutput = True + + def addSuccess(self, test): + "Called when a test has completed successfully" + pass + + def addSkip(self, test, reason): + """Called when a test is skipped.""" + self.skipped.append((test, reason)) + + def addExpectedFailure(self, test, err): + """Called when an expected failure/error occurred.""" + self.expectedFailures.append( + (test, self._exc_info_to_string(err, test))) + + @failfast + def addUnexpectedSuccess(self, test): + """Called when a test was expected to fail, but succeed.""" + self.unexpectedSuccesses.append(test) + + def addDuration(self, test, elapsed): + """Called when a test finished to run, regardless of its outcome. + *test* is the test case corresponding to the test method. + *elapsed* is the time represented in seconds, and it includes the + execution of cleanup functions. + """ + # support for a TextTestRunner using an old TestResult class + if hasattr(self, "collectedDurations"): + # Pass test repr and not the test object itself to avoid resources leak + self.collectedDurations.append((str(test), elapsed)) + + def wasSuccessful(self): + """Tells whether or not this result was a success.""" + # The hasattr check is for test_result's OldResult test. That + # way this method works on objects that lack the attribute. + # (where would such result instances come from? old stored pickles?) 
+        return ((len(self.failures) == len(self.errors) == 0) and
+                (not hasattr(self, 'unexpectedSuccesses') or
+                 len(self.unexpectedSuccesses) == 0))
+
+    def stop(self):
+        """Indicates that the tests should be aborted."""
+        self.shouldStop = True
+
+    def _exc_info_to_string(self, err, test):
+        """Converts a sys.exc_info()-style tuple of values into a string."""
+        exctype, value, tb = err
+        tb = self._clean_tracebacks(exctype, value, tb, test)
+        tb_e = traceback.TracebackException(
+            exctype, value, tb,
+            capture_locals=self.tb_locals, compact=True)
+        from _colorize import can_colorize
+
+        colorize = hasattr(self, "stream") and can_colorize(file=self.stream)
+        msgLines = list(tb_e.format(colorize=colorize))
+
+        if self.buffer:
+            output = sys.stdout.getvalue()
+            error = sys.stderr.getvalue()
+            if output:
+                if not output.endswith('\n'):
+                    output += '\n'
+                msgLines.append(STDOUT_LINE % output)
+            if error:
+                if not error.endswith('\n'):
+                    error += '\n'
+                msgLines.append(STDERR_LINE % error)
+        return ''.join(msgLines)
+
+    def _clean_tracebacks(self, exctype, value, tb, test):
+        ret = None
+        first = True
+        excs = [(exctype, value, tb)]
+        seen = {id(value)}  # Detect loops in chained exceptions.
+        while excs:
+            (exctype, value, tb) = excs.pop()
+            # Skip test runner traceback levels
+            while tb and self._is_relevant_tb_level(tb):
+                tb = tb.tb_next
+
+            # Skip assert*() traceback levels
+            if exctype is test.failureException:
+                self._remove_unittest_tb_frames(tb)
+
+            if first:
+                ret = tb
+                first = False
+            else:
+                value.__traceback__ = tb
+
+            if value is not None:
+                for c in (value.__cause__, value.__context__):
+                    if c is not None and id(c) not in seen:
+                        excs.append((type(c), c, c.__traceback__))
+                        seen.add(id(c))
+        return ret
+
+    def _is_relevant_tb_level(self, tb):
+        return '__unittest' in tb.tb_frame.f_globals
+
+    def _remove_unittest_tb_frames(self, tb):
+        '''Truncates usercode tb at the first unittest frame.
+
+        If the first frame of the traceback is in user code,
+        the prefix up to the first unittest frame is kept (the
+        traceback is truncated in place; nothing is returned).
+        If the first frame is already in the unittest module,
+        the traceback is not modified.
+        '''
+        prev = None
+        while tb and not self._is_relevant_tb_level(tb):
+            prev = tb
+            tb = tb.tb_next
+        if prev is not None:
+            prev.tb_next = None
+
+    def __repr__(self):
+        return ("<%s run=%i errors=%i failures=%i>" %
+                (util.strclass(self.__class__), self.testsRun, len(self.errors),
+                 len(self.failures)))
diff --git a/Python314_4_x86_Template/Lib/unittest/runner.py b/Python314_4_x86_Template/Lib/unittest/runner.py
new file mode 100644
index 00000000..5f22d91a
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/unittest/runner.py
@@ -0,0 +1,313 @@
+"""Running tests"""
+
+import sys
+import time
+import warnings
+
+from _colorize import get_theme
+
+from . import result
+from .case import _SubTest
+from .signals import registerResult
+
+__unittest = True
+
+
+class _WritelnDecorator(object):
+    """Used to decorate file-like objects with a handy 'writeln' method"""
+    def __init__(self, stream):
+        self.stream = stream
+
+    def __getattr__(self, attr):
+        if attr in ('stream', '__getstate__'):
+            raise AttributeError(attr)
+        return getattr(self.stream, attr)
+
+    def writeln(self, arg=None):
+        if arg:
+            self.write(arg)
+        self.write('\n')  # text-mode streams translate to \r\n if needed
+
+
+class TextTestResult(result.TestResult):
+    """A test result class that can print formatted text results to a stream.
+
+    Used by TextTestRunner.
+ """ + separator1 = '=' * 70 + separator2 = '-' * 70 + + def __init__(self, stream, descriptions, verbosity, *, durations=None): + """Construct a TextTestResult. Subclasses should accept **kwargs + to ensure compatibility as the interface changes.""" + super(TextTestResult, self).__init__(stream, descriptions, verbosity) + self.stream = stream + self.showAll = verbosity > 1 + self.dots = verbosity == 1 + self.descriptions = descriptions + self._theme = get_theme(tty_file=stream).unittest + self._newline = True + self.durations = durations + + def getDescription(self, test): + doc_first_line = test.shortDescription() + if self.descriptions and doc_first_line: + return '\n'.join((str(test), doc_first_line)) + else: + return str(test) + + def startTest(self, test): + super(TextTestResult, self).startTest(test) + if self.showAll: + self.stream.write(self.getDescription(test)) + self.stream.write(" ... ") + self.stream.flush() + self._newline = False + + def _write_status(self, test, status): + is_subtest = isinstance(test, _SubTest) + if is_subtest or self._newline: + if not self._newline: + self.stream.writeln() + if is_subtest: + self.stream.write(" ") + self.stream.write(self.getDescription(test)) + self.stream.write(" ... ") + self.stream.writeln(status) + self.stream.flush() + self._newline = True + + def addSubTest(self, test, subtest, err): + if err is not None: + t = self._theme + if self.showAll: + if issubclass(err[0], subtest.failureException): + self._write_status(subtest, f"{t.fail}FAIL{t.reset}") + else: + self._write_status(subtest, f"{t.fail}ERROR{t.reset}") + elif self.dots: + if issubclass(err[0], subtest.failureException): + self.stream.write(f"{t.fail}F{t.reset}") + else: + self.stream.write(f"{t.fail}E{t.reset}") + self.stream.flush() + super(TextTestResult, self).addSubTest(test, subtest, err) + + def addSuccess(self, test): + super(TextTestResult, self).addSuccess(test) + t = self._theme + if self.showAll: + self._write_status(test, f"{t.passed}ok{t.reset}") + elif self.dots: + self.stream.write(f"{t.passed}.{t.reset}") + self.stream.flush() + + def addError(self, test, err): + super(TextTestResult, self).addError(test, err) + t = self._theme + if self.showAll: + self._write_status(test, f"{t.fail}ERROR{t.reset}") + elif self.dots: + self.stream.write(f"{t.fail}E{t.reset}") + self.stream.flush() + + def addFailure(self, test, err): + super(TextTestResult, self).addFailure(test, err) + t = self._theme + if self.showAll: + self._write_status(test, f"{t.fail}FAIL{t.reset}") + elif self.dots: + self.stream.write(f"{t.fail}F{t.reset}") + self.stream.flush() + + def addSkip(self, test, reason): + super(TextTestResult, self).addSkip(test, reason) + t = self._theme + if self.showAll: + self._write_status(test, f"{t.warn}skipped{t.reset} {reason!r}") + elif self.dots: + self.stream.write(f"{t.warn}s{t.reset}") + self.stream.flush() + + def addExpectedFailure(self, test, err): + super(TextTestResult, self).addExpectedFailure(test, err) + t = self._theme + if self.showAll: + self.stream.writeln(f"{t.warn}expected failure{t.reset}") + self.stream.flush() + elif self.dots: + self.stream.write(f"{t.warn}x{t.reset}") + self.stream.flush() + + def addUnexpectedSuccess(self, test): + super(TextTestResult, self).addUnexpectedSuccess(test) + t = self._theme + if self.showAll: + self.stream.writeln(f"{t.fail}unexpected success{t.reset}") + self.stream.flush() + elif self.dots: + self.stream.write(f"{t.fail}u{t.reset}") + self.stream.flush() + + def printErrors(self): + t = self._theme + if 
self.dots or self.showAll: + self.stream.writeln() + self.stream.flush() + self.printErrorList(f"{t.fail}ERROR{t.reset}", self.errors) + self.printErrorList(f"{t.fail}FAIL{t.reset}", self.failures) + unexpectedSuccesses = getattr(self, "unexpectedSuccesses", ()) + if unexpectedSuccesses: + self.stream.writeln(self.separator1) + for test in unexpectedSuccesses: + self.stream.writeln( + f"{t.fail}UNEXPECTED SUCCESS{t.fail_info}: " + f"{self.getDescription(test)}{t.reset}" + ) + self.stream.flush() + + def printErrorList(self, flavour, errors): + t = self._theme + for test, err in errors: + self.stream.writeln(self.separator1) + self.stream.writeln( + f"{flavour}{t.fail_info}: {self.getDescription(test)}{t.reset}" + ) + self.stream.writeln(self.separator2) + self.stream.writeln("%s" % err) + self.stream.flush() + + +class TextTestRunner(object): + """A test runner class that displays results in textual form. + + It prints out the names of tests as they are run, errors as they + occur, and a summary of the results at the end of the test run. + """ + resultclass = TextTestResult + + def __init__(self, stream=None, descriptions=True, verbosity=1, + failfast=False, buffer=False, resultclass=None, warnings=None, + *, tb_locals=False, durations=None): + """Construct a TextTestRunner. + + Subclasses should accept **kwargs to ensure compatibility as the + interface changes. + """ + if stream is None: + stream = sys.stderr + self.stream = _WritelnDecorator(stream) + self.descriptions = descriptions + self.verbosity = verbosity + self.failfast = failfast + self.buffer = buffer + self.tb_locals = tb_locals + self.durations = durations + self.warnings = warnings + if resultclass is not None: + self.resultclass = resultclass + + def _makeResult(self): + try: + return self.resultclass(self.stream, self.descriptions, + self.verbosity, durations=self.durations) + except TypeError: + # didn't accept the durations argument + return self.resultclass(self.stream, self.descriptions, + self.verbosity) + + def _printDurations(self, result): + if not result.collectedDurations: + return + ls = sorted(result.collectedDurations, key=lambda x: x[1], + reverse=True) + if self.durations > 0: + ls = ls[:self.durations] + self.stream.writeln("Slowest test durations") + if hasattr(result, 'separator2'): + self.stream.writeln(result.separator2) + hidden = False + for test, elapsed in ls: + if self.verbosity < 2 and elapsed < 0.001: + hidden = True + continue + self.stream.writeln("%-10s %s" % ("%.3fs" % elapsed, test)) + if hidden: + self.stream.writeln("\n(durations < 0.001s were hidden; " + "use -v to show these durations)") + else: + self.stream.writeln("") + + def run(self, test): + "Run the given test case or test suite." 
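+        # Illustrative sketch (not upstream code; module and suite names are
+        # placeholders): this runner is typically driven as
+        #   suite = unittest.defaultTestLoader.loadTestsFromModule(some_module)
+        #   unittest.TextTestRunner(verbosity=2, durations=10).run(suite)
+        # where durations=10 makes _printDurations() report the ten slowest
+        # tests after the run.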
+ result = self._makeResult() + registerResult(result) + result.failfast = self.failfast + result.buffer = self.buffer + result.tb_locals = self.tb_locals + with warnings.catch_warnings(): + if self.warnings: + # if self.warnings is set, use it to filter all the warnings + warnings.simplefilter(self.warnings) + start_time = time.perf_counter() + startTestRun = getattr(result, 'startTestRun', None) + if startTestRun is not None: + startTestRun() + try: + test(result) + finally: + stopTestRun = getattr(result, 'stopTestRun', None) + if stopTestRun is not None: + stopTestRun() + stop_time = time.perf_counter() + time_taken = stop_time - start_time + result.printErrors() + if self.durations is not None: + self._printDurations(result) + + if hasattr(result, 'separator2'): + self.stream.writeln(result.separator2) + + run = result.testsRun + self.stream.writeln("Ran %d test%s in %.3fs" % + (run, run != 1 and "s" or "", time_taken)) + self.stream.writeln() + + expected_fails = unexpected_successes = skipped = 0 + try: + results = map(len, (result.expectedFailures, + result.unexpectedSuccesses, + result.skipped)) + except AttributeError: + pass + else: + expected_fails, unexpected_successes, skipped = results + + infos = [] + t = get_theme(tty_file=self.stream).unittest + + if not result.wasSuccessful(): + self.stream.write(f"{t.fail_info}FAILED{t.reset}") + failed, errored = len(result.failures), len(result.errors) + if failed: + infos.append(f"{t.fail_info}failures={failed}{t.reset}") + if errored: + infos.append(f"{t.fail_info}errors={errored}{t.reset}") + elif run == 0 and not skipped: + self.stream.write(f"{t.warn}NO TESTS RAN{t.reset}") + else: + self.stream.write(f"{t.passed}OK{t.reset}") + if skipped: + infos.append(f"{t.warn}skipped={skipped}{t.reset}") + if expected_fails: + infos.append(f"{t.warn}expected failures={expected_fails}{t.reset}") + if unexpected_successes: + infos.append( + f"{t.fail}unexpected successes={unexpected_successes}{t.reset}" + ) + if infos: + self.stream.writeln(" (%s)" % (", ".join(infos),)) + else: + self.stream.write("\n") + self.stream.flush() + return result diff --git a/Python313_13_x86_Template/Lib/unittest/signals.py b/Python314_4_x86_Template/Lib/unittest/signals.py similarity index 100% rename from Python313_13_x86_Template/Lib/unittest/signals.py rename to Python314_4_x86_Template/Lib/unittest/signals.py diff --git a/Python313_13_x86_Template/Lib/unittest/suite.py b/Python314_4_x86_Template/Lib/unittest/suite.py similarity index 100% rename from Python313_13_x86_Template/Lib/unittest/suite.py rename to Python314_4_x86_Template/Lib/unittest/suite.py diff --git a/Python313_13_x86_Template/Lib/unittest/util.py b/Python314_4_x86_Template/Lib/unittest/util.py similarity index 100% rename from Python313_13_x86_Template/Lib/unittest/util.py rename to Python314_4_x86_Template/Lib/unittest/util.py diff --git a/Python314_4_x86_Template/Lib/urllib/__init__.py b/Python314_4_x86_Template/Lib/urllib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Python314_4_x86_Template/Lib/urllib/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/urllib/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..37df6d2f Binary files /dev/null and b/Python314_4_x86_Template/Lib/urllib/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/urllib/__pycache__/error.cpython-314.pyc b/Python314_4_x86_Template/Lib/urllib/__pycache__/error.cpython-314.pyc new file mode 100644 index 00000000..3062f17b 
Binary files /dev/null and b/Python314_4_x86_Template/Lib/urllib/__pycache__/error.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/urllib/__pycache__/parse.cpython-314.pyc b/Python314_4_x86_Template/Lib/urllib/__pycache__/parse.cpython-314.pyc new file mode 100644 index 00000000..99b174ec Binary files /dev/null and b/Python314_4_x86_Template/Lib/urllib/__pycache__/parse.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/urllib/__pycache__/request.cpython-314.pyc b/Python314_4_x86_Template/Lib/urllib/__pycache__/request.cpython-314.pyc new file mode 100644 index 00000000..99cc394c Binary files /dev/null and b/Python314_4_x86_Template/Lib/urllib/__pycache__/request.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/urllib/__pycache__/response.cpython-314.pyc b/Python314_4_x86_Template/Lib/urllib/__pycache__/response.cpython-314.pyc new file mode 100644 index 00000000..32e55b3e Binary files /dev/null and b/Python314_4_x86_Template/Lib/urllib/__pycache__/response.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/urllib/error.py b/Python314_4_x86_Template/Lib/urllib/error.py similarity index 100% rename from Python313_13_x86_Template/Lib/urllib/error.py rename to Python314_4_x86_Template/Lib/urllib/error.py diff --git a/Python314_4_x86_Template/Lib/urllib/parse.py b/Python314_4_x86_Template/Lib/urllib/parse.py new file mode 100644 index 00000000..a651e815 --- /dev/null +++ b/Python314_4_x86_Template/Lib/urllib/parse.py @@ -0,0 +1,1289 @@ +"""Parse (absolute and relative) URLs. + +urllib.parse module is based upon the following RFC specifications. + +RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding +and L. Masinter, January 2005. + +RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter +and L.Masinter, December 1999. + +RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. +Berners-Lee, R. Fielding, and L. Masinter, August 1998. + +RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. + +RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June +1995. + +RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. +McCahill, December 1994 + +RFC 3986 is considered the current standard and any future changes to +urllib.parse module should conform with it. The urllib.parse module is +currently not entirely compliant with this RFC due to defacto +scenarios for parsing, and for backward compatibility purposes, some +parsing quirks from older RFCs are retained. The testcases in +test_urlparse.py provides a good indicator of parsing behavior. + +The WHATWG URL Parser spec should also be considered. We are not compliant with +it either due to existing user code API behavior expectations (Hyrum's Law). +It serves as a useful guide when making changes. +""" + +from collections import namedtuple +import functools +import math +import re +import types +import warnings +import ipaddress + +__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", + "urlsplit", "urlunsplit", "urlencode", "parse_qs", + "parse_qsl", "quote", "quote_plus", "quote_from_bytes", + "unquote", "unquote_plus", "unquote_to_bytes", + "DefragResult", "ParseResult", "SplitResult", + "DefragResultBytes", "ParseResultBytes", "SplitResultBytes"] + +# A classification of schemes. +# The empty string classifies URLs with no scheme specified, +# being the default value returned by “urlsplit” and “urlparse”. 
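+# For example (doctest-style sketch, not upstream text):
+#   >>> urlsplit('//example.com/a').scheme
+#   ''
+#   >>> urlsplit('https://example.com/a').scheme
+#   'https'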
+ +uses_relative = ['', 'ftp', 'http', 'gopher', 'nntp', 'imap', + 'wais', 'file', 'https', 'shttp', 'mms', + 'prospero', 'rtsp', 'rtsps', 'rtspu', 'sftp', + 'svn', 'svn+ssh', 'ws', 'wss'] + +uses_netloc = ['', 'ftp', 'http', 'gopher', 'nntp', 'telnet', + 'imap', 'wais', 'file', 'mms', 'https', 'shttp', + 'snews', 'prospero', 'rtsp', 'rtsps', 'rtspu', 'rsync', + 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh', + 'ws', 'wss', 'itms-services'] + +uses_params = ['', 'ftp', 'hdl', 'prospero', 'http', 'imap', + 'https', 'shttp', 'rtsp', 'rtsps', 'rtspu', 'sip', + 'sips', 'mms', 'sftp', 'tel'] + +# These are not actually used anymore, but should stay for backwards +# compatibility. (They are undocumented, but have a public-looking name.) + +non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', + 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] + +uses_query = ['', 'http', 'wais', 'imap', 'https', 'shttp', 'mms', + 'gopher', 'rtsp', 'rtsps', 'rtspu', 'sip', 'sips'] + +uses_fragment = ['', 'ftp', 'hdl', 'http', 'gopher', 'news', + 'nntp', 'wais', 'https', 'shttp', 'snews', + 'file', 'prospero'] + +# Characters valid in scheme names +scheme_chars = ('abcdefghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + '0123456789' + '+-.') + +# Leading and trailing C0 control and space to be stripped per WHATWG spec. +# == "".join([chr(i) for i in range(0, 0x20 + 1)]) +_WHATWG_C0_CONTROL_OR_SPACE = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f ' + +# Unsafe bytes to be removed per WHATWG spec +_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n'] + +def clear_cache(): + """Clear internal performance caches. Undocumented; some tests want it.""" + urlsplit.cache_clear() + _byte_quoter_factory.cache_clear() + +# Helpers for bytes handling +# For 3.2, we deliberately require applications that +# handle improperly quoted URLs to do their own +# decoding and encoding. 
If valid use cases are +# presented, we may relax this by using latin-1 +# decoding internally for 3.3 +_implicit_encoding = 'ascii' +_implicit_errors = 'strict' + +def _noop(obj): + return obj + +def _encode_result(obj, encoding=_implicit_encoding, + errors=_implicit_errors): + return obj.encode(encoding, errors) + +def _decode_args(args, encoding=_implicit_encoding, + errors=_implicit_errors): + return tuple(x.decode(encoding, errors) if x else '' for x in args) + +def _coerce_args(*args): + # Invokes decode if necessary to create str args + # and returns the coerced inputs along with + # an appropriate result coercion function + # - noop for str inputs + # - encoding function otherwise + str_input = isinstance(args[0], str) + for arg in args[1:]: + # We special-case the empty string to support the + # "scheme=''" default argument to some functions + if arg and isinstance(arg, str) != str_input: + raise TypeError("Cannot mix str and non-str arguments") + if str_input: + return args + (_noop,) + return _decode_args(args) + (_encode_result,) + +# Result objects are more helpful than simple tuples +class _ResultMixinStr(object): + """Standard approach to encoding parsed results from str to bytes""" + __slots__ = () + + def encode(self, encoding='ascii', errors='strict'): + return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self)) + + +class _ResultMixinBytes(object): + """Standard approach to decoding parsed results from bytes to str""" + __slots__ = () + + def decode(self, encoding='ascii', errors='strict'): + return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self)) + + +class _NetlocResultMixinBase(object): + """Shared methods for the parsed result objects containing a netloc element""" + __slots__ = () + + @property + def username(self): + return self._userinfo[0] + + @property + def password(self): + return self._userinfo[1] + + @property + def hostname(self): + hostname = self._hostinfo[0] + if not hostname: + return None + # Scoped IPv6 address may have zone info, which must not be lowercased + # like http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys + separator = '%' if isinstance(hostname, str) else b'%' + hostname, percent, zone = hostname.partition(separator) + return hostname.lower() + percent + zone + + @property + def port(self): + port = self._hostinfo[1] + if port is not None: + if port.isdigit() and port.isascii(): + port = int(port) + else: + raise ValueError(f"Port could not be cast to integer value as {port!r}") + if not (0 <= port <= 65535): + raise ValueError("Port out of range 0-65535") + return port + + __class_getitem__ = classmethod(types.GenericAlias) + + +class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr): + __slots__ = () + + @property + def _userinfo(self): + netloc = self.netloc + userinfo, have_info, hostinfo = netloc.rpartition('@') + if have_info: + username, have_password, password = userinfo.partition(':') + if not have_password: + password = None + else: + username = password = None + return username, password + + @property + def _hostinfo(self): + netloc = self.netloc + _, _, hostinfo = netloc.rpartition('@') + _, have_open_br, bracketed = hostinfo.partition('[') + if have_open_br: + hostname, _, port = bracketed.partition(']') + _, _, port = port.partition(':') + else: + hostname, _, port = hostinfo.partition(':') + if not port: + port = None + return hostname, port + + +class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes): + __slots__ = () + + @property + def 
_userinfo(self): + netloc = self.netloc + userinfo, have_info, hostinfo = netloc.rpartition(b'@') + if have_info: + username, have_password, password = userinfo.partition(b':') + if not have_password: + password = None + else: + username = password = None + return username, password + + @property + def _hostinfo(self): + netloc = self.netloc + _, _, hostinfo = netloc.rpartition(b'@') + _, have_open_br, bracketed = hostinfo.partition(b'[') + if have_open_br: + hostname, _, port = bracketed.partition(b']') + _, _, port = port.partition(b':') + else: + hostname, _, port = hostinfo.partition(b':') + if not port: + port = None + return hostname, port + + +_DefragResultBase = namedtuple('_DefragResultBase', 'url fragment') +_SplitResultBase = namedtuple( + '_SplitResultBase', 'scheme netloc path query fragment') +_ParseResultBase = namedtuple( + '_ParseResultBase', 'scheme netloc path params query fragment') + +_DefragResultBase.__doc__ = """ +DefragResult(url, fragment) + +A 2-tuple that contains the url without fragment identifier and the fragment +identifier as a separate argument. +""" + +_DefragResultBase.url.__doc__ = """The URL with no fragment identifier.""" + +_DefragResultBase.fragment.__doc__ = """ +Fragment identifier separated from URL, that allows indirect identification of a +secondary resource by reference to a primary resource and additional identifying +information. +""" + +_SplitResultBase.__doc__ = """ +SplitResult(scheme, netloc, path, query, fragment) + +A 5-tuple that contains the different components of a URL. Similar to +ParseResult, but does not split params. +""" + +_SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request.""" + +_SplitResultBase.netloc.__doc__ = """ +Network location where the request is made to. +""" + +_SplitResultBase.path.__doc__ = """ +The hierarchical path, such as the path to a file to download. +""" + +_SplitResultBase.query.__doc__ = """ +The query component, that contains non-hierarchical data, that along with data +in path component, identifies a resource in the scope of URI's scheme and +network location. +""" + +_SplitResultBase.fragment.__doc__ = """ +Fragment identifier, that allows indirect identification of a secondary resource +by reference to a primary resource and additional identifying information. +""" + +_ParseResultBase.__doc__ = """ +ParseResult(scheme, netloc, path, params, query, fragment) + +A 6-tuple that contains components of a parsed URL. +""" + +_ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__ +_ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__ +_ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__ +_ParseResultBase.params.__doc__ = """ +Parameters for last path element used to dereference the URI in order to provide +access to perform some operation on the resource. 
+""" + +_ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__ +_ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__ + + +# For backwards compatibility, alias _NetlocResultMixinStr +# ResultBase is no longer part of the documented API, but it is +# retained since deprecating it isn't worth the hassle +ResultBase = _NetlocResultMixinStr + +# Structured result objects for string data +class DefragResult(_DefragResultBase, _ResultMixinStr): + __slots__ = () + def geturl(self): + if self.fragment: + return self.url + '#' + self.fragment + else: + return self.url + +class SplitResult(_SplitResultBase, _NetlocResultMixinStr): + __slots__ = () + def geturl(self): + return urlunsplit(self) + +class ParseResult(_ParseResultBase, _NetlocResultMixinStr): + __slots__ = () + def geturl(self): + return urlunparse(self) + +# Structured result objects for bytes data +class DefragResultBytes(_DefragResultBase, _ResultMixinBytes): + __slots__ = () + def geturl(self): + if self.fragment: + return self.url + b'#' + self.fragment + else: + return self.url + +class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes): + __slots__ = () + def geturl(self): + return urlunsplit(self) + +class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes): + __slots__ = () + def geturl(self): + return urlunparse(self) + +# Set up the encode/decode result pairs +def _fix_result_transcoding(): + _result_pairs = ( + (DefragResult, DefragResultBytes), + (SplitResult, SplitResultBytes), + (ParseResult, ParseResultBytes), + ) + for _decoded, _encoded in _result_pairs: + _decoded._encoded_counterpart = _encoded + _encoded._decoded_counterpart = _decoded + +_fix_result_transcoding() +del _fix_result_transcoding + +def urlparse(url, scheme='', allow_fragments=True): + """Parse a URL into 6 components: + :///;?# + + The result is a named 6-tuple with fields corresponding to the + above. It is either a ParseResult or ParseResultBytes object, + depending on the type of the url parameter. + + The username, password, hostname, and port sub-components of netloc + can also be accessed as attributes of the returned object. + + The scheme argument provides the default value of the scheme + component when no scheme is found in url. + + If allow_fragments is False, no attempt is made to separate the + fragment component from the previous component, which can be either + path or query. + + Note that % escapes are not expanded. + + urlsplit() should generally be used instead of urlparse(). 
+ """ + url, scheme, _coerce_result = _coerce_args(url, scheme) + scheme, netloc, url, params, query, fragment = _urlparse(url, scheme, allow_fragments) + result = ParseResult(scheme or '', netloc or '', url, params or '', query or '', fragment or '') + return _coerce_result(result) + +def _urlparse(url, scheme=None, allow_fragments=True): + scheme, netloc, url, query, fragment = _urlsplit(url, scheme, allow_fragments) + if (scheme or '') in uses_params and ';' in url: + url, params = _splitparams(url, allow_none=True) + else: + params = None + return (scheme, netloc, url, params, query, fragment) + +def _splitparams(url, allow_none=False): + if '/' in url: + i = url.find(';', url.rfind('/')) + if i < 0: + return url, None if allow_none else '' + else: + i = url.find(';') + return url[:i], url[i+1:] + +def _splitnetloc(url, start=0): + delim = len(url) # position of end of domain part of url, default is end + for c in '/?#': # look for delimiters; the order is NOT important + wdelim = url.find(c, start) # find first of this delim + if wdelim >= 0: # if found + delim = min(delim, wdelim) # use earliest delim position + return url[start:delim], url[delim:] # return (domain, rest) + +def _checknetloc(netloc): + if not netloc or netloc.isascii(): + return + # looking for characters like \u2100 that expand to 'a/c' + # IDNA uses NFKC equivalence, so normalize for this check + import unicodedata + n = netloc.replace('@', '') # ignore characters already included + n = n.replace(':', '') # but not the surrounding text + n = n.replace('#', '') + n = n.replace('?', '') + netloc2 = unicodedata.normalize('NFKC', n) + if n == netloc2: + return + for c in '/?#@:': + if c in netloc2: + raise ValueError("netloc '" + netloc + "' contains invalid " + + "characters under NFKC normalization") + +def _check_bracketed_netloc(netloc): + # Note that this function must mirror the splitting + # done in NetlocResultMixins._hostinfo(). + hostname_and_port = netloc.rpartition('@')[2] + before_bracket, have_open_br, bracketed = hostname_and_port.partition('[') + if have_open_br: + # No data is allowed before a bracket. + if before_bracket: + raise ValueError("Invalid IPv6 URL") + hostname, _, port = bracketed.partition(']') + # No data is allowed after the bracket but before the port delimiter. + if port and not port.startswith(":"): + raise ValueError("Invalid IPv6 URL") + else: + hostname, _, port = hostname_and_port.partition(':') + _check_bracketed_host(hostname) + +# Valid bracketed hosts are defined in +# https://www.rfc-editor.org/rfc/rfc3986#page-49 and https://url.spec.whatwg.org/ +def _check_bracketed_host(hostname): + if hostname.startswith('v'): + if not re.match(r"\Av[a-fA-F0-9]+\..+\z", hostname): + raise ValueError(f"IPvFuture address is invalid") + else: + ip = ipaddress.ip_address(hostname) # Throws Value Error if not IPv6 or IPv4 + if isinstance(ip, ipaddress.IPv4Address): + raise ValueError(f"An IPv4 address cannot be in brackets") + +# typed=True avoids BytesWarnings being emitted during cache key +# comparison since this API supports both bytes and str input. +@functools.lru_cache(typed=True) +def urlsplit(url, scheme='', allow_fragments=True): + """Parse a URL into 5 components: + :///?# + + The result is a named 5-tuple with fields corresponding to the + above. It is either a SplitResult or SplitResultBytes object, + depending on the type of the url parameter. + + The username, password, hostname, and port sub-components of netloc + can also be accessed as attributes of the returned object. 
+ + The scheme argument provides the default value of the scheme + component when no scheme is found in url. + + If allow_fragments is False, no attempt is made to separate the + fragment component from the previous component, which can be either + path or query. + + Note that % escapes are not expanded. + """ + + url, scheme, _coerce_result = _coerce_args(url, scheme) + scheme, netloc, url, query, fragment = _urlsplit(url, scheme, allow_fragments) + v = SplitResult(scheme or '', netloc or '', url, query or '', fragment or '') + return _coerce_result(v) + +def _urlsplit(url, scheme=None, allow_fragments=True): + # Only lstrip url as some applications rely on preserving trailing space. + # (https://url.spec.whatwg.org/#concept-basic-url-parser would strip both) + url = url.lstrip(_WHATWG_C0_CONTROL_OR_SPACE) + for b in _UNSAFE_URL_BYTES_TO_REMOVE: + url = url.replace(b, "") + if scheme is not None: + scheme = scheme.strip(_WHATWG_C0_CONTROL_OR_SPACE) + for b in _UNSAFE_URL_BYTES_TO_REMOVE: + scheme = scheme.replace(b, "") + + allow_fragments = bool(allow_fragments) + netloc = query = fragment = None + i = url.find(':') + if i > 0 and url[0].isascii() and url[0].isalpha(): + for c in url[:i]: + if c not in scheme_chars: + break + else: + scheme, url = url[:i].lower(), url[i+1:] + if url[:2] == '//': + netloc, url = _splitnetloc(url, 2) + if (('[' in netloc and ']' not in netloc) or + (']' in netloc and '[' not in netloc)): + raise ValueError("Invalid IPv6 URL") + if '[' in netloc and ']' in netloc: + _check_bracketed_netloc(netloc) + if allow_fragments and '#' in url: + url, fragment = url.split('#', 1) + if '?' in url: + url, query = url.split('?', 1) + _checknetloc(netloc) + return (scheme, netloc, url, query, fragment) + +def urlunparse(components): + """Put a parsed URL back together again. This may result in a + slightly different, but equivalent URL, if the URL that was parsed + originally had redundant delimiters, e.g. a ? with an empty query + (the draft states that these are equivalent).""" + scheme, netloc, url, params, query, fragment, _coerce_result = ( + _coerce_args(*components)) + if not netloc: + if scheme and scheme in uses_netloc and (not url or url[:1] == '/'): + netloc = '' + else: + netloc = None + if params: + url = "%s;%s" % (url, params) + return _coerce_result(_urlunsplit(scheme or None, netloc, url, + query or None, fragment or None)) + +def urlunsplit(components): + """Combine the elements of a tuple as returned by urlsplit() into a + complete URL as a string. The data argument can be any five-item iterable. + This may result in a slightly different, but equivalent URL, if the URL that + was parsed originally had unnecessary delimiters (for example, a ? with an + empty query; the RFC states that these are equivalent).""" + scheme, netloc, url, query, fragment, _coerce_result = ( + _coerce_args(*components)) + if not netloc: + if scheme and scheme in uses_netloc and (not url or url[:1] == '/'): + netloc = '' + else: + netloc = None + return _coerce_result(_urlunsplit(scheme or None, netloc, url, + query or None, fragment or None)) + +def _urlunsplit(scheme, netloc, url, query, fragment): + if netloc is not None: + if url and url[:1] != '/': url = '/' + url + url = '//' + netloc + url + elif url[:2] == '//': + url = '//' + url + if scheme: + url = scheme + ':' + url + if query is not None: + url = url + '?' 
+ query + if fragment is not None: + url = url + '#' + fragment + return url + +def urljoin(base, url, allow_fragments=True): + """Join a base URL and a possibly relative URL to form an absolute + interpretation of the latter.""" + if not base: + return url + if not url: + return base + + base, url, _coerce_result = _coerce_args(base, url) + bscheme, bnetloc, bpath, bquery, bfragment = \ + _urlsplit(base, None, allow_fragments) + scheme, netloc, path, query, fragment = \ + _urlsplit(url, None, allow_fragments) + + if scheme is None: + scheme = bscheme + if scheme != bscheme or (scheme and scheme not in uses_relative): + return _coerce_result(url) + if not scheme or scheme in uses_netloc: + if netloc: + return _coerce_result(_urlunsplit(scheme, netloc, path, + query, fragment)) + netloc = bnetloc + + if not path: + path = bpath + if query is None: + query = bquery + if fragment is None: + fragment = bfragment + return _coerce_result(_urlunsplit(scheme, netloc, path, + query, fragment)) + + base_parts = bpath.split('/') + if base_parts[-1] != '': + # the last item is not a directory, so will not be taken into account + # in resolving the relative path + del base_parts[-1] + + # for rfc3986, ignore all base path should the first character be root. + if path[:1] == '/': + segments = path.split('/') + else: + segments = base_parts + path.split('/') + # filter out elements that would cause redundant slashes on re-joining + # the resolved_path + segments[1:-1] = filter(None, segments[1:-1]) + + resolved_path = [] + + for seg in segments: + if seg == '..': + try: + resolved_path.pop() + except IndexError: + # ignore any .. segments that would otherwise cause an IndexError + # when popped from resolved_path if resolving for rfc3986 + pass + elif seg == '.': + continue + else: + resolved_path.append(seg) + + if segments[-1] in ('.', '..'): + # do some post-processing here. if the last segment was a relative dir, + # then we need to append the trailing '/' + resolved_path.append('') + + return _coerce_result(_urlunsplit(scheme, netloc, '/'.join( + resolved_path) or '/', query, fragment)) + + +def urldefrag(url): + """Removes any existing fragment from URL. + + Returns a tuple of the defragmented URL and the fragment. If + the URL contained no fragments, the second element is the + empty string. + """ + url, _coerce_result = _coerce_args(url) + if '#' in url: + s, n, p, q, frag = _urlsplit(url) + defrag = _urlunsplit(s, n, p, q, None) + else: + frag = '' + defrag = url + return _coerce_result(DefragResult(defrag, frag or '')) + +_hexdig = '0123456789ABCDEFabcdef' +_hextobyte = None + +def unquote_to_bytes(string): + """unquote_to_bytes('abc%20def') -> b'abc def'.""" + return bytes(_unquote_impl(string)) + +def _unquote_impl(string: bytes | bytearray | str) -> bytes | bytearray: + # Note: strings are encoded as UTF-8. This is only an issue if it contains + # unescaped non-ASCII characters, which URIs should not. + if not string: + # Is it a string-like object? 
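+        # (Descriptive note: the bare attribute access below is deliberate;
+        # it raises AttributeError early for inputs that are not string-like,
+        # instead of failing later with a more confusing error.)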
+ string.split + return b'' + if isinstance(string, str): + string = string.encode('utf-8') + bits = string.split(b'%') + if len(bits) == 1: + return string + res = bytearray(bits[0]) + append = res.extend + # Delay the initialization of the table to not waste memory + # if the function is never called + global _hextobyte + if _hextobyte is None: + _hextobyte = {(a + b).encode(): bytes.fromhex(a + b) + for a in _hexdig for b in _hexdig} + for item in bits[1:]: + try: + append(_hextobyte[item[:2]]) + append(item[2:]) + except KeyError: + append(b'%') + append(item) + return res + +_asciire = re.compile('([\x00-\x7f]+)') + +def _generate_unquoted_parts(string, encoding, errors): + previous_match_end = 0 + for ascii_match in _asciire.finditer(string): + start, end = ascii_match.span() + yield string[previous_match_end:start] # Non-ASCII + # The ascii_match[1] group == string[start:end]. + yield _unquote_impl(ascii_match[1]).decode(encoding, errors) + previous_match_end = end + yield string[previous_match_end:] # Non-ASCII tail + +def unquote(string, encoding='utf-8', errors='replace'): + """Replace %xx escapes by their single-character equivalent. The optional + encoding and errors parameters specify how to decode percent-encoded + sequences into Unicode characters, as accepted by the bytes.decode() + method. + By default, percent-encoded sequences are decoded with UTF-8, and invalid + sequences are replaced by a placeholder character. + + unquote('abc%20def') -> 'abc def'. + """ + if isinstance(string, bytes): + return _unquote_impl(string).decode(encoding, errors) + if '%' not in string: + # Is it a string-like object? + string.split + return string + if encoding is None: + encoding = 'utf-8' + if errors is None: + errors = 'replace' + return ''.join(_generate_unquoted_parts(string, encoding, errors)) + + +def parse_qs(qs, keep_blank_values=False, strict_parsing=False, + encoding='utf-8', errors='replace', max_num_fields=None, separator='&'): + """Parse a query given as a string argument. + + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. + A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + encoding and errors: specify how to decode percent-encoded sequences + into Unicode characters, as accepted by the bytes.decode() method. + + max_num_fields: int. If set, then throws a ValueError if there + are more than n fields read by parse_qsl(). + + separator: str. The symbol to use for separating the query arguments. + Defaults to &. + + Returns a dictionary. + """ + parsed_result = {} + pairs = parse_qsl(qs, keep_blank_values, strict_parsing, + encoding=encoding, errors=errors, + max_num_fields=max_num_fields, separator=separator, + _stacklevel=2) + for name, value in pairs: + if name in parsed_result: + parsed_result[name].append(value) + else: + parsed_result[name] = [value] + return parsed_result + + +def parse_qsl(qs, keep_blank_values=False, strict_parsing=False, + encoding='utf-8', errors='replace', max_num_fields=None, separator='&', *, _stacklevel=1): + """Parse a query given as a string argument. 
+
+    Arguments:
+
+    qs: percent-encoded query string to be parsed
+
+    keep_blank_values: flag indicating whether blank values in
+        percent-encoded queries should be treated as blank strings.
+        A true value indicates that blanks should be retained as blank
+        strings. The default false value indicates that blank values
+        are to be ignored and treated as if they were not included.
+
+    strict_parsing: flag indicating what to do with parsing errors. If
+        false (the default), errors are silently ignored. If true,
+        errors raise a ValueError exception.
+
+    encoding and errors: specify how to decode percent-encoded sequences
+        into Unicode characters, as accepted by the bytes.decode() method.
+
+    max_num_fields: int. If set, then throws a ValueError
+        if there are more than n fields read by parse_qsl().
+
+    separator: str. The symbol to use for separating the query arguments.
+        Defaults to &.
+
+    Returns a list, as G-d intended.
+    """
+    if not separator or not isinstance(separator, (str, bytes)):
+        raise ValueError("Separator must be of type string or bytes.")
+    if isinstance(qs, str):
+        if not isinstance(separator, str):
+            separator = str(separator, 'ascii')
+        eq = '='
+        def _unquote(s):
+            return unquote_plus(s, encoding=encoding, errors=errors)
+    elif qs is None:
+        return []
+    else:
+        try:
+            # Use memoryview() to reject integers and iterables,
+            # acceptable by the bytes constructor.
+            qs = bytes(memoryview(qs))
+        except TypeError:
+            if not qs:
+                warnings.warn(f"Accepting {type(qs).__name__} objects with "
+                              f"false value in urllib.parse.parse_qsl() is "
+                              f"deprecated as of 3.14",
+                              DeprecationWarning, stacklevel=_stacklevel + 1)
+                return []
+            raise
+        if isinstance(separator, str):
+            separator = bytes(separator, 'ascii')
+        eq = b'='
+        def _unquote(s):
+            return unquote_to_bytes(s.replace(b'+', b' '))
+
+    if not qs:
+        return []
+
+    # If max_num_fields is defined then check that the number of fields
+    # is less than max_num_fields. This prevents a memory exhaustion DOS
+    # attack via post bodies with many fields.
+    if max_num_fields is not None:
+        num_fields = 1 + qs.count(separator)
+        if max_num_fields < num_fields:
+            raise ValueError('Max number of fields exceeded')
+
+    r = []
+    for name_value in qs.split(separator):
+        if name_value or strict_parsing:
+            name, has_eq, value = name_value.partition(eq)
+            if not has_eq and strict_parsing:
+                raise ValueError("bad query field: %r" % (name_value,))
+            if value or keep_blank_values:
+                name = _unquote(name)
+                value = _unquote(value)
+                r.append((name, value))
+    return r
+
+def unquote_plus(string, encoding='utf-8', errors='replace'):
+    """Like unquote(), but also replace plus signs by spaces, as required for
+    unquoting HTML form values.
+
+    unquote_plus('%7e/abc+def') -> '~/abc def'
+    """
+    string = string.replace('+', ' ')
+    return unquote(string, encoding, errors)
+
+_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+                         b'abcdefghijklmnopqrstuvwxyz'
+                         b'0123456789'
+                         b'_.-~')
+_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
+
+
+class _Quoter(dict):
+    """A mapping from bytes numbers (in range(0,256)) to strings.
+
+    String values are percent-encoded byte values, unless the key < 128, and
+    in either of the specified safe set, or the always safe set.
+    """
+    # Keeps a cache internally, via __missing__, for efficiency (lookups
+    # of cached keys don't call Python code at all).
+    def __init__(self, safe):
+        """safe: bytes object."""
+        self.safe = _ALWAYS_SAFE.union(safe)
+
+    def __repr__(self):
+        return f"<Quoter {dict(self)!r}>"
+
+    def __missing__(self, b):
+        # Handle a cache miss.
Store quoted string in cache and return. + res = chr(b) if b in self.safe else '%{:02X}'.format(b) + self[b] = res + return res + +def quote(string, safe='/', encoding=None, errors=None): + """quote('abc def') -> 'abc%20def' + + Each part of a URL, e.g. the path info, the query, etc., has a + different set of reserved characters that must be quoted. The + quote function offers a cautious (not minimal) way to quote a + string for most of these parts. + + RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists + the following (un)reserved characters. + + unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" + reserved = gen-delims / sub-delims + gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" + sub-delims = "!" / "$" / "&" / "'" / "(" / ")" + / "*" / "+" / "," / ";" / "=" + + Each of the reserved characters is reserved in some component of a URL, + but not necessarily in all of them. + + The quote function %-escapes all characters that are neither in the + unreserved chars ("always safe") nor the additional chars set via the + safe arg. + + The default for the safe arg is '/'. The character is reserved, but in + typical usage the quote function is being called on a path where the + existing slash characters are to be preserved. + + Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings. + Now, "~" is included in the set of unreserved characters. + + string and safe may be either str or bytes objects. encoding and errors + must not be specified if string is a bytes object. + + The optional encoding and errors parameters specify how to deal with + non-ASCII characters, as accepted by the str.encode method. + By default, encoding='utf-8' (characters are encoded with UTF-8), and + errors='strict' (unsupported characters raise a UnicodeEncodeError). + """ + if isinstance(string, str): + if not string: + return string + if encoding is None: + encoding = 'utf-8' + if errors is None: + errors = 'strict' + string = string.encode(encoding, errors) + else: + if encoding is not None: + raise TypeError("quote() doesn't support 'encoding' for bytes") + if errors is not None: + raise TypeError("quote() doesn't support 'errors' for bytes") + return quote_from_bytes(string, safe) + +def quote_plus(string, safe='', encoding=None, errors=None): + """Like quote(), but also replace ' ' with '+', as required for quoting + HTML form values. Plus signs in the original string are escaped unless + they are included in safe. It also does not have safe default to '/'. + """ + # Check if ' ' in string, where string may either be a str or bytes. If + # there are no spaces, the regular quote will produce the right answer. + if ((isinstance(string, str) and ' ' not in string) or + (isinstance(string, bytes) and b' ' not in string)): + return quote(string, safe, encoding, errors) + if isinstance(safe, str): + space = ' ' + else: + space = b' ' + string = quote(string, safe + space, encoding, errors) + return string.replace(' ', '+') + +# Expectation: A typical program is unlikely to create more than 5 of these. +@functools.lru_cache +def _byte_quoter_factory(safe): + return _Quoter(safe).__getitem__ + +def quote_from_bytes(bs, safe='/'): + """Like quote(), but accepts a bytes object rather than a str, and does + not perform string-to-bytes encoding. It always returns an ASCII string. 
+ quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f' + """ + if not isinstance(bs, (bytes, bytearray)): + raise TypeError("quote_from_bytes() expected bytes") + if not bs: + return '' + if isinstance(safe, str): + # Normalize 'safe' by converting to bytes and removing non-ASCII chars + safe = safe.encode('ascii', 'ignore') + else: + # List comprehensions are faster than generator expressions. + safe = bytes([c for c in safe if c < 128]) + if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe): + return bs.decode() + quoter = _byte_quoter_factory(safe) + if (bs_len := len(bs)) < 200_000: + return ''.join(map(quoter, bs)) + else: + # This saves memory - https://github.com/python/cpython/issues/95865 + chunk_size = math.isqrt(bs_len) + chunks = [''.join(map(quoter, bs[i:i+chunk_size])) + for i in range(0, bs_len, chunk_size)] + return ''.join(chunks) + +def urlencode(query, doseq=False, safe='', encoding=None, errors=None, + quote_via=quote_plus): + """Encode a dict or sequence of two-element tuples into a URL query string. + + If any values in the query arg are sequences and doseq is true, each + sequence element is converted to a separate parameter. + + If the query arg is a sequence of two-element tuples, the order of the + parameters in the output will match the order of parameters in the + input. + + The components of a query arg may each be either a string or a bytes type. + + The safe, encoding, and errors parameters are passed down to the function + specified by quote_via (encoding and errors only if a component is a str). + """ + + if hasattr(query, "items"): + query = query.items() + else: + # It's a bother at times that strings and string-like objects are + # sequences. + try: + # non-sequence items should not work with len() + # non-empty strings will fail this + if len(query) and not isinstance(query[0], tuple): + raise TypeError + # Zero-length sequences of all types will get here and succeed, + # but that's a minor nit. Since the original implementation + # allowed empty dicts that type of behavior probably should be + # preserved for consistency + except TypeError as err: + raise TypeError("not a valid non-string sequence " + "or mapping object") from err + + l = [] + if not doseq: + for k, v in query: + if isinstance(k, bytes): + k = quote_via(k, safe) + else: + k = quote_via(str(k), safe, encoding, errors) + + if isinstance(v, bytes): + v = quote_via(v, safe) + else: + v = quote_via(str(v), safe, encoding, errors) + l.append(k + '=' + v) + else: + for k, v in query: + if isinstance(k, bytes): + k = quote_via(k, safe) + else: + k = quote_via(str(k), safe, encoding, errors) + + if isinstance(v, bytes): + v = quote_via(v, safe) + l.append(k + '=' + v) + elif isinstance(v, str): + v = quote_via(v, safe, encoding, errors) + l.append(k + '=' + v) + else: + try: + # Is this a sufficient test for sequence-ness? + x = len(v) + except TypeError: + # not a sequence + v = quote_via(str(v), safe, encoding, errors) + l.append(k + '=' + v) + else: + # loop over the sequence + for elt in v: + if isinstance(elt, bytes): + elt = quote_via(elt, safe) + else: + elt = quote_via(str(elt), safe, encoding, errors) + l.append(k + '=' + elt) + return '&'.join(l) + + +def to_bytes(url): + warnings.warn("urllib.parse.to_bytes() is deprecated as of 3.8", + DeprecationWarning, stacklevel=2) + return _to_bytes(url) + + +def _to_bytes(url): + """to_bytes(u"URL") --> 'URL'.""" + # Most URL schemes require ASCII. If that changes, the conversion + # can be relaxed. 
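+    # For example, _to_bytes('http://example.com/') returns the string
+    # unchanged, while any non-ASCII character in the URL raises
+    # UnicodeError (illustrative note; this is the deprecated legacy path).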
+    # XXX get rid of to_bytes()
+    if isinstance(url, str):
+        try:
+            url = url.encode("ASCII").decode()
+        except UnicodeError:
+            raise UnicodeError("URL " + repr(url) +
+                               " contains non-ASCII characters")
+    return url
+
+
+def unwrap(url):
+    """Transform a string like '<URL:scheme://host/path>' into 'scheme://host/path'.
+
+    The string is returned unchanged if it's not a wrapped URL.
+    """
+    url = str(url).strip()
+    if url[:1] == '<' and url[-1:] == '>':
+        url = url[1:-1].strip()
+    if url[:4] == 'URL:':
+        url = url[4:].strip()
+    return url
+
+
+def splittype(url):
+    warnings.warn("urllib.parse.splittype() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splittype(url)
+
+
+_typeprog = None
+def _splittype(url):
+    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
+    global _typeprog
+    if _typeprog is None:
+        _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL)
+
+    match = _typeprog.match(url)
+    if match:
+        scheme, data = match.groups()
+        return scheme.lower(), data
+    return None, url
+
+
+def splithost(url):
+    warnings.warn("urllib.parse.splithost() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splithost(url)
+
+
+_hostprog = None
+def _splithost(url):
+    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
+    global _hostprog
+    if _hostprog is None:
+        _hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL)
+
+    match = _hostprog.match(url)
+    if match:
+        host_port, path = match.groups()
+        if path and path[0] != '/':
+            path = '/' + path
+        return host_port, path
+    return None, url
+
+
+def splituser(host):
+    warnings.warn("urllib.parse.splituser() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splituser(host)
+
+
+def _splituser(host):
+    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
+    user, delim, host = host.rpartition('@')
+    return (user if delim else None), host
+
+
+def splitpasswd(user):
+    warnings.warn("urllib.parse.splitpasswd() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splitpasswd(user)
+
+
+def _splitpasswd(user):
+    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
+    user, delim, passwd = user.partition(':')
+    return user, (passwd if delim else None)
+
+
+def splitport(host):
+    warnings.warn("urllib.parse.splitport() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splitport(host)
+
+
+# splittag('/path#tag') --> '/path', 'tag'
+_portprog = None
+def _splitport(host):
+    """splitport('host:port') --> 'host', 'port'."""
+    global _portprog
+    if _portprog is None:
+        _portprog = re.compile('(.*):([0-9]*)', re.DOTALL)
+
+    match = _portprog.fullmatch(host)
+    if match:
+        host, port = match.groups()
+        if port:
+            return host, port
+    return host, None
+
+
+def splitnport(host, defport=-1):
+    warnings.warn("urllib.parse.splitnport() is deprecated as of 3.8, "
+                  "use urllib.parse.urlparse() instead",
+                  DeprecationWarning, stacklevel=2)
+    return _splitnport(host, defport)
+
+
+def _splitnport(host, defport=-1):
+    """Split host and port, returning numeric port.
+    Return given default port if no ':' found; defaults to -1.
+    Return numerical port if a valid number is found after ':'.
+ Return None if ':' but not a valid number.""" + host, delim, port = host.rpartition(':') + if not delim: + host = port + elif port: + if port.isdigit() and port.isascii(): + nport = int(port) + else: + nport = None + return host, nport + return host, defport + + +def splitquery(url): + warnings.warn("urllib.parse.splitquery() is deprecated as of 3.8, " + "use urllib.parse.urlparse() instead", + DeprecationWarning, stacklevel=2) + return _splitquery(url) + + +def _splitquery(url): + """splitquery('/path?query') --> '/path', 'query'.""" + path, delim, query = url.rpartition('?') + if delim: + return path, query + return url, None + + +def splittag(url): + warnings.warn("urllib.parse.splittag() is deprecated as of 3.8, " + "use urllib.parse.urlparse() instead", + DeprecationWarning, stacklevel=2) + return _splittag(url) + + +def _splittag(url): + """splittag('/path#tag') --> '/path', 'tag'.""" + path, delim, tag = url.rpartition('#') + if delim: + return path, tag + return url, None + + +def splitattr(url): + warnings.warn("urllib.parse.splitattr() is deprecated as of 3.8, " + "use urllib.parse.urlparse() instead", + DeprecationWarning, stacklevel=2) + return _splitattr(url) + + +def _splitattr(url): + """splitattr('/path;attr1=value1;attr2=value2;...') -> + '/path', ['attr1=value1', 'attr2=value2', ...].""" + words = url.split(';') + return words[0], words[1:] + + +def splitvalue(attr): + warnings.warn("urllib.parse.splitvalue() is deprecated as of 3.8, " + "use urllib.parse.parse_qsl() instead", + DeprecationWarning, stacklevel=2) + return _splitvalue(attr) + + +def _splitvalue(attr): + """splitvalue('attr=value') --> 'attr', 'value'.""" + attr, delim, value = attr.partition('=') + return attr, (value if delim else None) diff --git a/Python314_4_x86_Template/Lib/urllib/request.py b/Python314_4_x86_Template/Lib/urllib/request.py new file mode 100644 index 00000000..8d7470a2 --- /dev/null +++ b/Python314_4_x86_Template/Lib/urllib/request.py @@ -0,0 +1,2163 @@ +"""An extensible library for opening URLs using a variety of protocols + +The simplest way to use this module is to call the urlopen function, +which accepts a string containing a URL or a Request object (described +below). It opens the URL and returns the results as file-like +object; the returned object has some extra methods described below. + +The OpenerDirector manages a collection of Handler objects that do +all the actual work. Each Handler implements a particular protocol or +option. The OpenerDirector is a composite object that invokes the +Handlers needed to open the requested URL. For example, the +HTTPHandler performs HTTP GET and POST requests and deals with +non-error returns. The HTTPRedirectHandler automatically deals with +HTTP 301, 302, 303, 307, and 308 redirect errors, and the +HTTPDigestAuthHandler deals with digest authentication. + +urlopen(url, data=None) -- Basic usage is the same as original +urllib. pass the url and optionally data to post to an HTTP URL, and +get a file-like object back. One difference is that you can also pass +a Request instance instead of URL. Raises a URLError (subclass of +OSError); for HTTP errors, raises an HTTPError, which can also be +treated as a valid response. + +build_opener -- Function that creates a new OpenerDirector instance. +Will install the default handlers. Accepts one or more Handlers as +arguments, either instances or Handler classes that it will +instantiate. 
If one of the argument is a subclass of the default +handler, the argument will be installed instead of the default. + +install_opener -- Installs a new opener as the default opener. + +objects of interest: + +OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages +the Handler classes, while dealing with requests and responses. + +Request -- An object that encapsulates the state of a request. The +state can be as simple as the URL. It can also include extra HTTP +headers, e.g. a User-Agent. + +BaseHandler -- + +internals: +BaseHandler and parent +_call_chain conventions + +Example usage: + +import urllib.request + +# set up authentication info +authinfo = urllib.request.HTTPBasicAuthHandler() +authinfo.add_password(realm='PDQ Application', + uri='https://mahler:8092/site-updates.py', + user='klem', + passwd='geheim$parole') + +proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"}) + +# build a new opener that adds authentication and caching FTP handlers +opener = urllib.request.build_opener(proxy_support, authinfo, + urllib.request.CacheFTPHandler) + +# install it +urllib.request.install_opener(opener) + +f = urllib.request.urlopen('https://www.python.org/') +""" + +# XXX issues: +# If an authentication error handler that tries to perform +# authentication for some reason but fails, how should the error be +# signalled? The client needs to know the HTTP error code. But if +# the handler knows that the problem was, e.g., that it didn't know +# that hash algo that requested in the challenge, it would be good to +# pass that information along to the client, too. +# ftp errors aren't handled cleanly +# check digest against correct (i.e. non-apache) implementation + +# Possible extensions: +# complex proxies XXX not sure what exactly was meant by this +# abstract factory for opener + +import base64 +import bisect +import contextlib +import email +import hashlib +import http.client +import io +import os +import re +import socket +import string +import sys +import time +import tempfile + + +from urllib.error import URLError, HTTPError, ContentTooShortError +from urllib.parse import ( + urlparse, urlsplit, urljoin, unwrap, quote, unquote, + _splittype, _splithost, _splitport, _splituser, _splitpasswd, + _splitattr, _splitvalue, _splittag, + unquote_to_bytes, urlunparse) +from urllib.response import addinfourl, addclosehook + +# check for SSL +try: + import ssl # noqa: F401 +except ImportError: + _have_ssl = False +else: + _have_ssl = True + +__all__ = [ + # Classes + 'Request', 'OpenerDirector', 'BaseHandler', 'HTTPDefaultErrorHandler', + 'HTTPRedirectHandler', 'HTTPCookieProcessor', 'ProxyHandler', + 'HTTPPasswordMgr', 'HTTPPasswordMgrWithDefaultRealm', + 'HTTPPasswordMgrWithPriorAuth', 'AbstractBasicAuthHandler', + 'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler', 'AbstractDigestAuthHandler', + 'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler', 'HTTPHandler', + 'FileHandler', 'FTPHandler', 'CacheFTPHandler', 'DataHandler', + 'UnknownHandler', 'HTTPErrorProcessor', + # Functions + 'urlopen', 'install_opener', 'build_opener', + 'pathname2url', 'url2pathname', 'getproxies', + # Legacy interface + 'urlretrieve', 'urlcleanup', +] + +# used in User-Agent header sent +__version__ = '%d.%d' % sys.version_info[:2] + +_opener = None +def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + *, context=None): + '''Open the URL url, which can be either a string or a Request object. 
+ + *data* must be an object specifying additional data to be sent to + the server, or None if no such data is needed. See Request for + details. + + urllib.request module uses HTTP/1.1 and includes a "Connection:close" + header in its HTTP requests. + + The optional *timeout* parameter specifies a timeout in seconds for + blocking operations like the connection attempt (if not specified, the + global default timeout setting will be used). This only works for HTTP, + HTTPS and FTP connections. + + If *context* is specified, it must be a ssl.SSLContext instance describing + the various SSL options. See HTTPSConnection for more details. + + + This function always returns an object which can work as a + context manager and has the properties url, headers, and status. + See urllib.response.addinfourl for more detail on these properties. + + For HTTP and HTTPS URLs, this function returns a http.client.HTTPResponse + object slightly modified. In addition to the three new methods above, the + msg attribute contains the same information as the reason attribute --- + the reason phrase returned by the server --- instead of the response + headers as it is specified in the documentation for HTTPResponse. + + For FTP, file, and data URLs, this function returns a + urllib.response.addinfourl object. + + Note that None may be returned if no handler handles the request (though + the default installed global OpenerDirector uses UnknownHandler to ensure + this never happens). + + In addition, if proxy settings are detected (for example, when a *_proxy + environment variable like http_proxy is set), ProxyHandler is default + installed and makes sure the requests are handled through the proxy. + + ''' + global _opener + if context: + https_handler = HTTPSHandler(context=context) + opener = build_opener(https_handler) + elif _opener is None: + _opener = opener = build_opener() + else: + opener = _opener + return opener.open(url, data, timeout) + +def install_opener(opener): + global _opener + _opener = opener + +_url_tempfiles = [] +def urlretrieve(url, filename=None, reporthook=None, data=None): + """ + Retrieve a URL into a temporary location on disk. + + Requires a URL argument. If a filename is passed, it is used as + the temporary file location. The reporthook argument should be + a callable that accepts a block number, a read size, and the + total file size of the URL target. The data argument should be + valid URL encoded data. + + If a filename is passed and the URL points to a local resource, + the result is a copy from local file to new file. + + Returns a tuple containing the path to the newly created + data file as well as the resulting HTTPMessage object. + """ + url_type, path = _splittype(url) + + with contextlib.closing(urlopen(url, data)) as fp: + headers = fp.info() + + # Just return the local path and the "headers" for file:// + # URLs. No sense in performing a copy unless requested. + if url_type == "file" and not filename: + return os.path.normpath(path), headers + + # Handle temporary file setup. 
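+        # (Illustrative note; the example URL is a placeholder: with no
+        # filename argument the download lands in a NamedTemporaryFile whose
+        # path is recorded in _url_tempfiles, so a later urlcleanup() call
+        # can remove it:
+        #   path, headers = urlretrieve('http://example.com/f.txt')
+        #   urlcleanup())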
+ if filename: + tfp = open(filename, 'wb') + else: + tfp = tempfile.NamedTemporaryFile(delete=False) + filename = tfp.name + _url_tempfiles.append(filename) + + with tfp: + result = filename, headers + bs = 1024*8 + size = -1 + read = 0 + blocknum = 0 + if "content-length" in headers: + size = int(headers["Content-Length"]) + + if reporthook: + reporthook(blocknum, bs, size) + + while block := fp.read(bs): + read += len(block) + tfp.write(block) + blocknum += 1 + if reporthook: + reporthook(blocknum, bs, size) + + if size >= 0 and read < size: + raise ContentTooShortError( + "retrieval incomplete: got only %i out of %i bytes" + % (read, size), result) + + return result + +def urlcleanup(): + """Clean up temporary files from urlretrieve calls.""" + for temp_file in _url_tempfiles: + try: + os.unlink(temp_file) + except OSError: + pass + + del _url_tempfiles[:] + global _opener + if _opener: + _opener = None + +# copied from cookielib.py +_cut_port_re = re.compile(r":\d+$", re.ASCII) +def request_host(request): + """Return request-host, as defined by RFC 2965. + + Variation from RFC: returned value is lowercased, for convenient + comparison. + + """ + url = request.full_url + host = urlparse(url)[1] + if host == "": + host = request.get_header("Host", "") + + # remove port, if present + host = _cut_port_re.sub("", host, 1) + return host.lower() + +class Request: + + def __init__(self, url, data=None, headers={}, + origin_req_host=None, unverifiable=False, + method=None): + self.full_url = url + self.headers = {} + self.unredirected_hdrs = {} + self._data = None + self.data = data + self._tunnel_host = None + for key, value in headers.items(): + self.add_header(key, value) + if origin_req_host is None: + origin_req_host = request_host(self) + self.origin_req_host = origin_req_host + self.unverifiable = unverifiable + if method: + self.method = method + + @property + def full_url(self): + if self.fragment: + return '{}#{}'.format(self._full_url, self.fragment) + return self._full_url + + @full_url.setter + def full_url(self, url): + # unwrap('') --> 'type://host/path' + self._full_url = unwrap(url) + self._full_url, self.fragment = _splittag(self._full_url) + self._parse() + + @full_url.deleter + def full_url(self): + self._full_url = None + self.fragment = None + self.selector = '' + + @property + def data(self): + return self._data + + @data.setter + def data(self, data): + if data != self._data: + self._data = data + # issue 16464 + # if we change data we need to remove content-length header + # (cause it's most probably calculated for previous value) + if self.has_header("Content-length"): + self.remove_header("Content-length") + + @data.deleter + def data(self): + self.data = None + + def _parse(self): + self.type, rest = _splittype(self._full_url) + if self.type is None: + raise ValueError("unknown url type: %r" % self.full_url) + self.host, self.selector = _splithost(rest) + if self.host: + self.host = unquote(self.host) + + def get_method(self): + """Return a string indicating the HTTP request method.""" + default_method = "POST" if self.data is not None else "GET" + return getattr(self, 'method', default_method) + + def get_full_url(self): + return self.full_url + + def set_proxy(self, host, type): + if self.type == 'https' and not self._tunnel_host: + self._tunnel_host = self.host + else: + self.type= type + self.selector = self.full_url + self.host = host + + def has_proxy(self): + return self.selector == self.full_url + + def add_header(self, key, val): + # useful for something 
like authentication + self.headers[key.capitalize()] = val + + def add_unredirected_header(self, key, val): + # will not be added to a redirected request + self.unredirected_hdrs[key.capitalize()] = val + + def has_header(self, header_name): + return (header_name in self.headers or + header_name in self.unredirected_hdrs) + + def get_header(self, header_name, default=None): + return self.headers.get( + header_name, + self.unredirected_hdrs.get(header_name, default)) + + def remove_header(self, header_name): + self.headers.pop(header_name, None) + self.unredirected_hdrs.pop(header_name, None) + + def header_items(self): + hdrs = {**self.unredirected_hdrs, **self.headers} + return list(hdrs.items()) + +class OpenerDirector: + def __init__(self): + client_version = "Python-urllib/%s" % __version__ + self.addheaders = [('User-agent', client_version)] + # self.handlers is retained only for backward compatibility + self.handlers = [] + # manage the individual handlers + self.handle_open = {} + self.handle_error = {} + self.process_response = {} + self.process_request = {} + + def add_handler(self, handler): + if not hasattr(handler, "add_parent"): + raise TypeError("expected BaseHandler instance, got %r" % + type(handler)) + + added = False + for meth in dir(handler): + if meth in ["redirect_request", "do_open", "proxy_open"]: + # oops, coincidental match + continue + + i = meth.find("_") + protocol = meth[:i] + condition = meth[i+1:] + + if condition.startswith("error"): + j = condition.find("_") + i + 1 + kind = meth[j+1:] + try: + kind = int(kind) + except ValueError: + pass + lookup = self.handle_error.get(protocol, {}) + self.handle_error[protocol] = lookup + elif condition == "open": + kind = protocol + lookup = self.handle_open + elif condition == "response": + kind = protocol + lookup = self.process_response + elif condition == "request": + kind = protocol + lookup = self.process_request + else: + continue + + handlers = lookup.setdefault(kind, []) + if handlers: + bisect.insort(handlers, handler) + else: + handlers.append(handler) + added = True + + if added: + bisect.insort(self.handlers, handler) + handler.add_parent(self) + + def close(self): + # Only exists for backwards compatibility. + pass + + def _call_chain(self, chain, kind, meth_name, *args): + # Handlers raise an exception if no one else should try to handle + # the request, or return None if they can't but another handler + # could. Otherwise, they return the response. 
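+        # For instance (illustrative): for chain self.handle_error['http'],
+        # kind 401 and meth_name 'http_error_401', each matching handler is
+        # called in handler_order until one returns a non-None result.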
+ handlers = chain.get(kind, ()) + for handler in handlers: + func = getattr(handler, meth_name) + result = func(*args) + if result is not None: + return result + + def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): + # accept a URL or a Request object + if isinstance(fullurl, str): + req = Request(fullurl, data) + else: + req = fullurl + if data is not None: + req.data = data + + req.timeout = timeout + protocol = req.type + + # pre-process request + meth_name = protocol+"_request" + for processor in self.process_request.get(protocol, []): + meth = getattr(processor, meth_name) + req = meth(req) + + sys.audit('urllib.Request', req.full_url, req.data, req.headers, req.get_method()) + response = self._open(req, data) + + # post-process response + meth_name = protocol+"_response" + for processor in self.process_response.get(protocol, []): + meth = getattr(processor, meth_name) + response = meth(req, response) + + return response + + def _open(self, req, data=None): + result = self._call_chain(self.handle_open, 'default', + 'default_open', req) + if result: + return result + + protocol = req.type + result = self._call_chain(self.handle_open, protocol, protocol + + '_open', req) + if result: + return result + + return self._call_chain(self.handle_open, 'unknown', + 'unknown_open', req) + + def error(self, proto, *args): + if proto in ('http', 'https'): + # XXX http[s] protocols are special-cased + dict = self.handle_error['http'] # https is not different than http + proto = args[2] # YUCK! + meth_name = 'http_error_%s' % proto + http_err = 1 + orig_args = args + else: + dict = self.handle_error + meth_name = proto + '_error' + http_err = 0 + args = (dict, proto, meth_name) + args + result = self._call_chain(*args) + if result: + return result + + if http_err: + args = (dict, 'default', 'http_error_default') + orig_args + return self._call_chain(*args) + +# XXX probably also want an abstract factory that knows when it makes +# sense to skip a superclass in favor of a subclass and when it might +# make sense to include both + +def build_opener(*handlers): + """Create an opener object from a list of handlers. + + The opener will use several default handlers, including support + for HTTP, FTP and when applicable HTTPS. + + If any of the handlers passed as arguments are subclasses of the + default handlers, the default handlers will not be used. + """ + opener = OpenerDirector() + default_classes = [ProxyHandler, UnknownHandler, HTTPHandler, + HTTPDefaultErrorHandler, HTTPRedirectHandler, + FTPHandler, FileHandler, HTTPErrorProcessor, + DataHandler] + if hasattr(http.client, "HTTPSConnection"): + default_classes.append(HTTPSHandler) + skip = set() + for klass in default_classes: + for check in handlers: + if isinstance(check, type): + if issubclass(check, klass): + skip.add(klass) + elif isinstance(check, klass): + skip.add(klass) + for klass in skip: + default_classes.remove(klass) + + for klass in default_classes: + opener.add_handler(klass()) + + for h in handlers: + if isinstance(h, type): + h = h() + opener.add_handler(h) + return opener + +class BaseHandler: + handler_order = 500 + + def add_parent(self, parent): + self.parent = parent + + def close(self): + # Only exists for backwards compatibility + pass + + def __lt__(self, other): + if not hasattr(other, "handler_order"): + # Try to preserve the old behavior of having custom classes + # inserted after default ones (works only for custom user + # classes which are not aware of handler_order). 
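+            # (Illustrative: an object without handler_order always sorts
+            # after this handler, so bisect.insort() places it later in
+            # the chain.)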
+ return True + return self.handler_order < other.handler_order + + +class HTTPErrorProcessor(BaseHandler): + """Process HTTP error responses.""" + handler_order = 1000 # after all other processing + + def http_response(self, request, response): + code, msg, hdrs = response.code, response.msg, response.info() + + # According to RFC 2616, "2xx" code indicates that the client's + # request was successfully received, understood, and accepted. + if not (200 <= code < 300): + response = self.parent.error( + 'http', request, response, code, msg, hdrs) + + return response + + https_response = http_response + +class HTTPDefaultErrorHandler(BaseHandler): + def http_error_default(self, req, fp, code, msg, hdrs): + raise HTTPError(req.full_url, code, msg, hdrs, fp) + +class HTTPRedirectHandler(BaseHandler): + # maximum number of redirections to any single URL + # this is needed because of the state that cookies introduce + max_repeats = 4 + # maximum total number of redirections (regardless of URL) before + # assuming we're in a loop + max_redirections = 10 + + def redirect_request(self, req, fp, code, msg, headers, newurl): + """Return a Request or None in response to a redirect. + + This is called by the http_error_30x methods when a + redirection response is received. If a redirection should + take place, return a new Request to allow http_error_30x to + perform the redirect. Otherwise, raise HTTPError if no-one + else should try to handle this url. Return None if you can't + but another Handler might. + """ + m = req.get_method() + if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD") + or code in (301, 302, 303) and m == "POST")): + raise HTTPError(req.full_url, code, msg, headers, fp) + + # Strictly (according to RFC 2616), 301 or 302 in response to + # a POST MUST NOT cause a redirection without confirmation + # from the user (of urllib.request, in this case). In practice, + # essentially all clients do redirect in this case, so we do + # the same. + + # Be conciliant with URIs containing a space. This is mainly + # redundant with the more complete encoding done in http_error_302(), + # but it is kept for compatibility with other callers. + newurl = newurl.replace(' ', '%20') + + CONTENT_HEADERS = ("content-length", "content-type") + newheaders = {k: v for k, v in req.headers.items() + if k.lower() not in CONTENT_HEADERS} + return Request(newurl, + method="HEAD" if m == "HEAD" else "GET", + headers=newheaders, + origin_req_host=req.origin_req_host, + unverifiable=True) + + # Implementation note: To avoid the server sending us into an + # infinite loop, the request object needs to track what URLs we + # have already seen. Do this by adding a handler-specific + # attribute to the Request object. + def http_error_302(self, req, fp, code, msg, headers): + # Some servers (incorrectly) return multiple Location headers + # (so probably same goes for URI). Use first header. + if "location" in headers: + newurl = headers["location"] + elif "uri" in headers: + newurl = headers["uri"] + else: + return + + # fix a possible malformed URL + urlparts = urlparse(newurl) + + # For security reasons we don't allow redirection to anything other + # than http, https or ftp. 
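+        # For instance (illustrative), a "Location: file:///etc/passwd"
+        # response header would be rejected below instead of followed.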
+ + if urlparts.scheme not in ('http', 'https', 'ftp', ''): + raise HTTPError( + newurl, code, + "%s - Redirection to url '%s' is not allowed" % (msg, newurl), + headers, fp) + + if not urlparts.path and urlparts.netloc: + urlparts = list(urlparts) + urlparts[2] = "/" + newurl = urlunparse(urlparts) + + # http.client.parse_headers() decodes as ISO-8859-1. Recover the + # original bytes and percent-encode non-ASCII bytes, and any special + # characters such as the space. + newurl = quote( + newurl, encoding="iso-8859-1", safe=string.punctuation) + newurl = urljoin(req.full_url, newurl) + + # XXX Probably want to forget about the state of the current + # request, although that might interact poorly with other + # handlers that also use handler-specific request attributes + new = self.redirect_request(req, fp, code, msg, headers, newurl) + if new is None: + return + + # loop detection + # .redirect_dict has a key url if url was previously visited. + if hasattr(req, 'redirect_dict'): + visited = new.redirect_dict = req.redirect_dict + if (visited.get(newurl, 0) >= self.max_repeats or + len(visited) >= self.max_redirections): + raise HTTPError(req.full_url, code, + self.inf_msg + msg, headers, fp) + else: + visited = new.redirect_dict = req.redirect_dict = {} + visited[newurl] = visited.get(newurl, 0) + 1 + + # Don't close the fp until we are sure that we won't use it + # with HTTPError. + fp.read() + fp.close() + + return self.parent.open(new, timeout=req.timeout) + + http_error_301 = http_error_303 = http_error_307 = http_error_308 = http_error_302 + + inf_msg = "The HTTP server returned a redirect error that would " \ + "lead to an infinite loop.\n" \ + "The last 30x error message was:\n" + + +def _parse_proxy(proxy): + """Return (scheme, user, password, host/port) given a URL or an authority. + + If a URL is supplied, it must have an authority (host:port) component. + According to RFC 3986, having an authority component means the URL must + have two slashes after the scheme. + """ + scheme, r_scheme = _splittype(proxy) + if not r_scheme.startswith("/"): + # authority + scheme = None + authority = proxy + else: + # URL + if not r_scheme.startswith("//"): + raise ValueError("proxy URL with no authority: %r" % proxy) + # We have an authority, so for RFC 3986-compliant URLs (by ss 3. 
+ # and 3.3.), path is empty or starts with '/' + if '@' in r_scheme: + host_separator = r_scheme.find('@') + end = r_scheme.find("/", host_separator) + else: + end = r_scheme.find("/", 2) + if end == -1: + end = None + authority = r_scheme[2:end] + userinfo, hostport = _splituser(authority) + if userinfo is not None: + user, password = _splitpasswd(userinfo) + else: + user = password = None + return scheme, user, password, hostport + +class ProxyHandler(BaseHandler): + # Proxies must be in front + handler_order = 100 + + def __init__(self, proxies=None): + if proxies is None: + proxies = getproxies() + assert hasattr(proxies, 'keys'), "proxies must be a mapping" + self.proxies = proxies + for type, url in proxies.items(): + type = type.lower() + setattr(self, '%s_open' % type, + lambda r, proxy=url, type=type, meth=self.proxy_open: + meth(r, proxy, type)) + + def proxy_open(self, req, proxy, type): + orig_type = req.type + proxy_type, user, password, hostport = _parse_proxy(proxy) + if proxy_type is None: + proxy_type = orig_type + + if req.host and proxy_bypass(req.host): + return None + + if user and password: + user_pass = '%s:%s' % (unquote(user), + unquote(password)) + creds = base64.b64encode(user_pass.encode()).decode("ascii") + req.add_header('Proxy-authorization', 'Basic ' + creds) + hostport = unquote(hostport) + req.set_proxy(hostport, proxy_type) + if orig_type == proxy_type or orig_type == 'https': + # let other handlers take care of it + return None + else: + # need to start over, because the other handlers don't + # grok the proxy's URL type + # e.g. if we have a constructor arg proxies like so: + # {'http': 'ftp://proxy.example.com'}, we may end up turning + # a request for http://acme.example.com/a into one for + # ftp://proxy.example.com/a + return self.parent.open(req, timeout=req.timeout) + +class HTTPPasswordMgr: + + def __init__(self): + self.passwd = {} + + def add_password(self, realm, uri, user, passwd): + # uri could be a single URI or a sequence + if isinstance(uri, str): + uri = [uri] + if realm not in self.passwd: + self.passwd[realm] = {} + for default_port in True, False: + reduced_uri = tuple( + self.reduce_uri(u, default_port) for u in uri) + self.passwd[realm][reduced_uri] = (user, passwd) + + def find_user_password(self, realm, authuri): + domains = self.passwd.get(realm, {}) + for default_port in True, False: + reduced_authuri = self.reduce_uri(authuri, default_port) + for uris, authinfo in domains.items(): + for uri in uris: + if self.is_suburi(uri, reduced_authuri): + return authinfo + return None, None + + def reduce_uri(self, uri, default_port=True): + """Accept authority or URI and extract only the authority and path.""" + # note HTTP URLs do not have a userinfo component + parts = urlsplit(uri) + if parts[1]: + # URI + scheme = parts[0] + authority = parts[1] + path = parts[2] or '/' + else: + # host or host:port + scheme = None + authority = uri + path = '/' + host, port = _splitport(authority) + if default_port and port is None and scheme is not None: + dport = {"http": 80, + "https": 443, + }.get(scheme) + if dport is not None: + authority = "%s:%d" % (host, dport) + return authority, path + + def is_suburi(self, base, test): + """Check if test is below base in a URI tree + + Both args must be URIs in reduced form. 
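+
+        For example (illustrative): base ('example.com:80', '/site/') is
+        a prefix of test ('example.com:80', '/site/updates.py'), so the
+        stored credentials apply.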
+ """ + if base == test: + return True + if base[0] != test[0]: + return False + prefix = base[1] + if prefix[-1:] != '/': + prefix += '/' + return test[1].startswith(prefix) + + +class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr): + + def find_user_password(self, realm, authuri): + user, password = HTTPPasswordMgr.find_user_password(self, realm, + authuri) + if user is not None: + return user, password + return HTTPPasswordMgr.find_user_password(self, None, authuri) + + +class HTTPPasswordMgrWithPriorAuth(HTTPPasswordMgrWithDefaultRealm): + + def __init__(self): + self.authenticated = {} + super().__init__() + + def add_password(self, realm, uri, user, passwd, is_authenticated=False): + self.update_authenticated(uri, is_authenticated) + # Add a default for prior auth requests + if realm is not None: + super().add_password(None, uri, user, passwd) + super().add_password(realm, uri, user, passwd) + + def update_authenticated(self, uri, is_authenticated=False): + # uri could be a single URI or a sequence + if isinstance(uri, str): + uri = [uri] + + for default_port in True, False: + for u in uri: + reduced_uri = self.reduce_uri(u, default_port) + self.authenticated[reduced_uri] = is_authenticated + + def is_authenticated(self, authuri): + for default_port in True, False: + reduced_authuri = self.reduce_uri(authuri, default_port) + for uri in self.authenticated: + if self.is_suburi(uri, reduced_authuri): + return self.authenticated[uri] + + +class AbstractBasicAuthHandler: + + # XXX this allows for multiple auth-schemes, but will stupidly pick + # the last one with a realm specified. + + # allow for double- and single-quoted realm values + # (single quotes are a violation of the RFC, but appear in the wild) + rx = re.compile('(?:^|,)' # start of the string or ',' + '[ \t]*' # optional whitespaces + '([^ \t,]+)' # scheme like "Basic" + '[ \t]+' # mandatory whitespaces + # realm=xxx + # realm='xxx' + # realm="xxx" + 'realm=(["\']?)([^"\']*)\\2', + re.I) + + # XXX could pre-emptively send auth info already accepted (RFC 2617, + # end of section 2, and section 1.2 immediately after "credentials" + # production). + + def __init__(self, password_mgr=None): + if password_mgr is None: + password_mgr = HTTPPasswordMgr() + self.passwd = password_mgr + self.add_password = self.passwd.add_password + + def _parse_realm(self, header): + # parse WWW-Authenticate header: accept multiple challenges per header + found_challenge = False + for mo in AbstractBasicAuthHandler.rx.finditer(header): + scheme, quote, realm = mo.groups() + if quote not in ['"', "'"]: + import warnings + warnings.warn("Basic Auth Realm was unquoted", + UserWarning, 3) + + yield (scheme, realm) + + found_challenge = True + + if not found_challenge: + if header: + scheme = header.split()[0] + else: + scheme = '' + yield (scheme, None) + + def http_error_auth_reqed(self, authreq, host, req, headers): + # host may be an authority (without userinfo) or a URL with an + # authority + headers = headers.get_all(authreq) + if not headers: + # no header found + return + + unsupported = None + for header in headers: + for scheme, realm in self._parse_realm(header): + if scheme.lower() != 'basic': + unsupported = scheme + continue + + if realm is not None: + # Use the first matching Basic challenge. + # Ignore following challenges even if they use the Basic + # scheme. 
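+                    # Illustrative example: a response header such as
+                    #   WWW-Authenticate: Basic realm="PDQ Application"
+                    # yields scheme 'Basic' and realm 'PDQ Application',
+                    # and the retry below attaches the stored credentials.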
+ return self.retry_http_basic_auth(host, req, realm) + + if unsupported is not None: + raise ValueError("AbstractBasicAuthHandler does not " + "support the following scheme: %r" + % (scheme,)) + + def retry_http_basic_auth(self, host, req, realm): + user, pw = self.passwd.find_user_password(realm, host) + if pw is not None: + raw = "%s:%s" % (user, pw) + auth = "Basic " + base64.b64encode(raw.encode()).decode("ascii") + if req.get_header(self.auth_header, None) == auth: + return None + req.add_unredirected_header(self.auth_header, auth) + return self.parent.open(req, timeout=req.timeout) + else: + return None + + def http_request(self, req): + if (not hasattr(self.passwd, 'is_authenticated') or + not self.passwd.is_authenticated(req.full_url)): + return req + + if not req.has_header('Authorization'): + user, passwd = self.passwd.find_user_password(None, req.full_url) + credentials = '{0}:{1}'.format(user, passwd).encode() + auth_str = base64.standard_b64encode(credentials).decode() + req.add_unredirected_header('Authorization', + 'Basic {}'.format(auth_str.strip())) + return req + + def http_response(self, req, response): + if hasattr(self.passwd, 'is_authenticated'): + if 200 <= response.code < 300: + self.passwd.update_authenticated(req.full_url, True) + else: + self.passwd.update_authenticated(req.full_url, False) + return response + + https_request = http_request + https_response = http_response + + + +class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): + + auth_header = 'Authorization' + + def http_error_401(self, req, fp, code, msg, headers): + url = req.full_url + response = self.http_error_auth_reqed('www-authenticate', + url, req, headers) + return response + + +class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): + + auth_header = 'Proxy-authorization' + + def http_error_407(self, req, fp, code, msg, headers): + # http_error_auth_reqed requires that there is no userinfo component in + # authority. Assume there isn't one, since urllib.request does not (and + # should not, RFC 3986 s. 3.2.1) support requests for URLs containing + # userinfo. + authority = req.host + response = self.http_error_auth_reqed('proxy-authenticate', + authority, req, headers) + return response + + +# Return n random bytes. +_randombytes = os.urandom + + +class AbstractDigestAuthHandler: + # Digest authentication is specified in RFC 2617/7616. + + # XXX The client does not inspect the Authentication-Info header + # in a successful response. + + # XXX It should be possible to test this implementation against + # a mock server that just generates a static set of challenges. + + # XXX qop="auth-int" supports is shaky + + def __init__(self, passwd=None): + if passwd is None: + passwd = HTTPPasswordMgr() + self.passwd = passwd + self.add_password = self.passwd.add_password + self.retried = 0 + self.nonce_count = 0 + self.last_nonce = None + + def reset_retry_count(self): + self.retried = 0 + + def http_error_auth_reqed(self, auth_header, host, req, headers): + authreq = headers.get(auth_header, None) + if self.retried > 5: + # Don't fail endlessly - if we failed once, we'll probably + # fail a second time. Hm. Unless the Password Manager is + # prompting for the information. Crap. 
This isn't great
+            # but it's better than the current 'repeat until recursion
+            # depth exceeded' approach
+            raise HTTPError(req.full_url, 401, "digest auth failed",
+                            headers, None)
+        else:
+            self.retried += 1
+        if authreq:
+            scheme = authreq.split()[0]
+            if scheme.lower() == 'digest':
+                return self.retry_http_digest_auth(req, authreq)
+            elif scheme.lower() != 'basic':
+                raise ValueError("AbstractDigestAuthHandler does not support"
+                                 " the following scheme: '%s'" % scheme)
+
+    def retry_http_digest_auth(self, req, auth):
+        token, challenge = auth.split(' ', 1)
+        chal = parse_keqv_list(filter(None, parse_http_list(challenge)))
+        auth = self.get_authorization(req, chal)
+        if auth:
+            auth_val = 'Digest %s' % auth
+            if req.headers.get(self.auth_header, None) == auth_val:
+                return None
+            req.add_unredirected_header(self.auth_header, auth_val)
+            resp = self.parent.open(req, timeout=req.timeout)
+            return resp
+
+    def get_cnonce(self, nonce):
+        # The cnonce-value is an opaque
+        # quoted string value provided by the client and used by both client
+        # and server to avoid chosen plaintext attacks, to provide mutual
+        # authentication, and to provide some message integrity protection.
+        # This isn't a fabulous effort, but it's probably Good Enough.
+        s = "%s:%s:%s:" % (self.nonce_count, nonce, time.ctime())
+        b = s.encode("ascii") + _randombytes(8)
+        dig = hashlib.sha1(b).hexdigest()
+        return dig[:16]
+
+    def get_authorization(self, req, chal):
+        try:
+            realm = chal['realm']
+            nonce = chal['nonce']
+            qop = chal.get('qop')
+            algorithm = chal.get('algorithm', 'MD5')
+            # mod_digest doesn't send an opaque, even though it isn't
+            # supposed to be optional
+            opaque = chal.get('opaque', None)
+        except KeyError:
+            return None
+
+        H, KD = self.get_algorithm_impls(algorithm)
+        if H is None:
+            return None
+
+        user, pw = self.passwd.find_user_password(realm, req.full_url)
+        if user is None:
+            return None
+
+        # XXX not implemented yet
+        if req.data is not None:
+            entdig = self.get_entity_digest(req.data, chal)
+        else:
+            entdig = None
+
+        A1 = "%s:%s:%s" % (user, realm, pw)
+        A2 = "%s:%s" % (req.get_method(),
+                        # XXX selector: what about proxies and full urls
+                        req.selector)
+        # NOTE: As per RFC 2617, when the server sends "auth,auth-int" the
+        # client may respond with either `auth` or `auth-int`; we respond
+        # with `auth`.
+        if qop is None:
+            respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
+        elif 'auth' in qop.split(','):
+            if nonce == self.last_nonce:
+                self.nonce_count += 1
+            else:
+                self.nonce_count = 1
+                self.last_nonce = nonce
+            ncvalue = '%08x' % self.nonce_count
+            cnonce = self.get_cnonce(nonce)
+            noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, 'auth', H(A2))
+            respdig = KD(H(A1), noncebit)
+        else:
+            # XXX handle auth-int.
+            raise URLError("qop '%s' is not supported." % qop)
+
+        # XXX should the partial digests be encoded too?
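+        # Illustrative shape of the value assembled below (the digest and
+        # nonce values are made up):
+        #   username="klem", realm="PDQ", nonce="dcd98b7102dd2f0e",
+        #   uri="/dir/index.html", response="6629fae49393a053", qop=auth,
+        #   nc=00000001, cnonce="0a4f113b"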
+ + base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ + 'response="%s"' % (user, realm, nonce, req.selector, + respdig) + if opaque: + base += ', opaque="%s"' % opaque + if entdig: + base += ', digest="%s"' % entdig + base += ', algorithm="%s"' % algorithm + if qop: + base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce) + return base + + def get_algorithm_impls(self, algorithm): + # algorithm names taken from RFC 7616 Section 6.1 + # lambdas assume digest modules are imported at the top level + if algorithm == 'MD5': + H = lambda x: hashlib.md5(x.encode("ascii")).hexdigest() + elif algorithm == 'SHA': # non-standard, retained for compatibility. + H = lambda x: hashlib.sha1(x.encode("ascii")).hexdigest() + elif algorithm == 'SHA-256': + H = lambda x: hashlib.sha256(x.encode("ascii")).hexdigest() + # XXX MD5-sess + else: + raise ValueError("Unsupported digest authentication " + "algorithm %r" % algorithm) + KD = lambda s, d: H("%s:%s" % (s, d)) + return H, KD + + def get_entity_digest(self, data, chal): + # XXX not implemented yet + return None + + +class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): + """An authentication protocol defined by RFC 2069 + + Digest authentication improves on basic authentication because it + does not transmit passwords in the clear. + """ + + auth_header = 'Authorization' + handler_order = 490 # before Basic auth + + def http_error_401(self, req, fp, code, msg, headers): + host = urlparse(req.full_url)[1] + retry = self.http_error_auth_reqed('www-authenticate', + host, req, headers) + self.reset_retry_count() + return retry + + +class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): + + auth_header = 'Proxy-Authorization' + handler_order = 490 # before Basic auth + + def http_error_407(self, req, fp, code, msg, headers): + host = req.host + retry = self.http_error_auth_reqed('proxy-authenticate', + host, req, headers) + self.reset_retry_count() + return retry + +class AbstractHTTPHandler(BaseHandler): + + def __init__(self, debuglevel=None): + self._debuglevel = debuglevel if debuglevel is not None else http.client.HTTPConnection.debuglevel + + def set_http_debuglevel(self, level): + self._debuglevel = level + + def _get_content_length(self, request): + return http.client.HTTPConnection._get_content_length( + request.data, + request.get_method()) + + def do_request_(self, request): + host = request.host + if not host: + raise URLError('no host given') + + if request.data is not None: # POST + data = request.data + if isinstance(data, str): + msg = "POST data should be bytes, an iterable of bytes, " \ + "or a file object. It cannot be of type str." 
+                raise TypeError(msg)
+            if not request.has_header('Content-type'):
+                request.add_unredirected_header(
+                    'Content-type',
+                    'application/x-www-form-urlencoded')
+            if (not request.has_header('Content-length')
+                    and not request.has_header('Transfer-encoding')):
+                content_length = self._get_content_length(request)
+                if content_length is not None:
+                    request.add_unredirected_header(
+                            'Content-length', str(content_length))
+                else:
+                    request.add_unredirected_header(
+                            'Transfer-encoding', 'chunked')
+
+        sel_host = host
+        if request.has_proxy():
+            scheme, sel = _splittype(request.selector)
+            sel_host, sel_path = _splithost(sel)
+        if not request.has_header('Host'):
+            request.add_unredirected_header('Host', sel_host)
+        for name, value in self.parent.addheaders:
+            name = name.capitalize()
+            if not request.has_header(name):
+                request.add_unredirected_header(name, value)
+
+        return request
+
+    def do_open(self, http_class, req, **http_conn_args):
+        """Return an HTTPResponse object for the request, using http_class.
+
+        http_class must implement the HTTPConnection API from http.client.
+        """
+        host = req.host
+        if not host:
+            raise URLError('no host given')
+
+        # will parse host:port
+        h = http_class(host, timeout=req.timeout, **http_conn_args)
+        h.set_debuglevel(self._debuglevel)
+
+        headers = dict(req.unredirected_hdrs)
+        headers.update({k: v for k, v in req.headers.items()
+                        if k not in headers})
+
+        # TODO(jhylton): Should this be redesigned to handle
+        # persistent connections?
+
+        # We want to make an HTTP/1.1 request, but the addinfourl
+        # class isn't prepared to deal with a persistent connection.
+        # It will try to read all remaining data from the socket,
+        # which will block while the server waits for the next request.
+        # So make sure the connection gets closed after the (only)
+        # request.
+        headers["Connection"] = "close"
+        headers = {name.title(): val for name, val in headers.items()}
+
+        if req._tunnel_host:
+            tunnel_headers = {}
+            proxy_auth_hdr = "Proxy-Authorization"
+            if proxy_auth_hdr in headers:
+                tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
+                # Proxy-Authorization should not be sent to origin
+                # server.
+                del headers[proxy_auth_hdr]
+            h.set_tunnel(req._tunnel_host, headers=tunnel_headers)
+
+        try:
+            try:
+                h.request(req.get_method(), req.selector, req.data, headers,
+                          encode_chunked=req.has_header('Transfer-encoding'))
+            except OSError as err: # timeout error
+                raise URLError(err)
+            r = h.getresponse()
+        except:
+            h.close()
+            raise
+
+        # If the server does not send us a 'Connection: close' header,
+        # HTTPConnection assumes the socket should be left open. Manually
+        # mark the socket to be closed when this response object goes away.
+        if h.sock:
+            h.sock.close()
+            h.sock = None
+
+        r.url = req.get_full_url()
+        # This line replaces the .msg attribute of the HTTPResponse
+        # with .headers, because urllib clients expect the response to
+        # have the reason in .msg.  It would be good to mark this
+        # attribute as deprecated and get them to use info() or
+        # .headers.
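+        # (Illustrative effect: after this assignment, r.msg is the reason
+        # phrase such as 'OK', while the header block stays available via
+        # r.headers and r.info().)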
+ r.msg = r.reason + return r + + +class HTTPHandler(AbstractHTTPHandler): + + def http_open(self, req): + return self.do_open(http.client.HTTPConnection, req) + + http_request = AbstractHTTPHandler.do_request_ + +if hasattr(http.client, 'HTTPSConnection'): + + class HTTPSHandler(AbstractHTTPHandler): + + def __init__(self, debuglevel=None, context=None, check_hostname=None): + debuglevel = debuglevel if debuglevel is not None else http.client.HTTPSConnection.debuglevel + AbstractHTTPHandler.__init__(self, debuglevel) + if context is None: + http_version = http.client.HTTPSConnection._http_vsn + context = http.client._create_https_context(http_version) + if check_hostname is not None: + context.check_hostname = check_hostname + self._context = context + + def https_open(self, req): + return self.do_open(http.client.HTTPSConnection, req, + context=self._context) + + https_request = AbstractHTTPHandler.do_request_ + + __all__.append('HTTPSHandler') + +class HTTPCookieProcessor(BaseHandler): + def __init__(self, cookiejar=None): + import http.cookiejar + if cookiejar is None: + cookiejar = http.cookiejar.CookieJar() + self.cookiejar = cookiejar + + def http_request(self, request): + self.cookiejar.add_cookie_header(request) + return request + + def http_response(self, request, response): + self.cookiejar.extract_cookies(response, request) + return response + + https_request = http_request + https_response = http_response + +class UnknownHandler(BaseHandler): + def unknown_open(self, req): + type = req.type + raise URLError('unknown url type: %s' % type) + +def parse_keqv_list(l): + """Parse list of key=value strings where keys are not duplicated.""" + parsed = {} + for elt in l: + k, v = elt.split('=', 1) + if v[0] == '"' and v[-1] == '"': + v = v[1:-1] + parsed[k] = v + return parsed + +def parse_http_list(s): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Neither commas nor quotes count if they are escaped. + Only double-quotes count, not single-quotes. 
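+
+    For example (illustrative):
+
+        parse_http_list('a, "b, c", d') -> ['a', '"b, c"', 'd']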
+ """ + res = [] + part = '' + + escape = quote = False + for cur in s: + if escape: + part += cur + escape = False + continue + if quote: + if cur == '\\': + escape = True + continue + elif cur == '"': + quote = False + part += cur + continue + + if cur == ',': + res.append(part) + part = '' + continue + + if cur == '"': + quote = True + + part += cur + + # append last part + if part: + res.append(part) + + return [part.strip() for part in res] + +class FileHandler(BaseHandler): + # names for the localhost + names = None + def get_names(self): + if FileHandler.names is None: + try: + FileHandler.names = tuple( + socket.gethostbyname_ex('localhost')[2] + + socket.gethostbyname_ex(socket.gethostname())[2]) + except socket.gaierror: + FileHandler.names = (socket.gethostbyname('localhost'),) + return FileHandler.names + + # not entirely sure what the rules are here + def open_local_file(self, req): + import email.utils + import mimetypes + localfile = url2pathname(req.full_url, require_scheme=True, resolve_host=True) + try: + stats = os.stat(localfile) + size = stats.st_size + modified = email.utils.formatdate(stats.st_mtime, usegmt=True) + mtype = mimetypes.guess_file_type(localfile)[0] + headers = email.message_from_string( + 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' % + (mtype or 'text/plain', size, modified)) + origurl = pathname2url(localfile, add_scheme=True) + return addinfourl(open(localfile, 'rb'), headers, origurl) + except OSError as exp: + raise URLError(exp, exp.filename) + + file_open = open_local_file + +def _is_local_authority(authority, resolve): + # Compare hostnames + if not authority or authority == 'localhost': + return True + try: + hostname = socket.gethostname() + except (socket.gaierror, AttributeError): + pass + else: + if authority == hostname: + return True + # Compare IP addresses + if not resolve: + return False + try: + address = socket.gethostbyname(authority) + except (socket.gaierror, AttributeError, UnicodeEncodeError): + return False + return address in FileHandler().get_names() + +class FTPHandler(BaseHandler): + def ftp_open(self, req): + import ftplib + import mimetypes + host = req.host + if not host: + raise URLError('ftp error: no host given') + host, port = _splitport(host) + if port is None: + port = ftplib.FTP_PORT + else: + port = int(port) + + # username/password handling + user, host = _splituser(host) + if user: + user, passwd = _splitpasswd(user) + else: + passwd = None + host = unquote(host) + user = user or '' + passwd = passwd or '' + + try: + host = socket.gethostbyname(host) + except OSError as msg: + raise URLError(msg) + path, attrs = _splitattr(req.selector) + dirs = path.split('/') + dirs = list(map(unquote, dirs)) + dirs, file = dirs[:-1], dirs[-1] + if dirs and not dirs[0]: + dirs = dirs[1:] + fw = None + try: + fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout) + type = file and 'I' or 'D' + for attr in attrs: + attr, value = _splitvalue(attr) + if attr.lower() == 'type' and \ + value in ('a', 'A', 'i', 'I', 'd', 'D'): + type = value.upper() + fp, retrlen = fw.retrfile(file, type) + headers = "" + mtype = mimetypes.guess_type(req.full_url)[0] + if mtype: + headers += "Content-type: %s\n" % mtype + if retrlen is not None and retrlen >= 0: + headers += "Content-length: %d\n" % retrlen + headers = email.message_from_string(headers) + return addinfourl(fp, headers, req.full_url) + except Exception as exp: + if fw is not None and not fw.keepalive: + fw.close() + if isinstance(exp, ftplib.all_errors): + 
raise URLError(f"ftp error: {exp}") from exp + raise + + def connect_ftp(self, user, passwd, host, port, dirs, timeout): + return ftpwrapper(user, passwd, host, port, dirs, timeout, + persistent=False) + +class CacheFTPHandler(FTPHandler): + # XXX would be nice to have pluggable cache strategies + # XXX this stuff is definitely not thread safe + def __init__(self): + self.cache = {} + self.timeout = {} + self.soonest = 0 + self.delay = 60 + self.max_conns = 16 + + def setTimeout(self, t): + self.delay = t + + def setMaxConns(self, m): + self.max_conns = m + + def connect_ftp(self, user, passwd, host, port, dirs, timeout): + key = user, host, port, '/'.join(dirs), timeout + conn = self.cache.get(key) + if conn is None or not conn.keepalive: + if conn is not None: + conn.close() + conn = self.cache[key] = ftpwrapper(user, passwd, host, port, + dirs, timeout) + self.timeout[key] = time.time() + self.delay + self.check_cache() + return conn + + def check_cache(self): + # first check for old ones + t = time.time() + if self.soonest <= t: + for k, v in list(self.timeout.items()): + if v < t: + self.cache[k].close() + del self.cache[k] + del self.timeout[k] + self.soonest = min(list(self.timeout.values())) + + # then check the size + if len(self.cache) == self.max_conns: + for k, v in list(self.timeout.items()): + if v == self.soonest: + del self.cache[k] + del self.timeout[k] + break + self.soonest = min(list(self.timeout.values())) + + def clear_cache(self): + for conn in self.cache.values(): + conn.close() + self.cache.clear() + self.timeout.clear() + +class DataHandler(BaseHandler): + def data_open(self, req): + # data URLs as specified in RFC 2397. + # + # ignores POSTed data + # + # syntax: + # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data + # mediatype := [ type "/" subtype ] *( ";" parameter ) + # data := *urlchar + # parameter := attribute "=" value + url = req.full_url + + scheme, data = url.split(":",1) + mediatype, data = data.split(",",1) + + # Disallow control characters within mediatype. + if re.search(r"[\x00-\x1F\x7F]", mediatype): + raise ValueError( + "Control characters not allowed in data: mediatype") + + # even base64 encoded data URLs might be quoted so unquote in any case: + data = unquote_to_bytes(data) + if mediatype.endswith(";base64"): + data = base64.decodebytes(data) + mediatype = mediatype[:-7] + + if not mediatype: + mediatype = "text/plain;charset=US-ASCII" + + headers = email.message_from_string("Content-type: %s\nContent-length: %d\n" % + (mediatype, len(data))) + + return addinfourl(io.BytesIO(data), headers, url) + + +# Code moved from the old urllib module + +def url2pathname(url, *, require_scheme=False, resolve_host=False): + """Convert the given file URL to a local file system path. + + The 'file:' scheme prefix must be omitted unless *require_scheme* + is set to true. + + The URL authority may be resolved with gethostbyname() if + *resolve_host* is set to true. + """ + if not require_scheme: + url = 'file:' + url + scheme, authority, url = urlsplit(url)[:3] # Discard query and fragment. + if scheme != 'file': + raise URLError("URL is missing a 'file:' scheme") + if os.name == 'nt': + if authority[1:2] == ':': + # e.g. file://c:/file.txt + url = authority + url + elif not _is_local_authority(authority, resolve_host): + # e.g. file://server/share/file.txt + url = '//' + authority + url + elif url[:3] == '///': + # e.g. 
file://///server/share/file.txt + url = url[1:] + else: + if url[:1] == '/' and url[2:3] in (':', '|'): + # Skip past extra slash before DOS drive in URL path. + url = url[1:] + if url[1:2] == '|': + # Older URLs use a pipe after a drive letter + url = url[:1] + ':' + url[2:] + url = url.replace('/', '\\') + elif not _is_local_authority(authority, resolve_host): + raise URLError("file:// scheme is supported only on localhost") + encoding = sys.getfilesystemencoding() + errors = sys.getfilesystemencodeerrors() + return unquote(url, encoding=encoding, errors=errors) + + +def pathname2url(pathname, *, add_scheme=False): + """Convert the given local file system path to a file URL. + + The 'file:' scheme prefix is omitted unless *add_scheme* + is set to true. + """ + if os.name == 'nt': + pathname = pathname.replace('\\', '/') + encoding = sys.getfilesystemencoding() + errors = sys.getfilesystemencodeerrors() + scheme = 'file:' if add_scheme else '' + drive, root, tail = os.path.splitroot(pathname) + if drive: + # First, clean up some special forms. We are going to sacrifice the + # additional information anyway + if drive[:4] == '//?/': + drive = drive[4:] + if drive[:4].upper() == 'UNC/': + drive = '//' + drive[4:] + if drive[1:] == ':': + # DOS drive specified. Add three slashes to the start, producing + # an authority section with a zero-length authority, and a path + # section starting with a single slash. + drive = '///' + drive + drive = quote(drive, encoding=encoding, errors=errors, safe='/:') + elif root: + # Add explicitly empty authority to absolute path. If the path + # starts with exactly one slash then this change is mostly + # cosmetic, but if it begins with two or more slashes then this + # avoids interpreting the path as a URL authority. 
+ root = '//' + root + tail = quote(tail, encoding=encoding, errors=errors) + return scheme + drive + root + tail + + +# Utility functions + +_localhost = None +def localhost(): + """Return the IP address of the magic hostname 'localhost'.""" + global _localhost + if _localhost is None: + _localhost = socket.gethostbyname('localhost') + return _localhost + +_thishost = None +def thishost(): + """Return the IP addresses of the current host.""" + global _thishost + if _thishost is None: + try: + _thishost = tuple(socket.gethostbyname_ex(socket.gethostname())[2]) + except socket.gaierror: + _thishost = tuple(socket.gethostbyname_ex('localhost')[2]) + return _thishost + +_ftperrors = None +def ftperrors(): + """Return the set of errors raised by the FTP class.""" + global _ftperrors + if _ftperrors is None: + import ftplib + _ftperrors = ftplib.all_errors + return _ftperrors + +_noheaders = None +def noheaders(): + """Return an empty email Message object.""" + global _noheaders + if _noheaders is None: + _noheaders = email.message_from_string("") + return _noheaders + + +# Utility classes + +class ftpwrapper: + """Class used by open_ftp() for cache of open FTP connections.""" + + def __init__(self, user, passwd, host, port, dirs, timeout=None, + persistent=True): + self.user = user + self.passwd = passwd + self.host = host + self.port = port + self.dirs = dirs + self.timeout = timeout + self.refcount = 0 + self.keepalive = persistent + try: + self.init() + except: + self.close() + raise + + def init(self): + import ftplib + self.busy = 0 + self.ftp = ftplib.FTP() + self.ftp.connect(self.host, self.port, self.timeout) + self.ftp.login(self.user, self.passwd) + _target = '/'.join(self.dirs) + self.ftp.cwd(_target) + + def retrfile(self, file, type): + import ftplib + self.endtransfer() + if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1 + else: cmd = 'TYPE ' + type; isdir = 0 + try: + self.ftp.voidcmd(cmd) + except ftplib.all_errors: + self.init() + self.ftp.voidcmd(cmd) + conn = None + if file and not isdir: + # Try to retrieve as a file + try: + cmd = 'RETR ' + file + conn, retrlen = self.ftp.ntransfercmd(cmd) + except ftplib.error_perm as reason: + if str(reason)[:3] != '550': + raise URLError(f'ftp error: {reason}') from reason + if not conn: + # Set transfer mode to ASCII! + self.ftp.voidcmd('TYPE A') + # Try a directory listing. Verify that directory exists. + if file: + pwd = self.ftp.pwd() + try: + try: + self.ftp.cwd(file) + except ftplib.error_perm as reason: + raise URLError('ftp error: %r' % reason) from reason + finally: + self.ftp.cwd(pwd) + cmd = 'LIST ' + file + else: + cmd = 'LIST' + conn, retrlen = self.ftp.ntransfercmd(cmd) + self.busy = 1 + + ftpobj = addclosehook(conn.makefile('rb'), self.file_close) + self.refcount += 1 + conn.close() + # Pass back both a suitably decorated object and a retrieval length + return (ftpobj, retrlen) + + def endtransfer(self): + if not self.busy: + return + self.busy = 0 + try: + self.ftp.voidresp() + except ftperrors(): + pass + + def close(self): + self.keepalive = False + if self.refcount <= 0: + self.real_close() + + def file_close(self): + self.endtransfer() + self.refcount -= 1 + if self.refcount <= 0 and not self.keepalive: + self.real_close() + + def real_close(self): + self.endtransfer() + try: + self.ftp.close() + except ftperrors(): + pass + +# Proxy handling +def getproxies_environment(): + """Return a dictionary of scheme -> proxy server URL mappings. 
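+
+    For example (illustrative), with the environment variable
+    http_proxy=http://proxy.example.com:3128 set, the result includes
+    {'http': 'http://proxy.example.com:3128'}.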
+ + Scan the environment for variables named _proxy; + this seems to be the standard convention. + """ + # in order to prefer lowercase variables, process environment in + # two passes: first matches any, second pass matches lowercase only + + # select only environment variables which end in (after making lowercase) _proxy + proxies = {} + environment = [] + for name in os.environ: + # fast screen underscore position before more expensive case-folding + if len(name) > 5 and name[-6] == "_" and name[-5:].lower() == "proxy": + value = os.environ[name] + proxy_name = name[:-6].lower() + environment.append((name, value, proxy_name)) + if value: + proxies[proxy_name] = value + # CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY + # (non-all-lowercase) as it may be set from the web server by a "Proxy:" + # header from the client + # If "proxy" is lowercase, it will still be used thanks to the next block + if 'REQUEST_METHOD' in os.environ: + proxies.pop('http', None) + for name, value, proxy_name in environment: + # not case-folded, checking here for lower-case env vars only + if name[-6:] == '_proxy': + if value: + proxies[proxy_name] = value + else: + proxies.pop(proxy_name, None) + return proxies + +def proxy_bypass_environment(host, proxies=None): + """Test if proxies should not be used for a particular host. + + Checks the proxy dict for the value of no_proxy, which should + be a list of comma separated DNS suffixes, or '*' for all hosts. + + """ + if proxies is None: + proxies = getproxies_environment() + # don't bypass, if no_proxy isn't specified + try: + no_proxy = proxies['no'] + except KeyError: + return False + # '*' is special case for always bypass + if no_proxy == '*': + return True + host = host.lower() + # strip port off host + hostonly, port = _splitport(host) + # check if the host ends with any of the DNS suffixes + for name in no_proxy.split(','): + name = name.strip() + if name: + name = name.lstrip('.') # ignore leading dots + name = name.lower() + if hostonly == name or host == name: + return True + name = '.' + name + if hostonly.endswith(name) or host.endswith(name): + return True + # otherwise, don't bypass + return False + + +# This code tests an OSX specific data structure but is testable on all +# platforms +def _proxy_bypass_macosx_sysconf(host, proxy_settings): + """ + Return True iff this host shouldn't be accessed using a proxy + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + + proxy_settings come from _scproxy._get_proxy_settings or get mocked ie: + { 'exclude_simple': bool, + 'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.1', '10.0/16'] + } + """ + from fnmatch import fnmatch + from ipaddress import AddressValueError, IPv4Address + + hostonly, port = _splitport(host) + + def ip2num(ipAddr): + parts = ipAddr.split('.') + parts = list(map(int, parts)) + if len(parts) != 4: + parts = (parts + [0, 0, 0, 0])[:4] + return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3] + + # Check for simple host names: + if '.' 
not in host: + if proxy_settings['exclude_simple']: + return True + + hostIP = None + try: + hostIP = int(IPv4Address(hostonly)) + except AddressValueError: + pass + + for value in proxy_settings.get('exceptions', ()): + # Items in the list are strings like these: *.local, 169.254/16 + if not value: continue + + m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value) + if m is not None and hostIP is not None: + base = ip2num(m.group(1)) + mask = m.group(2) + if mask is None: + mask = 8 * (m.group(1).count('.') + 1) + else: + mask = int(mask[1:]) + + if mask < 0 or mask > 32: + # System libraries ignore invalid prefix lengths + continue + + mask = 32 - mask + + if (hostIP >> mask) == (base >> mask): + return True + + elif fnmatch(host, value): + return True + + return False + + +# Same as _proxy_bypass_macosx_sysconf, testable on all platforms +def _proxy_bypass_winreg_override(host, override): + """Return True if the host should bypass the proxy server. + + The proxy override list is obtained from the Windows + Internet settings proxy override registry value. + + An example of a proxy override value is: + "www.example.com;*.example.net; 192.168.0.1" + """ + from fnmatch import fnmatch + + host, _ = _splitport(host) + proxy_override = override.split(';') + for test in proxy_override: + test = test.strip() + # "" should bypass the proxy server for all intranet addresses + if test == '': + if '.' not in host: + return True + elif fnmatch(host, test): + return True + return False + + +if sys.platform == 'darwin': + from _scproxy import _get_proxy_settings, _get_proxies + + def proxy_bypass_macosx_sysconf(host): + proxy_settings = _get_proxy_settings() + return _proxy_bypass_macosx_sysconf(host, proxy_settings) + + def getproxies_macosx_sysconf(): + """Return a dictionary of scheme -> proxy server URL mappings. + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + """ + return _get_proxies() + + + + def proxy_bypass(host): + """Return True, if host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or from the MacOSX framework SystemConfiguration. + + """ + proxies = getproxies_environment() + if proxies: + return proxy_bypass_environment(host, proxies) + else: + return proxy_bypass_macosx_sysconf(host) + + def getproxies(): + return getproxies_environment() or getproxies_macosx_sysconf() + + +elif os.name == 'nt': + def getproxies_registry(): + """Return a dictionary of scheme -> proxy server URL mappings. + + Win32 uses the registry to store proxies. + + """ + proxies = {} + try: + import winreg + except ImportError: + # Std module, so should be around - but you never know! + return proxies + try: + internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, + r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') + proxyEnable = winreg.QueryValueEx(internetSettings, + 'ProxyEnable')[0] + if proxyEnable: + # Returned as Unicode but problems if not converted to ASCII + proxyServer = str(winreg.QueryValueEx(internetSettings, + 'ProxyServer')[0]) + if '=' not in proxyServer and ';' not in proxyServer: + # Use one setting for all protocols. 
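+                    # Illustrative example: a bare registry value such as
+                    # 'proxy.example.com:8080' is expanded below so that
+                    # http, https and ftp all use the same proxy.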
+ proxyServer = 'http={0};https={0};ftp={0}'.format(proxyServer) + for p in proxyServer.split(';'): + protocol, address = p.split('=', 1) + # See if address has a type:// prefix + if not re.match('(?:[^/:]+)://', address): + # Add type:// prefix to address without specifying type + if protocol in ('http', 'https', 'ftp'): + # The default proxy type of Windows is HTTP + address = 'http://' + address + elif protocol == 'socks': + address = 'socks://' + address + proxies[protocol] = address + # Use SOCKS proxy for HTTP(S) protocols + if proxies.get('socks'): + # The default SOCKS proxy type of Windows is SOCKS4 + address = re.sub(r'^socks://', 'socks4://', proxies['socks']) + proxies['http'] = proxies.get('http') or address + proxies['https'] = proxies.get('https') or address + internetSettings.Close() + except (OSError, ValueError, TypeError): + # Either registry key not found etc, or the value in an + # unexpected format. + # proxies already set up to be empty so nothing to do + pass + return proxies + + def getproxies(): + """Return a dictionary of scheme -> proxy server URL mappings. + + Returns settings gathered from the environment, if specified, + or the registry. + + """ + return getproxies_environment() or getproxies_registry() + + def proxy_bypass_registry(host): + try: + import winreg + except ImportError: + # Std modules, so should be around - but you never know! + return False + try: + internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, + r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') + proxyEnable = winreg.QueryValueEx(internetSettings, + 'ProxyEnable')[0] + proxyOverride = str(winreg.QueryValueEx(internetSettings, + 'ProxyOverride')[0]) + # ^^^^ Returned as Unicode but problems if not converted to ASCII + except OSError: + return False + if not proxyEnable or not proxyOverride: + return False + return _proxy_bypass_winreg_override(host, proxyOverride) + + def proxy_bypass(host): + """Return True, if host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or the registry. 
+ + """ + proxies = getproxies_environment() + if proxies: + return proxy_bypass_environment(host, proxies) + else: + return proxy_bypass_registry(host) + +else: + # By default use environment variables + getproxies = getproxies_environment + proxy_bypass = proxy_bypass_environment diff --git a/Python313_13_x86_Template/Lib/urllib/response.py b/Python314_4_x86_Template/Lib/urllib/response.py similarity index 100% rename from Python313_13_x86_Template/Lib/urllib/response.py rename to Python314_4_x86_Template/Lib/urllib/response.py diff --git a/Python314_4_x86_Template/Lib/urllib/robotparser.py b/Python314_4_x86_Template/Lib/urllib/robotparser.py new file mode 100644 index 00000000..4009fd6b --- /dev/null +++ b/Python314_4_x86_Template/Lib/urllib/robotparser.py @@ -0,0 +1,288 @@ +""" robotparser.py + + Copyright (C) 2000 Bastian Kleineidam + + You can choose between two licenses when using this package: + 1) GNU GPLv2 + 2) PSF license for Python 2.2 + + The robots.txt Exclusion Protocol is implemented as specified in + http://www.robotstxt.org/norobots-rfc.txt +""" + +import collections +import re +import urllib.error +import urllib.parse +import urllib.request + +__all__ = ["RobotFileParser"] + +RequestRate = collections.namedtuple("RequestRate", "requests seconds") + + +def normalize(path): + unquoted = urllib.parse.unquote(path, errors='surrogateescape') + return urllib.parse.quote(unquoted, errors='surrogateescape') + +def normalize_path(path): + path, sep, query = path.partition('?') + path = normalize(path) + if sep: + query = re.sub(r'[^=&]+', lambda m: normalize(m[0]), query) + path += '?' + query + return path + + +class RobotFileParser: + """ This class provides a set of methods to read, parse and answer + questions about a single robots.txt file. + + """ + + def __init__(self, url=''): + self.entries = [] + self.sitemaps = [] + self.default_entry = None + self.disallow_all = False + self.allow_all = False + self.set_url(url) + self.last_checked = 0 + + def mtime(self): + """Returns the time the robots.txt file was last fetched. + + This is useful for long-running web spiders that need to + check for new robots.txt files periodically. + + """ + return self.last_checked + + def modified(self): + """Sets the time the robots.txt file was last fetched to the + current time. + + """ + import time + self.last_checked = time.time() + + def set_url(self, url): + """Sets the URL referring to a robots.txt file.""" + self.url = url + self.host, self.path = urllib.parse.urlsplit(url)[1:3] + + def read(self): + """Reads the robots.txt URL and feeds it to the parser.""" + try: + f = urllib.request.urlopen(self.url) + except urllib.error.HTTPError as err: + if err.code in (401, 403): + self.disallow_all = True + elif err.code >= 400 and err.code < 500: + self.allow_all = True + err.close() + else: + raw = f.read() + self.parse(raw.decode("utf-8", "surrogateescape").splitlines()) + + def _add_entry(self, entry): + if "*" in entry.useragents: + # the default entry is considered last + if self.default_entry is None: + # the first default entry wins + self.default_entry = entry + else: + self.entries.append(entry) + + def parse(self, lines): + """Parse the input lines from a robots.txt file. + + We allow that a user-agent: line is not preceded by + one or more blank lines. 
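+
+        For example (illustrative), the lines
+
+            User-agent: *
+            Disallow: /private/
+
+        produce a default entry disallowing '/private/' for every agent.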
+ """ + # states: + # 0: start state + # 1: saw user-agent line + # 2: saw an allow or disallow line + state = 0 + entry = Entry() + + self.modified() + for line in lines: + if not line: + if state == 1: + entry = Entry() + state = 0 + elif state == 2: + self._add_entry(entry) + entry = Entry() + state = 0 + # remove optional comment and strip line + i = line.find('#') + if i >= 0: + line = line[:i] + line = line.strip() + if not line: + continue + line = line.split(':', 1) + if len(line) == 2: + line[0] = line[0].strip().lower() + line[1] = line[1].strip() + if line[0] == "user-agent": + if state == 2: + self._add_entry(entry) + entry = Entry() + entry.useragents.append(line[1]) + state = 1 + elif line[0] == "disallow": + if state != 0: + entry.rulelines.append(RuleLine(line[1], False)) + state = 2 + elif line[0] == "allow": + if state != 0: + entry.rulelines.append(RuleLine(line[1], True)) + state = 2 + elif line[0] == "crawl-delay": + if state != 0: + # before trying to convert to int we need to make + # sure that robots.txt has valid syntax otherwise + # it will crash + if line[1].strip().isdigit(): + entry.delay = int(line[1]) + state = 2 + elif line[0] == "request-rate": + if state != 0: + numbers = line[1].split('/') + # check if all values are sane + if (len(numbers) == 2 and numbers[0].strip().isdigit() + and numbers[1].strip().isdigit()): + entry.req_rate = RequestRate(int(numbers[0]), int(numbers[1])) + state = 2 + elif line[0] == "sitemap": + # According to http://www.sitemaps.org/protocol.html + # "This directive is independent of the user-agent line, + # so it doesn't matter where you place it in your file." + # Therefore we do not change the state of the parser. + self.sitemaps.append(line[1]) + if state == 2: + self._add_entry(entry) + + def can_fetch(self, useragent, url): + """using the parsed robots.txt decide if useragent can fetch url""" + if self.disallow_all: + return False + if self.allow_all: + return True + # Until the robots.txt file has been read or found not + # to exist, we must assume that no url is allowable. + # This prevents false positives when a user erroneously + # calls can_fetch() before calling read(). + if not self.last_checked: + return False + # search for given user agent matches + # the first match counts + # TODO: The private API is used in order to preserve an empty query. + # This is temporary until the public API starts supporting this feature. 
+ parsed_url = urllib.parse._urlsplit(url, '') + url = urllib.parse._urlunsplit(None, None, *parsed_url[2:]) + url = normalize_path(url) + if not url: + url = "/" + for entry in self.entries: + if entry.applies_to(useragent): + return entry.allowance(url) + # try the default entry last + if self.default_entry: + return self.default_entry.allowance(url) + # agent not found ==> access granted + return True + + def crawl_delay(self, useragent): + if not self.mtime(): + return None + for entry in self.entries: + if entry.applies_to(useragent): + return entry.delay + if self.default_entry: + return self.default_entry.delay + return None + + def request_rate(self, useragent): + if not self.mtime(): + return None + for entry in self.entries: + if entry.applies_to(useragent): + return entry.req_rate + if self.default_entry: + return self.default_entry.req_rate + return None + + def site_maps(self): + if not self.sitemaps: + return None + return self.sitemaps + + def __str__(self): + entries = self.entries + if self.default_entry is not None: + entries = entries + [self.default_entry] + return '\n\n'.join(map(str, entries)) + +class RuleLine: + """A rule line is a single "Allow:" (allowance==True) or "Disallow:" + (allowance==False) followed by a path.""" + def __init__(self, path, allowance): + if path == '' and not allowance: + # an empty value means allow all + allowance = True + self.path = normalize_path(path) + self.allowance = allowance + + def applies_to(self, filename): + return self.path == "*" or filename.startswith(self.path) + + def __str__(self): + return ("Allow" if self.allowance else "Disallow") + ": " + self.path + + +class Entry: + """An entry has one or more user-agents and zero or more rulelines""" + def __init__(self): + self.useragents = [] + self.rulelines = [] + self.delay = None + self.req_rate = None + + def __str__(self): + ret = [] + for agent in self.useragents: + ret.append(f"User-agent: {agent}") + if self.delay is not None: + ret.append(f"Crawl-delay: {self.delay}") + if self.req_rate is not None: + rate = self.req_rate + ret.append(f"Request-rate: {rate.requests}/{rate.seconds}") + ret.extend(map(str, self.rulelines)) + return '\n'.join(ret) + + def applies_to(self, useragent): + """check if this entry applies to the specified agent""" + # split the name token and make it lower case + useragent = useragent.split("/")[0].lower() + for agent in self.useragents: + if agent == '*': + # we have the catch-all agent + return True + agent = agent.lower() + if agent in useragent: + return True + return False + + def allowance(self, filename): + """Preconditions: + - our agent applies to this entry + - filename is URL encoded""" + for line in self.rulelines: + if line.applies_to(filename): + return line.allowance + return True diff --git a/Python314_4_x86_Template/Lib/uuid.py b/Python314_4_x86_Template/Lib/uuid.py new file mode 100644 index 00000000..313f2fc4 --- /dev/null +++ b/Python314_4_x86_Template/Lib/uuid.py @@ -0,0 +1,1007 @@ +r"""UUID objects (universally unique identifiers) according to RFC 4122/9562. + +This module provides immutable UUID objects (class UUID) and functions for +generating UUIDs corresponding to a specific UUID version as specified in +RFC 4122/9562, e.g., uuid1() for UUID version 1, uuid3() for UUID version 3, +and so on. + +Note that UUID version 2 is deliberately omitted as it is outside the scope +of the RFC. + +If all you want is a unique ID, you should probably call uuid1() or uuid4(). 
+Note that uuid1() may compromise privacy since it creates a UUID containing
+the computer's network address.  uuid4() creates a random UUID.
+
+Typical usage:
+
+    >>> import uuid
+
+    # make a UUID based on the host ID and current time
+    >>> uuid.uuid1()    # doctest: +SKIP
+    UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
+
+    # make a UUID using an MD5 hash of a namespace UUID and a name
+    >>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
+    UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
+
+    # make a random UUID
+    >>> uuid.uuid4()    # doctest: +SKIP
+    UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
+
+    # make a UUID using a SHA-1 hash of a namespace UUID and a name
+    >>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
+    UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
+
+    # make a UUID from a string of hex digits (braces and hyphens ignored)
+    >>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
+
+    # convert a UUID to a string of hex digits in standard form
+    >>> str(x)
+    '00010203-0405-0607-0809-0a0b0c0d0e0f'
+
+    # get the raw 16 bytes of the UUID
+    >>> x.bytes
+    b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
+
+    # make a UUID from a 16-byte string
+    >>> uuid.UUID(bytes=x.bytes)
+    UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
+
+    # get the Nil UUID
+    >>> uuid.NIL
+    UUID('00000000-0000-0000-0000-000000000000')
+
+    # get the Max UUID
+    >>> uuid.MAX
+    UUID('ffffffff-ffff-ffff-ffff-ffffffffffff')
+"""
+
+import os
+import sys
+import time
+
+from enum import Enum, _simple_enum
+
+
+__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
+
+# The recognized platforms - known behaviors
+if sys.platform in {'win32', 'darwin', 'emscripten', 'wasi'}:
+    _AIX = _LINUX = False
+elif sys.platform == 'linux':
+    _LINUX = True
+    _AIX = False
+else:
+    import platform
+    _platform_system = platform.system()
+    _AIX = _platform_system == 'AIX'
+    _LINUX = _platform_system in ('Linux', 'Android')
+
+_MAC_DELIM = b':'
+_MAC_OMITS_LEADING_ZEROES = False
+if _AIX:
+    _MAC_DELIM = b'.'
+    _MAC_OMITS_LEADING_ZEROES = True
+
+RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
+    'reserved for NCS compatibility', 'specified in RFC 4122',
+    'reserved for Microsoft compatibility', 'reserved for future definition']
+
+int_ = int        # The built-in int type
+bytes_ = bytes    # The built-in bytes type
+
+
+@_simple_enum(Enum)
+class SafeUUID:
+    safe = 0
+    unsafe = -1
+    unknown = None
+
+
+_UINT_128_MAX = (1 << 128) - 1
+# 128-bit mask to clear the variant and version bits of a UUID integral value
+_RFC_4122_CLEARFLAGS_MASK = ~((0xf000 << 64) | (0xc000 << 48))
+# RFC 4122 variant bits and version bits to activate on a UUID integral value.
+_RFC_4122_VERSION_1_FLAGS = ((1 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_3_FLAGS = ((3 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_4_FLAGS = ((4 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_5_FLAGS = ((5 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_6_FLAGS = ((6 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_7_FLAGS = ((7 << 76) | (0x8000 << 48))
+_RFC_4122_VERSION_8_FLAGS = ((8 << 76) | (0x8000 << 48))
+
+
+class UUID:
+    """Instances of the UUID class represent UUIDs as specified in RFC 4122.
+    UUID objects are immutable, hashable, and usable as dictionary keys.
+    Converting a UUID to a string with str() yields something in the form
+    '12345678-1234-1234-1234-123456789abc'.
The UUID constructor accepts + five possible forms: a similar string of hexadecimal digits, or a tuple + of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and + 48-bit values respectively) as an argument named 'fields', or a string + of 16 bytes (with all the integer fields in big-endian order) as an + argument named 'bytes', or a string of 16 bytes (with the first three + fields in little-endian order) as an argument named 'bytes_le', or a + single 128-bit integer as an argument named 'int'. + + UUIDs have these read-only attributes: + + bytes the UUID as a 16-byte string (containing the six + integer fields in big-endian byte order) + + bytes_le the UUID as a 16-byte string (with time_low, time_mid, + and time_hi_version in little-endian byte order) + + fields a tuple of the six integer fields of the UUID, + which are also available as six individual attributes + and two derived attributes. Those attributes are not + always relevant to all UUID versions: + + The 'time_*' attributes are only relevant to version 1. + + The 'clock_seq*' and 'node' attributes are only relevant + to versions 1 and 6. + + The 'time' attribute is only relevant to versions 1, 6 + and 7. + + time_low the first 32 bits of the UUID + time_mid the next 16 bits of the UUID + time_hi_version the next 16 bits of the UUID + clock_seq_hi_variant the next 8 bits of the UUID + clock_seq_low the next 8 bits of the UUID + node the last 48 bits of the UUID + + time the 60-bit timestamp for UUIDv1/v6, + or the 48-bit timestamp for UUIDv7 + clock_seq the 14-bit sequence number + + hex the UUID as a 32-character hexadecimal string + + int the UUID as a 128-bit integer + + urn the UUID as a URN as specified in RFC 4122/9562 + + variant the UUID variant (one of the constants RESERVED_NCS, + RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE) + + version the UUID version number (1 through 8, meaningful only + when the variant is RFC_4122) + + is_safe An enum indicating whether the UUID has been generated in + a way that is safe for multiprocessing applications, via + uuid_generate_time_safe(3). + """ + + __slots__ = ('int', 'is_safe', '__weakref__') + + def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, + int=None, version=None, + *, is_safe=SafeUUID.unknown): + r"""Create a UUID from either a string of 32 hexadecimal digits, + a string of 16 bytes as the 'bytes' argument, a string of 16 bytes + in little-endian order as the 'bytes_le' argument, a tuple of six + integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version, + 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as + the 'fields' argument, or a single 128-bit integer as the 'int' + argument. When a string of hex digits is given, curly braces, + hyphens, and a URN prefix are all optional. For example, these + expressions all yield the same UUID: + + UUID('{12345678-1234-5678-1234-567812345678}') + UUID('12345678123456781234567812345678') + UUID('urn:uuid:12345678-1234-5678-1234-567812345678') + UUID(bytes='\x12\x34\x56\x78'*4) + UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' + + '\x12\x34\x56\x78\x12\x34\x56\x78') + UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678)) + UUID(int=0x12345678123456781234567812345678) + + Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must + be given. The 'version' argument is optional; if given, the resulting + UUID will have its variant and version set according to RFC 4122, + overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. 
+ + is_safe is an enum exposed as an attribute on the instance. It + indicates whether the UUID has been generated in a way that is safe + for multiprocessing applications, via uuid_generate_time_safe(3). + """ + + if [hex, bytes, bytes_le, fields, int].count(None) != 4: + raise TypeError('one of the hex, bytes, bytes_le, fields, ' + 'or int arguments must be given') + if int is not None: + pass + elif hex is not None: + hex = hex.replace('urn:', '').replace('uuid:', '') + hex = hex.strip('{}').replace('-', '') + if len(hex) != 32: + raise ValueError('badly formed hexadecimal UUID string') + int = int_(hex, 16) + elif bytes_le is not None: + if len(bytes_le) != 16: + raise ValueError('bytes_le is not a 16-char string') + assert isinstance(bytes_le, bytes_), repr(bytes_le) + bytes = (bytes_le[4-1::-1] + bytes_le[6-1:4-1:-1] + + bytes_le[8-1:6-1:-1] + bytes_le[8:]) + int = int_.from_bytes(bytes) # big endian + elif bytes is not None: + if len(bytes) != 16: + raise ValueError('bytes is not a 16-char string') + assert isinstance(bytes, bytes_), repr(bytes) + int = int_.from_bytes(bytes) # big endian + elif fields is not None: + if len(fields) != 6: + raise ValueError('fields is not a 6-tuple') + (time_low, time_mid, time_hi_version, + clock_seq_hi_variant, clock_seq_low, node) = fields + if not 0 <= time_low < (1 << 32): + raise ValueError('field 1 out of range (need a 32-bit value)') + if not 0 <= time_mid < (1 << 16): + raise ValueError('field 2 out of range (need a 16-bit value)') + if not 0 <= time_hi_version < (1 << 16): + raise ValueError('field 3 out of range (need a 16-bit value)') + if not 0 <= clock_seq_hi_variant < (1 << 8): + raise ValueError('field 4 out of range (need an 8-bit value)') + if not 0 <= clock_seq_low < (1 << 8): + raise ValueError('field 5 out of range (need an 8-bit value)') + if not 0 <= node < (1 << 48): + raise ValueError('field 6 out of range (need a 48-bit value)') + clock_seq = (clock_seq_hi_variant << 8) | clock_seq_low + int = ((time_low << 96) | (time_mid << 80) | + (time_hi_version << 64) | (clock_seq << 48) | node) + if not 0 <= int <= _UINT_128_MAX: + raise ValueError('int is out of range (need a 128-bit value)') + if version is not None: + if not 1 <= version <= 8: + raise ValueError('illegal version number') + # clear the variant and the version number bits + int &= _RFC_4122_CLEARFLAGS_MASK + # Set the variant to RFC 4122/9562. + int |= 0x8000_0000_0000_0000 # (0x8000 << 48) + # Set the version number. + int |= version << 76 + object.__setattr__(self, 'int', int) + object.__setattr__(self, 'is_safe', is_safe) + + @classmethod + def _from_int(cls, value): + """Create a UUID from an integer *value*. Internal use only.""" + assert 0 <= value <= _UINT_128_MAX, repr(value) + self = object.__new__(cls) + object.__setattr__(self, 'int', value) + object.__setattr__(self, 'is_safe', SafeUUID.unknown) + return self + + def __getstate__(self): + d = {'int': self.int} + if self.is_safe != SafeUUID.unknown: + # is_safe is a SafeUUID instance. Return just its value, so that + # it can be un-pickled in older Python versions without SafeUUID. 
+ d['is_safe'] = self.is_safe.value + return d + + def __setstate__(self, state): + object.__setattr__(self, 'int', state['int']) + # is_safe was added in 3.7; it is also omitted when it is "unknown" + object.__setattr__(self, 'is_safe', + SafeUUID(state['is_safe']) + if 'is_safe' in state else SafeUUID.unknown) + + def __eq__(self, other): + if isinstance(other, UUID): + return self.int == other.int + return NotImplemented + + # Q. What's the value of being able to sort UUIDs? + # A. Use them as keys in a B-Tree or similar mapping. + + def __lt__(self, other): + if isinstance(other, UUID): + return self.int < other.int + return NotImplemented + + def __gt__(self, other): + if isinstance(other, UUID): + return self.int > other.int + return NotImplemented + + def __le__(self, other): + if isinstance(other, UUID): + return self.int <= other.int + return NotImplemented + + def __ge__(self, other): + if isinstance(other, UUID): + return self.int >= other.int + return NotImplemented + + def __hash__(self): + return hash(self.int) + + def __int__(self): + return self.int + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, str(self)) + + def __setattr__(self, name, value): + raise TypeError('UUID objects are immutable') + + def __str__(self): + x = self.hex + return f'{x[:8]}-{x[8:12]}-{x[12:16]}-{x[16:20]}-{x[20:]}' + + @property + def bytes(self): + return self.int.to_bytes(16) # big endian + + @property + def bytes_le(self): + bytes = self.bytes + return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] + + bytes[8:]) + + @property + def fields(self): + return (self.time_low, self.time_mid, self.time_hi_version, + self.clock_seq_hi_variant, self.clock_seq_low, self.node) + + @property + def time_low(self): + return self.int >> 96 + + @property + def time_mid(self): + return (self.int >> 80) & 0xffff + + @property + def time_hi_version(self): + return (self.int >> 64) & 0xffff + + @property + def clock_seq_hi_variant(self): + return (self.int >> 56) & 0xff + + @property + def clock_seq_low(self): + return (self.int >> 48) & 0xff + + @property + def time(self): + if self.version == 6: + # time_hi (32) | time_mid (16) | ver (4) | time_lo (12) | ... (64) + time_hi = self.int >> 96 + time_lo = (self.int >> 64) & 0x0fff + return time_hi << 28 | (self.time_mid << 12) | time_lo + elif self.version == 7: + # unix_ts_ms (48) | ... (80) + return self.int >> 80 + else: + # time_lo (32) | time_mid (16) | ver (4) | time_hi (12) | ... (64) + # + # For compatibility purposes, we do not warn or raise when the + # version is not 1 (timestamp is irrelevant to other versions). + time_hi = (self.int >> 64) & 0x0fff + time_lo = self.int >> 96 + return time_hi << 48 | (self.time_mid << 32) | time_lo + + @property + def clock_seq(self): + return (((self.clock_seq_hi_variant & 0x3f) << 8) | + self.clock_seq_low) + + @property + def node(self): + return self.int & 0xffffffffffff + + @property + def hex(self): + return self.bytes.hex() + + @property + def urn(self): + return 'urn:uuid:' + str(self) + + @property + def variant(self): + if not self.int & (0x8000 << 48): + return RESERVED_NCS + elif not self.int & (0x4000 << 48): + return RFC_4122 + elif not self.int & (0x2000 << 48): + return RESERVED_MICROSOFT + else: + return RESERVED_FUTURE + + @property + def version(self): + # The version bits are only meaningful for RFC 4122/9562 UUIDs. 
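+        # For any other variant the version is not defined, so this property
+        # implicitly returns None.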
+ if self.variant == RFC_4122: + return int((self.int >> 76) & 0xf) + + +def _get_command_stdout(command, *args): + import io, os, shutil, subprocess + + try: + path_dirs = os.environ.get('PATH', os.defpath).split(os.pathsep) + path_dirs.extend(['/sbin', '/usr/sbin']) + executable = shutil.which(command, path=os.pathsep.join(path_dirs)) + if executable is None: + return None + # LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output + # on stderr (Note: we don't have an example where the words we search + # for are actually localized, but in theory some system could do so.) + env = dict(os.environ) + env['LC_ALL'] = 'C' + # Empty strings will be quoted by popen so we should just omit it + if args != ('',): + command = (executable, *args) + else: + command = (executable,) + proc = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + env=env) + if not proc: + return None + stdout, stderr = proc.communicate() + return io.BytesIO(stdout) + except (OSError, subprocess.SubprocessError): + return None + + +# For MAC (a.k.a. IEEE 802, or EUI-48) addresses, the second least significant +# bit of the first octet signifies whether the MAC address is universally (0) +# or locally (1) administered. Network cards from hardware manufacturers will +# always be universally administered to guarantee global uniqueness of the MAC +# address, but any particular machine may have other interfaces which are +# locally administered. An example of the latter is the bridge interface to +# the Touch Bar on MacBook Pros. +# +# This bit works out to be the 42nd bit counting from 1 being the least +# significant, or 1<<41. We'll prefer universally administered MAC addresses +# over locally administered ones since the former are globally unique, but +# we'll return the first of the latter found if that's all the machine has. +# +# See https://en.wikipedia.org/wiki/MAC_address#Universal_vs._local_(U/L_bit) + +def _is_universal(mac): + return not (mac & (1 << 41)) + + +def _find_mac_near_keyword(command, args, keywords, get_word_index): + """Searches a command's output for a MAC address near a keyword. + + Each line of words in the output is case-insensitively searched for + any of the given keywords. Upon a match, get_word_index is invoked + to pick a word from the line, given the index of the match. For + example, lambda i: 0 would get the first word on the line, while + lambda i: i - 1 would get the word preceding the keyword. + """ + stdout = _get_command_stdout(command, args) + if stdout is None: + return None + + first_local_mac = None + for line in stdout: + words = line.lower().rstrip().split() + for i in range(len(words)): + if words[i] in keywords: + try: + word = words[get_word_index(i)] + mac = int(word.replace(_MAC_DELIM, b''), 16) + except (ValueError, IndexError): + # Virtual interfaces, such as those provided by + # VPNs, do not have a colon-delimited MAC address + # as expected, but a 16-byte HWAddr separated by + # dashes. These should be ignored in favor of a + # real MAC address + pass + else: + if _is_universal(mac): + return mac + first_local_mac = first_local_mac or mac + return first_local_mac or None + + +def _parse_mac(word): + # Accept 'HH:HH:HH:HH:HH:HH' MAC address (ex: '52:54:00:9d:0e:67'), + # but reject IPv6 address (ex: 'fe80::5054:ff:fe9' or '123:2:3:4:5:6:7:8'). + # + # Virtual interfaces, such as those provided by VPNs, do not have a + # colon-delimited MAC address as expected, but a 16-byte HWAddr separated + # by dashes. 
These should be ignored in favor of a real MAC address + parts = word.split(_MAC_DELIM) + if len(parts) != 6: + return + if _MAC_OMITS_LEADING_ZEROES: + # (Only) on AIX the macaddr value given is not prefixed by 0, e.g. + # en0 1500 link#2 fa.bc.de.f7.62.4 110854824 0 160133733 0 0 + # not + # en0 1500 link#2 fa.bc.de.f7.62.04 110854824 0 160133733 0 0 + if not all(1 <= len(part) <= 2 for part in parts): + return + hexstr = b''.join(part.rjust(2, b'0') for part in parts) + else: + if not all(len(part) == 2 for part in parts): + return + hexstr = b''.join(parts) + try: + return int(hexstr, 16) + except ValueError: + return + + +def _find_mac_under_heading(command, args, heading): + """Looks for a MAC address under a heading in a command's output. + + The first line of words in the output is searched for the given + heading. Words at the same word index as the heading in subsequent + lines are then examined to see if they look like MAC addresses. + """ + stdout = _get_command_stdout(command, args) + if stdout is None: + return None + + keywords = stdout.readline().rstrip().split() + try: + column_index = keywords.index(heading) + except ValueError: + return None + + first_local_mac = None + for line in stdout: + words = line.rstrip().split() + try: + word = words[column_index] + except IndexError: + continue + + mac = _parse_mac(word) + if mac is None: + continue + if _is_universal(mac): + return mac + if first_local_mac is None: + first_local_mac = mac + + return first_local_mac + + +# The following functions call external programs to 'get' a macaddr value to +# be used as basis for an uuid +def _ifconfig_getnode(): + """Get the hardware address on Unix by running ifconfig.""" + # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes. + keywords = (b'hwaddr', b'ether', b'address:', b'lladdr') + for args in ('', '-a', '-av'): + mac = _find_mac_near_keyword('ifconfig', args, keywords, lambda i: i+1) + if mac: + return mac + return None + +def _ip_getnode(): + """Get the hardware address on Unix by running ip.""" + # This works on Linux with iproute2. + mac = _find_mac_near_keyword('ip', 'link', [b'link/ether'], lambda i: i+1) + if mac: + return mac + return None + +def _arp_getnode(): + """Get the hardware address on Unix by running arp.""" + import os, socket + if not hasattr(socket, "gethostbyname"): + return None + try: + ip_addr = socket.gethostbyname(socket.gethostname()) + except OSError: + return None + + # Try getting the MAC addr from arp based on our IP address (Solaris). + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1) + if mac: + return mac + + # This works on OpenBSD + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: i+1) + if mac: + return mac + + # This works on Linux, FreeBSD and NetBSD + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode('(%s)' % ip_addr)], + lambda i: i+2) + # Return None instead of 0. + if mac: + return mac + return None + +def _lanscan_getnode(): + """Get the hardware address on Unix by running lanscan.""" + # This might work on HP-UX. + return _find_mac_near_keyword('lanscan', '-ai', [b'lan0'], lambda i: 0) + +def _netstat_getnode(): + """Get the hardware address on Unix by running netstat.""" + # This works on AIX and might work on Tru64 UNIX. 
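+    # 'Address' is the column heading under which the MAC addresses are
+    # expected to appear in the 'netstat -ian' output.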
+ return _find_mac_under_heading('netstat', '-ian', b'Address') + + +# Import optional C extension at toplevel, to help disabling it when testing +try: + import _uuid + _generate_time_safe = getattr(_uuid, "generate_time_safe", None) + _has_stable_extractable_node = _uuid.has_stable_extractable_node + _UuidCreate = getattr(_uuid, "UuidCreate", None) +except ImportError: + _uuid = None + _generate_time_safe = None + _has_stable_extractable_node = False + _UuidCreate = None + + +def _unix_getnode(): + """Get the hardware address on Unix using the _uuid extension module.""" + if _generate_time_safe and _has_stable_extractable_node: + uuid_time, _ = _generate_time_safe() + return UUID(bytes=uuid_time).node + +def _windll_getnode(): + """Get the hardware address on Windows using the _uuid extension module.""" + if _UuidCreate and _has_stable_extractable_node: + uuid_bytes = _UuidCreate() + return UUID(bytes_le=uuid_bytes).node + +def _random_getnode(): + """Get a random node ID.""" + # RFC 9562, §6.10-3 says that + # + # Implementations MAY elect to obtain a 48-bit cryptographic-quality + # random number as per Section 6.9 to use as the Node ID. [...] [and] + # implementations MUST set the least significant bit of the first octet + # of the Node ID to 1. This bit is the unicast or multicast bit, which + # will never be set in IEEE 802 addresses obtained from network cards. + # + # The "multicast bit" of a MAC address is defined to be "the least + # significant bit of the first octet". This works out to be the 41st bit + # counting from 1 being the least significant bit, or 1<<40. + # + # See https://en.wikipedia.org/w/index.php?title=MAC_address&oldid=1128764812#Universal_vs._local_(U/L_bit) + return int.from_bytes(os.urandom(6)) | (1 << 40) + + +# _OS_GETTERS, when known, are targeted for a specific OS or platform. +# The order is by 'common practice' on the specified platform. +# Note: 'posix' and 'windows' _OS_GETTERS are prefixed by a dll/dlload() method +# which, when successful, means none of these "external" methods are called. +# _GETTERS is (also) used by test_uuid.py to SkipUnless(), e.g., +# @unittest.skipUnless(_uuid._ifconfig_getnode in _uuid._GETTERS, ...) +if _LINUX: + _OS_GETTERS = [_ip_getnode, _ifconfig_getnode] +elif sys.platform == 'darwin': + _OS_GETTERS = [_ifconfig_getnode, _arp_getnode, _netstat_getnode] +elif sys.platform == 'win32': + # bpo-40201: _windll_getnode will always succeed, so these are not needed + _OS_GETTERS = [] +elif _AIX: + _OS_GETTERS = [_netstat_getnode] +else: + _OS_GETTERS = [_ifconfig_getnode, _ip_getnode, _arp_getnode, + _netstat_getnode, _lanscan_getnode] +if os.name == 'posix': + _GETTERS = [_unix_getnode] + _OS_GETTERS +elif os.name == 'nt': + _GETTERS = [_windll_getnode] + _OS_GETTERS +else: + _GETTERS = _OS_GETTERS + +_node = None + +def getnode(): + """Get the hardware address as a 48-bit positive integer. + + The first time this runs, it may launch a separate program, which could + be quite slow. If all attempts to obtain the hardware address fail, we + choose a random 48-bit number with its eighth bit set to 1 as recommended + in RFC 4122. 
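+
+    A usage sketch (the value is host-specific, so the doctest is skipped):
+
+    >>> uuid.getnode()    # doctest: +SKIP
+    85541065115573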
+ """ + global _node + if _node is not None: + return _node + + for getter in _GETTERS + [_random_getnode]: + try: + _node = getter() + except: + continue + if (_node is not None) and (0 <= _node < (1 << 48)): + return _node + assert False, '_random_getnode() returned invalid value: {}'.format(_node) + + +_last_timestamp = None + +def uuid1(node=None, clock_seq=None): + """Generate a UUID from a host ID, sequence number, and the current time. + If 'node' is not given, getnode() is used to obtain the hardware + address. If 'clock_seq' is given, it is used as the sequence number; + otherwise a random 14-bit sequence number is chosen.""" + + # When the system provides a version-1 UUID generator, use it (but don't + # use UuidCreate here because its UUIDs don't conform to RFC 4122). + if _generate_time_safe is not None and node is clock_seq is None: + uuid_time, safely_generated = _generate_time_safe() + try: + is_safe = SafeUUID(safely_generated) + except ValueError: + is_safe = SafeUUID.unknown + return UUID(bytes=uuid_time, is_safe=is_safe) + + global _last_timestamp + nanoseconds = time.time_ns() + # 0x01b21dd213814000 is the number of 100-ns intervals between the + # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. + timestamp = nanoseconds // 100 + 0x01b21dd213814000 + if _last_timestamp is not None and timestamp <= _last_timestamp: + timestamp = _last_timestamp + 1 + _last_timestamp = timestamp + if clock_seq is None: + import random + clock_seq = random.getrandbits(14) # instead of stable storage + time_low = timestamp & 0xffffffff + time_mid = (timestamp >> 32) & 0xffff + time_hi_version = (timestamp >> 48) & 0x0fff + clock_seq_low = clock_seq & 0xff + clock_seq_hi_variant = (clock_seq >> 8) & 0x3f + if node is None: + node = getnode() + return UUID(fields=(time_low, time_mid, time_hi_version, + clock_seq_hi_variant, clock_seq_low, node), version=1) + +def uuid3(namespace, name): + """Generate a UUID from the MD5 hash of a namespace UUID and a name.""" + if isinstance(name, str): + name = bytes(name, "utf-8") + import hashlib + h = hashlib.md5(namespace.bytes + name, usedforsecurity=False) + int_uuid_3 = int.from_bytes(h.digest()) + int_uuid_3 &= _RFC_4122_CLEARFLAGS_MASK + int_uuid_3 |= _RFC_4122_VERSION_3_FLAGS + return UUID._from_int(int_uuid_3) + +def uuid4(): + """Generate a random UUID.""" + int_uuid_4 = int.from_bytes(os.urandom(16)) + int_uuid_4 &= _RFC_4122_CLEARFLAGS_MASK + int_uuid_4 |= _RFC_4122_VERSION_4_FLAGS + return UUID._from_int(int_uuid_4) + +def uuid5(namespace, name): + """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" + if isinstance(name, str): + name = bytes(name, "utf-8") + import hashlib + h = hashlib.sha1(namespace.bytes + name, usedforsecurity=False) + int_uuid_5 = int.from_bytes(h.digest()[:16]) + int_uuid_5 &= _RFC_4122_CLEARFLAGS_MASK + int_uuid_5 |= _RFC_4122_VERSION_5_FLAGS + return UUID._from_int(int_uuid_5) + + +_last_timestamp_v6 = None + +def uuid6(node=None, clock_seq=None): + """Similar to :func:`uuid1` but where fields are ordered differently + for improved DB locality. + + More precisely, given a 60-bit timestamp value as specified for UUIDv1, + for UUIDv6 the first 48 most significant bits are stored first, followed + by the 4-bit version (same position), followed by the remaining 12 bits + of the original 60-bit timestamp. 
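+
+    A usage sketch (the output is time- and host-dependent, hence skipped;
+    the value shown is purely illustrative):
+
+    >>> uuid.uuid6()    # doctest: +SKIP
+    UUID('1f06afd3-75b1-6a80-93bb-080027e9a1f7')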
+ """ + global _last_timestamp_v6 + import time + nanoseconds = time.time_ns() + # 0x01b21dd213814000 is the number of 100-ns intervals between the + # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. + timestamp = nanoseconds // 100 + 0x01b21dd213814000 + if _last_timestamp_v6 is not None and timestamp <= _last_timestamp_v6: + timestamp = _last_timestamp_v6 + 1 + _last_timestamp_v6 = timestamp + if clock_seq is None: + import random + clock_seq = random.getrandbits(14) # instead of stable storage + time_hi_and_mid = (timestamp >> 12) & 0xffff_ffff_ffff + time_lo = timestamp & 0x0fff # keep 12 bits and clear version bits + clock_s = clock_seq & 0x3fff # keep 14 bits and clear variant bits + if node is None: + node = getnode() + # --- 32 + 16 --- -- 4 -- -- 12 -- -- 2 -- -- 14 --- 48 + # time_hi_and_mid | version | time_lo | variant | clock_seq | node + int_uuid_6 = time_hi_and_mid << 80 + int_uuid_6 |= time_lo << 64 + int_uuid_6 |= clock_s << 48 + int_uuid_6 |= node & 0xffff_ffff_ffff + # by construction, the variant and version bits are already cleared + int_uuid_6 |= _RFC_4122_VERSION_6_FLAGS + return UUID._from_int(int_uuid_6) + + +_last_timestamp_v7 = None +_last_counter_v7 = 0 # 42-bit counter + +def _uuid7_get_counter_and_tail(): + rand = int.from_bytes(os.urandom(10)) + # 42-bit counter with MSB set to 0 + counter = (rand >> 32) & 0x1ff_ffff_ffff + # 32-bit random data + tail = rand & 0xffff_ffff + return counter, tail + + +def uuid7(): + """Generate a UUID from a Unix timestamp in milliseconds and random bits. + + UUIDv7 objects feature monotonicity within a millisecond. + """ + # --- 48 --- -- 4 -- --- 12 --- -- 2 -- --- 30 --- - 32 - + # unix_ts_ms | version | counter_hi | variant | counter_lo | random + # + # 'counter = counter_hi | counter_lo' is a 42-bit counter constructed + # with Method 1 of RFC 9562, §6.2, and its MSB is set to 0. + # + # 'random' is a 32-bit random value regenerated for every new UUID. + # + # If multiple UUIDs are generated within the same millisecond, the LSB + # of 'counter' is incremented by 1. When overflowing, the timestamp is + # advanced and the counter is reset to a random 42-bit integer with MSB + # set to 0. 
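+    # The module-level globals below persist the last (timestamp, counter)
+    # pair across calls, which is what keeps UUIDs generated within the same
+    # millisecond monotonic.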
+ + global _last_timestamp_v7 + global _last_counter_v7 + + nanoseconds = time.time_ns() + timestamp_ms = nanoseconds // 1_000_000 + + if _last_timestamp_v7 is None or timestamp_ms > _last_timestamp_v7: + counter, tail = _uuid7_get_counter_and_tail() + else: + if timestamp_ms < _last_timestamp_v7: + timestamp_ms = _last_timestamp_v7 + 1 + # advance the 42-bit counter + counter = _last_counter_v7 + 1 + if counter > 0x3ff_ffff_ffff: + # advance the 48-bit timestamp + timestamp_ms += 1 + counter, tail = _uuid7_get_counter_and_tail() + else: + # 32-bit random data + tail = int.from_bytes(os.urandom(4)) + + unix_ts_ms = timestamp_ms & 0xffff_ffff_ffff + counter_msbs = counter >> 30 + # keep 12 counter's MSBs and clear variant bits + counter_hi = counter_msbs & 0x0fff + # keep 30 counter's LSBs and clear version bits + counter_lo = counter & 0x3fff_ffff + # ensure that the tail is always a 32-bit integer (by construction, + # it is already the case, but future interfaces may allow the user + # to specify the random tail) + tail &= 0xffff_ffff + + int_uuid_7 = unix_ts_ms << 80 + int_uuid_7 |= counter_hi << 64 + int_uuid_7 |= counter_lo << 32 + int_uuid_7 |= tail + # by construction, the variant and version bits are already cleared + int_uuid_7 |= _RFC_4122_VERSION_7_FLAGS + res = UUID._from_int(int_uuid_7) + + # defer global update until all computations are done + _last_timestamp_v7 = timestamp_ms + _last_counter_v7 = counter + return res + + +def uuid8(a=None, b=None, c=None): + """Generate a UUID from three custom blocks. + + * 'a' is the first 48-bit chunk of the UUID (octets 0-5); + * 'b' is the mid 12-bit chunk (octets 6-7); + * 'c' is the last 62-bit chunk (octets 8-15). + + When a value is not specified, a pseudo-random value is generated. + """ + if a is None: + import random + a = random.getrandbits(48) + if b is None: + import random + b = random.getrandbits(12) + if c is None: + import random + c = random.getrandbits(62) + int_uuid_8 = (a & 0xffff_ffff_ffff) << 80 + int_uuid_8 |= (b & 0xfff) << 64 + int_uuid_8 |= c & 0x3fff_ffff_ffff_ffff + # by construction, the variant and version bits are already cleared + int_uuid_8 |= _RFC_4122_VERSION_8_FLAGS + return UUID._from_int(int_uuid_8) + + +def main(): + """Run the uuid command line interface.""" + uuid_funcs = { + "uuid1": uuid1, + "uuid3": uuid3, + "uuid4": uuid4, + "uuid5": uuid5, + "uuid6": uuid6, + "uuid7": uuid7, + "uuid8": uuid8, + } + uuid_namespace_funcs = ("uuid3", "uuid5") + namespaces = { + "@dns": NAMESPACE_DNS, + "@url": NAMESPACE_URL, + "@oid": NAMESPACE_OID, + "@x500": NAMESPACE_X500 + } + + import argparse + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Generate a UUID using the selected UUID function.", + color=True, + ) + parser.add_argument("-u", "--uuid", + choices=uuid_funcs.keys(), + default="uuid4", + help="function to generate the UUID") + parser.add_argument("-n", "--namespace", + choices=["any UUID", *namespaces.keys()], + help="uuid3/uuid5 only: " + "a UUID, or a well-known predefined UUID addressed " + "by namespace name") + parser.add_argument("-N", "--name", + help="uuid3/uuid5 only: " + "name used as part of generating the UUID") + parser.add_argument("-C", "--count", metavar="NUM", type=int, default=1, + help="generate NUM fresh UUIDs") + + args = parser.parse_args() + uuid_func = uuid_funcs[args.uuid] + namespace = args.namespace + name = args.name + + if args.uuid in uuid_namespace_funcs: + if not namespace or not name: + parser.error( + 
"Incorrect number of arguments. " + f"{args.uuid} requires a namespace and a name. " + "Run 'python -m uuid -h' for more information." + ) + namespace = namespaces[namespace] if namespace in namespaces else UUID(namespace) + for _ in range(args.count): + print(uuid_func(namespace, name)) + else: + for _ in range(args.count): + print(uuid_func()) + + +# The following standard UUIDs are for use with uuid3() or uuid5(). + +NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8') + +# RFC 9562 Sections 5.9 and 5.10 define the special Nil and Max UUID formats. + +NIL = UUID('00000000-0000-0000-0000-000000000000') +MAX = UUID('ffffffff-ffff-ffff-ffff-ffffffffffff') + +if __name__ == "__main__": + main() diff --git a/Python314_4_x86_Template/Lib/venv/__init__.py b/Python314_4_x86_Template/Lib/venv/__init__.py new file mode 100644 index 00000000..88f3340a --- /dev/null +++ b/Python314_4_x86_Template/Lib/venv/__init__.py @@ -0,0 +1,700 @@ +""" +Virtual environment (venv) package for Python. Based on PEP 405. + +Copyright (C) 2011-2014 Vinay Sajip. +Licensed to the PSF under a contributor agreement. +""" +import logging +import os +import shutil +import subprocess +import sys +import sysconfig +import types +import shlex + + +CORE_VENV_DEPS = ('pip',) +logger = logging.getLogger(__name__) + + +class EnvBuilder: + """ + This class exists to allow virtual environment creation to be + customized. The constructor parameters determine the builder's + behaviour when called upon to create a virtual environment. + + By default, the builder makes the system (global) site-packages dir + *un*available to the created environment. + + If invoked using the Python -m option, the default is to use copying + on Windows platforms but symlinks elsewhere. If instantiated some + other way, the default is to *not* use symlinks. + + :param system_site_packages: If True, the system (global) site-packages + dir is available to created environments. + :param clear: If True, delete the contents of the environment directory if + it already exists, before environment creation. + :param symlinks: If True, attempt to symlink rather than copy files into + virtual environment. + :param upgrade: If True, upgrade an existing virtual environment. + :param with_pip: If True, ensure pip is installed in the virtual + environment + :param prompt: Alternative terminal prefix for the environment. + :param upgrade_deps: Update the base venv modules to the latest on PyPI + :param scm_ignore_files: Create ignore files for the SCMs specified by the + iterable. + """ + + def __init__(self, system_site_packages=False, clear=False, + symlinks=False, upgrade=False, with_pip=False, prompt=None, + upgrade_deps=False, *, scm_ignore_files=frozenset()): + self.system_site_packages = system_site_packages + self.clear = clear + self.symlinks = symlinks + self.upgrade = upgrade + self.with_pip = with_pip + self.orig_prompt = prompt + if prompt == '.': # see bpo-38901 + prompt = os.path.basename(os.getcwd()) + self.prompt = prompt + self.upgrade_deps = upgrade_deps + self.scm_ignore_files = frozenset(map(str.lower, scm_ignore_files)) + + def create(self, env_dir): + """ + Create a virtual environment in a directory. + + :param env_dir: The target directory to create an environment in. 
+ + """ + env_dir = os.path.abspath(env_dir) + context = self.ensure_directories(env_dir) + for scm in self.scm_ignore_files: + getattr(self, f"create_{scm}_ignore_file")(context) + # See issue 24875. We need system_site_packages to be False + # until after pip is installed. + true_system_site_packages = self.system_site_packages + self.system_site_packages = False + self.create_configuration(context) + self.setup_python(context) + if self.with_pip: + self._setup_pip(context) + if not self.upgrade: + self.setup_scripts(context) + self.post_setup(context) + if true_system_site_packages: + # We had set it to False before, now + # restore it and rewrite the configuration + self.system_site_packages = True + self.create_configuration(context) + if self.upgrade_deps: + self.upgrade_dependencies(context) + + def clear_directory(self, path): + for fn in os.listdir(path): + fn = os.path.join(path, fn) + if os.path.islink(fn) or os.path.isfile(fn): + os.remove(fn) + elif os.path.isdir(fn): + shutil.rmtree(fn) + + def _venv_path(self, env_dir, name): + vars = { + 'base': env_dir, + 'platbase': env_dir, + } + return sysconfig.get_path(name, scheme='venv', vars=vars) + + @classmethod + def _same_path(cls, path1, path2): + """Check whether two paths appear the same. + + Whether they refer to the same file is irrelevant; we're testing for + whether a human reader would look at the path string and easily tell + that they're the same file. + """ + if sys.platform == 'win32': + if os.path.normcase(path1) == os.path.normcase(path2): + return True + # gh-90329: Don't display a warning for short/long names + import _winapi + try: + path1 = _winapi.GetLongPathName(os.fsdecode(path1)) + except OSError: + pass + try: + path2 = _winapi.GetLongPathName(os.fsdecode(path2)) + except OSError: + pass + if os.path.normcase(path1) == os.path.normcase(path2): + return True + return False + else: + return path1 == path2 + + def ensure_directories(self, env_dir): + """ + Create the directories for the environment. + + Returns a context object which holds paths in the environment, + for use by subsequent logic. + """ + + def create_if_needed(d): + if not os.path.exists(d): + os.makedirs(d) + elif os.path.islink(d) or os.path.isfile(d): + raise ValueError('Unable to create directory %r' % d) + + if os.pathsep in os.fspath(env_dir): + raise ValueError(f'Refusing to create a venv in {env_dir} because ' + f'it contains the PATH separator {os.pathsep}.') + if os.path.exists(env_dir) and self.clear: + self.clear_directory(env_dir) + context = types.SimpleNamespace() + context.env_dir = env_dir + context.env_name = os.path.split(env_dir)[1] + context.prompt = self.prompt if self.prompt is not None else context.env_name + create_if_needed(env_dir) + executable = sys._base_executable + if not executable: # see gh-96861 + raise ValueError('Unable to determine path to the running ' + 'Python interpreter. Provide an explicit path or ' + 'check that your PATH environment variable is ' + 'correctly set.') + dirname, exename = os.path.split(os.path.abspath(executable)) + if sys.platform == 'win32': + # Always create the simplest name in the venv. 
It will either be a + # link back to executable, or a copy of the appropriate launcher + _d = '_d' if os.path.splitext(exename)[0].endswith('_d') else '' + exename = f'python{_d}.exe' + context.executable = executable + context.python_dir = dirname + context.python_exe = exename + binpath = self._venv_path(env_dir, 'scripts') + libpath = self._venv_path(env_dir, 'purelib') + + # PEP 405 says venvs should create a local include directory. + # See https://peps.python.org/pep-0405/#include-files + # XXX: This directory is not exposed in sysconfig or anywhere else, and + # doesn't seem to be utilized by modern packaging tools. We keep it + # for backwards-compatibility, and to follow the PEP, but I would + # recommend against using it, as most tooling does not pass it to + # compilers. Instead, until we standardize a site-specific include + # directory, I would recommend installing headers as package data, + # and providing some sort of API to get the include directories. + # Example: https://numpy.org/doc/2.1/reference/generated/numpy.get_include.html + incpath = os.path.join(env_dir, 'Include' if os.name == 'nt' else 'include') + + context.inc_path = incpath + create_if_needed(incpath) + context.lib_path = libpath + create_if_needed(libpath) + # Issue 21197: create lib64 as a symlink to lib on 64-bit non-OS X POSIX + if ((sys.maxsize > 2**32) and (os.name == 'posix') and + (sys.platform != 'darwin')): + link_path = os.path.join(env_dir, 'lib64') + if not os.path.exists(link_path): # Issue #21643 + os.symlink('lib', link_path) + context.bin_path = binpath + context.bin_name = os.path.relpath(binpath, env_dir) + context.env_exe = os.path.join(binpath, exename) + create_if_needed(binpath) + # Assign and update the command to use when launching the newly created + # environment, in case it isn't simply the executable script (e.g. bpo-45337) + context.env_exec_cmd = context.env_exe + if sys.platform == 'win32': + # bpo-45337: Fix up env_exec_cmd to account for file system redirections. + # Some redirects only apply to CreateFile and not CreateProcess + real_env_exe = os.path.realpath(context.env_exe) + if not self._same_path(real_env_exe, context.env_exe): + logger.warning('Actual environment location may have moved due to ' + 'redirects, links or junctions.\n' + ' Requested location: "%s"\n' + ' Actual location: "%s"', + context.env_exe, real_env_exe) + context.env_exec_cmd = real_env_exe + return context + + def create_configuration(self, context): + """ + Create a configuration file indicating where the environment's Python + was copied from, and whether the system site-packages should be made + available in the environment. + + :param context: The information for the environment creation request + being processed. 
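+
+        For illustration, the file written below looks roughly like this
+        (values vary with the host and the options used):
+
+            home = C:\Python314
+            include-system-site-packages = false
+            version = 3.14.0
+            executable = C:\Python314\python.exe
+            command = C:\Python314\python.exe -m venv C:\work\env-demo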
+ """ + context.cfg_path = path = os.path.join(context.env_dir, 'pyvenv.cfg') + with open(path, 'w', encoding='utf-8') as f: + f.write('home = %s\n' % context.python_dir) + if self.system_site_packages: + incl = 'true' + else: + incl = 'false' + f.write('include-system-site-packages = %s\n' % incl) + f.write('version = %d.%d.%d\n' % sys.version_info[:3]) + if self.prompt is not None: + f.write(f'prompt = {self.prompt!r}\n') + f.write('executable = %s\n' % os.path.realpath(sys.executable)) + args = [] + nt = os.name == 'nt' + if nt and self.symlinks: + args.append('--symlinks') + if not nt and not self.symlinks: + args.append('--copies') + if not self.with_pip: + args.append('--without-pip') + if self.system_site_packages: + args.append('--system-site-packages') + if self.clear: + args.append('--clear') + if self.upgrade: + args.append('--upgrade') + if self.upgrade_deps: + args.append('--upgrade-deps') + if self.orig_prompt is not None: + args.append(f'--prompt="{self.orig_prompt}"') + if not self.scm_ignore_files: + args.append('--without-scm-ignore-files') + + args.append(context.env_dir) + args = ' '.join(args) + f.write(f'command = {sys.executable} -m venv {args}\n') + + def symlink_or_copy(self, src, dst, relative_symlinks_ok=False): + """ + Try symlinking a file, and if that fails, fall back to copying. + (Unused on Windows, because we can't just copy a failed symlink file: we + switch to a different set of files instead.) + """ + assert os.name != 'nt' + force_copy = not self.symlinks + if not force_copy: + try: + if not os.path.islink(dst): # can't link to itself! + if relative_symlinks_ok: + assert os.path.dirname(src) == os.path.dirname(dst) + os.symlink(os.path.basename(src), dst) + else: + os.symlink(src, dst) + except Exception: # may need to use a more specific exception + logger.warning('Unable to symlink %r to %r', src, dst) + force_copy = True + if force_copy: + shutil.copyfile(src, dst) + + def create_git_ignore_file(self, context): + """ + Create a .gitignore file in the environment directory. + + The contents of the file cause the entire environment directory to be + ignored by git. + """ + gitignore_path = os.path.join(context.env_dir, '.gitignore') + with open(gitignore_path, 'w', encoding='utf-8') as file: + file.write('# Created by venv; ' + 'see https://docs.python.org/3/library/venv.html\n') + file.write('*\n') + + if os.name != 'nt': + def setup_python(self, context): + """ + Set up a Python executable in the environment. + + :param context: The information for the environment creation request + being processed. + """ + binpath = context.bin_path + path = context.env_exe + copier = self.symlink_or_copy + dirname = context.python_dir + copier(context.executable, path) + if not os.path.islink(path): + os.chmod(path, 0o755) + + suffixes = ['python', 'python3', f'python3.{sys.version_info[1]}'] + if sys.version_info[:2] == (3, 14) and sys.getfilesystemencoding() == 'utf-8': + suffixes.append('𝜋thon') + for suffix in suffixes: + path = os.path.join(binpath, suffix) + if not os.path.exists(path): + # Issue 18807: make copies if + # symlinks are not wanted + copier(context.env_exe, path, relative_symlinks_ok=True) + if not os.path.islink(path): + os.chmod(path, 0o755) + + else: + def setup_python(self, context): + """ + Set up a Python executable in the environment. + + :param context: The information for the environment creation request + being processed. 
+ """ + binpath = context.bin_path + dirname = context.python_dir + exename = os.path.basename(context.env_exe) + exe_stem = os.path.splitext(exename)[0] + exe_d = '_d' if os.path.normcase(exe_stem).endswith('_d') else '' + if sysconfig.is_python_build(): + scripts = dirname + else: + scripts = os.path.join(os.path.dirname(__file__), + 'scripts', 'nt') + if not sysconfig.get_config_var("Py_GIL_DISABLED"): + python_exe = os.path.join(dirname, f'python{exe_d}.exe') + pythonw_exe = os.path.join(dirname, f'pythonw{exe_d}.exe') + link_sources = { + 'python.exe': python_exe, + f'python{exe_d}.exe': python_exe, + 'pythonw.exe': pythonw_exe, + f'pythonw{exe_d}.exe': pythonw_exe, + } + python_exe = os.path.join(scripts, f'venvlauncher{exe_d}.exe') + pythonw_exe = os.path.join(scripts, f'venvwlauncher{exe_d}.exe') + copy_sources = { + 'python.exe': python_exe, + f'python{exe_d}.exe': python_exe, + 'pythonw.exe': pythonw_exe, + f'pythonw{exe_d}.exe': pythonw_exe, + } + else: + exe_t = f'3.{sys.version_info[1]}t' + python_exe = os.path.join(dirname, f'python{exe_t}{exe_d}.exe') + pythonw_exe = os.path.join(dirname, f'pythonw{exe_t}{exe_d}.exe') + link_sources = { + 'python.exe': python_exe, + f'python{exe_d}.exe': python_exe, + f'python{exe_t}.exe': python_exe, + f'python{exe_t}{exe_d}.exe': python_exe, + 'pythonw.exe': pythonw_exe, + f'pythonw{exe_d}.exe': pythonw_exe, + f'pythonw{exe_t}.exe': pythonw_exe, + f'pythonw{exe_t}{exe_d}.exe': pythonw_exe, + } + python_exe = os.path.join(scripts, f'venvlaunchert{exe_d}.exe') + pythonw_exe = os.path.join(scripts, f'venvwlaunchert{exe_d}.exe') + copy_sources = { + 'python.exe': python_exe, + f'python{exe_d}.exe': python_exe, + f'python{exe_t}.exe': python_exe, + f'python{exe_t}{exe_d}.exe': python_exe, + 'pythonw.exe': pythonw_exe, + f'pythonw{exe_d}.exe': pythonw_exe, + f'pythonw{exe_t}.exe': pythonw_exe, + f'pythonw{exe_t}{exe_d}.exe': pythonw_exe, + } + + do_copies = True + if self.symlinks: + do_copies = False + # For symlinking, we need all the DLLs to be available alongside + # the executables. + link_sources.update({ + f: os.path.join(dirname, f) for f in os.listdir(dirname) + if os.path.normcase(f).startswith(('python', 'vcruntime')) + and os.path.normcase(os.path.splitext(f)[1]) == '.dll' + }) + + to_unlink = [] + for dest, src in link_sources.items(): + dest = os.path.join(binpath, dest) + try: + os.symlink(src, dest) + to_unlink.append(dest) + except OSError: + logger.warning('Unable to symlink %r to %r', src, dest) + do_copies = True + for f in to_unlink: + try: + os.unlink(f) + except OSError: + logger.warning('Failed to clean up symlink %r', + f) + logger.warning('Retrying with copies') + break + + if do_copies: + for dest, src in copy_sources.items(): + dest = os.path.join(binpath, dest) + try: + shutil.copy2(src, dest) + except OSError: + logger.warning('Unable to copy %r to %r', src, dest) + + if sysconfig.is_python_build(): + # copy init.tcl + for root, dirs, files in os.walk(context.python_dir): + if 'init.tcl' in files: + tcldir = os.path.basename(root) + tcldir = os.path.join(context.env_dir, 'Lib', tcldir) + if not os.path.exists(tcldir): + os.makedirs(tcldir) + src = os.path.join(root, 'init.tcl') + dst = os.path.join(tcldir, 'init.tcl') + shutil.copyfile(src, dst) + break + + def _call_new_python(self, context, *py_args, **kwargs): + """Executes the newly created Python using safe-ish options""" + # gh-98251: We do not want to just use '-I' because that masks + # legitimate user preferences (such as not writing bytecode). 
All we
+        # really need is to ensure that the path variables do not overrule
+        # normal venv handling.
+        args = [context.env_exec_cmd, *py_args]
+        kwargs['env'] = env = os.environ.copy()
+        env['VIRTUAL_ENV'] = context.env_dir
+        env.pop('PYTHONHOME', None)
+        env.pop('PYTHONPATH', None)
+        kwargs['cwd'] = context.env_dir
+        kwargs['executable'] = context.env_exec_cmd
+        subprocess.check_output(args, **kwargs)
+
+    def _setup_pip(self, context):
+        """Installs or upgrades pip in a virtual environment"""
+        self._call_new_python(context, '-m', 'ensurepip', '--upgrade',
+                              '--default-pip', stderr=subprocess.STDOUT)
+
+    def setup_scripts(self, context):
+        """
+        Set up scripts into the created environment from a directory.
+
+        This method installs the default scripts into the environment
+        being created. You can prevent the default installation by overriding
+        this method if you really need to, or if you need to specify
+        a different location for the scripts to install. By default, the
+        'scripts' directory in the venv package is used as the source of
+        scripts to install.
+        """
+        path = os.path.abspath(os.path.dirname(__file__))
+        path = os.path.join(path, 'scripts')
+        self.install_scripts(context, path)
+
+    def post_setup(self, context):
+        """
+        Hook for post-setup modification of the venv. Subclasses may install
+        additional packages or scripts here, add activation shell scripts, etc.
+
+        :param context: The information for the environment creation request
+                        being processed.
+        """
+        pass
+
+    def replace_variables(self, text, context):
+        """
+        Replace variable placeholders in script text with context-specific
+        variables.
+
+        Return the text passed in, but with variables replaced.
+
+        :param text: The text in which to replace placeholder variables.
+        :param context: The information for the environment creation request
+                        being processed.
+        """
+        replacements = {
+            '__VENV_DIR__': context.env_dir,
+            '__VENV_NAME__': context.env_name,
+            '__VENV_PROMPT__': context.prompt,
+            '__VENV_BIN_NAME__': context.bin_name,
+            '__VENV_PYTHON__': context.env_exe,
+        }
+
+        def quote_ps1(s):
+            """
+            This should satisfy PowerShell quoting rules [1], unless the quoted
+            string is passed directly to Windows native commands [2].
+            [1]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_quoting_rules
+            [2]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_parsing#passing-arguments-that-contain-quote-characters
+            """
+            s = s.replace("'", "''")
+            return f"'{s}'"
+
+        def quote_bat(s):
+            return s
+
+        # gh-124651: need to quote the template strings properly
+        quote = shlex.quote
+        script_path = context.script_path
+        if script_path.endswith('.ps1'):
+            quote = quote_ps1
+        elif script_path.endswith('.bat'):
+            quote = quote_bat
+        else:
+            # fall back to a POSIX-shell-compliant quote
+            quote = shlex.quote
+
+        replacements = {key: quote(s) for key, s in replacements.items()}
+        for key, quoted in replacements.items():
+            text = text.replace(key, quoted)
+        return text
+
+    def install_scripts(self, context, path):
+        """
+        Install scripts into the created environment from a directory.
+
+        :param context: The information for the environment creation request
+                        being processed.
+        :param path: Absolute pathname of a directory containing scripts.
+                     Scripts in the 'common' subdirectory of this directory,
+                     and those in the directory named for the platform
+                     being run on, are installed in the created environment.
+ Placeholder variables are replaced with environment- + specific values. + """ + binpath = context.bin_path + plen = len(path) + if os.name == 'nt': + def skip_file(f): + f = os.path.normcase(f) + return (f.startswith(('python', 'venv')) + and f.endswith(('.exe', '.pdb'))) + else: + def skip_file(f): + return False + for root, dirs, files in os.walk(path): + if root == path: # at top-level, remove irrelevant dirs + for d in dirs[:]: + if d not in ('common', os.name): + dirs.remove(d) + continue # ignore files in top level + for f in files: + if skip_file(f): + continue + srcfile = os.path.join(root, f) + suffix = root[plen:].split(os.sep)[2:] + if not suffix: + dstdir = binpath + else: + dstdir = os.path.join(binpath, *suffix) + if not os.path.exists(dstdir): + os.makedirs(dstdir) + dstfile = os.path.join(dstdir, f) + if os.name == 'nt' and srcfile.endswith(('.exe', '.pdb')): + shutil.copy2(srcfile, dstfile) + continue + with open(srcfile, 'rb') as f: + data = f.read() + try: + context.script_path = srcfile + new_data = ( + self.replace_variables(data.decode('utf-8'), context) + .encode('utf-8') + ) + except UnicodeError as e: + logger.warning('unable to copy script %r, ' + 'may be binary: %s', srcfile, e) + continue + if new_data == data: + shutil.copy(srcfile, dstfile) + else: + with open(dstfile, 'wb') as f: + f.write(new_data) + shutil.copymode(srcfile, dstfile) + + def upgrade_dependencies(self, context): + logger.debug( + f'Upgrading {CORE_VENV_DEPS} packages in {context.bin_path}' + ) + self._call_new_python(context, '-m', 'pip', 'install', '--upgrade', + *CORE_VENV_DEPS) + + +def create(env_dir, system_site_packages=False, clear=False, + symlinks=False, with_pip=False, prompt=None, upgrade_deps=False, + *, scm_ignore_files=frozenset()): + """Create a virtual environment in a directory.""" + builder = EnvBuilder(system_site_packages=system_site_packages, + clear=clear, symlinks=symlinks, with_pip=with_pip, + prompt=prompt, upgrade_deps=upgrade_deps, + scm_ignore_files=scm_ignore_files) + builder.create(env_dir) + + +def main(args=None): + import argparse + + parser = argparse.ArgumentParser(description='Creates virtual Python ' + 'environments in one or ' + 'more target ' + 'directories.', + epilog='Once an environment has been ' + 'created, you may wish to ' + 'activate it, e.g. 
by ' + 'sourcing an activate script ' + 'in its bin directory.', + color=True, + ) + parser.add_argument('dirs', metavar='ENV_DIR', nargs='+', + help='A directory to create the environment in.') + parser.add_argument('--system-site-packages', default=False, + action='store_true', dest='system_site', + help='Give the virtual environment access to the ' + 'system site-packages dir.') + if os.name == 'nt': + use_symlinks = False + else: + use_symlinks = True + group = parser.add_mutually_exclusive_group() + group.add_argument('--symlinks', default=use_symlinks, + action='store_true', dest='symlinks', + help='Try to use symlinks rather than copies, ' + 'when symlinks are not the default for ' + 'the platform.') + group.add_argument('--copies', default=not use_symlinks, + action='store_false', dest='symlinks', + help='Try to use copies rather than symlinks, ' + 'even when symlinks are the default for ' + 'the platform.') + parser.add_argument('--clear', default=False, action='store_true', + dest='clear', help='Delete the contents of the ' + 'environment directory if it ' + 'already exists, before ' + 'environment creation.') + parser.add_argument('--upgrade', default=False, action='store_true', + dest='upgrade', help='Upgrade the environment ' + 'directory to use this version ' + 'of Python, assuming Python ' + 'has been upgraded in-place.') + parser.add_argument('--without-pip', dest='with_pip', + default=True, action='store_false', + help='Skips installing or upgrading pip in the ' + 'virtual environment (pip is bootstrapped ' + 'by default)') + parser.add_argument('--prompt', + help='Provides an alternative prompt prefix for ' + 'this environment.') + parser.add_argument('--upgrade-deps', default=False, action='store_true', + dest='upgrade_deps', + help=f'Upgrade core dependencies ({", ".join(CORE_VENV_DEPS)}) ' + 'to the latest version in PyPI') + parser.add_argument('--without-scm-ignore-files', dest='scm_ignore_files', + action='store_const', const=frozenset(), + default=frozenset(['git']), + help='Skips adding SCM ignore files to the environment ' + 'directory (Git is supported by default).') + options = parser.parse_args(args) + if options.upgrade and options.clear: + raise ValueError('you cannot supply --upgrade and --clear together.') + builder = EnvBuilder(system_site_packages=options.system_site, + clear=options.clear, + symlinks=options.symlinks, + upgrade=options.upgrade, + with_pip=options.with_pip, + prompt=options.prompt, + upgrade_deps=options.upgrade_deps, + scm_ignore_files=options.scm_ignore_files) + for d in options.dirs: + builder.create(d) + + +if __name__ == '__main__': + rc = 1 + try: + main() + rc = 0 + except Exception as e: + print('Error: %s' % e, file=sys.stderr) + sys.exit(rc) diff --git a/Python313_13_x86_Template/Lib/venv/__main__.py b/Python314_4_x86_Template/Lib/venv/__main__.py similarity index 100% rename from Python313_13_x86_Template/Lib/venv/__main__.py rename to Python314_4_x86_Template/Lib/venv/__main__.py diff --git a/Python314_4_x86_Template/Lib/venv/scripts/common/Activate.ps1 b/Python314_4_x86_Template/Lib/venv/scripts/common/Activate.ps1 new file mode 100644 index 00000000..2cc90919 --- /dev/null +++ b/Python314_4_x86_Template/Lib/venv/scripts/common/Activate.ps1 @@ -0,0 +1,547 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. 
+ +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. + +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. + +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. + +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. + +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove VIRTUAL_ENV_PROMPT altogether. 
+ if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { + Remove-Item -Path env:VIRTUAL_ENV_PROMPT + } + + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: + if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { + Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force + } + + # Leave deactivate function in the global namespace if requested: + if (-not $NonDestructive) { + Remove-Item -Path function:deactivate + } +} + +<# +.Description +Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the +given folder, and returns them in a map. + +For each line in the pyvenv.cfg file, if that line can be parsed into exactly +two strings separated by `=` (with any amount of whitespace surrounding the =) +then it is considered a `key = value` line. The left hand string is the key, +the right hand is the value. + +If the value starts with a `'` or a `"` then the first and last character is +stripped from the value before being captured. + +.Parameter ConfigDir +Path to the directory that contains the `pyvenv.cfg` file. +#> +function Get-PyVenvConfig( + [String] + $ConfigDir +) { + Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" + + # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). + $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue + + # An empty map will be returned if no config file is found. + $pyvenvConfig = @{ } + + if ($pyvenvConfigPath) { + + Write-Verbose "File exists, parse `key = value` lines" + $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath + + $pyvenvConfigContent | ForEach-Object { + $keyval = $PSItem -split "\s*=\s*", 2 + if ($keyval[0] -and $keyval[1]) { + $val = $keyval[1] + + # Remove extraneous quotations around a string value. + if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. +if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." + $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. 
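+# Note: an empty or unset -Prompt falls through to the pyvenv.cfg value,
+# since an empty string is falsy in the conditional below.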
+if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. +$env:VIRTUAL_ENV = $VenvDir + +$env:VIRTUAL_ENV_PROMPT = $Prompt + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" + +# SIG # Begin signature block +# MII3YgYJKoZIhvcNAQcCoII3UzCCN08CAQExDzANBglghkgBZQMEAgEFADB5Bgor +# BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG +# KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCBALKwKRFIhr2RY +# IW/WJLd9pc8a9sj/IoThKU92fTfKsKCCG9IwggXMMIIDtKADAgECAhBUmNLR1FsZ +# lUgTecgRwIeZMA0GCSqGSIb3DQEBDAUAMHcxCzAJBgNVBAYTAlVTMR4wHAYDVQQK +# ExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xSDBGBgNVBAMTP01pY3Jvc29mdCBJZGVu +# dGl0eSBWZXJpZmljYXRpb24gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgMjAy +# MDAeFw0yMDA0MTYxODM2MTZaFw00NTA0MTYxODQ0NDBaMHcxCzAJBgNVBAYTAlVT +# MR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xSDBGBgNVBAMTP01pY3Jv +# c29mdCBJZGVudGl0eSBWZXJpZmljYXRpb24gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRo +# b3JpdHkgMjAyMDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALORKgeD +# Bmf9np3gx8C3pOZCBH8Ppttf+9Va10Wg+3cL8IDzpm1aTXlT2KCGhFdFIMeiVPvH +# or+Kx24186IVxC9O40qFlkkN/76Z2BT2vCcH7kKbK/ULkgbk/WkTZaiRcvKYhOuD +# PQ7k13ESSCHLDe32R0m3m/nJxxe2hE//uKya13NnSYXjhr03QNAlhtTetcJtYmrV +# qXi8LW9J+eVsFBT9FMfTZRY33stuvF4pjf1imxUs1gXmuYkyM6Nix9fWUmcIxC70 +# ViueC4fM7Ke0pqrrBc0ZV6U6CwQnHJFnni1iLS8evtrAIMsEGcoz+4m+mOJyoHI1 +# vnnhnINv5G0Xb5DzPQCGdTiO0OBJmrvb0/gwytVXiGhNctO/bX9x2P29Da6SZEi3 +# W295JrXNm5UhhNHvDzI9e1eM80UHTHzgXhgONXaLbZ7LNnSrBfjgc10yVpRnlyUK +# xjU9lJfnwUSLgP3B+PR0GeUw9gb7IVc+BhyLaxWGJ0l7gpPKWeh1R+g/OPTHU3mg +# trTiXFHvvV84wRPmeAyVWi7FQFkozA8kwOy6CXcjmTimthzax7ogttc32H83rwjj +# O3HbbnMbfZlysOSGM1l0tRYAe1BtxoYT2v3EOYI9JACaYNq6lMAFUSw0rFCZE4e7 +# swWAsk0wAly4JoNdtGNz764jlU9gKL431VulAgMBAAGjVDBSMA4GA1UdDwEB/wQE +# 
AwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTIftJqhSobyhmYBAcnz1AQ +# T2ioojAQBgkrBgEEAYI3FQEEAwIBADANBgkqhkiG9w0BAQwFAAOCAgEAr2rd5hnn +# LZRDGU7L6VCVZKUDkQKL4jaAOxWiUsIWGbZqWl10QzD0m/9gdAmxIR6QFm3FJI9c +# Zohj9E/MffISTEAQiwGf2qnIrvKVG8+dBetJPnSgaFvlVixlHIJ+U9pW2UYXeZJF +# xBA2CFIpF8svpvJ+1Gkkih6PsHMNzBxKq7Kq7aeRYwFkIqgyuH4yKLNncy2RtNwx +# AQv3Rwqm8ddK7VZgxCwIo3tAsLx0J1KH1r6I3TeKiW5niB31yV2g/rarOoDXGpc8 +# FzYiQR6sTdWD5jw4vU8w6VSp07YEwzJ2YbuwGMUrGLPAgNW3lbBeUU0i/OxYqujY +# lLSlLu2S3ucYfCFX3VVj979tzR/SpncocMfiWzpbCNJbTsgAlrPhgzavhgplXHT2 +# 6ux6anSg8Evu75SjrFDyh+3XOjCDyft9V77l4/hByuVkrrOj7FjshZrM77nq81YY +# uVxzmq/FdxeDWds3GhhyVKVB0rYjdaNDmuV3fJZ5t0GNv+zcgKCf0Xd1WF81E+Al +# GmcLfc4l+gcK5GEh2NQc5QfGNpn0ltDGFf5Ozdeui53bFv0ExpK91IjmqaOqu/dk +# ODtfzAzQNb50GQOmxapMomE2gj4d8yu8l13bS3g7LfU772Aj6PXsCyM2la+YZr9T +# 03u4aUoqlmZpxJTG9F9urJh4iIAGXKKy7aIwggb+MIIE5qADAgECAhMzAAfqVHr/ +# 4Q/aDzAcAAAAB+pUMA0GCSqGSIb3DQEBDAUAMFoxCzAJBgNVBAYTAlVTMR4wHAYD +# VQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKzApBgNVBAMTIk1pY3Jvc29mdCBJ +# RCBWZXJpZmllZCBDUyBFT0MgQ0EgMDIwHhcNMjYwNDA3MDcyODM1WhcNMjYwNDEw +# MDcyODM1WjB8MQswCQYDVQQGEwJVUzEPMA0GA1UECBMGT3JlZ29uMRIwEAYDVQQH +# EwlCZWF2ZXJ0b24xIzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9u +# MSMwIQYDVQQDExpQeXRob24gU29mdHdhcmUgRm91bmRhdGlvbjCCAaIwDQYJKoZI +# hvcNAQEBBQADggGPADCCAYoCggGBAND/lHfn3OCIvUzMUIL6OdsKJrpnvuRtahV1 +# 6NCf0YSqOQemwQw2bTIyTkgSFwY4WaCvfHzcliURiPidXiqy56OmeC19A95BarKA +# UmKRv3bVpM0XEK7OLvMyRFNg9aPUi1nmdF3Vx02RI9p88wBHQR5nNIpOTXlwfONQ +# klggyEZSxkBf+dCL6jtz4jiqoreiEmRwesOrtQxKNsRuezbumpmVMZGxrMQVLBIX +# OWG9a3GS6Sqfi+cJgxQhSKa9JENPRojyxOyVG8vdwJQiMqSjm2ZMFAkIkSWBQSfx +# WjrRmw8/20WaBENattpqb7/cjX7zwimJ86uV48D8AQIGzAxfYAySG6NG9iMfU5S5 +# wzDFpiCuXyfrlgAbZu4fnBIyOmGcq01XxruzJ3FcdLMif5YXZU+n30XOaJfgY9/x +# Gq2HiEIQF5MeuxknfD+vYi/GXGtC/nlKS0Tx91+YXt6RctxgJEwpZCGzFZmmaiUa +# Y0GBp4jzXXwLqX8T15lgxAGoqoPvvwIDAQABo4ICGTCCAhUwDAYDVR0TAQH/BAIw +# ADAOBgNVHQ8BAf8EBAMCB4AwPAYDVR0lBDUwMwYKKwYBBAGCN2EBAAYIKwYBBQUH +# AwMGGysGAQQBgjdhgqKNuwqmkohkgZH0oEWCk/3hbzAdBgNVHQ4EFgQUy3N6DzeS +# y91jju8Ihmm3r+5AO58wHwYDVR0jBBgwFoAUZZ9RzoVofy+KRYiq3acxux4NAF4w +# ZwYDVR0fBGAwXjBcoFqgWIZWaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9w +# cy9jcmwvTWljcm9zb2Z0JTIwSUQlMjBWZXJpZmllZCUyMENTJTIwRU9DJTIwQ0El +# MjAwMi5jcmwwgaUGCCsGAQUFBwEBBIGYMIGVMGQGCCsGAQUFBzAChlhodHRwOi8v +# d3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NlcnRzL01pY3Jvc29mdCUyMElEJTIw +# VmVyaWZpZWQlMjBDUyUyMEVPQyUyMENBJTIwMDIuY3J0MC0GCCsGAQUFBzABhiFo +# dHRwOi8vb25lb2NzcC5taWNyb3NvZnQuY29tL29jc3AwZgYDVR0gBF8wXTBRBgwr +# BgEEAYI3TIN9AQEwQTA/BggrBgEFBQcCARYzaHR0cDovL3d3dy5taWNyb3NvZnQu +# Y29tL3BraW9wcy9Eb2NzL1JlcG9zaXRvcnkuaHRtMAgGBmeBDAEEATANBgkqhkiG +# 9w0BAQwFAAOCAgEAPPwJPfkrkQMH39/iTBbir6tGnQpLCpOuP1A6mmKp22GxCG0/ +# 1IPx4QK1qXpy8hYd/G9ySDSYu3DSg22/icSmGSxdcI3zoRsj9vdJeesQrxtK8v9y +# 4zMxN5TaLV5CmatSUZPyX1t7Tee9wiLBUeZIj+3Lg2gNUsdvavywRYxSYkWGuGaM +# jGtJrs4PoJW3f4KkOc5mShCpUgl4Mo9ZO+ChcQpKEP99UJ9CXB9wrNzXnEOTyGnR +# f1sYklPqBifC7hrnKIPZiJte1efmGeExmspWewmUSNXCIGenDAN8XDut2yi1iSSQ +# n1VtL6deCRhS1cTn+FAzy2q7a/8Jhhq+HUlcJwRGtrxgKZHrwEvGRvIWNK5l1rKl +# Q+WQ7RqRrH6PpSfR/xoptfpJX9LNUoHS0m114HcE2xk2hbv+U/5ZgxUtSd4MbF7/ +# C8eShz4Os8CznYXJ/d+kfvoyEqKE9VCbc4BUC+w1iufQOPo4tRvK4TFJu1N4IqJk +# NsChWXUef7lIT5CoaJw4np0dVS2NosmRCxi1dMyADzqFNDXGKQxq5k6MpnXbevL5 +# JdcznhhxgwRUcwNK/3f9WSaU2mnI+6tHrnATteL7Ct6FzZWjqWDbURkU66bRqrBh +# +u5KyLZAAQXTfdsaDUfxtElQJf5wROgYvwnW1dGvujgc+XKVvf1VT3GSFRIwggda +# MIIFQqADAgECAhMzAAAABft6XDITYd9dAAAAAAAFMA0GCSqGSIb3DQEBDAUAMGMx +# CzAJBgNVBAYTAlVTMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xNDAy +# 
BgNVBAMTK01pY3Jvc29mdCBJRCBWZXJpZmllZCBDb2RlIFNpZ25pbmcgUENBIDIw +# MjEwHhcNMjEwNDEzMTczMTUzWhcNMjYwNDEzMTczMTUzWjBaMQswCQYDVQQGEwJV +# UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSswKQYDVQQDEyJNaWNy +# b3NvZnQgSUQgVmVyaWZpZWQgQ1MgRU9DIENBIDAyMIICIjANBgkqhkiG9w0BAQEF +# AAOCAg8AMIICCgKCAgEA0hqZfD8ykKTA6CDbWvshmBpDoBf7Lv132RVuSqVwQO3a +# ALLkuRnnTIoRmMGo0fIMQrtwR6UHB06xdqOkAfqB6exubXTHu44+duHUCdE4ngjE +# LBQyluMuSOnHaEdveIbt31OhMEX/4nQkph4+Ah0eR4H2sTRrVKmKrlOoQlhia73Q +# g2dHoitcX1uT1vW3Knpt9Mt76H7ZHbLNspMZLkWBabKMl6BdaWZXYpPGdS+qY80g +# DaNCvFq0d10UMu7xHesIqXpTDT3Q3AeOxSylSTc/74P3og9j3OuemEFauFzL55t1 +# MvpadEhQmD8uFMxFv/iZOjwvcdY1zhanVLLyplz13/NzSoU3QjhPdqAGhRIwh/YD +# zo3jCdVJgWQRrW83P3qWFFkxNiME2iO4IuYgj7RwseGwv7I9cxOyaHihKMdT9Neo +# SjpSNzVnKKGcYMtOdMtKFqoV7Cim2m84GmIYZTBorR/Po9iwlasTYKFpGZqdWKyY +# nJO2FV8oMmWkIK1iagLLgEt6ZaR0rk/1jUYssyTiRqWr84Qs3XL/V5KUBEtUEQfQ +# /4RtnI09uFFUIGJZV9mD/xOUksWodGrCQSem6Hy261xMJAHqTqMuDKgwi8xk/mfl +# r7yhXPL73SOULmu1Aqu4I7Gpe6QwNW2TtQBxM3vtSTmdPW6rK5y0gED51RjsyK0C +# AwEAAaOCAg4wggIKMA4GA1UdDwEB/wQEAwIBhjAQBgkrBgEEAYI3FQEEAwIBADAd +# BgNVHQ4EFgQUZZ9RzoVofy+KRYiq3acxux4NAF4wVAYDVR0gBE0wSzBJBgRVHSAA +# MEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9wa2lvcHMv +# RG9jcy9SZXBvc2l0b3J5Lmh0bTAZBgkrBgEEAYI3FAIEDB4KAFMAdQBiAEMAQTAS +# BgNVHRMBAf8ECDAGAQH/AgEAMB8GA1UdIwQYMBaAFNlBKbAPD2Ns72nX9c0pnqRI +# ajDmMHAGA1UdHwRpMGcwZaBjoGGGX2h0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9w +# a2lvcHMvY3JsL01pY3Jvc29mdCUyMElEJTIwVmVyaWZpZWQlMjBDb2RlJTIwU2ln +# bmluZyUyMFBDQSUyMDIwMjEuY3JsMIGuBggrBgEFBQcBAQSBoTCBnjBtBggrBgEF +# BQcwAoZhaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9jZXJ0cy9NaWNy +# b3NvZnQlMjBJRCUyMFZlcmlmaWVkJTIwQ29kZSUyMFNpZ25pbmclMjBQQ0ElMjAy +# MDIxLmNydDAtBggrBgEFBQcwAYYhaHR0cDovL29uZW9jc3AubWljcm9zb2Z0LmNv +# bS9vY3NwMA0GCSqGSIb3DQEBDAUAA4ICAQBFSWDUd08X4g5HzvVfrB1SiV8pk6XP +# HT9jPkCmvU/uvBzmZRAjYk2gKYR3pXoStRJaJ/lhjC5Dq/2R7P1YRZHCDYyK0zvS +# RMdE6YQtgGjmsdhzD0nCS6hVVcgfmNQscPJ1WHxbvG5EQgYQ0ZED1FN0MOPQzWe1 +# zbH5Va0dSxtnodBVRjnyDYEm7sNEcvJHTG3eXzAyd00E5KDCsEl4z5O0mvXqwaH2 +# PS0200E6P4WqLwgs/NmUu5+Aa8Lw/2En2VkIW7Pkir4Un1jG6+tj/ehuqgFyUPPC +# h6kbnvk48bisi/zPjAVkj7qErr7fSYICCzJ4s4YUNVVHgdoFn2xbW7ZfBT3QA9zf +# hq9u4ExXbrVD5rxXSTFEUg2gzQq9JHxsdHyMfcCKLFQOXODSzcYeLpCd+r6GcoDB +# ToyPdKccjC6mAq6+/hiMDnpvKUIHpyYEzWUeattyKXtMf+QrJeQ+ny5jBL+xqdOO +# PEz3dg7qn8/oprUrUbGLBv9fWm18fWXdAv1PCtLL/acMLtHoyeSVMKQYqDHb3Qm0 +# uQ+NQ0YE4kUxSQa+W/cCzYAI32uN0nb9M4Mr1pj4bJZidNkM4JyYqezohILxYkgH +# bboJQISrQWrm5RYdyhKBpptJ9JJn0Z63LjdnzlOUxjlsAbQir2Wmz/OJE703BbHm +# QZRwzPx1vu7S5zCCB54wggWGoAMCAQICEzMAAAAHh6M0o3uljhwAAAAAAAcwDQYJ +# KoZIhvcNAQEMBQAwdzELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBD +# b3Jwb3JhdGlvbjFIMEYGA1UEAxM/TWljcm9zb2Z0IElkZW50aXR5IFZlcmlmaWNh +# dGlvbiBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAyMDIwMB4XDTIxMDQwMTIw +# MDUyMFoXDTM2MDQwMTIwMTUyMFowYzELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1p +# Y3Jvc29mdCBDb3Jwb3JhdGlvbjE0MDIGA1UEAxMrTWljcm9zb2Z0IElEIFZlcmlm +# aWVkIENvZGUgU2lnbmluZyBQQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +# ADCCAgoCggIBALLwwK8ZiCji3VR6TElsaQhVCbRS/3pK+MHrJSj3Zxd3KU3rlfL3 +# qrZilYKJNqztA9OQacr1AwoNcHbKBLbsQAhBnIB34zxf52bDpIO3NJlfIaTE/xrw +# eLoQ71lzCHkD7A4As1Bs076Iu+mA6cQzsYYH/Cbl1icwQ6C65rU4V9NQhNUwgrx9 +# rGQ//h890Q8JdjLLw0nV+ayQ2Fbkd242o9kH82RZsH3HEyqjAB5a8+Ae2nPIPc8s +# ZU6ZE7iRrRZywRmrKDp5+TcmJX9MRff241UaOBs4NmHOyke8oU1TYrkxh+YeHgfW +# o5tTgkoSMoayqoDpHOLJs+qG8Tvh8SnifW2Jj3+ii11TS8/FGngEaNAWrbyfNrC6 +# 9oKpRQXY9bGH6jn9NEJv9weFxhTwyvx9OJLXmRGbAUXN1U9nf4lXezky6Uh/cgjk +# Vd6CGUAf0K+Jw+GE/5VpIVbcNr9rNE50Sbmy/4RTCEGvOq3GhjITbCa4crCzTTHg +# 
YYjHs1NbOc6brH+eKpWLtr+bGecy9CrwQyx7S/BfYJ+ozst7+yZtG2wR461uckFu +# 0t+gCwLdN0A6cFtSRtR8bvxVFyWwTtgMMFRuBa3vmUOTnfKLsLefRaQcVTgRnzeL +# zdpt32cdYKp+dhr2ogc+qM6K4CBI5/j4VFyC4QFeUP2YAidLtvpXRRo3AgMBAAGj +# ggI1MIICMTAOBgNVHQ8BAf8EBAMCAYYwEAYJKwYBBAGCNxUBBAMCAQAwHQYDVR0O +# BBYEFNlBKbAPD2Ns72nX9c0pnqRIajDmMFQGA1UdIARNMEswSQYEVR0gADBBMD8G +# CCsGAQUFBwIBFjNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL0RvY3Mv +# UmVwb3NpdG9yeS5odG0wGQYJKwYBBAGCNxQCBAweCgBTAHUAYgBDAEEwDwYDVR0T +# AQH/BAUwAwEB/zAfBgNVHSMEGDAWgBTIftJqhSobyhmYBAcnz1AQT2ioojCBhAYD +# VR0fBH0wezB5oHegdYZzaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9j +# cmwvTWljcm9zb2Z0JTIwSWRlbnRpdHklMjBWZXJpZmljYXRpb24lMjBSb290JTIw +# Q2VydGlmaWNhdGUlMjBBdXRob3JpdHklMjAyMDIwLmNybDCBwwYIKwYBBQUHAQEE +# gbYwgbMwgYEGCCsGAQUFBzAChnVodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtp +# b3BzL2NlcnRzL01pY3Jvc29mdCUyMElkZW50aXR5JTIwVmVyaWZpY2F0aW9uJTIw +# Um9vdCUyMENlcnRpZmljYXRlJTIwQXV0aG9yaXR5JTIwMjAyMC5jcnQwLQYIKwYB +# BQUHMAGGIWh0dHA6Ly9vbmVvY3NwLm1pY3Jvc29mdC5jb20vb2NzcDANBgkqhkiG +# 9w0BAQwFAAOCAgEAfyUqnv7Uq+rdZgrbVyNMul5skONbhls5fccPlmIbzi+OwVdP +# Q4H55v7VOInnmezQEeW4LqK0wja+fBznANbXLB0KrdMCbHQpbLvG6UA/Xv2pfpVI +# E1CRFfNF4XKO8XYEa3oW8oVH+KZHgIQRIwAbyFKQ9iyj4aOWeAzwk+f9E5StNp5T +# 8FG7/VEURIVWArbAzPt9ThVN3w1fAZkF7+YU9kbq1bCR2YD+MtunSQ1Rft6XG7b4 +# e0ejRA7mB2IoX5hNh3UEauY0byxNRG+fT2MCEhQl9g2i2fs6VOG19CNep7SquKaB +# jhWmirYyANb0RJSLWjinMLXNOAga10n8i9jqeprzSMU5ODmrMCJE12xS/NWShg/t +# uLjAsKP6SzYZ+1Ry358ZTFcx0FS/mx2vSoU8s8HRvy+rnXqyUJ9HBqS0DErVLjQw +# K8VtsBdekBmdTbQVoCgPCqr+PDPB3xajYnzevs7eidBsM71PINK2BoE2UfMwxCCX +# 3mccFgx6UsQeRSdVVVNSyALQe6PT12418xon2iDGE81OGCreLzDcMAZnrUAx4XQL +# Uz6ZTl65yPUiOh3k7Yww94lDf+8oG2oZmDh5O1Qe38E+M3vhKwmzIeoB1dVLlz4i +# 3IpaDcR+iuGjH2TdaC1ZOmBXiCRKJLj4DT2uhJ04ji+tHD6n58vhavFIrmcxghrm +# MIIa4gIBATBxMFoxCzAJBgNVBAYTAlVTMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29y +# cG9yYXRpb24xKzApBgNVBAMTIk1pY3Jvc29mdCBJRCBWZXJpZmllZCBDUyBFT0Mg +# Q0EgMDICEzMAB+pUev/hD9oPMBwAAAAH6lQwDQYJYIZIAWUDBAIBBQCggbIwGQYJ +# KoZIhvcNAQkDMQwGCisGAQQBgjcCAQQwHAYKKwYBBAGCNwIBCzEOMAwGCisGAQQB +# gjcCARUwLwYJKoZIhvcNAQkEMSIEICpXe3RS3b2coD0CJveEHlglqtPUYZ2FqSrO +# UfP6C6Y4MEYGCisGAQQBgjcCAQwxODA2oDCALgBQAHkAdABoAG8AbgAgADMALgAx +# ADQALgA0ACAAKAAyADMAMQAxADYAZgA5ACmhAoAAMA0GCSqGSIb3DQEBAQUABIIB +# gHaAK9dRSQxQvAiBXu8BOjm/3WL7Hdh4vVPDdI7TVKrNk9GE/8isBY5v3SDISaGV +# VzilkWjgUJX1tp5Wq3Ix9zJVToVG3kaHlNrEjb+cK8oqkMJqIS0GTrS70Xs1UPKk +# PNSzUfi1ddmCW9Up3bmvR7e5SotcgAKysucyRPHmhDZKdC0tM3FdzqbFMs0QV1QL +# gUzdgWEMqRQp7PN9Y3uHeO7/FUUGkEGBHuKq9kGXbnYGwZEazzy6Uxx2Nd47iCsu +# cEiOdpecA13fGE6lnM+uTZGOvshUVQeTIkr5pb2NS+lXF+CEq6uqMntdAfblYzCs +# gTCp8OflGXHVnbo4p8SsrGaBV7KnbzCk7uuEqW3SJiQfMrkQjvjc1cRwQvOVVaPj +# F1Qknn7KmxukU7FEIwyGXPKQ5OG8oUugOTS/5aqSbpmY6HuTbbWH/7MULJlfikOq +# zuteCVjvd18Y6LMk+mK1x9PAtoHmZuMIedP6rHNE8admogur39k1IhRJIcG9S8Kk +# 3aGCGBEwghgNBgorBgEEAYI3AwMBMYIX/TCCF/kGCSqGSIb3DQEHAqCCF+owghfm +# AgEDMQ8wDQYJYIZIAWUDBAIBBQAwggFiBgsqhkiG9w0BCRABBKCCAVEEggFNMIIB +# SQIBAQYKKwYBBAGEWQoDATAxMA0GCWCGSAFlAwQCAQUABCASSpbbgjgopvaqW5Og +# e0ZpH9C7TBmrsY+qwoqXJMywiwIGacJyyM2cGBMyMDI2MDQwNzE2MTkwNy44NjJa +# MASAAgH0oIHhpIHeMIHbMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3Rv +# bjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0 +# aW9uMSUwIwYDVQQLExxNaWNyb3NvZnQgQW1lcmljYSBPcGVyYXRpb25zMScwJQYD +# VQQLEx5uU2hpZWxkIFRTUyBFU046N0QwMC0wNUUwLUQ5NDcxNTAzBgNVBAMTLE1p +# Y3Jvc29mdCBQdWJsaWMgUlNBIFRpbWUgU3RhbXBpbmcgQXV0aG9yaXR5oIIPITCC +# B4IwggVqoAMCAQICEzMAAAAF5c8P/2YuyYcAAAAAAAUwDQYJKoZIhvcNAQEMBQAw +# dzELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjFI +# 
MEYGA1UEAxM/TWljcm9zb2Z0IElkZW50aXR5IFZlcmlmaWNhdGlvbiBSb290IENl +# cnRpZmljYXRlIEF1dGhvcml0eSAyMDIwMB4XDTIwMTExOTIwMzIzMVoXDTM1MTEx +# OTIwNDIzMVowYTELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jw +# b3JhdGlvbjEyMDAGA1UEAxMpTWljcm9zb2Z0IFB1YmxpYyBSU0EgVGltZXN0YW1w +# aW5nIENBIDIwMjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCefOdS +# Y/3gxZ8FfWO1BiKjHB7X55cz0RMFvWVGR3eRwV1wb3+yq0OXDEqhUhxqoNv6iYWK +# jkMcLhEFxvJAeNcLAyT+XdM5i2CgGPGcb95WJLiw7HzLiBKrxmDj1EQB/mG5eEiR +# BEp7dDGzxKCnTYocDOcRr9KxqHydajmEkzXHOeRGwU+7qt8Md5l4bVZrXAhK+WSk +# 5CihNQsWbzT1nRliVDwunuLkX1hyIWXIArCfrKM3+RHh+Sq5RZ8aYyik2r8HxT+l +# 2hmRllBvE2Wok6IEaAJanHr24qoqFM9WLeBUSudz+qL51HwDYyIDPSQ3SeHtKog0 +# ZubDk4hELQSxnfVYXdTGncaBnB60QrEuazvcob9n4yR65pUNBCF5qeA4QwYnilBk +# fnmeAjRN3LVuLr0g0FXkqfYdUmj1fFFhH8k8YBozrEaXnsSL3kdTD01X+4LfIWOu +# FzTzuoslBrBILfHNj8RfOxPgjuwNvE6YzauXi4orp4Sm6tF245DaFOSYbWFK5ZgG +# 6cUY2/bUq3g3bQAqZt65KcaewEJ3ZyNEobv35Nf6xN6FrA6jF9447+NHvCjeWLCQ +# Z3M8lgeCcnnhTFtyQX3XgCoc6IRXvFOcPVrr3D9RPHCMS6Ckg8wggTrtIVnY8yjb +# vGOUsAdZbeXUIQAWMs0d3cRDv09SvwVRd61evQIDAQABo4ICGzCCAhcwDgYDVR0P +# AQH/BAQDAgGGMBAGCSsGAQQBgjcVAQQDAgEAMB0GA1UdDgQWBBRraSg6NS9IY0DP +# e9ivSek+2T3bITBUBgNVHSAETTBLMEkGBFUdIAAwQTA/BggrBgEFBQcCARYzaHR0 +# cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9Eb2NzL1JlcG9zaXRvcnkuaHRt +# MBMGA1UdJQQMMAoGCCsGAQUFBwMIMBkGCSsGAQQBgjcUAgQMHgoAUwB1AGIAQwBB +# MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUyH7SaoUqG8oZmAQHJ89QEE9o +# qKIwgYQGA1UdHwR9MHsweaB3oHWGc2h0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9w +# a2lvcHMvY3JsL01pY3Jvc29mdCUyMElkZW50aXR5JTIwVmVyaWZpY2F0aW9uJTIw +# Um9vdCUyMENlcnRpZmljYXRlJTIwQXV0aG9yaXR5JTIwMjAyMC5jcmwwgZQGCCsG +# AQUFBwEBBIGHMIGEMIGBBggrBgEFBQcwAoZ1aHR0cDovL3d3dy5taWNyb3NvZnQu +# Y29tL3BraW9wcy9jZXJ0cy9NaWNyb3NvZnQlMjBJZGVudGl0eSUyMFZlcmlmaWNh +# dGlvbiUyMFJvb3QlMjBDZXJ0aWZpY2F0ZSUyMEF1dGhvcml0eSUyMDIwMjAuY3J0 +# MA0GCSqGSIb3DQEBDAUAA4ICAQBfiHbHfm21WhV150x4aPpO4dhEmSUVpbixNDmv +# 6TvuIHv1xIs174bNGO/ilWMm+Jx5boAXrJxagRhHQtiFprSjMktTliL4sKZyt2i+ +# SXncM23gRezzsoOiBhv14YSd1Klnlkzvgs29XNjT+c8hIfPRe9rvVCMPiH7zPZcw +# 5nNjthDQ+zD563I1nUJ6y59TbXWsuyUsqw7wXZoGzZwijWT5oc6GvD3HDokJY401 +# uhnj3ubBhbkR83RbfMvmzdp3he2bvIUztSOuFzRqrLfEvsPkVHYnvH1wtYyrt5vS +# hiKheGpXa2AWpsod4OJyT4/y0dggWi8g/tgbhmQlZqDUf3UqUQsZaLdIu/XSjgoZ +# qDjamzCPJtOLi2hBwL+KsCh0Nbwc21f5xvPSwym0Ukr4o5sCcMUcSy6TEP7uMV8R +# X0eH/4JLEpGyae6Ki8JYg5v4fsNGif1OXHJ2IWG+7zyjTDfkmQ1snFOTgyEX8qBp +# efQbF0fx6URrYiarjmBprwP6ZObwtZXJ23jK3Fg/9uqM3j0P01nzVygTppBabzxP +# Ah/hHhhls6kwo3QLJ6No803jUsZcd4JQxiYHHc+Q/wAMcPUnYKv/q2O444LO1+n6 +# j01z5mggCSlRwD9faBIySAcA9S8h22hIAcRQqIGEjolCK9F6nK9ZyX4lhthsGHum +# aABdWzCCB5cwggV/oAMCAQICEzMAAABV2d1pJij5+OIAAAAAAFUwDQYJKoZIhvcN +# AQEMBQAwYTELMAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3Jh +# dGlvbjEyMDAGA1UEAxMpTWljcm9zb2Z0IFB1YmxpYyBSU0EgVGltZXN0YW1waW5n +# IENBIDIwMjAwHhcNMjUxMDIzMjA0NjQ5WhcNMjYxMDIyMjA0NjQ5WjCB2zELMAkG +# A1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQx +# HjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjElMCMGA1UECxMcTWljcm9z +# b2Z0IEFtZXJpY2EgT3BlcmF0aW9uczEnMCUGA1UECxMeblNoaWVsZCBUU1MgRVNO +# OjdEMDAtMDVFMC1EOTQ3MTUwMwYDVQQDEyxNaWNyb3NvZnQgUHVibGljIFJTQSBU +# aW1lIFN0YW1waW5nIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC +# AgoCggIBAL25H5IeWUiz9DAlFmn2sPymaFWbvYkMfK+ScIWb3a1IvOlIwghUDjY0 +# Gp6yMRhfYURiGS0GedIB6ywvuH6VBCX3+bdOFcAclgtv21jrpOjZmk4fSaT2Q3Bs +# zUfeUJa8o3xI7ZfoMY9dszTxHQAz6ZVX87fHGEVhQcfxW33IdPJOj/ae419qtYxT +# 21MVmCfsTshgtWioQxmOW/vMC9/b+qgtBxSMf798vm3qfmhF6KCvFaHlivrM32hY +# 16PGE3L0PFC+LM7vRxU7mTb+r76CeybvqOWk4+dbKYftPhV1t/E5S/6wwXeYmu/Y +# 
7JC7Tnh2w45G5Y4pcM3oHMb/YuPRdOWa0v+RC2QgmNVWqjuxDiylWscXQDuaMtb2 +# 9AcdGUVV9ZsRY2M2sthAtOdZOshiR5ufMtaHtiCkWv0jNfgUxrHurxzYuUNneWZ6 +# EfQDgFAw8CSCKkSOK2c9jEop4ddVq10xvbqxdrqMneVXvvIcXrPQAXj9j2ECpV2E +# wMb3Wnmpw00P78JpzPsk3Fs61ZvOGd/F1RcOBu6f2TWdp7HL7+rq7tgHr13Mldbf +# IWu4lpoYYE1gTQa1Yrg5XN4j7zs9klT2z3qocmPzV8DWQgIHNh+aTs7bujMEMQyI +# 7Xt1zPxZCgcR6H0tmmzU/9BxvsWbRalCQ2sYGyWupTdc4e7KY7kPAgMBAAGjggHL +# MIIBxzAdBgNVHQ4EFgQUVgRfEG3cCAPwyL+pyRbKwdesZbYwHwYDVR0jBBgwFoAU +# a2koOjUvSGNAz3vYr0npPtk92yEwbAYDVR0fBGUwYzBhoF+gXYZbaHR0cDovL3d3 +# dy5taWNyb3NvZnQuY29tL3BraW9wcy9jcmwvTWljcm9zb2Z0JTIwUHVibGljJTIw +# UlNBJTIwVGltZXN0YW1waW5nJTIwQ0ElMjAyMDIwLmNybDB5BggrBgEFBQcBAQRt +# MGswaQYIKwYBBQUHMAKGXWh0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9wa2lvcHMv +# Y2VydHMvTWljcm9zb2Z0JTIwUHVibGljJTIwUlNBJTIwVGltZXN0YW1waW5nJTIw +# Q0ElMjAyMDIwLmNydDAMBgNVHRMBAf8EAjAAMBYGA1UdJQEB/wQMMAoGCCsGAQUF +# BwMIMA4GA1UdDwEB/wQEAwIHgDBmBgNVHSAEXzBdMFEGDCsGAQQBgjdMg30BATBB +# MD8GCCsGAQUFBwIBFjNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL0Rv +# Y3MvUmVwb3NpdG9yeS5odG0wCAYGZ4EMAQQCMA0GCSqGSIb3DQEBDAUAA4ICAQBS +# HuGSVHvalCnFnlsqXIQefH1xP2SFr9g+Vz+f5P7QeywjfQb5jUlSmd1XnJUDPe/M +# HxL7r3TEElL+mNtG6CDPAytStSFPXD9tTBtBMYh8Wqo64pH9qm361yIqeBH979mz +# WCkMQsTd0nM6dUl9B+7qiti+ToXwxIl39eYqLuYYfhD2mqqePXMzUKSQzkf73yYI +# VHP6nLJQz4aAmaWcfG9jg78sBkDV8KpW7JgktuLhphJEN1B+SVHjenPdcmrFXIUu +# /K4jK5ukfWaQIjuaXzSjBlNjC5tQN6adPfA3GxUwHPeR4ekL5If/9vBf13tmzBW+ +# gy+0sNGTveb9IL9GU8iX8UvywsX62nhCCPRUhTigDBKdczRUrNrntBhowbfchBDF +# ML8avRMRc9Gmc2JvIryX336SFQ51//q1UU2HMSJEMhWLJSIWJVhfUowsOa+PampI +# zETYfFvTu2mqKJUlWZXkGYxrdCvCczJcqeoadpW1ul6kcdnDh228SQ8ZhDc6IRlM +# 4iNd5SNoNgX+aom3wuGyjUaSaPZWxPB1G2NKiYhPLt0lPHg0Gskj1zhISY8UQkMM +# Dr3o2JgRuT+wnJEDQUp55ddvhSkSoD6I9DL/s+TjIY/c9jLaW5xywJHqdKHUApRM +# sghv7kebSua1upmR+TquelFktDSOjVdSRkuya4uoxTGCB0Mwggc/AgEBMHgwYTEL +# MAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEyMDAG +# A1UEAxMpTWljcm9zb2Z0IFB1YmxpYyBSU0EgVGltZXN0YW1waW5nIENBIDIwMjAC +# EzMAAABV2d1pJij5+OIAAAAAAFUwDQYJYIZIAWUDBAIBBQCgggScMBEGCyqGSIb3 +# DQEJEAIPMQIFADAaBgkqhkiG9w0BCQMxDQYLKoZIhvcNAQkQAQQwHAYJKoZIhvcN +# AQkFMQ8XDTI2MDQwNzE2MTkwN1owLwYJKoZIhvcNAQkEMSIEIO1tbnR5rJvq+GXf +# 4bgeV3HDcP+8Ud48R8sLBQBLI+rBMIG5BgsqhkiG9w0BCRACLzGBqTCBpjCBozCB +# oAQg2Lk8l2SGYru/ff7+D2qrJnkswcYdK6pGKu7GGGr4/s0wfDBlpGMwYTELMAkG +# A1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEyMDAGA1UE +# AxMpTWljcm9zb2Z0IFB1YmxpYyBSU0EgVGltZXN0YW1waW5nIENBIDIwMjACEzMA +# AABV2d1pJij5+OIAAAAAAFUwggNeBgsqhkiG9w0BCRACEjGCA00wggNJoYIDRTCC +# A0EwggIpAgEBMIIBCaGB4aSB3jCB2zELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldh +# c2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBD +# b3Jwb3JhdGlvbjElMCMGA1UECxMcTWljcm9zb2Z0IEFtZXJpY2EgT3BlcmF0aW9u +# czEnMCUGA1UECxMeblNoaWVsZCBUU1MgRVNOOjdEMDAtMDVFMC1EOTQ3MTUwMwYD +# VQQDEyxNaWNyb3NvZnQgUHVibGljIFJTQSBUaW1lIFN0YW1waW5nIEF1dGhvcml0 +# eaIjCgEBMAcGBSsOAwIaAxUAHTtUAYJlv7bgWVeRBo4X7FeHDeqgZzBlpGMwYTEL +# MAkGA1UEBhMCVVMxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEyMDAG +# A1UEAxMpTWljcm9zb2Z0IFB1YmxpYyBSU0EgVGltZXN0YW1waW5nIENBIDIwMjAw +# DQYJKoZIhvcNAQELBQACBQDtf2YdMCIYDzIwMjYwNDA3MTExNjQ1WhgPMjAyNjA0 +# MDgxMTE2NDVaMHQwOgYKKwYBBAGEWQoEATEsMCowCgIFAO1/Zh0CAQAwBwIBAAIC +# Ew4wBwIBAAICHEcwCgIFAO2At50CAQAwNgYKKwYBBAGEWQoEAjEoMCYwDAYKKwYB +# BAGEWQoDAqAKMAgCAQACAwehIKEKMAgCAQACAwGGoDANBgkqhkiG9w0BAQsFAAOC +# AQEAj/DYTzrEaV8hMcUsbVdSBihFoM0FgJoy99LL0e/ztow4UwgL27kPtwlbyVfW +# Onw+tLBT2sbsS4yIj+QnTUoHeqNA/Ue9FPkPH801qthzRDZ4UTwz09KgVfKchw0Y +# jGTSpL8GQL6cAYgFuf/Zh0qEVwYW4OxiyKEMKpzYdqoNwRx9LrCgE0km4WZ3bDBw +# 
VFCKcd176gCpV5Yle8MbDWFizSUrylp7v7q/Knx0BrWs8Q17RUHnYAyehZCD0d0H +# zXEClh+sKxScyk8TjI2o7MKau8YRHzeLDN9wj24hJ9URkNThjDhY3ruNMvves5xS +# fKmhpIqWsb5ZvZW4hmbqdxwWbDANBgkqhkiG9w0BAQEFAASCAgA6C6+LOeV/wer0 +# SB0XI0QO89oeTrCbxFNxmKegd8rz7ERBKLcxLh9WFOfDf4t5oi0RlPP8cFwByVIb +# TIgtggkIq5uN6/qJ9+iF+w7a4123dTuQSAi35d7VMC/q8rtHL6ev5zeKeYVnov5C +# JqmlUyVL5NPRxrUaWeXNXOmOMZwmNTqZspf/eadKTiBwDVJZ4DLXkrZNoR3Gd7Wq +# w6N/gt5MDhL26NU2xnysk2pXRdTna4Fh545XD2YGqZAJdJ6nzv/uSQBDnAL7uOV1 +# U5I+hEBgbCMLAkNzTJUprQFaz1X9taVTbYQOE8IGkseOg1nw8kDc5nYga3kNZQ/k +# u4gbx3opZuxUL43teYm17AJwHphWgQojwdvi+OozR0bS27xVK5O5Y+5yReg4U6DG +# XjuaFzxxA8efdy7DDETfFWhO6fSfREWcf+Nb9uA7i+qPOJvK4ls5p3yTwJgG1KUr +# UNFhIw5LZZxn+yAArVY712VcCDsdBQiJ3ZbS90/AXjtsgFP/Cr99YhoKi7WNOtJ9 +# CHXI8Y5RMy3AAGBXmSJ2f1bEVX7Ya1Wdi7UlwgYbchhfL25XRKonczGNxAzymO6t +# MCAweDvuELomErT+70YfQXd1kye4j5KiCQXYtpedFgb9sDqGv5j7c5ACHVyAsvPA +# Wx3T3b6yi2pK9C6xC8g5cmjf8c7Rnw== +# SIG # End signature block diff --git a/Python313_13_x86_Template/Lib/venv/scripts/common/activate b/Python314_4_x86_Template/Lib/venv/scripts/common/activate similarity index 100% rename from Python313_13_x86_Template/Lib/venv/scripts/common/activate rename to Python314_4_x86_Template/Lib/venv/scripts/common/activate diff --git a/Python313_13_x86_Template/Lib/venv/scripts/common/activate.fish b/Python314_4_x86_Template/Lib/venv/scripts/common/activate.fish similarity index 100% rename from Python313_13_x86_Template/Lib/venv/scripts/common/activate.fish rename to Python314_4_x86_Template/Lib/venv/scripts/common/activate.fish diff --git a/Python313_13_x86_Template/Lib/venv/scripts/nt/activate.bat b/Python314_4_x86_Template/Lib/venv/scripts/nt/activate.bat similarity index 100% rename from Python313_13_x86_Template/Lib/venv/scripts/nt/activate.bat rename to Python314_4_x86_Template/Lib/venv/scripts/nt/activate.bat diff --git a/Python313_13_x86_Template/Lib/venv/scripts/nt/deactivate.bat b/Python314_4_x86_Template/Lib/venv/scripts/nt/deactivate.bat similarity index 100% rename from Python313_13_x86_Template/Lib/venv/scripts/nt/deactivate.bat rename to Python314_4_x86_Template/Lib/venv/scripts/nt/deactivate.bat diff --git a/Python314_4_x86_Template/Lib/venv/scripts/nt/venvlauncher.exe b/Python314_4_x86_Template/Lib/venv/scripts/nt/venvlauncher.exe new file mode 100644 index 00000000..c5761f98 Binary files /dev/null and b/Python314_4_x86_Template/Lib/venv/scripts/nt/venvlauncher.exe differ diff --git a/Python314_4_x86_Template/Lib/venv/scripts/nt/venvwlauncher.exe b/Python314_4_x86_Template/Lib/venv/scripts/nt/venvwlauncher.exe new file mode 100644 index 00000000..1b62a5b7 Binary files /dev/null and b/Python314_4_x86_Template/Lib/venv/scripts/nt/venvwlauncher.exe differ diff --git a/Python313_13_x86_Template/Lib/venv/scripts/posix/activate.csh b/Python314_4_x86_Template/Lib/venv/scripts/posix/activate.csh similarity index 100% rename from Python313_13_x86_Template/Lib/venv/scripts/posix/activate.csh rename to Python314_4_x86_Template/Lib/venv/scripts/posix/activate.csh diff --git a/Python314_4_x86_Template/Lib/warnings.py b/Python314_4_x86_Template/Lib/warnings.py new file mode 100644 index 00000000..6759857d --- /dev/null +++ b/Python314_4_x86_Template/Lib/warnings.py @@ -0,0 +1,99 @@ +import sys + +__all__ = [ + "warn", + "warn_explicit", + "showwarning", + "formatwarning", + "filterwarnings", + "simplefilter", + "resetwarnings", + "catch_warnings", + "deprecated", +] + +from _py_warnings import ( + WarningMessage, + _DEPRECATED_MSG, + _OptionError, + _add_filter, + _deprecated, + 
_filters_mutated, + _filters_mutated_lock_held, + _filters_version, + _formatwarning_orig, + _formatwarnmsg, + _formatwarnmsg_impl, + _get_context, + _get_filters, + _getaction, + _getcategory, + _is_filename_to_skip, + _is_internal_filename, + _is_internal_frame, + _lock, + _new_context, + _next_external_frame, + _processoptions, + _set_context, + _set_module, + _setoption, + _setup_defaults, + _showwarning_orig, + _showwarnmsg, + _showwarnmsg_impl, + _use_context, + _warn_unawaited_coroutine, + _warnings_context, + catch_warnings, + defaultaction, + deprecated, + filters, + filterwarnings, + formatwarning, + onceregistry, + resetwarnings, + showwarning, + simplefilter, + warn, + warn_explicit, +) + +try: + # Try to use the C extension, this will replace some parts of the + # _py_warnings implementation imported above. + from _warnings import ( + _acquire_lock, + _defaultaction as defaultaction, + _filters_mutated_lock_held, + _onceregistry as onceregistry, + _release_lock, + _warnings_context, + filters, + warn, + warn_explicit, + ) + + _warnings_defaults = True + + class _Lock: + def __enter__(self): + _acquire_lock() + return self + + def __exit__(self, *args): + _release_lock() + + _lock = _Lock() +except ImportError: + _warnings_defaults = False + + +# Module initialization +_set_module(sys.modules[__name__]) +_processoptions(sys.warnoptions) +if not _warnings_defaults: + _setup_defaults() + +del _warnings_defaults +del _setup_defaults diff --git a/Python313_13_x86_Template/Lib/wave.py b/Python314_4_x86_Template/Lib/wave.py similarity index 100% rename from Python313_13_x86_Template/Lib/wave.py rename to Python314_4_x86_Template/Lib/wave.py diff --git a/Python314_4_x86_Template/Lib/weakref.py b/Python314_4_x86_Template/Lib/weakref.py new file mode 100644 index 00000000..94e42781 --- /dev/null +++ b/Python314_4_x86_Template/Lib/weakref.py @@ -0,0 +1,574 @@ +"""Weak reference support for Python. + +This module is an implementation of PEP 205: + +https://peps.python.org/pep-0205/ +""" + +# Naming convention: Variables named "wr" are weak reference objects; +# they are called this instead of "ref" to avoid name collisions with +# the module-global ref() function imported from _weakref. + +from _weakref import ( + getweakrefcount, + getweakrefs, + ref, + proxy, + CallableProxyType, + ProxyType, + ReferenceType, + _remove_dead_weakref) + +from _weakrefset import WeakSet + +import _collections_abc # Import after _weakref to avoid circular import. +import sys +import itertools + +ProxyTypes = (ProxyType, CallableProxyType) + +__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs", + "WeakKeyDictionary", "ReferenceType", "ProxyType", + "CallableProxyType", "ProxyTypes", "WeakValueDictionary", + "WeakSet", "WeakMethod", "finalize"] + + +_collections_abc.MutableSet.register(WeakSet) + +class WeakMethod(ref): + """ + A custom `weakref.ref` subclass which simulates a weak reference to + a bound method, working around the lifetime problem of bound methods. + """ + + __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__" + + def __new__(cls, meth, callback=None): + try: + obj = meth.__self__ + func = meth.__func__ + except AttributeError: + raise TypeError("argument should be a bound method, not {}" + .format(type(meth))) from None + def _cb(arg): + # The self-weakref trick is needed to avoid creating a reference + # cycle. 
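+            # _cb is registered as the callback on both the object weakref
+            # and the function weakref; the _alive flag ensures the user
+            # callback fires at most once, whichever referent dies first.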
+ self = self_wr() + if self._alive: + self._alive = False + if callback is not None: + callback(self) + self = ref.__new__(cls, obj, _cb) + self._func_ref = ref(func, _cb) + self._meth_type = type(meth) + self._alive = True + self_wr = ref(self) + return self + + def __call__(self): + obj = super().__call__() + func = self._func_ref() + if obj is None or func is None: + return None + return self._meth_type(func, obj) + + def __eq__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is other + return ref.__eq__(self, other) and self._func_ref == other._func_ref + return NotImplemented + + def __ne__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is not other + return ref.__ne__(self, other) or self._func_ref != other._func_ref + return NotImplemented + + __hash__ = ref.__hash__ + + +class WeakValueDictionary(_collections_abc.MutableMapping): + """Mapping class that references values weakly. + + Entries in the dictionary will be discarded when no strong + reference to the value exists anymore + """ + # We inherit the constructor without worrying about the input + # dictionary; since it uses our .update() method, we get the right + # checks (if the other dictionary is a WeakValueDictionary, + # objects are unwrapped on the way out, and we always wrap on the + # way in). + + def __init__(self, other=(), /, **kw): + def remove(wr, selfref=ref(self), _atomic_removal=_remove_dead_weakref): + self = selfref() + if self is not None: + # Atomic removal is necessary since this function + # can be called asynchronously by the GC + _atomic_removal(self.data, wr.key) + self._remove = remove + self.data = {} + self.update(other, **kw) + + def __getitem__(self, key): + o = self.data[key]() + if o is None: + raise KeyError(key) + else: + return o + + def __delitem__(self, key): + del self.data[key] + + def __len__(self): + return len(self.data) + + def __contains__(self, key): + try: + o = self.data[key]() + except KeyError: + return False + return o is not None + + def __repr__(self): + return "<%s at %#x>" % (self.__class__.__name__, id(self)) + + def __setitem__(self, key, value): + self.data[key] = KeyedRef(value, self._remove, key) + + def copy(self): + new = WeakValueDictionary() + for key, wr in self.data.copy().items(): + o = wr() + if o is not None: + new[key] = o + return new + + __copy__ = copy + + def __deepcopy__(self, memo): + from copy import deepcopy + new = self.__class__() + for key, wr in self.data.copy().items(): + o = wr() + if o is not None: + new[deepcopy(key, memo)] = o + return new + + def get(self, key, default=None): + try: + wr = self.data[key] + except KeyError: + return default + else: + o = wr() + if o is None: + # This should only happen + return default + else: + return o + + def items(self): + for k, wr in self.data.copy().items(): + v = wr() + if v is not None: + yield k, v + + def keys(self): + for k, wr in self.data.copy().items(): + if wr() is not None: + yield k + + __iter__ = keys + + def itervaluerefs(self): + """Return an iterator that yields the weak references to the values. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the values around longer than needed. 
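+
+        Iteration is performed over a snapshot of the underlying dict
+        (``self.data.copy()``), so entries may be added or removed while
+        the iterator is being consumed.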
+ + """ + yield from self.data.copy().values() + + def values(self): + for wr in self.data.copy().values(): + obj = wr() + if obj is not None: + yield obj + + def popitem(self): + while True: + key, wr = self.data.popitem() + o = wr() + if o is not None: + return key, o + + def pop(self, key, *args): + try: + o = self.data.pop(key)() + except KeyError: + o = None + if o is None: + if args: + return args[0] + else: + raise KeyError(key) + else: + return o + + def setdefault(self, key, default=None): + try: + o = self.data[key]() + except KeyError: + o = None + if o is None: + self.data[key] = KeyedRef(default, self._remove, key) + return default + else: + return o + + def update(self, other=None, /, **kwargs): + d = self.data + if other is not None: + if not hasattr(other, "items"): + other = dict(other) + for key, o in other.items(): + d[key] = KeyedRef(o, self._remove, key) + for key, o in kwargs.items(): + d[key] = KeyedRef(o, self._remove, key) + + def valuerefs(self): + """Return a list of weak references to the values. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the values around longer than needed. + + """ + return list(self.data.copy().values()) + + def __ior__(self, other): + self.update(other) + return self + + def __or__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.copy() + c.update(other) + return c + return NotImplemented + + def __ror__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.__class__() + c.update(other) + c.update(self) + return c + return NotImplemented + + +class KeyedRef(ref): + """Specialized reference that includes a key corresponding to the value. + + This is used in the WeakValueDictionary to avoid having to create + a function object for each key stored in the mapping. A shared + callback object can use the 'key' attribute of a KeyedRef instead + of getting a reference to the key from an enclosing scope. + + """ + + __slots__ = "key", + + def __new__(type, ob, callback, key): + self = ref.__new__(type, ob, callback) + self.key = key + return self + + def __init__(self, ob, callback, key): + super().__init__(ob, callback) + + +class WeakKeyDictionary(_collections_abc.MutableMapping): + """ Mapping class that references keys weakly. + + Entries in the dictionary will be discarded when there is no + longer a strong reference to the key. This can be used to + associate additional data with an object owned by other parts of + an application without adding attributes to those objects. This + can be especially useful with objects that override attribute + accesses. 
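+
+    A minimal illustrative sketch (the names are placeholders):
+
+        registry = WeakKeyDictionary()
+        registry[obj] = "extra data"   # entry vanishes once obj is collected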
+ """ + + def __init__(self, dict=None): + self.data = {} + def remove(k, selfref=ref(self)): + self = selfref() + if self is not None: + try: + del self.data[k] + except KeyError: + pass + self._remove = remove + if dict is not None: + self.update(dict) + + def __delitem__(self, key): + del self.data[ref(key)] + + def __getitem__(self, key): + return self.data[ref(key)] + + def __len__(self): + return len(self.data) + + def __repr__(self): + return "<%s at %#x>" % (self.__class__.__name__, id(self)) + + def __setitem__(self, key, value): + self.data[ref(key, self._remove)] = value + + def copy(self): + new = WeakKeyDictionary() + for key, value in self.data.copy().items(): + o = key() + if o is not None: + new[o] = value + return new + + __copy__ = copy + + def __deepcopy__(self, memo): + from copy import deepcopy + new = self.__class__() + for key, value in self.data.copy().items(): + o = key() + if o is not None: + new[o] = deepcopy(value, memo) + return new + + def get(self, key, default=None): + return self.data.get(ref(key),default) + + def __contains__(self, key): + try: + wr = ref(key) + except TypeError: + return False + return wr in self.data + + def items(self): + for wr, value in self.data.copy().items(): + key = wr() + if key is not None: + yield key, value + + def keys(self): + for wr in self.data.copy(): + obj = wr() + if obj is not None: + yield obj + + __iter__ = keys + + def values(self): + for wr, value in self.data.copy().items(): + if wr() is not None: + yield value + + def keyrefs(self): + """Return a list of weak references to the keys. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the keys around longer than needed. + + """ + return list(self.data) + + def popitem(self): + while True: + key, value = self.data.popitem() + o = key() + if o is not None: + return o, value + + def pop(self, key, *args): + return self.data.pop(ref(key), *args) + + def setdefault(self, key, default=None): + return self.data.setdefault(ref(key, self._remove),default) + + def update(self, dict=None, /, **kwargs): + d = self.data + if dict is not None: + if not hasattr(dict, "items"): + dict = type({})(dict) + for key, value in dict.items(): + d[ref(key, self._remove)] = value + if len(kwargs): + self.update(kwargs) + + def __ior__(self, other): + self.update(other) + return self + + def __or__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.copy() + c.update(other) + return c + return NotImplemented + + def __ror__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.__class__() + c.update(other) + c.update(self) + return c + return NotImplemented + + +class finalize: + """Class for finalization of weakrefable objects + + finalize(obj, func, *args, **kwargs) returns a callable finalizer + object which will be called when obj is garbage collected. The + first time the finalizer is called it evaluates func(*arg, **kwargs) + and returns the result. After this the finalizer is dead, and + calling it just returns None. + + When the program exits any remaining finalizers for which the + atexit attribute is true will be run in reverse order of creation. + By default atexit is true. + """ + + # Finalizer objects don't have any state of their own. They are + # just used as keys to lookup _Info objects in the registry. 
This + # ensures that they cannot be part of a ref-cycle. + + __slots__ = () + _registry = {} + _shutdown = False + _index_iter = itertools.count() + _dirty = False + _registered_with_atexit = False + + class _Info: + __slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index") + + def __init__(self, obj, func, /, *args, **kwargs): + if not self._registered_with_atexit: + # We may register the exit function more than once because + # of a thread race, but that is harmless + import atexit + atexit.register(self._exitfunc) + finalize._registered_with_atexit = True + info = self._Info() + info.weakref = ref(obj, self) + info.func = func + info.args = args + info.kwargs = kwargs or None + info.atexit = True + info.index = next(self._index_iter) + self._registry[self] = info + finalize._dirty = True + + def __call__(self, _=None): + """If alive then mark as dead and return func(*args, **kwargs); + otherwise return None""" + info = self._registry.pop(self, None) + if info and not self._shutdown: + return info.func(*info.args, **(info.kwargs or {})) + + def detach(self): + """If alive then mark as dead and return (obj, func, args, kwargs); + otherwise return None""" + info = self._registry.get(self) + obj = info and info.weakref() + if obj is not None and self._registry.pop(self, None): + return (obj, info.func, info.args, info.kwargs or {}) + + def peek(self): + """If alive then return (obj, func, args, kwargs); + otherwise return None""" + info = self._registry.get(self) + obj = info and info.weakref() + if obj is not None: + return (obj, info.func, info.args, info.kwargs or {}) + + @property + def alive(self): + """Whether finalizer is alive""" + return self in self._registry + + @property + def atexit(self): + """Whether finalizer should be called at exit""" + info = self._registry.get(self) + return bool(info) and info.atexit + + @atexit.setter + def atexit(self, value): + info = self._registry.get(self) + if info: + info.atexit = bool(value) + + def __repr__(self): + info = self._registry.get(self) + obj = info and info.weakref() + if obj is None: + return '<%s object at %#x; dead>' % (type(self).__name__, id(self)) + else: + return '<%s object at %#x; for %r at %#x>' % \ + (type(self).__name__, id(self), type(obj).__name__, id(obj)) + + @classmethod + def _select_for_exit(cls): + # Return live finalizers marked for exit, oldest first + L = [(f,i) for (f,i) in cls._registry.items() if i.atexit] + L.sort(key=lambda item:item[1].index) + return [f for (f,i) in L] + + @classmethod + def _exitfunc(cls): + # At shutdown invoke finalizers for which atexit is true. + # This is called once all other non-daemonic threads have been + # joined. 
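+        # Running a finalizer can itself register new finalizers, so the
+        # pending list is rebuilt whenever the registry is marked dirty.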
+ reenable_gc = False + try: + if cls._registry: + import gc + if gc.isenabled(): + reenable_gc = True + gc.disable() + pending = None + while True: + if pending is None or finalize._dirty: + pending = cls._select_for_exit() + finalize._dirty = False + if not pending: + break + f = pending.pop() + try: + # gc is disabled, so (assuming no daemonic + # threads) the following is the only line in + # this function which might trigger creation + # of a new finalizer + f() + except Exception: + sys.excepthook(*sys.exc_info()) + assert f not in cls._registry + finally: + # prevent any more finalizers from executing during shutdown + finalize._shutdown = True + if reenable_gc: + gc.enable() diff --git a/Python314_4_x86_Template/Lib/webbrowser.py b/Python314_4_x86_Template/Lib/webbrowser.py new file mode 100644 index 00000000..0e0b5034 --- /dev/null +++ b/Python314_4_x86_Template/Lib/webbrowser.py @@ -0,0 +1,762 @@ +"""Interfaces for launching and remotely controlling web browsers.""" +# Maintained by Georg Brandl. + +import os +import shlex +import shutil +import sys +import subprocess +import threading + +__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"] + + +class Error(Exception): + pass + + +_lock = threading.RLock() +_browsers = {} # Dictionary of available browser controllers +_tryorder = None # Preference order of available browsers +_os_preferred_browser = None # The preferred browser + + +def register(name, klass, instance=None, *, preferred=False): + """Register a browser connector.""" + with _lock: + if _tryorder is None: + register_standard_browsers() + _browsers[name.lower()] = [klass, instance] + + # Preferred browsers go to the front of the list. + # Need to match to the default browser returned by xdg-settings, which + # may be of the form e.g. "firefox.desktop". + if preferred or (_os_preferred_browser and f'{name}.desktop' == _os_preferred_browser): + _tryorder.insert(0, name) + else: + _tryorder.append(name) + + +def get(using=None): + """Return a browser launcher instance appropriate for the environment.""" + if _tryorder is None: + with _lock: + if _tryorder is None: + register_standard_browsers() + if using is not None: + alternatives = [using] + else: + alternatives = _tryorder + for browser in alternatives: + if '%s' in browser: + # User gave us a command line, split it into name and args + browser = shlex.split(browser) + if browser[-1] == '&': + return BackgroundBrowser(browser[:-1]) + else: + return GenericBrowser(browser) + else: + # User gave us a browser name or path. + try: + command = _browsers[browser.lower()] + except KeyError: + command = _synthesize(browser) + if command[1] is not None: + return command[1] + elif command[0] is not None: + return command[0]() + raise Error("could not locate runnable browser") + + +# Please note: the following definition hides a builtin function. +# It is recommended one does "import webbrowser" and uses webbrowser.open(url) +# instead of "from webbrowser import *". + +def open(url, new=0, autoraise=True): + """Display url using the default browser. + + If possible, open url in a location determined by new. + - 0: the same browser window (the default). + - 1: a new browser window. + - 2: a new browser page ("tab"). + If possible, autoraise raises the window (the default) or not. + + If opening the browser succeeds, return True. + If there is a problem, return False. 
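+
+    Illustrative use (the URL is a placeholder):
+
+        import webbrowser
+        webbrowser.open("https://example.com", new=2)  # prefer a new tab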
+ """ + if _tryorder is None: + with _lock: + if _tryorder is None: + register_standard_browsers() + for name in _tryorder: + browser = get(name) + if browser.open(url, new, autoraise): + return True + return False + + +def open_new(url): + """Open url in a new window of the default browser. + + If not possible, then open url in the only browser window. + """ + return open(url, 1) + + +def open_new_tab(url): + """Open url in a new page ("tab") of the default browser. + + If not possible, then the behavior becomes equivalent to open_new(). + """ + return open(url, 2) + + +def _synthesize(browser, *, preferred=False): + """Attempt to synthesize a controller based on existing controllers. + + This is useful to create a controller when a user specifies a path to + an entry in the BROWSER environment variable -- we can copy a general + controller to operate using a specific installation of the desired + browser in this way. + + If we can't create a controller in this way, or if there is no + executable for the requested browser, return [None, None]. + + """ + cmd = browser.split()[0] + if not shutil.which(cmd): + return [None, None] + name = os.path.basename(cmd) + try: + command = _browsers[name.lower()] + except KeyError: + return [None, None] + # now attempt to clone to fit the new name: + controller = command[1] + if controller and name.lower() == controller.basename: + import copy + controller = copy.copy(controller) + controller.name = browser + controller.basename = os.path.basename(browser) + register(browser, None, instance=controller, preferred=preferred) + return [None, controller] + return [None, None] + + +# General parent classes + +class BaseBrowser: + """Parent class for all browsers. Do not use directly.""" + + args = ['%s'] + + def __init__(self, name=""): + self.name = name + self.basename = name + + def open(self, url, new=0, autoraise=True): + raise NotImplementedError + + def open_new(self, url): + return self.open(url, 1) + + def open_new_tab(self, url): + return self.open(url, 2) + + @staticmethod + def _check_url(url): + """Ensures that the URL is safe to pass to subprocesses as a parameter""" + if url and url.lstrip().startswith("-"): + raise ValueError(f"Invalid URL (leading dash disallowed): {url!r}") + + +class GenericBrowser(BaseBrowser): + """Class for all browsers started with a command + and without remote functionality.""" + + def __init__(self, name): + if isinstance(name, str): + self.name = name + self.args = ["%s"] + else: + # name should be a list with arguments + self.name = name[0] + self.args = name[1:] + self.basename = os.path.basename(self.name) + + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + cmdline = [self.name] + [arg.replace("%s", url) + for arg in self.args] + try: + if sys.platform[:3] == 'win': + p = subprocess.Popen(cmdline) + else: + p = subprocess.Popen(cmdline, close_fds=True) + return not p.wait() + except OSError: + return False + + +class BackgroundBrowser(GenericBrowser): + """Class for all browsers which are to be started in the + background.""" + + def open(self, url, new=0, autoraise=True): + cmdline = [self.name] + [arg.replace("%s", url) + for arg in self.args] + sys.audit("webbrowser.open", url) + self._check_url(url) + try: + if sys.platform[:3] == 'win': + p = subprocess.Popen(cmdline) + else: + p = subprocess.Popen(cmdline, close_fds=True, + start_new_session=True) + return p.poll() is None + except OSError: + return False + + +class UnixBrowser(BaseBrowser): + 
"""Parent class for all Unix browsers with remote functionality.""" + + raise_opts = None + background = False + redirect_stdout = True + # In remote_args, %s will be replaced with the requested URL. %action will + # be replaced depending on the value of 'new' passed to open. + # remote_action is used for new=0 (open). If newwin is not None, it is + # used for new=1 (open_new). If newtab is not None, it is used for + # new=3 (open_new_tab). After both substitutions are made, any empty + # strings in the transformed remote_args list will be removed. + remote_args = ['%action', '%s'] + remote_action = None + remote_action_newwin = None + remote_action_newtab = None + + def _invoke(self, args, remote, autoraise, url=None): + raise_opt = [] + if remote and self.raise_opts: + # use autoraise argument only for remote invocation + autoraise = int(autoraise) + opt = self.raise_opts[autoraise] + if opt: + raise_opt = [opt] + + cmdline = [self.name] + raise_opt + args + + if remote or self.background: + inout = subprocess.DEVNULL + else: + # for TTY browsers, we need stdin/out + inout = None + p = subprocess.Popen(cmdline, close_fds=True, stdin=inout, + stdout=(self.redirect_stdout and inout or None), + stderr=inout, start_new_session=True) + if remote: + # wait at most five seconds. If the subprocess is not finished, the + # remote invocation has (hopefully) started a new instance. + try: + rc = p.wait(5) + # if remote call failed, open() will try direct invocation + return not rc + except subprocess.TimeoutExpired: + return True + elif self.background: + if p.poll() is None: + return True + else: + return False + else: + return not p.wait() + + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + if new == 0: + action = self.remote_action + elif new == 1: + action = self.remote_action_newwin + elif new == 2: + if self.remote_action_newtab is None: + action = self.remote_action_newwin + else: + action = self.remote_action_newtab + else: + raise Error("Bad 'new' parameter to open(); " + f"expected 0, 1, or 2, got {new}") + + args = [arg.replace("%s", url).replace("%action", action) + for arg in self.remote_args] + args = [arg for arg in args if arg] + success = self._invoke(args, True, autoraise, url) + if not success: + # remote invocation failed, try straight way + args = [arg.replace("%s", url) for arg in self.args] + return self._invoke(args, False, False) + else: + return True + + +class Mozilla(UnixBrowser): + """Launcher class for Mozilla browsers.""" + + remote_args = ['%action', '%s'] + remote_action = "" + remote_action_newwin = "-new-window" + remote_action_newtab = "-new-tab" + background = True + + +class Epiphany(UnixBrowser): + """Launcher class for Epiphany browser.""" + + raise_opts = ["-noraise", ""] + remote_args = ['%action', '%s'] + remote_action = "-n" + remote_action_newwin = "-w" + background = True + + +class Chrome(UnixBrowser): + """Launcher class for Google Chrome browser.""" + + remote_args = ['%action', '%s'] + remote_action = "" + remote_action_newwin = "--new-window" + remote_action_newtab = "" + background = True + + +Chromium = Chrome + + +class Opera(UnixBrowser): + """Launcher class for Opera browser.""" + + remote_args = ['%action', '%s'] + remote_action = "" + remote_action_newwin = "--new-window" + remote_action_newtab = "" + background = True + + +class Elinks(UnixBrowser): + """Launcher class for Elinks browsers.""" + + remote_args = ['-remote', 'openURL(%s%action)'] + remote_action = "" + 
remote_action_newwin = ",new-window" + remote_action_newtab = ",new-tab" + background = False + + # elinks doesn't like its stdout to be redirected - + # it uses redirected stdout as a signal to do -dump + redirect_stdout = False + + +class Konqueror(BaseBrowser): + """Controller for the KDE File Manager (kfm, or Konqueror). + + See the output of ``kfmclient --commands`` + for more information on the Konqueror remote-control interface. + """ + + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + # XXX Currently I know no way to prevent KFM from opening a new win. + if new == 2: + action = "newTab" + else: + action = "openURL" + + devnull = subprocess.DEVNULL + + try: + p = subprocess.Popen(["kfmclient", action, url], + close_fds=True, stdin=devnull, + stdout=devnull, stderr=devnull) + except OSError: + # fall through to next variant + pass + else: + p.wait() + # kfmclient's return code unfortunately has no meaning as it seems + return True + + try: + p = subprocess.Popen(["konqueror", "--silent", url], + close_fds=True, stdin=devnull, + stdout=devnull, stderr=devnull, + start_new_session=True) + except OSError: + # fall through to next variant + pass + else: + if p.poll() is None: + # Should be running now. + return True + + try: + p = subprocess.Popen(["kfm", "-d", url], + close_fds=True, stdin=devnull, + stdout=devnull, stderr=devnull, + start_new_session=True) + except OSError: + return False + else: + return p.poll() is None + + +class Edge(UnixBrowser): + """Launcher class for Microsoft Edge browser.""" + + remote_args = ['%action', '%s'] + remote_action = "" + remote_action_newwin = "--new-window" + remote_action_newtab = "" + background = True + + +# +# Platform support for Unix +# + +# These are the right tests because all these Unix browsers require either +# a console terminal or an X display to run. + +def register_X_browsers(): + + # use xdg-open if around + if shutil.which("xdg-open"): + register("xdg-open", None, BackgroundBrowser("xdg-open")) + + # Opens an appropriate browser for the URL scheme according to + # freedesktop.org settings (GNOME, KDE, XFCE, etc.) + if shutil.which("gio"): + register("gio", None, BackgroundBrowser(["gio", "open", "--", "%s"])) + + xdg_desktop = os.getenv("XDG_CURRENT_DESKTOP", "").split(":") + + # The default GNOME3 browser + if (("GNOME" in xdg_desktop or + "GNOME_DESKTOP_SESSION_ID" in os.environ) and + shutil.which("gvfs-open")): + register("gvfs-open", None, BackgroundBrowser("gvfs-open")) + + # The default KDE browser + if (("KDE" in xdg_desktop or + "KDE_FULL_SESSION" in os.environ) and + shutil.which("kfmclient")): + register("kfmclient", Konqueror, Konqueror("kfmclient")) + + # Common symbolic link for the default X11 browser + if shutil.which("x-www-browser"): + register("x-www-browser", None, BackgroundBrowser("x-www-browser")) + + # The Mozilla browsers + for browser in ("firefox", "iceweasel", "seamonkey", "mozilla-firefox", + "mozilla"): + if shutil.which(browser): + register(browser, None, Mozilla(browser)) + + # Konqueror/kfm, the KDE browser. 
+ if shutil.which("kfm"): + register("kfm", Konqueror, Konqueror("kfm")) + elif shutil.which("konqueror"): + register("konqueror", Konqueror, Konqueror("konqueror")) + + # Gnome's Epiphany + if shutil.which("epiphany"): + register("epiphany", None, Epiphany("epiphany")) + + # Google Chrome/Chromium browsers + for browser in ("google-chrome", "chrome", "chromium", "chromium-browser"): + if shutil.which(browser): + register(browser, None, Chrome(browser)) + + # Opera, quite popular + if shutil.which("opera"): + register("opera", None, Opera("opera")) + + if shutil.which("microsoft-edge"): + register("microsoft-edge", None, Edge("microsoft-edge")) + + +def register_standard_browsers(): + global _tryorder + _tryorder = [] + + if sys.platform == 'darwin': + register("MacOSX", None, MacOSXOSAScript('default')) + register("chrome", None, MacOSXOSAScript('google chrome')) + register("firefox", None, MacOSXOSAScript('firefox')) + register("safari", None, MacOSXOSAScript('safari')) + # macOS can use below Unix support (but we prefer using the macOS + # specific stuff) + + if sys.platform == "ios": + register("iosbrowser", None, IOSBrowser(), preferred=True) + + if sys.platform == "serenityos": + # SerenityOS webbrowser, simply called "Browser". + register("Browser", None, BackgroundBrowser("Browser")) + + if sys.platform[:3] == "win": + # First try to use the default Windows browser + register("windows-default", WindowsDefault) + + # Detect some common Windows browsers, fallback to Microsoft Edge + # location in 64-bit Windows + edge64 = os.path.join(os.environ.get("PROGRAMFILES(x86)", "C:\\Program Files (x86)"), + "Microsoft\\Edge\\Application\\msedge.exe") + # location in 32-bit Windows + edge32 = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"), + "Microsoft\\Edge\\Application\\msedge.exe") + for browser in ("firefox", "seamonkey", "mozilla", "chrome", + "opera", edge64, edge32): + if shutil.which(browser): + register(browser, None, BackgroundBrowser(browser)) + if shutil.which("MicrosoftEdge.exe"): + register("microsoft-edge", None, Edge("MicrosoftEdge.exe")) + else: + # Prefer X browsers if present + # + # NOTE: Do not check for X11 browser on macOS, + # XQuartz installation sets a DISPLAY environment variable and will + # autostart when someone tries to access the display. Mac users in + # general don't need an X11 browser. 
+ if sys.platform != "darwin" and (os.environ.get("DISPLAY") or os.environ.get("WAYLAND_DISPLAY")): + try: + cmd = "xdg-settings get default-web-browser".split() + raw_result = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) + result = raw_result.decode().strip() + except (FileNotFoundError, subprocess.CalledProcessError, + PermissionError, NotADirectoryError): + pass + else: + global _os_preferred_browser + _os_preferred_browser = result + + register_X_browsers() + + # Also try console browsers + if os.environ.get("TERM"): + # Common symbolic link for the default text-based browser + if shutil.which("www-browser"): + register("www-browser", None, GenericBrowser("www-browser")) + # The Links/elinks browsers + if shutil.which("links"): + register("links", None, GenericBrowser("links")) + if shutil.which("elinks"): + register("elinks", None, Elinks("elinks")) + # The Lynx browser , + if shutil.which("lynx"): + register("lynx", None, GenericBrowser("lynx")) + # The w3m browser + if shutil.which("w3m"): + register("w3m", None, GenericBrowser("w3m")) + + # OK, now that we know what the default preference orders for each + # platform are, allow user to override them with the BROWSER variable. + if "BROWSER" in os.environ: + userchoices = os.environ["BROWSER"].split(os.pathsep) + userchoices.reverse() + + # Treat choices in same way as if passed into get() but do register + # and prepend to _tryorder + for cmdline in userchoices: + if all(x not in cmdline for x in " \t"): + # Assume this is the name of a registered command, use + # that unless it is a GenericBrowser. + try: + command = _browsers[cmdline.lower()] + except KeyError: + pass + + else: + if not isinstance(command[1], GenericBrowser): + _tryorder.insert(0, cmdline.lower()) + continue + + if cmdline != '': + cmd = _synthesize(cmdline, preferred=True) + if cmd[1] is None: + register(cmdline, None, GenericBrowser(cmdline), preferred=True) + + # what to do if _tryorder is now empty? 
+ + +# +# Platform support for Windows +# + +if sys.platform[:3] == "win": + class WindowsDefault(BaseBrowser): + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + try: + os.startfile(url) + except OSError: + # [Error 22] No application is associated with the specified + # file for this operation: '' + return False + else: + return True + +# +# Platform support for macOS +# + +if sys.platform == 'darwin': + class MacOSXOSAScript(BaseBrowser): + def __init__(self, name='default'): + super().__init__(name) + + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + url = url.replace('"', '%22') + if self.name == 'default': + proto, _sep, _rest = url.partition(":") + if _sep and proto.lower() in {"http", "https"}: + # default web URL, don't need to lookup browser + script = f'open location "{url}"' + else: + # if not a web URL, need to lookup default browser to ensure a browser is launched + # this should always work, but is overkill to lookup http handler + # before launching http + script = f""" + use framework "AppKit" + use AppleScript version "2.4" + use scripting additions + + property NSWorkspace : a reference to current application's NSWorkspace + property NSURL : a reference to current application's NSURL + + set http_url to NSURL's URLWithString:"https://python.org" + set browser_url to (NSWorkspace's sharedWorkspace)'s ¬ + URLForApplicationToOpenURL:http_url + set app_path to browser_url's relativePath as text -- NSURL to absolute path '/Applications/Safari.app' + + tell application app_path + activate + open location "{url}" + end tell + """ + else: + script = f''' + tell application "{self.name}" + activate + open location "{url}" + end + ''' + + osapipe = os.popen("/usr/bin/osascript", "w") + if osapipe is None: + return False + + osapipe.write(script) + rc = osapipe.close() + return not rc + +# +# Platform support for iOS +# +if sys.platform == "ios": + from _ios_support import objc + if objc: + # If objc exists, we know ctypes is also importable. + from ctypes import c_void_p, c_char_p, c_ulong + + class IOSBrowser(BaseBrowser): + def open(self, url, new=0, autoraise=True): + sys.audit("webbrowser.open", url) + self._check_url(url) + # If ctypes isn't available, we can't open a browser + if objc is None: + return False + + # All the messages in this call return object references. 
+ objc.objc_msgSend.restype = c_void_p + + # This is the equivalent of: + # NSString url_string = + # [NSString stringWithCString:url.encode("utf-8") + # encoding:NSUTF8StringEncoding]; + NSString = objc.objc_getClass(b"NSString") + constructor = objc.sel_registerName(b"stringWithCString:encoding:") + objc.objc_msgSend.argtypes = [c_void_p, c_void_p, c_char_p, c_ulong] + url_string = objc.objc_msgSend( + NSString, + constructor, + url.encode("utf-8"), + 4, # NSUTF8StringEncoding = 4 + ) + + # Create an NSURL object representing the URL + # This is the equivalent of: + # NSURL *nsurl = [NSURL URLWithString:url]; + NSURL = objc.objc_getClass(b"NSURL") + urlWithString_ = objc.sel_registerName(b"URLWithString:") + objc.objc_msgSend.argtypes = [c_void_p, c_void_p, c_void_p] + ns_url = objc.objc_msgSend(NSURL, urlWithString_, url_string) + + # Get the shared UIApplication instance + # This code is the equivalent of: + # UIApplication shared_app = [UIApplication sharedApplication] + UIApplication = objc.objc_getClass(b"UIApplication") + sharedApplication = objc.sel_registerName(b"sharedApplication") + objc.objc_msgSend.argtypes = [c_void_p, c_void_p] + shared_app = objc.objc_msgSend(UIApplication, sharedApplication) + + # Open the URL on the shared application + # This code is the equivalent of: + # [shared_app openURL:ns_url + # options:NIL + # completionHandler:NIL]; + openURL_ = objc.sel_registerName(b"openURL:options:completionHandler:") + objc.objc_msgSend.argtypes = [ + c_void_p, c_void_p, c_void_p, c_void_p, c_void_p + ] + # Method returns void + objc.objc_msgSend.restype = None + objc.objc_msgSend(shared_app, openURL_, ns_url, None, None) + + return True + + +def parse_args(arg_list: list[str] | None): + import argparse + parser = argparse.ArgumentParser( + description="Open URL in a web browser.", color=True, + ) + parser.add_argument("url", help="URL to open") + + group = parser.add_mutually_exclusive_group() + group.add_argument("-n", "--new-window", action="store_const", + const=1, default=0, dest="new_win", + help="open new window") + group.add_argument("-t", "--new-tab", action="store_const", + const=2, default=0, dest="new_win", + help="open new tab") + + args = parser.parse_args(arg_list) + + return args + + +def main(arg_list: list[str] | None = None): + args = parse_args(arg_list) + + open(args.url, args.new_win) + + print("\a") + + +if __name__ == "__main__": + main() diff --git a/Python313_13_x86_Template/Lib/wsgiref/__init__.py b/Python314_4_x86_Template/Lib/wsgiref/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/wsgiref/__init__.py rename to Python314_4_x86_Template/Lib/wsgiref/__init__.py diff --git a/Python313_13_x86_Template/Lib/wsgiref/handlers.py b/Python314_4_x86_Template/Lib/wsgiref/handlers.py similarity index 100% rename from Python313_13_x86_Template/Lib/wsgiref/handlers.py rename to Python314_4_x86_Template/Lib/wsgiref/handlers.py diff --git a/Python314_4_x86_Template/Lib/wsgiref/headers.py b/Python314_4_x86_Template/Lib/wsgiref/headers.py new file mode 100644 index 00000000..eb6ea6a4 --- /dev/null +++ b/Python314_4_x86_Template/Lib/wsgiref/headers.py @@ -0,0 +1,192 @@ +"""Manage HTTP Response Headers + +Much of this module is red-handedly pilfered from email.message in the stdlib, +so portions are Copyright (C) 2001 Python Software Foundation, and were +written by Barry Warsaw. +""" + +# Regular expression that matches 'special' characters in parameters, the +# existence of which force quoting of the parameter value. 
+import re
+tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
+# Disallowed characters for headers and values.
+# HTAB (\x09) is allowed in header values, but
+# not in header names. (RFC 9110 Section 5.5)
+_name_disallowed_re = re.compile(r'[\x00-\x1F\x7F]')
+_value_disallowed_re = re.compile(r'[\x00-\x08\x0A-\x1F\x7F]')
+
+def _formatparam(param, value=None, quote=1):
+    """Convenience function to format and return a key=value pair.
+
+    This will quote the value if needed or if quote is true.
+    """
+    if value is not None and len(value) > 0:
+        if quote or tspecials.search(value):
+            value = value.replace('\\', '\\\\').replace('"', r'\"')
+            return '%s="%s"' % (param, value)
+        else:
+            return '%s=%s' % (param, value)
+    else:
+        return param
+
+
+class Headers:
+    """Manage a collection of HTTP response headers"""
+
+    def __init__(self, headers=None):
+        headers = headers if headers is not None else []
+        if type(headers) is not list:
+            raise TypeError("Headers must be a list of name/value tuples")
+        self._headers = headers
+        if __debug__:
+            for k, v in headers:
+                self._convert_string_type(k, name=True)
+                self._convert_string_type(v, name=False)
+
+    def _convert_string_type(self, value, *, name):
+        """Convert/check value type."""
+        if type(value) is str:
+            regex = (_name_disallowed_re if name else _value_disallowed_re)
+            if regex.search(value):
+                raise ValueError("Control characters not allowed in headers")
+            return value
+        raise AssertionError("Header names/values must be"
+            " of type str (got {0})".format(repr(value)))
+
+    def __len__(self):
+        """Return the total number of headers, including duplicates."""
+        return len(self._headers)
+
+    def __setitem__(self, name, val):
+        """Set the value of a header."""
+        del self[name]
+        self._headers.append(
+            (self._convert_string_type(name, name=True), self._convert_string_type(val, name=False)))
+
+    def __delitem__(self,name):
+        """Delete all occurrences of a header, if present.
+
+        Does *not* raise an exception if the header is missing.
+        """
+        name = self._convert_string_type(name.lower(), name=True)
+        self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
+
+    def __getitem__(self,name):
+        """Get the first header value for 'name'
+
+        Return None if the header is missing instead of raising an exception.
+
+        Note that if the header appeared multiple times, exactly which
+        occurrence gets returned is undefined. Use get_all() to get all
+        the values matching a header field name.
+        """
+        return self.get(name)
+
+    def __contains__(self, name):
+        """Return true if the message contains the header."""
+        return self.get(name) is not None
+
+
+    def get_all(self, name):
+        """Return a list of all the values for the named field.
+
+        These will be sorted in the order they appeared in the original header
+        list or were added to this instance, and may contain duplicates. Any
+        fields deleted and re-inserted are always appended to the header list.
+        If no fields exist with the given name, returns an empty list.
+        """
+        name = self._convert_string_type(name.lower(), name=True)
+        return [kv[1] for kv in self._headers if kv[0].lower()==name]
+
+
+    def get(self,name,default=None):
+        """Get the first header value for 'name', or return 'default'"""
+        name = self._convert_string_type(name.lower(), name=True)
+        for k,v in self._headers:
+            if k.lower()==name:
+                return v
+        return default
+
+
+    def keys(self):
+        """Return a list of all the header field names.
+ + These will be sorted in the order they appeared in the original header + list, or were added to this instance, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + """ + return [k for k, v in self._headers] + + def values(self): + """Return a list of all header values. + + These will be sorted in the order they appeared in the original header + list, or were added to this instance, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + """ + return [v for k, v in self._headers] + + def items(self): + """Get all the header fields and values. + + These will be sorted in the order they were in the original header + list, or were added to this instance, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + """ + return self._headers[:] + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self._headers) + + def __str__(self): + """str() returns the formatted headers, complete with end line, + suitable for direct HTTP transmission.""" + return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['','']) + + def __bytes__(self): + return str(self).encode('iso-8859-1') + + def setdefault(self,name,value): + """Return first matching header value for 'name', or 'value' + + If there is no header named 'name', add a new header with name 'name' + and value 'value'.""" + result = self.get(name) + if result is None: + self._headers.append((self._convert_string_type(name, name=True), + self._convert_string_type(value, name=False))) + return value + else: + return result + + def add_header(self, _name, _value, **_params): + """Extended header setting. + + _name is the header field to add. keyword arguments can be used to set + additional parameters for the header field, with underscores converted + to dashes. Normally the parameter will be added as key="value" unless + value is None, in which case only the key will be added. + + Example: + + h.add_header('content-disposition', 'attachment', filename='bud.gif') + + Note that unlike the corresponding 'email.message' method, this does + *not* handle '(charset, language, value)' tuples: all values must be + strings or None. 
+ """ + parts = [] + if _value is not None: + _value = self._convert_string_type(_value, name=False) + parts.append(_value) + for k, v in _params.items(): + k = self._convert_string_type(k, name=True) + if v is None: + parts.append(k.replace('_', '-')) + else: + v = self._convert_string_type(v, name=False) + parts.append(_formatparam(k.replace('_', '-'), v)) + self._headers.append((self._convert_string_type(_name, name=True), "; ".join(parts))) diff --git a/Python313_13_x86_Template/Lib/wsgiref/simple_server.py b/Python314_4_x86_Template/Lib/wsgiref/simple_server.py similarity index 100% rename from Python313_13_x86_Template/Lib/wsgiref/simple_server.py rename to Python314_4_x86_Template/Lib/wsgiref/simple_server.py diff --git a/Python313_13_x86_Template/Lib/wsgiref/types.py b/Python314_4_x86_Template/Lib/wsgiref/types.py similarity index 100% rename from Python313_13_x86_Template/Lib/wsgiref/types.py rename to Python314_4_x86_Template/Lib/wsgiref/types.py diff --git a/Python313_13_x86_Template/Lib/wsgiref/util.py b/Python314_4_x86_Template/Lib/wsgiref/util.py similarity index 100% rename from Python313_13_x86_Template/Lib/wsgiref/util.py rename to Python314_4_x86_Template/Lib/wsgiref/util.py diff --git a/Python313_13_x86_Template/Lib/wsgiref/validate.py b/Python314_4_x86_Template/Lib/wsgiref/validate.py similarity index 100% rename from Python313_13_x86_Template/Lib/wsgiref/validate.py rename to Python314_4_x86_Template/Lib/wsgiref/validate.py diff --git a/Python313_13_x86_Template/Lib/xml/__init__.py b/Python314_4_x86_Template/Lib/xml/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/__init__.py rename to Python314_4_x86_Template/Lib/xml/__init__.py diff --git a/Python314_4_x86_Template/Lib/xml/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/xml/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..5076e30b Binary files /dev/null and b/Python314_4_x86_Template/Lib/xml/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/xml/dom/NodeFilter.py b/Python314_4_x86_Template/Lib/xml/dom/NodeFilter.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/dom/NodeFilter.py rename to Python314_4_x86_Template/Lib/xml/dom/NodeFilter.py diff --git a/Python314_4_x86_Template/Lib/xml/dom/__init__.py b/Python314_4_x86_Template/Lib/xml/dom/__init__.py new file mode 100644 index 00000000..dd7fb996 --- /dev/null +++ b/Python314_4_x86_Template/Lib/xml/dom/__init__.py @@ -0,0 +1,140 @@ +"""W3C Document Object Model implementation for Python. + +The Python mapping of the Document Object Model is documented in the +Python Library Reference in the section on the xml.dom package. + +This package contains the following modules: + +minidom -- A simple implementation of the Level 1 DOM with namespace + support added (based on the Level 2 specification) and other + minor Level 2 functionality. + +pulldom -- DOM builder supporting on-demand tree-building for selected + subtrees of the document. + +""" + + +class Node: + """Class giving the NodeType constants.""" + __slots__ = () + + # DOM implementations may use this as a base class for their own + # Node implementations. If they don't, the constants defined here + # should still be used as the canonical definitions as they match + # the values given in the W3C recommendation. Client code can + # safely refer to these values in all tests of Node.nodeType + # values. 
+ + ELEMENT_NODE = 1 + ATTRIBUTE_NODE = 2 + TEXT_NODE = 3 + CDATA_SECTION_NODE = 4 + ENTITY_REFERENCE_NODE = 5 + ENTITY_NODE = 6 + PROCESSING_INSTRUCTION_NODE = 7 + COMMENT_NODE = 8 + DOCUMENT_NODE = 9 + DOCUMENT_TYPE_NODE = 10 + DOCUMENT_FRAGMENT_NODE = 11 + NOTATION_NODE = 12 + + +#ExceptionCode +INDEX_SIZE_ERR = 1 +DOMSTRING_SIZE_ERR = 2 +HIERARCHY_REQUEST_ERR = 3 +WRONG_DOCUMENT_ERR = 4 +INVALID_CHARACTER_ERR = 5 +NO_DATA_ALLOWED_ERR = 6 +NO_MODIFICATION_ALLOWED_ERR = 7 +NOT_FOUND_ERR = 8 +NOT_SUPPORTED_ERR = 9 +INUSE_ATTRIBUTE_ERR = 10 +INVALID_STATE_ERR = 11 +SYNTAX_ERR = 12 +INVALID_MODIFICATION_ERR = 13 +NAMESPACE_ERR = 14 +INVALID_ACCESS_ERR = 15 +VALIDATION_ERR = 16 + + +class DOMException(Exception): + """Abstract base class for DOM exceptions. + Exceptions with specific codes are specializations of this class.""" + + def __init__(self, *args, **kw): + if self.__class__ is DOMException: + raise RuntimeError( + "DOMException should not be instantiated directly") + Exception.__init__(self, *args, **kw) + + def _get_code(self): + return self.code + + +class IndexSizeErr(DOMException): + code = INDEX_SIZE_ERR + +class DomstringSizeErr(DOMException): + code = DOMSTRING_SIZE_ERR + +class HierarchyRequestErr(DOMException): + code = HIERARCHY_REQUEST_ERR + +class WrongDocumentErr(DOMException): + code = WRONG_DOCUMENT_ERR + +class InvalidCharacterErr(DOMException): + code = INVALID_CHARACTER_ERR + +class NoDataAllowedErr(DOMException): + code = NO_DATA_ALLOWED_ERR + +class NoModificationAllowedErr(DOMException): + code = NO_MODIFICATION_ALLOWED_ERR + +class NotFoundErr(DOMException): + code = NOT_FOUND_ERR + +class NotSupportedErr(DOMException): + code = NOT_SUPPORTED_ERR + +class InuseAttributeErr(DOMException): + code = INUSE_ATTRIBUTE_ERR + +class InvalidStateErr(DOMException): + code = INVALID_STATE_ERR + +class SyntaxErr(DOMException): + code = SYNTAX_ERR + +class InvalidModificationErr(DOMException): + code = INVALID_MODIFICATION_ERR + +class NamespaceErr(DOMException): + code = NAMESPACE_ERR + +class InvalidAccessErr(DOMException): + code = INVALID_ACCESS_ERR + +class ValidationErr(DOMException): + code = VALIDATION_ERR + +class UserDataHandler: + """Class giving the operation constants for UserDataHandler.handle().""" + + # Based on DOM Level 3 (WD 9 April 2002) + + NODE_CLONED = 1 + NODE_IMPORTED = 2 + NODE_DELETED = 3 + NODE_RENAMED = 4 + +XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace" +XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/" +XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml" +EMPTY_NAMESPACE = None +EMPTY_PREFIX = None + +from .domreg import getDOMImplementation, registerDOMImplementation # noqa: F401 diff --git a/Python313_13_x86_Template/Lib/xml/dom/domreg.py b/Python314_4_x86_Template/Lib/xml/dom/domreg.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/dom/domreg.py rename to Python314_4_x86_Template/Lib/xml/dom/domreg.py diff --git a/Python313_13_x86_Template/Lib/xml/dom/expatbuilder.py b/Python314_4_x86_Template/Lib/xml/dom/expatbuilder.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/dom/expatbuilder.py rename to Python314_4_x86_Template/Lib/xml/dom/expatbuilder.py diff --git a/Python313_13_x86_Template/Lib/xml/dom/minicompat.py b/Python314_4_x86_Template/Lib/xml/dom/minicompat.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/dom/minicompat.py rename to Python314_4_x86_Template/Lib/xml/dom/minicompat.py diff --git a/Python313_13_x86_Template/Lib/xml/dom/minidom.py 
b/Python314_4_x86_Template/Lib/xml/dom/minidom.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/dom/minidom.py rename to Python314_4_x86_Template/Lib/xml/dom/minidom.py diff --git a/Python313_13_x86_Template/Lib/xml/dom/pulldom.py b/Python314_4_x86_Template/Lib/xml/dom/pulldom.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/dom/pulldom.py rename to Python314_4_x86_Template/Lib/xml/dom/pulldom.py diff --git a/Python313_13_x86_Template/Lib/xml/dom/xmlbuilder.py b/Python314_4_x86_Template/Lib/xml/dom/xmlbuilder.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/dom/xmlbuilder.py rename to Python314_4_x86_Template/Lib/xml/dom/xmlbuilder.py diff --git a/Python313_13_x86_Template/Lib/xml/etree/ElementInclude.py b/Python314_4_x86_Template/Lib/xml/etree/ElementInclude.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/etree/ElementInclude.py rename to Python314_4_x86_Template/Lib/xml/etree/ElementInclude.py diff --git a/Python313_13_x86_Template/Lib/xml/etree/ElementPath.py b/Python314_4_x86_Template/Lib/xml/etree/ElementPath.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/etree/ElementPath.py rename to Python314_4_x86_Template/Lib/xml/etree/ElementPath.py diff --git a/Python314_4_x86_Template/Lib/xml/etree/ElementTree.py b/Python314_4_x86_Template/Lib/xml/etree/ElementTree.py new file mode 100644 index 00000000..dafe5b1b --- /dev/null +++ b/Python314_4_x86_Template/Lib/xml/etree/ElementTree.py @@ -0,0 +1,2102 @@ +"""Lightweight XML support for Python. + + XML is an inherently hierarchical data format, and the most natural way to + represent it is with a tree. This module has two classes for this purpose: + + 1. ElementTree represents the whole XML document as a tree and + + 2. Element represents a single node in this tree. + + Interactions with the whole document (reading and writing to/from files) are + usually done on the ElementTree level. Interactions with a single XML element + and its sub-elements are done on the Element level. + + Element is a flexible container object designed to store hierarchical data + structures in memory. It can be described as a cross between a list and a + dictionary. Each Element has a number of properties associated with it: + + 'tag' - a string containing the element's name. + + 'attributes' - a Python dictionary storing the element's attributes. + + 'text' - a string containing the element's text content. + + 'tail' - an optional string containing text after the element's end tag. + + And a number of child elements stored in a Python sequence. + + To create an element instance, use the Element constructor, + or the SubElement factory function. + + You can also use the ElementTree class to wrap an element structure + and convert it to and from XML. + +""" + +#--------------------------------------------------------------------- +# Licensed to PSF under a Contributor Agreement. +# See https://www.python.org/psf/license for licensing details. +# +# ElementTree +# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved. 
+#
+# fredrik@pythonware.com
+# http://www.pythonware.com
+# --------------------------------------------------------------------
+# The ElementTree toolkit is
+#
+# Copyright (c) 1999-2008 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS.  IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+__all__ = [
+    # public symbols
+    "Comment",
+    "dump",
+    "Element", "ElementTree",
+    "fromstring", "fromstringlist",
+    "indent", "iselement", "iterparse",
+    "parse", "ParseError",
+    "PI", "ProcessingInstruction",
+    "QName",
+    "SubElement",
+    "tostring", "tostringlist",
+    "TreeBuilder",
+    "VERSION",
+    "XML", "XMLID",
+    "XMLParser", "XMLPullParser",
+    "register_namespace",
+    "canonicalize", "C14NWriterTarget",
+    ]
+
+VERSION = "1.3.0"
+
+import sys
+import re
+import warnings
+import io
+import collections
+import collections.abc
+import contextlib
+import weakref
+
+from . import ElementPath
+
+
+class ParseError(SyntaxError):
+    """An error when parsing an XML document.
+
+    In addition to its exception value, a ParseError contains
+    two extra attributes:
+        'code'     - the specific exception code
+        'position' - the line and column of the error
+
+    """
+    pass
+
+# --------------------------------------------------------------------
+
+
+def iselement(element):
+    """Return True if *element* appears to be an Element."""
+    return hasattr(element, 'tag')
+
+
+class Element:
+    """An XML element.
+
+    This class is the reference implementation of the Element interface.
+
+    An element's length is its number of subelements.  That means if you
+    want to check if an element is truly empty, you should check BOTH
+    its length AND its text attribute.
+
+    The element tag, attribute names, and attribute values can be either
+    bytes or strings.
+
+    *tag* is the element name.  *attrib* is an optional dictionary containing
+    element attributes. *extra* are additional element attributes given as
+    keyword arguments.
+
+    Example form:
+        <tag attrib>text<child/>...</tag>tail
+
+    """
+
+    tag = None
+    """The element's name."""
+
+    attrib = None
+    """Dictionary of the element's attributes."""
+
+    text = None
+    """
+    Text before first subelement. This is either a string or the value None.
+    Note that if there is no text, this attribute may be either
+    None or the empty string, depending on the parser.
+ + """ + + tail = None + """ + Text after this element's end tag, but before the next sibling element's + start tag. This is either a string or the value None. Note that if there + was no text, this attribute may be either None or an empty string, + depending on the parser. + + """ + + def __init__(self, tag, attrib={}, **extra): + if not isinstance(attrib, dict): + raise TypeError("attrib must be dict, not %s" % ( + attrib.__class__.__name__,)) + self.tag = tag + self.attrib = {**attrib, **extra} + self._children = [] + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__, self.tag, id(self)) + + def makeelement(self, tag, attrib): + """Create a new element with the same type. + + *tag* is a string containing the element name. + *attrib* is a dictionary containing the element attributes. + + Do not call this method, use the SubElement factory function instead. + + """ + return self.__class__(tag, attrib) + + def __copy__(self): + elem = self.makeelement(self.tag, self.attrib) + elem.text = self.text + elem.tail = self.tail + elem[:] = self + return elem + + def __len__(self): + return len(self._children) + + def __bool__(self): + warnings.warn( + "Testing an element's truth value will always return True in " + "future versions. " + "Use specific 'len(elem)' or 'elem is not None' test instead.", + DeprecationWarning, stacklevel=2 + ) + return len(self._children) != 0 # emulate old behaviour, for now + + def __getitem__(self, index): + return self._children[index] + + def __setitem__(self, index, element): + if isinstance(index, slice): + for elt in element: + self._assert_is_element(elt) + else: + self._assert_is_element(element) + self._children[index] = element + + def __delitem__(self, index): + del self._children[index] + + def append(self, subelement): + """Add *subelement* to the end of this element. + + The new element will appear in document order after the last existing + subelement (or directly after the text, if it's the first subelement), + but before the end tag for this element. + + """ + self._assert_is_element(subelement) + self._children.append(subelement) + + def extend(self, elements): + """Append subelements from a sequence. + + *elements* is a sequence with zero or more elements. + + """ + for element in elements: + self._assert_is_element(element) + self._children.append(element) + + def insert(self, index, subelement): + """Insert *subelement* at position *index*.""" + self._assert_is_element(subelement) + self._children.insert(index, subelement) + + def _assert_is_element(self, e): + # Need to refer to the actual Python implementation, not the + # shadowing C implementation. + if not isinstance(e, _Element_Py): + raise TypeError('expected an Element, not %s' % type(e).__name__) + + def remove(self, subelement): + """Remove matching subelement. + + Unlike the find methods, this method compares elements based on + identity, NOT ON tag value or contents. To remove subelements by + other means, the easiest way is to use a list comprehension to + select what elements to keep, and then use slice assignment to update + the parent element. + + ValueError is raised if a matching element could not be found. + + """ + # assert iselement(element) + try: + self._children.remove(subelement) + except ValueError: + # to align the error message with the C implementation + raise ValueError("Element.remove(x): element not found") from None + + def find(self, path, namespaces=None): + """Find first matching element by tag name or path. 
+ + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + return ElementPath.find(self, path, namespaces) + + def findtext(self, path, default=None, namespaces=None): + """Find text for first matching element by tag name or path. + + *path* is a string having either an element tag or an XPath, + *default* is the value to return if the element was not found, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return text content of first matching element, or default value if + none was found. Note that if an element is found having no text + content, the empty string is returned. + + """ + return ElementPath.findtext(self, path, default, namespaces) + + def findall(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Returns list containing all matching elements in document order. + + """ + return ElementPath.findall(self, path, namespaces) + + def iterfind(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return an iterable yielding all matching elements in document order. + + """ + return ElementPath.iterfind(self, path, namespaces) + + def clear(self): + """Reset element. + + This function removes all subelements, clears all attributes, and sets + the text and tail attributes to None. + + """ + self.attrib.clear() + self._children = [] + self.text = self.tail = None + + def get(self, key, default=None): + """Get element attribute. + + Equivalent to attrib.get, but some implementations may handle this a + bit more efficiently. *key* is what attribute to look for, and + *default* is what to return if the attribute was not found. + + Returns a string containing the attribute value, or the default if + attribute was not found. + + """ + return self.attrib.get(key, default) + + def set(self, key, value): + """Set element attribute. + + Equivalent to attrib[key] = value, but some implementations may handle + this a bit more efficiently. *key* is what attribute to set, and + *value* is the attribute value to set it to. + + """ + self.attrib[key] = value + + def keys(self): + """Get list of attribute names. + + Names are returned in an arbitrary order, just like an ordinary + Python dict. Equivalent to attrib.keys() + + """ + return self.attrib.keys() + + def items(self): + """Get element attributes as a sequence. + + The attributes are returned in arbitrary order. Equivalent to + attrib.items(). + + Return a list of (name, value) tuples. + + """ + return self.attrib.items() + + def iter(self, tag=None): + """Create tree iterator. + + The iterator loops over the element and all subelements in document + order, returning all elements with a matching tag. + + If the tree structure is modified during iteration, new or removed + elements may or may not be included. To get a stable set, use the + list() function on the iterator, and loop over the resulting list. + + *tag* is what tags to look for (default is to return all elements) + + Return an iterator containing all the matching elements. 
+ + """ + if tag == "*": + tag = None + if tag is None or self.tag == tag: + yield self + for e in self._children: + yield from e.iter(tag) + + def itertext(self): + """Create text iterator. + + The iterator loops over the element and all subelements in document + order, returning all inner text. + + """ + tag = self.tag + if not isinstance(tag, str) and tag is not None: + return + t = self.text + if t: + yield t + for e in self: + yield from e.itertext() + t = e.tail + if t: + yield t + + +def SubElement(parent, tag, attrib={}, **extra): + """Subelement factory which creates an element instance, and appends it + to an existing parent. + + The element tag, attribute names, and attribute values can be either + bytes or Unicode strings. + + *parent* is the parent element, *tag* is the subelements name, *attrib* is + an optional directory containing element attributes, *extra* are + additional attributes given as keyword arguments. + + """ + attrib = {**attrib, **extra} + element = parent.makeelement(tag, attrib) + parent.append(element) + return element + + +def Comment(text=None): + """Comment element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *text* is a string containing the comment string. + + """ + element = Element(Comment) + element.text = text + return element + + +def ProcessingInstruction(target, text=None): + """Processing Instruction element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *target* is a string containing the processing instruction, *text* is a + string containing the processing instruction contents, if any. + + """ + element = Element(ProcessingInstruction) + element.text = target + if text: + element.text = element.text + " " + text + return element + +PI = ProcessingInstruction + + +class QName: + """Qualified name wrapper. + + This class can be used to wrap a QName attribute value in order to get + proper namespace handing on output. + + *text_or_uri* is a string containing the QName value either in the form + {uri}local, or if the tag argument is given, the URI part of a QName. + + *tag* is an optional argument which if given, will make the first + argument (text_or_uri) be interpreted as a URI, and this argument (tag) + be interpreted as a local name. + + """ + def __init__(self, text_or_uri, tag=None): + if tag: + text_or_uri = "{%s}%s" % (text_or_uri, tag) + self.text = text_or_uri + def __str__(self): + return self.text + def __repr__(self): + return '<%s %r>' % (self.__class__.__name__, self.text) + def __hash__(self): + return hash(self.text) + def __le__(self, other): + if isinstance(other, QName): + return self.text <= other.text + return self.text <= other + def __lt__(self, other): + if isinstance(other, QName): + return self.text < other.text + return self.text < other + def __ge__(self, other): + if isinstance(other, QName): + return self.text >= other.text + return self.text >= other + def __gt__(self, other): + if isinstance(other, QName): + return self.text > other.text + return self.text > other + def __eq__(self, other): + if isinstance(other, QName): + return self.text == other.text + return self.text == other + +# -------------------------------------------------------------------- + + +class ElementTree: + """An XML element hierarchy. + + This class also provides support for serialization to and from + standard XML. 
+ + *element* is an optional root element node, + *file* is an optional file handle or file name of an XML file whose + contents will be used to initialize the tree with. + + """ + def __init__(self, element=None, file=None): + if element is not None and not iselement(element): + raise TypeError('expected an Element, not %s' % + type(element).__name__) + self._root = element # first node + if file: + self.parse(file) + + def getroot(self): + """Return root element of this tree.""" + return self._root + + def _setroot(self, element): + """Replace root element of this tree. + + This will discard the current contents of the tree and replace it + with the given element. Use with care! + + """ + if not iselement(element): + raise TypeError('expected an Element, not %s' + % type(element).__name__) + self._root = element + + def parse(self, source, parser=None): + """Load external XML document into element tree. + + *source* is a file name or file object, *parser* is an optional parser + instance that defaults to XMLParser. + + ParseError is raised if the parser fails to parse the document. + + Returns the root element of the given source document. + + """ + close_source = False + if not hasattr(source, "read"): + source = open(source, "rb") + close_source = True + try: + if parser is None: + # If no parser was specified, create a default XMLParser + parser = XMLParser() + if hasattr(parser, '_parse_whole'): + # The default XMLParser, when it comes from an accelerator, + # can define an internal _parse_whole API for efficiency. + # It can be used to parse the whole source without feeding + # it with chunks. + self._root = parser._parse_whole(source) + return self._root + while data := source.read(65536): + parser.feed(data) + self._root = parser.close() + return self._root + finally: + if close_source: + source.close() + + def iter(self, tag=None): + """Create and return tree iterator for the root element. + + The iterator loops over all elements in this tree, in document order. + + *tag* is a string with the tag name to iterate over + (default is to return all elements). + + """ + # assert self._root is not None + return self._root.iter(tag) + + def find(self, path, namespaces=None): + """Find first matching element by tag name or path. + + Same as getroot().find(path), which is Element.find() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. If you rely on the current " + "behaviour, change it to %r" % path, + FutureWarning, stacklevel=2 + ) + return self._root.find(path, namespaces) + + def findtext(self, path, default=None, namespaces=None): + """Find first matching element by tag name or path. + + Same as getroot().findtext(path), which is Element.findtext() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. 
If you rely on the current "
+                "behaviour, change it to %r" % path,
+                FutureWarning, stacklevel=2
+            )
+        return self._root.findtext(path, default, namespaces)
+
+    def findall(self, path, namespaces=None):
+        """Find all matching subelements by tag name or path.
+
+        Same as getroot().findall(path), which is Element.findall().
+
+        *path* is a string having either an element tag or an XPath,
+        *namespaces* is an optional mapping from namespace prefix to full name.
+
+        Return list containing all matching elements in document order.
+
+        """
+        # assert self._root is not None
+        if path[:1] == "/":
+            path = "." + path
+            warnings.warn(
+                "This search is broken in 1.3 and earlier, and will be "
+                "fixed in a future version.  If you rely on the current "
+                "behaviour, change it to %r" % path,
+                FutureWarning, stacklevel=2
+            )
+        return self._root.findall(path, namespaces)
+
+    def iterfind(self, path, namespaces=None):
+        """Find all matching subelements by tag name or path.
+
+        Same as getroot().iterfind(path), which is element.iterfind()
+
+        *path* is a string having either an element tag or an XPath,
+        *namespaces* is an optional mapping from namespace prefix to full name.
+
+        Return an iterable yielding all matching elements in document order.
+
+        """
+        # assert self._root is not None
+        if path[:1] == "/":
+            path = "." + path
+            warnings.warn(
+                "This search is broken in 1.3 and earlier, and will be "
+                "fixed in a future version.  If you rely on the current "
+                "behaviour, change it to %r" % path,
+                FutureWarning, stacklevel=2
+            )
+        return self._root.iterfind(path, namespaces)
+
+    def write(self, file_or_filename,
+              encoding=None,
+              xml_declaration=None,
+              default_namespace=None,
+              method=None, *,
+              short_empty_elements=True):
+        """Write element tree to a file as XML.
+
+        Arguments:
+          *file_or_filename* -- file name or a file object opened for writing
+
+          *encoding* -- the output encoding (default: US-ASCII)
+
+          *xml_declaration* -- bool indicating if an XML declaration should be
+                               added to the output. If None, an XML declaration
+                               is added if encoding IS NOT either of:
+                               US-ASCII, UTF-8, or Unicode
+
+          *default_namespace* -- sets the default XML namespace (for "xmlns")
+
+          *method* -- either "xml" (default), "html", "text", or "c14n"
+
+          *short_empty_elements* -- controls the formatting of elements
+                                    that contain no content. If True (default)
+                                    they are emitted as a single self-closed
+                                    tag, otherwise they are emitted as a pair
+                                    of start/end tags
+
+        """
+        if self._root is None:
+            raise TypeError('ElementTree not initialized')
+        if not method:
+            method = "xml"
+        elif method not in _serialize:
+            raise ValueError("unknown method %r" % method)
+        if not encoding:
+            if method == "c14n":
+                encoding = "utf-8"
+            else:
+                encoding = "us-ascii"
+        with _get_writer(file_or_filename, encoding) as (write, declared_encoding):
+            if method == "xml" and (xml_declaration or
+                    (xml_declaration is None and
+                     encoding.lower() != "unicode" and
+                     declared_encoding.lower() not in ("utf-8", "us-ascii"))):
+                write("<?xml version='1.0' encoding='%s'?>\n" % (
+                    declared_encoding,))
+            if method == "text":
+                _serialize_text(write, self._root)
+            else:
+                qnames, namespaces = _namespaces(self._root, default_namespace)
+                serialize = _serialize[method]
+                serialize(write, self._root, qnames, namespaces,
+                          short_empty_elements=short_empty_elements)
+
+    def write_c14n(self, file):
+        # lxml.etree compatibility. use output method instead
+        return self.write(file, method="c14n")
+
+# --------------------------------------------------------------------
+# serialization support
+
+@contextlib.contextmanager
+def _get_writer(file_or_filename, encoding):
+    # returns text write method and release all resources after using
+    try:
+        write = file_or_filename.write
+    except AttributeError:
+        # file_or_filename is a file name
+        if encoding.lower() == "unicode":
+            encoding="utf-8"
+        with open(file_or_filename, "w", encoding=encoding,
+                  errors="xmlcharrefreplace") as file:
+            yield file.write, encoding
+    else:
+        # file_or_filename is a file-like object
+        # encoding determines if it is a text or binary writer
+        if encoding.lower() == "unicode":
+            # use a text writer as is
+            yield write, getattr(file_or_filename, "encoding", None) or "utf-8"
+        else:
+            # wrap a binary writer with TextIOWrapper
+            with contextlib.ExitStack() as stack:
+                if isinstance(file_or_filename, io.BufferedIOBase):
+                    file = file_or_filename
+                elif isinstance(file_or_filename, io.RawIOBase):
+                    file = io.BufferedWriter(file_or_filename)
+                    # Keep the original file open when the BufferedWriter is
+                    # destroyed
+                    stack.callback(file.detach)
+                else:
+                    # This is to handle passed objects that aren't in the
+                    # IOBase hierarchy, but just have a write method
+                    file = io.BufferedIOBase()
+                    file.writable = lambda: True
+                    file.write = write
+                    try:
+                        # TextIOWrapper uses these methods to determine
+                        # if BOM (for UTF-16, etc) should be added
+                        file.seekable = file_or_filename.seekable
+                        file.tell = file_or_filename.tell
+                    except AttributeError:
+                        pass
+                file = io.TextIOWrapper(file,
+                                        encoding=encoding,
+                                        errors="xmlcharrefreplace",
+                                        newline="\n")
+                # Keep the original file open when the TextIOWrapper is
+                # destroyed
+                stack.callback(file.detach)
+                yield file.write, encoding
+
+def _namespaces(elem, default_namespace=None):
+    # identify namespaces used in this tree
+
+    # maps qnames to *encoded* prefix:local names
+    qnames = {None: None}
+
+    # maps uri:s to prefixes
+    namespaces = {}
+    if default_namespace:
+        namespaces[default_namespace] = ""
+
+    def add_qname(qname):
+        # calculate serialized qname representation
+        try:
+            if qname[:1] == "{":
+                uri, tag = qname[1:].rsplit("}", 1)
+                prefix = namespaces.get(uri)
+                if prefix is None:
+                    prefix = _namespace_map.get(uri)
+                    if prefix is None:
+                        prefix = "ns%d" % len(namespaces)
+                    if prefix != "xml":
+                        namespaces[uri] = prefix
+                if prefix:
+                    qnames[qname] = "%s:%s" % (prefix, tag)
+                else:
+                    qnames[qname] = tag # default element
+            else:
+                if default_namespace:
+                    # FIXME: can this be handled in XML 1.0?
+ raise ValueError( + "cannot use non-qualified names with " + "default_namespace option" + ) + qnames[qname] = qname + except TypeError: + _raise_serialization_error(qname) + + # populate qname and namespaces table + for elem in elem.iter(): + tag = elem.tag + if isinstance(tag, QName): + if tag.text not in qnames: + add_qname(tag.text) + elif isinstance(tag, str): + if tag not in qnames: + add_qname(tag) + elif tag is not None and tag is not Comment and tag is not PI: + _raise_serialization_error(tag) + for key, value in elem.items(): + if isinstance(key, QName): + key = key.text + if key not in qnames: + add_qname(key) + if isinstance(value, QName) and value.text not in qnames: + add_qname(value.text) + text = elem.text + if isinstance(text, QName) and text.text not in qnames: + add_qname(text.text) + return qnames, namespaces + +def _serialize_xml(write, elem, qnames, namespaces, + short_empty_elements, **kwargs): + tag = elem.tag + text = elem.text + if tag is Comment: + write("" % text) + elif tag is ProcessingInstruction: + write("" % text) + else: + tag = qnames[tag] + if tag is None: + if text: + write(_escape_cdata(text)) + for e in elem: + _serialize_xml(write, e, qnames, None, + short_empty_elements=short_empty_elements) + else: + write("<" + tag) + items = list(elem.items()) + if items or namespaces: + if namespaces: + for v, k in sorted(namespaces.items(), + key=lambda x: x[1]): # sort on prefix + if k: + k = ":" + k + write(" xmlns%s=\"%s\"" % ( + k, + _escape_attrib(v) + )) + for k, v in items: + if isinstance(k, QName): + k = k.text + if isinstance(v, QName): + v = qnames[v.text] + else: + v = _escape_attrib(v) + write(" %s=\"%s\"" % (qnames[k], v)) + if text or len(elem) or not short_empty_elements: + write(">") + if text: + write(_escape_cdata(text)) + for e in elem: + _serialize_xml(write, e, qnames, None, + short_empty_elements=short_empty_elements) + write("") + else: + write(" />") + if elem.tail: + write(_escape_cdata(elem.tail)) + +HTML_EMPTY = {"area", "base", "basefont", "br", "col", "embed", "frame", "hr", + "img", "input", "isindex", "link", "meta", "param", "source", + "track", "wbr"} + +def _serialize_html(write, elem, qnames, namespaces, **kwargs): + tag = elem.tag + text = elem.text + if tag is Comment: + write("" % _escape_cdata(text)) + elif tag is ProcessingInstruction: + write("" % _escape_cdata(text)) + else: + tag = qnames[tag] + if tag is None: + if text: + write(_escape_cdata(text)) + for e in elem: + _serialize_html(write, e, qnames, None) + else: + write("<" + tag) + items = list(elem.items()) + if items or namespaces: + if namespaces: + for v, k in sorted(namespaces.items(), + key=lambda x: x[1]): # sort on prefix + if k: + k = ":" + k + write(" xmlns%s=\"%s\"" % ( + k, + _escape_attrib(v) + )) + for k, v in items: + if isinstance(k, QName): + k = k.text + if isinstance(v, QName): + v = qnames[v.text] + else: + v = _escape_attrib_html(v) + # FIXME: handle boolean attributes + write(" %s=\"%s\"" % (qnames[k], v)) + write(">") + ltag = tag.lower() + if text: + if ltag == "script" or ltag == "style": + write(text) + else: + write(_escape_cdata(text)) + for e in elem: + _serialize_html(write, e, qnames, None) + if ltag not in HTML_EMPTY: + write("") + if elem.tail: + write(_escape_cdata(elem.tail)) + +def _serialize_text(write, elem): + for part in elem.itertext(): + write(part) + if elem.tail: + write(elem.tail) + +_serialize = { + "xml": _serialize_xml, + "html": _serialize_html, + "text": _serialize_text, +# this optional method is imported at the 
+#   "c14n": _serialize_c14n,
+}
+
+
+def register_namespace(prefix, uri):
+    """Register a namespace prefix.
+
+    The registry is global, and any existing mapping for either the
+    given prefix or the namespace URI will be removed.
+
+    *prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
+    attributes in this namespace will be serialized with prefix if possible.
+
+    ValueError is raised if prefix is reserved or is invalid.
+
+    """
+    if re.match(r"ns\d+$", prefix):
+        raise ValueError("Prefix format reserved for internal use")
+    for k, v in list(_namespace_map.items()):
+        if k == uri or v == prefix:
+            del _namespace_map[k]
+    _namespace_map[uri] = prefix
+
+_namespace_map = {
+    # "well-known" namespace prefixes
+    "http://www.w3.org/XML/1998/namespace": "xml",
+    "http://www.w3.org/1999/xhtml": "html",
+    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
+    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
+    # xml schema
+    "http://www.w3.org/2001/XMLSchema": "xs",
+    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
+    # dublin core
+    "http://purl.org/dc/elements/1.1/": "dc",
+}
+# For tests and troubleshooting
+register_namespace._namespace_map = _namespace_map
+
+def _raise_serialization_error(text):
+    raise TypeError(
+        "cannot serialize %r (type %s)" % (text, type(text).__name__)
+        )
+
+def _escape_cdata(text):
+    # escape character data
+    try:
+        # it's worth avoiding do-nothing calls for strings that are
+        # shorter than 500 characters, or so. assume that's, by far,
+        # the most common case in most applications.
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if "<" in text:
+            text = text.replace("<", "&lt;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+def _escape_attrib(text):
+    # escape attribute value
+    try:
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if "<" in text:
+            text = text.replace("<", "&lt;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        if "\"" in text:
+            text = text.replace("\"", "&quot;")
+        # Although section 2.11 of the XML specification states that CR or
+        # CR LN should be replaced with just LN, it applies only to EOLNs
+        # which take part of organizing file into lines. Within attributes,
+        # we are replacing these with entity numbers, so they do not count.
+        # http://www.w3.org/TR/REC-xml/#sec-line-ends
+        # The current solution, contained in following six lines, was
+        # discussed in issue 17582 and 39011.
+        if "\r" in text:
+            text = text.replace("\r", "&#13;")
+        if "\n" in text:
+            text = text.replace("\n", "&#10;")
+        if "\t" in text:
+            text = text.replace("\t", "&#09;")
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+def _escape_attrib_html(text):
+    # escape attribute value
+    try:
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        if "\"" in text:
+            text = text.replace("\"", "&quot;")
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+# --------------------------------------------------------------------
+
+def tostring(element, encoding=None, method=None, *,
+             xml_declaration=None, default_namespace=None,
+             short_empty_elements=True):
+    """Generate string representation of XML element.
+
+    All subelements are included. If encoding is "unicode", a string
+    is returned. Otherwise a bytestring is returned.
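+
+    For example (an illustrative sketch; the element name is arbitrary):
+
+        tostring(Element("root"), encoding="unicode")  # -> '<root />'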
+ + *element* is an Element instance, *encoding* is an optional output + encoding defaulting to US-ASCII, *method* is an optional output which can + be one of "xml" (default), "html", "text" or "c14n", *default_namespace* + sets the default XML namespace (for "xmlns"). + + Returns an (optionally) encoded string containing the XML data. + + """ + stream = io.StringIO() if encoding == 'unicode' else io.BytesIO() + ElementTree(element).write(stream, encoding, + xml_declaration=xml_declaration, + default_namespace=default_namespace, + method=method, + short_empty_elements=short_empty_elements) + return stream.getvalue() + +class _ListDataStream(io.BufferedIOBase): + """An auxiliary stream accumulating into a list reference.""" + def __init__(self, lst): + self.lst = lst + + def writable(self): + return True + + def seekable(self): + return True + + def write(self, b): + self.lst.append(b) + + def tell(self): + return len(self.lst) + +def tostringlist(element, encoding=None, method=None, *, + xml_declaration=None, default_namespace=None, + short_empty_elements=True): + lst = [] + stream = _ListDataStream(lst) + ElementTree(element).write(stream, encoding, + xml_declaration=xml_declaration, + default_namespace=default_namespace, + method=method, + short_empty_elements=short_empty_elements) + return lst + + +def dump(elem): + """Write element tree or element structure to sys.stdout. + + This function should be used for debugging only. + + *elem* is either an ElementTree, or a single Element. The exact output + format is implementation dependent. In this version, it's written as an + ordinary XML file. + + """ + # debugging + if not isinstance(elem, ElementTree): + elem = ElementTree(elem) + elem.write(sys.stdout, encoding="unicode") + tail = elem.getroot().tail + if not tail or tail[-1] != "\n": + sys.stdout.write("\n") + + +def indent(tree, space=" ", level=0): + """Indent an XML document by inserting newlines and indentation space + after elements. + + *tree* is the ElementTree or Element to modify. The (root) element + itself will not be changed, but the tail text of all elements in its + subtree will be adapted. + + *space* is the whitespace to insert for each indentation level, two + space characters by default. + + *level* is the initial indentation level. Setting this to a higher + value than 0 can be used for indenting subtrees that are more deeply + nested inside of a document. + """ + if isinstance(tree, ElementTree): + tree = tree.getroot() + if level < 0: + raise ValueError(f"Initial indentation level must be >= 0, got {level}") + if not len(tree): + return + + # Reduce the memory consumption by reusing indentation strings. + indentations = ["\n" + level * space] + + def _indent_children(elem, level): + # Start a new indentation level for the first child. + child_level = level + 1 + try: + child_indentation = indentations[child_level] + except IndexError: + child_indentation = indentations[level] + space + indentations.append(child_indentation) + + if not elem.text or not elem.text.strip(): + elem.text = child_indentation + + for child in elem: + if len(child): + _indent_children(child, child_level) + if not child.tail or not child.tail.strip(): + child.tail = child_indentation + + # Dedent after the last child by overwriting the previous indentation. 
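+        # ("child" still refers to the last element of "elem" here, so this
+        # drops its tail back down to the enclosing level's indentation.)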
+ if not child.tail.strip(): + child.tail = indentations[level] + + _indent_children(tree, 0) + + +# -------------------------------------------------------------------- +# parsing + + +def parse(source, parser=None): + """Parse XML document into element tree. + + *source* is a filename or file object containing XML data, + *parser* is an optional parser instance defaulting to XMLParser. + + Return an ElementTree instance. + + """ + tree = ElementTree() + tree.parse(source, parser) + return tree + + +def iterparse(source, events=None, parser=None): + """Incrementally parse XML document into ElementTree. + + This class also reports what's going on to the user based on the + *events* it is initialized with. The supported events are the strings + "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get + detailed namespace information). If *events* is omitted, only + "end" events are reported. + + *source* is a filename or file object containing XML data, *events* is + a list of events to report back, *parser* is an optional parser instance. + + Returns an iterator providing (event, elem) pairs. + + """ + # Use the internal, undocumented _parser argument for now; When the + # parser argument of iterparse is removed, this can be killed. + pullparser = XMLPullParser(events=events, _parser=parser) + + if not hasattr(source, "read"): + source = open(source, "rb") + close_source = True + else: + close_source = False + + def iterator(source): + try: + while True: + yield from pullparser.read_events() + # load event buffer + data = source.read(16 * 1024) + if not data: + break + pullparser.feed(data) + root = pullparser._close_and_return_root() + yield from pullparser.read_events() + it = wr() + if it is not None: + it.root = root + finally: + if close_source: + source.close() + + gen = iterator(source) + class IterParseIterator(collections.abc.Iterator): + __next__ = gen.__next__ + def close(self): + if close_source: + source.close() + gen.close() + + def __del__(self): + # TODO: Emit a ResourceWarning if it was not explicitly closed. + # (When the close() method will be supported in all maintained Python versions.) + if close_source: + source.close() + + it = IterParseIterator() + it.root = None + wr = weakref.ref(it) + return it + + +class XMLPullParser: + + def __init__(self, events=None, *, _parser=None): + # The _parser argument is for internal use only and must not be relied + # upon in user code. It will be removed in a future release. + # See https://bugs.python.org/issue17741 for more details. + + self._events_queue = collections.deque() + self._parser = _parser or XMLParser(target=TreeBuilder()) + # wire up the parser for event reporting + if events is None: + events = ("end",) + self._parser._setevents(self._events_queue, events) + + def feed(self, data): + """Feed encoded data to parser.""" + if self._parser is None: + raise ValueError("feed() called after end of stream") + if data: + try: + self._parser.feed(data) + except SyntaxError as exc: + self._events_queue.append(exc) + + def _close_and_return_root(self): + # iterparse needs this to set its root attribute properly :( + root = self._parser.close() + self._parser = None + return root + + def close(self): + """Finish feeding data to parser. + + Unlike XMLParser, does not return the root element. Use + read_events() to consume elements from XMLPullParser. + """ + self._close_and_return_root() + + def read_events(self): + """Return an iterator over currently available (event, elem) pairs. 
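+
+        A minimal usage sketch (the tag names are arbitrary):
+
+            parser = XMLPullParser(events=("start", "end"))
+            parser.feed("<root><child/></root>")
+            for event, elem in parser.read_events():
+                print(event, elem.tag)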
+
+        Events are consumed from the internal event queue as they are
+        retrieved from the iterator.
+        """
+        events = self._events_queue
+        while events:
+            event = events.popleft()
+            if isinstance(event, Exception):
+                raise event
+            else:
+                yield event
+
+    def flush(self):
+        if self._parser is None:
+            raise ValueError("flush() called after end of stream")
+        self._parser.flush()
+
+
+def XML(text, parser=None):
+    """Parse XML document from string constant.
+
+    This function can be used to embed "XML Literals" in Python code.
+
+    *text* is a string containing XML data, *parser* is an
+    optional parser instance, defaulting to the standard XMLParser.
+
+    Returns an Element instance.
+
+    """
+    if not parser:
+        parser = XMLParser(target=TreeBuilder())
+    parser.feed(text)
+    return parser.close()
+
+
+def XMLID(text, parser=None):
+    """Parse XML document from string constant for its IDs.
+
+    *text* is a string containing XML data, *parser* is an
+    optional parser instance, defaulting to the standard XMLParser.
+
+    Returns an (Element, dict) tuple, in which the
+    dict maps element ids to elements.
+
+    """
+    if not parser:
+        parser = XMLParser(target=TreeBuilder())
+    parser.feed(text)
+    tree = parser.close()
+    ids = {}
+    for elem in tree.iter():
+        id = elem.get("id")
+        if id:
+            ids[id] = elem
+    return tree, ids
+
+# Parse XML document from string constant. Alias for XML().
+fromstring = XML
+
+def fromstringlist(sequence, parser=None):
+    """Parse XML document from sequence of string fragments.
+
+    *sequence* is a list or other sequence of strings, *parser* is an
+    optional parser instance, defaulting to the standard XMLParser.
+
+    Returns an Element instance.
+
+    """
+    if not parser:
+        parser = XMLParser(target=TreeBuilder())
+    for text in sequence:
+        parser.feed(text)
+    return parser.close()
+
+# --------------------------------------------------------------------
+
+
+class TreeBuilder:
+    """Generic element structure builder.
+
+    This builder converts a sequence of start, data, and end method
+    calls to a well-formed element structure.
+
+    You can use this class to build an element structure using a custom XML
+    parser, or a parser for some other XML-like format.
+
+    *element_factory* is an optional element factory which is called
+    to create new Element instances, as necessary.
+
+    *comment_factory* is a factory to create comments to be used instead of
+    the standard factory. If *insert_comments* is false (the default),
+    comments will not be inserted into the tree.
+
+    *pi_factory* is a factory to create processing instructions to be used
+    instead of the standard factory. If *insert_pis* is false (the default),
+    processing instructions will not be inserted into the tree.
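+
+    A minimal usage sketch (tag and text are arbitrary):
+
+        builder = TreeBuilder()
+        builder.start("root", {})
+        builder.data("hello")
+        builder.end("root")
+        root = builder.close()  # an Element with tag "root" and text "hello"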
+ """ + def __init__(self, element_factory=None, *, + comment_factory=None, pi_factory=None, + insert_comments=False, insert_pis=False): + self._data = [] # data collector + self._elem = [] # element stack + self._last = None # last element + self._root = None # root element + self._tail = None # true if we're after an end tag + if comment_factory is None: + comment_factory = Comment + self._comment_factory = comment_factory + self.insert_comments = insert_comments + if pi_factory is None: + pi_factory = ProcessingInstruction + self._pi_factory = pi_factory + self.insert_pis = insert_pis + if element_factory is None: + element_factory = Element + self._factory = element_factory + + def close(self): + """Flush builder buffers and return toplevel document Element.""" + assert len(self._elem) == 0, "missing end tags" + assert self._root is not None, "missing toplevel element" + return self._root + + def _flush(self): + if self._data: + if self._last is not None: + text = "".join(self._data) + if self._tail: + assert self._last.tail is None, "internal error (tail)" + self._last.tail = text + else: + assert self._last.text is None, "internal error (text)" + self._last.text = text + self._data = [] + + def data(self, data): + """Add text to current element.""" + self._data.append(data) + + def start(self, tag, attrs): + """Open new element and return it. + + *tag* is the element name, *attrs* is a dict containing element + attributes. + + """ + self._flush() + self._last = elem = self._factory(tag, attrs) + if self._elem: + self._elem[-1].append(elem) + elif self._root is None: + self._root = elem + self._elem.append(elem) + self._tail = 0 + return elem + + def end(self, tag): + """Close and return current Element. + + *tag* is the element name. + + """ + self._flush() + self._last = self._elem.pop() + assert self._last.tag == tag,\ + "end tag mismatch (expected %s, got %s)" % ( + self._last.tag, tag) + self._tail = 1 + return self._last + + def comment(self, text): + """Create a comment using the comment_factory. + + *text* is the text of the comment. + """ + return self._handle_single( + self._comment_factory, self.insert_comments, text) + + def pi(self, target, text=None): + """Create a processing instruction using the pi_factory. + + *target* is the target name of the processing instruction. + *text* is the data of the processing instruction, or ''. + """ + return self._handle_single( + self._pi_factory, self.insert_pis, target, text) + + def _handle_single(self, factory, insert, *args): + elem = factory(*args) + if insert: + self._flush() + self._last = elem + if self._elem: + self._elem[-1].append(elem) + self._tail = 1 + return elem + + +# also see ElementTree and TreeBuilder +class XMLParser: + """Element structure builder for XML source data based on the expat parser. 
+ + *target* is an optional target object which defaults to an instance of the + standard TreeBuilder class, *encoding* is an optional encoding string + which if given, overrides the encoding specified in the XML file: + http://www.iana.org/assignments/character-sets + + """ + + def __init__(self, *, target=None, encoding=None): + try: + from xml.parsers import expat + except ImportError: + try: + import pyexpat as expat + except ImportError: + raise ImportError( + "No module named expat; use SimpleXMLTreeBuilder instead" + ) + parser = expat.ParserCreate(encoding, "}") + if target is None: + target = TreeBuilder() + # underscored names are provided for compatibility only + self.parser = self._parser = parser + self.target = self._target = target + self._error = expat.error + self._names = {} # name memo cache + # main callbacks + parser.DefaultHandlerExpand = self._default + if hasattr(target, 'start'): + parser.StartElementHandler = self._start + if hasattr(target, 'end'): + parser.EndElementHandler = self._end + if hasattr(target, 'start_ns'): + parser.StartNamespaceDeclHandler = self._start_ns + if hasattr(target, 'end_ns'): + parser.EndNamespaceDeclHandler = self._end_ns + if hasattr(target, 'data'): + parser.CharacterDataHandler = target.data + # miscellaneous callbacks + if hasattr(target, 'comment'): + parser.CommentHandler = target.comment + if hasattr(target, 'pi'): + parser.ProcessingInstructionHandler = target.pi + # Configure pyexpat: buffering, new-style attribute handling. + parser.buffer_text = 1 + parser.ordered_attributes = 1 + self._doctype = None + self.entity = {} + try: + self.version = "Expat %d.%d.%d" % expat.version_info + except AttributeError: + pass # unknown + + def _setevents(self, events_queue, events_to_report): + # Internal API for XMLPullParser + # events_to_report: a list of events to report during parsing (same as + # the *events* of XMLPullParser's constructor. + # events_queue: a list of actual parsing events that will be populated + # by the underlying parser. 
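+        # (Each queued item is either an (event_name, payload) tuple or an
+        # exception instance that read_events() re-raises to the caller.)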
+ # + parser = self._parser + append = events_queue.append + for event_name in events_to_report: + if event_name == "start": + parser.ordered_attributes = 1 + def handler(tag, attrib_in, event=event_name, append=append, + start=self._start): + append((event, start(tag, attrib_in))) + parser.StartElementHandler = handler + elif event_name == "end": + def handler(tag, event=event_name, append=append, + end=self._end): + append((event, end(tag))) + parser.EndElementHandler = handler + elif event_name == "start-ns": + # TreeBuilder does not implement .start_ns() + if hasattr(self.target, "start_ns"): + def handler(prefix, uri, event=event_name, append=append, + start_ns=self._start_ns): + append((event, start_ns(prefix, uri))) + else: + def handler(prefix, uri, event=event_name, append=append): + append((event, (prefix or '', uri or ''))) + parser.StartNamespaceDeclHandler = handler + elif event_name == "end-ns": + # TreeBuilder does not implement .end_ns() + if hasattr(self.target, "end_ns"): + def handler(prefix, event=event_name, append=append, + end_ns=self._end_ns): + append((event, end_ns(prefix))) + else: + def handler(prefix, event=event_name, append=append): + append((event, None)) + parser.EndNamespaceDeclHandler = handler + elif event_name == 'comment': + def handler(text, event=event_name, append=append, self=self): + append((event, self.target.comment(text))) + parser.CommentHandler = handler + elif event_name == 'pi': + def handler(pi_target, data, event=event_name, append=append, + self=self): + append((event, self.target.pi(pi_target, data))) + parser.ProcessingInstructionHandler = handler + else: + raise ValueError("unknown event %r" % event_name) + + def _raiseerror(self, value): + err = ParseError(value) + err.code = value.code + err.position = value.lineno, value.offset + raise err + + def _fixname(self, key): + # expand qname, and convert name string to ascii, if possible + try: + name = self._names[key] + except KeyError: + name = key + if "}" in name: + name = "{" + name + self._names[key] = name + return name + + def _start_ns(self, prefix, uri): + return self.target.start_ns(prefix or '', uri or '') + + def _end_ns(self, prefix): + return self.target.end_ns(prefix or '') + + def _start(self, tag, attr_list): + # Handler for expat's StartElementHandler. Since ordered_attributes + # is set, the attributes are reported as a list of alternating + # attribute name,value. 
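+        # For example, expat reports <a x="1" y="2"> as the list
+        # ["x", "1", "y", "2"], which the loop below folds into a dict.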
+        fixname = self._fixname
+        tag = fixname(tag)
+        attrib = {}
+        if attr_list:
+            for i in range(0, len(attr_list), 2):
+                attrib[fixname(attr_list[i])] = attr_list[i+1]
+        return self.target.start(tag, attrib)
+
+    def _end(self, tag):
+        return self.target.end(self._fixname(tag))
+
+    def _default(self, text):
+        prefix = text[:1]
+        if prefix == "&":
+            # deal with undefined entities
+            try:
+                data_handler = self.target.data
+            except AttributeError:
+                return
+            try:
+                data_handler(self.entity[text[1:-1]])
+            except KeyError:
+                from xml.parsers import expat
+                err = expat.error(
+                    "undefined entity %s: line %d, column %d" %
+                    (text, self.parser.ErrorLineNumber,
+                    self.parser.ErrorColumnNumber)
+                    )
+                err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
+                err.lineno = self.parser.ErrorLineNumber
+                err.offset = self.parser.ErrorColumnNumber
+                raise err
+        elif prefix == "<" and text[:9] == "<!DOCTYPE":
+            self._doctype = []  # inside a doctype declaration
+        elif self._doctype is not None:
+            # parse doctype contents
+            if prefix == ">":
+                self._doctype = None
+                return
+            text = text.strip()
+            if not text:
+                return
+            self._doctype.append(text)
+            n = len(self._doctype)
+            if n > 2:
+                type = self._doctype[1]
+                if type == "PUBLIC" and n == 4:
+                    name, type, pubid, system = self._doctype
+                    if pubid:
+                        pubid = pubid[1:-1]
+                elif type == "SYSTEM" and n == 3:
+                    name, type, system = self._doctype
+                    pubid = None
+                else:
+                    return
+                if hasattr(self.target, "doctype"):
+                    self.target.doctype(name, pubid, system[1:-1])
+                elif hasattr(self, "doctype"):
+                    warnings.warn(
+                        "The doctype() method of XMLParser is ignored. "
+                        "Define doctype() method on the TreeBuilder target.",
+                        RuntimeWarning)
+
+                self._doctype = None
+
+    def feed(self, data):
+        """Feed encoded data to parser."""
+        try:
+            self.parser.Parse(data, False)
+        except self._error as v:
+            self._raiseerror(v)
+
+    def close(self):
+        """Finish feeding data to parser and return element structure."""
+        try:
+            self.parser.Parse(b"", True) # end of data
+        except self._error as v:
+            self._raiseerror(v)
+        try:
+            close_handler = self.target.close
+        except AttributeError:
+            pass
+        else:
+            return close_handler()
+        finally:
+            # get rid of circular references
+            del self.parser, self._parser
+            del self.target, self._target
+
+    def flush(self):
+        was_enabled = self.parser.GetReparseDeferralEnabled()
+        try:
+            self.parser.SetReparseDeferralEnabled(False)
+            self.parser.Parse(b"", False)
+        except self._error as v:
+            self._raiseerror(v)
+        finally:
+            self.parser.SetReparseDeferralEnabled(was_enabled)
+
+# --------------------------------------------------------------------
+# C14N 2.0
+
+def canonicalize(xml_data=None, *, out=None, from_file=None, **options):
+    """Convert XML to its C14N 2.0 serialised form.
+
+    If *out* is provided, it must be a file or file-like object that receives
+    the serialised canonical XML output (text, not bytes) through its ``.write()``
+    method. To write to a file, open it in text mode with encoding "utf-8".
+    If *out* is not provided, this function returns the output as a text string.
+
+    Either *xml_data* (an XML string) or *from_file* (a file path or
+    file-like object) must be provided as input.
+
+    The configuration options are the same as for the ``C14NWriterTarget``.
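+
+    A minimal usage sketch (the input document is arbitrary):
+
+        canonicalize("<root b='2'  a='1'/>")
+        # -> '<root a="1" b="2"></root>'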
+ """ + if xml_data is None and from_file is None: + raise ValueError("Either 'xml_data' or 'from_file' must be provided as input") + sio = None + if out is None: + sio = out = io.StringIO() + + parser = XMLParser(target=C14NWriterTarget(out.write, **options)) + + if xml_data is not None: + parser.feed(xml_data) + parser.close() + elif from_file is not None: + parse(from_file, parser=parser) + + return sio.getvalue() if sio is not None else None + + +_looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match + + +class C14NWriterTarget: + """ + Canonicalization writer target for the XMLParser. + + Serialises parse events to XML C14N 2.0. + + The *write* function is used for writing out the resulting data stream + as text (not bytes). To write to a file, open it in text mode with encoding + "utf-8" and pass its ``.write`` method. + + Configuration options: + + - *with_comments*: set to true to include comments + - *strip_text*: set to true to strip whitespace before and after text content + - *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}" + - *qname_aware_tags*: a set of qname aware tag names in which prefixes + should be replaced in text content + - *qname_aware_attrs*: a set of qname aware attribute names in which prefixes + should be replaced in text content + - *exclude_attrs*: a set of attribute names that should not be serialised + - *exclude_tags*: a set of tag names that should not be serialised + """ + def __init__(self, write, *, + with_comments=False, strip_text=False, rewrite_prefixes=False, + qname_aware_tags=None, qname_aware_attrs=None, + exclude_attrs=None, exclude_tags=None): + self._write = write + self._data = [] + self._with_comments = with_comments + self._strip_text = strip_text + self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None + self._exclude_tags = set(exclude_tags) if exclude_tags else None + + self._rewrite_prefixes = rewrite_prefixes + if qname_aware_tags: + self._qname_aware_tags = set(qname_aware_tags) + else: + self._qname_aware_tags = None + if qname_aware_attrs: + self._find_qname_aware_attrs = set(qname_aware_attrs).intersection + else: + self._find_qname_aware_attrs = None + + # Stack with globally and newly declared namespaces as (uri, prefix) pairs. + self._declared_ns_stack = [[ + ("http://www.w3.org/XML/1998/namespace", "xml"), + ]] + # Stack with user declared namespace prefixes as (uri, prefix) pairs. 
+ self._ns_stack = [] + if not rewrite_prefixes: + self._ns_stack.append(list(_namespace_map.items())) + self._ns_stack.append([]) + self._prefix_map = {} + self._preserve_space = [False] + self._pending_start = None + self._root_seen = False + self._root_done = False + self._ignored_depth = 0 + + def _iter_namespaces(self, ns_stack, _reversed=reversed): + for namespaces in _reversed(ns_stack): + if namespaces: # almost no element declares new namespaces + yield from namespaces + + def _resolve_prefix_name(self, prefixed_name): + prefix, name = prefixed_name.split(':', 1) + for uri, p in self._iter_namespaces(self._ns_stack): + if p == prefix: + return f'{{{uri}}}{name}' + raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope') + + def _qname(self, qname, uri=None): + if uri is None: + uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname) + else: + tag = qname + + prefixes_seen = set() + for u, prefix in self._iter_namespaces(self._declared_ns_stack): + if u == uri and prefix not in prefixes_seen: + return f'{prefix}:{tag}' if prefix else tag, tag, uri + prefixes_seen.add(prefix) + + # Not declared yet => add new declaration. + if self._rewrite_prefixes: + if uri in self._prefix_map: + prefix = self._prefix_map[uri] + else: + prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}' + self._declared_ns_stack[-1].append((uri, prefix)) + return f'{prefix}:{tag}', tag, uri + + if not uri and '' not in prefixes_seen: + # No default namespace declared => no prefix needed. + return tag, tag, uri + + for u, prefix in self._iter_namespaces(self._ns_stack): + if u == uri: + self._declared_ns_stack[-1].append((uri, prefix)) + return f'{prefix}:{tag}' if prefix else tag, tag, uri + + if not uri: + # As soon as a default namespace is defined, + # anything that has no namespace (and thus, no prefix) goes there. + return tag, tag, uri + + raise ValueError(f'Namespace "{uri}" is not declared in scope') + + def data(self, data): + if not self._ignored_depth: + self._data.append(data) + + def _flush(self, _join_text=''.join): + data = _join_text(self._data) + del self._data[:] + if self._strip_text and not self._preserve_space[-1]: + data = data.strip() + if self._pending_start is not None: + args, self._pending_start = self._pending_start, None + qname_text = data if data and _looks_like_prefix_name(data) else None + self._start(*args, qname_text) + if qname_text is not None: + return + if data and self._root_seen: + self._write(_escape_cdata_c14n(data)) + + def start_ns(self, prefix, uri): + if self._ignored_depth: + return + # we may have to resolve qnames in text content + if self._data: + self._flush() + self._ns_stack[-1].append((uri, prefix)) + + def start(self, tag, attrs): + if self._exclude_tags is not None and ( + self._ignored_depth or tag in self._exclude_tags): + self._ignored_depth += 1 + return + if self._data: + self._flush() + + new_namespaces = [] + self._declared_ns_stack.append(new_namespaces) + + if self._qname_aware_tags is not None and tag in self._qname_aware_tags: + # Need to parse text first to see if it requires a prefix declaration. + self._pending_start = (tag, attrs, new_namespaces) + return + self._start(tag, attrs, new_namespaces) + + def _start(self, tag, attrs, new_namespaces, qname_text=None): + if self._exclude_attrs is not None and attrs: + attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs} + + qnames = {tag, *attrs} + resolved_names = {} + + # Resolve prefixes in attribute and tag text. 
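+        # For example, a text value such as "p:name" is expanded here to
+        # "{uri-bound-to-p}name" before prefixes are reassigned.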
+        if qname_text is not None:
+            qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text)
+            qnames.add(qname)
+        if self._find_qname_aware_attrs is not None and attrs:
+            qattrs = self._find_qname_aware_attrs(attrs)
+            if qattrs:
+                for attr_name in qattrs:
+                    value = attrs[attr_name]
+                    if _looks_like_prefix_name(value):
+                        qname = resolved_names[value] = self._resolve_prefix_name(value)
+                        qnames.add(qname)
+            else:
+                qattrs = None
+        else:
+            qattrs = None
+
+        # Assign prefixes in lexicographical order of used URIs.
+        parse_qname = self._qname
+        parsed_qnames = {n: parse_qname(n) for n in sorted(
+            qnames, key=lambda n: n.split('}', 1))}
+
+        # Write namespace declarations in prefix order ...
+        if new_namespaces:
+            attr_list = [
+                ('xmlns:' + prefix if prefix else 'xmlns', uri)
+                for uri, prefix in new_namespaces
+            ]
+            attr_list.sort()
+        else:
+            # almost always empty
+            attr_list = []
+
+        # ... followed by attributes in URI+name order
+        if attrs:
+            for k, v in sorted(attrs.items()):
+                if qattrs is not None and k in qattrs and v in resolved_names:
+                    v = parsed_qnames[resolved_names[v]][0]
+                attr_qname, attr_name, uri = parsed_qnames[k]
+                # No prefix for attributes in default ('') namespace.
+                attr_list.append((attr_qname if uri else attr_name, v))
+
+        # Honour xml:space attributes.
+        space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space')
+        self._preserve_space.append(
+            space_behaviour == 'preserve' if space_behaviour
+            else self._preserve_space[-1])
+
+        # Write the tag.
+        write = self._write
+        write('<' + parsed_qnames[tag][0])
+        if attr_list:
+            write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list]))
+        write('>')
+
+        # Write the resolved qname text content.
+        if qname_text is not None:
+            write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0]))
+
+        self._root_seen = True
+        self._ns_stack.append([])
+
+    def end(self, tag):
+        if self._ignored_depth:
+            self._ignored_depth -= 1
+            return
+        if self._data:
+            self._flush()
+        self._write(f'</{self._qname(tag)[0]}>')
+        self._preserve_space.pop()
+        self._root_done = len(self._preserve_space) == 1
+        self._declared_ns_stack.pop()
+        self._ns_stack.pop()
+
+    def comment(self, text):
+        if not self._with_comments:
+            return
+        if self._ignored_depth:
+            return
+        if self._root_done:
+            self._write('\n')
+        elif self._root_seen and self._data:
+            self._flush()
+        self._write(f'<!--{_escape_cdata_c14n(text)}-->')
+        if not self._root_seen:
+            self._write('\n')
+
+    def pi(self, target, data):
+        if self._ignored_depth:
+            return
+        if self._root_done:
+            self._write('\n')
+        elif self._root_seen and self._data:
+            self._flush()
+        self._write(
+            f'<?{target} {_escape_cdata_c14n(data)}?>' if data
+            else f'<?{target}?>')
+        if not self._root_seen:
+            self._write('\n')
+
+
+def _escape_cdata_c14n(text):
+    # escape character data
+    try:
+        # it's worth avoiding do-nothing calls for strings that are
+        # shorter than 500 characters, or so. assume that's, by far,
+        # the most common case in most applications.
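+        # Only '&', '<', '>' and '\r' are escaped in C14N character data;
+        # the attribute variant below additionally escapes quotes and
+        # whitespace characters.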
+        if '&' in text:
+            text = text.replace('&', '&amp;')
+        if '<' in text:
+            text = text.replace('<', '&lt;')
+        if '>' in text:
+            text = text.replace('>', '&gt;')
+        if '\r' in text:
+            text = text.replace('\r', '&#xD;')
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+
+def _escape_attrib_c14n(text):
+    # escape attribute value
+    try:
+        if '&' in text:
+            text = text.replace('&', '&amp;')
+        if '<' in text:
+            text = text.replace('<', '&lt;')
+        if '"' in text:
+            text = text.replace('"', '&quot;')
+        if '\t' in text:
+            text = text.replace('\t', '&#x9;')
+        if '\n' in text:
+            text = text.replace('\n', '&#xA;')
+        if '\r' in text:
+            text = text.replace('\r', '&#xD;')
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+
+# --------------------------------------------------------------------
+
+# Import the C accelerators
+try:
+    # Element is going to be shadowed by the C implementation. We need to keep
+    # the Python version of it accessible for some "creative" uses by external
+    # code (see tests)
+    _Element_Py = Element
+
+    # Element, SubElement, ParseError, TreeBuilder, XMLParser, _set_factories
+    from _elementtree import *
+    from _elementtree import _set_factories
+except ImportError:
+    pass
+else:
+    _set_factories(Comment, ProcessingInstruction)
diff --git a/Python313_13_x86_Template/Lib/xml/etree/__init__.py b/Python314_4_x86_Template/Lib/xml/etree/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/xml/etree/__init__.py
rename to Python314_4_x86_Template/Lib/xml/etree/__init__.py
diff --git a/Python313_13_x86_Template/Lib/xml/etree/cElementTree.py b/Python314_4_x86_Template/Lib/xml/etree/cElementTree.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/xml/etree/cElementTree.py
rename to Python314_4_x86_Template/Lib/xml/etree/cElementTree.py
diff --git a/Python313_13_x86_Template/Lib/xml/parsers/__init__.py b/Python314_4_x86_Template/Lib/xml/parsers/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/xml/parsers/__init__.py
rename to Python314_4_x86_Template/Lib/xml/parsers/__init__.py
diff --git a/Python314_4_x86_Template/Lib/xml/parsers/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/xml/parsers/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 00000000..9b0e120d
Binary files /dev/null and b/Python314_4_x86_Template/Lib/xml/parsers/__pycache__/__init__.cpython-314.pyc differ
diff --git a/Python314_4_x86_Template/Lib/xml/parsers/__pycache__/expat.cpython-314.pyc b/Python314_4_x86_Template/Lib/xml/parsers/__pycache__/expat.cpython-314.pyc
new file mode 100644
index 00000000..3e1aae26
Binary files /dev/null and b/Python314_4_x86_Template/Lib/xml/parsers/__pycache__/expat.cpython-314.pyc differ
diff --git a/Python313_13_x86_Template/Lib/xml/parsers/expat.py b/Python314_4_x86_Template/Lib/xml/parsers/expat.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/xml/parsers/expat.py
rename to Python314_4_x86_Template/Lib/xml/parsers/expat.py
diff --git a/Python314_4_x86_Template/Lib/xml/sax/__init__.py b/Python314_4_x86_Template/Lib/xml/sax/__init__.py
new file mode 100644
index 00000000..fe4582c6
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/xml/sax/__init__.py
@@ -0,0 +1,100 @@
+"""Simple API for XML (SAX) implementation for Python.
+
+This module provides an implementation of the SAX 2 interface;
+information about the Java version of the interface can be found at
+http://www.megginson.com/SAX/. The Python version of the interface is
+documented at <...>.
+ +This package contains the following modules: + +handler -- Base classes and constants which define the SAX 2 API for + the 'client-side' of SAX for Python. + +saxutils -- Implementation of the convenience classes commonly used to + work with SAX. + +xmlreader -- Base classes and constants which define the SAX 2 API for + the parsers used with SAX for Python. + +expatreader -- Driver that allows use of the Expat parser with SAX. +""" + +from .xmlreader import InputSource +from .handler import ContentHandler, ErrorHandler +from ._exceptions import (SAXException, SAXNotRecognizedException, + SAXParseException, SAXNotSupportedException, + SAXReaderNotAvailable) + + +def parse(source, handler, errorHandler=ErrorHandler()): + parser = make_parser() + parser.setContentHandler(handler) + parser.setErrorHandler(errorHandler) + parser.parse(source) + +def parseString(string, handler, errorHandler=ErrorHandler()): + import io + if errorHandler is None: + errorHandler = ErrorHandler() + parser = make_parser() + parser.setContentHandler(handler) + parser.setErrorHandler(errorHandler) + + inpsrc = InputSource() + if isinstance(string, str): + inpsrc.setCharacterStream(io.StringIO(string)) + else: + inpsrc.setByteStream(io.BytesIO(string)) + parser.parse(inpsrc) + +# this is the parser list used by the make_parser function if no +# alternatives are given as parameters to the function + +default_parser_list = ["xml.sax.expatreader"] + +# tell modulefinder that importing sax potentially imports expatreader +_false = 0 +if _false: + import xml.sax.expatreader # noqa: F401 + +import os, sys +if not sys.flags.ignore_environment and "PY_SAX_PARSER" in os.environ: + default_parser_list = os.environ["PY_SAX_PARSER"].split(",") +del os, sys + + +def make_parser(parser_list=()): + """Creates and returns a SAX parser. + + Creates the first parser it is able to instantiate of the ones + given in the iterable created by chaining parser_list and + default_parser_list. 
The iterables must contain the names of Python + modules containing both a SAX parser and a create_parser function.""" + + for parser_name in list(parser_list) + default_parser_list: + try: + return _create_parser(parser_name) + except ImportError: + import sys + if parser_name in sys.modules: + # The parser module was found, but importing it + # failed unexpectedly, pass this exception through + raise + except SAXReaderNotAvailable: + # The parser module detected that it won't work properly, + # so try the next one + pass + + raise SAXReaderNotAvailable("No parsers found", None) + +# --- Internal utility methods used by make_parser + +def _create_parser(parser_name): + drv_module = __import__(parser_name,{},{},['create_parser']) + return drv_module.create_parser() + + +__all__ = ['ContentHandler', 'ErrorHandler', 'InputSource', 'SAXException', + 'SAXNotRecognizedException', 'SAXNotSupportedException', + 'SAXParseException', 'SAXReaderNotAvailable', + 'default_parser_list', 'make_parser', 'parse', 'parseString'] diff --git a/Python313_13_x86_Template/Lib/xml/sax/_exceptions.py b/Python314_4_x86_Template/Lib/xml/sax/_exceptions.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/sax/_exceptions.py rename to Python314_4_x86_Template/Lib/xml/sax/_exceptions.py diff --git a/Python313_13_x86_Template/Lib/xml/sax/expatreader.py b/Python314_4_x86_Template/Lib/xml/sax/expatreader.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/sax/expatreader.py rename to Python314_4_x86_Template/Lib/xml/sax/expatreader.py diff --git a/Python314_4_x86_Template/Lib/xml/sax/handler.py b/Python314_4_x86_Template/Lib/xml/sax/handler.py new file mode 100644 index 00000000..3183c3fe --- /dev/null +++ b/Python314_4_x86_Template/Lib/xml/sax/handler.py @@ -0,0 +1,387 @@ +""" +This module contains the core classes of version 2.0 of SAX for Python. +This file provides only default classes with absolutely minimum +functionality, from which drivers and applications can be subclassed. + +Many of these classes are empty and are included only as documentation +of the interfaces. + +$Id$ +""" + +version = '2.0beta' + +#============================================================================ +# +# HANDLER INTERFACES +# +#============================================================================ + +# ===== ERRORHANDLER ===== + +class ErrorHandler: + """Basic interface for SAX error handlers. + + If you create an object that implements this interface, then + register the object with your XMLReader, the parser will call the + methods in your object to report all warnings and errors. There + are three levels of errors available: warnings, (possibly) + recoverable errors, and unrecoverable errors. All methods take a + SAXParseException as the only parameter.""" + + def error(self, exception): + "Handle a recoverable error." + raise exception + + def fatalError(self, exception): + "Handle a non-recoverable error." + raise exception + + def warning(self, exception): + "Handle a warning." + print(exception) + + +# ===== CONTENTHANDLER ===== + +class ContentHandler: + """Interface for receiving logical document content events. + + This is the main callback interface in SAX, and the one most + important to applications. 
The order of events in this interface + mirrors the order of the information in the document.""" + + def __init__(self): + self._locator = None + + def setDocumentLocator(self, locator): + """Called by the parser to give the application a locator for + locating the origin of document events. + + SAX parsers are strongly encouraged (though not absolutely + required) to supply a locator: if it does so, it must supply + the locator to the application by invoking this method before + invoking any of the other methods in the DocumentHandler + interface. + + The locator allows the application to determine the end + position of any document-related event, even if the parser is + not reporting an error. Typically, the application will use + this information for reporting its own errors (such as + character content that does not match an application's + business rules). The information returned by the locator is + probably not sufficient for use with a search engine. + + Note that the locator will return correct information only + during the invocation of the events in this interface. The + application should not attempt to use it at any other time.""" + self._locator = locator + + def startDocument(self): + """Receive notification of the beginning of a document. + + The SAX parser will invoke this method only once, before any + other methods in this interface or in DTDHandler (except for + setDocumentLocator).""" + + def endDocument(self): + """Receive notification of the end of a document. + + The SAX parser will invoke this method only once, and it will + be the last method invoked during the parse. The parser shall + not invoke this method until it has either abandoned parsing + (because of an unrecoverable error) or reached the end of + input.""" + + def startPrefixMapping(self, prefix, uri): + """Begin the scope of a prefix-URI Namespace mapping. + + The information from this event is not necessary for normal + Namespace processing: the SAX XML reader will automatically + replace prefixes for element and attribute names when the + http://xml.org/sax/features/namespaces feature is true (the + default). + + There are cases, however, when applications need to use + prefixes in character data or in attribute values, where they + cannot safely be expanded automatically; the + start/endPrefixMapping event supplies the information to the + application to expand prefixes in those contexts itself, if + necessary. + + Note that start/endPrefixMapping events are not guaranteed to + be properly nested relative to each-other: all + startPrefixMapping events will occur before the corresponding + startElement event, and all endPrefixMapping events will occur + after the corresponding endElement event, but their order is + not guaranteed.""" + + def endPrefixMapping(self, prefix): + """End the scope of a prefix-URI mapping. + + See startPrefixMapping for details. This event will always + occur after the corresponding endElement event, but the order + of endPrefixMapping events is not otherwise guaranteed.""" + + def startElement(self, name, attrs): + """Signals the start of an element in non-namespace mode. + + The name parameter contains the raw XML 1.0 name of the + element type as a string and the attrs parameter holds an + instance of the Attributes class containing the attributes of + the element.""" + + def endElement(self, name): + """Signals the end of an element in non-namespace mode. 
+ + The name parameter contains the name of the element type, just + as with the startElement event.""" + + def startElementNS(self, name, qname, attrs): + """Signals the start of an element in namespace mode. + + The name parameter contains the name of the element type as a + (uri, localname) tuple, the qname parameter the raw XML 1.0 + name used in the source document, and the attrs parameter + holds an instance of the Attributes class containing the + attributes of the element. + + The uri part of the name tuple is None for elements which have + no namespace.""" + + def endElementNS(self, name, qname): + """Signals the end of an element in namespace mode. + + The name parameter contains the name of the element type, just + as with the startElementNS event.""" + + def characters(self, content): + """Receive notification of character data. + + The Parser will call this method to report each chunk of + character data. SAX parsers may return all contiguous + character data in a single chunk, or they may split it into + several chunks; however, all of the characters in any single + event must come from the same external entity so that the + Locator provides useful information.""" + + def ignorableWhitespace(self, whitespace): + """Receive notification of ignorable whitespace in element content. + + Validating Parsers must use this method to report each chunk + of ignorable whitespace (see the W3C XML 1.0 recommendation, + section 2.10): non-validating parsers may also use this method + if they are capable of parsing and using content models. + + SAX parsers may return all contiguous whitespace in a single + chunk, or they may split it into several chunks; however, all + of the characters in any single event must come from the same + external entity, so that the Locator provides useful + information.""" + + def processingInstruction(self, target, data): + """Receive notification of a processing instruction. + + The Parser will invoke this method once for each processing + instruction found: note that processing instructions may occur + before or after the main document element. + + A SAX parser should never report an XML declaration (XML 1.0, + section 2.8) or a text declaration (XML 1.0, section 4.3.1) + using this method.""" + + def skippedEntity(self, name): + """Receive notification of a skipped entity. + + The Parser will invoke this method once for each entity + skipped. Non-validating processors may skip entities if they + have not seen the declarations (because, for example, the + entity was declared in an external DTD subset). All processors + may skip external entities, depending on the values of the + http://xml.org/sax/features/external-general-entities and the + http://xml.org/sax/features/external-parameter-entities + properties.""" + + +# ===== DTDHandler ===== + +class DTDHandler: + """Handle DTD events. + + This interface specifies only those DTD events required for basic + parsing (unparsed entities and attributes).""" + + def notationDecl(self, name, publicId, systemId): + "Handle a notation declaration event." + + def unparsedEntityDecl(self, name, publicId, systemId, ndata): + "Handle an unparsed entity declaration event." + + +# ===== ENTITYRESOLVER ===== + +class EntityResolver: + """Basic interface for resolving entities. If you create an object + implementing this interface, then register the object with your + Parser, the parser will call the method in your object to + resolve all external entities. 
Note that DefaultHandler implements + this interface with the default behaviour.""" + + def resolveEntity(self, publicId, systemId): + """Resolve the system identifier of an entity and return either + the system identifier to read from as a string, or an InputSource + to read from.""" + return systemId + + +#============================================================================ +# +# CORE FEATURES +# +#============================================================================ + +feature_namespaces = "http://xml.org/sax/features/namespaces" +# true: Perform Namespace processing (default). +# false: Optionally do not perform Namespace processing +# (implies namespace-prefixes). +# access: (parsing) read-only; (not parsing) read/write + +feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes" +# true: Report the original prefixed names and attributes used for Namespace +# declarations. +# false: Do not report attributes used for Namespace declarations, and +# optionally do not report original prefixed names (default). +# access: (parsing) read-only; (not parsing) read/write + +feature_string_interning = "http://xml.org/sax/features/string-interning" +# true: All element names, prefixes, attribute names, Namespace URIs, and +# local names are interned using the built-in intern function. +# false: Names are not necessarily interned, although they may be (default). +# access: (parsing) read-only; (not parsing) read/write + +feature_validation = "http://xml.org/sax/features/validation" +# true: Report all validation errors (implies external-general-entities and +# external-parameter-entities). +# false: Do not report validation errors. +# access: (parsing) read-only; (not parsing) read/write + +feature_external_ges = "http://xml.org/sax/features/external-general-entities" +# true: Include all external general (text) entities. +# false: Do not include external general entities. +# access: (parsing) read-only; (not parsing) read/write + +feature_external_pes = "http://xml.org/sax/features/external-parameter-entities" +# true: Include all external parameter entities, including the external +# DTD subset. +# false: Do not include any external parameter entities, even the external +# DTD subset. +# access: (parsing) read-only; (not parsing) read/write + +all_features = [feature_namespaces, + feature_namespace_prefixes, + feature_string_interning, + feature_validation, + feature_external_ges, + feature_external_pes] + + +#============================================================================ +# +# CORE PROPERTIES +# +#============================================================================ + +property_lexical_handler = "http://xml.org/sax/properties/lexical-handler" +# data type: xml.sax.sax2lib.LexicalHandler +# description: An optional extension handler for lexical events like comments. +# access: read/write + +property_declaration_handler = "http://xml.org/sax/properties/declaration-handler" +# data type: xml.sax.sax2lib.DeclHandler +# description: An optional extension handler for DTD-related events other +# than notations and unparsed entities. +# access: read/write + +property_dom_node = "http://xml.org/sax/properties/dom-node" +# data type: org.w3c.dom.Node +# description: When parsing, the current DOM node being visited if this is +# a DOM iterator; when not parsing, the root DOM node for +# iteration. 
+# access: (parsing) read-only; (not parsing) read/write
+
+property_xml_string = "http://xml.org/sax/properties/xml-string"
+# data type: String
+# description: The literal string of characters that was the source for
+#              the current event.
+# access: read-only
+
+property_encoding = "http://www.python.org/sax/properties/encoding"
+# data type: String
+# description: The name of the encoding to assume for input data.
+# access: write: set the encoding, e.g. established by a higher-level
+#                protocol. May change during parsing (e.g. after
+#                processing a META tag)
+#         read:  return the current encoding (possibly established through
+#                auto-detection).
+# initial value: UTF-8
+#
+
+property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
+# data type: Dictionary
+# description: The dictionary used to intern common strings in the document
+# access: write: Request that the parser uses a specific dictionary, to
+#                allow interning across different documents
+#         read:  return the current interning dictionary, or None
+#
+
+all_properties = [property_lexical_handler,
+                  property_dom_node,
+                  property_declaration_handler,
+                  property_xml_string,
+                  property_encoding,
+                  property_interning_dict]
+
+
+class LexicalHandler:
+    """Optional SAX2 handler for lexical events.
+
+    This handler is used to obtain lexical information about an XML
+    document, that is, information about how the document was encoded
+    (as opposed to what it contains, which is reported to the
+    ContentHandler), such as comments and CDATA marked section
+    boundaries.
+
+    To set the LexicalHandler of an XMLReader, use the setProperty
+    method with the property identifier
+    'http://xml.org/sax/properties/lexical-handler'."""
+
+    def comment(self, content):
+        """Reports a comment anywhere in the document (including the
+        DTD and outside the document element).
+
+        content is a string that holds the contents of the comment."""
+
+    def startDTD(self, name, public_id, system_id):
+        """Report the start of the DTD declarations, if the document
+        has an associated DTD.
+
+        A startEntity event will be reported before declaration events
+        from the external DTD subset are reported, and this can be
+        used to infer from which subset DTD declarations derive.
+
+        name is the name of the document element type, public_id the
+        public identifier of the DTD (or None if none were supplied)
+        and system_id the system identifier of the external subset (or
+        None if none were supplied)."""
+
+    def endDTD(self):
+        """Signals the end of DTD declarations."""
+
+    def startCDATA(self):
+        """Reports the beginning of a CDATA marked section.
+ + The contents of the CDATA marked section will be reported + through the characters event.""" + + def endCDATA(self): + """Reports the end of a CDATA marked section.""" diff --git a/Python313_13_x86_Template/Lib/xml/sax/saxutils.py b/Python314_4_x86_Template/Lib/xml/sax/saxutils.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/sax/saxutils.py rename to Python314_4_x86_Template/Lib/xml/sax/saxutils.py diff --git a/Python313_13_x86_Template/Lib/xml/sax/xmlreader.py b/Python314_4_x86_Template/Lib/xml/sax/xmlreader.py similarity index 100% rename from Python313_13_x86_Template/Lib/xml/sax/xmlreader.py rename to Python314_4_x86_Template/Lib/xml/sax/xmlreader.py diff --git a/Python313_13_x86_Template/Lib/xmlrpc/__init__.py b/Python314_4_x86_Template/Lib/xmlrpc/__init__.py similarity index 100% rename from Python313_13_x86_Template/Lib/xmlrpc/__init__.py rename to Python314_4_x86_Template/Lib/xmlrpc/__init__.py diff --git a/Python314_4_x86_Template/Lib/xmlrpc/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/xmlrpc/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..a626bac3 Binary files /dev/null and b/Python314_4_x86_Template/Lib/xmlrpc/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/xmlrpc/__pycache__/client.cpython-314.pyc b/Python314_4_x86_Template/Lib/xmlrpc/__pycache__/client.cpython-314.pyc new file mode 100644 index 00000000..0c878edc Binary files /dev/null and b/Python314_4_x86_Template/Lib/xmlrpc/__pycache__/client.cpython-314.pyc differ diff --git a/Python313_13_x86_Template/Lib/xmlrpc/client.py b/Python314_4_x86_Template/Lib/xmlrpc/client.py similarity index 100% rename from Python313_13_x86_Template/Lib/xmlrpc/client.py rename to Python314_4_x86_Template/Lib/xmlrpc/client.py diff --git a/Python314_4_x86_Template/Lib/xmlrpc/server.py b/Python314_4_x86_Template/Lib/xmlrpc/server.py new file mode 100644 index 00000000..3e687115 --- /dev/null +++ b/Python314_4_x86_Template/Lib/xmlrpc/server.py @@ -0,0 +1,1003 @@ +r"""XML-RPC Servers. + +This module can be used to create simple XML-RPC servers +by creating a server and either installing functions, a +class instance, or by extending the SimpleXMLRPCServer +class. + +It can also be used to handle XML-RPC requests in a CGI +environment using CGIXMLRPCRequestHandler. + +The Doc* classes can be used to create XML-RPC servers that +serve pydoc-style documentation in response to HTTP +GET requests. This documentation is dynamically generated +based on the functions and methods registered with the +server. + +A list of possible usage patterns follows: + +1. Install functions: + +server = SimpleXMLRPCServer(("localhost", 8000)) +server.register_function(pow) +server.register_function(lambda x,y: x+y, 'add') +server.serve_forever() + +2. Install an instance: + +class MyFuncs: + def __init__(self): + # make all of the sys functions available through sys.func_name + import sys + self.sys = sys + def _listMethods(self): + # implement this method so that system.listMethods + # knows to advertise the sys methods + return list_public_methods(self) + \ + ['sys.' + method for method in list_public_methods(self.sys)] + def pow(self, x, y): return pow(x, y) + def add(self, x, y) : return x + y + +server = SimpleXMLRPCServer(("localhost", 8000)) +server.register_introspection_functions() +server.register_instance(MyFuncs()) +server.serve_forever() + +3. 
Install an instance with custom dispatch method: + +class Math: + def _listMethods(self): + # this method must be present for system.listMethods + # to work + return ['add', 'pow'] + def _methodHelp(self, method): + # this method must be present for system.methodHelp + # to work + if method == 'add': + return "add(2,3) => 5" + elif method == 'pow': + return "pow(x, y[, z]) => number" + else: + # By convention, return empty + # string if no help is available + return "" + def _dispatch(self, method, params): + if method == 'pow': + return pow(*params) + elif method == 'add': + return params[0] + params[1] + else: + raise ValueError('bad method') + +server = SimpleXMLRPCServer(("localhost", 8000)) +server.register_introspection_functions() +server.register_instance(Math()) +server.serve_forever() + +4. Subclass SimpleXMLRPCServer: + +class MathServer(SimpleXMLRPCServer): + def _dispatch(self, method, params): + try: + # We are forcing the 'export_' prefix on methods that are + # callable through XML-RPC to prevent potential security + # problems + func = getattr(self, 'export_' + method) + except AttributeError: + raise Exception('method "%s" is not supported' % method) + else: + return func(*params) + + def export_add(self, x, y): + return x + y + +server = MathServer(("localhost", 8000)) +server.serve_forever() + +5. CGI script: + +server = CGIXMLRPCRequestHandler() +server.register_function(pow) +server.handle_request() +""" + +# Written by Brian Quinlan (brian@sweetapp.com). +# Based on code written by Fredrik Lundh. + +from xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode +from http.server import BaseHTTPRequestHandler +from functools import partial +from inspect import signature +import html +import http.server +import socketserver +import sys +import os +import re +import pydoc +import traceback +try: + import fcntl +except ImportError: + fcntl = None + +def resolve_dotted_attribute(obj, attr, allow_dotted_names=True): + """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d + + Resolves a dotted attribute name to an object. Raises + an AttributeError if any attribute in the chain starts with a '_'. + + If the optional allow_dotted_names argument is false, dots are not + supported and this function operates similar to getattr(obj, attr). + """ + + if allow_dotted_names: + attrs = attr.split('.') + else: + attrs = [attr] + + for i in attrs: + if i.startswith('_'): + raise AttributeError( + 'attempt to access private attribute "%s"' % i + ) + else: + obj = getattr(obj,i) + return obj + +def list_public_methods(obj): + """Returns a list of attribute strings, found in the specified + object, which represent callable attributes""" + + return [member for member in dir(obj) + if not member.startswith('_') and + callable(getattr(obj, member))] + +class SimpleXMLRPCDispatcher: + """Mix-in class that dispatches XML-RPC requests. + + This class is used to register XML-RPC method handlers + and then to dispatch them. This class doesn't need to be + instanced directly when used by SimpleXMLRPCServer but it + can be instanced when used by the MultiPathXMLRPCServer + """ + + def __init__(self, allow_none=False, encoding=None, + use_builtin_types=False): + self.funcs = {} + self.instance = None + self.allow_none = allow_none + self.encoding = encoding or 'utf-8' + self.use_builtin_types = use_builtin_types + + def register_instance(self, instance, allow_dotted_names=False): + """Registers an instance to respond to XML-RPC requests. + + Only one instance can be installed at a time. 
+
+        If the registered instance has a _dispatch method then that
+        method will be called with the name of the XML-RPC method and
+        its parameters as a tuple
+        e.g. instance._dispatch('add',(2,3))
+
+        If the registered instance does not have a _dispatch method
+        then the instance will be searched to find a matching method
+        and, if found, will be called. Methods beginning with an '_'
+        are considered private and will not be called by
+        SimpleXMLRPCServer.
+
+        If a registered function matches an XML-RPC request, then it
+        will be called instead of the registered instance.
+
+        If the optional allow_dotted_names argument is true and the
+        instance does not have a _dispatch method, method names
+        containing dots are supported and resolved, as long as none of
+        the name segments start with an '_'.
+
+            *** SECURITY WARNING: ***
+
+            Enabling the allow_dotted_names option allows intruders
+            to access your module's global variables and may allow
+            intruders to execute arbitrary code on your machine.  Only
+            use this option on a secure, closed network.
+
+        """
+
+        self.instance = instance
+        self.allow_dotted_names = allow_dotted_names
+
+    def register_function(self, function=None, name=None):
+        """Registers a function to respond to XML-RPC requests.
+
+        The optional name argument can be used to set a Unicode name
+        for the function.
+        """
+        # decorator factory
+        if function is None:
+            return partial(self.register_function, name=name)
+
+        if name is None:
+            name = function.__name__
+        self.funcs[name] = function
+
+        return function
+
+    def register_introspection_functions(self):
+        """Registers the XML-RPC introspection methods in the system
+        namespace.
+
+        see http://xmlrpc.usefulinc.com/doc/reserved.html
+        """
+
+        self.funcs.update({'system.listMethods' : self.system_listMethods,
+                      'system.methodSignature' : self.system_methodSignature,
+                      'system.methodHelp' : self.system_methodHelp})
+
+    def register_multicall_functions(self):
+        """Registers the XML-RPC multicall method in the system
+        namespace.
+
+        see http://www.xmlrpc.com/discuss/msgReader$1208"""
+
+        self.funcs['system.multicall'] = self.system_multicall
+
+    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
+        """Dispatches an XML-RPC method from marshalled (XML) data.
+
+        XML-RPC methods are dispatched from the marshalled (XML) data
+        using the _dispatch method and the result is returned as
+        marshalled data. For backwards compatibility, a dispatch
+        function can be provided as an argument (see comment in
+        SimpleXMLRPCRequestHandler.do_POST) but overriding the
+        existing method through subclassing is the preferred means
+        of changing method dispatch behavior.
+        """
+
+        try:
+            params, method = loads(data, use_builtin_types=self.use_builtin_types)
+
+            # generate response
+            if dispatch_method is not None:
+                response = dispatch_method(method, params)
+            else:
+                response = self._dispatch(method, params)
+            # wrap response in a singleton tuple
+            response = (response,)
+            response = dumps(response, methodresponse=1,
+                             allow_none=self.allow_none, encoding=self.encoding)
+        except Fault as fault:
+            response = dumps(fault, allow_none=self.allow_none,
+                             encoding=self.encoding)
+        except BaseException as exc:
+            response = dumps(
+                Fault(1, "%s:%s" % (type(exc), exc)),
+                encoding=self.encoding, allow_none=self.allow_none,
+                )
+
+        return response.encode(self.encoding, 'xmlcharrefreplace')
+
+    def system_listMethods(self):
+        """system.listMethods() => ['add', 'subtract', 'multiply']
+
+        Returns a list of the methods supported by the server."""
+
+        methods = set(self.funcs.keys())
+        if self.instance is not None:
+            # Instance can implement _listMethods to return a list of
+            # methods
+            if hasattr(self.instance, '_listMethods'):
+                methods |= set(self.instance._listMethods())
+            # if the instance has a _dispatch method then we
+            # don't have enough information to provide a list
+            # of methods
+            elif not hasattr(self.instance, '_dispatch'):
+                methods |= set(list_public_methods(self.instance))
+        return sorted(methods)
+
+    def system_methodSignature(self, method_name):
+        """system.methodSignature('add') => [double, int, int]
+
+        Returns a list describing the signature of the method. In the
+        above example, the add method takes two integers as arguments
+        and returns a double result.
+
+        This server does NOT support system.methodSignature."""
+
+        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
+
+        return 'signatures not supported'
+
+    def system_methodHelp(self, method_name):
+        """system.methodHelp('add') => "Adds two integers together"
+
+        Returns a string containing documentation for the specified method."""
+
+        method = None
+        if method_name in self.funcs:
+            method = self.funcs[method_name]
+        elif self.instance is not None:
+            # Instance can implement _methodHelp to return help for a method
+            if hasattr(self.instance, '_methodHelp'):
+                return self.instance._methodHelp(method_name)
+            # if the instance has a _dispatch method then we
+            # don't have enough information to provide help
+            elif not hasattr(self.instance, '_dispatch'):
+                try:
+                    method = resolve_dotted_attribute(
+                                self.instance,
+                                method_name,
+                                self.allow_dotted_names
+                                )
+                except AttributeError:
+                    pass
+
+        # Note that we aren't checking that the method is actually
+        # a callable object of some kind
+        if method is None:
+            return ""
+        else:
+            return pydoc.getdoc(method)
+
+    def system_multicall(self, call_list):
+        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
+[[4], ...]
+
+        Allows the caller to package multiple XML-RPC calls into a single
+        request.
+
+        See http://www.xmlrpc.com/discuss/msgReader$1208
+        """
+
+        results = []
+        for call in call_list:
+            method_name = call['methodName']
+            params = call['params']
+
+            try:
+                # XXX A marshalling error in any response will fail the entire
+                # multicall. If someone cares they should fix this.
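+                # A hedged client-side sketch (host/port are placeholders):
+                # the stdlib xmlrpc.client.MultiCall wrapper is the usual
+                # way to build the call_list handled here, e.g.
+                #
+                #   from xmlrpc.client import ServerProxy, MultiCall
+                #   multi = MultiCall(ServerProxy("http://localhost:8000"))
+                #   multi.add(2, 2)
+                #   multi.pow(2, 10)
+                #   print(list(multi()))   # [4, 1024]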
+                results.append([self._dispatch(method_name, params)])
+            except Fault as fault:
+                results.append(
+                    {'faultCode' : fault.faultCode,
+                     'faultString' : fault.faultString}
+                    )
+            except BaseException as exc:
+                results.append(
+                    {'faultCode' : 1,
+                     'faultString' : "%s:%s" % (type(exc), exc)}
+                    )
+        return results
+
+    def _dispatch(self, method, params):
+        """Dispatches the XML-RPC method.
+
+        XML-RPC calls are forwarded to a registered function that
+        matches the called XML-RPC method name. If no such function
+        exists then the call is forwarded to the registered instance,
+        if available.
+
+        If the registered instance has a _dispatch method then that
+        method will be called with the name of the XML-RPC method and
+        its parameters as a tuple
+        e.g. instance._dispatch('add',(2,3))
+
+        If the registered instance does not have a _dispatch method
+        then the instance will be searched to find a matching method
+        and, if found, will be called.
+
+        Methods beginning with an '_' are considered private and will
+        not be called.
+        """
+
+        try:
+            # call the matching registered function
+            func = self.funcs[method]
+        except KeyError:
+            pass
+        else:
+            if func is not None:
+                return func(*params)
+            raise Exception('method "%s" is not supported' % method)
+
+        if self.instance is not None:
+            if hasattr(self.instance, '_dispatch'):
+                # call the `_dispatch` method on the instance
+                return self.instance._dispatch(method, params)
+
+            # call the instance's method directly
+            try:
+                func = resolve_dotted_attribute(
+                    self.instance,
+                    method,
+                    self.allow_dotted_names
+                )
+            except AttributeError:
+                pass
+            else:
+                if func is not None:
+                    return func(*params)
+
+        raise Exception('method "%s" is not supported' % method)
+
+class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler):
+    """Simple XML-RPC request handler class.
+
+    Handles all HTTP POST requests and attempts to decode them as
+    XML-RPC requests.
+    """
+
+    # Class attribute listing the accessible path components;
+    # paths not on this list will result in a 404 error.
+    rpc_paths = ('/', '/RPC2', '/pydoc.css')
+
+    #if not None, encode responses larger than this, if possible
+    encode_threshold = 1400 #a common MTU
+
+    #Override from StreamRequestHandler: full buffering of output
+    #and no Nagle.
+    wbufsize = -1
+    disable_nagle_algorithm = True
+
+    # a re to match a gzip Accept-Encoding
+    aepattern = re.compile(r"""
+                            \s* ([^\s;]+) \s*            #content-coding
+                            (;\s* q \s*=\s* ([0-9\.]+))? #q
+                            """, re.VERBOSE | re.IGNORECASE)
+
+    def accept_encodings(self):
+        r = {}
+        ae = self.headers.get("Accept-Encoding", "")
+        for e in ae.split(","):
+            match = self.aepattern.match(e)
+            if match:
+                v = match.group(3)
+                v = float(v) if v else 1.0
+                r[match.group(1)] = v
+        return r
+
+    def is_rpc_path_valid(self):
+        if self.rpc_paths:
+            return self.path in self.rpc_paths
+        else:
+            # If .rpc_paths is empty, just assume all paths are legal
+            return True
+
+    def do_POST(self):
+        """Handles the HTTP POST request.
+
+        Attempts to interpret all HTTP POST requests as XML-RPC calls,
+        which are forwarded to the server's _dispatch method for handling.
+        """
+
+        # Check that the path is legal
+        if not self.is_rpc_path_valid():
+            self.report_404()
+            return
+
+        try:
+            # Get arguments by reading body of request.
+            # We read this in chunks to avoid straining
+            # socket.read(); around the 10 or 15Mb mark, some platforms
+            # begin to have problems (bug #792570).
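+            # For reference, the body read below is a standard XML-RPC
+            # <methodCall> document; an illustrative payload:
+            #
+            #   <?xml version="1.0"?>
+            #   <methodCall>
+            #     <methodName>add</methodName>
+            #     <params>
+            #       <param><value><int>2</int></value></param>
+            #       <param><value><int>3</int></value></param>
+            #     </params>
+            #   </methodCall>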
+ max_chunk_size = 10*1024*1024 + size_remaining = int(self.headers["content-length"]) + L = [] + while size_remaining: + chunk_size = min(size_remaining, max_chunk_size) + chunk = self.rfile.read(chunk_size) + if not chunk: + break + L.append(chunk) + size_remaining -= len(L[-1]) + data = b''.join(L) + + data = self.decode_request_content(data) + if data is None: + return #response has been sent + + # In previous versions of SimpleXMLRPCServer, _dispatch + # could be overridden in this class, instead of in + # SimpleXMLRPCDispatcher. To maintain backwards compatibility, + # check to see if a subclass implements _dispatch and dispatch + # using that method if present. + response = self.server._marshaled_dispatch( + data, getattr(self, '_dispatch', None), self.path + ) + except Exception as e: # This should only happen if the module is buggy + # internal error, report as HTTP server error + self.send_response(500) + + # Send information about the exception if requested + if hasattr(self.server, '_send_traceback_header') and \ + self.server._send_traceback_header: + self.send_header("X-exception", str(e)) + trace = traceback.format_exc() + trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII') + self.send_header("X-traceback", trace) + + self.send_header("Content-length", "0") + self.end_headers() + else: + self.send_response(200) + self.send_header("Content-type", "text/xml") + if self.encode_threshold is not None: + if len(response) > self.encode_threshold: + q = self.accept_encodings().get("gzip", 0) + if q: + try: + response = gzip_encode(response) + self.send_header("Content-Encoding", "gzip") + except NotImplementedError: + pass + self.send_header("Content-length", str(len(response))) + self.end_headers() + self.wfile.write(response) + + def decode_request_content(self, data): + #support gzip encoding of request + encoding = self.headers.get("content-encoding", "identity").lower() + if encoding == "identity": + return data + if encoding == "gzip": + try: + return gzip_decode(data) + except NotImplementedError: + self.send_response(501, "encoding %r not supported" % encoding) + except ValueError: + self.send_response(400, "error decoding gzip content") + else: + self.send_response(501, "encoding %r not supported" % encoding) + self.send_header("Content-length", "0") + self.end_headers() + + def report_404 (self): + # Report a 404 error + self.send_response(404) + response = b'No such page' + self.send_header("Content-type", "text/plain") + self.send_header("Content-length", str(len(response))) + self.end_headers() + self.wfile.write(response) + + def log_request(self, code='-', size='-'): + """Selectively log an accepted request.""" + + if self.server.logRequests: + BaseHTTPRequestHandler.log_request(self, code, size) + +class SimpleXMLRPCServer(socketserver.TCPServer, + SimpleXMLRPCDispatcher): + """Simple XML-RPC server. + + Simple XML-RPC server that allows functions and a single instance + to be installed to handle requests. The default implementation + attempts to dispatch XML-RPC calls to the functions or instance + installed in the server. Override the _dispatch method inherited + from SimpleXMLRPCDispatcher to change this behavior. + """ + + allow_reuse_address = True + allow_reuse_port = False + + # Warning: this is for debugging purposes only! 
Never set this to True in
+    # production code, as it will send out sensitive information (exception
+    # and stack trace details) when exceptions are raised inside
+    # SimpleXMLRPCRequestHandler.do_POST
+    _send_traceback_header = False
+
+    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
+                 logRequests=True, allow_none=False, encoding=None,
+                 bind_and_activate=True, use_builtin_types=False):
+        self.logRequests = logRequests
+
+        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
+        socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)
+
+
+class MultiPathXMLRPCServer(SimpleXMLRPCServer):
+    """Multipath XML-RPC Server
+    This specialization of SimpleXMLRPCServer allows the user to create
+    multiple Dispatcher instances and assign them to different
+    HTTP request paths.  This makes it possible to run two or more
+    'virtual XML-RPC servers' at the same port.
+    Make sure that the requestHandler accepts the paths in question.
+    """
+    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
+                 logRequests=True, allow_none=False, encoding=None,
+                 bind_and_activate=True, use_builtin_types=False):
+
+        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none,
+                                    encoding, bind_and_activate, use_builtin_types)
+        self.dispatchers = {}
+        self.allow_none = allow_none
+        self.encoding = encoding or 'utf-8'
+
+    def add_dispatcher(self, path, dispatcher):
+        self.dispatchers[path] = dispatcher
+        return dispatcher
+
+    def get_dispatcher(self, path):
+        return self.dispatchers[path]
+
+    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
+        try:
+            response = self.dispatchers[path]._marshaled_dispatch(
+               data, dispatch_method, path)
+        except BaseException as exc:
+            # report low level exception back to server
+            # (each dispatcher should have handled their own
+            # exceptions)
+            response = dumps(
+                Fault(1, "%s:%s" % (type(exc), exc)),
+                encoding=self.encoding, allow_none=self.allow_none)
+            response = response.encode(self.encoding, 'xmlcharrefreplace')
+        return response
+
+class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
+    """Simple handler for XML-RPC data passed through CGI."""
+
+    def __init__(self, allow_none=False, encoding=None, use_builtin_types=False):
+        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
+
+    def handle_xmlrpc(self, request_text):
+        """Handle a single XML-RPC request"""
+
+        response = self._marshaled_dispatch(request_text)
+
+        print('Content-Type: text/xml')
+        print('Content-Length: %d' % len(response))
+        print()
+        sys.stdout.flush()
+        sys.stdout.buffer.write(response)
+        sys.stdout.buffer.flush()
+
+    def handle_get(self):
+        """Handle a single HTTP GET request.
+
+        Default implementation indicates an error because
+        XML-RPC uses the POST method.
+        """
+
+        code = 400
+        message, explain = BaseHTTPRequestHandler.responses[code]
+
+        response = http.server.DEFAULT_ERROR_MESSAGE % \
+            {
+             'code' : code,
+             'message' : message,
+             'explain' : explain
+            }
+        response = response.encode('utf-8')
+        print('Status: %d %s' % (code, message))
+        print('Content-Type: %s' % http.server.DEFAULT_ERROR_CONTENT_TYPE)
+        print('Content-Length: %d' % len(response))
+        print()
+        sys.stdout.flush()
+        sys.stdout.buffer.write(response)
+        sys.stdout.buffer.flush()
+
+    def handle_request(self, request_text=None):
+        """Handle a single XML-RPC request passed through a CGI post method.
+
+        If no XML data is given then it is read from stdin.
The resulting
+        XML-RPC response is printed to stdout along with the correct HTTP
+        headers.
+        """
+
+        if request_text is None and \
+            os.environ.get('REQUEST_METHOD', None) == 'GET':
+            self.handle_get()
+        else:
+            # POST data is normally available through stdin
+            try:
+                length = int(os.environ.get('CONTENT_LENGTH', None))
+            except (ValueError, TypeError):
+                length = -1
+            if request_text is None:
+                request_text = sys.stdin.read(length)
+
+            self.handle_xmlrpc(request_text)
+
+
+# -----------------------------------------------------------------------------
+# Self documenting XML-RPC Server.
+
+class ServerHTMLDoc(pydoc.HTMLDoc):
+    """Class used to generate pydoc HTML document for a server"""
+
+    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
+        """Mark up some plain text, given a context of symbols to look for.
+        Each context dictionary maps object names to anchor names."""
+        escape = escape or self.escape
+        results = []
+        here = 0
+
+        # XXX Note that this regular expression does not allow for the
+        # hyperlinking of arbitrary strings being used as method
+        # names. Only methods with names consisting of word characters
+        # and '.'s are hyperlinked.
+        pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|'
+                                r'RFC[- ]?(\d+)|'
+                                r'PEP[- ]?(\d+)|'
+                                r'(self\.)?((?:\w|\.)+))\b')
+        while match := pattern.search(text, here):
+            start, end = match.span()
+            results.append(escape(text[here:start]))
+
+            all, scheme, rfc, pep, selfdot, name = match.groups()
+            if scheme:
+                url = escape(all).replace('"', '&quot;')
+                results.append('<a href="%s">%s</a>' % (url, url))
+            elif rfc:
+                url = 'https://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
+                results.append('<a href="%s">%s</a>' % (url, escape(all)))
+            elif pep:
+                url = 'https://peps.python.org/pep-%04d/' % int(pep)
+                results.append('<a href="%s">%s</a>' % (url, escape(all)))
+            elif text[end:end+1] == '(':
+                results.append(self.namelink(name, methods, funcs, classes))
+            elif selfdot:
+                results.append('self.<strong>%s</strong>' % name)
+            else:
+                results.append(self.namelink(name, classes))
+            here = end
+        results.append(escape(text[here:]))
+        return ''.join(results)
+
+    def docroutine(self, object, name, mod=None,
+                   funcs={}, classes={}, methods={}, cl=None):
+        """Produce HTML documentation for a function or method object."""
+
+        anchor = (cl and cl.__name__ or '') + '-' + name
+        note = ''
+
+        title = '<a name="%s"><strong>%s</strong></a>' % (
+            self.escape(anchor), self.escape(name))
+
+        if callable(object):
+            argspec = str(signature(object))
+        else:
+            argspec = '(...)'
+
+        if isinstance(object, tuple):
+            argspec = object[0] or argspec
+            docstring = object[1] or ""
+        else:
+            docstring = pydoc.getdoc(object)
+
+        decl = title + argspec + (note and self.grey(
+               '<font face="helvetica, arial">%s</font>' % note))
+
+        doc = self.markup(
+            docstring, self.preformat, funcs, classes, methods)
+        doc = doc and '<dd><span class="code">%s</span></dd>' % doc
+        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
+
+    def docserver(self, server_name, package_documentation, methods):
+        """Produce HTML documentation for an XML-RPC server."""
+
+        fdict = {}
+        for key, value in methods.items():
+            fdict[key] = '#-' + key
+            fdict[value] = fdict[key]
+
+        server_name = self.escape(server_name)
+        head = '<big><big><strong>%s</strong></big></big>' % server_name
+        result = self.heading(head)
+
+        doc = self.markup(package_documentation, self.preformat, fdict)
+        doc = doc and '<span class="code">%s</span>' % doc
+        result = result + '<p>%s</p>\n' % doc
+
+        contents = []
+        method_items = sorted(methods.items())
+        for key, value in method_items:
+            contents.append(self.docroutine(value, key, funcs=fdict))
+        result = result + self.bigsection(
+            'Methods', 'functions', ''.join(contents))
+
+        return result
+
+
+    def page(self, title, contents):
+        """Format an HTML page."""
+        css_path = "/pydoc.css"
+        css_link = (
+            '<link rel="stylesheet" type="text/css" href="%s">' %
+            css_path)
+        return '''\
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Python: %s</title>
+%s</head><body>%s</body></html>''' % (title, css_link, contents)
+
+class XMLRPCDocGenerator:
+    """Generates documentation for an XML-RPC server.
+
+    This class is designed as mix-in and should not
+    be constructed directly.
+    """
+
+    def __init__(self):
+        # setup variables used for HTML documentation
+        self.server_name = 'XML-RPC Server Documentation'
+        self.server_documentation = \
+            "This server exports the following methods through the XML-RPC "\
+            "protocol."
+        self.server_title = 'XML-RPC Server Documentation'
+
+    def set_server_title(self, server_title):
+        """Set the HTML title of the generated server documentation"""
+
+        self.server_title = server_title
+
+    def set_server_name(self, server_name):
+        """Set the name of the generated HTML server documentation"""
+
+        self.server_name = server_name
+
+    def set_server_documentation(self, server_documentation):
+        """Set the documentation string for the entire server."""
+
+        self.server_documentation = server_documentation
+
+    def generate_html_documentation(self):
+        """generate_html_documentation() => html documentation for the server
+
+        Generates HTML documentation for the server using introspection for
+        installed functions and instances that do not implement the
+        _dispatch method. Alternatively, instances can choose to implement
+        the _get_method_argstring(method_name) method to provide the
+        argument string used in the documentation and the
+        _methodHelp(method_name) method to provide the help text used
+        in the documentation."""
+
+        methods = {}
+
+        for method_name in self.system_listMethods():
+            if method_name in self.funcs:
+                method = self.funcs[method_name]
+            elif self.instance is not None:
+                method_info = [None, None] # argspec, documentation
+                if hasattr(self.instance, '_get_method_argstring'):
+                    method_info[0] = self.instance._get_method_argstring(method_name)
+                if hasattr(self.instance, '_methodHelp'):
+                    method_info[1] = self.instance._methodHelp(method_name)
+
+                method_info = tuple(method_info)
+                if method_info != (None, None):
+                    method = method_info
+                elif not hasattr(self.instance, '_dispatch'):
+                    try:
+                        method = resolve_dotted_attribute(
+                                    self.instance,
+                                    method_name
+                                    )
+                    except AttributeError:
+                        method = method_info
+                else:
+                    method = method_info
+            else:
+                assert 0, "Could not find method in self.functions and no "\
+                          "instance installed"
+
+            methods[method_name] = method
+
+        documenter = ServerHTMLDoc()
+        documentation = documenter.docserver(
+                                self.server_name,
+                                self.server_documentation,
+                                methods
+                            )
+
+        return documenter.page(html.escape(self.server_title), documentation)
+
+class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
+    """XML-RPC and documentation request handler class.
+
+    Handles all HTTP POST requests and attempts to decode them as
+    XML-RPC requests.
+
+    Handles all HTTP GET requests and interprets them as requests
+    for documentation.
+    """
+
+    def _get_css(self, url):
+        path_here = os.path.dirname(os.path.realpath(__file__))
+        css_path = os.path.join(path_here, "..", "pydoc_data", "_pydoc.css")
+        with open(css_path, mode="rb") as fp:
+            return fp.read()
+
+    def do_GET(self):
+        """Handles the HTTP GET request.
+
+        Interpret all HTTP GET requests as requests for server
+        documentation.
+        """
+        # Check that the path is legal
+        if not self.is_rpc_path_valid():
+            self.report_404()
+            return
+
+        if self.path.endswith('.css'):
+            content_type = 'text/css'
+            response = self._get_css(self.path)
+        else:
+            content_type = 'text/html'
+            response = self.server.generate_html_documentation().encode('utf-8')
+
+        self.send_response(200)
+        self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
+        self.send_header("Content-length", str(len(response)))
+        self.end_headers()
+        self.wfile.write(response)
+
+class DocXMLRPCServer(  SimpleXMLRPCServer,
+                        XMLRPCDocGenerator):
+    """XML-RPC and HTML documentation server.
+
+    Adds the ability to serve server documentation to the capabilities
+    of SimpleXMLRPCServer.
+    """
+
+    def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
+                 logRequests=True, allow_none=False, encoding=None,
+                 bind_and_activate=True, use_builtin_types=False):
+        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
+                                    allow_none, encoding, bind_and_activate,
+                                    use_builtin_types)
+        XMLRPCDocGenerator.__init__(self)
+
+class DocCGIXMLRPCRequestHandler(   CGIXMLRPCRequestHandler,
+                                    XMLRPCDocGenerator):
+    """Handler for XML-RPC data and documentation requests passed through
+    CGI"""
+
+    def handle_get(self):
+        """Handles the HTTP GET request.
+
+        Interpret all HTTP GET requests as requests for server
+        documentation.
+        """
+
+        response = self.generate_html_documentation().encode('utf-8')
+
+        print('Content-Type: text/html')
+        print('Content-Length: %d' % len(response))
+        print()
+        sys.stdout.flush()
+        sys.stdout.buffer.write(response)
+        sys.stdout.buffer.flush()
+
+    def __init__(self):
+        CGIXMLRPCRequestHandler.__init__(self)
+        XMLRPCDocGenerator.__init__(self)
+
+
+if __name__ == '__main__':
+    import datetime
+
+    class ExampleService:
+        def getData(self):
+            return '42'
+
+        class currentTime:
+            @staticmethod
+            def getCurrentTime():
+                return datetime.datetime.now()
+
+    with SimpleXMLRPCServer(("localhost", 8000)) as server:
+        server.register_function(pow)
+        server.register_function(lambda x,y: x+y, 'add')
+        server.register_instance(ExampleService(), allow_dotted_names=True)
+        server.register_multicall_functions()
+        print('Serving XML-RPC on localhost port 8000')
+        print('It is advisable to run this example server within a secure, closed network.')
+        try:
+            server.serve_forever()
+        except KeyboardInterrupt:
+            print("\nKeyboard interrupt received, exiting.")
+            sys.exit(0)
diff --git a/Python314_4_x86_Template/Lib/zipapp.py b/Python314_4_x86_Template/Lib/zipapp.py
new file mode 100644
index 00000000..7a4ef96e
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/zipapp.py
@@ -0,0 +1,231 @@
+import contextlib
+import os
+import pathlib
+import shutil
+import stat
+import sys
+import zipfile
+
+__all__ = ['ZipAppError', 'create_archive', 'get_interpreter']
+
+
+# The __main__.py used if the user specifies "-m module:fn".
+# Note that this will always be written as UTF-8 (module and
+# function names can be non-ASCII in Python 3).
+# We add a coding cookie even though UTF-8 is the default in Python 3
+# because the resulting archive may be intended to be run under Python 2.
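+# For example, with a hypothetical entry point "-m myapp.cli:main" the
+# template below renders to:
+#   # -*- coding: utf-8 -*-
+#   import myapp.cli
+#   myapp.cli.main()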
+MAIN_TEMPLATE = """\ +# -*- coding: utf-8 -*- +import {module} +{module}.{fn}() +""" + + +# The Windows launcher defaults to UTF-8 when parsing shebang lines if the +# file has no BOM. So use UTF-8 on Windows. +# On Unix, use the filesystem encoding. +if sys.platform.startswith('win'): + shebang_encoding = 'utf-8' +else: + shebang_encoding = sys.getfilesystemencoding() + + +class ZipAppError(ValueError): + pass + + +@contextlib.contextmanager +def _maybe_open(archive, mode): + if isinstance(archive, (str, os.PathLike)): + with open(archive, mode) as f: + yield f + else: + yield archive + + +def _write_file_prefix(f, interpreter): + """Write a shebang line.""" + if interpreter: + shebang = b'#!' + interpreter.encode(shebang_encoding) + b'\n' + f.write(shebang) + + +def _copy_archive(archive, new_archive, interpreter=None): + """Copy an application archive, modifying the shebang line.""" + with _maybe_open(archive, 'rb') as src: + # Skip the shebang line from the source. + # Read 2 bytes of the source and check if they are #!. + first_2 = src.read(2) + if first_2 == b'#!': + # Discard the initial 2 bytes and the rest of the shebang line. + first_2 = b'' + src.readline() + + with _maybe_open(new_archive, 'wb') as dst: + _write_file_prefix(dst, interpreter) + # If there was no shebang, "first_2" contains the first 2 bytes + # of the source file, so write them before copying the rest + # of the file. + dst.write(first_2) + shutil.copyfileobj(src, dst) + + if interpreter and isinstance(new_archive, str): + os.chmod(new_archive, os.stat(new_archive).st_mode | stat.S_IEXEC) + + +def create_archive(source, target=None, interpreter=None, main=None, + filter=None, compressed=False): + """Create an application archive from SOURCE. + + The SOURCE can be the name of a directory, or a filename or a file-like + object referring to an existing archive. + + The content of SOURCE is packed into an application archive in TARGET, + which can be a filename or a file-like object. If SOURCE is a directory, + TARGET can be omitted and will default to the name of SOURCE with .pyz + appended. + + The created application archive will have a shebang line specifying + that it should run with INTERPRETER (there will be no shebang line if + INTERPRETER is None), and a __main__.py which runs MAIN (if MAIN is + not specified, an existing __main__.py will be used). It is an error + to specify MAIN for anything other than a directory source with no + __main__.py, and it is an error to omit MAIN if the directory has no + __main__.py. + """ + # Are we copying an existing archive? + source_is_file = False + if hasattr(source, 'read') and hasattr(source, 'readline'): + source_is_file = True + else: + source = pathlib.Path(source) + if source.is_file(): + source_is_file = True + + if source_is_file: + _copy_archive(source, target, interpreter) + return + + # We are creating a new archive from a directory. + if not source.exists(): + raise ZipAppError("Source does not exist") + has_main = (source / '__main__.py').is_file() + if main and has_main: + raise ZipAppError( + "Cannot specify entry point if the source has __main__.py") + if not (main or has_main): + raise ZipAppError("Archive has no entry point") + + main_py = None + if main: + # Check that main has the right format. 
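+        # e.g. main="pkg.mod:func" (illustrative) splits into mod="pkg.mod"
+        # and fn="func"; every dot-separated part of both halves must be a
+        # valid identifier.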
+ mod, sep, fn = main.partition(':') + mod_ok = all(part.isidentifier() for part in mod.split('.')) + fn_ok = all(part.isidentifier() for part in fn.split('.')) + if not (sep == ':' and mod_ok and fn_ok): + raise ZipAppError("Invalid entry point: " + main) + main_py = MAIN_TEMPLATE.format(module=mod, fn=fn) + + if target is None: + target = source.with_suffix('.pyz') + elif not hasattr(target, 'write'): + target = pathlib.Path(target) + + # Create the list of files to add to the archive now, in case + # the target is being created in the source directory - we + # don't want the target being added to itself + files_to_add = {} + for path in sorted(source.rglob('*')): + relative_path = path.relative_to(source) + if filter is None or filter(relative_path): + files_to_add[path] = relative_path + + # The target cannot be in the list of files to add. If it were, we'd + # end up overwriting the source file and writing the archive into + # itself, which is an error. We therefore check for that case and + # provide a helpful message for the user. + + # Note that we only do a simple path equality check. This won't + # catch every case, but it will catch the common case where the + # source is the CWD and the target is a file in the CWD. More + # thorough checks don't provide enough value to justify the extra + # cost. + + # If target is a file-like object, it will simply fail to compare + # equal to any of the entries in files_to_add, so there's no need + # to add a special check for that. + if target in files_to_add: + raise ZipAppError( + f"The target archive {target} overwrites one of the source files.") + + with _maybe_open(target, 'wb') as fd: + _write_file_prefix(fd, interpreter) + compression = (zipfile.ZIP_DEFLATED if compressed else + zipfile.ZIP_STORED) + with zipfile.ZipFile(fd, 'w', compression=compression) as z: + for path, relative_path in files_to_add.items(): + z.write(path, relative_path.as_posix()) + if main_py: + z.writestr('__main__.py', main_py.encode('utf-8')) + + if interpreter and not hasattr(target, 'write'): + target.chmod(target.stat().st_mode | stat.S_IEXEC) + + +def get_interpreter(archive): + with _maybe_open(archive, 'rb') as f: + if f.read(2) == b'#!': + return f.readline().strip().decode(shebang_encoding) + + +def main(args=None): + """Run the zipapp command line interface. + + The ARGS parameter lets you specify the argument list directly. + Omitting ARGS (or setting it to None) works as for argparse, using + sys.argv[1:] as the argument list. + """ + import argparse + + parser = argparse.ArgumentParser(color=True) + parser.add_argument('--output', '-o', default=None, + help="The name of the output archive. " + "Required if SOURCE is an archive.") + parser.add_argument('--python', '-p', default=None, + help="The name of the Python interpreter to use " + "(default: no shebang line).") + parser.add_argument('--main', '-m', default=None, + help="The main function of the application " + "(default: use an existing __main__.py).") + parser.add_argument('--compress', '-c', action='store_true', + help="Compress files with the deflate method. " + "Files are stored uncompressed by default.") + parser.add_argument('--info', default=False, action='store_true', + help="Display the interpreter from the archive.") + parser.add_argument('source', + help="Source directory (or existing archive).") + + args = parser.parse_args(args) + + # Handle `python -m zipapp archive.pyz --info`. 
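+    # (e.g. for an archive built earlier with a hypothetical
+    #  "python -m zipapp myapp -m myapp.cli:main -p /usr/bin/python3",
+    #  this branch prints "Interpreter: /usr/bin/python3")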
+ if args.info: + if not os.path.isfile(args.source): + raise SystemExit("Can only get info for an archive file") + interpreter = get_interpreter(args.source) + print("Interpreter: {}".format(interpreter or "")) + sys.exit(0) + + if os.path.isfile(args.source): + if args.output is None or (os.path.exists(args.output) and + os.path.samefile(args.source, args.output)): + raise SystemExit("In-place editing of archives is not supported") + if args.main: + raise SystemExit("Cannot change the main function when copying") + + create_archive(args.source, args.output, + interpreter=args.python, main=args.main, + compressed=args.compress) + + +if __name__ == '__main__': + main() diff --git a/Python314_4_x86_Template/Lib/zipfile/__init__.py b/Python314_4_x86_Template/Lib/zipfile/__init__.py new file mode 100644 index 00000000..19aea290 --- /dev/null +++ b/Python314_4_x86_Template/Lib/zipfile/__init__.py @@ -0,0 +1,2435 @@ +""" +Read and write ZIP files. + +XXX references to utf-8 need further investigation. +""" +import binascii +import importlib.util +import io +import os +import shutil +import stat +import struct +import sys +import threading +import time + +try: + import zlib # We may need its compression method + crc32 = zlib.crc32 +except ImportError: + zlib = None + crc32 = binascii.crc32 + +try: + import bz2 # We may need its compression method +except ImportError: + bz2 = None + +try: + import lzma # We may need its compression method +except ImportError: + lzma = None + +try: + from compression import zstd # We may need its compression method +except ImportError: + zstd = None + +__all__ = ["BadZipFile", "BadZipfile", "error", + "ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA", + "ZIP_ZSTANDARD", "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", + "LargeZipFile", "Path"] + +class BadZipFile(Exception): + pass + + +class LargeZipFile(Exception): + """ + Raised when writing a zipfile, the zipfile requires ZIP64 extensions + and those extensions are disabled. + """ + +error = BadZipfile = BadZipFile # Pre-3.2 compatibility names + + +ZIP64_LIMIT = (1 << 31) - 1 +ZIP_FILECOUNT_LIMIT = (1 << 16) - 1 +ZIP_MAX_COMMENT = (1 << 16) - 1 + +# constants for Zip file compression methods +ZIP_STORED = 0 +ZIP_DEFLATED = 8 +ZIP_BZIP2 = 12 +ZIP_LZMA = 14 +ZIP_ZSTANDARD = 93 +# Other ZIP compression methods not supported + +DEFAULT_VERSION = 20 +ZIP64_VERSION = 45 +BZIP2_VERSION = 46 +LZMA_VERSION = 63 +ZSTANDARD_VERSION = 63 +# we recognize (but not necessarily support) all features up to that version +MAX_EXTRACT_VERSION = 63 + +# Below are some formats and associated data for reading/writing headers using +# the struct module. 
The names and structures of headers/records are those used +# in the PKWARE description of the ZIP file format: +# http://www.pkware.com/documents/casestudies/APPNOTE.TXT +# (URL valid as of January 2008) + +# The "end of central directory" structure, magic number, size, and indices +# (section V.I in the format document) +structEndArchive = b"<4s4H2LH" +stringEndArchive = b"PK\005\006" +sizeEndCentDir = struct.calcsize(structEndArchive) + +_ECD_SIGNATURE = 0 +_ECD_DISK_NUMBER = 1 +_ECD_DISK_START = 2 +_ECD_ENTRIES_THIS_DISK = 3 +_ECD_ENTRIES_TOTAL = 4 +_ECD_SIZE = 5 +_ECD_OFFSET = 6 +_ECD_COMMENT_SIZE = 7 +# These last two indices are not part of the structure as defined in the +# spec, but they are used internally by this module as a convenience +_ECD_COMMENT = 8 +_ECD_LOCATION = 9 + +# The "central directory" structure, magic number, size, and indices +# of entries in the structure (section V.F in the format document) +structCentralDir = "<4s4B4HL2L5H2L" +stringCentralDir = b"PK\001\002" +sizeCentralDir = struct.calcsize(structCentralDir) + +# indexes of entries in the central directory structure +_CD_SIGNATURE = 0 +_CD_CREATE_VERSION = 1 +_CD_CREATE_SYSTEM = 2 +_CD_EXTRACT_VERSION = 3 +_CD_EXTRACT_SYSTEM = 4 +_CD_FLAG_BITS = 5 +_CD_COMPRESS_TYPE = 6 +_CD_TIME = 7 +_CD_DATE = 8 +_CD_CRC = 9 +_CD_COMPRESSED_SIZE = 10 +_CD_UNCOMPRESSED_SIZE = 11 +_CD_FILENAME_LENGTH = 12 +_CD_EXTRA_FIELD_LENGTH = 13 +_CD_COMMENT_LENGTH = 14 +_CD_DISK_NUMBER_START = 15 +_CD_INTERNAL_FILE_ATTRIBUTES = 16 +_CD_EXTERNAL_FILE_ATTRIBUTES = 17 +_CD_LOCAL_HEADER_OFFSET = 18 + +# General purpose bit flags +# Zip Appnote: 4.4.4 general purpose bit flag: (2 bytes) +_MASK_ENCRYPTED = 1 << 0 +# Bits 1 and 2 have different meanings depending on the compression used. +_MASK_COMPRESS_OPTION_1 = 1 << 1 +# _MASK_COMPRESS_OPTION_2 = 1 << 2 +# _MASK_USE_DATA_DESCRIPTOR: If set, crc-32, compressed size and uncompressed +# size are zero in the local header and the real values are written in the data +# descriptor immediately following the compressed data. +_MASK_USE_DATA_DESCRIPTOR = 1 << 3 +# Bit 4: Reserved for use with compression method 8, for enhanced deflating. +# _MASK_RESERVED_BIT_4 = 1 << 4 +_MASK_COMPRESSED_PATCH = 1 << 5 +_MASK_STRONG_ENCRYPTION = 1 << 6 +# _MASK_UNUSED_BIT_7 = 1 << 7 +# _MASK_UNUSED_BIT_8 = 1 << 8 +# _MASK_UNUSED_BIT_9 = 1 << 9 +# _MASK_UNUSED_BIT_10 = 1 << 10 +_MASK_UTF_FILENAME = 1 << 11 +# Bit 12: Reserved by PKWARE for enhanced compression. 
+# _MASK_RESERVED_BIT_12 = 1 << 12
+# _MASK_ENCRYPTED_CENTRAL_DIR = 1 << 13
+# Bit 14, 15: Reserved by PKWARE
+# _MASK_RESERVED_BIT_14 = 1 << 14
+# _MASK_RESERVED_BIT_15 = 1 << 15
+
+# The "local file header" structure, magic number, size, and indices
+# (section V.A in the format document)
+structFileHeader = "<4s2B4HL2L2H"
+stringFileHeader = b"PK\003\004"
+sizeFileHeader = struct.calcsize(structFileHeader)
+
+_FH_SIGNATURE = 0
+_FH_EXTRACT_VERSION = 1
+_FH_EXTRACT_SYSTEM = 2
+_FH_GENERAL_PURPOSE_FLAG_BITS = 3
+_FH_COMPRESSION_METHOD = 4
+_FH_LAST_MOD_TIME = 5
+_FH_LAST_MOD_DATE = 6
+_FH_CRC = 7
+_FH_COMPRESSED_SIZE = 8
+_FH_UNCOMPRESSED_SIZE = 9
+_FH_FILENAME_LENGTH = 10
+_FH_EXTRA_FIELD_LENGTH = 11
+
+# The "Zip64 end of central directory locator" structure, magic number, and size
+structEndArchive64Locator = "<4sLQL"
+stringEndArchive64Locator = b"PK\x06\x07"
+sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
+
+# The "Zip64 end of central directory" record, magic number, size, and indices
+# (section V.G in the format document)
+structEndArchive64 = "<4sQ2H2L4Q"
+stringEndArchive64 = b"PK\x06\x06"
+sizeEndCentDir64 = struct.calcsize(structEndArchive64)
+
+_CD64_SIGNATURE = 0
+_CD64_DIRECTORY_RECSIZE = 1
+_CD64_CREATE_VERSION = 2
+_CD64_EXTRACT_VERSION = 3
+_CD64_DISK_NUMBER = 4
+_CD64_DISK_NUMBER_START = 5
+_CD64_NUMBER_ENTRIES_THIS_DISK = 6
+_CD64_NUMBER_ENTRIES_TOTAL = 7
+_CD64_DIRECTORY_SIZE = 8
+_CD64_OFFSET_START_CENTDIR = 9
+
+_DD_SIGNATURE = 0x08074b50
+
+
+class _Extra(bytes):
+    FIELD_STRUCT = struct.Struct('<HH')
+
+    def __new__(cls, val, id=None):
+        self = super().__new__(cls, val)
+        self.id = id
+        return self
+
+    @classmethod
+    def read_one(cls, raw):
+        try:
+            xid, xlen = cls.FIELD_STRUCT.unpack(raw[:4])
+        except struct.error:
+            return None, b""
+        return cls(raw[:4 + xlen], id=xid), raw[4 + xlen:]
+
+    @classmethod
+    def split(cls, data):
+        # use memoryview for zero-copy slices
+        rest = memoryview(data)
+        while rest:
+            extra, rest = cls.read_one(rest)
+            if extra is None:
+                return
+            yield extra
+
+    @classmethod
+    def strip(cls, data, xids):
+        """Remove Extra fields with specified IDs."""
+        return b''.join(
+            bytes(field) for field in cls.split(data)
+            if field.id not in xids
+        )
+
+
+def _check_zipfile(fp):
+    try:
+        endrec = _EndRecData(fp)
+        if endrec:
+            if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
+                return True     # Empty zipfiles are still zipfiles
+            elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
+                # Central directory is on the same disk
+                fp.seek(sum(_handle_prepended_data(endrec)))
+                if endrec[_ECD_SIZE] >= sizeCentralDir:
+                    data = fp.read(sizeCentralDir)   # CD is where we expect it to be
+                    if len(data) == sizeCentralDir:
+                        centdir = struct.unpack(structCentralDir, data) # CD is the right size
+                        if centdir[_CD_SIGNATURE] == stringCentralDir:
+                            return True # First central directory entry has correct magic number
+    except OSError:
+        pass
+    return False
+
+def is_zipfile(filename):
+    """Quickly see if a file is a ZIP file by checking the magic number.
+
+    The filename argument may be a file or file-like object too.
+    """
+    result = False
+    try:
+        if hasattr(filename, "read"):
+            pos = filename.tell()
+            result = _check_zipfile(fp=filename)
+            filename.seek(pos)
+        else:
+            with open(filename, "rb") as fp:
+                result = _check_zipfile(fp)
+    except (OSError, BadZipFile):
+        pass
+    return result
+
+def _handle_prepended_data(endrec, debug=0):
+    size_cd = endrec[_ECD_SIZE]             # bytes in central directory
+    offset_cd = endrec[_ECD_OFFSET]         # offset of central directory
+
+    # "concat" is zero, unless zip was concatenated to another file
+    concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
+
+    if debug > 2:
+        inferred = concat + offset_cd
+        print("given, inferred, offset", offset_cd, inferred, concat)
+
+    return offset_cd, concat
+
+def _EndRecData64(fpin, offset, endrec):
+    """
+    Read the ZIP64 end-of-archive records and use that to update endrec
+    """
+    offset -= sizeEndCentDir64Locator
+    if offset < 0:
+        # The file is not large enough to contain a ZIP64
+        # end-of-archive record, so just return the end record we were given.
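+        # (sizeEndCentDir64Locator is 20 bytes: the b"PK\x06\x07" signature
+        # plus disk number, relative offset and total disk count, per
+        # structEndArchive64Locator = "<4sLQL" above.)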
+        return endrec
+    fpin.seek(offset)
+    data = fpin.read(sizeEndCentDir64Locator)
+    if len(data) != sizeEndCentDir64Locator:
+        raise OSError("Unknown I/O error")
+    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
+    if sig != stringEndArchive64Locator:
+        return endrec
+
+    if diskno != 0 or disks > 1:
+        raise BadZipFile("zipfiles that span multiple disks are not supported")
+
+    offset -= sizeEndCentDir64
+    if reloff > offset:
+        raise BadZipFile("Corrupt zip64 end of central directory locator")
+    # First, check the assumption that there is no prepended data.
+    fpin.seek(reloff)
+    extrasz = offset - reloff
+    data = fpin.read(sizeEndCentDir64)
+    if len(data) != sizeEndCentDir64:
+        raise OSError("Unknown I/O error")
+    if not data.startswith(stringEndArchive64) and reloff != offset:
+        # Since we already have seen the Zip64 EOCD Locator, it's
+        # possible we got here because there is prepended data.
+        # Assume no 'zip64 extensible data'
+        fpin.seek(offset)
+        extrasz = 0
+        data = fpin.read(sizeEndCentDir64)
+        if len(data) != sizeEndCentDir64:
+            raise OSError("Unknown I/O error")
+    if not data.startswith(stringEndArchive64):
+        raise BadZipFile("Zip64 end of central directory record not found")
+
+    sig, sz, create_version, read_version, disk_num, disk_dir, \
+        dircount, dircount2, dirsize, diroffset = \
+        struct.unpack(structEndArchive64, data)
+    if (diroffset + dirsize != reloff or
+        sz + 12 != sizeEndCentDir64 + extrasz):
+        raise BadZipFile("Corrupt zip64 end of central directory record")
+
+    # Update the original endrec using data from the ZIP64 record
+    endrec[_ECD_SIGNATURE] = sig
+    endrec[_ECD_DISK_NUMBER] = disk_num
+    endrec[_ECD_DISK_START] = disk_dir
+    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
+    endrec[_ECD_ENTRIES_TOTAL] = dircount2
+    endrec[_ECD_SIZE] = dirsize
+    endrec[_ECD_OFFSET] = diroffset
+    endrec[_ECD_LOCATION] = offset - extrasz
+    return endrec
+
+
+def _EndRecData(fpin):
+    """Return data from the "End of Central Directory" record, or None.
+
+    The data is a list of the nine items in the ZIP "End of central dir"
+    record followed by a tenth item, the file seek offset of this record."""
+
+    # Determine file size
+    fpin.seek(0, 2)
+    filesize = fpin.tell()
+
+    # Check to see if this is a ZIP file with no archive comment (the
+    # "end of central directory" structure should be the last item in the
+    # file if this is the case).
+    try:
+        fpin.seek(-sizeEndCentDir, 2)
+    except OSError:
+        return None
+    data = fpin.read(sizeEndCentDir)
+    if (len(data) == sizeEndCentDir and
+        data[0:4] == stringEndArchive and
+        data[-2:] == b"\000\000"):
+        # the signature is correct and there's no comment, unpack structure
+        endrec = struct.unpack(structEndArchive, data)
+        endrec=list(endrec)
+
+        # Append a blank comment and record start offset
+        endrec.append(b"")
+        endrec.append(filesize - sizeEndCentDir)
+
+        # Try to read the "Zip64 end of central directory" structure
+        return _EndRecData64(fpin, filesize - sizeEndCentDir, endrec)
+
+    # Either this is not a ZIP file, or it is a ZIP file with an archive
+    # comment.  Search the end of the file for the "end of central directory"
+    # record signature. The comment is the last item in the ZIP file and may be
+    # up to 64K long.  It is assumed that the "end of central directory" magic
+    # number does not appear in the comment.
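+    # Layout of the 22-byte EOCD record searched for below (from
+    # structEndArchive = "<4s4H2LH"):
+    #   bytes  0..3   signature b"PK\x05\x06"
+    #   bytes  4..11  four 16-bit fields: disk numbers and entry counts
+    #   bytes 12..19  two 32-bit fields: central directory size and offset
+    #   bytes 20..21  comment length, followed by up to 64 KiB of comment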
+ maxCommentStart = max(filesize - ZIP_MAX_COMMENT - sizeEndCentDir, 0) + fpin.seek(maxCommentStart, 0) + data = fpin.read(ZIP_MAX_COMMENT + sizeEndCentDir) + start = data.rfind(stringEndArchive) + if start >= 0: + # found the magic number; attempt to unpack and interpret + recData = data[start:start+sizeEndCentDir] + if len(recData) != sizeEndCentDir: + # Zip file is corrupted. + return None + endrec = list(struct.unpack(structEndArchive, recData)) + commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file + comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize] + endrec.append(comment) + endrec.append(maxCommentStart + start) + + # Try to read the "Zip64 end of central directory" structure + return _EndRecData64(fpin, maxCommentStart + start, endrec) + + # Unable to find a valid end of central directory structure + return None + +def _sanitize_filename(filename): + """Terminate the file name at the first null byte and + ensure paths always use forward slashes as the directory separator.""" + + # Terminate the file name at the first null byte. Null bytes in file + # names are used as tricks by viruses in archives. + null_byte = filename.find(chr(0)) + if null_byte >= 0: + filename = filename[0:null_byte] + # This is used to ensure paths in generated ZIP files always use + # forward slashes as the directory separator, as required by the + # ZIP format specification. + if os.sep != "/" and os.sep in filename: + filename = filename.replace(os.sep, "/") + if os.altsep and os.altsep != "/" and os.altsep in filename: + filename = filename.replace(os.altsep, "/") + return filename + + +class ZipInfo: + """Class with attributes describing each file in the ZIP archive.""" + + __slots__ = ( + 'orig_filename', + 'filename', + 'date_time', + 'compress_type', + 'compress_level', + 'comment', + 'extra', + 'create_system', + 'create_version', + 'extract_version', + 'reserved', + 'flag_bits', + 'volume', + 'internal_attr', + 'external_attr', + 'header_offset', + 'CRC', + 'compress_size', + 'file_size', + '_raw_time', + '_end_offset', + ) + + def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)): + self.orig_filename = filename # Original file name in archive + + # Terminate the file name at the first null byte and + # ensure paths always use forward slashes as the directory separator. 
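+        # e.g. on Windows _sanitize_filename("a\\b\x00junk") -> "a/b"
+        # (truncated at NUL, os.sep replaced); on POSIX the backslash is
+        # not a separator and is kept as-is.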
+ filename = _sanitize_filename(filename) + + self.filename = filename # Normalized file name + self.date_time = date_time # year, month, day, hour, min, sec + + if date_time[0] < 1980: + raise ValueError('ZIP does not support timestamps before 1980') + + # Standard values: + self.compress_type = ZIP_STORED # Type of compression for the file + self.compress_level = None # Level for the compressor + self.comment = b"" # Comment for each file + self.extra = b"" # ZIP extra data + if sys.platform == 'win32': + self.create_system = 0 # System which created ZIP archive + else: + # Assume everything else is unix-y + self.create_system = 3 # System which created ZIP archive + self.create_version = DEFAULT_VERSION # Version which created ZIP archive + self.extract_version = DEFAULT_VERSION # Version needed to extract archive + self.reserved = 0 # Must be zero + self.flag_bits = 0 # ZIP flag bits + self.volume = 0 # Volume number of file header + self.internal_attr = 0 # Internal attributes + self.external_attr = 0 # External file attributes + self.compress_size = 0 # Size of the compressed file + self.file_size = 0 # Size of the uncompressed file + self._end_offset = None # Start of the next local header or central directory + # Other attributes are set by class ZipFile: + # header_offset Byte offset to the file header + # CRC CRC-32 of the uncompressed file + + # Maintain backward compatibility with the old protected attribute name. + @property + def _compresslevel(self): + return self.compress_level + + @_compresslevel.setter + def _compresslevel(self, value): + self.compress_level = value + + def __repr__(self): + result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)] + if self.compress_type != ZIP_STORED: + result.append(' compress_type=%s' % + compressor_names.get(self.compress_type, + self.compress_type)) + hi = self.external_attr >> 16 + lo = self.external_attr & 0xFFFF + if hi: + result.append(' filemode=%r' % stat.filemode(hi)) + if lo: + result.append(' external_attr=%#x' % lo) + isdir = self.is_dir() + if not isdir or self.file_size: + result.append(' file_size=%r' % self.file_size) + if ((not isdir or self.compress_size) and + (self.compress_type != ZIP_STORED or + self.file_size != self.compress_size)): + result.append(' compress_size=%r' % self.compress_size) + result.append('>') + return ''.join(result) + + def FileHeader(self, zip64=None): + """Return the per-file header as a bytes object. + + When the optional zip64 arg is None rather than a bool, we will + decide based upon the file_size and compress_size, if known, + False otherwise. + """ + dt = self.date_time + dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] + dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) + if self.flag_bits & _MASK_USE_DATA_DESCRIPTOR: + # Set these to zero because we write them after the file data + CRC = compress_size = file_size = 0 + else: + CRC = self.CRC + compress_size = self.compress_size + file_size = self.file_size + + extra = self.extra + + min_version = 0 + if zip64 is None: + # We always explicitly pass zip64 within this module.... This + # remains for anyone using ZipInfo.FileHeader as a public API. 
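+            # i.e. default to ZIP64 whenever either size exceeds
+            # ZIP64_LIMIT (2**31 - 1 bytes, defined above).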
+            zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
+        if zip64:
+            fmt = '<HHQQ'
+            extra = extra + struct.pack(fmt,
+                                        1, struct.calcsize(fmt)-4, file_size, compress_size)
+        if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
+            if not zip64:
+                raise LargeZipFile("Filesize would require ZIP64 extensions")
+            # File is larger than what fits into a 4 byte integer,
+            # fall back to the ZIP64 extension
+            file_size = 0xffffffff
+            compress_size = 0xffffffff
+            min_version = ZIP64_VERSION
+
+        if self.compress_type == ZIP_BZIP2:
+            min_version = max(BZIP2_VERSION, min_version)
+        elif self.compress_type == ZIP_LZMA:
+            min_version = max(LZMA_VERSION, min_version)
+        elif self.compress_type == ZIP_ZSTANDARD:
+            min_version = max(ZSTANDARD_VERSION, min_version)
+
+        self.extract_version = max(min_version, self.extract_version)
+        self.create_version = max(min_version, self.create_version)
+        filename, flag_bits = self._encodeFilenameFlags()
+        header = struct.pack(structFileHeader, stringFileHeader,
+                             self.extract_version, self.reserved, flag_bits,
+                             self.compress_type, dostime, dosdate, CRC,
+                             compress_size, file_size,
+                             len(filename), len(extra))
+        return header + filename + extra
+
+    def _encodeFilenameFlags(self):
+        try:
+            return self.filename.encode('ascii'), self.flag_bits
+        except UnicodeEncodeError:
+            return self.filename.encode('utf-8'), self.flag_bits | _MASK_UTF_FILENAME
+
+    def _decodeExtra(self, filename_crc):
+        # Try to decode the extra field.
+        extra = self.extra
+        unpack = struct.unpack
+        while len(extra) >= 4:
+            tp, ln = unpack('<HH', extra[:4])
+            if ln+4 > len(extra):
+                raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
+            if tp == 0x0001:
+                data = extra[4:ln+4]
+                # ZIP64 extension (large files and/or large archives)
+                try:
+                    if self.file_size in (0xFFFF_FFFF_FFFF_FFFF, 0xFFFF_FFFF):
+                        field = "File size"
+                        self.file_size, = unpack('<Q', data[:8])
+                        data = data[8:]
+                    if self.compress_size == 0xFFFF_FFFF:
+                        field = "Compress size"
+                        self.compress_size, = unpack('<Q', data[:8])
+                        data = data[8:]
+                    if self.header_offset == 0xFFFF_FFFF:
+                        field = "Header offset"
+                        self.header_offset, = unpack('<Q', data[:8])
+                        data = data[8:]
+                except struct.error:
+                    raise BadZipFile(f"Corrupt zip64 extra field. "
+                                     f"{field} not found.") from None
+            elif tp == 0x7075:
+                data = extra[4:ln+4]
+                # Unicode Path Extra Field
+                try:
+                    up_version, up_name_crc = unpack('<BL', data[:5])
+                    if up_version == 1 and up_name_crc == filename_crc:
+                        up_unicode_name = data[5:].decode('utf-8')
+                        if up_unicode_name:
+                            self.filename = _sanitize_filename(up_unicode_name)
+                        else:
+                            import warnings
+                            warnings.warn("Empty unicode path extra field (up)", stacklevel=2)
+                except struct.error as e:
+                    raise BadZipFile("Corrupt unicode path extra field (up)") from e
+                except UnicodeDecodeError as e:
+                    raise BadZipFile('Corrupt unicode path extra field (up): invalid utf-8 bytes') from e
+
+            extra = extra[ln+4:]
+
+    @classmethod
+    def from_file(cls, filename, arcname=None, *, strict_timestamps=True):
+        """Construct an appropriate ZipInfo for a file on the filesystem.
+
+        filename should be the path to a file or directory on the filesystem.
+
+        arcname is the name which it will have within the archive (by default,
+        this will be the same as filename, but without a drive letter and with
+        leading path separators removed).
+        """
+        if isinstance(filename, os.PathLike):
+            filename = os.fspath(filename)
+        st = os.stat(filename)
+        isdir = stat.S_ISDIR(st.st_mode)
+        mtime = time.localtime(st.st_mtime)
+        date_time = mtime[0:6]
+        if not strict_timestamps and date_time[0] < 1980:
+            date_time = (1980, 1, 1, 0, 0, 0)
+        elif not strict_timestamps and date_time[0] > 2107:
+            date_time = (2107, 12, 31, 23, 59, 59)
+        # Create ZipInfo instance to store file information
+        if arcname is None:
+            arcname = filename
+        arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
+        while arcname[0] in (os.sep, os.altsep):
+            arcname = arcname[1:]
+        if isdir:
+            arcname += '/'
+        zinfo = cls(arcname, date_time)
+        zinfo.external_attr = (st.st_mode & 0xFFFF) << 16  # Unix attributes
+        if isdir:
+            zinfo.file_size = 0
+            zinfo.external_attr |= 0x10  # MS-DOS directory flag
+        else:
+            zinfo.file_size = st.st_size
+
+        return zinfo
+
+    def _for_archive(self, archive):
+        """Resolve suitable defaults from the archive.
+
+        Resolve the date_time, compression attributes, and external attributes
+        to suitable defaults as used by :method:`ZipFile.writestr`.
+
+        Return self.
+        """
+        # gh-91279: Set the SOURCE_DATE_EPOCH to a specific timestamp
+        epoch = os.environ.get('SOURCE_DATE_EPOCH')
+        get_time = int(epoch) if epoch else time.time()
+        self.date_time = time.localtime(get_time)[:6]
+
+        self.compress_type = archive.compression
+        self.compress_level = archive.compresslevel
+        if self.filename.endswith('/'):  # pragma: no cover
+            self.external_attr = 0o40775 << 16  # drwxrwxr-x
+            self.external_attr |= 0x10  # MS-DOS directory flag
+        else:
+            self.external_attr = 0o600 << 16  # ?rw-------
+        return self
+
+    def is_dir(self):
+        """Return True if this archive member is a directory."""
+        if self.filename.endswith('/'):
+            return True
+        # The ZIP format specification requires to use forward slashes
+        # as the directory separator, but in practice some ZIP files
+        # created on Windows can use backward slashes.  For compatibility
+        # with the extraction code which already handles this:
+        if os.path.altsep:
+            return self.filename.endswith((os.path.sep, os.path.altsep))
+        return False
+
+
+# ZIP encryption uses the CRC32 one-byte primitive for scrambling some
+# internal keys. We noticed that a direct implementation is faster than
+# relying on binascii.crc32().
+
+_crctable = None
+def _gen_crc(crc):
+    for j in range(8):
+        if crc & 1:
+            crc = (crc >> 1) ^ 0xEDB88320
+        else:
+            crc >>= 1
+    return crc
+
+# ZIP supports a password-based form of encryption. Even though known
+# plaintext attacks have been found against it, it is still useful
+# to be able to get data out of such a file.
+#
+# Usage:
+#    zd = _ZipDecrypter(mypwd)
+#    plain_bytes = zd(cypher_bytes)
+
+def _ZipDecrypter(pwd):
+    key0 = 305419896
+    key1 = 591751049
+    key2 = 878082192
+
+    global _crctable
+    if _crctable is None:
+        _crctable = list(map(_gen_crc, range(256)))
+    crctable = _crctable
+
+    def crc32(ch, crc):
+        """Compute the CRC32 primitive on one byte."""
+        return (crc >> 8) ^ crctable[(crc ^ ch) & 0xFF]
+
+    def update_keys(c):
+        nonlocal key0, key1, key2
+        key0 = crc32(c, key0)
+        key1 = (key1 + (key0 & 0xFF)) & 0xFFFFFFFF
+        key1 = (key1 * 134775813 + 1) & 0xFFFFFFFF
+        key2 = crc32(key1 >> 24, key2)
+
+    for p in pwd:
+        update_keys(p)
+
+    def decrypter(data):
+        """Decrypt a bytes object."""
+        result = bytearray()
+        append = result.append
+        for c in data:
+            k = key2 | 2
+            c ^= ((k * (k^1)) >> 8) & 0xFF
+            update_keys(c)
+            append(c)
+        return bytes(result)
+
+    return decrypter
+
+
+class LZMACompressor:
+
+    def __init__(self):
+        self._comp = None
+
+    def _init(self):
+        props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
+        self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
+            lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
+        ])
+        return struct.pack('<BBH', 9, 4, len(props)) + props
+
+    def compress(self, data):
+        if self._comp is None:
+            return self._init() + self._comp.compress(data)
+        return self._comp.compress(data)
+
+    def flush(self):
+        if self._comp is None:
+            return self._init() + self._comp.flush()
+        return self._comp.flush()
+
+
+class LZMADecompressor:
+
+    def __init__(self):
+        self._decomp = None
+        self._unconsumed = b''
+        self.eof = False
+
+    def decompress(self, data):
+        if self._decomp is None:
+            self._unconsumed += data
+            if len(self._unconsumed) <= 4:
+                return b''
+            psize, = struct.unpack('<H', self._unconsumed[2:4])
+            if len(self._unconsumed) <= 4 + psize:
+                return b''
+
+            self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
+                lzma._decode_filter_properties(lzma.FILTER_LZMA1,
+                                               self._unconsumed[4:4 + psize])
+            ])
+            data = self._unconsumed[4 + psize:]
+            del self._unconsumed
+
+        result = self._decomp.decompress(data)
+        self.eof = self._decomp.eof
+        return result
+
+
+compressor_names = {
+    0: 'store',
+    1: 'shrink',
+    2: 'reduce',
+    3: 'reduce',
+    4: 'reduce',
+    5: 'reduce',
+    6: 'implode',
+    7: 'tokenize',
+    8: 'deflate',
+    9: 'deflate64',
+    10: 'implode',
+    12: 'bzip2',
+    14: 'lzma',
+    18: 'terse',
+    19: 'lz77',
+    93: 'zstd',
+    97: 'wavpack',
+    98: 'ppmd',
+}
+
+def _check_compression(compression):
+    if compression == ZIP_STORED:
+        pass
+    elif compression == ZIP_DEFLATED:
+        if not zlib:
+            raise RuntimeError(
+                "Compression requires the (missing) zlib module")
+    elif compression == ZIP_BZIP2:
+        if not bz2:
+            raise RuntimeError(
+                "Compression requires the (missing) bz2 module")
+    elif compression == ZIP_LZMA:
+        if not lzma:
+            raise RuntimeError(
+                "Compression requires the (missing) lzma module")
+    elif compression == ZIP_ZSTANDARD:
+        if not zstd:
+            raise RuntimeError(
+                "Compression requires the (missing) compression.zstd module")
+    else:
+        raise NotImplementedError("That compression method is not supported")
+
+
+def _get_compressor(compress_type, compresslevel=None):
+    if compress_type == ZIP_DEFLATED:
+        if compresslevel is not None:
+            return zlib.compressobj(compresslevel, zlib.DEFLATED, -15)
+        return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
+    elif compress_type == ZIP_BZIP2:
+        if compresslevel is not None:
+            return bz2.BZ2Compressor(compresslevel)
+        return bz2.BZ2Compressor()
+    # compresslevel is ignored for ZIP_LZMA
+    elif compress_type == ZIP_LZMA:
+        return LZMACompressor()
+    elif compress_type == ZIP_ZSTANDARD:
+        return zstd.ZstdCompressor(level=compresslevel)
+    else:
+        return None
+
+
+def _get_decompressor(compress_type):
+    _check_compression(compress_type)
+    if compress_type == ZIP_STORED:
+        return None
+    elif compress_type == ZIP_DEFLATED:
+        return zlib.decompressobj(-15)
+    elif compress_type == ZIP_BZIP2:
+        return bz2.BZ2Decompressor()
+    elif compress_type == ZIP_LZMA:
+        return LZMADecompressor()
+    elif compress_type == ZIP_ZSTANDARD:
+        return zstd.ZstdDecompressor()
+    else:
+        descr = compressor_names.get(compress_type)
+        if descr:
+            raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
+        else:
+            raise NotImplementedError("compression type %d" % (compress_type,))
+
+
+class _SharedFile:
+    def __init__(self, file, pos, close, lock, writing):
+        self._file = file
+        self._pos = pos
+        self._close = close
+        self._lock = lock
+        self._writing = writing
+        self.seekable = file.seekable
+
+    def tell(self):
+        return self._pos
+
+    def seek(self, offset, whence=0):
+        with self._lock:
+            if self._writing():
+                raise ValueError("Can't reposition in the ZIP file while "
+                        "there is an open writing handle on it. "
+                        "Close the writing handle before trying to read.")
+            self._file.seek(offset, whence)
+            self._pos = self._file.tell()
+            return self._pos
+
+    def read(self, n=-1):
+        with self._lock:
+            if self._writing():
+                raise ValueError("Can't read from the ZIP file while there "
+                        "is an open writing handle on it. "
+                        "Close the writing handle before trying to read.")
+            self._file.seek(self._pos)
+            data = self._file.read(n)
+            self._pos = self._file.tell()
+            return data
+
+    def close(self):
+        if self._file is not None:
+            fileobj = self._file
+            self._file = None
+            self._close(fileobj)
+
+# Provide the tell method for unseekable stream
+class _Tellable:
+    def __init__(self, fp):
+        self.fp = fp
+        self.offset = 0
+
+    def write(self, data):
+        n = self.fp.write(data)
+        self.offset += n
+        return n
+
+    def tell(self):
+        return self.offset
+
+    def flush(self):
+        self.fp.flush()
+
+    def close(self):
+        self.fp.close()
+
+
+class ZipExtFile(io.BufferedIOBase):
+    """File-like object for reading an archive member.
+       Is returned by ZipFile.open().
+    """
+
+    # Max size supported by decompressor.
+    MAX_N = 1 << 31 - 1
+
+    # Read from compressed files in 4k blocks.
+    MIN_READ_SIZE = 4096
+
+    # Chunk size to read during seek
+    MAX_SEEK_READ = 1 << 24
+
+    def __init__(self, fileobj, mode, zipinfo, pwd=None,
+                 close_fileobj=False):
+        self._fileobj = fileobj
+        self._pwd = pwd
+        self._close_fileobj = close_fileobj
+
+        self._compress_type = zipinfo.compress_type
+        self._compress_left = zipinfo.compress_size
+        self._left = zipinfo.file_size
+
+        self._decompressor = _get_decompressor(self._compress_type)
+
+        self._eof = False
+        self._readbuffer = b''
+        self._offset = 0
+
+        self.newlines = None
+
+        self.mode = mode
+        self.name = zipinfo.filename
+
+        if hasattr(zipinfo, 'CRC'):
+            self._expected_crc = zipinfo.CRC
+            self._running_crc = crc32(b'')
+        else:
+            self._expected_crc = None
+
+        self._seekable = False
+        try:
+            if fileobj.seekable():
+                self._orig_compress_start = fileobj.tell()
+                self._orig_compress_size = zipinfo.compress_size
+                self._orig_file_size = zipinfo.file_size
+                self._orig_start_crc = self._running_crc
+                self._orig_crc = self._expected_crc
+                self._seekable = True
+        except AttributeError:
+            pass
+
+        self._decrypter = None
+        if pwd:
+            if zipinfo.flag_bits & _MASK_USE_DATA_DESCRIPTOR:
+                # compare against the file type from extended local headers
+                check_byte = (zipinfo._raw_time >> 8) & 0xff
+            else:
+                # compare against the CRC otherwise
+                check_byte = (zipinfo.CRC >> 24) & 0xff
+            h = self._init_decrypter()
+            if h != check_byte:
+                raise RuntimeError("Bad password for file %r" % zipinfo.orig_filename)
+
+
+    def _init_decrypter(self):
+        self._decrypter = _ZipDecrypter(self._pwd)
+        # The first 12 bytes in the cypher stream is an encryption header
+        # used to strengthen the algorithm. The first 11 bytes are
+        # completely random, while the 12th contains the MSB of the CRC,
+        # or the MSB of the file time depending on the header type
+        # and is used to check the correctness of the password.
+        header = self._fileobj.read(12)
+        self._compress_left -= 12
+        return self._decrypter(header)[11]
+
+    def __repr__(self):
+        result = ['<%s.%s' % (self.__class__.__module__,
+                              self.__class__.__qualname__)]
+        if not self.closed:
+            result.append(' name=%r' % (self.name,))
+            if self._compress_type != ZIP_STORED:
+                result.append(' compress_type=%s' %
+                              compressor_names.get(self._compress_type,
+                                                   self._compress_type))
+        else:
+            result.append(' [closed]')
+        result.append('>')
+        return ''.join(result)
+
+    def readline(self, limit=-1):
+        """Read and return a line from the stream.
+
+        If limit is specified, at most limit bytes will be read.
+        """
+
+        if limit < 0:
+            # Shortcut common case - newline found in buffer.
+            i = self._readbuffer.find(b'\n', self._offset) + 1
+            if i > 0:
+                line = self._readbuffer[self._offset: i]
+                self._offset = i
+                return line
+
+        return io.BufferedIOBase.readline(self, limit)
+
+    def peek(self, n=1):
+        """Returns buffered bytes without advancing the position."""
+        if n > len(self._readbuffer) - self._offset:
+            chunk = self.read(n)
+            if len(chunk) > self._offset:
+                self._readbuffer = chunk + self._readbuffer[self._offset:]
+                self._offset = 0
+            else:
+                self._offset -= len(chunk)
+
+        # Return up to 512 bytes to reduce allocation overhead for tight loops.
+        return self._readbuffer[self._offset: self._offset + 512]
+
+    def readable(self):
+        if self.closed:
+            raise ValueError("I/O operation on closed file.")
+        return True
+
+    def read(self, n=-1):
+        """Read and return up to n bytes.
+        If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
+ """ + if self.closed: + raise ValueError("read from closed file.") + if n is None or n < 0: + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while not self._eof: + buf += self._read1(self.MAX_N) + return buf + + end = n + self._offset + if end < len(self._readbuffer): + buf = self._readbuffer[self._offset:end] + self._offset = end + return buf + + n = end - len(self._readbuffer) + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while n > 0 and not self._eof: + data = self._read1(n) + if n < len(data): + self._readbuffer = data + self._offset = n + buf += data[:n] + break + buf += data + n -= len(data) + return buf + + def _update_crc(self, newdata): + # Update the CRC using the given data. + if self._expected_crc is None: + # No need to compute the CRC if we don't have a reference value + return + self._running_crc = crc32(newdata, self._running_crc) + # Check the CRC if we're at the end of the file + if self._eof and self._running_crc != self._expected_crc: + raise BadZipFile("Bad CRC-32 for file %r" % self.name) + + def read1(self, n): + """Read up to n bytes with at most one read() system call.""" + + if n is None or n < 0: + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while not self._eof: + data = self._read1(self.MAX_N) + if data: + buf += data + break + return buf + + end = n + self._offset + if end < len(self._readbuffer): + buf = self._readbuffer[self._offset:end] + self._offset = end + return buf + + n = end - len(self._readbuffer) + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + if n > 0: + while not self._eof: + data = self._read1(n) + if n < len(data): + self._readbuffer = data + self._offset = n + buf += data[:n] + break + if data: + buf += data + break + return buf + + def _read1(self, n): + # Read up to n compressed bytes with at most one read() system call, + # decrypt and decompress them. + if self._eof or n <= 0: + return b'' + + # Read from file. + if self._compress_type == ZIP_DEFLATED: + ## Handle unconsumed data. 
+ data = self._decompressor.unconsumed_tail + if n > len(data): + data += self._read2(n - len(data)) + else: + data = self._read2(n) + + if self._compress_type == ZIP_STORED: + self._eof = self._compress_left <= 0 + elif self._compress_type == ZIP_DEFLATED: + n = max(n, self.MIN_READ_SIZE) + data = self._decompressor.decompress(data, n) + self._eof = (self._decompressor.eof or + self._compress_left <= 0 and + not self._decompressor.unconsumed_tail) + if self._eof: + data += self._decompressor.flush() + else: + data = self._decompressor.decompress(data) + self._eof = self._decompressor.eof or self._compress_left <= 0 + + data = data[:self._left] + self._left -= len(data) + if self._left <= 0: + self._eof = True + self._update_crc(data) + return data + + def _read2(self, n): + if self._compress_left <= 0: + return b'' + + n = max(n, self.MIN_READ_SIZE) + n = min(n, self._compress_left) + + data = self._fileobj.read(n) + self._compress_left -= len(data) + if not data: + raise EOFError + + if self._decrypter is not None: + data = self._decrypter(data) + return data + + def close(self): + try: + if self._close_fileobj: + self._fileobj.close() + finally: + super().close() + + def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") + return self._seekable + + def seek(self, offset, whence=os.SEEK_SET): + if self.closed: + raise ValueError("seek on closed file.") + if not self._seekable: + raise io.UnsupportedOperation("underlying stream is not seekable") + curr_pos = self.tell() + if whence == os.SEEK_SET: + new_pos = offset + elif whence == os.SEEK_CUR: + new_pos = curr_pos + offset + elif whence == os.SEEK_END: + new_pos = self._orig_file_size + offset + else: + raise ValueError("whence must be os.SEEK_SET (0), " + "os.SEEK_CUR (1), or os.SEEK_END (2)") + + if new_pos > self._orig_file_size: + new_pos = self._orig_file_size + + if new_pos < 0: + new_pos = 0 + + read_offset = new_pos - curr_pos + buff_offset = read_offset + self._offset + + if buff_offset >= 0 and buff_offset < len(self._readbuffer): + # Just move the _offset index if the new position is in the _readbuffer + self._offset = buff_offset + read_offset = 0 + # Fast seek uncompressed unencrypted file + elif self._compress_type == ZIP_STORED and self._decrypter is None and read_offset != 0: + # disable CRC checking after first seeking - it would be invalid + self._expected_crc = None + # seek actual file taking already buffered data into account + read_offset -= len(self._readbuffer) - self._offset + self._fileobj.seek(read_offset, os.SEEK_CUR) + self._left -= read_offset + self._compress_left -= read_offset + self._eof = self._left <= 0 + read_offset = 0 + # flush read buffer + self._readbuffer = b'' + self._offset = 0 + elif read_offset < 0: + # Position is before the current position. 
Reset the ZipExtFile
+            self._fileobj.seek(self._orig_compress_start)
+            self._running_crc = self._orig_start_crc
+            self._expected_crc = self._orig_crc
+            self._compress_left = self._orig_compress_size
+            self._left = self._orig_file_size
+            self._readbuffer = b''
+            self._offset = 0
+            self._decompressor = _get_decompressor(self._compress_type)
+            self._eof = False
+            read_offset = new_pos
+            if self._decrypter is not None:
+                self._init_decrypter()
+
+        while read_offset > 0:
+            read_len = min(self.MAX_SEEK_READ, read_offset)
+            self.read(read_len)
+            read_offset -= read_len
+
+        return self.tell()
+
+    def tell(self):
+        if self.closed:
+            raise ValueError("tell on closed file.")
+        if not self._seekable:
+            raise io.UnsupportedOperation("underlying stream is not seekable")
+        filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset
+        return filepos
+
+
+class _ZipWriteFile(io.BufferedIOBase):
+    def __init__(self, zf, zinfo, zip64):
+        self._zinfo = zinfo
+        self._zip64 = zip64
+        self._zipfile = zf
+        self._compressor = _get_compressor(zinfo.compress_type,
+                                           zinfo.compress_level)
+        self._file_size = 0
+        self._compress_size = 0
+        self._crc = 0
+
+    @property
+    def _fileobj(self):
+        return self._zipfile.fp
+
+    @property
+    def name(self):
+        return self._zinfo.filename
+
+    @property
+    def mode(self):
+        return 'wb'
+
+    def writable(self):
+        return True
+
+    def write(self, data):
+        if self.closed:
+            raise ValueError('I/O operation on closed file.')
+
+        # Accept any data that supports the buffer protocol
+        if isinstance(data, (bytes, bytearray)):
+            nbytes = len(data)
+        else:
+            data = memoryview(data)
+            nbytes = data.nbytes
+        self._file_size += nbytes
+
+        self._crc = crc32(data, self._crc)
+        if self._compressor:
+            data = self._compressor.compress(data)
+            self._compress_size += len(data)
+        self._fileobj.write(data)
+        return nbytes
+
+    def close(self):
+        if self.closed:
+            return
+        try:
+            super().close()
+            # Flush any data from the compressor, and update header info
+            if self._compressor:
+                buf = self._compressor.flush()
+                self._compress_size += len(buf)
+                self._fileobj.write(buf)
+                self._zinfo.compress_size = self._compress_size
+            else:
+                self._zinfo.compress_size = self._file_size
+            self._zinfo.CRC = self._crc
+            self._zinfo.file_size = self._file_size
+
+            if not self._zip64:
+                if self._file_size > ZIP64_LIMIT:
+                    raise RuntimeError("File size too large, try using force_zip64")
+                if self._compress_size > ZIP64_LIMIT:
+                    raise RuntimeError("Compressed size too large, try using force_zip64")
+
+            # Write updated header info
+            if self._zinfo.flag_bits & _MASK_USE_DATA_DESCRIPTOR:
+                # Write CRC and file sizes after the file data
+                fmt = '<LLQQ' if self._zip64 else '<LLLL'
+                self._fileobj.write(struct.pack(fmt, _DD_SIGNATURE, self._zinfo.CRC,
+                                                self._zinfo.compress_size, self._zinfo.file_size))
+                self._zipfile.start_dir = self._fileobj.tell()
+            else:
+                # Seek backwards and write file header (which will now include
+                # correct CRC and file sizes)
+
+                # Preserve current position in file
+                self._zipfile.start_dir = self._fileobj.tell()
+                self._fileobj.seek(self._zinfo.header_offset)
+                self._fileobj.write(self._zinfo.FileHeader(self._zip64))
+                self._fileobj.seek(self._zipfile.start_dir)
+
+            # Successfully written: Add file to our caches
+            self._zipfile.filelist.append(self._zinfo)
+            self._zipfile.NameToInfo[self._zinfo.filename] = self._zinfo
+        finally:
+            self._zipfile._writing = False
+
+
+class ZipFile:
+    """Class with methods to open, read, write, close, list zip files.
+
+    z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True,
+                compresslevel=None)
+
+    file: Either the path to the file, or a file-like object.
+          If it is a path, the file will be opened and closed by ZipFile.
+    mode: The mode can be either read 'r', write 'w', exclusive create 'x',
+          or append 'a'.
+    compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
+                 ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
+    allowZip64: if True ZipFile will create files with ZIP64 extensions when
+                needed, otherwise it will raise an exception when this would
+                be necessary.
+    compresslevel: None (default for the given compression type) or an integer
+                   specifying the level to pass to the compressor.
+                   When using ZIP_STORED or ZIP_LZMA this keyword has no effect.
+                   When using ZIP_DEFLATED integers 0 through 9 are accepted.
+                   When using ZIP_BZIP2 integers 1 through 9 are accepted.
+    """
+
+    fp = None                   # Set here since __del__ checks it
+    _windows_illegal_name_trans_table = None
+
+    def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True,
+                 compresslevel=None, *, strict_timestamps=True, metadata_encoding=None):
+        """Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
+        or append 'a'."""
+        if mode not in ('r', 'w', 'x', 'a'):
+            raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
+
+        _check_compression(compression)
+
+        self._allowZip64 = allowZip64
+        self._didModify = False
+        self.debug = 0  # Level of printing: 0 through 3
+        self.NameToInfo = {}    # Find file info given name
+        self.filelist = []      # List of ZipInfo instances for archive
+        self.compression = compression  # Method of compression
+        self.compresslevel = compresslevel
+        self.mode = mode
+        self.pwd = None
+        self._comment = b''
+        self._strict_timestamps = strict_timestamps
+        self.metadata_encoding = metadata_encoding
+
+        # Check that we don't try to write with nonconforming codecs
+        if self.metadata_encoding and mode != 'r':
+            raise ValueError(
+                "metadata_encoding is only supported for reading files")
+
+        # Check if we were passed a file-like object
+        if isinstance(file, os.PathLike):
+            file = os.fspath(file)
+        if isinstance(file, str):
+            # No, it's a filename
+            self._filePassed = 0
+            self.filename = file
+            modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b',
+                        'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
+            filemode = modeDict[mode]
+            while True:
+                try:
+                    self.fp = io.open(file, filemode)
+                except OSError:
+                    if filemode in modeDict:
+                        filemode = modeDict[filemode]
+                        continue
+                    raise
+                break
+        else:
+            self._filePassed = 1
+            self.fp = file
+            self.filename = getattr(file, 'name', None)
+        self._fileRefCnt = 1
+        self._lock = threading.RLock()
+        self._seekable = True
+        self._writing = False
+
+        try:
+            if mode == 'r':
+                self._RealGetContents()
+            elif mode in ('w', 'x'):
+                # set the modified flag so central directory gets written
+                # even if no files are added to the archive
+                self._didModify = True
+                try:
+                    self.start_dir = self.fp.tell()
+                except (AttributeError, OSError):
+                    self.fp = _Tellable(self.fp)
+                    self.start_dir = 0
+                    self._seekable = False
+                else:
+                    # Some file-like objects can provide tell() but not seek()
+                    try:
+                        self.fp.seek(self.start_dir)
+                    except (AttributeError, OSError):
+                        self._seekable = False
+            elif mode == 'a':
+                try:
+                    # See if file is a zip file
+                    self._RealGetContents()
+                    # seek to start of directory and overwrite
+                    self.fp.seek(self.start_dir)
+                except BadZipFile:
+                    # file is not a zip file, just append
+                    self.fp.seek(0, 2)
+
+                    # set the modified flag so central directory gets written
+                    # even if no files are added to the archive
+                    self._didModify = True
+                    self.start_dir = self.fp.tell()
+            else:
+                raise ValueError("Mode must be 'r', 'w', 'x', or 'a'")
+        except:
+            fp = self.fp
+            self.fp = None
+            self._fpclose(fp)
+            raise
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def __repr__(self):
+        result = ['<%s.%s' % (self.__class__.__module__,
+                              self.__class__.__qualname__)]
+        if self.fp is not None:
+            if self._filePassed:
+                result.append(' file=%r' % self.fp)
+            elif self.filename is not None:
+                result.append(' filename=%r' % self.filename)
+            result.append(' mode=%r' % self.mode)
+        else:
+            result.append(' [closed]')
+        result.append('>')
+        return ''.join(result)
+
+    def _RealGetContents(self):
+        """Read in the table of contents for the ZIP file."""
+        fp = self.fp
+        try:
+            endrec = _EndRecData(fp)
+        except OSError:
+            raise BadZipFile("File is not a zip file")
+        if not endrec:
+            raise BadZipFile("File is not a zip file")
+        if self.debug > 1:
+            print(endrec)
+        self._comment = endrec[_ECD_COMMENT]    # archive comment
+
+        offset_cd, concat = _handle_prepended_data(endrec, self.debug)
+
+        # self.start_dir: Position of start of central directory
+        self.start_dir = offset_cd + concat
+
+        if self.start_dir < 0:
+            raise BadZipFile("Bad offset for central directory")
+        fp.seek(self.start_dir, 0)
+        size_cd = endrec[_ECD_SIZE]
+        data = fp.read(size_cd)
+        fp = io.BytesIO(data)
+        total = 0
+        while total < size_cd:
+            centdir = fp.read(sizeCentralDir)
+            if len(centdir) != sizeCentralDir:
+                raise BadZipFile("Truncated 
central directory") + centdir = struct.unpack(structCentralDir, centdir) + if centdir[_CD_SIGNATURE] != stringCentralDir: + raise BadZipFile("Bad magic number for central directory") + if self.debug > 2: + print(centdir) + filename = fp.read(centdir[_CD_FILENAME_LENGTH]) + orig_filename_crc = crc32(filename) + flags = centdir[_CD_FLAG_BITS] + if flags & _MASK_UTF_FILENAME: + # UTF-8 file names extension + filename = filename.decode('utf-8') + else: + # Historical ZIP filename encoding + filename = filename.decode(self.metadata_encoding or 'cp437') + # Create ZipInfo instance to store file information + x = ZipInfo(filename) + x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) + x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) + x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] + (x.create_version, x.create_system, x.extract_version, x.reserved, + x.flag_bits, x.compress_type, t, d, + x.CRC, x.compress_size, x.file_size) = centdir[1:12] + if x.extract_version > MAX_EXTRACT_VERSION: + raise NotImplementedError("zip file version %.1f" % + (x.extract_version / 10)) + x.volume, x.internal_attr, x.external_attr = centdir[15:18] + # Convert date/time code to (year, month, day, hour, min, sec) + x._raw_time = t + x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, + t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) + x._decodeExtra(orig_filename_crc) + x.header_offset = x.header_offset + concat + self.filelist.append(x) + self.NameToInfo[x.filename] = x + + # update total bytes read from central directory + total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] + + centdir[_CD_EXTRA_FIELD_LENGTH] + + centdir[_CD_COMMENT_LENGTH]) + + if self.debug > 2: + print("total", total) + + end_offset = self.start_dir + for zinfo in reversed(sorted(self.filelist, + key=lambda zinfo: zinfo.header_offset)): + zinfo._end_offset = end_offset + end_offset = zinfo.header_offset + + def namelist(self): + """Return a list of file names in the archive.""" + return [data.filename for data in self.filelist] + + def infolist(self): + """Return a list of class ZipInfo instances for files in the + archive.""" + return self.filelist + + def printdir(self, file=None): + """Print a table of contents for the zip file.""" + print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"), + file=file) + for zinfo in self.filelist: + date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6] + print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size), + file=file) + + def testzip(self): + """Read all the files and check the CRC. + + Return None if all files could be read successfully, or the name + of the offending file otherwise.""" + chunk_size = 2 ** 20 + for zinfo in self.filelist: + try: + # Read by chunks, to avoid an OverflowError or a + # MemoryError with very large embedded files. 
+ with self.open(zinfo.filename, "r") as f: + while f.read(chunk_size): # Check CRC-32 + pass + except BadZipFile: + return zinfo.filename + + def getinfo(self, name): + """Return the instance of ZipInfo given 'name'.""" + info = self.NameToInfo.get(name) + if info is None: + raise KeyError( + 'There is no item named %r in the archive' % name) + + return info + + def setpassword(self, pwd): + """Set default password for encrypted files.""" + if pwd and not isinstance(pwd, bytes): + raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) + if pwd: + self.pwd = pwd + else: + self.pwd = None + + @property + def comment(self): + """The comment text associated with the ZIP file.""" + return self._comment + + @comment.setter + def comment(self, comment): + if not isinstance(comment, bytes): + raise TypeError("comment: expected bytes, got %s" % type(comment).__name__) + # check for valid comment length + if len(comment) > ZIP_MAX_COMMENT: + import warnings + warnings.warn('Archive comment is too long; truncating to %d bytes' + % ZIP_MAX_COMMENT, stacklevel=2) + comment = comment[:ZIP_MAX_COMMENT] + self._comment = comment + self._didModify = True + + def read(self, name, pwd=None): + """Return file bytes for name. 'pwd' is the password to decrypt + encrypted files.""" + with self.open(name, "r", pwd) as fp: + return fp.read() + + def open(self, name, mode="r", pwd=None, *, force_zip64=False): + """Return file-like object for 'name'. + + name is a string for the file name within the ZIP file, or a ZipInfo + object. + + mode should be 'r' to read a file already in the ZIP file, or 'w' to + write to a file newly added to the archive. + + pwd is the password to decrypt files (only used for reading). + + When writing, if the file size is not known in advance but may exceed + 2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large + files. If the size is known in advance, it is best to pass a ZipInfo + instance for name, with zinfo.file_size set. + """ + if mode not in {"r", "w"}: + raise ValueError('open() requires mode "r" or "w"') + if pwd and (mode == "w"): + raise ValueError("pwd is only supported for reading files") + if not self.fp: + raise ValueError( + "Attempt to use ZIP archive that was already closed") + + # Make sure we have an info object + if isinstance(name, ZipInfo): + # 'name' is already an info object + zinfo = name + elif mode == 'w': + zinfo = ZipInfo(name) + zinfo.compress_type = self.compression + zinfo.compress_level = self.compresslevel + else: + # Get info object for name + zinfo = self.getinfo(name) + + if mode == 'w': + return self._open_to_write(zinfo, force_zip64=force_zip64) + + if self._writing: + raise ValueError("Can't read from the ZIP file while there " + "is an open writing handle on it. 
" + "Close the writing handle before trying to read.") + + # Open for reading: + self._fileRefCnt += 1 + zef_file = _SharedFile(self.fp, zinfo.header_offset, + self._fpclose, self._lock, lambda: self._writing) + try: + # Skip the file header: + fheader = zef_file.read(sizeFileHeader) + if len(fheader) != sizeFileHeader: + raise BadZipFile("Truncated file header") + fheader = struct.unpack(structFileHeader, fheader) + if fheader[_FH_SIGNATURE] != stringFileHeader: + raise BadZipFile("Bad magic number for file header") + + fname = zef_file.read(fheader[_FH_FILENAME_LENGTH]) + if fheader[_FH_EXTRA_FIELD_LENGTH]: + zef_file.seek(fheader[_FH_EXTRA_FIELD_LENGTH], whence=1) + + if zinfo.flag_bits & _MASK_COMPRESSED_PATCH: + # Zip 2.7: compressed patched data + raise NotImplementedError("compressed patched data (flag bit 5)") + + if zinfo.flag_bits & _MASK_STRONG_ENCRYPTION: + # strong encryption + raise NotImplementedError("strong encryption (flag bit 6)") + + if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & _MASK_UTF_FILENAME: + # UTF-8 filename + fname_str = fname.decode("utf-8") + else: + fname_str = fname.decode(self.metadata_encoding or "cp437") + + if fname_str != zinfo.orig_filename: + raise BadZipFile( + 'File name in directory %r and header %r differ.' + % (zinfo.orig_filename, fname)) + + if (zinfo._end_offset is not None and + zef_file.tell() + zinfo.compress_size > zinfo._end_offset): + if zinfo._end_offset == zinfo.header_offset: + import warnings + warnings.warn( + f"Overlapped entries: {zinfo.orig_filename!r} " + f"(possible zip bomb)", + skip_file_prefixes=(os.path.dirname(__file__),)) + else: + raise BadZipFile( + f"Overlapped entries: {zinfo.orig_filename!r} " + f"(possible zip bomb)") + + # check for encrypted flag & handle password + is_encrypted = zinfo.flag_bits & _MASK_ENCRYPTED + if is_encrypted: + if not pwd: + pwd = self.pwd + if pwd and not isinstance(pwd, bytes): + raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) + if not pwd: + raise RuntimeError("File %r is encrypted, password " + "required for extraction" % name) + else: + pwd = None + + return ZipExtFile(zef_file, mode + 'b', zinfo, pwd, True) + except: + zef_file.close() + raise + + def _open_to_write(self, zinfo, force_zip64=False): + if force_zip64 and not self._allowZip64: + raise ValueError( + "force_zip64 is True, but allowZip64 was False when opening " + "the ZIP file." + ) + if self._writing: + raise ValueError("Can't write to the ZIP file while there is " + "another write handle open on it. 
" + "Close the first handle before opening another.") + + # Size and CRC are overwritten with correct data after processing the file + zinfo.compress_size = 0 + zinfo.CRC = 0 + + zinfo.flag_bits = 0x00 + if zinfo.compress_type == ZIP_LZMA: + # Compressed data includes an end-of-stream (EOS) marker + zinfo.flag_bits |= _MASK_COMPRESS_OPTION_1 + if not self._seekable: + zinfo.flag_bits |= _MASK_USE_DATA_DESCRIPTOR + + if not zinfo.external_attr: + zinfo.external_attr = 0o600 << 16 # permissions: ?rw------- + + # Compressed size can be larger than uncompressed size + zip64 = force_zip64 or (zinfo.file_size * 1.05 > ZIP64_LIMIT) + if not self._allowZip64 and zip64: + raise LargeZipFile("Filesize would require ZIP64 extensions") + + if self._seekable: + self.fp.seek(self.start_dir) + zinfo.header_offset = self.fp.tell() + + self._writecheck(zinfo) + self._didModify = True + + self.fp.write(zinfo.FileHeader(zip64)) + + self._writing = True + return _ZipWriteFile(self, zinfo, zip64) + + def extract(self, member, path=None, pwd=None): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. 'member' may be a filename or a ZipInfo object. You can + specify a different directory using 'path'. You can specify the + password to decrypt the file using 'pwd'. + """ + if path is None: + path = os.getcwd() + else: + path = os.fspath(path) + + return self._extract_member(member, path, pwd) + + def extractall(self, path=None, members=None, pwd=None): + """Extract all members from the archive to the current working + directory. 'path' specifies a different directory to extract to. + 'members' is optional and must be a subset of the list returned + by namelist(). You can specify the password to decrypt all files + using 'pwd'. + """ + if members is None: + members = self.namelist() + + if path is None: + path = os.getcwd() + else: + path = os.fspath(path) + + for zipinfo in members: + self._extract_member(zipinfo, path, pwd) + + @classmethod + def _sanitize_windows_name(cls, arcname, pathsep): + """Replace bad characters and remove trailing dots from parts.""" + table = cls._windows_illegal_name_trans_table + if not table: + illegal = ':<>|"?*' + table = str.maketrans(illegal, '_' * len(illegal)) + cls._windows_illegal_name_trans_table = table + arcname = arcname.translate(table) + # remove trailing dots and spaces + arcname = (x.rstrip(' .') for x in arcname.split(pathsep)) + # rejoin, removing empty parts. + arcname = pathsep.join(x for x in arcname if x) + return arcname + + def _extract_member(self, member, targetpath, pwd): + """Extract the ZipInfo object 'member' to a physical + file on the path targetpath. + """ + if not isinstance(member, ZipInfo): + member = self.getinfo(member) + + # build the destination pathname, replacing + # forward slashes to platform specific separators. + arcname = member.filename.replace('/', os.path.sep) + + if os.path.altsep: + arcname = arcname.replace(os.path.altsep, os.path.sep) + # interpret absolute pathname as relative, remove drive letter or + # UNC path, redundant separators, "." and ".." components. 
+ arcname = os.path.splitdrive(arcname)[1] + invalid_path_parts = ('', os.path.curdir, os.path.pardir) + arcname = os.path.sep.join(x for x in arcname.split(os.path.sep) + if x not in invalid_path_parts) + if os.path.sep == '\\': + # filter illegal characters on Windows + arcname = self._sanitize_windows_name(arcname, os.path.sep) + + if not arcname and not member.is_dir(): + raise ValueError("Empty filename.") + + targetpath = os.path.join(targetpath, arcname) + targetpath = os.path.normpath(targetpath) + + # Create all upper directories if necessary. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + os.makedirs(upperdirs, exist_ok=True) + + if member.is_dir(): + if not os.path.isdir(targetpath): + try: + os.mkdir(targetpath) + except FileExistsError: + if not os.path.isdir(targetpath): + raise + return targetpath + + with self.open(member, pwd=pwd) as source, \ + open(targetpath, "wb") as target: + shutil.copyfileobj(source, target) + + return targetpath + + def _writecheck(self, zinfo): + """Check for errors before writing a file to the archive.""" + if zinfo.filename in self.NameToInfo: + import warnings + warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3) + if self.mode not in ('w', 'x', 'a'): + raise ValueError("write() requires mode 'w', 'x', or 'a'") + if not self.fp: + raise ValueError( + "Attempt to write ZIP archive that was already closed") + _check_compression(zinfo.compress_type) + if not self._allowZip64: + requires_zip64 = None + if len(self.filelist) >= ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif zinfo.file_size > ZIP64_LIMIT: + requires_zip64 = "Filesize" + elif zinfo.header_offset > ZIP64_LIMIT: + requires_zip64 = "Zipfile size" + if requires_zip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") + + def write(self, filename, arcname=None, + compress_type=None, compresslevel=None): + """Put the bytes from filename into the archive under the name + arcname.""" + if not self.fp: + raise ValueError( + "Attempt to write to ZIP archive that was already closed") + if self._writing: + raise ValueError( + "Can't write to ZIP archive while an open writing handle exists" + ) + + zinfo = ZipInfo.from_file(filename, arcname, + strict_timestamps=self._strict_timestamps) + + if zinfo.is_dir(): + zinfo.compress_size = 0 + zinfo.CRC = 0 + self.mkdir(zinfo) + else: + if compress_type is not None: + zinfo.compress_type = compress_type + else: + zinfo.compress_type = self.compression + + if compresslevel is not None: + zinfo.compress_level = compresslevel + else: + zinfo.compress_level = self.compresslevel + + with open(filename, "rb") as src, self.open(zinfo, 'w') as dest: + shutil.copyfileobj(src, dest, 1024*8) + + def writestr(self, zinfo_or_arcname, data, + compress_type=None, compresslevel=None): + """Write a file into the archive. The contents is 'data', which + may be either a 'str' or a 'bytes' instance; if it is a 'str', + it is encoded as UTF-8 first. + 'zinfo_or_arcname' is either a ZipInfo instance or + the name of the file in the archive.""" + if isinstance(data, str): + data = data.encode("utf-8") + if isinstance(zinfo_or_arcname, ZipInfo): + zinfo = zinfo_or_arcname + else: + zinfo = ZipInfo(zinfo_or_arcname)._for_archive(self) + + if not self.fp: + raise ValueError( + "Attempt to write to ZIP archive that was already closed") + if self._writing: + raise ValueError( + "Can't write to ZIP archive while an open writing handle exists." 
+ ) + + if compress_type is not None: + zinfo.compress_type = compress_type + + if compresslevel is not None: + zinfo.compress_level = compresslevel + + zinfo.file_size = len(data) # Uncompressed size + with self._lock: + with self.open(zinfo, mode='w') as dest: + dest.write(data) + + def mkdir(self, zinfo_or_directory_name, mode=511): + """Creates a directory inside the zip archive.""" + if isinstance(zinfo_or_directory_name, ZipInfo): + zinfo = zinfo_or_directory_name + if not zinfo.is_dir(): + raise ValueError("The given ZipInfo does not describe a directory") + elif isinstance(zinfo_or_directory_name, str): + directory_name = zinfo_or_directory_name + if not directory_name.endswith("/"): + directory_name += "/" + zinfo = ZipInfo(directory_name) + zinfo.compress_size = 0 + zinfo.CRC = 0 + zinfo.external_attr = ((0o40000 | mode) & 0xFFFF) << 16 + zinfo.file_size = 0 + zinfo.external_attr |= 0x10 + else: + raise TypeError("Expected type str or ZipInfo") + + with self._lock: + if self._seekable: + self.fp.seek(self.start_dir) + zinfo.header_offset = self.fp.tell() # Start of header bytes + if zinfo.compress_type == ZIP_LZMA: + # Compressed data includes an end-of-stream (EOS) marker + zinfo.flag_bits |= _MASK_COMPRESS_OPTION_1 + + self._writecheck(zinfo) + self._didModify = True + + self.filelist.append(zinfo) + self.NameToInfo[zinfo.filename] = zinfo + self.fp.write(zinfo.FileHeader(False)) + self.start_dir = self.fp.tell() + + def __del__(self): + """Call the "close()" method in case the user forgot.""" + self.close() + + def close(self): + """Close the file, and for mode 'w', 'x' and 'a' write the ending + records.""" + if self.fp is None: + return + + if self._writing: + raise ValueError("Can't close the ZIP file while there is " + "an open writing handle on it. 
" + "Close the writing handle before closing the zip.") + + try: + if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records + with self._lock: + if self._seekable: + self.fp.seek(self.start_dir) + self._write_end_record() + finally: + fp = self.fp + self.fp = None + self._fpclose(fp) + + def _write_end_record(self): + for zinfo in self.filelist: # write central directory + dt = zinfo.date_time + dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] + dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) + extra = [] + if zinfo.file_size > ZIP64_LIMIT \ + or zinfo.compress_size > ZIP64_LIMIT: + extra.append(zinfo.file_size) + extra.append(zinfo.compress_size) + file_size = 0xffffffff + compress_size = 0xffffffff + else: + file_size = zinfo.file_size + compress_size = zinfo.compress_size + + if zinfo.header_offset > ZIP64_LIMIT: + extra.append(zinfo.header_offset) + header_offset = 0xffffffff + else: + header_offset = zinfo.header_offset + + extra_data = zinfo.extra + min_version = 0 + if extra: + # Append a ZIP64 field to the extra's + extra_data = _Extra.strip(extra_data, (1,)) + extra_data = struct.pack( + ' ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif centDirOffset > ZIP64_LIMIT: + requires_zip64 = "Central directory offset" + elif centDirSize > ZIP64_LIMIT: + requires_zip64 = "Central directory size" + if requires_zip64: + # Need to write the ZIP64 end-of-archive records + if not self._allowZip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") + zip64endrec = struct.pack( + structEndArchive64, stringEndArchive64, + sizeEndCentDir64 - 12, 45, 45, 0, 0, centDirCount, centDirCount, + centDirSize, centDirOffset) + self.fp.write(zip64endrec) + + zip64locrec = struct.pack( + structEndArchive64Locator, + stringEndArchive64Locator, 0, pos2, 1) + self.fp.write(zip64locrec) + centDirCount = min(centDirCount, 0xFFFF) + centDirSize = min(centDirSize, 0xFFFFFFFF) + centDirOffset = min(centDirOffset, 0xFFFFFFFF) + + endrec = struct.pack(structEndArchive, stringEndArchive, + 0, 0, centDirCount, centDirCount, + centDirSize, centDirOffset, len(self._comment)) + self.fp.write(endrec) + self.fp.write(self._comment) + if self.mode == "a": + self.fp.truncate() + self.fp.flush() + + def _fpclose(self, fp): + assert self._fileRefCnt > 0 + self._fileRefCnt -= 1 + if not self._fileRefCnt and not self._filePassed: + fp.close() + + +class PyZipFile(ZipFile): + """Class to create ZIP archives with Python library files and packages.""" + + def __init__(self, file, mode="r", compression=ZIP_STORED, + allowZip64=True, optimize=-1): + ZipFile.__init__(self, file, mode=mode, compression=compression, + allowZip64=allowZip64) + self._optimize = optimize + + def writepy(self, pathname, basename="", filterfunc=None): + """Add all files from "pathname" to the ZIP archive. + + If pathname is a package directory, search the directory and + all package subdirectories recursively for all *.py and enter + the modules into the archive. If pathname is a plain + directory, listdir *.py and enter all modules. Else, pathname + must be a Python *.py file and the module will be put into the + archive. Added modules are always module.pyc. + This method will compile the module.py into module.pyc if + necessary. + If filterfunc(pathname) is given, it is called with every argument. + When it is False, the file or directory is skipped. 
+ """ + pathname = os.fspath(pathname) + if filterfunc and not filterfunc(pathname): + if self.debug: + label = 'path' if os.path.isdir(pathname) else 'file' + print('%s %r skipped by filterfunc' % (label, pathname)) + return + dir, name = os.path.split(pathname) + if os.path.isdir(pathname): + initname = os.path.join(pathname, "__init__.py") + if os.path.isfile(initname): + # This is a package directory, add it + if basename: + basename = "%s/%s" % (basename, name) + else: + basename = name + if self.debug: + print("Adding package in", pathname, "as", basename) + fname, arcname = self._get_codename(initname[0:-3], basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + dirlist = sorted(os.listdir(pathname)) + dirlist.remove("__init__.py") + # Add all *.py files and package subdirectories + for filename in dirlist: + path = os.path.join(pathname, filename) + root, ext = os.path.splitext(filename) + if os.path.isdir(path): + if os.path.isfile(os.path.join(path, "__init__.py")): + # This is a package directory, add it + self.writepy(path, basename, + filterfunc=filterfunc) # Recursive call + elif ext == ".py": + if filterfunc and not filterfunc(path): + if self.debug: + print('file %r skipped by filterfunc' % path) + continue + fname, arcname = self._get_codename(path[0:-3], + basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + else: + # This is NOT a package directory, add its files at top level + if self.debug: + print("Adding files from directory", pathname) + for filename in sorted(os.listdir(pathname)): + path = os.path.join(pathname, filename) + root, ext = os.path.splitext(filename) + if ext == ".py": + if filterfunc and not filterfunc(path): + if self.debug: + print('file %r skipped by filterfunc' % path) + continue + fname, arcname = self._get_codename(path[0:-3], + basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + else: + if pathname[-3:] != ".py": + raise RuntimeError( + 'Files added with writepy() must end with ".py"') + fname, arcname = self._get_codename(pathname[0:-3], basename) + if self.debug: + print("Adding file", arcname) + self.write(fname, arcname) + + def _get_codename(self, pathname, basename): + """Return (filename, archivename) for the path. + + Given a module name path, return the correct file path and + archive name, compiling if necessary. For example, given + /python/lib/string, return (/python/lib/string.pyc, string). + """ + def _compile(file, optimize=-1): + import py_compile + if self.debug: + print("Compiling", file) + try: + py_compile.compile(file, doraise=True, optimize=optimize) + except py_compile.PyCompileError as err: + print(err.msg) + return False + return True + + file_py = pathname + ".py" + file_pyc = pathname + ".pyc" + pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='') + pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1) + pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2) + if self._optimize == -1: + # legacy mode: use whatever file is present + if (os.path.isfile(file_pyc) and + os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime): + # Use .pyc file. + arcname = fname = file_pyc + elif (os.path.isfile(pycache_opt0) and + os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime): + # Use the __pycache__/*.pyc file, but write it to the legacy pyc + # file name in the archive. 
+                fname = pycache_opt0
+                arcname = file_pyc
+            elif (os.path.isfile(pycache_opt1) and
+                  os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
+                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
+                # file name in the archive.
+                fname = pycache_opt1
+                arcname = file_pyc
+            elif (os.path.isfile(pycache_opt2) and
+                  os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
+                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
+                # file name in the archive.
+                fname = pycache_opt2
+                arcname = file_pyc
+            else:
+                # Compile py into PEP 3147 pyc file.
+                if _compile(file_py):
+                    if sys.flags.optimize == 0:
+                        fname = pycache_opt0
+                    elif sys.flags.optimize == 1:
+                        fname = pycache_opt1
+                    else:
+                        fname = pycache_opt2
+                    arcname = file_pyc
+                else:
+                    fname = arcname = file_py
+        else:
+            # new mode: use given optimization level
+            if self._optimize == 0:
+                fname = pycache_opt0
+                arcname = file_pyc
+            else:
+                arcname = file_pyc
+                if self._optimize == 1:
+                    fname = pycache_opt1
+                elif self._optimize == 2:
+                    fname = pycache_opt2
+                else:
+                    msg = "invalid value for 'optimize': {!r}".format(self._optimize)
+                    raise ValueError(msg)
+            if not (os.path.isfile(fname) and
+                    os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
+                if not _compile(file_py, optimize=self._optimize):
+                    fname = arcname = file_py
+        archivename = os.path.split(arcname)[1]
+        if basename:
+            archivename = "%s/%s" % (basename, archivename)
+        return (fname, archivename)
+
+
+def main(args=None):
+    import argparse
+
+    description = 'A simple command-line interface for zipfile module.'
+    parser = argparse.ArgumentParser(description=description, color=True)
+    group = parser.add_mutually_exclusive_group(required=True)
+    group.add_argument('-l', '--list', metavar='<zipfile>',
+                       help='Show listing of a zipfile')
+    group.add_argument('-e', '--extract', nargs=2,
+                       metavar=('<zipfile>', '<output_dir>'),
+                       help='Extract zipfile into target dir')
+    group.add_argument('-c', '--create', nargs='+',
+                       metavar=('<name>', '<file>'),
+                       help='Create zipfile from sources')
+    group.add_argument('-t', '--test', metavar='<zipfile>',
+                       help='Test if a zipfile is valid')
+    parser.add_argument('--metadata-encoding', metavar='<encoding>',
+                        help='Specify encoding of member names for -l, -e and -t')
+    args = parser.parse_args(args)
+
+    encoding = args.metadata_encoding
+
+    if args.test is not None:
+        src = args.test
+        with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
+            badfile = zf.testzip()
+        if badfile:
+            print("The following enclosed file is corrupted: {!r}".format(badfile))
+        print("Done testing")
+
+    elif args.list is not None:
+        src = args.list
+        with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
+            zf.printdir()
+
+    elif args.extract is not None:
+        src, curdir = args.extract
+        with ZipFile(src, 'r', metadata_encoding=encoding) as zf:
+            zf.extractall(curdir)
+
+    elif args.create is not None:
+        if encoding:
+            print("Non-conforming encodings not supported with -c.",
+                  file=sys.stderr)
+            sys.exit(1)
+
+        zip_name = args.create.pop(0)
+        files = args.create
+
+        def addToZip(zf, path, zippath):
+            if os.path.isfile(path):
+                zf.write(path, zippath, ZIP_DEFLATED)
+            elif os.path.isdir(path):
+                if zippath:
+                    zf.write(path, zippath)
+                for nm in sorted(os.listdir(path)):
+                    addToZip(zf,
+                             os.path.join(path, nm), os.path.join(zippath, nm))
+            # else: ignore
+
+        with ZipFile(zip_name, 'w') as zf:
+            for path in files:
+                zippath = os.path.basename(path)
+                if not zippath:
+                    zippath = os.path.basename(os.path.dirname(path))
+                if zippath in ('', os.curdir, os.pardir):
+                    zippath = ''
+                addToZip(zf, path, 
zippath) + + +from ._path import ( # noqa: E402 + Path, + + # used privately for tests + CompleteDirs, # noqa: F401 +) diff --git a/Python313_13_x86_Template/Lib/zipfile/__main__.py b/Python314_4_x86_Template/Lib/zipfile/__main__.py similarity index 100% rename from Python313_13_x86_Template/Lib/zipfile/__main__.py rename to Python314_4_x86_Template/Lib/zipfile/__main__.py diff --git a/Python314_4_x86_Template/Lib/zipfile/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/zipfile/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..cb01849b Binary files /dev/null and b/Python314_4_x86_Template/Lib/zipfile/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/zipfile/_path/__init__.py b/Python314_4_x86_Template/Lib/zipfile/_path/__init__.py new file mode 100644 index 00000000..80f5d607 --- /dev/null +++ b/Python314_4_x86_Template/Lib/zipfile/_path/__init__.py @@ -0,0 +1,452 @@ +""" +A Path-like interface for zipfiles. + +This codebase is shared between zipfile.Path in the stdlib +and zipp in PyPI. See +https://github.com/python/importlib_metadata/wiki/Development-Methodology +for more detail. +""" + +import contextlib +import io +import itertools +import pathlib +import posixpath +import re +import stat +import sys +import zipfile + +from .glob import Translator + +__all__ = ['Path'] + + +def _parents(path): + """ + Given a path with elements separated by + posixpath.sep, generate all parents of that path. + + >>> list(_parents('b/d')) + ['b'] + >>> list(_parents('/b/d/')) + ['/b'] + >>> list(_parents('b/d/f/')) + ['b/d', 'b'] + >>> list(_parents('b')) + [] + >>> list(_parents('')) + [] + """ + return itertools.islice(_ancestry(path), 1, None) + + +def _ancestry(path): + """ + Given a path with elements separated by + posixpath.sep, generate all elements of that path. + + >>> list(_ancestry('b/d')) + ['b/d', 'b'] + >>> list(_ancestry('/b/d/')) + ['/b/d', '/b'] + >>> list(_ancestry('b/d/f/')) + ['b/d/f', 'b/d', 'b'] + >>> list(_ancestry('b')) + ['b'] + >>> list(_ancestry('')) + [] + + Multiple separators are treated like a single. + + >>> list(_ancestry('//b//d///f//')) + ['//b//d///f', '//b//d', '//b'] + """ + path = path.rstrip(posixpath.sep) + while path.rstrip(posixpath.sep): + yield path + path, tail = posixpath.split(path) + + +_dedupe = dict.fromkeys +"""Deduplicate an iterable in original order""" + + +def _difference(minuend, subtrahend): + """ + Return items in minuend not in subtrahend, retaining order + with O(1) lookup. + """ + return itertools.filterfalse(set(subtrahend).__contains__, minuend) + + +class InitializedState: + """ + Mix-in to save the initialization state for pickling. + """ + + def __init__(self, *args, **kwargs): + self.__args = args + self.__kwargs = kwargs + super().__init__(*args, **kwargs) + + def __getstate__(self): + return self.__args, self.__kwargs + + def __setstate__(self, state): + args, kwargs = state + super().__init__(*args, **kwargs) + + +class CompleteDirs(InitializedState, zipfile.ZipFile): + """ + A ZipFile subclass that ensures that implied directories + are always included in the namelist. 
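+
+    For example, an archive holding only 'foo/bar.txt' and 'foo/bar/baz.txt'
+    implies the directories 'foo/' and 'foo/bar/':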
+ + >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt'])) + ['foo/', 'foo/bar/'] + >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt', 'foo/bar/'])) + ['foo/'] + """ + + @staticmethod + def _implied_dirs(names): + parents = itertools.chain.from_iterable(map(_parents, names)) + as_dirs = (p + posixpath.sep for p in parents) + return _dedupe(_difference(as_dirs, names)) + + def namelist(self): + names = super().namelist() + return names + list(self._implied_dirs(names)) + + def _name_set(self): + return set(self.namelist()) + + def resolve_dir(self, name): + """ + If the name represents a directory, return that name + as a directory (with the trailing slash). + """ + names = self._name_set() + dirname = name + '/' + dir_match = name not in names and dirname in names + return dirname if dir_match else name + + def getinfo(self, name): + """ + Supplement getinfo for implied dirs. + """ + try: + return super().getinfo(name) + except KeyError: + if not name.endswith('/') or name not in self._name_set(): + raise + return zipfile.ZipInfo(filename=name) + + @classmethod + def make(cls, source): + """ + Given a source (filename or zipfile), return an + appropriate CompleteDirs subclass. + """ + if isinstance(source, CompleteDirs): + return source + + if not isinstance(source, zipfile.ZipFile): + return cls(source) + + # Only allow for FastLookup when supplied zipfile is read-only + if 'r' not in source.mode: + cls = CompleteDirs + + source.__class__ = cls + return source + + @classmethod + def inject(cls, zf: zipfile.ZipFile) -> zipfile.ZipFile: + """ + Given a writable zip file zf, inject directory entries for + any directories implied by the presence of children. + """ + for name in cls._implied_dirs(zf.namelist()): + zf.writestr(name, b"") + return zf + + +class FastLookup(CompleteDirs): + """ + ZipFile subclass to ensure implicit + dirs exist and are resolved rapidly. + """ + + def namelist(self): + with contextlib.suppress(AttributeError): + return self.__names + self.__names = super().namelist() + return self.__names + + def _name_set(self): + with contextlib.suppress(AttributeError): + return self.__lookup + self.__lookup = super()._name_set() + return self.__lookup + +def _extract_text_encoding(encoding=None, *args, **kwargs): + # compute stack level so that the caller of the caller sees any warning. + is_pypy = sys.implementation.name == 'pypy' + # PyPy no longer special cased after 7.3.19 (or maybe 7.3.18) + # See jaraco/zipp#143 + is_old_pypi = is_pypy and sys.pypy_version_info < (7, 3, 19) + stack_level = 3 + is_old_pypi + return io.text_encoding(encoding, stack_level), args, kwargs + + +class Path: + """ + A :class:`importlib.resources.abc.Traversable` interface for zip files. + + Implements many of the features users enjoy from + :class:`pathlib.Path`. + + Consider a zip file with this structure:: + + . + ├── a.txt + └── b + ├── c.txt + └── d + └── e.txt + + >>> data = io.BytesIO() + >>> zf = ZipFile(data, 'w') + >>> zf.writestr('a.txt', 'content of a') + >>> zf.writestr('b/c.txt', 'content of c') + >>> zf.writestr('b/d/e.txt', 'content of e') + >>> zf.filename = 'mem/abcde.zip' + + Path accepts the zipfile object itself or a filename + + >>> path = Path(zf) + + From there, several path operations are available. 
+ + Directory iteration (including the zip file itself): + + >>> a, b = path.iterdir() + >>> a + Path('mem/abcde.zip', 'a.txt') + >>> b + Path('mem/abcde.zip', 'b/') + + name property: + + >>> b.name + 'b' + + join with divide operator: + + >>> c = b / 'c.txt' + >>> c + Path('mem/abcde.zip', 'b/c.txt') + >>> c.name + 'c.txt' + + Read text: + + >>> c.read_text(encoding='utf-8') + 'content of c' + + existence: + + >>> c.exists() + True + >>> (b / 'missing.txt').exists() + False + + Coercion to string: + + >>> import os + >>> str(c).replace(os.sep, posixpath.sep) + 'mem/abcde.zip/b/c.txt' + + At the root, ``name``, ``filename``, and ``parent`` + resolve to the zipfile. + + >>> str(path) + 'mem/abcde.zip/' + >>> path.name + 'abcde.zip' + >>> path.filename == pathlib.Path('mem/abcde.zip') + True + >>> str(path.parent) + 'mem' + + If the zipfile has no filename, such attributes are not + valid and accessing them will raise an Exception. + + >>> zf.filename = None + >>> path.name + Traceback (most recent call last): + ... + TypeError: ... + + >>> path.filename + Traceback (most recent call last): + ... + TypeError: ... + + >>> path.parent + Traceback (most recent call last): + ... + TypeError: ... + + # workaround python/cpython#106763 + >>> pass + """ + + __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" + + def __init__(self, root, at=""): + """ + Construct a Path from a ZipFile or filename. + + Note: When the source is an existing ZipFile object, + its type (__class__) will be mutated to a + specialized type. If the caller wishes to retain the + original type, the caller should either create a + separate ZipFile object or pass a filename. + """ + self.root = FastLookup.make(root) + self.at = at + + def __eq__(self, other): + """ + >>> Path(zipfile.ZipFile(io.BytesIO(), 'w')) == 'foo' + False + """ + if self.__class__ is not other.__class__: + return NotImplemented + return (self.root, self.at) == (other.root, other.at) + + def __hash__(self): + return hash((self.root, self.at)) + + def open(self, mode='r', *args, pwd=None, **kwargs): + """ + Open this entry as text or binary following the semantics + of ``pathlib.Path.open()`` by passing arguments through + to io.TextIOWrapper(). 
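+
+        A short sketch (editor's illustration; ``p`` stands for any Path that
+        points at a text member):
+
+            with p.open(encoding='utf-8') as strm:
+                text = strm.read()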
+ """ + if self.is_dir(): + raise IsADirectoryError(self) + zip_mode = mode[0] + if zip_mode == 'r' and not self.exists(): + raise FileNotFoundError(self) + stream = self.root.open(self.at, zip_mode, pwd=pwd) + if 'b' in mode: + if args or kwargs: + raise ValueError("encoding args invalid for binary operation") + return stream + # Text mode: + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + return io.TextIOWrapper(stream, encoding, *args, **kwargs) + + def _base(self): + return pathlib.PurePosixPath(self.at) if self.at else self.filename + + @property + def name(self): + return self._base().name + + @property + def suffix(self): + return self._base().suffix + + @property + def suffixes(self): + return self._base().suffixes + + @property + def stem(self): + return self._base().stem + + @property + def filename(self): + return pathlib.Path(self.root.filename).joinpath(self.at) + + def read_text(self, *args, **kwargs): + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + with self.open('r', encoding, *args, **kwargs) as strm: + return strm.read() + + def read_bytes(self): + with self.open('rb') as strm: + return strm.read() + + def _is_child(self, path): + return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") + + def _next(self, at): + return self.__class__(self.root, at) + + def is_dir(self): + return not self.at or self.at.endswith("/") + + def is_file(self): + return self.exists() and not self.is_dir() + + def exists(self): + return self.at in self.root._name_set() + + def iterdir(self): + if not self.is_dir(): + raise ValueError("Can't listdir a file") + subs = map(self._next, self.root.namelist()) + return filter(self._is_child, subs) + + def match(self, path_pattern): + return pathlib.PurePosixPath(self.at).match(path_pattern) + + def is_symlink(self): + """ + Return whether this path is a symlink. 
+ """ + info = self.root.getinfo(self.at) + mode = info.external_attr >> 16 + return stat.S_ISLNK(mode) + + def glob(self, pattern): + if not pattern: + raise ValueError(f"Unacceptable pattern: {pattern!r}") + + prefix = re.escape(self.at) + tr = Translator(seps='/') + matches = re.compile(prefix + tr.translate(pattern)).fullmatch + return map(self._next, filter(matches, self.root.namelist())) + + def rglob(self, pattern): + return self.glob(f'**/{pattern}') + + def relative_to(self, other, *extra): + return posixpath.relpath(str(self), str(other.joinpath(*extra))) + + def __str__(self): + return posixpath.join(self.root.filename, self.at) + + def __repr__(self): + return self.__repr.format(self=self) + + def joinpath(self, *other): + next = posixpath.join(self.at, *other) + return self._next(self.root.resolve_dir(next)) + + __truediv__ = joinpath + + @property + def parent(self): + if not self.at: + return self.filename.parent + parent_at = posixpath.dirname(self.at.rstrip('/')) + if parent_at: + parent_at += '/' + return self._next(parent_at) diff --git a/Python314_4_x86_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-314.pyc b/Python314_4_x86_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 00000000..e56826af Binary files /dev/null and b/Python314_4_x86_Template/Lib/zipfile/_path/__pycache__/__init__.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/zipfile/_path/__pycache__/glob.cpython-314.pyc b/Python314_4_x86_Template/Lib/zipfile/_path/__pycache__/glob.cpython-314.pyc new file mode 100644 index 00000000..1f45b685 Binary files /dev/null and b/Python314_4_x86_Template/Lib/zipfile/_path/__pycache__/glob.cpython-314.pyc differ diff --git a/Python314_4_x86_Template/Lib/zipfile/_path/glob.py b/Python314_4_x86_Template/Lib/zipfile/_path/glob.py new file mode 100644 index 00000000..bd283930 --- /dev/null +++ b/Python314_4_x86_Template/Lib/zipfile/_path/glob.py @@ -0,0 +1,113 @@ +import os +import re + +_default_seps = os.sep + str(os.altsep) * bool(os.altsep) + + +class Translator: + """ + >>> Translator('xyz') + Traceback (most recent call last): + ... + AssertionError: Invalid separators + + >>> Translator('') + Traceback (most recent call last): + ... + AssertionError: Invalid separators + """ + + seps: str + + def __init__(self, seps: str = _default_seps): + assert seps and set(seps) <= set(_default_seps), "Invalid separators" + self.seps = seps + + def translate(self, pattern): + """ + Given a glob pattern, produce a regex that matches it. + """ + return self.extend(self.match_dirs(self.translate_core(pattern))) + + def extend(self, pattern): + r""" + Extend regex for pattern-wide concerns. + + Apply '(?s:)' to create a non-matching group that + matches newlines (valid on Unix). + + Append '\z' to imply fullmatch even when match is used. + """ + return rf'(?s:{pattern})\z' + + def match_dirs(self, pattern): + """ + Ensure that zipfile.Path directory names are matched. + + zipfile.Path directory names always end in a slash. + """ + return rf'{pattern}[/]?' + + def translate_core(self, pattern): + r""" + Given a glob pattern, produce a regex that matches it. 
+
+        >>> t = Translator()
+        >>> t.translate_core('*.txt').replace('\\\\', '')
+        '[^/]*\\.txt'
+        >>> t.translate_core('a?txt')
+        'a[^/]txt'
+        >>> t.translate_core('**/*').replace('\\\\', '')
+        '.*/[^/][^/]*'
+        """
+        self.restrict_rglob(pattern)
+        return ''.join(map(self.replace, separate(self.star_not_empty(pattern))))
+
+    def replace(self, match):
+        """
+        Perform the replacements for a match from :func:`separate`.
+        """
+        return match.group('set') or (
+            re.escape(match.group(0))
+            .replace('\\*\\*', r'.*')
+            .replace('\\*', rf'[^{re.escape(self.seps)}]*')
+            .replace('\\?', r'[^/]')
+        )
+
+    def restrict_rglob(self, pattern):
+        """
+        Raise ValueError if ** appears in anything but a full path segment.
+
+        >>> Translator().translate('**foo')
+        Traceback (most recent call last):
+        ...
+        ValueError: ** must appear alone in a path segment
+        """
+        seps_pattern = rf'[{re.escape(self.seps)}]+'
+        segments = re.split(seps_pattern, pattern)
+        if any('**' in segment and segment != '**' for segment in segments):
+            raise ValueError("** must appear alone in a path segment")
+
+    def star_not_empty(self, pattern):
+        """
+        Ensure that * will not match an empty segment.
+        """
+
+        def handle_segment(match):
+            segment = match.group(0)
+            return '?*' if segment == '*' else segment
+
+        not_seps_pattern = rf'[^{re.escape(self.seps)}]+'
+        return re.sub(not_seps_pattern, handle_segment, pattern)
+
+
+def separate(pattern):
+    """
+    Separate out character sets to avoid translating their contents.
+
+    >>> [m.group(0) for m in separate('*.txt')]
+    ['*.txt']
+    >>> [m.group(0) for m in separate('a[?]txt')]
+    ['a', '[?]', 'txt']
+    """
+    return re.finditer(r'([^\[]+)|(?P<set>[\[].*?[\]])|([\[][^\]]*$)', pattern)
diff --git a/Python314_4_x86_Template/Lib/zipimport.py b/Python314_4_x86_Template/Lib/zipimport.py
new file mode 100644
index 00000000..444c9dd1
--- /dev/null
+++ b/Python314_4_x86_Template/Lib/zipimport.py
@@ -0,0 +1,822 @@
+"""zipimport provides support for importing Python modules from Zip archives.
+
+This module exports two objects:
+- zipimporter: a class; its constructor takes a path to a Zip archive.
+- ZipImportError: exception raised by zipimporter objects. It's a
+  subclass of ImportError, so it can be caught as ImportError, too.
+
+It is usually not needed to use the zipimport module explicitly; it is
+used by the builtin import mechanism for sys.path items that are paths
+to Zip archives.
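+
+A minimal sketch (editor's illustration; 'bundle.zip' is a hypothetical
+archive that contains mypkg/__init__.py):
+
+    import sys
+    sys.path.insert(0, 'bundle.zip')
+    import mypkg                # satisfied by a zipimporter instance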
+""" + +#from importlib import _bootstrap_external +#from importlib import _bootstrap # for _verbose_message +import _frozen_importlib_external as _bootstrap_external +from _frozen_importlib_external import _unpack_uint16, _unpack_uint32, _unpack_uint64 +import _frozen_importlib as _bootstrap # for _verbose_message +import _imp # for check_hash_based_pycs +import _io # for open +import marshal # for loads +import sys # for modules +import time # for mktime + +__all__ = ['ZipImportError', 'zipimporter'] + + +path_sep = _bootstrap_external.path_sep +alt_path_sep = _bootstrap_external.path_separators[1:] + + +class ZipImportError(ImportError): + pass + +# _read_directory() cache +_zip_directory_cache = {} + +_module_type = type(sys) + +END_CENTRAL_DIR_SIZE = 22 +END_CENTRAL_DIR_SIZE_64 = 56 +END_CENTRAL_DIR_LOCATOR_SIZE_64 = 20 +STRING_END_ARCHIVE = b'PK\x05\x06' # standard EOCD signature +STRING_END_LOCATOR_64 = b'PK\x06\x07' # Zip64 EOCD Locator signature +STRING_END_ZIP_64 = b'PK\x06\x06' # Zip64 EOCD signature +MAX_COMMENT_LEN = (1 << 16) - 1 +MAX_UINT32 = 0xffffffff +ZIP64_EXTRA_TAG = 0x1 + +class zipimporter(_bootstrap_external._LoaderBasics): + """zipimporter(archivepath) -> zipimporter object + + Create a new zipimporter instance. 'archivepath' must be a path to + a zipfile, or to a specific path inside a zipfile. For example, it can be + '/tmp/myimport.zip', or '/tmp/myimport.zip/mydirectory', if mydirectory is a + valid directory inside the archive. + + 'ZipImportError is raised if 'archivepath' doesn't point to a valid Zip + archive. + + The 'archive' attribute of zipimporter objects contains the name of the + zipfile targeted. + """ + + # Split the "subdirectory" from the Zip archive path, lookup a matching + # entry in sys.path_importer_cache, fetch the file directory from there + # if found, or else read it from the archive. + def __init__(self, path): + if not isinstance(path, str): + raise TypeError(f"expected str, not {type(path)!r}") + if not path: + raise ZipImportError('archive path is empty', path=path) + if alt_path_sep: + path = path.replace(alt_path_sep, path_sep) + + prefix = [] + while True: + try: + st = _bootstrap_external._path_stat(path) + except (OSError, ValueError): + # On Windows a ValueError is raised for too long paths. + # Back up one path element. + dirname, basename = _bootstrap_external._path_split(path) + if dirname == path: + raise ZipImportError('not a Zip file', path=path) + path = dirname + prefix.append(basename) + else: + # it exists + if (st.st_mode & 0o170000) != 0o100000: # stat.S_ISREG + # it's a not file + raise ZipImportError('not a Zip file', path=path) + break + + if path not in _zip_directory_cache: + _zip_directory_cache[path] = _read_directory(path) + self.archive = path + # a prefix directory following the ZIP file path. + self.prefix = _bootstrap_external._path_join(*prefix[::-1]) + if self.prefix: + self.prefix += path_sep + + + def find_spec(self, fullname, target=None): + """Create a ModuleSpec for the specified module. + + Returns None if the module cannot be found. + """ + module_info = _get_module_info(self, fullname) + if module_info is not None: + return _bootstrap.spec_from_loader(fullname, self, is_package=module_info) + else: + # Not a module or regular package. See if this is a directory, and + # therefore possibly a portion of a namespace package. + + # We're only interested in the last path component of fullname + # earlier components are recorded in self.prefix. 
+ modpath = _get_module_path(self, fullname) + if _is_dir(self, modpath): + # This is possibly a portion of a namespace + # package. Return the string representing its path, + # without a trailing separator. + path = f'{self.archive}{path_sep}{modpath}' + spec = _bootstrap.ModuleSpec(name=fullname, loader=None, + is_package=True) + spec.submodule_search_locations.append(path) + return spec + else: + return None + + def get_code(self, fullname): + """get_code(fullname) -> code object. + + Return the code object for the specified module. Raise ZipImportError + if the module couldn't be imported. + """ + code, ispackage, modpath = _get_module_code(self, fullname) + return code + + + def get_data(self, pathname): + """get_data(pathname) -> string with file data. + + Return the data associated with 'pathname'. Raise OSError if + the file wasn't found. + """ + if alt_path_sep: + pathname = pathname.replace(alt_path_sep, path_sep) + + key = pathname + if pathname.startswith(self.archive + path_sep): + key = pathname[len(self.archive + path_sep):] + + try: + toc_entry = self._get_files()[key] + except KeyError: + raise OSError(0, '', key) + if toc_entry is None: + return b'' + return _get_data(self.archive, toc_entry) + + + # Return a string matching __file__ for the named module + def get_filename(self, fullname): + """get_filename(fullname) -> filename string. + + Return the filename for the specified module or raise ZipImportError + if it couldn't be imported. + """ + # Deciding the filename requires working out where the code + # would come from if the module was actually loaded + code, ispackage, modpath = _get_module_code(self, fullname) + return modpath + + + def get_source(self, fullname): + """get_source(fullname) -> source string. + + Return the source code for the specified module. Raise ZipImportError + if the module couldn't be found, return None if the archive does + contain the module, but has no source for it. + """ + mi = _get_module_info(self, fullname) + if mi is None: + raise ZipImportError(f"can't find module {fullname!r}", name=fullname) + + path = _get_module_path(self, fullname) + if mi: + fullpath = _bootstrap_external._path_join(path, '__init__.py') + else: + fullpath = f'{path}.py' + + try: + toc_entry = self._get_files()[fullpath] + except KeyError: + # we have the module, but no source + return None + return _get_data(self.archive, toc_entry).decode() + + + # Return a bool signifying whether the module is a package or not. + def is_package(self, fullname): + """is_package(fullname) -> bool. + + Return True if the module specified by fullname is a package. + Raise ZipImportError if the module couldn't be found. + """ + mi = _get_module_info(self, fullname) + if mi is None: + raise ZipImportError(f"can't find module {fullname!r}", name=fullname) + return mi + + + # Load and return the module named by 'fullname'. + def load_module(self, fullname): + """load_module(fullname) -> module. + + Load the module specified by 'fullname'. 'fullname' must be the + fully qualified (dotted) module name. It returns the imported + module, or raises ZipImportError if it could not be imported. + + Deprecated since Python 3.10. Use exec_module() instead. 
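+
+        A rough modern-API equivalent (editor's sketch, not upstream code):
+
+            import importlib.util
+            spec = self.find_spec(fullname)
+            module = importlib.util.module_from_spec(spec)
+            spec.loader.exec_module(module)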
+        """
+        import warnings
+        warnings._deprecated("zipimport.zipimporter.load_module",
+                             f"{warnings._DEPRECATED_MSG}; "
+                             "use zipimport.zipimporter.exec_module() instead",
+                             remove=(3, 15))
+        code, ispackage, modpath = _get_module_code(self, fullname)
+        mod = sys.modules.get(fullname)
+        if mod is None or not isinstance(mod, _module_type):
+            mod = _module_type(fullname)
+            sys.modules[fullname] = mod
+            mod.__loader__ = self
+
+        try:
+            if ispackage:
+                # add __path__ to the module *before* the code gets
+                # executed
+                path = _get_module_path(self, fullname)
+                fullpath = _bootstrap_external._path_join(self.archive, path)
+                mod.__path__ = [fullpath]
+
+            if not hasattr(mod, '__builtins__'):
+                mod.__builtins__ = __builtins__
+            _bootstrap_external._fix_up_module(mod.__dict__, fullname, modpath)
+            exec(code, mod.__dict__)
+        except:
+            del sys.modules[fullname]
+            raise
+
+        try:
+            mod = sys.modules[fullname]
+        except KeyError:
+            raise ImportError(f'Loaded module {fullname!r} not found in sys.modules')
+        _bootstrap._verbose_message('import {} # loaded from Zip {}', fullname, modpath)
+        return mod
+
+
+    def get_resource_reader(self, fullname):
+        """Return the ResourceReader for a module in a zip file."""
+        from importlib.readers import ZipReader
+
+        return ZipReader(self, fullname)
+
+
+    def _get_files(self):
+        """Return the files within the archive path."""
+        try:
+            files = _zip_directory_cache[self.archive]
+        except KeyError:
+            try:
+                files = _zip_directory_cache[self.archive] = _read_directory(self.archive)
+            except ZipImportError:
+                files = {}
+
+        return files
+
+
+    def invalidate_caches(self):
+        """Invalidates the cache of file data of the archive path."""
+        _zip_directory_cache.pop(self.archive, None)
+
+
+    def __repr__(self):
+        return f'<zipimporter object "{self.archive}{path_sep}{self.prefix}">'
+
+
+# _zip_searchorder defines how we search for a module in the Zip
+# archive: we first search for a package __init__, then for
+# non-package .pyc, and .py entries. The .pyc entries
+# are swapped by initzipimport() if we run in optimized mode. Also,
+# '/' is replaced by path_sep there.
+_zip_searchorder = (
+    (path_sep + '__init__.pyc', True, True),
+    (path_sep + '__init__.py', False, True),
+    ('.pyc', True, False),
+    ('.py', False, False),
+)
+
+# Given a module name, return the potential file path in the
+# archive (without extension).
+def _get_module_path(self, fullname):
+    return self.prefix + fullname.rpartition('.')[2]
+
+# Does this path represent a directory?
+def _is_dir(self, path):
+    # See if this is a "directory". If so, it's eligible to be part
+    # of a namespace package. We test by seeing if the name, with an
+    # appended path separator, exists.
+    dirpath = path + path_sep
+    # If dirpath is present in self._get_files(), we have a directory.
+    return dirpath in self._get_files()
+
+# Return some information about a module.
+def _get_module_info(self, fullname):
+    path = _get_module_path(self, fullname)
+    for suffix, isbytecode, ispackage in _zip_searchorder:
+        fullpath = path + suffix
+        if fullpath in self._get_files():
+            return ispackage
+    return None
+
+
+# implementation
+
+# _read_directory(archive) -> files dict (new reference)
+#
+# Given a path to a Zip archive, build a dict, mapping file names
+# (local to the archive, using SEP as a separator) to toc entries.
+#
+# A toc_entry is a tuple:
+#
+# (__file__,      # value to use for __file__, available for all files,
+#                 # encoded to the filesystem encoding
+#  compress,      # compression kind; 0 for uncompressed
+#  data_size,     # size of compressed data on disk
+#  file_size,     # size of decompressed data
+#  file_offset,   # offset of file header from start of archive
+#  time,          # mod time of file (in dos format)
+#  date,          # mod data of file (in dos format)
+#  crc,           # crc checksum of the data
+# )
+#
+# Directories can be recognized by the trailing path_sep in the name,
+# data_size and file_offset are 0.
+def _read_directory(archive):
+    try:
+        fp = _io.open_code(archive)
+    except OSError:
+        raise ZipImportError(f"can't open Zip file: {archive!r}", path=archive)
+
+    with fp:
+        # GH-87235: On macOS all file descriptors for /dev/fd/N share the same
+        # file offset, reset the file offset after scanning the zipfile directory
+        # to not cause problems when some runs 'python3 /dev/fd/9 9<some_zip'
+        start_offset = fp.tell()
+        try:
+            # Check if there's a comment.
+            try:
+                fp.seek(0, 2)
+                file_size = fp.tell()
+            except OSError:
+                raise ZipImportError(f"can't read Zip file: {archive!r}",
+                                     path=archive)
+            max_comment_plus_dirs_size = (
+                MAX_COMMENT_LEN + END_CENTRAL_DIR_SIZE +
+                END_CENTRAL_DIR_SIZE_64 + END_CENTRAL_DIR_LOCATOR_SIZE_64)
+            max_comment_start = max(file_size - max_comment_plus_dirs_size, 0)
+            try:
+                fp.seek(max_comment_start)
+                data = fp.read(max_comment_plus_dirs_size)
+            except OSError:
+                raise ZipImportError(f"can't read Zip file: {archive!r}",
+                                     path=archive)
+            pos = data.rfind(STRING_END_ARCHIVE)
+            pos64 = data.rfind(STRING_END_ZIP_64)
+
+            if (pos64 >= 0 and pos64+END_CENTRAL_DIR_SIZE_64+END_CENTRAL_DIR_LOCATOR_SIZE_64==pos):
+                # Zip64 at "correct" offset from standard EOCD
+                buffer = data[pos64:pos64 + END_CENTRAL_DIR_SIZE_64]
+                if len(buffer) != END_CENTRAL_DIR_SIZE_64:
+                    raise ZipImportError(
+                        f"corrupt Zip64 file: Expected {END_CENTRAL_DIR_SIZE_64} byte "
+                        f"zip64 central directory, but read {len(buffer)} bytes.",
+                        path=archive)
+                header_position = file_size - len(data) + pos64
+
+                central_directory_size = _unpack_uint64(buffer[40:48])
+                central_directory_position = _unpack_uint64(buffer[48:56])
+                num_entries = _unpack_uint64(buffer[24:32])
+            elif pos >= 0:
+                buffer = data[pos:pos+END_CENTRAL_DIR_SIZE]
+                if len(buffer) != END_CENTRAL_DIR_SIZE:
+                    raise ZipImportError(f"corrupt Zip file: {archive!r}",
+                                         path=archive)
+
+                header_position = file_size - len(data) + pos
+
+                # Buffer now contains a valid EOCD, and header_position gives the
+                # starting position of it.
+                central_directory_size = _unpack_uint32(buffer[12:16])
+                central_directory_position = _unpack_uint32(buffer[16:20])
+                num_entries = _unpack_uint16(buffer[8:10])
+
+                # N.b. if someday you want to prefer the standard (non-zip64) EOCD,
+                # you need to adjust position by 76 for arc to be 0.
+            else:
+                raise ZipImportError(f'not a Zip file: {archive!r}',
+                                     path=archive)
+
+            # Buffer now contains a valid EOCD, and header_position gives the
+            # starting position of it.
+            # XXX: These are cursory checks but are not as exact or strict as they
+            # could be. Checking the arc-adjusted value is probably good too.
+            if header_position < central_directory_size:
+                raise ZipImportError(f'bad central directory size: {archive!r}', path=archive)
+            if header_position < central_directory_position:
+                raise ZipImportError(f'bad central directory offset: {archive!r}', path=archive)
+            header_position -= central_directory_size
+            # On just-a-zipfile these values are the same and arc_offset is zero; if
+            # the file has some bytes prepended, `arc_offset` is the number of such
+            # bytes. This is used for pex as well as self-extracting .exe.
+            arc_offset = header_position - central_directory_position
+            if arc_offset < 0:
+                raise ZipImportError(f'bad central directory size or offset: {archive!r}', path=archive)
+
+            files = {}
+            # Start of Central Directory
+            count = 0
+            try:
+                fp.seek(header_position)
+            except OSError:
+                raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
+            while True:
+                buffer = fp.read(46)
+                if len(buffer) < 4:
+                    raise EOFError('EOF read where not expected')
+                # Start of file header
+                if buffer[:4] != b'PK\x01\x02':
+                    if count != num_entries:
+                        raise ZipImportError(
+                            f"mismatched num_entries: {count} should be {num_entries} in {archive!r}",
+                            path=archive,
+                        )
+                    break  # Bad: Central Dir File Header
+                if len(buffer) != 46:
+                    raise EOFError('EOF read where not expected')
+                flags = _unpack_uint16(buffer[8:10])
+                compress = _unpack_uint16(buffer[10:12])
+                time = _unpack_uint16(buffer[12:14])
+                date = _unpack_uint16(buffer[14:16])
+                crc = _unpack_uint32(buffer[16:20])
+                data_size = _unpack_uint32(buffer[20:24])
+                file_size = _unpack_uint32(buffer[24:28])
+                name_size = _unpack_uint16(buffer[28:30])
+                extra_size = _unpack_uint16(buffer[30:32])
+                comment_size = _unpack_uint16(buffer[32:34])
+                file_offset = _unpack_uint32(buffer[42:46])
+                header_size = name_size + extra_size + comment_size
+
+                try:
+                    name = fp.read(name_size)
+                except OSError:
+                    raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
+                if len(name) != name_size:
+                    raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
+                # On Windows, calling fseek to skip over the fields we don't use is
+                # slower than reading the data because fseek flushes stdio's
+                # internal buffers. See issue #8745.
+                try:
+                    extra_data_len = header_size - name_size
+                    extra_data = memoryview(fp.read(extra_data_len))
+
+                    if len(extra_data) != extra_data_len:
+                        raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
+                except OSError:
+                    raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive)
+
+                if flags & 0x800:
+                    # UTF-8 file names extension
+                    name = name.decode()
+                else:
+                    # Historical ZIP filename encoding
+                    try:
+                        name = name.decode('ascii')
+                    except UnicodeDecodeError:
+                        name = name.decode('latin1').translate(cp437_table)
+
+                name = name.replace('/', path_sep)
+                path = _bootstrap_external._path_join(archive, name)
+
+                # Ordering matches unpacking below.
+                if (
+                    file_size == MAX_UINT32 or
+                    data_size == MAX_UINT32 or
+                    file_offset == MAX_UINT32
+                ):
+                    # need to decode extra_data looking for a zip64 extra (which might not
+                    # be present)
+                    while extra_data:
+                        if len(extra_data) < 4:
+                            raise ZipImportError(f"can't read header extra: {archive!r}", path=archive)
+                        tag = _unpack_uint16(extra_data[:2])
+                        size = _unpack_uint16(extra_data[2:4])
+                        if len(extra_data) < 4 + size:
+                            raise ZipImportError(f"can't read header extra: {archive!r}", path=archive)
+                        if tag == ZIP64_EXTRA_TAG:
+                            if (len(extra_data) - 4) % 8 != 0:
+                                raise ZipImportError(f"can't read header extra: {archive!r}", path=archive)
+                            num_extra_values = (len(extra_data) - 4) // 8
+                            if num_extra_values > 3:
+                                raise ZipImportError(f"can't read header extra: {archive!r}", path=archive)
+                            import struct
+                            values = list(struct.unpack_from(f"<{min(num_extra_values, 3)}Q",
+                                                             extra_data, offset=4))
+
+                            # N.b. Here be dragons: the ordering of these is different than
+                            # the header fields, and it's really easy to get it wrong since
+                            # naturally-occurring zips that use all 3 are >4GB
+                            if file_size == MAX_UINT32:
+                                file_size = values.pop(0)
+                            if data_size == MAX_UINT32:
+                                data_size = values.pop(0)
+                            if file_offset == MAX_UINT32:
+                                file_offset = values.pop(0)
+
+                            break
+
+                        # For a typical zip, this bytes-slicing only happens 2-3 times, on
+                        # small data like timestamps and filesizes.
+                        extra_data = extra_data[4+size:]
+                    else:
+                        _bootstrap._verbose_message(
+                            "zipimport: suspected zip64 but no zip64 extra for {!r}",
+                            path,
+                        )
+                # XXX These two statements seem swapped because `central_directory_position`
+                # is a position within the actual file, but `file_offset` (when compared) is
+                # as encoded in the entry, not adjusted for this file.
+                # N.b. this must be after we've potentially read the zip64 extra which can
+                # change `file_offset`.
+                if file_offset > central_directory_position:
+                    raise ZipImportError(f'bad local header offset: {archive!r}', path=archive)
+                file_offset += arc_offset
+
+                t = (path, compress, data_size, file_size, file_offset, time, date, crc)
+                files[name] = t
+                count += 1
+        finally:
+            fp.seek(start_offset)
+    _bootstrap._verbose_message('zipimport: found {} names in {!r}', count, archive)
+
+    # Add implicit directories.
+    count = 0
+    for name in list(files):
+        while True:
+            i = name.rstrip(path_sep).rfind(path_sep)
+            if i < 0:
+                break
+            name = name[:i + 1]
+            if name in files:
+                break
+            files[name] = None
+            count += 1
+    if count:
+        _bootstrap._verbose_message('zipimport: added {} implicit directories in {!r}',
+                                    count, archive)
+    return files
+
+# During bootstrap, we may need to load the encodings
+# package from a ZIP file. But the cp437 encoding is implemented
+# in Python in the encodings package.
+#
+# Break out of this dependency by using the translation table for
+# the cp437 encoding.
+cp437_table = (
+    # ASCII part, 8 rows x 16 chars
+    '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
+    '\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'
+    ' !"#$%&\'()*+,-./'
+    '0123456789:;<=>?'
+    '@ABCDEFGHIJKLMNO'
+    'PQRSTUVWXYZ[\\]^_'
+    '`abcdefghijklmno'
+    'pqrstuvwxyz{|}~\x7f'
+    # non-ASCII part, 16 rows x 8 chars
+    '\xc7\xfc\xe9\xe2\xe4\xe0\xe5\xe7'
+    '\xea\xeb\xe8\xef\xee\xec\xc4\xc5'
+    '\xc9\xe6\xc6\xf4\xf6\xf2\xfb\xf9'
+    '\xff\xd6\xdc\xa2\xa3\xa5\u20a7\u0192'
+    '\xe1\xed\xf3\xfa\xf1\xd1\xaa\xba'
+    '\xbf\u2310\xac\xbd\xbc\xa1\xab\xbb'
+    '\u2591\u2592\u2593\u2502\u2524\u2561\u2562\u2556'
+    '\u2555\u2563\u2551\u2557\u255d\u255c\u255b\u2510'
+    '\u2514\u2534\u252c\u251c\u2500\u253c\u255e\u255f'
+    '\u255a\u2554\u2569\u2566\u2560\u2550\u256c\u2567'
+    '\u2568\u2564\u2565\u2559\u2558\u2552\u2553\u256b'
+    '\u256a\u2518\u250c\u2588\u2584\u258c\u2590\u2580'
+    '\u03b1\xdf\u0393\u03c0\u03a3\u03c3\xb5\u03c4'
+    '\u03a6\u0398\u03a9\u03b4\u221e\u03c6\u03b5\u2229'
+    '\u2261\xb1\u2265\u2264\u2320\u2321\xf7\u2248'
+    '\xb0\u2219\xb7\u221a\u207f\xb2\u25a0\xa0'
+)
+
+_importing_zlib = False
+
+# Return the zlib.decompress function object, or NULL if zlib couldn't
+# be imported. The function is cached when found, so subsequent calls
+# don't import zlib again.
+def _get_decompress_func():
+    global _importing_zlib
+    if _importing_zlib:
+        # Someone has a zlib.py[co] in their Zip file
+        # let's avoid a stack overflow.
+ _bootstrap._verbose_message('zipimport: zlib UNAVAILABLE') + raise ZipImportError("can't decompress data; zlib not available") + + _importing_zlib = True + try: + from zlib import decompress + except Exception: + _bootstrap._verbose_message('zipimport: zlib UNAVAILABLE') + raise ZipImportError("can't decompress data; zlib not available") + finally: + _importing_zlib = False + + _bootstrap._verbose_message('zipimport: zlib available') + return decompress + +# Given a path to a Zip file and a toc_entry, return the (uncompressed) data. +def _get_data(archive, toc_entry): + datapath, compress, data_size, file_size, file_offset, time, date, crc = toc_entry + if data_size < 0: + raise ZipImportError('negative data size') + + with _io.open_code(archive) as fp: + # Check to make sure the local file header is correct + try: + fp.seek(file_offset) + except OSError: + raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) + buffer = fp.read(30) + if len(buffer) != 30: + raise EOFError('EOF read where not expected') + + if buffer[:4] != b'PK\x03\x04': + # Bad: Local File Header + raise ZipImportError(f'bad local file header: {archive!r}', path=archive) + + name_size = _unpack_uint16(buffer[26:28]) + extra_size = _unpack_uint16(buffer[28:30]) + header_size = 30 + name_size + extra_size + file_offset += header_size # Start of file data + try: + fp.seek(file_offset) + except OSError: + raise ZipImportError(f"can't read Zip file: {archive!r}", path=archive) + raw_data = fp.read(data_size) + if len(raw_data) != data_size: + raise OSError("zipimport: can't read data") + + if compress == 0: + # data is not compressed + return raw_data + + # Decompress with zlib + try: + decompress = _get_decompress_func() + except Exception: + raise ZipImportError("can't decompress data; zlib not available") + return decompress(raw_data, -15) + + +# Lenient date/time comparison function. The precision of the mtime +# in the archive is lower than the mtime stored in a .pyc: we +# must allow a difference of at most one second. +def _eq_mtime(t1, t2): + # dostime only stores even seconds, so be lenient + return abs(t1 - t2) <= 1 + + +# Given the contents of a .py[co] file, unmarshal the data +# and return the code object. Raises ImportError it the magic word doesn't +# match, or if the recorded .py[co] metadata does not match the source. +def _unmarshal_code(self, pathname, fullpath, fullname, data): + exc_details = { + 'name': fullname, + 'path': fullpath, + } + + flags = _bootstrap_external._classify_pyc(data, fullname, exc_details) + + hash_based = flags & 0b1 != 0 + if hash_based: + check_source = flags & 0b10 != 0 + if (_imp.check_hash_based_pycs != 'never' and + (check_source or _imp.check_hash_based_pycs == 'always')): + source_bytes = _get_pyc_source(self, fullpath) + if source_bytes is not None: + source_hash = _imp.source_hash( + _imp.pyc_magic_number_token, + source_bytes, + ) + + _bootstrap_external._validate_hash_pyc( + data, source_hash, fullname, exc_details) + else: + source_mtime, source_size = \ + _get_mtime_and_size_of_source(self, fullpath) + + if source_mtime: + # We don't use _bootstrap_external._validate_timestamp_pyc + # to allow for a more lenient timestamp check. 
+ if (not _eq_mtime(_unpack_uint32(data[8:12]), source_mtime) or + _unpack_uint32(data[12:16]) != source_size): + _bootstrap._verbose_message( + f'bytecode is stale for {fullname!r}') + return None + + code = marshal.loads(data[16:]) + if not isinstance(code, _code_type): + raise TypeError(f'compiled module {pathname!r} is not a code object') + return code + +_code_type = type(_unmarshal_code.__code__) + + +# Replace any occurrences of '\r\n?' in the input string with '\n'. +# This converts DOS and Mac line endings to Unix line endings. +def _normalize_line_endings(source): + source = source.replace(b'\r\n', b'\n') + source = source.replace(b'\r', b'\n') + return source + +# Given a string buffer containing Python source code, compile it +# and return a code object. +def _compile_source(pathname, source): + source = _normalize_line_endings(source) + return compile(source, pathname, 'exec', dont_inherit=True) + +# Convert the date/time values found in the Zip archive to a value +# that's compatible with the time stamp stored in .pyc files. +def _parse_dostime(d, t): + return time.mktime(( + (d >> 9) + 1980, # bits 9..15: year + (d >> 5) & 0xF, # bits 5..8: month + d & 0x1F, # bits 0..4: day + t >> 11, # bits 11..15: hours + (t >> 5) & 0x3F, # bits 8..10: minutes + (t & 0x1F) * 2, # bits 0..7: seconds / 2 + -1, -1, -1)) + +# Given a path to a .pyc file in the archive, return the +# modification time of the matching .py file and its size, +# or (0, 0) if no source is available. +def _get_mtime_and_size_of_source(self, path): + try: + # strip 'c' or 'o' from *.py[co] + assert path[-1:] in ('c', 'o') + path = path[:-1] + toc_entry = self._get_files()[path] + # fetch the time stamp of the .py file for comparison + # with an embedded pyc time stamp + time = toc_entry[5] + date = toc_entry[6] + uncompressed_size = toc_entry[3] + return _parse_dostime(date, time), uncompressed_size + except (KeyError, IndexError, TypeError): + return 0, 0 + + +# Given a path to a .pyc file in the archive, return the +# contents of the matching .py file, or None if no source +# is available. +def _get_pyc_source(self, path): + # strip 'c' or 'o' from *.py[co] + assert path[-1:] in ('c', 'o') + path = path[:-1] + + try: + toc_entry = self._get_files()[path] + except KeyError: + return None + else: + return _get_data(self.archive, toc_entry) + + +# Get the code object associated with the module specified by +# 'fullname'. 
+def _get_module_code(self, fullname):
+    path = _get_module_path(self, fullname)
+    import_error = None
+    for suffix, isbytecode, ispackage in _zip_searchorder:
+        fullpath = path + suffix
+        _bootstrap._verbose_message('trying {}{}{}', self.archive, path_sep, fullpath, verbosity=2)
+        try:
+            toc_entry = self._get_files()[fullpath]
+        except KeyError:
+            pass
+        else:
+            modpath = toc_entry[0]
+            data = _get_data(self.archive, toc_entry)
+            code = None
+            if isbytecode:
+                try:
+                    code = _unmarshal_code(self, modpath, fullpath, fullname, data)
+                except ImportError as exc:
+                    import_error = exc
+            else:
+                code = _compile_source(modpath, data)
+            if code is None:
+                # bad magic number or non-matching mtime
+                # in byte code, try next
+                continue
+            modpath = toc_entry[0]
+            return code, ispackage, modpath
+    else:
+        if import_error:
+            msg = f"module load failed: {import_error}"
+            raise ZipImportError(msg, name=fullname) from import_error
+        else:
+            raise ZipImportError(f"can't find module {fullname!r}", name=fullname)
diff --git a/Python313_13_x86_Template/Lib/zoneinfo/__init__.py b/Python314_4_x86_Template/Lib/zoneinfo/__init__.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/zoneinfo/__init__.py
rename to Python314_4_x86_Template/Lib/zoneinfo/__init__.py
diff --git a/Python313_13_x86_Template/Lib/zoneinfo/_common.py b/Python314_4_x86_Template/Lib/zoneinfo/_common.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/zoneinfo/_common.py
rename to Python314_4_x86_Template/Lib/zoneinfo/_common.py
diff --git a/Python313_13_x86_Template/Lib/zoneinfo/_tzpath.py b/Python314_4_x86_Template/Lib/zoneinfo/_tzpath.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/zoneinfo/_tzpath.py
rename to Python314_4_x86_Template/Lib/zoneinfo/_tzpath.py
diff --git a/Python313_13_x86_Template/Lib/zoneinfo/_zoneinfo.py b/Python314_4_x86_Template/Lib/zoneinfo/_zoneinfo.py
similarity index 100%
rename from Python313_13_x86_Template/Lib/zoneinfo/_zoneinfo.py
rename to Python314_4_x86_Template/Lib/zoneinfo/_zoneinfo.py
diff --git a/Python314_4_x86_Template/NEWS.txt b/Python314_4_x86_Template/NEWS.txt
new file mode 100644
index 00000000..6b05d9f3
--- /dev/null
+++ b/Python314_4_x86_Template/NEWS.txt
@@ -0,0 +1,55267 @@
++++++++++++
+Python News
++++++++++++
+
+What's New in Python 3.14.4 final?
+==================================
+
+*Release date: 2026-04-07*
+
+Security
+--------
+
+- gh-145986: :mod:`xml.parsers.expat`: Fixed a crash caused by unbounded C
+  recursion when converting deeply nested XML content models with
+  :meth:`~xml.parsers.expat.xmlparser.ElementDeclHandler`. This addresses
+  :cve:`2026-4224`.
+
+- gh-145599: Reject control characters in :class:`http.cookies.Morsel`
+  :meth:`~http.cookies.Morsel.update` and
+  :meth:`~http.cookies.BaseCookie.js_output`. This addresses
+  :cve:`2026-3644`.
+
+- gh-145506: Fixes :cve:`2026-2297` by ensuring that
+  ``SourcelessFileLoader`` uses :func:`io.open_code` when opening ``.pyc``
+  files.
+
+- gh-144370: Disallow usage of control characters in status in
+  :mod:`wsgiref.handlers` to prevent HTTP header injections. Patch by
+  Benedikt Johannes.
+
+- gh-143930: Reject leading dashes in URLs passed to
+  :func:`webbrowser.open`.
+
+Core and Builtins
+-----------------
+
+- gh-148157: Fix an unlikely crash when parsing an invalid type comment for
+  function parameters. Found by OSS Fuzz in :oss-fuzz:`492782951`.
+ +- gh-148144: Initialize ``_PyInterpreterFrame.visited`` when copying + interpreter frames so incremental GC does not read an uninitialized byte + from generator and frame-object copies. + +- gh-146615: Fix a crash in :meth:`~object.__get__` for + :c:expr:`METH_METHOD` descriptors when an invalid (non-type) object is + passed as the second argument. Patch by Steven Sun. + +- gh-146308: Fixed several error handling issues in the + :mod:`!_remote_debugging` module, including safer validation of remote + ``int`` objects, clearer asyncio task chain failures, and cache cleanup + fixes that avoid leaking or double-freeing metadata on allocation failure. + Patch by Pablo Galindo. + +- gh-146128: Fix a bug which could cause constant values to be partially + corrupted in AArch64 JIT code. This issue is theoretical, and hasn't + actually been observed in unmodified Python interpreters. + +- gh-146250: Fixed a memory leak in :exc:`SyntaxError` when re-initializing + it. + +- gh-146245: Fixed reference leaks in :mod:`socket` when audit hooks raise + exceptions in :func:`socket.getaddrinfo` and :meth:`!socket.sendto`. + +- gh-146196: Fix potential Undefined Behavior in + :c:func:`PyUnicodeWriter_WriteASCII` by adding a zero-length check. Patch + by Shamil Abdulaev. + +- gh-146227: Fix wrong type in ``_Py_atomic_load_uint16`` in the C11 atomics + backend (``pyatomic_std.h``), which used a 32-bit atomic load instead of + 16-bit. Found by Mohammed Zuhaib. + +- gh-146056: Fix :func:`repr` for lists and tuples containing ``NULL``\ s. + +- gh-146092: Handle properly memory allocation failures on str and float + opcodes. Patch by Victor Stinner. + +- gh-146041: Fix free-threading scaling bottleneck in :func:`sys.intern` and + :c:func:`PyObject_SetAttr` by avoiding the interpreter-wide lock when the + string is already interned and immortalized. + +- gh-145990: ``python --help-env`` sections are now sorted by environment + variable name. + +- gh-145990: ``python --help-xoptions`` is now sorted by ``-X`` option name. + +- gh-145376: Fix GC tracking in ``structseq.__replace__()``. + +- gh-145792: Fix out-of-bounds access when invoking faulthandler on a + CPython build compiled without support for VLAs. + +- gh-142183: Avoid a pathological case where repeated calls at a specific + stack depth could be significantly slower. + +- gh-145779: Improve scaling of :func:`classmethod` and :func:`staticmethod` + calls in the free-threaded build by avoiding the descriptor ``__get__`` + call. + +- gh-145783: Fix an unlikely crash in the parser when certain errors were + erroneously not propagated. Found by OSS Fuzz in :oss-fuzz:`491369109`. + +- gh-145685: Improve scaling of type attribute lookups in the + :term:`free-threaded build` by avoiding contention on the internal type + lock. + +- gh-145701: Fix :exc:`SystemError` when ``__classdict__`` or + ``__conditional_annotations__`` is in a class-scope inlined comprehension. + Found by OSS Fuzz in :oss-fuzz:`491105000`. + +- gh-145713: Make :meth:`bytearray.resize` thread-safe in the free-threaded + build by using a critical section and calling the lock-held variant of the + resize function. + +- gh-145615: Fixed a memory leak in the :term:`free-threaded build` where + mimalloc pages could become permanently unreclaimable until the owning + thread exited. + +- gh-145566: In the free threading build, skip the stop-the-world pause when + reassigning ``__class__`` on a newly created object. 
+
+- gh-145335: Fix a crash in :func:`os.pathconf` when called with ``-1`` as
+  the path argument.
+
+- gh-145036: In free-threaded build, fix race condition when calling
+  :meth:`!__sizeof__` on a :class:`list`.
+
+- gh-145376: Fix reference leaks in various unusual error scenarios.
+
+- gh-145234: Fixed a ``SystemError`` in the parser when an encoding cookie
+  (for example, UTF-7) decodes to carriage returns (``\r``). Newlines are
+  now normalized after decoding in the string tokenizer.
+
+  Patch by Pablo Galindo.
+
+- gh-130555: Fix use-after-free in :meth:`dict.clear` when the dictionary
+  values are embedded in an object and a destructor causes re-entrant
+  mutation of the dictionary.
+
+- gh-145187: Fix compiler assertion fail when a type parameter bound
+  contains an invalid expression in a conditional block.
+
+- gh-145142: Fix a crash in the free-threaded build when the dictionary
+  argument to :meth:`str.maketrans` is concurrently modified.
+
+- gh-144872: Fix heap buffer overflow in the parser found by OSS-Fuzz.
+
+- gh-144766: Fix a crash in fork child process when perf support is enabled.
+
+- gh-144759: Fix undefined behavior in the lexer when ``start`` and
+  ``multi_line_start`` pointers are ``NULL`` in
+  ``_PyLexer_remember_fstring_buffers()`` and
+  ``_PyLexer_restore_fstring_buffers()``. The ``NULL`` pointer arithmetic
+  (``NULL - valid_pointer``) is now guarded with explicit ``NULL`` checks.
+
+- gh-144563: Fix interaction of the Tachyon profiler and :mod:`ctypes` and
+  other modules that load the Python shared library (if present) in an
+  independent map as this was causing the mechanism that loads the binary
+  information to be confused. Patch by Pablo Galindo.
+
+- gh-144601: Fix crash when importing a module whose ``PyInit`` function
+  raises an exception from a subinterpreter.
+
+- gh-144438: Align the QSBR thread state array to a 64-byte cache line
+  boundary to avoid false sharing in the :term:`free-threaded build`.
+
+- gh-144513: Fix potential deadlock when using critical sections during
+  stop-the-world pauses in the free-threaded build.
+
+- gh-144446: Fix data races in the free-threaded build when reading frame
+  object attributes while another thread is executing the frame.
+
+- gh-143636: Fix a crash when calling :class:`SimpleNamespace.__replace__()
+  <types.SimpleNamespace>` on non-namespace instances. Patch by Bénédikt
+  Tran.
+
+- gh-143650: Fix race condition in :mod:`importlib` where a thread could
+  receive a stale module reference when another thread's import fails.
+
+- gh-141732: Ensure the :meth:`~object.__repr__` for :exc:`ExceptionGroup`
+  and :exc:`BaseExceptionGroup` does not change when the exception sequence
+  that was originally passed in to its constructor is subsequently mutated.
+
+- gh-140594: Fix an out of bounds read when a single NUL character is read
+  from the standard input. Patch by Shamil Abdulaev.
+
+- gh-91636: While performing garbage collection, clear weakrefs to
+  unreachable objects that are created during running of finalizers. If
+  those weakrefs are not cleared, they could reveal unreachable
+  objects.
+
+- gh-130327: Fix erroneous clearing of an object's :attr:`~object.__dict__`
+  if overwritten at runtime.
+
+- gh-80667: Literals using the ``\N{name}`` escape syntax can now construct
+  CJK ideographs and Hangul syllables using case-insensitive names.
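
A minimal sketch of the gh-80667 change above (the code points are
illustrative; the lowercase spellings assume an interpreter with the fix)::

    # U+4E2D ('中') has the computed name CJK UNIFIED IDEOGRAPH-4E2D.
    print("\N{CJK UNIFIED IDEOGRAPH-4E2D}")   # worked before the change
    print("\N{cjk unified ideograph-4e2d}")   # now matched case-insensitively
    print("\N{hangul syllable gag}")          # likewise for Hangul ('각')
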
+
+Library
+-------
+
+- gh-144503: Fix a regression introduced in 3.14.3 and 3.13.12 where the
+  :mod:`multiprocessing` ``forkserver`` start method would fail with
+  :exc:`BrokenPipeError` when the parent process had a very large
+  :data:`sys.argv`. The argv is now passed to the forkserver as separate
+  command-line arguments rather than being embedded in the ``-c`` command
+  string, avoiding the operating system's per-argument length limit.
+
+- gh-146613: :mod:`itertools`: Fix a crash in :func:`itertools.groupby` when
+  the grouper iterator is concurrently mutated.
+
+- gh-146080: :mod:`ssl`: fix a crash when an SNI callback tries to use an
+  SSL object that has already been garbage-collected. Patch by Bénédikt
+  Tran.
+
+- gh-146556: Fix :func:`annotationlib.get_annotations` hanging indefinitely
+  when called with ``eval_str=True`` on a callable that has a circular
+  ``__wrapped__`` chain (e.g. ``f.__wrapped__ = f``). Cycle detection using
+  an id-based visited set now stops the traversal and falls back to the
+  globals found so far, mirroring the approach of :func:`inspect.unwrap`.
+
+- gh-146090: :mod:`sqlite3`: fix a crash when
+  :meth:`sqlite3.Connection.create_collation` fails with `SQLITE_BUSY
+  <https://www.sqlite.org/rescode.html#busy>`__. Patch by Bénédikt Tran.
+
+- gh-146090: :mod:`sqlite3`: properly raise :exc:`MemoryError` instead of
+  :exc:`SystemError` when a context callback fails to be allocated. Patch by
+  Bénédikt Tran.
+
+- gh-145633: Fix ``struct.pack('f', float)``: use :c:func:`PyFloat_Pack4` to
+  raise :exc:`OverflowError`. Patch by Sergey B Kirpichev and Victor
+  Stinner.
+
+- gh-146310: The :mod:`ensurepip` module no longer looks for ``pip-*.whl``
+  wheel packages in the current directory.
+
+- gh-146083: Update bundled `libexpat
+  <https://github.com/libexpat/libexpat>`_ to version 2.7.5.
+
+- gh-146076: :mod:`zoneinfo`: fix crashes when deleting ``_weak_cache`` from
+  a :class:`zoneinfo.ZoneInfo` subclass.
+
+- gh-146054: Limit the size of :func:`encodings.search_function` cache.
+  Found by OSS Fuzz in :oss-fuzz:`493449985`.
+
+- gh-146004: All :option:`-X` options from the Python command line are now
+  propagated to child processes spawned by :mod:`multiprocessing`, not just
+  a hard-coded subset. This makes the behavior consistent between default
+  "spawn" and "forkserver" start methods and the old "fork" start method.
+  The options that were previously not propagated are:
+  ``context_aware_warnings``, ``cpu_count``, ``disable-remote-debug``,
+  ``int_max_str_digits``, ``lazy_imports``, ``no_debug_ranges``,
+  ``pathconfig_warnings``, ``perf``, ``perf_jit``, ``presite``,
+  ``pycache_prefix``, ``thread_inherit_context``, and
+  ``warn_default_encoding``.
+
+- gh-145883: :mod:`zoneinfo`: Fix heap buffer overflow reads from malformed
+  TZif data. Found by OSS Fuzz, issues :oss-fuzz:`492245058` and
+  :oss-fuzz:`492230068`.
+
+- gh-145754: Request signature during mock autospec with ``FORWARDREF``
+  annotation format. This prevents runtime errors when an annotation uses a
+  name that is not defined at runtime.
+
+- gh-145750: Avoid undefined behaviour from signed integer overflow when
+  parsing format strings in the :mod:`struct` module. Found by OSS Fuzz in
+  :oss-fuzz:`488466741`.
+
+- gh-145492: Fix infinite recursion in :class:`collections.defaultdict`
+  ``__repr__`` when a ``defaultdict`` contains itself. Based on analysis by
+  KowalskiThomas in :gh:`145492`.
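
The gh-145492 fix above is easy to exercise; a sketch, assuming the fixed
behavior in which the cycle is rendered as an ellipsis placeholder::

    from collections import defaultdict

    d = defaultdict(list)
    d["self"] = d          # the defaultdict now contains itself
    print(repr(d))         # renders the cycle as '...' instead of recursing
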
+
+- gh-145623: Fix crash in :mod:`struct` when calling :func:`repr` or
+  ``__sizeof__()`` on an uninitialized :class:`struct.Struct` object created
+  via ``Struct.__new__()`` without calling ``__init__()``.
+
+- gh-145616: Detect Android sysconfig ABI correctly on 32-bit ARM Android on
+  64-bit ARM kernel.
+
+- gh-145551: Fix InvalidStateError when cancelling process created by
+  :func:`asyncio.create_subprocess_exec` or
+  :func:`asyncio.create_subprocess_shell`. Patch by Daan De Meyer.
+
+- gh-145446: Now :mod:`functools` is safer in free-threaded build when using
+  keywords in :func:`functools.partial`.
+
+- gh-145417: :mod:`venv`: Prevent incorrect preservation of SELinux context
+  when copying the ``Activate.ps1`` script. The script inherited the SELinux
+  security context of the system template directory, rather than the
+  destination project directory.
+
+- gh-145376: Fix double free and null pointer dereference in unusual error
+  scenarios in :mod:`hashlib` and :mod:`hmac` modules.
+
+- gh-145301: :mod:`hmac`: fix a crash when the initialization of the
+  underlying C extension module fails.
+
+- gh-145301: :mod:`hashlib`: fix a crash when the initialization of the
+  underlying C extension module fails.
+
+- gh-145264: Base64 decoder (see :func:`binascii.a2b_base64`,
+  :func:`base64.b64decode`, etc) no longer ignores excess data after the
+  first padded quad in non-strict (default) mode. Instead, in conformance
+  with :rfc:`4648`, section 3.3, it now ignores the pad character, "=", if
+  it is present before the end of the encoded data.
+
+- gh-145158: Avoid undefined behaviour from signed integer overflow when
+  parsing format strings in the :mod:`struct` module.
+
+- gh-144984: Fix crash in
+  :meth:`xml.parsers.expat.xmlparser.ExternalEntityParserCreate` when an
+  allocation fails. The error paths could dereference NULL ``handlers`` and
+  double-decrement the parent parser's reference count.
+
+- gh-88091: Fix :func:`unicodedata.decomposition` for Hangul characters.
+
+- gh-144986: Fix a memory leak in :func:`atexit.register`. Patch by Shamil
+  Abdulaev.
+
+- gh-144777: Fix data races in :class:`io.IncrementalNewlineDecoder` in the
+  :term:`free-threaded build`.
+
+- gh-144809: Make :class:`collections.deque` copy atomic in the
+  :term:`free-threaded build`.
+
+- gh-144835: Added missing explanations for some parameters in
+  :func:`glob.glob` and :func:`glob.iglob`.
+
+- gh-144833: Fixed a use-after-free in :mod:`ssl` when ``SSL_new()`` returns
+  NULL in ``newPySSLSocket()``. The error was reported via a dangling
+  pointer after the object had already been freed.
+
+- gh-144782: Fix :class:`argparse.ArgumentParser` to be :mod:`pickleable
+  <pickle>`.
+
+- gh-144259: Fix inconsistent display of long multiline pasted content in
+  the REPL.
+
+- gh-144156: Fix the folding of headers by the :mod:`email` library when
+  :rfc:`2047` encoded words are used. Now whitespace is correctly preserved
+  and also correctly added between adjacent encoded words. The latter
+  property was broken by the fix for gh-92081, which mostly fixed previous
+  failures to preserve whitespace.
+
+- gh-66305: Fixed a hang on Windows in the :mod:`tempfile` module when
+  trying to create a temporary file or subdirectory in a non-writable
+  directory.
+
+- gh-140814: :func:`multiprocessing.freeze_support` no longer sets the
+  default start method as a side effect, which previously caused a
+  subsequent :func:`multiprocessing.set_start_method` call to raise
+  :exc:`RuntimeError`.
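
A short sketch of the gh-140814 interaction described just above (a
hypothetical script; previously the second call could raise
:exc:`RuntimeError`)::

    import multiprocessing

    if __name__ == "__main__":
        multiprocessing.freeze_support()           # no longer pins the start method
        multiprocessing.set_start_method("spawn")  # so an explicit choice still works
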
+
+- gh-144475: Calling :func:`repr` on :func:`functools.partial` is now safer
+  when the partial object's internal attributes are replaced while the
+  string representation is being generated.
+
+- gh-144538: Bump the version of pip bundled in ensurepip to version 26.0.1.
+
+- gh-144494: Fix performance regression in :func:`asyncio.all_tasks` on
+  :term:`free-threaded builds <free threading>`. Patch by Kumar Aditya.
+
+- gh-144316: Fix crash in ``_remote_debugging`` that caused
+  ``test_external_inspection`` to intermittently fail. Patch by Taegyun Kim.
+
+- gh-144363: Update bundled `libexpat
+  <https://github.com/libexpat/libexpat>`_ to 2.7.4.
+
+- gh-143637: Fixed a crash in socket.sendmsg() that could occur if ancillary
+  data is mutated re-entrantly during argument parsing.
+
+- gh-143543: Fix a crash in itertools.groupby that could occur when a
+  user-defined :meth:`~object.__eq__` method re-enters the iterator during
+  key comparison.
+
+- gh-140652: Fix a crash in :func:`!_interpchannels.list_all` after closing
+  a channel.
+
+- gh-143698: Allow *scheduler* and *setpgroup* arguments to be explicitly
+  :const:`None` when calling :func:`os.posix_spawn` or
+  :func:`os.posix_spawnp`. Patch by Bénédikt Tran.
+
+- gh-143698: Raise :exc:`TypeError` instead of :exc:`SystemError` when the
+  *scheduler* in :func:`os.posix_spawn` or :func:`os.posix_spawnp` is not a
+  tuple. Patch by Bénédikt Tran.
+
+- gh-142516: :mod:`ssl`: fix reference leaks in :class:`ssl.SSLContext`
+  objects. Patch by Bénédikt Tran.
+
+- gh-143304: Fix :class:`ctypes.CDLL` to honor the ``handle`` parameter on
+  POSIX systems.
+
+- gh-142781: :mod:`zoneinfo`: fix a crash when instantiating
+  :class:`~zoneinfo.ZoneInfo` objects for which the internal class-level
+  cache is inconsistent.
+
+- gh-142763: Fix a race condition between :class:`zoneinfo.ZoneInfo`
+  creation and :func:`zoneinfo.ZoneInfo.clear_cache` that could raise
+  :exc:`KeyError`.
+
+- gh-142787: Fix assertion failure in :mod:`sqlite3` blob subscript when
+  slicing with indices that result in an empty slice.
+
+- gh-142352: Fix :meth:`asyncio.StreamWriter.start_tls` to transfer buffered
+  data from :class:`~asyncio.StreamReader` to the SSL layer, preventing data
+  loss when upgrading a connection to TLS mid-stream (e.g., when
+  implementing PROXY protocol support).
+
+- gh-141707: Don't change :class:`tarfile.TarInfo` type from ``AREGTYPE`` to
+  ``DIRTYPE`` when parsing GNU long name or link headers.
+
+- gh-139933: Improve :exc:`AttributeError` suggestions for classes with a
+  custom :meth:`~object.__dir__` method returning a list of unsortable
+  values. Patch by Bénédikt Tran.
+
+- gh-137335: Get rid of any possibility of a name conflict for named pipes
+  in :mod:`multiprocessing` and :mod:`asyncio` on Windows, no matter how
+  small.
+
+- gh-80667: Support lookup for Tangut Ideographs in :mod:`unicodedata`.
+
+- bpo-40243: Fix :meth:`!unicodedata.ucd_3_2_0.numeric` for non-decimal
+  values.
+
+Documentation
+-------------
+
+- gh-126676: Expand :mod:`argparse` documentation for ``type=bool`` with a
+  demonstration of the surprising behavior and pointers to common
+  alternatives (see the sketch after this section).
+
+- gh-145649: Fix text wrapping and formatting of ``-X`` option descriptions
+  in the :manpage:`python(1)` man page by using proper roff markup.
+
+- gh-145450: Document missing public :class:`wave.Wave_write` getter
+  methods.
+
+- gh-136246: A new "Improve this page" link is available in the left-hand
+  sidebar of the docs, offering links to create GitHub issues, discussion
+  forum posts, or pull requests.
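
The ``type=bool`` surprise that the gh-126676 documentation entry refers to,
sketched (``--flag`` is a made-up option name)::

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--flag", type=bool)
    args = parser.parse_args(["--flag", "False"])
    print(args.flag)   # True: bool("False") is True, as any non-empty string is truthy
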
+
+Tests
+-----
+
+- gh-144418: The Android testbed's emulator RAM has been increased from 2 GB
+  to 4 GB.
+
+- gh-146202: Fix a race condition in regrtest: make sure that the temporary
+  directory is created in the worker process. Previously, temp_cwd() could
+  fail on Windows if the "build" directory was not created. Patch by Victor
+  Stinner.
+
+- gh-144739: When Python was compiled with system expat older than 2.7.2 but
+  tests run with newer expat, still skip
+  :class:`!test.test_pyexpat.MemoryProtectionTest`.
+
+Build
+-----
+
+- gh-146541: The Android testbed can now be built for 32-bit ARM and x86
+  targets.
+
+- gh-146498: The iOS XCframework build script now ensures libpython isn't
+  included in installed app content, and is more robust in identifying
+  standard library binary content that requires processing.
+
+- gh-146450: The Android build script was modified to improve parity with
+  other platform build scripts.
+
+- gh-146446: The clean target for the Apple/iOS XCframework build script is
+  now more selective when targeting a single architecture.
+
+- gh-145801: When Python build is optimized with GCC using PGO, use
+  ``-fprofile-update=atomic`` option to use atomic operations when updating
+  profile information. This option reduces the risk of gcov Data Files
+  (.gcda) corruption which can cause random GCC crashes. Patch by Victor
+  Stinner.
+
+Windows
+-------
+
+- gh-145307: Defers loading of the ``psapi.dll`` module until it is used by
+  :func:`ctypes.util.dllist`.
+
+- gh-144551: Updated bundled version of OpenSSL to 3.0.19.
+
+- gh-140131: Fix REPL cursor position on Windows when module completion
+  suggestion line hits console width.
+
+macOS
+-----
+
+- gh-144551: Update macOS installer to use OpenSSL 3.0.19.
+
+- gh-137586: Invoke :program:`osascript` with absolute path in
+  :mod:`webbrowser` and :mod:`!turtledemo`.
+
+C API
+-----
+
+- gh-146056: :c:func:`PyUnicodeWriter_WriteRepr` now supports ``NULL``
+  argument.
+
+- gh-145010: Use GCC dialect alternatives for inline assembly in
+  ``object.h`` so that the Python headers compile correctly with
+  ``-masm=intel``.
+
+- gh-144981: Made :c:func:`PyUnstable_Code_SetExtra`,
+  :c:func:`PyUnstable_Code_GetExtra`, and
+  :c:func:`PyUnstable_Eval_RequestCodeExtraIndex` thread-safe on the
+  :term:`free threaded <free threading>` build.
+
+
+What's New in Python 3.14.3 final?
+==================================
+
+*Release date: 2026-02-03*
+
+Windows
+-------
+
+- gh-128067: Fix a bug in PyREPL on Windows where output without a trailing
+  newline was overwritten by the next prompt.
+
+Tools/Demos
+-----------
+
+- gh-142095: Make gdb 'py-bt' command use frame from thread local state when
+  available. Patch by Sam Gross and Victor Stinner.
+
+Tests
+-----
+
+- gh-144415: The Android testbed now distinguishes between stdout/stderr
+  messages which were triggered by a newline, and those triggered by a
+  manual call to ``flush``. This fixes logging of progress indicators and
+  similar content.
+
+- gh-143460: Skip tests relying on infinite recursion if stack size is
+  unlimited.
+
+- gh-65784: Add support for parametrized resource ``wantobjects`` in
+  regrtests, which allows running Tkinter tests with the specified value of
+  :data:`!tkinter.wantobjects`, for example ``-u wantobjects=0``.
+
+- gh-143553: Add support for parametrized resources, such as ``-u
+  xpickle=2.7``.
+
+- gh-142836: Accommodated Solaris in
+  ``test_pdb.test_script_target_anonymous_pipe``.
+
+- bpo-31391: Forward-port test_xpickle from Python 2 to Python 3 and add the
+  resource back to test's command line.
+
+Security
+--------
+
+- gh-144125: :mod:`~email.generator.BytesGenerator` will now refuse to
+  serialize (write) headers that are unsafely folded or delimited; see
+  :attr:`~email.policy.Policy.verify_generated_headers`. (Contributed by Bas
+  Bloemsaat and Petr Viktorin in :gh:`121650`).
+
+- gh-143935: Fixed a bug in the folding of comments when flattening an email
+  message using a modern email policy. Comments consisting of a very long
+  sequence of non-foldable characters could trigger a forced line wrap that
+  omitted the required leading space on the continuation line, causing the
+  remainder of the comment to be interpreted as a new header field. This
+  enabled header injection with carefully crafted inputs.
+
+- gh-143925: Reject control characters in ``data:`` URL media types.
+
+- gh-143919: Reject control characters in :class:`http.cookies.Morsel`
+  fields and values.
+
+- gh-143916: Reject C0 control characters within wsgiref.headers.Headers
+  fields, values, and parameters.
+
+Library
+-------
+
+- gh-144380: Improve performance of :class:`io.BufferedReader` line
+  iteration by ~49%.
+
+- gh-144169: Fix three crashes when non-string keyword arguments are
+  supplied to objects in the :mod:`ast` module.
+
+- gh-144100: Fixed a crash in ctypes when using a deprecated
+  ``POINTER(str)`` type in ``argtypes``. Instead of aborting, ctypes now
+  raises a proper Python exception when the pointer target type is
+  unresolved.
+
+- gh-144050: Fix :func:`stat.filemode` in the pure-Python implementation to
+  avoid misclassifying invalid mode values as block devices.
+
+- gh-144023: Fixed validation of file descriptor 0 in posix functions when
+  used with follow_symlinks parameter.
+
+- gh-143999: Fix an issue where :func:`inspect.getgeneratorstate` and
+  :func:`inspect.getcoroutinestate` could fail for generators wrapped by
+  :func:`types.coroutine` in the suspended state.
+
+- gh-143831: :class:`annotationlib.ForwardRef` objects are now hashable when
+  created from annotation scopes with closures. Previously, hashing such
+  objects would throw an exception. Patch by Bartosz Sławecki.
+
+- gh-143874: Fixed a bug in :mod:`pdb` where expression results were not
+  sent back to the remote client.
+
+- gh-143880: Fix data race in :func:`functools.partial` in the :term:`free
+  threading` build.
+
+- gh-143706: Fix :mod:`multiprocessing` forkserver so that :data:`sys.argv`
+  is correctly set before ``__main__`` is preloaded. Previously,
+  :data:`sys.argv` was empty during main module import in forkserver child
+  processes. This fixes a regression introduced in 3.13.8 and 3.14.1. Root
+  caused by Aaron Wieczorek, test provided by Thomas Watson, thanks!
+
+- gh-143638: Forbid reentrant calls of the :class:`pickle.Pickler` and
+  :class:`pickle.Unpickler` methods for the C implementation. Previously,
+  this could cause a crash or data corruption; now concurrent calls of
+  methods of the same object raise :exc:`RuntimeError`.
+
+- gh-78724: Raise :exc:`RuntimeError` when a user attempts to call methods
+  on half-initialized :class:`~struct.Struct` objects, for example those
+  created by ``Struct.__new__(Struct)``. Patch by Sergey B Kirpichev.
+
+- gh-143196: Fix crash when the internal encoder object returned by
+  undocumented function :func:`!json.encoder.c_make_encoder` was called with
+  non-zero second (*_current_indent_level*) argument.
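
The gh-78724 entry above is straightforward to reproduce (a sketch, assuming
a release that includes the fix)::

    import struct

    s = struct.Struct.__new__(struct.Struct)   # __init__ never runs, no format set
    try:
        s.pack(1)
    except RuntimeError:
        print("half-initialized Struct now raises instead of crashing")
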
+
+- gh-143191: :func:`_thread.stack_size` now raises :exc:`ValueError` if the
+  stack size is too small. Patch by Victor Stinner.
+
+- gh-143602: Fix an inconsistency issue in :meth:`~io.RawIOBase.write` that
+  leads to unexpected buffer overwrite by deduplicating the buffer exports.
+
+- gh-143547: Fix :func:`sys.unraisablehook` when the hook raises an
+  exception and changes :func:`sys.unraisablehook`: hold a strong reference
+  to the old hook. Patch by Victor Stinner.
+
+- gh-143517: :func:`annotationlib.get_annotations` no longer raises a
+  :exc:`SyntaxError` when evaluating a stringified starred annotation that
+  starts with one or more whitespace characters followed by a ``*``. Patch
+  by Bartosz Sławecki.
+
+- gh-143378: Fix use-after-free crashes when a :class:`~io.BytesIO` object
+  is concurrently mutated during :meth:`~io.RawIOBase.write` or
+  :meth:`~io.IOBase.writelines`.
+
+- gh-143346: Fix incorrect wrapping of the Base64 data in
+  :class:`!plistlib._PlistWriter` when the indent contains a mix of tabs and
+  spaces.
+
+- gh-143310: :mod:`tkinter`: fix a crash when a Python :class:`list` is
+  mutated during the conversion to a Tcl object (e.g., when setting a Tcl
+  variable). Patch by Bénédikt Tran.
+
+- gh-143309: Fix a crash in :func:`os.execve` on non-Windows platforms when
+  given a custom environment mapping which is then mutated during parsing.
+  Patch by Bénédikt Tran.
+
+- gh-143308: :mod:`pickle`: fix use-after-free crashes when a
+  :class:`~pickle.PickleBuffer` is concurrently mutated by a custom buffer
+  callback during pickling. Patch by Bénédikt Tran and Aaron Wieczorek.
+
+- gh-143237: Fix support of named pipes in the rotating :mod:`logging`
+  handlers.
+
+- gh-143249: Fix possible buffer leaks in Windows overlapped I/O on error
+  handling.
+
+- gh-143241: :mod:`zoneinfo`: fix infinite loop in :meth:`ZoneInfo.from_file
+  <zoneinfo.ZoneInfo.from_file>` when parsing a malformed TZif file. Patch
+  by Fatih Celik.
+
+- gh-142830: :mod:`sqlite3`: fix use-after-free crashes when the
+  connection's callbacks are mutated during a callback execution. Patch by
+  Bénédikt Tran.
+
+- gh-143200: :mod:`xml.etree.ElementTree`: fix use-after-free crashes in
+  :meth:`~object.__getitem__` and :meth:`~object.__setitem__` methods of
+  :class:`~xml.etree.ElementTree.Element` when the element is concurrently
+  mutated. Patch by Bénédikt Tran.
+
+- gh-142195: Updated timeout evaluation logic in :mod:`subprocess` to be
+  compatible with deterministic environments like Shadow where time moves
+  exactly as requested.
+
+- gh-142164: Fix the ctypes bitfield overflow error message to report the
+  correct offset and size calculation.
+
+- gh-143145: Fixed a possible reference leak in ctypes when constructing
+  results with multiple output parameters on error.
+
+- gh-122431: Corrected the error message in
+  :func:`readline.append_history_file` to state that ``nelements`` must be
+  non-negative instead of positive.
+
+- gh-143004: Fix a potential use-after-free in
+  :meth:`collections.Counter.update` when user code mutates the Counter
+  during an update.
+
+- gh-143046: The :mod:`asyncio` REPL no longer prints copyright and version
+  messages in the quiet mode (:option:`-q`). Patch by Bartosz Sławecki.
+
+- gh-140648: The :mod:`asyncio` REPL now respects the :option:`-I` flag
+  (isolated mode). Previously, it would load and execute
+  :envvar:`PYTHONSTARTUP` even if the flag was set. Contributed by Bartosz
+  Sławecki.
+
+- gh-142991: Fixed socket operations such as recvfrom() and sendto() for
+  FreeBSD divert(4) socket.
+
+- gh-143010: Fixed a bug in :mod:`mailbox` where the precise timing of an
+  external event could result in the library opening an existing file
+  instead of a file it expected to create.
+
+- gh-142881: Fix concurrent and reentrant call of :func:`atexit.unregister`.
+
+- gh-112127: Fix possible use-after-free in :func:`atexit.unregister` when
+  the callback is unregistered during comparison.
+
+- gh-142783: Fix zoneinfo use-after-free with descriptor _weak_cache. A
+  descriptor as _weak_cache could cause crashes during object creation. The
+  fix ensures proper reference counting for descriptor-provided objects.
+
+- gh-142754: Add the *ownerDocument* attribute to :mod:`xml.dom.minidom`
+  elements and attributes created by directly instantiating the ``Element``
+  or ``Attr`` class. Note that this way of creating nodes is not supported;
+  creator functions like :py:meth:`xml.dom.Document.documentElement` should
+  be used instead.
+
+- gh-142784: The :mod:`asyncio` REPL now properly closes the loop upon the
+  end of interactive session. Previously, it could cause surprising
+  warnings. Contributed by Bartosz Sławecki.
+
+- gh-142555: :mod:`array`: fix a crash in ``a[i] = v`` when converting *i*
+  to an index via :meth:`i.__index__ <object.__index__>` or
+  :meth:`i.__float__ <object.__float__>` mutates the array.
+
+- gh-142594: Fix crash in ``TextIOWrapper.close()`` when the underlying
+  buffer's ``closed`` property calls :meth:`~io.TextIOBase.detach`.
+
+- gh-142451: :mod:`hmac`: Ensure that the :attr:`HMAC.block_size
+  <hmac.HMAC.block_size>` attribute is correctly copied by :meth:`HMAC.copy
+  <hmac.HMAC.copy>`. Patch by Bénédikt Tran.
+
+- gh-142495: :class:`collections.defaultdict` now prioritizes
+  :meth:`~object.__setitem__` when inserting default values from
+  ``default_factory``. This prevents race conditions where a default value
+  would overwrite a value set before ``default_factory`` returns.
+
+- gh-142651: :mod:`unittest.mock`: fix a thread safety issue where
+  :attr:`Mock.call_count <unittest.mock.Mock.call_count>` may return
+  inaccurate values when the mock is called concurrently from multiple
+  threads.
+
+- gh-142595: Added type check during initialization of the :mod:`decimal`
+  module to prevent a crash in case of broken stdlib. Patch by Sergey B
+  Kirpichev.
+
+- gh-142556: Fix crash when a task gets re-registered during finalization in
+  :mod:`asyncio`. Patch by Kumar Aditya.
+
+- gh-123241: Avoid reference count operations in garbage collection of
+  :mod:`ctypes` objects.
+
+- gh-142517: The non-``compat32`` :mod:`email` policies now correctly handle
+  refolding encoded words that contain bytes that can not be decoded in
+  their specified character set. Previously this resulted in an encoding
+  exception during folding.
+
+- gh-112527: The help text for required options in :mod:`argparse` is no
+  longer extended with " (default: None)".
+
+- gh-142346: Fix usage formatting for mutually exclusive groups in
+  :mod:`argparse` when they are preceded by positional arguments or followed
+  or intermixed with other optional arguments.
+
+- gh-142315: Pdb can now run scripts from anonymous pipes used in process
+  substitution. Patch by Bartosz Sławecki.
+
+- gh-142332: Fix usage formatting for positional arguments in mutually
+  exclusive groups in :mod:`argparse`.
+
+- gh-142282: Fix :func:`winreg.QueryValueEx` to not accidentally read
+  garbage buffer under race condition.
+
+- gh-75949: Fix :mod:`argparse` to preserve ``|`` separators in mutually
+  exclusive groups when the usage line wraps due to length.
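
For context on the mutually exclusive group entries above (gh-142346,
gh-142332, gh-75949), this is the kind of usage line they format (``tool``
and both options are made-up names)::

    import argparse

    parser = argparse.ArgumentParser(prog="tool")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--json", action="store_true")
    group.add_argument("--xml", action="store_true")
    parser.print_usage()   # usage: tool [-h] [--json | --xml]
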
+
+- gh-142267: Improve :mod:`argparse` performance by caching the formatter
+  used for argument validation.
+
+- gh-68552: ``MisplacedEnvelopeHeaderDefect`` and ``Missing header name``
+  defects are now correctly passed to the ``handle_defect`` method of
+  ``policy`` in :class:`~email.parser.FeedParser`.
+
+- gh-142006: Fix a bug in the :mod:`email.policy.default` folding algorithm
+  which incorrectly resulted in a doubled newline when a line ending at
+  exactly max_line_length was followed by an unfoldable token.
+
+- gh-105836: Fix :meth:`asyncio.run_coroutine_threadsafe` leaving underlying
+  cancelled asyncio task running.
+
+- gh-139971: :mod:`pydoc`: Ensure that the link to the online documentation
+  of a :term:`stdlib` module is correct.
+
+- gh-139262: Some keystrokes can be swallowed in the new ``PyREPL`` on
+  Windows, especially when used together with the ALT key. Fix by Chris
+  Eibl.
+
+- gh-138897: Improved :data:`license`/:data:`copyright`/:data:`credits`
+  display in the :term:`REPL`: now uses a pager.
+
+- gh-79986: Add parsing for ``References`` and ``In-Reply-To`` headers to
+  the :mod:`email` library that parses the header content as lists of
+  message id tokens. This prevents them from being folded incorrectly.
+
+- gh-136282: Add support for :const:`~configparser.UNNAMED_SECTION` when
+  creating a section via the mapping protocol access (see the sketch
+  below).
+
+- gh-109263: Starting a process from spawn context in :mod:`multiprocessing`
+  no longer sets the start method globally.
+
+- gh-133253: Fix thread-safety issues in :mod:`linecache`.
+
+- gh-132715: Skip writing objects during marshalling once a failure has
+  occurred.
+
+IDLE
+----
+
+- gh-143774: Better explain the operation of Format / Format Paragraph.
+
+Documentation
+-------------
+
+- gh-140806: Add documentation for :func:`enum.bin`.
+
+Core and Builtins
+-----------------
+
+- gh-144307: Prevent a reference leak in module teardown at interpreter
+  finalization.
+
+- gh-144194: Fix error handling in perf jitdump initialization on memory
+  allocation failure.
+
+- gh-144012: Check if the result is ``NULL`` in ``BINARY_OP_EXTEND`` opcode.
+
+- gh-141805: Fix crash in :class:`set` when objects with the same hash are
+  concurrently added to the set after removing an element with the same hash
+  while the set still contains elements with the same hash.
+
+- gh-143670: Fixes a crash in ``ga_repr_items_list`` function.
+
+- gh-143377: Fix a crash in :func:`!_interpreters.capture_exception` when
+  the exception is incorrectly formatted. Patch by Bénédikt Tran.
+
+- gh-136924: The interactive help mode in the :term:`REPL` no longer
+  incorrectly syntax highlights text input as Python code. Contributed by
+  Olga Matoula.
+
+- gh-143189: Fix crash when inserting a non-:class:`str` key into a split
+  table dictionary when the key matches an existing key in the split table
+  but has no corresponding value in the dict.
+
+- gh-143228: Fix use-after-free in perf trampoline when toggling profiling
+  while threads are running or during interpreter finalization with daemon
+  threads active. The fix uses reference counting to ensure trampolines are
+  not freed while any code object could still reference them. Patch by Pablo
+  Galindo.
+
+- gh-142664: Fix a use-after-free crash in :meth:`memoryview.__hash__` when
+  the ``__hash__`` method of the referenced object mutates that object or
+  the view. Patch by Bénédikt Tran.
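
The mapping-protocol creation that the gh-136282 entry above enables,
sketched (key and value are arbitrary; requires ``allow_unnamed_section``)::

    import configparser

    cp = configparser.ConfigParser(allow_unnamed_section=True)
    cp[configparser.UNNAMED_SECTION] = {"top_level_key": "1"}
    print(cp[configparser.UNNAMED_SECTION]["top_level_key"])   # '1'
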
+
+- gh-142557: Fix a use-after-free crash in :meth:`bytearray.__mod__` when
+  the :class:`!bytearray` is mutated while formatting the ``%``-style
+  arguments. Patch by Bénédikt Tran.
+
+- gh-143195: Fix use-after-free crashes in :meth:`bytearray.hex` and
+  :meth:`memoryview.hex` when the separator's :meth:`~object.__len__`
+  mutates the original object. Patch by Bénédikt Tran.
+
+- gh-142975: Fix crash after unfreezing all objects tracked by the garbage
+  collector on the :term:`free threaded <free threading>` build.
+
+- gh-143135: Set :data:`sys.flags.inspect` to ``1`` when
+  :envvar:`PYTHONINSPECT` is ``0``. Previously, it was set to ``0`` in this
+  case.
+
+- gh-143003: Fix an overflow of the shared empty buffer in
+  :meth:`bytearray.extend` when ``__length_hint__()`` returns 0 for a
+  non-empty iterator.
+
+- gh-143006: Fix a possible assertion error when comparing negative
+  non-integer ``float`` and ``int`` with the same number of bits in the
+  integer part.
+
+- gh-143057: Avoid locking in :c:func:`PyTraceMalloc_Track` and
+  :c:func:`PyTraceMalloc_Untrack` when :mod:`tracemalloc` is not enabled.
+
+- gh-142776: Fix a file descriptor leak in import.c.
+
+- gh-142829: Fix a use-after-free crash in :class:`contextvars.Context`
+  comparison when a custom ``__eq__`` method modifies the context via
+  :meth:`~contextvars.ContextVar.set`.
+
+- gh-142766: Clear the frame of a generator when :meth:`generator.close` is
+  called.
+
+- gh-142737: Tracebacks will be displayed in fallback mode even if
+  :func:`io.open` is lost. Previously, this would crash the interpreter.
+  Patch by Bartosz Sławecki.
+
+- gh-142554: Fix a crash in :func:`divmod` when :func:`!_pylong.int_divmod`
+  does not return a tuple of length two exactly. Patch by Bénédikt Tran.
+
+- gh-142560: Fix use-after-free in :class:`bytearray` search-like methods
+  (:meth:`~bytearray.find`, :meth:`~bytearray.count`,
+  :meth:`~bytearray.index`, :meth:`~bytearray.rindex`, and
+  :meth:`~bytearray.rfind`) by marking the storage as exported which causes
+  reallocation attempts to raise :exc:`BufferError`. For
+  :func:`~operator.contains`, :meth:`~bytearray.split`, and
+  :meth:`~bytearray.rsplit` the :ref:`buffer protocol <bufferobjects>` is
+  used for this.
+
+- gh-142531: Fix a free-threaded GC performance regression. If there are
+  many untracked tuples, the GC will run too often, resulting in poor
+  performance. The fix is to include untracked tuples in the "long lived"
+  object count. The number of frozen objects is also now included since the
+  free-threaded GC must scan those too.
+
+- gh-142402: Fix reference counting when adjacent literal parts are merged
+  while constructing :class:`string.templatelib.Template`, preventing the
+  displaced string object from leaking.
+
+- gh-133932: Fix crash in the free threading build when clearing frames that
+  hold tagged integers.
+
+- gh-142343: Fix SIGILL crash on m68k due to incorrect assembly constraint.
+
+- gh-100964: Fix reference cycle in exhausted generator frames. Patch by
+  Savannah Ostrowski.
+
+- gh-69605: Fix edge-cases around already imported modules in the
+  :term:`REPL` auto-completion of imports.
+
+- gh-138568: Adjusted the built-in :func:`help` function so that empty
+  inputs are ignored in interactive mode.
+
+- gh-137007: Fix a bug during JIT compilation failure which caused garbage
+  collection debug assertions to fail.
+
+C API
+-----
+
+- gh-142589: Fix :c:func:`PyUnstable_Object_IsUniqueReferencedTemporary()`
+  handling of tagged ints on the interpreter stack.
+
+- gh-142571: :c:func:`!PyUnstable_CopyPerfMapFile` now checks that opening
+  the file succeeded before flushing.
+
+Build
+-----
+
+- gh-142454: When calculating the digest of the JIT stencils input, sort
+  the hashed files by filename before adding their content to the hasher.
+  This ensures deterministic hash input and hence a deterministic hash,
+  independent of filesystem order.
+
+- gh-141808: When running ``make clean-retain-profile``, keep the generated
+  JIT stencils. That way, the stencils are not generated twice when
+  profile-guided optimization (PGO) is used. It also allows distributors to
+  supply their own pre-built JIT stencils.
+
+- gh-138061: Ensure reproducible builds by making JIT stencil header
+  generation deterministic.
+
+
+What's New in Python 3.14.2 final?
+==================================
+
+*Release date: 2025-12-05*
+
+Security
+--------
+
+- gh-142145: Remove quadratic behavior in ``xml.minidom`` node ID cache
+  clearing.
+
+- gh-119452: Fix a potential memory denial of service in the
+  :mod:`http.server` module. When a malicious user is connected to the CGI
+  server on Windows, it could cause an arbitrary amount of memory to be
+  allocated. This could have led to symptoms including a :exc:`MemoryError`,
+  swapping, out of memory (OOM) killed processes or containers, or even
+  system crashes.
+
+Library
+-------
+
+- gh-140797: Revert changes to the undocumented :class:`!re.Scanner` class.
+  Capturing groups are still allowed for backward compatibility, although
+  using them can lead to incorrect results. They will be forbidden in
+  future Python versions.
+
+- gh-142206: The resource tracker in the :mod:`multiprocessing` module now
+  uses the original communication protocol, as in Python 3.14.0 and below,
+  by default. This avoids issues with upgrading Python while it is running.
+  (Note that such 'in-place' upgrades are not tested.) The tracker remains
+  compatible with subprocesses that use the new protocol (that is,
+  subprocesses using Python 3.13.10, 3.14.1 and 3.15).
+
+- gh-142214: Fix two regressions in :mod:`dataclasses` in Python 3.14.1
+  related to annotations.
+
+  * An exception is no longer raised if ``slots=True`` is used and the
+    ``__init__`` method does not have an ``__annotate__`` attribute
+    (likely because ``init=False`` was used); see the sketch below.
+
+  * An exception is no longer raised if annotations are requested on the
+    ``__init__`` method and one of the fields is not present in the class
+    annotations. This can occur in certain dynamic scenarios.
+
+  Patch by Jelle Zijlstra.
+
+Core and Builtins
+-----------------
+
+- gh-142218: Fix crash when inserting into a split table dictionary with a
+  non-:class:`str` key that matches an existing key.
+
+Library
+-------
+
+- gh-116738: Fix a :mod:`cmath` data race when initializing trigonometric
+  tables with subinterpreters.
+
+
+What's New in Python 3.14.1 final?
+==================================
+
+*Release date: 2025-12-02*
+
+Windows
+-------
+
+- gh-139810: Installing with ``py install 3[.x]-dev`` will now select final
+  versions as well as prereleases.
+
+Tools/Demos
+-----------
+
+- gh-141692: Each slice of an iOS XCframework now contains a ``lib`` folder
+  that contains a symlink to the libpython dylib. This allows binary
+  modules to be compiled for iOS using dynamic library linking, rather than
+  Framework linking.
+
+- gh-141442: The iOS testbed now correctly handles test arguments that
+  contain spaces.
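+
+A minimal sketch of the first gh-142214 case above (3.14.2): combining
+``init=False`` with ``slots=True`` no longer raises while the slotted
+replacement class is synthesized. The class is illustrative::
+
+    from dataclasses import dataclass
+
+    @dataclass(init=False, slots=True)
+    class Config:
+        debug: bool = False
+
+    print(Config)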
+
+- gh-140702: The iOS testbed app will now expose the ``GITHUB_ACTIONS``
+  environment variable to iOS apps being tested.
+
+- gh-137484: Have ``Tools/wasm/wasi`` put the build Python into a directory
+  named after the build triple instead of "build".
+
+- gh-137248: Add a ``--logdir`` option to ``Tools/wasm/wasi`` for
+  specifying where to write log files.
+
+- gh-137243: Have ``Tools/wasm/wasi`` detect a WASI SDK install in ``/opt``
+  when it was directly extracted from a release tarball.
+
+Tests
+-----
+
+- gh-140482: Preserve and restore the state of ``stty echo`` as part of the
+  test environment.
+
+- gh-140082: Update ``python -m test`` to set ``FORCE_COLOR=1`` when being
+  run with color enabled, so that :mod:`unittest`, which it runs with
+  redirected output, will output in color.
+
+- gh-139208: Fix regrtest ``--fast-ci --verbose``: don't ignore the
+  ``--verbose`` option anymore. Patch by Victor Stinner.
+
+- gh-136442: Use exit code ``1`` instead of ``5`` if
+  :func:`unittest.TestCase.setUpClass` raises an exception; see the sketch
+  below.
+
+Security
+--------
+
+- gh-139700: Check consistency of the zip64 end of central directory
+  record. Support records with "zip64 extensible data" if there are no
+  bytes prepended to the ZIP file.
+
+- gh-139283: :mod:`sqlite3`: correctly handle the maximum number of rows to
+  fetch in :meth:`Cursor.fetchmany <sqlite3.Cursor.fetchmany>` and reject
+  negative values for :attr:`Cursor.arraysize <sqlite3.Cursor.arraysize>`.
+  Patch by Bénédikt Tran.
+
+- gh-137836: Add support of the "plaintext" element, RAWTEXT elements
+  "xmp", "iframe", "noembed" and "noframes", and optionally RAWTEXT element
+  "noscript" in :class:`html.parser.HTMLParser`.
+
+- gh-136063: :mod:`email.message`: ensure linear complexity for legacy HTTP
+  parameters parsing. Patch by Bénédikt Tran.
+
+- gh-136065: Fix quadratic complexity in :func:`os.path.expandvars`.
+
+- gh-119451: Fix a potential memory denial of service in the
+  :mod:`http.client` module. When connecting to a malicious server, it
+  could cause an arbitrary amount of memory to be allocated. This could
+  have led to symptoms including a :exc:`MemoryError`, swapping, out of
+  memory (OOM) killed processes or containers, or even system crashes.
+
+- gh-119342: Fix a potential memory denial of service in the
+  :mod:`plistlib` module. When reading a Plist file received from an
+  untrusted source, it could cause an arbitrary amount of memory to be
+  allocated. This could have led to symptoms including a
+  :exc:`MemoryError`, swapping, out of memory (OOM) killed processes or
+  containers, or even system crashes.
+
+Library
+-------
+
+- gh-74389: When the stdin being used by a :class:`subprocess.Popen`
+  instance is closed, this is now ignored in
+  :meth:`subprocess.Popen.communicate` instead of leaving the class in an
+  inconsistent state.
+
+- gh-87512: Fix :func:`subprocess.Popen.communicate` timeout handling on
+  Windows when writing large input. Previously, the timeout was ignored
+  during stdin writing, causing the method to block indefinitely if the
+  child process did not consume input quickly. The stdin write is now
+  performed in a background thread, allowing the timeout to be properly
+  enforced.
+
+- gh-141473: When :meth:`subprocess.Popen.communicate` was called with
+  *input* and a *timeout* and is called for a second time after a
+  :exc:`~subprocess.TimeoutExpired` exception before the process has died,
+  it should no longer hang.
+
+- gh-59000: Fix :mod:`pdb` breakpoint resolution for class methods when the
+  module defining the class is not imported.
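+
+A sketch of the gh-136442 change above: a :meth:`!setUpClass` failure now
+produces exit code ``1``. The inline test module here is illustrative::
+
+    import subprocess
+    import sys
+    import textwrap
+
+    code = textwrap.dedent("""
+        import unittest
+
+        class T(unittest.TestCase):
+            @classmethod
+            def setUpClass(cls):
+                raise RuntimeError("boom")
+
+            def test_x(self):
+                pass
+
+        unittest.main()
+    """)
+    proc = subprocess.run([sys.executable, "-c", code])
+    print(proc.returncode)  # 1 (was 5)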
+
+- gh-141570: Support a :term:`file-like object` raising :exc:`OSError`
+  from :meth:`~io.IOBase.fileno` in color detection
+  (``_colorize.can_colorize()``). This can occur when ``sys.stdout`` is
+  redirected.
+
+- gh-141659: Fix bad file descriptor errors from ``_posixsubprocess`` on
+  AIX.
+
+- gh-141600: Fix musl version detection on Void Linux.
+
+- gh-141497: :mod:`ipaddress`: ensure that the methods
+  :meth:`IPv4Network.hosts() <ipaddress.IPv4Network.hosts>` and
+  :meth:`IPv6Network.hosts() <ipaddress.IPv6Network.hosts>` always return
+  an iterator; see the sketch below.
+
+- gh-140938: The :func:`statistics.stdev` and :func:`statistics.pstdev`
+  functions now raise a :exc:`ValueError` when the input contains an
+  infinity or a NaN.
+
+- gh-124111: Updated Tcl threading configuration in :mod:`_tkinter` to
+  assume that threads are always available in Tcl 9 and later.
+
+- gh-137109: The :func:`os.fork` and related forking APIs will no longer
+  warn in the common case where Linux or macOS platform APIs return the
+  number of threads in a process and find the answer to be 1 even when a
+  :func:`os.register_at_fork` ``after_in_parent=`` callback (re)starts a
+  thread.
+
+- gh-141314: Fix assertion failure in :meth:`io.TextIOWrapper.tell` when
+  reading files with standalone carriage return (``\r``) line endings.
+
+- gh-141311: Fix assertion failure in :func:`!io.BytesIO.readinto` and
+  undefined behavior arising when the read position is above capacity in
+  :class:`io.BytesIO`.
+
+- gh-141141: Fix a thread safety issue with :func:`base64.b85decode`.
+  Contributed by Benel Tayar.
+
+- gh-137969: Fix :meth:`annotationlib.ForwardRef.evaluate` returning
+  :class:`~annotationlib.ForwardRef` objects which don't update with new
+  globals.
+
+- gh-140911: :mod:`collections`: Ensure that the methods
+  ``UserString.rindex()`` and ``UserString.index()`` accept
+  :class:`collections.UserString` instances as the *sub* argument.
+
+- gh-140797: The undocumented :class:`!re.Scanner` class now forbids
+  regular expressions containing capturing groups in its lexicon patterns.
+  Patterns using capturing groups could previously lead to crashes with a
+  segmentation fault. Use non-capturing groups ``(?:...)`` instead.
+
+- gh-125115: Refactor the :mod:`pdb` argument parsing so positional
+  arguments can pass through intuitively.
+
+- gh-140815: :mod:`faulthandler` now detects if a frame or a code object
+  is invalid or freed. Patch by Victor Stinner.
+
+- gh-100218: Correctly set :attr:`~OSError.errno` when
+  :func:`socket.if_nametoindex` or :func:`socket.if_indextoname` raise an
+  :exc:`OSError`. Patch by Bénédikt Tran.
+
+- gh-140875: Fix handling of unclosed character references (named and
+  numerical) followed by the end of file in :class:`html.parser.HTMLParser`
+  with ``convert_charrefs=False``.
+
+- gh-140734: :mod:`multiprocessing`: fix an off-by-one error when checking
+  the length of a temporary socket file path. Patch by Bénédikt Tran.
+
+- gh-140874: Bump the version of pip bundled in ensurepip to version 25.3.
+
+- gh-140691: In :mod:`urllib.request`, when opening an FTP URL fails
+  because a data connection cannot be made, the control connection's
+  socket is now closed to avoid a :exc:`ResourceWarning`.
+
+- gh-103847: Fix hang when cancelling a process created by
+  :func:`asyncio.create_subprocess_exec` or
+  :func:`asyncio.create_subprocess_shell`. Patch by Kumar Aditya.
+
+- gh-120057: Add :func:`os.reload_environ` to ``os.__all__``.
+
+- gh-140228: Avoid making unnecessary filesystem calls for frozen modules
+  in :mod:`linecache` when the global module cache is not present.
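+
+A minimal sketch of the gh-141497 guarantee above: ``hosts()`` is an
+iterator even for /31 and /32 networks, where it previously returned a
+list::
+
+    import ipaddress
+
+    hosts = ipaddress.IPv4Network("192.0.2.0/31").hosts()
+    print(next(hosts))  # 192.0.2.0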
+
+- gh-140590: Fix argument checking for
+  :meth:`!functools.partial.__setstate__` that could lead to internal
+  state corruption and a crash. Patch by Sergey Miryanov.
+
+- gh-125434: Display the thread name in :mod:`faulthandler` on Windows.
+  Patch by Victor Stinner.
+
+- gh-140634: Fix a reference counting bug in
+  :meth:`!os.sched_param.__reduce__`.
+
+- gh-140633: Ignore :exc:`AttributeError` when setting a module's
+  ``__file__`` attribute when loading an extension module packaged as an
+  Apple Framework.
+
+- gh-140593: :mod:`xml.parsers.expat`: Fix a memory leak that could affect
+  users with :meth:`~xml.parsers.expat.xmlparser.ElementDeclHandler` set to
+  a custom element declaration handler. Patch by Sebastian Pipping.
+
+- gh-140607: Inside :meth:`io.RawIOBase.read`, validate that the count of
+  bytes returned by :meth:`io.RawIOBase.readinto` is valid (inside the
+  provided buffer).
+
+- gh-138162: Fix :class:`logging.LoggerAdapter` with ``merge_extra=True``
+  and without the *extra* argument; see the sketch below.
+
+- gh-138774: :func:`ast.unparse` now generates full source code when
+  handling :class:`ast.Interpolation` nodes that do not have a specified
+  source.
+
+- gh-140474: Fix memory leak in :class:`array.array` when creating arrays
+  from an empty :class:`str` and the ``u`` type code.
+
+- gh-137530: :mod:`dataclasses`: Fix annotations for generated ``__init__``
+  methods by replacing the annotations that were in-line in the generated
+  source code with ``__annotate__`` functions attached to the methods.
+
+- gh-140348: Fix regression in Python 3.14.0 where using the ``|`` operator
+  on a :class:`typing.Union` object combined with an object that is not a
+  type would raise an error.
+
+- gh-140272: Fix memory leak in the :meth:`!clear` method of the
+  :mod:`dbm.gnu` database.
+
+- gh-140041: Fix import of :mod:`ctypes` on Android and Cygwin when ABI
+  flags are present.
+
+- gh-140120: Fixed a memory leak in :mod:`hmac` when it was using the
+  hacl-star backend. Discovered by ``@ashm-dev`` using AddressSanitizer.
+
+- gh-139905: Add a suggestion to the error message for
+  :class:`typing.Generic` subclasses when ``cls.__parameters__`` is
+  missing due to a parent class failing to call
+  :meth:`super().__init_subclass__() <object.__init_subclass__>` in its
+  ``__init_subclass__``.
+
+- gh-139894: Fix incorrect sharing of the current task with the child
+  process while forking in :mod:`asyncio`. Patch by Kumar Aditya.
+
+- gh-139845: Fix the default asyncio REPL to not print
+  ``KeyboardInterrupt`` twice.
+
+- gh-139783: Fix :func:`inspect.getsourcelines` for the case when a
+  decorator is followed by a comment or an empty line.
+
+- gh-139809: Prevent premature colorization of the subparser ``prog`` in
+  :meth:`argparse.ArgumentParser.add_subparsers` to respect color
+  environment variable changes after parser creation.
+
+- gh-139736: Fix excessive indentation in the default :mod:`argparse`
+  :class:`!HelpFormatter`. Patch by Alexander Edland.
+
+- gh-70765: :mod:`http.server`: fix default handling of HTTP/0.9 requests
+  in :class:`~http.server.BaseHTTPRequestHandler`. Previously,
+  :meth:`!BaseHTTPRequestHandler.parse_request` incorrectly waited for
+  headers in the request although those are not supported in HTTP/0.9.
+  Patch by Bénédikt Tran.
+
+- gh-63161: Fix :func:`tokenize.detect_encoding`. Support non-UTF-8
+  shebangs and comments if a non-UTF-8 encoding is specified. Detect
+  decoding errors for non-UTF-8 encodings. Detect null bytes in source
+  code.
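+
+A sketch of the gh-138162 fix above, assuming no handler configuration
+(the record goes to the last-resort handler)::
+
+    import logging
+
+    logger = logging.getLogger("demo")
+    # merge_extra=True without the *extra* argument no longer fails:
+    adapter = logging.LoggerAdapter(logger, merge_extra=True)
+    adapter.warning("hello")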
+
+- gh-139391: Fix an issue when, on non-Windows platforms, it was not
+  possible to gracefully exit a ``python -m asyncio`` process suspended by
+  Ctrl+Z and later resumed by :manpage:`fg` other than with
+  :manpage:`kill`.
+
+- gh-101828: Fix ``'shift_jisx0213'``, ``'shift_jis_2004'``,
+  ``'euc_jisx0213'`` and ``'euc_jis_2004'`` codecs truncating null chars as
+  they were treated as part of multi-character sequences.
+
+- gh-139289: Do a real lazy import of :mod:`rlcompleter` in :mod:`pdb` and
+  restore the existing completer after importing :mod:`rlcompleter`.
+
+- gh-139246: Fix wrong width computation when pasting zero-width
+  characters into the default REPL.
+
+- gh-90949: Add
+  :meth:`~xml.parsers.expat.xmlparser.SetAllocTrackerActivationThreshold`
+  and
+  :meth:`~xml.parsers.expat.xmlparser.SetAllocTrackerMaximumAmplification`
+  to :ref:`xmlparser <xmlparser-objects>` objects to prevent use of
+  disproportional amounts of dynamic memory from within an Expat parser.
+  Patch by Bénédikt Tran.
+
+- gh-139210: Fix use-after-free when reporting an unknown event in
+  :func:`xml.etree.ElementTree.iterparse`. Patch by Ken Jin.
+
+- gh-138860: Lazily import :mod:`rlcompleter` in :mod:`pdb` to avoid a
+  deadlock in subprocesses.
+
+- gh-112729: Fix crash when calling :func:`concurrent.interpreters.create`
+  when the process is out of memory.
+
+- gh-135729: Fix unraisable exception during finalization when using
+  :mod:`concurrent.interpreters` in the REPL.
+
+- gh-139076: Fix a bug in the :mod:`pydoc` module that was hiding functions
+  in a Python module if they were implemented in an extension module and
+  the module did not have ``__all__``.
+
+- gh-139065: Fix trailing space before a wrapped long word if the line
+  length is exactly *width* in :mod:`textwrap`.
+
+- gh-139001: Fix race condition in :class:`pathlib.Path` on the internal
+  ``_raw_paths`` field.
+
+- gh-138813: :class:`!multiprocessing.BaseProcess` defaults ``kwargs`` to
+  ``None`` instead of a shared dictionary.
+
+- gh-138993: Dedent :data:`credits` text.
+
+- gh-138891: Fix ``SyntaxError`` when ``inspect.get_annotations(f,
+  eval_str=True)`` is called on a function annotated with a :pep:`646`
+  ``star_expression``; see the sketch below.
+
+- gh-130567: Fix possible crash in :func:`locale.strxfrm` due to a
+  platform bug on macOS.
+
+- gh-138859: Fix generic type parameterization raising a :exc:`TypeError`
+  when omitting a :class:`ParamSpec` that has a default which is not a
+  list of types.
+
+- gh-138764: Prevent :func:`annotationlib.call_annotate_function` from
+  calling ``__annotate__`` functions that don't support
+  ``VALUE_WITH_FAKE_GLOBALS`` in a fake globals namespace with empty
+  globals.
+
+  Make ``FORWARDREF`` and ``STRING`` annotations fall back to using
+  ``VALUE`` annotations in the case that neither their own format nor
+  ``VALUE_WITH_FAKE_GLOBALS`` is supported.
+
+- gh-138775: Use of ``python -m`` with :mod:`base64` has been fixed to
+  detect input from a terminal so that it properly notices EOF.
+
+- gh-138779: Support device numbers larger than ``2**63-1`` for the
+  :attr:`~os.stat_result.st_rdev` field of the :class:`os.stat_result`
+  structure.
+
+- gh-137706: Fix the partial evaluation of annotations that use
+  ``typing.Annotated[T, x]`` where ``T`` is a forward reference.
+
+- gh-88375: Fix normalization of the ``robots.txt`` rules and URLs in the
+  :mod:`urllib.robotparser` module. No longer ignore trailing ``?``.
+  Distinguish raw special characters ``?``, ``=`` and ``&`` from the
+  percent-encoded ones.
+
+- gh-111788: Fix parsing errors in the :mod:`urllib.robotparser` module.
+  Don't fail trying to parse weird paths. Don't fail trying to decode
+  non-UTF-8 ``robots.txt`` files.
+
+- gh-98896: Fix a failure in the multiprocessing resource tracker when
+  SharedMemory names contain colons. Patch by Rani Pinchuk.
+
+- gh-138425: Fix partial evaluation of :class:`annotationlib.ForwardRef`
+  objects which rely on names defined as globals.
+
+- gh-138432: :meth:`zoneinfo.reset_tzpath` will now convert any
+  :class:`os.PathLike` objects it receives into strings before adding them
+  to ``TZPATH``. It will raise ``TypeError`` if anything other than a
+  string is found after this conversion. If given an :class:`os.PathLike`
+  object that represents a relative path, it will now raise ``ValueError``
+  instead of ``TypeError``, and present a more informative error message.
+
+- gh-138008: Fix segmentation faults in the :mod:`ctypes` module due to
+  invalid :attr:`~ctypes._CFuncPtr.argtypes`. Patch by Dung Nguyen.
+
+- gh-60462: Fix :func:`locale.strxfrm` on Solaris (and possibly other
+  platforms).
+
+- gh-138239: The REPL now highlights :keyword:`type` as a soft keyword in
+  :ref:`type statements <type>`.
+
+- gh-138204: Forbid expansion of shared anonymous :mod:`memory maps
+  <mmap>` on Linux, which caused a bus error.
+
+- gh-138010: Fix an issue where defining a class with an
+  :func:`@warnings.deprecated <warnings.deprecated>`-decorated base class
+  may not invoke the correct :meth:`~object.__init_subclass__` method in
+  cases involving multiple inheritance. Patch by Brian Schubert.
+
+- gh-138151: In :mod:`annotationlib`, improve evaluation of forward
+  references to nonlocal variables that are not yet defined when the
+  annotations are initially evaluated.
+
+- gh-137317: :func:`inspect.signature` now correctly handles classes that
+  use a descriptor on a wrapped :meth:`!__init__` or :meth:`!__new__`
+  method. Contributed by Yongyu Yan.
+
+- gh-137754: Fix import of the :mod:`zoneinfo` module if the C
+  implementation of the :mod:`datetime` module is not available.
+
+- gh-137490: Handle :data:`~errno.ECANCELED` in the same way as
+  :data:`~errno.EINTR` in :func:`signal.sigwaitinfo` on NetBSD.
+
+- gh-137477: Fix :func:`!inspect.getblock`, :func:`inspect.getsourcelines`
+  and :func:`inspect.getsource` for generator expressions.
+
+- gh-137044: Return large limit values as positive integers instead of
+  negative integers in :func:`resource.getrlimit`. Accept large values and
+  reject negative values (except :data:`~resource.RLIM_INFINITY`) for
+  limits in :func:`resource.setrlimit`.
+
+- gh-75989: :func:`tarfile.TarFile.extractall` and
+  :func:`tarfile.TarFile.extract` now overwrite symlinks when extracting
+  hardlinks. (Contributed by Alexander Enrique Urieles Nieto in
+  :gh:`75989`.)
+
+- gh-137017: Fix :obj:`threading.Thread.is_alive` to remain ``True`` until
+  the underlying OS thread is fully cleaned up. This avoids false
+  negatives in edge cases involving thread monitoring or premature
+  :obj:`threading.Thread.is_alive` calls.
+
+- gh-137273: Fix debug assertion failure in :func:`locale.setlocale` on
+  Windows.
+
+- gh-137239: :mod:`heapq`: Update :data:`!heapq.__all__` with ``*_max``
+  functions.
+
+- gh-81325: :class:`tarfile.TarFile` now accepts a :term:`path-like
+  object` when working on a tar archive. (Contributed by Alexander
+  Enrique Urieles Nieto in :gh:`81325`.)
+
+- gh-137185: Fix a potential async-signal-safety issue in
+  :mod:`faulthandler` when printing C stack traces.
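+
+A minimal sketch of the gh-138891 fix noted above, using stringified
+annotations so that ``eval_str=True`` has to evaluate the :pep:`646` star
+expression::
+
+    from __future__ import annotations
+
+    import inspect
+
+    def f(*args: *tuple[int, ...]) -> None: ...
+
+    # This used to raise SyntaxError with eval_str=True:
+    print(inspect.get_annotations(f, eval_str=True))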
+
+- gh-136914: Fix retrieval of :attr:`doctest.DocTest.lineno` for objects
+  decorated with :func:`functools.cache` or
+  :class:`functools.cached_property`.
+
+- gh-136912: :func:`hmac.digest` now properly handles large keys and
+  messages by falling back to the pure Python implementation when
+  necessary. Patch by Bénédikt Tran.
+
+- gh-83424: Allow creating a :class:`ctypes.CDLL` without a name when
+  passing a handle as an argument.
+
+- gh-136234: Fix :meth:`asyncio.WriteTransport.writelines` to be robust to
+  connection failure, by using the same behavior as
+  :meth:`~asyncio.WriteTransport.write`.
+
+- gh-136507: Fix the mimetypes CLI to handle multiple file parameters.
+
+- gh-136057: Fix the bug in :mod:`pdb` and :mod:`bdb` where ``next`` and
+  ``step`` could not go over the line if a loop exists in the line.
+
+- gh-135386: Fix opening a :mod:`dbm.sqlite3` database for reading from a
+  read-only file or directory.
+
+- gh-135444: Fix :meth:`asyncio.DatagramTransport.sendto` to account for
+  the datagram header size when data cannot be sent.
+
+- gh-126631: Fix a :mod:`multiprocessing` ``forkserver`` bug which
+  prevented ``__main__`` from being preloaded.
+
+- gh-135307: :mod:`email`: Fix exception in ``set_content()`` when encoding
+  text and ``max_line_length`` is set to ``0`` or ``None`` (unlimited).
+
+- gh-134453: Fixed :func:`subprocess.Popen.communicate` ``input=`` handling
+  of :class:`memoryview` instances that were non-byte shaped on POSIX
+  platforms. Those are now properly cast to a byte-shaped view instead of
+  truncating the input. Windows platforms did not have this bug. See the
+  sketch below.
+
+- gh-134698: Fix a crash when calling methods of :class:`ssl.SSLContext`
+  or :class:`ssl.SSLSocket` across multiple threads.
+
+- gh-125996: Fix thread safety of :class:`collections.OrderedDict`. Patch
+  by Kumar Aditya.
+
+- gh-133789: Fix unpickling of :mod:`pathlib` objects that were pickled in
+  Python 3.13.
+
+- gh-127081: Fix libc thread safety issues with :mod:`dbm` by performing
+  stateful operations in critical sections.
+
+- gh-132551: Make :class:`io.BytesIO` safe in the :term:`free-threaded
+  <free threading>` build.
+
+- gh-131788: Make ``ResourceTracker.send`` from :mod:`multiprocessing`
+  re-entrant safe.
+
+- gh-118981: Fix potential hang in ``multiprocessing.popen_spawn_posix``
+  that can happen when the child process dies early, by closing the child
+  fds right away.
+
+- gh-102431: Clarify constraints for "logical" arguments in methods of
+  :class:`decimal.Context`.
+
+- gh-78319: UTF8 support for the IMAP ``APPEND`` command has been made RFC
+  compliant.
+
+- bpo-38735: Fix failure when importing a module from the root directory
+  on Unix-like platforms with ``sys.pycache_prefix`` set.
+
+- bpo-41839: Allow negative priority values from the
+  :func:`os.sched_get_priority_min` and :func:`os.sched_get_priority_max`
+  functions.
+
+IDLE
+----
+
+- gh-96491: Deduplicate the version number in the IDLE shell title bar
+  after saving to a file.
+
+- gh-139742: Colorize t-string prefixes for template strings in IDLE, as
+  done for f-string prefixes.
+
+Documentation
+-------------
+
+- gh-141994: :mod:`xml.sax.handler`: Make the documentation of
+  :data:`xml.sax.handler.feature_external_ges` warn about opening up to
+  `external entity attacks `_. Patch by Sebastian Pipping.
+
+- gh-140578: Remove an outdated sentence from the :mod:`multiprocessing`
+  documentation which implied that
+  :class:`concurrent.futures.ThreadPoolExecutor` did not exist.
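+
+A sketch of the gh-134453 fix above: a non-byte-shaped
+:class:`memoryview` passed as ``input=`` is written in full. The array
+type code ``"I"`` (typically 4-byte items) is illustrative::
+
+    import array
+    import subprocess
+    import sys
+
+    buf = array.array("I", [1, 2, 3])
+    proc = subprocess.Popen(
+        [sys.executable, "-c",
+         "import sys; print(len(sys.stdin.buffer.read()))"],
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+    )
+    out, _ = proc.communicate(input=memoryview(buf))
+    print(out)  # b'12\n' with 4-byte items: all bytes written, not 3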
+
+Core and Builtins
+-----------------
+
+- gh-142048: Fix quadratically increasing garbage collection delays in the
+  free-threaded build.
+
+Library
+-------
+
+- gh-116738: Fix a thread safety issue with :mod:`re` scanner objects in
+  free-threaded builds.
+
+Core and Builtins
+-----------------
+
+- gh-141930: When importing a module, use Python's regular file object to
+  ensure that writes to ``.pyc`` files are complete or an appropriate
+  error is raised.
+
+- gh-120158: Fix inconsistent state when enabling or disabling monitoring
+  events too many times.
+
+- gh-139653: Only raise a ``RecursionError`` or trigger a fatal error if
+  the stack pointer is both below the limit pointer *and* above the stack
+  base. If it is outside these bounds, assume that it is OK. This prevents
+  false positives when user-space threads swap stacks.
+
+- gh-139103: Improve multithreaded scaling of dataclasses on the
+  free-threaded build.
+
+- gh-141579: Fix :func:`sys.activate_stack_trampoline` to properly support
+  the ``perf_jit`` backend. Patch by Pablo Galindo.
+
+- gh-114203: Skip locking if the object is already locked by a two-mutex
+  critical section.
+
+- gh-141528: Suggest using
+  :meth:`concurrent.interpreters.Interpreter.close` instead of the private
+  ``_interpreters.destroy`` function when warning about remaining
+  subinterpreters. Patch by Sergey Miryanov.
+
+- gh-141312: Fix the assertion failure in the ``__setstate__`` method of
+  the range iterator when a non-integer argument is passed; see the sketch
+  below. Patch by Sergey Miryanov.
+
+Library
+-------
+
+- gh-116738: Make the csv module thread-safe on the :term:`free threaded
+  <free threading>` build.
+
+Core and Builtins
+-----------------
+
+- gh-140939: Fix memory leak when :class:`bytearray` or :class:`bytes` is
+  formatted with the ``%*b`` format with a large width that results in a
+  :exc:`MemoryError`.
+
+Library
+-------
+
+- gh-140260: Fix a :mod:`struct` data race in endian table initialization
+  with subinterpreters. Patch by Shamil Abdulaev.
+
+Core and Builtins
+-----------------
+
+- gh-140530: Fix a reference leak when ``raise exc from cause`` fails.
+  Patch by Bénédikt Tran.
+
+- gh-140373: Correctly emit the ``PY_UNWIND`` event when a generator
+  object is closed. Patch by Mikhail Efimov.
+
+- gh-140576: Fixed crash in :func:`tokenize.generate_tokens` in case of
+  specific incorrect input. Patch by Mikhail Efimov.
+
+- gh-140551: Fixed crash in :class:`dict` if :meth:`dict.clear` is called
+  during the lookup stage. Patch by Mikhail Efimov and Inada Naoki.
+
+- gh-140517: Fixed a reference leak when iterating over the result of
+  :func:`map` with ``strict=True`` when the input iterables have different
+  lengths. Patch by Mikhail Efimov.
+
+- gh-140471: Fix potential buffer overflow in :class:`ast.AST` node
+  initialization when encountering malformed :attr:`~ast.AST._fields`
+  containing non-:class:`str` entries.
+
+- gh-140431: Fix a crash in Python's :term:`garbage collector <garbage
+  collection>` due to partially initialized :term:`coroutine` objects when
+  coroutine origin tracking depth is enabled
+  (:func:`sys.set_coroutine_origin_tracking_depth`).
+
+Library
+-------
+
+- gh-140398: Fix memory leaks in the :mod:`readline` functions
+  :func:`~readline.read_init_file`, :func:`~readline.read_history_file`,
+  :func:`~readline.write_history_file`, and
+  :func:`~readline.append_history_file` when :c:func:`PySys_Audit` fails.
+
+Core and Builtins
+-----------------
+
+- gh-140406: Fix memory leak when an object's :meth:`~object.__hash__`
+  method returns an object that isn't an :class:`int`.
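+
+A minimal sketch of the gh-141312 fix above; the exact message is an
+assumption, but a non-integer state now raises cleanly instead of
+tripping an assertion::
+
+    it = iter(range(10))
+    try:
+        it.__setstate__("not-an-index")
+    except TypeError as exc:
+        print(exc)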
+
+- gh-140358: Restore elapsed time and unreachable object count in GC debug
+  output. These were inadvertently removed during a refactor of ``gc.c``.
+  The debug log now again reports elapsed collection time and the number
+  of unreachable objects. Contributed by Pål Grønås Drange.
+
+- gh-140306: Fix memory leaks in cross-interpreter channel operations and
+  shared namespace handling.
+
+- gh-140301: Fix memory leak of ``PyConfig`` in subinterpreters.
+
+- gh-140257: Fix data race between ``interpreter_clear()`` and
+  ``take_gil()`` on the eval breaker during finalization with daemon
+  threads.
+
+- gh-139951: Fixes a regression in GC performance for a growing heap
+  composed mostly of small tuples.
+
+  * Count the number of actually tracked objects, instead of trackable
+    objects. This ensures that untracking tuples has the desired effect of
+    reducing GC overhead.
+  * Do not track most untrackable tuples during creation. This prevents
+    large numbers of small tuples causing excessive GCs.
+
+- gh-140104: Fix a bug with exception handling in the JIT. Patch by Ken
+  Jin. Bug reported by Daniel Diniz.
+
+- gh-140061: Fix the check of whether an object is uniquely referenced to
+  ensure free-threaded compatibility. Patch by Sergey Miryanov.
+
+- gh-140067: Fix memory leak in sub-interpreter creation.
+
+- gh-140000: Fix potential memory leak when a reference cycle exists
+  between an instance of :class:`typing.TypeAliasType`,
+  :class:`typing.TypeVar`, :class:`typing.ParamSpec`, or
+  :class:`typing.TypeVarTuple` and its ``__name__`` attribute. Patch by
+  Mikhail Efimov.
+
+- gh-139914: Restore support for HP PA-RISC, which has an upwards-growing
+  stack.
+
+- gh-139988: Fix a memory leak when failing to create a
+  :class:`~typing.Union` type. Patch by Bénédikt Tran.
+
+- gh-139748: Fix reference leaks in error branches of functions accepting
+  path strings or bytes such as :func:`compile` and :func:`os.system`.
+  Patch by Bénédikt Tran.
+
+- gh-139516: Fix the tokenizer erroneously treating a lambda's colon as
+  the start of a format spec in an f-string.
+
+- gh-139640: :func:`ast.parse` no longer emits syntax warnings for
+  ``return``/``break``/``continue`` in ``finally`` (see :pep:`765`) --
+  they are only emitted during compilation.
+
+- gh-139640: Fix swallowing some syntax warnings in different modules if
+  they accidentally have the same message and are emitted from the same
+  line. Fix duplicated warnings in the ``finally`` block.
+
+- gh-63161: Support non-UTF-8 shebangs and comments in Python source files
+  if a non-UTF-8 encoding is specified. Detect decoding errors in comments
+  for the default (UTF-8) encoding. Show the line and position of a
+  decoding error for the default encoding in a traceback. Show the line
+  containing the coding cookie when it conflicts with the BOM in a
+  traceback.
+
+Library
+-------
+
+- gh-116738: Make :mod:`mmap` thread-safe on the :term:`free threaded
+  <free threading>` build.
+
+Core and Builtins
+-----------------
+
+- gh-138558: Fix handling of unusual t-string annotations in
+  annotationlib. Patch by Dave Peck.
+
+- gh-134466: Don't run PyREPL in a degraded environment where setting
+  termios attributes is not allowed.
+
+- gh-138944: Fix the :exc:`SyntaxError` message when invalid syntax
+  appears on the same line as a valid ``import ... as ...`` or ``from ...
+  import ... as ...`` statement. Patch by Brian Schubert.
+
+- gh-105487: Remove non-existent :meth:`~object.__copy__`,
+  :meth:`~object.__deepcopy__`, and :attr:`~type.__bases__` from the
+  :meth:`~object.__dir__` entries of :class:`types.GenericAlias`.
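+
+A quick check of the gh-105487 change above: the removed entries no
+longer show up in :func:`dir`::
+
+    ga = list[int]
+    print("__copy__" in dir(ga), "__bases__" in dir(ga))  # False False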
+
+- gh-69605: Fix some standard library submodules missing from the
+  :term:`REPL` auto-completion of imports.
+
+Library
+-------
+
+- gh-116738: Make :mod:`cProfile` thread-safe on the :term:`free threaded
+  <free threading>` build.
+
+- gh-138004: On Solaris/Illumos platforms, thread names are now encoded as
+  ASCII to avoid errors on systems (e.g. OpenIndiana) that don't support
+  non-ASCII names.
+
+Core and Builtins
+-----------------
+
+- gh-137433: Fix a potential deadlock in the :term:`free threading` build
+  when daemon threads enable or disable profiling or tracing while the
+  main thread is shutting down the interpreter.
+
+- gh-137400: Fix a crash in the :term:`free threading` build when
+  disabling profiling or tracing across all threads with
+  :c:func:`PyEval_SetProfileAllThreads` or
+  :c:func:`PyEval_SetTraceAllThreads` or their Python equivalents
+  :func:`threading.settrace_all_threads` and
+  :func:`threading.setprofile_all_threads`.
+
+- gh-58124: Fix the name of the Python encoding in Unicode errors of the
+  code page codec: use "cp65000" and "cp65001" instead of "CP_UTF7" and
+  "CP_UTF8", which are not valid Python codec names. Patch by Victor
+  Stinner.
+
+- gh-132657: Improve performance of :class:`frozenset` by removing locks
+  in the free-threading build.
+
+- gh-133400: Fixed Ctrl+D (``^D``) behavior in the ``_pyrepl`` module to
+  match the old pre-3.13 REPL behavior.
+
+- gh-128640: Fix a crash when using threads inside of a subinterpreter.
+
+C API
+-----
+
+- gh-137422: Fix a :term:`free threading` race condition in
+  :c:func:`PyImport_AddModuleRef`. It was previously possible for two
+  calls to the function to return two different objects, only one of which
+  was stored in :data:`sys.modules`.
+
+- gh-140042: Removed the ``sqlite3_shutdown()`` call that could cause
+  SQLite connections to be closed when used with multiple subinterpreters.
+
+- gh-141042: In :c:func:`PyFloat_Pack2` and :c:func:`PyFloat_Pack4`, when
+  a qNaN is converted to a narrower-precision floating-point format, the
+  payload remaining after truncation is now zero. Patch by Sergey B
+  Kirpichev.
+
+- gh-140487: Fix :c:macro:`Py_RETURN_NOTIMPLEMENTED` in the limited C API
+  3.11 and older: don't treat ``Py_NotImplemented`` as immortal. Patch by
+  Victor Stinner.
+
+- gh-140153: Fix the :c:func:`Py_REFCNT` definition in the limited C API
+  3.11-3.13. Patch by Victor Stinner.
+
+- gh-139653: Add :c:func:`PyUnstable_ThreadState_SetStackProtection` and
+  :c:func:`PyUnstable_ThreadState_ResetStackProtection` functions to set
+  the stack protection base address and stack protection size of a Python
+  thread state. Patch by Victor Stinner.
+
+Build
+-----
+
+- gh-141808: Do not generate the JIT stencils twice in case of PGO builds
+  on Windows.
+
+- gh-141784: Fix ``_remote_debugging_module.c`` compilation on 32-bit
+  Linux. Include Python.h before system headers to make sure that
+  ``_remote_debugging_module.c`` uses the same types (ABI) as Python.
+  Patch by Victor Stinner.
+
+- gh-140768: Warn when the WASI SDK version doesn't match what's
+  supported.
+
+- gh-140513: Generate a clear compilation error when
+  ``_Py_TAIL_CALL_INTERP`` is enabled but either ``preserve_none`` or
+  ``musttail`` is not supported.
+
+- gh-140189: iOS builds were added to CI.
+
+- gh-138489: When cross-compiling for WASI by ``build_wasm`` or
+  ``build_emscripten``, the ``build-details.json`` step is now included in
+  the build process, just like with native builds.
+
+  This fixes the ``libinstall`` task which requires the
+  ``build-details.json`` file during the process.
+
+- gh-137618: ``PYTHON_FOR_REGEN`` now requires Python 3.10 to Python 3.15.
+  Patch by Adam Turner.
+
+- gh-123681: Check the ``strftime()`` behavior at runtime instead of at
+  compile time to support cross-compiling. Remove the internal macro
+  ``_Py_NORMALIZE_CENTURY``.
+
+
+What's New in Python 3.14.0 final?
+==================================
+
+*Release date: 2025-10-07*
+
+macOS
+-----
+
+- gh-124111: Update macOS installer to use Tcl/Tk 8.6.17.
+
+- gh-139573: Updated bundled version of OpenSSL to 3.0.18.
+
+Windows
+-------
+
+- gh-139573: Updated bundled version of OpenSSL to 3.0.18.
+
+Tools/Demos
+-----------
+
+- gh-139330: The SBOM generation tool didn't cross-check the version and
+  checksum values against the ``Modules/expat/refresh.sh`` script, leading
+  to the values becoming out-of-date during routine updates.
+
+- gh-132006: XCframeworks now include privacy manifests to satisfy Apple
+  App Store submission requirements.
+
+- gh-138171: A script for building an iOS XCframework was added. As part
+  of this change, the top level ``iOS`` folder has been moved to be a
+  subdirectory of the ``Apple`` folder.
+
+Security
+--------
+
+- gh-139400: :mod:`xml.parsers.expat`: Make sure that parent Expat parsers
+  are only garbage-collected once they are no longer referenced by
+  subparsers created by
+  :meth:`~xml.parsers.expat.xmlparser.ExternalEntityParserCreate`. Patch
+  by Sebastian Pipping.
+
+Library
+-------
+
+- gh-139312: Upgrade the bundled libexpat to 2.7.3.
+
+
+What's New in Python 3.14.0 release candidate 3?
+================================================
+
+*Release date: 2025-09-18*
+
+Windows
+-------
+
+- gh-138896: Fix an error installing the C runtime on non-updated Windows
+  machines.
+
+Tools/Demos
+-----------
+
+- gh-137873: The iOS test runner has been simplified, resolving some
+  issues that have been observed using the runner in GitHub Actions and
+  Azure Pipelines test environments.
+
+Security
+--------
+
+- gh-135661: Fix CDATA section parsing in :class:`html.parser.HTMLParser`
+  according to the HTML5 standard: ``] ]>`` and ``]] >`` no longer end the
+  CDATA section. Add a private method ``_set_support_cdata()`` which can
+  be used to specify how to parse ``<![CDATA[`` --- as a CDATA section in
+  foreign content (SVG or MathML) or as a bogus comment in the HTML
+  namespace.
+
+Library
+-------
+
+- gh-138998: Update the bundled libexpat to 2.7.2.
+
+- gh-118803: Add back :class:`collections.abc.ByteString` and
+  :class:`typing.ByteString`. Both had been removed in prior alpha, beta
+  and release candidates for Python 3.14, but their removal has now been
+  postponed to Python 3.17.
+
+- gh-137226: Fix :func:`typing.get_type_hints` calls on generic
+  :class:`typing.TypedDict` classes defined with string annotations.
+
+- gh-138804: Raise :exc:`TypeError` instead of :exc:`AttributeError` when
+  an argument of incorrect type is passed to :func:`shlex.quote`. This
+  restores the behavior of the function prior to 3.14.
+
+- gh-128636: Fix a crash in PyREPL on macOS when ``os.environ`` is
+  overwritten with an invalid value.
+
+- gh-138514: Raise :exc:`ValueError` when a multi-character string is
+  passed to the *echo_char* parameter of :func:`getpass.getpass`; see the
+  sketch below. Patch by Benjamin Johnson.
+
+- gh-138515: :mod:`email` is added to the Emscripten build.
+
+- gh-99948: :func:`ctypes.util.find_library` now works in the Emscripten
+  build.
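+
+A sketch of the gh-138514 validation above; *echo_char* must be a single
+character (run in a terminal, the prompt is illustrative)::
+
+    import getpass
+
+    try:
+        getpass.getpass("Password: ", echo_char="**")
+    except ValueError as exc:
+        print(exc)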
+
+- gh-138253: Add the *block* parameter in the :meth:`!put` and :meth:`!get`
+  methods of the :mod:`concurrent.interpreters` queues for compatibility
+  with the :class:`queue.Queue` interface.
+
+- gh-138133: Prevent an infinite traceback loop when sending Ctrl-C to
+  Python through ``strace``.
+
+- gh-134869: Fix an issue where pressing Ctrl+C during tab completion in
+  the REPL would leave the autocompletion menu in a corrupted state.
+
+- gh-90548: Fix ``musl`` detection for :func:`platform.libc_ver` on Alpine
+  Linux if compiled with ``--strip-all``.
+
+- gh-136134: :meth:`!SMTP.auth_cram_md5` now raises an
+  :exc:`~smtplib.SMTPException` instead of a :exc:`ValueError` if Python
+  has been built without MD5 support. In particular,
+  :class:`~smtplib.SMTP` clients will not attempt to use this method even
+  if the remote server is assumed to support it. Patch by Bénédikt Tran.
+
+- gh-136134: :meth:`IMAP4.login_cram_md5 <imaplib.IMAP4.login_cram_md5>`
+  now raises an :exc:`IMAP4.error <imaplib.IMAP4.error>` if CRAM-MD5
+  authentication is not supported. Patch by Bénédikt Tran.
+
+- gh-134953: Expand the ``_colorize`` theme with ``keyword_constant`` and
+  implement it in the :term:`REPL`.
+
+Core and Builtins
+-----------------
+
+- gh-71810: Raise :exc:`OverflowError` for ``(-1).to_bytes()`` for signed
+  conversions when the byte count is zero; see the sketch below. Patch by
+  Sergey B Kirpichev.
+
+- gh-138192: Fix :mod:`contextvars` initialization so that all
+  subinterpreters are assigned the :attr:`~contextvars.Token.MISSING`
+  value.
+
+- gh-138479: Fix a crash when a generic object's ``__typing_subst__``
+  returns an object that isn't a :class:`tuple`.
+
+- gh-138372: Fix a :exc:`SyntaxWarning` emitted for erroneous subscript
+  expressions involving :ref:`template string literals <t-strings>`. Patch
+  by Brian Schubert.
+
+- gh-138318: The default REPL now avoids highlighting built-in names (for
+  instance :class:`set` or :func:`format`) when they are used as attribute
+  names (for instance in ``value.set`` or ``text.format``).
+
+- gh-138349: Fix a crash in certain cases where a module contains both a
+  module-level annotation and a comprehension.
+
+- gh-137384: Fix a crash when using the :mod:`warnings` module in a
+  finalizer at shutdown. Patch by Kumar Aditya.
+
+- gh-137883: Fix runaway recursion when calling a function with keyword
+  arguments.
+
+- gh-137079: Fix keyword typo recognition when parsing files. Patch by
+  Pablo Galindo.
+
+- gh-137728: Fix the JIT's handling of many local variables. This
+  previously caused a segfault.
+
+- gh-137576: Fix for incorrect source code being shown in tracebacks from
+  the Basic REPL when :envvar:`PYTHONSTARTUP` is given. Patch by Adam
+  Hartz.
+
+
+What's New in Python 3.14.0 release candidate 2?
+================================================
+
+*Release date: 2025-08-14*
+
+macOS
+-----
+
+- gh-137450: macOS installer shell path management improvements: separate
+  the installer ``Shell profile updater`` postinstall script from the
+  ``Update Shell Profile.command`` to enable more robust error handling.
+
+- gh-137134: Update macOS installer to ship with SQLite version 3.50.4.
+
+Windows
+-------
+
+- gh-137134: Update Windows installer to ship with SQLite 3.50.4.
+
+Library
+-------
+
+- gh-137426: Remove the code deprecation of
+  ``importlib.abc.ResourceLoader``. It is documented as deprecated, but
+  left for backwards compatibility with other classes in
+  ``importlib.abc``.
+
+- gh-137282: Fix tab completion and :func:`dir` on
+  :mod:`concurrent.futures`.
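+
+A minimal sketch of the gh-71810 change above: a signed conversion into
+zero bytes now raises :exc:`OverflowError`::
+
+    try:
+        (-1).to_bytes(0, "big", signed=True)
+    except OverflowError as exc:
+        print(exc)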
+
+- gh-137257: Bump the version of pip bundled in ensurepip to version 25.2.
+
+- gh-137226: Fix behavior of :meth:`annotationlib.ForwardRef.evaluate` when
+  the *type_params* parameter is passed and the name of a type param is
+  also present in an enclosing scope.
+
+- gh-130522: Fix unraisable :exc:`TypeError` raised during
+  :term:`interpreter shutdown` in the :mod:`threading` module.
+
+- gh-137059: Fix handling of file URLs with a Windows drive letter in the
+  URL authority by :func:`urllib.request.url2pathname`. This fixes a
+  regression in earlier pre-releases of Python 3.14.
+
+- gh-130577: :mod:`tarfile` now validates archives to ensure member
+  offsets are non-negative. (Contributed by Alexander Enrique Urieles
+  Nieto in :gh:`130577`.)
+
+- gh-135228: When :mod:`dataclasses` replaces a class with a slotted
+  dataclass, the original class can now be garbage collected again.
+  Earlier changes in Python 3.14 caused this class to always remain in
+  existence together with the replacement class synthesized by
+  :mod:`dataclasses`.
+
+Documentation
+-------------
+
+- gh-136155: We are now checking for fatal errors in EPUB builds in CI.
+
+Core and Builtins
+-----------------
+
+- gh-137400: Fix a crash in the :term:`free threading` build when
+  disabling profiling or tracing across all threads with
+  :c:func:`PyEval_SetProfileAllThreads` or
+  :c:func:`PyEval_SetTraceAllThreads` or their Python equivalents
+  :func:`threading.settrace_all_threads` and
+  :func:`threading.setprofile_all_threads`.
+
+- gh-137314: Fixed a regression where raw f-strings incorrectly
+  interpreted escape sequences in format specifications. Raw f-strings now
+  properly preserve literal backslashes in format specs, matching the
+  behavior from Python 3.11. For example, ``rf"{obj:\xFF}"`` now correctly
+  produces ``'\\xFF'`` instead of ``'ÿ'``; see the sketch below. Patch by
+  Pablo Galindo.
+
+- gh-137308: A standalone docstring in a node body is optimized as a
+  :keyword:`pass` statement to ensure that the node's body is never empty.
+  There was a :exc:`ValueError` in :func:`compile` otherwise.
+
+- gh-137288: Fix a bug where some bytecode instructions of a boolean
+  expression were not associated with the correct exception handler.
+
+- gh-134291: Remove some newer macOS API usage from the JIT compiler in
+  order to restore compatibility with older macOS 10.15 deployment
+  targets.
+
+- gh-131338: Disable computed stack limit checks on non-glibc Linux
+  platforms to fix crashes on deep recursion.
+
+- gh-136870: Fix data races while de-instrumenting bytecode of code
+  objects running concurrently in threads.
+
+C API
+-----
+
+- gh-137573: Mark ``_PyOptimizer_Optimize`` as :c:macro:`Py_NO_INLINE` to
+  prevent stack overflow crashes on macOS.
+
+Build
+-----
+
+- gh-132339: Add support for OpenSSL 3.5.
+
+
+What's New in Python 3.14.0 release candidate 1?
+================================================
+
+*Release date: 2025-07-22*
+
+Tools/Demos
+-----------
+
+- gh-136251: Fixes and usability improvements for
+  ``Tools/wasm/emscripten/web_example``.
+
+Security
+--------
+
+- gh-135661: Fix parsing attributes with whitespaces around the ``=``
+  separator in :class:`html.parser.HTMLParser` according to the HTML5
+  standard.
+
+- gh-118350: Fix support of escapable raw text mode (elements "textarea"
+  and "title") in :class:`html.parser.HTMLParser`.
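+
+A sketch of the gh-137314 behavior above. The ``SpecEcho`` helper is
+hypothetical; its ``__format__`` simply returns the format spec it
+receives, making the parser's output visible::
+
+    class SpecEcho:
+        def __format__(self, spec):
+            return spec
+
+    obj = SpecEcho()
+    print(f"{obj:\xFF}")   # escape interpreted: one character, 'ÿ'
+    print(rf"{obj:\xFF}")  # raw f-string keeps the backslash: 4 characters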
+
+Library
+-------
+
+- gh-136170: Removed the unreleased ``zipfile.ZipFile.data_offset``
+  property added in 3.14.0a7, as it wasn't fully clear which behavior it
+  should have in some situations, so the result was not always what a user
+  might expect.
+
+- gh-124621: PyREPL now works in Emscripten.
+
+- gh-136874: Discard the URL query and fragment in
+  :func:`urllib.request.url2pathname`.
+
+- gh-130645: Enable color help by default in :mod:`argparse`.
+
+- gh-136549: Fix the signature of :func:`threading.excepthook`.
+
+- gh-136523: Fix :class:`wave.Wave_write` emitting an unraisable exception
+  when open raises.
+
+- gh-52876: Add the missing ``keepends`` (default ``True``) parameter to
+  :meth:`!codecs.StreamReaderWriter.readline` and
+  :meth:`!codecs.StreamReaderWriter.readlines`.
+
+- gh-136470: Correct :class:`concurrent.futures.InterpreterPoolExecutor`'s
+  default thread name.
+
+- gh-136476: Fix a bug that was causing the ``get_async_stack_trace``
+  function to miss some frames in the stack trace.
+
+- gh-136434: Fix docs generation of ``UnboundItem`` in
+  :mod:`concurrent.interpreters` when running with :option:`-OO`.
+
+- gh-136380: Raise :exc:`AttributeError` when accessing
+  :class:`concurrent.futures.InterpreterPoolExecutor` while
+  subinterpreters are not available.
+
+- gh-134759: Fix :exc:`UnboundLocalError` in
+  :func:`email.message.Message.get_payload` when the payload to decode is
+  a :class:`bytes` object. Patch by Kliment Lamonov.
+
+- gh-134657: :mod:`asyncio`: Remove some private names from
+  ``asyncio.__all__``.
+
+Core and Builtins
+-----------------
+
+- gh-136801: Fix PyREPL syntax highlighting of match cases after a
+  multi-line case. Contributed by Olga Matoula.
+
+Library
+-------
+
+- gh-136421: Fix a crash when initializing :mod:`datetime` concurrently.
+
+Core and Builtins
+-----------------
+
+- gh-136541: Fix some issues with the perf trampolines on x86-64 and
+  aarch64. The trampolines were not being generated correctly for some
+  cases, which could lead to the perf integration not working correctly.
+  Patch by Pablo Galindo.
+
+- gh-136517: Fixed a typo that prevented printing of uncollectable objects
+  when the :const:`gc.DEBUG_UNCOLLECTABLE` mode was set.
+
+- gh-136525: Fix an issue where per-thread bytecode was not instrumented
+  for newly created threads.
+
+- gh-132661: ``Interpolation.expression`` now has a default, the empty
+  string.
+
+- gh-132661: Reflect a recent :pep:`750` change.
+
+  Disallow concatenation of ``string.templatelib.Template`` and
+  :class:`str`. Also, disallow implicit concatenation of t-string literals
+  with string or f-string literals. See the sketch below.
+
+Library
+-------
+
+- gh-116738: Make functions in :mod:`grp` thread-safe on the :term:`free
+  threaded <free threading>` build.
+
+Core and Builtins
+-----------------
+
+- gh-135148: Fixed a bug where f-string debug expressions (using ``=``)
+  would incorrectly strip out parts of strings containing escaped quotes
+  and ``#`` characters. Patch by Pablo Galindo.
+
+- gh-133136: Limit excess memory usage in the :term:`free threading` build
+  when a large dictionary or list is resized and accessed by multiple
+  threads.
+
+- gh-91153: Fix a crash when a :class:`bytearray` is concurrently mutated
+  during item assignment.
+
+- gh-127971: Fix an off-by-one read beyond the end of a string in string
+  search.
+
+C API
+-----
+
+- gh-112068: Revert support of nullable arguments in :c:func:`PyArg_Parse`.
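+
+A minimal sketch of the :pep:`750` concatenation rule from gh-132661
+above (3.14+ syntax)::
+
+    name = "world"
+    t = t"hello {name}"  # a string.templatelib.Template
+    try:
+        t + "!"          # Template + str is disallowed
+    except TypeError as exc:
+        print(exc)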
+
+- gh-133296: New variants for the critical section API that accept one or
+  two :c:type:`PyMutex` pointers rather than :c:type:`PyObject` instances
+  are now public in the non-limited C API.
+
+- gh-134009: Expose :c:func:`PyMutex_IsLocked` as part of the public C
+  API.
+
+Build
+-----
+
+- gh-135621: PyREPL no longer depends on the :mod:`curses` standard
+  library. Contributed by Łukasz Langa.
+
+
+What's New in Python 3.14.0 beta 4?
+===================================
+
+*Release date: 2025-07-08*
+
+Tools/Demos
+-----------
+
+- gh-135968: Stubs for ``strip`` are now provided as part of an iOS
+  install.
+
+- gh-133600: Backport the file reorganization for Tools/wasm/wasi.
+
+  This should make backporting future code changes easier. It also
+  simplifies instructions around how to do WASI builds in the devguide.
+
+Tests
+-----
+
+- gh-135966: The iOS testbed now handles the ``app_packages`` folder as a
+  site directory.
+
+- gh-135494: Fix regrtest to support excluding tests from ``--pgo`` tests.
+  Patch by Victor Stinner.
+
+Security
+--------
+
+- gh-136053: :mod:`marshal`: fix a possible crash when deserializing
+  :class:`slice` objects.
+
+- gh-135661: Fix parsing start and end tags in
+  :class:`html.parser.HTMLParser` according to the HTML5 standard.
+
+  * Whitespaces are no longer accepted between ``</`` and the tag name.
+    E.g. ``</ script>`` does not end the script section.
+
+  * Vertical tabulation (``\v``) and non-ASCII whitespaces are no longer
+    recognized as whitespaces. The only whitespaces are ``\t\n\r\f`` and
+    space.
+
+  * The null character (U+0000) no longer ends the tag name.
+
+  * Attributes and slashes after the tag name in end tags are now ignored,
+    instead of terminating after the first ``>`` in a quoted attribute
+    value. E.g. ``</script/foo=">"/>``.
+
+  * Multiple slashes and whitespaces between the last attribute and the
+    closing ``>`` are now ignored in both start and end tags. E.g.
+    ``<a foo=bar/ //>``.
+
+  * Multiple ``=`` between attribute name and value are no longer
+    collapsed. E.g. ``<a foo==bar>`` produces attribute "foo" with value
+    "=bar".
+
+  * [Reverted in :gh:`136927`] Whitespaces between the ``=`` separator and
+    the attribute name or value are no longer ignored. E.g.
+    ``<a foo =bar>`` produces two attributes "foo" and "=bar", both with
+    value None; ``<a foo= bar>`` produces two attributes: "foo" with value
+    "" and "bar" with value None.
+
+- gh-102555: Fix comment parsing in :class:`html.parser.HTMLParser`
+  according to the HTML5 standard. ``--!>`` now ends the comment. ``-- >``
+  no longer ends the comment. Support abnormally ended empty comments
+  ``<-->`` and ``<--->``.
+
+Library
+-------
+
+- gh-136286: Fix pickling failures for protocols 0 and 1 for many objects
+  related to subinterpreters.
+
+- gh-136316: Improve support for evaluating nested forward references in
+  :func:`typing.evaluate_forward_ref`.
+
+- gh-85702: If ``zoneinfo._common.load_tzdata`` is given a package without
+  a resource, a :exc:`zoneinfo.ZoneInfoNotFoundError` is raised rather
+  than a :exc:`PermissionError`. Patch by Victor Stinner.
+
+- gh-136028: Fix parsing month names containing "İ" (U+0130, LATIN
+  CAPITAL LETTER I WITH DOT ABOVE) in :func:`time.strptime`. This affects
+  locales az_AZ, ber_DZ, ber_MA and crh_UA.
+
+- gh-135995: In the palmos encoding, make byte ``0x9b`` decode to ``›``
+  (U+203A - SINGLE RIGHT-POINTING ANGLE QUOTATION MARK); see the check
+  below.
+
+- gh-53203: Fix :func:`time.strptime` for the ``%c`` and ``%x`` formats on
+  locales byn_ER, wal_ET and lzh_TW, and for the ``%X`` format on locales
+  ar_SA, bg_BG and lzh_TW.
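+
+A one-line check of the gh-135995 change above::
+
+    print(b"\x9b".decode("palmos"))  # '›' (U+203A)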
+
+- gh-91555: An earlier change, which was introduced in 3.14.0b2, has been
+  reverted. It disabled logging for a logger during handling of log
+  messages for that logger. Since the reversion, the behaviour should be
+  as it was before 3.14.0b2.
+
+- gh-135878: Fixes a crash of :class:`types.SimpleNamespace` on
+  :term:`free threading` builds, when several threads were calling its
+  :meth:`~object.__repr__` method at the same time.
+
+- gh-135836: Fix :exc:`IndexError` in
+  :meth:`asyncio.loop.create_connection` that could occur when a non-\
+  :exc:`OSError` exception is raised during connection and the socket's
+  ``close()`` raises :exc:`!OSError`.
+
+- gh-135836: Fix :exc:`IndexError` in
+  :meth:`asyncio.loop.create_connection` that could occur when the Happy
+  Eyeballs algorithm resulted in an empty exceptions list during
+  connection attempts.
+
+- gh-135855: Raise :exc:`TypeError` instead of :exc:`SystemError` when
+  :func:`!_interpreters.set___main___attrs` is passed a non-dict object.
+  Patch by Brian Schubert.
+
+- gh-135815: :mod:`netrc`: skip security checks if :func:`os.getuid` is
+  missing. Patch by Bénédikt Tran.
+
+- gh-135640: Address a bug where it was possible to call
+  :func:`xml.etree.ElementTree.ElementTree.write` on an ElementTree object
+  with an invalid root element. This behavior blanked the file passed to
+  ``write`` if it already existed.
+
+- gh-135645: Added the ``supports_isolated_interpreters`` field to
+  :data:`sys.implementation`.
+
+- gh-135646: Raise consistent :exc:`NameError` exceptions in
+  :func:`annotationlib.ForwardRef.evaluate`.
+
+- gh-135557: Fix races on :mod:`heapq` updates and :class:`list` reads on
+  the :term:`free threaded <free threading>` build.
+
+- gh-119180: Only fetch globals and locals if necessary in
+  :func:`annotationlib.get_annotations`.
+
+- gh-135561: Fix a crash on DEBUG builds when an HACL* HMAC routine fails.
+  Patch by Bénédikt Tran.
+
+- gh-135487: Fix :meth:`!reprlib.Repr.repr_int` when given integers with
+  more than :func:`sys.get_int_max_str_digits` digits. Patch by Bénédikt
+  Tran.
+
+- gh-135335: :mod:`multiprocessing`: Flush ``stdout`` and ``stderr`` after
+  preloading modules in the ``forkserver``.
+
+- gh-135069: Fix the "Invalid error handling" exception in
+  :class:`!encodings.idna.IncrementalDecoder` to correctly replace the
+  'errors' parameter.
+
+- gh-130662: Accept leading zeros in precision and width fields for
+  :class:`~decimal.Decimal` formatting, for example
+  ``format(Decimal(1.25), '.016f')``; see the sketch below.
+
+- gh-130662: Accept leading zeros in precision and width fields for
+  :class:`~fractions.Fraction` formatting, for example
+  ``format(Fraction(1, 3), '.016f')``.
+
+- gh-87790: Support underscore and comma as thousands separators in the
+  fractional part for :class:`~fractions.Fraction`'s formatting. Patch by
+  Sergey B Kirpichev.
+
+- gh-87790: Support underscore and comma as thousands separators in the
+  fractional part for :class:`~decimal.Decimal`'s formatting. Patch by
+  Sergey B Kirpichev.
+
+- gh-130664: Handle a corner case of :class:`~fractions.Fraction`
+  formatting: treat zero-padding (preceding the width field by a zero
+  (``'0'``) character) as equivalent to a fill character of ``'0'`` with
+  an alignment type of ``'='``, just as in the case of :class:`float`.
+
+Documentation
+-------------
+
+- gh-136155: EPUB builds are fixed by excluding non-XHTML-compatible tags.
+
+Core and Builtins
+-----------------
+
+- gh-109700: Fix memory error handling in :c:func:`PyDict_SetDefault`.
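+
+A sketch of the gh-130662 and gh-87790 formatting changes above; the
+expected outputs are assumptions based on the entries::
+
+    from decimal import Decimal
+    from fractions import Fraction
+
+    # Leading zeros in the precision field are accepted (gh-130662):
+    print(format(Decimal("1.25"), ".016f"))  # 1.2500000000000000
+    print(format(Fraction(1, 3), ".016f"))
+
+    # Thousands separators in the fractional part (gh-87790):
+    print(format(Decimal("1234567.123456"), ",.6_f"))
+    # 1,234,567.123_456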
+
+- gh-78465: Fix the error message for ``cls.__new__(cls, ...)`` where
+  ``cls`` is a non-instantiable builtin or extension type (with ``tp_new``
+  set to ``NULL``).
+
+- gh-129958: Differentiate between t-strings and f-strings in the syntax
+  error for newlines in format specifiers of single-quoted interpolated
+  strings.
+
+- gh-135871: Non-blocking mutex lock attempts now return immediately when
+  the lock is busy instead of briefly spinning in the :term:`free
+  threading` build.
+
+- gh-135106: Restrict the trashcan mechanism to GC'ed objects and untrack
+  them while in the trashcan to prevent the GC and trashcan mechanisms
+  from conflicting.
+
+- gh-135607: Fix potential :mod:`weakref` races in an object's destructor
+  on the :term:`free threaded <free threading>` build.
+
+- gh-135608: Fix a crash in the JIT involving attributes of modules.
+
+- gh-135543: Emit a ``sys.remote_exec`` audit event when
+  :func:`sys.remote_exec` is called and migrate ``remote_debugger_script``
+  to ``cpython.remote_debugger_script``.
+
+- gh-134280: Disable constant folding for ``~`` with a boolean argument.
+  This moves the deprecation warning from compile time to runtime.
+
+C API
+-----
+
+- gh-135906: Fix compilation errors when compiling the internal headers
+  with a C++ compiler.
+
+Build
+-----
+
+- gh-134273: Add support for configuring compiler flags for the JIT with
+  ``CFLAGS_JIT``.
+
+
+What's New in Python 3.14.0 beta 3?
+===================================
+
+*Release date: 2025-06-17*
+
+Windows
+-------
+
+- gh-135099: Fix a crash that could occur on Windows when a background
+  thread waits on a :c:type:`PyMutex` while the main thread is shutting
+  down the interpreter.
+
+Tests
+-----
+
+- gh-132815: Fix test__opcode: add ``JUMP_BACKWARD`` to specialization
+  stats.
+
+- gh-135489: Show verbose output for failing tests during the PGO
+  profiling step with ``--enable-optimizations``.
+
+- gh-135120: Add :func:`!test.support.subTests`.
+
+Security
+--------
+
+- gh-135462: Fix quadratic complexity in processing specially crafted
+  input in :class:`html.parser.HTMLParser`. End-of-file errors are now
+  handled according to the HTML5 specs -- comments and declarations are
+  automatically closed, tags are ignored.
+
+- gh-135034: Fixes multiple issues that allowed ``tarfile`` extraction
+  filters (``filter="data"`` and ``filter="tar"``) to be bypassed using
+  crafted symlinks and hard links.
+
+  Addresses :cve:`2024-12718`, :cve:`2025-4138`, :cve:`2025-4330`, and
+  :cve:`2025-4517`.
+
+Library
+-------
+
+- gh-65697: :class:`configparser`'s error message when attempting to write
+  an invalid key is now more helpful.
+
+- gh-135497: Fix :func:`os.getlogin` failing for longer usernames on
+  BSD-based platforms.
+
+- gh-135429: Fix the argument mismatch in ``_lsprof`` for the ``PY_THROW``
+  event.
+
+- gh-135368: Fix :class:`unittest.mock.Mock` generation on
+  :func:`dataclasses.dataclass` objects. Now all special attributes are
+  set as they were before :gh:`124429`.
+
+- gh-133967: Do not normalize the :mod:`locale` name 'C.UTF-8' to
+  'en_US.UTF-8'.
+
+- gh-135321: Raise a correct exception for values greater than 0x7fffffff
+  for the ``BINSTRING`` opcode in the C implementation of :mod:`pickle`;
+  see the sketch below.
+
+- gh-135276: Backported bugfixes in ``zipfile.Path`` from zipp 3.23. Fixed
+  ``.name``, ``.stem`` and other basename-based properties on Windows when
+  working with a zipfile on disk.
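+
+A hypothetical crafted pickle frame illustrating gh-135321 above: a
+``BINSTRING`` opcode (``'T'``) whose 4-byte length field exceeds
+0x7fffffff now raises a proper exception rather than a wrong one::
+
+    import pickle
+
+    frame = b"T" + (0x80000000).to_bytes(4, "little") + b"."
+    try:
+        pickle.loads(frame)
+    except Exception as exc:
+        print(type(exc).__name__, exc)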
+
+- gh-135244: :mod:`uuid`: when the MAC address cannot be determined, the
+  48-bit node ID is now generated with a cryptographically-secure
+  pseudo-random number generator (CSPRNG) as per :rfc:`RFC 9562, §6.10.3
+  <9562#section-6.10-3>`. This affects :func:`~uuid.uuid1` and
+  :func:`~uuid.uuid6`.
+
+- gh-134970: Fix the "unknown action" exception in
+  :meth:`argparse.ArgumentParser.add_argument_group` to correctly replace
+  the action class.
+
+- gh-134718: :func:`ast.dump` now only omits ``None`` and ``[]`` values if
+  they are default values.
+
+- gh-134939: Add the :mod:`concurrent.interpreters` module. See :pep:`734`
+  (a usage sketch follows this group of entries).
+
+- gh-134885: Fix possible crash in the :mod:`compression.zstd` module
+  related to setting parameter types. Patch by Jelle Zijlstra.
+
+- gh-134857: Improve error report for :mod:`doctest`\ s run with
+  :mod:`unittest`. Remove :mod:`!doctest` module frames from tracebacks
+  and the redundant newline character from a failure message.
+
+- gh-128840: Fix parsing long IPv6 addresses with an embedded IPv4
+  address.
+
+- gh-134637: Fix performance regression in calling a :mod:`ctypes`
+  function pointer in :term:`free threading`.
+
+- gh-134696: Built-in HACL* and OpenSSL implementations of hash function
+  constructors now correctly accept the same *documented* named arguments.
+  For instance, :func:`~hashlib.md5` could previously be invoked as
+  ``md5(data=data)`` or ``md5(string=string)`` depending on the underlying
+  implementation, but these calls were not compatible. Patch by Bénédikt
+  Tran.
+
+- gh-134151: :mod:`email`: Fix :exc:`TypeError` in
+  :func:`email.utils.decode_params` when sorting :rfc:`2231` continuations
+  that contain an unnumbered section.
+
+- gh-134210: :func:`curses.window.getch` now correctly handles signals.
+  Patch by Bénédikt Tran.
+
+- gh-134152: :mod:`email`: Fix parsing of email message ID with an invalid
+  domain.
+
+- gh-133489: :func:`random.getrandbits` can now generate more than 2\
+  :sup:`31` bits. :func:`random.randbytes` can now generate more than 256
+  MiB.
+
+- gh-132813: Improve error messages for incorrect types and values of
+  :class:`csv.Dialect` attributes.
+
+- gh-132969: Prevent the :class:`~concurrent.futures.ProcessPoolExecutor`
+  executor thread, which remains running when :meth:`shutdown(wait=False)
+  <concurrent.futures.Executor.shutdown>` is called, from attempting to
+  adjust the pool's worker processes after the object state has already
+  been reset during shutdown. A combination of conditions, including a
+  worker process having terminated abnormally, resulted in an exception
+  and a potential hang when the still-running executor thread attempted
+  to replace dead workers within the pool.
+
+- gh-127081: Fix libc thread safety issues with :mod:`os` by replacing
+  ``getlogin`` with the re-entrant ``getlogin_r`` version.
+
+- gh-131884: Fix formatting issues in :func:`json.dump` when both *indent*
+  and *skipkeys* are used.
+
+- gh-130999: Avoid exiting the new REPL and offer suggestions even if
+  there are non-string candidates when errors occur.
+
+Documentation
+-------------
+
+- gh-135171: Document that the :term:`iterator` for the leftmost
+  :keyword:`!for` clause in the generator expression is created
+  immediately.
+
+- bpo-45210: Document that the error indicator may be set in
+  ``tp_dealloc``, and how to avoid clobbering it.
+
+Core and Builtins
+-----------------
+
+- gh-135496: Fix typo in the f-string conversion type error
+  ("exclamanation" -> "exclamation").
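+
+A hedged usage sketch for the new :mod:`concurrent.interpreters` module
+added by gh-134939 above; the names used here (``create()``, ``exec()``,
+``close()``) follow :pep:`734` and are assumptions, not verified against
+the final API::
+
+    from concurrent import interpreters
+
+    interp = interpreters.create()  # spawn an isolated subinterpreter
+    try:
+        interp.exec("print('hello from a subinterpreter')")
+    finally:
+        interp.close()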
+
+- gh-135371: Fixed :mod:`asyncio` debugging tools to properly display
+  internal coroutine call stacks alongside external task dependencies. The
+  ``python -m asyncio ps`` and ``python -m asyncio pstree`` commands now
+  show complete execution context. Patch by Pablo Galindo.
+
+Library
+-------
+
+- gh-127319: Set the ``allow_reuse_port`` class variable to ``False`` on
+  the XMLRPC, logging, and HTTP servers. This matches the behavior in
+  prior Python releases, which is to not allow port reuse.
+
+Core and Builtins
+-----------------
+
+- gh-135171: Reverts the behavior of async generator expressions created
+  with an object lacking an ``__aiter__`` method to the pre-3.13 behavior
+  of raising a :exc:`TypeError`.
+
+- gh-130077: Properly raise custom syntax errors when incorrect syntax
+  containing names that are prefixes of soft keywords is encountered.
+  Patch by Pablo Galindo.
+
+- gh-135171: Reverts the behavior of generator expressions when created
+  with a non-iterable to the pre-3.13 behavior of raising a
+  :exc:`TypeError` (see the sketch after this group of entries). It is no
+  longer possible to cause a crash in the debugger by altering the
+  generator expression's local variables. This is achieved by moving the
+  ``GET_ITER`` instruction back to the creation of the generator
+  expression and adding an additional check to ``FOR_ITER``.
+
+Library
+-------
+
+- gh-116738: Make methods in :mod:`heapq` thread-safe on the :term:`free
+  threaded <free threading>` build.
+
+Core and Builtins
+-----------------
+
+- gh-134876: Add support to :pep:`768` remote debugging for Linux kernels
+  which don't have ``CONFIG_CROSS_MEMORY_ATTACH`` configured.
+
+- gh-134889: Fix handling of a few opcodes that leave operands on the
+  stack when optimizing ``LOAD_FAST``.
+
+Library
+-------
+
+- gh-134908: Fix crash when iterating over lines in a text file on the
+  :term:`free threaded <free threading>` build.
+
+Core and Builtins
+-----------------
+
+- gh-132617: Fix a :meth:`dict.update` modification check that could
+  incorrectly raise a "dict mutated during update" error when a different
+  dictionary was modified that happens to share the same underlying keys
+  object.
+
+- gh-134679: Fix crash in the :term:`free threading` build's QSBR code
+  that could occur when changing an object's ``__dict__`` attribute.
+
+- gh-127682: No longer call ``__iter__`` twice in list comprehensions.
+  This brings the behavior of list comprehensions in line with other
+  forms of iteration.
+
+- gh-133912: Fix the C API function ``PyObject_GenericSetDict`` to handle
+  extension classes with inline values.
+
+C API
+-----
+
+- gh-134989: Fix ``Py_RETURN_NONE``, ``Py_RETURN_TRUE`` and
+  ``Py_RETURN_FALSE`` macros in the limited C API 3.11 and older: don't
+  treat ``Py_None``, ``Py_True`` and ``Py_False`` as immortal. Patch by
+  Victor Stinner.
+
+- gh-134989: Implement :c:func:`PyObject_DelAttr` and
+  :c:func:`PyObject_DelAttrString` as macros in the limited C API 3.12 and
+  older. Patch by Victor Stinner.
+
+- gh-133968: Add :c:func:`PyUnicodeWriter_WriteASCII` function to write an
+  ASCII string into a :c:type:`PyUnicodeWriter`. The function is faster
+  than :c:func:`PyUnicodeWriter_WriteUTF8`, but has undefined behavior if
+  the input string contains non-ASCII characters. Patch by Victor Stinner.
+
+Build
+-----
+
+- gh-119132: Remove the "experimental" tag from the CPython free-threading
+  build.
+
+- gh-135497: Fix the detection of ``MAXLOGNAME`` in the ``configure.ac``
+  script.
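+
+A short sketch of the reverted generator-expression behavior from the
+gh-135171 entries above, assuming the pre-3.13 semantics they describe::
+
+    # The TypeError is raised at creation time again, not at first next().
+    try:
+        gen = (x for x in 42)
+    except TypeError:
+        print("non-iterable rejected when the genexp is created")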
+
+- gh-134923: Windows builds with profile-guided optimization enabled now
+  use ``/GENPROFILE`` and ``/USEPROFILE`` instead of the deprecated
+  ``/LTCG:`` options.
+
+- gh-134774: Fix :c:macro:`Py_DEBUG` macro redefinition warnings on
+  Windows debug builds. Patch by Chris Eibl.
+
+- gh-134632: Fixed ``build-details.json`` generation to use ``INCLUDEPY``,
+  in order to reference the ``pythonX.Y`` subdirectory of the include
+  directory, as required in :pep:`739`, instead of the top-level include
+  directory.
+
+
+What's New in Python 3.14.0 beta 2?
+===================================
+
+*Release date: 2025-05-26*
+
+Windows
+-------
+
+- gh-130727: Fix a race in internal calls into WMI that can result in an
+  "invalid handle" exception under high load. Patch by Chris Eibl.
+
+- gh-76023: Make :func:`os.path.realpath` ignore Windows error 1005 when
+  in non-strict mode.
+
+- gh-133779: Reverts the change to generate different :file:`pyconfig.h`
+  files based on compiler settings, as it was frequently causing extension
+  builds to break. In particular, the ``Py_GIL_DISABLED`` preprocessor
+  variable must now always be defined explicitly when compiling for the
+  experimental free-threaded runtime. The :func:`sysconfig.get_config_var`
+  function can be used to determine whether the current runtime was
+  compiled with that flag or not.
+
+- gh-133626: Ensures packages are not accidentally bundled into the
+  traditional installer.
+
+Tools/Demos
+-----------
+
+- gh-134215: :term:`REPL` import autocomplete only suggests private
+  modules when explicitly specified.
+
+Tests
+-----
+
+- gh-133744: Fix the multiprocessing interrupt test. Add an event to
+  synchronize the parent process with the child process: wait until the
+  child process starts sleeping. Patch by Victor Stinner.
+
+- gh-133682: Fixed test case
+  ``test.test_annotationlib.TestStringFormat.test_displays`` which ensures
+  proper handling of complex data structures (lists, sets, dictionaries,
+  and tuples) in string annotations.
+
+- gh-133639: Fix ``TestPyReplAutoindent.test_auto_indent_default()`` not
+  running ``input_code``.
+
+Security
+--------
+
+- gh-133767: Fix use-after-free in the "unicode-escape" decoder with a
+  non-"strict" error handler.
+
+- gh-128840: Short-circuit the processing of long IPv6 addresses early in
+  :mod:`ipaddress` to prevent excessive memory consumption and a minor
+  denial-of-service.
+
+Library
+-------
+
+- gh-132710: If possible, ensure that :func:`uuid.getnode` returns the
+  same result even across different processes. Previously, the result was
+  constant only within the same process. Patch by Bénédikt Tran.
+
+- gh-80334: :func:`multiprocessing.freeze_support` now checks for work on
+  any "spawn" start method platform rather than only on Windows.
+
+- gh-134582: Fix ``tokenize.untokenize()`` round-trip errors related to
+  brace escaping in t-strings.
+
+- gh-134546: Ensure the :mod:`pdb` remote debugging script is readable by
+  the remote Python process.
+
+- gh-134451: Converted ``asyncio.tools.CycleFoundException`` from a
+  dataclass to a regular exception type.
+
+- gh-114177: Fix :mod:`asyncio` to not close subprocess pipes which would
+  otherwise error out when the event loop is already closed.
+
+- gh-90871: Fixed an off-by-one error concerning the backlog parameter in
+  :meth:`~asyncio.loop.create_unix_server`. Contributed by Christian
+  Harries.
+
+- gh-134323: Fix the :meth:`threading.RLock.locked` method (see the sketch
+  after this group of entries).
+
+- gh-86802: Fixed asyncio memory leak in cancelled shield tasks. For
+  shielded tasks where the shield was cancelled, log potential exceptions
+  through the exception handler. Contributed by Christian Harries.
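+
+An illustrative sketch for the gh-134323 entry above; the expected values
+are assumptions based on the documented semantics of
+:meth:`!threading.RLock.locked`, not taken from the entry itself::
+
+    import threading
+
+    lock = threading.RLock()
+    print(lock.locked())      # assumed False: not held by any thread
+    with lock:
+        print(lock.locked())  # assumed True while the lock is held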
+
+- gh-134209: :mod:`curses`: The :meth:`curses.window.instr` and
+  :meth:`curses.window.getstr` methods now allocate their internal buffer
+  on the heap instead of the stack; in addition, the max buffer size is
+  increased from 1023 to 2047.
+
+- gh-134235: Updated tab completion in the REPL to include builtin
+  modules. Contributed by Tom Wang and Hunter Young.
+
+- gh-134152: Fixed :exc:`UnboundLocalError` that could occur during
+  :mod:`email` header parsing if an expected trailing delimiter is missing
+  in some contexts.
+
+- gh-134168: :mod:`http.server`: Fix IPv6 address binding and
+  :option:`--directory <http.server --directory>` handling when using
+  HTTPS.
+
+- gh-62184: Remove the import of the C implementation of
+  :class:`io.FileIO` from the Python implementation of :mod:`io`, which
+  has its own implementation.
+
+- gh-133982: Emit :exc:`RuntimeWarning` in the Python implementation of
+  :mod:`io` when the :term:`file-like object <file object>` is not closed
+  explicitly in the presence of multiple I/O layers.
+
+- gh-133890: The :mod:`tarfile` module now handles
+  :exc:`UnicodeEncodeError` in the same way as :exc:`OSError` when it
+  cannot extract a member.
+
+- gh-134097: Fix interaction of the new :term:`REPL` and the :option:`-X
+  showrefcount <-X>` command line option.
+
+- gh-133889: The generated directory listing page in
+  :class:`http.server.SimpleHTTPRequestHandler` now only shows the decoded
+  path component of the requested URL, and not the query and fragment.
+
+- gh-134098: Fix handling of paths that end with a percent-encoded slash
+  (``%2f`` or ``%2F``) in :class:`http.server.SimpleHTTPRequestHandler`.
+
+- gh-132124: On POSIX-compliant systems,
+  :func:`!multiprocessing.util.get_temp_dir` now ignores :envvar:`TMPDIR`
+  (and similar environment variables) if the path length of ``AF_UNIX``
+  socket files exceeds the platform-specific maximum length when using
+  the :ref:`forkserver <multiprocessing-start-methods>` start method.
+  Patch by Bénédikt Tran.
+
+- gh-134062: :mod:`ipaddress`: fix collisions in :meth:`~object.__hash__`
+  for :class:`~ipaddress.IPv4Network` and :class:`~ipaddress.IPv6Network`
+  objects.
+
+- gh-133970: Make :class:`!string.templatelib.Template` and
+  :class:`!string.templatelib.Interpolation` generic.
+
+- gh-71253: Raise :exc:`ValueError` in :func:`open` if *opener* returns a
+  negative file descriptor in the Python implementation of :mod:`io` to
+  match the C implementation.
+
+- gh-133960: Simplify and improve :func:`typing.evaluate_forward_ref`. It
+  no longer raises errors on certain invalid types. In several situations,
+  it is now able to evaluate forward references that were previously
+  unsupported.
+
+- gh-133925: Make the private class ``typing._UnionGenericAlias``
+  hashable.
+
+- gh-133653: Fix :class:`argparse.ArgumentParser` with the
+  *formatter_class* argument. Fix :exc:`TypeError` when *formatter_class*
+  is a custom subclass of :class:`!HelpFormatter`. Fix :exc:`TypeError`
+  when *formatter_class* is not a subclass of :class:`!HelpFormatter` and
+  a non-standard *prefix_char* is used. Fix support of colorizing when
+  *formatter_class* is not a subclass of :class:`!HelpFormatter`.
+
+- gh-132641: Fixed a race in :func:`functools.lru_cache` under
+  free-threading.
+
+- gh-133783: Fix bug with applying :func:`copy.replace` to :mod:`ast`
+  objects. Attributes that default to ``None`` were incorrectly treated
+  as required for manually created AST nodes (see the sketch after this
+  entry).
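+
+A minimal sketch of the gh-133783 fix above, assuming :func:`copy.replace`
+support for AST nodes as the entry describes::
+
+    import ast
+    import copy
+
+    # A manually created node leaves optional attributes unset.
+    node = ast.Constant(value=1)
+    # This no longer fails: None-defaulted attributes are treated as
+    # optional rather than required.
+    new_node = copy.replace(node, value=2)
+    print(new_node.value)  # 2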
+
+- gh-133684: Fix bug where :func:`annotationlib.get_annotations` would
+  return the wrong result for certain classes that are part of a class
+  hierarchy where ``from __future__ import annotations`` is used.
+
+- gh-77057: Fix handling of invalid markup declarations in
+  :class:`html.parser.HTMLParser`.
+
+- gh-130328: Speedup pasting in ``PyREPL`` on Windows in a legacy console.
+  Patch by Chris Eibl.
+
+- gh-133701: Fix bug where :class:`typing.TypedDict` classes defined under
+  ``from __future__ import annotations`` and inheriting from another
+  ``TypedDict`` had an incorrect ``__annotations__`` attribute.
+
+- gh-133581: Improve unparsing of t-strings in :func:`ast.unparse` and
+  ``from __future__ import annotations``. Empty t-strings now round-trip
+  correctly and formatting in interpolations is preserved. Patch by Jelle
+  Zijlstra.
+
+- gh-133551: Support t-strings (:pep:`750`) in :mod:`annotationlib`. Patch
+  by Jelle Zijlstra.
+
+- gh-133439: Fix a bug where dot commands with trailing spaces were
+  mistaken for multi-line SQL statements in the :mod:`sqlite3`
+  command-line interface.
+
+- gh-132493: Avoid accessing ``__annotations__`` unnecessarily in
+  :func:`inspect.signature`.
+
+- gh-132876: ``ldexp()`` on Windows doesn't round subnormal results before
+  Windows 11, but should. Python's :func:`math.ldexp` wrapper now does
+  round them, so results may change slightly, in rare cases of very small
+  results, on Windows versions before 11.
+
+- gh-133009: :mod:`xml.etree.ElementTree`: Fix a crash in
+  :meth:`!Element.__deepcopy__` when the element is concurrently mutated.
+  Patch by Bénédikt Tran.
+
+- gh-91555: Ignore log messages generated during handling of log messages,
+  to avoid deadlock or infinite recursion. [NOTE: This change has since
+  been reverted.]
+
+- gh-125028: :data:`functools.Placeholder` cannot be passed to
+  :func:`functools.partial` as a keyword argument.
+
+- gh-62824: Fix aliases for the ``iso8859_8`` encoding. Patch by Dave
+  Goncalves.
+
+- gh-86155: :meth:`html.parser.HTMLParser.close` no longer loses data when
+  the ``